Pull misc-2.6.39 into release branch
diff --git a/.gitignore b/.gitignore
index 8faa6c0..5d56a3f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@
 *.gz
 *.bz2
 *.lzma
+*.xz
 *.lzo
 *.patch
 *.gcno
diff --git a/.mailmap b/.mailmap
index 581fd39..1eba28a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -23,6 +23,7 @@
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
 Axel Dyks <xl@xlsigned.net>
+Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
diff --git a/CREDITS b/CREDITS
index 494b6e4..1d39a6d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2811,8 +2811,8 @@
 N: Stelian Pop
 E: stelian@popies.net
 P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0  D3F7 7185 9E7A EDBB 6147
-D: sonypi, meye drivers, mct_u232 usb serial hacks
-S: Paris, France
+D: random kernel hacks
+S: Paimpont, France
 
 N: Pete Popov
 E: pete_popov@yahoo.com
diff --git a/Documentation/ABI/stable/thermal-notification b/Documentation/ABI/stable/thermal-notification
new file mode 100644
index 0000000..9723e8b
--- /dev/null
+++ b/Documentation/ABI/stable/thermal-notification
@@ -0,0 +1,4 @@
+What:		A notification mechanism for thermal related events
+Description:
+	This interface enables notification for thermal related events.
+	The notification is in the form of a netlink event.
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 9e4541d..edff663 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -26,3 +26,12 @@
 		scheduler is chosen. Trigger specific parameters can appear in
 		/sys/class/leds/<led> once a given trigger is selected.
 
+What:		/sys/class/leds/<led>/inverted
+Date:		January 2011
+KernelVersion:	2.6.38
+Contact:	Richard Purdie <rpurdie@rpsys.net>
+Description:
+		Invert the LED on/off state. This parameter is specific to
+		gpio and backlight triggers. In case of the backlight trigger,
+		it is useful when driving a LED which is intended to indicate
+		a device in a standby-like state.
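
A minimal illustrative sketch (not from the kernel tree): setting the
attribute described above from userspace C. The LED name "myled" is a
placeholder for whatever actually appears under /sys/class/leds on the
target system.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical LED name; adjust to an existing /sys/class/leds entry */
	int fd = open("/sys/class/leds/myled/inverted", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* "1" inverts, "0" restores */
		perror("write");
	close(fd);
	return 0;
}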
diff --git a/Documentation/ABI/testing/sysfs-platform-at91 b/Documentation/ABI/testing/sysfs-platform-at91
new file mode 100644
index 0000000..4cc6a86
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-at91
@@ -0,0 +1,25 @@
+What:		/sys/devices/platform/at91_can/net/<iface>/mb0_id
+Date:		January 2011
+KernelVersion:	2.6.38
+Contact:	Marc Kleine-Budde <kernel@pengutronix.de>
+Description:
+		Value representing the can_id of mailbox 0.
+
+		Default: 0x7ff (standard frame)
+
+		Due to a chip bug (errata 50.2.6.3 & 50.3.5.3 in
+		"AT91SAM9263 Preliminary 6249H-ATARM-27-Jul-09") the
+		contents of mailbox 0 may be sent under certain
+		conditions (even if disabled or in rx mode).
+
+		The workaround in the errata suggests not to use the
+		mailbox and load it with an unused identifier.
+
+		In order to use an extended can_id add the
+		CAN_EFF_FLAG (0x80000000U) to the can_id. Example:
+
+		- standard id 0x7ff:
+		echo 0x7ff      > /sys/class/net/can0/mb0_id
+
+		- extended id 0x1fffffff:
+		echo 0x9fffffff > /sys/class/net/can0/mb0_id
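
A minimal illustrative sketch (not from the kernel tree): the same
extended-id write done from C. CAN_EFF_FLAG and canid_t come from
<linux/can.h>; "can0" is the interface name used in the examples above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/can.h>

int main(void)
{
	canid_t id = 0x1fffffff | CAN_EFF_FLAG;	/* 0x9fffffff, as above */
	char buf[16];
	int len = snprintf(buf, sizeof(buf), "0x%x\n", (unsigned int)id);
	int fd = open("/sys/class/net/can0/mb0_id", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, len) != len)
		perror("write");
	close(fd);
	return 0;
}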
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 03641a0..8906648 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -268,10 +268,6 @@
 !Finclude/net/mac80211.h ieee80211_ops
 !Finclude/net/mac80211.h ieee80211_alloc_hw
 !Finclude/net/mac80211.h ieee80211_register_hw
-!Finclude/net/mac80211.h ieee80211_get_tx_led_name
-!Finclude/net/mac80211.h ieee80211_get_rx_led_name
-!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
-!Finclude/net/mac80211.h ieee80211_get_radio_led_name
 !Finclude/net/mac80211.h ieee80211_unregister_hw
 !Finclude/net/mac80211.h ieee80211_free_hw
       </chapter>
@@ -382,6 +378,23 @@
         </para>
       </partintro>
 
+      <chapter id="led-support">
+        <title>LED support</title>
+        <para>
+         Mac80211 supports various ways of blinking LEDs. Wherever possible,
+         device LEDs should be exposed as LED class devices and hooked up to
+         the appropriate trigger, which will then be triggered appropriately
+         by mac80211.
+        </para>
+!Finclude/net/mac80211.h ieee80211_get_tx_led_name
+!Finclude/net/mac80211.h ieee80211_get_rx_led_name
+!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
+!Finclude/net/mac80211.h ieee80211_get_radio_led_name
+!Finclude/net/mac80211.h ieee80211_tpt_blink
+!Finclude/net/mac80211.h ieee80211_tpt_led_trigger_flags
+!Finclude/net/mac80211.h ieee80211_create_tpt_led_trigger
+      </chapter>
+
       <chapter id="hardware-crypto-offload">
         <title>Hardware crypto acceleration</title>
 !Pinclude/net/mac80211.h Hardware crypto acceleration
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 35447e0..36f63d4 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -217,8 +217,8 @@
   <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
 !Iinclude/linux/serial_core.h
-!Edrivers/serial/serial_core.c
-!Edrivers/serial/8250.c
+!Edrivers/tty/serial/serial_core.c
+!Edrivers/tty/serial/8250.c
   </chapter>
 
   <chapter id="fbdev">
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2861055..c279158 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -73,8 +73,8 @@
       services.
     </para>
     <para>
-      The core of every DRM driver is struct drm_device.  Drivers
-      will typically statically initialize a drm_device structure,
+      The core of every DRM driver is struct drm_driver.  Drivers
+      will typically statically initialize a drm_driver structure,
       then pass it to drm_init() at load time.
     </para>
 
@@ -84,7 +84,7 @@
     <title>Driver initialization</title>
     <para>
       Before calling the DRM initialization routines, the driver must
-      first create and fill out a struct drm_device structure.
+      first create and fill out a struct drm_driver structure.
     </para>
     <programlisting>
       static struct drm_driver driver = {
diff --git a/Documentation/DocBook/dvb/dvbapi.xml b/Documentation/DocBook/dvb/dvbapi.xml
index e3a97fd..ad8678d 100644
--- a/Documentation/DocBook/dvb/dvbapi.xml
+++ b/Documentation/DocBook/dvb/dvbapi.xml
@@ -28,7 +28,7 @@
 	<holder>Convergence GmbH</holder>
 </copyright>
 <copyright>
-	<year>2009-2010</year>
+	<year>2009-2011</year>
 	<holder>Mauro Carvalho Chehab</holder>
 </copyright>
 
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 5e87ad5..f51f285 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -82,6 +82,11 @@
      </sect1>
   </chapter>
 
+  <chapter id="fs_events">
+     <title>Events based on file descriptors</title>
+!Efs/eventfd.c
+  </chapter>
+
   <chapter id="sysfs">
      <title>The Filesystem for Exporting Kernel Objects</title>
 !Efs/sysfs/file.c
diff --git a/Documentation/DocBook/media.tmpl b/Documentation/DocBook/media.tmpl
index f11048d..a99088a 100644
--- a/Documentation/DocBook/media.tmpl
+++ b/Documentation/DocBook/media.tmpl
@@ -28,7 +28,7 @@
 <title>LINUX MEDIA INFRASTRUCTURE API</title>
 
 <copyright>
-	<year>2009-2010</year>
+	<year>2009-2011</year>
 	<holder>LinuxTV Developers</holder>
 </copyright>
 
@@ -86,7 +86,7 @@
 </author>
 </authorgroup>
 <copyright>
-	<year>2009-2010</year>
+	<year>2009-2011</year>
 	<holder>Mauro Carvalho Chehab</holder>
 </copyright>
 
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 020ac80..620eb3f 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -250,7 +250,7 @@
 		<title>Device ready function</title>
 		<para>
 			If the hardware interface has the ready busy pin of the NAND chip connected to a
-			GPIO or other accesible I/O pin, this function is used to read back the state of the
+			GPIO or other accessible I/O pin, this function is used to read back the state of the
 			pin. The function has no arguments and should return 0, if the device is busy (R/B pin 
 			is low) and 1, if the device is ready (R/B pin is high).
 			If the hardware interface does not give access to the ready busy pin, then
diff --git a/Documentation/DocBook/v4l/dev-rds.xml b/Documentation/DocBook/v4l/dev-rds.xml
index 360d273..2427f54 100644
--- a/Documentation/DocBook/v4l/dev-rds.xml
+++ b/Documentation/DocBook/v4l/dev-rds.xml
@@ -75,6 +75,7 @@
   </section>
 
   <section>
+    <title>RDS data structures</title>
     <table frame="none" pgwide="1" id="v4l2-rds-data">
       <title>struct
 <structname>v4l2_rds_data</structname></title>
@@ -129,10 +130,11 @@
 
     <table frame="none" pgwide="1" id="v4l2-rds-block-codes">
       <title>Block defines</title>
-      <tgroup cols="3">
+      <tgroup cols="4">
 	<colspec colname="c1" colwidth="1*" />
 	<colspec colname="c2" colwidth="1*" />
-	<colspec colname="c3" colwidth="5*" />
+	<colspec colname="c3" colwidth="1*" />
+	<colspec colname="c4" colwidth="5*" />
 	<tbody valign="top">
 	  <row>
 	    <entry>V4L2_RDS_BLOCK_MSK</entry>
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 839e93e..9288af9 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -100,6 +100,7 @@
       <year>2008</year>
       <year>2009</year>
       <year>2010</year>
+      <year>2011</year>
       <holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
 Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
     </copyright>
@@ -381,7 +382,7 @@
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 2.6.33</subtitle>
+ <subtitle>Revision 2.6.38</subtitle>
 
   <chapter id="common">
     &sub-common;
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 69dd29e..b2bea15 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -533,6 +533,33 @@
 Other Pieces
 ------------
 
+Get the detailed info about the IPMI device
+-------------------------------------------
+
+Some users need more detailed information about a device, like where
+the address came from or the raw base device for the IPMI interface.
+You can use the IPMI smi_watcher to catch the IPMI interfaces as they
+come or go, and to grab the information, you can use the function
+ipmi_get_smi_info(), which returns the following structure:
+
+struct ipmi_smi_info {
+	enum ipmi_addr_src addr_src;
+	struct device *dev;
+	union {
+		struct {
+			void *acpi_handle;
+		} acpi_info;
+	} addr_info;
+};
+
+Currently special info is returned only for SI_ACPI address sources.
+Others may be added as necessary.
+
+Note that the dev pointer is included in the above structure, and
+assuming ipmi_get_smi_info() returns success, you must call put_device
+on the dev pointer.
+
+
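A hedged kernel-side sketch (not from the kernel tree) of the pattern
described above. The ipmi_get_smi_info() prototype used here (interface
number plus an output struct) is an assumption based on this text; check
include/linux/ipmi.h on your tree before relying on it.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ipmi.h>

static void my_new_smi(int if_num, struct device *dev)
{
	struct ipmi_smi_info info;

	if (ipmi_get_smi_info(if_num, &info))	/* assumed prototype */
		return;

	pr_info("IPMI if %d: addr_src=%d dev=%s\n",
		if_num, info.addr_src, dev_name(info.dev));

	/* the structure holds a reference on info.dev; drop it when done */
	put_device(info.dev);
}

static void my_smi_gone(int if_num)
{
}

static struct ipmi_smi_watcher my_watcher = {
	.owner    = THIS_MODULE,
	.new_smi  = my_new_smi,
	.smi_gone = my_smi_gone,
};

/* module init:  ipmi_smi_watcher_register(&my_watcher);
 * module exit:  ipmi_smi_watcher_unregister(&my_watcher); */
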
 Watchdog
 --------
 
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
new file mode 100644
index 0000000..9146952
--- /dev/null
+++ b/Documentation/acpi/apei/output_format.txt
@@ -0,0 +1,122 @@
+                     APEI output format
+                     ~~~~~~~~~~~~~~~~~~
+
+APEI uses printk as its hardware error reporting interface; the output
+format is as follows.
+
+<error record> :=
+APEI generic hardware error status
+severity: <integer>, <severity string>
+section: <integer>, severity: <integer>, <severity string>
+flags: <integer>
+<section flags strings>
+fru_id: <uuid string>
+fru_text: <string>
+section_type: <section type string>
+<section data>
+
+<severity string>* := recoverable | fatal | corrected | info
+
+<section flags strings># :=
+[primary][, containment warning][, reset][, threshold exceeded]\
+[, resource not accessible][, latent error]
+
+<section type string> := generic processor error | memory error | \
+PCIe error | unknown, <uuid string>
+
+<section data> :=
+<generic processor section data> | <memory section data> | \
+<pcie section data> | <null>
+
+<generic processor section data> :=
+[processor_type: <integer>, <proc type string>]
+[processor_isa: <integer>, <proc isa string>]
+[error_type: <integer>
+<proc error type strings>]
+[operation: <integer>, <proc operation string>]
+[flags: <integer>
+<proc flags strings>]
+[level: <integer>]
+[version_info: <integer>]
+[processor_id: <integer>]
+[target_address: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[IP: <integer>]
+
+<proc type string>* := IA32/X64 | IA64
+
+<proc isa string>* := IA32 | IA64 | X64
+
+<proc error type strings># :=
+[cache error][, TLB error][, bus error][, micro-architectural error]
+
+<proc operation string>* := unknown or generic | data read | data write | \
+instruction execution
+
+<proc flags strings># :=
+[restartable][, precise IP][, overflow][, corrected]
+
+<memory section data> :=
+[error_status: <integer>]
+[physical_address: <integer>]
+[physical_address_mask: <integer>]
+[node: <integer>]
+[card: <integer>]
+[module: <integer>]
+[bank: <integer>]
+[device: <integer>]
+[row: <integer>]
+[column: <integer>]
+[bit_position: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[target_id: <integer>]
+[error_type: <integer>, <mem error type string>]
+
+<mem error type string>* :=
+unknown | no error | single-bit ECC | multi-bit ECC | \
+single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
+target abort | parity error | watchdog timeout | invalid address | \
+mirror Broken | memory sparing | scrub corrected error | \
+scrub uncorrected error
+
+<pcie section data> :=
+[port_type: <integer>, <pcie port type string>]
+[version: <integer>.<integer>]
+[command: <integer>, status: <integer>]
+[device_id: <integer>:<integer>:<integer>.<integer>
+slot: <integer>
+secondary_bus: <integer>
+vendor_id: <integer>, device_id: <integer>
+class_code: <integer>]
+[serial number: <integer>, <integer>]
+[bridge: secondary_status: <integer>, control: <integer>]
+
+<pcie port type string>* := PCIe end point | legacy PCI end point | \
+unknown | unknown | root port | upstream switch port | \
+downstream switch port | PCIe to PCI/PCI-X bridge | \
+PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
+root complex event collector
+
+Where [] designates that the corresponding content is optional.
+
+All <field string> descriptions marked with * have the following format:
+
+field: <integer>, <field string>
+
+Where the value of <integer> should be the position of the string in the
+<field string> description. Otherwise, <field string> will be "unknown".
+
+All <field strings> descriptions marked with # have the following format:
+
+field: <integer>
+<field strings>
+
+Where each string in <field strings> corresponds to one set bit of
+<integer>. The bit position is the position of the string in the
+<field strings> description.
+
+For more detailed explanation of every field, please refer to UEFI
+specification version 2.3 or later, section Appendix N: Common
+Platform Error Record.
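
A small standalone C sketch (illustration only, not kernel code) of the two
field conventions above: a '*' field maps an integer to the string at that
position, and a '#' field treats the integer as a bit mask whose set bits
select strings. The severity and section-flag tables are copied from the
grammar; the sample values are arbitrary.

#include <stdio.h>

static const char * const severity_strs[] = {
	"recoverable", "fatal", "corrected", "info",
};

static const char * const section_flag_strs[] = {
	"primary", "containment warning", "reset", "threshold exceeded",
	"resource not accessible", "latent error",
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *severity_str(unsigned int sev)
{
	return sev < ARRAY_SIZE(severity_strs) ? severity_strs[sev] : "unknown";
}

static void print_section_flags(unsigned int flags)
{
	unsigned int i;
	int first = 1;

	printf("flags: %u\n", flags);
	for (i = 0; i < ARRAY_SIZE(section_flag_strs); i++) {
		if (flags & (1u << i)) {
			printf("%s%s", first ? "" : ", ", section_flag_strs[i]);
			first = 0;
		}
	}
	printf("\n");
}

int main(void)
{
	printf("severity: 1, %s\n", severity_str(1));	/* fatal */
	print_section_flags(0x5);			/* primary, reset */
	return 0;
}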
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index d6da611..4ed7b5c 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -89,6 +89,33 @@
 
  Limits for writes can be put using blkio.write_bps_device file.
 
+Hierarchical Cgroups
+====================
+- Currently none of the IO control policies supports hierarchical groups.
+  But the cgroup interface does allow creation of hierarchical cgroups and
+  internally the IO policies treat them as a flat hierarchy.
+
+  So creation of a cgroup hierarchy is allowed, but at the backend
+  everything is treated as flat. So if somebody creates a hierarchy like
+  the following:
+
+			root
+			/  \
+		     test1 test2
+			|
+		     test3
+
+  CFQ and throttling will, in practice, treat all groups at the same level.
+
+				pivot
+			     /  |   \  \
+			root  test1 test2  test3
+
+  Down the line we can implement hierarchical accounting/control support
+  and also introduce a new cgroup file "use_hierarchy" which will control
+  whether the cgroup hierarchy is viewed as flat or hierarchical by the
+  policy.  This is how the memory controller has implemented it as well.
+
 Various user visible config options
 ===================================
 CONFIG_BLK_CGROUP
diff --git a/Documentation/cgroups/cgroup_event_listener.c b/Documentation/cgroups/cgroup_event_listener.c
index 8c2bfc4..3e082f9 100644
--- a/Documentation/cgroups/cgroup_event_listener.c
+++ b/Documentation/cgroups/cgroup_event_listener.c
@@ -91,7 +91,7 @@
 
 		if (ret == -1) {
 			perror("cgroup.event_control "
-					"is not accessable any more");
+					"is not accessible any more");
 			break;
 		}
 
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 190018b..44b8b7a 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -355,13 +355,13 @@
 
 To change the set of subsystems bound to a mounted hierarchy, just
 remount with different options:
-# mount -o remount,cpuset,ns hier1 /dev/cgroup
+# mount -o remount,cpuset,blkio hier1 /dev/cgroup
 
-Now memory is removed from the hierarchy and ns is added.
+Now memory is removed from the hierarchy and blkio is added.
 
-Note this will add ns to the hierarchy but won't remove memory or
+Note this will add blkio to the hierarchy but won't remove memory or
 cpuset, because the new options are appended to the old ones:
-# mount -o remount,ns /dev/cgroup
+# mount -o remount,blkio /dev/cgroup
 
 To Specify a hierarchy's release_agent:
 # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
index b7eecec..fc8fa97 100644
--- a/Documentation/cgroups/memcg_test.txt
+++ b/Documentation/cgroups/memcg_test.txt
@@ -398,7 +398,7 @@
 	written to move_charge_at_immigrate.
 
  9.10 Memory thresholds
-	Memory controler implements memory thresholds using cgroups notification
+	Memory controller implements memory thresholds using cgroups notification
 	API. You can use Documentation/cgroups/cgroup_event_listener.c to test
 	it.
 
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 524de92..59293ac 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -8,7 +8,7 @@
 
 <cipher>
     Encryption cipher and an optional IV generation mode.
-    (In format cipher-chainmode-ivopts:ivmode).
+    (In format cipher[:keycount]-chainmode-ivopts:ivmode).
     Examples:
        des
        aes-cbc-essiv:sha256
@@ -20,6 +20,11 @@
     Key used for encryption. It is encoded as a hexadecimal number.
     You can only use key sizes that are valid for the selected cipher.
 
+<keycount>
+    Multi-key compatibility mode. You can define <keycount> keys and
+    then sectors are encrypted according to their offsets (sector 0 uses key0;
+    sector 1 uses key1 etc.).  <keycount> must be a power of two.
+
 <iv_offset>
     The IV offset is a sector count that is added to the sector number
     before creating the IV.
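
A standalone sketch (illustration only) of the multi-key mapping described
above, assuming sector N uses key (N mod <keycount>) with <keycount> a power
of two.

#include <stdio.h>
#include <stdint.h>

static unsigned int key_index(uint64_t sector, unsigned int keycount)
{
	/* keycount is a power of two, so the modulo reduces to a mask */
	return (unsigned int)(sector & (keycount - 1));
}

int main(void)
{
	printf("sector 0 -> key%u\n", key_index(0, 4));	/* key0 */
	printf("sector 5 -> key%u\n", key_index(5, 4));	/* key1 */
	return 0;
}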
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
new file mode 100644
index 0000000..33b6b70
--- /dev/null
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -0,0 +1,70 @@
+Device-mapper RAID (dm-raid) is a bridge from DM to MD.  It
+provides a way to use device-mapper interfaces to access the MD RAID
+drivers.
+
+As with all device-mapper targets, the nominal public interfaces are the
+constructor (CTR) tables and the status outputs (both STATUSTYPE_INFO
+and STATUSTYPE_TABLE).  The CTR table looks like the following:
+
+1: <s> <l> raid \
+2:      <raid_type> <#raid_params> <raid_params> \
+3:      <#raid_devs> <meta_dev1> <dev1> .. <meta_devN> <devN>
+
+Line 1 contains the standard first three arguments to any device-mapper
+target - the start, length, and target type fields.  The target type in
+this case is "raid".
+
+Line 2 contains the arguments that define the particular raid
+type/personality/level, the required arguments for that raid type, and
+any optional arguments.  Possible raid types include: raid4, raid5_la,
+raid5_ls, raid5_rs, raid6_zr, raid6_nr, and raid6_nc.  (raid1 is
+planned for the future.)  The list of required and optional parameters
+is the same for all the current raid types.  The required parameters are
+positional, while the optional parameters are given as key/value pairs.
+The possible parameters are as follows:
+ <chunk_size>           Chunk size in sectors.
+ [[no]sync]             Force/Prevent RAID initialization
+ [rebuild <idx>]        Rebuild the drive indicated by the index
+ [daemon_sleep <ms>]    Time between bitmap daemon work to clear bits
+ [min_recovery_rate <kB/sec/disk>]      Throttle RAID initialization
+ [max_recovery_rate <kB/sec/disk>]      Throttle RAID initialization
+ [max_write_behind <sectors>]           See '--write-behind=' (man mdadm)
+ [stripe_cache <sectors>]               Stripe cache size for higher RAIDs
+
+Line 3 contains the list of devices that compose the array in
+metadata/data device pairs.  If the metadata is stored separately, a '-'
+is given for the metadata device position.  If a drive has failed or is
+missing at creation time, a '-' can be given for both the metadata and
+data drives for a given position.
+
+NB. Currently all metadata devices must be specified as '-'.
+
+Examples:
+# RAID4 - 4 data drives, 1 parity
+# No metadata devices specified to hold superblock/bitmap info
+# Chunk size of 1MiB
+# (Lines separated for easy reading)
+0 1960893648 raid \
+        raid4 1 2048 \
+        5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
+
+# RAID4 - 4 data drives, 1 parity (no metadata devices)
+# Chunk size of 1MiB, force RAID initialization,
+#       min recovery rate at 20 kiB/sec/disk
+0 1960893648 raid \
+        raid4 4 2048 min_recovery_rate 20 sync \
+        5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
+
+Performing a 'dmsetup table' should display the CTR table used to
+construct the mapping (with possible reordering of optional
+parameters).
+
+Performing a 'dmsetup status' will yield information on the state and
+health of the array.  The output is as follows:
+1: <s> <l> raid \
+2:      <raid_type> <#devices> <1 health char for each dev> <resync_ratio>
+
+Line 1 is standard DM output.  Line 2 is best shown by example:
+        0 1960893648 raid raid4 5 AAAAA 2/490221568
+Here we can see that the RAID type is raid4, there are 5 devices - all of
+which are 'A'live - and the array is 2/490221568 of the way through recovery.
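
A standalone sketch (illustration only) that pulls the fields out of the
status line above. The layout is taken from the example; this is not a
general-purpose dm status parser.

#include <stdio.h>

int main(void)
{
	const char *line = "0 1960893648 raid raid4 5 AAAAA 2/490221568";
	unsigned long long start, len, done, total;
	char target[16], raid_type[16], health[64];
	int ndev;

	if (sscanf(line, "%llu %llu %15s %15s %d %63s %llu/%llu",
		   &start, &len, target, raid_type, &ndev,
		   health, &done, &total) != 8) {
		fprintf(stderr, "unexpected status format\n");
		return 1;
	}
	printf("%s: %d devices, health %s, resync %.6f%% complete\n",
	       raid_type, ndev, health, 100.0 * done / total);
	return 0;
}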
diff --git a/Documentation/powerpc/dts-bindings/fsl/sata.txt b/Documentation/devicetree/bindings/ata/fsl-sata.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/sata.txt
rename to Documentation/devicetree/bindings/ata/fsl-sata.txt
diff --git a/Documentation/powerpc/dts-bindings/eeprom.txt b/Documentation/devicetree/bindings/eeprom.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/eeprom.txt
rename to Documentation/devicetree/bindings/eeprom.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
rename to Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/gpio/gpio.txt
rename to Documentation/devicetree/bindings/gpio/gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/devicetree/bindings/gpio/led.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/gpio/led.txt
rename to Documentation/devicetree/bindings/gpio/led.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/i2c.txt b/Documentation/devicetree/bindings/i2c/fsl-i2c.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/i2c.txt
rename to Documentation/devicetree/bindings/i2c/fsl-i2c.txt
diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/devicetree/bindings/marvell.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/marvell.txt
rename to Documentation/devicetree/bindings/marvell.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/esdhc.txt
rename to Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
diff --git a/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/mmc-spi-slot.txt
rename to Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/upm-nand.txt
rename to Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
diff --git a/Documentation/powerpc/dts-bindings/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/mtd-physmap.txt
rename to Documentation/devicetree/bindings/mtd/mtd-physmap.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/can.txt b/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/can.txt
rename to Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt
diff --git a/Documentation/powerpc/dts-bindings/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/can/sja1000.txt
rename to Documentation/devicetree/bindings/net/can/sja1000.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/tsec.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/tsec.txt
rename to Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/mdio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/gpio/mdio.txt
rename to Documentation/devicetree/bindings/net/mdio-gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/phy.txt
rename to Documentation/devicetree/bindings/net/phy.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt
rename to Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt b/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt
new file mode 100644
index 0000000..ee45980
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt
@@ -0,0 +1,52 @@
+PPC4xx Clock Power Management (CPM) node
+
+Required properties:
+	- compatible		: compatible list, currently only "ibm,cpm"
+	- dcr-access-method	: "native"
+	- dcr-reg		: < DCR register range >
+
+Optional properties:
+	- er-offset		: All 4xx SoCs with a CPM controller have
+				  one of two different orders for the CPM
+				  registers. Some have the CPM registers
+				  in the following order (ER,FR,SR). The
+				  others have them in the following order
+				  (SR,ER,FR). For the second case set
+				  er-offset = <1>.
+	- unused-units		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices.
+	- idle-doze		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices. This is usually just CPM[CPU].
+	- standby		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on standby and
+				  restored on resume.
+	- suspend		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on suspend (mem) and
+				  restored on resume. Note, for standby
+				  and suspend the corresponding bits can
+				  be different or the same. Usually for
+				  standby only class 2 and 3 units are set.
+				  However, the interface does not care.
+				  If they are the same, the additional
+				  power saving will be seen if support
+				  is available to put the DDR in self
+				  refresh mode and to apply any additional
+				  power saving techniques for the specific SoC.
+
+Example:
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x160 0x003>;
+		er-offset = <0>;
+		unused-units = <0x00000100>;
+		idle-doze = <0x02000000>;
+		standby = <0xfeff0000>;
+		suspend = <0xfeff791d>;
+	};
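
A kernel-side sketch (illustration only, not taken from any driver) that
reads two of the one-cell specifiers from a node like the example above,
using the generic OF property accessors.

#include <linux/kernel.h>
#include <linux/of.h>

static void cpm_print_masks(void)
{
	struct device_node *np;
	const __be32 *prop;

	np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
	if (!np)
		return;

	prop = of_get_property(np, "unused-units", NULL);
	if (prop)
		pr_info("CPM unused-units mask: 0x%08x\n", be32_to_cpup(prop));

	prop = of_get_property(np, "standby", NULL);
	if (prop)
		pr_info("CPM standby mask: 0x%08x\n", be32_to_cpup(prop));

	of_node_put(np);
}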
diff --git a/Documentation/powerpc/dts-bindings/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/emac.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/emac.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt b/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/ndfc.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/reboot.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/reboot.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/board.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/board.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/diu.txt b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/diu.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/diu.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/dma.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/dma.txt
diff --git a/Documentation/powerpc/dts-bindings/ecm.txt b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/ecm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/gtm.txt b/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/gtm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/guts.txt b/Documentation/devicetree/bindings/powerpc/fsl/guts.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/guts.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/guts.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/lbc.txt b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/lbc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mcm.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mcm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpic.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mpic.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/msi-pic.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/pmc.txt b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/pmc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/sec.txt b/Documentation/devicetree/bindings/powerpc/fsl/sec.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/sec.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/sec.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/ssi.txt b/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/ssi.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
diff --git a/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt b/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/nintendo/gamecube.txt
rename to Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt
diff --git a/Documentation/powerpc/dts-bindings/nintendo/wii.txt b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/nintendo/wii.txt
rename to Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/spi.txt
rename to Documentation/devicetree/bindings/spi/fsl-spi.txt
diff --git a/Documentation/powerpc/dts-bindings/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/spi-bus.txt
rename to Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/usb.txt b/Documentation/devicetree/bindings/usb/fsl-usb.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/usb.txt
rename to Documentation/devicetree/bindings/usb/fsl-usb.txt
diff --git a/Documentation/powerpc/dts-bindings/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/usb-ehci.txt
rename to Documentation/devicetree/bindings/usb/usb-ehci.txt
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/xilinx.txt
rename to Documentation/devicetree/bindings/xilinx.txt
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
similarity index 90%
rename from Documentation/powerpc/booting-without-of.txt
rename to Documentation/devicetree/booting-without-of.txt
index 302db5d..28b1c9d 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -13,7 +13,6 @@
 
   I - Introduction
     1) Entry point for arch/powerpc
-    2) Board support
 
   II - The DT block format
     1) Header
@@ -41,13 +40,6 @@
   VI - System-on-a-chip devices and nodes
     1) Defining child nodes of an SOC
     2) Representing devices without a current OF specification
-      a) PHY nodes
-      b) Interrupt controllers
-      c) 4xx/Axon EMAC ethernet nodes
-      d) Xilinx IP cores
-      e) USB EHCI controllers
-      f) MDIO on GPIOs
-      g) SPI busses
 
   VII - Specifying interrupt information for devices
     1) interrupts property
@@ -123,7 +115,7 @@
 I - Introduction
 ================
 
-During the recent development of the Linux/ppc64 kernel, and more
+During the development of the Linux/ppc64 kernel, and more
 specifically, the addition of new platform types outside of the old
 IBM pSeries/iSeries pair, it was decided to enforce some strict rules
 regarding the kernel entry and bootloader <-> kernel interfaces, in
@@ -131,7 +123,7 @@
 point and the way a new platform should be added to the kernel. The
 legacy iSeries platform breaks those rules as it predates this scheme,
 but no new board support will be accepted in the main tree that
-doesn't follows them properly.  In addition, since the advent of the
+doesn't follow them properly.  In addition, since the advent of the
 arch/powerpc merged architecture for ppc32 and ppc64, new 32-bit
 platforms and 32-bit platforms which move into arch/powerpc will be
 required to use these rules as well.
@@ -146,7 +138,7 @@
 create a node for every PCI device in the system. It is a requirement
 to have a node for PCI host bridges in order to provide interrupt
 routing informations and memory/IO ranges, among others. It is also
-recommended to define nodes for on chip devices and other busses that
+recommended to define nodes for on chip devices and other buses that
 don't specifically fit in an existing OF specification. This creates a
 great flexibility in the way the kernel can then probe those and match
 drivers to device, without having to hard code all sorts of tables. It
@@ -158,7 +150,7 @@
 1) Entry point for arch/powerpc
 -------------------------------
 
-   There is one and one single entry point to the kernel, at the start
+   There is one single entry point to the kernel, at the start
    of the kernel image. That entry point supports two calling
    conventions:
 
@@ -210,12 +202,6 @@
         with all CPUs. The way to do that with method b) will be
         described in a later revision of this document.
 
-
-2) Board support
-----------------
-
-64-bit kernels:
-
    Board supports (platforms) are not exclusive config options. An
    arbitrary set of board supports can be built in a single kernel
    image. The kernel will "know" what set of functions to use for a
@@ -234,48 +220,11 @@
         containing the various callbacks that the generic code will
         use to get to your platform specific code
 
-        c) Add a reference to your "ppc_md" structure in the
-        "machines" table in arch/powerpc/kernel/setup_64.c if you are
-        a 64-bit platform.
-
-        d) request and get assigned a platform number (see PLATFORM_*
-        constants in arch/powerpc/include/asm/processor.h
-
-32-bit embedded kernels:
-
-  Currently, board support is essentially an exclusive config option.
-  The kernel is configured for a single platform.  Part of the reason
-  for this is to keep kernels on embedded systems small and efficient;
-  part of this is due to the fact the code is already that way. In the
-  future, a kernel may support multiple platforms, but only if the
+  A kernel image may support multiple platforms, but only if the
   platforms feature the same core architecture.  A single kernel build
   cannot support both configurations with Book E and configurations
   with classic Powerpc architectures.
 
-  32-bit embedded platforms that are moved into arch/powerpc using a
-  flattened device tree should adopt the merged tree practice of
-  setting ppc_md up dynamically, even though the kernel is currently
-  built with support for only a single platform at a time.  This allows
-  unification of the setup code, and will make it easier to go to a
-  multiple-platform-support model in the future.
-
-NOTE: I believe the above will be true once Ben's done with the merge
-of the boot sequences.... someone speak up if this is wrong!
-
-  To add a 32-bit embedded platform support, follow the instructions
-  for 64-bit platforms above, with the exception that the Kconfig
-  option should be set up such that the kernel builds exclusively for
-  the platform selected.  The processor type for the platform should
-  enable another config option to select the specific board
-  supported.
-
-NOTE: If Ben doesn't merge the setup files, may need to change this to
-point to setup_32.c
-
-
-   I will describe later the boot process and various callbacks that
-   your platform should implement.
-
 
 II - The DT block format
 ========================
@@ -300,8 +249,8 @@
 1) Header
 ---------
 
-   The kernel is entered with r3 pointing to an area of memory that is
-   roughly described in arch/powerpc/include/asm/prom.h by the structure
+   The kernel is passed the physical address pointing to an area of memory
+   that is roughly described in include/linux/of_fdt.h by the structure
    boot_param_header:
 
 struct boot_param_header {
@@ -339,7 +288,7 @@
    All values in this header are in big endian format, the various
    fields in this header are defined more precisely below. All
    "offset" values are in bytes from the start of the header; that is
-   from the value of r3.
+   from the physical base address of the device tree block.
 
    - magic
 
@@ -437,7 +386,7 @@
 
 
              ------------------------------
-       r3 -> |  struct boot_param_header  |
+     base -> |  struct boot_param_header  |
              ------------------------------
              |      (alignment gap) (*)   |
              ------------------------------
@@ -457,7 +406,7 @@
       -----> ------------------------------
       |
       |
-      --- (r3 + totalsize)
+      --- (base + totalsize)
 
   (*) The alignment gaps are not necessarily present; their presence
       and size are dependent on the various alignment requirements of
@@ -500,7 +449,7 @@
 the device-tree. More details about the actual format of these will be
 below.
 
-The kernel powerpc generic code does not make any formal use of the
+The generic kernel code does not make any formal use of the
 unit address (though some board support code may do) so the only real
 requirement here for the unit address is to ensure uniqueness of
 the node unit name at a given level of the tree. Nodes with no notion
@@ -518,20 +467,21 @@
 
 Every node which actually represents an actual device (that is, a node
 which isn't only a virtual "container" for more nodes, like "/cpus"
-is) is also required to have a "device_type" property indicating the
-type of node .
+is) is also required to have a "compatible" property indicating the
+specific hardware and an optional list of devices it is fully
+backwards compatible with.
 
 Finally, every node that can be referenced from a property in another
-node is required to have a "linux,phandle" property. Real open
-firmware implementations provide a unique "phandle" value for every
-node that the "prom_init()" trampoline code turns into
-"linux,phandle" properties. However, this is made optional if the
-flattened device tree is used directly. An example of a node
+node is required to have either a "phandle" or a "linux,phandle"
+property. Real Open Firmware implementations provide a unique
+"phandle" value for every node that the "prom_init()" trampoline code
+turns into "linux,phandle" properties. However, this is made optional
+if the flattened device tree is used directly. An example of a node
 referencing another node via "phandle" is when laying out the
 interrupt tree which will be described in a further version of this
 document.
 
-This "linux, phandle" property is a 32-bit value that uniquely
+The "phandle" property is a 32-bit value that uniquely
 identifies a node. You are free to use whatever values or system of
 values, internal pointers, or whatever to generate these, the only
 requirement is that every node for which you provide that property has
@@ -694,7 +644,7 @@
 while the top cell contains address space indication, flags, and pci
 bus & device numbers.
 
-For busses that support dynamic allocation, it's the accepted practice
+For buses that support dynamic allocation, it's the accepted practice
 to then not provide the address in "reg" (keep it 0) though while
 providing a flag indicating the address is dynamically allocated, and
 then, to provide a separate "assigned-addresses" property that
@@ -711,7 +661,7 @@
 The "reg" property only defines addresses and sizes (if #size-cells is
 non-0) within a given bus. In order to translate addresses upward
 (that is into parent bus addresses, and possibly into CPU physical
-addresses), all busses must contain a "ranges" property. If the
+addresses), all buses must contain a "ranges" property. If the
 "ranges" property is missing at a given level, it's assumed that
 translation isn't possible, i.e., the registers are not visible on the
 parent bus.  The format of the "ranges" property for a bus is a list
@@ -727,9 +677,9 @@
 PCI<->ISA bridge, that would be a PCI address. It defines the base
 address in the parent bus where the beginning of that range is mapped.
 
-For a new 64-bit powerpc board, I recommend either the 2/2 format or
+For new 64-bit board support, I recommend either the 2/2 format or
 Apple's 2/1 format which is slightly more compact since sizes usually
-fit in a single 32-bit word.   New 32-bit powerpc boards should use a
+fit in a single 32-bit word.   New 32-bit board support should use a
 1/1 format, unless the processor supports physical addresses greater
 than 32-bits, in which case a 2/1 format is recommended.
 
@@ -754,7 +704,7 @@
 While earlier users of Open Firmware like OldWorld macintoshes tended
 to use the actual device name for the "name" property, it's nowadays
 considered a good practice to use a name that is closer to the device
-class (often equal to device_type). For example, nowadays, ethernet
+class (often equal to device_type). For example, nowadays, Ethernet
 controllers are named "ethernet", an additional "model" property
 defining precisely the chip type/model, and "compatible" property
 defining the family in case a single driver can driver more than one
@@ -772,7 +722,7 @@
 4) Note about node and property names and character set
 -------------------------------------------------------
 
-While open firmware provides more flexible usage of 8859-1, this
+While Open Firmware provides more flexible usage of 8859-1, this
 specification enforces more strict rules. Nodes and properties should
 be comprised only of ASCII characters 'a' to 'z', '0' to
 '9', ',', '.', '_', '+', '#', '?', and '-'. Node names additionally
@@ -792,7 +742,7 @@
 --------------------------------
   These are all that are currently required. However, it is strongly
   recommended that you expose PCI host bridges as documented in the
-  PCI binding to open firmware, and your interrupt tree as documented
+  PCI binding to Open Firmware, and your interrupt tree as documented
   in OF interrupt tree specification.
 
   a) The root node
@@ -802,20 +752,12 @@
     - model : this is your board name/model
     - #address-cells : address representation for "root" devices
     - #size-cells: the size representation for "root" devices
-    - device_type : This property shouldn't be necessary. However, if
-      you decide to create a device_type for your root node, make sure it
-      is _not_ "chrp" unless your platform is a pSeries or PAPR compliant
-      one for 64-bit, or a CHRP-type machine for 32-bit as this will
-      matched by the kernel this way.
-
-  Additionally, some recommended properties are:
-
     - compatible : the board "family" generally finds its way here,
       for example, if you have 2 board models with a similar layout,
       that typically get driven by the same platform code in the
-      kernel, you would use a different "model" property but put a
-      value in "compatible". The kernel doesn't directly use that
-      value but it is generally useful.
+      kernel, you would specify the exact board model in the
+      compatible property followed by an entry that represents the SoC
+      model.
 
   The root node is also generally where you add additional properties
   specific to your board like the serial number if any, that sort of
@@ -841,8 +783,11 @@
 
   So under /cpus, you are supposed to create a node for every CPU on
   the machine. There is no specific restriction on the name of the
-  CPU, though It's common practice to call it PowerPC,<name>. For
+  CPU, though it's common to call it <architecture>,<core>. For
   example, Apple uses PowerPC,G5 while IBM uses PowerPC,970FX.
+  However, the Generic Names convention suggests that it would be
+  better to simply use 'cpu' for each cpu node and use the compatible
+  property to identify the specific cpu core.
 
   Required properties:
 
@@ -923,7 +868,7 @@
 
   e) The /chosen node
 
-  This node is a bit "special". Normally, that's where open firmware
+  This node is a bit "special". Normally, that's where Open Firmware
   puts some variable environment information, like the arguments, or
   the default input/output devices.
 
@@ -940,11 +885,7 @@
       console device if any. Typically, if you have serial devices on
       your board, you may want to put the full path to the one set as
       the default console in the firmware here, for the kernel to pick
-      it up as its own default console. If you look at the function
-      set_preferred_console() in arch/ppc64/kernel/setup.c, you'll see
-      that the kernel tries to find out the default console and has
-      knowledge of various types like 8250 serial ports. You may want
-      to extend this function to add your own.
+      it up as its own default console.
 
   Note that u-boot creates and fills in the chosen node for platforms
   that use it.
@@ -955,23 +896,23 @@
 
   f) the /soc<SOCname> node
 
-  This node is used to represent a system-on-a-chip (SOC) and must be
-  present if the processor is a SOC. The top-level soc node contains
-  information that is global to all devices on the SOC. The node name
-  should contain a unit address for the SOC, which is the base address
-  of the memory-mapped register set for the SOC. The name of an soc
+  This node is used to represent a system-on-a-chip (SoC) and must be
+  present if the processor is a SoC. The top-level soc node contains
+  information that is global to all devices on the SoC. The node name
+  should contain a unit address for the SoC, which is the base address
+  of the memory-mapped register set for the SoC. The name of an SoC
   node should start with "soc", and the remainder of the name should
   represent the part number for the soc.  For example, the MPC8540's
   soc node would be called "soc8540".
 
   Required properties:
 
-    - device_type : Should be "soc"
     - ranges : Should be defined as specified in 1) to describe the
-      translation of SOC addresses for memory mapped SOC registers.
-    - bus-frequency: Contains the bus frequency for the SOC node.
+      translation of SoC addresses for memory mapped SoC registers.
+    - bus-frequency: Contains the bus frequency for the SoC node.
       Typically, the value of this field is filled in by the boot
       loader.
+    - compatible : Exact model of the SoC
 
 
   Recommended properties:
@@ -1025,7 +966,7 @@
 
 WARNING: This version is still in early development stage; the
 resulting device-tree "blobs" have not yet been validated with the
-kernel. The current generated bloc lacks a useful reserve map (it will
+kernel. The current generated block lacks a useful reserve map (it will
 be fixed to generate an empty one, it's up to the bootloader to fill
 it up) among others. The error handling needs work, bugs are lurking,
 etc...
@@ -1098,7 +1039,7 @@
                                  * an arbitrary array of bytes
                                  */
 
-  childnode@addresss {	/* define a child node named "childnode"
+  childnode@address {	/* define a child node named "childnode"
                                  * whose unit name is "childnode at
 				 * address"
                                  */
@@ -1155,12 +1096,13 @@
 
   - An example of code for iterating nodes & retrieving properties
     directly from the flattened tree format can be found in the kernel
-    file arch/ppc64/kernel/prom.c, look at scan_flat_dt() function,
+    file drivers/of/fdt.c.  Look at the of_scan_flat_dt() function,
     its usage in early_init_devtree(), and the corresponding various
     early_init_dt_scan_*() callbacks. That code can be re-used in a
     GPL bootloader, and as the author of that code, I would be happy
     to discuss possible free licensing to any vendor who wishes to
     integrate all or part of this code into a non-GPL bootloader.
+    (reference needed; who is 'I' here? ---gcl Jan 31, 2011)
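+
+    As a minimal, illustrative sketch (kernel context, <linux/of_fdt.h>;
+    the callback name is made up), a scan callback passed to
+    of_scan_flat_dt() could look like:
+
+	static int __init my_fdt_scan_root(unsigned long node,
+					   const char *uname, int depth,
+					   void *data)
+	{
+		const char *model;
+
+		if (depth != 0)
+			return 0;	/* not the root node, keep scanning */
+
+		model = of_get_flat_dt_prop(node, "model", NULL);
+		if (model)
+			pr_info("Board model: %s\n", model);
+		return 1;		/* non-zero stops the scan */
+	}
+
+	/* ...then, early in boot: of_scan_flat_dt(my_fdt_scan_root, NULL); */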
 
 
 
@@ -1203,18 +1145,19 @@
 2) Representing devices without a current OF specification
 ----------------------------------------------------------
 
-Currently, there are many devices on SOCs that do not have a standard
-representation pre-defined as part of the open firmware
-specifications, mainly because the boards that contain these SOCs are
-not currently booted using open firmware.   This section contains
-descriptions for the SOC devices for which new nodes have been
-defined; this list will expand as more and more SOC-containing
-platforms are moved over to use the flattened-device-tree model.
+Currently, there are many devices on SoCs that do not have a standard
+representation defined as part of the Open Firmware specifications,
+mainly because the boards that contain these SoCs are not currently
+booted using Open Firmware.  Binding documentation for new devices
+should be added to the Documentation/devicetree/bindings directory.
+That directory will expand as device tree support is added to more and
+more SoCs.
+
 
 VII - Specifying interrupt information for devices
 ===================================================
 
-The device tree represents the busses and devices of a hardware
+The device tree represents the buses and devices of a hardware
 system in a form similar to the physical bus topology of the
 hardware.
 
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 945ff3f..a0b58e2 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -104,6 +104,13 @@
 As an added bonus you can customise the message creation toolbar menu
 and put the "insert file" icon there.
 
+Make the composer window wide enough so that no lines wrap. As of
+KMail 1.13.5 (KDE 4.5.4), KMail will apply word wrapping when sending
+the email if the lines wrap in the composer window. Having word wrapping
+disabled in the Options menu isn't enough. Thus, if your patch has very
+long lines, you must make the composer window very wide before sending
+the email. See: https://bugs.kde.org/show_bug.cgi?id=174034
+
 You can safely GPG sign attachments, but inlined text is preferred for
 patches so do not GPG sign them.  Signing patches that have been inserted
 as inlined text will make them tricky to extract from their 7-bit encoding.
@@ -179,26 +186,8 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Thunderbird (GUI)
 
-By default, thunderbird likes to mangle text, but there are ways to
-coerce it into being nice.
-
-- Under account settings, composition and addressing, uncheck "Compose
-  messages in HTML format".
-
-- Edit your Thunderbird config settings to tell it not to wrap lines:
-      user_pref("mailnews.wraplength", 0);
-
-- Edit your Thunderbird config settings so that it won't use format=flowed:
-      user_pref("mailnews.send_plaintext_flowed", false);
-
-- You need to get Thunderbird into preformat mode:
-. If you compose HTML messages by default, it's not too hard. Just select
-  "Preformat" from the drop-down box just under the subject line.
-. If you compose in text by default, you have to tell it to compose a new
-  message in HTML (just as a one-off), and then force it from there back to
-  text, else it will wrap lines. To do this, use shift-click on the Write
-  icon to compose to get HTML compose mode, then select "Preformat" from
-  the drop-down box just under the subject line.
+Thunderbird is an Outlook clone that likes to mangle text, but there are ways
+to coerce it into behaving.
 
 - Allows use of an external editor:
   The easiest thing to do with Thunderbird and patches is to use an
@@ -208,6 +197,27 @@
   View->Toolbars->Customize... and finally just click on it when in the
   Compose dialog.
 
+To beat some sense out of the internal editor, do this:
+
+- Under account settings, composition and addressing, uncheck "Compose
+  messages in HTML format".
+
+- Edit your Thunderbird config settings so that it won't use format=flowed.
+  Go to "edit->preferences->advanced->config editor" to bring up
+  Thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
+  "false".
+
+- Enable "preformat" mode: Shift-click on the Write icon to bring up the HTML
+  composer, select "Preformat" from the drop-down box just under the subject
+  line, then close the message without saving.  (This setting also applies to
+  the text composer, but the only control for it is in the HTML composer.)
+
+- Install the "toggle wordwrap" extension.  Download the file from:
+    https://addons.mozilla.org/thunderbird/addon/2351/
+  Then go to "tools->add ons", select "install" at the bottom of the screen,
+  and browse to where you saved the .xul file.  This adds an "Enable
+  Wordwrap" entry under the Options menu of the message composer.
+
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
 
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 22f1081..b3f35e5 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -193,6 +193,20 @@
 
 ---------------------------
 
+What:	CS5535/CS5536 obsolete GPIO driver
+When:	June 2011
+Files:	drivers/staging/cs5535_gpio/*
+Check:	drivers/staging/cs5535_gpio/cs5535_gpio.c
+Why:	A newer driver replaces this; it is drivers/gpio/cs5535-gpio.c, and
+	integrates with the Linux GPIO subsystem.  The old driver has been
+	moved to staging, and will be removed altogether around 2.6.40.
+	Please test the new driver, and ensure that the functionality you
+	need and any bugfixes from the old driver are available in the new
+	one.
+Who:	Andres Salomon <dilinger@queued.net>
+
+--------------------------
+
 What:	remove EXPORT_SYMBOL(kernel_thread)
 When:	August 2006
 Files:	arch/*/kernel/*_ksyms.c
@@ -234,6 +248,17 @@
 
 ---------------------------
 
+What:	CONFIG_ACPI_PROCFS_POWER
+When:	2.6.39
+Why:	The sysfs I/F for ACPI power devices, including AC and Battery,
+	has been working in the upstream kernel since 2.6.24, Sep 2007.
+	In 2.6.37, we made the sysfs I/F always built in and this option
+	disabled by default.
+	Remove this option and the ACPI power procfs interface in 2.6.39.
+Who:	Zhang Rui <rui.zhang@intel.com>
+
+---------------------------
+
 What:	/proc/acpi/button
 When:	August 2007
 Why:	/proc/acpi/button has been replaced by events to the input layer
@@ -332,14 +357,6 @@
 
 -----------------------------
 
-What:	__do_IRQ all in one fits nothing interrupt handler
-When:	2.6.32
-Why:	__do_IRQ was kept for easy migration to the type flow handlers.
-	More than two years of migration time is enough.
-Who:	Thomas Gleixner <tglx@linutronix.de>
-
------------------------------
-
 What:	fakephp and associated sysfs files in /sys/bus/pci/slots/
 When:	2011
 Why:	In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
@@ -576,3 +593,29 @@
 Who:	Tejun Heo <tj@kernel.org>
 
 ----------------------------
+
+What:	Legacy, non-standard chassis intrusion detection interface.
+When:	June 2011
+Why:	The adm9240, w83792d and w83793 hardware monitoring drivers have
+	legacy interfaces for chassis intrusion detection. A standard
+	interface has been added to each driver, so the legacy interface
+	can be removed.
+Who:	Jean Delvare <khali@linux-fr.org>
+
+----------------------------
+
+What:	noswapaccount kernel command line parameter
+When:	2.6.40
+Why:	The original implementation of the memsw feature enabled by
+	CONFIG_CGROUP_MEM_RES_CTLR_SWAP could be disabled by the noswapaccount
+	kernel parameter (introduced in 2.6.29-rc1). Later on, this decision
+	turned out to be not ideal because we cannot have the feature compiled
+	in yet disabled by default, with only interested users enabling it
+	(e.g. general distribution kernels might need it). Therefore we have
+	added a swapaccount[=0|1] parameter (introduced in 2.6.37) which
+	provides both possibilities. If we remove noswapaccount we will have
+	fewer command line parameters with the same functionality and we can
+	also clean up the parameter handling a bit.
+Who:	Michal Hocko <mhocko@suse.cz>
+
+----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 977d891..4471a41 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -19,6 +19,8 @@
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
+	struct vfsmount *(*d_automount)(struct path *path);
+	int (*d_manage)(struct dentry *, bool);
 
 locking rules:
 		rename_lock	->d_lock	may block	rcu-walk
@@ -29,6 +31,8 @@
 d_release:	no		no		yes		no
 d_iput:		no		no		yes		no
 d_dname:	no		no		no		no
+d_automount:	no		no		yes		no
+d_manage:	no		no		yes (ref-walk)	maybe
 
 --------------------------- inode_operations --------------------------- 
 prototypes:
@@ -56,7 +60,6 @@
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
-	long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 
 locking rules:
@@ -84,7 +87,6 @@
 listxattr:	no
 removexattr:	yes
 truncate_range:	yes
-fallocate:	no
 fiemap:		no
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
@@ -343,7 +345,6 @@
 	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *); /* break_lease callback */
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
 	int (*fl_change)(struct file_lock **, int);
 
 locking rules:
@@ -353,7 +354,6 @@
 fl_grant:		no		no
 fl_release_private:	maybe		no
 fl_break:		yes		no
-fl_mylease:		yes		no
 fl_change		yes		no
 
 --------------------------- buffer_head -----------------------------------
@@ -435,6 +435,7 @@
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
 			size_t, unsigned int);
 	int (*setlease)(struct file *, long, struct file_lock **);
+	long (*fallocate)(struct file *, int, loff_t, loff_t);
 };
 
 locking rules:
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index ac2a261..933bc66 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -457,6 +457,11 @@
 
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.1.30:
+	- Fix writev() (it kept writing the first segment over and over again
+	  instead of moving onto subsequent segments).
+	- Fix crash in ntfs_mft_record_alloc() when mapping the new extent mft
+	  record failed.
 2.1.29:
 	- Fix a deadlock when mounting read-write.
 2.1.28:
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 07a32b4..dfbcd1b 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -365,8 +365,8 @@
 [recommended]
 	vfs now tries to do path walking in "rcu-walk mode", which avoids
 atomic operations and scalability hazards on dentries and inodes (see
-Documentation/filesystems/path-walk.txt). d_hash and d_compare changes (above)
-are examples of the changes required to support this. For more complex
+Documentation/filesystems/path-lookup.txt). d_hash and d_compare changes
+(above) are examples of the changes required to support this. For more complex
 filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so
 no changes are required to the filesystem. However, this is costly and loses
 the benefits of rcu-walk mode. We will begin to add filesystem callbacks that
@@ -383,5 +383,14 @@
 
 	permission and check_acl are inode permission checks that are called
 on many or all directory inodes on the way down a path walk (to check for
-exec permission). These must now be rcu-walk aware (flags & IPERM_RCU). See
-Documentation/filesystems/vfs.txt for more details.
+exec permission). These must now be rcu-walk aware (flags & IPERM_FLAG_RCU).
+See Documentation/filesystems/vfs.txt for more details.
+
+--
+[mandatory]
+	In ->fallocate() you must check the mode option passed in.  If your
+filesystem does not support hole punching (deallocating space in the middle of a
+file) you must return -EOPNOTSUPP if FALLOC_FL_PUNCH_HOLE is set in mode.
+Currently you can only have FALLOC_FL_PUNCH_HOLE with FALLOC_FL_KEEP_SIZE set,
+so the i_size should not change when hole punching, even when punching the end
+of a file off.
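+
+	As a minimal sketch (kernel context; the "myfs" prefix is a
+placeholder), the mandatory mode check could look like:
+
+	static long myfs_fallocate(struct file *file, int mode,
+				   loff_t offset, loff_t len)
+	{
+		/* this filesystem does not support hole punching */
+		if (mode & FALLOC_FL_PUNCH_HOLE)
+			return -EOPNOTSUPP;
+
+		/* ... normal preallocation path ... */
+		return 0;
+	}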
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 9471225..23cae65 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -375,6 +375,7 @@
 Swap:                  0 kB
 KernelPageSize:        4 kB
 MMUPageSize:           4 kB
+Locked:              374 kB
 
 The first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
@@ -670,6 +671,8 @@
 
 > cat /proc/meminfo
 
+The "Locked" indicates whether the mapping is locked in memory or not.
+
 
 MemTotal:     16344972 kB
 MemFree:      13634064 kB
@@ -1320,6 +1323,10 @@
 Writing to /proc/<pid>/oom_score_adj or /proc/<pid>/oom_adj will change the
 other with its scaled value.
 
+The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
+value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
+requires CAP_SYS_RESOURCE.
+
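+For illustration (userspace, hypothetical value): a task may always raise its
+own oom_score_adj, while lowering it again below a privileged setting requires
+CAP_SYS_RESOURCE:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/proc/self/oom_score_adj", "w");
+
+		if (!f)
+			return 1;
+		/* volunteer this task as a preferred OOM victim */
+		fprintf(f, "500\n");
+		return fclose(f) ? 1 : 0;
+	}
+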
 NOTICE: /proc/<pid>/oom_adj is deprecated and will be removed, please see
 Documentation/feature-removal-schedule.txt.
 
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index fbb324e..94cf97b 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -415,8 +415,8 @@
   permission: called by the VFS to check for access rights on a POSIX-like
   	filesystem.
 
-	May be called in rcu-walk mode (flags & IPERM_RCU). If in rcu-walk
-	mode, the filesystem must check the permission without blocking or
+	May be called in rcu-walk mode (flags & IPERM_FLAG_RCU). If in rcu-walk
+	mode, the filesystem must check the permission without blocking or
 	storing to the inode.
 
 	If a situation is encountered that rcu-walk cannot handle, return
@@ -864,6 +864,8 @@
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *, char *, int);
+	struct vfsmount *(*d_automount)(struct path *);
+	int (*d_manage)(struct dentry *, bool, bool);
 };
 
   d_revalidate: called when the VFS needs to revalidate a dentry. This
@@ -930,6 +932,47 @@
 	at the end of the buffer, and returns a pointer to the first char.
 	dynamic_dname() helper function is provided to take care of this.
 
+  d_automount: called when an automount dentry is to be traversed (optional).
+	This should create a new VFS mount record and return the record to the
+	caller.  The caller is supplied with a path parameter giving the
+	automount directory to describe the automount target and the parent
+	VFS mount record to provide inheritable mount parameters.  NULL should
+	be returned if someone else managed to make the automount first.  If
+	the vfsmount creation failed, then an error code should be returned.
+	If -EISDIR is returned, then the directory will be treated as an
+	ordinary directory and returned to pathwalk to continue walking.
+
+	If a vfsmount is returned, the caller will attempt to mount it on the
+	mountpoint and will remove the vfsmount from its expiration list in
+	the case of failure.  The vfsmount should be returned with 2 refs on
+	it to prevent automatic expiration - the caller will clean up the
+	additional ref.
+
+	This function is only used if DCACHE_NEED_AUTOMOUNT is set on the
+	dentry.  This is set by __d_instantiate() if S_AUTOMOUNT is set on the
+	inode being added.
+
+  d_manage: called to allow the filesystem to manage the transition from a
+	dentry (optional).  This allows autofs, for example, to hold up clients
+	waiting to explore behind a 'mountpoint' whilst letting the daemon go
+	past and construct the subtree there.  0 should be returned to let the
+	calling process continue.  -EISDIR can be returned to tell pathwalk to
+	use this directory as an ordinary directory and to ignore anything
+	mounted on it and not to check the automount flag.  Any other error
+	code will abort pathwalk completely.
+
+	If the 'mounting_here' parameter is true, then namespace_sem is being
+	held by the caller and the function should not initiate any mounts or
+	unmounts that it will then wait for.
+
+	If the 'rcu_walk' parameter is true, then the caller is doing a
+	pathwalk in RCU-walk mode.  Sleeping is not permitted in this mode,
+	and the caller can be asked to leave it and call again by returning
+	-ECHILD.
+
+	This function is only used if DCACHE_MANAGE_TRANSIT is set on the
+	dentry being transited from.
+
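+	A minimal sketch (kernel context; all "myfs" names are placeholders
+	and myfs_fs_type is assumed to be the filesystem's registered
+	file_system_type) returning a new vfsmount with the extra reference
+	described above:
+
+	static struct vfsmount *myfs_d_automount(struct path *path)
+	{
+		struct vfsmount *mnt;
+
+		mnt = vfs_kern_mount(&myfs_fs_type, 0, "myfs-automount", NULL);
+		if (IS_ERR(mnt))
+			return mnt;
+
+		mntget(mnt);	/* second ref, prevents early expiration */
+		return mnt;
+	}
+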
 Example :
 
 static char *pipefs_dname(struct dentry *dent, char *buffer, int buflen)
diff --git a/Documentation/hwmon/adm9240 b/Documentation/hwmon/adm9240
index 2c6f1fe..36e8ec6 100644
--- a/Documentation/hwmon/adm9240
+++ b/Documentation/hwmon/adm9240
@@ -155,7 +155,7 @@
 The ADM9240 provides an internal open drain on this line, and may output
 a 20 ms active low pulse to reset an external Chassis Intrusion latch.
 
-Clear the CI latch by writing value 1 to the sysfs chassis_clear file.
+Clear the CI latch by writing value 0 to the sysfs intrusion0_alarm file.
 
 Alarm flags reported as 16-bit word
 
diff --git a/Documentation/hwmon/ads7828 b/Documentation/hwmon/ads7828
index 75bc4be..2bbebe6 100644
--- a/Documentation/hwmon/ads7828
+++ b/Documentation/hwmon/ads7828
@@ -9,7 +9,7 @@
                http://focus.ti.com/lit/ds/symlink/ads7828.pdf
 
 Authors:
-        Steve Hardy <steve@linuxrealtime.co.uk>
+        Steve Hardy <shardy@redhat.com>
 
 Module Parameters
 -----------------
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737
index fc5df76..4d29351 100644
--- a/Documentation/hwmon/dme1737
+++ b/Documentation/hwmon/dme1737
@@ -42,7 +42,7 @@
 This driver implements support for the hardware monitoring capabilities of the
 SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, SCH311x,
 and SCH5127 Super-I/O chips. These chips feature monitoring of 3 temp sensors
-temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and
+temp[1-3] (2 remote diodes and 1 internal), 8 voltages in[0-7] (7 external and
 1 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement
 up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and
 automatically.
@@ -105,6 +105,7 @@
 	in4: V1_IN				0V - 1.5V
 	in5: VTR	(+3.3V standby)		0V - 4.38V
 	in6: Vbat	(+3.0V)			0V - 4.38V
+	in7: Vtrip	(+1.5V)			0V - 1.99V
 
 Each voltage input has associated min and max limits which trigger an alarm
 when crossed.
@@ -217,10 +218,10 @@
 vrm				RW	Voltage regulator module version
 					number.
 
-in[0-6]_input			RO	Measured voltage in millivolts.
-in[0-6]_min			RW	Low limit for voltage input.
-in[0-6]_max			RW	High limit for voltage input.
-in[0-6]_alarm			RO	Voltage input alarm. Returns 1 if
+in[0-7]_input			RO	Measured voltage in millivolts.
+in[0-7]_min			RW	Low limit for voltage input.
+in[0-7]_max			RW	High limit for voltage input.
+in[0-7]_alarm			RO	Voltage input alarm. Returns 1 if
 					voltage input is or went outside the
 					associated min-max range, 0 otherwise.
 
@@ -324,3 +325,4 @@
 pwm5			opt		opt
 fan6			opt		opt
 pwm6			opt		opt
+in7						yes
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 0e76ef1..a22ecf4 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -51,7 +51,8 @@
   * JEDEC JC 42.4 compliant temperature sensor chips
     Prefix: 'jc42'
     Addresses scanned: I2C 0x18 - 0x1f
-    Datasheet: -
+    Datasheet:
+	http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
 
 Author:
 	Guenter Roeck <guenter.roeck@ericsson.com>
@@ -60,7 +61,11 @@
 Description
 -----------
 
-This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
+This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
+which are used on many DDR3 memory modules for mobile devices and servers. Some
+systems use the sensor to prevent memory overheating by automatically throttling
+the memory controller.
+
 The driver auto-detects the chips listed above, but can be manually instantiated
 to support other JC 42.4 compliant chips.
 
@@ -81,15 +86,19 @@
 which applies to all limits. This register can be written by writing into
 temp1_crit_hyst. Other hysteresis attributes are read-only.
 
+If the BIOS has configured the sensor for automatic temperature management, it
+is likely that it has locked the registers, i.e., that the temperature limits
+cannot be changed.
+
 Sysfs entries
 -------------
 
 temp1_input		Temperature (RO)
-temp1_min		Minimum temperature (RW)
-temp1_max		Maximum temperature (RW)
-temp1_crit		Critical high temperature (RW)
+temp1_min		Minimum temperature (RO or RW)
+temp1_max		Maximum temperature (RO or RW)
+temp1_crit		Critical high temperature (RO or RW)
 
-temp1_crit_hyst		Critical hysteresis temperature (RW)
+temp1_crit_hyst		Critical hysteresis temperature (RO or RW)
 temp1_max_hyst		Maximum hysteresis temperature (RO)
 
 temp1_min_alarm		Temperature low alarm
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 6526eee..d2b56a4 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -9,6 +9,8 @@
   Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+* AMD Family 12h processors: "Llano"
+* AMD Family 14h processors: "Brazos" (C/E/G-Series)
 
   Prefix: 'k10temp'
   Addresses scanned: PCI space
@@ -17,10 +19,14 @@
     http://support.amd.com/us/Processor_TechDocs/31116.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41256.pdf
+  BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
+    http://support.amd.com/us/Processor_TechDocs/43170.pdf
   Revision Guide for AMD Family 10h Processors:
     http://support.amd.com/us/Processor_TechDocs/41322.pdf
   Revision Guide for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41788.pdf
+  Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
+    http://support.amd.com/us/Processor_TechDocs/47534.pdf
   AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
     http://support.amd.com/us/Processor_TechDocs/43373.pdf
   AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@@ -34,7 +40,7 @@
 -----------
 
 This driver permits reading of the internal temperature sensor of AMD
-Family 10h and 11h processors.
+Family 10h/11h/12h/14h processors.
 
 All these processors have a sensor, but on those for Socket F or AM2+,
 the sensor may return inconsistent values (erratum 319).  The driver
diff --git a/Documentation/hwmon/lm93 b/Documentation/hwmon/lm93
index 7a10616..f3b2ad2 100644
--- a/Documentation/hwmon/lm93
+++ b/Documentation/hwmon/lm93
@@ -6,6 +6,10 @@
     Prefix 'lm93'
     Addresses scanned: I2C 0x2c-0x2e
     Datasheet: http://www.national.com/ds.cgi/LM/LM93.pdf
+  * National Semiconductor LM94
+    Prefix 'lm94'
+    Addresses scanned: I2C 0x2c-0x2e
+    Datasheet: http://www.national.com/ds.cgi/LM/LM94.pdf
 
 Authors:
 	Mark M. Hoffman <mhoffman@lightlink.com>
@@ -56,6 +60,9 @@
 for dynamic Vccp monitoring and PROCHOT. It is designed to monitor a dual
 processor Xeon class motherboard with a minimum of external components.
 
+LM94 is also supported in LM93 compatible mode. Extra sensors and features of
+LM94 are not supported.
+
 
 User Interface
 --------------
diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf
index fb145e5..8432e11 100644
--- a/Documentation/hwmon/w83627hf
+++ b/Documentation/hwmon/w83627hf
@@ -91,3 +91,25 @@
 
 The above sequence assumes a Super-I/O config space at 0x2e/0x2f, but
 0x4e/0x4f is also possible.
+
+Voltage pin mapping
+-------------------
+
+Here is a summary of the voltage pin mapping for the W83627THF. This
+can be useful to convert data provided by board manufacturers into
+working libsensors configuration statements.
+
+    W83627THF		|
+  Pin	| Name		| Register	| Sysfs attribute
+-----------------------------------------------------
+  100	| CPUVCORE	| 20h		| in0
+   99	| VIN0		| 21h		| in1
+   98	| VIN1		| 22h		| in2
+   97	| VIN2		| 24h		| in4
+  114	| AVCC		| 23h		| in3
+   61	| 5VSB		| 50h (bank 5)	| in7
+   74	| VBAT		| 51h (bank 5)	| in8
+
+For other supported devices, you'll have to take the hard path and
+look up the information in the datasheet yourself (and then please
+add it to this document).
diff --git a/Documentation/hwmon/w83793 b/Documentation/hwmon/w83793
index 51171a8..6cc5f63 100644
--- a/Documentation/hwmon/w83793
+++ b/Documentation/hwmon/w83793
@@ -92,7 +92,7 @@
 
 * Chassis
   If the case open alarm triggers, it will stay in this state unless cleared
-  by any write to the sysfs file "chassis".
+  by writing 0 to the sysfs file "intrusion0_alarm".
 
 * VID and VRM
   The VRM version is detected automatically, don't modify the it unless you
diff --git a/Documentation/input/ff.txt b/Documentation/input/ff.txt
index ded4d5f..b3867bf 100644
--- a/Documentation/input/ff.txt
+++ b/Documentation/input/ff.txt
@@ -49,7 +49,9 @@
 #include <linux/input.h>
 #include <sys/ioctl.h>
 
-unsigned long features[1 + FF_MAX/sizeof(unsigned long)];
+#define BITS_TO_LONGS(x) \
+	(((x) + 8 * sizeof (unsigned long) - 1) / (8 * sizeof (unsigned long)))
+unsigned long features[BITS_TO_LONGS(FF_CNT)];
 int ioctl(int file_descriptor, int request, unsigned long *features);
 
 "request" must be EVIOCGBIT(EV_FF, size of features array in bytes )
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index d6a63c7..ac293e9 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -247,7 +247,7 @@
 'p'	40-7F	linux/nvram.h
 'p'	80-9F	linux/ppdev.h		user-space parport
 					<mailto:tim@cyberelk.net>
-'p'	A1-A4	linux/pps.h		LinuxPPS
+'p'	A1-A5	linux/pps.h		LinuxPPS
 					<mailto:giometti@linux.it>
 'q'	00-1F	linux/serio.h
 'q'	80-FF	linux/telephony.h	Internet PhoneJACK, Internet LineJACK
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 59a69ec..f6dece5 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -81,7 +81,7 @@
     The only field that should go to zero. Incremented as requests are
     given to appropriate struct request_queue and decremented as they finish.
 Field 10 -- # of milliseconds spent doing I/Os
-    This field is increases so long as field 9 is nonzero.
+    This field increases so long as field 9 is nonzero.
 Field 11 -- weighted # of milliseconds spent doing I/Os
     This field is incremented at each I/O start, I/O completion, I/O
     merge, or read of these stats by the number of I/Os in progress
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index cab61d8..7a9e0b4 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -65,18 +65,21 @@
 
 2) Download the kexec-tools user-space package from the following URL:
 
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools.tar.gz
+http://kernel.org/pub/linux/utils/kernel/kexec/kexec-tools.tar.gz
 
 This is a symlink to the latest version.
 
 The latest kexec-tools git tree is available at:
 
-git://git.kernel.org/pub/scm/linux/kernel/git/horms/kexec-tools.git
-or
-http://www.kernel.org/git/?p=linux/kernel/git/horms/kexec-tools.git
+git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+and
+http://www.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+
+There is also a gitweb interface available at
+http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git
 
 More information about kexec-tools can be found at
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/README.html
+http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
 
 3) Unpack the tarball with the tar command, as follows:
 
@@ -439,6 +442,6 @@
 Contact
 =======
 
-Vivek Goyal (vgoyal@in.ibm.com)
+Vivek Goyal (vgoyal@redhat.com)
 Maneesh Soni (maneesh@in.ibm.com)
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f3dc951..f4a04c0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -43,11 +43,11 @@
 	AVR32	AVR32 architecture is enabled.
 	AX25	Appropriate AX.25 support is enabled.
 	BLACKFIN Blackfin architecture is enabled.
+	DRM	Direct Rendering Management support is enabled.
+	DYNAMIC_DEBUG Build in debug messages and enable them at runtime
 	EDD	BIOS Enhanced Disk Drive Services (EDD) is enabled
 	EFI	EFI Partitioning (GPT) is enabled
 	EIDE	EIDE/ATAPI support is enabled.
-	DRM	Direct Rendering Management support is enabled.
-	DYNAMIC_DEBUG Build in debug messages and enable them at runtime
 	FB	The frame buffer device is enabled.
 	GCOV	GCOV profiling is enabled.
 	HW	Appropriate hardware is enabled.
@@ -144,6 +144,11 @@
 and is between 256 and 4096 characters. It is defined in the file
 ./include/asm/setup.h as COMMAND_LINE_SIZE.
 
+Finally, the [KMG] suffix is commonly described after a number of kernel
+parameter values. These 'K', 'M', and 'G' letters represent the _binary_
+multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
+bytes respectively. Such letter suffixes can also be entirely omitted.
+
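+For example, 'crashkernel=64M@16M' is equivalent to 'crashkernel=65536K@16384K'
+(illustrative values; see the crashkernel= entry below).
+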
 
 	acpi=		[HW,ACPI,X86]
 			Advanced Configuration and Power Interface
@@ -199,11 +204,6 @@
 			unusable.  The "log_buf_len" parameter may be useful
 			if you need to capture more output.
 
-	acpi_display_output=	[HW,ACPI]
-			acpi_display_output=vendor
-			acpi_display_output=video
-			See above.
-
 	acpi_irq_balance [HW,ACPI]
 			ACPI will balance active IRQs
 			default in APIC mode
@@ -403,6 +403,10 @@
 	bttv.pll=	See Documentation/video4linux/bttv/Insmod-options
 	bttv.tuner=	and Documentation/video4linux/bttv/CARDLIST
 
+	bulk_remove=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for flushing multiple hpte entries
+			at a time.
+
 	c101=		[NET] Moxa C101 synchronous serial card
 
 	cachesize=	[BUGS=X86-32] Override level 2 CPU cache size detection.
@@ -546,16 +550,20 @@
 			Format:
 			<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
 
-	crashkernel=nn[KMG]@ss[KMG]
-			[KNL] Reserve a chunk of physical memory to
-			hold a kernel to switch to with kexec on panic.
+	crashkernel=size[KMG][@offset[KMG]]
+			[KNL] Using kexec, Linux can switch to a 'crash kernel'
+			upon panic. This parameter reserves the physical
+			memory region [offset, offset + size] for that kernel
+			image. If '@offset' is omitted, then a suitable offset
+			is selected automatically. Check
+			Documentation/kdump/kdump.txt for further details.
 
 	crashkernel=range1:size1[,range2:size2,...][@offset]
 			[KNL] Same as above, but depends on the memory
 			in the running system. The syntax of range is
 			start-[end] where start and end are both
 			a memory unit (amount[KMG]). See also
-			Documentation/kdump/kdump.txt for a example.
+			Documentation/kdump/kdump.txt for an example.
 
 	cs89x0_dma=	[HW,NET]
 			Format: <dma>
@@ -655,11 +663,6 @@
 
 	dscc4.setup=	[NET]
 
-	dynamic_printk	Enables pr_debug()/dev_dbg() calls if
-			CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled.
-			These can also be switched on/off via
-			<debugfs>/dynamic_printk/modules
-
 	earlycon=	[KNL] Output early console device and options.
 		uart[8250],io,<addr>[,options]
 		uart[8250],mmio,<addr>[,options]
@@ -884,6 +887,7 @@
 			     controller
 	i8042.nopnp	[HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
 			     controllers
+	i8042.notimeout	[HW] Ignore timeout condition signalled by controller
 	i8042.reset	[HW] Reset the controller during init and cleanup
 	i8042.unlock	[HW] Unlock (ignore) the keylock
 
@@ -1267,10 +1271,9 @@
 			6 (KERN_INFO)		informational
 			7 (KERN_DEBUG)		debug-level messages
 
-	log_buf_len=n	Sets the size of the printk ring buffer, in bytes.
-			Format: { n | nk | nM }
-			n must be a power of two.  The default size
-			is set in the kernel config file.
+	log_buf_len=n[KMG]	Sets the size of the printk ring buffer,
+			in bytes.  n must be a power of two.  The default
+			size is set in the kernel config file.
 
 	logo.nologo	[FB] Disables display of the built-in Linux logo.
 			This may be used to provide more screen space for
@@ -1490,6 +1493,10 @@
 	mtdparts=	[MTD]
 			See drivers/mtd/cmdlinepart.c.
 
+	multitce=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for updating multiple TCE entries
+			at a time.
+
 	onenand.bdry=	[HW,MTD] Flex-OneNAND Boundary Configuration
 
 			Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
@@ -1701,6 +1708,9 @@
 
 	no-kvmclock	[X86,KVM] Disable paravirtualized KVM clock driver
 
+	no-kvmapf	[X86,KVM] Disable paravirtualized asynchronous page
+			fault handling.
+
 	nolapic		[X86-32,APIC] Do not enable or use the local APIC.
 
 	nolapic_timer	[X86-32,APIC] Do not use the local APIC timer.
diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO
index e3a55b6..ab5189a 100644
--- a/Documentation/ko_KR/HOWTO
+++ b/Documentation/ko_KR/HOWTO
@@ -391,8 +391,8 @@
 bugme-janitor 메일링 리스트(bugzilla에 모든 변화들이 여기서 메일로 전해진다)
 에 등록하면 된다.
 
-      http://lists.osdl.org/mailman/listinfo/bugme-new
-      http://lists.osdl.org/mailman/listinfo/bugme-janitors
+      https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+      https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
 
 
 
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 741fe66..0cfb00f 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -598,7 +598,7 @@
 a) The instructions in DCR must be relocatable.
 b) The instructions in DCR must not include a call instruction.
 c) JTPR must not be targeted by any jump or call instruction.
-d) DCR must not straddle the border betweeen functions.
+d) DCR must not straddle the border between functions.
 
 Anyway, these limitations are checked by the in-kernel instruction
 decoder, so you don't need to worry about that.
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index b336266..ad85797 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -874,7 +874,7 @@
  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
                                  is waiting for an interrupt
  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
-                                 accesible via KVM_GET_VCPU_EVENTS)
+                                 accessible via KVM_GET_VCPU_EVENTS)
 
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.
@@ -1085,6 +1085,184 @@
 If any additional field gets added to this structure later on, a bit for that
 additional piece of information will be set in the flags bitmap.
 
+4.47 KVM_ASSIGN_PCI_DEVICE
+
+Capability: KVM_CAP_DEVICE_ASSIGNMENT
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_pci_dev (in)
+Returns: 0 on success, -1 on error
+
+Assigns a host PCI device to the VM.
+
+struct kvm_assigned_pci_dev {
+	__u32 assigned_dev_id;
+	__u32 busnr;
+	__u32 devfn;
+	__u32 flags;
+	__u32 segnr;
+	union {
+		__u32 reserved[11];
+	};
+};
+
+The PCI device is specified by the triple segnr, busnr, and devfn.
+Identification in succeeding service requests is done via assigned_dev_id. The
+following flags are specified:
+
+/* Depends on KVM_CAP_IOMMU */
+#define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
+
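+A minimal user-space sketch (illustrative values; vm_fd is assumed to be a
+VM file descriptor, and <linux/kvm.h>, <sys/ioctl.h>, <string.h> and
+<stdio.h> are included):
+
+	struct kvm_assigned_pci_dev dev;
+
+	memset(&dev, 0, sizeof(dev));
+	dev.assigned_dev_id = 1;		/* caller-chosen handle */
+	dev.segnr = 0;				/* PCI domain 0000 */
+	dev.busnr = 1;				/* bus 01 */
+	dev.devfn = (0x00 << 3) | 0x0;		/* slot 00, function 0 */
+	dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;
+
+	if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
+		perror("KVM_ASSIGN_PCI_DEVICE");
+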
+4.48 KVM_DEASSIGN_PCI_DEVICE
+
+Capability: KVM_CAP_DEVICE_DEASSIGNMENT
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_pci_dev (in)
+Returns: 0 on success, -1 on error
+
+Ends PCI device assignment, releasing all associated resources.
+
+See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
+used in kvm_assigned_pci_dev to identify the device.
+
+4.49 KVM_ASSIGN_DEV_IRQ
+
+Capability: KVM_CAP_ASSIGN_DEV_IRQ
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_irq (in)
+Returns: 0 on success, -1 on error
+
+Assigns an IRQ to a passed-through device.
+
+struct kvm_assigned_irq {
+	__u32 assigned_dev_id;
+	__u32 host_irq;
+	__u32 guest_irq;
+	__u32 flags;
+	union {
+		struct {
+			__u32 addr_lo;
+			__u32 addr_hi;
+			__u32 data;
+		} guest_msi;
+		__u32 reserved[12];
+	};
+};
+
+The following flags are defined:
+
+#define KVM_DEV_IRQ_HOST_INTX    (1 << 0)
+#define KVM_DEV_IRQ_HOST_MSI     (1 << 1)
+#define KVM_DEV_IRQ_HOST_MSIX    (1 << 2)
+
+#define KVM_DEV_IRQ_GUEST_INTX   (1 << 8)
+#define KVM_DEV_IRQ_GUEST_MSI    (1 << 9)
+#define KVM_DEV_IRQ_GUEST_MSIX   (1 << 10)
+
+It is not valid to specify multiple types per host or guest IRQ. However, the
+IRQ type of host and guest can differ or can even be null.
+
+4.50 KVM_DEASSIGN_DEV_IRQ
+
+Capability: KVM_CAP_ASSIGN_DEV_IRQ
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_irq (in)
+Returns: 0 on success, -1 on error
+
+Ends an IRQ assignment to a passed-through device.
+
+See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
+by assigned_dev_id, flags must correspond to the IRQ type specified on
+KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
+
+4.51 KVM_SET_GSI_ROUTING
+
+Capability: KVM_CAP_IRQ_ROUTING
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_irq_routing (in)
+Returns: 0 on success, -1 on error
+
+Sets the GSI routing table entries, overwriting any previously set entries.
+
+struct kvm_irq_routing {
+	__u32 nr;
+	__u32 flags;
+	struct kvm_irq_routing_entry entries[0];
+};
+
+No flags are specified so far, the corresponding field must be set to zero.
+
+struct kvm_irq_routing_entry {
+	__u32 gsi;
+	__u32 type;
+	__u32 flags;
+	__u32 pad;
+	union {
+		struct kvm_irq_routing_irqchip irqchip;
+		struct kvm_irq_routing_msi msi;
+		__u32 pad[8];
+	} u;
+};
+
+/* gsi routing entry types */
+#define KVM_IRQ_ROUTING_IRQCHIP 1
+#define KVM_IRQ_ROUTING_MSI 2
+
+No flags are specified so far, the corresponding field must be set to zero.
+
+struct kvm_irq_routing_irqchip {
+	__u32 irqchip;
+	__u32 pin;
+};
+
+struct kvm_irq_routing_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 pad;
+};
+
+4.52 KVM_ASSIGN_SET_MSIX_NR
+
+Capability: KVM_CAP_DEVICE_MSIX
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_msix_nr (in)
+Returns: 0 on success, -1 on error
+
+Set the number of MSI-X interrupts for an assigned device. This service can
+only be called once in the lifetime of an assigned device.
+
+struct kvm_assigned_msix_nr {
+	__u32 assigned_dev_id;
+	__u16 entry_nr;
+	__u16 padding;
+};
+
+#define KVM_MAX_MSIX_PER_DEV		256
+
+4.53 KVM_ASSIGN_SET_MSIX_ENTRY
+
+Capability: KVM_CAP_DEVICE_MSIX
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_msix_entry (in)
+Returns: 0 on success, -1 on error
+
+Specifies the routing of an MSI-X assigned device interrupt to a GSI. Setting
+the GSI vector to zero means disabling the interrupt.
+
+struct kvm_assigned_msix_entry {
+	__u32 assigned_dev_id;
+	__u32 gsi;
+	__u16 entry; /* The index of entry in the MSI-X table */
+	__u16 padding[3];
+};
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
diff --git a/Documentation/kvm/cpuid.txt b/Documentation/kvm/cpuid.txt
index 14a12ea..8820685 100644
--- a/Documentation/kvm/cpuid.txt
+++ b/Documentation/kvm/cpuid.txt
@@ -36,6 +36,9 @@
 KVM_FEATURE_CLOCKSOURCE2           ||     3 || kvmclock available at msrs
                                    ||       || 0x4b564d00 and 0x4b564d01
 ------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF               ||     4 || async pf can be enabled by
+                                   ||       || writing to msr 0x4b564d02
+------------------------------------------------------------------------------
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT ||    24 || host will warn if no guest-side
                                    ||       || per-cpu warps are expected in
                                    ||       || kvmclock.
diff --git a/Documentation/kvm/msr.txt b/Documentation/kvm/msr.txt
index 8ddcfe8..d079aed 100644
--- a/Documentation/kvm/msr.txt
+++ b/Documentation/kvm/msr.txt
@@ -3,7 +3,6 @@
 =====================================================
 
 KVM makes use of some custom MSRs to service some requests.
-At present, this facility is only used by kvmclock.
 
 Custom MSRs have a range reserved for them, that goes from
 0x4b564d00 to 0x4b564dff. There are MSRs outside this area,
@@ -151,3 +150,38 @@
 			return PRESENT;
 		} else
 			return NON_PRESENT;
+
+MSR_KVM_ASYNC_PF_EN: 0x4b564d02
+	data: Bits 63-6 hold the 64-byte aligned physical address of a
+	64 byte memory area which must be in guest RAM and must be
+	zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
+	when asynchronous page faults are enabled on the vcpu, 0 when
+	disabled. Bit 1 is 1 if asynchronous page faults can be injected
+	when the vcpu is in cpl == 0.
+
+	The first 4 bytes of the 64 byte memory location will be written
+	to by the hypervisor at the time of asynchronous page fault (APF)
+	injection to indicate the type of asynchronous page fault. A
+	value of 1 means that the page referred to by the page fault is
+	not present. A value of 2 means that the page is now available.
+	Disabling interrupts inhibits APFs. The guest must not enable
+	interrupts before the reason is read, or it may be overwritten
+	by another APF. Since APF uses the same exception vector as a
+	regular page fault, the guest must reset the reason to 0 before
+	it does anything that can generate a normal page fault. If the
+	APF reason is 0 during a page fault, it is a regular page
+	fault.
+
+	During delivery of a type 1 APF, cr2 contains a token that will
+	be used to notify the guest when the missing page becomes
+	available. When the page becomes available, a type 2 APF is sent
+	with cr2 set to the token associated with the page. There is a
+	special token, 0xffffffff, which tells the vcpu that it should
+	wake up all processes waiting for APFs and that no individual
+	type 2 APFs will be sent.
+
+	If APF is disabled while there are outstanding APFs, they will
+	not be delivered.
+
+	Currently a type 2 APF will always be delivered on the same vcpu
+	as the type 1 was, but the guest should not rely on that.
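+
+	As an illustrative guest-side sketch (x86 kernel context; the
+	function name is made up, and a real guest would use a per-cpu
+	reason area), enabling APF for one vcpu with a zeroed, 64-byte
+	aligned area could look like:
+
+	static u64 apf_reason_area[8] __aligned(64);	/* 64 bytes, zeroed */
+
+	static void my_enable_async_pf(void)
+	{
+		u64 pa = __pa(apf_reason_area);
+
+		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | 1);	/* bit 0: enable */
+	}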
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index dc73bc54..d9da7e1 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -39,6 +39,9 @@
 #include <limits.h>
 #include <stddef.h>
 #include <signal.h>
+#include <pwd.h>
+#include <grp.h>
+
 #include <linux/virtio_config.h>
 #include <linux/virtio_net.h>
 #include <linux/virtio_blk.h>
@@ -298,20 +301,27 @@
 
 	/*
 	 * We use a private mapping (ie. if we write to the page, it will be
-	 * copied).
+	 * copied). We allocate two extra PROT_NONE pages to act as guard
+	 * pages against read/write attempts that exceed allocated space.
 	 */
-	addr = mmap(NULL, getpagesize() * num,
-		    PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0);
+	addr = mmap(NULL, getpagesize() * (num+2),
+		    PROT_NONE, MAP_PRIVATE, fd, 0);
+
 	if (addr == MAP_FAILED)
 		err(1, "Mmapping %u pages of /dev/zero", num);
 
+	if (mprotect(addr + getpagesize(), getpagesize() * num,
+		     PROT_READ|PROT_WRITE) == -1)
+		err(1, "mprotect rw %u pages failed", num);
+
 	/*
 	 * One neat mmap feature is that you can close the fd, and it
 	 * stays mapped.
 	 */
 	close(fd);
 
-	return addr;
+	/* Return address after PROT_NONE page */
+	return addr + getpagesize();
 }
 
 /* Get some more pages for a device. */
@@ -343,7 +353,7 @@
 	 * done to it.  This allows us to share untouched memory between
 	 * Guests.
 	 */
-	if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC,
+	if (mmap(addr, len, PROT_READ|PROT_WRITE,
 		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
 		return;
 
@@ -573,10 +583,10 @@
 			    unsigned int line)
 {
 	/*
-	 * We have to separately check addr and addr+size, because size could
-	 * be huge and addr + size might wrap around.
+	 * Check if the requested address plus size exceeds the allocated
+	 * memory, or addr + size wraps around.
 	 */
-	if (addr >= guest_limit || addr + size >= guest_limit)
+	if ((addr + size) > guest_limit || (addr + size) < addr)
 		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
 	/*
 	 * We return a pointer for the caller's convenience, now we know it's
@@ -1872,6 +1882,8 @@
 	{ "block", 1, NULL, 'b' },
 	{ "rng", 0, NULL, 'r' },
 	{ "initrd", 1, NULL, 'i' },
+	{ "username", 1, NULL, 'u' },
+	{ "chroot", 1, NULL, 'c' },
 	{ NULL },
 };
 static void usage(void)
@@ -1894,6 +1906,12 @@
 	/* If they specify an initrd file to load. */
 	const char *initrd_name = NULL;
 
+	/* Password structure for initgroups/setres[gu]id */
+	struct passwd *user_details = NULL;
+
+	/* Directory to chroot to */
+	char *chroot_path = NULL;
+
 	/* Save the args: we "reboot" by execing ourselves again. */
 	main_args = argv;
 
@@ -1950,6 +1968,14 @@
 		case 'i':
 			initrd_name = optarg;
 			break;
+		case 'u':
+			user_details = getpwnam(optarg);
+			if (!user_details)
+				err(1, "getpwnam failed, incorrect username?");
+			break;
+		case 'c':
+			chroot_path = optarg;
+			break;
 		default:
 			warnx("Unknown argument %s", argv[optind]);
 			usage();
@@ -2021,6 +2047,37 @@
 	/* If we exit via err(), this kills all the threads, restores tty. */
 	atexit(cleanup_devices);
 
+	/* If requested, chroot to a directory */
+	if (chroot_path) {
+		if (chroot(chroot_path) != 0)
+			err(1, "chroot(\"%s\") failed", chroot_path);
+
+		if (chdir("/") != 0)
+			err(1, "chdir(\"/\") failed");
+
+		verbose("chroot done\n");
+	}
+
+	/* If requested, drop privileges */
+	if (user_details) {
+		uid_t u;
+		gid_t g;
+
+		u = user_details->pw_uid;
+		g = user_details->pw_gid;
+
+		if (initgroups(user_details->pw_name, g) != 0)
+			err(1, "initgroups failed");
+
+		if (setresgid(g, g, g) != 0)
+			err(1, "setresgid failed");
+
+		if (setresuid(u, u, u) != 0)
+			err(1, "setresuid failed");
+
+		verbose("Dropping privileges completed\n");
+	}
+
 	/* Finally, run the Guest.  This doesn't return. */
 	run_guest();
 }
diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt
index efb3a6a..dad9997 100644
--- a/Documentation/lguest/lguest.txt
+++ b/Documentation/lguest/lguest.txt
@@ -111,8 +111,16 @@
 
   Then use --tunnet=bridge:lg0 when launching the guest.
 
-  See http://linux-net.osdl.org/index.php/Bridge for general information
-  on how to get bridging working.
+  See:
+
+    http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+
+  for general information on how to get bridging to work.
+
+- Random number generation. Using the --rng option will provide a
+  /dev/hwrng in the guest that will read from the host's /dev/random.
+  Use this option in conjunction with rng-tools (see ../hw_random.txt)
+  to provide entropy to the guest kernel's /dev/random.
 
 There is a helpful mailing list at http://ozlabs.org/mailman/listinfo/lguest
 
diff --git a/Documentation/magic-number.txt b/Documentation/magic-number.txt
index 505f196..4b12abc 100644
--- a/Documentation/magic-number.txt
+++ b/Documentation/magic-number.txt
@@ -150,7 +150,7 @@
 STL_BOARDMAGIC        0xa2267f52  stlbrd            include/linux/stallion.h
 ENI155_MAGIC          0xa54b872d  midway_eprom	    drivers/atm/eni.h
 SCI_MAGIC             0xbabeface  gs_port           drivers/char/sh-sci.h
-CODA_MAGIC            0xC0DAC0DA  coda_file_info    include/linux/coda_fs_i.h
+CODA_MAGIC            0xC0DAC0DA  coda_file_info    fs/coda/coda_fs_i.h
 DPMEM_MAGIC           0xc0ffee11  gdt_pci_sram      drivers/scsi/gdth.h
 STLI_PORTMAGIC        0xe671c7a1  stliport          include/linux/istallion.h
 YAM_MAGIC             0xF10A7654  yam_port          drivers/net/hamradio/yam.c
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099..4edd78d 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -40,8 +40,6 @@
 	- info on using the DECnet networking layer in Linux.
 depca.txt
 	- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-	- the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
 	- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@
 	- info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
 	- serial IP load balancing
-ethertap.txt
-	- the Ethertap user space packet reception and transmission driver
 ewrk3.txt
 	- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@
 	- TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
 	- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-	- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
 	- general info on X.25 development.
 x25-iface.txt
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
index 5aba7a3..24c308d 100644
--- a/Documentation/networking/Makefile
+++ b/Documentation/networking/Makefile
@@ -4,6 +4,8 @@
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 5dc6387..25d2f41 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -49,7 +49,8 @@
 3.3	Configuring Bonding Manually with Ifenslave
 3.3.1		Configuring Multiple Bonds Manually
 3.4	Configuring Bonding Manually via Sysfs
-3.5	Overriding Configuration for Special Cases
+3.5	Configuration with Interfaces Support
+3.6	Overriding Configuration for Special Cases
 
 4. Querying Bonding Configuration
 4.1	Bonding Configuration
@@ -161,8 +162,8 @@
 default kernel source include directory.
 
 SECOND IMPORTANT NOTE:
-	If you plan to configure bonding using sysfs, you do not need
-to use ifenslave.
+	If you plan to configure bonding using sysfs or using the
+/etc/network/interfaces file, you do not need to use ifenslave.
 
 2. Bonding Driver Options
 =========================
@@ -779,22 +780,26 @@
 
 	You can configure bonding using either your distro's network
 initialization scripts, or manually using either ifenslave or the
-sysfs interface.  Distros generally use one of two packages for the
-network initialization scripts: initscripts or sysconfig.  Recent
-versions of these packages have support for bonding, while older
+sysfs interface.  Distros generally use one of three packages for the
+network initialization scripts: initscripts, sysconfig or interfaces.
+Recent versions of these packages have support for bonding, while older
 versions do not.
 
 	We will first describe the options for configuring bonding for
-distros using versions of initscripts and sysconfig with full or
-partial support for bonding, then provide information on enabling
+distros using versions of initscripts, sysconfig and interfaces with full
+or partial support for bonding, then provide information on enabling
 bonding without support from the network initialization scripts (i.e.,
 older versions of initscripts or sysconfig).
 
-	If you're unsure whether your distro uses sysconfig or
-initscripts, or don't know if it's new enough, have no fear.
+	If you're unsure whether your distro uses sysconfig,
+initscripts or interfaces, or don't know if it's new enough, have no fear.
 Determining this is fairly straightforward.
 
-	First, issue the command:
+	First, look for a file called interfaces in the /etc/network directory.
+If this file is present on your system, then your system uses interfaces. See
+Configuration with Interfaces Support.
+
+	Otherwise, issue the command:
 
 $ rpm -qf /sbin/ifup
 
@@ -1327,8 +1332,62 @@
 echo +eth2 > /sys/class/net/bond1/bonding/slaves
 echo +eth3 > /sys/class/net/bond1/bonding/slaves
 
-3.5 Overriding Configuration for Special Cases
+3.5 Configuration with Interfaces Support
+-----------------------------------------
+
+        This section applies to distros which use the /etc/network/interfaces
+file to describe network interface configuration, most notably Debian and its
+derivatives.
+
+	The ifup and ifdown commands on Debian don't support bonding out of
+the box. The ifenslave-2.6 package should be installed to provide bonding
+support.  Once installed, this package will provide bond-* options to be used
+in /etc/network/interfaces.
+
+	Note that the ifenslave-2.6 package will load the bonding module and use
+the ifenslave command when appropriate.
+
+Example Configurations
+----------------------
+
+In /etc/network/interfaces, the following stanza will configure bond0, in
+active-backup mode, with eth0 and eth1 as slaves.
+
+auto bond0
+iface bond0 inet dhcp
+	bond-slaves eth0 eth1
+	bond-mode active-backup
+	bond-miimon 100
+	bond-primary eth0 eth1
+
+If the above configuration doesn't work, you might have a system using
+upstart for system startup. This is most notably true for recent
+Ubuntu versions. The following stanza in /etc/network/interfaces will
+produce the same result on those systems.
+
+auto bond0
+iface bond0 inet dhcp
+	bond-slaves none
+	bond-mode active-backup
+	bond-miimon 100
+
+auto eth0
+iface eth0 inet manual
+	bond-master bond0
+	bond-primary eth0 eth1
+
+auto eth1
+iface eth1 inet manual
+	bond-master bond0
+	bond-primary eth0 eth1
+
+For a full list of bond-* supported options in /etc/network/interfaces and some
+more advanced examples tailored to your particular distro, see the files in
+/usr/share/doc/ifenslave-2.6.
+
+3.6 Overriding Configuration for Special Cases
 ----------------------------------------------
+
 When using the bonding driver, the physical port which transmits a frame is
 typically selected by the bonding driver, and is not relevant to the user or
 system administrator.  The output port is simply selected using the policies of
diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.txt
index bec69a8..a7ba5e4 100644
--- a/Documentation/networking/bridge.txt
+++ b/Documentation/networking/bridge.txt
@@ -1,8 +1,8 @@
 In order to use the Ethernet bridging functionality, you'll need the
 userspace tools. These programs and documentation are available
-at http://www.linux-foundation.org/en/Net:Bridge.  The download page is
+at http://www.linuxfoundation.org/en/Net:Bridge.  The download page is
 http://prdownloads.sourceforge.net/bridge.
 
 If you still have questions, don't hesitate to post to the mailing list 
-(more info http://lists.osdl.org/mailman/listinfo/bridge).
+(more info https://lists.linux-foundation.org/mailman/listinfo/bridge).
 
diff --git a/Documentation/networking/caif/spi_porting.txt b/Documentation/networking/caif/spi_porting.txt
index 61d7c92..0cb8cb9 100644
--- a/Documentation/networking/caif/spi_porting.txt
+++ b/Documentation/networking/caif/spi_porting.txt
@@ -32,7 +32,7 @@
 	This function is called by the CAIF SPI interface to give
 	you a chance to set up your hardware to be ready to receive
 	a stream of data from the master. The xfer structure contains
-	both physical and logical adresses, as well as the total length
+	both physical and logical addresses, as well as the total length
 	of the transfer in both directions.The dev parameter can be used
 	to map to different CAIF SPI slave devices.
 
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index b395ca6..d718bc2 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -38,11 +38,11 @@
 specified in RFCs 4340...42.
 
 The known bugs are at:
-	http://linux-net.osdl.org/index.php/TODO#DCCP
+	http://www.linuxfoundation.org/collaborate/workgroups/networking/todo#DCCP
 
 For more up-to-date versions of the DCCP implementation, please consider using
 the experimental DCCP test tree; instructions for checking this out are on:
-http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
+http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp_testing#Experimental_DCCP_source_tree
 
 
 Socket options
@@ -167,6 +167,7 @@
 seq_window = 100
 	The initial sequence window (sec. 7.5.2) of the sender. This influences
 	the local ackno validity and the remote seqno validity windows (7.5.1).
+	Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
 
 tx_qlen = 5
 	The size of the transmit buffer in packets. A value of 0 corresponds
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index aefd1e6..04ca0632 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -61,7 +61,6 @@
 	create	dns_resolver  	foo:*	*	/usr/sbin/dns.foo %k
 
 
-
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@
      returned also.
 
 
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+
+
 =========
 MECHANISM
 =========
diff --git a/Documentation/networking/generic_netlink.txt b/Documentation/networking/generic_netlink.txt
index d4f8b8b..3e07111 100644
--- a/Documentation/networking/generic_netlink.txt
+++ b/Documentation/networking/generic_netlink.txt
@@ -1,3 +1,3 @@
 A wiki document on how to use Generic Netlink can be found here:
 
- * http://linux-net.osdl.org/index.php/Generic_Netlink_HOWTO
+ * http://www.linuxfoundation.org/collaborate/workgroups/networking/generic_netlink_howto
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d99940d..ac3b4a7 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -187,7 +187,7 @@
 tcp_dsack - BOOLEAN
 	Allows TCP to send "duplicate" SACKs.
 
-tcp_ecn - BOOLEAN
+tcp_ecn - INTEGER
 	Enable Explicit Congestion Notification (ECN) in TCP. ECN is only
 	used when both ends of the TCP flow support it. It is useful to
 	avoid losses due to congestion (when the bottleneck router supports
diff --git a/Documentation/nfc/nfc-pn544.txt b/Documentation/nfc/nfc-pn544.txt
new file mode 100644
index 0000000..2fcac9f5
--- /dev/null
+++ b/Documentation/nfc/nfc-pn544.txt
@@ -0,0 +1,114 @@
+Kernel driver for the NXP Semiconductors PN544 Near Field
+Communication chip
+
+Author: Jari Vanhala
+Contact: Matti Aaltonen (matti.j.aaltonen at nokia.com)
+
+General
+-------
+
+The PN544 is an integrated transmission module for contactless
+communication. The driver goes under drivers/nfc/ and is compiled as a
+module named "pn544". It registers a misc device and creates a device
+file named "/dev/pn544".
+
+Host interfaces: I2C, SPI and HSU; this driver currently supports only I2C.
+
+The Interface
+-------------
+
+The driver offers a sysfs interface for a hardware test and an IOCTL
+interface for selecting between two operating modes. There are read,
+write and poll functions for transferring messages. The two operating
+modes are the normal (HCI) mode and the firmware update mode.
+
+PN544 is controlled by sending messages from the userspace to the
+chip. The main function of the driver is just to pass those messages
+without caring about the message content.
+
+
+Protocols
+---------
+
+In the normal (HCI) mode and in the firmware update mode read and
+write functions behave a bit differently because the message formats
+or the protocols are different.
+
+In the normal (HCI) mode the protocol used is derived from the ETSI
+HCI specification. The firmware is updated using a specific protocol,
+which is different from HCI.
+
+HCI messages consist of an eight-bit header and the message body. The
+header contains the message length. The maximum size of an HCI message is
+33 bytes. In HCI mode, sent messages are tested for a correct
+checksum. Firmware update messages carry the length in the second (MSB)
+and third (LSB) bytes of the message. The maximum FW message length is
+1024 bytes.
+
+For the ETSI HCI specification see
+http://www.etsi.org/WebSite/Technologies/ProtocolSpecification.aspx
+
+The Hardware Test
+-----------------
+
+The idea of the test is that it can be performed by reading from the
+corresponding sysfs file. The test is implemented in the board file
+and it should check that the PN544 can be put into the firmware update
+mode. If the test is not implemented, the sysfs file does not get
+created.
+
+Example:
+> cat /sys/module/pn544/drivers/i2c\:pn544/3-002b/nfc_test
+1
+
+Normal Operation
+----------------
+
+PN544 is powered up when the device file is opened, otherwise it's
+turned off. Only one instance can use the device at a time.
+
+Userspace applications control PN544 with HCI messages. The hardware
+sends an interrupt when data is available for reading. Data is
+physically read when the read function is called by a userspace
+application. Poll() checks the read interrupt state. Configuration and
+self testing are also done from userspace using read and write.
+
+Example platform data:
+
+static int rx71_pn544_nfc_request_resources(struct i2c_client *client)
+{
+	/* Get and setup the HW resources for the device */
+}
+
+static void rx71_pn544_nfc_free_resources(void)
+{
+	/* Release the HW resources */
+}
+
+static void rx71_pn544_nfc_enable(int fw)
+{
+	/* Turn the device on */
+}
+
+static int rx71_pn544_nfc_test(void)
+{
+	/*
+	 * Put the device into the FW update mode
+	 * and then back to the normal mode.
+	 * Check the behavior and return one on success,
+	 * zero on failure.
+	 */
+}
+
+static void rx71_pn544_nfc_disable(void)
+{
+	/* turn the power off */
+}
+
+static struct pn544_nfc_platform_data rx71_nfc_data = {
+	.request_resources = rx71_pn544_nfc_request_resources,
+	.free_resources = rx71_pn544_nfc_free_resources,
+	.enable = rx71_pn544_nfc_enable,
+	.test = rx71_pn544_nfc_test,
+	.disable = rx71_pn544_nfc_disable,
+};
diff --git a/Documentation/pps/pps.txt b/Documentation/pps/pps.txt
index 125f4ab..d35dcdd 100644
--- a/Documentation/pps/pps.txt
+++ b/Documentation/pps/pps.txt
@@ -170,3 +170,49 @@
 
 Please, note that to compile userland programs you need the file timepps.h
 (see Documentation/pps/).
+
+
+Generators
+----------
+
+Sometimes one needs to be able not only to catch PPS signals but also to
+produce them. For example, running a distributed simulation may require the
+computers' clocks to be synchronized very tightly. One way to do this is to
+invent some complicated hardware solution, but it may be neither necessary
+nor affordable. The cheap way is to load a PPS generator on one of the
+computers (the master) and PPS clients on the others (the slaves), and use
+very simple cables to deliver the signals over parallel ports, for example.
+
+Parallel port cable pinout:
+pin	name	master      slave
+1	STROBE	  *------     *
+2	D0	  *     |     *
+3	D1	  *     |     *
+4	D2	  *     |     *
+5	D3	  *     |     *
+6	D4	  *     |     *
+7	D5	  *     |     *
+8	D6	  *     |     *
+9	D7	  *     |     *
+10	ACK	  *     ------*
+11	BUSY	  *           *
+12	PE	  *           *
+13	SEL	  *           *
+14	AUTOFD	  *           *
+15	ERROR	  *           *
+16	INIT	  *           *
+17	SELIN	  *           *
+18-25	GND	  *-----------*
+
+Please note that the parallel port interrupt occurs only on the high->low
+transition, so it is used for the PPS assert edge. The PPS clear edge can be
+determined only by polling in the interrupt handler, which can actually be
+done much more precisely because interrupt handling delays can be quite big
+and random. So the current parport PPS generator implementation (the
+pps_gen_parport module) is geared towards using the clear edge for time
+synchronization.
+
+Clear edge polling is done with interrupts disabled, so it's better to keep
+the delay between the assert and clear edges as small as possible to reduce
+system latencies. But if it is too small, the slave won't be able to capture
+the clear edge transition. The default of 30us should be good enough in most
+situations. The delay can be selected using the 'delay' pps_gen_parport
+module parameter.
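+
+For example, to load the generator module with the default delay made
+explicit (a usage sketch; only the 'delay' parameter described above is
+assumed):
+
+# modprobe pps_gen_parport delay=30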
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index 3c00c9c..d2651c4 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -3,7 +3,7 @@
 sched-arch.txt
 	- CPU Scheduler implementation hints for architecture specific code.
 sched-design-CFS.txt
-	- goals, design and implementation of the Complete Fair Scheduler.
+	- goals, design and implementation of the Completely Fair Scheduler.
 sched-domains.txt
 	- information on scheduling domains.
 sched-nice-design.txt
diff --git a/Documentation/scheduler/sched-stats.txt b/Documentation/scheduler/sched-stats.txt
index 01e6940..1cd5d51 100644
--- a/Documentation/scheduler/sched-stats.txt
+++ b/Documentation/scheduler/sched-stats.txt
@@ -1,3 +1,7 @@
+Version 15 of schedstats dropped some sched_yield() counters:
+yld_exp_empty, yld_act_empty and yld_both_empty. Otherwise, it is
+identical to version 14.
+
 Version 14 of schedstats includes support for sched_domains, which hit the
 mainline kernel in 2.6.20 although it is identical to the stats from version
 12 which was in the kernel from 2.6.13-2.6.19 (version 13 never saw a kernel
@@ -28,32 +32,25 @@
 
 CPU statistics
 --------------
-cpu<N> 1 2 3 4 5 6 7 8 9 10 11 12
+cpu<N> 1 2 3 4 5 6 7 8 9
 
-NOTE: In the sched_yield() statistics, the active queue is considered empty
-    if it has only one process in it, since obviously the process calling
-    sched_yield() is that process.
-
-First four fields are sched_yield() statistics:
-     1) # of times both the active and the expired queue were empty
-     2) # of times just the active queue was empty
-     3) # of times just the expired queue was empty
-     4) # of times sched_yield() was called
+First field is a sched_yield() statistic:
+     1) # of times sched_yield() was called
 
 Next three are schedule() statistics:
-     5) # of times we switched to the expired queue and reused it
-     6) # of times schedule() was called
-     7) # of times schedule() left the processor idle
+     2) # of times we switched to the expired queue and reused it
+     3) # of times schedule() was called
+     4) # of times schedule() left the processor idle
 
 Next two are try_to_wake_up() statistics:
-     8) # of times try_to_wake_up() was called
-     9) # of times try_to_wake_up() was called to wake up the local cpu
+     5) # of times try_to_wake_up() was called
+     6) # of times try_to_wake_up() was called to wake up the local cpu
 
 Next three are statistics describing scheduling latency:
-    10) sum of all time spent running by tasks on this processor (in jiffies)
-    11) sum of all time spent waiting to run by tasks on this processor (in
+     7) sum of all time spent running by tasks on this processor (in jiffies)
+     8) sum of all time spent waiting to run by tasks on this processor (in
         jiffies)
-    12) # of timeslices run on this cpu
+     9) # of timeslices run on this cpu
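+
+These per-cpu lines are read from /proc/schedstat; a quick way to look at
+just the cpu lines is, for example:
+
+$ grep '^cpu' /proc/schedstat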
 
 
 Domain statistics
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
index 337c924..5e83769 100644
--- a/Documentation/scsi/ChangeLog.lpfc
+++ b/Documentation/scsi/ChangeLog.lpfc
@@ -573,7 +573,7 @@
 	* Backround nodev_timeout processing to DPC This enables us to
 	  unblock (stop dev_loss_tmo) when appopriate.
 	* Fix array discovery with multiple luns.  The max_luns was 0 at
-	  the time the host structure was intialized.  lpfc_cfg_params
+	  the time the host structure was initialized.  lpfc_cfg_params
 	  then set the max_luns to the correct value afterwards.
 	* Remove unused define LPFC_MAX_LUN and set the default value of
 	  lpfc_max_lun parameter to 512.
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
index 7c90050..540db41 100644
--- a/Documentation/serial/tty.txt
+++ b/Documentation/serial/tty.txt
@@ -107,7 +107,7 @@
 
 dcd_change()	-	Report to the tty line the current DCD pin status
 			changes and the relative timestamp. The timestamp
-			can be NULL.
+			cannot be NULL.
 
 
 Driver Access
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index d0eb696..3c1eddd 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -974,13 +974,6 @@
 
     See hdspm.txt for details.
 
-  Module snd-hifier
-  -----------------
-
-    Module for the MediaTek/TempoTec HiFier Fantasia sound card.
-
-    This module supports autoprobe and multiple cards.
-
   Module snd-ice1712
   ------------------
 
@@ -1531,15 +1524,20 @@
   Module snd-oxygen
   -----------------
 
-    Module for sound cards based on the C-Media CMI8788 chip:
+    Module for sound cards based on the C-Media CMI8786/8787/8788 chip:
     * Asound A-8788
+    * Asus Xonar DG
     * AuzenTech X-Meridian
+    * AuzenTech X-Meridian 2G
     * Bgears b-Enspirer
     * Club3D Theatron DTS
     * HT-Omega Claro (plus)
     * HT-Omega Claro halo (XT)
+    * Kuroutoshikou CMI8787-HG2PCI
     * Razer Barracuda AC-1
     * Sondigo Inferno
+    * TempoTec HiFier Fantasia
+    * TempoTec HiFier Serenade
 
     This module supports autoprobe and multiple cards.
 
@@ -2006,9 +2004,9 @@
   Module snd-virtuoso
   -------------------
 
-    Module for sound cards based on the Asus AV100/AV200 chips,
-    i.e., Xonar D1, DX, D2, D2X, DS, HDAV1.3 (Deluxe), Essence ST
-    (Deluxe) and Essence STX.
+    Module for sound cards based on the Asus AV66/AV100/AV200 chips,
+    i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
+    HDAV1.3 (Deluxe), and HDAV1.3 Slim.
 
     This module supports autoprobe and multiple cards.
 
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 37c6aad..0caf77e 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -149,7 +149,6 @@
   acer-aspire-7730g Acer Aspire 7730G
   acer-aspire-8930g Acer Aspire 8930G
   medion	Medion Laptops
-  medion-md2	Medion MD2
   targa-dig	Targa/MSI
   targa-2ch-dig	Targa/MSI with 2-channel
   targa-8ch-dig Targa/MSI with 8-channel (MSI GX620)
@@ -297,6 +296,7 @@
 =============
   laptop	Basic Laptop config (default)
   hp-laptop	HP laptops, e g G60
+  asus		Asus K52JU, Lenovo G560
   dell-laptop	Dell laptops
   dell-vostro	Dell Vostro
   olpc-xo-1_5	OLPC XO 1.5
diff --git a/Documentation/sound/alsa/soc/codec.txt b/Documentation/sound/alsa/soc/codec.txt
index 37ba3a7..bce23a4 100644
--- a/Documentation/sound/alsa/soc/codec.txt
+++ b/Documentation/sound/alsa/soc/codec.txt
@@ -27,42 +27,38 @@
 
 1 - Codec DAI and PCM configuration
 -----------------------------------
-Each codec driver must have a struct snd_soc_codec_dai to define its DAI and
+Each codec driver must have a struct snd_soc_dai_driver to define its DAI and
 PCM capabilities and operations. This struct is exported so that it can be
 registered with the core by your machine driver.
 
 e.g.
 
-struct snd_soc_codec_dai wm8731_dai = {
-	.name = "WM8731",
-	/* playback capabilities */
+static struct snd_soc_dai_ops wm8731_dai_ops = {
+	.prepare	= wm8731_pcm_prepare,
+	.hw_params	= wm8731_hw_params,
+	.shutdown	= wm8731_shutdown,
+	.digital_mute	= wm8731_mute,
+	.set_sysclk	= wm8731_set_dai_sysclk,
+	.set_fmt	= wm8731_set_dai_fmt,
+};
+
+struct snd_soc_dai_driver wm8731_dai = {
+	.name = "wm8731-hifi",
 	.playback = {
 		.stream_name = "Playback",
 		.channels_min = 1,
 		.channels_max = 2,
 		.rates = WM8731_RATES,
 		.formats = WM8731_FORMATS,},
-	/* capture capabilities */
 	.capture = {
 		.stream_name = "Capture",
 		.channels_min = 1,
 		.channels_max = 2,
 		.rates = WM8731_RATES,
 		.formats = WM8731_FORMATS,},
-	/* pcm operations - see section 4 below */
-	.ops = {
-		.prepare = wm8731_pcm_prepare,
-		.hw_params = wm8731_hw_params,
-		.shutdown = wm8731_shutdown,
-	},
-	/* DAI operations - see DAI.txt */
-	.dai_ops = {
-		.digital_mute = wm8731_mute,
-		.set_sysclk = wm8731_set_dai_sysclk,
-		.set_fmt = wm8731_set_dai_fmt,
-	}
+	.ops = &wm8731_dai_ops,
+	.symmetric_rates = 1,
 };
-EXPORT_SYMBOL_GPL(wm8731_dai);
 
 
 2 - Codec control IO
@@ -186,13 +182,14 @@
 
 i.e.
 
-static int wm8974_mute(struct snd_soc_codec *codec,
-	struct snd_soc_codec_dai *dai, int mute)
+static int wm8974_mute(struct snd_soc_dai *dai, int mute)
 {
-	u16 mute_reg = wm8974_read_reg_cache(codec, WM8974_DAC) & 0xffbf;
-	if(mute)
-		wm8974_write(codec, WM8974_DAC, mute_reg | 0x40);
+	struct snd_soc_codec *codec = dai->codec;
+	u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
+
+	if (mute)
+		snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40);
 	else
-		wm8974_write(codec, WM8974_DAC, mute_reg);
+		snd_soc_write(codec, WM8974_DAC, mute_reg);
 	return 0;
 }
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 2524c75..3e2ec9c 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -12,6 +12,8 @@
 struct snd_soc_card {
 	char *name;
 
+	...
+
 	int (*probe)(struct platform_device *pdev);
 	int (*remove)(struct platform_device *pdev);
 
@@ -22,12 +24,13 @@
 	int (*resume_pre)(struct platform_device *pdev);
 	int (*resume_post)(struct platform_device *pdev);
 
-	/* machine stream operations */
-	struct snd_soc_ops *ops;
+	...
 
 	/* CPU <--> Codec DAI links  */
 	struct snd_soc_dai_link *dai_link;
 	int num_links;
+
+	...
 };
 
 probe()/remove()
@@ -42,11 +45,6 @@
 and DMA is suspended and resumed. Optional.
 
 
-Machine operations
-------------------
-The machine specific audio operations can be set here. Again this is optional.
-
-
 Machine DAI Configuration
 -------------------------
 The machine DAI configuration glues all the codec and CPU DAIs together. It can
@@ -61,8 +59,10 @@
 static struct snd_soc_dai_link corgi_dai = {
 	.name = "WM8731",
 	.stream_name = "WM8731",
-	.cpu_dai = &pxa_i2s_dai,
-	.codec_dai = &wm8731_dai,
+	.cpu_dai_name = "pxa-i2s-dai",
+	.codec_dai_name = "wm8731-hifi",
+	.platform_name = "pxa-pcm-audio",
+	.codec_name = "wm8731-codec.0-001a",
 	.init = corgi_wm8731_init,
 	.ops = &corgi_ops,
 };
@@ -77,26 +77,6 @@
 };
 
 
-Machine Audio Subsystem
------------------------
-
-The machine soc device glues the platform, machine and codec driver together.
-Private data can also be set here. e.g.
-
-/* corgi audio private data */
-static struct wm8731_setup_data corgi_wm8731_setup = {
-	.i2c_address = 0x1b,
-};
-
-/* corgi audio subsystem */
-static struct snd_soc_device corgi_snd_devdata = {
-	.machine = &snd_soc_corgi,
-	.platform = &pxa2xx_soc_platform,
-	.codec_dev = &soc_codec_dev_wm8731,
-	.codec_data = &corgi_wm8731_setup,
-};
-
-
 Machine Power Map
 -----------------
 
diff --git a/Documentation/sound/alsa/soc/platform.txt b/Documentation/sound/alsa/soc/platform.txt
index 06d8359..d57efad 100644
--- a/Documentation/sound/alsa/soc/platform.txt
+++ b/Documentation/sound/alsa/soc/platform.txt
@@ -20,9 +20,10 @@
 	int (*trigger)(struct snd_pcm_substream *, int);
 };
 
-The platform driver exports its DMA functionality via struct snd_soc_platform:-
+The platform driver exports its DMA functionality via struct
+snd_soc_platform_driver:-
 
-struct snd_soc_platform {
+struct snd_soc_platform_driver {
 	char *name;
 
 	int (*probe)(struct platform_device *pdev);
@@ -34,6 +35,13 @@
 	int (*pcm_new)(struct snd_card *, struct snd_soc_codec_dai *, struct snd_pcm *);
 	void (*pcm_free)(struct snd_pcm *);
 
+	/*
+	 * For platform-caused delay reporting.
+	 * Optional.
+	 */
+	snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
+		struct snd_soc_dai *);
+
 	/* platform stream ops */
 	struct snd_pcm_ops *pcm_ops;
 };
diff --git a/Documentation/sysctl/00-INDEX b/Documentation/sysctl/00-INDEX
index 1286f45..8cf5d49 100644
--- a/Documentation/sysctl/00-INDEX
+++ b/Documentation/sysctl/00-INDEX
@@ -4,8 +4,6 @@
 	- general information about /proc/sys/ sysctl files.
 abi.txt
 	- documentation for /proc/sys/abi/*.
-ctl_unnumbered.txt
-	- explanation of why one should not add new binary sysctl numbers.
 fs.txt
 	- documentation for /proc/sys/fs/*.
 kernel.txt
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 5740671..11d5ced 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -34,6 +34,7 @@
 - hotplug
 - java-appletviewer           [ binfmt_java, obsolete ]
 - java-interpreter            [ binfmt_java, obsolete ]
+- kptr_restrict
 - kstack_depth_to_print       [ X86 only ]
 - l2cr                        [ PPC only ]
 - modprobe                    ==> Documentation/debugging-modules.txt
@@ -261,6 +262,19 @@
 
 ==============================================================
 
+kptr_restrict:
+
+This toggle indicates whether restrictions are placed on
+exposing kernel addresses via /proc and other interfaces.  When
+kptr_restrict is set to (0), there are no restrictions.  When
+kptr_restrict is set to (1), the default, kernel pointers
+printed using the %pK format specifier will be replaced with 0's
+unless the user has CAP_SYSLOG.  When kptr_restrict is set to
+(2), kernel pointers printed using %pK will be replaced with 0's
+regardless of privileges.
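+
+A minimal usage sketch (1 is the documented default, so an unmodified
+system will typically read back 1):
+
+$ cat /proc/sys/kernel/kptr_restrict
+1
+# echo 2 > /proc/sys/kernel/kptr_restrict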
+
+==============================================================
+
 kstack_depth_to_print: (X86 only)
 
 Controls the number of words to print when dumping the raw
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
new file mode 100755
index 0000000..dbeb8a0
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.py
@@ -0,0 +1,1094 @@
+#!/usr/bin/python
+# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
+#
+# Copyright (c) 2010 Rising Tide Systems
+# Copyright (c) 2010 Linux-iSCSI.org
+#
+# Author: nab@kernel.org
+#
+import os, sys
+import subprocess as sub
+import string
+import re
+import optparse
+
+tcm_dir = ""
+
+fabric_ops = []
+fabric_mod_dir = ""
+fabric_mod_port = ""
+fabric_mod_init_port = ""
+
+def tcm_mod_err(msg):
+	print msg
+	sys.exit(1)
+
+def tcm_mod_create_module_subdir(fabric_mod_dir_var):
+
+	if os.path.isdir(fabric_mod_dir_var) == True:
+		return 1
+
+	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
+	ret = os.mkdir(fabric_mod_dir_var)
+	if ret:
+		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
+
+	return
+
+def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
+	buf += "	u64 nport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
+	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* FC lport target portal group tag for TCM */\n"
+	buf += "	u16 lport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
+	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_lport {\n"
+	buf += "	/* SCSI protocol the lport is providing */\n"
+	buf += "	u8 lport_proto_id;\n"
+	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
+	buf += "	u64 lport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
+	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
+	buf += "	struct se_wwn lport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "lport"
+	fabric_mod_init_port = "nport"
+
+	return
+
+def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
+	buf += "	u64 iport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
+	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* SAS port target portal group tag for TCM */\n"
+	buf += "	u16 tport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
+	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tport {\n"
+	buf += "	/* SCSI protocol the tport is providing */\n"
+	buf += "	u8 tport_proto_id;\n"
+	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
+	buf += "	u64 tport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
+	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
+	buf += "	struct se_wwn tport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "tport"
+	fabric_mod_init_port = "iport"
+
+	return
+
+def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* ASCII formatted InitiatorName */\n"
+	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* iSCSI target portal group tag for TCM */\n"
+	buf += "	u16 tport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
+	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tport {\n"
+	buf += "	/* SCSI protocol the tport is providing */\n"
+	buf += "	u8 tport_proto_id;\n"
+	buf += "	/* ASCII formatted TargetName for IQN */\n"
+	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
+	buf += "	struct se_wwn tport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "tport"
+	fabric_mod_init_port = "iport"
+
+	return
+
+def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
+
+	if proto_ident == "FC":
+		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
+	elif proto_ident == "SAS":
+		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
+	elif proto_ident == "iSCSI":
+		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
+	else:
+		print "Unsupported proto_ident: " + proto_ident
+		sys.exit(1)
+
+	return
+
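+# Generate <fabric_mod_name>_configfs.c: the configfs glue for the new fabric
+# module (make/drop callbacks for node ACLs, TPGs and WWNs, the
+# target_core_fabric_ops template, and module init/exit).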
+def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#include <linux/module.h>\n"
+	buf += "#include <linux/moduleparam.h>\n"
+	buf += "#include <linux/version.h>\n"
+	buf += "#include <generated/utsrelease.h>\n"
+	buf += "#include <linux/utsname.h>\n"
+	buf += "#include <linux/init.h>\n"
+	buf += "#include <linux/slab.h>\n"
+	buf += "#include <linux/kthread.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/string.h>\n"
+	buf += "#include <linux/configfs.h>\n"
+	buf += "#include <linux/ctype.h>\n"
+	buf += "#include <asm/unaligned.h>\n\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/target_core_transport.h>\n"
+	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric_configfs.h>\n"
+	buf += "#include <target/target_core_fabric_lib.h>\n"
+	buf += "#include <target/target_core_device.h>\n"
+	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_configfs.h>\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/configfs_macros.h>\n\n"
+	buf += "#include <" + fabric_mod_name + "_base.h>\n"
+	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+
+	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
+	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
+
+	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
+	buf += "	struct se_portal_group *se_tpg,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
+	buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	u64 wwpn = 0;\n"
+
+	buf += "	u32 nexus_depth;\n\n"
+	buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
+	buf += "		return ERR_PTR(-EINVAL); */\n"
+	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
+	buf += "	if (!(se_nacl_new))\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
+	buf += "	nexus_depth = 1;\n"
+	buf += "	/*\n"
+	buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
+	buf += "	 * when converting a NodeACL from demo mode -> explict\n"
+	buf += "	 */\n"
+	buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
+	buf += "				name, nexus_depth);\n"
+	buf += "	if (IS_ERR(se_nacl)) {\n"
+	buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
+	buf += "		return se_nacl;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
+	buf += "	 */\n"
+	buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
+
+	buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
+	buf += "	return se_nacl;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
+	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+	buf += "	kfree(nacl);\n"
+	buf += "}\n\n"
+
+	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
+	buf += "	struct se_wwn *wwn,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
+	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
+	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
+	buf += "	unsigned long tpgt;\n"
+	buf += "	int ret;\n\n"
+	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
+	buf += "		return ERR_PTR(-EINVAL);\n"
+	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
+	buf += "		return ERR_PTR(-EINVAL);\n\n"
+	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
+	buf += "	if (!(tpg)) {\n"
+	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "	}\n"
+	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
+	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
+	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
+	buf += "				&tpg->se_tpg, (void *)tpg,\n"
+	buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
+	buf += "	if (ret < 0) {\n"
+	buf += "		kfree(tpg);\n"
+	buf += "		return NULL;\n"
+	buf += "	}\n"
+	buf += "	return &tpg->se_tpg;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+	buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
+	buf += "	core_tpg_deregister(se_tpg);\n"
+	buf += "	kfree(tpg);\n"
+	buf += "}\n\n"
+
+	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
+	buf += "	struct target_fabric_configfs *tf,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	u64 wwpn = 0;\n\n"
+
+	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
+	buf += "		return ERR_PTR(-EINVAL); */\n\n"
+	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
+	buf += "	if (!(" + fabric_mod_port + ")) {\n"
+	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "	}\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
+
+	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
+	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
+	buf += "				struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
+	buf += "	kfree(" + fabric_mod_port + ");\n"
+	buf += "}\n\n"
+	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
+	buf += "	struct target_fabric_configfs *tf,\n"
+	buf += "	char *page)\n"
+	buf += "{\n"
+	buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
+	buf += "		\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
+	buf += "		utsname()->machine);\n"
+	buf += "}\n\n"
+	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
+	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
+	buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
+	buf += "	NULL,\n"
+	buf += "};\n\n"
+
+	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
+	buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
+	buf += "	.get_fabric_proto_ident		= " + fabric_mod_name + "_get_fabric_proto_ident,\n"
+	buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
+	buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
+	buf += "	.tpg_get_default_depth		= " + fabric_mod_name + "_get_default_depth,\n"
+	buf += "	.tpg_get_pr_transport_id	= " + fabric_mod_name + "_get_pr_transport_id,\n"
+	buf += "	.tpg_get_pr_transport_id_len	= " + fabric_mod_name + "_get_pr_transport_id_len,\n"
+	buf += "	.tpg_parse_pr_out_transport_id	= " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
+	buf += "	.tpg_check_demo_mode		= " + fabric_mod_name + "_check_false,\n"
+	buf += "	.tpg_check_demo_mode_cache	= " + fabric_mod_name + "_check_true,\n"
+	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
+	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
+	buf += "	.tpg_alloc_fabric_acl		= " + fabric_mod_name + "_alloc_fabric_acl,\n"
+	buf += "	.tpg_release_fabric_acl		= " + fabric_mod_name + "_release_fabric_acl,\n"
+	buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
+	buf += "	.release_cmd_to_pool		= " + fabric_mod_name + "_release_cmd,\n"
+	buf += "	.release_cmd_direct		= " + fabric_mod_name + "_release_cmd,\n"
+	buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
+	buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
+	buf += "	.stop_session			= " + fabric_mod_name + "_stop_session,\n"
+	buf += "	.fall_back_to_erl0		= " + fabric_mod_name + "_reset_nexus,\n"
+	buf += "	.sess_logged_in			= " + fabric_mod_name + "_sess_logged_in,\n"
+	buf += "	.sess_get_index			= " + fabric_mod_name + "_sess_get_index,\n"
+	buf += "	.sess_get_initiator_sid		= NULL,\n"
+	buf += "	.write_pending			= " + fabric_mod_name + "_write_pending,\n"
+	buf += "	.write_pending_status		= " + fabric_mod_name + "_write_pending_status,\n"
+	buf += "	.set_default_node_attributes	= " + fabric_mod_name + "_set_default_node_attrs,\n"
+	buf += "	.get_task_tag			= " + fabric_mod_name + "_get_task_tag,\n"
+	buf += "	.get_cmd_state			= " + fabric_mod_name + "_get_cmd_state,\n"
+	buf += "	.new_cmd_failure		= " + fabric_mod_name + "_new_cmd_failure,\n"
+	buf += "	.queue_data_in			= " + fabric_mod_name + "_queue_data_in,\n"
+	buf += "	.queue_status			= " + fabric_mod_name + "_queue_status,\n"
+	buf += "	.queue_tm_rsp			= " + fabric_mod_name + "_queue_tm_rsp,\n"
+	buf += "	.get_fabric_sense_len		= " + fabric_mod_name + "_get_fabric_sense_len,\n"
+	buf += "	.set_fabric_sense_len		= " + fabric_mod_name + "_set_fabric_sense_len,\n"
+	buf += "	.is_state_remove		= " + fabric_mod_name + "_is_state_remove,\n"
+	buf += "	.pack_lun			= " + fabric_mod_name + "_pack_lun,\n"
+	buf += "	/*\n"
+	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
+	buf += "	 */\n"
+	buf += "	.fabric_make_wwn		= " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
+	buf += "	.fabric_drop_wwn		= " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
+	buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
+	buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
+	buf += "	.fabric_post_link		= NULL,\n"
+	buf += "	.fabric_pre_unlink		= NULL,\n"
+	buf += "	.fabric_make_np			= NULL,\n"
+	buf += "	.fabric_drop_np			= NULL,\n"
+	buf += "	.fabric_make_nodeacl		= " + fabric_mod_name + "_make_nodeacl,\n"
+	buf += "	.fabric_drop_nodeacl		= " + fabric_mod_name + "_drop_nodeacl,\n"
+	buf += "};\n\n"
+
+	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
+	buf += "{\n"
+	buf += "	struct target_fabric_configfs *fabric;\n"
+	buf += "	int ret;\n\n"
+	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
+	buf += "		\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
+	buf += "		utsname()->machine);\n"
+	buf += "	/*\n"
+	buf += "	 * Register the top level struct config_item_type with TCM core\n"
+	buf += "	 */\n"
+	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+	buf += "	if (!(fabric)) {\n"
+	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
+	buf += "		return -ENOMEM;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
+	buf += "	 */\n"
+	buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
+	buf += "	/*\n"
+	buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
+	buf += "	 */\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
+	buf += "	/*\n"
+	buf += "	 * Register the fabric for use within TCM\n"
+	buf += "	 */\n"
+	buf += "	ret = target_fabric_configfs_register(fabric);\n"
+	buf += "	if (ret < 0) {\n"
+	buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
+	buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
+	buf += "		return ret;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Setup our local pointer to *fabric\n"
+	buf += "	 */\n"
+	buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
+	buf += "	printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
+	buf += "	return 0;\n"
+	buf += "};\n\n"
+	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
+	buf += "{\n"
+	buf += "	if (!(" + fabric_mod_name + "_fabric_configfs))\n"
+	buf += "		return;\n\n"
+	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
+	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
+	buf += "	printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
+	buf += "};\n\n"
+
+	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
+	buf += "{\n"
+	buf += "	int ret;\n\n"
+	buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
+	buf += "	if (ret < 0)\n"
+	buf += "		return ret;\n\n"
+	buf += "	return 0;\n"
+	buf += "};\n\n"
+	buf += "static void " + fabric_mod_name + "_exit(void)\n"
+	buf += "{\n"
+	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
+	buf += "};\n\n"
+
+	buf += "#ifdef MODULE\n"
+	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
+	buf += "MODULE_LICENSE(\"GPL\");\n"
+	buf += "module_init(" + fabric_mod_name + "_init);\n"
+	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
+	buf += "#endif\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	return
+
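+# Collect the function-pointer member lines of struct target_core_fabric_ops
+# from include/target/target_core_fabric_ops.h into the global fabric_ops list.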
+def tcm_mod_scan_fabric_ops(tcm_dir):
+
+	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
+
+	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
+	process_fo = 0;
+
+	p = open(fabric_ops_api, 'r')
+
+	line = p.readline()
+	while line:
+		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
+			line = p.readline()
+			continue
+
+		if process_fo == 0:
+			process_fo = 1;
+			line = p.readline()
+			# Search for function pointer
+			if not re.search('\(\*', line):
+				continue
+
+			fabric_ops.append(line.rstrip())
+			continue
+
+		line = p.readline()
+		# Search for function pointer
+		if not re.search('\(\*', line):
+			continue
+
+		fabric_ops.append(line.rstrip())
+
+	p.close()
+	return
+
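+# Emit <fabric_mod_name>_fabric.c and <fabric_mod_name>_fabric.h containing a
+# stub implementation and prototype for each entry gathered by
+# tcm_mod_scan_fabric_ops() above.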
+def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
+	buf = ""
+	bufi = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
+	print "Writing file: " + fi
+
+	pi = open(fi, 'w')
+	if not pi:
+		tcm_mod_err("Unable to open file: " + fi)
+
+	buf = "#include <linux/slab.h>\n"
+	buf += "#include <linux/kthread.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/list.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/string.h>\n"
+	buf += "#include <linux/ctype.h>\n"
+	buf += "#include <asm/unaligned.h>\n"
+	buf += "#include <scsi/scsi.h>\n"
+	buf += "#include <scsi/scsi_host.h>\n"
+	buf += "#include <scsi/scsi_device.h>\n"
+	buf += "#include <scsi/scsi_cmnd.h>\n"
+	buf += "#include <scsi/libfc.h>\n\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/target_core_transport.h>\n"
+	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric_lib.h>\n"
+	buf += "#include <target/target_core_device.h>\n"
+	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_configfs.h>\n"
+	buf += "#include <" + fabric_mod_name + "_base.h>\n"
+	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+
+	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	return 1;\n"
+	buf += "}\n\n"
+	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
+
+	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	return 0;\n"
+	buf += "}\n\n"
+	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
+
+	total_fabric_ops = len(fabric_ops)
+	i = 0
+
+	while i < total_fabric_ops:
+		fo = fabric_ops[i]
+		i += 1
+#		print "fabric_ops: " + fo
+
+		if re.search('get_fabric_name', fo):
+			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
+			buf += "{\n"
+			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
+			continue
+
+		if re.search('get_fabric_proto_ident', fo):
+			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	u8 proto_id;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+
+			buf += "	}\n\n"
+			buf += "	return proto_id;\n"
+			buf += "}\n\n"
+			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
+
+		if re.search('get_wwn', fo):
+			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
+			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
+
+		if re.search('get_tag', fo):
+			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
+
+		if re.search('get_default_depth', fo):
+			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	return 1;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
+
+		if re.search('get_pr_transport_id\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl,\n"
+			buf += "	struct t10_pr_registration *pr_reg,\n"
+			buf += "	int *format_code,\n"
+			buf += "	unsigned char *buf)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	int ret = 0;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+
+			buf += "	}\n\n"
+			buf += "	return ret;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
+			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
+			bufi += "			int *, unsigned char *);\n"
+
+		if re.search('get_pr_transport_id_len\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl,\n"
+			buf += "	struct t10_pr_registration *pr_reg,\n"
+			buf += "	int *format_code)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	int ret = 0;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+
+
+			buf += "	}\n\n"
+			buf += "	return ret;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
+			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
+			bufi += "			int *);\n"
+
+		if re.search('parse_pr_out_transport_id\)\(', fo):
+			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	const char *buf,\n"
+			buf += "	u32 *out_tid_len,\n"
+			buf += "	char **port_nexus_ptr)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	char *tid = NULL;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+
+			buf += "	}\n\n"
+			buf += "	return tid;\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
+			bufi +=	"			const char *, u32 *, char **);\n"
+
+		if re.search('alloc_fabric_acl\)\(', fo):
+			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
+			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
+			buf += "	if (!(nacl)) {\n"
+			buf += "		printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
+			buf += "		return NULL;\n"
+			buf += "	}\n\n"
+			buf += "	return &nacl->se_node_acl;\n"
+			buf += "}\n\n"
+			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
+
+		if re.search('release_fabric_acl\)\(', fo):
+			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
+			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+			buf += "	kfree(nacl);\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
+			bufi +=	"			struct se_node_acl *);\n"
+
+		if re.search('tpg_get_inst_index\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	return 1;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
+
+		if re.search('release_cmd_to_pool', fo):
+			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
+
+		if re.search('shutdown_session\)\(', fo):
+			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
+
+		if re.search('close_session\)\(', fo):
+			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
+
+		if re.search('stop_session\)\(', fo):
+			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
+
+		if re.search('fall_back_to_erl0\)\(', fo):
+			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
+
+		if re.search('sess_logged_in\)\(', fo):
+			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
+
+		if re.search('sess_get_index\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
+
+		if re.search('write_pending\)\(', fo):
+			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
+
+		if re.search('write_pending_status\)\(', fo):
+			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
+
+		if re.search('set_default_node_attributes\)\(', fo):
+			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
+
+		if re.search('get_task_tag\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
+
+		if re.search('get_cmd_state\)\(', fo):
+			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
+
+		if re.search('new_cmd_failure\)\(', fo):
+			buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
+
+		if re.search('queue_data_in\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
+
+		if re.search('queue_status\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
+
+		if re.search('queue_tm_rsp\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+
+		if re.search('get_fabric_sense_len\)\(', fo):
+			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
+
+		if re.search('set_fabric_sense_len\)\(', fo):
+			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
+
+		if re.search('is_state_remove\)\(', fo):
+			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
+
+		if re.search('pack_lun\)\(', fo):
+			buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
+			buf += "{\n"
+			buf += "	WARN_ON(lun >= 256);\n"
+			buf += "	/* Caller wants this byte-swapped */\n"
+			buf += "	return cpu_to_le64((lun & 0xff) << 8);\n"
+			buf += "}\n\n"
+			bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
+
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	ret = pi.write(bufi)
+	if ret:
+		tcm_mod_err("Unable to write fi: " + fi)
+
+	pi.close()
+	return
+
+def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
+
+	buf = ""
+	f = fabric_mod_dir_var + "/Kbuild"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
+	buf += fabric_mod_name + "-objs			:= " + fabric_mod_name + "_fabric.o \\\n"
+	buf += "					   " + fabric_mod_name + "_configfs.o\n"
+	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ")		+= " + fabric_mod_name + ".o\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+	return
+
+def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
+
+	buf = ""
+	f = fabric_mod_dir_var + "/Kconfig"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "config " + fabric_mod_name.upper() + "\n"
+	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
+	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
+	buf += "	default n\n"
+	buf += "	---help---\n"
+	buf += "	Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+	return
+
+def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
+	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ")	+= " + fabric_mod_name.lower() + "/\n"
+	kbuild = tcm_dir + "/drivers/target/Kbuild"
+
+	f = open(kbuild, 'a')
+	f.write(buf)
+	f.close()
+	return
+
+def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
+	buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
+	kconfig = tcm_dir + "/drivers/target/Kconfig"
+
+	f = open(kconfig, 'a')
+	f.write(buf)
+	f.close()
+	return
+
+def main(modname, proto_ident):
+#	proto_ident = "FC"
+#	proto_ident = "SAS"
+#	proto_ident = "iSCSI"
+
+	tcm_dir = os.getcwd();
+	tcm_dir += "/../../"
+	print "tcm_dir: " + tcm_dir
+	fabric_mod_name = modname
+	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
+	print "Set fabric_mod_name: " + fabric_mod_name
+	print "Set fabric_mod_dir: " + fabric_mod_dir
+	print "Using proto_ident: " + proto_ident
+
+	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
+		print "Unsupported proto_ident: " + proto_ident
+		sys.exit(1)
+
+	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
+	if ret:
+		print "tcm_mod_create_module_subdir() failed because module already exists!"
+		sys.exit(1)
+
+	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_scan_fabric_ops(tcm_dir)
+	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
+
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ")
+	if input == "yes" or input == "y":
+		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
+
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+	if input == "yes" or input == "y":
+		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
+
+	return
+
+parser = optparse.OptionParser()
+parser.add_option('-m', '--modulename', help='Module name', dest='modname',
+		action='store', nargs=1, type='string')
+parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
+		action='store', nargs=1, type='string')
+
+(opts, args) = parser.parse_args()
+
+mandatories = ['modname', 'protoident']
+for m in mandatories:
+	if not opts.__dict__[m]:
+		print "mandatory option is missing\n"
+		parser.print_help()
+		exit(-1)
+
+if __name__ == "__main__":
+
+	main(str(opts.modname), opts.protoident)
diff --git a/Documentation/target/tcm_mod_builder.txt b/Documentation/target/tcm_mod_builder.txt
new file mode 100644
index 0000000..84533d8
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.txt
@@ -0,0 +1,145 @@
+>>>>>>>>>> The TCM v4 fabric module script generator <<<<<<<<<<
+
+Greetings all,
+
+This document is intended to be a mini-HOWTO for using the tcm_mod_builder.py
+script to generate a brand new functional TCM v4 fabric .ko module of your very own,
+that, once built, can be immediately loaded to start accessing the new TCM/ConfigFS
+fabric skeleton, by simply using:
+
+	modprobe $TCM_NEW_MOD
+	mkdir -p /sys/kernel/config/target/$TCM_NEW_MOD
+
+This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following:
+
+	*) Generate new API callers for drivers/target/target_core_fabric_configs.c logic
+	   ->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg()
+	   ->make_wwn(), ->drop_wwn().  These are created into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
+	*) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module
+	   using a skeleton struct target_core_fabric_ops API template.
+	*) Based on user defined T10 Proto_Ident for the new fabric module being built,
+	   the TransportID / Initiator and Target WWPN related handlers for
+	   SPC-3 persistent reservation are automatically generated in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
+	   using drivers/target/target_core_fabric_lib.c logic.
+	*) NOP API calls for all other Data I/O path and fabric dependent attribute logic
+	   in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
+
+tcm_mod_builder.py depends upon the mandatory '-p $PROTO_IDENT' and '-m
+$FABRIC_MOD_name' parameters, and actually running the script looks like:
+
+target:/mnt/sdb/lio-core-2.6.git/Documentation/target# python tcm_mod_builder.py -p iSCSI -m tcm_nab5000
+tcm_dir: /mnt/sdb/lio-core-2.6.git/Documentation/target/../../
+Set fabric_mod_name: tcm_nab5000
+Set fabric_mod_dir:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
+Using proto_ident: iSCSI
+Creating fabric_mod_dir:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_base.h
+Using tcm_mod_scan_fabric_ops:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../include/target/target_core_fabric_ops.h
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.c
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.h
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_configfs.c
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kbuild
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kconfig
+Would you like to add tcm_nab5000 to drivers/target/Kbuild..? [yes,no]: yes
+Would you like to add tcm_nab5000 to drivers/target/Kconfig..? [yes,no]: yes
+
+At the end of tcm_mod_builder.py, the script will ask to add the following
+line to drivers/target/Kbuild:
+
+	obj-$(CONFIG_TCM_NAB5000)       += tcm_nab5000/
+
+and the same for drivers/target/Kconfig:
+
+	source "drivers/target/tcm_nab5000/Kconfig"
+
+*) Run 'make menuconfig' and select the new CONFIG_TCM_NAB5000 item:
+
+	<M>   TCM_NAB5000 fabric module
+
+*) Build using 'make modules'. Once completed, you will have:
+
+target:/mnt/sdb/lio-core-2.6.git# ls -la drivers/target/tcm_nab5000/
+total 1348
+drwxr-xr-x 2 root root   4096 2010-10-05 03:23 .
+drwxr-xr-x 9 root root   4096 2010-10-05 03:22 ..
+-rw-r--r-- 1 root root    282 2010-10-05 03:22 Kbuild
+-rw-r--r-- 1 root root    171 2010-10-05 03:22 Kconfig
+-rw-r--r-- 1 root root     49 2010-10-05 03:23 modules.order
+-rw-r--r-- 1 root root    738 2010-10-05 03:22 tcm_nab5000_base.h
+-rw-r--r-- 1 root root   9096 2010-10-05 03:22 tcm_nab5000_configfs.c
+-rw-r--r-- 1 root root 191200 2010-10-05 03:23 tcm_nab5000_configfs.o
+-rw-r--r-- 1 root root  40504 2010-10-05 03:23 .tcm_nab5000_configfs.o.cmd
+-rw-r--r-- 1 root root   5414 2010-10-05 03:22 tcm_nab5000_fabric.c
+-rw-r--r-- 1 root root   2016 2010-10-05 03:22 tcm_nab5000_fabric.h
+-rw-r--r-- 1 root root 190932 2010-10-05 03:23 tcm_nab5000_fabric.o
+-rw-r--r-- 1 root root  40713 2010-10-05 03:23 .tcm_nab5000_fabric.o.cmd
+-rw-r--r-- 1 root root 401861 2010-10-05 03:23 tcm_nab5000.ko
+-rw-r--r-- 1 root root    265 2010-10-05 03:23 .tcm_nab5000.ko.cmd
+-rw-r--r-- 1 root root    459 2010-10-05 03:23 tcm_nab5000.mod.c
+-rw-r--r-- 1 root root  23896 2010-10-05 03:23 tcm_nab5000.mod.o
+-rw-r--r-- 1 root root  22655 2010-10-05 03:23 .tcm_nab5000.mod.o.cmd
+-rw-r--r-- 1 root root 379022 2010-10-05 03:23 tcm_nab5000.o
+-rw-r--r-- 1 root root    211 2010-10-05 03:23 .tcm_nab5000.o.cmd
+
+*) Load the new module, create a lun_0 configfs group, and add new TCM Core
+   IBLOCK backstore symlink to port:
+
+target:/mnt/sdb/lio-core-2.6.git# insmod drivers/target/tcm_nab5000.ko
+target:/mnt/sdb/lio-core-2.6.git# mkdir -p /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0
+target:/mnt/sdb/lio-core-2.6.git# cd /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/
+target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# ln -s /sys/kernel/config/target/core/iblock_0/lvm_test0 nab5000_port
+
+target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# cd -
+target:/mnt/sdb/lio-core-2.6.git# tree /sys/kernel/config/target/nab5000/
+/sys/kernel/config/target/nab5000/
+|-- discovery_auth
+|-- iqn.foo
+|   `-- tpgt_1
+|       |-- acls
+|       |-- attrib
+|       |-- lun
+|       |   `-- lun_0
+|       |       |-- alua_tg_pt_gp
+|       |       |-- alua_tg_pt_offline
+|       |       |-- alua_tg_pt_status
+|       |       |-- alua_tg_pt_write_md
+|       |       `-- nab5000_port -> ../../../../../../target/core/iblock_0/lvm_test0
+|       |-- np
+|       `-- param
+`-- version
+
+target:/mnt/sdb/lio-core-2.6.git# lsmod
+Module                  Size  Used by
+tcm_nab5000             3935  4
+iscsi_target_mod      193211  0
+target_core_stgt        8090  0
+target_core_pscsi      11122  1
+target_core_file        9172  2
+target_core_iblock      9280  1
+target_core_mod       228575  31
+tcm_nab5000,iscsi_target_mod,target_core_stgt,target_core_pscsi,target_core_file,target_core_iblock
+libfc                  73681  0
+scsi_debug             56265  0
+scsi_tgt                8666  1 target_core_stgt
+configfs               20644  2 target_core_mod
+
+----------------------------------------------------------------------
+
+Future TODO items:
+
+	*) Add more T10 proto_idents
+	*) Make tcm_mod_dump_fabric_ops() smarter and generate function pointer
+	   defs directly from include/target/target_core_fabric_ops.h:struct target_core_fabric_ops
+	   structure members.
+
+October 5th, 2010
+Nicholas A. Bellinger <nab@linux-iscsi.org>
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index cb3d15b..b61e46f 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -278,3 +278,15 @@
     |---name:			acpitz
     |---temp1_input:		37000
     |---temp1_crit:		100000
+
+4. Event Notification
+
+The framework includes a simple notification mechanism, in the form of a
+netlink event. Netlink socket initialization is done during the _init_
+of the framework. Drivers which intend to use the notification mechanism
+need only call generate_netlink_event() with two arguments:
+(originator, event). Typically the originator will be an integer assigned
+to a thermal_zone_device when it registers itself with the framework. The
+event will be one of: {THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
+THERMAL_DEV_FAULT}. Notification can be sent when the current temperature
+crosses any of the configured thresholds.
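+
+As a rough illustration only (the exact prototype and the use of the zone
+id as the originator are assumptions to be checked against
+include/linux/thermal.h and the driver at hand), a zone driver could emit
+a critical event from its temperature polling path like this:
+
+	/* called after reading the current and critical temperatures */
+	static void example_check_temp(struct thermal_zone_device *tz,
+				       long temp, long crit_temp)
+	{
+		if (temp >= crit_temp)
+			/* originator = zone id, event = type of event */
+			generate_netlink_event(tz->id, THERMAL_CRITICAL);
+	}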
diff --git a/Documentation/timers/timer_stats.txt b/Documentation/timers/timer_stats.txt
index 9bd00fc..8abd40b 100644
--- a/Documentation/timers/timer_stats.txt
+++ b/Documentation/timers/timer_stats.txt
@@ -19,7 +19,7 @@
 
 - the pid of the task(process) which initialized the timer
 - the name of the process which initialized the timer
-- the function where the timer was intialized
+- the function where the timer was initialized
 - the callback function which is associated to the timer
 - the number of events (callbacks)
 
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 09bd8e9..b510564 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -125,7 +125,7 @@
 For example, here's the information displayed for the 'sched_wakeup'
 event:
 
-# cat /debug/tracing/events/sched/sched_wakeup/format
+# cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/format
 
 name: sched_wakeup
 ID: 60
@@ -201,19 +201,19 @@
 
 For example:
 
-# cd /debug/tracing/events/sched/sched_wakeup
+# cd /sys/kernel/debug/tracing/events/sched/sched_wakeup
 # echo "common_preempt_count > 4" > filter
 
 A slightly more involved example:
 
-# cd /debug/tracing/events/sched/sched_signal_send
+# cd /sys/kernel/debug/tracing/events/signal/signal_generate
 # echo "((sig >= 10 && sig < 15) || sig == 17) && comm != bash" > filter
 
 If there is an error in the expression, you'll get an 'Invalid
 argument' error when setting it, and the erroneous string along with
 an error message can be seen by looking at the filter e.g.:
 
-# cd /debug/tracing/events/sched/sched_signal_send
+# cd /sys/kernel/debug/tracing/events/signal/signal_generate
 # echo "((sig >= 10 && sig < 15) || dsig == 17) && comm != bash" > filter
 -bash: echo: write error: Invalid argument
 # cat filter
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index 8773778..881e7f4 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -285,6 +285,9 @@
 The 'new value' union is not used in g_volatile_ctrl. In general controls
 that need to implement g_volatile_ctrl are read-only controls.
 
+Note that if one or more controls in a control cluster are marked as volatile,
+then all the controls in the cluster are seen as volatile.
+
 To mark a control as volatile you have to set the is_volatile flag:
 
 	ctrl = v4l2_ctrl_new_std(&sd->ctrl_handler, ...);
@@ -462,6 +465,15 @@
 Obviously, all controls in the cluster array must be initialized to either
 a valid control or to NULL.
 
+In rare cases you might want to know which controls of a cluster were
+actually set explicitly by the user. For this you can check the 'is_new' flag
+of each control. For example, in the case of a volume/mute cluster the
+'is_new' flag of the mute control would be set if the user called
+VIDIOC_S_CTRL for mute only. If the user calls VIDIOC_S_EXT_CTRLS for both
+mute and volume controls, then the 'is_new' flag is 1 for both controls.
+
+The 'is_new' flag is always 1 when called from v4l2_ctrl_handler_setup().
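+
+For illustration only (struct foo and the foo_write_*() helpers below are
+made-up driver code; only 'is_new', 'val' and the cluster setup come from
+the framework), a volume/mute cluster could use it in s_ctrl like this:
+
+	struct foo {
+		struct v4l2_ctrl_handler hdl;
+		struct v4l2_ctrl *mute;	/* saved when the cluster was set up */
+	};
+
+	static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
+	{
+		struct foo *state = container_of(ctrl->handler, struct foo, hdl);
+
+		switch (ctrl->id) {
+		case V4L2_CID_AUDIO_VOLUME:
+			/* 'ctrl' is the cluster master (volume) */
+			if (state->mute->is_new)
+				foo_write_mute(state, state->mute->val);
+			if (ctrl->is_new)
+				foo_write_volume(state, ctrl->val);
+			return 0;
+		}
+		return -EINVAL;
+	}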
+
 
 VIDIOC_LOG_STATUS Support
 =========================
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
new file mode 100644
index 0000000..0924aac
--- /dev/null
+++ b/Documentation/vm/transhuge.txt
@@ -0,0 +1,298 @@
+= Transparent Hugepage Support =
+
+== Objective ==
+
+Performance critical computing applications dealing with large memory
+working sets are already running on top of libhugetlbfs and in turn
+hugetlbfs. Transparent Hugepage Support is an alternative means of
+backing virtual memory with huge pages, supporting the automatic
+promotion and demotion of page sizes, without the shortcomings of
+hugetlbfs.
+
+Currently it only works for anonymous memory mappings but in the
+future it can expand over the pagecache layer starting with tmpfs.
+
+Applications run faster because of two factors. The first factor is
+almost completely irrelevant and not of significant interest, because
+it also has the downside of requiring larger clear-page and copy-page
+operations in page faults, which is a potentially negative effect. The
+first factor consists in taking a single page fault for each 2M
+virtual region touched by userland (so reducing the enter/exit kernel
+frequency by a factor of 512). This only matters the first time the
+memory is accessed for the lifetime of a memory mapping. The second,
+long lasting and much more important factor affects all subsequent
+accesses to the memory for the whole runtime of the application. The
+second factor consists of two components: 1) the TLB miss will run
+faster (especially with virtualization using nested pagetables but
+almost always also on bare metal without virtualization) and 2) a
+single TLB entry will be mapping a much larger amount of virtual
+memory, in turn reducing the number of TLB misses. With virtualization
+and nested pagetables the TLB can use larger mappings only if both KVM
+and the Linux guest are using hugepages, but a significant speedup
+already happens if only one of the two is using hugepages, just
+because the TLB miss is going to run faster.
+
+== Design ==
+
+- "graceful fallback": mm components which don't have transparent
+  hugepage knowledge fall back to breaking a transparent hugepage and
+  working on the regular pages and their respective regular pmd/pte
+  mappings
+
+- if a hugepage allocation fails because of memory fragmentation,
+  regular pages should be gracefully allocated instead and mixed in
+  the same vma without any failure or significant delay and without
+  userland noticing
+
+- if some task quits and more hugepages become available (either
+  immediately in the buddy or through the VM), guest physical memory
+  backed by regular pages should be relocated on hugepages
+  automatically (with khugepaged)
+
+- it doesn't require memory reservation and in turn it uses hugepages
+  whenever possible (the only possible reservation here is kernelcore=
+  to prevent unmovable pages from fragmenting all the memory, but such a tweak
+  is not specific to transparent hugepage support and it's a generic
+  feature that applies to all dynamic high order allocations in the
+  kernel)
+
+- this initial support only offers the feature in the anonymous memory
+  regions but it'd be ideal to move it to tmpfs and the pagecache
+  later
+
+Transparent Hugepage Support maximizes the usefulness of free memory
+compared to the reservation approach of hugetlbfs by allowing all
+unused memory to be used as cache or other movable (or even unmovable)
+entities. It doesn't require reservation to prevent hugepage
+allocation failures from being noticeable from userland. It allows
+paging and all other advanced VM features to be available on the
+hugepages. It requires no modifications for applications to take
+advantage of it.
+
+Applications however can be further optimized to take advantage of
+this feature, just as they have been optimized before to avoid a
+flood of mmap system calls for every malloc(4k). Optimizing userland
+is by far not mandatory and khugepaged can already take care of long
+lived page allocations even for hugepage unaware applications that
+deal with large amounts of memory.
+
+In certain cases when hugepages are enabled system wide, applications
+may end up allocating more memory resources. An application may mmap a
+large region but only touch 1 byte of it; in that case a 2M page might
+be allocated instead of a 4k page for no good reason. This is why it's
+possible to disable hugepages system-wide and to only have them inside
+MADV_HUGEPAGE madvise regions.
+
+Embedded systems should enable hugepages only inside madvise regions
+to eliminate any risk of wasting any precious byte of memory and to
+only run faster.
+
+Applications that get a lot of benefit from hugepages and that don't
+risk losing memory by using hugepages should use
+madvise(MADV_HUGEPAGE) on their critical mmapped regions.
+
+== sysfs ==
+
+Transparent Hugepage Support can be entirely disabled (mostly for
+debugging purposes) or only enabled inside MADV_HUGEPAGE regions (to
+avoid the risk of consuming more memory resources) or enabled system
+wide. This can be achieved with one of:
+
+echo always >/sys/kernel/mm/transparent_hugepage/enabled
+echo madvise >/sys/kernel/mm/transparent_hugepage/enabled
+echo never >/sys/kernel/mm/transparent_hugepage/enabled
+
+It's also possible to limit defrag efforts in the VM to generate
+hugepages in case they're not immediately free to madvise regions or
+to never try to defrag memory and simply fall back to regular pages
+unless hugepages are immediately available. Clearly if we spend CPU
+time to defrag memory, we would expect to gain even more by the fact
+we use hugepages later instead of regular pages. This isn't always
+guaranteed, but it may be more likely in case the allocation is for a
+MADV_HUGEPAGE region.
+
+echo always >/sys/kernel/mm/transparent_hugepage/defrag
+echo madvise >/sys/kernel/mm/transparent_hugepage/defrag
+echo never >/sys/kernel/mm/transparent_hugepage/defrag
+
+khugepaged will be automatically started when
+transparent_hugepage/enabled is set to "always" or "madvise", and it'll
+be automatically shut down if it's set to "never".
+
+khugepaged usually runs at low frequency, so while one may not want to
+invoke defrag algorithms synchronously during the page faults, it
+should be worth invoking defrag at least in khugepaged. However it's
+also possible to disable defrag in khugepaged:
+
+echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+
+You can also control how many pages khugepaged should scan at each
+pass:
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
+
+and how many milliseconds to wait in khugepaged between each pass (you
+can set this to 0 to run khugepaged at 100% utilization of one core):
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
+
+and how many milliseconds to wait in khugepaged if there's a hugepage
+allocation failure, to throttle the next allocation attempt:
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs
+
+The khugepaged progress can be seen in the number of pages collapsed:
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
+
+and in the number of full scans (passes) it has completed:
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/full_scans
+
+== Boot parameter ==
+
+You can change the sysfs boot time defaults of Transparent Hugepage
+Support by passing the parameter "transparent_hugepage=always" or
+"transparent_hugepage=madvise" or "transparent_hugepage=never"
+(without "") to the kernel command line.
+
+== Need of application restart ==
+
+The transparent_hugepage/enabled values only affect future
+behavior. So to make them effective you need to restart any
+application that could have been using hugepages. This also applies to
+the regions registered in khugepaged.
+
+== get_user_pages and follow_page ==
+
+get_user_pages and follow_page, if run on a hugepage, will return the
+head or tail pages as usual (exactly as they would do on
+hugetlbfs). Most gup users will only care about the actual physical
+address of the page and its temporary pinning to release after the I/O
+is complete, so they won't ever notice the fact that the page is huge.
+But if any driver is going to mangle the page structure of a tail
+page (like checking page->mapping or other bits that are relevant
+for the head page and not the tail page), it should be updated to
+check the head page instead (while serializing properly against
+split_huge_page() to avoid the head and tail pages disappearing from
+under it; see the futex code for an example of that, hugetlbfs also
+needed special handling in futex code for similar reasons).
+
+NOTE: these aren't new constraints to the GUP API, and they match the
+same constraints that apply to hugetlbfs too, so any driver capable
+of handling GUP on hugetlbfs will also work fine on transparent
+hugepage backed mappings.
+
+In case you can't handle compound pages returned by follow_page, the
+FOLL_SPLIT bit can be specified as a parameter to follow_page, so that
+it will split the hugepages before returning them. Migration for
+example passes FOLL_SPLIT as a parameter to follow_page because it's
+not hugepage aware and in fact it can't work at all on hugetlbfs (but
+it instead works fine on transparent hugepages thanks to
+FOLL_SPLIT). Migration simply can't deal with hugepages being returned
+(as it's not only checking the pfn of the page and pinning it during
+the copy but it pretends to migrate the memory in regular page sizes
+and with regular pte/pmd mappings).
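+
+As a purely illustrative fragment (assuming the follow_page prototype
+from include/linux/mm.h), such a caller could look like:
+
+	struct page *page;
+
+	/* mmap_sem must be held; FOLL_SPLIT splits a hugepage first so a
+	   regular 4k page is returned, FOLL_GET takes a reference on it */
+	page = follow_page(vma, address, FOLL_GET | FOLL_SPLIT);
+	if (!IS_ERR_OR_NULL(page)) {
+		/* ... operate on the base page ... */
+		put_page(page);
+	}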
+
+== Optimizing the applications ==
+
+To be guaranteed that the kernel will map a 2M page immediately in any
+memory region, the mmap region has to be hugepage naturally
+aligned. posix_memalign() can provide that guarantee.
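+
+For instance, such an allocation could look roughly like the following
+(illustrative only; a 2M hugepage size is assumed, MADV_HUGEPAGE must be
+available in the userspace headers, and the madvise call is only needed
+when hugepages are restricted to madvise regions):
+
+	#include <stdlib.h>
+	#include <sys/mman.h>
+
+	#define HPAGE_SIZE	(2 * 1024 * 1024UL)
+
+	void *alloc_hugepage_backed(size_t size)
+	{
+		void *buf = NULL;
+
+		/* hugepage aligned, so the kernel can map 2M pages right away */
+		if (posix_memalign(&buf, HPAGE_SIZE, size))
+			return NULL;
+		/* optional: explicitly ask for hugepages on this range */
+		madvise(buf, size, MADV_HUGEPAGE);
+		return buf;
+	}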
+
+== Hugetlbfs ==
+
+You can use hugetlbfs on a kernel that has transparent hugepage
+support enabled just fine as always. No difference can be noted in
+hugetlbfs other than there will be less overall fragmentation. All
+usual features belonging to hugetlbfs are preserved and
+unaffected. libhugetlbfs will also work fine as usual.
+
+== Graceful fallback ==
+
+Code walking pagetables but unaware of huge pmds can simply call
+split_huge_page_pmd(mm, pmd) where the pmd is the one returned by
+pmd_offset. It's trivial to make the code transparent hugepage aware
+by just grepping for "pmd_offset" and adding split_huge_page_pmd where
+missing after pmd_offset returns the pmd. Thanks to the graceful
+fallback design, with a one liner change, you can avoid writing
+hundreds if not thousands of lines of complex code to make your code
+hugepage aware.
+
+If you're not walking pagetables but you run into a physical hugepage
+that you can't handle natively in your code, you can split it by
+calling split_huge_page(page). This is what the Linux VM does before
+it tries to swap out the hugepage, for example.
+
+Example to make mremap.c transparent hugepage aware with a one liner
+change:
+
+diff --git a/mm/mremap.c b/mm/mremap.c
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -41,6 +41,7 @@ static pmd_t *get_old_pmd(struct mm_stru
+		return NULL;
+
+	pmd = pmd_offset(pud, addr);
++	split_huge_page_pmd(mm, pmd);
+	if (pmd_none_or_clear_bad(pmd))
+		return NULL;
+
+== Locking in hugepage aware code ==
+
+We want as much code as possible hugepage aware, as calling
+split_huge_page() or split_huge_page_pmd() has a cost.
+
+To make pagetable walks huge pmd aware, all you need to do is to call
+pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the
+mmap_sem in read (or write) mode to be sure a huge pmd cannot be
+created from under you by khugepaged (khugepaged collapse_huge_page
+takes the mmap_sem in write mode in addition to the anon_vma lock). If
+pmd_trans_huge returns false, you just fall back to the old code
+paths. If instead pmd_trans_huge returns true, you have to take the
+mm->page_table_lock and re-run pmd_trans_huge. Taking the
+page_table_lock will prevent the huge pmd from being converted into a
+regular pmd from under you (split_huge_page can run in parallel to the
+pagetable walk). If the second pmd_trans_huge returns false, you
+should just drop the page_table_lock and fall back to the old code as
+before. Otherwise you should run pmd_trans_splitting on the pmd. In
+case pmd_trans_splitting returns true, it means split_huge_page is
+already in the middle of splitting the page. So if pmd_trans_splitting
+returns true it's enough to drop the page_table_lock and call
+wait_split_huge_page and then fall back to the old code paths. You are
+guaranteed that by the time wait_split_huge_page returns, the pmd
+isn't huge anymore. If pmd_trans_splitting returns false, you can
+proceed to process the huge pmd and the hugepage natively. Once
+finished you can drop the page_table_lock.
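+
+The same rules, condensed into an illustrative fragment (the surrounding
+function, error handling and the actual processing of the huge pmd are
+omitted):
+
+	pmd = pmd_offset(pud, addr);	/* mmap_sem held at least for reading */
+	if (pmd_trans_huge(*pmd)) {
+		spin_lock(&mm->page_table_lock);
+		if (pmd_trans_huge(*pmd)) {
+			if (pmd_trans_splitting(*pmd)) {
+				/* split_huge_page is splitting it: wait,
+				   then use the regular pte paths below */
+				spin_unlock(&mm->page_table_lock);
+				wait_split_huge_page(vma->anon_vma, pmd);
+			} else {
+				/* stable huge pmd: process the huge pmd
+				   and the hugepage natively here ... */
+				spin_unlock(&mm->page_table_lock);
+				return;
+			}
+		} else {
+			/* converted to a regular pmd from under us */
+			spin_unlock(&mm->page_table_lock);
+		}
+	}
+	/* regular (non huge) pmd/pte code paths run here */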
+
+== compound_lock, get_user_pages and put_page ==
+
+split_huge_page internally has to distribute the refcounts in the head
+page to the tail pages before clearing all PG_head/tail bits from the
+page structures. It can do that easily for refcounts taken by huge pmd
+mappings. But the GUP API as created by hugetlbfs (that returns head
+and tail pages if running get_user_pages on an address backed by any
+hugepage) requires the refcount to be accounted on the tail pages and
+not only in the head pages, if we want to be able to run
+split_huge_page while there are gup pins established on any tail
+page. Failure to be able to run split_huge_page if there's any gup pin
+on any tail page would mean having to split all hugepages upfront in
+get_user_pages, which is unacceptable as too many gup users are
+performance critical and they must work natively on hugepages like
+they already work natively on hugetlbfs (hugetlbfs is simpler because
+hugetlbfs pages cannot be split, so there's no requirement to account
+the pins on the tail pages for hugetlbfs). If we didn't account the
+gup refcounts on the tail pages during gup, we wouldn't know anymore
+which tail page is pinned by gup and which is not while we run
+split_huge_page. But we still have to add the gup pin to the head page
+too, to know when we can free the compound page in case it's never
+split during its lifetime. That requires changing not just get_page,
+but put_page as well so that when put_page runs on a tail page (and
+only on a tail page) it will find its respective head page, and then
+it will decrease the head page refcount in addition to the tail page
+refcount. To obtain a head page reliably and to decrease its refcount
+without race conditions, put_page has to serialize against
+__split_huge_page_refcount using a special per-page lock called
+compound_lock.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index f8101d6..75613c9 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -2,3 +2,5 @@
 	- This file
 w1_therm
 	- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
+w1_ds2423
+	- The Maxim/Dallas Semiconductor ds2423 counter device.
diff --git a/Documentation/w1/slaves/w1_ds2423 b/Documentation/w1/slaves/w1_ds2423
new file mode 100644
index 0000000..90a65d2
--- /dev/null
+++ b/Documentation/w1/slaves/w1_ds2423
@@ -0,0 +1,47 @@
+Kernel driver w1_ds2423
+=======================
+
+Supported chips:
+  * Maxim DS2423 based counter devices.
+
+Supported family codes:
+	W1_THERM_DS2423	0x1D
+
+Author: Mika Laitio <lamikr@pilppa.org>
+
+Description
+-----------
+
+Support is provided through the sysfs w1_slave file. Each opening and
+read sequence of the w1_slave file initiates the read of counters and ram
+available in DS2423 pages 12 - 15.
+
+The result of each page is provided as ASCII output where each counter
+value and associated ram buffer is output on its own line.
+
+Each line will contain the values of 42 bytes read from the counter and
+memory page, along with crc=YES or NO indicating whether the read operation
+was successful and the CRC matched.
+If the operation was successful, there is also at the end of each line
+a counter value expressed as an integer after c=
+
+The meaning of the 42 bytes represented is the following:
+ - 1 byte from ram page
+ - 4 bytes for the counter value
+ - 4 zero bytes
+ - 2 bytes for crc16 which was calculated from the data read since the previous crc bytes
+ - 31 remaining bytes from the ram page
+ - crc=YES/NO indicating whether read was ok and crc matched
+ - c=<int> current counter value
+
+Example of a successful read:
+00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 02 00 00 00 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 29 c6 5d 18 00 00 00 00 04 37 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=408798761
+00 05 00 00 00 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=YES c=5
+
+Example of a read with CRC errors:
+00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 02 00 00 22 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO
+00 e1 61 5d 19 00 00 00 00 df 0b 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO
+00 05 00 00 20 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=NO
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index 996a27d..01c513f 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -190,9 +190,9 @@
 	* Long running CPU intensive workloads which can be better
 	  managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-	A freezeable wq participates in the freeze phase of the system
+	A freezable wq participates in the freeze phase of the system
 	suspend operations.  Work items on the wq are drained and no
 	new work item starts execution until thawed.
 
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index bdeb81c..9b7221a 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -622,9 +622,9 @@
   The payload may be compressed. The format of both the compressed and
   uncompressed data should be determined using the standard magic
   numbers.  The currently supported compression formats are gzip
-  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA
-  (magic number 5D 00).  The uncompressed payload is currently always ELF
-  (magic number 7F 45 4C 46).
+  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA
+  (magic number 5D 00), and XZ (magic number FD 37).  The uncompressed
+  payload is currently always ELF (magic number 7F 45 4C 46).
   
 Field name:	payload_length
 Type:		read
diff --git a/Documentation/xz.txt b/Documentation/xz.txt
new file mode 100644
index 0000000..2cf3e26
--- /dev/null
+++ b/Documentation/xz.txt
@@ -0,0 +1,121 @@
+
+XZ data compression in Linux
+============================
+
+Introduction
+
+    XZ is a general purpose data compression format with high compression
+    ratio and relatively fast decompression. The primary compression
+    algorithm (filter) is LZMA2. Additional filters can be used to improve
+    compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters
+    improve compression ratio of executable data.
+
+    The XZ decompressor in Linux is called XZ Embedded. It supports
+    the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
+    for integrity checking. The home page of XZ Embedded is at
+    <http://tukaani.org/xz/embedded.html>, where you can find the
+    latest version and also information about using the code outside
+    the Linux kernel.
+
+    For userspace, XZ Utils provide a zlib-like compression library
+    and a gzip-like command line tool. XZ Utils can be downloaded from
+    <http://tukaani.org/xz/>.
+
+XZ related components in the kernel
+
+    The xz_dec module provides an XZ decompressor with single-call (buffer
+    to buffer) and multi-call (stateful) APIs. The usage of the xz_dec
+    module is documented in include/linux/xz.h.
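+
+    As a rough sketch only (see include/linux/xz.h for the authoritative
+    API and its constraints), single-call decompression of an in-memory
+    buffer could look like:
+
+        #include <linux/xz.h>
+
+        static int example_unxz(const u8 *in, size_t in_size,
+                                u8 *out, size_t out_size)
+        {
+            struct xz_dec *s;
+            struct xz_buf b;
+            enum xz_ret ret;
+
+            /* XZ_SINGLE: input and output fit entirely in the buffers */
+            s = xz_dec_init(XZ_SINGLE, 0);
+            if (s == NULL)
+                return -ENOMEM;
+
+            b.in = in;
+            b.in_pos = 0;
+            b.in_size = in_size;
+            b.out = out;
+            b.out_pos = 0;
+            b.out_size = out_size;
+
+            ret = xz_dec_run(s, &b);
+            xz_dec_end(s);
+
+            return ret == XZ_STREAM_END ? 0 : -EINVAL;
+        }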
+
+    The xz_dec_test module is for testing xz_dec. xz_dec_test is not
+    useful unless you are hacking the XZ decompressor. xz_dec_test
+    allocates a char device major dynamically to which one can write
+    .xz files from userspace. The decompressed output is thrown away.
+    Keep an eye on dmesg to see diagnostics printed by xz_dec_test.
+    See the xz_dec_test source code for the details.
+
+    For decompressing the kernel image, initramfs, and initrd, there
+    is a wrapper function in lib/decompress_unxz.c. Its API is the
+    same as in other decompress_*.c files, which is defined in
+    include/linux/decompress/generic.h.
+
+    scripts/xz_wrap.sh is a wrapper for the xz command line tool found
+    from XZ Utils. The wrapper sets compression options to values suitable
+    for compressing the kernel image.
+
+    For kernel makefiles, two commands are provided for use with
+    $(call if_changed). The kernel image should be compressed with
+    $(call if_changed,xzkern) which will use a BCJ filter and a big LZMA2
+    dictionary. It will also append a four-byte trailer containing the
+    uncompressed size of the file, which is needed by the boot code.
+    Other things should be compressed with $(call if_changed,xzmisc)
+    which will use no BCJ filter and a 1 MiB LZMA2 dictionary.
+
+Notes on compression options
+
+    Since XZ Embedded supports only streams with no integrity check or
+    CRC32, make sure that you don't use some other integrity check type
+    when encoding files that are supposed to be decoded by the kernel. With
+    liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32
+    when encoding. With the xz command line tool, use --check=none or
+    --check=crc32.
+
+    Using CRC32 is strongly recommended unless there is some other layer
+    which will verify the integrity of the uncompressed data anyway.
+    Double checking the integrity would probably be waste of CPU cycles.
+    Note that the headers will always have a CRC32 which will be validated
+    by the decoder; you can only change the integrity check type (or
+    disable it) for the actual uncompressed data.
+
+    In userspace, LZMA2 is typically used with dictionary sizes of several
+    megabytes. The decoder needs to have the dictionary in RAM, thus big
+    dictionaries cannot be used for files that are intended to be decoded
+    by the kernel. 1 MiB is probably the maximum reasonable dictionary
+    size for in-kernel use (maybe more is OK for initramfs). The presets
+    in XZ Utils may not be optimal when creating files for the kernel,
+    so don't hesitate to use custom settings. Example:
+
+        xz --check=crc32 --lzma2=dict=512KiB inputfile
+
+    An exception to the above dictionary size limitation is when the decoder
+    is used in single-call mode. Decompressing the kernel itself is an
+    example of this situation. In single-call mode, the memory usage
+    doesn't depend on the dictionary size, and it is perfectly fine to
+    use a big dictionary: for maximum compression, the dictionary should
+    be at least as big as the uncompressed data itself.
+
+Future plans
+
+    Creating a limited XZ encoder may be considered if people think it is
+    useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at
+    the fastest settings, so it isn't clear if an LZMA2 encoder is wanted
+    in the kernel.
+
+    Support for limited random-access reading is planned for the
+    decompression code. I don't know if it could have any use in the
+    kernel, but I know that it would be useful in some embedded projects
+    outside the Linux kernel.
+
+Conformance to the .xz file format specification
+
+    There are a couple of corner cases where things have been simplified
+    at the expense of detecting errors as early as possible. These should
+    not matter in practice at all, since they don't cause security issues. But
+    it is good to know this if testing the code e.g. with the test files
+    from XZ Utils.
+
+Reporting bugs
+
+    Before reporting a bug, please check that it's not fixed already
+    at upstream. See <http://tukaani.org/xz/embedded.html> to get the
+    latest code.
+
+    Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
+    Freenode and talk to Larhzu. I don't actively read LKML or other
+    kernel-related mailing lists, so if there's something I should know,
+    you should email me personally or use IRC.
+
+    Don't bother Igor Pavlov with questions about the XZ implementation
+    in the kernel or about XZ Utils. While these two implementations
+    include essential code that is directly based on Igor Pavlov's code,
+    these implementations aren't maintained or supported by him.
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
index 6916077..faf976c 100644
--- a/Documentation/zh_CN/HOWTO
+++ b/Documentation/zh_CN/HOWTO
@@ -347,8 +347,8 @@
 最新bug的通知,可以订阅bugme-new邮件列表(只有新的bug报告会被寄到这里)
 或者订阅bugme-janitor邮件列表(所有bugzilla的变动都会被寄到这里)。
 
-	http://lists.osdl.org/mailman/listinfo/bugme-new
-	http://lists.osdl.org/mailman/listinfo/bugme-janitors
+	https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+	https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
 
 
 邮件列表
diff --git a/Documentation/zh_CN/SubmittingDrivers b/Documentation/zh_CN/SubmittingDrivers
index c27b0f6..5889f8d 100644
--- a/Documentation/zh_CN/SubmittingDrivers
+++ b/Documentation/zh_CN/SubmittingDrivers
@@ -61,7 +61,7 @@
 Linux 2.6:
 	除了遵循和 2.4 版内核同样的规则外,你还需要在 linux-kernel 邮件
 	列表上跟踪最新的 API 变化。向 Linux 2.6 内核提交驱动的顶级联系人
-	是 Andrew Morton <akpm@osdl.org>。
+	是 Andrew Morton <akpm@linux-foundation.org>。
 
 决定设备驱动能否被接受的条件
 ----------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index bb6c1ac..f1bc3dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -162,7 +162,7 @@
 W:	http://serial.sourceforge.net
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
-F:	drivers/serial/8250*
+F:	drivers/tty/serial/8250*
 F:	include/linux/serial_8250.h
 
 8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
@@ -496,7 +496,6 @@
 F:	arch/x86/kernel/microcode_amd.c
 
 AMS (Apple Motion Sensor) DRIVER
-M:	Stelian Pop <stelian@popies.net>
 M:	Michael Hanselmann <linux-kernel@hansmi.ch>
 S:	Supported
 F:	drivers/macintosh/ams/
@@ -625,11 +624,15 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
-ARM/ATMEL AT91RM9200 ARM ARCHITECTURE
+ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
 M:	Andrew Victor <linux@maxim.org.za>
+M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://maxim.org.za/at91_26.html
-S:	Maintained
+W:	http://www.linux4sam.org
+S:	Supported
+F:	arch/arm/mach-at91/
 
 ARM/BCMRING ARM ARCHITECTURE
 M:	Jiandong Zheng <jdzheng@broadcom.com>
@@ -882,15 +885,15 @@
 
 ARM/QUALCOMM MSM MACHINE SUPPORT
 M:	David Brown <davidb@codeaurora.org>
-M:	Daniel Walker <dwalker@codeaurora.org>
+M:	Daniel Walker <dwalker@fifo99.com>
 M:	Bryan Huntsman <bryanh@codeaurora.org>
 L:	linux-arm-msm@vger.kernel.org
 F:	arch/arm/mach-msm/
 F:	drivers/video/msm/
 F:	drivers/mmc/host/msm_sdcc.c
 F:	drivers/mmc/host/msm_sdcc.h
-F:	drivers/serial/msm_serial.h
-F:	drivers/serial/msm_serial.c
+F:	drivers/tty/serial/msm_serial.h
+F:	drivers/tty/serial/msm_serial.c
 T:	git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
 S:	Maintained
 
@@ -975,6 +978,8 @@
 F:	arch/arm/plat-samsung/
 F:	arch/arm/plat-s3c24xx/
 F:	arch/arm/plat-s5p/
+F:	drivers/*/*s3c2410*
+F:	drivers/*/*/*s3c2410*
 
 ARM/S3C2410 ARM ARCHITECTURE
 M:	Ben Dooks <ben-linux@fluff.org>
@@ -1005,6 +1010,15 @@
 S:	Maintained
 F:	arch/arm/mach-s5p*/
 
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M:	Kyungmin Park <kyungmin.park@samsung.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-s5pv210/mach-aquila.c
+F:	arch/arm/mach-s5pv210/mach-goni.c
+F:	arch/arm/mach-exynos4/mach-universal_c210.c
+F:	arch/arm/mach-exynos4/mach-nuri.c
+
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1257,7 +1271,7 @@
 ATMEL AT91 / AT32 SERIAL DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 S:	Supported
-F:	drivers/serial/atmel_serial.c
+F:	drivers/tty/serial/atmel_serial.c
 
 ATMEL LCDFB DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1413,7 +1427,7 @@
 L:	uclinux-dist-devel@blackfin.uclinux.org
 W:	http://blackfin.uclinux.org
 S:	Supported
-F:	drivers/serial/bfin_5xx.c
+F:	drivers/tty/serial/bfin_5xx.c
 
 BLACKFIN WATCHDOG DRIVER
 M:	Mike Frysinger <vapier.adi@gmail.com>
@@ -1462,6 +1476,7 @@
 
 BONDING DRIVER
 M:	Jay Vosburgh <fubar@us.ibm.com>
+M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -1524,6 +1539,14 @@
 F:	block/bsg.c
 F:	include/linux/bsg.h
 
+BT87X AUDIO DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	Documentation/sound/alsa/Bt87x.txt
+F:	sound/pci/bt87x.c
+
 BT8XXGPIO DRIVER
 M:	Michael Buesch <mb@bu3sch.de>
 W:	http://bu3sch.de/btgpio.php
@@ -1549,6 +1572,13 @@
 F:	Documentation/video4linux/bttv/
 F:	drivers/media/video/bt8xx/bttv*
 
+C-MEDIA CMI8788 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/pci/oxygen/
+
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com
@@ -1672,6 +1702,13 @@
 S:	Supported
 F:	scripts/checkpatch.pl
 
+CHINESE DOCUMENTATION
+M:	Harry Wei <harryxiyou@gmail.com>
+L:	xiyoulinuxkernelgroup@googlegroups.com
+L:	linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/zh_CN/
+
 CISCO VIC ETHERNET NIC DRIVER
 M:	Vasanthy Kolluri <vkolluri@cisco.com>
 M:	Roopa Prabhu <roprabhu@cisco.com>
@@ -1785,7 +1822,8 @@
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs.git
 S:	Supported
 F:	fs/configfs/
 F:	include/linux/configfs.h
@@ -1862,7 +1900,7 @@
 W:	http://developer.axis.com
 S:	Maintained
 F:	arch/cris/
-F:	drivers/serial/crisv10.*
+F:	drivers/tty/serial/crisv10.*
 
 CRYPTO API
 M:	Herbert Xu <herbert@gondor.apana.org.au>
@@ -2005,9 +2043,9 @@
 F:	drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M:	Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:	dccp@vger.kernel.org
-W:	http://linux-net.osdl.org/index.php/DCCP
+W:	http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:	Maintained
 F:	include/linux/dccp.h
 F:	include/linux/tfrc.h
@@ -2105,6 +2143,7 @@
 F:	fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
+M:	Vinod Koul <vinod.koul@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
@@ -2201,7 +2240,7 @@
 DZ DECSTATION DZ11 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
-F:	drivers/serial/dz.*
+F:	drivers/tty/serial/dz.*
 
 EATA-DMA SCSI DRIVER
 M:	Michael Neuffer <mike@i-Connect.Net>
@@ -2339,6 +2378,13 @@
 S:	Maintained
 F:	drivers/edac/r82600_edac.c
 
+EDIROL UA-101/UA-1000 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/usb/misc/ua101.c
+
 EEEPC LAPTOP EXTRAS DRIVER
 M:	Corentin Chary <corentincj@iksaif.net>
 L:	acpi4asus-user@lists.sourceforge.net
@@ -2429,7 +2475,7 @@
 M:	Stephen Hemminger <shemminger@linux-foundation.org>
 L:	bridge@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
-W:	http://www.linux-foundation.org/en/Net:Bridge
+W:	http://www.linuxfoundation.org/en/Net:Bridge
 S:	Maintained
 F:	include/linux/netfilter_bridge/
 F:	net/bridge/
@@ -2621,7 +2667,7 @@
 M:	Timur Tabi <timur@freescale.com>
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
-F:	drivers/serial/ucc_uart.c
+F:	drivers/tty/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
 M:	Timur Tabi <timur@freescale.com>
@@ -2746,6 +2792,15 @@
 F:	drivers/isdn/gigaset/
 F:	include/linux/gigaset_dev.h
 
+GPIO SUBSYSTEM
+M:	Grant Likely <grant.likely@secretlab.ca>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+T:	git git://git.secretlab.ca/git/linux-2.6.git
+F:	Documentation/gpio/gpio.txt
+F:	drivers/gpio/
+F:	include/linux/gpio*
+
 GRETH 10/100/1G Ethernet MAC device driver
 M:	Kristoffer Glembo <kristoffer@gaisler.com>
 L:	netdev@vger.kernel.org
@@ -2835,7 +2890,6 @@
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
-T:	quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:	Maintained
 F:	Documentation/hwmon/
@@ -3113,6 +3167,12 @@
 F:	net/ieee802154/
 F:	drivers/ieee802154/
 
+IKANOS/ADI EAGLE ADSL USB DRIVER
+M:	Matthieu Castet <castet.matthieu@free.fr>
+M:	Stanislaw Gruszka <stf_xl@wp.pl>
+S:	Maintained
+F:	drivers/usb/atm/ueagle-atm.c
+
 INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
 M:	Mimi Zohar <zohar@us.ibm.com>
 S:	Supported
@@ -3124,7 +3184,7 @@
 F:	drivers/video/imsttfb.c
 
 INFINIBAND SUBSYSTEM
-M:	Roland Dreier <rolandd@cisco.com>
+M:	Roland Dreier <roland@kernel.org>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
 L:	linux-rdma@vger.kernel.org
@@ -3301,7 +3361,6 @@
 F:	include/linux/wimax/i2400m.h
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
-M:	Reinette Chatre <reinette.chatre@intel.com>
 M:	Wey-Yi Guy <wey-yi.w.guy@intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
@@ -3328,7 +3387,7 @@
 M:	Pat Gefre <pfg@sgi.com>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/serial/ioc3_serial.c
+F:	drivers/tty/serial/ioc3_serial.c
 
 IP MASQUERADING
 M:	Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
@@ -3470,7 +3529,7 @@
 F:	Documentation/hwmon/jc42
 
 JFS FILESYSTEM
-M:	Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+M:	Dave Kleikamp <shaggy@kernel.org>
 L:	jfs-discussion@lists.sourceforge.net
 W:	http://jfs.sourceforge.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -3505,7 +3564,14 @@
 M:	Breno Leitao <leitao@linux.vnet.ibm.com>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/serial/jsm/
+F:	drivers/tty/serial/jsm/
+
+K10TEMP HARDWARE MONITORING DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/k10temp
+F:	drivers/hwmon/k10temp.c
 
 K8TEMP HARDWARE MONITORING DRIVER
 M:	Rudolf Marek <r.marek@assembler.cz>
@@ -3641,6 +3707,28 @@
 F:	include/keys/
 F:	security/keys/
 
+KEYS-TRUSTED
+M:	David Safford <safford@watson.ibm.com>
+M:	Mimi Zohar <zohar@us.ibm.com>
+L:	linux-security-module@vger.kernel.org
+L:	keyrings@linux-nfs.org
+S:	Supported
+F:	Documentation/keys-trusted-encrypted.txt
+F:	include/keys/trusted-type.h
+F:	security/keys/trusted.c
+F:	security/keys/trusted.h
+
+KEYS-ENCRYPTED
+M:	Mimi Zohar <zohar@us.ibm.com>
+M:	David Safford <safford@watson.ibm.com>
+L:	linux-security-module@vger.kernel.org
+L:	keyrings@linux-nfs.org
+S:	Supported
+F:	Documentation/keys-trusted-encrypted.txt
+F:	include/keys/encrypted-type.h
+F:	security/keys/encrypted.c
+F:	security/keys/encrypted.h
+
 KGDB / KDB /debug_core
 M:	Jason Wessel <jason.wessel@windriver.com>
 W:	http://kgdb.wiki.kernel.org/
@@ -3648,14 +3736,14 @@
 S:	Maintained
 F:	Documentation/DocBook/kgdb.tmpl
 F:	drivers/misc/kgdbts.c
-F:	drivers/serial/kgdboc.c
+F:	drivers/tty/serial/kgdboc.c
 F:	include/linux/kdb.h
 F:	include/linux/kgdb.h
 F:	kernel/debug/
 
 KMEMCHECK
 M:	Vegard Nossum <vegardno@ifi.uio.no>
-M:	Pekka Enberg <penberg@cs.helsinki.fi>
+M:	Pekka Enberg <penberg@kernel.org>
 S:	Maintained
 F:	Documentation/kmemcheck.txt
 F:	arch/x86/include/asm/kmemcheck.h
@@ -4092,9 +4180,8 @@
 F:	kernel/module.c
 
 MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER
-M:	Stelian Pop <stelian@popies.net>
 W:	http://popies.net/meye/
-S:	Maintained
+S:	Orphan
 F:	Documentation/video4linux/meye.txt
 F:	drivers/media/video/meye.*
 F:	include/linux/meye.h
@@ -4205,10 +4292,7 @@
 F:	net/sched/sch_netem.c
 
 NETERION 10GbE DRIVERS (s2io/vxge)
-M:	Ramkrishna Vepa <ramkrishna.vepa@exar.com>
-M:	Sivakumar Subramani <sivakumar.subramani@exar.com>
-M:	Sreenivasa Honnur <sreenivasa.honnur@exar.com>
-M:	Jon Mason <jon.mason@exar.com>
+M:	Jon Mason <jdmason@kudzu.us>
 L:	netdev@vger.kernel.org
 W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
 W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -4382,11 +4466,11 @@
 F:	drivers/scsi/nsp32*
 
 NTFS FILESYSTEM
-M:	Anton Altaparmakov <aia21@cantab.net>
+M:	Anton Altaparmakov <anton@tuxera.com>
 L:	linux-ntfs-dev@lists.sourceforge.net
-W:	http://www.linux-ntfs.org/
+W:	http://www.tuxera.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
-S:	Maintained
+S:	Supported
 F:	Documentation/filesystems/ntfs.txt
 F:	fs/ntfs/
 
@@ -4530,7 +4614,7 @@
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:	Grant Likely <grant.likely@secretlab.ca>
-L:	devicetree-discuss@lists.ozlabs.org
+L:	devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
 W:	http://fdt.secretlab.ca
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 S:	Maintained
@@ -4538,6 +4622,13 @@
 F:	include/linux/of*.h
 K:	of_get_property
 
+OPL4 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/drivers/opl4/
+
 OPROFILE
 M:	Robert Richter <robert.richter@amd.com>
 L:	oprofile-list@lists.sf.net
@@ -4549,7 +4640,7 @@
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:	Mark Fasheh <mfasheh@suse.com>
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://oss.oracle.com/projects/ocfs2/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
@@ -4634,7 +4725,7 @@
 M:	Chris Wright <chrisw@sous-sol.org>
 M:	Alok Kataria <akataria@vmware.com>
 M:	Rusty Russell <rusty@rustcorp.com.au>
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 S:	Supported
 F:	Documentation/ia64/paravirt_ops.txt
 F:	arch/*/kernel/paravirt*
@@ -5087,6 +5178,7 @@
 
 RAPIDIO SUBSYSTEM
 M:	Matt Porter <mporter@kernel.crashing.org>
+M:	Alexandre Bounine <alexandre.bounine@idt.com>
 S:	Maintained
 F:	drivers/rapidio/
 
@@ -5132,11 +5224,6 @@
 F:	kernel/srcu*
 X:	kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER (LEGACY)
-M:	Paul Gortmaker <p_gortmaker@yahoo.com>
-S:	Maintained
-F:	drivers/char/rtc.c
-
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
 L:	rtc-linux@googlegroups.com
@@ -5194,7 +5281,7 @@
 F:	drivers/net/wireless/rtl818x/rtl8180/
 
 RTL8187 WIRELESS DRIVER
-M:	Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:	Herton Ronaldo Krzesinski <herton@canonical.com>
 M:	Hin-Tak Leung <htl10@users.sourceforge.net>
 M:	Larry Finger <Larry.Finger@lwfinger.net>
 L:	linux-wireless@vger.kernel.org
@@ -5242,8 +5329,7 @@
 F:	drivers/s390/net/
 
 S390 ZCRYPT DRIVER
-M:	Felix Beck <felix.beck@de.ibm.com>
-M:	Ralph Wuerthner <ralph.wuerthner@de.ibm.com>
+M:	Holger Dengler <hd@linux.vnet.ibm.com>
 M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -5289,7 +5375,7 @@
 M:	Jassi Brar <jassi.brar@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
-F:	sound/soc/s3c24xx
+F:	sound/soc/samsung
 
 TIMEKEEPING, NTP
 M:	John Stultz <johnstul@us.ibm.com>
@@ -5489,12 +5575,11 @@
 F:	drivers/scsi/be2iscsi/
 
 SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
-M:	Sathya Perla <sathyap@serverengines.com>
-M:	Subbu Seetharaman <subbus@serverengines.com>
-M:	Sarveshwar Bandi <sarveshwarb@serverengines.com>
-M:	Ajit Khaparde <ajitk@serverengines.com>
+M:	Sathya Perla <sathya.perla@emulex.com>
+M:	Subbu Seetharaman <subbu.seetharaman@emulex.com>
+M:	Ajit Khaparde <ajit.khaparde@emulex.com>
 L:	netdev@vger.kernel.org
-W:	http://www.serverengines.com
+W:	http://www.emulex.com
 S:	Supported
 F:	drivers/net/benet/
 
@@ -5516,7 +5601,7 @@
 L:	linux-ia64@vger.kernel.org
 S:	Supported
 F:	Documentation/ia64/serial.txt
-F:	drivers/serial/ioc?_serial.c
+F:	drivers/tty/serial/ioc?_serial.c
 F:	include/linux/ioc?.h
 
 SGI VISUAL WORKSTATION 320 AND 540
@@ -5538,7 +5623,7 @@
 S:	Maintained
 F:	Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
 F:	arch/arm/mach-lh7a40x/
-F:	drivers/serial/serial_lh7a40x.c
+F:	drivers/tty/serial/serial_lh7a40x.c
 F:	drivers/usb/gadget/lh7a40*
 F:	drivers/usb/host/ohci-lh7a40*
 
@@ -5554,18 +5639,20 @@
 
 SIMTEC EB110ATX (Chalice CATS)
 P:	Ben Dooks
-M:	Vincent Sanders <support@simtec.co.uk>
+P:	Vincent Sanders <vince@simtec.co.uk>
+M:	Simtec Linux Team <linux@simtec.co.uk>
 W:	http://www.simtec.co.uk/products/EB110ATX/
 S:	Supported
 
 SIMTEC EB2410ITX (BAST)
 P:	Ben Dooks
-M:	Vincent Sanders <support@simtec.co.uk>
+P:	Vincent Sanders <vince@simtec.co.uk>
+M:	Simtec Linux Team <linux@simtec.co.uk>
 W:	http://www.simtec.co.uk/products/EB2410ITX/
 S:	Supported
-F:	arch/arm/mach-s3c2410/
-F:	drivers/*/*s3c2410*
-F:	drivers/*/*/*s3c2410*
+F:	arch/arm/mach-s3c2410/mach-bast.c
+F:	arch/arm/mach-s3c2410/bast-ide.c
+F:	arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:	Kevin Hilman <khilman@deeprootsystems.com>
@@ -5617,7 +5704,7 @@
 
 SLAB ALLOCATOR
 M:	Christoph Lameter <cl@linux-foundation.org>
-M:	Pekka Enberg <penberg@cs.helsinki.fi>
+M:	Pekka Enberg <penberg@kernel.org>
 M:	Matt Mackall <mpm@selenic.com>
 L:	linux-mm@kvack.org
 S:	Maintained
@@ -5758,14 +5845,14 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
 S:	Maintained
-F:	drivers/serial/suncore.c
-F:	drivers/serial/suncore.h
-F:	drivers/serial/sunhv.c
-F:	drivers/serial/sunsab.c
-F:	drivers/serial/sunsab.h
-F:	drivers/serial/sunsu.c
-F:	drivers/serial/sunzilog.c
-F:	drivers/serial/sunzilog.h
+F:	drivers/tty/serial/suncore.c
+F:	drivers/tty/serial/suncore.h
+F:	drivers/tty/serial/sunhv.c
+F:	drivers/tty/serial/sunsab.c
+F:	drivers/tty/serial/sunsab.h
+F:	drivers/tty/serial/sunsu.c
+F:	drivers/tty/serial/sunzilog.c
+F:	drivers/tty/serial/sunzilog.h
 
 SPEAR PLATFORM SUPPORT
 M:	Viresh Kumar <viresh.kumar@st.com>
@@ -6032,7 +6119,7 @@
 F:	security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
-M:	Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:	Herton Ronaldo Krzesinski <herton@canonical.com>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/topstar-laptop.c
@@ -6095,8 +6182,8 @@
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
-F:	drivers/char/tty_*
-F:	drivers/serial/serial_core.c
+F:	drivers/tty/*
+F:	drivers/tty/serial/serial_core.c
 F:	include/linux/serial_core.h
 F:	include/linux/serial.h
 F:	include/linux/tty.h
@@ -6297,6 +6384,13 @@
 W:	http://www.one-eyed-alien.net/~mdharm/linux-usb/
 F:	drivers/usb/storage/
 
+USB MIDI DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/usb/midi.*
+
 USB OHCI DRIVER
 M:	David Brownell <dbrownell@users.sourceforge.net>
 L:	linux-usb@vger.kernel.org
@@ -6533,10 +6627,20 @@
 F:	drivers/char/virtio_console.c
 F:	include/linux/virtio_console.h
 
+VIRTIO CORE, NET AND BLOCK DRIVERS
+M:	Rusty Russell <rusty@rustcorp.com.au>
+M:	"Michael S. Tsirkin" <mst@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/virtio/
+F:	drivers/net/virtio_net.c
+F:	drivers/block/virtio_blk.c
+F:	include/linux/virtio_*.h
+
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 L:	kvm@vger.kernel.org
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/vhost/
@@ -6555,13 +6659,12 @@
 F:	drivers/i2c/busses/i2c-viapro.c
 
 VIA SD/MMC CARD CONTROLLER DRIVER
-M:	Joseph Chan <JosephChan@via.com.tw>
+M:	Bruce Chang <brucechang@via.com.tw>
 M:	Harald Welte <HaraldWelte@viatech.com>
 S:	Maintained
 F:	drivers/mmc/host/via-sdmmc.c
 
 VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
-M:	Joseph Chan <JosephChan@via.com.tw>
 M:	Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
@@ -6586,7 +6689,7 @@
 
 VLYNQ BUS
 M:	Florian Fainelli <florian@openwrt.org>
-L:	openwrt-devel@lists.openwrt.org
+L:	openwrt-devel@lists.openwrt.org (subscribers-only)
 S:	Maintained
 F:	drivers/vlynq/vlynq.c
 F:	include/linux/vlynq.h
@@ -6707,12 +6810,12 @@
 F:	drivers/net/wireless/wl1251/*
 
 WL1271 WIRELESS DRIVER
-M:	Luciano Coelho <luciano.coelho@nokia.com>
+M:	Luciano Coelho <coelho@ti.com>
 L:	linux-wireless@vger.kernel.org
-W:	http://wireless.kernel.org
+W:	http://wireless.kernel.org/en/users/Drivers/wl12xx
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
 S:	Maintained
-F:	drivers/net/wireless/wl12xx/wl1271*
+F:	drivers/net/wireless/wl12xx/
 F:	include/linux/wl12xx.h
 
 WL3501 WIRELESS PCMCIA CARD DRIVER
@@ -6806,7 +6909,7 @@
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 S:	Supported
 F:	arch/x86/xen/
 F:	drivers/*/xen-*front.c
@@ -6835,7 +6938,7 @@
 M:	Peter Korsgaard <jacmet@sunsite.dk>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/serial/uartlite.c
+F:	drivers/tty/serial/uartlite.c
 
 YAM DRIVER FOR AX.25
 M:	Jean-Paul Roubelat <jpr@f6fbb.org>
@@ -6881,7 +6984,7 @@
 ZS DECSTATION Z85C30 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
-F:	drivers/serial/zs.*
+F:	drivers/tty/serial/zs.*
 
 GRE DEMULTIPLEXER DRIVER
 M:	Dmitry Kozlov <xeb@mail.ru>
diff --git a/Makefile b/Makefile
index 6a45769..d6592b6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 2
 PATCHLEVEL = 6
-SUBLEVEL = 37
+SUBLEVEL = 38
 EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 943fe69..cc31bec 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -8,6 +8,10 @@
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select AUTO_IRQ_AFFINITY if SMP
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
@@ -68,19 +72,6 @@
 	bool
 	default n
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
-config AUTO_IRQ_AFFINITY
-	bool
-	depends on SMP
-	default y
-
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index eda9b90..56ff965 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -37,8 +37,9 @@
  */
 extern inline void __set_hae(unsigned long new_hae)
 {
-	unsigned long flags;
-	local_irq_save(flags);
+	unsigned long flags = swpipl(IPL_MAX);
+
+	barrier();
 
 	alpha_mv.hae_cache = new_hae;
 	*alpha_mv.hae_register = new_hae;
@@ -46,7 +47,8 @@
 	/* Re-read to make sure it was written.  */
 	new_hae = *alpha_mv.hae_register;
 
-	local_irq_restore(flags);
+	setipl(flags);
+	barrier();
 }
 
 extern inline void set_hae(unsigned long new_hae)
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h
index 99c56d4..72db984 100644
--- a/arch/alpha/include/asm/mman.h
+++ b/arch/alpha/include/asm/mman.h
@@ -53,6 +53,9 @@
 #define MADV_MERGEABLE   12		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 1ee9b5b..9bb7b858 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -3,8 +3,8 @@
 #
 
 extra-y		:= head.o vmlinux.lds
-EXTRA_AFLAGS	:= $(KBUILD_CFLAGS)
-EXTRA_CFLAGS	:= -Werror -Wno-sign-compare
+asflags-y	:= $(KBUILD_CFLAGS)
+ccflags-y	:= -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
 	    irq_alpha.o signal.o setup.o ptrace.o time.o \
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index fe91298..a19d600 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,10 +44,16 @@
 
 int irq_select_affinity(unsigned int irq)
 {
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_chip *chip;
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
+	if (!data)
+		return 1;
+	chip = irq_data_get_irq_chip(data);
+
+	if (!chip->irq_set_affinity || irq_user_affinity[irq])
 		return 1;
 
 	while (!cpu_possible(cpu) ||
@@ -55,8 +61,8 @@
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
-	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
+	cpumask_copy(data->affinity, cpumask_of(cpu));
+	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
 #endif /* CONFIG_SMP */
@@ -67,6 +73,7 @@
 	int j;
 	int irq = *(loff_t *) v;
 	struct irqaction * action;
+	struct irq_desc *desc;
 	unsigned long flags;
 
 #ifdef CONFIG_SMP
@@ -79,8 +86,13 @@
 #endif
 
 	if (irq < ACTUAL_NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
-		action = irq_desc[irq].action;
+		desc = irq_to_desc(irq);
+
+		if (!desc)
+			return 0;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action) 
 			goto unlock;
 		seq_printf(p, "%3d: ", irq);
@@ -90,7 +102,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[irq].chip->name);
+		seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
 		seq_printf(p, "  %c%s",
 			(action->flags & IRQF_DISABLED)?'+':' ',
 			action->name);
@@ -103,7 +115,7 @@
 
 		seq_putc(p, '\n');
 unlock:
-		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (irq == ACTUAL_NR_IRQS) {
 #ifdef CONFIG_SMP
 		seq_puts(p, "IPI: ");
@@ -142,8 +154,10 @@
 	 * handled by some other CPU. (or is disabled)
 	 */
 	static unsigned int illegal_count=0;
+	struct irq_desc *desc = irq_to_desc(irq);
 	
-	if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) {
+	if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
+	    illegal_count < MAX_ILLEGAL_IRQS)) {
 		irq_err_count++;
 		illegal_count++;
 		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
@@ -151,14 +165,14 @@
 		return;
 	}
 
-	irq_enter();
 	/*
-	 * __do_IRQ() must be called with IPL_MAX. Note that we do not
+	 * From here we must proceed with IPL_MAX. Note that we do not
 	 * explicitly enable interrupts afterwards - some MILO PALcode
 	 * (namely LX164 one) seems to have severe problems with RTI
 	 * at IPL 0.
 	 */
 	local_irq_disable();
-	__do_IRQ(irq);
+	irq_enter();
+	generic_handle_irq_desc(irq, desc);
 	irq_exit();
 }
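
The two irq.c hunks above replace direct pokes at the global irq_desc[] array with the irq_data accessors and retire __do_IRQ() in favour of the descriptor-based flow handlers. Below is a minimal caller-side sketch of that pattern (not taken from the patch): it uses only calls that already appear in the hunks -- irq_get_irq_data(), irq_data_get_irq_chip(), the three-argument chip->irq_set_affinity(), irq_to_desc() and generic_handle_irq_desc() -- while the wrapper names pin_irq_to_cpu() and dispatch_irq() are made up for illustration.

#include <linux/irq.h>
#include <linux/hardirq.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Route one irq to a single CPU through the chip's irq_set_affinity hook. */
static int pin_irq_to_cpu(unsigned int irq, int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip;

	if (!data)
		return -ENODEV;

	chip = irq_data_get_irq_chip(data);
	if (!chip->irq_set_affinity)
		return -ENOSYS;

	/* keep the descriptor state and the hardware routing in sync */
	cpumask_copy(data->affinity, cpumask_of(cpu));
	return chip->irq_set_affinity(data, cpumask_of(cpu), false);
}

/* Dispatch one irq the way the rewritten handle_irq() now does. */
static void dispatch_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	irq_enter();
	generic_handle_irq_desc(irq, desc);	/* replaces __do_IRQ(irq) */
	irq_exit();
}
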
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 4c8bb37..411ca11 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -219,30 +219,17 @@
  * processed by PALcode, and comes in via entInt vector 1.
  */
 
-static void rtc_enable_disable(unsigned int irq) { }
-static unsigned int rtc_startup(unsigned int irq) { return 0; }
-
 struct irqaction timer_irqaction = {
 	.handler	= timer_interrupt,
 	.flags		= IRQF_DISABLED,
 	.name		= "timer",
 };
 
-static struct irq_chip rtc_irq_type = {
-	.name		= "RTC",
-	.startup	= rtc_startup,
-	.shutdown	= rtc_enable_disable,
-	.enable		= rtc_enable_disable,
-	.disable	= rtc_enable_disable,
-	.ack		= rtc_enable_disable,
-	.end		= rtc_enable_disable,
-};
-
 void __init
 init_rtc_irq(void)
 {
-	irq_desc[RTC_IRQ].status = IRQ_DISABLED;
-	irq_desc[RTC_IRQ].chip = &rtc_irq_type;
+	set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+				      handle_simple_irq, "RTC");
 	setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 83a9ac2..c7cc981 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -33,10 +33,10 @@
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 	spin_unlock(&i8259_irq_lock);
 }
 
@@ -47,16 +47,18 @@
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	__i8259a_disable_irq(irq);
+	__i8259a_disable_irq(d->irq);
 	spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	spin_lock(&i8259_irq_lock);
 	__i8259a_disable_irq(irq);
 
@@ -69,28 +71,11 @@
 	spin_unlock(&i8259_irq_lock);
 }
 
-unsigned int
-i8259a_startup_irq(unsigned int irq)
-{
-	i8259a_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-void
-i8259a_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		i8259a_enable_irq(irq);
-}
-
 struct irq_chip i8259a_irq_type = {
 	.name		= "XT-PIC",
-	.startup	= i8259a_startup_irq,
-	.shutdown	= i8259a_disable_irq,
-	.enable		= i8259a_enable_irq,
-	.disable	= i8259a_disable_irq,
-	.ack		= i8259a_mask_and_ack_irq,
-	.end		= i8259a_end_irq,
+	.irq_unmask	= i8259a_enable_irq,
+	.irq_mask	= i8259a_disable_irq,
+	.irq_mask_ack	= i8259a_mask_and_ack_irq,
 };
 
 void __init
@@ -107,8 +92,7 @@
 	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
 
 	for (i = 0; i < 16; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].chip = &i8259a_irq_type;
+		set_irq_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
 	}
 
 	setup_irq(2, &cascade);
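
The i8259 conversion above establishes the pattern repeated in every Alpha platform file that follows: handlers take a struct irq_data * and read the Linux irq number from d->irq, the old .enable/.disable/.ack/.end methods collapse into .irq_unmask/.irq_mask/.irq_mask_ack, the hand-rolled *_startup_irq()/*_end_irq() helpers disappear because handle_level_irq() does the masking and re-enabling, and registration goes through set_irq_chip_and_handler() plus irq_set_status_flags() instead of writing irq_desc[i].status and irq_desc[i].chip directly. A condensed sketch of that shape, with a hypothetical "foo" controller standing in for the per-board cached-mask helpers seen in the real hunks:

#include <linux/irq.h>
#include <linux/init.h>

static unsigned long foo_cached_irq_mask;

/* board-specific stand-in: push the cached mask out to the controller */
static void foo_update_irq_hw(unsigned long mask)
{
}

static void foo_enable_irq(struct irq_data *d)
{
	foo_update_irq_hw(foo_cached_irq_mask |= 1UL << (d->irq - 16));
}

static void foo_disable_irq(struct irq_data *d)
{
	foo_update_irq_hw(foo_cached_irq_mask &= ~(1UL << (d->irq - 16)));
}

static struct irq_chip foo_irq_type = {
	.name		= "FOO",
	.irq_unmask	= foo_enable_irq,
	.irq_mask	= foo_disable_irq,
	.irq_mask_ack	= foo_disable_irq,
};

static void __init foo_init_irq(void)
{
	long i;

	for (i = 16; i < 32; ++i) {
		/* replaces the old irq_desc[i].status/.chip assignments */
		set_irq_chip_and_handler(i, &foo_irq_type, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}
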
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index b63ccd7..d507a23 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -31,11 +31,9 @@
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 989ce46..b30227f 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -29,35 +29,21 @@
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
-}
-
-static unsigned int
-pyxis_startup_irq(unsigned int irq)
-{
-	pyxis_enable_irq(irq);
-	return 0;
+	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_end_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		pyxis_enable_irq(irq);
-}
-
-static void
-pyxis_mask_and_ack_irq(unsigned int irq)
-{
-	unsigned long bit = 1UL << (irq - 16);
+	unsigned long bit = 1UL << (d->irq - 16);
 	unsigned long mask = cached_irq_mask &= ~bit;
 
 	/* Disable the interrupt.  */
@@ -72,12 +58,9 @@
 
 static struct irq_chip pyxis_irq_type = {
 	.name		= "PYXIS",
-	.startup	= pyxis_startup_irq,
-	.shutdown	= pyxis_disable_irq,
-	.enable		= pyxis_enable_irq,
-	.disable	= pyxis_disable_irq,
-	.ack		= pyxis_mask_and_ack_irq,
-	.end		= pyxis_end_irq,
+	.irq_mask_ack	= pyxis_mask_and_ack_irq,
+	.irq_mask	= pyxis_disable_irq,
+	.irq_unmask	= pyxis_enable_irq,
 };
 
 void 
@@ -119,8 +102,8 @@
 	for (i = 16; i < 48; ++i) {
 		if ((ignore_mask >> i) & 1)
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &pyxis_irq_type;
+		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index d63e93e..82a47bb 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -18,44 +18,27 @@
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_ena(irq - 16);
+	cserve_ena(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_dis(irq - 16);
+	cserve_dis(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
-static unsigned int
-srm_startup_irq(unsigned int irq)
-{
-	srm_enable_irq(irq);
-	return 0;
-}
-
-static void
-srm_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		srm_enable_irq(irq);
-}
-
 /* Handle interrupts from the SRM, assuming no additional weirdness.  */
 static struct irq_chip srm_irq_type = {
 	.name		= "SRM",
-	.startup	= srm_startup_irq,
-	.shutdown	= srm_disable_irq,
-	.enable		= srm_enable_irq,
-	.disable	= srm_disable_irq,
-	.ack		= srm_disable_irq,
-	.end		= srm_end_irq,
+	.irq_unmask	= srm_enable_irq,
+	.irq_mask	= srm_disable_irq,
+	.irq_mask_ack	= srm_disable_irq,
 };
 
 void __init
@@ -68,8 +51,8 @@
 	for (i = 16; i < max; ++i) {
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &srm_irq_type;
+		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 547e8b8..fe698b5 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -951,9 +951,6 @@
 	return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);
 }
 
-#define MAX_SELECT_SECONDS \
-	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
-
 SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
 		fd_set __user *, exp, struct timeval32 __user *, tvp)
 {
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 20a30b8..88d95e8 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -44,59 +44,42 @@
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-	alcor_disable_irq(irq);
+	alcor_disable_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-	*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
-static unsigned int
-alcor_startup_irq(unsigned int irq)
-{
-	alcor_enable_irq(irq);
-	return 0;
-}
-
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-	i8259a_mask_and_ack_irq(irq);
+	i8259a_mask_and_ack_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
 	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
-static void
-alcor_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		alcor_enable_irq(irq);
-}
-
 static struct irq_chip alcor_irq_type = {
 	.name		= "ALCOR",
-	.startup	= alcor_startup_irq,
-	.shutdown	= alcor_disable_irq,
-	.enable		= alcor_enable_irq,
-	.disable	= alcor_disable_irq,
-	.ack		= alcor_mask_and_ack_irq,
-	.end		= alcor_end_irq,
+	.irq_unmask	= alcor_enable_irq,
+	.irq_mask	= alcor_disable_irq,
+	.irq_mask_ack	= alcor_mask_and_ack_irq,
 };
 
 static void
@@ -142,10 +125,10 @@
 		   on while IRQ probing.  */
 		if (i >= 16+20 && i <= 16+30)
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &alcor_irq_type;
+		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
-	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
 	init_i8259a_irqs();
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 14c8898..57eb630 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -46,39 +46,22 @@
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
-}
-
-static unsigned int
-cabriolet_startup_irq(unsigned int irq)
-{ 
-	cabriolet_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-cabriolet_end_irq(unsigned int irq)
-{ 
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		cabriolet_enable_irq(irq);
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
 	.name		= "CABRIOLET",
-	.startup	= cabriolet_startup_irq,
-	.shutdown	= cabriolet_disable_irq,
-	.enable		= cabriolet_enable_irq,
-	.disable	= cabriolet_disable_irq,
-	.ack		= cabriolet_disable_irq,
-	.end		= cabriolet_end_irq,
+	.irq_unmask	= cabriolet_enable_irq,
+	.irq_mask	= cabriolet_disable_irq,
+	.irq_mask_ack	= cabriolet_disable_irq,
 };
 
 static void 
@@ -122,8 +105,9 @@
 		outb(0xff, 0x806);
 
 		for (i = 16; i < 35; ++i) {
-			irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-			irq_desc[i].chip = &cabriolet_irq_type;
+			set_irq_chip_and_handler(i, &cabriolet_irq_type,
+				handle_level_irq);
+			irq_set_status_flags(i, IRQ_LEVEL);
 		}
 	}
 
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 4026502..481df4e 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -98,67 +98,39 @@
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << irq;
+	cached_irq_mask |= 1UL << d->irq;
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << irq);
-	tsunami_update_irq_hw(cached_irq_mask);
-	spin_unlock(&dp264_irq_lock);
-}
-
-static unsigned int
-dp264_startup_irq(unsigned int irq)
-{ 
-	dp264_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-dp264_end_irq(unsigned int irq)
-{ 
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		dp264_enable_irq(irq);
-}
-
-static void
-clipper_enable_irq(unsigned int irq)
-{
-	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << (irq - 16);
+	cached_irq_mask &= ~(1UL << d->irq);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << (irq - 16));
+	cached_irq_mask |= 1UL << (d->irq - 16);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
-static unsigned int
-clipper_startup_irq(unsigned int irq)
-{ 
-	clipper_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
 static void
-clipper_end_irq(unsigned int irq)
-{ 
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		clipper_enable_irq(irq);
+clipper_disable_irq(struct irq_data *d)
+{
+	spin_lock(&dp264_irq_lock);
+	cached_irq_mask &= ~(1UL << (d->irq - 16));
+	tsunami_update_irq_hw(cached_irq_mask);
+	spin_unlock(&dp264_irq_lock);
 }
 
 static void
@@ -177,10 +149,11 @@
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		   bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, *affinity);
+	cpu_set_irq_affinity(d->irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -188,10 +161,11 @@
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		     bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, *affinity);
+	cpu_set_irq_affinity(d->irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -199,25 +173,19 @@
 }
 
 static struct irq_chip dp264_irq_type = {
-	.name		= "DP264",
-	.startup	= dp264_startup_irq,
-	.shutdown	= dp264_disable_irq,
-	.enable		= dp264_enable_irq,
-	.disable	= dp264_disable_irq,
-	.ack		= dp264_disable_irq,
-	.end		= dp264_end_irq,
-	.set_affinity	= dp264_set_affinity,
+	.name			= "DP264",
+	.irq_unmask		= dp264_enable_irq,
+	.irq_mask		= dp264_disable_irq,
+	.irq_mask_ack		= dp264_disable_irq,
+	.irq_set_affinity	= dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
-	.name		= "CLIPPER",
-	.startup	= clipper_startup_irq,
-	.shutdown	= clipper_disable_irq,
-	.enable		= clipper_enable_irq,
-	.disable	= clipper_disable_irq,
-	.ack		= clipper_disable_irq,
-	.end		= clipper_end_irq,
-	.set_affinity	= clipper_set_affinity,
+	.name			= "CLIPPER",
+	.irq_unmask		= clipper_enable_irq,
+	.irq_mask		= clipper_disable_irq,
+	.irq_mask_ack		= clipper_disable_irq,
+	.irq_set_affinity	= clipper_set_affinity,
 };
 
 static void
@@ -302,8 +270,8 @@
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = ops;
+		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index df2090c..402e908 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -44,39 +44,22 @@
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
-}
-
-static unsigned int
-eb64p_startup_irq(unsigned int irq)
-{
-	eb64p_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-eb64p_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		eb64p_enable_irq(irq);
+	eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
 	.name		= "EB64P",
-	.startup	= eb64p_startup_irq,
-	.shutdown	= eb64p_disable_irq,
-	.enable		= eb64p_enable_irq,
-	.disable	= eb64p_disable_irq,
-	.ack		= eb64p_disable_irq,
-	.end		= eb64p_end_irq,
+	.irq_unmask	= eb64p_enable_irq,
+	.irq_mask	= eb64p_disable_irq,
+	.irq_mask_ack	= eb64p_disable_irq,
 };
 
 static void 
@@ -135,9 +118,9 @@
 	init_i8259a_irqs();
 
 	for (i = 16; i < 32; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &eb64p_irq_type;
-	}		
+		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
+	}
 
 	common_init_isa_dma();
 	setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 3ca1dbc..0b44a54 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -51,43 +51,28 @@
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	eiger_update_irq_hw(irq, mask);
 }
 
-static unsigned int
-eiger_startup_irq(unsigned int irq)
-{
-	eiger_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-eiger_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		eiger_enable_irq(irq);
-}
-
 static struct irq_chip eiger_irq_type = {
 	.name		= "EIGER",
-	.startup	= eiger_startup_irq,
-	.shutdown	= eiger_disable_irq,
-	.enable		= eiger_enable_irq,
-	.disable	= eiger_disable_irq,
-	.ack		= eiger_disable_irq,
-	.end		= eiger_end_irq,
+	.irq_unmask	= eiger_enable_irq,
+	.irq_mask	= eiger_disable_irq,
+	.irq_mask_ack	= eiger_disable_irq,
 };
 
 static void
@@ -153,8 +138,8 @@
 	init_i8259a_irqs();
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &eiger_irq_type;
+		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 7a7ae36..00341b7 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -62,70 +62,35 @@
  * world.
  */
 
-static unsigned int
-jensen_local_startup(unsigned int irq)
+static void
+jensen_local_enable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_startup_irq(1);
-	else
-		/*
-		 * For all true local interrupts, set the flag that prevents
-		 * the IPL from being dropped during handler processing.
-		 */
-		if (irq_desc[irq].action)
-			irq_desc[irq].action->flags |= IRQF_DISABLED;
-	return 0;
+	if (d->irq == 7)
+		i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_shutdown(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_disable_irq(1);
+	if (d->irq == 7)
+		i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_enable_irq(1);
-}
-
-static void
-jensen_local_disable(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_disable_irq(1);
-}
-
-static void
-jensen_local_ack(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_mask_and_ack_irq(1);
-}
-
-static void
-jensen_local_end(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_end_irq(1);
+	if (d->irq == 7)
+		i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
 	.name		= "LOCAL",
-	.startup	= jensen_local_startup,
-	.shutdown	= jensen_local_shutdown,
-	.enable		= jensen_local_enable,
-	.disable	= jensen_local_disable,
-	.ack		= jensen_local_ack,
-	.end		= jensen_local_end,
+	.irq_unmask	= jensen_local_enable,
+	.irq_mask	= jensen_local_disable,
+	.irq_mask_ack	= jensen_local_mask_ack,
 };
 
 static void 
@@ -158,7 +123,7 @@
 	}
 
 	/* If there is no handler yet... */
-	if (irq_desc[irq].action == NULL) {
+	if (!irq_has_action(irq)) {
 	    /* If it is a local interrupt that cannot be masked... */
 	    if (vector >= 0x900)
 	    {
@@ -206,11 +171,11 @@
 {
 	init_i8259a_irqs();
 
-	irq_desc[1].chip = &jensen_local_irq_type;
-	irq_desc[4].chip = &jensen_local_irq_type;
-	irq_desc[3].chip = &jensen_local_irq_type;
-	irq_desc[7].chip = &jensen_local_irq_type;
-	irq_desc[9].chip = &jensen_local_irq_type;
+	set_irq_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
 
 	common_init_isa_dma();
 }
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 0bb3b5c..e619107 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -104,9 +104,10 @@
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@
 		       __func__, irq);
 		return;
 	}
-		
+
 	spin_lock(&io7->irq_lock);
 	*ctl |= 1UL << 24;
 	mb();
@@ -124,9 +125,10 @@
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@
 		       __func__, irq);
 		return;
 	}
-		
+
 	spin_lock(&io7->irq_lock);
 	*ctl &= ~(1UL << 24);
 	mb();
@@ -143,60 +145,30 @@
 	spin_unlock(&io7->irq_lock);
 }
 
-static unsigned int
-io7_startup_irq(unsigned int irq)
-{
-	io7_enable_irq(irq);
-	return 0;	/* never anything pending */
-}
-
 static void
-io7_end_irq(unsigned int irq)
+marvel_irq_noop(struct irq_data *d)
 {
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		io7_enable_irq(irq);
-}
-
-static void
-marvel_irq_noop(unsigned int irq) 
-{ 
-	return; 
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq) 
-{ 
-	return 0; 
+	return;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
 	.name		= "LEGACY",
-	.startup	= marvel_irq_noop_return,
-	.shutdown	= marvel_irq_noop,
-	.enable		= marvel_irq_noop,
-	.disable	= marvel_irq_noop,
-	.ack		= marvel_irq_noop,
-	.end		= marvel_irq_noop,
+	.irq_mask	= marvel_irq_noop,
+	.irq_unmask	= marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
 	.name		= "LSI",
-	.startup	= io7_startup_irq,
-	.shutdown	= io7_disable_irq,
-	.enable		= io7_enable_irq,
-	.disable	= io7_disable_irq,
-	.ack		= io7_disable_irq,
-	.end		= io7_end_irq,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_mask_ack	= io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
 	.name		= "MSI",
-	.startup	= io7_startup_irq,
-	.shutdown	= io7_disable_irq,
-	.enable		= io7_enable_irq,
-	.disable	= io7_disable_irq,
-	.ack		= marvel_irq_noop,
-	.end		= io7_end_irq,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_ack	= marvel_irq_noop,
 };
 
 static void
@@ -304,8 +276,8 @@
 
 	/* Set up the lsi irqs.  */
 	for (i = 0; i < 128; ++i) {
-		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[base + i].chip = lsi_ops;
+		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	/* Disable the implemented irqs in hardware.  */
@@ -318,8 +290,8 @@
 
 	/* Set up the msi irqs.  */
 	for (i = 128; i < (128 + 512); ++i) {
-		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[base + i].chip = msi_ops;
+		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	for (i = 0; i < 16; ++i)
@@ -336,8 +308,8 @@
 
 	/* Reserve the legacy irqs.  */
 	for (i = 0; i < 16; ++i) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].chip = &marvel_legacy_irq_type;
+		set_irq_chip_and_handler(i, &marvel_legacy_irq_type,
+			handle_level_irq);
 	}
 
 	/* Init the io7 irqs.  */
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index ee88651..cf7f43d 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -43,39 +43,22 @@
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+	mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
-}
-
-static unsigned int
-mikasa_startup_irq(unsigned int irq)
-{
-	mikasa_enable_irq(irq);
-	return 0;
-}
-
-static void
-mikasa_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		mikasa_enable_irq(irq);
+	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
 	.name		= "MIKASA",
-	.startup	= mikasa_startup_irq,
-	.shutdown	= mikasa_disable_irq,
-	.enable		= mikasa_enable_irq,
-	.disable	= mikasa_disable_irq,
-	.ack		= mikasa_disable_irq,
-	.end		= mikasa_end_irq,
+	.irq_unmask	= mikasa_enable_irq,
+	.irq_mask	= mikasa_disable_irq,
+	.irq_mask_ack	= mikasa_disable_irq,
 };
 
 static void 
@@ -115,8 +98,8 @@
 	mikasa_update_irq_hw(0);
 
 	for (i = 16; i < 32; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &mikasa_irq_type;
+		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 86503fe..92bc188 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -48,39 +48,22 @@
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+	noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
-}
-
-static unsigned int
-noritake_startup_irq(unsigned int irq)
-{
-	noritake_enable_irq(irq);
-	return 0;
-}
-
-static void
-noritake_end_irq(unsigned int irq)
-{
-        if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-                noritake_enable_irq(irq);
+	noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
 	.name		= "NORITAKE",
-	.startup	= noritake_startup_irq,
-	.shutdown	= noritake_disable_irq,
-	.enable		= noritake_enable_irq,
-	.disable	= noritake_disable_irq,
-	.ack		= noritake_disable_irq,
-	.end		= noritake_end_irq,
+	.irq_unmask	= noritake_enable_irq,
+	.irq_mask	= noritake_disable_irq,
+	.irq_mask_ack	= noritake_disable_irq,
 };
 
 static void 
@@ -144,8 +127,8 @@
 	outw(0, 0x54c);
 
 	for (i = 16; i < 48; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &noritake_irq_type;
+		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 26c322b..936d414 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -56,9 +56,10 @@
   (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void 
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -76,9 +77,10 @@
 }
 
 static void 
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -96,9 +98,10 @@
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned int mask, mask1, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -121,28 +124,11 @@
 	spin_unlock(&rawhide_irq_lock);
 }
 
-static unsigned int
-rawhide_startup_irq(unsigned int irq)
-{
-	rawhide_enable_irq(irq);
-	return 0;
-}
-
-static void
-rawhide_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		rawhide_enable_irq(irq);
-}
-
 static struct irq_chip rawhide_irq_type = {
 	.name		= "RAWHIDE",
-	.startup	= rawhide_startup_irq,
-	.shutdown	= rawhide_disable_irq,
-	.enable		= rawhide_enable_irq,
-	.disable	= rawhide_disable_irq,
-	.ack		= rawhide_mask_and_ack_irq,
-	.end		= rawhide_end_irq,
+	.irq_unmask	= rawhide_enable_irq,
+	.irq_mask	= rawhide_disable_irq,
+	.irq_mask_ack	= rawhide_mask_and_ack_irq,
 };
 
 static void 
@@ -194,8 +180,8 @@
 	}
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &rawhide_irq_type;
+		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index be16112..cea22a6 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -47,39 +47,22 @@
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
-}
-
-static unsigned int
-rx164_startup_irq(unsigned int irq)
-{
-	rx164_enable_irq(irq);
-	return 0;
-}
-
-static void
-rx164_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		rx164_enable_irq(irq);
+	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
 	.name		= "RX164",
-	.startup	= rx164_startup_irq,
-	.shutdown	= rx164_disable_irq,
-	.enable		= rx164_enable_irq,
-	.disable	= rx164_disable_irq,
-	.ack		= rx164_disable_irq,
-	.end		= rx164_end_irq,
+	.irq_unmask	= rx164_enable_irq,
+	.irq_mask	= rx164_disable_irq,
+	.irq_mask_ack	= rx164_disable_irq,
 };
 
 static void 
@@ -116,8 +99,8 @@
 
 	rx164_update_irq_hw(0);
 	for (i = 16; i < 40; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &rx164_irq_type;
+		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index b2abe27..a349538 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -443,11 +443,11 @@
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -474,26 +474,12 @@
 #endif
 }
 
-static unsigned int
-sable_lynx_startup_irq(unsigned int irq)
-{
-	sable_lynx_enable_irq(irq);
-	return 0;
-}
-
 static void
-sable_lynx_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		sable_lynx_enable_irq(irq);
-}
-
-static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -503,12 +489,9 @@
 
 static struct irq_chip sable_lynx_irq_type = {
 	.name		= "SABLE/LYNX",
-	.startup	= sable_lynx_startup_irq,
-	.shutdown	= sable_lynx_disable_irq,
-	.enable		= sable_lynx_enable_irq,
-	.disable	= sable_lynx_disable_irq,
-	.ack		= sable_lynx_mask_and_ack_irq,
-	.end		= sable_lynx_end_irq,
+	.irq_unmask	= sable_lynx_enable_irq,
+	.irq_mask	= sable_lynx_disable_irq,
+	.irq_mask_ack	= sable_lynx_mask_and_ack_irq,
 };
 
 static void 
@@ -535,8 +518,9 @@
 	long i;
 
 	for (i = 0; i < nr_of_irqs; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &sable_lynx_irq_type;
+		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
+			handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 4da596b..42a5331 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -45,43 +45,28 @@
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	takara_update_irq_hw(irq, mask);
 }
 
-static unsigned int
-takara_startup_irq(unsigned int irq)
-{
-	takara_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-takara_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		takara_enable_irq(irq);
-}
-
 static struct irq_chip takara_irq_type = {
 	.name		= "TAKARA",
-	.startup	= takara_startup_irq,
-	.shutdown	= takara_disable_irq,
-	.enable		= takara_enable_irq,
-	.disable	= takara_disable_irq,
-	.ack		= takara_disable_irq,
-	.end		= takara_end_irq,
+	.irq_unmask	= takara_enable_irq,
+	.irq_mask	= takara_disable_irq,
+	.irq_mask_ack	= takara_disable_irq,
 };
 
 static void
@@ -153,8 +138,8 @@
 		takara_update_irq_hw(i, -1);
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &takara_irq_type;
+		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 9008d0f..8c13a0c 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -112,8 +112,9 @@
 }
 
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask |= 1UL << (irq - 16);
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,28 +122,15 @@
 }
 
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask &= ~(1UL << (irq - 16));
 	titan_update_irq_hw(titan_cached_irq_mask);
 	spin_unlock(&titan_irq_lock);
 }
 
-static unsigned int
-titan_startup_irq(unsigned int irq)
-{
-	titan_enable_irq(irq);
-	return 0;	/* never anything pending */
-}
-
-static void
-titan_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		titan_enable_irq(irq);
-}
-
 static void
 titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 {
@@ -158,8 +146,10 @@
 }
 
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+		       bool force)
 { 
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cpu_set_irq_affinity(irq - 16, *affinity);
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -189,20 +179,17 @@
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = ops;
+		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
 static struct irq_chip titan_irq_type = {
-       .name	       = "TITAN",
-       .startup        = titan_startup_irq,
-       .shutdown       = titan_disable_irq,
-       .enable         = titan_enable_irq,
-       .disable        = titan_disable_irq,
-       .ack            = titan_disable_irq,
-       .end            = titan_end_irq,
-       .set_affinity   = titan_set_irq_affinity,
+       .name			= "TITAN",
+       .irq_unmask		= titan_enable_irq,
+       .irq_mask		= titan_disable_irq,
+       .irq_mask_ack		= titan_disable_irq,
+       .irq_set_affinity	= titan_set_irq_affinity,
 };
 
 static irqreturn_t
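
sys_dp264.c and sys_titan.c above also carry the affinity hook across: the old .set_affinity(unsigned int irq, const struct cpumask *) method becomes .irq_set_affinity(struct irq_data *, const struct cpumask *, bool force), again pulling the irq number out of the irq_data argument. A stripped-down sketch of the provider side follows; the bar_* helpers are hypothetical stand-ins for the board-specific routing code, and the mask/unmask methods follow the earlier sketch and are omitted here.

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

static DEFINE_SPINLOCK(bar_irq_lock);
static unsigned long bar_cached_irq_mask;

/* board-specific stand-ins for the helpers used by the real chips */
static void bar_update_irq_hw(unsigned long mask)
{
}

static void bar_route_irq_to_cpus(unsigned int hwirq,
				  const struct cpumask *affinity)
{
}

static int bar_set_affinity(struct irq_data *d, const struct cpumask *affinity,
			    bool force)
{
	spin_lock(&bar_irq_lock);
	bar_route_irq_to_cpus(d->irq - 16, affinity);	/* irq comes from d->irq */
	bar_update_irq_hw(bar_cached_irq_mask);
	spin_unlock(&bar_irq_lock);

	return 0;	/* 0 == affinity accepted, as in the converted chips */
}

static struct irq_chip bar_irq_type = {
	.name			= "BAR",
	/* .irq_unmask/.irq_mask/.irq_mask_ack as in the earlier sketch */
	.irq_set_affinity	= bar_set_affinity,
};
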
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 62fd972..ca60a38 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -104,10 +104,12 @@
 }
 
 static void
-wildfire_enable_irq(unsigned int irq)
+wildfire_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < 16)
-		i8259a_enable_irq(irq);
+		i8259a_enable_irq(d);
 
 	spin_lock(&wildfire_irq_lock);
 	set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@
 }
 
 static void
-wildfire_disable_irq(unsigned int irq)
+wildfire_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < 16)
-		i8259a_disable_irq(irq);
+		i8259a_disable_irq(d);
 
 	spin_lock(&wildfire_irq_lock);
 	clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@
 }
 
 static void
-wildfire_mask_and_ack_irq(unsigned int irq)
+wildfire_mask_and_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < 16)
-		i8259a_mask_and_ack_irq(irq);
+		i8259a_mask_and_ack_irq(d);
 
 	spin_lock(&wildfire_irq_lock);
 	clear_bit(irq, &cached_irq_mask);
@@ -139,32 +145,11 @@
 	spin_unlock(&wildfire_irq_lock);
 }
 
-static unsigned int
-wildfire_startup_irq(unsigned int irq)
-{ 
-	wildfire_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-wildfire_end_irq(unsigned int irq)
-{ 
-#if 0
-	if (!irq_desc[irq].action)
-		printk("got irq %d\n", irq);
-#endif
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		wildfire_enable_irq(irq);
-}
-
 static struct irq_chip wildfire_irq_type = {
 	.name		= "WILDFIRE",
-	.startup	= wildfire_startup_irq,
-	.shutdown	= wildfire_disable_irq,
-	.enable		= wildfire_enable_irq,
-	.disable	= wildfire_disable_irq,
-	.ack		= wildfire_mask_and_ack_irq,
-	.end		= wildfire_end_irq,
+	.irq_unmask	= wildfire_enable_irq,
+	.irq_mask	= wildfire_disable_irq,
+	.irq_mask_ack	= wildfire_mask_and_ack_irq,
 };
 
 static void __init
@@ -198,18 +183,21 @@
 	for (i = 0; i < 16; ++i) {
 		if (i == 2)
 			continue;
-		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i+irq_bias].chip = &wildfire_irq_type;
+		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
+			handle_level_irq);
+		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
 	}
 
-	irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-	irq_desc[36+irq_bias].chip = &wildfire_irq_type;
+	set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
+		handle_level_irq);
+	irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
 	for (i = 40; i < 64; ++i) {
-		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i+irq_bias].chip = &wildfire_irq_type;
+		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
+			handle_level_irq);
+		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
 	}
 
-	setup_irq(32+irq_bias, &isa_enable);	
+	setup_irq(32+irq_bias, &isa_enable);
 }
 
 static void __init
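
Note: the Alpha conversions above (takara, titan, wildfire) all follow the same genirq migration: irq_chip callbacks now take a struct irq_data pointer instead of a raw IRQ number, the open-coded startup/end hooks give way to the mask/unmask/mask_ack trio, and per-IRQ setup stops poking irq_desc[] directly. A minimal sketch of the post-conversion shape, with hypothetical names (example_*) and locking omitted, assuming a simple cached-mask controller:

#include <linux/init.h>
#include <linux/irq.h>

static unsigned long example_cached_mask;

static void example_mask(struct irq_data *d)
{
	/* the IRQ number formerly passed directly is now d->irq */
	example_cached_mask &= ~(1UL << (d->irq - 16));
	/* ... write example_cached_mask to the controller here ... */
}

static void example_unmask(struct irq_data *d)
{
	example_cached_mask |= 1UL << (d->irq - 16);
	/* ... write example_cached_mask to the controller here ... */
}

static struct irq_chip example_irq_chip = {
	.name		= "EXAMPLE",
	.irq_unmask	= example_unmask,
	.irq_mask	= example_mask,
	.irq_mask_ack	= example_mask,
};

static void __init example_init_irq(void)
{
	unsigned int i;

	/* replaces the old irq_desc[i].status / irq_desc[i].chip assignments */
	for (i = 16; i < 48; ++i) {
		set_irq_chip_and_handler(i, &example_irq_chip, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}
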
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 0f1d849..c1f3e7c 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -506,7 +506,7 @@
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
  		retval = -1;
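
Note: besides the severity change, the switch to printk_once() means the message fires at most once per boot, so a drifting RTC no longer spams the log every time the update is refused. An illustrative sketch of the same idiom (names are made up, the 30-minute check mirrors the surrounding code):

#include <linux/kernel.h>

static int example_update_rtc(int cmos_minutes, int real_minutes)
{
	/* refuse updates that are too far apart, but only complain once */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk_once(KERN_NOTICE
			"example: can't update RTC from %d to %d\n",
			cmos_minutes, real_minutes);
		return -1;
	}
	return 0;
}
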
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 9b72c59..c0a83ab 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -2,8 +2,8 @@
 # Makefile for alpha-specific library files..
 #
 
-EXTRA_AFLAGS := $(KBUILD_CFLAGS)
-EXTRA_CFLAGS := -Werror
+asflags-y := $(KBUILD_CFLAGS)
+ccflags-y := -Werror
 
 # Many of these routines have implementations tuned for ev6.
 # Choose them iff we're targeting ev6 specifically.
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile
index 359ef08..7f46719 100644
--- a/arch/alpha/math-emu/Makefile
+++ b/arch/alpha/math-emu/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the FPU instruction emulation.
 #
 
-EXTRA_CFLAGS := -w
+ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index 09399c5..c993d3f 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux alpha-specific parts of the memory manager.
 #
 
-EXTRA_CFLAGS := -Werror
+ccflags-y := -Werror
 
 obj-y	:= init.o fault.o extable.o
 
diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile
index 4aa5624..3473de7 100644
--- a/arch/alpha/oprofile/Makefile
+++ b/arch/alpha/oprofile/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS := -Werror -Wno-sign-compare
+ccflags-y := -Werror -Wno-sign-compare
 
 obj-$(CONFIG_OPROFILE) += oprofile.o
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e2f8011..166efa2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -26,6 +26,8 @@
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_SPARSE_IRQ
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
@@ -97,10 +99,6 @@
 	  <file:Documentation/mca.txt> (and especially the web page given
 	  there) before attempting to build an MCA bus kernel.
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config STACKTRACE_SUPPORT
 	bool
 	default y
@@ -180,9 +178,6 @@
 config ARCH_MTD_XIP
 	bool
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config ARM_L1_CACHE_SHIFT_6
 	bool
 	help
@@ -368,7 +363,7 @@
 	bool "Freescale MXS-based"
 	select GENERIC_CLOCKEVENTS
 	select ARCH_REQUIRE_GPIOLIB
-	select COMMON_CLKDEV
+	select CLKDEV_LOOKUP
 	help
 	  Support for Freescale MXS-based family of processors
 
@@ -771,6 +766,7 @@
 	select ARCH_SPARSEMEM_ENABLE
 	select GENERIC_GPIO
 	select HAVE_CLK
+	select ARCH_HAS_CPUFREQ
 	select GENERIC_CLOCKEVENTS
 	select HAVE_S3C_RTC if RTC_CLASS
 	select HAVE_S3C2410_I2C if I2C
@@ -1181,6 +1177,31 @@
 	  visible impact on the overall performance or power consumption of the
 	  processor.
 
+config ARM_ERRATA_751472
+	bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 751472 Cortex-A9 (prior
+	  to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
+	  completion of a following broadcasted operation if the second
+	  operation is received by a CPU before the ICIALLUIS has completed,
+	  potentially leading to corrupted entries in the cache or TLB.
+
+config ARM_ERRATA_753970
+	bool "ARM errata: cache sync operation may be faulty"
+	depends on CACHE_PL310
+	help
+	  This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+
+	  Under some conditions the effect of a cache sync operation on
+	  the store buffer still remains when the operation completes.
+	  This means that the store buffer is always asked to drain and
+	  this prevents it from merging any further writes. The workaround
+	  is to replace the normal offset of cache sync operation (0x730)
+	  by another offset targeting an unmapped PL310 register 0x740.
+	  This has the same effect as the cache sync operation: store buffer
+	  drain and waiting for all buffers empty.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1281,7 +1302,7 @@
 config SMP_ON_UP
 	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
-	depends on SMP && !XIP
+	depends on SMP && !XIP_KERNEL
 	default y
 	help
 	  SMP kernels contain instructions which fail on non-SMP processors.
@@ -1395,7 +1416,7 @@
 
 config OABI_COMPAT
 	bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
-	depends on AEABI && EXPERIMENTAL
+	depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
 	default y
 	help
 	  This option preserves the old syscall interface along with the
@@ -1452,15 +1473,6 @@
 	  Enable hardware performance counter support for perf events. If
 	  disabled, perf events will use software events only.
 
-config SPARSE_IRQ
-	def_bool n
-	help
-	  This enables support for sparse irqs. This is useful in general
-	  as most CPUs have a fairly sparse array of IRQ vectors, which
-	  the irq_desc then maps directly on to. Systems with a high
-	  number of off-chip IRQs will want to treat this as
-	  experimental until they have been independently verified.
-
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c22c1ad..6f7b292 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -15,7 +15,7 @@
 LDFLAGS_vmlinux	+= --be8
 endif
 
-OBJCOPYFLAGS	:=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS	:=-O binary -R .comment -S
 GZFLAGS		:=-9
 #KBUILD_CFLAGS	+=-pipe
 # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index ab204db..c602896 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,3 +1,7 @@
 font.c
-piggy.gz
+lib1funcs.S
+piggy.gzip
+piggy.lzo
+piggy.lzma
+vmlinux
 vmlinux.lds
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 778655f..ea5ee4d 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -6,6 +6,8 @@
 
 config ARM_VIC_NR
 	int
+	default 4 if ARCH_S5PV210
+	default 3 if ARCH_S5P6442 || ARCH_S5PC100
 	default 2
 	depends on ARM_VIC
 	help
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0b89ef0..2243772 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -50,57 +50,56 @@
 
 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
 
-static inline void __iomem *gic_dist_base(unsigned int irq)
+static inline void __iomem *gic_dist_base(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
 	return gic_data->dist_base;
 }
 
-static inline void __iomem *gic_cpu_base(unsigned int irq)
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
 	return gic_data->cpu_base;
 }
 
-static inline unsigned int gic_irq(unsigned int irq)
+static inline unsigned int gic_irq(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
-	return irq - gic_data->irq_offset;
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+	return d->irq - gic_data->irq_offset;
 }
 
 /*
  * Routines to acknowledge, disable and enable interrupts
  */
-static void gic_ack_irq(unsigned int irq)
+static void gic_ack_irq(struct irq_data *d)
 {
-
 	spin_lock(&irq_controller_lock);
-	writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
+	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 	spin_unlock(&irq_controller_lock);
 }
 
-static void gic_mask_irq(unsigned int irq)
+static void gic_mask_irq(struct irq_data *d)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
+	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
 
-static void gic_unmask_irq(unsigned int irq)
+static void gic_unmask_irq(struct irq_data *d)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
+	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
 
-static int gic_set_type(unsigned int irq, unsigned int type)
+static int gic_set_type(struct irq_data *d, unsigned int type)
 {
-	void __iomem *base = gic_dist_base(irq);
-	unsigned int gicirq = gic_irq(irq);
+	void __iomem *base = gic_dist_base(d);
+	unsigned int gicirq = gic_irq(d);
 	u32 enablemask = 1 << (gicirq % 32);
 	u32 enableoff = (gicirq / 32) * 4;
 	u32 confmask = 0x2 << ((gicirq % 16) * 2);
@@ -143,21 +142,22 @@
 }
 
 #ifdef CONFIG_SMP
-static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
+static int
+gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
 {
-	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
-	unsigned int shift = (irq % 4) * 8;
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+	unsigned int shift = (d->irq % 4) * 8;
 	unsigned int cpu = cpumask_first(mask_val);
 	u32 val;
 	struct irq_desc *desc;
 
 	spin_lock(&irq_controller_lock);
-	desc = irq_to_desc(irq);
+	desc = irq_to_desc(d->irq);
 	if (desc == NULL) {
 		spin_unlock(&irq_controller_lock);
 		return -EINVAL;
 	}
-	desc->node = cpu;
+	d->node = cpu;
 	val = readl(reg) & ~(0xff << shift);
 	val |= 1 << (cpu + shift);
 	writel(val, reg);
@@ -175,7 +175,7 @@
 	unsigned long status;
 
 	/* primary controller ack'ing */
-	chip->ack(irq);
+	chip->irq_ack(&desc->irq_data);
 
 	spin_lock(&irq_controller_lock);
 	status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
@@ -193,17 +193,17 @@
 
  out:
 	/* primary controller unmasking */
-	chip->unmask(irq);
+	chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip gic_chip = {
-	.name		= "GIC",
-	.ack		= gic_ack_irq,
-	.mask		= gic_mask_irq,
-	.unmask		= gic_unmask_irq,
-	.set_type	= gic_set_type,
+	.name			= "GIC",
+	.irq_ack		= gic_ack_irq,
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_set_type		= gic_set_type,
 #ifdef CONFIG_SMP
-	.set_affinity	= gic_set_cpu,
+	.irq_set_affinity	= gic_set_cpu,
 #endif
 };
 
@@ -337,7 +337,7 @@
 
 	local_irq_save(flags);
 	irq_to_desc(irq)->status |= IRQ_NOPROBE;
-	gic_unmask_irq(irq);
+	gic_unmask_irq(irq_get_irq_data(irq));
 	local_irq_restore(flags);
 }
 
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 42ff90b..fcddd48 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -31,8 +31,10 @@
 
 #define MAX_SLOTS		21
 
-static void it8152_mask_irq(unsigned int irq)
+static void it8152_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
        if (irq >= IT8152_LD_IRQ(0)) {
 	       __raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) |
 			    (1 << (irq - IT8152_LD_IRQ(0)))),
@@ -48,8 +50,10 @@
        }
 }
 
-static void it8152_unmask_irq(unsigned int irq)
+static void it8152_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
        if (irq >= IT8152_LD_IRQ(0)) {
 	       __raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) &
 			     ~(1 << (irq - IT8152_LD_IRQ(0)))),
@@ -67,9 +71,9 @@
 
 static struct irq_chip it8152_irq_chip = {
 	.name		= "it8152",
-	.ack		= it8152_mask_irq,
-	.mask		= it8152_mask_irq,
-	.unmask		= it8152_unmask_irq,
+	.irq_ack	= it8152_mask_irq,
+	.irq_mask	= it8152_mask_irq,
+	.irq_unmask	= it8152_unmask_irq,
 };
 
 void it8152_init_irq(void)
@@ -236,7 +240,7 @@
 
 /*
  * The following functions are needed for DMA bouncing.
- * ITE8152 chip can addrees up to 64MByte, so all the devices
+ * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
 
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 9dff07c..a026a6b 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -144,7 +144,7 @@
 	int req, i;
 
 	/* Acknowledge the parent IRQ */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	/* check why this interrupt was generated */
 	req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00;
@@ -161,33 +161,33 @@
 	}
 }
 
-static void locomo_ack_irq(unsigned int irq)
+static void locomo_ack_irq(struct irq_data *d)
 {
 }
 
-static void locomo_mask_irq(unsigned int irq)
+static void locomo_mask_irq(struct irq_data *d)
 {
-	struct locomo *lchip = get_irq_chip_data(irq);
+	struct locomo *lchip = irq_data_get_irq_chip_data(d);
 	unsigned int r;
 	r = locomo_readl(lchip->base + LOCOMO_ICR);
-	r &= ~(0x0010 << (irq - lchip->irq_base));
+	r &= ~(0x0010 << (d->irq - lchip->irq_base));
 	locomo_writel(r, lchip->base + LOCOMO_ICR);
 }
 
-static void locomo_unmask_irq(unsigned int irq)
+static void locomo_unmask_irq(struct irq_data *d)
 {
-	struct locomo *lchip = get_irq_chip_data(irq);
+	struct locomo *lchip = irq_data_get_irq_chip_data(d);
 	unsigned int r;
 	r = locomo_readl(lchip->base + LOCOMO_ICR);
-	r |= (0x0010 << (irq - lchip->irq_base));
+	r |= (0x0010 << (d->irq - lchip->irq_base));
 	locomo_writel(r, lchip->base + LOCOMO_ICR);
 }
 
 static struct irq_chip locomo_chip = {
-	.name	= "LOCOMO",
-	.ack	= locomo_ack_irq,
-	.mask	= locomo_mask_irq,
-	.unmask	= locomo_unmask_irq,
+	.name		= "LOCOMO",
+	.irq_ack	= locomo_ack_irq,
+	.irq_mask	= locomo_mask_irq,
+	.irq_unmask	= locomo_unmask_irq,
 };
 
 static void locomo_setup_irq(struct locomo *lchip)
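
Note: the GIC and locomo conversions (and the sa1111/VIC ones below) also switch the per-controller state lookup from get_irq_chip_data(irq) to irq_data_get_irq_chip_data(d), and chained demux handlers now reach the parent chip through desc->irq_data. A condensed sketch of that pattern, with hypothetical names and register offsets, assuming chip data was attached to the child IRQs with set_irq_chip_data() and handler data to the parent IRQ with set_irq_data():

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

struct example_intc {
	void __iomem	*base;
	unsigned int	irq_base;
};

#define EXAMPLE_STATUS		0x00	/* hypothetical register layout */
#define EXAMPLE_MASK_SET	0x10

static void example_child_mask(struct irq_data *d)
{
	struct example_intc *intc = irq_data_get_irq_chip_data(d);

	writel(1 << (d->irq - intc->irq_base), intc->base + EXAMPLE_MASK_SET);
}

static void example_demux(unsigned int irq, struct irq_desc *desc)
{
	struct example_intc *intc = get_irq_data(irq);
	unsigned long pending;
	int bit;

	/* ack the level-triggered parent through its own chip */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	pending = readl(intc->base + EXAMPLE_STATUS);
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(intc->irq_base + bit);

	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
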
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index c0258a8..eb9796b 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -210,7 +210,7 @@
 
 	sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
 
@@ -228,35 +228,35 @@
 			generic_handle_irq(i + sachip->irq_base);
 
 	/* For level-based interrupts */
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 #define SA1111_IRQMASK_LO(x)	(1 << (x - sachip->irq_base))
 #define SA1111_IRQMASK_HI(x)	(1 << (x - sachip->irq_base - 32))
 
-static void sa1111_ack_irq(unsigned int irq)
+static void sa1111_ack_irq(struct irq_data *d)
 {
 }
 
-static void sa1111_mask_lowirq(unsigned int irq)
+static void sa1111_mask_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie0;
 
 	ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
-	ie0 &= ~SA1111_IRQMASK_LO(irq);
+	ie0 &= ~SA1111_IRQMASK_LO(d->irq);
 	writel(ie0, mapbase + SA1111_INTEN0);
 }
 
-static void sa1111_unmask_lowirq(unsigned int irq)
+static void sa1111_unmask_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie0;
 
 	ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
-	ie0 |= SA1111_IRQMASK_LO(irq);
+	ie0 |= SA1111_IRQMASK_LO(d->irq);
 	sa1111_writel(ie0, mapbase + SA1111_INTEN0);
 }
 
@@ -267,11 +267,11 @@
  * be triggered.  In fact, its very difficult, if not impossible to get
  * INTSET to re-trigger the interrupt.
  */
-static int sa1111_retrigger_lowirq(unsigned int irq)
+static int sa1111_retrigger_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long ip0;
 	int i;
 
@@ -279,21 +279,21 @@
 	for (i = 0; i < 8; i++) {
 		sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
 		sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
-		if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask)
+		if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
 			break;
 	}
 
 	if (i == 8)
 		printk(KERN_ERR "Danger Will Robinson: failed to "
-			"re-trigger IRQ%d\n", irq);
+			"re-trigger IRQ%d\n", d->irq);
 	return i == 8 ? -1 : 0;
 }
 
-static int sa1111_type_lowirq(unsigned int irq, unsigned int flags)
+static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long ip0;
 
 	if (flags == IRQ_TYPE_PROBE)
@@ -313,11 +313,11 @@
 	return 0;
 }
 
-static int sa1111_wake_lowirq(unsigned int irq, unsigned int on)
+static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long we0;
 
 	we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
@@ -332,33 +332,33 @@
 
 static struct irq_chip sa1111_low_chip = {
 	.name		= "SA1111-l",
-	.ack		= sa1111_ack_irq,
-	.mask		= sa1111_mask_lowirq,
-	.unmask		= sa1111_unmask_lowirq,
-	.retrigger	= sa1111_retrigger_lowirq,
-	.set_type	= sa1111_type_lowirq,
-	.set_wake	= sa1111_wake_lowirq,
+	.irq_ack	= sa1111_ack_irq,
+	.irq_mask	= sa1111_mask_lowirq,
+	.irq_unmask	= sa1111_unmask_lowirq,
+	.irq_retrigger	= sa1111_retrigger_lowirq,
+	.irq_set_type	= sa1111_type_lowirq,
+	.irq_set_wake	= sa1111_wake_lowirq,
 };
 
-static void sa1111_mask_highirq(unsigned int irq)
+static void sa1111_mask_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie1;
 
 	ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
-	ie1 &= ~SA1111_IRQMASK_HI(irq);
+	ie1 &= ~SA1111_IRQMASK_HI(d->irq);
 	sa1111_writel(ie1, mapbase + SA1111_INTEN1);
 }
 
-static void sa1111_unmask_highirq(unsigned int irq)
+static void sa1111_unmask_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie1;
 
 	ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
-	ie1 |= SA1111_IRQMASK_HI(irq);
+	ie1 |= SA1111_IRQMASK_HI(d->irq);
 	sa1111_writel(ie1, mapbase + SA1111_INTEN1);
 }
 
@@ -369,11 +369,11 @@
  * be triggered.  In fact, its very difficult, if not impossible to get
  * INTSET to re-trigger the interrupt.
  */
-static int sa1111_retrigger_highirq(unsigned int irq)
+static int sa1111_retrigger_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long ip1;
 	int i;
 
@@ -387,15 +387,15 @@
 
 	if (i == 8)
 		printk(KERN_ERR "Danger Will Robinson: failed to "
-			"re-trigger IRQ%d\n", irq);
+			"re-trigger IRQ%d\n", d->irq);
 	return i == 8 ? -1 : 0;
 }
 
-static int sa1111_type_highirq(unsigned int irq, unsigned int flags)
+static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long ip1;
 
 	if (flags == IRQ_TYPE_PROBE)
@@ -415,11 +415,11 @@
 	return 0;
 }
 
-static int sa1111_wake_highirq(unsigned int irq, unsigned int on)
+static int sa1111_wake_highirq(struct irq_data *d, unsigned int on)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long we1;
 
 	we1 = sa1111_readl(mapbase + SA1111_WAKEEN1);
@@ -434,12 +434,12 @@
 
 static struct irq_chip sa1111_high_chip = {
 	.name		= "SA1111-h",
-	.ack		= sa1111_ack_irq,
-	.mask		= sa1111_mask_highirq,
-	.unmask		= sa1111_unmask_highirq,
-	.retrigger	= sa1111_retrigger_highirq,
-	.set_type	= sa1111_type_highirq,
-	.set_wake	= sa1111_wake_highirq,
+	.irq_ack	= sa1111_ack_irq,
+	.irq_mask	= sa1111_mask_highirq,
+	.irq_unmask	= sa1111_unmask_highirq,
+	.irq_retrigger	= sa1111_retrigger_highirq,
+	.irq_set_type	= sa1111_type_highirq,
+	.irq_set_wake	= sa1111_wake_highirq,
 };
 
 static void sa1111_setup_irq(struct sa1111 *sachip)
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index ba65f6e..ae5fe72 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -70,7 +70,7 @@
  * vic_init2 - common initialisation code
  * @base: Base of the VIC.
  *
- * Common initialisation code for registeration
+ * Common initialisation code for registration
  * and resume.
 */
 static void vic_init2(void __iomem *base)
@@ -204,26 +204,26 @@
 static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
 #endif /* CONFIG_PM */
 
-static void vic_ack_irq(unsigned int irq)
+static void vic_ack_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->irq & 31;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 	/* moreover, clear the soft-triggered, in case it was the reason */
 	writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
 }
 
-static void vic_mask_irq(unsigned int irq)
+static void vic_mask_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->irq & 31;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 }
 
-static void vic_unmask_irq(unsigned int irq)
+static void vic_unmask_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->irq & 31;
 	writel(1 << irq, base + VIC_INT_ENABLE);
 }
 
@@ -242,10 +242,10 @@
 	return NULL;
 }
 
-static int vic_set_wake(unsigned int irq, unsigned int on)
+static int vic_set_wake(struct irq_data *d, unsigned int on)
 {
-	struct vic_device *v = vic_from_irq(irq);
-	unsigned int off = irq & 31;
+	struct vic_device *v = vic_from_irq(d->irq);
+	unsigned int off = d->irq & 31;
 	u32 bit = 1 << off;
 
 	if (!v)
@@ -267,10 +267,10 @@
 
 static struct irq_chip vic_chip = {
 	.name		= "VIC",
-	.ack		= vic_ack_irq,
-	.mask		= vic_mask_irq,
-	.unmask		= vic_unmask_irq,
-	.set_wake	= vic_set_wake,
+	.irq_ack	= vic_ack_irq,
+	.irq_mask	= vic_mask_irq,
+	.irq_unmask	= vic_unmask_irq,
+	.irq_set_wake	= vic_set_wake,
 };
 
 static void __init vic_disable(void __iomem *base)
diff --git a/arch/arm/configs/ag5evm_defconfig b/arch/arm/configs/ag5evm_defconfig
index 2b9cf56..212ead3 100644
--- a/arch/arm/configs/ag5evm_defconfig
+++ b/arch/arm/configs/ag5evm_defconfig
@@ -10,7 +10,7 @@
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index 5536c48..f0dea52 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -3,7 +3,7 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SHMEM is not set
diff --git a/arch/arm/configs/at572d940hfek_defconfig b/arch/arm/configs/at572d940hfek_defconfig
index 695e32d..1b1158a 100644
--- a/arch/arm/configs/at572d940hfek_defconfig
+++ b/arch/arm/configs/at572d940hfek_defconfig
@@ -17,7 +17,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 3a1ad15..5b54abb 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -1,6 +1,6 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
 CONFIG_ARCH_SA1100=y
diff --git a/arch/arm/configs/bcmring_defconfig b/arch/arm/configs/bcmring_defconfig
index 75984cd..795374d 100644
--- a/arch/arm/configs/bcmring_defconfig
+++ b/arch/arm/configs/bcmring_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/arm/configs/cm_x2xx_defconfig b/arch/arm/configs/cm_x2xx_defconfig
index dcfbcf3..a93ff8d 100644
--- a/arch/arm/configs/cm_x2xx_defconfig
+++ b/arch/arm/configs/cm_x2xx_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm/configs/colibri_pxa270_defconfig b/arch/arm/configs/colibri_pxa270_defconfig
index f52c64e..2ef2c5e 100644
--- a/arch/arm/configs/colibri_pxa270_defconfig
+++ b/arch/arm/configs/colibri_pxa270_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 310f9a6..6c56ad0 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BASE_FULL is not set
 # CONFIG_EPOLL is not set
 CONFIG_SLOB=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 4a1fa81..e53c475 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index cdc40c4..88ccde0 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 2519cc5..889922a 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index 9359e1b..54bf5ee 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/ebsa110_defconfig b/arch/arm/configs/ebsa110_defconfig
index c319418..14559db 100644
--- a/arch/arm/configs/ebsa110_defconfig
+++ b/arch/arm/configs/ebsa110_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_ARCH_EBSA110=y
 CONFIG_PCCARD=m
diff --git a/arch/arm/configs/edb7211_defconfig b/arch/arm/configs/edb7211_defconfig
index 7b62be1..d52ded35 100644
--- a/arch/arm/configs/edb7211_defconfig
+++ b/arch/arm/configs/edb7211_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_ARCH_CLPS711X=y
 CONFIG_ARCH_EDB7211=y
diff --git a/arch/arm/configs/em_x270_defconfig b/arch/arm/configs/em_x270_defconfig
index d7db34f..60a21e0 100644
--- a/arch/arm/configs/em_x270_defconfig
+++ b/arch/arm/configs/em_x270_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 6d6689c..8e97b2f 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig
index 1691dea..d68ac67 100644
--- a/arch/arm/configs/eseries_pxa_defconfig
+++ b/arch/arm/configs/eseries_pxa_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index c4eeb6d..227a477 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -7,7 +7,7 @@
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 4f925ea..038518a 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_MODULES=y
 CONFIG_ARCH_FOOTBRIDGE=y
diff --git a/arch/arm/configs/fortunet_defconfig b/arch/arm/configs/fortunet_defconfig
index e11c7ea..840fced 100644
--- a/arch/arm/configs/fortunet_defconfig
+++ b/arch/arm/configs/fortunet_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_ARCH_CLPS711X=y
 CONFIG_ARCH_FORTUNET=y
diff --git a/arch/arm/configs/h5000_defconfig b/arch/arm/configs/h5000_defconfig
index ac336f1..37903e3 100644
--- a/arch/arm/configs/h5000_defconfig
+++ b/arch/arm/configs/h5000_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index ade55c8..176ec22 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -6,7 +6,7 @@
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/ixp2000_defconfig b/arch/arm/configs/ixp2000_defconfig
index 9083246..8405ade 100644
--- a/arch/arm/configs/ixp2000_defconfig
+++ b/arch/arm/configs/ixp2000_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/ixp23xx_defconfig b/arch/arm/configs/ixp23xx_defconfig
index 7fc056a..6887176 100644
--- a/arch/arm/configs/ixp23xx_defconfig
+++ b/arch/arm/configs/ixp23xx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 5c50239..063e2ab 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arm/configs/loki_defconfig b/arch/arm/configs/loki_defconfig
index e1eaff7..1ba752b 100644
--- a/arch/arm/configs/loki_defconfig
+++ b/arch/arm/configs/loki_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/lpd7a400_defconfig b/arch/arm/configs/lpd7a400_defconfig
index 20caaab..5a48f17 100644
--- a/arch/arm/configs/lpd7a400_defconfig
+++ b/arch/arm/configs/lpd7a400_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
 # CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/lpd7a404_defconfig b/arch/arm/configs/lpd7a404_defconfig
index 1efcce9..22d0631 100644
--- a/arch/arm/configs/lpd7a404_defconfig
+++ b/arch/arm/configs/lpd7a404_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=16
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index af805e8..a88e64d 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index b0d0824..7305ebd 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_SLUB_DEBUG is not set
 CONFIG_PROFILING=y
diff --git a/arch/arm/configs/mx1_defconfig b/arch/arm/configs/mx1_defconfig
index 2f38d97..b39b5ce 100644
--- a/arch/arm/configs/mx1_defconfig
+++ b/arch/arm/configs/mx1_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mx21_defconfig b/arch/arm/configs/mx21_defconfig
index 6454e18..411f88d 100644
--- a/arch/arm/configs/mx21_defconfig
+++ b/arch/arm/configs/mx21_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/mx27_defconfig b/arch/arm/configs/mx27_defconfig
index 813cfb3..9ad4c656 100644
--- a/arch/arm/configs/mx27_defconfig
+++ b/arch/arm/configs/mx27_defconfig
@@ -4,7 +4,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/mx3_defconfig b/arch/arm/configs/mx3_defconfig
index e648ea3..7c4b30b 100644
--- a/arch/arm/configs/mx3_defconfig
+++ b/arch/arm/configs/mx3_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index 5c7a872..9cba68c 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 0e2dc26..37207d1 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -7,7 +7,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index a350cc6..7b63462 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index ccedde1..ae890ca 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 439323b..a288d70 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SLUB_DEBUG is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig
index 583a061..2f136c3 100644
--- a/arch/arm/configs/pcm027_defconfig
+++ b/arch/arm/configs/pcm027_defconfig
@@ -7,7 +7,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/pcontrol_g20_defconfig b/arch/arm/configs/pcontrol_g20_defconfig
index b42ee62..c75c9fc 100644
--- a/arch/arm/configs/pcontrol_g20_defconfig
+++ b/arch/arm/configs/pcontrol_g20_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arm/configs/pleb_defconfig b/arch/arm/configs/pleb_defconfig
index d1efbdc..cb08cc5 100644
--- a/arch/arm/configs/pleb_defconfig
+++ b/arch/arm/configs/pleb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_SHMEM is not set
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/pnx4008_defconfig b/arch/arm/configs/pnx4008_defconfig
index bd481f0..35a31cc 100644
--- a/arch/arm/configs/pnx4008_defconfig
+++ b/arch/arm/configs/pnx4008_defconfig
@@ -5,7 +5,7 @@
 CONFIG_AUDIT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/simpad_defconfig b/arch/arm/configs/simpad_defconfig
index af3b12e..d335815 100644
--- a/arch/arm/configs/simpad_defconfig
+++ b/arch/arm/configs/simpad_defconfig
@@ -2,7 +2,7 @@
 CONFIG_LOCALVERSION="oe1"
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index aebd4bb..7015827 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/stmp378x_defconfig b/arch/arm/configs/stmp378x_defconfig
index 94a2d90..1079c2b 100644
--- a/arch/arm/configs/stmp378x_defconfig
+++ b/arch/arm/configs/stmp378x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/stmp37xx_defconfig b/arch/arm/configs/stmp37xx_defconfig
index d8ee58c..564a5cc 100644
--- a/arch/arm/configs/stmp37xx_defconfig
+++ b/arch/arm/configs/stmp37xx_defconfig
@@ -5,7 +5,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index e89ca19..95c0f0d 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_BUG is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 37f4834..3162173 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -7,7 +7,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index c1c252c..4a5a126 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -3,7 +3,7 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 9d7bf5e..8b0c717 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=13
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 # CONFIG_SHMEM is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 70d47db..5b55041 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SHMEM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 338ff19..7b1bb2b 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -285,7 +285,7 @@
 	if (__builtin_constant_p(x))
 	       return constant_fls(x);
 
-	asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
+	asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
        	ret = 32 - ret;
 	return ret;
 }
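
Note: dropping the "cc" clobber is correct because CLZ does not modify the condition flags, so the compiler no longer has to assume they are trashed around the instruction. A standalone version of the same idiom, for reference only:

/* fls()-style helper: CLZ returns 32 for an input of 0, so no special case */
static inline int example_fls(unsigned int x)
{
	int ret;

	asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
	return 32 - ret;
}
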
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 5aeec1e..16bd480 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,6 +36,7 @@
 #define L2X0_RAW_INTR_STAT		0x21C
 #define L2X0_INTR_CLEAR			0x220
 #define L2X0_CACHE_SYNC			0x730
+#define L2X0_DUMMY_REG			0x740
 #define L2X0_INV_LINE_PA		0x770
 #define L2X0_INV_WAY			0x77C
 #define L2X0_CLEAN_LINE_PA		0x7B0
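
Note: L2X0_DUMMY_REG (0x740) is the unmapped register used by the ARM_ERRATA_753970 workaround described in the Kconfig help above; per that text, writes to it still drain the store buffer. The cache-l2x0.c side is not part of this hunk, so the following is only a hedged sketch of how the sync path could use it, with assumed names (l2x0_base, sync_reg_offset):

#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

static void __iomem *l2x0_base;		/* mapped PL310 base (assumed) */

#ifdef CONFIG_ARM_ERRATA_753970
/* affected r3p0 parts: sync via the unmapped 0x740 offset instead */
static unsigned int sync_reg_offset = L2X0_DUMMY_REG;
#else
static unsigned int sync_reg_offset = L2X0_CACHE_SYNC;
#endif

static inline void example_cache_sync(void)
{
	writel_relaxed(0, l2x0_base + sync_reg_offset);
}
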
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index a101f10..e0d1c0c 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -50,8 +50,17 @@
 #define SCPCELLID2		0xFF8
 #define SCPCELLID3		0xFFC
 
+#define SCCTRL_TIMEREN0SEL_REFCLK	(0 << 15)
+#define SCCTRL_TIMEREN0SEL_TIMCLK	(1 << 15)
+
+#define SCCTRL_TIMEREN1SEL_REFCLK	(0 << 17)
+#define SCCTRL_TIMEREN1SEL_TIMCLK	(1 << 17)
+
 static inline void sysctl_soft_reset(void __iomem *base)
 {
+	/* switch to slow mode */
+	writel(0x2, base + SCCTRL);
+
 	/* writing any value to SCSYSSTAT reg will reset system */
 	writel(0, base + SCSYSSTAT);
 }
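
Note: beyond the slow-mode switch added to sysctl_soft_reset(), the new SCCTRL_TIMERENnSEL_* bits let platform code choose whether SP810 timer n runs from REFCLK or TIMCLK. One possible use, with a hypothetical mapped base passed in by the caller:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/hardware/sp810.h>

static void __init example_select_timclk(void __iomem *sysctrl_base)
{
	u32 val;

	val = readl(sysctrl_base + SCCTRL);
	val |= SCCTRL_TIMEREN0SEL_TIMCLK;	/* timer 0: TIMCLK instead of REFCLK */
	writel(val, sysctrl_base + SCCTRL);
}
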
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 20e0f7c..d66605de 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -95,6 +95,15 @@
 	return (void __iomem *)addr;
 }
 
+/* IO barriers */
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
@@ -125,17 +134,17 @@
  * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
  */
 #ifdef __io
-#define outb(v,p)		__raw_writeb(v,__io(p))
-#define outw(v,p)		__raw_writew((__force __u16) \
-					cpu_to_le16(v),__io(p))
-#define outl(v,p)		__raw_writel((__force __u32) \
-					cpu_to_le32(v),__io(p))
+#define outb(v,p)	({ __iowmb(); __raw_writeb(v,__io(p)); })
+#define outw(v,p)	({ __iowmb(); __raw_writew((__force __u16) \
+					cpu_to_le16(v),__io(p)); })
+#define outl(v,p)	({ __iowmb(); __raw_writel((__force __u32) \
+					cpu_to_le32(v),__io(p)); })
 
-#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __v; })
+#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
 #define inw(p)	({ __u16 __v = le16_to_cpu((__force __le16) \
-			__raw_readw(__io(p))); __v; })
+			__raw_readw(__io(p))); __iormb(); __v; })
 #define inl(p)	({ __u32 __v = le32_to_cpu((__force __le32) \
-			__raw_readl(__io(p))); __v; })
+			__raw_readl(__io(p))); __iormb(); __v; })
 
 #define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
 #define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
@@ -192,14 +201,6 @@
 #define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
 					cpu_to_le32(v),__mem_pci(c)))
 
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define __iormb()		rmb()
-#define __iowmb()		wmb()
-#else
-#define __iormb()		do { } while (0)
-#define __iowmb()		do { } while (0)
-#endif
-
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
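
Note: moving __iormb()/__iowmb() ahead of the machine-specific definitions lets the x86-style port accessors (inb/outb and friends) carry the same I/O barriers as readl()/writel(), while the *_relaxed MMIO variants stay barrier-free. Typical driver-side use, with hypothetical names, offsets and a simplified 32-bit physical address:

#include <linux/io.h>
#include <linux/types.h>

struct example_desc {
	u32	addr;
	u32	len;
	u32	flags;
};

#define EXAMPLE_DESC_READY	0x1
#define EXAMPLE_REG_KICK	0x40

static void example_kick_dma(void __iomem *regs, struct example_desc *desc,
			     u32 desc_phys)
{
	desc->flags = EXAMPLE_DESC_READY;	/* store to normal (DMA-able) memory */

	/*
	 * writel() issues __iowmb() before the raw store, so the descriptor
	 * update above is ordered before the device sees the doorbell write.
	 */
	writel(desc_phys, regs + EXAMPLE_REG_KICK);
}
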
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 3a0893a..bf13b81 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -15,10 +15,6 @@
 struct sys_timer;
 
 struct machine_desc {
-	/*
-	 * Note! The first two elements are used
-	 * by assembler code in head.S, head-common.S
-	 */
 	unsigned int		nr;		/* architecture number	*/
 	const char		*name;		/* architecture name	*/
 	unsigned long		boot_params;	/* tagged list		*/
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 23c2e8e..d0ee74b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -188,7 +188,7 @@
  * translation for translating DMA addresses.  Use the driver
  * DMA support - see dma-mapping.h.
  */
-static inline unsigned long virt_to_phys(void *x)
+static inline unsigned long virt_to_phys(const volatile void *x)
 {
 	return __virt_to_phys((unsigned long)(x));
 }
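
Note: qualifying the virt_to_phys() argument as const volatile lets callers pass const (or volatile) pointers without casting the qualifiers away. A tiny illustration, with a made-up object:

#include <asm/memory.h>

static const char example_fw_blob[64];	/* lives in the linearly-mapped kernel image */

static unsigned long example_fw_phys(void)
{
	/* fine now; the old 'void *' prototype required a cast here */
	return virt_to_phys(example_fw_blob);
}
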
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 9763be0..22de005 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
 
+#include <linux/pagemap.h>
+
 #include <asm/domain.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index a84628b..c8e6ddf 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -115,4 +115,6 @@
 	}
 }
 
+extern void sched_clock_postinit(void);
+
 #endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f5..82dfe5d 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,16 +18,34 @@
 #define __ASMARM_TLB_H
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 #ifndef CONFIG_MMU
 
 #include <linux/pagemap.h>
+
+#define tlb_flush(tlb)	((void) tlb)
+
 #include <asm-generic/tlb.h>
 
 #else /* !CONFIG_MMU */
 
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb)	0
+#define FREE_PTE_NR		500
+#else
+#define tlb_fast_mode(tlb)	1
+#define FREE_PTE_NR		0
+#endif
 
 /*
  * TLB handling.  This allows us to remove pages from the page
@@ -36,12 +54,58 @@
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
+	struct vm_area_struct	*vma;
 	unsigned long		range_start;
 	unsigned long		range_end;
+	unsigned int		nr;
+	struct page		*pages[FREE_PTE_NR];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+/*
+ * This is unnecessarily complex.  There are three ways the TLB shootdown
+ * code is used:
+ *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
+ *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.
+ *  2. Unmapping all vmas.  See exit_mmap().
+ *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
+ *  3. Unmapping argument pages.  See shift_arg_pages().
+ *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ *     tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || !tlb->vma)
+		flush_tlb_mm(tlb->mm);
+	else if (tlb->range_end > 0) {
+		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+		tlb->range_start = TASK_SIZE;
+		tlb->range_end = 0;
+	}
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+	if (!tlb->fullmm) {
+		if (addr < tlb->range_start)
+			tlb->range_start = addr;
+		if (addr + PAGE_SIZE > tlb->range_end)
+			tlb->range_end = addr + PAGE_SIZE;
+	}
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush(tlb);
+	if (!tlb_fast_mode(tlb)) {
+		free_pages_and_swap_cache(tlb->pages, tlb->nr);
+		tlb->nr = 0;
+	}
+}
+
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
@@ -49,6 +113,8 @@
 
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
+	tlb->vma = NULL;
+	tlb->nr = 0;
 
 	return tlb;
 }
@@ -56,8 +122,7 @@
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
@@ -71,12 +136,7 @@
 static inline void
 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 {
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
+	tlb_add_flush(tlb, addr);
 }
 
 /*
@@ -89,6 +149,7 @@
 {
 	if (!tlb->fullmm) {
 		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+		tlb->vma = vma;
 		tlb->range_start = TASK_SIZE;
 		tlb->range_end = 0;
 	}
@@ -97,12 +158,30 @@
 static inline void
 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (!tlb->fullmm && tlb->range_end > 0)
-		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+	if (!tlb->fullmm)
+		tlb_flush(tlb);
 }
 
-#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (tlb_fast_mode(tlb)) {
+		free_page_and_swap_cache(page);
+	} else {
+		tlb->pages[tlb->nr++] = page;
+		if (tlb->nr >= FREE_PTE_NR)
+			tlb_flush_mmu(tlb);
+	}
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+	unsigned long addr)
+{
+	pgtable_page_dtor(pte);
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, pte);
+}
+
+#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm)		do { } while (0)
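
Note: for context on where the new batching hooks sit, the sequence below is driven by the core VM (zap_page_range(), unmap_region() and friends), not by anything ARM-specific. A purely illustrative walk through that sequence with a hypothetical helper:

#include <linux/mm.h>
#include <asm/tlb.h>

/* Hypothetical helper tearing down a single page of a VMA. */
static void example_zap_one(struct mm_struct *mm, struct vm_area_struct *vma,
			    pte_t *ptep, struct page *page, unsigned long addr)
{
	struct mmu_gather *tlb;

	tlb = tlb_gather_mmu(mm, 0);		/* fullmm == 0: ranged unmap */
	tlb_start_vma(tlb, vma);		/* records tlb->vma, flushes caches */

	tlb_remove_tlb_entry(tlb, ptep, addr);	/* widens range_start/range_end */
	tlb_remove_page(tlb, page);		/* freed now, or batched in tlb->pages[] */

	tlb_end_vma(tlb, vma);			/* ranged flush_tlb_range() */
	tlb_finish_mmu(tlb, addr, addr + PAGE_SIZE); /* final flush + drain the batch */
}
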
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ce7378ea..d2005de 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -10,12 +10,7 @@
 #ifndef _ASMARM_TLBFLUSH_H
 #define _ASMARM_TLBFLUSH_H
 
-
-#ifndef CONFIG_MMU
-
-#define tlb_flush(tlb)	((void) tlb)
-
-#else /* CONFIG_MMU */
+#ifdef CONFIG_MMU
 
 #include <asm/glue.h>
 
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index eed2f79..2ad62df 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -443,40 +443,40 @@
  *
  * They are not meant to be called directly, but via enable/disable_irq.
  */
-static void ecard_irq_unmask(unsigned int irqnr)
+static void ecard_irq_unmask(struct irq_data *d)
 {
-	ecard_t *ec = slot_to_ecard(irqnr - 32);
+	ecard_t *ec = slot_to_ecard(d->irq - 32);
 
 	if (ec) {
 		if (!ec->ops)
 			ec->ops = &ecard_default_ops;
 
 		if (ec->claimed && ec->ops->irqenable)
-			ec->ops->irqenable(ec, irqnr);
+			ec->ops->irqenable(ec, d->irq);
 		else
 			printk(KERN_ERR "ecard: rejecting request to "
-				"enable IRQs for %d\n", irqnr);
+				"enable IRQs for %d\n", d->irq);
 	}
 }
 
-static void ecard_irq_mask(unsigned int irqnr)
+static void ecard_irq_mask(struct irq_data *d)
 {
-	ecard_t *ec = slot_to_ecard(irqnr - 32);
+	ecard_t *ec = slot_to_ecard(d->irq - 32);
 
 	if (ec) {
 		if (!ec->ops)
 			ec->ops = &ecard_default_ops;
 
 		if (ec->ops && ec->ops->irqdisable)
-			ec->ops->irqdisable(ec, irqnr);
+			ec->ops->irqdisable(ec, d->irq);
 	}
 }
 
 static struct irq_chip ecard_chip = {
-	.name	= "ECARD",
-	.ack	= ecard_irq_mask,
-	.mask	= ecard_irq_mask,
-	.unmask = ecard_irq_unmask,
+	.name		= "ECARD",
+	.irq_ack	= ecard_irq_mask,
+	.irq_mask	= ecard_irq_mask,
+	.irq_unmask	= ecard_irq_unmask,
 };
 
 void ecard_enablefiq(unsigned int fiqnr)
@@ -551,7 +551,7 @@
 			printk(KERN_ERR "\nInterrupt lockup detected - "
 			       "disabling all expansion card interrupts\n");
 
-			desc->chip->mask(IRQ_EXPANSIONCARD);
+			desc->irq_data.chip->irq_mask(&desc->irq_data);
 			ecard_dump_irq_state();
 		}
 	} else
@@ -574,7 +574,7 @@
 	ecard_t *ec;
 	int called = 0;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 	for (ec = cards; ec; ec = ec->next) {
 		int pending;
 
@@ -591,7 +591,7 @@
 			called ++;
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 
 	if (called == 0)
 		ecard_check_lockup(desc);
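
The ecard.c hunk is one of many in this branch that convert from the old irq_chip callbacks, which received a bare interrupt number, to the new ones that receive a struct irq_data. A minimal stand-alone sketch of the shape of that interface (mock structures, not the real <linux/irq.h> definitions):

#include <stdio.h>

/* greatly simplified stand-ins for the kernel's irq types */
struct irq_data {
	unsigned int irq;		/* the interrupt number */
};

struct irq_chip {
	const char *name;
	void (*irq_mask)(struct irq_data *d);
	void (*irq_unmask)(struct irq_data *d);
};

/* new-style callbacks take irq_data and read d->irq themselves */
static void demo_mask(struct irq_data *d)
{
	printf("mask IRQ %u\n", d->irq);
}

static void demo_unmask(struct irq_data *d)
{
	printf("unmask IRQ %u\n", d->irq);
}

static struct irq_chip demo_chip = {
	.name		= "DEMO",
	.irq_mask	= demo_mask,
	.irq_unmask	= demo_unmask,
};

int main(void)
{
	struct irq_data d = { .irq = 34 };

	demo_chip.irq_mask(&d);
	demo_chip.irq_unmask(&d);
	return 0;
}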
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index bbecaac..8f57515 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -60,6 +60,8 @@
 str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
 str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
 	.align
+#else
+	b	__error
 #endif
 
 /*
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f17d9a0..f06ff9f 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,25 +391,24 @@
 
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
-	mov	r4, #0x00070000
-	orr	r3, r4, #0xff000000	@ mask 0xff070000
-	orr	r4, r4, #0x41000000	@ val 0x41070000
-	and	r0, r9, r3
-	teq	r0, r4			@ ARM CPU and ARMv6/v7?
+	and	r3, r9, #0x000f0000	@ architecture version
+	teq	r3, #0x000f0000		@ CPU ID supported?
 	bne	__fixup_smp_on_up	@ no, assume UP
 
-	orr	r3, r3, #0x0000ff00
-	orr	r3, r3, #0x000000f0	@ mask 0xff07fff0
+	bic	r3, r9, #0x00ff0000
+	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
+	mov	r4, #0x41000000
 	orr	r4, r4, #0x0000b000
-	orr	r4, r4, #0x00000020	@ val 0x4107b020
-	and	r0, r9, r3
-	teq	r0, r4			@ ARM 11MPCore?
+	orr	r4, r4, #0x00000020	@ val 0x4100b020
+	teq	r3, r4			@ ARM 11MPCore?
 	moveq	pc, lr			@ yes, assume SMP
 
 	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
-	tst	r0, #1 << 31
-	movne	pc, lr			@ bit 31 => SMP
+	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
+	teq	r0, #0x80000000		@ not part of a uniprocessor system?
+	moveq	pc, lr			@ yes, assume SMP
 
 __fixup_smp_on_up:
 	adr	r0, 1f
@@ -417,18 +416,7 @@
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -442,7 +430,31 @@
 	ALT_SMP(.long	1)
 	ALT_UP(.long	0)
 	.popsection
-
 #endif
 
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
+#endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
+
 #include "head-common.S"
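
__do_fixup_smp_on_up walks a table of (address, replacement instruction) pairs and rewrites each location, applying the relocation offset held in r3; the new fixup_smp entry point lets module loading reuse the same loop. The table walk, transcribed into stand-alone C (hypothetical entries; the real code patches kernel text and also handles Thumb-2 halfword stores):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct alt_entry {
	uint32_t *addr;		/* location of the SMP-only instruction */
	uint32_t insn;		/* the UP replacement to write there */
};

/* walk [begin, end) and patch each entry, shifting the recorded
 * address by 'offset' (the role of r3 in the assembly version) */
static void fixup_smp_on_up(struct alt_entry *begin, struct alt_entry *end,
			    long offset)
{
	struct alt_entry *e;

	for (e = begin; e < end; e++) {
		uint32_t *p = (uint32_t *)((char *)e->addr + offset);
		*p = e->insn;
	}
}

int main(void)
{
	uint32_t text[2] = { 0xe59f0004, 0xe59f1004 };	/* pretend SMP code */
	struct alt_entry table[] = {
		{ &text[0], 0xe3a00000 },	/* UP-safe replacement */
		{ &text[1], 0xe3a01000 },
	};

	fixup_smp_on_up(table, table + 2, 0);	/* offset 0: already relocated */
	printf("%08" PRIx32 " %08" PRIx32 "\n", text[0], text[1]);
	return 0;
}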
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f04..44b84fe 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-				"Assuming v6 debug is present.\n");
+	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+	    "CPUID feature registers not supported. "
+	    "Assuming v6 debug is present.\n"))
 		return ARM_DEBUG_ARCH_V6;
-	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@
 	return debug_arch;
 }
 
+static int debug_arch_supported(void)
+{
+	u8 arch = get_debug_arch();
+	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
 /* Determine number of BRP register available. */
 static int get_num_brp_resources(void)
 {
@@ -268,6 +273,9 @@
 
 int hw_breakpoint_slots(int type)
 {
+	if (!debug_arch_supported())
+		return 0;
+
 	/*
 	 * We can be called early, so don't rely on
 	 * our static variables being initialised.
@@ -828,20 +836,33 @@
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-	int i;
+	int i, cpu = smp_processor_id();
+	u32 dbg_power;
+	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
-	 * can be maintained across low-power modes without leaving
-	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
-	 * we can write to the debug registers out of reset, so we must
-	 * unlock the OS Lock Access Register to avoid taking undefined
-	 * instruction exceptions later on.
+	 * can be maintained across low-power modes without leaving the debug
+	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+	 * the debug registers out of reset, so we must unlock the OS Lock
+	 * Access Register to avoid taking undefined instruction exceptions
+	 * later on.
 	 */
 	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
 		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0) {
+			pr_warning("CPU %d debug is powered down!\n", cpu);
+			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+			return;
+		}
+
+		/*
 		 * Unconditionally clear the lock by writing a value
 		 * other than 0xC5ACCE55 to the access register.
 		 */
@@ -879,10 +900,11 @@
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
+	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
-	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+	if (!debug_arch_supported()) {
 		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
 		return 0;
 	}
@@ -899,18 +921,24 @@
 		pr_info("%d breakpoint(s) reserved for watchpoint "
 				"single-step.\n", core_num_reserved_brps);
 
+	/*
+	 * Reset the breakpoint resources. We assume that a halting
+	 * debugger will leave the world in a nice state for us.
+	 */
+	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+	if (!cpumask_empty(&cpumask)) {
+		core_num_brps = 0;
+		core_num_reserved_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
+		max_watchpoint_len = 4;
 		pr_warning("halting debug mode enabled. Assuming maximum "
-				"watchpoint size of 4 bytes.");
+			   "watchpoint size of %u bytes.", max_watchpoint_len);
 	} else {
-		/*
-		 * Reset the breakpoint resources. We assume that a halting
-		 * debugger will leave the world in a nice state for us.
-		 */
-		smp_call_function(reset_ctrl_regs, NULL, 1);
-		reset_ctrl_regs(NULL);
-
 		/* Work out the maximum supported watchpoint length. */
 		max_watchpoint_len = get_max_wp_len();
 		pr_info("maximum watchpoint size is %u bytes.\n",
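
In the hw_breakpoint.c rework above, reset_ctrl_regs() reports per CPU whether the debug logic is actually powered, and arch_hw_breakpoint_init() gathers the failures in a cpumask and gives up on hardware breakpoints if that mask is non-empty. A stand-alone sketch of the same collect-then-decide pattern, using a plain bitmask and a made-up check_cpu() probe in place of the real register read:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

/* pretend per-CPU probe: 0 means the CPU's debug logic is usable */
static int check_cpu(int cpu)
{
	return cpu == 2 ? -1 : 0;	/* say CPU 2 is powered down */
}

int main(void)
{
	uint32_t failed = 0;
	int cpu;

	/* the kernel runs the probe on every CPU via on_each_cpu() */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (check_cpu(cpu))
			failed |= 1u << cpu;

	if (failed)
		printf("debug powered down on CPU mask %#x, disabling breakpoints\n",
		       failed);
	else
		printf("hardware breakpoints usable on all CPUs\n");
	return 0;
}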
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 8135438..28536e3 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -88,7 +88,7 @@
 		seq_printf(p, "%*d: ", prec, i);
 		for_each_present_cpu(cpu)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-		seq_printf(p, " %10s", desc->chip->name ? : "-");
+		seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
 		seq_printf(p, "  %s", action->name);
 		for (action = action->next; action; action = action->next)
 			seq_printf(p, ", %s", action->name);
@@ -181,10 +181,11 @@
 
 static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
 
 	raw_spin_lock_irq(&desc->lock);
-	desc->chip->set_affinity(irq, cpumask_of(cpu));
+	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
+					      cpumask_of(cpu), false);
 	raw_spin_unlock_irq(&desc->lock);
 }
 
@@ -199,16 +200,18 @@
 	struct irq_desc *desc;
 
 	for_each_irq_desc(i, desc) {
-		if (desc->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(desc->affinity,
+		struct irq_data *d = &desc->irq_data;
+
+		if (d->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(d->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);
 
-				cpumask_setall(desc->affinity);
-				newcpu = cpumask_any_and(desc->affinity,
+				cpumask_setall(d->affinity);
+				newcpu = cpumask_any_and(d->affinity,
 							 cpu_online_mask);
 			}
 
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f005..8f6ed43 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@
 
 		return space_cccc_1100_010x(insn, asi);
 
-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
 
 		return space_cccc_110x(insn, asi);
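
The one-character kprobes fix above corrects a (mask, value) pair used to classify instructions: with mask 0x0e000000, a comparison value of 0x0c400000 sets bits outside the mask and can never match, while 0x0c000000 selects the intended coprocessor encoding space. A tiny stand-alone check of that idiom:

#include <stdio.h>
#include <stdint.h>

/* does insn fall in the encoding space described by (mask, value)? */
static int in_space(uint32_t insn, uint32_t mask, uint32_t value)
{
	return (insn & mask) == value;
}

int main(void)
{
	uint32_t insn = 0x0c102000;	/* a coprocessor-space encoding */

	/* value has bits outside the mask set: matches nothing, ever */
	printf("%d\n", in_space(insn, 0x0e000000, 0x0c400000));	/* 0 */
	/* corrected value: bits inside the mask only */
	printf("%d\n", in_space(insn, 0x0e000000, 0x0c000000));	/* 1 */
	return 0;
}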
 
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 0c1bb68..6d4105e 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -38,17 +39,9 @@
 #ifdef CONFIG_MMU
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				__builtin_return_address(0));
 }
 #else /* CONFIG_MMU */
 void *module_alloc(unsigned long size)
@@ -276,12 +269,28 @@
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -323,6 +332,9 @@
 					         maps[i].txt_sec->sh_addr,
 					         maps[i].txt_sec->sh_size);
 #endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
 	return 0;
 }
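
find_mod_section() above locates a section by name by walking the section header table and comparing names from the string table indexed by e_shstrndx; module_finalize() then uses it to find .alt.smp.init and run fixup_smp() on uniprocessor systems. The same lookup works on any in-memory ELF image; a sketch using <elf.h> for 32-bit headers, assuming the whole file has been read into a buffer:

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* find a section header by name in an in-memory ELF32 image */
static const Elf32_Shdr *find_section(const unsigned char *image,
				      const char *name)
{
	const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)image;
	const Elf32_Shdr *shdrs = (const Elf32_Shdr *)(image + ehdr->e_shoff);
	const char *strs = (const char *)image +
			   shdrs[ehdr->e_shstrndx].sh_offset;
	int i;

	for (i = 0; i < ehdr->e_shnum; i++)
		if (strcmp(name, strs + shdrs[i].sh_name) == 0)
			return &shdrs[i];
	return NULL;
}

int main(int argc, char **argv)
{
	FILE *f;
	long len;
	unsigned char *image;
	const Elf32_Shdr *s;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <elf32-file> <section>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	len = ftell(f);
	rewind(f);
	image = malloc(len);
	if (!image || fread(image, 1, len, f) != (size_t)len)
		return 1;
	fclose(f);

	s = find_section(image, argv[2]);
	if (s)
		printf("%s: offset %u, size %u bytes\n", argv[2],
		       (unsigned)s->sh_offset, (unsigned)s->sh_size);
	else
		printf("%s: not found\n", argv[2]);
	free(image);
	return 0;
}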
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5efa264..d150ad1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@
 	 * Frame pointers should strictly progress back up the stack
 	 * (towards higher addresses).
 	 */
-	if (tail >= buftail.fp)
+	if (tail + 1 >= buftail.fp)
 		return NULL;
 
 	return buftail.fp - 1;
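
The perf_event.c fix tightens the sanity check on user-space backtraces: the next frame record must lie strictly above the record just read (tail + 1), otherwise a frame pointing at or below itself would make the walk loop forever. A stand-alone sketch of a frame-pointer walk with that forward-progress check, over a fake in-memory stack rather than real user memory:

#include <stdio.h>

struct frame_tail {
	struct frame_tail *fp;	/* saved frame pointer of the caller */
	unsigned long lr;	/* saved return address */
};

/* follow one link, refusing frames that do not move up the stack */
static struct frame_tail *walk_one(struct frame_tail *tail,
				   unsigned long *ret_addr)
{
	struct frame_tail rec = *tail;	/* the kernel copies this from user */

	*ret_addr = rec.lr;
	if (tail + 1 >= rec.fp)		/* no forward progress: stop */
		return NULL;
	return rec.fp - 1;
}

int main(void)
{
	struct frame_tail frames[4] = { { 0 } };
	struct frame_tail *t = &frames[0];
	unsigned long addr;

	frames[0].lr = 0x1000;
	frames[0].fp = &frames[3];	/* caller's record sits at frames[2] */
	frames[2].lr = 0x2000;
	frames[2].fp = &frames[0];	/* bogus: points back down the stack */

	while (t) {
		t = walk_one(t, &addr);
		printf("return address %#lx\n", addr);
	}
	return 0;
}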
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96e..2c79eec 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@
 			   irq, cpu);
 	return err;
 #else
-	return 0;
+	return -EINVAL;
 #endif
 }
 
 static int
 init_cpu_pmu(void)
 {
-	int i, err = 0;
+	int i, irqs, err = 0;
 	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
 
-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
 
-	for (i = 0; i < pdev->num_resources; ++i) {
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;
+
+	for (i = 0; i < irqs; ++i) {
 		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}
 
-out:
 	return err;
 }
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e76fcaa..94bbedb 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -483,6 +483,7 @@
 	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
 
+#ifdef CONFIG_MMU
 /*
  * The vectors page is always readable from user space for the
  * atomic helpers and the signal restart code.  Let's declare a mapping
@@ -503,3 +504,4 @@
 {
 	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
 }
+#endif
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816..b13e70f 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -996,10 +996,10 @@
 		while (!(arch_ctrl.len & 0x1))
 			arch_ctrl.len >>= 1;
 
-		if (idx & 0x1)
-			reg = encode_ctrl_reg(arch_ctrl);
-		else
+		if (num & 0x1)
 			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
 	}
 
 put:
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 2cdcc92..9a46370 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -34,7 +34,7 @@
 	sched_clock_update_fn = update;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60);
+	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -60,10 +60,15 @@
 	 * sets the initial epoch.
 	 */
 	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
-	sched_clock_poll(sched_clock_timer.data);
+	update();
 
 	/*
 	 * Ensure that sched_clock() starts off at 0ns
 	 */
 	cd->epoch_ns = 0;
 }
+
+void __init sched_clock_postinit(void)
+{
+	sched_clock_poll(sched_clock_timer.data);
+}
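
sched_clock() turns raw counter ticks into nanoseconds with a precomputed multiplier and shift; the hunk above also splits initialisation so the first poll happens later, from sched_clock_postinit(). The conversion itself is just ns = (ticks * mult) >> shift. A small stand-alone illustration with hand-picked values for a 32.768 kHz counter (the kernel derives mult/shift via clocks_calc_mult_shift()):

#include <stdio.h>
#include <stdint.h>

/* convert counter ticks to nanoseconds: ns = (ticks * mult) >> shift */
static uint64_t ticks_to_ns(uint64_t ticks, uint32_t mult, uint32_t shift)
{
	return (ticks * mult) >> shift;
}

int main(void)
{
	/*
	 * One tick of a 32768 Hz counter is 1e9 / 32768, about 30517.58 ns.
	 * mult = (1e9 / 32768) * 2^16 = 2000000000 exactly, shift = 16.
	 */
	uint32_t mult = 2000000000u, shift = 16;

	printf("%llu ns per tick\n",
	       (unsigned long long)ticks_to_ns(1, mult, shift));	/* 30517 */
	printf("%llu ns per second of ticks\n",
	       (unsigned long long)ticks_to_ns(32768, mult, shift));	/* 1000000000 */
	return 0;
}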
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3455ad3..5ea4fb71 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
 		asm("mrc	p15, 0, %0, c0, c1, 4"
 		    : "=r" (mmfr0));
-		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-		    (mmfr0 & 0x000000f0) == 0x00000030)
+		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 			 (mmfr0 & 0x000000f0) == 0x00000020)
@@ -518,25 +518,21 @@
 #endif
 }
 
-static void __init
-request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
+static void __init request_standard_resources(struct machine_desc *mdesc)
 {
+	struct memblock_region *region;
 	struct resource *res;
-	int i;
 
 	kernel_code.start   = virt_to_phys(_text);
 	kernel_code.end     = virt_to_phys(_etext - 1);
 	kernel_data.start   = virt_to_phys(_sdata);
 	kernel_data.end     = virt_to_phys(_end - 1);
 
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0)
-			continue;
-
+	for_each_memblock(memory, region) {
 		res = alloc_bootmem_low(sizeof(*res));
 		res->name  = "System RAM";
-		res->start = mi->bank[i].start;
-		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
+		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
 		request_resource(&iomem_resource, res);
@@ -650,15 +646,17 @@
 
 __tagtable(ATAG_REVISION, parse_tag_revision);
 
-#ifndef CONFIG_CMDLINE_FORCE
 static int __init parse_tag_cmdline(const struct tag *tag)
 {
+#ifndef CONFIG_CMDLINE_FORCE
 	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
+#else
+	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
+#endif /* CONFIG_CMDLINE_FORCE */
 	return 0;
 }
 
 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
-#endif /* CONFIG_CMDLINE_FORCE */
 
 /*
  * Scan the tag table for this tag, and call its parse function.
@@ -857,7 +855,7 @@
 	arm_memblock_init(&meminfo, mdesc);
 
 	paging_init(mdesc);
-	request_standard_resources(&meminfo, mdesc);
+	request_standard_resources(mdesc);
 
 #ifdef CONFIG_SMP
 	if (is_smp())
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a6..abaf844 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -474,7 +474,9 @@
 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
 	unsigned long retcode;
 	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
 
 	/*
 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index dd79074..60636f4 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -36,6 +36,7 @@
 		/* timer load already set up */
 		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
 			| TWD_TIMER_CONTROL_PERIODIC;
+		__raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
 		/* period set, and timer enabled in 'next_event' hook */
@@ -81,7 +82,7 @@
 
 static void __cpuinit twd_calibrate_rate(void)
 {
-	unsigned long load, count;
+	unsigned long count;
 	u64 waitjiffies;
 
 	/*
@@ -114,12 +115,8 @@
 		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
 		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
-			(twd_timer_rate / 100000) % 100);
+			(twd_timer_rate / 1000000) % 100);
 	}
-
-	load = twd_timer_rate / HZ;
-
-	__raw_writel(load, twd_base + TWD_TIMER_LOAD);
 }
 
 /*
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index c2e112e..381d23a 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -94,10 +94,13 @@
 	if (tsk != current) {
 #ifdef CONFIG_SMP
 		/*
-		 * What guarantees do we have here that 'tsk'
-		 * is not running on another CPU?
+		 * What guarantees do we have here that 'tsk' is not
+		 * running on another CPU?  For now, ignore it as we
+		 * can't guarantee we won't explode.
 		 */
-		BUG();
+		if (trace->nr_entries < trace->max_entries)
+			trace->entries[trace->nr_entries++] = ULONG_MAX;
+		return;
 #else
 		data.no_sched_functions = 1;
 		frame.fp = thread_saved_fp(tsk);
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index f1e2eb1..3d76bf2 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -29,6 +29,7 @@
 
 #include <asm/leds.h>
 #include <asm/thread_info.h>
+#include <asm/sched_clock.h>
 #include <asm/stacktrace.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
@@ -163,5 +164,8 @@
 {
 	system_timer = machine_desc->timer;
 	system_timer->init();
+#ifdef CONFIG_HAVE_SCHED_CLOCK
+	sched_clock_postinit();
+#endif
 }
 
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3..6146279 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
 #define ARM_CPU_KEEP(x)
 #endif
 
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -43,6 +49,7 @@
 		_sinittext = .;
 			HEAD_TEXT
 			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
 		_einittext = .;
 		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
@@ -67,6 +74,7 @@
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 #endif
 	}
 
@@ -162,6 +170,7 @@
 		. = ALIGN(PAGE_SIZE);
 		__init_begin = .;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 #endif
@@ -247,6 +256,8 @@
 	}
 #endif
 
+	NOTES
+
 	BSS_SECTION(0, 0, 0)
 	_end = .;
 
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
index 8d6a876..3c9a05c 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay.S
@@ -25,11 +25,15 @@
 		ldr	r2, .LC1
 		mul	r0, r2, r0
 ENTRY(__const_udelay)				@ 0 <= r0 <= 0x7fffff06
+		mov	r1, #-1
 		ldr	r2, .LC0
 		ldr	r2, [r2]		@ max = 0x01ffffff
+		add	r0, r0, r1, lsr #32-14
 		mov	r0, r0, lsr #14		@ max = 0x0001ffff
+		add	r2, r2, r1, lsr #32-10
 		mov	r2, r2, lsr #10		@ max = 0x00007fff
 		mul	r0, r2, r0		@ max = 2^32-1
+		add	r0, r0, r1, lsr #32-6
 		movs	r0, r0, lsr #6
 		moveq	pc, lr
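
The delay.S change loads r1 with -1 so that each "add rX, rX, r1, lsr #32-N" adds 2^N - 1 just before the corresponding right shift by N; the shift then rounds up instead of truncating, and very short delays no longer collapse to zero loops. The same round-up-before-shift trick in C:

#include <stdio.h>
#include <stdint.h>

/* truncating: x >> s;  rounding up: (x + 2^s - 1) >> s */
static uint32_t shr_round_up(uint32_t x, unsigned int s)
{
	/* assumes x is small enough that the addition cannot overflow */
	return (x + ((UINT32_C(1) << s) - 1)) >> s;
}

int main(void)
{
	uint32_t loops = 100;	/* well below 2^14 */

	printf("truncated: %u\n", (unsigned)(loops >> 14));		/* 0: delay lost */
	printf("rounded:   %u\n", (unsigned)shr_round_up(loops, 14));	/* 1: at least one loop */
	return 0;
}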
 
diff --git a/arch/arm/mach-aaec2000/core.c b/arch/arm/mach-aaec2000/core.c
index 3ef6833..f8465bd 100644
--- a/arch/arm/mach-aaec2000/core.c
+++ b/arch/arm/mach-aaec2000/core.c
@@ -68,25 +68,25 @@
 /*
  * Interrupt handling routines
  */
-static void aaec2000_int_ack(unsigned int irq)
+static void aaec2000_int_ack(struct irq_data *d)
 {
-	IRQ_INTSR = 1 << irq;
+	IRQ_INTSR = 1 << d->irq;
 }
 
-static void aaec2000_int_mask(unsigned int irq)
+static void aaec2000_int_mask(struct irq_data *d)
 {
-	IRQ_INTENC |= (1 << irq);
+	IRQ_INTENC |= (1 << d->irq);
 }
 
-static void aaec2000_int_unmask(unsigned int irq)
+static void aaec2000_int_unmask(struct irq_data *d)
 {
-	IRQ_INTENS |= (1 << irq);
+	IRQ_INTENS |= (1 << d->irq);
 }
 
 static struct irq_chip aaec2000_irq_chip = {
-	.ack	= aaec2000_int_ack,
-	.mask	= aaec2000_int_mask,
-	.unmask	= aaec2000_int_unmask,
+	.irq_ack	= aaec2000_int_ack,
+	.irq_mask	= aaec2000_int_mask,
+	.irq_unmask	= aaec2000_int_unmask,
 };
 
 void __init aaec2000_init_irq(void)
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index c015b68..1939023 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -362,6 +362,12 @@
 	  Select this if you are using a Eukrea Electromatique's
 	  CPU9G20 Board <http://www.eukrea.com/>
 
+config MACH_ACMENETUSFOXG20
+	bool "Acme Systems srl FOX Board G20"
+	help
+	  Select this if you are using Acme Systems
+	  FOX Board G20 <http://www.acmesystems.it>
+
 config MACH_PORTUXG20
 	bool "taskit PortuxG20"
 	help
@@ -381,6 +387,13 @@
 	  Select this if you are using taskit's Stamp9G20 CPU module on this
 	  carrier board, being the decentralized unit of a building automation
 	  system; featuring nvram, eth-switch, iso-rs485, display, io
+
+config MACH_GSIA18S
+	bool "GS_IA18_S board"
+	help
+	  This enables support for the GS_IA18_S board
+	  produced by GeoSIG Ltd company. This is an internet accelerograph.
+	  <http://www.geosig.com>
 endif
 
 if (ARCH_AT91SAM9260 || ARCH_AT91SAM9G20)
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index d13add7..a83835e 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -63,9 +63,11 @@
 # AT91SAM9G20 board-specific support
 obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
 obj-$(CONFIG_MACH_CPU9G20)	+= board-cpu9krea.o
+obj-$(CONFIG_MACH_ACMENETUSFOXG20) += board-foxg20.o
 obj-$(CONFIG_MACH_STAMP9G20)	+= board-stamp9g20.o
 obj-$(CONFIG_MACH_PORTUXG20)	+= board-stamp9g20.o
 obj-$(CONFIG_MACH_PCONTROL_G20)	+= board-pcontrol-g20.o board-stamp9g20.o
+obj-$(CONFIG_MACH_GSIA18S)	+= board-gsia18s.o board-stamp9g20.o
 
 # AT91SAM9260/AT91SAM9G20 board-specific support
 obj-$(CONFIG_MACH_SNAPPER_9260)	+= board-snapper9260.o
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index 7b58c94..de2fd04 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -128,17 +128,17 @@
 		.platform_data	= &my_flash0_platform,
 #endif
 	},
-	{	/* User accessable spi - cs1 (250KHz) */
+	{	/* User accessible spi - cs1 (250KHz) */
 		.modalias	= "spi-cs1",
 		.chip_select	= 1,
 		.max_speed_hz	= 250 * 1000,
 	},
-	{	/* User accessable spi - cs2 (1MHz) */
+	{	/* User accessible spi - cs2 (1MHz) */
 		.modalias	= "spi-cs2",
 		.chip_select	= 2,
 		.max_speed_hz	= 1 * 1000 * 1000,
 	},
-	{	/* User accessable spi - cs3 (10MHz) */
+	{	/* User accessible spi - cs3 (10MHz) */
 		.modalias	= "spi-cs3",
 		.chip_select	= 3,
 		.max_speed_hz	= 10 * 1000 * 1000,
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
new file mode 100644
index 0000000..dfc7dfe
--- /dev/null
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -0,0 +1,274 @@
+/*
+ *  Copyright (C) 2005 SAN People
+ *  Copyright (C) 2008 Atmel
+ *  Copyright (C) 2010 Lee McLoughlin - lee@lmmrtech.com
+ *  Copyright (C) 2010 Sergio Tanzilli - tanzilli@acmesystems.it
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at73c213.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/clk.h>
+#include <linux/w1-gpio.h>
+
+#include <mach/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+/*
+ * The FOX Board G20 hardware comes as the "Netus G20" board with
+ * just the cpu, ram, dataflash and two header connectors.
+ * This is plugged into the FOX Board which provides the ethernet,
+ * usb, rtc, leds, switch, ...
+ *
+ * For more info visit: http://www.acmesystems.it/foxg20
+ */
+
+
+static void __init foxg20_map_io(void)
+{
+	/* Initialize processor: 18.432 MHz crystal */
+	at91sam9260_initialize(18432000);
+
+	/* DBGU on ttyS0. (Rx & Tx only) */
+	at91_register_uart(0, 0, 0);
+
+	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+	at91_register_uart(AT91SAM9260_ID_US0, 1,
+				ATMEL_UART_CTS
+				| ATMEL_UART_RTS
+				| ATMEL_UART_DTR
+				| ATMEL_UART_DSR
+				| ATMEL_UART_DCD
+				| ATMEL_UART_RI);
+
+	/* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */
+	at91_register_uart(AT91SAM9260_ID_US1, 2,
+		ATMEL_UART_CTS
+		| ATMEL_UART_RTS);
+
+	/* USART2 on ttyS3. (Rx & Tx only) */
+	at91_register_uart(AT91SAM9260_ID_US2, 3, 0);
+
+	/* USART3 on ttyS4. (Rx, Tx, RTS, CTS) */
+	at91_register_uart(AT91SAM9260_ID_US3, 4,
+		ATMEL_UART_CTS
+		| ATMEL_UART_RTS);
+
+	/* USART4 on ttyS5. (Rx & Tx only) */
+	at91_register_uart(AT91SAM9260_ID_US4, 5, 0);
+
+	/* USART5 on ttyS6. (Rx & Tx only) */
+	at91_register_uart(AT91SAM9260_ID_US5, 6, 0);
+
+	/* set serial console to ttyS0 (ie, DBGU) */
+	at91_set_serial_console(0);
+
+	/* Set the internal pull-up resistor on DRXD */
+	at91_set_A_periph(AT91_PIN_PB14, 1);
+
+}
+
+static void __init foxg20_init_irq(void)
+{
+	at91sam9260_init_interrupts(NULL);
+}
+
+
+/*
+ * USB Host port
+ */
+static struct at91_usbh_data __initdata foxg20_usbh_data = {
+	.ports		= 2,
+};
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata foxg20_udc_data = {
+	.vbus_pin	= AT91_PIN_PC6,
+	.pullup_pin	= 0,		/* pull-up driven by UDC */
+};
+
+
+/*
+ * SPI devices.
+ */
+static struct spi_board_info foxg20_spi_devices[] = {
+#if !defined(CONFIG_MMC_AT91)
+	{
+		.modalias	= "mtd_dataflash",
+		.chip_select	= 1,
+		.max_speed_hz	= 15 * 1000 * 1000,
+		.bus_num	= 0,
+	},
+#endif
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata foxg20_macb_data = {
+	.phy_irq_pin	= AT91_PIN_PA7,
+	.is_rmii	= 1,
+};
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata foxg20_mmc_data = {
+	.slot_b		= 1,
+	.wire4		= 1,
+};
+
+
+/*
+ * LEDs
+ */
+static struct gpio_led foxg20_leds[] = {
+	{	/* user led, red */
+		.name			= "user_led",
+		.gpio			= AT91_PIN_PC7,
+		.active_low		= 0,
+		.default_trigger	= "heartbeat",
+	},
+};
+
+
+/*
+ * GPIO Buttons
+ */
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button foxg20_buttons[] = {
+	{
+		.gpio		= AT91_PIN_PC4,
+		.code		= BTN_1,
+		.desc		= "Button 1",
+		.active_low	= 1,
+		.wakeup		= 1,
+	},
+};
+
+static struct gpio_keys_platform_data foxg20_button_data = {
+	.buttons	= foxg20_buttons,
+	.nbuttons	= ARRAY_SIZE(foxg20_buttons),
+};
+
+static struct platform_device foxg20_button_device = {
+	.name		= "gpio-keys",
+	.id		= -1,
+	.num_resources	= 0,
+	.dev		= {
+		.platform_data	= &foxg20_button_data,
+	}
+};
+
+static void __init foxg20_add_device_buttons(void)
+{
+	at91_set_gpio_input(AT91_PIN_PC4, 1);	/* btn1 */
+	at91_set_deglitch(AT91_PIN_PC4, 1);
+
+	platform_device_register(&foxg20_button_device);
+}
+#else
+static void __init foxg20_add_device_buttons(void) {}
+#endif
+
+
+#if defined(CONFIG_W1_MASTER_GPIO) || defined(CONFIG_W1_MASTER_GPIO_MODULE)
+static struct w1_gpio_platform_data w1_gpio_pdata = {
+	/* If you choose to use a pin other than PB16 it needs to be 3.3V */
+	.pin		= AT91_PIN_PB16,
+	.is_open_drain  = 1,
+};
+
+static struct platform_device w1_device = {
+	.name			= "w1-gpio",
+	.id			= -1,
+	.dev.platform_data	= &w1_gpio_pdata,
+};
+
+static void __init at91_add_device_w1(void)
+{
+	at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
+	at91_set_multi_drive(w1_gpio_pdata.pin, 1);
+	platform_device_register(&w1_device);
+}
+
+#endif
+
+
+static struct i2c_board_info __initdata foxg20_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("24c512", 0x50),
+	},
+};
+
+
+static void __init foxg20_board_init(void)
+{
+	/* Serial */
+	at91_add_device_serial();
+	/* USB Host */
+	at91_add_device_usbh(&foxg20_usbh_data);
+	/* USB Device */
+	at91_add_device_udc(&foxg20_udc_data);
+	/* SPI */
+	at91_add_device_spi(foxg20_spi_devices, ARRAY_SIZE(foxg20_spi_devices));
+	/* Ethernet */
+	at91_add_device_eth(&foxg20_macb_data);
+	/* MMC */
+	at91_add_device_mmc(0, &foxg20_mmc_data);
+	/* I2C */
+	at91_add_device_i2c(foxg20_i2c_devices, ARRAY_SIZE(foxg20_i2c_devices));
+	/* LEDs */
+	at91_gpio_leds(foxg20_leds, ARRAY_SIZE(foxg20_leds));
+	/* Push Buttons */
+	foxg20_add_device_buttons();
+#if defined(CONFIG_W1_MASTER_GPIO) || defined(CONFIG_W1_MASTER_GPIO_MODULE)
+	at91_add_device_w1();
+#endif
+}
+
+MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20")
+	/* Maintainer: Sergio Tanzilli */
+	.boot_params	= AT91_SDRAM_BASE + 0x100,
+	.timer		= &at91sam926x_timer,
+	.map_io		= foxg20_map_io,
+	.init_irq	= foxg20_init_irq,
+	.init_machine	= foxg20_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
new file mode 100644
index 0000000..bc28136
--- /dev/null
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -0,0 +1,584 @@
+/*
+ *  Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de>
+ *                     taskit GmbH
+ *                2010 Igor Plyatov <plyatov@gmail.com>
+ *                     GeoSIG Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/w1-gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pcf857x.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+#include <mach/gsia18s.h>
+#include <mach/stamp9g20.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+static void __init gsia18s_map_io(void)
+{
+	stamp9g20_map_io();
+
+	/*
+	 * USART0 on ttyS1 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI).
+	 * Used for Internal Analog Modem.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US0, 1,
+				ATMEL_UART_CTS | ATMEL_UART_RTS |
+				ATMEL_UART_DTR | ATMEL_UART_DSR |
+				ATMEL_UART_DCD | ATMEL_UART_RI);
+	/*
+	 * USART1 on ttyS2 (Rx, Tx, CTS, RTS).
+	 * Used for GPS or WiFi or Data stream.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US1, 2,
+				ATMEL_UART_CTS | ATMEL_UART_RTS);
+	/*
+	 * USART2 on ttyS3 (Rx, Tx, CTS, RTS).
+	 * Used for External Modem.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US2, 3,
+				ATMEL_UART_CTS | ATMEL_UART_RTS);
+	/*
+	 * USART3 on ttyS4 (Rx, Tx, RTS).
+	 * Used for RS-485.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US3, 4, ATMEL_UART_RTS);
+
+	/*
+	 * USART4 on ttyS5 (Rx, Tx).
+	 * Used for TRX433 Radio Module.
+	 */
+	at91_register_uart(AT91SAM9260_ID_US4, 5, 0);
+}
+
+static void __init init_irq(void)
+{
+	at91sam9260_init_interrupts(NULL);
+}
+
+/*
+ * Two USB Host ports
+ */
+static struct at91_usbh_data __initdata usbh_data = {
+	.ports		= 2,
+};
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata udc_data = {
+	.vbus_pin	= AT91_PIN_PA22,
+	.pullup_pin	= 0,		/* pull-up driven by UDC */
+};
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata macb_data = {
+	.phy_irq_pin	= AT91_PIN_PA28,
+	.is_rmii	= 1,
+};
+
+/*
+ * LEDs and GPOs
+ */
+static struct gpio_led gpio_leds[] = {
+	{
+		.name			= "gpo:spi1reset",
+		.gpio			= AT91_PIN_PC1,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "gpo:trig_net_out",
+		.gpio			= AT91_PIN_PB20,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "gpo:trig_net_dir",
+		.gpio			= AT91_PIN_PB19,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "gpo:charge_dis",
+		.gpio			= AT91_PIN_PC2,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "led:event",
+		.gpio			= AT91_PIN_PB17,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "led:lan",
+		.gpio			= AT91_PIN_PB18,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{
+		.name			= "led:error",
+		.gpio			= AT91_PIN_PB16,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	}
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+	.leds		= gpio_leds,
+	.num_leds	= ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds = {
+	.name	= "leds-gpio",
+	.id	= 0,
+	.dev	= {
+		.platform_data	= &gpio_led_info,
+	}
+};
+
+static void __init gsia18s_leds_init(void)
+{
+	platform_device_register(&leds);
+}
+
+/* PCF8574 0x20 GPIO - U1 on the GS_IA18-CB_V3 board */
+static struct gpio_led pcf_gpio_leds1[] = {
+	{ /* bit 0 */
+		.name			= "gpo:hdc_power",
+		.gpio			= PCF_GPIO_HDC_POWER,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 1 */
+		.name			= "gpo:wifi_setup",
+		.gpio			= PCF_GPIO_WIFI_SETUP,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 2 */
+		.name			= "gpo:wifi_enable",
+		.gpio			= PCF_GPIO_WIFI_ENABLE,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 3	*/
+		.name			= "gpo:wifi_reset",
+		.gpio			= PCF_GPIO_WIFI_RESET,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	},
+	/* bit 4 used as GPI	*/
+	{ /* bit 5 */
+		.name			= "gpo:gps_setup",
+		.gpio			= PCF_GPIO_GPS_SETUP,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 6 */
+		.name			= "gpo:gps_standby",
+		.gpio			= PCF_GPIO_GPS_STANDBY,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	},
+	{ /* bit 7 */
+		.name			= "gpo:gps_power",
+		.gpio			= PCF_GPIO_GPS_POWER,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	}
+};
+
+static struct gpio_led_platform_data pcf_gpio_led_info1 = {
+	.leds		= pcf_gpio_leds1,
+	.num_leds	= ARRAY_SIZE(pcf_gpio_leds1),
+};
+
+static struct platform_device pcf_leds1 = {
+	.name	= "leds-gpio", /* GS_IA18-CB_board */
+	.id	= 1,
+	.dev	= {
+		.platform_data	= &pcf_gpio_led_info1,
+	}
+};
+
+/* PCF8574 0x22 GPIO - U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
+static struct gpio_led pcf_gpio_leds2[] = {
+	{ /* bit 0 */
+		.name			= "gpo:alarm_1",
+		.gpio			= PCF_GPIO_ALARM1,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 1 */
+		.name			= "gpo:alarm_2",
+		.gpio			= PCF_GPIO_ALARM2,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 2 */
+		.name			= "gpo:alarm_3",
+		.gpio			= PCF_GPIO_ALARM3,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	{ /* bit 3 */
+		.name			= "gpo:alarm_4",
+		.gpio			= PCF_GPIO_ALARM4,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+	/* bits 4, 5, 6 not used */
+	{ /* bit 7 */
+		.name			= "gpo:alarm_v_relay_on",
+		.gpio			= PCF_GPIO_ALARM_V_RELAY_ON,
+		.active_low		= 0,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+};
+
+static struct gpio_led_platform_data pcf_gpio_led_info2 = {
+	.leds		= pcf_gpio_leds2,
+	.num_leds	= ARRAY_SIZE(pcf_gpio_leds2),
+};
+
+static struct platform_device pcf_leds2 = {
+	.name	= "leds-gpio",
+	.id	= 2,
+	.dev	= {
+		.platform_data	= &pcf_gpio_led_info2,
+	}
+};
+
+/* PCF8574 0x24 GPIO U1 on the GS_2G-OPT23-A_V0 board (Modem) */
+static struct gpio_led pcf_gpio_leds3[] = {
+	{ /* bit 0 */
+		.name			= "gpo:modem_power",
+		.gpio			= PCF_GPIO_MODEM_POWER,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+		/* bits 1 and 2 not used */
+	{ /* bit 3 */
+		.name			= "gpo:modem_reset",
+		.gpio			= PCF_GPIO_MODEM_RESET,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	},
+		/* bits 4, 5 and 6 not used */
+	{ /* bit 7 */
+		.name			= "gpo:trx_reset",
+		.gpio			= PCF_GPIO_TRX_RESET,
+		.active_low		= 1,
+		.default_trigger	= "none",
+		.default_state		= LEDS_GPIO_DEFSTATE_ON,
+	}
+};
+
+static struct gpio_led_platform_data pcf_gpio_led_info3 = {
+	.leds		= pcf_gpio_leds3,
+	.num_leds	= ARRAY_SIZE(pcf_gpio_leds3),
+};
+
+static struct platform_device pcf_leds3 = {
+	.name	= "leds-gpio",
+	.id	= 3,
+	.dev	= {
+		.platform_data	= &pcf_gpio_led_info3,
+	}
+};
+
+static void __init gsia18s_pcf_leds_init(void)
+{
+	platform_device_register(&pcf_leds1);
+	platform_device_register(&pcf_leds2);
+	platform_device_register(&pcf_leds3);
+}
+
+/*
+ * SPI busses.
+ */
+static struct spi_board_info gsia18s_spi_devices[] = {
+	{ /* User accessible spi0, cs0 used for communication with MSP RTC */
+		.modalias	= "spidev",
+		.bus_num	= 0,
+		.chip_select	= 0,
+		.max_speed_hz	= 580000,
+		.mode		= SPI_MODE_1,
+	},
+	{ /* User accessible spi1, cs0 used for communication with int. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 0,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	},
+	{ /* User accessible spi1, cs1 used for communication with ext. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 1,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	},
+	{ /* User accessible spi1, cs2 used for communication with ext. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 2,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	},
+	{ /* User accessible spi1, cs3 used for communication with ext. DSP */
+		.modalias	= "spidev",
+		.bus_num	= 1,
+		.chip_select	= 3,
+		.max_speed_hz	= 5600000,
+		.mode		= SPI_MODE_0,
+	}
+};
+
+/*
+ * GPI Buttons
+ */
+static struct gpio_keys_button buttons[] = {
+	{
+		.gpio		= GPIO_TRIG_NET_IN,
+		.code		= BTN_1,
+		.desc		= "TRIG_NET_IN",
+		.type		= EV_KEY,
+		.active_low	= 0,
+		.wakeup		= 1,
+	},
+	{ /* SW80 on the GS_IA18_S-MN board*/
+		.gpio		= GPIO_CARD_UNMOUNT_0,
+		.code		= BTN_2,
+		.desc		= "Card umount 0",
+		.type		= EV_KEY,
+		.active_low	= 1,
+		.wakeup		= 1,
+	},
+	{ /* SW79 on the GS_IA18_S-MN board*/
+		.gpio		= GPIO_CARD_UNMOUNT_1,
+		.code		= BTN_3,
+		.desc		= "Card umount 1",
+		.type		= EV_KEY,
+		.active_low	= 1,
+		.wakeup		= 1,
+	},
+	{ /* SW280 on the GS_IA18-CB board*/
+		.gpio		= GPIO_KEY_POWER,
+		.code		= KEY_POWER,
+		.desc		= "Power Off Button",
+		.type		= EV_KEY,
+		.active_low	= 0,
+		.wakeup		= 1,
+	}
+};
+
+static struct gpio_keys_platform_data button_data = {
+	.buttons	= buttons,
+	.nbuttons	= ARRAY_SIZE(buttons),
+};
+
+static struct platform_device button_device = {
+	.name		= "gpio-keys",
+	.id		= -1,
+	.num_resources	= 0,
+	.dev		= {
+		.platform_data	= &button_data,
+	}
+};
+
+static void __init gsia18s_add_device_buttons(void)
+{
+	at91_set_gpio_input(GPIO_TRIG_NET_IN, 1);
+	at91_set_deglitch(GPIO_TRIG_NET_IN, 1);
+	at91_set_gpio_input(GPIO_CARD_UNMOUNT_0, 1);
+	at91_set_deglitch(GPIO_CARD_UNMOUNT_0, 1);
+	at91_set_gpio_input(GPIO_CARD_UNMOUNT_1, 1);
+	at91_set_deglitch(GPIO_CARD_UNMOUNT_1, 1);
+	at91_set_gpio_input(GPIO_KEY_POWER, 0);
+	at91_set_deglitch(GPIO_KEY_POWER, 1);
+
+	platform_device_register(&button_device);
+}
+
+/*
+ * I2C
+ */
+static int pcf8574x_0x20_setup(struct i2c_client *client, int gpio,
+				unsigned int ngpio, void *context)
+{
+	int status;
+
+	status = gpio_request(gpio + PCF_GPIO_ETH_DETECT, "eth_det");
+	if (status < 0) {
+		pr_err("error: can't request GPIO%d\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+	status = gpio_direction_input(gpio + PCF_GPIO_ETH_DETECT);
+	if (status < 0) {
+		pr_err("error: can't setup GPIO%d as input\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+	status = gpio_export(gpio + PCF_GPIO_ETH_DETECT, false);
+	if (status < 0) {
+		pr_err("error: can't export GPIO%d\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+	status = gpio_sysfs_set_active_low(gpio + PCF_GPIO_ETH_DETECT, 1);
+	if (status < 0) {
+		pr_err("error: gpio_sysfs_set active_low(GPIO%d, 1)\n",
+			gpio + PCF_GPIO_ETH_DETECT);
+		return status;
+	}
+
+	return 0;
+}
+
+static int pcf8574x_0x20_teardown(struct i2c_client *client, int gpio,
+					unsigned ngpio, void *context)
+{
+	gpio_free(gpio + PCF_GPIO_ETH_DETECT);
+	return 0;
+}
+
+static struct pcf857x_platform_data pcf20_pdata = {
+	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE0,
+	.n_latch	= (1 << 4),
+	.setup		= pcf8574x_0x20_setup,
+	.teardown	= pcf8574x_0x20_teardown,
+};
+
+static struct pcf857x_platform_data pcf22_pdata = {
+	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE1,
+};
+
+static struct pcf857x_platform_data pcf24_pdata = {
+	.gpio_base	= GS_IA18_S_PCF_GPIO_BASE2,
+};
+
+static struct i2c_board_info __initdata gsia18s_i2c_devices[] = {
+	{ /* U1 on the GS_IA18-CB_V3 board */
+		I2C_BOARD_INFO("pcf8574", 0x20),
+		.platform_data = &pcf20_pdata,
+	},
+	{ /* U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
+		I2C_BOARD_INFO("pcf8574", 0x22),
+		.platform_data = &pcf22_pdata,
+	},
+	{ /* U1 on the GS_2G-OPT23-A_V0 board (Modem) */
+		I2C_BOARD_INFO("pcf8574", 0x24),
+		.platform_data = &pcf24_pdata,
+	},
+	{ /* U161 on the GS_IA18_S-MN board */
+		I2C_BOARD_INFO("24c1024", 0x50),
+	},
+	{ /* U162 on the GS_IA18_S-MN board */
+		I2C_BOARD_INFO("24c01", 0x53),
+	},
+};
+
+/*
+ * Compact Flash
+ */
+static struct at91_cf_data __initdata gsia18s_cf1_data = {
+	.irq_pin	= AT91_PIN_PA27,
+	.det_pin	= AT91_PIN_PB30,
+	.rst_pin	= AT91_PIN_PB31,
+	.chipselect	= 5,
+	.flags		= AT91_CF_TRUE_IDE,
+};
+
+/* Power Off by RTC */
+static void gsia18s_power_off(void)
+{
+	pr_notice("Power supply will be switched off automatically now or after 60 seconds without ArmDAS.\n");
+	at91_set_gpio_output(AT91_PIN_PA25, 1);
+	/* Spin to death... */
+	while (1)
+		;
+}
+
+static int __init gsia18s_power_off_init(void)
+{
+	pm_power_off = gsia18s_power_off;
+	return 0;
+}
+
+/* ---------------------------------------------------------------------------*/
+
+static void __init gsia18s_board_init(void)
+{
+	stamp9g20_board_init();
+	at91_add_device_usbh(&usbh_data);
+	at91_add_device_udc(&udc_data);
+	at91_add_device_eth(&macb_data);
+	gsia18s_leds_init();
+	gsia18s_pcf_leds_init();
+	gsia18s_add_device_buttons();
+	at91_add_device_i2c(gsia18s_i2c_devices,
+				ARRAY_SIZE(gsia18s_i2c_devices));
+	at91_add_device_cf(&gsia18s_cf1_data);
+	at91_add_device_spi(gsia18s_spi_devices,
+				ARRAY_SIZE(gsia18s_spi_devices));
+	gsia18s_power_off_init();
+}
+
+MACHINE_START(GSIA18S, "GS_IA18_S")
+	.boot_params	= AT91_SDRAM_BASE + 0x100,
+	.timer		= &at91sam926x_timer,
+	.map_io		= gsia18s_map_io,
+	.init_irq	= init_irq,
+	.init_machine	= gsia18s_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 86ff4b5..6c999db 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -37,7 +37,6 @@
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
 
-#include <mach/hardware.h>
 #include <mach/board.h>
 #include <mach/gpio.h>
 #include <mach/at91sam9_smc.h>
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index ae4772e..af818a2 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -274,10 +274,10 @@
 static u32 wakeups[MAX_GPIO_BANKS];
 static u32 backups[MAX_GPIO_BANKS];
 
-static int gpio_irq_set_wake(unsigned pin, unsigned state)
+static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
 {
-	unsigned	mask = pin_to_mask(pin);
-	unsigned	bank = (pin - PIN_BASE) / 32;
+	unsigned	mask = pin_to_mask(d->irq);
+	unsigned	bank = (d->irq - PIN_BASE) / 32;
 
 	if (unlikely(bank >= MAX_GPIO_BANKS))
 		return -EINVAL;
@@ -344,25 +344,25 @@
  * IRQ0..IRQ6 should be configurable, e.g. level vs edge triggering.
  */
 
-static void gpio_irq_mask(unsigned pin)
+static void gpio_irq_mask(struct irq_data *d)
 {
-	void __iomem	*pio = pin_to_controller(pin);
-	unsigned	mask = pin_to_mask(pin);
+	void __iomem	*pio = pin_to_controller(d->irq);
+	unsigned	mask = pin_to_mask(d->irq);
 
 	if (pio)
 		__raw_writel(mask, pio + PIO_IDR);
 }
 
-static void gpio_irq_unmask(unsigned pin)
+static void gpio_irq_unmask(struct irq_data *d)
 {
-	void __iomem	*pio = pin_to_controller(pin);
-	unsigned	mask = pin_to_mask(pin);
+	void __iomem	*pio = pin_to_controller(d->irq);
+	unsigned	mask = pin_to_mask(d->irq);
 
 	if (pio)
 		__raw_writel(mask, pio + PIO_IER);
 }
 
-static int gpio_irq_type(unsigned pin, unsigned type)
+static int gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	switch (type) {
 	case IRQ_TYPE_NONE:
@@ -375,10 +375,10 @@
 
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
-	.mask		= gpio_irq_mask,
-	.unmask		= gpio_irq_unmask,
-	.set_type	= gpio_irq_type,
-	.set_wake	= gpio_irq_set_wake,
+	.irq_mask	= gpio_irq_mask,
+	.irq_unmask	= gpio_irq_unmask,
+	.irq_set_type	= gpio_irq_type,
+	.irq_set_wake	= gpio_irq_set_wake,
 };
 
 static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -393,7 +393,7 @@
 	pio = at91_gpio->regbase;
 
 	/* temporarily mask (level sensitive) parent IRQ */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	for (;;) {
 		/* Reading ISR acks pending (edge triggered) GPIO interrupts.
 		 * When there none are pending, we're finished unless we need
@@ -419,7 +419,7 @@
 					 * another IRQ must be generated before it actually gets
 					 * here to be disabled on the GPIO controller.
 					 */
-					gpio_irq_mask(pin);
+					gpio_irq_mask(irq_get_irq_data(pin));
 				}
 				else
 					generic_handle_irq(pin);
@@ -429,7 +429,7 @@
 			isr >>= 1;
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	/* now it may re-trigger */
 }
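
gpio_irq_handler() above is a chained handler: it masks the level-triggered parent interrupt, repeatedly reads the bank's interrupt status register, hands every set bit to the matching per-pin handler, and finally unmasks the parent. A stand-alone sketch of that demultiplexing loop over one status word (a made-up handle_pin() stands in for generic_handle_irq()):

#include <stdio.h>
#include <stdint.h>

static void handle_pin(unsigned int pin)
{
	printf("handling GPIO pin %u\n", pin);
}

/* dispatch every set bit of a (pretend) interrupt status register */
static void demux_bank(uint32_t isr)
{
	while (isr) {
		unsigned int pin = __builtin_ctz(isr);	/* lowest pending bit */

		handle_pin(pin);
		isr &= isr - 1;		/* clear that bit and keep going */
	}
}

int main(void)
{
	/* pretend pins 3, 7 and 21 raised interrupts at the same time */
	demux_bank((1u << 3) | (1u << 7) | (1u << 21));
	return 0;
}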
 
diff --git a/arch/arm/mach-at91/include/mach/gsia18s.h b/arch/arm/mach-at91/include/mach/gsia18s.h
new file mode 100644
index 0000000..307c194
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/gsia18s.h
@@ -0,0 +1,33 @@
+/* Buttons */
+#define GPIO_TRIG_NET_IN		AT91_PIN_PB21
+#define GPIO_CARD_UNMOUNT_0		AT91_PIN_PB13
+#define GPIO_CARD_UNMOUNT_1		AT91_PIN_PB12
+#define GPIO_KEY_POWER			AT91_PIN_PA25
+
+/* PCF8574 0x20 GPIO - U1 on the GS_IA18-CB_V3 board */
+#define GS_IA18_S_PCF_GPIO_BASE0	NR_BUILTIN_GPIO
+#define PCF_GPIO_HDC_POWER		(GS_IA18_S_PCF_GPIO_BASE0 + 0)
+#define PCF_GPIO_WIFI_SETUP		(GS_IA18_S_PCF_GPIO_BASE0 + 1)
+#define PCF_GPIO_WIFI_ENABLE		(GS_IA18_S_PCF_GPIO_BASE0 + 2)
+#define PCF_GPIO_WIFI_RESET		(GS_IA18_S_PCF_GPIO_BASE0 + 3)
+#define PCF_GPIO_ETH_DETECT		4 /* this is a GPI */
+#define PCF_GPIO_GPS_SETUP		(GS_IA18_S_PCF_GPIO_BASE0 + 5)
+#define PCF_GPIO_GPS_STANDBY		(GS_IA18_S_PCF_GPIO_BASE0 + 6)
+#define PCF_GPIO_GPS_POWER		(GS_IA18_S_PCF_GPIO_BASE0 + 7)
+
+/* PCF8574 0x22 GPIO - U1 on the GS_2G_OPT1-A_V0 board (Alarm) */
+#define GS_IA18_S_PCF_GPIO_BASE1	(GS_IA18_S_PCF_GPIO_BASE0 + 8)
+#define PCF_GPIO_ALARM1			(GS_IA18_S_PCF_GPIO_BASE1 + 0)
+#define PCF_GPIO_ALARM2			(GS_IA18_S_PCF_GPIO_BASE1 + 1)
+#define PCF_GPIO_ALARM3			(GS_IA18_S_PCF_GPIO_BASE1 + 2)
+#define PCF_GPIO_ALARM4			(GS_IA18_S_PCF_GPIO_BASE1 + 3)
+/* bits 4, 5, 6 not used */
+#define PCF_GPIO_ALARM_V_RELAY_ON	(GS_IA18_S_PCF_GPIO_BASE1 + 7)
+
+/* PCF8574 0x24 GPIO U1 on the GS_2G-OPT23-A_V0 board (Modem) */
+#define GS_IA18_S_PCF_GPIO_BASE2	(GS_IA18_S_PCF_GPIO_BASE1 + 8)
+#define PCF_GPIO_MODEM_POWER		(GS_IA18_S_PCF_GPIO_BASE2 + 0)
+#define PCF_GPIO_MODEM_RESET		(GS_IA18_S_PCF_GPIO_BASE2 + 3)
+/* bits 1, 2, 4, 5 not used */
+#define PCF_GPIO_TRX_RESET		(GS_IA18_S_PCF_GPIO_BASE2 + 6)
+/* bit 7 not used */
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index da3494a..b56d6b3a 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -34,23 +34,23 @@
 #include <asm/mach/map.h>
 
 
-static void at91_aic_mask_irq(unsigned int irq)
+static void at91_aic_mask_irq(struct irq_data *d)
 {
 	/* Disable interrupt on AIC */
-	at91_sys_write(AT91_AIC_IDCR, 1 << irq);
+	at91_sys_write(AT91_AIC_IDCR, 1 << d->irq);
 }
 
-static void at91_aic_unmask_irq(unsigned int irq)
+static void at91_aic_unmask_irq(struct irq_data *d)
 {
 	/* Enable interrupt on AIC */
-	at91_sys_write(AT91_AIC_IECR, 1 << irq);
+	at91_sys_write(AT91_AIC_IECR, 1 << d->irq);
 }
 
 unsigned int at91_extern_irq;
 
 #define is_extern_irq(irq) ((1 << (irq)) & at91_extern_irq)
 
-static int at91_aic_set_type(unsigned irq, unsigned type)
+static int at91_aic_set_type(struct irq_data *d, unsigned type)
 {
 	unsigned int smr, srctype;
 
@@ -62,13 +62,13 @@
 		srctype = AT91_AIC_SRCTYPE_RISING;
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		if ((irq == AT91_ID_FIQ) || is_extern_irq(irq))		/* only supported on external interrupts */
+		if ((d->irq == AT91_ID_FIQ) || is_extern_irq(d->irq))		/* only supported on external interrupts */
 			srctype = AT91_AIC_SRCTYPE_LOW;
 		else
 			return -EINVAL;
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		if ((irq == AT91_ID_FIQ) || is_extern_irq(irq))		/* only supported on external interrupts */
+		if ((d->irq == AT91_ID_FIQ) || is_extern_irq(d->irq))		/* only supported on external interrupts */
 			srctype = AT91_AIC_SRCTYPE_FALLING;
 		else
 			return -EINVAL;
@@ -77,8 +77,8 @@
 		return -EINVAL;
 	}
 
-	smr = at91_sys_read(AT91_AIC_SMR(irq)) & ~AT91_AIC_SRCTYPE;
-	at91_sys_write(AT91_AIC_SMR(irq), smr | srctype);
+	smr = at91_sys_read(AT91_AIC_SMR(d->irq)) & ~AT91_AIC_SRCTYPE;
+	at91_sys_write(AT91_AIC_SMR(d->irq), smr | srctype);
 	return 0;
 }
 
@@ -87,15 +87,15 @@
 static u32 wakeups;
 static u32 backups;
 
-static int at91_aic_set_wake(unsigned irq, unsigned value)
+static int at91_aic_set_wake(struct irq_data *d, unsigned value)
 {
-	if (unlikely(irq >= 32))
+	if (unlikely(d->irq >= 32))
 		return -EINVAL;
 
 	if (value)
-		wakeups |= (1 << irq);
+		wakeups |= (1 << d->irq);
 	else
-		wakeups &= ~(1 << irq);
+		wakeups &= ~(1 << d->irq);
 
 	return 0;
 }
@@ -119,11 +119,11 @@
 
 static struct irq_chip at91_aic_chip = {
 	.name		= "AIC",
-	.ack		= at91_aic_mask_irq,
-	.mask		= at91_aic_mask_irq,
-	.unmask		= at91_aic_unmask_irq,
-	.set_type	= at91_aic_set_type,
-	.set_wake	= at91_aic_set_wake,
+	.irq_ack	= at91_aic_mask_irq,
+	.irq_mask	= at91_aic_mask_irq,
+	.irq_unmask	= at91_aic_unmask_irq,
+	.irq_set_type	= at91_aic_set_type,
+	.irq_set_wake	= at91_aic_set_wake,
 };
 
 /*
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index dafbacc..ea53f4d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -301,7 +301,7 @@
 }
 
 
-static struct platform_suspend_ops at91_pm_ops ={
+static const struct platform_suspend_ops at91_pm_ops = {
 	.valid	= at91_pm_valid_state,
 	.begin	= at91_pm_begin,
 	.enter	= at91_pm_enter,
diff --git a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
index b3a61d8..96273ff 100644
--- a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
+++ b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
@@ -757,7 +757,7 @@
 		t = t << 1;
 	}
 
-	/* Intialize the result */
+	/* Initialize the result */
 	r = 0;
 
 	do {
diff --git a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
index 7b9bac2..6b9be2e 100644
--- a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
+++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
@@ -893,7 +893,7 @@
 */
 /****************************************************************************/
 uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle,	/*  [ IN ]  DMA Channel handle */
-					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controler attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
+					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controller attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
     ) {
 	dmacHw_CBLK_t *pCblk = dmacHw_HANDLE_TO_CBLK(handle);
 
diff --git a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
index ff7b436..77f84b4 100644
--- a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
+++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
@@ -316,7 +316,7 @@
 /**
 *  @brief   Check if DMA channel is the flow controller
 *
-*  @return  1 : If DMA is a flow controler
+*  @return  1 : If DMA is a flow controller
 *           0 : Peripheral is the flow controller
 *
 *  @note
diff --git a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
index 5c1c9a0..16225e4 100644
--- a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
+++ b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
@@ -558,7 +558,7 @@
 		t = t << 1;
 	}
 
-	/* Intialize the result */
+	/* Initialize the result */
 	r = 0;
 
 	do {
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 77eb35c..8d1baf3 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -671,7 +671,7 @@
 
 /****************************************************************************/
 /**
-*   Intializes all of the data structures associated with the DMA.
+*   Initializes all of the data structures associated with the DMA.
 *   @return
 *       >= 0    - Initialization was successfull.
 *
diff --git a/arch/arm/mach-bcmring/include/csp/dmacHw.h b/arch/arm/mach-bcmring/include/csp/dmacHw.h
index 5d51013..6c8da2b 100644
--- a/arch/arm/mach-bcmring/include/csp/dmacHw.h
+++ b/arch/arm/mach-bcmring/include/csp/dmacHw.h
@@ -590,7 +590,7 @@
 */
 /****************************************************************************/
 uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle,	/*  [ IN ]  DMA Channel handle  */
-					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controler attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
+					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controller attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
     );
 
 #endif /* _DMACHW_H */
diff --git a/arch/arm/mach-bcmring/include/csp/tmrHw.h b/arch/arm/mach-bcmring/include/csp/tmrHw.h
index f1236d0..2cbb530 100644
--- a/arch/arm/mach-bcmring/include/csp/tmrHw.h
+++ b/arch/arm/mach-bcmring/include/csp/tmrHw.h
@@ -76,7 +76,7 @@
 *           certain time interval
 *
 *  This function initializes a periodic timer to generate timer interrupt
-*  after every time interval in milisecond
+*  after every time interval in millisecond
 *
 *  @return   On success: Effective interval set in mili-second
 *            On failure: 0
@@ -93,7 +93,7 @@
 *           after certain time interval
 *
 *  This function initializes a periodic timer to generate a single ticks after
-*  certain time interval in milisecond
+*  certain time interval in millisecond
 *
 *  @return   On success: Effective interval set in mili-second
 *            On failure: 0
diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
index cbf334d..d67e2f8 100644
--- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
+++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
@@ -28,7 +28,7 @@
 
 /* Data type for DMA Link List Item */
 typedef struct {
-	uint32_t sar;		/* Source Adress Register.
+	uint32_t sar;		/* Source Address Register.
 				   Address must be aligned to CTLx.SRC_TR_WIDTH.             */
 	uint32_t dar;		/* Destination Address Register.
 				   Address must be aligned to CTLx.DST_TR_WIDTH.             */
diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
index 891cea8..f1ecf96 100644
--- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
+++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
@@ -35,7 +35,7 @@
 
 /* Data type representing DMA channel registers */
 typedef struct {
-	dmacHw_REG64_t ChannelSar;	/*  Source Adress Register. 64 bits (upper 32 bits are reserved)
+	dmacHw_REG64_t ChannelSar;	/*  Source Address Register. 64 bits (upper 32 bits are reserved)
 					   Address must be aligned to CTLx.SRC_TR_WIDTH.
 					 */
 	dmacHw_REG64_t ChannelDar;	/*  Destination Address Register.64 bits (upper 32 bits are reserved)
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c
index e315263..84dcda0 100644
--- a/arch/arm/mach-bcmring/irq.c
+++ b/arch/arm/mach-bcmring/irq.c
@@ -30,61 +30,61 @@
 #include <mach/csp/intcHw_reg.h>
 #include <mach/csp/mm_io.h>
 
-static void bcmring_mask_irq0(unsigned int irq)
+static void bcmring_mask_irq0(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC0_START),
+	writel(1 << (d->irq - IRQ_INTC0_START),
 	       MM_IO_BASE_INTC0 + INTCHW_INTENCLEAR);
 }
 
-static void bcmring_unmask_irq0(unsigned int irq)
+static void bcmring_unmask_irq0(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC0_START),
+	writel(1 << (d->irq - IRQ_INTC0_START),
 	       MM_IO_BASE_INTC0 + INTCHW_INTENABLE);
 }
 
-static void bcmring_mask_irq1(unsigned int irq)
+static void bcmring_mask_irq1(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC1_START),
+	writel(1 << (d->irq - IRQ_INTC1_START),
 	       MM_IO_BASE_INTC1 + INTCHW_INTENCLEAR);
 }
 
-static void bcmring_unmask_irq1(unsigned int irq)
+static void bcmring_unmask_irq1(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_INTC1_START),
+	writel(1 << (d->irq - IRQ_INTC1_START),
 	       MM_IO_BASE_INTC1 + INTCHW_INTENABLE);
 }
 
-static void bcmring_mask_irq2(unsigned int irq)
+static void bcmring_mask_irq2(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_SINTC_START),
+	writel(1 << (d->irq - IRQ_SINTC_START),
 	       MM_IO_BASE_SINTC + INTCHW_INTENCLEAR);
 }
 
-static void bcmring_unmask_irq2(unsigned int irq)
+static void bcmring_unmask_irq2(struct irq_data *d)
 {
-	writel(1 << (irq - IRQ_SINTC_START),
+	writel(1 << (d->irq - IRQ_SINTC_START),
 	       MM_IO_BASE_SINTC + INTCHW_INTENABLE);
 }
 
 static struct irq_chip bcmring_irq0_chip = {
 	.name = "ARM-INTC0",
-	.ack = bcmring_mask_irq0,
-	.mask = bcmring_mask_irq0,	/* mask a specific interrupt, blocking its delivery. */
-	.unmask = bcmring_unmask_irq0,	/* unmaks an interrupt */
+	.irq_ack = bcmring_mask_irq0,
+	.irq_mask = bcmring_mask_irq0,	/* mask a specific interrupt, blocking its delivery. */
+	.irq_unmask = bcmring_unmask_irq0,	/* unmask an interrupt */
 };
 
 static struct irq_chip bcmring_irq1_chip = {
 	.name = "ARM-INTC1",
-	.ack = bcmring_mask_irq1,
-	.mask = bcmring_mask_irq1,
-	.unmask = bcmring_unmask_irq1,
+	.irq_ack = bcmring_mask_irq1,
+	.irq_mask = bcmring_mask_irq1,
+	.irq_unmask = bcmring_unmask_irq1,
 };
 
 static struct irq_chip bcmring_irq2_chip = {
 	.name = "ARM-SINTC",
-	.ack = bcmring_mask_irq2,
-	.mask = bcmring_mask_irq2,
-	.unmask = bcmring_unmask_irq2,
+	.irq_ack = bcmring_mask_irq2,
+	.irq_mask = bcmring_mask_irq2,
+	.irq_unmask = bcmring_unmask_irq2,
 };
 
 static void vic_init(void __iomem *base, struct irq_chip *chip,
diff --git a/arch/arm/mach-clps711x/irq.c b/arch/arm/mach-clps711x/irq.c
index 9a12d85..86da7a1 100644
--- a/arch/arm/mach-clps711x/irq.c
+++ b/arch/arm/mach-clps711x/irq.c
@@ -27,24 +27,24 @@
 
 #include <asm/hardware/clps7111.h>
 
-static void int1_mask(unsigned int irq)
+static void int1_mask(struct irq_data *d)
 {
 	u32 intmr1;
 
 	intmr1 = clps_readl(INTMR1);
-	intmr1 &= ~(1 << irq);
+	intmr1 &= ~(1 << d->irq);
 	clps_writel(intmr1, INTMR1);
 }
 
-static void int1_ack(unsigned int irq)
+static void int1_ack(struct irq_data *d)
 {
 	u32 intmr1;
 
 	intmr1 = clps_readl(INTMR1);
-	intmr1 &= ~(1 << irq);
+	intmr1 &= ~(1 << d->irq);
 	clps_writel(intmr1, INTMR1);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_CSINT:  clps_writel(0, COEOI);  break;
 	case IRQ_TC1OI:  clps_writel(0, TC1EOI); break;
 	case IRQ_TC2OI:  clps_writel(0, TC2EOI); break;
@@ -54,56 +54,56 @@
 	}
 }
 
-static void int1_unmask(unsigned int irq)
+static void int1_unmask(struct irq_data *d)
 {
 	u32 intmr1;
 
 	intmr1 = clps_readl(INTMR1);
-	intmr1 |= 1 << irq;
+	intmr1 |= 1 << d->irq;
 	clps_writel(intmr1, INTMR1);
 }
 
 static struct irq_chip int1_chip = {
-	.ack	= int1_ack,
-	.mask	= int1_mask,
-	.unmask = int1_unmask,
+	.irq_ack	= int1_ack,
+	.irq_mask	= int1_mask,
+	.irq_unmask	= int1_unmask,
 };
 
-static void int2_mask(unsigned int irq)
+static void int2_mask(struct irq_data *d)
 {
 	u32 intmr2;
 
 	intmr2 = clps_readl(INTMR2);
-	intmr2 &= ~(1 << (irq - 16));
+	intmr2 &= ~(1 << (d->irq - 16));
 	clps_writel(intmr2, INTMR2);
 }
 
-static void int2_ack(unsigned int irq)
+static void int2_ack(struct irq_data *d)
 {
 	u32 intmr2;
 
 	intmr2 = clps_readl(INTMR2);
-	intmr2 &= ~(1 << (irq - 16));
+	intmr2 &= ~(1 << (d->irq - 16));
 	clps_writel(intmr2, INTMR2);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_KBDINT: clps_writel(0, KBDEOI); break;
 	}
 }
 
-static void int2_unmask(unsigned int irq)
+static void int2_unmask(struct irq_data *d)
 {
 	u32 intmr2;
 
 	intmr2 = clps_readl(INTMR2);
-	intmr2 |= 1 << (irq - 16);
+	intmr2 |= 1 << (d->irq - 16);
 	clps_writel(intmr2, INTMR2);
 }
 
 static struct irq_chip int2_chip = {
-	.ack	= int2_ack,
-	.mask	= int2_mask,
-	.unmask = int2_unmask,
+	.irq_ack	= int2_ack,
+	.irq_mask	= int2_mask,
+	.irq_unmask	= int2_unmask,
 };
 
 void __init clps711x_init_irq(void)
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index bb4c40e..9abc80a 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -26,30 +26,30 @@
 	__raw_writel(value, davinci_intc_base + offset);
 }
 
-static void cp_intc_ack_irq(unsigned int irq)
+static void cp_intc_ack_irq(struct irq_data *d)
 {
-	cp_intc_write(irq, CP_INTC_SYS_STAT_IDX_CLR);
+	cp_intc_write(d->irq, CP_INTC_SYS_STAT_IDX_CLR);
 }
 
 /* Disable interrupt */
-static void cp_intc_mask_irq(unsigned int irq)
+static void cp_intc_mask_irq(struct irq_data *d)
 {
 	/* XXX don't know why we need to disable nIRQ here... */
 	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR);
-	cp_intc_write(irq, CP_INTC_SYS_ENABLE_IDX_CLR);
+	cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_CLR);
 	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
 }
 
 /* Enable interrupt */
-static void cp_intc_unmask_irq(unsigned int irq)
+static void cp_intc_unmask_irq(struct irq_data *d)
 {
-	cp_intc_write(irq, CP_INTC_SYS_ENABLE_IDX_SET);
+	cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_SET);
 }
 
-static int cp_intc_set_irq_type(unsigned int irq, unsigned int flow_type)
+static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	unsigned reg		= BIT_WORD(irq);
-	unsigned mask		= BIT_MASK(irq);
+	unsigned reg		= BIT_WORD(d->irq);
+	unsigned mask		= BIT_MASK(d->irq);
 	unsigned polarity	= cp_intc_read(CP_INTC_SYS_POLARITY(reg));
 	unsigned type		= cp_intc_read(CP_INTC_SYS_TYPE(reg));
 
@@ -85,18 +85,18 @@
  * generic drivers which call {enable|disable}_irq_wake for
  * wake up interrupt sources (eg RTC on DA850).
  */
-static int cp_intc_set_wake(unsigned int irq, unsigned int on)
+static int cp_intc_set_wake(struct irq_data *d, unsigned int on)
 {
 	return 0;
 }
 
 static struct irq_chip cp_intc_irq_chip = {
 	.name		= "cp_intc",
-	.ack		= cp_intc_ack_irq,
-	.mask		= cp_intc_mask_irq,
-	.unmask		= cp_intc_unmask_irq,
-	.set_type	= cp_intc_set_irq_type,
-	.set_wake	= cp_intc_set_wake,
+	.irq_ack	= cp_intc_ack_irq,
+	.irq_mask	= cp_intc_mask_irq,
+	.irq_unmask	= cp_intc_unmask_irq,
+	.irq_set_type	= cp_intc_set_irq_type,
+	.irq_set_wake	= cp_intc_set_wake,
 };
 
 void __init cp_intc_init(void)
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 343de73..4a68c2b 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -132,7 +132,7 @@
 	return ret;
 }
 
-static int __init davinci_cpu_init(struct cpufreq_policy *policy)
+static int davinci_cpu_init(struct cpufreq_policy *policy)
 {
 	int result = 0;
 	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 9eec630..beda8a4 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -480,8 +480,15 @@
 	.resource	= da850_mcasp_resources,
 };
 
+struct platform_device davinci_pcm_device = {
+	.name	= "davinci-pcm-audio",
+	.id	= -1,
+};
+
 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
 {
+	platform_device_register(&davinci_pcm_device);
+
 	/* DA830/OMAP-L137 has 3 instances of McASP */
 	if (cpu_is_davinci_da830() && id == 1) {
 		da830_mcasp1_device.dev.platform_data = pdata;
diff --git a/arch/arm/mach-davinci/gpio-tnetv107x.c b/arch/arm/mach-davinci/gpio-tnetv107x.c
index d102986..3fa3e28 100644
--- a/arch/arm/mach-davinci/gpio-tnetv107x.c
+++ b/arch/arm/mach-davinci/gpio-tnetv107x.c
@@ -58,7 +58,7 @@
 
 	spin_lock_irqsave(&ctlr->lock, flags);
 
-	gpio_reg_set_bit(&regs->enable, gpio);
+	gpio_reg_set_bit(regs->enable, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -74,7 +74,7 @@
 
 	spin_lock_irqsave(&ctlr->lock, flags);
 
-	gpio_reg_clear_bit(&regs->enable, gpio);
+	gpio_reg_clear_bit(regs->enable, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 }
@@ -88,7 +88,7 @@
 
 	spin_lock_irqsave(&ctlr->lock, flags);
 
-	gpio_reg_set_bit(&regs->direction, gpio);
+	gpio_reg_set_bit(regs->direction, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -106,11 +106,11 @@
 	spin_lock_irqsave(&ctlr->lock, flags);
 
 	if (value)
-		gpio_reg_set_bit(&regs->data_out, gpio);
+		gpio_reg_set_bit(regs->data_out, gpio);
 	else
-		gpio_reg_clear_bit(&regs->data_out, gpio);
+		gpio_reg_clear_bit(regs->data_out, gpio);
 
-	gpio_reg_clear_bit(&regs->direction, gpio);
+	gpio_reg_clear_bit(regs->direction, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -124,7 +124,7 @@
 	unsigned gpio = chip->base + offset;
 	int ret;
 
-	ret = gpio_reg_get_bit(&regs->data_in, gpio);
+	ret = gpio_reg_get_bit(regs->data_in, gpio);
 
 	return ret ? 1 : 0;
 }
@@ -140,9 +140,9 @@
 	spin_lock_irqsave(&ctlr->lock, flags);
 
 	if (value)
-		gpio_reg_set_bit(&regs->data_out, gpio);
+		gpio_reg_set_bit(regs->data_out, gpio);
 	else
-		gpio_reg_clear_bit(&regs->data_out, gpio);
+		gpio_reg_clear_bit(regs->data_out, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 }
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index bf0ff58..20d66e5 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -205,20 +205,20 @@
  * serve as EDMA event triggers.
  */
 
-static void gpio_irq_disable(unsigned irq)
+static void gpio_irq_disable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
 
 	__raw_writel(mask, &g->clr_falling);
 	__raw_writel(mask, &g->clr_rising);
 }
 
-static void gpio_irq_enable(unsigned irq)
+static void gpio_irq_enable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
-	unsigned status = irq_desc[irq].status;
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
+	unsigned status = irq_desc[d->irq].status;
 
 	status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
 	if (!status)
@@ -230,19 +230,19 @@
 		__raw_writel(mask, &g->set_rising);
 }
 
-static int gpio_irq_type(unsigned irq, unsigned trigger)
+static int gpio_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
 
 	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
 		return -EINVAL;
 
-	irq_desc[irq].status &= ~IRQ_TYPE_SENSE_MASK;
-	irq_desc[irq].status |= trigger;
+	irq_desc[d->irq].status &= ~IRQ_TYPE_SENSE_MASK;
+	irq_desc[d->irq].status |= trigger;
 
 	/* don't enable the IRQ if it's currently disabled */
-	if (irq_desc[irq].depth == 0) {
+	if (irq_desc[d->irq].depth == 0) {
 		__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
 			     ? &g->set_falling : &g->clr_falling);
 		__raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
@@ -253,9 +253,9 @@
 
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
-	.enable		= gpio_irq_enable,
-	.disable	= gpio_irq_disable,
-	.set_type	= gpio_irq_type,
+	.irq_enable	= gpio_irq_enable,
+	.irq_disable	= gpio_irq_disable,
+	.irq_set_type	= gpio_irq_type,
 };
 
 static void
@@ -269,8 +269,8 @@
 		mask <<= 16;
 
 	/* temporarily mask (level sensitive) parent IRQ */
-	desc->chip->mask(irq);
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	while (1) {
 		u32		status;
 		int		n;
@@ -293,7 +293,7 @@
 			status >>= res;
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	/* now it may re-trigger */
 }
 
@@ -320,10 +320,10 @@
 		return -ENODEV;
 }
 
-static int gpio_irq_type_unbanked(unsigned irq, unsigned trigger)
+static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(irq);
-	u32 mask = (u32) get_irq_data(irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	u32 mask = (u32) irq_data_get_irq_data(d);
 
 	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
 		return -EINVAL;
@@ -397,7 +397,7 @@
 		irq = bank_irq;
 		gpio_irqchip_unbanked = *get_irq_desc_chip(irq_to_desc(irq));
 		gpio_irqchip_unbanked.name = "GPIO-AINTC";
-		gpio_irqchip_unbanked.set_type = gpio_irq_type_unbanked;
+		gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked;
 
 		/* default trigger: both edges */
 		g = gpio2regs(0);
diff --git a/arch/arm/mach-davinci/include/mach/clkdev.h b/arch/arm/mach-davinci/include/mach/clkdev.h
index 730c49d..14a5048 100644
--- a/arch/arm/mach-davinci/include/mach/clkdev.h
+++ b/arch/arm/mach-davinci/include/mach/clkdev.h
@@ -1,6 +1,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
 
+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
 	return 1;
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index 784ddf3..5e05c9b 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -53,14 +53,14 @@
 }
 
 /* Disable interrupt */
-static void davinci_mask_irq(unsigned int irq)
+static void davinci_mask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 	u32 l;
 
-	mask = 1 << IRQ_BIT(irq);
+	mask = 1 << IRQ_BIT(d->irq);
 
-	if (irq > 31) {
+	if (d->irq > 31) {
 		l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET);
 		l &= ~mask;
 		davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET);
@@ -72,14 +72,14 @@
 }
 
 /* Enable interrupt */
-static void davinci_unmask_irq(unsigned int irq)
+static void davinci_unmask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 	u32 l;
 
-	mask = 1 << IRQ_BIT(irq);
+	mask = 1 << IRQ_BIT(d->irq);
 
-	if (irq > 31) {
+	if (d->irq > 31) {
 		l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET);
 		l |= mask;
 		davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET);
@@ -91,23 +91,23 @@
 }
 
 /* EOI interrupt */
-static void davinci_ack_irq(unsigned int irq)
+static void davinci_ack_irq(struct irq_data *d)
 {
 	unsigned int mask;
 
-	mask = 1 << IRQ_BIT(irq);
+	mask = 1 << IRQ_BIT(d->irq);
 
-	if (irq > 31)
+	if (d->irq > 31)
 		davinci_irq_writel(mask, IRQ_REG1_OFFSET);
 	else
 		davinci_irq_writel(mask, IRQ_REG0_OFFSET);
 }
 
 static struct irq_chip davinci_irq_chip_0 = {
-	.name	= "AINTC",
-	.ack	= davinci_ack_irq,
-	.mask	= davinci_mask_irq,
-	.unmask = davinci_unmask_irq,
+	.name		= "AINTC",
+	.irq_ack	= davinci_ack_irq,
+	.irq_mask	= davinci_mask_irq,
+	.irq_unmask	= davinci_unmask_irq,
 };
 
 /* ARM Interrupt Controller Initialization */
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index fab953b..1bd73a04 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -110,7 +110,7 @@
 	return ret;
 }
 
-static struct platform_suspend_ops davinci_pm_ops = {
+static const struct platform_suspend_ops davinci_pm_ops = {
 	.enter		= davinci_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index f7a1258..fe627ab 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -770,7 +770,7 @@
 };
 
 static struct platform_device dove_sdio0 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 0,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
@@ -798,7 +798,7 @@
 };
 
 static struct platform_device dove_sdio1 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 1,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index 61bfcb3..9317f05 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -36,9 +36,9 @@
 	}
 }
 
-static void pmu_irq_mask(unsigned int irq)
+static void pmu_irq_mask(struct irq_data *d)
 {
-	int pin = irq_to_pmu(irq);
+	int pin = irq_to_pmu(d->irq);
 	u32 u;
 
 	u = readl(PMU_INTERRUPT_MASK);
@@ -46,9 +46,9 @@
 	writel(u, PMU_INTERRUPT_MASK);
 }
 
-static void pmu_irq_unmask(unsigned int irq)
+static void pmu_irq_unmask(struct irq_data *d)
 {
-	int pin = irq_to_pmu(irq);
+	int pin = irq_to_pmu(d->irq);
 	u32 u;
 
 	u = readl(PMU_INTERRUPT_MASK);
@@ -56,9 +56,9 @@
 	writel(u, PMU_INTERRUPT_MASK);
 }
 
-static void pmu_irq_ack(unsigned int irq)
+static void pmu_irq_ack(struct irq_data *d)
 {
-	int pin = irq_to_pmu(irq);
+	int pin = irq_to_pmu(d->irq);
 	u32 u;
 
 	u = ~(1 << (pin & 31));
@@ -67,9 +67,9 @@
 
 static struct irq_chip pmu_irq_chip = {
 	.name		= "pmu_irq",
-	.mask		= pmu_irq_mask,
-	.unmask		= pmu_irq_unmask,
-	.ack		= pmu_irq_ack,
+	.irq_mask	= pmu_irq_mask,
+	.irq_unmask	= pmu_irq_unmask,
+	.irq_ack	= pmu_irq_ack,
 };
 
 static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 5df4099..7df083f 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -35,20 +35,20 @@
 #define IRQ_STAT		0xff000000	/* read */
 #define IRQ_MCLR		0xff000000	/* write */
 
-static void ebsa110_mask_irq(unsigned int irq)
+static void ebsa110_mask_irq(struct irq_data *d)
 {
-	__raw_writeb(1 << irq, IRQ_MCLR);
+	__raw_writeb(1 << d->irq, IRQ_MCLR);
 }
 
-static void ebsa110_unmask_irq(unsigned int irq)
+static void ebsa110_unmask_irq(struct irq_data *d)
 {
-	__raw_writeb(1 << irq, IRQ_MSET);
+	__raw_writeb(1 << d->irq, IRQ_MSET);
 }
 
 static struct irq_chip ebsa110_irq_chip = {
-	.ack	= ebsa110_mask_irq,
-	.mask	= ebsa110_mask_irq,
-	.unmask = ebsa110_unmask_irq,
+	.irq_ack	= ebsa110_mask_irq,
+	.irq_mask	= ebsa110_mask_irq,
+	.irq_unmask	= ebsa110_unmask_irq,
 };
  
 static void __init ebsa110_init_irq(void)
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index ffdf87b..8207954 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -838,7 +838,7 @@
 static struct resource ep93xx_ac97_resources[] = {
 	{
 		.start	= EP93XX_AAC_PHYS_BASE,
-		.end	= EP93XX_AAC_PHYS_BASE + 0xb0 - 1,
+		.end	= EP93XX_AAC_PHYS_BASE + 0xac - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
index cf547ad..bec34b8 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/arch/arm/mach-ep93xx/gpio.c
@@ -112,13 +112,13 @@
 	generic_handle_irq(gpio_irq);
 }
 
-static void ep93xx_gpio_irq_ack(unsigned int irq)
+static void ep93xx_gpio_irq_ack(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 	int port_mask = 1 << (line & 7);
 
-	if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+	if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
 		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
 		ep93xx_gpio_update_int_params(port);
 	}
@@ -126,13 +126,13 @@
 	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
 }
 
-static void ep93xx_gpio_irq_mask_ack(unsigned int irq)
+static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 	int port_mask = 1 << (line & 7);
 
-	if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+	if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
 		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
 
 	gpio_int_unmasked[port] &= ~port_mask;
@@ -141,18 +141,18 @@
 	__raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
 }
 
-static void ep93xx_gpio_irq_mask(unsigned int irq)
+static void ep93xx_gpio_irq_mask(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 
 	gpio_int_unmasked[port] &= ~(1 << (line & 7));
 	ep93xx_gpio_update_int_params(port);
 }
 
-static void ep93xx_gpio_irq_unmask(unsigned int irq)
+static void ep93xx_gpio_irq_unmask(struct irq_data *d)
 {
-	int line = irq_to_gpio(irq);
+	int line = irq_to_gpio(d->irq);
 	int port = line >> 3;
 
 	gpio_int_unmasked[port] |= 1 << (line & 7);
@@ -164,10 +164,10 @@
  * edge (1) triggered, while gpio_int_type2 controls whether it
  * triggers on low/falling (0) or high/rising (1).
  */
-static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
+static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	const int gpio = irq_to_gpio(irq);
+	struct irq_desc *desc = irq_desc + d->irq;
+	const int gpio = irq_to_gpio(d->irq);
 	const int port = gpio >> 3;
 	const int port_mask = 1 << (gpio & 7);
 
@@ -220,11 +220,11 @@
 
 static struct irq_chip ep93xx_gpio_irq_chip = {
 	.name		= "GPIO",
-	.ack		= ep93xx_gpio_irq_ack,
-	.mask_ack	= ep93xx_gpio_irq_mask_ack,
-	.mask		= ep93xx_gpio_irq_mask,
-	.unmask		= ep93xx_gpio_irq_unmask,
-	.set_type	= ep93xx_gpio_irq_type,
+	.irq_ack	= ep93xx_gpio_irq_ack,
+	.irq_mask_ack	= ep93xx_gpio_irq_mask_ack,
+	.irq_mask	= ep93xx_gpio_irq_mask,
+	.irq_unmask	= ep93xx_gpio_irq_unmask,
+	.irq_set_type	= ep93xx_gpio_irq_type,
 };
 
 void __init ep93xx_gpio_init_irq(void)
@@ -427,6 +427,13 @@
 {
 	int i;
 
+	/* Set Ports C, D, E, G, and H for GPIO use */
+	ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
+				 EP93XX_SYSCON_DEVCFG_GONK |
+				 EP93XX_SYSCON_DEVCFG_EONIDE |
+				 EP93XX_SYSCON_DEVCFG_GONIDE |
+				 EP93XX_SYSCON_DEVCFG_HONIDE);
+
 	for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++)
 		gpiochip_add(&ep93xx_gpio_banks[i].chip);
 }
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 88b3dd8..84c5f25 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -75,20 +75,20 @@
 	IRQ_MASK_PCI_PERR,	/* 19 */
 };
 
-static void fb_mask_irq(unsigned int irq)
+static void fb_mask_irq(struct irq_data *d)
 {
-	*CSR_IRQ_DISABLE = fb_irq_mask[_DC21285_INR(irq)];
+	*CSR_IRQ_DISABLE = fb_irq_mask[_DC21285_INR(d->irq)];
 }
 
-static void fb_unmask_irq(unsigned int irq)
+static void fb_unmask_irq(struct irq_data *d)
 {
-	*CSR_IRQ_ENABLE = fb_irq_mask[_DC21285_INR(irq)];
+	*CSR_IRQ_ENABLE = fb_irq_mask[_DC21285_INR(d->irq)];
 }
 
 static struct irq_chip fb_chip = {
-	.ack	= fb_mask_irq,
-	.mask	= fb_mask_irq,
-	.unmask = fb_unmask_irq,
+	.irq_ack	= fb_mask_irq,
+	.irq_mask	= fb_mask_irq,
+	.irq_unmask	= fb_unmask_irq,
 };
 
 static void __init __fb_init_irq(void)
diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S
index 3c9e0c4..30b971d 100644
--- a/arch/arm/mach-footbridge/include/mach/debug-macro.S
+++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S
@@ -17,8 +17,8 @@
 	/* For NetWinder debugging */
 		.macro	addruart, rp, rv
 		mov	\rp, #0x000003f8
-		orr	\rv, \rp, #0x7c000000	@ physical
-		orr	\rp, \rp, #0xff000000	@ virtual
+		orr	\rv, \rp, #0xff000000	@ virtual
+		orr	\rp, \rp, #0x7c000000	@ physical
 		.endm
 
 #define UART_SHIFT	0
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index 8bfd06a..de7a5cb 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -30,61 +30,61 @@
 
 #include "common.h"
 
-static void isa_mask_pic_lo_irq(unsigned int irq)
+static void isa_mask_pic_lo_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO);
 }
 
-static void isa_ack_pic_lo_irq(unsigned int irq)
+static void isa_ack_pic_lo_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO);
 	outb(0x20, PIC_LO);
 }
 
-static void isa_unmask_pic_lo_irq(unsigned int irq)
+static void isa_unmask_pic_lo_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_LO) & ~mask, PIC_MASK_LO);
 }
 
 static struct irq_chip isa_lo_chip = {
-	.ack	= isa_ack_pic_lo_irq,
-	.mask	= isa_mask_pic_lo_irq,
-	.unmask = isa_unmask_pic_lo_irq,
+	.irq_ack	= isa_ack_pic_lo_irq,
+	.irq_mask	= isa_mask_pic_lo_irq,
+	.irq_unmask	= isa_unmask_pic_lo_irq,
 };
 
-static void isa_mask_pic_hi_irq(unsigned int irq)
+static void isa_mask_pic_hi_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI);
 }
 
-static void isa_ack_pic_hi_irq(unsigned int irq)
+static void isa_ack_pic_hi_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI);
 	outb(0x62, PIC_LO);
 	outb(0x20, PIC_HI);
 }
 
-static void isa_unmask_pic_hi_irq(unsigned int irq)
+static void isa_unmask_pic_hi_irq(struct irq_data *d)
 {
-	unsigned int mask = 1 << (irq & 7);
+	unsigned int mask = 1 << (d->irq & 7);
 
 	outb(inb(PIC_MASK_HI) & ~mask, PIC_MASK_HI);
 }
 
 static struct irq_chip isa_hi_chip = {
-	.ack	= isa_ack_pic_hi_irq,
-	.mask	= isa_mask_pic_hi_irq,
-	.unmask = isa_unmask_pic_hi_irq,
+	.irq_ack	= isa_ack_pic_hi_irq,
+	.irq_mask	= isa_mask_pic_hi_irq,
+	.irq_unmask	= isa_unmask_pic_hi_irq,
 };
 
 static void
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index fe3bd5a..fa3d333 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -54,33 +54,33 @@
 	__raw_writel(reg, base + GPIO_INT_EN);
 }
 
-static void gpio_ack_irq(unsigned int irq)
+static void gpio_ack_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int base = GPIO_BASE(gpio / 32);
 
 	__raw_writel(1 << (gpio % 32), base + GPIO_INT_CLR);
 }
 
-static void gpio_mask_irq(unsigned int irq)
+static void gpio_mask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int base = GPIO_BASE(gpio / 32);
 
 	_set_gpio_irqenable(base, gpio % 32, 0);
 }
 
-static void gpio_unmask_irq(unsigned int irq)
+static void gpio_unmask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int base = GPIO_BASE(gpio / 32);
 
 	_set_gpio_irqenable(base, gpio % 32, 1);
 }
 
-static int gpio_set_irq_type(unsigned int irq, unsigned int type)
+static int gpio_set_irq_type(struct irq_data *d, unsigned int type)
 {
-	unsigned int gpio = irq_to_gpio(irq);
+	unsigned int gpio = irq_to_gpio(d->irq);
 	unsigned int gpio_mask = 1 << (gpio % 32);
 	unsigned int base = GPIO_BASE(gpio / 32);
 	unsigned int reg_both, reg_level, reg_type;
@@ -120,7 +120,7 @@
 	__raw_writel(reg_level, base + GPIO_INT_LEVEL);
 	__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
-	gpio_ack_irq(irq);
+	gpio_ack_irq(d);
 
 	return 0;
 }
@@ -146,10 +146,10 @@
 
 static struct irq_chip gpio_irq_chip = {
 	.name = "GPIO",
-	.ack = gpio_ack_irq,
-	.mask = gpio_mask_irq,
-	.unmask = gpio_unmask_irq,
-	.set_type = gpio_set_irq_type,
+	.irq_ack = gpio_ack_irq,
+	.irq_mask = gpio_mask_irq,
+	.irq_unmask = gpio_unmask_irq,
+	.irq_set_type = gpio_set_irq_type,
 };
 
 static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h
index 213a4fc..8c950e1 100644
--- a/arch/arm/mach-gemini/include/mach/hardware.h
+++ b/arch/arm/mach-gemini/include/mach/hardware.h
@@ -33,7 +33,7 @@
 #define GEMINI_LPC_HOST_BASE	0x47000000
 #define GEMINI_LPC_IO_BASE	0x47800000
 #define GEMINI_INTERRUPT_BASE	0x48000000
-/* TODO: Different interrupt controlers when SMP
+/* TODO: Different interrupt controllers when SMP
  * #define GEMINI_INTERRUPT0_BASE	0x48000000
  * #define GEMINI_INTERRUPT1_BASE	0x49000000
  */
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index 9e613ca..96bc227 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -32,34 +32,34 @@
 #define FIQ_LEVEL(base_addr)	(base_addr + 0x30)
 #define FIQ_STATUS(base_addr)	(base_addr + 0x34)
 
-static void gemini_ack_irq(unsigned int irq)
+static void gemini_ack_irq(struct irq_data *d)
 {
-	__raw_writel(1 << irq, IRQ_CLEAR(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
+	__raw_writel(1 << d->irq, IRQ_CLEAR(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
 }
 
-static void gemini_mask_irq(unsigned int irq)
+static void gemini_mask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 
 	mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-	mask &= ~(1 << irq);
+	mask &= ~(1 << d->irq);
 	__raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
 }
 
-static void gemini_unmask_irq(unsigned int irq)
+static void gemini_unmask_irq(struct irq_data *d)
 {
 	unsigned int mask;
 
 	mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-	mask |= (1 << irq);
+	mask |= (1 << d->irq);
 	__raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
 }
 
 static struct irq_chip gemini_irq_chip = {
-	.name	= "INTC",
-	.ack	= gemini_ack_irq,
-	.mask	= gemini_mask_irq,
-	.unmask	= gemini_unmask_irq,
+	.name		= "INTC",
+	.irq_ack	= gemini_ack_irq,
+	.irq_mask	= gemini_mask_irq,
+	.irq_unmask	= gemini_unmask_irq,
 };
 
 static struct resource irq_resource = {
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c
index bdb3f67..1f28c90 100644
--- a/arch/arm/mach-h720x/common.c
+++ b/arch/arm/mach-h720x/common.c
@@ -52,17 +52,17 @@
 /*
  * mask Global irq's
  */
-static void mask_global_irq (unsigned int irq )
+static void mask_global_irq(struct irq_data *d)
 {
-	CPU_REG (IRQC_VIRT, IRQC_IER) &= ~(1 << irq);
+	CPU_REG (IRQC_VIRT, IRQC_IER) &= ~(1 << d->irq);
 }
 
 /*
  * unmask Global irq's
  */
-static void unmask_global_irq (unsigned int irq )
+static void unmask_global_irq(struct irq_data *d)
 {
-	CPU_REG (IRQC_VIRT, IRQC_IER) |= (1 << irq);
+	CPU_REG (IRQC_VIRT, IRQC_IER) |= (1 << d->irq);
 }
 
 
@@ -70,10 +70,10 @@
  * ack GPIO irq's
  * Ack only for edge triggered int's valid
  */
-static void inline ack_gpio_irq(u32 irq)
+static void inline ack_gpio_irq(struct irq_data *d)
 {
-	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(irq));
-	u32 bit = IRQ_TO_BIT(irq);
+	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
+	u32 bit = IRQ_TO_BIT(d->irq);
 	if ( (CPU_REG (reg_base, GPIO_EDGE) & bit))
 		CPU_REG (reg_base, GPIO_CLR) = bit;
 }
@@ -81,20 +81,20 @@
 /*
  * mask GPIO irq's
  */
-static void inline mask_gpio_irq(u32 irq)
+static void inline mask_gpio_irq(struct irq_data *d)
 {
-	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(irq));
-	u32 bit = IRQ_TO_BIT(irq);
+	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
+	u32 bit = IRQ_TO_BIT(d->irq);
 	CPU_REG (reg_base, GPIO_MASK) &= ~bit;
 }
 
 /*
  * unmask GPIO irq's
  */
-static void inline unmask_gpio_irq(u32 irq)
+static void inline unmask_gpio_irq(struct irq_data *d)
 {
-	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(irq));
-	u32 bit = IRQ_TO_BIT(irq);
+	u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
+	u32 bit = IRQ_TO_BIT(d->irq);
 	CPU_REG (reg_base, GPIO_MASK) |= bit;
 }
 
@@ -170,15 +170,15 @@
 #endif
 
 static struct irq_chip h720x_global_chip = {
-	.ack = mask_global_irq,
-	.mask = mask_global_irq,
-	.unmask = unmask_global_irq,
+	.irq_ack = mask_global_irq,
+	.irq_mask = mask_global_irq,
+	.irq_unmask = unmask_global_irq,
 };
 
 static struct irq_chip h720x_gpio_chip = {
-	.ack = ack_gpio_irq,
-	.mask = mask_gpio_irq,
-	.unmask = unmask_gpio_irq,
+	.irq_ack = ack_gpio_irq,
+	.irq_mask = mask_gpio_irq,
+	.irq_unmask = unmask_gpio_irq,
 };
 
 /*
diff --git a/arch/arm/mach-h720x/cpu-h7202.c b/arch/arm/mach-h720x/cpu-h7202.c
index fd33a19..ac3f914 100644
--- a/arch/arm/mach-h720x/cpu-h7202.c
+++ b/arch/arm/mach-h720x/cpu-h7202.c
@@ -141,27 +141,27 @@
 /*
  * mask multiplexed timer IRQs
  */
-static void inline mask_timerx_irq (u32 irq)
+static void inline mask_timerx_irq(struct irq_data *d)
 {
 	unsigned int bit;
-	bit = 2 << ((irq == IRQ_TIMER64B) ? 4 : (irq - IRQ_TIMER1));
+	bit = 2 << ((d->irq == IRQ_TIMER64B) ? 4 : (d->irq - IRQ_TIMER1));
 	CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) &= ~bit;
 }
 
 /*
  * unmask multiplexed timer IRQs
  */
-static void inline unmask_timerx_irq (u32 irq)
+static void inline unmask_timerx_irq(struct irq_data *d)
 {
 	unsigned int bit;
-	bit = 2 << ((irq == IRQ_TIMER64B) ? 4 : (irq - IRQ_TIMER1));
+	bit = 2 << ((d->irq == IRQ_TIMER64B) ? 4 : (d->irq - IRQ_TIMER1));
 	CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) |= bit;
 }
 
 static struct irq_chip h7202_timerx_chip = {
-	.ack = mask_timerx_irq,
-	.mask = mask_timerx_irq,
-	.unmask = unmask_timerx_irq,
+	.irq_ack = mask_timerx_irq,
+	.irq_mask = mask_timerx_irq,
+	.irq_unmask = unmask_timerx_irq,
 };
 
 static struct irqaction h7202_timer_irq = {
diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c
index 79f0b89..629454d 100644
--- a/arch/arm/mach-h720x/h7201-eval.c
+++ b/arch/arm/mach-h720x/h7201-eval.c
@@ -23,7 +23,6 @@
 #include <asm/types.h>
 #include <asm/mach-types.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mach/arch.h>
 #include <mach/hardware.h>
 #include "common.h"
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index cc28b1e..e9f46b6 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -23,7 +23,6 @@
 #include <asm/types.h>
 #include <asm/mach-types.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mach/arch.h>
 #include <mach/irqs.h>
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 17d2e60..56684b5 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -243,6 +243,7 @@
 	select IMX_HAVE_PLATFORM_MXC_EHCI
 	select IMX_HAVE_PLATFORM_MXC_MMC
 	select IMX_HAVE_PLATFORM_SPI_IMX
+	select MXC_DEBUG_BOARD
 	select MXC_ULPI if USB_ULPI
 	help
 	  Include support for MX27PDK platform. This includes specific
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index aa76cfd..8382e79 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -180,7 +180,7 @@
 	KEY(3, 3, KEY_POWER),
 };
 
-static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = {
+static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
 	.keymap		= mx25pdk_keymap,
 	.keymap_size	= ARRAY_SIZE(mx25pdk_keymap),
 };
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 6fd0f8f..1643315 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -37,12 +37,15 @@
 #include <mach/common.h>
 #include <mach/iomux-mx27.h>
 #include <mach/ulpi.h>
+#include <mach/irqs.h>
+#include <mach/3ds_debugboard.h>
 
 #include "devices-imx27.h"
 
 #define SD1_EN_GPIO (GPIO_PORTB + 25)
 #define OTG_PHY_RESET_GPIO (GPIO_PORTB + 23)
 #define SPI2_SS0 (GPIO_PORTD + 21)
+#define EXPIO_PARENT_INT	(MXC_INTERNAL_IRQS + GPIO_PORTC + 28)
 
 static const int mx27pdk_pins[] __initconst = {
 	/* UART1 */
@@ -215,10 +218,10 @@
 
 static struct mc13783_regulator_init_data mx27_3ds_regulators[] = {
 	{
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &vmmc1_init,
 	}, {
-		.id = MC13783_REGU_VGEN,
+		.id = MC13783_REG_VGEN,
 		.init_data = &vgen_init,
 	},
 };
@@ -276,6 +279,9 @@
 	imx27_add_spi_imx1(&spi2_pdata);
 	spi_register_board_info(mx27_3ds_spi_devs,
 						ARRAY_SIZE(mx27_3ds_spi_devs));
+
+	if (mxc_expio_init(MX27_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+		pr_warn("Init of the debugboard failed, all devices on the debugboard are unusable.\n");
 }
 
 static void __init mx27pdk_timer_init(void)
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index f667a26..5056148 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -254,10 +254,10 @@
 
 static struct mc13783_regulator_init_data pcm038_regulators[] = {
 	{
-		.id = MC13783_REGU_VCAM,
+		.id = MC13783_REG_VCAM,
 		.init_data = &cam_data,
 	}, {
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &sdhc1_data,
 	},
 };
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 6bf81ce..acf1769 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -32,7 +32,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops mx27_suspend_ops = {
+static const struct platform_suspend_ops mx27_suspend_ops = {
 	.enter = mx27_suspend_enter,
 	.valid = suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c
index a3fbcb3a..fbb4577 100644
--- a/arch/arm/mach-integrator/cpu.c
+++ b/arch/arm/mach-integrator/cpu.c
@@ -173,7 +173,7 @@
 
 	if (machine_is_integrator()) {
 		vco.s = (cm_osc >> 8) & 7;
-	} else if (machine_is_cintegrator()) {
+	} else {
 		vco.s = 1;
 	}
 	vco.v = cm_osc & 255;
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 2774df8..b666443 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -156,21 +156,21 @@
 
 #define INTEGRATOR_SC_VALID_INT	0x003fffff
 
-static void sc_mask_irq(unsigned int irq)
+static void sc_mask_irq(struct irq_data *d)
 {
-	writel(1 << irq, VA_IC_BASE + IRQ_ENABLE_CLEAR);
+	writel(1 << d->irq, VA_IC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void sc_unmask_irq(unsigned int irq)
+static void sc_unmask_irq(struct irq_data *d)
 {
-	writel(1 << irq, VA_IC_BASE + IRQ_ENABLE_SET);
+	writel(1 << d->irq, VA_IC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip sc_chip = {
-	.name	= "SC",
-	.ack	= sc_mask_irq,
-	.mask	= sc_mask_irq,
-	.unmask = sc_unmask_irq,
+	.name		= "SC",
+	.irq_ack	= sc_mask_irq,
+	.irq_mask	= sc_mask_irq,
+	.irq_unmask	= sc_unmask_irq,
 };
 
 static void __init ap_init_irq(void)
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 85e48a5..e9327da 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -146,61 +146,61 @@
 #define sic_writel	__raw_writel
 #define sic_readl	__raw_readl
 
-static void cic_mask_irq(unsigned int irq)
+static void cic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_CIC_START;
+	unsigned int irq = d->irq - IRQ_CIC_START;
 	cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void cic_unmask_irq(unsigned int irq)
+static void cic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_CIC_START;
+	unsigned int irq = d->irq - IRQ_CIC_START;
 	cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip cic_chip = {
-	.name	= "CIC",
-	.ack	= cic_mask_irq,
-	.mask	= cic_mask_irq,
-	.unmask	= cic_unmask_irq,
+	.name		= "CIC",
+	.irq_ack	= cic_mask_irq,
+	.irq_mask	= cic_mask_irq,
+	.irq_unmask	= cic_unmask_irq,
 };
 
-static void pic_mask_irq(unsigned int irq)
+static void pic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_PIC_START;
+	unsigned int irq = d->irq - IRQ_PIC_START;
 	pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void pic_unmask_irq(unsigned int irq)
+static void pic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_PIC_START;
+	unsigned int irq = d->irq - IRQ_PIC_START;
 	pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip pic_chip = {
-	.name	= "PIC",
-	.ack	= pic_mask_irq,
-	.mask	= pic_mask_irq,
-	.unmask = pic_unmask_irq,
+	.name		= "PIC",
+	.irq_ack	= pic_mask_irq,
+	.irq_mask	= pic_mask_irq,
+	.irq_unmask	= pic_unmask_irq,
 };
 
-static void sic_mask_irq(unsigned int irq)
+static void sic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
 	sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
 }
 
-static void sic_unmask_irq(unsigned int irq)
+static void sic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
 	sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_SET);
 }
 
 static struct irq_chip sic_chip = {
-	.name	= "SIC",
-	.ack	= sic_mask_irq,
-	.mask	= sic_mask_irq,
-	.unmask	= sic_unmask_irq,
+	.name		= "SIC",
+	.irq_ack	= sic_mask_irq,
+	.irq_mask	= sic_mask_irq,
+	.irq_unmask	= sic_unmask_irq,
 };
 
 static void
diff --git a/arch/arm/mach-iop13xx/irq.c b/arch/arm/mach-iop13xx/irq.c
index 0d099ca..a233470 100644
--- a/arch/arm/mach-iop13xx/irq.c
+++ b/arch/arm/mach-iop13xx/irq.c
@@ -123,79 +123,79 @@
 
 /* 0 = Interrupt Masked and 1 = Interrupt not masked */
 static void
-iop13xx_irq_mask0 (unsigned int irq)
+iop13xx_irq_mask0 (struct irq_data *d)
 {
-	write_intctl_0(read_intctl_0() & ~(1 << (irq - 0)));
+	write_intctl_0(read_intctl_0() & ~(1 << (d->irq - 0)));
 }
 
 static void
-iop13xx_irq_mask1 (unsigned int irq)
+iop13xx_irq_mask1 (struct irq_data *d)
 {
-	write_intctl_1(read_intctl_1() & ~(1 << (irq - 32)));
+	write_intctl_1(read_intctl_1() & ~(1 << (d->irq - 32)));
 }
 
 static void
-iop13xx_irq_mask2 (unsigned int irq)
+iop13xx_irq_mask2 (struct irq_data *d)
 {
-	write_intctl_2(read_intctl_2() & ~(1 << (irq - 64)));
+	write_intctl_2(read_intctl_2() & ~(1 << (d->irq - 64)));
 }
 
 static void
-iop13xx_irq_mask3 (unsigned int irq)
+iop13xx_irq_mask3 (struct irq_data *d)
 {
-	write_intctl_3(read_intctl_3() & ~(1 << (irq - 96)));
+	write_intctl_3(read_intctl_3() & ~(1 << (d->irq - 96)));
 }
 
 static void
-iop13xx_irq_unmask0(unsigned int irq)
+iop13xx_irq_unmask0(struct irq_data *d)
 {
-	write_intctl_0(read_intctl_0() | (1 << (irq - 0)));
+	write_intctl_0(read_intctl_0() | (1 << (d->irq - 0)));
 }
 
 static void
-iop13xx_irq_unmask1(unsigned int irq)
+iop13xx_irq_unmask1(struct irq_data *d)
 {
-	write_intctl_1(read_intctl_1() | (1 << (irq - 32)));
+	write_intctl_1(read_intctl_1() | (1 << (d->irq - 32)));
 }
 
 static void
-iop13xx_irq_unmask2(unsigned int irq)
+iop13xx_irq_unmask2(struct irq_data *d)
 {
-	write_intctl_2(read_intctl_2() | (1 << (irq - 64)));
+	write_intctl_2(read_intctl_2() | (1 << (d->irq - 64)));
 }
 
 static void
-iop13xx_irq_unmask3(unsigned int irq)
+iop13xx_irq_unmask3(struct irq_data *d)
 {
-	write_intctl_3(read_intctl_3() | (1 << (irq - 96)));
+	write_intctl_3(read_intctl_3() | (1 << (d->irq - 96)));
 }
 
 static struct irq_chip iop13xx_irqchip1 = {
-	.name	= "IOP13xx-1",
-	.ack    = iop13xx_irq_mask0,
-	.mask   = iop13xx_irq_mask0,
-	.unmask = iop13xx_irq_unmask0,
+	.name       = "IOP13xx-1",
+	.irq_ack    = iop13xx_irq_mask0,
+	.irq_mask   = iop13xx_irq_mask0,
+	.irq_unmask = iop13xx_irq_unmask0,
 };
 
 static struct irq_chip iop13xx_irqchip2 = {
-	.name	= "IOP13xx-2",
-	.ack    = iop13xx_irq_mask1,
-	.mask   = iop13xx_irq_mask1,
-	.unmask = iop13xx_irq_unmask1,
+	.name       = "IOP13xx-2",
+	.irq_ack    = iop13xx_irq_mask1,
+	.irq_mask   = iop13xx_irq_mask1,
+	.irq_unmask = iop13xx_irq_unmask1,
 };
 
 static struct irq_chip iop13xx_irqchip3 = {
-	.name	= "IOP13xx-3",
-	.ack    = iop13xx_irq_mask2,
-	.mask   = iop13xx_irq_mask2,
-	.unmask = iop13xx_irq_unmask2,
+	.name       = "IOP13xx-3",
+	.irq_ack    = iop13xx_irq_mask2,
+	.irq_mask   = iop13xx_irq_mask2,
+	.irq_unmask = iop13xx_irq_unmask2,
 };
 
 static struct irq_chip iop13xx_irqchip4 = {
-	.name	= "IOP13xx-4",
-	.ack    = iop13xx_irq_mask3,
-	.mask   = iop13xx_irq_mask3,
-	.unmask = iop13xx_irq_unmask3,
+	.name       = "IOP13xx-4",
+	.irq_ack    = iop13xx_irq_mask3,
+	.irq_mask   = iop13xx_irq_mask3,
+	.irq_unmask = iop13xx_irq_unmask3,
 };
 
 extern void iop_init_cp6_handler(void);
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index 7149fcc..c9c02e3 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -156,14 +156,14 @@
 	destroy_irq(irq);
 }
 
-static void iop13xx_msi_nop(unsigned int irq)
+static void iop13xx_msi_nop(struct irq_data *d)
 {
 	return;
 }
 
 static struct irq_chip iop13xx_msi_chip = {
 	.name = "PCI-MSI",
-	.ack = iop13xx_msi_nop,
+	.irq_ack = iop13xx_msi_nop,
 	.irq_enable = unmask_msi_irq,
 	.irq_disable = mask_msi_irq,
 	.irq_mask = mask_msi_irq,
diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c
index ba59b2d..d3426a1 100644
--- a/arch/arm/mach-iop32x/irq.c
+++ b/arch/arm/mach-iop32x/irq.c
@@ -32,24 +32,24 @@
 }
 
 static void
-iop32x_irq_mask(unsigned int irq)
+iop32x_irq_mask(struct irq_data *d)
 {
-	iop32x_mask &= ~(1 << irq);
+	iop32x_mask &= ~(1 << d->irq);
 	intctl_write(iop32x_mask);
 }
 
 static void
-iop32x_irq_unmask(unsigned int irq)
+iop32x_irq_unmask(struct irq_data *d)
 {
-	iop32x_mask |= 1 << irq;
+	iop32x_mask |= 1 << d->irq;
 	intctl_write(iop32x_mask);
 }
 
 struct irq_chip ext_chip = {
-	.name	= "IOP32x",
-	.ack	= iop32x_irq_mask,
-	.mask	= iop32x_irq_mask,
-	.unmask	= iop32x_irq_unmask,
+	.name		= "IOP32x",
+	.irq_ack	= iop32x_irq_mask,
+	.irq_mask	= iop32x_irq_mask,
+	.irq_unmask	= iop32x_irq_unmask,
 };
 
 void __init iop32x_init_irq(void)
diff --git a/arch/arm/mach-iop33x/irq.c b/arch/arm/mach-iop33x/irq.c
index abb4ea2..0ff2f74 100644
--- a/arch/arm/mach-iop33x/irq.c
+++ b/arch/arm/mach-iop33x/irq.c
@@ -53,45 +53,45 @@
 }
 
 static void
-iop33x_irq_mask1 (unsigned int irq)
+iop33x_irq_mask1 (struct irq_data *d)
 {
-	iop33x_mask0 &= ~(1 << irq);
+	iop33x_mask0 &= ~(1 << d->irq);
 	intctl0_write(iop33x_mask0);
 }
 
 static void
-iop33x_irq_mask2 (unsigned int irq)
+iop33x_irq_mask2 (struct irq_data *d)
 {
-	iop33x_mask1 &= ~(1 << (irq - 32));
+	iop33x_mask1 &= ~(1 << (d->irq - 32));
 	intctl1_write(iop33x_mask1);
 }
 
 static void
-iop33x_irq_unmask1(unsigned int irq)
+iop33x_irq_unmask1(struct irq_data *d)
 {
-	iop33x_mask0 |= 1 << irq;
+	iop33x_mask0 |= 1 << d->irq;
 	intctl0_write(iop33x_mask0);
 }
 
 static void
-iop33x_irq_unmask2(unsigned int irq)
+iop33x_irq_unmask2(struct irq_data *d)
 {
-	iop33x_mask1 |= (1 << (irq - 32));
+	iop33x_mask1 |= (1 << (d->irq - 32));
 	intctl1_write(iop33x_mask1);
 }
 
 struct irq_chip iop33x_irqchip1 = {
-	.name	= "IOP33x-1",
-	.ack	= iop33x_irq_mask1,
-	.mask	= iop33x_irq_mask1,
-	.unmask	= iop33x_irq_unmask1,
+	.name		= "IOP33x-1",
+	.irq_ack	= iop33x_irq_mask1,
+	.irq_mask	= iop33x_irq_mask1,
+	.irq_unmask	= iop33x_irq_unmask1,
 };
 
 struct irq_chip iop33x_irqchip2 = {
-	.name	= "IOP33x-2",
-	.ack	= iop33x_irq_mask2,
-	.mask	= iop33x_irq_mask2,
-	.unmask	= iop33x_irq_unmask2,
+	.name		= "IOP33x-2",
+	.irq_ack	= iop33x_irq_mask2,
+	.irq_mask	= iop33x_irq_mask2,
+	.irq_unmask	= iop33x_irq_unmask2,
 };
 
 void __init iop33x_init_irq(void)
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index e24e3d0..5fc4e06 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -309,9 +309,9 @@
 	}
 }
 
-static int ixp2000_GPIO_irq_type(unsigned int irq, unsigned int type)
+static int ixp2000_GPIO_irq_type(struct irq_data *d, unsigned int type)
 {
-	int line = irq - IRQ_IXP2000_GPIO0;
+	int line = d->irq - IRQ_IXP2000_GPIO0;
 
 	/*
 	 * First, configure this GPIO line as an input.
@@ -342,8 +342,10 @@
 	return 0;
 }
 
-static void ixp2000_GPIO_irq_mask_ack(unsigned int irq)
+static void ixp2000_GPIO_irq_mask_ack(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	ixp2000_reg_write(IXP2000_GPIO_INCR, (1 << (irq - IRQ_IXP2000_GPIO0)));
 
 	ixp2000_reg_write(IXP2000_GPIO_EDSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
@@ -351,38 +353,42 @@
 	ixp2000_reg_wrb(IXP2000_GPIO_INST, (1 << (irq - IRQ_IXP2000_GPIO0)));
 }
 
-static void ixp2000_GPIO_irq_mask(unsigned int irq)
+static void ixp2000_GPIO_irq_mask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	ixp2000_reg_wrb(IXP2000_GPIO_INCR, (1 << (irq - IRQ_IXP2000_GPIO0)));
 }
 
-static void ixp2000_GPIO_irq_unmask(unsigned int irq)
+static void ixp2000_GPIO_irq_unmask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	ixp2000_reg_write(IXP2000_GPIO_INSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
 }
 
 static struct irq_chip ixp2000_GPIO_irq_chip = {
-	.ack		= ixp2000_GPIO_irq_mask_ack,
-	.mask		= ixp2000_GPIO_irq_mask,
-	.unmask		= ixp2000_GPIO_irq_unmask,
-	.set_type	= ixp2000_GPIO_irq_type,
+	.irq_ack	= ixp2000_GPIO_irq_mask_ack,
+	.irq_mask	= ixp2000_GPIO_irq_mask,
+	.irq_unmask	= ixp2000_GPIO_irq_unmask,
+	.irq_set_type	= ixp2000_GPIO_irq_type,
 };
 
-static void ixp2000_pci_irq_mask(unsigned int irq)
+static void ixp2000_pci_irq_mask(struct irq_data *d)
 {
 	unsigned long temp = *IXP2000_PCI_XSCALE_INT_ENABLE;
-	if (irq == IRQ_IXP2000_PCIA)
+	if (d->irq == IRQ_IXP2000_PCIA)
 		ixp2000_reg_wrb(IXP2000_PCI_XSCALE_INT_ENABLE, (temp & ~(1 << 26)));
-	else if (irq == IRQ_IXP2000_PCIB)
+	else if (d->irq == IRQ_IXP2000_PCIB)
 		ixp2000_reg_wrb(IXP2000_PCI_XSCALE_INT_ENABLE, (temp & ~(1 << 27)));
 }
 
-static void ixp2000_pci_irq_unmask(unsigned int irq)
+static void ixp2000_pci_irq_unmask(struct irq_data *d)
 {
 	unsigned long temp = *IXP2000_PCI_XSCALE_INT_ENABLE;
-	if (irq == IRQ_IXP2000_PCIA)
+	if (d->irq == IRQ_IXP2000_PCIA)
 		ixp2000_reg_write(IXP2000_PCI_XSCALE_INT_ENABLE, (temp | (1 << 26)));
-	else if (irq == IRQ_IXP2000_PCIB)
+	else if (d->irq == IRQ_IXP2000_PCIB)
 		ixp2000_reg_write(IXP2000_PCI_XSCALE_INT_ENABLE, (temp | (1 << 27)));
 }
 
@@ -401,44 +407,44 @@
 	}
 }
 
-static void ixp2000_err_irq_mask(unsigned int irq)
+static void ixp2000_err_irq_mask(struct irq_data *d)
 {
 	ixp2000_reg_write(IXP2000_IRQ_ERR_ENABLE_CLR,
-			(1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
+			(1 << (d->irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
 }
 
-static void ixp2000_err_irq_unmask(unsigned int irq)
+static void ixp2000_err_irq_unmask(struct irq_data *d)
 {
 	ixp2000_reg_write(IXP2000_IRQ_ERR_ENABLE_SET,
-			(1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
+			(1 << (d->irq - IRQ_IXP2000_DRAM0_MIN_ERR)));
 }
 
 static struct irq_chip ixp2000_err_irq_chip = {
-	.ack	= ixp2000_err_irq_mask,
-	.mask	= ixp2000_err_irq_mask,
-	.unmask	= ixp2000_err_irq_unmask
+	.irq_ack	= ixp2000_err_irq_mask,
+	.irq_mask	= ixp2000_err_irq_mask,
+	.irq_unmask	= ixp2000_err_irq_unmask
 };
 
 static struct irq_chip ixp2000_pci_irq_chip = {
-	.ack	= ixp2000_pci_irq_mask,
-	.mask	= ixp2000_pci_irq_mask,
-	.unmask	= ixp2000_pci_irq_unmask
+	.irq_ack	= ixp2000_pci_irq_mask,
+	.irq_mask	= ixp2000_pci_irq_mask,
+	.irq_unmask	= ixp2000_pci_irq_unmask
 };
 
-static void ixp2000_irq_mask(unsigned int irq)
+static void ixp2000_irq_mask(struct irq_data *d)
 {
-	ixp2000_reg_wrb(IXP2000_IRQ_ENABLE_CLR, (1 << irq));
+	ixp2000_reg_wrb(IXP2000_IRQ_ENABLE_CLR, (1 << d->irq));
 }
 
-static void ixp2000_irq_unmask(unsigned int irq)
+static void ixp2000_irq_unmask(struct irq_data *d)
 {
-	ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << irq));
+	ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << d->irq));
 }
 
 static struct irq_chip ixp2000_irq_chip = {
-	.ack	= ixp2000_irq_mask,
-	.mask	= ixp2000_irq_mask,
-	.unmask	= ixp2000_irq_unmask
+	.irq_ack	= ixp2000_irq_mask,
+	.irq_mask	= ixp2000_irq_mask,
+	.irq_unmask	= ixp2000_irq_unmask
 };
 
 void __init ixp2000_init_irq(void)
diff --git a/arch/arm/mach-ixp2000/ixdp2x00.c b/arch/arm/mach-ixp2000/ixdp2x00.c
index 91fffb9..7d90d3f 100644
--- a/arch/arm/mach-ixp2000/ixdp2x00.c
+++ b/arch/arm/mach-ixp2000/ixdp2x00.c
@@ -63,7 +63,7 @@
 };
 #endif
 
-static void ixdp2x00_irq_mask(unsigned int irq)
+static void ixdp2x00_irq_mask(struct irq_data *d)
 {
 	unsigned long dummy;
 	static struct slowport_cfg old_cfg;
@@ -78,7 +78,7 @@
 #endif
 
 	dummy = *board_irq_mask;
-	dummy |=  IXP2000_BOARD_IRQ_MASK(irq);
+	dummy |=  IXP2000_BOARD_IRQ_MASK(d->irq);
 	ixp2000_reg_wrb(board_irq_mask, dummy);
 
 #ifdef CONFIG_ARCH_IXDP2400
@@ -87,7 +87,7 @@
 #endif
 }
 
-static void ixdp2x00_irq_unmask(unsigned int irq)
+static void ixdp2x00_irq_unmask(struct irq_data *d)
 {
 	unsigned long dummy;
 	static struct slowport_cfg old_cfg;
@@ -98,7 +98,7 @@
 #endif
 
 	dummy = *board_irq_mask;
-	dummy &=  ~IXP2000_BOARD_IRQ_MASK(irq);
+	dummy &=  ~IXP2000_BOARD_IRQ_MASK(d->irq);
 	ixp2000_reg_wrb(board_irq_mask, dummy);
 
 	if (machine_is_ixdp2400()) 
@@ -111,7 +111,7 @@
 	static struct slowport_cfg old_cfg;
 	int i;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 #ifdef CONFIG_ARCH_IXDP2400
 	if (machine_is_ixdp2400())
@@ -133,13 +133,13 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2x00_cpld_irq_chip = {
-	.ack	= ixdp2x00_irq_mask,
-	.mask	= ixdp2x00_irq_mask,
-	.unmask	= ixdp2x00_irq_unmask
+	.irq_ack	= ixdp2x00_irq_mask,
+	.irq_mask	= ixdp2x00_irq_mask,
+	.irq_unmask	= ixdp2x00_irq_unmask
 };
 
 void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_of_irqs)
diff --git a/arch/arm/mach-ixp2000/ixdp2x01.c b/arch/arm/mach-ixp2000/ixdp2x01.c
index 6c121bd..34b1b2a 100644
--- a/arch/arm/mach-ixp2000/ixdp2x01.c
+++ b/arch/arm/mach-ixp2000/ixdp2x01.c
@@ -48,16 +48,16 @@
 /*************************************************************************
  * IXDP2x01 IRQ Handling
  *************************************************************************/
-static void ixdp2x01_irq_mask(unsigned int irq)
+static void ixdp2x01_irq_mask(struct irq_data *d)
 {
 	ixp2000_reg_wrb(IXDP2X01_INT_MASK_SET_REG,
-				IXP2000_BOARD_IRQ_MASK(irq));
+				IXP2000_BOARD_IRQ_MASK(d->irq));
 }
 
-static void ixdp2x01_irq_unmask(unsigned int irq)
+static void ixdp2x01_irq_unmask(struct irq_data *d)
 {
 	ixp2000_reg_write(IXDP2X01_INT_MASK_CLR_REG,
-				IXP2000_BOARD_IRQ_MASK(irq));
+				IXP2000_BOARD_IRQ_MASK(d->irq));
 }
 
 static u32 valid_irq_mask;
@@ -67,7 +67,7 @@
 	u32 ex_interrupt;
 	int i;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	ex_interrupt = *IXDP2X01_INT_STAT_REG & valid_irq_mask;
 
@@ -83,13 +83,13 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2x01_irq_chip = {
-	.mask	= ixdp2x01_irq_mask,
-	.ack	= ixdp2x01_irq_mask,
-	.unmask	= ixdp2x01_irq_unmask
+	.irq_mask	= ixdp2x01_irq_mask,
+	.irq_ack	= ixdp2x01_irq_mask,
+	.irq_unmask	= ixdp2x01_irq_unmask
 };
 
 /*
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index aa4c442..9c8a339 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -111,9 +111,9 @@
 
 static void ixp23xx_config_irq(unsigned int, enum ixp23xx_irq_type);
 
-static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)
+static int ixp23xx_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	int line = irq - IRQ_IXP23XX_GPIO6 + 6;
+	int line = d->irq - IRQ_IXP23XX_GPIO6 + 6;
 	u32 int_style;
 	enum ixp23xx_irq_type irq_type;
 	volatile u32 *int_reg;
@@ -149,7 +149,7 @@
 		return -EINVAL;
 	}
 
-	ixp23xx_config_irq(irq, irq_type);
+	ixp23xx_config_irq(d->irq, irq_type);
 
 	if (line >= 8) {	/* pins 8-15 */
 		line -= 8;
@@ -173,9 +173,10 @@
 	return 0;
 }
 
-static void ixp23xx_irq_mask(unsigned int irq)
+static void ixp23xx_irq_mask(struct irq_data *d)
 {
 	volatile unsigned long *intr_reg;
+	unsigned int irq = d->irq;
 
 	if (irq >= 56)
 		irq += 8;
@@ -184,9 +185,9 @@
 	*intr_reg &= ~(1 << (irq % 32));
 }
 
-static void ixp23xx_irq_ack(unsigned int irq)
+static void ixp23xx_irq_ack(struct irq_data *d)
 {
-	int line = irq - IRQ_IXP23XX_GPIO6 + 6;
+	int line = d->irq - IRQ_IXP23XX_GPIO6 + 6;
 
 	if ((line < 6) || (line > 15))
 		return;
@@ -198,11 +199,12 @@
  * Level triggered interrupts on GPIO lines can only be cleared when the
  * interrupt condition disappears.
  */
-static void ixp23xx_irq_level_unmask(unsigned int irq)
+static void ixp23xx_irq_level_unmask(struct irq_data *d)
 {
 	volatile unsigned long *intr_reg;
+	unsigned int irq = d->irq;
 
-	ixp23xx_irq_ack(irq);
+	ixp23xx_irq_ack(d);
 
 	if (irq >= 56)
 		irq += 8;
@@ -211,9 +213,10 @@
 	*intr_reg |= (1 << (irq % 32));
 }
 
-static void ixp23xx_irq_edge_unmask(unsigned int irq)
+static void ixp23xx_irq_edge_unmask(struct irq_data *d)
 {
 	volatile unsigned long *intr_reg;
+	unsigned int irq = d->irq;
 
 	if (irq >= 56)
 		irq += 8;
@@ -223,26 +226,30 @@
 }
 
 static struct irq_chip ixp23xx_irq_level_chip = {
-	.ack		= ixp23xx_irq_mask,
-	.mask		= ixp23xx_irq_mask,
-	.unmask		= ixp23xx_irq_level_unmask,
-	.set_type	= ixp23xx_irq_set_type
+	.irq_ack	= ixp23xx_irq_mask,
+	.irq_mask	= ixp23xx_irq_mask,
+	.irq_unmask	= ixp23xx_irq_level_unmask,
+	.irq_set_type	= ixp23xx_irq_set_type
 };
 
 static struct irq_chip ixp23xx_irq_edge_chip = {
-	.ack		= ixp23xx_irq_ack,
-	.mask		= ixp23xx_irq_mask,
-	.unmask		= ixp23xx_irq_edge_unmask,
-	.set_type	= ixp23xx_irq_set_type
+	.irq_ack	= ixp23xx_irq_ack,
+	.irq_mask	= ixp23xx_irq_mask,
+	.irq_unmask	= ixp23xx_irq_edge_unmask,
+	.irq_set_type	= ixp23xx_irq_set_type
 };
 
-static void ixp23xx_pci_irq_mask(unsigned int irq)
+static void ixp23xx_pci_irq_mask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	*IXP23XX_PCI_XSCALE_INT_ENABLE &= ~(1 << (IRQ_IXP23XX_INTA + 27 - irq));
 }
 
-static void ixp23xx_pci_irq_unmask(unsigned int irq)
+static void ixp23xx_pci_irq_unmask(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	*IXP23XX_PCI_XSCALE_INT_ENABLE |= (1 << (IRQ_IXP23XX_INTA + 27 - irq));
 }
 
@@ -256,7 +263,7 @@
 
 	pci_interrupt = *IXP23XX_PCI_XSCALE_INT_STATUS;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	/* See which PCI_INTA, or PCI_INTB interrupted */
 	if (pci_interrupt & (1 << 26)) {
@@ -269,13 +276,13 @@
 
 	generic_handle_irq(irqno);
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixp23xx_pci_irq_chip = {
-	.ack	= ixp23xx_pci_irq_mask,
-	.mask	= ixp23xx_pci_irq_mask,
-	.unmask	= ixp23xx_pci_irq_unmask
+	.irq_ack	= ixp23xx_pci_irq_mask,
+	.irq_mask	= ixp23xx_pci_irq_mask,
+	.irq_unmask	= ixp23xx_pci_irq_unmask
 };
 
 static void ixp23xx_config_irq(unsigned int irq, enum ixp23xx_irq_type type)
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c
index 664e39c..181116a 100644
--- a/arch/arm/mach-ixp23xx/ixdp2351.c
+++ b/arch/arm/mach-ixp23xx/ixdp2351.c
@@ -48,14 +48,14 @@
 /*
  * IXDP2351 Interrupt Handling
  */
-static void ixdp2351_inta_mask(unsigned int irq)
+static void ixdp2351_inta_mask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTA_MASK_SET_REG = IXDP2351_INTA_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTA_MASK_SET_REG = IXDP2351_INTA_IRQ_MASK(d->irq);
 }
 
-static void ixdp2351_inta_unmask(unsigned int irq)
+static void ixdp2351_inta_unmask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTA_MASK_CLR_REG = IXDP2351_INTA_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTA_MASK_CLR_REG = IXDP2351_INTA_IRQ_MASK(d->irq);
 }
 
 static void ixdp2351_inta_handler(unsigned int irq, struct irq_desc *desc)
@@ -64,7 +64,7 @@
 		*IXDP2351_CPLD_INTA_STAT_REG & IXDP2351_INTA_IRQ_VALID;
 	int i;
 
-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	for (i = 0; i < IXDP2351_INTA_IRQ_NUM; i++) {
 		if (ex_interrupt & (1 << i)) {
@@ -74,23 +74,23 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2351_inta_chip = {
-	.ack	= ixdp2351_inta_mask,
-	.mask	= ixdp2351_inta_mask,
-	.unmask	= ixdp2351_inta_unmask
+	.irq_ack	= ixdp2351_inta_mask,
+	.irq_mask	= ixdp2351_inta_mask,
+	.irq_unmask	= ixdp2351_inta_unmask
 };
 
-static void ixdp2351_intb_mask(unsigned int irq)
+static void ixdp2351_intb_mask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTB_MASK_SET_REG = IXDP2351_INTB_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTB_MASK_SET_REG = IXDP2351_INTB_IRQ_MASK(d->irq);
 }
 
-static void ixdp2351_intb_unmask(unsigned int irq)
+static void ixdp2351_intb_unmask(struct irq_data *d)
 {
-	*IXDP2351_CPLD_INTB_MASK_CLR_REG = IXDP2351_INTB_IRQ_MASK(irq);
+	*IXDP2351_CPLD_INTB_MASK_CLR_REG = IXDP2351_INTB_IRQ_MASK(d->irq);
 }
 
 static void ixdp2351_intb_handler(unsigned int irq, struct irq_desc *desc)
@@ -99,7 +99,7 @@
 		*IXDP2351_CPLD_INTB_STAT_REG & IXDP2351_INTB_IRQ_VALID;
 	int i;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	for (i = 0; i < IXDP2351_INTB_IRQ_NUM; i++) {
 		if (ex_interrupt & (1 << i)) {
@@ -109,13 +109,13 @@
 		}
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip ixdp2351_intb_chip = {
-	.ack	= ixdp2351_intb_mask,
-	.mask	= ixdp2351_intb_mask,
-	.unmask	= ixdp2351_intb_unmask
+	.irq_ack	= ixdp2351_intb_mask,
+	.irq_mask	= ixdp2351_intb_mask,
+	.irq_unmask	= ixdp2351_intb_unmask
 };
 
 void __init ixdp2351_init_irq(void)
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 4dbfcbb..9fd8942 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -128,9 +128,9 @@
 }
 EXPORT_SYMBOL(irq_to_gpio);
 
-static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
+static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
 {
-	int line = irq2gpio[irq];
+	int line = irq2gpio[d->irq];
 	u32 int_style;
 	enum ixp4xx_irq_type irq_type;
 	volatile u32 *int_reg;
@@ -167,9 +167,9 @@
 	}
 
 	if (irq_type == IXP4XX_IRQ_EDGE)
-		ixp4xx_irq_edge |= (1 << irq);
+		ixp4xx_irq_edge |= (1 << d->irq);
 	else
-		ixp4xx_irq_edge &= ~(1 << irq);
+		ixp4xx_irq_edge &= ~(1 << d->irq);
 
 	if (line >= 8) {	/* pins 8-15 */
 		line -= 8;
@@ -188,22 +188,22 @@
 	*int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
 
 	/* Configure the line as an input */
-	gpio_line_config(irq2gpio[irq], IXP4XX_GPIO_IN);
+	gpio_line_config(irq2gpio[d->irq], IXP4XX_GPIO_IN);
 
 	return 0;
 }
 
-static void ixp4xx_irq_mask(unsigned int irq)
+static void ixp4xx_irq_mask(struct irq_data *d)
 {
-	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && irq >= 32)
-		*IXP4XX_ICMR2 &= ~(1 << (irq - 32));
+	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
+		*IXP4XX_ICMR2 &= ~(1 << (d->irq - 32));
 	else
-		*IXP4XX_ICMR &= ~(1 << irq);
+		*IXP4XX_ICMR &= ~(1 << d->irq);
 }
 
-static void ixp4xx_irq_ack(unsigned int irq)
+static void ixp4xx_irq_ack(struct irq_data *d)
 {
-	int line = (irq < 32) ? irq2gpio[irq] : -1;
+	int line = (d->irq < 32) ? irq2gpio[d->irq] : -1;
 
 	if (line >= 0)
 		*IXP4XX_GPIO_GPISR = (1 << line);
@@ -213,23 +213,23 @@
  * Level triggered interrupts on GPIO lines can only be cleared when the
  * interrupt condition disappears.
  */
-static void ixp4xx_irq_unmask(unsigned int irq)
+static void ixp4xx_irq_unmask(struct irq_data *d)
 {
-	if (!(ixp4xx_irq_edge & (1 << irq)))
-		ixp4xx_irq_ack(irq);
+	if (!(ixp4xx_irq_edge & (1 << d->irq)))
+		ixp4xx_irq_ack(d);
 
-	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && irq >= 32)
-		*IXP4XX_ICMR2 |= (1 << (irq - 32));
+	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
+		*IXP4XX_ICMR2 |= (1 << (d->irq - 32));
 	else
-		*IXP4XX_ICMR |= (1 << irq);
+		*IXP4XX_ICMR |= (1 << d->irq);
 }
 
 static struct irq_chip ixp4xx_irq_chip = {
 	.name		= "IXP4xx",
-	.ack		= ixp4xx_irq_ack,
-	.mask		= ixp4xx_irq_mask,
-	.unmask		= ixp4xx_irq_unmask,
-	.set_type	= ixp4xx_set_irq_type,
+	.irq_ack	= ixp4xx_irq_ack,
+	.irq_mask	= ixp4xx_irq_mask,
+	.irq_unmask	= ixp4xx_irq_unmask,
+	.irq_set_type	= ixp4xx_set_irq_type,
 };
 
 void __init ixp4xx_init_irq(void)
@@ -432,7 +432,7 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-unsigned long ixp4xx_timer_freq = FREQ;
+unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
@@ -496,7 +496,7 @@
 
 static void __init ixp4xx_clockevent_init(void)
 {
-	clockevent_ixp4xx.mult = div_sc(FREQ, NSEC_PER_SEC,
+	clockevent_ixp4xx.mult = div_sc(IXP4XX_TIMER_FREQ, NSEC_PER_SEC,
 					clockevent_ixp4xx.shift);
 	clockevent_ixp4xx.max_delta_ns =
 		clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
diff --git a/arch/arm/mach-ixp4xx/include/mach/timex.h b/arch/arm/mach-ixp4xx/include/mach/timex.h
index 2c3f93c..c9e930f 100644
--- a/arch/arm/mach-ixp4xx/include/mach/timex.h
+++ b/arch/arm/mach-ixp4xx/include/mach/timex.h
@@ -10,6 +10,7 @@
  * 66.66... MHz. We do a convulted calculation of CLOCK_TICK_RATE b/c the
  * timer register ignores the bottom 2 bits of the LATCH value.
  */
-#define FREQ 66666000
-#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
+#define IXP4XX_TIMER_FREQ 66666000
+#define CLOCK_TICK_RATE \
+	(((IXP4XX_TIMER_FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
 
diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
index bfdbe4b..852f7c9 100644
--- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
@@ -265,6 +265,11 @@
 	       qmgr_queue_descs[queue], queue);
 	qmgr_queue_descs[queue][0] = '\x0';
 #endif
+
+	while ((addr = qmgr_get_entry(queue)))
+		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+		       queue, addr);
+
 	__raw_writel(0, &qmgr_regs->sram[queue]);
 
 	used_sram_bitmap[0] &= ~mask[0];
@@ -275,10 +280,6 @@
 	spin_unlock_irq(&qmgr_lock);
 
 	module_put(THIS_MODULE);
-
-	while ((addr = qmgr_get_entry(queue)))
-		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
-		       queue, addr);
 }
 
 static int qmgr_init(void)
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index c9d77fa..cfcca41 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -171,7 +171,7 @@
 
 	kirkwood_i2c_init();
 
-	if (machine_is_openrd_client()) {
+	if (machine_is_openrd_client() || machine_is_openrd_ultimate()) {
 		i2c_register_board_info(0, i2c_board_info,
 			ARRAY_SIZE(i2c_board_info));
 		kirkwood_audio_init();
diff --git a/arch/arm/mach-ks8695/irq.c b/arch/arm/mach-ks8695/irq.c
index e375c1d..7998cca 100644
--- a/arch/arm/mach-ks8695/irq.c
+++ b/arch/arm/mach-ks8695/irq.c
@@ -34,29 +34,29 @@
 #include <mach/regs-irq.h>
 #include <mach/regs-gpio.h>
 
-static void ks8695_irq_mask(unsigned int irqno)
+static void ks8695_irq_mask(struct irq_data *d)
 {
 	unsigned long inten;
 
 	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
-	inten &= ~(1 << irqno);
+	inten &= ~(1 << d->irq);
 
 	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
 }
 
-static void ks8695_irq_unmask(unsigned int irqno)
+static void ks8695_irq_unmask(struct irq_data *d)
 {
 	unsigned long inten;
 
 	inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN);
-	inten |= (1 << irqno);
+	inten |= (1 << d->irq);
 
 	__raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN);
 }
 
-static void ks8695_irq_ack(unsigned int irqno)
+static void ks8695_irq_ack(struct irq_data *d)
 {
-	__raw_writel((1 << irqno), KS8695_IRQ_VA + KS8695_INTST);
+	__raw_writel((1 << d->irq), KS8695_IRQ_VA + KS8695_INTST);
 }
 
 
@@ -64,7 +64,7 @@
 static struct irq_chip ks8695_irq_edge_chip;
 
 
-static int ks8695_irq_set_type(unsigned int irqno, unsigned int type)
+static int ks8695_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	unsigned long ctrl, mode;
 	unsigned short level_triggered = 0;
@@ -93,7 +93,7 @@
 			return -EINVAL;
 	}
 
-	switch (irqno) {
+	switch (d->irq) {
 		case KS8695_IRQ_EXTERN0:
 			ctrl &= ~IOPC_IOEINT0TM;
 			ctrl |= IOPC_IOEINT0_MODE(mode);
@@ -115,12 +115,12 @@
 	}
 
 	if (level_triggered) {
-		set_irq_chip(irqno, &ks8695_irq_level_chip);
-		set_irq_handler(irqno, handle_level_irq);
+		set_irq_chip(d->irq, &ks8695_irq_level_chip);
+		set_irq_handler(d->irq, handle_level_irq);
 	}
 	else {
-		set_irq_chip(irqno, &ks8695_irq_edge_chip);
-		set_irq_handler(irqno, handle_edge_irq);
+		set_irq_chip(d->irq, &ks8695_irq_edge_chip);
+		set_irq_handler(d->irq, handle_edge_irq);
 	}
 
 	__raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC);
@@ -128,17 +128,17 @@
 }
 
 static struct irq_chip ks8695_irq_level_chip = {
-	.ack		= ks8695_irq_mask,
-	.mask		= ks8695_irq_mask,
-	.unmask		= ks8695_irq_unmask,
-	.set_type	= ks8695_irq_set_type,
+	.irq_ack	= ks8695_irq_mask,
+	.irq_mask	= ks8695_irq_mask,
+	.irq_unmask	= ks8695_irq_unmask,
+	.irq_set_type	= ks8695_irq_set_type,
 };
 
 static struct irq_chip ks8695_irq_edge_chip = {
-	.ack		= ks8695_irq_ack,
-	.mask		= ks8695_irq_mask,
-	.unmask		= ks8695_irq_unmask,
-	.set_type	= ks8695_irq_set_type,
+	.irq_ack	= ks8695_irq_ack,
+	.irq_mask	= ks8695_irq_mask,
+	.irq_unmask	= ks8695_irq_unmask,
+	.irq_set_type	= ks8695_irq_set_type,
 };
 
 void __init ks8695_init_irq(void)
@@ -164,7 +164,8 @@
 
 			/* Edge-triggered interrupts */
 			default:
-				ks8695_irq_ack(irq);	/* clear pending bit */
+				/* clear pending bit */
+				ks8695_irq_ack(irq_get_irq_data(irq));
 				set_irq_chip(irq, &ks8695_irq_edge_chip);
 				set_irq_handler(irq, handle_edge_irq);
 		}
diff --git a/arch/arm/mach-lh7a40x/arch-kev7a400.c b/arch/arm/mach-lh7a40x/arch-kev7a400.c
index 9088c166..71129c3 100644
--- a/arch/arm/mach-lh7a40x/arch-kev7a400.c
+++ b/arch/arm/mach-lh7a40x/arch-kev7a400.c
@@ -46,28 +46,28 @@
 
 static u16 CPLD_IRQ_mask;	/* Mask for CPLD IRQs, 1 == unmasked */
 
-static void kev7a400_ack_cpld_irq (u32 irq)
+static void kev7a400_ack_cpld_irq(struct irq_data *d)
 {
-	CPLD_CL_INT = 1 << (irq - IRQ_KEV7A400_CPLD);
+	CPLD_CL_INT = 1 << (d->irq - IRQ_KEV7A400_CPLD);
 }
 
-static void kev7a400_mask_cpld_irq (u32 irq)
+static void kev7a400_mask_cpld_irq(struct irq_data *d)
 {
-	CPLD_IRQ_mask &= ~(1 << (irq - IRQ_KEV7A400_CPLD));
+	CPLD_IRQ_mask &= ~(1 << (d->irq - IRQ_KEV7A400_CPLD));
 	CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
 }
 
-static void kev7a400_unmask_cpld_irq (u32 irq)
+static void kev7a400_unmask_cpld_irq(struct irq_data *d)
 {
-	CPLD_IRQ_mask |= 1 << (irq - IRQ_KEV7A400_CPLD);
+	CPLD_IRQ_mask |= 1 << (d->irq - IRQ_KEV7A400_CPLD);
 	CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
 }
 
 static struct irq_chip kev7a400_cpld_chip = {
-	.name	= "CPLD",
-	.ack	= kev7a400_ack_cpld_irq,
-	.mask	= kev7a400_mask_cpld_irq,
-	.unmask	= kev7a400_unmask_cpld_irq,
+	.name		= "CPLD",
+	.irq_ack	= kev7a400_ack_cpld_irq,
+	.irq_mask	= kev7a400_mask_cpld_irq,
+	.irq_unmask	= kev7a400_unmask_cpld_irq,
 };
 
 
diff --git a/arch/arm/mach-lh7a40x/arch-lpd7a40x.c b/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
index 7315a56..e735546 100644
--- a/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
+++ b/arch/arm/mach-lh7a40x/arch-lpd7a40x.c
@@ -159,7 +159,7 @@
 #endif
 }
 
-static void lh7a40x_ack_cpld_irq (u32 irq)
+static void lh7a40x_ack_cpld_irq(struct irq_data *d)
 {
 	/* CPLD doesn't have ack capability, but some devices may */
 
@@ -167,14 +167,14 @@
 	/* The touch control *must* mask the interrupt because the
 	 * interrupt bit is read by the driver to determine if the pen
 	 * is still down. */
-	if (irq == IRQ_TOUCH)
+	if (d->irq == IRQ_TOUCH)
 		CPLD_INTERRUPTS |= CPLD_INTMASK_TOUCH;
 #endif
 }
 
-static void lh7a40x_mask_cpld_irq (u32 irq)
+static void lh7a40x_mask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS |= CPLD_INTMASK_ETHERNET;
 		break;
@@ -186,9 +186,9 @@
 	}
 }
 
-static void lh7a40x_unmask_cpld_irq (u32 irq)
+static void lh7a40x_unmask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS &= ~CPLD_INTMASK_ETHERNET;
 		break;
@@ -201,17 +201,17 @@
 }
 
 static struct irq_chip lpd7a40x_cpld_chip = {
-	.name	= "CPLD",
-	.ack	= lh7a40x_ack_cpld_irq,
-	.mask	= lh7a40x_mask_cpld_irq,
-	.unmask	= lh7a40x_unmask_cpld_irq,
+	.name		= "CPLD",
+	.irq_ack	= lh7a40x_ack_cpld_irq,
+	.irq_mask	= lh7a40x_mask_cpld_irq,
+	.irq_unmask	= lh7a40x_unmask_cpld_irq,
 };
 
 static void lpd7a40x_cpld_handler (unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int mask = CPLD_INTERRUPTS;
 
-	desc->chip->ack (irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	if ((mask & (1<<0)) == 0)	/* WLAN */
 		generic_handle_irq(IRQ_LPD7A40X_ETH_INT);
@@ -221,7 +221,8 @@
 		generic_handle_irq(IRQ_TOUCH);
 #endif
 
-	desc->chip->unmask (irq); /* Level-triggered need this */
+	/* Level-triggered need this */
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 
diff --git a/arch/arm/mach-lh7a40x/irq-lh7a400.c b/arch/arm/mach-lh7a40x/irq-lh7a400.c
index 1ad3afc..f2e7e65 100644
--- a/arch/arm/mach-lh7a40x/irq-lh7a400.c
+++ b/arch/arm/mach-lh7a40x/irq-lh7a400.c
@@ -21,34 +21,34 @@
 
   /* CPU IRQ handling */
 
-static void lh7a400_mask_irq (u32 irq)
+static void lh7a400_mask_irq(struct irq_data *d)
 {
-	INTC_INTENC = (1 << irq);
+	INTC_INTENC = (1 << d->irq);
 }
 
-static void lh7a400_unmask_irq (u32 irq)
+static void lh7a400_unmask_irq(struct irq_data *d)
 {
-	INTC_INTENS = (1 << irq);
+	INTC_INTENS = (1 << d->irq);
 }
 
-static void lh7a400_ack_gpio_irq (u32 irq)
+static void lh7a400_ack_gpio_irq(struct irq_data *d)
 {
-	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (irq));
-	INTC_INTENC = (1 << irq);
+	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
+	INTC_INTENC = (1 << d->irq);
 }
 
 static struct irq_chip lh7a400_internal_chip = {
-	.name	= "MPU",
-	.ack	= lh7a400_mask_irq, /* Level triggering -> mask is ack */
-	.mask	= lh7a400_mask_irq,
-	.unmask	= lh7a400_unmask_irq,
+	.name		= "MPU",
+	.irq_ack	= lh7a400_mask_irq, /* Level triggering -> mask is ack */
+	.irq_mask	= lh7a400_mask_irq,
+	.irq_unmask	= lh7a400_unmask_irq,
 };
 
 static struct irq_chip lh7a400_gpio_chip = {
-	.name	= "GPIO",
-	.ack	= lh7a400_ack_gpio_irq,
-	.mask	= lh7a400_mask_irq,
-	.unmask	= lh7a400_unmask_irq,
+	.name		= "GPIO",
+	.irq_ack	= lh7a400_ack_gpio_irq,
+	.irq_mask	= lh7a400_mask_irq,
+	.irq_unmask	= lh7a400_unmask_irq,
 };
 
 
diff --git a/arch/arm/mach-lh7a40x/irq-lh7a404.c b/arch/arm/mach-lh7a40x/irq-lh7a404.c
index 12b045b..14b1733 100644
--- a/arch/arm/mach-lh7a40x/irq-lh7a404.c
+++ b/arch/arm/mach-lh7a40x/irq-lh7a404.c
@@ -43,64 +43,64 @@
 
   /* CPU IRQ handling */
 
-static void lh7a404_vic1_mask_irq (u32 irq)
+static void lh7a404_vic1_mask_irq(struct irq_data *d)
 {
-	VIC1_INTENCLR = (1 << irq);
+	VIC1_INTENCLR = (1 << d->irq);
 }
 
-static void lh7a404_vic1_unmask_irq (u32 irq)
+static void lh7a404_vic1_unmask_irq(struct irq_data *d)
 {
-	VIC1_INTEN = (1 << irq);
+	VIC1_INTEN = (1 << d->irq);
 }
 
-static void lh7a404_vic2_mask_irq (u32 irq)
+static void lh7a404_vic2_mask_irq(struct irq_data *d)
 {
-	VIC2_INTENCLR = (1 << (irq - 32));
+	VIC2_INTENCLR = (1 << (d->irq - 32));
 }
 
-static void lh7a404_vic2_unmask_irq (u32 irq)
+static void lh7a404_vic2_unmask_irq(struct irq_data *d)
 {
-	VIC2_INTEN = (1 << (irq - 32));
+	VIC2_INTEN = (1 << (d->irq - 32));
 }
 
-static void lh7a404_vic1_ack_gpio_irq (u32 irq)
+static void lh7a404_vic1_ack_gpio_irq(struct irq_data *d)
 {
-	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (irq));
-	VIC1_INTENCLR = (1 << irq);
+	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
+	VIC1_INTENCLR = (1 << d->irq);
 }
 
-static void lh7a404_vic2_ack_gpio_irq (u32 irq)
+static void lh7a404_vic2_ack_gpio_irq(struct irq_data *d)
 {
-	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (irq));
-	VIC2_INTENCLR = (1 << irq);
+	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (d->irq));
+	VIC2_INTENCLR = (1 << d->irq);
 }
 
 static struct irq_chip lh7a404_vic1_chip = {
-	.name	= "VIC1",
-	.ack	= lh7a404_vic1_mask_irq, /* Because level-triggered */
-	.mask	= lh7a404_vic1_mask_irq,
-	.unmask	= lh7a404_vic1_unmask_irq,
+	.name		= "VIC1",
+	.irq_ack	= lh7a404_vic1_mask_irq, /* Because level-triggered */
+	.irq_mask	= lh7a404_vic1_mask_irq,
+	.irq_unmask	= lh7a404_vic1_unmask_irq,
 };
 
 static struct irq_chip lh7a404_vic2_chip = {
-	.name	= "VIC2",
-	.ack	= lh7a404_vic2_mask_irq, /* Because level-triggered */
-	.mask	= lh7a404_vic2_mask_irq,
-	.unmask	= lh7a404_vic2_unmask_irq,
+	.name		= "VIC2",
+	.irq_ack	= lh7a404_vic2_mask_irq, /* Because level-triggered */
+	.irq_mask	= lh7a404_vic2_mask_irq,
+	.irq_unmask	= lh7a404_vic2_unmask_irq,
 };
 
 static struct irq_chip lh7a404_gpio_vic1_chip = {
-	.name	= "GPIO-VIC1",
-	.ack	= lh7a404_vic1_ack_gpio_irq,
-	.mask	= lh7a404_vic1_mask_irq,
-	.unmask	= lh7a404_vic1_unmask_irq,
+	.name		= "GPIO-VIC1",
+	.irq_ack	= lh7a404_vic1_ack_gpio_irq,
+	.irq_mask	= lh7a404_vic1_mask_irq,
+	.irq_unmask	= lh7a404_vic1_unmask_irq,
 };
 
 static struct irq_chip lh7a404_gpio_vic2_chip = {
-	.name	= "GPIO-VIC2",
-	.ack	= lh7a404_vic2_ack_gpio_irq,
-	.mask	= lh7a404_vic2_mask_irq,
-	.unmask	= lh7a404_vic2_unmask_irq,
+	.name		= "GPIO-VIC2",
+	.irq_ack	= lh7a404_vic2_ack_gpio_irq,
+	.irq_mask	= lh7a404_vic2_mask_irq,
+	.irq_unmask	= lh7a404_vic2_unmask_irq,
 };
 
   /* IRQ initialization */
diff --git a/arch/arm/mach-lh7a40x/irq-lpd7a40x.c b/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
index fd033bb..1bfdcdd 100644
--- a/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
+++ b/arch/arm/mach-lh7a40x/irq-lpd7a40x.c
@@ -20,14 +20,14 @@
 
 #include "common.h"
 
-static void lh7a40x_ack_cpld_irq (u32 irq)
+static void lh7a40x_ack_cpld_irq(struct irq_data *d)
 {
 	/* CPLD doesn't have ack capability */
 }
 
-static void lh7a40x_mask_cpld_irq (u32 irq)
+static void lh7a40x_mask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS = CPLD_INTERRUPTS | 0x4;
 		break;
@@ -37,9 +37,9 @@
 	}
 }
 
-static void lh7a40x_unmask_cpld_irq (u32 irq)
+static void lh7a40x_unmask_cpld_irq(struct irq_data *d)
 {
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_LPD7A40X_ETH_INT:
 		CPLD_INTERRUPTS = CPLD_INTERRUPTS & ~ 0x4;
 		break;
@@ -50,17 +50,17 @@
 }
 
 static struct irq_chip lh7a40x_cpld_chip = {
-	.name	= "CPLD",
-	.ack	= lh7a40x_ack_cpld_irq,
-	.mask	= lh7a40x_mask_cpld_irq,
-	.unmask	= lh7a40x_unmask_cpld_irq,
+	.name		= "CPLD",
+	.irq_ack	= lh7a40x_ack_cpld_irq,
+	.irq_mask	= lh7a40x_mask_cpld_irq,
+	.irq_unmask	= lh7a40x_unmask_cpld_irq,
 };
 
 static void lh7a40x_cpld_handler (unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int mask = CPLD_INTERRUPTS;
 
-	desc->chip->ack (irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	if ((mask & 0x1) == 0)	/* WLAN */
 		generic_handle_irq(IRQ_LPD7A40X_ETH_INT);
@@ -68,7 +68,7 @@
 	if ((mask & 0x2) == 0)	/* Touch */
 		generic_handle_irq(IRQ_LPD7A400_TS);
 
-	desc->chip->unmask (irq); /* Level-triggered need this */
+	desc->irq_data.chip->irq_unmask(&desc->irq_data); /* Level-triggered need this */
 }
 
 
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c
index bd0df26c..316ecbf 100644
--- a/arch/arm/mach-lpc32xx/irq.c
+++ b/arch/arm/mach-lpc32xx/irq.c
@@ -191,38 +191,38 @@
 	}
 }
 
-static void lpc32xx_mask_irq(unsigned int irq)
+static void lpc32xx_mask_irq(struct irq_data *d)
 {
 	unsigned int reg, ctrl, mask;
 
-	get_controller(irq, &ctrl, &mask);
+	get_controller(d->irq, &ctrl, &mask);
 
 	reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) & ~mask;
 	__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
 }
 
-static void lpc32xx_unmask_irq(unsigned int irq)
+static void lpc32xx_unmask_irq(struct irq_data *d)
 {
 	unsigned int reg, ctrl, mask;
 
-	get_controller(irq, &ctrl, &mask);
+	get_controller(d->irq, &ctrl, &mask);
 
 	reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) | mask;
 	__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
 }
 
-static void lpc32xx_ack_irq(unsigned int irq)
+static void lpc32xx_ack_irq(struct irq_data *d)
 {
 	unsigned int ctrl, mask;
 
-	get_controller(irq, &ctrl, &mask);
+	get_controller(d->irq, &ctrl, &mask);
 
 	__raw_writel(mask, LPC32XX_INTC_RAW_STAT(ctrl));
 
 	/* Also need to clear pending wake event */
-	if (lpc32xx_events[irq].mask != 0)
-		__raw_writel(lpc32xx_events[irq].mask,
-			lpc32xx_events[irq].event_group->rawstat_reg);
+	if (lpc32xx_events[d->irq].mask != 0)
+		__raw_writel(lpc32xx_events[d->irq].mask,
+			lpc32xx_events[d->irq].event_group->rawstat_reg);
 }
 
 static void __lpc32xx_set_irq_type(unsigned int irq, int use_high_level,
@@ -261,27 +261,27 @@
 	}
 }
 
-static int lpc32xx_set_irq_type(unsigned int irq, unsigned int type)
+static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type)
 {
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
 		/* Rising edge sensitive */
-		__lpc32xx_set_irq_type(irq, 1, 1);
+		__lpc32xx_set_irq_type(d->irq, 1, 1);
 		break;
 
 	case IRQ_TYPE_EDGE_FALLING:
 		/* Falling edge sensitive */
-		__lpc32xx_set_irq_type(irq, 0, 1);
+		__lpc32xx_set_irq_type(d->irq, 0, 1);
 		break;
 
 	case IRQ_TYPE_LEVEL_LOW:
 		/* Low level sensitive */
-		__lpc32xx_set_irq_type(irq, 0, 0);
+		__lpc32xx_set_irq_type(d->irq, 0, 0);
 		break;
 
 	case IRQ_TYPE_LEVEL_HIGH:
 		/* High level sensitive */
-		__lpc32xx_set_irq_type(irq, 1, 0);
+		__lpc32xx_set_irq_type(d->irq, 1, 0);
 		break;
 
 	/* Other modes are not supported */
@@ -290,33 +290,33 @@
 	}
 
 	/* Ok to use the level handler for all types */
-	set_irq_handler(irq, handle_level_irq);
+	set_irq_handler(d->irq, handle_level_irq);
 
 	return 0;
 }
 
-static int lpc32xx_irq_wake(unsigned int irqno, unsigned int state)
+static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state)
 {
 	unsigned long eventreg;
 
-	if (lpc32xx_events[irqno].mask != 0) {
-		eventreg = __raw_readl(lpc32xx_events[irqno].
+	if (lpc32xx_events[d->irq].mask != 0) {
+		eventreg = __raw_readl(lpc32xx_events[d->irq].
 			event_group->enab_reg);
 
 		if (state)
-			eventreg |= lpc32xx_events[irqno].mask;
+			eventreg |= lpc32xx_events[d->irq].mask;
 		else
-			eventreg &= ~lpc32xx_events[irqno].mask;
+			eventreg &= ~lpc32xx_events[d->irq].mask;
 
 		__raw_writel(eventreg,
-			lpc32xx_events[irqno].event_group->enab_reg);
+			lpc32xx_events[d->irq].event_group->enab_reg);
 
 		return 0;
 	}
 
 	/* Clear event */
-	__raw_writel(lpc32xx_events[irqno].mask,
-		lpc32xx_events[irqno].event_group->rawstat_reg);
+	__raw_writel(lpc32xx_events[d->irq].mask,
+		lpc32xx_events[d->irq].event_group->rawstat_reg);
 
 	return -ENODEV;
 }
@@ -336,11 +336,11 @@
 }
 
 static struct irq_chip lpc32xx_irq_chip = {
-	.ack = lpc32xx_ack_irq,
-	.mask = lpc32xx_mask_irq,
-	.unmask = lpc32xx_unmask_irq,
-	.set_type = lpc32xx_set_irq_type,
-	.set_wake = lpc32xx_irq_wake
+	.irq_ack = lpc32xx_ack_irq,
+	.irq_mask = lpc32xx_mask_irq,
+	.irq_unmask = lpc32xx_unmask_irq,
+	.irq_set_type = lpc32xx_set_irq_type,
+	.irq_set_wake = lpc32xx_irq_wake
 };
 
 static void lpc32xx_sic1_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/arm/mach-lpc32xx/pm.c b/arch/arm/mach-lpc32xx/pm.c
index a6e2aed..e76d41b 100644
--- a/arch/arm/mach-lpc32xx/pm.c
+++ b/arch/arm/mach-lpc32xx/pm.c
@@ -123,7 +123,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops lpc32xx_pm_ops = {
+static const struct platform_suspend_ops lpc32xx_pm_ops = {
 	.valid	= suspend_valid_only_mem,
 	.enter	= lpc32xx_pm_enter,
 };
diff --git a/arch/arm/mach-mmp/include/mach/mfp-mmp2.h b/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
index 117e303..4ad3862 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
@@ -6,7 +6,7 @@
 #define MFP_DRIVE_VERY_SLOW	(0x0 << 13)
 #define MFP_DRIVE_SLOW		(0x2 << 13)
 #define MFP_DRIVE_MEDIUM	(0x4 << 13)
-#define MFP_DRIVE_FAST		(0x8 << 13)
+#define MFP_DRIVE_FAST		(0x6 << 13)
 
 /* GPIO */
 #define GPIO0_GPIO	MFP_CFG(GPIO0, AF0)
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
index 7e8a80f..fbd7ee8 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
@@ -6,7 +6,7 @@
 #define MFP_DRIVE_VERY_SLOW	(0x0 << 13)
 #define MFP_DRIVE_SLOW		(0x2 << 13)
 #define MFP_DRIVE_MEDIUM	(0x4 << 13)
-#define MFP_DRIVE_FAST		(0x8 << 13)
+#define MFP_DRIVE_FAST		(0x6 << 13)
 
 /* UART2 */
 #define GPIO47_UART2_RXD	MFP_CFG(GPIO47, AF6)
diff --git a/arch/arm/mach-mmp/irq-mmp2.c b/arch/arm/mach-mmp/irq-mmp2.c
index 01342be..fa03703 100644
--- a/arch/arm/mach-mmp/irq-mmp2.c
+++ b/arch/arm/mach-mmp/irq-mmp2.c
@@ -20,48 +20,48 @@
 
 #include "common.h"
 
-static void icu_mask_irq(unsigned int irq)
+static void icu_mask_irq(struct irq_data *d)
 {
-	uint32_t r = __raw_readl(ICU_INT_CONF(irq));
+	uint32_t r = __raw_readl(ICU_INT_CONF(d->irq));
 
 	r &= ~ICU_INT_ROUTE_PJ4_IRQ;
-	__raw_writel(r, ICU_INT_CONF(irq));
+	__raw_writel(r, ICU_INT_CONF(d->irq));
 }
 
-static void icu_unmask_irq(unsigned int irq)
+static void icu_unmask_irq(struct irq_data *d)
 {
-	uint32_t r = __raw_readl(ICU_INT_CONF(irq));
+	uint32_t r = __raw_readl(ICU_INT_CONF(d->irq));
 
 	r |= ICU_INT_ROUTE_PJ4_IRQ;
-	__raw_writel(r, ICU_INT_CONF(irq));
+	__raw_writel(r, ICU_INT_CONF(d->irq));
 }
 
 static struct irq_chip icu_irq_chip = {
 	.name		= "icu_irq",
-	.mask		= icu_mask_irq,
-	.mask_ack	= icu_mask_irq,
-	.unmask		= icu_unmask_irq,
+	.irq_mask	= icu_mask_irq,
+	.irq_mask_ack	= icu_mask_irq,
+	.irq_unmask	= icu_unmask_irq,
 };
 
-static void pmic_irq_ack(unsigned int irq)
+static void pmic_irq_ack(struct irq_data *d)
 {
-	if (irq == IRQ_MMP2_PMIC)
+	if (d->irq == IRQ_MMP2_PMIC)
 		mmp2_clear_pmic_int();
 }
 
 #define SECOND_IRQ_MASK(_name_, irq_base, prefix)			\
-static void _name_##_mask_irq(unsigned int irq)				\
+static void _name_##_mask_irq(struct irq_data *d)			\
 {									\
 	uint32_t r;							\
-	r = __raw_readl(prefix##_MASK) | (1 << (irq - irq_base));	\
+	r = __raw_readl(prefix##_MASK) | (1 << (d->irq - irq_base));	\
 	__raw_writel(r, prefix##_MASK);					\
 }
 
 #define SECOND_IRQ_UNMASK(_name_, irq_base, prefix)			\
-static void _name_##_unmask_irq(unsigned int irq)			\
+static void _name_##_unmask_irq(struct irq_data *d)			\
 {									\
 	uint32_t r;							\
-	r = __raw_readl(prefix##_MASK) & ~(1 << (irq - irq_base));	\
+	r = __raw_readl(prefix##_MASK) & ~(1 << (d->irq - irq_base));	\
 	__raw_writel(r, prefix##_MASK);					\
 }
 
@@ -88,8 +88,8 @@
 SECOND_IRQ_DEMUX(_name_, irq_base, prefix)				\
 static struct irq_chip _name_##_irq_chip = {				\
 	.name		= #_name_,					\
-	.mask		= _name_##_mask_irq,				\
-	.unmask		= _name_##_unmask_irq,				\
+	.irq_mask	= _name_##_mask_irq,				\
+	.irq_unmask	= _name_##_unmask_irq,				\
 }
 
 SECOND_IRQ_CHIP(pmic, IRQ_MMP2_PMIC_BASE, MMP2_ICU_INT4);
@@ -103,10 +103,12 @@
 	int irq;
 
 	for (irq = start; num > 0; irq++, num--) {
+		struct irq_data *d = irq_get_irq_data(irq);
+
 		/* mask and clear the IRQ */
-		chip->mask(irq);
-		if (chip->ack)
-			chip->ack(irq);
+		chip->irq_mask(d);
+		if (chip->irq_ack)
+			chip->irq_ack(d);
 
 		set_irq_chip(irq, chip);
 		set_irq_flags(irq, IRQF_VALID);
@@ -119,7 +121,7 @@
 	int irq;
 
 	for (irq = 0; irq < IRQ_MMP2_MUX_BASE; irq++) {
-		icu_mask_irq(irq);
+		icu_mask_irq(irq_get_irq_data(irq));
 		set_irq_chip(irq, &icu_irq_chip);
 		set_irq_flags(irq, IRQF_VALID);
 
@@ -139,7 +141,7 @@
 	/* NOTE: IRQ_MMP2_PMIC requires the PMIC MFPR register
 	 * to be written to clear the interrupt
 	 */
-	pmic_irq_chip.ack = pmic_irq_ack;
+	pmic_irq_chip.irq_ack = pmic_irq_ack;
 
 	init_mux_irq(&pmic_irq_chip, IRQ_MMP2_PMIC_BASE, 2);
 	init_mux_irq(&rtc_irq_chip, IRQ_MMP2_RTC_BASE, 2);
diff --git a/arch/arm/mach-mmp/irq-pxa168.c b/arch/arm/mach-mmp/irq-pxa168.c
index 52ff2f0..f86b450 100644
--- a/arch/arm/mach-mmp/irq-pxa168.c
+++ b/arch/arm/mach-mmp/irq-pxa168.c
@@ -25,21 +25,21 @@
 #define PRIORITY_DEFAULT	0x1
 #define PRIORITY_NONE		0x0	/* means IRQ disabled */
 
-static void icu_mask_irq(unsigned int irq)
+static void icu_mask_irq(struct irq_data *d)
 {
-	__raw_writel(PRIORITY_NONE, ICU_INT_CONF(irq));
+	__raw_writel(PRIORITY_NONE, ICU_INT_CONF(d->irq));
 }
 
-static void icu_unmask_irq(unsigned int irq)
+static void icu_unmask_irq(struct irq_data *d)
 {
-	__raw_writel(IRQ_ROUTE_TO_AP | PRIORITY_DEFAULT, ICU_INT_CONF(irq));
+	__raw_writel(IRQ_ROUTE_TO_AP | PRIORITY_DEFAULT, ICU_INT_CONF(d->irq));
 }
 
 static struct irq_chip icu_irq_chip = {
-	.name	= "icu_irq",
-	.ack	= icu_mask_irq,
-	.mask	= icu_mask_irq,
-	.unmask	= icu_unmask_irq,
+	.name		= "icu_irq",
+	.irq_ack	= icu_mask_irq,
+	.irq_mask	= icu_mask_irq,
+	.irq_unmask	= icu_unmask_irq,
 };
 
 void __init icu_init_irq(void)
@@ -47,7 +47,7 @@
 	int irq;
 
 	for (irq = 0; irq < 64; irq++) {
-		icu_mask_irq(irq);
+		icu_mask_irq(irq_get_irq_data(irq));
 		set_irq_chip(irq, &icu_irq_chip);
 		set_irq_handler(irq, handle_level_irq);
 		set_irq_flags(irq, IRQF_VALID);
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 2e83913..6dde818 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -43,7 +43,7 @@
  * at run-time: they vary from board to board, and the true
  * configuration won't be known until boot.
  */
-static struct resource smc91x_resources[] __initdata = {
+static struct resource smc91x_resources[] = {
 	[0] = {
 		.flags = IORESOURCE_MEM,
 	},
@@ -52,7 +52,7 @@
 	},
 };
 
-static struct platform_device smc91x_device __initdata = {
+static struct platform_device smc91x_device = {
 	.name           = "smc91x",
 	.id             = 0,
 	.num_resources  = ARRAY_SIZE(smc91x_resources),
diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c
index f8c09ef..a604ec1 100644
--- a/arch/arm/mach-msm/board-trout-gpio.c
+++ b/arch/arm/mach-msm/board-trout-gpio.c
@@ -113,52 +113,52 @@
 	TROUT_GPIO_BANK("VIRTUAL", 0x12, TROUT_GPIO_VIRTUAL_BASE, 0),
 };
 
-static void trout_gpio_irq_ack(unsigned int irq)
+static void trout_gpio_irq_ack(struct irq_data *d)
 {
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 	int reg = TROUT_BANK_TO_STAT_REG(bank);
-	/*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", irq);*/
+	/*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", d->irq);*/
 	writeb(mask, TROUT_CPLD_BASE + reg);
 }
 
-static void trout_gpio_irq_mask(unsigned int irq)
+static void trout_gpio_irq_mask(struct irq_data *d)
 {
 	unsigned long flags;
 	uint8_t reg_val;
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 	int reg = TROUT_BANK_TO_MASK_REG(bank);
 
 	local_irq_save(flags);
 	reg_val = trout_int_mask[bank] |= mask;
 	/*printk(KERN_INFO "trout_gpio_irq_mask irq %d => %d:%02x\n",
-	       irq, bank, reg_val);*/
+	       d->irq, bank, reg_val);*/
 	writeb(reg_val, TROUT_CPLD_BASE + reg);
 	local_irq_restore(flags);
 }
 
-static void trout_gpio_irq_unmask(unsigned int irq)
+static void trout_gpio_irq_unmask(struct irq_data *d)
 {
 	unsigned long flags;
 	uint8_t reg_val;
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 	int reg = TROUT_BANK_TO_MASK_REG(bank);
 
 	local_irq_save(flags);
 	reg_val = trout_int_mask[bank] &= ~mask;
 	/*printk(KERN_INFO "trout_gpio_irq_unmask irq %d => %d:%02x\n",
-	       irq, bank, reg_val);*/
+	       d->irq, bank, reg_val);*/
 	writeb(reg_val, TROUT_CPLD_BASE + reg);
 	local_irq_restore(flags);
 }
 
-int trout_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+int trout_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned long flags;
-	int bank = TROUT_INT_TO_BANK(irq);
-	uint8_t mask = TROUT_INT_TO_MASK(irq);
+	int bank = TROUT_INT_TO_BANK(d->irq);
+	uint8_t mask = TROUT_INT_TO_MASK(d->irq);
 
 	local_irq_save(flags);
 	if(on)
@@ -198,15 +198,15 @@
 		}
 		int_base += TROUT_INT_BANK0_COUNT;
 	}
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static struct irq_chip trout_gpio_irq_chip = {
-	.name      = "troutgpio",
-	.ack       = trout_gpio_irq_ack,
-	.mask      = trout_gpio_irq_mask,
-	.unmask    = trout_gpio_irq_unmask,
-	.set_wake  = trout_gpio_irq_set_wake,
+	.name          = "troutgpio",
+	.irq_ack       = trout_gpio_irq_ack,
+	.irq_mask      = trout_gpio_irq_mask,
+	.irq_unmask    = trout_gpio_irq_unmask,
+	.irq_set_wake  = trout_gpio_irq_set_wake,
 };
 
 /*
diff --git a/arch/arm/mach-msm/gpio.c b/arch/arm/mach-msm/gpio.c
index 33051b5..176af9d 100644
--- a/arch/arm/mach-msm/gpio.c
+++ b/arch/arm/mach-msm/gpio.c
@@ -225,21 +225,21 @@
 #endif
 };
 
-static void msm_gpio_irq_ack(unsigned int irq)
+static void msm_gpio_irq_ack(struct irq_data *d)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	msm_gpio_clear_detect_status(msm_chip,
-				     irq - gpio_to_irq(msm_chip->chip.base));
+				     d->irq - gpio_to_irq(msm_chip->chip.base));
 	spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
 }
 
-static void msm_gpio_irq_mask(unsigned int irq)
+static void msm_gpio_irq_mask(struct irq_data *d)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	/* level triggered interrupts are also latched */
@@ -250,11 +250,11 @@
 	spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
 }
 
-static void msm_gpio_irq_unmask(unsigned int irq)
+static void msm_gpio_irq_unmask(struct irq_data *d)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	/* level triggered interrupts are also latched */
@@ -265,11 +265,11 @@
 	spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
 }
 
-static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 
@@ -282,21 +282,21 @@
 	return 0;
 }
 
-static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
 	unsigned long irq_flags;
-	struct msm_gpio_chip *msm_chip = get_irq_chip_data(irq);
-	unsigned offset = irq - gpio_to_irq(msm_chip->chip.base);
+	struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+	unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
 	unsigned val, mask = BIT(offset);
 
 	spin_lock_irqsave(&msm_chip->lock, irq_flags);
 	val = readl(msm_chip->regs.int_edge);
 	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
 		writel(val | mask, msm_chip->regs.int_edge);
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	} else {
 		writel(val & ~mask, msm_chip->regs.int_edge);
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 	if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
 		msm_chip->both_edge_detect |= mask;
@@ -333,16 +333,16 @@
 					   msm_chip->chip.base + j);
 		}
 	}
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static struct irq_chip msm_gpio_irq_chip = {
-	.name      = "msmgpio",
-	.ack       = msm_gpio_irq_ack,
-	.mask      = msm_gpio_irq_mask,
-	.unmask    = msm_gpio_irq_unmask,
-	.set_wake  = msm_gpio_irq_set_wake,
-	.set_type  = msm_gpio_irq_set_type,
+	.name          = "msmgpio",
+	.irq_ack       = msm_gpio_irq_ack,
+	.irq_mask      = msm_gpio_irq_mask,
+	.irq_unmask    = msm_gpio_irq_unmask,
+	.irq_set_wake  = msm_gpio_irq_set_wake,
+	.irq_set_type  = msm_gpio_irq_set_type,
 };
 
 static int __init msm_init_gpio(void)
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
index 4dc99aa..1246715 100644
--- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
+++ b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
@@ -26,7 +26,7 @@
 	 * The interrupt numbering scheme is defined in the
 	 * interrupt controller spec.  To wit:
 	 *
-	 * Migrated the code from ARM MP port to be more consistant
+	 * Migrated the code from ARM MP port to be more consistent
 	 * with interrupt processing , the following still holds true
 	 * however, all interrupts are treated the same regardless of
 	 * if they are local IPI or PPI
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 800f327..1260007 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -154,7 +154,7 @@
 {
 	if (mtype == MT_DEVICE) {
 		/* The peripherals in the 88000000 - D0000000 range
-		 * are only accessable by type MT_DEVICE_NONSHARED.
+		 * are only accessible by type MT_DEVICE_NONSHARED.
 		 * Adjust mtype as necessary to make this "just work."
 		 */
 		if ((phys_addr >= 0x88000000) && (phys_addr < 0xD0000000))
diff --git a/arch/arm/mach-msm/irq-vic.c b/arch/arm/mach-msm/irq-vic.c
index 99f2c34..68c28bb 100644
--- a/arch/arm/mach-msm/irq-vic.c
+++ b/arch/arm/mach-msm/irq-vic.c
@@ -226,19 +226,18 @@
 		writel(val, base + (i * 4));
 }
 
-static void msm_irq_ack(unsigned int irq)
+static void msm_irq_ack(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, irq);
-	irq = 1 << (irq & 31);
-	writel(irq, reg);
+	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, d->irq);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static void msm_irq_mask(unsigned int irq)
+static void msm_irq_mask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, irq);
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	uint32_t mask = 1UL << (irq & 31);
-	int smsm_irq = msm_irq_to_smsm[irq];
+	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, d->irq);
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	uint32_t mask = 1UL << (d->irq & 31);
+	int smsm_irq = msm_irq_to_smsm[d->irq];
 
 	msm_irq_shadow_reg[index].int_en[0] &= ~mask;
 	writel(mask, reg);
@@ -250,12 +249,12 @@
 	}
 }
 
-static void msm_irq_unmask(unsigned int irq)
+static void msm_irq_unmask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, irq);
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	uint32_t mask = 1UL << (irq & 31);
-	int smsm_irq = msm_irq_to_smsm[irq];
+	void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, d->irq);
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	uint32_t mask = 1UL << (d->irq & 31);
+	int smsm_irq = msm_irq_to_smsm[d->irq];
 
 	msm_irq_shadow_reg[index].int_en[0] |= mask;
 	writel(mask, reg);
@@ -268,14 +267,14 @@
 	}
 }
 
-static int msm_irq_set_wake(unsigned int irq, unsigned int on)
+static int msm_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	uint32_t mask = 1UL << (irq & 31);
-	int smsm_irq = msm_irq_to_smsm[irq];
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	uint32_t mask = 1UL << (d->irq & 31);
+	int smsm_irq = msm_irq_to_smsm[d->irq];
 
 	if (smsm_irq == 0) {
-		printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", irq);
+		printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", d->irq);
 		return -EINVAL;
 	}
 	if (on)
@@ -294,12 +293,12 @@
 	return 0;
 }
 
-static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, irq);
-	void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, irq);
-	unsigned index = VIC_INT_TO_REG_INDEX(irq);
-	int b = 1 << (irq & 31);
+	void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, d->irq);
+	void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, d->irq);
+	unsigned index = VIC_INT_TO_REG_INDEX(d->irq);
+	int b = 1 << (d->irq & 31);
 	uint32_t polarity;
 	uint32_t type;
 
@@ -314,11 +313,11 @@
 	type = msm_irq_shadow_reg[index].int_type;
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
 		type |= b;
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	}
 	if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
 		type &= ~b;
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 	writel(type, treg);
 	msm_irq_shadow_reg[index].int_type = type;
@@ -326,13 +325,13 @@
 }
 
 static struct irq_chip msm_irq_chip = {
-	.name      = "msm",
-	.disable   = msm_irq_mask,
-	.ack       = msm_irq_ack,
-	.mask      = msm_irq_mask,
-	.unmask    = msm_irq_unmask,
-	.set_wake  = msm_irq_set_wake,
-	.set_type  = msm_irq_set_type,
+	.name          = "msm",
+	.irq_disable   = msm_irq_mask,
+	.irq_ack       = msm_irq_ack,
+	.irq_mask      = msm_irq_mask,
+	.irq_unmask    = msm_irq_unmask,
+	.irq_set_wake  = msm_irq_set_wake,
+	.irq_set_type  = msm_irq_set_type,
 };
 
 void __init msm_init_irq(void)
diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c
index 6c8d5f8..0b27d89 100644
--- a/arch/arm/mach-msm/irq.c
+++ b/arch/arm/mach-msm/irq.c
@@ -64,35 +64,34 @@
 #define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4))
 #define VIC_VECTADDR(n)     VIC_REG(0x0400+((n) * 4))
 
-static void msm_irq_ack(unsigned int irq)
+static void msm_irq_ack(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_CLEAR0 + ((irq & 32) ? 4 : 0);
-	irq = 1 << (irq & 31);
-	writel(irq, reg);
+	void __iomem *reg = VIC_INT_CLEAR0 + ((d->irq & 32) ? 4 : 0);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static void msm_irq_mask(unsigned int irq)
+static void msm_irq_mask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_ENCLEAR0 + ((irq & 32) ? 4 : 0);
-	writel(1 << (irq & 31), reg);
+	void __iomem *reg = VIC_INT_ENCLEAR0 + ((d->irq & 32) ? 4 : 0);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static void msm_irq_unmask(unsigned int irq)
+static void msm_irq_unmask(struct irq_data *d)
 {
-	void __iomem *reg = VIC_INT_ENSET0 + ((irq & 32) ? 4 : 0);
-	writel(1 << (irq & 31), reg);
+	void __iomem *reg = VIC_INT_ENSET0 + ((d->irq & 32) ? 4 : 0);
+	writel(1 << (d->irq & 31), reg);
 }
 
-static int msm_irq_set_wake(unsigned int irq, unsigned int on)
+static int msm_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	return -EINVAL;
 }
 
-static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	void __iomem *treg = VIC_INT_TYPE0 + ((irq & 32) ? 4 : 0);
-	void __iomem *preg = VIC_INT_POLARITY0 + ((irq & 32) ? 4 : 0);
-	int b = 1 << (irq & 31);
+	void __iomem *treg = VIC_INT_TYPE0 + ((d->irq & 32) ? 4 : 0);
+	void __iomem *preg = VIC_INT_POLARITY0 + ((d->irq & 32) ? 4 : 0);
+	int b = 1 << (d->irq & 31);
 
 	if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
 		writel(readl(preg) | b, preg);
@@ -101,22 +100,22 @@
 
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
 		writel(readl(treg) | b, treg);
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	}
 	if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
 		writel(readl(treg) & (~b), treg);
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 	return 0;
 }
 
 static struct irq_chip msm_irq_chip = {
-	.name      = "msm",
-	.ack       = msm_irq_ack,
-	.mask      = msm_irq_mask,
-	.unmask    = msm_irq_unmask,
-	.set_wake  = msm_irq_set_wake,
-	.set_type  = msm_irq_set_type,
+	.name          = "msm",
+	.irq_ack       = msm_irq_ack,
+	.irq_mask      = msm_irq_mask,
+	.irq_unmask    = msm_irq_unmask,
+	.irq_set_wake  = msm_irq_set_wake,
+	.irq_set_type  = msm_irq_set_type,
 };
 
 void __init msm_init_irq(void)
diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c
index 152eefd..11b54c7 100644
--- a/arch/arm/mach-msm/sirc.c
+++ b/arch/arm/mach-msm/sirc.c
@@ -42,12 +42,11 @@
 
 /* Mask off the given interrupt. Keep the int_enable mask in sync with
    the enable reg, so it can be restored after power collapse. */
-static void sirc_irq_mask(unsigned int irq)
+static void sirc_irq_mask(struct irq_data *d)
 {
 	unsigned int mask;
 
-
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	writel(mask, sirc_regs.int_enable_clear);
 	int_enable &= ~mask;
 	return;
@@ -55,31 +54,31 @@
 
 /* Unmask the given interrupt. Keep the int_enable mask in sync with
    the enable reg, so it can be restored after power collapse. */
-static void sirc_irq_unmask(unsigned int irq)
+static void sirc_irq_unmask(struct irq_data *d)
 {
 	unsigned int mask;
 
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	writel(mask, sirc_regs.int_enable_set);
 	int_enable |= mask;
 	return;
 }
 
-static void sirc_irq_ack(unsigned int irq)
+static void sirc_irq_ack(struct irq_data *d)
 {
 	unsigned int mask;
 
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	writel(mask, sirc_regs.int_clear);
 	return;
 }
 
-static int sirc_irq_set_wake(unsigned int irq, unsigned int on)
+static int sirc_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned int mask;
 
 	/* Used to set the interrupt enable mask during power collapse. */
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	if (on)
 		wake_enable |= mask;
 	else
@@ -88,12 +87,12 @@
 	return 0;
 }
 
-static int sirc_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int sirc_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
 	unsigned int mask;
 	unsigned int val;
 
-	mask = 1 << (irq - FIRST_SIRC_IRQ);
+	mask = 1 << (d->irq - FIRST_SIRC_IRQ);
 	val = readl(sirc_regs.int_polarity);
 
 	if (flow_type & (IRQF_TRIGGER_LOW | IRQF_TRIGGER_FALLING))
@@ -106,10 +105,10 @@
 	val = readl(sirc_regs.int_type);
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
 		val |= mask;
-		irq_desc[irq].handle_irq = handle_edge_irq;
+		irq_desc[d->irq].handle_irq = handle_edge_irq;
 	} else {
 		val &= ~mask;
-		irq_desc[irq].handle_irq = handle_level_irq;
+		irq_desc[d->irq].handle_irq = handle_level_irq;
 	}
 
 	writel(val, sirc_regs.int_type);
@@ -139,16 +138,16 @@
 		;
 	generic_handle_irq(sirq+FIRST_SIRC_IRQ);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static struct irq_chip sirc_irq_chip = {
-	.name      = "sirc",
-	.ack       = sirc_irq_ack,
-	.mask      = sirc_irq_mask,
-	.unmask    = sirc_irq_unmask,
-	.set_wake  = sirc_irq_set_wake,
-	.set_type  = sirc_irq_set_type,
+	.name          = "sirc",
+	.irq_ack       = sirc_irq_ack,
+	.irq_mask      = sirc_irq_mask,
+	.irq_unmask    = sirc_irq_unmask,
+	.irq_set_wake  = sirc_irq_set_wake,
+	.irq_set_type  = sirc_irq_set_type,
 };
 
 void __init msm_init_sirc(void)
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index 4e516b4..0d65db8 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -140,17 +140,17 @@
 
 static struct mc13783_regulator_init_data mx31_3ds_regulators[] = {
 	{
-		.id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */
+		.id = MC13783_REG_PWGT1SPI, /* Power Gate for ARM core. */
 		.init_data = &pwgtx_init,
 	}, {
-		.id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */
+		.id = MC13783_REG_PWGT2SPI, /* Power Gate for L2 Cache. */
 		.init_data = &pwgtx_init,
 	}, {
 
-		.id = MC13783_REGU_GPO1, /* Turn on 1.8V */
+		.id = MC13783_REG_GPO1, /* Turn on 1.8V */
 		.init_data = &gpo_init,
 	}, {
-		.id = MC13783_REGU_GPO3, /* Turn on 3.3V */
+		.id = MC13783_REG_GPO3, /* Turn on 3.3V */
 		.init_data = &gpo_init,
 	},
 };
diff --git a/arch/arm/mach-mx3/mach-mx31ads.c b/arch/arm/mach-mx3/mach-mx31ads.c
index b993b9b..88b97d6 100644
--- a/arch/arm/mach-mx3/mach-mx31ads.c
+++ b/arch/arm/mach-mx3/mach-mx31ads.c
@@ -162,9 +162,9 @@
  * Disable an expio pin's interrupt by setting the bit in the imr.
  * @param irq           an expio virtual irq number
  */
-static void expio_mask_irq(u32 irq)
+static void expio_mask_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 	/* mask the interrupt */
 	__raw_writew(1 << expio, PBC_INTMASK_CLEAR_REG);
 	__raw_readw(PBC_INTMASK_CLEAR_REG);
@@ -174,9 +174,9 @@
  * Acknowledge an expanded io pin's interrupt by clearing the bit in the isr.
  * @param irq           an expanded io virtual irq number
  */
-static void expio_ack_irq(u32 irq)
+static void expio_ack_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 	/* clear the interrupt status */
 	__raw_writew(1 << expio, PBC_INTSTATUS_REG);
 }
@@ -185,18 +185,18 @@
  * Enable a expio pin's interrupt by clearing the bit in the imr.
  * @param irq           a expio virtual irq number
  */
-static void expio_unmask_irq(u32 irq)
+static void expio_unmask_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 	/* unmask the interrupt */
 	__raw_writew(1 << expio, PBC_INTMASK_SET_REG);
 }
 
 static struct irq_chip expio_irq_chip = {
 	.name = "EXPIO(CPLD)",
-	.ack = expio_ack_irq,
-	.mask = expio_mask_irq,
-	.unmask = expio_unmask_irq,
+	.irq_ack = expio_ack_irq,
+	.irq_mask = expio_mask_irq,
+	.irq_unmask = expio_unmask_irq,
 };
 
 static void __init mx31ads_init_expio(void)
diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c
index 203d21a..1aa8d65 100644
--- a/arch/arm/mach-mx3/mach-mx31moboard.c
+++ b/arch/arm/mach-mx3/mach-mx31moboard.c
@@ -216,11 +216,11 @@
 
 static struct mc13783_regulator_init_data moboard_regulators[] = {
 	{
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &sdhc_vreg_data,
 	},
 	{
-		.id = MC13783_REGU_VCAM,
+		.id = MC13783_REG_VCAM,
 		.init_data = &cam_vreg_data,
 	},
 };
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
index 55254b6..de4fa992f 100644
--- a/arch/arm/mach-mx5/Kconfig
+++ b/arch/arm/mach-mx5/Kconfig
@@ -50,6 +50,7 @@
 config MACH_MX51_3DS
 	bool "Support MX51PDK (3DS)"
 	select SOC_IMX51
+	select IMX_HAVE_PLATFORM_IMX_KEYPAD
 	select IMX_HAVE_PLATFORM_IMX_UART
 	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 	select IMX_HAVE_PLATFORM_SPI_IMX
@@ -77,6 +78,7 @@
 config MACH_EUKREA_MBIMX51_BASEBOARD
 	prompt "Eukrea MBIMX51 development board"
 	bool
+	select IMX_HAVE_PLATFORM_IMX_KEYPAD
 	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 	help
 	  This adds board specific devices that can be found on Eukrea's
@@ -124,10 +126,28 @@
 	bool "Support MX53 EVK platforms"
 	select SOC_IMX53
 	select IMX_HAVE_PLATFORM_IMX_UART
+	select IMX_HAVE_PLATFORM_IMX_I2C
+	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+	select IMX_HAVE_PLATFORM_SPI_IMX
 	help
 	  Include support for MX53 EVK platform. This includes specific
 	  configurations for the board and its peripherals.
 
+config MACH_MX53_SMD
+	bool "Support MX53 SMD platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX_UART
+	help
+	  Include support for MX53 SMD platform. This includes specific
+	  configurations for the board and its peripherals.
+
+config MACH_MX53_LOCO
+	bool "Support MX53 LOCO platforms"
+	select SOC_IMX53
+	select IMX_HAVE_PLATFORM_IMX_UART
+	help
+	  Include support for MX53 LOCO platform. This includes specific
+	  configurations for the board and its peripherals.
 
 config MACH_MX50_RDP
 	bool "Support MX50 reference design platform"
diff --git a/arch/arm/mach-mx5/Makefile b/arch/arm/mach-mx5/Makefile
index 0c398ba..0d43be9 100644
--- a/arch/arm/mach-mx5/Makefile
+++ b/arch/arm/mach-mx5/Makefile
@@ -10,6 +10,8 @@
 obj-$(CONFIG_MACH_MX51_BABBAGE) += board-mx51_babbage.o
 obj-$(CONFIG_MACH_MX51_3DS) += board-mx51_3ds.o
 obj-$(CONFIG_MACH_MX53_EVK) += board-mx53_evk.o
+obj-$(CONFIG_MACH_MX53_SMD) += board-mx53_smd.o
+obj-$(CONFIG_MACH_MX53_LOCO) += board-mx53_loco.o
 obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
 obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
 obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += board-cpuimx51sd.o
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index e42bd2e..49d6448 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -12,7 +12,6 @@
 
 #include <linux/irq.h>
 #include <linux/platform_device.h>
-#include <linux/input/matrix_keypad.h>
 #include <linux/spi/spi.h>
 
 #include <asm/mach-types.h>
@@ -120,14 +119,14 @@
 	KEY(3, 5, KEY_BACK)
 };
 
-static struct matrix_keymap_data mx51_3ds_map_data = {
+static const struct matrix_keymap_data mx51_3ds_map_data __initconst = {
 	.keymap		= mx51_3ds_board_keymap,
 	.keymap_size	= ARRAY_SIZE(mx51_3ds_board_keymap),
 };
 
 static void mxc_init_keypad(void)
 {
-	mxc_register_device(&mxc_keypad_device, &mx51_3ds_map_data);
+	imx51_add_imx_keypad(&mx51_3ds_map_data);
 }
 #else
 static inline void mxc_init_keypad(void)
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index fa97d0d..caee04c 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -21,6 +21,11 @@
 
 #include <linux/init.h>
 #include <linux/clk.h>
+#include <linux/fec.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
 #include <mach/common.h>
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -29,6 +34,10 @@
 #include <mach/imx-uart.h>
 #include <mach/iomux-mx53.h>
 
+#define SMD_FEC_PHY_RST		IMX_GPIO_NR(7, 6)
+#define EVK_ECSPI1_CS0		IMX_GPIO_NR(2, 30)
+#define EVK_ECSPI1_CS1		IMX_GPIO_NR(3, 19)
+
 #include "crm_regs.h"
 #include "devices-imx53.h"
 
@@ -47,6 +56,14 @@
 	MX53_PAD_ATA_CS_1__UART3_RXD,
 	MX53_PAD_ATA_DA_1__UART3_CTS,
 	MX53_PAD_ATA_DA_2__UART3_RTS,
+
+	MX53_PAD_EIM_D16__CSPI1_SCLK,
+	MX53_PAD_EIM_D17__CSPI1_MISO,
+	MX53_PAD_EIM_D18__CSPI1_MOSI,
+
+	/* ecspi chip select lines */
+	MX53_PAD_EIM_EB2__GPIO_2_30,
+	MX53_PAD_EIM_D19__GPIO_3_19,
 };
 
 static const struct imxuart_platform_data mx53_evk_uart_pdata __initconst = {
@@ -60,11 +77,68 @@
 	imx53_add_imx_uart(2, &mx53_evk_uart_pdata);
 }
 
+static const struct imxi2c_platform_data mx53_evk_i2c_data __initconst = {
+	.bitrate = 100000,
+};
+
+static inline void mx53_evk_fec_reset(void)
+{
+	int ret;
+
+	/* reset FEC PHY */
+	ret = gpio_request(SMD_FEC_PHY_RST, "fec-phy-reset");
+	if (ret) {
+		printk(KERN_ERR "failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
+		return;
+	}
+	gpio_direction_output(SMD_FEC_PHY_RST, 0);
+	gpio_set_value(SMD_FEC_PHY_RST, 0);
+	msleep(1);
+	gpio_set_value(SMD_FEC_PHY_RST, 1);
+}
+
+static struct fec_platform_data mx53_evk_fec_pdata = {
+	.phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static struct spi_board_info mx53_evk_spi_board_info[] __initdata = {
+	{
+		.modalias = "mtd_dataflash",
+		.max_speed_hz = 25000000,
+		.bus_num = 0,
+		.chip_select = 1,
+		.mode = SPI_MODE_0,
+		.platform_data = NULL,
+	},
+};
+
+static int mx53_evk_spi_cs[] = {
+	EVK_ECSPI1_CS0,
+	EVK_ECSPI1_CS1,
+};
+
+static const struct spi_imx_master mx53_evk_spi_data __initconst = {
+	.chipselect     = mx53_evk_spi_cs,
+	.num_chipselect = ARRAY_SIZE(mx53_evk_spi_cs),
+};
+
 static void __init mx53_evk_board_init(void)
 {
 	mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads,
 					ARRAY_SIZE(mx53_evk_pads));
 	mx53_evk_init_uart();
+	mx53_evk_fec_reset();
+	imx53_add_fec(&mx53_evk_fec_pdata);
+
+	imx53_add_imx_i2c(0, &mx53_evk_i2c_data);
+	imx53_add_imx_i2c(1, &mx53_evk_i2c_data);
+
+	imx53_add_sdhci_esdhc_imx(0, NULL);
+	imx53_add_sdhci_esdhc_imx(1, NULL);
+
+	spi_register_board_info(mx53_evk_spi_board_info,
+		ARRAY_SIZE(mx53_evk_spi_board_info));
+	imx53_add_ecspi(0, &mx53_evk_spi_data);
 }
 
 static void __init mx53_evk_timer_init(void)
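
The FEC PHY reset helper added above requests a GPIO, drives it low for a millisecond, then releases the reset. As a hedged alternative (not part of this pull), gpio_request_one() can fold the request and the initial output level into one call; the sketch below reuses the SMD_FEC_PHY_RST define from this file:

static inline void mx53_evk_fec_reset(void)
{
	int ret;

	/* request the pin as an output that starts low (PHY held in reset) */
	ret = gpio_request_one(SMD_FEC_PHY_RST, GPIOF_OUT_INIT_LOW,
			       "fec-phy-reset");
	if (ret) {
		pr_err("failed to get FEC PHY reset GPIO: %d\n", ret);
		return;
	}
	msleep(1);				/* hold reset briefly */
	gpio_set_value(SMD_FEC_PHY_RST, 1);	/* release the PHY */
}
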
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
new file mode 100644
index 0000000..d1348e0
--- /dev/null
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/fec.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/imx-uart.h>
+#include <mach/iomux-mx53.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+#include "crm_regs.h"
+#include "devices-imx53.h"
+
+#define LOCO_FEC_PHY_RST		IMX_GPIO_NR(7, 6)
+
+static iomux_v3_cfg_t mx53_loco_pads[] = {
+	MX53_PAD_CSI0_D10__UART1_TXD,
+	MX53_PAD_CSI0_D11__UART1_RXD,
+	MX53_PAD_ATA_DIOW__UART1_TXD,
+	MX53_PAD_ATA_DMACK__UART1_RXD,
+
+	MX53_PAD_ATA_BUFFER_EN__UART2_RXD,
+	MX53_PAD_ATA_DMARQ__UART2_TXD,
+	MX53_PAD_ATA_DIOR__UART2_RTS,
+	MX53_PAD_ATA_INTRQ__UART2_CTS,
+
+	MX53_PAD_ATA_CS_0__UART3_TXD,
+	MX53_PAD_ATA_CS_1__UART3_RXD,
+	MX53_PAD_ATA_DA_1__UART3_CTS,
+	MX53_PAD_ATA_DA_2__UART3_RTS,
+};
+
+static const struct imxuart_platform_data mx53_loco_uart_data __initconst = {
+	.flags = IMXUART_HAVE_RTSCTS,
+};
+
+static inline void mx53_loco_init_uart(void)
+{
+	imx53_add_imx_uart(0, &mx53_loco_uart_data);
+	imx53_add_imx_uart(1, &mx53_loco_uart_data);
+	imx53_add_imx_uart(2, &mx53_loco_uart_data);
+}
+
+static inline void mx53_loco_fec_reset(void)
+{
+	int ret;
+
+	/* reset FEC PHY */
+	ret = gpio_request(LOCO_FEC_PHY_RST, "fec-phy-reset");
+	if (ret) {
+		printk(KERN_ERR "failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
+		return;
+	}
+	gpio_direction_output(LOCO_FEC_PHY_RST, 0);
+	msleep(1);
+	gpio_set_value(LOCO_FEC_PHY_RST, 1);
+}
+
+static struct fec_platform_data mx53_loco_fec_data = {
+	.phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static void __init mx53_loco_board_init(void)
+{
+	mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads,
+					ARRAY_SIZE(mx53_loco_pads));
+	mx53_loco_init_uart();
+	mx53_loco_fec_reset();
+	imx53_add_fec(&mx53_loco_fec_data);
+}
+
+static void __init mx53_loco_timer_init(void)
+{
+	mx53_clocks_init(32768, 24000000, 0, 0);
+}
+
+static struct sys_timer mx53_loco_timer = {
+	.init	= mx53_loco_timer_init,
+};
+
+MACHINE_START(MX53_LOCO, "Freescale MX53 LOCO Board")
+	.map_io = mx53_map_io,
+	.init_irq = mx53_init_irq,
+	.init_machine = mx53_loco_board_init,
+	.timer = &mx53_loco_timer,
+MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
new file mode 100644
index 0000000..7970f7a
--- /dev/null
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/fec.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/imx-uart.h>
+#include <mach/iomux-mx53.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+#include "crm_regs.h"
+#include "devices-imx53.h"
+
+#define SMD_FEC_PHY_RST		IMX_GPIO_NR(7, 6)
+
+static iomux_v3_cfg_t mx53_smd_pads[] = {
+	MX53_PAD_CSI0_D10__UART1_TXD,
+	MX53_PAD_CSI0_D11__UART1_RXD,
+	MX53_PAD_ATA_DIOW__UART1_TXD,
+	MX53_PAD_ATA_DMACK__UART1_RXD,
+
+	MX53_PAD_ATA_BUFFER_EN__UART2_RXD,
+	MX53_PAD_ATA_DMARQ__UART2_TXD,
+	MX53_PAD_ATA_DIOR__UART2_RTS,
+	MX53_PAD_ATA_INTRQ__UART2_CTS,
+
+	MX53_PAD_ATA_CS_0__UART3_TXD,
+	MX53_PAD_ATA_CS_1__UART3_RXD,
+	MX53_PAD_ATA_DA_1__UART3_CTS,
+	MX53_PAD_ATA_DA_2__UART3_RTS,
+};
+
+static const struct imxuart_platform_data mx53_smd_uart_data __initconst = {
+	.flags = IMXUART_HAVE_RTSCTS,
+};
+
+static inline void mx53_smd_init_uart(void)
+{
+	imx53_add_imx_uart(0, &mx53_smd_uart_data);
+	imx53_add_imx_uart(1, &mx53_smd_uart_data);
+	imx53_add_imx_uart(2, &mx53_smd_uart_data);
+}
+
+static inline void mx53_smd_fec_reset(void)
+{
+	int ret;
+
+	/* reset FEC PHY */
+	ret = gpio_request(SMD_FEC_PHY_RST, "fec-phy-reset");
+	if (ret) {
+		printk(KERN_ERR "failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
+		return;
+	}
+	gpio_direction_output(SMD_FEC_PHY_RST, 0);
+	msleep(1);
+	gpio_set_value(SMD_FEC_PHY_RST, 1);
+}
+
+static struct fec_platform_data mx53_smd_fec_data = {
+	.phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static void __init mx53_smd_board_init(void)
+{
+	mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads,
+					ARRAY_SIZE(mx53_smd_pads));
+	mx53_smd_init_uart();
+	mx53_smd_fec_reset();
+	imx53_add_fec(&mx53_smd_fec_data);
+}
+
+static void __init mx53_smd_timer_init(void)
+{
+	mx53_clocks_init(32768, 24000000, 22579200, 0);
+}
+
+static struct sys_timer mx53_smd_timer = {
+	.init	= mx53_smd_timer_init,
+};
+
+MACHINE_START(MX53_SMD, "Freescale MX53 SMD Board")
+	.map_io = mx53_map_io,
+	.init_irq = mx53_init_irq,
+	.init_machine = mx53_smd_board_init,
+	.timer = &mx53_smd_timer,
+MACHINE_END
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 785e1a3..0a19e75 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1191,6 +1191,11 @@
 DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
 	NULL,  NULL, &ipg_clk, &gpt_ipg_clk);
 
+DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
+	NULL, NULL, &ipg_clk, NULL);
+DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
+	NULL, NULL, &ipg_clk, NULL);
+
 /* I2C */
 DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
 	NULL, NULL, &ipg_clk, NULL);
@@ -1283,6 +1288,8 @@
 	_REGISTER_CLOCK("imx-uart.2", NULL, uart3_clk)
 	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
 	_REGISTER_CLOCK("fec.0", NULL, fec_clk)
+	_REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
+	_REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
 	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
 	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
 	_REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
@@ -1295,7 +1302,7 @@
 	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
 	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
 	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
-	_REGISTER_CLOCK("imx-keypad.0", NULL, kpp_clk)
+	_REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
 	_REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
 	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
 	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
@@ -1326,6 +1333,13 @@
 	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
 	_REGISTER_CLOCK("fec.0", NULL, fec_clk)
 	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
+	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
+	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
+	_REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
+	_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
+	_REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk)
+	_REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk)
+	_REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk)
 };
 
 static void clk_tree_init(void)
@@ -1363,7 +1377,6 @@
 
 	clk_tree_init();
 
-	clk_set_parent(&uart_root_clk, &pll3_sw_clk);
 	clk_enable(&cpu_clk);
 	clk_enable(&main_bus_clk);
 
@@ -1406,6 +1419,7 @@
 
 	clk_tree_init();
 
+	clk_set_parent(&uart_root_clk, &pll3_sw_clk);
 	clk_enable(&cpu_clk);
 	clk_enable(&main_bus_clk);
 
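
The clkdev changes above matter on the consumer side: clk_get() matches a lookup entry by device name plus connection id. Renaming "imx-keypad.0" to "imx-keypad" tracks the keypad device now being registered through the devices-common helpers (which name it "imx-keypad" with no instance suffix), and the new mx53 entries are what let the i2c, esdhc and ecspi devices added to the EVK board file find their clocks. A consumer-side sketch (hypothetical driver, not part of this pull):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* for a device named "imx-keypad" this matches
	 * _REGISTER_CLOCK("imx-keypad", NULL, kpp_clk) above */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	/* ... program the hardware ... */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}
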
diff --git a/arch/arm/mach-mx5/devices-imx51.h b/arch/arm/mach-mx5/devices-imx51.h
index 6302e46..7fff485 100644
--- a/arch/arm/mach-mx5/devices-imx51.h
+++ b/arch/arm/mach-mx5/devices-imx51.h
@@ -47,3 +47,11 @@
 extern const struct imx_imx2_wdt_data imx51_imx2_wdt_data[] __initconst;
 #define imx51_add_imx2_wdt(id, pdata)	\
 	imx_add_imx2_wdt(&imx51_imx2_wdt_data[id])
+
+extern const struct imx_mxc_pwm_data imx51_mxc_pwm_data[] __initconst;
+#define imx51_add_mxc_pwm(id)	\
+	imx_add_mxc_pwm(&imx51_mxc_pwm_data[id])
+
+extern const struct imx_imx_keypad_data imx51_imx_keypad_data __initconst;
+#define imx51_add_imx_keypad(pdata)	\
+	imx_add_imx_keypad(&imx51_imx_keypad_data, pdata)
diff --git a/arch/arm/mach-mx5/devices-imx53.h b/arch/arm/mach-mx5/devices-imx53.h
index 9d0ec25..8639735 100644
--- a/arch/arm/mach-mx5/devices-imx53.h
+++ b/arch/arm/mach-mx5/devices-imx53.h
@@ -8,6 +8,24 @@
 #include <mach/mx53.h>
 #include <mach/devices-common.h>
 
+extern const struct imx_fec_data imx53_fec_data __initconst;
+#define imx53_add_fec(pdata)   \
+	imx_add_fec(&imx53_fec_data, pdata)
+
 extern const struct imx_imx_uart_1irq_data imx53_imx_uart_data[] __initconst;
 #define imx53_add_imx_uart(id, pdata)	\
 	imx_add_imx_uart_1irq(&imx53_imx_uart_data[id], pdata)
+
+
+extern const struct imx_imx_i2c_data imx53_imx_i2c_data[] __initconst;
+#define imx53_add_imx_i2c(id, pdata)	\
+	imx_add_imx_i2c(&imx53_imx_i2c_data[id], pdata)
+
+extern const struct imx_sdhci_esdhc_imx_data
+imx53_sdhci_esdhc_imx_data[] __initconst;
+#define imx53_add_sdhci_esdhc_imx(id, pdata)	\
+	imx_add_sdhci_esdhc_imx(&imx53_sdhci_esdhc_imx_data[id], pdata)
+
+extern const struct imx_spi_imx_data imx53_ecspi_data[] __initconst;
+#define imx53_add_ecspi(id, pdata)	\
+	imx_add_spi_imx(&imx53_ecspi_data[id], pdata)
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index 1bda5cb..153ada5 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -120,25 +120,6 @@
 	},
 };
 
-static struct resource mxc_kpp_resources[] = {
-	{
-		.start = MX51_MXC_INT_KPP,
-		.end = MX51_MXC_INT_KPP,
-		.flags = IORESOURCE_IRQ,
-	} , {
-		.start = MX51_KPP_BASE_ADDR,
-		.end = MX51_KPP_BASE_ADDR + 0x8 - 1,
-		.flags = IORESOURCE_MEM,
-	},
-};
-
-struct platform_device mxc_keypad_device = {
-	.name = "imx-keypad",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(mxc_kpp_resources),
-	.resource = mxc_kpp_resources,
-};
-
 static struct mxc_gpio_port mxc_gpio_ports[] = {
 	{
 		.chip.label = "gpio-0",
diff --git a/arch/arm/mach-mx5/devices.h b/arch/arm/mach-mx5/devices.h
index 16891aa..55a5129 100644
--- a/arch/arm/mach-mx5/devices.h
+++ b/arch/arm/mach-mx5/devices.h
@@ -3,4 +3,3 @@
 extern struct platform_device mxc_usbh2_device;
 extern struct platform_device mxc_usbdr_udc_device;
 extern struct platform_device mxc_hsi2c_device;
-extern struct platform_device mxc_keypad_device;
diff --git a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
index c96d018..e83ffad 100644
--- a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
+++ b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
@@ -21,7 +21,6 @@
 #include <linux/fsl_devices.h>
 #include <linux/i2c/tsc2007.h>
 #include <linux/leds.h>
-#include <linux/input/matrix_keypad.h>
 
 #include <mach/common.h>
 #include <mach/hardware.h>
@@ -157,7 +156,7 @@
 	KEY(3, 3, KEY_ENTER),
 };
 
-static struct matrix_keymap_data mbimx51_map_data = {
+static const struct matrix_keymap_data mbimx51_map_data __initconst = {
 	.keymap		= mbimx51_keymap,
 	.keymap_size	= ARRAY_SIZE(mbimx51_keymap),
 };
@@ -209,7 +208,7 @@
 
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 
-	mxc_register_device(&mxc_keypad_device, &mbimx51_map_data);
+	imx51_add_imx_keypad(&mbimx51_map_data);
 
 	gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq");
 	gpio_direction_input(MBIMX51_TSC2007_GPIO);
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index c4ac7b4..8bfc8df 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -15,7 +15,7 @@
 config MACH_MX23EVK
 	bool "Support MX23EVK Platform"
 	select SOC_IMX23
-	select MXS_HAVE_PLATFORM_DUART
+	select MXS_HAVE_AMBA_DUART
 	default y
 	help
 	  Include support for MX23EVK platform. This includes specific
@@ -24,7 +24,7 @@
 config MACH_MX28EVK
 	bool "Support MX28EVK Platform"
 	select SOC_IMX28
-	select MXS_HAVE_PLATFORM_DUART
+	select MXS_HAVE_AMBA_DUART
 	select MXS_HAVE_PLATFORM_FEC
 	default y
 	help
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
index 8f5a19a..ca72a05 100644
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ b/arch/arm/mach-mxs/clock-mx23.c
@@ -21,6 +21,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
+#include <linux/clkdev.h>
 
 #include <asm/clkdev.h>
 #include <asm/div64.h>
@@ -303,7 +304,7 @@
 	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
 	reg &= ~BM_CLKCTRL_##dr##_DIV;					\
 	reg |= div << BP_CLKCTRL_##dr##_DIV;				\
-	if (reg | (1 << clk->enable_shift)) {				\
+	if (reg & (1 << clk->enable_shift)) {				\
 		pr_err("%s: clock is gated\n", __func__);		\
 		return -EINVAL;						\
 	}								\
@@ -346,7 +347,7 @@
 {									\
 	if (parent != clk->parent) {					\
 		__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,		\
-			 HW_CLKCTRL_CLKSEQ_TOG);			\
+			 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);	\
 		clk->parent = parent;					\
 	}								\
 									\
@@ -437,10 +438,12 @@
 	},
 
 static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK("mxs-duart.0", NULL, uart_clk)
+	/* for amba bus driver */
+	_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
+	/* for amba-pl011 driver */
+	_REGISTER_CLOCK("duart", NULL, uart_clk)
 	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
 	_REGISTER_CLOCK(NULL, "hclk", hbus_clk)
-	_REGISTER_CLOCK(NULL, "xclk", xbus_clk)
 	_REGISTER_CLOCK(NULL, "usb", usb_clk)
 	_REGISTER_CLOCK(NULL, "audio", audio_clk)
 	_REGISTER_CLOCK(NULL, "pwm", pwm_clk)
@@ -518,6 +521,12 @@
 {
 	clk_misc_init();
 
+	clk_enable(&cpu_clk);
+	clk_enable(&hbus_clk);
+	clk_enable(&xbus_clk);
+	clk_enable(&emi_clk);
+	clk_enable(&uart_clk);
+
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 
 	mxs_timer_init(&clk32k_clk, MX23_INT_TIMER0);
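
Two of the mx23 clock fixes above are easy to miss. The CLKSEQ toggle now writes to CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG rather than the bare register offset, and the gating test flips a bitwise OR to an AND: with OR the expression is non-zero whenever any bit of reg is set, so the "clock is gated" bail-out did not actually depend on the gate bit at all. A tiny illustration (hypothetical helper, values not from the patch):

#include <linux/kernel.h>
#include <linux/errno.h>

/* returns -EINVAL only when the gate bit is really set */
static int foo_check_gate(u32 reg, u32 gate_bit)
{
	if (reg & gate_bit) {	/* the old code used '|', true for any non-zero reg */
		pr_err("clock is gated\n");
		return -EINVAL;
	}
	return 0;
}
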
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 74e2103..fd1c4c5 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -21,6 +21,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
+#include <linux/clkdev.h>
 
 #include <asm/clkdev.h>
 #include <asm/div64.h>
@@ -354,12 +355,12 @@
 	} else {							\
 		reg &= ~BM_CLKCTRL_##dr##_DIV;				\
 		reg |= div << BP_CLKCTRL_##dr##_DIV;			\
-		if (reg | (1 << clk->enable_shift)) {			\
+		if (reg & (1 << clk->enable_shift)) {			\
 			pr_err("%s: clock is gated\n", __func__);	\
 			return -EINVAL;					\
 		}							\
 	}								\
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);		\
+	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
 									\
 	for (i = 10000; i; i--)						\
 		if (!(__raw_readl(CLKCTRL_BASE_ADDR +			\
@@ -482,7 +483,7 @@
 {									\
 	if (parent != clk->parent) {					\
 		__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,		\
-			 HW_CLKCTRL_CLKSEQ_TOG);			\
+			 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);	\
 		clk->parent = parent;					\
 	}								\
 									\
@@ -602,8 +603,12 @@
 	},
 
 static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK("mxs-duart.0", NULL, uart_clk)
-	_REGISTER_CLOCK("fec.0", NULL, fec_clk)
+	/* for amba bus driver */
+	_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
+	/* for amba-pl011 driver */
+	_REGISTER_CLOCK("duart", NULL, uart_clk)
+	_REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
+	_REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
 	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
 	_REGISTER_CLOCK("pll2", NULL, pll2_clk)
 	_REGISTER_CLOCK(NULL, "hclk", hbus_clk)
@@ -726,6 +731,12 @@
 {
 	clk_misc_init();
 
+	clk_enable(&cpu_clk);
+	clk_enable(&hbus_clk);
+	clk_enable(&xbus_clk);
+	clk_enable(&emi_clk);
+	clk_enable(&uart_clk);
+
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 
 	mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
index e7d2269..a7093c8 100644
--- a/arch/arm/mach-mxs/clock.c
+++ b/arch/arm/mach-mxs/clock.c
@@ -57,7 +57,6 @@
 		if (clk->disable)
 			clk->disable(clk);
 		__clk_disable(clk->parent);
-		__clk_disable(clk->secondary);
 	}
 }
 
@@ -68,7 +67,6 @@
 
 	if (clk->usecount++ == 0) {
 		__clk_enable(clk->parent);
-		__clk_enable(clk->secondary);
 
 		if (clk->enable)
 			clk->enable(clk);
diff --git a/arch/arm/mach-mxs/devices-mx23.h b/arch/arm/mach-mxs/devices-mx23.h
index d0f49fc..1256788 100644
--- a/arch/arm/mach-mxs/devices-mx23.h
+++ b/arch/arm/mach-mxs/devices-mx23.h
@@ -11,6 +11,6 @@
 #include <mach/mx23.h>
 #include <mach/devices-common.h>
 
-extern const struct mxs_duart_data mx23_duart_data __initconst;
+extern const struct amba_device mx23_duart_device __initconst;
 #define mx23_add_duart() \
-	mxs_add_duart(&mx23_duart_data)
+	mxs_add_duart(&mx23_duart_device)
diff --git a/arch/arm/mach-mxs/devices-mx28.h b/arch/arm/mach-mxs/devices-mx28.h
index 00b736c..33773a6 100644
--- a/arch/arm/mach-mxs/devices-mx28.h
+++ b/arch/arm/mach-mxs/devices-mx28.h
@@ -11,9 +11,9 @@
 #include <mach/mx28.h>
 #include <mach/devices-common.h>
 
-extern const struct mxs_duart_data mx28_duart_data __initconst;
+extern const struct amba_device mx28_duart_device __initconst;
 #define mx28_add_duart() \
-	mxs_add_duart(&mx28_duart_data)
+	mxs_add_duart(&mx28_duart_device)
 
 extern const struct mxs_fec_data mx28_fec_data[] __initconst;
 #define mx28_add_fec(id, pdata) \
diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c
index 6b60f02..c20d547 100644
--- a/arch/arm/mach-mxs/devices.c
+++ b/arch/arm/mach-mxs/devices.c
@@ -19,9 +19,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/err.h>
 #include <linux/platform_device.h>
-#include <mach/common.h>
+#include <linux/amba/bus.h>
 
 struct platform_device *__init mxs_add_platform_device_dmamask(
 		const char *name, int id,
@@ -73,3 +72,17 @@
 
 	return pdev;
 }
+
+int __init mxs_add_amba_device(const struct amba_device *dev)
+{
+	struct amba_device *adev = kmalloc(sizeof(*adev), GFP_KERNEL);
+
+	if (!adev) {
+		pr_err("%s: failed to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	*adev = *dev;
+
+	return amba_device_register(adev, &iomem_resource);
+}
diff --git a/arch/arm/mach-mxs/devices/Kconfig b/arch/arm/mach-mxs/devices/Kconfig
index a35a2dc..cf7dc1a 100644
--- a/arch/arm/mach-mxs/devices/Kconfig
+++ b/arch/arm/mach-mxs/devices/Kconfig
@@ -1,5 +1,6 @@
-config MXS_HAVE_PLATFORM_DUART
+config MXS_HAVE_AMBA_DUART
 	bool
+	select ARM_AMBA
 
 config MXS_HAVE_PLATFORM_FEC
 	bool
diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile
index 4b5266a..d0a09f6 100644
--- a/arch/arm/mach-mxs/devices/Makefile
+++ b/arch/arm/mach-mxs/devices/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_MXS_HAVE_PLATFORM_DUART) += platform-duart.o
+obj-$(CONFIG_MXS_HAVE_AMBA_DUART) += amba-duart.o
 obj-$(CONFIG_MXS_HAVE_PLATFORM_FEC) += platform-fec.o
diff --git a/arch/arm/mach-mxs/devices/amba-duart.c b/arch/arm/mach-mxs/devices/amba-duart.c
new file mode 100644
index 0000000..a559db0
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/amba-duart.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <asm/irq.h>
+#include <mach/mx23.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+#define MXS_AMBA_DUART_DEVICE(name, soc)			\
+const struct amba_device name##_device __initconst = {		\
+	.dev = {						\
+		.init_name = "duart",				\
+	},							\
+	.res = {						\
+		.start = soc ## _DUART_BASE_ADDR,		\
+		.end = (soc ## _DUART_BASE_ADDR) + SZ_8K - 1,	\
+		.flags = IORESOURCE_MEM,			\
+	},							\
+	.irq = {soc ## _INT_DUART, NO_IRQ},			\
+}
+
+#ifdef CONFIG_SOC_IMX23
+MXS_AMBA_DUART_DEVICE(mx23_duart, MX23);
+#endif
+
+#ifdef CONFIG_SOC_IMX28
+MXS_AMBA_DUART_DEVICE(mx28_duart, MX28);
+#endif
+
+int __init mxs_add_duart(const struct amba_device *dev)
+{
+	return mxs_add_amba_device(dev);
+}
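
With the DUART converted to an AMBA primecell, a board only needs the add helper above; the paired clkdev entries added to clock-mx23.c and clock-mx28.c exist because the AMBA bus core looks up "apb_pclk" for the device while the amba-pl011 driver gets its own clock with a NULL connection id. A usage sketch (hypothetical i.MX28 board init, not part of this pull):

static void __init my_mx28_board_init(void)
{
	/* registers the "duart" amba_device defined above;
	 * amba-pl011 then probes and drives it */
	mx28_add_duart();
}
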
diff --git a/arch/arm/mach-mxs/devices/platform-duart.c b/arch/arm/mach-mxs/devices/platform-duart.c
deleted file mode 100644
index 2fe0df5..0000000
--- a/arch/arm/mach-mxs/devices/platform-duart.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include <mach/mx23.h>
-#include <mach/mx28.h>
-#include <mach/devices-common.h>
-
-#define mxs_duart_data_entry(soc)					\
-	{								\
-		.iobase = soc ## _DUART_BASE_ADDR,			\
-		.irq = soc ## _INT_DUART,				\
-	}
-
-#ifdef CONFIG_SOC_IMX23
-const struct mxs_duart_data mx23_duart_data __initconst =
-	mxs_duart_data_entry(MX23);
-#endif
-
-#ifdef CONFIG_SOC_IMX28
-const struct mxs_duart_data mx28_duart_data __initconst =
-	mxs_duart_data_entry(MX28);
-#endif
-
-struct platform_device *__init mxs_add_duart(
-		const struct mxs_duart_data *data)
-{
-	struct resource res[] = {
-		{
-			.start = data->iobase,
-			.end = data->iobase + SZ_8K - 1,
-			.flags = IORESOURCE_MEM,
-		}, {
-			.start = data->irq,
-			.end = data->irq,
-			.flags = IORESOURCE_IRQ,
-		},
-	};
-
-	return mxs_add_platform_device("mxs-duart", 0, res, ARRAY_SIZE(res),
-					NULL, 0);
-}
diff --git a/arch/arm/mach-mxs/devices/platform-fec.c b/arch/arm/mach-mxs/devices/platform-fec.c
index c08168c..c42dff7 100644
--- a/arch/arm/mach-mxs/devices/platform-fec.c
+++ b/arch/arm/mach-mxs/devices/platform-fec.c
@@ -45,6 +45,6 @@
 		},
 	};
 
-	return mxs_add_platform_device("fec", data->id,
+	return mxs_add_platform_device("imx28-fec", data->id,
 			res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
 }
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c
index d7ad7a6..cb0c0e8 100644
--- a/arch/arm/mach-mxs/gpio.c
+++ b/arch/arm/mach-mxs/gpio.c
@@ -139,6 +139,8 @@
 	struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq);
 	u32 gpio_irq_no_base = port->virtual_irq_start;
 
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+
 	irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
 			__raw_readl(port->base + PINCTRL_IRQEN(port->id));
 
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h
index 041e276..592c9ab 100644
--- a/arch/arm/mach-mxs/include/mach/clock.h
+++ b/arch/arm/mach-mxs/include/mach/clock.h
@@ -29,8 +29,6 @@
 	int id;
 	/* Source clock this clk depends on */
 	struct clk *parent;
-	/* Secondary clock to enable/disable with this clock */
-	struct clk *secondary;
 	/* Reference count of clock enable/disable */
 	__s8 usecount;
 	/* Register bit position for clock's enable/disable control. */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 3da48d4..6c3d1a1 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/init.h>
+#include <linux/amba/bus.h>
 
 struct platform_device *mxs_add_platform_device_dmamask(
 		const char *name, int id,
@@ -24,14 +25,10 @@
 			name, id, res, num_resources, data, size_data, 0);
 }
 
+int __init mxs_add_amba_device(const struct amba_device *dev);
+
 /* duart */
-struct mxs_duart_data {
-	resource_size_t iobase;
-	resource_size_t iosize;
-	resource_size_t irq;
-};
-struct platform_device *__init mxs_add_duart(
-		const struct mxs_duart_data *data);
+int __init mxs_add_duart(const struct amba_device *dev);
 
 /* fec */
 #include <linux/fec.h>
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index d162e95..8e2c597 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -57,6 +57,19 @@
 		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
 	MX28_PAD_ENET_CLK__CLKCTRL_ENET |
 		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	/* fec1 */
+	MX28_PAD_ENET0_CRS__ENET1_RX_EN |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_RXD2__ENET1_RXD0 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_RXD3__ENET1_RXD1 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_COL__ENET1_TX_EN |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_TXD2__ENET1_TXD0 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+	MX28_PAD_ENET0_TXD3__ENET1_TXD1 |
+		(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
 	/* phy power line */
 	MX28_PAD_SSP1_DATA3__GPIO_2_15 |
 		(MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
@@ -106,8 +119,14 @@
 	gpio_set_value(MX28EVK_FEC_PHY_RESET, 1);
 }
 
-static const struct fec_platform_data mx28_fec_pdata __initconst = {
-	.phy = PHY_INTERFACE_MODE_RMII,
+static struct fec_platform_data mx28_fec_pdata[] = {
+	{
+		/* fec0 */
+		.phy = PHY_INTERFACE_MODE_RMII,
+	}, {
+		/* fec1 */
+		.phy = PHY_INTERFACE_MODE_RMII,
+	},
 };
 
 static void __init mx28evk_init(void)
@@ -117,7 +136,8 @@
 	mx28_add_duart();
 
 	mx28evk_fec_reset();
-	mx28_add_fec(0, &mx28_fec_pdata);
+	mx28_add_fec(0, &mx28_fec_pdata[0]);
+	mx28_add_fec(1, &mx28_fec_pdata[1]);
 }
 
 static void __init mx28evk_timer_init(void)
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 43da8bb..29ffa75 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -88,13 +88,13 @@
 }
 
 static int
-netx_hif_irq_type(unsigned int _irq, unsigned int type)
+netx_hif_irq_type(struct irq_data *d, unsigned int type)
 {
 	unsigned int val, irq;
 
 	val = readl(NETX_DPMAS_IF_CONF1);
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 
 	if (type & IRQ_TYPE_EDGE_RISING) {
 		DEBUG_IRQ("rising edges\n");
@@ -119,49 +119,49 @@
 }
 
 static void
-netx_hif_ack_irq(unsigned int _irq)
+netx_hif_ack_irq(struct irq_data *d)
 {
 	unsigned int val, irq;
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 	writel((1 << 24) << irq, NETX_DPMAS_INT_STAT);
 
 	val = readl(NETX_DPMAS_INT_EN);
 	val &= ~((1 << 24) << irq);
 	writel(val, NETX_DPMAS_INT_EN);
 
-	DEBUG_IRQ("%s: irq %d\n", __func__, _irq);
+	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
 }
 
 static void
-netx_hif_mask_irq(unsigned int _irq)
+netx_hif_mask_irq(struct irq_data *d)
 {
 	unsigned int val, irq;
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 	val = readl(NETX_DPMAS_INT_EN);
 	val &= ~((1 << 24) << irq);
 	writel(val, NETX_DPMAS_INT_EN);
-	DEBUG_IRQ("%s: irq %d\n", __func__, _irq);
+	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
 }
 
 static void
-netx_hif_unmask_irq(unsigned int _irq)
+netx_hif_unmask_irq(struct irq_data *d)
 {
 	unsigned int val, irq;
 
-	irq = _irq - NETX_IRQ_HIF_CHAINED(0);
+	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
 	val = readl(NETX_DPMAS_INT_EN);
 	val |= (1 << 24) << irq;
 	writel(val, NETX_DPMAS_INT_EN);
-	DEBUG_IRQ("%s: irq %d\n", __func__, _irq);
+	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
 }
 
 static struct irq_chip netx_hif_chip = {
-	.ack = netx_hif_ack_irq,
-	.mask = netx_hif_mask_irq,
-	.unmask = netx_hif_unmask_irq,
-	.set_type = netx_hif_irq_type,
+	.irq_ack = netx_hif_ack_irq,
+	.irq_mask = netx_hif_mask_irq,
+	.irq_unmask = netx_hif_unmask_irq,
+	.irq_set_type = netx_hif_irq_type,
 };
 
 void __init netx_init_irq(void)
diff --git a/arch/arm/mach-ns9xxx/board-a9m9750dev.c b/arch/arm/mach-ns9xxx/board-a9m9750dev.c
index b45bb3b..0c0d524 100644
--- a/arch/arm/mach-ns9xxx/board-a9m9750dev.c
+++ b/arch/arm/mach-ns9xxx/board-a9m9750dev.c
@@ -37,44 +37,44 @@
 		     ARRAY_SIZE(board_a9m9750dev_io_desc));
 }
 
-static void a9m9750dev_fpga_ack_irq(unsigned int irq)
+static void a9m9750dev_fpga_ack_irq(struct irq_data *d)
 {
 	/* nothing */
 }
 
-static void a9m9750dev_fpga_mask_irq(unsigned int irq)
+static void a9m9750dev_fpga_mask_irq(struct irq_data *d)
 {
 	u8 ier;
 
 	ier = __raw_readb(FPGA_IER);
 
-	ier &= ~(1 << (irq - FPGA_IRQ(0)));
+	ier &= ~(1 << (d->irq - FPGA_IRQ(0)));
 
 	__raw_writeb(ier, FPGA_IER);
 }
 
-static void a9m9750dev_fpga_maskack_irq(unsigned int irq)
+static void a9m9750dev_fpga_maskack_irq(struct irq_data *d)
 {
-	a9m9750dev_fpga_mask_irq(irq);
-	a9m9750dev_fpga_ack_irq(irq);
+	a9m9750dev_fpga_mask_irq(d);
+	a9m9750dev_fpga_ack_irq(d);
 }
 
-static void a9m9750dev_fpga_unmask_irq(unsigned int irq)
+static void a9m9750dev_fpga_unmask_irq(struct irq_data *d)
 {
 	u8 ier;
 
 	ier = __raw_readb(FPGA_IER);
 
-	ier |= 1 << (irq - FPGA_IRQ(0));
+	ier |= 1 << (d->irq - FPGA_IRQ(0));
 
 	__raw_writeb(ier, FPGA_IER);
 }
 
 static struct irq_chip a9m9750dev_fpga_chip = {
-	.ack		= a9m9750dev_fpga_ack_irq,
-	.mask		= a9m9750dev_fpga_mask_irq,
-	.mask_ack	= a9m9750dev_fpga_maskack_irq,
-	.unmask		= a9m9750dev_fpga_unmask_irq,
+	.irq_ack	= a9m9750dev_fpga_ack_irq,
+	.irq_mask	= a9m9750dev_fpga_mask_irq,
+	.irq_mask_ack	= a9m9750dev_fpga_maskack_irq,
+	.irq_unmask	= a9m9750dev_fpga_unmask_irq,
 };
 
 static void a9m9750dev_fpga_demux_handler(unsigned int irq,
@@ -82,7 +82,7 @@
 {
 	u8 stat = __raw_readb(FPGA_ISR);
 
-	desc->chip->mask_ack(irq);
+	desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
 
 	while (stat != 0) {
 		int irqno = fls(stat) - 1;
@@ -92,7 +92,7 @@
 		generic_handle_irq(FPGA_IRQ(irqno));
 	}
 
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 void __init board_a9m9750dev_init_irq(void)
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index 038f24d..389fa5c 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -22,40 +22,40 @@
 #define irq2prio(i) (i)
 #define prio2irq(p) (p)
 
-static void ns9xxx_mask_irq(unsigned int irq)
+static void ns9xxx_mask_irq(struct irq_data *d)
 {
 	/* XXX: better use cpp symbols */
-	int prio = irq2prio(irq);
+	int prio = irq2prio(d->irq);
 	u32 ic = __raw_readl(SYS_IC(prio / 4));
 	ic &= ~(1 << (7 + 8 * (3 - (prio & 3))));
 	__raw_writel(ic, SYS_IC(prio / 4));
 }
 
-static void ns9xxx_ack_irq(unsigned int irq)
+static void ns9xxx_ack_irq(struct irq_data *d)
 {
 	__raw_writel(0, SYS_ISRADDR);
 }
 
-static void ns9xxx_maskack_irq(unsigned int irq)
+static void ns9xxx_maskack_irq(struct irq_data *d)
 {
-	ns9xxx_mask_irq(irq);
-	ns9xxx_ack_irq(irq);
+	ns9xxx_mask_irq(d);
+	ns9xxx_ack_irq(d);
 }
 
-static void ns9xxx_unmask_irq(unsigned int irq)
+static void ns9xxx_unmask_irq(struct irq_data *d)
 {
 	/* XXX: better use cpp symbols */
-	int prio = irq2prio(irq);
+	int prio = irq2prio(d->irq);
 	u32 ic = __raw_readl(SYS_IC(prio / 4));
 	ic |= 1 << (7 + 8 * (3 - (prio & 3)));
 	__raw_writel(ic, SYS_IC(prio / 4));
 }
 
 static struct irq_chip ns9xxx_chip = {
-	.ack		= ns9xxx_ack_irq,
-	.mask		= ns9xxx_mask_irq,
-	.mask_ack	= ns9xxx_maskack_irq,
-	.unmask		= ns9xxx_unmask_irq,
+	.irq_ack	= ns9xxx_ack_irq,
+	.irq_mask	= ns9xxx_mask_irq,
+	.irq_mask_ack	= ns9xxx_maskack_irq,
+	.irq_unmask	= ns9xxx_unmask_irq,
 };
 
 #if 0
@@ -92,10 +92,10 @@
 
 	if (desc->status & IRQ_DISABLED)
 out_mask:
-		desc->chip->mask(irq);
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	/* ack unconditionally to unmask lower prio irqs */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	raw_spin_unlock(&desc->lock);
 }
diff --git a/arch/arm/mach-nuc93x/irq.c b/arch/arm/mach-nuc93x/irq.c
index a7a88ea..1f8a05a 100644
--- a/arch/arm/mach-nuc93x/irq.c
+++ b/arch/arm/mach-nuc93x/irq.c
@@ -25,9 +25,9 @@
 #include <mach/hardware.h>
 #include <mach/regs-irq.h>
 
-static void nuc93x_irq_mask(unsigned int irq)
+static void nuc93x_irq_mask(struct irq_data *d)
 {
-	__raw_writel(1 << irq, REG_AIC_MDCR);
+	__raw_writel(1 << d->irq, REG_AIC_MDCR);
 }
 
 /*
@@ -35,21 +35,21 @@
  * to REG_AIC_EOSCR for ACK
  */
 
-static void nuc93x_irq_ack(unsigned int irq)
+static void nuc93x_irq_ack(struct irq_data *d)
 {
 	__raw_writel(0x01, REG_AIC_EOSCR);
 }
 
-static void nuc93x_irq_unmask(unsigned int irq)
+static void nuc93x_irq_unmask(struct irq_data *d)
 {
-	__raw_writel(1 << irq, REG_AIC_MECR);
+	__raw_writel(1 << d->irq, REG_AIC_MECR);
 
 }
 
 static struct irq_chip nuc93x_irq_chip = {
-	.ack	   = nuc93x_irq_ack,
-	.mask	   = nuc93x_irq_mask,
-	.unmask	   = nuc93x_irq_unmask,
+	.irq_ack	= nuc93x_irq_ack,
+	.irq_mask	= nuc93x_irq_mask,
+	.irq_unmask	= nuc93x_irq_unmask,
 };
 
 void __init nuc93x_init_irq(void)
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 8d2f2da..e0a0281 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -9,6 +9,7 @@
 	depends on ARCH_OMAP1
 	bool "OMAP730 Based System"
 	select CPU_ARM926T
+	select OMAP_MPU_TIMER
 	select ARCH_OMAP_OTG
 
 config ARCH_OMAP850
@@ -22,6 +23,7 @@
 	default y
 	bool "OMAP15xx Based System"
 	select CPU_ARM925T
+	select OMAP_MPU_TIMER
 
 config ARCH_OMAP16XX
 	depends on ARCH_OMAP1
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 6ee1950..ba6009f 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,12 +3,11 @@
 #
 
 # Common support
-obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o
+obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
 obj-y += clock.o clock_data.o opp_data.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
-obj-$(CONFIG_OMAP_MPU_TIMER)	+= time.o
 obj-$(CONFIG_OMAP_32K_TIMER)	+= timer32k.o
 
 # Power Management
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index 6c994e2..152b32c 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -49,7 +49,7 @@
 
 	irq_desc = irq_to_desc(IH_GPIO_BASE);
 	if (irq_desc)
-		irq_chip = irq_desc->chip;
+		irq_chip = irq_desc->irq_data.chip;
 
 	/*
 	 * For each handled GPIO interrupt, keep calling its interrupt handler
@@ -62,13 +62,15 @@
 
 		while (irq_counter[gpio] < fiq_count) {
 			if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
+				struct irq_data *d = irq_get_irq_data(irq_num);
+
 				/*
 				 * It looks like handle_edge_irq() that
 				 * OMAP GPIO edge interrupts default to,
 				 * expects interrupt already unmasked.
 				 */
-				if (irq_chip && irq_chip->unmask)
-					irq_chip->unmask(irq_num);
+				if (irq_chip && irq_chip->irq_unmask)
+					irq_chip->irq_unmask(d);
 			}
 			generic_handle_irq(irq_num);
 
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index bd0495a..22cc8c8 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -179,6 +179,22 @@
 	{ OMAP_TAG_LCD,		&ams_delta_lcd_config },
 };
 
+static struct resource ams_delta_nand_resources[] = {
+	[0] = {
+		.start	= OMAP1_MPUIO_BASE,
+		.end	= OMAP1_MPUIO_BASE +
+				OMAP_MPUIO_IO_CNTL + sizeof(u32) - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device ams_delta_nand_device = {
+	.name	= "ams-delta-nand",
+	.id	= -1,
+	.num_resources	= ARRAY_SIZE(ams_delta_nand_resources),
+	.resource	= ams_delta_nand_resources,
+};
+
 static struct resource ams_delta_kp_resources[] = {
 	[0] = {
 		.start	= INT_KEYBOARD,
@@ -265,6 +281,7 @@
 };
 
 static struct platform_device *ams_delta_devices[] __initdata = {
+	&ams_delta_nand_device,
 	&ams_delta_kp_device,
 	&ams_delta_lcd_device,
 	&ams_delta_led_device,
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c
index 8780e75..0ace799 100644
--- a/arch/arm/mach-omap1/fpga.c
+++ b/arch/arm/mach-omap1/fpga.c
@@ -30,9 +30,9 @@
 #include <plat/fpga.h>
 #include <mach/gpio.h>
 
-static void fpga_mask_irq(unsigned int irq)
+static void fpga_mask_irq(struct irq_data *d)
 {
-	irq -= OMAP_FPGA_IRQ_BASE;
+	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;
 
 	if (irq < 8)
 		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO)
@@ -58,14 +58,14 @@
 }
 
 
-static void fpga_ack_irq(unsigned int irq)
+static void fpga_ack_irq(struct irq_data *d)
 {
 	/* Don't need to explicitly ACK FPGA interrupts */
 }
 
-static void fpga_unmask_irq(unsigned int irq)
+static void fpga_unmask_irq(struct irq_data *d)
 {
-	irq -= OMAP_FPGA_IRQ_BASE;
+	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;
 
 	if (irq < 8)
 		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO) | (1 << irq)),
@@ -78,10 +78,10 @@
 			      | (1 << (irq - 16))), INNOVATOR_FPGA_IMR2);
 }
 
-static void fpga_mask_ack_irq(unsigned int irq)
+static void fpga_mask_ack_irq(struct irq_data *d)
 {
-	fpga_mask_irq(irq);
-	fpga_ack_irq(irq);
+	fpga_mask_irq(d);
+	fpga_ack_irq(d);
 }
 
 void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc)
@@ -105,17 +105,17 @@
 
 static struct irq_chip omap_fpga_irq_ack = {
 	.name		= "FPGA-ack",
-	.ack		= fpga_mask_ack_irq,
-	.mask		= fpga_mask_irq,
-	.unmask		= fpga_unmask_irq,
+	.irq_ack	= fpga_mask_ack_irq,
+	.irq_mask	= fpga_mask_irq,
+	.irq_unmask	= fpga_unmask_irq,
 };
 
 
 static struct irq_chip omap_fpga_irq = {
 	.name		= "FPGA",
-	.ack		= fpga_ack_irq,
-	.mask		= fpga_mask_irq,
-	.unmask		= fpga_unmask_irq,
+	.irq_ack	= fpga_ack_irq,
+	.irq_mask	= fpga_mask_irq,
+	.irq_unmask	= fpga_unmask_irq,
 };
 
 /*
diff --git a/arch/arm/mach-omap1/include/mach/entry-macro.S b/arch/arm/mach-omap1/include/mach/entry-macro.S
index c9be6d4..bfb4fb1 100644
--- a/arch/arm/mach-omap1/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap1/include/mach/entry-macro.S
@@ -14,19 +14,6 @@
 #include <mach/irqs.h>
 #include <asm/hardware/gic.h>
 
-/*
- * We use __glue to avoid errors with multiple definitions of
- * .globl omap_irq_flags as it's included from entry-armv.S but not
- * from entry-common.S.
- */
-#ifdef __glue
-		.pushsection .data
-		.globl	omap_irq_flags
-omap_irq_flags:
-		.word	0
-		.popsection
-#endif
-
  		.macro	disable_fiq
 		.endm
 
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 6bddbc8..731dd33 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -57,6 +57,7 @@
 	unsigned long wake_enable;
 };
 
+u32 omap_irq_flags;
 static unsigned int irq_bank_count;
 static struct omap_irq_bank *irq_banks;
 
@@ -70,48 +71,48 @@
 	omap_writel(value, irq_banks[bank].base_reg + offset);
 }
 
-static void omap_ack_irq(unsigned int irq)
+static void omap_ack_irq(struct irq_data *d)
 {
-	if (irq > 31)
+	if (d->irq > 31)
 		omap_writel(0x1, OMAP_IH2_BASE + IRQ_CONTROL_REG_OFFSET);
 
 	omap_writel(0x1, OMAP_IH1_BASE + IRQ_CONTROL_REG_OFFSET);
 }
 
-static void omap_mask_irq(unsigned int irq)
+static void omap_mask_irq(struct irq_data *d)
 {
-	int bank = IRQ_BANK(irq);
+	int bank = IRQ_BANK(d->irq);
 	u32 l;
 
 	l = omap_readl(irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
-	l |= 1 << IRQ_BIT(irq);
+	l |= 1 << IRQ_BIT(d->irq);
 	omap_writel(l, irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
 }
 
-static void omap_unmask_irq(unsigned int irq)
+static void omap_unmask_irq(struct irq_data *d)
 {
-	int bank = IRQ_BANK(irq);
+	int bank = IRQ_BANK(d->irq);
 	u32 l;
 
 	l = omap_readl(irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
-	l &= ~(1 << IRQ_BIT(irq));
+	l &= ~(1 << IRQ_BIT(d->irq));
 	omap_writel(l, irq_banks[bank].base_reg + IRQ_MIR_REG_OFFSET);
 }
 
-static void omap_mask_ack_irq(unsigned int irq)
+static void omap_mask_ack_irq(struct irq_data *d)
 {
-	omap_mask_irq(irq);
-	omap_ack_irq(irq);
+	omap_mask_irq(d);
+	omap_ack_irq(d);
 }
 
-static int omap_wake_irq(unsigned int irq, unsigned int enable)
+static int omap_wake_irq(struct irq_data *d, unsigned int enable)
 {
-	int bank = IRQ_BANK(irq);
+	int bank = IRQ_BANK(d->irq);
 
 	if (enable)
-		irq_banks[bank].wake_enable |= IRQ_BIT(irq);
+		irq_banks[bank].wake_enable |= IRQ_BIT(d->irq);
 	else
-		irq_banks[bank].wake_enable &= ~IRQ_BIT(irq);
+		irq_banks[bank].wake_enable &= ~IRQ_BIT(d->irq);
 
 	return 0;
 }
@@ -168,15 +169,14 @@
 
 static struct irq_chip omap_irq_chip = {
 	.name		= "MPU",
-	.ack		= omap_mask_ack_irq,
-	.mask		= omap_mask_irq,
-	.unmask		= omap_unmask_irq,
-	.set_wake	= omap_wake_irq,
+	.irq_ack	= omap_mask_ack_irq,
+	.irq_mask	= omap_mask_irq,
+	.irq_unmask	= omap_unmask_irq,
+	.irq_set_wake	= omap_wake_irq,
 };
 
 void __init omap_init_irq(void)
 {
-	extern unsigned int omap_irq_flags;
 	int i, j;
 
 #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
@@ -239,9 +239,9 @@
 	/* Unmask level 2 handler */
 
 	if (cpu_is_omap7xx())
-		omap_unmask_irq(INT_7XX_IH2_IRQ);
+		omap_unmask_irq(irq_get_irq_data(INT_7XX_IH2_IRQ));
 	else if (cpu_is_omap15xx())
-		omap_unmask_irq(INT_1510_IH2_IRQ);
+		omap_unmask_irq(irq_get_irq_data(INT_1510_IH2_IRQ));
 	else if (cpu_is_omap16xx())
-		omap_unmask_irq(INT_1610_IH2_IRQ);
+		omap_unmask_irq(irq_get_irq_data(INT_1610_IH2_IRQ));
 }
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c
index c9088d8..4538093 100644
--- a/arch/arm/mach-omap1/lcd_dma.c
+++ b/arch/arm/mach-omap1/lcd_dma.c
@@ -37,7 +37,7 @@
 	 * On OMAP1510, internal LCD controller will start the transfer
 	 * when it gets enabled, so assume DMA running if LCD enabled.
 	 */
-	if (cpu_is_omap1510())
+	if (cpu_is_omap15xx())
 		if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
 			return 1;
 
@@ -95,7 +95,7 @@
 
 void omap_set_lcd_dma_b1_rotation(int rotate)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
 		BUG();
 		return;
@@ -106,7 +106,7 @@
 
 void omap_set_lcd_dma_b1_mirror(int mirror)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
 		BUG();
 	}
@@ -116,7 +116,7 @@
 
 void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA virtual resulotion is not supported "
 				"in 1510 mode\n");
 		BUG();
@@ -127,7 +127,7 @@
 
 void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
 		BUG();
 	}
@@ -177,7 +177,7 @@
 			bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
 			/* 1510 DMA requires the bottom address to be 2 more
 			 * than the actual last memory access location. */
-			if (cpu_is_omap1510() &&
+			if (cpu_is_omap15xx() &&
 				lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
 					bottom += 2;
 			ei = PIXSTEP(0, 0, 1, 0);
@@ -241,7 +241,7 @@
 		return;	/* Suppress warning about uninitialized vars */
 	}
 
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
 		omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
 		omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
@@ -343,7 +343,7 @@
 		BUG();
 		return;
 	}
-	if (!cpu_is_omap1510())
+	if (!cpu_is_omap15xx())
 		omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
 			    OMAP1610_DMA_LCD_CCR);
 	lcd_dma.reserved = 0;
@@ -360,7 +360,7 @@
 	 * connected. Otherwise the OMAP internal controller will
 	 * start the transfer when it gets enabled.
 	 */
-	if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+	if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
 		return;
 
 	w = omap_readw(OMAP1610_DMA_LCD_CTRL);
@@ -378,14 +378,14 @@
 void omap_setup_lcd_dma(void)
 {
 	BUG_ON(lcd_dma.active);
-	if (!cpu_is_omap1510()) {
+	if (!cpu_is_omap15xx()) {
 		/* Set some reasonable defaults */
 		omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
 		omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
 		omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
 	}
 	set_b1_regs();
-	if (!cpu_is_omap1510()) {
+	if (!cpu_is_omap15xx()) {
 		u16 w;
 
 		w = omap_readw(OMAP1610_DMA_LCD_CCR);
@@ -407,7 +407,7 @@
 	u16 w;
 
 	lcd_dma.active = 0;
-	if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+	if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
 		return;
 
 	w = omap_readw(OMAP1610_DMA_LCD_CCR);
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 0cca23a..98ba978 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -647,7 +647,7 @@
 
 
 
-static struct platform_suspend_ops omap_pm_ops ={
+static const struct platform_suspend_ops omap_pm_ops = {
 	.prepare	= omap_pm_prepare,
 	.enter		= omap_pm_enter,
 	.finish		= omap_pm_finish,
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index ed7a61f..6885d2f 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -49,11 +49,15 @@
 #include <mach/hardware.h>
 #include <asm/leds.h>
 #include <asm/irq.h>
+#include <asm/sched_clock.h>
+
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
 #include <plat/common.h>
 
+#ifdef CONFIG_OMAP_MPU_TIMER
+
 #define OMAP_MPU_TIMER_BASE		OMAP_MPU_TIMER1_BASE
 #define OMAP_MPU_TIMER_OFFSET		0x100
 
@@ -67,7 +71,7 @@
 ((volatile omap_mpu_timer_regs_t*)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE +	\
 				 (n)*OMAP_MPU_TIMER_OFFSET))
 
-static inline unsigned long omap_mpu_timer_read(int nr)
+static inline unsigned long notrace omap_mpu_timer_read(int nr)
 {
 	volatile omap_mpu_timer_regs_t* timer = omap_mpu_timer_base(nr);
 	return timer->read_tim;
@@ -212,6 +216,32 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static DEFINE_CLOCK_DATA(cd);
+
+static inline unsigned long long notrace _omap_mpu_sched_clock(void)
+{
+	u32 cyc = mpu_read(&clocksource_mpu);
+	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+#ifndef CONFIG_OMAP_32K_TIMER
+unsigned long long notrace sched_clock(void)
+{
+	return _omap_mpu_sched_clock();
+}
+#else
+static unsigned long long notrace omap_mpu_sched_clock(void)
+{
+	return _omap_mpu_sched_clock();
+}
+#endif
+
+static void notrace mpu_update_sched_clock(void)
+{
+	u32 cyc = mpu_read(&clocksource_mpu);
+	update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 static void __init omap_init_clocksource(unsigned long rate)
 {
 	static char err[] __initdata = KERN_ERR
@@ -219,17 +249,13 @@
 
 	setup_irq(INT_TIMER2, &omap_mpu_timer2_irq);
 	omap_mpu_timer_start(1, ~0, 1);
+	init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
 
 	if (clocksource_register_hz(&clocksource_mpu, rate))
 		printk(err, clocksource_mpu.name);
 }
 
-/*
- * ---------------------------------------------------------------------------
- * Timer initialization
- * ---------------------------------------------------------------------------
- */
-static void __init omap_timer_init(void)
+static void __init omap_mpu_timer_init(void)
 {
 	struct clk	*ck_ref = clk_get(NULL, "ck_ref");
 	unsigned long	rate;
@@ -246,6 +272,66 @@
 	omap_init_clocksource(rate);
 }
 
+#else
+static inline void omap_mpu_timer_init(void)
+{
+	pr_err("Bogus timer, should not happen\n");
+}
+#endif	/* CONFIG_OMAP_MPU_TIMER */
+
+#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
+static unsigned long long (*preferred_sched_clock)(void);
+
+unsigned long long notrace sched_clock(void)
+{
+	if (!preferred_sched_clock)
+		return 0;
+
+	return preferred_sched_clock();
+}
+
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+	if (use_32k_sched_clock)
+		preferred_sched_clock = omap_32k_sched_clock;
+	else
+		preferred_sched_clock = omap_mpu_sched_clock;
+}
+#else
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+}
+#endif
+
+static inline int omap_32k_timer_usable(void)
+{
+	int res = false;
+
+	if (cpu_is_omap730() || cpu_is_omap15xx())
+		return res;
+
+#ifdef CONFIG_OMAP_32K_TIMER
+	res = omap_32k_timer_init();
+#endif
+
+	return res;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * Timer initialization
+ * ---------------------------------------------------------------------------
+ */
+static void __init omap_timer_init(void)
+{
+	if (omap_32k_timer_usable()) {
+		preferred_sched_clock_init(1);
+	} else {
+		omap_mpu_timer_init();
+		preferred_sched_clock_init(0);
+	}
+}
+
 struct sys_timer omap_timer = {
 	.init		= omap_timer_init,
 };
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 20cfbcc..13d7b8f 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -52,10 +52,9 @@
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
+#include <plat/common.h>
 #include <plat/dmtimer.h>
 
-struct sys_timer omap_timer;
-
 /*
  * ---------------------------------------------------------------------------
  * 32KHz OS timer
@@ -181,14 +180,14 @@
  * Timer initialization
  * ---------------------------------------------------------------------------
  */
-static void __init omap_timer_init(void)
+bool __init omap_32k_timer_init(void)
 {
+	omap_init_clocksource_32k();
+
 #ifdef CONFIG_OMAP_DM_TIMER
 	omap_dm_timer_init();
 #endif
 	omap_init_32k_timer();
-}
 
-struct sys_timer omap_timer = {
-	.init		= omap_timer_init,
-};
+	return true;
+}
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index cd7332f..1c0c2b0 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -187,16 +187,19 @@
 					   hsmmc.o
 obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom.o \
 					   board-zoom-peripherals.o \
+					   board-zoom-display.o \
 					   board-flash.o \
 					   hsmmc.o \
 					   board-zoom-debugboard.o
 obj-$(CONFIG_MACH_OMAP_ZOOM3)		+= board-zoom.o \
 					   board-zoom-peripherals.o \
+					   board-zoom-display.o \
 					   board-flash.o \
 					   hsmmc.o \
 					   board-zoom-debugboard.o
 obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-3630sdp.o \
 					   board-zoom-peripherals.o \
+					   board-zoom-display.o \
 					   board-flash.o \
 					   hsmmc.o
 obj-$(CONFIG_MACH_CM_T35)		+= board-cm-t35.o \
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 3b39ef1..d4e41ef 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -38,6 +38,7 @@
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include <plat/gpmc-smc91x.h>
 
@@ -270,15 +271,20 @@
 	.platform_disable	= sdp3430_panel_disable_lcd,
 };
 
-static struct omap_dss_device sdp3430_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= sdp3430_panel_enable_dvi,
 	.platform_disable	= sdp3430_panel_disable_dvi,
 };
 
+static struct omap_dss_device sdp3430_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device sdp3430_tv_device = {
 	.name			= "tv",
 	.driver_name		= "venc",
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 5d41dbe..6264564 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -207,6 +207,7 @@
 {
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
 	zoom_peripherals_init();
+	zoom_display_init();
 	board_smc91x_init();
 	board_flash_init(sdp_flash_partitions, chip_sel_sdp);
 	enable_board_wakeup_source();
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index a70bdf2..07d1b20 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -554,6 +554,7 @@
 
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+	OMAP4_MUX(USBB2_ULPITLL_CLK, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #else
@@ -576,11 +577,12 @@
 	omap4_twl6030_hsmmc_init(mmc);
 
 	/* Power on the ULPI PHY */
-	if (gpio_is_valid(OMAP4SDP_MDM_PWR_EN_GPIO)) {
-		/* FIXME: Assumes pad is already muxed for GPIO mode */
-		gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
+	status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
+	if (status)
+		pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__);
+	else
 		gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1);
-	}
+
 	usb_ehci_init(&ehci_pdata);
 	usb_musb_init(&musb_board_data);
 
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index bc15626..10d60b7 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -35,6 +35,7 @@
 #include <plat/common.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include "mux.h"
 #include "control.h"
@@ -303,13 +304,18 @@
 	lcd_enabled = 0;
 }
 
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "sharp_lq",
+	.platform_enable	= am3517_evm_panel_enable_lcd,
+	.platform_disable	= am3517_evm_panel_disable_lcd,
+};
+
 static struct omap_dss_device am3517_evm_lcd_device = {
 	.type			= OMAP_DISPLAY_TYPE_DPI,
 	.name			= "lcd",
-	.driver_name		= "sharp_lq_panel",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &lcd_panel,
 	.phy.dpi.data_lines 	= 16,
-	.platform_enable	= am3517_evm_panel_enable_lcd,
-	.platform_disable	= am3517_evm_panel_disable_lcd,
 };
 
 static int am3517_evm_panel_enable_tv(struct omap_dss_device *dssdev)
@@ -346,13 +352,18 @@
 	dvi_enabled = 0;
 }
 
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
+	.platform_enable	= am3517_evm_panel_enable_dvi,
+	.platform_disable	= am3517_evm_panel_disable_dvi,
+};
+
 static struct omap_dss_device am3517_evm_dvi_device = {
 	.type			= OMAP_DISPLAY_TYPE_DPI,
 	.name			= "dvi",
-	.driver_name		= "generic_panel",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
 	.phy.dpi.data_lines	= 24,
-	.platform_enable	= am3517_evm_panel_enable_dvi,
-	.platform_disable	= am3517_evm_panel_disable_dvi,
 };
 
 static struct omap_dss_device *am3517_evm_dss_devices[] = {
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 486a3de..dac1416 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -46,6 +46,7 @@
 #include <plat/gpmc.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 #include <plat/mcspi.h>
 
 #include <mach/hardware.h>
@@ -351,24 +352,34 @@
 {
 }
 
-static struct omap_dss_device cm_t35_lcd_device = {
-	.name			= "lcd",
-	.driver_name		= "toppoly_tdo35s_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 18,
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "toppoly_tdo35s",
 	.platform_enable	= cm_t35_panel_enable_lcd,
 	.platform_disable	= cm_t35_panel_disable_lcd,
 };
 
-static struct omap_dss_device cm_t35_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
+static struct omap_dss_device cm_t35_lcd_device = {
+	.name			= "lcd",
 	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &lcd_panel,
+	.phy.dpi.data_lines	= 18,
+};
+
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= cm_t35_panel_enable_dvi,
 	.platform_disable	= cm_t35_panel_disable_dvi,
 };
 
+static struct omap_dss_device cm_t35_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device cm_t35_tv_device = {
 	.name			= "tv",
 	.driver_name		= "venc",
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 5b0c777..8f9a64d 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -124,8 +124,9 @@
 #if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
 #define RTC_IO_GPIO		(153)
 #define RTC_WR_GPIO		(154)
-#define RTC_RD_GPIO		(160)
+#define RTC_RD_GPIO		(53)
 #define RTC_CS_GPIO		(163)
+#define RTC_CS_EN_GPIO		(160)
 
 struct v3020_platform_data cm_t3517_v3020_pdata = {
 	.use_gpio	= 1,
@@ -145,6 +146,16 @@
 
 static void __init cm_t3517_init_rtc(void)
 {
+	int err;
+
+	err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en");
+	if (err) {
+		pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err);
+		return;
+	}
+
+	gpio_direction_output(RTC_CS_EN_GPIO, 1);
+
 	platform_device_register(&cm_t3517_rtc_device);
 }
 #else
@@ -214,12 +225,12 @@
 	},
 	{
 		.name           = "linux",
-		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x280000 */
+		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x2A0000 */
 		.size           = 32 * NAND_BLOCK_SIZE,
 	},
 	{
 		.name           = "rootfs",
-		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x680000 */
+		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x6A0000 */
 		.size           = MTDPART_SIZ_FULL,
 	},
 };
@@ -256,11 +267,19 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	/* GPIO186 - Green LED */
 	OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-	/* RTC GPIOs: IO, WR#, RD#, CS# */
+
+	/* RTC GPIOs: */
+	/* IO - GPIO153 */
 	OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* WR# - GPIO154 */
 	OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
-	OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* RD# - GPIO53 */
+	OMAP3_MUX(GPMC_NCS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* CS# - GPIO163 */
 	OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* CS EN - GPIO160 */
+	OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
 	/* HSUSB1 RESET */
 	OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
 	/* HSUSB2 RESET */
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 451e7ff..9a2a31e 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -46,6 +46,7 @@
 #include <plat/nand.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include <plat/mcspi.h>
 #include <linux/input/matrix_keypad.h>
@@ -114,9 +115,6 @@
 
 static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev)
 {
-	twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1);
-	twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0);
-
 	if (gpio_is_valid(dssdev->reset_gpio))
 		gpio_set_value_cansleep(dssdev->reset_gpio, 1);
 	return 0;
@@ -149,25 +147,34 @@
 static struct regulator_consumer_supply devkit8000_vio_supply =
 	REGULATOR_SUPPLY("vcc", "spi2.0");
 
-static struct omap_dss_device devkit8000_lcd_device = {
-	.name                   = "lcd",
-	.driver_name            = "generic_panel",
-	.type                   = OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines     = 24,
-	.reset_gpio             = -EINVAL, /* will be replaced */
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "generic",
 	.platform_enable        = devkit8000_panel_enable_lcd,
 	.platform_disable       = devkit8000_panel_disable_lcd,
 };
-static struct omap_dss_device devkit8000_dvi_device = {
-	.name                   = "dvi",
-	.driver_name            = "generic_panel",
+
+static struct omap_dss_device devkit8000_lcd_device = {
+	.name                   = "lcd",
 	.type                   = OMAP_DISPLAY_TYPE_DPI,
+	.driver_name            = "generic_dpi_panel",
+	.data			= &lcd_panel,
 	.phy.dpi.data_lines     = 24,
-	.reset_gpio             = -EINVAL, /* will be replaced */
+};
+
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable        = devkit8000_panel_enable_dvi,
 	.platform_disable       = devkit8000_panel_disable_dvi,
 };
 
+static struct omap_dss_device devkit8000_dvi_device = {
+	.name                   = "dvi",
+	.type                   = OMAP_DISPLAY_TYPE_DPI,
+	.driver_name            = "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines     = 24,
+};
+
 static struct omap_dss_device devkit8000_tv_device = {
 	.name                   = "tv",
 	.driver_name            = "venc",
@@ -237,6 +244,8 @@
 static int devkit8000_twl_gpio_setup(struct device *dev,
 		unsigned gpio, unsigned ngpio)
 {
+	int ret;
+
 	omap_mux_init_gpio(29, OMAP_PIN_INPUT);
 	/* gpio + 0 is "mmc0_cd" (input/IRQ) */
 	mmc[0].gpio_cd = gpio + 0;
@@ -245,17 +254,23 @@
 	/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
 	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
-        /* gpio + 1 is "LCD_PWREN" (out, active high) */
-	devkit8000_lcd_device.reset_gpio = gpio + 1;
-	gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN");
-	/* Disable until needed */
-	gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0);
+	/* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
+	devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
+	ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
+			GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN");
+	if (ret < 0) {
+		devkit8000_lcd_device.reset_gpio = -EINVAL;
+		printk(KERN_ERR "Failed to request GPIO for LCD_PWREN\n");
+	}
 
 	/* gpio + 7 is "DVI_PD" (out, active low) */
 	devkit8000_dvi_device.reset_gpio = gpio + 7;
-	gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown");
-	/* Disable until needed */
-	gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0);
+	ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
+			GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown");
+	if (ret < 0) {
+		devkit8000_dvi_device.reset_gpio = -EINVAL;
+		printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
+	}
 
 	return 0;
 }
@@ -265,8 +280,7 @@
 	.irq_base	= TWL4030_GPIO_IRQ_BASE,
 	.irq_end	= TWL4030_GPIO_IRQ_END,
 	.use_leds	= true,
-	.pullups	= BIT(1),
-	.pulldowns	= BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
+	.pulldowns	= BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13)
 				| BIT(15) | BIT(16) | BIT(17),
 	.setup		= devkit8000_twl_gpio_setup,
 };
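
The devkit8000 changes above replace the unchecked gpio_request()/gpio_direction_output() pairs with gpio_request_one(), which claims the line and sets its direction and initial level in one call, and lets the board code fall back to reset_gpio = -EINVAL on failure. A hedged fragment of the same pattern in isolation (kernel context assumed; the function name and label below are illustrative, not from the patch):

#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Request an active-high enable line, driven low until the panel needs it. */
static int demo_request_panel_gpio(int *gpio)
{
	int ret;

	ret = gpio_request_one(*gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW,
			       "panel enable");
	if (ret < 0) {
		*gpio = -EINVAL;	/* callers treat this as "no GPIO wired" */
		pr_err("panel enable GPIO request failed: %d\n", ret);
	}

	return ret;
}
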
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 0afa301..3be85a1 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -17,6 +17,7 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
+#include <linux/input.h>
 
 #include <linux/regulator/machine.h>
 #include <linux/regulator/fixed.h>
@@ -31,6 +32,7 @@
 #include <plat/gpmc.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 #include <plat/onenand.h>
 
 #include "mux.h"
@@ -459,13 +461,18 @@
 	gpio_direction_output(IGEP2_GPIO_DVI_PUP, 0);
 }
 
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
+	.platform_enable	= igep2_enable_dvi,
+	.platform_disable	= igep2_disable_dvi,
+};
+
 static struct omap_dss_device igep2_dvi_device = {
 	.type			= OMAP_DISPLAY_TYPE_DPI,
 	.name			= "dvi",
-	.driver_name		= "generic_panel",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
 	.phy.dpi.data_lines	= 24,
-	.platform_enable	= igep2_enable_dvi,
-	.platform_disable	= igep2_disable_dvi,
 };
 
 static struct omap_dss_device *igep2_dss_devices[] = {
@@ -535,6 +542,37 @@
 	.audio = &igep2_audio_data,
 };
 
+static int igep2_keymap[] = {
+	KEY(0, 0, KEY_LEFT),
+	KEY(0, 1, KEY_RIGHT),
+	KEY(0, 2, KEY_A),
+	KEY(0, 3, KEY_B),
+	KEY(1, 0, KEY_DOWN),
+	KEY(1, 1, KEY_UP),
+	KEY(1, 2, KEY_E),
+	KEY(1, 3, KEY_F),
+	KEY(2, 0, KEY_ENTER),
+	KEY(2, 1, KEY_I),
+	KEY(2, 2, KEY_J),
+	KEY(2, 3, KEY_K),
+	KEY(3, 0, KEY_M),
+	KEY(3, 1, KEY_N),
+	KEY(3, 2, KEY_O),
+	KEY(3, 3, KEY_P)
+};
+
+static struct matrix_keymap_data igep2_keymap_data = {
+	.keymap			= igep2_keymap,
+	.keymap_size		= ARRAY_SIZE(igep2_keymap),
+};
+
+static struct twl4030_keypad_data igep2_keypad_pdata = {
+	.keymap_data	= &igep2_keymap_data,
+	.rows		= 4,
+	.cols		= 4,
+	.rep		= 1,
+};
+
 static struct twl4030_platform_data igep2_twldata = {
 	.irq_base	= TWL4030_IRQ_BASE,
 	.irq_end	= TWL4030_IRQ_END,
@@ -543,6 +581,7 @@
 	.usb		= &igep2_usb_data,
 	.codec		= &igep2_codec_data,
 	.gpio		= &igep2_twl4030_gpio_pdata,
+	.keypad		= &igep2_keypad_pdata,
 	.vmmc1          = &igep2_vmmc1,
 	.vpll2		= &igep2_vpll2,
 	.vio		= &igep2_vio,
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
index bcccd68..4dc62a9 100644
--- a/arch/arm/mach-omap2/board-igep0030.c
+++ b/arch/arm/mach-omap2/board-igep0030.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 
 #include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 #include <linux/i2c/twl.h>
 #include <linux/mmc/host.h>
 
@@ -43,7 +44,7 @@
 #define IGEP3_GPIO_WIFI_NRESET	139
 #define IGEP3_GPIO_BT_NRESET	137
 
-#define IGEP3_GPIO_USBH_NRESET  115
+#define IGEP3_GPIO_USBH_NRESET  183
 
 
 #if defined(CONFIG_MTD_ONENAND_OMAP2) || \
@@ -103,7 +104,7 @@
 	},
 };
 
-void __init igep3_flash_init(void)
+static void __init igep3_flash_init(void)
 {
 	u8 cs = 0;
 	u8 onenandcs = GPMC_CS_NUM + 1;
@@ -137,12 +138,11 @@
 }
 
 #else
-void __init igep3_flash_init(void) {}
+static void __init igep3_flash_init(void) {}
 #endif
 
-static struct regulator_consumer_supply igep3_vmmc1_supply = {
-	.supply		= "vmmc",
-};
+static struct regulator_consumer_supply igep3_vmmc1_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
 
 /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
 static struct regulator_init_data igep3_vmmc1 = {
@@ -159,6 +159,52 @@
 	.consumer_supplies      = &igep3_vmmc1_supply,
 };
 
+static struct regulator_consumer_supply igep3_vio_supply =
+	REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep3_vio = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV		= 1,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &igep3_vio_supply,
+};
+
+static struct regulator_consumer_supply igep3_vmmc2_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep3_vmmc2 = {
+	.constraints	= {
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+		.always_on		= 1,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &igep3_vmmc2_supply,
+};
+
+static struct fixed_voltage_config igep3_vwlan = {
+	.supply_name		= "vwlan",
+	.microvolts		= 3300000,
+	.gpio			= -EINVAL,
+	.enabled_at_boot	= 1,
+	.init_data		= &igep3_vmmc2,
+};
+
+static struct platform_device igep3_vwlan_device = {
+	.name	= "reg-fixed-voltage",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &igep3_vwlan,
+	},
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	[0] = {
 		.mmc		= 1,
@@ -254,12 +300,6 @@
 	mmc[0].gpio_cd = gpio + 0;
 	omap2_hsmmc_init(mmc);
 
-	/*
-	 * link regulators to MMC adapters ... we "know" the
-	 * regulators will be set up only *after* we return.
-	 */
-	igep3_vmmc1_supply.dev = mmc[0].dev;
-
 	/* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
 #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
 	if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
@@ -287,6 +327,10 @@
 	.usb_mode	= T2_USB_MODE_ULPI,
 };
 
+static struct platform_device *igep3_devices[] __initdata = {
+	&igep3_vwlan_device,
+};
+
 static void __init igep3_init_irq(void)
 {
 	omap2_init_common_infrastructure();
@@ -303,6 +347,7 @@
 	.usb		= &igep3_twl4030_usb_data,
 	.gpio		= &igep3_twl4030_gpio_pdata,
 	.vmmc1		= &igep3_vmmc1,
+	.vio		= &igep3_vio,
 };
 
 static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = {
@@ -363,8 +408,20 @@
 void __init igep3_wifi_bt_init(void) {}
 #endif
 
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+	.port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+	.phy_reset = true,
+	.reset_gpio_port[0] = -EINVAL,
+	.reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET,
+	.reset_gpio_port[2] = -EINVAL,
+};
+
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+	OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #endif
@@ -375,9 +432,10 @@
 
 	/* Register I2C busses and drivers */
 	igep3_i2c_init();
-
+	platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices));
 	omap_serial_init();
 	usb_musb_init(&musb_board_data);
+	usb_ehci_init(&ehci_pdata);
 
 	igep3_flash_init();
 	igep3_leds_init();
@@ -392,6 +450,7 @@
 
 MACHINE_START(IGEP0030, "IGEP OMAP3 module")
 	.boot_params	= 0x80000100,
+	.reserve	= omap_reserve,
 	.map_io		= omap3_map_io,
 	.init_irq	= igep3_init_irq,
 	.init_machine	= igep3_init,
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 6c12760..46d814a 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -41,6 +41,7 @@
 #include <plat/board.h>
 #include <plat/common.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
@@ -194,14 +195,19 @@
 		gpio_set_value(dssdev->reset_gpio, 0);
 }
 
+static struct panel_generic_dpi_data dvi_panel = {
+	.name = "generic",
+	.platform_enable = beagle_enable_dvi,
+	.platform_disable = beagle_disable_dvi,
+};
+
 static struct omap_dss_device beagle_dvi_device = {
 	.type = OMAP_DISPLAY_TYPE_DPI,
 	.name = "dvi",
-	.driver_name = "generic_panel",
+	.driver_name = "generic_dpi_panel",
+	.data = &dvi_panel,
 	.phy.dpi.data_lines = 24,
-	.reset_gpio = 170,
-	.platform_enable = beagle_enable_dvi,
-	.platform_disable = beagle_disable_dvi,
+	.reset_gpio = -EINVAL,
 };
 
 static struct omap_dss_device beagle_tv_device = {
@@ -273,6 +279,8 @@
 static int beagle_twl_gpio_setup(struct device *dev,
 		unsigned gpio, unsigned ngpio)
 {
+	int r;
+
 	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
 		mmc[0].gpio_wp = -EINVAL;
 	} else if ((omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C1_3) ||
@@ -293,17 +301,63 @@
 	/* REVISIT: need ehci-omap hooks for external VBUS
 	 * power switch and overcurrent detect
 	 */
+	if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM) {
+		r = gpio_request(gpio + 1, "EHCI_nOC");
+		if (!r) {
+			r = gpio_direction_input(gpio + 1);
+			if (r)
+				gpio_free(gpio + 1);
+		}
+		if (r)
+			pr_err("%s: unable to configure EHCI_nOC\n", __func__);
+	}
 
-	gpio_request(gpio + 1, "EHCI_nOC");
-	gpio_direction_input(gpio + 1);
-
-	/* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
+	/*
+	 * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
+	 * high / others active low)
+	 */
 	gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
-	gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
+		gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
+	else
+		gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+
+	/* DVI reset GPIO is different between beagle revisions */
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
+		beagle_dvi_device.reset_gpio = 129;
+	else
+		beagle_dvi_device.reset_gpio = 170;
 
 	/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
 	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
+	/*
+	 * gpio + 1 on XM controls the TFP410's enable line (active low)
+	 * what gpio + 2 controls varies depending on the board rev as follows:
+	 * P7/P8 revisions (prototype): Camera EN
+	 * A2+ revisions (production): LDO (supplies DVI, serial, led blocks)
+	 */
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
+		r = gpio_request(gpio + 1, "nDVI_PWR_EN");
+		if (!r) {
+			r = gpio_direction_output(gpio + 1, 0);
+			if (r)
+				gpio_free(gpio + 1);
+		}
+		if (r)
+			pr_err("%s: unable to configure nDVI_PWR_EN\n",
+				__func__);
+		r = gpio_request(gpio + 2, "DVI_LDO_EN");
+		if (!r) {
+			r = gpio_direction_output(gpio + 2, 1);
+			if (r)
+				gpio_free(gpio + 2);
+		}
+		if (r)
+			pr_err("%s: unable to configure DVI_LDO_EN\n",
+				__func__);
+	}
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 3de8d9b..323c380 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -43,6 +43,7 @@
 #include <plat/common.h>
 #include <plat/mcspi.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include "mux.h"
 #include "sdram-micron-mt46h32m32lf-6.h"
@@ -301,15 +302,20 @@
 	dvi_enabled = 0;
 }
 
-static struct omap_dss_device omap3_evm_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= omap3_evm_enable_dvi,
 	.platform_disable	= omap3_evm_disable_dvi,
 };
 
+static struct omap_dss_device omap3_evm_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device *omap3_evm_dss_devices[] = {
 	&omap3_evm_lcd_device,
 	&omap3_evm_tv_device,
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 9df9d93..2a2dad4 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -40,6 +40,7 @@
 #include <plat/nand.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include <plat/mcspi.h>
 #include <linux/input/matrix_keypad.h>
@@ -160,15 +161,20 @@
 	lcd_enabled = 0;
 }
 
-static struct omap_dss_device omap3_stalker_lcd_device = {
-	.name			= "lcd",
-	.driver_name		= "generic_panel",
-	.phy.dpi.data_lines	= 24,
-	.type			= OMAP_DISPLAY_TYPE_DPI,
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "generic",
 	.platform_enable	= omap3_stalker_enable_lcd,
 	.platform_disable	= omap3_stalker_disable_lcd,
 };
 
+static struct omap_dss_device omap3_stalker_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &lcd_panel,
+	.phy.dpi.data_lines	= 24,
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+};
+
 static int omap3_stalker_enable_tv(struct omap_dss_device *dssdev)
 {
 	return 0;
@@ -208,15 +214,20 @@
 	dvi_enabled = 0;
 }
 
-static struct omap_dss_device omap3_stalker_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= omap3_stalker_enable_dvi,
 	.platform_disable	= omap3_stalker_disable_dvi,
 };
 
+static struct omap_dss_device omap3_stalker_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device *omap3_stalker_dss_devices[] = {
 	&omap3_stalker_lcd_device,
 	&omap3_stalker_tv_device,
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 3094e20..e944025 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/leds.h>
 #include <linux/gpio.h>
@@ -95,7 +96,16 @@
 static void __init omap4_ehci_init(void)
 {
 	int ret;
+	struct clk *phy_ref_clk;
 
+	/* FREF_CLK3 provides the 19.2 MHz reference clock to the PHY */
+	phy_ref_clk = clk_get(NULL, "auxclk3_ck");
+	if (IS_ERR(phy_ref_clk)) {
+		pr_err("Cannot request auxclk3\n");
+		goto error1;
+	}
+	clk_set_rate(phy_ref_clk, 19200000);
+	clk_enable(phy_ref_clk);
 
 	/* disable the power to the usb hub prior to init */
 	ret = gpio_request(GPIO_HUB_POWER, "hub_power");
@@ -399,8 +409,6 @@
 	platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
 	omap_serial_init();
 	omap4_twl6030_hsmmc_init(mmc);
-	/* OMAP4 Panda uses internal transceiver so register nop transceiver */
-	usb_nop_xceiv_register();
 	omap4_ehci_init();
 	usb_musb_init(&musb_board_data);
 }
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index cb77be7..39a71bb 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -40,9 +40,6 @@
 static struct regulator_init_data rm680_vemmc = {
 	.constraints =	{
 		.name			= "rm680_vemmc",
-		.min_uV			= 2900000,
-		.max_uV			= 2900000,
-		.apply_uV		= 1,
 		.valid_modes_mask	= REGULATOR_MODE_NORMAL
 					| REGULATOR_MODE_STANDBY,
 		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
new file mode 100644
index 0000000..6bcd436
--- /dev/null
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc.
+ *
+ * Modified from mach-omap2/board-zoom-peripherals.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl.h>
+#include <linux/spi/spi.h>
+#include <plat/mcspi.h>
+#include <plat/display.h>
+
+#define LCD_PANEL_RESET_GPIO_PROD	96
+#define LCD_PANEL_RESET_GPIO_PILOT	55
+#define LCD_PANEL_QVGA_GPIO		56
+
+static void zoom_lcd_panel_init(void)
+{
+	int ret;
+	unsigned char lcd_panel_reset_gpio;
+
+	lcd_panel_reset_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
+			LCD_PANEL_RESET_GPIO_PROD :
+			LCD_PANEL_RESET_GPIO_PILOT;
+
+	ret = gpio_request(lcd_panel_reset_gpio, "lcd reset");
+	if (ret) {
+		pr_err("Failed to get LCD reset GPIO (gpio%d).\n",
+			lcd_panel_reset_gpio);
+		return;
+	}
+	gpio_direction_output(lcd_panel_reset_gpio, 1);
+
+	ret = gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga");
+	if (ret) {
+		pr_err("Failed to get LCD_PANEL_QVGA_GPIO (gpio%d).\n",
+			LCD_PANEL_QVGA_GPIO);
+		goto err0;
+	}
+	gpio_direction_output(LCD_PANEL_QVGA_GPIO, 1);
+
+	return;
+err0:
+	gpio_free(lcd_panel_reset_gpio);
+}
+
+static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev)
+{
+	return 0;
+}
+
+static void zoom_panel_disable_lcd(struct omap_dss_device *dssdev)
+{
+}
+
+/*
+ * TWL4030 INTBR and PWM1 register offsets used for LCD backlight control
+ */
+#define TWL_INTBR_PMBR1	0xD
+#define TWL_INTBR_GPBR1	0xC
+#define TWL_LED_PWMON	0x0
+#define TWL_LED_PWMOFF	0x1
+
+static int zoom_set_bl_intensity(struct omap_dss_device *dssdev, int level)
+{
+	unsigned char c;
+	u8 mux_pwm, enb_pwm;
+
+	if (level > 100)
+		return -1;
+
+	twl_i2c_read_u8(TWL4030_MODULE_INTBR, &mux_pwm, TWL_INTBR_PMBR1);
+	twl_i2c_read_u8(TWL4030_MODULE_INTBR, &enb_pwm, TWL_INTBR_GPBR1);
+
+	if (level == 0) {
+		/* disable pwm1 output and clock */
+		enb_pwm = enb_pwm & 0xF5;
+		/* change pwm1 pin to gpio pin */
+		mux_pwm = mux_pwm & 0xCF;
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					enb_pwm, TWL_INTBR_GPBR1);
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					mux_pwm, TWL_INTBR_PMBR1);
+		return 0;
+	}
+
+	if (!((enb_pwm & 0xA) && (mux_pwm & 0x30))) {
+		/* change gpio pin to pwm1 pin */
+		mux_pwm = mux_pwm | 0x30;
+		/* enable pwm1 output and clock*/
+		enb_pwm = enb_pwm | 0x0A;
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					mux_pwm, TWL_INTBR_PMBR1);
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					enb_pwm, TWL_INTBR_GPBR1);
+	}
+
+	c = ((50 * (100 - level)) / 100) + 1;
+	twl_i2c_write_u8(TWL4030_MODULE_PWM1, 0x7F, TWL_LED_PWMOFF);
+	twl_i2c_write_u8(TWL4030_MODULE_PWM1, c, TWL_LED_PWMON);
+
+	return 0;
+}
+
+static struct omap_dss_device zoom_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "NEC_8048_panel",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.phy.dpi.data_lines	= 24,
+	.platform_enable	= zoom_panel_enable_lcd,
+	.platform_disable	= zoom_panel_disable_lcd,
+	.max_backlight_level	= 100,
+	.set_backlight		= zoom_set_bl_intensity,
+};
+
+static struct omap_dss_device *zoom_dss_devices[] = {
+	&zoom_lcd_device,
+};
+
+static struct omap_dss_board_info zoom_dss_data = {
+	.num_devices		= ARRAY_SIZE(zoom_dss_devices),
+	.devices		= zoom_dss_devices,
+	.default_device		= &zoom_lcd_device,
+};
+
+static struct platform_device zoom_dss_device = {
+	.name				= "omapdss",
+	.id				= -1,
+	.dev				= {
+		.platform_data		= &zoom_dss_data,
+	},
+};
+
+static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
+	.turbo_mode		= 1,
+	.single_channel	= 1,  /* 0: slave, 1: master */
+};
+
+static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
+	[0] = {
+		.modalias		= "nec_8048_spi",
+		.bus_num		= 1,
+		.chip_select		= 2,
+		.max_speed_hz		= 375000,
+		.controller_data	= &dss_lcd_mcspi_config,
+	},
+};
+
+static struct platform_device *zoom_display_devices[] __initdata = {
+	&zoom_dss_device,
+};
+
+void __init zoom_display_init(void)
+{
+	platform_add_devices(zoom_display_devices,
+				ARRAY_SIZE(zoom_display_devices));
+	spi_register_board_info(nec_8048_spi_board_info,
+				ARRAY_SIZE(nec_8048_spi_board_info));
+	zoom_lcd_panel_init();
+}
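
For reference, zoom_set_bl_intensity() above maps the 0-100 backlight level onto the TWL4030 PWM1 on-threshold as c = ((50 * (100 - level)) / 100) + 1, so level 100 gives 1, level 50 gives 26 and level 1 gives 50, while the off-threshold stays fixed at 0x7F. A standalone sketch of just that arithmetic (plain C, no TWL register access):

#include <stdio.h>

/* Same integer mapping as zoom_set_bl_intensity(); level is 1..100. */
static unsigned char zoom_level_to_pwmon(int level)
{
	return ((50 * (100 - level)) / 100) + 1;
}

int main(void)
{
	int level;

	for (level = 100; level >= 1; level -= 33)
		printf("level %3d -> PWMON 0x%02x (PWMOFF stays 0x7f)\n",
		       level, zoom_level_to_pwmon(level));
	return 0;
}
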
+
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 3fbd0ed..e0e040f 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -35,6 +35,8 @@
 #define OMAP_ZOOM_WLAN_PMENA_GPIO	(101)
 #define OMAP_ZOOM_WLAN_IRQ_GPIO		(162)
 
+#define LCD_PANEL_ENABLE_GPIO		(7 + OMAP_MAX_GPIO_LINES)
+
 /* Zoom2 has Qwerty keyboard*/
 static uint32_t board_keymap[] = {
 	KEY(0, 0, KEY_E),
@@ -190,7 +192,7 @@
 	},
 };
 
-struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
+static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
 	.irq = OMAP_GPIO_IRQ(OMAP_ZOOM_WLAN_IRQ_GPIO),
 	/* ZOOM ref clock is 26 MHz */
 	.board_ref_clock = 1,
@@ -224,9 +226,43 @@
 	{}      /* Terminator */
 };
 
+static struct regulator_consumer_supply zoom_vpll2_supply =
+	REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+
+static struct regulator_consumer_supply zoom_vdda_dac_supply =
+	REGULATOR_SUPPLY("vdda_dac", "omapdss");
+
+static struct regulator_init_data zoom_vpll2 = {
+	.constraints = {
+		.min_uV                 = 1800000,
+		.max_uV                 = 1800000,
+		.valid_modes_mask       = REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask         = REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies		= 1,
+	.consumer_supplies		= &zoom_vpll2_supply,
+};
+
+static struct regulator_init_data zoom_vdac = {
+	.constraints = {
+		.min_uV                 = 1800000,
+		.max_uV                 = 1800000,
+		.valid_modes_mask       = REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask         = REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies		= 1,
+	.consumer_supplies		= &zoom_vdda_dac_supply,
+};
+
 static int zoom_twl_gpio_setup(struct device *dev,
 		unsigned gpio, unsigned ngpio)
 {
+	int ret;
+
 	/* gpio + 0 is "mmc0_cd" (input/IRQ) */
 	mmc[0].gpio_cd = gpio + 0;
 	omap2_hsmmc_init(mmc);
@@ -238,11 +274,19 @@
 	zoom_vsim_supply.dev = mmc[0].dev;
 	zoom_vmmc2_supply.dev = mmc[1].dev;
 
-	return 0;
+	ret = gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd enable");
+	if (ret) {
+		pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n",
+				LCD_PANEL_ENABLE_GPIO);
+		return ret;
+	}
+	gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
+
+	return ret;
 }
 
 /* EXTMUTE callback function */
-void zoom2_set_hs_extmute(int mute)
+static void zoom2_set_hs_extmute(int mute)
 {
 	gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
 }
@@ -301,7 +345,8 @@
 	.vmmc1          = &zoom_vmmc1,
 	.vmmc2          = &zoom_vmmc2,
 	.vsim           = &zoom_vsim,
-
+	.vpll2		= &zoom_vpll2,
+	.vdac		= &zoom_vdac,
 };
 
 static struct i2c_board_info __initdata zoom_i2c_boardinfo[] = {
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index e041c53..e26754c 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -130,6 +130,7 @@
 			ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS);
 	zoom_debugboard_init();
 	zoom_peripherals_init();
+	zoom_display_init();
 }
 
 MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 337392c..acb7ae5 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -77,7 +77,7 @@
 	dd = clk->dpll_data;
 
 	/* DPLL divider must result in a valid jitter correction val */
-	fint = clk->parent->rate / (n + 1);
+	fint = clk->parent->rate / n;
 	if (fint < DPLL_FINT_BAND1_MIN) {
 
 		pr_debug("rejecting n=%d due to Fint failure, "
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index d3ab1c9e..403a4a1 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3286,7 +3286,7 @@
 	CLK(NULL,	"cpefuse_fck",	&cpefuse_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"ts_fck",	&ts_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
-	CLK("ehci-omap.0",	"usbtll_fck",	&usbtll_fck,	CK_3430ES2 | CK_AM35XX),
+	CLK("ehci-omap.0",	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK("omap-mcbsp.1",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK("omap-mcbsp.5",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK(NULL,	"core_96m_fck",	&core_96m_fck,	CK_3XXX),
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index e8cb32f..de9ec8d 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -34,7 +34,6 @@
 #include "cm2_44xx.h"
 #include "cm-regbits-44xx.h"
 #include "prm44xx.h"
-#include "prm44xx.h"
 #include "prm-regbits-44xx.h"
 #include "control.h"
 #include "scrm44xx.h"
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index e20b986..58e42f7 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -423,6 +423,12 @@
 {
 	struct clkdm_dep *cd;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+		       clkdm1->name, clkdm2->name, __func__);
+		return -EINVAL;
+	}
+
 	if (!clkdm1 || !clkdm2)
 		return -EINVAL;
 
@@ -458,6 +464,12 @@
 {
 	struct clkdm_dep *cd;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+		       clkdm1->name, clkdm2->name, __func__);
+		return -EINVAL;
+	}
+
 	if (!clkdm1 || !clkdm2)
 		return -EINVAL;
 
@@ -500,6 +512,12 @@
 	if (!clkdm1 || !clkdm2)
 		return -EINVAL;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+		       clkdm1->name, clkdm2->name, __func__);
+		return -EINVAL;
+	}
+
 	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
 	if (IS_ERR(cd)) {
 		pr_debug("clockdomain: hardware cannot set/clear wake up of "
@@ -527,6 +545,12 @@
 	struct clkdm_dep *cd;
 	u32 mask = 0;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s: %s: not yet implemented\n",
+		       clkdm->name, __func__);
+		return -EINVAL;
+	}
+
 	if (!clkdm)
 		return -EINVAL;
 
@@ -830,8 +854,7 @@
 	 * dependency code and data for OMAP4.
 	 */
 	if (cpu_is_omap44xx()) {
-		WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
-			  "support is not yet implemented\n");
+		pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
 	} else {
 		if (atomic_read(&clkdm->usecount) > 0)
 			_clkdm_add_autodeps(clkdm);
@@ -872,8 +895,7 @@
 	 * dependency code and data for OMAP4.
 	 */
 	if (cpu_is_omap44xx()) {
-		WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
-			  "support is not yet implemented\n");
+		pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
 	} else {
 		if (atomic_read(&clkdm->usecount) > 0)
 			_clkdm_del_autodeps(clkdm);
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index de3faa2..9b459c2 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -103,9 +103,7 @@
 		const char *name;
 		struct powerdomain *ptr;
 	} pwrdm;
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
 	const u16 clktrctrl_mask;
-#endif
 	const u8 flags;
 	const u8 dep_bit;
 	const u8 prcm_partition;
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index 51920fc..10622c9 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -30,8 +30,6 @@
 #include "cm1_44xx.h"
 #include "cm2_44xx.h"
 
-#include "cm1_44xx.h"
-#include "cm2_44xx.h"
 #include "cm-regbits-44xx.h"
 #include "prm44xx.h"
 #include "prcm44xx.h"
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index f3e043f..f7b22a1 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -47,6 +47,8 @@
 
 #define OMAP3_STATE_MAX OMAP3_STATE_C7
 
+#define CPUIDLE_FLAG_CHECK_BM	0x10000	/* use omap3_enter_idle_bm() */
+
 struct omap3_processor_cx {
 	u8 valid;
 	u8 type;
@@ -252,7 +254,7 @@
 	 * FIXME: we currently manage device-specific idle states
 	 *        for PER and CORE in combination with CPU-specific
 	 *        idle states.  This is wrong, and device-specific
-	 *        idle managment needs to be separated out into 
+	 *        idle management needs to be separated out into 
 	 *        its own code.
 	 */
 
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 381f4eb..2c9c912 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -978,7 +978,7 @@
 arch_initcall(omap2_init_devices);
 
 #if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
-struct omap_device_pm_latency omap_wdt_latency[] = {
+static struct omap_device_pm_latency omap_wdt_latency[] = {
 	[0] = {
 		.deactivate_func = omap_device_idle_hwmods,
 		.activate_func   = omap_device_enable_hwmods,
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index d2f15f5..34922b2 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -264,7 +264,7 @@
 	if (IS_ERR(od)) {
 		pr_err("%s: Cant build omap_device for %s:%s.\n",
 			__func__, name, oh->name);
-		return IS_ERR(od);
+		return PTR_ERR(od);
 	}
 
 	mem = platform_get_resource(&od->pdev, IORESOURCE_MEM, 0);
diff --git a/arch/arm/mach-omap2/include/mach/board-zoom.h b/arch/arm/mach-omap2/include/mach/board-zoom.h
index f93ca39..d20bd9c 100644
--- a/arch/arm/mach-omap2/include/mach/board-zoom.h
+++ b/arch/arm/mach-omap2/include/mach/board-zoom.h
@@ -1,9 +1,12 @@
 /*
  * Defines for zoom boards
  */
+#include <plat/display.h>
+
 #define ZOOM_NAND_CS    0
 
 extern int __init zoom_debugboard_init(void);
 extern void __init zoom_peripherals_init(void);
+extern void __init zoom_display_init(void);
 
 #define ZOOM2_HEADSET_EXTMUTE_GPIO	153
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index befa321..81985a6 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -38,20 +38,6 @@
  */
 
 #ifdef MULTI_OMAP2
-
-/*
- * We use __glue to avoid errors with multiple definitions of
- * .globl omap_irq_base as it's included from entry-armv.S but not
- * from entry-common.S.
- */
-#ifdef __glue
-		.pushsection .data
-		.globl	omap_irq_base
-omap_irq_base:
-		.word	0
-		.popsection
-#endif
-
 		/*
 		 * Configure the interrupt base on the first interrupt.
 		 * See also omap_irq_base_init for setting omap_irq_base.
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index e66687b..c203204 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -314,14 +314,13 @@
 	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
 }
 
+void __iomem *omap_irq_base;
+
 /*
  * Initialize asm_irq_base for entry-macro.S
  */
 static inline void omap_irq_base_init(void)
 {
-	extern void __iomem *omap_irq_base;
-
-#ifdef MULTI_OMAP2
 	if (cpu_is_omap24xx())
 		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
 	else if (cpu_is_omap34xx())
@@ -330,7 +329,6 @@
 		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
 	else
 		pr_err("Could not initialize omap_irq_base\n");
-#endif
 }
 
 void __init omap2_init_common_infrastructure(void)
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 85bf8ca..23049c4 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -100,13 +100,14 @@
 }
 
 /* XXX: FIQ and additional INTC support (only MPU at the moment) */
-static void omap_ack_irq(unsigned int irq)
+static void omap_ack_irq(struct irq_data *d)
 {
 	intc_bank_write_reg(0x1, &irq_banks[0], INTC_CONTROL);
 }
 
-static void omap_mask_irq(unsigned int irq)
+static void omap_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	int offset = irq & (~(IRQ_BITS_PER_REG - 1));
 
 	if (cpu_is_omap34xx()) {
@@ -128,8 +129,9 @@
 	intc_bank_write_reg(1 << irq, &irq_banks[0], INTC_MIR_SET0 + offset);
 }
 
-static void omap_unmask_irq(unsigned int irq)
+static void omap_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	int offset = irq & (~(IRQ_BITS_PER_REG - 1));
 
 	irq &= (IRQ_BITS_PER_REG - 1);
@@ -137,17 +139,17 @@
 	intc_bank_write_reg(1 << irq, &irq_banks[0], INTC_MIR_CLEAR0 + offset);
 }
 
-static void omap_mask_ack_irq(unsigned int irq)
+static void omap_mask_ack_irq(struct irq_data *d)
 {
-	omap_mask_irq(irq);
-	omap_ack_irq(irq);
+	omap_mask_irq(d);
+	omap_ack_irq(d);
 }
 
 static struct irq_chip omap_irq_chip = {
-	.name	= "INTC",
-	.ack	= omap_mask_ack_irq,
-	.mask	= omap_mask_irq,
-	.unmask	= omap_unmask_irq,
+	.name		= "INTC",
+	.irq_ack	= omap_mask_ack_irq,
+	.irq_mask	= omap_mask_irq,
+	.irq_unmask	= omap_unmask_irq,
 };
 
 static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank)
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 394413d..24b8850 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -193,10 +193,12 @@
 		omap_mbox_type_t irq)
 {
 	struct omap_mbox2_priv *p = mbox->priv;
-	u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-	l = mbox_read_reg(p->irqdisable);
-	l &= ~bit;
-	mbox_write_reg(l, p->irqdisable);
+	u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
+
+	if (!cpu_is_omap44xx())
+		bit = mbox_read_reg(p->irqdisable) & ~bit;
+
+	mbox_write_reg(bit, p->irqdisable);
 }
 
 static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
@@ -334,7 +336,7 @@
 	.priv	= &omap2_mbox_iva_priv,
 };
 
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 17bd639..6c84659 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -160,7 +160,7 @@
 	struct omap_mux *mux = NULL;
 	struct omap_mux_entry *e;
 	const char *mode_name;
-	int found = 0, found_mode, mode0_len = 0;
+	int found = 0, found_mode = 0, mode0_len = 0;
 	struct list_head *muxmodes = &partition->muxmodes;
 
 	mode_name = strchr(muxname, '.');
@@ -605,7 +605,7 @@
 	list_for_each_entry(e, &partition->muxmodes, node) {
 		struct omap_mux *m = &e->mux;
 
-		(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+		(void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
 					  m, &omap_mux_dbg_signal_fops);
 	}
 }
@@ -893,7 +893,7 @@
 		return NULL;
 
 	m = &entry->mux;
-	memcpy(m, src, sizeof(struct omap_mux_entry));
+	entry->mux = *src;
 
 #ifdef CONFIG_OMAP_MUX
 	if (omap_mux_copy_names(src, m)) {
@@ -1000,6 +1000,7 @@
 	if (!partition->base) {
 		pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
 			__func__, partition->phys);
+		kfree(partition);
 		return -ENODEV;
 	}
 
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 440c98e..17f80e4 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -703,7 +703,7 @@
  * Signals different on CBC package compared to the superset
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBC)
-struct omap_mux __initdata omap3_cbc_subset[] = {
+static struct omap_mux __initdata omap3_cbc_subset[] = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #else
@@ -721,7 +721,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)	\
 		&& defined(CONFIG_OMAP_PACKAGE_CBC)
-struct omap_ball __initdata omap3_cbc_ball[] = {
+static struct omap_ball __initdata omap3_cbc_ball[] = {
 	_OMAP3_BALLENTRY(CAM_D0, "ae16", NULL),
 	_OMAP3_BALLENTRY(CAM_D1, "ae15", NULL),
 	_OMAP3_BALLENTRY(CAM_D10, "d25", NULL),
diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c
index 980f11d..c322e7b 100644
--- a/arch/arm/mach-omap2/mux44xx.c
+++ b/arch/arm/mach-omap2/mux44xx.c
@@ -544,7 +544,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
 		&& defined(CONFIG_OMAP_PACKAGE_CBL)
-struct omap_ball __initdata omap4_core_cbl_ball[] = {
+static struct omap_ball __initdata omap4_core_cbl_ball[] = {
 	_OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL),
@@ -1262,7 +1262,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
 		&& defined(CONFIG_OMAP_PACKAGE_CBS)
-struct omap_ball __initdata omap4_core_cbs_ball[] = {
+static struct omap_ball __initdata omap4_core_cbs_ball[] = {
 	_OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL),
 	_OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL),
@@ -1546,7 +1546,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
 		&& defined(CONFIG_OMAP_PACKAGE_CBL)
-struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
+static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
 	_OMAP4_BALLENTRY(SIM_IO, "h4", NULL),
 	_OMAP4_BALLENTRY(SIM_CLK, "j2", NULL),
 	_OMAP4_BALLENTRY(SIM_RESET, "g2", NULL),
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
index 15f8c6c..00e1d2b 100644
--- a/arch/arm/mach-omap2/omap_twl.c
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -20,6 +20,8 @@
 
 #include <plat/voltage.h>
 
+#include "pm.h"
+
 #define OMAP3_SRI2C_SLAVE_ADDR		0x12
 #define OMAP3_VDD_MPU_SR_CONTROL_REG	0x00
 #define OMAP3_VDD_CORE_SR_CONTROL_REG	0x01
@@ -60,17 +62,17 @@
 
 #define REG_SMPS_OFFSET         0xE0
 
-unsigned long twl4030_vsel_to_uv(const u8 vsel)
+static unsigned long twl4030_vsel_to_uv(const u8 vsel)
 {
 	return (((vsel * 125) + 6000)) * 100;
 }
 
-u8 twl4030_uv_to_vsel(unsigned long uv)
+static u8 twl4030_uv_to_vsel(unsigned long uv)
 {
 	return DIV_ROUND_UP(uv - 600000, 12500);
 }
 
-unsigned long twl6030_vsel_to_uv(const u8 vsel)
+static unsigned long twl6030_vsel_to_uv(const u8 vsel)
 {
 	/*
 	 * In TWL6030 depending on the value of SMPS_OFFSET
@@ -102,7 +104,7 @@
 		return ((((vsel - 1) * 125) + 6000)) * 100;
 }
 
-u8 twl6030_uv_to_vsel(unsigned long uv)
+static u8 twl6030_uv_to_vsel(unsigned long uv)
 {
 	/*
 	 * In TWL6030 depending on the value of SMPS_OFFSET
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 125f565..a5a83b3 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -637,14 +637,14 @@
 
 		}
 
-	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
 				   &enable_off_mode, &pm_dbg_option_fops);
-	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
 				   &sleep_while_idle, &pm_dbg_option_fops);
-	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
 				   &wakeup_timer_seconds, &pm_dbg_option_fops);
 	(void) debugfs_create_file("wakeup_timer_milliseconds",
-			S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+			S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
 			&pm_dbg_option_fops);
 	pm_dbg_init_done = 1;
 
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index dac2d1d..97feb3a 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -134,7 +134,7 @@
 
 	/* Block console output in case it is on one of the OMAP UARTs */
 	if (!is_suspending())
-		if (try_acquire_console_sem())
+		if (!console_trylock())
 			goto no_sleep;
 
 	omap_uart_prepare_idle(0);
@@ -151,7 +151,7 @@
 	omap_uart_resume_idle(0);
 
 	if (!is_suspending())
-		release_console_sem();
+		console_unlock();
 
 no_sleep:
 	if (omap2_pm_debug) {
@@ -350,7 +350,7 @@
 	enable_hlt();
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap2_pm_begin,
 	.enter		= omap2_pm_enter,
 	.end		= omap2_pm_end,
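
The pm24xx.c hunks above (and the pm34xx.c and serial.c hunks below) follow the console locking rename: try_acquire_console_sem() returned 0 on success in down_trylock() style, while console_trylock() returns nonzero on success, hence the added negation in the idle paths. A hedged fragment of the resulting pattern (kernel context assumed; the wrapper below is illustrative, not from the patch):

#include <linux/console.h>
#include <linux/errno.h>

/* Skip the low-power path when the console lock cannot be taken right now. */
static int demo_idle_with_console(void (*do_idle)(void))
{
	if (!console_trylock())		/* nonzero means the lock was acquired */
		return -EBUSY;

	do_idle();			/* UARTs stay quiet while the lock is held */

	console_unlock();
	return 0;
}
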
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 5b323f2..2f864e4 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -168,9 +168,10 @@
  * once during boot sequence, but this works as we are not using secure
  * services.
  */
-static void omap3_save_secure_ram_context(u32 target_mpu_state)
+static void omap3_save_secure_ram_context(void)
 {
 	u32 ret;
+	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
 
 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
 		/*
@@ -181,7 +182,7 @@
 		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
 		ret = _omap_save_secure_sram((u32 *)
 				__pa(omap3_secure_ram_storage));
-		pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
+		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
 		/* Following is for error tracking, it should not happen */
 		if (ret) {
 			printk(KERN_ERR "save_secure_sram() returns %08x\n",
@@ -398,7 +399,7 @@
 	if (!is_suspending())
 		if (per_next_state < PWRDM_POWER_ON ||
 		    core_next_state < PWRDM_POWER_ON)
-			if (try_acquire_console_sem())
+			if (!console_trylock())
 				goto console_still_active;
 
 	/* PER */
@@ -481,7 +482,7 @@
 	}
 
 	if (!is_suspending())
-		release_console_sem();
+		console_unlock();
 
 console_still_active:
 	/* Disable IO-PAD and IO-CHAIN wakeup */
@@ -605,7 +606,7 @@
 	return;
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap3_pm_begin,
 	.end		= omap3_pm_end,
 	.enter		= omap3_pm_enter,
@@ -1094,7 +1095,7 @@
 		local_fiq_disable();
 
 		omap_dma_global_context_save();
-		omap3_save_secure_ram_context(PWRDM_POWER_ON);
+		omap3_save_secure_ram_context();
 		omap_dma_global_context_restore();
 
 		local_irq_enable();
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index e9f4862c..76cfff2 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -65,7 +65,7 @@
 	return;
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap4_pm_begin,
 	.end		= omap4_pm_end,
 	.enter		= omap4_pm_enter,
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c
index 784989f..5acd2ab 100644
--- a/arch/arm/mach-omap2/pm_bus.c
+++ b/arch/arm/mach-omap2/pm_bus.c
@@ -20,7 +20,7 @@
 #include <plat/omap-pm.h>
 
 #ifdef CONFIG_PM_RUNTIME
-int omap_pm_runtime_suspend(struct device *dev)
+static int omap_pm_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	int r, ret = 0;
@@ -37,7 +37,7 @@
 	return ret;
 };
 
-int omap_pm_runtime_resume(struct device *dev)
+static int omap_pm_runtime_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	int r;
diff --git a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
index d523389..cf600e2 100644
--- a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
@@ -19,7 +19,6 @@
 #include <plat/prcm.h>
 
 #include "powerdomain.h"
-#include "prm-regbits-34xx.h"
 #include "prm.h"
 #include "prm-regbits-24xx.h"
 #include "prm-regbits-34xx.h"
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
index 729a644..3300ff6 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -38,8 +38,8 @@
 #define OMAP4430_PRCM_MPU_CPU1_INST		0x0800
 
 /* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0018
 
 
 /*
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
index 53d44f6..49654c8 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -228,7 +228,67 @@
 
 
 #ifndef __ASSEMBLER__
-
+/*
+ * Stub out the omap2xxx/omap3xxx PRM accessors so that common code
+ * still builds when only OMAP4 support is selected
+ */
+#if defined(CONFIG_ARCH_OMAP4) && !(defined(CONFIG_ARCH_OMAP2) ||	\
+					defined(CONFIG_ARCH_OMAP3))
+static inline u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+}
+static inline u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits,
+		s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx specific function and "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+#else
 /* Power/reset management domain register get/set */
 extern u32 omap2_prm_read_mod_reg(s16 module, u16 idx);
 extern void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx);
@@ -242,6 +302,7 @@
 extern int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift);
 extern int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift);
 
+#endif	/* CONFIG_ARCH_OMAP4 */
 #endif
 
 /*
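
Note: the prm2xxx_3xxx.h hunk above is the usual Kconfig stub pattern: when only OMAP4 is selected, the OMAP2/3 PRM accessors collapse into static inline stubs that WARN() if ever reached, so common code keeps linking without the OMAP2/3 implementation. A minimal sketch of the same idiom with hypothetical CONFIG_ARCH_NEW/CONFIG_ARCH_OLD symbols and a made-up accessor (not the OMAP code itself):

    #include <linux/types.h>
    #include <linux/bug.h>

    #if defined(CONFIG_ARCH_NEW) && !defined(CONFIG_ARCH_OLD)
    /* build-time stub: callers compile, but reaching this at runtime is a bug */
    static inline u32 old_prm_read_reg(s16 module, u16 idx)
    {
        WARN(1, "old_prm_read_reg() not available on this SoC\n");
        return 0;
    }
    #else
    extern u32 old_prm_read_reg(s16 module, u16 idx);
    #endif
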
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index c645788..32e91a9 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -812,7 +812,7 @@
 
 	oh->dev_attr = uart;
 
-	acquire_console_sem(); /* in case the earlycon is on the UART */
+	console_lock(); /* in case the earlycon is on the UART */
 
 	/*
 	 * Because of early UART probing, UART did not get idled
@@ -838,7 +838,7 @@
 	omap_uart_block_sleep(uart);
 	uart->timeout = DEFAULT_TIMEOUT;
 
-	release_console_sem();
+	console_unlock();
 
 	if ((cpu_is_omap34xx() && uart->padconf) ||
 	    (uart->wk_en && uart->wk_mask)) {
@@ -852,7 +852,7 @@
 }
 
 /**
- * omap_serial_init() - intialize all supported serial ports
+ * omap_serial_init() - initialize all supported serial ports
  *
  * Initializes all available UARTs as serial ports. Platforms
  * can call this function when they want to have default behaviour
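
Note: acquire_console_sem()/release_console_sem() were renamed to console_lock()/console_unlock() during this cycle; the semantics (exclusive access to the console drivers, with output to them deferred until unlock) are unchanged, so the serial.c change is purely mechanical. A minimal sketch of the new names, assuming a hypothetical UART-reprogramming step that must not race with console output:

    #include <linux/console.h>

    static void reconfigure_console_uart(void)
    {
        console_lock();     /* hold off console output while the UART is reprogrammed */
        /* ... reprogram the console UART's clocks/registers here ... */
        console_unlock();   /* resume output, flushing anything printed meanwhile */
    }
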
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 77ecebf..1a777e3 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -282,6 +282,7 @@
 		dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
 			"interrupt handler. Smartreflex will"
 			"not function as desired\n", __func__);
+		kfree(name);
 		kfree(sr_info);
 		return ret;
 }
@@ -780,8 +781,7 @@
 	struct omap_sr *sr_info = (struct omap_sr *) data;
 
 	if (!sr_info) {
-		pr_warning("%s: omap_sr struct for sr_%s not found\n",
-			__func__, sr_info->voltdm->name);
+		pr_warning("%s: omap_sr struct not found\n", __func__);
 		return -EINVAL;
 	}
 
@@ -795,8 +795,7 @@
 	struct omap_sr *sr_info = (struct omap_sr *) data;
 
 	if (!sr_info) {
-		pr_warning("%s: omap_sr struct for sr_%s not found\n",
-			__func__, sr_info->voltdm->name);
+		pr_warning("%s: omap_sr struct not found\n", __func__);
 		return -EINVAL;
 	}
 
@@ -834,7 +833,8 @@
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_free_devinfo;
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -880,7 +880,7 @@
 		ret = sr_late_init(sr_info);
 		if (ret) {
 			pr_warning("%s: Error in SR late init\n", __func__);
-			return ret;
+			goto err_release_region;
 		}
 	}
 
@@ -891,17 +891,20 @@
 	 * not try to create rest of the debugfs entries.
 	 */
 	vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
-	if (!vdd_dbg_dir)
-		return -EINVAL;
+	if (!vdd_dbg_dir) {
+		ret = -EINVAL;
+		goto err_release_region;
+	}
 
 	dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
 	if (IS_ERR(dbg_dir)) {
 		dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
 			__func__);
-		return PTR_ERR(dbg_dir);
+		ret = PTR_ERR(dbg_dir);
+		goto err_release_region;
 	}
 
-	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
 				(void *)sr_info, &pm_sr_fops);
 	(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
 			&sr_info->err_weight);
@@ -914,7 +917,8 @@
 	if (IS_ERR(nvalue_dir)) {
 		dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
 			"for n-values\n", __func__);
-		return PTR_ERR(nvalue_dir);
+		ret = PTR_ERR(nvalue_dir);
+		goto err_release_region;
 	}
 
 	omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -923,24 +927,16 @@
 			" corresponding vdd vdd_%s. Cannot create debugfs"
 			"entries for n-values\n",
 			__func__, sr_info->voltdm->name);
-		return -ENODATA;
+		ret = -ENODATA;
+		goto err_release_region;
 	}
 
 	for (i = 0; i < sr_info->nvalue_count; i++) {
-		char *name;
-		char volt_name[32];
+		char name[NVALUE_NAME_LEN + 1];
 
-		name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
-		if (!name) {
-			dev_err(&pdev->dev, "%s: Unable to allocate memory"
-				" for n-value directory name\n",  __func__);
-			return -ENOMEM;
-		}
-
-		strcpy(name, "volt_");
-		sprintf(volt_name, "%d", volt_data[i].volt_nominal);
-		strcat(name, volt_name);
-		(void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+		snprintf(name, sizeof(name), "volt_%d",
+			 volt_data[i].volt_nominal);
+		(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
 				&(sr_info->nvalue_table[i].nvalue));
 	}
 
@@ -966,7 +962,7 @@
 	}
 
 	sr_info = _sr_lookup(pdata->voltdm);
-	if (!sr_info) {
+	if (IS_ERR(sr_info)) {
 		dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
 			__func__);
 		return -EINVAL;
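
Note: the smartreflex.c changes are three independent fixes: probe-error paths now unwind through goto labels instead of leaking the name buffer and the requested memory region, world-writable debugfs files are tightened from S_IWUGO to S_IWUSR, and the per-voltage debugfs names are built with snprintf() into a stack buffer rather than kzalloc()/strcpy()/strcat(). A sketch of the name-building pattern, with hypothetical parameters (not the driver's own function):

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/stat.h>
    #include <linux/debugfs.h>

    #define NVALUE_NAME_LEN 40  /* assumption: enough room for "volt_" plus a decimal value */

    static void create_volt_entries(struct dentry *dir, unsigned int *volts,
                                    u32 *nvalues, int count)
    {
        int i;

        for (i = 0; i < count; i++) {
            char name[NVALUE_NAME_LEN + 1];

            /* bounded and always NUL-terminated; nothing to kfree() afterwards */
            snprintf(name, sizeof(name), "volt_%u", volts[i]);
            /* owner-writable only, matching the S_IWUSR tightening above */
            debugfs_create_x32(name, S_IRUGO | S_IWUSR, dir, &nvalues[i]);
        }
    }
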
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index 786d685..b1e0af1 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -27,6 +27,7 @@
 #include <plat/voltage.h>
 
 #include "control.h"
+#include "pm.h"
 
 static bool sr_enable_on_init;
 
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 4e48e78..0fc550e 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -39,9 +39,12 @@
 #include <asm/mach/time.h>
 #include <plat/dmtimer.h>
 #include <asm/localtimer.h>
+#include <asm/sched_clock.h>
 
 #include "timer-gp.h"
 
+#include <plat/common.h>
+
 /* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
 #define MAX_GPTIMER_ID		12
 
@@ -176,14 +179,19 @@
 /* 
  * When 32k-timer is enabled, don't use GPTimer for clocksource
  * instead, just leave default clocksource which uses the 32k
- * sync counter.  See clocksource setup in see plat-omap/common.c. 
+ * sync counter.  See clocksource setup in plat-omap/counter_32k.c
  */
 
-static inline void __init omap2_gp_clocksource_init(void) {}
+static void __init omap2_gp_clocksource_init(void)
+{
+	omap_init_clocksource_32k();
+}
+
 #else
 /*
  * clocksource
  */
+static DEFINE_CLOCK_DATA(cd);
 static struct omap_dm_timer *gpt_clocksource;
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
@@ -198,6 +206,15 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static void notrace dmtimer_update_sched_clock(void)
+{
+	u32 cyc;
+
+	cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+	update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 /* Setup free-running counter for clocksource */
 static void __init omap2_gp_clocksource_init(void)
 {
@@ -218,6 +235,8 @@
 
 	omap_dm_timer_set_load_start(gpt, 1, 0);
 
+	init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
 	if (clocksource_register_hz(&clocksource_gpt, tick_rate))
 		printk(err2, clocksource_gpt.name);
 }
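
Note: timer-gp.c now also feeds sched_clock() through the ARM generic infrastructure: DEFINE_CLOCK_DATA() declares the state, init_sched_clock() registers an update callback together with the counter width and rate, and the callback reads the raw counter and hands it to update_sched_clock(). A stripped-down sketch of the same wiring, assuming a hypothetical ioremapped free-running 32-bit counter (my_counter_reg and rate are made up):

    #include <linux/init.h>
    #include <linux/io.h>
    #include <linux/types.h>
    #include <asm/sched_clock.h>

    static void __iomem *my_counter_reg;    /* assumption: ioremapped by the timer driver */

    static DEFINE_CLOCK_DATA(cd);

    static void notrace my_update_sched_clock(void)
    {
        u32 cyc = readl(my_counter_reg);        /* raw free-running count */

        update_sched_clock(&cd, cyc, (u32)~0);  /* 32 valid bits */
    }

    static void __init my_sched_clock_init(unsigned long rate)
    {
        /* 32 = counter width in bits, rate = counter frequency in Hz */
        init_sched_clock(&cd, my_update_sched_clock, 32, rate);
    }
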
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index ed6079c9..12be525 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -471,6 +471,7 @@
 	strcat(name, vdd->voltdm.name);
 
 	vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
+	kfree(name);
 	if (IS_ERR(vdd->debug_dir)) {
 		pr_warning("%s: Unable to create debugfs directory for"
 			" vdd_%s\n", __func__, vdd->voltdm.name);
diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
index b0c4907..4067669 100644
--- a/arch/arm/mach-omap2/wd_timer.c
+++ b/arch/arm/mach-omap2/wd_timer.c
@@ -13,6 +13,8 @@
 
 #include <plat/omap_hwmod.h>
 
+#include "wd_timer.h"
+
 /*
  * In order to avoid any assumptions from bootloader regarding WDT
  * settings, WDT module is reset during init. This enables the watchdog
diff --git a/arch/arm/mach-pnx4008/irq.c b/arch/arm/mach-pnx4008/irq.c
index a9ce02b..c69c180 100644
--- a/arch/arm/mach-pnx4008/irq.c
+++ b/arch/arm/mach-pnx4008/irq.c
@@ -36,44 +36,44 @@
 
 static u8 pnx4008_irq_type[NR_IRQS] = PNX4008_IRQ_TYPES;
 
-static void pnx4008_mask_irq(unsigned int irq)
+static void pnx4008_mask_irq(struct irq_data *d)
 {
-	__raw_writel(__raw_readl(INTC_ER(irq)) & ~INTC_BIT(irq), INTC_ER(irq));	/* mask interrupt */
+	__raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq));	/* mask interrupt */
 }
 
-static void pnx4008_unmask_irq(unsigned int irq)
+static void pnx4008_unmask_irq(struct irq_data *d)
 {
-	__raw_writel(__raw_readl(INTC_ER(irq)) | INTC_BIT(irq), INTC_ER(irq));	/* unmask interrupt */
+	__raw_writel(__raw_readl(INTC_ER(d->irq)) | INTC_BIT(d->irq), INTC_ER(d->irq));	/* unmask interrupt */
 }
 
-static void pnx4008_mask_ack_irq(unsigned int irq)
+static void pnx4008_mask_ack_irq(struct irq_data *d)
 {
-	__raw_writel(__raw_readl(INTC_ER(irq)) & ~INTC_BIT(irq), INTC_ER(irq));	/* mask interrupt */
-	__raw_writel(INTC_BIT(irq), INTC_SR(irq));	/* clear interrupt status */
+	__raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq));	/* mask interrupt */
+	__raw_writel(INTC_BIT(d->irq), INTC_SR(d->irq));	/* clear interrupt status */
 }
 
-static int pnx4008_set_irq_type(unsigned int irq, unsigned int type)
+static int pnx4008_set_irq_type(struct irq_data *d, unsigned int type)
 {
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) | INTC_BIT(irq), INTC_ATR(irq));	/*edge sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) | INTC_BIT(irq), INTC_APR(irq));	/*rising edge */
-		set_irq_handler(irq, handle_edge_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq));	/*edge sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq));	/*rising edge */
+		set_irq_handler(d->irq, handle_edge_irq);
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) | INTC_BIT(irq), INTC_ATR(irq));	/*edge sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) & ~INTC_BIT(irq), INTC_APR(irq));	/*falling edge */
-		set_irq_handler(irq, handle_edge_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq));	/*edge sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq));	/*falling edge */
+		set_irq_handler(d->irq, handle_edge_irq);
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) & ~INTC_BIT(irq), INTC_ATR(irq));	/*level sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) & ~INTC_BIT(irq), INTC_APR(irq));	/*low level */
-		set_irq_handler(irq, handle_level_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq));	/*level sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq));	/*low level */
+		set_irq_handler(d->irq, handle_level_irq);
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
-		__raw_writel(__raw_readl(INTC_ATR(irq)) & ~INTC_BIT(irq), INTC_ATR(irq));	/*level sensitive */
-		__raw_writel(__raw_readl(INTC_APR(irq)) | INTC_BIT(irq), INTC_APR(irq));	/* high level */
-		set_irq_handler(irq, handle_level_irq);
+		__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq));	/*level sensitive */
+		__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq));	/* high level */
+		set_irq_handler(d->irq, handle_level_irq);
 		break;
 
 	/* IRQ_TYPE_EDGE_BOTH is not supported */
@@ -85,10 +85,10 @@
 }
 
 static struct irq_chip pnx4008_irq_chip = {
-	.ack = pnx4008_mask_ack_irq,
-	.mask = pnx4008_mask_irq,
-	.unmask = pnx4008_unmask_irq,
-	.set_type = pnx4008_set_irq_type,
+	.irq_ack = pnx4008_mask_ack_irq,
+	.irq_mask = pnx4008_mask_irq,
+	.irq_unmask = pnx4008_unmask_irq,
+	.irq_set_type = pnx4008_set_irq_type,
 };
 
 void __init pnx4008_init_irq(void)
@@ -99,14 +99,18 @@
 	for (i = 0; i < NR_IRQS; i++) {
 		set_irq_flags(i, IRQF_VALID);
 		set_irq_chip(i, &pnx4008_irq_chip);
-		pnx4008_set_irq_type(i, pnx4008_irq_type[i]);
+		pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]);
 	}
 
 	/* configure and enable IRQ 0,1,30,31 (cascade interrupts) */
-	pnx4008_set_irq_type(SUB1_IRQ_N, pnx4008_irq_type[SUB1_IRQ_N]);
-	pnx4008_set_irq_type(SUB2_IRQ_N, pnx4008_irq_type[SUB2_IRQ_N]);
-	pnx4008_set_irq_type(SUB1_FIQ_N, pnx4008_irq_type[SUB1_FIQ_N]);
-	pnx4008_set_irq_type(SUB2_FIQ_N, pnx4008_irq_type[SUB2_FIQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB1_IRQ_N),
+			     pnx4008_irq_type[SUB1_IRQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB2_IRQ_N),
+			     pnx4008_irq_type[SUB2_IRQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB1_FIQ_N),
+			     pnx4008_irq_type[SUB1_FIQ_N]);
+	pnx4008_set_irq_type(irq_get_irq_data(SUB2_FIQ_N),
+			     pnx4008_irq_type[SUB2_FIQ_N]);
 
 	/* mask all others */
 	__raw_writel((1 << SUB2_FIQ_N) | (1 << SUB1_FIQ_N) |
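
Note: this and the following board files are one mechanical conversion: irq_chip callbacks now take a struct irq_data * instead of a bare interrupt number, and the methods gain an irq_ prefix (.ack -> .irq_ack, .mask -> .irq_mask, .set_type -> .irq_set_type, and so on). The old number is still available as d->irq, and per-irq chip data comes from irq_data_get_irq_chip_data(). A minimal chip written against the new prototypes, with hypothetical MY_* constants (not any of the chips touched here):

    #include <linux/irq.h>
    #include <linux/io.h>
    #include <linux/bitops.h>
    #include <linux/types.h>

    #define MY_IRQ_BASE 64      /* hypothetical: first Linux irq handled by this chip */
    #define MY_MASK_REG 0x10    /* hypothetical: mask register offset */

    static void my_mask_irq(struct irq_data *d)
    {
        void __iomem *base = irq_data_get_irq_chip_data(d); /* set via set_irq_chip_data() */
        u32 mask = readl(base + MY_MASK_REG);

        writel(mask & ~BIT(d->irq - MY_IRQ_BASE), base + MY_MASK_REG);
    }

    static void my_unmask_irq(struct irq_data *d)
    {
        void __iomem *base = irq_data_get_irq_chip_data(d);
        u32 mask = readl(base + MY_MASK_REG);

        writel(mask | BIT(d->irq - MY_IRQ_BASE), base + MY_MASK_REG);
    }

    static struct irq_chip my_irq_chip = {
        .name       = "MYCHIP",
        .irq_ack    = my_mask_irq,      /* edge sources: mask doubles as ack */
        .irq_mask   = my_mask_irq,
        .irq_unmask = my_unmask_irq,
    };

As in the converted files, each irq is then wired up with set_irq_chip(), set_irq_chip_data() and set_irq_handler().
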
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index ee3c29c..f3e60a0 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -119,7 +119,7 @@
 	       (state == PM_SUSPEND_MEM);
 }
 
-static struct platform_suspend_ops pnx4008_pm_ops = {
+static const struct platform_suspend_ops pnx4008_pm_ops = {
 	.enter = pnx4008_pm_enter,
 	.valid = pnx4008_pm_valid,
 };
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index ccb2d0c..a134a14 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -477,25 +477,25 @@
 /******************************************************************************
  * FPGA IRQ
  ******************************************************************************/
-static void balloon3_mask_irq(unsigned int irq)
+static void balloon3_mask_irq(struct irq_data *d)
 {
-	int balloon3_irq = (irq - BALLOON3_IRQ(0));
+	int balloon3_irq = (d->irq - BALLOON3_IRQ(0));
 	balloon3_irq_enabled &= ~(1 << balloon3_irq);
 	__raw_writel(~balloon3_irq_enabled, BALLOON3_INT_CONTROL_REG);
 }
 
-static void balloon3_unmask_irq(unsigned int irq)
+static void balloon3_unmask_irq(struct irq_data *d)
 {
-	int balloon3_irq = (irq - BALLOON3_IRQ(0));
+	int balloon3_irq = (d->irq - BALLOON3_IRQ(0));
 	balloon3_irq_enabled |= (1 << balloon3_irq);
 	__raw_writel(~balloon3_irq_enabled, BALLOON3_INT_CONTROL_REG);
 }
 
 static struct irq_chip balloon3_irq_chip = {
 	.name		= "FPGA",
-	.ack		= balloon3_mask_irq,
-	.mask		= balloon3_mask_irq,
-	.unmask		= balloon3_unmask_irq,
+	.irq_ack	= balloon3_mask_irq,
+	.irq_mask	= balloon3_mask_irq,
+	.irq_unmask	= balloon3_unmask_irq,
 };
 
 static void balloon3_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -504,8 +504,13 @@
 					balloon3_irq_enabled;
 	do {
 		/* clear useless edge notification */
-		if (desc->chip->ack)
-			desc->chip->ack(BALLOON3_AUX_NIRQ);
+		if (desc->irq_data.chip->irq_ack) {
+			struct irq_data *d;
+
+			d = irq_get_irq_data(BALLOON3_AUX_NIRQ);
+			desc->irq_data.chip->irq_ack(d);
+		}
+
 		while (pending) {
 			irq = BALLOON3_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
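
Note: chained demux handlers used to ack their parent with desc->chip->ack(irq); after the conversion both the chip and its per-irq data live in desc->irq_data, so the idiomatic form is desc->irq_data.chip->irq_ack(&desc->irq_data) (balloon3 looks up the cascade's irq_data explicitly, which amounts to the same thing). A sketch of the converted shape, with a hypothetical status register and irq range:

    #include <linux/irq.h>
    #include <linux/io.h>
    #include <linux/bitops.h>

    #define FIRST_MUXED_IRQ 96              /* hypothetical base of the demuxed range */
    static void __iomem *my_status_reg;     /* assumption: ioremapped status register */

    static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
    {
        unsigned long pending = readl(my_status_reg);

        do {
            /* ack the (edge) cascade interrupt by hand, via its irq_data */
            desc->irq_data.chip->irq_ack(&desc->irq_data);

            while (pending) {
                irq = FIRST_MUXED_IRQ + __ffs(pending);
                generic_handle_irq(irq);
                pending &= pending - 1;     /* clear the bit just handled */
            }
            pending = readl(my_status_reg);
        } while (pending);
    }
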
diff --git a/arch/arm/mach-pxa/clock-pxa3xx.c b/arch/arm/mach-pxa/clock-pxa3xx.c
index 1b08a34..3f864cd 100644
--- a/arch/arm/mach-pxa/clock-pxa3xx.c
+++ b/arch/arm/mach-pxa/clock-pxa3xx.c
@@ -115,7 +115,6 @@
 {
 	unsigned long acsr = ACSR;
 	unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
-	unsigned int smcfs = (acsr >> 23) & 0x7;
 
 	return BASE_CLK * smcfs_mult[(acsr >> 23) & 0x7] /
 			df_clkdiv[(memclkcfg >> 16) & 0x3];
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c
index 0f31305..a2380cd 100644
--- a/arch/arm/mach-pxa/cm-x2xx-pci.c
+++ b/arch/arm/mach-pxa/cm-x2xx-pci.c
@@ -59,7 +59,7 @@
 static void cmx2xx_it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
 	/* clear our parent irq */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	it8152_irq_demux(irq, desc);
 }
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c
index 6b2c800..28f667e 100644
--- a/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/arch/arm/mach-pxa/colibri-evalboard.c
@@ -50,7 +50,7 @@
 			GPIO0_COLIBRI_PXA270_SD_DETECT;
 	if (machine_is_colibri300())	/* PXA300 Colibri */
 		colibri_mci_platform_data.gpio_card_detect =
-			GPIO39_COLIBRI_PXA300_SD_DETECT;
+			GPIO13_COLIBRI_PXA300_SD_DETECT;
 	else				/* PXA320 Colibri */
 		colibri_mci_platform_data.gpio_card_detect =
 			GPIO28_COLIBRI_PXA320_SD_DETECT;
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index fddb16d..66dd81c 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -41,7 +41,7 @@
 	GPIO4_MMC1_DAT1,
 	GPIO5_MMC1_DAT2,
 	GPIO6_MMC1_DAT3,
-	GPIO39_GPIO,	/* SD detect */
+	GPIO13_GPIO,	/* GPIO13_COLIBRI_PXA300_SD_DETECT */
 
 	/* UHC */
 	GPIO0_2_USBH_PEN,
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index d6e15f7..f5d91ef 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -22,7 +22,6 @@
 
 #include <mach/hardware.h>
 #include <asm/system.h>
-#include <asm/pgtable.h>
 #include <asm/mach/map.h>
 #include <asm/mach-types.h>
 
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 6205dc9..a079d8b 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -9,11 +9,13 @@
  * published by the Free Software Foundation.
  */
 
+struct irq_data;
 struct sys_timer;
 
 extern struct sys_timer pxa_timer;
 extern void __init pxa_init_irq(int irq_nr,
-				int (*set_wake)(unsigned int, unsigned int));
+				int (*set_wake)(struct irq_data *,
+						unsigned int));
 extern void __init pxa25x_init_irq(void);
 #ifdef CONFIG_CPU_PXA26x
 extern void __init pxa26x_init_irq(void);
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
index 388a96f..cb4236e 100644
--- a/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/arch/arm/mach-pxa/include/mach/colibri.h
@@ -60,7 +60,7 @@
 #define GPIO113_COLIBRI_PXA270_TS_IRQ	113
 
 /* GPIO definitions for Colibri PXA300/310 */
-#define GPIO39_COLIBRI_PXA300_SD_DETECT	39
+#define GPIO13_COLIBRI_PXA300_SD_DETECT	13
 
 /* GPIO definitions for Colibri PXA320 */
 #define GPIO28_COLIBRI_PXA320_SD_DETECT	28
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 54e91c9..2693e3c 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -53,37 +53,48 @@
 	return !cpu_is_pxa25x();
 }
 
-static void pxa_mask_irq(unsigned int irq)
+static inline void __iomem *irq_base(int i)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	static unsigned long phys_base[] = {
+		0x40d00000,
+		0x40d0009c,
+		0x40d00130,
+	};
+
+	return (void __iomem *)io_p2v(phys_base[i]);
+}
+
+static void pxa_mask_irq(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr &= ~(1 << IRQ_BIT(irq));
+	icmr &= ~(1 << IRQ_BIT(d->irq));
 	__raw_writel(icmr, base + ICMR);
 }
 
-static void pxa_unmask_irq(unsigned int irq)
+static void pxa_unmask_irq(struct irq_data *d)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	void __iomem *base = irq_data_get_irq_chip_data(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr |= 1 << IRQ_BIT(irq);
+	icmr |= 1 << IRQ_BIT(d->irq);
 	__raw_writel(icmr, base + ICMR);
 }
 
 static struct irq_chip pxa_internal_irq_chip = {
 	.name		= "SC",
-	.ack		= pxa_mask_irq,
-	.mask		= pxa_mask_irq,
-	.unmask		= pxa_unmask_irq,
+	.irq_ack	= pxa_mask_irq,
+	.irq_mask	= pxa_mask_irq,
+	.irq_unmask	= pxa_unmask_irq,
 };
 
 /*
  * GPIO IRQs for GPIO 0 and 1
  */
-static int pxa_set_low_gpio_type(unsigned int irq, unsigned int type)
+static int pxa_set_low_gpio_type(struct irq_data *d, unsigned int type)
 {
-	int gpio = irq - IRQ_GPIO0;
+	int gpio = d->irq - IRQ_GPIO0;
 
 	if (__gpio_is_occupied(gpio)) {
 		pr_err("%s failed: GPIO is configured\n", __func__);
@@ -103,31 +114,17 @@
 	return 0;
 }
 
-static void pxa_ack_low_gpio(unsigned int irq)
+static void pxa_ack_low_gpio(struct irq_data *d)
 {
-	GEDR0 = (1 << (irq - IRQ_GPIO0));
-}
-
-static void pxa_mask_low_gpio(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	desc->chip->mask(irq);
-}
-
-static void pxa_unmask_low_gpio(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	desc->chip->unmask(irq);
+	GEDR0 = (1 << (d->irq - IRQ_GPIO0));
 }
 
 static struct irq_chip pxa_low_gpio_chip = {
 	.name		= "GPIO-l",
-	.ack		= pxa_ack_low_gpio,
-	.mask		= pxa_mask_low_gpio,
-	.unmask		= pxa_unmask_low_gpio,
-	.set_type	= pxa_set_low_gpio_type,
+	.irq_ack	= pxa_ack_low_gpio,
+	.irq_mask	= pxa_mask_irq,
+	.irq_unmask	= pxa_unmask_irq,
+	.irq_set_type	= pxa_set_low_gpio_type,
 };
 
 static void __init pxa_init_low_gpio_irq(set_wake_t fn)
@@ -141,22 +138,12 @@
 
 	for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) {
 		set_irq_chip(irq, &pxa_low_gpio_chip);
+		set_irq_chip_data(irq, irq_base(0));
 		set_irq_handler(irq, handle_edge_irq);
 		set_irq_flags(irq, IRQF_VALID);
 	}
 
-	pxa_low_gpio_chip.set_wake = fn;
-}
-
-static inline void __iomem *irq_base(int i)
-{
-	static unsigned long phys_base[] = {
-		0x40d00000,
-		0x40d0009c,
-		0x40d00130,
-	};
-
-	return (void __iomem *)io_p2v(phys_base[i >> 5]);
+	pxa_low_gpio_chip.irq_set_wake = fn;
 }
 
 void __init pxa_init_irq(int irq_nr, set_wake_t fn)
@@ -168,7 +155,7 @@
 	pxa_internal_irq_nr = irq_nr;
 
 	for (n = 0; n < irq_nr; n += 32) {
-		void __iomem *base = irq_base(n);
+		void __iomem *base = irq_base(n >> 5);
 
 		__raw_writel(0, base + ICMR);	/* disable all IRQs */
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
@@ -188,7 +175,7 @@
 	/* only unmasked interrupts kick us out of idle */
 	__raw_writel(1, irq_base(0) + ICCR);
 
-	pxa_internal_irq_chip.set_wake = fn;
+	pxa_internal_irq_chip.irq_set_wake = fn;
 	pxa_init_low_gpio_irq(fn);
 }
 
@@ -200,7 +187,7 @@
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr; i += 32) {
+	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
 		void __iomem *base = irq_base(i);
 
 		saved_icmr[i] = __raw_readl(base + ICMR);
@@ -219,14 +206,14 @@
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr; i += 32) {
+	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
 		void __iomem *base = irq_base(i);
 
 		__raw_writel(saved_icmr[i], base + ICMR);
 		__raw_writel(0, base + ICLR);
 	}
 
-	if (!cpu_is_pxa25x())
+	if (cpu_has_ipr())
 		for (i = 0; i < pxa_internal_irq_nr; i++)
 			__raw_writel(saved_ipr[i], IRQ_BASE + IPR(i));
 
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 8ab62a6..c9a3e77 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -95,9 +95,9 @@
 
 static unsigned int lpd270_irq_enabled;
 
-static void lpd270_mask_irq(unsigned int irq)
+static void lpd270_mask_irq(struct irq_data *d)
 {
-	int lpd270_irq = irq - LPD270_IRQ(0);
+	int lpd270_irq = d->irq - LPD270_IRQ(0);
 
 	__raw_writew(~(1 << lpd270_irq), LPD270_INT_STATUS);
 
@@ -105,9 +105,9 @@
 	__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
 }
 
-static void lpd270_unmask_irq(unsigned int irq)
+static void lpd270_unmask_irq(struct irq_data *d)
 {
-	int lpd270_irq = irq - LPD270_IRQ(0);
+	int lpd270_irq = d->irq - LPD270_IRQ(0);
 
 	lpd270_irq_enabled |= 1 << lpd270_irq;
 	__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
@@ -115,9 +115,9 @@
 
 static struct irq_chip lpd270_irq_chip = {
 	.name		= "CPLD",
-	.ack		= lpd270_mask_irq,
-	.mask		= lpd270_mask_irq,
-	.unmask		= lpd270_unmask_irq,
+	.irq_ack	= lpd270_mask_irq,
+	.irq_mask	= lpd270_mask_irq,
+	.irq_unmask	= lpd270_unmask_irq,
 };
 
 static void lpd270_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -126,7 +126,8 @@
 
 	pending = __raw_readw(LPD270_INT_STATUS) & lpd270_irq_enabled;
 	do {
-		desc->chip->ack(irq);	/* clear useless edge notification */
+		/* clear useless edge notification */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = LPD270_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 3072dbe..dca20de 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -122,15 +122,15 @@
 
 static unsigned long lubbock_irq_enabled;
 
-static void lubbock_mask_irq(unsigned int irq)
+static void lubbock_mask_irq(struct irq_data *d)
 {
-	int lubbock_irq = (irq - LUBBOCK_IRQ(0));
+	int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
 	LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
 }
 
-static void lubbock_unmask_irq(unsigned int irq)
+static void lubbock_unmask_irq(struct irq_data *d)
 {
-	int lubbock_irq = (irq - LUBBOCK_IRQ(0));
+	int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
 	/* the irq can be acknowledged only if deasserted, so it's done here */
 	LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
 	LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
@@ -138,16 +138,17 @@
 
 static struct irq_chip lubbock_irq_chip = {
 	.name		= "FPGA",
-	.ack		= lubbock_mask_irq,
-	.mask		= lubbock_mask_irq,
-	.unmask		= lubbock_unmask_irq,
+	.irq_ack	= lubbock_mask_irq,
+	.irq_mask	= lubbock_mask_irq,
+	.irq_unmask	= lubbock_unmask_irq,
 };
 
 static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
 	do {
-		desc->chip->ack(irq);	/* clear our parent irq */
+		/* clear our parent irq */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = LUBBOCK_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 740c035..d4b6f23 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -123,15 +123,15 @@
 
 static unsigned long mainstone_irq_enabled;
 
-static void mainstone_mask_irq(unsigned int irq)
+static void mainstone_mask_irq(struct irq_data *d)
 {
-	int mainstone_irq = (irq - MAINSTONE_IRQ(0));
+	int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
 	MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
 }
 
-static void mainstone_unmask_irq(unsigned int irq)
+static void mainstone_unmask_irq(struct irq_data *d)
 {
-	int mainstone_irq = (irq - MAINSTONE_IRQ(0));
+	int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
 	/* the irq can be acknowledged only if deasserted, so it's done here */
 	MST_INTSETCLR &= ~(1 << mainstone_irq);
 	MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
@@ -139,16 +139,17 @@
 
 static struct irq_chip mainstone_irq_chip = {
 	.name		= "FPGA",
-	.ack		= mainstone_mask_irq,
-	.mask		= mainstone_mask_irq,
-	.unmask		= mainstone_unmask_irq,
+	.irq_ack	= mainstone_mask_irq,
+	.irq_mask	= mainstone_mask_irq,
+	.irq_unmask	= mainstone_unmask_irq,
 };
 
 static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
 	do {
-		desc->chip->ack(irq);	/* clear useless edge notification */
+		/* clear useless edge notification */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = MAINSTONE_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index 462167a..cdf7f41 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -337,7 +337,7 @@
 }
 #endif
 
-/* USB Open Host Controler Interface */
+/* USB Open Host Controller Interface */
 static struct pxaohci_platform_data mxm_8x10_ohci_platform_data = {
 	.port_mode = PMM_NPS_MODE,
 	.flags = ENABLE_PORT_ALL
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 405b92a..35572c4 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -323,7 +323,7 @@
 	.pwm_id		= 0,
 	.max_brightness	= 0xfe,
 	.dft_brightness	= 0x7e,
-	.pwm_period_ns	= 3500,
+	.pwm_period_ns	= 3500 * 1024,
 	.init		= palm27x_backlight_init,
 	.notify		= palm27x_backlight_notify,
 	.exit		= palm27x_backlight_exit,
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index f33647a..90820fa 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -241,23 +241,23 @@
 
 static unsigned long pcm990_irq_enabled;
 
-static void pcm990_mask_ack_irq(unsigned int irq)
+static void pcm990_mask_ack_irq(struct irq_data *d)
 {
-	int pcm990_irq = (irq - PCM027_IRQ(0));
+	int pcm990_irq = (d->irq - PCM027_IRQ(0));
 	PCM990_INTMSKENA = (pcm990_irq_enabled &= ~(1 << pcm990_irq));
 }
 
-static void pcm990_unmask_irq(unsigned int irq)
+static void pcm990_unmask_irq(struct irq_data *d)
 {
-	int pcm990_irq = (irq - PCM027_IRQ(0));
+	int pcm990_irq = (d->irq - PCM027_IRQ(0));
 	/* the irq can be acknowledged only if deasserted, so it's done here */
 	PCM990_INTSETCLR |= 1 << pcm990_irq;
 	PCM990_INTMSKENA  = (pcm990_irq_enabled |= (1 << pcm990_irq));
 }
 
 static struct irq_chip pcm990_irq_chip = {
-	.mask_ack	= pcm990_mask_ack_irq,
-	.unmask		= pcm990_unmask_irq,
+	.irq_mask_ack	= pcm990_mask_ack_irq,
+	.irq_unmask	= pcm990_unmask_irq,
 };
 
 static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -265,7 +265,8 @@
 	unsigned long pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled;
 
 	do {
-		desc->chip->ack(irq);	/* clear our parent IRQ */
+		/* clear our parent IRQ */
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 		if (likely(pending)) {
 			irq = PCM027_IRQ(0) + __ffs(pending);
 			generic_handle_irq(irq);
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 166c15f..1807c9a 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -33,7 +33,7 @@
 #endif
 
 	/* skip registers saving for standby */
-	if (state != PM_SUSPEND_STANDBY) {
+	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) {
 		pxa_cpu_pm_fns->save(sleep_save);
 		/* before sleeping, calculate and save a checksum */
 		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
@@ -44,7 +44,7 @@
 	pxa_cpu_pm_fns->enter(state);
 	cpu_init();
 
-	if (state != PM_SUSPEND_STANDBY) {
+	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
 		/* after sleeping, validate the checksum */
 		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
 			checksum += sleep_save[i];
@@ -96,7 +96,7 @@
 		pxa_cpu_pm_fns->finish();
 }
 
-static struct platform_suspend_ops pxa_pm_ops = {
+static const struct platform_suspend_ops pxa_pm_ops = {
 	.valid		= pxa_pm_valid,
 	.enter		= pxa_pm_enter,
 	.prepare	= pxa_pm_prepare,
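
Note: suspend_set_ops() now takes a const struct platform_suspend_ops *, which is why the ops tables here and in the pnx4008, sharpsl and OMAP files above can be marked const; the same pm.c hunk also guards the optional save/restore hooks with NULL checks. A minimal const-ops sketch with hypothetical callbacks:

    #include <linux/init.h>
    #include <linux/suspend.h>

    static int my_pm_valid(suspend_state_t state)
    {
        return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
    }

    static int my_pm_enter(suspend_state_t state)
    {
        /* put the SoC into the requested low-power state here */
        return 0;
    }

    static const struct platform_suspend_ops my_pm_ops = {
        .valid  = my_pm_valid,
        .enter  = my_pm_enter,
    };

    static int __init my_pm_init(void)
    {
        suspend_set_ops(&my_pm_ops);    /* accepts a const pointer */
        return 0;
    }
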
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 3f5241c..b166b1d 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
 #include <linux/sysdev.h>
+#include <linux/irq.h>
 
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
@@ -282,15 +283,15 @@
 /* PXA25x: supports wakeup from GPIO0..GPIO15 and RTC alarm
  */
 
-static int pxa25x_set_wake(unsigned int irq, unsigned int on)
+static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(irq);
+	int gpio = IRQ_TO_GPIO(d->irq);
 	uint32_t mask = 0;
 
 	if (gpio >= 0 && gpio < 85)
 		return gpio_set_wake(gpio, on);
 
-	if (irq == IRQ_RTCAlrm) {
+	if (d->irq == IRQ_RTCAlrm) {
 		mask = PWER_RTC;
 		goto set_pwer;
 	}
@@ -346,6 +347,7 @@
 	&pxa25x_device_assp,
 	&pxa25x_device_pwm0,
 	&pxa25x_device_pwm1,
+	&pxa_device_asoc_platform,
 };
 
 static struct sys_device pxa25x_sysdev[] = {
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index b2130b7..987301f 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/sysdev.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
@@ -343,18 +344,18 @@
 /* PXA27x:  Various gpios can issue wakeup events.  This logic only
  * handles the simple cases, not the WEMUX2 and WEMUX3 options
  */
-static int pxa27x_set_wake(unsigned int irq, unsigned int on)
+static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(irq);
+	int gpio = IRQ_TO_GPIO(d->irq);
 	uint32_t mask;
 
 	if (gpio >= 0 && gpio < 128)
 		return gpio_set_wake(gpio, on);
 
-	if (irq == IRQ_KEYPAD)
+	if (d->irq == IRQ_KEYPAD)
 		return keypad_set_wake(on);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_RTCAlrm:
 		mask = PWER_RTC;
 		break;
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index e14818f..a7a19e1 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -229,11 +229,11 @@
 	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
 }
 
-static int pxa3xx_set_wake(unsigned int irq, unsigned int on)
+static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
 {
 	unsigned long flags, mask = 0;
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_SSP3:
 		mask = ADXER_MFP_WSSP3;
 		break;
@@ -322,40 +322,40 @@
 #define pxa3xx_set_wake	NULL
 #endif
 
-static void pxa_ack_ext_wakeup(unsigned int irq)
+static void pxa_ack_ext_wakeup(struct irq_data *d)
 {
-	PECR |= PECR_IS(irq - IRQ_WAKEUP0);
+	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
 }
 
-static void pxa_mask_ext_wakeup(unsigned int irq)
+static void pxa_mask_ext_wakeup(struct irq_data *d)
 {
-	ICMR2 &= ~(1 << ((irq - PXA_IRQ(0)) & 0x1f));
-	PECR &= ~PECR_IE(irq - IRQ_WAKEUP0);
+	ICMR2 &= ~(1 << ((d->irq - PXA_IRQ(0)) & 0x1f));
+	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
 }
 
-static void pxa_unmask_ext_wakeup(unsigned int irq)
+static void pxa_unmask_ext_wakeup(struct irq_data *d)
 {
-	ICMR2 |= 1 << ((irq - PXA_IRQ(0)) & 0x1f);
-	PECR |= PECR_IE(irq - IRQ_WAKEUP0);
+	ICMR2 |= 1 << ((d->irq - PXA_IRQ(0)) & 0x1f);
+	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
 }
 
-static int pxa_set_ext_wakeup_type(unsigned int irq, unsigned int flow_type)
+static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
 {
 	if (flow_type & IRQ_TYPE_EDGE_RISING)
-		PWER |= 1 << (irq - IRQ_WAKEUP0);
+		PWER |= 1 << (d->irq - IRQ_WAKEUP0);
 
 	if (flow_type & IRQ_TYPE_EDGE_FALLING)
-		PWER |= 1 << (irq - IRQ_WAKEUP0 + 2);
+		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);
 
 	return 0;
 }
 
 static struct irq_chip pxa_ext_wakeup_chip = {
 	.name		= "WAKEUP",
-	.ack		= pxa_ack_ext_wakeup,
-	.mask		= pxa_mask_ext_wakeup,
-	.unmask		= pxa_unmask_ext_wakeup,
-	.set_type	= pxa_set_ext_wakeup_type,
+	.irq_ack	= pxa_ack_ext_wakeup,
+	.irq_mask	= pxa_mask_ext_wakeup,
+	.irq_unmask	= pxa_unmask_ext_wakeup,
+	.irq_set_type	= pxa_set_ext_wakeup_type,
 };
 
 static void __init pxa_init_ext_wakeup_irq(set_wake_t fn)
@@ -368,7 +368,7 @@
 		set_irq_flags(irq, IRQF_VALID);
 	}
 
-	pxa_ext_wakeup_chip.set_wake = fn;
+	pxa_ext_wakeup_chip.irq_set_wake = fn;
 }
 
 void __init pxa3xx_init_irq(void)
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index e68d46d..785880f 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -869,7 +869,7 @@
 }
 
 #ifdef CONFIG_PM
-static struct platform_suspend_ops sharpsl_pm_ops = {
+static const struct platform_suspend_ops sharpsl_pm_ops = {
 	.prepare	= pxa_pm_prepare,
 	.finish		= pxa_pm_finish,
 	.enter		= corgi_pxa_pm_enter,
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 0bc9387..b49a2c2 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -25,6 +25,7 @@
 #include <linux/spi/corgi_lcd.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
+#include <linux/mtd/physmap.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/regulator/machine.h>
 #include <linux/io.h>
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
index c31e601..b9b1e5c 100644
--- a/arch/arm/mach-pxa/tosa-bt.c
+++ b/arch/arm/mach-pxa/tosa-bt.c
@@ -81,8 +81,6 @@
 		goto err_rfk_alloc;
 	}
 
-	rfkill_set_led_trigger_name(rfk, "tosa-bt");
-
 	rc = rfkill_register(rfk);
 	if (rc)
 		goto err_rfkill;
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index af152e7..f2582ec 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -875,6 +875,11 @@
 	.dev.platform_data = &sharpsl_rom_data,
 };
 
+static struct platform_device wm9712_device = {
+	.name	= "wm9712-codec",
+	.id	= -1,
+};
+
 static struct platform_device *devices[] __initdata = {
 	&tosascoop_device,
 	&tosascoop_jc_device,
@@ -885,6 +890,7 @@
 	&tosaled_device,
 	&tosa_bt_device,
 	&sharpsl_rom_device,
+	&wm9712_device,
 };
 
 static void tosa_poweroff(void)
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index de69b20..49eeeab 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -249,9 +249,9 @@
 	return viper_isa_irqs[bit] + PXA_ISA_IRQ(0);
 }
 
-static void viper_ack_irq(unsigned int irq)
+static void viper_ack_irq(struct irq_data *d)
 {
-	int viper_irq = viper_irq_to_bitmask(irq);
+	int viper_irq = viper_irq_to_bitmask(d->irq);
 
 	if (viper_irq & 0xff)
 		VIPER_LO_IRQ_STATUS = viper_irq;
@@ -259,14 +259,14 @@
 		VIPER_HI_IRQ_STATUS = (viper_irq >> 8);
 }
 
-static void viper_mask_irq(unsigned int irq)
+static void viper_mask_irq(struct irq_data *d)
 {
-	viper_irq_enabled_mask &= ~(viper_irq_to_bitmask(irq));
+	viper_irq_enabled_mask &= ~(viper_irq_to_bitmask(d->irq));
 }
 
-static void viper_unmask_irq(unsigned int irq)
+static void viper_unmask_irq(struct irq_data *d)
 {
-	viper_irq_enabled_mask |= viper_irq_to_bitmask(irq);
+	viper_irq_enabled_mask |= viper_irq_to_bitmask(d->irq);
 }
 
 static inline unsigned long viper_irq_pending(void)
@@ -283,7 +283,7 @@
 	do {
 		/* we're in a chained irq handler,
 		 * so ack the interrupt by hand */
-		desc->chip->ack(irq);
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 		if (likely(pending)) {
 			irq = viper_bit_to_irq(__ffs(pending));
@@ -294,10 +294,10 @@
 }
 
 static struct irq_chip viper_irq_chip = {
-	.name	= "ISA",
-	.ack	= viper_ack_irq,
-	.mask	= viper_mask_irq,
-	.unmask	= viper_unmask_irq
+	.name		= "ISA",
+	.irq_ack	= viper_ack_irq,
+	.irq_mask	= viper_mask_irq,
+	.irq_unmask	= viper_unmask_irq
 };
 
 static void __init viper_init_irq(void)
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index bf034c7..f4b053b 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -83,19 +83,19 @@
 	return zeus_isa_irqs[bit] + PXA_ISA_IRQ(0);
 }
 
-static void zeus_ack_irq(unsigned int irq)
+static void zeus_ack_irq(struct irq_data *d)
 {
-	__raw_writew(zeus_irq_to_bitmask(irq), ZEUS_CPLD_ISA_IRQ);
+	__raw_writew(zeus_irq_to_bitmask(d->irq), ZEUS_CPLD_ISA_IRQ);
 }
 
-static void zeus_mask_irq(unsigned int irq)
+static void zeus_mask_irq(struct irq_data *d)
 {
-	zeus_irq_enabled_mask &= ~(zeus_irq_to_bitmask(irq));
+	zeus_irq_enabled_mask &= ~(zeus_irq_to_bitmask(d->irq));
 }
 
-static void zeus_unmask_irq(unsigned int irq)
+static void zeus_unmask_irq(struct irq_data *d)
 {
-	zeus_irq_enabled_mask |= zeus_irq_to_bitmask(irq);
+	zeus_irq_enabled_mask |= zeus_irq_to_bitmask(d->irq);
 }
 
 static inline unsigned long zeus_irq_pending(void)
@@ -111,7 +111,7 @@
 	do {
 		/* we're in a chained irq handler,
 		 * so ack the interrupt by hand */
-		desc->chip->ack(gpio_to_irq(ZEUS_ISA_GPIO));
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 		if (likely(pending)) {
 			irq = zeus_bit_to_irq(__ffs(pending));
@@ -122,10 +122,10 @@
 }
 
 static struct irq_chip zeus_irq_chip = {
-	.name	= "ISA",
-	.ack	= zeus_ack_irq,
-	.mask	= zeus_mask_irq,
-	.unmask	= zeus_unmask_irq,
+	.name		= "ISA",
+	.irq_ack	= zeus_ack_irq,
+	.irq_mask	= zeus_mask_irq,
+	.irq_unmask	= zeus_unmask_irq,
 };
 
 static void __init zeus_init_irq(void)
@@ -830,8 +830,8 @@
 	pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f));
 
 	/* Fix timings for dm9000s (CS1/CS2)*/
-	msc0 = __raw_readl(MSC0) & 0x0000ffff | (dm9000_msc << 16);
-	msc1 = __raw_readl(MSC1) & 0xffff0000 | dm9000_msc;
+	msc0 = (__raw_readl(MSC0) & 0x0000ffff) | (dm9000_msc << 16);
+	msc1 = (__raw_readl(MSC1) & 0xffff0000) | dm9000_msc;
 	__raw_writel(msc0, MSC0);
 	__raw_writel(msc1, MSC1);
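
Note: since '&' binds tighter than '|' in C, the old expressions already grouped the intended way; the added parentheses document the grouping and keep compilers from suggesting parentheses here, rather than changing behaviour. A tiny standalone demonstration (plain userspace C, values made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 0x12345678u, dm9000_msc = 0x7ff8u;

        /* '&' has higher precedence than '|', so both forms group identically */
        unsigned int implicit = reg & 0x0000ffffu | (dm9000_msc << 16);
        unsigned int grouped  = (reg & 0x0000ffffu) | (dm9000_msc << 16);

        printf("%#x %#x\n", implicit, grouped);  /* prints the same value twice */
        return 0;
    }
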
 
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index b4575ae..7ca138a 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -2,52 +2,56 @@
 	depends on ARCH_REALVIEW
 
 config MACH_REALVIEW_EB
-	bool "Support RealView/EB platform"
+	bool "Support RealView(R) Emulation Baseboard"
 	select ARM_GIC
 	help
-	  Include support for the ARM(R) RealView Emulation Baseboard platform.
+	  Include support for the ARM(R) RealView(R) Emulation Baseboard
+	  platform.
 
 config REALVIEW_EB_A9MP
-	bool "Support Multicore Cortex-A9"
+	bool "Support Multicore Cortex-A9 Tile"
 	depends on MACH_REALVIEW_EB
 	select CPU_V7
 	help
-	  Enable support for the Cortex-A9MPCore tile on the Realview platform.
+	  Enable support for the Cortex-A9MPCore tile fitted to the
+	  Realview(R) Emulation Baseboard platform.
 
 config REALVIEW_EB_ARM11MP
-	bool "Support ARM11MPCore tile"
+	bool "Support ARM11MPCore Tile"
 	depends on MACH_REALVIEW_EB
 	select CPU_V6
 	select ARCH_HAS_BARRIERS if SMP
 	help
-	  Enable support for the ARM11MPCore tile on the Realview platform.
+	  Enable support for the ARM11MPCore tile fitted to the Realview(R)
+	  Emulation Baseboard platform.
 
 config REALVIEW_EB_ARM11MP_REVB
-	bool "Support ARM11MPCore RevB tile"
+	bool "Support ARM11MPCore RevB Tile"
 	depends on REALVIEW_EB_ARM11MP
 	help
-	  Enable support for the ARM11MPCore RevB tile on the Realview
-	  platform. Since there are device address differences, a
-	  kernel built with this option enabled is not compatible with
-	  other revisions of the ARM11MPCore tile.
+	  Enable support for the ARM11MPCore Revision B tile on the
+	  Realview(R) Emulation Baseboard platform. Since there are device
+	  address differences, a kernel built with this option enabled is
+	  not compatible with other revisions of the ARM11MPCore tile.
 
 config MACH_REALVIEW_PB11MP
-	bool "Support RealView/PB11MPCore platform"
+	bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
 	select CPU_V6
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	select ARCH_HAS_BARRIERS if SMP
 	help
-	  Include support for the ARM(R) RealView MPCore Platform Baseboard.
-	  PB11MPCore is a platform with an on-board ARM11MPCore and has
+	  Include support for the ARM(R) RealView(R) Platform Baseboard for
+	  the ARM11MPCore.  This platform has an on-board ARM11MPCore and has
 	  support for PCI-E and Compact Flash.
 
 config MACH_REALVIEW_PB1176
-	bool "Support RealView/PB1176 platform"
+	bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
 	select CPU_V6
 	select ARM_GIC
 	help
-	  Include support for the ARM(R) RealView ARM1176 Platform Baseboard.
+	  Include support for the ARM(R) RealView(R) Platform Baseboard for
+	  ARM1176JZF-S.
 
 config REALVIEW_PB1176_SECURE_FLASH
 	bool "Allow access to the secure flash memory block"
@@ -59,23 +63,24 @@
 	  block (64MB @ 0x3c000000) is required.
 
 config MACH_REALVIEW_PBA8
-	bool "Support RealView/PB-A8 platform"
+	bool "Support RealView(R) Platform Baseboard for Cortex(tm)-A8 platform"
 	select CPU_V7
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	help
-	  Include support for the ARM(R) RealView Cortex-A8 Platform Baseboard.
-	  PB-A8 is a platform with an on-board Cortex-A8 and has support for
-	  PCI-E and Compact Flash.
+	  Include support for the ARM(R) RealView Platform Baseboard for
+	  Cortex(tm)-A8.  This platform has an on-board Cortex-A8 and has
+	  support for PCI-E and Compact Flash.
 
 config MACH_REALVIEW_PBX
-	bool "Support RealView/PBX platform"
+	bool "Support RealView(R) Platform Baseboard Explore"
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
 	select ZONE_DMA if SPARSEMEM
 	help
-	  Include support for the ARM(R) RealView PBX platform.
+	  Include support for the ARM(R) RealView(R) Platform Baseboard
+	  Explore.
 
 config REALVIEW_HIGH_PHYS_OFFSET
 	bool "High physical base address for the RealView platform"
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index a22bf67..6959d13 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -41,7 +41,7 @@
  * observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void __cpuinit write_pen_release(int val)
 {
 	pen_release = val;
 	smp_wmb();
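
Note: write_pen_release() is only called from CPU bring-up code that is already __cpuinit, so giving it the same annotation keeps modpost from flagging a section mismatch and lets the text be discarded after boot when CPU hotplug is not configured. A tiny illustration of keeping caller and callee annotations in agreement (hypothetical helpers, not the RealView code):

    #include <linux/init.h>

    /* both sides carry __cpuinit, so no section-mismatch warning is generated */
    static void __cpuinit record_release_value(int *slot, int val)
    {
        *slot = val;
    }

    static void __cpuinit bring_up_secondary(int *slot, int cpu)
    {
        record_release_value(slot, cpu);
    }
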
diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c
index 9dd15d6..d29cd9b 100644
--- a/arch/arm/mach-rpc/irq.c
+++ b/arch/arm/mach-rpc/irq.c
@@ -6,110 +6,110 @@
 #include <asm/hardware/iomd.h>
 #include <asm/irq.h>
 
-static void iomd_ack_irq_a(unsigned int irq)
+static void iomd_ack_irq_a(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << irq;
+	mask = 1 << d->irq;
 	val = iomd_readb(IOMD_IRQMASKA);
 	iomd_writeb(val & ~mask, IOMD_IRQMASKA);
 	iomd_writeb(mask, IOMD_IRQCLRA);
 }
 
-static void iomd_mask_irq_a(unsigned int irq)
+static void iomd_mask_irq_a(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << irq;
+	mask = 1 << d->irq;
 	val = iomd_readb(IOMD_IRQMASKA);
 	iomd_writeb(val & ~mask, IOMD_IRQMASKA);
 }
 
-static void iomd_unmask_irq_a(unsigned int irq)
+static void iomd_unmask_irq_a(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << irq;
+	mask = 1 << d->irq;
 	val = iomd_readb(IOMD_IRQMASKA);
 	iomd_writeb(val | mask, IOMD_IRQMASKA);
 }
 
 static struct irq_chip iomd_a_chip = {
-	.ack	= iomd_ack_irq_a,
-	.mask	= iomd_mask_irq_a,
-	.unmask = iomd_unmask_irq_a,
+	.irq_ack	= iomd_ack_irq_a,
+	.irq_mask	= iomd_mask_irq_a,
+	.irq_unmask	= iomd_unmask_irq_a,
 };
 
-static void iomd_mask_irq_b(unsigned int irq)
+static void iomd_mask_irq_b(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_IRQMASKB);
 	iomd_writeb(val & ~mask, IOMD_IRQMASKB);
 }
 
-static void iomd_unmask_irq_b(unsigned int irq)
+static void iomd_unmask_irq_b(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_IRQMASKB);
 	iomd_writeb(val | mask, IOMD_IRQMASKB);
 }
 
 static struct irq_chip iomd_b_chip = {
-	.ack	= iomd_mask_irq_b,
-	.mask	= iomd_mask_irq_b,
-	.unmask = iomd_unmask_irq_b,
+	.irq_ack	= iomd_mask_irq_b,
+	.irq_mask	= iomd_mask_irq_b,
+	.irq_unmask	= iomd_unmask_irq_b,
 };
 
-static void iomd_mask_irq_dma(unsigned int irq)
+static void iomd_mask_irq_dma(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_DMAMASK);
 	iomd_writeb(val & ~mask, IOMD_DMAMASK);
 }
 
-static void iomd_unmask_irq_dma(unsigned int irq)
+static void iomd_unmask_irq_dma(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_DMAMASK);
 	iomd_writeb(val | mask, IOMD_DMAMASK);
 }
 
 static struct irq_chip iomd_dma_chip = {
-	.ack	= iomd_mask_irq_dma,
-	.mask	= iomd_mask_irq_dma,
-	.unmask = iomd_unmask_irq_dma,
+	.irq_ack	= iomd_mask_irq_dma,
+	.irq_mask	= iomd_mask_irq_dma,
+	.irq_unmask	= iomd_unmask_irq_dma,
 };
 
-static void iomd_mask_irq_fiq(unsigned int irq)
+static void iomd_mask_irq_fiq(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_FIQMASK);
 	iomd_writeb(val & ~mask, IOMD_FIQMASK);
 }
 
-static void iomd_unmask_irq_fiq(unsigned int irq)
+static void iomd_unmask_irq_fiq(struct irq_data *d)
 {
 	unsigned int val, mask;
 
-	mask = 1 << (irq & 7);
+	mask = 1 << (d->irq & 7);
 	val = iomd_readb(IOMD_FIQMASK);
 	iomd_writeb(val | mask, IOMD_FIQMASK);
 }
 
 static struct irq_chip iomd_fiq_chip = {
-	.ack	= iomd_mask_irq_fiq,
-	.mask	= iomd_mask_irq_fiq,
-	.unmask = iomd_unmask_irq_fiq,
+	.irq_ack	= iomd_mask_irq_fiq,
+	.irq_mask	= iomd_mask_irq_fiq,
+	.irq_unmask	= iomd_unmask_irq_fiq,
 };
 
 void __init rpc_init_irq(void)
diff --git a/arch/arm/mach-s3c2410/bast-irq.c b/arch/arm/mach-s3c2410/bast-irq.c
index 217b102..606cb6b 100644
--- a/arch/arm/mach-s3c2410/bast-irq.c
+++ b/arch/arm/mach-s3c2410/bast-irq.c
@@ -75,38 +75,38 @@
 static unsigned char bast_pc104_irqs[] = { 3, 5, 7, 10 };
 
 static void
-bast_pc104_mask(unsigned int irqno)
+bast_pc104_mask(struct irq_data *data)
 {
 	unsigned long temp;
 
 	temp = __raw_readb(BAST_VA_PC104_IRQMASK);
-	temp &= ~bast_pc104_irqmasks[irqno];
+	temp &= ~bast_pc104_irqmasks[data->irq];
 	__raw_writeb(temp, BAST_VA_PC104_IRQMASK);
 }
 
 static void
-bast_pc104_maskack(unsigned int irqno)
+bast_pc104_maskack(struct irq_data *data)
 {
 	struct irq_desc *desc = irq_desc + IRQ_ISA;
 
-	bast_pc104_mask(irqno);
-	desc->chip->ack(IRQ_ISA);
+	bast_pc104_mask(data);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 }
 
 static void
-bast_pc104_unmask(unsigned int irqno)
+bast_pc104_unmask(struct irq_data *data)
 {
 	unsigned long temp;
 
 	temp = __raw_readb(BAST_VA_PC104_IRQMASK);
-	temp |= bast_pc104_irqmasks[irqno];
+	temp |= bast_pc104_irqmasks[data->irq];
 	__raw_writeb(temp, BAST_VA_PC104_IRQMASK);
 }
 
 static struct irq_chip  bast_pc104_chip = {
-	.mask	     = bast_pc104_mask,
-	.unmask	     = bast_pc104_unmask,
-	.ack	     = bast_pc104_maskack
+	.irq_mask	= bast_pc104_mask,
+	.irq_unmask	= bast_pc104_unmask,
+	.irq_ack	= bast_pc104_maskack
 };
 
 static void
@@ -123,7 +123,7 @@
 		/* ack if we get an irq with nothing (ie, startup) */
 
 		desc = irq_desc + IRQ_ISA;
-		desc->chip->ack(IRQ_ISA);
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 	} else {
 		/* handle the IRQ */
 
diff --git a/arch/arm/mach-s3c2410/include/mach/irqs.h b/arch/arm/mach-s3c2410/include/mach/irqs.h
index 11bb0f0..e5a68ea 100644
--- a/arch/arm/mach-s3c2410/include/mach/irqs.h
+++ b/arch/arm/mach-s3c2410/include/mach/irqs.h
@@ -152,8 +152,8 @@
 
 #define IRQ_S3C2416_HSMMC0	S3C2410_IRQ(21)		/* S3C2416/S3C2450 */
 
-#define IRQ_HSMMC0		IRQ_S3C2443_HSMMC
-#define IRQ_HSMMC1		IRQ_S3C2416_HSMMC0
+#define IRQ_HSMMC0		IRQ_S3C2416_HSMMC0
+#define IRQ_HSMMC1		IRQ_S3C2443_HSMMC
 
 #define IRQ_S3C2443_LCD1	S3C2410_IRQSUB(14)
 #define IRQ_S3C2443_LCD2	S3C2410_IRQSUB(15)
diff --git a/arch/arm/mach-s3c2410/include/mach/map.h b/arch/arm/mach-s3c2410/include/mach/map.h
index cd3983a..25bbf5a 100644
--- a/arch/arm/mach-s3c2410/include/mach/map.h
+++ b/arch/arm/mach-s3c2410/include/mach/map.h
@@ -112,8 +112,8 @@
 #define S3C_PA_IIC          S3C2410_PA_IIC
 #define S3C_PA_UART	    S3C24XX_PA_UART
 #define S3C_PA_USBHOST	S3C2410_PA_USBHOST
-#define S3C_PA_HSMMC0	    S3C2443_PA_HSMMC
-#define S3C_PA_HSMMC1	    S3C2416_PA_HSMMC0
+#define S3C_PA_HSMMC0	    S3C2416_PA_HSMMC0
+#define S3C_PA_HSMMC1	    S3C2443_PA_HSMMC
 #define S3C_PA_WDT	    S3C2410_PA_WATCHDOG
 #define S3C_PA_NAND	    S3C24XX_PA_NAND
 
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
index 101aeea..44494a5 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
@@ -86,6 +86,7 @@
 #define S3C2443_HCLKCON_LCDC		(1<<9)
 #define S3C2443_HCLKCON_USBH		(1<<11)
 #define S3C2443_HCLKCON_USBD		(1<<12)
+#define S3C2416_HCLKCON_HSMMC0		(1<<15)
 #define S3C2443_HCLKCON_HSMMC		(1<<16)
 #define S3C2443_HCLKCON_CFC		(1<<17)
 #define S3C2443_HCLKCON_SSMC		(1<<18)
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index d7ada8c..1a81fe1 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -387,7 +387,7 @@
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
 	&s3c_device_iis,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c_device_usbgadget,
 	&h1940_device_leds,
 	&h1940_device_bluetooth,
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c
index 6000ca9..eddb52b 100644
--- a/arch/arm/mach-s3c2412/irq.c
+++ b/arch/arm/mach-s3c2412/irq.c
@@ -49,9 +49,9 @@
 */
 
 static void
-s3c2412_irq_mask(unsigned int irqno)
+s3c2412_irq_mask(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2410_INTMSK);
@@ -62,9 +62,9 @@
 }
 
 static inline void
-s3c2412_irq_ack(unsigned int irqno)
+s3c2412_irq_ack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 
 	__raw_writel(bitval, S3C2412_EINTPEND);
 	__raw_writel(bitval, S3C2410_SRCPND);
@@ -72,9 +72,9 @@
 }
 
 static inline void
-s3c2412_irq_maskack(unsigned int irqno)
+s3c2412_irq_maskack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2410_INTMSK);
@@ -89,9 +89,9 @@
 }
 
 static void
-s3c2412_irq_unmask(unsigned int irqno)
+s3c2412_irq_unmask(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2412_EINTMASK);
@@ -102,11 +102,11 @@
 }
 
 static struct irq_chip s3c2412_irq_eint0t4 = {
-	.ack	   = s3c2412_irq_ack,
-	.mask	   = s3c2412_irq_mask,
-	.unmask	   = s3c2412_irq_unmask,
-	.set_wake  = s3c_irq_wake,
-	.set_type  = s3c_irqext_type,
+	.irq_ack	= s3c2412_irq_ack,
+	.irq_mask	= s3c2412_irq_mask,
+	.irq_unmask	= s3c2412_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake,
+	.irq_set_type	= s3c_irqext_type,
 };
 
 #define INTBIT(x)	(1 << ((x) - S3C2410_IRQSUB(0)))
@@ -132,29 +132,29 @@
 #define INTMSK_CFSDI	(1UL << (IRQ_S3C2412_CFSDI - IRQ_EINT0))
 #define SUBMSK_CFSDI	INTMSK_SUB(IRQ_S3C2412_SDI, IRQ_S3C2412_CF)
 
-static void s3c2412_irq_cfsdi_mask(unsigned int irqno)
+static void s3c2412_irq_cfsdi_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_CFSDI, SUBMSK_CFSDI);
+	s3c_irqsub_mask(data->irq, INTMSK_CFSDI, SUBMSK_CFSDI);
 }
 
-static void s3c2412_irq_cfsdi_unmask(unsigned int irqno)
+static void s3c2412_irq_cfsdi_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_CFSDI);
+	s3c_irqsub_unmask(data->irq, INTMSK_CFSDI);
 }
 
-static void s3c2412_irq_cfsdi_ack(unsigned int irqno)
+static void s3c2412_irq_cfsdi_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_CFSDI, SUBMSK_CFSDI);
+	s3c_irqsub_maskack(data->irq, INTMSK_CFSDI, SUBMSK_CFSDI);
 }
 
 static struct irq_chip s3c2412_irq_cfsdi = {
 	.name		= "s3c2412-cfsdi",
-	.ack		= s3c2412_irq_cfsdi_ack,
-	.mask		= s3c2412_irq_cfsdi_mask,
-	.unmask		= s3c2412_irq_cfsdi_unmask,
+	.irq_ack	= s3c2412_irq_cfsdi_ack,
+	.irq_mask	= s3c2412_irq_cfsdi_mask,
+	.irq_unmask	= s3c2412_irq_cfsdi_unmask,
 };
 
-static int s3c2412_irq_rtc_wake(unsigned int irqno, unsigned int state)
+static int s3c2412_irq_rtc_wake(struct irq_data *data, unsigned int state)
 {
 	unsigned long pwrcfg;
 
@@ -165,7 +165,7 @@
 		pwrcfg |= S3C2412_PWRCFG_RTC_MASKIRQ;
 	__raw_writel(pwrcfg, S3C2412_PWRCFG);
 
-	return s3c_irq_chip.set_wake(irqno, state);
+	return s3c_irq_chip.irq_set_wake(data, state);
 }
 
 static struct irq_chip s3c2412_irq_rtc_chip;
@@ -193,7 +193,7 @@
 	/* change RTC IRQ's set wake method */
 
 	s3c2412_irq_rtc_chip = s3c_irq_chip;
-	s3c2412_irq_rtc_chip.set_wake = s3c2412_irq_rtc_wake;
+	s3c2412_irq_rtc_chip.irq_set_wake = s3c2412_irq_rtc_wake;
 
 	set_irq_chip(IRQ_RTC, &s3c2412_irq_rtc_chip);
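
Note: the RTC wake path shows how to interpose on a single callback under the new scheme: the stock s3c_irq_chip is copied wholesale, only irq_set_wake is replaced, and the wrapper chains back via s3c_irq_chip.irq_set_wake(data, state) after its extra PWRCFG handling. A generic sketch of that interposition pattern (base_chip and extra_hw_setup() are hypothetical):

    #include <linux/init.h>
    #include <linux/irq.h>

    static struct irq_chip base_chip;       /* assumption: filled with real callbacks elsewhere */
    static struct irq_chip wrapped_chip;

    static void extra_hw_setup(unsigned int on)
    {
        /* assumption: board-specific register tweak tied to wake enable/disable */
    }

    static int wrapped_set_wake(struct irq_data *data, unsigned int on)
    {
        extra_hw_setup(on);                         /* do the extra work first */
        return base_chip.irq_set_wake(data, on);    /* then chain to the original */
    }

    static void __init install_wrapped_chip(unsigned int irq)
    {
        wrapped_chip = base_chip;                   /* struct copy of the stock chip */
        wrapped_chip.irq_set_wake = wrapped_set_wake;
        set_irq_chip(irq, &wrapped_chip);
    }
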
 
diff --git a/arch/arm/mach-s3c2416/Kconfig b/arch/arm/mach-s3c2416/Kconfig
index df8d149..69b48a7 100644
--- a/arch/arm/mach-s3c2416/Kconfig
+++ b/arch/arm/mach-s3c2416/Kconfig
@@ -31,6 +31,17 @@
 	help
 	  Internal config node to apply S3C2416 power management
 
+config S3C2416_SETUP_SDHCI
+	bool
+	select S3C2416_SETUP_SDHCI_GPIO
+	help
+	  Internal helper functions for S3C2416 based SDHCI systems
+
+config S3C2416_SETUP_SDHCI_GPIO
+	bool
+	help
+	  Common setup code for SDHCI gpio.
+
 menu "S3C2416 Machines"
 
 config MACH_SMDK2416
@@ -42,6 +53,7 @@
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_NAND
 	select S3C_DEV_USB_HOST
+	select S3C2416_SETUP_SDHCI
 	select S3C2416_PM if PM
 	help
 	  Say Y here if you are using an SMDK2416
diff --git a/arch/arm/mach-s3c2416/Makefile b/arch/arm/mach-s3c2416/Makefile
index ef038d6..7b805b2 100644
--- a/arch/arm/mach-s3c2416/Makefile
+++ b/arch/arm/mach-s3c2416/Makefile
@@ -14,6 +14,10 @@
 obj-$(CONFIG_S3C2416_PM)	+= pm.o
 #obj-$(CONFIG_S3C2416_DMA)	+= dma.o
 
+# Device setup
+obj-$(CONFIG_S3C2416_SETUP_SDHCI) += setup-sdhci.o
+obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+
 # Machine support
 
 obj-$(CONFIG_MACH_SMDK2416)	+= mach-smdk2416.o
diff --git a/arch/arm/mach-s3c2416/clock.c b/arch/arm/mach-s3c2416/clock.c
index 7ccf5a2..3b02d85 100644
--- a/arch/arm/mach-s3c2416/clock.c
+++ b/arch/arm/mach-s3c2416/clock.c
@@ -38,12 +38,11 @@
 	[7] = 8,
 };
 
-/* ID to hardware numbering, 0 is HSMMC1, 1 is HSMMC0 */
 static struct clksrc_clk hsmmc_div[] = {
 	[0] = {
 		.clk = {
 			.name	= "hsmmc-div",
-			.id	= 1,
+			.id	= 0,
 			.parent	= &clk_esysclk.clk,
 		},
 		.reg_div = { .reg = S3C2416_CLKDIV2, .size = 2, .shift = 6 },
@@ -51,7 +50,7 @@
 	[1] = {
 		.clk = {
 			.name	= "hsmmc-div",
-			.id	= 0,
+			.id	= 1,
 			.parent	= &clk_esysclk.clk,
 		},
 		.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
@@ -61,7 +60,7 @@
 static struct clksrc_clk hsmmc_mux[] = {
 	[0] = {
 		.clk	= {
-			.id	= 1,
+			.id	= 0,
 			.name	= "hsmmc-if",
 			.ctrlbit = (1 << 6),
 			.enable = s3c2443_clkcon_enable_s,
@@ -77,7 +76,7 @@
 	},
 	[1] = {
 		.clk	= {
-			.id	= 0,
+			.id	= 1,
 			.name	= "hsmmc-if",
 			.ctrlbit = (1 << 12),
 			.enable = s3c2443_clkcon_enable_s,
@@ -93,6 +92,13 @@
 	},
 };
 
+static struct clk hsmmc0_clk = {
+	.name		= "hsmmc",
+	.id		= 0,
+	.parent		= &clk_h,
+	.enable		= s3c2443_clkcon_enable_h,
+	.ctrlbit	= S3C2416_HCLKCON_HSMMC0,
+};
 
 static inline unsigned int s3c2416_fclk_div(unsigned long clkcon0)
 {
@@ -130,6 +136,8 @@
 	for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
 		s3c_register_clksrc(clksrcs[ptr], 1);
 
+	s3c24xx_register_clock(&hsmmc0_clk);
+
 	s3c_pwmclk_init();
 
 }
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 00174da..680fe38 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -77,28 +77,27 @@
 #define INTMSK_WDTAC97	(1UL << (IRQ_WDT - IRQ_EINT0))
 #define SUBMSK_WDTAC97	INTMSK(IRQ_S3C2443_WDT, IRQ_S3C2443_AC97)
 
-static void s3c2416_irq_wdtac97_mask(unsigned int irqno)
+static void s3c2416_irq_wdtac97_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_mask(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
-static void s3c2416_irq_wdtac97_unmask(unsigned int irqno)
+static void s3c2416_irq_wdtac97_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_WDTAC97);
+	s3c_irqsub_unmask(data->irq, INTMSK_WDTAC97);
 }
 
-static void s3c2416_irq_wdtac97_ack(unsigned int irqno)
+static void s3c2416_irq_wdtac97_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_maskack(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
 static struct irq_chip s3c2416_irq_wdtac97 = {
-	.mask	    = s3c2416_irq_wdtac97_mask,
-	.unmask	    = s3c2416_irq_wdtac97_unmask,
-	.ack	    = s3c2416_irq_wdtac97_ack,
+	.irq_mask	= s3c2416_irq_wdtac97_mask,
+	.irq_unmask	= s3c2416_irq_wdtac97_unmask,
+	.irq_ack	= s3c2416_irq_wdtac97_ack,
 };
 
-
 /* LCD sub interrupts */
 
 static void s3c2416_irq_demux_lcd(unsigned int irq, struct irq_desc *desc)
@@ -109,28 +108,27 @@
 #define INTMSK_LCD	(1UL << (IRQ_LCD - IRQ_EINT0))
 #define SUBMSK_LCD	INTMSK(IRQ_S3C2443_LCD1, IRQ_S3C2443_LCD4)
 
-static void s3c2416_irq_lcd_mask(unsigned int irqno)
+static void s3c2416_irq_lcd_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_mask(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
-static void s3c2416_irq_lcd_unmask(unsigned int irqno)
+static void s3c2416_irq_lcd_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_LCD);
+	s3c_irqsub_unmask(data->irq, INTMSK_LCD);
 }
 
-static void s3c2416_irq_lcd_ack(unsigned int irqno)
+static void s3c2416_irq_lcd_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_maskack(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
 static struct irq_chip s3c2416_irq_lcd = {
-	.mask	    = s3c2416_irq_lcd_mask,
-	.unmask	    = s3c2416_irq_lcd_unmask,
-	.ack	    = s3c2416_irq_lcd_ack,
+	.irq_mask	= s3c2416_irq_lcd_mask,
+	.irq_unmask	= s3c2416_irq_lcd_unmask,
+	.irq_ack	= s3c2416_irq_lcd_ack,
 };
 
-
 /* DMA sub interrupts */
 
 static void s3c2416_irq_demux_dma(unsigned int irq, struct irq_desc *desc)
@@ -142,28 +140,27 @@
 #define SUBMSK_DMA	INTMSK(IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5)
 
 
-static void s3c2416_irq_dma_mask(unsigned int irqno)
+static void s3c2416_irq_dma_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_mask(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
-static void s3c2416_irq_dma_unmask(unsigned int irqno)
+static void s3c2416_irq_dma_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_DMA);
+	s3c_irqsub_unmask(data->irq, INTMSK_DMA);
 }
 
-static void s3c2416_irq_dma_ack(unsigned int irqno)
+static void s3c2416_irq_dma_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_maskack(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
 static struct irq_chip s3c2416_irq_dma = {
-	.mask	    = s3c2416_irq_dma_mask,
-	.unmask	    = s3c2416_irq_dma_unmask,
-	.ack	    = s3c2416_irq_dma_ack,
+	.irq_mask	= s3c2416_irq_dma_mask,
+	.irq_unmask	= s3c2416_irq_dma_unmask,
+	.irq_ack	= s3c2416_irq_dma_ack,
 };
 
-
 /* UART3 sub interrupts */
 
 static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
@@ -174,28 +171,27 @@
 #define INTMSK_UART3	(1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
 #define SUBMSK_UART3	(0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
-static void s3c2416_irq_uart3_mask(unsigned int irqno)
+static void s3c2416_irq_uart3_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_mask(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
-static void s3c2416_irq_uart3_unmask(unsigned int irqno)
+static void s3c2416_irq_uart3_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART3);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART3);
 }
 
-static void s3c2416_irq_uart3_ack(unsigned int irqno)
+static void s3c2416_irq_uart3_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
 static struct irq_chip s3c2416_irq_uart3 = {
-	.mask	    = s3c2416_irq_uart3_mask,
-	.unmask	    = s3c2416_irq_uart3_unmask,
-	.ack	    = s3c2416_irq_uart3_ack,
+	.irq_mask	= s3c2416_irq_uart3_mask,
+	.irq_unmask	= s3c2416_irq_uart3_unmask,
+	.irq_ack	= s3c2416_irq_uart3_ack,
 };
 
-
 /* IRQ initialisation code */
 
 static int __init s3c2416_add_sub(unsigned int base,
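The same genirq conversion recurs throughout the s3c24xx irq files in this series: each callback now takes a struct irq_data pointer, the Linux IRQ number is read from data->irq, and the handlers are hooked up through the irq_-prefixed fields of struct irq_chip. A minimal sketch of a chip in the new style, with hypothetical my_* names and a hypothetical hardware helper, looks like this:

#include <linux/irq.h>

static void my_irq_mask(struct irq_data *data)
{
	/* the Linux IRQ number now arrives via irq_data */
	my_hw_mask_irq(data->irq);		/* hypothetical hardware helper */
}

static void my_irq_unmask(struct irq_data *data)
{
	my_hw_unmask_irq(data->irq);		/* hypothetical hardware helper */
}

static struct irq_chip my_irq_chip = {
	.name		= "my-chip",
	.irq_mask	= my_irq_mask,
	.irq_unmask	= my_irq_unmask,
};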
diff --git a/arch/arm/mach-s3c2416/mach-smdk2416.c b/arch/arm/mach-s3c2416/mach-smdk2416.c
index 7fc3664..3f83177 100644
--- a/arch/arm/mach-s3c2416/mach-smdk2416.c
+++ b/arch/arm/mach-s3c2416/mach-smdk2416.c
@@ -46,6 +46,7 @@
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/nand.h>
+#include <plat/sdhci.h>
 
 #include <plat/regs-fb-v4.h>
 #include <plat/fb.h>
@@ -110,6 +111,13 @@
 		.ucon	     = UCON,
 		.ulcon	     = ULCON | 0x50,
 		.ufcon	     = UFCON,
+	},
+	[3] = {
+		.hwport	     = 3,
+		.flags	     = 0,
+		.ucon	     = UCON,
+		.ulcon	     = ULCON,
+		.ufcon	     = UFCON,
 	}
 };
 
@@ -159,6 +167,18 @@
 	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
 };
 
+static struct s3c_sdhci_platdata smdk2416_hsmmc0_pdata __initdata = {
+	.max_width		= 4,
+	.cd_type		= S3C_SDHCI_CD_GPIO,
+	.ext_cd_gpio		= S3C2410_GPF(1),
+	.ext_cd_gpio_invert	= 1,
+};
+
+static struct s3c_sdhci_platdata smdk2416_hsmmc1_pdata __initdata = {
+	.max_width		= 4,
+	.cd_type		= S3C_SDHCI_CD_NONE,
+};
+
 static struct platform_device *smdk2416_devices[] __initdata = {
 	&s3c_device_fb,
 	&s3c_device_wdt,
@@ -180,6 +200,9 @@
 	s3c_i2c0_set_platdata(NULL);
 	s3c_fb_set_platdata(&smdk2416_fb_platdata);
 
+	s3c_sdhci0_set_platdata(&smdk2416_hsmmc0_pdata);
+	s3c_sdhci1_set_platdata(&smdk2416_hsmmc1_pdata);
+
 	gpio_request(S3C2410_GPB(4), "USBHost Power");
 	gpio_direction_output(S3C2410_GPB(4), 1);
 
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index 63f39cd..ba7fd87 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -53,6 +53,7 @@
 #include <plat/s3c2416.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
+#include <plat/sdhci.h>
 
 #include <plat/iic-core.h>
 #include <plat/fb-core.h>
@@ -115,6 +116,10 @@
 	s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_updown;
 	s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_updown;
 
+	/* initialize device information early */
+	s3c2416_default_sdhci0();
+	s3c2416_default_sdhci1();
+
 	iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
 }
 
diff --git a/arch/arm/mach-s3c2416/setup-sdhci-gpio.c b/arch/arm/mach-s3c2416/setup-sdhci-gpio.c
new file mode 100644
index 0000000..f65cb3e
--- /dev/null
+++ b/arch/arm/mach-s3c2416/setup-sdhci-gpio.c
@@ -0,0 +1,34 @@
+/* linux/arch/arm/mach-s3c2416/setup-sdhci-gpio.c
+ *
+ * Copyright 2010 Promwad Innovation Company
+ *	Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * S3C2416 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
+ *
+ * Based on mach-s3c64xx/setup-sdhci-gpio.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/regs-gpio.h>
+#include <plat/gpio-cfg.h>
+
+void s3c2416_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
+{
+	s3c_gpio_cfgrange_nopull(S3C2410_GPE(5), 2 + width, S3C_GPIO_SFN(2));
+}
+
+void s3c2416_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
+{
+	s3c_gpio_cfgrange_nopull(S3C2410_GPL(0), width, S3C_GPIO_SFN(2));
+	s3c_gpio_cfgrange_nopull(S3C2410_GPL(8), 2, S3C_GPIO_SFN(2));
+}
diff --git a/arch/arm/mach-s3c2416/setup-sdhci.c b/arch/arm/mach-s3c2416/setup-sdhci.c
new file mode 100644
index 0000000..ed34fad
--- /dev/null
+++ b/arch/arm/mach-s3c2416/setup-sdhci.c
@@ -0,0 +1,61 @@
+/* linux/arch/arm/mach-s3c2416/setup-sdhci.c
+ *
+ * Copyright 2010 Promwad Innovation Company
+ *	Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * S3C2416 - Helper functions for setting up SDHCI device(s) (HSMMC)
+ *
+ * Based on mach-s3c64xx/setup-sdhci.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <plat/regs-sdhci.h>
+#include <plat/sdhci.h>
+
+/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
+
+char *s3c2416_hsmmc_clksrcs[4] = {
+	[0] = "hsmmc",
+	[1] = "hsmmc",
+	[2] = "hsmmc-if",
+	/* [3] = "48m", - note not successfully used yet */
+};
+
+void s3c2416_setup_sdhci_cfg_card(struct platform_device *dev,
+				  void __iomem *r,
+				  struct mmc_ios *ios,
+				  struct mmc_card *card)
+{
+	u32 ctrl2, ctrl3;
+
+	ctrl2 = __raw_readl(r + S3C_SDHCI_CONTROL2);
+	ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+	ctrl2 |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
+		  S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
+		  S3C_SDHCI_CTRL2_ENFBCLKRX |
+		  S3C_SDHCI_CTRL2_DFCNT_NONE |
+		  S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
+
+	if (ios->clock < 25 * 1000000)
+		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL3 |
+			 S3C_SDHCI_CTRL3_FCSEL2 |
+			 S3C_SDHCI_CTRL3_FCSEL1 |
+			 S3C_SDHCI_CTRL3_FCSEL0);
+	else
+		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
+
+	__raw_writel(ctrl2, r + S3C_SDHCI_CONTROL2);
+	__raw_writel(ctrl3, r + S3C_SDHCI_CONTROL3);
+}
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index a0cb258..50825a3 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -99,6 +99,7 @@
 	select POWER_SUPPLY
 	select MACH_NEO1973
 	select S3C2410_PWM
+	select S3C_DEV_USB_HOST
 	help
 	   Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
 
diff --git a/arch/arm/mach-s3c2440/include/mach/gta02.h b/arch/arm/mach-s3c2440/include/mach/gta02.h
index 953331d..3a56a22 100644
--- a/arch/arm/mach-s3c2440/include/mach/gta02.h
+++ b/arch/arm/mach-s3c2440/include/mach/gta02.h
@@ -44,19 +44,19 @@
 #define GTA02v3_GPIO_nUSB_FLT	S3C2410_GPG(10)	/* v3 + v4 only */
 #define GTA02v3_GPIO_nGSM_OC	S3C2410_GPG(11)	/* v3 + v4 only */
 
-#define GTA02_GPIO_AMP_SHUT	S3C2440_GPJ1	/* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10	S3C2440_GPJ2
-#define GTA02_GPIO_HP_IN	S3C2440_GPJ2	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0		S3C2440_GPJ3	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN	S3C2440_GPJ4
-#define GTA02_GPIO_3D_RESET	S3C2440_GPJ5
-#define GTA02_GPIO_nDL_GSM	S3C2440_GPJ6	/* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0	S3C2440_GPJ7
-#define GTA02v1_GPIO_BAT_ID	S3C2440_GPJ8
-#define GTA02_GPIO_KEEPACT	S3C2440_GPJ8
-#define GTA02v1_GPIO_HP_IN	S3C2440_GPJ10
-#define GTA02_CHIP_PWD		S3C2440_GPJ11	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET	S3C2440_GPJ12	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_AMP_SHUT	S3C2410_GPJ(1)	/* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10	S3C2410_GPJ(2)
+#define GTA02_GPIO_HP_IN	S3C2410_GPJ(2)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0		S3C2410_GPJ(3)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN	S3C2410_GPJ(4)
+#define GTA02_GPIO_3D_RESET	S3C2410_GPJ(5)
+#define GTA02_GPIO_nDL_GSM	S3C2410_GPJ(6)	/* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0	S3C2410_GPJ(7)
+#define GTA02v1_GPIO_BAT_ID	S3C2410_GPJ(8)
+#define GTA02_GPIO_KEEPACT	S3C2410_GPJ(8)
+#define GTA02v1_GPIO_HP_IN	S3C2410_GPJ(10)
+#define GTA02_CHIP_PWD		S3C2410_GPJ(11)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET	S3C2410_GPJ(12)	/* v2 + v3 + v4 only */
 
 #define GTA02_IRQ_GSENSOR_1	IRQ_EINT0
 #define GTA02_IRQ_MODEM		IRQ_EINT1
diff --git a/arch/arm/mach-s3c2440/irq.c b/arch/arm/mach-s3c2440/irq.c
index 0c049b9..acad442 100644
--- a/arch/arm/mach-s3c2440/irq.c
+++ b/arch/arm/mach-s3c2440/irq.c
@@ -69,27 +69,27 @@
 #define INTMSK_WDT	 (1UL << (IRQ_WDT - IRQ_EINT0))
 
 static void
-s3c_irq_wdtac97_mask(unsigned int irqno)
+s3c_irq_wdtac97_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_WDT, 3<<13);
+	s3c_irqsub_mask(data->irq, INTMSK_WDT, 3 << 13);
 }
 
 static void
-s3c_irq_wdtac97_unmask(unsigned int irqno)
+s3c_irq_wdtac97_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_WDT);
+	s3c_irqsub_unmask(data->irq, INTMSK_WDT);
 }
 
 static void
-s3c_irq_wdtac97_ack(unsigned int irqno)
+s3c_irq_wdtac97_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_WDT, 3<<13);
+	s3c_irqsub_maskack(data->irq, INTMSK_WDT, 3 << 13);
 }
 
 static struct irq_chip s3c_irq_wdtac97 = {
-	.mask	    = s3c_irq_wdtac97_mask,
-	.unmask	    = s3c_irq_wdtac97_unmask,
-	.ack	    = s3c_irq_wdtac97_ack,
+	.irq_mask	= s3c_irq_wdtac97_mask,
+	.irq_unmask	= s3c_irq_wdtac97_unmask,
+	.irq_ack	= s3c_irq_wdtac97_ack,
 };
 
 static int s3c2440_irq_add(struct sys_device *sysdev)
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index e0622bb..eab6ae5 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -692,7 +692,7 @@
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
 	&s3c_device_iis,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c_device_usbgadget,
 	&s3c_device_rtc,
 	&s3c_device_nand,
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c
index a75c0c2..83daf4e 100644
--- a/arch/arm/mach-s3c2440/s3c244x-irq.c
+++ b/arch/arm/mach-s3c2440/s3c244x-irq.c
@@ -68,27 +68,27 @@
 #define INTMSK_CAM (1UL << (IRQ_CAM - IRQ_EINT0))
 
 static void
-s3c_irq_cam_mask(unsigned int irqno)
+s3c_irq_cam_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_CAM, 3<<11);
+	s3c_irqsub_mask(data->irq, INTMSK_CAM, 3 << 11);
 }
 
 static void
-s3c_irq_cam_unmask(unsigned int irqno)
+s3c_irq_cam_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_CAM);
+	s3c_irqsub_unmask(data->irq, INTMSK_CAM);
 }
 
 static void
-s3c_irq_cam_ack(unsigned int irqno)
+s3c_irq_cam_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_CAM, 3<<11);
+	s3c_irqsub_maskack(data->irq, INTMSK_CAM, 3 << 11);
 }
 
 static struct irq_chip s3c_irq_cam = {
-	.mask	    = s3c_irq_cam_mask,
-	.unmask	    = s3c_irq_cam_unmask,
-	.ack	    = s3c_irq_cam_ack,
+	.irq_mask	= s3c_irq_cam_mask,
+	.irq_unmask	= s3c_irq_cam_unmask,
+	.irq_ack	= s3c_irq_cam_ack,
 };
 
 static int s3c244x_irq_add(struct sys_device *sysdev)
diff --git a/arch/arm/mach-s3c2443/Kconfig b/arch/arm/mach-s3c2443/Kconfig
index 31babec..d8eb868 100644
--- a/arch/arm/mach-s3c2443/Kconfig
+++ b/arch/arm/mach-s3c2443/Kconfig
@@ -10,6 +10,7 @@
 	select CPU_LLSERIAL_S3C2440
 	select SAMSUNG_CLKSRC
 	select S3C2443_CLOCK
+	select S3C_GPIO_PULL_S3C2443
 	help
 	  Support for the S3C2443 SoC from the S3C24XX line
 
@@ -25,7 +26,7 @@
 	bool "SMDK2443"
 	select CPU_S3C2443
 	select MACH_SMDK
-	select S3C_DEV_HSMMC
+	select S3C_DEV_HSMMC1
 	help
 	  Say Y here if you are using an SMDK2443
 
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 0c3c0c8..f4ec6d5 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -196,7 +196,7 @@
 static struct clksrc_clk clk_hsmmc_div = {
 	.clk	= {
 		.name		= "hsmmc-div",
-		.id		= -1,
+		.id		= 1,
 		.parent		= &clk_esysclk.clk,
 	},
 	.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
@@ -231,7 +231,7 @@
 
 static struct clk clk_hsmmc = {
 	.name		= "hsmmc-if",
-	.id		= -1,
+	.id		= 1,
 	.parent		= &clk_hsmmc_div.clk,
 	.enable		= s3c2443_enable_hsmmc,
 	.ops		= &(struct clk_ops) {
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 8934247..c7820f9 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -75,28 +75,27 @@
 #define INTMSK_WDTAC97	(1UL << (IRQ_WDT - IRQ_EINT0))
 #define SUBMSK_WDTAC97	INTMSK(IRQ_S3C2443_WDT, IRQ_S3C2443_AC97)
 
-static void s3c2443_irq_wdtac97_mask(unsigned int irqno)
+static void s3c2443_irq_wdtac97_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_mask(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
-static void s3c2443_irq_wdtac97_unmask(unsigned int irqno)
+static void s3c2443_irq_wdtac97_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_WDTAC97);
+	s3c_irqsub_unmask(data->irq, INTMSK_WDTAC97);
 }
 
-static void s3c2443_irq_wdtac97_ack(unsigned int irqno)
+static void s3c2443_irq_wdtac97_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+	s3c_irqsub_maskack(data->irq, INTMSK_WDTAC97, SUBMSK_WDTAC97);
 }
 
 static struct irq_chip s3c2443_irq_wdtac97 = {
-	.mask	    = s3c2443_irq_wdtac97_mask,
-	.unmask	    = s3c2443_irq_wdtac97_unmask,
-	.ack	    = s3c2443_irq_wdtac97_ack,
+	.irq_mask	= s3c2443_irq_wdtac97_mask,
+	.irq_unmask	= s3c2443_irq_wdtac97_unmask,
+	.irq_ack	= s3c2443_irq_wdtac97_ack,
 };
 
-
 /* LCD sub interrupts */
 
 static void s3c2443_irq_demux_lcd(unsigned int irq, struct irq_desc *desc)
@@ -107,28 +106,27 @@
 #define INTMSK_LCD	(1UL << (IRQ_LCD - IRQ_EINT0))
 #define SUBMSK_LCD	INTMSK(IRQ_S3C2443_LCD1, IRQ_S3C2443_LCD4)
 
-static void s3c2443_irq_lcd_mask(unsigned int irqno)
+static void s3c2443_irq_lcd_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_mask(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
-static void s3c2443_irq_lcd_unmask(unsigned int irqno)
+static void s3c2443_irq_lcd_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_LCD);
+	s3c_irqsub_unmask(data->irq, INTMSK_LCD);
 }
 
-static void s3c2443_irq_lcd_ack(unsigned int irqno)
+static void s3c2443_irq_lcd_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_LCD, SUBMSK_LCD);
+	s3c_irqsub_maskack(data->irq, INTMSK_LCD, SUBMSK_LCD);
 }
 
 static struct irq_chip s3c2443_irq_lcd = {
-	.mask	    = s3c2443_irq_lcd_mask,
-	.unmask	    = s3c2443_irq_lcd_unmask,
-	.ack	    = s3c2443_irq_lcd_ack,
+	.irq_mask	= s3c2443_irq_lcd_mask,
+	.irq_unmask	= s3c2443_irq_lcd_unmask,
+	.irq_ack	= s3c2443_irq_lcd_ack,
 };
 
-
 /* DMA sub interrupts */
 
 static void s3c2443_irq_demux_dma(unsigned int irq, struct irq_desc *desc)
@@ -139,29 +137,27 @@
 #define INTMSK_DMA	(1UL << (IRQ_S3C2443_DMA - IRQ_EINT0))
 #define SUBMSK_DMA	INTMSK(IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5)
 
-
-static void s3c2443_irq_dma_mask(unsigned int irqno)
+static void s3c2443_irq_dma_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_mask(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
-static void s3c2443_irq_dma_unmask(unsigned int irqno)
+static void s3c2443_irq_dma_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_DMA);
+	s3c_irqsub_unmask(data->irq, INTMSK_DMA);
 }
 
-static void s3c2443_irq_dma_ack(unsigned int irqno)
+static void s3c2443_irq_dma_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_DMA, SUBMSK_DMA);
+	s3c_irqsub_maskack(data->irq, INTMSK_DMA, SUBMSK_DMA);
 }
 
 static struct irq_chip s3c2443_irq_dma = {
-	.mask	    = s3c2443_irq_dma_mask,
-	.unmask	    = s3c2443_irq_dma_unmask,
-	.ack	    = s3c2443_irq_dma_ack,
+	.irq_mask	= s3c2443_irq_dma_mask,
+	.irq_unmask	= s3c2443_irq_dma_unmask,
+	.irq_ack	= s3c2443_irq_dma_ack,
 };
 
-
 /* UART3 sub interrupts */
 
 static void s3c2443_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
@@ -172,28 +168,27 @@
 #define INTMSK_UART3	(1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
 #define SUBMSK_UART3	(0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
-static void s3c2443_irq_uart3_mask(unsigned int irqno)
+static void s3c2443_irq_uart3_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_mask(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
-static void s3c2443_irq_uart3_unmask(unsigned int irqno)
+static void s3c2443_irq_uart3_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART3);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART3);
 }
 
-static void s3c2443_irq_uart3_ack(unsigned int irqno)
+static void s3c2443_irq_uart3_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART3, SUBMSK_UART3);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART3, SUBMSK_UART3);
 }
 
 static struct irq_chip s3c2443_irq_uart3 = {
-	.mask	    = s3c2443_irq_uart3_mask,
-	.unmask	    = s3c2443_irq_uart3_unmask,
-	.ack	    = s3c2443_irq_uart3_ack,
+	.irq_mask	= s3c2443_irq_uart3_mask,
+	.irq_unmask	= s3c2443_irq_uart3_unmask,
+	.irq_ack	= s3c2443_irq_uart3_ack,
 };
 
-
 /* CAM sub interrupts */
 
 static void s3c2443_irq_demux_cam(unsigned int irq, struct irq_desc *desc)
@@ -204,25 +199,25 @@
 #define INTMSK_CAM	(1UL << (IRQ_CAM - IRQ_EINT0))
 #define SUBMSK_CAM	INTMSK(IRQ_S3C2440_CAM_C, IRQ_S3C2440_CAM_P)
 
-static void s3c2443_irq_cam_mask(unsigned int irqno)
+static void s3c2443_irq_cam_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_CAM, SUBMSK_CAM);
+	s3c_irqsub_mask(data->irq, INTMSK_CAM, SUBMSK_CAM);
 }
 
-static void s3c2443_irq_cam_unmask(unsigned int irqno)
+static void s3c2443_irq_cam_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_CAM);
+	s3c_irqsub_unmask(data->irq, INTMSK_CAM);
 }
 
-static void s3c2443_irq_cam_ack(unsigned int irqno)
+static void s3c2443_irq_cam_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_CAM, SUBMSK_CAM);
+	s3c_irqsub_maskack(data->irq, INTMSK_CAM, SUBMSK_CAM);
 }
 
 static struct irq_chip s3c2443_irq_cam = {
-	.mask	    = s3c2443_irq_cam_mask,
-	.unmask	    = s3c2443_irq_cam_unmask,
-	.ack	    = s3c2443_irq_cam_ack,
+	.irq_mask	= s3c2443_irq_cam_mask,
+	.irq_unmask	= s3c2443_irq_cam_unmask,
+	.irq_ack	= s3c2443_irq_cam_ack,
 };
 
 /* IRQ initialisation code */
diff --git a/arch/arm/mach-s3c2443/mach-smdk2443.c b/arch/arm/mach-s3c2443/mach-smdk2443.c
index 4337f0a..514275e 100644
--- a/arch/arm/mach-s3c2443/mach-smdk2443.c
+++ b/arch/arm/mach-s3c2443/mach-smdk2443.c
@@ -99,13 +99,20 @@
 		.ucon	     = 0x3c5,
 		.ulcon	     = 0x43,
 		.ufcon	     = 0x51,
+	},
+	[3] = {
+		.hwport	     = 3,
+		.flags	     = 0,
+		.ucon	     = 0x3c5,
+		.ulcon	     = 0x03,
+		.ufcon	     = 0x51,
 	}
 };
 
 static struct platform_device *smdk2443_devices[] __initdata = {
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
-	&s3c_device_hsmmc0,
+	&s3c_device_hsmmc1,
 #ifdef CONFIG_SND_SOC_SMDK2443_WM9710
 	&s3c_device_ac97,
 #endif
diff --git a/arch/arm/mach-s3c2443/s3c2443.c b/arch/arm/mach-s3c2443/s3c2443.c
index 33d18dd..e6a28ba 100644
--- a/arch/arm/mach-s3c2443/s3c2443.c
+++ b/arch/arm/mach-s3c2443/s3c2443.c
@@ -16,6 +16,7 @@
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/init.h>
+#include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
 #include <linux/sysdev.h>
@@ -32,6 +33,9 @@
 #include <mach/regs-s3c2443-clock.h>
 #include <mach/reset.h>
 
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
 #include <plat/s3c2443.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
@@ -86,6 +90,9 @@
 
 void __init s3c2443_map_io(void)
 {
+	s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_s3c2443;
+	s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_s3c2443;
+
 	iotable_init(s3c2443_iodesc, ARRAY_SIZE(s3c2443_iodesc));
 }
 
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 7e03f0a..fdfc4d5 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -127,7 +127,7 @@
 	return s3c64xx_gate(S3C_SCLK_GATE, clk, enable);
 }
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "nand",
 		.id		= -1,
@@ -151,6 +151,12 @@
 		.enable		= s3c64xx_pclk_ctrl,
 		.ctrlbit	= S3C_CLKCON_PCLK_IIC,
 	}, {
+		.name		= "i2c",
+		.id		= 1,
+		.parent		= &clk_p,
+		.enable		= s3c64xx_pclk_ctrl,
+		.ctrlbit	= S3C6410_CLKCON_PCLK_I2C1,
+	}, {
 		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_p,
@@ -695,7 +701,7 @@
 	}, {
 		.clk	= {
 			.name		= "audio-bus",
-			.id		= -1,  /* There's only one IISv4 port */
+			.id		= 2,
 			.ctrlbit        = S3C6410_CLKCON_SCLK_AUDIO2,
 			.enable		= s3c64xx_sclk_ctrl,
 		},
@@ -834,10 +840,6 @@
 void __init s3c64xx_register_clocks(unsigned long xtal, 
 				    unsigned armclk_divlimit)
 {
-	struct clk *clkp;
-	int ret;
-	int ptr;
-
 	armclk_mask = armclk_divlimit;
 
 	s3c24xx_register_baseclocks(xtal);
@@ -845,17 +847,8 @@
 
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1));
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
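The open-coded registration loop above is replaced by the common plat-samsung helpers, and the same substitution appears again in the s5p6442 and s5p64x0 clock code later in this series. Sketched with a hypothetical one-entry table, the resulting idiom is:

static struct clk init_clocks_off[] = {
	{
		.name		= "example",		/* hypothetical clock */
		.id		= -1,
		.parent		= &clk_p,
		.enable		= s3c64xx_pclk_ctrl,
		.ctrlbit	= (1 << 0),		/* hypothetical gate bit */
	},
};

static void __init my_register_clocks(void)	/* hypothetical init hook */
{
	/* register the clocks, then leave them gated until a driver enables them */
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
}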
diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
index 76426a3..cad6702 100644
--- a/arch/arm/mach-s3c64xx/dev-audio.c
+++ b/arch/arm/mach-s3c64xx/dev-audio.c
@@ -22,7 +22,12 @@
 #include <plat/audio.h>
 #include <plat/gpio-cfg.h>
 
-static int s3c64xx_i2sv3_cfg_gpio(struct platform_device *pdev)
+static const char *rclksrc[] = {
+	[0] = "iis",
+	[1] = "audio-bus",
+};
+
+static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
 {
 	unsigned int base;
 
@@ -33,6 +38,12 @@
 	case 1:
 		base = S3C64XX_GPE(0);
 		break;
+	case 2:
+		s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
+		return 0;
 	default:
 		printk(KERN_DEBUG "Invalid I2S Controller number: %d\n",
 			pdev->id);
@@ -44,16 +55,6 @@
 	return 0;
 }
 
-static int s3c64xx_i2sv4_cfg_gpio(struct platform_device *pdev)
-{
-	s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
-
-	return 0;
-}
-
 static struct resource s3c64xx_iis0_resource[] = {
 	[0] = {
 		.start = S3C64XX_PA_IIS0,
@@ -72,17 +73,22 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2s0_pdata = {
-	.cfg_gpio = s3c64xx_i2sv3_cfg_gpio,
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 struct platform_device s3c64xx_device_iis0 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 0,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis0_resource),
 	.resource	  = s3c64xx_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s0_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iis0);
@@ -105,17 +111,13 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2s1_pdata = {
-	.cfg_gpio = s3c64xx_i2sv3_cfg_gpio,
-};
-
 struct platform_device s3c64xx_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis1_resource),
 	.resource	  = s3c64xx_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s1_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iis1);
@@ -138,17 +140,23 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2sv4_pdata = {
-	.cfg_gpio = s3c64xx_i2sv4_cfg_gpio,
+static struct s3c_audio_pdata i2sv4_pdata = {
+	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 struct platform_device s3c64xx_device_iisv4 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 2,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iisv4_resource),
 	.resource	  = s3c64xx_iisv4_resource,
 	.dev = {
-		.platform_data = &s3c_i2sv4_pdata,
+		.platform_data = &i2sv4_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iisv4);
@@ -288,7 +296,7 @@
 static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s3c64xx_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_ac97_resource),
 	.resource	  = s3c64xx_ac97_resource,
@@ -307,16 +315,3 @@
 	else
 		s3c_ac97_pdata.cfg_gpio = s3c64xx_ac97_cfg_gpe;
 }
-
-static u64 s3c_device_audio_dmamask = 0xffffffffUL;
-
-struct platform_device s3c_device_pcm = {
-	.name		  = "s3c24xx-pcm-audio",
-	.id		  = -1,
-	.dev              = {
-		.dma_mask = &s3c_device_audio_dmamask,
-		.coherent_dma_mask = 0xffffffffUL
-	}
-};
-EXPORT_SYMBOL(s3c_device_pcm);
-
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index e7d03ab..c35585c 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -212,6 +212,7 @@
 
 	config = readl(chan->regs + PL080S_CH_CONFIG);
 	config |= PL080_CONFIG_ENABLE;
+	config &= ~PL080_CONFIG_HALT;
 
 	pr_debug("%s: writing config %08x\n", __func__, config);
 	writel(config, chan->regs + PL080S_CH_CONFIG);
@@ -689,12 +690,12 @@
 
 	regptr = regs + PL080_Cx_BASE(0);
 
-	for (ch = 0; ch < 8; ch++, chno++, chptr++) {
-		printk(KERN_INFO "%s: registering DMA %d (%p)\n",
-		       __func__, chno, regptr);
+	for (ch = 0; ch < 8; ch++, chptr++) {
+		pr_debug("%s: registering DMA %d (%p)\n",
+			 __func__, chno + ch, regptr);
 
 		chptr->bit = 1 << ch;
-		chptr->number = chno;
+		chptr->number = chno + ch;
 		chptr->dmac = dmac;
 		chptr->regs = regptr;
 		regptr += PL080_Cx_STRIDE;
@@ -703,7 +704,8 @@
 	/* for the moment, permanently enable the controller */
 	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
 
-	printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);
+	printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
+	       irq, regs, chno, chno+8);
 
 	return 0;
 
@@ -740,7 +742,7 @@
 	/* Set all DMA configuration to be DMA, not SDMA */
 	writel(0xffffff, S3C_SYSREG(0x110));
 
-	/* Register standard DMA controlers */
+	/* Register standard DMA controllers */
 	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
 	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
 
diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c
index fd99a82..92b0908 100644
--- a/arch/arm/mach-s3c64xx/gpiolib.c
+++ b/arch/arm/mach-s3c64xx/gpiolib.c
@@ -72,7 +72,7 @@
 	.get_pull	= s3c_gpio_getpull_updown,
 };
 
-int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
 {
 	return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
 }
@@ -138,7 +138,7 @@
 	},
 };
 
-int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
 {
 	return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
 }
diff --git a/arch/arm/mach-s3c64xx/irq-eint.c b/arch/arm/mach-s3c64xx/irq-eint.c
index 5682d6a..2ead818 100644
--- a/arch/arm/mach-s3c64xx/irq-eint.c
+++ b/arch/arm/mach-s3c64xx/irq-eint.c
@@ -30,41 +30,41 @@
 #include <plat/pm.h>
 
 #define eint_offset(irq)	((irq) - IRQ_EINT(0))
-#define eint_irq_to_bit(irq)	(1 << eint_offset(irq))
+#define eint_irq_to_bit(irq)	((u32)(1 << eint_offset(irq)))
 
-static inline void s3c_irq_eint_mask(unsigned int irq)
+static inline void s3c_irq_eint_mask(struct irq_data *data)
 {
 	u32 mask;
 
 	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask |= eint_irq_to_bit(irq);
+	mask |= (u32)data->chip_data;
 	__raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
-static void s3c_irq_eint_unmask(unsigned int irq)
+static void s3c_irq_eint_unmask(struct irq_data *data)
 {
 	u32 mask;
 
 	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask &= ~eint_irq_to_bit(irq);
+	mask &= ~((u32)data->chip_data);
 	__raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
-static inline void s3c_irq_eint_ack(unsigned int irq)
+static inline void s3c_irq_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S3C64XX_EINT0PEND);
+	__raw_writel((u32)data->chip_data, S3C64XX_EINT0PEND);
 }
 
-static void s3c_irq_eint_maskack(unsigned int irq)
+static void s3c_irq_eint_maskack(struct irq_data *data)
 {
 	/* compiler should in-line these */
-	s3c_irq_eint_mask(irq);
-	s3c_irq_eint_ack(irq);
+	s3c_irq_eint_mask(data);
+	s3c_irq_eint_ack(data);
 }
 
-static int s3c_irq_eint_set_type(unsigned int irq, unsigned int type)
+static int s3c_irq_eint_set_type(struct irq_data *data, unsigned int type)
 {
-	int offs = eint_offset(irq);
+	int offs = eint_offset(data->irq);
 	int pin, pin_val;
 	int shift;
 	u32 ctrl, mask;
@@ -140,12 +140,12 @@
 
 static struct irq_chip s3c_irq_eint = {
 	.name		= "s3c-eint",
-	.mask		= s3c_irq_eint_mask,
-	.unmask		= s3c_irq_eint_unmask,
-	.mask_ack	= s3c_irq_eint_maskack,
-	.ack		= s3c_irq_eint_ack,
-	.set_type	= s3c_irq_eint_set_type,
-	.set_wake	= s3c_irqext_wake,
+	.irq_mask	= s3c_irq_eint_mask,
+	.irq_unmask	= s3c_irq_eint_unmask,
+	.irq_mask_ack	= s3c_irq_eint_maskack,
+	.irq_ack	= s3c_irq_eint_ack,
+	.irq_set_type	= s3c_irq_eint_set_type,
+	.irq_set_wake	= s3c_irqext_wake,
 };
 
 /* s3c_irq_demux_eint
@@ -198,6 +198,7 @@
 
 	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
 		set_irq_chip(irq, &s3c_irq_eint);
+		set_irq_chip_data(irq, (void *)eint_irq_to_bit(irq));
 		set_irq_handler(irq, handle_level_irq);
 		set_irq_flags(irq, IRQF_VALID);
 	}
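Besides switching to struct irq_data, the hunk above caches the per-interrupt pending bit in chip_data at setup time instead of recomputing it from the IRQ number on every mask/ack. A sketch of that pattern, with hypothetical my_* names and register:

static void my_eint_mask(struct irq_data *data)
{
	u32 mask;

	mask = __raw_readl(MY_EINT_MASK_REG);		/* hypothetical register */
	mask |= (u32)(unsigned long)data->chip_data;	/* bit cached at init time */
	__raw_writel(mask, MY_EINT_MASK_REG);
}

static struct irq_chip my_eint_chip = {
	.name		= "my-eint",
	.irq_mask	= my_eint_mask,
};

static void __init my_eint_init(void)
{
	unsigned int irq;

	for (irq = MY_EINT_FIRST; irq <= MY_EINT_LAST; irq++) {
		set_irq_chip(irq, &my_eint_chip);
		/* stash the per-IRQ bit so the fast paths avoid the lookup */
		set_irq_chip_data(irq, (void *)(1UL << (irq - MY_EINT_FIRST)));
		set_irq_handler(irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
}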
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 77488fa..a80a316 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/smsc911x.h>
 #include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
 
 #ifdef CONFIG_SMDK6410_WM1190_EV1
 #include <linux/mfd/wm8350/core.h>
@@ -283,7 +284,7 @@
 	&s3c_device_fb,
 	&s3c_device_ohci,
 	&s3c_device_usb_hsotg,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c64xx_device_iisv4,
 	&samsung_device_keypad,
 
@@ -351,7 +352,7 @@
 /* VDD_UH_MMC, LDO5 on J5 */
 static struct regulator_init_data smdk6410_vdduh_mmc = {
 	.constraints = {
-		.name = "PVDD_UH/PVDD_MMC",
+		.name = "PVDD_UH+PVDD_MMC",
 		.always_on = 1,
 	},
 };
@@ -417,7 +418,7 @@
 /* S3C64xx internal logic & PLL */
 static struct regulator_init_data wm8350_dcdc1_data = {
 	.constraints = {
-		.name = "PVDD_INT/PVDD_PLL",
+		.name = "PVDD_INT+PVDD_PLL",
 		.min_uV = 1200000,
 		.max_uV = 1200000,
 		.always_on = 1,
@@ -452,7 +453,7 @@
 
 static struct regulator_init_data wm8350_dcdc4_data = {
 	.constraints = {
-		.name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV",
+		.name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
 		.min_uV = 3000000,
 		.max_uV = 3000000,
 		.always_on = 1,
@@ -464,7 +465,7 @@
 /* OTGi/1190-EV1 HPVDD & AVDD */
 static struct regulator_init_data wm8350_ldo4_data = {
 	.constraints = {
-		.name = "PVDD_OTGI/HPVDD/AVDD",
+		.name = "PVDD_OTGI+HPVDD+AVDD",
 		.min_uV = 1200000,
 		.max_uV = 1200000,
 		.apply_uV = 1,
@@ -552,7 +553,7 @@
 
 static struct regulator_init_data wm1192_dcdc3 = {
 	.constraints = {
-		.name = "PVDD_MEM/PVDD_GPS",
+		.name = "PVDD_MEM+PVDD_GPS",
 		.always_on = 1,
 	},
 };
@@ -563,7 +564,7 @@
 
 static struct regulator_init_data wm1192_ldo1 = {
 	.constraints = {
-		.name = "PVDD_LCD/PVDD_EXT",
+		.name = "PVDD_LCD+PVDD_EXT",
 		.always_on = 1,
 	},
 	.consumer_supplies = wm1192_ldo1_consumers,
diff --git a/arch/arm/mach-s3c64xx/setup-keypad.c b/arch/arm/mach-s3c64xx/setup-keypad.c
index f8ed0d2..1d4d0ee 100644
--- a/arch/arm/mach-s3c64xx/setup-keypad.c
+++ b/arch/arm/mach-s3c64xx/setup-keypad.c
@@ -17,7 +17,7 @@
 void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
 {
 	/* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
-	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3));
+	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
 
 	/* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
 	s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
diff --git a/arch/arm/mach-s3c64xx/setup-sdhci.c b/arch/arm/mach-s3c64xx/setup-sdhci.c
index 1a94203..f344a22 100644
--- a/arch/arm/mach-s3c64xx/setup-sdhci.c
+++ b/arch/arm/mach-s3c64xx/setup-sdhci.c
@@ -56,7 +56,7 @@
 	else
 		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
 
-	printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
+	pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
 	writel(ctrl2, r + S3C_SDHCI_CONTROL2);
 	writel(ctrl3, r + S3C_SDHCI_CONTROL3);
 }
diff --git a/arch/arm/mach-s5p6442/clock.c b/arch/arm/mach-s5p6442/clock.c
index 16d6e7e..fbbc7be 100644
--- a/arch/arm/mach-s5p6442/clock.c
+++ b/arch/arm/mach-s5p6442/clock.c
@@ -340,7 +340,7 @@
 	clk_pclkd1.rate = pclkd1;
 }
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "pdma",
 		.id		= -1,
@@ -408,23 +408,13 @@
 
 void __init s5p6442_register_clocks(void)
 {
-	struct clk *clkptr;
-	int i, ret;
-
 	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
 
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkptr = init_clocks_disable;
-	for (i = 0; i < ARRAY_SIZE(init_clocks_disable); i++, clkptr++) {
-		ret = s3c24xx_register_clock(clkptr);
-		if (ret < 0) {
-			printk(KERN_ERR "Fail to register clock %s (%d)\n",
-					clkptr->name, ret);
-		} else
-			(clkptr->enable)(clkptr, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
index 3462197..8719dc4 100644
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ b/arch/arm/mach-s5p6442/dev-audio.c
@@ -29,7 +29,7 @@
 		base = S5P6442_GPC1(0);
 		break;
 
-	case -1:
+	case 0:
 		base = S5P6442_GPC0(0);
 		break;
 
@@ -42,8 +42,19 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static const char *rclksrc_v35[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
+static struct s3c_audio_pdata i2sv35_pdata = {
 	.cfg_gpio = s5p6442_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc_v35,
+		},
+	},
 };
 
 static struct resource s5p6442_iis0_resource[] = {
@@ -62,15 +73,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5p6442_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5p6442_iis0_resource),
 	.resource	  = s5p6442_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv35_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "sclk_audio",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5p6442_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -93,12 +123,12 @@
 };
 
 struct platform_device s5p6442_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5p6442_iis1_resource),
 	.resource	  = s5p6442_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
index 31fb2e68..058dab4 100644
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/arch/arm/mach-s5p6442/include/mach/map.h
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5p6442/include/mach/map.h
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
  * S5P6442 - Memory map definitions
@@ -16,53 +16,61 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5P6442_PA_CHIPID	(0xE0000000)
-#define S5P_PA_CHIPID		S5P6442_PA_CHIPID
+#define S5P6442_PA_SDRAM	0x20000000
 
-#define S5P6442_PA_SYSCON	(0xE0100000)
-#define S5P_PA_SYSCON		S5P6442_PA_SYSCON
+#define S5P6442_PA_I2S0		0xC0B00000
+#define S5P6442_PA_I2S1		0xF2200000
 
-#define S5P6442_PA_GPIO		(0xE0200000)
+#define S5P6442_PA_CHIPID	0xE0000000
 
-#define S5P6442_PA_VIC0		(0xE4000000)
-#define S5P6442_PA_VIC1		(0xE4100000)
-#define S5P6442_PA_VIC2		(0xE4200000)
+#define S5P6442_PA_SYSCON	0xE0100000
+
+#define S5P6442_PA_GPIO		0xE0200000
+
+#define S5P6442_PA_VIC0		0xE4000000
+#define S5P6442_PA_VIC1		0xE4100000
+#define S5P6442_PA_VIC2		0xE4200000
+
+#define S5P6442_PA_SROMC	0xE7000000
 
 #define S5P6442_PA_MDMA		0xE8000000
 #define S5P6442_PA_PDMA		0xE9000000
 
-#define S5P6442_PA_TIMER	(0xEA000000)
-#define S5P_PA_TIMER		S5P6442_PA_TIMER
+#define S5P6442_PA_TIMER	0xEA000000
 
-#define S5P6442_PA_SYSTIMER   	(0xEA100000)
+#define S5P6442_PA_SYSTIMER	0xEA100000
 
-#define S5P6442_PA_WATCHDOG	(0xEA200000)
+#define S5P6442_PA_WATCHDOG	0xEA200000
 
-#define S5P6442_PA_UART		(0xEC000000)
+#define S5P6442_PA_UART		0xEC000000
 
-#define S5P_PA_UART0		(S5P6442_PA_UART + 0x0)
-#define S5P_PA_UART1		(S5P6442_PA_UART + 0x400)
-#define S5P_PA_UART2		(S5P6442_PA_UART + 0x800)
-#define S5P_SZ_UART		SZ_256
-
-#define S5P6442_PA_IIC0		(0xEC100000)
-
-#define S5P6442_PA_SDRAM	(0x20000000)
-#define S5P_PA_SDRAM		S5P6442_PA_SDRAM
+#define S5P6442_PA_IIC0		0xEC100000
 
 #define S5P6442_PA_SPI		0xEC300000
 
-/* I2S */
-#define S5P6442_PA_I2S0		0xC0B00000
-#define S5P6442_PA_I2S1		0xF2200000
-
-/* PCM */
 #define S5P6442_PA_PCM0		0xF2400000
 #define S5P6442_PA_PCM1		0xF2500000
 
-/* compatibiltiy defines. */
-#define S3C_PA_WDT		S5P6442_PA_WATCHDOG
-#define S3C_PA_UART		S5P6442_PA_UART
+/* Compatibility Defines */
+
 #define S3C_PA_IIC		S5P6442_PA_IIC0
+#define S3C_PA_WDT		S5P6442_PA_WATCHDOG
+
+#define S5P_PA_CHIPID		S5P6442_PA_CHIPID
+#define S5P_PA_SDRAM		S5P6442_PA_SDRAM
+#define S5P_PA_SROMC		S5P6442_PA_SROMC
+#define S5P_PA_SYSCON		S5P6442_PA_SYSCON
+#define S5P_PA_TIMER		S5P6442_PA_TIMER
+
+/* UART */
+
+#define S3C_PA_UART		S5P6442_PA_UART
+
+#define S5P_PA_UART(x)		(S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0		S5P_PA_UART(0)
+#define S5P_PA_UART1		S5P_PA_UART(1)
+#define S5P_PA_UART2		S5P_PA_UART(2)
+
+#define S5P_SZ_UART		SZ_256
 
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5p6442/mach-smdk6442.c b/arch/arm/mach-s5p6442/mach-smdk6442.c
index 819fd80..eaf6b9c 100644
--- a/arch/arm/mach-s5p6442/mach-smdk6442.c
+++ b/arch/arm/mach-s5p6442/mach-smdk6442.c
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/serial_core.h>
+#include <linux/i2c.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -25,6 +26,7 @@
 #include <plat/s5p6442.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
+#include <plat/iic.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDK6442_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -65,10 +67,16 @@
 };
 
 static struct platform_device *smdk6442_devices[] __initdata = {
+	&s3c_device_i2c0,
+	&samsung_asoc_dma,
 	&s5p6442_device_iis0,
 	&s3c_device_wdt,
 };
 
+static struct i2c_board_info smdk6442_i2c_devs0[] __initdata = {
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
+};
+
 static void __init smdk6442_map_io(void)
 {
 	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -78,6 +86,9 @@
 
 static void __init smdk6442_machine_init(void)
 {
+	s3c_i2c0_set_platdata(NULL);
+	i2c_register_board_info(0, smdk6442_i2c_devs0,
+			ARRAY_SIZE(smdk6442_i2c_devs0));
 	platform_add_devices(smdk6442_devices, ARRAY_SIZE(smdk6442_devices));
 }
 
diff --git a/arch/arm/mach-s5p6442/setup-i2c0.c b/arch/arm/mach-s5p6442/setup-i2c0.c
index 662695d..aad8565 100644
--- a/arch/arm/mach-s5p6442/setup-i2c0.c
+++ b/arch/arm/mach-s5p6442/setup-i2c0.c
@@ -14,12 +14,15 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/gpio.h>
 
 struct platform_device; /* don't need the contents */
 
+#include <plat/gpio-cfg.h>
 #include <plat/iic.h>
 
 void s3c_i2c0_cfg_gpio(struct platform_device *dev)
 {
-	/* Will be populated later */
+	s3c_gpio_cfgall_range(S5P6442_GPD1(0), 2,
+			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
 }
diff --git a/arch/arm/mach-s5p64x0/Makefile b/arch/arm/mach-s5p64x0/Makefile
index 2655829..ae6bf6f 100644
--- a/arch/arm/mach-s5p64x0/Makefile
+++ b/arch/arm/mach-s5p64x0/Makefile
@@ -12,9 +12,9 @@
 
 # Core support for S5P64X0 system
 
-obj-$(CONFIG_ARCH_S5P64X0)	+= cpu.o init.o clock.o dma.o
+obj-$(CONFIG_ARCH_S5P64X0)	+= cpu.o init.o clock.o dma.o gpiolib.o
 obj-$(CONFIG_ARCH_S5P64X0)	+= setup-i2c0.o
-obj-$(CONFIG_CPU_S5P6440)	+= clock-s5p6440.o gpio.o
+obj-$(CONFIG_CPU_S5P6440)	+= clock-s5p6440.o
 obj-$(CONFIG_CPU_S5P6450)	+= clock-s5p6450.o
 
 # machine support
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index e4883dc..9f12c2e 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -133,7 +133,7 @@
  * recommended to keep the following clocks disabled until the driver requests
  * for enabling the clock.
  */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "nand",
 		.id		= -1,
@@ -261,7 +261,7 @@
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 25),
 	}, {
-		.name		= "i2s_v40",
+		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_pclk_low.clk,
 		.enable		= s5p64x0_pclk_ctrl,
@@ -419,7 +419,7 @@
 static struct clksrc_clk clksrcs[] = {
 	{
 		.clk	= {
-			.name		= "mmc_bus",
+			.name		= "sclk_mmc",
 			.id		= 0,
 			.ctrlbit	= (1 << 24),
 			.enable		= s5p64x0_sclk_ctrl,
@@ -429,7 +429,7 @@
 		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 0, .size = 4 },
 	}, {
 		.clk	= {
-			.name		= "mmc_bus",
+			.name		= "sclk_mmc",
 			.id		= 1,
 			.ctrlbit	= (1 << 25),
 			.enable		= s5p64x0_sclk_ctrl,
@@ -439,7 +439,7 @@
 		.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 4, .size = 4 },
 	}, {
 		.clk	= {
-			.name		= "mmc_bus",
+			.name		= "sclk_mmc",
 			.id		= 2,
 			.ctrlbit	= (1 << 26),
 			.enable		= s5p64x0_sclk_ctrl,
@@ -602,8 +602,6 @@
 
 void __init s5p6440_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
 	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
@@ -614,16 +612,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 7dbf3c9..4eec457 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -181,7 +181,7 @@
  * recommended to keep the following clocks disabled until the driver requests
  * for enabling the clock.
  */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "usbhost",
 		.id		= -1,
@@ -231,6 +231,12 @@
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 5),
 	}, {
+		.name		= "rtc",
+		.id		= -1,
+		.parent		= &clk_pclk_low.clk,
+		.enable		= s5p64x0_pclk_ctrl,
+		.ctrlbit	= (1 << 6),
+	}, {
 		.name		= "adc",
 		.id		= -1,
 		.parent		= &clk_pclk_low.clk,
@@ -256,11 +262,23 @@
 		.ctrlbit	= (1 << 22),
 	}, {
 		.name		= "iis",
-		.id		= -1,
+		.id		= 0,
 		.parent		= &clk_pclk_low.clk,
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 26),
 	}, {
+		.name		= "iis",
+		.id		= 1,
+		.parent		= &clk_pclk_low.clk,
+		.enable		= s5p64x0_pclk_ctrl,
+		.ctrlbit	= (1 << 15),
+	}, {
+		.name		= "iis",
+		.id		= 2,
+		.parent		= &clk_pclk_low.clk,
+		.enable		= s5p64x0_pclk_ctrl,
+		.ctrlbit	= (1 << 16),
+	}, {
 		.name		= "i2c",
 		.id		= 1,
 		.parent		= &clk_pclk_low.clk,
@@ -633,8 +651,6 @@
 
 void __init s5p6450_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
 	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
@@ -643,16 +659,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5p64x0/dev-audio.c b/arch/arm/mach-s5p64x0/dev-audio.c
index 396bacc..35f1f22 100644
--- a/arch/arm/mach-s5p64x0/dev-audio.c
+++ b/arch/arm/mach-s5p64x0/dev-audio.c
@@ -19,34 +19,19 @@
 #include <mach/dma.h>
 #include <mach/irqs.h>
 
+static const char *rclksrc[] = {
+	[0] = "iis",
+	[1] = "sclk_audio2",
+};
+
 static int s5p6440_cfg_i2s(struct platform_device *pdev)
 {
-	/* configure GPIO for i2s port */
 	switch (pdev->id) {
-	case -1:
-		s3c_gpio_cfgpin_range(S5P6440_GPR(4), 5, S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(5));
+	case 0:
+		s3c_gpio_cfgpin_range(S5P6440_GPC(4), 2, S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S5P6440_GPC(7), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S5P6440_GPH(6), 4, S3C_GPIO_SFN(5));
 		break;
-
-	default:
-		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int s5p6450_cfg_i2s(struct platform_device *pdev)
-{
-	/* configure GPIO for i2s port */
-	switch (pdev->id) {
-	case -1:
-		s3c_gpio_cfgpin(S5P6450_GPB(4), S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6450_GPR(4), 5, S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6450_GPR(13), 2, S3C_GPIO_SFN(5));
-
-		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -57,13 +42,15 @@
 
 static struct s3c_audio_pdata s5p6440_i2s_pdata = {
 	.cfg_gpio = s5p6440_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
-static struct s3c_audio_pdata s5p6450_i2s_pdata = {
-	.cfg_gpio = s5p6450_cfg_i2s,
-};
-
-static struct resource s5p64x0_iis0_resource[] = {
+static struct resource s5p64x0_i2s0_resource[] = {
 	[0] = {
 		.start	= S5P64X0_PA_I2S,
 		.end	= S5P64X0_PA_I2S + 0x100 - 1,
@@ -82,20 +69,117 @@
 };
 
 struct platform_device s5p6440_device_iis = {
-	.name		= "s3c64xx-iis-v4",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
-	.resource	= s5p64x0_iis0_resource,
+	.name		= "samsung-i2s",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s5p64x0_i2s0_resource),
+	.resource	= s5p64x0_i2s0_resource,
 	.dev = {
 		.platform_data = &s5p6440_i2s_pdata,
 	},
 };
 
+static int s5p6450_cfg_i2s(struct platform_device *pdev)
+{
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5P6450_GPR(4), 5, S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S5P6450_GPR(13), 2, S3C_GPIO_SFN(5));
+		break;
+	case 1:
+		s3c_gpio_cfgpin(S5P6440_GPB(4), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S5P6450_GPC(0), 4, S3C_GPIO_SFN(5));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5P6450_GPK(0), 5, S3C_GPIO_SFN(5));
+		break;
+	default:
+		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata s5p6450_i2s0_pdata = {
+	.cfg_gpio = s5p6450_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
+	},
+};
+
 struct platform_device s5p6450_device_iis0 = {
-	.name		= "s3c64xx-iis-v4",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
-	.resource	= s5p64x0_iis0_resource,
+	.name		= "samsung-i2s",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s5p64x0_i2s0_resource),
+	.resource	= s5p64x0_i2s0_resource,
+	.dev = {
+		.platform_data = &s5p6450_i2s0_pdata,
+	},
+};
+
+static struct s3c_audio_pdata s5p6450_i2s_pdata = {
+	.cfg_gpio = s5p6450_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc,
+		},
+	},
+};
+
+static struct resource s5p6450_i2s1_resource[] = {
+	[0] = {
+		.start	= S5P6450_PA_I2S1,
+		.end	= S5P6450_PA_I2S1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S1_TX,
+		.end	= DMACH_I2S1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S1_RX,
+		.end	= DMACH_I2S1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5p6450_device_iis1 = {
+	.name		= "samsung-i2s",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(s5p6450_i2s1_resource),
+	.resource	= s5p6450_i2s1_resource,
+	.dev = {
+		.platform_data = &s5p6450_i2s_pdata,
+	},
+};
+
+static struct resource s5p6450_i2s2_resource[] = {
+	[0] = {
+		.start	= S5P6450_PA_I2S2,
+		.end	= S5P6450_PA_I2S2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S2_TX,
+		.end	= DMACH_I2S2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S2_RX,
+		.end	= DMACH_I2S2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5p6450_device_iis2 = {
+	.name		= "samsung-i2s",
+	.id		= 2,
+	.num_resources	= ARRAY_SIZE(s5p6450_i2s2_resource),
+	.resource	= s5p6450_i2s2_resource,
 	.dev = {
 		.platform_data = &s5p6450_i2s_pdata,
 	},
diff --git a/arch/arm/mach-s5p64x0/gpio.c b/arch/arm/mach-s5p64x0/gpio.c
deleted file mode 100644
index 39159dd..0000000
--- a/arch/arm/mach-s5p64x0/gpio.c
+++ /dev/null
@@ -1,342 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/gpio.c
- *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5P64X0 - GPIOlib support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <mach/map.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg.h>
-#include <plat/gpio-cfg-helpers.h>
-
-/* To be implemented S5P6450 GPIO */
-
-/*
- * S5P6440 GPIO bank summary:
- *
- * Bank	GPIOs	Style	SlpCon	ExtInt Group
- * A	6	4Bit	Yes	1
- * B	7	4Bit	Yes	1
- * C	8	4Bit	Yes	2
- * F	2	2Bit	Yes	4 [1]
- * G	7	4Bit	Yes	5
- * H	10	4Bit[2]	Yes	6
- * I	16	2Bit	Yes	None
- * J	12	2Bit	Yes	None
- * N	16	2Bit	No	IRQ_EINT
- * P	8	2Bit	Yes	8
- * R	15	4Bit[2]	Yes	8
- *
- * [1] BANKF pins 14,15 do not form part of the external interrupt sources
- * [2] BANK has two control registers, GPxCON0 and GPxCON1
- */
-
-static int s5p64x0_gpiolib_rbank_4bit2_input(struct gpio_chip *chip,
-					     unsigned int offset)
-{
-	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
-	void __iomem *base = ourchip->base;
-	void __iomem *regcon = base;
-	unsigned long con;
-	unsigned long flags;
-
-	switch (offset) {
-	case 6:
-		offset += 1;
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-	case 5:
-		regcon -= 4;
-		break;
-	default:
-		offset -= 7;
-		break;
-	}
-
-	s3c_gpio_lock(ourchip, flags);
-
-	con = __raw_readl(regcon);
-	con &= ~(0xf << con_4bit_shift(offset));
-	__raw_writel(con, regcon);
-
-	s3c_gpio_unlock(ourchip, flags);
-
-	return 0;
-}
-
-static int s5p64x0_gpiolib_rbank_4bit2_output(struct gpio_chip *chip,
-					      unsigned int offset, int value)
-{
-	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
-	void __iomem *base = ourchip->base;
-	void __iomem *regcon = base;
-	unsigned long con;
-	unsigned long dat;
-	unsigned long flags;
-	unsigned con_offset  = offset;
-
-	switch (con_offset) {
-	case 6:
-		con_offset += 1;
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-	case 5:
-		regcon -= 4;
-		break;
-	default:
-		con_offset -= 7;
-		break;
-	}
-
-	s3c_gpio_lock(ourchip, flags);
-
-	con = __raw_readl(regcon);
-	con &= ~(0xf << con_4bit_shift(con_offset));
-	con |= 0x1 << con_4bit_shift(con_offset);
-
-	dat = __raw_readl(base + GPIODAT_OFF);
-	if (value)
-		dat |= 1 << offset;
-	else
-		dat &= ~(1 << offset);
-
-	__raw_writel(con, regcon);
-	__raw_writel(dat, base + GPIODAT_OFF);
-
-	s3c_gpio_unlock(ourchip, flags);
-
-	return 0;
-}
-
-int s5p64x0_gpio_setcfg_4bit_rbank(struct s3c_gpio_chip *chip,
-				   unsigned int off, unsigned int cfg)
-{
-	void __iomem *reg = chip->base;
-	unsigned int shift;
-	u32 con;
-
-	switch (off) {
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-	case 5:
-		shift = (off & 7) * 4;
-		reg -= 4;
-		break;
-	case 6:
-		shift = ((off + 1) & 7) * 4;
-		reg -= 4;
-	default:
-		shift = ((off + 1) & 7) * 4;
-		break;
-	}
-
-	if (s3c_gpio_is_cfg_special(cfg)) {
-		cfg &= 0xf;
-		cfg <<= shift;
-	}
-
-	con = __raw_readl(reg);
-	con &= ~(0xf << shift);
-	con |= cfg;
-	__raw_writel(con, reg);
-
-	return 0;
-}
-
-static struct s3c_gpio_cfg s5p64x0_gpio_cfgs[] = {
-	{
-		.cfg_eint	= 0,
-	}, {
-		.cfg_eint	= 7,
-	}, {
-		.cfg_eint	= 3,
-		.set_config	= s5p64x0_gpio_setcfg_4bit_rbank,
-	}, {
-		.cfg_eint	= 0,
-		.set_config	= s3c_gpio_setcfg_s3c24xx,
-		.get_config	= s3c_gpio_getcfg_s3c24xx,
-	}, {
-		.cfg_eint	= 2,
-		.set_config	= s3c_gpio_setcfg_s3c24xx,
-		.get_config	= s3c_gpio_getcfg_s3c24xx,
-	}, {
-		.cfg_eint	= 3,
-		.set_config	= s3c_gpio_setcfg_s3c24xx,
-		.get_config	= s3c_gpio_getcfg_s3c24xx,
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_4bit[] = {
-	{
-		.base	= S5P6440_GPA_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPA(0),
-			.ngpio	= S5P6440_GPIO_A_NR,
-			.label	= "GPA",
-		},
-	}, {
-		.base	= S5P6440_GPB_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPB(0),
-			.ngpio	= S5P6440_GPIO_B_NR,
-			.label	= "GPB",
-		},
-	}, {
-		.base	= S5P6440_GPC_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPC(0),
-			.ngpio	= S5P6440_GPIO_C_NR,
-			.label	= "GPC",
-		},
-	}, {
-		.base	= S5P6440_GPG_BASE,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPG(0),
-			.ngpio	= S5P6440_GPIO_G_NR,
-			.label	= "GPG",
-		},
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_4bit2[] = {
-	{
-		.base	= S5P6440_GPH_BASE + 0x4,
-		.config	= &s5p64x0_gpio_cfgs[1],
-		.chip	= {
-			.base	= S5P6440_GPH(0),
-			.ngpio	= S5P6440_GPIO_H_NR,
-			.label	= "GPH",
-		},
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_rbank_4bit2[] = {
-	{
-		.base	= S5P6440_GPR_BASE + 0x4,
-		.config	= &s5p64x0_gpio_cfgs[2],
-		.chip	= {
-			.base	= S5P6440_GPR(0),
-			.ngpio	= S5P6440_GPIO_R_NR,
-			.label	= "GPR",
-		},
-	},
-};
-
-static struct s3c_gpio_chip s5p6440_gpio_2bit[] = {
-	{
-		.base	= S5P6440_GPF_BASE,
-		.config	= &s5p64x0_gpio_cfgs[5],
-		.chip	= {
-			.base	= S5P6440_GPF(0),
-			.ngpio	= S5P6440_GPIO_F_NR,
-			.label	= "GPF",
-		},
-	}, {
-		.base	= S5P6440_GPI_BASE,
-		.config	= &s5p64x0_gpio_cfgs[3],
-		.chip	= {
-			.base	= S5P6440_GPI(0),
-			.ngpio	= S5P6440_GPIO_I_NR,
-			.label	= "GPI",
-		},
-	}, {
-		.base	= S5P6440_GPJ_BASE,
-		.config	= &s5p64x0_gpio_cfgs[3],
-		.chip	= {
-			.base	= S5P6440_GPJ(0),
-			.ngpio	= S5P6440_GPIO_J_NR,
-			.label	= "GPJ",
-		},
-	}, {
-		.base	= S5P6440_GPN_BASE,
-		.config	= &s5p64x0_gpio_cfgs[4],
-		.chip	= {
-			.base	= S5P6440_GPN(0),
-			.ngpio	= S5P6440_GPIO_N_NR,
-			.label	= "GPN",
-		},
-	}, {
-		.base	= S5P6440_GPP_BASE,
-		.config	= &s5p64x0_gpio_cfgs[5],
-		.chip	= {
-			.base	= S5P6440_GPP(0),
-			.ngpio	= S5P6440_GPIO_P_NR,
-			.label	= "GPP",
-		},
-	},
-};
-
-void __init s5p64x0_gpiolib_set_cfg(struct s3c_gpio_cfg *chipcfg, int nr_chips)
-{
-	for (; nr_chips > 0; nr_chips--, chipcfg++) {
-		if (!chipcfg->set_config)
-			chipcfg->set_config	= s3c_gpio_setcfg_s3c64xx_4bit;
-		if (!chipcfg->get_config)
-			chipcfg->get_config	= s3c_gpio_getcfg_s3c64xx_4bit;
-		if (!chipcfg->set_pull)
-			chipcfg->set_pull	= s3c_gpio_setpull_updown;
-		if (!chipcfg->get_pull)
-			chipcfg->get_pull	= s3c_gpio_getpull_updown;
-	}
-}
-
-static void __init s5p64x0_gpio_add_rbank_4bit2(struct s3c_gpio_chip *chip,
-						int nr_chips)
-{
-	for (; nr_chips > 0; nr_chips--, chip++) {
-		chip->chip.direction_input = s5p64x0_gpiolib_rbank_4bit2_input;
-		chip->chip.direction_output =
-					s5p64x0_gpiolib_rbank_4bit2_output;
-		s3c_gpiolib_add(chip);
-	}
-}
-
-static int __init s5p6440_gpiolib_init(void)
-{
-	struct s3c_gpio_chip *chips = s5p6440_gpio_2bit;
-	int nr_chips = ARRAY_SIZE(s5p6440_gpio_2bit);
-
-	s5p64x0_gpiolib_set_cfg(s5p64x0_gpio_cfgs,
-				ARRAY_SIZE(s5p64x0_gpio_cfgs));
-
-	for (; nr_chips > 0; nr_chips--, chips++)
-		s3c_gpiolib_add(chips);
-
-	samsung_gpiolib_add_4bit_chips(s5p6440_gpio_4bit,
-				ARRAY_SIZE(s5p6440_gpio_4bit));
-
-	samsung_gpiolib_add_4bit2_chips(s5p6440_gpio_4bit2,
-				ARRAY_SIZE(s5p6440_gpio_4bit2));
-
-	s5p64x0_gpio_add_rbank_4bit2(s5p6440_gpio_rbank_4bit2,
-				ARRAY_SIZE(s5p6440_gpio_rbank_4bit2));
-
-	return 0;
-}
-arch_initcall(s5p6440_gpiolib_init);
diff --git a/arch/arm/mach-s5p64x0/gpiolib.c b/arch/arm/mach-s5p64x0/gpiolib.c
new file mode 100644
index 0000000..e7fb3b0
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/gpiolib.c
@@ -0,0 +1,511 @@
+/* linux/arch/arm/mach-s5p64x0/gpiolib.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5P64X0 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-clock.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+/*
+ * S5P6440 GPIO bank summary:
+ *
+ * Bank	GPIOs	Style	SlpCon	ExtInt Group
+ * A	6	4Bit	Yes	1
+ * B	7	4Bit	Yes	1
+ * C	8	4Bit	Yes	2
+ * F	2	2Bit	Yes	4 [1]
+ * G	7	4Bit	Yes	5
+ * H	10	4Bit[2]	Yes	6
+ * I	16	2Bit	Yes	None
+ * J	12	2Bit	Yes	None
+ * N	16	2Bit	No	IRQ_EINT
+ * P	8	2Bit	Yes	8
+ * R	15	4Bit[2]	Yes	8
+ *
+ * S5P6450 GPIO bank summary:
+ *
+ * Bank	GPIOs	Style	SlpCon	ExtInt Group
+ * A	6	4Bit	Yes	1
+ * B	7	4Bit	Yes	1
+ * C	8	4Bit	Yes	2
+ * D	8	4Bit	Yes	None
+ * F	2	2Bit	Yes	None
+ * G	14	4Bit[2]	Yes	5
+ * H	10	4Bit[2]	Yes	6
+ * I	16	2Bit	Yes	None
+ * J	12	2Bit	Yes	None
+ * K	5	4Bit	Yes	None
+ * N	16	2Bit	No	IRQ_EINT
+ * P	11	2Bit	Yes	8
+ * Q	14	2Bit	Yes	None
+ * R	15	4Bit[2]	Yes	None
+ * S	8	2Bit	Yes	None
+ *
+ * [1] BANKF pins 14,15 do not form part of the external interrupt sources
+ * [2] BANK has two control registers, GPxCON0 and GPxCON1
+ */
+
+static int s5p64x0_gpiolib_rbank_4bit2_input(struct gpio_chip *chip,
+					     unsigned int offset)
+{
+	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+	void __iomem *base = ourchip->base;
+	void __iomem *regcon = base;
+	unsigned long con;
+	unsigned long flags;
+
+	switch (offset) {
+	case 6:
+		offset += 1;
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		regcon -= 4;
+		break;
+	default:
+		offset -= 7;
+		break;
+	}
+
+	s3c_gpio_lock(ourchip, flags);
+
+	con = __raw_readl(regcon);
+	con &= ~(0xf << con_4bit_shift(offset));
+	__raw_writel(con, regcon);
+
+	s3c_gpio_unlock(ourchip, flags);
+
+	return 0;
+}
+
+static int s5p64x0_gpiolib_rbank_4bit2_output(struct gpio_chip *chip,
+					      unsigned int offset, int value)
+{
+	struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+	void __iomem *base = ourchip->base;
+	void __iomem *regcon = base;
+	unsigned long con;
+	unsigned long dat;
+	unsigned long flags;
+	unsigned con_offset  = offset;
+
+	switch (con_offset) {
+	case 6:
+		con_offset += 1;
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		regcon -= 4;
+		break;
+	default:
+		con_offset -= 7;
+		break;
+	}
+
+	s3c_gpio_lock(ourchip, flags);
+
+	con = __raw_readl(regcon);
+	con &= ~(0xf << con_4bit_shift(con_offset));
+	con |= 0x1 << con_4bit_shift(con_offset);
+
+	dat = __raw_readl(base + GPIODAT_OFF);
+	if (value)
+		dat |= 1 << offset;
+	else
+		dat &= ~(1 << offset);
+
+	__raw_writel(con, regcon);
+	__raw_writel(dat, base + GPIODAT_OFF);
+
+	s3c_gpio_unlock(ourchip, flags);
+
+	return 0;
+}
+
+int s5p64x0_gpio_setcfg_4bit_rbank(struct s3c_gpio_chip *chip,
+				   unsigned int off, unsigned int cfg)
+{
+	void __iomem *reg = chip->base;
+	unsigned int shift;
+	u32 con;
+
+	switch (off) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		shift = (off & 7) * 4;
+		reg -= 4;
+		break;
+	case 6:
+		shift = ((off + 1) & 7) * 4;
+		reg -= 4;
+	default:
+		shift = ((off + 1) & 7) * 4;
+		break;
+	}
+
+	if (s3c_gpio_is_cfg_special(cfg)) {
+		cfg &= 0xf;
+		cfg <<= shift;
+	}
+
+	con = __raw_readl(reg);
+	con &= ~(0xf << shift);
+	con |= cfg;
+	__raw_writel(con, reg);
+
+	return 0;
+}
+
+static struct s3c_gpio_cfg s5p64x0_gpio_cfgs[] = {
+	{
+		.cfg_eint	= 0,
+	}, {
+		.cfg_eint	= 7,
+	}, {
+		.cfg_eint	= 3,
+		.set_config	= s5p64x0_gpio_setcfg_4bit_rbank,
+	}, {
+		.cfg_eint	= 0,
+		.set_config	= s3c_gpio_setcfg_s3c24xx,
+		.get_config	= s3c_gpio_getcfg_s3c24xx,
+	}, {
+		.cfg_eint	= 2,
+		.set_config	= s3c_gpio_setcfg_s3c24xx,
+		.get_config	= s3c_gpio_getcfg_s3c24xx,
+	}, {
+		.cfg_eint	= 3,
+		.set_config	= s3c_gpio_setcfg_s3c24xx,
+		.get_config	= s3c_gpio_getcfg_s3c24xx,
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_4bit[] = {
+	{
+		.base	= S5P64X0_GPA_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPA(0),
+			.ngpio	= S5P6440_GPIO_A_NR,
+			.label	= "GPA",
+		},
+	}, {
+		.base	= S5P64X0_GPB_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPB(0),
+			.ngpio	= S5P6440_GPIO_B_NR,
+			.label	= "GPB",
+		},
+	}, {
+		.base	= S5P64X0_GPC_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPC(0),
+			.ngpio	= S5P6440_GPIO_C_NR,
+			.label	= "GPC",
+		},
+	}, {
+		.base	= S5P64X0_GPG_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPG(0),
+			.ngpio	= S5P6440_GPIO_G_NR,
+			.label	= "GPG",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_4bit2[] = {
+	{
+		.base	= S5P64X0_GPH_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6440_GPH(0),
+			.ngpio	= S5P6440_GPIO_H_NR,
+			.label	= "GPH",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_rbank_4bit2[] = {
+	{
+		.base	= S5P64X0_GPR_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[2],
+		.chip	= {
+			.base	= S5P6440_GPR(0),
+			.ngpio	= S5P6440_GPIO_R_NR,
+			.label	= "GPR",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6440_gpio_2bit[] = {
+	{
+		.base	= S5P64X0_GPF_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6440_GPF(0),
+			.ngpio	= S5P6440_GPIO_F_NR,
+			.label	= "GPF",
+		},
+	}, {
+		.base	= S5P64X0_GPI_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6440_GPI(0),
+			.ngpio	= S5P6440_GPIO_I_NR,
+			.label	= "GPI",
+		},
+	}, {
+		.base	= S5P64X0_GPJ_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6440_GPJ(0),
+			.ngpio	= S5P6440_GPIO_J_NR,
+			.label	= "GPJ",
+		},
+	}, {
+		.base	= S5P64X0_GPN_BASE,
+		.config	= &s5p64x0_gpio_cfgs[4],
+		.chip	= {
+			.base	= S5P6440_GPN(0),
+			.ngpio	= S5P6440_GPIO_N_NR,
+			.label	= "GPN",
+		},
+	}, {
+		.base	= S5P64X0_GPP_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6440_GPP(0),
+			.ngpio	= S5P6440_GPIO_P_NR,
+			.label	= "GPP",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_4bit[] = {
+	{
+		.base	= S5P64X0_GPA_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPA(0),
+			.ngpio	= S5P6450_GPIO_A_NR,
+			.label	= "GPA",
+		},
+	}, {
+		.base	= S5P64X0_GPB_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPB(0),
+			.ngpio	= S5P6450_GPIO_B_NR,
+			.label	= "GPB",
+		},
+	}, {
+		.base	= S5P64X0_GPC_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPC(0),
+			.ngpio	= S5P6450_GPIO_C_NR,
+			.label	= "GPC",
+		},
+	}, {
+		.base	= S5P6450_GPD_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPD(0),
+			.ngpio	= S5P6450_GPIO_D_NR,
+			.label	= "GPD",
+		},
+	}, {
+		.base	= S5P6450_GPK_BASE,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPK(0),
+			.ngpio	= S5P6450_GPIO_K_NR,
+			.label	= "GPK",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_4bit2[] = {
+	{
+		.base	= S5P64X0_GPG_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPG(0),
+			.ngpio	= S5P6450_GPIO_G_NR,
+			.label	= "GPG",
+		},
+	}, {
+		.base	= S5P64X0_GPH_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[1],
+		.chip	= {
+			.base	= S5P6450_GPH(0),
+			.ngpio	= S5P6450_GPIO_H_NR,
+			.label	= "GPH",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_rbank_4bit2[] = {
+	{
+		.base	= S5P64X0_GPR_BASE + 0x4,
+		.config	= &s5p64x0_gpio_cfgs[2],
+		.chip	= {
+			.base	= S5P6450_GPR(0),
+			.ngpio	= S5P6450_GPIO_R_NR,
+			.label	= "GPR",
+		},
+	},
+};
+
+static struct s3c_gpio_chip s5p6450_gpio_2bit[] = {
+	{
+		.base	= S5P64X0_GPF_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6450_GPF(0),
+			.ngpio	= S5P6450_GPIO_F_NR,
+			.label	= "GPF",
+		},
+	}, {
+		.base	= S5P64X0_GPI_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6450_GPI(0),
+			.ngpio	= S5P6450_GPIO_I_NR,
+			.label	= "GPI",
+		},
+	}, {
+		.base	= S5P64X0_GPJ_BASE,
+		.config	= &s5p64x0_gpio_cfgs[3],
+		.chip	= {
+			.base	= S5P6450_GPJ(0),
+			.ngpio	= S5P6450_GPIO_J_NR,
+			.label	= "GPJ",
+		},
+	}, {
+		.base	= S5P64X0_GPN_BASE,
+		.config	= &s5p64x0_gpio_cfgs[4],
+		.chip	= {
+			.base	= S5P6450_GPN(0),
+			.ngpio	= S5P6450_GPIO_N_NR,
+			.label	= "GPN",
+		},
+	}, {
+		.base	= S5P64X0_GPP_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6450_GPP(0),
+			.ngpio	= S5P6450_GPIO_P_NR,
+			.label	= "GPP",
+		},
+	}, {
+		.base	= S5P6450_GPQ_BASE,
+		.config	= &s5p64x0_gpio_cfgs[4],
+		.chip	= {
+			.base	= S5P6450_GPQ(0),
+			.ngpio	= S5P6450_GPIO_Q_NR,
+			.label	= "GPQ",
+		},
+	}, {
+		.base	= S5P6450_GPS_BASE,
+		.config	= &s5p64x0_gpio_cfgs[5],
+		.chip	= {
+			.base	= S5P6450_GPS(0),
+			.ngpio	= S5P6450_GPIO_S_NR,
+			.label	= "GPS",
+		},
+	},
+};
+
+void __init s5p64x0_gpiolib_set_cfg(struct s3c_gpio_cfg *chipcfg, int nr_chips)
+{
+	for (; nr_chips > 0; nr_chips--, chipcfg++) {
+		if (!chipcfg->set_config)
+			chipcfg->set_config	= s3c_gpio_setcfg_s3c64xx_4bit;
+		if (!chipcfg->get_config)
+			chipcfg->get_config	= s3c_gpio_getcfg_s3c64xx_4bit;
+		if (!chipcfg->set_pull)
+			chipcfg->set_pull	= s3c_gpio_setpull_updown;
+		if (!chipcfg->get_pull)
+			chipcfg->get_pull	= s3c_gpio_getpull_updown;
+	}
+}
+
+static void __init s5p64x0_gpio_add_rbank_4bit2(struct s3c_gpio_chip *chip,
+						int nr_chips)
+{
+	for (; nr_chips > 0; nr_chips--, chip++) {
+		chip->chip.direction_input = s5p64x0_gpiolib_rbank_4bit2_input;
+		chip->chip.direction_output =
+					s5p64x0_gpiolib_rbank_4bit2_output;
+		s3c_gpiolib_add(chip);
+	}
+}
+
+static int __init s5p64x0_gpiolib_init(void)
+{
+	unsigned int chipid;
+
+	chipid = __raw_readl(S5P64X0_SYS_ID);
+
+	s5p64x0_gpiolib_set_cfg(s5p64x0_gpio_cfgs,
+				ARRAY_SIZE(s5p64x0_gpio_cfgs));
+
+	if ((chipid & 0xff000) == 0x50000) {
+		samsung_gpiolib_add_2bit_chips(s5p6450_gpio_2bit,
+					ARRAY_SIZE(s5p6450_gpio_2bit));
+
+		samsung_gpiolib_add_4bit_chips(s5p6450_gpio_4bit,
+					ARRAY_SIZE(s5p6450_gpio_4bit));
+
+		samsung_gpiolib_add_4bit2_chips(s5p6450_gpio_4bit2,
+					ARRAY_SIZE(s5p6450_gpio_4bit2));
+
+		s5p64x0_gpio_add_rbank_4bit2(s5p6450_gpio_rbank_4bit2,
+					ARRAY_SIZE(s5p6450_gpio_rbank_4bit2));
+	} else {
+		samsung_gpiolib_add_2bit_chips(s5p6440_gpio_2bit,
+					ARRAY_SIZE(s5p6440_gpio_2bit));
+
+		samsung_gpiolib_add_4bit_chips(s5p6440_gpio_4bit,
+					ARRAY_SIZE(s5p6440_gpio_4bit));
+
+		samsung_gpiolib_add_4bit2_chips(s5p6440_gpio_4bit2,
+					ARRAY_SIZE(s5p6440_gpio_4bit2));
+
+		s5p64x0_gpio_add_rbank_4bit2(s5p6440_gpio_rbank_4bit2,
+					ARRAY_SIZE(s5p6440_gpio_rbank_4bit2));
+	}
+
+	return 0;
+}
+core_initcall(s5p64x0_gpiolib_init);
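
The R-bank handlers above fold the bank's two control registers into one switch: the chip base points at GPRCON1, offsets 0..6 are steered four bytes back to GPRCON0 (offset 6 taking the nibble one slot up), and offsets 7 and above stay in GPRCON1. A small helper that spells out that mapping, purely illustrative and not part of the patch:

#include <linux/io.h>

/* Illustrative only: which control register and shift an R-bank offset
 * uses, mirroring the switch statements in the handlers above. The chip
 * base is assumed to point at GPRCON1, with GPRCON0 four bytes below it. */
static void __iomem *rbank_con_reg(void __iomem *base, unsigned int offset,
				   unsigned int *shift)
{
	if (offset <= 6) {
		/* offsets 0..5 use their own nibble, offset 6 skips one */
		unsigned int nibble = (offset == 6) ? offset + 1 : offset;

		*shift = nibble * 4;
		return base - 4;		/* GPRCON0 */
	}

	*shift = (offset - 7) * 4;
	return base;				/* GPRCON1 */
}
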
diff --git a/arch/arm/mach-s5p64x0/include/mach/gpio.h b/arch/arm/mach-s5p64x0/include/mach/gpio.h
index 5486c8f..adb5f29 100644
--- a/arch/arm/mach-s5p64x0/include/mach/gpio.h
+++ b/arch/arm/mach-s5p64x0/include/mach/gpio.h
@@ -23,7 +23,7 @@
 #define S5P6440_GPIO_A_NR	(6)
 #define S5P6440_GPIO_B_NR	(7)
 #define S5P6440_GPIO_C_NR	(8)
-#define S5P6440_GPIO_F_NR	(2)
+#define S5P6440_GPIO_F_NR	(16)
 #define S5P6440_GPIO_G_NR	(7)
 #define S5P6440_GPIO_H_NR	(10)
 #define S5P6440_GPIO_I_NR	(16)
@@ -36,7 +36,7 @@
 #define S5P6450_GPIO_B_NR	(7)
 #define S5P6450_GPIO_C_NR	(8)
 #define S5P6450_GPIO_D_NR	(8)
-#define S5P6450_GPIO_F_NR	(2)
+#define S5P6450_GPIO_F_NR	(16)
 #define S5P6450_GPIO_G_NR	(14)
 #define S5P6450_GPIO_H_NR	(10)
 #define S5P6450_GPIO_I_NR	(16)
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h
index 31e5341..95c9125 100644
--- a/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5p64x0/include/mach/map.h
  *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
  * S5P64X0 - Memory map definitions
@@ -16,27 +16,63 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5P64X0_PA_SDRAM	(0x20000000)
+#define S5P64X0_PA_SDRAM	0x20000000
 
-#define S5P64X0_PA_CHIPID	(0xE0000000)
+#define S5P64X0_PA_CHIPID	0xE0000000
+
+#define S5P64X0_PA_SYSCON	0xE0100000
+
+#define S5P64X0_PA_GPIO		0xE0308000
+
+#define S5P64X0_PA_VIC0		0xE4000000
+#define S5P64X0_PA_VIC1		0xE4100000
+
+#define S5P64X0_PA_SROMC	0xE7000000
+
+#define S5P64X0_PA_PDMA		0xE9000000
+
+#define S5P64X0_PA_TIMER	0xEA000000
+#define S5P64X0_PA_RTC		0xEA100000
+#define S5P64X0_PA_WDT		0xEA200000
+
+#define S5P6440_PA_IIC0		0xEC104000
+#define S5P6440_PA_IIC1		0xEC20F000
+#define S5P6450_PA_IIC0		0xEC100000
+#define S5P6450_PA_IIC1		0xEC200000
+
+#define S5P64X0_PA_SPI0		0xEC400000
+#define S5P64X0_PA_SPI1		0xEC500000
+
+#define S5P64X0_PA_HSOTG	0xED100000
+
+#define S5P64X0_PA_HSMMC(x)	(0xED800000 + ((x) * 0x100000))
+
+#define S5P64X0_PA_I2S		0xF2000000
+#define S5P6450_PA_I2S1		0xF2800000
+#define S5P6450_PA_I2S2		0xF2900000
+
+#define S5P64X0_PA_PCM		0xF2100000
+
+#define S5P64X0_PA_ADC		0xF3000000
+
+/* Compatibility Defines */
+
+#define S3C_PA_HSMMC0		S5P64X0_PA_HSMMC(0)
+#define S3C_PA_HSMMC1		S5P64X0_PA_HSMMC(1)
+#define S3C_PA_HSMMC2		S5P64X0_PA_HSMMC(2)
+#define S3C_PA_IIC		S5P6440_PA_IIC0
+#define S3C_PA_IIC1		S5P6440_PA_IIC1
+#define S3C_PA_RTC		S5P64X0_PA_RTC
+#define S3C_PA_WDT		S5P64X0_PA_WDT
+
 #define S5P_PA_CHIPID		S5P64X0_PA_CHIPID
-
-#define S5P64X0_PA_SYSCON	(0xE0100000)
+#define S5P_PA_SROMC		S5P64X0_PA_SROMC
 #define S5P_PA_SYSCON		S5P64X0_PA_SYSCON
-
-#define S5P64X0_PA_GPIO		(0xE0308000)
-
-#define S5P64X0_PA_VIC0		(0xE4000000)
-#define S5P64X0_PA_VIC1		(0xE4100000)
-
-#define S5P64X0_PA_PDMA		(0xE9000000)
-
-#define S5P64X0_PA_TIMER	(0xEA000000)
 #define S5P_PA_TIMER		S5P64X0_PA_TIMER
 
-#define S5P64X0_PA_RTC		(0xEA100000)
+#define SAMSUNG_PA_ADC		S5P64X0_PA_ADC
 
-#define S5P64X0_PA_WDT		(0xEA200000)
+/* UART */
 
 #define S5P6440_PA_UART(x)	(0xEC000000 + ((x) * S3C_UART_OFFSET))
 #define S5P6450_PA_UART(x)	((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
@@ -50,34 +86,4 @@
 
 #define S5P_SZ_UART		SZ_256
 
-#define S5P6440_PA_IIC0		(0xEC104000)
-#define S5P6440_PA_IIC1		(0xEC20F000)
-#define S5P6450_PA_IIC0		(0xEC100000)
-#define S5P6450_PA_IIC1		(0xEC200000)
-
-#define S5P64X0_PA_SPI0		(0xEC400000)
-#define S5P64X0_PA_SPI1		(0xEC500000)
-
-#define S5P64X0_PA_HSOTG	(0xED100000)
-
-#define S5P64X0_PA_HSMMC(x)	(0xED800000 + ((x) * 0x100000))
-
-#define S5P64X0_PA_I2S		(0xF2000000)
-
-#define S5P64X0_PA_PCM		(0xF2100000)
-
-#define S5P64X0_PA_ADC		(0xF3000000)
-
-/* compatibiltiy defines. */
-
-#define S3C_PA_HSMMC0		S5P64X0_PA_HSMMC(0)
-#define S3C_PA_HSMMC1		S5P64X0_PA_HSMMC(1)
-#define S3C_PA_HSMMC2		S5P64X0_PA_HSMMC(2)
-#define S3C_PA_IIC		S5P6440_PA_IIC0
-#define S3C_PA_IIC1		S5P6440_PA_IIC1
-#define S3C_PA_RTC		S5P64X0_PA_RTC
-#define S3C_PA_WDT		S5P64X0_PA_WDT
-
-#define SAMSUNG_PA_ADC		S5P64X0_PA_ADC
-
 #endif /* __ASM_ARCH_MAP_H */
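
The compatibility defines are what let common plat-samsung device code stay SoC-agnostic; a hedged illustration of the pattern follows, with the region size chosen only for the example rather than taken from the patch.

#include <linux/ioport.h>

#include <mach/map.h>

/* Illustrative only: a generic device table keyed off the S3C_* aliases,
 * which map.h above resolves to the S5P64X0 physical addresses. */
static struct resource example_hsmmc0_resource[] = {
	[0] = {
		.start	= S3C_PA_HSMMC0,
		.end	= S3C_PA_HSMMC0 + 0x1000 - 1,	/* size is an example */
		.flags	= IORESOURCE_MEM,
	},
};
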
diff --git a/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h b/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h
index 85f448e..0953ef6 100644
--- a/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s5p64x0/include/mach/regs-gpio.h
@@ -15,48 +15,23 @@
 
 #include <mach/map.h>
 
-/* Will be implemented S5P6442 GPIOlib */
-
 /* Base addresses for each of the banks */
 
-#define S5P6440_GPA_BASE		(S5P_VA_GPIO + 0x0000)
-#define S5P6440_GPB_BASE		(S5P_VA_GPIO + 0x0020)
-#define S5P6440_GPC_BASE		(S5P_VA_GPIO + 0x0040)
-#define S5P6440_GPF_BASE		(S5P_VA_GPIO + 0x00A0)
-#define S5P6440_GPG_BASE		(S5P_VA_GPIO + 0x00C0)
-#define S5P6440_GPH_BASE		(S5P_VA_GPIO + 0x00E0)
-#define S5P6440_GPI_BASE		(S5P_VA_GPIO + 0x0100)
-#define S5P6440_GPJ_BASE		(S5P_VA_GPIO + 0x0120)
-#define S5P6440_GPN_BASE		(S5P_VA_GPIO + 0x0830)
-#define S5P6440_GPP_BASE		(S5P_VA_GPIO + 0x0160)
-#define S5P6440_GPR_BASE		(S5P_VA_GPIO + 0x0290)
+#define S5P64X0_GPA_BASE		(S5P_VA_GPIO + 0x0000)
+#define S5P64X0_GPB_BASE		(S5P_VA_GPIO + 0x0020)
+#define S5P64X0_GPC_BASE		(S5P_VA_GPIO + 0x0040)
+#define S5P64X0_GPF_BASE		(S5P_VA_GPIO + 0x00A0)
+#define S5P64X0_GPG_BASE		(S5P_VA_GPIO + 0x00C0)
+#define S5P64X0_GPH_BASE		(S5P_VA_GPIO + 0x00E0)
+#define S5P64X0_GPI_BASE		(S5P_VA_GPIO + 0x0100)
+#define S5P64X0_GPJ_BASE		(S5P_VA_GPIO + 0x0120)
+#define S5P64X0_GPN_BASE		(S5P_VA_GPIO + 0x0830)
+#define S5P64X0_GPP_BASE		(S5P_VA_GPIO + 0x0160)
+#define S5P64X0_GPR_BASE		(S5P_VA_GPIO + 0x0290)
 
-#define S5P6440_EINT0CON0		(S5P_VA_GPIO + 0x900)
-#define S5P6440_EINT0FLTCON0		(S5P_VA_GPIO + 0x910)
-#define S5P6440_EINT0FLTCON1		(S5P_VA_GPIO + 0x914)
-#define S5P6440_EINT0MASK		(S5P_VA_GPIO + 0x920)
-#define S5P6440_EINT0PEND		(S5P_VA_GPIO + 0x924)
-
-/* for LCD */
-
-#define S5P6440_SPCON_LCD_SEL_RGB	(1 << 0)
-#define S5P6440_SPCON_LCD_SEL_MASK	(3 << 0)
-
-/*
- * These set of macros are not really useful for the
- * GPF/GPI/GPJ/GPN/GPP, useful for others set of GPIO's (4 bit)
- */
-
-#define S5P6440_GPIO_CONMASK(__gpio)	(0xf << ((__gpio) * 4))
-#define S5P6440_GPIO_INPUT(__gpio)	(0x0 << ((__gpio) * 4))
-#define S5P6440_GPIO_OUTPUT(__gpio)	(0x1 << ((__gpio) * 4))
-
-/*
- * Use these macros for GPF/GPI/GPJ/GPN/GPP set of GPIO (2 bit)
- */
-
-#define S5P6440_GPIO2_CONMASK(__gpio)	(0x3 << ((__gpio) * 2))
-#define S5P6440_GPIO2_INPUT(__gpio)	(0x0 << ((__gpio) * 2))
-#define S5P6440_GPIO2_OUTPUT(__gpio)	(0x1 << ((__gpio) * 2))
+#define S5P6450_GPD_BASE		(S5P_VA_GPIO + 0x0060)
+#define S5P6450_GPK_BASE		(S5P_VA_GPIO + 0x0140)
+#define S5P6450_GPQ_BASE		(S5P_VA_GPIO + 0x0180)
+#define S5P6450_GPS_BASE		(S5P_VA_GPIO + 0x0300)
 
 #endif /* __ASM_ARCH_REGS_GPIO_H */
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 87c3f03..e5beb84 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -95,6 +95,7 @@
 	&s3c_device_i2c1,
 	&s3c_device_ts,
 	&s3c_device_wdt,
+	&samsung_asoc_dma,
 	&s5p6440_device_iis,
 };
 
@@ -117,6 +118,7 @@
 
 static struct i2c_board_info smdk6440_i2c_devs0[] __initdata = {
 	{ I2C_BOARD_INFO("24c08", 0x50), },
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 };
 
 static struct i2c_board_info smdk6440_i2c_devs1[] __initdata = {
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index d609f5a..3a20de0 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -113,6 +113,7 @@
 	&s3c_device_i2c1,
 	&s3c_device_ts,
 	&s3c_device_wdt,
+	&samsung_asoc_dma,
 	&s5p6450_device_iis0,
 	/* s5p6450_device_spi0 will be added */
 };
@@ -135,6 +136,7 @@
 };
 
 static struct i2c_board_info smdk6450_i2c_devs0[] __initdata = {
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 	{ I2C_BOARD_INFO("24c08", 0x50), },	/* Samsung KS24C080C EEPROM */
 };
 
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 2d4a761..0305e9b 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -396,7 +396,7 @@
  * recommended to keep the following clocks disabled until the driver requests
  * for enabling the clock.
  */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "cssys",
 		.id		= -1,
@@ -1381,8 +1381,6 @@
 
 void __init s5pc100_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
 	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
@@ -1393,16 +1391,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
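
For reference, the open-coded loop removed here amounted to the sketch below; the common s3c_register_clocks()/s3c_disable_clocks() helpers are assumed to perform the same register-then-gate pass, though their exact implementation lives in plat-samsung and may differ.

#include <linux/kernel.h>

#include <plat/clock.h>	/* assumed home of the Samsung struct clk */

/* Rough equivalent of the loop this hunk removes; illustrative only. */
static void example_register_and_gate(struct clk *clkp, int nr_clks)
{
	int ret;

	for (; nr_clks > 0; nr_clks--, clkp++) {
		ret = s3c24xx_register_clock(clkp);
		if (ret < 0)
			printk(KERN_ERR "Failed to register clock %s (%d)\n",
			       clkp->name, ret);

		/* gate the clock until a driver explicitly enables it */
		(clkp->enable)(clkp, 0);
	}
}
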
diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c
index 564e195..ab2d271 100644
--- a/arch/arm/mach-s5pc100/dev-audio.c
+++ b/arch/arm/mach-s5pc100/dev-audio.c
@@ -23,17 +23,14 @@
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
+	case 0: /* Dedicated pins */
+		break;
 	case 1:
 		s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(2));
 		break;
-
 	case 2:
 		s3c_gpio_cfgpin_range(S5PC100_GPG3(0), 5, S3C_GPIO_SFN(4));
 		break;
-
-	case -1: /* Dedicated pins */
-		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -42,8 +39,20 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static const char *rclksrc_v5[] = {
+	[0] = "iis",
+	[1] = "i2sclkd2",
+};
+
+static struct s3c_audio_pdata i2sv5_pdata = {
 	.cfg_gpio = s5pc100_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc_v5,
+		},
+	},
 };
 
 static struct resource s5pc100_iis0_resource[] = {
@@ -62,15 +71,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5pc100_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis0_resource),
 	.resource	  = s5pc100_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "sclk_audio",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pc100_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -93,12 +121,12 @@
 };
 
 struct platform_device s5pc100_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis1_resource),
 	.resource	  = s5pc100_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -121,12 +149,12 @@
 };
 
 struct platform_device s5pc100_device_iis2 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 2,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis2_resource),
 	.resource	  = s5pc100_iis2_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -253,7 +281,7 @@
 static u64 s5pc100_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s5pc100_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s5pc100_ac97_resource),
 	.resource	  = s5pc100_ac97_resource,
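
On the consumer side, the new platform data carries both the GPIO hook and the root clock source names. The excerpt below is a hypothetical probe, not the actual samsung-i2s driver, showing how such data would typically be read.

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <plat/audio.h>	/* assumed home of struct s3c_audio_pdata */

/* Hypothetical driver-side view of the platform data defined above. */
static int example_i2s_probe(struct platform_device *pdev)
{
	struct s3c_audio_pdata *pdata = pdev->dev.platform_data;
	struct clk *rclk;

	if (!pdata)
		return -EINVAL;

	/* route the port's pins before touching the controller */
	if (pdata->cfg_gpio && pdata->cfg_gpio(pdev))
		return -EINVAL;

	/* root clock source names are now carried in the platform data */
	rclk = clk_get(&pdev->dev, pdata->type.i2s.src_clk[0]);
	if (IS_ERR(rclk))
		return PTR_ERR(rclk);

	clk_put(rclk);
	return 0;
}
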
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 32e9cab..ccbe6b7 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -1,5 +1,8 @@
 /* linux/arch/arm/mach-s5pc100/include/mach/map.h
  *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
  * Copyright 2009 Samsung Electronics Co.
  *	Byungho Min <bhmin@samsung.com>
  *
@@ -16,143 +19,115 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-/*
- * map-base.h has already defined virtual memory address
- * S3C_VA_IRQ		S3C_ADDR(0x00000000)	irq controller(s)
- * S3C_VA_SYS		S3C_ADDR(0x00100000)	system control
- * S3C_VA_MEM		S3C_ADDR(0x00200000)	system control (not used)
- * S3C_VA_TIMER		S3C_ADDR(0x00300000)	timer block
- * S3C_VA_WATCHDOG	S3C_ADDR(0x00400000)	watchdog
- * S3C_VA_UART		S3C_ADDR(0x01000000)	UART
- *
- * S5PC100 specific virtual memory address can be defined here
- * S5PC1XX_VA_GPIO	S3C_ADDR(0x00500000)	GPIO
- *
- */
+#define S5PC100_PA_SDRAM		0x20000000
 
-#define S5PC100_PA_ONENAND_BUF	(0xB0000000)
-#define S5PC100_SZ_ONENAND_BUF	(SZ_256M - SZ_32M)
+#define S5PC100_PA_ONENAND		0xE7100000
+#define S5PC100_PA_ONENAND_BUF		0xB0000000
 
-/* Chip ID */
+#define S5PC100_PA_CHIPID		0xE0000000
 
-#define S5PC100_PA_CHIPID	(0xE0000000)
-#define S5P_PA_CHIPID		S5PC100_PA_CHIPID
+#define S5PC100_PA_SYSCON		0xE0100000
 
-#define S5PC100_PA_SYSCON	(0xE0100000)
-#define S5P_PA_SYSCON		S5PC100_PA_SYSCON
+#define S5PC100_PA_OTHERS		0xE0200000
 
-#define S5PC100_PA_OTHERS	(0xE0200000)
-#define S5PC100_VA_OTHERS	(S3C_VA_SYS + 0x10000)
+#define S5PC100_PA_GPIO			0xE0300000
 
-#define S5PC100_PA_GPIO		(0xE0300000)
-#define S5PC1XX_VA_GPIO		S3C_ADDR(0x00500000)
+#define S5PC100_PA_VIC0			0xE4000000
+#define S5PC100_PA_VIC1			0xE4100000
+#define S5PC100_PA_VIC2			0xE4200000
 
-/* Interrupt */
-#define S5PC100_PA_VIC0		(0xE4000000)
-#define S5PC100_PA_VIC1		(0xE4100000)
-#define S5PC100_PA_VIC2		(0xE4200000)
-#define S5PC100_VA_VIC		S3C_VA_IRQ
-#define S5PC100_VA_VIC_OFFSET	0x10000
-#define S5PC1XX_VA_VIC(x)	(S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
+#define S5PC100_PA_SROMC		0xE7000000
 
+#define S5PC100_PA_CFCON		0xE7800000
 
-#define S5PC100_PA_ONENAND	(0xE7100000)
+#define S5PC100_PA_MDMA			0xE8100000
+#define S5PC100_PA_PDMA0		0xE9000000
+#define S5PC100_PA_PDMA1		0xE9200000
 
-#define S5PC100_PA_CFCON	(0xE7800000)
+#define S5PC100_PA_TIMER		0xEA000000
+#define S5PC100_PA_SYSTIMER		0xEA100000
+#define S5PC100_PA_WATCHDOG		0xEA200000
+#define S5PC100_PA_RTC			0xEA300000
 
-/* DMA */
-#define S5PC100_PA_MDMA		(0xE8100000)
-#define S5PC100_PA_PDMA0	(0xE9000000)
-#define S5PC100_PA_PDMA1	(0xE9200000)
+#define S5PC100_PA_UART			0xEC000000
 
-/* Timer */
-#define S5PC100_PA_TIMER	(0xEA000000)
-#define S5P_PA_TIMER		S5PC100_PA_TIMER
+#define S5PC100_PA_IIC0			0xEC100000
+#define S5PC100_PA_IIC1			0xEC200000
 
-#define S5PC100_PA_SYSTIMER	(0xEA100000)
+#define S5PC100_PA_SPI0			0xEC300000
+#define S5PC100_PA_SPI1			0xEC400000
+#define S5PC100_PA_SPI2			0xEC500000
 
-#define S5PC100_PA_WATCHDOG	(0xEA200000)
-#define S5PC100_PA_RTC		(0xEA300000)
+#define S5PC100_PA_USB_HSOTG		0xED200000
+#define S5PC100_PA_USB_HSPHY		0xED300000
 
-#define S5PC100_PA_UART		(0xEC000000)
+#define S5PC100_PA_HSMMC(x)		(0xED800000 + ((x) * 0x100000))
 
-#define S5P_PA_UART0		(S5PC100_PA_UART + 0x0)
-#define S5P_PA_UART1		(S5PC100_PA_UART + 0x400)
-#define S5P_PA_UART2		(S5PC100_PA_UART + 0x800)
-#define S5P_PA_UART3		(S5PC100_PA_UART + 0xC00)
-#define S5P_SZ_UART		SZ_256
+#define S5PC100_PA_FB			0xEE000000
 
-#define S5PC100_PA_IIC0		(0xEC100000)
-#define S5PC100_PA_IIC1		(0xEC200000)
+#define S5PC100_PA_FIMC0		0xEE200000
+#define S5PC100_PA_FIMC1		0xEE300000
+#define S5PC100_PA_FIMC2		0xEE400000
 
-/* SPI */
-#define S5PC100_PA_SPI0		0xEC300000
-#define S5PC100_PA_SPI1		0xEC400000
-#define S5PC100_PA_SPI2		0xEC500000
+#define S5PC100_PA_I2S0			0xF2000000
+#define S5PC100_PA_I2S1			0xF2100000
+#define S5PC100_PA_I2S2			0xF2200000
 
-/* USB HS OTG */
-#define S5PC100_PA_USB_HSOTG	(0xED200000)
-#define S5PC100_PA_USB_HSPHY	(0xED300000)
+#define S5PC100_PA_AC97			0xF2300000
 
-#define S5PC100_PA_FB		(0xEE000000)
+#define S5PC100_PA_PCM0			0xF2400000
+#define S5PC100_PA_PCM1			0xF2500000
 
-#define S5PC100_PA_FIMC0	(0xEE200000)
-#define S5PC100_PA_FIMC1	(0xEE300000)
-#define S5PC100_PA_FIMC2	(0xEE400000)
+#define S5PC100_PA_SPDIF		0xF2600000
 
-#define S5PC100_PA_I2S0		(0xF2000000)
-#define S5PC100_PA_I2S1		(0xF2100000)
-#define S5PC100_PA_I2S2		(0xF2200000)
+#define S5PC100_PA_TSADC		0xF3000000
 
-#define S5PC100_PA_AC97		0xF2300000
+#define S5PC100_PA_KEYPAD		0xF3100000
 
-/* PCM */
-#define S5PC100_PA_PCM0		0xF2400000
-#define S5PC100_PA_PCM1		0xF2500000
+/* Compatibility Defines */
 
-#define S5PC100_PA_SPDIF	0xF2600000
+#define S3C_PA_FB			S5PC100_PA_FB
+#define S3C_PA_HSMMC0			S5PC100_PA_HSMMC(0)
+#define S3C_PA_HSMMC1			S5PC100_PA_HSMMC(1)
+#define S3C_PA_HSMMC2			S5PC100_PA_HSMMC(2)
+#define S3C_PA_IIC			S5PC100_PA_IIC0
+#define S3C_PA_IIC1			S5PC100_PA_IIC1
+#define S3C_PA_KEYPAD			S5PC100_PA_KEYPAD
+#define S3C_PA_ONENAND			S5PC100_PA_ONENAND
+#define S3C_PA_ONENAND_BUF		S5PC100_PA_ONENAND_BUF
+#define S3C_PA_RTC			S5PC100_PA_RTC
+#define S3C_PA_TSADC			S5PC100_PA_TSADC
+#define S3C_PA_USB_HSOTG		S5PC100_PA_USB_HSOTG
+#define S3C_PA_USB_HSPHY		S5PC100_PA_USB_HSPHY
+#define S3C_PA_WDT			S5PC100_PA_WATCHDOG
 
-#define S5PC100_PA_TSADC	(0xF3000000)
+#define S5P_PA_CHIPID			S5PC100_PA_CHIPID
+#define S5P_PA_FIMC0			S5PC100_PA_FIMC0
+#define S5P_PA_FIMC1			S5PC100_PA_FIMC1
+#define S5P_PA_FIMC2			S5PC100_PA_FIMC2
+#define S5P_PA_SDRAM			S5PC100_PA_SDRAM
+#define S5P_PA_SROMC			S5PC100_PA_SROMC
+#define S5P_PA_SYSCON			S5PC100_PA_SYSCON
+#define S5P_PA_TIMER			S5PC100_PA_TIMER
 
-/* KEYPAD */
-#define S5PC100_PA_KEYPAD	(0xF3100000)
+#define SAMSUNG_PA_ADC			S5PC100_PA_TSADC
+#define SAMSUNG_PA_CFCON		S5PC100_PA_CFCON
+#define SAMSUNG_PA_KEYPAD		S5PC100_PA_KEYPAD
 
-#define S5PC100_PA_HSMMC(x)	(0xED800000 + ((x) * 0x100000))
+#define S5PC100_VA_OTHERS		(S3C_VA_SYS + 0x10000)
 
-#define S5PC100_PA_SDRAM	(0x20000000)
-#define S5P_PA_SDRAM		S5PC100_PA_SDRAM
+#define S3C_SZ_ONENAND_BUF		(SZ_256M - SZ_32M)
 
-/* compatibiltiy defines. */
-#define S3C_PA_UART		S5PC100_PA_UART
-#define S3C_PA_IIC		S5PC100_PA_IIC0
-#define S3C_PA_IIC1		S5PC100_PA_IIC1
-#define S3C_PA_FB		S5PC100_PA_FB
-#define S3C_PA_G2D		S5PC100_PA_G2D
-#define S3C_PA_G3D		S5PC100_PA_G3D
-#define S3C_PA_JPEG		S5PC100_PA_JPEG
-#define S3C_PA_ROTATOR		S5PC100_PA_ROTATOR
-#define S5P_VA_VIC0		S5PC1XX_VA_VIC(0)
-#define S5P_VA_VIC1		S5PC1XX_VA_VIC(1)
-#define S5P_VA_VIC2		S5PC1XX_VA_VIC(2)
-#define S3C_PA_USB_HSOTG	S5PC100_PA_USB_HSOTG
-#define S3C_PA_USB_HSPHY	S5PC100_PA_USB_HSPHY
-#define S3C_PA_HSMMC0		S5PC100_PA_HSMMC(0)
-#define S3C_PA_HSMMC1		S5PC100_PA_HSMMC(1)
-#define S3C_PA_HSMMC2		S5PC100_PA_HSMMC(2)
-#define S3C_PA_KEYPAD		S5PC100_PA_KEYPAD
-#define S3C_PA_WDT		S5PC100_PA_WATCHDOG
-#define S3C_PA_TSADC		S5PC100_PA_TSADC
-#define S3C_PA_ONENAND		S5PC100_PA_ONENAND
-#define S3C_PA_ONENAND_BUF	S5PC100_PA_ONENAND_BUF
-#define S3C_SZ_ONENAND_BUF	S5PC100_SZ_ONENAND_BUF
-#define S3C_PA_RTC		S5PC100_PA_RTC
+/* UART */
 
-#define SAMSUNG_PA_ADC		S5PC100_PA_TSADC
-#define SAMSUNG_PA_CFCON	S5PC100_PA_CFCON
-#define SAMSUNG_PA_KEYPAD	S5PC100_PA_KEYPAD
+#define S3C_PA_UART			S5PC100_PA_UART
 
-#define S5P_PA_FIMC0		S5PC100_PA_FIMC0
-#define S5P_PA_FIMC1		S5PC100_PA_FIMC1
-#define S5P_PA_FIMC2		S5PC100_PA_FIMC2
+#define S5P_PA_UART(x)			(S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0			S5P_PA_UART(0)
+#define S5P_PA_UART1			S5P_PA_UART(1)
+#define S5P_PA_UART2			S5P_PA_UART(2)
+#define S5P_PA_UART3			S5P_PA_UART(3)
 
-#endif /* __ASM_ARCH_C100_MAP_H */
+#define S5P_SZ_UART			SZ_256
+
+#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 18b405d..dd192a2 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -96,6 +96,7 @@
 
 /* I2C0 */
 static struct i2c_board_info i2c_devs0[] __initdata = {
+	{I2C_BOARD_INFO("wm8580", 0x1b),},
 };
 
 /* I2C1 */
@@ -190,6 +191,7 @@
 	&s3c_device_ts,
 	&s3c_device_wdt,
 	&smdkc100_lcd_powerdev,
+	&samsung_asoc_dma,
 	&s5pc100_device_iis0,
 	&samsung_device_keypad,
 	&s5pc100_device_ac97,
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 862f239..53aabef 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -118,6 +118,7 @@
 config MACH_SMDKV210
 	bool "SMDKV210"
 	select CPU_S5PV210
+	select S3C_DEV_FB
 	select S3C_DEV_HSMMC
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_HSMMC2
@@ -130,6 +131,7 @@
 	select SAMSUNG_DEV_IDE
 	select SAMSUNG_DEV_KEYPAD
 	select SAMSUNG_DEV_TS
+	select S5PV210_SETUP_FB_24BPP
 	select S5PV210_SETUP_I2C1
 	select S5PV210_SETUP_I2C2
 	select S5PV210_SETUP_IDE
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 019c3a6..2d59949 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -309,7 +309,7 @@
 	.get_rate	= s5pv210_clk_fout_apll_get_rate,
 };
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "pdma",
 		.id		= 0,
@@ -467,20 +467,20 @@
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1<<21),
 	}, {
-		.name		= "i2s_v50",
+		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1<<4),
 	}, {
-		.name		= "i2s_v32",
-		.id		= 0,
+		.name		= "iis",
+		.id		= 1,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 5),
 	}, {
-		.name		= "i2s_v32",
-		.id		= 1,
+		.name		= "iis",
+		.id		= 2,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 6),
@@ -525,6 +525,12 @@
 		.parent		= &clk_pclk_psys.clk,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 20),
+	}, {
+		.name		= "sromc",
+		.id		= -1,
+		.parent		= &clk_hclk_psys.clk,
+		.enable		= s5pv210_clk_ip1_ctrl,
+		.ctrlbit	= (1 << 26),
 	},
 };
 
@@ -1220,13 +1226,9 @@
 
 void __init s5pv210_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
-	ret = s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-	if (ret > 0)
-		printk(KERN_ERR "Failed to register %u clocks\n", ret);
+	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
 
 	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
 		s3c_register_clksrc(sysclks[ptr], 1);
@@ -1234,15 +1236,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index 8eb480e..61e6c24 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -81,11 +81,6 @@
 		.length		= SZ_512K,
 		.type		= MT_DEVICE,
 	}, {
-		.virtual	= (unsigned long)S5P_VA_SROMC,
-		.pfn		= __phys_to_pfn(S5PV210_PA_SROMC),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
 		.virtual	= (unsigned long)S5P_VA_DMC0,
 		.pfn		= __phys_to_pfn(S5PV210_PA_DMC0),
 		.length		= SZ_4K,
diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c
index 1303fcb..8d58f19 100644
--- a/arch/arm/mach-s5pv210/dev-audio.c
+++ b/arch/arm/mach-s5pv210/dev-audio.c
@@ -19,22 +19,24 @@
 #include <mach/dma.h>
 #include <mach/irqs.h>
 
+static const char *rclksrc[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
 static int s5pv210_cfg_i2s(struct platform_device *pdev)
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
+		break;
 	case 1:
 		s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(2));
 		break;
-
 	case 2:
 		s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 5, S3C_GPIO_SFN(4));
 		break;
-
-	case -1:
-		s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
-		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -43,8 +45,15 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static struct s3c_audio_pdata i2sv5_pdata = {
 	.cfg_gpio = s5pv210_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 static struct resource s5pv210_iis0_resource[] = {
@@ -63,15 +72,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5pv210_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis0_resource),
 	.resource	  = s5pv210_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "audio-bus",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pv210_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -94,12 +122,12 @@
 };
 
 struct platform_device s5pv210_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis1_resource),
 	.resource	  = s5pv210_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -122,12 +150,12 @@
 };
 
 struct platform_device s5pv210_device_iis2 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 2,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis2_resource),
 	.resource	  = s5pv210_iis2_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -283,7 +311,7 @@
 static u64 s5pv210_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s5pv210_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s5pv210_ac97_resource),
 	.resource	  = s5pv210_ac97_resource,
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h
index 119b95f..26710b3 100644
--- a/arch/arm/mach-s5pv210/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv210/include/mach/irqs.h
@@ -65,7 +65,7 @@
 #define IRQ_HSMMC0		S5P_IRQ_VIC1(26)
 #define IRQ_HSMMC1		S5P_IRQ_VIC1(27)
 #define IRQ_HSMMC2		S5P_IRQ_VIC1(28)
-#define IRQ_MIPICSI		S5P_IRQ_VIC1(29)
+#define IRQ_MIPI_CSIS		S5P_IRQ_VIC1(29)
 #define IRQ_MIPIDSI		S5P_IRQ_VIC1(30)
 #define IRQ_ONENAND_AUDI	S5P_IRQ_VIC1(31)
 
@@ -132,5 +132,6 @@
 #define IRQ_LCD_FIFO		IRQ_LCD0
 #define IRQ_LCD_VSYNC		IRQ_LCD1
 #define IRQ_LCD_SYSTEM		IRQ_LCD2
+#define IRQ_MIPI_CSIS0		IRQ_MIPI_CSIS
 
 #endif /* ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index 861d7fe..1dd5883 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5pv210/include/mach/map.h
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
  * S5PV210 - Memory map definitions
@@ -16,116 +16,120 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5PC110_PA_ONENAND	(0xB0000000)
-#define S5P_PA_ONENAND		S5PC110_PA_ONENAND
+#define S5PV210_PA_SDRAM		0x20000000
 
-#define S5PC110_PA_ONENAND_DMA	(0xB0600000)
-#define S5P_PA_ONENAND_DMA	S5PC110_PA_ONENAND_DMA
+#define S5PV210_PA_SROM_BANK5		0xA8000000
 
-#define S5PV210_PA_CHIPID	(0xE0000000)
-#define S5P_PA_CHIPID		S5PV210_PA_CHIPID
+#define S5PC110_PA_ONENAND		0xB0000000
+#define S5PC110_PA_ONENAND_DMA		0xB0600000
 
-#define S5PV210_PA_SYSCON	(0xE0100000)
-#define S5P_PA_SYSCON		S5PV210_PA_SYSCON
+#define S5PV210_PA_CHIPID		0xE0000000
 
-#define S5PV210_PA_GPIO		(0xE0200000)
+#define S5PV210_PA_SYSCON		0xE0100000
 
-/* SPI */
-#define S5PV210_PA_SPI0		0xE1300000
-#define S5PV210_PA_SPI1		0xE1400000
+#define S5PV210_PA_GPIO			0xE0200000
 
-#define S5PV210_PA_KEYPAD	(0xE1600000)
+#define S5PV210_PA_SPDIF		0xE1100000
 
-#define S5PV210_PA_IIC0		(0xE1800000)
-#define S5PV210_PA_IIC1		(0xFAB00000)
-#define S5PV210_PA_IIC2		(0xE1A00000)
+#define S5PV210_PA_SPI0			0xE1300000
+#define S5PV210_PA_SPI1			0xE1400000
 
-#define S5PV210_PA_TIMER	(0xE2500000)
-#define S5P_PA_TIMER		S5PV210_PA_TIMER
+#define S5PV210_PA_KEYPAD		0xE1600000
 
-#define S5PV210_PA_SYSTIMER	(0xE2600000)
+#define S5PV210_PA_ADC			0xE1700000
 
-#define S5PV210_PA_WATCHDOG	(0xE2700000)
+#define S5PV210_PA_IIC0			0xE1800000
+#define S5PV210_PA_IIC1			0xFAB00000
+#define S5PV210_PA_IIC2			0xE1A00000
 
-#define S5PV210_PA_RTC		(0xE2800000)
-#define S5PV210_PA_UART		(0xE2900000)
+#define S5PV210_PA_AC97			0xE2200000
 
-#define S5P_PA_UART0		(S5PV210_PA_UART + 0x0)
-#define S5P_PA_UART1		(S5PV210_PA_UART + 0x400)
-#define S5P_PA_UART2		(S5PV210_PA_UART + 0x800)
-#define S5P_PA_UART3		(S5PV210_PA_UART + 0xC00)
+#define S5PV210_PA_PCM0			0xE2300000
+#define S5PV210_PA_PCM1			0xE1200000
+#define S5PV210_PA_PCM2			0xE2B00000
 
-#define S5P_SZ_UART		SZ_256
+#define S5PV210_PA_TIMER		0xE2500000
+#define S5PV210_PA_SYSTIMER		0xE2600000
+#define S5PV210_PA_WATCHDOG		0xE2700000
+#define S5PV210_PA_RTC			0xE2800000
 
-#define S3C_VA_UARTx(x)		(S3C_VA_UART + ((x) * S3C_UART_OFFSET))
+#define S5PV210_PA_UART			0xE2900000
 
-#define S5PV210_PA_SROMC	(0xE8000000)
+#define S5PV210_PA_SROMC		0xE8000000
 
-#define S5PV210_PA_CFCON	(0xE8200000)
+#define S5PV210_PA_CFCON		0xE8200000
 
-#define S5PV210_PA_MDMA		0xFA200000
-#define S5PV210_PA_PDMA0	0xE0900000
-#define S5PV210_PA_PDMA1	0xE0A00000
+#define S5PV210_PA_HSMMC(x)		(0xEB000000 + ((x) * 0x100000))
 
-#define S5PV210_PA_FB		(0xF8000000)
+#define S5PV210_PA_HSOTG		0xEC000000
+#define S5PV210_PA_HSPHY		0xEC100000
 
-#define S5PV210_PA_FIMC0	(0xFB200000)
-#define S5PV210_PA_FIMC1	(0xFB300000)
-#define S5PV210_PA_FIMC2	(0xFB400000)
+#define S5PV210_PA_IIS0			0xEEE30000
+#define S5PV210_PA_IIS1			0xE2100000
+#define S5PV210_PA_IIS2			0xE2A00000
 
-#define S5PV210_PA_HSMMC(x)	(0xEB000000 + ((x) * 0x100000))
+#define S5PV210_PA_DMC0			0xF0000000
+#define S5PV210_PA_DMC1			0xF1400000
 
-#define S5PV210_PA_HSOTG	(0xEC000000)
-#define S5PV210_PA_HSPHY	(0xEC100000)
+#define S5PV210_PA_VIC0			0xF2000000
+#define S5PV210_PA_VIC1			0xF2100000
+#define S5PV210_PA_VIC2			0xF2200000
+#define S5PV210_PA_VIC3			0xF2300000
 
-#define S5PV210_PA_VIC0		(0xF2000000)
-#define S5PV210_PA_VIC1		(0xF2100000)
-#define S5PV210_PA_VIC2		(0xF2200000)
-#define S5PV210_PA_VIC3		(0xF2300000)
+#define S5PV210_PA_FB			0xF8000000
 
-#define S5PV210_PA_SDRAM	(0x20000000)
-#define S5P_PA_SDRAM		S5PV210_PA_SDRAM
+#define S5PV210_PA_MDMA			0xFA200000
+#define S5PV210_PA_PDMA0		0xE0900000
+#define S5PV210_PA_PDMA1		0xE0A00000
 
-/* S/PDIF */
-#define S5PV210_PA_SPDIF	0xE1100000
+#define S5PV210_PA_MIPI_CSIS		0xFA600000
 
-/* I2S */
-#define S5PV210_PA_IIS0		0xEEE30000
-#define S5PV210_PA_IIS1		0xE2100000
-#define S5PV210_PA_IIS2		0xE2A00000
+#define S5PV210_PA_FIMC0		0xFB200000
+#define S5PV210_PA_FIMC1		0xFB300000
+#define S5PV210_PA_FIMC2		0xFB400000
 
-/* PCM */
-#define S5PV210_PA_PCM0		0xE2300000
-#define S5PV210_PA_PCM1		0xE1200000
-#define S5PV210_PA_PCM2		0xE2B00000
+/* Compatibility Defines */
 
-/* AC97 */
-#define S5PV210_PA_AC97		0xE2200000
+#define S3C_PA_FB			S5PV210_PA_FB
+#define S3C_PA_HSMMC0			S5PV210_PA_HSMMC(0)
+#define S3C_PA_HSMMC1			S5PV210_PA_HSMMC(1)
+#define S3C_PA_HSMMC2			S5PV210_PA_HSMMC(2)
+#define S3C_PA_HSMMC3			S5PV210_PA_HSMMC(3)
+#define S3C_PA_IIC			S5PV210_PA_IIC0
+#define S3C_PA_IIC1			S5PV210_PA_IIC1
+#define S3C_PA_IIC2			S5PV210_PA_IIC2
+#define S3C_PA_RTC			S5PV210_PA_RTC
+#define S3C_PA_USB_HSOTG		S5PV210_PA_HSOTG
+#define S3C_PA_WDT			S5PV210_PA_WATCHDOG
 
-#define S5PV210_PA_ADC		(0xE1700000)
+#define S5P_PA_CHIPID			S5PV210_PA_CHIPID
+#define S5P_PA_FIMC0			S5PV210_PA_FIMC0
+#define S5P_PA_FIMC1			S5PV210_PA_FIMC1
+#define S5P_PA_FIMC2			S5PV210_PA_FIMC2
+#define S5P_PA_MIPI_CSIS0		S5PV210_PA_MIPI_CSIS
+#define S5P_PA_ONENAND			S5PC110_PA_ONENAND
+#define S5P_PA_ONENAND_DMA		S5PC110_PA_ONENAND_DMA
+#define S5P_PA_SDRAM			S5PV210_PA_SDRAM
+#define S5P_PA_SROMC			S5PV210_PA_SROMC
+#define S5P_PA_SYSCON			S5PV210_PA_SYSCON
+#define S5P_PA_TIMER			S5PV210_PA_TIMER
 
-#define S5PV210_PA_DMC0		(0xF0000000)
-#define S5PV210_PA_DMC1		(0xF1400000)
+#define SAMSUNG_PA_ADC			S5PV210_PA_ADC
+#define SAMSUNG_PA_CFCON		S5PV210_PA_CFCON
+#define SAMSUNG_PA_KEYPAD		S5PV210_PA_KEYPAD
 
-/* compatibiltiy defines. */
-#define S3C_PA_UART		S5PV210_PA_UART
-#define S3C_PA_HSMMC0		S5PV210_PA_HSMMC(0)
-#define S3C_PA_HSMMC1		S5PV210_PA_HSMMC(1)
-#define S3C_PA_HSMMC2		S5PV210_PA_HSMMC(2)
-#define S3C_PA_HSMMC3		S5PV210_PA_HSMMC(3)
-#define S3C_PA_IIC		S5PV210_PA_IIC0
-#define S3C_PA_IIC1		S5PV210_PA_IIC1
-#define S3C_PA_IIC2		S5PV210_PA_IIC2
-#define S3C_PA_FB		S5PV210_PA_FB
-#define S3C_PA_RTC		S5PV210_PA_RTC
-#define S3C_PA_WDT		S5PV210_PA_WATCHDOG
-#define S3C_PA_USB_HSOTG	S5PV210_PA_HSOTG
-#define S5P_PA_FIMC0		S5PV210_PA_FIMC0
-#define S5P_PA_FIMC1		S5PV210_PA_FIMC1
-#define S5P_PA_FIMC2		S5PV210_PA_FIMC2
+/* UART */
 
-#define SAMSUNG_PA_ADC		S5PV210_PA_ADC
-#define SAMSUNG_PA_CFCON	S5PV210_PA_CFCON
-#define SAMSUNG_PA_KEYPAD	S5PV210_PA_KEYPAD
+#define S3C_VA_UARTx(x)			(S3C_VA_UART + ((x) * S3C_UART_OFFSET))
+
+#define S3C_PA_UART			S5PV210_PA_UART
+
+#define S5P_PA_UART(x)			(S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0			S5P_PA_UART(0)
+#define S5P_PA_UART1			S5P_PA_UART(1)
+#define S5P_PA_UART2			S5P_PA_UART(2)
+#define S5P_PA_UART3			S5P_PA_UART(3)
+
+#define S5P_SZ_UART			SZ_256
 
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-clock.h b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
index ebaabe0..4c45b74 100644
--- a/arch/arm/mach-s5pv210/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
@@ -161,7 +161,7 @@
 #define S5P_MDNIE_SEL		S5P_CLKREG(0x7008)
 #define S5P_MIPI_PHY_CON0	S5P_CLKREG(0x7200)
 #define S5P_MIPI_PHY_CON1	S5P_CLKREG(0x7204)
-#define S5P_MIPI_CONTROL	S5P_CLKREG(0xE814)
+#define S5P_MIPI_DPHY_CONTROL	S5P_CLKREG(0xE814)
 
 #define S5P_IDLE_CFG_TL_MASK	(3 << 30)
 #define S5P_IDLE_CFG_TM_MASK	(3 << 28)
@@ -195,9 +195,6 @@
 #define S5P_OTHERS_RET_UART		(1 << 28)
 #define S5P_OTHERS_USB_SIG_MASK		(1 << 16)
 
-/* MIPI */
-#define S5P_MIPI_DPHY_EN		(3)
-
 /* S5P_DAC_CONTROL */
 #define S5P_DAC_ENABLE			(1)
 #define S5P_DAC_DISABLE			(0)
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 461aa035..557add4 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -149,7 +149,7 @@
 
 static struct regulator_init_data aquila_ldo3_data = {
 	.constraints	= {
-		.name		= "VUSB/MIPI_1.1V",
+		.name		= "VUSB+MIPI_1.1V",
 		.min_uV		= 1100000,
 		.max_uV		= 1100000,
 		.apply_uV	= 1,
@@ -197,7 +197,7 @@
 
 static struct regulator_init_data aquila_ldo8_data = {
 	.constraints	= {
-		.name		= "VUSB/VADC_3.3V",
+		.name		= "VUSB+VADC_3.3V",
 		.min_uV		= 3300000,
 		.max_uV		= 3300000,
 		.apply_uV	= 1,
@@ -207,7 +207,7 @@
 
 static struct regulator_init_data aquila_ldo9_data = {
 	.constraints	= {
-		.name		= "VCC/VCAM_2.8V",
+		.name		= "VCC+VCAM_2.8V",
 		.min_uV		= 2800000,
 		.max_uV		= 2800000,
 		.apply_uV	= 1,
@@ -381,9 +381,12 @@
 	.buck1_set1	= S5PV210_GPH0(3),
 	.buck1_set2	= S5PV210_GPH0(4),
 	.buck2_set3	= S5PV210_GPH0(5),
-	.buck1_max_voltage1 = 1200000,
-	.buck1_max_voltage2 = 1200000,
-	.buck2_max_voltage = 1200000,
+	.buck1_voltage1	= 1200000,
+	.buck1_voltage2	= 1200000,
+	.buck1_voltage3	= 1200000,
+	.buck1_voltage4	= 1200000,
+	.buck2_voltage1	= 1200000,
+	.buck2_voltage2	= 1200000,
 };
 #endif
 
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index e22d511..056f5c7 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -288,7 +288,7 @@
 
 static struct regulator_init_data goni_ldo3_data = {
 	.constraints	= {
-		.name		= "VUSB/MIPI_1.1V",
+		.name		= "VUSB+MIPI_1.1V",
 		.min_uV		= 1100000,
 		.max_uV		= 1100000,
 		.apply_uV	= 1,
@@ -337,7 +337,7 @@
 
 static struct regulator_init_data goni_ldo8_data = {
 	.constraints	= {
-		.name		= "VUSB/VADC_3.3V",
+		.name		= "VUSB+VADC_3.3V",
 		.min_uV		= 3300000,
 		.max_uV		= 3300000,
 		.apply_uV	= 1,
@@ -347,7 +347,7 @@
 
 static struct regulator_init_data goni_ldo9_data = {
 	.constraints	= {
-		.name		= "VCC/VCAM_2.8V",
+		.name		= "VCC+VCAM_2.8V",
 		.min_uV		= 2800000,
 		.max_uV		= 2800000,
 		.apply_uV	= 1,
@@ -521,9 +521,12 @@
 	.buck1_set1	= S5PV210_GPH0(3),
 	.buck1_set2	= S5PV210_GPH0(4),
 	.buck2_set3	= S5PV210_GPH0(5),
-	.buck1_max_voltage1 = 1200000,
-	.buck1_max_voltage2 = 1200000,
-	.buck2_max_voltage = 1200000,
+	.buck1_voltage1	= 1200000,
+	.buck1_voltage2	= 1200000,
+	.buck1_voltage3	= 1200000,
+	.buck1_voltage4	= 1200000,
+	.buck2_voltage1	= 1200000,
+	.buck2_voltage2	= 1200000,
 };
 #endif
 
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index 5dd1681..ce11a02 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -81,6 +81,7 @@
 };
 
 static struct platform_device *smdkc110_devices[] __initdata = {
+	&samsung_asoc_dma,
 	&s5pv210_device_iis0,
 	&s5pv210_device_ac97,
 	&s5pv210_device_spdif,
@@ -94,6 +95,7 @@
 
 static struct i2c_board_info smdkc110_i2c_devs0[] __initdata = {
 	{ I2C_BOARD_INFO("24c08", 0x50), },     /* Samsung S524AD0XD1 */
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 };
 
 static struct i2c_board_info smdkc110_i2c_devs1[] __initdata = {
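
The codec entries added above only take effect once the board hands its table to the I2C core; the boards already do this in their machine_init paths, roughly as in this hedged excerpt (bus number and table name are illustrative).

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>

/* Illustrative only: registering the audio codec on I2C bus 0. */
static struct i2c_board_info example_i2c0_devs[] __initdata = {
	{ I2C_BOARD_INFO("wm8580", 0x1b), },	/* audio codec */
};

static void __init example_register_codec(void)
{
	i2c_register_board_info(0, example_i2c0_devs,
				ARRAY_SIZE(example_i2c0_devs));
}
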
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index 1fbc45b..bc9fdb5 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -14,16 +14,25 @@
 #include <linux/init.h>
 #include <linux/serial_core.h>
 #include <linux/sysdev.h>
+#include <linux/dm9000.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
 
+#include <video/platform_lcd.h>
+
 #include <mach/map.h>
 #include <mach/regs-clock.h>
+#include <mach/regs-fb.h>
 
 #include <plat/regs-serial.h>
+#include <plat/regs-srom.h>
+#include <plat/gpio-cfg.h>
 #include <plat/s5pv210.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
@@ -33,6 +42,7 @@
 #include <plat/iic.h>
 #include <plat/keypad.h>
 #include <plat/pm.h>
+#include <plat/fb.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKV210_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -102,12 +112,106 @@
 	.cols		= 8,
 };
 
+static struct resource smdkv210_dm9000_resources[] = {
+	[0] = {
+		.start	= S5PV210_PA_SROM_BANK5,
+		.end	= S5PV210_PA_SROM_BANK5,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= S5PV210_PA_SROM_BANK5 + 2,
+		.end	= S5PV210_PA_SROM_BANK5 + 2,
+		.flags	= IORESOURCE_MEM,
+	},
+	[2] = {
+		.start	= IRQ_EINT(9),
+		.end	= IRQ_EINT(9),
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+	},
+};
+
+static struct dm9000_plat_data smdkv210_dm9000_platdata = {
+	.flags		= DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
+	.dev_addr	= { 0x00, 0x09, 0xc0, 0xff, 0xec, 0x48 },
+};
+
+struct platform_device smdkv210_dm9000 = {
+	.name		= "dm9000",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smdkv210_dm9000_resources),
+	.resource	= smdkv210_dm9000_resources,
+	.dev		= {
+		.platform_data	= &smdkv210_dm9000_platdata,
+	},
+};
+
+static void smdkv210_lte480wv_set_power(struct plat_lcd_data *pd,
+					unsigned int power)
+{
+	if (power) {
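+		/* Without the PWM backlight driver, drive GPD0(3) high directly on power up */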
+#if !defined(CONFIG_BACKLIGHT_PWM)
+		gpio_request(S5PV210_GPD0(3), "GPD0");
+		gpio_direction_output(S5PV210_GPD0(3), 1);
+		gpio_free(S5PV210_GPD0(3));
+#endif
+
+		/* fire nRESET on power up */
+		gpio_request(S5PV210_GPH0(6), "GPH0");
+
+		gpio_direction_output(S5PV210_GPH0(6), 1);
+
+		gpio_set_value(S5PV210_GPH0(6), 0);
+		mdelay(10);
+
+		gpio_set_value(S5PV210_GPH0(6), 1);
+		mdelay(10);
+
+		gpio_free(S5PV210_GPH0(6));
+	} else {
+#if !defined(CONFIG_BACKLIGHT_PWM)
+		gpio_request(S5PV210_GPD0(3), "GPD0");
+		gpio_direction_output(S5PV210_GPD0(3), 0);
+		gpio_free(S5PV210_GPD0(3));
+#endif
+	}
+}
+
+static struct plat_lcd_data smdkv210_lcd_lte480wv_data = {
+	.set_power	= smdkv210_lte480wv_set_power,
+};
+
+static struct platform_device smdkv210_lcd_lte480wv = {
+	.name			= "platform-lcd",
+	.dev.parent		= &s3c_device_fb.dev,
+	.dev.platform_data	= &smdkv210_lcd_lte480wv_data,
+};
+
+static struct s3c_fb_pd_win smdkv210_fb_win0 = {
+	.win_mode = {
+		.left_margin	= 13,
+		.right_margin	= 8,
+		.upper_margin	= 7,
+		.lower_margin	= 5,
+		.hsync_len	= 3,
+		.vsync_len	= 1,
+		.xres		= 800,
+		.yres		= 480,
+	},
+	.max_bpp	= 32,
+	.default_bpp	= 24,
+};
+
+static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
+	.win[0]		= &smdkv210_fb_win0,
+	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+	.setup_gpio	= s5pv210_fb_gpio_setup_24bpp,
+};
+
 static struct platform_device *smdkv210_devices[] __initdata = {
-	&s5pv210_device_iis0,
-	&s5pv210_device_ac97,
-	&s5pv210_device_spdif,
 	&s3c_device_adc,
 	&s3c_device_cfcon,
+	&s3c_device_fb,
 	&s3c_device_hsmmc0,
 	&s3c_device_hsmmc1,
 	&s3c_device_hsmmc2,
@@ -115,14 +219,38 @@
 	&s3c_device_i2c0,
 	&s3c_device_i2c1,
 	&s3c_device_i2c2,
-	&samsung_device_keypad,
 	&s3c_device_rtc,
 	&s3c_device_ts,
 	&s3c_device_wdt,
+	&s5pv210_device_ac97,
+	&s5pv210_device_iis0,
+	&s5pv210_device_spdif,
+	&samsung_asoc_dma,
+	&samsung_device_keypad,
+	&smdkv210_dm9000,
+	&smdkv210_lcd_lte480wv,
 };
 
+static void __init smdkv210_dm9000_init(void)
+{
+	unsigned int tmp;
+
+	gpio_request(S5PV210_MP01(5), "nCS5");
+	s3c_gpio_cfgpin(S5PV210_MP01(5), S3C_GPIO_SFN(2));
+	gpio_free(S5PV210_MP01(5));
+
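+	/* Configure SROM bank 5 access timing and bus width for the DM9000 */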
+	tmp = (5 << S5P_SROM_BCX__TACC__SHIFT);
+	__raw_writel(tmp, S5P_SROM_BC5);
+
+	tmp = __raw_readl(S5P_SROM_BW);
+	tmp &= (S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS5__SHIFT);
+	tmp |= (1 << S5P_SROM_BW__NCS5__SHIFT);
+	__raw_writel(tmp, S5P_SROM_BW);
+}
+
 static struct i2c_board_info smdkv210_i2c_devs0[] __initdata = {
 	{ I2C_BOARD_INFO("24c08", 0x50), },     /* Samsung S524AD0XD1 */
+	{ I2C_BOARD_INFO("wm8580", 0x1b), },
 };
 
 static struct i2c_board_info smdkv210_i2c_devs1[] __initdata = {
@@ -150,6 +278,8 @@
 {
 	s3c_pm_init();
 
+	smdkv210_dm9000_init();
+
 	samsung_keypad_set_platdata(&smdkv210_keypad_data);
 	s3c24xx_ts_set_platdata(&s3c_ts_platform);
 
@@ -165,6 +295,8 @@
 
 	s3c_ide_set_platdata(&smdkv210_ide_pdata);
 
+	s3c_fb_set_platdata(&smdkv210_lcd0_pdata);
+
 	platform_add_devices(smdkv210_devices, ARRAY_SIZE(smdkv210_devices));
 }
 
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig
index 1150b36..b2a9acc 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-s5pv310/Kconfig
@@ -11,9 +11,15 @@
 
 config CPU_S5PV310
 	bool
+	select S3C_PL330_DMA
 	help
 	  Enable S5PV310 CPU support
 
+config S5PV310_DEV_PD
+	bool
+	help
+	  Compile in platform device definitions for Power Domain support
+
 config S5PV310_SETUP_I2C1
 	bool
 	help
@@ -60,6 +66,11 @@
 	help
 	  Common setup code for SDHCI gpio.
 
+config S5PV310_DEV_SYSMMU
+	bool
+	help
+	  Common setup code for SYSTEM MMU in S5PV310
+
 # machine support
 
 menu "S5PC210 Machines"
@@ -69,11 +80,15 @@
 	select CPU_S5PV310
 	select S3C_DEV_RTC
 	select S3C_DEV_WDT
+	select S3C_DEV_I2C1
 	select S3C_DEV_HSMMC
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_HSMMC2
 	select S3C_DEV_HSMMC3
+	select S5PV310_DEV_PD
+	select S5PV310_SETUP_I2C1
 	select S5PV310_SETUP_SDHCI
+	select S5PV310_DEV_SYSMMU
 	help
 	  Machine support for Samsung SMDKC210
 	  S5PC210(MCP) is one of package option of S5PV310
@@ -82,6 +97,10 @@
 	bool "Mobile UNIVERSAL_C210 Board"
 	select CPU_S5PV310
 	select S5P_DEV_ONENAND
+	select S3C_DEV_HSMMC
+	select S3C_DEV_HSMMC2
+	select S3C_DEV_HSMMC3
+	select S5PV310_SETUP_SDHCI
 	select S3C_DEV_I2C1
 	select S5PV310_SETUP_I2C1
 	help
@@ -97,10 +116,14 @@
 	select CPU_S5PV310
 	select S3C_DEV_RTC
 	select S3C_DEV_WDT
+	select S3C_DEV_I2C1
 	select S3C_DEV_HSMMC
 	select S3C_DEV_HSMMC1
 	select S3C_DEV_HSMMC2
 	select S3C_DEV_HSMMC3
+	select S5PV310_DEV_PD
+	select S5PV310_DEV_SYSMMU
+	select S5PV310_SETUP_I2C1
 	select S5PV310_SETUP_SDHCI
 	help
 	  Machine support for Samsung SMDKV310
diff --git a/arch/arm/mach-s5pv310/Makefile b/arch/arm/mach-s5pv310/Makefile
index 84afc64..036fb38 100644
--- a/arch/arm/mach-s5pv310/Makefile
+++ b/arch/arm/mach-s5pv310/Makefile
@@ -13,7 +13,8 @@
 # Core support for S5PV310 system
 
 obj-$(CONFIG_CPU_S5PV310)	+= cpu.o init.o clock.o irq-combiner.o
-obj-$(CONFIG_CPU_S5PV310)	+= setup-i2c0.o time.o gpiolib.o irq-eint.o
+obj-$(CONFIG_CPU_S5PV310)	+= setup-i2c0.o time.o gpiolib.o irq-eint.o dma.o
+obj-$(CONFIG_CPU_FREQ)		+= cpufreq.o
 
 obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)	+= localtimer.o
@@ -27,6 +28,10 @@
 
 # device support
 
+obj-y					+= dev-audio.o
+obj-$(CONFIG_S5PV310_DEV_PD)		+= dev-pd.o
+obj-$(CONFIG_S5PV310_DEV_SYSMMU)	+= dev-sysmmu.o
+
 obj-$(CONFIG_S5PV310_SETUP_I2C1)	+= setup-i2c1.o
 obj-$(CONFIG_S5PV310_SETUP_I2C2)	+= setup-i2c2.o
 obj-$(CONFIG_S5PV310_SETUP_I2C3)	+= setup-i2c3.o
diff --git a/arch/arm/mach-s5pv310/clock.c b/arch/arm/mach-s5pv310/clock.c
index 58c9d33..fc7c2f8 100644
--- a/arch/arm/mach-s5pv310/clock.c
+++ b/arch/arm/mach-s5pv310/clock.c
@@ -244,7 +244,7 @@
 		.id		= -1,
 	},
 	.sources	= &clkset_mout_corebus,
-	.reg_src	= { .reg = S5P_CLKSRC_CORE, .shift = 4, .size = 1 },
+	.reg_src	= { .reg = S5P_CLKSRC_DMC, .shift = 4, .size = 1 },
 };
 
 static struct clksrc_clk clk_sclk_dmc = {
@@ -253,7 +253,7 @@
 		.id		= -1,
 		.parent		= &clk_mout_corebus.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 12, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 12, .size = 3 },
 };
 
 static struct clksrc_clk clk_aclk_cored = {
@@ -262,7 +262,7 @@
 		.id		= -1,
 		.parent		= &clk_sclk_dmc.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 16, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 16, .size = 3 },
 };
 
 static struct clksrc_clk clk_aclk_corep = {
@@ -271,7 +271,7 @@
 		.id		= -1,
 		.parent		= &clk_aclk_cored.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 20, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 20, .size = 3 },
 };
 
 static struct clksrc_clk clk_aclk_acp = {
@@ -280,7 +280,7 @@
 		.id		= -1,
 		.parent		= &clk_mout_corebus.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 0, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 0, .size = 3 },
 };
 
 static struct clksrc_clk clk_pclk_acp = {
@@ -289,7 +289,7 @@
 		.id		= -1,
 		.parent		= &clk_aclk_acp.clk,
 	},
-	.reg_div	= { .reg = S5P_CLKDIV_CORE0, .shift = 4, .size = 3 },
+	.reg_div	= { .reg = S5P_CLKDIV_DMC0, .shift = 4, .size = 3 },
 };
 
 /* Core list of CMU_TOP side */
@@ -384,7 +384,7 @@
 	.reg_src	= { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 },
 };
 
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
 	{
 		.name		= "timers",
 		.id		= -1,
@@ -467,6 +467,16 @@
 		.enable		= s5pv310_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 10),
 	}, {
+		.name		= "pdma",
+		.id		= 0,
+		.enable		= s5pv310_clk_ip_fsys_ctrl,
+		.ctrlbit	= (1 << 0),
+	}, {
+		.name		= "pdma",
+		.id		= 1,
+		.enable		= s5pv310_clk_ip_fsys_ctrl,
+		.ctrlbit	= (1 << 1),
+	}, {
 		.name		= "adc",
 		.id		= -1,
 		.enable		= s5pv310_clk_ip_peril_ctrl,
@@ -507,6 +517,26 @@
 		.enable		= s5pv310_clk_ip_peril_ctrl,
 		.ctrlbit	= (1 << 18),
 	}, {
+		.name		= "iis",
+		.id		= 0,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 19),
+	}, {
+		.name		= "iis",
+		.id		= 1,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 20),
+	}, {
+		.name		= "iis",
+		.id		= 2,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 21),
+	}, {
+		.name		= "ac97",
+		.id		= -1,
+		.enable		= s5pv310_clk_ip_peril_ctrl,
+		.ctrlbit	= (1 << 27),
+	}, {
 		.name		= "fimg2d",
 		.id		= -1,
 		.enable		= s5pv310_clk_ip_image_ctrl,
@@ -990,6 +1020,17 @@
 	&clk_dout_mmc4,
 };
 
+static int xtal_rate;
+
+static unsigned long s5pv310_fout_apll_get_rate(struct clk *clk)
+{
+	return s5p_get_pll45xx(xtal_rate, __raw_readl(S5P_APLL_CON0), pll_4508);
+}
+
+static struct clk_ops s5pv310_fout_apll_ops = {
+	.get_rate = s5pv310_fout_apll_get_rate,
+};
+
 void __init_or_cpufreq s5pv310_setup_clocks(void)
 {
 	struct clk *xtal_clk;
@@ -1013,6 +1054,9 @@
 	BUG_ON(IS_ERR(xtal_clk));
 
 	xtal = clk_get_rate(xtal_clk);
+
+	xtal_rate = xtal;
+
 	clk_put(xtal_clk);
 
 	printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
@@ -1026,7 +1070,7 @@
 	vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
 				__raw_readl(S5P_VPLL_CON1), pll_4650);
 
-	clk_fout_apll.rate = apll;
+	clk_fout_apll.ops = &s5pv310_fout_apll_ops;
 	clk_fout_mpll.rate = mpll;
 	clk_fout_epll.rate = epll;
 	clk_fout_vpll.rate = vpll;
@@ -1061,13 +1105,9 @@
 
 void __init s5pv310_register_clocks(void)
 {
-	struct clk *clkp;
-	int ret;
 	int ptr;
 
-	ret = s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-	if (ret > 0)
-		printk(KERN_ERR "Failed to register %u clocks\n", ret);
+	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
 
 	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
 		s3c_register_clksrc(sysclks[ptr], 1);
@@ -1075,15 +1115,8 @@
 	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
 	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
 
-	clkp = init_clocks_disable;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-		ret = s3c24xx_register_clock(clkp);
-		if (ret < 0) {
-			printk(KERN_ERR "Failed to register clock %s (%d)\n",
-			       clkp->name, ret);
-		}
-		(clkp->enable)(clkp, 0);
-	}
+	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 
 	s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5pv310/cpu.c b/arch/arm/mach-s5pv310/cpu.c
index 72ab289..0db0fb6 100644
--- a/arch/arm/mach-s5pv310/cpu.c
+++ b/arch/arm/mach-s5pv310/cpu.c
@@ -41,6 +41,11 @@
 		.length		= SZ_128K,
 		.type		= MT_DEVICE,
 	}, {
+		.virtual	= (unsigned long)S5P_VA_PMU,
+		.pfn		= __phys_to_pfn(S5PV310_PA_PMU),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
 		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
 		.pfn		= __phys_to_pfn(S5PV310_PA_COMBINER),
 		.length		= SZ_4K,
@@ -71,6 +76,11 @@
 		.length		= SZ_256,
 		.type		= MT_DEVICE,
 	}, {
+		.virtual	= (unsigned long)S5P_VA_DMC0,
+		.pfn		= __phys_to_pfn(S5PV310_PA_DMC0),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
 		.virtual	= (unsigned long)S3C_VA_UART,
 		.pfn		= __phys_to_pfn(S3C_PA_UART),
 		.length		= SZ_512K,
@@ -123,6 +133,15 @@
 	gic_init(0, IRQ_LOCALTIMER, S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
 
 	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
+
+		/*
+		 * Only SPI(0) to SPI(39), SPI(51) and SPI(53) are
+		 * connected to the interrupt combiner. These IRQs
+		 * must be initialized to support cascaded interrupts.
+		 */
+		if (irq >= 40 && irq != 51 && irq != 53)
+			continue;
+
 		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
 				COMBINER_IRQ(irq, 0));
 		combiner_cascade_irq(irq, IRQ_SPI(irq));
@@ -164,7 +183,7 @@
 	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
 		     S5P_VA_L2CC + L2X0_POWER_CTRL);
 
-	l2x0_init(S5P_VA_L2CC, 0x7C070001, 0xC200ffff);
+	l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
 
 	return 0;
 }
diff --git a/arch/arm/mach-s5pv310/cpufreq.c b/arch/arm/mach-s5pv310/cpufreq.c
new file mode 100644
index 0000000..b04cbc7
--- /dev/null
+++ b/arch/arm/mach-s5pv310/cpufreq.c
@@ -0,0 +1,580 @@
+/* linux/arch/arm/mach-s5pv310/cpufreq.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/cpufreq.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-mem.h>
+
+#include <plat/clock.h>
+#include <plat/pm.h>
+
+static struct clk *cpu_clk;
+static struct clk *moutcore;
+static struct clk *mout_mpll;
+static struct clk *mout_apll;
+
+#ifdef CONFIG_REGULATOR
+static struct regulator *arm_regulator;
+static struct regulator *int_regulator;
+#endif
+
+static struct cpufreq_freqs freqs;
+static unsigned int memtype;
+
+enum s5pv310_memory_type {
+	DDR2 = 4,
+	LPDDR2,
+	DDR3,
+};
+
+enum cpufreq_level_index {
+	L0, L1, L2, L3, CPUFREQ_LEVEL_END,
+};
+
+static struct cpufreq_frequency_table s5pv310_freq_table[] = {
+	{L0, 1000*1000},
+	{L1, 800*1000},
+	{L2, 400*1000},
+	{L3, 100*1000},
+	{0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
+	/*
+	 * Clock divider values for the following:
+	 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
+	 *		DIVATB, DIVPCLK_DBG, DIVAPLL }
+	 */
+
+	/* ARM L0: 1000MHz */
+	{ 0, 3, 7, 3, 3, 0, 1 },
+
+	/* ARM L1: 800MHz */
+	{ 0, 3, 7, 3, 3, 0, 1 },
+
+	/* ARM L2: 400MHz */
+	{ 0, 1, 3, 1, 3, 0, 1 },
+
+	/* ARM L3: 100MHz */
+	{ 0, 0, 1, 0, 3, 1, 1 },
+};
+
+static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
+	/*
+	 * Clock divider values for the following:
+	 * { DIVCOPY, DIVHPM }
+	 */
+
+	/* ARM L0: 1000MHz */
+	{ 3, 0 },
+
+	/* ARM L1: 800MHz */
+	{ 3, 0 },
+
+	/* ARM L2: 400MHz */
+	{ 3, 0 },
+
+	/* ARM L3: 100MHz */
+	{ 3, 0 },
+};
+
+static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
+	/*
+	 * Clock divider values for the following:
+	 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD,
+	 *		DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+	 */
+
+	/* DMC L0: 400MHz */
+	{ 3, 1, 1, 1, 1, 1, 3, 1 },
+
+	/* DMC L1: 400MHz */
+	{ 3, 1, 1, 1, 1, 1, 3, 1 },
+
+	/* DMC L2: 266.7MHz */
+	{ 7, 1, 1, 2, 1, 1, 3, 1 },
+
+	/* DMC L3: 200MHz */
+	{ 7, 1, 1, 3, 1, 1, 3, 1 },
+};
+
+static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
+	/*
+	 * Clock divider values for the following:
+	 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+	 */
+
+	/* ACLK200 L0: 200MHz */
+	{ 3, 7, 4, 5, 1 },
+
+	/* ACLK200 L1: 200MHz */
+	{ 3, 7, 4, 5, 1 },
+
+	/* ACLK200 L2: 160MHz */
+	{ 4, 7, 5, 7, 1 },
+
+	/* ACLK200 L3: 133.3MHz */
+	{ 5, 7, 7, 7, 1 },
+};
+
+static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
+	/*
+	 * Clock divider values for the following:
+	 * { DIVGDL/R, DIVGPL/R }
+	 */
+
+	/* ACLK_GDL/R L0: 200MHz */
+	{ 3, 1 },
+
+	/* ACLK_GDL/R L1: 200MHz */
+	{ 3, 1 },
+
+	/* ACLK_GDL/R L2: 160MHz */
+	{ 4, 1 },
+
+	/* ACLK_GDL/R L3: 133.3MHz */
+	{ 5, 1 },
+};
+
+struct cpufreq_voltage_table {
+	unsigned int	index;		/* any */
+	unsigned int	arm_volt;	/* uV */
+	unsigned int	int_volt;
+};
+
+static struct cpufreq_voltage_table s5pv310_volt_table[CPUFREQ_LEVEL_END] = {
+	{
+		.index		= L0,
+		.arm_volt	= 1200000,
+		.int_volt	= 1100000,
+	}, {
+		.index		= L1,
+		.arm_volt	= 1100000,
+		.int_volt	= 1100000,
+	}, {
+		.index		= L2,
+		.arm_volt	= 1000000,
+		.int_volt	= 1000000,
+	}, {
+		.index		= L3,
+		.arm_volt	= 900000,
+		.int_volt	= 1000000,
+	},
+};
+
+static unsigned int s5pv310_apll_pms_table[CPUFREQ_LEVEL_END] = {
+	/* APLL FOUT L0: 1000MHz */
+	((250 << 16) | (6 << 8) | 1),
+
+	/* APLL FOUT L1: 800MHz */
+	((200 << 16) | (6 << 8) | 1),
+
+	/* APLL FOUT L2 : 400MHz */
+	((200 << 16) | (6 << 8) | 2),
+
+	/* APLL FOUT L3: 100MHz */
+	((200 << 16) | (6 << 8) | 4),
+};
+
+int s5pv310_verify_speed(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, s5pv310_freq_table);
+}
+
+unsigned int s5pv310_getspeed(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk) / 1000;
+}
+
+void s5pv310_set_clkdiv(unsigned int div_index)
+{
+	unsigned int tmp;
+
+	/* Change Divider - CPU0 */
+
+	tmp = __raw_readl(S5P_CLKDIV_CPU);
+
+	tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
+		S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
+		S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
+		S5P_CLKDIV_CPU0_APLL_MASK);
+
+	tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
+		(clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
+		(clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
+		(clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
+		(clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
+		(clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
+		(clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_CPU);
+
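+	/* Poll the divider status register until all change-in-progress bits clear */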
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STATCPU);
+	} while (tmp & 0x1111111);
+
+	/* Change Divider - CPU1 */
+
+	tmp = __raw_readl(S5P_CLKDIV_CPU1);
+
+	tmp &= ~((0x7 << 4) | 0x7);
+
+	tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
+		(clkdiv_cpu1[div_index][1] << 0));
+
+	__raw_writel(tmp, S5P_CLKDIV_CPU1);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
+	} while (tmp & 0x11);
+
+	/* Change Divider - DMC0 */
+
+	tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+	tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+		S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
+		S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
+		S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
+
+	tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
+		(clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+		(clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+		(clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
+		(clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+		(clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+		(clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+		(clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+	} while (tmp & 0x11111111);
+
+	/* Change Divider - TOP */
+
+	tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+	tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
+		S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
+		S5P_CLKDIV_TOP_ONENAND_MASK);
+
+	tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+		(clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+		(clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+		(clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+		(clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_TOP);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+	} while (tmp & 0x11111);
+
+	/* Change Divider - LEFTBUS */
+
+	tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+	} while (tmp & 0x11);
+
+	/* Change Divider - RIGHTBUS */
+
+	tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+	} while (tmp & 0x11);
+}
+
+static void s5pv310_set_apll(unsigned int index)
+{
+	unsigned int tmp;
+
+	/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+	clk_set_parent(moutcore, mout_mpll);
+
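+	/* Wait until the mux status shows MPLL (0x2) selected as the core clock source */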
+	do {
+		tmp = (__raw_readl(S5P_CLKMUX_STATCPU)
+			>> S5P_CLKSRC_CPU_MUXCORE_SHIFT);
+		tmp &= 0x7;
+	} while (tmp != 0x2);
+
+	/* 2. Set APLL Lock time */
+	__raw_writel(S5P_APLL_LOCKTIME, S5P_APLL_LOCK);
+
+	/* 3. Change PLL PMS values */
+	tmp = __raw_readl(S5P_APLL_CON0);
+	tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
+	tmp |= s5pv310_apll_pms_table[index];
+	__raw_writel(tmp, S5P_APLL_CON0);
+
+	/* 4. wait_lock_time */
+	do {
+		tmp = __raw_readl(S5P_APLL_CON0);
+	} while (!(tmp & (0x1 << S5P_APLLCON0_LOCKED_SHIFT)));
+
+	/* 5. MUX_CORE_SEL = APLL */
+	clk_set_parent(moutcore, mout_apll);
+
+	do {
+		tmp = __raw_readl(S5P_CLKMUX_STATCPU);
+		tmp &= S5P_CLKMUX_STATCPU_MUXCORE_MASK;
+	} while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
+}
+
+static void s5pv310_set_frequency(unsigned int old_index, unsigned int new_index)
+{
+	unsigned int tmp;
+
+	if (old_index > new_index) {
+		/* Scaling up to L0 requires reprogramming the APLL */
+		if (freqs.new == s5pv310_freq_table[L0].frequency) {
+			/* 1. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+
+			/* 2. Change the apll m,p,s value */
+			s5pv310_set_apll(new_index);
+		} else {
+			/* 1. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+
+			/* 2. Change only the S value of the APLL M, P, S settings */
+			tmp = __raw_readl(S5P_APLL_CON0);
+			tmp &= ~(0x7 << 0);
+			tmp |= (s5pv310_apll_pms_table[new_index] & 0x7);
+			__raw_writel(tmp, S5P_APLL_CON0);
+		}
+	}
+
+	else if (old_index < new_index) {
+		/* Scaling down from L0 requires reprogramming the APLL */
+		if (freqs.old == s5pv310_freq_table[L0].frequency) {
+			/* 1. Change the apll m,p,s value */
+			s5pv310_set_apll(new_index);
+
+			/* 2. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+		} else {
+			/* 1. Change only the S value of the APLL M, P, S settings */
+			tmp = __raw_readl(S5P_APLL_CON0);
+			tmp &= ~(0x7 << 0);
+			tmp |= (s5pv310_apll_pms_table[new_index] & 0x7);
+			__raw_writel(tmp, S5P_APLL_CON0);
+
+			/* 2. Change the system clock divider values */
+			s5pv310_set_clkdiv(new_index);
+		}
+	}
+}
+
+static int s5pv310_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	unsigned int index, old_index;
+	unsigned int arm_volt, int_volt;
+
+	freqs.old = s5pv310_getspeed(policy->cpu);
+
+	if (cpufreq_frequency_table_target(policy, s5pv310_freq_table,
+					   freqs.old, relation, &old_index))
+		return -EINVAL;
+
+	if (cpufreq_frequency_table_target(policy, s5pv310_freq_table,
+					   target_freq, relation, &index))
+		return -EINVAL;
+
+	freqs.new = s5pv310_freq_table[index].frequency;
+	freqs.cpu = policy->cpu;
+
+	if (freqs.new == freqs.old)
+		return 0;
+
+	/* get the voltage value */
+	arm_volt = s5pv310_volt_table[index].arm_volt;
+	int_volt = s5pv310_volt_table[index].int_volt;
+
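+	/* Raise the supply voltages before scaling up; lower them after scaling down */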
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	/* control regulator */
+	if (freqs.new > freqs.old) {
+		/* Voltage up */
+#ifdef CONFIG_REGULATOR
+		regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+		regulator_set_voltage(int_regulator, int_volt, int_volt);
+#endif
+	}
+
+	/* Clock Configuration Procedure */
+	s5pv310_set_frequency(old_index, index);
+
+	/* control regulator */
+	if (freqs.new < freqs.old) {
+		/* Voltage down */
+#ifdef CONFIG_REGULATOR
+		regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+		regulator_set_voltage(int_regulator, int_volt, int_volt);
+#endif
+	}
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy,
+				   pm_message_t pmsg)
+{
+	return 0;
+}
+
+static int s5pv310_cpufreq_resume(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+#endif
+
+static int s5pv310_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	policy->cur = policy->min = policy->max = s5pv310_getspeed(policy->cpu);
+
+	cpufreq_frequency_table_get_attr(s5pv310_freq_table, policy->cpu);
+
+	/* set the transition latency value */
+	policy->cpuinfo.transition_latency = 100000;
+
+	/*
+	 * The two cores of the S5PV310 cannot have their
+	 * frequencies set independently; they always run at
+	 * the same speed, so a transition affects all CPUs.
+	 */
+	cpumask_setall(policy->cpus);
+
+	return cpufreq_frequency_table_cpuinfo(policy, s5pv310_freq_table);
+}
+
+static struct cpufreq_driver s5pv310_driver = {
+	.flags		= CPUFREQ_STICKY,
+	.verify		= s5pv310_verify_speed,
+	.target		= s5pv310_target,
+	.get		= s5pv310_getspeed,
+	.init		= s5pv310_cpufreq_cpu_init,
+	.name		= "s5pv310_cpufreq",
+#ifdef CONFIG_PM
+	.suspend	= s5pv310_cpufreq_suspend,
+	.resume		= s5pv310_cpufreq_resume,
+#endif
+};
+
+static int __init s5pv310_cpufreq_init(void)
+{
+	cpu_clk = clk_get(NULL, "armclk");
+	if (IS_ERR(cpu_clk))
+		return PTR_ERR(cpu_clk);
+
+	moutcore = clk_get(NULL, "moutcore");
+	if (IS_ERR(moutcore))
+		goto out;
+
+	mout_mpll = clk_get(NULL, "mout_mpll");
+	if (IS_ERR(mout_mpll))
+		goto out;
+
+	mout_apll = clk_get(NULL, "mout_apll");
+	if (IS_ERR(mout_apll))
+		goto out;
+
+#ifdef CONFIG_REGULATOR
+	arm_regulator = regulator_get(NULL, "vdd_arm");
+	if (IS_ERR(arm_regulator)) {
+		printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
+		goto out;
+	}
+
+	int_regulator = regulator_get(NULL, "vdd_int");
+	if (IS_ERR(int_regulator)) {
+		printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
+		goto out;
+	}
+#endif
+
+	/*
+	 * Check the DRAM type, since the available DVFS levels
+	 * depend on it.
+	 */
+	memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
+	memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
+	memtype &= S5P_DMC0_MEMTYPE_MASK;
+
+	if ((memtype < DDR2) || (memtype > DDR3)) {
+		printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
+		goto out;
+	} else {
+		printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
+	}
+
+	return cpufreq_register_driver(&s5pv310_driver);
+
+out:
+	if (!IS_ERR(cpu_clk))
+		clk_put(cpu_clk);
+
+	if (!IS_ERR(moutcore))
+		clk_put(moutcore);
+
+	if (!IS_ERR(mout_mpll))
+		clk_put(mout_mpll);
+
+	if (!IS_ERR(mout_apll))
+		clk_put(mout_apll);
+
+#ifdef CONFIG_REGULATOR
+	if (!IS_ERR(arm_regulator))
+		regulator_put(arm_regulator);
+
+	if (!IS_ERR(int_regulator))
+		regulator_put(int_regulator);
+#endif
+
+	printk(KERN_ERR "%s: failed initialization\n", __func__);
+
+	return -EINVAL;
+}
+late_initcall(s5pv310_cpufreq_init);
diff --git a/arch/arm/mach-s5pv310/dev-audio.c b/arch/arm/mach-s5pv310/dev-audio.c
new file mode 100644
index 0000000..a196424
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-audio.c
@@ -0,0 +1,364 @@
+/* linux/arch/arm/mach-s5pv310/dev-audio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/audio.h>
+
+#include <mach/map.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+static const char *rclksrc[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
+static int s5pv310_cfg_i2s(struct platform_device *pdev)
+{
+	/* configure GPIO for i2s port */
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 7, S3C_GPIO_SFN(2));
+		break;
+	case 1:
+		s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(2));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(4));
+		break;
+	default:
+		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata i2sv5_pdata = {
+	.cfg_gpio = s5pv310_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc,
+		},
+	},
+};
+
+static struct resource s5pv310_i2s0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S0,
+		.end	= S5PV310_PA_I2S0 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S0_TX,
+		.end	= DMACH_I2S0_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S0_RX,
+		.end	= DMACH_I2S0_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[3] = {
+		.start	= DMACH_I2S0S_TX,
+		.end	= DMACH_I2S0S_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s0 = {
+	.name = "samsung-i2s",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s0_resource),
+	.resource = s5pv310_i2s0_resource,
+	.dev = {
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "sclk_i2s",
+	[1] = "no_such_clock",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pv310_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_NO_MUXPSR,
+			.src_clk = rclksrc_v3,
+		},
+	},
+};
+
+static struct resource s5pv310_i2s1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S1,
+		.end	= S5PV310_PA_I2S1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S1_TX,
+		.end	= DMACH_I2S1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S1_RX,
+		.end	= DMACH_I2S1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s1 = {
+	.name = "samsung-i2s",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s1_resource),
+	.resource = s5pv310_i2s1_resource,
+	.dev = {
+		.platform_data = &i2sv3_pdata,
+	},
+};
+
+static struct resource s5pv310_i2s2_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S2,
+		.end	= S5PV310_PA_I2S2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S2_TX,
+		.end	= DMACH_I2S2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S2_RX,
+		.end	= DMACH_I2S2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s2 = {
+	.name = "samsung-i2s",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s2_resource),
+	.resource = s5pv310_i2s2_resource,
+	.dev = {
+		.platform_data = &i2sv3_pdata,
+	},
+};
+
+/* PCM Controller platform_devices */
+
+static int s5pv310_pcm_cfg_gpio(struct platform_device *pdev)
+{
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 5, S3C_GPIO_SFN(3));
+		break;
+	case 1:
+		s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(3));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(3));
+		break;
+	default:
+		printk(KERN_DEBUG "Invalid PCM Controller number!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata s3c_pcm_pdata = {
+	.cfg_gpio = s5pv310_pcm_cfg_gpio,
+};
+
+static struct resource s5pv310_pcm0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM0,
+		.end	= S5PV310_PA_PCM0 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM0_TX,
+		.end	= DMACH_PCM0_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM0_RX,
+		.end	= DMACH_PCM0_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm0 = {
+	.name = "samsung-pcm",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm0_resource),
+	.resource = s5pv310_pcm0_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+static struct resource s5pv310_pcm1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM1,
+		.end	= S5PV310_PA_PCM1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM1_TX,
+		.end	= DMACH_PCM1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM1_RX,
+		.end	= DMACH_PCM1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm1 = {
+	.name = "samsung-pcm",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm1_resource),
+	.resource = s5pv310_pcm1_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+static struct resource s5pv310_pcm2_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM2,
+		.end	= S5PV310_PA_PCM2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM2_TX,
+		.end	= DMACH_PCM2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM2_RX,
+		.end	= DMACH_PCM2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm2 = {
+	.name = "samsung-pcm",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm2_resource),
+	.resource = s5pv310_pcm2_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+/* AC97 Controller platform devices */
+
+static int s5pv310_ac97_cfg_gpio(struct platform_device *pdev)
+{
+	return s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(4));
+}
+
+static struct resource s5pv310_ac97_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_AC97,
+		.end	= S5PV310_PA_AC97 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_AC97_PCMOUT,
+		.end	= DMACH_AC97_PCMOUT,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_AC97_PCMIN,
+		.end	= DMACH_AC97_PCMIN,
+		.flags	= IORESOURCE_DMA,
+	},
+	[3] = {
+		.start	= DMACH_AC97_MICIN,
+		.end	= DMACH_AC97_MICIN,
+		.flags	= IORESOURCE_DMA,
+	},
+	[4] = {
+		.start	= IRQ_AC97,
+		.end	= IRQ_AC97,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_audio_pdata s3c_ac97_pdata = {
+	.cfg_gpio = s5pv310_ac97_cfg_gpio,
+};
+
+static u64 s5pv310_ac97_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv310_device_ac97 = {
+	.name = "samsung-ac97",
+	.id = -1,
+	.num_resources = ARRAY_SIZE(s5pv310_ac97_resource),
+	.resource = s5pv310_ac97_resource,
+	.dev = {
+		.platform_data = &s3c_ac97_pdata,
+		.dma_mask = &s5pv310_ac97_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
+/* S/PDIF Controller platform_device */
+
+static int s5pv310_spdif_cfg_gpio(struct platform_device *pdev)
+{
+	s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 2, S3C_GPIO_SFN(3));
+
+	return 0;
+}
+
+static struct resource s5pv310_spdif_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_SPDIF,
+		.end	= S5PV310_PA_SPDIF + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_SPDIF,
+		.end	= DMACH_SPDIF,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+static struct s3c_audio_pdata samsung_spdif_pdata = {
+	.cfg_gpio = s5pv310_spdif_cfg_gpio,
+};
+
+static u64 s5pv310_spdif_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv310_device_spdif = {
+	.name = "samsung-spdif",
+	.id = -1,
+	.num_resources = ARRAY_SIZE(s5pv310_spdif_resource),
+	.resource = s5pv310_spdif_resource,
+	.dev = {
+		.platform_data = &samsung_spdif_pdata,
+		.dma_mask = &s5pv310_spdif_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
diff --git a/arch/arm/mach-s5pv310/dev-pd.c b/arch/arm/mach-s5pv310/dev-pd.c
new file mode 100644
index 0000000..58a50c2
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-pd.c
@@ -0,0 +1,139 @@
+/* linux/arch/arm/mach-s5pv310/dev-pd.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - Power Domain support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <mach/regs-pmu.h>
+
+#include <plat/pd.h>
+
+static int s5pv310_pd_enable(struct device *dev)
+{
+	struct samsung_pd_info *pdata = dev->platform_data;
+	u32 timeout;
+
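+	/* Request power-up; the status register at base + 0x4 is polled below */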
+	__raw_writel(S5P_INT_LOCAL_PWR_EN, pdata->base);
+
+	/* Wait max 1ms */
+	timeout = 10;
+	while ((__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN)
+		!= S5P_INT_LOCAL_PWR_EN) {
+		if (timeout == 0) {
+			printk(KERN_ERR "Power domain %s enable failed.\n",
+				dev_name(dev));
+			return -ETIMEDOUT;
+		}
+		timeout--;
+		udelay(100);
+	}
+
+	return 0;
+}
+
+static int s5pv310_pd_disable(struct device *dev)
+{
+	struct samsung_pd_info *pdata = dev->platform_data;
+	u32 timeout;
+
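+	/* Request power-down and poll the status register until the domain is off */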
+	__raw_writel(0, pdata->base);
+
+	/* Wait max 1ms */
+	timeout = 10;
+	while (__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN) {
+		if (timeout == 0) {
+			printk(KERN_ERR "Power domain %s disable failed.\n",
+				dev_name(dev));
+			return -ETIMEDOUT;
+		}
+		timeout--;
+		udelay(100);
+	}
+
+	return 0;
+}
+
+struct platform_device s5pv310_device_pd[] = {
+	{
+		.name		= "samsung-pd",
+		.id		= 0,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_MFC_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 1,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_G3D_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 2,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_LCD0_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 3,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_LCD1_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 4,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_TV_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 5,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_CAM_CONF,
+			},
+		},
+	}, {
+		.name		= "samsung-pd",
+		.id		= 6,
+		.dev = {
+			.platform_data = &(struct samsung_pd_info) {
+				.enable		= s5pv310_pd_enable,
+				.disable	= s5pv310_pd_disable,
+				.base		= S5P_PMU_GPS_CONF,
+			},
+		},
+	},
+};
diff --git a/arch/arm/mach-s5pv310/dev-sysmmu.c b/arch/arm/mach-s5pv310/dev-sysmmu.c
new file mode 100644
index 0000000..e1bb200
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-sysmmu.c
@@ -0,0 +1,187 @@
+/* linux/arch/arm/mach-s5pv310/dev-sysmmu.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+static struct resource s5pv310_sysmmu_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_SYSMMU_MDMA,
+		.end	= S5PV310_PA_SYSMMU_MDMA + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_SYSMMU_MDMA0_0,
+		.end	= IRQ_SYSMMU_MDMA0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		.start	= S5PV310_PA_SYSMMU_SSS,
+		.end	= S5PV310_PA_SYSMMU_SSS + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[3] = {
+		.start	= IRQ_SYSMMU_SSS_0,
+		.end	= IRQ_SYSMMU_SSS_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[4] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC0,
+		.end	= S5PV310_PA_SYSMMU_FIMC0 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[5] = {
+		.start	= IRQ_SYSMMU_FIMC0_0,
+		.end	= IRQ_SYSMMU_FIMC0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[6] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC1,
+		.end	= S5PV310_PA_SYSMMU_FIMC1 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[7] = {
+		.start	= IRQ_SYSMMU_FIMC1_0,
+		.end	= IRQ_SYSMMU_FIMC1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[8] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC2,
+		.end	= S5PV310_PA_SYSMMU_FIMC2 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[9] = {
+		.start	= IRQ_SYSMMU_FIMC2_0,
+		.end	= IRQ_SYSMMU_FIMC2_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[10] = {
+		.start	= S5PV310_PA_SYSMMU_FIMC3,
+		.end	= S5PV310_PA_SYSMMU_FIMC3 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[11] = {
+		.start	= IRQ_SYSMMU_FIMC3_0,
+		.end	= IRQ_SYSMMU_FIMC3_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[12] = {
+		.start	= S5PV310_PA_SYSMMU_JPEG,
+		.end	= S5PV310_PA_SYSMMU_JPEG + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[13] = {
+		.start	= IRQ_SYSMMU_JPEG_0,
+		.end	= IRQ_SYSMMU_JPEG_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[14] = {
+		.start	= S5PV310_PA_SYSMMU_FIMD0,
+		.end	= S5PV310_PA_SYSMMU_FIMD0 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[15] = {
+		.start	= IRQ_SYSMMU_LCD0_M0_0,
+		.end	= IRQ_SYSMMU_LCD0_M0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[16] = {
+		.start	= S5PV310_PA_SYSMMU_FIMD1,
+		.end	= S5PV310_PA_SYSMMU_FIMD1 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[17] = {
+		.start	= IRQ_SYSMMU_LCD1_M1_0,
+		.end	= IRQ_SYSMMU_LCD1_M1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[18] = {
+		.start	= S5PV310_PA_SYSMMU_PCIe,
+		.end	= S5PV310_PA_SYSMMU_PCIe + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[19] = {
+		.start	= IRQ_SYSMMU_PCIE_0,
+		.end	= IRQ_SYSMMU_PCIE_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[20] = {
+		.start	= S5PV310_PA_SYSMMU_G2D,
+		.end	= S5PV310_PA_SYSMMU_G2D + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[21] = {
+		.start	= IRQ_SYSMMU_2D_0,
+		.end	= IRQ_SYSMMU_2D_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[22] = {
+		.start	= S5PV310_PA_SYSMMU_ROTATOR,
+		.end	= S5PV310_PA_SYSMMU_ROTATOR + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[23] = {
+		.start	= IRQ_SYSMMU_ROTATOR_0,
+		.end	= IRQ_SYSMMU_ROTATOR_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[24] = {
+		.start	= S5PV310_PA_SYSMMU_MDMA2,
+		.end	= S5PV310_PA_SYSMMU_MDMA2 + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[25] = {
+		.start	= IRQ_SYSMMU_MDMA1_0,
+		.end	= IRQ_SYSMMU_MDMA1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[26] = {
+		.start	= S5PV310_PA_SYSMMU_TV,
+		.end	= S5PV310_PA_SYSMMU_TV + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[27] = {
+		.start	= IRQ_SYSMMU_TV_M0_0,
+		.end	= IRQ_SYSMMU_TV_M0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[28] = {
+		.start	= S5PV310_PA_SYSMMU_MFC_L,
+		.end	= S5PV310_PA_SYSMMU_MFC_L + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[29] = {
+		.start	= IRQ_SYSMMU_MFC_M0_0,
+		.end	= IRQ_SYSMMU_MFC_M0_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[30] = {
+		.start	= S5PV310_PA_SYSMMU_MFC_R,
+		.end	= S5PV310_PA_SYSMMU_MFC_R + SZ_64K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[31] = {
+		.start	= IRQ_SYSMMU_MFC_M1_0,
+		.end	= IRQ_SYSMMU_MFC_M1_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+struct platform_device s5pv310_device_sysmmu = {
+	.name		= "s5p-sysmmu",
+	.id		= 32,
+	.num_resources	= ARRAY_SIZE(s5pv310_sysmmu_resource),
+	.resource	= s5pv310_sysmmu_resource,
+};
+
+EXPORT_SYMBOL(s5pv310_device_sysmmu);
diff --git a/arch/arm/mach-s5pv310/dma.c b/arch/arm/mach-s5pv310/dma.c
new file mode 100644
index 0000000..20066c7
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dma.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/devs.h>
+#include <plat/irqs.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+static u64 dma_dmamask = DMA_BIT_MASK(32);
+
+static struct resource s5pv310_pdma0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PDMA0,
+		.end	= S5PV310_PA_PDMA0 + SZ_4K,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_PDMA0,
+		.end	= IRQ_PDMA0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_pl330_platdata s5pv310_pdma0_pdata = {
+	.peri = {
+		[0] = DMACH_PCM0_RX,
+		[1] = DMACH_PCM0_TX,
+		[2] = DMACH_PCM2_RX,
+		[3] = DMACH_PCM2_TX,
+		[4] = DMACH_MSM_REQ0,
+		[5] = DMACH_MSM_REQ2,
+		[6] = DMACH_SPI0_RX,
+		[7] = DMACH_SPI0_TX,
+		[8] = DMACH_SPI2_RX,
+		[9] = DMACH_SPI2_TX,
+		[10] = DMACH_I2S0S_TX,
+		[11] = DMACH_I2S0_RX,
+		[12] = DMACH_I2S0_TX,
+		[13] = DMACH_I2S2_RX,
+		[14] = DMACH_I2S2_TX,
+		[15] = DMACH_UART0_RX,
+		[16] = DMACH_UART0_TX,
+		[17] = DMACH_UART2_RX,
+		[18] = DMACH_UART2_TX,
+		[19] = DMACH_UART4_RX,
+		[20] = DMACH_UART4_TX,
+		[21] = DMACH_SLIMBUS0_RX,
+		[22] = DMACH_SLIMBUS0_TX,
+		[23] = DMACH_SLIMBUS2_RX,
+		[24] = DMACH_SLIMBUS2_TX,
+		[25] = DMACH_SLIMBUS4_RX,
+		[26] = DMACH_SLIMBUS4_TX,
+		[27] = DMACH_AC97_MICIN,
+		[28] = DMACH_AC97_PCMIN,
+		[29] = DMACH_AC97_PCMOUT,
+		[30] = DMACH_MAX,
+		[31] = DMACH_MAX,
+	},
+};
+
+static struct platform_device s5pv310_device_pdma0 = {
+	.name		= "s3c-pl330",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s5pv310_pdma0_resource),
+	.resource	= s5pv310_pdma0_resource,
+	.dev		= {
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv310_pdma0_pdata,
+	},
+};
+
+static struct resource s5pv310_pdma1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PDMA1,
+		.end	= S5PV310_PA_PDMA1 + SZ_4K,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_PDMA1,
+		.end	= IRQ_PDMA1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_pl330_platdata s5pv310_pdma1_pdata = {
+	.peri = {
+		[0] = DMACH_PCM0_RX,
+		[1] = DMACH_PCM0_TX,
+		[2] = DMACH_PCM1_RX,
+		[3] = DMACH_PCM1_TX,
+		[4] = DMACH_MSM_REQ1,
+		[5] = DMACH_MSM_REQ3,
+		[6] = DMACH_SPI1_RX,
+		[7] = DMACH_SPI1_TX,
+		[8] = DMACH_I2S0S_TX,
+		[9] = DMACH_I2S0_RX,
+		[10] = DMACH_I2S0_TX,
+		[11] = DMACH_I2S1_RX,
+		[12] = DMACH_I2S1_TX,
+		[13] = DMACH_UART0_RX,
+		[14] = DMACH_UART0_TX,
+		[15] = DMACH_UART1_RX,
+		[16] = DMACH_UART1_TX,
+		[17] = DMACH_UART3_RX,
+		[18] = DMACH_UART3_TX,
+		[19] = DMACH_SLIMBUS1_RX,
+		[20] = DMACH_SLIMBUS1_TX,
+		[21] = DMACH_SLIMBUS3_RX,
+		[22] = DMACH_SLIMBUS3_TX,
+		[23] = DMACH_SLIMBUS5_RX,
+		[24] = DMACH_SLIMBUS5_TX,
+		[25] = DMACH_SLIMBUS0AUX_RX,
+		[26] = DMACH_SLIMBUS0AUX_TX,
+		[27] = DMACH_SPDIF,
+		[28] = DMACH_MAX,
+		[29] = DMACH_MAX,
+		[30] = DMACH_MAX,
+		[31] = DMACH_MAX,
+	},
+};
+
+static struct platform_device s5pv310_device_pdma1 = {
+	.name		= "s3c-pl330",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(s5pv310_pdma1_resource),
+	.resource	= s5pv310_pdma1_resource,
+	.dev		= {
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv310_pdma1_pdata,
+	},
+};
+
+static struct platform_device *s5pv310_dmacs[] __initdata = {
+	&s5pv310_device_pdma0,
+	&s5pv310_device_pdma1,
+};
+
+static int __init s5pv310_dma_init(void)
+{
+	platform_add_devices(s5pv310_dmacs, ARRAY_SIZE(s5pv310_dmacs));
+
+	return 0;
+}
+arch_initcall(s5pv310_dma_init);
diff --git a/arch/arm/mach-s5pv310/hotplug.c b/arch/arm/mach-s5pv310/hotplug.c
index afa5392..c24235c 100644
--- a/arch/arm/mach-s5pv310/hotplug.c
+++ b/arch/arm/mach-s5pv310/hotplug.c
@@ -30,10 +30,10 @@
 	 * Turn off coherency
 	 */
 	"	mrc	p15, 0, %0, c1, c0, 1\n"
-	"	bic	%0, %0, %2\n"
+	"	bic	%0, %0, #0x20\n"
 	"	mcr	p15, 0, %0, c1, c0, 1\n"
 	"	mrc	p15, 0, %0, c1, c0, 0\n"
-	"	bic	%0, %0, #0x04\n"
+	"	bic	%0, %0, %2\n"
 	"	mcr	p15, 0, %0, c1, c0, 0\n"
 	  : "=&r" (v)
 	  : "r" (0), "Ir" (CR_C)
diff --git a/arch/arm/mach-s5pv310/include/mach/dma.h b/arch/arm/mach-s5pv310/include/mach/dma.h
new file mode 100644
index 0000000..81209eb
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* This platform uses the common S3C DMA API driver for PL330 */
+#include <plat/s3c-dma-pl330.h>
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 99e7dad..536b0b5 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -25,6 +25,8 @@
 
 #define IRQ_SPI(x)		S5P_IRQ(x+32)
 
+#define IRQ_MCT1		IRQ_SPI(35)
+
 #define IRQ_EINT0		IRQ_SPI(40)
 #define IRQ_EINT1		IRQ_SPI(41)
 #define IRQ_EINT2		IRQ_SPI(42)
@@ -36,9 +38,8 @@
 #define IRQ_JPEG		IRQ_SPI(48)
 #define IRQ_2D			IRQ_SPI(49)
 #define IRQ_PCIE		IRQ_SPI(50)
-#define IRQ_SYSTEM_TIMER	IRQ_SPI(51)
+#define IRQ_MCT0		IRQ_SPI(51)
 #define IRQ_MFC			IRQ_SPI(52)
-#define IRQ_WDT			IRQ_SPI(53)
 #define IRQ_AUDIO_SS		IRQ_SPI(54)
 #define IRQ_AC97		IRQ_SPI(55)
 #define IRQ_SPDIF		IRQ_SPI(56)
@@ -54,6 +55,27 @@
 #define COMBINER_GROUP(x)	((x) * MAX_IRQ_IN_COMBINER + IRQ_SPI(64))
 #define COMBINER_IRQ(x, y)	(COMBINER_GROUP(x) + y)
 
+#define IRQ_SYSMMU_MDMA0_0	COMBINER_IRQ(4, 0)
+#define IRQ_SYSMMU_SSS_0	COMBINER_IRQ(4, 1)
+#define IRQ_SYSMMU_FIMC0_0	COMBINER_IRQ(4, 2)
+#define IRQ_SYSMMU_FIMC1_0	COMBINER_IRQ(4, 3)
+#define IRQ_SYSMMU_FIMC2_0	COMBINER_IRQ(4, 4)
+#define IRQ_SYSMMU_FIMC3_0	COMBINER_IRQ(4, 5)
+#define IRQ_SYSMMU_JPEG_0	COMBINER_IRQ(4, 6)
+#define IRQ_SYSMMU_2D_0		COMBINER_IRQ(4, 7)
+
+#define IRQ_SYSMMU_ROTATOR_0	COMBINER_IRQ(5, 0)
+#define IRQ_SYSMMU_MDMA1_0	COMBINER_IRQ(5, 1)
+#define IRQ_SYSMMU_LCD0_M0_0	COMBINER_IRQ(5, 2)
+#define IRQ_SYSMMU_LCD1_M1_0	COMBINER_IRQ(5, 3)
+#define IRQ_SYSMMU_TV_M0_0	COMBINER_IRQ(5, 4)
+#define IRQ_SYSMMU_MFC_M0_0	COMBINER_IRQ(5, 5)
+#define IRQ_SYSMMU_MFC_M1_0	COMBINER_IRQ(5, 6)
+#define IRQ_SYSMMU_PCIE_0	COMBINER_IRQ(5, 7)
+
+#define IRQ_PDMA0		COMBINER_IRQ(21, 0)
+#define IRQ_PDMA1		COMBINER_IRQ(21, 1)
+
 #define IRQ_TIMER0_VIC		COMBINER_IRQ(22, 0)
 #define IRQ_TIMER1_VIC		COMBINER_IRQ(22, 1)
 #define IRQ_TIMER2_VIC		COMBINER_IRQ(22, 2)
@@ -83,8 +105,13 @@
 #define IRQ_HSMMC2		COMBINER_IRQ(29, 2)
 #define IRQ_HSMMC3		COMBINER_IRQ(29, 3)
 
+#define IRQ_MIPI_CSIS0		COMBINER_IRQ(30, 0)
+#define IRQ_MIPI_CSIS1		COMBINER_IRQ(30, 1)
+
 #define IRQ_ONENAND_AUDI	COMBINER_IRQ(34, 0)
 
+#define IRQ_MCT_L1		COMBINER_IRQ(35, 3)
+
 #define IRQ_EINT4		COMBINER_IRQ(37, 0)
 #define IRQ_EINT5		COMBINER_IRQ(37, 1)
 #define IRQ_EINT6		COMBINER_IRQ(37, 2)
@@ -101,7 +128,11 @@
 
 #define IRQ_EINT16_31		COMBINER_IRQ(39, 0)
 
-#define MAX_COMBINER_NR		40
+#define IRQ_MCT_L0		COMBINER_IRQ(51, 0)
+
+#define IRQ_WDT			COMBINER_IRQ(53, 0)
+
+#define MAX_COMBINER_NR		54
 
 #define S5P_IRQ_EINT_BASE	COMBINER_IRQ(MAX_COMBINER_NR, 0)
 
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 7acf4e7..901657f 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5pv310/include/mach/map.h
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
  * S5PV310 - Memory map definitions
@@ -23,64 +23,86 @@
 
 #include <plat/map-s5p.h>
 
-#define S5PV310_PA_SYSRAM		(0x02025000)
+#define S5PV310_PA_SYSRAM		0x02025000
+
+#define S5PV310_PA_I2S0			0x03830000
+#define S5PV310_PA_I2S1			0xE3100000
+#define S5PV310_PA_I2S2			0xE2A00000
+
+#define S5PV310_PA_PCM0			0x03840000
+#define S5PV310_PA_PCM1			0x13980000
+#define S5PV310_PA_PCM2			0x13990000
 
 #define S5PV310_PA_SROM_BANK(x)		(0x04000000 + ((x) * 0x01000000))
 
-#define S5PC210_PA_ONENAND		(0x0C000000)
-#define S5P_PA_ONENAND			S5PC210_PA_ONENAND
+#define S5PC210_PA_ONENAND		0x0C000000
+#define S5PC210_PA_ONENAND_DMA		0x0C600000
 
-#define S5PC210_PA_ONENAND_DMA		(0x0C600000)
-#define S5P_PA_ONENAND_DMA		S5PC210_PA_ONENAND_DMA
+#define S5PV310_PA_CHIPID		0x10000000
 
-#define S5PV310_PA_CHIPID		(0x10000000)
-#define S5P_PA_CHIPID			S5PV310_PA_CHIPID
+#define S5PV310_PA_SYSCON		0x10010000
+#define S5PV310_PA_PMU			0x10020000
+#define S5PV310_PA_CMU			0x10030000
 
-#define S5PV310_PA_SYSCON		(0x10010000)
-#define S5P_PA_SYSCON			S5PV310_PA_SYSCON
+#define S5PV310_PA_WATCHDOG		0x10060000
+#define S5PV310_PA_RTC			0x10070000
 
-#define S5PV310_PA_CMU			(0x10030000)
+#define S5PV310_PA_DMC0			0x10400000
 
-#define S5PV310_PA_WATCHDOG		(0x10060000)
-#define S5PV310_PA_RTC			(0x10070000)
+#define S5PV310_PA_COMBINER		0x10448000
 
-#define S5PV310_PA_COMBINER		(0x10448000)
+#define S5PV310_PA_COREPERI		0x10500000
+#define S5PV310_PA_GIC_CPU		0x10500100
+#define S5PV310_PA_TWD			0x10500600
+#define S5PV310_PA_GIC_DIST		0x10501000
+#define S5PV310_PA_L2CC			0x10502000
 
-#define S5PV310_PA_COREPERI		(0x10500000)
-#define S5PV310_PA_GIC_CPU		(0x10500100)
-#define S5PV310_PA_TWD			(0x10500600)
-#define S5PV310_PA_GIC_DIST		(0x10501000)
-#define S5PV310_PA_L2CC			(0x10502000)
+#define S5PV310_PA_MDMA			0x10810000
+#define S5PV310_PA_PDMA0		0x12680000
+#define S5PV310_PA_PDMA1		0x12690000
 
-#define S5PV310_PA_GPIO1		(0x11400000)
-#define S5PV310_PA_GPIO2		(0x11000000)
-#define S5PV310_PA_GPIO3		(0x03860000)
+#define S5PV310_PA_SYSMMU_MDMA		0x10A40000
+#define S5PV310_PA_SYSMMU_SSS		0x10A50000
+#define S5PV310_PA_SYSMMU_FIMC0		0x11A20000
+#define S5PV310_PA_SYSMMU_FIMC1		0x11A30000
+#define S5PV310_PA_SYSMMU_FIMC2		0x11A40000
+#define S5PV310_PA_SYSMMU_FIMC3		0x11A50000
+#define S5PV310_PA_SYSMMU_JPEG		0x11A60000
+#define S5PV310_PA_SYSMMU_FIMD0		0x11E20000
+#define S5PV310_PA_SYSMMU_FIMD1		0x12220000
+#define S5PV310_PA_SYSMMU_PCIe		0x12620000
+#define S5PV310_PA_SYSMMU_G2D		0x12A20000
+#define S5PV310_PA_SYSMMU_ROTATOR	0x12A30000
+#define S5PV310_PA_SYSMMU_MDMA2		0x12A40000
+#define S5PV310_PA_SYSMMU_TV		0x12E20000
+#define S5PV310_PA_SYSMMU_MFC_L		0x13620000
+#define S5PV310_PA_SYSMMU_MFC_R		0x13630000
+
+#define S5PV310_PA_GPIO1		0x11400000
+#define S5PV310_PA_GPIO2		0x11000000
+#define S5PV310_PA_GPIO3		0x03860000
+
+#define S5PV310_PA_MIPI_CSIS0		0x11880000
+#define S5PV310_PA_MIPI_CSIS1		0x11890000
 
 #define S5PV310_PA_HSMMC(x)		(0x12510000 + ((x) * 0x10000))
 
-#define S5PV310_PA_SROMC		(0x12570000)
+#define S5PV310_PA_SROMC		0x12570000
 
-#define S5PV310_PA_UART			(0x13800000)
-
-#define S5P_PA_UART(x)			(S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0			S5P_PA_UART(0)
-#define S5P_PA_UART1			S5P_PA_UART(1)
-#define S5P_PA_UART2			S5P_PA_UART(2)
-#define S5P_PA_UART3			S5P_PA_UART(3)
-#define S5P_PA_UART4			S5P_PA_UART(4)
-
-#define S5P_SZ_UART			SZ_256
+#define S5PV310_PA_UART			0x13800000
 
 #define S5PV310_PA_IIC(x)		(0x13860000 + ((x) * 0x10000))
 
-#define S5PV310_PA_TIMER		(0x139D0000)
-#define S5P_PA_TIMER			S5PV310_PA_TIMER
+#define S5PV310_PA_AC97			0x139A0000
 
-#define S5PV310_PA_SDRAM		(0x40000000)
-#define S5P_PA_SDRAM			S5PV310_PA_SDRAM
+#define S5PV310_PA_TIMER		0x139D0000
 
-/* compatibiltiy defines. */
-#define S3C_PA_UART			S5PV310_PA_UART
+#define S5PV310_PA_SDRAM		0x40000000
+
+#define S5PV310_PA_SPDIF		0xE1100000
+
+/* Compatibility Defines */
+
 #define S3C_PA_HSMMC0			S5PV310_PA_HSMMC(0)
 #define S3C_PA_HSMMC1			S5PV310_PA_HSMMC(1)
 #define S3C_PA_HSMMC2			S5PV310_PA_HSMMC(2)
@@ -96,4 +118,27 @@
 #define S3C_PA_RTC			S5PV310_PA_RTC
 #define S3C_PA_WDT			S5PV310_PA_WATCHDOG
 
+#define S5P_PA_CHIPID			S5PV310_PA_CHIPID
+#define S5P_PA_MIPI_CSIS0		S5PV310_PA_MIPI_CSIS0
+#define S5P_PA_MIPI_CSIS1		S5PV310_PA_MIPI_CSIS1
+#define S5P_PA_ONENAND			S5PC210_PA_ONENAND
+#define S5P_PA_ONENAND_DMA		S5PC210_PA_ONENAND_DMA
+#define S5P_PA_SDRAM			S5PV310_PA_SDRAM
+#define S5P_PA_SROMC			S5PV310_PA_SROMC
+#define S5P_PA_SYSCON			S5PV310_PA_SYSCON
+#define S5P_PA_TIMER			S5PV310_PA_TIMER
+
+/* UART */
+
+#define S3C_PA_UART			S5PV310_PA_UART
+
+#define S5P_PA_UART(x)			(S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0			S5P_PA_UART(0)
+#define S5P_PA_UART1			S5P_PA_UART(1)
+#define S5P_PA_UART2			S5P_PA_UART(2)
+#define S5P_PA_UART3			S5P_PA_UART(3)
+#define S5P_PA_UART4			S5P_PA_UART(4)
+
+#define S5P_SZ_UART			SZ_256
+
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-clock.h b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
index f1028ca..b5c4ada 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
@@ -19,6 +19,12 @@
 
 #define S5P_INFORM0			S5P_CLKREG(0x800)
 
+#define S5P_CLKDIV_LEFTBUS		S5P_CLKREG(0x04500)
+#define S5P_CLKDIV_STAT_LEFTBUS		S5P_CLKREG(0x04600)
+
+#define S5P_CLKDIV_RIGHTBUS		S5P_CLKREG(0x08500)
+#define S5P_CLKDIV_STAT_RIGHTBUS	S5P_CLKREG(0x08600)
+
 #define S5P_EPLL_CON0			S5P_CLKREG(0x0C110)
 #define S5P_EPLL_CON1			S5P_CLKREG(0x0C114)
 #define S5P_VPLL_CON0			S5P_CLKREG(0x0C120)
@@ -58,6 +64,8 @@
 #define S5P_CLKSRC_MASK_PERIL0		S5P_CLKREG(0x0C350)
 #define S5P_CLKSRC_MASK_PERIL1		S5P_CLKREG(0x0C354)
 
+#define S5P_CLKDIV_STAT_TOP		S5P_CLKREG(0x0C610)
+
 #define S5P_CLKGATE_IP_CAM		S5P_CLKREG(0x0C920)
 #define S5P_CLKGATE_IP_IMAGE		S5P_CLKREG(0x0C930)
 #define S5P_CLKGATE_IP_LCD0		S5P_CLKREG(0x0C934)
@@ -66,8 +74,9 @@
 #define S5P_CLKGATE_IP_PERIL		S5P_CLKREG(0x0C950)
 #define S5P_CLKGATE_IP_PERIR		S5P_CLKREG(0x0C960)
 
-#define S5P_CLKSRC_CORE			S5P_CLKREG(0x10200)
-#define S5P_CLKDIV_CORE0		S5P_CLKREG(0x10500)
+#define S5P_CLKSRC_DMC			S5P_CLKREG(0x10200)
+#define S5P_CLKDIV_DMC0			S5P_CLKREG(0x10500)
+#define S5P_CLKDIV_STAT_DMC0		S5P_CLKREG(0x10600)
 
 #define S5P_APLL_LOCK			S5P_CLKREG(0x14000)
 #define S5P_MPLL_LOCK			S5P_CLKREG(0x14004)
@@ -80,10 +89,77 @@
 #define S5P_CLKMUX_STATCPU		S5P_CLKREG(0x14400)
 
 #define S5P_CLKDIV_CPU			S5P_CLKREG(0x14500)
+#define S5P_CLKDIV_CPU1			S5P_CLKREG(0x14504)
 #define S5P_CLKDIV_STATCPU		S5P_CLKREG(0x14600)
+#define S5P_CLKDIV_STATCPU1		S5P_CLKREG(0x14604)
 
 #define S5P_CLKGATE_SCLKCPU		S5P_CLKREG(0x14800)
 
+/* APLL_LOCK */
+#define S5P_APLL_LOCKTIME		(0x1C20)	/* 300us */
+
+/* APLL_CON0 */
+#define S5P_APLLCON0_ENABLE_SHIFT	(31)
+#define S5P_APLLCON0_LOCKED_SHIFT	(29)
+#define S5P_APLL_VAL_1000		((250 << 16) | (6 << 8) | 1)
+#define S5P_APLL_VAL_800		((200 << 16) | (6 << 8) | 1)
+
+/* CLK_SRC_CPU */
+#define S5P_CLKSRC_CPU_MUXCORE_SHIFT	(16)
+#define S5P_CLKMUX_STATCPU_MUXCORE_MASK	(0x7 << S5P_CLKSRC_CPU_MUXCORE_SHIFT)
+
+/* CLKDIV_CPU0 */
+#define S5P_CLKDIV_CPU0_CORE_SHIFT	(0)
+#define S5P_CLKDIV_CPU0_CORE_MASK	(0x7 << S5P_CLKDIV_CPU0_CORE_SHIFT)
+#define S5P_CLKDIV_CPU0_COREM0_SHIFT	(4)
+#define S5P_CLKDIV_CPU0_COREM0_MASK	(0x7 << S5P_CLKDIV_CPU0_COREM0_SHIFT)
+#define S5P_CLKDIV_CPU0_COREM1_SHIFT	(8)
+#define S5P_CLKDIV_CPU0_COREM1_MASK	(0x7 << S5P_CLKDIV_CPU0_COREM1_SHIFT)
+#define S5P_CLKDIV_CPU0_PERIPH_SHIFT	(12)
+#define S5P_CLKDIV_CPU0_PERIPH_MASK	(0x7 << S5P_CLKDIV_CPU0_PERIPH_SHIFT)
+#define S5P_CLKDIV_CPU0_ATB_SHIFT	(16)
+#define S5P_CLKDIV_CPU0_ATB_MASK	(0x7 << S5P_CLKDIV_CPU0_ATB_SHIFT)
+#define S5P_CLKDIV_CPU0_PCLKDBG_SHIFT	(20)
+#define S5P_CLKDIV_CPU0_PCLKDBG_MASK	(0x7 << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT)
+#define S5P_CLKDIV_CPU0_APLL_SHIFT	(24)
+#define S5P_CLKDIV_CPU0_APLL_MASK	(0x7 << S5P_CLKDIV_CPU0_APLL_SHIFT)
+
+/* CLKDIV_DMC0 */
+#define S5P_CLKDIV_DMC0_ACP_SHIFT	(0)
+#define S5P_CLKDIV_DMC0_ACP_MASK	(0x7 << S5P_CLKDIV_DMC0_ACP_SHIFT)
+#define S5P_CLKDIV_DMC0_ACPPCLK_SHIFT	(4)
+#define S5P_CLKDIV_DMC0_ACPPCLK_MASK	(0x7 << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT)
+#define S5P_CLKDIV_DMC0_DPHY_SHIFT	(8)
+#define S5P_CLKDIV_DMC0_DPHY_MASK	(0x7 << S5P_CLKDIV_DMC0_DPHY_SHIFT)
+#define S5P_CLKDIV_DMC0_DMC_SHIFT	(12)
+#define S5P_CLKDIV_DMC0_DMC_MASK	(0x7 << S5P_CLKDIV_DMC0_DMC_SHIFT)
+#define S5P_CLKDIV_DMC0_DMCD_SHIFT	(16)
+#define S5P_CLKDIV_DMC0_DMCD_MASK	(0x7 << S5P_CLKDIV_DMC0_DMCD_SHIFT)
+#define S5P_CLKDIV_DMC0_DMCP_SHIFT	(20)
+#define S5P_CLKDIV_DMC0_DMCP_MASK	(0x7 << S5P_CLKDIV_DMC0_DMCP_SHIFT)
+#define S5P_CLKDIV_DMC0_COPY2_SHIFT	(24)
+#define S5P_CLKDIV_DMC0_COPY2_MASK	(0x7 << S5P_CLKDIV_DMC0_COPY2_SHIFT)
+#define S5P_CLKDIV_DMC0_CORETI_SHIFT	(28)
+#define S5P_CLKDIV_DMC0_CORETI_MASK	(0x7 << S5P_CLKDIV_DMC0_CORETI_SHIFT)
+
+/* CLKDIV_TOP */
+#define S5P_CLKDIV_TOP_ACLK200_SHIFT	(0)
+#define S5P_CLKDIV_TOP_ACLK200_MASK	(0x7 << S5P_CLKDIV_TOP_ACLK200_SHIFT)
+#define S5P_CLKDIV_TOP_ACLK100_SHIFT	(4)
+#define S5P_CLKDIV_TOP_ACLK100_MASK	(0xf << S5P_CLKDIV_TOP_ACLK100_SHIFT)
+#define S5P_CLKDIV_TOP_ACLK160_SHIFT	(8)
+#define S5P_CLKDIV_TOP_ACLK160_MASK	(0x7 << S5P_CLKDIV_TOP_ACLK160_SHIFT)
+#define S5P_CLKDIV_TOP_ACLK133_SHIFT	(12)
+#define S5P_CLKDIV_TOP_ACLK133_MASK	(0x7 << S5P_CLKDIV_TOP_ACLK133_SHIFT)
+#define S5P_CLKDIV_TOP_ONENAND_SHIFT	(16)
+#define S5P_CLKDIV_TOP_ONENAND_MASK	(0x7 << S5P_CLKDIV_TOP_ONENAND_SHIFT)
+
+/* CLKDIV_LEFTBUS / CLKDIV_RIGHTBUS */
+#define S5P_CLKDIV_BUS_GDLR_SHIFT	(0)
+#define S5P_CLKDIV_BUS_GDLR_MASK	(0x7 << S5P_CLKDIV_BUS_GDLR_SHIFT)
+#define S5P_CLKDIV_BUS_GPLR_SHIFT	(4)
+#define S5P_CLKDIV_BUS_GPLR_MASK	(0x7 << S5P_CLKDIV_BUS_GPLR_SHIFT)
+
 /* Compatibility defines */
 
 #define S5P_EPLL_CON			S5P_EPLL_CON0
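
A minimal usage sketch (not part of the patch) of the divider SHIFT/MASK pairs added above, assuming a hypothetical helper, an example target ratio of 0x1, and an illustrative status-poll bit; the pattern is a plain read-modify-write of the divider register followed by a wait on the matching status register:

	static void example_set_apll_ratio(void)
	{
		unsigned int tmp;

		tmp = __raw_readl(S5P_CLKDIV_CPU);
		tmp &= ~S5P_CLKDIV_CPU0_APLL_MASK;
		tmp |= 0x1 << S5P_CLKDIV_CPU0_APLL_SHIFT;	/* example ratio */
		__raw_writel(tmp, S5P_CLKDIV_CPU);

		/* the busy-bit position below is illustrative only */
		while (__raw_readl(S5P_CLKDIV_STATCPU) & 0x1)
			cpu_relax();
	}
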
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-mem.h b/arch/arm/mach-s5pv310/include/mach/regs-mem.h
new file mode 100644
index 0000000..8342271
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-mem.h
@@ -0,0 +1,23 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-mem.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - SROMC and DMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_MEM_H
+#define __ASM_ARCH_REGS_MEM_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_DMC0_MEMCON_OFFSET		0x04
+
+#define S5P_DMC0_MEMTYPE_SHIFT		8
+#define S5P_DMC0_MEMTYPE_MASK		0xF
+
+#endif /* __ASM_ARCH_REGS_MEM_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-pmu.h b/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
new file mode 100644
index 0000000..fb333d0
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
@@ -0,0 +1,30 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-pmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - Power management unit definition
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_PMU_H
+#define __ASM_ARCH_REGS_PMU_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_PMUREG(x)			(S5P_VA_PMU + (x))
+
+#define S5P_PMU_CAM_CONF		S5P_PMUREG(0x3C00)
+#define S5P_PMU_TV_CONF		S5P_PMUREG(0x3C20)
+#define S5P_PMU_MFC_CONF		S5P_PMUREG(0x3C40)
+#define S5P_PMU_G3D_CONF		S5P_PMUREG(0x3C60)
+#define S5P_PMU_LCD0_CONF		S5P_PMUREG(0x3C80)
+#define S5P_PMU_LCD1_CONF		S5P_PMUREG(0x3CA0)
+#define S5P_PMU_GPS_CONF		S5P_PMUREG(0x3CE0)
+
+#define S5P_INT_LOCAL_PWR_EN		0x7
+
+#endif /* __ASM_ARCH_REGS_PMU_H */
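
An illustrative sketch (not part of the patch) of how the PMU *_CONF registers and S5P_INT_LOCAL_PWR_EN defined above are meant to be used when switching a power domain; the 0x4 status-register offset and the timeout handling are assumptions for illustration:

	static int example_pd_power_on(void __iomem *conf_reg)
	{
		unsigned long timeout = 1000;

		__raw_writel(S5P_INT_LOCAL_PWR_EN, conf_reg);

		/* wait until the domain's status bits report power-up */
		while ((__raw_readl(conf_reg + 0x4) & S5P_INT_LOCAL_PWR_EN)
						!= S5P_INT_LOCAL_PWR_EN) {
			if (!timeout--)
				return -ETIMEDOUT;
			udelay(1);
		}

		return 0;
	}
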
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-srom.h b/arch/arm/mach-s5pv310/include/mach/regs-srom.h
deleted file mode 100644
index 1898b3e..0000000
--- a/arch/arm/mach-s5pv310/include/mach/regs-srom.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* linux/arch/arm/mach-s5pv310/include/mach/regs-srom.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5PV310 - SROMC register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SROM_H
-#define __ASM_ARCH_REGS_SROM_H __FILE__
-
-#include <mach/map.h>
-
-#define S5PV310_SROMREG(x)	(S5P_VA_SROMC + (x))
-
-#define S5PV310_SROM_BW		S5PV310_SROMREG(0x0)
-#define S5PV310_SROM_BC0	S5PV310_SROMREG(0x4)
-#define S5PV310_SROM_BC1	S5PV310_SROMREG(0x8)
-#define S5PV310_SROM_BC2	S5PV310_SROMREG(0xc)
-#define S5PV310_SROM_BC3	S5PV310_SROMREG(0x10)
-
-/* one register BW holds 4 x 4-bit packed settings for NCS0 - NCS3 */
-
-#define S5PV310_SROM_BW__DATAWIDTH__SHIFT	0
-#define S5PV310_SROM_BW__ADDRMODE__SHIFT	1
-#define S5PV310_SROM_BW__WAITENABLE__SHIFT	2
-#define S5PV310_SROM_BW__BYTEENABLE__SHIFT	3
-
-#define S5PV310_SROM_BW__CS_MASK		0xf
-
-#define S5PV310_SROM_BW__NCS0__SHIFT		0
-#define S5PV310_SROM_BW__NCS1__SHIFT		4
-#define S5PV310_SROM_BW__NCS2__SHIFT		8
-#define S5PV310_SROM_BW__NCS3__SHIFT		12
-
-/* applies to same to BCS0 - BCS3 */
-
-#define S5PV310_SROM_BCX__PMC__SHIFT		0
-#define S5PV310_SROM_BCX__TACP__SHIFT		4
-#define S5PV310_SROM_BCX__TCAH__SHIFT		8
-#define S5PV310_SROM_BCX__TCOH__SHIFT		12
-#define S5PV310_SROM_BCX__TACC__SHIFT		16
-#define S5PV310_SROM_BCX__TCOS__SHIFT		24
-#define S5PV310_SROM_BCX__TACS__SHIFT		28
-
-#endif /* __ASM_ARCH_REGS_SROM_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h b/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
new file mode 100644
index 0000000..0b28e81
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
@@ -0,0 +1,24 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/regs-sysmmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5PV310 - System MMU register
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_SYSMMU_H
+#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
+
+#define S5P_MMU_CTRL			0x000
+#define S5P_MMU_CFG			0x004
+#define S5P_MMU_STATUS			0x008
+#define S5P_MMU_FLUSH			0x00C
+#define S5P_PT_BASE_ADDR		0x014
+#define S5P_INT_STATUS			0x018
+#define S5P_PAGE_FAULT_ADDR		0x024
+
+#endif /* __ASM_ARCH_REGS_SYSMMU_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/sysmmu.h b/arch/arm/mach-s5pv310/include/mach/sysmmu.h
new file mode 100644
index 0000000..598fc5c
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/sysmmu.h
@@ -0,0 +1,122 @@
+/* linux/arch/arm/mach-s5pv310/include/mach/sysmmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Samsung sysmmu driver for S5PV310
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARM_ARCH_SYSMMU_H
+#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
+
+#define S5PV310_SYSMMU_TOTAL_IPNUM	16
+#define S5P_SYSMMU_TOTAL_IPNUM		S5PV310_SYSMMU_TOTAL_IPNUM
+
+enum s5pv310_sysmmu_ips {
+	SYSMMU_MDMA,
+	SYSMMU_SSS,
+	SYSMMU_FIMC0,
+	SYSMMU_FIMC1,
+	SYSMMU_FIMC2,
+	SYSMMU_FIMC3,
+	SYSMMU_JPEG,
+	SYSMMU_FIMD0,
+	SYSMMU_FIMD1,
+	SYSMMU_PCIe,
+	SYSMMU_G2D,
+	SYSMMU_ROTATOR,
+	SYSMMU_MDMA2,
+	SYSMMU_TV,
+	SYSMMU_MFC_L,
+	SYSMMU_MFC_R,
+};
+
+static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = {
+	"SYSMMU_MDMA"	,
+	"SYSMMU_SSS"	,
+	"SYSMMU_FIMC0"	,
+	"SYSMMU_FIMC1"	,
+	"SYSMMU_FIMC2"	,
+	"SYSMMU_FIMC3"	,
+	"SYSMMU_JPEG"	,
+	"SYSMMU_FIMD0"	,
+	"SYSMMU_FIMD1"	,
+	"SYSMMU_PCIe"	,
+	"SYSMMU_G2D"	,
+	"SYSMMU_ROTATOR",
+	"SYSMMU_MDMA2"	,
+	"SYSMMU_TV"	,
+	"SYSMMU_MFC_L"	,
+	"SYSMMU_MFC_R"	,
+};
+
+typedef enum s5pv310_sysmmu_ips sysmmu_ips;
+
+struct sysmmu_tt_info {
+	unsigned long *pgd;
+	unsigned long pgd_paddr;
+	unsigned long *pte;
+};
+
+struct sysmmu_controller {
+	const char		*name;
+
+	/* channels registers */
+	void __iomem		*regs;
+
+	/* channel irq */
+	unsigned int		irq;
+
+	sysmmu_ips		ips;
+
+	/* Translation Table Info. */
+	struct sysmmu_tt_info	*tt_info;
+
+	struct resource		*mem;
+	struct device		*dev;
+
+	/* SysMMU controller enable - true : enable */
+	bool			enable;
+};
+
+/**
+ * s5p_sysmmu_enable() - enable the System MMU of an IP block
+ * @ips: The IP block connected to the System MMU.
+ *
+ * This function enables the System MMU so that it translates
+ * virtual addresses into physical addresses.
+ */
+int s5p_sysmmu_enable(sysmmu_ips ips);
+
+/**
+ * s5p_sysmmu_disable() - disable the System MMU of an IP block
+ * @ips: The IP block connected to the System MMU.
+ *
+ * This function disables the System MMU so that it no longer translates
+ * virtual addresses into physical addresses.
+ */
+int s5p_sysmmu_disable(sysmmu_ips ips);
+
+/**
+ * s5p_sysmmu_set_tablebase_pgd() - set the base address of the page table
+ * @ips: The IP block connected to the System MMU.
+ * @pgd: The page table base address.
+ *
+ * This function sets the page table base address. When the System MMU
+ * translates a virtual address into a physical address, it looks up the
+ * mapping in this page table.
+ */
+int s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
+
+/**
+ * s5p_sysmmu_tlb_invalidate() - flush all TLB entries in the System MMU
+ * @ips: The IP block connected to the System MMU.
+ *
+ * This function flushes all TLB entries held by the System MMU.
+ */
+int s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
+#endif /* __ASM_ARM_ARCH_SYSMMU_H */
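
A minimal caller sketch (not part of the patch) for the System MMU API declared above; pgd_phys is a placeholder for a real page table base address:

	static int example_attach_fimc0(unsigned long pgd_phys)
	{
		int ret;

		ret = s5p_sysmmu_enable(SYSMMU_FIMC0);
		if (ret)
			return ret;

		/* point the MMU at the page table and drop stale translations */
		s5p_sysmmu_set_tablebase_pgd(SYSMMU_FIMC0, pgd_phys);
		s5p_sysmmu_tlb_invalidate(SYSMMU_FIMC0);

		return 0;
	}
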
diff --git a/arch/arm/mach-s5pv310/irq-combiner.c b/arch/arm/mach-s5pv310/irq-combiner.c
index c3f88c3..1ea4a9e 100644
--- a/arch/arm/mach-s5pv310/irq-combiner.c
+++ b/arch/arm/mach-s5pv310/irq-combiner.c
@@ -24,29 +24,32 @@
 
 struct combiner_chip_data {
 	unsigned int irq_offset;
+	unsigned int irq_mask;
 	void __iomem *base;
 };
 
 static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
 
-static inline void __iomem *combiner_base(unsigned int irq)
+static inline void __iomem *combiner_base(struct irq_data *data)
 {
-	struct combiner_chip_data *combiner_data = get_irq_chip_data(irq);
+	struct combiner_chip_data *combiner_data =
+		irq_data_get_irq_chip_data(data);
+
 	return combiner_data->base;
 }
 
-static void combiner_mask_irq(unsigned int irq)
+static void combiner_mask_irq(struct irq_data *data)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (data->irq % 32);
 
-	__raw_writel(mask, combiner_base(irq) + COMBINER_ENABLE_CLEAR);
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
 }
 
-static void combiner_unmask_irq(unsigned int irq)
+static void combiner_unmask_irq(struct irq_data *data)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (data->irq % 32);
 
-	__raw_writel(mask, combiner_base(irq) + COMBINER_ENABLE_SET);
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
 }
 
 static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
@@ -57,11 +60,12 @@
 	unsigned long status;
 
 	/* primary controller ack'ing */
-	chip->ack(irq);
+	chip->irq_ack(&desc->irq_data);
 
 	spin_lock(&irq_controller_lock);
 	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
 	spin_unlock(&irq_controller_lock);
+	status &= chip_data->irq_mask;
 
 	if (status == 0)
 		goto out;
@@ -76,13 +80,13 @@
 
  out:
 	/* primary controller unmasking */
-	chip->unmask(irq);
+	chip->irq_unmask(&desc->irq_data);
 }
 
 static struct irq_chip combiner_chip = {
 	.name		= "COMBINER",
-	.mask		= combiner_mask_irq,
-	.unmask		= combiner_unmask_irq,
+	.irq_mask	= combiner_mask_irq,
+	.irq_unmask	= combiner_unmask_irq,
 };
 
 void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
@@ -104,10 +108,12 @@
 
 	combiner_data[combiner_nr].base = base;
 	combiner_data[combiner_nr].irq_offset = irq_start;
+	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
 
 	/* Disable all interrupts */
 
-	__raw_writel(0xffffffff, base + COMBINER_ENABLE_CLEAR);
+	__raw_writel(combiner_data[combiner_nr].irq_mask,
+		     base + COMBINER_ENABLE_CLEAR);
 
 	/* Setup the Linux IRQ subsystem */
 
diff --git a/arch/arm/mach-s5pv310/irq-eint.c b/arch/arm/mach-s5pv310/irq-eint.c
index 5877503..477bd9e 100644
--- a/arch/arm/mach-s5pv310/irq-eint.c
+++ b/arch/arm/mach-s5pv310/irq-eint.c
@@ -48,42 +48,43 @@
 	return ret;
 }
 
-static inline void s5pv310_irq_eint_mask(unsigned int irq)
+static inline void s5pv310_irq_eint_mask(struct irq_data *data)
 {
 	u32 mask;
 
 	spin_lock(&eint_lock);
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask |= eint_irq_to_bit(irq);
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask |= eint_irq_to_bit(data->irq);
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 	spin_unlock(&eint_lock);
 }
 
-static void s5pv310_irq_eint_unmask(unsigned int irq)
+static void s5pv310_irq_eint_unmask(struct irq_data *data)
 {
 	u32 mask;
 
 	spin_lock(&eint_lock);
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask &= ~(eint_irq_to_bit(irq));
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask &= ~(eint_irq_to_bit(data->irq));
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 	spin_unlock(&eint_lock);
 }
 
-static inline void s5pv310_irq_eint_ack(unsigned int irq)
+static inline void s5pv310_irq_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+	__raw_writel(eint_irq_to_bit(data->irq),
+		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
 }
 
-static void s5pv310_irq_eint_maskack(unsigned int irq)
+static void s5pv310_irq_eint_maskack(struct irq_data *data)
 {
-	s5pv310_irq_eint_mask(irq);
-	s5pv310_irq_eint_ack(irq);
+	s5pv310_irq_eint_mask(data);
+	s5pv310_irq_eint_ack(data);
 }
 
-static int s5pv310_irq_eint_set_type(unsigned int irq, unsigned int type)
+static int s5pv310_irq_eint_set_type(struct irq_data *data, unsigned int type)
 {
-	int offs = EINT_OFFSET(irq);
+	int offs = EINT_OFFSET(data->irq);
 	int shift;
 	u32 ctrl, mask;
 	u32 newvalue = 0;
@@ -118,10 +119,10 @@
 	mask = 0x7 << shift;
 
 	spin_lock(&eint_lock);
-	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(irq)));
+	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
 	ctrl &= ~mask;
 	ctrl |= newvalue << shift;
-	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(irq)));
+	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
 	spin_unlock(&eint_lock);
 
 	switch (offs) {
@@ -146,13 +147,13 @@
 
 static struct irq_chip s5pv310_irq_eint = {
 	.name		= "s5pv310-eint",
-	.mask		= s5pv310_irq_eint_mask,
-	.unmask		= s5pv310_irq_eint_unmask,
-	.mask_ack	= s5pv310_irq_eint_maskack,
-	.ack		= s5pv310_irq_eint_ack,
-	.set_type	= s5pv310_irq_eint_set_type,
+	.irq_mask	= s5pv310_irq_eint_mask,
+	.irq_unmask	= s5pv310_irq_eint_unmask,
+	.irq_mask_ack	= s5pv310_irq_eint_maskack,
+	.irq_ack	= s5pv310_irq_eint_ack,
+	.irq_set_type	= s5pv310_irq_eint_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= s3c_irqext_wake,
+	.irq_set_wake	= s3c_irqext_wake,
 #endif
 };
 
@@ -192,14 +193,14 @@
 	u32 *irq_data = get_irq_data(irq);
 	struct irq_chip *chip = get_irq_chip(irq);
 
-	chip->mask(irq);
+	chip->irq_mask(&desc->irq_data);
 
-	if (chip->ack)
-		chip->ack(irq);
+	if (chip->irq_ack)
+		chip->irq_ack(&desc->irq_data);
 
 	generic_handle_irq(*irq_data);
 
-	chip->unmask(irq);
+	chip->irq_unmask(&desc->irq_data);
 }
 
 int __init s5pv310_init_irq_eint(void)
diff --git a/arch/arm/mach-s5pv310/mach-smdkc210.c b/arch/arm/mach-s5pv310/mach-smdkc210.c
index 2b8d4fc..d9cab02 100644
--- a/arch/arm/mach-s5pv310/mach-smdkc210.c
+++ b/arch/arm/mach-s5pv310/mach-smdkc210.c
@@ -14,18 +14,21 @@
 #include <linux/platform_device.h>
 #include <linux/smsc911x.h>
 #include <linux/io.h>
+#include <linux/i2c.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
 #include <plat/regs-serial.h>
+#include <plat/regs-srom.h>
 #include <plat/s5pv310.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/sdhci.h>
+#include <plat/iic.h>
+#include <plat/pd.h>
 
 #include <mach/map.h>
-#include <mach/regs-srom.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKC210_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -139,13 +142,29 @@
 	},
 };
 
+static struct i2c_board_info i2c_devs1[] __initdata = {
+	{I2C_BOARD_INFO("wm8994", 0x1a),},
+};
+
 static struct platform_device *smdkc210_devices[] __initdata = {
 	&s3c_device_hsmmc0,
 	&s3c_device_hsmmc1,
 	&s3c_device_hsmmc2,
 	&s3c_device_hsmmc3,
+	&s3c_device_i2c1,
 	&s3c_device_rtc,
 	&s3c_device_wdt,
+	&s5pv310_device_ac97,
+	&s5pv310_device_i2s0,
+	&s5pv310_device_pd[PD_MFC],
+	&s5pv310_device_pd[PD_G3D],
+	&s5pv310_device_pd[PD_LCD0],
+	&s5pv310_device_pd[PD_LCD1],
+	&s5pv310_device_pd[PD_CAM],
+	&s5pv310_device_pd[PD_TV],
+	&s5pv310_device_pd[PD_GPS],
+	&s5pv310_device_sysmmu,
+	&samsung_asoc_dma,
 	&smdkc210_smsc911x,
 };
 
@@ -154,23 +173,22 @@
 	u32 cs1;
 
 	/* configure nCS1 width to 16 bits */
-	cs1 = __raw_readl(S5PV310_SROM_BW) &
-		    ~(S5PV310_SROM_BW__CS_MASK <<
-				    S5PV310_SROM_BW__NCS1__SHIFT);
-	cs1 |= ((1 << S5PV310_SROM_BW__DATAWIDTH__SHIFT) |
-		(1 << S5PV310_SROM_BW__WAITENABLE__SHIFT) |
-		(1 << S5PV310_SROM_BW__BYTEENABLE__SHIFT)) <<
-		S5PV310_SROM_BW__NCS1__SHIFT;
-	__raw_writel(cs1, S5PV310_SROM_BW);
+	cs1 = __raw_readl(S5P_SROM_BW) &
+		~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
+	cs1 |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
+		(1 << S5P_SROM_BW__WAITENABLE__SHIFT) |
+		(1 << S5P_SROM_BW__BYTEENABLE__SHIFT)) <<
+		S5P_SROM_BW__NCS1__SHIFT;
+	__raw_writel(cs1, S5P_SROM_BW);
 
 	/* set timing for nCS1 suitable for ethernet chip */
-	__raw_writel((0x1 << S5PV310_SROM_BCX__PMC__SHIFT) |
-		     (0x9 << S5PV310_SROM_BCX__TACP__SHIFT) |
-		     (0xc << S5PV310_SROM_BCX__TCAH__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOH__SHIFT) |
-		     (0x6 << S5PV310_SROM_BCX__TACC__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOS__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TACS__SHIFT), S5PV310_SROM_BC1);
+	__raw_writel((0x1 << S5P_SROM_BCX__PMC__SHIFT) |
+		     (0x9 << S5P_SROM_BCX__TACP__SHIFT) |
+		     (0xc << S5P_SROM_BCX__TCAH__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOH__SHIFT) |
+		     (0x6 << S5P_SROM_BCX__TACC__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOS__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
 }
 
 static void __init smdkc210_map_io(void)
@@ -182,6 +200,9 @@
 
 static void __init smdkc210_machine_init(void)
 {
+	s3c_i2c1_set_platdata(NULL);
+	i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
+
 	smdkc210_smsc911x_init();
 
 	s3c_sdhci0_set_platdata(&smdkc210_hsmmc0_pdata);
diff --git a/arch/arm/mach-s5pv310/mach-smdkv310.c b/arch/arm/mach-s5pv310/mach-smdkv310.c
index 35826d6..b1cddbf 100644
--- a/arch/arm/mach-s5pv310/mach-smdkv310.c
+++ b/arch/arm/mach-s5pv310/mach-smdkv310.c
@@ -14,18 +14,21 @@
 #include <linux/platform_device.h>
 #include <linux/smsc911x.h>
 #include <linux/io.h>
+#include <linux/i2c.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
 #include <plat/regs-serial.h>
+#include <plat/regs-srom.h>
 #include <plat/s5pv310.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/sdhci.h>
+#include <plat/iic.h>
+#include <plat/pd.h>
 
 #include <mach/map.h>
-#include <mach/regs-srom.h>
 
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKV310_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
@@ -139,13 +142,29 @@
 	},
 };
 
+static struct i2c_board_info i2c_devs1[] __initdata = {
+	{I2C_BOARD_INFO("wm8994", 0x1a),},
+};
+
 static struct platform_device *smdkv310_devices[] __initdata = {
 	&s3c_device_hsmmc0,
 	&s3c_device_hsmmc1,
 	&s3c_device_hsmmc2,
 	&s3c_device_hsmmc3,
+	&s3c_device_i2c1,
 	&s3c_device_rtc,
 	&s3c_device_wdt,
+	&s5pv310_device_ac97,
+	&s5pv310_device_i2s0,
+	&s5pv310_device_pd[PD_MFC],
+	&s5pv310_device_pd[PD_G3D],
+	&s5pv310_device_pd[PD_LCD0],
+	&s5pv310_device_pd[PD_LCD1],
+	&s5pv310_device_pd[PD_CAM],
+	&s5pv310_device_pd[PD_TV],
+	&s5pv310_device_pd[PD_GPS],
+	&s5pv310_device_sysmmu,
+	&samsung_asoc_dma,
 	&smdkv310_smsc911x,
 };
 
@@ -154,23 +173,22 @@
 	u32 cs1;
 
 	/* configure nCS1 width to 16 bits */
-	cs1 = __raw_readl(S5PV310_SROM_BW) &
-		    ~(S5PV310_SROM_BW__CS_MASK <<
-				    S5PV310_SROM_BW__NCS1__SHIFT);
-	cs1 |= ((1 << S5PV310_SROM_BW__DATAWIDTH__SHIFT) |
-		(1 << S5PV310_SROM_BW__WAITENABLE__SHIFT) |
-		(1 << S5PV310_SROM_BW__BYTEENABLE__SHIFT)) <<
-		S5PV310_SROM_BW__NCS1__SHIFT;
-	__raw_writel(cs1, S5PV310_SROM_BW);
+	cs1 = __raw_readl(S5P_SROM_BW) &
+		~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
+	cs1 |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
+		(1 << S5P_SROM_BW__WAITENABLE__SHIFT) |
+		(1 << S5P_SROM_BW__BYTEENABLE__SHIFT)) <<
+		S5P_SROM_BW__NCS1__SHIFT;
+	__raw_writel(cs1, S5P_SROM_BW);
 
 	/* set timing for nCS1 suitable for ethernet chip */
-	__raw_writel((0x1 << S5PV310_SROM_BCX__PMC__SHIFT) |
-		     (0x9 << S5PV310_SROM_BCX__TACP__SHIFT) |
-		     (0xc << S5PV310_SROM_BCX__TCAH__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOH__SHIFT) |
-		     (0x6 << S5PV310_SROM_BCX__TACC__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TCOS__SHIFT) |
-		     (0x1 << S5PV310_SROM_BCX__TACS__SHIFT), S5PV310_SROM_BC1);
+	__raw_writel((0x1 << S5P_SROM_BCX__PMC__SHIFT) |
+		     (0x9 << S5P_SROM_BCX__TACP__SHIFT) |
+		     (0xc << S5P_SROM_BCX__TCAH__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOH__SHIFT) |
+		     (0x6 << S5P_SROM_BCX__TACC__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TCOS__SHIFT) |
+		     (0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
 }
 
 static void __init smdkv310_map_io(void)
@@ -182,6 +200,9 @@
 
 static void __init smdkv310_machine_init(void)
 {
+	s3c_i2c1_set_platdata(NULL);
+	i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
+
 	smdkv310_smsc911x_init();
 
 	s3c_sdhci0_set_platdata(&smdkv310_hsmmc0_pdata);
diff --git a/arch/arm/mach-s5pv310/mach-universal_c210.c b/arch/arm/mach-s5pv310/mach-universal_c210.c
index 16d8fc0..36bc3cf 100644
--- a/arch/arm/mach-s5pv310/mach-universal_c210.c
+++ b/arch/arm/mach-s5pv310/mach-universal_c210.c
@@ -13,6 +13,9 @@
 #include <linux/i2c.h>
 #include <linux/gpio_keys.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/mmc/host.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
@@ -21,6 +24,7 @@
 #include <plat/s5pv310.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
+#include <plat/sdhci.h>
 
 #include <mach/map.h>
 
@@ -116,6 +120,73 @@
 	},
 };
 
+/* eMMC */
+static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = {
+	.max_width		= 8,
+	.host_caps		= (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
+				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+				MMC_CAP_DISABLE),
+	.cd_type		= S3C_SDHCI_CD_PERMANENT,
+	.clk_type		= S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+static struct regulator_consumer_supply mmc0_supplies[] = {
+	REGULATOR_SUPPLY("vmmc", "s3c-sdhci.0"),
+};
+
+static struct regulator_init_data mmc0_fixed_voltage_init_data = {
+	.constraints		= {
+		.name		= "VMEM_VDD_2.8V",
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(mmc0_supplies),
+	.consumer_supplies	= mmc0_supplies,
+};
+
+static struct fixed_voltage_config mmc0_fixed_voltage_config = {
+	.supply_name		= "MASSMEMORY_EN",
+	.microvolts		= 2800000,
+	.gpio			= S5PV310_GPE1(3),
+	.enable_high		= true,
+	.init_data		= &mmc0_fixed_voltage_init_data,
+};
+
+static struct platform_device mmc0_fixed_voltage = {
+	.name			= "reg-fixed-voltage",
+	.id			= 0,
+	.dev			= {
+		.platform_data	= &mmc0_fixed_voltage_config,
+	},
+};
+
+/* SD */
+static struct s3c_sdhci_platdata universal_hsmmc2_data __initdata = {
+	.max_width		= 4,
+	.host_caps		= MMC_CAP_4_BIT_DATA |
+				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+				MMC_CAP_DISABLE,
+	.ext_cd_gpio		= S5PV310_GPX3(4),      /* XEINT_28 */
+	.ext_cd_gpio_invert	= 1,
+	.cd_type		= S3C_SDHCI_CD_GPIO,
+	.clk_type		= S3C_SDHCI_CLK_DIV_EXTERNAL,
+};
+
+/* WiFi */
+static struct s3c_sdhci_platdata universal_hsmmc3_data __initdata = {
+	.max_width		= 4,
+	.host_caps		= MMC_CAP_4_BIT_DATA |
+				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+				MMC_CAP_DISABLE,
+	.cd_type		= S3C_SDHCI_CD_EXTERNAL,
+};
+
+static void __init universal_sdhci_init(void)
+{
+	s3c_sdhci0_set_platdata(&universal_hsmmc0_data);
+	s3c_sdhci2_set_platdata(&universal_hsmmc2_data);
+	s3c_sdhci3_set_platdata(&universal_hsmmc3_data);
+}
+
 /* I2C0 */
 static struct i2c_board_info i2c0_devs[] __initdata = {
 	/* Camera, To be updated */
@@ -127,6 +198,13 @@
 };
 
 static struct platform_device *universal_devices[] __initdata = {
+	/* Samsung Platform Devices */
+	&mmc0_fixed_voltage,
+	&s3c_device_hsmmc0,
+	&s3c_device_hsmmc2,
+	&s3c_device_hsmmc3,
+
+	/* Universal Devices */
 	&universal_gpio_keys,
 	&s5p_device_onenand,
 };
@@ -140,6 +218,8 @@
 
 static void __init universal_machine_init(void)
 {
+	universal_sdhci_init();
+
 	i2c_register_board_info(0, i2c0_devs, ARRAY_SIZE(i2c0_devs));
 	i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
 
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index d43c5ef..bd3e1bf 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -241,6 +241,9 @@
 struct platform_device collie_locomo_device = {
 	.name		= "locomo",
 	.id		= 0,
+	.dev		= {
+		.platform_data	= &locomo_info,
+	},
 	.num_resources	= ARRAY_SIZE(locomo_resources),
 	.resource	= locomo_resources,
 };
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 59d14f0..e21f347 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -21,7 +21,6 @@
 #include <asm/div64.h>
 #include <mach/hardware.h>
 #include <asm/system.h>
-#include <asm/pgtable.h>
 #include <asm/mach/map.h>
 #include <asm/mach/flash.h>
 #include <asm/irq.h>
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c
index 3093d46..3d85dfa 100644
--- a/arch/arm/mach-sa1100/irq.c
+++ b/arch/arm/mach-sa1100/irq.c
@@ -37,14 +37,14 @@
 #define GPIO_11_27_IRQ(i)	((i) - 21)
 #define GPIO11_27_MASK(irq)	(1 << GPIO_11_27_IRQ(irq))
 
-static int sa1100_gpio_type(unsigned int irq, unsigned int type)
+static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
 {
 	unsigned int mask;
 
-	if (irq <= 10)
-		mask = 1 << irq;
+	if (d->irq <= 10)
+		mask = 1 << d->irq;
 	else
-		mask = GPIO11_27_MASK(irq);
+		mask = GPIO11_27_MASK(d->irq);
 
 	if (type == IRQ_TYPE_PROBE) {
 		if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
@@ -70,37 +70,37 @@
 /*
  * GPIO IRQs must be acknowledged.  This is for IRQs from 0 to 10.
  */
-static void sa1100_low_gpio_ack(unsigned int irq)
+static void sa1100_low_gpio_ack(struct irq_data *d)
 {
-	GEDR = (1 << irq);
+	GEDR = (1 << d->irq);
 }
 
-static void sa1100_low_gpio_mask(unsigned int irq)
+static void sa1100_low_gpio_mask(struct irq_data *d)
 {
-	ICMR &= ~(1 << irq);
+	ICMR &= ~(1 << d->irq);
 }
 
-static void sa1100_low_gpio_unmask(unsigned int irq)
+static void sa1100_low_gpio_unmask(struct irq_data *d)
 {
-	ICMR |= 1 << irq;
+	ICMR |= 1 << d->irq;
 }
 
-static int sa1100_low_gpio_wake(unsigned int irq, unsigned int on)
+static int sa1100_low_gpio_wake(struct irq_data *d, unsigned int on)
 {
 	if (on)
-		PWER |= 1 << irq;
+		PWER |= 1 << d->irq;
 	else
-		PWER &= ~(1 << irq);
+		PWER &= ~(1 << d->irq);
 	return 0;
 }
 
 static struct irq_chip sa1100_low_gpio_chip = {
 	.name		= "GPIO-l",
-	.ack		= sa1100_low_gpio_ack,
-	.mask		= sa1100_low_gpio_mask,
-	.unmask		= sa1100_low_gpio_unmask,
-	.set_type	= sa1100_gpio_type,
-	.set_wake	= sa1100_low_gpio_wake,
+	.irq_ack	= sa1100_low_gpio_ack,
+	.irq_mask	= sa1100_low_gpio_mask,
+	.irq_unmask	= sa1100_low_gpio_unmask,
+	.irq_set_type	= sa1100_gpio_type,
+	.irq_set_wake	= sa1100_low_gpio_wake,
 };
 
 /*
@@ -139,16 +139,16 @@
  * In addition, the IRQs are all collected up into one bit in the
  * interrupt controller registers.
  */
-static void sa1100_high_gpio_ack(unsigned int irq)
+static void sa1100_high_gpio_ack(struct irq_data *d)
 {
-	unsigned int mask = GPIO11_27_MASK(irq);
+	unsigned int mask = GPIO11_27_MASK(d->irq);
 
 	GEDR = mask;
 }
 
-static void sa1100_high_gpio_mask(unsigned int irq)
+static void sa1100_high_gpio_mask(struct irq_data *d)
 {
-	unsigned int mask = GPIO11_27_MASK(irq);
+	unsigned int mask = GPIO11_27_MASK(d->irq);
 
 	GPIO_IRQ_mask &= ~mask;
 
@@ -156,9 +156,9 @@
 	GFER &= ~mask;
 }
 
-static void sa1100_high_gpio_unmask(unsigned int irq)
+static void sa1100_high_gpio_unmask(struct irq_data *d)
 {
-	unsigned int mask = GPIO11_27_MASK(irq);
+	unsigned int mask = GPIO11_27_MASK(d->irq);
 
 	GPIO_IRQ_mask |= mask;
 
@@ -166,44 +166,44 @@
 	GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
 }
 
-static int sa1100_high_gpio_wake(unsigned int irq, unsigned int on)
+static int sa1100_high_gpio_wake(struct irq_data *d, unsigned int on)
 {
 	if (on)
-		PWER |= GPIO11_27_MASK(irq);
+		PWER |= GPIO11_27_MASK(d->irq);
 	else
-		PWER &= ~GPIO11_27_MASK(irq);
+		PWER &= ~GPIO11_27_MASK(d->irq);
 	return 0;
 }
 
 static struct irq_chip sa1100_high_gpio_chip = {
 	.name		= "GPIO-h",
-	.ack		= sa1100_high_gpio_ack,
-	.mask		= sa1100_high_gpio_mask,
-	.unmask		= sa1100_high_gpio_unmask,
-	.set_type	= sa1100_gpio_type,
-	.set_wake	= sa1100_high_gpio_wake,
+	.irq_ack	= sa1100_high_gpio_ack,
+	.irq_mask	= sa1100_high_gpio_mask,
+	.irq_unmask	= sa1100_high_gpio_unmask,
+	.irq_set_type	= sa1100_gpio_type,
+	.irq_set_wake	= sa1100_high_gpio_wake,
 };
 
 /*
  * We don't need to ACK IRQs on the SA1100 unless they're GPIOs
  * this is for internal IRQs i.e. from 11 to 31.
  */
-static void sa1100_mask_irq(unsigned int irq)
+static void sa1100_mask_irq(struct irq_data *d)
 {
-	ICMR &= ~(1 << irq);
+	ICMR &= ~(1 << d->irq);
 }
 
-static void sa1100_unmask_irq(unsigned int irq)
+static void sa1100_unmask_irq(struct irq_data *d)
 {
-	ICMR |= (1 << irq);
+	ICMR |= (1 << d->irq);
 }
 
 /*
  * Apart from GPIOs, only the RTC alarm can be a wakeup event.
  */
-static int sa1100_set_wake(unsigned int irq, unsigned int on)
+static int sa1100_set_wake(struct irq_data *d, unsigned int on)
 {
-	if (irq == IRQ_RTCAlrm) {
+	if (d->irq == IRQ_RTCAlrm) {
 		if (on)
 			PWER |= PWER_RTC;
 		else
@@ -215,10 +215,10 @@
 
 static struct irq_chip sa1100_normal_chip = {
 	.name		= "SC",
-	.ack		= sa1100_mask_irq,
-	.mask		= sa1100_mask_irq,
-	.unmask		= sa1100_unmask_irq,
-	.set_wake	= sa1100_set_wake,
+	.irq_ack	= sa1100_mask_irq,
+	.irq_mask	= sa1100_mask_irq,
+	.irq_unmask	= sa1100_unmask_irq,
+	.irq_set_wake	= sa1100_set_wake,
 };
 
 static struct resource irq_resource = {
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index c601a75..4aad01f 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -35,7 +35,7 @@
 		/*
 		 * Acknowledge the parent IRQ.
 		 */
-		desc->chip->ack(irq);
+		desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 		/*
 		 * Read the interrupt reason register.  Let's have all
@@ -53,7 +53,7 @@
 		 * recheck the register for any pending IRQs.
 		 */
 		if (irr & (IRR_ETHERNET | IRR_USAR)) {
-			desc->chip->mask(irq);
+			desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 			/*
 			 * Ack the interrupt now to prevent re-entering
@@ -61,7 +61,7 @@
 			 * since we'll check the IRR register prior to
 			 * leaving.
 			 */
-			desc->chip->ack(irq);
+			desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 			if (irr & IRR_ETHERNET) {
 				generic_handle_irq(IRQ_NEPONSET_SMC9196);
@@ -71,7 +71,7 @@
 				generic_handle_irq(IRQ_NEPONSET_USAR);
 			}
 
-			desc->chip->unmask(irq);
+			desc->irq_data.chip->irq_unmask(&desc->irq_data);
 		}
 
 		if (irr & IRR_SA1111) {
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index c83fdc8..ab9fc44 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -120,7 +120,7 @@
 	return virt_to_phys(sp);
 }
 
-static struct platform_suspend_ops sa11x0_pm_ops = {
+static const struct platform_suspend_ops sa11x0_pm_ops = {
 	.enter		= sa11x0_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-shark/irq.c b/arch/arm/mach-shark/irq.c
index c04eb6a..831fc66 100644
--- a/arch/arm/mach-shark/irq.c
+++ b/arch/arm/mach-shark/irq.c
@@ -30,35 +30,35 @@
  * These have to be protected by the irq controller spinlock
  * before being called.
  */
-static void shark_disable_8259A_irq(unsigned int irq)
+static void shark_disable_8259A_irq(struct irq_data *d)
 {
 	unsigned int mask;
-	if (irq<8) {
-	  mask = 1 << irq;
+	if (d->irq<8) {
+	  mask = 1 << d->irq;
 	  cached_irq_mask[0] |= mask;
 	  outb(cached_irq_mask[1],0xA1);
 	} else {
-	  mask = 1 << (irq-8);
+	  mask = 1 << (d->irq-8);
 	  cached_irq_mask[1] |= mask;
 	  outb(cached_irq_mask[0],0x21);
 	}
 }
 
-static void shark_enable_8259A_irq(unsigned int irq)
+static void shark_enable_8259A_irq(struct irq_data *d)
 {
 	unsigned int mask;
-	if (irq<8) {
-	  mask = ~(1 << irq);
+	if (d->irq<8) {
+	  mask = ~(1 << d->irq);
 	  cached_irq_mask[0] &= mask;
 	  outb(cached_irq_mask[0],0x21);
 	} else {
-	  mask = ~(1 << (irq-8));
+	  mask = ~(1 << (d->irq-8));
 	  cached_irq_mask[1] &= mask;
 	  outb(cached_irq_mask[1],0xA1);
 	}
 }
 
-static void shark_ack_8259A_irq(unsigned int irq){}
+static void shark_ack_8259A_irq(struct irq_data *d){}
 
 static irqreturn_t bogus_int(int irq, void *dev_id)
 {
@@ -69,10 +69,10 @@
 static struct irqaction cascade;
 
 static struct irq_chip fb_chip = {
-	.name	= "XT-PIC",
-	.ack	= shark_ack_8259A_irq,
-	.mask	= shark_disable_8259A_irq,
-	.unmask = shark_enable_8259A_irq,
+	.name		= "XT-PIC",
+	.irq_ack	= shark_ack_8259A_irq,
+	.irq_mask	= shark_disable_8259A_irq,
+	.irq_unmask	= shark_enable_8259A_irq,
 };
 
 void __init shark_init_irq(void)
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 4d1b4c5..0c8f6cf 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -60,6 +60,8 @@
 
 config MACH_AG5EVM
 	bool "AG5EVM board"
+	select ARCH_REQUIRE_GPIOLIB
+	select SH_LCD_MIPI_DSI
 	depends on ARCH_SH73A0
 
 config MACH_MACKEREL
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index c18a740..4303a86 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -34,9 +34,10 @@
 #include <linux/input/sh_keysc.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
-
+#include <linux/sh_clk.h>
+#include <video/sh_mobile_lcdc.h>
+#include <video/sh_mipi_dsi.h>
 #include <sound/sh_fsi.h>
-
 #include <mach/hardware.h>
 #include <mach/sh73a0.h>
 #include <mach/common.h>
@@ -183,11 +184,165 @@
 	.resource	= sh_mmcif_resources,
 };
 
+/* IrDA */
+static struct resource irda_resources[] = {
+	[0] = {
+		.start	= 0xE6D00000,
+		.end	= 0xE6D01FD4 - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(95),
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device irda_device = {
+	.name           = "sh_irda",
+	.id		= 0,
+	.resource       = irda_resources,
+	.num_resources  = ARRAY_SIZE(irda_resources),
+};
+
+static unsigned char lcd_backlight_seq[3][2] = {
+	{ 0x04, 0x07 },
+	{ 0x23, 0x80 },
+	{ 0x03, 0x01 },
+};
+
+static void lcd_backlight_on(void)
+{
+	struct i2c_adapter *a;
+	struct i2c_msg msg;
+	int k;
+
+	a = i2c_get_adapter(1);
+	for (k = 0; a && k < 3; k++) {
+		msg.addr = 0x6d;
+		msg.buf = &lcd_backlight_seq[k][0];
+		msg.len = 2;
+		msg.flags = 0;
+		if (i2c_transfer(a, &msg, 1) != 1)
+			break;
+	}
+}
+
+static void lcd_backlight_reset(void)
+{
+	gpio_set_value(GPIO_PORT235, 0);
+	mdelay(24);
+	gpio_set_value(GPIO_PORT235, 1);
+}
+
+static void lcd_on(void *board_data, struct fb_info *info)
+{
+	lcd_backlight_on();
+}
+
+static void lcd_off(void *board_data)
+{
+	lcd_backlight_reset();
+}
+
+/* LCDC0 */
+static const struct fb_videomode lcdc0_modes[] = {
+	{
+		.name		= "R63302(QHD)",
+		.xres		= 544,
+		.yres		= 961,
+		.left_margin	= 72,
+		.right_margin	= 600,
+		.hsync_len	= 16,
+		.upper_margin	= 8,
+		.lower_margin	= 8,
+		.vsync_len	= 2,
+		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
+	},
+};
+
+static struct sh_mobile_lcdc_info lcdc0_info = {
+	.clock_source = LCDC_CLK_PERIPHERAL,
+	.ch[0] = {
+		.chan = LCDC_CHAN_MAINLCD,
+		.interface_type = RGB24,
+		.clock_divider = 1,
+		.flags = LCDC_FLAGS_DWPOL,
+		.lcd_size_cfg.width = 44,
+		.lcd_size_cfg.height = 79,
+		.bpp = 16,
+		.lcd_cfg = lcdc0_modes,
+		.num_cfg = ARRAY_SIZE(lcdc0_modes),
+		.board_cfg = {
+			.display_on = lcd_on,
+			.display_off = lcd_off,
+		},
+	}
+};
+
+static struct resource lcdc0_resources[] = {
+	[0] = {
+		.name	= "LCDC0",
+		.start	= 0xfe940000, /* P4-only space */
+		.end	= 0xfe943fff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= intcs_evt2irq(0x580),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device lcdc0_device = {
+	.name		= "sh_mobile_lcdc_fb",
+	.num_resources	= ARRAY_SIZE(lcdc0_resources),
+	.resource	= lcdc0_resources,
+	.id             = 0,
+	.dev	= {
+		.platform_data	= &lcdc0_info,
+		.coherent_dma_mask = ~0,
+	},
+};
+
+/* MIPI-DSI */
+static struct resource mipidsi0_resources[] = {
+	[0] = {
+		.start  = 0xfeab0000,
+		.end    = 0xfeab3fff,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = 0xfeab4000,
+		.end    = 0xfeab7fff,
+		.flags  = IORESOURCE_MEM,
+	},
+};
+
+static struct sh_mipi_dsi_info mipidsi0_info = {
+	.data_format	= MIPI_RGB888,
+	.lcd_chan	= &lcdc0_info.ch[0],
+	.vsynw_offset	= 20,
+	.clksrc		= 1,
+	.flags		= SH_MIPI_DSI_HSABM,
+};
+
+static struct platform_device mipidsi0_device = {
+	.name           = "sh-mipi-dsi",
+	.num_resources  = ARRAY_SIZE(mipidsi0_resources),
+	.resource       = mipidsi0_resources,
+	.id             = 0,
+	.dev	= {
+		.platform_data	= &mipidsi0_info,
+	},
+};
+
 static struct platform_device *ag5evm_devices[] __initdata = {
 	&eth_device,
 	&keysc_device,
 	&fsi_device,
 	&mmc_device,
+	&irda_device,
+	&lcdc0_device,
+	&mipidsi0_device,
 };
 
 static struct map_desc ag5evm_io_desc[] __initdata = {
@@ -224,6 +379,8 @@
 	__raw_writew(__raw_readw(PINTCR0A) | (2<<10), PINTCR0A);
 }
 
+#define DSI0PHYCR	0xe615006c
+
 static void __init ag5evm_init(void)
 {
 	sh73a0_pinmux_init();
@@ -287,6 +444,26 @@
 	gpio_request(GPIO_FN_FSIAISLD, NULL);
 	gpio_request(GPIO_FN_FSIAOSLD, NULL);
 
+	/* IrDA */
+	gpio_request(GPIO_FN_PORT241_IRDA_OUT, NULL);
+	gpio_request(GPIO_FN_PORT242_IRDA_IN,  NULL);
+	gpio_request(GPIO_FN_PORT243_IRDA_FIRSEL, NULL);
+
+	/* LCD panel */
+	gpio_request(GPIO_PORT217, NULL); /* RESET */
+	gpio_direction_output(GPIO_PORT217, 0);
+	mdelay(1);
+	gpio_set_value(GPIO_PORT217, 1);
+	mdelay(100);
+
+	/* LCD backlight controller */
+	gpio_request(GPIO_PORT235, NULL); /* RESET */
+	gpio_direction_output(GPIO_PORT235, 0);
+	lcd_backlight_reset();
+
+	/* MIPI-DSI clock setup */
+	__raw_writel(0x2a809010, DSI0PHYCR);
+
 #ifdef CONFIG_CACHE_L2X0
 	/* Shared attribute override enable, 64K*8way */
 	l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index cd79d7c..81d6536 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -247,10 +247,7 @@
  */
 static int slot_cn7_get_cd(struct platform_device *pdev)
 {
-	if (gpio_is_valid(GPIO_PORT41))
-		return !gpio_get_value(GPIO_PORT41);
-	else
-		return -ENXIO;
+	return !gpio_get_value(GPIO_PORT41);
 }
 
 /* SH_MMCIF */
@@ -308,6 +305,7 @@
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -339,7 +337,7 @@
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
 	.tmio_ocr_mask	= MMC_VDD_165_195,
 	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
-	.tmio_caps	= MMC_CAP_NEEDS_POLL,
+	.tmio_caps	= MMC_CAP_NEEDS_POLL | MMC_CAP_SDIO_IRQ,
 	.get_cd		= slot_cn7_get_cd,
 };
 
@@ -711,6 +709,10 @@
 	},
 };
 
+static struct platform_device fsi_ak4643_device = {
+	.name		= "sh_fsi2_a_ak4643",
+};
+
 static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
 	.clock_source = LCDC_CLK_EXTERNAL,
 	.ch[0] = {
@@ -933,6 +935,7 @@
 	&sdhi1_device,
 	&usb1_host_device,
 	&fsi_device,
+	&fsi_ak4643_device,
 	&sh_mmcif_device,
 	&lcdc1_device,
 	&lcdc_device,
@@ -1300,7 +1303,7 @@
 
 	lcdc_info.clock_source			= LCDC_CLK_BUS;
 	lcdc_info.ch[0].interface_type		= RGB18;
-	lcdc_info.ch[0].clock_divider		= 2;
+	lcdc_info.ch[0].clock_divider		= 3;
 	lcdc_info.ch[0].flags			= 0;
 	lcdc_info.ch[0].lcd_size_cfg.width	= 152;
 	lcdc_info.ch[0].lcd_size_cfg.height	= 91;
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 686b304..ef4613b 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -347,7 +347,6 @@
 	gpio_request(GPIO_FN_IRDA_OUT, NULL);
 	gpio_request(GPIO_FN_IRDA_IN, NULL);
 	gpio_request(GPIO_FN_IRDA_FIRSEL, NULL);
-	set_irq_type(evt2irq(0x480), IRQ_TYPE_LEVEL_LOW);
 
 	sh7367_add_standard_devices();
 
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index c13f012..dee3e92 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -30,6 +30,7 @@
 #include <linux/io.h>
 #include <linux/input.h>
 #include <linux/input/sh_keysc.h>
+#include <linux/mmc/host.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
 #include <linux/gpio.h>
 #include <mach/sh7377.h>
@@ -196,6 +197,10 @@
 };
 
 /* SDHI */
+static struct sh_mobile_sdhi_info sdhi0_info = {
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+};
+
 static struct resource sdhi0_resources[] = {
 	[0] = {
 		.name	= "SDHI0",
@@ -214,6 +219,13 @@
 	.num_resources  = ARRAY_SIZE(sdhi0_resources),
 	.resource       = sdhi0_resources,
 	.id             = 0,
+	.dev	= {
+		.platform_data	= &sdhi0_info,
+	},
+};
+
+static struct sh_mobile_sdhi_info sdhi1_info = {
+	.tmio_caps	= MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -234,6 +246,9 @@
 	.num_resources  = ARRAY_SIZE(sdhi1_resources),
 	.resource       = sdhi1_resources,
 	.id             = 1,
+	.dev	= {
+		.platform_data	= &sdhi1_info,
+	},
 };
 
 static struct platform_device *g4evm_devices[] __initdata = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 5bcf5c1..1657eac 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -169,9 +169,8 @@
  *	SW1	|	SW33
  *		| bit1 | bit2 | bit3 | bit4
  * -------------+------+------+------+-------
- * MMC0	  OFF	|  OFF |  ON  |  ON  |  X
- * MMC1	  ON	|  OFF |  ON  |  X   | ON
- * SDHI1  OFF	|  ON  |   X  |  OFF | ON
+ * MMC0   OFF	|  OFF |   X  |  ON  |  X       (Use MMCIF)
+ * SDHI1  OFF	|  ON  |   X  |  OFF |  X       (Use MFD_SH_MOBILE_SDHI)
  *
  */
 
@@ -304,7 +303,7 @@
 		.lcd_cfg = mackerel_lcdc_modes,
 		.num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
 		.interface_type		= RGB24,
-		.clock_divider		= 2,
+		.clock_divider		= 3,
 		.flags			= 0,
 		.lcd_size_cfg.width	= 152,
 		.lcd_size_cfg.height	= 91,
@@ -657,17 +656,14 @@
  */
 static int slot_cn7_get_cd(struct platform_device *pdev)
 {
-	if (gpio_is_valid(GPIO_PORT41))
-		return !gpio_get_value(GPIO_PORT41);
-	else
-		return -ENXIO;
+	return !gpio_get_value(GPIO_PORT41);
 }
 
 /* SDHI0 */
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
+	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -700,7 +696,7 @@
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
 	.tmio_ocr_mask	= MMC_VDD_165_195,
 	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED |
+	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
 			  MMC_CAP_NEEDS_POLL,
 	.get_cd		= slot_cn7_get_cd,
 };
@@ -729,13 +725,23 @@
 };
 #endif
 
+/*
+ * The card detect pin of the top SD/MMC slot (CN23) is active low and is
+ * connected to GPIO SCIFB_SCK of SH7372 (GPIO_PORT162).
+ */
+static int slot_cn23_get_cd(struct platform_device *pdev)
+{
+	return !gpio_get_value(GPIO_PORT162);
+}
+
 /* SDHI2 */
 static struct sh_mobile_sdhi_info sdhi2_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI2_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI2_RX,
 	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED |
+	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
 			  MMC_CAP_NEEDS_POLL,
+	.get_cd		= slot_cn23_get_cd,
 };
 
 static struct resource sdhi2_resources[] = {
@@ -953,6 +959,7 @@
 };
 
 /* I2C */
+#define IRQ7 evt2irq(0x02e0)
 #define IRQ9 evt2irq(0x0320)
 
 static struct i2c_board_info i2c0_devices[] = {
@@ -965,6 +972,11 @@
 		.platform_data = &mackerel_tca6416_keys_info,
 		.irq = IRQ9,
 	},
+	/* Touchscreen */
+	{
+		I2C_BOARD_INFO("st1232-ts", 0x55),
+		.irq = IRQ7,
+	},
 };
 
 #define IRQ21 evt2irq(0x32a0)
@@ -1092,6 +1104,10 @@
 	gpio_request(GPIO_FN_IRQ9_42,	NULL);
 	set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH);
 
+	/* enable Touchscreen */
+	gpio_request(GPIO_FN_IRQ7_40,	NULL);
+	set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);
+
 	/* enable Accelerometer */
 	gpio_request(GPIO_FN_IRQ21,	NULL);
 	set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);
@@ -1127,6 +1143,10 @@
 	gpio_request(GPIO_FN_SDHID2_1, NULL);
 	gpio_request(GPIO_FN_SDHID2_0, NULL);
 
+	/* card detect pin for microSD slot (CN23) */
+	gpio_request(GPIO_PORT162, NULL);
+	gpio_direction_input(GPIO_PORT162);
+
 	/* MMCIF */
 	gpio_request(GPIO_FN_MMCD0_0, NULL);
 	gpio_request(GPIO_FN_MMCD0_1, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 9aa8d68..e9731b5 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -234,7 +234,9 @@
 
 	value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
 
-	__raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
+	__raw_writel(value | ((idx + 19) << 24), PLLC2CR);
+
+	clk->rate = clk->freq_table[idx].frequency;
 
 	return 0;
 }
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 720a714..7e58904 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -118,8 +118,16 @@
 {
 	unsigned long mult = 1;
 
-	if (__raw_readl(PLLECR) & (1 << clk->enable_bit))
+	if (__raw_readl(PLLECR) & (1 << clk->enable_bit)) {
 		mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1);
+		/* handle CFG bit for PLL1 and PLL2 */
+		switch (clk->enable_bit) {
+		case 1:
+		case 2:
+			if (__raw_readl(clk->enable_reg) & (1 << 20))
+				mult *= 2;
+		}
+	}
 
 	return clk->parent->rate * mult;
 }
@@ -212,7 +220,7 @@
 static struct clk div4_clks[DIV4_NR] = {
 	[DIV4_I] = DIV4(FRQCRA, 20, 0xfff, CLK_ENABLE_ON_INIT),
 	[DIV4_ZG] = DIV4(FRQCRA, 16, 0xbff, CLK_ENABLE_ON_INIT),
-	[DIV4_M3] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
+	[DIV4_M3] = DIV4(FRQCRA, 12, 0xfff, CLK_ENABLE_ON_INIT),
 	[DIV4_B] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
 	[DIV4_M1] = DIV4(FRQCRA, 4, 0xfff, 0),
 	[DIV4_M2] = DIV4(FRQCRA, 0, 0xfff, 0),
@@ -255,10 +263,10 @@
 };
 
 enum { MSTP001,
-	MSTP125, MSTP116,
+	MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
 	MSTP219,
 	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-	MSTP331, MSTP329, MSTP323, MSTP312,
+	MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
 	MSTP411, MSTP410, MSTP403,
 	MSTP_NR };
 
@@ -267,8 +275,14 @@
 
 static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
+	[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
+	[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
+	[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
+	[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
 	[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
+	[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
 	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
+	[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
 	[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
 	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
 	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
@@ -279,6 +293,7 @@
 	[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
 	[MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */
 	[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
+	[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
 	[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
 	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
 	[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
@@ -288,16 +303,32 @@
 
 #define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
 #define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
 
 static struct clk_lookup lookups[] = {
 	/* main clocks */
 	CLKDEV_CON_ID("r_clk", &r_clk),
 
+	/* DIV6 clocks */
+	CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
+	CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
+	CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
+	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
+	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
+	CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
+	CLKDEV_ICK_ID("dsi1p_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
+
 	/* MSTP32 clocks */
 	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
+	CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
+	CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
+	CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
+	CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
 	CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
 	CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
+	CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
 	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
 	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
 	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
 	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
@@ -308,6 +339,7 @@
 	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
 	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
 	CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
+	CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
 	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
 	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
 	CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
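
A worked example (with made-up numbers) of the CFG-bit handling added to pll_clk_recalc() above: with a hypothetical 13 MHz parent and a multiplier field of 0x17, mult starts at 0x17 + 1 = 24; if the PLL is PLL1 or PLL2 and bit 20 of its control register is set, mult doubles to 48, so the reported rate becomes 13 MHz * 48 = 624 MHz.
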
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index e3ebfa7..3029aba 100644
--- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -6,13 +6,10 @@
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500
 
-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2
 
 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002
 
 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@
 
 WAIT 1, 0xFE40009C
 
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B
 
@@ -53,7 +53,7 @@
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087
 
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
 
 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,4 +84,11 @@
 
 WAIT 1, 0xFE40009C
 
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
+
+LIST "SCIF0 - Serial port for earlyprintk"
+EB 0xE6053098, 0x11
+EB 0xE6053098, 0xe1
+EW 0xE6C40000, 0x0000
+EB 0xE6C40004, 0x19
+EW 0xE6C40008, 0x3000
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
index e3ebfa7..3029aba 100644
--- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -6,13 +6,10 @@
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500
 
-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2
 
 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002
 
 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@
 
 WAIT 1, 0xFE40009C
 
+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B
 
@@ -53,7 +53,7 @@
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087
 
-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C
 
 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,4 +84,11 @@
 
 WAIT 1, 0xFE40009C
 
-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002
+
+LIST "SCIF0 - Serial port for earlyprintk"
+EB 0xE6053098, 0x11
+EB 0xE6053098, 0xe1
+EW 0xE6C40000, 0x0000
+EB 0xE6C40004, 0x19
+EW 0xE6C40008, 0x3000
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c
index 1a20c48..2fe9704 100644
--- a/arch/arm/mach-shmobile/intc-sh7367.c
+++ b/arch/arm/mach-shmobile/intc-sh7367.c
@@ -189,10 +189,10 @@
 	  { SCIFB, SCIFA5, SCIFA4, MSIOF1,
 	    0, 0, MSIOF2, 0 } },
 	{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    TTI20, USBDMAC_USHDMI, SPU, SIU } },
 	{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
 	  { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -207,7 +207,7 @@
 	  { 0, 0, TPU0, TPU1,
 	    TPU2, TPU3, TPU4, 0 } },
 	{ 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    MISTY, CMT3, RWDT1, RWDT0 } },
 };
 
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 30b2f40..ca5f9d1 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -230,10 +230,10 @@
 	  { SCIFB, SCIFA5, SCIFA4, MSIOF1,
 	    0, 0, MSIOF2, 0 } },
 	{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
-	  { 0, DISABLED, ENABLED, ENABLED,
+	  { 0, ENABLED, ENABLED, ENABLED,
 	    TTI20, USBHSDMAC0_USHDMI, 0, 0 } },
 	{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
 	  { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -365,6 +365,7 @@
 
 enum {
 	UNUSED_INTCS = 0,
+	ENABLED_INTCS,
 
 	INTCS,
 
@@ -413,7 +414,7 @@
 	CMT4,
 	DSITX1_DSITX1_0,
 	DSITX1_DSITX1_1,
-	/* MFIS2 */
+	MFIS2_INTCS, /* Priority always enabled using ENABLED_INTCS */
 	CPORTS2R,
 	/* CEC */
 	JPU6E,
@@ -477,7 +478,7 @@
 	INTCS_VECT(CMT4, 0x1980),
 	INTCS_VECT(DSITX1_DSITX1_0, 0x19a0),
 	INTCS_VECT(DSITX1_DSITX1_1, 0x19c0),
-	/* MFIS2 */
+	INTCS_VECT(MFIS2_INTCS, 0x1a00),
 	INTCS_VECT(CPORTS2R, 0x1a20),
 	/* CEC */
 	INTCS_VECT(JPU6E, 0x1a80),
@@ -543,7 +544,7 @@
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
 	    CMT4, DSITX1_DSITX1_0, DSITX1_DSITX1_1, 0 } },
 	{ 0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */
-	  { 0, CPORTS2R, 0, 0,
+	  { MFIS2_INTCS, CPORTS2R, 0, 0,
 	    JPU6E, 0, 0, 0 } },
 	{ 0xffd20104, 0, 16, /* INTAMASK */
 	  { 0, 0, 0, 0, 0, 0, 0, 0,
@@ -571,7 +572,8 @@
 	{ 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, 0 } },
 	{ 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, DSITX1_DSITX1_0,
 					       DSITX1_DSITX1_1, 0 } },
-	{ 0xffd50038, 0, 16, 4, /* IPROS3 */ { 0, CPORTS2R, 0, 0 } },
+	{ 0xffd50038, 0, 16, 4, /* IPROS3 */ { ENABLED_INTCS, CPORTS2R,
+					       0, 0 } },
 	{ 0xffd5003c, 0, 16, 4, /* IPRPS3 */ { JPU6E, 0, 0, 0 } },
 };
 
@@ -590,6 +592,7 @@
 
 static struct intc_desc intcs_desc __initdata = {
 	.name = "sh7372-intcs",
+	.force_enable = ENABLED_INTCS,
 	.resource = intcs_resources,
 	.num_resources = ARRAY_SIZE(intcs_resources),
 	.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c
index 2cdeb8c..dd56838 100644
--- a/arch/arm/mach-shmobile/intc-sh7377.c
+++ b/arch/arm/mach-shmobile/intc-sh7377.c
@@ -234,10 +234,10 @@
 	  { SCIFB, SCIFA5, SCIFA4, MSIOF1,
 	    0, 0, MSIOF2, 0 } },
 	{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    TTI20, USBDMAC_USHDMI, 0, MSUG } },
 	{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
 	  { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 322d8d5..5d0e1503 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -252,10 +252,11 @@
 
 void __init sh73a0_init_irq(void)
 {
-	void __iomem *gic_base = __io(0xf0001000);
+	void __iomem *gic_dist_base = __io(0xf0001000);
+	void __iomem *gic_cpu_base = __io(0xf0000100);
 	void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
 
-	gic_init(0, 29, gic_base, gic_base);
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
 
 	register_intc_controller(&intcs_desc);
 
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index 003008c..ce28141 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -35,6 +35,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xe6c40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc00), evt2irq(0xc00),
 			    evt2irq(0xc00), evt2irq(0xc00) },
@@ -52,6 +54,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xe6c50000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc20), evt2irq(0xc20),
 			    evt2irq(0xc20), evt2irq(0xc20) },
@@ -69,6 +73,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xe6c60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc40), evt2irq(0xc40),
 			    evt2irq(0xc40), evt2irq(0xc40) },
@@ -86,6 +92,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xe6c70000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc60), evt2irq(0xc60),
 			    evt2irq(0xc60), evt2irq(0xc60) },
@@ -103,6 +111,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xe6c80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd20), evt2irq(0xd20),
 			    evt2irq(0xd20), evt2irq(0xd20) },
@@ -120,6 +130,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xe6cb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd40), evt2irq(0xd40),
 			    evt2irq(0xd40), evt2irq(0xd40) },
@@ -137,6 +149,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xe6c30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd60), evt2irq(0xd60),
 			    evt2irq(0xd60), evt2irq(0xd60) },
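For reference, a sketch of the (unchanged) registration pattern that consumes this platform data; the "sh-sci" device name matches the clkdev entries added earlier in this series. Names below follow the surrounding file's convention but are shown here only as an illustration.

	static struct platform_device scif0_device = {
		.name		= "sh-sci",
		.id		= 0,
		.dev		= {
			.platform_data	= &scif0_platform_data,	/* .scscr / .scbrr_algo_id consumed by the sh-sci driver */
		},
	};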
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2e3e11e..ff0494f 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -38,6 +38,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xe6c40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c00), evt2irq(0x0c00),
 			    evt2irq(0x0c00), evt2irq(0x0c00) },
@@ -55,6 +57,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xe6c50000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c20), evt2irq(0x0c20),
 			    evt2irq(0x0c20), evt2irq(0x0c20) },
@@ -72,6 +76,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xe6c60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c40), evt2irq(0x0c40),
 			    evt2irq(0x0c40), evt2irq(0x0c40) },
@@ -89,6 +95,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xe6c70000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c60), evt2irq(0x0c60),
 			    evt2irq(0x0c60), evt2irq(0x0c60) },
@@ -106,6 +114,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xe6c80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0d20), evt2irq(0x0d20),
 			    evt2irq(0x0d20), evt2irq(0x0d20) },
@@ -123,6 +133,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xe6cb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0d40), evt2irq(0x0d40),
 			    evt2irq(0x0d40), evt2irq(0x0d40) },
@@ -140,6 +152,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xe6c30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFB,
 	.irqs		= { evt2irq(0x0d60), evt2irq(0x0d60),
 			    evt2irq(0x0d60), evt2irq(0x0d60) },
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index 575dbd6..8099b0b 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -36,6 +36,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xe6c40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc00), evt2irq(0xc00),
 			    evt2irq(0xc00), evt2irq(0xc00) },
@@ -53,6 +55,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xe6c50000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc20), evt2irq(0xc20),
 			    evt2irq(0xc20), evt2irq(0xc20) },
@@ -70,6 +74,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xe6c60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc40), evt2irq(0xc40),
 			    evt2irq(0xc40), evt2irq(0xc40) },
@@ -87,6 +93,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xe6c70000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc60), evt2irq(0xc60),
 			    evt2irq(0xc60), evt2irq(0xc60) },
@@ -104,6 +112,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xe6c80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd20), evt2irq(0xd20),
 			    evt2irq(0xd20), evt2irq(0xd20) },
@@ -121,6 +131,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xe6cb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd40), evt2irq(0xd40),
 			    evt2irq(0xd40), evt2irq(0xd40) },
@@ -138,6 +150,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xe6cc0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80),
 			    intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) },
@@ -155,6 +169,8 @@
 static struct plat_sci_port scif7_platform_data = {
 	.mapbase	= 0xe6c30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd60), evt2irq(0xd60),
 			    evt2irq(0xd60), evt2irq(0xd60) },
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index f1eff8b..685c40a 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -36,6 +36,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xe6c40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(72), gic_spi(72),
 			    gic_spi(72), gic_spi(72) },
@@ -52,6 +54,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xe6c50000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(73), gic_spi(73),
 			    gic_spi(73), gic_spi(73) },
@@ -68,6 +72,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xe6c60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(74), gic_spi(74),
 			    gic_spi(74), gic_spi(74) },
@@ -84,6 +90,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xe6c70000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(75), gic_spi(75),
 			    gic_spi(75), gic_spi(75) },
@@ -100,6 +108,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xe6c80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(78), gic_spi(78),
 			    gic_spi(78), gic_spi(78) },
@@ -116,6 +126,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xe6cb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(79), gic_spi(79),
 			    gic_spi(79), gic_spi(79) },
@@ -132,6 +144,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xe6cc0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(156), gic_spi(156),
 			    gic_spi(156), gic_spi(156) },
@@ -148,6 +162,8 @@
 static struct plat_sci_port scif7_platform_data = {
 	.mapbase	= 0xe6cd0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { gic_spi(143), gic_spi(143),
 			    gic_spi(143), gic_spi(143) },
@@ -164,6 +180,8 @@
 static struct plat_sci_port scif8_platform_data = {
 	.mapbase	= 0xe6c30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFB,
 	.irqs		= { gic_spi(80), gic_spi(80),
 			    gic_spi(80), gic_spi(80) },
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h
index cacf17a..53677e4 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear320.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear320.h
@@ -62,7 +62,7 @@
 #define SPEAR320_SMII1_BASE		0xAB000000
 #define SPEAR320_SMII1_SIZE		0x01000000
 
-#define SPEAR320_SOC_CONFIG_BASE	0xB4000000
+#define SPEAR320_SOC_CONFIG_BASE	0xB3000000
 #define SPEAR320_SOC_CONFIG_SIZE	0x00000070
 /* Interrupt registers offsets and masks */
 #define INT_STS_MASK_REG		0x04
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 3560f8c..5aa2d54 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -371,7 +371,7 @@
 };
 
 /* Add spear300 specific devices here */
-/* arm gpio1 device registeration */
+/* arm gpio1 device registration */
 static struct pl061_platform_data gpio1_plat_data = {
 	.gpio_base	= 8,
 	.irq_base	= SPEAR_GPIO1_INT_BASE,
@@ -451,7 +451,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	shirq_ras1.regs.base =
 		ioremap(SPEAR300_TELECOM_BASE, SPEAR300_TELECOM_REG_SIZE);
 	if (shirq_ras1.regs.base) {
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index 96a1ab8..53b41b5 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -266,7 +266,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	base = ioremap(SPEAR310_SOC_CONFIG_BASE, SPEAR310_SOC_CONFIG_SIZE);
 	if (base) {
 		/* shirq 1 */
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index 6a121954..88b4652 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -519,7 +519,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	base = ioremap(SPEAR320_SOC_CONFIG_BASE, SPEAR320_SOC_CONFIG_SIZE);
 	if (base) {
 		/* shirq 1 */
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index e87313a..52f553c 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -22,7 +22,7 @@
 #include <mach/spear.h>
 
 /* Add spear3xx machines common devices here */
-/* gpio device registeration */
+/* gpio device registration */
 static struct pl061_platform_data gpio_plat_data = {
 	.gpio_base	= 0,
 	.irq_base	= SPEAR_GPIO_INT_BASE,
@@ -41,7 +41,7 @@
 	.irq = {IRQ_BASIC_GPIO, NO_IRQ},
 };
 
-/* uart device registeration */
+/* uart device registration */
 struct amba_device uart_device = {
 	.dev = {
 		.init_name = "uart",
@@ -543,6 +543,6 @@
 
 pmx_fail:
 	if (ret)
-		printk(KERN_ERR "padmux: registeration failed. err no: %d\n",
+		printk(KERN_ERR "padmux: registration failed. err no: %d\n",
 				ret);
 }
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index baf6bcc..f2fe14e 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -23,7 +23,7 @@
 #include <mach/spear.h>
 
 /* Add spear6xx machines common devices here */
-/* uart device registeration */
+/* uart device registration */
 struct amba_device uart_device[] = {
 	{
 		.dev = {
@@ -50,7 +50,7 @@
 	}
 };
 
-/* gpio device registeration */
+/* gpio device registration */
 static struct pl061_platform_data gpio_plat_data[] = {
 	{
 		.gpio_base	= 0,
diff --git a/arch/arm/mach-stmp378x/stmp378x.c b/arch/arm/mach-stmp378x/stmp378x.c
index ddd49a7..c2f9fe04 100644
--- a/arch/arm/mach-stmp378x/stmp378x.c
+++ b/arch/arm/mach-stmp378x/stmp378x.c
@@ -47,7 +47,7 @@
 /*
  * IRQ handling
  */
-static void stmp378x_ack_irq(unsigned int irq)
+static void stmp378x_ack_irq(struct irq_data *d)
 {
 	/* Tell ICOLL to release IRQ line */
 	__raw_writel(0, REGS_ICOLL_BASE + HW_ICOLL_VECTOR);
@@ -60,24 +60,24 @@
 	(void)__raw_readl(REGS_ICOLL_BASE + HW_ICOLL_STAT);
 }
 
-static void stmp378x_mask_irq(unsigned int irq)
+static void stmp378x_mask_irq(struct irq_data *d)
 {
 	/* IRQ disable */
 	stmp3xxx_clearl(BM_ICOLL_INTERRUPTn_ENABLE,
-			REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + irq * 0x10);
+			REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + d->irq * 0x10);
 }
 
-static void stmp378x_unmask_irq(unsigned int irq)
+static void stmp378x_unmask_irq(struct irq_data *d)
 {
 	/* IRQ enable */
 	stmp3xxx_setl(BM_ICOLL_INTERRUPTn_ENABLE,
-		      REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + irq * 0x10);
+		      REGS_ICOLL_BASE + HW_ICOLL_INTERRUPTn + d->irq * 0x10);
 }
 
 static struct irq_chip stmp378x_chip = {
-	.ack	= stmp378x_ack_irq,
-	.mask	= stmp378x_mask_irq,
-	.unmask = stmp378x_unmask_irq,
+	.irq_ack	= stmp378x_ack_irq,
+	.irq_mask	= stmp378x_mask_irq,
+	.irq_unmask	= stmp378x_unmask_irq,
 };
 
 void __init stmp378x_init_irq(void)
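The hunks above follow the generic genirq conversion applied throughout this series: irq_chip callbacks now take a struct irq_data and read the interrupt number from d->irq. A minimal sketch with hypothetical names ("foo", register offsets assumed):

	#include <linux/io.h>
	#include <linux/irq.h>

	static void __iomem *foo_base;			/* hypothetical controller base */

	static void foo_mask_irq(struct irq_data *d)	/* was: (unsigned int irq) */
	{
		writel(1 << (d->irq & 31), foo_base + 0x10);	/* irq number now comes from d->irq */
	}

	static void foo_unmask_irq(struct irq_data *d)
	{
		writel(1 << (d->irq & 31), foo_base + 0x14);
	}

	static struct irq_chip foo_chip = {
		.name		= "foo",
		.irq_mask	= foo_mask_irq,		/* was: .mask   */
		.irq_unmask	= foo_unmask_irq,	/* was: .unmask */
	};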
diff --git a/arch/arm/mach-stmp37xx/stmp37xx.c b/arch/arm/mach-stmp37xx/stmp37xx.c
index 8c7d6fb..a9aed06 100644
--- a/arch/arm/mach-stmp37xx/stmp37xx.c
+++ b/arch/arm/mach-stmp37xx/stmp37xx.c
@@ -43,11 +43,11 @@
 /*
  * IRQ handling
  */
-static void stmp37xx_ack_irq(unsigned int irq)
+static void stmp37xx_ack_irq(struct irq_data *d)
 {
 	/* Disable IRQ */
-	stmp3xxx_clearl(0x04 << ((irq % 4) * 8),
-		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + irq / 4 * 0x10);
+	stmp3xxx_clearl(0x04 << ((d->irq % 4) * 8),
+		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + d->irq / 4 * 0x10);
 
 	/* ACK current interrupt */
 	__raw_writel(1, REGS_ICOLL_BASE + HW_ICOLL_LEVELACK);
@@ -56,24 +56,24 @@
 	(void)__raw_readl(REGS_ICOLL_BASE + HW_ICOLL_STAT);
 }
 
-static void stmp37xx_mask_irq(unsigned int irq)
+static void stmp37xx_mask_irq(struct irq_data *d)
 {
 	/* IRQ disable */
-	stmp3xxx_clearl(0x04 << ((irq % 4) * 8),
-		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + irq / 4 * 0x10);
+	stmp3xxx_clearl(0x04 << ((d->irq % 4) * 8),
+		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + d->irq / 4 * 0x10);
 }
 
-static void stmp37xx_unmask_irq(unsigned int irq)
+static void stmp37xx_unmask_irq(struct irq_data *d)
 {
 	/* IRQ enable */
-	stmp3xxx_setl(0x04 << ((irq % 4) * 8),
-		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + irq / 4 * 0x10);
+	stmp3xxx_setl(0x04 << ((d->irq % 4) * 8),
+		REGS_ICOLL_BASE + HW_ICOLL_PRIORITYn + d->irq / 4 * 0x10);
 }
 
 static struct irq_chip stmp37xx_chip = {
-	.ack	= stmp37xx_ack_irq,
-	.mask	= stmp37xx_mask_irq,
-	.unmask = stmp37xx_unmask_irq,
+	.irq_ack	= stmp37xx_ack_irq,
+	.irq_mask	= stmp37xx_mask_irq,
+	.irq_unmask	= stmp37xx_unmask_irq,
 };
 
 void __init stmp37xx_init_irq(void)
diff --git a/arch/arm/mach-tcc8k/irq.c b/arch/arm/mach-tcc8k/irq.c
index 34575c4..aa9231f 100644
--- a/arch/arm/mach-tcc8k/irq.c
+++ b/arch/arm/mach-tcc8k/irq.c
@@ -18,65 +18,65 @@
 #include "common.h"
 
 /* Disable IRQ */
-static void tcc8000_mask_ack_irq0(unsigned int irq)
+static void tcc8000_mask_ack_irq0(struct irq_data *d)
 {
-	PIC0_IEN &= ~(1 << irq);
-	PIC0_CREQ |=  (1 << irq);
+	PIC0_IEN &= ~(1 << d->irq);
+	PIC0_CREQ |=  (1 << d->irq);
 }
 
-static void tcc8000_mask_ack_irq1(unsigned int irq)
+static void tcc8000_mask_ack_irq1(struct irq_data *d)
 {
-	PIC1_IEN &= ~(1 << (irq - 32));
-	PIC1_CREQ |= (1 << (irq - 32));
+	PIC1_IEN &= ~(1 << (d->irq - 32));
+	PIC1_CREQ |= (1 << (d->irq - 32));
 }
 
-static void tcc8000_mask_irq0(unsigned int irq)
+static void tcc8000_mask_irq0(struct irq_data *d)
 {
-	PIC0_IEN &= ~(1 << irq);
+	PIC0_IEN &= ~(1 << d->irq);
 }
 
-static void tcc8000_mask_irq1(unsigned int irq)
+static void tcc8000_mask_irq1(struct irq_data *d)
 {
-	PIC1_IEN &= ~(1 << (irq - 32));
+	PIC1_IEN &= ~(1 << (d->irq - 32));
 }
 
-static void tcc8000_ack_irq0(unsigned int irq)
+static void tcc8000_ack_irq0(struct irq_data *d)
 {
-	PIC0_CREQ |=  (1 << irq);
+	PIC0_CREQ |=  (1 << d->irq);
 }
 
-static void tcc8000_ack_irq1(unsigned int irq)
+static void tcc8000_ack_irq1(struct irq_data *d)
 {
-	PIC1_CREQ |= (1 << (irq - 32));
+	PIC1_CREQ |= (1 << (d->irq - 32));
 }
 
 /* Enable IRQ */
-static void tcc8000_unmask_irq0(unsigned int irq)
+static void tcc8000_unmask_irq0(struct irq_data *d)
 {
-	PIC0_IEN |= (1 << irq);
-	PIC0_INTOEN |= (1 << irq);
+	PIC0_IEN |= (1 << d->irq);
+	PIC0_INTOEN |= (1 << d->irq);
 }
 
-static void tcc8000_unmask_irq1(unsigned int irq)
+static void tcc8000_unmask_irq1(struct irq_data *d)
 {
-	PIC1_IEN |= (1 << (irq - 32));
-	PIC1_INTOEN |= (1 << (irq - 32));
+	PIC1_IEN |= (1 << (d->irq - 32));
+	PIC1_INTOEN |= (1 << (d->irq - 32));
 }
 
 static struct irq_chip tcc8000_irq_chip0 = {
 	.name		= "tcc_irq0",
-	.mask		= tcc8000_mask_irq0,
-	.ack		= tcc8000_ack_irq0,
-	.mask_ack	= tcc8000_mask_ack_irq0,
-	.unmask		= tcc8000_unmask_irq0,
+	.irq_mask	= tcc8000_mask_irq0,
+	.irq_ack	= tcc8000_ack_irq0,
+	.irq_mask_ack	= tcc8000_mask_ack_irq0,
+	.irq_unmask	= tcc8000_unmask_irq0,
 };
 
 static struct irq_chip tcc8000_irq_chip1 = {
 	.name		= "tcc_irq1",
-	.mask		= tcc8000_mask_irq1,
-	.ack		= tcc8000_ack_irq1,
-	.mask_ack	= tcc8000_mask_ack_irq1,
-	.unmask		= tcc8000_unmask_irq1,
+	.irq_mask	= tcc8000_mask_irq1,
+	.irq_ack	= tcc8000_ack_irq1,
+	.irq_mask_ack	= tcc8000_mask_ack_irq1,
+	.irq_unmask	= tcc8000_unmask_irq1,
 };
 
 void __init tcc8k_init_irq(void)
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index 0775265..ad80488 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -142,31 +142,31 @@
 	.ngpio			= TEGRA_NR_GPIOS,
 };
 
-static void tegra_gpio_irq_ack(unsigned int irq)
+static void tegra_gpio_irq_ack(struct irq_data *d)
 {
-	int gpio = irq - INT_GPIO_BASE;
+	int gpio = d->irq - INT_GPIO_BASE;
 
 	__raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
 }
 
-static void tegra_gpio_irq_mask(unsigned int irq)
+static void tegra_gpio_irq_mask(struct irq_data *d)
 {
-	int gpio = irq - INT_GPIO_BASE;
+	int gpio = d->irq - INT_GPIO_BASE;
 
 	tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
 }
 
-static void tegra_gpio_irq_unmask(unsigned int irq)
+static void tegra_gpio_irq_unmask(struct irq_data *d)
 {
-	int gpio = irq - INT_GPIO_BASE;
+	int gpio = d->irq - INT_GPIO_BASE;
 
 	tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
 }
 
-static int tegra_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	int gpio = irq - INT_GPIO_BASE;
-	struct tegra_gpio_bank *bank = get_irq_chip_data(irq);
+	int gpio = d->irq - INT_GPIO_BASE;
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	int port = GPIO_PORT(gpio);
 	int lvl_type;
 	int val;
@@ -207,9 +207,9 @@
 	spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__set_irq_handler_unlocked(irq, handle_level_irq);
+		__set_irq_handler_unlocked(d->irq, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__set_irq_handler_unlocked(irq, handle_edge_irq);
+		__set_irq_handler_unlocked(d->irq, handle_edge_irq);
 
 	return 0;
 }
@@ -221,7 +221,7 @@
 	int pin;
 	int unmasked = 0;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	bank = get_irq_data(irq);
 
@@ -240,7 +240,7 @@
 			 */
 			if (lvl & (0x100 << pin)) {
 				unmasked = 1;
-				desc->chip->unmask(irq);
+				desc->irq_data.chip->irq_unmask(&desc->irq_data);
 			}
 
 			generic_handle_irq(gpio_to_irq(gpio + pin));
@@ -248,7 +248,7 @@
 	}
 
 	if (!unmasked)
-		desc->chip->unmask(irq);
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 
 }
 
@@ -316,21 +316,21 @@
 	local_irq_restore(flags);
 }
 
-static int tegra_gpio_wake_enable(unsigned int irq, unsigned int enable)
+static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
 {
-	struct tegra_gpio_bank *bank = get_irq_chip_data(irq);
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	return set_irq_wake(bank->irq, enable);
 }
 #endif
 
 static struct irq_chip tegra_gpio_irq_chip = {
 	.name		= "GPIO",
-	.ack		= tegra_gpio_irq_ack,
-	.mask		= tegra_gpio_irq_mask,
-	.unmask		= tegra_gpio_irq_unmask,
-	.set_type	= tegra_gpio_irq_set_type,
+	.irq_ack	= tegra_gpio_irq_ack,
+	.irq_mask	= tegra_gpio_irq_mask,
+	.irq_unmask	= tegra_gpio_irq_unmask,
+	.irq_set_type	= tegra_gpio_irq_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= tegra_gpio_wake_enable,
+	.irq_set_wake	= tegra_gpio_wake_enable,
 #endif
 };
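The chained-handler side of the same conversion, as used in the tegra_gpio_irq_handler hunk above, reaches the parent chip through desc->irq_data instead of desc->chip. An illustrative sketch (hypothetical names):

	static void foo_gpio_demux(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = desc->irq_data.chip;	/* parent interrupt chip */

		chip->irq_ack(&desc->irq_data);		/* was: desc->chip->ack(irq) */
		/* ... decode bank status, generic_handle_irq() per pin ... */
		chip->irq_unmask(&desc->irq_data);	/* was: desc->chip->unmask(irq) */
	}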
 
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index a5cb1ce..f329404 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -26,10 +26,10 @@
 	 * Turn off coherency
 	 */
 	"	mrc	p15, 0, %0, c1, c0, 1\n"
-	"	bic	%0, %0, %2\n"
+	"	bic	%0, %0, #0x20\n"
 	"	mcr	p15, 0, %0, c1, c0, 1\n"
 	"	mrc	p15, 0, %0, c1, c0, 0\n"
-	"	bic	%0, %0, #0x04\n"
+	"	bic	%0, %0, %2\n"
 	"	mcr	p15, 0, %0, c1, c0, 0\n"
 	  : "=&r" (v)
 	  : "r" (0), "Ir" (CR_C)
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h
index d772395..a217f68 100644
--- a/arch/arm/mach-tegra/include/mach/clk.h
+++ b/arch/arm/mach-tegra/include/mach/clk.h
@@ -20,6 +20,8 @@
 #ifndef __MACH_CLK_H
 #define __MACH_CLK_H
 
+struct clk;
+
 void tegra_periph_reset_deassert(struct clk *c);
 void tegra_periph_reset_assert(struct clk *c);
 
diff --git a/arch/arm/mach-tegra/include/mach/clkdev.h b/arch/arm/mach-tegra/include/mach/clkdev.h
index 412f5c6..66cd3f4 100644
--- a/arch/arm/mach-tegra/include/mach/clkdev.h
+++ b/arch/arm/mach-tegra/include/mach/clkdev.h
@@ -20,6 +20,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
 
+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
 	return 1;
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
new file mode 100644
index 0000000..04c7798
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -0,0 +1,62 @@
+/*
+ * Platform definitions for tegra-kbc keyboard input driver
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef ASMARM_ARCH_TEGRA_KBC_H
+#define ASMARM_ARCH_TEGRA_KBC_H
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define KBC_MAX_GPIO	24
+#define KBC_MAX_KPENT	8
+#else
+#define KBC_MAX_GPIO	20
+#define KBC_MAX_KPENT	7
+#endif
+
+#define KBC_MAX_ROW	16
+#define KBC_MAX_COL	8
+#define KBC_MAX_KEY	(KBC_MAX_ROW * KBC_MAX_COL)
+
+struct tegra_kbc_pin_cfg {
+	bool is_row;
+	unsigned char num;
+};
+
+struct tegra_kbc_wake_key {
+	u8 row:4;
+	u8 col:4;
+};
+
+struct tegra_kbc_platform_data {
+	unsigned int debounce_cnt;
+	unsigned int repeat_cnt;
+
+	unsigned int wake_cnt; /* 0: wake on any key, >1: wake on keys in wake_cfg */
+	const struct tegra_kbc_wake_key *wake_cfg;
+
+	struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
+	const struct matrix_keymap_data *keymap_data;
+
+	bool wakeup;
+	bool use_fn_map;
+};
+#endif
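A hypothetical board-side sketch of how this header is intended to be filled in (the keymap contents and counts below are assumptions, not part of this patch; see linux/input.h and mach/kbc.h for the types used):

	static const u32 board_keymap[] = {
		KEY(0, 0, KEY_POWER),		/* row 0, column 0 */
	};

	static const struct matrix_keymap_data board_keymap_data = {
		.keymap		= board_keymap,
		.keymap_size	= ARRAY_SIZE(board_keymap),
	};

	static struct tegra_kbc_platform_data board_kbc_pdata = {
		.debounce_cnt	= 2,
		.repeat_cnt	= 5,
		.keymap_data	= &board_keymap_data,
		.wakeup		= true,
		/* .pin_cfg[i] = { .is_row = ..., .num = ... } per wired KBC pin */
	};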
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
new file mode 100644
index 0000000..3ad086e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -0,0 +1,29 @@
+/*
+ * arch/arm/mach-tegra/include/mach/sdhci.h
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_SDHCI_H
+#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
+
+#include <linux/mmc/host.h>
+
+struct tegra_sdhci_platform_data {
+	int cd_gpio;
+	int wp_gpio;
+	int power_gpio;
+	int is_8bit;
+};
+
+#endif
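A hypothetical board-side usage sketch (GPIO numbers and the "sdhci-tegra" device name are assumptions, not defined by this header; unused GPIO hooks are conventionally set to -1):

	static struct tegra_sdhci_platform_data board_sdhci_pdata = {
		.cd_gpio	= 69,	/* example card-detect GPIO */
		.wp_gpio	= 57,	/* example write-protect GPIO */
		.power_gpio	= -1,	/* no power GPIO on this slot */
		.is_8bit	= 1,	/* 8-bit bus width */
	};

	static struct platform_device board_sdhci_device = {	/* needs <linux/platform_device.h> */
		.name		= "sdhci-tegra",
		.id		= 0,
		.dev		= {
			.platform_data	= &board_sdhci_pdata,
		},
	};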
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 5407de0..17c74d2 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -46,30 +46,30 @@
 #define ICTLR_COP_IER_CLR	0x38
 #define ICTLR_COP_IEP_CLASS	0x3c
 
-static void (*gic_mask_irq)(unsigned int irq);
-static void (*gic_unmask_irq)(unsigned int irq);
+static void (*tegra_gic_mask_irq)(struct irq_data *d);
+static void (*tegra_gic_unmask_irq)(struct irq_data *d);
 
-#define irq_to_ictlr(irq) (((irq)-32) >> 5)
+#define irq_to_ictlr(irq) (((irq) - 32) >> 5)
 static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE);
-#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr)*0x100)
+#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr) * 0x100)
 
-static void tegra_mask(unsigned int irq)
+static void tegra_mask(struct irq_data *d)
 {
-	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(irq));
-	gic_mask_irq(irq);
-	writel(1<<(irq&31), addr+ICTLR_CPU_IER_CLR);
+	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
+	tegra_gic_mask_irq(d);
+	writel(1 << (d->irq & 31), addr + ICTLR_CPU_IER_CLR);
 }
 
-static void tegra_unmask(unsigned int irq)
+static void tegra_unmask(struct irq_data *d)
 {
-	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(irq));
-	gic_unmask_irq(irq);
-	writel(1<<(irq&31), addr+ICTLR_CPU_IER_SET);
+	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
+	tegra_gic_unmask_irq(d);
+	writel(1 << (d->irq & 31), addr + ICTLR_CPU_IER_SET);
 }
 
 #ifdef CONFIG_PM
 
-static int tegra_set_wake(unsigned int irq, unsigned int on)
+static int tegra_set_wake(struct irq_data *d, unsigned int on)
 {
 	return 0;
 }
@@ -77,10 +77,10 @@
 
 static struct irq_chip tegra_irq = {
 	.name		= "PPI",
-	.mask		= tegra_mask,
-	.unmask		= tegra_unmask,
+	.irq_mask	= tegra_mask,
+	.irq_unmask	= tegra_unmask,
 #ifdef CONFIG_PM
-	.set_wake	= tegra_set_wake,
+	.irq_set_wake	= tegra_set_wake,
 #endif
 };
 
@@ -98,11 +98,11 @@
 		 IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
 
 	gic = get_irq_chip(29);
-	gic_unmask_irq = gic->unmask;
-	gic_mask_irq = gic->mask;
-	tegra_irq.ack = gic->ack;
+	tegra_gic_unmask_irq = gic->irq_unmask;
+	tegra_gic_mask_irq = gic->irq_mask;
+	tegra_irq.irq_ack = gic->irq_ack;
 #ifdef CONFIG_SMP
-	tegra_irq.set_affinity = gic->set_affinity;
+	tegra_irq.irq_set_affinity = gic->irq_set_affinity;
 #endif
 
 	for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 801b21e..32a7b0f 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -64,7 +64,7 @@
 	bool "Dual RAM"
 	help
 		Select this if you want support for Dual RAM phones.
-		This is two RAM memorys on different EMIFs.
+		This is two RAM memories on different EMIFs.
 endchoice
 
 config U300_DEBUG
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index 193da2d..6193aaa 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -24,7 +24,7 @@
  * @src_addr: transfer source address
  * @dst_addr: transfer destination address
  * @link_addr:  physical address to next lli
- * @virt_link_addr: virtual addres of next lli (only used by pool_free)
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
  * @phy_this: physical address of current lli (only used by pool_free)
  */
 struct coh901318_lli {
@@ -90,7 +90,7 @@
  * struct coh901318_platform - platform arch structure
  * @chans_slave: specifying dma slave channels
  * @chans_memcpy: specifying dma memcpy channels
- * @access_memory_state: requesting DMA memeory access (on / off)
+ * @access_memory_state: requesting DMA memory access (on / off)
  * @chan_conf: dma channel configurations
  * @max_channels: max number of dma chanenls
  */
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 1187f1f..533967c 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -3,99 +3,94 @@
  *
  * License Terms: GNU General Public License v2
  *
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com>
+ *          Bengt Jonsson <bengt.g.jonsson@stericsson.com>
  *
  * MOP500 board specific initialization for regulators
  */
 #include <linux/kernel.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500.h>
 
-/* supplies to the display/camera */
-static struct regulator_init_data ab8500_vaux1_regulator = {
-	.constraints = {
-		.name = "V-DISPLAY",
-		.min_uV = 2500000,
-		.max_uV = 2900000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
+/* AB8500 regulators */
+struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+	/* supplies to the display/camera */
+	[AB8500_LDO_AUX1] = {
+		.constraints = {
+			.name = "V-DISPLAY",
+			.min_uV = 2500000,
+			.max_uV = 2900000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supplies to the on-board eMMC */
+	[AB8500_LDO_AUX2] = {
+		.constraints = {
+			.name = "V-eMMC1",
+			.min_uV = 1100000,
+			.max_uV = 3300000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for VAUX3, supplies to SDcard slots */
+	[AB8500_LDO_AUX3] = {
+		.constraints = {
+			.name = "V-MMC-SD",
+			.min_uV = 1100000,
+			.max_uV = 3300000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for tvout, gpadc, TVOUT LDO */
+	[AB8500_LDO_TVOUT] = {
+		.constraints = {
+			.name = "V-TVOUT",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for ab8500-vaudio, VAUDIO LDO */
+	[AB8500_LDO_AUDIO] = {
+		.constraints = {
+			.name = "V-AUD",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-anamic1 VAMic1-LDO */
+	[AB8500_LDO_ANAMIC1] = {
+		.constraints = {
+			.name = "V-AMIC1",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+	[AB8500_LDO_ANAMIC2] = {
+		.constraints = {
+			.name = "V-AMIC2",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-dmic, VDMIC LDO */
+	[AB8500_LDO_DMIC] = {
+		.constraints = {
+			.name = "V-DMIC",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-intcore12, VINTCORE12 LDO */
+	[AB8500_LDO_INTCORE] = {
+		.constraints = {
+			.name = "V-INTCORE",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for U8500 CSI/DSI, VANA LDO */
+	[AB8500_LDO_ANA] = {
+		.constraints = {
+			.name = "V-CSI/DSI",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
 	},
 };
-
-/* supplies to the on-board eMMC */
-static struct regulator_init_data ab8500_vaux2_regulator = {
-	.constraints = {
-		.name = "V-eMMC1",
-		.min_uV = 1100000,
-		.max_uV = 3300000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for VAUX3, supplies to SDcard slots */
-static struct regulator_init_data ab8500_vaux3_regulator = {
-	.constraints = {
-		.name = "V-MMC-SD",
-		.min_uV = 1100000,
-		.max_uV = 3300000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for tvout, gpadc, TVOUT LDO */
-static struct regulator_init_data ab8500_vtvout_init = {
-	.constraints = {
-		.name = "V-TVOUT",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for ab8500-vaudio, VAUDIO LDO */
-static struct regulator_init_data ab8500_vaudio_init = {
-	.constraints = {
-		.name = "V-AUD",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-anamic1 VAMic1-LDO */
-static struct regulator_init_data ab8500_vamic1_init = {
-	.constraints = {
-		.name = "V-AMIC1",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
-static struct regulator_init_data ab8500_vamic2_init = {
-	.constraints = {
-		.name = "V-AMIC2",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-dmic, VDMIC LDO */
-static struct regulator_init_data ab8500_vdmic_init = {
-	.constraints = {
-		.name = "V-DMIC",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-intcore12, VINTCORE12 LDO */
-static struct regulator_init_data ab8500_vintcore_init = {
-	.constraints = {
-		.name = "V-INTCORE",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for U8500 CSI/DSI, VANA LDO */
-static struct regulator_init_data ab8500_vana_init = {
-	.constraints = {
-		.name = "V-CSI/DSI",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
new file mode 100644
index 0000000..2675fae
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * MOP500 board specific initialization for regulators
+ */
+
+#ifndef __BOARD_MOP500_REGULATORS_H
+#define __BOARD_MOP500_REGULATORS_H
+
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500.h>
+
+extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
+
+#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index a1c9ea1..a393f57 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -35,6 +35,7 @@
 #include "devices-db8500.h"
 #include "pins-db8500.h"
 #include "board-mop500.h"
+#include "board-mop500-regulators.h"
 
 static pin_cfg_t mop500_pins[] = {
 	/* SSP0 */
@@ -80,6 +81,8 @@
 
 static struct ab8500_platform_data ab8500_platdata = {
 	.irq_base	= MOP500_AB8500_IRQ_BASE,
+	.regulator	= ab8500_regulators,
+	.num_regulator	= ARRAY_SIZE(ab8500_regulators),
 };
 
 static struct resource ab8500_resources[] = {
diff --git a/arch/arm/mach-versatile/Kconfig b/arch/arm/mach-versatile/Kconfig
index 3f7b5e9..9cdec5a 100644
--- a/arch/arm/mach-versatile/Kconfig
+++ b/arch/arm/mach-versatile/Kconfig
@@ -2,17 +2,19 @@
 	depends on ARCH_VERSATILE
 
 config ARCH_VERSATILE_PB
-	bool "Support Versatile/PB platform"
+	bool "Support Versatile Platform Baseboard for ARM926EJ-S"
 	select CPU_ARM926T
 	select MIGHT_HAVE_PCI
 	default y
 	help
-	  Include support for the ARM(R) Versatile/PB platform.
+	  Include support for the ARM(R) Versatile Platform Baseboard
+	  for the ARM926EJ-S.
 
 config MACH_VERSATILE_AB
-	bool "Support Versatile/AB platform"
+	bool "Support Versatile Application Baseboard for ARM926EJ-S"
 	select CPU_ARM926T
 	help
-	  Include support for the ARM(R) Versatile/AP platform.
+	  Include support for the ARM(R) Versatile Application Baseboard
+	  for the ARM926EJ-S.
 
 endmenu
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 13a83e4..136c32e 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -63,23 +63,25 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
-static void sic_mask_irq(unsigned int irq)
+static void sic_mask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
+
 	writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
 }
 
-static void sic_unmask_irq(unsigned int irq)
+static void sic_unmask_irq(struct irq_data *d)
 {
-	irq -= IRQ_SIC_START;
+	unsigned int irq = d->irq - IRQ_SIC_START;
+
 	writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_SET);
 }
 
 static struct irq_chip sic_chip = {
-	.name	= "SIC",
-	.ack	= sic_mask_irq,
-	.mask	= sic_mask_irq,
-	.unmask	= sic_unmask_irq,
+	.name		= "SIC",
+	.irq_ack	= sic_mask_irq,
+	.irq_mask	= sic_mask_irq,
+	.irq_unmask	= sic_unmask_irq,
 };
 
 static void
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index b1687b6..634bf1d 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -39,7 +39,7 @@
  * observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void __cpuinit write_pen_release(int val)
 {
 	pen_release = val;
 	smp_wmb();
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index a9ed342..1edae65 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -19,6 +19,7 @@
 #include <asm/mach/time.h>
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/timer-sp.h>
+#include <asm/hardware/sp810.h>
 
 #include <mach/motherboard.h>
 
@@ -50,8 +51,16 @@
 
 static void __init v2m_timer_init(void)
 {
+	u32 scctrl;
+
 	versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
 
+	/* Select 1MHz TIMCLK as the reference clock for SP804 timers */
+	scctrl = readl(MMIO_P2V(V2M_SYSCTL + SCCTRL));
+	scctrl |= SCCTRL_TIMEREN0SEL_TIMCLK;
+	scctrl |= SCCTRL_TIMEREN1SEL_TIMCLK;
+	writel(scctrl, MMIO_P2V(V2M_SYSCTL + SCCTRL));
+
 	writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
 	writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
 
diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c
index 0ce9d8e..9c35010 100644
--- a/arch/arm/mach-w90x900/irq.c
+++ b/arch/arm/mach-w90x900/irq.c
@@ -92,15 +92,15 @@
 	__raw_writel(regval, REG_AIC_GEN);
 }
 
-static void nuc900_irq_mask(unsigned int irq)
+static void nuc900_irq_mask(struct irq_data *d)
 {
 	struct group_irq *group_irq;
 
 	group_irq = NULL;
 
-	__raw_writel(1 << irq, REG_AIC_MDCR);
+	__raw_writel(1 << d->irq, REG_AIC_MDCR);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_GROUP0:
 		group_irq = &group_nirq0;
 		break;
@@ -143,20 +143,20 @@
  * to REG_AIC_EOSCR for ACK
  */
 
-static void nuc900_irq_ack(unsigned int irq)
+static void nuc900_irq_ack(struct irq_data *d)
 {
 	__raw_writel(0x01, REG_AIC_EOSCR);
 }
 
-static void nuc900_irq_unmask(unsigned int irq)
+static void nuc900_irq_unmask(struct irq_data *d)
 {
 	struct group_irq *group_irq;
 
 	group_irq = NULL;
 
-	__raw_writel(1 << irq, REG_AIC_MECR);
+	__raw_writel(1 << d->irq, REG_AIC_MECR);
 
-	switch (irq) {
+	switch (d->irq) {
 	case IRQ_GROUP0:
 		group_irq = &group_nirq0;
 		break;
@@ -195,9 +195,9 @@
 }
 
 static struct irq_chip nuc900_irq_chip = {
-	.ack	   = nuc900_irq_ack,
-	.mask	   = nuc900_irq_mask,
-	.unmask	   = nuc900_irq_unmask,
+	.irq_ack	= nuc900_irq_ack,
+	.irq_mask	= nuc900_irq_mask,
+	.irq_unmask	= nuc900_irq_unmask,
 };
 
 void __init nuc900_init_irq(void)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index fcc1e62..e4509ba 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -405,7 +405,7 @@
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
 	depends on CPU_V6 || CPU_V7
-	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
+	default y if SMP
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
 	  This enables the kernel to use some instructions not present
@@ -416,7 +416,7 @@
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K if !ARCH_OMAP2
+	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -644,7 +644,7 @@
 
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
-	depends on CPU_V7
+	depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
 	select HAVE_PROC_CPU if PROC_FS
 	default y if SMP
 	help
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 170c9bb..f2ce38e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -49,7 +49,13 @@
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
+
+#ifdef CONFIG_ARM_ERRATA_753970
+	/* write to an unmapped register */
+	writel_relaxed(0, base + L2X0_DUMMY_REG);
+#else
 	writel_relaxed(0, base + L2X0_CACHE_SYNC);
+#endif
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b48e0a..4771dba 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -577,7 +577,7 @@
  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
- * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  *
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c29f283..2b269c9 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,7 +18,6 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
-#include <asm/smp_plat.h>
 
 #include "mm.h"
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 5164069..cddd684 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -297,6 +297,12 @@
 	memblock_reserve(__pa(_stext), _end - _stext);
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
 	if (phys_initrd_size) {
 		memblock_reserve(phys_initrd_start, phys_initrd_size);
 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 93292a1..709244c 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -50,7 +50,7 @@
 		if (!new_pmd)
 			goto no_pmd;
 
-		new_pte = pte_alloc_map(mm, new_pmd, 0);
+		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
 		if (!new_pte)
 			goto no_pte;
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b49fab2..8e33562 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -159,7 +159,9 @@
 	tstne	r1, #L_PTE_PRESENT
 	moveq	r3, #0
 
-	str	r3, [r0, #2048]!
+ ARM(	str	r3, [r0, #2048]! )
+ THUMB(	add	r0, r0, #2048 )
+ THUMB(	str	r3, [r0] )
 	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
 #endif
 	mov	pc, lr
@@ -262,6 +264,12 @@
 	orreq	r10, r10, #1 << 6		@ set bit #6
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
 #endif
+#ifdef CONFIG_ARM_ERRATA_751472
+	cmp	r6, #0x30			@ present prior to r3p0
+	mrclt	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orrlt	r10, r10, #1 << 11		@ set bit #11
+	mcrlt	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
 
 3:	mov	r10, #0
 #ifdef HARVARD_CACHE
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 8aa9744..c074e66 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -10,8 +10,6 @@
  */
 
 #include <linux/cpumask.h>
-#include <linux/err.h>
-#include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/oprofile.h>
@@ -46,6 +44,7 @@
 		return NULL;
 	}
 }
+#endif
 
 static int report_trace(struct stackframe *frame, void *d)
 {
@@ -85,7 +84,7 @@
 
 	/* frame pointers should strictly progress back up the stack
 	 * (towards higher addresses) */
-	if (tail >= buftail[0].fp)
+	if (tail + 1 >= buftail[0].fp)
 		return NULL;
 
 	return buftail[0].fp-1;
@@ -111,6 +110,7 @@
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
+	/* provide backtrace support also in timer mode: */
 	ops->backtrace		= arm_backtrace;
 
 	return oprofile_perf_init(ops);
@@ -120,11 +120,3 @@
 {
 	oprofile_perf_exit();
 }
-#else
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-	pr_info("oprofile: hardware counters not available\n");
-	return -ENODEV;
-}
-void __exit oprofile_arch_exit(void) {}
-#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/arm/plat-mxc/3ds_debugboard.c b/arch/arm/plat-mxc/3ds_debugboard.c
index 639c54a..c856fa3 100644
--- a/arch/arm/plat-mxc/3ds_debugboard.c
+++ b/arch/arm/plat-mxc/3ds_debugboard.c
@@ -60,7 +60,6 @@
 #define EXPIO_INT_BUTTON_B	(MXC_BOARD_IRQ_START + 4)
 
 static void __iomem *brd_io;
-static void expio_ack_irq(u32 irq);
 
 static struct resource smsc911x_resources[] = {
 	{
@@ -93,7 +92,8 @@
 	u32 int_valid;
 	u32 expio_irq;
 
-	desc->chip->mask(irq);	/* irq = gpio irq number */
+	/* irq = gpio irq number */
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 
 	imr_val = __raw_readw(brd_io + INTR_MASK_REG);
 	int_valid = __raw_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
@@ -110,37 +110,37 @@
 			d->handle_irq(expio_irq, d);
 	}
 
-	desc->chip->ack(irq);
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 /*
  * Disable an expio pin's interrupt by setting the bit in the imr.
  * Irq is an expio virtual irq number
  */
-static void expio_mask_irq(u32 irq)
+static void expio_mask_irq(struct irq_data *d)
 {
 	u16 reg;
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 
 	reg = __raw_readw(brd_io + INTR_MASK_REG);
 	reg |= (1 << expio);
 	__raw_writew(reg, brd_io + INTR_MASK_REG);
 }
 
-static void expio_ack_irq(u32 irq)
+static void expio_ack_irq(struct irq_data *d)
 {
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 
 	__raw_writew(1 << expio, brd_io + INTR_RESET_REG);
 	__raw_writew(0, brd_io + INTR_RESET_REG);
-	expio_mask_irq(irq);
+	expio_mask_irq(d);
 }
 
-static void expio_unmask_irq(u32 irq)
+static void expio_unmask_irq(struct irq_data *d)
 {
 	u16 reg;
-	u32 expio = MXC_IRQ_TO_EXPIO(irq);
+	u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
 
 	reg = __raw_readw(brd_io + INTR_MASK_REG);
 	reg &= ~(1 << expio);
@@ -148,9 +148,9 @@
 }
 
 static struct irq_chip expio_irq_chip = {
-	.ack = expio_ack_irq,
-	.mask = expio_mask_irq,
-	.unmask = expio_unmask_irq,
+	.irq_ack = expio_ack_irq,
+	.irq_mask = expio_mask_irq,
+	.irq_unmask = expio_unmask_irq,
 };
 
 int __init mxc_expio_init(u32 base, u32 p_irq)
diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c
index 9a4e8a2..deb284b 100644
--- a/arch/arm/plat-mxc/avic.c
+++ b/arch/arm/plat-mxc/avic.c
@@ -89,22 +89,22 @@
 #endif /* CONFIG_FIQ */
 
 /* Disable interrupt number "irq" in the AVIC */
-static void mxc_mask_irq(unsigned int irq)
+static void mxc_mask_irq(struct irq_data *d)
 {
-	__raw_writel(irq, avic_base + AVIC_INTDISNUM);
+	__raw_writel(d->irq, avic_base + AVIC_INTDISNUM);
 }
 
 /* Enable interrupt number "irq" in the AVIC */
-static void mxc_unmask_irq(unsigned int irq)
+static void mxc_unmask_irq(struct irq_data *d)
 {
-	__raw_writel(irq, avic_base + AVIC_INTENNUM);
+	__raw_writel(d->irq, avic_base + AVIC_INTENNUM);
 }
 
 static struct mxc_irq_chip mxc_avic_chip = {
 	.base = {
-		.ack = mxc_mask_irq,
-		.mask = mxc_mask_irq,
-		.unmask = mxc_unmask_irq,
+		.irq_ack = mxc_mask_irq,
+		.irq_mask = mxc_mask_irq,
+		.irq_unmask = mxc_unmask_irq,
 	},
 #ifdef CONFIG_MXC_IRQ_PRIOR
 	.set_priority = avic_irq_set_priority,
diff --git a/arch/arm/plat-mxc/devices/Kconfig b/arch/arm/plat-mxc/devices/Kconfig
index 2537166..b9ab1d5 100644
--- a/arch/arm/plat-mxc/devices/Kconfig
+++ b/arch/arm/plat-mxc/devices/Kconfig
@@ -1,6 +1,6 @@
 config IMX_HAVE_PLATFORM_FEC
 	bool
-	default y if ARCH_MX25 || SOC_IMX27 || SOC_IMX35 || SOC_IMX51
+	default y if ARCH_MX25 || SOC_IMX27 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53
 
 config IMX_HAVE_PLATFORM_FLEXCAN
 	select HAVE_CAN_FLEXCAN if CAN
diff --git a/arch/arm/plat-mxc/devices/platform-fec.c b/arch/arm/plat-mxc/devices/platform-fec.c
index 269ec78..b50c351 100644
--- a/arch/arm/plat-mxc/devices/platform-fec.c
+++ b/arch/arm/plat-mxc/devices/platform-fec.c
@@ -36,6 +36,11 @@
 	imx_fec_data_entry_single(MX51);
 #endif
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_fec_data imx53_fec_data __initconst =
+	imx_fec_data_entry_single(MX53);
+#endif
+
 struct platform_device *__init imx_add_fec(
 		const struct imx_fec_data *data,
 		const struct fec_platform_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-imx-i2c.c b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
index 72ba880..7ba94e1 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-i2c.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
@@ -78,6 +78,15 @@
 };
 #endif /* ifdef CONFIG_SOC_IMX51 */
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_imx_i2c_data imx53_imx_i2c_data[] __initconst = {
+#define imx53_imx_i2c_data_entry(_id, _hwid)				\
+	imx_imx_i2c_data_entry(MX53, _id, _hwid, SZ_4K)
+	imx53_imx_i2c_data_entry(0, 1),
+	imx53_imx_i2c_data_entry(1, 2),
+};
+#endif /* ifdef CONFIG_SOC_IMX53 */
+
 struct platform_device *__init imx_add_imx_i2c(
 		const struct imx_imx_i2c_data *data,
 		const struct imxi2c_platform_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-imx-keypad.c b/arch/arm/plat-mxc/devices/platform-imx-keypad.c
index 40238f0..2636611 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-keypad.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-keypad.c
@@ -41,6 +41,11 @@
 	imx_imx_keypad_data_entry_single(MX35, SZ_16);
 #endif /* ifdef CONFIG_SOC_IMX35 */
 
+#ifdef CONFIG_SOC_IMX51
+const struct imx_imx_keypad_data imx51_imx_keypad_data __initconst =
+	imx_imx_keypad_data_entry_single(MX51, SZ_16);
+#endif /* ifdef CONFIG_SOC_IMX51 */
+
 struct platform_device *__init imx_add_imx_keypad(
 		const struct imx_imx_keypad_data *data,
 		const struct matrix_keymap_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-mxc_pwm.c b/arch/arm/plat-mxc/devices/platform-mxc_pwm.c
index 3d8ebdb..b0c4ae2 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc_pwm.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc_pwm.c
@@ -40,6 +40,15 @@
 	imx_mxc_pwm_data_entry_single(MX27, 0, , SZ_4K);
 #endif /* ifdef CONFIG_SOC_IMX27 */
 
+#ifdef CONFIG_SOC_IMX51
+const struct imx_mxc_pwm_data imx51_mxc_pwm_data[] __initconst = {
+#define imx51_mxc_pwm_data_entry(_id, _hwid)				\
+	imx_mxc_pwm_data_entry(MX51, _id, _hwid, SZ_16K)
+	imx51_mxc_pwm_data_entry(0, 1),
+	imx51_mxc_pwm_data_entry(1, 2),
+};
+#endif /* ifdef CONFIG_SOC_IMX51 */
+
 struct platform_device *__init imx_add_mxc_pwm(
 		const struct imx_mxc_pwm_data *data)
 {
diff --git a/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c b/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c
index b352564..6b2940b 100644
--- a/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/plat-mxc/devices/platform-sdhci-esdhc-imx.c
@@ -53,6 +53,18 @@
 };
 #endif /* ifdef CONFIG_SOC_IMX51 */
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_sdhci_esdhc_imx_data
+imx53_sdhci_esdhc_imx_data[] __initconst = {
+#define imx53_sdhci_esdhc_imx_data_entry(_id, _hwid)			\
+	imx_sdhci_esdhc_imx_data_entry(MX53, _id, _hwid)
+	imx53_sdhci_esdhc_imx_data_entry(0, 1),
+	imx53_sdhci_esdhc_imx_data_entry(1, 2),
+	imx53_sdhci_esdhc_imx_data_entry(2, 3),
+	imx53_sdhci_esdhc_imx_data_entry(3, 4),
+};
+#endif /* ifdef CONFIG_SOC_IMX53 */
+
 struct platform_device *__init imx_add_sdhci_esdhc_imx(
 		const struct imx_sdhci_esdhc_imx_data *data,
 		const struct esdhc_platform_data *pdata)
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index 8ea49ad..013c85f 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -81,6 +81,18 @@
 };
 #endif /* ifdef CONFIG_SOC_IMX51 */
 
+#ifdef CONFIG_SOC_IMX53
+const struct imx_spi_imx_data imx53_cspi_data __initconst =
+	imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K);
+
+const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = {
+#define imx53_ecspi_data_entry(_id, _hwid)				\
+	imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K)
+	imx53_ecspi_data_entry(0, 1),
+	imx53_ecspi_data_entry(1, 2),
+};
+#endif /* ifdef CONFIG_SOC_IMX53 */
+
 struct platform_device *__init imx_add_spi_imx(
 		const struct imx_spi_imx_data *data,
 		const struct spi_imx_master *pdata)
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
index bc2c7bc..d17b3c9 100644
--- a/arch/arm/plat-mxc/gpio.c
+++ b/arch/arm/plat-mxc/gpio.c
@@ -63,29 +63,29 @@
 	__raw_writel(l, port->base + GPIO_IMR);
 }
 
-static void gpio_ack_irq(u32 irq)
+static void gpio_ack_irq(struct irq_data *d)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	_clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f);
 }
 
-static void gpio_mask_irq(u32 irq)
+static void gpio_mask_irq(struct irq_data *d)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	_set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0);
 }
 
-static void gpio_unmask_irq(u32 irq)
+static void gpio_unmask_irq(struct irq_data *d)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	_set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1);
 }
 
 static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset);
 
-static int gpio_set_irq_type(u32 irq, u32 type)
+static int gpio_set_irq_type(struct irq_data *d, u32 type)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
 	u32 bit, val;
 	int edge;
@@ -211,9 +211,9 @@
  * @param  enable       enable as wake-up if equal to non-zero
  * @return       This function returns 0 on success.
  */
-static int gpio_set_wake_irq(u32 irq, u32 enable)
+static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
 {
-	u32 gpio = irq_to_gpio(irq);
+	u32 gpio = irq_to_gpio(d->irq);
 	u32 gpio_idx = gpio & 0x1F;
 	struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
 
@@ -233,11 +233,11 @@
 }
 
 static struct irq_chip gpio_irq_chip = {
-	.ack = gpio_ack_irq,
-	.mask = gpio_mask_irq,
-	.unmask = gpio_unmask_irq,
-	.set_type = gpio_set_irq_type,
-	.set_wake = gpio_set_wake_irq,
+	.irq_ack = gpio_ack_irq,
+	.irq_mask = gpio_mask_irq,
+	.irq_unmask = gpio_unmask_irq,
+	.irq_set_type = gpio_set_irq_type,
+	.irq_set_wake = gpio_set_wake_irq,
 };
 
 static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx53.h b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
index 5deee01..68e11d7 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx53.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
@@ -34,7 +34,6 @@
 	IOMUX_CONFIG_ALT6,
 	IOMUX_CONFIG_ALT7,
 	IOMUX_CONFIG_GPIO, /* added to help user use GPIO mode */
-	IOMUX_CONFIG_SION = 0x1 << 4, /* LOOPBACK:MUX SION bit */
 } iomux_pin_cfg_t;
 
 /* These 2 defines are for pins that may not have a mux register, but could
@@ -135,6 +134,9 @@
 #define MX53_PAD_EIM_D16__GPIO_3_16		IOMUX_PAD(0x460, 0x118,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D17__GPIO_3_17		IOMUX_PAD(0x464, 0x11C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D18__GPIO_3_18		IOMUX_PAD(0x468, 0x120,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
+#define MX53_PAD_EIM_D16__CSPI1_SCLK		IOMUX_PAD(0x460, 0x118,IOMUX_CONFIG_ALT4, 0x79c, 3, NO_PAD_CTRL)
+#define MX53_PAD_EIM_D17__CSPI1_MISO		IOMUX_PAD(0x464, 0x11C,IOMUX_CONFIG_ALT4, 0x7a0, 3, NO_PAD_CTRL)
+#define MX53_PAD_EIM_D18__CSPI1_MOSI		IOMUX_PAD(0x468, 0x120,IOMUX_CONFIG_ALT4, 0x7a4, 3, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D19__GPIO_3_19		IOMUX_PAD(0x46C, 0x124,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D20__GPIO_3_20		IOMUX_PAD(0x470, 0x128,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
 #define MX53_PAD_EIM_D21__GPIO_3_21		IOMUX_PAD(0x474, 0x12C,IOMUX_CONFIG_ALT1, 0x0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/iomux-v3.h b/arch/arm/plat-mxc/include/mach/iomux-v3.h
index 2277b01..82620af 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-v3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-v3.h
@@ -105,6 +105,7 @@
 #define PAD_CTL_SRE_FAST		(1 << 0)
 #define PAD_CTL_SRE_SLOW		(0 << 0)
 
+#define IOMUX_CONFIG_SION		(0x1 << 4)
 
 #define MX51_NUM_GPIO_PORT	4
 
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index 58a49cc..ba65c92 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -70,7 +70,7 @@
 
 /* all normal IRQs can be FIQs */
 #define FIQ_START	0
-/* switch betwean IRQ and FIQ */
+/* switch between IRQ and FIQ */
 extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type);
 
 #endif /* __ASM_ARCH_MXC_IRQS_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/mx51.h b/arch/arm/plat-mxc/include/mach/mx51.h
index 873807f..1eb339e 100644
--- a/arch/arm/plat-mxc/include/mach/mx51.h
+++ b/arch/arm/plat-mxc/include/mach/mx51.h
@@ -301,8 +301,8 @@
 #define MX51_MXC_INT_GPIO4_HIGH		57
 #define MX51_MXC_INT_WDOG1		58
 #define MX51_MXC_INT_WDOG2		59
-#define MX51_MXC_INT_KPP		60
-#define MX51_MXC_INT_PWM1		61
+#define MX51_INT_KPP			60
+#define MX51_INT_PWM1			61
 #define MX51_INT_I2C1			62
 #define MX51_INT_I2C2			63
 #define MX51_MXC_INT_HS_I2C		64
@@ -335,7 +335,7 @@
 #define MX51_MXC_INT_SPDIF		91
 #define MX51_MXC_INT_TVE		92
 #define MX51_MXC_INT_FIRI		93
-#define MX51_MXC_INT_PWM2		94
+#define MX51_INT_PWM2			94
 #define MX51_MXC_INT_SLIM_EXP		95
 #define MX51_INT_SSI3			96
 #define MX51_MXC_INT_EMI_BOOT		97
diff --git a/arch/arm/plat-mxc/include/mach/mx53.h b/arch/arm/plat-mxc/include/mach/mx53.h
index 9577cdb..d7a8e52 100644
--- a/arch/arm/plat-mxc/include/mach/mx53.h
+++ b/arch/arm/plat-mxc/include/mach/mx53.h
@@ -53,13 +53,13 @@
 #define MX53_SPBA0_BASE_ADDR		0x50000000
 #define MX53_SPBA0_SIZE		SZ_1M
 
-#define MX53_MMC_SDHC1_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00004000)
-#define MX53_MMC_SDHC2_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00008000)
+#define MX53_ESDHC1_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00004000)
+#define MX53_ESDHC2_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00008000)
 #define MX53_UART3_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x0000C000)
-#define MX53_CSPI1_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00010000)
+#define MX53_ECSPI1_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00010000)
 #define MX53_SSI2_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00014000)
-#define MX53_MMC_SDHC3_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00020000)
-#define MX53_MMC_SDHC4_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00024000)
+#define MX53_ESDHC3_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00020000)
+#define MX53_ESDHC4_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00024000)
 #define MX53_SPDIF_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x00028000)
 #define MX53_ASRC_BASE_ADDR		(MX53_SPBA0_BASE_ADDR + 0x0002C000)
 #define MX53_ATA_DMA_BASE_ADDR	(MX53_SPBA0_BASE_ADDR + 0x00030000)
@@ -117,12 +117,12 @@
 #define MX53_ARM_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000A0000)
 #define MX53_OWIRE_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000A4000)
 #define MX53_FIRI_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000A8000)
-#define MX53_CSPI2_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000AC000)
+#define MX53_ECSPI2_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000AC000)
 #define MX53_SDMA_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000B0000)
 #define MX53_SCC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000B4000)
 #define MX53_ROMCP_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000B8000)
 #define MX53_RTIC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000BC000)
-#define MX53_CSPI3_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C0000)
+#define MX53_CSPI_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C0000)
 #define MX53_I2C2_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C4000)
 #define MX53_I2C1_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000C8000)
 #define MX53_SSI1_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000CC000)
@@ -136,7 +136,7 @@
 #define MX53_MIPI_HSC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000DC000)
 #define MX53_MLB_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000E4000)
 #define MX53_SSI3_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000E8000)
-#define MX53_MXC_FEC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000EC000)
+#define MX53_FEC_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000EC000)
 #define MX53_TVE_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000F0000)
 #define MX53_VPU_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000F4000)
 #define MX53_SAHARA_BASE_ADDR	(MX53_AIPS2_BASE_ADDR + 0x000F8000)
@@ -229,10 +229,10 @@
  * Interrupt numbers
  */
 #define MX53_INT_RESV0		0
-#define MX53_INT_MMC_SDHC1	1
-#define MX53_INT_MMC_SDHC2	2
-#define MX53_INT_MMC_SDHC3	3
-#define MX53_INT_MMC_SDHC4	4
+#define MX53_INT_ESDHC1	1
+#define MX53_INT_ESDHC2	2
+#define MX53_INT_ESDHC3	3
+#define MX53_INT_ESDHC4	4
 #define MX53_INT_RESV5	5
 #define MX53_INT_SDMA	6
 #define MX53_INT_IOMUX	7
@@ -264,8 +264,8 @@
 #define MX53_INT_UART3	33
 #define MX53_INT_RESV34	34
 #define MX53_INT_RESV35	35
-#define MX53_INT_CSPI1	36
-#define MX53_INT_CSPI2	37
+#define MX53_INT_ECSPI1	36
+#define MX53_INT_ECSPI2	37
 #define MX53_INT_CSPI	38
 #define MX53_INT_GPT	39
 #define MX53_INT_EPIT1	40
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index 3a70ebf..ff469c4 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -95,6 +95,7 @@
 	case MACH_TYPE_MX35_3DS:
 	case MACH_TYPE_PCM043:
 	case MACH_TYPE_LILLY1131:
+	case MACH_TYPE_VPR200:
 		uart_base = MX3X_UART1_BASE_ADDR;
 		break;
 	case MACH_TYPE_MAGX_ZN5:
@@ -102,6 +103,7 @@
 		break;
 	case MACH_TYPE_MX51_BABBAGE:
 	case MACH_TYPE_EUKREA_CPUIMX51SD:
+	case MACH_TYPE_MX51_3DS:
 		uart_base = MX51_UART1_BASE_ADDR;
 		break;
 	case MACH_TYPE_MX50_RDP:
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index c36f263..7a61ef8 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -57,7 +57,7 @@
 	if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
 		return -EINVAL;
 
-	if (cpu_is_mx27() || cpu_is_mx3() || cpu_is_mx25()) {
+	if (cpu_is_mx27() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
 		unsigned long long c;
 		unsigned long period_cycles, duty_cycles, prescale;
 		u32 cr;
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index e69ed8a..bc3a6be 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -69,50 +69,50 @@
 #endif
 
 /**
- * tzic_mask_irq() - Disable interrupt number "irq" in the TZIC
+ * tzic_mask_irq() - Disable interrupt source "d" in the TZIC
  *
- * @param  irq          interrupt source number
+ * @param  d            interrupt source
  */
-static void tzic_mask_irq(unsigned int irq)
+static void tzic_mask_irq(struct irq_data *d)
 {
 	int index, off;
 
-	index = irq >> 5;
-	off = irq & 0x1F;
+	index = d->irq >> 5;
+	off = d->irq & 0x1F;
 	__raw_writel(1 << off, tzic_base + TZIC_ENCLEAR0(index));
 }
 
 /**
- * tzic_unmask_irq() - Enable interrupt number "irq" in the TZIC
+ * tzic_unmask_irq() - Enable interrupt source "d" in the TZIC
  *
- * @param  irq          interrupt source number
+ * @param  d            interrupt source
  */
-static void tzic_unmask_irq(unsigned int irq)
+static void tzic_unmask_irq(struct irq_data *d)
 {
 	int index, off;
 
-	index = irq >> 5;
-	off = irq & 0x1F;
+	index = d->irq >> 5;
+	off = d->irq & 0x1F;
 	__raw_writel(1 << off, tzic_base + TZIC_ENSET0(index));
 }
 
 static unsigned int wakeup_intr[4];
 
 /**
- * tzic_set_wake_irq() - Set interrupt number "irq" in the TZIC as a wake-up source.
+ * tzic_set_wake_irq() - Set interrupt source "d" in the TZIC as a wake-up source.
  *
- * @param  irq          interrupt source number
+ * @param  d            interrupt source
  * @param  enable       enable as wake-up if equal to non-zero
  * 			disable as wake-up if equal to zero
  *
  * @return       This function returns 0 on success.
  */
-static int tzic_set_wake_irq(unsigned int irq, unsigned int enable)
+static int tzic_set_wake_irq(struct irq_data *d, unsigned int enable)
 {
 	unsigned int index, off;
 
-	index = irq >> 5;
-	off = irq & 0x1F;
+	index = d->irq >> 5;
+	off = d->irq & 0x1F;
 
 	if (index > 3)
 		return -EINVAL;
@@ -128,10 +128,10 @@
 static struct mxc_irq_chip mxc_tzic_chip = {
 	.base = {
 		.name = "MXC_TZIC",
-		.ack = tzic_mask_irq,
-		.mask = tzic_mask_irq,
-		.unmask = tzic_unmask_irq,
-		.set_wake = tzic_set_wake_irq,
+		.irq_ack = tzic_mask_irq,
+		.irq_mask = tzic_mask_irq,
+		.irq_unmask = tzic_unmask_irq,
+		.irq_set_wake = tzic_set_wake_irq,
 	},
 #ifdef CONFIG_FIQ
 	.set_irq_fiq = tzic_set_irq_fiq,
diff --git a/arch/arm/plat-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c
index eda4e3a..1e88ecb 100644
--- a/arch/arm/plat-nomadik/gpio.c
+++ b/arch/arm/plat-nomadik/gpio.c
@@ -356,13 +356,13 @@
 	return 1 << (gpio % 32);
 }
 
-static void nmk_gpio_irq_ack(unsigned int irq)
+static void nmk_gpio_irq_ack(struct irq_data *d)
 {
 	int gpio;
 	struct nmk_gpio_chip *nmk_chip;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	if (!nmk_chip)
 		return;
 	writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
@@ -401,7 +401,7 @@
 	}
 }
 
-static int nmk_gpio_irq_modify(unsigned int irq, enum nmk_gpio_irq_type which,
+static int nmk_gpio_irq_modify(struct irq_data *d, enum nmk_gpio_irq_type which,
 			       bool enable)
 {
 	int gpio;
@@ -409,8 +409,8 @@
 	unsigned long flags;
 	u32 bitmask;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	bitmask = nmk_gpio_get_bitmask(gpio);
 	if (!nmk_chip)
 		return -EINVAL;
@@ -422,24 +422,24 @@
 	return 0;
 }
 
-static void nmk_gpio_irq_mask(unsigned int irq)
+static void nmk_gpio_irq_mask(struct irq_data *d)
 {
-	nmk_gpio_irq_modify(irq, NORMAL, false);
+	nmk_gpio_irq_modify(d, NORMAL, false);
 }
 
-static void nmk_gpio_irq_unmask(unsigned int irq)
+static void nmk_gpio_irq_unmask(struct irq_data *d)
 {
-	nmk_gpio_irq_modify(irq, NORMAL, true);
+	nmk_gpio_irq_modify(d, NORMAL, true);
 }
 
-static int nmk_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct nmk_gpio_chip *nmk_chip;
 	unsigned long flags;
 	int gpio;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	if (!nmk_chip)
 		return -EINVAL;
 
@@ -457,9 +457,9 @@
 	return 0;
 }
 
-static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_to_desc(d->irq);
 	bool enabled = !(desc->status & IRQ_DISABLED);
 	bool wake = desc->wake_depth;
 	int gpio;
@@ -467,8 +467,8 @@
 	unsigned long flags;
 	u32 bitmask;
 
-	gpio = NOMADIK_IRQ_TO_GPIO(irq);
-	nmk_chip = get_irq_chip_data(irq);
+	gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+	nmk_chip = irq_data_get_irq_chip_data(d);
 	bitmask = nmk_gpio_get_bitmask(gpio);
 	if (!nmk_chip)
 		return -EINVAL;
@@ -507,11 +507,11 @@
 
 static struct irq_chip nmk_gpio_irq_chip = {
 	.name		= "Nomadik-GPIO",
-	.ack		= nmk_gpio_irq_ack,
-	.mask		= nmk_gpio_irq_mask,
-	.unmask		= nmk_gpio_irq_unmask,
-	.set_type	= nmk_gpio_irq_set_type,
-	.set_wake	= nmk_gpio_irq_set_wake,
+	.irq_ack	= nmk_gpio_irq_ack,
+	.irq_mask	= nmk_gpio_irq_mask,
+	.irq_unmask	= nmk_gpio_irq_unmask,
+	.irq_set_type	= nmk_gpio_irq_set_type,
+	.irq_set_wake	= nmk_gpio_irq_set_wake,
 };
 
 static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -522,12 +522,12 @@
 	u32 pending;
 	unsigned int first_irq;
 
-	if (host_chip->mask_ack)
-		host_chip->mask_ack(irq);
+	if (host_chip->irq_mask_ack)
+		host_chip->irq_mask_ack(&desc->irq_data);
 	else {
-		host_chip->mask(irq);
-		if (host_chip->ack)
-			host_chip->ack(irq);
+		host_chip->irq_mask(&desc->irq_data);
+		if (host_chip->irq_ack)
+			host_chip->irq_ack(&desc->irq_data);
 	}
 
 	nmk_chip = get_irq_data(irq);
@@ -537,7 +537,7 @@
 		generic_handle_irq(gpio_irq);
 	}
 
-	host_chip->unmask(irq);
+	host_chip->irq_unmask(&desc->irq_data);
 }
 
 static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 74b62f1..4d6dd4c 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -13,6 +13,14 @@
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
 
+/*
+ * Maximum size for a single dma descriptor
+ * Size is limited to 16 bits.
+ * Size is in the units of addr-widths (1,2,4,8 bytes)
+ * Larger transfers will be split into multiple linked descriptors
+ */
+#define STEDMA40_MAX_SEG_SIZE 0xFFFF
+
 /* dev types for memcpy */
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define	STEDMA40_DEV_SRC_MEMORY (-1)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 18fe3cb..b6333ae 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -144,12 +144,9 @@
 config OMAP_IOMMU_IVA2
 	bool
 
-choice
-	prompt "System timer"
-	default OMAP_32K_TIMER if !ARCH_OMAP15XX
-
 config OMAP_MPU_TIMER
 	bool "Use mpu timer"
+	depends on ARCH_OMAP1
 	help
 	  Select this option if you want to use the OMAP mpu timer. This
 	  timer provides more intra-tick resolution than the 32KHz timer,
@@ -158,6 +155,7 @@
 config OMAP_32K_TIMER
 	bool "Use 32KHz timer"
 	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+	default y if (ARCH_OMAP16XX || ARCH_OMAP2PLUS)
 	help
 	  Select this option if you want to enable the OMAP 32KHz timer.
 	  This timer saves power compared to the OMAP_MPU_TIMER, and has
@@ -165,8 +163,6 @@
 	  intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is
 	  currently only available for OMAP16XX, 24XX, 34XX and OMAP4.
 
-endchoice
-
 config OMAP3_L2_AUX_SECURE_SAVE_RESTORE
 	bool "OMAP3 HS/EMU save and restore for L2 AUX control register"
 	depends on ARCH_OMAP3 && PM
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index ea46440..862dda9 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -36,8 +36,6 @@
 
 #define OMAP16XX_TIMER_32K_SYNCHRONIZED		0xfffbc410
 
-#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX))
-
 #include <linux/clocksource.h>
 
 /*
@@ -122,12 +120,24 @@
 #define SC_MULT		4000000000u
 #define SC_SHIFT	17
 
-unsigned long long notrace sched_clock(void)
+static inline unsigned long long notrace _omap_32k_sched_clock(void)
 {
 	u32 cyc = clocksource_32k.read(&clocksource_32k);
 	return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
 }
 
+#ifndef CONFIG_OMAP_MPU_TIMER
+unsigned long long notrace sched_clock(void)
+{
+	return _omap_32k_sched_clock();
+}
+#else
+unsigned long long notrace omap_32k_sched_clock(void)
+{
+	return _omap_32k_sched_clock();
+}
+#endif
+
 static void notrace omap_update_sched_clock(void)
 {
 	u32 cyc = clocksource_32k.read(&clocksource_32k);
@@ -160,7 +170,7 @@
 	*ts = *tsp;
 }
 
-static int __init omap_init_clocksource_32k(void)
+int __init omap_init_clocksource_32k(void)
 {
 	static char err[] __initdata = KERN_ERR
 			"%s: can't register clocksource!\n";
@@ -195,7 +205,3 @@
 	}
 	return 0;
 }
-arch_initcall(omap_init_clocksource_32k);
-
-#endif	/* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */
-
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c4b2b47..8536308 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -53,7 +53,7 @@
 #endif
 
 #define OMAP_DMA_ACTIVE			0x01
-#define OMAP2_DMA_CSR_CLEAR_MASK	0xffe
+#define OMAP2_DMA_CSR_CLEAR_MASK	0xffffffff
 
 #define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)
 
@@ -1873,7 +1873,7 @@
 		printk(KERN_INFO "DMA misaligned error with device %d\n",
 		       dma_chan[ch].dev_id);
 
-	p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
+	p->dma_write(status, CSR, ch);
 	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
 	/* read back the register to flush the write */
 	p->dma_read(IRQSTATUS_L0, ch);
@@ -1893,10 +1893,9 @@
 			OMAP_DMA_CHAIN_INCQHEAD(chain_id);
 
 		status = p->dma_read(CSR, ch);
+		p->dma_write(status, CSR, ch);
 	}
 
-	p->dma_write(status, CSR, ch);
-
 	if (likely(dma_chan[ch].callback != NULL))
 		dma_chan[ch].callback(ch, status, dma_chan[ch].data);
 
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 1f98e0b..971d186 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -718,7 +718,7 @@
 	case METHOD_GPIO_24XX:
 	case METHOD_GPIO_44XX:
 		set_24xx_gpio_triggering(bank, gpio, trigger);
-		break;
+		return 0;
 #endif
 	default:
 		goto bad;
@@ -729,17 +729,17 @@
 	return -EINVAL;
 }
 
-static int gpio_irq_type(unsigned irq, unsigned type)
+static int gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_bank *bank;
 	unsigned gpio;
 	int retval;
 	unsigned long flags;
 
-	if (!cpu_class_is_omap2() && irq > IH_MPUIO_BASE)
-		gpio = OMAP_MPUIO(irq - IH_MPUIO_BASE);
+	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
+		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
 	else
-		gpio = irq - IH_GPIO_BASE;
+		gpio = d->irq - IH_GPIO_BASE;
 
 	if (check_gpio(gpio) < 0)
 		return -EINVAL;
@@ -752,19 +752,21 @@
 			&& (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
 		return -EINVAL;
 
-	bank = get_irq_chip_data(irq);
+	bank = irq_data_get_irq_chip_data(d);
 	spin_lock_irqsave(&bank->lock, flags);
 	retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
 	if (retval == 0) {
-		irq_desc[irq].status &= ~IRQ_TYPE_SENSE_MASK;
-		irq_desc[irq].status |= type;
+		struct irq_desc *desc = irq_to_desc(d->irq);
+
+		desc->status &= ~IRQ_TYPE_SENSE_MASK;
+		desc->status |= type;
 	}
 	spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__set_irq_handler_unlocked(irq, handle_level_irq);
+		__set_irq_handler_unlocked(d->irq, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__set_irq_handler_unlocked(irq, handle_edge_irq);
+		__set_irq_handler_unlocked(d->irq, handle_edge_irq);
 
 	return retval;
 }
@@ -1021,15 +1023,15 @@
 }
 
 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
-static int gpio_wake_enable(unsigned int irq, unsigned int enable)
+static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
 	struct gpio_bank *bank;
 	int retval;
 
 	if (check_gpio(gpio) < 0)
 		return -ENODEV;
-	bank = get_irq_chip_data(irq);
+	bank = irq_data_get_irq_chip_data(d);
 	retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
 
 	return retval;
@@ -1142,7 +1144,7 @@
 	u32 retrigger = 0;
 	int unmasked = 0;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	bank = get_irq_data(irq);
 #ifdef CONFIG_ARCH_OMAP1
@@ -1199,7 +1201,7 @@
 		configured, we could unmask GPIO bank interrupt immediately */
 		if (!level_mask && !unmasked) {
 			unmasked = 1;
-			desc->chip->unmask(irq);
+			desc->irq_data.chip->irq_unmask(&desc->irq_data);
 		}
 
 		isr |= retrigger;
@@ -1235,41 +1237,40 @@
 	interrupt */
 exit:
 	if (!unmasked)
-		desc->chip->unmask(irq);
-
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
-static void gpio_irq_shutdown(unsigned int irq)
+static void gpio_irq_shutdown(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_reset_gpio(bank, gpio);
 }
 
-static void gpio_ack_irq(unsigned int irq)
+static void gpio_ack_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_clear_gpio_irqstatus(bank, gpio);
 }
 
-static void gpio_mask_irq(unsigned int irq)
+static void gpio_mask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_set_gpio_irqenable(bank, gpio, 0);
 	_set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
 }
 
-static void gpio_unmask_irq(unsigned int irq)
+static void gpio_unmask_irq(struct irq_data *d)
 {
-	unsigned int gpio = irq - IH_GPIO_BASE;
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = d->irq - IH_GPIO_BASE;
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	unsigned int irq_mask = 1 << get_gpio_index(gpio);
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_to_desc(d->irq);
 	u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK;
 
 	if (trigger)
@@ -1287,12 +1288,12 @@
 
 static struct irq_chip gpio_irq_chip = {
 	.name		= "GPIO",
-	.shutdown	= gpio_irq_shutdown,
-	.ack		= gpio_ack_irq,
-	.mask		= gpio_mask_irq,
-	.unmask		= gpio_unmask_irq,
-	.set_type	= gpio_irq_type,
-	.set_wake	= gpio_wake_enable,
+	.irq_shutdown	= gpio_irq_shutdown,
+	.irq_ack	= gpio_ack_irq,
+	.irq_mask	= gpio_mask_irq,
+	.irq_unmask	= gpio_unmask_irq,
+	.irq_set_type	= gpio_irq_type,
+	.irq_set_wake	= gpio_wake_enable,
 };
 
 /*---------------------------------------------------------------------*/
@@ -1301,36 +1302,36 @@
 
 /* MPUIO uses the always-on 32k clock */
 
-static void mpuio_ack_irq(unsigned int irq)
+static void mpuio_ack_irq(struct irq_data *d)
 {
 	/* The ISR is reset automatically, so do nothing here. */
 }
 
-static void mpuio_mask_irq(unsigned int irq)
+static void mpuio_mask_irq(struct irq_data *d)
 {
-	unsigned int gpio = OMAP_MPUIO(irq - IH_MPUIO_BASE);
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_set_gpio_irqenable(bank, gpio, 0);
 }
 
-static void mpuio_unmask_irq(unsigned int irq)
+static void mpuio_unmask_irq(struct irq_data *d)
 {
-	unsigned int gpio = OMAP_MPUIO(irq - IH_MPUIO_BASE);
-	struct gpio_bank *bank = get_irq_chip_data(irq);
+	unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 
 	_set_gpio_irqenable(bank, gpio, 1);
 }
 
 static struct irq_chip mpuio_irq_chip = {
 	.name		= "MPUIO",
-	.ack		= mpuio_ack_irq,
-	.mask		= mpuio_mask_irq,
-	.unmask		= mpuio_unmask_irq,
-	.set_type	= gpio_irq_type,
+	.irq_ack	= mpuio_ack_irq,
+	.irq_mask	= mpuio_mask_irq,
+	.irq_unmask	= mpuio_unmask_irq,
+	.irq_set_type	= gpio_irq_type,
 #ifdef CONFIG_ARCH_OMAP16XX
 	/* REVISIT: assuming only 16xx supports MPUIO wake events */
-	.set_wake	= gpio_wake_enable,
+	.irq_set_wake	= gpio_wake_enable,
 #endif
 };
 
@@ -1671,7 +1672,9 @@
 
 	for (j = bank->virtual_irq_start;
 		     j < bank->virtual_irq_start + bank_width; j++) {
-		lockdep_set_class(&irq_desc[j].lock, &gpio_lock_class);
+		struct irq_desc *d = irq_to_desc(j);
+
+		lockdep_set_class(&d->lock, &gpio_lock_class);
 		set_irq_chip_data(j, bank);
 		if (bank_is_mpuio(bank))
 			set_irq_chip(j, &mpuio_irq_chip);
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 6b8088e..29b2afb 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -35,6 +35,9 @@
 
 extern void omap_map_common_io(void);
 extern struct sys_timer omap_timer;
+extern bool omap_32k_timer_init(void);
+extern int __init omap_init_clocksource_32k(void);
+extern unsigned long long notrace omap_32k_sched_clock(void);
 
 extern void omap_reserve(void);
 
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
index c915a66..537f4e4 100644
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -42,6 +42,10 @@
 #define DISPC_IRQ_SYNC_LOST		(1 << 14)
 #define DISPC_IRQ_SYNC_LOST_DIGIT	(1 << 15)
 #define DISPC_IRQ_WAKEUP		(1 << 16)
+#define DISPC_IRQ_SYNC_LOST2		(1 << 17)
+#define DISPC_IRQ_VSYNC2		(1 << 18)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2	(1 << 21)
+#define DISPC_IRQ_FRAMEDONE2		(1 << 22)
 
 struct omap_dss_device;
 struct omap_overlay_manager;
@@ -64,6 +68,7 @@
 enum omap_channel {
 	OMAP_DSS_CHANNEL_LCD	= 0,
 	OMAP_DSS_CHANNEL_DIGIT	= 1,
+	OMAP_DSS_CHANNEL_LCD2	= 2,
 };
 
 enum omap_color_mode {
@@ -142,6 +147,7 @@
 enum omap_dss_overlay_managers {
 	OMAP_DSS_OVL_MGR_LCD,
 	OMAP_DSS_OVL_MGR_TV,
+	OMAP_DSS_OVL_MGR_LCD2,
 };
 
 enum omap_dss_rotation_type {
@@ -268,6 +274,7 @@
 	u16 out_width;	/* if 0, out_width == width */
 	u16 out_height;	/* if 0, out_height == height */
 	u8 global_alpha;
+	u8 pre_mult_alpha;
 };
 
 struct omap_overlay {
@@ -351,6 +358,8 @@
 
 	enum omap_display_type type;
 
+	enum omap_channel channel;
+
 	union {
 		struct {
 			u8 data_lines;
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 6864a99..1eee85a 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -351,7 +351,7 @@
 /**
  * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
  * @clkctrl_reg: PRCM address of the clock control register
- * @rstctrl_reg: adress of the XXX_RSTCTRL register located in the PRM
+ * @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
  * @submodule_wkdep_bit: bit shift of the WKDEP range
  */
 struct omap_hwmod_omap4_prcm {
diff --git a/arch/arm/plat-omap/include/plat/onenand.h b/arch/arm/plat-omap/include/plat/onenand.h
index 72f433d..affe87e 100644
--- a/arch/arm/plat-omap/include/plat/onenand.h
+++ b/arch/arm/plat-omap/include/plat/onenand.h
@@ -23,6 +23,7 @@
 	int                     (*onenand_setup)(void __iomem *, int freq);
 	int			dma_channel;
 	u8			flags;
+	u8			regulator_can_sleep;
 };
 
 #define ONENAND_MAX_PARTITIONS 8
diff --git a/arch/arm/plat-omap/include/plat/panel-generic-dpi.h b/arch/arm/plat-omap/include/plat/panel-generic-dpi.h
new file mode 100644
index 0000000..7906197
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/panel-generic-dpi.h
@@ -0,0 +1,37 @@
+/*
+ * Header for generic DPI panel driver
+ *
+ * Copyright (C) 2010 Canonical Ltd.
+ * Author: Bryan Wu <bryan.wu@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H
+#define __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H
+
+#include "display.h"
+
+/**
+ * struct panel_generic_dpi_data - panel driver configuration data
+ * @name: panel name
+ * @platform_enable: platform specific panel enable function
+ * @platform_disable: platform specific panel disable function
+ */
+struct panel_generic_dpi_data {
+	const char *name;
+	int (*platform_enable)(struct omap_dss_device *dssdev);
+	void (*platform_disable)(struct omap_dss_device *dssdev);
+};
+
+#endif /* __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H */
diff --git a/arch/arm/plat-omap/include/plat/voltage.h b/arch/arm/plat-omap/include/plat/voltage.h
index 0ff1233..5bd204e 100644
--- a/arch/arm/plat-omap/include/plat/voltage.h
+++ b/arch/arm/plat-omap/include/plat/voltage.h
@@ -14,6 +14,8 @@
 #ifndef __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
 #define __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
 
+#include <linux/err.h>
+
 #define VOLTSCALE_VPFORCEUPDATE		1
 #define VOLTSCALE_VCBYPASS		2
 
@@ -65,9 +67,6 @@
 	char *name;
 };
 
-/* API to get the voltagedomain pointer */
-struct voltagedomain *omap_voltage_domain_lookup(char *name);
-
 /**
  * struct omap_volt_data - Omap voltage specific data.
  * @voltage_nominal:	The possible voltage value in uV
@@ -131,16 +130,26 @@
 		struct omap_volt_pmic_info *pmic_info);
 void omap_change_voltscale_method(struct voltagedomain *voltdm,
 		int voltscale_method);
+/* API to get the voltagedomain pointer */
+struct voltagedomain *omap_voltage_domain_lookup(char *name);
+
 int omap_voltage_late_init(void);
 #else
 static inline int omap_voltage_register_pmic(struct voltagedomain *voltdm,
-		struct omap_volt_pmic_info *pmic_info) {}
+		struct omap_volt_pmic_info *pmic_info)
+{
+	return -EINVAL;
+}
 static inline  void omap_change_voltscale_method(struct voltagedomain *voltdm,
 		int voltscale_method) {}
 static inline int omap_voltage_late_init(void)
 {
 	return -EINVAL;
 }
+static inline struct voltagedomain *omap_voltage_domain_lookup(char *name)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif
 
 #endif
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 459b319..49d3208 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -322,15 +322,18 @@
 
 struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
 {
-	struct omap_mbox *mbox;
-	int ret;
+	struct omap_mbox *_mbox, *mbox = NULL;
+	int i, ret;
 
 	if (!mboxes)
 		return ERR_PTR(-EINVAL);
 
-	for (mbox = *mboxes; mbox; mbox++)
-		if (!strcmp(mbox->name, name))
+	for (i = 0; (_mbox = mboxes[i]); i++) {
+		if (!strcmp(_mbox->name, name)) {
+			mbox = _mbox;
 			break;
+		}
+	}
 
 	if (!mbox)
 		return ERR_PTR(-ENOENT);
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index e814803..5f35223 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -232,20 +232,19 @@
  *        polarity    LEVEL          mask
  *
  ****************************************************************************/
-
-static void gpio_irq_ack(u32 irq)
+static void gpio_irq_ack(struct irq_data *d)
 {
-	int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
+	int type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
-		int pin = irq_to_gpio(irq);
+		int pin = irq_to_gpio(d->irq);
 		writel(~(1 << (pin & 31)), GPIO_EDGE_CAUSE(pin));
 	}
 }
 
-static void gpio_irq_mask(u32 irq)
+static void gpio_irq_mask(struct irq_data *d)
 {
-	int pin = irq_to_gpio(irq);
-	int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
+	int pin = irq_to_gpio(d->irq);
+	int type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
 	u32 reg = (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) ?
 		GPIO_EDGE_MASK(pin) : GPIO_LEVEL_MASK(pin);
 	u32 u = readl(reg);
@@ -253,10 +252,10 @@
 	writel(u, reg);
 }
 
-static void gpio_irq_unmask(u32 irq)
+static void gpio_irq_unmask(struct irq_data *d)
 {
-	int pin = irq_to_gpio(irq);
-	int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
+	int pin = irq_to_gpio(d->irq);
+	int type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
 	u32 reg = (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) ?
 		GPIO_EDGE_MASK(pin) : GPIO_LEVEL_MASK(pin);
 	u32 u = readl(reg);
@@ -264,20 +263,20 @@
 	writel(u, reg);
 }
 
-static int gpio_irq_set_type(u32 irq, u32 type)
+static int gpio_irq_set_type(struct irq_data *d, u32 type)
 {
-	int pin = irq_to_gpio(irq);
+	int pin = irq_to_gpio(d->irq);
 	struct irq_desc *desc;
 	u32 u;
 
 	u = readl(GPIO_IO_CONF(pin)) & (1 << (pin & 31));
 	if (!u) {
 		printk(KERN_ERR "orion gpio_irq_set_type failed "
-				"(irq %d, pin %d).\n", irq, pin);
+				"(irq %d, pin %d).\n", d->irq, pin);
 		return -EINVAL;
 	}
 
-	desc = irq_desc + irq;
+	desc = irq_desc + d->irq;
 
 	/*
 	 * Set edge/level type.
@@ -287,7 +286,7 @@
 	} else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
 		desc->handle_irq = handle_level_irq;
 	} else {
-		printk(KERN_ERR "failed to set irq=%d (type=%d)\n", irq, type);
+		printk(KERN_ERR "failed to set irq=%d (type=%d)\n", d->irq, type);
 		return -EINVAL;
 	}
 
@@ -325,10 +324,10 @@
 
 struct irq_chip orion_gpio_irq_chip = {
 	.name		= "orion_gpio_irq",
-	.ack		= gpio_irq_ack,
-	.mask		= gpio_irq_mask,
-	.unmask		= gpio_irq_unmask,
-	.set_type	= gpio_irq_set_type,
+	.irq_ack	= gpio_irq_ack,
+	.irq_mask	= gpio_irq_mask,
+	.irq_unmask	= gpio_irq_unmask,
+	.irq_set_type	= gpio_irq_set_type,
 };
 
 void orion_gpio_irq_handler(int pinoff)
diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
index 3f9d34f..7d0c7eb 100644
--- a/arch/arm/plat-orion/irq.c
+++ b/arch/arm/plat-orion/irq.c
@@ -14,31 +14,31 @@
 #include <linux/io.h>
 #include <plat/irq.h>
 
-static void orion_irq_mask(u32 irq)
+static void orion_irq_mask(struct irq_data *d)
 {
-	void __iomem *maskaddr = get_irq_chip_data(irq);
+	void __iomem *maskaddr = irq_data_get_irq_chip_data(d);
 	u32 mask;
 
 	mask = readl(maskaddr);
-	mask &= ~(1 << (irq & 31));
+	mask &= ~(1 << (d->irq & 31));
 	writel(mask, maskaddr);
 }
 
-static void orion_irq_unmask(u32 irq)
+static void orion_irq_unmask(struct irq_data *d)
 {
-	void __iomem *maskaddr = get_irq_chip_data(irq);
+	void __iomem *maskaddr = irq_data_get_irq_chip_data(d);
 	u32 mask;
 
 	mask = readl(maskaddr);
-	mask |= 1 << (irq & 31);
+	mask |= 1 << (d->irq & 31);
 	writel(mask, maskaddr);
 }
 
 static struct irq_chip orion_irq_chip = {
 	.name		= "orion_irq",
-	.mask		= orion_irq_mask,
-	.mask_ack	= orion_irq_mask,
-	.unmask		= orion_irq_unmask,
+	.irq_mask	= orion_irq_mask,
+	.irq_mask_ack	= orion_irq_mask,
+	.irq_unmask	= orion_irq_unmask,
 };
 
 void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index 98548c6..e7de6ae 100644
--- a/arch/arm/plat-pxa/gpio.c
+++ b/arch/arm/plat-pxa/gpio.c
@@ -155,10 +155,10 @@
 	__raw_writel(gfer, c->regbase + GFER_OFFSET);
 }
 
-static int pxa_gpio_irq_type(unsigned int irq, unsigned int type)
+static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
 	struct pxa_gpio_chip *c;
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	unsigned long gpdr, mask = GPIO_bit(gpio);
 
 	c = gpio_to_chip(gpio);
@@ -195,7 +195,7 @@
 
 	update_edge_detect(c);
 
-	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, irq, gpio,
+	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
 		((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
 		((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
 	return 0;
@@ -227,17 +227,17 @@
 	} while (loop);
 }
 
-static void pxa_ack_muxed_gpio(unsigned int irq)
+static void pxa_ack_muxed_gpio(struct irq_data *d)
 {
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
 
 	__raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
 }
 
-static void pxa_mask_muxed_gpio(unsigned int irq)
+static void pxa_mask_muxed_gpio(struct irq_data *d)
 {
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
 	uint32_t grer, gfer;
 
@@ -249,9 +249,9 @@
 	__raw_writel(gfer, c->regbase + GFER_OFFSET);
 }
 
-static void pxa_unmask_muxed_gpio(unsigned int irq)
+static void pxa_unmask_muxed_gpio(struct irq_data *d)
 {
-	int gpio = irq_to_gpio(irq);
+	int gpio = irq_to_gpio(d->irq);
 	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
 
 	c->irq_mask |= GPIO_bit(gpio);
@@ -260,10 +260,10 @@
 
 static struct irq_chip pxa_muxed_gpio_chip = {
 	.name		= "GPIO",
-	.ack		= pxa_ack_muxed_gpio,
-	.mask		= pxa_mask_muxed_gpio,
-	.unmask		= pxa_unmask_muxed_gpio,
-	.set_type	= pxa_gpio_irq_type,
+	.irq_ack	= pxa_ack_muxed_gpio,
+	.irq_mask	= pxa_mask_muxed_gpio,
+	.irq_unmask	= pxa_unmask_muxed_gpio,
+	.irq_set_type	= pxa_gpio_irq_type,
 };
 
 void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
@@ -291,7 +291,7 @@
 
 	/* Install handler for GPIO>=2 edge detect interrupts */
 	set_irq_chained_handler(mux_irq, pxa_gpio_demux_handler);
-	pxa_muxed_gpio_chip.set_wake = fn;
+	pxa_muxed_gpio_chip.irq_set_wake = fn;
 }
 
 #ifdef CONFIG_PM
diff --git a/arch/arm/plat-pxa/include/plat/gpio.h b/arch/arm/plat-pxa/include/plat/gpio.h
index 44248cb..1ddd2b9 100644
--- a/arch/arm/plat-pxa/include/plat/gpio.h
+++ b/arch/arm/plat-pxa/include/plat/gpio.h
@@ -1,6 +1,8 @@
 #ifndef __PLAT_GPIO_H
 #define __PLAT_GPIO_H
 
+struct irq_data;
+
 /*
  * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
  * one set of registers. The register offsets are organized below:
@@ -56,7 +58,7 @@
  */
 extern int pxa_last_gpio;
 
-typedef int (*set_wake_t)(unsigned int irq, unsigned int on);
+typedef int (*set_wake_t)(struct irq_data *d, unsigned int on);
 
 extern void pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn);
 #endif /* __PLAT_GPIO_H */
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c
index b77e018..a9aa5ad 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/arch/arm/plat-pxa/mfp.c
@@ -139,10 +139,11 @@
 #define mfp_configured(p)	((p)->config != -1)
 
 /*
- * perform a read-back of any MFPR register to make sure the
+ * perform a read-back of any valid MFPR register to make sure the
  * previous writes are finished
  */
-#define mfpr_sync()	(void)__raw_readl(mfpr_mmio_base + 0)
+static unsigned long mfpr_off_readback;
+#define mfpr_sync()	(void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
 
 static inline void __mfp_config_run(struct mfp_pin *p)
 {
@@ -248,6 +249,9 @@
 
 	spin_lock_irqsave(&mfp_spin_lock, flags);
 
+	/* mfp offset for readback */
+	mfpr_off_readback = map[0].offset;
+
 	for (p = map; p->start != MFP_PIN_INVALID; p++) {
 		offset = p->offset;
 		i = p->start;
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 2f91057..268f3ed 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -194,7 +194,6 @@
 	memcpy(&s3c2410ts_info, hard_s3c2410ts_info, sizeof(struct s3c2410_ts_mach_info));
 	s3c_device_ts.dev.platform_data = &s3c2410ts_info;
 }
-EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
 
 /* USB Device (Gadget)*/
 
@@ -259,21 +258,6 @@
 
 EXPORT_SYMBOL(s3c_device_iis);
 
-/* ASoC PCM DMA */
-
-static u64 s3c_device_audio_dmamask = 0xffffffffUL;
-
-struct platform_device s3c_device_pcm = {
-	.name		  = "s3c24xx-pcm-audio",
-	.id		  = -1,
-	.dev              = {
-		.dma_mask = &s3c_device_audio_dmamask,
-		.coherent_dma_mask = 0xffffffffUL
-	}
-};
-
-EXPORT_SYMBOL(s3c_device_pcm);
-
 /* RTC */
 
 static struct resource s3c_rtc_resource[] = {
@@ -496,8 +480,10 @@
 	},
 };
 
+static u64 s3c_device_audio_dmamask = 0xffffffffUL;
+
 struct platform_device s3c_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s3c_ac97_resource),
 	.resource	  = s3c_ac97_resource,
diff --git a/arch/arm/plat-s3c24xx/include/plat/irq.h b/arch/arm/plat-s3c24xx/include/plat/irq.h
index 69e1be8..ec087d6 100644
--- a/arch/arm/plat-s3c24xx/include/plat/irq.h
+++ b/arch/arm/plat-s3c24xx/include/plat/irq.h
@@ -107,9 +107,9 @@
 /* exported for use in arch/arm/mach-s3c2410 */
 
 #ifdef CONFIG_PM
-extern int s3c_irq_wake(unsigned int irqno, unsigned int state);
+extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
 #else
 #define s3c_irq_wake NULL
 #endif
 
-extern int s3c_irqext_type(unsigned int irq, unsigned int type);
+extern int s3c_irqext_type(struct irq_data *d, unsigned int type);
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/plat-s3c24xx/irq-pm.c
index ea8dea3..c3624d8 100644
--- a/arch/arm/plat-s3c24xx/irq-pm.c
+++ b/arch/arm/plat-s3c24xx/irq-pm.c
@@ -15,11 +15,14 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/sysdev.h>
+#include <linux/irq.h>
 
 #include <plat/cpu.h>
 #include <plat/pm.h>
 #include <plat/irq.h>
 
+#include <asm/irq.h>
+
 /* state for IRQs over sleep */
 
 /* default is to allow for EINT0..EINT15, and IRQ_RTC as wakeup sources
@@ -30,15 +33,15 @@
 unsigned long s3c_irqwake_intallow	= 1L << (IRQ_RTC - IRQ_EINT0) | 0xfL;
 unsigned long s3c_irqwake_eintallow	= 0x0000fff0L;
 
-int s3c_irq_wake(unsigned int irqno, unsigned int state)
+int s3c_irq_wake(struct irq_data *data, unsigned int state)
 {
-	unsigned long irqbit = 1 << (irqno - IRQ_EINT0);
+	unsigned long irqbit = 1 << (data->irq - IRQ_EINT0);
 
 	if (!(s3c_irqwake_intallow & irqbit))
 		return -ENOENT;
 
 	printk(KERN_INFO "wake %s for irq %d\n",
-	       state ? "enabled" : "disabled", irqno);
+	       state ? "enabled" : "disabled", data->irq);
 
 	if (!state)
 		s3c_irqwake_intmask |= irqbit;
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index ad0d44e..4434cb5 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -34,30 +34,29 @@
 #include <plat/irq.h>
 
 static void
-s3c_irq_mask(unsigned int irqno)
+s3c_irq_mask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq - IRQ_EINT0;
 	unsigned long mask;
 
-	irqno -= IRQ_EINT0;
-
 	mask = __raw_readl(S3C2410_INTMSK);
 	mask |= 1UL << irqno;
 	__raw_writel(mask, S3C2410_INTMSK);
 }
 
 static inline void
-s3c_irq_ack(unsigned int irqno)
+s3c_irq_ack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 
 	__raw_writel(bitval, S3C2410_SRCPND);
 	__raw_writel(bitval, S3C2410_INTPND);
 }
 
 static inline void
-s3c_irq_maskack(unsigned int irqno)
+s3c_irq_maskack(struct irq_data *data)
 {
-	unsigned long bitval = 1UL << (irqno - IRQ_EINT0);
+	unsigned long bitval = 1UL << (data->irq - IRQ_EINT0);
 	unsigned long mask;
 
 	mask = __raw_readl(S3C2410_INTMSK);
@@ -69,8 +68,9 @@
 
 
 static void
-s3c_irq_unmask(unsigned int irqno)
+s3c_irq_unmask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq;
 	unsigned long mask;
 
 	if (irqno != IRQ_TIMER4 && irqno != IRQ_EINT8t23)
@@ -85,40 +85,39 @@
 
 struct irq_chip s3c_irq_level_chip = {
 	.name		= "s3c-level",
-	.ack		= s3c_irq_maskack,
-	.mask		= s3c_irq_mask,
-	.unmask		= s3c_irq_unmask,
-	.set_wake	= s3c_irq_wake
+	.irq_ack	= s3c_irq_maskack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake
 };
 
 struct irq_chip s3c_irq_chip = {
 	.name		= "s3c",
-	.ack		= s3c_irq_ack,
-	.mask		= s3c_irq_mask,
-	.unmask		= s3c_irq_unmask,
-	.set_wake	= s3c_irq_wake
+	.irq_ack	= s3c_irq_ack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake
 };
 
 static void
-s3c_irqext_mask(unsigned int irqno)
+s3c_irqext_mask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq - EXTINT_OFF;
 	unsigned long mask;
 
-	irqno -= EXTINT_OFF;
-
 	mask = __raw_readl(S3C24XX_EINTMASK);
 	mask |= ( 1UL << irqno);
 	__raw_writel(mask, S3C24XX_EINTMASK);
 }
 
 static void
-s3c_irqext_ack(unsigned int irqno)
+s3c_irqext_ack(struct irq_data *data)
 {
 	unsigned long req;
 	unsigned long bit;
 	unsigned long mask;
 
-	bit = 1UL << (irqno - EXTINT_OFF);
+	bit = 1UL << (data->irq - EXTINT_OFF);
 
 	mask = __raw_readl(S3C24XX_EINTMASK);
 
@@ -129,64 +128,57 @@
 
 	/* not sure if we should be acking the parent irq... */
 
-	if (irqno <= IRQ_EINT7 ) {
+	if (data->irq <= IRQ_EINT7) {
 		if ((req & 0xf0) == 0)
-			s3c_irq_ack(IRQ_EINT4t7);
+			s3c_irq_ack(irq_get_irq_data(IRQ_EINT4t7));
 	} else {
 		if ((req >> 8) == 0)
-			s3c_irq_ack(IRQ_EINT8t23);
+			s3c_irq_ack(irq_get_irq_data(IRQ_EINT8t23));
 	}
 }
 
 static void
-s3c_irqext_unmask(unsigned int irqno)
+s3c_irqext_unmask(struct irq_data *data)
 {
+	unsigned int irqno = data->irq - EXTINT_OFF;
 	unsigned long mask;
 
-	irqno -= EXTINT_OFF;
-
 	mask = __raw_readl(S3C24XX_EINTMASK);
-	mask &= ~( 1UL << irqno);
+	mask &= ~(1UL << irqno);
 	__raw_writel(mask, S3C24XX_EINTMASK);
 }
 
 int
-s3c_irqext_type(unsigned int irq, unsigned int type)
+s3c_irqext_type(struct irq_data *data, unsigned int type)
 {
 	void __iomem *extint_reg;
 	void __iomem *gpcon_reg;
 	unsigned long gpcon_offset, extint_offset;
 	unsigned long newvalue = 0, value;
 
-	if ((irq >= IRQ_EINT0) && (irq <= IRQ_EINT3))
-	{
+	if ((data->irq >= IRQ_EINT0) && (data->irq <= IRQ_EINT3)) {
 		gpcon_reg = S3C2410_GPFCON;
 		extint_reg = S3C24XX_EXTINT0;
-		gpcon_offset = (irq - IRQ_EINT0) * 2;
-		extint_offset = (irq - IRQ_EINT0) * 4;
-	}
-	else if ((irq >= IRQ_EINT4) && (irq <= IRQ_EINT7))
-	{
+		gpcon_offset = (data->irq - IRQ_EINT0) * 2;
+		extint_offset = (data->irq - IRQ_EINT0) * 4;
+	} else if ((data->irq >= IRQ_EINT4) && (data->irq <= IRQ_EINT7)) {
 		gpcon_reg = S3C2410_GPFCON;
 		extint_reg = S3C24XX_EXTINT0;
-		gpcon_offset = (irq - (EXTINT_OFF)) * 2;
-		extint_offset = (irq - (EXTINT_OFF)) * 4;
-	}
-	else if ((irq >= IRQ_EINT8) && (irq <= IRQ_EINT15))
-	{
+		gpcon_offset = (data->irq - (EXTINT_OFF)) * 2;
+		extint_offset = (data->irq - (EXTINT_OFF)) * 4;
+	} else if ((data->irq >= IRQ_EINT8) && (data->irq <= IRQ_EINT15)) {
 		gpcon_reg = S3C2410_GPGCON;
 		extint_reg = S3C24XX_EXTINT1;
-		gpcon_offset = (irq - IRQ_EINT8) * 2;
-		extint_offset = (irq - IRQ_EINT8) * 4;
-	}
-	else if ((irq >= IRQ_EINT16) && (irq <= IRQ_EINT23))
-	{
+		gpcon_offset = (data->irq - IRQ_EINT8) * 2;
+		extint_offset = (data->irq - IRQ_EINT8) * 4;
+	} else if ((data->irq >= IRQ_EINT16) && (data->irq <= IRQ_EINT23)) {
 		gpcon_reg = S3C2410_GPGCON;
 		extint_reg = S3C24XX_EXTINT2;
-		gpcon_offset = (irq - IRQ_EINT8) * 2;
-		extint_offset = (irq - IRQ_EINT16) * 4;
-	} else
+		gpcon_offset = (data->irq - IRQ_EINT8) * 2;
+		extint_offset = (data->irq - IRQ_EINT16) * 4;
+	} else {
 		return -1;
+	}
 
 	/* Set the GPIO to external interrupt mode */
 	value = __raw_readl(gpcon_reg);
@@ -234,20 +226,20 @@
 
 static struct irq_chip s3c_irqext_chip = {
 	.name		= "s3c-ext",
-	.mask		= s3c_irqext_mask,
-	.unmask		= s3c_irqext_unmask,
-	.ack		= s3c_irqext_ack,
-	.set_type	= s3c_irqext_type,
-	.set_wake	= s3c_irqext_wake
+	.irq_mask	= s3c_irqext_mask,
+	.irq_unmask	= s3c_irqext_unmask,
+	.irq_ack	= s3c_irqext_ack,
+	.irq_set_type	= s3c_irqext_type,
+	.irq_set_wake	= s3c_irqext_wake
 };
 
 static struct irq_chip s3c_irq_eint0t4 = {
 	.name		= "s3c-ext0",
-	.ack		= s3c_irq_ack,
-	.mask		= s3c_irq_mask,
-	.unmask		= s3c_irq_unmask,
-	.set_wake	= s3c_irq_wake,
-	.set_type	= s3c_irqext_type,
+	.irq_ack	= s3c_irq_ack,
+	.irq_mask	= s3c_irq_mask,
+	.irq_unmask	= s3c_irq_unmask,
+	.irq_set_wake	= s3c_irq_wake,
+	.irq_set_type	= s3c_irqext_type,
 };
 
 /* mask values for the parent registers for each of the interrupt types */
@@ -261,109 +253,109 @@
 /* UART0 */
 
 static void
-s3c_irq_uart0_mask(unsigned int irqno)
+s3c_irq_uart0_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART0, 7);
+	s3c_irqsub_mask(data->irq, INTMSK_UART0, 7);
 }
 
 static void
-s3c_irq_uart0_unmask(unsigned int irqno)
+s3c_irq_uart0_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART0);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART0);
 }
 
 static void
-s3c_irq_uart0_ack(unsigned int irqno)
+s3c_irq_uart0_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART0, 7);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART0, 7);
 }
 
 static struct irq_chip s3c_irq_uart0 = {
 	.name		= "s3c-uart0",
-	.mask		= s3c_irq_uart0_mask,
-	.unmask		= s3c_irq_uart0_unmask,
-	.ack		= s3c_irq_uart0_ack,
+	.irq_mask	= s3c_irq_uart0_mask,
+	.irq_unmask	= s3c_irq_uart0_unmask,
+	.irq_ack	= s3c_irq_uart0_ack,
 };
 
 /* UART1 */
 
 static void
-s3c_irq_uart1_mask(unsigned int irqno)
+s3c_irq_uart1_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART1, 7 << 3);
+	s3c_irqsub_mask(data->irq, INTMSK_UART1, 7 << 3);
 }
 
 static void
-s3c_irq_uart1_unmask(unsigned int irqno)
+s3c_irq_uart1_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART1);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART1);
 }
 
 static void
-s3c_irq_uart1_ack(unsigned int irqno)
+s3c_irq_uart1_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART1, 7 << 3);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART1, 7 << 3);
 }
 
 static struct irq_chip s3c_irq_uart1 = {
 	.name		= "s3c-uart1",
-	.mask		= s3c_irq_uart1_mask,
-	.unmask		= s3c_irq_uart1_unmask,
-	.ack		= s3c_irq_uart1_ack,
+	.irq_mask	= s3c_irq_uart1_mask,
+	.irq_unmask	= s3c_irq_uart1_unmask,
+	.irq_ack	= s3c_irq_uart1_ack,
 };
 
 /* UART2 */
 
 static void
-s3c_irq_uart2_mask(unsigned int irqno)
+s3c_irq_uart2_mask(struct irq_data *data)
 {
-	s3c_irqsub_mask(irqno, INTMSK_UART2, 7 << 6);
+	s3c_irqsub_mask(data->irq, INTMSK_UART2, 7 << 6);
 }
 
 static void
-s3c_irq_uart2_unmask(unsigned int irqno)
+s3c_irq_uart2_unmask(struct irq_data *data)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_UART2);
+	s3c_irqsub_unmask(data->irq, INTMSK_UART2);
 }
 
 static void
-s3c_irq_uart2_ack(unsigned int irqno)
+s3c_irq_uart2_ack(struct irq_data *data)
 {
-	s3c_irqsub_maskack(irqno, INTMSK_UART2, 7 << 6);
+	s3c_irqsub_maskack(data->irq, INTMSK_UART2, 7 << 6);
 }
 
 static struct irq_chip s3c_irq_uart2 = {
 	.name		= "s3c-uart2",
-	.mask		= s3c_irq_uart2_mask,
-	.unmask		= s3c_irq_uart2_unmask,
-	.ack		= s3c_irq_uart2_ack,
+	.irq_mask	= s3c_irq_uart2_mask,
+	.irq_unmask	= s3c_irq_uart2_unmask,
+	.irq_ack	= s3c_irq_uart2_ack,
 };
 
 /* ADC and Touchscreen */
 
 static void
-s3c_irq_adc_mask(unsigned int irqno)
+s3c_irq_adc_mask(struct irq_data *d)
 {
-	s3c_irqsub_mask(irqno, INTMSK_ADCPARENT, 3 << 9);
+	s3c_irqsub_mask(d->irq, INTMSK_ADCPARENT, 3 << 9);
 }
 
 static void
-s3c_irq_adc_unmask(unsigned int irqno)
+s3c_irq_adc_unmask(struct irq_data *d)
 {
-	s3c_irqsub_unmask(irqno, INTMSK_ADCPARENT);
+	s3c_irqsub_unmask(d->irq, INTMSK_ADCPARENT);
 }
 
 static void
-s3c_irq_adc_ack(unsigned int irqno)
+s3c_irq_adc_ack(struct irq_data *d)
 {
-	s3c_irqsub_ack(irqno, INTMSK_ADCPARENT, 3 << 9);
+	s3c_irqsub_ack(d->irq, INTMSK_ADCPARENT, 3 << 9);
 }
 
 static struct irq_chip s3c_irq_adc = {
 	.name		= "s3c-adc",
-	.mask		= s3c_irq_adc_mask,
-	.unmask		= s3c_irq_adc_unmask,
-	.ack		= s3c_irq_adc_ack,
+	.irq_mask	= s3c_irq_adc_mask,
+	.irq_unmask	= s3c_irq_adc_unmask,
+	.irq_ack	= s3c_irq_adc_ack,
 };
 
 /* irq demux for adc */
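The hunks above, like most of the interrupt changes in this pull, convert irq_chip callbacks from the old unsigned-int prototypes (.mask/.unmask/.ack) to the struct irq_data based ones (.irq_mask/.irq_unmask/.irq_ack). A minimal sketch of the new callback shape, using a made-up "foo" controller and register rather than any driver in this series:

	#include <linux/irq.h>
	#include <linux/io.h>

	#define FOO_IRQ_BASE	32		/* assumed first hardware IRQ */

	static void __iomem *foo_intmsk;	/* assumed mask register, mapped elsewhere */

	static void foo_irq_mask(struct irq_data *data)
	{
		/* the IRQ number now comes from irq_data, not a bare argument */
		u32 mask = __raw_readl(foo_intmsk);

		mask |= 1 << (data->irq - FOO_IRQ_BASE);
		__raw_writel(mask, foo_intmsk);
	}

	static void foo_irq_unmask(struct irq_data *data)
	{
		u32 mask = __raw_readl(foo_intmsk);

		mask &= ~(1 << (data->irq - FOO_IRQ_BASE));
		__raw_writel(mask, foo_intmsk);
	}

	static struct irq_chip foo_irq_chip = {
		.name		= "foo",
		.irq_mask	= foo_irq_mask,		/* was .mask */
		.irq_unmask	= foo_irq_unmask,	/* was .unmask */
	};

The chip is still installed with set_irq_chip(irq, &foo_irq_chip) as before; only the callback signatures and field names change.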
diff --git a/arch/arm/plat-s3c24xx/s3c2443-clock.c b/arch/arm/plat-s3c24xx/s3c2443-clock.c
index 461f070..82f2d4a 100644
--- a/arch/arm/plat-s3c24xx/s3c2443-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2443-clock.c
@@ -271,7 +271,7 @@
 		.ctrlbit	= S3C2443_HCLKCON_DMA5,
 	}, {
 		.name		= "hsmmc",
-		.id		= 0,
+		.id		= 1,
 		.parent		= &clk_h,
 		.enable		= s3c2443_clkcon_enable_h,
 		.ctrlbit	= S3C2443_HCLKCON_HSMMC,
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 65dbfa8..557f8c5 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -37,6 +37,14 @@
 	help
 	  Common code for the GPIO interrupts (other than external interrupts.)
 
+comment "System MMU"
+
+config S5P_SYSTEM_MMU
+	bool "S5P SYSTEM MMU"
+	depends on ARCH_S5PV310
+	help
+	  Say Y here if you want to enable the System MMU.
+
 config S5P_DEV_FIMC0
 	bool
 	help
@@ -56,3 +64,13 @@
 	bool
 	help
 	  Compile in platform device definition for OneNAND controller
+
+config S5P_DEV_CSIS0
+	bool
+	help
+	  Compile in platform device definitions for MIPI-CSIS channel 0
+
+config S5P_DEV_CSIS1
+	bool
+	help
+	  Compile in platform device definitions for MIPI-CSIS channel 1
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
index de65238..4bd5cf9 100644
--- a/arch/arm/plat-s5p/Makefile
+++ b/arch/arm/plat-s5p/Makefile
@@ -19,6 +19,7 @@
 obj-y				+= irq.o
 obj-$(CONFIG_S5P_EXT_INT)	+= irq-eint.o
 obj-$(CONFIG_S5P_GPIO_INT)	+= irq-gpioint.o
+obj-$(CONFIG_S5P_SYSTEM_MMU)	+= sysmmu.o
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_PM)		+= irq-pm.o
 
@@ -28,3 +29,5 @@
 obj-$(CONFIG_S5P_DEV_FIMC1)	+= dev-fimc1.o
 obj-$(CONFIG_S5P_DEV_FIMC2)	+= dev-fimc2.o
 obj-$(CONFIG_S5P_DEV_ONENAND)	+= dev-onenand.o
+obj-$(CONFIG_S5P_DEV_CSIS0)	+= dev-csis0.o
+obj-$(CONFIG_S5P_DEV_CSIS1)	+= dev-csis1.o
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index 74f7f5a..047d31c 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -108,6 +108,11 @@
 		.pfn		= __phys_to_pfn(S3C_PA_WDT),
 		.length		= SZ_4K,
 		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_SROMC,
+		.pfn		= __phys_to_pfn(S5P_PA_SROMC),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
 	},
 };
 
diff --git a/arch/arm/plat-s5p/dev-csis0.c b/arch/arm/plat-s5p/dev-csis0.c
new file mode 100644
index 0000000..dfab1c8
--- /dev/null
+++ b/arch/arm/plat-s5p/dev-csis0.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * S5P series device definition for MIPI-CSIS channel 0
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <mach/map.h>
+
+static struct resource s5p_mipi_csis0_resource[] = {
+	[0] = {
+		.start = S5P_PA_MIPI_CSIS0,
+		.end   = S5P_PA_MIPI_CSIS0 + SZ_4K - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_MIPI_CSIS0,
+		.end   = IRQ_MIPI_CSIS0,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+struct platform_device s5p_device_mipi_csis0 = {
+	.name		  = "s5p-mipi-csis",
+	.id		  = 0,
+	.num_resources	  = ARRAY_SIZE(s5p_mipi_csis0_resource),
+	.resource	  = s5p_mipi_csis0_resource,
+};
diff --git a/arch/arm/plat-s5p/dev-csis1.c b/arch/arm/plat-s5p/dev-csis1.c
new file mode 100644
index 0000000..e3053f2
--- /dev/null
+++ b/arch/arm/plat-s5p/dev-csis1.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * S5P series device definition for MIPI-CSIS channel 1
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <mach/map.h>
+
+static struct resource s5p_mipi_csis1_resource[] = {
+	[0] = {
+		.start = S5P_PA_MIPI_CSIS1,
+		.end   = S5P_PA_MIPI_CSIS1 + SZ_4K - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_MIPI_CSIS1,
+		.end   = IRQ_MIPI_CSIS1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+struct platform_device s5p_device_mipi_csis1 = {
+	.name		  = "s5p-mipi-csis",
+	.id		  = 1,
+	.num_resources	  = ARRAY_SIZE(s5p_mipi_csis1_resource),
+	.resource	  = s5p_mipi_csis1_resource,
+};
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c
index 6a73428..afaf87f 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-s5p/dev-uart.c
@@ -28,7 +28,7 @@
 static struct resource s5p_uart0_resource[] = {
 	[0] = {
 		.start	= S5P_PA_UART0,
-		.end	= S5P_PA_UART0 + S5P_SZ_UART,
+		.end	= S5P_PA_UART0 + S5P_SZ_UART - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -51,7 +51,7 @@
 static struct resource s5p_uart1_resource[] = {
 	[0] = {
 		.start	= S5P_PA_UART1,
-		.end	= S5P_PA_UART1 + S5P_SZ_UART,
+		.end	= S5P_PA_UART1 + S5P_SZ_UART - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -74,7 +74,7 @@
 static struct resource s5p_uart2_resource[] = {
 	[0] = {
 		.start	= S5P_PA_UART2,
-		.end	= S5P_PA_UART2 + S5P_SZ_UART,
+		.end	= S5P_PA_UART2 + S5P_SZ_UART - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -98,7 +98,7 @@
 #if CONFIG_SERIAL_SAMSUNG_UARTS > 3
 	[0] = {
 		.start	= S5P_PA_UART3,
-		.end	= S5P_PA_UART3 + S5P_SZ_UART,
+		.end	= S5P_PA_UART3 + S5P_SZ_UART - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -123,7 +123,7 @@
 #if CONFIG_SERIAL_SAMSUNG_UARTS > 4
 	[0] = {
 		.start	= S5P_PA_UART4,
-		.end	= S5P_PA_UART4 + S5P_SZ_UART,
+		.end	= S5P_PA_UART4 + S5P_SZ_UART - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -148,7 +148,7 @@
 #if CONFIG_SERIAL_SAMSUNG_UARTS > 5
 	[0] = {
 		.start	= S5P_PA_UART5,
-		.end	= S5P_PA_UART5 + S5P_SZ_UART,
+		.end	= S5P_PA_UART5 + S5P_SZ_UART - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
diff --git a/arch/arm/plat-s5p/include/plat/csis.h b/arch/arm/plat-s5p/include/plat/csis.h
new file mode 100644
index 0000000..51e308c
--- /dev/null
+++ b/arch/arm/plat-s5p/include/plat/csis.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * S5P series MIPI CSI slave device support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef PLAT_S5P_CSIS_H_
+#define PLAT_S5P_CSIS_H_ __FILE__
+
+/**
+ * struct s5p_platform_mipi_csis - platform data for MIPI-CSIS
+ * @clk_rate: bus clock frequency
+ * @lanes: number of data lanes used
+ * @alignment: data alignment in bits
+ * @hs_settle: HS-RX settle time
+ */
+struct s5p_platform_mipi_csis {
+	unsigned long clk_rate;
+	u8 lanes;
+	u8 alignment;
+	u8 hs_settle;
+};
+
+#endif /* PLAT_S5P_CSIS_H_ */
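For illustration only (not part of this series), a board file would pair this platform data with the s5p_device_mipi_csis0 device added in dev-csis0.c above; the numeric values are placeholders, not recommended settings:

	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <plat/csis.h>
	#include <plat/devs.h>

	static struct s5p_platform_mipi_csis board_csis0_pdata = {
		.clk_rate	= 166000000UL,	/* bus clock frequency in Hz */
		.lanes		= 2,		/* number of data lanes used */
		.alignment	= 32,		/* data alignment in bits */
		.hs_settle	= 12,		/* HS-RX settle time */
	};

	static void __init board_setup_csis(void)
	{
		s5p_device_mipi_csis0.dev.platform_data = &board_csis0_pdata;
		platform_device_register(&s5p_device_mipi_csis0);
	}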
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index fef353d..d973d39 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -15,6 +15,7 @@
 
 #define S5P_VA_CHIPID		S3C_ADDR(0x02000000)
 #define S5P_VA_CMU		S3C_ADDR(0x02100000)
+#define S5P_VA_PMU		S3C_ADDR(0x02180000)
 #define S5P_VA_GPIO		S3C_ADDR(0x02200000)
 #define S5P_VA_GPIO1		S5P_VA_GPIO
 #define S5P_VA_GPIO2		S3C_ADDR(0x02240000)
diff --git a/arch/arm/plat-s5p/include/plat/regs-srom.h b/arch/arm/plat-s5p/include/plat/regs-srom.h
new file mode 100644
index 0000000..f121ab5
--- /dev/null
+++ b/arch/arm/plat-s5p/include/plat/regs-srom.h
@@ -0,0 +1,54 @@
+/* linux/arch/arm/plat-s5p/include/plat/regs-srom.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5P SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_PLAT_S5P_REGS_SROM_H
+#define __ASM_PLAT_S5P_REGS_SROM_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_SROMREG(x)		(S5P_VA_SROMC + (x))
+
+#define S5P_SROM_BW		S5P_SROMREG(0x0)
+#define S5P_SROM_BC0		S5P_SROMREG(0x4)
+#define S5P_SROM_BC1		S5P_SROMREG(0x8)
+#define S5P_SROM_BC2		S5P_SROMREG(0xc)
+#define S5P_SROM_BC3		S5P_SROMREG(0x10)
+#define S5P_SROM_BC4		S5P_SROMREG(0x14)
+#define S5P_SROM_BC5		S5P_SROMREG(0x18)
+
+/* one register BW holds a 4-bit packed setting for each of NCS0 - NCS5 */
+
+#define S5P_SROM_BW__DATAWIDTH__SHIFT		0
+#define S5P_SROM_BW__ADDRMODE__SHIFT		1
+#define S5P_SROM_BW__WAITENABLE__SHIFT		2
+#define S5P_SROM_BW__BYTEENABLE__SHIFT		3
+
+#define S5P_SROM_BW__CS_MASK			0xf
+
+#define S5P_SROM_BW__NCS0__SHIFT		0
+#define S5P_SROM_BW__NCS1__SHIFT		4
+#define S5P_SROM_BW__NCS2__SHIFT		8
+#define S5P_SROM_BW__NCS3__SHIFT		12
+#define S5P_SROM_BW__NCS4__SHIFT		16
+#define S5P_SROM_BW__NCS5__SHIFT		20
+
+/* the same field layout applies to each of BC0 - BC5 */
+
+#define S5P_SROM_BCX__PMC__SHIFT		0
+#define S5P_SROM_BCX__TACP__SHIFT		4
+#define S5P_SROM_BCX__TCAH__SHIFT		8
+#define S5P_SROM_BCX__TCOH__SHIFT		12
+#define S5P_SROM_BCX__TACC__SHIFT		16
+#define S5P_SROM_BCX__TCOS__SHIFT		24
+#define S5P_SROM_BCX__TACS__SHIFT		28
+
+#endif /* __ASM_PLAT_S5P_REGS_SROM_H */
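As a rough usage sketch (the bank and field values are arbitrary examples, not settings from this patch), the packed BW fields are programmed like this:

	#include <linux/init.h>
	#include <linux/io.h>
	#include <plat/regs-srom.h>

	static void __init board_srom_init(void)
	{
		u32 bw = __raw_readl(S5P_SROM_BW);
		/* example: 16-bit data width with byte enables on chip-select 3 */
		u32 cs3 = (1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
			  (1 << S5P_SROM_BW__BYTEENABLE__SHIFT);

		bw &= ~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS3__SHIFT);
		bw |= cs3 << S5P_SROM_BW__NCS3__SHIFT;
		__raw_writel(bw, S5P_SROM_BW);
	}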
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
index 752f1a6..225aa25 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -28,39 +28,40 @@
 #include <plat/gpio-cfg.h>
 #include <mach/regs-gpio.h>
 
-static inline void s5p_irq_eint_mask(unsigned int irq)
+static inline void s5p_irq_eint_mask(struct irq_data *data)
 {
 	u32 mask;
 
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask |= eint_irq_to_bit(irq);
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask |= eint_irq_to_bit(data->irq);
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 }
 
-static void s5p_irq_eint_unmask(unsigned int irq)
+static void s5p_irq_eint_unmask(struct irq_data *data)
 {
 	u32 mask;
 
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
-	mask &= ~(eint_irq_to_bit(irq));
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask &= ~(eint_irq_to_bit(data->irq));
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
 }
 
-static inline void s5p_irq_eint_ack(unsigned int irq)
+static inline void s5p_irq_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+	__raw_writel(eint_irq_to_bit(data->irq),
+		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
 }
 
-static void s5p_irq_eint_maskack(unsigned int irq)
+static void s5p_irq_eint_maskack(struct irq_data *data)
 {
 	/* compiler should in-line these */
-	s5p_irq_eint_mask(irq);
-	s5p_irq_eint_ack(irq);
+	s5p_irq_eint_mask(data);
+	s5p_irq_eint_ack(data);
 }
 
-static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type)
+static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
 {
-	int offs = EINT_OFFSET(irq);
+	int offs = EINT_OFFSET(data->irq);
 	int shift;
 	u32 ctrl, mask;
 	u32 newvalue = 0;
@@ -94,10 +95,10 @@
 	shift = (offs & 0x7) * 4;
 	mask = 0x7 << shift;
 
-	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(irq)));
+	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
 	ctrl &= ~mask;
 	ctrl |= newvalue << shift;
-	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(irq)));
+	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
 
 	if ((0 <= offs) && (offs < 8))
 		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
@@ -119,13 +120,13 @@
 
 static struct irq_chip s5p_irq_eint = {
 	.name		= "s5p-eint",
-	.mask		= s5p_irq_eint_mask,
-	.unmask		= s5p_irq_eint_unmask,
-	.mask_ack	= s5p_irq_eint_maskack,
-	.ack		= s5p_irq_eint_ack,
-	.set_type	= s5p_irq_eint_set_type,
+	.irq_mask	= s5p_irq_eint_mask,
+	.irq_unmask	= s5p_irq_eint_unmask,
+	.irq_mask_ack	= s5p_irq_eint_maskack,
+	.irq_ack	= s5p_irq_eint_ack,
+	.irq_set_type	= s5p_irq_eint_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= s3c_irqext_wake,
+	.irq_set_wake	= s3c_irqext_wake,
 #endif
 };
 
@@ -159,42 +160,43 @@
 	s5p_irq_demux_eint(IRQ_EINT(24));
 }
 
-static inline void s5p_irq_vic_eint_mask(unsigned int irq)
+static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	void __iomem *base = irq_data_get_irq_chip_data(data);
 
-	s5p_irq_eint_mask(irq);
-	writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE_CLEAR);
+	s5p_irq_eint_mask(data);
+	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
 }
 
-static void s5p_irq_vic_eint_unmask(unsigned int irq)
+static void s5p_irq_vic_eint_unmask(struct irq_data *data)
 {
-	void __iomem *base = get_irq_chip_data(irq);
+	void __iomem *base = irq_data_get_irq_chip_data(data);
 
-	s5p_irq_eint_unmask(irq);
-	writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE);
+	s5p_irq_eint_unmask(data);
+	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
 }
 
-static inline void s5p_irq_vic_eint_ack(unsigned int irq)
+static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
 {
-	__raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+	__raw_writel(eint_irq_to_bit(data->irq),
+		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
 }
 
-static void s5p_irq_vic_eint_maskack(unsigned int irq)
+static void s5p_irq_vic_eint_maskack(struct irq_data *data)
 {
-	s5p_irq_vic_eint_mask(irq);
-	s5p_irq_vic_eint_ack(irq);
+	s5p_irq_vic_eint_mask(data);
+	s5p_irq_vic_eint_ack(data);
 }
 
 static struct irq_chip s5p_irq_vic_eint = {
 	.name		= "s5p_vic_eint",
-	.mask		= s5p_irq_vic_eint_mask,
-	.unmask		= s5p_irq_vic_eint_unmask,
-	.mask_ack	= s5p_irq_vic_eint_maskack,
-	.ack		= s5p_irq_vic_eint_ack,
-	.set_type	= s5p_irq_eint_set_type,
+	.irq_mask	= s5p_irq_vic_eint_mask,
+	.irq_unmask	= s5p_irq_vic_eint_unmask,
+	.irq_mask_ack	= s5p_irq_vic_eint_maskack,
+	.irq_ack	= s5p_irq_vic_eint_ack,
+	.irq_set_type	= s5p_irq_eint_set_type,
 #ifdef CONFIG_PM
-	.set_wake	= s3c_irqext_wake,
+	.irq_set_wake	= s3c_irqext_wake,
 #endif
 };
 
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 0e5dc8c..3b6bf89 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -30,9 +30,9 @@
 
 static struct s3c_gpio_chip *irq_chips[S5P_GPIOINT_GROUP_MAXNR];
 
-static int s5p_gpioint_get_group(unsigned int irq)
+static int s5p_gpioint_get_group(struct irq_data *data)
 {
-	struct gpio_chip *chip = get_irq_data(irq);
+	struct gpio_chip *chip = irq_data_get_irq_data(data);
 	struct s3c_gpio_chip *s3c_chip = container_of(chip,
 			struct s3c_gpio_chip, chip);
 	int group;
@@ -44,22 +44,22 @@
 	return group;
 }
 
-static int s5p_gpioint_get_offset(unsigned int irq)
+static int s5p_gpioint_get_offset(struct irq_data *data)
 {
-	struct gpio_chip *chip = get_irq_data(irq);
+	struct gpio_chip *chip = irq_data_get_irq_data(data);
 	struct s3c_gpio_chip *s3c_chip = container_of(chip,
 			struct s3c_gpio_chip, chip);
 
-	return irq - s3c_chip->irq_base;
+	return data->irq - s3c_chip->irq_base;
 }
 
-static void s5p_gpioint_ack(unsigned int irq)
+static void s5p_gpioint_ack(struct irq_data *data)
 {
 	int group, offset, pend_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	pend_offset = group << 2;
 
 	value = __raw_readl(S5P_GPIOREG(GPIOINT_PEND_OFFSET) + pend_offset);
@@ -67,13 +67,13 @@
 	__raw_writel(value, S5P_GPIOREG(GPIOINT_PEND_OFFSET) + pend_offset);
 }
 
-static void s5p_gpioint_mask(unsigned int irq)
+static void s5p_gpioint_mask(struct irq_data *data)
 {
 	int group, offset, mask_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	mask_offset = group << 2;
 
 	value = __raw_readl(S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
@@ -81,13 +81,13 @@
 	__raw_writel(value, S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
 }
 
-static void s5p_gpioint_unmask(unsigned int irq)
+static void s5p_gpioint_unmask(struct irq_data *data)
 {
 	int group, offset, mask_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	mask_offset = group << 2;
 
 	value = __raw_readl(S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
@@ -95,19 +95,19 @@
 	__raw_writel(value, S5P_GPIOREG(GPIOINT_MASK_OFFSET) + mask_offset);
 }
 
-static void s5p_gpioint_mask_ack(unsigned int irq)
+static void s5p_gpioint_mask_ack(struct irq_data *data)
 {
-	s5p_gpioint_mask(irq);
-	s5p_gpioint_ack(irq);
+	s5p_gpioint_mask(data);
+	s5p_gpioint_ack(data);
 }
 
-static int s5p_gpioint_set_type(unsigned int irq, unsigned int type)
+static int s5p_gpioint_set_type(struct irq_data *data, unsigned int type)
 {
 	int group, offset, con_offset;
 	unsigned int value;
 
-	group = s5p_gpioint_get_group(irq);
-	offset = s5p_gpioint_get_offset(irq);
+	group = s5p_gpioint_get_group(data);
+	offset = s5p_gpioint_get_offset(data);
 	con_offset = group << 2;
 
 	switch (type) {
@@ -142,11 +142,11 @@
 
 struct irq_chip s5p_gpioint = {
 	.name		= "s5p_gpioint",
-	.ack		= s5p_gpioint_ack,
-	.mask		= s5p_gpioint_mask,
-	.mask_ack	= s5p_gpioint_mask_ack,
-	.unmask		= s5p_gpioint_unmask,
-	.set_type	= s5p_gpioint_set_type,
+	.irq_ack	= s5p_gpioint_ack,
+	.irq_mask	= s5p_gpioint_mask,
+	.irq_mask_ack	= s5p_gpioint_mask_ack,
+	.irq_unmask	= s5p_gpioint_unmask,
+	.irq_set_type	= s5p_gpioint_set_type,
 };
 
 static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c
index dc33b9e..5259ad4 100644
--- a/arch/arm/plat-s5p/irq-pm.c
+++ b/arch/arm/plat-s5p/irq-pm.c
@@ -37,14 +37,14 @@
 unsigned long s3c_irqwake_intallow	= 0x00000006L;
 unsigned long s3c_irqwake_eintallow	= 0xffffffffL;
 
-int s3c_irq_wake(unsigned int irqno, unsigned int state)
+int s3c_irq_wake(struct irq_data *data, unsigned int state)
 {
 	unsigned long irqbit;
 
-	switch (irqno) {
+	switch (data->irq) {
 	case IRQ_RTC_TIC:
 	case IRQ_RTC_ALARM:
-		irqbit = 1 << (irqno + 1 - IRQ_RTC_ALARM);
+		irqbit = 1 << (data->irq + 1 - IRQ_RTC_ALARM);
 		if (!state)
 			s3c_irqwake_intmask |= irqbit;
 		else
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
new file mode 100644
index 0000000..ffe8a48
--- /dev/null
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -0,0 +1,326 @@
+/* linux/arch/arm/plat-s5p/sysmmu.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <mach/map.h>
+#include <mach/regs-sysmmu.h>
+#include <mach/sysmmu.h>
+
+struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM];
+
+void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp)
+{
+	unsigned int reg_mmu_ctrl;
+	unsigned int reg_mmu_status;
+	unsigned int reg_pt_base_addr;
+	unsigned int reg_int_status;
+	unsigned int reg_page_ft_addr;
+
+	reg_int_status = __raw_readl(sysmmuconp->regs + S5P_INT_STATUS);
+	reg_mmu_ctrl = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg_mmu_status = __raw_readl(sysmmuconp->regs + S5P_MMU_STATUS);
+	reg_pt_base_addr = __raw_readl(sysmmuconp->regs + S5P_PT_BASE_ADDR);
+	reg_page_ft_addr = __raw_readl(sysmmuconp->regs + S5P_PAGE_FAULT_ADDR);
+
+	printk(KERN_INFO "%s: ips:%s\n", __func__, sysmmuconp->name);
+	printk(KERN_INFO "%s: MMU_CTRL:0x%X, ", __func__, reg_mmu_ctrl);
+	printk(KERN_INFO "MMU_STATUS:0x%X, PT_BASE_ADDR:0x%X\n", reg_mmu_status, reg_pt_base_addr);
+	printk(KERN_INFO "%s: INT_STATUS:0x%X, PAGE_FAULT_ADDR:0x%X\n", __func__, reg_int_status, reg_page_ft_addr);
+
+	switch (reg_int_status & 0xFF) {
+	case 0x1:
+		printk(KERN_INFO "%s: Page fault\n", __func__);
+		printk(KERN_INFO "%s: Virtual address causing last page fault or bus error : 0x%x\n", __func__ , reg_page_ft_addr);
+		break;
+	case 0x2:
+		printk(KERN_INFO "%s: AR multi-hit fault\n", __func__);
+		break;
+	case 0x4:
+		printk(KERN_INFO "%s: AW multi-hit fault\n", __func__);
+		break;
+	case 0x8:
+		printk(KERN_INFO "%s: Bus error\n", __func__);
+		break;
+	case 0x10:
+		printk(KERN_INFO "%s: AR Security protection fault\n", __func__);
+		break;
+	case 0x20:
+		printk(KERN_INFO "%s: AR Access protection fault\n", __func__);
+		break;
+	case 0x40:
+		printk(KERN_INFO "%s: AW Security protection fault\n", __func__);
+		break;
+	case 0x80:
+		printk(KERN_INFO "%s: AW Access protection fault\n", __func__);
+		break;
+	}
+}
+
+static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
+{
+	unsigned int i;
+	unsigned int reg_int_status;
+	struct sysmmu_controller *sysmmuconp;
+
+	for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
+		sysmmuconp = &s5p_sysmmu_cntlrs[i];
+
+		if (sysmmuconp->enable == true) {
+			reg_int_status = __raw_readl(sysmmuconp->regs + S5P_INT_STATUS);
+
+			if (reg_int_status & 0xFF)
+				s5p_sysmmu_register(sysmmuconp);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+int s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
+{
+	struct sysmmu_controller *sysmmuconp = NULL;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	/* Set sysmmu page table base address */
+	__raw_writel(pgd, sysmmuconp->regs + S5P_PT_BASE_ADDR);
+
+	if (s5p_sysmmu_tlb_invalidate(ips) != 0)
+		printk(KERN_ERR "failed s5p_sysmmu_tlb_invalidate\n");
+
+	return 0;
+}
+
+static int s5p_sysmmu_set_tablebase(sysmmu_ips ips)
+{
+	unsigned int pg;
+	struct sysmmu_controller *sysmmuconp;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	__asm__("mrc	p15, 0, %0, c2, c0, 0"
+		: "=r" (pg) : : "cc");
+	pg &= ~0x3fff;
+
+	printk(KERN_INFO "%s: CP15 TTBR0 : 0x%x\n", __func__, pg);
+
+	/* Set sysmmu page table base address */
+	__raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR);
+
+	return 0;
+}
+
+int s5p_sysmmu_enable(sysmmu_ips ips)
+{
+	unsigned int reg;
+
+	struct sysmmu_controller *sysmmuconp;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	s5p_sysmmu_set_tablebase(ips);
+
+	/* replacement policy : LRU */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CFG);
+	reg |= 0x1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CFG);
+
+	/* Enable interrupt, Enable MMU */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg |= (0x1 << 2) | (0x1 << 0);
+
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	sysmmuconp->enable = true;
+
+	return 0;
+}
+
+int s5p_sysmmu_disable(sysmmu_ips ips)
+{
+	unsigned int reg;
+
+	struct sysmmu_controller *sysmmuconp = NULL;
+
+	if (ips >= S5P_SYSMMU_TOTAL_IPNUM) {
+		printk(KERN_ERR "failed to get ips parameter\n");
+		return 1;
+	}
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CFG);
+
+	/* replacement policy : LRU */
+	reg |= 0x1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CFG);
+
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+
+	/* Disable MMU */
+	reg &= ~0x1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	sysmmuconp->enable = false;
+
+	return 0;
+}
+
+int s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
+{
+	unsigned int reg;
+	struct sysmmu_controller *sysmmuconp = NULL;
+
+	sysmmuconp = &s5p_sysmmu_cntlrs[ips];
+
+	if (sysmmuconp == NULL) {
+		printk(KERN_ERR "failed to get ip's sysmmu info\n");
+		return 1;
+	}
+
+	/* block the MMU while flushing the TLB */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg |= 0x1 << 1;
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	/* flush all TLB entries */
+	__raw_writel(0x1, sysmmuconp->regs + S5P_MMU_FLUSH);
+
+	/* unblock the MMU after the TLB flush */
+	reg = __raw_readl(sysmmuconp->regs + S5P_MMU_CTRL);
+	reg &= ~(0x1 << 1);
+	__raw_writel(reg, sysmmuconp->regs + S5P_MMU_CTRL);
+
+	return 0;
+}
+
+static int s5p_sysmmu_probe(struct platform_device *pdev)
+{
+	int i;
+	int ret;
+	struct resource *res;
+	struct sysmmu_controller *sysmmuconp;
+	sysmmu_ips ips;
+
+	for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
+		sysmmuconp = &s5p_sysmmu_cntlrs[i];
+		if (sysmmuconp == NULL) {
+			printk(KERN_ERR "failed to get ip's sysmmu info\n");
+			ret = -ENOENT;
+			goto err_res;
+		}
+
+		sysmmuconp->name = sysmmu_ips_name[i];
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			printk(KERN_ERR "failed to get sysmmu resource\n");
+			ret = -ENODEV;
+			goto err_res;
+		}
+
+		sysmmuconp->mem = request_mem_region(res->start,
+				((res->end) - (res->start)) + 1, pdev->name);
+		if (!sysmmuconp->mem) {
+			pr_err("failed to request sysmmu memory region\n");
+			ret = -EBUSY;
+			goto err_res;
+		}
+
+		sysmmuconp->regs = ioremap(res->start, res->end - res->start + 1);
+		if (!sysmmuconp->regs) {
+			pr_err("failed to sysmmu ioremap\n");
+			ret = -ENXIO;
+			goto err_reg;
+		}
+
+		sysmmuconp->irq = platform_get_irq(pdev, i);
+		if (sysmmuconp->irq <= 0) {
+			pr_err("failed to get sysmmu irq resource\n");
+			ret = -ENOENT;
+			goto err_map;
+		}
+
+		ret = request_irq(sysmmuconp->irq, s5p_sysmmu_irq, IRQF_DISABLED, pdev->name, sysmmuconp);
+		if (ret) {
+			pr_err("failed to request irq\n");
+			ret = -ENOENT;
+			goto err_map;
+		}
+
+		ips = (sysmmu_ips)i;
+
+		sysmmuconp->ips = ips;
+	}
+
+	return 0;
+
+err_map:
+	iounmap(sysmmuconp->regs);
+err_reg:
+	release_mem_region(res->start, (res->end) - (res->start) + 1);
+err_res:
+	return ret;
+}
+
+static int s5p_sysmmu_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+int s5p_sysmmu_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+int s5p_sysmmu_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+const struct dev_pm_ops s5p_sysmmu_pm_ops = {
+	.runtime_suspend	= s5p_sysmmu_runtime_suspend,
+	.runtime_resume		= s5p_sysmmu_runtime_resume,
+};
+
+static struct platform_driver s5p_sysmmu_driver = {
+	.probe		= s5p_sysmmu_probe,
+	.remove		= s5p_sysmmu_remove,
+	.driver		= {
+		.owner		= THIS_MODULE,
+		.name		= "s5p-sysmmu",
+		.pm		= &s5p_sysmmu_pm_ops,
+	}
+};
+
+static int __init s5p_sysmmu_init(void)
+{
+	return platform_driver_register(&s5p_sysmmu_driver);
+}
+arch_initcall(s5p_sysmmu_init);
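A rough sketch of how a client would drive one of these instances; SYSMMU_MDMA stands in for whichever sysmmu_ips enumerator (declared in mach/sysmmu.h, not shown in this hunk) the caller owns:

	#include <mach/sysmmu.h>

	static int foo_iommu_attach(unsigned long pgd_phys)
	{
		/* power the instance up: LRU replacement, interrupt and MMU enabled */
		if (s5p_sysmmu_enable(SYSMMU_MDMA))
			return -EIO;

		/*
		 * Point the System MMU at the caller's first-level page table;
		 * this also flushes the TLB via s5p_sysmmu_tlb_invalidate().
		 */
		if (s5p_sysmmu_set_tablebase_pgd(SYSMMU_MDMA, pgd_phys))
			return -EIO;

		return 0;
	}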
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index dcd6eff4e..32be05c 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -95,6 +95,12 @@
 	help
 	  Internal configuration to enable the correct GPIO pull helper
 
+config S3C_GPIO_PULL_S3C2443
+	bool
+	select S3C_GPIO_PULL_UPDOWN
+	help
+	  Internal configuration to enable the correct GPIO pull helper for S3C2443-style GPIO
+
 config S3C_GPIO_PULL_DOWN
 	bool
 	help
@@ -333,4 +339,12 @@
 	  and above. This code allows a set of interrupt to wakeup-mask
 	  mappings. See <plat/wakeup-mask.h>
 
+comment "Power Domain"
+
+config SAMSUNG_PD
+	bool "Samsung Power Domain"
+	depends on PM_RUNTIME
+	help
+	  Say Y here if you want to control Power Domains through Runtime PM.
+
 endif
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index afcce474..29932f8 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -17,6 +17,7 @@
 obj-y				+= pwm-clock.o
 obj-y				+= gpio.o
 obj-y				+= gpio-config.o
+obj-y				+= dev-asocdma.o
 
 obj-$(CONFIG_SAMSUNG_GPIOLIB_4BIT)	+= gpiolib.o
 obj-$(CONFIG_SAMSUNG_CLKSRC)	+= clock-clksrc.o
@@ -73,6 +74,10 @@
 
 obj-$(CONFIG_SAMSUNG_WAKEMASK)	+= wakeup-mask.o
 
+# PD support
+
+obj-$(CONFIG_SAMSUNG_PD)	+= pd.o
+
 # PWM support
 
 obj-$(CONFIG_HAVE_PWM)		+= pwm.o
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index e8d20b0..7728928 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -39,6 +39,9 @@
 #include <linux/clk.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
+#if defined(CONFIG_DEBUG_FS)
+#include <linux/debugfs.h>
+#endif
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
@@ -447,3 +450,92 @@
 	return 0;
 }
 
+#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
+/* debugfs support to trace clock tree hierarchy and attributes */
+
+static struct dentry *clk_debugfs_root;
+
+static int clk_debugfs_register_one(struct clk *c)
+{
+	int err;
+	struct dentry *d, *child, *child_tmp;
+	struct clk *pa = c->parent;
+	char s[255];
+	char *p = s;
+
+	p += sprintf(p, "%s", c->name);
+
+	if (c->id >= 0)
+		sprintf(p, ":%d", c->id);
+
+	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
+	if (!d)
+		return -ENOMEM;
+
+	c->dent = d;
+
+	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usage);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	return 0;
+
+err_out:
+	d = c->dent;
+	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
+		debugfs_remove(child);
+	debugfs_remove(c->dent);
+	return err;
+}
+
+static int clk_debugfs_register(struct clk *c)
+{
+	int err;
+	struct clk *pa = c->parent;
+
+	if (pa && !pa->dent) {
+		err = clk_debugfs_register(pa);
+		if (err)
+			return err;
+	}
+
+	if (!c->dent) {
+		err = clk_debugfs_register_one(c);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int __init clk_debugfs_init(void)
+{
+	struct clk *c;
+	struct dentry *d;
+	int err;
+
+	d = debugfs_create_dir("clock", NULL);
+	if (!d)
+		return -ENOMEM;
+	clk_debugfs_root = d;
+
+	list_for_each_entry(c, &clocks, list) {
+		err = clk_debugfs_register(c);
+		if (err)
+			goto err_out;
+	}
+	return 0;
+
+err_out:
+	debugfs_remove_recursive(clk_debugfs_root);
+	return err;
+}
+late_initcall(clk_debugfs_init);
+
+#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
diff --git a/arch/arm/plat-samsung/dev-asocdma.c b/arch/arm/plat-samsung/dev-asocdma.c
new file mode 100644
index 0000000..a068c4f
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-asocdma.c
@@ -0,0 +1,25 @@
+/* linux/arch/arm/plat-samsung/dev-asocdma.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <plat/devs.h>
+
+static u64 audio_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device samsung_asoc_dma = {
+	.name		  = "samsung-audio",
+	.id		  = -1,
+	.dev              = {
+		.dma_mask = &audio_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	}
+};
+EXPORT_SYMBOL(samsung_asoc_dma);
diff --git a/arch/arm/plat-samsung/dev-nand.c b/arch/arm/plat-samsung/dev-nand.c
index 3a7b889..6927ae8 100644
--- a/arch/arm/plat-samsung/dev-nand.c
+++ b/arch/arm/plat-samsung/dev-nand.c
@@ -126,5 +126,3 @@
 
 	s3c_device_nand.dev.platform_data = npd;
 }
-
-EXPORT_SYMBOL_GPL(s3c_nand_set_platdata);
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c
index 236ef84..3e4bd81 100644
--- a/arch/arm/plat-samsung/dev-ts.c
+++ b/arch/arm/plat-samsung/dev-ts.c
@@ -58,4 +58,3 @@
 
 	s3c_device_ts.dev.platform_data = npd;
 }
-EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
diff --git a/arch/arm/plat-samsung/dev-uart.c b/arch/arm/plat-samsung/dev-uart.c
index 3776cd9..5928105 100644
--- a/arch/arm/plat-samsung/dev-uart.c
+++ b/arch/arm/plat-samsung/dev-uart.c
@@ -15,6 +15,8 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 
+#include <plat/devs.h>
+
 /* uart devices */
 
 static struct platform_device s3c24xx_uart_device0 = {
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 0aa32f2..1c0b040 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -278,6 +278,48 @@
 	pup &= 0x3;
 	return (__force s3c_gpio_pull_t)pup;
 }
+
+#ifdef CONFIG_S3C_GPIO_PULL_S3C2443
+int s3c_gpio_setpull_s3c2443(struct s3c_gpio_chip *chip,
+				unsigned int off, s3c_gpio_pull_t pull)
+{
+	switch (pull) {
+	case S3C_GPIO_PULL_NONE:
+		pull = 0x01;
+		break;
+	case S3C_GPIO_PULL_UP:
+		pull = 0x00;
+		break;
+	case S3C_GPIO_PULL_DOWN:
+		pull = 0x02;
+		break;
+	}
+	return s3c_gpio_setpull_updown(chip, off, pull);
+}
+
+s3c_gpio_pull_t s3c_gpio_getpull_s3c2443(struct s3c_gpio_chip *chip,
+					unsigned int off)
+{
+	s3c_gpio_pull_t pull;
+
+	pull = s3c_gpio_getpull_updown(chip, off);
+
+	switch (pull) {
+	case 0x00:
+		pull = S3C_GPIO_PULL_UP;
+		break;
+	case 0x01:
+	case 0x03:
+		pull = S3C_GPIO_PULL_NONE;
+		break;
+	case 0x02:
+		pull = S3C_GPIO_PULL_DOWN;
+		break;
+	}
+
+	return pull;
+}
+#endif
 #endif
 
 #if defined(CONFIG_S3C_GPIO_PULL_UP) || defined(CONFIG_S3C_GPIO_PULL_DOWN)
diff --git a/arch/arm/plat-samsung/gpiolib.c b/arch/arm/plat-samsung/gpiolib.c
index c354089..ea37c04 100644
--- a/arch/arm/plat-samsung/gpiolib.c
+++ b/arch/arm/plat-samsung/gpiolib.c
@@ -197,3 +197,10 @@
 		s3c_gpiolib_add(chip);
 	}
 }
+
+void __init samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
+					   int nr_chips)
+{
+	for (; nr_chips > 0; nr_chips--, chip++)
+		s3c_gpiolib_add(chip);
+}
diff --git a/arch/arm/plat-samsung/include/plat/audio.h b/arch/arm/plat-samsung/include/plat/audio.h
index 7712ff6..a0826ed 100644
--- a/arch/arm/plat-samsung/include/plat/audio.h
+++ b/arch/arm/plat-samsung/include/plat/audio.h
@@ -25,10 +25,34 @@
 #define S5PC100_SPDIF_GPG3 1
 extern void s5pc100_spdif_setup_gpio(int);
 
+struct samsung_i2s {
+/* If the Primary DAI has 5.1 Channels */
+#define QUIRK_PRI_6CHAN		(1 << 0)
+/* If the I2S block has a Stereo Overlay Channel */
+#define QUIRK_SEC_DAI		(1 << 1)
+/*
+ * If the I2S block has no internal prescaler or MUX (I2SMOD[10] bit),
+ * the machine driver must provide a suitably configured clock to the I2S block.
+ */
+#define QUIRK_NO_MUXPSR		(1 << 2)
+#define QUIRK_NEED_RSTCLR	(1 << 3)
+	/* Quirks of the I2S controller */
+	u32 quirks;
+
+	/*
+	 * Array of clock names that can be used to generate I2S signals.
+	 * Also corresponds to clocks of I2SMOD[10]
+	 */
+	const char **src_clk;
+};
+
 /**
  * struct s3c_audio_pdata - common platform data for audio device drivers
  * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode
  */
 struct s3c_audio_pdata {
 	int (*cfg_gpio)(struct platform_device *);
+	union {
+		struct samsung_i2s i2s;
+	} type;
 };
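A sketch of how a machine file might fill in the new i2s member for the primary controller; the clock names and the GPIO helper are placeholders, not defined by this patch:

	#include <linux/platform_device.h>
	#include <plat/audio.h>

	static int board_i2s_cfg_gpio(struct platform_device *pdev)
	{
		/* pin-mux setup for the I2S lines would go here */
		return 0;
	}

	/* candidate root clocks for I2SMOD[10]; the names are illustrative */
	static const char *board_i2s_src_clk[] = { "i2s_cdclk0", "fout_epll" };

	static struct s3c_audio_pdata board_i2s_pdata = {
		.cfg_gpio	= board_i2s_cfg_gpio,
		.type = {
			.i2s = {
				.quirks  = QUIRK_PRI_6CHAN | QUIRK_NEED_RSTCLR,
				.src_clk = board_i2s_src_clk,
			},
		},
	};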
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index 0fbcd0e..9a82b88 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -47,6 +47,9 @@
 
 	struct clk_ops		*ops;
 	int		    (*enable)(struct clk *, int enable);
+#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
+	struct dentry		*dent;	/* For visible tree hierarchy */
+#endif
 };
 
 /* other clocks which may be registered by board support */
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2d82a6c..b4d208b 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -32,7 +32,7 @@
 extern struct platform_device s3c64xx_device_spi0;
 extern struct platform_device s3c64xx_device_spi1;
 
-extern struct platform_device s3c_device_pcm;
+extern struct platform_device samsung_asoc_dma;
 
 extern struct platform_device s3c64xx_device_pcm0;
 extern struct platform_device s3c64xx_device_pcm1;
@@ -96,6 +96,16 @@
 extern struct platform_device s5pv210_device_iis2;
 extern struct platform_device s5pv210_device_spdif;
 
+extern struct platform_device s5pv310_device_ac97;
+extern struct platform_device s5pv310_device_pcm0;
+extern struct platform_device s5pv310_device_pcm1;
+extern struct platform_device s5pv310_device_pcm2;
+extern struct platform_device s5pv310_device_i2s0;
+extern struct platform_device s5pv310_device_i2s1;
+extern struct platform_device s5pv310_device_i2s2;
+extern struct platform_device s5pv310_device_spdif;
+extern struct platform_device s5pv310_device_pd[];
+
 extern struct platform_device s5p6442_device_pcm0;
 extern struct platform_device s5p6442_device_pcm1;
 extern struct platform_device s5p6442_device_iis0;
@@ -106,6 +116,8 @@
 extern struct platform_device s5p6440_device_iis;
 
 extern struct platform_device s5p6450_device_iis0;
+extern struct platform_device s5p6450_device_iis1;
+extern struct platform_device s5p6450_device_iis2;
 extern struct platform_device s5p6450_device_pcm0;
 
 extern struct platform_device s5pc100_device_ac97;
@@ -122,6 +134,11 @@
 extern struct platform_device s5p_device_fimc1;
 extern struct platform_device s5p_device_fimc2;
 
+extern struct platform_device s5p_device_mipi_csis0;
+extern struct platform_device s5p_device_mipi_csis1;
+
+extern struct platform_device s5pv310_device_sysmmu;
+
 /* s3c2440 specific devices */
 
 #ifdef CONFIG_CPU_S3C2440
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
index 0d2c570..5603db0 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
@@ -244,7 +244,7 @@
  * This helper function reads the state of the pull-{up,down} resistor for the
 * given GPIO in the same case as s3c_gpio_setpull_updown.
 */
-extern s3c_gpio_pull_t s3c_gpio_getpull_s3c24xx(struct s3c_gpio_chip *chip,
+extern s3c_gpio_pull_t s3c_gpio_getpull_s3c2443(struct s3c_gpio_chip *chip,
 						unsigned int off);
 
 #endif /* __PLAT_GPIO_CFG_HELPERS_H */
diff --git a/arch/arm/plat-samsung/include/plat/gpio-core.h b/arch/arm/plat-samsung/include/plat/gpio-core.h
index 13a22b8..dac35d0 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-core.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-core.h
@@ -118,6 +118,8 @@
 					   int nr_chips);
 extern void samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
 					    int nr_chips);
+extern void samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
+					   int nr_chips);
 
 extern void samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip);
 extern void samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip);
diff --git a/arch/arm/plat-samsung/include/plat/pd.h b/arch/arm/plat-samsung/include/plat/pd.h
new file mode 100644
index 0000000..5f0ad85
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/pd.h
@@ -0,0 +1,30 @@
+/* linux/arch/arm/plat-samsung/include/plat/pd.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_PLAT_SAMSUNG_PD_H
+#define __ASM_PLAT_SAMSUNG_PD_H __FILE__
+
+struct samsung_pd_info {
+	int (*enable)(struct device *dev);
+	int (*disable)(struct device *dev);
+	void __iomem *base;
+};
+
+enum s5pv310_pd_block {
+	PD_MFC,
+	PD_G3D,
+	PD_LCD0,
+	PD_LCD1,
+	PD_TV,
+	PD_CAM,
+	PD_GPS
+};
+
+#endif /* __ASM_PLAT_SAMSUNG_PD_H */
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 245836d..30518cc 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -15,6 +15,10 @@
  * management
 */
 
+#include <linux/irq.h>
+
+struct sys_device;
+
 #ifdef CONFIG_PM
 
 extern __init int s3c_pm_init(void);
@@ -100,7 +104,7 @@
 extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
 
 #ifdef CONFIG_PM
-extern int s3c_irqext_wake(unsigned int irqno, unsigned int state);
+extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
 extern int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state);
 extern int s3c24xx_irq_resume(struct sys_device *dev);
 #else
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 85853f8..5a41a0b 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -107,6 +107,8 @@
 
 /* Helper function availablity */
 
+extern void s3c2416_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
+extern void s3c2416_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
 extern void s3c64xx_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
 extern void s3c64xx_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
 extern void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
@@ -122,6 +124,39 @@
 extern void s5pv310_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
 extern void s5pv310_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
 
+/* S3C2416 SDHCI setup */
+
+#ifdef CONFIG_S3C2416_SETUP_SDHCI
+extern char *s3c2416_hsmmc_clksrcs[4];
+
+extern void s3c2416_setup_sdhci_cfg_card(struct platform_device *dev,
+					   void __iomem *r,
+					   struct mmc_ios *ios,
+					   struct mmc_card *card);
+
+static inline void s3c2416_default_sdhci0(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC
+	s3c_hsmmc0_def_platdata.clocks = s3c2416_hsmmc_clksrcs;
+	s3c_hsmmc0_def_platdata.cfg_gpio = s3c2416_setup_sdhci0_cfg_gpio;
+	s3c_hsmmc0_def_platdata.cfg_card = s3c2416_setup_sdhci_cfg_card;
+#endif /* CONFIG_S3C_DEV_HSMMC */
+}
+
+static inline void s3c2416_default_sdhci1(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC1
+	s3c_hsmmc1_def_platdata.clocks = s3c2416_hsmmc_clksrcs;
+	s3c_hsmmc1_def_platdata.cfg_gpio = s3c2416_setup_sdhci1_cfg_gpio;
+	s3c_hsmmc1_def_platdata.cfg_card = s3c2416_setup_sdhci_cfg_card;
+#endif /* CONFIG_S3C_DEV_HSMMC1 */
+}
+
+#else
+static inline void s3c2416_default_sdhci0(void) { }
+static inline void s3c2416_default_sdhci1(void) { }
+
+#endif /* CONFIG_S3C2416_SETUP_SDHCI */
 /* S3C64XX SDHCI setup */
 
 #ifdef CONFIG_S3C64XX_SETUP_SDHCI
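For context, a machine init function picks these defaults up roughly as below; smdk2416_machine_init is a hypothetical example, not part of this hunk:

	#include <linux/init.h>
	#include <plat/sdhci.h>

	static void __init smdk2416_machine_init(void)
	{
		/* route the S3C2416 clock list and cfg_gpio/cfg_card helpers into the pdata */
		s3c2416_default_sdhci0();
		s3c2416_default_sdhci1();

		/* the HSMMC platform devices are then registered with the rest
		 * of the board devices, e.g. via platform_add_devices() */
	}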
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 4f8c102..4e77035 100644
--- a/arch/arm/plat-samsung/irq-uart.c
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -28,9 +28,9 @@
  * are consecutive when looking up the interrupt in the demux routines.
  */
 
-static inline void __iomem *s3c_irq_uart_base(unsigned int irq)
+static inline void __iomem *s3c_irq_uart_base(struct irq_data *data)
 {
-	struct s3c_uart_irq *uirq = get_irq_chip_data(irq);
+	struct s3c_uart_irq *uirq = irq_data_get_irq_chip_data(data);
 	return uirq->regs;
 }
 
@@ -39,10 +39,10 @@
 	return irq & 3;
 }
 
-static void s3c_irq_uart_mask(unsigned int irq)
+static void s3c_irq_uart_mask(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 	u32 reg;
 
 	reg = __raw_readl(regs + S3C64XX_UINTM);
@@ -50,10 +50,10 @@
 	__raw_writel(reg, regs + S3C64XX_UINTM);
 }
 
-static void s3c_irq_uart_maskack(unsigned int irq)
+static void s3c_irq_uart_maskack(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 	u32 reg;
 
 	reg = __raw_readl(regs + S3C64XX_UINTM);
@@ -62,10 +62,10 @@
 	__raw_writel(1 << bit, regs + S3C64XX_UINTP);
 }
 
-static void s3c_irq_uart_unmask(unsigned int irq)
+static void s3c_irq_uart_unmask(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 	u32 reg;
 
 	reg = __raw_readl(regs + S3C64XX_UINTM);
@@ -73,17 +73,17 @@
 	__raw_writel(reg, regs + S3C64XX_UINTM);
 }
 
-static void s3c_irq_uart_ack(unsigned int irq)
+static void s3c_irq_uart_ack(struct irq_data *data)
 {
-	void __iomem *regs = s3c_irq_uart_base(irq);
-	unsigned int bit = s3c_irq_uart_bit(irq);
+	void __iomem *regs = s3c_irq_uart_base(data);
+	unsigned int bit = s3c_irq_uart_bit(data->irq);
 
 	__raw_writel(1 << bit, regs + S3C64XX_UINTP);
 }
 
 static void s3c_irq_demux_uart(unsigned int irq, struct irq_desc *desc)
 {
-	struct s3c_uart_irq *uirq = desc->handler_data;
+	struct s3c_uart_irq *uirq = desc->irq_data.handler_data;
 	u32 pend = __raw_readl(uirq->regs + S3C64XX_UINTP);
 	int base = uirq->base_irq;
 
@@ -99,10 +99,10 @@
 
 static struct irq_chip s3c_irq_uart = {
 	.name		= "s3c-uart",
-	.mask		= s3c_irq_uart_mask,
-	.unmask		= s3c_irq_uart_unmask,
-	.mask_ack	= s3c_irq_uart_maskack,
-	.ack		= s3c_irq_uart_ack,
+	.irq_mask	= s3c_irq_uart_mask,
+	.irq_unmask	= s3c_irq_uart_unmask,
+	.irq_mask_ack	= s3c_irq_uart_maskack,
+	.irq_ack	= s3c_irq_uart_ack,
 };
 
 static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
@@ -124,7 +124,7 @@
 		set_irq_flags(irq, IRQF_VALID);
 	}
 
-	desc->handler_data = uirq;
+	desc->irq_data.handler_data = uirq;
 	set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
 }
 
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index 0270519..dd8692a 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -24,43 +24,46 @@
 
 static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
 {
-	generic_handle_irq((int)desc->handler_data);
+	generic_handle_irq((int)desc->irq_data.handler_data);
 }
 
 /* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
 
-static void s3c_irq_timer_mask(unsigned int irq)
+static void s3c_irq_timer_mask(struct irq_data *data)
 {
 	u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+	u32 mask = (u32)data->chip_data;
 
 	reg &= 0x1f;  /* mask out pending interrupts */
-	reg &= ~(1 << (irq - IRQ_TIMER0));
+	reg &= ~mask;
 	__raw_writel(reg, S3C64XX_TINT_CSTAT);
 }
 
-static void s3c_irq_timer_unmask(unsigned int irq)
+static void s3c_irq_timer_unmask(struct irq_data *data)
 {
 	u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+	u32 mask = (u32)data->chip_data;
 
 	reg &= 0x1f;  /* mask out pending interrupts */
-	reg |= 1 << (irq - IRQ_TIMER0);
+	reg |= mask;
 	__raw_writel(reg, S3C64XX_TINT_CSTAT);
 }
 
-static void s3c_irq_timer_ack(unsigned int irq)
+static void s3c_irq_timer_ack(struct irq_data *data)
 {
 	u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+	u32 mask = (u32)data->chip_data;
 
 	reg &= 0x1f;
-	reg |= (1 << 5) << (irq - IRQ_TIMER0);
+	reg |= mask << 5;
 	__raw_writel(reg, S3C64XX_TINT_CSTAT);
 }
 
 static struct irq_chip s3c_irq_timer = {
 	.name		= "s3c-timer",
-	.mask		= s3c_irq_timer_mask,
-	.unmask		= s3c_irq_timer_unmask,
-	.ack		= s3c_irq_timer_ack,
+	.irq_mask	= s3c_irq_timer_mask,
+	.irq_unmask	= s3c_irq_timer_unmask,
+	.irq_ack	= s3c_irq_timer_ack,
 };
 
 /**
@@ -79,8 +82,9 @@
 	set_irq_chained_handler(parent_irq, s3c_irq_demux_vic_timer);
 
 	set_irq_chip(timer_irq, &s3c_irq_timer);
+	set_irq_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0)));
 	set_irq_handler(timer_irq, handle_level_irq);
 	set_irq_flags(timer_irq, IRQF_VALID);
 
-	desc->handler_data = (void *)timer_irq;
+	desc->irq_data.handler_data = (void *)timer_irq;
 }
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
new file mode 100644
index 0000000..efe1d56
--- /dev/null
+++ b/arch/arm/plat-samsung/pd.c
@@ -0,0 +1,95 @@
+/* linux/arch/arm/plat-samsung/pd.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Samsung Power domain support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#include <plat/pd.h>
+
+static int samsung_pd_probe(struct platform_device *pdev)
+{
+	struct samsung_pd_info *pdata = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+
+	if (!pdata) {
+		dev_err(dev, "no device data specified\n");
+		return -ENOENT;
+	}
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	dev_info(dev, "power domain registered\n");
+	return 0;
+}
+
+static int __devexit samsung_pd_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	pm_runtime_disable(dev);
+	return 0;
+}
+
+static int samsung_pd_runtime_suspend(struct device *dev)
+{
+	struct samsung_pd_info *pdata = dev->platform_data;
+	int ret = 0;
+
+	if (pdata->disable)
+		ret = pdata->disable(dev);
+
+	dev_dbg(dev, "suspended\n");
+	return ret;
+}
+
+static int samsung_pd_runtime_resume(struct device *dev)
+{
+	struct samsung_pd_info *pdata = dev->platform_data;
+	int ret = 0;
+
+	if (pdata->enable)
+		ret = pdata->enable(dev);
+
+	dev_dbg(dev, "resumed\n");
+	return ret;
+}
+
+static const struct dev_pm_ops samsung_pd_pm_ops = {
+	.runtime_suspend	= samsung_pd_runtime_suspend,
+	.runtime_resume		= samsung_pd_runtime_resume,
+};
+
+static struct platform_driver samsung_pd_driver = {
+	.driver		= {
+		.name		= "samsung-pd",
+		.owner		= THIS_MODULE,
+		.pm		= &samsung_pd_pm_ops,
+	},
+	.probe		= samsung_pd_probe,
+	.remove		= __devexit_p(samsung_pd_remove),
+};
+
+static int __init samsung_pd_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&samsung_pd_driver);
+	if (ret)
+		printk(KERN_ERR "%s: failed to add PD driver\n", __func__);
+
+	return ret;
+}
+arch_initcall(samsung_pd_init);
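A sketch of the board-side counterpart this driver expects: a platform device named "samsung-pd" carrying samsung_pd_info callbacks. The register values are illustrative assumptions; only the structure layout and the PD_MFC id come from this series:

	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <plat/pd.h>

	static int board_pd_enable(struct device *dev)
	{
		struct samsung_pd_info *pdata = dev->platform_data;

		if (pdata->base)
			__raw_writel(0x7, pdata->base);	/* assumed "power up" value */
		return 0;
	}

	static int board_pd_disable(struct device *dev)
	{
		struct samsung_pd_info *pdata = dev->platform_data;

		if (pdata->base)
			__raw_writel(0x0, pdata->base);	/* assumed "power down" value */
		return 0;
	}

	static struct samsung_pd_info board_pd_mfc_data = {
		.enable		= board_pd_enable,
		.disable	= board_pd_disable,
		.base		= NULL,	/* point at the domain's PMU configuration register */
	};

	static struct platform_device board_pd_mfc = {
		.name	= "samsung-pd",
		.id	= PD_MFC,
		.dev	= {
			.platform_data = &board_pd_mfc_data,
		},
	};

Runtime suspend/resume of this device then lands in the disable()/enable() callbacks through the dev_pm_ops registered above.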
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 27cfca5..02d531f 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -136,15 +136,15 @@
 unsigned long s3c_irqwake_intmask	= 0xffffffffL;
 unsigned long s3c_irqwake_eintmask	= 0xffffffffL;
 
-int s3c_irqext_wake(unsigned int irqno, unsigned int state)
+int s3c_irqext_wake(struct irq_data *data, unsigned int state)
 {
-	unsigned long bit = 1L << IRQ_EINT_BIT(irqno);
+	unsigned long bit = 1L << IRQ_EINT_BIT(data->irq);
 
 	if (!(s3c_irqwake_eintallow & bit))
 		return -ENOENT;
 
 	printk(KERN_INFO "wake %s for irq %d\n",
-	       state ? "enabled" : "disabled", irqno);
+	       state ? "enabled" : "disabled", data->irq);
 
 	if (!state)
 		s3c_irqwake_eintmask |= bit;
@@ -355,7 +355,7 @@
 	s3c_pm_check_cleanup();
 }
 
-static struct platform_suspend_ops s3c_pm_ops = {
+static const struct platform_suspend_ops s3c_pm_ops = {
 	.enter		= s3c_pm_enter,
 	.prepare	= s3c_pm_prepare,
 	.finish		= s3c_pm_finish,
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
index 99ba678..6dd455b 100644
--- a/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -24,10 +24,10 @@
 {
 	void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
 
-	while (readl(base + UART01x_FR) & UART01x_FR_TXFF)
+	while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF)
 		barrier();
 
-	writel(c, base + UART01x_DR);
+	writel_relaxed(c, base + UART01x_DR);
 }
 
 static inline void flush(void)
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
index 09e9372..8c8b24d 100644
--- a/arch/arm/plat-spear/include/plat/vmalloc.h
+++ b/arch/arm/plat-spear/include/plat/vmalloc.h
@@ -14,6 +14,6 @@
 #ifndef __PLAT_VMALLOC_H
 #define __PLAT_VMALLOC_H
 
-#define VMALLOC_END		0xF0000000
+#define VMALLOC_END		0xF0000000UL
 
 #endif /* __PLAT_VMALLOC_H */
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
index 2172d69..7818903 100644
--- a/arch/arm/plat-spear/shirq.c
+++ b/arch/arm/plat-spear/shirq.c
@@ -20,10 +20,10 @@
 struct spear_shirq *shirq;
 static DEFINE_SPINLOCK(lock);
 
-static void shirq_irq_mask(unsigned irq)
+static void shirq_irq_mask(struct irq_data *d)
 {
-	struct spear_shirq *shirq = get_irq_chip_data(irq);
-	u32 val, id = irq - shirq->dev_config[0].virq;
+	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+	u32 val, id = d->irq - shirq->dev_config[0].virq;
 	unsigned long flags;
 
 	if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
@@ -39,10 +39,10 @@
 	spin_unlock_irqrestore(&lock, flags);
 }
 
-static void shirq_irq_unmask(unsigned irq)
+static void shirq_irq_unmask(struct irq_data *d)
 {
-	struct spear_shirq *shirq = get_irq_chip_data(irq);
-	u32 val, id = irq - shirq->dev_config[0].virq;
+	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+	u32 val, id = d->irq - shirq->dev_config[0].virq;
 	unsigned long flags;
 
 	if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
@@ -60,9 +60,9 @@
 
 static struct irq_chip shirq_chip = {
 	.name		= "spear_shirq",
-	.ack		= shirq_irq_mask,
-	.mask		= shirq_irq_mask,
-	.unmask		= shirq_irq_unmask,
+	.irq_ack	= shirq_irq_mask,
+	.irq_mask	= shirq_irq_mask,
+	.irq_unmask	= shirq_irq_unmask,
 };
 
 static void shirq_handler(unsigned irq, struct irq_desc *desc)
@@ -70,7 +70,7 @@
 	u32 i, val, mask;
 	struct spear_shirq *shirq = get_irq_data(irq);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	while ((val = readl(shirq->regs.base + shirq->regs.status_reg) &
 				shirq->regs.status_reg_mask)) {
 		for (i = 0; (i < shirq->dev_count) && val; i++) {
@@ -92,7 +92,7 @@
 			writel(mask, shirq->regs.base + shirq->regs.clear_reg);
 		}
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 int spear_shirq_register(struct spear_shirq *shirq)
diff --git a/arch/arm/plat-stmp3xxx/irq.c b/arch/arm/plat-stmp3xxx/irq.c
index 20de4e0..aaa1686 100644
--- a/arch/arm/plat-stmp3xxx/irq.c
+++ b/arch/arm/plat-stmp3xxx/irq.c
@@ -34,7 +34,7 @@
 
 	/* Disable all interrupts initially */
 	for (i = 0; i < NR_REAL_IRQS; i++) {
-		chip->mask(i);
+		chip->irq_mask(irq_get_irq_data(i));
 		set_irq_chip(i, chip);
 		set_irq_handler(i, handle_level_irq);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
diff --git a/arch/arm/plat-stmp3xxx/pinmux.c b/arch/arm/plat-stmp3xxx/pinmux.c
index 6d6b1a4..66d5bac 100644
--- a/arch/arm/plat-stmp3xxx/pinmux.c
+++ b/arch/arm/plat-stmp3xxx/pinmux.c
@@ -351,27 +351,27 @@
 }
 EXPORT_SYMBOL(stmp3xxx_release_pin_group);
 
-static int stmp3xxx_irq_to_gpio(int irq,
+static int stmp3xxx_irq_data_to_gpio(struct irq_data *d,
 	struct stmp3xxx_pinmux_bank **bank, unsigned *gpio)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 
 	for (pm = pinmux_banks; pm < pinmux_banks + NR_BANKS; pm++)
-		if (pm->virq <= irq && irq < pm->virq + 32) {
+		if (pm->virq <= d->irq && d->irq < pm->virq + 32) {
 			*bank = pm;
-			*gpio = irq - pm->virq;
+			*gpio = d->irq - pm->virq;
 			return 0;
 		}
 	return -ENOENT;
 }
 
-static int stmp3xxx_set_irqtype(unsigned irq, unsigned type)
+static int stmp3xxx_set_irqtype(struct irq_data *d, unsigned type)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 	int l, p;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
 		l = 0; p = 1; break;
@@ -398,33 +398,33 @@
 	return 0;
 }
 
-static void stmp3xxx_pin_ack_irq(unsigned irq)
+static void stmp3xxx_pin_ack_irq(struct irq_data *d)
 {
 	u32 stat;
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	stat = __raw_readl(pm->irqstat) & (1 << gpio);
 	stmp3xxx_clearl(stat, pm->irqstat);
 }
 
-static void stmp3xxx_pin_mask_irq(unsigned irq)
+static void stmp3xxx_pin_mask_irq(struct irq_data *d)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	stmp3xxx_clearl(1 << gpio, pm->irqen);
 	stmp3xxx_clearl(1 << gpio, pm->pin2irq);
 }
 
-static void stmp3xxx_pin_unmask_irq(unsigned irq)
+static void stmp3xxx_pin_unmask_irq(struct irq_data *d)
 {
 	struct stmp3xxx_pinmux_bank *pm;
 	unsigned gpio;
 
-	stmp3xxx_irq_to_gpio(irq, &pm, &gpio);
+	stmp3xxx_irq_data_to_gpio(d, &pm, &gpio);
 	stmp3xxx_setl(1 << gpio, pm->irqen);
 	stmp3xxx_setl(1 << gpio, pm->pin2irq);
 }
@@ -503,10 +503,10 @@
 }
 
 static struct irq_chip gpio_irq_chip = {
-	.ack	= stmp3xxx_pin_ack_irq,
-	.mask	= stmp3xxx_pin_mask_irq,
-	.unmask	= stmp3xxx_pin_unmask_irq,
-	.set_type = stmp3xxx_set_irqtype,
+	.irq_ack	= stmp3xxx_pin_ack_irq,
+	.irq_mask	= stmp3xxx_pin_mask_irq,
+	.irq_unmask	= stmp3xxx_pin_unmask_irq,
+	.irq_set_type	= stmp3xxx_set_irqtype,
 };
 
 int __init stmp3xxx_pinmux_init(int virtual_irq_start)
@@ -533,7 +533,7 @@
 		pm->virq = virtual_irq_start + b * 32;
 
 		for (virq = pm->virq; virq < pm->virq + 32; virq++) {
-			gpio_irq_chip.mask(virq);
+			gpio_irq_chip.irq_mask(irq_get_irq_data(virq));
 			set_irq_chip(virq, &gpio_irq_chip);
 			set_irq_handler(virq, handle_level_irq);
 			set_irq_flags(virq, IRQF_VALID);
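When only a bare irq number is at hand, as in the init loops above, the new callbacks are reached by looking up the irq_data first with irq_get_irq_data(). A hedged sketch with demo names; it assumes each irq already has a chip with .irq_mask installed:

#include <linux/init.h>
#include <linux/irq.h>

/* Mask a contiguous range of interrupts through their chip callbacks. */
static void __init demo_mask_irq_range(unsigned int first, unsigned int count)
{
	unsigned int irq;

	for (irq = first; irq < first + count; irq++) {
		struct irq_data *d = irq_get_irq_data(irq);

		if (d && d->chip && d->chip->irq_mask)
			d->chip->irq_mask(d);
	}
}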
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 2fea897..9d6feaa 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Sun Dec 12 23:24:27 2010
+# Last update: Mon Feb 7 08:59:27 2011
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -2240,7 +2240,7 @@
 vs_v210			MACH_VS_V210		VS_V210			2252
 vs_v212			MACH_VS_V212		VS_V212			2253
 hmt			MACH_HMT		HMT			2254
-suen3			MACH_SUEN3		SUEN3			2255
+km_kirkwood		MACH_KM_KIRKWOOD	KM_KIRKWOOD		2255
 vesper			MACH_VESPER		VESPER			2256
 str9			MACH_STR9		STR9			2257
 omap3_wl_ff		MACH_OMAP3_WL_FF	OMAP3_WL_FF		2258
@@ -2987,7 +2987,7 @@
 ea20			MACH_EA20		EA20			3002
 awm2			MACH_AWM2		AWM2			3003
 ti8148evm		MACH_TI8148EVM		TI8148EVM		3004
-tegra_seaboard		MACH_TEGRA_SEABOARD	TEGRA_SEABOARD		3005
+seaboard		MACH_SEABOARD		SEABOARD		3005
 linkstation_chlv2	MACH_LINKSTATION_CHLV2	LINKSTATION_CHLV2	3006
 tera_pro2_rack		MACH_TERA_PRO2_RACK	TERA_PRO2_RACK		3007
 rubys			MACH_RUBYS		RUBYS			3008
@@ -3190,7 +3190,7 @@
 ics_if_voip		MACH_ICS_IF_VOIP	ICS_IF_VOIP		3206
 wlf_cragg_6410		MACH_WLF_CRAGG_6410	WLF_CRAGG_6410		3207
 punica			MACH_PUNICA		PUNICA			3208
-sbc_nt250		MACH_SBC_NT250		SBC_NT250		3209
+trimslice		MACH_TRIMSLICE		TRIMSLICE		3209
 mx27_wmultra		MACH_MX27_WMULTRA	MX27_WMULTRA		3210
 mackerel		MACH_MACKEREL		MACKEREL		3211
 fa9x27			MACH_FA9X27		FA9X27			3213
@@ -3219,3 +3219,100 @@
 pcm048			MACH_PCM048		PCM048			3236
 dds			MACH_DDS		DDS			3237
 chalten_xa1		MACH_CHALTEN_XA1	CHALTEN_XA1		3238
+ts48xx			MACH_TS48XX		TS48XX			3239
+tonga2_tfttimer		MACH_TONGA2_TFTTIMER	TONGA2_TFTTIMER		3240
+whistler		MACH_WHISTLER		WHISTLER		3241
+asl_phoenix		MACH_ASL_PHOENIX	ASL_PHOENIX		3242
+at91sam9263otlite	MACH_AT91SAM9263OTLITE	AT91SAM9263OTLITE	3243
+ddplug			MACH_DDPLUG		DDPLUG			3244
+d2plug			MACH_D2PLUG		D2PLUG			3245
+kzm9d			MACH_KZM9D		KZM9D			3246
+verdi_lte		MACH_VERDI_LTE		VERDI_LTE		3247
+nanozoom		MACH_NANOZOOM		NANOZOOM		3248
+dm3730_som_lv		MACH_DM3730_SOM_LV	DM3730_SOM_LV		3249
+dm3730_torpedo		MACH_DM3730_TORPEDO	DM3730_TORPEDO		3250
+anchovy			MACH_ANCHOVY		ANCHOVY			3251
+re2rev20		MACH_RE2REV20		RE2REV20		3253
+re2rev21		MACH_RE2REV21		RE2REV21		3254
+cns21xx			MACH_CNS21XX		CNS21XX			3255
+rider			MACH_RIDER		RIDER			3257
+nsk330			MACH_NSK330		NSK330			3258
+cns2133evb		MACH_CNS2133EVB		CNS2133EVB		3259
+z3_816x_mod		MACH_Z3_816X_MOD	Z3_816X_MOD		3260
+z3_814x_mod		MACH_Z3_814X_MOD	Z3_814X_MOD		3261
+beect			MACH_BEECT		BEECT			3262
+dma_thunderbug		MACH_DMA_THUNDERBUG	DMA_THUNDERBUG		3263
+omn_at91sam9g20		MACH_OMN_AT91SAM9G20	OMN_AT91SAM9G20		3264
+mx25_e2s_uc		MACH_MX25_E2S_UC	MX25_E2S_UC		3265
+mione			MACH_MIONE		MIONE			3266
+top9000_tcu		MACH_TOP9000_TCU	TOP9000_TCU		3267
+top9000_bsl		MACH_TOP9000_BSL	TOP9000_BSL		3268
+kingdom			MACH_KINGDOM		KINGDOM			3269
+armadillo460		MACH_ARMADILLO460	ARMADILLO460		3270
+lq2			MACH_LQ2		LQ2			3271
+sweda_tms2		MACH_SWEDA_TMS2		SWEDA_TMS2		3272
+mx53_loco		MACH_MX53_LOCO		MX53_LOCO		3273
+acer_a8			MACH_ACER_A8		ACER_A8			3275
+acer_gauguin		MACH_ACER_GAUGUIN	ACER_GAUGUIN		3276
+guppy			MACH_GUPPY		GUPPY			3277
+mx61_ard		MACH_MX61_ARD		MX61_ARD		3278
+tx53			MACH_TX53		TX53			3279
+omapl138_case_a3	MACH_OMAPL138_CASE_A3	OMAPL138_CASE_A3	3280
+uemd			MACH_UEMD		UEMD			3281
+ccwmx51mut		MACH_CCWMX51MUT		CCWMX51MUT		3282
+rockhopper		MACH_ROCKHOPPER		ROCKHOPPER		3283
+nookcolor		MACH_NOOKCOLOR		NOOKCOLOR		3284
+hkdkc100		MACH_HKDKC100		HKDKC100		3285
+ts42xx			MACH_TS42XX		TS42XX			3286
+aebl			MACH_AEBL		AEBL			3287
+wario			MACH_WARIO		WARIO			3288
+gfs_spm			MACH_GFS_SPM		GFS_SPM			3289
+cm_t3730		MACH_CM_T3730		CM_T3730		3290
+isc3			MACH_ISC3		ISC3			3291
+rascal			MACH_RASCAL		RASCAL			3292
+hrefv60			MACH_HREFV60		HREFV60			3293
+tpt_2_0			MACH_TPT_2_0		TPT_2_0			3294
+pyramid_td		MACH_PYRAMID_TD		PYRAMID_TD		3295
+splendor		MACH_SPLENDOR		SPLENDOR		3296
+guf_planet		MACH_GUF_PLANET		GUF_PLANET		3297
+msm8x60_qt		MACH_MSM8X60_QT		MSM8X60_QT		3298
+htc_hd_mini		MACH_HTC_HD_MINI	HTC_HD_MINI		3299
+athene			MACH_ATHENE		ATHENE			3300
+deep_r_ek_1		MACH_DEEP_R_EK_1	DEEP_R_EK_1		3301
+vivow_ct		MACH_VIVOW_CT		VIVOW_CT		3302
+nery_1000		MACH_NERY_1000		NERY_1000		3303
+rfl109145_ssrv		MACH_RFL109145_SSRV	RFL109145_SSRV		3304
+nmh			MACH_NMH		NMH			3305
+wn802t			MACH_WN802T		WN802T			3306
+dragonet		MACH_DRAGONET		DRAGONET		3307
+geneva_b		MACH_GENEVA_B		GENEVA_B		3308
+at91sam9263desk16l	MACH_AT91SAM9263DESK16L	AT91SAM9263DESK16L	3309
+bcmhana_sv		MACH_BCMHANA_SV		BCMHANA_SV		3310
+bcmhana_tablet		MACH_BCMHANA_TABLET	BCMHANA_TABLET		3311
+koi			MACH_KOI		KOI			3312
+ts4800			MACH_TS4800		TS4800			3313
+tqma9263		MACH_TQMA9263		TQMA9263		3314
+holiday			MACH_HOLIDAY		HOLIDAY			3315
+dma_6410		MACH_DMA6410		DMA6410			3316
+pcats_overlay		MACH_PCATS_OVERLAY	PCATS_OVERLAY		3317
+hwgw6410		MACH_HWGW6410		HWGW6410		3318
+shenzhou		MACH_SHENZHOU		SHENZHOU		3319
+cwme9210		MACH_CWME9210		CWME9210		3320
+cwme9210js		MACH_CWME9210JS		CWME9210JS		3321
+pgs_v1			MACH_PGS_SITARA		PGS_SITARA		3322
+colibri_tegra2		MACH_COLIBRI_TEGRA2	COLIBRI_TEGRA2		3323
+w21			MACH_W21		W21			3324
+polysat1		MACH_POLYSAT1		POLYSAT1		3325
+dataway			MACH_DATAWAY		DATAWAY			3326
+cobral138		MACH_COBRAL138		COBRAL138		3327
+roverpcs8		MACH_ROVERPCS8		ROVERPCS8		3328
+marvelc			MACH_MARVELC		MARVELC			3329
+navefihid		MACH_NAVEFIHID		NAVEFIHID		3330
+dm365_cv100		MACH_DM365_CV100	DM365_CV100		3331
+able			MACH_ABLE		ABLE			3332
+legacy			MACH_LEGACY		LEGACY			3333
+icong			MACH_ICONG		ICONG			3334
+rover_g8		MACH_ROVER_G8		ROVER_G8		3335
+t5388p			MACH_T5388P		T5388P			3336
+dingo			MACH_DINGO		DINGO			3337
+goflexhome		MACH_GOFLEXHOME		GOFLEXHOME		3338
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 313b130..cd2062f 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -1,8 +1,8 @@
 config AVR32
 	def_bool y
-	# With EMBEDDED=n, we get lots of stuff automatically selected
+	# With EXPERT=n, we get lots of stuff automatically selected
 	# that we usually don't need on AVR32.
-	select EMBEDDED
+	select EXPERT
 	select HAVE_CLK
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index 8c6a244..659d119 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -188,7 +188,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
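The avr32 board hunks in this series all make the same fix: clk_get() reports failure with an ERR_PTR-encoded pointer, never NULL, so the result must be tested with IS_ERR(). A generic sketch of the pattern (not the board code itself):

#include <linux/clk.h>
#include <linux/err.h>

static int demo_enable_pclk(struct device *dev)
{
	struct clk *pclk = clk_get(dev, "pclk");

	if (IS_ERR(pclk))	/* failure is an ERR_PTR, not NULL */
		return PTR_ERR(pclk);

	clk_enable(pclk);
	/* ... program the peripheral here ... */
	clk_disable(pclk);
	clk_put(pclk);
	return 0;
}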
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 2adc261..6ce30fb 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -203,7 +203,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 75f19f4..86fab77 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -206,7 +206,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c
index dd009875..da14fbd 100644
--- a/arch/avr32/boards/hammerhead/setup.c
+++ b/arch/avr32/boards/hammerhead/setup.c
@@ -150,7 +150,7 @@
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
 
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/merisc/setup.c b/arch/avr32/boards/merisc/setup.c
index 623b077..e61bc94 100644
--- a/arch/avr32/boards/merisc/setup.c
+++ b/arch/avr32/boards/merisc/setup.c
@@ -134,7 +134,7 @@
 
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index 523d8e1..c4da5cb 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -162,7 +162,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 9854013..6f9ca56 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -29,6 +26,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -72,8 +70,8 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_AT24=m
 CONFIG_NETDEVICES=y
 CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
@@ -106,6 +104,7 @@
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
 CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=350
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
@@ -115,14 +114,12 @@
 CONFIG_MMC=y
 CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -130,21 +127,23 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-CONFIG_UFS_FS=y
+CONFIG_UBIFS_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -155,5 +154,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_PCBC=m
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 7ceda35..7eece0a 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -31,6 +28,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -74,8 +72,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -104,6 +104,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -127,6 +128,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -141,11 +143,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -155,7 +160,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -166,4 +170,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 7bc5b2c..387eb9d 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -30,6 +27,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -73,8 +71,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -103,6 +103,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -126,6 +127,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -140,11 +142,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -154,7 +159,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -165,4 +169,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 4bd3682..f0fe237 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -29,6 +26,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -74,6 +72,7 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=m
@@ -107,6 +106,7 @@
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
 CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=350
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
@@ -116,14 +116,12 @@
 CONFIG_MMC=y
 CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -131,21 +129,23 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-CONFIG_UFS_FS=y
+CONFIG_UBIFS_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -156,5 +156,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_PCBC=m
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index f8437ef..e4a7c1d 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -32,6 +29,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -77,8 +75,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -107,6 +107,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -130,6 +131,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -144,11 +146,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -158,7 +163,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -169,4 +173,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index 7f58f99..6f37f70 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -31,6 +28,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -76,8 +74,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -106,6 +106,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -129,6 +130,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -143,11 +145,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -157,7 +162,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -168,4 +172,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index aec4c43..4fb01f5 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -3,7 +3,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
@@ -11,7 +10,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -26,6 +25,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -35,6 +35,7 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
@@ -58,16 +59,14 @@
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
 CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_PWM=m
 CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=m
-CONFIG_EEPROM_AT24=m
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m
@@ -120,7 +119,6 @@
 CONFIG_SND_PCM_OSS=m
 # CONFIG_SND_SUPPORT_OLD_API is not set
 # CONFIG_SND_VERBOSE_PROCFS is not set
-# CONFIG_SND_DRIVERS is not set
 CONFIG_SND_AT73C213=m
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
@@ -131,16 +129,15 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_ATMEL_PWM=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -149,20 +146,23 @@
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
 CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-# CONFIG_JFFS2_FS_WRITEBUFFER is not set
 CONFIG_UBIFS_FS=y
-CONFIG_MINIX_FS=m
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
@@ -170,6 +170,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 50ba3db..9faaf9b 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -2,22 +2,15 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_AUDIT=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
-# CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -33,6 +26,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -54,18 +48,18 @@
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
+CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_PWM=m
 CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=m
-CONFIG_EEPROM_AT24=m
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m
+# CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=m
 # CONFIG_SATA_PMP is not set
 CONFIG_PATA_AT32=m
@@ -77,6 +71,7 @@
 CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_INPUT=m
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=m
 # CONFIG_MOUSE_PS2 is not set
@@ -106,7 +101,6 @@
 CONFIG_SND_AT73C213=m
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_DEBUG_FS=y
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
@@ -116,36 +110,39 @@
 CONFIG_MMC=y
 CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_ATMEL_PWM=m
-CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
-CONFIG_DW_DMAC=y
-CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-CONFIG_CRC_T10DIF=m
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 329e10b..3d2a5d8 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -1,19 +1,32 @@
 CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
-# CONFIG_FUTEX is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_TIMERFD is not set
-# CONFIG_EVENTFD is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
-# CONFIG_BLOCK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+# CONFIG_KPROBES is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BOARD_ATSTK1004=y
 # CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+CONFIG_PM=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -31,40 +44,104 @@
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_INPUT is not set
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_PWM=m
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_SSC=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+CONFIG_BLK_DEV_SR=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=m
+# CONFIG_SATA_PMP is not set
+CONFIG_PATA_AT32=m
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_INPUT=m
+CONFIG_INPUT_EVDEV=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_GPIO=m
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_SERIAL_ATMEL_PDC is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_GPIO=m
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
 CONFIG_FB=y
 CONFIG_FB_ATMEL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_LTV350QV=y
 # CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=y
-# CONFIG_USB_ETH_RNDIS is not set
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_MMC=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_ATMELMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_ATMEL_PWM=m
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_INTF_PROC is not set
 CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
-# CONFIG_PROC_PAGE_MONITOR is not set
 CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-# CONFIG_JFFS2_FS_WRITEBUFFER is not set
+CONFIG_UBIFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_FRAME_POINTER=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index dbcc1b5..1ed8f22 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -3,7 +3,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
@@ -11,7 +10,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -37,6 +36,7 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
@@ -60,15 +60,13 @@
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_DATAFLASH_OTP=y
-CONFIG_MTD_M25P80=m
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ATMEL=y
 CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_PWM=m
 CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=m
@@ -132,17 +130,17 @@
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
 CONFIG_USB_G_SERIAL=m
+CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_ATMEL_PWM=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -156,15 +154,18 @@
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
 CONFIG_UBIFS_FS=y
-CONFIG_MINIX_FS=m
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
@@ -172,7 +173,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_FIPS=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index 0c813b6..aeadc95 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -11,7 +11,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index dcc01f0..1692bee 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -12,7 +12,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 92ecd84..bc7e8ae 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -8,6 +8,7 @@
 #ifndef __ASM_AVR32_PGALLOC_H
 #define __ASM_AVR32_PGALLOC_H
 
+#include <linux/mm.h>
 #include <linux/quicklist.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
index ab608b7..244f2ac 100644
--- a/arch/avr32/include/asm/syscalls.h
+++ b/arch/avr32/include/asm/syscalls.h
@@ -15,20 +15,6 @@
 #include <linux/types.h>
 #include <linux/signal.h>
 
-/* kernel/process.c */
-asmlinkage int sys_fork(struct pt_regs *);
-asmlinkage int sys_clone(unsigned long, unsigned long,
-			 unsigned long, unsigned long,
-			 struct pt_regs *);
-asmlinkage int sys_vfork(struct pt_regs *);
-asmlinkage int sys_execve(const char __user *, char __user *__user *,
-			  char __user *__user *, struct pt_regs *);
-
-/* kernel/signal.c */
-asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
-			       struct pt_regs *);
-asmlinkage int sys_rt_sigreturn(struct pt_regs *);
-
 /* mm/cache.c */
 asmlinkage int sys_cacheflush(int, void __user *, size_t);
 
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 9c46aaa..ef5a2a0 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -367,14 +367,13 @@
 }
 
 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
-			 unsigned long parent_tidptr,
-			 unsigned long child_tidptr, struct pt_regs *regs)
+		void __user *parent_tidptr, void __user *child_tidptr,
+		struct pt_regs *regs)
 {
 	if (!newsp)
 		newsp = regs->sp;
-	return do_fork(clone_flags, newsp, regs, 0,
-		       (int __user *)parent_tidptr,
-		       (int __user *)child_tidptr);
+	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr,
+			child_tidptr);
 }
 
 asmlinkage int sys_vfork(struct pt_regs *regs)
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 668ed28..05ad291 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -35,7 +35,6 @@
 	.rating		= 50,
 	.read		= read_cycle_count,
 	.mask		= CLOCKSOURCE_MASK(32),
-	.shift		= 16,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -123,9 +122,7 @@
 
 	/* figure rate for counter */
 	counter_hz = clk_get_rate(boot_cpu_data.clk);
-	counter.mult = clocksource_hz2mult(counter_hz, counter.shift);
-
-	ret = clocksource_register(&counter);
+	ret = clocksource_register_hz(&counter, counter_hz);
 	if (ret)
 		pr_debug("timer: could not register clocksource: %d\n", ret);
 
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
index f021edf..32d680e 100644
--- a/arch/avr32/mach-at32ap/pm.c
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -176,7 +176,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops avr32_pm_ops = {
+static const struct platform_suspend_ops avr32_pm_ops = {
 	.valid	= avr32_pm_valid_state,
 	.enter	= avr32_pm_enter,
 };
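Two of the avr32 cleanups just above are worth spelling out. First, clocksource_register_hz() derives mult/shift from the counter frequency, so the driver drops its .shift field and the clocksource_hz2mult() call. Second, suspend_set_ops() accepts a const pointer in kernels of this vintage, so platform_suspend_ops tables can be declared const, as done here and in the blackfin pm.c hunk further down. A combined sketch with illustrative demo names:

#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/suspend.h>

static cycle_t demo_read_counter(struct clocksource *cs)
{
	return 0;	/* would read a free-running 32-bit hardware counter */
}

static struct clocksource demo_clksrc = {
	.name	= "demo-counter",
	.rating	= 50,
	.read	= demo_read_counter,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int demo_pm_enter(suspend_state_t state)
{
	/* enter the requested low-power state here */
	return 0;
}

static const struct platform_suspend_ops demo_pm_ops = {
	.valid	= suspend_valid_only_mem,	/* only PM_SUSPEND_MEM */
	.enter	= demo_pm_enter,
};

static int __init demo_time_pm_init(unsigned long counter_hz)
{
	int ret;

	/* mult/shift are computed internally from counter_hz */
	ret = clocksource_register_hz(&demo_clksrc, counter_hz);
	if (ret)
		return ret;

	suspend_set_ops(&demo_pm_ops);
	return 0;
}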
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 0a221d4..c09577d 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -30,6 +30,9 @@
 	select HAVE_KERNEL_LZO if RAMKERNEL
 	select HAVE_OPROFILE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select IRQ_PER_CPU if SMP
 
 config GENERIC_CSUM
 	def_bool y
@@ -44,15 +47,6 @@
 config GENERIC_FIND_NEXT_BIT
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_GPIO
 	def_bool y
 
@@ -254,11 +248,6 @@
 	depends on SMP && HOTPLUG
 	default y
 
-config IRQ_PER_CPU
-	bool
-	depends on SMP
-	default y
-
 config HAVE_LEGACY_PER_CPU_AREA
 	def_bool y
 	depends on SMP
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index c0b988e..db8d38a 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 864af5b..3e50d78 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
index 7b6a337..362f59d 100644
--- a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
+++ b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 # CONFIG_AIO is not set
 CONFIG_SLAB=y
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
index 4faa6b4..023ff0d 100644
--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 9d893eb..4e5a121 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
index 97a2767..cd0636b 100644
--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
+++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index f847743..9f8fc84 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 0e7262c..ccc432b 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 4d14a00..5666954 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index fbee9d7..ac22124 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 05dd11d..944404b 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index bcb14d1..b7c8451 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
index 4cf4510..7e67ba3 100644
--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 843aaa5..141e593 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index dae7adf..97ebe09 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -6,7 +6,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index f341424..c245754 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index 8c7e08f..baf1c15 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -7,7 +7,7 @@
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index bd3cb76..707cbf8 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 82224f3..4596935 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 433598c..df26758 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index ded7d84..6c7b215 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
index 0ebc7d9..f503136 100644
--- a/arch/blackfin/configs/DNP5370_defconfig
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -5,7 +5,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLOB=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 700fb70..7450127 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index b40156d..5e797cf 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index be866d9..a566a2f 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index b64bdf7..8538095 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
index 1bccd9a..d496ae9 100644
--- a/arch/blackfin/configs/TCM-BF518_defconfig
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -7,7 +7,7 @@
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 00ce899..65f6421 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 1ff9f14..7dbc664 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -10,6 +10,7 @@
 #define __BFIN_ASM_SERIAL_H__
 
 #include <linux/serial_core.h>
+#include <linux/spinlock.h>
 #include <mach/anomaly.h>
 #include <mach/bfin_serial.h>
 
@@ -41,6 +42,7 @@
 	struct circ_buf rx_dma_buf;
 	struct timer_list rx_dma_timer;
 	int rx_dma_nrows;
+	spinlock_t rx_lock;
 	unsigned int tx_dma_channel;
 	unsigned int rx_dma_channel;
 	struct work_struct tx_dma_workqueue;
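The new rx_lock in bfin_serial.h suggests the RX DMA state is now serialized between interrupt/timer context and process context; a generic sketch of that kind of guard (the field names here are illustrative, not the driver's):

#include <linux/spinlock.h>

struct demo_rx_state {
	spinlock_t lock;
	unsigned int head, tail;
};

static void demo_rx_init(struct demo_rx_state *rx)
{
	spin_lock_init(&rx->lock);
	rx->head = rx->tail = 0;
}

static void demo_rx_advance(struct demo_rx_state *rx, unsigned int n)
{
	unsigned long flags;

	/* irqsave form: the producer may run in interrupt context */
	spin_lock_irqsave(&rx->lock, flags);
	rx->head += n;
	spin_unlock_irqrestore(&rx->lock, flags);
}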
diff --git a/arch/blackfin/lib/outs.S b/arch/blackfin/lib/outs.S
index 250f4d4..06a5e67 100644
--- a/arch/blackfin/lib/outs.S
+++ b/arch/blackfin/lib/outs.S
@@ -13,6 +13,8 @@
 .align 2
 
 ENTRY(_outsl)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -20,10 +22,12 @@
 	LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
 .Llong_loop_s: R0 = [P1++];
 .Llong_loop_e: [P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsl)
 
 ENTRY(_outsw)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -31,10 +35,12 @@
 	LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
 .Lword_loop_s: R0 = W[P1++];
 .Lword_loop_e: W[P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsw)
 
 ENTRY(_outsb)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -42,10 +48,12 @@
 	LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
 .Lbyte_loop_s: R0 = B[P1++];
 .Lbyte_loop_e: B[P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsb)
 
 ENTRY(_outsw_8)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -56,5 +64,5 @@
 		R0 = R0 << 8;
 		R0 = R0 + R1;
 .Lword8_loop_e: W[P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsw_8)
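The outs.S change above adds an early exit so a zero count never enters the hardware loop. A C-level sketch of the guarded behaviour for the 32-bit variant (not a drop-in replacement for the assembly):

#include <linux/types.h>
#include <linux/io.h>

/* Write 'count' 32-bit words from memory to a fixed I/O port address. */
static void demo_outsl(void __iomem *port, const void *addr,
		       unsigned long count)
{
	const u32 *buf = addr;

	if (!count)	/* mirrors the new "CC = R2 == 0" early return */
		return;

	while (count--)
		writel(*buf++, port);
}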
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 32529a0..725bb35 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1418,7 +1418,7 @@
 #define	SADD_LEN	0x0002	/* Slave Address Length                                                 */
 #define	STDVAL		0x0004	/* Slave Transmit Data Valid                                    */
 #define	NAK			0x0008	/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010	/* General Call Adrress Matching Enabled                */
+#define	GEN			0x0010	/* General Call Address Matching Enabled                */
 
 /* TWI_SLAVE_STAT Masks															*/
 #define	SDIR		0x0001	/* Slave Transfer Direction (Transmit/Receive*) */
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 790c767..ab4a925 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -58,6 +58,8 @@
 1:
 .ifeqs "\flushins", BROK_FLUSH_INST
 	\flushins [P0++];
+	nop;
+	nop;
 2:	nop;
 .else
 2:	\flushins [P0++];
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 42fa87e..3c648a0 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -223,7 +223,7 @@
 	return 0;
 }
 
-struct platform_suspend_ops bfin_pm_ops = {
+static const struct platform_suspend_ops bfin_pm_ops = {
 	.enter = bfin_pm_enter,
 	.valid	= bfin_pm_valid,
 };
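
The pm.c hunk above is a small hardening pattern worth calling out: an ops table that is only ever handed to the suspend core can be static and const. A minimal sketch with hypothetical callbacks:

#include <linux/suspend.h>

/* Hypothetical platform callbacks, shown only to make the table complete. */
static int example_pm_enter(suspend_state_t state)
{
	return 0;	/* would program the hardware for 'state' */
}

static int example_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM;
}

/* static + const: the table lands in rodata and is no longer a global symbol */
static const struct platform_suspend_ops example_pm_ops = {
	.enter	= example_pm_enter,
	.valid	= example_pm_valid,
};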
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 613e628..0a7a4c1 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -54,6 +54,8 @@
 	bool
 	default y
 	select HAVE_IDE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 
 config HZ
 	int
@@ -67,10 +69,6 @@
 
 source "fs/Kconfig.binfmt"
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config ETRAX_CMDLINE
 	string "Kernel command line"
 	default "root=/dev/mtdblock3"
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index a0c0df8..7328a7c 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -104,43 +104,21 @@
 	IRQ31_interrupt
 };
 
-static void enable_crisv10_irq(unsigned int irq);
-
-static unsigned int startup_crisv10_irq(unsigned int irq)
+static void enable_crisv10_irq(struct irq_data *data)
 {
-	enable_crisv10_irq(irq);
-	return 0;
+	crisv10_unmask_irq(data->irq);
 }
 
-#define shutdown_crisv10_irq	disable_crisv10_irq
-
-static void enable_crisv10_irq(unsigned int irq)
+static void disable_crisv10_irq(struct irq_data *data)
 {
-	crisv10_unmask_irq(irq);
-}
-
-static void disable_crisv10_irq(unsigned int irq)
-{
-	crisv10_mask_irq(irq);
-}
-
-static void ack_crisv10_irq(unsigned int irq)
-{
-}
-
-static void end_crisv10_irq(unsigned int irq)
-{
+	crisv10_mask_irq(data->irq);
 }
 
 static struct irq_chip crisv10_irq_type = {
-	.name =        "CRISv10",
-	.startup =     startup_crisv10_irq,
-	.shutdown =    shutdown_crisv10_irq,
-	.enable =      enable_crisv10_irq,
-	.disable =     disable_crisv10_irq,
-	.ack =         ack_crisv10_irq,
-	.end =         end_crisv10_irq,
-	.set_affinity = NULL
+	.name		= "CRISv10",
+	.irq_shutdown	= disable_crisv10_irq,
+	.irq_enable	= enable_crisv10_irq,
+	.irq_disable	= disable_crisv10_irq,
 };
 
 void weird_irq(void);
@@ -221,7 +199,8 @@
 
 	/* Initialize IRQ handler descriptors. */
 	for(i = 2; i < NR_IRQS; i++) {
-		irq_desc[i].chip = &crisv10_irq_type;
+		set_irq_chip_and_handler(i, &crisv10_irq_type,
+					 handle_simple_irq);
 		set_int_vector(i, interrupt[i]);
 	}
 
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 2ed48ae3d..0ad9db5 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -291,54 +291,33 @@
 }
 
 
-static unsigned int startup_crisv32_irq(unsigned int irq)
+static void enable_crisv32_irq(struct irq_data *data)
 {
-	crisv32_unmask_irq(irq);
-	return 0;
+	crisv32_unmask_irq(data->irq);
 }
 
-static void shutdown_crisv32_irq(unsigned int irq)
+static void disable_crisv32_irq(struct irq_data *data)
 {
-	crisv32_mask_irq(irq);
+	crisv32_mask_irq(data->irq);
 }
 
-static void enable_crisv32_irq(unsigned int irq)
-{
-	crisv32_unmask_irq(irq);
-}
-
-static void disable_crisv32_irq(unsigned int irq)
-{
-	crisv32_mask_irq(irq);
-}
-
-static void ack_crisv32_irq(unsigned int irq)
-{
-}
-
-static void end_crisv32_irq(unsigned int irq)
-{
-}
-
-int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
+static int set_affinity_crisv32_irq(struct irq_data *data,
+				    const struct cpumask *dest, bool force)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&irq_lock, flags);
-	irq_allocations[irq - FIRST_IRQ].mask = *dest;
-	spin_unlock_irqrestore(&irq_lock, flags);
 
+	spin_lock_irqsave(&irq_lock, flags);
+	irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
+	spin_unlock_irqrestore(&irq_lock, flags);
 	return 0;
 }
 
 static struct irq_chip crisv32_irq_type = {
-	.name =        "CRISv32",
-	.startup =     startup_crisv32_irq,
-	.shutdown =    shutdown_crisv32_irq,
-	.enable =      enable_crisv32_irq,
-	.disable =     disable_crisv32_irq,
-	.ack =         ack_crisv32_irq,
-	.end =         end_crisv32_irq,
-	.set_affinity = set_affinity_crisv32_irq
+	.name			= "CRISv32",
+	.irq_shutdown		= disable_crisv32_irq,
+	.irq_enable		= enable_crisv32_irq,
+	.irq_disable		= disable_crisv32_irq,
+	.irq_set_affinity	= set_affinity_crisv32_irq,
 };
 
 void
@@ -472,7 +451,8 @@
 
 	/* Point all IRQ's to bad handlers. */
 	for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
-		irq_desc[j].chip = &crisv32_irq_type;
+		set_irq_chip_and_handler(j, &crisv32_irq_type,
+					 handle_simple_irq);
 		set_exception_vector(i, interrupt[j]);
 	}
 
diff --git a/arch/cris/arch-v32/lib/nand_init.S b/arch/cris/arch-v32/lib/nand_init.S
index e705f5c..d671fed 100644
--- a/arch/cris/arch-v32/lib/nand_init.S
+++ b/arch/cris/arch-v32/lib/nand_init.S
@@ -139,7 +139,7 @@
 	lsrq	8, $r4
 	move.b	$r4, [$r1]	; Row address
 	lsrq	8, $r4
-	move.b	$r4, [$r1]	; Row adddress
+	move.b	$r4, [$r1]	; Row address
 	moveq	20, $r4
 2:	bne	2b
 	subq	1, $r4
diff --git a/arch/cris/configs/artpec_3_defconfig b/arch/cris/configs/artpec_3_defconfig
index 590f72c..71854d4 100644
--- a/arch/cris/configs/artpec_3_defconfig
+++ b/arch/cris/configs/artpec_3_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/configs/etrax-100lx_v2_defconfig b/arch/cris/configs/etrax-100lx_v2_defconfig
index 1b2853e..a85aabf 100644
--- a/arch/cris/configs/etrax-100lx_v2_defconfig
+++ b/arch/cris/configs/etrax-100lx_v2_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/configs/etraxfs_defconfig b/arch/cris/configs/etraxfs_defconfig
index f73d38c..87c7227 100644
--- a/arch/cris/configs/etraxfs_defconfig
+++ b/arch/cris/configs/etraxfs_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/include/asm/etraxgpio.h b/arch/cris/include/asm/etraxgpio.h
index d474818..461c089 100644
--- a/arch/cris/include/asm/etraxgpio.h
+++ b/arch/cris/include/asm/etraxgpio.h
@@ -1,5 +1,5 @@
 /*
- * The following devices are accessable using this driver using
+ * The following devices are accessible using this driver using
  * GPIO_MAJOR (120) and a couple of minor numbers.
  *
  * For ETRAX 100LX (CONFIG_ETRAX_ARCH_V10):
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 469f7f9..c346952 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -62,7 +62,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -93,8 +93,8 @@
 		printk("do_IRQ: stack overflow: %lX\n", sp);
 		show_stack(NULL, (unsigned long *)sp);
 	}
-	__do_IRQ(irq);
-        irq_exit();
+	generic_handle_irq(irq);
+	irq_exit();
 	set_irq_regs(old_regs);
 }
 
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index b509643..4e73092 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -86,7 +86,7 @@
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 4422189..c49be84 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -72,11 +72,6 @@
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	.init.data : { INIT_DATA }
 	.init.setup : { INIT_SETUP(16) }
-#ifdef CONFIG_ETRAX_ARCH_V32
-	__start___param = .;
-	__param : { *(__param) }
-	__stop___param = .;
-#endif
 	.initcall.init : {
 		INIT_CALLS
 	}
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index f6bcb03..747499a 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -5,6 +5,7 @@
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
+	select HAVE_GENERIC_HARDIRQS
 
 config ZONE_DMA
 	bool
@@ -29,14 +30,6 @@
 	bool
 	default n
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	default y
-
 config TIME_LOW_RES
 	bool
 	default y
diff --git a/arch/frv/defconfig b/arch/frv/defconfig
index b8ebe9e..b1b7926 100644
--- a/arch/frv/defconfig
+++ b/arch/frv/defconfig
@@ -3,7 +3,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_MMU=y
 CONFIG_FRV_OUTOFLINE_ATOMIC_OPS=y
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 65f897d8..6df692d 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -2,6 +2,8 @@
 	bool
 	default y
 	select HAVE_IDE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 
 config SYMBOL_PREFIX
 	string
@@ -47,10 +49,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
index ee671c3..e8d1b23 100644
--- a/arch/h8300/Kconfig.debug
+++ b/arch/h8300/Kconfig.debug
@@ -48,7 +48,7 @@
 	  builtin kernel commandline enabled.
 
 config KERNEL_COMMAND
-	string "Buildin commmand string"
+	string "Buildin command string"
 	depends on DEFAULT_CMDLINE
 	help
 	  builtin kernel commandline strings.
diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig
index 342f777..042425a 100644
--- a/arch/h8300/defconfig
+++ b/arch/h8300/defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index c25dc2c..7643d39 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -38,34 +38,30 @@
 	return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
 }
 
-static void h8300_enable_irq(unsigned int irq)
+static void h8300_enable_irq(struct irq_data *data)
 {
-	if (is_ext_irq(irq))
-		IER_REGS |= 1 << (irq - EXT_IRQ0);
+	if (is_ext_irq(data->irq))
+		IER_REGS |= 1 << (data->irq - EXT_IRQ0);
 }
 
-static void h8300_disable_irq(unsigned int irq)
+static void h8300_disable_irq(struct irq_data *data)
 {
-	if (is_ext_irq(irq))
-		IER_REGS &= ~(1 << (irq - EXT_IRQ0));
+	if (is_ext_irq(data->irq))
+		IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
 }
 
-static void h8300_end_irq(unsigned int irq)
+static unsigned int h8300_startup_irq(struct irq_data *data)
 {
-}
-
-static unsigned int h8300_startup_irq(unsigned int irq)
-{
-	if (is_ext_irq(irq))
-		return h8300_enable_irq_pin(irq);
+	if (is_ext_irq(data->irq))
+		return h8300_enable_irq_pin(data->irq);
 	else
 		return 0;
 }
 
-static void h8300_shutdown_irq(unsigned int irq)
+static void h8300_shutdown_irq(struct irq_data *data)
 {
-	if (is_ext_irq(irq))
-		h8300_disable_irq_pin(irq);
+	if (is_ext_irq(data->irq))
+		h8300_disable_irq_pin(data->irq);
 }
 
 /*
@@ -73,12 +69,10 @@
  */
 struct irq_chip h8300irq_chip = {
 	.name		= "H8300-INTC",
-	.startup	= h8300_startup_irq,
-	.shutdown	= h8300_shutdown_irq,
-	.enable		= h8300_enable_irq,
-	.disable	= h8300_disable_irq,
-	.ack		= NULL,
-	.end		= h8300_end_irq,
+	.irq_startup	= h8300_startup_irq,
+	.irq_shutdown	= h8300_shutdown_irq,
+	.irq_enable	= h8300_enable_irq,
+	.irq_disable	= h8300_disable_irq,
 };
 
 #if defined(CONFIG_RAMKERNEL)
@@ -160,18 +154,14 @@
 
 	setup_vector();
 
-	for (c = 0; c < NR_IRQS; c++) {
-		irq_desc[c].status = IRQ_DISABLED;
-		irq_desc[c].action = NULL;
-		irq_desc[c].depth = 1;
-		irq_desc[c].chip = &h8300irq_chip;
-	}
+	for (c = 0; c < NR_IRQS; c++)
+		set_irq_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
 }
 
 asmlinkage void do_IRQ(int irq)
 {
 	irq_enter();
-	__do_IRQ(irq);
+	generic_handle_irq(irq);
 	irq_exit();
 }
 
@@ -192,7 +182,7 @@
 			goto unlock;
 		seq_printf(p, "%3d: ",i);
 		seq_printf(p, "%10u ", kstat_irqs(i));
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
 		seq_printf(p, "-%-8s", irq_desc[i].name);
 		seq_printf(p, "  %s", action->name);
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e0f5b6d..fcf3b43 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -22,6 +22,10 @@
 	select HAVE_KVM
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP
+	select IRQ_PER_CPU
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -678,28 +682,6 @@
 
 source "lib/Kconfig"
 
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
-config GENERIC_PENDING_IRQ
-	bool
-	depends on GENERIC_HARDIRQS && SMP
-	default y
-
-config IRQ_PER_CPU
-	bool
-	default y
-
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
 
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index cc8335e..e5a6c35 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -426,6 +426,11 @@
 extern void iounmap (volatile void __iomem *addr);
 extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
 extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+	return ioremap(phys_addr, size);
+}
+
 
 /*
  * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2f229e5..2689ee5 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -590,6 +590,10 @@
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC 1
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 41b6d31..961a16f 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -189,6 +189,7 @@
 # define pgprot_val(x)	((x).pgprot)
 
 # define __pte(x)	((pte_t) { (x) } )
+# define __pmd(x)	((pmd_t) { (x) } )
 # define __pgprot(x)	((pgprot_t) { (x) } )
 
 #else /* !STRICT_MM_TYPECHECKS */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 348e44d..03afe79 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -717,8 +717,9 @@
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 7b897b7..90ebceb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -479,7 +479,7 @@
 
 	if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
 		printk_once(KERN_WARNING
-			    "node_cpuid[%d] is too small, may not be able to use all cpus\n",
+			    "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
 			    ARRAY_SIZE(node_cpuid));
 		return;
 	}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d92d5b5..89accc6 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -617,11 +617,14 @@
 	return get_unmapped_area(file, addr, len, pgoff, flags);
 }
 
+/* forward declaration */
+static const struct dentry_operations pfmfs_dentry_operations;
 
 static struct dentry *
 pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
+			PFMFS_MAGIC);
 }
 
 static struct file_system_type pfm_fs_type = {
@@ -2232,7 +2235,6 @@
 	}
 	path.mnt = mntget(pfmfs_mnt);
 
-	d_set_d_op(path.dentry, &pfmfs_dentry_operations);
 	d_add(path.dentry, inode);
 
 	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 16f1c7b..6d33c5c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -53,12 +53,8 @@
 
 void (*ia64_mark_idle)(int);
 
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
 void (*pm_idle) (void);
 EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f56a631..70d224d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -749,7 +749,7 @@
 	return r;
 }
 
-static struct kvm *kvm_alloc_kvm(void)
+struct kvm *kvm_arch_alloc_vm(void)
 {
 
 	struct kvm *kvm;
@@ -760,7 +760,7 @@
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
 	kvm = (struct kvm *)(vm_base +
@@ -806,10 +806,12 @@
 #define GUEST_PHYSICAL_RR4	0x2739
 #define VMM_INIT_RR		0x1660
 
-static void kvm_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
 	BUG_ON(!kvm);
 
+	kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
@@ -823,21 +825,8 @@
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-}
 
-struct  kvm *kvm_arch_create_vm(void)
-{
-	struct kvm *kvm = kvm_alloc_kvm();
-
-	if (IS_ERR(kvm))
-		return ERR_PTR(-ENOMEM);
-
-	kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
-	kvm_init_vm(kvm);
-
-	return kvm;
-
+	return 0;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
@@ -962,7 +951,9 @@
 			goto out;
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			kvm_ioapic_destroy(kvm);
+			mutex_unlock(&kvm->slots_lock);
 			goto out;
 		}
 		break;
@@ -1357,7 +1348,7 @@
 	return -EINVAL;
 }
 
-static void free_kvm(struct kvm *kvm)
+void kvm_arch_free_vm(struct kvm *kvm)
 {
 	unsigned long vm_base = kvm->arch.vm_base;
 
@@ -1399,9 +1390,6 @@
 #endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	free_kvm(kvm);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
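
For context, the ia64 change above opts into the per-arch VM allocation hooks; the key convention change is that kvm_arch_alloc_vm() reports failure with NULL rather than ERR_PTR(). A stripped-down sketch (the sizing macro is hypothetical, and unlike the real ia64 code the returned pointer here is the allocation base itself):

#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/string.h>

#define EXAMPLE_VM_DATA_SIZE	(16UL << 20)	/* hypothetical arena size */

struct kvm *kvm_arch_alloc_vm(void)
{
	unsigned long base;

	base = __get_free_pages(GFP_KERNEL, get_order(EXAMPLE_VM_DATA_SIZE));
	if (!base)
		return NULL;		/* not ERR_PTR(-ENOMEM) any more */

	memset((void *)base, 0, EXAMPLE_VM_DATA_SIZE);
	return (struct kvm *)base;	/* ia64 actually carves struct kvm out of the arena */
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	free_pages((unsigned long)kvm, get_order(EXAMPLE_VM_DATA_SIZE));
}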
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index fb8f9f5..f1e17d3 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -130,7 +130,7 @@
 
 	local_irq_save(psr);
 
-	/*Intercept the acces for PIB range*/
+	/*Intercept the access for PIB range*/
 	if (iot == GPFN_PIB) {
 		if (!dir)
 			lsapic_write(vcpu, src_pa, s, *dest);
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 1841ee7..5ca674b 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -38,7 +38,7 @@
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, taddr);
+			pte = pte_alloc_map(mm, NULL, pmd, taddr);
 	}
 	return pte;
 }
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 5c291d6..ef4c1e4 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -7,6 +7,9 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_HARDIRQS_NO_DEPRECATED
+	select GENERIC_IRQ_PROBE
 
 config SBUS
 	bool
@@ -19,14 +22,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
 config NO_IOPORT
 	def_bool y
 
diff --git a/arch/m32r/configs/m32700ut.smp_defconfig b/arch/m32r/configs/m32700ut.smp_defconfig
index 816c3ec..a3d727e 100644
--- a/arch/m32r/configs/m32700ut.smp_defconfig
+++ b/arch/m32r/configs/m32700ut.smp_defconfig
@@ -5,7 +5,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/m32700ut.up_defconfig b/arch/m32r/configs/m32700ut.up_defconfig
index 8478568..b833416 100644
--- a/arch/m32r/configs/m32700ut.up_defconfig
+++ b/arch/m32r/configs/m32700ut.up_defconfig
@@ -5,7 +5,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.nommu_defconfig b/arch/m32r/configs/mappi.nommu_defconfig
index 354a964..7c90ce2 100644
--- a/arch/m32r/configs/mappi.nommu_defconfig
+++ b/arch/m32r/configs/mappi.nommu_defconfig
@@ -3,7 +3,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.smp_defconfig b/arch/m32r/configs/mappi.smp_defconfig
index 9022307..367d07c 100644
--- a/arch/m32r/configs/mappi.smp_defconfig
+++ b/arch/m32r/configs/mappi.smp_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=15
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.up_defconfig b/arch/m32r/configs/mappi.up_defconfig
index 3726068..cb11384 100644
--- a/arch/m32r/configs/mappi.up_defconfig
+++ b/arch/m32r/configs/mappi.up_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi2.opsp_defconfig b/arch/m32r/configs/mappi2.opsp_defconfig
index 6136fad..3bff779 100644
--- a/arch/m32r/configs/mappi2.opsp_defconfig
+++ b/arch/m32r/configs/mappi2.opsp_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi2.vdec2_defconfig b/arch/m32r/configs/mappi2.vdec2_defconfig
index dce1fc7..75246c9 100644
--- a/arch/m32r/configs/mappi2.vdec2_defconfig
+++ b/arch/m32r/configs/mappi2.vdec2_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi3.smp_defconfig b/arch/m32r/configs/mappi3.smp_defconfig
index b204e2e..27cefd4 100644
--- a/arch/m32r/configs/mappi3.smp_defconfig
+++ b/arch/m32r/configs/mappi3.smp_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=15
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/oaks32r_defconfig b/arch/m32r/configs/oaks32r_defconfig
index 5aa4ea9..5087a51 100644
--- a/arch/m32r/configs/oaks32r_defconfig
+++ b/arch/m32r/configs/oaks32r_defconfig
@@ -2,7 +2,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/opsput_defconfig b/arch/m32r/configs/opsput_defconfig
index 8494c6a..50c6f52 100644
--- a/arch/m32r/configs/opsput_defconfig
+++ b/arch/m32r/configs/opsput_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/usrv_defconfig b/arch/m32r/configs/usrv_defconfig
index 1df293b..a3cfaae 100644
--- a/arch/m32r/configs/usrv_defconfig
+++ b/arch/m32r/configs/usrv_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=15
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 7db26f1..76eaf38 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,8 +40,10 @@
 	}
 
 	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto skip;
 		seq_printf(p, "%3d: ",i);
@@ -51,7 +53,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", desc->irq_data.chip->name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -59,7 +61,7 @@
 
 		seq_putc(p, '\n');
 skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 	return 0;
 }
@@ -78,7 +80,7 @@
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* FIXME M32R */
 #endif
-	__do_IRQ(irq);
+	generic_handle_irq(irq);
 	irq_exit();
 	set_irq_regs(old_regs);
 
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 402a59d..4a693d0 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -39,39 +39,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_m32104ut(unsigned int irq)
+static void mask_m32104ut_irq(struct irq_data *data)
 {
-	disable_m32104ut_irq(irq);
+	disable_m32104ut_irq(data->irq);
 }
 
-static void end_m32104ut_irq(unsigned int irq)
+static void unmask_m32104ut_irq(struct irq_data *data)
 {
-	enable_m32104ut_irq(irq);
+	enable_m32104ut_irq(data->irq);
 }
 
-static unsigned int startup_m32104ut_irq(unsigned int irq)
+static void shutdown_m32104ut_irq(struct irq_data *data)
 {
-	enable_m32104ut_irq(irq);
-	return (0);
-}
+	unsigned int irq = data->irq;
+	unsigned long port = irq2port(irq);
 
-static void shutdown_m32104ut_irq(unsigned int irq)
-{
-	unsigned long port;
-
-	port = irq2port(irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32104ut_irq_type =
 {
-	.name = "M32104UT-IRQ",
-	.startup = startup_m32104ut_irq,
-	.shutdown = shutdown_m32104ut_irq,
-	.enable = enable_m32104ut_irq,
-	.disable = disable_m32104ut_irq,
-	.ack = mask_and_ack_m32104ut,
-	.end = end_m32104ut_irq
+	.name		= "M32104UT-IRQ",
+	.irq_shutdown	= shutdown_m32104ut_irq,
+	.irq_unmask	= unmask_m32104ut_irq,
+	.irq_mask	= mask_m32104ut_irq,
 };
 
 void __init init_IRQ(void)
@@ -85,36 +76,29 @@
 
 #if defined(CONFIG_SMC91X)
 	/* INT#0: LAN controller on M32104UT-LAN (SMC91C111)*/
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = 0;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
-	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11; /* "H" level sense */
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &m32104ut_irq_type,
+				 handle_level_irq);
+	/* "H" level sense */
+	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11;
 	disable_m32104ut_irq(M32R_IRQ_INT0);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &m32104ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_m32104ut_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &m32104ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = M32R_ICUCR_IEN;
 	disable_m32104ut_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &m32104ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = M32R_ICUCR_IEN;
 	disable_m32104ut_irq(M32R_IRQ_SIO0_S);
 #endif /* CONFIG_SERIAL_M32R_SIO */
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 80b1a02..2074bcc 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -45,39 +45,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_m32700ut(unsigned int irq)
+static void mask_m32700ut(struct irq_data *data)
 {
-	disable_m32700ut_irq(irq);
+	disable_m32700ut_irq(data->irq);
 }
 
-static void end_m32700ut_irq(unsigned int irq)
+static void unmask_m32700ut(struct irq_data *data)
 {
-	enable_m32700ut_irq(irq);
+	enable_m32700ut_irq(data->irq);
 }
 
-static unsigned int startup_m32700ut_irq(unsigned int irq)
-{
-	enable_m32700ut_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_irq(unsigned int irq)
+static void shutdown_m32700ut(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_irq_type =
 {
-	.name = "M32700UT-IRQ",
-	.startup = startup_m32700ut_irq,
-	.shutdown = shutdown_m32700ut_irq,
-	.enable = enable_m32700ut_irq,
-	.disable = disable_m32700ut_irq,
-	.ack = mask_and_ack_m32700ut,
-	.end = end_m32700ut_irq
+	.name		= "M32700UT-IRQ",
+	.irq_shutdown	= shutdown_m32700ut,
+	.irq_mask	= mask_m32700ut,
+	.irq_unmask	= unmask_m32700ut
 };
 
 /*
@@ -99,7 +90,6 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	disable_m32700ut_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
 	outw(data, port);
@@ -111,50 +101,38 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	enable_m32700ut_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_pld(unsigned int irq)
+static void mask_m32700ut_pld(struct irq_data *data)
 {
-	disable_m32700ut_pld_irq(irq);
-//	mask_and_ack_m32700ut(M32R_IRQ_INT1);
+	disable_m32700ut_pld_irq(data->irq);
 }
 
-static void end_m32700ut_pld_irq(unsigned int irq)
+static void unmask_m32700ut_pld(struct irq_data *data)
 {
-	enable_m32700ut_pld_irq(irq);
-	end_m32700ut_irq(M32R_IRQ_INT1);
+	enable_m32700ut_pld_irq(data->irq);
+	enable_m32700ut_irq(M32R_IRQ_INT1);
 }
 
-static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
-{
-	enable_m32700ut_pld_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_pld_irq(unsigned int irq)
+static void shutdown_m32700ut_pld_irq(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2pldirq(irq);
-//	shutdown_m32700ut_irq(M32R_IRQ_INT1);
+	pldirq = irq2pldirq(data->irq);
 	port = pldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.name = "M32700UT-PLD-IRQ",
-	.startup = startup_m32700ut_pld_irq,
-	.shutdown = shutdown_m32700ut_pld_irq,
-	.enable = enable_m32700ut_pld_irq,
-	.disable = disable_m32700ut_pld_irq,
-	.ack = mask_and_ack_m32700ut_pld,
-	.end = end_m32700ut_pld_irq
+	.name		= "M32700UT-PLD-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_pld_irq,
+	.irq_mask	= mask_m32700ut_pld,
+	.irq_unmask	= unmask_m32700ut_pld,
 };
 
 /*
@@ -188,42 +166,33 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_lanpld(unsigned int irq)
+static void mask_m32700ut_lanpld(struct irq_data *data)
 {
-	disable_m32700ut_lanpld_irq(irq);
+	disable_m32700ut_lanpld_irq(data->irq);
 }
 
-static void end_m32700ut_lanpld_irq(unsigned int irq)
+static void unmask_m32700ut_lanpld(struct irq_data *data)
 {
-	enable_m32700ut_lanpld_irq(irq);
-	end_m32700ut_irq(M32R_IRQ_INT0);
+	enable_m32700ut_lanpld_irq(data->irq);
+	enable_m32700ut_irq(M32R_IRQ_INT0);
 }
 
-static unsigned int startup_m32700ut_lanpld_irq(unsigned int irq)
-{
-	enable_m32700ut_lanpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
+static void shutdown_m32700ut_lanpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lanpldirq(irq);
+	pldirq = irq2lanpldirq(data->irq);
 	port = lanpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_lanpld_irq_type =
 {
-	.name = "M32700UT-PLD-LAN-IRQ",
-	.startup = startup_m32700ut_lanpld_irq,
-	.shutdown = shutdown_m32700ut_lanpld_irq,
-	.enable = enable_m32700ut_lanpld_irq,
-	.disable = disable_m32700ut_lanpld_irq,
-	.ack = mask_and_ack_m32700ut_lanpld,
-	.end = end_m32700ut_lanpld_irq
+	.name		= "M32700UT-PLD-LAN-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_lanpld,
+	.irq_mask	= mask_m32700ut_lanpld,
+	.irq_unmask	= unmask_m32700ut_lanpld,
 };
 
 /*
@@ -257,143 +226,110 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_lcdpld(unsigned int irq)
+static void mask_m32700ut_lcdpld(struct irq_data *data)
 {
-	disable_m32700ut_lcdpld_irq(irq);
+	disable_m32700ut_lcdpld_irq(data->irq);
 }
 
-static void end_m32700ut_lcdpld_irq(unsigned int irq)
+static void unmask_m32700ut_lcdpld(struct irq_data *data)
 {
-	enable_m32700ut_lcdpld_irq(irq);
-	end_m32700ut_irq(M32R_IRQ_INT2);
+	enable_m32700ut_lcdpld_irq(data->irq);
+	enable_m32700ut_irq(M32R_IRQ_INT2);
 }
 
-static unsigned int startup_m32700ut_lcdpld_irq(unsigned int irq)
-{
-	enable_m32700ut_lcdpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
+static void shutdown_m32700ut_lcdpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lcdpldirq(irq);
+	pldirq = irq2lcdpldirq(data->irq);
 	port = lcdpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_lcdpld_irq_type =
 {
-	.name = "M32700UT-PLD-LCD-IRQ",
-	.startup = startup_m32700ut_lcdpld_irq,
-	.shutdown = shutdown_m32700ut_lcdpld_irq,
-	.enable = enable_m32700ut_lcdpld_irq,
-	.disable = disable_m32700ut_lcdpld_irq,
-	.ack = mask_and_ack_m32700ut_lcdpld,
-	.end = end_m32700ut_lcdpld_irq
+	.name		= "M32700UT-PLD-LCD-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_lcdpld,
+	.irq_mask	= mask_m32700ut_lcdpld,
+	.irq_unmask	= unmask_m32700ut_lcdpld,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT#0: LAN controller on M32700UT-LAN (SMC91C111)*/
-	irq_desc[M32700UT_LAN_IRQ_LAN].status = IRQ_DISABLED;
-	irq_desc[M32700UT_LAN_IRQ_LAN].chip = &m32700ut_lanpld_irq_type;
-	irq_desc[M32700UT_LAN_IRQ_LAN].action = 0;
-	irq_desc[M32700UT_LAN_IRQ_LAN].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(M32700UT_LAN_IRQ_LAN,
+				 &m32700ut_lanpld_irq_type, handle_level_irq);
 	lanpld_icu_data[irq2lanpldirq(M32700UT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* "H" edge sense */
 	disable_m32700ut_lanpld_irq(M32700UT_LAN_IRQ_LAN);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_m32700ut_irq(M32R_IRQ_MFT2);
 
 	/* SIO0 : receive */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0 : send */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1 : receive */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1 : send */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO1_S);
 
 	/* DMA1 : */
-	irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_DMA1].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_DMA1].action = 0;
-	irq_desc[M32R_IRQ_DMA1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_DMA1, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_DMA1].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_DMA1);
 
 #ifdef CONFIG_SERIAL_M32R_PLDSIO
 	/* INT#1: SIO0 Receive on PLD */
-	irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_RCV].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
-	irq_desc[PLD_IRQ_SIO0_RCV].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_RCV, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_m32700ut_pld_irq(PLD_IRQ_SIO0_RCV);
 
 	/* INT#1: SIO0 Send on PLD */
-	irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_SND].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_SND].action = 0;
-	irq_desc[PLD_IRQ_SIO0_SND].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_SND, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_m32700ut_pld_irq(PLD_IRQ_SIO0_SND);
 #endif  /* CONFIG_SERIAL_M32R_PLDSIO */
 
 	/* INT#1: CFC IREQ on PLD */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_CFIREQ);
 
 	/* INT#1: CFC Insert on PLD */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00;	/* 'L' edge sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_CFC_INSERT);
 
 	/* INT#1: CFC Eject on PLD */
-	irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_EJECT].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
-	irq_desc[PLD_IRQ_CFC_EJECT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* 'H' edge sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_CFC_EJECT);
 
@@ -413,13 +349,11 @@
 
 #if defined(CONFIG_USB)
 	outw(USBCR_OTGS, USBCR); 	/* USBCR: non-OTG */
+	set_irq_chip_and_handler(M32700UT_LCD_IRQ_USB_INT1,
+				 &m32700ut_lcdpld_irq_type, handle_level_irq);
 
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].chip = &m32700ut_lcdpld_irq_type;
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].action = 0;
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].depth = 1;
-    lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
-    disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1);
+	lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
+	disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1);
 #endif
 	/*
 	 * INT2# is used for BAT, USB, AUDIO
@@ -432,10 +366,8 @@
 	/*
 	 * INT3# is used for AR
 	 */
-	irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT3].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_INT3].action = 0;
-	irq_desc[M32R_IRQ_INT3].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT3, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_m32700ut_irq(M32R_IRQ_INT3);
 #endif	/* CONFIG_VIDEO_M32R_AR */
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index ea00c84..cdd8c45 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -38,40 +38,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_mappi(struct irq_data *data)
 {
-	disable_mappi_irq(irq);
+	disable_mappi_irq(data->irq);
 }
 
-static void end_mappi_irq(unsigned int irq)
+static void unmask_mappi(struct irq_data *data)
 {
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		enable_mappi_irq(irq);
+	enable_mappi_irq(data->irq);
 }
 
-static unsigned int startup_mappi_irq(unsigned int irq)
-{
-	enable_mappi_irq(irq);
-	return (0);
-}
-
-static void shutdown_mappi_irq(unsigned int irq)
+static void shutdown_mappi(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip mappi_irq_type =
 {
-	.name = "MAPPI-IRQ",
-	.startup = startup_mappi_irq,
-	.shutdown = shutdown_mappi_irq,
-	.enable = enable_mappi_irq,
-	.disable = disable_mappi_irq,
-	.ack = mask_and_ack_mappi,
-	.end = end_mappi_irq
+	.name		= "MAPPI-IRQ",
+	.irq_shutdown	= shutdown_mappi,
+	.irq_mask	= mask_mappi,
+	.irq_unmask	= unmask_mappi,
 };
 
 void __init init_IRQ(void)
@@ -85,70 +75,54 @@
 
 #ifdef CONFIG_NE2000
 	/* INT0 : LAN controller (RTL8019AS) */
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = NULL;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
 	disable_mappi_irq(M32R_IRQ_INT0);
 #endif /* CONFIG_M32R_NE2000 */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = NULL;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = NULL;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = NULL;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = NULL;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = NULL;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_S);
 #endif /* CONFIG_SERIAL_M32R_SIO */
 
 #if defined(CONFIG_M32R_PCC)
 	/* INT1 : pccard0 interrupt */
-	irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT1].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_INT1].action = NULL;
-	irq_desc[M32R_IRQ_INT1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
 	disable_mappi_irq(M32R_IRQ_INT1);
 
 	/* INT2 : pccard1 interrupt */
-	irq_desc[M32R_IRQ_INT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT2].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_INT2].action = NULL;
-	irq_desc[M32R_IRQ_INT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT2, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
 	disable_mappi_irq(M32R_IRQ_INT2);
 #endif /* CONFIG_M32RPCC */
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index c049376..9117c30 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -46,126 +46,97 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi2(unsigned int irq)
+static void mask_mappi2(struct irq_data *data)
 {
-	disable_mappi2_irq(irq);
+	disable_mappi2_irq(data->irq);
 }
 
-static void end_mappi2_irq(unsigned int irq)
+static void unmask_mappi2(struct irq_data *data)
 {
-	enable_mappi2_irq(irq);
+	enable_mappi2_irq(data->irq);
 }
 
-static unsigned int startup_mappi2_irq(unsigned int irq)
-{
-	enable_mappi2_irq(irq);
-	return (0);
-}
-
-static void shutdown_mappi2_irq(unsigned int irq)
+static void shutdown_mappi2(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip mappi2_irq_type =
 {
-	.name = "MAPPI2-IRQ",
-	.startup = startup_mappi2_irq,
-	.shutdown = shutdown_mappi2_irq,
-	.enable = enable_mappi2_irq,
-	.disable = disable_mappi2_irq,
-	.ack = mask_and_ack_mappi2,
-	.end = end_mappi2_irq
+	.name		= "MAPPI2-IRQ",
+	.irq_shutdown	= shutdown_mappi2,
+	.irq_mask	= mask_mappi2,
+	.irq_unmask	= unmask_mappi2,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT0 : LAN controller (SMC91111) */
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = 0;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi2_irq(M32R_IRQ_INT0);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi2_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO0_S);
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO1_S);
 #endif  /* CONFIG_M32R_USE_DBG_CONSOLE */
 
 #if defined(CONFIG_USB)
 	/* INT1 : USB Host controller interrupt */
-	irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT1].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_INT1].action = 0;
-	irq_desc[M32R_IRQ_INT1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
 	disable_mappi2_irq(M32R_IRQ_INT1);
 #endif /* CONFIG_USB */
 
 	/* ICUCR40: CFC IREQ */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &mappi2_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
 	disable_mappi2_irq(PLD_IRQ_CFIREQ);
 
 #if defined(CONFIG_M32R_CFC)
 	/* ICUCR41: CFC Insert */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi2_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
 	disable_mappi2_irq(PLD_IRQ_CFC_INSERT);
 
 	/* ICUCR42: CFC Eject */
-	irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_EJECT].chip = &mappi2_irq_type;
-	irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
-	irq_desc[PLD_IRQ_CFC_EJECT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFC_EJECT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi2_irq(PLD_IRQ_CFC_EJECT);
 #endif /* CONFIG_MAPPI2_CFC */
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 882de25..b44f5de 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -46,128 +46,98 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi3(unsigned int irq)
+static void mask_mappi3(struct irq_data *data)
 {
-	disable_mappi3_irq(irq);
+	disable_mappi3_irq(data->irq);
 }
 
-static void end_mappi3_irq(unsigned int irq)
+static void unmask_mappi3(struct irq_data *data)
 {
-	enable_mappi3_irq(irq);
+	enable_mappi3_irq(data->irq);
 }
 
-static unsigned int startup_mappi3_irq(unsigned int irq)
-{
-	enable_mappi3_irq(irq);
-	return (0);
-}
-
-static void shutdown_mappi3_irq(unsigned int irq)
+static void shutdown_mappi3(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct irq_chip mappi3_irq_type =
-{
-	.name = "MAPPI3-IRQ",
-	.startup = startup_mappi3_irq,
-	.shutdown = shutdown_mappi3_irq,
-	.enable = enable_mappi3_irq,
-	.disable = disable_mappi3_irq,
-	.ack = mask_and_ack_mappi3,
-	.end = end_mappi3_irq
+static struct irq_chip mappi3_irq_type = {
+	.name		= "MAPPI3-IRQ",
+	.irq_shutdown	= shutdown_mappi3,
+	.irq_mask	= mask_mappi3,
+	.irq_unmask	= unmask_mappi3,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT0 : LAN controller (SMC91111) */
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = 0;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi3_irq(M32R_IRQ_INT0);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi3_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO0_S);
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO1_S);
 #endif  /* CONFIG_SERIAL_M32R_SIO */
 
 #if defined(CONFIG_USB)
 	/* INT1 : USB Host controller interrupt */
-	irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT1].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_INT1].action = 0;
-	irq_desc[M32R_IRQ_INT1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
 	disable_mappi3_irq(M32R_IRQ_INT1);
 #endif /* CONFIG_USB */
 
 	/* CFC IREQ */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &mappi3_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
 	disable_mappi3_irq(PLD_IRQ_CFIREQ);
 
 #if defined(CONFIG_M32R_CFC)
 	/* ICUCR41: CFC Insert & eject */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi3_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
 	disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
 
 #endif /* CONFIG_M32R_CFC */
 
 	/* IDE IREQ */
-	irq_desc[PLD_IRQ_IDEIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_IDEIREQ].chip = &mappi3_irq_type;
-	irq_desc[PLD_IRQ_IDEIREQ].action = 0;
-	irq_desc[PLD_IRQ_IDEIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_IDEIREQ, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi3_irq(PLD_IRQ_IDEIREQ);
 
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index d11d93b..19a02db 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -37,39 +37,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_oaks32r(struct irq_data *data)
 {
-	disable_oaks32r_irq(irq);
+	disable_oaks32r_irq(data->irq);
 }
 
-static void end_oaks32r_irq(unsigned int irq)
+static void unmask_oaks32r(struct irq_data *data)
 {
-	enable_oaks32r_irq(irq);
+	enable_oaks32r_irq(data->irq);
 }
 
-static unsigned int startup_oaks32r_irq(unsigned int irq)
-{
-	enable_oaks32r_irq(irq);
-	return (0);
-}
-
-static void shutdown_oaks32r_irq(unsigned int irq)
+static void shutdown_oaks32r(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip oaks32r_irq_type =
 {
-	.name = "OAKS32R-IRQ",
-	.startup = startup_oaks32r_irq,
-	.shutdown = shutdown_oaks32r_irq,
-	.enable = enable_oaks32r_irq,
-	.disable = disable_oaks32r_irq,
-	.ack = mask_and_ack_mappi,
-	.end = end_oaks32r_irq
+	.name		= "OAKS32R-IRQ",
+	.irq_shutdown	= shutdown_oaks32r,
+	.irq_mask	= mask_oaks32r,
+	.irq_unmask	= unmask_oaks32r,
 };
 
 void __init init_IRQ(void)
@@ -83,52 +74,40 @@
 
 #ifdef CONFIG_NE2000
 	/* INT3 : LAN controller (RTL8019AS) */
-	irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT3].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_INT3].action = 0;
-	irq_desc[M32R_IRQ_INT3].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT3, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_oaks32r_irq(M32R_IRQ_INT3);
 #endif /* CONFIG_NE2000 */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_oaks32r_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO1_S);
 #endif /* CONFIG_SERIAL_M32R_SIO */
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index 5f3402a..1273154 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -46,39 +46,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_opsput(unsigned int irq)
+static void mask_opsput(struct irq_data *data)
 {
-	disable_opsput_irq(irq);
+	disable_opsput_irq(data->irq);
 }
 
-static void end_opsput_irq(unsigned int irq)
+static void unmask_opsput(struct irq_data *data)
 {
-	enable_opsput_irq(irq);
+	enable_opsput_irq(data->irq);
 }
 
-static unsigned int startup_opsput_irq(unsigned int irq)
-{
-	enable_opsput_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_irq(unsigned int irq)
+static void shutdown_opsput(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip opsput_irq_type =
 {
-	.name = "OPSPUT-IRQ",
-	.startup = startup_opsput_irq,
-	.shutdown = shutdown_opsput_irq,
-	.enable = enable_opsput_irq,
-	.disable = disable_opsput_irq,
-	.ack = mask_and_ack_opsput,
-	.end = end_opsput_irq
+	.name		= "OPSPUT-IRQ",
+	.irq_shutdown	= shutdown_opsput,
+	.irq_mask	= mask_opsput,
+	.irq_unmask	= unmask_opsput,
 };
 
 /*
@@ -100,7 +91,6 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	disable_opsput_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
 	outw(data, port);
@@ -112,50 +102,38 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	enable_opsput_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
 	outw(data, port);
 }
 
-static void mask_and_ack_opsput_pld(unsigned int irq)
+static void mask_opsput_pld(struct irq_data *data)
 {
-	disable_opsput_pld_irq(irq);
-//	mask_and_ack_opsput(M32R_IRQ_INT1);
+	disable_opsput_pld_irq(data->irq);
 }
 
-static void end_opsput_pld_irq(unsigned int irq)
+static void unmask_opsput_pld(struct irq_data *data)
 {
-	enable_opsput_pld_irq(irq);
-	end_opsput_irq(M32R_IRQ_INT1);
+	enable_opsput_pld_irq(data->irq);
+	enable_opsput_irq(M32R_IRQ_INT1);
 }
 
-static unsigned int startup_opsput_pld_irq(unsigned int irq)
-{
-	enable_opsput_pld_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_pld_irq(unsigned int irq)
+static void shutdown_opsput_pld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2pldirq(irq);
-//	shutdown_opsput_irq(M32R_IRQ_INT1);
+	pldirq = irq2pldirq(data->irq);
 	port = pldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip opsput_pld_irq_type =
 {
-	.name = "OPSPUT-PLD-IRQ",
-	.startup = startup_opsput_pld_irq,
-	.shutdown = shutdown_opsput_pld_irq,
-	.enable = enable_opsput_pld_irq,
-	.disable = disable_opsput_pld_irq,
-	.ack = mask_and_ack_opsput_pld,
-	.end = end_opsput_pld_irq
+	.name		= "OPSPUT-PLD-IRQ",
+	.irq_shutdown	= shutdown_opsput_pld,
+	.irq_mask	= mask_opsput_pld,
+	.irq_unmask	= unmask_opsput_pld,
 };
 
 /*
@@ -189,42 +167,33 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_opsput_lanpld(unsigned int irq)
+static void mask_opsput_lanpld(struct irq_data *data)
 {
-	disable_opsput_lanpld_irq(irq);
+	disable_opsput_lanpld_irq(data->irq);
 }
 
-static void end_opsput_lanpld_irq(unsigned int irq)
+static void unmask_opsput_lanpld(struct irq_data *data)
 {
-	enable_opsput_lanpld_irq(irq);
-	end_opsput_irq(M32R_IRQ_INT0);
+	enable_opsput_lanpld_irq(data->irq);
+	enable_opsput_irq(M32R_IRQ_INT0);
 }
 
-static unsigned int startup_opsput_lanpld_irq(unsigned int irq)
-{
-	enable_opsput_lanpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_lanpld_irq(unsigned int irq)
+static void shutdown_opsput_lanpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lanpldirq(irq);
+	pldirq = irq2lanpldirq(data->irq);
 	port = lanpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip opsput_lanpld_irq_type =
 {
-	.name = "OPSPUT-PLD-LAN-IRQ",
-	.startup = startup_opsput_lanpld_irq,
-	.shutdown = shutdown_opsput_lanpld_irq,
-	.enable = enable_opsput_lanpld_irq,
-	.disable = disable_opsput_lanpld_irq,
-	.ack = mask_and_ack_opsput_lanpld,
-	.end = end_opsput_lanpld_irq
+	.name		= "OPSPUT-PLD-LAN-IRQ",
+	.irq_shutdown	= shutdown_opsput_lanpld,
+	.irq_mask	= mask_opsput_lanpld,
+	.irq_unmask	= unmask_opsput_lanpld,
 };
 
 /*
@@ -258,143 +227,109 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_opsput_lcdpld(unsigned int irq)
+static void mask_opsput_lcdpld(struct irq_data *data)
 {
-	disable_opsput_lcdpld_irq(irq);
+	disable_opsput_lcdpld_irq(data->irq);
 }
 
-static void end_opsput_lcdpld_irq(unsigned int irq)
+static void unmask_opsput_lcdpld(struct irq_data *data)
 {
-	enable_opsput_lcdpld_irq(irq);
-	end_opsput_irq(M32R_IRQ_INT2);
+	enable_opsput_lcdpld_irq(data->irq);
+	enable_opsput_irq(M32R_IRQ_INT2);
 }
 
-static unsigned int startup_opsput_lcdpld_irq(unsigned int irq)
-{
-	enable_opsput_lcdpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_lcdpld_irq(unsigned int irq)
+static void shutdown_opsput_lcdpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lcdpldirq(irq);
+	pldirq = irq2lcdpldirq(data->irq);
 	port = lcdpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct irq_chip opsput_lcdpld_irq_type =
-{
-	"OPSPUT-PLD-LCD-IRQ",
-	startup_opsput_lcdpld_irq,
-	shutdown_opsput_lcdpld_irq,
-	enable_opsput_lcdpld_irq,
-	disable_opsput_lcdpld_irq,
-	mask_and_ack_opsput_lcdpld,
-	end_opsput_lcdpld_irq
+static struct irq_chip opsput_lcdpld_irq_type = {
+	.name		= "OPSPUT-PLD-LCD-IRQ",
+	.irq_shutdown	= shutdown_opsput_lcdpld,
+	.irq_mask	= mask_opsput_lcdpld,
+	.irq_unmask	= unmask_opsput_lcdpld,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT#0: LAN controller on OPSPUT-LAN (SMC91C111)*/
-	irq_desc[OPSPUT_LAN_IRQ_LAN].status = IRQ_DISABLED;
-	irq_desc[OPSPUT_LAN_IRQ_LAN].chip = &opsput_lanpld_irq_type;
-	irq_desc[OPSPUT_LAN_IRQ_LAN].action = 0;
-	irq_desc[OPSPUT_LAN_IRQ_LAN].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(OPSPUT_LAN_IRQ_LAN, &opsput_lanpld_irq_type,
+				 handle_level_irq);
 	lanpld_icu_data[irq2lanpldirq(OPSPUT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* "H" edge sense */
 	disable_opsput_lanpld_irq(OPSPUT_LAN_IRQ_LAN);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_opsput_irq(M32R_IRQ_MFT2);
 
 	/* SIO0 : receive */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0 : send */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1 : receive */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1 : send */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO1_S);
 
 	/* DMA1 : */
-	irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_DMA1].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_DMA1].action = 0;
-	irq_desc[M32R_IRQ_DMA1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_DMA1, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_DMA1].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_DMA1);
 
 #ifdef CONFIG_SERIAL_M32R_PLDSIO
 	/* INT#1: SIO0 Receive on PLD */
-	irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_RCV].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
-	irq_desc[PLD_IRQ_SIO0_RCV].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_RCV, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_opsput_pld_irq(PLD_IRQ_SIO0_RCV);
 
 	/* INT#1: SIO0 Send on PLD */
-	irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_SND].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_SND].action = 0;
-	irq_desc[PLD_IRQ_SIO0_SND].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_SND, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_opsput_pld_irq(PLD_IRQ_SIO0_SND);
 #endif  /* CONFIG_SERIAL_M32R_PLDSIO */
 
 	/* INT#1: CFC IREQ on PLD */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 	disable_opsput_pld_irq(PLD_IRQ_CFIREQ);
 
 	/* INT#1: CFC Insert on PLD */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00;	/* 'L' edge sense */
 	disable_opsput_pld_irq(PLD_IRQ_CFC_INSERT);
 
 	/* INT#1: CFC Eject on PLD */
-	irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_EJECT].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
-	irq_desc[PLD_IRQ_CFC_EJECT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* 'H' edge sense */
 	disable_opsput_pld_irq(PLD_IRQ_CFC_EJECT);
 
@@ -413,14 +348,11 @@
 	enable_opsput_irq(M32R_IRQ_INT1);
 
 #if defined(CONFIG_USB)
-	outw(USBCR_OTGS, USBCR); 	/* USBCR: non-OTG */
-
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].chip = &opsput_lcdpld_irq_type;
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].action = 0;
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].depth = 1;
-    lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
-    disable_opsput_lcdpld_irq(OPSPUT_LCD_IRQ_USB_INT1);
+	outw(USBCR_OTGS, USBCR);	/* USBCR: non-OTG */
+	set_irq_chip_and_handler(OPSPUT_LCD_IRQ_USB_INT1,
+				 &opsput_lcdpld_irq_type, handle_level_irq);
+	lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
+	disable_opsput_lcdpld_irq(OPSPUT_LCD_IRQ_USB_INT1);
 #endif
 	/*
 	 * INT2# is used for BAT, USB, AUDIO
@@ -433,10 +365,8 @@
 	/*
 	 * INT3# is used for AR
 	 */
-	irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT3].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_INT3].action = 0;
-	irq_desc[M32R_IRQ_INT3].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT3, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_opsput_irq(M32R_IRQ_INT3);
 #endif /* CONFIG_VIDEO_M32R_AR */
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 1beac7a..f3cff26 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -37,39 +37,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_mappi(struct irq_data *data)
 {
-	disable_mappi_irq(irq);
+	disable_mappi_irq(data->irq);
 }
 
-static void end_mappi_irq(unsigned int irq)
+static void unmask_mappi(struct irq_data *data)
 {
-	enable_mappi_irq(irq);
+	enable_mappi_irq(data->irq);
 }
 
-static unsigned int startup_mappi_irq(unsigned int irq)
-{
-	enable_mappi_irq(irq);
-	return 0;
-}
-
-static void shutdown_mappi_irq(unsigned int irq)
+static void shutdown_mappi(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip mappi_irq_type =
 {
-	.name = "M32700-IRQ",
-	.startup = startup_mappi_irq,
-	.shutdown = shutdown_mappi_irq,
-	.enable = enable_mappi_irq,
-	.disable = disable_mappi_irq,
-	.ack = mask_and_ack_mappi,
-	.end = end_mappi_irq
+	.name		= "M32700-IRQ",
+	.irq_shutdown	= shutdown_mappi,
+	.irq_mask	= mask_mappi,
+	.irq_unmask	= unmask_mappi,
 };
 
 /*
@@ -107,42 +98,33 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_pld(unsigned int irq)
+static void mask_m32700ut_pld(struct irq_data *data)
 {
-	disable_m32700ut_pld_irq(irq);
+	disable_m32700ut_pld_irq(data->irq);
 }
 
-static void end_m32700ut_pld_irq(unsigned int irq)
+static void unmask_m32700ut_pld(struct irq_data *data)
 {
-	enable_m32700ut_pld_irq(irq);
-	end_mappi_irq(M32R_IRQ_INT1);
+	enable_m32700ut_pld_irq(data->irq);
+	enable_mappi_irq(M32R_IRQ_INT1);
 }
 
-static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
-{
-	enable_m32700ut_pld_irq(irq);
-	return 0;
-}
-
-static void shutdown_m32700ut_pld_irq(unsigned int irq)
+static void shutdown_m32700ut_pld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2pldirq(irq);
+	pldirq = irq2pldirq(data->irq);
 	port = pldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.name = "USRV-PLD-IRQ",
-	.startup = startup_m32700ut_pld_irq,
-	.shutdown = shutdown_m32700ut_pld_irq,
-	.enable = enable_m32700ut_pld_irq,
-	.disable = disable_m32700ut_pld_irq,
-	.ack = mask_and_ack_m32700ut_pld,
-	.end = end_m32700ut_pld_irq
+	.name		= "USRV-PLD-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_pld,
+	.irq_mask	= mask_m32700ut_pld,
+	.irq_unmask	= unmask_m32700ut_pld,
 };
 
 void __init init_IRQ(void)
@@ -156,53 +138,42 @@
 		once++;
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi_irq(M32R_IRQ_MFT2);
 
 #if defined(CONFIG_SERIAL_M32R_SIO)
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_S);
 #endif  /* CONFIG_SERIAL_M32R_SIO */
 
 	/* INT#67-#71: CFC#0 IREQ on PLD */
 	for (i = 0 ; i < CONFIG_M32R_CFC_NUM ; i++ ) {
-		irq_desc[PLD_IRQ_CF0 + i].status = IRQ_DISABLED;
-		irq_desc[PLD_IRQ_CF0 + i].chip = &m32700ut_pld_irq_type;
-		irq_desc[PLD_IRQ_CF0 + i].action = 0;
-		irq_desc[PLD_IRQ_CF0 + i].depth = 1;	/* disable nested irq */
+		set_irq_chip_and_handler(PLD_IRQ_CF0 + i,
+					 &m32700ut_pld_irq_type,
+					 handle_level_irq);
 		pld_icu_data[irq2pldirq(PLD_IRQ_CF0 + i)].icucr
 			= PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 		disable_m32700ut_pld_irq(PLD_IRQ_CF0 + i);
@@ -210,19 +181,15 @@
 
 #if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
 	/* INT#76: 16552D#0 IREQ on PLD */
-	irq_desc[PLD_IRQ_UART0].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_UART0].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_UART0].action = 0;
-	irq_desc[PLD_IRQ_UART0].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_UART0, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_UART0)].icucr
 		= PLD_ICUCR_ISMOD03;	/* 'H' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_UART0);
 
 	/* INT#77: 16552D#1 IREQ on PLD */
-	irq_desc[PLD_IRQ_UART1].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_UART1].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_UART1].action = 0;
-	irq_desc[PLD_IRQ_UART1].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_UART1, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_UART1)].icucr
 		= PLD_ICUCR_ISMOD03;	/* 'H' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_UART1);
@@ -230,10 +197,8 @@
 
 #if defined(CONFIG_IDC_AK4524) || defined(CONFIG_IDC_AK4524_MODULE)
 	/* INT#80: AK4524 IREQ on PLD */
-	irq_desc[PLD_IRQ_SNDINT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SNDINT].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_SNDINT].action = 0;
-	irq_desc[PLD_IRQ_SNDINT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SNDINT, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SNDINT)].icucr
 		= PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_SNDINT);
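The m32r platform conversions above (mappi2, mappi3, oaks32r, opsput, usrv) all follow the same genirq pattern: the irq-number-based .startup/.shutdown/.enable/.disable/.ack/.end callbacks give way to irq_data-based .irq_mask/.irq_unmask/.irq_shutdown callbacks, and the open-coded irq_desc[] field writes are replaced by set_irq_chip_and_handler() with handle_level_irq. A minimal sketch of the resulting shape, not part of the patch, using hypothetical names (demo_*, DEMO_IRQ):

	#include <linux/init.h>
	#include <linux/irq.h>

	#define DEMO_IRQ	5	/* hypothetical interrupt number */

	static void demo_icu_write(unsigned int irq, int enable)
	{
		/* a platform-specific ICU register write would go here */
	}

	static void demo_mask(struct irq_data *data)
	{
		demo_icu_write(data->irq, 0);	/* mask the line */
	}

	static void demo_unmask(struct irq_data *data)
	{
		demo_icu_write(data->irq, 1);	/* unmask the line */
	}

	static struct irq_chip demo_irq_chip = {
		.name		= "DEMO-IRQ",
		.irq_mask	= demo_mask,
		.irq_unmask	= demo_unmask,
	};

	void __init demo_init_IRQ(void)
	{
		/* replaces the old irq_desc[].status/.chip/.action/.depth writes */
		set_irq_chip_and_handler(DEMO_IRQ, &demo_irq_chip, handle_level_irq);
	}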
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index b1577f7..82a4bb5 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -610,17 +610,17 @@
 
 static int __init amiga_savekmsg_setup(char *arg)
 {
-	static struct resource debug_res = { .name = "Debug" };
-
 	if (!MACH_IS_AMIGA || strcmp(arg, "mem"))
-		goto done;
+		return 0;
 
-	if (!AMIGAHW_PRESENT(CHIP_RAM)) {
-		printk("Warning: no chipram present for debugging\n");
-		goto done;
+	if (amiga_chip_size < SAVEKMSG_MAXMEM) {
+		pr_err("Not enough chipram for debugging\n");
+		return -ENOMEM;
 	}
 
-	savekmsg = amiga_chip_alloc_res(SAVEKMSG_MAXMEM, &debug_res);
+	/* Just steal the block, the chipram allocator isn't functional yet */
+	amiga_chip_size -= SAVEKMSG_MAXMEM;
+	savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size);
 	savekmsg->magic1 = SAVEKMSG_MAGIC1;
 	savekmsg->magic2 = SAVEKMSG_MAGIC2;
 	savekmsg->magicptr = ZTWO_PADDR(savekmsg);
@@ -628,8 +628,6 @@
 
 	amiga_console_driver.write = amiga_mem_console_write;
 	register_console(&amiga_console_driver);
-
-done:
 	return 0;
 }
 
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 39478dd..26a804e 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -388,9 +388,9 @@
 	}
 
 	if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) {
-		scc.cha_a_ctrl = 9;
+		atari_scc.cha_a_ctrl = 9;
 		MFPDELAY();
-		scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */
+		atari_scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */
 	}
 
 	if (ATARIHW_PRESENT(SCU)) {
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index ae2d96e..4203d10 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -315,7 +315,7 @@
 		ATARIHW_SET(SCC_DMA);
 		printk("SCC_DMA ");
 	}
-	if (scc_test(&scc.cha_a_ctrl)) {
+	if (scc_test(&atari_scc.cha_a_ctrl)) {
 		ATARIHW_SET(SCC);
 		printk("SCC ");
 	}
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
index 28efdc3..5a48424 100644
--- a/arch/m68k/atari/debug.c
+++ b/arch/m68k/atari/debug.c
@@ -53,9 +53,9 @@
 {
 	do {
 		MFPDELAY();
-	} while (!(scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
+	} while (!(atari_scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
 	MFPDELAY();
-	scc.cha_b_data = c;
+	atari_scc.cha_b_data = c;
 }
 
 static void atari_scc_console_write(struct console *co, const char *str,
@@ -140,9 +140,9 @@
 {
 	do {
 		MFPDELAY();
-	} while (!(scc.cha_b_ctrl & 0x01)); /* wait for rx buf filled */
+	} while (!(atari_scc.cha_b_ctrl & 0x01)); /* wait for rx buf filled */
 	MFPDELAY();
-	return scc.cha_b_data;
+	return atari_scc.cha_b_data;
 }
 
 int atari_midi_console_wait_key(struct console *co)
@@ -185,9 +185,9 @@
 
 #define SCC_WRITE(reg, val)				\
 	do {						\
-		scc.cha_b_ctrl = (reg);			\
+		atari_scc.cha_b_ctrl = (reg);		\
 		MFPDELAY();				\
-		scc.cha_b_ctrl = (val);			\
+		atari_scc.cha_b_ctrl = (val);		\
 		MFPDELAY();				\
 	} while (0)
 
@@ -240,7 +240,7 @@
 	reg3 = (cflag & CSIZE) == CS8 ? 0xc0 : 0x40;
 	reg5 = (cflag & CSIZE) == CS8 ? 0x60 : 0x20 | 0x82 /* assert DTR/RTS */;
 
-	(void)scc.cha_b_ctrl;		/* reset reg pointer */
+	(void)atari_scc.cha_b_ctrl;	/* reset reg pointer */
 	SCC_WRITE(9, 0xc0);		/* reset */
 	LONG_DELAY();			/* extra delay after WR9 access */
 	SCC_WRITE(4, (cflag & PARENB) ? ((cflag & PARODD) ? 0x01 : 0x03)
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
index 73613b5..26e85e2 100644
--- a/arch/m68k/ifpsp060/src/fpsp.S
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -3881,7 +3881,7 @@
 # FP Unimplemented Instruction stack frame and jump to that entry
 # point.
 #
-# but, if the FPU is disabled, then we need to jump to the FPU diabled
+# but, if the FPU is disabled, then we need to jump to the FPU disabled
 # entry point.
 	movc		%pcr,%d0
 	btst		&0x1,%d0
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index a714e1a..f51f709 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -449,7 +449,7 @@
   u_char char_dummy3;
   u_char cha_b_data;
  };
-# define scc ((*(volatile struct SCC*)SCC_BAS))
+# define atari_scc ((*(volatile struct SCC*)SCC_BAS))
 
 /* The ESCC (Z85230) in an Atari ST. The channels are reversed! */
 # define st_escc ((*(volatile struct SCC*)0xfffffa31))
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index 7085bd5..cb88aa9 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -2,21 +2,22 @@
 #define _M68KNOMMU_CACHEFLUSH_H
 
 /*
- * (C) Copyright 2000-2004, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2000-2010, Greg Ungerer <gerg@snapgear.com>
  */
 #include <linux/mm.h>
+#include <asm/mcfsim.h>
 
 #define flush_cache_all()			__flush_cache_all()
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	__flush_cache_all()
+#define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr)		do { } while (0)
-#define flush_dcache_range(start,len)		__flush_cache_all()
+#define flush_dcache_range(start, len)		__flush_dcache_all()
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start,len)		__flush_cache_all()
+#define flush_icache_range(start, len)		__flush_icache_all()
 #define flush_icache_page(vma,pg)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define flush_cache_vmap(start, end)		do { } while (0)
@@ -27,66 +28,52 @@
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
+void mcf_cache_push(void);
+
 static inline void __flush_cache_all(void)
 {
-#if defined(CONFIG_M5407) || defined(CONFIG_M548x)
-	/*
-	 *	Use cpushl to push and invalidate all cache lines.
-	 *	Gas doesn't seem to know how to generate the ColdFire
-	 *	cpushl instruction... Oh well, bit stuff it for now.
-	 */
+#ifdef CACHE_PUSH
+	mcf_cache_push();
+#endif
+#ifdef CACHE_INVALIDATE
 	__asm__ __volatile__ (
-		"nop\n\t"
-		"clrl	%%d0\n\t"
-		"1:\n\t"
-		"movel	%%d0,%%a0\n\t"
-		"2:\n\t"
-		".word	0xf468\n\t"
-		"addl	#0x10,%%a0\n\t"
-		"cmpl	#0x00000800,%%a0\n\t"
-		"blt	2b\n\t"
-		"addql	#1,%%d0\n\t"
-		"cmpil	#4,%%d0\n\t"
-		"bne	1b\n\t"
-		"movel	#0xb6088500,%%d0\n\t"
-		"movec	%%d0,%%CACR\n\t"
-		: : : "d0", "a0" );
-#endif /* CONFIG_M5407 */
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x)
-	__asm__ __volatile__ (
-		"movel	#0x81400100, %%d0\n\t"
+		"movel	%0, %%d0\n\t"
 		"movec	%%d0, %%CACR\n\t"
 		"nop\n\t"
-		: : : "d0" );
-#endif /* CONFIG_M523x || CONFIG_M527x */
-#if defined(CONFIG_M528x)
-	__asm__ __volatile__ (
-		"movel	#0x81000200, %%d0\n\t"
-		"movec	%%d0, %%CACR\n\t"
-		"nop\n\t"
-		: : : "d0" );
-#endif /* CONFIG_M528x */
-#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || defined(CONFIG_M5272)
-	__asm__ __volatile__ (
-		"movel	#0x81000100, %%d0\n\t"
-		"movec	%%d0, %%CACR\n\t"
-		"nop\n\t"
-		: : : "d0" );
-#endif /* CONFIG_M5206 || CONFIG_M5206e || CONFIG_M5272 */
-#ifdef CONFIG_M5249
-	__asm__ __volatile__ (
-		"movel	#0xa1000200, %%d0\n\t"
-		"movec	%%d0, %%CACR\n\t"
-		"nop\n\t"
-		: : : "d0" );
-#endif /* CONFIG_M5249 */
-#ifdef CONFIG_M532x
-	__asm__ __volatile__ (
-		"movel	#0x81000200, %%d0\n\t"
-		"movec	%%d0, %%CACR\n\t"
-		"nop\n\t"
-		: : : "d0" );
-#endif /* CONFIG_M532x */
+		: : "i" (CACHE_INVALIDATE) : "d0" );
+#endif
 }
 
+/*
+ * Some ColdFire parts implement separate instruction and data caches,
+ * on those we should just flush the appropriate cache. If we don't need
+ * to do any specific flushing then this will be optimized away.
+ */
+static inline void __flush_icache_all(void)
+{
+#ifdef CACHE_INVALIDATEI
+	__asm__ __volatile__ (
+		"movel	%0, %%d0\n\t"
+		"movec	%%d0, %%CACR\n\t"
+		"nop\n\t"
+		: : "i" (CACHE_INVALIDATEI) : "d0" );
+#endif
+}
+
+static inline void __flush_dcache_all(void)
+{
+#ifdef CACHE_PUSH
+	mcf_cache_push();
+#endif
+#ifdef CACHE_INVALIDATED
+	__asm__ __volatile__ (
+		"movel	%0, %%d0\n\t"
+		"movec	%%d0, %%CACR\n\t"
+		"nop\n\t"
+		: : "i" (CACHE_INVALIDATED) : "d0" );
+#else
+	/* Flush the write buffer */
+	__asm__ __volatile__ ( "nop" );
+#endif
+}
 #endif /* _M68KNOMMU_CACHEFLUSH_H */
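With the cache control values now supplied per-CPU through <asm/mcfsim.h> (CACHE_PUSH, CACHE_INVALIDATE, CACHE_INVALIDATEI, CACHE_INVALIDATED), the flush helpers above compile down to only what the selected ColdFire core actually needs. A small, hypothetical caller, only to show where the split I-cache path is used (the function and variable names are illustrative, not from this diff):

	#include <asm/cacheflush.h>

	/* After writing instructions into a buffer, invalidate the I-cache.
	 * On split-cache parts this now expands to __flush_icache_all()
	 * instead of the old whole-cache invalidate; the range arguments
	 * are ignored by this nommu implementation. */
	static void demo_sync_icache(unsigned long start, unsigned long len)
	{
		flush_icache_range(start, len);
	}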
diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
index 3b0a34d..213028c 100644
--- a/arch/m68k/include/asm/coldfire.h
+++ b/arch/m68k/include/asm/coldfire.h
@@ -32,7 +32,7 @@
  */
 #define	MCF_MBAR	0x10000000
 #define	MCF_MBAR2	0x80000000
-#if defined(CONFIG_M548x)
+#if defined(CONFIG_M54xx)
 #define	MCF_IPSBAR	MCF_MBAR
 #elif defined(CONFIG_M520x)
 #define	MCF_IPSBAR	0xFC000000
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 26be277..627d69b 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -42,12 +42,16 @@
  */
 
 #ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
 /*
- * This is made a little more tricky on the ColdFire. There is no
- * separate kernel and user stack pointers. Need to artificially
+ * This is made a little more tricky on older ColdFires. There is no
+ * separate supervisor and user stack pointers. Need to artificially
  * construct a usp in software... When doing this we need to disable
- * interrupts, otherwise bad things could happen.
+ * interrupts, otherwise bad things will happen.
  */
+.globl sw_usp
+.globl sw_ksp
+
 .macro SAVE_ALL
 	move	#0x2700,%sr		/* disable intrs */
 	btst	#5,%sp@(2)		/* from user? */
@@ -74,9 +78,7 @@
 	7:
 .endm
 
-.macro RESTORE_ALL
-	btst	#5,%sp@(PT_SR)		/* going user? */
-	bnes	8f			/* no, skip */
+.macro RESTORE_USER
 	move	#0x2700,%sr		/* disable intrs */
 	movel	sw_usp,%a0		/* get usp */
 	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
@@ -91,19 +93,22 @@
 	subql	#8,sw_usp		/* set exception */
 	movel	sw_usp,%sp		/* restore usp */
 	rte
-	8:
-	moveml	%sp@,%d1-%d5/%a0-%a2
-	lea	%sp@(32),%sp		/* space for 8 regs */
-	movel	%sp@+,%d0
-	addql	#4,%sp			/* orig d0 */
-	addl	%sp@+,%sp		/* stkadj */
-	rte
 .endm
 
+.macro RDUSP
+	movel	sw_usp,%a2
+.endm
+
+.macro WRUSP
+	movel	%a0,sw_usp
+.endm
+
+#else /* !CONFIG_COLDFIRE_SW_A7 */
 /*
- * Quick exception save, use current stack only.
+ * Modern ColdFire parts have separate supervisor and user stack
+ * pointers. Simple load and restore macros for this case.
  */
-.macro SAVE_LOCAL
+.macro SAVE_ALL
 	move	#0x2700,%sr		/* disable intrs */
 	clrl	%sp@-			/* stkadj */
 	movel	%d0,%sp@-		/* orig d0 */
@@ -112,7 +117,7 @@
 	moveml	%d1-%d5/%a0-%a2,%sp@
 .endm
 
-.macro RESTORE_LOCAL
+.macro RESTORE_USER
 	moveml	%sp@,%d1-%d5/%a0-%a2
 	lea	%sp@(32),%sp		/* space for 8 regs */
 	movel	%sp@+,%d0
@@ -121,6 +126,18 @@
 	rte
 .endm
 
+.macro RDUSP
+	/*move	%usp,%a2*/
+	.word	0x4e6a
+.endm
+
+.macro WRUSP
+	/*move	%a0,%usp*/
+	.word	0x4e60
+.endm
+
+#endif /* !CONFIG_COLDFIRE_SW_A7 */
+
 .macro SAVE_SWITCH_STACK
 	lea	%sp@(-24),%sp		/* 6 regs */
 	moveml	%a3-%a6/%d6-%d7,%sp@
@@ -131,14 +148,6 @@
 	lea	%sp@(24),%sp		/* 6 regs */
 .endm
 
-/*
- * Software copy of the user and kernel stack pointers... Ugh...
- * Need these to get around ColdFire not having separate kernel
- * and user stack pointers.
- */
-.globl sw_usp
-.globl sw_ksp
-
 #else /* !CONFIG_COLDFIRE */
 
 /*
@@ -167,6 +176,6 @@
 	moveml	%sp@+,%a3-%a6/%d6-%d7
 .endm
 
-#endif /* !CONFIG_COLDFIRE */
+#endif /* !COLDFIRE_SW_A7 */
 #endif /* __ASSEMBLY__ */
 #endif /* __M68KNOMMU_ENTRY_H */
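The entry_no.h rework above separates two ColdFire cases: older cores (CONFIG_COLDFIRE_SW_A7) that must keep a software copy of the user stack pointer in sw_usp, and newer cores where the CACR_EUSP bit (see the new m52xxacr.h/m53xxacr.h headers below) enables a real hardware USP that the RDUSP/WRUSP macros reach through hand-encoded opcodes. A hypothetical, standalone C illustration of the hardware-USP read; the wrapper is not part of the patch, only the ".word 0x4e6a" encoding (move %usp,%a2) is taken from it:

	/* illustrative only -- the kernel does this inside assembly macros */
	static inline unsigned long demo_read_usp(void)
	{
		unsigned long usp;

		__asm__ __volatile__ (
			".word	0x4e6a\n\t"	/* move %usp,%a2 */
			"movel	%%a2,%0"	/* copy it to a data register */
			: "=d" (usp) : : "a2");
		return usp;
	}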
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 1b57adb..c64c7b7 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -37,7 +37,7 @@
 #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
     defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M548x)
+    defined(CONFIG_M532x) || defined(CONFIG_M54xx)
 
 /* These parts have GPIO organized by 8 bit ports */
 
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index 6e2413e..cf20f30 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -145,7 +145,6 @@
 #define IOMAP_WRITETHROUGH		3
 
 extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
-extern void __iounmap(void *addr, unsigned long size);
 
 static inline void *ioremap(unsigned long physaddr, unsigned long size)
 {
diff --git a/arch/m68k/include/asm/m5206sim.h b/arch/m68k/include/asm/m5206sim.h
index 9c384e2..561b03b5 100644
--- a/arch/m68k/include/asm/m5206sim.h
+++ b/arch/m68k/include/asm/m5206sim.h
@@ -12,6 +12,10 @@
 #define	m5206sim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m5206)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m52xxacr.h>
 
 /*
  *	Define the 5206 SIM register set addresses.
@@ -88,6 +92,14 @@
 #define	MCFSIM_PADDR		(MCF_MBAR + 0x1c5)	/* Parallel Direction (r/w) */
 #define	MCFSIM_PADAT		(MCF_MBAR + 0x1c9)	/* Parallel Port Value (r/w) */
 
+#if defined(CONFIG_NETtel)
+#define	MCFUART_BASE1		0x180		/* Base address of UART1 */
+#define	MCFUART_BASE2		0x140		/* Base address of UART2 */
+#else
+#define	MCFUART_BASE1		0x140		/* Base address of UART1 */
+#define	MCFUART_BASE2		0x180		/* Base address of UART2 */
+#endif
+
 /*
  *	Define system peripheral IRQ usage.
  */
@@ -95,7 +107,7 @@
 #define	MCF_IRQ_PROFILER	31		/* Timer1, Level 7 */
 
 /*
- * Generic GPIO
+ *	Generic GPIO
  */
 #define MCFGPIO_PIN_MAX		8
 #define MCFGPIO_IRQ_VECBASE	-1
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index db824a4..88ed823 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -11,6 +11,11 @@
 #define m520xsim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m520x)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m52xxacr.h>
+
 /*
  *  Define the 520x SIM register set addresses.
  */
@@ -54,6 +59,9 @@
 #define MCFSIM_SDCS0        0x000a8110	/* SDRAM Chip Select 0 Configuration */
 #define MCFSIM_SDCS1        0x000a8114	/* SDRAM Chip Select 1 Configuration */
 
+/*
+ * EPORT and GPIO registers.
+ */
 #define MCFEPORT_EPDDR			0xFC088002
 #define MCFEPORT_EPDR			0xFC088004
 #define MCFEPORT_EPPDR			0xFC088005
@@ -97,6 +105,7 @@
 #define MCFGPIO_PCLRR_UART		0xFC0A402A
 #define MCFGPIO_PCLRR_FECH		0xFC0A402B
 #define MCFGPIO_PCLRR_FECL		0xFC0A402C
+
 /*
  * Generic GPIO support
  */
@@ -109,7 +118,6 @@
 #define MCFGPIO_PIN_MAX			80
 #define MCFGPIO_IRQ_MAX			8
 #define MCFGPIO_IRQ_VECBASE		MCFINT_VECBASE
-/****************************************************************************/
 
 #define MCF_GPIO_PAR_UART                   (0xA4036)
 #define MCF_GPIO_PAR_FECI2C                 (0xA4033)
@@ -126,6 +134,13 @@
 #define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2   (0x04)
 
 /*
+ *  UART module.
+ */
+#define MCFUART_BASE1		0x60000		/* Base address of UART1 */
+#define MCFUART_BASE2		0x64000		/* Base address of UART2 */
+#define MCFUART_BASE3		0x68000		/* Base address of UART3 */
+
+/*
  *  Reset Control Unit.
  */
 #define	MCF_RCR			0xFC0A0000
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h
index e8d06b2..4ad7a00 100644
--- a/arch/m68k/include/asm/m523xsim.h
+++ b/arch/m68k/include/asm/m523xsim.h
@@ -11,6 +11,10 @@
 #define	m523xsim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m523x)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m52xxacr.h>
 
 /*
  *	Define the 523x SIM register set addresses.
@@ -50,6 +54,13 @@
 #define	MCF_RCR_SWRESET		0x80		/* Software reset bit */
 #define	MCF_RCR_FRCSTOUT	0x40		/* Force external reset */
 
+/*
+ *  UART module.
+ */
+#define MCFUART_BASE1		0x200           /* Base address of UART1 */
+#define MCFUART_BASE2		0x240           /* Base address of UART2 */
+#define MCFUART_BASE3		0x280           /* Base address of UART3 */
+
 #define MCFGPIO_PODR_ADDR	(MCF_IPSBAR + 0x100000)
 #define MCFGPIO_PODR_DATAH	(MCF_IPSBAR + 0x100001)
 #define MCFGPIO_PODR_DATAL	(MCF_IPSBAR + 0x100002)
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h
index 79b7b40..4908b11 100644
--- a/arch/m68k/include/asm/m5249sim.h
+++ b/arch/m68k/include/asm/m5249sim.h
@@ -11,6 +11,11 @@
 #define	m5249sim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m5249)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m52xxacr.h>
+
 /*
  *	Define the 5249 SIM register set addresses.
  */
@@ -56,6 +61,11 @@
 #define MCFSIM_DACR1		0x110		/* DRAM 1 Addr and Ctrl (r/w) */
 #define MCFSIM_DMR1		0x114		/* DRAM 1 Mask reg (r/w) */
 
+/*
+ *	UART module.
+ */
+#define MCFUART_BASE1		0x1c0           /* Base address of UART1 */
+#define MCFUART_BASE2		0x200           /* Base address of UART2 */
 
 /*
  *	Some symbol defines for the above...
diff --git a/arch/m68k/include/asm/m5272sim.h b/arch/m68k/include/asm/m5272sim.h
index df3332c..b7cc50a 100644
--- a/arch/m68k/include/asm/m5272sim.h
+++ b/arch/m68k/include/asm/m5272sim.h
@@ -12,6 +12,11 @@
 #define	m5272sim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m5272)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m52xxacr.h>
+
 /*
  *	Define the 5272 SIM register set addresses.
  */
@@ -62,6 +67,9 @@
 #define	MCFSIM_DCMR1		0x5c		/* DRAM 1 Mask reg (r/w) */
 #define	MCFSIM_DCCR1		0x63		/* DRAM 1 Control reg (r/w) */
 
+#define	MCFUART_BASE1		0x100		/* Base address of UART1 */
+#define	MCFUART_BASE2		0x140		/* Base address of UART2 */
+
 #define	MCFSIM_PACNT		(MCF_MBAR + 0x80) /* Port A Control (r/w) */
 #define	MCFSIM_PADDR		(MCF_MBAR + 0x84) /* Port A Direction (r/w) */
 #define	MCFSIM_PADAT		(MCF_MBAR + 0x86) /* Port A Data (r/w) */
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 1feb46f..e8042e8 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -11,6 +11,10 @@
 #define	m527xsim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m527x)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m52xxacr.h>
 
 /*
  *	Define the 5270/5271 SIM register set addresses.
@@ -55,6 +59,12 @@
 #define	MCFSIM_DMR1		0x5c		/* SDRAM address mask 1 */
 #endif
 
+/*
+ *	UART module.
+ */
+#define MCFUART_BASE1		0x200           /* Base address of UART1 */
+#define MCFUART_BASE2		0x240           /* Base address of UART2 */
+#define MCFUART_BASE3		0x280           /* Base address of UART3 */
 
 #ifdef CONFIG_M5271
 #define MCFGPIO_PODR_ADDR	(MCF_IPSBAR + 0x100000)
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index 891cbed..a6d2f4d 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -11,6 +11,10 @@
 #define	m528xsim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m528x)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m52xxacr.h>
 
 /*
  *	Define the 5280/5282 SIM register set addresses.
@@ -42,6 +46,13 @@
 #define	MCFSIM_DMR1		0x54		/* SDRAM address mask 1 */
 
 /*
+ *	UART module.
+ */
+#define MCFUART_BASE1		0x200           /* Base address of UART1 */
+#define MCFUART_BASE2		0x240           /* Base address of UART2 */
+#define MCFUART_BASE3		0x280           /* Base address of UART3 */
+
+/*
  * 	GPIO registers
  */
 #define MCFGPIO_PORTA		(MCF_IPSBAR + 0x00100000)
diff --git a/arch/m68k/include/asm/m52xxacr.h b/arch/m68k/include/asm/m52xxacr.h
new file mode 100644
index 0000000..abc391a
--- /dev/null
+++ b/arch/m68k/include/asm/m52xxacr.h
@@ -0,0 +1,94 @@
+/****************************************************************************/
+
+/*
+ * m52xxacr.h -- ColdFire version 2 core cache support
+ *
+ * (C) Copyright 2010, Greg Ungerer <gerg@snapgear.com>
+ */
+
+/****************************************************************************/
+#ifndef m52xxacr_h
+#define m52xxacr_h
+/****************************************************************************/
+
+/*
+ * All variants of the ColdFire using version 2 cores have a similar
+ * cache setup. Although not absolutely identical, the cache register
+ * definitions are compatible for all of them. Mostly they support a
+ * configurable cache memory that can be instruction only, data only,
+ * or split instruction and data. The exception is the very old version 2
+ * core based parts, like the 5206(e), 5249 and 5272, which are instruction
+ * cache only. Cache size varies from 2k up to 16k.
+ */
+
+/*
+ * Define the Cache Control register flags.
+ */
+#define CACR_CENB	0x80000000	/* Enable cache */
+#define CACR_CDPI	0x10000000	/* Disable invalidation by CPUSHL */
+#define CACR_CFRZ	0x08000000	/* Cache freeze mode */
+#define CACR_CINV	0x01000000	/* Invalidate cache */
+#define CACR_DISI	0x00800000	/* Disable instruction cache */
+#define CACR_DISD	0x00400000	/* Disable data cache */
+#define CACR_INVI	0x00200000	/* Invalidate instruction cache */
+#define CACR_INVD	0x00100000	/* Invalidate data cache */
+#define CACR_CEIB	0x00000400	/* Non-cachable instruction burst */
+#define CACR_DCM	0x00000200	/* Default cache mode */
+#define CACR_DBWE	0x00000100	/* Buffered write enable */
+#define CACR_DWP	0x00000020	/* Write protection */
+#define CACR_EUSP	0x00000010	/* Enable separate user a7 */
+
+/*
+ * Define the Access Control register flags.
+ */
+#define ACR_BASE_POS	24		/* Address Base (upper 8 bits) */
+#define ACR_MASK_POS	16		/* Address Mask (next 8 bits) */
+#define ACR_ENABLE	0x00008000	/* Enable this ACR */
+#define ACR_USER	0x00000000	/* Allow only user accesses */
+#define ACR_SUPER	0x00002000	/* Allow supervisor access only */
+#define ACR_ANY		0x00004000	/* Allow any access type */
+#define ACR_CENB	0x00000000	/* Caching of region enabled */
+#define ACR_CDIS	0x00000040	/* Caching of region disabled */
+#define ACR_BWE		0x00000020	/* Write buffer enabled */
+#define ACR_WPROTECT	0x00000004	/* Write protect region */
+
+/*
+ * Set the cache controller settings we will use. On the cores that support
+ * a split cache configuration we allow all the combinations at Kconfig
+ * time. For those cores that only have an instruction cache we just set
+ * that as on.
+ */
+#if defined(CONFIG_CACHE_I)
+#define CACHE_TYPE	(CACR_DISD + CACR_EUSP)
+#define CACHE_INVTYPEI	0
+#elif defined(CONFIG_CACHE_D)
+#define CACHE_TYPE	(CACR_DISI + CACR_EUSP)
+#define CACHE_INVTYPED	0
+#elif defined(CONFIG_CACHE_BOTH)
+#define CACHE_TYPE	CACR_EUSP
+#define CACHE_INVTYPEI	CACR_INVI
+#define CACHE_INVTYPED	CACR_INVD
+#else
+/* These are the instruction cache only devices (no split cache, no eusp) */
+#define CACHE_TYPE	0
+#define CACHE_INVTYPEI	0
+#endif
+
+#define CACHE_INIT	(CACR_CINV + CACHE_TYPE)
+#define CACHE_MODE	(CACR_CENB + CACHE_TYPE + CACR_DCM)
+
+#define CACHE_INVALIDATE  (CACHE_MODE + CACR_CINV)
+#if defined(CACHE_INVTYPEI)
+#define CACHE_INVALIDATEI (CACHE_MODE + CACR_CINV + CACHE_INVTYPEI)
+#endif
+#if defined(CACHE_INVTYPED)
+#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINV + CACHE_INVTYPED)
+#endif
+
+#define ACR0_MODE	((CONFIG_RAMBASE & 0xff000000) + \
+			 (0x000f0000) + \
+			 (ACR_ENABLE + ACR_ANY + ACR_CENB + ACR_BWE))
+#define ACR1_MODE	0
+
+/****************************************************************************/
+#endif  /* m52xxacr_h */
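As a worked expansion of the version 2 core definitions above, taking CONFIG_CACHE_BOTH as the example Kconfig choice (every value comes from the #defines in this header):

	/*
	 * CACHE_TYPE        = CACR_EUSP                           = 0x00000010
	 * CACHE_MODE        = CACR_CENB + CACR_EUSP + CACR_DCM    = 0x80000210
	 * CACHE_INIT        = CACR_CINV + CACR_EUSP               = 0x01000010
	 * CACHE_INVALIDATE  = CACHE_MODE + CACR_CINV               = 0x81000210
	 * CACHE_INVALIDATEI = CACHE_MODE + CACR_CINV + CACR_INVI   = 0x81200210
	 * CACHE_INVALIDATED = CACHE_MODE + CACR_CINV + CACR_INVD   = 0x81100210
	 *
	 * __flush_cache_all() in cacheflush_no.h simply loads CACHE_INVALIDATE
	 * into the CACR via movec; the I-only and D-only helpers load the
	 * CACHE_INVALIDATEI / CACHE_INVALIDATED values instead.
	 */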
diff --git a/arch/m68k/include/asm/m5307sim.h b/arch/m68k/include/asm/m5307sim.h
index c6830e5..0bf5739 100644
--- a/arch/m68k/include/asm/m5307sim.h
+++ b/arch/m68k/include/asm/m5307sim.h
@@ -14,6 +14,11 @@
 #define	m5307sim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m5307)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m53xxacr.h>
+
 /*
  *	Define the 5307 SIM register set addresses.
  */
@@ -94,6 +99,17 @@
 #define	MCFSIM_PADAT		(MCF_MBAR + 0x248)
 
 /*
+ *  UART module.
+ */
+#if defined(CONFIG_NETtel) || defined(CONFIG_SECUREEDGEMP3)
+#define MCFUART_BASE1		0x200           /* Base address of UART1 */
+#define MCFUART_BASE2		0x1c0           /* Base address of UART2 */
+#else
+#define MCFUART_BASE1		0x1c0           /* Base address of UART1 */
+#define MCFUART_BASE2		0x200           /* Base address of UART2 */
+#endif
+
+/*
  * Generic GPIO support
  */
 #define MCFGPIO_PIN_MAX			16
@@ -146,32 +162,5 @@
 #define	MCF_IRQ_TIMER		30		/* Timer0, Level 6 */
 #define	MCF_IRQ_PROFILER	31		/* Timer1, Level 7 */
 
-/*
- *	Define the Cache register flags.
- */
-#define	CACR_EC			(1<<31)
-#define	CACR_ESB		(1<<29)
-#define	CACR_DPI		(1<<28)
-#define	CACR_HLCK		(1<<27)
-#define	CACR_CINVA		(1<<24)
-#define	CACR_DNFB		(1<<10)
-#define	CACR_DCM_WTHRU		(0<<8)
-#define	CACR_DCM_WBACK		(1<<8)
-#define	CACR_DCM_OFF_PRE	(2<<8)
-#define	CACR_DCM_OFF_IMP	(3<<8)
-#define	CACR_DW			(1<<5)
-
-#define	ACR_BASE_POS		24
-#define	ACR_MASK_POS		16
-#define	ACR_ENABLE		(1<<15)
-#define	ACR_USER		(0<<13)
-#define	ACR_SUPER		(1<<13)
-#define	ACR_ANY			(2<<13)
-#define	ACR_CM_WTHRU		(0<<5)
-#define	ACR_CM_WBACK		(1<<5)
-#define	ACR_CM_OFF_PRE		(2<<5)
-#define	ACR_CM_OFF_IMP		(3<<5)
-#define	ACR_WPROTECT		(1<<2)
-
 /****************************************************************************/
 #endif	/* m5307sim_h */
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h
index c4bf1c8..e6470f8 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m532xsim.h
@@ -9,6 +9,11 @@
 #define	m532xsim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m532x)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m53xxacr.h>
+
 #define MCF_REG32(x) (*(volatile unsigned long  *)(x))
 #define MCF_REG16(x) (*(volatile unsigned short *)(x))
 #define MCF_REG08(x) (*(volatile unsigned char  *)(x))
@@ -74,31 +79,11 @@
 #define	MCF_IRQ_PROFILER	(64 + 33)	/* Timer1 */
 
 /*
- *	Define the Cache register flags.
+ *  UART module.
  */
-#define	CACR_EC			(1<<31)
-#define	CACR_ESB		(1<<29)
-#define	CACR_DPI		(1<<28)
-#define	CACR_HLCK		(1<<27)
-#define	CACR_CINVA		(1<<24)
-#define	CACR_DNFB		(1<<10)
-#define	CACR_DCM_WTHRU		(0<<8)
-#define	CACR_DCM_WBACK		(1<<8)
-#define	CACR_DCM_OFF_PRE	(2<<8)
-#define	CACR_DCM_OFF_IMP	(3<<8)
-#define	CACR_DW			(1<<5)
-
-#define	ACR_BASE_POS		24
-#define	ACR_MASK_POS		16
-#define	ACR_ENABLE		(1<<15)
-#define	ACR_USER		(0<<13)
-#define	ACR_SUPER		(1<<13)
-#define	ACR_ANY			(2<<13)
-#define	ACR_CM_WTHRU		(0<<5)
-#define	ACR_CM_WBACK		(1<<5)
-#define	ACR_CM_OFF_PRE		(2<<5)
-#define	ACR_CM_OFF_IMP		(3<<5)
-#define	ACR_WPROTECT		(1<<2)
+#define MCFUART_BASE1		0xFC060000	/* Base address of UART1 */
+#define MCFUART_BASE2		0xFC064000	/* Base address of UART2 */
+#define MCFUART_BASE3		0xFC068000	/* Base address of UART3 */
 
 /*********************************************************************
  *
diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
new file mode 100644
index 0000000..cd952b0
--- /dev/null
+++ b/arch/m68k/include/asm/m53xxacr.h
@@ -0,0 +1,101 @@
+/****************************************************************************/
+
+/*
+ * m53xxacr.h -- ColdFire version 3 core cache support
+ *
+ * (C) Copyright 2010, Greg Ungerer <gerg@snapgear.com>
+ */
+
+/****************************************************************************/
+#ifndef m53xxacr_h
+#define m53xxacr_h
+/****************************************************************************/
+
+/*
+ * All variants of the ColdFire using version 3 cores have a similar
+ * cache setup. They have a unified instruction and data cache, with
+ * configurable write-through or copy-back operation.
+ */
+
+/*
+ * Define the Cache Control register flags.
+ */
+#define CACR_EC		0x80000000	/* Enable cache */
+#define CACR_ESB	0x20000000	/* Enable store buffer */
+#define CACR_DPI	0x10000000	/* Disable invalidation by CPUSHL */
+#define CACR_HLCK	0x08000000	/* Half cache lock mode */
+#define CACR_CINVA	0x01000000	/* Invalidate cache */
+#define CACR_DNFB	0x00000400	/* Inhibited fill buffer */
+#define CACR_DCM_WT	0x00000000	/* Cacheable write-through */
+#define CACR_DCM_CB	0x00000100	/* Cacheable copy-back */
+#define CACR_DCM_PRE	0x00000200	/* Cache inhibited, precise */
+#define CACR_DCM_IMPRE	0x00000300	/* Cache inhibited, imprecise */
+#define CACR_WPROTECT	0x00000020	/* Write protect */
+#define CACR_EUSP	0x00000010	/* Enable separate user a7 */
+
+/*
+ * Define the Access Control register flags.
+ */
+#define ACR_BASE_POS	24		/* Address Base (upper 8 bits) */
+#define ACR_MASK_POS	16		/* Address Mask (next 8 bits) */
+#define ACR_ENABLE	0x00008000	/* Enable this ACR */
+#define ACR_USER	0x00000000	/* Allow only user accesses */
+#define ACR_SUPER	0x00002000	/* Allow supervisor access only */
+#define ACR_ANY		0x00004000	/* Allow any access type */
+#define ACR_CM_WT	0x00000000	/* Cacheable, write-through */
+#define ACR_CM_CB	0x00000020	/* Cacheable, copy-back */
+#define ACR_CM_PRE	0x00000040	/* Cache inhibited, precise */
+#define ACR_CM_IMPRE	0x00000060	/* Cache inhibited, imprecise */
+#define ACR_WPROTECT	0x00000004	/* Write protect region */
+
+/*
+ * Define the cache type and arrangement (needed for pushes).
+ */
+#if defined(CONFIG_M5307)
+#define	CACHE_SIZE	0x2000		/* 8k of unified cache */
+#define	ICACHE_SIZE	CACHE_SIZE
+#define	DCACHE_SIZE	CACHE_SIZE
+#elif defined(CONFIG_M532x)
+#define	CACHE_SIZE	0x4000		/* 32k of unified cache */
+#define	ICACHE_SIZE	CACHE_SIZE
+#define	DCACHE_SIZE	CACHE_SIZE
+#endif
+
+#define	CACHE_LINE_SIZE	16		/* 16 byte line size */
+#define	CACHE_WAYS	4		/* 4 ways - set associative */
+
+/*
+ * Set the cache controller settings we will use. The default in the
+ * CACR is cache inhibited; we use the ACR register to enable caching
+ * on the regions we want (e.g. RAM).
+ */
+#if defined(CONFIG_CACHE_COPYBACK)
+#define CACHE_TYPE	ACR_CM_CB
+#define CACHE_PUSH
+#else
+#define CACHE_TYPE	ACR_CM_WT
+#endif
+
+#ifdef CONFIG_COLDFIRE_SW_A7
+#define CACHE_MODE	(CACR_EC + CACR_ESB + CACR_DCM_PRE)
+#else
+#define CACHE_MODE	(CACR_EC + CACR_ESB + CACR_DCM_PRE + CACR_EUSP)
+#endif
+
+/*
+ * Unified cache means we will never need to flush for coherency of
+ * instruction fetch. We will need to flush to maintain memory/DMA
+ * coherency though in all cases. And for copyback caches we will need
+ * to push cached data as well.
+ */
+#define CACHE_INIT	  CACR_CINVA
+#define CACHE_INVALIDATE  CACR_CINVA
+#define CACHE_INVALIDATED CACR_CINVA
+
+#define ACR0_MODE	((CONFIG_RAMBASE & 0xff000000) + \
+			 (0x000f0000) + \
+			 (ACR_ENABLE + ACR_ANY + CACHE_TYPE))
+#define ACR1_MODE	0
+
+/****************************************************************************/
+#endif  /* m53xxacr_h */
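And a matching worked expansion for the version 3 core header above, assuming CONFIG_CACHE_COPYBACK, a hardware user A7 (CONFIG_COLDFIRE_SW_A7 not set), and a purely illustrative CONFIG_RAMBASE of 0x40000000 (that base address is an assumption, not taken from this diff):

	/*
	 * CACHE_TYPE = ACR_CM_CB                                      = 0x00000020
	 * CACHE_MODE = CACR_EC + CACR_ESB + CACR_DCM_PRE + CACR_EUSP  = 0xa0000210
	 * ACR0_MODE  = (0x40000000 & 0xff000000) + 0x000f0000
	 *              + ACR_ENABLE + ACR_ANY + ACR_CM_CB             = 0x400fc020
	 *
	 * i.e. the default CACR mode leaves accesses cache-inhibited, while
	 * ACR0 marks the RAM window copy-back cacheable. CACHE_PUSH is
	 * defined for copy-back, so __flush_dcache_all() must push dirty
	 * lines (mcf_cache_push) before invalidating with CACR_CINVA.
	 */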
diff --git a/arch/m68k/include/asm/m5407sim.h b/arch/m68k/include/asm/m5407sim.h
index c399abb..75f5c28 100644
--- a/arch/m68k/include/asm/m5407sim.h
+++ b/arch/m68k/include/asm/m5407sim.h
@@ -14,6 +14,11 @@
 #define	m5407sim_h
 /****************************************************************************/
 
+#define	CPU_NAME		"COLDFIRE(m5407)"
+#define	CPU_INSTR_PER_JIFFY	3
+
+#include <asm/m54xxacr.h>
+
 /*
  *	Define the 5407 SIM register set addresses.
  */
@@ -73,6 +78,9 @@
 #define MCFSIM_DACR1		0x110		/* DRAM 1 Addr and Ctrl (r/w) */
 #define MCFSIM_DMR1		0x114		/* DRAM 1 Mask reg (r/w) */
 
+#define MCFUART_BASE1		0x1c0           /* Base address of UART1 */
+#define MCFUART_BASE2		0x200           /* Base address of UART2 */
+
 #define	MCFSIM_PADDR		(MCF_MBAR + 0x244)
 #define	MCFSIM_PADAT		(MCF_MBAR + 0x248)
 
@@ -117,39 +125,5 @@
 #define	MCF_IRQ_TIMER		30		/* Timer0, Level 6 */
 #define	MCF_IRQ_PROFILER	31		/* Timer1, Level 7 */
 
-/*
- *	Define the Cache register flags.
- */
-#define	CACR_DEC		0x80000000	/* Enable data cache */
-#define	CACR_DWP		0x40000000	/* Data write protection */
-#define	CACR_DESB		0x20000000	/* Enable data store buffer */
-#define	CACR_DDPI		0x10000000	/* Disable CPUSHL */
-#define	CACR_DHCLK		0x08000000	/* Half data cache lock mode */
-#define	CACR_DDCM_WT		0x00000000	/* Write through cache*/
-#define	CACR_DDCM_CP		0x02000000	/* Copyback cache */
-#define	CACR_DDCM_P		0x04000000	/* No cache, precise */
-#define	CACR_DDCM_IMP		0x06000000	/* No cache, imprecise */
-#define	CACR_DCINVA		0x01000000	/* Invalidate data cache */
-#define	CACR_BEC		0x00080000	/* Enable branch cache */
-#define	CACR_BCINVA		0x00040000	/* Invalidate branch cache */
-#define	CACR_IEC		0x00008000	/* Enable instruction cache */
-#define	CACR_DNFB		0x00002000	/* Inhibited fill buffer */
-#define	CACR_IDPI		0x00001000	/* Disable CPUSHL */
-#define	CACR_IHLCK		0x00000800	/* Intruction cache half lock */
-#define	CACR_IDCM		0x00000400	/* Intruction cache inhibit */
-#define	CACR_ICINVA		0x00000100	/* Invalidate instr cache */
-
-#define	ACR_BASE_POS		24		/* Address Base */
-#define	ACR_MASK_POS		16		/* Address Mask */
-#define	ACR_ENABLE		0x00008000	/* Enable address */
-#define	ACR_USER		0x00000000	/* User mode access only */
-#define	ACR_SUPER		0x00002000	/* Supervisor mode only */
-#define	ACR_ANY			0x00004000	/* Match any access mode */
-#define	ACR_CM_WT		0x00000000	/* Write through mode */
-#define	ACR_CM_CP		0x00000020	/* Copyback mode */
-#define	ACR_CM_OFF_PRE		0x00000040	/* No cache, precise */
-#define	ACR_CM_OFF_IMP		0x00000060	/* No cache, imprecise */
-#define	ACR_WPROTECT		0x00000004	/* Write protect */
-
 /****************************************************************************/
 #endif	/* m5407sim_h */
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
new file mode 100644
index 0000000..16a1835
--- /dev/null
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -0,0 +1,97 @@
+/*
+ * Bit definitions for the MCF54xx ACR and CACR registers.
+ */
+
+#ifndef	m54xxacr_h
+#define m54xxacr_h
+
+/*
+ *	Define the Cache register flags.
+ */
+#define CACR_DEC	0x80000000	/* Enable data cache */
+#define CACR_DWP	0x40000000	/* Data write protection */
+#define CACR_DESB	0x20000000	/* Enable data store buffer */
+#define CACR_DDPI	0x10000000	/* Disable invalidation by CPUSHL */
+#define CACR_DHCLK	0x08000000	/* Half data cache lock mode */
+#define CACR_DDCM_WT	0x00000000	/* Write through cache */
+#define CACR_DDCM_CP	0x02000000	/* Copyback cache */
+#define CACR_DDCM_P	0x04000000	/* No cache, precise */
+#define CACR_DDCM_IMP	0x06000000	/* No cache, imprecise */
+#define CACR_DCINVA	0x01000000	/* Invalidate data cache */
+#define CACR_BEC	0x00080000	/* Enable branch cache */
+#define CACR_BCINVA	0x00040000	/* Invalidate branch cache */
+#define CACR_IEC	0x00008000	/* Enable instruction cache */
+#define CACR_DNFB	0x00002000	/* Inhibited fill buffer */
+#define CACR_IDPI	0x00001000	/* Disable CPUSHL */
+#define CACR_IHLCK	0x00000800	/* Instruction cache half lock */
+#define CACR_IDCM	0x00000400	/* Instruction cache inhibit */
+#define CACR_ICINVA	0x00000100	/* Invalidate instr cache */
+#define CACR_EUSP	0x00000020	/* Enable separate user a7 */
+
+#define ACR_BASE_POS	24		/* Address Base */
+#define ACR_MASK_POS	16		/* Address Mask */
+#define ACR_ENABLE	0x00008000	/* Enable address */
+#define ACR_USER	0x00000000	/* User mode access only */
+#define ACR_SUPER	0x00002000	/* Supervisor mode only */
+#define ACR_ANY		0x00004000	/* Match any access mode */
+#define ACR_CM_WT	0x00000000	/* Write through mode */
+#define ACR_CM_CP	0x00000020	/* Copyback mode */
+#define ACR_CM_OFF_PRE	0x00000040	/* No cache, precise */
+#define ACR_CM_OFF_IMP	0x00000060	/* No cache, imprecise */
+#define ACR_CM		0x00000060	/* Cache mode mask */
+#define ACR_WPROTECT	0x00000004	/* Write protect */
+
+#if defined(CONFIG_M5407)
+
+#define ICACHE_SIZE 0x4000	/* instruction - 16k */
+#define DCACHE_SIZE 0x2000	/* data - 8k */
+
+#elif defined(CONFIG_M54xx)
+
+#define ICACHE_SIZE 0x8000	/* instruction - 32k */
+#define DCACHE_SIZE 0x8000	/* data - 32k */
+
+#endif
+
+#define CACHE_LINE_SIZE 0x0010	/* 16 bytes */
+#define CACHE_WAYS 4		/* 4 ways */
+
+/*
+ *	Version 4 cores have true Harvard-style separate instruction
+ *	and data caches. Enable the data and instruction caches, and also
+ *	enable the write buffers and branch accelerator.
+ */
+/* Attention: enabling CACR_DESB requires a "nop" to flush the store buffer */
+/* use '+' instead of '|' for assembler's sake */
+
+	/* Enable data cache */
+	/* Enable data store buffer */
+	/* outside ACRs: No cache, precise */
+	/* Enable instruction+branch caches */
+#if defined(CONFIG_M5407)
+#define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC)
+#else
+#define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
+#endif
+#if defined(CONFIG_CACHE_COPYBACK)
+#define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP)
+#else
+#define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_WT)
+#endif
+#define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY)
+
+#define CACHE_INIT	(CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+#define CACHE_INVALIDATE  (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+#define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
+#define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
+#define ACR0_MODE	(0x000f0000+DATA_CACHE_MODE)
+#define ACR1_MODE	0
+#define ACR2_MODE	(0x000f0000+INSN_CACHE_MODE)
+#define ACR3_MODE	0
+
+#if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_CP)
+/* Copyback cache mode must push dirty cache lines first */
+#define	CACHE_PUSH
+#endif
+
+#endif	/* m54xxacr_h */
diff --git a/arch/m68k/include/asm/m548xgpt.h b/arch/m68k/include/asm/m54xxgpt.h
similarity index 93%
rename from arch/m68k/include/asm/m548xgpt.h
rename to arch/m68k/include/asm/m54xxgpt.h
index c8ef158..df75dd8 100644
--- a/arch/m68k/include/asm/m548xgpt.h
+++ b/arch/m68k/include/asm/m54xxgpt.h
@@ -1,13 +1,13 @@
 /*
- * File:	m548xgpt.h
- * Purpose:	Register and bit definitions for the MCF548X
+ * File:	m54xxgpt.h
+ * Purpose:	Register and bit definitions for the MCF54XX
  *
  * Notes:
  *
  */
 
-#ifndef m548xgpt_h
-#define m548xgpt_h
+#ifndef m54xxgpt_h
+#define m54xxgpt_h
 
 /*********************************************************************
 *
@@ -59,11 +59,13 @@
 #define MCF_GPT_GMS_GPIO_INPUT     (0x00000000)
 #define MCF_GPT_GMS_GPIO_OUTLO     (0x00000020)
 #define MCF_GPT_GMS_GPIO_OUTHI     (0x00000030)
+#define MCF_GPT_GMS_GPIO_MASK      (0x00000030)
 #define MCF_GPT_GMS_TMS_DISABLE    (0x00000000)
 #define MCF_GPT_GMS_TMS_INCAPT     (0x00000001)
 #define MCF_GPT_GMS_TMS_OUTCAPT    (0x00000002)
 #define MCF_GPT_GMS_TMS_PWM        (0x00000003)
 #define MCF_GPT_GMS_TMS_GPIO       (0x00000004)
+#define MCF_GPT_GMS_TMS_MASK       (0x00000007)
 
 /* Bit definitions and macros for MCF_GPT_GCIR */
 #define MCF_GPT_GCIR_CNT(x)        (((x)&0x0000FFFF)<<0)
@@ -85,4 +87,4 @@
 
 /********************************************************************/
 
-#endif /* m548xgpt_h */
+#endif /* m54xxgpt_h */
diff --git a/arch/m68k/include/asm/m548xsim.h b/arch/m68k/include/asm/m54xxsim.h
similarity index 73%
rename from arch/m68k/include/asm/m548xsim.h
rename to arch/m68k/include/asm/m54xxsim.h
index 149135e..462ae53 100644
--- a/arch/m68k/include/asm/m548xsim.h
+++ b/arch/m68k/include/asm/m54xxsim.h
@@ -1,11 +1,16 @@
 /*
- *	m548xsim.h -- ColdFire 547x/548x System Integration Unit support.
+ *	m54xxsim.h -- ColdFire 547x/548x System Integration Unit support.
  */
 
-#ifndef	m548xsim_h
-#define m548xsim_h
+#ifndef	m54xxsim_h
+#define m54xxsim_h
 
-#define MCFINT_VECBASE      64
+#define	CPU_NAME		"COLDFIRE(m54xx)"
+#define	CPU_INSTR_PER_JIFFY	2
+
+#include <asm/m54xxacr.h>
+
+#define MCFINT_VECBASE		64
 
 /*
  *      Interrupt Controller Registers
@@ -22,6 +27,14 @@
 #define MCFINTC_ICR0		0x40		/* Base ICR register */
 
 /*
+ *	UART module.
+ */
+#define MCFUART_BASE1		0x8600		/* Base address of UART1 */
+#define MCFUART_BASE2		0x8700		/* Base address of UART2 */
+#define MCFUART_BASE3		0x8800		/* Base address of UART3 */
+#define MCFUART_BASE4		0x8900		/* Base address of UART4 */
+
+/*
  *	Define system peripheral IRQ usage.
  */
 #define MCF_IRQ_TIMER		(64 + 54)	/* Slice Timer 0 */
@@ -52,4 +65,4 @@
 #define MCF_PAR_PSC_RTS_RTS	(0x30)
 #define MCF_PAR_PSC_CANRX	(0x40)
 
-#endif	/* m548xsim_h */
+#endif	/* m54xxsim_h */
diff --git a/arch/m68k/include/asm/mcfcache.h b/arch/m68k/include/asm/mcfcache.h
deleted file mode 100644
index f49dfc0..0000000
--- a/arch/m68k/include/asm/mcfcache.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/****************************************************************************/
-
-/*
- *	mcfcache.h -- ColdFire CPU cache support code
- *
- *	(C) Copyright 2004, Greg Ungerer <gerg@snapgear.com>
- */
-
-/****************************************************************************/
-#ifndef	__M68KNOMMU_MCFCACHE_H
-#define	__M68KNOMMU_MCFCACHE_H
-/****************************************************************************/
-
-
-/*
- *	The different ColdFire families have different cache arrangments.
- *	Everything from a small instruction only cache, to configurable
- *	data and/or instruction cache, to unified instruction/data, to 
- *	harvard style separate instruction and data caches.
- */
-
-#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || defined(CONFIG_M5272)
-/*
- *	Simple version 2 core cache. These have instruction cache only,
- *	we just need to invalidate it and enable it.
- */
-.macro CACHE_ENABLE
-	movel	#0x01000000,%d0		/* invalidate cache cmd */
-	movec	%d0,%CACR		/* do invalidate cache */
-	movel	#0x80000100,%d0		/* setup cache mask */
-	movec	%d0,%CACR		/* enable cache */
-.endm
-#endif /* CONFIG_M5206 || CONFIG_M5206e || CONFIG_M5272 */
-
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x)
-/*
- *	New version 2 cores have a configurable split cache arrangement.
- *	For now I am just enabling instruction cache - but ultimately I
- *	think a split instruction/data cache would be better.
- */
-.macro CACHE_ENABLE
-	movel	#0x01400000,%d0
-	movec	%d0,%CACR		/* invalidate cache */
-	nop
-	movel	#0x0000c000,%d0		/* set SDRAM cached only */
-	movec	%d0,%ACR0
-	movel	#0x00000000,%d0		/* no other regions cached */
-	movec	%d0,%ACR1
-	movel	#0x80400100,%d0		/* configure cache */
-	movec	%d0,%CACR		/* enable cache */
-	nop
-.endm
-#endif /* CONFIG_M523x || CONFIG_M527x */
-
-#if defined(CONFIG_M528x)
-.macro CACHE_ENABLE
-	nop
-	movel	#0x01000000, %d0
-	movec	%d0, %CACR		/* Invalidate cache */
-	nop
-	movel	#0x0000c020, %d0	/* Set SDRAM cached only */
-	movec	%d0, %ACR0
-	movel	#0x00000000, %d0	/* No other regions cached */
-	movec	%d0, %ACR1
-	movel	#0x80000200, %d0	/* Setup cache mask */
-	movec	%d0, %CACR		/* Enable cache */
-	nop
-.endm
-#endif /* CONFIG_M528x */
-
-#if defined(CONFIG_M5249) || defined(CONFIG_M5307)
-/*
- *	The version 3 core cache. Oddly enough the version 2 core 5249
- *	has the same SDRAM and cache setup as the version 3 cores.
- *	This is a single unified instruction/data cache.
- */
-.macro CACHE_ENABLE
-	movel	#0x01000000,%d0		/* invalidate whole cache */
-	movec	%d0,%CACR
-	nop
-#if defined(DEBUGGER_COMPATIBLE_CACHE) || defined(CONFIG_SECUREEDGEMP3)
-	movel	#0x0000c000,%d0		/* set SDRAM cached (write-thru) */
-#else
-	movel	#0x0000c020,%d0		/* set SDRAM cached (copyback) */
-#endif
-	movec	%d0,%ACR0
-	movel	#0x00000000,%d0		/* no other regions cached */
-	movec	%d0,%ACR1
-	movel	#0xa0000200,%d0		/* enable cache */
-	movec	%d0,%CACR
-	nop
-.endm
-#endif /* CONFIG_M5249 || CONFIG_M5307 */
-
-#if defined(CONFIG_M532x)
-.macro CACHE_ENABLE
-	movel	#0x01000000,%d0		/* invalidate cache cmd */
-	movec	%d0,%CACR		/* do invalidate cache */
-	nop
-	movel	#0x4001C000,%d0		/* set SDRAM cached (write-thru) */
-	movec	%d0,%ACR0
-	movel	#0x00000000,%d0		/* no other regions cached */
-	movec	%d0,%ACR1
-	movel	#0x80000200,%d0		/* setup cache mask */
-	movec	%d0,%CACR		/* enable cache */
-	nop
-.endm
-#endif /* CONFIG_M532x */
-
-#if defined(CONFIG_M5407) || defined(CONFIG_M548x)
-/*
- *	Version 4 cores have a true harvard style separate instruction
- *	and data cache. Invalidate and enable cache, also enable write
- *	buffers and branch accelerator.
- */
-.macro CACHE_ENABLE
-	movel	#0x01040100,%d0		/* invalidate whole cache */
-	movec	%d0,%CACR
-	nop
-	movel	#0x000fc000,%d0		/* set SDRAM cached only */
-	movec	%d0, %ACR0
-	movel	#0x00000000,%d0		/* no other regions cached */
-	movec	%d0, %ACR1
-	movel	#0x000fc000,%d0		/* set SDRAM cached only */
-	movec	%d0, %ACR2
-	movel	#0x00000000,%d0		/* no other regions cached */
-	movec	%d0, %ACR3
-	movel	#0xb6088400,%d0		/* enable caches */
-	movec	%d0,%CACR
-	nop
-.endm
-#endif /* CONFIG_M5407 */
-
-#if defined(CONFIG_M520x)
-.macro CACHE_ENABLE
-	move.l	#0x01000000,%d0		/* invalidate whole cache */
-	movec	%d0,%CACR
-	nop
-	move.l	#0x0000c000,%d0		/* set SDRAM cached (write-thru) */
-	movec	%d0,%ACR0
-	move.l	#0x00000000,%d0		/* no other regions cached */
-	movec	%d0,%ACR1
-	move.l	#0x80400000,%d0		/* enable 8K instruction cache */
-	movec	%d0,%CACR
-	nop
-.endm
-#endif /* CONFIG_M520x */
-
-/****************************************************************************/
-#endif	/* __M68KNOMMU_MCFCACHE_H */
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index 6901fd6..ebd0304 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -41,8 +41,8 @@
 #elif defined(CONFIG_M5407)
 #include <asm/m5407sim.h>
 #include <asm/mcfintc.h>
-#elif defined(CONFIG_M548x)
-#include <asm/m548xsim.h>
+#elif defined(CONFIG_M54xx)
+#include <asm/m54xxsim.h>
 #endif
 
 /****************************************************************************/
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index db72e2b..2abedff 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -12,49 +12,6 @@
 #define	mcfuart_h
 /****************************************************************************/
 
-/*
- *	Define the base address of the UARTS within the MBAR address
- *	space.
- */
-#if defined(CONFIG_M5272)
-#define	MCFUART_BASE1		0x100		/* Base address of UART1 */
-#define	MCFUART_BASE2		0x140		/* Base address of UART2 */
-#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e)
-#if defined(CONFIG_NETtel)
-#define	MCFUART_BASE1		0x180		/* Base address of UART1 */
-#define	MCFUART_BASE2		0x140		/* Base address of UART2 */
-#else
-#define	MCFUART_BASE1		0x140		/* Base address of UART1 */
-#define	MCFUART_BASE2		0x180		/* Base address of UART2 */
-#endif
-#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
-#define MCFUART_BASE1		0x200           /* Base address of UART1 */
-#define MCFUART_BASE2		0x240           /* Base address of UART2 */
-#define MCFUART_BASE3		0x280           /* Base address of UART3 */
-#elif defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
-#if defined(CONFIG_NETtel) || defined(CONFIG_SECUREEDGEMP3)
-#define MCFUART_BASE1		0x200           /* Base address of UART1 */
-#define MCFUART_BASE2		0x1c0           /* Base address of UART2 */
-#else
-#define MCFUART_BASE1		0x1c0           /* Base address of UART1 */
-#define MCFUART_BASE2		0x200           /* Base address of UART2 */
-#endif
-#elif defined(CONFIG_M520x)
-#define MCFUART_BASE1		0x60000		/* Base address of UART1 */
-#define MCFUART_BASE2		0x64000		/* Base address of UART2 */
-#define MCFUART_BASE3		0x68000		/* Base address of UART2 */
-#elif defined(CONFIG_M532x)
-#define MCFUART_BASE1		0xfc060000	/* Base address of UART1 */
-#define MCFUART_BASE2		0xfc064000	/* Base address of UART2 */
-#define MCFUART_BASE3		0xfc068000	/* Base address of UART3 */
-#elif defined(CONFIG_M548x)
-#define MCFUART_BASE1		0x8600		/* on M548x */
-#define MCFUART_BASE2		0x8700		/* on M548x */
-#define MCFUART_BASE3		0x8800		/* on M548x */
-#define MCFUART_BASE4		0x8900		/* on M548x */
-#endif
-
-
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
 
@@ -217,7 +174,7 @@
 #define	MCFUART_URF_RXS		0xc0		/* Receiver status */
 #endif
 
-#if defined(CONFIG_M548x)
+#if defined(CONFIG_M54xx)
 #define MCFUART_TXFIFOSIZE	512
 #elif defined(CONFIG_M5272)
 #define MCFUART_TXFIFOSIZE	25
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 7a6a759..278c69b 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -20,23 +20,26 @@
 
 static inline unsigned long rdusp(void)
 {
-#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
 	extern unsigned int sw_usp;
 	return sw_usp;
 #else
-	unsigned long usp;
-	__asm__ __volatile__("move %/usp,%0" : "=a" (usp));
+	register unsigned long usp __asm__("a0");
+	/* move %usp,%a0 */
+	__asm__ __volatile__(".word 0x4e68" : "=a" (usp));
 	return usp;
 #endif
 }
 
 static inline void wrusp(unsigned long usp)
 {
-#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
 	extern unsigned int sw_usp;
 	sw_usp = usp;
 #else
-	__asm__ __volatile__("move %0,%/usp" : : "a" (usp));
+	register unsigned long a0 __asm__("a0") = usp;
+	/* move %a0,%usp */
+	__asm__ __volatile__(".word 0x4e60" : : "a" (a0) );
 #endif
 }
 
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index 2936dda..3219845 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -81,18 +81,6 @@
 	strcpy(__d + strlen(__d), (s));		\
 })
 
-#define __HAVE_ARCH_STRCHR
-static inline char *strchr(const char *s, int c)
-{
-	char sc, ch = c;
-
-	for (; (sc = *s++) != ch; ) {
-		if (!sc)
-			return NULL;
-	}
-	return (char *)s - 1;
-}
-
 #ifndef CONFIG_COLDFIRE
 #define __HAVE_ARCH_STRCMP
 static inline int strcmp(const char *cs, const char *ct)
@@ -111,14 +99,12 @@
 		: "+a" (cs), "+a" (ct), "=d" (res));
 	return res;
 }
+#endif /* CONFIG_COLDFIRE */
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *, const void *, __kernel_size_t);
 
-#define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *, const void *, __kernel_size_t);
 #define memcmp(d, s, n) __builtin_memcmp(d, s, n)
-#endif /* CONFIG_COLDFIRE */
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *, int, __kernel_size_t);
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index 4253f87..d399c5f 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -243,14 +243,3 @@
 	return xdest;
 }
 EXPORT_SYMBOL(memmove);
-
-int memcmp(const void *cs, const void *ct, size_t count)
-{
-	const unsigned char *su1, *su2;
-
-	for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--)
-		if (*su1 != *su2)
-			return *su1 < *su2 ? -1 : +1;
-	return 0;
-}
-EXPORT_SYMBOL(memcmp);
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index ba6ccab..a4c3eb6 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -88,7 +88,7 @@
 
 	/*
 	 * The PSC is always at the same spot, but using psc
-	 * keeps things consisant with the psc_xxxx functions.
+	 * keeps things consistent with the psc_xxxx functions.
 	 */
 
 	psc = (void *) PSC_BASE;
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index fa9f746..8b9daca 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -2,6 +2,7 @@
 	bool
 	default y
 	select HAVE_IDE
+	select HAVE_GENERIC_HARDIRQS
 
 config MMU
 	bool
@@ -48,14 +49,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	default y
-
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
@@ -75,6 +68,16 @@
 config NO_IOPORT
 	def_bool y
 
+config COLDFIRE_SW_A7
+	bool
+	default n
+
+config HAVE_CACHE_SPLIT
+	bool
+
+config HAVE_CACHE_CB
+	bool
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -107,69 +110,90 @@
 
 config M5206
 	bool "MCF5206"
+	select COLDFIRE_SW_A7
 	help
 	  Motorola ColdFire 5206 processor support.
 
 config M5206e
 	bool "MCF5206e"
+	select COLDFIRE_SW_A7
 	help
 	  Motorola ColdFire 5206e processor support.
 
 config M520x
 	bool "MCF520x"
 	select GENERIC_CLOCKEVENTS
+	select HAVE_CACHE_SPLIT
 	help
 	   Freescale Coldfire 5207/5208 processor support.
 
 config M523x
 	bool "MCF523x"
 	select GENERIC_CLOCKEVENTS
+	select HAVE_CACHE_SPLIT
 	help
 	  Freescale Coldfire 5230/1/2/4/5 processor support
 
 config M5249
 	bool "MCF5249"
+	select COLDFIRE_SW_A7
 	help
 	  Motorola ColdFire 5249 processor support.
 
 config M5271
 	bool "MCF5271"
+	select HAVE_CACHE_SPLIT
 	help
 	  Freescale (Motorola) ColdFire 5270/5271 processor support.
 
 config M5272
 	bool "MCF5272"
+	select COLDFIRE_SW_A7
 	help
 	  Motorola ColdFire 5272 processor support.
 
 config M5275
 	bool "MCF5275"
+	select HAVE_CACHE_SPLIT
 	help
 	  Freescale (Motorola) ColdFire 5274/5275 processor support.
 
 config M528x
 	bool "MCF528x"
 	select GENERIC_CLOCKEVENTS
+	select HAVE_CACHE_SPLIT
 	help
 	  Motorola ColdFire 5280/5282 processor support.
 
 config M5307
 	bool "MCF5307"
+	select COLDFIRE_SW_A7
+	select HAVE_CACHE_CB
 	help
 	  Motorola ColdFire 5307 processor support.
 
 config M532x
 	bool "MCF532x"
+	select HAVE_CACHE_CB
 	help
 	  Freescale (Motorola) ColdFire 532x processor support.
 
 config M5407
 	bool "MCF5407"
+	select COLDFIRE_SW_A7
+	select HAVE_CACHE_CB
 	help
 	  Motorola ColdFire 5407 processor support.
 
+config M547x
+	bool "MCF547x"
+	select HAVE_CACHE_CB
+	help
+	  Freescale ColdFire 5470/5471/5472/5473/5474/5475 processor support.
+
 config M548x
 	bool "MCF548x"
+	select HAVE_CACHE_CB
 	help
 	  Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
 
@@ -181,9 +205,14 @@
 	select GENERIC_CLOCKEVENTS
 	default y
 
+config M54xx
+	bool
+	depends on (M548x || M547x)
+	default y
+
 config COLDFIRE
 	bool
-	depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M548x)
+	depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M54xx)
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	default y
@@ -230,6 +259,46 @@
 	  Build support for the older revision ColdFire 5307 silicon.
 	  Specifically this is the 1H55J mask revision.
 
+if HAVE_CACHE_SPLIT
+choice
+	prompt "Split Cache Configuration"
+	default CACHE_I
+
+config CACHE_I
+	bool "Instruction"
+	help
+	  Use all of the ColdFire CPU cache memory as an instruction cache.
+
+config CACHE_D
+	bool "Data"
+	help
+	  Use all of the ColdFire CPU cache memory as a data cache.
+
+config CACHE_BOTH
+	bool "Both"
+	help
+	  Split the ColdFire CPU cache, and use half as an instruction cache
+	  and half as a data cache.
+endchoice
+endif
+
+if HAVE_CACHE_CB
+choice
+	prompt "Data cache mode"
+	default CACHE_WRITETHRU
+
+config CACHE_WRITETHRU
+	bool "Write-through"
+	help
+	  The ColdFire CPU cache is set into Write-through mode.
+
+config CACHE_COPYBACK
+	bool "Copy-back"
+	help
+	  The ColdFire CPU cache is set into Copy-back mode.
+endchoice
+endif
+
 comment "Platform"
 
 config PILOT3
@@ -245,16 +314,16 @@
 	  Support the bugs of Xcopilot.
 
 config UC5272
-        bool 'Arcturus Networks uC5272 dimm board support'
-        depends on M5272
-        help
-          Support for the Arcturus Networks uC5272 dimm board.
+	bool 'Arcturus Networks uC5272 dimm board support'
+	depends on M5272
+	help
+	  Support for the Arcturus Networks uC5272 dimm board.
 
 config UC5282
-       bool "Arcturus Networks uC5282 board support"
-          depends on M528x
-       help
-          Support for the Arcturus Networks uC5282 dimm board.
+	bool "Arcturus Networks uC5282 board support"
+	depends on M528x
+	help
+	  Support for the Arcturus Networks uC5282 dimm board.
 
 config UCSIMM
 	bool "uCsimm module support"
@@ -279,7 +348,7 @@
 	depends on (UCSIMM || UCDIMM || DRAGEN2)
 	help
 	  Disable the CPU internal registers protection in user mode,
-          to allow a user application to read/write them.
+	  to allow a user application to read/write them.
 
 config INIT_LCD
 	bool "Initialize LCD"
@@ -517,7 +586,7 @@
 	depends on (SOM5282EM)
 
 config SNEHA
-        bool
+	bool
 	default y
 	depends on CPU16B
 
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 026ef16..589613f 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -25,7 +25,7 @@
 platform-$(CONFIG_M5307)	:= 5307
 platform-$(CONFIG_M532x)	:= 532x
 platform-$(CONFIG_M5407)	:= 5407
-platform-$(CONFIG_M548x)	:= 548x
+platform-$(CONFIG_M54xx)	:= 54xx
 PLATFORM := $(platform-y)
 
 board-$(CONFIG_PILOT)		:= pilot
@@ -74,7 +74,7 @@
 cpuclass-$(CONFIG_M5307)	:= coldfire
 cpuclass-$(CONFIG_M532x)	:= coldfire
 cpuclass-$(CONFIG_M5407)	:= coldfire
-cpuclass-$(CONFIG_M548x)	:= coldfire
+cpuclass-$(CONFIG_M54xx)	:= coldfire
 cpuclass-$(CONFIG_M68328)	:= 68328
 cpuclass-$(CONFIG_M68EZ328)	:= 68328
 cpuclass-$(CONFIG_M68VZ328)	:= 68328
@@ -91,18 +91,18 @@
 # Some CFLAG additions based on specific CPU type.
 #
 cflags-$(CONFIG_M5206)		:= $(call cc-option,-mcpu=5206,-m5200)
-cflags-$(CONFIG_M5206e)		:= $(call cc-option,-m5206e,-m5200)
+cflags-$(CONFIG_M5206e)		:= $(call cc-option,-mcpu=5206e,-m5200)
 cflags-$(CONFIG_M520x)		:= $(call cc-option,-mcpu=5208,-m5200)
 cflags-$(CONFIG_M523x)		:= $(call cc-option,-mcpu=523x,-m5307)
 cflags-$(CONFIG_M5249)		:= $(call cc-option,-mcpu=5249,-m5200)
 cflags-$(CONFIG_M5271)		:= $(call cc-option,-mcpu=5271,-m5307)
 cflags-$(CONFIG_M5272)		:= $(call cc-option,-mcpu=5272,-m5307)
 cflags-$(CONFIG_M5275)		:= $(call cc-option,-mcpu=5275,-m5307)
-cflags-$(CONFIG_M528x)		:= $(call cc-option,-m528x,-m5307)
-cflags-$(CONFIG_M5307)		:= $(call cc-option,-m5307,-m5200)
+cflags-$(CONFIG_M528x)		:= $(call cc-option,-mcpu=528x,-m5307)
+cflags-$(CONFIG_M5307)		:= $(call cc-option,-mcpu=5307,-m5200)
 cflags-$(CONFIG_M532x)		:= $(call cc-option,-mcpu=532x,-m5307)
-cflags-$(CONFIG_M5407)		:= $(call cc-option,-m5407,-m5200)
-cflags-$(CONFIG_M548x)		:= $(call cc-option,-m5407,-m5200)
+cflags-$(CONFIG_M5407)		:= $(call cc-option,-mcpu=5407,-m5200)
+cflags-$(CONFIG_M54xx)		:= $(call cc-option,-mcpu=5475,-m5200)
 cflags-$(CONFIG_M68328)		:= -m68000
 cflags-$(CONFIG_M68EZ328)	:= -m68000
 cflags-$(CONFIG_M68VZ328)	:= -m68000
diff --git a/arch/m68knommu/configs/m5208evb_defconfig b/arch/m68knommu/configs/m5208evb_defconfig
index 6ac2981..2f5655c 100644
--- a/arch/m68knommu/configs/m5208evb_defconfig
+++ b/arch/m68knommu/configs/m5208evb_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5249evb_defconfig b/arch/m68knommu/configs/m5249evb_defconfig
index 14934ff..16df72b 100644
--- a/arch/m68knommu/configs/m5249evb_defconfig
+++ b/arch/m68knommu/configs/m5249evb_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5272c3_defconfig b/arch/m68knommu/configs/m5272c3_defconfig
index 5985a3b..4e6ea50 100644
--- a/arch/m68knommu/configs/m5272c3_defconfig
+++ b/arch/m68knommu/configs/m5272c3_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5275evb_defconfig b/arch/m68knommu/configs/m5275evb_defconfig
index 5a7857e..f3dd741 100644
--- a/arch/m68knommu/configs/m5275evb_defconfig
+++ b/arch/m68knommu/configs/m5275evb_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5307c3_defconfig b/arch/m68knommu/configs/m5307c3_defconfig
index e810201..bce0a20 100644
--- a/arch/m68knommu/configs/m5307c3_defconfig
+++ b/arch/m68knommu/configs/m5307c3_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5407c3_defconfig b/arch/m68knommu/configs/m5407c3_defconfig
index 5c124a7..618cc32 100644
--- a/arch/m68knommu/configs/m5407c3_defconfig
+++ b/arch/m68knommu/configs/m5407c3_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 6ac2981..2f5655c 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index c684adf..16b2de7 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -55,55 +55,29 @@
 void (*mach_power_off)(void);
 
 #ifdef CONFIG_M68328
-	#define CPU "MC68328"
+#define CPU_NAME	"MC68328"
 #endif
 #ifdef CONFIG_M68EZ328
-	#define CPU "MC68EZ328"
+#define CPU_NAME	"MC68EZ328"
 #endif
 #ifdef CONFIG_M68VZ328
-	#define CPU "MC68VZ328"
+#define CPU_NAME	"MC68VZ328"
 #endif
 #ifdef CONFIG_M68360
-	#define CPU "MC68360"
+#define CPU_NAME	"MC68360"
 #endif
-#if defined(CONFIG_M5206)
-	#define	CPU "COLDFIRE(m5206)"
+#ifndef CPU_NAME
+#define	CPU_NAME	"UNKNOWN"
 #endif
-#if defined(CONFIG_M5206e)
-	#define	CPU "COLDFIRE(m5206e)"
-#endif
-#if defined(CONFIG_M520x)
-	#define CPU "COLDFIRE(m520x)"
-#endif
-#if defined(CONFIG_M523x)
-	#define CPU "COLDFIRE(m523x)"
-#endif
-#if defined(CONFIG_M5249)
-	#define CPU "COLDFIRE(m5249)"
-#endif
-#if defined(CONFIG_M5271)
-	#define CPU "COLDFIRE(m5270/5271)"
-#endif
-#if defined(CONFIG_M5272)
-	#define CPU "COLDFIRE(m5272)"
-#endif
-#if defined(CONFIG_M5275)
-	#define CPU "COLDFIRE(m5274/5275)"
-#endif
-#if defined(CONFIG_M528x)
-	#define CPU "COLDFIRE(m5280/5282)"
-#endif
-#if defined(CONFIG_M5307)
-	#define	CPU "COLDFIRE(m5307)"
-#endif
-#if defined(CONFIG_M532x)
-	#define	CPU "COLDFIRE(m532x)"
-#endif
-#if defined(CONFIG_M5407)
-	#define	CPU "COLDFIRE(m5407)"
-#endif
-#ifndef CPU
-	#define	CPU "UNKNOWN"
+
+/*
+ * Different cores have different instruction execution timings.
+ * The old/traditional 68000 cores are basically all the same, at 16.
+ * The ColdFire cores vary a little; their values are defined in their
+ * headers. We default to the standard 68000 value here.
+ */
+#ifndef CPU_INSTR_PER_JIFFY
+#define	CPU_INSTR_PER_JIFFY	16
 #endif
 
 extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
@@ -208,7 +182,7 @@
 	command_line[sizeof(command_line) - 1] = 0;
 #endif /* CONFIG_UBOOT */
 
-	printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU "\n");
+	printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU_NAME "\n");
 
 #ifdef CONFIG_UCDIMM
 	printk(KERN_INFO "uCdimm by Lineo, Inc. <www.lineo.com>\n");
@@ -257,11 +231,6 @@
 	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 	boot_command_line[COMMAND_LINE_SIZE-1] = 0;
 
-#ifdef DEBUG
-	if (strlen(*cmdline_p))
-		printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p);
-#endif
-
 #if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
 	conswitchp = &dummy_con;
 #endif
@@ -303,15 +272,10 @@
 	char *cpu, *mmu, *fpu;
 	u_long clockfreq;
 
-	cpu = CPU;
+	cpu = CPU_NAME;
 	mmu = "none";
 	fpu = "none";
-
-#ifdef CONFIG_COLDFIRE
-	clockfreq = (loops_per_jiffy * HZ) * 3;
-#else
-	clockfreq = (loops_per_jiffy * HZ) * 16;
-#endif
+	clockfreq = (loops_per_jiffy * HZ) * CPU_INSTR_PER_JIFFY;
 
 	seq_printf(m, "CPU:\t\t%s\n"
 		      "MMU:\t\t%s\n"
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index ef33213..47e15eb 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -141,6 +141,12 @@
 		*(__param)
 		__stop___param = .;
 
+		/* Built-in module versions */
+		. = ALIGN(4) ;
+		__start___modver = .;
+		*(__modver)
+		__stop___modver = .;
+
 		. = ALIGN(4) ;
 		_etext = . ;
 	} > TEXT
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile
index d94d709..32d852e 100644
--- a/arch/m68knommu/lib/Makefile
+++ b/arch/m68knommu/lib/Makefile
@@ -4,4 +4,4 @@
 
 lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o \
 	   muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-	   checksum.o memcpy.o memset.o delay.o
+	   checksum.o memcpy.o memmove.o memset.o delay.o
diff --git a/arch/m68knommu/lib/memmove.c b/arch/m68knommu/lib/memmove.c
new file mode 100644
index 0000000..b3dcfe9
--- /dev/null
+++ b/arch/m68knommu/lib/memmove.c
@@ -0,0 +1,105 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#define __IN_STRING_C
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+	void *xdest = dest;
+	size_t temp;
+
+	if (!n)
+		return xdest;
+
+	if (dest < src) {
+		if ((long)dest & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*cdest++ = *csrc++;
+			dest = cdest;
+			src = csrc;
+			n--;
+		}
+		if (n > 2 && (long)dest & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*sdest++ = *ssrc++;
+			dest = sdest;
+			src = ssrc;
+			n -= 2;
+		}
+		temp = n >> 2;
+		if (temp) {
+			long *ldest = dest;
+			const long *lsrc = src;
+			temp--;
+			do
+				*ldest++ = *lsrc++;
+			while (temp--);
+			dest = ldest;
+			src = lsrc;
+		}
+		if (n & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*sdest++ = *ssrc++;
+			dest = sdest;
+			src = ssrc;
+		}
+		if (n & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*cdest = *csrc;
+		}
+	} else {
+		dest = (char *)dest + n;
+		src = (const char *)src + n;
+		if ((long)dest & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*--cdest = *--csrc;
+			dest = cdest;
+			src = csrc;
+			n--;
+		}
+		if (n > 2 && (long)dest & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*--sdest = *--ssrc;
+			dest = sdest;
+			src = ssrc;
+			n -= 2;
+		}
+		temp = n >> 2;
+		if (temp) {
+			long *ldest = dest;
+			const long *lsrc = src;
+			temp--;
+			do
+				*--ldest = *--lsrc;
+			while (temp--);
+			dest = ldest;
+			src = lsrc;
+		}
+		if (n & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*--sdest = *--ssrc;
+			dest = sdest;
+			src = ssrc;
+		}
+		if (n & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*--cdest = *--csrc;
+		}
+	}
+	return xdest;
+}
+EXPORT_SYMBOL(memmove);
diff --git a/arch/m68knommu/mm/Makefile b/arch/m68knommu/mm/Makefile
index fc91f25..b54ab6b 100644
--- a/arch/m68knommu/mm/Makefile
+++ b/arch/m68knommu/mm/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the linux m68knommu specific parts of the memory manager.
 #
 
-obj-y += init.o fault.o memory.o kmap.o
+obj-y += init.o kmap.o
diff --git a/arch/m68knommu/mm/fault.c b/arch/m68knommu/mm/fault.c
deleted file mode 100644
index bc05cf7..0000000
--- a/arch/m68knommu/mm/fault.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *  linux/arch/m68knommu/mm/fault.c
- *
- *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
- *  Copyright (C) 2000  Lineo, Inc.  (www.lineo.com)
- *
- *  Based on:
- *
- *  linux/arch/m68k/mm/fault.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- */
-
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-
-extern void die_if_kernel(char *, struct pt_regs *, long);
-
-/*
- * This routine handles page faults.  It determines the problem, and
- * then passes it off to one of the appropriate routines.
- *
- * error_code:
- *	bit 0 == 0 means no page found, 1 means protection fault
- *	bit 1 == 0 means read, 1 means write
- *
- * If this routine detects a bad access, it returns 1, otherwise it
- * returns 0.
- */
-asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
-			      unsigned long error_code)
-{
-#ifdef DEBUG
-	printk(KERN_DEBUG "regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
-		regs->sr, regs->pc, address, error_code);
-#endif
-
-	/*
-	 * Oops. The kernel tried to access some bad page. We'll have to
-	 * terminate things with extreme prejudice.
-	 */
-	if ((unsigned long) address < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-	else
-		printk(KERN_ALERT "Unable to handle kernel access");
-	printk(KERN_ALERT " at virtual address %08lx\n", address);
-	die_if_kernel("Oops", regs, error_code);
-	do_exit(SIGKILL);
-
-	return 1;
-}
-
diff --git a/arch/m68knommu/mm/kmap.c b/arch/m68knommu/mm/kmap.c
index 902c1df..ece8d5a 100644
--- a/arch/m68knommu/mm/kmap.c
+++ b/arch/m68knommu/mm/kmap.c
@@ -36,15 +36,6 @@
 }
 
 /*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
  * Set new cache mode for some kernel address space.
  * The caller must push data for that range itself, if such data may already
  * be in the cache.
diff --git a/arch/m68knommu/mm/memory.c b/arch/m68knommu/mm/memory.c
deleted file mode 100644
index 8f7949e..0000000
--- a/arch/m68knommu/mm/memory.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *  linux/arch/m68knommu/mm/memory.c
- *
- *  Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
- *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
- *
- *  Based on:
- *
- *  linux/arch/m68k/mm/memory.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- */
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-/*
- * Map some physical address range into the kernel address space.
- */
-
-unsigned long kernel_map(unsigned long paddr, unsigned long size,
-			 int nocacheflag, unsigned long *memavailp )
-{
-	return paddr;
-}
-
diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c
index d09d9da..c5151f8 100644
--- a/arch/m68knommu/platform/5249/intc2.c
+++ b/arch/m68knommu/platform/5249/intc2.c
@@ -50,8 +50,10 @@
 	int irq;
 
 	/* GPIO interrupt sources */
-	for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++)
+	for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
 		irq_desc[irq].chip = &intc2_irq_gpio_chip;
+		set_irq_handler(irq, handle_edge_irq);
+	}
 
 	return 0;
 }
diff --git a/arch/m68knommu/platform/548x/Makefile b/arch/m68knommu/platform/54xx/Makefile
similarity index 100%
rename from arch/m68knommu/platform/548x/Makefile
rename to arch/m68knommu/platform/54xx/Makefile
diff --git a/arch/m68knommu/platform/548x/config.c b/arch/m68knommu/platform/54xx/config.c
similarity index 74%
rename from arch/m68knommu/platform/548x/config.c
rename to arch/m68knommu/platform/54xx/config.c
index 9888846..7813098 100644
--- a/arch/m68knommu/platform/548x/config.c
+++ b/arch/m68knommu/platform/54xx/config.c
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *	linux/arch/m68knommu/platform/548x/config.c
+ *	linux/arch/m68knommu/platform/54xx/config.c
  *
  *	Copyright (C) 2010, Philippe De Muyter <phdm@macqel.be>
  */
@@ -15,13 +15,13 @@
 #include <linux/io.h>
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
-#include <asm/m548xsim.h>
+#include <asm/m54xxsim.h>
 #include <asm/mcfuart.h>
-#include <asm/m548xgpt.h>
+#include <asm/m54xxgpt.h>
 
 /***************************************************************************/
 
-static struct mcf_platform_uart m548x_uart_platform[] = {
+static struct mcf_platform_uart m54xx_uart_platform[] = {
 	{
 		.mapbase	= MCF_MBAR + MCFUART_BASE1,
 		.irq		= 64 + 35,
@@ -40,20 +40,20 @@
 	},
 };
 
-static struct platform_device m548x_uart = {
+static struct platform_device m54xx_uart = {
 	.name			= "mcfuart",
 	.id			= 0,
-	.dev.platform_data	= m548x_uart_platform,
+	.dev.platform_data	= m54xx_uart_platform,
 };
 
-static struct platform_device *m548x_devices[] __initdata = {
-	&m548x_uart,
+static struct platform_device *m54xx_devices[] __initdata = {
+	&m54xx_uart,
 };
 
 
 /***************************************************************************/
 
-static void __init m548x_uart_init_line(int line, int irq)
+static void __init m54xx_uart_init_line(int line, int irq)
 {
 	int rts_cts;
 
@@ -72,18 +72,18 @@
 						MCF_MBAR + MCF_PAR_PSC(line));
 }
 
-static void __init m548x_uarts_init(void)
+static void __init m54xx_uarts_init(void)
 {
-	const int nrlines = ARRAY_SIZE(m548x_uart_platform);
+	const int nrlines = ARRAY_SIZE(m54xx_uart_platform);
 	int line;
 
 	for (line = 0; (line < nrlines); line++)
-		m548x_uart_init_line(line, m548x_uart_platform[line].irq);
+		m54xx_uart_init_line(line, m54xx_uart_platform[line].irq);
 }
 
 /***************************************************************************/
 
-static void mcf548x_reset(void)
+static void mcf54xx_reset(void)
 {
 	/* disable interrupts and enable the watchdog */
 	asm("movew #0x2700, %sr\n");
@@ -97,8 +97,8 @@
 
 void __init config_BSP(char *commandp, int size)
 {
-	mach_reset = mcf548x_reset;
-	m548x_uarts_init();
+	mach_reset = mcf54xx_reset;
+	m54xx_uarts_init();
 }
 
 /***************************************************************************/
@@ -106,7 +106,7 @@
 static int __init init_BSP(void)
 {
 
-	platform_add_devices(m548x_devices, ARRAY_SIZE(m548x_devices));
+	platform_add_devices(m54xx_devices, ARRAY_SIZE(m54xx_devices));
 	return 0;
 }
 
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index 240a7a6..676960c 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -108,7 +108,6 @@
 	movel	%d1,%a2
 1:
 	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
-	andl	#_TIF_WORK_MASK,%d1
 	jne	Lwork_to_do
 	RESTORE_ALL
 
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index 8658528..2a3af19 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -179,8 +179,8 @@
 	IMR = ~0;
 
 	for (i = 0; (i < NR_IRQS); i++) {
-		set_irq_chip(irq, &intc_irq_chip);
-		set_irq_handler(irq, handle_level_irq);
+		set_irq_chip(i, &intc_irq_chip);
+		set_irq_handler(i, handle_level_irq);
 	}
 }
 
diff --git a/arch/m68knommu/platform/68360/commproc.c b/arch/m68knommu/platform/68360/commproc.c
index f27e688..8e4e10c 100644
--- a/arch/m68knommu/platform/68360/commproc.c
+++ b/arch/m68knommu/platform/68360/commproc.c
@@ -210,7 +210,7 @@
 cpm_install_handler(int vec, void (*handler)(), void *dev_id)
 {
 
-	request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id);
+	request_irq(vec, handler, 0, "timer", dev_id);
 
 /* 	if (cpm_vecs[vec].handler != 0) */
 /* 		printk(KERN_INFO "CPM interrupt %x replacing %x\n", */
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index ac629fa3..9dd5bca 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -75,7 +75,7 @@
   /* Set compare register  32Khz / 32 / 10 = 100 */
   TCMP = 10;                                                              
 
-  request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
+  request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL);
 #endif
 
   /* General purpose quicc timers: MC68360UM p7-20 */
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 8a28788..46c1b18 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -104,7 +104,6 @@
 	movel	%d1,%a2
 1:
 	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
-	andl	#_TIF_WORK_MASK,%d1
 	jne	Lwork_to_do
 	RESTORE_ALL
 
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index ad96ab1..a29041c 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -132,8 +132,8 @@
 	pquicc->intr_cimr = 0x00000000;
 
 	for (i = 0; (i < NR_IRQS); i++) {
-		set_irq_chip(irq, &intc_irq_chip);
-		set_irq_handler(irq, handle_level_irq);
+		set_irq_chip(i, &intc_irq_chip);
+		set_irq_handler(i, handle_level_irq);
 	}
 }
 
diff --git a/arch/m68knommu/platform/coldfire/Makefile b/arch/m68knommu/platform/coldfire/Makefile
index 45f501f..a8967ba 100644
--- a/arch/m68knommu/platform/coldfire/Makefile
+++ b/arch/m68knommu/platform/coldfire/Makefile
@@ -14,7 +14,7 @@
 
 asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
 
-obj-$(CONFIG_COLDFIRE)	+= clk.o dma.o entry.o vectors.o
+obj-$(CONFIG_COLDFIRE)	+= cache.o clk.o dma.o entry.o vectors.o
 obj-$(CONFIG_M5206)	+= timers.o intc.o
 obj-$(CONFIG_M5206e)	+= timers.o intc.o
 obj-$(CONFIG_M520x)	+= pit.o intc-simr.o
@@ -26,7 +26,7 @@
 obj-$(CONFIG_M5307)	+= timers.o intc.o
 obj-$(CONFIG_M532x)	+= timers.o intc-simr.o
 obj-$(CONFIG_M5407)	+= timers.o intc.o
-obj-$(CONFIG_M548x)	+= sltimers.o intc-2.o
+obj-$(CONFIG_M54xx)	+= sltimers.o intc-2.o
 
 obj-y			+= pinmux.o gpio.o
 extra-y := head.o
diff --git a/arch/m68knommu/platform/coldfire/cache.c b/arch/m68knommu/platform/coldfire/cache.c
new file mode 100644
index 0000000..235d3c4
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/cache.c
@@ -0,0 +1,48 @@
+/***************************************************************************/
+
+/*
+ *	cache.c -- general ColdFire cache maintenance code
+ *
+ *	Copyright (C) 2010, Greg Ungerer (gerg@snapgear.com)
+ */
+
+/***************************************************************************/
+
+#include <linux/kernel.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+/***************************************************************************/
+#ifdef CACHE_PUSH
+/***************************************************************************/
+
+/*
+ *	Use cpushl to push all dirty cache lines back to memory.
+ *	Older versions of GAS don't seem to know how to generate the
+ *	ColdFire cpushl instruction... Oh well, we hand-code the raw opcode for now.
+ */
+
+void mcf_cache_push(void)
+{
+	__asm__ __volatile__ (
+		"clrl	%%d0\n\t"
+		"1:\n\t"
+		"movel	%%d0,%%a0\n\t"
+		"2:\n\t"
+		".word	0xf468\n\t"
+		"addl	%0,%%a0\n\t"
+		"cmpl	%1,%%a0\n\t"
+		"blt	2b\n\t"
+		"addql	#1,%%d0\n\t"
+		"cmpil	%2,%%d0\n\t"
+		"bne	1b\n\t"
+		: /* No output */
+		: "i" (CACHE_LINE_SIZE),
+		  "i" (DCACHE_SIZE / CACHE_WAYS),
+		  "i" (CACHE_WAYS)
+		: "d0", "a0" );
+}
+
+/***************************************************************************/
+#endif /* CACHE_PUSH */
+/***************************************************************************/
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index e1debc8..5837cf0 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -36,13 +36,16 @@
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
 
+#ifdef CONFIG_COLDFIRE_SW_A7
+/*
+ *	Define software copies of the supervisor and user stack pointers.
+ */
 .bss
-
 sw_ksp:
 .long	0
-
 sw_usp:
 .long	0
+#endif /* CONFIG_COLDFIRE_SW_A7 */
 
 .text
 
@@ -51,7 +54,6 @@
 .globl ret_from_exception
 .globl ret_from_signal
 .globl sys_call_table
-.globl ret_from_interrupt
 .globl inthandler
 .globl fasthandler
 
@@ -136,24 +138,10 @@
 	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
 	movel	%d1,%a0
 	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
-	andl	#0xefff,%d1
 	jne	Lwork_to_do		/* still work to do */
 
 Lreturn:
-	move	#0x2700,%sr		/* disable intrs */
-	movel	sw_usp,%a0		/* get usp */
-	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
-	movel	%sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
-	moveml	%sp@,%d1-%d5/%a0-%a2
-	lea	%sp@(32),%sp		/* space for 8 regs */
-	movel	%sp@+,%d0
-	addql	#4,%sp			/* orig d0 */
-	addl	%sp@+,%sp		/* stk adj */
-	addql	#8,%sp			/* remove exception */
-	movel	%sp,sw_ksp		/* save ksp */
-	subql	#8,sw_usp		/* set exception */
-	movel	sw_usp,%sp		/* restore usp */
-	rte
+	RESTORE_USER
 
 Lwork_to_do:
 	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
@@ -191,31 +179,7 @@
 	jbsr	do_IRQ			/* call high level irq handler */
 	lea	%sp@(8),%sp		/* pop args off stack */
 
-	bra	ret_from_interrupt	/* this was fallthrough */
-
-/*
- * This is the fast interrupt handler (for certain hardware interrupt
- * sources). Unlike the normal interrupt handler it just uses the
- * current stack (doesn't care if it is user or kernel). It also
- * doesn't bother doing the bottom half handlers.
- */
-ENTRY(fasthandler)
-	SAVE_LOCAL
-
-	movew	%sp@(PT_OFF_FORMATVEC),%d0
-	andl	#0x03fc,%d0		/* mask out vector only */
-
-	movel	%sp,%sp@-		/* push regs arg */
-	lsrl	#2,%d0			/* calculate real vector # */
-	movel	%d0,%sp@-		/* push vector number */
-	jbsr	do_IRQ			/* call high level irq handler */
-	lea	%sp@(8),%sp		/* pop args off stack */
-
-	RESTORE_LOCAL
-
-ENTRY(ret_from_interrupt)
-	/* the fasthandler is confusing me, haven't seen any user */
-	jmp	ret_from_exception
+	bra	ret_from_exception
 
 /*
  * Beware - when entering resume, prev (the current task) is
@@ -226,9 +190,8 @@
  */
 ENTRY(resume)
 	movel	%a0, %d1			/* get prev thread in d1 */
-
-	movel	sw_usp,%d0			/* save usp */
-	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)
+	RDUSP
+	movel	%a2,%a0@(TASK_THREAD+THREAD_USP)
 
 	SAVE_SWITCH_STACK
 	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
@@ -236,5 +199,5 @@
 	RESTORE_SWITCH_STACK
 
 	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
-	movel	%a0, sw_usp
+	WRUSP
 	rts
diff --git a/arch/m68knommu/platform/coldfire/head.S b/arch/m68knommu/platform/coldfire/head.S
index 0b2d7c7..d597790 100644
--- a/arch/m68knommu/platform/coldfire/head.S
+++ b/arch/m68knommu/platform/coldfire/head.S
@@ -3,7 +3,7 @@
 /*
  *	head.S -- common startup code for ColdFire CPUs.
  *
- *	(C) Copyright 1999-2006, Greg Ungerer <gerg@snapgear.com>.
+ *	(C) Copyright 1999-2010, Greg Ungerer <gerg@snapgear.com>.
  */
 
 /*****************************************************************************/
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <asm/asm-offsets.h>
 #include <asm/coldfire.h>
-#include <asm/mcfcache.h>
 #include <asm/mcfsim.h>
 #include <asm/thread_info.h>
 
@@ -173,10 +172,27 @@
 
 	/*
 	 *	Now that we know what the memory is, lets enable cache
-	 *	and get things moving. This is Coldfire CPU specific.
+	 *	and get things moving. This is ColdFire CPU specific. Not
+	 *	all core versions have an identical cache register setup,
+	 *	but they are very similar. The exact settings are defined
+	 *	in the headers, so the code here is the same for all.
 	 */
-	CACHE_ENABLE				/* enable CPU cache */
-
+	movel	#CACHE_INIT,%d0			/* invalidate whole cache */
+	movec	%d0,%CACR
+	nop
+	movel	#ACR0_MODE,%d0			/* set RAM region for caching */
+	movec	%d0,%ACR0
+	movel	#ACR1_MODE,%d0			/* anything else to cache? */
+	movec	%d0,%ACR1
+#ifdef ACR2_MODE
+	movel	#ACR2_MODE,%d0
+	movec	%d0,%ACR2
+	movel	#ACR3_MODE,%d0
+	movec	%d0,%ACR3
+#endif
+	movel	#CACHE_MODE,%d0			/* enable cache */
+	movec	%d0,%CACR
+	nop
 
 #ifdef CONFIG_ROMFS_FS
 	/*
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 5f5018a..3168003 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -15,6 +15,8 @@
 	select TRACING_SUPPORT
 	select OF
 	select OF_EARLY_FLATTREE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
 
 config SWAP
 	def_bool n
@@ -37,12 +39,6 @@
 config GENERIC_HWEIGHT
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
@@ -52,9 +48,6 @@
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_GPIO
 	def_bool y
 
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index e66e25c..012e377 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -23,8 +23,4 @@
 	  This option turns on/off heart beat kernel functionality.
 	  First GPIO node is taken.
 
-config DEBUG_BOOTMEM
-	depends on DEBUG_KERNEL
-	bool "Debug BOOTMEM initialization"
-
 endmenu
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 15f1f1d..6f432e6 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -17,7 +17,7 @@
 # The various CONFIG_XILINX cpu features options are integers 0/1/2...
 # rather than bools y/n
 
-# Work out HW multipler support.  This is icky.
+# Work out HW multiplier support. This is tricky.
 # 1. Spartan2 has no HW multiplers.
 # 2. MicroBlaze v3.x always uses them, except in Spartan 2
 # 3. All other FPGa/CPU ver combos, we can trust the CONFIG_ settings
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 8b422b1..b3f5eec 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -7,7 +7,7 @@
 CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
 CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_HOTPLUG is not set
@@ -66,5 +66,4 @@
 CONFIG_DEBUG_INFO=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_BOOTMEM=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index ebc143c..0249e4b 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_HOTPLUG is not set
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h
index 5fd3190..c4532f0 100644
--- a/arch/microblaze/include/asm/irqflags.h
+++ b/arch/microblaze/include/asm/irqflags.h
@@ -12,7 +12,7 @@
 #include <linux/types.h>
 #include <asm/registers.h>
 
-#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 
 static inline unsigned long arch_local_irq_save(void)
 {
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index cae268c2..885574a 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -411,20 +411,19 @@
 static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 				unsigned long set)
 {
-	unsigned long old, tmp, msr;
+	unsigned long flags, old, tmp;
 
-	__asm__ __volatile__("\
-	msrclr	%2, 0x2\n\
-	nop\n\
-	lw	%0, %4, r0\n\
-	andn	%1, %0, %5\n\
-	or	%1, %1, %6\n\
-	sw	%1, %4, r0\n\
-	mts     rmsr, %2\n\
-	nop"
-	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
-	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
-	: "cc");
+	raw_local_irq_save(flags);
+
+	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
+				"andn	%1, %0, %3	\n"
+				"or	%1, %1, %4	\n"
+				"sw	%1, %2, r0	\n"
+			: "=&r" (old), "=&r" (tmp)
+			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
+			: "cc");
+
+	raw_local_irq_restore(flags);
 
 	return old;
 }
@@ -444,8 +443,9 @@
 	*ptep = pte;
 }
 
-static inline int ptep_test_and_clear_young(struct mm_struct *mm,
-		unsigned long addr, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep)
 {
 	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
 }
@@ -457,6 +457,7 @@
 		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 		unsigned long addr, pte_t *ptep)
 {
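
The pte_update() rewrite above replaces the msrclr-based interrupt masking that was embedded in the asm with raw_local_irq_save()/restore() around a plain load/andn/or/store sequence. A standalone sketch of that read-modify-write (only the bit manipulation is shown; the interrupt handling is omitted):

#include <stdio.h>

static unsigned long pte_rmw(unsigned long *p, unsigned long clr,
			     unsigned long set)
{
	unsigned long old = *p;		/* lw   */
	unsigned long tmp = old & ~clr;	/* andn */

	tmp |= set;			/* or   */
	*p = tmp;			/* sw   */
	return old;
}

int main(void)
{
	unsigned long pte = 0xff;	/* hypothetical PTE contents */
	unsigned long old = pte_rmw(&pte, 0x0f, 0x100);

	printf("old=%#lx new=%#lx\n", old, pte);
	return 0;
}
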
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 37db96a..a10bec6 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -1,9 +1,9 @@
 /*
  * Support for the MicroBlaze PVR (Processor Version Register)
  *
- * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 - 2011 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
- * Copyright (C) 2007 - 2009 PetaLogix
+ * Copyright (C) 2007 - 2011 PetaLogix
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file COPYING in the main directory of this
@@ -46,11 +46,11 @@
 #define PVR2_I_LMB_MASK			0x10000000
 #define PVR2_INTERRUPT_IS_EDGE_MASK	0x08000000
 #define PVR2_EDGE_IS_POSITIVE_MASK	0x04000000
-#define PVR2_D_PLB_MASK			0x02000000	/* new */
-#define PVR2_I_PLB_MASK			0x01000000	/* new */
-#define PVR2_INTERCONNECT		0x00800000	/* new */
-#define PVR2_USE_EXTEND_FSL		0x00080000	/* new */
-#define PVR2_USE_FSL_EXC		0x00040000	/* new */
+#define PVR2_D_PLB_MASK			0x02000000 /* new */
+#define PVR2_I_PLB_MASK			0x01000000 /* new */
+#define PVR2_INTERCONNECT		0x00800000 /* new */
+#define PVR2_USE_EXTEND_FSL		0x00080000 /* new */
+#define PVR2_USE_FSL_EXC		0x00040000 /* new */
 #define PVR2_USE_MSR_INSTR		0x00020000
 #define PVR2_USE_PCMP_INSTR		0x00010000
 #define PVR2_AREA_OPTIMISED		0x00008000
@@ -59,7 +59,7 @@
 #define PVR2_USE_HW_MUL_MASK		0x00001000
 #define PVR2_USE_FPU_MASK		0x00000800
 #define PVR2_USE_MUL64_MASK		0x00000400
-#define PVR2_USE_FPU2_MASK		0x00000200	/* new */
+#define PVR2_USE_FPU2_MASK		0x00000200 /* new */
 #define PVR2_USE_IPLBEXC 		0x00000100
 #define PVR2_USE_DPLBEXC		0x00000080
 #define PVR2_OPCODE_0x0_ILL_MASK	0x00000040
@@ -122,96 +122,103 @@
 
 
 /* PVR access macros */
-#define PVR_IS_FULL(pvr)		(pvr.pvr[0] & PVR0_PVR_FULL_MASK)
-#define PVR_USE_BARREL(pvr)		(pvr.pvr[0] & PVR0_USE_BARREL_MASK)
-#define PVR_USE_DIV(pvr)		(pvr.pvr[0] & PVR0_USE_DIV_MASK)
-#define PVR_USE_HW_MUL(pvr)		(pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
-#define PVR_USE_FPU(pvr)		(pvr.pvr[0] & PVR0_USE_FPU_MASK)
-#define PVR_USE_FPU2(pvr)		(pvr.pvr[2] & PVR2_USE_FPU2_MASK)
-#define PVR_USE_ICACHE(pvr)		(pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
-#define PVR_USE_DCACHE(pvr)		(pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
-#define PVR_VERSION(pvr)	((pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
-#define PVR_USER1(pvr)			(pvr.pvr[0] & PVR0_USER1_MASK)
-#define PVR_USER2(pvr)			(pvr.pvr[1] & PVR1_USER2_MASK)
+#define PVR_IS_FULL(_pvr)	(_pvr.pvr[0] & PVR0_PVR_FULL_MASK)
+#define PVR_USE_BARREL(_pvr)	(_pvr.pvr[0] & PVR0_USE_BARREL_MASK)
+#define PVR_USE_DIV(_pvr)	(_pvr.pvr[0] & PVR0_USE_DIV_MASK)
+#define PVR_USE_HW_MUL(_pvr)	(_pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
+#define PVR_USE_FPU(_pvr)	(_pvr.pvr[0] & PVR0_USE_FPU_MASK)
+#define PVR_USE_FPU2(_pvr)	(_pvr.pvr[2] & PVR2_USE_FPU2_MASK)
+#define PVR_USE_ICACHE(_pvr)	(_pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
+#define PVR_USE_DCACHE(_pvr)	(_pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
+#define PVR_VERSION(_pvr)	((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
+#define PVR_USER1(_pvr)		(_pvr.pvr[0] & PVR0_USER1_MASK)
+#define PVR_USER2(_pvr)		(_pvr.pvr[1] & PVR1_USER2_MASK)
 
-#define PVR_D_OPB(pvr)			(pvr.pvr[2] & PVR2_D_OPB_MASK)
-#define PVR_D_LMB(pvr)			(pvr.pvr[2] & PVR2_D_LMB_MASK)
-#define PVR_I_OPB(pvr)			(pvr.pvr[2] & PVR2_I_OPB_MASK)
-#define PVR_I_LMB(pvr)			(pvr.pvr[2] & PVR2_I_LMB_MASK)
-#define PVR_INTERRUPT_IS_EDGE(pvr) \
-			(pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
-#define PVR_EDGE_IS_POSITIVE(pvr) \
-			(pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
-#define PVR_USE_MSR_INSTR(pvr)		(pvr.pvr[2] & PVR2_USE_MSR_INSTR)
-#define PVR_USE_PCMP_INSTR(pvr)		(pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
-#define PVR_AREA_OPTIMISED(pvr)		(pvr.pvr[2] & PVR2_AREA_OPTIMISED)
-#define PVR_USE_MUL64(pvr)		(pvr.pvr[2] & PVR2_USE_MUL64_MASK)
-#define PVR_OPCODE_0x0_ILLEGAL(pvr) \
-			(pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
-#define PVR_UNALIGNED_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
-#define PVR_ILL_OPCODE_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
-#define PVR_IOPB_BUS_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
-#define PVR_DOPB_BUS_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
-#define PVR_DIV_ZERO_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
-#define PVR_FPU_EXCEPTION(pvr)		(pvr.pvr[2] & PVR2_FPU_EXC_MASK)
-#define PVR_FSL_EXCEPTION(pvr)		(pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
+#define PVR_D_OPB(_pvr)		(_pvr.pvr[2] & PVR2_D_OPB_MASK)
+#define PVR_D_LMB(_pvr)		(_pvr.pvr[2] & PVR2_D_LMB_MASK)
+#define PVR_I_OPB(_pvr)		(_pvr.pvr[2] & PVR2_I_OPB_MASK)
+#define PVR_I_LMB(_pvr)		(_pvr.pvr[2] & PVR2_I_LMB_MASK)
+#define PVR_INTERRUPT_IS_EDGE(_pvr) \
+			(_pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
+#define PVR_EDGE_IS_POSITIVE(_pvr) \
+			(_pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
+#define PVR_USE_MSR_INSTR(_pvr)		(_pvr.pvr[2] & PVR2_USE_MSR_INSTR)
+#define PVR_USE_PCMP_INSTR(_pvr)	(_pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
+#define PVR_AREA_OPTIMISED(_pvr)	(_pvr.pvr[2] & PVR2_AREA_OPTIMISED)
+#define PVR_USE_MUL64(_pvr)		(_pvr.pvr[2] & PVR2_USE_MUL64_MASK)
+#define PVR_OPCODE_0x0_ILLEGAL(_pvr) \
+			(_pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
+#define PVR_UNALIGNED_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
+#define PVR_ILL_OPCODE_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
+#define PVR_IOPB_BUS_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
+#define PVR_DOPB_BUS_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
+#define PVR_DIV_ZERO_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
+#define PVR_FPU_EXCEPTION(_pvr)		(_pvr.pvr[2] & PVR2_FPU_EXC_MASK)
+#define PVR_FSL_EXCEPTION(_pvr)		(_pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
 
-#define PVR_DEBUG_ENABLED(pvr)		(pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
-#define PVR_NUMBER_OF_PC_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
-#define PVR_NUMBER_OF_RD_ADDR_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
-#define PVR_NUMBER_OF_WR_ADDR_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
-#define PVR_FSL_LINKS(pvr)	((pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
+#define PVR_DEBUG_ENABLED(_pvr)		(_pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
+#define PVR_NUMBER_OF_PC_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
+#define PVR_NUMBER_OF_RD_ADDR_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
+#define PVR_NUMBER_OF_WR_ADDR_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
+#define PVR_FSL_LINKS(_pvr)	((_pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
 
-#define PVR_ICACHE_ADDR_TAG_BITS(pvr) \
-			((pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_ICACHE_USE_FSL(pvr)		(pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
-#define PVR_ICACHE_ALLOW_WR(pvr)	(pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
-#define PVR_ICACHE_LINE_LEN(pvr) \
-			(1 << ((pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
-#define PVR_ICACHE_BYTE_SIZE(pvr) \
-			(1 << ((pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_ICACHE_ADDR_TAG_BITS(_pvr) \
+		((_pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_ICACHE_USE_FSL(_pvr) \
+		(_pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
+#define PVR_ICACHE_ALLOW_WR(_pvr) \
+		(_pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
+#define PVR_ICACHE_LINE_LEN(_pvr) \
+		(1 << ((_pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
+#define PVR_ICACHE_BYTE_SIZE(_pvr) \
+		(1 << ((_pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
 
-#define PVR_DCACHE_ADDR_TAG_BITS(pvr) \
-			((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_DCACHE_USE_FSL(pvr)		(pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
-#define PVR_DCACHE_ALLOW_WR(pvr)	(pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
+#define PVR_DCACHE_ADDR_TAG_BITS(_pvr) \
+			((_pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_DCACHE_USE_FSL(_pvr)	(_pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
+#define PVR_DCACHE_ALLOW_WR(_pvr) \
+			(_pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
 /* FIXME two shifts on one line needs any comment */
-#define PVR_DCACHE_LINE_LEN(pvr) \
-			(1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
-#define PVR_DCACHE_BYTE_SIZE(pvr) \
-			(1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_DCACHE_LINE_LEN(_pvr) \
+		(1 << ((_pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
+#define PVR_DCACHE_BYTE_SIZE(_pvr) \
+		(1 << ((_pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
 
-#define PVR_DCACHE_USE_WRITEBACK(pvr) \
-			((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
+#define PVR_DCACHE_USE_WRITEBACK(_pvr) \
+			((_pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
 
-#define PVR_ICACHE_BASEADDR(pvr)	(pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
-#define PVR_ICACHE_HIGHADDR(pvr)	(pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_ICACHE_BASEADDR(_pvr) \
+			(_pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
+#define PVR_ICACHE_HIGHADDR(_pvr) \
+			(_pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_DCACHE_BASEADDR(_pvr) \
+			(_pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
+#define PVR_DCACHE_HIGHADDR(_pvr) \
+			(_pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
 
-#define PVR_DCACHE_BASEADDR(pvr)	(pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
-#define PVR_DCACHE_HIGHADDR(pvr)	(pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
+#define PVR_TARGET_FAMILY(_pvr) \
+			((_pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
 
-#define PVR_TARGET_FAMILY(pvr)	((pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
-
-#define PVR_MSR_RESET_VALUE(pvr) \
-				(pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
+#define PVR_MSR_RESET_VALUE(_pvr) \
+			(_pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
 
 /* mmu */
-#define PVR_USE_MMU(pvr)	((pvr.pvr[11] & PVR11_USE_MMU) >> 30)
-#define PVR_MMU_ITLB_SIZE(pvr)	(pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
-#define PVR_MMU_DTLB_SIZE(pvr)	(pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
-#define PVR_MMU_TLB_ACCESS(pvr)	(pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
-#define PVR_MMU_ZONES(pvr)	(pvr.pvr[11] & PVR11_MMU_ZONES)
+#define PVR_USE_MMU(_pvr)		((_pvr.pvr[11] & PVR11_USE_MMU) >> 30)
+#define PVR_MMU_ITLB_SIZE(_pvr)		(_pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
+#define PVR_MMU_DTLB_SIZE(_pvr)		(_pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
+#define PVR_MMU_TLB_ACCESS(_pvr)	(_pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
+#define PVR_MMU_ZONES(_pvr)		(_pvr.pvr[11] & PVR11_MMU_ZONES)
 
 /* endian */
-#define PVR_ENDIAN(pvr)	(pvr.pvr[0] & PVR0_ENDI)
+#define PVR_ENDIAN(_pvr)	(_pvr.pvr[0] & PVR0_ENDI)
 
 int cpu_has_pvr(void);
 void get_pvr(struct pvr_s *pvr);
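
A likely reason for the pvr -> _pvr parameter rename above: with a parameter literally named pvr, every pvr token in the replacement list is substituted, so "pvr.pvr[0]" only expands correctly when the caller's argument happens to be named pvr as well. A small sketch of the difference (struct and values made up for illustration):

#include <stdio.h>

struct pvr_s { unsigned int pvr[12]; };

/* old style: OLD(x) expands to (x.x[0]) and breaks for any x not named "pvr" */
#define OLD(pvr)	(pvr.pvr[0])
/* new style: NEW(x) expands to (x.pvr[0]) and works for any argument name */
#define NEW(_pvr)	(_pvr.pvr[0])

int main(void)
{
	struct pvr_s cpu_pvr = { .pvr = { 0x12345678 } };

	/* OLD(cpu_pvr) would expand to (cpu_pvr.cpu_pvr[0]) and not compile */
	printf("pvr0 = %#x\n", NEW(cpu_pvr));
	return 0;
}
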
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index e8abd4a..8aa9781 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -13,6 +13,7 @@
 
 #define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)
 
+#include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
 
 #ifdef CONFIG_MMU
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 87c79fa..2c309fc 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -32,6 +32,7 @@
 	{"7.30.a", 0x10},
 	{"7.30.b", 0x11},
 	{"8.00.a", 0x12},
+	{"8.00.b", 0x13},
 	{NULL, 0},
 };
 
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index e01afa6..488c1ed 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -27,7 +27,7 @@
 	register unsigned tmp __asm__("r3");			\
 	tmp = 0x0;	/* Prevent warning about unused */	\
 	__asm__ __volatile__ (					\
-			"mfs	%0, rpvr" #pvrid ";"	\
+			"mfs	%0, rpvr" #pvrid ";"		\
 			: "=r" (tmp) : : "memory"); 		\
 	val = tmp;						\
 }
@@ -54,7 +54,7 @@
 	if (!(flags & PVR_MSR_BIT))
 		return 0;
 
-	get_single_pvr(0x00, pvr0);
+	get_single_pvr(0, pvr0);
 	pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0);
 
 	if (pvr0 & PVR0_PVR_FULL_MASK)
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 819238b..41c30cd 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -287,25 +287,44 @@
  * are masked. This is nice, means we don't have to CLI before state save
  */
 C_ENTRY(_user_exception):
-	addi	r14, r14, 4	/* return address is 4 byte after call */
 	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	addi	r14, r14, 4	/* return address is 4 byte after call */
 
+	mfs	r1, rmsr
+	nop
+	andi	r1, r1, MSR_UMS
+	bnei	r1, 1f
+
+/* Kernel-mode state save - kernel execve */
+	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	SAVE_REGS
+
+	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
+	brid	2f;
+	nop;				/* Fill delay slot */
+
+/* User-mode state save.  */
+1:
 	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
 	tophys(r1,r1);
 	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
-	/* MS these three instructions can be added to one */
-	/* addik	r1, r1, THREAD_SIZE; */
-	/* tophys(r1,r1); */
-	/* addik	r1, r1, -STATE_SAVE_SIZE; */
-	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
+/* calculate kernel stack pointer from task struct 8k */
+	addik	r1, r1, THREAD_SIZE;
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
 	SAVE_REGS
 	swi	r0, r1, PTO + PT_R3
 	swi	r0, r1, PTO + PT_R4
 
+	swi	r0, r1, PTO + PT_MODE;			/* Was in user-mode. */
 	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
 	clear_ums;
-	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	/* Save away the syscall number.  */
 	swi	r12, r1, PTO+PT_R0;
 	tovirt(r1,r1)
@@ -375,6 +394,9 @@
 	swi	r3, r1, PTO + PT_R3
 	swi	r4, r1, PTO + PT_R4
 
+	lwi	r11, r1, PTO + PT_MODE;
+/* See if returning to kernel mode, if so, skip resched &c.  */
+	bnei	r11, 2f;
 	/* We're returning to user mode, so check for various conditions that
 	 * trigger rescheduling. */
 	/* FIXME: Restructure all these flag checks. */
@@ -417,6 +439,16 @@
 	RESTORE_REGS;
 	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
 	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
+	bri	6f;
+
+/* Return to kernel state.  */
+2:	set_bip;			/*  Ints masked for state restore */
+	VM_OFF;
+	tophys(r1,r1);
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+	tovirt(r1,r1);
+6:
 TRAP_return:		/* Make global symbol for debugging */
 	rtbd	r14, 0;	/* Instructions to return from an IRQ */
 	nop;
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 478f294..a7fa6ae7 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -25,6 +25,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <asm/current.h>
+#include <asm/cacheflush.h>
 
 #define MICROBLAZE_ILL_OPCODE_EXCEPTION	0x02
 #define MICROBLAZE_IBUS_EXCEPTION	0x03
@@ -52,6 +53,8 @@
 void sw_exception(struct pt_regs *regs)
 {
 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
+	flush_dcache_range(regs->r16, regs->r16 + 0x4);
+	flush_icache_range(regs->r16, regs->r16 + 0x4);
 }
 
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 4243400..778a5ce 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -62,23 +62,32 @@
 	andi	r1, r1, ~2
 	mts	rmsr, r1
 /*
- * Here is checking mechanism which check if Microblaze has msr instructions
- * We load msr and compare it with previous r1 value - if is the same,
- * msr instructions works if not - cpu don't have them.
+ * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
+ * if the msrclr instruction is not enabled. We use this to detect
+ * if the opcode is available, by issuing msrclr and then testing the result.
+ * r8 == 0 - msr instructions are implemented
+ * r8 != 0 - msr instructions are not implemented
  */
-	/* r8=0 - I have msr instr, 1 - I don't have them */
-	rsubi	r0, r0, 1	/* set the carry bit */
-	msrclr	r0, 0x4		/* try to clear it */
-	/* read the carry bit, r8 will be '0' if msrclr exists */
-	addik	r8, r0, 0
+	msrclr	r8, 0 /* clear nothing - just read msr for test */
+	cmpu	r8, r8, r1 /* r1 must contain msr reg content */
 
 /* r7 may point to an FDT, or there may be one linked in.
    if it's in r7, we've got to save it away ASAP.
    We ensure r7 points to a valid FDT, just in case the bootloader
    is broken or non-existent */
 	beqi	r7, no_fdt_arg			/* NULL pointer?  don't copy */
-	lw	r11, r0, r7			/* Does r7 point to a */
-	rsubi	r11, r11, OF_DT_HEADER		/* valid FDT? */
+/* Does r7 point to a valid FDT? Load HEADER magic number */
+	/* Run time Big/Little endian platform */
+	/* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
+	addik	r11, r0, 0x1 /* BIG/LITTLE checking value */
+	/* __bss_start will be zeroed later - it is just temp location */
+	swi	r11, r0, TOPHYS(__bss_start)
+	lbui	r11, r0, TOPHYS(__bss_start)
+	beqid	r11, big_endian /* DO NOT break delay stop dependency */
+	lw	r11, r0, r7 /* Big endian load in delay slot */
+	lwr	r11, r0, r7 /* Little endian load */
+big_endian:
+	rsubi	r11, r11, OF_DT_HEADER	/* Check FDT header */
 	beqi	r11, _prepare_copy_fdt
 	or	r7, r0, r0		/* clear R7 when not valid DTB */
 	bnei	r11, no_fdt_arg			/* No - get out of here */
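
The FDT header check above now works on both big- and little-endian builds by storing the word 1 and reading back the first byte: 0 means big endian, 1 means little endian. The same probe in standalone C:

#include <stdio.h>

int main(void)
{
	unsigned int word = 1;
	unsigned char first_byte = *(unsigned char *)&word;

	printf("%s-endian\n", first_byte ? "little" : "big");
	return 0;
}
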
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 7811954..782680d 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -147,10 +147,6 @@
 	#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
 	#define BSRLI(rD, rA, imm)	\
 		bsrli rD, rA, imm
-	#elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
-	#define BSRLI(rD, rA, imm)	\
-		ori rD, r0, (1 << imm);	\
-		idivu rD, rD, rA
 	#else
 	#define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
 	/* Only the used shift constants defined here - add more if needed */
@@ -945,11 +941,20 @@
 store4:	sbi	r3, r4, 3;	/* Delay slot */
 ex_shw_vm:
 	/* Store the lower half-word, byte-by-byte into destination address */
+#ifdef __MICROBLAZEEL__
+	lbui	r3, r5, 0;
+store5:	sbi	r3, r4, 0;
+	lbui	r3, r5, 1;
+	brid	ret_from_exc;
+store6:	sbi	r3, r4, 1;	/* Delay slot */
+#else
 	lbui	r3, r5, 2;
 store5:	sbi	r3, r4, 0;
 	lbui	r3, r5, 3;
 	brid	ret_from_exc;
 store6:	sbi	r3, r4, 1;	/* Delay slot */
+#endif
+
 ex_sw_end_vm:			/* Exception handling of store word, ends. */
 
 /* We have to prevent cases that get/put_user macros get unaligned pointer
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a105301..bceaa55 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -47,9 +47,9 @@
 	memblock_add(base, size);
 }
 
-u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return memblock_alloc(size, align);
+	return __va(memblock_alloc(size, align));
 }
 
 #ifdef CONFIG_EARLY_PRINTK
@@ -61,14 +61,12 @@
 	char *p;
 	int *addr;
 
-	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+	pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);
 
 /* find all serial nodes */
 	if (strncmp(uname, "serial", 6) != 0)
 		return 0;
 
-	early_init_dt_check_for_initrd(node);
-
 /* find compatible node with uartlite */
 	p = of_get_flat_dt_prop(node, "compatible", &l);
 	if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
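
early_init_dt_alloc_memory_arch() now hands the flat-device-tree code a virtual pointer: memblock_alloc() returns a physical address, and __va() shifts it into the kernel's linear mapping. A rough sketch of that conversion, with made-up constants rather than MicroBlaze's real layout:

#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL	/* hypothetical direct-map base   */
#define MEMORY_START	0x00000000UL	/* hypothetical RAM physical base */

static void *va_sketch(unsigned long phys)
{
	/* linear mapping: virtual = physical - RAM base + direct-map base */
	return (void *)(phys - MEMORY_START + PAGE_OFFSET);
}

int main(void)
{
	unsigned long phys = 0x00400000UL;	/* as if from memblock_alloc() */

	printf("phys %#lx -> virt %p\n", phys, va_sketch(phys));
	return 0;
}
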
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bb1558e..9312fbb 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -161,11 +161,11 @@
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 	if (msr)
 		eprintk("!!!Your kernel has setup MSR instruction but "
-				"CPU don't have it %d\n", msr);
+				"CPU don't have it %x\n", msr);
 #else
 	if (!msr)
 		eprintk("!!!Your kernel not setup MSR instruction but "
-				"CPU have it %d\n", msr);
+				"CPU have it %x\n", msr);
 #endif
 
 	for (src = __ivt_start; src < __ivt_end; src++, dst++)
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 96a88c3..3451bde 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -123,20 +123,10 @@
 
 	__init_end_before_initramfs = .;
 
-	.init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-		__initramfs_start = .;
-		*(.init.ramfs)
-		__initramfs_end = .;
-		. = ALIGN(4);
-		LONG(0);
-/*
- * FIXME this can break initramfs for MMU.
- * Pad init.ramfs up to page boundary,
- * so that __init_end == __bss_start. This will make image.elf
- * consistent with the image.bin
- */
-		/* . = ALIGN(PAGE_SIZE); */
+	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+		INIT_RAM_FS
 	}
+
 	__init_end = .;
 
 	.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index fdc48bb..62021d7 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -29,6 +29,10 @@
  *	between mem locations with size of xfer spec'd in bytes
  */
 
+#ifdef __MICROBLAZEEL__
+#error Microblaze LE does not support the ASM-optimized lib functions. Disable OPT_LIB_ASM.
+#endif
+
 #include <linux/linkage.h>
 	.text
 	.globl	memcpy
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 123e361..810fd68 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -182,7 +182,7 @@
 			for (; c >= 4; c -= 4) {
 				value = *--i_src;
 				*--i_dst = buf_hold | ((value & 0xFF000000)>> 24);
-				buf_hold = (value & 0xFFFFFF) << 8;;
+				buf_hold = (value & 0xFFFFFF) << 8;
 			}
 #endif
 			/* Realign the source */
diff --git a/arch/microblaze/lib/muldi3.S b/arch/microblaze/lib/muldi3.S
deleted file mode 100644
index ceeaa8c..0000000
--- a/arch/microblaze/lib/muldi3.S
+++ /dev/null
@@ -1,121 +0,0 @@
-#include <linux/linkage.h>
-
-/*
- * Multiply operation for 64 bit integers, for devices with hard multiply
- *	Input :	Operand1[H] in Reg r5
- *		Operand1[L] in Reg r6
- *		Operand2[H] in Reg r7
- *		Operand2[L] in Reg r8
- *	Output: Result[H] in Reg r3
- *		Result[L] in Reg r4
- *
- * Explaination:
- *
- *	Both the input numbers are divided into 16 bit number as follows
- *		op1 = A B C D
- *		op2 = E F G H
- *	result = D * H
- *		 + (C * H + D * G) << 16
- *		 + (B * H + C * G + D * F) << 32
- *		 + (A * H + B * G + C * F + D * E) << 48
- *
- *	Only 64 bits of the output are considered
- */
-
-	.text
-	.globl	__muldi3
-	.type __muldi3, @function
-	.ent __muldi3
-
-__muldi3:
-	addi	r1, r1, -40
-
-/* Save the input operands on the caller's stack */
-	swi	r5, r1, 44
-	swi	r6, r1, 48
-	swi	r7, r1, 52
-	swi	r8, r1, 56
-
-/* Store all the callee saved registers */
-	sw	r20, r1, r0
-	swi	r21, r1, 4
-	swi	r22, r1, 8
-	swi	r23, r1, 12
-	swi	r24, r1, 16
-	swi	r25, r1, 20
-	swi	r26, r1, 24
-	swi	r27, r1, 28
-
-/* Load all the 16 bit values for A thru H */
-	lhui	r20, r1, 44 /* A */
-	lhui	r21, r1, 46 /* B */
-	lhui	r22, r1, 48 /* C */
-	lhui	r23, r1, 50 /* D */
-	lhui	r24, r1, 52 /* E */
-	lhui	r25, r1, 54 /* F */
-	lhui	r26, r1, 56 /* G */
-	lhui	r27, r1, 58 /* H */
-
-/* D * H ==> LSB of the result on stack ==> Store1 */
-	mul	r9, r23, r27
-	swi	r9, r1, 36 /* Pos2 and Pos3 */
-
-/* Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2 */
-/* Store the carry generated in position 2 for Pos 3 */
-	lhui	r11, r1, 36 /* Pos2 */
-	mul	r9, r22, r27 /* C * H */
-	mul	r10, r23, r26 /* D * G */
-	add	r9, r9, r10
-	addc	r12, r0, r0
-	add	r9, r9, r11
-	addc	r12, r12, r0 /* Store the Carry */
-	shi	r9, r1, 36 /* Store Pos2 */
-	swi	r9, r1, 32
-	lhui	r11, r1, 32
-	shi	r11, r1, 34 /* Store Pos1 */
-
-/* Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1 */
-	mul	r9, r21, r27 /* B * H */
-	mul	r10, r22, r26 /* C * G */
-	mul	r7, r23, r25 /* D * F */
-	add	r9, r9, r11
-	add	r9, r9, r10
-	add	r9, r9, r7
-	swi	r9, r1, 32 /* Pos0 and Pos1 */
-
-/* Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0 */
-	lhui	r11, r1, 32 /* Pos0 */
-	mul	r9, r20, r27 /* A * H */
-	mul	r10, r21, r26 /* B * G */
-	mul	r7, r22, r25 /* C * F */
-	mul	r8, r23, r24 /* D * E */
-	add	r9, r9, r11
-	add 	r9, r9, r10
-	add	r9, r9, r7
-	add	r9, r9, r8
-	sext16	r9, r9 /* Sign extend the MSB */
-	shi	r9, r1, 32
-
-/* Move results to r3 and r4 */
-	lhui	r3, r1, 32
-	add	r3, r3, r12
-	shi	r3, r1, 32
-	lwi	r3, r1, 32 /* Hi Part */
-	lwi	r4, r1, 36 /* Lo Part */
-
-/* Restore Callee saved registers */
-	lw	r20, r1, r0
-	lwi	r21, r1, 4
-	lwi	r22, r1, 8
-	lwi	r23, r1, 12
-	lwi	r24, r1, 16
-	lwi	r25, r1, 20
-	lwi	r26, r1, 24
-	lwi	r27, r1, 28
-
-/* Restore Frame and return */
-	rtsd	r15, 8
-	addi	r1, r1, 40
-
-.size __muldi3, . - __muldi3
-.end __muldi3
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c
new file mode 100644
index 0000000..d4860e1
--- /dev/null
+++ b/arch/microblaze/lib/muldi3.c
@@ -0,0 +1,60 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+#define DWtype long long
+#define UWtype unsigned long
+#define UHWtype unsigned short
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C.  */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v)						\
+	do {								\
+		UWtype __x0, __x1, __x2, __x3;				\
+		UHWtype __ul, __vl, __uh, __vh;				\
+									\
+		__ul = __ll_lowpart(u);					\
+		__uh = __ll_highpart(u);				\
+		__vl = __ll_lowpart(v);					\
+		__vh = __ll_highpart(v);				\
+									\
+		__x0 = (UWtype) __ul * __vl;				\
+		__x1 = (UWtype) __ul * __vh;				\
+		__x2 = (UWtype) __uh * __vl;				\
+		__x3 = (UWtype) __uh * __vh;				\
+									\
+		__x1 += __ll_highpart(__x0); /* this can't give carry */\
+		__x1 += __x2; /* but this indeed can */			\
+		if (__x1 < __x2) /* did we get it? */			\
+		__x3 += __ll_B; /* yes, add it in the proper pos */	\
+									\
+		(w1) = __x3 + __ll_highpart(__x1);			\
+		(w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+	} while (0)
+#endif
+
+#if !defined(__umulsidi3)
+#define __umulsidi3(u, v) ({				\
+	DWunion __w;					\
+	umul_ppmm(__w.s.high, __w.s.low, u, v);		\
+	__w.ll;						\
+	})
+#endif
+
+DWtype __muldi3(DWtype u, DWtype v)
+{
+	const DWunion uu = {.ll = u};
+	const DWunion vv = {.ll = v};
+	DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
+
+	w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
+		+ (UWtype) uu.s.high * (UWtype) vv.s.low);
+
+	return w.ll;
+}
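
The C __muldi3() above builds a 64x64->64 multiply from 32-bit pieces: the full product of the low halves plus the two cross terms shifted into the high word (the high*high term falls entirely outside 64 bits). A standalone check of that identity against a native multiply:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t u = 0x123456789abcdefLL;
	int64_t v = -0x0fedcba987654321LL;
	uint32_t ul = (uint32_t)u, uh = (uint32_t)((uint64_t)u >> 32);
	uint32_t vl = (uint32_t)v, vh = (uint32_t)((uint64_t)v >> 32);

	uint64_t w = (uint64_t)ul * vl;				/* __umulsidi3(low, low) */
	w += ((uint64_t)ul * vh + (uint64_t)uh * vl) << 32;	/* cross terms           */

	printf("from pieces: %#llx\n", (unsigned long long)w);
	printf("native:      %#llx\n",
	       (unsigned long long)((uint64_t)u * (uint64_t)v));
	return 0;
}
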
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 78439b8..7ff9b54 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -2,6 +2,7 @@
 
 platforms += alchemy
 platforms += ar7
+platforms += ath79
 platforms += bcm47xx
 platforms += bcm63xx
 platforms += cavium-octeon
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f489ec3..d889835 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,7 @@
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
@@ -21,6 +22,7 @@
 	select HAVE_DMA_API_DEBUG
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
+	select HAVE_ARCH_JUMP_LABEL
 
 menu "Machine selection"
 
@@ -65,6 +67,22 @@
 	  Support for the Texas Instruments AR7 System-on-a-Chip
 	  family: TNETD7100, 7200 and 7300.
 
+config ATH79
+	bool "Atheros AR71XX/AR724X/AR913X based boards"
+	select ARCH_REQUIRE_GPIOLIB
+	select BOOT_RAW
+	select CEVT_R4K
+	select CSRC_R4K
+	select DMA_NONCOHERENT
+	select IRQ_CPU
+	select MIPS_MACHINE
+	select SYS_HAS_CPU_MIPS32_R2
+	select SYS_HAS_EARLY_PRINTK
+	select SYS_SUPPORTS_32BIT_KERNEL
+	select SYS_SUPPORTS_BIG_ENDIAN
+	help
+	  Support for the Atheros AR71XX/AR724X/AR913X SoCs.
+
 config BCM47XX
 	bool "Broadcom BCM47XX based boards"
 	select CEVT_R4K
@@ -191,6 +209,7 @@
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_HAS_EARLY_PRINTK
 	select HAVE_PWM
+	select HAVE_CLK
 
 config LASAT
 	bool "LASAT Networks platforms"
@@ -316,6 +335,8 @@
 config PMC_MSP
 	bool "PMC-Sierra MSP chipsets"
 	depends on EXPERIMENTAL
+	select CEVT_R4K
+	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select SWAP_IO_SPACE
 	select NO_EXCEPT_FILL
@@ -717,6 +738,7 @@
 endchoice
 
 source "arch/mips/alchemy/Kconfig"
+source "arch/mips/ath79/Kconfig"
 source "arch/mips/bcm63xx/Kconfig"
 source "arch/mips/jazz/Kconfig"
 source "arch/mips/jz4740/Kconfig"
@@ -775,9 +797,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 #
 # Select some configuration options automatically based on user selections.
 #
@@ -883,6 +902,9 @@
 config SYNC_R4K
 	bool
 
+config MIPS_MACHINE
+	def_bool n
+
 config NO_IOPORT
 	def_bool n
 
@@ -2400,4 +2422,20 @@
 
 source "crypto/Kconfig"
 
+menuconfig VIRTUALIZATION
+	bool "Virtualization"
+	default n
+	---help---
+	  Say Y here to get to see options for using your Linux host to run other
+	  operating systems inside virtual machines (guests).
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
+
 source "lib/Kconfig"
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index f437cd1..5358f90 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -7,7 +7,7 @@
 source "lib/Kconfig.debug"
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED
+	bool "Early printk" if EXPERT
 	depends on SYS_HAS_EARLY_PRINTK
 	default y
 	help
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index e5916a5..647e518 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -130,7 +130,7 @@
 	au_writel(sleep_usb[1], USBD_ENABLE);
 	au_sync();
 #else
-	/* enable accces to OTG memory */
+	/* enable access to OTG memory */
 	au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
 	au_sync();
 
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index 4bbd313..acaf91b 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -110,7 +110,7 @@
 
 }
 
-static struct platform_suspend_ops db1x_pm_ops = {
+static const struct platform_suspend_ops db1x_pm_ops = {
 	.valid		= suspend_valid_only_mem,
 	.begin		= db1x_pm_begin,
 	.enter		= db1x_pm_enter,
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 6398fa9..40b84b9 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -54,8 +54,8 @@
 
 static void mtx1_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void mtx1_power_off(void)
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c
index e30e42a..956f946 100644
--- a/arch/mips/alchemy/mtx-1/platform.c
+++ b/arch/mips/alchemy/mtx-1/platform.c
@@ -28,6 +28,8 @@
 #include <linux/mtd/physmap.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
 static struct gpio_keys_button mtx1_gpio_button[] = {
 	{
 		.gpio = 207,
@@ -140,10 +142,17 @@
 	&mtx1_mtd,
 };
 
+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+	.phy_search_highest_addr	= 1,
+	.phy1_search_mac0 		= 1,
+};
+
 static int __init mtx1_register_devices(void)
 {
 	int rc;
 
+	au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
+
 	rc = gpio_request(mtx1_gpio_button[0].gpio,
 					mtx1_gpio_button[0].desc);
 	if (rc < 0) {
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index b43c918..80c521e 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -36,8 +36,8 @@
 
 static void xxs1500_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void xxs1500_power_off(void)
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
new file mode 100644
index 0000000..b058282
--- /dev/null
+++ b/arch/mips/ath79/Kconfig
@@ -0,0 +1,50 @@
+if ATH79
+
+menu "Atheros AR71XX/AR724X/AR913X machine selection"
+
+config ATH79_MACH_AP81
+	bool "Atheros AP81 reference board"
+	select SOC_AR913X
+	select ATH79_DEV_AR913X_WMAC
+	select ATH79_DEV_GPIO_BUTTONS
+	select ATH79_DEV_LEDS_GPIO
+	select ATH79_DEV_SPI
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  Atheros AP81 reference board.
+
+config ATH79_MACH_PB44
+	bool "Atheros PB44 reference board"
+	select SOC_AR71XX
+	select ATH79_DEV_GPIO_BUTTONS
+	select ATH79_DEV_LEDS_GPIO
+	select ATH79_DEV_SPI
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  Atheros PB44 reference board.
+
+endmenu
+
+config SOC_AR71XX
+	def_bool n
+
+config SOC_AR724X
+	def_bool n
+
+config SOC_AR913X
+	def_bool n
+
+config ATH79_DEV_AR913X_WMAC
+	depends on SOC_AR913X
+	def_bool n
+
+config ATH79_DEV_GPIO_BUTTONS
+	def_bool n
+
+config ATH79_DEV_LEDS_GPIO
+	def_bool n
+
+config ATH79_DEV_SPI
+	def_bool n
+
+endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
new file mode 100644
index 0000000..c33d465
--- /dev/null
+++ b/arch/mips/ath79/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for the Atheros AR71XX/AR724X/AR913X specific parts of the kernel
+#
+# Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+# Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation.
+
+obj-y	:= prom.o setup.o irq.o common.o clock.o gpio.o
+
+obj-$(CONFIG_EARLY_PRINTK)		+= early_printk.o
+
+#
+# Devices
+#
+obj-y					+= dev-common.o
+obj-$(CONFIG_ATH79_DEV_AR913X_WMAC)	+= dev-ar913x-wmac.o
+obj-$(CONFIG_ATH79_DEV_GPIO_BUTTONS)	+= dev-gpio-buttons.o
+obj-$(CONFIG_ATH79_DEV_LEDS_GPIO)	+= dev-leds-gpio.o
+obj-$(CONFIG_ATH79_DEV_SPI)		+= dev-spi.o
+
+#
+# Machines
+#
+obj-$(CONFIG_ATH79_MACH_AP81)		+= mach-ap81.o
+obj-$(CONFIG_ATH79_MACH_PB44)		+= mach-pb44.o
diff --git a/arch/mips/ath79/Platform b/arch/mips/ath79/Platform
new file mode 100644
index 0000000..2bd6636
--- /dev/null
+++ b/arch/mips/ath79/Platform
@@ -0,0 +1,7 @@
+#
+# Atheros AR71xx/AR724x/AR913x
+#
+
+platform-$(CONFIG_ATH79)	+= ath79/
+cflags-$(CONFIG_ATH79)		+= -I$(srctree)/arch/mips/include/asm/mach-ath79
+load-$(CONFIG_ATH79)		= 0xffffffff80060000
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
new file mode 100644
index 0000000..680bde9
--- /dev/null
+++ b/arch/mips/ath79/clock.c
@@ -0,0 +1,183 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common routines
+ *
+ *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+
+#define AR71XX_BASE_FREQ	40000000
+#define AR724X_BASE_FREQ	5000000
+#define AR913X_BASE_FREQ	5000000
+
+struct clk {
+	unsigned long rate;
+};
+
+static struct clk ath79_ref_clk;
+static struct clk ath79_cpu_clk;
+static struct clk ath79_ddr_clk;
+static struct clk ath79_ahb_clk;
+static struct clk ath79_wdt_clk;
+static struct clk ath79_uart_clk;
+
+static void __init ar71xx_clocks_init(void)
+{
+	u32 pll;
+	u32 freq;
+	u32 div;
+
+	ath79_ref_clk.rate = AR71XX_BASE_FREQ;
+
+	pll = ath79_pll_rr(AR71XX_PLL_REG_CPU_CONFIG);
+
+	div = ((pll >> AR71XX_PLL_DIV_SHIFT) & AR71XX_PLL_DIV_MASK) + 1;
+	freq = div * ath79_ref_clk.rate;
+
+	div = ((pll >> AR71XX_CPU_DIV_SHIFT) & AR71XX_CPU_DIV_MASK) + 1;
+	ath79_cpu_clk.rate = freq / div;
+
+	div = ((pll >> AR71XX_DDR_DIV_SHIFT) & AR71XX_DDR_DIV_MASK) + 1;
+	ath79_ddr_clk.rate = freq / div;
+
+	div = (((pll >> AR71XX_AHB_DIV_SHIFT) & AR71XX_AHB_DIV_MASK) + 1) * 2;
+	ath79_ahb_clk.rate = ath79_cpu_clk.rate / div;
+
+	ath79_wdt_clk.rate = ath79_ahb_clk.rate;
+	ath79_uart_clk.rate = ath79_ahb_clk.rate;
+}
+
+static void __init ar724x_clocks_init(void)
+{
+	u32 pll;
+	u32 freq;
+	u32 div;
+
+	ath79_ref_clk.rate = AR724X_BASE_FREQ;
+	pll = ath79_pll_rr(AR724X_PLL_REG_CPU_CONFIG);
+
+	div = ((pll >> AR724X_PLL_DIV_SHIFT) & AR724X_PLL_DIV_MASK);
+	freq = div * ath79_ref_clk.rate;
+
+	div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK);
+	freq *= div;
+
+	ath79_cpu_clk.rate = freq;
+
+	div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
+	ath79_ddr_clk.rate = freq / div;
+
+	div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
+	ath79_ahb_clk.rate = ath79_cpu_clk.rate / div;
+
+	ath79_wdt_clk.rate = ath79_ahb_clk.rate;
+	ath79_uart_clk.rate = ath79_ahb_clk.rate;
+}
+
+static void __init ar913x_clocks_init(void)
+{
+	u32 pll;
+	u32 freq;
+	u32 div;
+
+	ath79_ref_clk.rate = AR913X_BASE_FREQ;
+	pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
+
+	div = ((pll >> AR913X_PLL_DIV_SHIFT) & AR913X_PLL_DIV_MASK);
+	freq = div * ath79_ref_clk.rate;
+
+	ath79_cpu_clk.rate = freq;
+
+	div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
+	ath79_ddr_clk.rate = freq / div;
+
+	div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
+	ath79_ahb_clk.rate = ath79_cpu_clk.rate / div;
+
+	ath79_wdt_clk.rate = ath79_ahb_clk.rate;
+	ath79_uart_clk.rate = ath79_ahb_clk.rate;
+}
+
+void __init ath79_clocks_init(void)
+{
+	if (soc_is_ar71xx())
+		ar71xx_clocks_init();
+	else if (soc_is_ar724x())
+		ar724x_clocks_init();
+	else if (soc_is_ar913x())
+		ar913x_clocks_init();
+	else
+		BUG();
+
+	pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, "
+		"Ref:%lu.%03luMHz",
+		ath79_cpu_clk.rate / 1000000,
+		(ath79_cpu_clk.rate / 1000) % 1000,
+		ath79_ddr_clk.rate / 1000000,
+		(ath79_ddr_clk.rate / 1000) % 1000,
+		ath79_ahb_clk.rate / 1000000,
+		(ath79_ahb_clk.rate / 1000) % 1000,
+		ath79_ref_clk.rate / 1000000,
+		(ath79_ref_clk.rate / 1000) % 1000);
+}
+
+/*
+ * Linux clock API
+ */
+struct clk *clk_get(struct device *dev, const char *id)
+{
+	if (!strcmp(id, "ref"))
+		return &ath79_ref_clk;
+
+	if (!strcmp(id, "cpu"))
+		return &ath79_cpu_clk;
+
+	if (!strcmp(id, "ddr"))
+		return &ath79_ddr_clk;
+
+	if (!strcmp(id, "ahb"))
+		return &ath79_ahb_clk;
+
+	if (!strcmp(id, "wdt"))
+		return &ath79_wdt_clk;
+
+	if (!strcmp(id, "uart"))
+		return &ath79_uart_clk;
+
+	return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(clk_get);
+
+int clk_enable(struct clk *clk)
+{
+	return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
new file mode 100644
index 0000000..58f60e7
--- /dev/null
+++ b/arch/mips/ath79/common.c
@@ -0,0 +1,97 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common routines
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+
+static DEFINE_SPINLOCK(ath79_device_reset_lock);
+
+u32 ath79_cpu_freq;
+EXPORT_SYMBOL_GPL(ath79_cpu_freq);
+
+u32 ath79_ahb_freq;
+EXPORT_SYMBOL_GPL(ath79_ahb_freq);
+
+u32 ath79_ddr_freq;
+EXPORT_SYMBOL_GPL(ath79_ddr_freq);
+
+enum ath79_soc_type ath79_soc;
+
+void __iomem *ath79_pll_base;
+void __iomem *ath79_reset_base;
+EXPORT_SYMBOL_GPL(ath79_reset_base);
+void __iomem *ath79_ddr_base;
+
+void ath79_ddr_wb_flush(u32 reg)
+{
+	void __iomem *flush_reg = ath79_ddr_base + reg;
+
+	/* Flush the DDR write buffer. */
+	__raw_writel(0x1, flush_reg);
+	while (__raw_readl(flush_reg) & 0x1)
+		;
+
+	/* It must be run twice. */
+	__raw_writel(0x1, flush_reg);
+	while (__raw_readl(flush_reg) & 0x1)
+		;
+}
+EXPORT_SYMBOL_GPL(ath79_ddr_wb_flush);
+
+void ath79_device_reset_set(u32 mask)
+{
+	unsigned long flags;
+	u32 reg;
+	u32 t;
+
+	if (soc_is_ar71xx())
+		reg = AR71XX_RESET_REG_RESET_MODULE;
+	else if (soc_is_ar724x())
+		reg = AR724X_RESET_REG_RESET_MODULE;
+	else if (soc_is_ar913x())
+		reg = AR913X_RESET_REG_RESET_MODULE;
+	else
+		BUG();
+
+	spin_lock_irqsave(&ath79_device_reset_lock, flags);
+	t = ath79_reset_rr(reg);
+	ath79_reset_wr(reg, t | mask);
+	spin_unlock_irqrestore(&ath79_device_reset_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ath79_device_reset_set);
+
+void ath79_device_reset_clear(u32 mask)
+{
+	unsigned long flags;
+	u32 reg;
+	u32 t;
+
+	if (soc_is_ar71xx())
+		reg = AR71XX_RESET_REG_RESET_MODULE;
+	else if (soc_is_ar724x())
+		reg = AR724X_RESET_REG_RESET_MODULE;
+	else if (soc_is_ar913x())
+		reg = AR913X_RESET_REG_RESET_MODULE;
+	else
+		BUG();
+
+	spin_lock_irqsave(&ath79_device_reset_lock, flags);
+	t = ath79_reset_rr(reg);
+	ath79_reset_wr(reg, t & ~mask);
+	spin_unlock_irqrestore(&ath79_device_reset_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ath79_device_reset_clear);
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
new file mode 100644
index 0000000..561906c
--- /dev/null
+++ b/arch/mips/ath79/common.h
@@ -0,0 +1,31 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common definitions
+ *
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef __ATH79_COMMON_H
+#define __ATH79_COMMON_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+
+#define ATH79_MEM_SIZE_MIN	(2 * 1024 * 1024)
+#define ATH79_MEM_SIZE_MAX	(128 * 1024 * 1024)
+
+void ath79_clocks_init(void);
+void ath79_ddr_wb_flush(unsigned int reg);
+
+void ath79_gpio_function_enable(u32 mask);
+void ath79_gpio_function_disable(u32 mask);
+void ath79_gpio_function_setup(u32 set, u32 clear);
+void ath79_gpio_init(void);
+
+#endif /* __ATH79_COMMON_H */
diff --git a/arch/mips/ath79/dev-ar913x-wmac.c b/arch/mips/ath79/dev-ar913x-wmac.c
new file mode 100644
index 0000000..48f425a
--- /dev/null
+++ b/arch/mips/ath79/dev-ar913x-wmac.c
@@ -0,0 +1,60 @@
+/*
+ *  Atheros AR913X SoC built-in WMAC device support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/ath9k_platform.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "dev-ar913x-wmac.h"
+
+static struct ath9k_platform_data ar913x_wmac_data;
+
+static struct resource ar913x_wmac_resources[] = {
+	{
+		.start	= AR913X_WMAC_BASE,
+		.end	= AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.start	= ATH79_CPU_IRQ_IP2,
+		.end	= ATH79_CPU_IRQ_IP2,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device ar913x_wmac_device = {
+	.name		= "ath9k",
+	.id		= -1,
+	.resource	= ar913x_wmac_resources,
+	.num_resources	= ARRAY_SIZE(ar913x_wmac_resources),
+	.dev = {
+		.platform_data = &ar913x_wmac_data,
+	},
+};
+
+void __init ath79_register_ar913x_wmac(u8 *cal_data)
+{
+	if (cal_data)
+		memcpy(ar913x_wmac_data.eeprom_data, cal_data,
+		       sizeof(ar913x_wmac_data.eeprom_data));
+
+	/* reset the WMAC */
+	ath79_device_reset_set(AR913X_RESET_AMBA2WMAC);
+	mdelay(10);
+
+	ath79_device_reset_clear(AR913X_RESET_AMBA2WMAC);
+	mdelay(10);
+
+	platform_device_register(&ar913x_wmac_device);
+}
diff --git a/arch/mips/ath79/dev-ar913x-wmac.h b/arch/mips/ath79/dev-ar913x-wmac.h
new file mode 100644
index 0000000..579d562
--- /dev/null
+++ b/arch/mips/ath79/dev-ar913x-wmac.h
@@ -0,0 +1,17 @@
+/*
+ *  Atheros AR913X SoC built-in WMAC device support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_DEV_AR913X_WMAC_H
+#define _ATH79_DEV_AR913X_WMAC_H
+
+void ath79_register_ar913x_wmac(u8 *cal_data);
+
+#endif /* _ATH79_DEV_AR913X_WMAC_H */
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
new file mode 100644
index 0000000..3b82e32
--- /dev/null
+++ b/arch/mips/ath79/dev-common.c
@@ -0,0 +1,77 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common devices
+ *
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+#include "dev-common.h"
+
+static struct resource ath79_uart_resources[] = {
+	{
+		.start	= AR71XX_UART_BASE,
+		.end	= AR71XX_UART_BASE + AR71XX_UART_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+#define AR71XX_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP)
+static struct plat_serial8250_port ath79_uart_data[] = {
+	{
+		.mapbase	= AR71XX_UART_BASE,
+		.irq		= ATH79_MISC_IRQ_UART,
+		.flags		= AR71XX_UART_FLAGS,
+		.iotype		= UPIO_MEM32,
+		.regshift	= 2,
+	}, {
+		/* terminating entry */
+	}
+};
+
+static struct platform_device ath79_uart_device = {
+	.name		= "serial8250",
+	.id		= PLAT8250_DEV_PLATFORM,
+	.resource	= ath79_uart_resources,
+	.num_resources	= ARRAY_SIZE(ath79_uart_resources),
+	.dev = {
+		.platform_data	= ath79_uart_data
+	},
+};
+
+void __init ath79_register_uart(void)
+{
+	struct clk *clk;
+
+	clk = clk_get(NULL, "uart");
+	if (IS_ERR(clk))
+		panic("unable to get UART clock, err=%ld", PTR_ERR(clk));
+
+	ath79_uart_data[0].uartclk = clk_get_rate(clk);
+	platform_device_register(&ath79_uart_device);
+}
+
+static struct platform_device ath79_wdt_device = {
+	.name		= "ath79-wdt",
+	.id		= -1,
+};
+
+void __init ath79_register_wdt(void)
+{
+	platform_device_register(&ath79_wdt_device);
+}
diff --git a/arch/mips/ath79/dev-common.h b/arch/mips/ath79/dev-common.h
new file mode 100644
index 0000000..0f514e1
--- /dev/null
+++ b/arch/mips/ath79/dev-common.h
@@ -0,0 +1,18 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common devices
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_DEV_COMMON_H
+#define _ATH79_DEV_COMMON_H
+
+void ath79_register_uart(void);
+void ath79_register_wdt(void);
+
+#endif /* _ATH79_DEV_COMMON_H */
diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c
new file mode 100644
index 0000000..4b0168a
--- /dev/null
+++ b/arch/mips/ath79/dev-gpio-buttons.c
@@ -0,0 +1,58 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X GPIO button support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "dev-gpio-buttons.h"
+
+void __init ath79_register_gpio_keys_polled(int id,
+					    unsigned poll_interval,
+					    unsigned nbuttons,
+					    struct gpio_keys_button *buttons)
+{
+	struct platform_device *pdev;
+	struct gpio_keys_platform_data pdata;
+	struct gpio_keys_button *p;
+	int err;
+
+	p = kmalloc(nbuttons * sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return;
+
+	memcpy(p, buttons, nbuttons * sizeof(*p));
+
+	pdev = platform_device_alloc("gpio-keys-polled", id);
+	if (!pdev)
+		goto err_free_buttons;
+
+	memset(&pdata, 0, sizeof(pdata));
+	pdata.poll_interval = poll_interval;
+	pdata.nbuttons = nbuttons;
+	pdata.buttons = p;
+
+	err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+	if (err)
+		goto err_put_pdev;
+
+	err = platform_device_add(pdev);
+	if (err)
+		goto err_put_pdev;
+
+	return;
+
+err_put_pdev:
+	platform_device_put(pdev);
+
+err_free_buttons:
+	kfree(p);
+}
diff --git a/arch/mips/ath79/dev-gpio-buttons.h b/arch/mips/ath79/dev-gpio-buttons.h
new file mode 100644
index 0000000..481847a
--- /dev/null
+++ b/arch/mips/ath79/dev-gpio-buttons.h
@@ -0,0 +1,23 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X GPIO button support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_DEV_GPIO_BUTTONS_H
+#define _ATH79_DEV_GPIO_BUTTONS_H
+
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+void ath79_register_gpio_keys_polled(int id,
+				     unsigned poll_interval,
+				     unsigned nbuttons,
+				     struct gpio_keys_button *buttons);
+
+#endif /* _ATH79_DEV_GPIO_BUTTONS_H */
diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c
new file mode 100644
index 0000000..cdade68
--- /dev/null
+++ b/arch/mips/ath79/dev-leds-gpio.c
@@ -0,0 +1,56 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common GPIO LEDs support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "dev-leds-gpio.h"
+
+void __init ath79_register_leds_gpio(int id,
+				     unsigned num_leds,
+				     struct gpio_led *leds)
+{
+	struct platform_device *pdev;
+	struct gpio_led_platform_data pdata;
+	struct gpio_led *p;
+	int err;
+
+	p = kmalloc(num_leds * sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return;
+
+	memcpy(p, leds, num_leds * sizeof(*p));
+
+	pdev = platform_device_alloc("leds-gpio", id);
+	if (!pdev)
+		goto err_free_leds;
+
+	memset(&pdata, 0, sizeof(pdata));
+	pdata.num_leds = num_leds;
+	pdata.leds = p;
+
+	err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+	if (err)
+		goto err_put_pdev;
+
+	err = platform_device_add(pdev);
+	if (err)
+		goto err_put_pdev;
+
+	return;
+
+err_put_pdev:
+	platform_device_put(pdev);
+
+err_free_leds:
+	kfree(p);
+}
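
This helper and ath79_register_gpio_keys_polled() above copy the caller's table, attach it as platform data and register a platform device, so board code only has to supply static tables. A hedged sketch of how a board file might use them; the GPIO numbers, key code and LED name are invented for illustration:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/leds.h>

#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"

/* hypothetical board wiring */
static struct gpio_led example_leds[] = {
	{
		.name		= "example:green:status",
		.gpio		= 1,
		.active_low	= 1,
	},
};

static struct gpio_keys_button example_buttons[] = {
	{
		.desc		= "reset",
		.type		= EV_KEY,
		.code		= KEY_RESTART,
		.gpio		= 11,
		.active_low	= 1,
		.debounce_interval = 50,
	},
};

static void __init example_board_setup(void)
{
	ath79_register_leds_gpio(-1, ARRAY_SIZE(example_leds), example_leds);
	ath79_register_gpio_keys_polled(-1, 20 /* ms poll interval */,
					ARRAY_SIZE(example_buttons),
					example_buttons);
}
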
diff --git a/arch/mips/ath79/dev-leds-gpio.h b/arch/mips/ath79/dev-leds-gpio.h
new file mode 100644
index 0000000..6e5d885
--- /dev/null
+++ b/arch/mips/ath79/dev-leds-gpio.h
@@ -0,0 +1,21 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common GPIO LEDs support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_DEV_LEDS_GPIO_H
+#define _ATH79_DEV_LEDS_GPIO_H
+
+#include <linux/leds.h>
+
+void ath79_register_leds_gpio(int id,
+			      unsigned num_leds,
+			      struct gpio_led *leds);
+
+#endif /* _ATH79_DEV_LEDS_GPIO_H */
diff --git a/arch/mips/ath79/dev-spi.c b/arch/mips/ath79/dev-spi.c
new file mode 100644
index 0000000..aa30163
--- /dev/null
+++ b/arch/mips/ath79/dev-spi.c
@@ -0,0 +1,38 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X SPI controller device
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "dev-spi.h"
+
+static struct resource ath79_spi_resources[] = {
+	{
+		.start	= AR71XX_SPI_BASE,
+		.end	= AR71XX_SPI_BASE + AR71XX_SPI_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device ath79_spi_device = {
+	.name		= "ath79-spi",
+	.id		= -1,
+	.resource	= ath79_spi_resources,
+	.num_resources	= ARRAY_SIZE(ath79_spi_resources),
+};
+
+void __init ath79_register_spi(struct ath79_spi_platform_data *pdata,
+			       struct spi_board_info const *info,
+			       unsigned n)
+{
+	spi_register_board_info(info, n);
+	ath79_spi_device.dev.platform_data = pdata;
+	platform_device_register(&ath79_spi_device);
+}
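
ath79_register_spi() pairs the controller's platform data with an spi_board_info table describing the attached slaves. A sketch of a hypothetical caller; the flash chip, speed and the platform-data field values are assumptions, not taken from the patch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

#include "dev-spi.h"

static struct ath79_spi_platform_data example_spi_data = {
	/* field names assumed from the (not shown) ath79_spi_platform header */
	.bus_num	= 0,
	.num_chipselect	= 1,
};

static struct spi_board_info example_spi_info[] = {
	{
		.modalias	= "m25p80",	/* hypothetical SPI NOR flash */
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 25000000,
	},
};

static void __init example_register_spi(void)
{
	ath79_register_spi(&example_spi_data, example_spi_info,
			   ARRAY_SIZE(example_spi_info));
}
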
diff --git a/arch/mips/ath79/dev-spi.h b/arch/mips/ath79/dev-spi.h
new file mode 100644
index 0000000..d732565
--- /dev/null
+++ b/arch/mips/ath79/dev-spi.h
@@ -0,0 +1,22 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X SPI controller device
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_DEV_SPI_H
+#define _ATH79_DEV_SPI_H
+
+#include <linux/spi/spi.h>
+#include <asm/mach-ath79/ath79_spi_platform.h>
+
+void ath79_register_spi(struct ath79_spi_platform_data *pdata,
+			 struct spi_board_info const *info,
+			 unsigned n);
+
+#endif /* _ATH79_DEV_SPI_H */
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
new file mode 100644
index 0000000..7499b0e
--- /dev/null
+++ b/arch/mips/ath79/early_printk.c
@@ -0,0 +1,36 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X SoC early printk support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+#include <asm/addrspace.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+static inline void prom_wait_thre(void __iomem *base)
+{
+	u32 lsr;
+
+	do {
+		lsr = __raw_readl(base + UART_LSR * 4);
+		if (lsr & UART_LSR_THRE)
+			break;
+	} while (1);
+}
+
+void prom_putchar(unsigned char ch)
+{
+	void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
+
+	prom_wait_thre(base);
+	__raw_writel(ch, base + UART_TX * 4);
+	prom_wait_thre(base);
+}
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
new file mode 100644
index 0000000..a0c426b
--- /dev/null
+++ b/arch/mips/ath79/gpio.c
@@ -0,0 +1,197 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X GPIO API support
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include "common.h"
+
+static void __iomem *ath79_gpio_base;
+static unsigned long ath79_gpio_count;
+static DEFINE_SPINLOCK(ath79_gpio_lock);
+
+static void __ath79_gpio_set_value(unsigned gpio, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+
+	if (value)
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
+}
+
+static int __ath79_gpio_get_value(unsigned gpio)
+{
+	return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
+}
+
+static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+	return __ath79_gpio_get_value(offset);
+}
+
+static void ath79_gpio_set_value(struct gpio_chip *chip,
+				  unsigned offset, int value)
+{
+	__ath79_gpio_set_value(offset, value);
+}
+
+static int ath79_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ath79_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static struct gpio_chip ath79_gpio_chip = {
+	.label			= "ath79",
+	.get			= ath79_gpio_get_value,
+	.set			= ath79_gpio_set_value,
+	.direction_input	= ath79_gpio_direction_input,
+	.direction_output	= ath79_gpio_direction_output,
+	.base			= 0,
+};
+
+void ath79_gpio_function_enable(u32 mask)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) | mask,
+		     base + AR71XX_GPIO_REG_FUNC);
+	/* flush write */
+	__raw_readl(base + AR71XX_GPIO_REG_FUNC);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+}
+
+void ath79_gpio_function_disable(u32 mask)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~mask,
+		     base + AR71XX_GPIO_REG_FUNC);
+	/* flush write */
+	__raw_readl(base + AR71XX_GPIO_REG_FUNC);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+}
+
+void ath79_gpio_function_setup(u32 set, u32 clear)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel((__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~clear) | set,
+		     base + AR71XX_GPIO_REG_FUNC);
+	/* flush write */
+	__raw_readl(base + AR71XX_GPIO_REG_FUNC);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+}
+
+void __init ath79_gpio_init(void)
+{
+	int err;
+
+	if (soc_is_ar71xx())
+		ath79_gpio_count = AR71XX_GPIO_COUNT;
+	else if (soc_is_ar724x())
+		ath79_gpio_count = AR724X_GPIO_COUNT;
+	else if (soc_is_ar913x())
+		ath79_gpio_count = AR913X_GPIO_COUNT;
+	else
+		BUG();
+
+	ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
+	ath79_gpio_chip.ngpio = ath79_gpio_count;
+
+	err = gpiochip_add(&ath79_gpio_chip);
+	if (err)
+		panic("cannot add AR71xx GPIO chip, error=%d", err);
+}
+
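+/*
+ * GPIO numbers below ath79_gpio_count belong to the SoC's built-in GPIO
+ * block and are accessed directly; higher numbers (e.g. the PB44's I2C
+ * expander) fall back to the generic gpiolib accessors.
+ */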
+int gpio_get_value(unsigned gpio)
+{
+	if (gpio < ath79_gpio_count)
+		return __ath79_gpio_get_value(gpio);
+
+	return __gpio_get_value(gpio);
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+void gpio_set_value(unsigned gpio, int value)
+{
+	if (gpio < ath79_gpio_count)
+		__ath79_gpio_set_value(gpio, value);
+	else
+		__gpio_set_value(gpio, value);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int gpio_to_irq(unsigned gpio)
+{
+	/* FIXME */
+	return -EINVAL;
+}
+EXPORT_SYMBOL(gpio_to_irq);
+
+int irq_to_gpio(unsigned irq)
+{
+	/* FIXME */
+	return -EINVAL;
+}
+EXPORT_SYMBOL(irq_to_gpio);
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
new file mode 100644
index 0000000..1bf7f71
--- /dev/null
+++ b/arch/mips/ath79/irq.c
@@ -0,0 +1,187 @@
+/*
+ *  Atheros AR71xx/AR724x/AR913x specific interrupt handling
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+
+static unsigned int ath79_ip2_flush_reg;
+static unsigned int ath79_ip3_flush_reg;
+
+static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	void __iomem *base = ath79_reset_base;
+	u32 pending;
+
+	pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
+		  __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	if (pending & MISC_INT_UART)
+		generic_handle_irq(ATH79_MISC_IRQ_UART);
+
+	else if (pending & MISC_INT_DMA)
+		generic_handle_irq(ATH79_MISC_IRQ_DMA);
+
+	else if (pending & MISC_INT_PERFC)
+		generic_handle_irq(ATH79_MISC_IRQ_PERFC);
+
+	else if (pending & MISC_INT_TIMER)
+		generic_handle_irq(ATH79_MISC_IRQ_TIMER);
+
+	else if (pending & MISC_INT_OHCI)
+		generic_handle_irq(ATH79_MISC_IRQ_OHCI);
+
+	else if (pending & MISC_INT_ERROR)
+		generic_handle_irq(ATH79_MISC_IRQ_ERROR);
+
+	else if (pending & MISC_INT_GPIO)
+		generic_handle_irq(ATH79_MISC_IRQ_GPIO);
+
+	else if (pending & MISC_INT_WDOG)
+		generic_handle_irq(ATH79_MISC_IRQ_WDOG);
+
+	else
+		spurious_interrupt();
+}
+
+static void ar71xx_misc_irq_unmask(unsigned int irq)
+{
+	void __iomem *base = ath79_reset_base;
+	u32 t;
+
+	irq -= ATH79_MISC_IRQ_BASE;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar71xx_misc_irq_mask(unsigned int irq)
+{
+	void __iomem *base = ath79_reset_base;
+	u32 t;
+
+	irq -= ATH79_MISC_IRQ_BASE;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar724x_misc_irq_ack(unsigned int irq)
+{
+	void __iomem *base = ath79_reset_base;
+	u32 t;
+
+	irq -= ATH79_MISC_IRQ_BASE;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+}
+
+static struct irq_chip ath79_misc_irq_chip = {
+	.name		= "MISC",
+	.unmask		= ar71xx_misc_irq_unmask,
+	.mask		= ar71xx_misc_irq_mask,
+};
+
+static void __init ath79_misc_irq_init(void)
+{
+	void __iomem *base = ath79_reset_base;
+	int i;
+
+	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+	if (soc_is_ar71xx() || soc_is_ar913x())
+		ath79_misc_irq_chip.mask_ack = ar71xx_misc_irq_mask;
+	else if (soc_is_ar724x())
+		ath79_misc_irq_chip.ack = ar724x_misc_irq_ack;
+	else
+		BUG();
+
+	for (i = ATH79_MISC_IRQ_BASE;
+	     i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) {
+		irq_desc[i].status = IRQ_DISABLED;
+		set_irq_chip_and_handler(i, &ath79_misc_irq_chip,
+					 handle_level_irq);
+	}
+
+	set_irq_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+	unsigned long pending;
+
+	pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+	if (pending & STATUSF_IP7)
+		do_IRQ(ATH79_CPU_IRQ_TIMER);
+
+	else if (pending & STATUSF_IP2) {
+		ath79_ddr_wb_flush(ath79_ip2_flush_reg);
+		do_IRQ(ATH79_CPU_IRQ_IP2);
+	}
+
+	else if (pending & STATUSF_IP4)
+		do_IRQ(ATH79_CPU_IRQ_GE0);
+
+	else if (pending & STATUSF_IP5)
+		do_IRQ(ATH79_CPU_IRQ_GE1);
+
+	else if (pending & STATUSF_IP3) {
+		ath79_ddr_wb_flush(ath79_ip3_flush_reg);
+		do_IRQ(ATH79_CPU_IRQ_USB);
+	}
+
+	else if (pending & STATUSF_IP6)
+		do_IRQ(ATH79_CPU_IRQ_MISC);
+
+	else
+		spurious_interrupt();
+}
+
+void __init arch_init_irq(void)
+{
+	if (soc_is_ar71xx()) {
+		ath79_ip2_flush_reg = AR71XX_DDR_REG_FLUSH_PCI;
+		ath79_ip3_flush_reg = AR71XX_DDR_REG_FLUSH_USB;
+	} else if (soc_is_ar724x()) {
+		ath79_ip2_flush_reg = AR724X_DDR_REG_FLUSH_PCIE;
+		ath79_ip3_flush_reg = AR724X_DDR_REG_FLUSH_USB;
+	} else if (soc_is_ar913x()) {
+		ath79_ip2_flush_reg = AR913X_DDR_REG_FLUSH_WMAC;
+		ath79_ip3_flush_reg = AR913X_DDR_REG_FLUSH_USB;
+	} else
+		BUG();
+
+	cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
+	mips_cpu_irq_init();
+	ath79_misc_irq_init();
+}
diff --git a/arch/mips/ath79/mach-ap81.c b/arch/mips/ath79/mach-ap81.c
new file mode 100644
index 0000000..eee4c12
--- /dev/null
+++ b/arch/mips/ath79/mach-ap81.c
@@ -0,0 +1,98 @@
+/*
+ *  Atheros AP81 board support
+ *
+ *  Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include "machtypes.h"
+#include "dev-ar913x-wmac.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+
+#define AP81_GPIO_LED_STATUS	1
+#define AP81_GPIO_LED_AOSS	3
+#define AP81_GPIO_LED_WLAN	6
+#define AP81_GPIO_LED_POWER	14
+
+#define AP81_GPIO_BTN_SW4	12
+#define AP81_GPIO_BTN_SW1	21
+
+#define AP81_KEYS_POLL_INTERVAL		20	/* msecs */
+#define AP81_KEYS_DEBOUNCE_INTERVAL	(3 * AP81_KEYS_POLL_INTERVAL)
+
+#define AP81_CAL_DATA_ADDR	0x1fff1000
+
+static struct gpio_led ap81_leds_gpio[] __initdata = {
+	{
+		.name		= "ap81:green:status",
+		.gpio		= AP81_GPIO_LED_STATUS,
+		.active_low	= 1,
+	}, {
+		.name		= "ap81:amber:aoss",
+		.gpio		= AP81_GPIO_LED_AOSS,
+		.active_low	= 1,
+	}, {
+		.name		= "ap81:green:wlan",
+		.gpio		= AP81_GPIO_LED_WLAN,
+		.active_low	= 1,
+	}, {
+		.name		= "ap81:green:power",
+		.gpio		= AP81_GPIO_LED_POWER,
+		.active_low	= 1,
+	}
+};
+
+static struct gpio_keys_button ap81_gpio_keys[] __initdata = {
+	{
+		.desc		= "sw1",
+		.type		= EV_KEY,
+		.code		= BTN_0,
+		.debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
+		.gpio		= AP81_GPIO_BTN_SW1,
+		.active_low	= 1,
+	}, {
+		.desc		= "sw4",
+		.type		= EV_KEY,
+		.code		= BTN_1,
+		.debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
+		.gpio		= AP81_GPIO_BTN_SW4,
+		.active_low	= 1,
+	}
+};
+
+static struct spi_board_info ap81_spi_info[] = {
+	{
+		.bus_num	= 0,
+		.chip_select	= 0,
+		.max_speed_hz	= 25000000,
+		.modalias	= "m25p64",
+	}
+};
+
+static struct ath79_spi_platform_data ap81_spi_data = {
+	.bus_num	= 0,
+	.num_chipselect	= 1,
+};
+
+static void __init ap81_setup(void)
+{
+	u8 *cal_data = (u8 *) KSEG1ADDR(AP81_CAL_DATA_ADDR);
+
+	ath79_register_leds_gpio(-1, ARRAY_SIZE(ap81_leds_gpio),
+				 ap81_leds_gpio);
+	ath79_register_gpio_keys_polled(-1, AP81_KEYS_POLL_INTERVAL,
+					ARRAY_SIZE(ap81_gpio_keys),
+					ap81_gpio_keys);
+	ath79_register_spi(&ap81_spi_data, ap81_spi_info,
+			   ARRAY_SIZE(ap81_spi_info));
+	ath79_register_ar913x_wmac(cal_data);
+}
+
+MIPS_MACHINE(ATH79_MACH_AP81, "AP81", "Atheros AP81 reference board",
+	     ap81_setup);
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
new file mode 100644
index 0000000..ec7b7a1
--- /dev/null
+++ b/arch/mips/ath79/mach-pb44.c
@@ -0,0 +1,118 @@
+/*
+ *  Atheros PB44 reference board support
+ *
+ *  Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
+#include <linux/i2c/pcf857x.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+
+#define PB44_GPIO_I2C_SCL	0
+#define PB44_GPIO_I2C_SDA	1
+
+#define PB44_GPIO_EXP_BASE	16
+#define PB44_GPIO_SW_RESET	(PB44_GPIO_EXP_BASE + 6)
+#define PB44_GPIO_SW_JUMP	(PB44_GPIO_EXP_BASE + 8)
+#define PB44_GPIO_LED_JUMP1	(PB44_GPIO_EXP_BASE + 9)
+#define PB44_GPIO_LED_JUMP2	(PB44_GPIO_EXP_BASE + 10)
+
+#define PB44_KEYS_POLL_INTERVAL		20	/* msecs */
+#define PB44_KEYS_DEBOUNCE_INTERVAL	(3 * PB44_KEYS_POLL_INTERVAL)
+
+static struct i2c_gpio_platform_data pb44_i2c_gpio_data = {
+	.sda_pin        = PB44_GPIO_I2C_SDA,
+	.scl_pin        = PB44_GPIO_I2C_SCL,
+};
+
+static struct platform_device pb44_i2c_gpio_device = {
+	.name		= "i2c-gpio",
+	.id		= 0,
+	.dev = {
+		.platform_data	= &pb44_i2c_gpio_data,
+	}
+};
+
+static struct pcf857x_platform_data pb44_pcf857x_data = {
+	.gpio_base	= PB44_GPIO_EXP_BASE,
+};
+
+static struct i2c_board_info pb44_i2c_board_info[] __initdata = {
+	{
+		I2C_BOARD_INFO("pcf8575", 0x20),
+		.platform_data  = &pb44_pcf857x_data,
+	},
+};
+
+static struct gpio_led pb44_leds_gpio[] __initdata = {
+	{
+		.name		= "pb44:amber:jump1",
+		.gpio		= PB44_GPIO_LED_JUMP1,
+		.active_low	= 1,
+	}, {
+		.name		= "pb44:green:jump2",
+		.gpio		= PB44_GPIO_LED_JUMP2,
+		.active_low	= 1,
+	},
+};
+
+static struct gpio_keys_button pb44_gpio_keys[] __initdata = {
+	{
+		.desc		= "soft_reset",
+		.type		= EV_KEY,
+		.code		= KEY_RESTART,
+		.debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
+		.gpio		= PB44_GPIO_SW_RESET,
+		.active_low	= 1,
+	}, {
+		.desc		= "jumpstart",
+		.type		= EV_KEY,
+		.code		= KEY_WPS_BUTTON,
+		.debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
+		.gpio		= PB44_GPIO_SW_JUMP,
+		.active_low	= 1,
+	}
+};
+
+static struct spi_board_info pb44_spi_info[] = {
+	{
+		.bus_num	= 0,
+		.chip_select	= 0,
+		.max_speed_hz	= 25000000,
+		.modalias	= "m25p64",
+	},
+};
+
+static struct ath79_spi_platform_data pb44_spi_data = {
+	.bus_num		= 0,
+	.num_chipselect		= 1,
+};
+
+static void __init pb44_init(void)
+{
+	i2c_register_board_info(0, pb44_i2c_board_info,
+				ARRAY_SIZE(pb44_i2c_board_info));
+	platform_device_register(&pb44_i2c_gpio_device);
+
+	ath79_register_leds_gpio(-1, ARRAY_SIZE(pb44_leds_gpio),
+				 pb44_leds_gpio);
+	ath79_register_gpio_keys_polled(-1, PB44_KEYS_POLL_INTERVAL,
+					ARRAY_SIZE(pb44_gpio_keys),
+					pb44_gpio_keys);
+	ath79_register_spi(&pb44_spi_data, pb44_spi_info,
+			   ARRAY_SIZE(pb44_spi_info));
+}
+
+MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
+	     pb44_init);
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
new file mode 100644
index 0000000..3940fe4
--- /dev/null
+++ b/arch/mips/ath79/machtypes.h
@@ -0,0 +1,23 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X machine type definitions
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_MACHTYPE_H
+#define _ATH79_MACHTYPE_H
+
+#include <asm/mips_machine.h>
+
+enum ath79_mach_type {
+	ATH79_MACH_GENERIC = 0,
+	ATH79_MACH_AP81,		/* Atheros AP81 reference board */
+	ATH79_MACH_PB44,		/* Atheros PB44 reference board */
+};
+
+#endif /* _ATH79_MACHTYPE_H */
diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c
new file mode 100644
index 0000000..e9cbd7c
--- /dev/null
+++ b/arch/mips/ath79/prom.c
@@ -0,0 +1,57 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X specific prom routines
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include "common.h"
+
+static inline int is_valid_ram_addr(void *addr)
+{
+	if (((u32) addr > KSEG0) &&
+	    ((u32) addr < (KSEG0 + ATH79_MEM_SIZE_MAX)))
+		return 1;
+
+	if (((u32) addr > KSEG1) &&
+	    ((u32) addr < (KSEG1 + ATH79_MEM_SIZE_MAX)))
+		return 1;
+
+	return 0;
+}
+
+static __init void ath79_prom_init_cmdline(int argc, char **argv)
+{
+	int i;
+
+	if (!is_valid_ram_addr(argv))
+		return;
+
+	for (i = 0; i < argc; i++)
+		if (is_valid_ram_addr(argv[i])) {
+			strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
+			strlcat(arcs_cmdline, argv[i], sizeof(arcs_cmdline));
+		}
+}
+
+void __init prom_init(void)
+{
+	ath79_prom_init_cmdline(fw_arg0, (char **)fw_arg1);
+}
+
+void __init prom_free_prom_memory(void)
+{
+	/* We do not have any prom memory to free */
+}
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
new file mode 100644
index 0000000..159b42f
--- /dev/null
+++ b/arch/mips/ath79/setup.c
@@ -0,0 +1,206 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X specific setup
+ *
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#include <asm/bootinfo.h>
+#include <asm/time.h>		/* for mips_hpt_frequency */
+#include <asm/reboot.h>		/* for _machine_{restart,halt} */
+#include <asm/mips_machine.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+#include "dev-common.h"
+#include "machtypes.h"
+
+#define ATH79_SYS_TYPE_LEN	64
+
+#define AR71XX_BASE_FREQ	40000000
+#define AR724X_BASE_FREQ	5000000
+#define AR913X_BASE_FREQ	5000000
+
+static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
+
+static void ath79_restart(char *command)
+{
+	ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
+	for (;;)
+		if (cpu_wait)
+			cpu_wait();
+}
+
+static void ath79_halt(void)
+{
+	while (1)
+		cpu_wait();
+}
+
+static void __init ath79_detect_mem_size(void)
+{
+	unsigned long size;
+
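+	/*
+	 * Probe for address aliasing: keep doubling the size until the
+	 * memory at that offset mirrors the data at offset zero (compared
+	 * against this function's own code), i.e. the RAM wraps around.
+	 */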
+	for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
+	     size <<= 1) {
+		if (!memcmp(ath79_detect_mem_size,
+			    ath79_detect_mem_size + size, 1024))
+			break;
+	}
+
+	add_memory_region(0, size, BOOT_MEM_RAM);
+}
+
+static void __init ath79_detect_sys_type(void)
+{
+	char *chip = "????";
+	u32 id;
+	u32 major;
+	u32 minor;
+	u32 rev = 0;
+
+	id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID);
+	major = id & REV_ID_MAJOR_MASK;
+
+	switch (major) {
+	case REV_ID_MAJOR_AR71XX:
+		minor = id & AR71XX_REV_ID_MINOR_MASK;
+		rev = id >> AR71XX_REV_ID_REVISION_SHIFT;
+		rev &= AR71XX_REV_ID_REVISION_MASK;
+		switch (minor) {
+		case AR71XX_REV_ID_MINOR_AR7130:
+			ath79_soc = ATH79_SOC_AR7130;
+			chip = "7130";
+			break;
+
+		case AR71XX_REV_ID_MINOR_AR7141:
+			ath79_soc = ATH79_SOC_AR7141;
+			chip = "7141";
+			break;
+
+		case AR71XX_REV_ID_MINOR_AR7161:
+			ath79_soc = ATH79_SOC_AR7161;
+			chip = "7161";
+			break;
+		}
+		break;
+
+	case REV_ID_MAJOR_AR7240:
+		ath79_soc = ATH79_SOC_AR7240;
+		chip = "7240";
+		rev = (id & AR724X_REV_ID_REVISION_MASK);
+		break;
+
+	case REV_ID_MAJOR_AR7241:
+		ath79_soc = ATH79_SOC_AR7241;
+		chip = "7241";
+		rev = (id & AR724X_REV_ID_REVISION_MASK);
+		break;
+
+	case REV_ID_MAJOR_AR7242:
+		ath79_soc = ATH79_SOC_AR7242;
+		chip = "7242";
+		rev = (id & AR724X_REV_ID_REVISION_MASK);
+		break;
+
+	case REV_ID_MAJOR_AR913X:
+		minor = id & AR913X_REV_ID_MINOR_MASK;
+		rev = id >> AR913X_REV_ID_REVISION_SHIFT;
+		rev &= AR913X_REV_ID_REVISION_MASK;
+		switch (minor) {
+		case AR913X_REV_ID_MINOR_AR9130:
+			ath79_soc = ATH79_SOC_AR9130;
+			chip = "9130";
+			break;
+
+		case AR913X_REV_ID_MINOR_AR9132:
+			ath79_soc = ATH79_SOC_AR9132;
+			chip = "9132";
+			break;
+		}
+		break;
+
+	default:
+		panic("ath79: unknown SoC, id:0x%08x\n", id);
+	}
+
+	sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
+	pr_info("SoC: %s\n", ath79_sys_type);
+}
+
+const char *get_system_type(void)
+{
+	return ath79_sys_type;
+}
+
+unsigned int __cpuinit get_c0_compare_int(void)
+{
+	return CP0_LEGACY_COMPARE_IRQ;
+}
+
+void __init plat_mem_setup(void)
+{
+	set_io_port_base(KSEG1);
+
+	ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
+					   AR71XX_RESET_SIZE);
+	ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
+					 AR71XX_PLL_SIZE);
+	ath79_ddr_base = ioremap_nocache(AR71XX_DDR_CTRL_BASE,
+					 AR71XX_DDR_CTRL_SIZE);
+
+	ath79_detect_sys_type();
+	ath79_detect_mem_size();
+	ath79_clocks_init();
+
+	_machine_restart = ath79_restart;
+	_machine_halt = ath79_halt;
+	pm_power_off = ath79_halt;
+}
+
+void __init plat_time_init(void)
+{
+	struct clk *clk;
+
+	clk = clk_get(NULL, "cpu");
+	if (IS_ERR(clk))
+		panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
+
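+	/* The CP0 count register ticks at half the CPU clock on these cores. */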
+	mips_hpt_frequency = clk_get_rate(clk) / 2;
+}
+
+static int __init ath79_setup(void)
+{
+	ath79_gpio_init();
+	ath79_register_uart();
+	ath79_register_wdt();
+
+	mips_machine_setup();
+
+	return 0;
+}
+
+arch_initcall(ath79_setup);
+
+static void __init ath79_generic_init(void)
+{
+	/* Nothing to do */
+}
+
+MIPS_MACHINE(ATH79_MACH_GENERIC,
+	     "Generic",
+	     "Generic AR71XX/AR724X/AR913X based board",
+	     ath79_generic_init);
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index c78c7e7..6cd5a51 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -14,7 +14,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 927d58b..22fdf2f 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -21,7 +21,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index b806a4e..9190051 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -10,7 +10,7 @@
 # CONFIG_SWAP is not set
 CONFIG_TINY_RCU=y
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 9749bc8..1cdff6b 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -26,7 +26,7 @@
 CONFIG_NET_NS=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
index 502a8e9..5135dc0 100644
--- a/arch/mips/configs/capcella_defconfig
+++ b/arch/mips/configs/capcella_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/cavium-octeon_defconfig b/arch/mips/configs/cavium-octeon_defconfig
index 3567b6f..75165df 100644
--- a/arch/mips/configs/cavium-octeon_defconfig
+++ b/arch/mips/configs/cavium-octeon_defconfig
@@ -15,7 +15,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index 6c4f7e9..5419adb 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
index dda158b..4044c9e 100644
--- a/arch/mips/configs/db1000_defconfig
+++ b/arch/mips/configs/db1000_defconfig
@@ -11,7 +11,7 @@
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/db1100_defconfig b/arch/mips/configs/db1100_defconfig
index 7e4fc76..c6b4993 100644
--- a/arch/mips/configs/db1100_defconfig
+++ b/arch/mips/configs/db1100_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/db1200_defconfig b/arch/mips/configs/db1200_defconfig
index 6fe205f..1f69249 100644
--- a/arch/mips/configs/db1200_defconfig
+++ b/arch/mips/configs/db1200_defconfig
@@ -12,7 +12,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/db1500_defconfig b/arch/mips/configs/db1500_defconfig
index a741c554..b6e21c7 100644
--- a/arch/mips/configs/db1500_defconfig
+++ b/arch/mips/configs/db1500_defconfig
@@ -10,7 +10,7 @@
 CONFIG_KERNEL_LZMA=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/db1550_defconfig b/arch/mips/configs/db1550_defconfig
index cd32dd8..798a553 100644
--- a/arch/mips/configs/db1550_defconfig
+++ b/arch/mips/configs/db1550_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index b15bfd1..87d0340 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
index 0b60c06..0126e66 100644
--- a/arch/mips/configs/e55_defconfig
+++ b/arch/mips/configs/e55_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 63944a1..e5b73de 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -17,7 +17,7 @@
 CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 53edc13..48a40ae 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 36de199..d160656 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -17,7 +17,7 @@
 CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 4b16c48..0e36abc 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -15,7 +15,7 @@
 CONFIG_CPUSETS=y
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index 98f2c77..4dbf626 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 5bea99b..7bbd521 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 6ae46bc..92a60ae 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
index bf24e93..db5705e 100644
--- a/arch/mips/configs/jmr3927_defconfig
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig
index 6447261..d9f3db2 100644
--- a/arch/mips/configs/lasat_defconfig
+++ b/arch/mips/configs/lasat_defconfig
@@ -8,7 +8,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index f7033f3..167c1d0 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -21,7 +21,7 @@
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index b455d0f..7270f31 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -15,7 +15,7 @@
 CONFIG_IPC_NS=y
 CONFIG_PID_NS=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
@@ -369,7 +369,10 @@
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
 # CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_HID=m
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_TRIGGER_TIMER=m
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index 86bf001..9c9a123 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -9,7 +9,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/mipssim_defconfig b/arch/mips/configs/mipssim_defconfig
index 4925f50..b5ad738 100644
--- a/arch/mips/configs/mipssim_defconfig
+++ b/arch/mips/configs/mipssim_defconfig
@@ -7,7 +7,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
index efb779f..c16de98 100644
--- a/arch/mips/configs/mpc30x_defconfig
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig
index ab05145..d1142e9 100644
--- a/arch/mips/configs/msp71xx_defconfig
+++ b/arch/mips/configs/msp71xx_defconfig
@@ -8,7 +8,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SHMEM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 81469975..a97a42c6 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
diff --git a/arch/mips/configs/pb1100_defconfig b/arch/mips/configs/pb1100_defconfig
index 1597aa1..75eb1b1 100644
--- a/arch/mips/configs/pb1100_defconfig
+++ b/arch/mips/configs/pb1100_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1200_defconfig b/arch/mips/configs/pb1200_defconfig
index 96f0d43..dcbe270 100644
--- a/arch/mips/configs/pb1200_defconfig
+++ b/arch/mips/configs/pb1200_defconfig
@@ -12,7 +12,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1500_defconfig b/arch/mips/configs/pb1500_defconfig
index b4bfd48..fa00487 100644
--- a/arch/mips/configs/pb1500_defconfig
+++ b/arch/mips/configs/pb1500_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1550_defconfig b/arch/mips/configs/pb1550_defconfig
index 5a66002..e83d649 100644
--- a/arch/mips/configs/pb1550_defconfig
+++ b/arch/mips/configs/pb1550_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pnx8335-stb225_defconfig b/arch/mips/configs/pnx8335-stb225_defconfig
index 39926a1..f292576 100644
--- a/arch/mips/configs/pnx8335-stb225_defconfig
+++ b/arch/mips/configs/pnx8335-stb225_defconfig
@@ -11,7 +11,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/pnx8550-jbs_defconfig b/arch/mips/configs/pnx8550-jbs_defconfig
index 3376bc8..1d1f206 100644
--- a/arch/mips/configs/pnx8550-jbs_defconfig
+++ b/arch/mips/configs/pnx8550-jbs_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/pnx8550-stb810_defconfig b/arch/mips/configs/pnx8550-stb810_defconfig
index 6514f1b..15c66a5 100644
--- a/arch/mips/configs/pnx8550-stb810_defconfig
+++ b/arch/mips/configs/pnx8550-stb810_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig
index f1f58e91..3b0b6e8 100644
--- a/arch/mips/configs/powertv_defconfig
+++ b/arch/mips/configs/powertv_defconfig
@@ -14,7 +14,7 @@
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_GZIP is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index d6457bc..55902d9 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -13,7 +13,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 29acfab..9cba856 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -12,7 +12,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 2b3e476..2c0230e 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -12,7 +12,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig
index 64840d7..5b0463e 100644
--- a/arch/mips/configs/sb1250-swarm_defconfig
+++ b/arch/mips/configs/sb1250-swarm_defconfig
@@ -15,7 +15,7 @@
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
index d9be37f..30036b4 100644
--- a/arch/mips/configs/tb0219_defconfig
+++ b/arch/mips/configs/tb0219_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
index 3d25dd0..81bfa1d 100644
--- a/arch/mips/configs/tb0226_defconfig
+++ b/arch/mips/configs/tb0226_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
index be697c9..c415c4f 100644
--- a/arch/mips/configs/tb0287_defconfig
+++ b/arch/mips/configs/tb0287_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
index 7ec9287..ee4b2be 100644
--- a/arch/mips/configs/workpad_defconfig
+++ b/arch/mips/configs/workpad_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/wrppmc_defconfig b/arch/mips/configs/wrppmc_defconfig
index a231b73..44a451b 100644
--- a/arch/mips/configs/wrppmc_defconfig
+++ b/arch/mips/configs/wrppmc_defconfig
@@ -7,7 +7,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_EPOLL is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/yosemite_defconfig b/arch/mips/configs/yosemite_defconfig
index ab3a3dc..f72d305 100644
--- a/arch/mips/configs/yosemite_defconfig
+++ b/arch/mips/configs/yosemite_defconfig
@@ -8,7 +8,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 02f505f..ea57f39 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -104,7 +104,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index 37f175c..650ac9b 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -17,4 +17,6 @@
 #define SMP_CACHE_SHIFT		L1_CACHE_SHIFT
 #define SMP_CACHE_BYTES		L1_CACHE_BYTES
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif /* _ASM_CACHE_H */
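The new __read_mostly annotation groups variables that are written rarely but read in hot paths into the .data.read_mostly section, keeping them off cache lines that see frequent writes. A hypothetical use (the variable name is illustrative):

	/* Mapped once at init, read on every register access afterwards. */
	static void __iomem *my_ctrl_base __read_mostly;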
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index b39def3..c454550 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -78,6 +78,7 @@
 	unsigned int		watch_reg_use_cnt; /* Usable by ptrace */
 #define NUM_WATCH_REGS 4
 	u16			watch_reg_masks[NUM_WATCH_REGS];
+	unsigned int		kscratch_mask; /* Usable KScratch mask. */
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index 444ff71..7ebfc39 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -72,6 +72,7 @@
 enum spec3_op {
 	ext_op, dextm_op, dextu_op, dext_op,
 	ins_op, dinsm_op, dinsu_op, dins_op,
+	lx_op = 0x0a,
 	bshfl_op = 0x20,
 	dbshfl_op = 0x24,
 	rdhwr_op = 0x3b
@@ -179,6 +180,19 @@
 };
 
 /*
+ * func field for special3 lx opcodes (Cavium Octeon).
+ */
+enum lx_func {
+	lwx_op	= 0x00,
+	lhx_op	= 0x04,
+	lbux_op	= 0x06,
+	ldx_op	= 0x08,
+	lwux_op	= 0x10,
+	lhux_op	= 0x14,
+	lbx_op	= 0x16,
+};
+
+/*
  * Damn ...  bitfields depend from byteorder :-(
  */
 #ifdef __MIPSEB__
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
new file mode 100644
index 0000000..7622ccf
--- /dev/null
+++ b/arch/mips/include/asm/jump_label.h
@@ -0,0 +1,48 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2010 Cavium Networks, Inc.
+ */
+#ifndef _ASM_MIPS_JUMP_LABEL_H
+#define _ASM_MIPS_JUMP_LABEL_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#ifdef CONFIG_64BIT
+#define WORD_INSN ".dword"
+#else
+#define WORD_INSN ".word"
+#endif
+
+#define JUMP_LABEL(key, label)						\
+	do {								\
+		asm goto("1:\tnop\n\t"					\
+			"nop\n\t"					\
+			".pushsection __jump_table,  \"a\"\n\t"		\
+			WORD_INSN " 1b, %l[" #label "], %0\n\t"		\
+			".popsection\n\t"				\
+			: :  "i" (key) :  : label);			\
+	} while (0)
+
+
+#endif /* __KERNEL__ */
+
+#ifdef CONFIG_64BIT
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif /* _ASM_MIPS_JUMP_LABEL_H */
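JUMP_LABEL() plants two nops at the call site and records the site address, the jump target and the key in the __jump_table section; when the key is enabled, the generic jump-label code can patch the nops into a branch to the label. A sketch of how a caller uses it in this kernel, with an illustrative key name and fallthrough body:

	static int my_trace_key;

	static void my_hot_path(void)
	{
		JUMP_LABEL(&my_trace_key, do_trace);
		return;
	do_trace:
		printk(KERN_DEBUG "my_hot_path: traced\n");
	}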
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
new file mode 100644
index 0000000..cda1c80
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -0,0 +1,233 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X SoC register definitions
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_AR71XX_REGS_H
+#define __ASM_MACH_AR71XX_REGS_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#define AR71XX_APB_BASE		0x18000000
+#define AR71XX_SPI_BASE		0x1f000000
+#define AR71XX_SPI_SIZE		0x01000000
+
+#define AR71XX_DDR_CTRL_BASE	(AR71XX_APB_BASE + 0x00000000)
+#define AR71XX_DDR_CTRL_SIZE	0x100
+#define AR71XX_UART_BASE	(AR71XX_APB_BASE + 0x00020000)
+#define AR71XX_UART_SIZE	0x100
+#define AR71XX_GPIO_BASE        (AR71XX_APB_BASE + 0x00040000)
+#define AR71XX_GPIO_SIZE        0x100
+#define AR71XX_PLL_BASE		(AR71XX_APB_BASE + 0x00050000)
+#define AR71XX_PLL_SIZE		0x100
+#define AR71XX_RESET_BASE	(AR71XX_APB_BASE + 0x00060000)
+#define AR71XX_RESET_SIZE	0x100
+
+#define AR913X_WMAC_BASE	(AR71XX_APB_BASE + 0x000C0000)
+#define AR913X_WMAC_SIZE	0x30000
+
+/*
+ * DDR_CTRL block
+ */
+#define AR71XX_DDR_REG_PCI_WIN0		0x7c
+#define AR71XX_DDR_REG_PCI_WIN1		0x80
+#define AR71XX_DDR_REG_PCI_WIN2		0x84
+#define AR71XX_DDR_REG_PCI_WIN3		0x88
+#define AR71XX_DDR_REG_PCI_WIN4		0x8c
+#define AR71XX_DDR_REG_PCI_WIN5		0x90
+#define AR71XX_DDR_REG_PCI_WIN6		0x94
+#define AR71XX_DDR_REG_PCI_WIN7		0x98
+#define AR71XX_DDR_REG_FLUSH_GE0	0x9c
+#define AR71XX_DDR_REG_FLUSH_GE1	0xa0
+#define AR71XX_DDR_REG_FLUSH_USB	0xa4
+#define AR71XX_DDR_REG_FLUSH_PCI	0xa8
+
+#define AR724X_DDR_REG_FLUSH_GE0	0x7c
+#define AR724X_DDR_REG_FLUSH_GE1	0x80
+#define AR724X_DDR_REG_FLUSH_USB	0x84
+#define AR724X_DDR_REG_FLUSH_PCIE	0x88
+
+#define AR913X_DDR_REG_FLUSH_GE0	0x7c
+#define AR913X_DDR_REG_FLUSH_GE1	0x80
+#define AR913X_DDR_REG_FLUSH_USB	0x84
+#define AR913X_DDR_REG_FLUSH_WMAC	0x88
+
+/*
+ * PLL block
+ */
+#define AR71XX_PLL_REG_CPU_CONFIG	0x00
+#define AR71XX_PLL_REG_SEC_CONFIG	0x04
+#define AR71XX_PLL_REG_ETH0_INT_CLOCK	0x10
+#define AR71XX_PLL_REG_ETH1_INT_CLOCK	0x14
+
+#define AR71XX_PLL_DIV_SHIFT		3
+#define AR71XX_PLL_DIV_MASK		0x1f
+#define AR71XX_CPU_DIV_SHIFT		16
+#define AR71XX_CPU_DIV_MASK		0x3
+#define AR71XX_DDR_DIV_SHIFT		18
+#define AR71XX_DDR_DIV_MASK		0x3
+#define AR71XX_AHB_DIV_SHIFT		20
+#define AR71XX_AHB_DIV_MASK		0x7
+
+#define AR724X_PLL_REG_CPU_CONFIG	0x00
+#define AR724X_PLL_REG_PCIE_CONFIG	0x18
+
+#define AR724X_PLL_DIV_SHIFT		0
+#define AR724X_PLL_DIV_MASK		0x3ff
+#define AR724X_PLL_REF_DIV_SHIFT	10
+#define AR724X_PLL_REF_DIV_MASK		0xf
+#define AR724X_AHB_DIV_SHIFT		19
+#define AR724X_AHB_DIV_MASK		0x1
+#define AR724X_DDR_DIV_SHIFT		22
+#define AR724X_DDR_DIV_MASK		0x3
+
+#define AR913X_PLL_REG_CPU_CONFIG	0x00
+#define AR913X_PLL_REG_ETH_CONFIG	0x04
+#define AR913X_PLL_REG_ETH0_INT_CLOCK	0x14
+#define AR913X_PLL_REG_ETH1_INT_CLOCK	0x18
+
+#define AR913X_PLL_DIV_SHIFT		0
+#define AR913X_PLL_DIV_MASK		0x3ff
+#define AR913X_DDR_DIV_SHIFT		22
+#define AR913X_DDR_DIV_MASK		0x3
+#define AR913X_AHB_DIV_SHIFT		19
+#define AR913X_AHB_DIV_MASK		0x1
+
+/*
+ * RESET block
+ */
+#define AR71XX_RESET_REG_TIMER			0x00
+#define AR71XX_RESET_REG_TIMER_RELOAD		0x04
+#define AR71XX_RESET_REG_WDOG_CTRL		0x08
+#define AR71XX_RESET_REG_WDOG			0x0c
+#define AR71XX_RESET_REG_MISC_INT_STATUS	0x10
+#define AR71XX_RESET_REG_MISC_INT_ENABLE	0x14
+#define AR71XX_RESET_REG_PCI_INT_STATUS		0x18
+#define AR71XX_RESET_REG_PCI_INT_ENABLE		0x1c
+#define AR71XX_RESET_REG_GLOBAL_INT_STATUS	0x20
+#define AR71XX_RESET_REG_RESET_MODULE		0x24
+#define AR71XX_RESET_REG_PERFC_CTRL		0x2c
+#define AR71XX_RESET_REG_PERFC0			0x30
+#define AR71XX_RESET_REG_PERFC1			0x34
+#define AR71XX_RESET_REG_REV_ID			0x90
+
+#define AR913X_RESET_REG_GLOBAL_INT_STATUS	0x18
+#define AR913X_RESET_REG_RESET_MODULE		0x1c
+#define AR913X_RESET_REG_PERF_CTRL		0x20
+#define AR913X_RESET_REG_PERFC0			0x24
+#define AR913X_RESET_REG_PERFC1			0x28
+
+#define AR724X_RESET_REG_RESET_MODULE		0x1c
+
+#define MISC_INT_DMA			BIT(7)
+#define MISC_INT_OHCI			BIT(6)
+#define MISC_INT_PERFC			BIT(5)
+#define MISC_INT_WDOG			BIT(4)
+#define MISC_INT_UART			BIT(3)
+#define MISC_INT_GPIO			BIT(2)
+#define MISC_INT_ERROR			BIT(1)
+#define MISC_INT_TIMER			BIT(0)
+
+#define AR71XX_RESET_EXTERNAL		BIT(28)
+#define AR71XX_RESET_FULL_CHIP		BIT(24)
+#define AR71XX_RESET_CPU_NMI		BIT(21)
+#define AR71XX_RESET_CPU_COLD		BIT(20)
+#define AR71XX_RESET_DMA		BIT(19)
+#define AR71XX_RESET_SLIC		BIT(18)
+#define AR71XX_RESET_STEREO		BIT(17)
+#define AR71XX_RESET_DDR		BIT(16)
+#define AR71XX_RESET_GE1_MAC		BIT(13)
+#define AR71XX_RESET_GE1_PHY		BIT(12)
+#define AR71XX_RESET_USBSUS_OVERRIDE	BIT(10)
+#define AR71XX_RESET_GE0_MAC		BIT(9)
+#define AR71XX_RESET_GE0_PHY		BIT(8)
+#define AR71XX_RESET_USB_OHCI_DLL	BIT(6)
+#define AR71XX_RESET_USB_HOST		BIT(5)
+#define AR71XX_RESET_USB_PHY		BIT(4)
+#define AR71XX_RESET_PCI_BUS		BIT(1)
+#define AR71XX_RESET_PCI_CORE		BIT(0)
+
+#define AR724X_RESET_GE1_MDIO		BIT(23)
+#define AR724X_RESET_GE0_MDIO		BIT(22)
+#define AR724X_RESET_PCIE_PHY_SERIAL	BIT(10)
+#define AR724X_RESET_PCIE_PHY		BIT(7)
+#define AR724X_RESET_PCIE		BIT(6)
+#define AR724X_RESET_OHCI_DLL		BIT(3)
+
+#define AR913X_RESET_AMBA2WMAC		BIT(22)
+
+#define REV_ID_MAJOR_MASK		0xfff0
+#define REV_ID_MAJOR_AR71XX		0x00a0
+#define REV_ID_MAJOR_AR913X		0x00b0
+#define REV_ID_MAJOR_AR7240		0x00c0
+#define REV_ID_MAJOR_AR7241		0x0100
+#define REV_ID_MAJOR_AR7242		0x1100
+
+#define AR71XX_REV_ID_MINOR_MASK	0x3
+#define AR71XX_REV_ID_MINOR_AR7130	0x0
+#define AR71XX_REV_ID_MINOR_AR7141	0x1
+#define AR71XX_REV_ID_MINOR_AR7161	0x2
+#define AR71XX_REV_ID_REVISION_MASK	0x3
+#define AR71XX_REV_ID_REVISION_SHIFT	2
+
+#define AR913X_REV_ID_MINOR_MASK	0x3
+#define AR913X_REV_ID_MINOR_AR9130	0x0
+#define AR913X_REV_ID_MINOR_AR9132	0x1
+#define AR913X_REV_ID_REVISION_MASK	0x3
+#define AR913X_REV_ID_REVISION_SHIFT	2
+
+#define AR724X_REV_ID_REVISION_MASK	0x3
+
+/*
+ * SPI block
+ */
+#define AR71XX_SPI_REG_FS	0x00	/* Function Select */
+#define AR71XX_SPI_REG_CTRL	0x04	/* SPI Control */
+#define AR71XX_SPI_REG_IOC	0x08	/* SPI I/O Control */
+#define AR71XX_SPI_REG_RDS	0x0c	/* Read Data Shift */
+
+#define AR71XX_SPI_FS_GPIO	BIT(0)	/* Enable GPIO mode */
+
+#define AR71XX_SPI_CTRL_RD	BIT(6)	/* Remap Disable */
+#define AR71XX_SPI_CTRL_DIV_MASK 0x3f
+
+#define AR71XX_SPI_IOC_DO	BIT(0)	/* Data Out pin */
+#define AR71XX_SPI_IOC_CLK	BIT(8)	/* CLK pin */
+#define AR71XX_SPI_IOC_CS(n)	BIT(16 + (n))
+#define AR71XX_SPI_IOC_CS0	AR71XX_SPI_IOC_CS(0)
+#define AR71XX_SPI_IOC_CS1	AR71XX_SPI_IOC_CS(1)
+#define AR71XX_SPI_IOC_CS2	AR71XX_SPI_IOC_CS(2)
+#define AR71XX_SPI_IOC_CS_ALL	(AR71XX_SPI_IOC_CS0 | AR71XX_SPI_IOC_CS1 | \
+				 AR71XX_SPI_IOC_CS2)
+
+/*
+ * GPIO block
+ */
+#define AR71XX_GPIO_REG_OE		0x00
+#define AR71XX_GPIO_REG_IN		0x04
+#define AR71XX_GPIO_REG_OUT		0x08
+#define AR71XX_GPIO_REG_SET		0x0c
+#define AR71XX_GPIO_REG_CLEAR		0x10
+#define AR71XX_GPIO_REG_INT_MODE	0x14
+#define AR71XX_GPIO_REG_INT_TYPE	0x18
+#define AR71XX_GPIO_REG_INT_POLARITY	0x1c
+#define AR71XX_GPIO_REG_INT_PENDING	0x20
+#define AR71XX_GPIO_REG_INT_ENABLE	0x24
+#define AR71XX_GPIO_REG_FUNC		0x28
+
+#define AR71XX_GPIO_COUNT		16
+#define AR724X_GPIO_COUNT		18
+#define AR913X_GPIO_COUNT		22
+
+#endif /* __ASM_MACH_AR71XX_REGS_H */
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
new file mode 100644
index 0000000..6a9f168
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -0,0 +1,96 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X common definitions
+ *
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_ATH79_H
+#define __ASM_MACH_ATH79_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+enum ath79_soc_type {
+	ATH79_SOC_UNKNOWN,
+	ATH79_SOC_AR7130,
+	ATH79_SOC_AR7141,
+	ATH79_SOC_AR7161,
+	ATH79_SOC_AR7240,
+	ATH79_SOC_AR7241,
+	ATH79_SOC_AR7242,
+	ATH79_SOC_AR9130,
+	ATH79_SOC_AR9132
+};
+
+extern enum ath79_soc_type ath79_soc;
+
+static inline int soc_is_ar71xx(void)
+{
+	return (ath79_soc == ATH79_SOC_AR7130 ||
+		ath79_soc == ATH79_SOC_AR7141 ||
+		ath79_soc == ATH79_SOC_AR7161);
+}
+
+static inline int soc_is_ar724x(void)
+{
+	return (ath79_soc == ATH79_SOC_AR7240 ||
+		ath79_soc == ATH79_SOC_AR7241 ||
+		ath79_soc == ATH79_SOC_AR7242);
+}
+
+static inline int soc_is_ar7240(void)
+{
+	return (ath79_soc == ATH79_SOC_AR7240);
+}
+
+static inline int soc_is_ar7241(void)
+{
+	return (ath79_soc == ATH79_SOC_AR7241);
+}
+
+static inline int soc_is_ar7242(void)
+{
+	return (ath79_soc == ATH79_SOC_AR7242);
+}
+
+static inline int soc_is_ar913x(void)
+{
+	return (ath79_soc == ATH79_SOC_AR9130 ||
+		ath79_soc == ATH79_SOC_AR9132);
+}
+
+extern void __iomem *ath79_ddr_base;
+extern void __iomem *ath79_pll_base;
+extern void __iomem *ath79_reset_base;
+
+static inline void ath79_pll_wr(unsigned reg, u32 val)
+{
+	__raw_writel(val, ath79_pll_base + reg);
+}
+
+static inline u32 ath79_pll_rr(unsigned reg)
+{
+	return __raw_readl(ath79_pll_base + reg);
+}
+
+static inline void ath79_reset_wr(unsigned reg, u32 val)
+{
+	__raw_writel(val, ath79_reset_base + reg);
+}
+
+static inline u32 ath79_reset_rr(unsigned reg)
+{
+	return __raw_readl(ath79_reset_base + reg);
+}
+
+void ath79_device_reset_set(u32 mask);
+void ath79_device_reset_clear(u32 mask);
+
+#endif /* __ASM_MACH_ATH79_H */
diff --git a/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h b/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h
new file mode 100644
index 0000000..aa2283e
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h
@@ -0,0 +1,23 @@
+/*
+ *  Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_SPI_PLATFORM_H
+#define _ATH79_SPI_PLATFORM_H
+
+struct ath79_spi_platform_data {
+	unsigned	bus_num;
+	unsigned	num_chipselect;
+};
+
+struct ath79_spi_controller_data {
+	unsigned	gpio;
+};
+
+#endif /* _ATH79_SPI_PLATFORM_H */
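A sketch of how a board file might hand this platform data to the SPI controller driver. The "ath79-spi" device name and the field values are assumptions for illustration, not taken from this patch.

#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79_spi_platform.h>

static struct ath79_spi_platform_data example_spi_data = {
	.bus_num	= 0,
	.num_chipselect	= 3,
};

static struct platform_device example_spi_device = {
	.name	= "ath79-spi",			/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &example_spi_data,
	},
};

The board setup code would then register it with platform_device_register(&example_spi_device).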
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
new file mode 100644
index 0000000..4476fa0
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X specific CPU feature overrides
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This file was derived from: include/asm-mips/cpu-features.h
+ *	Copyright (C) 2003, 2004 Ralf Baechle
+ *	Copyright (C) 2004 Maciej W. Rozycki
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ */
+#ifndef __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb		1
+#define cpu_has_4kex		1
+#define cpu_has_3k_cache	0
+#define cpu_has_4k_cache	1
+#define cpu_has_tx39_cache	0
+#define cpu_has_sb1_cache	0
+#define cpu_has_fpu		0
+#define cpu_has_32fpr		0
+#define cpu_has_counter		1
+#define cpu_has_watch		1
+#define cpu_has_divec		1
+
+#define cpu_has_prefetch	1
+#define cpu_has_ejtag		1
+#define cpu_has_llsc		1
+
+#define cpu_has_mips16		1
+#define cpu_has_mdmx		0
+#define cpu_has_mips3d		0
+#define cpu_has_smartmips	0
+
+#define cpu_has_mips32r1	1
+#define cpu_has_mips32r2	1
+#define cpu_has_mips64r1	0
+#define cpu_has_mips64r2	0
+
+#define cpu_has_dsp		0
+#define cpu_has_mipsmt		0
+
+#define cpu_has_64bits		0
+#define cpu_has_64bit_zero_reg	0
+#define cpu_has_64bit_gp_regs	0
+#define cpu_has_64bit_addresses	0
+
+#define cpu_dcache_line_size()	32
+#define cpu_icache_line_size()	32
+
+#endif /* __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ath79/gpio.h b/arch/mips/include/asm/mach-ath79/gpio.h
new file mode 100644
index 0000000..60dcb62
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/gpio.h
@@ -0,0 +1,26 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X GPIO API definitions
+ *
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_MACH_ATH79_GPIO_H
+#define __ASM_MACH_ATH79_GPIO_H
+
+#define ARCH_NR_GPIOS	64
+#include <asm-generic/gpio.h>
+
+int gpio_to_irq(unsigned gpio);
+int irq_to_gpio(unsigned irq);
+int gpio_get_value(unsigned gpio);
+void gpio_set_value(unsigned gpio, int value);
+
+#define gpio_cansleep	__gpio_cansleep
+
+#endif /* __ASM_MACH_ATH79_GPIO_H */
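Since the header pulls in the generic gpiolib API, consumers use the usual calls. A minimal sketch; the GPIO number 0 and the label are illustrative only.

#include <linux/gpio.h>

static int example_led_init(void)
{
	int err;

	err = gpio_request(0, "example-led");	/* claim the line */
	if (err)
		return err;

	return gpio_direction_output(0, 1);	/* drive it high */
}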
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
new file mode 100644
index 0000000..189bc6e
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+#ifndef __ASM_MACH_ATH79_IRQ_H
+#define __ASM_MACH_ATH79_IRQ_H
+
+#define MIPS_CPU_IRQ_BASE	0
+#define NR_IRQS			16
+
+#define ATH79_MISC_IRQ_BASE	8
+#define ATH79_MISC_IRQ_COUNT	8
+
+#define ATH79_CPU_IRQ_IP2	(MIPS_CPU_IRQ_BASE + 2)
+#define ATH79_CPU_IRQ_USB	(MIPS_CPU_IRQ_BASE + 3)
+#define ATH79_CPU_IRQ_GE0	(MIPS_CPU_IRQ_BASE + 4)
+#define ATH79_CPU_IRQ_GE1	(MIPS_CPU_IRQ_BASE + 5)
+#define ATH79_CPU_IRQ_MISC	(MIPS_CPU_IRQ_BASE + 6)
+#define ATH79_CPU_IRQ_TIMER	(MIPS_CPU_IRQ_BASE + 7)
+
+#define ATH79_MISC_IRQ_TIMER	(ATH79_MISC_IRQ_BASE + 0)
+#define ATH79_MISC_IRQ_ERROR	(ATH79_MISC_IRQ_BASE + 1)
+#define ATH79_MISC_IRQ_GPIO	(ATH79_MISC_IRQ_BASE + 2)
+#define ATH79_MISC_IRQ_UART	(ATH79_MISC_IRQ_BASE + 3)
+#define ATH79_MISC_IRQ_WDOG	(ATH79_MISC_IRQ_BASE + 4)
+#define ATH79_MISC_IRQ_PERFC	(ATH79_MISC_IRQ_BASE + 5)
+#define ATH79_MISC_IRQ_OHCI	(ATH79_MISC_IRQ_BASE + 6)
+#define ATH79_MISC_IRQ_DMA	(ATH79_MISC_IRQ_BASE + 7)
+
+#include_next <irq.h>
+
+#endif /* __ASM_MACH_ATH79_IRQ_H */
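A sketch of requesting one of the MISC interrupts defined above; the handler, flags and device name are illustrative only.

#include <linux/interrupt.h>
#include <asm/mach-ath79/irq.h>

static irqreturn_t example_wdog_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the watchdog interrupt here */
	return IRQ_HANDLED;
}

static int example_wdog_setup(void *dev)
{
	return request_irq(ATH79_MISC_IRQ_WDOG, example_wdog_isr, 0,
			   "example-wdog", dev);
}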
diff --git a/arch/mips/include/asm/mach-ath79/kernel-entry-init.h b/arch/mips/include/asm/mach-ath79/kernel-entry-init.h
new file mode 100644
index 0000000..d8d046b
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/kernel-entry-init.h
@@ -0,0 +1,32 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X specific kernel entry setup
+ *
+ *  Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ */
+#ifndef __ASM_MACH_ATH79_KERNEL_ENTRY_H
+#define __ASM_MACH_ATH79_KERNEL_ENTRY_H
+
+	/*
+	 * Some bootloaders set the 'Kseg0 coherency algorithm' to
+	 * 'Cacheable, noncoherent, write-through, no write allocate'
+	 * and this cause performance issues. Let's go and change it to
+	 * and this causes performance issues. Change it to
+	 */
+	.macro	kernel_entry_setup
+	mfc0	t0, CP0_CONFIG
+	li	t1, ~CONF_CM_CMASK
+	and	t0, t1
+	ori	t0, CONF_CM_CACHABLE_NONCOHERENT
+	mtc0	t0, CP0_CONFIG
+	nop
+	.endm
+
+	.macro	smp_slave_setup
+	.endm
+
+#endif /* __ASM_MACH_ATH79_KERNEL_ENTRY_H */
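The assembly macro above rewrites the Kseg0 cacheability field of the CP0 Config register. A plain-C rendering of the same read-modify-write, for illustration only; the real fixup must run from early assembly, before the caches are relied on.

#include <asm/mipsregs.h>

static inline void example_set_kseg0_writeback(void)
{
	unsigned long config = read_c0_config();

	config &= ~CONF_CM_CMASK;		/* clear the K0 field */
	config |= CONF_CM_CACHABLE_NONCOHERENT;	/* write-back, write allocate */
	write_c0_config(config);
}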
diff --git a/arch/mips/include/asm/mach-ath79/war.h b/arch/mips/include/asm/mach-ath79/war.h
new file mode 100644
index 0000000..323d9f1
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/war.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MACH_ATH79_WAR_H
+#define __ASM_MACH_ATH79_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR	0
+#define R4600_V1_HIT_CACHEOP_WAR	0
+#define R4600_V2_HIT_CACHEOP_WAR	0
+#define R5432_CP0_INTERRUPT_WAR		0
+#define BCM1250_M3_WAR			0
+#define SIBYTE_1956_WAR			0
+#define MIPS4K_ICACHE_REFILL_WAR	0
+#define MIPS_CACHE_SYNC_WAR		0
+#define TX49XX_ICACHE_INDEX_INV_WAR	0
+#define RM9000_CDEX_SMP_WAR		0
+#define ICACHE_REFILLS_WORKAROUND_WAR	0
+#define R10000_LLSC_WAR			0
+#define MIPS34K_MISSED_ITLB_WAR		0
+
+#endif /* __ASM_MACH_ATH79_WAR_H */
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
index 076f2ee..c86ef09 100644
--- a/arch/mips/include/asm/mach-powertv/ioremap.h
+++ b/arch/mips/include/asm/mach-powertv/ioremap.h
@@ -88,7 +88,7 @@
 }
 
 /* These are not portable and should not be used in drivers. Drivers should
- * be using ioremap() and friends to map physical addreses to virtual
+ * be using ioremap() and friends to map physical addresses to virtual
  * addresses and dma_map*() and friends to map virtual addresses into DMA
  * addresses and back.
  */
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
index 199b457..4a08dbe 100644
--- a/arch/mips/include/asm/mc146818-time.h
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -66,7 +66,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/mips/include/asm/mips_machine.h b/arch/mips/include/asm/mips_machine.h
new file mode 100644
index 0000000..363bb35
--- /dev/null
+++ b/arch/mips/include/asm/mips_machine.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_MIPS_MACHINE_H
+#define __ASM_MIPS_MACHINE_H
+
+#include <linux/init.h>
+#include <linux/stddef.h>
+
+#include <asm/bootinfo.h>
+
+struct mips_machine {
+	unsigned long		mach_type;
+	const char		*mach_id;
+	const char		*mach_name;
+	void			(*mach_setup)(void);
+};
+
+#define MIPS_MACHINE(_type, _id, _name, _setup)			\
+static const char machine_name_##_type[] __initconst		\
+			__aligned(1) = _name;			\
+static const char machine_id_##_type[] __initconst		\
+			__aligned(1) = _id;			\
+static struct mips_machine machine_##_type			\
+		__used __section(.mips.machines.init) =		\
+{								\
+	.mach_type	= _type,				\
+	.mach_id	= machine_id_##_type,			\
+	.mach_name	= machine_name_##_type,			\
+	.mach_setup	= _setup,				\
+};
+
+extern long __mips_machines_start;
+extern long __mips_machines_end;
+
+#ifdef CONFIG_MIPS_MACHINE
+int  mips_machtype_setup(char *id) __init;
+void mips_machine_setup(void) __init;
+void mips_set_machine_name(const char *name) __init;
+char *mips_get_machine_name(void);
+#else
+static inline int mips_machtype_setup(char *id) { return 1; }
+static inline void mips_machine_setup(void) { }
+static inline void mips_set_machine_name(const char *name) { }
+static inline char *mips_get_machine_name(void) { return NULL; }
+#endif /* CONFIG_MIPS_MACHINE */
+
+#endif /* __ASM_MIPS_MACHINE_H */
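A sketch of how a board would register itself with the MIPS_MACHINE() macro above. MACH_EXAMPLE_BOARD, its value and the setup routine are hypothetical names used only to show the macro shape; booting with machtype=example would then select this entry through mips_machtype_setup().

#include <linux/init.h>
#include <asm/mips_machine.h>

#define MACH_EXAMPLE_BOARD	9999	/* hypothetical machtype value */

static void __init example_board_setup(void)
{
	/* register platform devices, GPIOs, LEDs, ... here */
}

MIPS_MACHINE(MACH_EXAMPLE_BOARD,	/* machine type constant */
	     "example",			/* id matched against machtype= */
	     "Example Board",		/* human readable name */
	     example_board_setup);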
diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h
index c892bfb..785b4ea 100644
--- a/arch/mips/include/asm/mman.h
+++ b/arch/mips/include/asm/mman.h
@@ -77,6 +77,9 @@
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 #define MADV_HWPOISON    100		/* poison a page for testing */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index d959273..73c0d45 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -29,13 +29,7 @@
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)				\
 	tlbmiss_handler_setup_pgd((unsigned long)(pgd))
 
-static inline void tlbmiss_handler_setup_pgd(unsigned long pgd)
-{
-	/* Check for swapper_pg_dir and convert to physical address. */
-	if ((pgd & CKSEG3) == CKSEG0)
-		pgd = CPHYSADDR(pgd);
-	write_c0_context(pgd << 11);
-}
+extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
 
 #define TLBMISS_HANDLER_SETUP()						\
 	do {								\
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
index e00007c..d0c7749 100644
--- a/arch/mips/include/asm/perf_event.h
+++ b/arch/mips/include/asm/perf_event.h
@@ -11,15 +11,5 @@
 
 #ifndef __MIPS_PERF_EVENT_H__
 #define __MIPS_PERF_EVENT_H__
-
-/*
- * MIPS performance counters do not raise NMI upon overflow, a regular
- * interrupt will be signaled. Hence we can do the pending perf event
- * work at the tail of the irq handler.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
+/* Intentionally left empty. The file is required by linux/perf_event.h */
 #endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 892062d..dcbd4bb 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -115,7 +115,12 @@
 Ip_u3u1u2(_xor);
 Ip_u2u1u3(_xori);
 Ip_u2u1msbu3(_dins);
+Ip_u2u1msbu3(_dinsm);
 Ip_u1(_syscall);
+Ip_u1u2s3(_bbit0);
+Ip_u1u2s3(_bbit1);
+Ip_u3u1u2(_lwx);
+Ip_u3u1u2(_ldx);
 
 /* Handle labels. */
 struct uasm_label {
@@ -153,6 +158,7 @@
 # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
 # define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
 # define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
+# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_ldx(buf, rs, rt, rd)
 #else
 # define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
 # define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
@@ -167,6 +173,7 @@
 # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
 # define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
 # define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
+# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_lwx(buf, rs, rt, rd)
 #endif
 
 #define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 5c0a357..2c0e107 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -65,7 +65,7 @@
 
 /* Early prototypes of the QI LB60 had only 1GB of NAND.
  * In order to support these devices as well, the partition and ecc layout is
- * initalized depending on the NAND size */
+ * initialized depending on the NAND size */
 static struct mtd_partition qi_lb60_partitions_1gb[] = {
 	{
 		.name = "NAND BOOT partition",
@@ -464,7 +464,7 @@
 	board_gpio_setup();
 
 	if (qi_lb60_init_platform_devices())
-		panic("Failed to initalize platform devices\n");
+		panic("Failed to initialize platform devices\n");
 
 	return 0;
 }
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 38f60f3..88e6aed 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -546,7 +546,7 @@
 	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
 		jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
 
-	printk(KERN_INFO "JZ4740 GPIO initalized\n");
+	printk(KERN_INFO "JZ4740 GPIO initialized\n");
 
 	return 0;
 }
diff --git a/arch/mips/jz4740/pm.c b/arch/mips/jz4740/pm.c
index a999458..902d5b5 100644
--- a/arch/mips/jz4740/pm.c
+++ b/arch/mips/jz4740/pm.c
@@ -42,7 +42,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops jz4740_pm_ops = {
+static const struct platform_suspend_ops jz4740_pm_ops = {
 	.valid		= suspend_valid_only_mem,
 	.enter		= jz4740_pm_enter,
 };
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 22b2e0e..cedee2b 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -95,6 +95,7 @@
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
+obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
 
 obj-$(CONFIG_OF)		+= prom.o
 
@@ -106,4 +107,6 @@
 
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
+
 CPPFLAGS_vmlinux.lds		:= $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 68dae7b..f65d4c8 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -739,6 +739,8 @@
 	    && cpu_has_tlb)
 		c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
 
+	c->kscratch_mask = (config4 >> 16) & 0xff;
+
 	return config4 & MIPS_CONF_M;
 }
 
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f..94ca2b0 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,13 @@
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>
 
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-	return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
 
-#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@
 #endif
 }
 
+/*
+ * Check if the address is in kernel space
+ *
+ * This is a clone of core_kernel_text() from kernel/extable.c, but it does
+ * not call init_kernel_text(), because ftrace does not trace functions in
+ * init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -84,6 +82,42 @@
 	return 0;
 }
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount		--> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                                  1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                                  1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
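A standalone (userspace) check of the encoding described above: the unified INSN_B_1F reproduces the two constants that the old code spelled out by hand.

#include <stdio.h>

#define INSN_B(offset)	(0x10000000u | (offset))

int main(void)
{
	printf("b 1f, offset 4: 0x%08x\n", INSN_B(4));	/* 0x10000004 */
	printf("b 1f, offset 5: 0x%08x\n", INSN_B(5));	/* 0x10000005 */
	return 0;
}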
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,39 +125,11 @@
 	unsigned long ip = rec->ip;
 
 	/*
-	 * We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively.
+	 * If ip is in kernel space, no long call is needed; otherwise, a
+	 * long call is required.
 	 */
-	if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * move $12, ra_address
-		 * jalr v1
-		 *  sub sp, sp, 8
-		 *                                  1: offset = 5 instructions
-		 */
-		new = INSN_B_1F_5;
-#else
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * jalr v1
-		 *  nop | move $12, ra_address | sub sp, sp, 8
-		 *                                  1: offset = 4 instructions
-		 */
-		new = INSN_B_1F_4;
-#endif
-	} else {
-		/*
-		 * move at, ra
-		 * jal _mcount		--> nop
-		 */
-		new = INSN_NOP;
-	}
+	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
 	return ftrace_modify_code(ip, new);
 }
 
@@ -132,8 +138,8 @@
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+		insn_lui_v1_hi16_mcount;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -190,29 +196,25 @@
 #define S_R_SP	(0xafb0 << 16)  /* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-				     unsigned long parent,
-				     unsigned long parent_addr,
-				     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-	unsigned long sp, ip, ra;
+	unsigned long sp, ip, tmp;
 	unsigned int code;
 	int faulted;
 
 	/*
-	 * For module, move the ip from calling site of mcount to the
-	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 * For a module, move the ip back from the return address to just
+	 * after the "lui v1, hi_16bit_of_mcount" instruction (offset is 24);
+	 * for the kernel, move to just after the "move ra, at" instruction
+	 * (offset is 16).
 	 */
-	ip = self_addr - (in_module(self_addr) ? 20 : 12);
+	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
 	 * ra, offset(sp)" instruction
 	 */
 	do {
-		ip -= 4;
-
 		/* get the code at "ip": code = *(unsigned int *)ip; */
 		safe_load_code(code, ip, faulted);
 
@@ -224,18 +226,20 @@
 		 * store the ra on the stack
 		 */
 		if ((code & S_R_SP) != S_R_SP)
-			return parent_addr;
+			return parent_ra_addr;
 
-	} while (((code & S_RA_SP) != S_RA_SP));
+		/* Move to the next instruction */
+		ip -= 4;
+	} while ((code & S_RA_SP) != S_RA_SP);
 
 	sp = fp + (code & OFFSET_MASK);
 
-	/* ra = *(unsigned long *)sp; */
-	safe_load_stack(ra, sp, faulted);
+	/* tmp = *(unsigned long *)sp; */
+	safe_load_stack(tmp, sp, faulted);
 	if (unlikely(faulted))
 		return 0;
 
-	if (ra == parent)
+	if (tmp == old_parent_ra)
 		return sp;
 	return 0;
 }
@@ -246,21 +250,21 @@
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   unsigned long fp)
 {
-	unsigned long old;
+	unsigned long old_parent_ra;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 	    &return_to_handler;
-	int faulted;
+	int faulted, insns;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
-	 * "parent" is the stack address saved the return address of the caller
-	 * of _mcount.
+	 * "parent_ra_addr" is the stack address saved the return address of
+	 * the caller of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
 	 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@
 	 * do it in ftrace_graph_caller of mcount.S.
 	 */
 
-	/* old = *parent; */
-	safe_load_stack(old, parent, faulted);
+	/* old_parent_ra = *parent_ra_addr; */
+	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-			(unsigned long)parent, fp);
+	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+			old_parent_ra, (unsigned long)parent_ra_addr, fp);
 	/*
 	 * If getting the stack address of the non-leaf function's ra fails,
 	 * stop the function graph tracer and return
 	 */
-	if (parent == 0)
+	if (parent_ra_addr == 0)
 		goto out;
 #endif
-	/* *parent = return_hooker; */
-	safe_store_stack(return_hooker, parent, faulted);
+	/* *parent_ra_addr = return_hooker; */
+	safe_store_stack(return_hooker, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-	    -EBUSY) {
-		*parent = old;
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+	    == -EBUSY) {
+		*parent_ra_addr = old_parent_ra;
 		return;
 	}
 
-	trace.func = self_addr;
+	/*
+	 * Get the recorded ip of the current mcount calling site in the
+	 * __mcount_loc section, which will be used to filter the function
+	 * entries configured through the tracing/set_graph_function interface.
+	 */
+
+	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
-		*parent = old;
+		*parent_ra_addr = old_parent_ra;
 	}
 	return;
 out:
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
new file mode 100644
index 0000000..6001610
--- /dev/null
+++ b/arch/mips/kernel/jump_label.c
@@ -0,0 +1,54 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2010 Cavium Networks, Inc.
+ */
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+#define J_RANGE_MASK ((1ul << 28) - 1)
+
+void arch_jump_label_transform(struct jump_entry *e,
+			       enum jump_label_type type)
+{
+	union mips_instruction insn;
+	union mips_instruction *insn_p =
+		(union mips_instruction *)(unsigned long)e->code;
+
+	/* Jump only works within a 256MB aligned region. */
+	BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK));
+
+	/* Target must have 4 byte alignment. */
+	BUG_ON((e->target & 3) != 0);
+
+	if (type == JUMP_LABEL_ENABLE) {
+		insn.j_format.opcode = j_op;
+		insn.j_format.target = (e->target & J_RANGE_MASK) >> 2;
+	} else {
+		insn.word = 0; /* nop */
+	}
+
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+	*insn_p = insn;
+
+	flush_icache_range((unsigned long)insn_p,
+			   (unsigned long)insn_p + sizeof(*insn_p));
+
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+}
+
+#endif /* HAVE_JUMP_LABEL */
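A userspace sketch of the range check and target encoding used above: the j instruction can only reach targets in the same 256MB region as the jump site, and the target field holds the in-region address divided by four. The two addresses are assumptions for illustration.

#include <stdio.h>

#define J_RANGE_MASK	((1ul << 28) - 1)

int main(void)
{
	unsigned long code   = 0x80123450ul;	/* assumed jump site */
	unsigned long target = 0x80200000ul;	/* assumed jump target */

	if ((target & ~J_RANGE_MASK) != (code & ~J_RANGE_MASK)) {
		printf("target not reachable with a j instruction\n");
		return 1;
	}
	printf("j target field: 0x%07lx\n", (target & J_RANGE_MASK) >> 2);
	return 0;
}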
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
new file mode 100644
index 0000000..411a058
--- /dev/null
+++ b/arch/mips/kernel/mips_machine.c
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <asm/mips_machine.h>
+
+static struct mips_machine *mips_machine __initdata;
+static char *mips_machine_name = "Unknown";
+
+#define for_each_machine(mach) \
+	for ((mach) = (struct mips_machine *)&__mips_machines_start; \
+	     (mach) && \
+	     (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
+	     (mach)++)
+
+__init void mips_set_machine_name(const char *name)
+{
+	char *p;
+
+	if (name == NULL)
+		return;
+
+	p = kstrdup(name, GFP_KERNEL);
+	if (!p)
+		pr_err("MIPS: no memory for machine_name\n");
+
+	mips_machine_name = p;
+}
+
+char *mips_get_machine_name(void)
+{
+	return mips_machine_name;
+}
+
+__init int mips_machtype_setup(char *id)
+{
+	struct mips_machine *mach;
+
+	for_each_machine(mach) {
+		if (mach->mach_id == NULL)
+			continue;
+
+		if (strcmp(mach->mach_id, id) == 0) {
+			mips_machtype = mach->mach_type;
+			return 0;
+		}
+	}
+
+	pr_err("MIPS: no machine found for id '%s', supported machines:\n", id);
+	pr_err("%-24s %s\n", "id", "name");
+	for_each_machine(mach)
+		pr_err("%-24s %s\n", mach->mach_id, mach->mach_name);
+
+	return 1;
+}
+
+__setup("machtype=", mips_machtype_setup);
+
+__init void mips_machine_setup(void)
+{
+	struct mips_machine *mach;
+
+	for_each_machine(mach) {
+		if (mips_machtype == mach->mach_type) {
+			mips_machine = mach;
+			break;
+		}
+	}
+
+	if (!mips_machine)
+		return;
+
+	mips_set_machine_name(mips_machine->mach_name);
+	pr_info("MIPS: machine is %s\n", mips_machine_name);
+
+	if (mips_machine->mach_setup)
+		mips_machine->mach_setup();
+}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 6f51dda..dd940b7 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -30,6 +30,8 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/jump_label.h>
+
 #include <asm/pgtable.h>	/* MODULE_START */
 
 struct mips_hi16 {
@@ -46,17 +48,9 @@
 void *module_alloc(unsigned long size)
 {
 #ifdef MODULE_START
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 #else
 	if (size == 0)
 		return NULL;
@@ -390,6 +384,9 @@
 	const Elf_Shdr *s;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
+	/* Make jump label nops. */
+	jump_label_apply_nops(me);
+
 	INIT_LIST_HEAD(&me->arch.dbe_list);
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
 		if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 2b7f3f7..a824485 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@
 	return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* To look for a free counter for this event. */
-	idx = mipspmu->alloc_counter(cpuc, hwc);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then
-	 * make sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	mipspmu->disable_event(idx);
-	cpuc->events[idx] = event;
-
-	/* Set the period for the event. */
-	mipspmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	mipspmu->enable_event(hwc, idx);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
 			int idx)
@@ -204,7 +169,7 @@
 	unsigned long flags;
 	int shift = 64 - TOTAL_BITS;
 	s64 prev_raw_count, new_raw_count;
-	s64 delta;
+	u64 delta;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,34 +196,92 @@
 	return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	/* Set the period for the event. */
+	mipspmu_event_set_period(event, hwc, hwc->idx);
+
+	/* Enable the event. */
+	mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		/* We are working on a local event. */
+		mipspmu->disable_event(hwc->idx);
+		barrier();
+		mipspmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	perf_pmu_disable(event->pmu);
+
+	/* To look for a free counter for this event. */
+	idx = mipspmu->alloc_counter(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then
+	 * make sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	mipspmu->disable_event(idx);
+	cpuc->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		mipspmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+static void mipspmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-
 	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
 
-	/* We are working on a local event. */
-	mipspmu->disable_event(idx);
-
-	barrier();
-
-	mipspmu_event_update(event, hwc, idx);
+	mipspmu_stop(event, PERF_EF_UPDATE);
 	cpuc->events[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
 
 	perf_event_update_userpage(event);
 }
 
-static void mipspmu_unthrottle(struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-
-	mipspmu->enable_event(hwc, hwc->idx);
-}
-
 static void mipspmu_read(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -270,12 +293,17 @@
 	mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-	.enable		= mipspmu_enable,
-	.disable	= mipspmu_disable,
-	.unthrottle	= mipspmu_unthrottle,
-	.read		= mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@
 		perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters; they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events,
+				&pmu_reserve_mutex)) {
+		/*
+		 * We must not call the destroy function with interrupts
+		 * disabled.
+		 */
+		on_each_cpu(reset_counters,
+			(void *)(long)mipspmu->num_counters, 1);
+		mipspmu_free_irq();
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+		(event->cpu >= 0 && !cpu_online(event->cpu)))
+		return -ENODEV;
+
+	if (!atomic_inc_not_zero(&active_events)) {
+		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+			atomic_dec(&active_events);
+			return -ENOSPC;
+		}
+
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0)
+			err = mipspmu_get_irq();
+
+		if (!err)
+			atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static struct pmu pmu = {
+	.pmu_enable	= mipspmu_enable,
+	.pmu_disable	= mipspmu_disable,
+	.event_init	= mipspmu_event_init,
+	.add		= mipspmu_add,
+	.del		= mipspmu_del,
+	.start		= mipspmu_start,
+	.stop		= mipspmu_stop,
+	.read		= mipspmu_read,
+};
+
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
@@ -382,8 +486,9 @@
 {
 	struct hw_perf_event fake_hwc = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	/* Allow mixed event group. So return 1 to pass validation. */
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }
@@ -409,73 +514,6 @@
 	return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-	if (atomic_dec_and_mutex_lock(&active_events,
-				&pmu_reserve_mutex)) {
-		/*
-		 * We must not call the destroy function with interrupts
-		 * disabled.
-		 */
-		on_each_cpu(reset_counters,
-			(void *)(long)mipspmu->num_counters, 1);
-		mipspmu_free_irq();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = 0;
-
-	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-		(event->cpu >= 0 && !cpu_online(event->cpu)))
-		return ERR_PTR(-ENODEV);
-
-	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
-		}
-
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0)
-			err = mipspmu_get_irq();
-
-		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-
-	if (err)
-		return ERR_PTR(err);
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-	if (mipspmu)
-		mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-	if (mipspmu)
-		mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-		u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+		    struct pt_regs *regs)
 {
 }
 
@@ -523,23 +553,21 @@
 	while (!kstack_end(sp)) {
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
-			callchain_store(entry, addr);
+			perf_callchain_store(entry, addr);
 			if (entry->nr >= PERF_MAX_STACK_DEPTH)
 				break;
 		}
 	}
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+		      struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
 	unsigned long ra = regs->regs[31];
 	unsigned long pc = regs->cp0_epc;
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	if (raw_show_trace || !__kernel_text_address(pc)) {
 		unsigned long stack_page =
 			(unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@
 		return;
 	}
 	do {
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 		if (entry->nr >= PERF_MAX_STACK_DEPTH)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
 #else
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!current || !current->pid)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 183e0d2..d9a7db7 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@
 	 * interrupt, not NMI.
 	 */
 	if (handled == IRQ_HANDLED)
-		perf_event_do_pending();
+		irq_work_run();
 
 #ifdef CONFIG_MIPS_MT_SMP
 	read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@
 			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
 			irq < 0 ? " (share with timer interrupt)" : "");
 
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
 	return 0;
 }
 early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 26109c4..e309665 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -12,6 +12,7 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
+#include <asm/mips_machine.h>
 
 unsigned int vced_count, vcei_count;
 
@@ -31,8 +32,12 @@
 	/*
 	 * For the first processor also print the system type
 	 */
-	if (n == 0)
+	if (n == 0) {
 		seq_printf(m, "system type\t\t: %s\n", get_system_type());
+		if (mips_get_machine_name())
+			seq_printf(m, "machine\t\t\t: %s\n",
+				   mips_get_machine_name());
+	}
 
 	seq_printf(m, "processor\t\t: %ld\n", n);
 	sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
@@ -69,6 +74,8 @@
 		);
 	seq_printf(m, "shadow register sets\t: %d\n",
 		       cpu_data[n].srsets);
+	seq_printf(m, "kscratch registers\t: %d\n",
+		   hweight8(cpu_data[n].kscratch_mask));
 	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
 
 	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 9dbe583..a19811e9 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -45,11 +45,9 @@
 	return free_bootmem(addr, size);
 }
 
-u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return virt_to_phys(
-		__alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS))
-		);
+	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index acd3f2c..8ad1d56 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -70,7 +70,7 @@
  * mips_io_port_base is the begin of the address space to which x86 style
  * I/O ports are mapped.
  */
-const unsigned long mips_io_port_base __read_mostly = -1;
+const unsigned long mips_io_port_base = -1;
 EXPORT_SYMBOL(mips_io_port_base);
 
 static struct resource code_resource = { .name = "Kernel code", };
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342..dbbe0ce 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@
 
 static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e0..aae9866 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@
 
 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb9..32a2561 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@
  */
 static struct task_struct *cpu_idle_thread[NR_CPUS];
 
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
@@ -203,8 +219,19 @@
 	 * Linux can schedule processes on this slave.
 	 */
 	if (!cpu_idle_thread[cpu]) {
-		idle = fork_idle(cpu);
-		cpu_idle_thread[cpu] = idle;
+		/*
+		 * Schedule a work item so that fork_idle() runs from a
+		 * kernel thread rather than from a user task.
+		 * Ported from arch/x86/kernel/smpboot.c
+		 */
+		struct create_idle c_idle = {
+			.cpu    = cpu,
+			.done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+		};
+
+		INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		idle = cpu_idle_thread[cpu] = c_idle.idle;
 
 		if (IS_ERR(idle))
 			panic(KERN_ERR "Fork failed for CPU %d", cpu);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edf..58beabf 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -383,12 +383,11 @@
 static int __used noinline
 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 {
-	long cmd, arg1, arg2, arg3;
+	long cmd, arg1, arg2;
 
 	cmd = regs.regs[4];
 	arg1 = regs.regs[5];
 	arg2 = regs.regs[6];
-	arg3 = regs.regs[7];
 
 	switch (cmd) {
 	case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@
 		if (arg1 & 2)
 			set_thread_flag(TIF_LOGADE);
 		else
-			clear_thread_flag(TIF_FIXADE);
+			clear_thread_flag(TIF_LOGADE);
 
 		return 0;
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e971043..71350f7 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1592,7 +1592,6 @@
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
-	TLBMISS_HANDLER_SETUP();
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -1614,6 +1613,7 @@
 		write_c0_wired(0);
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
+	TLBMISS_HANDLER_SETUP();
 }
 
 /* Install CPU exception handler */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index f25df73..570607b 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -98,6 +98,13 @@
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
 
+	. = ALIGN(4);
+	.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
+		__mips_machines_start = .;
+		*(.mips.machines.init)
+		__mips_machines_end = .;
+	}
+
 	/* .exit.text is discarded at runtime, not link time, to deal with
 	 * references from .rodata
 	 */
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfe..ab52b7c 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@
 	spinlock_t tc_list_lock;
 	struct list_head tc_list;	/* Thread contexts */
 } vpecontrol = {
-	.vpe_list_lock	= SPIN_LOCK_UNLOCKED,
+	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
-	.tc_list_lock	= SPIN_LOCK_UNLOCKED,
+	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
 	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 6e1b77f..aca93ee 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -1,6 +1,7 @@
+if MACH_LOONGSON
+
 choice
 	prompt "Machine Type"
-	depends on MACH_LOONGSON
 
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@
 config LOONGSON_MC146818
 	bool
 	default n
+
+endif # MACH_LOONGSON
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 1a06def..353e1d2 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -44,10 +44,5 @@
 		strcat(arcs_cmdline, " ");
 	}
 
-	if ((strstr(arcs_cmdline, "console=")) == NULL)
-		strcat(arcs_cmdline, " console=ttyS0,115200");
-	if ((strstr(arcs_cmdline, "root=")) == NULL)
-		strcat(arcs_cmdline, " root=/dev/hda1");
-
 	prom_init_machtype();
 }
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 81fbe6b..2efd5d9 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -41,7 +41,7 @@
 
 void __init prom_init_machtype(void)
 {
-	char *p, str[MACHTYPE_LEN];
+	char *p, str[MACHTYPE_LEN + 1];
 	int machtype = MACH_LEMOTE_FL2E;
 
 	mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@
 	}
 	p += strlen("machtype=");
 	strncpy(str, p, MACHTYPE_LEN);
+	str[MACHTYPE_LEN] = '\0';
 	p = strstr(str, " ");
 	if (p)
 		*p = '\0';
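The added terminator matters because strncpy() does not NUL-terminate the destination when the source is at least MACHTYPE_LEN characters long. A minimal userspace illustration of the pitfall:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[4 + 1];

	strncpy(dst, "abcdef", 4);	/* copies "abcd", no terminator */
	dst[4] = '\0';			/* the explicit fix applied above */
	printf("%s\n", dst);		/* safely prints "abcd" */
	return 0;
}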
diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c
index 6c1fd90..f55e07a 100644
--- a/arch/mips/loongson/common/pm.c
+++ b/arch/mips/loongson/common/pm.c
@@ -147,7 +147,7 @@
 	}
 }
 
-static struct platform_suspend_ops loongson_pm_ops = {
+static const struct platform_suspend_ops loongson_pm_ops = {
 	.valid	= loongson_pm_valid_state,
 	.enter	= loongson_pm_enter,
 };
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2701d95..2a7d43f 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -70,7 +70,7 @@
 
 
 #define COMPXSP \
-  unsigned xm; int xe; int xs; int xc
+  unsigned xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYSP \
   unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
 
 
 #define COMPXDP \
-u64 xm; int xe; int xs; int xc
+u64 xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYDP \
 u64 ym; int ye; int ys; int yc
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2efcbd2..279599e 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -324,7 +324,7 @@
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	unsigned long lastpfn;
+	unsigned long lastpfn __maybe_unused;
 
 	pagetable_init();
 
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 93816f3..04f9e17 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -26,8 +26,10 @@
 #include <linux/smp.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/cache.h>
 
-#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
 #include <asm/war.h>
 #include <asm/uasm.h>
 
@@ -63,6 +65,54 @@
 	return R10000_LLSC_WAR;
 }
 
+static int use_bbit_insns(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_CAVIUM_OCTEON:
+	case CPU_CAVIUM_OCTEON_PLUS:
+	case CPU_CAVIUM_OCTEON2:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int use_lwx_insns(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_CAVIUM_OCTEON2:
+		return 1;
+	default:
+		return 0;
+	}
+}
+#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
+    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+static bool scratchpad_available(void)
+{
+	return true;
+}
+static int scratchpad_offset(int i)
+{
+	/*
+	 * CVMSEG starts at address -32768 and extends for
+	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
+	 */
+	i += 1; /* Kernel use starts at the top and works down. */
+	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
+}
+#else
+static bool scratchpad_available(void)
+{
+	return false;
+}
+static int scratchpad_offset(int i)
+{
+	BUG();
+	/* Really unreachable, but evidently some versions of GCC want this. */
+	return 0;
+}
+#endif
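A standalone check of the offset arithmetic above, assuming a CVMSEG size of 2 cache lines purely for illustration: each kernel slot lands 8 bytes below the previous one, working down from the top of the region.

#include <stdio.h>

/* Mirror of the kernel helper above, parameterised for illustration. */
static int example_scratchpad_offset(int cvmseg_lines, int i)
{
	i += 1;		/* kernel use starts at the top and works down */
	return cvmseg_lines * 128 - (8 * i) - 32768;
}

int main(void)
{
	printf("slot 0: %d\n", example_scratchpad_offset(2, 0));	/* -32520 */
	printf("slot 1: %d\n", example_scratchpad_offset(2, 1));	/* -32528 */
	return 0;
}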
 /*
  * Found by experiment: At least some revisions of the 4kc throw under
  * some circumstances a machine check exception, triggered by invalid
@@ -173,11 +223,41 @@
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
+static int check_for_high_segbits __cpuinitdata;
+
+static unsigned int kscratch_used_mask __cpuinitdata;
+
+static int __cpuinit allocate_kscratch(void)
+{
+	int r;
+	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
+
+	r = ffs(a);
+
+	if (r == 0)
+		return -1;
+
+	r--; /* make it zero based */
+
+	kscratch_used_mask |= (1 << r);
+
+	return r;
+}
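A userspace sketch of the allocation logic above: with a CPU kscratch mask of 0x0c (registers 2 and 3 implemented) and nothing allocated yet, ffs() picks bit 3, i.e. register 2. The mask value is an assumption for illustration.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int kscratch_mask = 0x0c;	/* assumed CPU capability */
	unsigned int used_mask = 0;
	int r;

	r = ffs(kscratch_mask & ~used_mask);
	if (r == 0) {
		printf("no free kscratch register\n");
		return 1;
	}
	r--;					/* make it zero based */
	used_mask |= 1u << r;
	printf("allocated kscratch register %d\n", r);	/* prints 2 */
	return 0;
}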
+
+static int scratch_reg __cpuinitdata;
+static int pgd_reg __cpuinitdata;
+enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
 /*
  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
  * we cannot do r3000 under these circumstances.
+ *
+ * Declare pgd_current here instead of including mmu_context.h to avoid type
+ * conflicts for tlbmiss_handler_setup_pgd
  */
+extern unsigned long pgd_current[];
 
 /*
  * The R3000 TLB handler is simple.
@@ -440,21 +520,43 @@
 static __cpuinit void build_restore_pagemask(u32 **p,
 					     struct uasm_reloc **r,
 					     unsigned int tmp,
-					     enum label_id lid)
+					     enum label_id lid,
+					     int restore_scratch)
 {
-	/* Reset default page size */
-	if (PM_DEFAULT_MASK >> 16) {
-		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
-		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, lid);
-		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
-	} else if (PM_DEFAULT_MASK) {
-		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, lid);
-		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	if (restore_scratch) {
+		/* Reset default page size */
+		if (PM_DEFAULT_MASK >> 16) {
+			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+			uasm_il_b(p, r, lid);
+		} else if (PM_DEFAULT_MASK) {
+			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+			uasm_il_b(p, r, lid);
+		} else {
+			uasm_i_mtc0(p, 0, C0_PAGEMASK);
+			uasm_il_b(p, r, lid);
+		}
+		if (scratch_reg > 0)
+			UASM_i_MFC0(p, 1, 31, scratch_reg);
+		else
+			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 	} else {
-		uasm_il_b(p, r, lid);
-		uasm_i_mtc0(p, 0, C0_PAGEMASK);
+		/* Reset default page size */
+		if (PM_DEFAULT_MASK >> 16) {
+			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+			uasm_il_b(p, r, lid);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+		} else if (PM_DEFAULT_MASK) {
+			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+			uasm_il_b(p, r, lid);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+		} else {
+			uasm_il_b(p, r, lid);
+			uasm_i_mtc0(p, 0, C0_PAGEMASK);
+		}
 	}
 }
 
@@ -462,7 +564,8 @@
 						 struct uasm_label **l,
 						 struct uasm_reloc **r,
 						 unsigned int tmp,
-						 enum tlb_write_entry wmode)
+						 enum tlb_write_entry wmode,
+						 int restore_scratch)
 {
 	/* Set huge page tlb entry size */
 	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -471,7 +574,7 @@
 
 	build_tlb_write_entry(p, l, r, wmode);
 
-	build_restore_pagemask(p, r, tmp, label_leave);
+	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
 }
 
 /*
@@ -482,8 +585,12 @@
 		unsigned int pmd, int lid)
 {
 	UASM_i_LW(p, tmp, 0, pmd);
-	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
-	uasm_il_bnez(p, r, tmp, lid);
+	if (use_bbit_insns()) {
+		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
+	} else {
+		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+		uasm_il_bnez(p, r, tmp, lid);
+	}
 }
 
 static __cpuinit void build_huge_update_entries(u32 **p,
@@ -532,7 +639,7 @@
 	UASM_i_SW(p, pte, 0, ptr);
 #endif
 	build_huge_update_entries(p, pte, ptr);
-	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
+	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -573,13 +680,22 @@
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-	/*
-	 * &pgd << 11 stored in CONTEXT [23..63].
-	 */
-	UASM_i_MFC0(p, ptr, C0_CONTEXT);
-	uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
-	uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0  1 0 1  << 6  xkphys cached */
-	uasm_i_drotr(p, ptr, ptr, 11);
+	if (pgd_reg != -1) {
+		/* pgd is in pgd_reg */
+		UASM_i_MFC0(p, ptr, 31, pgd_reg);
+	} else {
+		/*
+		 * &pgd << 11 stored in CONTEXT [23..63].
+		 */
+		UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+		/* Clear lower 23 bits of context. */
+		uasm_i_dins(p, ptr, 0, 0, 23);
+
+		/* 1 0  1 0 1  << 6  xkphys cached */
+		uasm_i_ori(p, ptr, ptr, 0x540);
+		uasm_i_drotr(p, ptr, ptr, 11);
+	}
 #elif defined(CONFIG_SMP)
 # ifdef  CONFIG_MIPS_MT_SMTC
 	/*
@@ -620,7 +736,6 @@
 #endif
 }
 
-enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
@@ -638,7 +753,7 @@
 
 	uasm_l_vmalloc(l, *p);
 
-	if (mode == refill && check_for_high_segbits) {
+	if (mode != not_refill && check_for_high_segbits) {
 		if (single_insn_swpd) {
 			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
 			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
@@ -661,7 +776,7 @@
 				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
 		}
 	}
-	if (mode == refill && check_for_high_segbits) {
+	if (mode != not_refill && check_for_high_segbits) {
 		uasm_l_large_segbits_fault(l, *p);
 		/*
 		 * We get here if we are an xsseg address, or if we are
@@ -677,7 +792,15 @@
 		 */
 		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 		uasm_i_jr(p, ptr);
-		uasm_i_nop(p);
+
+		if (mode == refill_scratch) {
+			if (scratch_reg > 0)
+				UASM_i_MFC0(p, 1, 31, scratch_reg);
+			else
+				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+		} else {
+			uasm_i_nop(p);
+		}
 	}
 }
 
@@ -834,6 +957,185 @@
 #endif
 }
 
+struct mips_huge_tlb_info {
+	int huge_pte;
+	int restore_scratch;
+};
+
+static struct mips_huge_tlb_info __cpuinit
+build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+			       struct uasm_reloc **r, unsigned int tmp,
+			       unsigned int ptr, int c0_scratch)
+{
+	struct mips_huge_tlb_info rv;
+	unsigned int even, odd;
+	int vmalloc_branch_delay_filled = 0;
+	const int scratch = 1; /* Our extra working register */
+
+	rv.huge_pte = scratch;
+	rv.restore_scratch = 0;
+
+	if (check_for_high_segbits) {
+		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+		if (pgd_reg != -1)
+			UASM_i_MFC0(p, ptr, 31, pgd_reg);
+		else
+			UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+		if (c0_scratch >= 0)
+			UASM_i_MTC0(p, scratch, 31, c0_scratch);
+		else
+			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+		uasm_i_dsrl_safe(p, scratch, tmp,
+				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_bnez(p, r, scratch, label_vmalloc);
+
+		if (pgd_reg == -1) {
+			vmalloc_branch_delay_filled = 1;
+			/* Clear lower 23 bits of context. */
+			uasm_i_dins(p, ptr, 0, 0, 23);
+		}
+	} else {
+		if (pgd_reg != -1)
+			UASM_i_MFC0(p, ptr, 31, pgd_reg);
+		else
+			UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+		if (c0_scratch >= 0)
+			UASM_i_MTC0(p, scratch, 31, c0_scratch);
+		else
+			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+		if (pgd_reg == -1)
+			/* Clear lower 23 bits of context. */
+			uasm_i_dins(p, ptr, 0, 0, 23);
+
+		uasm_il_bltz(p, r, tmp, label_vmalloc);
+	}
+
+	if (pgd_reg == -1) {
+		vmalloc_branch_delay_filled = 1;
+		/* 1 0  1 0 1  << 6  xkphys cached */
+		uasm_i_ori(p, ptr, ptr, 0x540);
+		uasm_i_drotr(p, ptr, ptr, 11);
+	}
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define LOC_PTEP scratch
+#else
+#define LOC_PTEP ptr
+#endif
+
+	if (!vmalloc_branch_delay_filled)
+		/* get pgd offset in bytes */
+		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+	uasm_l_vmalloc_done(l, *p);
+
+	/*
+	 *                         tmp          ptr
+	 * fall-through case =   badvaddr  *pgd_current
+	 * vmalloc case      =   badvaddr  swapper_pg_dir
+	 */
+
+	if (vmalloc_branch_delay_filled)
+		/* get pgd offset in bytes */
+		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+#ifdef __PAGETABLE_PMD_FOLDED
+	GET_CONTEXT(p, tmp); /* get context reg */
+#endif
+	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
+
+	if (use_lwx_insns()) {
+		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
+	} else {
+		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
+		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
+	}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+	/* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
+	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
+	GET_CONTEXT(p, tmp); /* get context reg */
+
+	if (use_lwx_insns()) {
+		UASM_i_LWX(p, scratch, scratch, ptr);
+	} else {
+		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
+		UASM_i_LW(p, scratch, 0, ptr);
+	}
+#endif
+	/* Adjust the context during the load latency. */
+	build_adjust_context(p, tmp);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
+	/*
+	 * In the LWX case we don't want to do the load in the
+	 * delay slot.  It cannot issue in the same cycle and may be
+	 * speculative and unneeded.
+	 */
+	if (use_lwx_insns())
+		uasm_i_nop(p);
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
+	/* build_update_entries */
+	if (use_lwx_insns()) {
+		even = ptr;
+		odd = tmp;
+		UASM_i_LWX(p, even, scratch, tmp);
+		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
+		UASM_i_LWX(p, odd, scratch, tmp);
+	} else {
+		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
+		even = tmp;
+		odd = ptr;
+		UASM_i_LW(p, even, 0, ptr); /* get even pte */
+		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
+	}
+	if (kernel_uses_smartmips_rixi) {
+		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
+		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
+		uasm_i_drotr(p, even, even,
+			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+		uasm_i_drotr(p, odd, odd,
+			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
+		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
+	}
+	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
+
+	if (c0_scratch >= 0) {
+		UASM_i_MFC0(p, scratch, 31, c0_scratch);
+		build_tlb_write_entry(p, l, r, tlb_random);
+		uasm_l_leave(l, *p);
+		rv.restore_scratch = 1;
+	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
+		build_tlb_write_entry(p, l, r, tlb_random);
+		uasm_l_leave(l, *p);
+		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+	} else {
+		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+		build_tlb_write_entry(p, l, r, tlb_random);
+		uasm_l_leave(l, *p);
+		rv.restore_scratch = 1;
+	}
+
+	uasm_i_eret(p); /* return from trap */
+
+	return rv;
+}
+
 /*
  * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
  * because EXL == 0.  If we wrap, we can also use the 32 instruction
@@ -849,54 +1151,67 @@
 	struct uasm_reloc *r = relocs;
 	u32 *f;
 	unsigned int final_len;
+	struct mips_huge_tlb_info htlb_info;
+	enum vmalloc64_mode vmalloc_mode;
 
 	memset(tlb_handler, 0, sizeof(tlb_handler));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 	memset(final_handler, 0, sizeof(final_handler));
 
-	/*
-	 * create the plain linear handler
-	 */
-	if (bcm1250_m3_war()) {
-		unsigned int segbits = 44;
+	if (scratch_reg == 0)
+		scratch_reg = allocate_kscratch();
 
-		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
-		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
-		uasm_i_xor(&p, K0, K0, K1);
-		uasm_i_dsrl_safe(&p, K1, K0, 62);
-		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
-		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
-		uasm_i_or(&p, K0, K0, K1);
-		uasm_il_bnez(&p, &r, K0, label_leave);
-		/* No need for uasm_i_nop */
-	}
+	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
+		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
+							  scratch_reg);
+		vmalloc_mode = refill_scratch;
+	} else {
+		htlb_info.huge_pte = K0;
+		htlb_info.restore_scratch = 0;
+		vmalloc_mode = refill_noscratch;
+		/*
+		 * create the plain linear handler
+		 */
+		if (bcm1250_m3_war()) {
+			unsigned int segbits = 44;
+
+			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
+			uasm_i_xor(&p, K0, K0, K1);
+			uasm_i_dsrl_safe(&p, K1, K0, 62);
+			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+			uasm_i_or(&p, K0, K0, K1);
+			uasm_il_bnez(&p, &r, K0, label_leave);
+			/* No need for uasm_i_nop */
+		}
 
 #ifdef CONFIG_64BIT
-	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
-	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
-	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
 #endif
 
-	build_get_ptep(&p, K0, K1);
-	build_update_entries(&p, K0, K1);
-	build_tlb_write_entry(&p, &l, &r, tlb_random);
-	uasm_l_leave(&l, p);
-	uasm_i_eret(&p); /* return from trap */
-
+		build_get_ptep(&p, K0, K1);
+		build_update_entries(&p, K0, K1);
+		build_tlb_write_entry(&p, &l, &r, tlb_random);
+		uasm_l_leave(&l, p);
+		uasm_i_eret(&p); /* return from trap */
+	}
 #ifdef CONFIG_HUGETLB_PAGE
 	uasm_l_tlb_huge_update(&l, p);
-	UASM_i_LW(&p, K0, 0, K1);
-	build_huge_update_entries(&p, K0, K1);
-	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
+	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+				   htlb_info.restore_scratch);
 #endif
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
 #endif
 
 	/*
@@ -1014,6 +1329,55 @@
 u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+
+static void __cpuinit build_r4000_setup_pgd(void)
+{
+	const int a0 = 4;
+	const int a1 = 5;
+	u32 *p = tlbmiss_handler_setup_pgd;
+	struct uasm_label *l = labels;
+	struct uasm_reloc *r = relocs;
+
+	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+	memset(labels, 0, sizeof(labels));
+	memset(relocs, 0, sizeof(relocs));
+
+	pgd_reg = allocate_kscratch();
+
+	if (pgd_reg == -1) {
+		/* PGD << 11 in c0_Context */
+		/*
+		 * If it is a ckseg0 address, convert to a physical
+		 * address.  Shifting right by 29 and adding 4 will
+		 * result in zero for these addresses.
+		 *
+		 */
+		UASM_i_SRA(&p, a1, a0, 29);
+		UASM_i_ADDIU(&p, a1, a1, 4);
+		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
+		uasm_i_nop(&p);
+		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
+		uasm_l_tlbl_goaround1(&l, p);
+		UASM_i_SLL(&p, a0, a0, 11);
+		uasm_i_jr(&p, 31);
+		UASM_i_MTC0(&p, a0, C0_CONTEXT);
+	} else {
+		/* PGD in c0_KScratch */
+		uasm_i_jr(&p, 31);
+		UASM_i_MTC0(&p, a0, 31, pgd_reg);
+	}
+	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
+		panic("tlbmiss_handler_setup_pgd space exceeded");
+	uasm_resolve_relocs(relocs, labels);
+	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
+		 (unsigned int)(p - tlbmiss_handler_setup_pgd));
+
+	dump_handler(tlbmiss_handler_setup_pgd,
+		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+}
+#endif
 
 static void __cpuinit
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
@@ -1100,14 +1464,20 @@
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	if (kernel_uses_smartmips_rixi) {
-		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
-		uasm_il_beqz(p, r, pte, lid);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
+			uasm_i_nop(p);
+		} else {
+			uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+			uasm_il_beqz(p, r, pte, lid);
+			iPTE_LW(p, pte, ptr);
+		}
 	} else {
 		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 		uasm_il_bnez(p, r, pte, lid);
+		iPTE_LW(p, pte, ptr);
 	}
-	iPTE_LW(p, pte, ptr);
 }
 
 /* Make PTE valid, store result in PTR. */
@@ -1128,10 +1498,17 @@
 build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-	uasm_il_bnez(p, r, pte, lid);
-	iPTE_LW(p, pte, ptr);
+	if (use_bbit_insns()) {
+		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
+		uasm_i_nop(p);
+		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
+		uasm_i_nop(p);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+		uasm_il_bnez(p, r, pte, lid);
+		iPTE_LW(p, pte, ptr);
+	}
 }
 
 /* Make PTE writable, update software status bits as well, then store
@@ -1155,12 +1532,19 @@
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
-	uasm_il_beqz(p, r, pte, lid);
-	iPTE_LW(p, pte, ptr);
+	if (use_bbit_insns()) {
+		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
+		uasm_i_nop(p);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_WRITE);
+		uasm_il_beqz(p, r, pte, lid);
+		iPTE_LW(p, pte, ptr);
+	}
 }
 
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
+
 /*
  * R3000 style TLB load/store/modify handlers.
  */
@@ -1402,14 +1786,23 @@
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it.  Skip the expensive test..
 		 */
-		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+				      label_tlbl_goaround1);
+		} else {
+			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		}
 		uasm_i_nop(&p);
 
 		uasm_i_tlbr(&p);
 		/* Examine  entrylo 0 or 1 based on ptr. */
-		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-		uasm_i_beqz(&p, K0, 8);
+		if (use_bbit_insns()) {
+			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+		} else {
+			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+			uasm_i_beqz(&p, K0, 8);
+		}
 
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
@@ -1417,12 +1810,18 @@
 		 * If the entryLo (now in K0) is valid (bit 1), RI or
 		 * XI must have triggered it.
 		 */
-		uasm_i_andi(&p, K0, K0, 2);
-		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
-
-		uasm_l_tlbl_goaround1(&l, p);
-		/* Reload the PTE value */
-		iPTE_LW(&p, K0, K1);
+		if (use_bbit_insns()) {
+			uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl);
+			/* Reload the PTE value */
+			iPTE_LW(&p, K0, K1);
+			uasm_l_tlbl_goaround1(&l, p);
+		} else {
+			uasm_i_andi(&p, K0, K0, 2);
+			uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+			uasm_l_tlbl_goaround1(&l, p);
+			/* Reload the PTE value */
+			iPTE_LW(&p, K0, K1);
+		}
 	}
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
@@ -1442,23 +1841,35 @@
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it.  Skip the expensive test..
 		 */
-		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+				      label_tlbl_goaround2);
+		} else {
+			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		}
 		uasm_i_nop(&p);
 
 		uasm_i_tlbr(&p);
 		/* Examine  entrylo 0 or 1 based on ptr. */
-		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-		uasm_i_beqz(&p, K0, 8);
-
+		if (use_bbit_insns()) {
+			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+		} else {
+			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+			uasm_i_beqz(&p, K0, 8);
+		}
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
 		/*
 		 * If the entryLo (now in K0) is valid (bit 1), RI or
 		 * XI must have triggered it.
 		 */
-		uasm_i_andi(&p, K0, K0, 2);
-		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2);
+		} else {
+			uasm_i_andi(&p, K0, K0, 2);
+			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		}
 		/* Reload the PTE value */
 		iPTE_LW(&p, K0, K1);
 
@@ -1466,7 +1877,7 @@
 		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
 		 * it is restored in build_huge_tlb_write_entry.
 		 */
-		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0);
 
 		uasm_l_tlbl_goaround2(&l, p);
 	}
@@ -1623,13 +2034,16 @@
 		break;
 
 	default:
-		build_r4000_tlb_refill_handler();
 		if (!run_once) {
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+			build_r4000_setup_pgd();
+#endif
 			build_r4000_tlb_load_handler();
 			build_r4000_tlb_store_handler();
 			build_r4000_tlb_modify_handler();
 			run_once++;
 		}
+		build_r4000_tlb_refill_handler();
 	}
 }
 
@@ -1641,4 +2055,8 @@
 			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
 	local_flush_icache_range((unsigned long)handle_tlbm,
 			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
+			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(tlbmiss_handler_setup_pgd));
+#endif
 }
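
Much of the tlbex.c rework above replaces andi+beqz/bnez flag tests with Octeon-style bbit0/bbit1 instructions, which branch directly on bit ilog2(mask) and therefore leave the tested register intact, so the PTE no longer has to be reloaded afterwards. A minimal userspace sketch of that bit-index test follows; the PAGE_HUGE value here is made up purely for illustration and is not the kernel's _PAGE_HUGE.

#include <stdio.h>
#include <stdint.h>

#define PAGE_HUGE 0x00010000u	/* illustrative flag, not the real _PAGE_HUGE */

/* ilog2 of a power-of-two mask: the bit index a bbit-style branch tests. */
static unsigned int ilog2_u32(uint32_t x)
{
	unsigned int n = 0;

	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t pte = PAGE_HUGE | 0x7;		/* pretend PTE with the huge bit set */
	unsigned int bit = ilog2_u32(PAGE_HUGE);

	/*
	 * A bbit1-style test looks at a single bit and leaves pte untouched,
	 * unlike an andi+bnez sequence that overwrites the tested register.
	 */
	if ((pte >> bit) & 1)
		printf("huge mapping (bit %u set), pte still 0x%08x\n", bit, pte);
	return 0;
}
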
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 23afdeb..5fa1851 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -68,7 +68,8 @@
 	insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
 	insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp,
 	insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
-	insn_dins, insn_syscall, insn_bbit0, insn_bbit1
+	insn_dins, insn_dinsm, insn_syscall, insn_bbit0, insn_bbit1,
+	insn_lwx, insn_ldx
 };
 
 struct insn {
@@ -142,9 +143,12 @@
 	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
 	{ insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
 	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
 	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
 	{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
 	{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
+	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
 	{ insn_invalid, 0, 0 }
 };
 
@@ -152,91 +156,83 @@
 
 static inline __uasminit u32 build_rs(u32 arg)
 {
-	if (arg & ~RS_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RS_MASK) << RS_SH;
 }
 
 static inline __uasminit u32 build_rt(u32 arg)
 {
-	if (arg & ~RT_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RT_MASK) << RT_SH;
 }
 
 static inline __uasminit u32 build_rd(u32 arg)
 {
-	if (arg & ~RD_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RD_MASK) << RD_SH;
 }
 
 static inline __uasminit u32 build_re(u32 arg)
 {
-	if (arg & ~RE_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RE_MASK) << RE_SH;
 }
 
 static inline __uasminit u32 build_simm(s32 arg)
 {
-	if (arg > 0x7fff || arg < -0x8000)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg > 0x7fff || arg < -0x8000,
+	     KERN_WARNING "Micro-assembler field overflow\n");
 
 	return arg & 0xffff;
 }
 
 static inline __uasminit u32 build_uimm(u32 arg)
 {
-	if (arg & ~IMM_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return arg & IMM_MASK;
 }
 
 static inline __uasminit u32 build_bimm(s32 arg)
 {
-	if (arg > 0x1ffff || arg < -0x20000)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg > 0x1ffff || arg < -0x20000,
+	     KERN_WARNING "Micro-assembler field overflow\n");
 
-	if (arg & 0x3)
-		printk(KERN_WARNING "Invalid micro-assembler branch target\n");
+	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
 
 	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
 }
 
 static inline __uasminit u32 build_jimm(u32 arg)
 {
-	if (arg & ~((JIMM_MASK) << 2))
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~(JIMM_MASK << 2),
+	     KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg >> 2) & JIMM_MASK;
 }
 
 static inline __uasminit u32 build_scimm(u32 arg)
 {
-	if (arg & ~SCIMM_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~SCIMM_MASK,
+	     KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
 static inline __uasminit u32 build_func(u32 arg)
 {
-	if (arg & ~FUNC_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return arg & FUNC_MASK;
 }
 
 static inline __uasminit u32 build_set(u32 arg)
 {
-	if (arg & ~SET_MASK)
-		printk(KERN_WARNING "Micro-assembler field overflow\n");
+	WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return arg & SET_MASK;
 }
@@ -340,6 +336,13 @@
 }							\
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_u2u1msb32u3(op)				\
+Ip_u2u1msbu3(op)					\
+{							\
+	build_insn(buf, insn##op, b, a, c+d-33, c);	\
+}							\
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u1u2(op)					\
 Ip_u1u2(op)						\
 {							\
@@ -422,9 +425,12 @@
 I_u3u1u2(_xor)
 I_u2u1u3(_xori)
 I_u2u1msbu3(_dins);
+I_u2u1msb32u3(_dinsm);
 I_u1(_syscall);
 I_u1u2s3(_bbit0);
 I_u1u2s3(_bbit1);
+I_u3u1u2(_lwx)
+I_u3u1u2(_ldx)
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
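
The uasm.c hunks above route the field range checks through WARN() so an out-of-range operand now produces a backtrace instead of a bare printk. A rough userspace sketch of the build_rs() pattern is below; warn_if() is only a stand-in for the kernel's WARN(), and RS_MASK/RS_SH reflect the 5-bit rs field at bit 21 of a MIPS instruction word.

#include <stdio.h>
#include <stdint.h>

#define RS_MASK 0x1f
#define RS_SH   21

/* Userspace stand-in for the kernel's WARN(): just report the condition. */
#define warn_if(cond, msg) \
	do { if (cond) fprintf(stderr, "warning: %s\n", msg); } while (0)

static uint32_t build_rs(uint32_t arg)
{
	warn_if(arg & ~RS_MASK, "micro-assembler field overflow");

	return (arg & RS_MASK) << RS_SH;
}

int main(void)
{
	printf("rs field for $8: 0x%08x\n", build_rs(8));
	printf("rs field for 40: 0x%08x\n", build_rs(40));	/* overflows the 5-bit field */
	return 0;
}
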
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index b27419c..a96d281 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -43,7 +43,7 @@
 static char *mtypes[3] = {
 	"Dont use memory",
 	"YAMON PROM memory",
-	"Free memmory",
+	"Free memory",
 };
 #endif
 
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index b7c03d8..68798f8 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -308,7 +308,7 @@
  *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
  *
  ****************************************************************************/
-static int bpci_interrupt(int irq, void *dev_id)
+static irqreturn_t bpci_interrupt(int irq, void *dev_id)
 {
 	struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
 	unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@
 	/* write to clear all asserted interrupts */
 	preg->if_status = stat;
 
-	return PCIBIOS_SUCCESSFUL;
+	return IRQ_HANDLED;
 }
 
 /*****************************************************************************
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 385f035..0583c463 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -900,7 +900,7 @@
 	mem_access_subid.s.ror = 0;
 	/* Disable Relaxed Ordering for Writes. */
 	mem_access_subid.s.row = 0;
-	/* PCIe Adddress Bits <63:34>. */
+	/* PCIe Address Bits <63:34>. */
 	mem_access_subid.s.ba = 0;
 
 	/*
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index c139988..8d79849 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -4,15 +4,11 @@
 
 config PMC_MSP4200_EVAL
 	bool "PMC-Sierra MSP4200 Eval Board"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 
 config PMC_MSP4200_GW
 	bool "PMC-Sierra MSP4200 VoIP Gateway"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
index cca64e1..01df84c 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -81,7 +81,7 @@
 	mips_hpt_frequency = cpu_rate/2;
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
 	return MSP_INT_VPE0_TIMER;
 }
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index 73880ad..fb3d296 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -57,7 +57,7 @@
 unsigned long ptv_memsize;
 
 /*
- * struct low_mem_reserved - Items in low memmory that are reserved
+ * struct low_mem_reserved - Items in low memory that are reserved
  * @start:	Physical address of item
  * @size:	Size, in bytes, of this item
  * @is_aliased:	True if this is RAM aliased from another location. If false,
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index 87ccdb4..48853ab 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -410,14 +410,13 @@
 		return -EBUSY;
 
 	memset(&sbp, 0, sizeof(struct sbprof_tb));
-	sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
+	sbp.sbprof_tbbuf = vzalloc(MAX_TBSAMPLE_BYTES);
 	if (!sbp.sbprof_tbbuf) {
 		sbp.open = SB_CLOSED;
 		wmb();
 		return -ENOMEM;
 	}
 
-	memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
 	init_waitqueue_head(&sbp.tb_sync);
 	init_waitqueue_head(&sbp.tb_read);
 	mutex_init(&sbp.lock);
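
The sb_tbprof.c hunk above is the usual vmalloc()+memset() to vzalloc() conversion: one call that hands back already-zeroed memory. The closest userspace analogue is preferring calloc() over malloc()+memset(), sketched here with an illustrative buffer size rather than the driver's MAX_TBSAMPLE_BYTES.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_BYTES 4096	/* illustrative size only */

int main(void)
{
	/* The pattern the patch removes: allocate, then zero in a second step. */
	char *a = malloc(BUF_BYTES);
	if (!a)
		return 1;
	memset(a, 0, BUF_BYTES);

	/* One zeroing allocation, the role vzalloc() plays in the driver. */
	char *b = calloc(1, BUF_BYTES);
	if (!b) {
		free(a);
		return 1;
	}

	printf("both buffers start zeroed: %d %d\n", a[0], b[0]);
	free(a);
	free(b);
	return 0;
}
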
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 9a0be81..85a87de 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -107,7 +107,7 @@
 
 /*
  * allocate pci_controller and resources.
- * mem_base, io_base: physical addresss.  0 for auto assignment.
+ * mem_base, io_base: physical address.  0 for auto assignment.
  * mem_size and io_size means max size on auto assignment.
  * pcic must be &txx9_primary_pcic or NULL.
  */
@@ -213,11 +213,8 @@
 
 	pcic->mem_offset = 0;	/* busaddr == physaddr */
 
-	printk(KERN_INFO "PCI: IO 0x%08llx-0x%08llx MEM 0x%08llx-0x%08llx\n",
-	       (unsigned long long)pcic->mem_resource[1].start,
-	       (unsigned long long)pcic->mem_resource[1].end,
-	       (unsigned long long)pcic->mem_resource[0].start,
-	       (unsigned long long)pcic->mem_resource[0].end);
+	printk(KERN_INFO "PCI: IO %pR MEM %pR\n",
+	       &pcic->mem_resource[1], &pcic->mem_resource[0]);
 
 	/* register_pci_controller() will request MEM resource */
 	release_resource(&pcic->mem_resource[0]);
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 41ba385..243bfa2 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,6 +1,7 @@
 config MN10300
 	def_bool y
 	select HAVE_OPROFILE
+	select GENERIC_HARDIRQS
 
 config AM33_2
 	def_bool n
@@ -34,9 +35,6 @@
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
@@ -79,10 +77,6 @@
 config ARCH_HAS_ILOG2_U32
 	def_bool y
 
-# Use the generic interrupt handling code in kernel/irq/
-config GENERIC_HARDIRQS
-	def_bool y
-
 config HOTPLUG_CPU
 	def_bool n
 
@@ -203,6 +197,7 @@
 config SMP
 	bool "Symmetric multi-processing support"
 	default y
+	select USE_GENERIC_SMP_HELPERS
 	depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
 	---help---
 	  This enables support for systems with more than one CPU. If you have
@@ -226,11 +221,6 @@
 	depends on SMP
 	default "2"
 
-config USE_GENERIC_SMP_HELPERS
-	bool
-	depends on SMP
-	default y
-
 source "kernel/Kconfig.preempt"
 
 config MN10300_CURRENT_IN_E2
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index 3f749b6..1fd41ec 100644
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -4,7 +4,7 @@
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
index 83ce2f2..31d7626 100644
--- a/arch/mn10300/configs/asb2364_defconfig
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -15,7 +15,7 @@
 CONFIG_RESOURCE_COUNTERS=y
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 92d2f92..9d773a6 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -139,7 +139,7 @@
  * Atomically reads the value of @v.  Note that the guaranteed
  * useful range of an atomic_t is only 24 bits.
  */
-#define atomic_read(v)	((v)->counter)
+#define atomic_read(v)	(ACCESS_ONCE((v)->counter))
 
 /**
  * atomic_set - set atomic variable
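
The atomic_read() change above wraps the load in ACCESS_ONCE() so the compiler has to emit a fresh read of the counter on every call instead of reusing a value it cached in a register across a loop. A small userspace sketch of the same idiom; my_atomic is only a stand-in for atomic_t.

#include <stdio.h>

/*
 * Userspace sketch of the kernel's ACCESS_ONCE(): the volatile access
 * forces a real load from memory rather than a value reused by the
 * optimizer.
 */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct my_atomic { int counter; };	/* stand-in for atomic_t */

#define my_atomic_read(v)	(ACCESS_ONCE((v)->counter))

int main(void)
{
	struct my_atomic a = { .counter = 42 };

	/* Each call re-reads a.counter from memory, even under optimization. */
	printf("%d\n", my_atomic_read(&a));
	return 0;
}
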
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 679dee0..3d6e60d 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -160,9 +160,10 @@
 
 #define __get_user_check(x, ptr, size)					\
 ({									\
+	const __typeof__(ptr) __guc_ptr = (ptr);			\
 	int _e;								\
-	if (likely(__access_ok((unsigned long) (ptr), (size))))		\
-		_e = __get_user_nocheck((x), (ptr), (size));		\
+	if (likely(__access_ok((unsigned long) __guc_ptr, (size))))	\
+		_e = __get_user_nocheck((x), __guc_ptr, (size));	\
 	else {								\
 		_e = -EFAULT;						\
 		(x) = (__typeof__(x))0;					\
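
The __get_user_check() fix above is a macro-hygiene repair: ptr is captured once in a typed local (__guc_ptr), so an argument with side effects is evaluated exactly once instead of twice. The sketch below makes the difference visible with a hypothetical next_slot() helper; deref_once() mirrors the __typeof__ capture using a GCC/Clang statement expression, as the kernel macro does.

#include <stdio.h>

static int calls;

static int *next_slot(int *buf)
{
	return &buf[calls++];		/* side effect: advances on every call */
}

/* Evaluates its argument twice, so the side effect runs twice. */
#define deref_twice(p)	((p) != NULL ? *(p) : 0)

/* Captures the argument once in a typed local, like __guc_ptr does. */
#define deref_once(p)	({ __typeof__(p) _p = (p); _p != NULL ? *_p : 0; })

int main(void)
{
	int buf[4] = { 10, 20, 30, 40 };
	int v;

	calls = 0;
	v = deref_twice(next_slot(buf));
	printf("twice: value %d, next_slot() called %d time(s)\n", v, calls);

	calls = 0;
	v = deref_once(next_slot(buf));
	printf("once:  value %d, next_slot() called %d time(s)\n", v, calls);
	return 0;
}
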
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index e9e20f9..48d7058 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -89,7 +89,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
index a8933a6..a6b63dd 100644
--- a/arch/mn10300/mm/cache-inv-icache.c
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -69,7 +69,7 @@
 
 	/* invalidate the icache coverage on that region */
 	mn10300_local_icache_inv_range2(addr + off, size);
-	smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+	smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
 }
 
 /**
@@ -101,7 +101,7 @@
 		 * directly */
 		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
 		mn10300_icache_inv_range(start_page, end);
-		smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+		smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
 		if (start_page == start)
 			goto done;
 		end = start_page;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 0888675..fed2946 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -12,7 +12,10 @@
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
-	select GENERIC_HARDIRQS_NO__DO_IRQ
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select IRQ_PER_CPU
+
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
@@ -66,22 +69,9 @@
 	depends on SMP
 	default y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
 config HAVE_LATENCYTOP_SUPPORT
         def_bool y
 
-config IRQ_PER_CPU
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 # unless you want to implement ACPI on PA-RISC ... ;-)
 config PM
 	bool
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index f9305f3..b647b18 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -8,7 +8,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 628d3e0..311ca36 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
index 9749c8a..f5b7bf5 100644
--- a/arch/parisc/include/asm/mman.h
+++ b/arch/parisc/include/asm/mman.h
@@ -59,6 +59,9 @@
 #define MADV_MERGEABLE   65		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	67		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	68		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 #define MAP_VARIABLE	0
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 865f37a..6f1f65d 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -10,11 +10,13 @@
  * we simulate an x86-style page table for the linux mm code
  */
 
-#include <linux/mm.h>		/* for vm_area_struct */
 #include <linux/bitops.h>
+#include <linux/spinlock.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
 
+struct vm_area_struct;
+
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory.  For the return value to be meaningful, ADDR must be >=
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index df971fa..4896ed0 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1126,15 +1126,13 @@
 	unsigned int i;
 	unsigned long flags;
 
-	for (i = 0; i < count && i < 79;) {
+	for (i = 0; i < count;) {
 		switch(str[i]) {
 		case '\n':
 			iodc_dbuf[i+0] = '\r';
 			iodc_dbuf[i+1] = '\n';
 			i += 2;
 			goto print;
-		case '\b':	/* BS */
-			i--; /* overwrite last */
 		default:
 			iodc_dbuf[i] = str[i];
 			i++;
@@ -1142,15 +1140,6 @@
 		}
 	}
 
-	/* if we're at the end of line, and not already inserting a newline,
-	 * insert one anyway. iodc console doesn't claim to support >79 char
-	 * lines. don't account for this in the return value.
-	 */
-	if (i == 79 && iodc_dbuf[i-1] != '\n') {
-		iodc_dbuf[i+0] = '\r';
-		iodc_dbuf[i+1] = '\n';
-	}
-
 print:
         spin_lock_irqsave(&pdc_lock, flags);
         real32_call(PAGE0->mem_cons.iodc_io,
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 11bdd68..fc770be 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -169,11 +169,11 @@
 
 	struct console *tmp;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(tmp)
 		if (tmp == &pdc_cons)
 			break;
-	release_console_sem();
+	console_unlock();
 
 	if (!tmp) {
 		printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 48fb479..7d69e9b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -20,6 +20,9 @@
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool PPC64 || PHYS_64BIT
 
+config ARCH_DMA_ADDR_T_64BIT
+	def_bool ARCH_PHYS_ADDR_T_64BIT
+
 config MMU
 	bool
 	default y
@@ -33,24 +36,12 @@
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	default y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool PPC64
 
 config NEED_PER_CPU_EMBED_FIRST_CHUNK
 	def_bool PPC64
 
-config IRQ_PER_CPU
-	bool
-	default y
-
 config NR_IRQS
 	int "Number of virtual interrupt numbers"
 	range 32 32768
@@ -140,6 +131,9 @@
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_SPARSE_IRQ
+	select IRQ_PER_CPU
 
 config EARLY_PRINTK
 	bool
@@ -209,7 +203,7 @@
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 	depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-		   PPC_85xx || PPC_86xx || PPC_PSERIES
+		   PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
 	bool
@@ -389,19 +383,6 @@
 	  CPU.  Generally saying Y is safe, although some problems have been
 	  reported with SMP Power Macintoshes with this option enabled.
 
-config SPARSE_IRQ
-	bool "Support sparse irq numbering"
-	default n
-	help
-	  This enables support for sparse irqs. This is useful for distro
-	  kernels that want to define a high CONFIG_NR_CPUS value but still
-	  want to have low kernel memory footprint on smaller machines.
-
-	  ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
-	    out the irq_desc[] array in a more NUMA-friendly way. )
-
-	  If you don't know what to do here, say N.
-
 config NUMA
 	bool "NUMA support"
 	depends on PPC64
@@ -595,13 +576,11 @@
 
 	  If unsure, leave blank
 
-if !44x || BROKEN
 config ARCH_WANTS_FREEZER_CONTROL
 	def_bool y
 	depends on ADB_PMU
 
 source kernel/power/Kconfig
-endif
 
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
@@ -682,6 +661,15 @@
 	  Freescale MPC85xx/MPC86xx power management controller support
 	  (suspend/resume). For MPC83xx see platforms/83xx/suspend.c
 
+config PPC4xx_CPM
+	bool
+	default y
+	depends on SUSPEND && (44x || 40x)
+	help
+	  PPC4xx Clock Power Management (CPM) support (suspend/resume).
+	  It also enables support for two different idle states (idle-wait
+	  and idle-doze).
+
 config 4xx_SOC
 	bool
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 96deec6..8917816 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -368,7 +368,7 @@
 extra-installed		:= $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
 hostprogs-installed	:= $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y))
 wrapper-installed	:= $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
-dts-installed		:= $(patsubst $(obj)/dts/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(obj)/dts/*.dts))
+dts-installed		:= $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))
 
 all-installed		:= $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed)
 
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index a303703..5b27a4b 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -105,6 +105,15 @@
 		dcr-reg = <0x00c 0x002>;
 	};
 
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x160 0x003>;
+		unused-units = <0x00000100>;
+		idle-doze = <0x02000000>;
+		standby = <0xfeff791d>;
+	};
+
 	L2C0: l2c {
 		compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
 		dcr-reg = <0x020 0x008		/* Internal SRAM DCR's */
@@ -270,28 +279,6 @@
 				interrupts = <0x1 0x4>;
 			};
 
-			UART2: serial@ef600500 {
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0xef600500 0x00000008>;
-				virtual-reg = <0xef600500>;
-				clock-frequency = <0>; /* Filled in by U-Boot */
-				current-speed = <0>; /* Filled in by U-Boot */
-				interrupt-parent = <&UIC1>;
-				interrupts = <28 0x4>;
-			};
-
-			UART3: serial@ef600600 {
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0xef600600 0x00000008>;
-				virtual-reg = <0xef600600>;
-				clock-frequency = <0>; /* Filled in by U-Boot */
-				current-speed = <0>; /* Filled in by U-Boot */
-				interrupt-parent = <&UIC1>;
-				interrupts = <29 0x4>;
-			};
-
 			IIC0: i2c@ef600700 {
 				compatible = "ibm,iic-460ex", "ibm,iic";
 				reg = <0xef600700 0x00000014>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 083e68e..89edb16 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -82,6 +82,15 @@
 		interrupt-parent = <&UIC0>;
 	};
 
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x0b0 0x003>;
+		unused-units = <0x00000000>;
+		idle-doze = <0x02000000>;
+		standby = <0xe3e74800>;
+	};
+
 	plb {
 		compatible = "ibm,plb-405ex", "ibm,plb4";
 		#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 05a76cc..697b3f6 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -297,6 +297,14 @@
 			interrupt-parent = < &ipic >;
 		};
 
+		dma@2c000 {
+			compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+			reg = <0x2c000 0x1800>;
+			interrupts = <3 0x8
+					94 0x8>;
+			interrupt-parent = < &ipic >;
+		};
+
 	};
 
 	pci0: pcie@e0009000 {
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index a97eb2d..a0bd188 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -109,7 +109,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 		device_type = "soc";
-		compatible = "fsl,mpc8315-immr", "simple-bus";
+		compatible = "fsl,mpc8308-immr", "simple-bus";
 		ranges = <0 0xe0000000 0x00100000>;
 		reg = <0xe0000000 0x00000200>;
 		bus-frequency = <0>;
@@ -265,6 +265,14 @@
 			interrupt-parent = < &ipic >;
 		};
 
+		dma@2c000 {
+			compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+			reg = <0x2c000 0x1800>;
+			interrupts = <3 0x8
+					94 0x8>;
+			interrupt-parent = < &ipic >;
+		};
+
 	};
 
 	pci0: pcie@e0009000 {
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 2bbecbb..69422eb 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -291,13 +291,13 @@
 			ranges = <0x0 0xc100 0x200>;
 			cell-index = <1>;
 			dma00: dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
+				compatible = "fsl,ssi-dma-channel";
 				reg = <0x0 0x80>;
 				cell-index = <0>;
 				interrupts = <76 2>;
 			};
 			dma01: dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
+				compatible = "fsl,ssi-dma-channel";
 				reg = <0x80 0x80>;
 				cell-index = <1>;
 				interrupts = <77 2>;
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 97fedce..4182c77 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 33b3c24..2dbb293 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig
index 4613079..ebeb4ac 100644
--- a/arch/powerpc/configs/40x/hcu4_defconfig
+++ b/arch/powerpc/configs/40x/hcu4_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 4e19ee7..532ea9d 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -5,13 +5,15 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_KILAUEA=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_WALNUT is not set
 CONFIG_SPARSE_IRQ=y
 CONFIG_PCI=y
@@ -42,6 +44,9 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_NDFC=y
 CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=35000
 # CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 651be09..3c142ac 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index ded455e..ff57d48 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 63746a0..3ed16d5 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index f5f2a4e..b1b7d2c 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index ac65b48..30a0a8e 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -4,7 +4,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_PCI_QUIRKS is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 45c64d8..a46942a 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -42,6 +42,9 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_NDFC=y
 CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=35000
 # CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index fedd03f..07d77e5 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index ebff701..2ce7e9a 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 865e93f..18730ff 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -6,7 +6,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 8ece4c7..92f863a 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -7,7 +7,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_PROFILING=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index 4ca9b48..34c0914 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index e3b65d2..21c33fa 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 64cd0f3..01cc2b1 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 01d0336..dfcffed 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 89b2f96..47e399f 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index e3386cf..a6a002e 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 9c13b9d..6cf9d66 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index f234c4d..69b57da 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index a4a795c..f3638ae 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 20d53a1..6828eda 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 6bd5833..7f7e4a8 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -8,7 +8,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 3a1f702..959cd2c 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index eed42d8..d2762d9 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index e43ecb2..7a7b731 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index c2e6ab5..c683bce 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index 1d3b200..a721cd3 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index 91fe73b..a5699a1 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index 6d300f2..b4da1a7 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index b236a67..291f822 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 001dead..f8b228a 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index 9dccefca..99660c0 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index d4b165d..10b5c4c 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index 89ba672..45925d7 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index 2ea6b40..f367985 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index bffe3c7..414eda3 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index fa5c9ee..6d6463f 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index 385b1af..8f7c106 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_KSI8560=y
 CONFIG_CPM2=y
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 222b704..55e0725 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_MPC8540_ADS=y
 CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index 619702d..d724095 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_MPC8560_ADS=y
 CONFIG_BINFMT_MISC=y
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 6bf56e8..4b44bea 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_MPC85xx_CDS=y
 CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index a9a17d0..5b2b651 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_SBC8548=y
diff --git a/arch/powerpc/configs/85xx/sbc8560_defconfig b/arch/powerpc/configs/85xx/sbc8560_defconfig
index 820e32d..f7fdb03 100644
--- a/arch/powerpc/configs/85xx/sbc8560_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8560_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_SBC8560=y
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index b6db3f4..77506b5 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index 333a41bd2..5d4db15 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 33db352..ddcb9f3 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index f0c20df..981abd6 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index a883450..37b3d72 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index ff95f90..3593b32 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index 8d6c90e..de413ac 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index f53efe4..5ea3124 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -11,7 +11,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 432ebc2..4b24412 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index ce5e919..a360ba4 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 589e71e..be2829d 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 321fb47..036bfb2 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_ELF_CORE is not set
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index b5e4639..0c9c7ed 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig
index 71145c3..0a92ca0 100644
--- a/arch/powerpc/configs/86xx/sbc8641d_defconfig
+++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index ca84c7f..6912874 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/e55xx_smp_defconfig
index 94d120e..06f9549 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/e55xx_smp_defconfig
@@ -12,7 +12,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 2677b08..fceffb3c 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -2,7 +2,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 # CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index f9a3112..219fd47 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index fcf0a39..e74d3a4 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 CONFIG_PERF_COUNTERS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index b9b63a6..94ebfee 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_PPC_CHRP is not set
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index c4ed255..39518e9 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -3,7 +3,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 # CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/powerpc/configs/mgsuvd_defconfig b/arch/powerpc/configs/mgsuvd_defconfig
index f276c7c..2a49062 100644
--- a/arch/powerpc/configs/mgsuvd_defconfig
+++ b/arch/powerpc/configs/mgsuvd_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BUG is not set
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index 3b94708..75f0bbf 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index c7d68ff..6a22400 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -2,7 +2,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5b1b10f..5aac9a8 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 3aeb594..99a19d1 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index d62c801..c636f23 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -12,7 +12,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 668215c..5c25882 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BUG is not set
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index 63b90d4..55b5431 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index f9b8348..9e146cd 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index 93d7425..bfd634b 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 2fa05f7..4713320 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index a4353be..baad8db 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -3,7 +3,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 49cffe0..caba919 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -8,7 +8,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_PERF_EVENTS is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index f87f0e1..9c3f22c 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -2,7 +2,7 @@
 CONFIG_ALTIVEC=y
 CONFIG_VSX=y
 CONFIG_SMP=y
-CONFIG_NR_CPUS=128
+CONFIG_NR_CPUS=1024
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -45,6 +45,8 @@
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_PPC_64K_PAGES=y
+CONFIG_PPC_SUBPAGE_PROT=y
 CONFIG_SCHED_SMT=y
 CONFIG_HOTPLUG_PCI=m
 CONFIG_HOTPLUG_PCI_RPA=m
@@ -184,6 +186,7 @@
 CONFIG_E1000=y
 CONFIG_E1000E=y
 CONFIG_TIGON3=y
+CONFIG_BNX2=m
 CONFIG_CHELSIO_T1=m
 CONFIG_CHELSIO_T3=m
 CONFIG_EHEA=y
@@ -311,9 +314,7 @@
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_LATENCYTOP=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQSOFF_TRACER=y
 CONFIG_SCHED_TRACER=y
-CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_DEBUG_STACK_USAGE=y
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 4f0c10a..ebb2a66 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index d0a5b67..8616fde 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index bb8ba75..175295f 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -7,7 +7,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 CONFIG_PERF_COUNTERS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
index 4b0e152..6b6dc20 100644
--- a/arch/powerpc/include/asm/8xx_immap.h
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -93,7 +93,7 @@
 } memctl8xx_t;
 
 /*-----------------------------------------------------------------------
- * BR - Memory Controler: Base Register					16-9
+ * BR - Memory Controller: Base Register					16-9
  */
 #define BR_BA_MSK	0xffff8000	/* Base Address Mask			*/
 #define BR_AT_MSK	0x00007000	/* Address Type Mask			*/
@@ -110,7 +110,7 @@
 #define BR_V		0x00000001	/* Bank Valid				*/
 
 /*-----------------------------------------------------------------------
- * OR - Memory Controler: Option Register				16-11
+ * OR - Memory Controller: Option Register				16-11
  */
 #define OR_AM_MSK	0xffff8000	/* Address Mask Mask			*/
 #define OR_ATM_MSK	0x00007000	/* Address Type Mask Mask		*/
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 30964ae..8a7e9314 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -267,7 +267,16 @@
 #include <asm-generic/bitops/fls64.h>
 #endif /* __powerpc64__ */
 
+#ifdef CONFIG_PPC64
+unsigned int __arch_hweight8(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+#include <asm-generic/bitops/const_hweight.h>
+#else
 #include <asm-generic/bitops/hweight.h>
+#endif
+
 #include <asm-generic/bitops/find.h>
 
 /* Little-endian versions */
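[Note: for reference, a minimal C sketch of the kind of generic population count the non-PPC64 path keeps falling back to via asm-generic/bitops/hweight.h; the PPC64 declarations above allow popcntb/popcntd-based routines to be patched in instead. This is an illustration of the technique, not the kernel's exact code.]

#include <stdint.h>

/* Classic SWAR bit count: fold pair, nibble and byte sums, then add
 * the byte sums with one multiply. */
static inline unsigned int sketch_hweight32(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;				/* pairs   */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);		/* nibbles */
	w = (w + (w >> 4)) & 0x0f0f0f0f;			/* bytes   */
	return (w * 0x01010101) >> 24;				/* total   */
}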
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f3a1fdd..f0a211d 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -199,6 +199,8 @@
 #define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0080000000000000)
 #define CPU_FTR_ASYM_SMT		LONG_ASM_CONST(0x0100000000000000)
 #define CPU_FTR_STCX_CHECKS_ADDRESS	LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_POPCNTB			LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_POPCNTD			LONG_ASM_CONST(0x0800000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -403,21 +405,22 @@
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-	    CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \
+	    CPU_FTR_POPCNTB)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e1844..f71bb4c 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -61,22 +61,25 @@
 	return cpu_thread_mask_to_cores(cpu_online_map);
 }
 
-static inline int cpu_thread_to_core(int cpu)
-{
-	return cpu >> threads_shift;
-}
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
 
 static inline int cpu_thread_in_core(int cpu)
 {
 	return cpu & (threads_per_core - 1);
 }
 
-static inline int cpu_first_thread_in_core(int cpu)
+static inline int cpu_first_thread_sibling(int cpu)
 {
 	return cpu & ~(threads_per_core - 1);
 }
 
-static inline int cpu_last_thread_in_core(int cpu)
+static inline int cpu_last_thread_sibling(int cpu)
 {
 	return cpu | (threads_per_core - 1);
 }
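[Note: the renamed sibling helpers are plain mask arithmetic on the CPU number, while the core-index mapping moves out of line for SMP, presumably because it can no longer be assumed to be a simple shift. A standalone sketch, assuming threads_per_core = 4 purely for illustration:]

#include <stdio.h>

#define THREADS_PER_CORE 4	/* assumed value, illustration only */

static int first_thread_sibling(int cpu) { return cpu & ~(THREADS_PER_CORE - 1); }
static int last_thread_sibling(int cpu)  { return cpu |  (THREADS_PER_CORE - 1); }
static int thread_in_core(int cpu)       { return cpu &  (THREADS_PER_CORE - 1); }

int main(void)
{
	/* cpu 6 is thread 2 of the core whose siblings are cpus 4..7 */
	printf("%d %d %d\n", first_thread_sibling(6),
	       last_thread_sibling(6), thread_in_core(6));	/* prints: 4 7 2 */
	return 0;
}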
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index a3954e4..16d25c0 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -9,6 +9,12 @@
 struct dma_map_ops;
 struct device_node;
 
+/*
+ * Arch extensions to struct device.
+ *
+ * When adding fields, consider macio_add_one_device in
+ * drivers/macintosh/macio_asic.c
+ */
 struct dev_archdata {
 	/* DMA operations on that device */
 	struct dma_map_ops	*dma_ops;
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 96a7d06..921a847 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -37,18 +37,21 @@
 	.align 2;					\
 label##3:
 
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
-label##4:						\
-	.popsection;					\
-	.pushsection sect,"a";				\
-	.align 3;					\
-label##5:					       	\
-	FTR_ENTRY_LONG msk;				\
-	FTR_ENTRY_LONG val;				\
-	FTR_ENTRY_OFFSET label##1b-label##5b;		\
-	FTR_ENTRY_OFFSET label##2b-label##5b;	 	\
-	FTR_ENTRY_OFFSET label##3b-label##5b;		\
-	FTR_ENTRY_OFFSET label##4b-label##5b;	 	\
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)		\
+label##4:							\
+	.popsection;						\
+	.pushsection sect,"a";					\
+	.align 3;						\
+label##5:							\
+	FTR_ENTRY_LONG msk;					\
+	FTR_ENTRY_LONG val;					\
+	FTR_ENTRY_OFFSET label##1b-label##5b;			\
+	FTR_ENTRY_OFFSET label##2b-label##5b;			\
+	FTR_ENTRY_OFFSET label##3b-label##5b;			\
+	FTR_ENTRY_OFFSET label##4b-label##5b;			\
+	.ifgt (label##4b-label##3b)-(label##2b-label##1b);	\
+	.error "Feature section else case larger than body";	\
+	.endif;							\
 	.popsection;
 
 
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 20778a4..4ef662e 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -46,6 +46,7 @@
 #define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
 #define FW_FEATURE_BEAT		ASM_CONST(0x0000000001000000)
 #define FW_FEATURE_CMO		ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_VPHN		ASM_CONST(0x0000000004000000)
 
 #ifndef __ASSEMBLY__
 
@@ -59,7 +60,7 @@
 		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
 		FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
 		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
-		FW_FEATURE_CMO,
+		FW_FEATURE_CMO | FW_FEATURE_VPHN,
 	FW_FEATURE_PSERIES_ALWAYS = 0,
 	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
 	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
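[Note: consumers of the new VPHN (virtual processor home node) facility can gate on the feature bit in the usual way; a minimal hedged sketch, with a hypothetical caller name:]

#include <asm/firmware.h>

/* Illustrative guard only; the actual VPHN users land elsewhere in this series. */
static int example_vphn_available(void)
{
	return firmware_has_feature(FW_FEATURE_VPHN);
}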
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index de03ca5..ec089ac 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -232,7 +232,9 @@
 #define H_GET_EM_PARMS		0x2B8
 #define H_SET_MPP		0x2D0
 #define H_GET_MPP		0x2D4
-#define MAX_HCALL_OPCODE	H_GET_MPP
+#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
+#define H_BEST_ENERGY		0x2F4
+#define MAX_HCALL_OPCODE	H_BEST_ENERGY
 
 #ifndef __ASSEMBLY__
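[Note: a sketch of how the new H_HOME_NODE_ASSOCIATIVITY opcode might be invoked. The flags value and return-buffer interpretation below are assumptions, not taken from this patch.]

#include <asm/hvcall.h>

/* Hypothetical wrapper: query the hypervisor for a virtual processor's
 * home-node associativity. retbuf must hold PLPAR_HCALL9_BUFSIZE entries;
 * the flags argument of 1 is assumed to mean "look up by vcpu id". */
static long example_home_node_assoc(unsigned long vcpu, unsigned long *retbuf)
{
	return plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, 1, vcpu);
}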
 
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index 4e10f50..0edb684 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -467,13 +467,22 @@
 extern struct qe_immap __iomem *qe_immr;
 extern phys_addr_t get_qe_base(void);
 
-static inline unsigned long immrbar_virt_to_phys(void *address)
+/*
+ * Returns the offset within the QE address space of the given pointer.
+ *
+ * Note that the QE does not support 36-bit physical addresses, so if
+ * get_qe_base() returns a number above 4GB, the caller will probably fail.
+ */
+static inline phys_addr_t immrbar_virt_to_phys(void *address)
 {
-	if ( ((u32)address >= (u32)qe_immr) &&
-			((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
-		return (unsigned long)(address - (u32)qe_immr +
-				(u32)get_qe_base());
-	return (unsigned long)virt_to_phys(address);
+	void *q = (void *)qe_immr;
+
+	/* Is it a MURAM address? */
+	if ((address >= q) && (address < (q + QE_IMMAP_SIZE)))
+		return get_qe_base() + (address - q);
+
+	/* It's an address returned by kmalloc */
+	return virt_to_phys(address);
 }
 
 #endif /* __KERNEL__ */
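[Note: a usage sketch for the fixed helper. The register and buffer names are hypothetical; the point is that MURAM-resident and kmalloc'd pointers now both yield a correct phys_addr_t instead of a truncated unsigned long.]

/* Hand a buffer address to the QE: works for MURAM addresses and for
 * kmalloc'd memory alike via the same helper. */
static void example_set_buffer(u32 __iomem *bd_reg, void *buf)
{
	phys_addr_t phys = immrbar_virt_to_phys(buf);

	out_be32(bd_reg, (u32)phys);	/* QE descriptors carry 32-bit addresses */
}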
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b85d8dd..b0b06d8 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -12,24 +12,44 @@
 
 #else
 #ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+#define TRACE_WITH_FRAME_BUFFER(func)		\
+	mflr	r0;				\
+	stdu	r1, -32(r1);			\
+	std	r0, 16(r1);			\
+	stdu	r1, -32(r1);			\
+	bl func;				\
+	ld	r1, 0(r1);			\
+	ld	r1, 0(r1);
+#else
+#define TRACE_WITH_FRAME_BUFFER(func)		\
+	bl func;
+#endif
+
 /*
  * Most of the CPU's IRQ-state tracing is done from assembly code; we
  * have to call a C function so call a wrapper that saves all the
  * C-clobbered registers.
  */
-#define TRACE_ENABLE_INTS	bl .trace_hardirqs_on
-#define TRACE_DISABLE_INTS	bl .trace_hardirqs_off
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)	\
-	cmpdi	en,0;				\
-	bne	95f;				\
-	stb	en,PACASOFTIRQEN(r13);		\
-	bl	.trace_hardirqs_off;		\
-	b	skip;				\
-95:	bl	.trace_hardirqs_on;		\
+#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
+#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
+
+#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)		\
+	cmpdi	en,0;					\
+	bne	95f;					\
+	stb	en,PACASOFTIRQEN(r13);			\
+	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)	\
+	b	skip;					\
+95:	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)	\
 	li	en,1;
 #define TRACE_AND_RESTORE_IRQ(en)		\
 	TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f);	\
-	stb	en,PACASOFTIRQEN(r13);	        \
+	stb	en,PACASOFTIRQEN(r13);		\
 96:
 #else
 #define TRACE_ENABLE_INTS
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 7f5e0fe..26b8c80 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -33,9 +33,25 @@
 //
 //----------------------------------------------------------------------------
 #include <linux/cache.h>
+#include <linux/threads.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS	NR_CPUS
+#else
+#define NR_LPPACAS	64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS	1
+#endif
+
+
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
 struct lppaca {
@@ -62,7 +78,10 @@
 	volatile u32 dyn_pir;		// Dynamic ProcIdReg value	x20-x23
 	u32	dsei_data;           	// DSEI data                  	x24-x27
 	u64	sprg3;               	// SPRG3 value                	x28-x2F
-	u8	reserved3[80];		// Reserved			x30-x7F
+	u8	reserved3[40];		// Reserved			x30-x57
+	volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
+					// associativity change counters x58-x5F
+	u8	reserved4[32];		// Reserved			x60-x7F
 
 //=============================================================================
 // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
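[Note: conceptually, the new NR_LPPACAS constant lets the static allocation shrink from NR_CPUS entries to what legacy iSeries actually needs. A sketch of a consumer, with a hypothetical array name, not the exact paca.c code:]

/* Statically allocated per-partition areas: 64 on legacy iSeries,
 * a single boot entry everywhere else; keep the 1k alignment the
 * hypervisor requires. */
static struct lppaca boot_lppacas[NR_LPPACAS] __attribute__((__aligned__(1024)));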
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index d045b01..fe56a23 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -27,9 +27,7 @@
 struct rtc_time;
 struct file;
 struct pci_controller;
-#ifdef CONFIG_KEXEC
 struct kimage;
-#endif
 
 #ifdef CONFIG_SMP
 struct smp_ops_t {
@@ -72,7 +70,7 @@
 					     int psize, int ssize);
 	void		(*flush_hash_range)(unsigned long number, int local);
 
-	/* special for kexec, to be called in real mode, linar mapping is
+	/* special for kexec, to be called in real mode, linear mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
 
@@ -118,9 +116,6 @@
 	 * If for some reason there is no irq, but the interrupt
 	 * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
 	unsigned int	(*get_irq)(void);
-#ifdef CONFIG_KEXEC
-	void		(*kexec_cpu_down)(int crash_shutdown, int secondary);
-#endif
 
 	/* PCI stuff */
 	/* Called after scanning the bus, before allocating resources */
@@ -237,11 +232,7 @@
 	void (*machine_shutdown)(void);
 
 #ifdef CONFIG_KEXEC
-	/* Called to do the minimal shutdown needed to run a kexec'd kernel
-	 * to run successfully.
-	 * XXX Should we move this one out of kexec scope?
-	 */
-	void (*machine_crash_shutdown)(struct pt_regs *regs);
+	void (*kexec_cpu_down)(int crash_shutdown, int secondary);
 
 	/* Called to do what every setup is needed on image and the
 	 * reboot code buffer. Returns 0 on success.
@@ -250,9 +241,6 @@
 	 */
 	int (*machine_kexec_prepare)(struct kimage *image);
 
-	/* Called to handle any machine specific cleanup on image */
-	void (*machine_kexec_cleanup)(struct kimage *image);
-
 	/* Called to perform the _real_ kexec.
 	 * Do NOT allocate memory or fail here. We are past the point of
 	 * no return.
@@ -324,8 +312,6 @@
 
 #endif /* CONFIG_PPC_PMAC */
 
-extern void setup_pci_ptrs(void);
-
 #ifdef CONFIG_SMP
 /* Poor default implementations */
 extern void __devinit smp_generic_give_timebase(void);
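[Note: with kexec_cpu_down moved under the machdep CONFIG_KEXEC block and struct kimage always forward-declared, a platform wires the hook up roughly as below. All names here are hypothetical.]

#include <asm/machdep.h>

#ifdef CONFIG_KEXEC
/* Hypothetical per-platform CPU quiesce hook for kexec/kdump. */
static void example_kexec_cpu_down(int crash_shutdown, int secondary)
{
	/* mask and ack this CPU's interrupt sources before the new kernel runs */
}
#endif

define_machine(example) {
	.name		= "Example platform",
#ifdef CONFIG_KEXEC
	.kexec_cpu_down	= example_kexec_cpu_down,
#endif
};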
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 8eaed81..17194fc 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,8 +40,8 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x)		((x << 28) & 0x30000000)
-#define MAS0_ESEL(x)		((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x)		(((x) << 28) & 0x30000000)
+#define MAS0_ESEL(x)		(((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_HES		0x00004000
 #define MAS0_WQ_ALLWAYS		0x00000000
@@ -50,12 +50,12 @@
 
 #define MAS1_VALID		0x80000000
 #define MAS1_IPROT		0x40000000
-#define MAS1_TID(x)		((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x)		(((x) << 16) & 0x3FFF0000)
 #define MAS1_IND		0x00002000
 #define MAS1_TS			0x00001000
 #define MAS1_TSIZE_MASK		0x00000f80
 #define MAS1_TSIZE_SHIFT	7
-#define MAS1_TSIZE(x)		((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x)		(((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
 
 #define MAS2_EPN		0xFFFFF000
 #define MAS2_X0			0x00000040
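[Note: the added parentheses matter whenever the macro argument is an expression built from operators that bind more loosely than '<<'. A minimal demonstration of the failure mode being fixed:]

/* Without the inner parentheses the shift applies to only part of the
 * argument expression: */
#define BAD_MAS1_TID(x)		((x << 16) & 0x3FFF0000)
#define GOOD_MAS1_TID(x)	(((x) << 16) & 0x3FFF0000)

/* BAD_MAS1_TID(pid | flag) expands to ((pid | flag << 16) & ...), i.e.
 * pid | (flag << 16), because '<<' binds tighter than '|'.
 * GOOD_MAS1_TID(pid | flag) shifts the whole expression, as intended. */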
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index aac87cb..fd3fd58 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -33,6 +33,9 @@
 extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
 
 /*
@@ -42,6 +45,8 @@
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)
 
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* __KERNEL__ */
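[Note: memory_hotplug_max() gives callers an upper bound that accounts for hot-pluggable memory, and falls back to memblock_end_of_DRAM() when hotplug or NUMA is not configured. A hedged usage sketch with a hypothetical helper:]

#include <asm/mmzone.h>
#include <asm/page.h>

/* Illustrative only: the number of page frames that could ever exist,
 * including memory that is not yet plugged in. */
static u64 example_max_possible_pfns(void)
{
	return memory_hotplug_max() >> PAGE_SHIFT;
}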
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 850b72f..92efe67 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -10,31 +10,7 @@
 #ifndef _ASM_POWERPC_NVRAM_H
 #define _ASM_POWERPC_NVRAM_H
 
-#include <linux/errno.h>
-
-#define NVRW_CNT 0x20
-#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
-#define NVRAM_BLOCK_LEN 16
-#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
-#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
-
-#define NVRAM_AS0  0x74
-#define NVRAM_AS1  0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS	0x1FF9
-#define MOTO_RTC_MINUTES	0x1FFA
-#define MOTO_RTC_HOURS		0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK	0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH	0x1FFD
-#define MOTO_RTC_MONTH		0x1FFE
-#define MOTO_RTC_YEAR		0x1FFF
-#define MOTO_RTC_CONTROLA       0x1FF8
-#define MOTO_RTC_CONTROLB       0x1FF9
-
+/* Signatures for nvram partitions */
 #define NVRAM_SIG_SP	0x02	/* support processor */
 #define NVRAM_SIG_OF	0x50	/* open firmware config */
 #define NVRAM_SIG_FW	0x51	/* general firmware */
@@ -49,32 +25,19 @@
 #define NVRAM_SIG_OS	0xa0	/* OS defined */
 #define NVRAM_SIG_PANIC	0xa1	/* Apple OSX "panic" */
 
-/* If change this size, then change the size of NVNAME_LEN */
-struct nvram_header {
-	unsigned char signature;
-	unsigned char checksum;
-	unsigned short length;
-	char name[12];
-};
-
 #ifdef __KERNEL__
 
+#include <linux/errno.h>
 #include <linux/list.h>
 
-struct nvram_partition {
-	struct list_head partition;
-	struct nvram_header header;
-	unsigned int index;
-};
-
-
+#ifdef CONFIG_PPC_PSERIES
 extern int nvram_write_error_log(char * buff, int length,
 					 unsigned int err_type, unsigned int err_seq);
 extern int nvram_read_error_log(char * buff, int length,
 					 unsigned int * err_type, unsigned int *err_seq);
 extern int nvram_clear_error_log(void);
-
 extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
 
 #ifdef CONFIG_MMIO_NVRAM
 extern int mmio_nvram_init(void);
@@ -85,6 +48,13 @@
 }
 #endif
 
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+				     int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
+
 #endif /* __KERNEL__ */
 
 /* PowerMac specific nvram stuffs */
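[Note: the header now exports a small partition API instead of the raw on-NVRAM structures. A hedged usage sketch with hypothetical names; the size arguments are assumed to be in bytes, and a zero/negative return from the lookup is assumed to mean "not found".]

#include <asm/nvram.h>

/* Hypothetical driver-side use of the exported partition API. */
static int example_nvram_setup(void)
{
	loff_t part;
	int size;

	part = nvram_create_partition("example-log", NVRAM_SIG_OS, 1024, 512);
	if (part < 0)
		return (int)part;

	/* Later lookups locate the partition by name and signature. */
	part = nvram_find_partition("example-log", NVRAM_SIG_OS, &size);
	return part > 0 ? 0 : -ENOENT;
}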
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 53b64be..da4b200 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -101,7 +101,7 @@
 
 #ifdef CONFIG_FLATMEM
 #define ARCH_PFN_OFFSET		(MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
 #endif
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
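[Note: the pfn_valid() change implies max_mapnr already encodes the upper pfn bound (including ARCH_PFN_OFFSET), so adding the offset again over-extended the valid range. A tiny illustration with made-up numbers:]

/* Assumed layout: RAM starts at 256MB, 512MB of RAM, 4K pages.
 * ARCH_PFN_OFFSET = 0x10000, max_mapnr = 0x30000 (end pfn of RAM).
 * Old test: pfn < 0x10000 + 0x30000  -> wrongly accepts pfns up to 0x40000.
 * New test: pfn < 0x30000            -> matches the real end of RAM. */
static int sketch_pfn_valid(unsigned long pfn)
{
	const unsigned long pfn_offset = 0x10000;	/* assumed */
	const unsigned long end_pfn    = 0x30000;	/* assumed */

	return pfn >= pfn_offset && pfn < end_pfn;
}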
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 43adc8b..1255569 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -36,6 +36,8 @@
 #define PPC_INST_NOP			0x60000000
 #define PPC_INST_POPCNTB		0x7c0000f4
 #define PPC_INST_POPCNTB_MASK		0xfc0007fe
+#define PPC_INST_POPCNTD		0x7c0003f4
+#define PPC_INST_POPCNTW		0x7c0002f4
 #define PPC_INST_RFCI			0x4c000066
 #define PPC_INST_RFDI			0x4c00004e
 #define PPC_INST_RFMCI			0x4c00004c
@@ -88,6 +90,12 @@
 					__PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
 					__PPC_RB(b))
+#define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTD(a, s)	stringify_in_c(.long PPC_INST_POPCNTD | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTW(a, s)	stringify_in_c(.long PPC_INST_POPCNTW | \
+					__PPC_RA(a) | __PPC_RS(s))
 #define PPC_RFCI		stringify_in_c(.long PPC_INST_RFCI)
 #define PPC_RFDI		stringify_in_c(.long PPC_INST_RFDI)
 #define PPC_RFMCI		stringify_in_c(.long PPC_INST_RFMCI)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 4c14187..de1967a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -122,7 +122,6 @@
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
 #endif
 
-#ifdef __KERNEL__
 #ifdef __powerpc64__
 
 #define STACK_TOP_USER64 TASK_SIZE_USER64
@@ -139,7 +138,6 @@
 #define STACK_TOP_MAX	STACK_TOP
 
 #endif /* __powerpc64__ */
-#endif /* __KERNEL__ */
 
 typedef struct {
 	unsigned long seg;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index ff0005eec..125fc1a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -283,6 +283,7 @@
 #define HID0_NOPTI	(1<<0)		/* No-op dcbt and dcbst instr. */
 
 #define SPRN_HID1	0x3F1		/* Hardware Implementation Register 1 */
+#ifdef CONFIG_6xx
 #define HID1_EMCP	(1<<31)		/* 7450 Machine Check Pin Enable */
 #define HID1_DFS	(1<<22)		/* 7447A Dynamic Frequency Scaling */
 #define HID1_PC0	(1<<16)		/* 7450 PLL_CFG[0] */
@@ -292,6 +293,7 @@
 #define HID1_SYNCBE	(1<<11)		/* 7450 ABE for sync, eieio */
 #define HID1_ABE	(1<<10)		/* 7450 Address Broadcast Enable */
 #define HID1_PS		(1<<16)		/* 750FX PLL selection */
+#endif
 #define SPRN_HID2	0x3F8		/* Hardware Implementation Register 2 */
 #define SPRN_HID2_GEKKO	0x398		/* Gekko HID2 Register */
 #define SPRN_IABR	0x3F2	/* Instruction Address Breakpoint Register */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 667a498..e68c69b 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -246,6 +246,20 @@
 					store or cache line push */
 #endif
 
+/* Bit definitions for the HID1 */
+#ifdef CONFIG_E500
+/* e500v1/v2 */
+#define HID1_PLL_CFG_MASK 0xfc000000	/* PLL_CFG input pins */
+#define HID1_RFXE	0x00020000	/* Read fault exception enable */
+#define HID1_R1DPE	0x00008000	/* R1 data bus parity enable */
+#define HID1_R2DPE	0x00004000	/* R2 data bus parity enable */
+#define HID1_ASTME	0x00002000	/* Address bus streaming mode enable */
+#define HID1_ABE	0x00001000	/* Address broadcast enable */
+#define HID1_MPXTT	0x00000400	/* MPX re-map transfer type */
+#define HID1_ATS	0x00000080	/* Atomic status */
+#define HID1_MID_MASK	0x0000000f	/* MID input pins */
+#endif
+
 /* Bit definitions for the DBSR. */
 /*
  * DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 0ab8d869..0c8b35d 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -203,14 +203,6 @@
 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		void *code, int code_size);
 
-#ifdef CONFIG_KEXEC
-void crash_register_spus(struct list_head *list);
-#else
-static inline void crash_register_spus(struct list_head *list)
-{
-}
-#endif
-
 extern void spu_invalidate_slbs(struct spu *spu);
 extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
 int spu_64k_pages_available(void);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index afe4aaa..7ef0d90 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -106,9 +106,22 @@
 						int nid)
 {
 }
-
 #endif /* CONFIG_NUMA */
 
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+extern int start_topology_update(void);
+extern int stop_topology_update(void);
+#else
+static inline int start_topology_update(void)
+{
+	return 0;
+}
+static inline int stop_topology_update(void)
+{
+	return 0;
+}
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
 #include <asm-generic/topology.h>
 
 #ifdef CONFIG_SMP
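[Note: start_topology_update()/stop_topology_update() compile to inline stubs unless both NUMA and PPC_SPLPAR are enabled, so callers need no extra guards. A hedged sketch with a hypothetical caller:]

#include <asm/topology.h>

/* Illustrative: pause VPHN-driven topology updates around an operation
 * and resume them afterwards; on other configurations both calls are
 * no-ops returning 0. */
static void example_with_updates_paused(void (*fn)(void))
{
	stop_topology_update();
	fn();
	start_topology_update();
}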
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 08679c5..25e3922 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -116,9 +116,7 @@
 
 #endif /* CONFIG_PPC64 */
 
-#ifdef __KERNEL__
 extern struct vdso_data *vdso_data;
-#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 36c30f3..3bb2a3e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,8 +29,10 @@
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
-				   signal.o sysfs.o cacheinfo.o
-obj-y				+= vdso32/
+				   signal.o sysfs.o cacheinfo.o time.o \
+				   prom.o traps.o setup-common.o \
+				   udbg.o misc.o io.o dma.o \
+				   misc_$(CONFIG_WORD_SIZE).o vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
@@ -80,9 +82,6 @@
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-y				+= vmlinux.lds
 
-obj-y				+= time.o prom.o traps.o setup-common.o \
-				   udbg.o misc.o io.o dma.o \
-				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bd0df2e..23e6a93 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -209,7 +209,6 @@
 	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
 
 	/* Interrupt register frame */
-	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
 	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a..f8cd9fb 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
 #include <asm/mmu.h>
 
 _GLOBAL(__setup_cpu_603)
-	mflr	r4
+	mflr	r5
 BEGIN_MMU_FTR_SECTION
 	li	r10,0
 	mtspr	SPRN_SPRG_603_LRU,r10		/* init SW LRU tracking */
@@ -27,60 +27,60 @@
 	bl	__init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 	bl	setup_common_caches
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_604)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_604_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750cx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750cx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750fx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750fx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7400)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7400_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7410)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7410_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	li	r3,0
 	mtspr	SPRN_L2CR2,r3
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_745x)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_745x_specifics
-	mtlr	r4
+	mtlr	r5
 	blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@
 	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
 	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
 	bnelr
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 	blr
 
 /* 750fx specific
@@ -225,12 +225,12 @@
 	andis.	r11,r11,L3CR_L3E@h
 	beq	1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
 	beq	1f
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 1:
 	mfspr	r11,SPRN_HID0
 
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 894e64f..5c518ad 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -64,6 +64,12 @@
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
 	bl	__setup_e500_ivors
+#ifdef CONFIG_RAPIDIO
+	/* Ensure that RFXE is set */
+	mfspr	r3,SPRN_HID1
+	oris	r3,r3,HID1_RFXE@h
+	mtspr	SPRN_HID1,r3
+#endif
 	mtlr	r4
 	blr
 _GLOBAL(__setup_cpu_e500mc)
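[Note: the same RFXE setting expressed as a C sketch, illustrative only; the patch does it in the early asm setup because it must happen during CPU setup, and the function name below is hypothetical.]

#include <asm/reg.h>
#include <asm/reg_booke.h>

/* Force read-fault exceptions on e500 when RapidIO support is built in,
 * mirroring what the assembly above does. */
static void example_enable_rfxe(void)
{
#if defined(CONFIG_RAPIDIO) && defined(CONFIG_E500)
	mtspr(SPRN_HID1, mfspr(SPRN_HID1) | HID1_RFXE);
#endif
}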
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 96a908f..e8e915c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -116,7 +116,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power3",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "power3",
 	},
 	{	/* Power3+ */
@@ -132,7 +131,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power3",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "power3",
 	},
 	{	/* Northstar */
@@ -148,7 +146,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* Pulsar */
@@ -164,7 +161,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* I-star */
@@ -180,7 +176,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* S-star */
@@ -196,7 +191,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* Power4 */
@@ -212,7 +206,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power4",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	},
 	{	/* Power4+ */
@@ -228,7 +221,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power4",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	},
 	{	/* PPC970 */
@@ -247,7 +239,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970FX */
@@ -266,7 +257,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -285,7 +275,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970MP",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970MP */
@@ -304,7 +293,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970MP",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970GX */
@@ -322,7 +310,6 @@
 		.cpu_setup		= __setup_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* Power5 GR */
@@ -343,7 +330,6 @@
 		 */
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5",
 	},
 	{	/* Power5++ */
@@ -360,7 +346,6 @@
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5+",
 	},
 	{	/* Power5 GS */
@@ -378,7 +363,6 @@
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5+",
 	},
 	{	/* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -390,7 +374,6 @@
 		.mmu_features		= MMU_FTR_HPTE_TABLE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "power5+",
@@ -413,7 +396,6 @@
 		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
 		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
 			POWER6_MMCRA_OTHER,
-		.machine_check		= machine_check_generic,
 		.platform		= "power6x",
 	},
 	{	/* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -425,7 +407,6 @@
 		.mmu_features		= MMU_FTR_HPTE_TABLE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "power6",
@@ -440,7 +421,6 @@
 			MMU_FTR_TLBIE_206,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.platform		= "power7",
@@ -457,16 +437,26 @@
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.cpu_setup		= __setup_cpu_power7,
-		.cpu_restore		= __restore_cpu_power7,
 		.oprofile_cpu_type	= "ppc64/power7",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.oprofile_mmcra_sihv	= POWER6_MMCRA_SIHV,
-		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
-		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
-			POWER6_MMCRA_OTHER,
 		.platform		= "power7",
 	},
+	{	/* Power7+ */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x004A0000,
+		.cpu_name		= "POWER7+ (raw)",
+		.cpu_features		= CPU_FTRS_POWER7,
+		.cpu_user_features	= COMMON_USER_POWER7,
+		.mmu_features		= MMU_FTR_HPTE_TABLE |
+			MMU_FTR_TLBIE_206,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power7",
+		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.platform		= "power7+",
+	},
 	{	/* Cell Broadband Engine */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00700000,
@@ -482,7 +472,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/cell-be",
 		.oprofile_type		= PPC_OPROFILE_CELL,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc-cell-be",
 	},
 	{	/* PA Semi PA6T */
@@ -500,7 +489,6 @@
 		.cpu_restore		= __restore_cpu_pa6t,
 		.oprofile_cpu_type	= "ppc64/pa6t",
 		.oprofile_type		= PPC_OPROFILE_PA6T,
-		.machine_check		= machine_check_generic,
 		.platform		= "pa6t",
 	},
 	{	/* default match */
@@ -514,7 +502,6 @@
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	}
 #endif	/* CONFIG_PPC_BOOK3S_64 */
@@ -2089,8 +2076,8 @@
 	 * pointer on ppc64 and booke as we are running at 0 in real mode
 	 * on ppc64 and reloc_offset is always 0 on booke.
 	 */
-	if (s->cpu_setup) {
-		s->cpu_setup(offset, s);
+	if (t->cpu_setup) {
+		t->cpu_setup(offset, t);
 	}
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
 }
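The new POWER7+ row is matched the same way as every other entry in this table: the processor version register is masked with pvr_mask and compared against pvr_value, top to bottom, with the all-zero default entry catching anything unknown. A standalone sketch of that lookup; the struct name, the catch-all row's label and the sample PVR values are made up for illustration.

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-in for the pvr_mask/pvr_value fields used above. */
struct pvr_match {
	uint32_t pvr_mask;
	uint32_t pvr_value;
	const char *cpu_name;
};

static const struct pvr_match table[] = {
	{ 0xffff0000, 0x004A0000, "POWER7+ (raw)" },
	{ 0xffff0000, 0x00700000, "Cell Broadband Engine" },
	{ 0x00000000, 0x00000000, "default" },	/* catch-all, like the last row above */
};

static const char *identify(uint32_t pvr)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((pvr & table[i].pvr_mask) == table[i].pvr_value)
			return table[i].cpu_name;
	return NULL;
}

int main(void)
{
	/* 0x004A0201 is an invented POWER7+ revision; only the top half is compared. */
	assert(identify(0x004A0201) == table[0].cpu_name);
	/* Unknown PVRs fall through to the catch-all entry. */
	assert(identify(0x12345678) == table[2].cpu_name);
	return 0;
}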
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 832c8c4..3d569e2 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,7 +48,7 @@
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
-#define CRASH_HANDLER_MAX 2
+#define CRASH_HANDLER_MAX 3
 /* NULL terminated list of shutdown handles */
 static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
 static DEFINE_SPINLOCK(crash_handlers_lock);
@@ -125,7 +125,7 @@
 	smp_wmb();
 
 	/*
-	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
+	 * FIXME: Until we have a way to stop other CPUs reliably,
 	 * the crash CPU will send an IPI and wait for other CPUs to
 	 * respond.
 	 * Delay of at least 10 seconds.
@@ -254,72 +254,6 @@
 	cpus_in_sr = CPU_MASK_NONE;
 }
 #endif
-#ifdef CONFIG_SPU_BASE
-
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-struct crash_spu_info {
-	struct spu *spu;
-	u32 saved_spu_runcntl_RW;
-	u32 saved_spu_status_R;
-	u32 saved_spu_npc_RW;
-	u64 saved_mfc_sr1_RW;
-	u64 saved_mfc_dar;
-	u64 saved_mfc_dsisr;
-};
-
-#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
-static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
-
-static void crash_kexec_stop_spus(void)
-{
-	struct spu *spu;
-	int i;
-	u64 tmp;
-
-	for (i = 0; i < CRASH_NUM_SPUS; i++) {
-		if (!crash_spu_info[i].spu)
-			continue;
-
-		spu = crash_spu_info[i].spu;
-
-		crash_spu_info[i].saved_spu_runcntl_RW =
-			in_be32(&spu->problem->spu_runcntl_RW);
-		crash_spu_info[i].saved_spu_status_R =
-			in_be32(&spu->problem->spu_status_R);
-		crash_spu_info[i].saved_spu_npc_RW =
-			in_be32(&spu->problem->spu_npc_RW);
-
-		crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
-		crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
-		tmp = spu_mfc_sr1_get(spu);
-		crash_spu_info[i].saved_mfc_sr1_RW = tmp;
-
-		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
-		spu_mfc_sr1_set(spu, tmp);
-
-		__delay(200);
-	}
-}
-
-void crash_register_spus(struct list_head *list)
-{
-	struct spu *spu;
-
-	list_for_each_entry(spu, list, full_list) {
-		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
-			continue;
-
-		crash_spu_info[spu->number].spu = spu;
-	}
-}
-
-#else
-static inline void crash_kexec_stop_spus(void)
-{
-}
-#endif /* CONFIG_SPU_BASE */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
@@ -439,8 +373,6 @@
 	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;
 
-	crash_kexec_stop_spus();
-
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 }
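With the built-in SPU hook gone and CRASH_HANDLER_MAX raised above, the shutdown-handle list is purely a registration interface. A hedged sketch of a module registering one handler; the handler body and module boilerplate are hypothetical, while crash_shutdown_register()/crash_shutdown_unregister() are the calls this list backs.

#include <linux/init.h>
#include <linux/module.h>
#include <asm/kexec.h>		/* crash_shutdown_t and the register/unregister calls */

/* Hypothetical handler: quiesce a device before the kdump kernel takes over.
 * It runs in crash context, so it must be quick and must not sleep. */
static void example_crash_handler(void)
{
}

static int __init example_init(void)
{
	/* Returns nonzero once all CRASH_HANDLER_MAX slots are taken. */
	return crash_shutdown_register(&example_crash_handler);
}

static void __exit example_exit(void)
{
	crash_shutdown_unregister(&example_crash_handler);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");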
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 8e05c16..0a2af50 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -19,6 +19,7 @@
 #include <asm/prom.h>
 #include <asm/firmware.h>
 #include <asm/uaccess.h>
+#include <asm/rtas.h>
 
 #ifdef DEBUG
 #include <asm/udbg.h>
@@ -141,3 +142,35 @@
 
 	return csize;
 }
+
+#ifdef CONFIG_PPC_RTAS
+/*
+ * The crashkernel region will almost always overlap the RTAS region, so
+ * we have to be careful when shrinking the crashkernel region.
+ */
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+	const u32 *basep, *sizep;
+	unsigned int rtas_start = 0, rtas_end = 0;
+
+	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
+	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
+
+	if (basep && sizep) {
+		rtas_start = *basep;
+		rtas_end = *basep + *sizep;
+	}
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		/* Does this page overlap with the RTAS region? */
+		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
+			continue;
+
+		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+		free_page((unsigned long)__va(addr));
+		totalram_pages++;
+	}
+}
+#endif
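The skip test above is deliberately conservative at the boundaries: a page is kept reserved if it could touch the RTAS range at all. A small userspace sketch of the same predicate, with made-up addresses and 4K pages, to make the edge cases visible.

#include <assert.h>
#include <stdbool.h>

#define EXAMPLE_PAGE_SIZE 4096UL	/* 4K pages assumed for the example */

/* Mirrors the check above: skip freeing any page that may touch
 * [rtas_start, rtas_end]; the top boundary is treated inclusively. */
static bool page_touches_rtas(unsigned long addr,
			      unsigned long rtas_start,
			      unsigned long rtas_end)
{
	return addr <= rtas_end && (addr + EXAMPLE_PAGE_SIZE) > rtas_start;
}

int main(void)
{
	unsigned long rtas_start = 0x2000000, rtas_end = 0x2010000;

	assert(!page_touches_rtas(0x1fff000, rtas_start, rtas_end)); /* ends right at rtas_start */
	assert(page_touches_rtas(0x2000000, rtas_start, rtas_end));  /* first RTAS page */
	assert(page_touches_rtas(0x2010000, rtas_start, rtas_end));  /* addr == rtas_end is still skipped */
	assert(!page_touches_rtas(0x2011000, rtas_start, rtas_end)); /* clearly past the range */
	return 0;
}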
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 6e54a0f..e755415 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -19,7 +19,7 @@
 				      dma_addr_t *dma_handle, gfp_t flag)
 {
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
-				    dma_handle, device_to_mask(dev), flag,
+				    dma_handle, dev->coherent_dma_mask, flag,
 				    dev_to_node(dev));
 }
 
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ed4aeb9..56212bc 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/ptrace.h>
 
 #undef SHOW_SYSCALLS
 #undef SHOW_SYSCALLS_TASK
@@ -879,7 +880,18 @@
 	 */
 	andi.	r10,r9,MSR_EE
 	beq	1f
+	/*
+	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+	 * which is the stack frame here, we need to force a stack frame
+	 * in case we came from user space.
+	 */
+	stwu	r1,-32(r1)
+	mflr	r0
+	stw	r0,4(r1)
+	stwu	r1,-32(r1)
 	bl	trace_hardirqs_on
+	lwz	r1,0(r1)
+	lwz	r1,0(r1)
 	lwz	r9,_MSR(r1)
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f8b01d..8a81799 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
  */
 
 #include <asm/exception-64s.h>
+#include <asm/ptrace.h>
 
 /*
  * We layout physical memory as follows:
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index e86c040..de36955 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -23,6 +23,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 #ifdef CONFIG_VSX
 #define REST_32FPVSRS(n,c,base)						\
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 8278e8b..9dd21a8 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -40,6 +40,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* As with the other PowerPC ports, it is expected that when code
  * execution begins here, the following registers contain valid, yet
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 562305b..cbb3436 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 #include <asm/synch.h>
 #include "head_booke.h"
 
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f0dd577..782f23d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -38,6 +38,7 @@
 #include <asm/page_64.h>
 #include <asm/irqflags.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/ptrace.h>
 
 /* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -96,7 +97,7 @@
 	.llong hvReleaseData-KERNELBASE
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_RELOCATABLE
 	/* This flag is set to 1 by a loader if the kernel should run
 	 * at the loaded address instead of the linked address.  This
 	 * is used by kexec-tools to keep the kdump kernel in the
@@ -384,12 +385,10 @@
 	/* process relocations for the final address of the kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	sldi	r25,r25,32
-#ifdef CONFIG_CRASH_DUMP
 	lwz	r7,__run_at_load-_stext(r26)
-	cmplwi	cr0,r7,1	/* kdump kernel ? - stay where we are */
+	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
 	bne	1f
 	add	r25,r25,r26
-#endif
 1:	mr	r3,r25
 	bl	.relocate
 #endif
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 1f1a04b..1cbf64e 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -29,6 +29,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* Macro to make the code more readable. */
 #ifdef CONFIG_8xx_CPU6
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 529b817..3e02710 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -41,6 +41,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
+#include <asm/ptrace.h>
 #include "head_booke.h"
 
 /* As with the other PowerPC ports, it is expected that when code
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d583917..961bb03 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -311,8 +311,9 @@
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
 			if (printk_ratelimit())
-				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
-				       " npages %lx\n", tbl, vaddr, npages);
+				dev_info(dev, "iommu_alloc failed, tbl %p "
+					 "vaddr %lx npages %lu\n", tbl, vaddr,
+					 npages);
 			goto failure;
 		}
 
@@ -579,9 +580,9 @@
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit())  {
-				printk(KERN_INFO "iommu_alloc failed, "
-						"tbl %p vaddr %p npages %d\n",
-						tbl, vaddr, npages);
+				dev_info(dev, "iommu_alloc failed, tbl %p "
+					 "vaddr %p npages %d\n", tbl, vaddr,
+					 npages);
 			}
 		} else
 			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
@@ -627,7 +628,8 @@
 	 * the tce tables.
 	 */
 	if (order >= IOMAP_MAX_ORDER) {
-		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
+		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
+			 size);
 		return NULL;
 	}
 
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index df7e20c..a5f8672 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/irq.h>
+#include <linux/ftrace.h>
 
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -44,10 +45,7 @@
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-	if (ppc_md.machine_crash_shutdown)
-		ppc_md.machine_crash_shutdown(regs);
-	else
-		default_machine_crash_shutdown(regs);
+	default_machine_crash_shutdown(regs);
 }
 
 /*
@@ -65,8 +63,6 @@
 
 void machine_kexec_cleanup(struct kimage *image)
 {
-	if (ppc_md.machine_kexec_cleanup)
-		ppc_md.machine_kexec_cleanup(image);
 }
 
 void arch_crash_save_vmcoreinfo(void)
@@ -87,11 +83,17 @@
  */
 void machine_kexec(struct kimage *image)
 {
+	int save_ftrace_enabled;
+
+	save_ftrace_enabled = __ftrace_enabled_save();
+
 	if (ppc_md.machine_kexec)
 		ppc_md.machine_kexec(image);
 	else
 		default_machine_kexec(image);
 
+	__ftrace_enabled_restore(save_ftrace_enabled);
+
 	/* Fall back to normal restart if we're still alive. */
 	machine_restart(NULL);
 	for(;;);
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 2d29752..b69463e 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -122,8 +122,3 @@
 	mtlr	r0
 	mr	r3,r4
 	blr
-
-_GLOBAL(__setup_cpu_power7)
-_GLOBAL(__restore_cpu_power7)
-	/* place holder */
-	blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index a7a570d..094bd98 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -30,6 +30,7 @@
 #include <asm/processor.h>
 #include <asm/kexec.h>
 #include <asm/bug.h>
+#include <asm/ptrace.h>
 
 	.text
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e514490..206a321 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -25,6 +25,7 @@
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/kexec.h>
+#include <asm/ptrace.h>
 
 	.text
 
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 9cf197f..bb12b32 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -34,15 +34,26 @@
 
 #undef DEBUG_NVRAM
 
-static struct nvram_partition * nvram_part;
-static long nvram_error_log_index = -1;
-static long nvram_error_log_size = 0;
+#define NVRAM_HEADER_LEN	sizeof(struct nvram_header)
+#define NVRAM_BLOCK_LEN		NVRAM_HEADER_LEN
 
-struct err_log_info {
-	int error_type;
-	unsigned int seq_num;
+/* If you change this size, also change NVNAME_LEN to match. */
+struct nvram_header {
+	unsigned char signature;
+	unsigned char checksum;
+	unsigned short length;
+	/* Terminating null required only for names < 12 chars. */
+	char name[12];
 };
 
+struct nvram_partition {
+	struct list_head partition;
+	struct nvram_header header;
+	unsigned int index;
+};
+
+static LIST_HEAD(nvram_partitions);
+
 static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
 {
 	int size;
@@ -186,14 +197,12 @@
 #ifdef DEBUG_NVRAM
 static void __init nvram_print_partitions(char * label)
 {
-	struct list_head * p;
 	struct nvram_partition * tmp_part;
 	
 	printk(KERN_WARNING "--------%s---------\n", label);
 	printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
-	list_for_each(p, &nvram_part->partition) {
-		tmp_part = list_entry(p, struct nvram_partition, partition);
-		printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%s\n",
+	list_for_each_entry(tmp_part, &nvram_partitions, partition) {
+		printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%12s\n",
 		       tmp_part->index, tmp_part->header.signature,
 		       tmp_part->header.checksum, tmp_part->header.length,
 		       tmp_part->header.name);
@@ -228,95 +237,113 @@
 	return c_sum;
 }
 
-static int __init nvram_remove_os_partition(void)
+/**
+ * nvram_remove_partition - Remove one or more partitions in nvram
+ * @name: name of the partition to remove, or NULL for a
+ *        signature only match
+ * @sig: signature of the partition(s) to remove
+ */
+
+int __init nvram_remove_partition(const char *name, int sig)
 {
-	struct list_head *i;
-	struct list_head *j;
-	struct nvram_partition * part;
-	struct nvram_partition * cur_part;
+	struct nvram_partition *part, *prev, *tmp;
 	int rc;
 
-	list_for_each(i, &nvram_part->partition) {
-		part = list_entry(i, struct nvram_partition, partition);
-		if (part->header.signature != NVRAM_SIG_OS)
+	list_for_each_entry(part, &nvram_partitions, partition) {
+		if (part->header.signature != sig)
 			continue;
-		
-		/* Make os partition a free partition */
+		if (name && strncmp(name, part->header.name, 12))
+			continue;
+
+		/* Make partition a free partition */
 		part->header.signature = NVRAM_SIG_FREE;
-		sprintf(part->header.name, "wwwwwwwwwwww");
+		strncpy(part->header.name, "wwwwwwwwwwww", 12);
 		part->header.checksum = nvram_checksum(&part->header);
-
-		/* Merge contiguous free partitions backwards */
-		list_for_each_prev(j, &part->partition) {
-			cur_part = list_entry(j, struct nvram_partition, partition);
-			if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-				break;
-			}
-			
-			part->header.length += cur_part->header.length;
-			part->header.checksum = nvram_checksum(&part->header);
-			part->index = cur_part->index;
-
-			list_del(&cur_part->partition);
-			kfree(cur_part);
-			j = &part->partition; /* fixup our loop */
-		}
-		
-		/* Merge contiguous free partitions forwards */
-		list_for_each(j, &part->partition) {
-			cur_part = list_entry(j, struct nvram_partition, partition);
-			if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-				break;
-			}
-
-			part->header.length += cur_part->header.length;
-			part->header.checksum = nvram_checksum(&part->header);
-
-			list_del(&cur_part->partition);
-			kfree(cur_part);
-			j = &part->partition; /* fixup our loop */
-		}
-		
 		rc = nvram_write_header(part);
 		if (rc <= 0) {
-			printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc);
+			printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
 			return rc;
 		}
+	}
 
+	/* Merge contiguous ones */
+	prev = NULL;
+	list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) {
+		if (part->header.signature != NVRAM_SIG_FREE) {
+			prev = NULL;
+			continue;
+		}
+		if (prev) {
+			prev->header.length += part->header.length;
+			prev->header.checksum = nvram_checksum(&prev->header);
+			rc = nvram_write_header(prev);
+			if (rc <= 0) {
+				printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
+				return rc;
+			}
+			list_del(&part->partition);
+			kfree(part);
+		} else
+			prev = part;
 	}
 	
 	return 0;
 }
 
-/* nvram_create_os_partition
+/**
+ * nvram_create_partition - Create a partition in nvram
+ * @name: name of the partition to create
+ * @sig: signature of the partition to create
+ * @req_size: size of data to allocate in bytes
+ * @min_size: minimum acceptable size (0 means req_size)
  *
- * Create a OS linux partition to buffer error logs.
- * Will create a partition starting at the first free
- * space found if space has enough room.
+ * Returns a negative error code or a positive nvram index
+ * of the beginning of the data area of the newly created
+ * partition. If you provided a min_size smaller than req_size
+ * you need to query for the actual size yourself after the
+ * call using nvram_get_partition_size().
  */
-static int __init nvram_create_os_partition(void)
+loff_t __init nvram_create_partition(const char *name, int sig,
+				     int req_size, int min_size)
 {
 	struct nvram_partition *part;
 	struct nvram_partition *new_part;
 	struct nvram_partition *free_part = NULL;
-	int seq_init[2] = { 0, 0 };
+	static char nv_init_vals[16];
 	loff_t tmp_index;
 	long size = 0;
 	int rc;
-	
+
+	/* Convert sizes from bytes to blocks */
+	req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+	min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+
+	/* If no minimum size specified, make it the same as the
+	 * requested size
+	 */
+	if (min_size == 0)
+		min_size = req_size;
+	if (min_size > req_size)
+		return -EINVAL;
+
+	/* Now add one block to each for the header */
+	req_size += 1;
+	min_size += 1;
+
 	/* Find a free partition that will give us the requested size; if
 	   none is large enough, fall back to the largest free partition
 	   that still meets the minimum size. */
-	list_for_each_entry(part, &nvram_part->partition, partition) {
+	list_for_each_entry(part, &nvram_partitions, partition) {
 		if (part->header.signature != NVRAM_SIG_FREE)
 			continue;
 
-		if (part->header.length >= NVRAM_MAX_REQ) {
-			size = NVRAM_MAX_REQ;
+		if (part->header.length >= req_size) {
+			size = req_size;
 			free_part = part;
 			break;
 		}
-		if (!size && part->header.length >= NVRAM_MIN_REQ) {
-			size = NVRAM_MIN_REQ;
+		if (part->header.length > size &&
+		    part->header.length >= min_size) {
+			size = part->header.length;
 			free_part = part;
 		}
 	}
@@ -326,136 +353,95 @@
 	/* Create our OS partition */
 	new_part = kmalloc(sizeof(*new_part), GFP_KERNEL);
 	if (!new_part) {
-		printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n");
+		pr_err("nvram_create_os_partition: kmalloc failed\n");
 		return -ENOMEM;
 	}
 
 	new_part->index = free_part->index;
-	new_part->header.signature = NVRAM_SIG_OS;
+	new_part->header.signature = sig;
 	new_part->header.length = size;
-	strcpy(new_part->header.name, "ppc64,linux");
+	strncpy(new_part->header.name, name, 12);
 	new_part->header.checksum = nvram_checksum(&new_part->header);
 
 	rc = nvram_write_header(new_part);
 	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-				"failed (%d)\n", rc);
-		return rc;
-	}
-
-	/* make sure and initialize to zero the sequence number and the error
-	   type logged */
-	tmp_index = new_part->index + NVRAM_HEADER_LEN;
-	rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write "
+		pr_err("nvram_create_os_partition: nvram_write_header "
 		       "failed (%d)\n", rc);
 		return rc;
 	}
-	
-	nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN;
-	nvram_error_log_size = ((part->header.length - 1) *
-				NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-	
 	list_add_tail(&new_part->partition, &free_part->partition);
 
-	if (free_part->header.length <= size) {
+	/* Adjust or remove the partition we stole the space from */
+	if (free_part->header.length > size) {
+		free_part->index += size * NVRAM_BLOCK_LEN;
+		free_part->header.length -= size;
+		free_part->header.checksum = nvram_checksum(&free_part->header);
+		rc = nvram_write_header(free_part);
+		if (rc <= 0) {
+			pr_err("nvram_create_os_partition: nvram_write_header "
+			       "failed (%d)\n", rc);
+			return rc;
+		}
+	} else {
 		list_del(&free_part->partition);
 		kfree(free_part);
-		return 0;
 	} 
 
-	/* Adjust the partition we stole the space from */
-	free_part->index += size * NVRAM_BLOCK_LEN;
-	free_part->header.length -= size;
-	free_part->header.checksum = nvram_checksum(&free_part->header);
-	
-	rc = nvram_write_header(free_part);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-		       "failed (%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-
-/* nvram_setup_partition
- *
- * This will setup the partition we need for buffering the
- * error logs and cleanup partitions if needed.
- *
- * The general strategy is the following:
- * 1.) If there is ppc64,linux partition large enough then use it.
- * 2.) If there is not a ppc64,linux partition large enough, search
- * for a free partition that is large enough.
- * 3.) If there is not a free partition large enough remove 
- * _all_ OS partitions and consolidate the space.
- * 4.) Will first try getting a chunk that will satisfy the maximum
- * error log size (NVRAM_MAX_REQ).
- * 5.) If the max chunk cannot be allocated then try finding a chunk
- * that will satisfy the minum needed (NVRAM_MIN_REQ).
- */
-static int __init nvram_setup_partition(void)
-{
-	struct list_head * p;
-	struct nvram_partition * part;
-	int rc;
-
-	/* For now, we don't do any of this on pmac, until I
-	 * have figured out if it's worth killing some unused stuffs
-	 * in our nvram, as Apple defined partitions use pretty much
-	 * all of the space
-	 */
-	if (machine_is(powermac))
-		return -ENOSPC;
-
-	/* see if we have an OS partition that meets our needs.
-	   will try getting the max we need.  If not we'll delete
-	   partitions and try again. */
-	list_for_each(p, &nvram_part->partition) {
-		part = list_entry(p, struct nvram_partition, partition);
-		if (part->header.signature != NVRAM_SIG_OS)
-			continue;
-
-		if (strcmp(part->header.name, "ppc64,linux"))
-			continue;
-
-		if (part->header.length >= NVRAM_MIN_REQ) {
-			/* found our partition */
-			nvram_error_log_index = part->index + NVRAM_HEADER_LEN;
-			nvram_error_log_size = ((part->header.length - 1) *
-						NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-			return 0;
+	/* Clear the new partition */
+	for (tmp_index = new_part->index + NVRAM_HEADER_LEN;
+	     tmp_index <  ((size - 1) * NVRAM_BLOCK_LEN);
+	     tmp_index += NVRAM_BLOCK_LEN) {
+		rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
+		if (rc <= 0) {
+			pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc);
+			return rc;
 		}
 	}
 	
-	/* try creating a partition with the free space we have */
-	rc = nvram_create_os_partition();
-	if (!rc) {
-		return 0;
-	}
-		
-	/* need to free up some space */
-	rc = nvram_remove_os_partition();
-	if (rc) {
-		return rc;
-	}
+	return new_part->index + NVRAM_HEADER_LEN;
+}
+
+/**
+ * nvram_get_partition_size - Get the data size of an nvram partition
+ * @data_index: This is the offset of the start of the data of
+ *              the partition. The same value that is returned by
+ *              nvram_create_partition().
+ */
+int nvram_get_partition_size(loff_t data_index)
+{
+	struct nvram_partition *part;
 	
-	/* create a partition in this new space */
-	rc = nvram_create_os_partition();
-	if (rc) {
-		printk(KERN_ERR "nvram_create_os_partition: Could not find a "
-		       "NVRAM partition large enough\n");
-		return rc;
+	list_for_each_entry(part, &nvram_partitions, partition) {
+		if (part->index + NVRAM_HEADER_LEN == data_index)
+			return (part->header.length - 1) * NVRAM_BLOCK_LEN;
 	}
-	
-	return 0;
+	return -1;
 }
 
 
-static int __init nvram_scan_partitions(void)
+/**
+ * nvram_find_partition - Find an nvram partition by signature and name
+ * @name: Name of the partition or NULL for any name
+ * @sig: Signature to test against
+ * @out_size: if non-NULL, returns the size of the data part of the partition
+ */
+loff_t nvram_find_partition(const char *name, int sig, int *out_size)
+{
+	struct nvram_partition *p;
+
+	list_for_each_entry(p, &nvram_partitions, partition) {
+		if (p->header.signature == sig &&
+		    (!name || !strncmp(p->header.name, name, 12))) {
+			if (out_size)
+				*out_size = (p->header.length - 1) *
+					NVRAM_BLOCK_LEN;
+			return p->index + NVRAM_HEADER_LEN;
+		}
+	}
+	return 0;
+}
+
+int __init nvram_scan_partitions(void)
 {
 	loff_t cur_index = 0;
 	struct nvram_header phead;
@@ -465,7 +451,7 @@
 	int total_size;
 	int err;
 
-	if (ppc_md.nvram_size == NULL)
+	if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
 		return -ENODEV;
 	total_size = ppc_md.nvram_size();
 	
@@ -512,12 +498,16 @@
 		
 		memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN);
 		tmp_part->index = cur_index;
-		list_add_tail(&tmp_part->partition, &nvram_part->partition);
+		list_add_tail(&tmp_part->partition, &nvram_partitions);
 		
 		cur_index += phead.length * NVRAM_BLOCK_LEN;
 	}
 	err = 0;
 
+#ifdef DEBUG_NVRAM
+	nvram_print_partitions("NVRAM Partitions");
+#endif
+
  out:
 	kfree(header);
 	return err;
@@ -525,9 +515,10 @@
 
 static int __init nvram_init(void)
 {
-	int error;
 	int rc;
 	
+	BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
+
 	if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
 		return  -ENODEV;
 
@@ -537,29 +528,6 @@
 		return rc;
 	}
   	
-  	/* initialize our anchor for the nvram partition list */
-  	nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
-  	if (!nvram_part) {
-  		printk(KERN_ERR "nvram_init: Failed kmalloc\n");
-  		return -ENOMEM;
-  	}
-  	INIT_LIST_HEAD(&nvram_part->partition);
-  
-  	/* Get all the NVRAM partitions */
-  	error = nvram_scan_partitions();
-  	if (error) {
-  		printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n");
-  		return error;
-  	}
-  		
-  	if(nvram_setup_partition()) 
-  		printk(KERN_WARNING "nvram_init: Could not find nvram partition"
-  		       " for nvram buffered error logging.\n");
-  
-#ifdef DEBUG_NVRAM
-	nvram_print_partitions("NVRAM Partitions");
-#endif
-
   	return rc;
 }
 
@@ -568,135 +536,6 @@
         misc_deregister( &nvram_dev );
 }
 
-
-#ifdef CONFIG_PPC_PSERIES
-
-/* nvram_write_error_log
- *
- * We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode.  If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk.  For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name       | data             |
- * |0          |1         |2      3|4         15|16        length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log                         |
- * |0            3|4          7|8            nvram_error_log_size-1|
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
- */
-int nvram_write_error_log(char * buff, int length,
-                          unsigned int err_type, unsigned int error_log_cnt)
-{
-	int rc;
-	loff_t tmp_index;
-	struct err_log_info info;
-	
-	if (nvram_error_log_index == -1) {
-		return -ESPIPE;
-	}
-
-	if (length > nvram_error_log_size) {
-		length = nvram_error_log_size;
-	}
-
-	info.error_type = err_type;
-	info.seq_num = error_log_cnt;
-
-	tmp_index = nvram_error_log_index;
-
-	rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-
-	rc = ppc_md.nvram_write(buff, length, &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-	
-	return 0;
-}
-
-/* nvram_read_error_log
- *
- * Reads nvram for error log for at most 'length'
- */
-int nvram_read_error_log(char * buff, int length,
-                         unsigned int * err_type, unsigned int * error_log_cnt)
-{
-	int rc;
-	loff_t tmp_index;
-	struct err_log_info info;
-	
-	if (nvram_error_log_index == -1)
-		return -1;
-
-	if (length > nvram_error_log_size)
-		length = nvram_error_log_size;
-
-	tmp_index = nvram_error_log_index;
-
-	rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-		return rc;
-	}
-
-	rc = ppc_md.nvram_read(buff, length, &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-		return rc;
-	}
-
-	*error_log_cnt = info.seq_num;
-	*err_type = info.error_type;
-
-	return 0;
-}
-
-/* This doesn't actually zero anything, but it sets the event_logged
- * word to tell that this event is safely in syslog.
- */
-int nvram_clear_error_log(void)
-{
-	loff_t tmp_index;
-	int clear_word = ERR_FLAG_ALREADY_LOGGED;
-	int rc;
-
-	if (nvram_error_log_index == -1)
-		return -1;
-
-	tmp_index = nvram_error_log_index;
-	
-	rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
 module_init(nvram_init);
 module_exit(nvram_cleanup);
 MODULE_LICENSE("GPL");
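Taken together, the rework above replaces the error-log-specific setup with a small allocation API: partitions can be found, created, sized and removed by name and signature. A hedged usage sketch of a client along the lines of the pseries error-log code that used to live in this file; the partition name, the sizes and the init function are illustrative, and the declarations are assumed to be exported through <asm/nvram.h>.

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/nvram.h>

#define EXAMPLE_PART_NAME	"ppc64,linux"	/* name the old OS partition used */
#define EXAMPLE_REQ_BYTES	2048		/* illustrative request */
#define EXAMPLE_MIN_BYTES	1024		/* illustrative minimum */

static loff_t example_data_index;
static int example_data_size;

static int __init example_nvram_client_init(void)
{
	loff_t idx;

	/* Reuse an existing partition if one matches, otherwise create one. */
	idx = nvram_find_partition(EXAMPLE_PART_NAME, NVRAM_SIG_OS,
				   &example_data_size);
	if (idx == 0) {
		idx = nvram_create_partition(EXAMPLE_PART_NAME, NVRAM_SIG_OS,
					     EXAMPLE_REQ_BYTES, EXAMPLE_MIN_BYTES);
		if (idx < 0)
			return idx;
		/* min_size < req_size, so ask how much was actually granted. */
		example_data_size = nvram_get_partition_size(idx);
	}
	example_data_index = idx;

	return 0;
}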
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ebf9846..f4adf89 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -27,20 +27,6 @@
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS	NR_CPUS
-#else
-#define NR_LPPACAS	64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS	1
-#endif
-
-/*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
  * is now known to fail if the lppaca structure crosses a page
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d43fc65..8515776 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -193,8 +193,7 @@
 	hose->io_resource.start += io_virt_offset;
 	hose->io_resource.end += io_virt_offset;
 
-	pr_debug("  hose->io_resource=0x%016llx...0x%016llx\n",
-		 hose->io_resource.start, hose->io_resource.end);
+	pr_debug("  hose->io_resource=%pR\n", &hose->io_resource);
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 5674807..ab6f6be 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1212,6 +1212,7 @@
 			if (left <= 0)
 				left = period;
 			record = 1;
+			event->hw.last_period = event->hw.sample_period;
 		}
 		if (left < 0x80000000LL)
 			val = 0x80000000LL - left;
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 4dcf5f8..b0dc8f7 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -596,6 +596,7 @@
 			if (left <= 0)
 				left = period;
 			record = 1;
+			event->hw.last_period = event->hw.sample_period;
 		}
 		if (left < 0x80000000LL)
 			val = 0x80000000LL - left;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392..ef3ef56 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -186,3 +186,10 @@
 EXPORT_SYMBOL(__mfdcr);
 #endif
 EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+#endif
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 5113bd2..e83ba3f 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /*
  * Grab the register values as they are now.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 84906d3..8303a6c 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@
 			prime_debug_regs(new_thread);
 }
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
 	if (thread->dabr) {
@@ -360,6 +361,7 @@
 		set_dabr(0);
 	}
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -631,7 +633,7 @@
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
 #else
-		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
+		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
 #endif
 	printk("TASK = %p[%d] '%s' THREAD: %p",
 	       current, task_pid_nr(current), current->comm, task_thread_info(current));
@@ -670,11 +672,11 @@
 {
 	discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
 	flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
 	set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9e3132d..7185f0d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -519,9 +519,9 @@
 	memblock_add(base, size);
 }
 
-u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return memblock_alloc(size, align);
+	return __va(memblock_alloc(size, align));
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a9b3296..9065369 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1316,6 +1316,10 @@
 static long ppc_set_hwdebug(struct task_struct *child,
 		     struct ppc_hw_breakpoint *bp_info)
 {
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+	unsigned long dabr;
+#endif
+
 	if (bp_info->version != 1)
 		return -ENOTSUPP;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1353,11 +1357,10 @@
 	/*
 	 * We only support one data breakpoint
 	 */
-	if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
-	    ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
-	    (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
-	    (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
-	    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
+	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
+	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
+	    bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
+	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
 		return -EINVAL;
 
 	if (child->thread.dabr)
@@ -1366,7 +1369,14 @@
 	if ((unsigned long)bp_info->addr >= TASK_SIZE)
 		return -EIO;
 
-	child->thread.dabr = (unsigned long)bp_info->addr;
+	dabr = (unsigned long)bp_info->addr & ~7UL;
+	dabr |= DABR_TRANSLATION;
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+		dabr |= DABR_DATA_READ;
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+		dabr |= DABR_DATA_WRITE;
+
+	child->thread.dabr = dabr;
 
 	return 1;
 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
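With the DABR now built from the requested trigger type, a debugger can ask for read, write or read/write watchpoints through the same request. A hedged userspace sketch; the traced pid and address are placeholders, and the struct layout and constants are taken from the exported powerpc <asm/ptrace.h>.

#include <string.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>		/* struct ppc_hw_breakpoint, PPC_PTRACE_SETHWDEBUG */

/* Set a read/write watchpoint on 'addr' in an already-stopped tracee. */
static long set_data_watchpoint(pid_t pid, unsigned long addr)
{
	struct ppc_hw_breakpoint bp;

	memset(&bp, 0, sizeof(bp));
	bp.version        = 1;
	bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_RW;
	bp.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;
	bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	bp.addr           = addr;

	/* Positive slot number on success (1 for the single DABR slot shown
	 * above), -1 with errno set on failure. */
	return ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
}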
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 8a6daf4..69c4be9 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -280,7 +280,11 @@
 		/* We only support one DABR and no IABRS at the moment */
 		if (addr > 0)
 			break;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+		ret = put_user(child->thread.dac1, (u32 __user *)data);
+#else
 		ret = put_user(child->thread.dabr, (u32 __user *)data);
+#endif
 		break;
 	}
 
@@ -312,6 +316,9 @@
 	case PTRACE_SET_DEBUGREG:
 	case PTRACE_SYSCALL:
 	case PTRACE_CONT:
+	case PPC_PTRACE_GETHWDBGINFO:
+	case PPC_PTRACE_SETHWDEBUG:
+	case PPC_PTRACE_DELHWDEBUG:
 		ret = arch_ptrace(child, request, addr, data);
 		break;
 
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8fe8bc6..2097f2b 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -41,6 +41,7 @@
 #include <asm/atomic.h>
 #include <asm/time.h>
 #include <asm/mmu.h>
+#include <asm/topology.h>
 
 struct rtas_t rtas = {
 	.lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@
 	int cpu;
 
 	slb_set_size(SLB_MIN_SIZE);
+	stop_topology_update();
 	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
 
 	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -728,6 +730,7 @@
 		rc = atomic_read(&data->error);
 
 	atomic_set(&data->error, rc);
+	start_topology_update();
 
 	if (wake_when_done) {
 		atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2b442e6..bf5f5ce 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -256,31 +256,16 @@
 	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
 	struct rtas_update_flash_t *uf;
 	char msg[RTAS_MSG_MAXLEN];
-	int msglen;
 
-	uf = (struct rtas_update_flash_t *) dp->data;
+	uf = dp->data;
 
 	if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
 		get_flash_status_msg(uf->status, msg);
 	} else {	   /* FIRMWARE_UPDATE_NAME */
 		sprintf(msg, "%d\n", uf->status);
 	}
-	msglen = strlen(msg);
-	if (msglen > count)
-		msglen = count;
 
-	if (ppos && *ppos != 0)
-		return 0;	/* be cheap */
-
-	if (!access_ok(VERIFY_WRITE, buf, msglen))
-		return -EINVAL;
-
-	if (copy_to_user(buf, msg, msglen))
-		return -EFAULT;
-
-	if (ppos)
-		*ppos = msglen;
-	return msglen;
+	return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
 }
 
 /* constructor for flash_block_cache */
@@ -394,26 +379,13 @@
 	char msg[RTAS_MSG_MAXLEN];
 	int msglen;
 
-	args_buf = (struct rtas_manage_flash_t *) dp->data;
+	args_buf = dp->data;
 	if (args_buf == NULL)
 		return 0;
 
 	msglen = sprintf(msg, "%d\n", args_buf->status);
-	if (msglen > count)
-		msglen = count;
 
-	if (ppos && *ppos != 0)
-		return 0;	/* be cheap */
-
-	if (!access_ok(VERIFY_WRITE, buf, msglen))
-		return -EINVAL;
-
-	if (copy_to_user(buf, msg, msglen))
-		return -EFAULT;
-
-	if (ppos)
-		*ppos = msglen;
-	return msglen;
+	return simple_read_from_buffer(buf, count, ppos, msg, msglen);
 }
 
 static ssize_t manage_flash_write(struct file *file, const char __user *buf,
@@ -495,24 +467,11 @@
 	char msg[RTAS_MSG_MAXLEN];
 	int msglen;
 
-	args_buf = (struct rtas_validate_flash_t *) dp->data;
+	args_buf = dp->data;
 
-	if (ppos && *ppos != 0)
-		return 0;	/* be cheap */
-	
 	msglen = get_validate_flash_msg(args_buf, msg);
-	if (msglen > count)
-		msglen = count;
 
-	if (!access_ok(VERIFY_WRITE, buf, msglen))
-		return -EINVAL;
-
-	if (copy_to_user(buf, msg, msglen))
-		return -EFAULT;
-
-	if (ppos)
-		*ppos = msglen;
-	return msglen;
+	return simple_read_from_buffer(buf, count, ppos, msg, msglen);
 }
 
 static ssize_t validate_flash_write(struct file *file, const char __user *buf,
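All three reads above now lean on simple_read_from_buffer() for the bounds handling the removed code did by hand. A userspace model of the contract being relied on (clamp to what is left after *ppos, copy, advance the offset); this is an illustration of the semantics, not the kernel implementation.

#include <assert.h>
#include <string.h>
#include <sys/types.h>

/* Userspace stand-in: 'to' plays the role of the __user buffer and the
 * copy is a plain memcpy. */
static ssize_t model_read_from_buffer(void *to, size_t count, long *ppos,
				      const void *from, size_t available)
{
	long pos = *ppos;

	if (pos < 0)
		return -1;			/* kernel: -EINVAL */
	if ((size_t)pos >= available || !count)
		return 0;			/* nothing left: EOF */
	if (count > available - (size_t)pos)
		count = available - (size_t)pos;
	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + count;
	return count;
}

int main(void)
{
	const char msg[] = "42\n";
	char buf[8];
	long pos = 0;

	assert(model_read_from_buffer(buf, sizeof(buf), &pos, msg, 3) == 3);
	assert(pos == 3);
	/* A second read at the advanced offset returns 0, which is what the
	 * old hand-rolled "be cheap" check approximated. */
	assert(model_read_from_buffer(buf, sizeof(buf), &pos, msg, 3) == 0);
	return 0;
}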
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 0438f81..049dbec 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -160,7 +160,7 @@
 	/* rtas fixed header */
 	len = 8;
 	err = (struct rtas_error_log *)buf;
-	if (err->extended_log_length) {
+	if (err->extended && err->extended_log_length) {
 
 		/* extended header */
 		len += err->extended_log_length;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ce6f61c..5a0401f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -437,8 +437,8 @@
 	unsigned int i;
 
 	/*
-	 * interrupt stacks must be under 256MB, we cannot afford to take
-	 * SLB misses on them.
+	 * Interrupt stacks must be in the first segment since we
+	 * cannot afford to take SLB misses on them.
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bb..9813605 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -466,7 +466,20 @@
 	return id;
 }
 
-/* Must be called when no change can occur to cpu_present_mask,
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+	return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+	return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+/* Must be called when no change can occur to cpu_present_map,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
@@ -600,7 +613,7 @@
 		return err;
 
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
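The two helpers exported above are plain shifts by threads_shift. A quick standalone check of the mapping; threads_shift = 2 models a made-up SMT4 configuration, whereas the real value comes from the device tree.

#include <assert.h>

static const int threads_shift = 2;	/* 4 hardware threads per core, for the example */

static int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}

static int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}

int main(void)
{
	assert(cpu_core_index_of_thread(0) == 0);
	assert(cpu_core_index_of_thread(5) == 1);	/* cpu 5 is a thread of core 1 */
	assert(cpu_first_thread_of_core(1) == 4);
	assert(cpu_first_thread_of_core(3) == 12);
	return 0;
}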
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0104069..09d31db 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -155,7 +155,7 @@
 
 static u64 tb_to_ns_scale __read_mostly;
 static unsigned tb_to_ns_shift __read_mostly;
-static unsigned long boot_tb __read_mostly;
+static u64 boot_tb __read_mostly;
 
 extern struct timezone sys_tz;
 static long timezone_offset;
@@ -265,11 +265,26 @@
 {
 	u64 sst, ust;
 
-	sst = scan_dispatch_log(get_paca()->starttime_user);
-	ust = scan_dispatch_log(get_paca()->starttime);
-	get_paca()->system_time -= sst;
-	get_paca()->user_time -= ust;
-	get_paca()->stolen_time += ust + sst;
+	u8 save_soft_enabled = local_paca->soft_enabled;
+	u8 save_hard_enabled = local_paca->hard_enabled;
+
+	/* We are called early in the exception entry, before
+	 * soft/hard_enabled are sync'ed to the expected state
+	 * for the exception. We are hard disabled but the PACA
+	 * needs to reflect that so various debug stuff doesn't
+	 * complain
+	 */
+	local_paca->soft_enabled = 0;
+	local_paca->hard_enabled = 0;
+
+	sst = scan_dispatch_log(local_paca->starttime_user);
+	ust = scan_dispatch_log(local_paca->starttime);
+	local_paca->system_time -= sst;
+	local_paca->user_time -= ust;
+	local_paca->stolen_time += ust + sst;
+
+	local_paca->soft_enabled = save_soft_enabled;
+	local_paca->hard_enabled = save_hard_enabled;
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1b2cdc8..bd74fac 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -626,12 +626,6 @@
 	if (recover > 0)
 		return;
 
-	if (user_mode(regs)) {
-		regs->msr |= MSR_RI;
-		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
-		return;
-	}
-
 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
 	/* the qspan pci read routines can cause machine checks -- Cort
 	 *
@@ -643,16 +637,12 @@
 	return;
 #endif
 
-	if (debugger_fault_handler(regs)) {
-		regs->msr |= MSR_RI;
+	if (debugger_fault_handler(regs))
 		return;
-	}
 
 	if (check_io_access(regs))
 		return;
 
-	if (debugger_fault_handler(regs))
-		return;
 	die("Machine check", regs, SIGBUS);
 
 	/* Must die if the interrupt is not recoverable */
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index fe46048..9de6f39 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -5,6 +5,7 @@
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 
 /*
  * load_up_altivec(unused, unused, tsk)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 441d2a7..1b695fd 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -600,6 +600,11 @@
 	vio_cmo_dealloc(viodev, alloc_size);
 }
 
+static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	return dma_iommu_ops.dma_supported(dev, mask);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc_coherent = vio_dma_iommu_alloc_coherent,
 	.free_coherent  = vio_dma_iommu_free_coherent,
@@ -607,6 +612,7 @@
 	.unmap_sg       = vio_dma_iommu_unmap_sg,
 	.map_page       = vio_dma_iommu_map_page,
 	.unmap_page     = vio_dma_iommu_unmap_page,
+	.dma_supported  = vio_dma_iommu_dma_supported,
 
 };
 
@@ -858,8 +864,7 @@
 
 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
 {
-	vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
-	viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
+	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
 }
 
 /**
@@ -1244,7 +1249,7 @@
 	if (firmware_has_feature(FW_FEATURE_CMO))
 		vio_cmo_set_dma_ops(viodev);
 	else
-		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+		set_dma_ops(&viodev->dev, &dma_iommu_ops);
 	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
 	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
@@ -1252,6 +1257,10 @@
 	viodev->dev.parent = &vio_bus_device.dev;
 	viodev->dev.bus = &vio_bus_type;
 	viodev->dev.release = vio_dev_release;
+	/* Needed to ensure proper operation of coherent allocations
+	 * later, in case the driver doesn't set it explicitly.
+	 */
+	dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
+	dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
 
 	/* register with generic device framework */
 	if (device_register(&viodev->dev)) {
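The 64-bit masks set above are only a default for drivers that never call the DMA mask API themselves. A hypothetical probe routine overriding that default for hardware limited to 32-bit addressing; the driver and function are illustrative, while dma_set_mask() and dma_set_coherent_mask() are the same interfaces used above.

#include <linux/dma-mapping.h>
#include <asm/vio.h>

/* Hypothetical vio driver probe limiting DMA to 32-bit addresses. */
static int example_vio_probe(struct vio_dev *vdev,
			     const struct vio_device_id *id)
{
	int rc;

	rc = dma_set_mask(&vdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	return dma_set_coherent_mask(&vdev->dev, DMA_BIT_MASK(32));
}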
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index e316847..badc983 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1307,12 +1307,10 @@
 	int err = -ENOMEM;
 	unsigned long p;
 
-	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
+	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
 		goto out;
 
-	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
-
 	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
 		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
 	if (!vcpu_book3s->shadow_vcpu)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 38f756f..9975846 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -145,18 +145,12 @@
 	*(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm;
-
-	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	if (!kvm)
-		return ERR_PTR(-ENOMEM);
-
-	return kvm;
+	return 0;
 }
 
-static void kvmppc_free_vcpus(struct kvm *kvm)
+void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	unsigned int i;
 	struct kvm_vcpu *vcpu;
@@ -176,14 +170,6 @@
 {
 }
 
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
-	kvmppc_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
-}
-
 int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 889f2bc..166a6a0 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,7 +16,7 @@
 
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o \
-			   checksum_wrappers_64.o
+			   checksum_wrappers_64.o hweight_64.o
 obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
 obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index cb73748..f461311 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -172,6 +172,25 @@
 3:	or	3,3,3
 
 
+#if 0
+/* Test that if we have a larger else case the assembler spots it and
+ * reports an error. #if 0'ed so as not to break the build normally.
+ */
+ftr_fixup_test7:
+	or	1,1,1
+BEGIN_FTR_SECTION
+	or	2,2,2
+	or	2,2,2
+	or	2,2,2
+FTR_SECTION_ELSE
+	or	3,3,3
+	or	3,3,3
+	or	3,3,3
+	or	3,3,3
+ALT_FTR_SECTION_END(0, 1)
+	or	1,1,1
+#endif
+
 #define	MAKE_MACRO_TEST(TYPE)						\
 globl(ftr_fixup_test_ ##TYPE##_macros)					\
 	or	1,1,1;							\
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
new file mode 100644
index 0000000..fda2786
--- /dev/null
+++ b/arch/powerpc/lib/hweight_64.S
@@ -0,0 +1,110 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2010
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+/* Note: This code relies on -mminimal-toc */
+
+_GLOBAL(__arch_hweight8)
+BEGIN_FTR_SECTION
+	b .__sw_hweight8
+	nop
+	nop
+FTR_SECTION_ELSE
+	PPC_POPCNTB(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight16)
+BEGIN_FTR_SECTION
+	b .__sw_hweight16
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(50)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(50)
+	clrlwi  r3,r3,16
+	PPC_POPCNTW(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight32)
+BEGIN_FTR_SECTION
+	b .__sw_hweight32
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(51)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,16
+	add	r3,r4,r3
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(51)
+	PPC_POPCNTW(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight64)
+BEGIN_FTR_SECTION
+	b .__sw_hweight64
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(52)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,32
+	add	r3,r4,r3
+	srdi	r4,r3,16
+	add	r3,r4,r3
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(52)
+	PPC_POPCNTD(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
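For CPUs that have popcntb but not popcntd, the routines above fold the per-byte counts down to a single byte with a shift/add ladder. A runnable C rendering of the 64-bit case, with the popcntb emulation included only so the example is self-contained.

#include <assert.h>
#include <stdint.h>

/* Emulate popcntb: population count of each byte, kept in that byte. */
static uint64_t popcntb(uint64_t v)
{
	uint64_t r = 0;
	int byte;

	for (byte = 0; byte < 8; byte++) {
		uint64_t b = (v >> (byte * 8)) & 0xff;
		uint64_t c = 0;

		while (b) {
			c += b & 1;
			b >>= 1;
		}
		r |= c << (byte * 8);
	}
	return r;
}

/* The same folding __arch_hweight64 performs after popcntb: no byte sum
 * can exceed 64, so the additions never carry across byte boundaries. */
static unsigned int hweight64_popcntb(uint64_t v)
{
	uint64_t r = popcntb(v);

	r += r >> 32;		/* srdi/add pairs in the assembly */
	r += r >> 16;
	r += r >> 8;
	return r & 0xff;	/* clrldi r3,r3,64-8 keeps the low byte */
}

int main(void)
{
	assert(hweight64_popcntb(0) == 0);
	assert(hweight64_popcntb(0xffffffffffffffffULL) == 64);
	assert(hweight64_popcntb(0x8000000000000001ULL) == 2);
	assert(hweight64_popcntb(0x0123456789abcdefULL) == 32);
	return 0;
}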
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index d7efdbf..fec1320 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -16,6 +16,16 @@
 
 #ifdef __HAVE_ARCH_PTE_SPECIAL
 
+static inline void get_huge_page_tail(struct page *page)
+{
+	/*
+	 * __split_huge_page_refcount() cannot run
+	 * from under us.
+	 */
+	VM_BUG_ON(atomic_read(&page->_count) < 0);
+	atomic_inc(&page->_count);
+}
+
 /*
  * The performance critical leaf functions are made noinline otherwise gcc
  * inlines everything into a single function which results in too much
@@ -47,6 +57,8 @@
 			put_page(page);
 			return 0;
 		}
+		if (PageTail(page))
+			get_huge_page_tail(page);
 		pages[*nr] = page;
 		(*nr)++;
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5e95844..a5991fa 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1070,7 +1070,7 @@
 		  unsigned long access, unsigned long trap)
 {
 	unsigned long vsid;
-	void *pgdir;
+	pgd_t *pgdir;
 	pte_t *ptep;
 	unsigned long flags;
 	int rc, ssize, local = 0;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5ce9984..c0aab52 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -111,8 +111,8 @@
 		 * a core map instead but this will do for now.
 		 */
 		for_each_cpu(cpu, mm_cpumask(mm)) {
-			for (i = cpu_first_thread_in_core(cpu);
-			     i <= cpu_last_thread_in_core(cpu); i++)
+			for (i = cpu_first_thread_sibling(cpu);
+			     i <= cpu_last_thread_sibling(cpu); i++)
 				__set_bit(id, stale_map[i]);
 			cpu = i - 1;
 		}
@@ -264,14 +264,14 @@
 	 */
 	if (test_bit(id, stale_map[cpu])) {
 		pr_hardcont(" | stale flush %d [%d..%d]",
-			    id, cpu_first_thread_in_core(cpu),
-			    cpu_last_thread_in_core(cpu));
+			    id, cpu_first_thread_sibling(cpu),
+			    cpu_last_thread_sibling(cpu));
 
 		local_flush_tlb_mm(next);
 
 		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-		for (i = cpu_first_thread_in_core(cpu);
-		     i <= cpu_last_thread_in_core(cpu); i++) {
+		for (i = cpu_first_thread_sibling(cpu);
+		     i <= cpu_last_thread_sibling(cpu); i++) {
 			__clear_bit(id, stale_map[i]);
 		}
 	}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 74505b2..0dc95c0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -20,10 +20,15 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
+#include <linux/cpuset.h>
+#include <linux/node.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/system.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
+#include <asm/paca.h>
+#include <asm/hvcall.h>
 
 static int numa_enabled = 1;
 
@@ -163,7 +168,7 @@
 	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
 }
 
-static void __cpuinit map_cpu_to_node(int cpu, int node)
+static void map_cpu_to_node(int cpu, int node)
 {
 	numa_cpu_lookup_table[cpu] = node;
 
@@ -173,7 +178,7 @@
 		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
 static void unmap_cpu_from_node(unsigned long cpu)
 {
 	int node = numa_cpu_lookup_table[cpu];
@@ -181,13 +186,13 @@
 	dbg("removing cpu %lu from node %d\n", cpu, node);
 
 	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
-		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 	} else {
 		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 		       cpu, node);
 	}
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 
 /* must hold reference to node during call */
 static const int *of_get_associativity(struct device_node *dev)
@@ -246,32 +251,41 @@
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
-static int of_node_to_nid_single(struct device_node *device)
+static int associativity_to_nid(const unsigned int *associativity)
 {
 	int nid = -1;
-	const unsigned int *tmp;
 
 	if (min_common_depth == -1)
 		goto out;
 
-	tmp = of_get_associativity(device);
-	if (!tmp)
-		goto out;
-
-	if (tmp[0] >= min_common_depth)
-		nid = tmp[min_common_depth];
+	if (associativity[0] >= min_common_depth)
+		nid = associativity[min_common_depth];
 
 	/* POWER4 LPAR uses 0xffff as invalid node */
 	if (nid == 0xffff || nid >= MAX_NUMNODES)
 		nid = -1;
 
-	if (nid > 0 && tmp[0] >= distance_ref_points_depth)
-		initialize_distance_lookup_table(nid, tmp);
+	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
+		initialize_distance_lookup_table(nid, associativity);
 
 out:
 	return nid;
 }
 
+/* Returns the nid associated with the given device tree node,
+ * or -1 if not found.
+ */
+static int of_node_to_nid_single(struct device_node *device)
+{
+	int nid = -1;
+	const unsigned int *tmp;
+
+	tmp = of_get_associativity(device);
+	if (tmp)
+		nid = associativity_to_nid(tmp);
+	return nid;
+}
+
 /* Walk the device tree upwards, looking for an associativity id */
 int of_node_to_nid(struct device_node *device)
 {
@@ -1247,4 +1261,281 @@
 	return nid;
 }
 
+static u64 hot_add_drconf_memory_max(void)
+{
+	struct device_node *memory = NULL;
+	unsigned int drconf_cell_cnt = 0;
+	u64 lmb_size = 0;
+	const u32 *dm = NULL;
+
+	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+	if (memory) {
+		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
+		lmb_size = of_get_lmb_size(memory);
+		of_node_put(memory);
+	}
+	return lmb_size * drconf_cell_cnt;
+}
+
+/*
+ * memory_hotplug_max - return max address of memory that may be added
+ *
+ * This is currently only used on systems that support drconfig memory
+ * hotplug.
+ */
+u64 memory_hotplug_max(void)
+{
+        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
+}
 #endif /* CONFIG_MEMORY_HOTPLUG */
+
+/* Virtual Processor Home Node (VPHN) support */
+#ifdef CONFIG_PPC_SPLPAR
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
+static cpumask_t cpu_associativity_changes_mask;
+static int vphn_enabled;
+static void set_topology_timer(void);
+
+/*
+ * Store the current values of the associativity change counters in the
+ * hypervisor.
+ */
+static void setup_cpu_associativity_change_counters(void)
+{
+	int cpu;
+
+	/* The VPHN feature supports a maximum of 8 reference points */
+	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
+
+	for_each_possible_cpu(cpu) {
+		int i;
+		u8 *counts = vphn_cpu_change_counts[cpu];
+		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+		for (i = 0; i < distance_ref_points_depth; i++)
+			counts[i] = hypervisor_counts[i];
+	}
+}
+
+/*
+ * The hypervisor maintains a set of 8 associativity change counters in
+ * the VPA of each cpu that correspond to the associativity levels in the
+ * ibm,associativity-reference-points property. When an associativity
+ * level changes, the corresponding counter is incremented.
+ *
+ * Set a bit in cpu_associativity_changes_mask for each cpu whose home
+ * node associativity levels have changed.
+ *
+ * Returns the number of cpus with unhandled associativity changes.
+ */
+static int update_cpu_associativity_changes_mask(void)
+{
+	int cpu, nr_cpus = 0;
+	cpumask_t *changes = &cpu_associativity_changes_mask;
+
+	cpumask_clear(changes);
+
+	for_each_possible_cpu(cpu) {
+		int i, changed = 0;
+		u8 *counts = vphn_cpu_change_counts[cpu];
+		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+		for (i = 0; i < distance_ref_points_depth; i++) {
+			if (hypervisor_counts[i] != counts[i]) {
+				counts[i] = hypervisor_counts[i];
+				changed = 1;
+			}
+		}
+		if (changed) {
+			cpumask_set_cpu(cpu, changes);
+			nr_cpus++;
+		}
+	}
+
+	return nr_cpus;
+}
+
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
+
+/*
+ * Convert the associativity domain numbers returned from the hypervisor
+ * to the sequence they would appear in the ibm,associativity property.
+ */
+static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
+{
+	int i, nr_assoc_doms = 0;
+	const u16 *field = (const u16*) packed;
+
+#define VPHN_FIELD_UNUSED	(0xffff)
+#define VPHN_FIELD_MSB		(0x8000)
+#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
+
+	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
+		if (*field == VPHN_FIELD_UNUSED) {
+			/* All significant fields processed, and remaining
+			 * fields contain the reserved value of all 1's.
+			 * Just store them.
+			 */
+			unpacked[i] = *((u32*)field);
+			field += 2;
+		} else if (*field & VPHN_FIELD_MSB) {
+			/* Data is in the lower 15 bits of this field */
+			unpacked[i] = *field & VPHN_FIELD_MASK;
+			field++;
+			nr_assoc_doms++;
+		} else {
+			/* Data is in the lower 15 bits of this field
+			 * concatenated with the next 16 bit field
+			 */
+			unpacked[i] = *((u32*)field);
+			field += 2;
+			nr_assoc_doms++;
+		}
+	}
+
+	/* The first cell contains the length of the property */
+	unpacked[0] = nr_assoc_doms;
+
+	return nr_assoc_doms;
+}
+
+/*
+ * Retrieve the new associativity information for a virtual processor's
+ * home node.
+ */
+static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
+{
+	long rc;
+	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+	u64 flags = 1;
+	int hwcpu = get_hard_smp_processor_id(cpu);
+
+	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
+	vphn_unpack_associativity(retbuf, associativity);
+
+	return rc;
+}
+
+static long vphn_get_associativity(unsigned long cpu,
+					unsigned int *associativity)
+{
+	long rc;
+
+	rc = hcall_vphn(cpu, associativity);
+
+	switch (rc) {
+	case H_FUNCTION:
+		printk(KERN_INFO
+			"VPHN is not supported. Disabling polling...\n");
+		stop_topology_update();
+		break;
+	case H_HARDWARE:
+		printk(KERN_ERR
+			"hcall_vphn() experienced a hardware fault "
+			"preventing VPHN. Disabling polling...\n");
+		stop_topology_update();
+	}
+
+	return rc;
+}
+
+/*
+ * Update the node maps and sysfs entries for each cpu whose home node
+ * has changed.
+ */
+int arch_update_cpu_topology(void)
+{
+	int cpu, nid, old_nid;
+	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+	struct sys_device *sysdev;
+
+	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
+		vphn_get_associativity(cpu, associativity);
+		nid = associativity_to_nid(associativity);
+
+		if (nid < 0 || !node_online(nid))
+			nid = first_online_node;
+
+		old_nid = numa_cpu_lookup_table[cpu];
+
+		/* Disable hotplug while we update the cpu
+		 * masks and sysfs.
+		 */
+		get_online_cpus();
+		unregister_cpu_under_node(cpu, old_nid);
+		unmap_cpu_from_node(cpu);
+		map_cpu_to_node(cpu, nid);
+		register_cpu_under_node(cpu, nid);
+		put_online_cpus();
+
+		sysdev = get_cpu_sysdev(cpu);
+		if (sysdev)
+			kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+	}
+
+	return 1;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+	rebuild_sched_domains();
+}
+static DECLARE_WORK(topology_work, topology_work_fn);
+
+void topology_schedule_update(void)
+{
+	schedule_work(&topology_work);
+}
+
+static void topology_timer_fn(unsigned long ignored)
+{
+	if (!vphn_enabled)
+		return;
+	if (update_cpu_associativity_changes_mask() > 0)
+		topology_schedule_update();
+	set_topology_timer();
+}
+static struct timer_list topology_timer =
+	TIMER_INITIALIZER(topology_timer_fn, 0, 0);
+
+static void set_topology_timer(void)
+{
+	topology_timer.data = 0;
+	topology_timer.expires = jiffies + 60 * HZ;
+	add_timer(&topology_timer);
+}
+
+/*
+ * Start polling for VPHN associativity changes.
+ */
+int start_topology_update(void)
+{
+	int rc = 0;
+
+	/* Disabled until races with load balancing are fixed */
+	if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
+	    get_lppaca()->shared_proc) {
+		vphn_enabled = 1;
+		setup_cpu_associativity_change_counters();
+		init_timer_deferrable(&topology_timer);
+		set_topology_timer();
+		rc = 1;
+	}
+
+	return rc;
+}
+__initcall(start_topology_update);
+
+/*
+ * Disable polling for VPHN associativity changes.
+ */
+int stop_topology_update(void)
+{
+	vphn_enabled = 0;
+	return del_timer_sync(&topology_timer);
+}
+#endif /* CONFIG_PPC_SPLPAR */
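
Illustrative aside (not part of the patch): the comment block above describes how the VPHN hcall packs associativity domains into 16-bit fields: 0xffff marks an unused field, a set MSB carries a 15-bit domain number, and otherwise two consecutive fields form one 32-bit value, with the output prefixed by the domain count. The following is a minimal user-space sketch of that field walk; the names, the test vector, and the plain uint16_t view of the buffer are invented for the example (the kernel code reads the fields out of the hcall return registers on a big-endian machine).

/* Standalone sketch of the unpacking rule described above -- not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define NR_FIELDS	12	/* 6 x 64-bit registers = 12 x 32-bit slots */
#define FIELD_UNUSED	0xffffu
#define FIELD_MSB	0x8000u
#define FIELD_MASK	(~FIELD_MSB & 0xffffu)

static int unpack_assoc(const uint16_t *field, uint32_t *out)
{
	int i, nr_doms = 0;

	for (i = 1; i <= NR_FIELDS; i++) {
		if (*field == FIELD_UNUSED) {
			/* reserved all-ones fields: store them as-is */
			out[i] = ((uint32_t)field[0] << 16) | field[1];
			field += 2;
		} else if (*field & FIELD_MSB) {
			/* 15-bit domain number in this field alone */
			out[i] = *field & FIELD_MASK;
			field += 1;
			nr_doms++;
		} else {
			/* 15 bits concatenated with the next 16-bit field */
			out[i] = ((uint32_t)field[0] << 16) | field[1];
			field += 2;
			nr_doms++;
		}
	}
	out[0] = nr_doms;	/* first cell: length of the property */
	return nr_doms;
}

int main(void)
{
	uint16_t packed[24];
	uint32_t unpacked[NR_FIELDS + 1] = { 0 };
	int i, n;

	for (i = 0; i < 24; i++)
		packed[i] = FIELD_UNUSED;
	/* one 15-bit domain (2), then one 32-bit domain (0x00012345) */
	packed[0] = FIELD_MSB | 0x0002;
	packed[1] = 0x0001;
	packed[2] = 0x2345;

	n = unpack_assoc(packed, unpacked);
	printf("%d domains:", n);
	for (i = 1; i <= n; i++)
		printf(" 0x%x", unpacked[i]);
	printf("\n");
	return 0;
}

Running this prints "2 domains: 0x2 0x12345", matching the count-prefixed layout the kernel builds into the ibm,associativity property.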
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a87ead0..8dc41c0 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -78,7 +78,7 @@
 
 	/* pgdir take page or two with 4K pages and a page fraction otherwise */
 #ifndef CONFIG_PPC_4K_PAGES
-	ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
 #else
 	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 			PGDIR_ORDER - PAGE_SHIFT);
@@ -230,6 +230,7 @@
 		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
 			return NULL;
+		area->phys_addr = p;
 		v = (unsigned long) area->addr;
 	} else {
 		v = (ioremap_bot -= size);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 21d6dfa..88927a0 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -223,6 +223,8 @@
 					    caller);
 		if (area == NULL)
 			return NULL;
+
+		area->phys_addr = paligned;
 		ret = __ioremap_at(paligned, area->addr, size, flags);
 		if (!ret)
 			vunmap(area->addr);
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec0657..c14d09f 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@
  * neesd to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, unsigned long pte, int huge)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid, vaddr;
 	unsigned int psize;
 	int ssize;
@@ -99,6 +97,7 @@
 	 */
 	if (!batch->active) {
 		flush_hash_page(vaddr, rpte, psize, ssize, 0);
+		put_cpu_var(ppc64_tlb_batch);
 		return;
 	}
 
@@ -127,6 +126,7 @@
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
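
Illustrative aside (not part of the patch): the tlb_hash64.c hunks switch from __get_cpu_var() to the get_cpu_var()/put_cpu_var() pair, so every exit path out of hpte_need_flush() — including the early "inactive batch" return — must release what it acquired. A minimal user-space sketch of that pairing discipline, with a mutex standing in for preemption disabling and all names invented for the example:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t batch_lock = PTHREAD_MUTEX_INITIALIZER;
static struct { int active; int index; } batch;

static void flush_now(int entry)
{
	printf("flushed %d immediately\n", entry);
}

static void batch_or_flush(int entry)
{
	pthread_mutex_lock(&batch_lock);	/* "get_cpu_var()" */

	if (!batch.active) {
		flush_now(entry);
		/* early exit must release too -- this is what the hunk adds */
		pthread_mutex_unlock(&batch_lock);
		return;
	}

	batch.index++;				/* queue it up */
	printf("batched %d (index now %d)\n", entry, batch.index);

	pthread_mutex_unlock(&batch_lock);	/* "put_cpu_var()" */
}

int main(void)
{
	batch_or_flush(1);	/* inactive batch -> immediate flush path */
	batch.active = 1;
	batch_or_flush(2);	/* batched path */
	return 0;
}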
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 7fd90d0..c4d2b71 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -1469,7 +1469,7 @@
  * The pm_interval register is setup to write the SPU PC value into the
  * trace buffer at the maximum rate possible.  The trace buffer is configured
  * to store the PCs, wrapping when it is full.  The performance counter is
- * intialized to the max hardware count minus the number of events, N, between
+ * initialized to the max hardware count minus the number of events, N, between
  * samples.  Once the N events have occured, a HW counter overflow occurs
  * causing the generation of a HW counter interrupt which also stops the
  * writing of the SPU PC values to the trace buffer.  Hence the last PC
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 82ff326..c04d16d 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -1,4 +1,7 @@
-obj-$(CONFIG_44x)	:= misc_44x.o idle.o
+obj-$(CONFIG_44x)	+= misc_44x.o
+ifneq ($(CONFIG_PPC4xx_CPM),y)
+obj-$(CONFIG_44x)	+= idle.o
+endif
 obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
 obj-$(CONFIG_EBONY)	+= ebony.o
 obj-$(CONFIG_SAM440EP) 	+= sam440ep.o
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index 80234e5..eda0fc2 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -232,7 +232,7 @@
 	lite5200_pm_target_state = PM_SUSPEND_ON;
 }
 
-static struct platform_suspend_ops lite5200_pm_ops = {
+static const struct platform_suspend_ops lite5200_pm_ops = {
 	.valid		= lite5200_pm_valid,
 	.begin		= lite5200_pm_begin,
 	.prepare	= lite5200_pm_prepare,
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index 568cef6..8310e8b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -186,7 +186,7 @@
 	iounmap(mbar);
 }
 
-static struct platform_suspend_ops mpc52xx_pm_ops = {
+static const struct platform_suspend_ops mpc52xx_pm_ops = {
 	.valid		= mpc52xx_pm_valid,
 	.prepare	= mpc52xx_pm_prepare,
 	.enter		= mpc52xx_pm_enter,
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 661d354..d0c4e15 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -57,12 +57,12 @@
 	ipic_set_default_priority();
 }
 
-struct const char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"MPC8308RDB",
 	"fsl,mpc8308rdb",
 	"denx,mpc8308_p1m",
 	NULL
-}
+};
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index b54cd73..f859ead 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -60,11 +60,11 @@
 	ipic_set_default_priority();
 }
 
-struct const char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"MPC8313ERDB",
 	"fsl,mpc8315erdb",
 	NULL
-}
+};
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 0fea881..82a4345 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -35,6 +35,8 @@
 
 /* system i/o configuration register high */
 #define MPC83XX_SICRH_OFFS         0x118
+#define MPC8308_SICRH_USB_MASK     0x000c0000
+#define MPC8308_SICRH_USB_ULPI     0x00040000
 #define MPC834X_SICRH_USB_UTMI     0x00020000
 #define MPC831X_SICRH_USB_MASK     0x000000e0
 #define MPC831X_SICRH_USB_ULPI     0x000000a0
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 1930543..3d1ecd2 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -231,7 +231,7 @@
 	ori	r4, r4, 0x002a
 	mtspr	SPRN_DBAT0L, r4
 	lis	r8, TMP_VIRT_IMMR@h
-	ori	r4, r8, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r8, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT0U, r4
 	isync
 
@@ -241,7 +241,7 @@
 	ori	r4, r4, 0x002a
 	mtspr	SPRN_DBAT1L, r4
 	lis	r9, (TMP_VIRT_IMMR + 0x01000000)@h
-	ori	r4, r9, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r9, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT1U, r4
 	isync
 
@@ -253,7 +253,7 @@
 	li	r4, 0x0002
 	mtspr	SPRN_DBAT2L, r4
 	lis	r4, KERNELBASE@h
-	ori	r4, r4, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r4, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT2U, r4
 	isync
 
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 75ae77f..fd4f2f2 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -311,7 +311,7 @@
 	return ret;
 }
 
-static struct platform_suspend_ops mpc83xx_suspend_ops = {
+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
 	.valid = mpc83xx_suspend_valid,
 	.begin = mpc83xx_suspend_begin,
 	.enter = mpc83xx_suspend_enter,
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 3ba4bb7..2c64164 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -127,7 +127,8 @@
 
 	/* Configure clock */
 	immr_node = of_get_parent(np);
-	if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
+	if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") ||
+			of_device_is_compatible(immr_node, "fsl,mpc8308-immr")))
 		clrsetbits_be32(immap + MPC83XX_SCCR_OFFS,
 		                MPC8315_SCCR_USB_MASK,
 		                MPC8315_SCCR_USB_DRCM_01);
@@ -138,7 +139,11 @@
 
 	/* Configure pin mux for ULPI.  There is no pin mux for UTMI */
 	if (prop && !strcmp(prop, "ulpi")) {
-		if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
+		if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
+			clrsetbits_be32(immap + MPC83XX_SICRH_OFFS,
+					MPC8308_SICRH_USB_MASK,
+					MPC8308_SICRH_USB_ULPI);
+		} else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
 			clrsetbits_be32(immap + MPC83XX_SICRL_OFFS,
 					MPC8315_SICRL_USB_MASK,
 					MPC8315_SICRL_USB_ULPI);
@@ -173,6 +178,9 @@
 		     !strcmp(prop, "utmi"))) {
 		u32 refsel;
 
+		if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))
+			goto out;
+
 		if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
 			refsel = CONTROL_REFSEL_24MHZ;
 		else
@@ -186,9 +194,11 @@
 		temp = CONTROL_PHY_CLK_SEL_ULPI;
 #ifdef CONFIG_USB_OTG
 		/* Set OTG_PORT */
-		dr_mode = of_get_property(np, "dr_mode", NULL);
-		if (dr_mode && !strcmp(dr_mode, "otg"))
-			temp |= CONTROL_OTG_PORT;
+		if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
+			dr_mode = of_get_property(np, "dr_mode", NULL);
+			if (dr_mode && !strcmp(dr_mode, "otg"))
+				temp |= CONTROL_OTG_PORT;
+		}
 #endif /* CONFIG_USB_OTG */
 		out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp);
 	} else {
@@ -196,6 +206,7 @@
 		ret = -EINVAL;
 	}
 
+out:
 	iounmap(usb_regs);
 	of_node_put(np);
 	return ret;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index aa34cac..747d1ee 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -309,7 +309,7 @@
 			/* P1021 has pins muxed for QE and other functions. To
 			 * enable QE UEC mode, we need to set bit QE0 for UCC1
 			 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
-			 * and QE12 for QE MII management singals in PMUXCR
+			 * and QE12 for QE MII management signals in PMUXCR
 			 * register.
 			 */
 				setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 |
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 956154f..2057682 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -313,13 +313,14 @@
 source "arch/powerpc/sysdev/bestcomm/Kconfig"
 
 config MPC8xxx_GPIO
-	bool "MPC8xxx GPIO support"
-	depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx
+	bool "MPC512x/MPC8xxx GPIO support"
+	depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
+		   FSL_SOC_BOOKE || PPC_86xx
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Say Y here if you're going to use hardware that connects to the
-	  MPC831x/834x/837x/8572/8610 GPIOs.
+	  MPC512x/831x/834x/837x/8572/8610 GPIOs.
 
 config SIMPLE_GPIO
 	bool "Support for simple, memory-mapped GPIO controllers"
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index beec405..3ce6855 100644
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -76,7 +76,7 @@
 
 static void celleb_dma_dev_setup(struct device *dev)
 {
-	dev->archdata.dma_ops = get_pci_dma_ops();
+	set_dma_ops(dev, &dma_direct_ops);
 	set_dma_offset(dev, celleb_dma_direct_offset);
 }
 
@@ -106,7 +106,6 @@
 static int __init celleb_init_iommu(void)
 {
 	celleb_init_direct_mapping();
-	set_pci_dma_ops(&dma_direct_ops);
 	ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
 	bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
 
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 968c1c0..d809836 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -39,8 +39,6 @@
 };
 static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
 
-static struct workqueue_struct *kspugov_wq;
-
 static int calc_freq(struct spu_gov_info_struct *info)
 {
 	int cpu;
@@ -71,14 +69,14 @@
 	__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
 
 	delay = usecs_to_jiffies(info->poll_int);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
 	int delay = usecs_to_jiffies(info->poll_int);
 	INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
@@ -152,27 +150,15 @@
 {
 	int ret;
 
-	kspugov_wq = create_workqueue("kspugov");
-	if (!kspugov_wq) {
-		printk(KERN_ERR "creation of kspugov failed\n");
-		ret = -EFAULT;
-		goto out;
-	}
-
 	ret = cpufreq_register_governor(&spu_governor);
-	if (ret) {
+	if (ret)
 		printk(KERN_ERR "registration of governor failed\n");
-		destroy_workqueue(kspugov_wq);
-		goto out;
-	}
-out:
 	return ret;
 }
 
 static void __exit spu_gov_exit(void)
 {
 	cpufreq_unregister_governor(&spu_governor);
-	destroy_workqueue(kspugov_wq);
 }
 
 
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index 1b57490..d31c594 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -145,9 +145,4 @@
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= qpace_progress,
 	.init_IRQ		= iic_init_IRQ,
-#ifdef CONFIG_KEXEC
-	.machine_kexec		= default_machine_kexec,
-	.machine_kexec_prepare	= default_machine_kexec_prepare,
-	.machine_crash_shutdown	= default_machine_crash_shutdown,
-#endif
 };
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 8547e86..acfacce 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -37,6 +37,7 @@
 #include <asm/spu_csa.h>
 #include <asm/xmon.h>
 #include <asm/prom.h>
+#include <asm/kexec.h>
 
 const struct spu_management_ops *spu_management_ops;
 EXPORT_SYMBOL_GPL(spu_management_ops);
@@ -727,6 +728,75 @@
 
 static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
 
+#ifdef CONFIG_KEXEC
+
+struct crash_spu_info {
+	struct spu *spu;
+	u32 saved_spu_runcntl_RW;
+	u32 saved_spu_status_R;
+	u32 saved_spu_npc_RW;
+	u64 saved_mfc_sr1_RW;
+	u64 saved_mfc_dar;
+	u64 saved_mfc_dsisr;
+};
+
+#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
+static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
+
+static void crash_kexec_stop_spus(void)
+{
+	struct spu *spu;
+	int i;
+	u64 tmp;
+
+	for (i = 0; i < CRASH_NUM_SPUS; i++) {
+		if (!crash_spu_info[i].spu)
+			continue;
+
+		spu = crash_spu_info[i].spu;
+
+		crash_spu_info[i].saved_spu_runcntl_RW =
+			in_be32(&spu->problem->spu_runcntl_RW);
+		crash_spu_info[i].saved_spu_status_R =
+			in_be32(&spu->problem->spu_status_R);
+		crash_spu_info[i].saved_spu_npc_RW =
+			in_be32(&spu->problem->spu_npc_RW);
+
+		crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
+		crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
+		tmp = spu_mfc_sr1_get(spu);
+		crash_spu_info[i].saved_mfc_sr1_RW = tmp;
+
+		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+		spu_mfc_sr1_set(spu, tmp);
+
+		__delay(200);
+	}
+}
+
+static void crash_register_spus(struct list_head *list)
+{
+	struct spu *spu;
+	int ret;
+
+	list_for_each_entry(spu, list, full_list) {
+		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
+			continue;
+
+		crash_spu_info[spu->number].spu = spu;
+	}
+
+	ret = crash_shutdown_register(&crash_kexec_stop_spus);
+	if (ret)
+		printk(KERN_ERR "Could not register SPU crash handler");
+}
+
+#else
+static inline void crash_register_spus(struct list_head *list)
+{
+}
+#endif
+
 static int __init init_spu_base(void)
 {
 	int i, ret = 0;
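
Illustrative aside (not part of the patch): crash_kexec_stop_spus() above follows a save-then-stop pattern -- snapshot each registered unit's registers into a static table, then clear its run-control bit so the crash kernel finds quiesced hardware together with the saved state. A small user-space sketch of the same pattern, with invented types and register names:

#include <stdio.h>
#include <stdint.h>

#define NUM_DEVS		4
#define RUN_CONTROL_MASK	0x1u

struct mock_spu { uint32_t runcntl; uint32_t status; };
struct crash_info {
	struct mock_spu *spu;
	uint32_t saved_runcntl;
	uint32_t saved_status;
};

static struct crash_info crash_info[NUM_DEVS];

static void crash_stop_all(void)
{
	int i;

	for (i = 0; i < NUM_DEVS; i++) {
		struct mock_spu *spu = crash_info[i].spu;

		if (!spu)
			continue;	/* slot never registered */

		/* save first ... */
		crash_info[i].saved_runcntl = spu->runcntl;
		crash_info[i].saved_status  = spu->status;

		/* ... then stop the unit */
		spu->runcntl &= ~RUN_CONTROL_MASK;
	}
}

int main(void)
{
	struct mock_spu spu0 = { .runcntl = 0x1, .status = 0x3 };

	crash_info[0].spu = &spu0;	/* "crash_register_spus()" */
	crash_stop_all();
	printf("saved runcntl=0x%x, live runcntl=0x%x\n",
	       (unsigned)crash_info[0].saved_runcntl, (unsigned)spu0.runcntl);
	return 0;
}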
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 02f7b11..3c7c3f8 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -219,24 +219,17 @@
 	loff_t pos = *ppos;
 	int ret;
 
-	if (pos < 0)
-		return -EINVAL;
 	if (pos > LS_SIZE)
 		return -EFBIG;
-	if (size > LS_SIZE - pos)
-		size = LS_SIZE - pos;
 
 	ret = spu_acquire(ctx);
 	if (ret)
 		return ret;
 
 	local_store = ctx->ops->get_ls(ctx);
-	ret = copy_from_user(local_store + pos, buffer, size);
+	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
 	spu_release(ctx);
 
-	if (ret)
-		return -EFAULT;
-	*ppos = pos + size;
 	return size;
 }
 
@@ -574,18 +567,15 @@
 	if (*pos >= sizeof(lscsa->gprs))
 		return -EFBIG;
 
-	size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
-	*pos += size;
-
 	ret = spu_acquire_saved(ctx);
 	if (ret)
 		return ret;
 
-	ret = copy_from_user((char *)lscsa->gprs + *pos - size,
-			     buffer, size) ? -EFAULT : size;
+	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
+					buffer, size);
 
 	spu_release_saved(ctx);
-	return ret;
+	return size;
 }
 
 static const struct file_operations spufs_regs_fops = {
@@ -630,18 +620,15 @@
 	if (*pos >= sizeof(lscsa->fpcr))
 		return -EFBIG;
 
-	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
-
 	ret = spu_acquire_saved(ctx);
 	if (ret)
 		return ret;
 
-	*pos += size;
-	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
-			     buffer, size) ? -EFAULT : size;
+	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
+					buffer, size);
 
 	spu_release_saved(ctx);
-	return ret;
+	return size;
 }
 
 static const struct file_operations spufs_fpcr_fops = {
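
Illustrative aside (not part of the patch): the spufs hunks above replace open-coded bounds checks plus copy_from_user() with simple_write_to_buffer(). A minimal user-space approximation of the contract that helper provides (clamp the write to the space remaining after *ppos, copy, advance *ppos, return the byte count); the real kernel helper copies from user space and returns -EFAULT on faults, and the names here are invented:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t write_to_buffer(void *to, size_t available, off_t *ppos,
			       const void *from, size_t count)
{
	off_t pos = *ppos;

	if (pos < 0)
		return -1;			/* -EINVAL in the kernel */
	if ((size_t)pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;	/* clamp to remaining space */

	memcpy((char *)to + pos, from, count);
	*ppos = pos + count;

	return count;
}

int main(void)
{
	char regs[8] = { 0 };
	off_t pos = 6;
	ssize_t n;

	/* only 2 bytes of room left -> the 4-byte write is clamped */
	n = write_to_buffer(regs, sizeof(regs), &pos, "ABCD", 4);
	printf("wrote %zd bytes, pos now %lld\n", n, (long long)pos);
	return 0;
}

This is the behavior the removed min_t()/copy_from_user() sequences were hand-rolling, which is why the hunks can simply return the helper's result.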
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index a101abf..3b894f5 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -36,10 +36,9 @@
 	struct spu_lscsa *lscsa;
 	unsigned char *p;
 
-	lscsa = vmalloc(sizeof(struct spu_lscsa));
+	lscsa = vzalloc(sizeof(struct spu_lscsa));
 	if (!lscsa)
 		return -ENOMEM;
-	memset(lscsa, 0, sizeof(struct spu_lscsa));
 	csa->lscsa = lscsa;
 
 	/* Set LS pages reserved to allow for user-space mapping. */
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index 054dfe5..f803f4b 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -29,6 +29,10 @@
 
 extern spinlock_t rtc_lock;
 
+#define NVRAM_AS0  0x74
+#define NVRAM_AS1  0x75
+#define NVRAM_DATA 0x77
+
 static int nvram_as1 = NVRAM_AS1;
 static int nvram_as0 = NVRAM_AS0;
 static int nvram_data = NVRAM_DATA;
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index 1106fd9..a138e14 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -75,14 +75,6 @@
 	flipper_quiesce();
 }
 
-#ifdef CONFIG_KEXEC
-static int gamecube_kexec_prepare(struct kimage *image)
-{
-	return 0;
-}
-#endif /* CONFIG_KEXEC */
-
-
 define_machine(gamecube) {
 	.name			= "gamecube",
 	.probe			= gamecube_probe,
@@ -95,9 +87,6 @@
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 	.machine_shutdown	= gamecube_shutdown,
-#ifdef CONFIG_KEXEC
-	.machine_kexec_prepare	= gamecube_kexec_prepare,
-#endif
 };
 
 
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 649473a..1b5dc1a 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>
-#include <linux/kexec.h>
 #include <linux/of_platform.h>
 #include <linux/memblock.h>
 #include <mm/mmu_decl.h>
@@ -226,13 +225,6 @@
 	flipper_quiesce();
 }
 
-#ifdef CONFIG_KEXEC
-static int wii_machine_kexec_prepare(struct kimage *image)
-{
-	return 0;
-}
-#endif /* CONFIG_KEXEC */
-
 define_machine(wii) {
 	.name			= "wii",
 	.probe			= wii_probe,
@@ -246,9 +238,6 @@
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 	.machine_shutdown	= wii_shutdown,
-#ifdef CONFIG_KEXEC
-	.machine_kexec_prepare	= wii_machine_kexec_prepare,
-#endif
 };
 
 static struct of_device_id wii_of_bus[] = {
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 47a20cf..e5bc9f7 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -2,7 +2,7 @@
 	bool "IBM Legacy iSeries"
 	depends on PPC64 && PPC_BOOK3S
 	select PPC_INDIRECT_IO
-	select PPC_PCI_CHOICE if EMBEDDED
+	select PPC_PCI_CHOICE if EXPERT
 
 menu "iSeries device drivers"
 	depends on PPC_ISERIES
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index fdb7384..f0491cc 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -242,8 +242,8 @@
 	pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA  */
 	pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (lppaca_of(i).dyn_proc_status >= 2)
+	for (i = 0; i < NR_LPPACAS; i++) {
+		if (lppaca[i].dyn_proc_status >= 2)
 			continue;
 
 		snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@
 
 		dt_prop_str(dt, "device_type", device_type_cpu);
 
-		index = lppaca_of(i).dyn_hv_phys_proc_index;
+		index = lppaca[i].dyn_hv_phys_proc_index;
 		d = &xIoHriProcessorVpd[index];
 
 		dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 42d0a88..b5e026b 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1045,71 +1045,9 @@
 	.write		= mf_side_proc_write,
 };
 
-#if 0
-static void mf_getSrcHistory(char *buffer, int size)
-{
-	struct IplTypeReturnStuff return_stuff;
-	struct pending_event *ev = new_pending_event();
-	int rc = 0;
-	char *pages[4];
-
-	pages[0] = kmalloc(4096, GFP_ATOMIC);
-	pages[1] = kmalloc(4096, GFP_ATOMIC);
-	pages[2] = kmalloc(4096, GFP_ATOMIC);
-	pages[3] = kmalloc(4096, GFP_ATOMIC);
-	if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
-			 || (pages[2] == NULL) || (pages[3] == NULL))
-		return -ENOMEM;
-
-	return_stuff.xType = 0;
-	return_stuff.xRc = 0;
-	return_stuff.xDone = 0;
-	ev->event.hp_lp_event.xSubtype = 6;
-	ev->event.hp_lp_event.x.xSubtypeData =
-		subtype_data('M', 'F', 'V', 'I');
-	ev->event.data.vsp_cmd.xEvent = &return_stuff;
-	ev->event.data.vsp_cmd.cmd = 4;
-	ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
-	ev->event.data.vsp_cmd.result_code = 0xFF;
-	ev->event.data.vsp_cmd.reserved = 0;
-	ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
-	ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
-	ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
-	ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
-	mb();
-	if (signal_event(ev) != 0)
-		return;
-
- 	while (return_stuff.xDone != 1)
- 		udelay(10);
- 	if (return_stuff.xRc == 0)
- 		memcpy(buffer, pages[0], size);
-	kfree(pages[0]);
-	kfree(pages[1]);
-	kfree(pages[2]);
-	kfree(pages[3]);
-}
-#endif
-
 static int mf_src_proc_show(struct seq_file *m, void *v)
 {
-#if 0
-	int len;
-
-	mf_getSrcHistory(page, count);
-	len = count;
-	len -= off;
-	if (len < count) {
-		*eof = 1;
-		if (len <= 0)
-			return 0;
-	} else
-		len = count;
-	*start = page + off;
-	return len;
-#else
 	return 0;
-#endif
 }
 
 static int mf_src_proc_open(struct inode *inode, struct file *file)
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index b086341..2946ae1 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -680,6 +680,7 @@
 	 * on but calling this function multiple times is fine.
 	 */
 	identify_cpu(0, mfspr(SPRN_PVR));
+	initialise_paca(&boot_paca, 0);
 
 	powerpc_firmware_features |= FW_FEATURE_ISERIES;
 	powerpc_firmware_features |= FW_FEATURE_LPAR;
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 1f9fb2c..14943ef 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -156,20 +156,12 @@
 
 static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
 {
-	struct device_node *dn;
-
 	pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
 
 	if (!iommu_table_iobmap_inited) {
 		iommu_table_iobmap_inited = 1;
 		iommu_table_iobmap_setup();
 	}
-
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_iobmap;
-
 }
 
 
@@ -192,9 +184,6 @@
 	set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
 }
 
-static void pci_dma_bus_setup_null(struct pci_bus *b) { }
-static void pci_dma_dev_setup_null(struct pci_dev *d) { }
-
 int __init iob_init(struct device_node *dn)
 {
 	unsigned long tmp;
@@ -251,14 +240,8 @@
 	iommu_off = of_chosen &&
 			of_get_property(of_chosen, "linux,iommu-off", NULL);
 #endif
-	if (iommu_off) {
-		/* Direct I/O, IOMMU off */
-		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null;
-		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null;
-		set_pci_dma_ops(&dma_direct_ops);
-
+	if (iommu_off)
 		return;
-	}
 
 	iob_init(NULL);
 
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 9deb274..d5aceb7 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -506,6 +506,15 @@
 		of_platform_device_create(np, "smu", NULL);
 		of_node_put(np);
 	}
+	np = of_find_node_by_type(NULL, "fcu");
+	if (np == NULL) {
+		/* Some machines have strangely broken device-tree */
+		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
+	}
+	if (np) {
+		of_platform_device_create(np, "temperature", NULL);
+		of_node_put(np);
+	}
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index b341018..6c4b583 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -566,10 +566,10 @@
 	case PS3_DEV_TYPE_STOR_DISK:
 		result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK);
 
-		/* Some devices are not accessable from the Other OS lpar. */
+		/* Some devices are not accessible from the Other OS lpar. */
 		if (result == -ENODEV) {
 			result = 0;
-			pr_debug("%s:%u: not accessable\n", __func__,
+			pr_debug("%s:%u: not accessible\n", __func__,
 				 __LINE__);
 		}
 
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 59d9712..92290ff 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -44,7 +44,7 @@
  * @lock:
  * @ipi_debug_brk_mask:
  *
- * The HV mantains per SMT thread mappings of HV outlet to HV plug on
+ * The HV maintains per SMT thread mappings of HV outlet to HV plug on
  * behalf of the guest.  These mappings are implemented as 256 bit guest
  * supplied bitmaps indexed by plug number.  The addresses of the bitmaps
  * are registered with the HV through lv1_configure_irq_state_bitmap().
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 3139814..5b3da4b 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -10,7 +10,7 @@
 	select RTAS_ERROR_LOGGING
 	select PPC_UDBG_16550
 	select PPC_NATIVE
-	select PPC_PCI_CHOICE if EMBEDDED
+	select PPC_PCI_CHOICE if EXPERT
 	default y
 
 config PPC_SPLPAR
@@ -24,15 +24,25 @@
 	  two or more partitions.
 
 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EMBEDDED
+	bool "PCI Extended Error Handling (EEH)" if EXPERT
 	depends on PPC_PSERIES && PCI
-	default y if !EMBEDDED
+	default y if !EXPERT
 
 config PSERIES_MSI
        bool
        depends on PCI_MSI && EEH
        default y
 
+config PSERIES_ENERGY
+	tristate "pSeries energy management capabilities driver"
+	depends on PPC_PSERIES
+	default y
+	help
+	  Provides an interface to platform energy management capabilities
+	  on supported pSeries platforms.
+	  Provides: /sys/devices/system/cpu/pseries_(de)activate_hint_list
+	  and /sys/devices/system/cpu/cpuN/pseries_(de)activate_hint
+
 config SCANLOG
 	tristate "Scanlog dump interface"
 	depends on RTAS_PROC && PPC_PSERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 59eb8bd..fc52378 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_KEXEC)	+= kexec.o
 obj-$(CONFIG_PCI)	+= pci.o pci_dlpar.o
 obj-$(CONFIG_PSERIES_MSI)	+= msi.o
+obj-$(CONFIG_PSERIES_ENERGY)	+= pseries_energy.o
 
 obj-$(CONFIG_HOTPLUG_CPU)	+= hotplug-cpu.o
 obj-$(CONFIG_MEMORY_HOTPLUG)	+= hotplug-memory.o
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 0a14d8c..0b0eff0 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -55,6 +55,7 @@
 	{FW_FEATURE_XDABR,		"hcall-xdabr"},
 	{FW_FEATURE_MULTITCE,		"hcall-multi-tce"},
 	{FW_FEATURE_SPLPAR,		"hcall-splpar"},
+	{FW_FEATURE_VPHN,		"hcall-vphn"},
 };
 
 /* Build up the firmware features bitmask using the contents of
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 48d2057..fd05fde 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 	
 #define STK_PARM(i)     (48 + ((i)-3)*8)
 
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index e19ff02..f106662 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -55,7 +55,7 @@
 static int hc_show(struct seq_file *m, void *p)
 {
 	unsigned long h_num = (unsigned long)p;
-	struct hcall_stats *hs = (struct hcall_stats *)m->private;
+	struct hcall_stats *hs = m->private;
 
 	if (hs[h_num].num_calls) {
 		if (cpu_has_feature(CPU_FTR_PURR))
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a77bcae..edea60b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -140,7 +140,7 @@
 	return ret;
 }
 
-static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
+static DEFINE_PER_CPU(u64 *, tce_page);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				     long npages, unsigned long uaddr,
@@ -323,14 +323,13 @@
 static void iommu_table_setparms_lpar(struct pci_controller *phb,
 				      struct device_node *dn,
 				      struct iommu_table *tbl,
-				      const void *dma_window,
-				      int bussubno)
+				      const void *dma_window)
 {
 	unsigned long offset, size;
 
-	tbl->it_busno  = bussubno;
 	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
 
+	tbl->it_busno = phb->bus->number;
 	tbl->it_base   = 0;
 	tbl->it_blocksize  = 16;
 	tbl->it_type = TCE_PCI;
@@ -450,14 +449,10 @@
 	if (!ppci->iommu_table) {
 		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   ppci->phb->node);
-		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
-			bus->number);
+		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
 		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
 		pr_debug("  created table: %p\n", ppci->iommu_table);
 	}
-
-	if (pdn != dn)
-		PCI_DN(dn)->iommu_table = ppci->iommu_table;
 }
 
 
@@ -533,21 +528,11 @@
 	}
 	pr_debug("  parent is %s\n", pdn->full_name);
 
-	/* Check for parent == NULL so we don't try to setup the empty EADS
-	 * slots on POWER4 machines.
-	 */
-	if (dma_window == NULL || pdn->parent == NULL) {
-		pr_debug("  no dma window for device, linking to parent\n");
-		set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
-		return;
-	}
-
 	pci = PCI_DN(pdn);
 	if (!pci->iommu_table) {
 		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   pci->phb->node);
-		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
-			pci->phb->bus->number);
+		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
 		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
 		pr_debug("  created table: %p\n", pci->iommu_table);
 	} else {
@@ -571,8 +556,7 @@
 
 	switch (action) {
 	case PSERIES_RECONFIG_REMOVE:
-		if (pci && pci->iommu_table &&
-		    of_get_property(np, "ibm,dma-window", NULL))
+		if (pci && pci->iommu_table)
 			iommu_free_table(pci->iommu_table, np->full_name);
 		break;
 	default:
@@ -589,13 +573,8 @@
 /* These are called very early. */
 void iommu_init_early_pSeries(void)
 {
-	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) {
-		/* Direct I/O, IOMMU off */
-		ppc_md.pci_dma_dev_setup = NULL;
-		ppc_md.pci_dma_bus_setup = NULL;
-		set_pci_dma_ops(&dma_direct_ops);
+	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
 		return;
-	}
 
 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
@@ -622,3 +601,17 @@
 	set_pci_dma_ops(&dma_iommu_ops);
 }
 
+static int __init disable_multitce(char *str)
+{
+	if (strcmp(str, "off") == 0 &&
+	    firmware_has_feature(FW_FEATURE_LPAR) &&
+	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
+		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
+		ppc_md.tce_build = tce_build_pSeriesLP;
+		ppc_md.tce_free	 = tce_free_pSeriesLP;
+		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
+	}
+	return 1;
+}
+
+__setup("multitce=", disable_multitce);
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 53cbd53..77d38a5 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -61,13 +61,3 @@
 {
 	ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
 }
-
-static int __init pseries_kexec_setup(void)
-{
-	ppc_md.machine_kexec = default_machine_kexec;
-	ppc_md.machine_kexec_prepare = default_machine_kexec_prepare;
-	ppc_md.machine_crash_shutdown = default_machine_crash_shutdown;
-
-	return 0;
-}
-machine_device_initcall(pseries, pseries_kexec_setup);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f129040..ca5d589 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -627,6 +627,18 @@
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
 }
 
+static int __init disable_bulk_remove(char *str)
+{
+	if (strcmp(str, "off") == 0 &&
+	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
+		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
+		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
+	}
+	return 1;
+}
+
+__setup("bulk_remove=", disable_bulk_remove);
+
 void __init hpte_init_lpar(void)
 {
 	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
@@ -701,6 +713,13 @@
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
 	hcall_tracepoint_refcount++;
@@ -713,12 +732,42 @@
 
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_entry(opcode, args);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 
 void __trace_hcall_exit(long opcode, unsigned long retval,
 			unsigned long *retbuf)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_exit(opcode, retval, retbuf);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 #endif
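
Illustrative aside (not part of the patch): the lpar.c hunks guard the hcall tracepoints with a per-CPU hcall_trace_depth counter (with interrupts disabled) so that an hcall issued from inside the tracer, e.g. a spinlock calling H_YIELD, does not recurse forever. A minimal user-space sketch of the same guard using a C11 thread-local counter in place of the per-CPU variable; names are invented:

#include <stdio.h>

static _Thread_local unsigned int trace_depth;	/* per-thread, like per-CPU above */

static void trace_hook(const char *what);

/* Wrapper corresponding to __trace_hcall_entry(): guard against recursion. */
static void trace_entry(const char *what)
{
	if (trace_depth)	/* the hook is already running on this thread */
		return;

	trace_depth++;
	trace_hook(what);
	trace_depth--;
}

/* A hook that re-enters the traced path, e.g. by issuing another hcall. */
static void trace_hook(const char *what)
{
	printf("trace: %s\n", what);
	trace_entry("hcall issued from inside the tracer");	/* silently skipped */
}

int main(void)
{
	trace_entry("H_EXAMPLE");	/* prints once, recursion is suppressed */
	return 0;
}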
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index bc3c7f2..7e828ba 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -22,11 +22,25 @@
 #include <asm/prom.h>
 #include <asm/machdep.h>
 
+/* Max bytes to read/write in one go */
+#define NVRW_CNT 0x20
+
 static unsigned int nvram_size;
 static int nvram_fetch, nvram_store;
 static char nvram_buf[NVRW_CNT];	/* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 
+static long nvram_error_log_index = -1;
+static long nvram_error_log_size = 0;
+
+struct err_log_info {
+	int error_type;
+	unsigned int seq_num;
+};
+#define NVRAM_MAX_REQ		2079
+#define NVRAM_MIN_REQ		1055
+
+#define NVRAM_LOG_PART_NAME	"ibm,rtas-log"
 
 static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
 {
@@ -119,6 +133,197 @@
 	return nvram_size ? nvram_size : -ENODEV;
 }
 
+
+/* nvram_write_error_log
+ *
+ * We need to buffer the error logs into nvram to ensure that we have
+ * the failure information to decode.  If we have a severe error there
+ * is no way to guarantee that the OS or the machine is in a state to
+ * get back to user land and write the error to disk.  For example if
+ * the SCSI device driver causes a Machine Check by writing to a bad
+ * IO address, there is no way of guaranteeing that the device driver
+ * is in any state in which it would also be able to write the error data
+ * captured to disk, thus we buffer it in NVRAM for analysis on the
+ * next boot.
+ *
+ * In NVRAM the partition containing the error log buffer will look like:
+ * Header (in bytes):
+ * +-----------+----------+--------+------------+------------------+
+ * | signature | checksum | length | name       | data             |
+ * |0          |1         |2      3|4         15|16        length-1|
+ * +-----------+----------+--------+------------+------------------+
+ *
+ * The 'data' section would look like (in bytes):
+ * +--------------+------------+-----------------------------------+
+ * | event_logged | sequence # | error log                         |
+ * |0            3|4          7|8            nvram_error_log_size-1|
+ * +--------------+------------+-----------------------------------+
+ *
+ * event_logged: 0 if event has not been logged to syslog, 1 if it has
+ * sequence #: The unique sequence # for each event. (until it wraps)
+ * error log: The error log from event_scan
+ */
+int nvram_write_error_log(char * buff, int length,
+                          unsigned int err_type, unsigned int error_log_cnt)
+{
+	int rc;
+	loff_t tmp_index;
+	struct err_log_info info;
+	
+	if (nvram_error_log_index == -1) {
+		return -ESPIPE;
+	}
+
+	if (length > nvram_error_log_size) {
+		length = nvram_error_log_size;
+	}
+
+	info.error_type = err_type;
+	info.seq_num = error_log_cnt;
+
+	tmp_index = nvram_error_log_index;
+
+	rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+
+	rc = ppc_md.nvram_write(buff, length, &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+	
+	return 0;
+}
+
+/* nvram_read_error_log
+ *
+ * Reads nvram for error log for at most 'length'
+ */
+int nvram_read_error_log(char * buff, int length,
+                         unsigned int * err_type, unsigned int * error_log_cnt)
+{
+	int rc;
+	loff_t tmp_index;
+	struct err_log_info info;
+	
+	if (nvram_error_log_index == -1)
+		return -1;
+
+	if (length > nvram_error_log_size)
+		length = nvram_error_log_size;
+
+	tmp_index = nvram_error_log_index;
+
+	rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+		return rc;
+	}
+
+	rc = ppc_md.nvram_read(buff, length, &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+		return rc;
+	}
+
+	*error_log_cnt = info.seq_num;
+	*err_type = info.error_type;
+
+	return 0;
+}
+
+/* This doesn't actually zero anything, but it sets the event_logged
+ * word to tell that this event is safely in syslog.
+ */
+int nvram_clear_error_log(void)
+{
+	loff_t tmp_index;
+	int clear_word = ERR_FLAG_ALREADY_LOGGED;
+	int rc;
+
+	if (nvram_error_log_index == -1)
+		return -1;
+
+	tmp_index = nvram_error_log_index;
+	
+	rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* pseries_nvram_init_log_partition
+ *
+ * This will set up the partition we need for buffering the
+ * error logs and clean up partitions if needed.
+ *
+ * The general strategy is the following:
+ * 1.) If there is a log partition large enough, use it.
+ * 2.) If there is none large enough, search
+ * for a free partition that is large enough.
+ * 3.) If there is no free partition large enough, remove
+ * _all_ OS partitions and consolidate the space.
+ * 4.) First try getting a chunk that will satisfy the maximum
+ * error log size (NVRAM_MAX_REQ).
+ * 5.) If the max chunk cannot be allocated, try finding a chunk
+ * that will satisfy the minimum needed (NVRAM_MIN_REQ).
+ */
+static int __init pseries_nvram_init_log_partition(void)
+{
+	loff_t p;
+	int size;
+
+	/* Scan nvram for partitions */
+	nvram_scan_partitions();
+
+	/* Look for ours */
+	p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size);
+
+	/* Found one but too small, remove it */
+	if (p && size < NVRAM_MIN_REQ) {
+		pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition"
+			",removing it...");
+		nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS);
+		p = 0;
+	}
+
+	/* Create one if we didn't find one */
+	if (!p) {
+		p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS,
+					   NVRAM_MAX_REQ, NVRAM_MIN_REQ);
+		/* No room for it, try to get rid of any OS partition
+		 * and try again
+		 */
+		if (p == -ENOSPC) {
+			pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME
+				" partition, deleting all OS partitions...");
+			nvram_remove_partition(NULL, NVRAM_SIG_OS);
+			p = nvram_create_partition(NVRAM_LOG_PART_NAME,
+						   NVRAM_SIG_OS, NVRAM_MAX_REQ,
+						   NVRAM_MIN_REQ);
+		}
+	}
+
+	if (p <= 0) {
+		pr_err("nvram: Failed to find or create "NVRAM_LOG_PART_NAME
+		       " partition, err %d\n", (int)p);
+		return 0;
+	}
+
+	nvram_error_log_index = p;
+	nvram_error_log_size = nvram_get_partition_size(p) -
+		sizeof(struct err_log_info);
+	
+	return 0;
+}
+machine_arch_initcall(pseries, pseries_nvram_init_log_partition);
+
 int __init pSeries_nvram_init(void)
 {
 	struct device_node *nvram;
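
Illustrative aside (not part of the patch): the record layout documented above is a struct err_log_info header (error type plus sequence number) written at the partition offset, followed by the log text. A minimal user-space sketch of nvram_write_error_log()/nvram_read_error_log() over a plain byte array standing in for NVRAM; the offsets, sizes and helper names are invented for the example:

#include <stdio.h>
#include <string.h>

struct err_log_info {		/* same shape as the header described above */
	int error_type;
	unsigned int seq_num;
};

static unsigned char nvram[256];	/* stand-in for the NVRAM partition */
static long log_index = 16;		/* offset of the log area */
static long log_size  = 128;		/* bytes available for one record */

static int write_error_log(const char *buf, int length,
			   unsigned int err_type, unsigned int seq)
{
	struct err_log_info info = { .error_type = (int)err_type, .seq_num = seq };
	long pos = log_index;

	if (length > log_size)
		length = log_size;	/* clamp, as the kernel code does */

	memcpy(nvram + pos, &info, sizeof(info));	/* header first */
	pos += sizeof(info);
	memcpy(nvram + pos, buf, length);		/* then the log itself */
	return 0;
}

static int read_error_log(char *buf, int length,
			  unsigned int *err_type, unsigned int *seq)
{
	struct err_log_info info;
	long pos = log_index;

	memcpy(&info, nvram + pos, sizeof(info));
	pos += sizeof(info);
	memcpy(buf, nvram + pos, length);

	*err_type = (unsigned int)info.error_type;
	*seq = info.seq_num;
	return 0;
}

int main(void)
{
	char out[32] = { 0 };
	unsigned int type, seq;

	write_error_log("machine check", 14, 2, 41);
	read_error_log(out, 14, &type, &seq);
	printf("type=%u seq=%u log=\"%s\"\n", type, seq, out);
	return 0;
}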
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
new file mode 100644
index 0000000..c8b3c69
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -0,0 +1,326 @@
+/*
+ * POWER platform energy management driver
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This pseries platform device driver provides access to
+ * platform energy management capabilities.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <asm/cputhreads.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+
+
+#define MODULE_VERS "1.0"
+#define MODULE_NAME "pseries_energy"
+
+/* Driver flags */
+
+static int sysfs_entries;
+
+/* Helper routines */
+
+/*
+ * Routine to detect firmware support for hcall
+ * return 1 if H_BEST_ENERGY is supported
+ * else return 0
+ */
+
+static int check_for_h_best_energy(void)
+{
+	struct device_node *rtas = NULL;
+	const char *hypertas, *s;
+	int length;
+	int rc = 0;
+
+	rtas = of_find_node_by_path("/rtas");
+	if (!rtas)
+		return 0;
+
+	hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
+	if (!hypertas) {
+		of_node_put(rtas);
+		return 0;
+	}
+
+	/* hypertas will have a list of strings with hcall names */
+	for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
+		if (!strncmp("hcall-best-energy-1", s, 19)) {
+			rc = 1; /* Found the string */
+			break;
+		}
+	}
+	of_node_put(rtas);
+	return rc;
+}
+
+/* Helper Routines to convert between drc_index to cpu numbers */
+
+static u32 cpu_to_drc_index(int cpu)
+{
+	struct device_node *dn = NULL;
+	const int *indexes;
+	int i;
+	int rc = 1;
+	u32 ret = 0;
+
+	dn = of_find_node_by_path("/cpus");
+	if (dn == NULL)
+		goto err;
+	indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+	if (indexes == NULL)
+		goto err_of_node_put;
+	/* Convert logical cpu number to core number */
+	i = cpu_core_index_of_thread(cpu);
+	/*
+	 * The first element indexes[0] is the number of drc_indexes
+	 * returned in the list.  Hence i+1 will get the drc_index
+	 * corresponding to core number i.
+	 */
+	WARN_ON(i > indexes[0]);
+	ret = indexes[i + 1];
+	rc = 0;
+
+err_of_node_put:
+	of_node_put(dn);
+err:
+	if (rc)
+		printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu);
+	return ret;
+}
+
+static int drc_index_to_cpu(u32 drc_index)
+{
+	struct device_node *dn = NULL;
+	const int *indexes;
+	int i, cpu = 0;
+	int rc = 1;
+
+	dn = of_find_node_by_path("/cpus");
+	if (dn == NULL)
+		goto err;
+	indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+	if (indexes == NULL)
+		goto err_of_node_put;
+	/*
+	 * First element in the array is the number of drc_indexes
+	 * returned.  Search through the list to find the matching
+	 * drc_index and get the core number
+	 */
+	for (i = 0; i < indexes[0]; i++) {
+		if (indexes[i + 1] == drc_index)
+			break;
+	}
+	/* Convert core number to logical cpu number */
+	cpu = cpu_first_thread_of_core(i);
+	rc = 0;
+
+err_of_node_put:
+	of_node_put(dn);
+err:
+	if (rc)
+		printk(KERN_WARNING "drc_index_to_cpu(%d) failed", drc_index);
+	return cpu;
+}
+
+/*
+ * pseries hypervisor call H_BEST_ENERGY provides hints to OS on
+ * preferred logical cpus to activate or deactivate for optimized
+ * energy consumption.
+ */
+
+#define FLAGS_MODE1	0x004E200000080E01
+#define FLAGS_MODE2	0x004E200000080401
+#define FLAGS_ACTIVATE  0x100
+
+static ssize_t get_best_energy_list(char *page, int activate)
+{
+	int rc, cnt, i, cpu;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long flags = 0;
+	u32 *buf_page;
+	char *s = page;
+
+	buf_page = (u32 *) get_zeroed_page(GFP_KERNEL);
+	if (!buf_page)
+		return -ENOMEM;
+
+	flags = FLAGS_MODE1;
+	if (activate)
+		flags |= FLAGS_ACTIVATE;
+
+	rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page),
+				0, 0, 0, 0, 0, 0);
+	if (rc != H_SUCCESS) {
+		free_page((unsigned long) buf_page);
+		return -EINVAL;
+	}
+
+	cnt = retbuf[0];
+	for (i = 0; i < cnt; i++) {
+		cpu = drc_index_to_cpu(buf_page[2*i+1]);
+		if ((cpu_online(cpu) && !activate) ||
+		    (!cpu_online(cpu) && activate))
+			s += sprintf(s, "%d,", cpu);
+	}
+	if (s > page) { /* Something to show */
+		s--; /* Suppress last comma */
+		s += sprintf(s, "\n");
+	}
+
+	free_page((unsigned long) buf_page);
+	return s-page;
+}
+
+static ssize_t get_best_energy_data(struct sys_device *dev,
+					char *page, int activate)
+{
+	int rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long flags = 0;
+
+	flags = FLAGS_MODE2;
+	if (activate)
+		flags |= FLAGS_ACTIVATE;
+
+	rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags,
+				cpu_to_drc_index(dev->id),
+				0, 0, 0, 0, 0, 0, 0);
+
+	if (rc != H_SUCCESS)
+		return -EINVAL;
+
+	return sprintf(page, "%lu\n", retbuf[1] >> 32);
+}
+
+/* Wrapper functions */
+
+static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr, char *page)
+{
+	return get_best_energy_list(page, 1);
+}
+
+static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr, char *page)
+{
+	return get_best_energy_list(page, 0);
+}
+
+static ssize_t percpu_activate_hint_show(struct sys_device *dev,
+			struct sysdev_attribute *attr, char *page)
+{
+	return get_best_energy_data(dev, page, 1);
+}
+
+static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
+			struct sysdev_attribute *attr, char *page)
+{
+	return get_best_energy_data(dev, page, 0);
+}
+
+/*
+ * Create sysfs interface:
+ * /sys/devices/system/cpu/pseries_activate_hint_list
+ * /sys/devices/system/cpu/pseries_deactivate_hint_list
+ *	Comma separated list of cpus to activate or deactivate
+ * /sys/devices/system/cpu/cpuN/pseries_activate_hint
+ * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint
+ *	Per-cpu value of the hint
+ */
+
+struct sysdev_class_attribute attr_cpu_activate_hint_list =
+		_SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
+		cpu_activate_hint_list_show, NULL);
+
+struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
+		_SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
+		cpu_deactivate_hint_list_show, NULL);
+
+struct sysdev_attribute attr_percpu_activate_hint =
+		_SYSDEV_ATTR(pseries_activate_hint, 0444,
+		percpu_activate_hint_show, NULL);
+
+struct sysdev_attribute attr_percpu_deactivate_hint =
+		_SYSDEV_ATTR(pseries_deactivate_hint, 0444,
+		percpu_deactivate_hint_show, NULL);
+
+static int __init pseries_energy_init(void)
+{
+	int cpu, err;
+	struct sys_device *cpu_sys_dev;
+
+	if (!check_for_h_best_energy()) {
+		printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
+		return 0;
+	}
+	/* Create the sysfs files */
+	err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_activate_hint_list.attr);
+	if (!err)
+		err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_deactivate_hint_list.attr);
+
+	if (err)
+		return err;
+	for_each_possible_cpu(cpu) {
+		cpu_sys_dev = get_cpu_sysdev(cpu);
+		err = sysfs_create_file(&cpu_sys_dev->kobj,
+				&attr_percpu_activate_hint.attr);
+		if (err)
+			break;
+		err = sysfs_create_file(&cpu_sys_dev->kobj,
+				&attr_percpu_deactivate_hint.attr);
+		if (err)
+			break;
+	}
+
+	if (err)
+		return err;
+
+	sysfs_entries = 1; /* Remove entries on cleanup */
+	return 0;
+}
+
+static void __exit pseries_energy_cleanup(void)
+{
+	int cpu;
+	struct sys_device *cpu_sys_dev;
+
+	if (!sysfs_entries)
+		return;
+
+	/* Remove the sysfs files */
+	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_activate_hint_list.attr);
+
+	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_deactivate_hint_list.attr);
+
+	for_each_possible_cpu(cpu) {
+		cpu_sys_dev = get_cpu_sysdev(cpu);
+		sysfs_remove_file(&cpu_sys_dev->kobj,
+				&attr_percpu_activate_hint.attr);
+		sysfs_remove_file(&cpu_sys_dev->kobj,
+				&attr_percpu_deactivate_hint.attr);
+	}
+}
+
+module_init(pseries_energy_init);
+module_exit(pseries_energy_cleanup);
+MODULE_DESCRIPTION("Driver for pSeries platform energy management");
+MODULE_AUTHOR("Vaidyanathan Srinivasan");
+MODULE_LICENSE("GPL");
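A hypothetical user-space consumer of the hint-list files documented above could look like the following sketch (error handling kept minimal; the path comes from the sysfs interface comment in the file):

/* Hypothetical user-space reader for the activate hint list. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/pseries_activate_hint_list", "r");

	if (!f) {
		perror("pseries_activate_hint_list");
		return 1;
	}
	/* Comma separated list of preferred cpus to activate, e.g. "4,5,6,7" */
	if (fgets(line, sizeof(line), f))
		printf("activate hint: %s", line);
	fclose(f);
	return 0;
}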
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index a4fc6da..c55d7ad 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -54,7 +54,8 @@
 static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
 static DEFINE_SPINLOCK(ras_log_buf_lock);
 
-static char mce_data_buf[RTAS_ERROR_LOG_MAX];
+static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
+static DEFINE_PER_CPU(__u64, mce_data_buf);
 
 static int ras_get_sensor_state_token;
 static int ras_check_exception_token;
@@ -196,12 +197,24 @@
 	return IRQ_HANDLED;
 }
 
-/* Get the error information for errors coming through the
+/*
+ * Some versions of FWNMI place the buffer inside the 4kB page starting at
+ * 0x7000. Other versions place it inside the rtas buffer. We check both.
+ */
+#define VALID_FWNMI_BUFFER(A) \
+	((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
+	(((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+
+/*
+ * Get the error information for errors coming through the
  * FWNMI vectors.  The pt_regs' r3 will be updated to reflect
  * the actual r3 if possible, and a ptr to the error log entry
  * will be returned if found.
  *
- * The mce_data_buf does not have any locks or protection around it,
+ * If the RTAS error is not of the extended type, then we put it in a per
+ * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf.
+ *
+ * The global_mce_data_buf does not have any locks or protection around it,
  * if a second machine check comes in, or a system reset is done
  * before we have logged the error, then we will get corruption in the
  * error log.  This is preferable over holding off on calling
@@ -210,20 +223,31 @@
  */
 static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 {
-	unsigned long errdata = regs->gpr[3];
-	struct rtas_error_log *errhdr = NULL;
 	unsigned long *savep;
+	struct rtas_error_log *h, *errhdr = NULL;
 
-	if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
-	    (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
-		savep = __va(errdata);
-		regs->gpr[3] = savep[0];	/* restore original r3 */
-		memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
-		memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
-		errhdr = (struct rtas_error_log *)mce_data_buf;
-	} else {
-		printk("FWNMI: corrupt r3\n");
+	if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
+		printk(KERN_ERR "FWNMI: corrupt r3\n");
+		return NULL;
 	}
+
+	savep = __va(regs->gpr[3]);
+	regs->gpr[3] = savep[0];	/* restore original r3 */
+
+	/* If it isn't an extended log we can use the per cpu 64bit buffer */
+	h = (struct rtas_error_log *)&savep[1];
+	if (!h->extended) {
+		memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
+		errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
+	} else {
+		int len;
+
+		len = min_t(int, 8 + h->extended_log_length, RTAS_ERROR_LOG_MAX);
+		memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
+		memcpy(global_mce_data_buf, h, len);
+		errhdr = (struct rtas_error_log *)global_mce_data_buf;
+	}
+
 	return errhdr;
 }
 
@@ -235,7 +259,7 @@
 {
 	int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
 	if (ret != 0)
-		printk("FWNMI: nmi-interlock failed: %d\n", ret);
+		printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
 }
 
 int pSeries_system_reset_exception(struct pt_regs *regs)
@@ -259,31 +283,43 @@
  * Return 1 if corrected (or delivered a signal).
  * Return 0 if there is nothing we can do.
  */
-static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
+static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
 {
-	int nonfatal = 0;
+	int recovered = 0;
 
-	if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
+	if (!(regs->msr & MSR_RI)) {
+		/* If MSR_RI isn't set, we cannot recover */
+		recovered = 0;
+
+	} else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
 		/* Platform corrected itself */
-		nonfatal = 1;
-	} else if ((regs->msr & MSR_RI) &&
-		   user_mode(regs) &&
-		   err->severity == RTAS_SEVERITY_ERROR_SYNC &&
-		   err->disposition == RTAS_DISP_NOT_RECOVERED &&
-		   err->target == RTAS_TARGET_MEMORY &&
-		   err->type == RTAS_TYPE_ECC_UNCORR &&
-		   !(current->pid == 0 || is_global_init(current))) {
-		/* Kill off a user process with an ECC error */
-		printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
-		       current->pid);
-		/* XXX something better for ECC error? */
-		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
-		nonfatal = 1;
+		recovered = 1;
+
+	} else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) {
+		/* Platform corrected itself but could be degraded */
+		printk(KERN_ERR "MCE: limited recovery, system may "
+		       "be degraded\n");
+		recovered = 1;
+
+	} else if (user_mode(regs) && !is_global_init(current) &&
+		   err->severity == RTAS_SEVERITY_ERROR_SYNC) {
+
+		/*
+		 * If we received a synchronous error when in userspace,
+		 * kill the task. Firmware may report details of the failure
+		 * asynchronously, so we can't rely on the target and type
+		 * fields being valid here.
+		 */
+		printk(KERN_ERR "MCE: uncorrectable error, killing task "
+		       "%s:%d\n", current->comm, current->pid);
+
+		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+		recovered = 1;
 	}
 
-	log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
+	log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
 
-	return nonfatal;
+	return recovered;
 }
 
 /*
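To illustrate the VALID_FWNMI_BUFFER() ranges introduced above, here is a small stand-alone sketch; the rtas base and size values are made up purely for the example:

/* Sketch of the VALID_FWNMI_BUFFER() ranges; rtas values are illustrative. */
#include <stdio.h>

static unsigned long rtas_base = 0x2000000;	/* illustration only */
static unsigned long rtas_size = 0x200000;	/* illustration only */

static int valid_fwnmi_buffer(unsigned long a)
{
	return (a >= 0x7000 && a < 0x7ff0) ||
	       (a >= rtas_base && a < rtas_base + rtas_size - 16);
}

int main(void)
{
	printf("0x7008        -> %d\n", valid_fwnmi_buffer(0x7008));		/* 1 */
	printf("0x7ff8        -> %d\n", valid_fwnmi_buffer(0x7ff8));		/* 0 */
	printf("rtas_base + 8 -> %d\n", valid_fwnmi_buffer(rtas_base + 8));	/* 1 */
	return 0;
}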
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index ed72098..a8ca289 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -153,7 +153,7 @@
 	.name = "power",
 };
 
-static struct platform_suspend_ops pseries_suspend_ops = {
+static const struct platform_suspend_ops pseries_suspend_ops = {
 	.valid		= suspend_valid_only_mem,
 	.begin		= pseries_suspend_begin,
 	.prepare_late	= pseries_prepare_late,
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 0bef9da..9c29734 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -41,6 +41,7 @@
 ifeq ($(CONFIG_PCI),y)
 obj-$(CONFIG_4xx)		+= ppc4xx_pci.o
 endif
+obj-$(CONFIG_PPC4xx_CPM)	+= ppc4xx_cpm.o
 obj-$(CONFIG_PPC4xx_GPIO)	+= ppc4xx_gpio.o
 
 obj-$(CONFIG_CPM)		+= cpm_common.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 17cf15e..8e9e06a7c 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -312,17 +312,10 @@
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
 {
-	struct device_node *dn;
-
 	if (!iommu_table_dart_inited) {
 		iommu_table_dart_inited = 1;
 		iommu_table_dart_setup();
 	}
-
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_dart;
 }
 
 static bool dart_device_on_pcie(struct device *dev)
@@ -373,7 +366,7 @@
 	if (dn == NULL) {
 		dn = of_find_compatible_node(NULL, "dart", "u4-dart");
 		if (dn == NULL)
-			goto bail;
+			return;	/* use default direct_dma_ops */
 		dart_is_u4 = 1;
 	}
 
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 44de855..e9381bf 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -53,7 +53,7 @@
 	return 1;
 }
 
-static struct platform_suspend_ops pmc_suspend_ops = {
+static const struct platform_suspend_ops pmc_suspend_ops = {
 	.valid = pmc_suspend_valid,
 	.enter = pmc_suspend_enter,
 };
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 9725369..8c6cab0 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -973,7 +973,6 @@
 	if (dsr & DOORBELL_DSR_QFI) {
 		pr_info("RIO: doorbell queue full\n");
 		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
-		goto out;
 	}
 
 	/* XXX Need to check/dispatch until queue empty */
@@ -1556,8 +1555,6 @@
 	saved_mcheck_exception = ppc_md.machine_check_exception;
 	ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
 #endif
-	/* Ensure that RFXE is set */
-	mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
 
 	return 0;
 err:
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index c0ea05e..c48cd81 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -1,5 +1,5 @@
 /*
- * GPIOs on MPC8349/8572/8610 and compatible
+ * GPIOs on MPC512x/8349/8572/8610 and compatible
  *
  * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
  *
@@ -26,6 +26,7 @@
 #define GPIO_IER		0x0c
 #define GPIO_IMR		0x10
 #define GPIO_ICR		0x14
+#define GPIO_ICR2		0x18
 
 struct mpc8xxx_gpio_chip {
 	struct of_mm_gpio_chip mm_gc;
@@ -37,6 +38,7 @@
 	 */
 	u32 data;
 	struct irq_host *irq;
+	void *of_dev_id_data;
 };
 
 static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -215,6 +217,51 @@
 	return 0;
 }
 
+static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned long gpio = virq_to_hw(virq);
+	void __iomem *reg;
+	unsigned int shift;
+	unsigned long flags;
+
+	if (gpio < 16) {
+		reg = mm->regs + GPIO_ICR;
+		shift = (15 - gpio) * 2;
+	} else {
+		reg = mm->regs + GPIO_ICR2;
+		shift = (15 - (gpio % 16)) * 2;
+	}
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_LEVEL_LOW:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 2 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 1 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrbits32(reg, 3 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct irq_chip mpc8xxx_irq_chip = {
 	.name		= "mpc8xxx-gpio",
 	.unmask		= mpc8xxx_irq_unmask,
@@ -226,6 +273,11 @@
 static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
 				irq_hw_number_t hw)
 {
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
+
+	if (mpc8xxx_gc->of_dev_id_data)
+		mpc8xxx_irq_chip.set_type = mpc8xxx_gc->of_dev_id_data;
+
 	set_irq_chip_data(virq, h->host_data);
 	set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
 	set_irq_type(virq, IRQ_TYPE_NONE);
@@ -253,11 +305,20 @@
 	.xlate	= mpc8xxx_gpio_irq_xlate,
 };
 
+static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+	{ .compatible = "fsl,mpc8349-gpio", },
+	{ .compatible = "fsl,mpc8572-gpio", },
+	{ .compatible = "fsl,mpc8610-gpio", },
+	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+	{}
+};
+
 static void __init mpc8xxx_add_controller(struct device_node *np)
 {
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
 	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
+	const struct of_device_id *id;
 	unsigned hwirq;
 	int ret;
 
@@ -297,6 +358,10 @@
 	if (!mpc8xxx_gc->irq)
 		goto skip_irq;
 
+	id = of_match_node(mpc8xxx_gpio_ids, np);
+	if (id)
+		mpc8xxx_gc->of_dev_id_data = id->data;
+
 	mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
 
 	/* ack and mask all irqs */
@@ -321,13 +386,7 @@
 {
 	struct device_node *np;
 
-	for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio")
+	for_each_matching_node(np, mpc8xxx_gpio_ids)
 		mpc8xxx_add_controller(np);
 
 	for_each_compatible_node(np, NULL, "fsl,qoriq-gpio")
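A worked example of the ICR/ICR2 selection in mpc512x_irq_set_type() above: GPIO lines 0-15 are configured through ICR and lines 16-31 through ICR2, with two control bits per line counted down from the top of the register:

/* Worked example of the MPC512x ICR/ICR2 field selection. */
#include <stdio.h>

#define GPIO_ICR	0x14
#define GPIO_ICR2	0x18

int main(void)
{
	unsigned int gpios[] = { 0, 3, 16, 31 };
	unsigned int i;

	for (i = 0; i < sizeof(gpios) / sizeof(gpios[0]); i++) {
		unsigned int gpio = gpios[i];
		unsigned int reg = gpio < 16 ? GPIO_ICR : GPIO_ICR2;
		unsigned int shift = (15 - (gpio % 16)) * 2;

		/* gpio 0 -> ICR bits 31:30, gpio 31 -> ICR2 bits 1:0 */
		printf("gpio %2u: reg offset 0x%02x, shift %2u\n",
		       gpio, reg, shift);
	}
	return 0;
}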
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7c13426..b0c8469 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -674,7 +674,8 @@
 	/* make sure mask gets to controller before we return to user */
 	do {
 		if (!loops--) {
-			printk(KERN_ERR "mpic_enable_irq timeout\n");
+			printk(KERN_ERR "%s: timeout on hwirq %u\n",
+			       __func__, src);
 			break;
 		}
 	} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
@@ -695,7 +696,8 @@
 	/* make sure mask gets to controller before we return to user */
 	do {
 		if (!loops--) {
-			printk(KERN_ERR "mpic_enable_irq timeout\n");
+			printk(KERN_ERR "%s: timeout on hwirq %u\n",
+			       __func__, src);
 			break;
 		}
 	} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
new file mode 100644
index 0000000..73b86cc
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -0,0 +1,346 @@
+/*
+ * PowerPC 4xx Clock and Power Management
+ *
+ * Copyright (C) 2010, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@apm.com)
+ *
+ * Based on arch/powerpc/platforms/44x/idle.c:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Copyright 2008 IBM Corp.
+ *
+ * Based on arch/powerpc/sysdev/fsl_pmc.c:
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Copyright 2009  MontaVista Software, Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/suspend.h>
+#include <asm/dcr.h>
+#include <asm/dcr-native.h>
+#include <asm/machdep.h>
+
+#define CPM_ER	0
+#define CPM_FR	1
+#define CPM_SR	2
+
+#define CPM_IDLE_WAIT	0
+#define CPM_IDLE_DOZE	1
+
+struct cpm {
+	dcr_host_t	dcr_host;
+	unsigned int	dcr_offset[3];
+	unsigned int	powersave_off;
+	unsigned int	unused;
+	unsigned int	idle_doze;
+	unsigned int	standby;
+	unsigned int	suspend;
+};
+
+static struct cpm cpm;
+
+struct cpm_idle_mode {
+	unsigned int enabled;
+	const char  *name;
+};
+
+static struct cpm_idle_mode idle_mode[] = {
+	[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
+	[CPM_IDLE_DOZE] = { 0, "doze" },
+};
+
+static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
+{
+	unsigned int value;
+
+	/* CPM controller supports 3 different types of sleep interface
+	 * known as class 1, 2 and 3. For class 1 units, they are
+	 * unconditionally put to sleep when the corresponding CPM bit is
+	 * set. For class 2 and 3 units this is not the case; if they can
+	 * be put to sleep, they will. Here we do not verify, we just
+	 * set them and expect them to eventually go off when they can.
+	 */
+	value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
+	dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);
+
+	/* return old state, to restore later if needed */
+	return value;
+}
+
+static void cpm_idle_wait(void)
+{
+	unsigned long msr_save;
+
+	/* save off initial state */
+	msr_save = mfmsr();
+	/* sync required when CPM0_ER[CPU] is set */
+	mb();
+	/* set wait state MSR */
+	mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
+	isync();
+	/* return to initial state */
+	mtmsr(msr_save);
+	isync();
+}
+
+static void cpm_idle_sleep(unsigned int mask)
+{
+	unsigned int er_save;
+
+	/* update CPM_ER state */
+	er_save = cpm_set(CPM_ER, mask);
+
+	/* go to wait state so that CPM0_ER[CPU] can take effect */
+	cpm_idle_wait();
+
+	/* restore CPM_ER state */
+	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
+}
+
+static void cpm_idle_doze(void)
+{
+	cpm_idle_sleep(cpm.idle_doze);
+}
+
+static void cpm_idle_config(int mode)
+{
+	int i;
+
+	if (idle_mode[mode].enabled)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
+		idle_mode[i].enabled = 0;
+
+	idle_mode[mode].enabled = 1;
+}
+
+static ssize_t cpm_idle_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+		if (idle_mode[i].enabled)
+			s += sprintf(s, "[%s] ", idle_mode[i].name);
+		else
+			s += sprintf(s, "%s ", idle_mode[i].name);
+	}
+
+	*(s-1) = '\n'; /* convert the last space to a newline */
+
+	return s - buf;
+}
+
+static ssize_t cpm_idle_store(struct kobject *kobj,
+			      struct kobj_attribute *attr,
+			      const char *buf, size_t n)
+{
+	int i;
+	char *p;
+	int len;
+
+	p = memchr(buf, '\n', n);
+	len = p ? p - buf : n;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+		if (strncmp(buf, idle_mode[i].name, len) == 0) {
+			cpm_idle_config(i);
+			return n;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static struct kobj_attribute cpm_idle_attr =
+	__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
+
+static void cpm_idle_config_sysfs(void)
+{
+	struct sys_device *sys_dev;
+	unsigned long ret;
+
+	sys_dev = get_cpu_sysdev(0);
+
+	ret = sysfs_create_file(&sys_dev->kobj,
+				&cpm_idle_attr.attr);
+	if (ret)
+		printk(KERN_WARNING
+		       "cpm: failed to create idle sysfs entry\n");
+}
+
+static void cpm_idle(void)
+{
+	if (idle_mode[CPM_IDLE_DOZE].enabled)
+		cpm_idle_doze();
+	else
+		cpm_idle_wait();
+}
+
+static int cpm_suspend_valid(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		return !!cpm.standby;
+	case PM_SUSPEND_MEM:
+		return !!cpm.suspend;
+	default:
+		return 0;
+	}
+}
+
+static void cpm_suspend_standby(unsigned int mask)
+{
+	unsigned long tcr_save;
+
+	/* disable decrement interrupt */
+	tcr_save = mfspr(SPRN_TCR);
+	mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);
+
+	/* go to sleep state */
+	cpm_idle_sleep(mask);
+
+	/* restore decrement interrupt */
+	mtspr(SPRN_TCR, tcr_save);
+}
+
+static int cpm_suspend_enter(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		cpm_suspend_standby(cpm.standby);
+		break;
+	case PM_SUSPEND_MEM:
+		cpm_suspend_standby(cpm.suspend);
+		break;
+	}
+
+	return 0;
+}
+
+static struct platform_suspend_ops cpm_suspend_ops = {
+	.valid		= cpm_suspend_valid,
+	.enter		= cpm_suspend_enter,
+};
+
+static int cpm_get_uint_property(struct device_node *np,
+				 const char *name)
+{
+	int len;
+	const unsigned int *prop = of_get_property(np, name, &len);
+
+	if (prop == NULL || len < sizeof(u32))
+		return 0;
+
+	return *prop;
+}
+
+static int __init cpm_init(void)
+{
+	struct device_node *np;
+	int dcr_base, dcr_len;
+	int ret = 0;
+
+	if (!cpm.powersave_off) {
+		cpm_idle_config(CPM_IDLE_WAIT);
+		ppc_md.power_save = &cpm_idle;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
+	if (!np) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dcr_base = dcr_resource_start(np, 0);
+	dcr_len = dcr_resource_len(np, 0);
+
+	if (dcr_base == 0 || dcr_len == 0) {
+		printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
+		       np->full_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
+
+	if (!DCR_MAP_OK(cpm.dcr_host)) {
+		printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
+		       np->full_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* All 4xx SoCs with a CPM controller use one of two
+	 * different orderings for the CPM registers. Some have the
+	 * CPM registers in the following order (ER,FR,SR). The
+	 * others have them in the following order (SR,ER,FR).
+	 */
+
+	if (cpm_get_uint_property(np, "er-offset") == 0) {
+		cpm.dcr_offset[CPM_ER] = 0;
+		cpm.dcr_offset[CPM_FR] = 1;
+		cpm.dcr_offset[CPM_SR] = 2;
+	} else {
+		cpm.dcr_offset[CPM_ER] = 1;
+		cpm.dcr_offset[CPM_FR] = 2;
+		cpm.dcr_offset[CPM_SR] = 0;
+	}
+
+	/* Now let's see what IPs to turn off for the following modes */
+
+	cpm.unused = cpm_get_uint_property(np, "unused-units");
+	cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
+	cpm.standby = cpm_get_uint_property(np, "standby");
+	cpm.suspend = cpm_get_uint_property(np, "suspend");
+
+	/* If some IPs are unused let's turn them off now */
+
+	if (cpm.unused) {
+		cpm_set(CPM_ER, cpm.unused);
+		cpm_set(CPM_FR, cpm.unused);
+	}
+
+	/* Now let's export interfaces */
+
+	if (!cpm.powersave_off && cpm.idle_doze)
+		cpm_idle_config_sysfs();
+
+	if (cpm.standby || cpm.suspend)
+		suspend_set_ops(&cpm_suspend_ops);
+out:
+	if (np)
+		of_node_put(np);
+	return ret;
+}
+
+late_initcall(cpm_init);
+
+static int __init cpm_powersave_off(char *arg)
+{
+	cpm.powersave_off = 1;
+	return 0;
+}
+__setup("powersave=off", cpm_powersave_off);
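For reference, the "idle" attribute created by cpm_idle_config_sysfs() accepts the strings "wait" and "doze". Assuming it lands on the cpu0 sysdev kobject, i.e. /sys/devices/system/cpu/cpu0/idle (an assumption about the resulting sysfs path), a user-space toggle could look like this sketch:

/* Hypothetical user-space toggle for the CPM "idle" attribute.
 * The path below assumes the attribute lives on the cpu0 sysdev
 * kobject; adjust it if your sysfs layout differs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/idle", "w");

	if (!f) {
		perror("idle");
		return 1;
	}
	fputs("doze\n", f);	/* accepted values: "wait" (default) or "doze" */
	fclose(f);
	return 0;
}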
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index c2d675b..ee05680 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -84,8 +84,8 @@
 		memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
 
 		ret = of_address_to_resource(np, 0, &r[0]);
-		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-			__func__,r[0].name, r[0].start, r[0].end);
+		DBG("%s: name:start->end = %s:%pR\n",
+		    __func__, r[0].name, &r[0]);
 		if (ret)
 			goto err;
 
@@ -93,8 +93,8 @@
 		r[1].start = irq_of_parse_and_map(np, 0);
 		r[1].end = irq_of_parse_and_map(np, 0);
 		r[1].flags = IORESOURCE_IRQ;
-		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-			__func__,r[1].name, r[1].start, r[1].end);
+		DBG("%s: name:start->end = %s:%pR\n",
+			__func__, r[1].name, &r[1]);
 
 		tsi_eth_dev =
 		    platform_device_register_simple("tsi-ethernet", i++, &r[0],
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff19efd..636bcb8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -406,7 +406,7 @@
 	  If unsure, say Y.
 
 config CHSC_SCH
-	def_tristate y
+	def_tristate m
 	prompt "Support for CHSC subchannels"
 	help
 	  This driver allows usage of CHSC subchannels. A CHSC subchannel
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 0851eb1..2751b3a 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -133,11 +133,12 @@
 	unsigned long output_addr;
 	unsigned char *output;
 
-	check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
+	output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+	check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
 	memset(&_bss, 0, &_ebss - &_bss);
 	free_mem_ptr = (unsigned long)&_end;
 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-	output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
+	output = (unsigned char *) output_addr;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/*
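The output_addr computation above simply rounds _end + HEAP_SIZE up to the next 4 KiB boundary before it is handed to check_ipl_parmblock(). A worked example of that rounding with a made-up address:

/* Worked example of the 4 KiB round-up used for output_addr. */
#include <stdio.h>

int main(void)
{
	unsigned long end_plus_heap = 0x123456UL;	/* made-up address */
	unsigned long output_addr = (end_plus_heap + 4095UL) & -4096UL;

	/* prints: 0x123456 rounds up to 0x124000 */
	printf("0x%lx rounds up to 0x%lx\n", end_plus_heap, output_addr);
	return 0;
}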
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index f42dbab..48884f8 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -38,6 +38,7 @@
 		BUG_ON(ret != bsize);
 		data += bsize - index;
 		len -= bsize - index;
+		index = 0;
 	}
 
 	/* process as many blocks as possible */
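The added index = 0 matters for the tail handling that follows this hunk: once the partially filled block has been flushed, any leftover bytes must be buffered from offset 0 again rather than from the stale offset. A sketch of the arithmetic with illustrative numbers (the surrounding s390_sha_update() flow is assumed, not shown in the hunk):

/* Illustrative arithmetic for the index reset (bsize = 64). */
#include <stdio.h>

int main(void)
{
	int bsize = 64, index = 10, len = 100;

	len -= bsize - index;	/* 54 bytes complete the buffered block */
	index = 0;		/* the fix: later buffering restarts at offset 0 */
	printf("%d leftover bytes buffered at offset %d\n", len, index);
	return 0;
}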
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d796971..29c82c6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -5,10 +5,21 @@
 CONFIG_RCU_TRACE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -19,7 +30,9 @@
 CONFIG_PREEMPT=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
 CONFIG_BINFMT_MISC=m
+CONFIG_CMM=m
 CONFIG_HZ_100=y
 CONFIG_KEXEC=y
 CONFIG_PM=y
@@ -105,6 +118,7 @@
 CONFIG_DEBUG_NOTIFIERS=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea1..5c5ba10 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,14 +36,19 @@
 
 static inline int atomic_read(const atomic_t *v)
 {
-	barrier();
-	return v->counter;
+	int c;
+
+	asm volatile(
+		"	l	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }
 
 static inline void atomic_set(atomic_t *v, int i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	st	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@
 
 static inline long long atomic64_read(const atomic64_t *v)
 {
-	barrier();
-	return v->counter;
+	long long c;
+
+	asm volatile(
+		"	lg	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }
 
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	stg	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa6..2a30d5a 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@
 
 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8
+#define NET_SKB_PAD	   32
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 405cc97..7e1f776 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -1,29 +1,8 @@
 #ifndef _S390_CACHEFLUSH_H
 #define _S390_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the s390. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index a875c2f..da359ca 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -169,7 +169,7 @@
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_31BIT);
+	return is_32bit_task();
 }
 
 #else
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 354d426..10c029c 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -161,7 +161,9 @@
    use of this is to invoke "./ld.so someprog" to test out a new version of
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
-#define ELF_ET_DYN_BASE		(STACK_TOP / 3 * 2)
+
+extern unsigned long randomize_et_dyn(unsigned long base);
+#define ELF_ET_DYN_BASE		(randomize_et_dyn(STACK_TOP / 3 * 2))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -206,6 +208,8 @@
 	current->mm->context.noexec == 0;		\
 })
 
+#define STACK_RND_MASK	0x7ffUL
+
 #define ARCH_DLINFO							    \
 do {									    \
 	if (vdso_enabled)						    \
@@ -218,4 +222,7 @@
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 int arch_setup_additional_pages(struct linux_binprm *, int);
 
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
 #endif
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bf3de04..2c79b64 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -148,11 +148,6 @@
  */
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
-/*
- * Print register of task into buffer. Used in fs/proc/array.c.
- */
-extern void task_show_regs(struct seq_file *m, struct task_struct *task);
-
 extern void show_code(struct pt_regs *regs);
 
 unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 6710b0e..8f8d759 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -449,7 +449,7 @@
 extern void (*_machine_halt)(void);
 extern void (*_machine_power_off)(void);
 
-#define arch_align_stack(x) (x)
+extern unsigned long arch_align_stack(unsigned long sp);
 
 static inline int tprot(unsigned long addr)
 {
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ebc7709..ad1382f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -118,6 +118,12 @@
 #define _TIF_SINGLE_STEP	(1<<TIF_FREEZE)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
+#ifdef CONFIG_64BIT
+#define is_32bit_task()		(test_thread_flag(TIF_31BIT))
+#else
+#define is_32bit_task()		(1)
+#endif
+
 #endif /* __KERNEL__ */
 
 #define PREEMPT_ACTIVE		0x4000000
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index f1f644f..9074a54 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,6 +22,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6ba4222..a895e69 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -30,9 +30,11 @@
 #include <linux/tick.h>
 #include <linux/elfcore.h>
 #include <linux/kernel_stat.h>
+#include <linux/personality.h>
 #include <linux/syscalls.h>
 #include <linux/compat.h>
 #include <linux/kprobes.h>
+#include <linux/random.h>
 #include <asm/compat.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -332,3 +334,39 @@
 	}
 	return 0;
 }
+
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= get_random_int() & ~PAGE_MASK;
+	return sp & ~0xf;
+}
+
+static inline unsigned long brk_rnd(void)
+{
+	/* 8MB for 32bit, 1GB for 64bit */
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	else
+		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+
+	if (ret < mm->brk)
+		return mm->brk;
+	return ret;
+}
+
+unsigned long randomize_et_dyn(unsigned long base)
+{
+	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+
+	if (!(current->flags & PF_RANDOMIZE))
+		return base;
+	if (ret < base)
+		return base;
+	return ret;
+}
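A quick check of the brk_rnd() ranges against the "8MB for 32bit, 1GB for 64bit" comment, assuming the s390 PAGE_SHIFT of 12:

/* Quick check of the brk_rnd() ranges (assumes PAGE_SHIFT == 12). */
#include <stdio.h>

int main(void)
{
	unsigned long max32 = 0x7ffUL << 12;	/* 0x7ff000,   just under 8 MiB */
	unsigned long max64 = 0x3ffffUL << 12;	/* 0x3ffff000, just under 1 GiB */

	printf("32bit max offset: %lu MiB\n", max32 >> 20);	/* 7 */
	printf("64bit max offset: %lu MiB\n", max64 >> 20);	/* 1023 */
	return 0;
}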
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 5eb78dd..b5a4a73 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -237,43 +237,6 @@
 	show_last_breaking_event(regs);
 }
 
-/* This is called from fs/proc/array.c */
-void task_show_regs(struct seq_file *m, struct task_struct *task)
-{
-	struct pt_regs *regs;
-
-	regs = task_pt_regs(task);
-	seq_printf(m, "task: %p, ksp: %p\n",
-		       task, (void *)task->thread.ksp);
-	seq_printf(m, "User PSW : %p %p\n",
-		       (void *) regs->psw.mask, (void *)regs->psw.addr);
-
-	seq_printf(m, "User GPRS: " FOURLONG,
-			  regs->gprs[0], regs->gprs[1],
-			  regs->gprs[2], regs->gprs[3]);
-	seq_printf(m, "           " FOURLONG,
-			  regs->gprs[4], regs->gprs[5],
-			  regs->gprs[6], regs->gprs[7]);
-	seq_printf(m, "           " FOURLONG,
-			  regs->gprs[8], regs->gprs[9],
-			  regs->gprs[10], regs->gprs[11]);
-	seq_printf(m, "           " FOURLONG,
-			  regs->gprs[12], regs->gprs[13],
-			  regs->gprs[14], regs->gprs[15]);
-	seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
-			  task->thread.acrs[0], task->thread.acrs[1],
-			  task->thread.acrs[2], task->thread.acrs[3]);
-	seq_printf(m, "           %08x %08x %08x %08x\n",
-			  task->thread.acrs[4], task->thread.acrs[5],
-			  task->thread.acrs[6], task->thread.acrs[7]);
-	seq_printf(m, "           %08x %08x %08x %08x\n",
-			  task->thread.acrs[8], task->thread.acrs[9],
-			  task->thread.acrs[10], task->thread.acrs[11]);
-	seq_printf(m, "           %08x %08x %08x %08x\n",
-			  task->thread.acrs[12], task->thread.acrs[13],
-			  task->thread.acrs[14], task->thread.acrs[15]);
-}
-
 static DEFINE_SPINLOCK(die_lock);
 
 void die(const char * str, struct pt_regs * regs, long err)
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index e3150dd..f438d74 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -203,7 +203,6 @@
 	if (!uses_interp)
 		return 0;
 
-	vdso_base = mm->mmap_base;
 #ifdef CONFIG_64BIT
 	vdso_pagelist = vdso64_pagelist;
 	vdso_pages = vdso64_pages;
@@ -233,8 +232,7 @@
 	 * fail and end up putting it elsewhere.
 	 */
 	down_write(&mm->mmap_sem);
-	vdso_base = get_unmapped_area(NULL, vdso_base,
-				      vdso_pages << PAGE_SHIFT, 0, 0);
+	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;
 		goto out_up;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 985d825..bade533 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -164,24 +164,18 @@
 	return r;
 }
 
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm;
 	int rc;
 	char debug_name[16];
 
 	rc = s390_enable_sie();
 	if (rc)
-		goto out_nokvm;
-
-	rc = -ENOMEM;
-	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	if (!kvm)
-		goto out_nokvm;
+		goto out_err;
 
 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
 	if (!kvm->arch.sca)
-		goto out_nosca;
+		goto out_err;
 
 	sprintf(debug_name, "kvm-%u", current->pid);
 
@@ -195,13 +189,11 @@
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
-	return kvm;
+	return 0;
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
-out_nosca:
-	kfree(kvm);
-out_nokvm:
-	return ERR_PTR(rc);
+out_err:
+	return rc;
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -240,11 +232,8 @@
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
 }
 
 /* Section: vcpu related */
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 07deaee..a6c4f7e 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -125,9 +125,9 @@
 	unsigned long tmp1;
 
 	asm volatile(
+		"   sacf  256\n"
 		"  "AHI"  %0,-1\n"
 		"   jo    5f\n"
-		"   sacf  256\n"
 		"   bras  %3,3f\n"
 		"0:"AHI"  %0,257\n"
 		"1: mvc   0(1,%1),0(%2)\n"
@@ -142,9 +142,8 @@
 		"3:"AHI"  %0,-256\n"
 		"   jnm   2b\n"
 		"4: ex    %0,1b-0b(%3)\n"
-		"   sacf  0\n"
 		"5: "SLR"  %0,%0\n"
-		"6:\n"
+		"6: sacf  0\n"
 		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
 		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
 		: : "cc", "memory");
@@ -156,9 +155,9 @@
 	unsigned long tmp1, tmp2;
 
 	asm volatile(
+		"   sacf  256\n"
 		"  "AHI"  %0,-1\n"
 		"   jo    5f\n"
-		"   sacf  256\n"
 		"   bras  %3,3f\n"
 		"   xc    0(1,%1),0(%1)\n"
 		"0:"AHI"  %0,257\n"
@@ -178,9 +177,8 @@
 		"3:"AHI"  %0,-256\n"
 		"   jnm   2b\n"
 		"4: ex    %0,0(%3)\n"
-		"   sacf  0\n"
 		"5: "SLR"  %0,%0\n"
-		"6:\n"
+		"6: sacf  0\n"
 		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
 		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
 		: : "cc", "memory");
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 869efba..c9a9f7f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -27,17 +27,44 @@
 #include <linux/personality.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/random.h>
 #include <asm/pgalloc.h>
 #include <asm/compat.h>
 
+static unsigned long stack_maxrandom_size(void)
+{
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
+	if (current->personality & ADDR_NO_RANDOMIZE)
+		return 0;
+	return STACK_RND_MASK << PAGE_SHIFT;
+}
+
 /*
  * Top of mmap area (just below the process stack).
  *
- * Leave an at least ~128 MB hole.
+ * Leave at least a ~32 MB hole.
  */
-#define MIN_GAP (128*1024*1024)
+#define MIN_GAP (32*1024*1024)
 #define MAX_GAP (STACK_TOP/6*5)
 
+static inline int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+		return 1;
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
+	/* 8MB randomization for mmap_base */
+	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+}
+
 static inline unsigned long mmap_base(void)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
@@ -46,22 +73,8 @@
 		gap = MIN_GAP;
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
-
-	return STACK_TOP - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
-#ifdef CONFIG_64BIT
-	/*
-	 * Force standard allocation for 64 bit programs.
-	 */
-	if (!is_compat_task())
-		return 1;
-#endif
-	return sysctl_legacy_va_layout ||
-	    (current->personality & ADDR_COMPAT_LAYOUT) ||
-	    rlimit(RLIMIT_STACK) == RLIM_INFINITY;
+	gap &= PAGE_MASK;
+	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
 }
 
 #ifndef CONFIG_64BIT
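A stand-alone sketch of the new mmap_base() formula above, STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap, with made-up inputs for STACK_TOP, the stack rlimit and the two random terms:

/* Sketch of the new mmap_base() computation with made-up inputs. */
#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)
#define MIN_GAP		(32 * 1024 * 1024UL)

int main(void)
{
	unsigned long stack_top = 0x20000000000UL;	/* illustration only */
	unsigned long max_gap = stack_top / 6 * 5;
	unsigned long gap = 8 * 1024 * 1024UL;		/* e.g. RLIMIT_STACK = 8 MiB */
	unsigned long stack_rnd = 0x7ffUL << 12;	/* stack_maxrandom_size() */
	unsigned long mmap_rnd = 0x123UL << 12;		/* one possible mmap_rnd() */

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > max_gap)
		gap = max_gap;
	gap &= PAGE_MASK;

	printf("mmap_base = 0x%lx\n", stack_top - stack_rnd - mmap_rnd - gap);
	return 0;
}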
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0c719c6..e1850c2 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -336,7 +336,8 @@
 	page->flags ^= bits;
 	if (page->flags & FRAG_MASK) {
 		/* Page now has some free pgtable fragments. */
-		list_move(&page->lru, &mm->context.pgtable_list);
+		if (!list_empty(&page->lru))
+			list_move(&page->lru, &mm->context.pgtable_list);
 		page = NULL;
 	} else
 		/* All fragments of the 4K page have been freed. */
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 4293fdcb..27b2295 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -1,5 +1,9 @@
 menu "Machine selection"
 
+config SCORE
+       def_bool y
+       select HAVE_GENERIC_HARDIRQS
+
 choice
 	prompt "System type"
 	default MACH_SPCT6600
@@ -53,9 +57,6 @@
 config SCHED_NO_NO_OMIT_FRAME_POINTER
 	def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_SYSCALL_TABLE
 	def_bool y
 
@@ -68,9 +69,6 @@
 config 32BIT
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
index 9883c50..df1edbf 100644
--- a/arch/score/configs/spct6600_defconfig
+++ b/arch/score/configs/spct6600_defconfig
@@ -9,7 +9,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index fff2522..8a9011d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,6 +1,6 @@
 config SUPERH
 	def_bool y
-	select EMBEDDED
+	select EXPERT
 	select CLKDEV_LOOKUP
 	select HAVE_IDE if HAS_IOPORT
 	select HAVE_MEMBLOCK
@@ -15,6 +15,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_XZ
 	select HAVE_KERNEL_LZO
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 9c8c6e1..e3d8170 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -200,7 +200,7 @@
 libs-$(CONFIG_SUPERH32)		:= arch/sh/lib/	$(libs-y)
 libs-$(CONFIG_SUPERH64)		:= arch/sh/lib64/ $(libs-y)
 
-BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.lzo \
+BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
 	       uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \
 	       romImage
 PHONY += $(BOOT_TARGETS)
@@ -230,5 +230,6 @@
 	@echo '* uImage.gz	           - Kernel-only image for U-Boot (gzip)'
 	@echo '  uImage.bz2	           - Kernel-only image for U-Boot (bzip2)'
 	@echo '  uImage.lzma	           - Kernel-only image for U-Boot (lzma)'
+	@echo '  uImage.xz	           - Kernel-only image for U-Boot (xz)'
 	@echo '  uImage.lzo	           - Kernel-only image for U-Boot (lzo)'
 endef
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 2018c7e..d893411 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -3,6 +3,9 @@
 config SOLUTION_ENGINE
 	bool
 
+config SH_ALPHA_BOARD
+	bool
+
 config SH_SOLUTION_ENGINE
 	bool "SolutionEngine"
 	select SOLUTION_ENGINE
@@ -320,6 +323,21 @@
 	  Compact Flash socket, two serial ports and PC-104 bus.
 	  More information at <http://sh2000.sh-linux.org>.
 
+config SH_APSH4A3A
+	bool "AP-SH4A-3A"
+	select SH_ALPHA_BOARD
+	depends on CPU_SUBTYPE_SH7785
+	help
+	  Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A.
+
+config SH_APSH4AD0A
+	bool "AP-SH4AD-0A"
+	select SH_ALPHA_BOARD
+	select SYS_SUPPORTS_PCI
+	depends on CPU_SUBTYPE_SH7786
+	help
+	  Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A.
+
 endmenu
 
 source "arch/sh/boards/mach-r2d/Kconfig"
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index be7d11d..975a0f6 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -13,3 +13,5 @@
 obj-$(CONFIG_SH_POLARIS)	+= board-polaris.o
 obj-$(CONFIG_SH_TITAN)		+= board-titan.o
 obj-$(CONFIG_SH_SH7757LCR)	+= board-sh7757lcr.o
+obj-$(CONFIG_SH_APSH4A3A)	+= board-apsh4a3a.o
+obj-$(CONFIG_SH_APSH4AD0A)	+= board-apsh4ad0a.o
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c
new file mode 100644
index 0000000..8e2a270
--- /dev/null
+++ b/arch/sh/boards/board-apsh4a3a.c
@@ -0,0 +1,175 @@
+/*
+ * ALPHAPROJECT AP-SH4A-3A Support.
+ *
+ * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
+ * Copyright (C) 2008  Yoshihiro Shimoda
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/physmap.h>
+#include <linux/smsc911x.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+#include <asm/clock.h>
+
+static struct mtd_partition nor_flash_partitions[] = {
+	{
+		.name		= "loader",
+		.offset		= 0x00000000,
+		.size		= 512 * 1024,
+	},
+	{
+		.name		= "bootenv",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 512 * 1024,
+	},
+	{
+		.name		= "kernel",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 4 * 1024 * 1024,
+	},
+	{
+		.name		= "data",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+	},
+};
+
+static struct physmap_flash_data nor_flash_data = {
+	.width		= 4,
+	.parts		= nor_flash_partitions,
+	.nr_parts	= ARRAY_SIZE(nor_flash_partitions),
+};
+
+static struct resource nor_flash_resources[] = {
+	[0]	= {
+		.start	= 0x00000000,
+		.end	= 0x01000000 - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device nor_flash_device = {
+	.name		= "physmap-flash",
+	.dev		= {
+		.platform_data	= &nor_flash_data,
+	},
+	.num_resources	= ARRAY_SIZE(nor_flash_resources),
+	.resource	= nor_flash_resources,
+};
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.name		= "smsc911x-memory",
+		.start		= 0xA4000000,
+		.end		= 0xA4000000 + SZ_256 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name		= "smsc911x-irq",
+		.start		= evt2irq(0x200),
+		.end		= evt2irq(0x200),
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= SMSC911X_USE_16BIT,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smsc911x_resources),
+	.resource	= smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
+};
+
+static struct platform_device *apsh4a3a_devices[] __initdata = {
+	&nor_flash_device,
+	&smsc911x_device,
+};
+
+static int __init apsh4a3a_devices_setup(void)
+{
+	return platform_add_devices(apsh4a3a_devices,
+				    ARRAY_SIZE(apsh4a3a_devices));
+}
+device_initcall(apsh4a3a_devices_setup);
+
+static int apsh4a3a_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333000);
+	clk_put(clk);
+
+	return ret;
+}
+
+/* Initialize the board */
+static void __init apsh4a3a_setup(char **cmdline_p)
+{
+	printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n");
+}
+
+static void __init apsh4a3a_init_irq(void)
+{
+	plat_irq_setup_pins(IRQ_MODE_IRQ7654);
+}
+
+/* Return the board specific boot mode pin configuration */
+static int apsh4a3a_mode_pins(void)
+{
+	int value = 0;
+
+	/* These are the factory default settings of SW1 and SW2.
+	 * If you change these dip switches then you will need to
+	 * adjust the values below as well.
+	 */
+	value &= ~MODE_PIN0;  /* Clock Mode 16 */
+	value &= ~MODE_PIN1;
+	value &= ~MODE_PIN2;
+	value &= ~MODE_PIN3;
+	value |=  MODE_PIN4;
+	value &= ~MODE_PIN5;  /* 16-bit Area0 bus width */
+	value |=  MODE_PIN6;  /* Area 0 SRAM interface */
+	value |=  MODE_PIN7;
+	value |=  MODE_PIN8;  /* Little Endian */
+	value |=  MODE_PIN9;  /* Master Mode */
+	value |=  MODE_PIN10; /* Crystal resonator */
+	value |=  MODE_PIN11; /* Display Unit */
+	value |=  MODE_PIN12;
+	value &= ~MODE_PIN13; /* 29-bit address mode */
+	value |=  MODE_PIN14; /* No PLL step-up */
+
+	return value;
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_apsh4a3a __initmv = {
+	.mv_name		= "AP-SH4A-3A",
+	.mv_setup		= apsh4a3a_setup,
+	.mv_clk_init		= apsh4a3a_clk_init,
+	.mv_init_irq		= apsh4a3a_init_irq,
+	.mv_mode_pins		= apsh4a3a_mode_pins,
+};
diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c
new file mode 100644
index 0000000..e2bd218
--- /dev/null
+++ b/arch/sh/boards/board-apsh4ad0a.c
@@ -0,0 +1,125 @@
+/*
+ * ALPHAPROJECT AP-SH4AD-0A Support.
+ *
+ * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
+ * Copyright (C) 2010  Matt Fleming
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/smsc911x.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.name		= "smsc911x-memory",
+		.start		= 0xA4000000,
+		.end		= 0xA4000000 + SZ_256 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name		= "smsc911x-irq",
+		.start		= evt2irq(0x200),
+		.end		= evt2irq(0x200),
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= SMSC911X_USE_16BIT,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smsc911x_resources),
+	.resource	= smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
+};
+
+static struct platform_device *apsh4ad0a_devices[] __initdata = {
+	&smsc911x_device,
+};
+
+static int __init apsh4ad0a_devices_setup(void)
+{
+	return platform_add_devices(apsh4ad0a_devices,
+				    ARRAY_SIZE(apsh4ad0a_devices));
+}
+device_initcall(apsh4ad0a_devices_setup);
+
+static int apsh4ad0a_mode_pins(void)
+{
+	int value = 0;
+
+	/* These are the factory default settings of SW1 and SW2.
+	 * If you change these dip switches then you will need to
+	 * adjust the values below as well.
+	 */
+	value |=  MODE_PIN0;  /* Clock Mode 3 */
+	value |=  MODE_PIN1;
+	value &= ~MODE_PIN2;
+	value &= ~MODE_PIN3;
+	value &= ~MODE_PIN4;  /* 16-bit Area0 bus width  */
+	value |=  MODE_PIN5;
+	value |=  MODE_PIN6;
+	value |=  MODE_PIN7;  /* Normal mode */
+	value |=  MODE_PIN8;  /* Little Endian */
+	value |=  MODE_PIN9;  /* Crystal resonator */
+	value &= ~MODE_PIN10; /* 29-bit address mode */
+	value &= ~MODE_PIN11; /* PCI-E Root port */
+	value &= ~MODE_PIN12; /* 4 lane + 1 lane */
+	value |=  MODE_PIN13; /* AUD Enable */
+	value &= ~MODE_PIN14; /* Normal Operation */
+
+	return value;
+}
+
+static int apsh4ad0a_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333000);
+	clk_put(clk);
+
+	return ret;
+}
+
+/* Initialize the board */
+static void __init apsh4ad0a_setup(char **cmdline_p)
+{
+	pr_info("Alpha Project AP-SH4AD-0A support:\n");
+}
+
+static void __init apsh4ad0a_init_irq(void)
+{
+	plat_irq_setup_pins(IRQ_MODE_IRQ3210);
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_apsh4ad0a __initmv = {
+	.mv_name		= "AP-SH4AD-0A",
+	.mv_setup		= apsh4ad0a_setup,
+	.mv_mode_pins		= apsh4ad0a_mode_pins,
+	.mv_clk_init		= apsh4ad0a_clk_init,
+	.mv_init_irq		= apsh4ad0a_init_irq,
+};
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
index 4cb3bb7..541d8a2 100644
--- a/arch/sh/boards/board-edosk7705.c
+++ b/arch/sh/boards/board-edosk7705.c
@@ -66,7 +66,7 @@
 	return platform_add_devices(edosk7705_devices,
 				    ARRAY_SIZE(edosk7705_devices));
 }
-__initcall(init_edosk7705_devices);
+device_initcall(init_edosk7705_devices);
 
 /*
  * The Machine Vector
diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c
index 35dc099..f47ac82 100644
--- a/arch/sh/boards/board-edosk7760.c
+++ b/arch/sh/boards/board-edosk7760.c
@@ -182,7 +182,7 @@
 	return platform_add_devices(edosk7760_devices,
 				    ARRAY_SIZE(edosk7760_devices));
 }
-__initcall(init_edosk7760_devices);
+device_initcall(init_edosk7760_devices);
 
 /*
  * The Machine Vector
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index fe7e686..ee65ff0 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -284,7 +284,7 @@
 	return platform_add_devices(sh7785lcr_devices,
 				    ARRAY_SIZE(sh7785lcr_devices));
 }
-__initcall(sh7785lcr_devices_setup);
+device_initcall(sh7785lcr_devices_setup);
 
 /* Initialize IRQ setting */
 void __init init_sh7785lcr_IRQ(void)
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 07ea908..3e5fc3b 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -14,6 +14,8 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/sh_flctl.h>
 #include <linux/delay.h>
@@ -430,11 +432,18 @@
 	},
 };
 
+static struct sh_mobile_sdhi_info sdhi0_cn3_data = {
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
+};
+
 static struct platform_device sdhi0_cn3_device = {
 	.name		= "sh_mobile_sdhi",
 	.id             = 0, /* "sdhi0" clock */
 	.num_resources	= ARRAY_SIZE(sdhi0_cn3_resources),
 	.resource	= sdhi0_cn3_resources,
+	.dev = {
+		.platform_data = &sdhi0_cn3_data,
+	},
 	.archdata = {
 		.hwblk_id = HWBLK_SDHI0,
 	},
@@ -453,11 +462,18 @@
 	},
 };
 
+static struct sh_mobile_sdhi_info sdhi1_cn7_data = {
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
+};
+
 static struct platform_device sdhi1_cn7_device = {
 	.name		= "sh_mobile_sdhi",
 	.id             = 1, /* "sdhi1" clock */
 	.num_resources	= ARRAY_SIZE(sdhi1_cn7_resources),
 	.resource	= sdhi1_cn7_resources,
+	.dev = {
+		.platform_data = &sdhi1_cn7_data,
+	},
 	.archdata = {
 		.hwblk_id = HWBLK_SDHI1,
 	},
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c
index 7e8216a..e89e8e1 100644
--- a/arch/sh/boards/mach-cayman/setup.c
+++ b/arch/sh/boards/mach-cayman/setup.c
@@ -165,7 +165,7 @@
 
 	return 0;
 }
-__initcall(smsc_superio_setup);
+device_initcall(smsc_superio_setup);
 
 static void __iomem *cayman_ioport_map(unsigned long port, unsigned int len)
 {
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index f48c492..701667a 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -473,6 +473,7 @@
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
 	.set_pwr	= sdhi0_set_pwr,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -511,6 +512,7 @@
 static struct sh_mobile_sdhi_info sdhi1_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
 	.set_pwr	= sdhi1_set_pwr,
 };
 
@@ -1292,6 +1294,7 @@
 	i2c_register_board_info(1, i2c1_devices,
 				ARRAY_SIZE(i2c1_devices));
 
+#if defined(CONFIG_VIDEO_SH_VOU) || defined(CONFIG_VIDEO_SH_VOU_MODULE)
 	/* VOU */
 	gpio_request(GPIO_FN_DV_D15, NULL);
 	gpio_request(GPIO_FN_DV_D14, NULL);
@@ -1323,6 +1326,7 @@
 
 	/* Remove reset */
 	gpio_set_value(GPIO_PTG4, 1);
+#endif
 
 	return platform_add_devices(ecovec_devices,
 				    ARRAY_SIZE(ecovec_devices));
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index 4499a37..adc9b4b 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -143,7 +143,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops hp6x0_pm_ops = {
+static const struct platform_suspend_ops hp6x0_pm_ops = {
 	.enter		= hp6x0_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 9b60eaa..7504daa 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/onenand.h>
@@ -366,6 +367,7 @@
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
 	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device kfr2r09_sh_sdhi0_device = {
diff --git a/arch/sh/boards/mach-landisk/irq.c b/arch/sh/boards/mach-landisk/irq.c
index e79412a..c00ace3 100644
--- a/arch/sh/boards/mach-landisk/irq.c
+++ b/arch/sh/boards/mach-landisk/irq.c
@@ -1,9 +1,10 @@
 /*
- * arch/sh/boards/landisk/irq.c
+ * arch/sh/boards/mach-landisk/irq.c
  *
  * I-O DATA Device, Inc. LANDISK Support
  *
  * Copyright (C) 2005-2007 kogiidena
+ * Copyright (C) 2011 Nobuhiro Iwamatsu
  *
  * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
  * Based largely on io_se.c.
@@ -12,44 +13,54 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
+
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <mach-landisk/mach/iodata_landisk.h>
 
-static void disable_landisk_irq(struct irq_data *data)
-{
-	unsigned char mask = 0xff ^ (0x01 << (data->irq - 5));
+enum {
+	UNUSED = 0,
 
-	__raw_writeb(__raw_readb(PA_IMASK) & mask, PA_IMASK);
-}
-
-static void enable_landisk_irq(struct irq_data *data)
-{
-	unsigned char value = (0x01 << (data->irq - 5));
-
-	__raw_writeb(__raw_readb(PA_IMASK) | value, PA_IMASK);
-}
-
-static struct irq_chip landisk_irq_chip __read_mostly = {
-	.name		= "LANDISK",
-	.irq_mask	= disable_landisk_irq,
-	.irq_unmask	= enable_landisk_irq,
+	PCI_INTA, /* PCI int A */
+	PCI_INTB, /* PCI int B */
+	PCI_INTC, /* PCI int C */
+	PCI_INTD, /* PCI int D */
+	ATA,	  /* ATA */
+	FATA,	  /* CF */
+	POWER,	  /* Power switch */
+	BUTTON,	  /* Button switch */
 };
 
+/* Vectors for LANDISK */
+static struct intc_vect vectors_landisk[] __initdata = {
+	INTC_IRQ(PCI_INTA, IRQ_PCIINTA),
+	INTC_IRQ(PCI_INTB, IRQ_PCIINTB),
+	INTC_IRQ(PCI_INTC, IRQ_PCIINTC),
+	INTC_IRQ(PCI_INTD, IRQ_PCIINTD),
+	INTC_IRQ(ATA, IRQ_ATA),
+	INTC_IRQ(FATA, IRQ_FATA),
+	INTC_IRQ(POWER, IRQ_POWER),
+	INTC_IRQ(BUTTON, IRQ_BUTTON),
+};
+
+/* IRLMSK mask register layout for LANDISK */
+static struct intc_mask_reg mask_registers_landisk[] __initdata = {
+	{ PA_IMASK, 0, 8, /* IRLMSK */
+	  {  BUTTON, POWER, FATA, ATA,
+	     PCI_INTD, PCI_INTC, PCI_INTB, PCI_INTA,
+	  }
+	},
+};
+
+static DECLARE_INTC_DESC(intc_desc_landisk, "landisk", vectors_landisk, NULL,
+			mask_registers_landisk, NULL, NULL);
 /*
  * Initialize IRQ setting
  */
 void __init init_landisk_IRQ(void)
 {
-	int i;
-
-	for (i = 5; i < 14; i++) {
-		disable_irq_nosync(i);
-		set_irq_chip_and_handler_name(i, &landisk_irq_chip,
-					      handle_level_irq, "level");
-		enable_landisk_irq(irq_get_irq_data(i));
-	}
+	register_intc_controller(&intc_desc_landisk);
 	__raw_writeb(0x00, PA_PWRINT_CLR);
 }
diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
index 50337acc..94186cf 100644
--- a/arch/sh/boards/mach-landisk/setup.c
+++ b/arch/sh/boards/mach-landisk/setup.c
@@ -21,8 +21,6 @@
 #include <mach-landisk/mach/iodata_landisk.h>
 #include <asm/io.h>
 
-void init_landisk_IRQ(void);
-
 static void landisk_power_off(void)
 {
         __raw_writeb(0x01, PA_SHUTDOWN);
@@ -83,7 +81,7 @@
 				    ARRAY_SIZE(landisk_devices));
 }
 
-__initcall(landisk_devices_setup);
+device_initcall(landisk_devices_setup);
 
 static void __init landisk_setup(char **cmdline_p)
 {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index c8acfec..03a7ffe7 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -13,6 +13,7 @@
 #include <linux/input.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/nand.h>
 #include <linux/i2c.h>
@@ -410,6 +411,7 @@
 static struct sh_mobile_sdhi_info sh7724_sdhi_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi_cn9_device = {
diff --git a/arch/sh/boards/mach-r2d/setup.c b/arch/sh/boards/mach-r2d/setup.c
index b84df6a..4b98a52 100644
--- a/arch/sh/boards/mach-r2d/setup.c
+++ b/arch/sh/boards/mach-r2d/setup.c
@@ -258,7 +258,7 @@
 	return platform_add_devices(rts7751r2d_devices,
 				    ARRAY_SIZE(rts7751r2d_devices));
 }
-__initcall(rts7751r2d_devices_setup);
+device_initcall(rts7751r2d_devices_setup);
 
 static void rts7751r2d_power_off(void)
 {
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index 75e4ddb..1521aa7 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -15,13 +15,13 @@
 #include <linux/i2c.h>
 #include <linux/irq.h>
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <mach/fpga.h>
 #include <mach/irq.h>
 #include <asm/machvec.h>
 #include <asm/heartbeat.h>
 #include <asm/sizes.h>
 #include <asm/clock.h>
-#include <asm/clkdev.h>
 #include <asm/reboot.h>
 #include <asm/smp-ops.h>
 
@@ -135,7 +135,7 @@
 
 	return sdk7786_i2c_setup();
 }
-__initcall(sdk7786_devices_setup);
+device_initcall(sdk7786_devices_setup);
 
 static int sdk7786_mode_pins(void)
 {
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 33039e0..8ab8330 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -77,7 +77,7 @@
 {
 	return platform_add_devices(se7206_devices, ARRAY_SIZE(se7206_devices));
 }
-__initcall(se7206_devices_setup);
+device_initcall(se7206_devices_setup);
 
 static int se7206_mode_pins(void)
 {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 527a0cd..5276793 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/delay.h>
 #include <linux/smc91x.h>
@@ -318,6 +319,10 @@
 	},
 };
 
+static struct platform_device fsi_ak4642_device = {
+	.name		= "sh_fsi_a_ak4642",
+};
+
 /* KEYSC in SoC (Needs SW33-2 set to ON) */
 static struct sh_keysc_info keysc_info = {
 	.mode = SH_KEYSC_MODE_1,
@@ -467,6 +472,7 @@
 static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi0_cn7_device = {
@@ -498,6 +504,7 @@
 static struct sh_mobile_sdhi_info sh7724_sdhi1_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi1_cn8_device = {
@@ -590,6 +597,7 @@
 	&sh7724_usb0_host_device,
 	&sh7724_usb1_gadget_device,
 	&fsi_device,
+	&fsi_ak4642_device,
 	&sdhi0_cn7_device,
 	&sdhi1_cn8_device,
 	&irda_device,
diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c
index 9fbc51b..4ed60c5 100644
--- a/arch/sh/boards/mach-se/7751/setup.c
+++ b/arch/sh/boards/mach-se/7751/setup.c
@@ -48,7 +48,7 @@
 {
 	return platform_add_devices(se7751_devices, ARRAY_SIZE(se7751_devices));
 }
-__initcall(se7751_devices_setup);
+device_initcall(se7751_devices_setup);
 
 /*
  * The Machine Vector
diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c
index 1b20099..f83ac79 100644
--- a/arch/sh/boards/mach-sh03/rtc.c
+++ b/arch/sh/boards/mach-sh03/rtc.c
@@ -108,7 +108,7 @@
 		__raw_writeb(real_minutes % 10, RTC_MIN1);
 		__raw_writeb(real_minutes / 10, RTC_MIN10);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c
index af4a0c0..d4f79b2 100644
--- a/arch/sh/boards/mach-sh03/setup.c
+++ b/arch/sh/boards/mach-sh03/setup.c
@@ -96,7 +96,7 @@
 
 	return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices));
 }
-__initcall(sh03_devices_setup);
+device_initcall(sh03_devices_setup);
 
 static struct sh_machine_vector mv_sh03 __initmv = {
 	.mv_name		= "Interface (CTP/PCI-SH03)",
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 1ce6362..ba515d8 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -24,12 +24,13 @@
 suffix-$(CONFIG_KERNEL_GZIP)	:= gz
 suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
 suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_XZ)	:= xz
 suffix-$(CONFIG_KERNEL_LZO)	:= lzo
 
 targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz \
-	   uImage.bz2 uImage.lzma uImage.lzo uImage.bin
+	   uImage.bz2 uImage.lzma uImage.xz uImage.lzo uImage.bin
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
-	   vmlinux.bin.lzo
+	   vmlinux.bin.xz vmlinux.bin.lzo
 subdir- := compressed romimage
 
 $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
@@ -76,6 +77,9 @@
 $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,lzma)
 
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,xzkern)
+
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,lzo)
 
@@ -88,6 +92,9 @@
 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
 	$(call if_changed,uimage,lzma)
 
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+	$(call if_changed,uimage,xz)
+
 $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
 	$(call if_changed,uimage,lzo)
 
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index cfa5a08..e0b0293 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -6,7 +6,7 @@
 
 targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
 		   vmlinux.bin.bz2 vmlinux.bin.lzma \
-		   vmlinux.bin.lzo \
+		   vmlinux.bin.xz vmlinux.bin.lzo \
 		   head_$(BITS).o misc.o piggy.o
 
 OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
@@ -50,6 +50,8 @@
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,xzkern)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzo)
 
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index 27140a6..95470a4 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -61,6 +61,10 @@
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZO
 #include "../../../../lib/decompress_unlzo.c"
 #endif
diff --git a/arch/sh/boot/romimage/mmcif-sh7724.c b/arch/sh/boot/romimage/mmcif-sh7724.c
index 14863d7..c84e783 100644
--- a/arch/sh/boot/romimage/mmcif-sh7724.c
+++ b/arch/sh/boot/romimage/mmcif-sh7724.c
@@ -21,9 +21,6 @@
 #define HIZCRC		0xa405015c
 #define DRVCRA		0xa405018a
 
-enum { MMCIF_PROGRESS_ENTER, MMCIF_PROGRESS_INIT,
-       MMCIF_PROGRESS_LOAD, MMCIF_PROGRESS_DONE };
-
 /* SH7724 specific MMCIF loader
  *
  * loads the romImage from an MMC card starting from block 512
@@ -63,7 +60,9 @@
 	mmcif_update_progress(MMCIF_PROGRESS_LOAD);
 
 	/* load kernel via MMCIF interface */
-	sh_mmcif_boot_slurp(MMCIF_BASE, buf, no_bytes);
+	sh_mmcif_boot_do_read(MMCIF_BASE, 512,
+	                      (no_bytes + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS,
+			      buf);
 
 	/* disable clock to the MMCIF hardware block */
 	__raw_writel(__raw_readl(MSTPCR2) | 0x20000000, MSTPCR2);
diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig
new file mode 100644
index 0000000..6cb3279
--- /dev/null
+++ b/arch/sh/configs/apsh4a3a_defconfig
@@ -0,0 +1,102 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CPU_SUBTYPE_SH7785=y
+CONFIG_MEMORY_START=0x0C000000
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_SH_APSH4A3A=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KEXEC=y
+CONFIG_PREEMPT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_SH7785FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_CIFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
new file mode 100644
index 0000000..e71a531
--- /dev/null
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -0,0 +1,133 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_RCU_TRACE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_BLK_CGROUP=y
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_CPU_SUBTYPE_SH7786=y
+CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_HUGETLB_PAGE_SIZE_1MB=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_SH_APSH4AD0A=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_SH_CPU_FREQ=y
+CONFIG_KEXEC=y
+CONFIG_SECCOMP=y
+CONFIG_PREEMPT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_VERBOSE=y
+CONFIG_PM_RUNTIME=y
+CONFIG_CPU_IDLE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_SH7785FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_CIFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DWARF_UNWINDER=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig
index 273f3fa..5f7f667 100644
--- a/arch/sh/configs/sh7757lcr_defconfig
+++ b/arch/sh/configs/sh7757lcr_defconfig
@@ -39,21 +39,15 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
 CONFIG_VITESSE_PHY=y
-CONFIG_MDIO_BITBANG=y
 CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
+CONFIG_SH_ETH=y
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_WLAN is not set
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_MOUSE_PS2 is not set
 # CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=3
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
@@ -63,7 +57,6 @@
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_ISO9660_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_PROC_KCORE=y
@@ -76,10 +69,8 @@
 CONFIG_NLS_CODEPAGE_932=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/drivers/pci/fixups-landisk.c b/arch/sh/drivers/pci/fixups-landisk.c
index bb1a6bb..95c6e2d 100644
--- a/arch/sh/drivers/pci/fixups-landisk.c
+++ b/arch/sh/drivers/pci/fixups-landisk.c
@@ -1,9 +1,10 @@
 /*
- * arch/sh/drivers/pci/ops-landisk.c
+ * arch/sh/drivers/pci/fixups-landisk.c
  *
  * PCI initialization for the I-O DATA Device, Inc. LANDISK board
  *
  * Copyright (C) 2006 kogiidena
+ * Copyright (C) 2010 Nobuhiro Iwamatsu
  *
  * May be copied or modified under the terms of the GNU General Public
  * License.  See linux/COPYING for more information.
@@ -15,6 +16,9 @@
 #include <linux/pci.h>
 #include "pci-sh4.h"
 
+#define PCIMCR_MRSET_OFF	0xBFFFFFFF
+#define PCIMCR_RFSH_OFF		0xFFFFFFFB
+
 int pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
 {
 	/*
@@ -26,9 +30,29 @@
 	int irq = ((slot + pin - 1) & 0x3) + 5;
 
 	if ((slot | (pin - 1)) > 0x3) {
-		printk("PCI: Bad IRQ mapping request for slot %d pin %c\n",
+		printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n",
 		       slot, pin - 1 + 'A');
 		return -1;
 	}
 	return irq;
 }
+
+int pci_fixup_pcic(struct pci_channel *chan)
+{
+	unsigned long bcr1, mcr;
+
+	bcr1 = __raw_readl(SH7751_BCR1);
+	bcr1 |= 0x40080000;	/* Enable Bit 19 BREQEN, set PCIC to slave */
+	pci_write_reg(chan, bcr1, SH4_PCIBCR1);
+
+	mcr = __raw_readl(SH7751_MCR);
+	mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
+	pci_write_reg(chan, mcr, SH4_PCIMCR);
+
+	pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5);
+	pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6);
+	pci_write_reg(chan, 0x0c000000, SH4_PCILAR0);
+	pci_write_reg(chan, 0x00000000, SH4_PCILAR1);
+
+	return 0;
+}
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 89ab2c5..28c5aa5 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -11,11 +11,6 @@
  *
  * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
  * automatically, there are also __raw versions, which do not.
- *
- * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
- * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
- * these have the same semantics as the __raw variants, and as such, all
- * new code should be using the __raw versions.
  */
 #include <linux/errno.h>
 #include <asm/cache.h>
@@ -231,52 +226,6 @@
 
 #endif
 
-/*
- * Legacy SuperH on-chip I/O functions
- *
- * These are all deprecated, all new (and especially cross-platform) code
- * should be using the __raw_xxx() routines directly.
- */
-static inline u8 __deprecated ctrl_inb(unsigned long addr)
-{
-	return __raw_readb(addr);
-}
-
-static inline u16 __deprecated ctrl_inw(unsigned long addr)
-{
-	return __raw_readw(addr);
-}
-
-static inline u32 __deprecated ctrl_inl(unsigned long addr)
-{
-	return __raw_readl(addr);
-}
-
-static inline u64 __deprecated ctrl_inq(unsigned long addr)
-{
-	return __raw_readq(addr);
-}
-
-static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
-{
-	__raw_writeb(v, addr);
-}
-
-static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
-{
-	__raw_writew(v, addr);
-}
-
-static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
-{
-	__raw_writel(v, addr);
-}
-
-static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
-{
-	__raw_writeq(v, addr);
-}
-
 #define IO_SPACE_LIMIT 0xffffffff
 
 /* synco on SH-4A, otherwise a nop */
@@ -341,7 +290,15 @@
 	 * mapping must be done by the PMB or by using page tables.
 	 */
 	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
 			return (void __iomem *)P1SEGADDR(offset);
 
 		return (void __iomem *)P2SEGADDR(offset);
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index dd5d6e5..57c5c3d 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -31,6 +31,7 @@
 	int (*mv_mode_pins)(void);
 
 	void (*mv_mem_init)(void);
+	void (*mv_mem_reserve)(void);
 };
 
 extern struct sh_machine_vector sh_mv;
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 083ea06..db85916 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -134,6 +134,7 @@
 extern void pgtable_cache_init(void);
 
 struct vm_area_struct;
+struct mm_struct;
 
 extern void __update_cache(struct vm_area_struct *vma,
 			   unsigned long address, pte_t pte);
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 43528ec..b799fe7 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -76,6 +76,10 @@
 /* Wrapper for extended mode pgprot twiddling */
 #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
 
+#ifdef CONFIG_X2TLB
+#define _PAGE_PCC_MASK	0x00000000	/* No legacy PTEA support */
+#else
+
 /* software: moves to PTEA.TC (Timing Control) */
 #define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
 #define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */
@@ -89,7 +93,8 @@
 #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
 #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 16 bit bus */
 
-#ifndef CONFIG_X2TLB
+#define _PAGE_PCC_MASK	0xe0000001
+
 /* copy the ptea attributes */
 static inline unsigned long copy_ptea_attributes(unsigned long x)
 {
@@ -231,13 +236,7 @@
 					   _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_PCC(slot, type) \
-			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
-				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
-				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-					   _PAGE_EXT_KERN_WRITE | \
-					   _PAGE_EXT_KERN_EXEC) \
-				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
-				 (type))
+			__pgprot(0)
 
 #elif defined(CONFIG_MMU) /* SH-X TLB */
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index c9e7cbc..9c7bdfc 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -35,7 +35,7 @@
 	CPU_SH7723, CPU_SH7724, CPU_SH7757, CPU_SHX3,
 
 	/* SH4AL-DSP types */
-	CPU_SH7343, CPU_SH7722, CPU_SH7366,
+	CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
 
 	/* SH-5 types */
         CPU_SH5_101, CPU_SH5_103,
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e3c73cd..900f8d7 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -194,15 +194,17 @@
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
 
 #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
+
 #define PREFETCH_STRIDE		L1_CACHE_BYTES
 #define ARCH_HAS_PREFETCH
 #define ARCH_HAS_PREFETCHW
-static inline void prefetch(void *x)
+
+static inline void prefetch(const void *x)
 {
 	__builtin_prefetch(x, 0, 3);
 }
 
-static inline void prefetchw(void *x)
+static inline void prefetchw(const void *x)
 {
 	__builtin_prefetch(x, 1, 3);
 }
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index a78701d..4a53500 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,7 @@
 
 #include <asm-generic/sections.h>
 
-extern void __nosave_begin, __nosave_end;
+extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char _ebss[];
diff --git a/arch/sh/include/mach-landisk/mach/iodata_landisk.h b/arch/sh/include/mach-landisk/mach/iodata_landisk.h
index 6fb04ab..f432773 100644
--- a/arch/sh/include/mach-landisk/mach/iodata_landisk.h
+++ b/arch/sh/include/mach-landisk/mach/iodata_landisk.h
@@ -2,7 +2,7 @@
 #define __ASM_SH_IODATA_LANDISK_H
 
 /*
- * linux/include/asm-sh/landisk/iodata_landisk.h
+ * arch/sh/include/mach-landisk/mach/iodata_landisk.h
  *
  * Copyright (C) 2000  Atom Create Engineering Co., Ltd.
  *
@@ -27,7 +27,7 @@
 
 #define IRQ_PCIINTA	5		/* PCI INTA IRQ */
 #define IRQ_PCIINTB	6		/* PCI INTB IRQ */
-#define IRQ_PCIINDC	7		/* PCI INTC IRQ */
+#define IRQ_PCIINTC	7		/* PCI INTC IRQ */
 #define IRQ_PCIINTD	8		/* PCI INTD IRQ */
 #define IRQ_ATA		9		/* ATA IRQ */
 #define IRQ_FATA	10		/* FATA IRQ */
@@ -35,6 +35,8 @@
 #define IRQ_BUTTON	12		/* USL-5P Button IRQ */
 #define IRQ_FAULT	13		/* USL-5P Fault  IRQ */
 
+void init_landisk_IRQ(void);
+
 #define __IO_PREFIX landisk
 #include <asm/io_generic.h>
 
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
index e80a936..f47be87 100644
--- a/arch/sh/kernel/cpu/proc.c
+++ b/arch/sh/kernel/cpu/proc.c
@@ -25,7 +25,7 @@
 	[CPU_SH5_101]	= "SH5-101",	[CPU_SH5_103]	= "SH5-103",
 	[CPU_MXG]	= "MX-G",	[CPU_SH7723]	= "SH7723",
 	[CPU_SH7366]	= "SH7366",	[CPU_SH7724]	= "SH7724",
-	[CPU_SH_NONE]	= "Unknown"
+	[CPU_SH7372]	= "SH7372",	[CPU_SH_NONE]	= "Unknown"
 };
 
 const char *get_cpu_subtype(struct sh_cpuinfo *c)
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index c363851..0f8befc 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -62,6 +62,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xf8400000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 88, 88, 88, 88 },
 };
@@ -77,6 +79,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xf8410000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 92, 92, 92, 92 },
 };
@@ -92,6 +96,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xf8420000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 96, 96, 96, 96 },
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index 6c96ea0..949bf2b 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -201,6 +201,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xff804000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 220, 220, 220, 220 },
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index d08bf4c..9df558d 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -180,6 +180,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffe8000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 180, 180, 180, 180 }
 };
@@ -195,6 +197,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xfffe8800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 184, 184, 184, 184 }
 };
@@ -210,6 +214,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfffe9000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 188, 188, 188, 188 }
 };
@@ -225,6 +231,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfffe9800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 192, 192, 192, 192 }
 };
@@ -240,6 +248,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xfffea000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 196, 196, 196, 196 }
 };
@@ -255,6 +265,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xfffea800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 200, 200, 200, 200 }
 };
@@ -270,6 +282,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xfffeb000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 204, 204, 204, 204 }
 };
@@ -285,6 +299,8 @@
 static struct plat_sci_port scif7_platform_data = {
 	.mapbase	= 0xfffeb800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 208, 208, 208, 208 }
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index 832f401..a43124e 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -176,6 +176,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffe8000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 192, 192, 192, 192 },
 };
@@ -191,6 +193,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xfffe8800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 196, 196, 196, 196 },
 };
@@ -206,6 +210,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfffe9000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 200, 200, 200, 200 },
 };
@@ -221,6 +227,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfffe9800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 204, 204, 204, 204 },
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index dc47b04..5d14f84 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -136,6 +136,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffe8000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 240, 240, 240, 240 },
 };
@@ -151,6 +153,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xfffe8800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 244, 244, 244, 244 },
 };
@@ -166,6 +170,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfffe9000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 248, 248, 248, 248 },
 };
@@ -181,6 +187,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfffe9800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 252, 252, 252, 252 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index baadd7f..cd2e702 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -70,6 +70,9 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xa4410000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TIE | SCSCR_RIE  | SCSCR_TE |
+			  SCSCR_RE  | SCSCR_CKE1 | SCSCR_CKE0,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { 56, 56, 56 },
 };
@@ -85,6 +88,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4400000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 52, 52 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 3cf8c8e..4551ad6 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -109,6 +109,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffffe80,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCI,
 	.irqs		= { 23, 23, 23, 0 },
 };
@@ -126,6 +128,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4000150,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 56, 56, 56, 56 },
 };
@@ -143,6 +147,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xa4000140,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_IRDA,
 	.irqs		= { 52, 52, 52, 52 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index b0c2fb4..78f6b01 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -99,6 +99,9 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xa4400000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+			  SCSCR_CKE1 | SCSCR_CKE0,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 52, 52, 52 },
 };
@@ -114,6 +117,9 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4410000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+			  SCSCR_CKE1 | SCSCR_CKE0,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs           = { 56, 56, 56, 56 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 24b1713..365b94a6 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -1,5 +1,5 @@
 /*
- * SH7720 Setup
+ * Setup code for SH7720, SH7721.
  *
  *  Copyright (C) 2007  Markus Brunner, Mark Jonas
  *  Copyright (C) 2009  Paul Mundt
@@ -51,6 +51,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xa4430000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { 80, 80, 80, 80 },
 };
@@ -66,6 +68,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4438000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index b93458f..971cf0f 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -151,8 +151,14 @@
 			boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
 			break;
 		case 0x10:
+		case 0x11:
 			boot_cpu_data.type = CPU_SH7757;
 			break;
+		case 0xd0:
+		case 0x40: /* yon-ten-go */
+			boot_cpu_data.type = CPU_SH7372;
+			break;
+
 		}
 		break;
 	case 0x4000:	/* 1st cut */
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index e916b18..5b28331 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -18,6 +18,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 41, 43, 42 },
 };
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 911d196..e53b4b3 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/sh_timer.h>
 #include <linux/serial_sci.h>
+#include <generated/machtypes.h>
 
 static struct resource rtc_resources[] = {
 	[0] = {
@@ -35,33 +36,37 @@
 	.resource	= rtc_resources,
 };
 
-static struct plat_sci_port scif0_platform_data = {
+static struct plat_sci_port sci_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCI,
 	.irqs		= { 23, 23, 23, 0 },
 };
 
-static struct platform_device scif0_device = {
+static struct platform_device sci_device = {
 	.name		= "sh-sci",
 	.id		= 0,
 	.dev		= {
-		.platform_data	= &scif0_platform_data,
+		.platform_data	= &sci_platform_data,
 	},
 };
 
-static struct plat_sci_port scif1_platform_data = {
+static struct plat_sci_port scif_platform_data = {
 	.mapbase	= 0xffe80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
 
-static struct platform_device scif1_device = {
+static struct platform_device scif_device = {
 	.name		= "sh-sci",
 	.id		= 1,
 	.dev		= {
-		.platform_data	= &scif1_platform_data,
+		.platform_data	= &scif_platform_data,
 	},
 };
 
@@ -210,8 +215,6 @@
 #endif
 
 static struct platform_device *sh7750_devices[] __initdata = {
-	&scif0_device,
-	&scif1_device,
 	&rtc_device,
 	&tmu0_device,
 	&tmu1_device,
@@ -226,14 +229,19 @@
 
 static int __init sh7750_devices_setup(void)
 {
+	if (mach_is_rts7751r2d()) {
+		platform_device_register(&scif_device);
+	} else {
+		platform_device_register(&sci_device);
+		platform_device_register(&scif_device);
+	}
+
 	return platform_add_devices(sh7750_devices,
 				    ARRAY_SIZE(sh7750_devices));
 }
 arch_initcall(sh7750_devices_setup);
 
 static struct platform_device *sh7750_early_devices[] __initdata = {
-	&scif0_device,
-	&scif1_device,
 	&tmu0_device,
 	&tmu1_device,
 	&tmu2_device,
@@ -247,6 +255,19 @@
 
 void __init plat_early_device_setup(void)
 {
+	struct platform_device *dev[1];
+
+	if (mach_is_rts7751r2d()) {
+		scif_platform_data.scscr |= SCSCR_CKE1;
+		dev[0] = &scif_device;
+		early_platform_add_devices(dev, 1);
+	} else {
+		dev[0] = &sci_device;
+		early_platform_add_devices(dev, 1);
+		dev[0] = &scif_device;
+		early_platform_add_devices(dev, 1);
+	}
+
 	early_platform_add_devices(sh7750_early_devices,
 				   ARRAY_SIZE(sh7750_early_devices));
 }
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 48ea8fe..78bbf23 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -129,6 +129,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfe600000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 53, 55, 54 },
 };
@@ -145,6 +147,8 @@
 	.mapbase	= 0xfe610000,
 	.flags		= UPF_BOOT_AUTOCONF,
 	.type		= PORT_SCIF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.irqs		= { 72, 73, 75, 74 },
 };
 
@@ -159,6 +163,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfe620000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 77, 79, 78 },
 };
@@ -174,6 +180,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfe480000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCI,
 	.irqs		= { 80, 81, 82, 0 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 3681cafd..1b88483 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -19,6 +19,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -34,6 +36,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -49,6 +53,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -64,6 +70,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase        = 0xffe30000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 83, 83, 83, 83 },
 };
@@ -360,6 +368,8 @@
 
 enum {
 	UNUSED = 0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -375,15 +385,13 @@
 	I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
 	I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
 	SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI,
-	IRDA,
-	SDHI0, SDHI1, SDHI2, SDHI3,
-	CMT, TSIF, SIU,
+	IRDA, SDHI, CMT, TSIF, SIU,
 	TMU0, TMU1, TMU2,
 	JPU, LCDC,
 
 	/* interrupt groups */
 
-	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, SDHI, USB,
+	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB,
 };
 
 static struct intc_vect vectors[] __initdata = {
@@ -412,8 +420,8 @@
 	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
 	INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20),
 	INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60),
-	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
-	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+	INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
 	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
 	INTC_VECT(SIU, 0xf80),
 	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -431,7 +439,6 @@
 	INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
 	INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
 	INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI),
-	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
 	INTC_GROUP(USB, USBI0, USBI1),
 };
 
@@ -452,7 +459,7 @@
 	  { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USBI1, USBI0 } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -488,9 +495,13 @@
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7343", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7343",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 8dab9e1..82616af 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -21,6 +21,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 80, 80, 80, 80 },
 };
@@ -319,6 +321,8 @@
 
 enum {
 	UNUSED=0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -332,14 +336,13 @@
 	DENC, MSIOF,
 	FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
 	I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
-	SDHI0, SDHI1, SDHI2, SDHI3,
-	CMT, TSIF, SIU,
+	SDHI, CMT, TSIF, SIU,
 	TMU0, TMU1, TMU2,
 	VEU2, LCDC,
 
 	/* interrupt groups */
 
-	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
+	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
 };
 
 static struct intc_vect vectors[] __initdata = {
@@ -364,8 +367,8 @@
 	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
 	INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
 	INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
-	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
-	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+	INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
 	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
 	INTC_VECT(SIU, 0xf80),
 	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -381,7 +384,6 @@
 	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
 		   FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
 	INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
-	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
 };
 
 static struct intc_mask_reg mask_registers[] __initdata = {
@@ -403,7 +405,7 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB, } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -441,9 +443,13 @@
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7366", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7366",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index d551ed8..5813d80 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -181,6 +181,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -196,6 +198,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -211,6 +215,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -699,7 +705,7 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 0eadefd..0723822 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -24,6 +24,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -39,6 +41,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -54,6 +58,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -69,6 +75,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase        = 0xa4e30000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 56, 56, 56, 56 },
 };
@@ -84,6 +92,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase        = 0xa4e40000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 88, 88, 88, 88 },
 };
@@ -99,6 +109,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase        = 0xa4e50000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 109, 109, 109, 109 },
 };
@@ -719,7 +731,7 @@
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
-	    0, DISABLED, ENABLED, ENABLED } },
+	    0, ENABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
 	{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
@@ -736,7 +748,7 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { 0, DISABLED, ENABLED, ENABLED,
+	  { 0, ENABLED, ENABLED, ENABLED,
 	    0, 0, SCIFA_SCIFA2, SIU_SIUI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 828c965..0333fe9 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -257,6 +257,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -272,6 +274,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -287,6 +291,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -302,6 +308,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase        = 0xa4e30000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 56, 56, 56, 56 },
 };
@@ -317,6 +325,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase        = 0xa4e40000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 88, 88, 88, 88 },
 };
@@ -332,6 +342,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase        = 0xa4e50000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 109, 109, 109, 109 },
 };
@@ -1144,7 +1156,7 @@
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
-	    0, DISABLED, ENABLED, ENABLED } },
+	    0, ENABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
 	    DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
@@ -1166,7 +1178,7 @@
 	  { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
 	    I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    0, 0, SCIFA5, FSI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 749c638..9c1de26 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -20,6 +20,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfe4b0000,		/* SCIF2 */
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -35,6 +37,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfe4c0000,		/* SCIF3 */
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 76, 76, 76 },
 };
@@ -50,6 +54,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xfe4d0000,		/* SCIF4 */
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 104, 104, 104, 104 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 5b5f6b0..593eca6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -19,6 +19,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -34,6 +36,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffe08000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 76, 76, 76 },
 };
@@ -49,6 +53,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffe10000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 104, 104, 104, 104 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 7270d7f..2c6aa22 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -17,6 +17,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xff923000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 61, 61, 61, 61 },
 };
@@ -32,6 +34,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xff924000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 62, 62, 62, 62 },
 };
@@ -47,6 +51,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xff925000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 63, 63, 63, 63 },
 };
@@ -62,6 +68,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xff926000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 64, 64, 64, 64 },
 };
@@ -77,6 +85,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xff927000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 65, 65, 65, 65 },
 };
@@ -92,6 +102,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xff928000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 66, 66, 66, 66 },
 };
@@ -107,6 +119,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xff929000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 67, 67, 67, 67 },
 };
@@ -122,6 +136,8 @@
 static struct plat_sci_port scif7_platform_data = {
 	.mapbase	= 0xff92a000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 68, 68, 68, 68 },
 };
@@ -137,6 +153,8 @@
 static struct plat_sci_port scif8_platform_data = {
 	.mapbase	= 0xff92b000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 69, 69, 69, 69 },
 };
@@ -152,6 +170,8 @@
 static struct plat_sci_port scif9_platform_data = {
 	.mapbase	= 0xff92c000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 70, 70, 70, 70 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 0f41486..08add7f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -20,6 +20,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -35,6 +37,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffe10000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 76, 76, 76 },
 };
@@ -379,6 +383,7 @@
 				    ARRAY_SIZE(sh7780_devices));
 }
 arch_initcall(sh7780_devices_setup);
+
 static struct platform_device *sh7780_early_devices[] __initdata = {
 	&scif0_device,
 	&scif1_device,
@@ -392,6 +397,13 @@
 
 void __init plat_early_device_setup(void)
 {
+	if (mach_is_sh2007()) {
+		scif0_platform_data.scscr &= ~SCSCR_CKE1;
+		scif0_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
+		scif1_platform_data.scscr &= ~SCSCR_CKE1;
+		scif1_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
+	}
+
 	early_platform_add_devices(sh7780_early_devices,
 				   ARRAY_SIZE(sh7780_early_devices));
 }
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index c9a572b..18d8fc1 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -23,6 +23,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffea0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -38,6 +40,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffeb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 44, 44, 44, 44 },
 };
@@ -53,6 +57,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffec0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 60, 60, 60, 60 },
 };
@@ -68,6 +74,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xffed0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 61, 61, 61, 61 },
 };
@@ -83,6 +91,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xffee0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 62, 62, 62, 62 },
 };
@@ -98,6 +108,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xffef0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 63, 63, 63, 63 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 0170dbd..1656b8c 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -29,6 +29,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffea0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 41, 43, 42 },
 };
@@ -47,6 +49,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffeb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 44, 44, 44, 44 },
 };
@@ -62,6 +66,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffec0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 50, 50, 50, 50 },
 };
@@ -77,6 +83,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xffed0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 51, 51, 51, 51 },
 };
@@ -92,6 +100,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xffee0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 52, 52, 52 },
 };
@@ -107,6 +117,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xffef0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 53, 53, 53, 53 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 013f0b1..bb20880 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -29,6 +29,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffc30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 41, 43, 42 },
 };
@@ -44,6 +46,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffc40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 44, 45, 47, 46 },
 };
@@ -59,6 +63,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffc60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 53, 55, 54 },
 };
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index d910666..18419f1 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -19,6 +19,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= PHYS_PERIPHERAL_BLOCK + 0x01030000,
 	.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 39, 40, 42, 0 },
 };
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 83972aa..c19e2a9 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -81,7 +81,6 @@
 	state->target_residency = 1 * 2;
 	state->power_usage = 3;
 	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_SHALLOW;
 	state->flags |= CPUIDLE_FLAG_TIME_VALID;
 	state->enter = cpuidle_sleep_enter;
 
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index e559687..a6f95ae 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -141,7 +141,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops sh_pm_ops = {
+static const struct platform_suspend_ops sh_pm_ops = {
 	.enter          = sh_pm_enter,
 	.valid          = suspend_valid_only_mem,
 };
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 948fdb6..38e8628 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -17,6 +17,7 @@
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index faa8f86..0901b2f 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -10,6 +10,16 @@
 void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
+		/*
+		 * ST40-300 appears to have an issue with this code,
+		 * normally taking two cycles each loop, as with all
+		 * other SH variants. If however the branch and the
+		 * delay slot straddle an 8 byte boundary, this increases
+		 * to 3 cycles.
+		 * This align directive ensures this doesn't occur.
+		 */
+		".balign 8\n\t"
+
 		"tst	%0, %0\n\t"
 		"1:\t"
 		"bf/s	1b\n\t"
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index eb4cc4e..d1bffbc 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -568,7 +568,7 @@
 }
 
 /*
- * Flush the range [start,end] of kernel virtual adddress space from
+ * Flush the range [start,end] of kernel virtual address space from
  * the I-cache.  The corresponding range must be purged from the
  * D-cache also because the SH-5 doesn't have cache snooping between
  * the caches.  The addresses will be visible through the superpage
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 88d3dc3..5a580ea 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -108,7 +108,8 @@
 		kunmap_atomic(vfrom, KM_USER0);
 	}
 
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+	    (vma->vm_flags & VM_EXEC))
 		__flush_purge_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 9163db3..d776234 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -35,7 +35,7 @@
 		if (pud) {
 			pmd = pmd_alloc(mm, pud, addr);
 			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
+				pte = pte_alloc_map(mm, NULL, pmd, addr);
 		}
 	}
 
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 3385b28..0d3f912 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -2,7 +2,7 @@
  * linux/arch/sh/mm/init.c
  *
  *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2002 - 2010  Paul Mundt
+ *  Copyright (C) 2002 - 2011  Paul Mundt
  *
  *  Based on linux/arch/i386/mm/init.c:
  *   Copyright (C) 1995  Linus Torvalds
@@ -325,11 +325,17 @@
 	int nid;
 
 	memblock_init();
-
 	sh_mv.mv_mem_init();
 
 	early_reserve_mem();
 
+	/*
+	 * Once the early reservations are out of the way, give the
+	 * platforms a chance to kick out some memory.
+	 */
+	if (sh_mv.mv_mem_reserve)
+		sh_mv.mv_mem_reserve();
+
 	memblock_enforce_memory_limit(memory_limit);
 	memblock_analyze();
 
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 0e68465..6dd56c4 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -9,6 +9,7 @@
 HIGHLANDER		SH_HIGHLANDER
 RTS7751R2D		SH_RTS7751R2D
 RSK			SH_RSK
+ALPHA_BOARD		SH_ALPHA_BOARD
 
 #
 # List of companion chips / MFDs.
@@ -61,3 +62,5 @@
 POLARIS			SH_POLARIS
 KFR2R09			SH_KFR2R09
 ECOVEC			SH_ECOVEC
+APSH4A3A		SH_APSH4A3A
+APSH4AD0A		SH_APSH4AD0A
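
Aside on the arch/sh/mm/init.c hunk above: it only adds the call site for the new mv_mem_reserve() machine-vector hook. As a rough illustration only (not part of this pull; the board name and the reserved region are made up, and the hook field is assumed to exist in struct sh_machine_vector as implied by the call site), a board file might wire it up roughly like this:

/*
 * Illustrative sketch only: a hypothetical board keeping a firmware
 * scratch region out of the bootmem pool via the mv_mem_reserve()
 * hook that the init path now calls after early_reserve_mem().
 */
#include <linux/memblock.h>
#include <asm/machvec.h>

static void example_board_mem_reserve(void)
{
	/* Hypothetical: reserve 1 MiB of firmware scratch at 0x0ff00000. */
	memblock_reserve(0x0ff00000, 0x00100000);
}

static struct sh_machine_vector mv_example_board __initmv = {
	.mv_name	= "example_board",
	.mv_mem_reserve	= example_board_mem_reserve,
};
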
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 45d9c87..95695e9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -50,6 +50,7 @@
 	select RTC_DRV_STARFIRE
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
+	select HAVE_GENERIC_HARDIRQS
 
 config ARCH_DEFCONFIG
 	string
@@ -107,10 +108,6 @@
 config NEED_PER_CPU_PAGE_FIRST_CHUNK
 	def_bool y if SPARC64
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	def_bool y if SPARC64
-
 config MMU
 	bool
 	default y
@@ -276,10 +273,6 @@
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
-config GENERIC_HARDIRQS
-	bool
-	default y if SPARC64
-
 source "kernel/time/Kconfig"
 
 if SPARC64
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
index a2f5c61..843e4fa 100644
--- a/arch/sparc/include/asm/pcr.h
+++ b/arch/sparc/include/asm/pcr.h
@@ -43,4 +43,6 @@
 
 extern u64 pcr_enable;
 
+extern int pcr_arch_init(void);
+
 #endif /* __PCR_H */
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index e447938..0dc714f 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -375,5 +375,5 @@
 	return 0;
 }
 
-arch_initcall(cpu_type_probe);
+early_initcall(cpu_type_probe);
 #endif
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 47977a7..72509d0 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -255,10 +255,9 @@
 static int iommu_alloc_ctx(struct iommu *iommu)
 {
 	int lowest = iommu->ctx_lowest_free;
-	int sz = IOMMU_NUM_CTXS - lowest;
-	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
 
-	if (unlikely(n == sz)) {
+	if (unlikely(n == IOMMU_NUM_CTXS)) {
 		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
 		if (unlikely(n == lowest)) {
 			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index ee3c7dd..8d348c4 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -23,17 +23,11 @@
 
 static void *module_map(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size || size > MODULES_LEN)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 }
 
 static char *dot2underscore(char *name)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index b87873c..7c2ced6 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -167,5 +167,3 @@
 	unregister_perf_hsvc();
 	return err;
 }
-
-arch_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b6a2b8f..555a76d 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -49,6 +49,7 @@
 #include <asm/mdesc.h>
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
+#include <asm/pcr.h>
 
 #include "cpumap.h"
 
@@ -1358,6 +1359,7 @@
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+	pcr_arch_init();
 }
 
 void smp_send_reschedule(int cpu)
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 42ad2ba..1e97709 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -622,7 +622,7 @@
 static const char CHAFSR_IERR_msg[] =
 	"Internal processor error";
 static const char CHAFSR_ISAP_msg[] =
-	"System request parity error on incoming addresss";
+	"System request parity error on incoming address";
 static const char CHAFSR_UCU_msg[] =
 	"Uncorrectable E-cache ECC error for ifetch/data";
 static const char CHAFSR_UCC_msg[] =
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S
index 8cc0345..8f096e8 100644
--- a/arch/sparc/kernel/una_asm_32.S
+++ b/arch/sparc/kernel/una_asm_32.S
@@ -24,9 +24,9 @@
 	.globl	__do_int_store
 __do_int_store:
 	ld	[%o2], %g1
-	cmp	%1, 2
+	cmp	%o1, 2
 	be	2f
-	 cmp	%1, 4
+	 cmp	%o1, 4
 	be	1f
 	 srl	%g1, 24, %g2
 	srl	%g1, 16, %g7
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
index 764b3eb..48d00e7 100644
--- a/arch/sparc/lib/bitext.c
+++ b/arch/sparc/lib/bitext.c
@@ -10,7 +10,7 @@
  */
 
 #include <linux/string.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 
 #include <asm/bitext.h>
 
@@ -80,8 +80,7 @@
 		while (test_bit(offset + i, t->map) == 0) {
 			i++;
 			if (i == len) {
-				for (i = 0; i < len; i++)
-					__set_bit(offset + i, t->map);
+				bitmap_set(t->map, offset, len);
 				if (offset == t->first_free)
 					t->first_free = find_next_zero_bit
 							(t->map, t->size,
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
index 5edcac1..e6067b7 100644
--- a/arch/sparc/mm/generic_32.c
+++ b/arch/sparc/mm/generic_32.c
@@ -50,7 +50,7 @@
 		end = PGDIR_SIZE;
 	offset -= address;
 	do {
-		pte_t * pte = pte_alloc_map(mm, pmd, address);
+		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
index 04f2bf4..3cb00df 100644
--- a/arch/sparc/mm/generic_64.c
+++ b/arch/sparc/mm/generic_64.c
@@ -92,7 +92,7 @@
 		end = PGDIR_SIZE;
 	offset -= address;
 	do {
-		pte_t * pte = pte_alloc_map(mm, pmd, address);
+		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 5fdddf1..f4e9764 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -214,7 +214,7 @@
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
+			pte = pte_alloc_map(mm, NULL, pmd, addr);
 	}
 	return pte;
 }
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index e11b5fc..08948e4 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -1,25 +1,34 @@
 # For a description of the syntax of this configuration file,
 # see Documentation/kbuild/config-language.txt.
 
+config TILE
+	def_bool y
+	select HAVE_KVM if !TILEGX
+	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_FIND_NEXT_BIT
+	select USE_GENERIC_SMP_HELPERS
+	select CC_OPTIMIZE_FOR_SIZE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP
+
+# FIXME: investigate whether we need/want these options.
+#	select HAVE_IOREMAP_PROT
+#       select HAVE_OPTPROBES
+#       select HAVE_REGS_AND_STACK_ACCESS_API
+#       select HAVE_HW_BREAKPOINT
+#       select PERF_EVENTS
+#       select HAVE_USER_RETURN_NOTIFIER
+#       config NO_BOOTMEM
+#       config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+#       config HUGETLB_PAGE_SIZE_VARIABLE
+
 config MMU
 	def_bool y
 
 config GENERIC_CSUM
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
-config GENERIC_PENDING_IRQ
-	def_bool y
-	depends on GENERIC_HARDIRQS && SMP
-
 config SEMAPHORE_SLEEPERS
 	def_bool y
 
@@ -97,26 +106,6 @@
 	select HVC_DRIVER
 	def_bool y
 
-config TILE
-	def_bool y
-	select HAVE_KVM if !TILEGX
-	select GENERIC_FIND_FIRST_BIT
-	select GENERIC_FIND_NEXT_BIT
-	select USE_GENERIC_SMP_HELPERS
-	select CC_OPTIMIZE_FOR_SIZE
-
-# FIXME: investigate whether we need/want these options.
-#	select HAVE_IOREMAP_PROT
-#       select HAVE_OPTPROBES
-#       select HAVE_REGS_AND_STACK_ACCESS_API
-#       select HAVE_HW_BREAKPOINT
-#       select PERF_EVENTS
-#       select HAVE_USER_RETURN_NOTIFIER
-#       config NO_BOOTMEM
-#       config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-#       config HUGETLB_PAGE_SIZE_VARIABLE
-
-
 # Please note: TILE-Gx support is not yet finalized; this is
 # the preliminary support.  TILE-Gx drivers are only provided
 # with the alpha or beta test versions for Tilera customers.
@@ -220,7 +209,7 @@
 
 choice
 	depends on !TILEGX
-	prompt "Memory split" if EMBEDDED
+	prompt "Memory split" if EXPERT
 	default VMSPLIT_3G
 	---help---
 	  Select the desired split between kernel and user memory.
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
index a81f0fb..9bc161a 100644
--- a/arch/tile/Kconfig.debug
+++ b/arch/tile/Kconfig.debug
@@ -3,7 +3,7 @@
 source "lib/Kconfig.debug"
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED && DEBUG_KERNEL
+	bool "Early printk" if EXPERT && DEBUG_KERNEL
 	default y
 	help
 	  Write kernel log output directly via the hypervisor console.
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig
index 919c54a..0fe54445 100644
--- a/arch/tile/configs/tile_defconfig
+++ b/arch/tile/configs/tile_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 049d048..e351e14 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,14 +3,10 @@
 	option defconfig_list
 	default "arch/$ARCH/defconfig"
 
-# UML uses the generic IRQ subsystem
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config UML
 	bool
 	default y
+	select HAVE_GENERIC_HARDIRQS
 
 config MMU
 	bool
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index 50d6aa2..90a438a 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -120,9 +120,6 @@
 
 	  If you don't know what to do, say N.
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
 	range 2 32
@@ -131,7 +128,7 @@
 
 config HIGHMEM
 	bool "Highmem support (EXPERIMENTAL)"
-	depends on !64BIT && EXPERIMENTAL
+	depends on !64BIT && BROKEN
 	default n
 	help
 	  This was used to allow UML to run with big amounts of memory.
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 564f3de..9f7634f 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -133,7 +133,7 @@
 # CONFIG_BLK_DEV_INITRD is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
-# CONFIG_EMBEDDED is not set
+# CONFIG_EXPERT is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 1664cce..050e4dd 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -821,12 +821,12 @@
 
 static void unregister_winch(struct tty_struct *tty)
 {
-	struct list_head *ele;
+	struct list_head *ele, *next;
 	struct winch *winch;
 
 	spin_lock(&winch_handler_lock);
 
-	list_for_each(ele, &winch_handlers) {
+	list_for_each_safe(ele, next, &winch_handlers) {
 		winch = list_entry(ele, struct winch, list);
 		if (winch->tty == tty) {
 			free_winch(winch, 1);
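
For context on the line.c change above, a hedged sketch (not from this pull): plain list_for_each() dereferences the current node to advance after the loop body has run, so freeing that node in the body is a use-after-free; list_for_each_safe() (or the _entry_safe form below) caches the next pointer first, which is why unregister_winch() can keep calling free_winch() inside the loop. The struct and function names here are hypothetical.

/*
 * Sketch only: delete-while-iterating done safely with the _safe
 * iterator, which saves the next element before the body runs.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	int key;
};

static void remove_matching(struct list_head *head, int key)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, list) {
		if (it->key == key) {
			list_del(&it->list);
			kfree(it);	/* safe: ->next was saved in tmp */
		}
	}
}
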
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index 8501e7d..7e0619c 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -37,13 +37,7 @@
 	if (*ppos > mmapper_size)
 		return -EINVAL;
 
-	if (count > mmapper_size - *ppos)
-		count = mmapper_size - *ppos;
-
-	if (copy_from_user(&v_buf[*ppos], buf, count))
-		return -EFAULT;
-
-	return count;
+	return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count);
 }
 
 static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -137,3 +131,4 @@
 
 MODULE_AUTHOR("Greg Lonnon <glonnon@ridgerun.com>");
 MODULE_DESCRIPTION("DSPLinux simulator mmapper driver");
+MODULE_LICENSE("GPL");
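
One note on the mmapper_kern.c change above: simple_write_to_buffer() is the existing libfs helper that folds the clamp-and-copy_from_user() sequence into one call. A minimal sketch (hypothetical file and buffer names) of how a write() handler uses it:

/*
 * Sketch only: simple_write_to_buffer() clamps count to the space left
 * after *ppos, copies from user space, advances *ppos and returns the
 * number of bytes written (or -EFAULT).
 */
#include <linux/fs.h>
#include <linux/uaccess.h>

static char demo_buf[4096];

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return simple_write_to_buffer(demo_buf, sizeof(demo_buf),
				      ppos, buf, count);
}
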
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 3d099f9..1aee587 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -31,7 +31,7 @@
 	if (!pmd)
 		goto out_pmd;
 
-	pte = pte_alloc_map(mm, pmd, proc);
+	pte = pte_alloc_map(mm, NULL, pmd, proc);
 	if (!pte)
 		goto out_pte;
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b6fccb0..d5ed94d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -51,6 +51,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_XZ
 	select HAVE_KERNEL_LZO
 	select HAVE_HW_BREAKPOINT
 	select HAVE_MIXED_BREAKPOINTS_REGS
@@ -65,6 +66,7 @@
 	select HAVE_SPARSE_IRQ
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
+	select USE_GENERIC_SMP_HELPERS if SMP
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
@@ -203,10 +205,6 @@
 	def_bool y
 	depends on EXPERIMENTAL && DMAR && ACPI
 
-config USE_GENERIC_SMP_HELPERS
-	def_bool y
-	depends on SMP
-
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP
@@ -629,11 +627,11 @@
          as it is off-chip. APB timers are always running regardless of CPU
          C states, they are used as per CPU clockevent device when possible.
 
-# Mark as embedded because too many people got it wrong.
+# Mark as expert because too many people got it wrong.
 # The code disables itself when not needed.
 config DMI
 	default y
-	bool "Enable DMI scanning" if EMBEDDED
+	bool "Enable DMI scanning" if EXPERT
 	---help---
 	  Enabled scanning of DMI to identify machine quirks. Say Y
 	  here unless you have verified that your setup is not
@@ -641,7 +639,7 @@
 	  BIOS code.
 
 config GART_IOMMU
-	bool "GART IOMMU support" if EMBEDDED
+	bool "GART IOMMU support" if EXPERT
 	default y
 	select SWIOTLB
 	depends on X86_64 && PCI && AMD_NB
@@ -891,7 +889,7 @@
 	depends on X86_MCE_INTEL
 
 config VM86
-	bool "Enable VM86 support" if EMBEDDED
+	bool "Enable VM86 support" if EXPERT
 	default y
 	depends on X86_32
 	---help---
@@ -1075,7 +1073,7 @@
 
 choice
 	depends on EXPERIMENTAL
-	prompt "Memory split" if EMBEDDED
+	prompt "Memory split" if EXPERT
 	default VMSPLIT_3G
 	depends on X86_32
 	---help---
@@ -1137,7 +1135,7 @@
 	def_bool X86_64 || HIGHMEM64G
 
 config DIRECT_GBPAGES
-	bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
+	bool "Enable 1GB pages for kernel pagetables" if EXPERT
 	default y
 	depends on X86_64
 	---help---
@@ -1371,7 +1369,7 @@
 
 config MTRR
 	def_bool y
-	prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
+	prompt "MTRR (Memory Type Range Register) support" if EXPERT
 	---help---
 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
 	  the Memory Type Range Registers (MTRRs) may be used to control
@@ -1437,7 +1435,7 @@
 
 config X86_PAT
 	def_bool y
-	prompt "x86 PAT support" if EMBEDDED
+	prompt "x86 PAT support" if EXPERT
 	depends on MTRR
 	---help---
 	  Use PAT attributes to setup page level cache control.
@@ -1541,7 +1539,7 @@
 	  code in physical address mode via KEXEC
 
 config PHYSICAL_START
-	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+	hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
 	default "0x1000000"
 	---help---
 	  This gives the physical address where the kernel is loaded.
@@ -1936,13 +1934,19 @@
 	depends on X86_64 && PCI && ACPI
 
 config PCI_CNB20LE_QUIRK
-	bool "Read CNB20LE Host Bridge Windows"
-	depends on PCI
+	bool "Read CNB20LE Host Bridge Windows" if EXPERT
+	default n
+	depends on PCI && EXPERIMENTAL
 	help
 	  Read the PCI windows out of the CNB20LE host bridge. This allows
 	  PCI hotplug to work on systems with the CNB20LE chipset which do
 	  not have ACPI.
 
+	  There's no public spec for this chipset, and this functionality
+	  is known to be incomplete.
+
+	  You should say N unless you know you need this.
+
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
 	depends on PCI_MSI && ACPI && EXPERIMENTAL
@@ -2064,13 +2068,14 @@
 	bool "One Laptop Per Child support"
 	select GPIOLIB
 	select OLPC_OPENFIRMWARE
+	depends on !X86_64 && !X86_PAE
 	---help---
 	  Add support for detecting the unique features of the OLPC
 	  XO hardware.
 
 config OLPC_XO1
 	tristate "OLPC XO-1 support"
-	depends on OLPC && PCI
+	depends on OLPC && MFD_CS5535
 	---help---
 	  Add support for non-essential features of the OLPC XO-1 laptop.
 
@@ -2078,11 +2083,17 @@
 	bool "Support for OLPC's Open Firmware"
 	depends on !X86_64 && !X86_PAE
 	default n
+	select OF
 	help
 	  This option adds support for the implementation of Open Firmware
 	  that is used on the OLPC XO-1 Children's Machine.
 	  If unsure, say N here.
 
+config OLPC_OPENFIRMWARE_DT
+	bool
+	default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE
+	select OF_PROMTREE
+
 endif # X86_32
 
 config AMD_NB
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 15588a0..283c5a6 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -424,7 +424,7 @@
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML
 
 menuconfig PROCESSOR_SELECT
-	bool "Supported processor vendors" if EMBEDDED
+	bool "Supported processor vendors" if EXPERT
 	---help---
 	  This lets you choose what x86 vendor support code your kernel
 	  will include.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 45143bb..615e188 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -31,7 +31,7 @@
 	  see errors. Disable this if you want silent bootup.
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED
+	bool "Early printk" if EXPERT
 	default y
 	---help---
 	  Write kernel log output directly into the VGA buffer or to a serial
@@ -138,7 +138,7 @@
 
 config DOUBLEFAULT
 	default y
-	bool "Enable doublefault exception handler" if EMBEDDED
+	bool "Enable doublefault exception handler" if EXPERT
 	depends on X86_32
 	---help---
 	  This option allows trapping of rare doublefault exceptions that
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 0c22955..09664ef 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -49,12 +49,15 @@
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,xzkern)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzo)
 
 suffix-$(CONFIG_KERNEL_GZIP)	:= gz
 suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
 suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_XZ)	:= xz
 suffix-$(CONFIG_KERNEL_LZO) 	:= lzo
 
 quiet_cmd_mkpiggy = MKPIGGY $@
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 325c052..3a19d04 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -139,6 +139,10 @@
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZO
 #include "../../../../lib/decompress_unlzo.c"
 #endif
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 5c22812..46a8238 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -62,7 +62,12 @@
 	if (fseek(f, -4L, SEEK_END)) {
 		perror(argv[1]);
 	}
-	fread(&olen, sizeof olen, 1, f);
+
+	if (fread(&olen, sizeof(olen), 1, f) != 1) {
+		perror(argv[1]);
+		return 1;
+	}
+
 	ilen = ftell(f);
 	olen = getle32(&olen);
 	fclose(f);
@@ -74,7 +79,7 @@
 
 	offs = (olen > ilen) ? olen - ilen : 0;
 	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
-	offs += 32*1024 + 18;	/* Add 32K + 18 bytes slack */
+	offs += 64*1024 + 128;	/* Add 64K + 128 bytes slack */
 	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
 
 	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index ff16756..8fe2a49 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -9,6 +9,20 @@
  *            Vinodh Gopal <vinodh.gopal@intel.com>
  *            Kahraman Akdemir
  *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ *    Authors: Erdinc Ozturk (erdinc.ozturk@intel.com)
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             James Guilford (james.guilford@intel.com)
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Wajdi Feghali (wajdi.k.feghali@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
+ * Ported x86_64 version to x86:
+ *    Author: Mathias Krause <minipli@googlemail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -18,8 +32,62 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 
+#ifdef __x86_64__
+.data
+POLY:   .octa 0xC2000000000000000000000000000001
+TWOONE: .octa 0x00000001000000000000000000000001
+
+# order of these constants should not change.
+# more specifically, ALL_F should follow SHIFT_MASK,
+# and ZERO should follow ALL_F
+
+SHUF_MASK:  .octa 0x000102030405060708090A0B0C0D0E0F
+MASK1:      .octa 0x0000000000000000ffffffffffffffff
+MASK2:      .octa 0xffffffffffffffff0000000000000000
+SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
+ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
+ZERO:       .octa 0x00000000000000000000000000000000
+ONE:        .octa 0x00000000000000000000000000000001
+F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0
+dec:        .octa 0x1
+enc:        .octa 0x2
+
+
 .text
 
+
+#define	STACK_OFFSET    8*3
+#define	HashKey		16*0	// store HashKey <<1 mod poly here
+#define	HashKey_2	16*1	// store HashKey^2 <<1 mod poly here
+#define	HashKey_3	16*2	// store HashKey^3 <<1 mod poly here
+#define	HashKey_4	16*3	// store HashKey^4 <<1 mod poly here
+#define	HashKey_k	16*4	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey <<1 mod poly here
+				//(for Karatsuba purposes)
+#define	HashKey_2_k	16*5	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^2 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	HashKey_3_k	16*6	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^3 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	HashKey_4_k	16*7	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^4 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	VARIABLE_OFFSET	16*8
+
+#define arg1 rdi
+#define arg2 rsi
+#define arg3 rdx
+#define arg4 rcx
+#define arg5 r8
+#define arg6 r9
+#define arg7 STACK_OFFSET+8(%r14)
+#define arg8 STACK_OFFSET+16(%r14)
+#define arg9 STACK_OFFSET+24(%r14)
+#define arg10 STACK_OFFSET+32(%r14)
+#endif
+
+
 #define STATE1	%xmm0
 #define STATE2	%xmm4
 #define STATE3	%xmm5
@@ -32,12 +100,16 @@
 #define IN	IN1
 #define KEY	%xmm2
 #define IV	%xmm3
+
 #define BSWAP_MASK %xmm10
 #define CTR	%xmm11
 #define INC	%xmm12
 
+#ifdef __x86_64__
+#define AREG	%rax
 #define KEYP	%rdi
 #define OUTP	%rsi
+#define UKEYP	OUTP
 #define INP	%rdx
 #define LEN	%rcx
 #define IVP	%r8
@@ -46,6 +118,1588 @@
 #define TKEYP	T1
 #define T2	%r11
 #define TCTR_LOW T2
+#else
+#define AREG	%eax
+#define KEYP	%edi
+#define OUTP	AREG
+#define UKEYP	OUTP
+#define INP	%edx
+#define LEN	%esi
+#define IVP	%ebp
+#define KLEN	%ebx
+#define T1	%ecx
+#define TKEYP	T1
+#endif
+
+
+#ifdef __x86_64__
+/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+*
+*
+* Input: A and B (128-bits each, bit-reflected)
+* Output: C = A*B*x mod poly, (i.e. >>1 )
+* To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+* GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+*
+*/
+.macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
+	movdqa	  \GH, \TMP1
+	pshufd	  $78, \GH, \TMP2
+	pshufd	  $78, \HK, \TMP3
+	pxor	  \GH, \TMP2            # TMP2 = a1+a0
+	pxor	  \HK, \TMP3            # TMP3 = b1+b0
+	PCLMULQDQ 0x11, \HK, \TMP1     # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \HK, \GH       # GH = a0*b0
+	PCLMULQDQ 0x00, \TMP3, \TMP2   # TMP2 = (a0+a1)*(b1+b0)
+	pxor	  \GH, \TMP2
+	pxor	  \TMP1, \TMP2          # TMP2 = (a0*b1)+(a1*b0)
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3             # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2             # right shift TMP2 2 DWs
+	pxor	  \TMP3, \GH
+	pxor	  \TMP2, \TMP1          # TMP1:GH holds the result of GH*HK
+
+        # first phase of the reduction
+
+	movdqa    \GH, \TMP2
+	movdqa    \GH, \TMP3
+	movdqa    \GH, \TMP4            # copy GH into TMP2,TMP3 and TMP4
+					# in order to perform
+					# independent shifts
+	pslld     $31, \TMP2            # packed left shift <<31
+	pslld     $30, \TMP3            # packed left shift <<30
+	pslld     $25, \TMP4            # packed left shift <<25
+	pxor      \TMP3, \TMP2          # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5             # right shift TMP5 1 DW
+	pslldq    $12, \TMP2            # left shift TMP2 3 DWs
+	pxor      \TMP2, \GH
+
+        # second phase of the reduction
+
+	movdqa    \GH,\TMP2             # copy GH into TMP2,TMP3 and TMP4
+					# in order to perform
+					# independent shifts
+	movdqa    \GH,\TMP3
+	movdqa    \GH,\TMP4
+	psrld     $1,\TMP2              # packed right shift >>1
+	psrld     $2,\TMP3              # packed right shift >>2
+	psrld     $7,\TMP4              # packed right shift >>7
+	pxor      \TMP3,\TMP2		# xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \GH
+	pxor      \TMP1, \GH            # result is in GH
+.endm
+
+/*
+* if a = number of total plaintext bytes
+* b = floor(a/16)
+* num_initial_blocks = b mod 4
+* encrypt the initial num_initial_blocks blocks and apply ghash on
+* the ciphertext
+* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* are clobbered
+* arg1, %arg2, %arg3, %r14 are used as pointers only, not modified
+*/
+
+
+.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	mov	   arg7, %r10           # %r10 = AAD
+	mov	   arg8, %r12           # %r12 = aadLen
+	mov	   %r12, %r11
+	pxor	   %xmm\i, %xmm\i
+_get_AAD_loop\num_initial_blocks\operation:
+	movd	   (%r10), \TMP1
+	pslldq	   $12, \TMP1
+	psrldq	   $4, %xmm\i
+	pxor	   \TMP1, %xmm\i
+	add	   $4, %r10
+	sub	   $4, %r12
+	jne	   _get_AAD_loop\num_initial_blocks\operation
+	cmp	   $16, %r11
+	je	   _get_AAD_loop2_done\num_initial_blocks\operation
+	mov	   $16, %r12
+_get_AAD_loop2\num_initial_blocks\operation:
+	psrldq	   $4, %xmm\i
+	sub	   $4, %r12
+	cmp	   %r11, %r12
+	jne	   _get_AAD_loop2\num_initial_blocks\operation
+_get_AAD_loop2_done\num_initial_blocks\operation:
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\i # byte-reflect the AAD data
+
+	xor	   %r11, %r11 # initialise the data pointer offset as zero
+
+        # start AES for num_initial_blocks blocks
+
+	mov	   %arg5, %rax                      # %rax = *Y0
+	movdqu	   (%rax), \XMM0                    # XMM0 = Y0
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, \XMM0
+
+.if (\i == 5) || (\i == 6) || (\i == 7)
+.irpc index, \i_seq
+	paddd	   ONE(%rip), \XMM0                 # INCR Y0
+	movdqa	   \XMM0, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\index      # perform a 16 byte swap
+
+.endr
+.irpc index, \i_seq
+	pxor	   16*0(%arg1), %xmm\index
+.endr
+.irpc index, \i_seq
+	movaps 0x10(%rdi), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 1
+.endr
+.irpc index, \i_seq
+	movaps 0x20(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 2
+.endr
+.irpc index, \i_seq
+	movaps 0x30(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 3
+.endr
+.irpc index, \i_seq
+	movaps 0x40(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 4
+.endr
+.irpc index, \i_seq
+	movaps 0x50(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 5
+.endr
+.irpc index, \i_seq
+	movaps 0x60(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 6
+.endr
+.irpc index, \i_seq
+	movaps 0x70(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 7
+.endr
+.irpc index, \i_seq
+	movaps 0x80(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 8
+.endr
+.irpc index, \i_seq
+	movaps 0x90(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 9
+.endr
+.irpc index, \i_seq
+	movaps 0xa0(%arg1), \TMP1
+	AESENCLAST \TMP1, %xmm\index         # Round 10
+.endr
+.irpc index, \i_seq
+	movdqu	   (%arg3 , %r11, 1), \TMP1
+	pxor	   \TMP1, %xmm\index
+	movdqu	   %xmm\index, (%arg2 , %r11, 1)
+	# write back plaintext/ciphertext for num_initial_blocks
+	add	   $16, %r11
+
+	movdqa     \TMP1, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM	   %xmm14, %xmm\index
+
+		# prepare plaintext/ciphertext for GHASH computation
+.endr
+.endif
+	GHASH_MUL  %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        # apply GHASH on num_initial_blocks blocks
+
+.if \i == 5
+        pxor       %xmm5, %xmm6
+	GHASH_MUL  %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 6
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 7
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.endif
+	cmp	   $64, %r13
+	jl	_initial_blocks_done\num_initial_blocks\operation
+	# no need for precomputed values
+/*
+*
+* Precomputations for HashKey parallel with encryption of first 4 blocks.
+* HashKey_i_k holds XORed values of the low and high parts of the HashKey_i
+*/
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM1
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM1        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM2
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM2        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM3
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM4
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4        # perform a 16 byte swap
+
+	pxor	   16*0(%arg1), \XMM1
+	pxor	   16*0(%arg1), \XMM2
+	pxor	   16*0(%arg1), \XMM3
+	pxor	   16*0(%arg1), \XMM4
+	movdqa	   \TMP3, \TMP5
+	pshufd	   $78, \TMP3, \TMP1
+	pxor	   \TMP3, \TMP1
+	movdqa	   \TMP1, HashKey_k(%rsp)
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_2(%rsp)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_2_k(%rsp)
+.irpc index, 1234 # do 4 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_3(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_3_k(%rsp)
+.irpc index, 56789 # do next 5 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_4(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_4_k(%rsp)
+	movaps 0xa0(%arg1), \TMP2
+	AESENCLAST \TMP2, \XMM1
+	AESENCLAST \TMP2, \XMM2
+	AESENCLAST \TMP2, \XMM3
+	AESENCLAST \TMP2, \XMM4
+	movdqu	   16*0(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM1
+	movdqu	   \XMM1, 16*0(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM1
+	movdqu	   16*1(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM2
+	movdqu	   \XMM2, 16*1(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM2
+	movdqu	   16*2(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM3
+	movdqu	   \XMM3, 16*2(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM3
+	movdqu	   16*3(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM4
+	movdqu	   \XMM4, 16*3(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM4
+	add	   $64, %r11
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+	pxor	   \XMMDst, \XMM1
+# combine GHASHed value with the corresponding ciphertext
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+
+_initial_blocks_done\num_initial_blocks\operation:
+
+.endm
+
+
+/*
+* if a = number of total plaintext bytes
+* b = floor(a/16)
+* num_initial_blocks = b mod 4
+* encrypt the initial num_initial_blocks blocks and apply ghash on
+* the ciphertext
+* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* are clobbered
+* arg1, %arg2, %arg3, %r14 are used as pointers only, not modified
+*/
+
+
+.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	mov	   arg7, %r10           # %r10 = AAD
+	mov	   arg8, %r12           # %r12 = aadLen
+	mov	   %r12, %r11
+	pxor	   %xmm\i, %xmm\i
+_get_AAD_loop\num_initial_blocks\operation:
+	movd	   (%r10), \TMP1
+	pslldq	   $12, \TMP1
+	psrldq	   $4, %xmm\i
+	pxor	   \TMP1, %xmm\i
+	add	   $4, %r10
+	sub	   $4, %r12
+	jne	   _get_AAD_loop\num_initial_blocks\operation
+	cmp	   $16, %r11
+	je	   _get_AAD_loop2_done\num_initial_blocks\operation
+	mov	   $16, %r12
+_get_AAD_loop2\num_initial_blocks\operation:
+	psrldq	   $4, %xmm\i
+	sub	   $4, %r12
+	cmp	   %r11, %r12
+	jne	   _get_AAD_loop2\num_initial_blocks\operation
+_get_AAD_loop2_done\num_initial_blocks\operation:
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\i # byte-reflect the AAD data
+
+	xor	   %r11, %r11 # initialise the data pointer offset as zero
+
+        # start AES for num_initial_blocks blocks
+
+	mov	   %arg5, %rax                      # %rax = *Y0
+	movdqu	   (%rax), \XMM0                    # XMM0 = Y0
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, \XMM0
+
+.if (\i == 5) || (\i == 6) || (\i == 7)
+.irpc index, \i_seq
+	paddd	   ONE(%rip), \XMM0                 # INCR Y0
+	movdqa	   \XMM0, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\index      # perform a 16 byte swap
+
+.endr
+.irpc index, \i_seq
+	pxor	   16*0(%arg1), %xmm\index
+.endr
+.irpc index, \i_seq
+	movaps 0x10(%rdi), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 1
+.endr
+.irpc index, \i_seq
+	movaps 0x20(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 2
+.endr
+.irpc index, \i_seq
+	movaps 0x30(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 3
+.endr
+.irpc index, \i_seq
+	movaps 0x40(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 4
+.endr
+.irpc index, \i_seq
+	movaps 0x50(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 5
+.endr
+.irpc index, \i_seq
+	movaps 0x60(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 6
+.endr
+.irpc index, \i_seq
+	movaps 0x70(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 7
+.endr
+.irpc index, \i_seq
+	movaps 0x80(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 8
+.endr
+.irpc index, \i_seq
+	movaps 0x90(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 9
+.endr
+.irpc index, \i_seq
+	movaps 0xa0(%arg1), \TMP1
+	AESENCLAST \TMP1, %xmm\index         # Round 10
+.endr
+.irpc index, \i_seq
+	movdqu	   (%arg3 , %r11, 1), \TMP1
+	pxor	   \TMP1, %xmm\index
+	movdqu	   %xmm\index, (%arg2 , %r11, 1)
+	# write back plaintext/ciphertext for num_initial_blocks
+	add	   $16, %r11
+
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM	   %xmm14, %xmm\index
+
+		# prepare plaintext/ciphertext for GHASH computation
+.endr
+.endif
+	GHASH_MUL  %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        # apply GHASH on num_initial_blocks blocks
+
+.if \i == 5
+        pxor       %xmm5, %xmm6
+	GHASH_MUL  %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 6
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 7
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.endif
+	cmp	   $64, %r13
+	jl	_initial_blocks_done\num_initial_blocks\operation
+	# no need for precomputed values
+/*
+*
+* Precomputations for HashKey parallel with encryption of first 4 blocks.
+* HashKey_i_k holds XORed values of the low and high parts of the HashKey_i
+*/
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM1
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM1        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM2
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM2        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM3
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM4
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4        # perform a 16 byte swap
+
+	pxor	   16*0(%arg1), \XMM1
+	pxor	   16*0(%arg1), \XMM2
+	pxor	   16*0(%arg1), \XMM3
+	pxor	   16*0(%arg1), \XMM4
+	movdqa	   \TMP3, \TMP5
+	pshufd	   $78, \TMP3, \TMP1
+	pxor	   \TMP3, \TMP1
+	movdqa	   \TMP1, HashKey_k(%rsp)
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_2(%rsp)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_2_k(%rsp)
+.irpc index, 1234 # do 4 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_3(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_3_k(%rsp)
+.irpc index, 56789 # do next 5 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_4(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_4_k(%rsp)
+	movaps 0xa0(%arg1), \TMP2
+	AESENCLAST \TMP2, \XMM1
+	AESENCLAST \TMP2, \XMM2
+	AESENCLAST \TMP2, \XMM3
+	AESENCLAST \TMP2, \XMM4
+	movdqu	   16*0(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM1
+	movdqu	   16*1(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM2
+	movdqu	   16*2(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM3
+	movdqu	   16*3(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM4
+	movdqu     \XMM1, 16*0(%arg2 , %r11 , 1)
+	movdqu     \XMM2, 16*1(%arg2 , %r11 , 1)
+	movdqu     \XMM3, 16*2(%arg2 , %r11 , 1)
+	movdqu     \XMM4, 16*3(%arg2 , %r11 , 1)
+
+	add	   $64, %r11
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+	pxor	   \XMMDst, \XMM1
+# combine GHASHed value with the corresponding ciphertext
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+
+_initial_blocks_done\num_initial_blocks\operation:
+
+.endm
+
+/*
+* encrypt 4 blocks at a time
+* ghash the 4 previously encrypted ciphertext blocks
+* arg1, %arg2, %arg3 are used as pointers only, not modified
+* %r11 is the data offset value
+*/
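+/*
+* Rough schedule of the body below: the ten AES rounds for the next four
+* counter blocks (XMM1-XMM4) are interleaved with the Karatsuba partial
+* products of the four previous blocks (XMM5-XMM8 against
+* HashKey^4..HashKey^1), and the GHASH reduction runs after AESENCLAST,
+* so the AES and carry-less-multiply work can overlap.
+*/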
+.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
+TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+	movdqa	  \XMM1, \XMM5
+	movdqa	  \XMM2, \XMM6
+	movdqa	  \XMM3, \XMM7
+	movdqa	  \XMM4, \XMM8
+
+        movdqa    SHUF_MASK(%rip), %xmm15
+        # multiply TMP5 * HashKey using karatsuba
+
+	movdqa	  \XMM5, \TMP4
+	pshufd	  $78, \XMM5, \TMP6
+	pxor	  \XMM5, \TMP6
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
+	movdqa    \XMM0, \XMM1
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM2
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM3
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
+	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  (%arg1), \XMM1
+	pxor	  (%arg1), \XMM2
+	pxor	  (%arg1), \XMM3
+	pxor	  (%arg1), \XMM4
+	movdqa	  HashKey_4_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
+	movaps 0x10(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 1
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movaps 0x20(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 2
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movdqa	  \XMM6, \TMP1
+	pshufd	  $78, \XMM6, \TMP2
+	pxor	  \XMM6, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
+	movaps 0x30(%arg1), \TMP3
+	AESENC    \TMP3, \XMM1              # Round 3
+	AESENC    \TMP3, \XMM2
+	AESENC    \TMP3, \XMM3
+	AESENC    \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
+	movaps 0x40(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 4
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_3_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x50(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 5
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM6, \XMM5
+	pxor	  \TMP2, \TMP6
+	movdqa	  \XMM7, \TMP1
+	pshufd	  $78, \XMM7, \TMP2
+	pxor	  \XMM7, \TMP2
+	movdqa	  HashKey_2(%rsp ), \TMP5
+
+        # Multiply TMP5 * HashKey using karatsuba
+
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
+	movaps 0x60(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 6
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
+	movaps 0x70(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 7
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_2_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x80(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 8
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM7, \XMM5
+	pxor	  \TMP2, \TMP6
+
+        # Multiply XMM8 * HashKey
+        # XMM8 and TMP5 hold the values for the two operands
+
+	movdqa	  \XMM8, \TMP1
+	pshufd	  $78, \XMM8, \TMP2
+	pxor	  \XMM8, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
+	movaps 0x90(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1            # Round 9
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
+	movaps 0xa0(%arg1), \TMP3
+	AESENCLAST \TMP3, \XMM1           # Round 10
+	AESENCLAST \TMP3, \XMM2
+	AESENCLAST \TMP3, \XMM3
+	AESENCLAST \TMP3, \XMM4
+	movdqa    HashKey_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
+	movdqu	  (%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
+	movdqu	  16(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
+	movdqu	  32(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
+	movdqu	  48(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
+        movdqu    \XMM1, (%arg2,%r11,1)        # Write to the ciphertext buffer
+        movdqu    \XMM2, 16(%arg2,%r11,1)      # Write to the ciphertext buffer
+        movdqu    \XMM3, 32(%arg2,%r11,1)      # Write to the ciphertext buffer
+        movdqu    \XMM4, 48(%arg2,%r11,1)      # Write to the ciphertext buffer
+	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  \TMP4, \TMP1
+	pxor	  \XMM8, \XMM5
+	pxor	  \TMP6, \TMP2
+	pxor	  \TMP1, \TMP2
+	pxor	  \XMM5, \TMP2
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
+	pxor	  \TMP3, \XMM5
+	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5
+
+        # first phase of reduction
+
+	movdqa    \XMM5, \TMP2
+	movdqa    \XMM5, \TMP3
+	movdqa    \XMM5, \TMP4
+# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
+	pslld     $31, \TMP2                   # packed right shift << 31
+	pslld     $30, \TMP3                   # packed right shift << 30
+	pslld     $25, \TMP4                   # packed right shift << 25
+	pxor      \TMP3, \TMP2	               # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5                    # right shift T5 1 DW
+	pslldq    $12, \TMP2                   # left shift T2 3 DWs
+	pxor      \TMP2, \XMM5
+
+        # second phase of reduction
+
+	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
+	movdqa    \XMM5,\TMP3
+	movdqa    \XMM5,\TMP4
+	psrld     $1, \TMP2                    # packed left shift >>1
+	psrld     $2, \TMP3                    # packed left shift >>2
+	psrld     $7, \TMP4                    # packed left shift >>7
+	pxor      \TMP3,\TMP2		       # xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \XMM5
+	pxor      \TMP1, \XMM5                 # result is in XMM5
+
+	pxor	  \XMM5, \XMM1
+.endm
+
+/*
+* decrypt 4 blocks at a time
+* ghash the 4 previously decrypted ciphertext blocks
+* arg1, %arg2, %arg3 are used as pointers only, not modified
+* %r11 is the data offset value
+*/
+.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
+TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+	movdqa	  \XMM1, \XMM5
+	movdqa	  \XMM2, \XMM6
+	movdqa	  \XMM3, \XMM7
+	movdqa	  \XMM4, \XMM8
+
+        movdqa    SHUF_MASK(%rip), %xmm15
+        # multiply TMP5 * HashKey using karatsuba
+
+	movdqa	  \XMM5, \TMP4
+	pshufd	  $78, \XMM5, \TMP6
+	pxor	  \XMM5, \TMP6
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
+	movdqa    \XMM0, \XMM1
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM2
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM3
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
+	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  (%arg1), \XMM1
+	pxor	  (%arg1), \XMM2
+	pxor	  (%arg1), \XMM3
+	pxor	  (%arg1), \XMM4
+	movdqa	  HashKey_4_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
+	movaps 0x10(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 1
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movaps 0x20(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 2
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movdqa	  \XMM6, \TMP1
+	pshufd	  $78, \XMM6, \TMP2
+	pxor	  \XMM6, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
+	movaps 0x30(%arg1), \TMP3
+	AESENC    \TMP3, \XMM1              # Round 3
+	AESENC    \TMP3, \XMM2
+	AESENC    \TMP3, \XMM3
+	AESENC    \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
+	movaps 0x40(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 4
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_3_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x50(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 5
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM6, \XMM5
+	pxor	  \TMP2, \TMP6
+	movdqa	  \XMM7, \TMP1
+	pshufd	  $78, \XMM7, \TMP2
+	pxor	  \XMM7, \TMP2
+	movdqa	  HashKey_2(%rsp ), \TMP5
+
+        # Multiply TMP5 * HashKey using karatsuba
+
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
+	movaps 0x60(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 6
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
+	movaps 0x70(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 7
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_2_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x80(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 8
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM7, \XMM5
+	pxor	  \TMP2, \TMP6
+
+        # Multiply XMM8 * HashKey
+        # XMM8 and TMP5 hold the values for the two operands
+
+	movdqa	  \XMM8, \TMP1
+	pshufd	  $78, \XMM8, \TMP2
+	pxor	  \XMM8, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
+	movaps 0x90(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1            # Round 9
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
+	movaps 0xa0(%arg1), \TMP3
+	AESENCLAST \TMP3, \XMM1           # Round 10
+	AESENCLAST \TMP3, \XMM2
+	AESENCLAST \TMP3, \XMM3
+	AESENCLAST \TMP3, \XMM4
+	movdqa    HashKey_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
+	movdqu	  (%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM1, (%arg2,%r11,1)        # Write to plaintext buffer
+	movdqa    \TMP3, \XMM1
+	movdqu	  16(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM2, 16(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM2
+	movdqu	  32(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM3, 32(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM3
+	movdqu	  48(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM4, 48(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  \TMP4, \TMP1
+	pxor	  \XMM8, \XMM5
+	pxor	  \TMP6, \TMP2
+	pxor	  \TMP1, \TMP2
+	pxor	  \XMM5, \TMP2
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
+	pxor	  \TMP3, \XMM5
+	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5
+
+        # first phase of reduction
+
+	movdqa    \XMM5, \TMP2
+	movdqa    \XMM5, \TMP3
+	movdqa    \XMM5, \TMP4
+# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
+	pslld     $31, \TMP2                   # packed right shift << 31
+	pslld     $30, \TMP3                   # packed right shift << 30
+	pslld     $25, \TMP4                   # packed right shift << 25
+	pxor      \TMP3, \TMP2	               # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5                    # right shift T5 1 DW
+	pslldq    $12, \TMP2                   # left shift T2 3 DWs
+	pxor      \TMP2, \XMM5
+
+        # second phase of reduction
+
+	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
+	movdqa    \XMM5,\TMP3
+	movdqa    \XMM5,\TMP4
+	psrld     $1, \TMP2                    # packed left shift >>1
+	psrld     $2, \TMP3                    # packed left shift >>2
+	psrld     $7, \TMP4                    # packed left shift >>7
+	pxor      \TMP3,\TMP2		       # xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \XMM5
+	pxor      \TMP1, \XMM5                 # result is in XMM5
+
+	pxor	  \XMM5, \XMM1
+.endm
+
+/* GHASH the last 4 ciphertext blocks. */
+.macro	GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
+TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+
+        # Multiply TMP6 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM1, \TMP6
+	pshufd	  $78, \XMM1, \TMP2
+	pxor	  \XMM1, \TMP2
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP6       # TMP6 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM1       # XMM1 = a0*b0
+	movdqa	  HashKey_4_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	movdqa	  \XMM1, \XMMDst
+	movdqa	  \TMP2, \XMM1              # result in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM2, \TMP1
+	pshufd	  $78, \XMM2, \TMP2
+	pxor	  \XMM2, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM2       # XMM2 = a0*b0
+	movdqa	  HashKey_3_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM2, \XMMDst
+	pxor	  \TMP2, \XMM1
+# results accumulated in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM3, \TMP1
+	pshufd	  $78, \XMM3, \TMP2
+	pxor	  \XMM3, \TMP2
+	movdqa	  HashKey_2(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM3       # XMM3 = a0*b0
+	movdqa	  HashKey_2_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM3, \XMMDst
+	pxor	  \TMP2, \XMM1   # results accumulated in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+	movdqa	  \XMM4, \TMP1
+	pshufd	  $78, \XMM4, \TMP2
+	pxor	  \XMM4, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1	    # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM4       # XMM4 = a0*b0
+	movdqa	  HashKey_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM4, \XMMDst
+	pxor	  \XMM1, \TMP2
+	pxor	  \TMP6, \TMP2
+	pxor	  \XMMDst, \TMP2
+	# middle section of the temp results combined as in karatsuba algorithm
+	movdqa	  \TMP2, \TMP4
+	pslldq	  $8, \TMP4                 # left shift TMP4 2 DWs
+	psrldq	  $8, \TMP2                 # right shift TMP2 2 DWs
+	pxor	  \TMP4, \XMMDst
+	pxor	  \TMP2, \TMP6
+# TMP6:XMMDst holds the result of the accumulated carry-less multiplications
+	# first phase of the reduction
+	movdqa    \XMMDst, \TMP2
+	movdqa    \XMMDst, \TMP3
+	movdqa    \XMMDst, \TMP4
+# move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
+	pslld     $31, \TMP2                # packed right shifting << 31
+	pslld     $30, \TMP3                # packed right shifting << 30
+	pslld     $25, \TMP4                # packed right shifting << 25
+	pxor      \TMP3, \TMP2              # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP7
+	psrldq    $4, \TMP7                 # right shift TMP7 1 DW
+	pslldq    $12, \TMP2                # left shift TMP2 3 DWs
+	pxor      \TMP2, \XMMDst
+
+        # second phase of the reduction
+	movdqa    \XMMDst, \TMP2
+	# make 3 copies of XMMDst for doing 3 shift operations
+	movdqa    \XMMDst, \TMP3
+	movdqa    \XMMDst, \TMP4
+	psrld     $1, \TMP2                 # packed left shift >> 1
+	psrld     $2, \TMP3                 # packed left shift >> 2
+	psrld     $7, \TMP4                 # packed left shift >> 7
+	pxor      \TMP3, \TMP2              # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	pxor      \TMP7, \TMP2
+	pxor      \TMP2, \XMMDst
+	pxor      \TMP6, \XMMDst            # reduced result is in XMMDst
+.endm
+
+/* Encryption of a single block with the first 11 round keys (AES-128) */
+.macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
+
+	pxor	(%arg1), \XMM0
+        movaps 16(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 32(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 48(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 64(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 80(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 96(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 112(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 128(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 144(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 160(%arg1), \TMP1
+	AESENCLAST	\TMP1, \XMM0
+.endm
+
+
+/*****************************************************************************
+* void aesni_gcm_dec(void *aes_ctx,    // AES Key schedule. Starts on a 16 byte boundary.
+*                   u8 *out,           // Plaintext output. Encrypt in-place is allowed.
+*                   const u8 *in,      // Ciphertext input
+*                   u64 plaintext_len, // Length of data in bytes for decryption.
+*                   u8 *iv,            // Pre-counter block j0: 4 byte salt (from Security Association)
+*                                      // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+*                                      // concatenated with 0x00000001. 16-byte aligned pointer.
+*                   u8 *hash_subkey,   // H, the Hash sub key input. Data starts on a 16-byte boundary.
+*                   const u8 *aad,     // Additional Authentication Data (AAD)
+*                   u64 aad_len,       // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
+*                   u8  *auth_tag,     // Authenticated Tag output. The driver will compare this to the
+*                                      // given authentication tag and only return the plaintext if they match.
+*                   u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16
+*                                      // (most likely), 12 or 8.
+*
+* Assumptions:
+*
+* keys:
+*       keys are pre-expanded and aligned to 16 bytes. we are using the first
+*       set of 11 keys in the data structure void *aes_ctx
+*
+* iv:
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                             Salt  (From the SA)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     Initialization Vector                     |
+*       |         (This is the sequence number from IPSec header)       |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x1                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*
+*
+* AAD:
+*       AAD padded to 128 bits with 0
+*       for example, assume AAD is a u32 vector
+*
+*       if AAD is 8 bytes:
+*       AAD[3] = {A0, A1};
+*       padded AAD in xmm register = {A1 A0 0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A1)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     32-bit Sequence Number (A0)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                                       AAD Format with 32-bit Sequence Number
+*
+*       if AAD is 12 bytes:
+*       AAD[3] = {A0, A1, A2};
+*       padded AAD in xmm register = {A2 A1 A0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A2)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                 64-bit Extended Sequence Number {A1,A0}       |
+*       |                                                               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                        AAD Format with 64-bit Extended Sequence Number
+*
+* aadLen:
+*       by definition of the RFC4106 spec, aadLen can only be 8 or 12 bytes.
+*       The code supports 16 bytes as well, but will fail for any other size.
+*
+* TLen:
+*       by definition of the RFC4106 spec, TLen can only be 8, 12 or 16 bytes.
+*       The code will fail for any other size.
+*
+* poly = x^128 + x^127 + x^126 + x^121 + 1
+*
+*****************************************************************************/
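+
+/*
+* Illustrative only (not part of the interface above): following the layout
+* just described, a caller could assemble the 16-byte pre-counter block as
+*
+*       u8 iv[16] __attribute__((aligned(16)));
+*       memcpy(iv, salt, 4);                    - 4-byte salt from the SA
+*       memcpy(iv + 4, esp_iv, 8);              - 8-byte IV from the ESP payload
+*       *(__be32 *)(iv + 12) = cpu_to_be32(1);  - trailing 0x00000001
+*
+* where salt and esp_iv are hypothetical local buffers.
+*/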
+
+ENTRY(aesni_gcm_dec)
+	push	%r12
+	push	%r13
+	push	%r14
+	mov	%rsp, %r14
+/*
+* states of %xmm registers %xmm6:%xmm15 not saved
+* all %xmm registers are clobbered
+*/
+	sub	$VARIABLE_OFFSET, %rsp
+	and	$~63, %rsp                        # align rsp to 64 bytes
+	mov	%arg6, %r12
+	movdqu	(%r12), %xmm13			  # %xmm13 = HashKey
+        movdqa  SHUF_MASK(%rip), %xmm2
+	PSHUFB_XMM %xmm2, %xmm13
+
+
+# Precompute HashKey<<1 (mod poly) from the hash key (required for GHASH)
+
+	movdqa	%xmm13, %xmm2
+	psllq	$1, %xmm13
+	psrlq	$63, %xmm2
+	movdqa	%xmm2, %xmm1
+	pslldq	$8, %xmm2
+	psrldq	$8, %xmm1
+	por	%xmm2, %xmm13
+
+        # Reduction
+
+	pshufd	$0x24, %xmm1, %xmm2
+	pcmpeqd TWOONE(%rip), %xmm2
+	pand	POLY(%rip), %xmm2
+	pxor	%xmm2, %xmm13     # %xmm13 holds the HashKey<<1 (mod poly)
+
+
+        # Decrypt first few blocks
+
+	movdqa %xmm13, HashKey(%rsp)           # store HashKey<<1 (mod poly)
+	mov %arg4, %r13    # save the number of bytes of plaintext/ciphertext
+	and $-16, %r13                      # %r13 = %r13 - (%r13 mod 16)
+	mov %r13, %r12
+	and $(3<<4), %r12
+	jz _initial_num_blocks_is_0_decrypt
+	cmp $(2<<4), %r12
+	jb _initial_num_blocks_is_1_decrypt
+	je _initial_num_blocks_is_2_decrypt
+_initial_num_blocks_is_3_decrypt:
+	INITIAL_BLOCKS_DEC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, dec
+	sub	$48, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_2_decrypt:
+	INITIAL_BLOCKS_DEC	2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, dec
+	sub	$32, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_1_decrypt:
+	INITIAL_BLOCKS_DEC	1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, dec
+	sub	$16, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_0_decrypt:
+	INITIAL_BLOCKS_DEC	0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, dec
+_initial_blocks_decrypted:
+	cmp	$0, %r13
+	je	_zero_cipher_left_decrypt
+	sub	$64, %r13
+	je	_four_cipher_left_decrypt
+_decrypt_by_4:
+	GHASH_4_ENCRYPT_4_PARALLEL_DEC	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
+%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, dec
+	add	$64, %r11
+	sub	$64, %r13
+	jne	_decrypt_by_4
+_four_cipher_left_decrypt:
+	GHASH_LAST_4	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_decrypt:
+	mov	%arg4, %r13
+	and	$15, %r13				# %r13 = arg4 (mod 16)
+	je	_multiple_of_16_bytes_decrypt
+
+        # Handle the last <16 byte block separately
+
+	paddd ONE(%rip), %xmm0         # increment CNT to get Yn
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm0
+
+	ENCRYPT_SINGLE_BLOCK  %xmm0, %xmm1    # E(K, Yn)
+	sub $16, %r11
+	add %r13, %r11
+	movdqu (%arg3,%r11,1), %xmm1   # receive the last <16 byte block
+	lea SHIFT_MASK+16(%rip), %r12
+	sub %r13, %r12
+# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+# (%r13 is the number of bytes in plaintext mod 16)
+	movdqu (%r12), %xmm2           # get the appropriate shuffle mask
+	PSHUFB_XMM %xmm2, %xmm1            # right shift 16-%r13 bytes
+
+	movdqa  %xmm1, %xmm2
+	pxor %xmm1, %xmm0            # Ciphertext XOR E(K, Yn)
+	movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+	# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+	pand %xmm1, %xmm0            # mask out top 16-%r13 bytes of %xmm0
+	pand    %xmm1, %xmm2
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10 ,%xmm2
+
+	pxor %xmm2, %xmm8
+	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	          # GHASH computation for the last <16 byte block
+	sub %r13, %r11
+	add $16, %r11
+
+        # output %r13 bytes
+	MOVQ_R64_XMM	%xmm0, %rax
+	cmp	$8, %r13
+	jle	_less_than_8_bytes_left_decrypt
+	mov	%rax, (%arg2 , %r11, 1)
+	add	$8, %r11
+	psrldq	$8, %xmm0
+	MOVQ_R64_XMM	%xmm0, %rax
+	sub	$8, %r13
+_less_than_8_bytes_left_decrypt:
+	mov	%al,  (%arg2, %r11, 1)
+	add	$1, %r11
+	shr	$8, %rax
+	sub	$1, %r13
+	jne	_less_than_8_bytes_left_decrypt
+_multiple_of_16_bytes_decrypt:
+	mov	arg8, %r12		  # %r12 = aadLen (number of bytes)
+	shl	$3, %r12		  # convert into number of bits
+	movd	%r12d, %xmm15		  # len(A) in %xmm15
+	shl	$3, %arg4		  # len(C) in bits
+	MOVQ_R64_XMM	%arg4, %xmm1
+	pslldq	$8, %xmm15		  # %xmm15 = len(A)||0x0000000000000000
+	pxor	%xmm1, %xmm15		  # %xmm15 = len(A)||len(C)
+	pxor	%xmm15, %xmm8
+	GHASH_MUL	%xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	         # final GHASH computation
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm8
+
+	mov	%arg5, %rax		  # %rax = *Y0
+	movdqu	(%rax), %xmm0		  # %xmm0 = Y0
+	ENCRYPT_SINGLE_BLOCK	%xmm0,  %xmm1	  # E(K, Y0)
+	pxor	%xmm8, %xmm0
+_return_T_decrypt:
+	mov	arg9, %r10                # %r10 = authTag
+	mov	arg10, %r11               # %r11 = auth_tag_len
+	cmp	$16, %r11
+	je	_T_16_decrypt
+	cmp	$12, %r11
+	je	_T_12_decrypt
+_T_8_decrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	jmp	_return_T_done_decrypt
+_T_12_decrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	psrldq	$8, %xmm0
+	movd	%xmm0, %eax
+	mov	%eax, 8(%r10)
+	jmp	_return_T_done_decrypt
+_T_16_decrypt:
+	movdqu	%xmm0, (%r10)
+_return_T_done_decrypt:
+	mov	%r14, %rsp
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	ret
+
+
+/*****************************************************************************
+* void aesni_gcm_enc(void *aes_ctx,      // AES Key schedule. Starts on a 16 byte boundary.
+*                    u8 *out,            // Ciphertext output. Encrypt in-place is allowed.
+*                    const u8 *in,       // Plaintext input
+*                    u64 plaintext_len,  // Length of data in bytes for encryption.
+*                    u8 *iv,             // Pre-counter block j0: 4 byte salt (from Security Association)
+*                                        // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+*                                        // concatenated with 0x00000001. 16-byte aligned pointer.
+*                    u8 *hash_subkey,    // H, the Hash sub key input. Data starts on a 16-byte boundary.
+*                    const u8 *aad,      // Additional Authentication Data (AAD)
+*                    u64 aad_len,        // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
+*                    u8 *auth_tag,       // Authenticated Tag output.
+*                    u64 auth_tag_len);  // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
+*                                        // 12 or 8.
+*
+* Assumptions:
+*
+* keys:
+*       keys are pre-expanded and aligned to 16 bytes. we are using the
+*       first set of 11 keys in the data structure void *aes_ctx
+*
+*
+* iv:
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                             Salt  (From the SA)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     Initialization Vector                     |
+*       |         (This is the sequence number from IPSec header)       |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x1                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*
+*
+* AAD:
+*       AAD padded to 128 bits with 0
+*       for example, assume AAD is a u32 vector
+*
+*       if AAD is 8 bytes:
+*       AAD[3] = {A0, A1};
+*       padded AAD in xmm register = {A1 A0 0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A1)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     32-bit Sequence Number (A0)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                                 AAD Format with 32-bit Sequence Number
+*
+*       if AAD is 12 bytes:
+*       AAD[3] = {A0, A1, A2};
+*       padded AAD in xmm register = {A2 A1 A0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A2)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                 64-bit Extended Sequence Number {A1,A0}       |
+*       |                                                               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                         AAD Format with 64-bit Extended Sequence Number
+*
+* aadLen:
+*       by definition of the RFC4106 spec, aadLen can only be 8 or 12 bytes.
+*       The code supports 16 bytes as well, but will fail for any other size.
+*
+* TLen:
+*       by definition of the RFC4106 spec, TLen can only be 8, 12 or 16 bytes.
+*       The code will fail for any other size.
+*
+* poly = x^128 + x^127 + x^126 + x^121 + 1
+***************************************************************************/
+ENTRY(aesni_gcm_enc)
+	push	%r12
+	push	%r13
+	push	%r14
+	mov	%rsp, %r14
+#
+# states of %xmm registers %xmm6:%xmm15 not saved
+# all %xmm registers are clobbered
+#
+	sub	$VARIABLE_OFFSET, %rsp
+	and	$~63, %rsp
+	mov	%arg6, %r12
+	movdqu	(%r12), %xmm13
+        movdqa  SHUF_MASK(%rip), %xmm2
+	PSHUFB_XMM %xmm2, %xmm13
+
+
+# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
+
+	movdqa	%xmm13, %xmm2
+	psllq	$1, %xmm13
+	psrlq	$63, %xmm2
+	movdqa	%xmm2, %xmm1
+	pslldq	$8, %xmm2
+	psrldq	$8, %xmm1
+	por	%xmm2, %xmm13
+
+        # reduce HashKey<<1
+
+	pshufd	$0x24, %xmm1, %xmm2
+	pcmpeqd TWOONE(%rip), %xmm2
+	pand	POLY(%rip), %xmm2
+	pxor	%xmm2, %xmm13          # %xmm13 holds the HashKey<<1 (mod poly)
+	movdqa	%xmm13, HashKey(%rsp)  # store HashKey<<1 (mod poly)
+	mov	%arg4, %r13            # save the number of bytes of plaintext/ciphertext
+	and	$-16, %r13
+	mov	%r13, %r12
+
+        # Encrypt first few blocks
+
+	and	$(3<<4), %r12
+	jz	_initial_num_blocks_is_0_encrypt
+	cmp	$(2<<4), %r12
+	jb	_initial_num_blocks_is_1_encrypt
+	je	_initial_num_blocks_is_2_encrypt
+_initial_num_blocks_is_3_encrypt:
+	INITIAL_BLOCKS_ENC	3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, enc
+	sub	$48, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_2_encrypt:
+	INITIAL_BLOCKS_ENC	2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, enc
+	sub	$32, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_1_encrypt:
+	INITIAL_BLOCKS_ENC	1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, enc
+	sub	$16, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_0_encrypt:
+	INITIAL_BLOCKS_ENC	0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, enc
+_initial_blocks_encrypted:
+
+        # Main loop - Encrypt remaining blocks
+
+	cmp	$0, %r13
+	je	_zero_cipher_left_encrypt
+	sub	$64, %r13
+	je	_four_cipher_left_encrypt
+_encrypt_by_4_encrypt:
+	GHASH_4_ENCRYPT_4_PARALLEL_ENC	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
+%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc
+	add	$64, %r11
+	sub	$64, %r13
+	jne	_encrypt_by_4_encrypt
+_four_cipher_left_encrypt:
+	GHASH_LAST_4	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_encrypt:
+	mov	%arg4, %r13
+	and	$15, %r13			# %r13 = arg4 (mod 16)
+	je	_multiple_of_16_bytes_encrypt
+
+         # Handle the last <16 Byte block separately
+	paddd ONE(%rip), %xmm0                # INCR CNT to get Yn
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm0
+
+	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm1        # Encrypt(K, Yn)
+	sub $16, %r11
+	add %r13, %r11
+	movdqu (%arg3,%r11,1), %xmm1     # receive the last <16 byte blocks
+	lea SHIFT_MASK+16(%rip), %r12
+	sub %r13, %r12
+	# adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+	# (%r13 is the number of bytes in plaintext mod 16)
+	movdqu	(%r12), %xmm2           # get the appropriate shuffle mask
+	PSHUFB_XMM	%xmm2, %xmm1            # shift right 16-r13 bytes
+	pxor	%xmm1, %xmm0            # Plaintext XOR Encrypt(K, Yn)
+	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
+	# get the appropriate mask to mask out top 16-r13 bytes of xmm0
+	pand	%xmm1, %xmm0            # mask out top 16-r13 bytes of xmm0
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10,%xmm0
+
+	pxor	%xmm0, %xmm8
+	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	# GHASH computation for the last <16 byte block
+	sub	%r13, %r11
+	add	$16, %r11
+	PSHUFB_XMM %xmm10, %xmm1
+
+	# shuffle xmm0 back to output as ciphertext
+
+        # Output %r13 bytes
+	MOVQ_R64_XMM %xmm0, %rax
+	cmp $8, %r13
+	jle _less_than_8_bytes_left_encrypt
+	mov %rax, (%arg2 , %r11, 1)
+	add $8, %r11
+	psrldq $8, %xmm0
+	MOVQ_R64_XMM %xmm0, %rax
+	sub $8, %r13
+_less_than_8_bytes_left_encrypt:
+	mov %al,  (%arg2, %r11, 1)
+	add $1, %r11
+	shr $8, %rax
+	sub $1, %r13
+	jne _less_than_8_bytes_left_encrypt
+_multiple_of_16_bytes_encrypt:
+	mov	arg8, %r12    # %r12 = aadLen (number of bytes)
+	shl	$3, %r12
+	movd	%r12d, %xmm15       # len(A) in %xmm15
+	shl	$3, %arg4               # len(C) in bits
+	MOVQ_R64_XMM	%arg4, %xmm1
+	pslldq	$8, %xmm15          # %xmm15 = len(A)||0x0000000000000000
+	pxor	%xmm1, %xmm15       # %xmm15 = len(A)||len(C)
+	pxor	%xmm15, %xmm8
+	GHASH_MUL	%xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	# final GHASH computation
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm8         # perform a 16 byte swap
+
+	mov	%arg5, %rax		       # %rax  = *Y0
+	movdqu	(%rax), %xmm0		       # %xmm0 = Y0
+	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm15         # Encrypt(K, Y0)
+	pxor	%xmm8, %xmm0
+_return_T_encrypt:
+	mov	arg9, %r10                     # %r10 = authTag
+	mov	arg10, %r11                    # %r11 = auth_tag_len
+	cmp	$16, %r11
+	je	_T_16_encrypt
+	cmp	$12, %r11
+	je	_T_12_encrypt
+_T_8_encrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	jmp	_return_T_done_encrypt
+_T_12_encrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	psrldq	$8, %xmm0
+	movd	%xmm0, %eax
+	mov	%eax, 8(%r10)
+	jmp	_return_T_done_encrypt
+_T_16_encrypt:
+	movdqu	%xmm0, (%r10)
+_return_T_done_encrypt:
+	mov	%r14, %rsp
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	ret
+
+#endif
+
 
 _key_expansion_128:
 _key_expansion_256a:
@@ -55,10 +1709,11 @@
 	shufps $0b10001100, %xmm0, %xmm4
 	pxor %xmm4, %xmm0
 	pxor %xmm1, %xmm0
-	movaps %xmm0, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm0, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
+.align 4
 _key_expansion_192a:
 	pshufd $0b01010101, %xmm1, %xmm1
 	shufps $0b00010000, %xmm0, %xmm4
@@ -76,12 +1731,13 @@
 
 	movaps %xmm0, %xmm1
 	shufps $0b01000100, %xmm0, %xmm6
-	movaps %xmm6, (%rcx)
+	movaps %xmm6, (TKEYP)
 	shufps $0b01001110, %xmm2, %xmm1
-	movaps %xmm1, 16(%rcx)
-	add $0x20, %rcx
+	movaps %xmm1, 0x10(TKEYP)
+	add $0x20, TKEYP
 	ret
 
+.align 4
 _key_expansion_192b:
 	pshufd $0b01010101, %xmm1, %xmm1
 	shufps $0b00010000, %xmm0, %xmm4
@@ -96,10 +1752,11 @@
 	pxor %xmm3, %xmm2
 	pxor %xmm5, %xmm2
 
-	movaps %xmm0, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm0, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
+.align 4
 _key_expansion_256b:
 	pshufd $0b10101010, %xmm1, %xmm1
 	shufps $0b00010000, %xmm2, %xmm4
@@ -107,8 +1764,8 @@
 	shufps $0b10001100, %xmm2, %xmm4
 	pxor %xmm4, %xmm2
 	pxor %xmm1, %xmm2
-	movaps %xmm2, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm2, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
 /*
@@ -116,17 +1773,23 @@
  *                   unsigned int key_len)
  */
 ENTRY(aesni_set_key)
-	movups (%rsi), %xmm0		# user key (first 16 bytes)
-	movaps %xmm0, (%rdi)
-	lea 0x10(%rdi), %rcx		# key addr
-	movl %edx, 480(%rdi)
+#ifndef __x86_64__
+	pushl KEYP
+	movl 8(%esp), KEYP		# ctx
+	movl 12(%esp), UKEYP		# in_key
+	movl 16(%esp), %edx		# key_len
+#endif
+	movups (UKEYP), %xmm0		# user key (first 16 bytes)
+	movaps %xmm0, (KEYP)
+	lea 0x10(KEYP), TKEYP		# key addr
+	movl %edx, 480(KEYP)
 	pxor %xmm4, %xmm4		# xmm4 is assumed 0 in _key_expansion_x
 	cmp $24, %dl
 	jb .Lenc_key128
 	je .Lenc_key192
-	movups 0x10(%rsi), %xmm2	# other user key
-	movaps %xmm2, (%rcx)
-	add $0x10, %rcx
+	movups 0x10(UKEYP), %xmm2	# other user key
+	movaps %xmm2, (TKEYP)
+	add $0x10, TKEYP
 	AESKEYGENASSIST 0x1 %xmm2 %xmm1		# round 1
 	call _key_expansion_256a
 	AESKEYGENASSIST 0x1 %xmm0 %xmm1
@@ -155,7 +1818,7 @@
 	call _key_expansion_256a
 	jmp .Ldec_key
 .Lenc_key192:
-	movq 0x10(%rsi), %xmm2		# other user key
+	movq 0x10(UKEYP), %xmm2		# other user key
 	AESKEYGENASSIST 0x1 %xmm2 %xmm1		# round 1
 	call _key_expansion_192a
 	AESKEYGENASSIST 0x2 %xmm2 %xmm1		# round 2
@@ -195,33 +1858,47 @@
 	AESKEYGENASSIST 0x36 %xmm0 %xmm1	# round 10
 	call _key_expansion_128
 .Ldec_key:
-	sub $0x10, %rcx
-	movaps (%rdi), %xmm0
-	movaps (%rcx), %xmm1
-	movaps %xmm0, 240(%rcx)
-	movaps %xmm1, 240(%rdi)
-	add $0x10, %rdi
-	lea 240-16(%rcx), %rsi
+	sub $0x10, TKEYP
+	movaps (KEYP), %xmm0
+	movaps (TKEYP), %xmm1
+	movaps %xmm0, 240(TKEYP)
+	movaps %xmm1, 240(KEYP)
+	add $0x10, KEYP
+	lea 240-16(TKEYP), UKEYP
 .align 4
 .Ldec_key_loop:
-	movaps (%rdi), %xmm0
+	movaps (KEYP), %xmm0
 	AESIMC %xmm0 %xmm1
-	movaps %xmm1, (%rsi)
-	add $0x10, %rdi
-	sub $0x10, %rsi
-	cmp %rcx, %rdi
+	movaps %xmm1, (UKEYP)
+	add $0x10, KEYP
+	sub $0x10, UKEYP
+	cmp TKEYP, KEYP
 	jb .Ldec_key_loop
-	xor %rax, %rax
+	xor AREG, AREG
+#ifndef __x86_64__
+	popl KEYP
+#endif
 	ret
 
 /*
  * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
  */
 ENTRY(aesni_enc)
+#ifndef __x86_64__
+	pushl KEYP
+	pushl KLEN
+	movl 12(%esp), KEYP
+	movl 16(%esp), OUTP
+	movl 20(%esp), INP
+#endif
 	movl 480(KEYP), KLEN		# key length
 	movups (INP), STATE		# input
 	call _aesni_enc1
 	movups STATE, (OUTP)		# output
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+#endif
 	ret
 
 /*
@@ -236,6 +1913,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_enc1:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -298,6 +1976,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_enc4:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -391,11 +2070,22 @@
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
  */
 ENTRY(aesni_dec)
+#ifndef __x86_64__
+	pushl KEYP
+	pushl KLEN
+	movl 12(%esp), KEYP
+	movl 16(%esp), OUTP
+	movl 20(%esp), INP
+#endif
 	mov 480(KEYP), KLEN		# key length
 	add $240, KEYP
 	movups (INP), STATE		# input
 	call _aesni_dec1
 	movups STATE, (OUTP)		#output
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+#endif
 	ret
 
 /*
@@ -410,6 +2100,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_dec1:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -472,6 +2163,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_dec4:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -566,6 +2258,15 @@
  *		      size_t len)
  */
 ENTRY(aesni_ecb_enc)
+#ifndef __x86_64__
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 16(%esp), KEYP
+	movl 20(%esp), OUTP
+	movl 24(%esp), INP
+	movl 28(%esp), LEN
+#endif
 	test LEN, LEN		# check length
 	jz .Lecb_enc_ret
 	mov 480(KEYP), KLEN
@@ -602,6 +2303,11 @@
 	cmp $16, LEN
 	jge .Lecb_enc_loop1
 .Lecb_enc_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+#endif
 	ret
 
 /*
@@ -609,6 +2315,15 @@
  *		      size_t len);
  */
 ENTRY(aesni_ecb_dec)
+#ifndef __x86_64__
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 16(%esp), KEYP
+	movl 20(%esp), OUTP
+	movl 24(%esp), INP
+	movl 28(%esp), LEN
+#endif
 	test LEN, LEN
 	jz .Lecb_dec_ret
 	mov 480(KEYP), KLEN
@@ -646,6 +2361,11 @@
 	cmp $16, LEN
 	jge .Lecb_dec_loop1
 .Lecb_dec_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+#endif
 	ret
 
 /*
@@ -653,6 +2373,17 @@
  *		      size_t len, u8 *iv)
  */
 ENTRY(aesni_cbc_enc)
+#ifndef __x86_64__
+	pushl IVP
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 20(%esp), KEYP
+	movl 24(%esp), OUTP
+	movl 28(%esp), INP
+	movl 32(%esp), LEN
+	movl 36(%esp), IVP
+#endif
 	cmp $16, LEN
 	jb .Lcbc_enc_ret
 	mov 480(KEYP), KLEN
@@ -670,6 +2401,12 @@
 	jge .Lcbc_enc_loop
 	movups STATE, (IVP)
 .Lcbc_enc_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+	popl IVP
+#endif
 	ret
 
 /*
@@ -677,6 +2414,17 @@
  *		      size_t len, u8 *iv)
  */
 ENTRY(aesni_cbc_dec)
+#ifndef __x86_64__
+	pushl IVP
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 20(%esp), KEYP
+	movl 24(%esp), OUTP
+	movl 28(%esp), INP
+	movl 32(%esp), LEN
+	movl 36(%esp), IVP
+#endif
 	cmp $16, LEN
 	jb .Lcbc_dec_just_ret
 	mov 480(KEYP), KLEN
@@ -690,16 +2438,30 @@
 	movaps IN1, STATE1
 	movups 0x10(INP), IN2
 	movaps IN2, STATE2
+#ifdef __x86_64__
 	movups 0x20(INP), IN3
 	movaps IN3, STATE3
 	movups 0x30(INP), IN4
 	movaps IN4, STATE4
+#else
+	movups 0x20(INP), IN1
+	movaps IN1, STATE3
+	movups 0x30(INP), IN2
+	movaps IN2, STATE4
+#endif
 	call _aesni_dec4
 	pxor IV, STATE1
+#ifdef __x86_64__
 	pxor IN1, STATE2
 	pxor IN2, STATE3
 	pxor IN3, STATE4
 	movaps IN4, IV
+#else
+	pxor (INP), STATE2
+	pxor 0x10(INP), STATE3
+	pxor IN1, STATE4
+	movaps IN2, IV
+#endif
 	movups STATE1, (OUTP)
 	movups STATE2, 0x10(OUTP)
 	movups STATE3, 0x20(OUTP)
@@ -727,8 +2489,15 @@
 .Lcbc_dec_ret:
 	movups IV, (IVP)
 .Lcbc_dec_just_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+	popl IVP
+#endif
 	ret
 
+#ifdef __x86_64__
 .align 16
 .Lbswap_mask:
 	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
@@ -744,6 +2513,7 @@
  *	INC:	== 1, in little endian
  *	BSWAP_MASK == endian swapping mask
  */
+.align 4
 _aesni_inc_init:
 	movaps .Lbswap_mask, BSWAP_MASK
 	movaps IV, CTR
@@ -768,6 +2538,7 @@
  *	CTR:	== output IV, in little endian
  *	TCTR_LOW: == lower qword of CTR
  */
+.align 4
 _aesni_inc:
 	paddq INC, CTR
 	add $1, TCTR_LOW
@@ -839,3 +2610,4 @@
 	movups IV, (IVP)
 .Lctr_enc_just_ret:
 	ret
+#endif
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 2cb3dcc..e1e60c7 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -5,6 +5,14 @@
  * Copyright (C) 2008, Intel Corp.
  *    Author: Huang Ying <ying.huang@intel.com>
  *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ *    Authors: Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -21,6 +29,10 @@
 #include <crypto/ctr.h>
 #include <asm/i387.h>
 #include <asm/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
 #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
 #define HAS_CTR
@@ -42,8 +54,31 @@
 	struct cryptd_ablkcipher *cryptd_tfm;
 };
 
-#define AESNI_ALIGN	16
+/* This data is stored at the end of the crypto_tfm struct.
+ * It is a per-"session" data storage location and needs to be
+ * 16 byte aligned.
+ */
+struct aesni_rfc4106_gcm_ctx {
+	u8 hash_subkey[16];
+	struct crypto_aes_ctx aes_key_expanded;
+	u8 nonce[4];
+	struct cryptd_aead *cryptd_tfm;
+};
+
+struct aesni_gcm_set_hash_subkey_result {
+	int err;
+	struct completion completion;
+};
+
+struct aesni_hash_subkey_req_data {
+	u8 iv[16];
+	struct aesni_gcm_set_hash_subkey_result result;
+	struct scatterlist sg;
+};
+
+#define AESNI_ALIGN	(16)
 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
+#define RFC4106_HASH_SUBKEY_SIZE 16
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 			     unsigned int key_len);
@@ -59,9 +94,62 @@
 			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
+#ifdef CONFIG_X86_64
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
+/* asmlinkage void aesni_gcm_enc()
+ * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
+ * u8 *out, Ciphertext output. Encrypt in-place is allowed.
+ * const u8 *in, Plaintext input
+ * unsigned long plaintext_len, Length of data in bytes for encryption.
+ * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
+ *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
+ *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD)
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
+ *          is going to be 8 or 12 bytes
+ * u8 *auth_tag, Authenticated Tag output.
+ * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
+ *          Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
+			const u8 *in, unsigned long plaintext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
+
+/* asmlinkage void aesni_gcm_dec()
+ * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
+ * u8 *out, Plaintext output. Decrypt in-place is allowed.
+ * const u8 *in, Ciphertext input
+ * unsigned long ciphertext_len, Length of data in bytes for decryption.
+ * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
+ *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
+ *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD)
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
+ * to be 8 or 12 bytes
+ * u8 *auth_tag, Authenticated Tag output.
+ * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
+ * Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
+			const u8 *in, unsigned long ciphertext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
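+
+/* Illustrative call pattern (a sketch with placeholder names, not taken from
+ * this driver): both routines use XMM state, so a hypothetical caller wraps
+ * them in kernel_fpu_begin()/kernel_fpu_end():
+ *
+ *	kernel_fpu_begin();
+ *	aesni_gcm_enc(aes_ctx, dst, src, len, iv, hash_subkey,
+ *		      aad, aad_len, tag, tag_len);
+ *	kernel_fpu_end();
+ *
+ * with the alignment requirements on aes_ctx, iv and hash_subkey as
+ * documented above.
+ */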
+
+static inline struct
+aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
+{
+	return
+		(struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)
+		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
+}
+#endif
+
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
@@ -324,6 +412,7 @@
 	},
 };
 
+#ifdef CONFIG_X86_64
 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 			    struct blkcipher_walk *walk)
 {
@@ -389,6 +478,7 @@
 		},
 	},
 };
+#endif
 
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
@@ -536,6 +626,7 @@
 	},
 };
 
+#ifdef CONFIG_X86_64
 static int ablk_ctr_init(struct crypto_tfm *tfm)
 {
 	struct cryptd_ablkcipher *cryptd_tfm;
@@ -612,6 +703,7 @@
 	},
 };
 #endif
+#endif
 
 #ifdef HAS_LRW
 static int ablk_lrw_init(struct crypto_tfm *tfm)
@@ -730,6 +822,424 @@
 };
 #endif
 
+#ifdef CONFIG_X86_64
+static int rfc4106_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_aead *cryptd_tfm;
+	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ctx->cryptd_tfm = cryptd_tfm;
+	tfm->crt_aead.reqsize = sizeof(struct aead_request)
+		+ crypto_aead_reqsize(&cryptd_tfm->base);
+	return 0;
+}
+
+static void rfc4106_exit(struct crypto_tfm *tfm)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx =
+		(struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	if (!IS_ERR(ctx->cryptd_tfm))
+		cryptd_free_aead(ctx->cryptd_tfm);
+	return;
+}
+
+static void
+rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
+{
+	struct aesni_gcm_set_hash_subkey_result *result = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	result->err = err;
+	complete(&result->completion);
+}
+
+static int
+rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
+{
+	struct crypto_ablkcipher *ctr_tfm;
+	struct ablkcipher_request *req;
+	int ret = -EINVAL;
+	struct aesni_hash_subkey_req_data *req_data;
+
+	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
+	if (IS_ERR(ctr_tfm))
+		return PTR_ERR(ctr_tfm);
+
+	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
+
+	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
+	if (ret) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return ret;
+	}
+
+	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
+	if (!req) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return -EINVAL;
+	}
+
+	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
+	if (!req_data) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return -ENOMEM;
+	}
+	memset(req_data->iv, 0, sizeof(req_data->iv));
+
+	/* Clear the data in the hash sub key container to zero.*/
+	/* We want to cipher all zeros to create the hash sub key. */
+	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
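+	/* Per the GCM definition the hash subkey is H = CIPH_K(0^128);
+	 * encrypting this zeroed block with ctr(aes) and an all-zero
+	 * counter block computes exactly that value.
+	 */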
+
+	init_completion(&req_data->result.completion);
+	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
+	ablkcipher_request_set_tfm(req, ctr_tfm);
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+					CRYPTO_TFM_REQ_MAY_BACKLOG,
+					rfc4106_set_hash_subkey_done,
+					&req_data->result);
+
+	ablkcipher_request_set_crypt(req, &req_data->sg,
+		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
+
+	ret = crypto_ablkcipher_encrypt(req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible
+			(&req_data->result.completion);
+		if (!ret)
+			ret = req_data->result.err;
+	}
+	ablkcipher_request_free(req);
+	kfree(req_data);
+	crypto_free_ablkcipher(ctr_tfm);
+	return ret;
+}
+
+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
+						   unsigned int key_len)
+{
+	int ret = 0;
+	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	u8 *new_key_mem = NULL;
+
+	if (key_len < 4) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	/*Account for 4 byte nonce at the end.*/
+	key_len -= 4;
+	if (key_len != AES_KEYSIZE_128) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
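+	/* Example (sketch): for AES-128 the key blob passed to setkey is 20
+	 * bytes long - the 16-byte AES key followed by this 4-byte nonce
+	 * (the RFC4106 salt).
+	 */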
+	/*This must be on a 16 byte boundary!*/
+	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
+		return -EINVAL;
+
+	if ((unsigned long)key % AESNI_ALIGN) {
+		/* key is not aligned: use an auxiliary aligned pointer */
+		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
+		if (!new_key_mem)
+			return -ENOMEM;
+
+		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
+		memcpy(new_key_mem, key, key_len);
+		key = new_key_mem;
+	}
+
+	if (!irq_fpu_usable())
+		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
+		key, key_len);
+	else {
+		kernel_fpu_begin();
+		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
+		kernel_fpu_end();
+	}
+	/* This must be on a 16 byte boundary! */
+	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+exit:
+	kfree(new_key_mem);
+	return ret;
+}
+
+/* This is the Integrity Check Value (aka the authentication tag) length and
+ * can be 8, 12 or 16 bytes long. */
+static int rfc4106_set_authsize(struct crypto_aead *parent,
+				unsigned int authsize)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	crypto_aead_crt(parent)->authsize = authsize;
+	crypto_aead_crt(cryptd_child)->authsize = authsize;
+	return 0;
+}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_encrypt(cryptd_req);
+	} else {
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.encrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_decrypt(cryptd_req);
+	} else {
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.decrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static struct crypto_alg rfc4106_alg = {
+	.cra_name = "rfc4106(gcm(aes))",
+	.cra_driver_name = "rfc4106-gcm-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_nivaead_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
+	.cra_init = rfc4106_init,
+	.cra_exit = rfc4106_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = rfc4106_set_key,
+			.setauthsize = rfc4106_set_authsize,
+			.encrypt = rfc4106_encrypt,
+			.decrypt = rfc4106_decrypt,
+			.geniv = "seqiv",
+			.ivsize = 8,
+			.maxauthsize = 16,
+		},
+	},
+};
+
+static int __driver_rfc4106_encrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	__be32 counter = cpu_to_be32(1);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_tab[16+AESNI_ALIGN];
+	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	/* Assuming we are supporting rfc4106 64-bit extended */
+	/* sequence numbers, we need to have the AAD length equal */
+	/* to 8 or 12 bytes. */
+	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+		return -EINVAL;
+	/* Build the IV */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
+			GFP_ATOMIC);
+		if (unlikely(!src))
+			return -ENOMEM;
+		assoc = (src + req->cryptlen + auth_tag_len);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+					req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+		+ ((unsigned long)req->cryptlen), auth_tag_len);
+
+	/* The authTag (aka the Integrity Check Value) needs to be written
+	 * back to the packet. */
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0,
+			req->cryptlen + auth_tag_len, 1);
+		kfree(src);
+	}
+	return 0;
+}
+
+static int __driver_rfc4106_decrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	unsigned long tempCipherLen = 0;
+	__be32 counter = cpu_to_be32(1);
+	int retval = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_and_authTag[32+AESNI_ALIGN];
+	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
+	u8 *authTag = iv + 16;
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	if (unlikely((req->cryptlen < auth_tag_len) ||
+		(req->assoclen != 8 && req->assoclen != 12)))
+		return -EINVAL;
+	/* Assuming we are supporting rfc4106 64-bit extended */
+	/* sequence numbers, we need to have the AAD length */
+	/* equal to 8 or 12 bytes. */
+
+	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
+	/* Build the IV */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+		if (!src)
+			return -ENOMEM;
+		assoc = (src + req->cryptlen + auth_tag_len);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+			req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
+		authTag, auth_tag_len);
+
+	/* Compare generated tag with passed in tag. */
+	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
+		-EBADMSG : 0;
+
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+		kfree(src);
+	}
+	return retval;
+}
+
+static struct crypto_alg __rfc4106_alg = {
+	.cra_name		= "__gcm-aes-aesni",
+	.cra_driver_name	= "__driver-gcm-aes-aesni",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
+	.cra_blocksize		= 1,
+	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_aead_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
+	.cra_u = {
+		.aead = {
+			.encrypt	= __driver_rfc4106_encrypt,
+			.decrypt	= __driver_rfc4106_decrypt,
+		},
+	},
+};
+#endif
+
 static int __init aesni_init(void)
 {
 	int err;
@@ -738,6 +1248,7 @@
 		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
+
 	if ((err = crypto_register_alg(&aesni_alg)))
 		goto aes_err;
 	if ((err = crypto_register_alg(&__aesni_alg)))
@@ -746,18 +1257,24 @@
 		goto blk_ecb_err;
 	if ((err = crypto_register_alg(&blk_cbc_alg)))
 		goto blk_cbc_err;
-	if ((err = crypto_register_alg(&blk_ctr_alg)))
-		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ecb_alg)))
 		goto ablk_ecb_err;
 	if ((err = crypto_register_alg(&ablk_cbc_alg)))
 		goto ablk_cbc_err;
+#ifdef CONFIG_X86_64
+	if ((err = crypto_register_alg(&blk_ctr_alg)))
+		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ctr_alg)))
 		goto ablk_ctr_err;
+	if ((err = crypto_register_alg(&__rfc4106_alg)))
+		goto __aead_gcm_err;
+	if ((err = crypto_register_alg(&rfc4106_alg)))
+		goto aead_gcm_err;
 #ifdef HAS_CTR
 	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
 		goto ablk_rfc3686_ctr_err;
 #endif
+#endif
 #ifdef HAS_LRW
 	if ((err = crypto_register_alg(&ablk_lrw_alg)))
 		goto ablk_lrw_err;
@@ -770,7 +1287,6 @@
 	if ((err = crypto_register_alg(&ablk_xts_alg)))
 		goto ablk_xts_err;
 #endif
-
 	return err;
 
 #ifdef HAS_XTS
@@ -784,18 +1300,24 @@
 	crypto_unregister_alg(&ablk_lrw_alg);
 ablk_lrw_err:
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 ablk_rfc3686_ctr_err:
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+aead_gcm_err:
+	crypto_unregister_alg(&__rfc4106_alg);
+__aead_gcm_err:
 	crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
+	crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
 	crypto_unregister_alg(&ablk_ecb_alg);
 ablk_ecb_err:
-	crypto_unregister_alg(&blk_ctr_alg);
-blk_ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
 blk_cbc_err:
 	crypto_unregister_alg(&blk_ecb_alg);
@@ -818,13 +1340,17 @@
 #ifdef HAS_LRW
 	crypto_unregister_alg(&ablk_lrw_alg);
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+	crypto_unregister_alg(&__rfc4106_alg);
 	crypto_unregister_alg(&ablk_ctr_alg);
+	crypto_unregister_alg(&blk_ctr_alg);
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 	crypto_unregister_alg(&ablk_ecb_alg);
-	crypto_unregister_alg(&blk_ctr_alg);
 	crypto_unregister_alg(&blk_cbc_alg);
 	crypto_unregister_alg(&blk_ecb_alg);
 	crypto_unregister_alg(&__aesni_alg);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 55d106b..4ea15ca 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -88,6 +88,7 @@
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
@@ -185,17 +186,16 @@
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern int acpi_get_nodes(struct bootnode *physnodes);
+extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end);
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+#ifdef CONFIG_NUMA_EMU
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
 				   int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes)
-{
-}
 #endif
+#endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
 
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 6aee50d..64dc82e 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,16 +3,27 @@
 
 #include <linux/pci.h>
 
+struct amd_nb_bus_dev_range {
+	u8 bus;
+	u8 dev_base;
+	u8 dev_limit;
+};
+
 extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_get_nodes(struct bootnode *nodes);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
 
+#ifdef CONFIG_NUMA_EMU
+extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
+extern void amd_get_nodes(struct bootnode *nodes);
+#endif
+
 struct amd_northbridge {
 	struct pci_dev *misc;
 };
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 5e3969c..3c89694 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -233,6 +233,7 @@
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
 extern void end_local_APIC_setup(void);
+extern void bsp_end_local_APIC_setup(void);
 extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 3b62ab5..5e1a2ee 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -32,11 +32,7 @@
 #define BOOT_HEAP_SIZE             0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#ifdef CONFIG_X86_64
-#define BOOT_HEAP_SIZE	0x7000
-#else
-#define BOOT_HEAP_SIZE	0x4000
-#endif
+#define BOOT_HEAP_SIZE	0x8000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63e35ec..62f0844 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -1,48 +1,8 @@
 #ifndef _ASM_X86_CACHEFLUSH_H
 #define _ASM_X86_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the intel. */
-static inline void flush_cache_all(void) { }
-static inline void flush_cache_mm(struct mm_struct *mm) { }
-static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
-static inline void flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end) { }
-static inline void flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long vmaddr, unsigned long pfn) { }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-static inline void flush_dcache_page(struct page *page) { }
-static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
-static inline void flush_icache_range(unsigned long start,
-				      unsigned long end) { }
-static inline void flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page) { }
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
-					   struct page *page,
-					   unsigned long addr,
-					   unsigned long len) { }
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-static inline void flush_cache_vunmap(unsigned long start,
-				      unsigned long end) { }
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-				     struct page *page, unsigned long vaddr,
-				     void *dst, const void *src,
-				     unsigned long len)
-{
-	memcpy(dst, src, len);
-}
-
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-				       struct page *page, unsigned long vaddr,
-				       void *dst, const void *src,
-				       unsigned long len)
-{
-	memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_X86_PAT
 /*
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
new file mode 100644
index 0000000..e656ad8
--- /dev/null
+++ b/arch/x86/include/asm/ce4100.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 4fab24d..4564c8e 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,5 +32,6 @@
 
 DECLARE_PER_CPU(int, cpu_state);
 
+int mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0141b23..4729b2b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -116,11 +116,11 @@
 #endif
 	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
-	__end_of_permanent_fixed_addresses,
-
 #ifdef	CONFIG_X86_MRST
 	FIX_LNW_VRTC,
 #endif
+	__end_of_permanent_fixed_addresses,
+
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 49dbfdf..91d915a 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -38,12 +38,9 @@
 	return __gpio_cansleep(gpio);
 }
 
-/*
- * Not implemented, yet.
- */
 static inline int gpio_to_irq(unsigned int gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb..c704b38 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -10,6 +10,9 @@
 #include <asm/apicdef.h>
 #include <asm/irq_vectors.h>
 
+/* Even though we don't support this, supply it to appease OF */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+
 static inline int irq_canonicalize(int irq)
 {
 	return ((irq == 2) ? 9 : irq);
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index f52d42e..574dbc2 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 	do {							\
 		asm goto("1:"					\
 			JUMP_LABEL_INITIAL_NOP			\
-			".pushsection __jump_table,  \"a\" \n\t"\
+			".pushsection __jump_table,  \"aw\" \n\t"\
 			_ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
 			".popsection \n\t"			\
 			: :  "i" (key) :  : label);		\
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f23eb25..ca242d3 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -18,7 +18,6 @@
 	DIE_TRAP,
 	DIE_GPF,
 	DIE_CALL,
-	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
 	DIE_NMIUNKNOWN,
 };
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b36c6b3..8e37deb 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -15,6 +15,14 @@
 
 struct x86_emulate_ctxt;
 
+struct x86_exception {
+	u8 vector;
+	bool error_code_valid;
+	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
+};
+
 /*
  * x86_emulate_ops:
  *
@@ -64,7 +72,8 @@
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			unsigned int bytes, struct kvm_vcpu *vcpu,
+			struct x86_exception *fault);
 
 	/*
 	 * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -74,7 +83,8 @@
 	 *  @bytes: [IN ] Number of bytes to write to memory.
 	 */
 	int (*write_std)(unsigned long addr, void *val,
-			 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			 unsigned int bytes, struct kvm_vcpu *vcpu,
+			 struct x86_exception *fault);
 	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
 	 *        Used for instruction fetch.
@@ -83,7 +93,8 @@
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*fetch)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+		     unsigned int bytes, struct kvm_vcpu *vcpu,
+		     struct x86_exception *fault);
 
 	/*
 	 * read_emulated: Read bytes from emulated/special memory area.
@@ -94,7 +105,7 @@
 	int (*read_emulated)(unsigned long addr,
 			     void *val,
 			     unsigned int bytes,
-			     unsigned int *error,
+			     struct x86_exception *fault,
 			     struct kvm_vcpu *vcpu);
 
 	/*
@@ -107,7 +118,7 @@
 	int (*write_emulated)(unsigned long addr,
 			      const void *val,
 			      unsigned int bytes,
-			      unsigned int *error,
+			      struct x86_exception *fault,
 			      struct kvm_vcpu *vcpu);
 
 	/*
@@ -122,7 +133,7 @@
 				const void *old,
 				const void *new,
 				unsigned int bytes,
-				unsigned int *error,
+				struct x86_exception *fault,
 				struct kvm_vcpu *vcpu);
 
 	int (*pio_in_emulated)(int size, unsigned short port, void *val,
@@ -159,7 +170,10 @@
 	};
 	union {
 		unsigned long *reg;
-		unsigned long mem;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
 	} addr;
 	union {
 		unsigned long val;
@@ -226,9 +240,8 @@
 
 	bool perm_ok; /* do not check permissions if true */
 
-	int exception; /* exception that happens during emulation or -1 */
-	u32 error_code; /* error code for exception */
-	bool error_code_valid;
+	bool have_exception;
+	struct x86_exception exception;
 
 	/* decode cache */
 	struct decode_cache decode;
@@ -252,7 +265,7 @@
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f702f82..ffd7f8d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -83,11 +83,14 @@
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
+#define ASYNC_PF_PER_VCPU 64
+
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
 struct kvm;
+struct kvm_async_pf;
 
 enum kvm_reg {
 	VCPU_REGS_RAX = 0,
@@ -114,6 +117,7 @@
 
 enum kvm_reg_ex {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR3,
 };
 
 enum {
@@ -238,16 +242,18 @@
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+			  bool prefault);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
@@ -315,16 +321,6 @@
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	/*
-	 * This struct is filled with the necessary information to propagate a
-	 * page fault into the guest
-	 */
-	struct {
-		u64      address;
-		unsigned error_code;
-		bool     nested;
-	} fault;
-
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -412,6 +408,15 @@
 	u64 hv_vapic;
 
 	cpumask_var_t wbinvd_dirty_mask;
+
+	struct {
+		bool halted;
+		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		struct gfn_to_hva_cache data;
+		u64 msr_val;
+		u32 id;
+		bool send_user_only;
+	} apf;
 };
 
 struct kvm_arch {
@@ -456,6 +461,10 @@
 	/* fields used by HYPER-V emulation */
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
+
+	#ifdef CONFIG_KVM_MMU_AUDIT
+	int audit_point;
+	#endif
 };
 
 struct kvm_vm_stat {
@@ -529,6 +538,7 @@
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -582,9 +592,17 @@
 
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 	const struct trace_print_flags *exit_reasons_str;
 };
 
+struct kvm_arch_async_pf {
+	u32 token;
+	gfn_t gfn;
+	unsigned long cr3;
+	bool direct_map;
+};
+
 extern struct kvm_x86_ops *kvm_x86_ops;
 
 int kvm_mmu_module_init(void);
@@ -594,7 +612,6 @@
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
-void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -623,8 +640,15 @@
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, u16 error_code, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+			int emulation_type)
+{
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 
@@ -650,7 +674,7 @@
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
@@ -668,11 +692,11 @@
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
@@ -690,16 +714,21 @@
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_enable_tdp(void);
@@ -766,20 +795,25 @@
 #define HF_VINTR_MASK		(1 << 2)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
+#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
 
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
  * Trap the fault and ignore the instruction if that happens.
  */
-asmlinkage void kvm_handle_fault_on_reboot(void);
+asmlinkage void kvm_spurious_fault(void);
+extern bool kvm_rebooting;
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	"666: " insn "\n\t" \
+	"668: \n\t"                           \
 	".pushsection .fixup, \"ax\" \n" \
 	"667: \n\t" \
+	"cmpb $0, kvm_rebooting \n\t"	      \
+	"jne 668b \n\t"      		      \
 	__ASM_SIZE(push) " $666b \n\t"	      \
-	"jmp kvm_handle_fault_on_reboot \n\t" \
+	"call kvm_spurious_fault \n\t"	      \
 	".popsection \n\t" \
 	".pushsection __ex_table, \"a\" \n\t" \
 	_ASM_PTR " 666b, 667b \n\t" \
@@ -788,6 +822,7 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
@@ -799,4 +834,15 @@
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work);
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 7b562b6..a427bf7 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -20,6 +20,7 @@
  * are available. The use of 0x11 and 0x12 is deprecated
  */
 #define KVM_FEATURE_CLOCKSOURCE2        3
+#define KVM_FEATURE_ASYNC_PF		4
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -32,9 +33,13 @@
 /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
 #define MSR_KVM_WALL_CLOCK_NEW  0x4b564d00
 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
 
 #define KVM_MAX_MMU_OP_BATCH           32
 
+#define KVM_ASYNC_PF_ENABLED			(1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS		(1 << 1)
+
 /* Operations for KVM_HC_MMU_OP */
 #define KVM_MMU_OP_WRITE_PTE            1
 #define KVM_MMU_OP_FLUSH_TLB	        2
@@ -61,10 +66,20 @@
 	__u64 pt_phys;
 };
 
+#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
+#define KVM_PV_REASON_PAGE_READY 2
+
+struct kvm_vcpu_pv_apf_data {
+	__u32 reason;
+	__u8 pad[60];
+	__u32 enabled;
+};
+
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
 extern void kvmclock_init(void);
+extern int kvm_register_clock(char *txt);
 
 
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
@@ -160,8 +175,17 @@
 
 #ifdef CONFIG_KVM_GUEST
 void __init kvm_guest_init(void);
+void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wake(u32 token);
+u32 kvm_read_and_reset_pf_reason(void);
 #else
 #define kvm_guest_init() do { } while (0)
+#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wake(T) do {} while(0)
+static inline u32 kvm_read_and_reset_pf_reason(void)
+{
+	return 0;
+}
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f792060..72a8b52 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -7,9 +7,19 @@
 
 #include <asm/mc146818rtc.h>
 
+#define NMI_REASON_PORT		0x61
+
+#define NMI_REASON_SERR		0x80
+#define NMI_REASON_IOCHK	0x40
+#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR	0x04
+#define NMI_REASON_CLEAR_IOCHK	0x08
+#define NMI_REASON_CLEAR_MASK	0x0f
+
 static inline unsigned char get_nmi_reason(void)
 {
-	return inb(0x61);
+	return inb(NMI_REASON_PORT);
 }
 
 static inline void reassert_nmi(void)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0..8b5393e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
+		/* stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0..43a18c7 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,11 @@
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
+#define NHM_C3_AUTO_DEMOTE		(1UL << 25)
+#define NHM_C1_AUTO_DEMOTE		(1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE		(1UL << 25)
+
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c4021b9..c76f5b9 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -23,6 +23,26 @@
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
+/*
+ * Define some priorities for the nmi notifier call chain.
+ *
+ * Create a local nmi bit that has a higher priority than
+ * external nmis, because the local ones are more frequent.
+ *
+ * Also set up some default high/normal/low settings for
+ * subsystems to register with.  Using 4 bits to separate
+ * the priorities.  This can go a lot higher if need be.
+ */
+
+#define NMI_LOCAL_SHIFT		16	/* randomly picked */
+#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
+#define NMI_HIGH_PRIOR		(1ULL << 8)
+#define NMI_NORMAL_PRIOR	(1ULL << 4)
+#define NMI_LOW_PRIOR		(1ULL << 0)
+#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
+#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
+#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+
 void stop_nmi(void);
 void restart_nmi(void);
 
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index a372290..b0ef2b4 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_NUMA_32_H
 #define _ASM_X86_NUMA_32_H
 
+extern int numa_off;
+
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
 
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 823e070..0493be3 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -38,8 +38,9 @@
 extern void __cpuinit numa_remove_cpu(int cpu);
 
 #ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE	((u64)64 << 20)
+#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
+void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
 static inline void init_cpu_to_node(void)		{ }
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 42a978c..f482010 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -107,10 +107,14 @@
 /* GPIO assignments */
 
 #define OLPC_GPIO_MIC_AC	1
-#define OLPC_GPIO_DCON_IRQ	geode_gpio(7)
+#define OLPC_GPIO_DCON_STAT0	5
+#define OLPC_GPIO_DCON_STAT1	6
+#define OLPC_GPIO_DCON_IRQ	7
 #define OLPC_GPIO_THRM_ALRM	geode_gpio(10)
-#define OLPC_GPIO_SMB_CLK	geode_gpio(14)
-#define OLPC_GPIO_SMB_DATA	geode_gpio(15)
+#define OLPC_GPIO_DCON_LOAD    11
+#define OLPC_GPIO_DCON_BLANK   12
+#define OLPC_GPIO_SMB_CLK      14
+#define OLPC_GPIO_SMB_DATA     15
 #define OLPC_GPIO_WORKAUX	geode_gpio(24)
 #define OLPC_GPIO_LID		geode_gpio(26)
 #define OLPC_GPIO_ECSCI		geode_gpio(27)
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h
index 2a84781..641988e 100644
--- a/arch/x86/include/asm/olpc_ofw.h
+++ b/arch/x86/include/asm/olpc_ofw.h
@@ -8,6 +8,8 @@
 
 #ifdef CONFIG_OLPC_OPENFIRMWARE
 
+extern bool olpc_ofw_is_installed(void);
+
 /* run an OFW command by calling into the firmware */
 #define olpc_ofw(name, args, res) \
 	__olpc_ofw((name), ARRAY_SIZE(args), args, ARRAY_SIZE(res), res)
@@ -26,10 +28,17 @@
 
 #else /* !CONFIG_OLPC_OPENFIRMWARE */
 
+static inline bool olpc_ofw_is_installed(void) { return false; }
 static inline void olpc_ofw_detect(void) { }
 static inline void setup_olpc_ofw_pgd(void) { }
 static inline bool olpc_ofw_present(void) { return false; }
 
 #endif /* !CONFIG_OLPC_OPENFIRMWARE */
 
+#ifdef CONFIG_OLPC_OPENFIRMWARE_DT
+extern void olpc_dt_build_devicetree(void);
+#else
+static inline void olpc_dt_build_devicetree(void) { }
+#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */
+
 #endif /* _ASM_X86_OLPC_OFW_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7709c12..ebbc4d8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,6 +435,11 @@
 {
 	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
 }
+static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp)
+{
+	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
+}
 
 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
 				    pte_t *ptep)
@@ -442,6 +447,12 @@
 	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
 }
 
+static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
+				    pmd_t *pmdp)
+{
+	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
+}
+
 static inline pte_t __pte(pteval_t val)
 {
 	pteval_t ret;
@@ -543,6 +554,19 @@
 		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	if (sizeof(pmdval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
+	else
+		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+			    native_pmd_val(pmd));
+}
+#endif
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	pmdval_t val = native_pmd_val(pmd);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b82bac9..8288509 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -265,10 +265,16 @@
 	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
+			   pmd_t *pmdp, pmd_t pmdval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);
 	void (*pte_update_defer)(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep);
+	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
+			   pmd_t *pmdp);
+	void (*pmd_update_defer)(struct mm_struct *mm,
+				 unsigned long addr, pmd_t *pmdp);
 
 	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8ee4516..7e17295 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -273,34 +273,34 @@
 	typeof(var) pxo_new__ = (nval);					\
 	switch (sizeof(var)) {						\
 	case 1:								\
-		asm("\n1:mov "__percpu_arg(1)",%%al"			\
-		    "\n\tcmpxchgb %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%al"			\
+		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "q" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 2:								\
-		asm("\n1:mov "__percpu_arg(1)",%%ax"			\
-		    "\n\tcmpxchgw %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
+		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 4:								\
-		asm("\n1:mov "__percpu_arg(1)",%%eax"			\
-		    "\n\tcmpxchgl %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
+		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 8:								\
-		asm("\n1:mov "__percpu_arg(1)",%%rax"			\
-		    "\n\tcmpxchgq %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
+		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
@@ -414,8 +414,6 @@
 #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -432,8 +430,6 @@
 #define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
@@ -475,11 +471,15 @@
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 295e2ff..cc29086 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -20,6 +20,10 @@
 #define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
 #define ARCH_P4_MAX_CCCR	(18)
 
+#define ARCH_P4_CNTRVAL_BITS	(40)
+#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+#define ARCH_P4_UNFLAGGED_BIT	((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
+
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
 #define P4_ESCR_EVENTMASK_MASK	0x01fffe00U
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 271de94..b4389a4 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -92,7 +92,7 @@
 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-				  unsigned long adddress)
+				  unsigned long address)
 {
 	___pmd_free_tlb(tlb, pmd);
 }
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 2334982..98391db 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -46,6 +46,15 @@
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+	return __pmd(xchg((pmdval_t *)xp, 0));
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 177b016..94b979d 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -104,6 +104,29 @@
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+union split_pmd {
+	struct {
+		u32 pmd_low;
+		u32 pmd_high;
+	};
+	pmd_t pmd;
+};
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	union split_pmd res, *orig = (union split_pmd *)pmdp;
+
+	/* xchg acts as a barrier before setting of the high bits */
+	res.pmd_low = xchg(&orig->pmd_low, 0);
+	res.pmd_high = orig->pmd_high;
+	orig->pmd_high = 0;
+
+	return res.pmd;
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index ada823a..18601c8 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -35,6 +35,7 @@
 #else  /* !CONFIG_PARAVIRT */
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
+#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
 
 #define set_pte_atomic(ptep, pte)					\
 	native_set_pte_atomic(ptep, pte)
@@ -59,6 +60,8 @@
 
 #define pte_update(mm, addr, ptep)              do { } while (0)
 #define pte_update_defer(mm, addr, ptep)        do { } while (0)
+#define pmd_update(mm, addr, ptep)              do { } while (0)
+#define pmd_update_defer(mm, addr, ptep)        do { } while (0)
 
 #define pgd_val(x)	native_pgd_val(x)
 #define __pgd(x)	native_make_pgd(x)
@@ -94,6 +97,11 @@
 	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
+static inline int pmd_young(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
 static inline int pte_write(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_RW;
@@ -142,6 +150,23 @@
 		(_PAGE_PSE | _PAGE_PRESENT);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_SPLITTING;
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_PSE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return cpu_has_pse;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 {
 	pteval_t v = native_pte_val(pte);
@@ -216,6 +241,55 @@
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v & ~clear);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -256,6 +330,16 @@
 	return __pte(val);
 }
 
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmdval_t val = pmd_val(pmd);
+
+	val &= _HPAGE_CHG_MASK;
+	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+
+	return __pmd(val);
+}
+
 /* mprotect needs to preserve PAT bits when updating vm_page_prot */
 #define pgprot_modify pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
@@ -350,7 +434,7 @@
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
 /*
  * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -524,12 +608,26 @@
 	return res;
 }
 
+static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	pmd_t res = *pmdp;
+
+	native_pmd_clear(pmdp);
+	return res;
+}
+
 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 				     pte_t *ptep , pte_t pte)
 {
 	native_set_pte(ptep, pte);
 }
 
+static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
+				     pmd_t *pmdp , pmd_t pmd)
+{
+	native_set_pmd(pmdp, pmd);
+}
+
 #ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
@@ -607,6 +705,49 @@
 
 #define flush_tlb_fix_spurious_fault(vma, address)
 
+#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
+
+#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_RW;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+	pmd_update(mm, addr, pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pmd_t *pmdp)
+{
+	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
+	pmd_update(mm, addr, pmdp);
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f86da20..975f709 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -59,6 +59,16 @@
 	native_set_pte(ptep, pte);
 }
 
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+
+static inline void native_pmd_clear(pmd_t *pmd)
+{
+	native_set_pmd(pmd, native_make_pmd(0));
+}
+
 static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 {
 #ifdef CONFIG_SMP
@@ -72,14 +82,17 @@
 #endif
 }
 
-static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 {
-	*pmdp = pmd;
-}
-
-static inline void native_pmd_clear(pmd_t *pmd)
-{
-	native_set_pmd(pmd, native_make_pmd(0));
+#ifdef CONFIG_SMP
+	return native_make_pmd(xchg(&xp->pmd, 0));
+#else
+	/* native_local_pmdp_get_and_clear,
+	   but duplicated because of cyclic dependency */
+	pmd_t ret = *xp;
+	native_pmd_clear(xp);
+	return ret;
+#endif
 }
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
@@ -168,6 +181,7 @@
 #define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
 
 #define __HAVE_ARCH_PTE_SAME
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index d1f4a76..7db7723 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -22,6 +22,7 @@
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
+#define _PAGE_BIT_SPLITTING	_PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
 #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
 
 /* If _PAGE_BIT_PRESENT is clear, we use these: */
@@ -45,6 +46,7 @@
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
 #define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define _PAGE_SPLITTING	(_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
 #define __HAVE_ARCH_PTE_SPECIAL
 
 #ifdef CONFIG_KMEMCHECK
@@ -70,6 +72,7 @@
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c6efecf..45636ce 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -761,10 +761,11 @@
 extern void init_c1e_mask(void);
 
 extern unsigned long		boot_option_idle_override;
-extern unsigned long		idle_halt;
-extern unsigned long		idle_nomwait;
 extern bool			c1e_detected;
 
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
+			 IDLE_POLL, IDLE_FORCE_MWAIT};
+
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
@@ -901,7 +902,7 @@
 /*
  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
  * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessable even if the CPU haven't stored the SS/ESP registers
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
  * on the stack (interrupt gate does not save these registers
  * when switching to the same priv ring).
  * Therefore beware: accessing the ss/esp fields of the
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
new file mode 100644
index 0000000..b4ec95f
--- /dev/null
+++ b/arch/x86/include/asm/prom.h
@@ -0,0 +1 @@
+/* dummy prom.h; here to make linux/of.h's #includes happy */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4c2f63c..1f46951 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-	void *sp;
-	unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 6c22bf3..725b778 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@
 	 */
 	CMOS_WRITE(0, 0xf);
 
-	*((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+	*((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0e83105..f2b83bc 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -47,14 +47,13 @@
 	INTERCEPT_MONITOR,
 	INTERCEPT_MWAIT,
 	INTERCEPT_MWAIT_COND,
+	INTERCEPT_XSETBV,
 };
 
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
-	u16 intercept_dr_read;
-	u16 intercept_dr_write;
+	u32 intercept_cr;
+	u32 intercept_dr;
 	u32 intercept_exceptions;
 	u64 intercept;
 	u8 reserved_1[42];
@@ -81,14 +80,19 @@
 	u32 event_inj_err;
 	u64 nested_cr3;
 	u64 lbr_ctl;
-	u64 reserved_5;
+	u32 clean;
+	u32 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
 #define TLB_CONTROL_DO_NOTHING 0
 #define TLB_CONTROL_FLUSH_ALL_ASID 1
+#define TLB_CONTROL_FLUSH_ASID 3
+#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
 
 #define V_TPR_MASK 0x0f
 
@@ -204,19 +208,31 @@
 #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 #define SVM_SELECTOR_CODE_MASK (1 << 3)
 
-#define INTERCEPT_CR0_MASK 1
-#define INTERCEPT_CR3_MASK (1 << 3)
-#define INTERCEPT_CR4_MASK (1 << 4)
-#define INTERCEPT_CR8_MASK (1 << 8)
+#define INTERCEPT_CR0_READ	0
+#define INTERCEPT_CR3_READ	3
+#define INTERCEPT_CR4_READ	4
+#define INTERCEPT_CR8_READ	8
+#define INTERCEPT_CR0_WRITE	(16 + 0)
+#define INTERCEPT_CR3_WRITE	(16 + 3)
+#define INTERCEPT_CR4_WRITE	(16 + 4)
+#define INTERCEPT_CR8_WRITE	(16 + 8)
 
-#define INTERCEPT_DR0_MASK 1
-#define INTERCEPT_DR1_MASK (1 << 1)
-#define INTERCEPT_DR2_MASK (1 << 2)
-#define INTERCEPT_DR3_MASK (1 << 3)
-#define INTERCEPT_DR4_MASK (1 << 4)
-#define INTERCEPT_DR5_MASK (1 << 5)
-#define INTERCEPT_DR6_MASK (1 << 6)
-#define INTERCEPT_DR7_MASK (1 << 7)
+#define INTERCEPT_DR0_READ	0
+#define INTERCEPT_DR1_READ	1
+#define INTERCEPT_DR2_READ	2
+#define INTERCEPT_DR3_READ	3
+#define INTERCEPT_DR4_READ	4
+#define INTERCEPT_DR5_READ	5
+#define INTERCEPT_DR6_READ	6
+#define INTERCEPT_DR7_READ	7
+#define INTERCEPT_DR0_WRITE	(16 + 0)
+#define INTERCEPT_DR1_WRITE	(16 + 1)
+#define INTERCEPT_DR2_WRITE	(16 + 2)
+#define INTERCEPT_DR3_WRITE	(16 + 3)
+#define INTERCEPT_DR4_WRITE	(16 + 4)
+#define INTERCEPT_DR5_WRITE	(16 + 5)
+#define INTERCEPT_DR6_WRITE	(16 + 6)
+#define INTERCEPT_DR7_WRITE	(16 + 7)
 
 #define SVM_EVTINJ_VEC_MASK 0xff
 
@@ -246,6 +262,8 @@
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define	SVM_EXIT_READ_CR0 	0x000
 #define	SVM_EXIT_READ_CR3 	0x003
 #define	SVM_EXIT_READ_CR4 	0x004
@@ -316,6 +334,7 @@
 #define SVM_EXIT_MONITOR	0x08a
 #define SVM_EXIT_MWAIT		0x08b
 #define SVM_EXIT_MWAIT_COND	0x08c
+#define SVM_EXIT_XSETBV		0x08d
 #define SVM_EXIT_NPF  		0x400
 
 #define SVM_EXIT_ERR		-1
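
The vmcb_control_area change above folds the separate 16-bit CR-read/CR-write (and DR-read/DR-write) intercept words into single 32-bit fields, which is why the new defines give a plain register index for reads and (16 + n) for writes instead of one-hot masks. A stand-alone sketch of setting and testing bits in such a combined word follows; set_cr_intercept() and is_cr_intercepted() are illustrative helpers written for this example, not the KVM functions.

#include <stdio.h>
#include <stdint.h>

/* Same encoding as the defines above: reads in bits 0..15, writes in 16..31. */
#define DEMO_INTERCEPT_CR0_READ		0
#define DEMO_INTERCEPT_CR3_READ		3
#define DEMO_INTERCEPT_CR0_WRITE	(16 + 0)
#define DEMO_INTERCEPT_CR3_WRITE	(16 + 3)

static void set_cr_intercept(uint32_t *intercept_cr, int bit)
{
	*intercept_cr |= 1u << bit;
}

static int is_cr_intercepted(uint32_t intercept_cr, int bit)
{
	return (intercept_cr >> bit) & 1;
}

int main(void)
{
	uint32_t intercept_cr = 0;

	set_cr_intercept(&intercept_cr, DEMO_INTERCEPT_CR3_READ);
	set_cr_intercept(&intercept_cr, DEMO_INTERCEPT_CR0_WRITE);

	printf("intercept_cr = %#010x\n", (unsigned)intercept_cr);
	printf("CR0 write intercepted: %d\n",
	       is_cr_intercepted(intercept_cr, DEMO_INTERCEPT_CR0_WRITE));
	printf("CR0 read  intercepted: %d\n",
	       is_cr_intercepted(intercept_cr, DEMO_INTERCEPT_CR0_READ));
	return 0;
}
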
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644
index 1159e09..0000000
--- a/arch/x86/include/asm/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_64_H
-#define _ASM_X86_SYSTEM_64_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-static inline unsigned long read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#include <linux/irqflags.h>
-
-#endif /* _ASM_X86_SYSTEM_64_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index f66cda5..0310da6 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -30,6 +30,7 @@
 asmlinkage void stack_segment(void);
 asmlinkage void general_protection(void);
 asmlinkage void page_fault(void);
+asmlinkage void async_page_fault(void);
 asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void coprocessor_error(void);
 asmlinkage void alignment_check(void);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index ce1d54c..3e094af 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -176,7 +176,7 @@
 struct bau_msg_header {
 	unsigned int dest_subnodeid:6;	/* must be 0x10, for the LB */
 	/* bits 5:0 */
-	unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+	unsigned int base_dest_nodeid:15; /* nasid of the */
 	/* bits 20:6 */			  /* first bit in uvhub map */
 	unsigned int command:8;	/* message type */
 	/* bits 28:21 */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9f0cbd9..84471b8 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -66,15 +66,23 @@
 #define PIN_BASED_NMI_EXITING                   0x00000008
 #define PIN_BASED_VIRTUAL_NMIS                  0x00000020
 
+#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000002
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
+#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
 #define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
 #define VM_EXIT_SAVE_IA32_PAT			0x00040000
 #define VM_EXIT_LOAD_IA32_PAT			0x00080000
+#define VM_EXIT_SAVE_IA32_EFER                  0x00100000
+#define VM_EXIT_LOAD_IA32_EFER                  0x00200000
+#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
 
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000002
 #define VM_ENTRY_IA32E_MODE                     0x00000200
 #define VM_ENTRY_SMM                            0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
+#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
 #define VM_ENTRY_LOAD_IA32_PAT			0x00004000
+#define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
 
 /* VMCS Encodings */
 enum vmcs_field {
@@ -239,6 +247,7 @@
 #define EXIT_REASON_TASK_SWITCH         9
 #define EXIT_REASON_CPUID               10
 #define EXIT_REASON_HLT                 12
+#define EXIT_REASON_INVD                13
 #define EXIT_REASON_INVLPG              14
 #define EXIT_REASON_RDPMC               15
 #define EXIT_REASON_RDTSC               16
@@ -296,6 +305,12 @@
 #define GUEST_INTR_STATE_SMI		0x00000004
 #define GUEST_INTR_STATE_NMI		0x00000008
 
+/* GUEST_ACTIVITY_STATE flags */
+#define GUEST_ACTIVITY_ACTIVE		0
+#define GUEST_ACTIVITY_HLT		1
+#define GUEST_ACTIVITY_SHUTDOWN		2
+#define GUEST_ACTIVITY_WAIT_SIPI	3
+
 /*
  * Exit Qualifications for MOV for Control Register Access
  */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 8760cc6..f25bdf2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -42,6 +42,11 @@
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
+extern int m2p_add_override(unsigned long mfn, struct page *page);
+extern int m2p_remove_override(struct page *page);
+extern struct page *m2p_find_override(unsigned long mfn);
+extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
+
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
 	unsigned long mfn;
@@ -72,9 +77,6 @@
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
 
-	if (unlikely((mfn >> machine_to_phys_order) != 0))
-		return ~0;
-
 	pfn = 0;
 	/*
 	 * The array access can fail (e.g., device space beyond end of RAM).
@@ -83,6 +85,14 @@
 	 */
 	__get_user(pfn, &machine_to_phys_mapping[mfn]);
 
+	/*
+	 * If this appears to be a foreign mfn (because the pfn
+	 * doesn't map back to the mfn), then check the local override
+	 * table to see if there's a better pfn to use.
+	 */
+	if (get_phys_to_machine(pfn) != mfn)
+		pfn = m2p_find_override_pfn(mfn, pfn);
+
 	return pfn;
 }
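
mfn_to_pfn() no longer rejects out-of-range mfns up front; instead it reads the m2p entry and cross-checks the round trip: if get_phys_to_machine(pfn) does not give the original mfn back, the frame is probably foreign (for example, granted by another domain) and m2p_find_override_pfn() supplies a locally meaningful pfn. The toy program below models only that round-trip-and-override control flow with made-up tables; it is not Xen's real p2m/m2p machinery.

#include <stdio.h>

#define NFRAMES	8
#define INVALID	(~0u)

/* Toy translation tables: p2m[pfn] = mfn, m2p[mfn] = pfn. */
static unsigned p2m[NFRAMES] = { 4, 5, 6, 7, 0, 1, 2, 3 };
static unsigned m2p[NFRAMES] = { 4, 5, 6, 7, 0, 1, 2, 3 };

/* One local override entry standing in for the m2p override list. */
static unsigned override_mfn = 6, override_pfn = 7;

static unsigned find_override_pfn(unsigned mfn, unsigned pfn)
{
	return mfn == override_mfn ? override_pfn : pfn;
}

static unsigned mfn_to_pfn(unsigned mfn)
{
	unsigned pfn = m2p[mfn];

	/* Round-trip check: a foreign frame does not map back to mfn. */
	if (p2m[pfn] != mfn)
		pfn = find_override_pfn(mfn, pfn);
	return pfn;
}

int main(void)
{
	p2m[2] = INVALID;	/* pretend mfn 6 is foreign to this domain */

	printf("mfn 5 -> pfn %u (normal translation)\n", mfn_to_pfn(5));
	printf("mfn 6 -> pfn %u (via the override)\n", mfn_to_pfn(6));
	return 0;
}
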
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ec881c6..3e6e2d6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -72,6 +72,7 @@
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@
 		return 0;
 	}
 
-	if (acpi_skip_timer_override &&
-	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
-		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-		return 0;
+	if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+		if (acpi_skip_timer_override) {
+			printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+			return 0;
+		}
+		if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+			intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+			printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+		}
 	}
 
 	mp_override_legacy_irq(intsrc->source_irq,
@@ -509,6 +515,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
 {
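
Instead of dropping the BIOS IRQ0->pin2 override outright, the new acpi_fix_pin2_polarity path keeps it but clears the MADT polarity bits, so the entry falls back to "conforms to bus", which for the ISA timer means active high (the early-quirks change further down sets the flag for SBx00 revisions 0x40 and later). A tiny stand-alone illustration of that flag masking; the constant values follow the MADT layout (polarity in bits 0-1, trigger in bits 2-3) but the names are local to this example.

#include <stdio.h>
#include <stdint.h>

/* Local names; values follow the MADT interrupt-flag layout. */
#define MADT_POLARITY_MASK	0x3	/* bits 0-1 */
#define MADT_POLARITY_LOW	0x3	/* active low */
#define MADT_TRIGGER_LEVEL	0xc	/* bits 2-3: level triggered */

int main(void)
{
	/* A BIOS IRQ0 -> GSI2 override claiming active-low, level. */
	uint16_t inti_flags = MADT_POLARITY_LOW | MADT_TRIGGER_LEVEL;

	printf("before: flags = %#x\n", (unsigned)inti_flags);

	/* Clear the polarity so the entry "conforms to bus", i.e. the
	 * ISA default of active high; the trigger mode is left alone. */
	inti_flags &= ~MADT_POLARITY_MASK;

	printf("after:  flags = %#x\n", (unsigned)inti_flags);
	return 0;
}
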
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72a..68d1537 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-	stack_start.sp = temp_stack + sizeof(temp_stack);
+	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 	initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@
 	memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+	if (acpi_realmode)
+		set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+	return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1236085..7038b95 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -671,7 +671,7 @@
 
 	atomic_set(&stop_machine_first, 1);
 	wrote_text = 0;
-	stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
 }
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d2fdb08..57ca777 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1086,7 +1086,7 @@
 
 	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
-	/* Intialize the exclusion range if necessary */
+	/* Initialize the exclusion range if necessary */
 	for_each_iommu(iommu) {
 		if (iommu->exclusion_start &&
 		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
@@ -1353,7 +1353,7 @@
 
 /*
  * Allocates a new protection domain usable for the dma_ops functions.
- * It also intializes the page table and the address allocator data
+ * It also initializes the page table and the address allocator data
  * structures required for the dma_ops interface
  */
 static struct dma_ops_domain *dma_ops_domain_alloc(void)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index affacb5..0a99f71 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,13 @@
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
+	{ 0x00, 0x18, 0x20 },
+	{ 0xff, 0x00, 0x20 },
+	{ 0xfe, 0x00, 0x20 },
+	{ }
+};
+
 struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
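
The shared amd_nb_bus_dev_ranges[] table ends in an all-zero sentinel entry, which lets users iterate without ARRAY_SIZE(): a loop simply stops when dev_limit reads as zero, as the aperture_64.c hunks below now do (several of them keep an "i <" spelling of that condition, which still terminates at the sentinel but reads oddly). A stand-alone sketch of the pattern:

#include <stdio.h>

struct bus_dev_range {
	int bus;
	int dev_base;
	int dev_limit;
};

/* All-zero last entry is the sentinel (written as { } in the kernel table). */
static const struct bus_dev_range ranges[] = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ 0x00, 0x00, 0x00 },
};

int main(void)
{
	int i;

	/* No ARRAY_SIZE() needed: stop when dev_limit is zero. */
	for (i = 0; ranges[i].dev_limit; i++)
		printf("bus %#04x: devices %#04x..%#04x\n",
		       ranges[i].bus, ranges[i].dev_base,
		       ranges[i].dev_limit - 1);
	return 0;
}
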
 
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 7c9ab59..51d4e16 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@
 	memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
 	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+		adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
 		global_clock_event = &adev->evt;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
@@ -313,14 +313,16 @@
 	if (adev->irq == 0)
 		return;
 
+	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
+	/* APB timer irqs are set up as mp_irqs, timer is edge type */
+	__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+
 	if (system_state == SYSTEM_BOOTING) {
-		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
-		irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-		/* APB timer irqs are set up as mp_irqs, timer is edge type */
-		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
 		if (request_irq(adev->irq, apbt_interrupt_handler,
-				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
-				adev->name, adev)) {
+					IRQF_TIMER | IRQF_DISABLED |
+					IRQF_NOBALANCING,
+					adev->name, adev)) {
 			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
 			       adev->num);
 		}
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index dcd7c83..5955a78 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -39,18 +39,6 @@
 
 int fix_aperture __initdata = 1;
 
-struct bus_dev_range {
-	int bus;
-	int dev_base;
-	int dev_limit;
-};
-
-static struct bus_dev_range bus_dev_ranges[] __initdata = {
-	{ 0x00, 0x18, 0x20},
-	{ 0xff, 0x00, 0x20},
-	{ 0xfe, 0x00, 0x20}
-};
-
 static struct resource gart_resource = {
 	.name	= "GART",
 	.flags	= IORESOURCE_MEM,
@@ -294,13 +282,13 @@
 	search_agp_bridge(&agp_aper_order, &valid_agp);
 
 	fix = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -349,13 +337,13 @@
 		return;
 
 	/* disable them all at first */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -390,14 +378,14 @@
 
 	fix = 0;
 	node = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 		u32 ctl;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -505,7 +493,7 @@
 	}
 
 	/* Fix up the north bridges */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus, dev_base, dev_limit;
 
 		/*
@@ -514,9 +502,9 @@
 		 */
 		u32 ctl = DISTLBWALKPRB | aper_order << 1;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a51345b..76b96d7 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -684,7 +684,7 @@
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
 				       lapic_clockevent.shift);
 	lapic_clockevent.max_delta_ns =
-		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 	lapic_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xF, &lapic_clockevent);
 
@@ -1381,12 +1381,17 @@
 #endif
 
 	apic_pm_activate();
+}
+
+void __init bsp_end_local_APIC_setup(void)
+{
+	end_local_APIC_setup();
 
 	/*
 	 * Now that local APIC setup is completed for BP, configure the fault
 	 * handling for interrupt remapping.
 	 */
-	if (!smp_processor_id() && intr_remapping_enabled)
+	if (intr_remapping_enabled)
 		enable_drhd_fault_handling();
 
 }
@@ -1756,7 +1761,7 @@
 		enable_IO_APIC();
 #endif
 
-	end_local_APIC_setup();
+	bsp_end_local_APIC_setup();
 
 #ifdef CONFIG_X86_IO_APIC
 	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 72ec29e..79fd43c 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -68,7 +68,6 @@
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 
 	default:
@@ -96,7 +95,7 @@
 static __read_mostly struct notifier_block backtrace_notifier = {
 	.notifier_call          = arch_trigger_all_cpu_backtrace_handler,
 	.next                   = NULL,
-	.priority               = 1
+	.priority               = NMI_LOCAL_LOW_PRIOR,
 };
 
 static int __init register_trigger_all_cpu_backtrace(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 697dc34..ca9e2a35 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -4002,6 +4002,9 @@
 {
 	int i = 0;
 
+	if (nr_ioapics == 0)
+		return -1;
+
 	/* Find the IOAPIC that manages this GSI. */
 	for (i = 0; i < nr_ioapics; i++) {
 		if ((gsi >= mp_gsi_routing[i].gsi_base)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ecca5f4..bd16b58 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -378,7 +378,7 @@
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__this_cpu_write(x2apic_extra_bits, (pnode << 6));
+	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
 
 /*
@@ -641,7 +641,7 @@
  */
 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 {
-	if (reason != DIE_NMI_IPI)
+	if (reason != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	if (in_crash_kexec)
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 13a3891..452932d 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -106,8 +106,8 @@
 		addr += size;
 	}
 
-	printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
-	       num_scan_areas);
+	if (num_scan_areas)
+		printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
 }
 
 
@@ -143,12 +143,12 @@
 {
 	check_for_bios_corruption();
 	schedule_delayed_work(&bios_check_work,
-		round_jiffies_relative(corruption_check_period*HZ)); 
+		round_jiffies_relative(corruption_check_period*HZ));
 }
 
 static int start_periodic_check_for_corruption(void)
 {
-	if (!memory_corruption_check || corruption_check_period == 0)
+	if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
 		return 0;
 
 	printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac7..52c9364 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@
 {
 	if (c->x86 == 0x06) {
 		if (cpu_has(c, X86_FEATURE_EST))
-			printk(KERN_WARNING PFX "Warning: EST-capable CPU "
-			       "detected. The acpi-cpufreq module offers "
-			       "voltage scaling in addition of frequency "
+			printk_once(KERN_WARNING PFX "Warning: EST-capable "
+			       "CPU detected. The acpi-cpufreq module offers "
+			       "voltage scaling in addition to frequency "
 			       "scaling. You should use that instead of "
 			       "p4-clockmod, if possible.\n");
 		switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4f6f679..4a5a42b 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -195,7 +195,7 @@
 cmd_incomplete:
 	iowrite16(0, &pcch_hdr->status);
 	spin_unlock(&pcc_lock);
-	return -EINVAL;
+	return 0;
 }
 
 static int pcc_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65..c567dec 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,6 +1537,7 @@
 static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0, cpu;
+	int rv;
 
 	for_each_online_cpu(i) {
 		int rc;
@@ -1555,14 +1556,14 @@
 
 		cpb_capable = true;
 
-		register_cpu_notifier(&cpb_nb);
-
 		msrs = msrs_alloc();
 		if (!msrs) {
 			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
 			return -ENOMEM;
 		}
 
+		register_cpu_notifier(&cpb_nb);
+
 		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
 
 		for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@
 			(cpb_enabled ? "on" : "off"));
 	}
 
-	return cpufreq_register_driver(&cpufreq_amd64_driver);
+	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+		unregister_cpu_notifier(&cpb_nb);
+		msrs_free(msrs);
+		msrs = NULL;
+	}
+	return rv;
 }
 
 /* driver entry point for term */
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7283e98..ec2c19a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -45,6 +45,7 @@
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
 	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -66,6 +67,7 @@
 	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
 	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
 	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
+	{ 0x48, LVL_2,      MB(3) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
 	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
@@ -87,6 +89,7 @@
 	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
 	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
+	{ 0x80, LVL_2,      512 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index e7dbde7..a779719 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -25,6 +25,7 @@
 #include <linux/gfp.h>
 #include <asm/mce.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 /* Update fake mce registers on current CPU. */
 static void inject_mce(struct mce *m)
@@ -83,7 +84,7 @@
 	struct die_args *args = (struct die_args *)data;
 	int cpu = smp_processor_id();
 	struct mce *m = &__get_cpu_var(injectm);
-	if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
+	if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NOTIFY_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
 	if (m->inject_flags & MCJ_EXCEPTION)
@@ -95,7 +96,7 @@
 
 static struct notifier_block mce_raise_nb = {
 	.notifier_call = mce_raise_notify,
-	.priority = 1000,
+	.priority = NMI_LOCAL_NORMAL_PRIOR,
 };
 
 /* Inject mce on current CPU */
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index e12246f..6f8c5e9 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -59,6 +59,7 @@
 
 /* Callback to handle core threshold interrupts */
 int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
 
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3e..bebabec 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0492101..9d977a2 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1267,7 +1267,6 @@
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
@@ -1317,7 +1316,7 @@
 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 	.notifier_call		= perf_event_nmi_handler,
 	.next			= NULL,
-	.priority		= 1
+	.priority		= NMI_LOCAL_LOW_PRIOR,
 };
 
 static struct event_constraint unconstrained;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 81400b9..ff751a9 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@
 	 * if an event is shared accross the logical threads
 	 * the user needs special permissions to be able to use it
 	 */
-	if (p4_event_bind_map[v].shared) {
+	if (p4_ht_active() && p4_event_bind_map[v].shared) {
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
 	}
@@ -727,7 +727,8 @@
 		event->hw.config = p4_set_ht_bit(event->hw.config);
 
 	if (event->attr.type == PERF_TYPE_RAW) {
-
+		struct p4_event_bind *bind;
+		unsigned int esel;
 		/*
 		 * Clear bits we reserve to be managed by kernel itself
 		 * and never allowed from a user space
@@ -743,6 +744,13 @@
 		 * bits since we keep additional info here (for cache events and etc)
 		 */
 		event->hw.config |= event->attr.config;
+		bind = p4_config_get_bind(event->attr.config);
+		if (!bind) {
+			rc = -EINVAL;
+			goto out;
+		}
+		esel = P4_OPCODE_ESEL(bind->opcode);
+		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
 	}
 
 	rc = x86_setup_perfctr(event);
@@ -753,19 +761,26 @@
 
 static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
 {
-	int overflow = 0;
-	u32 low, high;
+	u64 v;
 
-	rdmsr(hwc->config_base + hwc->idx, low, high);
-
-	/* we need to check high bit for unflagged overflows */
-	if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
-		overflow = 1;
-		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
-			((u64)low) & ~P4_CCCR_OVF);
+	/* an official way for overflow indication */
+	rdmsrl(hwc->config_base + hwc->idx, v);
+	if (v & P4_CCCR_OVF) {
+		wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+		return 1;
 	}
 
-	return overflow;
+	/*
+	 * In some circumstances the overflow might issue an NMI but did
+	 * not set P4_CCCR_OVF bit. Because a counter holds a negative value
+	 * we simply check for high bit being set, if it's cleared it means
+	 * the counter has reached zero value and continued counting before
+	 * real NMI signal was received:
+	 */
+	if (!(v & ARCH_P4_UNFLAGGED_BIT))
+		return 1;
+
+	return 0;
 }
 
 static void p4_pmu_disable_pebs(void)
@@ -1152,9 +1167,9 @@
 	 */
 	.num_counters		= ARCH_P4_MAX_CCCR,
 	.apic			= 1,
-	.cntval_bits		= 40,
-	.cntval_mask		= (1ULL << 40) - 1,
-	.max_period		= (1ULL << 39) - 1,
+	.cntval_bits		= ARCH_P4_CNTRVAL_BITS,
+	.cntval_mask		= ARCH_P4_CNTRVAL_MASK,
+	.max_period		= (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
 	.hw_config		= p4_hw_config,
 	.schedule_events	= p4_pmu_schedule_events,
 	/*
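
The reworked p4_pmu_clear_cccr_ovf() leans on the way perf arms a counter: it is loaded with the negated sampling period and counts up toward zero, so while the period is still running the counter's top bit stays set. If an NMI arrives with P4_CCCR_OVF clear but the top bit also clear, the counter must already have crossed zero, i.e. a genuine but unflagged overflow. A small self-contained illustration of that sign-bit test, assuming the 40-bit counter width the replaced literals encoded:

#include <stdio.h>
#include <stdint.h>

#define CNTRVAL_BITS	40
#define CNTRVAL_MASK	((1ULL << CNTRVAL_BITS) - 1)
#define TOP_BIT		(1ULL << (CNTRVAL_BITS - 1))

/* 1 if the counter has crossed zero since it was (re)armed. */
static int counter_overflowed(uint64_t counter)
{
	return !(counter & TOP_BIT);
}

int main(void)
{
	uint64_t period = 1000;
	/* perf arms the counter with the negated period, truncated to 40 bits */
	uint64_t counter = (0 - period) & CNTRVAL_MASK;

	printf("armed:      %#llx  overflowed=%d\n",
	       (unsigned long long)counter, counter_overflowed(counter));

	counter = (counter + period + 5) & CNTRVAL_MASK;	/* counted past zero */
	printf("after wrap: %#llx  overflowed=%d\n",
	       (unsigned long long)counter, counter_overflowed(counter));
	return 0;
}
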
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8474c99..df20723 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -197,14 +197,8 @@
  */
 void dump_stack(void)
 {
-	unsigned long bp = 0;
 	unsigned long stack;
 
-#ifdef CONFIG_FRAME_POINTER
-	if (!bp)
-		get_bp(bp);
-#endif
-
 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
@@ -240,6 +234,7 @@
 	bust_spinlocks(1);
 	return flags;
 }
+EXPORT_SYMBOL_GPL(oops_begin);
 
 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 {
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6410133..a6b6fcf 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -149,13 +149,13 @@
 	unsigned used = 0;
 	struct thread_info *tinfo;
 	int graph = 0;
+	unsigned long dummy;
 	unsigned long bp;
 
 	if (!task)
 		task = current;
 
 	if (!stack) {
-		unsigned long dummy;
 		stack = &dummy;
 		if (task && task != current)
 			stack = (unsigned long *)task->thread.sp;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 0c2b7ef..294f26d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -14,6 +14,7 @@
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>
 #include <linux/firmware-map.h>
 #include <linux/memblock.h>
 
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 76b8cd9..9efbdcc 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -143,15 +143,10 @@
 
 static u32 __init ati_sbx00_rev(int num, int slot, int func)
 {
-	u32 old, d;
+	u32 d;
 
-	d = read_pci_config(num, slot, func, 0x70);
-	old = d;
-	d &= ~(1<<8);
-	write_pci_config(num, slot, func, 0x70, d);
 	d = read_pci_config(num, slot, func, 0x8);
 	d &= 0xff;
-	write_pci_config(num, slot, func, 0x70, old);
 
 	return d;
 }
@@ -160,11 +155,14 @@
 {
 	u32 d, rev;
 
-	if (acpi_use_timer_override)
+	rev = ati_sbx00_rev(num, slot, func);
+	if (rev >= 0x40)
+		acpi_fix_pin2_polarity = 1;
+
+	if (rev > 0x13)
 		return;
 
-	rev = ati_sbx00_rev(num, slot, func);
-	if (rev > 0x13)
+	if (acpi_use_timer_override)
 		return;
 
 	/* check for IRQ0 interrupt swap */
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 591e601..c8b4efa 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1406,6 +1406,16 @@
 	CFI_ENDPROC
 END(general_protection)
 
+#ifdef CONFIG_KVM_GUEST
+ENTRY(async_page_fault)
+	RING0_EC_FRAME
+	pushl $do_async_page_fault
+	CFI_ADJUST_CFA_OFFSET 4
+	jmp error_code
+	CFI_ENDPROC
+END(async_page_fault)
+#endif
+
 /*
  * End of kprobes section
  */
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e3ba417..aed1ffb 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -299,17 +299,21 @@
 ENTRY(save_args)
 	XCPT_FRAME
 	cld
-	movq_cfi rdi, RDI+16-ARGOFFSET
-	movq_cfi rsi, RSI+16-ARGOFFSET
-	movq_cfi rdx, RDX+16-ARGOFFSET
-	movq_cfi rcx, RCX+16-ARGOFFSET
-	movq_cfi rax, RAX+16-ARGOFFSET
-	movq_cfi  r8,  R8+16-ARGOFFSET
-	movq_cfi  r9,  R9+16-ARGOFFSET
-	movq_cfi r10, R10+16-ARGOFFSET
-	movq_cfi r11, R11+16-ARGOFFSET
+	/*
+	 * start from rbp in pt_regs and jump over
+	 * return address.
+	 */
+	movq_cfi rdi, RDI+8-RBP
+	movq_cfi rsi, RSI+8-RBP
+	movq_cfi rdx, RDX+8-RBP
+	movq_cfi rcx, RCX+8-RBP
+	movq_cfi rax, RAX+8-RBP
+	movq_cfi  r8,  R8+8-RBP
+	movq_cfi  r9,  R9+8-RBP
+	movq_cfi r10, R10+8-RBP
+	movq_cfi r11, R11+8-RBP
 
-	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
+	leaq -RBP+8(%rsp),%rdi	/* arg1 for handler */
 	movq_cfi rbp, 8		/* push %rbp */
 	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
 	testl $3, CS(%rdi)
@@ -782,8 +786,9 @@
 
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
-	subq $ORIG_RAX-ARGOFFSET+8, %rsp
-	CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
+	/* reserve pt_regs for scratch regs and rbp */
+	subq $ORIG_RAX-RBP, %rsp
+	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
 	call save_args
 	PARTIAL_FRAME 0
 	call \func
@@ -808,9 +813,14 @@
 	TRACE_IRQS_OFF
 	decl PER_CPU_VAR(irq_count)
 	leaveq
+
 	CFI_RESTORE		rbp
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
+
+	/* we did not save rbx, restore only from ARGOFFSET */
+	addq $8, %rsp
+	CFI_ADJUST_CFA_OFFSET	-8
 exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
@@ -1319,6 +1329,9 @@
 #endif
 errorentry general_protection do_general_protection
 errorentry page_fault do_page_fault
+#ifdef CONFIG_KVM_GUEST
+errorentry async_page_fault do_async_page_fault
+#endif
 #ifdef CONFIG_X86_MCE
 paranoidzeroentry machine_check *machine_check_vector(%rip)
 #endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 9f54b20..767d6c4 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@
  */
 __HEAD
 ENTRY(startup_32)
+	movl pa(stack_start),%ecx
+
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 		us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 2:
+	leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -126,7 +130,7 @@
 	movsl
 	movl pa(boot_params) + NEW_CL_POINTER,%esi
 	andl %esi,%esi
-	jz 1f			# No comand line
+	jz 1f			# No command line
 	movl $pa(boot_command_line),%edi
 	movl $(COMMAND_LINE_SIZE/4),%ecx
 	rep
@@ -145,8 +149,6 @@
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl pa(stack_start),%ecx
+	movl %eax,%ss
+	leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
-	/* Set up the stack pointer */
-	lss stack_start,%esp
+	/* Shift the stack pointer to a virtual address */
+	addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
@@ -360,9 +365,7 @@
 
 #ifdef CONFIG_SMP
 	cmpb $0, ready
-	jz  1f				/* Initial CPU cleans BSS */
-	jmp checkCPUtype
-1:
+	jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-#ifdef CONFIG_SMP
-	movb ready, %cl
 	movb $1, ready
-	cmpb $0,%cl		# the first CPU calls start_kernel
-	je   1f
-	movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
 	jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
-	.long __BOOT_DS
-
-ready:	.byte 0
 
 early_recursion_flag:
 	.long 0
 
+ready:	.byte 0
+
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 58bb239..e60c38c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -169,6 +169,7 @@
 	set_stopped_child_used_math(tsk);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(init_fpu);
 
 /*
  * The xstateregs_active() routine is the same as the fpregs_active() routine,
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3a43caa..387b6a0 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/of.h>
 #include <linux/seq_file.h>
 #include <linux/smp.h>
 #include <linux/ftrace.h>
@@ -275,6 +276,15 @@
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 
+#ifdef CONFIG_OF
+unsigned int irq_create_of_mapping(struct device_node *controller,
+		const u32 *intspec, unsigned int intsize)
+{
+	return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+#endif
+
 #ifdef CONFIG_HOTPLUG_CPU
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
@@ -357,7 +367,8 @@
 		if (irr  & (1 << (vector % 32))) {
 			irq = __this_cpu_read(vector_irq[vector]);
 
-			data = irq_get_irq_data(irq);
+			desc = irq_to_desc(irq);
+			data = &desc->irq_data;
 			raw_spin_lock(&desc->lock);
 			if (data->chip->irq_retrigger)
 				data->chip->irq_retrigger(data);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 48ff6dc..9974d21 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -129,8 +129,7 @@
 	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREAD_FLAGS,
 					       THREAD_ORDER));
-	irqctx->tinfo.task		= NULL;
-	irqctx->tinfo.exec_domain	= NULL;
+	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
 	irqctx->tinfo.cpu		= cpu;
 	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
@@ -140,10 +139,8 @@
 	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREAD_FLAGS,
 					       THREAD_ORDER));
-	irqctx->tinfo.task		= NULL;
-	irqctx->tinfo.exec_domain	= NULL;
+	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
 	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.preempt_count	= 0;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
 
 	per_cpu(softirq_ctx, cpu) = irqctx;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index cd21b65..a413000 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -48,6 +48,7 @@
 #include <asm/apicdef.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 {
@@ -525,10 +526,6 @@
 		}
 		return NOTIFY_DONE;
 
-	case DIE_NMI_IPI:
-		/* Just ignore, we will handle the roundup on DIE_NMI. */
-		return NOTIFY_DONE;
-
 	case DIE_NMIUNKNOWN:
 		if (was_in_debug_nmi[raw_smp_processor_id()]) {
 			was_in_debug_nmi[raw_smp_processor_id()] = 0;
@@ -606,7 +603,7 @@
 	/*
 	 * Lowest-prio notifier priority, we want to be notified last:
 	 */
-	.priority	= -INT_MAX,
+	.priority	= NMI_LOCAL_LOW_PRIOR,
 };
 
 /**
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 63b0ec8..8dc4466 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -27,16 +27,37 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/hardirq.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/hash.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kprobes.h>
 #include <asm/timer.h>
+#include <asm/cpu.h>
+#include <asm/traps.h>
+#include <asm/desc.h>
+#include <asm/tlbflush.h>
 
 #define MMU_QUEUE_SIZE 1024
 
+static int kvmapf = 1;
+
+static int parse_no_kvmapf(char *arg)
+{
+	kvmapf = 0;
+	return 0;
+}
+
+early_param("no-kvmapf", parse_no_kvmapf);
+
 struct kvm_para_state {
 	u8 mmu_queue[MMU_QUEUE_SIZE];
 	int mmu_queue_len;
 };
 
 static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 
 static struct kvm_para_state *kvm_para_state(void)
 {
@@ -50,6 +71,195 @@
 {
 }
 
+#define KVM_TASK_SLEEP_HASHBITS 8
+#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
+
+struct kvm_task_sleep_node {
+	struct hlist_node link;
+	wait_queue_head_t wq;
+	u32 token;
+	int cpu;
+	bool halted;
+	struct mm_struct *mm;
+};
+
+static struct kvm_task_sleep_head {
+	spinlock_t lock;
+	struct hlist_head list;
+} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
+
+static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
+						  u32 token)
+{
+	struct hlist_node *p;
+
+	hlist_for_each(p, &b->list) {
+		struct kvm_task_sleep_node *n =
+			hlist_entry(p, typeof(*n), link);
+		if (n->token == token)
+			return n;
+	}
+
+	return NULL;
+}
+
+void kvm_async_pf_task_wait(u32 token)
+{
+	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+	struct kvm_task_sleep_node n, *e;
+	DEFINE_WAIT(wait);
+	int cpu, idle;
+
+	cpu = get_cpu();
+	idle = idle_cpu(cpu);
+	put_cpu();
+
+	spin_lock(&b->lock);
+	e = _find_apf_task(b, token);
+	if (e) {
+		/* dummy entry exists -> wake up was delivered ahead of PF */
+		hlist_del(&e->link);
+		kfree(e);
+		spin_unlock(&b->lock);
+		return;
+	}
+
+	n.token = token;
+	n.cpu = smp_processor_id();
+	n.mm = current->active_mm;
+	n.halted = idle || preempt_count() > 1;
+	atomic_inc(&n.mm->mm_count);
+	init_waitqueue_head(&n.wq);
+	hlist_add_head(&n.link, &b->list);
+	spin_unlock(&b->lock);
+
+	for (;;) {
+		if (!n.halted)
+			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (hlist_unhashed(&n.link))
+			break;
+
+		if (!n.halted) {
+			local_irq_enable();
+			schedule();
+			local_irq_disable();
+		} else {
+			/*
+			 * We cannot reschedule. So halt.
+			 */
+			native_safe_halt();
+			local_irq_disable();
+		}
+	}
+	if (!n.halted)
+		finish_wait(&n.wq, &wait);
+
+	return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
+
+static void apf_task_wake_one(struct kvm_task_sleep_node *n)
+{
+	hlist_del_init(&n->link);
+	if (!n->mm)
+		return;
+	mmdrop(n->mm);
+	if (n->halted)
+		smp_send_reschedule(n->cpu);
+	else if (waitqueue_active(&n->wq))
+		wake_up(&n->wq);
+}
+
+static void apf_task_wake_all(void)
+{
+	int i;
+
+	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
+		struct hlist_node *p, *next;
+		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+		spin_lock(&b->lock);
+		hlist_for_each_safe(p, next, &b->list) {
+			struct kvm_task_sleep_node *n =
+				hlist_entry(p, typeof(*n), link);
+			if (n->cpu == smp_processor_id())
+				apf_task_wake_one(n);
+		}
+		spin_unlock(&b->lock);
+	}
+}
+
+void kvm_async_pf_task_wake(u32 token)
+{
+	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+	struct kvm_task_sleep_node *n;
+
+	if (token == ~0) {
+		apf_task_wake_all();
+		return;
+	}
+
+again:
+	spin_lock(&b->lock);
+	n = _find_apf_task(b, token);
+	if (!n) {
+		/*
+		 * async PF was not yet handled.
+		 * Add dummy entry for the token.
+		 */
+		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		if (!n) {
+			/*
+			 * Allocation failed! Busy wait while other cpu
+			 * handles async PF.
+			 */
+			spin_unlock(&b->lock);
+			cpu_relax();
+			goto again;
+		}
+		n->token = token;
+		n->cpu = smp_processor_id();
+		n->mm = NULL;
+		init_waitqueue_head(&n->wq);
+		hlist_add_head(&n->link, &b->list);
+	} else
+		apf_task_wake_one(n);
+	spin_unlock(&b->lock);
+	return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+
+u32 kvm_read_and_reset_pf_reason(void)
+{
+	u32 reason = 0;
+
+	if (__get_cpu_var(apf_reason).enabled) {
+		reason = __get_cpu_var(apf_reason).reason;
+		__get_cpu_var(apf_reason).reason = 0;
+	}
+
+	return reason;
+}
+EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
+
+dotraplinkage void __kprobes
+do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+	switch (kvm_read_and_reset_pf_reason()) {
+	default:
+		do_page_fault(regs, error_code);
+		break;
+	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+		/* page is swapped out by the host. */
+		kvm_async_pf_task_wait((u32)read_cr2());
+		break;
+	case KVM_PV_REASON_PAGE_READY:
+		kvm_async_pf_task_wake((u32)read_cr2());
+		break;
+	}
+}
+
 static void kvm_mmu_op(void *buffer, unsigned len)
 {
 	int r;
@@ -231,10 +441,117 @@
 #endif
 }
 
-void __init kvm_guest_init(void)
+void __cpuinit kvm_guest_cpu_init(void)
 {
 	if (!kvm_para_available())
 		return;
 
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
+		u64 pa = __pa(&__get_cpu_var(apf_reason));
+
+#ifdef CONFIG_PREEMPT
+		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
+		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
+		__get_cpu_var(apf_reason).enabled = 1;
+		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
+		       smp_processor_id());
+	}
+}
+
+static void kvm_pv_disable_apf(void *unused)
+{
+	if (!__get_cpu_var(apf_reason).enabled)
+		return;
+
+	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
+	__get_cpu_var(apf_reason).enabled = 0;
+
+	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
+	       smp_processor_id());
+}
+
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+				unsigned long code, void *unused)
+{
+	if (code == SYS_RESTART)
+		on_each_cpu(kvm_pv_disable_apf, NULL, 1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kvm_pv_reboot_nb = {
+	.notifier_call = kvm_pv_reboot_notify,
+};
+
+#ifdef CONFIG_SMP
+static void __init kvm_smp_prepare_boot_cpu(void)
+{
+#ifdef CONFIG_KVM_CLOCK
+	WARN_ON(kvm_register_clock("primary cpu clock"));
+#endif
+	kvm_guest_cpu_init();
+	native_smp_prepare_boot_cpu();
+}
+
+static void kvm_guest_cpu_online(void *dummy)
+{
+	kvm_guest_cpu_init();
+}
+
+static void kvm_guest_cpu_offline(void *dummy)
+{
+	kvm_pv_disable_apf(NULL);
+	apf_task_wake_all();
+}
+
+static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
+	.notifier_call  = kvm_cpu_notify,
+};
+#endif
+
+static void __init kvm_apf_trap_init(void)
+{
+	set_intr_gate(14, &async_page_fault);
+}
+
+void __init kvm_guest_init(void)
+{
+	int i;
+
+	if (!kvm_para_available())
+		return;
+
 	paravirt_ops_setup();
+	register_reboot_notifier(&kvm_pv_reboot_nb);
+	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
+		spin_lock_init(&async_pf_sleepers[i].lock);
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
+		x86_init.irqs.trap_init = kvm_apf_trap_init;
+
+#ifdef CONFIG_SMP
+	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
+	register_cpu_notifier(&kvm_cpu_notifier);
+#else
+	kvm_guest_cpu_init();
+#endif
 }
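
The subtle case in the async-PF plumbing above is the wake-up racing ahead of the fault: the host can report PAGE_READY for a token before the vcpu ever sleeps on it, so kvm_async_pf_task_wake() leaves a dummy node in the per-bucket hash list and kvm_async_pf_task_wait() then finds it, unhashes it and returns without sleeping. The toy program below reproduces just that token/bucket handshake, single-threaded and without the locking, mm refcounting or halt handling; it demonstrates only the data-structure logic, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 8

struct node {
	struct node *next;
	unsigned int token;
	int dummy;		/* set when the wake-up beat the page fault */
};

static struct node *buckets[NBUCKETS];

static struct node **find_node(unsigned int token)
{
	struct node **pp;

	for (pp = &buckets[token % NBUCKETS]; *pp; pp = &(*pp)->next)
		if ((*pp)->token == token)
			return pp;
	return NULL;
}

static void unlink_and_free(struct node **pp)
{
	struct node *n = *pp;

	*pp = n->next;
	free(n);
}

/* Host reports "page ready": wake the sleeper, or leave a dummy entry. */
static void task_wake(unsigned int token)
{
	struct node **pp = find_node(token), *n;

	if (pp) {
		printf("token %u: waking the sleeper\n", token);
		unlink_and_free(pp);
		return;
	}
	n = malloc(sizeof(*n));
	if (!n)
		exit(1);
	n->token = token;
	n->dummy = 1;
	n->next = buckets[token % NBUCKETS];
	buckets[token % NBUCKETS] = n;
	printf("token %u: no sleeper yet, queued a dummy entry\n", token);
}

/* Guest takes the "page not present" fault for this token. */
static void task_wait(unsigned int token)
{
	struct node **pp = find_node(token);

	if (pp && (*pp)->dummy) {
		printf("token %u: wake-up already delivered, not sleeping\n", token);
		unlink_and_free(pp);
		return;
	}
	printf("token %u: would sleep until task_wake()\n", token);
}

int main(void)
{
	task_wake(42);	/* PAGE_READY delivered first...              */
	task_wait(42);	/* ...so the fault handler returns right away */
	task_wait(7);	/* normal ordering: this one would sleep      */
	return 0;
}
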
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ca43ce3..f98d3ea 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -125,7 +125,7 @@
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static int kvm_register_clock(char *txt)
+int kvm_register_clock(char *txt)
 {
 	int cpu = smp_processor_id();
 	int low, high, ret;
@@ -152,14 +152,6 @@
 }
 #endif
 
-#ifdef CONFIG_SMP
-static void __init kvm_smp_prepare_boot_cpu(void)
-{
-	WARN_ON(kvm_register_clock("primary cpu clock"));
-	native_smp_prepare_boot_cpu();
-}
-#endif
-
 /*
  * After the clock is registered, the host will keep writing to the
  * registered memory location. If the guest happens to shutdown, this memory
@@ -206,9 +198,6 @@
 	x86_cpuinit.setup_percpu_clockev =
 		kvm_setup_secondary_clock;
 #endif
-#ifdef CONFIG_SMP
-	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-#endif
 	machine_ops.shutdown  = kvm_shutdown;
 #ifdef CONFIG_KEXEC
 	machine_ops.crash_shutdown  = kvm_crash_shutdown;
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 8f29560..ab23f1a 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -37,20 +37,11 @@
 
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	if (!size)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	size = PAGE_ALIGN(size);
-	if (size > MODULES_LEN)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
-					PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+				-1, __builtin_return_address(0));
 }
 
 /* Free memory returned from module_alloc */
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c5b2500..869e1ae 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -421,8 +421,11 @@
 	.set_pte = native_set_pte,
 	.set_pte_at = native_set_pte_at,
 	.set_pmd = native_set_pmd,
+	.set_pmd_at = native_set_pmd_at,
 	.pte_update = paravirt_nop,
 	.pte_update_defer = paravirt_nop,
+	.pmd_update = paravirt_nop,
+	.pmd_update_defer = paravirt_nop,
 
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 09c08a1..ff45541 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -22,11 +23,6 @@
 #include <asm/i387.h>
 #include <asm/debugreg.h>
 
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
-
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
@@ -96,21 +92,31 @@
 
 void show_regs_common(void)
 {
-	const char *board, *product;
+	const char *vendor, *product, *board;
 
-	board = dmi_get_system_info(DMI_BOARD_NAME);
-	if (!board)
-		board = "";
+	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+	if (!vendor)
+		vendor = "";
 	product = dmi_get_system_info(DMI_PRODUCT_NAME);
 	if (!product)
 		product = "";
 
+	/* Board Name is optional */
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+
 	printk(KERN_CONT "\n");
-	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version, board, product);
+		init_utsname()->version);
+	printk(KERN_CONT " ");
+	printk(KERN_CONT "%s %s", vendor, product);
+	if (board) {
+		printk(KERN_CONT "/");
+		printk(KERN_CONT "%s", board);
+	}
+	printk(KERN_CONT "\n");
 }
 
 void flush_thread(void)
@@ -327,7 +333,7 @@
 /*
  * Idle related variables and functions
  */
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
 /*
@@ -386,6 +392,8 @@
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -443,8 +451,6 @@
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
-	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
 		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
@@ -471,6 +477,8 @@
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
@@ -503,17 +511,16 @@
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
-static int __cpuinitdata force_mwait;
 
 #define MWAIT_INFO			0x05
 #define MWAIT_ECX_EXTENDED_INFO		0x01
 #define MWAIT_EDX_C1			0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (force_mwait)
+	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 		return 1;
 
 	if (c->cpuid_level < MWAIT_INFO)
@@ -633,9 +640,10 @@
 	if (!strcmp(str, "poll")) {
 		printk("using polling idle threads.\n");
 		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else if (!strcmp(str, "halt")) {
+		boot_option_idle_override = IDLE_POLL;
+	} else if (!strcmp(str, "mwait")) {
+		boot_option_idle_override = IDLE_FORCE_MWAIT;
+	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
 		 * forced to be used for CPU idle. In such case CPU C2/C3
@@ -644,8 +652,7 @@
 		 * the boot_option_idle_override.
 		 */
 		pm_idle = default_idle;
-		idle_halt = 1;
-		return 0;
+		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
 		 * If the boot option of "idle=nomwait" is added,
@@ -653,12 +660,10 @@
 		 * states. In such case it won't touch the variable
 		 * of boot_option_idle_override.
 		 */
-		idle_nomwait = 1;
-		return 0;
+		boot_option_idle_override = IDLE_NOMWAIT;
 	} else
 		return -1;
 
-	boot_option_idle_override = 1;
 	return 0;
 }
 early_param("idle", idle_setup);
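
Collapsing idle_halt, idle_nomwait, force_mwait and the old 0/1 boot_option_idle_override into one enum means every "idle=" variant maps to exactly one state that other code (mwait_usable() and the ACPI idle paths) can test. A stand-alone sketch of that mapping, mirroring idle_setup() minus the pm_idle assignments; unknown strings fall back to IDLE_NO_OVERRIDE here, whereas the real parser returns an error:

#include <stdio.h>
#include <string.h>

enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT,
			  IDLE_POLL, IDLE_FORCE_MWAIT };

static enum idle_boot_override parse_idle(const char *str)
{
	if (!strcmp(str, "poll"))
		return IDLE_POLL;
	if (!strcmp(str, "mwait"))
		return IDLE_FORCE_MWAIT;
	if (!strcmp(str, "halt"))
		return IDLE_HALT;
	if (!strcmp(str, "nomwait"))
		return IDLE_NOMWAIT;
	return IDLE_NO_OVERRIDE;	/* unknown option: no override */
}

int main(void)
{
	const char *opts[] = { "poll", "mwait", "halt", "nomwait", "bogus" };
	unsigned int i;

	for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
		printf("idle=%-8s -> %d\n", opts[i], parse_idle(opts[i]));
	return 0;
}
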
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 4b9befa..8d12878 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -57,8 +57,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 /*
@@ -113,8 +111,6 @@
 			stop_critical_timings();
 			pm_idle();
 			start_critical_timings();
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4c818a7..bd387e8 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,8 +51,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
@@ -141,10 +139,6 @@
 			pm_idle();
 			start_critical_timings();
 
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT,
-				       smp_processor_id());
-
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index c495aa8d..715037c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -18,6 +18,7 @@
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
 #include <asm/cpu.h>
+#include <asm/nmi.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/ctype.h>
@@ -284,6 +285,14 @@
 			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
 		},
 	},
+	{	/* Handle problems with rebooting on VersaLogic Menlow boards */
+		.callback = set_bios_reboot,
+		.ident = "VersaLogic Menlow based board",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+			DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+		},
+	},
 	{ }
 };
 
@@ -747,7 +756,7 @@
 {
 	int cpu;
 
-	if (val != DIE_NMI_IPI)
+	if (val != DIE_NMI)
 		return NOTIFY_OK;
 
 	cpu = raw_smp_processor_id();
@@ -778,6 +787,8 @@
 
 static struct notifier_block crash_nmi_nb = {
 	.notifier_call = crash_nmi_callback,
+	/* we want to be the first one called */
+	.priority = NMI_LOCAL_HIGH_PRIOR+1,
 };
 
 /* Halt all other CPUs, calling the specified function on each of them
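The reboot.c hunk switches the crash handler to the plain DIE_NMI event and raises its notifier priority so it runs before the other NMI consumers; on a die notifier chain, callbacks are invoked in descending .priority order. A sketch of that registration pattern, with a placeholder callback body that is not part of this patch:

	#include <linux/notifier.h>
	#include <asm/kdebug.h>
	#include <asm/nmi.h>

	/* Placeholder handler: higher .priority values are called earlier. */
	static int first_nmi_cb(struct notifier_block *nb, unsigned long val,
				void *data)
	{
		if (val != DIE_NMI)
			return NOTIFY_OK;	/* not an NMI, pass it along */
		/* ... urgent per-CPU work goes here ... */
		return NOTIFY_STOP;		/* keep lower-priority handlers from running */
	}

	static struct notifier_block first_nmi_nb = {
		.notifier_call	= first_nmi_cb,
		.priority	= NMI_LOCAL_HIGH_PRIOR + 1,	/* ahead of the local NMI users */
	};

	/* e.g. from an __init function: register_die_notifier(&first_nmi_nb); */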
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 1cfbbfc..6f39cab 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -76,7 +76,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c7149c9..08776a9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,12 +97,12 @@
  */
 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
-void cpu_hotplug_driver_lock()
+void cpu_hotplug_driver_lock(void)
 {
         mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
-void cpu_hotplug_driver_unlock()
+void cpu_hotplug_driver_unlock(void)
 {
         mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
@@ -638,7 +638,7 @@
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long)stack_start.sp);
+			 stack_start);
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	stack_start  = c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
@@ -1060,7 +1060,7 @@
 
 		connect_bsp_APIC();
 		setup_local_APIC();
-		end_local_APIC_setup();
+		bsp_end_local_APIC_setup();
 		return -1;
 	}
 
@@ -1137,7 +1137,7 @@
 	if (!skip_ioapic_setup && nr_ioapics)
 		enable_IO_APIC();
 
-	end_local_APIC_setup();
+	bsp_end_local_APIC_setup();
 
 	map_cpu_to_logical_apicid();
 
@@ -1402,8 +1402,9 @@
 	unsigned int highest_subcstate = 0;
 	int i;
 	void *mwait_ptr;
+	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
+	if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
 		return;
 	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index c2f1b26..998e972 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -133,7 +133,7 @@
 	pmd = pmd_alloc(&tboot_mm, pud, vaddr);
 	if (!pmd)
 		return -1;
-	pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
+	pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
 	if (!pte)
 		return -1;
 	set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c76aaca..b9b6716 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -84,6 +84,11 @@
 static int ignore_nmis;
 
 int unknown_nmi_panic;
+/*
+ * Prevent the NMI reason port (0x61) from being accessed simultaneously;
+ * it may only be used from the NMI handler.
+ */
+static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
@@ -310,15 +315,15 @@
 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 
 static notrace __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs *regs)
+pci_serr_error(unsigned char reason, struct pt_regs *regs)
 {
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-			reason, smp_processor_id());
+	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
-	printk(KERN_EMERG
-		"You have some hardware problem, likely on the PCI bus.\n");
-
+	/*
+	 * On some machines, the PCI SERR line is used to report memory
+	 * errors. EDAC makes use of it.
+	 */
 #if defined(CONFIG_EDAC)
 	if (edac_handler_set()) {
 		edac_atomic_assert_error();
@@ -329,11 +334,11 @@
 	if (panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 
-	/* Clear and disable the memory parity error line. */
-	reason = (reason & 0xf) | 4;
-	outb(reason, 0x61);
+	/* Clear and disable the PCI SERR error line. */
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -341,15 +346,17 @@
 {
 	unsigned long i;
 
-	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+	pr_emerg(
+	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 	show_registers(regs);
 
 	if (panic_on_io_nmi)
 		panic("NMI IOCK error: Not continuing");
 
 	/* Re-enable the IOCK line, wait for a few seconds */
-	reason = (reason & 0xf) | 8;
-	outb(reason, 0x61);
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 
 	i = 20000;
 	while (--i) {
@@ -357,8 +364,8 @@
 		udelay(100);
 	}
 
-	reason &= ~8;
-	outb(reason, 0x61);
+	reason &= ~NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -377,57 +384,50 @@
 		return;
 	}
 #endif
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-			reason, smp_processor_id());
+	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
-	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
+	pr_emerg("Do you have a strange power saving mode enabled?\n");
 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 }
 
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
-	int cpu;
 
-	cpu = smp_processor_id();
+	/*
+	 * CPU-specific NMI must be processed before non-CPU-specific
+	 * NMI, otherwise we may lose it, because the CPU-specific
+	 * NMI cannot be detected/processed on other CPUs.
+	 */
+	if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+		return;
 
-	/* Only the BSP gets external NMIs from the system. */
-	if (!cpu)
-		reason = get_nmi_reason();
+	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
+	raw_spin_lock(&nmi_reason_lock);
+	reason = get_nmi_reason();
 
-	if (!(reason & 0xc0)) {
-		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-								== NOTIFY_STOP)
-			return;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-		if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-							== NOTIFY_STOP)
-			return;
+	if (reason & NMI_REASON_MASK) {
+		if (reason & NMI_REASON_SERR)
+			pci_serr_error(reason, regs);
+		else if (reason & NMI_REASON_IOCHK)
+			io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
+		/*
+		 * Reassert NMI in case it became active
+		 * meanwhile as it's edge-triggered:
+		 */
+		reassert_nmi();
 #endif
-		unknown_nmi_error(reason, regs);
-
+		raw_spin_unlock(&nmi_reason_lock);
 		return;
 	}
-	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-		return;
+	raw_spin_unlock(&nmi_reason_lock);
 
-	/* AK: following checks seem to be broken on modern chipsets. FIXME */
-	if (reason & 0x80)
-		mem_parity_error(reason, regs);
-	if (reason & 0x40)
-		io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
-	/*
-	 * Reassert NMI in case it became active meanwhile
-	 * as it's edge-triggered:
-	 */
-	reassert_nmi();
-#endif
+	unknown_nmi_error(reason, regs);
 }
 
 dotraplinkage notrace __kprobes void
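The traps.c rework replaces the magic numbers used on port 0x61 with symbolic names; their values can be read straight off the literals being deleted. For orientation only (the authoritative definitions live in the x86 arch headers, not in this hunk):

	/* Inferred from the replaced literals; a cheat sheet, not a copy. */
	#define NMI_REASON_PORT		0x61	/* was the bare outb(..., 0x61) */
	#define NMI_REASON_SERR		0x80	/* was "reason & 0x80" */
	#define NMI_REASON_IOCHK	0x40	/* was "reason & 0x40" */
	#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)	/* was 0xc0 */
	#define NMI_REASON_CLEAR_SERR	0x04	/* was "| 4" */
	#define NMI_REASON_CLEAR_IOCHK	0x08	/* was "| 8" */
	#define NMI_REASON_CLEAR_MASK	0x0f	/* was "& 0xf" */

The reordered default_do_nmi() then notifies the CPU-local handlers first (those events cannot be seen from another CPU) and only afterwards reads the shared reason port under nmi_reason_lock, so two CPUs never touch port 0x61 concurrently.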
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 03d2ea8..ffe5755 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -464,7 +464,7 @@
 		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
 
 		/* hpet or pmtimer available ? */
-		if (!hpet && !ref1 && !ref2)
+		if (ref1 == ref2)
 			continue;
 
 		/* Check, whether the sampling was disturbed by an SMI */
@@ -935,7 +935,7 @@
 	tsc_stop = tsc_read_refs(&ref_stop, hpet);
 
 	/* hpet or pmtimer available ? */
-	if (!hpet && !ref_start && !ref_stop)
+	if (ref_start == ref_stop)
 		goto out;
 
 	/* Check, whether the sampling was disturbed by an SMI */
@@ -965,7 +965,7 @@
 
 static int __init init_tsc_clocksource(void)
 {
-	if (!cpu_has_tsc || tsc_disabled > 0)
+	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
 	if (tsc_clocksource_reliable)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 61fb985..863f875 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -179,6 +179,7 @@
 	if (pud_none_or_clear_bad(pud))
 		goto out;
 	pmd = pmd_offset(pud, 0xA0000);
+	split_huge_page_pmd(mm, pmd);
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
 	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
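The one-line vm86 change matters because, with transparent huge pages, the VGA window at 0xA0000 may be covered by a huge PMD that has no PTE level to walk. A condensed sketch of the walk this hunk patches (error checks and the surrounding locking are omitted; the wrapper name is invented for illustration):

	/* Sketch: split a possible huge PMD before taking the PTE-level lock. */
	static pte_t *vga_pte_sketch(struct mm_struct *mm, spinlock_t **ptl)
	{
		pgd_t *pgd = pgd_offset(mm, 0xA0000);
		pud_t *pud = pud_offset(pgd, 0xA0000);
		pmd_t *pmd = pmd_offset(pud, 0xA0000);

		split_huge_page_pmd(mm, pmd);	/* new in this patch */
		return pte_offset_map_lock(mm, pmd, 0xA0000, ptl);
	}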
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ddc131f..50f6364 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -28,6 +28,7 @@
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_EVENTFD
 	select KVM_APIC_ARCHITECTURE
+	select KVM_ASYNC_PF
 	select USER_RETURN_NOTIFIER
 	select KVM_MMIO
 	---help---
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 31a7035..f15501f43 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,5 +1,5 @@
 
-EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
+ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
 
 CFLAGS_x86.o := -I.
 CFLAGS_svm.o := -I.
@@ -9,6 +9,7 @@
 				coalesced_mmio.o irq_comm.o eventfd.o \
 				assigned-dev.o)
 kvm-$(CONFIG_IOMMU_API)	+= $(addprefix ../../../virt/kvm/, iommu.o)
+kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
 			   i8254.o timer.o
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 38b6e8d..caf9667 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -20,16 +20,8 @@
  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  */
 
-#ifndef __KERNEL__
-#include <stdio.h>
-#include <stdint.h>
-#include <public/xen.h>
-#define DPRINTF(_f, _a ...) printf(_f , ## _a)
-#else
 #include <linux/kvm_host.h>
 #include "kvm_cache_regs.h"
-#define DPRINTF(x...) do {} while (0)
-#endif
 #include <linux/module.h>
 #include <asm/kvm_emulate.h>
 
@@ -418,9 +410,9 @@
 }
 
 static inline unsigned long
-register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+register_address(struct decode_cache *c, unsigned long reg)
 {
-	return base + address_mask(c, reg);
+	return address_mask(c, reg);
 }
 
 static inline void
@@ -452,60 +444,55 @@
 	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
-static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
-				       struct x86_emulate_ops *ops,
-				       struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops,
+			     struct decode_cache *c)
 {
 	if (!c->has_seg_override)
 		return 0;
 
-	return seg_base(ctxt, ops, c->seg_override);
+	return c->seg_override;
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static ulong linear(struct x86_emulate_ctxt *ctxt,
+		    struct segmented_address addr)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_ES);
+	struct decode_cache *c = &ctxt->decode;
+	ulong la;
+
+	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	if (c->ad_bytes != 8)
+		la &= (u32)-1;
+	return la;
 }
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
+			     u32 error, bool valid)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_SS);
+	ctxt->exception.vector = vec;
+	ctxt->exception.error_code = error;
+	ctxt->exception.error_code_valid = valid;
+	return X86EMUL_PROPAGATE_FAULT;
 }
 
-static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
-				      u32 error, bool valid)
+static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 {
-	ctxt->exception = vec;
-	ctxt->error_code = error;
-	ctxt->error_code_valid = valid;
+	return emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
+static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, GP_VECTOR, err, true);
+	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 }
 
-static void emulate_pf(struct x86_emulate_ctxt *ctxt)
+static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 {
-	emulate_exception(ctxt, PF_VECTOR, 0, true);
-}
-
-static void emulate_ud(struct x86_emulate_ctxt *ctxt)
-{
-	emulate_exception(ctxt, UD_VECTOR, 0, false);
-}
-
-static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
-{
-	emulate_exception(ctxt, TS_VECTOR, err, true);
+	return emulate_exception(ctxt, TS_VECTOR, err, true);
 }
 
 static int emulate_de(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, DE_VECTOR, 0, false);
-	return X86EMUL_PROPAGATE_FAULT;
+	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 }
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
@@ -520,7 +507,7 @@
 		cur_size = fc->end - fc->start;
 		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
 		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
-				size, ctxt->vcpu, NULL);
+				size, ctxt->vcpu, &ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		fc->end += size;
@@ -564,7 +551,7 @@
 
 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 			   struct x86_emulate_ops *ops,
-			   ulong addr,
+			   struct segmented_address addr,
 			   u16 *size, unsigned long *address, int op_bytes)
 {
 	int rc;
@@ -572,10 +559,13 @@
 	if (op_bytes == 2)
 		op_bytes = 3;
 	*address = 0;
-	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
+			   ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
+	addr.ea += 2;
+	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
+			   ctxt->vcpu, &ctxt->exception);
 	return rc;
 }
 
@@ -768,7 +758,7 @@
 			break;
 		}
 	}
-	op->addr.mem = modrm_ea;
+	op->addr.mem.ea = modrm_ea;
 done:
 	return rc;
 }
@@ -783,13 +773,13 @@
 	op->type = OP_MEM;
 	switch (c->ad_bytes) {
 	case 2:
-		op->addr.mem = insn_fetch(u16, 2, c->eip);
+		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
 		break;
 	case 4:
-		op->addr.mem = insn_fetch(u32, 4, c->eip);
+		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
 		break;
 	case 8:
-		op->addr.mem = insn_fetch(u64, 8, c->eip);
+		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
 		break;
 	}
 done:
@@ -808,7 +798,7 @@
 		else if (c->src.bytes == 4)
 			sv = (s32)c->src.val & (s32)mask;
 
-		c->dst.addr.mem += (sv >> 3);
+		c->dst.addr.mem.ea += (sv >> 3);
 	}
 
 	/* only subword offset */
@@ -821,7 +811,6 @@
 {
 	int rc;
 	struct read_cache *mc = &ctxt->decode.mem_read;
-	u32 err;
 
 	while (size) {
 		int n = min(size, 8u);
@@ -829,10 +818,8 @@
 		if (mc->pos < mc->end)
 			goto read_cached;
 
-		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
-					ctxt->vcpu);
-		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt);
+		rc = ops->read_emulated(addr, mc->data + mc->end, n,
+					&ctxt->exception, ctxt->vcpu);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		mc->end += n;
@@ -907,19 +894,15 @@
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
 	int ret;
-	u32 err;
 	ulong addr;
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 	addr = dt.address + index * 8;
-	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,  &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
+	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
+			    &ctxt->exception);
 
        return ret;
 }
@@ -931,21 +914,17 @@
 {
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
-	u32 err;
 	ulong addr;
 	int ret;
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 
 	addr = dt.address + index * 8;
-	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
+	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
+			     &ctxt->exception);
 
 	return ret;
 }
@@ -1092,7 +1071,6 @@
 {
 	int rc;
 	struct decode_cache *c = &ctxt->decode;
-	u32 err;
 
 	switch (c->dst.type) {
 	case OP_REG:
@@ -1101,21 +1079,19 @@
 	case OP_MEM:
 		if (c->lock_prefix)
 			rc = ops->cmpxchg_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.orig_val,
 					&c->dst.val,
 					c->dst.bytes,
-					&err,
+					&ctxt->exception,
 					ctxt->vcpu);
 		else
 			rc = ops->write_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.val,
 					c->dst.bytes,
-					&err,
+					&ctxt->exception,
 					ctxt->vcpu);
-		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		break;
@@ -1137,8 +1113,8 @@
 	c->dst.bytes = c->op_bytes;
 	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
-					   c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.seg = VCPU_SREG_SS;
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1147,10 +1123,11 @@
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
+	struct segmented_address addr;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
-						       c->regs[VCPU_REGS_RSP]),
-			   dest, len);
+	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	addr.seg = VCPU_SREG_SS;
+	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1184,10 +1161,8 @@
 			change_mask |= EFLG_IF;
 		break;
 	case X86EMUL_MODE_VM86:
-		if (iopl < 3) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (iopl < 3)
+			return emulate_gp(ctxt, 0);
 		change_mask |= EFLG_IF;
 		break;
 	default: /* real mode */
@@ -1198,9 +1173,6 @@
 	*(unsigned long *)dest =
 		(ctxt->eflags & ~change_mask) | (val & change_mask);
 
-	if (rc == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
-
 	return rc;
 }
 
@@ -1287,7 +1259,6 @@
 	gva_t cs_addr;
 	gva_t eip_addr;
 	u16 cs, eip;
-	u32 err;
 
 	/* TODO: Add limit checks */
 	c->src.val = ctxt->eflags;
@@ -1317,11 +1288,11 @@
 	eip_addr = dt.address + (irq << 2);
 	cs_addr = dt.address + (irq << 2) + 2;
 
-	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
+	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
+	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1370,10 +1341,8 @@
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	if (temp_eip & ~0xffff) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (temp_eip & ~0xffff)
+		return emulate_gp(ctxt, 0);
 
 	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
 
@@ -1624,10 +1593,8 @@
 
 	/* syscall is not available in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1678,34 +1645,26 @@
 	u16 cs_sel, ss_sel;
 
 	/* inject #GP if in real mode */
-	if (ctxt->mode == X86EMUL_MODE_REAL) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_REAL)
+		return emulate_gp(ctxt, 0);
 
 	/* XXX sysenter/sysexit have not been tested in 64bit mode.
 	* Therefore, we inject an #UD.
 	*/
-	if (ctxt->mode == X86EMUL_MODE_PROT64) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	case X86EMUL_MODE_PROT64:
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	}
 
@@ -1745,10 +1704,8 @@
 
 	/* inject #GP if in real mode or Virtual 8086 mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_gp(ctxt, 0);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1763,18 +1720,14 @@
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
 		cs_sel = (u16)(msr_data + 16);
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = (u16)(msr_data + 24);
 		break;
 	case X86EMUL_MODE_PROT64:
 		cs_sel = (u16)(msr_data + 32);
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = cs_sel + 8;
 		cs.d = 0;
 		cs.l = 1;
@@ -1934,33 +1887,27 @@
 {
 	struct tss_segment_16 tss_seg;
 	int ret;
-	u32 err, new_tss_base = get_desc_base(new_desc);
+	u32 new_tss_base = get_desc_base(new_desc);
 
 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	save_state_to_tss16(ctxt, ops, &tss_seg);
 
 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			     &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			     &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
@@ -1968,12 +1915,10 @@
 		ret = ops->write_std(new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &err);
-		if (ret == X86EMUL_PROPAGATE_FAULT) {
+				     ctxt->vcpu, &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt);
 			return ret;
-		}
 	}
 
 	return load_state_from_tss16(ctxt, ops, &tss_seg);
@@ -2013,10 +1958,8 @@
 	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
-	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
+		return emulate_gp(ctxt, 0);
 	c->eip = tss->eip;
 	ctxt->eflags = tss->eflags | 2;
 	c->regs[VCPU_REGS_RAX] = tss->eax;
@@ -2076,33 +2019,27 @@
 {
 	struct tss_segment_32 tss_seg;
 	int ret;
-	u32 err, new_tss_base = get_desc_base(new_desc);
+	u32 new_tss_base = get_desc_base(new_desc);
 
 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	save_state_to_tss32(ctxt, ops, &tss_seg);
 
 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			     &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			     &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
@@ -2110,12 +2047,10 @@
 		ret = ops->write_std(new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &err);
-		if (ret == X86EMUL_PROPAGATE_FAULT) {
+				     ctxt->vcpu, &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt);
 			return ret;
-		}
 	}
 
 	return load_state_from_tss32(ctxt, ops, &tss_seg);
@@ -2146,10 +2081,8 @@
 
 	if (reason != TASK_SWITCH_IRET) {
 		if ((tss_selector & 3) > next_tss_desc.dpl ||
-		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
+			return emulate_gp(ctxt, 0);
 	}
 
 	desc_limit = desc_limit_scaled(&next_tss_desc);
@@ -2231,14 +2164,15 @@
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
 			    int reg, struct operand *op)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
 
 	register_address_increment(c, &c->regs[reg], df * op->bytes);
-	op->addr.mem = register_address(c,  base, c->regs[reg]);
+	op->addr.mem.ea = register_address(c, c->regs[reg]);
+	op->addr.mem.seg = seg;
 }
 
 static int em_push(struct x86_emulate_ctxt *ctxt)
@@ -2369,10 +2303,8 @@
 	struct decode_cache *c = &ctxt->decode;
 	u64 tsc = 0;
 
-	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
+		return emulate_gp(ctxt, 0);
 	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
 	c->regs[VCPU_REGS_RAX] = (u32)tsc;
 	c->regs[VCPU_REGS_RDX] = tsc >> 32;
@@ -2647,7 +2579,7 @@
 
 	op->type = OP_IMM;
 	op->bytes = size;
-	op->addr.mem = c->eip;
+	op->addr.mem.ea = c->eip;
 	/* NB. Immediates are sign-extended as necessary. */
 	switch (op->bytes) {
 	case 1:
@@ -2678,7 +2610,7 @@
 }
 
 int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt)
+x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
 	struct decode_cache *c = &ctxt->decode;
@@ -2689,7 +2621,10 @@
 	struct operand memop = { .type = OP_NONE };
 
 	c->eip = ctxt->eip;
-	c->fetch.start = c->fetch.end = c->eip;
+	c->fetch.start = c->eip;
+	c->fetch.end = c->fetch.start + insn_len;
+	if (insn_len > 0)
+		memcpy(c->fetch.data, insn, insn_len);
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 
 	switch (mode) {
@@ -2803,10 +2738,8 @@
 	c->execute = opcode.u.execute;
 
 	/* Unrecognised? */
-	if (c->d == 0 || (c->d & Undefined)) {
-		DPRINTF("Cannot emulate %02x\n", c->b);
+	if (c->d == 0 || (c->d & Undefined))
 		return -1;
-	}
 
 	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
 		c->op_bytes = 8;
@@ -2831,14 +2764,13 @@
 	if (!c->has_seg_override)
 		set_seg_override(c, VCPU_SREG_DS);
 
-	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
-		memop.addr.mem += seg_override_base(ctxt, ops, c);
+	memop.addr.mem.seg = seg_override(ctxt, ops, c);
 
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
-		memop.addr.mem = (u32)memop.addr.mem;
+		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
 	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem += c->eip;
+		memop.addr.mem.ea += c->eip;
 
 	/*
 	 * Decode and fetch the source operand: register, memory
@@ -2890,14 +2822,14 @@
 	case SrcSI:
 		c->src.type = OP_MEM;
 		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->src.addr.mem =
-			register_address(c,  seg_override_base(ctxt, ops, c),
-					 c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.seg = seg_override(ctxt, ops, c);
 		c->src.val = 0;
 		break;
 	case SrcImmFAddr:
 		c->src.type = OP_IMM;
-		c->src.addr.mem = c->eip;
+		c->src.addr.mem.ea = c->eip;
 		c->src.bytes = c->op_bytes + 2;
 		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
 		break;
@@ -2944,7 +2876,7 @@
 		break;
 	case DstImmUByte:
 		c->dst.type = OP_IMM;
-		c->dst.addr.mem = c->eip;
+		c->dst.addr.mem.ea = c->eip;
 		c->dst.bytes = 1;
 		c->dst.val = insn_fetch(u8, 1, c->eip);
 		break;
@@ -2969,9 +2901,9 @@
 	case DstDI:
 		c->dst.type = OP_MEM;
 		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->dst.addr.mem =
-			register_address(c, es_base(ctxt, ops),
-					 c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
 	case ImplicitOps:
@@ -3020,24 +2952,24 @@
 	ctxt->decode.mem_read.pos = 0;
 
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* LOCK prefix is allowed only with some instructions */
 	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
+		rc = emulate_gp(ctxt, 0);
 		goto done;
 	}
 
@@ -3050,7 +2982,7 @@
 	}
 
 	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
-		rc = read_emulated(ctxt, ops, c->src.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
 					c->src.valptr, c->src.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3058,7 +2990,7 @@
 	}
 
 	if (c->src2.type == OP_MEM) {
-		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
 					&c->src2.val, c->src2.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3070,7 +3002,7 @@
 
 	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
 		/* optimisation - avoid slow emulated read if Mov */
-		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
 				   &c->dst.val, c->dst.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3215,13 +3147,13 @@
 		break;
 	case 0x8c:  /* mov r/m, sreg */
 		if (c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
 		break;
 	case 0x8d: /* lea r16/r32, m */
-		c->dst.val = c->src.addr.mem;
+		c->dst.val = c->src.addr.mem.ea;
 		break;
 	case 0x8e: { /* mov seg, r/m16 */
 		uint16_t sel;
@@ -3230,7 +3162,7 @@
 
 		if (c->modrm_reg == VCPU_SREG_CS ||
 		    c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 
@@ -3268,7 +3200,6 @@
 		break;
 	case 0xa6 ... 0xa7:	/* cmps */
 		c->dst.type = OP_NONE; /* Disable writeback. */
-		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
 		goto cmp;
 	case 0xa8 ... 0xa9:	/* test ax, imm */
 		goto test;
@@ -3363,7 +3294,7 @@
 	do_io_in:
 		c->dst.bytes = min(c->dst.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
@@ -3377,7 +3308,7 @@
 		c->src.bytes = min(c->src.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->dst.val,
 					  c->src.bytes)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		ops->pio_out_emulated(c->src.bytes, c->dst.val,
@@ -3402,14 +3333,14 @@
 		break;
 	case 0xfa: /* cli */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else
 			ctxt->eflags &= ~X86_EFLAGS_IF;
 		break;
 	case 0xfb: /* sti */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else {
 			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
@@ -3449,11 +3380,11 @@
 	c->dst.type = saved_dst_type;
 
 	if ((c->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
 				VCPU_REGS_RSI, &c->src);
 
 	if ((c->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
 				&c->dst);
 
 	if (c->rep_prefix && (c->d & String)) {
@@ -3482,6 +3413,8 @@
 	ctxt->eip = c->eip;
 
 done:
+	if (rc == X86EMUL_PROPAGATE_FAULT)
+		ctxt->have_exception = true;
 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 
 twobyte_insn:
@@ -3544,9 +3477,11 @@
 			break;
 		case 5: /* not defined */
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		case 7: /* invlpg*/
-			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
+			emulate_invlpg(ctxt->vcpu,
+				       linear(ctxt, c->src.addr.mem));
 			/* Disable writeback. */
 			c->dst.type = OP_NONE;
 			break;
@@ -3573,6 +3508,7 @@
 		case 5 ... 7:
 		case 9 ... 15:
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
@@ -3581,6 +3517,7 @@
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
@@ -3588,6 +3525,7 @@
 	case 0x22: /* mov reg, cr */
 		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		c->dst.type = OP_NONE;
@@ -3596,6 +3534,7 @@
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 
@@ -3604,6 +3543,7 @@
 				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
 			/* #UD condition is already handled by the code above */
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 
@@ -3615,6 +3555,7 @@
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -3623,6 +3564,7 @@
 		/* rdmsr */
 		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
@@ -3785,6 +3727,5 @@
 	goto writeback;
 
 cannot_emulate:
-	DPRINTF("Cannot emulate %02x\n", c->b);
 	return -1;
 }
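Two idioms recur throughout the emulate.c hunks above: exception helpers that both record the fault in ctxt->exception and return X86EMUL_PROPAGATE_FAULT, and memory operands that carry a (segment, effective address) pair which linear() flattens only at access time. A condensed sketch of the first idiom, restating what the hunks above introduce:

	/* Old call sites:                        New call sites:
	 *	if (bad) {                            if (bad)
	 *		emulate_gp(ctxt, 0);                  return emulate_gp(ctxt, 0);
	 *		return X86EMUL_PROPAGATE_FAULT;
	 *	}
	 */
	static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
				     u32 error, bool valid)
	{
		ctxt->exception.vector = vec;
		ctxt->exception.error_code = error;
		ctxt->exception.error_code_valid = valid;
		return X86EMUL_PROPAGATE_FAULT;
	}

	static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
	{
		return emulate_exception(ctxt, GP_VECTOR, err, true);
	}

The same hunks thread &ctxt->exception through the ->read_std()/->write_std()/->read_emulated() callbacks, which is why the scattered emulate_pf() calls and the throwaway "u32 err" locals disappear.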
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 975bb45..3377d53 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -73,6 +73,13 @@
 	return vcpu->arch.cr4 & mask;
 }
 
+static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
+{
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->decache_cr3(vcpu);
+	return vcpu->arch.cr3;
+}
+
 static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, ~0UL);
@@ -84,4 +91,19 @@
 		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }
 
+static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags |= HF_GUEST_MASK;
+}
+
+static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags &= ~HF_GUEST_MASK;
+}
+
+static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_GUEST_MASK;
+}
+
 #endif
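kvm_read_cr3() follows the lazy register-cache pattern used for the other cached registers: the VCPU_EXREG_CR3 bit in regs_avail says whether vcpu->arch.cr3 is current, and decache_cr3() pulls it from the hardware state (VMCS/VMCB) on first use. A usage sketch; the helper below is hypothetical, and HF_GUEST_MASK / VCPU_EXREG_CR3 come from the KVM headers rather than this hunk:

	/* Hypothetical helper: callers stop reading vcpu->arch.cr3 directly. */
	static void example_dump_cr3(struct kvm_vcpu *vcpu)
	{
		unsigned long cr3 = kvm_read_cr3(vcpu);	/* decaches on first use */

		if (is_guest_mode(vcpu))	/* nested (L2) guest currently running */
			pr_debug("nested guest cr3 = %lx\n", cr3);
		else
			pr_debug("cr3 = %lx\n", cr3);
	}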
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 413f897..93cf9d0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -277,7 +277,8 @@
 
 	if (old_ppr != ppr) {
 		apic_set_reg(apic, APIC_PROCPRI, ppr);
-		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+		if (ppr < old_ppr)
+			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 	}
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fbb04ae..f02b8ed 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,9 +18,11 @@
  *
  */
 
+#include "irq.h"
 #include "mmu.h"
 #include "x86.h"
 #include "kvm_cache_regs.h"
+#include "x86.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -194,7 +196,6 @@
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
-static u64 __read_mostly shadow_base_present_pte;
 static u64 __read_mostly shadow_nx_mask;
 static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
@@ -213,12 +214,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
-void kvm_mmu_set_base_ptes(u64 base_pte)
-{
-	shadow_base_present_pte = base_pte;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
-
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
@@ -482,46 +477,46 @@
 }
 
 /*
- * Return the pointer to the largepage write count for a given
- * gfn, handling slots that are not large page aligned.
+ * Return the pointer to the large page information for a given gfn,
+ * handling slots that are not large page aligned.
  */
-static int *slot_largepage_idx(gfn_t gfn,
-			       struct kvm_memory_slot *slot,
-			       int level)
+static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
+					      struct kvm_memory_slot *slot,
+					      int level)
 {
 	unsigned long idx;
 
 	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
 	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-	return &slot->lpage_info[level - 2][idx].write_count;
+	return &slot->lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count   = slot_largepage_idx(gfn, slot, i);
-		*write_count += 1;
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count += 1;
 	}
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count   = slot_largepage_idx(gfn, slot, i);
-		*write_count -= 1;
-		WARN_ON(*write_count < 0);
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count -= 1;
+		WARN_ON(linfo->write_count < 0);
 	}
 }
 
@@ -530,12 +525,12 @@
 				int level)
 {
 	struct kvm_memory_slot *slot;
-	int *largepage_idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (slot) {
-		largepage_idx = slot_largepage_idx(gfn, slot, level);
-		return *largepage_idx;
+		linfo = lpage_info_slot(gfn, slot, level);
+		return linfo->write_count;
 	}
 
 	return 1;
@@ -559,14 +554,18 @@
 	return ret;
 }
 
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
 	struct kvm_memory_slot *slot;
-	int host_level, level, max_level;
-
 	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
 	if (slot && slot->dirty_bitmap)
-		return PT_PAGE_TABLE_LEVEL;
+		return true;
+	return false;
+}
+
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+	int host_level, level, max_level;
 
 	host_level = host_mapping_level(vcpu->kvm, large_gfn);
 
@@ -590,16 +589,15 @@
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
 	struct kvm_memory_slot *slot;
-	unsigned long idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (likely(level == PT_PAGE_TABLE_LEVEL))
 		return &slot->rmap[gfn - slot->base_gfn];
 
-	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-		(slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+	linfo = lpage_info_slot(gfn, slot, level);
 
-	return &slot->lpage_info[level - 2][idx].rmap_pde;
+	return &linfo->rmap_pde;
 }
 
 /*
@@ -887,19 +885,16 @@
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			gfn_t gfn = memslot->base_gfn + gfn_offset;
 
 			ret = handler(kvm, &memslot->rmap[gfn_offset], data);
 
 			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-				unsigned long idx;
-				int sh;
+				struct kvm_lpage_info *linfo;
 
-				sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
-				idx = ((memslot->base_gfn+gfn_offset) >> sh) -
-					(memslot->base_gfn >> sh);
-				ret |= handler(kvm,
-					&memslot->lpage_info[j][idx].rmap_pde,
-					data);
+				linfo = lpage_info_slot(gfn, memslot,
+							PT_DIRECTORY_LEVEL + j);
+				ret |= handler(kvm, &linfo->rmap_pde, data);
 			}
 			trace_kvm_age_page(hva, memslot, ret);
 			retval |= ret;
@@ -950,6 +945,35 @@
 	return young;
 }
 
+static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			      unsigned long data)
+{
+	u64 *spte;
+	int young = 0;
+
+	/*
+	 * If there's no access bit in the secondary pte set by the
+	 * hardware it's up to gup-fast/gup to set the access bit in
+	 * the primary pte or in the page structure.
+	 */
+	if (!shadow_accessed_mask)
+		goto out;
+
+	spte = rmap_next(kvm, rmapp, NULL);
+	while (spte) {
+		u64 _spte = *spte;
+		BUG_ON(!(_spte & PT_PRESENT_MASK));
+		young = _spte & PT_ACCESSED_MASK;
+		if (young) {
+			young = 1;
+			break;
+		}
+		spte = rmap_next(kvm, rmapp, spte);
+	}
+out:
+	return young;
+}
+
 #define RMAP_RECYCLE_THRESHOLD 1000
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -970,6 +994,11 @@
 	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
+}
+
 #ifdef MMU_DEBUG
 static int is_empty_shadow_page(u64 *spt)
 {
@@ -1161,7 +1190,7 @@
 }
 
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu_page *sp, bool clear_unsync)
+			       struct kvm_mmu_page *sp)
 {
 	return 1;
 }
@@ -1291,7 +1320,7 @@
 	if (clear_unsync)
 		kvm_unlink_unsync_page(vcpu->kvm, sp);
 
-	if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
+	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
@@ -1332,12 +1361,12 @@
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		kvm_unlink_unsync_page(vcpu->kvm, s);
 		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
-			(vcpu->arch.mmu.sync_page(vcpu, s, true))) {
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
 			continue;
 		}
-		kvm_unlink_unsync_page(vcpu->kvm, s);
 		flush = true;
 	}
 
@@ -1963,9 +1992,9 @@
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
-		    bool can_unsync, bool reset_host_protection)
+		    bool can_unsync, bool host_writable)
 {
-	u64 spte;
+	u64 spte, entry = *sptep;
 	int ret = 0;
 
 	/*
@@ -1973,7 +2002,7 @@
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = shadow_base_present_pte;
+	spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 	if (!dirty)
@@ -1990,8 +2019,10 @@
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
-	if (reset_host_protection)
+	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
+	else
+		pte_access &= ~ACC_WRITE_MASK;
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
@@ -2036,6 +2067,14 @@
 
 set_pte:
 	update_spte(sptep, spte);
+	/*
+	 * If we overwrite a writable spte with a read-only one we
+	 * should flush remote TLBs. Otherwise rmap_write_protect
+	 * will find a read-only spte, even though the writable spte
+	 * might be cached on a CPU's TLB.
+	 */
+	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
+		kvm_flush_remote_tlbs(vcpu->kvm);
 done:
 	return ret;
 }
@@ -2045,7 +2084,7 @@
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int level, gfn_t gfn,
 			 pfn_t pfn, bool speculative,
-			 bool reset_host_protection)
+			 bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
@@ -2080,7 +2119,7 @@
 
 	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
 		      dirty, level, gfn, pfn, speculative, true,
-		      reset_host_protection)) {
+		      host_writable)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_mmu_flush_tlb(vcpu);
@@ -2211,7 +2250,8 @@
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int level, gfn_t gfn, pfn_t pfn)
+			int map_writable, int level, gfn_t gfn, pfn_t pfn,
+			bool prefault)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -2220,9 +2260,11 @@
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+			unsigned pte_access = ACC_ALL;
+
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
 				     0, write, 1, &pt_write,
-				     level, gfn, pfn, false, true);
+				     level, gfn, pfn, prefault, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);
 			++vcpu->stat.pf_fixed;
 			break;
@@ -2277,27 +2319,81 @@
 	return 1;
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
+					gfn_t *gfnp, pfn_t *pfnp, int *levelp)
+{
+	pfn_t pfn = *pfnp;
+	gfn_t gfn = *gfnp;
+	int level = *levelp;
+
+	/*
+	 * Check if it's a transparent hugepage. If this were a
+	 * hugetlbfs page, level wouldn't be set to
+	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
+	 * here.
+	 */
+	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	    level == PT_PAGE_TABLE_LEVEL &&
+	    PageTransCompound(pfn_to_page(pfn)) &&
+	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
+		unsigned long mask;
+		/*
+		 * mmu_notifier_retry was successful and we hold the
+		 * mmu_lock here, so the pmd can't start splitting
+		 * under us, and in turn
+		 * __split_huge_page_refcount() can't run from under
+		 * us, so we can safely transfer the refcount from
+		 * PG_tail to PG_head as we switch the pfn from tail
+		 * to head.
+		 */
+		*levelp = level = PT_DIRECTORY_LEVEL;
+		mask = KVM_PAGES_PER_HPAGE(level) - 1;
+		VM_BUG_ON((gfn & mask) != (pfn & mask));
+		if (pfn & mask) {
+			gfn &= ~mask;
+			*gfnp = gfn;
+			kvm_release_pfn_clean(pfn);
+			pfn &= ~mask;
+			if (!get_page_unless_zero(pfn_to_page(pfn)))
+				BUG();
+			*pfnp = pfn;
+		}
+	}
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
+
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
+			 bool prefault)
 {
 	int r;
 	int level;
+	int force_pt_level;
 	pfn_t pfn;
 	unsigned long mmu_seq;
+	bool map_writable;
 
-	level = mapping_level(vcpu, gfn);
+	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+	if (likely(!force_pt_level)) {
+		level = mapping_level(vcpu, gfn);
+		/*
+		 * This path builds a PAE pagetable - so we can map
+		 * 2mb pages at maximum. Therefore check if the level
+		 * is larger than that.
+		 */
+		if (level > PT_DIRECTORY_LEVEL)
+			level = PT_DIRECTORY_LEVEL;
 
-	/*
-	 * This path builds a PAE pagetable - so we can map 2mb pages at
-	 * maximum. Therefore check if the level is larger than that.
-	 */
-	if (level > PT_DIRECTORY_LEVEL)
-		level = PT_DIRECTORY_LEVEL;
-
-	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	} else
+		level = PT_PAGE_TABLE_LEVEL;
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
+		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -2307,7 +2403,10 @@
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, v, write, level, gfn, pfn);
+	if (likely(!force_pt_level))
+		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
+			 prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 
@@ -2530,6 +2629,7 @@
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 		sp = page_header(root);
 		mmu_sync_children(vcpu, sp);
+		trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 		return;
 	}
 	for (i = 0; i < 4; ++i) {
@@ -2552,23 +2652,24 @@
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
-				  u32 access, u32 *error)
+				  u32 access, struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vaddr;
 }
 
 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
-					 u32 access, u32 *error)
+					 u32 access,
+					 struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code)
+				u32 error_code, bool prefault)
 {
 	gfn_t gfn;
 	int r;
@@ -2584,17 +2685,68 @@
 	gfn = gva >> PAGE_SHIFT;
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
-			     error_code & PFERR_WRITE_MASK, gfn);
+			     error_code & PFERR_WRITE_MASK, gfn, prefault);
 }
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
-				u32 error_code)
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+{
+	struct kvm_arch_async_pf arch;
+
+	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+	arch.gfn = gfn;
+	arch.direct_map = vcpu->arch.mmu.direct_map;
+	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
+
+	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
+}
+
+static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
+		     kvm_event_needs_reinjection(vcpu)))
+		return false;
+
+	return kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
+{
+	bool async;
+
+	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
+
+	if (!async)
+		return false; /* *pfn has correct page already */
+
+	put_page(pfn_to_page(*pfn));
+
+	if (!prefault && can_do_async_pf(vcpu)) {
+		trace_kvm_try_async_get_page(gva, gfn);
+		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+			trace_kvm_async_pf_doublefault(gva, gfn);
+			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+			return true;
+		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
+			return true;
+	}
+
+	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
+
+	return false;
+}
+
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+			  bool prefault)
 {
 	pfn_t pfn;
 	int r;
 	int level;
+	int force_pt_level;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
+	int write = error_code & PFERR_WRITE_MASK;
+	bool map_writable;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -2603,21 +2755,30 @@
 	if (r)
 		return r;
 
-	level = mapping_level(vcpu, gfn);
-
-	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+	if (likely(!force_pt_level)) {
+		level = mapping_level(vcpu, gfn);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	} else
+		level = PT_PAGE_TABLE_LEVEL;
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+		return 0;
+
+	/* mmio */
 	if (is_error_pfn(pfn))
 		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 level, gfn, pfn);
+	if (likely(!force_pt_level))
+		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+	r = __direct_map(vcpu, gpa, write, map_writable,
+			 level, gfn, pfn, prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -2659,18 +2820,19 @@
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
+	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
 	mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr3;
+	return kvm_read_cr3(vcpu);
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu)
+static void inject_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu);
+	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
@@ -2816,6 +2978,7 @@
 {
 	struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
+	context->base_role.word = 0;
 	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = tdp_page_fault;
 	context->free = nonpaging_free;
@@ -3008,9 +3171,6 @@
 		return;
         }
 
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
-		return;
-
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (!sp->role.cr4_pae)
 		paging32_update_pte(vcpu, sp, spte, new);
@@ -3264,12 +3424,13 @@
 	}
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+		       void *insn, int insn_len)
 {
 	int r;
 	enum emulation_result er;
 
-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
 		goto out;
 
@@ -3282,7 +3443,7 @@
 	if (r)
 		goto out;
 
-	er = emulate_instruction(vcpu, cr2, error_code, 0);
+	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
 	switch (er) {
 	case EMULATE_DONE:
@@ -3377,11 +3538,14 @@
 		if (!test_bit(slot, sp->slot_bitmap))
 			continue;
 
+		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+			continue;
+
 		pt = sp->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (is_writable_pte(pt[i]))
-				pt[i] &= ~PT_WRITABLE_MASK;
+				update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
 	}
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -3463,13 +3627,6 @@
 		kmem_cache_destroy(mmu_page_header_cache);
 }
 
-void kvm_mmu_module_exit(void)
-{
-	mmu_destroy_caches();
-	percpu_counter_destroy(&kvm_total_used_mmu_pages);
-	unregister_shrinker(&mmu_shrinker);
-}
-
 int kvm_mmu_module_init(void)
 {
 	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -3566,7 +3723,7 @@
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
+	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
 	return 1;
 }
 
@@ -3662,12 +3819,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
@@ -3675,5 +3826,18 @@
 	destroy_kvm_mmu(vcpu);
 	free_mmu_pages(vcpu);
 	mmu_free_memory_caches(vcpu);
+}
+
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void mmu_audit_disable(void) { }
+#endif
+
+void kvm_mmu_module_exit(void)
+{
+	mmu_destroy_caches();
+	percpu_counter_destroy(&kvm_total_used_mmu_pages);
+	unregister_shrinker(&mmu_shrinker);
 	mmu_audit_disable();
 }
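The mmu.c hunks above rework the direct fault path around three new pieces: dirty logging forcing 4K mappings (mapping_level_dirty_bitmap), asynchronous page faults (try_async_pf), and transparent hugepage promotion (transparent_hugepage_adjust). A condensed sketch of the resulting flow; mmu_lock, mmu_notifier_retry and the error-pfn handling are omitted, and the function name is invented for illustration:

	static int direct_fault_sketch(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
				       bool prefault)
	{
		gfn_t gfn = gpa >> PAGE_SHIFT;
		int force_pt_level, level;
		bool map_writable;
		pfn_t pfn;

		/* 1. Dirty logging forces 4K mappings; otherwise use the host level. */
		force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
		if (likely(!force_pt_level)) {
			level = mapping_level(vcpu, gfn);
			gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
		} else
			level = PT_PAGE_TABLE_LEVEL;

		/* 2. Resolve the pfn; the fault may be parked as an async PF instead. */
		if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
			return 0;	/* vcpu keeps running, fault completes later */

		/* 3. Promote to a huge mapping when backed by a transparent hugepage. */
		if (likely(!force_pt_level))
			transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);

		/* 4. Install the spte; it stays read-only unless the host mapping
		 *    was writable (map_writable).
		 */
		return __direct_map(vcpu, gpa, write, map_writable, level, gfn, pfn,
				    prefault);
	}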
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index ba2bcdd..5f6223b 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -19,11 +19,9 @@
 
 #include <linux/ratelimit.h>
 
-static int audit_point;
-
-#define audit_printk(fmt, args...)		\
+#define audit_printk(kvm, fmt, args...)		\
 	printk(KERN_ERR "audit: (%s) error: "	\
-		fmt, audit_point_name[audit_point], ##args)
+		fmt, audit_point_name[kvm->arch.audit_point], ##args)
 
 typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
 
@@ -97,18 +95,21 @@
 
 	if (sp->unsync) {
 		if (level != PT_PAGE_TABLE_LEVEL) {
-			audit_printk("unsync sp: %p level = %d\n", sp, level);
+			audit_printk(vcpu->kvm, "unsync sp: %p "
+				     "level = %d\n", sp, level);
 			return;
 		}
 
 		if (*sptep == shadow_notrap_nonpresent_pte) {
-			audit_printk("notrap spte in unsync sp: %p\n", sp);
+			audit_printk(vcpu->kvm, "notrap spte in unsync "
+				     "sp: %p\n", sp);
 			return;
 		}
 	}
 
 	if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
-		audit_printk("notrap spte in direct sp: %p\n", sp);
+		audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n",
+			     sp);
 		return;
 	}
 
@@ -125,8 +126,9 @@
 
 	hpa =  pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
-		audit_printk("levels %d pfn %llx hpa %llx ent %llxn",
-				   vcpu->arch.mmu.root_level, pfn, hpa, *sptep);
+		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
+			     "ent %llxn", vcpu->arch.mmu.root_level, pfn,
+			     hpa, *sptep);
 }
 
 static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
@@ -142,8 +144,8 @@
 	if (!gfn_to_memslot(kvm, gfn)) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no memslot for gfn %llx\n", gfn);
-		audit_printk("index %ld of sp (gfn=%llx)\n",
+		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
+		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
 		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
 		dump_stack();
 		return;
@@ -153,7 +155,8 @@
 	if (!*rmapp) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no rmap for writable spte %llx\n", *sptep);
+		audit_printk(kvm, "no rmap for writable spte %llx\n",
+			     *sptep);
 		dump_stack();
 	}
 }
@@ -168,8 +171,9 @@
 {
 	struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
-	if (audit_point == AUDIT_POST_SYNC && sp->unsync)
-		audit_printk("meet unsync sp(%p) after sync root.\n", sp);
+	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
+		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
+			     "root.\n", sp);
 }
 
 static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -202,8 +206,9 @@
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		if (is_writable_pte(*spte))
-			audit_printk("shadow page has writable mappings: gfn "
-				     "%llx role %x\n", sp->gfn, sp->role.word);
+			audit_printk(kvm, "shadow page has writable "
+				     "mappings: gfn %llx role %x\n",
+				     sp->gfn, sp->role.word);
 		spte = rmap_next(kvm, rmapp, spte);
 	}
 }
@@ -238,7 +243,7 @@
 	if (!__ratelimit(&ratelimit_state))
 		return;
 
-	audit_point = point;
+	vcpu->kvm->arch.audit_point = point;
 	audit_all_active_sps(vcpu->kvm);
 	audit_vcpu_spte(vcpu);
 }
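For illustration only (not part of the patch), this is roughly what the reworked macro expands to now that the audit point lives in kvm->arch.audit_point rather than a file-scope static:

/* audit_printk(vcpu->kvm, "unsync sp: %p level = %d\n", sp, level)
 * expands to approximately: */
printk(KERN_ERR "audit: (%s) error: " "unsync sp: %p level = %d\n",
       audit_point_name[vcpu->kvm->arch.audit_point], sp, level);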
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index cd7a833..6bccc24 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -72,7 +72,7 @@
 	unsigned pt_access;
 	unsigned pte_access;
 	gfn_t gfn;
-	u32 error_code;
+	struct x86_exception fault;
 };
 
 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
@@ -266,21 +266,23 @@
 	return 1;
 
 error:
-	walker->error_code = 0;
+	walker->fault.vector = PF_VECTOR;
+	walker->fault.error_code_valid = true;
+	walker->fault.error_code = 0;
 	if (present)
-		walker->error_code |= PFERR_PRESENT_MASK;
+		walker->fault.error_code |= PFERR_PRESENT_MASK;
 
-	walker->error_code |= write_fault | user_fault;
+	walker->fault.error_code |= write_fault | user_fault;
 
 	if (fetch_fault && mmu->nx)
-		walker->error_code |= PFERR_FETCH_MASK;
+		walker->fault.error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
-		walker->error_code |= PFERR_RSVD_MASK;
+		walker->fault.error_code |= PFERR_RSVD_MASK;
 
-	vcpu->arch.fault.address    = addr;
-	vcpu->arch.fault.error_code = walker->error_code;
+	walker->fault.address = addr;
+	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 
-	trace_kvm_mmu_walker_error(walker->error_code);
+	trace_kvm_mmu_walker_error(walker->fault.error_code);
 	return 0;
 }
 
@@ -299,25 +301,42 @@
 					addr, access);
 }
 
+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *sp, u64 *spte,
+				    pt_element_t gpte)
+{
+	u64 nonpresent = shadow_trap_nonpresent_pte;
+
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!is_present_gpte(gpte)) {
+		if (!sp->unsync)
+			nonpresent = shadow_notrap_nonpresent_pte;
+		goto no_present;
+	}
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte, nonpresent);
+	return true;
+}
+
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
 	pt_element_t gpte;
 	unsigned pte_access;
 	pfn_t pfn;
-	u64 new_spte;
 
 	gpte = *(const pt_element_t *)pte;
-	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-		if (!is_present_gpte(gpte)) {
-			if (sp->unsync)
-				new_spte = shadow_trap_nonpresent_pte;
-			else
-				new_spte = shadow_notrap_nonpresent_pte;
-			__set_spte(spte, new_spte);
-		}
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return;
-	}
+
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
@@ -329,7 +348,7 @@
 		return;
 	kvm_get_pfn(pfn);
 	/*
-	 * we call mmu_set_spte() with reset_host_protection = true beacuse that
+	 * we call mmu_set_spte() with host_writable = true because
 	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
 	 */
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
@@ -364,7 +383,6 @@
 				u64 *sptep)
 {
 	struct kvm_mmu_page *sp;
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
 	pt_element_t *gptep = gw->prefetch_ptes;
 	u64 *spte;
 	int i;
@@ -395,14 +413,7 @@
 
 		gpte = gptep[i];
 
-		if (!is_present_gpte(gpte) ||
-		      is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) {
-			if (!sp->unsync)
-				__set_spte(spte, shadow_notrap_nonpresent_pte);
-			continue;
-		}
-
-		if (!(gpte & PT_ACCESSED_MASK))
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
@@ -427,7 +438,8 @@
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int user_fault, int write_fault, int hlevel,
-			 int *ptwrite, pfn_t pfn)
+			 int *ptwrite, pfn_t pfn, bool map_writable,
+			 bool prefault)
 {
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp = NULL;
@@ -501,7 +513,7 @@
 
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
 		     user_fault, write_fault, dirty, ptwrite, it.level,
-		     gw->gfn, pfn, false, true);
+		     gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return it.sptep;
@@ -527,8 +539,8 @@
  *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *           a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
-			       u32 error_code)
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+			     bool prefault)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
@@ -538,7 +550,9 @@
 	int r;
 	pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
+	int force_pt_level;
 	unsigned long mmu_seq;
+	bool map_writable;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -556,19 +570,29 @@
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu);
-		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+		if (!prefault) {
+			inject_page_fault(vcpu, &walker.fault);
+			/* reset fork detector */
+			vcpu->arch.last_pt_write_count = 0;
+		}
 		return 0;
 	}
 
-	if (walker.level >= PT_DIRECTORY_LEVEL) {
+	if (walker.level >= PT_DIRECTORY_LEVEL)
+		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
+	else
+		force_pt_level = 1;
+	if (!force_pt_level) {
 		level = min(walker.level, mapping_level(vcpu, walker.gfn));
 		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 	}
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
+
+	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
+			 &map_writable))
+		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -580,8 +604,10 @@
 
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	kvm_mmu_free_some_pages(vcpu);
+	if (!force_pt_level)
+		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-			     level, &write_pt, pfn);
+			     level, &write_pt, pfn, map_writable, prefault);
 	(void)sptep;
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 sptep, *sptep, write_pt);
@@ -661,7 +687,7 @@
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
-			       u32 *error)
+			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -672,14 +698,15 @@
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception)
+		*exception = walker.fault;
 
 	return gpa;
 }
 
 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
-				      u32 access, u32 *error)
+				      u32 access,
+				      struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -690,8 +717,8 @@
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception)
+		*exception = walker.fault;
 
 	return gpa;
 }
@@ -730,12 +757,19 @@
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
+ *
+ * Note:
+ *   We should flush all TLBs if an spte is dropped even though the guest is
+ *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
+ *   kvm_mmu_notifier_invalidate_range_start may find that the page is no
+ *   longer mapped by the guest, skip the TLB flush, and leave the guest able
+ *   to access the freed pages.
+ *   We increase kvm->tlbs_dirty to delay the TLB flush in this case.
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			    bool clear_unsync)
+static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
-	bool reset_host_protection;
+	bool host_writable;
 	gpa_t first_pte_gpa;
 
 	offset = nr_present = 0;
@@ -764,31 +798,27 @@
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
-		      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
-		      || !(gpte & PT_ACCESSED_MASK)) {
-			u64 nonpresent;
 
-			if (is_present_gpte(gpte) || !clear_unsync)
-				nonpresent = shadow_trap_nonpresent_pte;
-			else
-				nonpresent = shadow_notrap_nonpresent_pte;
-			drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+			vcpu->kvm->tlbs_dirty++;
+			continue;
+		}
+
+		if (gfn != sp->gfns[i]) {
+			drop_spte(vcpu->kvm, &sp->spt[i],
+				      shadow_trap_nonpresent_pte);
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
-		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
-			pte_access &= ~ACC_WRITE_MASK;
-			reset_host_protection = 0;
-		} else {
-			reset_host_protection = 1;
-		}
+		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
+
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
 			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false,
-			 reset_host_protection);
+			 host_writable);
 	}
 
 	return !nr_present;
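To make the note in FNAME(sync_page) above concrete, here is a minimal sketch, written for this review and not taken from the series, of how a deferred-flush counter like kvm->tlbs_dirty is expected to be consumed on the mmu-notifier side. example_unmap_hva() is a hypothetical stand-in for the real unmap helper, and kvm_flush_remote_tlbs() is assumed to reset the counter.

/* Sketch only: consuming kvm->tlbs_dirty in an invalidate callback. */
static int example_unmap_hva(struct kvm *kvm, unsigned long address);	/* hypothetical */

static void example_invalidate_page(struct kvm *kvm, unsigned long address)
{
	int need_tlb_flush;

	spin_lock(&kvm->mmu_lock);
	/* nonzero if any spte for this host address was zapped */
	need_tlb_flush = example_unmap_hva(kvm, address);
	/* also honour sptes that sync_page() dropped without flushing */
	need_tlb_flush |= kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);

	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}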
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b81a9b7..63fec15 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -31,6 +31,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
+#include <asm/kvm_para.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -50,6 +51,10 @@
 #define SVM_FEATURE_LBRV           (1 <<  1)
 #define SVM_FEATURE_SVML           (1 <<  2)
 #define SVM_FEATURE_NRIP           (1 <<  3)
+#define SVM_FEATURE_TSC_RATE       (1 <<  4)
+#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
+#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
+#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
 
 #define NESTED_EXIT_HOST	0	/* Exit handled on host level */
@@ -97,10 +102,8 @@
 	unsigned long vmexit_rax;
 
 	/* cache for intercepts of the guest */
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
-	u16 intercept_dr_read;
-	u16 intercept_dr_write;
+	u32 intercept_cr;
+	u32 intercept_dr;
 	u32 intercept_exceptions;
 	u64 intercept;
 
@@ -123,7 +126,12 @@
 	u64 next_rip;
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
-	u64 host_gs_base;
+	struct {
+		u16 fs;
+		u16 gs;
+		u16 ldt;
+		u64 gs_base;
+	} host;
 
 	u32 *msrpm;
 
@@ -133,6 +141,7 @@
 
 	unsigned int3_injected;
 	unsigned long int3_rip;
+	u32 apf_reason;
 };
 
 #define MSR_INVALID			0xffffffffU
@@ -180,14 +189,151 @@
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 				      bool has_error_code, u32 error_code);
 
+enum {
+	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
+			    pause filter count */
+	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
+	VMCB_ASID,	 /* ASID */
+	VMCB_INTR,	 /* int_ctl, int_vector */
+	VMCB_NPT,        /* npt_en, nCR3, gPAT */
+	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
+	VMCB_DR,         /* DR6, DR7 */
+	VMCB_DT,         /* GDT, IDT */
+	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
+	VMCB_CR2,        /* CR2 only */
+	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
+	VMCB_DIRTY_MAX,
+};
+
+/* TPR and CR2 are always written before VMRUN */
+#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
+
+static inline void mark_all_dirty(struct vmcb *vmcb)
+{
+	vmcb->control.clean = 0;
+}
+
+static inline void mark_all_clean(struct vmcb *vmcb)
+{
+	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+			       & ~VMCB_ALWAYS_DIRTY_MASK;
+}
+
+static inline void mark_dirty(struct vmcb *vmcb, int bit)
+{
+	vmcb->control.clean &= ~(1 << bit);
+}
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
-static inline bool is_nested(struct vcpu_svm *svm)
+static void recalc_intercepts(struct vcpu_svm *svm)
 {
-	return svm->nested.vmcb;
+	struct vmcb_control_area *c, *h;
+	struct nested_state *g;
+
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+
+	if (!is_guest_mode(&svm->vcpu))
+		return;
+
+	c = &svm->vmcb->control;
+	h = &svm->nested.hsave->control;
+	g = &svm->nested;
+
+	c->intercept_cr = h->intercept_cr | g->intercept_cr;
+	c->intercept_dr = h->intercept_dr | g->intercept_dr;
+	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+	c->intercept = h->intercept | g->intercept;
+}
+
+static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
+{
+	if (is_guest_mode(&svm->vcpu))
+		return svm->nested.hsave;
+	else
+		return svm->vmcb;
+}
+
+static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	return vmcb->control.intercept_cr & (1U << bit);
+}
+
+static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_dr |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_dr &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_exceptions |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_exceptions &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void set_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept |= (1ULL << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept &= ~(1ULL << bit);
+
+	recalc_intercepts(svm);
 }
 
 static inline void enable_gif(struct vcpu_svm *svm)
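As a side note, here is a minimal sketch, assuming the helpers above behave as their names suggest, of how the VMCB clean bits are meant to flow across one guest entry; this is illustrative only and not code from the patch.

/* Sketch only: lifecycle of vmcb->control.clean around a VMRUN. */
static void example_run_once(struct vcpu_svm *svm)
{
	/* 1. Software touches VMCB state and clears the matching clean bit. */
	svm->vmcb->save.dr7 = 0x400;
	mark_dirty(svm->vmcb, VMCB_DR);

	/* 2. VMRUN: on VMCB_CLEAN-capable hardware the CPU may skip
	 *    re-reading fields whose clean bits are still set. */

	/* 3. After a successful run everything is treated as clean again,
	 *    except the always-dirty fields (interrupt state and CR2). */
	mark_all_clean(svm->vmcb);
}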
@@ -264,11 +410,6 @@
 
 #define MAX_INST_SIZE 15
 
-static inline u32 svm_has(u32 feat)
-{
-	return svm_features & feat;
-}
-
 static inline void clgi(void)
 {
 	asm volatile (__ex(SVM_CLGI));
@@ -284,16 +425,6 @@
 	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
 }
 
-static inline void force_new_asid(struct kvm_vcpu *vcpu)
-{
-	to_svm(vcpu)->asid_generation--;
-}
-
-static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
-{
-	force_new_asid(vcpu);
-}
-
 static int get_npt_level(void)
 {
 #ifdef CONFIG_X86_64
@@ -310,6 +441,7 @@
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
+	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static int is_external_interrupt(u32 info)
@@ -347,7 +479,7 @@
 		svm->next_rip = svm->vmcb->control.next_rip;
 
 	if (!svm->next_rip) {
-		if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
+		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
 				EMULATE_DONE)
 			printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
@@ -374,7 +506,7 @@
 	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
 
-	if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
+	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
 		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
 
 		/*
@@ -670,7 +802,7 @@
 
 	svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
-	if (!svm_has(SVM_FEATURE_NPT))
+	if (!boot_cpu_has(X86_FEATURE_NPT))
 		npt_enabled = false;
 
 	if (npt_enabled && !npt) {
@@ -725,13 +857,15 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 g_tsc_offset = 0;
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		g_tsc_offset = svm->vmcb->control.tsc_offset -
 			       svm->nested.hsave->control.tsc_offset;
 		svm->nested.hsave->control.tsc_offset = offset;
 	}
 
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -739,8 +873,9 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.tsc_offset += adjustment;
-	if (is_nested(svm))
+	if (is_guest_mode(vcpu))
 		svm->nested.hsave->control.tsc_offset += adjustment;
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void init_vmcb(struct vcpu_svm *svm)
@@ -749,62 +884,62 @@
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	svm->vcpu.fpu_active = 1;
+	svm->vcpu.arch.hflags = 0;
 
-	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK;
+	set_cr_intercept(svm, INTERCEPT_CR0_READ);
+	set_cr_intercept(svm, INTERCEPT_CR3_READ);
+	set_cr_intercept(svm, INTERCEPT_CR4_READ);
+	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 
-	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK |
-					INTERCEPT_CR8_MASK;
+	set_dr_intercept(svm, INTERCEPT_DR0_READ);
+	set_dr_intercept(svm, INTERCEPT_DR1_READ);
+	set_dr_intercept(svm, INTERCEPT_DR2_READ);
+	set_dr_intercept(svm, INTERCEPT_DR3_READ);
+	set_dr_intercept(svm, INTERCEPT_DR4_READ);
+	set_dr_intercept(svm, INTERCEPT_DR5_READ);
+	set_dr_intercept(svm, INTERCEPT_DR6_READ);
+	set_dr_intercept(svm, INTERCEPT_DR7_READ);
 
-	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
-					INTERCEPT_DR1_MASK |
-					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK |
-					INTERCEPT_DR4_MASK |
-					INTERCEPT_DR5_MASK |
-					INTERCEPT_DR6_MASK |
-					INTERCEPT_DR7_MASK;
+	set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
 
-	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
-					INTERCEPT_DR1_MASK |
-					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK |
-					INTERCEPT_DR4_MASK |
-					INTERCEPT_DR5_MASK |
-					INTERCEPT_DR6_MASK |
-					INTERCEPT_DR7_MASK;
+	set_exception_intercept(svm, PF_VECTOR);
+	set_exception_intercept(svm, UD_VECTOR);
+	set_exception_intercept(svm, MC_VECTOR);
 
-	control->intercept_exceptions = (1 << PF_VECTOR) |
-					(1 << UD_VECTOR) |
-					(1 << MC_VECTOR);
-
-
-	control->intercept =	(1ULL << INTERCEPT_INTR) |
-				(1ULL << INTERCEPT_NMI) |
-				(1ULL << INTERCEPT_SMI) |
-				(1ULL << INTERCEPT_SELECTIVE_CR0) |
-				(1ULL << INTERCEPT_CPUID) |
-				(1ULL << INTERCEPT_INVD) |
-				(1ULL << INTERCEPT_HLT) |
-				(1ULL << INTERCEPT_INVLPG) |
-				(1ULL << INTERCEPT_INVLPGA) |
-				(1ULL << INTERCEPT_IOIO_PROT) |
-				(1ULL << INTERCEPT_MSR_PROT) |
-				(1ULL << INTERCEPT_TASK_SWITCH) |
-				(1ULL << INTERCEPT_SHUTDOWN) |
-				(1ULL << INTERCEPT_VMRUN) |
-				(1ULL << INTERCEPT_VMMCALL) |
-				(1ULL << INTERCEPT_VMLOAD) |
-				(1ULL << INTERCEPT_VMSAVE) |
-				(1ULL << INTERCEPT_STGI) |
-				(1ULL << INTERCEPT_CLGI) |
-				(1ULL << INTERCEPT_SKINIT) |
-				(1ULL << INTERCEPT_WBINVD) |
-				(1ULL << INTERCEPT_MONITOR) |
-				(1ULL << INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_INTR);
+	set_intercept(svm, INTERCEPT_NMI);
+	set_intercept(svm, INTERCEPT_SMI);
+	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+	set_intercept(svm, INTERCEPT_CPUID);
+	set_intercept(svm, INTERCEPT_INVD);
+	set_intercept(svm, INTERCEPT_HLT);
+	set_intercept(svm, INTERCEPT_INVLPG);
+	set_intercept(svm, INTERCEPT_INVLPGA);
+	set_intercept(svm, INTERCEPT_IOIO_PROT);
+	set_intercept(svm, INTERCEPT_MSR_PROT);
+	set_intercept(svm, INTERCEPT_TASK_SWITCH);
+	set_intercept(svm, INTERCEPT_SHUTDOWN);
+	set_intercept(svm, INTERCEPT_VMRUN);
+	set_intercept(svm, INTERCEPT_VMMCALL);
+	set_intercept(svm, INTERCEPT_VMLOAD);
+	set_intercept(svm, INTERCEPT_VMSAVE);
+	set_intercept(svm, INTERCEPT_STGI);
+	set_intercept(svm, INTERCEPT_CLGI);
+	set_intercept(svm, INTERCEPT_SKINIT);
+	set_intercept(svm, INTERCEPT_WBINVD);
+	set_intercept(svm, INTERCEPT_MONITOR);
+	set_intercept(svm, INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_XSETBV);
 
 	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = __pa(svm->msrpm);
@@ -855,25 +990,27 @@
 	if (npt_enabled) {
 		/* Setup VMCB for Nested Paging */
 		control->nested_ctl = 1;
-		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
-					(1ULL << INTERCEPT_INVLPG));
-		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
-		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
+		clr_intercept(svm, INTERCEPT_TASK_SWITCH);
+		clr_intercept(svm, INTERCEPT_INVLPG);
+		clr_exception_intercept(svm, PF_VECTOR);
+		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
-	force_new_asid(&svm->vcpu);
+	svm->asid_generation = 0;
 
 	svm->nested.vmcb = 0;
 	svm->vcpu.arch.hflags = 0;
 
-	if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
 		control->pause_filter_count = 3000;
-		control->intercept |= (1ULL << INTERCEPT_PAUSE);
+		set_intercept(svm, INTERCEPT_PAUSE);
 	}
 
+	mark_all_dirty(svm->vmcb);
+
 	enable_gif(svm);
 }
 
@@ -990,8 +1127,16 @@
 
 	if (unlikely(cpu != vcpu->cpu)) {
 		svm->asid_generation = 0;
+		mark_all_dirty(svm->vmcb);
 	}
 
+#ifdef CONFIG_X86_64
+	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
+#endif
+	savesegment(fs, svm->host.fs);
+	savesegment(gs, svm->host.gs);
+	svm->host.ldt = kvm_read_ldt();
+
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1002,6 +1147,14 @@
 	int i;
 
 	++vcpu->stat.host_state_reload;
+	kvm_load_ldt(svm->host.ldt);
+#ifdef CONFIG_X86_64
+	loadsegment(fs, svm->host.fs);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	load_gs_index(svm->host.gs);
+#else
+	loadsegment(gs, svm->host.gs);
+#endif
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1021,7 +1174,7 @@
 	switch (reg) {
 	case VCPU_EXREG_PDPTR:
 		BUG_ON(!npt_enabled);
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		break;
 	default:
 		BUG();
@@ -1030,12 +1183,12 @@
 
 static void svm_set_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
+	set_intercept(svm, INTERCEPT_VINTR);
 }
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+	clr_intercept(svm, INTERCEPT_VINTR);
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -1150,6 +1303,7 @@
 
 	svm->vmcb->save.idtr.limit = dt->size;
 	svm->vmcb->save.idtr.base = dt->address ;
+	mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
@@ -1166,19 +1320,23 @@
 
 	svm->vmcb->save.gdtr.limit = dt->size;
 	svm->vmcb->save.gdtr.base = dt->address ;
+	mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void svm_decache_cr3(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
-	struct vmcb *vmcb = svm->vmcb;
 	ulong gcr0 = svm->vcpu.arch.cr0;
 	u64 *hcr0 = &svm->vmcb->save.cr0;
 
@@ -1188,27 +1346,14 @@
 		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
 			| (gcr0 & SVM_CR0_SELECTIVE_MASK);
 
+	mark_dirty(svm->vmcb, VMCB_CR);
 
 	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
-		vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-		vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-		if (is_nested(svm)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-			vmcb->control.intercept_cr_read  |= svm->nested.intercept_cr_read;
-			vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
-		}
+		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	} else {
-		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		if (is_nested(svm)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		}
+		set_cr_intercept(svm, INTERCEPT_CR0_READ);
+		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	}
 }
 
@@ -1216,7 +1361,7 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		/*
 		 * We are here because we run in nested mode, the host kvm
 		 * intercepts cr0 writes but the l1 hypervisor does not.
@@ -1268,6 +1413,7 @@
 	 */
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
+	mark_dirty(svm->vmcb, VMCB_CR);
 	update_cr0_intercept(svm);
 }
 
@@ -1277,13 +1423,14 @@
 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
 	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-		force_new_asid(vcpu);
+		svm_flush_tlb(vcpu);
 
 	vcpu->arch.cr4 = cr4;
 	if (!npt_enabled)
 		cr4 |= X86_CR4_PAE;
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
+	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -1312,26 +1459,25 @@
 			= (svm->vmcb->save.cs.attrib
 			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
+	mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
 static void update_db_intercept(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->control.intercept_exceptions &=
-		~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+	clr_exception_intercept(svm, DB_VECTOR);
+	clr_exception_intercept(svm, BP_VECTOR);
 
 	if (svm->nmi_singlestep)
-		svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
+		set_exception_intercept(svm, DB_VECTOR);
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
 		if (vcpu->guest_debug &
 		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-			svm->vmcb->control.intercept_exceptions |=
-				1 << DB_VECTOR;
+			set_exception_intercept(svm, DB_VECTOR);
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-			svm->vmcb->control.intercept_exceptions |=
-				1 << BP_VECTOR;
+			set_exception_intercept(svm, BP_VECTOR);
 	} else
 		vcpu->guest_debug = 0;
 }
@@ -1345,23 +1491,11 @@
 	else
 		svm->vmcb->save.dr7 = vcpu->arch.dr7;
 
+	mark_dirty(svm->vmcb, VMCB_DR);
+
 	update_db_intercept(vcpu);
 }
 
-static void load_host_msrs(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
-#endif
-}
-
-static void save_host_msrs(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
-#endif
-}
-
 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
 	if (sd->next_asid > sd->max_asid) {
@@ -1372,6 +1506,8 @@
 
 	svm->asid_generation = sd->asid_generation;
 	svm->vmcb->control.asid = sd->next_asid++;
+
+	mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
@@ -1379,20 +1515,40 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.dr7 = value;
+	mark_dirty(svm->vmcb, VMCB_DR);
 }
 
 static int pf_interception(struct vcpu_svm *svm)
 {
-	u64 fault_address;
+	u64 fault_address = svm->vmcb->control.exit_info_2;
 	u32 error_code;
+	int r = 1;
 
-	fault_address  = svm->vmcb->control.exit_info_2;
-	error_code = svm->vmcb->control.exit_info_1;
+	switch (svm->apf_reason) {
+	default:
+		error_code = svm->vmcb->control.exit_info_1;
 
-	trace_kvm_page_fault(fault_address, error_code);
-	if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
-		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		trace_kvm_page_fault(fault_address, error_code);
+		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
+			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+			svm->vmcb->control.insn_bytes,
+			svm->vmcb->control.insn_len);
+		break;
+	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wait(fault_address);
+		local_irq_enable();
+		break;
+	case KVM_PV_REASON_PAGE_READY:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wake(fault_address);
+		local_irq_enable();
+		break;
+	}
+	return r;
 }
 
 static int db_interception(struct vcpu_svm *svm)
@@ -1440,7 +1596,7 @@
 {
 	int er;
 
-	er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
+	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
@@ -1449,21 +1605,8 @@
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 excp;
 
-	if (is_nested(svm)) {
-		u32 h_excp, n_excp;
-
-		h_excp  = svm->nested.hsave->control.intercept_exceptions;
-		n_excp  = svm->nested.intercept_exceptions;
-		h_excp &= ~(1 << NM_VECTOR);
-		excp    = h_excp | n_excp;
-	} else {
-		excp  = svm->vmcb->control.intercept_exceptions;
-		excp &= ~(1 << NM_VECTOR);
-	}
-
-	svm->vmcb->control.intercept_exceptions = excp;
+	clr_exception_intercept(svm, NM_VECTOR);
 
 	svm->vcpu.fpu_active = 1;
 	update_cr0_intercept(svm);
@@ -1570,7 +1713,7 @@
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	if (string || in)
-		return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = io_info >> 16;
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -1624,17 +1767,19 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.nested_cr3 = root;
-	force_new_asid(vcpu);
+	mark_dirty(svm->vmcb, VMCB_NPT);
+	svm_flush_tlb(vcpu);
 }
 
-static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
+static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+				       struct x86_exception *fault)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
 	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
-	svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
+	svm->vmcb->control.exit_info_1 = fault->error_code;
+	svm->vmcb->control.exit_info_2 = fault->address;
 
 	nested_svm_vmexit(svm);
 }
@@ -1680,7 +1825,7 @@
 {
 	int vmexit;
 
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return 0;
 
 	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
@@ -1698,7 +1843,7 @@
 /* This function returns true if it is safe to enable the irq window */
 static inline bool nested_svm_intr(struct vcpu_svm *svm)
 {
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return true;
 
 	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
@@ -1737,7 +1882,7 @@
 /* This function returns true if it is safe to enable the nmi window */
 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
 {
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return true;
 
 	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
@@ -1836,8 +1981,8 @@
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
-		/* When we're shadowing, trap PFs */
-		if (!npt_enabled)
+		/* When we're shadowing, trap PFs, but not async PF */
+		if (!npt_enabled && svm->apf_reason == 0)
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
@@ -1865,27 +2010,15 @@
 	case SVM_EXIT_IOIO:
 		vmexit = nested_svm_intercept_ioio(svm);
 		break;
-	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
-		if (svm->nested.intercept_cr_read & cr_bits)
+	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
+		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
+		if (svm->nested.intercept_cr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
-	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
-		if (svm->nested.intercept_cr_write & cr_bits)
-			vmexit = NESTED_EXIT_DONE;
-		break;
-	}
-	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
-		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
-		if (svm->nested.intercept_dr_read & dr_bits)
-			vmexit = NESTED_EXIT_DONE;
-		break;
-	}
-	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
-		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
-		if (svm->nested.intercept_dr_write & dr_bits)
+	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
+		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
+		if (svm->nested.intercept_dr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
@@ -1893,6 +2026,10 @@
 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
 		if (svm->nested.intercept_exceptions & excp_bits)
 			vmexit = NESTED_EXIT_DONE;
+		/* an async page fault always causes a vmexit */
+		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
+			 svm->apf_reason != 0)
+			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
 	case SVM_EXIT_ERR: {
@@ -1926,10 +2063,8 @@
 	struct vmcb_control_area *dst  = &dst_vmcb->control;
 	struct vmcb_control_area *from = &from_vmcb->control;
 
-	dst->intercept_cr_read    = from->intercept_cr_read;
-	dst->intercept_cr_write   = from->intercept_cr_write;
-	dst->intercept_dr_read    = from->intercept_dr_read;
-	dst->intercept_dr_write   = from->intercept_dr_write;
+	dst->intercept_cr         = from->intercept_cr;
+	dst->intercept_dr         = from->intercept_dr;
 	dst->intercept_exceptions = from->intercept_exceptions;
 	dst->intercept            = from->intercept;
 	dst->iopm_base_pa         = from->iopm_base_pa;
@@ -1970,7 +2105,8 @@
 	if (!nested_vmcb)
 		return 1;
 
-	/* Exit nested SVM mode */
+	/* Exit Guest-Mode */
+	leave_guest_mode(&svm->vcpu);
 	svm->nested.vmcb = 0;
 
 	/* Give the current vmcb to the guest */
@@ -1984,7 +2120,7 @@
 	nested_vmcb->save.idtr   = vmcb->save.idtr;
 	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
 	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
-	nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
+	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
 	nested_vmcb->save.cr2    = vmcb->save.cr2;
 	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
 	nested_vmcb->save.rflags = vmcb->save.rflags;
@@ -2061,6 +2197,8 @@
 	svm->vmcb->save.cpl = 0;
 	svm->vmcb->control.exit_int_info = 0;
 
+	mark_all_dirty(svm->vmcb);
+
 	nested_svm_unmap(page);
 
 	nested_svm_uninit_mmu_context(&svm->vcpu);
@@ -2148,8 +2286,8 @@
 			       nested_vmcb->control.event_inj,
 			       nested_vmcb->control.nested_ctl);
 
-	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
-				    nested_vmcb->control.intercept_cr_write,
+	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
+				    nested_vmcb->control.intercept_cr >> 16,
 				    nested_vmcb->control.intercept_exceptions,
 				    nested_vmcb->control.intercept);
 
@@ -2177,7 +2315,7 @@
 	if (npt_enabled)
 		hsave->save.cr3    = vmcb->save.cr3;
 	else
-		hsave->save.cr3    = svm->vcpu.arch.cr3;
+		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
 
 	copy_vmcb_control_area(hsave, vmcb);
 
@@ -2229,14 +2367,12 @@
 	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
 
 	/* cache intercepts */
-	svm->nested.intercept_cr_read    = nested_vmcb->control.intercept_cr_read;
-	svm->nested.intercept_cr_write   = nested_vmcb->control.intercept_cr_write;
-	svm->nested.intercept_dr_read    = nested_vmcb->control.intercept_dr_read;
-	svm->nested.intercept_dr_write   = nested_vmcb->control.intercept_dr_write;
+	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
+	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
 	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
 	svm->nested.intercept            = nested_vmcb->control.intercept;
 
-	force_new_asid(&svm->vcpu);
+	svm_flush_tlb(&svm->vcpu);
 	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -2245,29 +2381,12 @@
 
 	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
 		/* We only want the cr8 intercept bits of the guest */
-		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 	}
 
 	/* We don't want to see VMMCALLs from a nested guest */
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
-
-	/*
-	 * We don't want a nested guest to be more powerful than the guest, so
-	 * all intercepts are ORed
-	 */
-	svm->vmcb->control.intercept_cr_read |=
-		nested_vmcb->control.intercept_cr_read;
-	svm->vmcb->control.intercept_cr_write |=
-		nested_vmcb->control.intercept_cr_write;
-	svm->vmcb->control.intercept_dr_read |=
-		nested_vmcb->control.intercept_dr_read;
-	svm->vmcb->control.intercept_dr_write |=
-		nested_vmcb->control.intercept_dr_write;
-	svm->vmcb->control.intercept_exceptions |=
-		nested_vmcb->control.intercept_exceptions;
-
-	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+	clr_intercept(svm, INTERCEPT_VMMCALL);
 
 	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
 	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
@@ -2278,11 +2397,21 @@
 
 	nested_svm_unmap(page);
 
-	/* nested_vmcb is our indicator if nested SVM is activated */
+	/* Enter Guest-Mode */
+	enter_guest_mode(&svm->vcpu);
+
+	/*
+	 * Merge guest and host intercepts - must be called with vcpu in
+	 * guest-mode to take effect here
+	 */
+	recalc_intercepts(svm);
+
 	svm->nested.vmcb = vmcb_gpa;
 
 	enable_gif(svm);
 
+	mark_all_dirty(svm->vmcb);
+
 	return true;
 }
 
@@ -2400,6 +2529,8 @@
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 
+	mark_dirty(svm->vmcb, VMCB_INTR);
+
 	return 1;
 }
 
@@ -2426,6 +2557,19 @@
 	return 1;
 }
 
+static int xsetbv_interception(struct vcpu_svm *svm)
+{
+	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+
+	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+		skip_emulated_instruction(&svm->vcpu);
+	}
+
+	return 1;
+}
+
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
@@ -2507,19 +2651,92 @@
 static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+	clr_intercept(svm, INTERCEPT_IRET);
 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
 	return 1;
 }
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+
+	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+	skip_emulated_instruction(&svm->vcpu);
+	return 1;
 }
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+}
+
+#define CR_VALID (1ULL << 63)
+
+static int cr_interception(struct vcpu_svm *svm)
+{
+	int reg, cr;
+	unsigned long val;
+	int err;
+
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+
+	err = 0;
+	if (cr >= 16) { /* mov to cr */
+		cr -= 16;
+		val = kvm_register_read(&svm->vcpu, reg);
+		switch (cr) {
+		case 0:
+			err = kvm_set_cr0(&svm->vcpu, val);
+			break;
+		case 3:
+			err = kvm_set_cr3(&svm->vcpu, val);
+			break;
+		case 4:
+			err = kvm_set_cr4(&svm->vcpu, val);
+			break;
+		case 8:
+			err = kvm_set_cr8(&svm->vcpu, val);
+			break;
+		default:
+			WARN(1, "unhandled write to CR%d", cr);
+			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			return 1;
+		}
+	} else { /* mov from cr */
+		switch (cr) {
+		case 0:
+			val = kvm_read_cr0(&svm->vcpu);
+			break;
+		case 2:
+			val = svm->vcpu.arch.cr2;
+			break;
+		case 3:
+			val = kvm_read_cr3(&svm->vcpu);
+			break;
+		case 4:
+			val = kvm_read_cr4(&svm->vcpu);
+			break;
+		case 8:
+			val = kvm_get_cr8(&svm->vcpu);
+			break;
+		default:
+			WARN(1, "unhandled read from CR%d", cr);
+			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			return 1;
+		}
+		kvm_register_write(&svm->vcpu, reg, val);
+	}
+	kvm_complete_insn_gp(&svm->vcpu, err);
+
+	return 1;
 }
 
 static int cr0_write_interception(struct vcpu_svm *svm)
@@ -2527,7 +2744,7 @@
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	int r;
 
-	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 
 	if (svm->nested.vmexit_rip) {
 		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2536,22 +2753,49 @@
 		svm->nested.vmexit_rip = 0;
 	}
 
-	return r == EMULATE_DONE;
+	return r;
+}
+
+static int dr_interception(struct vcpu_svm *svm)
+{
+	int reg, dr;
+	unsigned long val;
+	int err;
+
+	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+
+	if (dr >= 16) { /* mov to DRn */
+		val = kvm_register_read(&svm->vcpu, reg);
+		kvm_set_dr(&svm->vcpu, dr - 16, val);
+	} else {
+		err = kvm_get_dr(&svm->vcpu, dr, &val);
+		if (!err)
+			kvm_register_write(&svm->vcpu, reg, val);
+	}
+
+	skip_emulated_instruction(&svm->vcpu);
+
+	return 1;
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
+	int r;
 
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
-		return 1;
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+		return r;
 	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-		return 1;
+		return r;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
 	return 0;
 }
@@ -2562,14 +2806,9 @@
 
 	switch (ecx) {
 	case MSR_IA32_TSC: {
-		u64 tsc_offset;
+		struct vmcb *vmcb = get_host_vmcb(svm);
 
-		if (is_nested(svm))
-			tsc_offset = svm->nested.hsave->control.tsc_offset;
-		else
-			tsc_offset = svm->vmcb->control.tsc_offset;
-
-		*data = tsc_offset + native_read_tsc();
+		*data = vmcb->control.tsc_offset + native_read_tsc();
 		break;
 	}
 	case MSR_STAR:
@@ -2714,7 +2953,7 @@
 		svm->vmcb->save.sysenter_esp = data;
 		break;
 	case MSR_IA32_DEBUGCTLMSR:
-		if (!svm_has(SVM_FEATURE_LBRV)) {
+		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
 			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
 					__func__, data);
 			break;
@@ -2723,6 +2962,7 @@
 			return 1;
 
 		svm->vmcb->save.dbgctl = data;
+		mark_dirty(svm->vmcb, VMCB_LBR);
 		if (data & (1ULL<<0))
 			svm_enable_lbrv(svm);
 		else
@@ -2775,6 +3015,7 @@
 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+	mark_dirty(svm->vmcb, VMCB_INTR);
 	/*
 	 * If the user space waits to inject interrupts, exit as soon as
 	 * possible
@@ -2797,31 +3038,31 @@
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
+	[SVM_EXIT_READ_CR0]			= cr_interception,
+	[SVM_EXIT_READ_CR3]			= cr_interception,
+	[SVM_EXIT_READ_CR4]			= cr_interception,
+	[SVM_EXIT_READ_CR8]			= cr_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
-	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR3]			= cr_interception,
+	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
-	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR5]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR6]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR7]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR4]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR6]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR0]			= dr_interception,
+	[SVM_EXIT_READ_DR1]			= dr_interception,
+	[SVM_EXIT_READ_DR2]			= dr_interception,
+	[SVM_EXIT_READ_DR3]			= dr_interception,
+	[SVM_EXIT_READ_DR4]			= dr_interception,
+	[SVM_EXIT_READ_DR5]			= dr_interception,
+	[SVM_EXIT_READ_DR6]			= dr_interception,
+	[SVM_EXIT_READ_DR7]			= dr_interception,
+	[SVM_EXIT_WRITE_DR0]			= dr_interception,
+	[SVM_EXIT_WRITE_DR1]			= dr_interception,
+	[SVM_EXIT_WRITE_DR2]			= dr_interception,
+	[SVM_EXIT_WRITE_DR3]			= dr_interception,
+	[SVM_EXIT_WRITE_DR4]			= dr_interception,
+	[SVM_EXIT_WRITE_DR5]			= dr_interception,
+	[SVM_EXIT_WRITE_DR6]			= dr_interception,
+	[SVM_EXIT_WRITE_DR7]			= dr_interception,
 	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
 	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
@@ -2854,6 +3095,7 @@
 	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
 	[SVM_EXIT_MONITOR]			= invalid_op_interception,
 	[SVM_EXIT_MWAIT]			= invalid_op_interception,
+	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
@@ -2864,10 +3106,10 @@
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	pr_err("VMCB Control Area:\n");
-	pr_err("cr_read:            %04x\n", control->intercept_cr_read);
-	pr_err("cr_write:           %04x\n", control->intercept_cr_write);
-	pr_err("dr_read:            %04x\n", control->intercept_dr_read);
-	pr_err("dr_write:           %04x\n", control->intercept_dr_write);
+	pr_err("cr_read:            %04x\n", control->intercept_cr & 0xffff);
+	pr_err("cr_write:           %04x\n", control->intercept_cr >> 16);
+	pr_err("dr_read:            %04x\n", control->intercept_dr & 0xffff);
+	pr_err("dr_write:           %04x\n", control->intercept_dr >> 16);
 	pr_err("exceptions:         %08x\n", control->intercept_exceptions);
 	pr_err("intercepts:         %016llx\n", control->intercept);
 	pr_err("pause filter count: %d\n", control->pause_filter_count);
@@ -2950,15 +3192,23 @@
 
 }
 
+static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+	*info1 = control->exit_info_1;
+	*info2 = control->exit_info_2;
+}
+
 static int handle_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	trace_kvm_exit(exit_code, vcpu);
+	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
+	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
@@ -2970,7 +3220,7 @@
 		return 1;
 	}
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		int vmexit;
 
 		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
@@ -3033,7 +3283,6 @@
 
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
-	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 	/* FIXME: handle wraparound of asid_generation */
 	if (svm->asid_generation != sd->asid_generation)
 		new_asid(svm, sd);
@@ -3045,7 +3294,7 @@
 
 	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
 	vcpu->arch.hflags |= HF_NMI_MASK;
-	svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+	set_intercept(svm, INTERCEPT_IRET);
 	++vcpu->stat.nmi_injections;
 }
 
@@ -3058,6 +3307,7 @@
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
+	mark_dirty(svm->vmcb, VMCB_INTR);
 }
 
 static void svm_set_irq(struct kvm_vcpu *vcpu)
@@ -3077,14 +3327,14 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
 	if (irr == -1)
 		return;
 
 	if (tpr >= irr)
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -3112,10 +3362,10 @@
 
 	if (masked) {
 		svm->vcpu.arch.hflags |= HF_NMI_MASK;
-		svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+		set_intercept(svm, INTERCEPT_IRET);
 	} else {
 		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-		svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+		clr_intercept(svm, INTERCEPT_IRET);
 	}
 }
 
@@ -3131,7 +3381,7 @@
 
 	ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
 
-	if (is_nested(svm))
+	if (is_guest_mode(vcpu))
 		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
 
 	return ret;
@@ -3177,7 +3427,12 @@
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	force_new_asid(vcpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+	else
+		svm->asid_generation--;
 }
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
@@ -3188,10 +3443,10 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
 		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
 		kvm_set_cr8(vcpu, cr8);
 	}
@@ -3202,7 +3457,7 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 cr8;
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
 	cr8 = kvm_get_cr8(vcpu);
@@ -3289,9 +3544,6 @@
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u16 fs_selector;
-	u16 gs_selector;
-	u16 ldt_selector;
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
@@ -3308,10 +3560,6 @@
 
 	sync_lapic_to_cr8(vcpu);
 
-	save_host_msrs(vcpu);
-	savesegment(fs, fs_selector);
-	savesegment(gs, gs_selector);
-	ldt_selector = kvm_read_ldt();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
@@ -3389,19 +3637,10 @@
 #endif
 		);
 
-	vcpu->arch.cr2 = svm->vmcb->save.cr2;
-	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
-
-	load_host_msrs(vcpu);
-	kvm_load_ldt(ldt_selector);
-	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
-	load_gs_index(gs_selector);
-	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
-	loadsegment(gs, gs_selector);
+	loadsegment(fs, svm->host.fs);
 #endif
 
 	reload_tss(vcpu);
@@ -3410,10 +3649,21 @@
 
 	stgi();
 
+	vcpu->arch.cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+
 	sync_cr8_to_lapic(vcpu);
 
 	svm->next_rip = 0;
 
+	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+
+	/* if exit due to PF check for async PF */
+	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
+		svm->apf_reason = kvm_read_and_reset_pf_reason();
+
 	if (npt_enabled) {
 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
 		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
@@ -3426,6 +3676,8 @@
 	if (unlikely(svm->vmcb->control.exit_code ==
 		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
 		svm_handle_mce(svm);
+
+	mark_all_clean(svm->vmcb);
 }
 
 #undef R
@@ -3435,7 +3687,8 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.cr3 = root;
-	force_new_asid(vcpu);
+	mark_dirty(svm->vmcb, VMCB_CR);
+	svm_flush_tlb(vcpu);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -3443,11 +3696,13 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.nested_cr3 = root;
+	mark_dirty(svm->vmcb, VMCB_NPT);
 
 	/* Also sync guest cr3 here in case we live migrate */
-	svm->vmcb->save.cr3 = vcpu->arch.cr3;
+	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
+	mark_dirty(svm->vmcb, VMCB_CR);
 
-	force_new_asid(vcpu);
+	svm_flush_tlb(vcpu);
 }
 
 static int is_disabled(void)
@@ -3494,10 +3749,6 @@
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
-	case 0x00000001:
-		/* Mask out xsave bit as long as it is not supported by SVM */
-		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
-		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */
@@ -3511,7 +3762,7 @@
 				   additional features */
 
 		/* Support next_rip if host supports it */
-		if (svm_has(SVM_FEATURE_NRIP))
+		if (boot_cpu_has(X86_FEATURE_NRIPS))
 			entry->edx |= SVM_FEATURE_NRIP;
 
 		/* Support NPT for the guest if enabled */
@@ -3571,6 +3822,7 @@
 	{ SVM_EXIT_WBINVD,			"wbinvd" },
 	{ SVM_EXIT_MONITOR,			"monitor" },
 	{ SVM_EXIT_MWAIT,			"mwait" },
+	{ SVM_EXIT_XSETBV,			"xsetbv" },
 	{ SVM_EXIT_NPF,				"npf" },
 	{ -1, NULL }
 };
@@ -3594,9 +3846,7 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
-	if (is_nested(svm))
-		svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+	set_exception_intercept(svm, NM_VECTOR);
 	update_cr0_intercept(svm);
 }
 
@@ -3627,6 +3877,7 @@
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
+	.decache_cr3 = svm_decache_cr3,
 	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
@@ -3667,7 +3918,9 @@
 	.get_tdp_level = get_npt_level,
 	.get_mt_mask = svm_get_mt_mask,
 
+	.get_exit_info = svm_get_exit_info,
 	.exit_reasons_str = svm_exit_reasons_str,
+
 	.get_lpage_level = svm_get_lpage_level,
 
 	.cpuid_update = svm_cpuid_update,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a6544b8..1357d7c 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -178,27 +178,36 @@
 #define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 #define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
 
+#define KVM_ISA_VMX   1
+#define KVM_ISA_SVM   2
+
 /*
  * Tracepoint for kvm guest exit:
  */
 TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu),
-	TP_ARGS(exit_reason, vcpu),
+	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
+	TP_ARGS(exit_reason, vcpu, isa),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	exit_reason	)
 		__field(	unsigned long,	guest_rip	)
+		__field(	u32,	        isa             )
+		__field(	u64,	        info1           )
+		__field(	u64,	        info2           )
 	),
 
 	TP_fast_assign(
 		__entry->exit_reason	= exit_reason;
 		__entry->guest_rip	= kvm_rip_read(vcpu);
+		__entry->isa            = isa;
+		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
+					   &__entry->info2);
 	),
 
-	TP_printk("reason %s rip 0x%lx",
+	TP_printk("reason %s rip 0x%lx info %llx %llx",
 		 ftrace_print_symbols_seq(p, __entry->exit_reason,
 					  kvm_x86_ops->exit_reasons_str),
-		 __entry->guest_rip)
+		 __entry->guest_rip, __entry->info1, __entry->info2)
 );
 
 /*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 81fcbe9..bf89ec2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -69,6 +69,9 @@
 static int __read_mostly vmm_exclusive = 1;
 module_param(vmm_exclusive, bool, S_IRUGO);
 
+static int __read_mostly yield_on_hlt = 1;
+module_param(yield_on_hlt, bool, S_IRUGO);
+
 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
 	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
 #define KVM_GUEST_CR0_MASK						\
@@ -177,6 +180,7 @@
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
+static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -188,6 +192,8 @@
 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
+static bool cpu_has_load_ia32_efer;
+
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
 
@@ -472,7 +478,7 @@
 	u8 error;
 
 	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
-		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
 		      : "cc", "memory");
 	if (error)
 		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
@@ -485,7 +491,7 @@
 	u8 error;
 
 	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-			: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
 			: "cc", "memory");
 	if (error)
 		printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
@@ -565,10 +571,10 @@
 
 static unsigned long vmcs_readl(unsigned long field)
 {
-	unsigned long value;
+	unsigned long value = 0;
 
 	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
-		      : "=a"(value) : "d"(field) : "cc");
+		      : "+a"(value) : "d"(field) : "cc");
 	return value;
 }
 
@@ -661,6 +667,12 @@
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -680,6 +692,14 @@
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_write64(GUEST_IA32_EFER, guest_val);
+		vmcs_write64(HOST_IA32_EFER, host_val);
+		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -1009,6 +1029,17 @@
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
+{
+	/* Ensure that we clear the HLT state in the VMCS.  We don't need to
+	 * explicitly skip the instruction because if the HLT state is set, then
+	 * the instruction is already executing and RIP has already been
+	 * advanced. */
+	if (!yield_on_hlt &&
+	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
+		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -1035,6 +1066,7 @@
 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
 
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
+	vmx_clear_hlt(vcpu);
 }
 
 static bool vmx_rdtscp_supported(void)
@@ -1305,8 +1337,11 @@
 			&& tboot_enabled())
 			return 1;
 		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
-			&& !tboot_enabled())
+			&& !tboot_enabled()) {
+			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
+				"activate TXT before enabling KVM\n");
 			return 1;
+		}
 	}
 
 	return 0;
@@ -1400,6 +1435,14 @@
 	return 0;
 }
 
+static __init bool allow_1_setting(u32 msr, u32 ctl)
+{
+	u32 vmx_msr_low, vmx_msr_high;
+
+	rdmsr(msr, vmx_msr_low, vmx_msr_high);
+	return vmx_msr_high & ctl;
+}
+
 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 {
 	u32 vmx_msr_low, vmx_msr_high;
@@ -1416,7 +1459,7 @@
 				&_pin_based_exec_control) < 0)
 		return -EIO;
 
-	min = CPU_BASED_HLT_EXITING |
+	min =
 #ifdef CONFIG_X86_64
 	      CPU_BASED_CR8_LOAD_EXITING |
 	      CPU_BASED_CR8_STORE_EXITING |
@@ -1429,6 +1472,10 @@
 	      CPU_BASED_MWAIT_EXITING |
 	      CPU_BASED_MONITOR_EXITING |
 	      CPU_BASED_INVLPG_EXITING;
+
+	if (yield_on_hlt)
+		min |= CPU_BASED_HLT_EXITING;
+
 	opt = CPU_BASED_TPR_SHADOW |
 	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -1510,6 +1557,12 @@
 	vmcs_conf->vmexit_ctrl         = _vmexit_control;
 	vmcs_conf->vmentry_ctrl        = _vmentry_control;
 
+	cpu_has_load_ia32_efer =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_EFER)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				   VM_EXIT_LOAD_IA32_EFER);
+
 	return 0;
 }
 
@@ -1683,9 +1736,13 @@
 	save->limit = vmcs_read32(sf->limit);
 	save->ar = vmcs_read32(sf->ar_bytes);
 	vmcs_write16(sf->selector, save->base >> 4);
-	vmcs_write32(sf->base, save->base & 0xfffff);
+	vmcs_write32(sf->base, save->base & 0xffff0);
 	vmcs_write32(sf->limit, 0xffff);
 	vmcs_write32(sf->ar_bytes, 0xf3);
+	if (save->base & 0xf)
+		printk_once(KERN_WARNING "kvm: segment base is not paragraph"
+			    " aligned when entering protected mode (seg=%d)",
+			    seg);
 }
 
 static void enter_rmode(struct kvm_vcpu *vcpu)
@@ -1814,6 +1871,13 @@
 	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
 }
 
+static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept && is_paging(vcpu))
+		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+}
+
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
@@ -1857,6 +1921,7 @@
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
+	vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1937,7 +2002,7 @@
 	if (enable_ept) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
-		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
+		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
 			vcpu->kvm->arch.ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
@@ -2725,7 +2790,7 @@
 	vmcs_writel(GUEST_IDTR_BASE, 0);
 	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
 
-	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
+	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
 	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
 
@@ -2787,6 +2852,10 @@
 		return;
 	}
 
+	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+		enable_irq_window(vcpu);
+		return;
+	}
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
@@ -2814,6 +2883,7 @@
 	} else
 		intr |= INTR_TYPE_EXT_INTR;
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
+	vmx_clear_hlt(vcpu);
 }
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
@@ -2841,6 +2911,7 @@
 	}
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
+	vmx_clear_hlt(vcpu);
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -2849,7 +2920,8 @@
 		return 0;
 
 	return	!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-			(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
+		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
+		   | GUEST_INTR_STATE_NMI));
 }
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -2910,7 +2982,7 @@
 	 * Cause the #SS fault with 0 error code in VM86 mode.
 	 */
 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
-		if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
+		if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
 			return 1;
 	/*
 	 * Forward all other exceptions that are valid in real mode.
@@ -3007,7 +3079,7 @@
 	}
 
 	if (is_invalid_opcode(intr_info)) {
-		er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
+		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 		if (er != EMULATE_DONE)
 			kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
@@ -3026,7 +3098,7 @@
 
 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
-		return kvm_mmu_page_fault(vcpu, cr2, error_code);
+		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
 	}
 
 	if (vmx->rmode.vm86_active &&
@@ -3098,7 +3170,7 @@
 	++vcpu->stat.io_exits;
 
 	if (string || in)
-		return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = exit_qualification >> 16;
 	size = (exit_qualification & 7) + 1;
@@ -3118,14 +3190,6 @@
 	hypercall[2] = 0xc1;
 }
 
-static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
-{
-	if (err)
-		kvm_inject_gp(vcpu, 0);
-	else
-		skip_emulated_instruction(vcpu);
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -3143,21 +3207,21 @@
 		switch (cr) {
 		case 0:
 			err = kvm_set_cr0(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
 			err = kvm_set_cr3(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
 			err = kvm_set_cr4(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 8: {
 				u8 cr8_prev = kvm_get_cr8(vcpu);
 				u8 cr8 = kvm_register_read(vcpu, reg);
-				kvm_set_cr8(vcpu, cr8);
-				skip_emulated_instruction(vcpu);
+				err = kvm_set_cr8(vcpu, cr8);
+				kvm_complete_insn_gp(vcpu, err);
 				if (irqchip_in_kernel(vcpu->kvm))
 					return 1;
 				if (cr8_prev <= cr8)
@@ -3176,8 +3240,9 @@
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
-			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-			trace_kvm_cr_read(cr, vcpu->arch.cr3);
+			val = kvm_read_cr3(vcpu);
+			kvm_register_write(vcpu, reg, val);
+			trace_kvm_cr_read(cr, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
@@ -3349,6 +3414,11 @@
 	return 1;
 }
 
+static int handle_invd(struct kvm_vcpu *vcpu)
+{
+	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+}
+
 static int handle_invlpg(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -3377,7 +3447,7 @@
 
 static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
-	return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_task_switch(struct kvm_vcpu *vcpu)
@@ -3476,7 +3546,7 @@
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	trace_kvm_page_fault(gpa, exit_qualification);
-	return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
+	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
 }
 
 static u64 ept_rsvd_mask(u64 spte, int level)
@@ -3592,7 +3662,7 @@
 		    && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
 			return handle_interrupt_window(&vmx->vcpu);
 
-		err = emulate_instruction(vcpu, 0, 0, 0);
+		err = emulate_instruction(vcpu, 0);
 
 		if (err == EMULATE_DO_MMIO) {
 			ret = 0;
@@ -3649,6 +3719,7 @@
 	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
 	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
 	[EXIT_REASON_HLT]                     = handle_halt,
+	[EXIT_REASON_INVD]		      = handle_invd,
 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
 	[EXIT_REASON_VMCALL]                  = handle_vmcall,
 	[EXIT_REASON_VMCLEAR]	              = handle_vmx_insn,
@@ -3676,6 +3747,12 @@
 static const int kvm_vmx_max_exit_handlers =
 	ARRAY_SIZE(kvm_vmx_exit_handlers);
 
+static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	*info1 = vmcs_readl(EXIT_QUALIFICATION);
+	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
+}
+
 /*
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
@@ -3686,17 +3763,12 @@
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	trace_kvm_exit(exit_reason, vcpu);
+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required && emulate_invalid_guest_state)
 		return handle_invalid_guest_state(vcpu);
 
-	/* Access CR3 don't cause VMExit in paging mode, so we need
-	 * to sync with guest real CR3. */
-	if (enable_ept && is_paging(vcpu))
-		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-
 	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -4013,7 +4085,8 @@
 	      );
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-				  | (1 << VCPU_EXREG_PDPTR));
+				  | (1 << VCPU_EXREG_PDPTR)
+				  | (1 << VCPU_EXREG_CR3));
 	vcpu->arch.regs_dirty = 0;
 
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
@@ -4280,6 +4353,7 @@
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
+	.decache_cr3 = vmx_decache_cr3,
 	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
@@ -4320,7 +4394,9 @@
 	.get_tdp_level = get_ept_level,
 	.get_mt_mask = vmx_get_mt_mask,
 
+	.get_exit_info = vmx_get_exit_info,
 	.exit_reasons_str = vmx_exit_reasons_str,
+
 	.get_lpage_level = vmx_get_lpage_level,
 
 	.cpuid_update = vmx_cpuid_update,
@@ -4396,8 +4472,6 @@
 
 	if (enable_ept) {
 		bypass_guest_pf = 0;
-		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 				VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 46a368c..bcc0efc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
+#include <linux/hash.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -155,6 +156,13 @@
 
 u64 __read_mostly host_xcr0;
 
+static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
+{
+	int i;
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+		vcpu->arch.apf.gfns[i] = ~0;
+}
+
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -326,23 +334,28 @@
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
-	unsigned error_code = vcpu->arch.fault.error_code;
+	if (err)
+		kvm_inject_gp(vcpu, 0);
+	else
+		kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
 
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+{
 	++vcpu->stat.pf_guest;
-	vcpu->arch.cr2 = vcpu->arch.fault.address;
-	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+	vcpu->arch.cr2 = fault->address;
+	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu.inject_page_fault(vcpu);
-
-	vcpu->arch.fault.nested = false;
+		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -460,8 +473,8 @@
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		return true;
 
-	gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-	offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)
@@ -506,12 +519,15 @@
 		} else
 #endif
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-						 vcpu->arch.cr3))
+						 kvm_read_cr3(vcpu)))
 			return 1;
 	}
 
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 
+	if ((cr0 ^ old_cr0) & X86_CR0_PG)
+		kvm_clear_async_pf_completion_queue(vcpu);
+
 	if ((cr0 ^ old_cr0) & update_bits)
 		kvm_mmu_reset_context(vcpu);
 	return 0;
@@ -595,7 +611,8 @@
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+				   kvm_read_cr3(vcpu)))
 		return 1;
 
 	if (cr4 & X86_CR4_VMXE)
@@ -615,7 +632,7 @@
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
 		kvm_mmu_flush_tlb(vcpu);
 		return 0;
@@ -650,12 +667,13 @@
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
 		return 1;
 	vcpu->arch.cr3 = cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 	vcpu->arch.mmu.new_cr3(vcpu);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS)
 		return 1;
@@ -665,12 +683,6 @@
 		vcpu->arch.cr8 = cr8;
 	return 0;
 }
-
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
-	if (__kvm_set_cr8(vcpu, cr8))
-		kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
@@ -775,12 +787,12 @@
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	7
+#define KVM_SAVE_MSRS_BEGIN	8
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_APIC_ASSIST_PAGE,
+	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -830,7 +842,6 @@
 	kvm_x86_ops->set_efer(vcpu, efer);
 
 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
-	kvm_mmu_reset_context(vcpu);
 
 	/* Update reserved bits */
 	if ((efer ^ old_efer) & EFER_NX)
@@ -1418,6 +1429,30 @@
 	return 0;
 }
 
+static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+{
+	gpa_t gpa = data & ~0x3f;
+
+	/* Bits 2:5 are reserved, should be zero */
+	if (data & 0x3c)
+		return 1;
+
+	vcpu->arch.apf.msr_val = data;
+
+	if (!(data & KVM_ASYNC_PF_ENABLED)) {
+		kvm_clear_async_pf_completion_queue(vcpu);
+		kvm_async_pf_hash_reset(vcpu);
+		return 0;
+	}
+
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+		return 1;
+
+	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+	kvm_async_pf_wakeup_all(vcpu);
+	return 0;
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
@@ -1499,6 +1534,10 @@
 		}
 		break;
 	}
+	case MSR_KVM_ASYNC_PF_EN:
+		if (kvm_pv_enable_async_pf(vcpu, data))
+			return 1;
+		break;
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1775,6 +1814,9 @@
 	case MSR_KVM_SYSTEM_TIME_NEW:
 		data = vcpu->arch.time;
 		break;
+	case MSR_KVM_ASYNC_PF_EN:
+		data = vcpu->arch.apf.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
@@ -1904,6 +1946,7 @@
 	case KVM_CAP_NOP_IO_DELAY:
 	case KVM_CAP_MP_STATE:
 	case KVM_CAP_SYNC_MMU:
+	case KVM_CAP_USER_NMI:
 	case KVM_CAP_REINJECT_CONTROL:
 	case KVM_CAP_IRQ_INJECT_STATUS:
 	case KVM_CAP_ASSIGN_DEV_IRQ:
@@ -1922,6 +1965,7 @@
 	case KVM_CAP_DEBUGREGS:
 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
 	case KVM_CAP_XSAVE:
+	case KVM_CAP_ASYNC_PF:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -2185,6 +2229,11 @@
 	return r;
 }
 
+static void cpuid_mask(u32 *word, int wordnum)
+{
+	*word &= boot_cpu_data.x86_capability[wordnum];
+}
+
 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			   u32 index)
 {
@@ -2259,7 +2308,9 @@
 		break;
 	case 1:
 		entry->edx &= kvm_supported_word0_x86_features;
+		cpuid_mask(&entry->edx, 0);
 		entry->ecx &= kvm_supported_word4_x86_features;
+		cpuid_mask(&entry->ecx, 4);
 		/* we support x2apic emulation even if host does not support
 		 * it since we emulate x2apic in software */
 		entry->ecx |= F(X2APIC);
@@ -2350,7 +2401,9 @@
 		break;
 	case 0x80000001:
 		entry->edx &= kvm_supported_word1_x86_features;
+		cpuid_mask(&entry->edx, 1);
 		entry->ecx &= kvm_supported_word6_x86_features;
+		cpuid_mask(&entry->ecx, 6);
 		break;
 	}
 
@@ -3169,20 +3222,18 @@
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		r = -ENOMEM;
-		dirty_bitmap = vmalloc(n);
-		if (!dirty_bitmap)
-			goto out;
+		dirty_bitmap = memslot->dirty_bitmap_head;
+		if (memslot->dirty_bitmap == dirty_bitmap)
+			dirty_bitmap += n / sizeof(long);
 		memset(dirty_bitmap, 0, n);
 
 		r = -ENOMEM;
 		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-		if (!slots) {
-			vfree(dirty_bitmap);
+		if (!slots)
 			goto out;
-		}
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+		slots->generation++;
 
 		old_slots = kvm->memslots;
 		rcu_assign_pointer(kvm->memslots, slots);
@@ -3195,11 +3246,8 @@
 		spin_unlock(&kvm->mmu_lock);
 
 		r = -EFAULT;
-		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-			vfree(dirty_bitmap);
+		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
 			goto out;
-		}
-		vfree(dirty_bitmap);
 	} else {
 		r = -EFAULT;
 		if (clear_user(log->dirty_bitmap, n))
@@ -3266,8 +3314,10 @@
 		if (vpic) {
 			r = kvm_ioapic_init(kvm);
 			if (r) {
+				mutex_lock(&kvm->slots_lock);
 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
 							  &vpic->dev);
+				mutex_unlock(&kvm->slots_lock);
 				kfree(vpic);
 				goto create_irqchip_unlock;
 			}
@@ -3278,10 +3328,12 @@
 		smp_wmb();
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			mutex_lock(&kvm->irq_lock);
 			kvm_ioapic_destroy(kvm);
 			kvm_destroy_pic(kvm);
 			mutex_unlock(&kvm->irq_lock);
+			mutex_unlock(&kvm->slots_lock);
 		}
 	create_irqchip_unlock:
 		mutex_unlock(&kvm->lock);
@@ -3557,63 +3609,63 @@
 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
 	gpa_t t_gpa;
-	u32 error;
+	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
-	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.nested = true;
+	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
 
 	return t_gpa;
 }
 
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
- gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+ gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 				      struct kvm_vcpu *vcpu, u32 access,
-				      u32 *error)
+				      struct x86_exception *exception)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-							    error);
+							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
-		if (gpa == UNMAPPED_GVA) {
-			r = X86EMUL_PROPAGATE_FAULT;
-			goto out;
-		}
+		if (gpa == UNMAPPED_GVA)
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3630,31 +3682,35 @@
 
 /* used for instruction fetching */
 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-				struct kvm_vcpu *vcpu, u32 *error)
+				struct kvm_vcpu *vcpu,
+				struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-					  access | PFERR_FETCH_MASK, error);
+					  access | PFERR_FETCH_MASK,
+					  exception);
 }
 
 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu, u32 *error)
+			       struct kvm_vcpu *vcpu,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
-					  error);
+					  exception);
 }
 
 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu, u32 *error)
+				      struct kvm_vcpu *vcpu,
+				      struct x86_exception *exception)
 {
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 }
 
 static int kvm_write_guest_virt_system(gva_t addr, void *val,
 				       unsigned int bytes,
 				       struct kvm_vcpu *vcpu,
-				       u32 *error)
+				       struct x86_exception *exception)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
@@ -3662,15 +3718,13 @@
 	while (bytes) {
 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
 							     PFERR_WRITE_MASK,
-							     error);
+							     exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
-		if (gpa == UNMAPPED_GVA) {
-			r = X86EMUL_PROPAGATE_FAULT;
-			goto out;
-		}
+		if (gpa == UNMAPPED_GVA)
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3688,7 +3742,7 @@
 static int emulator_read_emulated(unsigned long addr,
 				  void *val,
 				  unsigned int bytes,
-				  unsigned int *error_code,
+				  struct x86_exception *exception,
 				  struct kvm_vcpu *vcpu)
 {
 	gpa_t                 gpa;
@@ -3701,7 +3755,7 @@
 		return X86EMUL_CONTINUE;
 	}
 
-	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
@@ -3710,8 +3764,8 @@
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		goto mmio;
 
-	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
-				== X86EMUL_CONTINUE)
+	if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
+	    == X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
 
 mmio:
@@ -3735,7 +3789,7 @@
 }
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-			  const void *val, int bytes)
+			const void *val, int bytes)
 {
 	int ret;
 
@@ -3749,12 +3803,12 @@
 static int emulator_write_emulated_onepage(unsigned long addr,
 					   const void *val,
 					   unsigned int bytes,
-					   unsigned int *error_code,
+					   struct x86_exception *exception,
 					   struct kvm_vcpu *vcpu)
 {
 	gpa_t                 gpa;
 
-	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
+	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
@@ -3787,7 +3841,7 @@
 int emulator_write_emulated(unsigned long addr,
 			    const void *val,
 			    unsigned int bytes,
-			    unsigned int *error_code,
+			    struct x86_exception *exception,
 			    struct kvm_vcpu *vcpu)
 {
 	/* Crossing a page boundary? */
@@ -3795,7 +3849,7 @@
 		int rc, now;
 
 		now = -addr & ~PAGE_MASK;
-		rc = emulator_write_emulated_onepage(addr, val, now, error_code,
+		rc = emulator_write_emulated_onepage(addr, val, now, exception,
 						     vcpu);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
@@ -3803,7 +3857,7 @@
 		val += now;
 		bytes -= now;
 	}
-	return emulator_write_emulated_onepage(addr, val, bytes, error_code,
+	return emulator_write_emulated_onepage(addr, val, bytes, exception,
 					       vcpu);
 }
 
@@ -3821,7 +3875,7 @@
 				     const void *old,
 				     const void *new,
 				     unsigned int bytes,
-				     unsigned int *error_code,
+				     struct x86_exception *exception,
 				     struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
@@ -3879,7 +3933,7 @@
 emul_write:
 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
 
-	return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
+	return emulator_write_emulated(addr, new, bytes, exception, vcpu);
 }
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
@@ -3904,7 +3958,7 @@
 	if (vcpu->arch.pio.count)
 		goto data_avail;
 
-	trace_kvm_pio(0, port, size, 1);
+	trace_kvm_pio(0, port, size, count);
 
 	vcpu->arch.pio.port = port;
 	vcpu->arch.pio.in = 1;
@@ -3932,7 +3986,7 @@
 			      const void *val, unsigned int count,
 			      struct kvm_vcpu *vcpu)
 {
-	trace_kvm_pio(1, port, size, 1);
+	trace_kvm_pio(1, port, size, count);
 
 	vcpu->arch.pio.port = port;
 	vcpu->arch.pio.in = 0;
@@ -3973,13 +4027,15 @@
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
-		preempt_disable();
+		int cpu = get_cpu();
+
+		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
-		preempt_enable();
+		put_cpu();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
-	}
-	wbinvd();
+	} else
+		wbinvd();
 	return X86EMUL_CONTINUE;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -4019,7 +4075,7 @@
 		value = vcpu->arch.cr2;
 		break;
 	case 3:
-		value = vcpu->arch.cr3;
+		value = kvm_read_cr3(vcpu);
 		break;
 	case 4:
 		value = kvm_read_cr4(vcpu);
@@ -4053,7 +4109,7 @@
 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
 		break;
 	case 8:
-		res = __kvm_set_cr8(vcpu, val & 0xfUL);
+		res = kvm_set_cr8(vcpu, val);
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
@@ -4206,12 +4262,13 @@
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-	if (ctxt->exception == PF_VECTOR)
-		kvm_propagate_fault(vcpu);
-	else if (ctxt->error_code_valid)
-		kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
+	if (ctxt->exception.vector == PF_VECTOR)
+		kvm_propagate_fault(vcpu, &ctxt->exception);
+	else if (ctxt->exception.error_code_valid)
+		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
+				      ctxt->exception.error_code);
 	else
-		kvm_queue_exception(vcpu, ctxt->exception);
+		kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
@@ -4267,13 +4324,19 @@
 
 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 {
+	int r = EMULATE_DONE;
+
 	++vcpu->stat.insn_emulation_fail;
 	trace_kvm_emulate_insn_failed(vcpu);
-	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-	vcpu->run->internal.ndata = 0;
+	if (!is_guest_mode(vcpu)) {
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		vcpu->run->internal.ndata = 0;
+		r = EMULATE_FAIL;
+	}
 	kvm_queue_exception(vcpu, UD_VECTOR);
-	return EMULATE_FAIL;
+
+	return r;
 }
 
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
@@ -4302,10 +4365,11 @@
 	return false;
 }
 
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2,
-			u16 error_code,
-			int emulation_type)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+			    unsigned long cr2,
+			    int emulation_type,
+			    void *insn,
+			    int insn_len)
 {
 	int r;
 	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4323,10 +4387,10 @@
 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
 		init_emulate_ctxt(vcpu);
 		vcpu->arch.emulate_ctxt.interruptibility = 0;
-		vcpu->arch.emulate_ctxt.exception = -1;
+		vcpu->arch.emulate_ctxt.have_exception = false;
 		vcpu->arch.emulate_ctxt.perm_ok = false;
 
-		r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
 		if (r == X86EMUL_PROPAGATE_FAULT)
 			goto done;
 
@@ -4389,7 +4453,7 @@
 	}
 
 done:
-	if (vcpu->arch.emulate_ctxt.exception >= 0) {
+	if (vcpu->arch.emulate_ctxt.have_exception) {
 		inject_emulated_exception(vcpu);
 		r = EMULATE_DONE;
 	} else if (vcpu->arch.pio.count) {
@@ -4413,7 +4477,7 @@
 
 	return r;
 }
-EXPORT_SYMBOL_GPL(emulate_instruction);
+EXPORT_SYMBOL_GPL(x86_emulate_instruction);
 
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 {
@@ -4653,7 +4717,6 @@
 
 	kvm_x86_ops = ops;
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
 			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
@@ -5116,6 +5179,12 @@
 			vcpu->fpu_active = 0;
 			kvm_x86_ops->fpu_deactivate(vcpu);
 		}
+		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+			/* Page is swapped out. Do synthetic halt */
+			vcpu->arch.apf.halted = true;
+			r = 1;
+			goto out;
+		}
 	}
 
 	r = kvm_mmu_reload(vcpu);
@@ -5244,7 +5313,8 @@
 
 	r = 1;
 	while (r > 0) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		    !vcpu->arch.apf.halted)
 			r = vcpu_enter_guest(vcpu);
 		else {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -5257,6 +5327,7 @@
 					vcpu->arch.mp_state =
 						KVM_MP_STATE_RUNNABLE;
 				case KVM_MP_STATE_RUNNABLE:
+					vcpu->arch.apf.halted = false;
 					break;
 				case KVM_MP_STATE_SIPI_RECEIVED:
 				default:
@@ -5278,6 +5349,9 @@
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
 		}
+
+		kvm_check_async_pf_completion(vcpu);
+
 		if (signal_pending(current)) {
 			r = -EINTR;
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
@@ -5302,6 +5376,9 @@
 	int r;
 	sigset_t sigsaved;
 
+	if (!tsk_used_math(current) && init_fpu(current))
+		return -ENOMEM;
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -5313,8 +5390,12 @@
 	}
 
 	/* re-sync apic's tpr */
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_set_cr8(vcpu, kvm_run->cr8);
+	if (!irqchip_in_kernel(vcpu->kvm)) {
+		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
+			r = -EINVAL;
+			goto out;
+		}
+	}
 
 	if (vcpu->arch.pio.count || vcpu->mmio_needed) {
 		if (vcpu->mmio_needed) {
@@ -5323,7 +5404,7 @@
 			vcpu->mmio_needed = 0;
 		}
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
+		r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 		if (r != EMULATE_DONE) {
 			r = 0;
@@ -5436,7 +5517,7 @@
 
 	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
-	sregs->cr3 = vcpu->arch.cr3;
+	sregs->cr3 = kvm_read_cr3(vcpu);
 	sregs->cr4 = kvm_read_cr4(vcpu);
 	sregs->cr8 = kvm_get_cr8(vcpu);
 	sregs->efer = vcpu->arch.efer;
@@ -5504,8 +5585,9 @@
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
-	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -5522,7 +5604,7 @@
 	if (sregs->cr4 & X86_CR4_OSXSAVE)
 		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		mmu_reset_needed = 1;
 	}
 
@@ -5773,6 +5855,8 @@
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.apf.msr_val = 0;
+
 	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
@@ -5792,6 +5876,11 @@
 	vcpu->arch.dr7 = DR7_FIXED_1;
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	vcpu->arch.apf.msr_val = 0;
+
+	kvm_clear_async_pf_completion_queue(vcpu);
+	kvm_async_pf_hash_reset(vcpu);
+	vcpu->arch.apf.halted = false;
 
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
@@ -5881,6 +5970,8 @@
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	kvm_async_pf_hash_reset(vcpu);
+
 	return 0;
 fail_free_mce_banks:
 	kfree(vcpu->arch.mce_banks);
@@ -5906,13 +5997,8 @@
 	free_page((unsigned long)vcpu->arch.pio_data);
 }
 
-struct  kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-
-	if (!kvm)
-		return ERR_PTR(-ENOMEM);
-
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
@@ -5921,7 +6007,7 @@
 
 	spin_lock_init(&kvm->arch.tsc_write_lock);
 
-	return kvm;
+	return 0;
 }
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -5939,8 +6025,10 @@
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	kvm_for_each_vcpu(i, vcpu, kvm)
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_clear_async_pf_completion_queue(vcpu);
 		kvm_unload_vcpu_mmu(vcpu);
+	}
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_arch_vcpu_free(vcpu);
 
@@ -5964,13 +6052,10 @@
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
 	kvm_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
 	if (kvm->arch.apic_access_page)
 		put_page(kvm->arch.apic_access_page);
 	if (kvm->arch.ept_identity_pagetable)
 		put_page(kvm->arch.ept_identity_pagetable);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -6051,7 +6136,9 @@
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted)
+		|| !list_empty_careful(&vcpu->async_pf.done)
 		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
 		|| vcpu->arch.nmi_pending ||
 		(kvm_arch_interrupt_allowed(vcpu) &&
@@ -6110,6 +6197,147 @@
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+	int r;
+
+	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+	      is_error_page(work->page))
+		return;
+
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		return;
+
+	if (!vcpu->arch.mmu.direct_map &&
+	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+		return;
+
+	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+}
+
+static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
+{
+	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
+}
+
+static inline u32 kvm_async_pf_next_probe(u32 key)
+{
+	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+}
+
+static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	while (vcpu->arch.apf.gfns[key] != ~0)
+		key = kvm_async_pf_next_probe(key);
+
+	vcpu->arch.apf.gfns[key] = gfn;
+}
+
+static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	int i;
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+		     (vcpu->arch.apf.gfns[key] != gfn &&
+		      vcpu->arch.apf.gfns[key] != ~0); i++)
+		key = kvm_async_pf_next_probe(key);
+
+	return key;
+}
+
+bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
+}
+
+static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 i, j, k;
+
+	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
+	while (true) {
+		vcpu->arch.apf.gfns[i] = ~0;
+		do {
+			j = kvm_async_pf_next_probe(j);
+			if (vcpu->arch.apf.gfns[j] == ~0)
+				return;
+			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
+			/*
+			 * k lies cyclically in ]i,j]
+			 * |    i.k.j |
+			 * |....j i.k.| or  |.k..j i...|
+			 */
+		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
+		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
+		i = j;
+	}
+}
+
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+{
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+				      sizeof(val));
+}
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work)
+{
+	struct x86_exception fault;
+
+	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
+	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+	    (vcpu->arch.apf.send_user_only &&
+	     kvm_x86_ops->get_cpl(vcpu) == 0))
+		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
+	}
+}
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work)
+{
+	struct x86_exception fault;
+
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
+	if (is_error_page(work->page))
+		work->arch.token = ~0; /* broadcast wakeup */
+	else
+		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
+	}
+	vcpu->arch.apf.halted = false;
+}
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+		return true;
+	else
+		return !kvm_event_needs_reinjection(vcpu) &&
+			kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 3871804..6e121a2 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@
 	bool "Lguest guest support"
 	select PARAVIRT
 	depends on X86_32
+	select VIRTUALIZATION
 	select VIRTIO
 	select VIRTIO_RING
 	select VIRTIO_CONSOLE
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4996cf5..eba687f0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -824,7 +824,7 @@
 
 	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
-		__get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
+		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
 		if (i != SYSCALL_VECTOR)
 			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
 	}
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 08a0069..f21962c 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -27,6 +27,7 @@
 #include <asm/amd_nb.h>
 
 static struct bootnode __initdata nodes[8];
+static unsigned char __initdata nodeids[8];
 static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
 
 static __init int find_northbridge(void)
@@ -68,19 +69,6 @@
 #endif
 }
 
-int __init amd_get_nodes(struct bootnode *physnodes)
-{
-	int i;
-	int ret = 0;
-
-	for_each_node_mask(i, nodes_parsed) {
-		physnodes[ret].start = nodes[i].start;
-		physnodes[ret].end = nodes[i].end;
-		ret++;
-	}
-	return ret;
-}
-
 int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long start = PFN_PHYS(start_pfn);
@@ -113,7 +101,7 @@
 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
 
-		nodeid = limit & 7;
+		nodeids[i] = nodeid = limit & 7;
 		if ((base & 3) == 0) {
 			if (i < numnodes)
 				pr_info("Skipping disabled node %d\n", i);
@@ -193,6 +181,76 @@
 	return 0;
 }
 
+#ifdef CONFIG_NUMA_EMU
+static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
+	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
+void __init amd_get_nodes(struct bootnode *physnodes)
+{
+	int i;
+
+	for_each_node_mask(i, nodes_parsed) {
+		physnodes[i].start = nodes[i].start;
+		physnodes[i].end = nodes[i].end;
+	}
+}
+
+static int __init find_node_by_addr(unsigned long addr)
+{
+	int ret = NUMA_NO_NODE;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		if (addr >= nodes[i].start && addr < nodes[i].end) {
+			ret = i;
+			break;
+		}
+	return ret;
+}
+
+/*
+ * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
+ * set up to represent the physical topology but reflect the emulated
+ * environment.  For each emulated node, the real node which it appears on is
+ * found and a fake pxm to nid mapping is created which mirrors the actual
+ * locality.  node_distance() then represents the correct distances between
+ * emulated nodes by using the fake acpi mappings to pxms.
+ */
+void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
+{
+	unsigned int bits;
+	unsigned int cores;
+	unsigned int apicid_base = 0;
+	int i;
+
+	bits = boot_cpu_data.x86_coreid_bits;
+	cores = 1 << bits;
+	early_get_boot_cpu_id();
+	if (boot_cpu_physical_apicid > 0)
+		apicid_base = boot_cpu_physical_apicid;
+
+	for (i = 0; i < nr_nodes; i++) {
+		int index;
+		int nid;
+		int j;
+
+		nid = find_node_by_addr(nodes[i].start);
+		if (nid == NUMA_NO_NODE)
+			continue;
+
+		index = nodeids[nid] << bits;
+		if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
+			for (j = apicid_base; j < cores + apicid_base; j++)
+				fake_apicid_to_node[index + j] = i;
+#ifdef CONFIG_ACPI_NUMA
+		__acpi_map_pxm_to_node(nid, i);
+#endif
+	}
+	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
+}
+#endif /* CONFIG_NUMA_EMU */
+
 int __init amd_scan_nodes(void)
 {
 	unsigned int bits;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7d90ceb..20e3f87 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -229,15 +229,14 @@
 	for (address = VMALLOC_START & PMD_MASK;
 	     address >= TASK_SIZE && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
-
-		unsigned long flags;
 		struct page *page;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
 			pmd_t *ret;
 
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
@@ -247,7 +246,7 @@
 			if (!ret)
 				break;
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
@@ -828,6 +827,13 @@
 	       unsigned long address, unsigned int fault)
 {
 	if (fault & VM_FAULT_OOM) {
+		/* Kernel mode? Handle exceptions or die: */
+		if (!(error_code & PF_USER)) {
+			up_read(&current->mm->mmap_sem);
+			no_context(regs, error_code, address);
+			return;
+		}
+
 		out_of_memory(regs, error_code, address);
 	} else {
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 738e659..dbe34b9 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -8,6 +8,7 @@
 #include <linux/mm.h>
 #include <linux/vmstat.h>
 #include <linux/highmem.h>
+#include <linux/swap.h>
 
 #include <asm/pgtable.h>
 
@@ -89,6 +90,7 @@
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
 		get_page(page);
+		SetPageReferenced(page);
 		pages[*nr] = page;
 		(*nr)++;
 
@@ -103,6 +105,17 @@
 	VM_BUG_ON(page != compound_head(page));
 	VM_BUG_ON(page_count(page) == 0);
 	atomic_add(nr, &page->_count);
+	SetPageReferenced(page);
+}
+
+static inline void get_huge_page_tail(struct page *page)
+{
+	/*
+	 * __split_huge_page_refcount() cannot run
+	 * from under us.
+	 */
+	VM_BUG_ON(atomic_read(&page->_count) < 0);
+	atomic_inc(&page->_count);
 }
 
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
@@ -128,6 +141,8 @@
 	do {
 		VM_BUG_ON(compound_head(page) != head);
 		pages[*nr] = page;
+		if (PageTail(page))
+			get_huge_page_tail(page);
 		(*nr)++;
 		page++;
 		refs++;
@@ -148,7 +163,18 @@
 		pmd_t pmd = *pmdp;
 
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		/*
+		 * The pmd_trans_splitting() check below explains why
+		 * pmdp_splitting_flush has to flush the tlb, to stop
+		 * this gup-fast code from running while we set the
+		 * splitting bit in the pmd. Returning zero will take
+		 * the slow path that will call wait_split_huge_page()
+		 * if the pmd is still in splitting state. gup-fast
+		 * can't because it has irq disabled and
+		 * wait_split_huge_page() would never return as the
+		 * tlb flush IPI wouldn't run.
+		 */
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_large(pmd))) {
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f89b5bb..c821074 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -45,6 +45,7 @@
 #include <asm/bugs.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/olpc_ofw.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/paravirt.h>
@@ -715,6 +716,7 @@
 	/*
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
+	olpc_dt_build_devicetree();
 	sparse_init();
 	zone_sizes_init();
 }
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 71a5929..c14a542 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -105,18 +105,18 @@
 
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
 		struct page *page;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 			spin_lock(pgt_lock);
 
@@ -128,7 +128,7 @@
 
 			spin_unlock(pgt_lock);
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 787c52c..ebf6d78 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -2,6 +2,28 @@
 #include <linux/topology.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <asm/numa.h>
+#include <asm/acpi.h>
+
+int __initdata numa_off;
+
+static __init int numa_setup(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+	if (!strncmp(opt, "off", 3))
+		numa_off = 1;
+#ifdef CONFIG_NUMA_EMU
+	if (!strncmp(opt, "fake=", 5))
+		numa_emu_cmdline(opt + 5);
+#endif
+#ifdef CONFIG_ACPI_NUMA
+	if (!strncmp(opt, "noacpi", 6))
+		acpi_numa = -1;
+#endif
+	return 0;
+}
+early_param("numa", numa_setup);
 
 /*
  * Which logical CPUs are on which nodes
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 7762a51..1337c51 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -30,7 +30,6 @@
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 
-int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
@@ -260,30 +259,35 @@
 #ifdef CONFIG_NUMA_EMU
 /* Numa emulation */
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __initdata;
+static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
 static char *cmdline __initdata;
 
+void __init numa_emu_cmdline(char *str)
+{
+	cmdline = str;
+}
+
 static int __init setup_physnodes(unsigned long start, unsigned long end,
 					int acpi, int amd)
 {
-	int nr_nodes = 0;
 	int ret = 0;
 	int i;
 
+	memset(physnodes, 0, sizeof(physnodes));
 #ifdef CONFIG_ACPI_NUMA
 	if (acpi)
-		nr_nodes = acpi_get_nodes(physnodes);
+		acpi_get_nodes(physnodes, start, end);
 #endif
 #ifdef CONFIG_AMD_NUMA
 	if (amd)
-		nr_nodes = amd_get_nodes(physnodes);
+		amd_get_nodes(physnodes);
 #endif
 	/*
 	 * Basic sanity checking on the physical node map: there may be errors
 	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
 	 * kernel parameter is used.
 	 */
-	for (i = 0; i < nr_nodes; i++) {
+	for (i = 0; i < MAX_NUMNODES; i++) {
 		if (physnodes[i].start == physnodes[i].end)
 			continue;
 		if (physnodes[i].start > end) {
@@ -298,17 +302,6 @@
 			physnodes[i].start = start;
 		if (physnodes[i].end > end)
 			physnodes[i].end = end;
-	}
-
-	/*
-	 * Remove all nodes that have no memory or were truncated because of the
-	 * limited address range.
-	 */
-	for (i = 0; i < nr_nodes; i++) {
-		if (physnodes[i].start == physnodes[i].end)
-			continue;
-		physnodes[ret].start = physnodes[i].start;
-		physnodes[ret].end = physnodes[i].end;
 		ret++;
 	}
 
@@ -324,6 +317,24 @@
 	return ret;
 }
 
+static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
+{
+	int i;
+
+	BUG_ON(acpi && amd);
+#ifdef CONFIG_ACPI_NUMA
+	if (acpi)
+		acpi_fake_nodes(nodes, nr_nodes);
+#endif
+#ifdef CONFIG_AMD_NUMA
+	if (amd)
+		amd_fake_nodes(nodes, nr_nodes);
+#endif
+	if (!acpi && !amd)
+		for (i = 0; i < nr_cpu_ids; i++)
+			numa_set_node(i, 0);
+}
+
 /*
 * Sets up nid to range from addr to addr + size.  If the end
  * boundary is greater than max_addr, then max_addr is used instead.
@@ -352,8 +363,7 @@
  * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
  * to max_addr.  The return value is the number of nodes allocated.
  */
-static int __init split_nodes_interleave(u64 addr, u64 max_addr,
-						int nr_phys_nodes, int nr_nodes)
+static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
 {
 	nodemask_t physnode_mask = NODE_MASK_NONE;
 	u64 size;
@@ -384,7 +394,7 @@
 		return -1;
 	}
 
-	for (i = 0; i < nr_phys_nodes; i++)
+	for (i = 0; i < MAX_NUMNODES; i++)
 		if (physnodes[i].start != physnodes[i].end)
 			node_set(i, physnode_mask);
 
@@ -553,11 +563,9 @@
 {
 	u64 addr = start_pfn << PAGE_SHIFT;
 	u64 max_addr = last_pfn << PAGE_SHIFT;
-	int num_phys_nodes;
 	int num_nodes;
 	int i;
 
-	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
 	/*
 	 * If the numa=fake command-line contains a 'M' or 'G', it represents
 	 * the fixed node size.  Otherwise, if it is just a single number N,
@@ -572,7 +580,7 @@
 		unsigned long n;
 
 		n = simple_strtoul(cmdline, NULL, 0);
-		num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n);
+		num_nodes = split_nodes_interleave(addr, max_addr, n);
 	}
 
 	if (num_nodes < 0)
@@ -595,7 +603,8 @@
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
-	acpi_fake_nodes(nodes, num_nodes);
+	setup_physnodes(addr, max_addr, acpi, amd);
+	fake_physnodes(acpi, amd, num_nodes);
 	numa_init_array();
 	return 0;
 }
@@ -610,8 +619,12 @@
 	nodes_clear(node_online_map);
 
 #ifdef CONFIG_NUMA_EMU
+	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+			acpi, amd);
 	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
 		return;
+	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+			acpi, amd);
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 #endif
@@ -661,24 +674,6 @@
 	return pages;
 }
 
-static __init int numa_setup(char *opt)
-{
-	if (!opt)
-		return -EINVAL;
-	if (!strncmp(opt, "off", 3))
-		numa_off = 1;
-#ifdef CONFIG_NUMA_EMU
-	if (!strncmp(opt, "fake=", 5))
-		cmdline = opt + 5;
-#endif
-#ifdef CONFIG_ACPI_NUMA
-	if (!strncmp(opt, "noacpi", 6))
-		acpi_numa = -1;
-#endif
-	return 0;
-}
-early_param("numa", numa_setup);
-
 #ifdef CONFIG_NUMA
 
 static __init int find_near_online_node(int node)
@@ -767,6 +762,7 @@
 
 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
 
+#ifndef CONFIG_NUMA_EMU
 void __cpuinit numa_add_cpu(int cpu)
 {
 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
@@ -776,34 +772,111 @@
 {
 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
+#else
+void __cpuinit numa_add_cpu(int cpu)
+{
+	unsigned long addr;
+	u16 apicid;
+	int physnid;
+	int nid = NUMA_NO_NODE;
+
+	nid = early_cpu_to_node(cpu);
+	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
+
+	/*
+	 * Use the starting address of the emulated node to find which physical
+	 * node it is allocated on.
+	 */
+	addr = node_start_pfn(nid) << PAGE_SHIFT;
+	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
+		if (addr >= physnodes[physnid].start &&
+		    addr < physnodes[physnid].end)
+			break;
+
+	/*
+	 * Map the cpu to each emulated node that is allocated on the physical
+	 * node of the cpu's apic id.
+	 */
+	for_each_online_node(nid) {
+		addr = node_start_pfn(nid) << PAGE_SHIFT;
+		if (addr >= physnodes[physnid].start &&
+		    addr < physnodes[physnid].end)
+			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+	}
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	int i;
+
+	for_each_online_node(i)
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
+}
+#endif /* !CONFIG_NUMA_EMU */
 
 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
 {
 	int node = early_cpu_to_node(cpu);
 	struct cpumask *mask;
 	char buf[64];
 
 	mask = node_to_cpumask_map[node];
-	if (mask == NULL) {
-		printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
+	if (!mask) {
+		pr_err("node_to_cpumask_map[%i] NULL\n", node);
 		dump_stack();
-		return;
+		return NULL;
 	}
 
+	cpulist_scnprintf(buf, sizeof(buf), mask);
+	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+		enable ? "numa_add_cpu" : "numa_remove_cpu",
+		cpu, node, buf);
+	return mask;
+}
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+#ifndef CONFIG_NUMA_EMU
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	struct cpumask *mask;
+
+	mask = debug_cpumask_set_cpu(cpu, enable);
+	if (!mask)
+		return;
+
 	if (enable)
 		cpumask_set_cpu(cpu, mask);
 	else
 		cpumask_clear_cpu(cpu, mask);
-
-	cpulist_scnprintf(buf, sizeof(buf), mask);
-	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
 }
+#else
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	int node = early_cpu_to_node(cpu);
+	struct cpumask *mask;
+	int i;
+
+	for_each_online_node(i) {
+		unsigned long addr;
+
+		addr = node_start_pfn(i) << PAGE_SHIFT;
+		if (addr < physnodes[node].start ||
+					addr >= physnodes[node].end)
+			continue;
+		mask = debug_cpumask_set_cpu(cpu, enable);
+		if (!mask)
+			return;
+
+		if (enable)
+			cpumask_set_cpu(cpu, mask);
+		else
+			cpumask_clear_cpu(cpu, mask);
+	}
+}
+#endif /* CONFIG_NUMA_EMU */
 
 void __cpuinit numa_add_cpu(int cpu)
 {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca..90825f2 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -57,12 +57,10 @@
 
 void update_page_count(int level, unsigned long pages)
 {
-	unsigned long flags;
-
 	/* Protect against CPA */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	direct_pages_count[level] += pages;
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 static void split_page_count(int level)
@@ -256,7 +254,6 @@
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
-	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +279,6 @@
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
-	/*
-	 * .data and .bss should always be writable.
-	 */
-	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -327,7 +318,6 @@
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
@@ -402,7 +392,7 @@
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
@@ -411,7 +401,7 @@
 	if (cpa->force_split)
 		return 1;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
@@ -506,14 +496,14 @@
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return do_split;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	unsigned long flags, pfn, pfninc = 1;
+	unsigned long pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
@@ -527,7 +517,7 @@
 	if (!base)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -599,7 +589,7 @@
 	 */
 	if (base)
 		__free_page(base);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return 0;
 }
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8be8c7d..0113d19 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -121,14 +121,12 @@
 
 static void pgd_dtor(pgd_t *pgd)
 {
-	unsigned long flags; /* can be called from interrupt context */
-
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -260,7 +258,6 @@
 {
 	pgd_t *pgd;
 	pmd_t *pmds[PREALLOCATED_PMDS];
-	unsigned long flags;
 
 	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
@@ -280,12 +277,12 @@
 	 * respect to anything walking the pgd_list, so that they
 	 * never see a partially populated pgd.
 	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	pgd_ctor(mm, pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return pgd;
 
@@ -320,6 +317,25 @@
 	return changed;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+	int changed = !pmd_same(*pmdp, entry);
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	if (changed && dirty) {
+		*pmdp = entry;
+		pmd_update_defer(vma->vm_mm, address, pmdp);
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	}
+
+	return changed;
+}
+#endif
+
 int ptep_test_and_clear_young(struct vm_area_struct *vma,
 			      unsigned long addr, pte_t *ptep)
 {
@@ -335,6 +351,23 @@
 	return ret;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+			      unsigned long addr, pmd_t *pmdp)
+{
+	int ret = 0;
+
+	if (pmd_young(*pmdp))
+		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+					 (unsigned long *)pmdp);
+
+	if (ret)
+		pmd_update(vma->vm_mm, addr, pmdp);
+
+	return ret;
+}
+#endif
+
 int ptep_clear_flush_young(struct vm_area_struct *vma,
 			   unsigned long address, pte_t *ptep)
 {
@@ -347,6 +380,36 @@
 	return young;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp)
+{
+	int young;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	young = pmdp_test_and_clear_young(vma, address, pmdp);
+	if (young)
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	return young;
+}
+
+void pmdp_splitting_flush(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp)
+{
+	int set;
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
+				(unsigned long *)pmdp);
+	if (set) {
+		pmd_update(vma->vm_mm, address, pmdp);
+		/* need tlb flush only to serialize against gup-fast */
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	}
+}
+#endif
+
 /**
  * reserve_top_address - reserves a hole in the top of kernel address space
  * @reserve - size of hole to reserve
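
A rough sketch of how the new splitting helper is meant to be driven; the real caller lives in mm/huge_memory.c, and vma, haddr and pmd are assumed to describe an established transparent huge page mapping:

	/*
	 * Mark the pmd as splitting.  The TLB flush inside doubles as an
	 * IPI that cannot complete while a gup-fast walker still has IRQs
	 * disabled, so any such walker has either finished or will see the
	 * splitting bit before the split proceeds.
	 */
	pmdp_splitting_flush(vma, haddr, pmd);
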
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index f164345..ae96e7b8 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -59,7 +59,6 @@
 static int __initdata num_memory_chunks; /* total number of memory chunks */
 static u8 __initdata apicid_to_pxm[MAX_APICID];
 
-int numa_off __initdata;
 int acpi_numa __initdata;
 
 static __init void bad_srat(void)
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 171a0aa..603d285 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -349,18 +349,19 @@
 
 void __init acpi_numa_arch_fixup(void) {}
 
-int __init acpi_get_nodes(struct bootnode *physnodes)
+#ifdef CONFIG_NUMA_EMU
+void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end)
 {
 	int i;
-	int ret = 0;
 
 	for_each_node_mask(i, nodes_parsed) {
-		physnodes[ret].start = nodes[i].start;
-		physnodes[ret].end = nodes[i].end;
-		ret++;
+		cutoff_node(i, start, end);
+		physnodes[i].start = nodes[i].start;
+		physnodes[i].end = nodes[i].end;
 	}
-	return ret;
 }
+#endif /* CONFIG_NUMA_EMU */
 
 /* Use the information discovered above to actually set up the nodes. */
 int __init acpi_scan_nodes(unsigned long start, unsigned long end)
@@ -505,8 +506,6 @@
 {
 	int i, j;
 
-	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
-			 "topology.\n");
 	for (i = 0; i < num_nodes; i++) {
 		int nid, pxm;
 
@@ -526,6 +525,17 @@
 			    fake_apicid_to_node[j] == NUMA_NO_NODE)
 				fake_apicid_to_node[j] = i;
 	}
+
+	/*
+	 * If there are apicid-to-node mappings for physical nodes that do not
+	 * have a corresponding emulated node, it should default to a guaranteed
+	 * have a corresponding emulated node, they should default to a guaranteed
+	 */
+	for (i = 0; i < MAX_LOCAL_APIC; i++)
+		if (apicid_to_node[i] != NUMA_NO_NODE &&
+		    fake_apicid_to_node[i] == NUMA_NO_NODE)
+			fake_apicid_to_node[i] = 0;
+
 	for (i = 0; i < num_nodes; i++)
 		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
 	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f24a853..e2b7b0c 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -65,7 +65,6 @@
 
 	switch (val) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		if (ctr_running)
 			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
 		else if (!nmi_enabled)
@@ -361,7 +360,7 @@
 static struct notifier_block profile_exceptions_nb = {
 	.notifier_call = profile_exceptions_notify,
 	.next = NULL,
-	.priority = 2
+	.priority = NMI_LOCAL_LOW_PRIOR,
 };
 
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 0636dd9..720bf5a 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -38,7 +38,7 @@
 static struct notifier_block profile_timer_exceptions_nb = {
 	.notifier_call = profile_timer_exceptions_notify,
 	.next = NULL,
-	.priority = 0
+	.priority = NMI_LOW_PRIOR,
 };
 
 static int timer_start(void)
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index fc1e8fe..e27dffb 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/range.h>
 
+#include <asm/amd_nb.h>
 #include <asm/pci_x86.h>
 
 #include <asm/pci-direct.h>
@@ -378,6 +379,34 @@
 	.notifier_call	= amd_cpu_notify,
 };
 
+static void __init pci_enable_pci_io_ecs(void)
+{
+#ifdef CONFIG_AMD_NB
+	unsigned int i, n;
+
+	for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
+		u8 bus = amd_nb_bus_dev_ranges[i].bus;
+		u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
+		u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;
+
+		for (; slot < limit; ++slot) {
+			u32 val = read_pci_config(bus, slot, 3, 0);
+
+			if (!early_is_amd_nb(val))
+				continue;
+
+			val = read_pci_config(bus, slot, 3, 0x8c);
+			if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
+				val |= ENABLE_CF8_EXT_CFG >> 32;
+				write_pci_config(bus, slot, 3, 0x8c, val);
+			}
+			++n;
+		}
+	}
+	pr_info("Extended Config Space enabled on %u nodes\n", n);
+#endif
+}
+
 static int __init pci_io_ecs_init(void)
 {
 	int cpu;
@@ -386,6 +415,10 @@
         if (boot_cpu_data.x86 < 0x10)
 		return 0;
 
+	/* Try the PCI method first. */
+	if (early_pci_allowed())
+		pci_enable_pci_io_ecs();
+
 	register_cpu_notifier(&amd_cpu_notifier);
 	for_each_online_cpu(cpu)
 		amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
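
One detail worth spelling out, stated here from memory rather than from this diff: ENABLE_CF8_EXT_CFG is assumed to be bit 46 of the 64-bit NB_CFG value that the northbridge also exposes through the F3x88/F3x8C register pair, so the ">> 32" above selects bit 14 of the dword read back from offset 0x8c:

	(1ULL << 46) >> 32  ==  1UL << 14  ==  0x4000	/* assumed bit position */
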
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index 0846a5b..ab8269b 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -9,6 +9,7 @@
  * option) any later version.
  */
 
+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/pci.h>
@@ -25,12 +26,14 @@
 	u8 fbus, lbus;
 	int i;
 
+#ifdef CONFIG_ACPI
 	/*
-	 * The x86_pci_root_bus_res_quirks() function already refuses to use
-	 * this information if ACPI _CRS was used. Therefore, we don't bother
-	 * checking if ACPI is enabled, and just generate the information
-	 * for both the ACPI _CRS and no ACPI cases.
+	 * We should get host bridge information from ACPI unless the BIOS
+	 * doesn't support it.
 	 */
+	if (acpi_os_get_root_pointer())
+		return;
+#endif
 
 	info = &pci_root_info[pci_root_num];
 	pci_root_num++;
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index 85b68ef..9260b3e 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -34,6 +34,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 
+#include <asm/ce4100.h>
 #include <asm/pci_x86.h>
 
 struct sim_reg {
@@ -306,10 +307,10 @@
 	.write = ce4100_conf_write,
 };
 
-static int __init ce4100_pci_init(void)
+int __init ce4100_pci_init(void)
 {
 	init_sim_regs();
 	raw_pci_ops = &ce4100_pci_conf;
-	return 0;
+	/* Indicate to the caller that it should invoke pci_legacy_init() */
+	return 1;
 }
-subsys_initcall(ce4100_pci_init);
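
The new return value only makes sense together with its caller, which is outside this diff. Based on the comment above, the dispatch on the generic side presumably looks something like this sketch:

	/* sketch: a non-zero return from the platform hook asks the
	 * generic code to fall back to the legacy probe */
	if (x86_init.pci.init())
		pci_legacy_init();
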
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index f7c8a39..5fe7502 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -22,6 +22,7 @@
 
 unsigned int pci_early_dump_regs;
 static int pci_bf_sort;
+static int smbios_type_b1_flag;
 int pci_routeirq;
 int noioapicquirk;
 #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -185,6 +186,39 @@
 	return 0;
 }
 
+static void __devinit read_dmi_type_b1(const struct dmi_header *dm,
+				       void *private_data)
+{
+	u8 *d = (u8 *)dm + 4;
+
+	if (dm->type != 0xB1)
+		return;
+	switch (((*(u32 *)d) >> 9) & 0x03) {
+	case 0x00:
+		printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n");
+		break;
+	case 0x01: /* set pci=bfsort */
+		smbios_type_b1_flag = 1;
+		break;
+	case 0x02: /* do not set pci=bfsort */
+		smbios_type_b1_flag = 2;
+		break;
+	default:
+		break;
+	}
+}
+
+static int __devinit find_sort_method(const struct dmi_system_id *d)
+{
+	dmi_walk(read_dmi_type_b1, NULL);
+
+	if (smbios_type_b1_flag == 1) {
+		set_bf_sort(d);
+		return 0;
+	}
+	return -1;
+}
+
 /*
  * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
  */
@@ -213,6 +247,13 @@
 	},
 #endif		/* __i386__ */
 	{
+		.callback = find_sort_method,
+		.ident = "Dell System",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+		},
+	},
+	{
 		.callback = set_bf_sort,
 		.ident = "Dell PowerEdge 1950",
 		.matches = {
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 9f9bfb7..87e6c83 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -589,7 +589,8 @@
 	case PCI_DEVICE_ID_INTEL_ICH10_1:
 	case PCI_DEVICE_ID_INTEL_ICH10_2:
 	case PCI_DEVICE_ID_INTEL_ICH10_3:
-	case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
+	case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0:
+	case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1:
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index d2c0d51..cd6f184 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -15,6 +15,7 @@
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 
+#include <asm/ce4100.h>
 #include <asm/setup.h>
 #include <asm/io.h>
 
@@ -129,4 +130,5 @@
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = sdv_find_smp_config;
+	x86_init.pci.init = ce4100_pci_init;
 }
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 65df603..25bfdbb 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -103,7 +103,7 @@
 static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
 
 static u32 *pclk_spi0;
-/* Always contains an accessable address, start with 0 */
+/* Always contains an accessible address, start with 0 */
 static struct dw_spi_reg *pspi;
 
 static struct kmsg_dumper dw_dumper;
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile
index c31b8fc..e797428 100644
--- a/arch/x86/platform/olpc/Makefile
+++ b/arch/x86/platform/olpc/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_OLPC)		+= olpc.o
 obj-$(CONFIG_OLPC_XO1)		+= olpc-xo1.o
 obj-$(CONFIG_OLPC_OPENFIRMWARE)	+= olpc_ofw.o
+obj-$(CONFIG_OLPC_OPENFIRMWARE_DT)	+= olpc_dt.o
diff --git a/arch/x86/platform/olpc/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c
index f5442c0..1277756 100644
--- a/arch/x86/platform/olpc/olpc-xo1.c
+++ b/arch/x86/platform/olpc/olpc-xo1.c
@@ -1,6 +1,7 @@
 /*
  * Support for features of the OLPC XO-1 laptop
  *
+ * Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
  * Copyright (C) 2010 One Laptop per Child
  * Copyright (C) 2006 Red Hat, Inc.
  * Copyright (C) 2006 Advanced Micro Devices, Inc.
@@ -12,8 +13,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 
@@ -22,9 +21,6 @@
 
 #define DRV_NAME "olpc-xo1"
 
-#define PMS_BAR		4
-#define ACPI_BAR	5
-
 /* PMC registers (PMS block) */
 #define PM_SCLK		0x10
 #define PM_IN_SLPCTL	0x20
@@ -57,65 +53,67 @@
 	outl(0x00002000, acpi_base + PM1_CNT);
 }
 
-/* Read the base addresses from the PCI BAR info */
-static int __devinit setup_bases(struct pci_dev *pdev)
-{
-	int r;
-
-	r = pci_enable_device_io(pdev);
-	if (r) {
-		dev_err(&pdev->dev, "can't enable device IO\n");
-		return r;
-	}
-
-	r = pci_request_region(pdev, ACPI_BAR, DRV_NAME);
-	if (r) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", ACPI_BAR);
-		return r;
-	}
-
-	r = pci_request_region(pdev, PMS_BAR, DRV_NAME);
-	if (r) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", PMS_BAR);
-		pci_release_region(pdev, ACPI_BAR);
-		return r;
-	}
-
-	acpi_base = pci_resource_start(pdev, ACPI_BAR);
-	pms_base = pci_resource_start(pdev, PMS_BAR);
-
-	return 0;
-}
-
 static int __devinit olpc_xo1_probe(struct platform_device *pdev)
 {
-	struct pci_dev *pcidev;
-	int r;
+	struct resource *res;
 
-	pcidev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
-				NULL);
-	if (!pdev)
+	/* don't run on non-XOs */
+	if (!machine_is_olpc())
 		return -ENODEV;
 
-	r = setup_bases(pcidev);
-	if (r)
-		return r;
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
+		return -EIO;
+	}
 
-	pm_power_off = xo1_power_off;
+	if (!request_region(res->start, resource_size(res), DRV_NAME)) {
+		dev_err(&pdev->dev, "can't request region\n");
+		return -EIO;
+	}
 
-	printk(KERN_INFO "OLPC XO-1 support registered\n");
+	if (strcmp(pdev->name, "cs5535-pms") == 0)
+		pms_base = res->start;
+	else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+		acpi_base = res->start;
+
+	/* If we have both addresses, we can override the poweroff hook */
+	if (pms_base && acpi_base) {
+		pm_power_off = xo1_power_off;
+		printk(KERN_INFO "OLPC XO-1 support registered\n");
+	}
+
 	return 0;
 }
 
 static int __devexit olpc_xo1_remove(struct platform_device *pdev)
 {
+	struct resource *r;
+
+	r = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	release_region(r->start, resource_size(r));
+
+	if (strcmp(pdev->name, "cs5535-pms") == 0)
+		pms_base = 0;
+	else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+		acpi_base = 0;
+
 	pm_power_off = NULL;
 	return 0;
 }
 
-static struct platform_driver olpc_xo1_driver = {
+static struct platform_driver cs5535_pms_drv = {
 	.driver = {
-		.name = DRV_NAME,
+		.name = "cs5535-pms",
+		.owner = THIS_MODULE,
+	},
+	.probe = olpc_xo1_probe,
+	.remove = __devexit_p(olpc_xo1_remove),
+};
+
+static struct platform_driver cs5535_acpi_drv = {
+	.driver = {
+		.name = "cs5535-acpi",
 		.owner = THIS_MODULE,
 	},
 	.probe = olpc_xo1_probe,
@@ -124,12 +122,23 @@
 
 static int __init olpc_xo1_init(void)
 {
-	return platform_driver_register(&olpc_xo1_driver);
+	int r;
+
+	r = platform_driver_register(&cs5535_pms_drv);
+	if (r)
+		return r;
+
+	r = platform_driver_register(&cs5535_acpi_drv);
+	if (r)
+		platform_driver_unregister(&cs5535_pms_drv);
+
+	return r;
 }
 
 static void __exit olpc_xo1_exit(void)
 {
-	platform_driver_unregister(&olpc_xo1_driver);
+	platform_driver_unregister(&cs5535_acpi_drv);
+	platform_driver_unregister(&cs5535_pms_drv);
 }
 
 MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
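
The probe now binds by platform-device name instead of by PCI ID. For illustration, such a device could be registered as below; in-tree this is expected to come from the cs5535-mfd driver, the I/O base is made up, and pdev is an assumed struct platform_device pointer:

	static struct resource pms_res = {
		.start	= 0x1000,		/* assumed PMS I/O base */
		.end	= 0x1000 + 0x7f,
		.flags	= IORESOURCE_IO,
	};

	/* creates a "cs5535-pms" device that olpc_xo1_probe() will claim
	 * and record as pms_base */
	pdev = platform_device_register_simple("cs5535-pms", -1, &pms_res, 1);
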
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
new file mode 100644
index 0000000..044bda5
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -0,0 +1,182 @@
+/*
+ * OLPC-specific OFW device tree support code.
+ *
+ * Paul Mackerras	August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ *    {engebret|bergner}@us.ibm.com
+ *
+ *  Adapted for sparc by David S. Miller davem@davemloft.net
+ *  Adapted for x86/OLPC by Andres Salomon <dilinger@queued.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/of.h>
+#include <linux/of_pdt.h>
+#include <asm/olpc_ofw.h>
+
+static phandle __init olpc_dt_getsibling(phandle node)
+{
+	const void *args[] = { (void *)node };
+	void *res[] = { &node };
+
+	if ((s32)node == -1)
+		return 0;
+
+	if (olpc_ofw("peer", args, res) || (s32)node == -1)
+		return 0;
+
+	return node;
+}
+
+static phandle __init olpc_dt_getchild(phandle node)
+{
+	const void *args[] = { (void *)node };
+	void *res[] = { &node };
+
+	if ((s32)node == -1)
+		return 0;
+
+	if (olpc_ofw("child", args, res) || (s32)node == -1) {
+		pr_err("PROM: %s: fetching child failed!\n", __func__);
+		return 0;
+	}
+
+	return node;
+}
+
+static int __init olpc_dt_getproplen(phandle node, const char *prop)
+{
+	const void *args[] = { (void *)node, prop };
+	int len;
+	void *res[] = { &len };
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("getproplen", args, res)) {
+		pr_err("PROM: %s: getproplen failed!\n", __func__);
+		return -1;
+	}
+
+	return len;
+}
+
+static int __init olpc_dt_getproperty(phandle node, const char *prop,
+		char *buf, int bufsize)
+{
+	int plen;
+
+	plen = olpc_dt_getproplen(node, prop);
+	if (plen > bufsize || plen < 1) {
+		return -1;
+	} else {
+		const void *args[] = { (void *)node, prop, buf, (void *)plen };
+		void *res[] = { &plen };
+
+		if (olpc_ofw("getprop", args, res)) {
+			pr_err("PROM: %s: getprop failed!\n", __func__);
+			return -1;
+		}
+	}
+
+	return plen;
+}
+
+static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
+{
+	const void *args[] = { (void *)node, prev, buf };
+	int success;
+	void *res[] = { &success };
+
+	buf[0] = '\0';
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("nextprop", args, res) || success != 1)
+		return -1;
+
+	return 0;
+}
+
+static int __init olpc_dt_pkg2path(phandle node, char *buf,
+		const int buflen, int *len)
+{
+	const void *args[] = { (void *)node, buf, (void *)buflen };
+	void *res[] = { len };
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("package-to-path", args, res) || *len < 1)
+		return -1;
+
+	return 0;
+}
+
+static unsigned int prom_early_allocated __initdata;
+
+void * __init prom_early_alloc(unsigned long size)
+{
+	static u8 *mem;
+	static size_t free_mem;
+	void *res;
+
+	if (free_mem < size) {
+		const size_t chunk_size = max(PAGE_SIZE, size);
+
+		/*
+		 * To minimize the number of allocations, grab at least
+		 * PAGE_SIZE of memory (that's an arbitrary choice that's
+		 * fast enough on the platforms we care about while minimizing
+		 * wasted bootmem) and hand off chunks of it to callers.
+		 */
+		res = alloc_bootmem(chunk_size);
+		BUG_ON(!res);
+		prom_early_allocated += chunk_size;
+		memset(res, 0, chunk_size);
+		free_mem = chunk_size;
+		mem = res;
+	}
+
+	/* allocate from the local cache */
+	free_mem -= size;
+	res = mem;
+	mem += size;
+	return res;
+}
+
+static struct of_pdt_ops prom_olpc_ops __initdata = {
+	.nextprop = olpc_dt_nextprop,
+	.getproplen = olpc_dt_getproplen,
+	.getproperty = olpc_dt_getproperty,
+	.getchild = olpc_dt_getchild,
+	.getsibling = olpc_dt_getsibling,
+	.pkg2path = olpc_dt_pkg2path,
+};
+
+void __init olpc_dt_build_devicetree(void)
+{
+	phandle root;
+
+	if (!olpc_ofw_is_installed())
+		return;
+
+	root = olpc_dt_getsibling(0);
+	if (!root) {
+		pr_err("PROM: unable to get root node from OFW!\n");
+		return;
+	}
+	of_pdt_build_devicetree(root, &prom_olpc_ops);
+
+	pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
+			prom_early_allocated);
+}
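
To make prom_early_alloc()'s behaviour concrete (sizes invented for the example): the first request that does not fit grabs at least a page of zeroed bootmem, later requests are carved from the remainder of that chunk, and an oversized request simply starts a new chunk.

	void *a = prom_early_alloc(64);		/* grabs a fresh PAGE_SIZE chunk */
	void *b = prom_early_alloc(64);		/* b == a + 64, no new bootmem call */
	void *c = prom_early_alloc(8192);	/* > PAGE_SIZE, so a new 8192-byte chunk */
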
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index 7873204..e7604f6 100644
--- a/arch/x86/platform/olpc/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -110,3 +110,8 @@
 			(unsigned long)olpc_ofw_cif, (-start) >> 20);
 	reserve_top_address(-start);
 }
+
+bool __init olpc_ofw_is_installed(void)
+{
+	return olpc_ofw_cif != NULL;
+}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index df58e9c..a7b38d3 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1364,11 +1364,11 @@
 		memset(bd2, 0, sizeof(struct bau_desc));
 		bd2->header.sw_ack_flag = 1;
 		/*
-		 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+		 * base_dest_nodeid is the nasid of the first uvhub
 		 * in the partition. The bit map will indicate uvhub numbers,
 		 * which are 0-N in a partition. Pnodes are unique system-wide.
 		 */
-		bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+		bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
 		bd2->header.dest_subnodeid = 0x10; /* the LB */
 		bd2->header.command = UV_NET_ENDPOINT_INTD;
 		bd2->header.int_both = 1;
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7793851..17c565d 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -12,7 +12,8 @@
 
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
-			grant-table.o suspend.o platform-pci-unplug.o
+			grant-table.o suspend.o platform-pci-unplug.o \
+			p2m.o
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7e8d3bc..50542ef 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1194,7 +1194,7 @@
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
 	local_irq_disable();
-	early_boot_irqs_off();
+	early_boot_irqs_disabled = true;
 
 	memblock_init();
 
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 9d30105..6a6fe89 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -126,7 +126,7 @@
 #endif
 };
 
-void __init xen_init_irq_ops()
+void __init xen_init_irq_ops(void)
 {
 	pv_irq_ops = xen_irq_ops;
 	x86_init.irqs.intr_init = xen_init_IRQ;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 44924e5..f608942 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -173,371 +173,6 @@
  */
 #define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
-/*
- * Xen leaves the responsibility for maintaining p2m mappings to the
- * guests themselves, but it must also access and update the p2m array
- * during suspend/resume when all the pages are reallocated.
- *
- * The p2m table is logically a flat array, but we implement it as a
- * three-level tree to allow the address space to be sparse.
- *
- *                               Xen
- *                                |
- *     p2m_top              p2m_top_mfn
- *       /  \                   /   \
- * p2m_mid p2m_mid	p2m_mid_mfn p2m_mid_mfn
- *    / \      / \         /           /
- *  p2m p2m p2m p2m p2m p2m p2m ...
- *
- * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
- *
- * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
- * maximum representable pseudo-physical address space is:
- *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
- *
- * P2M_PER_PAGE depends on the architecture, as a mfn is always
- * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
- * 512 and 1024 entries respectively. 
- */
-
-unsigned long xen_max_p2m_pfn __read_mostly;
-
-#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
-#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
-#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
-
-#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
-
-/* Placeholders for holes in the address space */
-static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
-
-static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
-
-RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-
-static inline unsigned p2m_top_index(unsigned long pfn)
-{
-	BUG_ON(pfn >= MAX_P2M_PFN);
-	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
-}
-
-static inline unsigned p2m_mid_index(unsigned long pfn)
-{
-	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
-}
-
-static inline unsigned p2m_index(unsigned long pfn)
-{
-	return pfn % P2M_PER_PAGE;
-}
-
-static void p2m_top_init(unsigned long ***top)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = p2m_mid_missing;
-}
-
-static void p2m_top_mfn_init(unsigned long *top)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
-}
-
-static void p2m_top_mfn_p_init(unsigned long **top)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = p2m_mid_missing_mfn;
-}
-
-static void p2m_mid_init(unsigned long **mid)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		mid[i] = p2m_missing;
-}
-
-static void p2m_mid_mfn_init(unsigned long *mid)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		mid[i] = virt_to_mfn(p2m_missing);
-}
-
-static void p2m_init(unsigned long *p2m)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		p2m[i] = INVALID_P2M_ENTRY;
-}
-
-/*
- * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
- *
- * This is called both at boot time, and after resuming from suspend:
- * - At boot time we're called very early, and must use extend_brk()
- *   to allocate memory.
- *
- * - After resume we're called from within stop_machine, but the mfn
- *   tree should alreay be completely allocated.
- */
-void xen_build_mfn_list_list(void)
-{
-	unsigned long pfn;
-
-	/* Pre-initialize p2m_top_mfn to be completely missing */
-	if (p2m_top_mfn == NULL) {
-		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_mid_mfn_init(p2m_mid_missing_mfn);
-
-		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_top_mfn_p_init(p2m_top_mfn_p);
-
-		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_top_mfn_init(p2m_top_mfn);
-	} else {
-		/* Reinitialise, mfn's all change after migration */
-		p2m_mid_mfn_init(p2m_mid_missing_mfn);
-	}
-
-	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned mididx = p2m_mid_index(pfn);
-		unsigned long **mid;
-		unsigned long *mid_mfn_p;
-
-		mid = p2m_top[topidx];
-		mid_mfn_p = p2m_top_mfn_p[topidx];
-
-		/* Don't bother allocating any mfn mid levels if
-		 * they're just missing, just update the stored mfn,
-		 * since all could have changed over a migrate.
-		 */
-		if (mid == p2m_mid_missing) {
-			BUG_ON(mididx);
-			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
-			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
-			continue;
-		}
-
-		if (mid_mfn_p == p2m_mid_missing_mfn) {
-			/*
-			 * XXX boot-time only!  We should never find
-			 * missing parts of the mfn tree after
-			 * runtime.  extend_brk() will BUG if we call
-			 * it too late.
-			 */
-			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_mid_mfn_init(mid_mfn_p);
-
-			p2m_top_mfn_p[topidx] = mid_mfn_p;
-		}
-
-		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
-	}
-}
-
-void xen_setup_mfn_list_list(void)
-{
-	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
-
-	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-		virt_to_mfn(p2m_top_mfn);
-	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
-}
-
-/* Set up p2m_top to point to the domain-builder provided p2m pages */
-void __init xen_build_dynamic_phys_to_machine(void)
-{
-	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
-	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
-	unsigned long pfn;
-
-	xen_max_p2m_pfn = max_pfn;
-
-	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_init(p2m_missing);
-
-	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_mid_init(p2m_mid_missing);
-
-	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_top_init(p2m_top);
-
-	/*
-	 * The domain builder gives us a pre-constructed p2m array in
-	 * mfn_list for all the pages initially given to us, so we just
-	 * need to graft that into our tree structure.
-	 */
-	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned mididx = p2m_mid_index(pfn);
-
-		if (p2m_top[topidx] == p2m_mid_missing) {
-			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_mid_init(mid);
-
-			p2m_top[topidx] = mid;
-		}
-
-		p2m_top[topidx][mididx] = &mfn_list[pfn];
-	}
-}
-
-unsigned long get_phys_to_machine(unsigned long pfn)
-{
-	unsigned topidx, mididx, idx;
-
-	if (unlikely(pfn >= MAX_P2M_PFN))
-		return INVALID_P2M_ENTRY;
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	return p2m_top[topidx][mididx][idx];
-}
-EXPORT_SYMBOL_GPL(get_phys_to_machine);
-
-static void *alloc_p2m_page(void)
-{
-	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-}
-
-static void free_p2m_page(void *p)
-{
-	free_page((unsigned long)p);
-}
-
-/* 
- * Fully allocate the p2m structure for a given pfn.  We need to check
- * that both the top and mid levels are allocated, and make sure the
- * parallel mfn tree is kept in sync.  We may race with other cpus, so
- * the new pages are installed with cmpxchg; if we lose the race then
- * simply free the page we allocated and use the one that's there.
- */
-static bool alloc_p2m(unsigned long pfn)
-{
-	unsigned topidx, mididx;
-	unsigned long ***top_p, **mid;
-	unsigned long *top_mfn_p, *mid_mfn;
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-
-	top_p = &p2m_top[topidx];
-	mid = *top_p;
-
-	if (mid == p2m_mid_missing) {
-		/* Mid level is missing, allocate a new one */
-		mid = alloc_p2m_page();
-		if (!mid)
-			return false;
-
-		p2m_mid_init(mid);
-
-		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
-			free_p2m_page(mid);
-	}
-
-	top_mfn_p = &p2m_top_mfn[topidx];
-	mid_mfn = p2m_top_mfn_p[topidx];
-
-	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
-
-	if (mid_mfn == p2m_mid_missing_mfn) {
-		/* Separately check the mid mfn level */
-		unsigned long missing_mfn;
-		unsigned long mid_mfn_mfn;
-
-		mid_mfn = alloc_p2m_page();
-		if (!mid_mfn)
-			return false;
-
-		p2m_mid_mfn_init(mid_mfn);
-
-		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
-		mid_mfn_mfn = virt_to_mfn(mid_mfn);
-		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
-			free_p2m_page(mid_mfn);
-		else
-			p2m_top_mfn_p[topidx] = mid_mfn;
-	}
-
-	if (p2m_top[topidx][mididx] == p2m_missing) {
-		/* p2m leaf page is missing */
-		unsigned long *p2m;
-
-		p2m = alloc_p2m_page();
-		if (!p2m)
-			return false;
-
-		p2m_init(p2m);
-
-		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
-			free_p2m_page(p2m);
-		else
-			mid_mfn[mididx] = virt_to_mfn(p2m);
-	}
-
-	return true;
-}
-
-/* Try to install p2m mapping; fail if intermediate bits missing */
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	unsigned topidx, mididx, idx;
-
-	if (unlikely(pfn >= MAX_P2M_PFN)) {
-		BUG_ON(mfn != INVALID_P2M_ENTRY);
-		return true;
-	}
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	if (p2m_top[topidx][mididx] == p2m_missing)
-		return mfn == INVALID_P2M_ENTRY;
-
-	p2m_top[topidx][mididx][idx] = mfn;
-
-	return true;
-}
-
-bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
-		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-		return true;
-	}
-
-	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
-		if (!alloc_p2m(pfn))
-			return false;
-
-		if (!__set_phys_to_machine(pfn, mfn))
-			return false;
-	}
-
-	return true;
-}
-
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
 {
 	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
@@ -566,6 +201,7 @@
 	offset = address & ~PAGE_MASK;
 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
+EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 
 void make_lowmem_page_readonly(void *vaddr)
 {
@@ -1350,10 +986,9 @@
  */
 void xen_mm_pin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (!PagePinned(page)) {
@@ -1362,7 +997,7 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -1463,10 +1098,9 @@
  */
 void xen_mm_unpin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (PageSavePinned(page)) {
@@ -1476,7 +1110,7 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
new file mode 100644
index 0000000..fd12d7c
--- /dev/null
+++ b/arch/x86/xen/p2m.c
@@ -0,0 +1,522 @@
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ *                               Xen
+ *                                |
+ *     p2m_top              p2m_top_mfn
+ *       /  \                   /   \
+ * p2m_mid p2m_mid	p2m_mid_mfn p2m_mid_mfn
+ *    / \      / \         /           /
+ *  p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as a mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
+ * 512 and 1024 entries respectively. 
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/sched.h>
+
+#include <asm/cache.h>
+#include <asm/setup.h>
+
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include "xen-ops.h"
+
+static void __init m2p_override_init(void);
+
+unsigned long xen_max_p2m_pfn __read_mostly;
+
+#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
+
+#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
+
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
+
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
+
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+
+static inline unsigned p2m_top_index(unsigned long pfn)
+{
+	BUG_ON(pfn >= MAX_P2M_PFN);
+	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
+}
+
+static inline unsigned p2m_index(unsigned long pfn)
+{
+	return pfn % P2M_PER_PAGE;
+}
+
+static void p2m_top_init(unsigned long ***top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		mid[i] = virt_to_mfn(p2m_missing);
+}
+
+static void p2m_init(unsigned long *p2m)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ *   to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ *   tree should already be completely allocated.
+ */
+void xen_build_mfn_list_list(void)
+{
+	unsigned long pfn;
+
+	/* Pre-initialize p2m_top_mfn to be completely missing */
+	if (p2m_top_mfn == NULL) {
+		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_mid_mfn_init(p2m_mid_missing_mfn);
+
+		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn_p_init(p2m_top_mfn_p);
+
+		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn_init(p2m_top_mfn);
+	} else {
+		/* Reinitialise, mfn's all change after migration */
+		p2m_mid_mfn_init(p2m_mid_missing_mfn);
+	}
+
+	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+		unsigned topidx = p2m_top_index(pfn);
+		unsigned mididx = p2m_mid_index(pfn);
+		unsigned long **mid;
+		unsigned long *mid_mfn_p;
+
+		mid = p2m_top[topidx];
+		mid_mfn_p = p2m_top_mfn_p[topidx];
+
+		/* Don't bother allocating any mfn mid levels if
+		 * they're just missing, just update the stored mfn,
+		 * since all could have changed over a migrate.
+		 */
+		if (mid == p2m_mid_missing) {
+			BUG_ON(mididx);
+			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+			continue;
+		}
+
+		if (mid_mfn_p == p2m_mid_missing_mfn) {
+			/*
+			 * XXX boot-time only!  We should never find
+			 * missing parts of the mfn tree after
+			 * runtime.  extend_brk() will BUG if we call
+			 * it too late.
+			 */
+			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_mfn_init(mid_mfn_p);
+
+			p2m_top_mfn_p[topidx] = mid_mfn_p;
+		}
+
+		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
+	}
+}
+
+void xen_setup_mfn_list_list(void)
+{
+	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
+		virt_to_mfn(p2m_top_mfn);
+	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+}
+
+/* Set up p2m_top to point to the domain-builder provided p2m pages */
+void __init xen_build_dynamic_phys_to_machine(void)
+{
+	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
+	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
+	unsigned long pfn;
+
+	xen_max_p2m_pfn = max_pfn;
+
+	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_init(p2m_missing);
+
+	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_mid_init(p2m_mid_missing);
+
+	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_top_init(p2m_top);
+
+	/*
+	 * The domain builder gives us a pre-constructed p2m array in
+	 * mfn_list for all the pages initially given to us, so we just
+	 * need to graft that into our tree structure.
+	 */
+	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
+		unsigned topidx = p2m_top_index(pfn);
+		unsigned mididx = p2m_mid_index(pfn);
+
+		if (p2m_top[topidx] == p2m_mid_missing) {
+			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_init(mid);
+
+			p2m_top[topidx] = mid;
+		}
+
+		/*
+		 * As long as the mfn_list has enough entries to completely
+		 * fill a p2m page, pointing into the array is ok. But if
+		 * not, the entries beyond the last pfn will be undefined.
+		 */
+		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
+			unsigned long p2midx;
+
+			p2midx = max_pfn % P2M_PER_PAGE;
+			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+		}
+		p2m_top[topidx][mididx] = &mfn_list[pfn];
+	}
+
+	m2p_override_init();
+}
+
+unsigned long get_phys_to_machine(unsigned long pfn)
+{
+	unsigned topidx, mididx, idx;
+
+	if (unlikely(pfn >= MAX_P2M_PFN))
+		return INVALID_P2M_ENTRY;
+
+	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
+	idx = p2m_index(pfn);
+
+	return p2m_top[topidx][mididx][idx];
+}
+EXPORT_SYMBOL_GPL(get_phys_to_machine);
+
+static void *alloc_p2m_page(void)
+{
+	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static void free_p2m_page(void *p)
+{
+	free_page((unsigned long)p);
+}
+
+/* 
+ * Fully allocate the p2m structure for a given pfn.  We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync.  We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+	unsigned topidx, mididx;
+	unsigned long ***top_p, **mid;
+	unsigned long *top_mfn_p, *mid_mfn;
+
+	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
+
+	top_p = &p2m_top[topidx];
+	mid = *top_p;
+
+	if (mid == p2m_mid_missing) {
+		/* Mid level is missing, allocate a new one */
+		mid = alloc_p2m_page();
+		if (!mid)
+			return false;
+
+		p2m_mid_init(mid);
+
+		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+			free_p2m_page(mid);
+	}
+
+	top_mfn_p = &p2m_top_mfn[topidx];
+	mid_mfn = p2m_top_mfn_p[topidx];
+
+	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+
+	if (mid_mfn == p2m_mid_missing_mfn) {
+		/* Separately check the mid mfn level */
+		unsigned long missing_mfn;
+		unsigned long mid_mfn_mfn;
+
+		mid_mfn = alloc_p2m_page();
+		if (!mid_mfn)
+			return false;
+
+		p2m_mid_mfn_init(mid_mfn);
+
+		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+		mid_mfn_mfn = virt_to_mfn(mid_mfn);
+		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+			free_p2m_page(mid_mfn);
+		else
+			p2m_top_mfn_p[topidx] = mid_mfn;
+	}
+
+	if (p2m_top[topidx][mididx] == p2m_missing) {
+		/* p2m leaf page is missing */
+		unsigned long *p2m;
+
+		p2m = alloc_p2m_page();
+		if (!p2m)
+			return false;
+
+		p2m_init(p2m);
+
+		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+			free_p2m_page(p2m);
+		else
+			mid_mfn[mididx] = virt_to_mfn(p2m);
+	}
+
+	return true;
+}
+
+/* Try to install p2m mapping; fail if intermediate bits missing */
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	unsigned topidx, mididx, idx;
+
+	if (unlikely(pfn >= MAX_P2M_PFN)) {
+		BUG_ON(mfn != INVALID_P2M_ENTRY);
+		return true;
+	}
+
+	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
+	idx = p2m_index(pfn);
+
+	if (p2m_top[topidx][mididx] == p2m_missing)
+		return mfn == INVALID_P2M_ENTRY;
+
+	p2m_top[topidx][mididx][idx] = mfn;
+
+	return true;
+}
+
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+		return true;
+	}
+
+	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
+		if (!alloc_p2m(pfn))
+			return false;
+
+		if (!__set_phys_to_machine(pfn, mfn))
+			return false;
+	}
+
+	return true;
+}
+
+#define M2P_OVERRIDE_HASH_SHIFT	10
+#define M2P_OVERRIDE_HASH	(1 << M2P_OVERRIDE_HASH_SHIFT)
+
+static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
+static DEFINE_SPINLOCK(m2p_override_lock);
+
+static void __init m2p_override_init(void)
+{
+	unsigned i;
+
+	m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
+				   sizeof(unsigned long));
+
+	for (i = 0; i < M2P_OVERRIDE_HASH; i++)
+		INIT_LIST_HEAD(&m2p_overrides[i]);
+}
+
+static unsigned long mfn_hash(unsigned long mfn)
+{
+	return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
+}
+
+/* Add an MFN override for a particular page */
+int m2p_add_override(unsigned long mfn, struct page *page)
+{
+	unsigned long flags;
+	unsigned long pfn;
+	unsigned long address;
+	unsigned level;
+	pte_t *ptep = NULL;
+
+	pfn = page_to_pfn(page);
+	if (!PageHighMem(page)) {
+		address = (unsigned long)__va(pfn << PAGE_SHIFT);
+		ptep = lookup_address(address, &level);
+
+		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
+					"m2p_add_override: pfn %lx not mapped", pfn))
+			return -EINVAL;
+	}
+
+	page->private = mfn;
+	page->index = pfn_to_mfn(pfn);
+
+	__set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+	if (!PageHighMem(page))
+		/* Just zap old mapping for now */
+		pte_clear(&init_mm, address, ptep);
+
+	spin_lock_irqsave(&m2p_override_lock, flags);
+	list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
+	spin_unlock_irqrestore(&m2p_override_lock, flags);
+
+	return 0;
+}
+
+int m2p_remove_override(struct page *page)
+{
+	unsigned long flags;
+	unsigned long mfn;
+	unsigned long pfn;
+	unsigned long address;
+	unsigned level;
+	pte_t *ptep = NULL;
+
+	pfn = page_to_pfn(page);
+	mfn = get_phys_to_machine(pfn);
+	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
+		return -EINVAL;
+
+	if (!PageHighMem(page)) {
+		address = (unsigned long)__va(pfn << PAGE_SHIFT);
+		ptep = lookup_address(address, &level);
+
+		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
+					"m2p_remove_override: pfn %lx not mapped", pfn))
+			return -EINVAL;
+	}
+
+	spin_lock_irqsave(&m2p_override_lock, flags);
+	list_del(&page->lru);
+	spin_unlock_irqrestore(&m2p_override_lock, flags);
+	__set_phys_to_machine(pfn, page->index);
+
+	if (!PageHighMem(page))
+		set_pte_at(&init_mm, address, ptep,
+				pfn_pte(pfn, PAGE_KERNEL));
+		/* No tlb flush necessary because the caller already
+		 * left the pte unmapped. */
+
+	return 0;
+}
+
+struct page *m2p_find_override(unsigned long mfn)
+{
+	unsigned long flags;
+	struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
+	struct page *p, *ret;
+
+	ret = NULL;
+
+	spin_lock_irqsave(&m2p_override_lock, flags);
+
+	list_for_each_entry(p, bucket, lru) {
+		if (p->private == mfn) {
+			ret = p;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&m2p_override_lock, flags);
+
+	return ret;
+}
+
+unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
+{
+	struct page *p = m2p_find_override(mfn);
+	unsigned long ret = pfn;
+
+	if (p)
+		ret = page_to_pfn(p);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
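
The override table added above is a fixed-size chained hash: M2P_OVERRIDE_HASH_SHIFT = 10 gives 1024 buckets, each holding a list of overridden pages, and m2p_find_override_pfn() falls back to the pfn passed in when no override exists. Below is a stand-alone user-space sketch of that layout; the names and the simple modulo hash are illustrative stand-ins, not the kernel API (which uses hash_long() and struct page lists).

#include <stdio.h>
#include <stdlib.h>

#define OVERRIDE_HASH_SHIFT	10
#define OVERRIDE_HASH		(1 << OVERRIDE_HASH_SHIFT)	/* 1024 buckets */

struct override {
	unsigned long mfn;		/* key */
	unsigned long pfn;		/* value */
	struct override *next;		/* chain within one bucket */
};

static struct override *buckets[OVERRIDE_HASH];

/* stand-in for hash_long(mfn, OVERRIDE_HASH_SHIFT) */
static unsigned long mfn_hash(unsigned long mfn)
{
	return mfn & (OVERRIDE_HASH - 1);
}

static void add_override(unsigned long mfn, unsigned long pfn)
{
	struct override *o = malloc(sizeof(*o));

	if (!o)
		return;
	o->mfn = mfn;
	o->pfn = pfn;
	o->next = buckets[mfn_hash(mfn)];
	buckets[mfn_hash(mfn)] = o;
}

static unsigned long find_override_pfn(unsigned long mfn, unsigned long pfn)
{
	struct override *o;

	for (o = buckets[mfn_hash(mfn)]; o; o = o->next)
		if (o->mfn == mfn)
			return o->pfn;
	return pfn;			/* no override: keep the pfn we were given */
}

int main(void)
{
	add_override(0x12345, 42);
	/* prints "42 7": first mfn is overridden, second is not */
	printf("%lu %lu\n", find_override_pfn(0x12345, 7),
	       find_override_pfn(0x99999, 7));
	return 0;
}
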
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b5a7f92..a8a66a5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -179,8 +179,13 @@
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end = map[i].addr + map[i].size;
+		unsigned long long end;
 
+		/* Guard against non-page aligned E820 entries. */
+		if (map[i].type == E820_RAM)
+			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+		end = map[i].addr + map[i].size;
 		if (map[i].type == E820_RAM && end > mem_end) {
 			/* RAM off the end - may be partially included */
 			u64 delta = min(map[i].size, end - mem_end);
@@ -350,6 +355,7 @@
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	pm_idle = default_idle;
+	boot_option_idle_override = IDLE_HALT;
 
 	fiddle_vdso();
 }
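
The E820 guard added above trims a RAM entry so that addr + size ends on a page boundary. A minimal stand-alone sketch of the same arithmetic; the 4 KiB PAGE_SIZE and the sample entry are assumptions for illustration only.

#include <stdio.h>

#define PAGE_SIZE 0x1000ULL		/* assumed 4 KiB pages, as on x86 */

int main(void)
{
	/* hypothetical E820 RAM entry whose end is not page aligned */
	unsigned long long addr = 0x1000ULL;
	unsigned long long size = 0x1800ULL;

	/* same trim as the patch: drop the partial tail page */
	size -= (size + addr) % PAGE_SIZE;

	/* prints addr=0x1000 size=0x1000 end=0x2000 (end now page aligned) */
	printf("addr=%#llx size=%#llx end=%#llx\n", addr, size, addr + size);
	return 0;
}
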
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
index 1d230ee..b90038e 100644
--- a/arch/xtensa/configs/common_defconfig
+++ b/arch/xtensa/configs/common_defconfig
@@ -32,7 +32,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_KOBJECT_UEVENT=y
 # CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
+# CONFIG_EXPERT is not set
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 7368164..0234cd1 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -55,7 +55,7 @@
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index bb84fbc..095cd80 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -55,7 +55,7 @@
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
diff --git a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/asm/mman.h
index fca4db4..3078901 100644
--- a/arch/xtensa/include/asm/mman.h
+++ b/arch/xtensa/include/asm/mman.h
@@ -83,6 +83,9 @@
 #define MADV_MERGEABLE   12		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/block/Kconfig b/block/Kconfig
index 6c9213e..60be1e0 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -2,7 +2,7 @@
 # Block layer core configuration
 #
 menuconfig BLOCK
-       bool "Enable the block layer" if EMBEDDED
+       bool "Enable the block layer" if EXPERT
        default y
        help
 	 Provide block layer support for the kernel.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b1febd0..455768a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1452,10 +1452,6 @@
 		goto done;
 	}
 
-	/* Currently we do not support hierarchy deeper than two level (0,1) */
-	if (parent != cgroup->top_cgroup)
-		return ERR_PTR(-EPERM);
-
 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 	if (!blkcg)
 		return ERR_PTR(-ENOMEM);
diff --git a/block/blk-core.c b/block/blk-core.c
index 4ce953f..518dd42 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,7 +33,7 @@
 
 #include "blk.h"
 
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
@@ -64,13 +64,27 @@
 		return;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io)
+	if (!new_io) {
+		part = rq->part;
 		part_stat_inc(cpu, part, merges[rw]);
-	else {
+	} else {
+		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+		if (!hd_struct_try_get(part)) {
+			/*
+			 * The partition is already being removed;
+			 * the request will be accounted on the disk only.
+			 *
+			 * We take a reference on disk->part0 although that
+			 * partition will never be deleted, so we can treat
+			 * it as any other partition.
+			 */
+			part = &rq->rq_disk->part0;
+			hd_struct_get(part);
+		}
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
+		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -128,6 +142,7 @@
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
+	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -337,7 +352,7 @@
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -388,13 +403,14 @@
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -408,7 +424,7 @@
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -431,7 +447,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1038,7 +1054,7 @@
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1329,9 +1345,9 @@
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
-				    bdev->bd_dev,
-				    bio->bi_sector - p->start_sect);
+		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
+				      bdev->bd_dev,
+				      bio->bi_sector - p->start_sect);
 	}
 }
 
@@ -1500,7 +1516,7 @@
 			goto end_io;
 
 		if (old_sector != -1)
-			trace_block_remap(q, bio, old_dev, old_sector);
+			trace_block_bio_remap(q, bio, old_dev, old_sector);
 
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
@@ -1776,7 +1792,7 @@
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1796,13 +1812,14 @@
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rw);
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
@@ -2594,19 +2611,14 @@
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-			struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
 			sizeof(((struct request *)0)->cmd_flags));
 
-	kblockd_workqueue = create_workqueue("kblockd");
+	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
+	kblockd_workqueue = alloc_workqueue("kblockd",
+					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d..b27d020 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -66,10 +66,12 @@
 
 	/*
 	 * Moving a request silently to empty queue_head may stall the
-	 * queue.  Kick the queue in those cases.
+	 * queue.  Kick the queue in those cases.  This function is called
+	 * from the request completion path and calling directly into
+	 * request_fn may confuse the driver.  Always use kblockd.
 	 */
 	if (was_empty && next_rq)
-		__blk_run_queue(q);
+		__blk_run_queue(q, true);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@
 		BUG();
 	}
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 	return rq;
 }
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 3c7a339..b791022 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -64,7 +64,7 @@
 	rcu_read_unlock();
 }
 
-/* Called by the exitting task */
+/* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
@@ -74,10 +74,9 @@
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (atomic_dec_and_test(&ioc->nr_tasks))
 		cfq_exit(ioc);
 
-	}
 	put_io_context(ioc);
 }
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2..bd3e8df 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -109,7 +109,6 @@
 	atomic_t 		done;
 	unsigned long 		flags;
 	struct completion 	*wait;
-	bio_end_io_t		*end_io;
 };
 
 static void bio_batch_end_io(struct bio *bio, int err)
@@ -122,17 +121,14 @@
 		else
 			clear_bit(BIO_UPTODATE, &bb->flags);
 	}
-	if (bb) {
-		if (bb->end_io)
-			bb->end_io(bio, err);
-		atomic_inc(&bb->done);
-		complete(bb->wait);
-	}
+	if (bb)
+		if (atomic_dec_and_test(&bb->done))
+			complete(bb->wait);
 	bio_put(bio);
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate a number of zero-filled write bios
  * @bdev:	blockdev to issue
  * @sector:	start sector
  * @nr_sects:	number of sectors to write
@@ -150,13 +146,12 @@
 	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
-	unsigned int sz, issued = 0;
+	unsigned int sz;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
-	atomic_set(&bb.done, 0);
+	atomic_set(&bb.done, 1);
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;
-	bb.end_io = NULL;
 
 submit:
 	ret = 0;
@@ -185,12 +180,12 @@
 				break;
 		}
 		ret = 0;
-		issued++;
+		atomic_inc(&bb.done);
 		submit_bio(WRITE, bio);
 	}
 
 	/* Wait for bios in-flight */
-	while (issued != atomic_read(&bb.done))
+	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion(&wait);
 
 	if (!test_bit(BIO_UPTODATE, &bb.flags))
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 74bc4a7..ea85e20 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,11 +351,12 @@
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rq_data_dir(req));
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 381b09b..e36cc10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+				unsigned long delay);
+
 struct throtl_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
@@ -168,7 +173,15 @@
 	 * tree of blkg (instead of traversing through hash list all
 	 * the time.
 	 */
-	tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+	/*
+	 * This is the common case when there are no blkio cgroups.
+	 * Avoid lookup in this case.
+	 */
+	if (blkcg == &blkio_root_cgroup)
+		tg = &td->root_tg;
+	else
+		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
 	/* Fill in device details for root group */
 	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
@@ -337,10 +350,9 @@
 	update_min_dispatch_time(st);
 
 	if (time_before_eq(st->min_disptime, jiffies))
-		throtl_schedule_delayed_work(td->queue, 0);
+		throtl_schedule_delayed_work(td, 0);
 	else
-		throtl_schedule_delayed_work(td->queue,
-				(st->min_disptime - jiffies));
+		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }
 
 static inline void
@@ -807,10 +819,10 @@
 }
 
 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {
 
-	struct throtl_data *td = q->td;
 	struct delayed_work *dwork = &td->throtl_work;
 
 	if (total_nr_queued(td) > 0) {
@@ -819,12 +831,11 @@
 		 * Cancel that and schedule a new one.
 		 */
 		__cancel_delayed_work(dwork);
-		kblockd_schedule_delayed_work(q, dwork, delay);
+		queue_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
 	}
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
 
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -912,7 +923,7 @@
 	smp_mb__after_atomic_inc();
 
 	/* Schedule a work now to process the limit change */
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
@@ -926,7 +937,7 @@
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
@@ -940,7 +951,7 @@
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
@@ -954,7 +965,7 @@
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1127,6 +1138,10 @@
 
 static int __init throtl_init(void)
 {
+	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+	if (!kthrotld_workqueue)
+		panic("Failed to create kthrotld\n");
+
 	blkio_policy_register(&blkio_policy_throtl);
 	return 0;
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4cd59b0..ea83a4f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -87,7 +87,6 @@
 	unsigned count;
 	unsigned total_weight;
 	u64 min_vdisktime;
-	struct rb_node *active;
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
 			.count = 0, .min_vdisktime = 0, }
@@ -97,7 +96,7 @@
  */
 struct cfq_queue {
 	/* reference count */
-	atomic_t ref;
+	int ref;
 	/* various state flags, see below */
 	unsigned int flags;
 	/* parent cfq_data */
@@ -180,7 +179,6 @@
 	/* group service_tree key */
 	u64 vdisktime;
 	unsigned int weight;
-	bool on_st;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -209,7 +207,7 @@
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	atomic_t ref;
+	int ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -563,11 +561,6 @@
 	u64 vdisktime = st->min_vdisktime;
 	struct cfq_group *cfqg;
 
-	if (st->active) {
-		cfqg = rb_entry_cfqg(st->active);
-		vdisktime = cfqg->vdisktime;
-	}
-
 	if (st->left) {
 		cfqg = rb_entry_cfqg(st->left);
 		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
@@ -605,8 +598,8 @@
 	return cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline unsigned
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
@@ -632,6 +625,14 @@
 				    low_slice);
 		}
 	}
+	return slice;
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
 	cfqq->allocated_slice = slice;
@@ -646,11 +647,11 @@
 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
-		return 0;
+		return false;
 	if (time_before(jiffies, cfqq->slice_end))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
@@ -869,7 +870,7 @@
 	struct rb_node *n;
 
 	cfqg->nr_cfqq++;
-	if (cfqg->on_st)
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		return;
 
 	/*
@@ -885,7 +886,6 @@
 		cfqg->vdisktime = st->min_vdisktime;
 
 	__cfq_group_service_tree_add(st, cfqg);
-	cfqg->on_st = true;
 	st->total_weight += cfqg->weight;
 }
 
@@ -894,9 +894,6 @@
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	if (st->active == &cfqg->rb_node)
-		st->active = NULL;
-
 	BUG_ON(cfqg->nr_cfqq < 1);
 	cfqg->nr_cfqq--;
 
@@ -905,7 +902,6 @@
 		return;
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-	cfqg->on_st = false;
 	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
@@ -1026,11 +1022,11 @@
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
-	 * not initiliazed yet. Initialize this new group without major
+	 * not initialized yet. Initialize this new group without major
 	 * and minor info and this info will be filled in once a new thread
 	 * comes for IO. See code above.
 	 */
@@ -1071,7 +1067,7 @@
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	atomic_inc(&cfqg->ref);
+	cfqg->ref++;
 	return cfqg;
 }
 
@@ -1083,7 +1079,7 @@
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	atomic_inc(&cfqq->cfqg->ref);
+	cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1091,11 +1087,12 @@
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(atomic_read(&cfqg->ref) <= 0);
-	if (!atomic_dec_and_test(&cfqg->ref))
+	BUG_ON(cfqg->ref <= 0);
+	cfqg->ref--;
+	if (cfqg->ref)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
-		BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
+		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
 	kfree(cfqg);
 }
 
@@ -1200,7 +1197,7 @@
 			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		atomic_inc(&cfqd->root_group.ref);
+		cfqd->root_group.ref++;
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation
 		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -1672,8 +1669,11 @@
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
-	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
-		cfqq->slice_resid = cfqq->slice_end - jiffies;
+	if (timed_out) {
+		if (cfq_cfqq_slice_new(cfqq))
+			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
+		else
+			cfqq->slice_resid = cfqq->slice_end - jiffies;
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 	}
 
@@ -1687,9 +1687,6 @@
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
 
-	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
-		cfqd->grp_service_tree.active = NULL;
-
 	if (cfqd->active_cic) {
 		put_io_context(cfqd->active_cic->ioc);
 		cfqd->active_cic = NULL;
@@ -1901,10 +1898,10 @@
 	 * in their service tree.
 	 */
 	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
 			service_tree->count);
-	return 0;
+	return false;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -2040,7 +2037,7 @@
 	int process_refs, io_refs;
 
 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	process_refs = cfqq->ref - io_refs;
 	BUG_ON(process_refs < 0);
 	return process_refs;
 }
@@ -2080,10 +2077,10 @@
 	 */
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
-		atomic_add(process_refs, &new_cfqq->ref);
+		new_cfqq->ref += process_refs;
 	} else {
 		new_cfqq->new_cfqq = cfqq;
-		atomic_add(new_process_refs, &cfqq->ref);
+		cfqq->ref += new_process_refs;
 	}
 }
 
@@ -2116,12 +2113,7 @@
 	unsigned count;
 	struct cfq_rb_root *st;
 	unsigned group_slice;
-
-	if (!cfqg) {
-		cfqd->serving_prio = IDLE_WORKLOAD;
-		cfqd->workload_expires = jiffies + 1;
-		return;
-	}
+	enum wl_prio_t original_prio = cfqd->serving_prio;
 
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
@@ -2134,6 +2126,9 @@
 		return;
 	}
 
+	if (original_prio != cfqd->serving_prio)
+		goto new_workload;
+
 	/*
 	 * For RT and BE, we have to choose also the type
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
@@ -2148,6 +2143,7 @@
 	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 
+new_workload:
 	/* otherwise select new workload type */
 	cfqd->serving_type =
 		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
@@ -2199,7 +2195,6 @@
 	if (RB_EMPTY_ROOT(&st->rb))
 		return NULL;
 	cfqg = cfq_rb_first_group(st);
-	st->active = &cfqg->rb_node;
 	update_min_vdisktime(st);
 	return cfqg;
 }
@@ -2293,6 +2288,17 @@
 		goto keep_queue;
 	}
 
+	/*
+	 * This is a deep seek queue, but the device is much faster than
+	 * the queue can deliver; don't idle
+	 */
+	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
+	    (cfq_cfqq_slice_new(cfqq) ||
+	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+		cfq_clear_cfqq_deep(cfqq);
+		cfq_clear_cfqq_idle_window(cfqq);
+	}
+
 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
@@ -2367,12 +2373,12 @@
 {
 	/* the queue hasn't finished any request, can't estimate */
 	if (cfq_cfqq_slice_new(cfqq))
-		return 1;
+		return true;
 	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
 		cfqq->slice_end))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -2538,9 +2544,10 @@
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct cfq_group *cfqg, *orig_cfqg;
 
-	BUG_ON(atomic_read(&cfqq->ref) <= 0);
+	BUG_ON(cfqq->ref <= 0);
 
-	if (!atomic_dec_and_test(&cfqq->ref))
+	cfqq->ref--;
+	if (cfqq->ref)
 		return;
 
 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2843,7 +2850,7 @@
 	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
-	atomic_set(&cfqq->ref, 0);
+	cfqq->ref = 0;
 	cfqq->cfqd = cfqd;
 
 	cfq_mark_cfqq_prio_changed(cfqq);
@@ -2979,11 +2986,11 @@
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
 	if (!is_sync && !(*async_cfqq)) {
-		atomic_inc(&cfqq->ref);
+		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
 
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 	return cfqq;
 }
 
@@ -3265,6 +3272,10 @@
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
 		return true;
 
+	/* An idle queue should not be idle now for some reason */
+	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+		return true;
+
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 		return false;
 
@@ -3284,10 +3295,19 @@
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	struct cfq_queue *old_cfqq = cfqd->active_queue;
+
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
 	cfq_slice_expired(cfqd, 1);
 
 	/*
+	 * If the workload type has changed, don't save the slice;
+	 * otherwise the preemption doesn't happen
+	 */
+	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+		cfqq->cfqg->saved_workload_slice = 0;
+
+	/*
 	 * Put the new queue at the front of the current list,
 	 * so we know that it will be selected next.
 	 */
@@ -3335,7 +3355,7 @@
 			    cfqd->busy_queues > 1) {
 				cfq_del_timer(cfqd, cfqq);
 				cfq_clear_cfqq_wait_request(cfqq);
-				__blk_run_queue(cfqd->queue);
+				__blk_run_queue(cfqd->queue, false);
 			} else {
 				cfq_blkiocg_update_idle_time_stats(
 						&cfqq->cfqg->blkg);
@@ -3350,7 +3370,7 @@
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue);
+		__blk_run_queue(cfqd->queue, false);
 	}
 }
 
@@ -3412,6 +3432,10 @@
 {
 	struct cfq_io_context *cic = cfqd->active_cic;
 
+	/* If the queue already has requests, don't wait */
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+		return false;
+
 	/* If there are other queues in the group, don't wait */
 	if (cfqq->cfqg->nr_cfqq > 1)
 		return false;
@@ -3681,13 +3705,13 @@
 	}
 
 	cfqq->allocated[rw]++;
-	atomic_inc(&cfqq->ref);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
+	cfqq->ref++;
 	rq->elevator_private = cic;
 	rq->elevator_private2 = cfqq;
 	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	return 0;
 
 queue_fail:
@@ -3707,7 +3731,7 @@
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue);
+	__blk_run_queue(cfqd->queue, false);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -3862,6 +3886,10 @@
 	if (!cfqd)
 		return NULL;
 
+	/*
+	 * No need to take the queue_lock in this routine, since we are
+	 * initializing the ioscheduler, and nobody is using cfqd
+	 */
 	cfqd->cic_index = i;
 
 	/* Init root service tree */
@@ -3881,7 +3909,7 @@
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
@@ -3901,7 +3929,7 @@
 	 * will not attempt to free it.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-	atomic_inc(&cfqd->oom_cfqq.ref);
+	cfqd->oom_cfqq.ref++;
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
 	INIT_LIST_HEAD(&cfqd->cic_list);
diff --git a/block/elevator.c b/block/elevator.c
index 2569512..236e93c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@
 		 *   with anything.  There's no point in delaying queue
 		 *   processing.
 		 */
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
diff --git a/block/genhd.c b/block/genhd.c
index 5fa2b44..cbf1112 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -18,6 +18,7 @@
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
 #include <linux/idr.h>
+#include <linux/log2.h>
 
 #include "blk.h"
 
@@ -35,6 +36,10 @@
 
 static struct device_type disk_type;
 
+static void disk_add_events(struct gendisk *disk);
+static void disk_del_events(struct gendisk *disk);
+static void disk_release_events(struct gendisk *disk);
+
 /**
  * disk_get_part - get partition
  * @disk: disk to look partition from
@@ -239,7 +244,7 @@
 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
 
 /* index in the above - for now: assume no multimajor ranges */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % BLKDEV_MAJOR_HASH_SIZE;
 }
@@ -502,6 +507,64 @@
 	return 0;
 }
 
+void register_disk(struct gendisk *disk)
+{
+	struct device *ddev = disk_to_dev(disk);
+	struct block_device *bdev;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+	int err;
+
+	ddev->parent = disk->driverfs_dev;
+
+	dev_set_name(ddev, disk->disk_name);
+
+	/* delay uevents, until we scanned partition table */
+	dev_set_uevent_suppress(ddev, 1);
+
+	if (device_add(ddev))
+		return;
+	if (!sysfs_deprecated) {
+		err = sysfs_create_link(block_depr, &ddev->kobj,
+					kobject_name(&ddev->kobj));
+		if (err) {
+			device_del(ddev);
+			return;
+		}
+	}
+	disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
+	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+
+	/* No minors to use for partitions */
+	if (!disk_partitionable(disk))
+		goto exit;
+
+	/* No such device (e.g., media were just removed) */
+	if (!get_capacity(disk))
+		goto exit;
+
+	bdev = bdget_disk(disk, 0);
+	if (!bdev)
+		goto exit;
+
+	bdev->bd_invalidated = 1;
+	err = blkdev_get(bdev, FMODE_READ, NULL);
+	if (err < 0)
+		goto exit;
+	blkdev_put(bdev, FMODE_READ);
+
+exit:
+	/* announce disk after possible partitions are created */
+	dev_set_uevent_suppress(ddev, 0);
+	kobject_uevent(&ddev->kobj, KOBJ_ADD);
+
+	/* announce possible partitions */
+	disk_part_iter_init(&piter, disk, 0);
+	while ((part = disk_part_iter_next(&piter)))
+		kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
+	disk_part_iter_exit(&piter);
+}
+
 /**
  * add_disk - add partitioning information to kernel list
  * @disk: per-device partitioning information
@@ -551,18 +614,48 @@
 	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
 				   "bdi");
 	WARN_ON(retval);
+
+	disk_add_events(disk);
 }
-
 EXPORT_SYMBOL(add_disk);
-EXPORT_SYMBOL(del_gendisk);	/* in partitions/check.c */
 
-void unlink_gendisk(struct gendisk *disk)
+void del_gendisk(struct gendisk *disk)
 {
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+
+	disk_del_events(disk);
+
+	/* invalidate stuff */
+	disk_part_iter_init(&piter, disk,
+			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
+	while ((part = disk_part_iter_next(&piter))) {
+		invalidate_partition(disk, part->partno);
+		delete_partition(disk, part->partno);
+	}
+	disk_part_iter_exit(&piter);
+
+	invalidate_partition(disk, 0);
+	blk_free_devt(disk_to_dev(disk)->devt);
+	set_capacity(disk, 0);
+	disk->flags &= ~GENHD_FL_UP;
+
 	sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
 	bdi_unregister(&disk->queue->backing_dev_info);
 	blk_unregister_queue(disk);
 	blk_unregister_region(disk_devt(disk), disk->minors);
+
+	part_stat_set_all(&disk->part0, 0);
+	disk->part0.stamp = 0;
+
+	kobject_put(disk->part0.holder_dir);
+	kobject_put(disk->slave_dir);
+	disk->driverfs_dev = NULL;
+	if (!sysfs_deprecated)
+		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+	device_del(disk_to_dev(disk));
 }
+EXPORT_SYMBOL(del_gendisk);
 
 /**
  * get_gendisk - get partitioning information for a given device
@@ -735,7 +828,7 @@
 	static void *p;
 
 	p = disk_seqf_start(seqf, pos);
-	if (!IS_ERR(p) && p && !*pos)
+	if (!IS_ERR_OR_NULL(p) && !*pos)
 		seq_puts(seqf, "major minor  #blocks  name\n\n");
 	return p;
 }
@@ -1005,6 +1098,7 @@
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
+	disk_release_events(disk);
 	kfree(disk->random);
 	disk_replace_part_tbl(disk, NULL);
 	free_part_stats(&disk->part0);
@@ -1110,29 +1204,6 @@
 module_init(proc_genhd_init);
 #endif /* CONFIG_PROC_FS */
 
-static void media_change_notify_thread(struct work_struct *work)
-{
-	struct gendisk *gd = container_of(work, struct gendisk, async_notify);
-	char event[] = "MEDIA_CHANGE=1";
-	char *envp[] = { event, NULL };
-
-	/*
-	 * set enviroment vars to indicate which event this is for
-	 * so that user space will know to go check the media status.
-	 */
-	kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
-	put_device(gd->driverfs_dev);
-}
-
-#if 0
-void genhd_media_change_notify(struct gendisk *disk)
-{
-	get_device(disk->driverfs_dev);
-	schedule_work(&disk->async_notify);
-}
-EXPORT_SYMBOL_GPL(genhd_media_change_notify);
-#endif  /*  0  */
-
 dev_t blk_lookup_devt(const char *name, int partno)
 {
 	dev_t devt = MKDEV(0, 0);
@@ -1193,13 +1264,13 @@
 		}
 		disk->part_tbl->part[0] = &disk->part0;
 
+		hd_ref_init(&disk->part0);
+
 		disk->minors = minors;
 		rand_initialize_disk(disk);
 		disk_to_dev(disk)->class = &block_class;
 		disk_to_dev(disk)->type = &disk_type;
 		device_initialize(disk_to_dev(disk));
-		INIT_WORK(&disk->async_notify,
-			media_change_notify_thread);
 	}
 	return disk;
 }
@@ -1284,10 +1355,429 @@
 	struct block_device *bdev = bdget_disk(disk, partno);
 	if (bdev) {
 		fsync_bdev(bdev);
-		res = __invalidate_device(bdev);
+		res = __invalidate_device(bdev, true);
 		bdput(bdev);
 	}
 	return res;
 }
 
 EXPORT_SYMBOL(invalidate_partition);
+
+/*
+ * Disk events - monitor disk events like media change and eject request.
+ */
+struct disk_events {
+	struct list_head	node;		/* all disk_event's */
+	struct gendisk		*disk;		/* the associated disk */
+	spinlock_t		lock;
+
+	int			block;		/* event blocking depth */
+	unsigned int		pending;	/* events already sent out */
+	unsigned int		clearing;	/* events being cleared */
+
+	long			poll_msecs;	/* interval, -1 for default */
+	struct delayed_work	dwork;
+};
+
+static const char *disk_events_strs[] = {
+	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "media_change",
+	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "eject_request",
+};
+
+static char *disk_uevents[] = {
+	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "DISK_MEDIA_CHANGE=1",
+	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "DISK_EJECT_REQUEST=1",
+};
+
+/* list of all disk_events */
+static DEFINE_MUTEX(disk_events_mutex);
+static LIST_HEAD(disk_events);
+
+/* disable in-kernel polling by default */
+static unsigned long disk_events_dfl_poll_msecs	= 0;
+
+static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
+{
+	struct disk_events *ev = disk->ev;
+	long intv_msecs = 0;
+
+	/*
+	 * If device-specific poll interval is set, always use it.  If
+	 * the default is being used, poll iff there are events which
+	 * can't be monitored asynchronously.
+	 */
+	if (ev->poll_msecs >= 0)
+		intv_msecs = ev->poll_msecs;
+	else if (disk->events & ~disk->async_events)
+		intv_msecs = disk_events_dfl_poll_msecs;
+
+	return msecs_to_jiffies(intv_msecs);
+}
+
+static void __disk_block_events(struct gendisk *disk, bool sync)
+{
+	struct disk_events *ev = disk->ev;
+	unsigned long flags;
+	bool cancel;
+
+	spin_lock_irqsave(&ev->lock, flags);
+	cancel = !ev->block++;
+	spin_unlock_irqrestore(&ev->lock, flags);
+
+	if (cancel) {
+		if (sync)
+			cancel_delayed_work_sync(&disk->ev->dwork);
+		else
+			cancel_delayed_work(&disk->ev->dwork);
+	}
+}
+
+static void __disk_unblock_events(struct gendisk *disk, bool check_now)
+{
+	struct disk_events *ev = disk->ev;
+	unsigned long intv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ev->lock, flags);
+
+	if (WARN_ON_ONCE(ev->block <= 0))
+		goto out_unlock;
+
+	if (--ev->block)
+		goto out_unlock;
+
+	/*
+	 * Not exactly a latency critical operation, set poll timer
+	 * slack to 25% and kick event check.
+	 */
+	intv = disk_events_poll_jiffies(disk);
+	set_timer_slack(&ev->dwork.timer, intv / 4);
+	if (check_now)
+		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	else if (intv)
+		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+out_unlock:
+	spin_unlock_irqrestore(&ev->lock, flags);
+}
+
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events().  Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
+{
+	if (disk->ev)
+		__disk_block_events(disk, true);
+}
+
+/**
+ * disk_unblock_events - unblock disk event checking
+ * @disk: disk to unblock events for
+ *
+ * Undo disk_block_events().  When the block count reaches zero, it
+ * starts events polling if configured.
+ *
+ * CONTEXT:
+ * Don't care.  Safe to call from irq context.
+ */
+void disk_unblock_events(struct gendisk *disk)
+{
+	if (disk->ev)
+		__disk_unblock_events(disk, true);
+}
+
+/**
+ * disk_check_events - schedule immediate event checking
+ * @disk: disk to check events for
+ *
+ * Schedule immediate event checking on @disk if not blocked.
+ *
+ * CONTEXT:
+ * Don't care.  Safe to call from irq context.
+ */
+void disk_check_events(struct gendisk *disk)
+{
+	if (disk->ev) {
+		__disk_block_events(disk, false);
+		__disk_unblock_events(disk, true);
+	}
+}
+EXPORT_SYMBOL_GPL(disk_check_events);
+
+/**
+ * disk_clear_events - synchronously check, clear and return pending events
+ * @disk: disk to fetch and clear events from
+ * @mask: mask of events to be fetched and cleared
+ *
+ * Disk events are synchronously checked and pending events in @mask
+ * are cleared and returned.  This ignores the block count.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
+{
+	const struct block_device_operations *bdops = disk->fops;
+	struct disk_events *ev = disk->ev;
+	unsigned int pending;
+
+	if (!ev) {
+		/* for drivers still using the old ->media_changed method */
+		if ((mask & DISK_EVENT_MEDIA_CHANGE) &&
+		    bdops->media_changed && bdops->media_changed(disk))
+			return DISK_EVENT_MEDIA_CHANGE;
+		return 0;
+	}
+
+	/* tell the workfn about the events being cleared */
+	spin_lock_irq(&ev->lock);
+	ev->clearing |= mask;
+	spin_unlock_irq(&ev->lock);
+
+	/* unconditionally schedule event check and wait for it to finish */
+	__disk_block_events(disk, true);
+	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	flush_delayed_work(&ev->dwork);
+	__disk_unblock_events(disk, false);
+
+	/* then, fetch and clear pending events */
+	spin_lock_irq(&ev->lock);
+	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */
+	pending = ev->pending & mask;
+	ev->pending &= ~mask;
+	spin_unlock_irq(&ev->lock);
+
+	return pending;
+}
+
+static void disk_events_workfn(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+	struct gendisk *disk = ev->disk;
+	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
+	unsigned int clearing = ev->clearing;
+	unsigned int events;
+	unsigned long intv;
+	int nr_events = 0, i;
+
+	/* check events */
+	events = disk->fops->check_events(disk, clearing);
+
+	/* accumulate pending events and schedule next poll if necessary */
+	spin_lock_irq(&ev->lock);
+
+	events &= ~ev->pending;
+	ev->pending |= events;
+	ev->clearing &= ~clearing;
+
+	intv = disk_events_poll_jiffies(disk);
+	if (!ev->block && intv)
+		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+
+	spin_unlock_irq(&ev->lock);
+
+	/* tell userland about new events */
+	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
+		if (events & (1 << i))
+			envp[nr_events++] = disk_uevents[i];
+
+	if (nr_events)
+		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+}
+
+/*
+ * A disk events enabled device has the following sysfs nodes under
+ * its /sys/block/X/ directory.
+ *
+ * events		: list of all supported events
+ * events_async		: list of events which can be detected w/o polling
+ * events_poll_msecs	: polling interval, 0: disable, -1: system default
+ */
+static ssize_t __disk_events_show(unsigned int events, char *buf)
+{
+	const char *delim = "";
+	ssize_t pos = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
+		if (events & (1 << i)) {
+			pos += sprintf(buf + pos, "%s%s",
+				       delim, disk_events_strs[i]);
+			delim = " ";
+		}
+	if (pos)
+		pos += sprintf(buf + pos, "\n");
+	return pos;
+}
+
+static ssize_t disk_events_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return __disk_events_show(disk->events, buf);
+}
+
+static ssize_t disk_events_async_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return __disk_events_show(disk->async_events, buf);
+}
+
+static ssize_t disk_events_poll_msecs_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
+}
+
+static ssize_t disk_events_poll_msecs_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	long intv;
+
+	if (!count || !sscanf(buf, "%ld", &intv))
+		return -EINVAL;
+
+	if (intv < 0 && intv != -1)
+		return -EINVAL;
+
+	__disk_block_events(disk, true);
+	disk->ev->poll_msecs = intv;
+	__disk_unblock_events(disk, true);
+
+	return count;
+}
+
+static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
+static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL);
+static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR,
+			 disk_events_poll_msecs_show,
+			 disk_events_poll_msecs_store);
+
+static const struct attribute *disk_events_attrs[] = {
+	&dev_attr_events.attr,
+	&dev_attr_events_async.attr,
+	&dev_attr_events_poll_msecs.attr,
+	NULL,
+};
+
+/*
+ * The default polling interval can be specified by the kernel
+ * parameter block.events_dfl_poll_msecs which defaults to 0
+ * (disable).  This can also be modified runtime by writing to
+ * /sys/module/block/events_dfl_poll_msecs.
+ */
+static int disk_events_set_dfl_poll_msecs(const char *val,
+					  const struct kernel_param *kp)
+{
+	struct disk_events *ev;
+	int ret;
+
+	ret = param_set_ulong(val, kp);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&disk_events_mutex);
+
+	list_for_each_entry(ev, &disk_events, node)
+		disk_check_events(ev->disk);
+
+	mutex_unlock(&disk_events_mutex);
+
+	return 0;
+}
+
+static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
+	.set	= disk_events_set_dfl_poll_msecs,
+	.get	= param_get_ulong,
+};
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX	"block."
+
+module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
+		&disk_events_dfl_poll_msecs, 0644);
+
+/*
+ * disk_{add|del|release}_events - initialize and destroy disk_events.
+ */
+static void disk_add_events(struct gendisk *disk)
+{
+	struct disk_events *ev;
+
+	if (!disk->fops->check_events || !(disk->events | disk->async_events))
+		return;
+
+	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev) {
+		pr_warn("%s: failed to initialize events\n", disk->disk_name);
+		return;
+	}
+
+	if (sysfs_create_files(&disk_to_dev(disk)->kobj,
+			       disk_events_attrs) < 0) {
+		pr_warn("%s: failed to create sysfs files for events\n",
+			disk->disk_name);
+		kfree(ev);
+		return;
+	}
+
+	disk->ev = ev;
+
+	INIT_LIST_HEAD(&ev->node);
+	ev->disk = disk;
+	spin_lock_init(&ev->lock);
+	ev->block = 1;
+	ev->poll_msecs = -1;
+	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
+
+	mutex_lock(&disk_events_mutex);
+	list_add_tail(&ev->node, &disk_events);
+	mutex_unlock(&disk_events_mutex);
+
+	/*
+	 * Block count is initialized to 1 and the following initial
+	 * unblock kicks it into action.
+	 */
+	__disk_unblock_events(disk, true);
+}
+
+static void disk_del_events(struct gendisk *disk)
+{
+	if (!disk->ev)
+		return;
+
+	__disk_block_events(disk, true);
+
+	mutex_lock(&disk_events_mutex);
+	list_del_init(&disk->ev->node);
+	mutex_unlock(&disk_events_mutex);
+
+	sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
+}
+
+static void disk_release_events(struct gendisk *disk)
+{
+	/* the block count should be 1 from disk_del_events() */
+	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
+	kfree(disk->ev);
+}
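
The comment blocks above document three new sysfs nodes under /sys/block/<disk>/ and the block.events_dfl_poll_msecs parameter, with in-kernel polling disabled by default. A small user-space sketch that opts one disk into a 2-second poll and lists its supported events; the disk name "sda" is an assumption and writing events_poll_msecs needs root.

#include <stdio.h>

int main(void)
{
	const char *disk = "sda";	/* assumed disk name; adjust as needed */
	char path[128], buf[128];
	FILE *f;

	/* polling is disabled by default; ask for a 2000 ms poll on this disk */
	snprintf(path, sizeof(path), "/sys/block/%s/events_poll_msecs", disk);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "2000\n");
		fclose(f);
	}

	/* list the events this disk can report, e.g. "media_change" */
	snprintf(path, sizeof(path), "/sys/block/%s/events", disk);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("%s supports: %s", disk, buf);
		fclose(f);
	}
	return 0;
}
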
diff --git a/block/ioctl.c b/block/ioctl.c
index a9a302e..1124cd2 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,11 +294,14 @@
 			return -EINVAL;
 		if (get_user(n, (int __user *) arg))
 			return -EFAULT;
-		if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
-			return -EBUSY;
+		if (!(mode & FMODE_EXCL)) {
+			bdgrab(bdev);
+			if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+				return -EBUSY;
+		}
 		ret = set_blocksize(bdev, n);
 		if (!(mode & FMODE_EXCL))
-			bd_release(bdev);
+			blkdev_put(bdev, mode | FMODE_EXCL);
 		return ret;
 	case BLKPG:
 		ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
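
For context, the hunk above is the set_blocksize path reached from the BLKBSZSET ioctl; the change takes the temporary exclusive claim via blkdev_get()/blkdev_put() instead of the removed bd_claim()/bd_release(). A hedged user-space sketch of that ioctl (device path and size are assumptions; needs root, and the call can fail with EBUSY if another opener holds the device exclusively):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKBSZSET, BLKBSZGET */

int main(void)
{
	int fd = open("/dev/sdb", O_RDWR);	/* assumed scratch device */
	int newsz = 4096, cursz = 0;

	if (fd < 0)
		return 1;

	/* the fd was not opened O_EXCL, so the kernel claims the device itself */
	if (ioctl(fd, BLKBSZSET, &newsz))
		perror("BLKBSZSET");

	if (!ioctl(fd, BLKBSZGET, &cursz))
		printf("block size now %d\n", cursz);

	close(fd);
	return 0;
}
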
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e4bac29..4b7cb0e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -110,7 +110,6 @@
 
 config CRYPTO_GF128MUL
 	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
 	help
 	  Efficient table driven implementation of multiplications in the
 	  field GF(2^128).  This is needed by some cypher modes. This
@@ -539,8 +538,9 @@
 
 config CRYPTO_AES_NI_INTEL
 	tristate "AES cipher algorithms (AES-NI)"
-	depends on (X86 || UML_X86) && 64BIT
-	select CRYPTO_AES_X86_64
+	depends on (X86 || UML_X86)
+	select CRYPTO_AES_X86_64 if 64BIT
+	select CRYPTO_AES_586 if !64BIT
 	select CRYPTO_CRYPTD
 	select CRYPTO_ALGAPI
 	select CRYPTO_FPU
@@ -563,9 +563,10 @@
 
 	  See <http://csrc.nist.gov/encryption/aes/> for more information.
 
-	  In addition to AES cipher algorithm support, the
-	  acceleration for some popular block cipher mode is supported
-	  too, including ECB, CBC, CTR, LRW, PCBC, XTS.
+	  In addition to AES cipher algorithm support, the acceleration
+	  for some popular block cipher modes is supported too, including
+	  ECB, CBC, LRW, PCBC and XTS. The 64-bit version has additional
+	  acceleration for CTR.
 
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
@@ -841,6 +842,27 @@
 	  ANSI X9.31 A.2.4. Note that this option must be enabled if
 	  CRYPTO_FIPS is selected
 
+config CRYPTO_USER_API
+	tristate
+
+config CRYPTO_USER_API_HASH
+	tristate "User-space interface for hash algorithms"
+	depends on NET
+	select CRYPTO_HASH
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-space interface for hash
+	  algorithms.
+
+config CRYPTO_USER_API_SKCIPHER
+	tristate "User-space interface for symmetric key cipher algorithms"
+	depends on NET
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-space interface for symmetric
+	  key cipher algorithms.
+
 source "drivers/crypto/Kconfig"
 
 endif	# if CRYPTO
diff --git a/crypto/Makefile b/crypto/Makefile
index 423b7de..e9a399c 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,32 +3,32 @@
 #
 
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
-crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
+crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
 obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
 
 obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
 
-crypto_blkcipher-objs := ablkcipher.o
-crypto_blkcipher-objs += blkcipher.o
+crypto_blkcipher-y := ablkcipher.o
+crypto_blkcipher-y += blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 
-crypto_hash-objs += ahash.o
-crypto_hash-objs += shash.o
+crypto_hash-y += ahash.o
+crypto_hash-y += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 
-cryptomgr-objs := algboss.o testmgr.o
+cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
@@ -85,6 +85,9 @@
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
+obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
+obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
new file mode 100644
index 0000000..940d70c
--- /dev/null
+++ b/crypto/af_alg.c
@@ -0,0 +1,483 @@
+/*
+ * af_alg: User-space algorithm interface
+ *
+ * This file provides the user-space API for algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/atomic.h>
+#include <crypto/if_alg.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/rwsem.h>
+
+struct alg_type_list {
+	const struct af_alg_type *type;
+	struct list_head list;
+};
+
+static atomic_long_t alg_memory_allocated;
+
+static struct proto alg_proto = {
+	.name			= "ALG",
+	.owner			= THIS_MODULE,
+	.memory_allocated	= &alg_memory_allocated,
+	.obj_size		= sizeof(struct alg_sock),
+};
+
+static LIST_HEAD(alg_types);
+static DECLARE_RWSEM(alg_types_sem);
+
+static const struct af_alg_type *alg_get_type(const char *name)
+{
+	const struct af_alg_type *type = ERR_PTR(-ENOENT);
+	struct alg_type_list *node;
+
+	down_read(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (strcmp(node->type->name, name))
+			continue;
+
+		if (try_module_get(node->type->owner))
+			type = node->type;
+		break;
+	}
+	up_read(&alg_types_sem);
+
+	return type;
+}
+
+int af_alg_register_type(const struct af_alg_type *type)
+{
+	struct alg_type_list *node;
+	int err = -EEXIST;
+
+	down_write(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (!strcmp(node->type->name, type->name))
+			goto unlock;
+	}
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!node)
+		goto unlock;
+
+	type->ops->owner = THIS_MODULE;
+	node->type = type;
+	list_add(&node->list, &alg_types);
+	err = 0;
+
+unlock:
+	up_write(&alg_types_sem);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_register_type);
+
+int af_alg_unregister_type(const struct af_alg_type *type)
+{
+	struct alg_type_list *node;
+	int err = -ENOENT;
+
+	down_write(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (strcmp(node->type->name, type->name))
+			continue;
+
+		list_del(&node->list);
+		kfree(node);
+		err = 0;
+		break;
+	}
+	up_write(&alg_types_sem);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_unregister_type);
+
+static void alg_do_release(const struct af_alg_type *type, void *private)
+{
+	if (!type)
+		return;
+
+	type->release(private);
+	module_put(type->owner);
+}
+
+int af_alg_release(struct socket *sock)
+{
+	if (sock->sk)
+		sock_put(sock->sk);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_release);
+
+static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct sockaddr_alg *sa = (void *)uaddr;
+	const struct af_alg_type *type;
+	void *private;
+
+	if (sock->state == SS_CONNECTED)
+		return -EINVAL;
+
+	if (addr_len != sizeof(*sa))
+		return -EINVAL;
+
+	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+	sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
+
+	type = alg_get_type(sa->salg_type);
+	if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
+		request_module("algif-%s", sa->salg_type);
+		type = alg_get_type(sa->salg_type);
+	}
+
+	if (IS_ERR(type))
+		return PTR_ERR(type);
+
+	private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
+	if (IS_ERR(private)) {
+		module_put(type->owner);
+		return PTR_ERR(private);
+	}
+
+	lock_sock(sk);
+
+	swap(ask->type, type);
+	swap(ask->private, private);
+
+	release_sock(sk);
+
+	alg_do_release(type, private);
+
+	return 0;
+}
+
+static int alg_setkey(struct sock *sk, char __user *ukey,
+		      unsigned int keylen)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type = ask->type;
+	u8 *key;
+	int err;
+
+	key = sock_kmalloc(sk, keylen, GFP_KERNEL);
+	if (!key)
+		return -ENOMEM;
+
+	err = -EFAULT;
+	if (copy_from_user(key, ukey, keylen))
+		goto out;
+
+	err = type->setkey(ask->private, key, keylen);
+
+out:
+	sock_kfree_s(sk, key, keylen);
+
+	return err;
+}
+
+static int alg_setsockopt(struct socket *sock, int level, int optname,
+			  char __user *optval, unsigned int optlen)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type;
+	int err = -ENOPROTOOPT;
+
+	lock_sock(sk);
+	type = ask->type;
+
+	if (level != SOL_ALG || !type)
+		goto unlock;
+
+	switch (optname) {
+	case ALG_SET_KEY:
+		if (sock->state == SS_CONNECTED)
+			goto unlock;
+		if (!type->setkey)
+			goto unlock;
+
+		err = alg_setkey(sk, optval, optlen);
+	}
+
+unlock:
+	release_sock(sk);
+
+	return err;
+}
+
+int af_alg_accept(struct sock *sk, struct socket *newsock)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type;
+	struct sock *sk2;
+	int err;
+
+	lock_sock(sk);
+	type = ask->type;
+
+	err = -EINVAL;
+	if (!type)
+		goto unlock;
+
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto);
+	err = -ENOMEM;
+	if (!sk2)
+		goto unlock;
+
+	sock_init_data(newsock, sk2);
+	sock_graft(sk2, newsock);
+
+	err = type->accept(ask->private, sk2);
+	if (err) {
+		sk_free(sk2);
+		goto unlock;
+	}
+
+	sk2->sk_family = PF_ALG;
+
+	sock_hold(sk);
+	alg_sk(sk2)->parent = sk;
+	alg_sk(sk2)->type = type;
+
+	newsock->ops = type->ops;
+	newsock->state = SS_CONNECTED;
+
+	err = 0;
+
+unlock:
+	release_sock(sk);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_accept);
+
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	return af_alg_accept(sock->sk, newsock);
+}
+
+static const struct proto_ops alg_proto_ops = {
+	.family		=	PF_ALG,
+	.owner		=	THIS_MODULE,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.sendpage	=	sock_no_sendpage,
+	.sendmsg	=	sock_no_sendmsg,
+	.recvmsg	=	sock_no_recvmsg,
+	.poll		=	sock_no_poll,
+
+	.bind		=	alg_bind,
+	.release	=	af_alg_release,
+	.setsockopt	=	alg_setsockopt,
+	.accept		=	alg_accept,
+};
+
+static void alg_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+
+	alg_do_release(ask->type, ask->private);
+}
+
+static int alg_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
+{
+	struct sock *sk;
+	int err;
+
+	if (sock->type != SOCK_SEQPACKET)
+		return -ESOCKTNOSUPPORT;
+	if (protocol != 0)
+		return -EPROTONOSUPPORT;
+
+	err = -ENOMEM;
+	sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto);
+	if (!sk)
+		goto out;
+
+	sock->ops = &alg_proto_ops;
+	sock_init_data(sock, sk);
+
+	sk->sk_family = PF_ALG;
+	sk->sk_destruct = alg_sock_destruct;
+
+	return 0;
+out:
+	return err;
+}
+
+static const struct net_proto_family alg_family = {
+	.family	=	PF_ALG,
+	.create	=	alg_create,
+	.owner	=	THIS_MODULE,
+};
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+		   int write)
+{
+	unsigned long from = (unsigned long)addr;
+	unsigned long npages;
+	unsigned off;
+	int err;
+	int i;
+
+	err = -EFAULT;
+	if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
+		goto out;
+
+	off = from & ~PAGE_MASK;
+	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (npages > ALG_MAX_PAGES)
+		npages = ALG_MAX_PAGES;
+
+	err = get_user_pages_fast(from, npages, write, sgl->pages);
+	if (err < 0)
+		goto out;
+
+	npages = err;
+	err = -EINVAL;
+	if (WARN_ON(npages == 0))
+		goto out;
+
+	err = 0;
+
+	sg_init_table(sgl->sg, npages);
+
+	for (i = 0; i < npages; i++) {
+		int plen = min_t(int, len, PAGE_SIZE - off);
+
+		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
+
+		off = 0;
+		len -= plen;
+		err += plen;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_make_sg);
+
+void af_alg_free_sg(struct af_alg_sgl *sgl)
+{
+	int i;
+
+	i = 0;
+	do {
+		put_page(sgl->pages[i]);
+	} while (!sg_is_last(sgl->sg + (i++)));
+}
+EXPORT_SYMBOL_GPL(af_alg_free_sg);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
+{
+	struct cmsghdr *cmsg;
+
+	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+		if (cmsg->cmsg_level != SOL_ALG)
+			continue;
+
+		switch(cmsg->cmsg_type) {
+		case ALG_SET_IV:
+			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
+				return -EINVAL;
+			con->iv = (void *)CMSG_DATA(cmsg);
+			if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
+						      sizeof(*con->iv)))
+				return -EINVAL;
+			break;
+
+		case ALG_SET_OP:
+			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
+				return -EINVAL;
+			con->op = *(u32 *)CMSG_DATA(cmsg);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
+{
+	switch (err) {
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&completion->completion);
+		INIT_COMPLETION(completion->completion);
+		err = completion->err;
+		break;
+	};
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
+
+void af_alg_complete(struct crypto_async_request *req, int err)
+{
+	struct af_alg_completion *completion = req->data;
+
+	completion->err = err;
+	complete(&completion->completion);
+}
+EXPORT_SYMBOL_GPL(af_alg_complete);
+
+static int __init af_alg_init(void)
+{
+	int err = proto_register(&alg_proto, 0);
+
+	if (err)
+		goto out;
+
+	err = sock_register(&alg_family);
+	if (err != 0)
+		goto out_unregister_proto;
+
+out:
+	return err;
+
+out_unregister_proto:
+	proto_unregister(&alg_proto);
+	goto out;
+}
+
+static void __exit af_alg_exit(void)
+{
+	sock_unregister(PF_ALG);
+	proto_unregister(&alg_proto);
+}
+
+module_init(af_alg_init);
+module_exit(af_alg_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(AF_ALG);
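
From user space, the af_alg core above is driven entirely through ordinary
socket calls: socket() reaches alg_create(), bind() with a struct sockaddr_alg
reaches alg_bind(), setsockopt(SOL_ALG, ...) reaches alg_setsockopt(), and
accept() reaches af_alg_accept(), which hands back an operation socket. A
minimal illustrative sketch of that flow (not taken from this series; it
assumes the new <linux/if_alg.h> header plus the AF_ALG (38) and SOL_ALG (279)
constants are visible to user space, which may require defining them by hand
on older libc headers, and it omits all error handling):

	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>	/* struct sockaddr_alg, ALG_SET_* (assumed available) */

	int main(void)
	{
		/* The "transform" socket selects the algorithm via bind(). */
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",	/* matches an af_alg_type name */
			.salg_name   = "sha1",	/* usual crypto API algorithm name */
		};
		int tfmfd, opfd;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);		/* alg_create()    */
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));	/* alg_bind()      */
		opfd = accept(tfmfd, NULL, 0);				/* af_alg_accept() */

		/* ... drive opfd with send()/recv() as the type-specific ops allow ... */
		close(opfd);
		close(tfmfd);
		return 0;
	}

The salg_type string selects the registered af_alg_type ("hash" or "skcipher"
with this series), while salg_name is resolved by the type's bind() callback.
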
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
new file mode 100644
index 0000000..62122a1
--- /dev/null
+++ b/crypto/algif_hash.c
@@ -0,0 +1,319 @@
+/*
+ * algif_hash: User-space interface for hash algorithms
+ *
+ * This file provides the user-space API for hash algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct hash_ctx {
+	struct af_alg_sgl sgl;
+
+	u8 *result;
+
+	struct af_alg_completion completion;
+
+	unsigned int len;
+	bool more;
+
+	struct ahash_request req;
+};
+
+static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
+			struct msghdr *msg, size_t ignored)
+{
+	int limit = ALG_MAX_PAGES * PAGE_SIZE;
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	unsigned long iovlen;
+	struct iovec *iov;
+	long copied = 0;
+	int err;
+
+	if (limit > sk->sk_sndbuf)
+		limit = sk->sk_sndbuf;
+
+	lock_sock(sk);
+	if (!ctx->more) {
+		err = crypto_ahash_init(&ctx->req);
+		if (err)
+			goto unlock;
+	}
+
+	ctx->more = 0;
+
+	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+	     iovlen--, iov++) {
+		unsigned long seglen = iov->iov_len;
+		char __user *from = iov->iov_base;
+
+		while (seglen) {
+			int len = min_t(unsigned long, seglen, limit);
+			int newlen;
+
+			newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
+			if (newlen < 0)
+				goto unlock;
+
+			ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
+						newlen);
+
+			err = af_alg_wait_for_completion(
+				crypto_ahash_update(&ctx->req),
+				&ctx->completion);
+
+			af_alg_free_sg(&ctx->sgl);
+
+			if (err)
+				goto unlock;
+
+			seglen -= newlen;
+			from += newlen;
+			copied += newlen;
+		}
+	}
+
+	err = 0;
+
+	ctx->more = msg->msg_flags & MSG_MORE;
+	if (!ctx->more) {
+		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+						 &ctx->completion);
+	}
+
+unlock:
+	release_sock(sk);
+
+	return err ?: copied;
+}
+
+static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+			     int offset, size_t size, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	int err;
+
+	lock_sock(sk);
+	sg_init_table(ctx->sgl.sg, 1);
+	sg_set_page(ctx->sgl.sg, page, size, offset);
+
+	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+
+	if (!(flags & MSG_MORE)) {
+		if (ctx->more)
+			err = crypto_ahash_finup(&ctx->req);
+		else
+			err = crypto_ahash_digest(&ctx->req);
+	} else {
+		if (!ctx->more) {
+			err = crypto_ahash_init(&ctx->req);
+			if (err)
+				goto unlock;
+		}
+
+		err = crypto_ahash_update(&ctx->req);
+	}
+
+	err = af_alg_wait_for_completion(err, &ctx->completion);
+	if (err)
+		goto unlock;
+
+	ctx->more = flags & MSG_MORE;
+
+unlock:
+	release_sock(sk);
+
+	return err ?: size;
+}
+
+static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+			struct msghdr *msg, size_t len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+	int err;
+
+	if (len > ds)
+		len = ds;
+	else if (len < ds)
+		msg->msg_flags |= MSG_TRUNC;
+
+	lock_sock(sk);
+	if (ctx->more) {
+		ctx->more = 0;
+		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+						 &ctx->completion);
+		if (err)
+			goto unlock;
+	}
+
+	err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
+
+unlock:
+	release_sock(sk);
+
+	return err ?: len;
+}
+
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	struct ahash_request *req = &ctx->req;
+	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+	struct sock *sk2;
+	struct alg_sock *ask2;
+	struct hash_ctx *ctx2;
+	int err;
+
+	err = crypto_ahash_export(req, state);
+	if (err)
+		return err;
+
+	err = af_alg_accept(ask->parent, newsock);
+	if (err)
+		return err;
+
+	sk2 = newsock->sk;
+	ask2 = alg_sk(sk2);
+	ctx2 = ask2->private;
+	ctx2->more = 1;
+
+	err = crypto_ahash_import(&ctx2->req, state);
+	if (err) {
+		sock_orphan(sk2);
+		sock_put(sk2);
+	}
+
+	return err;
+}
+
+static struct proto_ops algif_hash_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.setsockopt	=	sock_no_setsockopt,
+	.poll		=	sock_no_poll,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	hash_sendmsg,
+	.sendpage	=	hash_sendpage,
+	.recvmsg	=	hash_recvmsg,
+	.accept		=	hash_accept,
+};
+
+static void *hash_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_ahash(name, type, mask);
+}
+
+static void hash_release(void *private)
+{
+	crypto_free_ahash(private);
+}
+
+static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+	return crypto_ahash_setkey(private, key, keylen);
+}
+
+static void hash_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+
+	sock_kfree_s(sk, ctx->result,
+		     crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
+static int hash_accept_parent(void *private, struct sock *sk)
+{
+	struct hash_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
+	unsigned ds = crypto_ahash_digestsize(private);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+	if (!ctx->result) {
+		sock_kfree_s(sk, ctx, len);
+		return -ENOMEM;
+	}
+
+	memset(ctx->result, 0, ds);
+
+	ctx->len = len;
+	ctx->more = 0;
+	af_alg_init_completion(&ctx->completion);
+
+	ask->private = ctx;
+
+	ahash_request_set_tfm(&ctx->req, private);
+	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   af_alg_complete, &ctx->completion);
+
+	sk->sk_destruct = hash_sock_destruct;
+
+	return 0;
+}
+
+static const struct af_alg_type algif_type_hash = {
+	.bind		=	hash_bind,
+	.release	=	hash_release,
+	.setkey		=	hash_setkey,
+	.accept		=	hash_accept_parent,
+	.ops		=	&algif_hash_ops,
+	.name		=	"hash",
+	.owner		=	THIS_MODULE
+};
+
+static int __init algif_hash_init(void)
+{
+	return af_alg_register_type(&algif_type_hash);
+}
+
+static void __exit algif_hash_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_hash);
+	BUG_ON(err);
+}
+
+module_init(algif_hash_init);
+module_exit(algif_hash_exit);
+MODULE_LICENSE("GPL");
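
With the ops above, a digest is produced by streaming data into the operation
socket (hash_sendmsg(), where MSG_MORE means "more data follows") and then
reading it back (hash_recvmsg()). An illustrative SHA-1 sketch under the same
assumptions as before, with error handling omitted:

	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	/* Hash "abc" with sha1 via AF_ALG; returns the number of digest bytes read. */
	static int af_alg_sha1_abc(unsigned char digest[20])
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha1",
		};
		int tfmfd, opfd, n;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		/* hash_sendmsg() updates the hash; MSG_MORE keeps the state open. */
		send(opfd, "ab", 2, MSG_MORE);
		send(opfd, "c", 1, 0);		/* final chunk, digest is finalized */

		/* hash_recvmsg() copies out at most the digest size (20 for sha1). */
		n = read(opfd, digest, 20);

		close(opfd);
		close(tfmfd);
		return n;
	}
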
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
new file mode 100644
index 0000000..6a6dfc0
--- /dev/null
+++ b/crypto/algif_skcipher.c
@@ -0,0 +1,632 @@
+/*
+ * algif_skcipher: User-space interface for skcipher algorithms
+ *
+ * This file provides the user-space API for symmetric key ciphers.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct skcipher_sg_list {
+	struct list_head list;
+
+	int cur;
+
+	struct scatterlist sg[0];
+};
+
+struct skcipher_ctx {
+	struct list_head tsgl;
+	struct af_alg_sgl rsgl;
+
+	void *iv;
+
+	struct af_alg_completion completion;
+
+	unsigned used;
+
+	unsigned int len;
+	bool more;
+	bool merge;
+	bool enc;
+
+	struct ablkcipher_request req;
+};
+
+#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
+		      sizeof(struct scatterlist) - 1)
+
+static inline int skcipher_sndbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->used, 0);
+}
+
+static inline bool skcipher_writable(struct sock *sk)
+{
+	return PAGE_SIZE <= skcipher_sndbuf(sk);
+}
+
+static int skcipher_alloc_sgl(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg = NULL;
+
+	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+	if (!list_empty(&ctx->tsgl))
+		sg = sgl->sg;
+
+	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+		sgl = sock_kmalloc(sk, sizeof(*sgl) +
+				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+				   GFP_KERNEL);
+		if (!sgl)
+			return -ENOMEM;
+
+		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+		sgl->cur = 0;
+
+		if (sg)
+			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+		list_add_tail(&sgl->list, &ctx->tsgl);
+	}
+
+	return 0;
+}
+
+static void skcipher_pull_sgl(struct sock *sk, int used)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	int i;
+
+	while (!list_empty(&ctx->tsgl)) {
+		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
+				       list);
+		sg = sgl->sg;
+
+		for (i = 0; i < sgl->cur; i++) {
+			int plen = min_t(int, used, sg[i].length);
+
+			if (!sg_page(sg + i))
+				continue;
+
+			sg[i].length -= plen;
+			sg[i].offset += plen;
+
+			used -= plen;
+			ctx->used -= plen;
+
+			if (sg[i].length)
+				return;
+
+			put_page(sg_page(sg + i));
+			sg_assign_page(sg + i, NULL);
+		}
+
+		list_del(&sgl->list);
+		sock_kfree_s(sk, sgl,
+			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
+					    (MAX_SGL_ENTS + 1));
+	}
+
+	if (!ctx->used)
+		ctx->merge = 0;
+}
+
+static void skcipher_free_sgl(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+
+	skcipher_pull_sgl(sk, ctx->used);
+}
+
+static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
+{
+	long timeout;
+	DEFINE_WAIT(wait);
+	int err = -ERESTARTSYS;
+
+	if (flags & MSG_DONTWAIT)
+		return -EAGAIN;
+
+	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
+			err = 0;
+			break;
+		}
+	}
+	finish_wait(sk_sleep(sk), &wait);
+
+	return err;
+}
+
+static void skcipher_wmem_wakeup(struct sock *sk)
+{
+	struct socket_wq *wq;
+
+	if (!skcipher_writable(sk))
+		return;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+							   POLLRDNORM |
+							   POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	rcu_read_unlock();
+}
+
+static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	long timeout;
+	DEFINE_WAIT(wait);
+	int err = -ERESTARTSYS;
+
+	if (flags & MSG_DONTWAIT) {
+		return -EAGAIN;
+	}
+
+	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, ctx->used)) {
+			err = 0;
+			break;
+		}
+	}
+	finish_wait(sk_sleep(sk), &wait);
+
+	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+	return err;
+}
+
+static void skcipher_data_wakeup(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct socket_wq *wq;
+
+	if (!ctx->used)
+		return;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+							   POLLRDNORM |
+							   POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	rcu_read_unlock();
+}
+
+static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t size)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct skcipher_sg_list *sgl;
+	struct af_alg_control con = {};
+	long copied = 0;
+	bool enc = 0;
+	int err;
+	int i;
+
+	if (msg->msg_controllen) {
+		err = af_alg_cmsg_send(msg, &con);
+		if (err)
+			return err;
+
+		switch (con.op) {
+		case ALG_OP_ENCRYPT:
+			enc = 1;
+			break;
+		case ALG_OP_DECRYPT:
+			enc = 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (con.iv && con.iv->ivlen != ivsize)
+			return -EINVAL;
+	}
+
+	err = -EINVAL;
+
+	lock_sock(sk);
+	if (!ctx->more && ctx->used)
+		goto unlock;
+
+	if (!ctx->used) {
+		ctx->enc = enc;
+		if (con.iv)
+			memcpy(ctx->iv, con.iv->iv, ivsize);
+	}
+
+	while (size) {
+		struct scatterlist *sg;
+		unsigned long len = size;
+		int plen;
+
+		if (ctx->merge) {
+			sgl = list_entry(ctx->tsgl.prev,
+					 struct skcipher_sg_list, list);
+			sg = sgl->sg + sgl->cur - 1;
+			len = min_t(unsigned long, len,
+				    PAGE_SIZE - sg->offset - sg->length);
+
+			err = memcpy_fromiovec(page_address(sg_page(sg)) +
+					       sg->offset + sg->length,
+					       msg->msg_iov, len);
+			if (err)
+				goto unlock;
+
+			sg->length += len;
+			ctx->merge = (sg->offset + sg->length) &
+				     (PAGE_SIZE - 1);
+
+			ctx->used += len;
+			copied += len;
+			size -= len;
+			continue;
+		}
+
+		if (!skcipher_writable(sk)) {
+			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
+			if (err)
+				goto unlock;
+		}
+
+		len = min_t(unsigned long, len, skcipher_sndbuf(sk));
+
+		err = skcipher_alloc_sgl(sk);
+		if (err)
+			goto unlock;
+
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+		sg = sgl->sg;
+		do {
+			i = sgl->cur;
+			plen = min_t(int, len, PAGE_SIZE);
+
+			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+			err = -ENOMEM;
+			if (!sg_page(sg + i))
+				goto unlock;
+
+			err = memcpy_fromiovec(page_address(sg_page(sg + i)),
+					       msg->msg_iov, plen);
+			if (err) {
+				__free_page(sg_page(sg + i));
+				sg_assign_page(sg + i, NULL);
+				goto unlock;
+			}
+
+			sg[i].length = plen;
+			len -= plen;
+			ctx->used += plen;
+			copied += plen;
+			size -= plen;
+			sgl->cur++;
+		} while (len && sgl->cur < MAX_SGL_ENTS);
+
+		ctx->merge = plen & (PAGE_SIZE - 1);
+	}
+
+	err = 0;
+
+	ctx->more = msg->msg_flags & MSG_MORE;
+	if (!ctx->more && !list_empty(&ctx->tsgl))
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+	skcipher_data_wakeup(sk);
+	release_sock(sk);
+
+	return copied ?: err;
+}
+
+static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+				 int offset, size_t size, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	int err = -EINVAL;
+
+	lock_sock(sk);
+	if (!ctx->more && ctx->used)
+		goto unlock;
+
+	if (!size)
+		goto done;
+
+	if (!skcipher_writable(sk)) {
+		err = skcipher_wait_for_wmem(sk, flags);
+		if (err)
+			goto unlock;
+	}
+
+	err = skcipher_alloc_sgl(sk);
+	if (err)
+		goto unlock;
+
+	ctx->merge = 0;
+	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+	get_page(page);
+	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+	sgl->cur++;
+	ctx->used += size;
+
+done:
+	ctx->more = flags & MSG_MORE;
+	if (!ctx->more && !list_empty(&ctx->tsgl))
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+	skcipher_data_wakeup(sk);
+	release_sock(sk);
+
+	return err ?: size;
+}
+
+static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t ignored, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
+		&ctx->req));
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	unsigned long iovlen;
+	struct iovec *iov;
+	int err = -EAGAIN;
+	int used;
+	long copied = 0;
+
+	lock_sock(sk);
+	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+	     iovlen--, iov++) {
+		unsigned long seglen = iov->iov_len;
+		char __user *from = iov->iov_base;
+
+		while (seglen) {
+			sgl = list_first_entry(&ctx->tsgl,
+					       struct skcipher_sg_list, list);
+			sg = sgl->sg;
+
+			while (!sg->length)
+				sg++;
+
+			used = ctx->used;
+			if (!used) {
+				err = skcipher_wait_for_data(sk, flags);
+				if (err)
+					goto unlock;
+			}
+
+			used = min_t(unsigned long, used, seglen);
+
+			used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
+			err = used;
+			if (err < 0)
+				goto unlock;
+
+			if (ctx->more || used < ctx->used)
+				used -= used % bs;
+
+			err = -EINVAL;
+			if (!used)
+				goto free;
+
+			ablkcipher_request_set_crypt(&ctx->req, sg,
+						     ctx->rsgl.sg, used,
+						     ctx->iv);
+
+			err = af_alg_wait_for_completion(
+				ctx->enc ?
+					crypto_ablkcipher_encrypt(&ctx->req) :
+					crypto_ablkcipher_decrypt(&ctx->req),
+				&ctx->completion);
+
+free:
+			af_alg_free_sg(&ctx->rsgl);
+
+			if (err)
+				goto unlock;
+
+			copied += used;
+			from += used;
+			seglen -= used;
+			skcipher_pull_sgl(sk, used);
+		}
+	}
+
+	err = 0;
+
+unlock:
+	skcipher_wmem_wakeup(sk);
+	release_sock(sk);
+
+	return copied ?: err;
+}
+
+
+static unsigned int skcipher_poll(struct file *file, struct socket *sock,
+				  poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	unsigned int mask;
+
+	sock_poll_wait(file, sk_sleep(sk), wait);
+	mask = 0;
+
+	if (ctx->used)
+		mask |= POLLIN | POLLRDNORM;
+
+	if (skcipher_writable(sk))
+		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+	return mask;
+}
+
+static struct proto_ops algif_skcipher_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.accept		=	sock_no_accept,
+	.setsockopt	=	sock_no_setsockopt,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	skcipher_sendmsg,
+	.sendpage	=	skcipher_sendpage,
+	.recvmsg	=	skcipher_recvmsg,
+	.poll		=	skcipher_poll,
+};
+
+static void *skcipher_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_ablkcipher(name, type, mask);
+}
+
+static void skcipher_release(void *private)
+{
+	crypto_free_ablkcipher(private);
+}
+
+static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+	return crypto_ablkcipher_setkey(private, key, keylen);
+}
+
+static void skcipher_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+
+	skcipher_free_sgl(sk);
+	sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
+static int skcipher_accept_parent(void *private, struct sock *sk)
+{
+	struct skcipher_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
+			       GFP_KERNEL);
+	if (!ctx->iv) {
+		sock_kfree_s(sk, ctx, len);
+		return -ENOMEM;
+	}
+
+	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
+
+	INIT_LIST_HEAD(&ctx->tsgl);
+	ctx->len = len;
+	ctx->used = 0;
+	ctx->more = 0;
+	ctx->merge = 0;
+	ctx->enc = 0;
+	af_alg_init_completion(&ctx->completion);
+
+	ask->private = ctx;
+
+	ablkcipher_request_set_tfm(&ctx->req, private);
+	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					af_alg_complete, &ctx->completion);
+
+	sk->sk_destruct = skcipher_sock_destruct;
+
+	return 0;
+}
+
+static const struct af_alg_type algif_type_skcipher = {
+	.bind		=	skcipher_bind,
+	.release	=	skcipher_release,
+	.setkey		=	skcipher_setkey,
+	.accept		=	skcipher_accept_parent,
+	.ops		=	&algif_skcipher_ops,
+	.name		=	"skcipher",
+	.owner		=	THIS_MODULE
+};
+
+static int __init algif_skcipher_init(void)
+{
+	return af_alg_register_type(&algif_type_skcipher);
+}
+
+static void __exit algif_skcipher_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_skcipher);
+	BUG_ON(err);
+}
+
+module_init(algif_skcipher_init);
+module_exit(algif_skcipher_exit);
+MODULE_LICENSE("GPL");
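
The cipher side differs only in that the key is set on the bound socket with
setsockopt(ALG_SET_KEY) before accept(), and each request carries the
direction and IV as SOL_ALG control messages, which af_alg_cmsg_send() parses
into struct af_alg_control. An illustrative single-block cbc(aes) encryption
sketch, under the same assumptions as before (16-byte key and IV, error
handling omitted):

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	/* Encrypt one 16-byte block with cbc(aes) via AF_ALG. */
	static void af_alg_encrypt_block(const unsigned char key[16],
					 const unsigned char iv[16],
					 const unsigned char in[16],
					 unsigned char out[16])
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "skcipher",
			.salg_name   = "cbc(aes)",
		};
		/* Space for the ALG_SET_OP u32 and ALG_SET_IV (af_alg_iv + 16 bytes). */
		char cbuf[CMSG_SPACE(sizeof(__u32)) +
			  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = {0};
		struct msghdr msg = {0};
		struct cmsghdr *cmsg;
		struct af_alg_iv *alg_iv;
		struct iovec iov;
		int tfmfd, opfd;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);	/* alg_setsockopt() */
		opfd = accept(tfmfd, NULL, 0);

		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);

		cmsg = CMSG_FIRSTHDR(&msg);			/* ALG_SET_OP */
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type  = ALG_SET_OP;
		cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

		cmsg = CMSG_NXTHDR(&msg, cmsg);			/* ALG_SET_IV */
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type  = ALG_SET_IV;
		cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
		alg_iv = (void *)CMSG_DATA(cmsg);
		alg_iv->ivlen = 16;
		memcpy(alg_iv->iv, iv, 16);

		iov.iov_base = (void *)in;
		iov.iov_len  = 16;
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;

		sendmsg(opfd, &msg, 0);		/* skcipher_sendmsg() queues the plaintext */
		read(opfd, out, 16);		/* skcipher_recvmsg() returns the ciphertext */

		close(opfd);
		close(tfmfd);
	}
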
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a5a22cf..5ef7ba6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -107,20 +107,6 @@
 	goto out;
 }
 
-static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
-			  int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
 					    int err)
 {
@@ -345,7 +331,7 @@
 	if (ivsize) {
 		sg_init_table(cipher, 2);
 		sg_set_buf(cipher, iv, ivsize);
-		authenc_chain(cipher, dst, vdst == iv + ivsize);
+		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
 		dst = cipher;
 		cryptlen += ivsize;
 	}
@@ -354,7 +340,7 @@
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		authenc_chain(asg, dst, 0);
+		scatterwalk_crypto_chain(asg, dst, 0, 2);
 		dst = asg;
 		cryptlen += req->assoclen;
 	}
@@ -499,7 +485,7 @@
 	if (ivsize) {
 		sg_init_table(cipher, 2);
 		sg_set_buf(cipher, iv, ivsize);
-		authenc_chain(cipher, src, vsrc == iv + ivsize);
+		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
 		src = cipher;
 		cryptlen += ivsize;
 	}
@@ -508,7 +494,7 @@
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		authenc_chain(asg, src, 0);
+		scatterwalk_crypto_chain(asg, src, 0, 2);
 		src = asg;
 		cryptlen += req->assoclen;
 	}
diff --git a/crypto/cast5.c b/crypto/cast5.c
index a1d2294..4a230dd 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -604,36 +604,23 @@
 	 * Rounds 3, 6, 9, 12, and 15 use f function Type 3.
 	 */
 
+	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
+	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 	if (!(c->rr)) {
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
 		t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
-	} else {
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 	}
 
 	/* c1...c64 <-- (R16,L16).  (Exchange final blocks L16, R16 and
@@ -663,32 +650,19 @@
 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-	} else {
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
 	}
+	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
+	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
 
 	dst[0] = cpu_to_be32(r);
 	dst[1] = cpu_to_be32(l);
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
index fdcf624..b980ee1 100644
--- a/crypto/crypto_wq.c
+++ b/crypto/crypto_wq.c
@@ -20,7 +20,8 @@
 
 static int __init crypto_wq_init(void)
 {
-	kcrypto_wq = create_workqueue("crypto");
+	kcrypto_wq = alloc_workqueue("crypto",
+				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (unlikely(!kcrypto_wq))
 		return -ENOMEM;
 	return 0;
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 463dc85..cbc7a33 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -48,12 +48,11 @@
 	int ret = 0;
 	struct z_stream_s *stream = &ctx->comp_stream;
 
-	stream->workspace = vmalloc(zlib_deflate_workspacesize());
+	stream->workspace = vzalloc(zlib_deflate_workspacesize());
 	if (!stream->workspace) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(stream->workspace, 0, zlib_deflate_workspacesize());
 	ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
 	                        -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
 	                        Z_DEFAULT_STRATEGY);
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 3ca3b66..42ce9f5 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -62,20 +62,6 @@
 	skcipher_givcrypt_complete(req, err);
 }
 
-static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
-			 int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
 {
 	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
@@ -124,13 +110,13 @@
 
 	sg_init_table(reqctx->src, 2);
 	sg_set_buf(reqctx->src, giv, ivsize);
-	eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
+	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
 
 	dst = reqctx->src;
 	if (osrc != odst) {
 		sg_init_table(reqctx->dst, 2);
 		sg_set_buf(reqctx->dst, giv, ivsize);
-		eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
+		scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
 
 		dst = reqctx->dst;
 	}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 2f5fbba..1a25263 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1102,21 +1102,6 @@
 	return crypto_aead_setauthsize(ctx->child, authsize);
 }
 
-/* this is the same as crypto_authenc_chain */
-static void crypto_rfc4543_chain(struct scatterlist *head,
-				 struct scatterlist *sg, int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
 						 int enc)
 {
@@ -1154,13 +1139,13 @@
 
 	sg_init_table(payload, 2);
 	sg_set_buf(payload, req->iv, 8);
-	crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
 	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
 
 	sg_init_table(assoc, 2);
 	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
 		    req->assoc->offset);
-	crypto_rfc4543_chain(assoc, payload, 0);
+	scatterwalk_crypto_chain(assoc, payload, 0, 2);
 
 	aead_request_set_tfm(subreq, ctx->child);
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 75586f1..29a89da 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -455,7 +455,8 @@
 
 	get_online_cpus();
 
-	pcrypt->wq = create_workqueue(name);
+	pcrypt->wq = alloc_workqueue(name,
+				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (!pcrypt->wq)
 		goto err;
 
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 1ceb673..8a0f68b 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -325,4 +325,5 @@
 module_exit(rmd128_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 472261fc..525d7bb 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -369,4 +369,5 @@
 module_exit(rmd160_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 72eafa8..69293d9 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -344,4 +344,5 @@
 module_exit(rmd256_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 86becab..09f97df 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -393,4 +393,5 @@
 module_exit(rmd320_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
diff --git a/crypto/shash.c b/crypto/shash.c
index 22fd943..76f74b9 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -310,7 +310,13 @@
 
 static int shash_async_import(struct ahash_request *req, const void *in)
 {
-	return crypto_shash_import(ahash_request_ctx(req), in);
+	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct shash_desc *desc = ahash_request_ctx(req);
+
+	desc->tfm = *ctx;
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
 }
 
 static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 3ca68f9..9aac5e5 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -8,6 +8,13 @@
  * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
  * Copyright (c) 2007 Nokia Siemens Networks
  *
+ * Updated RFC4106 AES-GCM testing.
+ *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -980,6 +987,10 @@
 		ret += tcrypt_test("ansi_cprng");
 		break;
 
+	case 151:
+		ret += tcrypt_test("rfc4106(gcm(aes))");
+		break;
+
 	case 200:
 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index fa8c8f7..27ea9fe 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -6,6 +6,13 @@
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Updated RFC4106 AES-GCM testing.
+ *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -2242,6 +2249,21 @@
 			}
 		}
 	}, {
+		.alg = "rfc4106(gcm(aes))",
+		.test = alg_test_aead,
+		.suite = {
+			.aead = {
+				.enc = {
+					.vecs = aes_gcm_rfc4106_enc_tv_template,
+					.count = AES_GCM_4106_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = aes_gcm_rfc4106_dec_tv_template,
+					.count = AES_GCM_4106_DEC_TEST_VECTORS
+				}
+			}
+		}
+	}, {
 		.alg = "rfc4309(ccm(aes))",
 		.test = alg_test_aead,
 		.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 74e3537..834af7f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -6,6 +6,15 @@
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Updated RFC4106 AES-GCM testing. Some test vectors were taken from
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/
+ * gcm/gcm-test-vectors.tar.gz
+ *     Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *              Adrian Hoban <adrian.hoban@intel.com>
+ *              Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *              Tadeusz Struk (tadeusz.struk@intel.com)
+ *     Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -2947,6 +2956,8 @@
 #define AES_CTR_3686_DEC_TEST_VECTORS 6
 #define AES_GCM_ENC_TEST_VECTORS 9
 #define AES_GCM_DEC_TEST_VECTORS 8
+#define AES_GCM_4106_ENC_TEST_VECTORS 7
+#define AES_GCM_4106_DEC_TEST_VECTORS 7
 #define AES_CCM_ENC_TEST_VECTORS 7
 #define AES_CCM_DEC_TEST_VECTORS 7
 #define AES_CCM_4309_ENC_TEST_VECTORS 7
@@ -5829,6 +5840,356 @@
 	}
 };
 
+static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
+        { /* Generated using Crypto++ */
+		.key    = zeroed_string,
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = zeroed_string,
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+		.rlen	= 32,
+        },{
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = zeroed_string,
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+		.rlen	= 32,
+
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 64,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+		.rlen	= 80,
+        }, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+                          "\x00\x00\x00\x00",
+                .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff",
+                .ilen   = 192,
+                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                          "\xaa\xaa\xaa\xaa",
+                .alen   = 12,
+		.result	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+			  "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+			  "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+			  "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+			  "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+			  "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+			  "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+			  "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+			  "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+			  "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+			  "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+			  "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+			  "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+			  "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+			  "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+			  "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+			  "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+			  "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+			  "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+			  "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+			  "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+			  "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+		.rlen	= 208,
+	}
+};
+
+static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
+        { /* Generated using Crypto++ */
+		.key    = zeroed_string,
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = zeroed_string,
+                .rlen   = 16,
+
+        },{
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = zeroed_string,
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+		.ilen	= 32,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+		.ilen	= 32,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+		.ilen	= 80,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 64,
+        }, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+			  "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+			  "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+			  "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+			  "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+			  "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+			  "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+			  "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+			  "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+			  "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+			  "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+			  "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+			  "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+			  "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+			  "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+			  "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+			  "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+			  "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+			  "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+			  "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+			  "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+			  "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+		.ilen	= 208,
+                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                          "\xaa\xaa\xaa\xaa",
+                .alen   = 12,
+                .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff",
+                .rlen   = 192,
+
+	}
+};
+
 static struct aead_testvec aes_ccm_enc_tv_template[] = {
 	{ /* From RFC 3610 */
 		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
diff --git a/crypto/zlib.c b/crypto/zlib.c
index c3015733..739b8fc 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -95,11 +95,10 @@
 	zlib_comp_exit(ctx);
 
 	workspacesize = zlib_deflate_workspacesize();
-	stream->workspace = vmalloc(workspacesize);
+	stream->workspace = vzalloc(workspacesize);
 	if (!stream->workspace)
 		return -ENOMEM;
 
-	memset(stream->workspace, 0, workspacesize);
 	ret = zlib_deflateInit2(stream,
 				tb[ZLIB_COMP_LEVEL]
 					? nla_get_u32(tb[ZLIB_COMP_LEVEL])
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3d93b3a..9bfb71f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -26,6 +26,8 @@
 
 source "drivers/md/Kconfig"
 
+source "drivers/target/Kconfig"
+
 source "drivers/message/fusion/Kconfig"
 
 source "drivers/firewire/Kconfig"
@@ -88,6 +90,8 @@
 
 source "drivers/leds/Kconfig"
 
+source "drivers/nfc/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index bf15ce7..b423bb1 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -24,7 +24,7 @@
 # regulators early, since some subsystems rely on them to initialize
 obj-$(CONFIG_REGULATOR)		+= regulator/
 
-# char/ comes before serial/ etc so that the VT console is the boot-time
+# tty/ comes before char/ so that the VT console is the boot-time
 # default.
 obj-y				+= tty/
 obj-y				+= char/
@@ -38,14 +38,14 @@
 obj-$(CONFIG_FB_I810)           += video/i810/
 obj-$(CONFIG_FB_INTEL)          += video/intelfb/
 
-obj-y				+= serial/
 obj-$(CONFIG_PARPORT)		+= parport/
-obj-y				+= base/ block/ misc/ mfd/
+obj-y				+= base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-$(CONFIG_ATA)		+= ata/
+obj-$(CONFIG_TARGET_CORE)	+= target/
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
 obj-y				+= net/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3f3489c..2aa042a 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -51,12 +51,7 @@
 	  For backwards compatibility, this option allows
 	  deprecated /proc/acpi/ files to exist, even when
 	  they have been replaced by functions in /sys.
-	  The deprecated files (and their replacements) include:
 
-	  /proc/acpi/processor/*/throttling (/sys/class/thermal/
-		cooling_device*/*)
-	  /proc/acpi/video/*/brightness (/sys/class/backlight/)
-	  /proc/acpi/thermal_zone/*/* (/sys/class/thermal/)
 	  This option has no effect on /proc/acpi/ files
 	  and functions which do not yet exist in /sys.
 
@@ -74,6 +69,8 @@
 	  /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
 	  This option has no effect on /proc/acpi/ directories
 	  and functions, which do not yet exist in /sys
+	  This option, together with the proc directories, will be
+	  deleted in 2.6.39.
 
 	  Say N to delete power /proc/acpi/ directories that have moved to /sys/
 
@@ -209,6 +206,17 @@
 
 	  To compile this driver as a module, choose M here:
 	  the module will be called processor.
+config ACPI_IPMI
+	tristate "IPMI"
+	depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER
+	default n
+	help
+	  This driver enables ACPI to access the BMC controller. It uses
+	  IPMI request/response messages to communicate with the BMC
+	  controller, which can be found on the server.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called acpi_ipmi.
 
 config ACPI_HOTPLUG_CPU
 	bool
@@ -310,7 +318,7 @@
 	  the module will be called pci_slot.
 
 config X86_PM_TIMER
-	bool "Power Management Timer Support" if EMBEDDED
+	bool "Power Management Timer Support" if EXPERT
 	depends on X86
 	default y
 	help
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3d031d0..d113fa5 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@
 # sleep related files
 acpi-y				+= wakeup.o
 acpi-y				+= sleep.o
-acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o
+acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o nvs.o
 
 
 #
@@ -69,5 +69,6 @@
 processor-$(CONFIG_CPU_FREQ)	+= processor_perflib.o
 
 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+obj-$(CONFIG_ACPI_IPMI)		+= acpi_ipmi.o
 
 obj-$(CONFIG_ACPI_APEI)		+= apei/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 25d3aae..58c3f74 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -197,7 +197,8 @@
 {
 	struct proc_dir_entry *entry = NULL;
 
-
+	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
+			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
 	if (!acpi_device_dir(device)) {
 		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
 						     acpi_ac_dir);
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
new file mode 100644
index 0000000..f40acef
--- /dev/null
+++ b/drivers/acpi/acpi_ipmi.c
@@ -0,0 +1,525 @@
+/*
+ *  acpi_ipmi.c - ACPI IPMI opregion
+ *
+ *  Copyright (C) 2010 Intel Corporation
+ *  Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/ipmi.h>
+#include <linux/device.h>
+#include <linux/pnp.h>
+
+MODULE_AUTHOR("Zhao Yakui");
+MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
+MODULE_LICENSE("GPL");
+
+#define IPMI_FLAGS_HANDLER_INSTALL	0
+
+#define ACPI_IPMI_OK			0
+#define ACPI_IPMI_TIMEOUT		0x10
+#define ACPI_IPMI_UNKNOWN		0x07
+/* the IPMI timeout is 5s */
+#define IPMI_TIMEOUT			(5 * HZ)
+
+struct acpi_ipmi_device {
+	/* the device list attached to driver_data.ipmi_devices */
+	struct list_head head;
+	/* the IPMI request message list */
+	struct list_head tx_msg_list;
+	struct mutex	tx_msg_lock;
+	acpi_handle handle;
+	struct pnp_dev *pnp_dev;
+	ipmi_user_t	user_interface;
+	int ipmi_ifnum; /* IPMI interface number */
+	long curr_msgid;
+	unsigned long flags;
+	struct ipmi_smi_info smi_data;
+};
+
+struct ipmi_driver_data {
+	struct list_head	ipmi_devices;
+	struct ipmi_smi_watcher	bmc_events;
+	struct ipmi_user_hndl	ipmi_hndlrs;
+	struct mutex		ipmi_lock;
+};
+
+struct acpi_ipmi_msg {
+	struct list_head head;
+	/*
+	 * Generally speaking, the addr type should be SI_ADDR_TYPE and
+	 * the addr channel should be BMC.
+	 * It can also be of IPMB type, but that would require parsing
+	 * it out of the Netfn/command buffer, which is complex enough
+	 * that it is skipped for now.
+	 */
+	struct ipmi_addr addr;
+	long tx_msgid;
+	/* it is used to track whether the IPMI message is finished */
+	struct completion tx_complete;
+	struct kernel_ipmi_msg tx_message;
+	int	msg_done;
+	/* tx data, copied from the ACPI object buffer */
+	u8	tx_data[64];
+	int	tx_len;
+	u8	rx_data[64];
+	int	rx_len;
+	struct acpi_ipmi_device *device;
+};
+
+/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
+struct acpi_ipmi_buffer {
+	u8 status;
+	u8 length;
+	u8 data[64];
+};
+
+static void ipmi_register_bmc(int iface, struct device *dev);
+static void ipmi_bmc_gone(int iface);
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+
+static struct ipmi_driver_data driver_data = {
+	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
+	.bmc_events = {
+		.owner = THIS_MODULE,
+		.new_smi = ipmi_register_bmc,
+		.smi_gone = ipmi_bmc_gone,
+	},
+	.ipmi_hndlrs = {
+		.ipmi_recv_hndl = ipmi_msg_handler,
+	},
+};
+
+static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+{
+	struct acpi_ipmi_msg *ipmi_msg;
+	struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
+	if (!ipmi_msg)	{
+		dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+		return NULL;
+	}
+	init_completion(&ipmi_msg->tx_complete);
+	INIT_LIST_HEAD(&ipmi_msg->head);
+	ipmi_msg->device = ipmi;
+	return ipmi_msg;
+}
+
+#define		IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
+#define		IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
+static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+				acpi_physical_address address,
+				acpi_integer *value)
+{
+	struct kernel_ipmi_msg *msg;
+	struct acpi_ipmi_buffer *buffer;
+	struct acpi_ipmi_device *device;
+
+	msg = &tx_msg->tx_message;
+	/*
+	 * IPMI network function and command are encoded in the address
+	 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
+	 */
+	msg->netfn = IPMI_OP_RGN_NETFN(address);
+	msg->cmd = IPMI_OP_RGN_CMD(address);
+	msg->data = tx_msg->tx_data;
+	/*
+	 * value is the parameter passed by the IPMI opregion space handler.
+	 * It points to the IPMI request message buffer.
+	 */
+	buffer = (struct acpi_ipmi_buffer *)value;
+	/* copy the tx message data */
+	msg->data_len = buffer->length;
+	memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+	/*
+	 * For now the default addr type is SYSTEM_INTERFACE and the channel
+	 * type is BMC. If the netfn is APP_REQUEST and the cmd is
+	 * SEND_MESSAGE, the addr type should be changed to IPMB, which
+	 * would require parsing the IPMI request message buffer to get
+	 * the IPMB address. That case is not handled yet (FIXME).
+	 */
+	tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	tx_msg->addr.channel = IPMI_BMC_CHANNEL;
+	tx_msg->addr.data[0] = 0;
+
+	/* Get the msgid */
+	device = tx_msg->device;
+	mutex_lock(&device->tx_msg_lock);
+	device->curr_msgid++;
+	tx_msg->tx_msgid = device->curr_msgid;
+	mutex_unlock(&device->tx_msg_lock);
+}
+
+static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
+		acpi_integer *value, int rem_time)
+{
+	struct acpi_ipmi_buffer *buffer;
+
+	/*
+	 * value is also used as an output parameter. It represents the
+	 * response IPMI message returned by the IPMI command.
+	 */
+	buffer = (struct acpi_ipmi_buffer *)value;
+	if (!rem_time && !msg->msg_done) {
+		buffer->status = ACPI_IPMI_TIMEOUT;
+		return;
+	}
+	/*
+	 * If the msg_done flag is not set or the receive length is zero,
+	 * the IPMI command did not complete correctly and the status code
+	 * will be ACPI_IPMI_UNKNOWN.
+	 */
+	if (!msg->msg_done || !msg->rx_len) {
+		buffer->status = ACPI_IPMI_UNKNOWN;
+		return;
+	}
+	/*
+	 * If the IPMI response message is obtained correctly, the status code
+	 * will be ACPI_IPMI_OK.
+	 */
+	buffer->status = ACPI_IPMI_OK;
+	buffer->length = msg->rx_len;
+	memcpy(buffer->data, msg->rx_data, msg->rx_len);
+}
+
+static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
+{
+	struct acpi_ipmi_msg *tx_msg, *temp;
+	int count = HZ / 10;
+	struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+		/* wake up any thread sleeping on this tx msg */
+		complete(&tx_msg->tx_complete);
+	}
+
+	/* wait for about 100ms to flush the tx message list */
+	while (count--) {
+		if (list_empty(&ipmi->tx_msg_list))
+			break;
+		schedule_timeout(1);
+	}
+	if (!list_empty(&ipmi->tx_msg_list))
+		dev_warn(&pnp_dev->dev, "tx msg list is not empty\n");
+}
+
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+{
+	struct acpi_ipmi_device *ipmi_device = user_msg_data;
+	int msg_found = 0;
+	struct acpi_ipmi_msg *tx_msg;
+	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+
+	if (msg->user != ipmi_device->user_interface) {
+		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
+			"returned user %p, expected user %p\n",
+			msg->user, ipmi_device->user_interface);
+		ipmi_free_recv_msg(msg);
+		return;
+	}
+	mutex_lock(&ipmi_device->tx_msg_lock);
+	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+		if (msg->msgid == tx_msg->tx_msgid) {
+			msg_found = 1;
+			break;
+		}
+	}
+
+	mutex_unlock(&ipmi_device->tx_msg_lock);
+	if (!msg_found) {
+		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
+			"returned.\n", msg->msgid);
+		ipmi_free_recv_msg(msg);
+		return;
+	}
+
+	if (msg->msg.data_len) {
+		/* copy the response data to Rx_data buffer */
+		memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
+		tx_msg->rx_len = msg->msg.data_len;
+		tx_msg->msg_done = 1;
+	}
+	complete(&tx_msg->tx_complete);
+	ipmi_free_recv_msg(msg);
+};
+
+static void ipmi_register_bmc(int iface, struct device *dev)
+{
+	struct acpi_ipmi_device *ipmi_device, *temp;
+	struct pnp_dev *pnp_dev;
+	ipmi_user_t		user;
+	int err;
+	struct ipmi_smi_info smi_data;
+	acpi_handle handle;
+
+	err = ipmi_get_smi_info(iface, &smi_data);
+
+	if (err)
+		return;
+
+	if (smi_data.addr_src != SI_ACPI) {
+		put_device(smi_data.dev);
+		return;
+	}
+
+	handle = smi_data.addr_info.acpi_info.acpi_handle;
+
+	mutex_lock(&driver_data.ipmi_lock);
+	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
+		/*
+		 * if the corresponding ACPI handle is already added
+		 * to the device list, don't add it again.
+		 */
+		if (temp->handle == handle)
+			goto out;
+	}
+
+	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+
+	if (!ipmi_device)
+		goto out;
+
+	pnp_dev = to_pnp_dev(smi_data.dev);
+	ipmi_device->handle = handle;
+	ipmi_device->pnp_dev = pnp_dev;
+
+	err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+					ipmi_device, &user);
+	if (err) {
+		dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
+		kfree(ipmi_device);
+		goto out;
+	}
+	acpi_add_ipmi_device(ipmi_device);
+	ipmi_device->user_interface = user;
+	ipmi_device->ipmi_ifnum = iface;
+	mutex_unlock(&driver_data.ipmi_lock);
+	memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+	return;
+
+out:
+	mutex_unlock(&driver_data.ipmi_lock);
+	put_device(smi_data.dev);
+	return;
+}
+
+static void ipmi_bmc_gone(int iface)
+{
+	struct acpi_ipmi_device *ipmi_device, *temp;
+
+	mutex_lock(&driver_data.ipmi_lock);
+	list_for_each_entry_safe(ipmi_device, temp,
+				&driver_data.ipmi_devices, head) {
+		if (ipmi_device->ipmi_ifnum != iface)
+			continue;
+
+		acpi_remove_ipmi_device(ipmi_device);
+		put_device(ipmi_device->smi_data.dev);
+		kfree(ipmi_device);
+		break;
+	}
+	mutex_unlock(&driver_data.ipmi_lock);
+}
+/* --------------------------------------------------------------------------
+ *			Address Space Management
+ * -------------------------------------------------------------------------- */
+/*
+ * This is the IPMI opregion space handler.
+ * @function: indicates read or write access. Since the IPMI exchange is
+ * command driven, only write access is meaningful.
+ * @address: This contains the netfn/command of IPMI request message.
+ * @bits   : not used.
+ * @value  : it is an in/out parameter. It points to the IPMI message buffer.
+ *	     Before the IPMI message is sent, it represents the actual request
+ *	     IPMI message. After the IPMI message is finished, it represents
+ *	     the response IPMI message returned by IPMI command.
+ * @handler_context: IPMI device context.
+ */
+
+static acpi_status
+acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+		      u32 bits, acpi_integer *value,
+		      void *handler_context, void *region_context)
+{
+	struct acpi_ipmi_msg *tx_msg;
+	struct acpi_ipmi_device *ipmi_device = handler_context;
+	int err, rem_time;
+	acpi_status status;
+	/*
+	 * IPMI opregion message.
+	 * The IPMI message is first written to the BMC and system software
+	 * then reads back the response, so a read access of the IPMI
+	 * opregion is meaningless.
+	 */
+	if ((function & ACPI_IO_MASK) == ACPI_READ)
+		return AE_TYPE;
+
+	if (!ipmi_device->user_interface)
+		return AE_NOT_EXIST;
+
+	tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
+	if (!tx_msg)
+		return AE_NO_MEMORY;
+
+	acpi_format_ipmi_msg(tx_msg, address, value);
+	mutex_lock(&ipmi_device->tx_msg_lock);
+	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
+	mutex_unlock(&ipmi_device->tx_msg_lock);
+	err = ipmi_request_settime(ipmi_device->user_interface,
+					&tx_msg->addr,
+					tx_msg->tx_msgid,
+					&tx_msg->tx_message,
+					NULL, 0, 0, 0);
+	if (err) {
+		status = AE_ERROR;
+		goto end_label;
+	}
+	rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
+					IPMI_TIMEOUT);
+	acpi_format_ipmi_response(tx_msg, value, rem_time);
+	status = AE_OK;
+
+end_label:
+	mutex_lock(&ipmi_device->tx_msg_lock);
+	list_del(&tx_msg->head);
+	mutex_unlock(&ipmi_device->tx_msg_lock);
+	kfree(tx_msg);
+	return status;
+}
+
+static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
+{
+	if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+		return;
+
+	acpi_remove_address_space_handler(ipmi->handle,
+				ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
+
+	clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+}
+
+static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+{
+	acpi_status status;
+
+	if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+		return 0;
+
+	status = acpi_install_address_space_handler(ipmi->handle,
+						    ACPI_ADR_SPACE_IPMI,
+						    &acpi_ipmi_space_handler,
+						    NULL, ipmi);
+	if (ACPI_FAILURE(status)) {
+		struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+		dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
+			"handler\n");
+		return -EINVAL;
+	}
+	set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+	return 0;
+}
+
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+
+	INIT_LIST_HEAD(&ipmi_device->head);
+
+	mutex_init(&ipmi_device->tx_msg_lock);
+	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+	ipmi_install_space_handler(ipmi_device);
+
+	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
+}
+
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+	/*
+	 * If the IPMI user interface is created, it should be
+	 * destroyed.
+	 */
+	if (ipmi_device->user_interface) {
+		ipmi_destroy_user(ipmi_device->user_interface);
+		ipmi_device->user_interface = NULL;
+	}
+	/* flush the Tx_msg list */
+	if (!list_empty(&ipmi_device->tx_msg_list))
+		ipmi_flush_tx_msg(ipmi_device);
+
+	list_del(&ipmi_device->head);
+	ipmi_remove_space_handler(ipmi_device);
+}
+
+static int __init acpi_ipmi_init(void)
+{
+	int result = 0;
+
+	if (acpi_disabled)
+		return result;
+
+	mutex_init(&driver_data.ipmi_lock);
+
+	result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+
+	return result;
+}
+
+static void __exit acpi_ipmi_exit(void)
+{
+	struct acpi_ipmi_device *ipmi_device, *temp;
+
+	if (acpi_disabled)
+		return;
+
+	ipmi_smi_watcher_unregister(&driver_data.bmc_events);
+
+	/*
+	 * When an smi_watcher is unregistered, it is only deleted from
+	 * the smi_watcher list; the smi_gone callback function is not
+	 * called. So explicitly uninstall the ACPI IPMI opregion handler
+	 * and free it.
+	 */
+	mutex_lock(&driver_data.ipmi_lock);
+	list_for_each_entry_safe(ipmi_device, temp,
+				&driver_data.ipmi_devices, head) {
+		acpi_remove_ipmi_device(ipmi_device);
+		put_device(ipmi_device->smi_data.dev);
+		kfree(ipmi_device);
+	}
+	mutex_unlock(&driver_data.ipmi_lock);
+}
+
+module_init(acpi_ipmi_init);
+module_exit(acpi_ipmi_exit);
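To make the opregion encoding used by acpi_format_ipmi_msg() and acpi_format_ipmi_response() above concrete: per ACPI 4.0, sec 5.5.2.4.3, the region offset packs the IPMI network function (bits 15:8) and command (bits 7:0), and the value argument is a status/length/data buffer. The following standalone sketch mirrors the driver's macros and buffer layout; the 0x3066 offset and the response bytes are made-up values for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same encoding as the driver's IPMI_OP_RGN_NETFN()/IPMI_OP_RGN_CMD(). */
#define IPMI_OP_RGN_NETFN(offset)	(((offset) >> 8) & 0xff)
#define IPMI_OP_RGN_CMD(offset)		((offset) & 0xff)

/* Mirrors struct acpi_ipmi_buffer: status, length, then up to 64 data bytes. */
struct ipmi_buffer {
	uint8_t status;
	uint8_t length;
	uint8_t data[64];
};

int main(void)
{
	unsigned long offset = 0x3066;		/* hypothetical region offset */
	uint8_t rx[] = { 0x00, 0x01, 0x02 };	/* made-up response bytes */
	struct ipmi_buffer rsp = { 0 };

	/* 0x3066 decodes to netfn 0x30, command 0x66. */
	printf("netfn=0x%02lx cmd=0x%02lx\n",
	       IPMI_OP_RGN_NETFN(offset), IPMI_OP_RGN_CMD(offset));

	/*
	 * A successful transfer is reported the way acpi_format_ipmi_response()
	 * does it: status 0 (ACPI_IPMI_OK), the response length, then the data.
	 */
	rsp.status = 0x00;
	rsp.length = sizeof(rx);
	memcpy(rsp.data, rx, sizeof(rx));
	printf("status=0x%02x length=%u\n", rsp.status,
	       (unsigned int)rsp.length);
	return 0;
}
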
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a7e1d1a..eec2ead 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@
 
 acpi-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
 	 evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
-	 evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o
+	 evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o evxfgpe.o
 
 acpi-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
 	 exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 3e50c74..e0ba17f 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index b17d8de..ab87396 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 72e9d5e..eb0b1f8 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 894a0ff..666271b 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index a6f99cc..41d247d 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -51,8 +51,6 @@
 
 acpi_status acpi_ev_install_xrupt_handlers(void);
 
-acpi_status acpi_ev_install_fadt_gpes(void);
-
 u32 acpi_ev_fixed_event_detect(void);
 
 /*
@@ -82,9 +80,9 @@
 
 acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
 
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
 
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
 
 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
 						       u32 gpe_number);
@@ -93,6 +91,8 @@
 						     struct acpi_gpe_block_info
 						     *gpe_block);
 
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
 /*
  * evgpeblk - Upper-level GPE block support
  */
@@ -107,12 +107,13 @@
 acpi_status
 acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 			     struct acpi_gpe_block_info *gpe_block,
-			     void *ignored);
+			     void *context);
 
 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
 
 u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+		     struct acpi_gpe_event_info *gpe_event_info,
 		     u32 gpe_number);
 
 /*
@@ -126,10 +127,6 @@
 acpi_ev_match_gpe_method(acpi_handle obj_handle,
 			 u32 level, void *context, void **return_value);
 
-acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
-			  u32 level, void *context, void **return_value);
-
 /*
  * evgpeutil - GPE utilities
  */
@@ -138,6 +135,10 @@
 
 u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
 
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context);
+
 struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
 
 acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
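The renamed helpers above (acpi_ev_add_gpe_reference()/acpi_ev_remove_gpe_reference()) make the runtime reference counting explicit: the GPE is enabled in hardware only on the first reference and disabled again on the last, as the evgpe.c changes further down show. A hedged caller-side sketch using the public wrappers, assuming the two-argument acpi_enable_gpe()/acpi_disable_gpe() signatures of this kernel series:

#include <linux/acpi.h>

/*
 * Hypothetical driver fragment. Each acpi_enable_gpe() call takes one
 * runtime reference (ultimately acpi_ev_add_gpe_reference()) and must be
 * balanced by acpi_disable_gpe(); the hardware enable/disable happens only
 * on the first and last reference.
 */
static acpi_status demo_use_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	status = acpi_enable_gpe(gpe_device, gpe_number);
	if (ACPI_FAILURE(status))
		return status;

	/* ... set up and service the event source here ... */

	return acpi_disable_gpe(gpe_device, gpe_number);
}
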
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ad88fca..82a1bd2 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -146,6 +146,9 @@
 
 extern u32 acpi_gbl_nesting_level;
 
+ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+
 /* Support for dynamic control method tracing mechanism */
 
 ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
@@ -225,8 +228,10 @@
  */
 ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
 ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
+ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
 #define acpi_gbl_gpe_lock	&_acpi_gbl_gpe_lock
 #define acpi_gbl_hardware_lock	&_acpi_gbl_hardware_lock
+#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
 
 /*****************************************************************************
  *
@@ -370,7 +375,9 @@
 ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
 ACPI_EXTERN struct acpi_gpe_block_info
 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN u8 acpi_all_gpes_initialized;
+ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
+ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
+ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
 
 /*****************************************************************************
  *
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 167470a..e7213be 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -94,7 +94,7 @@
 			     struct acpi_gpe_register_info *gpe_register_info);
 
 acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
 
 acpi_status
 acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 049e203..3731e1c 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2ceb0c0..edc2586 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -408,17 +408,23 @@
 
 /* Dispatch info for each GPE -- either a method or handler, cannot be both */
 
-struct acpi_handler_info {
-	acpi_event_handler address;	/* Address of handler, if any */
+struct acpi_gpe_handler_info {
+	acpi_gpe_handler address;	/* Address of handler, if any */
 	void *context;		/* Context to be passed to handler */
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level (saved) */
-	u8 orig_flags;		/* Original misc info about this GPE */
-	u8 orig_enabled;	/* Set if the GPE was originally enabled */
+	u8 original_flags;      /* Original (pre-handler) GPE info */
+	u8 originally_enabled;  /* True if GPE was originally enabled */
+};
+
+struct acpi_gpe_notify_object {
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_notify_object *next;
 };
 
 union acpi_gpe_dispatch_info {
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
-	struct acpi_handler_info *handler;
+	struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
+	struct acpi_gpe_notify_object device;   /* List of _PRW devices for implicit notify */
 };
 
 /*
@@ -458,7 +464,7 @@
 	u32 register_count;	/* Number of register pairs in block */
 	u16 gpe_count;		/* Number of individual GPEs in block */
 	u8 block_base_number;	/* Base GPE number for this block */
-	u8 initialized;         /* If set, the GPE block has been initialized */
+	u8 initialized;         /* TRUE if this block is initialized */
 };
 
 /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 8d5c9e0..b7491ee 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index d44d3bc..79a598c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index bdbfaf22b..1055769 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -93,12 +93,10 @@
 
 #define AOPOBJ_AML_CONSTANT         0x01	/* Integer is an AML constant */
 #define AOPOBJ_STATIC_POINTER       0x02	/* Data is part of an ACPI table, don't delete */
-#define AOPOBJ_DATA_VALID           0x04	/* Object is intialized and data is valid */
+#define AOPOBJ_DATA_VALID           0x04	/* Object is initialized and data is valid */
 #define AOPOBJ_OBJECT_INITIALIZED   0x08	/* Region is initialized, _REG was run */
 #define AOPOBJ_SETUP_COMPLETE       0x10	/* Region setup is complete */
 #define AOPOBJ_INVALID              0x20	/* Host OS won't allow a Region address */
-#define AOPOBJ_MODULE_LEVEL         0x40	/* Method is actually module-level code */
-#define AOPOBJ_MODIFIED_NAMESPACE   0x80	/* Method modified the namespace */
 
 /******************************************************************************
  *
@@ -175,7 +173,7 @@
 };
 
 struct acpi_object_method {
-	ACPI_OBJECT_COMMON_HEADER u8 method_flags;
+	ACPI_OBJECT_COMMON_HEADER u8 info_flags;
 	u8 param_count;
 	u8 sync_level;
 	union acpi_operand_object *mutex;
@@ -183,13 +181,21 @@
 	union {
 		ACPI_INTERNAL_METHOD implementation;
 		union acpi_operand_object *handler;
-	} extra;
+	} dispatch;
 
 	u32 aml_length;
 	u8 thread_count;
 	acpi_owner_id owner_id;
 };
 
+/* Flags for info_flags field above */
+
+#define ACPI_METHOD_MODULE_LEVEL        0x01	/* Method is actually module-level code */
+#define ACPI_METHOD_INTERNAL_ONLY       0x02	/* Method is implemented internally (_OSI) */
+#define ACPI_METHOD_SERIALIZED          0x04	/* Method is serialized */
+#define ACPI_METHOD_SERIALIZED_PENDING  0x08	/* Method is to be marked serialized */
+#define ACPI_METHOD_MODIFIED_NAMESPACE  0x10	/* Method modified the namespace */
+
 /******************************************************************************
  *
  * Objects that can be notified.  All share a common notify_info area.
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 8c15ff4..bb2ccfa 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index d0bb0fd..5ea1e06 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 10998d3..94e73c9 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 528bcba..f08b55b 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 6e5dd97..1623b24 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 62a576e..967f081 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 72e4183..99c140d 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1f484ba..f4f0998 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -480,16 +480,10 @@
 	AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
 } AML_ACCESS_ATTRIBUTE;
 
-/* Bit fields in method_flags byte */
+/* Bit fields in the AML method_flags byte */
 
 #define AML_METHOD_ARG_COUNT        0x07
 #define AML_METHOD_SERIALIZED       0x08
 #define AML_METHOD_SYNC_LEVEL       0xF0
 
-/* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
-
-#define AML_METHOD_INTERNAL_ONLY    0x01
-#define AML_METHOD_RESERVED1        0x02
-#define AML_METHOD_RESERVED2        0x04
-
 #endif				/* __AMLCODE_H__ */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 0e5798f..59122cd 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 347bee1..34be60c 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index cc4a38c..a7718bf 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index d94dd89..5d79775 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,6 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "amlcode.h"
 #include "acdispat.h"
 #include "acinterp.h"
 #include "acnamesp.h"
@@ -201,7 +200,7 @@
 	/*
 	 * If this method is serialized, we need to acquire the method mutex.
 	 */
-	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
+	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
 		/*
 		 * Create a mutex for the method if it is defined to be Serialized
 		 * and a mutex has not already been created. We defer the mutex creation
@@ -413,8 +412,9 @@
 
 	/* Invoke an internal method if necessary */
 
-	if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
-		status = obj_desc->method.extra.implementation(next_walk_state);
+	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
+		status =
+		    obj_desc->method.dispatch.implementation(next_walk_state);
 		if (status == AE_OK) {
 			status = AE_CTRL_TERMINATE;
 		}
@@ -579,11 +579,14 @@
 
 		/*
 		 * Delete any namespace objects created anywhere within the
-		 * namespace by the execution of this method. Unless this method
-		 * is a module-level executable code method, in which case we
-		 * want make the objects permanent.
+		 * namespace by the execution of this method. Unless:
+		 * 1) This method is a module-level executable code method, in which
+		 *    case we want to make the objects permanent.
+		 * 2) There are other threads executing the method, in which case we
+		 *    will wait until the last thread has completed.
 		 */
-		if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
+		    && (method_desc->method.thread_count == 1)) {
 
 			/* Delete any direct children of (created by) this method */
 
@@ -593,12 +596,17 @@
 			/*
 			 * Delete any objects that were created by this method
 			 * elsewhere in the namespace (if any were created).
+			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
+			 * deletion such that we don't have to perform an entire
+			 * namespace walk for every control method execution.
 			 */
 			if (method_desc->method.
-			    flags & AOPOBJ_MODIFIED_NAMESPACE) {
+			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
 				acpi_ns_delete_namespace_by_owner(method_desc->
 								  method.
 								  owner_id);
+				method_desc->method.info_flags &=
+				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
 			}
 		}
 	}
@@ -629,19 +637,43 @@
 		 * Serialized if it appears that the method is incorrectly written and
 		 * does not support multiple thread execution. The best example of this
 		 * is if such a method creates namespace objects and blocks. A second
-		 * thread will fail with an AE_ALREADY_EXISTS exception
+		 * thread will fail with an AE_ALREADY_EXISTS exception.
 		 *
 		 * This code is here because we must wait until the last thread exits
-		 * before creating the synchronization semaphore.
+		 * before marking the method as serialized.
 		 */
-		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
-		    && (!method_desc->method.mutex)) {
-			(void)acpi_ds_create_method_mutex(method_desc);
+		if (method_desc->method.
+		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
+			if (walk_state) {
+				ACPI_INFO((AE_INFO,
+					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
+					   walk_state->method_node->name.
+					   ascii));
+			}
+
+			/*
+			 * Method tried to create an object twice and was marked as
+			 * "pending serialized". The probable cause is that the method
+			 * cannot handle reentrancy.
+			 *
+			 * The method was created as not_serialized, but it tried to create
+			 * a named object and then blocked, causing the second thread
+			 * entrance to begin and then fail. Work around this problem by
+			 * marking the method permanently as Serialized when the last
+			 * thread exits here.
+			 */
+			method_desc->method.info_flags &=
+			    ~ACPI_METHOD_SERIALIZED_PENDING;
+			method_desc->method.info_flags |=
+			    ACPI_METHOD_SERIALIZED;
+			method_desc->method.sync_level = 0;
 		}
 
 		/* No more threads, we can free the owner_id */
 
-		if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+		if (!
+		    (method_desc->method.
+		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
 			acpi_ut_release_owner_id(&method_desc->method.owner_id);
 		}
 	}
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 8095306..905ce29 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8e85f54..f42e17e 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 7c0e742..bbecf29 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 15135c2..2c477ce 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b0b5d0..fe40e4c 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 140a9d0..52566ff 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index d1e7017..76a661f 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 83155dd..a6c374e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c61c3039..d458b04 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -217,9 +217,17 @@
 		     status_bit_mask)
 		    && (fixed_enable & acpi_gbl_fixed_event_info[i].
 			enable_bit_mask)) {
+			/*
+			 * Found an active (signalled) event. Invoke global event
+			 * handler if present.
+			 */
+			acpi_fixed_event_count[i]++;
+			if (acpi_gbl_global_event_handler) {
+				acpi_gbl_global_event_handler
+				    (ACPI_EVENT_TYPE_FIXED, NULL, i,
+				     acpi_gbl_global_event_handler_context);
+			}
 
-			/* Found an active (signalled) event */
-			acpi_os_fixed_event_count(i);
 			int_status |= acpi_ev_fixed_event_dispatch(i);
 		}
 	}
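The evevent.c hunk above, together with the matching evgpe.c change below, counts every fixed event and GPE and routes it through an optional global event handler. A hedged sketch of a consumer, assuming the acpi_install_global_event_handler() registration entry point added elsewhere in this series:

#include <linux/errno.h>
#include <linux/init.h>
#include <acpi/acpi_bus.h>

static u32 demo_gpe_events;
static u32 demo_fixed_events;

/* Invoked from the dispatch sites shown in the hunks, once per event. */
static void demo_global_event_handler(u32 event_type, acpi_handle device,
				      u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE)
		demo_gpe_events++;
	else if (event_type == ACPI_EVENT_TYPE_FIXED)
		demo_fixed_events++;
}

static int __init demo_init(void)
{
	acpi_status status;

	/* Registration call assumed from the companion evxfevnt.c change. */
	status = acpi_install_global_event_handler(demo_global_event_handler,
						   NULL);
	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}
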
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f226eac..f472521 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -52,6 +52,8 @@
 /* Local prototypes */
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
 
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_update_gpe_enable_mask
@@ -102,7 +104,7 @@
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Clear the given GPE from stale events and enable it.
+ * DESCRIPTION: Clear a GPE of stale events and enable it.
  *
  ******************************************************************************/
 acpi_status
@@ -113,12 +115,13 @@
 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
 	/*
-	 * We will only allow a GPE to be enabled if it has either an
-	 * associated method (_Lxx/_Exx) or a handler. Otherwise, the
-	 * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
-	 * first time it fires.
+	 * We will only allow a GPE to be enabled if it has either an associated
+	 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
+	 * feature. Otherwise, the GPE will be immediately disabled by
+	 * acpi_ev_gpe_dispatch the first time it fires.
 	 */
-	if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+	    ACPI_GPE_DISPATCH_NONE) {
 		return_ACPI_STATUS(AE_NO_HANDLER);
 	}
 
@@ -137,9 +140,9 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_raw_enable_gpe
+ * FUNCTION:    acpi_ev_add_gpe_reference
  *
- * PARAMETERS:  gpe_event_info  - GPE to enable
+ * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
  *
  * RETURN:      Status
  *
@@ -148,16 +151,21 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 {
 	acpi_status status = AE_OK;
 
+	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
+
 	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
 		return_ACPI_STATUS(AE_LIMIT);
 	}
 
 	gpe_event_info->runtime_count++;
 	if (gpe_event_info->runtime_count == 1) {
+
+		/* Enable on first reference */
+
 		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
 		if (ACPI_SUCCESS(status)) {
 			status = acpi_ev_enable_gpe(gpe_event_info);
@@ -173,9 +181,9 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_raw_disable_gpe
+ * FUNCTION:    acpi_ev_remove_gpe_reference
  *
- * PARAMETERS:  gpe_event_info  - GPE to disable
+ * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
  *
  * RETURN:      Status
  *
@@ -184,16 +192,21 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 {
 	acpi_status status = AE_OK;
 
+	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
+
 	if (!gpe_event_info->runtime_count) {
 		return_ACPI_STATUS(AE_LIMIT);
 	}
 
 	gpe_event_info->runtime_count--;
 	if (!gpe_event_info->runtime_count) {
+
+		/* Disable on last reference */
+
 		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
 		if (ACPI_SUCCESS(status)) {
 			status = acpi_hw_low_set_gpe(gpe_event_info,
@@ -379,7 +392,7 @@
 			}
 
 			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
-					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
+					  "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
 					  gpe_register_info->base_gpe_number,
 					  status_reg, enable_reg));
 
@@ -405,7 +418,9 @@
 					 * or method.
 					 */
 					int_status |=
-					    acpi_ev_gpe_dispatch(&gpe_block->
+					    acpi_ev_gpe_dispatch(gpe_block->
+								 node,
+								 &gpe_block->
 						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
 				}
 			}
@@ -435,19 +450,29 @@
  *              an interrupt handler.
  *
  ******************************************************************************/
-static void acpi_ev_asynch_enable_gpe(void *context);
 
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 {
-	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
+	struct acpi_gpe_event_info *gpe_event_info = context;
 	acpi_status status;
-	struct acpi_gpe_event_info local_gpe_event_info;
+	struct acpi_gpe_event_info *local_gpe_event_info;
 	struct acpi_evaluate_info *info;
+	struct acpi_gpe_notify_object *notify_object;
 
 	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
+	/* Allocate a local GPE block */
+
+	local_gpe_event_info =
+	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
+	if (!local_gpe_event_info) {
+		ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
+		return_VOID;
+	}
+
 	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
 	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(local_gpe_event_info);
 		return_VOID;
 	}
 
@@ -455,6 +480,7 @@
 
 	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
 		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+		ACPI_FREE(local_gpe_event_info);
 		return_VOID;
 	}
 
@@ -462,7 +488,7 @@
 	 * Take a snapshot of the GPE info for this level - we copy the info to
 	 * prevent a race condition with remove_handler/remove_block.
 	 */
-	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
+	ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
 		    sizeof(struct acpi_gpe_event_info));
 
 	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -470,12 +496,34 @@
 		return_VOID;
 	}
 
-	/*
-	 * Must check for control method type dispatch one more time to avoid a
-	 * race with ev_gpe_install_handler
-	 */
-	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
-	    ACPI_GPE_DISPATCH_METHOD) {
+	/* Do the correct dispatch - normal method or implicit notify */
+
+	switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+	case ACPI_GPE_DISPATCH_NOTIFY:
+
+		/*
+		 * Implicit notify.
+		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
+		 * NOTE: the request is queued for execution after this method
+		 * completes. The notify handlers are NOT invoked synchronously
+		 * from this thread -- because handlers may in turn run other
+		 * control methods.
+		 */
+		status = acpi_ev_queue_notify_request(
+				local_gpe_event_info->dispatch.device.node,
+				ACPI_NOTIFY_DEVICE_WAKE);
+
+		notify_object = local_gpe_event_info->dispatch.device.next;
+		while (ACPI_SUCCESS(status) && notify_object) {
+			status = acpi_ev_queue_notify_request(
+					notify_object->node,
+					ACPI_NOTIFY_DEVICE_WAKE);
+			notify_object = notify_object->next;
+		}
+
+		break;
+
+	case ACPI_GPE_DISPATCH_METHOD:
 
 		/* Allocate the evaluation information block */
 
@@ -488,7 +536,7 @@
 			 * control method that corresponds to this GPE
 			 */
 			info->prefix_node =
-			    local_gpe_event_info.dispatch.method_node;
+			    local_gpe_event_info->dispatch.method_node;
 			info->flags = ACPI_IGNORE_RETURN_VALUE;
 
 			status = acpi_ns_evaluate(info);
@@ -499,46 +547,98 @@
 			ACPI_EXCEPTION((AE_INFO, status,
 					"while evaluating GPE method [%4.4s]",
 					acpi_ut_get_node_name
-					(local_gpe_event_info.dispatch.
+					(local_gpe_event_info->dispatch.
 					 method_node)));
 		}
+
+		break;
+
+	default:
+		return_VOID;    /* Should never happen */
 	}
+
 	/* Defer enabling of GPE until all notify handlers are done */
-	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
-				gpe_event_info);
+
+	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+				 acpi_ev_asynch_enable_gpe,
+				 local_gpe_event_info);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(local_gpe_event_info);
+	}
 	return_VOID;
 }
 
-static void acpi_ev_asynch_enable_gpe(void *context)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_asynch_enable_gpe
+ *
+ * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
+ *              Callback from acpi_os_execute
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
+ *              complete (i.e., finish execution of Notify)
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
 {
 	struct acpi_gpe_event_info *gpe_event_info = context;
+
+	(void)acpi_ev_finish_gpe(gpe_event_info);
+
+	ACPI_FREE(gpe_event_info);
+	return;
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_finish_gpe
+ *
+ * PARAMETERS:  gpe_event_info      - Info for this GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
+ *              of a GPE method or a synchronous or asynchronous GPE handler.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
 	acpi_status status;
+
 	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
 	    ACPI_GPE_LEVEL_TRIGGERED) {
 		/*
-		 * GPE is level-triggered, we clear the GPE status bit after handling
-		 * the event.
+		 * GPE is level-triggered, we clear the GPE status bit after
+		 * handling the event.
 		 */
 		status = acpi_hw_clear_gpe(gpe_event_info);
 		if (ACPI_FAILURE(status)) {
-			return_VOID;
+			return (status);
 		}
 	}
 
 	/*
-	 * Enable this GPE, conditionally. This means that the GPE will only be
-	 * physically enabled if the enable_for_run bit is set in the event_info
+	 * Enable this GPE, conditionally. This means that the GPE will
+	 * only be physically enabled if the enable_for_run bit is set
+	 * in the event_info.
 	 */
-	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
-
-	return_VOID;
+	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
+	return (AE_OK);
 }
 
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_gpe_dispatch
  *
- * PARAMETERS:  gpe_event_info  - Info for this GPE
+ * PARAMETERS:  gpe_device      - Device node. NULL for GPE0/GPE1
+ *              gpe_event_info  - Info for this GPE
  *              gpe_number      - Number relative to the parent GPE block
  *
  * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
@@ -551,13 +651,22 @@
  ******************************************************************************/
 
 u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+		    struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
 {
 	acpi_status status;
+	u32 return_value;
 
 	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
 
-	acpi_os_gpe_count(gpe_number);
+	/* Invoke global event handler if present */
+
+	acpi_gpe_count++;
+	if (acpi_gbl_global_event_handler) {
+		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
+					      gpe_number,
+					      acpi_gbl_global_event_handler_context);
+	}
 
 	/*
 	 * If edge-triggered, clear the GPE status bit now. Note that
@@ -568,59 +677,55 @@
 		status = acpi_hw_clear_gpe(gpe_event_info);
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to clear GPE[0x%2X]",
-					gpe_number));
+					"Unable to clear GPE%02X", gpe_number));
 			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
 		}
 	}
 
 	/*
-	 * Dispatch the GPE to either an installed handler, or the control method
-	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
-	 * it and do not attempt to run the method. If there is neither a handler
-	 * nor a method, we disable this GPE to prevent further such pointless
-	 * events from firing.
+	 * Always disable the GPE so that it does not keep firing before
+	 * any asynchronous activity completes (either from the execution
+	 * of a GPE method or an asynchronous GPE handler.)
+	 *
+	 * If there is no handler or method to run, just disable the
+	 * GPE and leave it disabled permanently to prevent further such
+	 * pointless events from firing.
+	 */
+	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Unable to disable GPE%02X", gpe_number));
+		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+	}
+
+	/*
+	 * Dispatch the GPE to either an installed handler or the control
+	 * method associated with this GPE (_Lxx or _Exx). If a handler
+	 * exists, we invoke it and do not attempt to run the method.
+	 * If there is neither a handler nor a method, leave the GPE
+	 * disabled.
 	 */
 	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
 	case ACPI_GPE_DISPATCH_HANDLER:
 
-		/*
-		 * Invoke the installed handler (at interrupt level)
-		 * Ignore return status for now.
-		 * TBD: leave GPE disabled on error?
-		 */
-		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
-								dispatch.
-								handler->
-								context);
+		/* Invoke the installed handler (at interrupt level) */
 
-		/* It is now safe to clear level-triggered events. */
+		return_value =
+		    gpe_event_info->dispatch.handler->address(gpe_device,
+							      gpe_number,
+							      gpe_event_info->
+							      dispatch.handler->
+							      context);
 
-		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
-		    ACPI_GPE_LEVEL_TRIGGERED) {
-			status = acpi_hw_clear_gpe(gpe_event_info);
-			if (ACPI_FAILURE(status)) {
-				ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to clear GPE[0x%2X]",
-						gpe_number));
-				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-			}
+		/* If requested, clear (if level-triggered) and reenable the GPE */
+
+		if (return_value & ACPI_REENABLE_GPE) {
+			(void)acpi_ev_finish_gpe(gpe_event_info);
 		}
 		break;
 
 	case ACPI_GPE_DISPATCH_METHOD:
-
-		/*
-		 * Disable the GPE, so it doesn't keep firing before the method has a
-		 * chance to run (it runs asynchronously with interrupts enabled).
-		 */
-		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to disable GPE[0x%2X]",
-					gpe_number));
-			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-		}
+	case ACPI_GPE_DISPATCH_NOTIFY:
 
 		/*
 		 * Execute the method associated with the GPE
@@ -631,7 +736,7 @@
 					 gpe_event_info);
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to queue handler for GPE[0x%2X] - event disabled",
+					"Unable to queue handler for GPE%02X - event disabled",
 					gpe_number));
 		}
 		break;
@@ -644,20 +749,9 @@
 		 * a GPE to be enabled if it has no handler or method.
 		 */
 		ACPI_ERROR((AE_INFO,
-			    "No handler or method for GPE[0x%2X], disabling event",
+			    "No handler or method for GPE%02X, disabling event",
 			    gpe_number));
 
-		/*
-		 * Disable the GPE. The GPE will remain disabled a handler
-		 * is installed or ACPICA is restarted.
-		 */
-		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to disable GPE[0x%2X]",
-					gpe_number));
-			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-		}
 		break;
 	}
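
Editor's note: the hunks above change the GPE dispatch contract. Handlers installed via acpi_install_gpe_handler() now receive the GPE device and number, the GPE is disabled before dispatch, and a handler must OR ACPI_REENABLE_GPE into its return value if it wants ACPICA to clear (when level-triggered) and re-enable the GPE on its behalf. A minimal sketch against the new prototype follows; the handler name and the GPE number in the commented registration call are illustrative only.

	#include <acpi/acpi.h>

	/* New-style GPE handler: runs at interrupt level */
	static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
				  void *context)
	{
		/* context is whatever was passed to acpi_install_gpe_handler();
		 * acknowledge the device, schedule deferred work, etc.
		 */

		/*
		 * ACPI_REENABLE_GPE asks ACPICA to clear (if level-triggered)
		 * and re-enable the GPE on return; omit it to keep the GPE
		 * disabled until the driver re-enables it itself.
		 */
		return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
	}

	/* Registration from a probe path (GPE 0x16 is only an example):
	 *	acpi_install_gpe_handler(NULL, 0x16, ACPI_GPE_LEVEL_TRIGGERED,
	 *				 my_gpe_handler, dev_context);
	 */
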
 
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 020add3..ca2c41a 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -361,9 +361,9 @@
 
 	gpe_block->node = gpe_device;
 	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
+	gpe_block->initialized = FALSE;
 	gpe_block->register_count = register_count;
 	gpe_block->block_base_number = gpe_block_base_number;
-	gpe_block->initialized = FALSE;
 
 	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
 		    sizeof(struct acpi_generic_address));
@@ -386,7 +386,7 @@
 		return_ACPI_STATUS(status);
 	}
 
-	acpi_all_gpes_initialized = FALSE;
+	acpi_gbl_all_gpes_initialized = FALSE;
 
 	/* Find all GPE methods (_Lxx or_Exx) for this block */
 
@@ -423,14 +423,12 @@
  *
  * FUNCTION:    acpi_ev_initialize_gpe_block
  *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE block
- *              gpe_block           - Gpe Block info
+ * PARAMETERS:  acpi_gpe_callback
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Initialize and enable a GPE block. First find and run any
- *              _PRT methods associated with the block, then enable the
- *              appropriate GPEs.
+ * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
+ *              associated methods.
  *              Note: Assumes namespace is locked.
  *
  ******************************************************************************/
@@ -450,8 +448,8 @@
 	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
 
 	/*
-	 * Ignore a null GPE block (e.g., if no GPE block 1 exists) and
-	 * GPE blocks that have been initialized already.
+	 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
+	 * any GPE blocks that have been initialized already.
 	 */
 	if (!gpe_block || gpe_block->initialized) {
 		return_ACPI_STATUS(AE_OK);
@@ -459,8 +457,8 @@
 
 	/*
 	 * Enable all GPEs that have a corresponding method and have the
-	 * ACPI_GPE_CAN_WAKE flag unset.  Any other GPEs within this block must
-	 * be enabled via the acpi_enable_gpe() interface.
+	 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
+	 * must be enabled via the acpi_enable_gpe() interface.
 	 */
 	gpe_enabled_count = 0;
 
@@ -472,14 +470,19 @@
 			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
 			gpe_event_info = &gpe_block->event_info[gpe_index];
 
-			/* Ignore GPEs that have no corresponding _Lxx/_Exx method */
-
-			if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
+			/*
+			 * Ignore GPEs that have no corresponding _Lxx/_Exx method
+			 * and GPEs that are used to wake the system
+			 */
+			if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+			     ACPI_GPE_DISPATCH_NONE)
+			    || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+				== ACPI_GPE_DISPATCH_HANDLER)
 			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
 				continue;
 			}
 
-			status = acpi_raw_enable_gpe(gpe_event_info);
+			status = acpi_ev_add_gpe_reference(gpe_event_info);
 			if (ACPI_FAILURE(status)) {
 				ACPI_EXCEPTION((AE_INFO, status,
 					"Could not enable GPE 0x%02X",
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 4c8dea5..ce9aa9f 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -45,11 +45,27 @@
 #include "accommon.h"
 #include "acevents.h"
 #include "acnamesp.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpeinit")
 
+/*
+ * Note: History of _PRW support in ACPICA
+ *
+ * Originally (2000 - 2010), the GPE initialization code performed a walk of
+ * the entire namespace to execute the _PRW methods and detect all GPEs
+ * capable of waking the system.
+ *
+ * As of 10/2010, the _PRW method execution has been removed since it is
+ * actually unnecessary. The host OS must in fact execute all _PRW methods
+ * in order to identify the device/power-resource dependencies. We now put
+ * the onus on the host OS to identify the wake GPEs as part of this process
+ * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This
+ * not only reduces the complexity of the ACPICA initialization code, but in
+ * some cases (on systems with very large namespaces) it should reduce the
+ * kernel boot time as well.
+ */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_gpe_initialize
@@ -222,7 +238,7 @@
 	acpi_status status = AE_OK;
 
 	/*
-	 * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+	 * Find any _Lxx/_Exx GPE methods that have just been loaded.
 	 *
 	 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
 	 * enabled.
@@ -235,9 +251,9 @@
 		return;
 	}
 
+	walk_info.count = 0;
 	walk_info.owner_id = table_owner_id;
 	walk_info.execute_by_owner_id = TRUE;
-	walk_info.count = 0;
 
 	/* Walk the interrupt level descriptor list */
 
@@ -298,7 +314,7 @@
  *                  xx     - is the GPE number [in HEX]
  *
  * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
- *    with that owner.
+ * with that owner.
  *
  ******************************************************************************/
 
@@ -415,6 +431,7 @@
 	 * Add the GPE information from above to the gpe_event_info block for
 	 * use during dispatch of this GPE.
 	 */
+	gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
 	gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
 	gpe_event_info->dispatch.method_node = method_node;
 
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 19a0e51..80a81d0 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -154,6 +154,45 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ev_get_gpe_device
+ *
+ * PARAMETERS:  GPE_WALK_CALLBACK
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
+ *              block device. NULL if the GPE is one of the FADT-defined GPEs.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context)
+{
+	struct acpi_gpe_device_info *info = context;
+
+	/* Increment Index by the number of GPEs in this block */
+
+	info->next_block_base_index += gpe_block->gpe_count;
+
+	if (info->index < info->next_block_base_index) {
+		/*
+		 * The GPE index is within this block, get the node. Leave the node
+		 * NULL for the FADT-defined GPEs
+		 */
+		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
+			info->gpe_device = gpe_block->node;
+		}
+
+		info->status = AE_OK;
+		return (AE_CTRL_END);
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ev_get_gpe_xrupt_block
  *
  * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index fcaed9f..7dc8094 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -284,41 +284,39 @@
  * RETURN:      ACPI_INTERRUPT_HANDLED
  *
  * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- *              release interrupt occurs. Attempt to acquire the global lock,
- *              if successful, signal the thread waiting for the lock.
+ *              release interrupt occurs.  If there's a thread waiting for
+ *              the global lock, signal it.
  *
  * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
  * this is not possible for some reason, a separate thread will have to be
  * scheduled to do this.
  *
  ******************************************************************************/
+static u8 acpi_ev_global_lock_pending;
 
 static u32 acpi_ev_global_lock_handler(void *context)
 {
-	u8 acquired = FALSE;
+	acpi_status status;
+	acpi_cpu_flags flags;
 
-	/*
-	 * Attempt to get the lock.
-	 *
-	 * If we don't get it now, it will be marked pending and we will
-	 * take another interrupt when it becomes free.
-	 */
-	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
-	if (acquired) {
+	flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-		/* Got the lock, now wake all threads waiting for it */
-
-		acpi_gbl_global_lock_acquired = TRUE;
-		/* Send a unit to the semaphore */
-
-		if (ACPI_FAILURE
-		    (acpi_os_signal_semaphore
-		     (acpi_gbl_global_lock_semaphore, 1))) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not signal Global Lock semaphore"));
-		}
+	if (!acpi_ev_global_lock_pending) {
+		goto out;
 	}
 
+	/* Send a unit to the semaphore */
+
+	status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
+	}
+
+	acpi_ev_global_lock_pending = FALSE;
+
+ out:
+	acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
+
 	return (ACPI_INTERRUPT_HANDLED);
 }
 
@@ -415,6 +413,7 @@
 
 acpi_status acpi_ev_acquire_global_lock(u16 timeout)
 {
+	acpi_cpu_flags flags;
 	acpi_status status = AE_OK;
 	u8 acquired = FALSE;
 
@@ -467,32 +466,47 @@
 		return_ACPI_STATUS(AE_OK);
 	}
 
-	/* Attempt to acquire the actual hardware lock */
+	flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
-	if (acquired) {
+	do {
 
-		/* We got the lock */
+		/* Attempt to acquire the actual hardware lock */
 
+		ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+		if (acquired) {
+			acpi_gbl_global_lock_acquired = TRUE;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "Acquired hardware Global Lock\n"));
+			break;
+		}
+
+		acpi_ev_global_lock_pending = TRUE;
+
+		acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
+
+		/*
+		 * Did not get the lock. The pending bit was set above, and we
+		 * must wait until we get the global lock released interrupt.
+		 */
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Acquired hardware Global Lock\n"));
+				  "Waiting for hardware Global Lock\n"));
 
-		acpi_gbl_global_lock_acquired = TRUE;
-		return_ACPI_STATUS(AE_OK);
-	}
+		/*
+		 * Wait for handshake with the global lock interrupt handler.
+		 * This interface releases the interpreter if we must wait.
+		 */
+		status = acpi_ex_system_wait_semaphore(
+						acpi_gbl_global_lock_semaphore,
+						ACPI_WAIT_FOREVER);
 
-	/*
-	 * Did not get the lock. The pending bit was set above, and we must now
-	 * wait until we get the global lock released interrupt.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
+		flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-	/*
-	 * Wait for handshake with the global lock interrupt handler.
-	 * This interface releases the interpreter if we must wait.
-	 */
-	status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
-					       ACPI_WAIT_FOREVER);
+	} while (ACPI_SUCCESS(status));
+
+	acpi_ev_global_lock_pending = FALSE;
+
+	acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
 
 	return_ACPI_STATUS(status);
 }
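
Editor's note: the rework above replaces the try-to-acquire logic in the interrupt handler with a pending-flag handshake: the acquiring thread loops on the hardware lock, marks itself pending under acpi_ev_global_lock_pending_lock, and sleeps on the semaphore that the release interrupt signals. A userspace analogue of that handshake is sketched below, with POSIX primitives standing in for the acpi_os_* calls; try_hw_lock() is a hypothetical stand-in for ACPI_ACQUIRE_GLOBAL_LOCK().

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdbool.h>

	static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
	static sem_t lock_sem;		/* init with sem_init(&lock_sem, 0, 0) */
	static bool lock_pending;

	bool try_hw_lock(void);		/* hypothetical hardware-lock attempt */

	void acquire_side(void)		/* models acpi_ev_acquire_global_lock() */
	{
		pthread_mutex_lock(&pending_lock);
		for (;;) {
			if (try_hw_lock())
				break;			/* got the hardware lock */
			lock_pending = true;		/* ask the release IRQ to wake us */
			pthread_mutex_unlock(&pending_lock);
			sem_wait(&lock_sem);		/* sleep until signalled */
			pthread_mutex_lock(&pending_lock);
		}
		lock_pending = false;
		pthread_mutex_unlock(&pending_lock);
	}

	void release_irq_side(void)	/* models acpi_ev_global_lock_handler() */
	{
		pthread_mutex_lock(&pending_lock);
		if (lock_pending) {
			sem_post(&lock_sem);		/* wake the waiter */
			lock_pending = false;
		}
		pthread_mutex_unlock(&pending_lock);
	}
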
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 98fd210..785a5ee 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47a6d..9659cee 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -590,9 +590,9 @@
 				 * See acpi_ns_exec_module_code
 				 */
 				if (obj_desc->method.
-				    flags & AOPOBJ_MODULE_LEVEL) {
+				    info_flags & ACPI_METHOD_MODULE_LEVEL) {
 					handler_obj =
-					    obj_desc->method.extra.handler;
+					    obj_desc->method.dispatch.handler;
 				}
 				break;
 
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 8dfbaa9..2ebd40e 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 36af222..e114140 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,57 @@
 
 ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
 #endif				/*  ACPI_FUTURE_USAGE  */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_global_event_handler
+ *
+ * PARAMETERS:  Handler         - Pointer to the global event handler function
+ *              Context         - Value passed to the handler on each event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function. The global handler
+ *              is invoked upon each incoming GPE and Fixed Event. It is
+ *              invoked at interrupt level at the time of the event dispatch.
+ *              Can be used to update event counters, etc.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
+
+	/* Parameter validation */
+
+	if (!handler) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow two handlers. */
+
+	if (acpi_gbl_global_event_handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	acpi_gbl_global_event_handler = handler;
+	acpi_gbl_global_event_handler_context = context;
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
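
Editor's note: acpi_install_global_event_handler() is new with this patch; the handler is invoked at interrupt level on every incoming GPE and Fixed Event with the event type, owning device, event number and registration context, and only one may be installed. A counting sketch is shown below; the handler is assumed to return void, matching how it is invoked from the dispatch path above, and the counter names are illustrative.

	static u32 gpe_count, fixed_event_count;

	/* Keep this minimal: it runs at interrupt level on every event */
	static void count_events(u32 event_type, acpi_handle device,
				 u32 event_number, void *context)
	{
		if (event_type == ACPI_EVENT_TYPE_GPE)
			gpe_count++;
		else
			fixed_event_count++;
	}

	/* Registration (a second install returns AE_ALREADY_EXISTS):
	 *	acpi_install_global_event_handler(count_events, NULL);
	 */
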
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_fixed_event_handler
@@ -671,10 +722,10 @@
 acpi_status
 acpi_install_gpe_handler(acpi_handle gpe_device,
 			 u32 gpe_number,
-			 u32 type, acpi_event_handler address, void *context)
+			 u32 type, acpi_gpe_handler address, void *context)
 {
 	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_handler_info *handler;
+	struct acpi_gpe_handler_info *handler;
 	acpi_status status;
 	acpi_cpu_flags flags;
 
@@ -693,7 +744,7 @@
 
 	/* Allocate memory for the handler object */
 
-	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
 	if (!handler) {
 		status = AE_NO_MEMORY;
 		goto unlock_and_exit;
@@ -722,7 +773,7 @@
 	handler->address = address;
 	handler->context = context;
 	handler->method_node = gpe_event_info->dispatch.method_node;
-	handler->orig_flags = gpe_event_info->flags &
+	handler->original_flags = gpe_event_info->flags &
 			(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
 
 	/*
@@ -731,10 +782,10 @@
 	 * disabled now to avoid spurious execution of the handler.
 	 */
 
-	if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
+	if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
 	    && gpe_event_info->runtime_count) {
-		handler->orig_enabled = 1;
-		(void)acpi_raw_disable_gpe(gpe_event_info);
+		handler->originally_enabled = 1;
+		(void)acpi_ev_remove_gpe_reference(gpe_event_info);
 	}
 
 	/* Install the handler */
@@ -777,10 +828,10 @@
  ******************************************************************************/
 acpi_status
 acpi_remove_gpe_handler(acpi_handle gpe_device,
-			u32 gpe_number, acpi_event_handler address)
+			u32 gpe_number, acpi_gpe_handler address)
 {
 	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_handler_info *handler;
+	struct acpi_gpe_handler_info *handler;
 	acpi_status status;
 	acpi_cpu_flags flags;
 
@@ -835,7 +886,7 @@
 	gpe_event_info->dispatch.method_node = handler->method_node;
 	gpe_event_info->flags &=
 		~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
-	gpe_event_info->flags |= handler->orig_flags;
+	gpe_event_info->flags |= handler->original_flags;
 
 	/*
 	 * If the GPE was previously associated with a method and it was
@@ -843,9 +894,9 @@
 	 * post-initialization configuration.
 	 */
 
-	if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
-	    && handler->orig_enabled)
-		(void)acpi_raw_enable_gpe(gpe_event_info);
+	if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
+	    && handler->originally_enabled)
+		(void)acpi_ev_add_gpe_reference(gpe_event_info);
 
 	/* Now we can free the handler object */
 
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index a1dabe3..c57b5c7 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,18 +43,11 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "acevents.h"
-#include "acnamesp.h"
 #include "actables.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfevnt")
 
-/* Local prototypes */
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-		       struct acpi_gpe_block_info *gpe_block, void *context);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_enable
@@ -213,185 +206,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_gpe_wakeup
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *              Action          - Enable or Disable
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
- *
- ******************************************************************************/
-acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_gpe_register_info *gpe_register_info;
-	acpi_cpu_flags flags;
-	u32 register_bit;
-
-	ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	gpe_register_info = gpe_event_info->register_info;
-	if (!gpe_register_info) {
-		status = AE_NOT_EXIST;
-		goto unlock_and_exit;
-	}
-
-	register_bit =
-	    acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
-
-	/* Perform the action */
-
-	switch (action) {
-	case ACPI_GPE_ENABLE:
-		ACPI_SET_BIT(gpe_register_info->enable_for_wake,
-			     (u8)register_bit);
-		break;
-
-	case ACPI_GPE_DISABLE:
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
-			       (u8)register_bit);
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
-		status = AE_BAD_PARAMETER;
-		break;
-	}
-
-unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
- *              hardware-enabled.
- *
- ******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_BAD_PARAMETER;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		status = acpi_raw_enable_gpe(gpe_event_info);
-	}
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_disable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a reference to a GPE. When the last reference is
- *              removed, only then is the GPE disabled (for runtime GPEs), or
- *              the GPE mask bit disabled (for wake GPEs)
- *
- ******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_BAD_PARAMETER;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		status = acpi_raw_disable_gpe(gpe_event_info) ;
-	}
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_gpe_can_wake
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE.  If the GPE
- *              has a corresponding method and is currently enabled, disable it
- *              (GPEs with corresponding methods are enabled unconditionally
- *              during initialization, but GPEs that can wake up are expected
- *              to be initially disabled).
- *
- ******************************************************************************/
-acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-	} else {
-		status = AE_BAD_PARAMETER;
-	}
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_disable_event
  *
  * PARAMETERS:  Event           - The fixed event to be disabled
@@ -483,44 +297,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_clear_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear an ACPI event (general purpose)
- *
- ******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_clear_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	status = acpi_hw_clear_gpe(gpe_event_info);
-
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
-/*******************************************************************************
- *
  * FUNCTION:    acpi_get_event_status
  *
  * PARAMETERS:  Event           - The fixed event
@@ -575,379 +351,3 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_event_status)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_gpe_status
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *              event_status    - Where the current status of the event will
- *                                be returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get status of an event (general purpose)
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_status(acpi_handle gpe_device,
-		    u32 gpe_number, acpi_event_status *event_status)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Obtain status on the requested GPE number */
-
-	status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
-
-	if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
-		*event_status |= ACPI_EVENT_FLAG_HANDLE;
-
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *              gpe_block_address   - Address and space_iD
- *              register_count      - Number of GPE register pairs in the block
- *              interrupt_number    - H/W interrupt for the block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create and Install a block of GPE registers
- *
- ******************************************************************************/
-acpi_status
-acpi_install_gpe_block(acpi_handle gpe_device,
-		       struct acpi_generic_address *gpe_block_address,
-		       u32 register_count, u32 interrupt_number)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	struct acpi_gpe_block_info *gpe_block;
-
-	ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
-
-	if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_validate_handle(gpe_device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/*
-	 * For user-installed GPE Block Devices, the gpe_block_base_number
-	 * is always zero
-	 */
-	status =
-	    acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
-				     interrupt_number, &gpe_block);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Install block in the device_object attached to the node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-
-		/*
-		 * No object, create a new one (Device nodes do not always have
-		 * an attached object)
-		 */
-		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
-		if (!obj_desc) {
-			status = AE_NO_MEMORY;
-			goto unlock_and_exit;
-		}
-
-		status =
-		    acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
-
-		/* Remove local reference to the object */
-
-		acpi_ut_remove_reference(obj_desc);
-
-		if (ACPI_FAILURE(status)) {
-			goto unlock_and_exit;
-		}
-	}
-
-	/* Now install the GPE block in the device_object */
-
-	obj_desc->device.gpe_block = gpe_block;
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a previously installed block of GPE registers
- *
- ******************************************************************************/
-acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
-
-	if (!gpe_device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_validate_handle(gpe_device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Get the device_object attached to the node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc || !obj_desc->device.gpe_block) {
-		return_ACPI_STATUS(AE_NULL_OBJECT);
-	}
-
-	/* Delete the GPE block (but not the device_object) */
-
-	status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
-	if (ACPI_SUCCESS(status)) {
-		obj_desc->device.gpe_block = NULL;
-	}
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_gpe_device
- *
- * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
- *              gpe_device          - Where the parent GPE Device is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
- *              gpe device indicates that the gpe number is contained in one of
- *              the FADT-defined gpe blocks. Otherwise, the GPE block device.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
-{
-	struct acpi_gpe_device_info info;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
-
-	if (!gpe_device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (index >= acpi_current_gpe_count) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/* Setup and walk the GPE list */
-
-	info.index = index;
-	info.status = AE_NOT_EXIST;
-	info.gpe_device = NULL;
-	info.next_block_base_index = 0;
-
-	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	*gpe_device = info.gpe_device;
-	return_ACPI_STATUS(info.status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_get_gpe_device
- *
- * PARAMETERS:  GPE_WALK_CALLBACK
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
- *              block device. NULL if the GPE is one of the FADT-defined GPEs.
- *
- ******************************************************************************/
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-		       struct acpi_gpe_block_info *gpe_block, void *context)
-{
-	struct acpi_gpe_device_info *info = context;
-
-	/* Increment Index by the number of GPEs in this block */
-
-	info->next_block_base_index += gpe_block->gpe_count;
-
-	if (info->index < info->next_block_base_index) {
-		/*
-		 * The GPE index is within this block, get the node. Leave the node
-		 * NULL for the FADT-defined GPEs
-		 */
-		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
-			info->gpe_device = gpe_block->node;
-		}
-
-		info->status = AE_OK;
-		return (AE_CTRL_END);
-	}
-
-	return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_disable_all_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_disable_all_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_disable_all_gpes();
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_enable_all_runtime_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_enable_all_runtime_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_enable_all_runtime_gpes();
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_update_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
- *              are not pointed to by any device _PRW methods indicating that
- *              these GPEs are generally intended for system or device wakeup
- *              (such GPEs have to be enabled directly when the devices whose
- *              _PRW methods point to them are set up for wakeup signaling).
- *
- ******************************************************************************/
-
-acpi_status acpi_update_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_update_gpes);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	} else if (acpi_all_gpes_initialized) {
-		goto unlock;
-	}
-
-	status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
-	if (ACPI_SUCCESS(status)) {
-		acpi_all_gpes_initialized = TRUE;
-	}
-
-unlock:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
new file mode 100644
index 0000000..52aaff3
--- /dev/null
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -0,0 +1,696 @@
+/******************************************************************************
+ *
+ * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evxfgpe")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_update_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Complete GPE initialization and enable all GPEs that have
+ *              associated _Lxx or _Exx methods and are not pointed to by any
+ *              device _PRW methods (this indicates that these GPEs are
+ *              generally intended for system or device wakeup. Such GPEs
+ *              have to be enabled directly when the devices whose _PRW
+ *              methods point to them are set up for wakeup signaling.)
+ *
+ * NOTE: Should be called after any GPEs are added to the system. Primarily,
+ * after the system _PRW methods have been run, but also after a GPE Block
+ * Device has been added or if any new GPE methods have been added via a
+ * dynamic table load.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_update_all_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_update_all_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (acpi_gbl_all_gpes_initialized) {
+		goto unlock_and_exit;
+	}
+
+	status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
+	if (ACPI_SUCCESS(status)) {
+		acpi_gbl_all_gpes_initialized = TRUE;
+	}
+
+unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ *              hardware-enabled.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (gpe_event_info) {
+		status = acpi_ev_add_gpe_reference(gpe_event_info);
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ *              removed, only then is the GPE disabled (for runtime GPEs), or
+ *              the GPE mask bit disabled (for wake GPEs)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (gpe_event_info) {
+		status = acpi_ev_remove_gpe_reference(gpe_event_info);
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_setup_gpe_for_wake
+ *
+ * PARAMETERS:  wake_device         - Device associated with the GPE (via _PRW)
+ *              gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number          - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Mark a GPE as having the ability to wake the system. This
+ *              interface is intended to be used as the host executes the
+ *              _PRW methods (Power Resources for Wake) in the system tables.
+ *              Each _PRW appears under a Device Object (The wake_device), and
+ *              contains the info for the wake GPE associated with the
+ *              wake_device.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_setup_gpe_for_wake(acpi_handle wake_device,
+			acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+	struct acpi_gpe_event_info *gpe_event_info;
+	struct acpi_namespace_node *device_node;
+	struct acpi_gpe_notify_object *notify_object;
+	acpi_cpu_flags flags;
+	u8 gpe_dispatch_mask;
+
+	ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
+
+	/* Parameter Validation */
+
+	if (!wake_device) {
+		/*
+		 * By forcing wake_device to be valid, we automatically enable the
+		 * implicit notify feature on all hosts.
+		 */
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		goto unlock_and_exit;
+	}
+
+	if (wake_device == ACPI_ROOT_OBJECT) {
+		goto out;
+	}
+
+	/*
+	 * If there is no method or handler for this GPE, then the
+	 * wake_device will be notified whenever this GPE fires (aka
+	 * "implicit notify"). Note: The GPE is assumed to be
+	 * level-triggered (for Windows compatibility).
+	 */
+	gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+	if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+	    && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+		goto out;
+	}
+
+	/* Validate wake_device is of type Device */
+
+	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+	if (device_node->type != ACPI_TYPE_DEVICE) {
+		goto unlock_and_exit;
+	}
+
+	if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
+		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
+					 ACPI_GPE_LEVEL_TRIGGERED);
+		gpe_event_info->dispatch.device.node = device_node;
+		gpe_event_info->dispatch.device.next = NULL;
+	} else {
+		/* There are multiple devices to notify implicitly. */
+
+		notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+		if (!notify_object) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		notify_object->node = device_node;
+		notify_object->next = gpe_event_info->dispatch.device.next;
+		gpe_event_info->dispatch.device.next = notify_object;
+	}
+
+ out:
+	gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+	status = AE_OK;
+
+ unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_gpe_wake_mask
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *              Action          - Enable or Disable
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must
+ *              already be marked as a WAKE GPE.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+	struct acpi_gpe_register_info *gpe_register_info;
+	acpi_cpu_flags flags;
+	u32 register_bit;
+
+	ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/*
+	 * Ensure that we have a valid GPE number and that this GPE is in
+	 * fact a wake GPE
+	 */
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+		status = AE_TYPE;
+		goto unlock_and_exit;
+	}
+
+	gpe_register_info = gpe_event_info->register_info;
+	if (!gpe_register_info) {
+		status = AE_NOT_EXIST;
+		goto unlock_and_exit;
+	}
+
+	register_bit =
+	    acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
+	/* Perform the action */
+
+	switch (action) {
+	case ACPI_GPE_ENABLE:
+		ACPI_SET_BIT(gpe_register_info->enable_for_wake,
+			     (u8)register_bit);
+		break;
+
+	case ACPI_GPE_DISABLE:
+		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+			       (u8)register_bit);
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
+		status = AE_BAD_PARAMETER;
+		break;
+	}
+
+unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask)
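
Editor's note: acpi_set_gpe_wake_mask() only touches the enable_for_wake mask and requires the GPE to have been marked wake-capable first (otherwise it returns AE_TYPE, per the check above). A small suspend-path sketch, with placeholder handle/number values:

	/* Arm or disarm a wake GPE previously registered with
	 * acpi_setup_gpe_for_wake(); gpe_dev/gpe_num are placeholders. */
	void example_arm_wake_gpe(acpi_handle gpe_dev, u32 gpe_num, u8 arm)
	{
		acpi_status status;

		status = acpi_set_gpe_wake_mask(gpe_dev, gpe_num,
						arm ? ACPI_GPE_ENABLE
						    : ACPI_GPE_DISABLE);
		if (ACPI_FAILURE(status)) {
			/* AE_TYPE: the GPE was never marked ACPI_GPE_CAN_WAKE */
		}
	}
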
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_clear_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	status = acpi_hw_clear_gpe(gpe_event_info);
+
+      unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_status
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *              event_status    - Where the current status of the event will
+ *                                be returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+		    u32 gpe_number, acpi_event_status *event_status)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Obtain status on the requested GPE number */
+
+	status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
+
+	if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+		*event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+      unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
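
[Editorial note, not part of the patch] A hedged sketch of how a caller might combine acpi_get_gpe_status() and acpi_clear_gpe(). The helper name and the policy of clearing only set-but-unhandled GPEs are illustrative assumptions, not taken from this patch.

/* Illustrative: query a GPE and clear it if it is set but has no handler. */
static void example_check_and_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_event_status event_status;

	if (ACPI_FAILURE(acpi_get_gpe_status(gpe_device, gpe_number,
					     &event_status))) {
		return;
	}

	/* ACPI_EVENT_FLAG_SET: status bit asserted; _HANDLE: handler installed */

	if ((event_status & ACPI_EVENT_FLAG_SET) &&
	    !(event_status & ACPI_EVENT_FLAG_HANDLE)) {
		(void)acpi_clear_gpe(gpe_device, gpe_number);
	}
}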
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_all_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_disable_all_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_all_runtime_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_all_runtime_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_enable_all_runtime_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
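
[Editorial note, not part of the patch] These two interfaces are normally used as a pair around a sleep transition. The sketch below is conceptual only; a real suspend path also programs wake GPEs between the two calls, and the helper name is hypothetical.

/* Conceptual pairing only -- not a drop-in suspend implementation. */
static acpi_status example_sleep_gpe_transition(void)
{
	acpi_status status;

	/* Quiesce all GPE-driven event delivery before sleeping */

	status = acpi_disable_all_gpes();
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ... enter and exit the sleep state ... */

	/* Restore runtime GPEs on resume (wake-only GPEs are not re-enabled here) */

	return (acpi_enable_all_runtime_gpes());
}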
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *              gpe_block_address   - Address and space_id
+ *              register_count      - Number of GPE register pairs in the block
+ *              interrupt_number    - H/W interrupt for the block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not
+ *              enabled here.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+		       struct acpi_generic_address *gpe_block_address,
+		       u32 register_count, u32 interrupt_number)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_block_info *gpe_block;
+
+	ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
+
+	if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_validate_handle(gpe_device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * For user-installed GPE Block Devices, the gpe_block_base_number
+	 * is always zero
+	 */
+	status =
+	    acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
+				     interrupt_number, &gpe_block);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Install block in the device_object attached to the node */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+
+		/*
+		 * No object, create a new one (Device nodes do not always have
+		 * an attached object)
+		 */
+		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
+		if (!obj_desc) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		status =
+		    acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
+
+		/* Remove local reference to the object */
+
+		acpi_ut_remove_reference(obj_desc);
+
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+	}
+
+	/* Now install the GPE block in the device_object */
+
+	obj_desc->device.gpe_block = gpe_block;
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a previously installed block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
+
+	if (!gpe_device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_validate_handle(gpe_device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Get the device_object attached to the node */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc || !obj_desc->device.gpe_block) {
+		status = AE_NULL_OBJECT;
+		goto unlock_and_exit;
+	}
+
+	/* Delete the GPE block (but not the device_object) */
+
+	status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
+	if (ACPI_SUCCESS(status)) {
+		obj_desc->device.gpe_block = NULL;
+	}
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
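
[Editorial note, not part of the patch] To make the install/remove pair above concrete, here is a hypothetical sketch for a GPE block device. The handle, the 0x1000 I/O address, and the register count of 2 are invented example values.

/* Hypothetical values throughout -- illustration only. */
static acpi_status example_gpe_block_lifetime(acpi_handle gpe_block_device)
{
	struct acpi_generic_address block_address;
	acpi_status status;

	block_address.space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	block_address.bit_width = 0;
	block_address.bit_offset = 0;
	block_address.access_width = 0;
	block_address.address = 0x1000;	/* example I/O base */

	/* Two register pairs, routed to the SCI interrupt */

	status = acpi_install_gpe_block(gpe_block_device, &block_address,
					2, acpi_gbl_FADT.sci_interrupt);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ... GPEs in this block may now be enabled and used ... */

	return (acpi_remove_gpe_block(gpe_block_device));
}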
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_device
+ *
+ * PARAMETERS:  Index               - System GPE index (0 to current_gpe_count - 1)
+ *              gpe_device          - Where the parent GPE Device is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
+ *              gpe device indicates that the gpe number is contained in one of
+ *              the FADT-defined gpe blocks. Otherwise, the handle of the
+ *              parent GPE Block Device is returned.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+{
+	struct acpi_gpe_device_info info;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
+
+	if (!gpe_device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (index >= acpi_current_gpe_count) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Set up and walk the GPE list */
+
+	info.index = index;
+	info.status = AE_NOT_EXIST;
+	info.gpe_device = NULL;
+	info.next_block_base_index = 0;
+
+	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	*gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device);
+	return_ACPI_STATUS(info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
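
[Editorial note, not part of the patch] A small sketch of how the index-based lookup is meant to be used: walk indices 0 .. acpi_current_gpe_count - 1 and note whether each GPE lives in a FADT block (NULL device) or under a GPE block device. The helper name and the acpi_os_printf() reporting are illustrative.

/* Illustrative walk over every GPE currently known to ACPICA. */
static void example_list_gpe_devices(void)
{
	acpi_handle gpe_device;
	acpi_status status;
	u32 index;

	for (index = 0; index < acpi_current_gpe_count; index++) {
		status = acpi_get_gpe_device(index, &gpe_device);
		if (ACPI_FAILURE(status)) {
			continue;
		}

		acpi_os_printf("GPE %u: %s\n", index,
			       gpe_device ? "GPE block device" : "FADT block");
	}
}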
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index ce9314f..eb73867 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 1883220..745a42b 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index b73bc50..74162a1 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3c61b48..e7b372d 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -482,13 +482,11 @@
 	obj_desc->method.aml_length = aml_length;
 
 	/*
-	 * Disassemble the method flags. Split off the Arg Count
-	 * for efficiency
+	 * Disassemble the method flags. Split off the arg_count, Serialized
+	 * flag, and sync_level for efficiency.
 	 */
 	method_flags = (u8) operand[1]->integer.value;
 
-	obj_desc->method.method_flags =
-	    (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
 	obj_desc->method.param_count =
 	    (u8) (method_flags & AML_METHOD_ARG_COUNT);
 
@@ -497,6 +495,8 @@
 	 * created for this method when it is parsed.
 	 */
 	if (method_flags & AML_METHOD_SERIALIZED) {
+		obj_desc->method.info_flags = ACPI_METHOD_SERIALIZED;
+
 		/*
 		 * ACPI 1.0: sync_level = 0
 		 * ACPI 2.0: sync_level = sync_level in method declaration
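
[Editorial note, not part of the patch] For clarity, the flag decode performed by this hunk in acpi_ex_create_method() (and by the matching nsxfname.c hunk later in this patch) can be read as the standalone helper below. The helper name is hypothetical; the bit layout (bits 0-2 argument count, bit 3 SerializeFlag, bits 4-7 SyncLevel) is the standard AML Method op encoding.

/* Hypothetical restatement of the decode done on the AML method flags byte. */
static void example_decode_method_flags(u8 method_flags,
					union acpi_operand_object *obj_desc)
{
	obj_desc->method.param_count =
	    (u8)(method_flags & AML_METHOD_ARG_COUNT);	/* bits 0-2 */

	if (method_flags & AML_METHOD_SERIALIZED) {	/* bit 3 */
		obj_desc->method.info_flags = ACPI_METHOD_SERIALIZED;
		obj_desc->method.sync_level =
		    (u8)((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);	/* bits 4-7 */
	}
}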
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index be8c98b..c7a2f1e 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index f067bbb..61b8c0e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@
 
 static struct acpi_exdump_info acpi_ex_dump_method[9] = {
 	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.info_flags), "Info Flags"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
 	 "Parameter Count"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index f17d2ff..0bde223 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 38293fd..6c79c29 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 95db4be0..703d88e 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 6af14e4..be1c56e 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index d11e539..49ec049 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 84e4d18..236ead1 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 10e104c..2571b4a 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 7a08d23..1b48d9d 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 4b50730..f4a2787 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 7aae29f..cc95e20 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index de17e10..f0d5e14 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 1fa4289..55997e4 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 7ca35ea..db502cd 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 8c97cfd..e3bb00c 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 1624436..c0c8842 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index d4af684..a979017 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index e972b66..dc665cc 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 675aaa9..df66e7b 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4093522..8ad9314 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index b44274a..fc380d3 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 14750db..f610d88 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -62,10 +62,10 @@
  * PARAMETERS:	gpe_event_info	    - Info block for the GPE
  *		gpe_register_info   - Info block for the GPE register
  *
- * RETURN:	Status
+ * RETURN:	Register mask with a one in the GPE bit position
  *
- * DESCRIPTION:	Compute GPE enable mask with one bit corresponding to the given
- *		GPE set.
+ * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
+ *              correct position for the input GPE.
  *
  ******************************************************************************/
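
[Editorial note, not part of the patch] The reworded header above says the routine returns a register mask with a single bit set in the GPE's position. Concretely, that mask is just the GPE's offset within its register, as in this illustrative helper (hypothetical name, example numbers in the comment).

/*
 * Illustrative: GPE 0x13 in a register whose base GPE number is 0x10
 * occupies bit 3, so the computed mask is 0x08.
 */
static u32 example_gpe_register_bit(u32 gpe_number, u32 base_gpe_number)
{
	return ((u32)1 << (gpe_number - base_gpe_number));
}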
 
@@ -85,12 +85,12 @@
  *
  * RETURN:	Status
  *
- * DESCRIPTION: Enable or disable a single GPE in its enable register.
+ * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
  *
  ******************************************************************************/
 
 acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
 {
 	struct acpi_gpe_register_info *gpe_register_info;
 	acpi_status status;
@@ -113,14 +113,20 @@
 		return (status);
 	}
 
-	/* Set ot clear just the bit that corresponds to this GPE */
+	/* Set or clear just the bit that corresponds to this GPE */
 
 	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
 						gpe_register_info);
 	switch (action) {
-	case ACPI_GPE_COND_ENABLE:
-		if (!(register_bit & gpe_register_info->enable_for_run))
+	case ACPI_GPE_CONDITIONAL_ENABLE:
+
+		/* Only enable if the enable_for_run bit is set */
+
+		if (!(register_bit & gpe_register_info->enable_for_run)) {
 			return (AE_BAD_PARAMETER);
+		}
+
+		/*lint -fallthrough */
 
 	case ACPI_GPE_ENABLE:
 		ACPI_SET_BIT(enable_mask, register_bit);
@@ -131,7 +137,7 @@
 		break;
 
 	default:
-		ACPI_ERROR((AE_INFO, "Invalid action\n"));
+		ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
 		return (AE_BAD_PARAMETER);
 	}
 
@@ -168,13 +174,13 @@
 		return (AE_NOT_EXIST);
 	}
 
-	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
-						gpe_register_info);
-
 	/*
 	 * Write a one to the appropriate bit in the status register to
 	 * clear this GPE.
 	 */
+	register_bit =
+	    acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
 	status = acpi_hw_write(register_bit,
 			       &gpe_register_info->status_address);
 
@@ -201,8 +207,8 @@
 	u32 in_byte;
 	u32 register_bit;
 	struct acpi_gpe_register_info *gpe_register_info;
-	acpi_status status;
 	acpi_event_status local_event_status = 0;
+	acpi_status status;
 
 	ACPI_FUNCTION_ENTRY();
 
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index ad21c7d..050fd22 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5d1273b..55accb7 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3796811..2ac28bb 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 1ef8e0b..9c8eb71 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e1d9c77..5f16058 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 50cc3be..6f98d21 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 0cd925b..d93172f 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -163,9 +163,9 @@
 #else
 				/* Mark this as a very SPECIAL method */
 
-				obj_desc->method.method_flags =
-				    AML_METHOD_INTERNAL_ONLY;
-				obj_desc->method.extra.implementation =
+				obj_desc->method.info_flags =
+				    ACPI_METHOD_INTERNAL_ONLY;
+				obj_desc->method.dispatch.implementation =
 				    acpi_ut_osi_implementation;
 #endif
 				break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1e5ff80..1d0ef15 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -234,8 +234,8 @@
 			 * modified the namespace. This is used for cleanup when the
 			 * method exits.
 			 */
-			walk_state->method_desc->method.flags |=
-			    AOPOBJ_MODIFIED_NAMESPACE;
+			walk_state->method_desc->method.info_flags |=
+			    ACPI_METHOD_MODIFIED_NAMESPACE;
 		}
 	}
 
@@ -341,6 +341,7 @@
 {
 	struct acpi_namespace_node *child_node = NULL;
 	u32 level = 1;
+	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
 
@@ -348,6 +349,13 @@
 		return_VOID;
 	}
 
+	/* Lock namespace for possible update */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_VOID;
+	}
+
 	/*
 	 * Traverse the tree of objects until we bubble back up
 	 * to where we started.
@@ -397,6 +405,7 @@
 		}
 	}
 
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_VOID;
 }
 
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index a54dc39..b683cc2 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -624,9 +624,22 @@
 		     acpi_owner_id owner_id, acpi_handle start_handle)
 {
 	struct acpi_walk_info info;
+	acpi_status status;
 
 	ACPI_FUNCTION_ENTRY();
 
+	/*
+	 * Just lock the entire namespace for the duration of the dump.
+	 * We don't want any changes to the namespace during this time,
+	 * especially the temporary nodes since we are going to display
+	 * them also.
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_printf("Could not acquire namespace mutex\n");
+		return;
+	}
+
 	info.debug_level = ACPI_LV_TABLES;
 	info.owner_id = owner_id;
 	info.display_type = display_type;
@@ -636,6 +649,8 @@
 				     ACPI_NS_WALK_TEMP_NODES,
 				     acpi_ns_dump_one_object, NULL,
 				     (void *)&info, NULL);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 }
 #endif				/* ACPI_FUTURE_USAGE */
 
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index d2a9792..2ed294b 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f52829c..c1bd02b 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -389,7 +389,7 @@
 	 * acpi_gbl_root_node->Object is NULL at PASS1.
 	 */
 	if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
-		method_obj->method.extra.handler =
+		method_obj->method.dispatch.handler =
 		    parent_node->object->device.handler;
 	}
 
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 0cac7ec..fd7c638 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index df18be9..5f7dc69 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d3104af..d5fa520 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 41a9213..3bb8bf1 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 5808c89..b3234fa 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 7096bcd..9fb03fa 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d1c1366..1d76ac8 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 4ef9f43..973883b 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 41102a8..28b0d7a 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a7d6ad9..cb1b104 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 2cd5be8..345f0c3 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index ebef8a7..c53f004 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b01e45a..3fd4526 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -603,10 +603,9 @@
 	method_obj->method.param_count = (u8)
 	    (method_flags & AML_METHOD_ARG_COUNT);
 
-	method_obj->method.method_flags = (u8)
-	    (method_flags & ~AML_METHOD_ARG_COUNT);
-
 	if (method_flags & AML_METHOD_SERIALIZED) {
+		method_obj->method.info_flags = ACPI_METHOD_SERIALIZED;
+
 		method_obj->method.sync_level = (u8)
 		    ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
 	}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index a1f04e9..db7660f 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 7df1a4c..e1fad0e 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 2f2e776..01dd70d 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -655,7 +655,7 @@
 		method_obj->method.aml_start = aml_start;
 		method_obj->method.aml_length = aml_length;
 		method_obj->method.owner_id = owner_id;
-		method_obj->method.flags |= AOPOBJ_MODULE_LEVEL;
+		method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL;
 
 		/*
 		 * Save the parent node in next_object. This is cheating, but we
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 2b0c3be..bed08de 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8d81542..9bb0cbd 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,6 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "amlcode.h"
-#include "acnamesp.h"
 #include "acinterp.h"
 
 #define _COMPONENT          ACPI_PARSER
@@ -539,24 +538,16 @@
 			/* Check for possible multi-thread reentrancy problem */
 
 			if ((status == AE_ALREADY_EXISTS) &&
-			    (!walk_state->method_desc->method.mutex)) {
-				ACPI_INFO((AE_INFO,
-					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
-					   walk_state->method_node->name.
-					   ascii));
-
+			    (!(walk_state->method_desc->method.
+			       info_flags & ACPI_METHOD_SERIALIZED))) {
 				/*
-				 * Method tried to create an object twice. The probable cause is
-				 * that the method cannot handle reentrancy.
-				 *
-				 * The method is marked not_serialized, but it tried to create
-				 * a named object, causing the second thread entrance to fail.
-				 * Workaround this problem by marking the method permanently
-				 * as Serialized.
+				 * Method is not serialized and tried to create an object
+				 * twice. The probable cause is that the method cannot
+				 * handle reentrancy. Mark as "pending serialized" now, and
+				 * then mark "serialized" when the last thread exits.
 				 */
-				walk_state->method_desc->method.method_flags |=
-				    AML_METHOD_SERIALIZED;
-				walk_state->method_desc->method.sync_level = 0;
+				walk_state->method_desc->method.info_flags |=
+				    ACPI_METHOD_SERIALIZED_PENDING;
 			}
 		}
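
[Editorial note, not part of the patch] The comment above describes a two-step workaround: mark the method "pending serialized" here, and complete the conversion when the last thread exits the method. A hedged sketch of that completion step follows; the helper name is hypothetical, and in the real code the work is done by the dispatcher when the method terminates.

/* Hypothetical sketch of the completion step performed at method exit. */
static void example_complete_pending_serialization(union acpi_operand_object
						   *method_desc)
{
	if (method_desc->method.info_flags & ACPI_METHOD_SERIALIZED_PENDING) {

		/* Convert "pending" into a permanent Serialized marking */

		method_desc->method.info_flags &=
		    ~ACPI_METHOD_SERIALIZED_PENDING;
		method_desc->method.info_flags |= ACPI_METHOD_SERIALIZED;
		method_desc->method.sync_level = 0;
	}
}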
 
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 40e2b27..a5faa13 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index d4b970c..f1464c0 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index fe29eee..7eda785 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 8abb962..3312d63 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index c42f067..8086805 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,6 @@
 #include "acdispat.h"
 #include "acinterp.h"
 #include "actables.h"
-#include "amlcode.h"
 
 #define _COMPONENT          ACPI_PARSER
 ACPI_MODULE_NAME("psxface")
@@ -285,15 +284,15 @@
 		goto cleanup;
 	}
 
-	if (info->obj_desc->method.flags & AOPOBJ_MODULE_LEVEL) {
+	if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
 		walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
 	}
 
 	/* Invoke an internal method if necessary */
 
-	if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
+	if (info->obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
 		status =
-		    info->obj_desc->method.extra.implementation(walk_state);
+		    info->obj_desc->method.dispatch.implementation(walk_state);
 		info->return_object = walk_state->return_desc;
 
 		/* Cleanup states */
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 226c806..9e66f90 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index d6ebf7e..3a8a89e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index c80a2ee..4ce6e11 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index f859b03..33db752 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 1fd868b..f9ea608 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 33bff17..0c7efef 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 545da40..50b8ad2 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 7335f22..1bfcef7 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 887b8ba..7cc6d86 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index f8cd9e8..410264b 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 491191e..231811e 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 9f6a6e7..2ff657a 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index d2ff432..428d44e 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 989d5c8..a55cb2b 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 83d7af8..48db094 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 34f9c2b..0f2d395 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4a8b9e6..4b7085d 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index fd2c07d..7eb6c6c 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 8f08962..0a69735 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 6fef83f..aded299 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index f21c486..a9bcd81 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index ed794cd..31f5a78 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 22f59ef..18f73c9 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index e87bc67..97dd9bb 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -768,7 +768,7 @@
 	acpi_gbl_gpe_fadt_blocks[0] = NULL;
 	acpi_gbl_gpe_fadt_blocks[1] = NULL;
 	acpi_current_gpe_count = 0;
-	acpi_all_gpes_initialized = FALSE;
+	acpi_gbl_all_gpes_initialized = FALSE;
 
 	/* Global handlers */
 
@@ -778,6 +778,7 @@
 	acpi_gbl_init_handler = NULL;
 	acpi_gbl_table_handler = NULL;
 	acpi_gbl_interface_handler = NULL;
+	acpi_gbl_global_event_handler = NULL;
 
 	/* Global Lock support */
 
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index d290632..b679ea6 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index c1b1c80..191b682 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b081cd4..f6bb75c6 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49cf7b7..ce481da 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c7d0e05..c33a852 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index d9efa49..a946c68 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,6 +85,7 @@
 
 	spin_lock_init(acpi_gbl_gpe_lock);
 	spin_lock_init(acpi_gbl_hardware_lock);
+	spin_lock_init(acpi_ev_global_lock_pending_lock);
 
 	/* Mutex for _OSI support */
 	status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index fd1fa27..188340a 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 18c59a8..1fb10cb 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 7965919..84e0518 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index d35d109..30c21e1 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 1f484c9..98ad125 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6f12e31..916ae09 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 18df1e9..ef0581f 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -109,6 +109,8 @@
 		return sizeof(*estatus) + estatus->data_length;
 }
 
+void apei_estatus_print(const char *pfx,
+			const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
 #endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index f4cf2fc..31464a0 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -46,6 +46,317 @@
 }
 EXPORT_SYMBOL_GPL(cper_next_record_id);
 
+static const char *cper_severity_strs[] = {
+	"recoverable",
+	"fatal",
+	"corrected",
+	"info",
+};
+
+static const char *cper_severity_str(unsigned int severity)
+{
+	return severity < ARRAY_SIZE(cper_severity_strs) ?
+		cper_severity_strs[severity] : "unknown";
+}
+
+/*
+ * cper_print_bits - print strings for set bits
+ * @pfx: prefix for each line, including log level and prefix string
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: size of the string array @strs
+ *
+ * For each set bit in @bits, print the corresponding string in @strs.
+ * If the output length is longer than 80 characters, multiple lines
+ * will be printed, with @pfx at the beginning of each line.
+ */
+static void cper_print_bits(const char *pfx, unsigned int bits,
+			    const char *strs[], unsigned int strs_size)
+{
+	int i, len = 0;
+	const char *str;
+	char buf[84];
+
+	for (i = 0; i < strs_size; i++) {
+		if (!(bits & (1U << i)))
+			continue;
+		str = strs[i];
+		if (len && len + strlen(str) + 2 > 80) {
+			printk("%s\n", buf);
+			len = 0;
+		}
+		if (!len)
+			len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
+		else
+			len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
+	}
+	if (len)
+		printk("%s\n", buf);
+}
+
+static const char *cper_proc_type_strs[] = {
+	"IA32/X64",
+	"IA64",
+};
+
+static const char *cper_proc_isa_strs[] = {
+	"IA32",
+	"IA64",
+	"X64",
+};
+
+static const char *cper_proc_error_type_strs[] = {
+	"cache error",
+	"TLB error",
+	"bus error",
+	"micro-architectural error",
+};
+
+static const char *cper_proc_op_strs[] = {
+	"unknown or generic",
+	"data read",
+	"data write",
+	"instruction execution",
+};
+
+static const char *cper_proc_flag_strs[] = {
+	"restartable",
+	"precise IP",
+	"overflow",
+	"corrected",
+};
+
+static void cper_print_proc_generic(const char *pfx,
+				    const struct cper_sec_proc_generic *proc)
+{
+	if (proc->validation_bits & CPER_PROC_VALID_TYPE)
+		printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
+		       proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ?
+		       cper_proc_type_strs[proc->proc_type] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_ISA)
+		printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
+		       proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ?
+		       cper_proc_isa_strs[proc->proc_isa] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
+		printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
+		cper_print_bits(pfx, proc->proc_error_type,
+				cper_proc_error_type_strs,
+				ARRAY_SIZE(cper_proc_error_type_strs));
+	}
+	if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
+		printk("%s""operation: %d, %s\n", pfx, proc->operation,
+		       proc->operation < ARRAY_SIZE(cper_proc_op_strs) ?
+		       cper_proc_op_strs[proc->operation] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
+		printk("%s""flags: 0x%02x\n", pfx, proc->flags);
+		cper_print_bits(pfx, proc->flags, cper_proc_flag_strs,
+				ARRAY_SIZE(cper_proc_flag_strs));
+	}
+	if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
+		printk("%s""level: %d\n", pfx, proc->level);
+	if (proc->validation_bits & CPER_PROC_VALID_VERSION)
+		printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
+	if (proc->validation_bits & CPER_PROC_VALID_ID)
+		printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
+	if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
+		printk("%s""target_address: 0x%016llx\n",
+		       pfx, proc->target_addr);
+	if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
+		printk("%s""requestor_id: 0x%016llx\n",
+		       pfx, proc->requestor_id);
+	if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
+		printk("%s""responder_id: 0x%016llx\n",
+		       pfx, proc->responder_id);
+	if (proc->validation_bits & CPER_PROC_VALID_IP)
+		printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
+}
+
+static const char *cper_mem_err_type_strs[] = {
+	"unknown",
+	"no error",
+	"single-bit ECC",
+	"multi-bit ECC",
+	"single-symbol chipkill ECC",
+	"multi-symbol chipkill ECC",
+	"master abort",
+	"target abort",
+	"parity error",
+	"watchdog timeout",
+	"invalid address",
+	"mirror Broken",
+	"memory sparing",
+	"scrub corrected error",
+	"scrub uncorrected error",
+};
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+{
+	if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+		printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+	if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)
+		printk("%s""physical_address: 0x%016llx\n",
+		       pfx, mem->physical_addr);
+	if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK)
+		printk("%s""physical_address_mask: 0x%016llx\n",
+		       pfx, mem->physical_addr_mask);
+	if (mem->validation_bits & CPER_MEM_VALID_NODE)
+		printk("%s""node: %d\n", pfx, mem->node);
+	if (mem->validation_bits & CPER_MEM_VALID_CARD)
+		printk("%s""card: %d\n", pfx, mem->card);
+	if (mem->validation_bits & CPER_MEM_VALID_MODULE)
+		printk("%s""module: %d\n", pfx, mem->module);
+	if (mem->validation_bits & CPER_MEM_VALID_BANK)
+		printk("%s""bank: %d\n", pfx, mem->bank);
+	if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
+		printk("%s""device: %d\n", pfx, mem->device);
+	if (mem->validation_bits & CPER_MEM_VALID_ROW)
+		printk("%s""row: %d\n", pfx, mem->row);
+	if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
+		printk("%s""column: %d\n", pfx, mem->column);
+	if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+		printk("%s""bit_position: %d\n", pfx, mem->bit_pos);
+	if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+		printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id);
+	if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+		printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id);
+	if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
+		printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id);
+	if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+		u8 etype = mem->error_type;
+		printk("%s""error_type: %d, %s\n", pfx, etype,
+		       etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
+		       cper_mem_err_type_strs[etype] : "unknown");
+	}
+}
+
+static const char *cper_pcie_port_type_strs[] = {
+	"PCIe end point",
+	"legacy PCI end point",
+	"unknown",
+	"unknown",
+	"root port",
+	"upstream switch port",
+	"downstream switch port",
+	"PCIe to PCI/PCI-X bridge",
+	"PCI/PCI-X to PCIe bridge",
+	"root complex integrated endpoint device",
+	"root complex event collector",
+};
+
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
+{
+	if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
+		printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
+		       pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
+		       cper_pcie_port_type_strs[pcie->port_type] : "unknown");
+	if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
+		printk("%s""version: %d.%d\n", pfx,
+		       pcie->version.major, pcie->version.minor);
+	if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
+		printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
+		       pcie->command, pcie->status);
+	if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
+		const __u8 *p;
+		printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
+		       pcie->device_id.segment, pcie->device_id.bus,
+		       pcie->device_id.device, pcie->device_id.function);
+		printk("%s""slot: %d\n", pfx,
+		       pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+		printk("%s""secondary_bus: 0x%02x\n", pfx,
+		       pcie->device_id.secondary_bus);
+		printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+		       pcie->device_id.vendor_id, pcie->device_id.device_id);
+		p = pcie->device_id.class_code;
+		printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+	}
+	if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+		printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+		       pcie->serial_number.lower, pcie->serial_number.upper);
+	if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
+		printk(
+	"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+	pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+}
+
+static const char *apei_estatus_section_flag_strs[] = {
+	"primary",
+	"containment warning",
+	"reset",
+	"threshold exceeded",
+	"resource not accessible",
+	"latent error",
+};
+
+static void apei_estatus_print_section(
+	const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
+{
+	uuid_le *sec_type = (uuid_le *)gdata->section_type;
+	__u16 severity;
+
+	severity = gdata->error_severity;
+	printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity,
+	       cper_severity_str(severity));
+	printk("%s""flags: 0x%02x\n", pfx, gdata->flags);
+	cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs,
+			ARRAY_SIZE(apei_estatus_section_flag_strs));
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+		printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+		printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+
+	if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
+		struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1);
+		printk("%s""section_type: general processor error\n", pfx);
+		if (gdata->error_data_length >= sizeof(*proc_err))
+			cper_print_proc_generic(pfx, proc_err);
+		else
+			goto err_section_too_small;
+	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+		struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
+		printk("%s""section_type: memory error\n", pfx);
+		if (gdata->error_data_length >= sizeof(*mem_err))
+			cper_print_mem(pfx, mem_err);
+		else
+			goto err_section_too_small;
+	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+		struct cper_sec_pcie *pcie = (void *)(gdata + 1);
+		printk("%s""section_type: PCIe error\n", pfx);
+		if (gdata->error_data_length >= sizeof(*pcie))
+			cper_print_pcie(pfx, pcie);
+		else
+			goto err_section_too_small;
+	} else
+		printk("%s""section type: unknown, %pUl\n", pfx, sec_type);
+
+	return;
+
+err_section_too_small:
+	pr_err(FW_WARN "error section length is too small\n");
+}
+
+void apei_estatus_print(const char *pfx,
+			const struct acpi_hest_generic_status *estatus)
+{
+	struct acpi_hest_generic_data *gdata;
+	unsigned int data_len, gedata_len;
+	int sec_no = 0;
+	__u16 severity;
+
+	printk("%s""APEI generic hardware error status\n", pfx);
+	severity = estatus->error_severity;
+	printk("%s""severity: %d, %s\n", pfx, severity,
+	       cper_severity_str(severity));
+	data_len = estatus->data_length;
+	gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+	while (data_len > sizeof(*gdata)) {
+		gedata_len = gdata->error_data_length;
+		apei_estatus_print_section(pfx, gdata, sec_no);
+		data_len -= gedata_len + sizeof(*gdata);
+		sec_no++;
+	}
+}
+EXPORT_SYMBOL_GPL(apei_estatus_print);
+
 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
 {
 	if (estatus->data_length &&
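[Editor's note: the wrapping logic in the new cper_print_bits() above can be exercised outside the kernel. A minimal user-space sketch, assuming plain printf() in place of printk() and a hypothetical set of flag names, is:]

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Print set-bit names, wrapping at ~80 columns, each line starting with pfx. */
static void print_bits(const char *pfx, unsigned int bits,
		       const char *strs[], unsigned int strs_size)
{
	char buf[84];
	int len = 0;
	unsigned int i;

	for (i = 0; i < strs_size; i++) {
		if (!(bits & (1U << i)))
			continue;
		if (len && len + strlen(strs[i]) + 2 > 80) {
			printf("%s\n", buf);
			len = 0;
		}
		if (!len)
			len = snprintf(buf, sizeof(buf), "%s%s", pfx, strs[i]);
		else
			len += snprintf(buf + len, sizeof(buf) - len, ", %s", strs[i]);
	}
	if (len)
		printf("%s\n", buf);
}

int main(void)
{
	/* Hypothetical flag names, mirroring cper_proc_flag_strs. */
	const char *flags[] = { "restartable", "precise IP", "overflow", "corrected" };

	print_bits("{demo} ", 0x0b, flags, ARRAY_SIZE(flags));	/* bits 0, 1 and 3 set */
	return 0;
}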
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index cf29df6..096aebf 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -39,7 +39,7 @@
 #define EINJ_PFX "EINJ: "
 
 #define SPIN_UNIT		100			/* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 
 /*
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 5850d32..cf6db6b7 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -53,7 +53,7 @@
 				     sizeof(struct acpi_table_erst)))
 
 #define SPIN_UNIT		100			/* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 #define FIRMWARE_MAX_STALL	50			/* 50us */
 
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0d505e5..d1d484d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,10 +12,6 @@
  * For more information about Generic Hardware Error Source, please
  * refer to ACPI Specification version 4.0, section 17.3.2.6
  *
- * Now, only SCI notification type and memory errors are
- * supported. More notification type and hardware error type will be
- * added later.
- *
  * Copyright 2010 Intel Corp.
  *   Author: Huang Ying <ying.huang@intel.com>
  *
@@ -39,14 +35,18 @@
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
+#include <linux/timer.h>
 #include <linux/cper.h>
 #include <linux/kdebug.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
 #include <acpi/apei.h>
 #include <acpi/atomicio.h>
 #include <acpi/hed.h>
 #include <asm/mce.h>
+#include <asm/tlbflush.h>
 
 #include "apei-internal.h"
 
@@ -55,42 +55,131 @@
 #define GHES_ESTATUS_MAX_SIZE		65536
 
 /*
- * One struct ghes is created for each generic hardware error
- * source.
- *
+ * One struct ghes is created for each generic hardware error source.
  * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
- * handler. Handler for one generic hardware error source is only
- * triggered after the previous one is done. So handler can uses
- * struct ghes without locking.
+ * handler.
  *
  * estatus: memory buffer for error status block, allocated during
  * HEST parsing.
  */
 #define GHES_TO_CLEAR		0x0001
+#define GHES_EXITING		0x0002
 
 struct ghes {
 	struct acpi_hest_generic *generic;
 	struct acpi_hest_generic_status *estatus;
-	struct list_head list;
 	u64 buffer_paddr;
 	unsigned long flags;
+	union {
+		struct list_head list;
+		struct timer_list timer;
+		unsigned int irq;
+	};
 };
 
+static int ghes_panic_timeout	__read_mostly = 30;
+
 /*
- * Error source lists, one list for each notification method. The
- * members in lists are struct ghes.
+ * All error sources notified with SCI share one notifier function,
+ * so they need to be linked and checked one by one.  The same
+ * applies to NMI.
  *
- * The list members are only added in HEST parsing and deleted during
- * module_exit, that is, single-threaded. So no lock is needed for
- * that.
- *
- * But the mutual exclusion is needed between members adding/deleting
- * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is
- * used for that.
+ * RCU is used for these lists, so ghes_list_mutex is only used for
+ * list changing, not for traversing.
  */
 static LIST_HEAD(ghes_sci);
+static LIST_HEAD(ghes_nmi);
 static DEFINE_MUTEX(ghes_list_mutex);
 
+/*
+ * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
+ * mutual exclusion.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
+
+/*
+ * The memory area used to transfer hardware error information from
+ * BIOS to Linux can be determined only in the NMI, IRQ or timer
+ * handler, but the regular ioremap cannot be used in atomic context,
+ * so a special atomic version of ioremap is implemented here.
+ */
+
+/*
+ * Two virtual pages are used, one for NMI context, the other for
+ * IRQ/PROCESS context
+ */
+#define GHES_IOREMAP_PAGES		2
+#define GHES_IOREMAP_NMI_PAGE(base)	(base)
+#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)
+
+/* virtual memory area for atomic ioremap */
+static struct vm_struct *ghes_ioremap_area;
+/*
+ * These two spinlocks prevent the atomic ioremap virtual memory
+ * area from being mapped simultaneously.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
+static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+
+static int ghes_ioremap_init(void)
+{
+	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
+		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+	if (!ghes_ioremap_area) {
+		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void ghes_ioremap_exit(void)
+{
+	free_vm_area(ghes_ioremap_area);
+}
+
+static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
+{
+	unsigned long vaddr;
+
+	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
+	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+			   pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+	return (void __iomem *)vaddr;
+}
+
+static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
+{
+	unsigned long vaddr;
+
+	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
+	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+			   pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+	return (void __iomem *)vaddr;
+}
+
+static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+{
+	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+	void *base = ghes_ioremap_area->addr;
+
+	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
+	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+	__flush_tlb_one(vaddr);
+}
+
+static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+{
+	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+	void *base = ghes_ioremap_area->addr;
+
+	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
+	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+	__flush_tlb_one(vaddr);
+}
+
 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 {
 	struct ghes *ghes;
@@ -101,7 +190,6 @@
 	if (!ghes)
 		return ERR_PTR(-ENOMEM);
 	ghes->generic = generic;
-	INIT_LIST_HEAD(&ghes->list);
 	rc = acpi_pre_map_gar(&generic->error_status_address);
 	if (rc)
 		goto err_free;
@@ -158,22 +246,41 @@
 	}
 }
 
-/* SCI handler run in work queue, so ioremap can be used here */
-static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
-				 int from_phys)
+static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+				  int from_phys)
 {
-	void *vaddr;
+	void __iomem *vaddr;
+	unsigned long flags = 0;
+	int in_nmi = in_nmi();
+	u64 offset;
+	u32 trunk;
 
-	vaddr = ioremap_cache(paddr, len);
-	if (!vaddr)
-		return -ENOMEM;
-	if (from_phys)
-		memcpy(buffer, vaddr, len);
-	else
-		memcpy(vaddr, buffer, len);
-	iounmap(vaddr);
-
-	return 0;
+	while (len > 0) {
+		offset = paddr - (paddr & PAGE_MASK);
+		if (in_nmi) {
+			raw_spin_lock(&ghes_ioremap_lock_nmi);
+			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
+		} else {
+			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
+			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
+		}
+		trunk = PAGE_SIZE - offset;
+		trunk = min(trunk, len);
+		if (from_phys)
+			memcpy_fromio(buffer, vaddr + offset, trunk);
+		else
+			memcpy_toio(vaddr + offset, buffer, trunk);
+		len -= trunk;
+		paddr += trunk;
+		buffer += trunk;
+		if (in_nmi) {
+			ghes_iounmap_nmi(vaddr);
+			raw_spin_unlock(&ghes_ioremap_lock_nmi);
+		} else {
+			ghes_iounmap_irq(vaddr);
+			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
+		}
+	}
 }
 
 static int ghes_read_estatus(struct ghes *ghes, int silent)
@@ -194,10 +301,8 @@
 	if (!buf_paddr)
 		return -ENOENT;
 
-	rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
-				   sizeof(*ghes->estatus), 1);
-	if (rc)
-		return rc;
+	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+			      sizeof(*ghes->estatus), 1);
 	if (!ghes->estatus->block_status)
 		return -ENOENT;
 
@@ -212,17 +317,15 @@
 		goto err_read_block;
 	if (apei_estatus_check_header(ghes->estatus))
 		goto err_read_block;
-	rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
-				   buf_paddr + sizeof(*ghes->estatus),
-				   len - sizeof(*ghes->estatus), 1);
-	if (rc)
-		return rc;
+	ghes_copy_tofrom_phys(ghes->estatus + 1,
+			      buf_paddr + sizeof(*ghes->estatus),
+			      len - sizeof(*ghes->estatus), 1);
 	if (apei_estatus_check(ghes->estatus))
 		goto err_read_block;
 	rc = 0;
 
 err_read_block:
-	if (rc && !silent)
+	if (rc && !silent && printk_ratelimit())
 		pr_warning(FW_WARN GHES_PFX
 			   "Failed to read error status block!\n");
 	return rc;
@@ -255,11 +358,26 @@
 		}
 #endif
 	}
+}
 
-	if (!processed && printk_ratelimit())
-		pr_warning(GHES_PFX
-		"Unknown error record from generic hardware error source: %d\n",
-			   ghes->generic->header.source_id);
+static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+{
+	/* Not more than 2 messages every 5 seconds */
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
+
+	if (pfx == NULL) {
+		if (ghes_severity(ghes->estatus->error_severity) <=
+		    GHES_SEV_CORRECTED)
+			pfx = KERN_WARNING HW_ERR;
+		else
+			pfx = KERN_ERR HW_ERR;
+	}
+	if (__ratelimit(&ratelimit)) {
+		printk(
+	"%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+	pfx, ghes->generic->header.source_id);
+		apei_estatus_print(pfx, ghes->estatus);
+	}
 }
 
 static int ghes_proc(struct ghes *ghes)
@@ -269,6 +387,7 @@
 	rc = ghes_read_estatus(ghes, 0);
 	if (rc)
 		goto out;
+	ghes_print_estatus(NULL, ghes);
 	ghes_do_proc(ghes);
 
 out:
@@ -276,6 +395,42 @@
 	return 0;
 }
 
+static void ghes_add_timer(struct ghes *ghes)
+{
+	struct acpi_hest_generic *g = ghes->generic;
+	unsigned long expire;
+
+	if (!g->notify.poll_interval) {
+		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
+			   g->header.source_id);
+		return;
+	}
+	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
+	ghes->timer.expires = round_jiffies_relative(expire);
+	add_timer(&ghes->timer);
+}
+
+static void ghes_poll_func(unsigned long data)
+{
+	struct ghes *ghes = (void *)data;
+
+	ghes_proc(ghes);
+	if (!(ghes->flags & GHES_EXITING))
+		ghes_add_timer(ghes);
+}
+
+static irqreturn_t ghes_irq_func(int irq, void *data)
+{
+	struct ghes *ghes = data;
+	int rc;
+
+	rc = ghes_proc(ghes);
+	if (rc)
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
 static int ghes_notify_sci(struct notifier_block *this,
 				  unsigned long event, void *data)
 {
@@ -292,10 +447,63 @@
 	return ret;
 }
 
+static int ghes_notify_nmi(struct notifier_block *this,
+				  unsigned long cmd, void *data)
+{
+	struct ghes *ghes, *ghes_global = NULL;
+	int sev, sev_global = -1;
+	int ret = NOTIFY_DONE;
+
+	if (cmd != DIE_NMI)
+		return ret;
+
+	raw_spin_lock(&ghes_nmi_lock);
+	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+		if (ghes_read_estatus(ghes, 1)) {
+			ghes_clear_estatus(ghes);
+			continue;
+		}
+		sev = ghes_severity(ghes->estatus->error_severity);
+		if (sev > sev_global) {
+			sev_global = sev;
+			ghes_global = ghes;
+		}
+		ret = NOTIFY_STOP;
+	}
+
+	if (ret == NOTIFY_DONE)
+		goto out;
+
+	if (sev_global >= GHES_SEV_PANIC) {
+		oops_begin();
+		ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+		/* reboot to log the error! */
+		if (panic_timeout == 0)
+			panic_timeout = ghes_panic_timeout;
+		panic("Fatal hardware error!");
+	}
+
+	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+		if (!(ghes->flags & GHES_TO_CLEAR))
+			continue;
+		/* Do not print estatus because printk is not NMI safe */
+		ghes_do_proc(ghes);
+		ghes_clear_estatus(ghes);
+	}
+
+out:
+	raw_spin_unlock(&ghes_nmi_lock);
+	return ret;
+}
+
 static struct notifier_block ghes_notifier_sci = {
 	.notifier_call = ghes_notify_sci,
 };
 
+static struct notifier_block ghes_notifier_nmi = {
+	.notifier_call = ghes_notify_nmi,
+};
+
 static int __devinit ghes_probe(struct platform_device *ghes_dev)
 {
 	struct acpi_hest_generic *generic;
@@ -306,18 +514,27 @@
 	if (!generic->enabled)
 		return -ENODEV;
 
-	if (generic->error_block_length <
-	    sizeof(struct acpi_hest_generic_status)) {
-		pr_warning(FW_BUG GHES_PFX
-"Invalid error block length: %u for generic hardware error source: %d\n",
-			   generic->error_block_length,
+	switch (generic->notify.type) {
+	case ACPI_HEST_NOTIFY_POLLED:
+	case ACPI_HEST_NOTIFY_EXTERNAL:
+	case ACPI_HEST_NOTIFY_SCI:
+	case ACPI_HEST_NOTIFY_NMI:
+		break;
+	case ACPI_HEST_NOTIFY_LOCAL:
+		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
 			   generic->header.source_id);
 		goto err;
+	default:
+		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
+			   generic->notify.type, generic->header.source_id);
+		goto err;
 	}
-	if (generic->records_to_preallocate == 0) {
-		pr_warning(FW_BUG GHES_PFX
-"Invalid records to preallocate: %u for generic hardware error source: %d\n",
-			   generic->records_to_preallocate,
+
+	rc = -EIO;
+	if (generic->error_block_length <
+	    sizeof(struct acpi_hest_generic_status)) {
+		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
+			   generic->error_block_length,
 			   generic->header.source_id);
 		goto err;
 	}
@@ -327,38 +544,43 @@
 		ghes = NULL;
 		goto err;
 	}
-	if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) {
+	switch (generic->notify.type) {
+	case ACPI_HEST_NOTIFY_POLLED:
+		ghes->timer.function = ghes_poll_func;
+		ghes->timer.data = (unsigned long)ghes;
+		init_timer_deferrable(&ghes->timer);
+		ghes_add_timer(ghes);
+		break;
+	case ACPI_HEST_NOTIFY_EXTERNAL:
+		/* External interrupt vector is GSI */
+		if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
+			       generic->header.source_id);
+			goto err;
+		}
+		if (request_irq(ghes->irq, ghes_irq_func,
+				0, "GHES IRQ", ghes)) {
+			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
+			       generic->header.source_id);
+			goto err;
+		}
+		break;
+	case ACPI_HEST_NOTIFY_SCI:
 		mutex_lock(&ghes_list_mutex);
 		if (list_empty(&ghes_sci))
 			register_acpi_hed_notifier(&ghes_notifier_sci);
 		list_add_rcu(&ghes->list, &ghes_sci);
 		mutex_unlock(&ghes_list_mutex);
-	} else {
-		unsigned char *notify = NULL;
-
-		switch (generic->notify.type) {
-		case ACPI_HEST_NOTIFY_POLLED:
-			notify = "POLL";
-			break;
-		case ACPI_HEST_NOTIFY_EXTERNAL:
-		case ACPI_HEST_NOTIFY_LOCAL:
-			notify = "IRQ";
-			break;
-		case ACPI_HEST_NOTIFY_NMI:
-			notify = "NMI";
-			break;
-		}
-		if (notify) {
-			pr_warning(GHES_PFX
-"Generic hardware error source: %d notified via %s is not supported!\n",
-				   generic->header.source_id, notify);
-		} else {
-			pr_warning(FW_WARN GHES_PFX
-"Unknown notification type: %u for generic hardware error source: %d\n",
-			generic->notify.type, generic->header.source_id);
-		}
-		rc = -ENODEV;
-		goto err;
+		break;
+	case ACPI_HEST_NOTIFY_NMI:
+		mutex_lock(&ghes_list_mutex);
+		if (list_empty(&ghes_nmi))
+			register_die_notifier(&ghes_notifier_nmi);
+		list_add_rcu(&ghes->list, &ghes_nmi);
+		mutex_unlock(&ghes_list_mutex);
+		break;
+	default:
+		BUG();
 	}
 	platform_set_drvdata(ghes_dev, ghes);
 
@@ -379,7 +601,14 @@
 	ghes = platform_get_drvdata(ghes_dev);
 	generic = ghes->generic;
 
+	ghes->flags |= GHES_EXITING;
 	switch (generic->notify.type) {
+	case ACPI_HEST_NOTIFY_POLLED:
+		del_timer_sync(&ghes->timer);
+		break;
+	case ACPI_HEST_NOTIFY_EXTERNAL:
+		free_irq(ghes->irq, ghes);
+		break;
 	case ACPI_HEST_NOTIFY_SCI:
 		mutex_lock(&ghes_list_mutex);
 		list_del_rcu(&ghes->list);
@@ -387,12 +616,23 @@
 			unregister_acpi_hed_notifier(&ghes_notifier_sci);
 		mutex_unlock(&ghes_list_mutex);
 		break;
+	case ACPI_HEST_NOTIFY_NMI:
+		mutex_lock(&ghes_list_mutex);
+		list_del_rcu(&ghes->list);
+		if (list_empty(&ghes_nmi))
+			unregister_die_notifier(&ghes_notifier_nmi);
+		mutex_unlock(&ghes_list_mutex);
+		/*
+		 * To synchronize with NMI handler, ghes can only be
+		 * freed after NMI handler finishes.
+		 */
+		synchronize_rcu();
+		break;
 	default:
 		BUG();
 		break;
 	}
 
-	synchronize_rcu();
 	ghes_fini(ghes);
 	kfree(ghes);
 
@@ -412,6 +652,8 @@
 
 static int __init ghes_init(void)
 {
+	int rc;
+
 	if (acpi_disabled)
 		return -ENODEV;
 
@@ -420,12 +662,25 @@
 		return -EINVAL;
 	}
 
-	return platform_driver_register(&ghes_platform_driver);
+	rc = ghes_ioremap_init();
+	if (rc)
+		goto err;
+
+	rc = platform_driver_register(&ghes_platform_driver);
+	if (rc)
+		goto err_ioremap_exit;
+
+	return 0;
+err_ioremap_exit:
+	ghes_ioremap_exit();
+err:
+	return rc;
 }
 
 static void __exit ghes_exit(void)
 {
 	platform_driver_unregister(&ghes_platform_driver);
+	ghes_ioremap_exit();
 }
 
 module_init(ghes_init);
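[Editor's note: the chunked copy in ghes_copy_tofrom_phys() above splits a physical range into page-sized pieces so that only one scratch mapping is needed per context. A minimal user-space sketch of the same arithmetic, assuming 4 KiB pages, a fake byte array standing in for physical memory, and memcpy() in place of memcpy_fromio(), is:]

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SIZE	4096u
#define PAGE_MASK	(~(uint64_t)(PAGE_SIZE - 1))

/*
 * Copy len bytes starting at "physical" address paddr out of backing[],
 * never touching more than one page per iteration -- the same loop shape
 * as ghes_copy_tofrom_phys(), with memcpy in place of memcpy_fromio().
 */
static void copy_from_phys(void *buffer, const uint8_t *backing,
			   uint64_t paddr, uint32_t len)
{
	while (len > 0) {
		uint64_t offset = paddr - (paddr & PAGE_MASK);
		uint32_t chunk = PAGE_SIZE - offset;

		if (chunk > len)
			chunk = len;
		/* In the driver, this is where the page gets ioremapped. */
		memcpy(buffer, backing + paddr, chunk);
		len -= chunk;
		paddr += chunk;
		buffer = (uint8_t *)buffer + chunk;
	}
}

int main(void)
{
	static uint8_t fake_phys[3 * PAGE_SIZE];
	uint8_t out[6000];

	memset(fake_phys, 0xab, sizeof(fake_phys));
	/* Starts 100 bytes before a page boundary, so it crosses two pages. */
	copy_from_phys(out, fake_phys, PAGE_SIZE - 100, sizeof(out));
	printf("copied %zu bytes, first=0x%02x\n", sizeof(out), out[0]);
	return 0;
}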
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index daa7bc6..abda378 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -195,24 +195,24 @@
 
 __setup("hest_disable", setup_hest_disable);
 
-static int __init hest_init(void)
+void __init acpi_hest_init(void)
 {
 	acpi_status status;
 	int rc = -ENODEV;
 	unsigned int ghes_count = 0;
 
+	if (hest_disable) {
+		pr_info(HEST_PFX "Table parsing disabled.\n");
+		return;
+	}
+
 	if (acpi_disabled)
 		goto err;
 
-	if (hest_disable) {
-		pr_info(HEST_PFX "HEST tabling parsing is disabled.\n");
-		goto err;
-	}
-
 	status = acpi_get_table(ACPI_SIG_HEST, 0,
 				(struct acpi_table_header **)&hest_tab);
 	if (status == AE_NOT_FOUND) {
-		pr_info(HEST_PFX "Table is not found!\n");
+		pr_info(HEST_PFX "Table not found.\n");
 		goto err;
 	} else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
@@ -226,15 +226,11 @@
 		goto err;
 
 	rc = hest_ghes_dev_register(ghes_count);
-	if (rc)
-		goto err;
+	if (!rc) {
+		pr_info(HEST_PFX "Table parsing has been initialized.\n");
+		return;
+	}
 
-	pr_info(HEST_PFX "HEST table parsing is initialized.\n");
-
-	return 0;
 err:
 	hest_disable = 1;
-	return rc;
 }
-
-subsys_initcall(hest_init);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 95649d3..ac1a599 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -631,6 +631,17 @@
 	return result;
 }
 
+static void acpi_battery_refresh(struct acpi_battery *battery)
+{
+	if (!battery->bat.dev)
+		return;
+
+	acpi_battery_get_info(battery);
+	/* The battery may have changed its reporting units. */
+	sysfs_remove_battery(battery);
+	sysfs_add_battery(battery);
+}
+
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -868,6 +879,8 @@
 	struct proc_dir_entry *entry = NULL;
 	int i;
 
+	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
 	if (!acpi_device_dir(device)) {
 		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
 						     acpi_battery_dir);
@@ -914,6 +927,8 @@
 	if (!battery)
 		return;
 	old = battery->bat.dev;
+	if (event == ACPI_BATTERY_NOTIFY_INFO)
+		acpi_battery_refresh(battery);
 	acpi_battery_update(battery);
 	acpi_bus_generate_proc_event(device, event,
 				     acpi_battery_present(battery));
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d68bd61..7ced61f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -52,22 +52,6 @@
 
 #define STRUCT_TO_INT(s)	(*((int*)&s))
 
-static int set_power_nocheck(const struct dmi_system_id *id)
-{
-	printk(KERN_NOTICE PREFIX "%s detected - "
-		"disable power check in power transition\n", id->ident);
-	acpi_power_nocheck = 1;
-	return 0;
-}
-static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
-	{
-	set_power_nocheck, "HP Pavilion 05", {
-	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-	DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
-	DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
-	{},
-};
-
 
 #ifdef CONFIG_X86
 static int set_copy_dsdt(const struct dmi_system_id *id)
@@ -196,33 +180,24 @@
                                  Power Management
    -------------------------------------------------------------------------- */
 
-int acpi_bus_get_power(acpi_handle handle, int *state)
+static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
 	int result = 0;
 	acpi_status status = 0;
-	struct acpi_device *device = NULL;
 	unsigned long long psc = 0;
 
-
-	result = acpi_bus_get_device(handle, &device);
-	if (result)
-		return result;
+	if (!device || !state)
+		return -EINVAL;
 
 	*state = ACPI_STATE_UNKNOWN;
 
-	if (!device->flags.power_manageable) {
-		/* TBD: Non-recursive algorithm for walking up hierarchy */
-		if (device->parent)
-			*state = device->parent->power.state;
-		else
-			*state = ACPI_STATE_D0;
-	} else {
+	if (device->flags.power_manageable) {
 		/*
 		 * Get the device's power state either directly (via _PSC) or
 		 * indirectly (via power resources).
 		 */
 		if (device->power.flags.power_resources) {
-			result = acpi_power_get_inferred_state(device);
+			result = acpi_power_get_inferred_state(device, state);
 			if (result)
 				return result;
 		} else if (device->power.flags.explicit_get) {
@@ -230,59 +205,33 @@
 						       NULL, &psc);
 			if (ACPI_FAILURE(status))
 				return -ENODEV;
-			device->power.state = (int)psc;
+			*state = (int)psc;
 		}
-
-		*state = device->power.state;
+	} else {
+		/* TBD: Non-recursive algorithm for walking up hierarchy. */
+		*state = device->parent ?
+			device->parent->power.state : ACPI_STATE_D0;
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-			  device->pnp.bus_id, device->power.state));
+			  device->pnp.bus_id, *state));
 
 	return 0;
 }
 
-EXPORT_SYMBOL(acpi_bus_get_power);
 
-int acpi_bus_set_power(acpi_handle handle, int state)
+static int __acpi_bus_set_power(struct acpi_device *device, int state)
 {
 	int result = 0;
 	acpi_status status = AE_OK;
-	struct acpi_device *device = NULL;
 	char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
 
-
-	result = acpi_bus_get_device(handle, &device);
-	if (result)
-		return result;
-
-	if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
 		return -EINVAL;
 
 	/* Make sure this is a valid target state */
 
-	if (!device->flags.power_manageable) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
-				kobject_name(&device->dev.kobj)));
-		return -ENODEV;
-	}
-	/*
-	 * Get device's current power state
-	 */
-	if (!acpi_power_nocheck) {
-		/*
-		 * Maybe the incorrect power state is returned on the bogus
-		 * bios, which is different with the real power state.
-		 * For example: the bios returns D0 state and the real power
-		 * state is D3. OS expects to set the device to D0 state. In
-		 * such case if OS uses the power state returned by the BIOS,
-		 * the device can't be transisted to the correct power state.
-		 * So if the acpi_power_nocheck is set, it is unnecessary to
-		 * get the power state by calling acpi_bus_get_power.
-		 */
-		acpi_bus_get_power(device->handle, &device->power.state);
-	}
-	if ((state == device->power.state) && !device->flags.force_power_state) {
+	if (state == device->power.state) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
 				  state));
 		return 0;
@@ -351,8 +300,75 @@
 	return result;
 }
 
+
+int acpi_bus_set_power(acpi_handle handle, int state)
+{
+	struct acpi_device *device;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	if (result)
+		return result;
+
+	if (!device->flags.power_manageable) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"Device [%s] is not power manageable\n",
+				dev_name(&device->dev)));
+		return -ENODEV;
+	}
+
+	return __acpi_bus_set_power(device, state);
+}
 EXPORT_SYMBOL(acpi_bus_set_power);
 
+
+int acpi_bus_init_power(struct acpi_device *device)
+{
+	int state;
+	int result;
+
+	if (!device)
+		return -EINVAL;
+
+	device->power.state = ACPI_STATE_UNKNOWN;
+
+	result = __acpi_bus_get_power(device, &state);
+	if (result)
+		return result;
+
+	if (device->power.flags.power_resources)
+		result = acpi_power_on_resources(device, state);
+
+	if (!result)
+		device->power.state = state;
+
+	return result;
+}
+
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+	struct acpi_device *device;
+	int state;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	if (result)
+		return result;
+
+	result = __acpi_bus_get_power(device, &state);
+	if (result)
+		return result;
+
+	result = __acpi_bus_set_power(device, state);
+	if (!result && state_p)
+		*state_p = state;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_update_power);
+
+
 bool acpi_bus_power_manageable(acpi_handle handle)
 {
 	struct acpi_device *device;
@@ -1023,15 +1039,8 @@
 	if (acpi_disabled)
 		return result;
 
-	/*
-	 * If the laptop falls into the DMI check table, the power state check
-	 * will be disabled in the course of device power transition.
-	 */
-	dmi_check_system(power_nocheck_dmi_table);
-
 	acpi_scan_init();
 	acpi_ec_init();
-	acpi_power_init();
 	acpi_debugfs_init();
 	acpi_sleep_proc_init();
 	acpi_wakeup_device_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 71ef9cd..76bbb78 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -279,6 +279,9 @@
 	input_report_switch(button->input, SW_LID, !state);
 	input_sync(button->input);
 
+	if (state)
+		pm_wakeup_event(&device->dev, 0);
+
 	ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
 	if (ret == NOTIFY_DONE)
 		ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -314,6 +317,8 @@
 			input_sync(input);
 			input_report_key(input, keycode, 0);
 			input_sync(input);
+
+			pm_wakeup_event(&device->dev, 0);
 		}
 
 		acpi_bus_generate_proc_event(device, event, ++button->pushed);
@@ -426,7 +431,7 @@
 		acpi_enable_gpe(device->wakeup.gpe_device,
 				device->wakeup.gpe_number);
 		device->wakeup.run_wake_count++;
-		device->wakeup.state.enabled = 1;
+		device_set_wakeup_enable(&device->dev, true);
 	}
 
 	printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
@@ -449,7 +454,7 @@
 		acpi_disable_gpe(device->wakeup.gpe_device,
 				device->wakeup.gpe_number);
 		device->wakeup.run_wake_count--;
-		device->wakeup.state.enabled = 0;
+		device_set_wakeup_enable(&device->dev, false);
 	}
 
 	acpi_button_remove_fs(device);
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1..384f7ab 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@
 			size_t count, loff_t *ppos)
 {
 	static char *buf;
-	static int uncopied_bytes;
+	static u32 max_size;
+	static u32 uncopied_bytes;
+
 	struct acpi_table_header table;
 	acpi_status status;
 
@@ -37,19 +39,24 @@
 		if (copy_from_user(&table, user_buf,
 				   sizeof(struct acpi_table_header)))
 			return -EFAULT;
-		uncopied_bytes = table.length;
-		buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+		uncopied_bytes = max_size = table.length;
+		buf = kzalloc(max_size, GFP_KERNEL);
 		if (!buf)
 			return -ENOMEM;
 	}
 
-	if (uncopied_bytes < count) {
-		kfree(buf);
+	if (buf == NULL)
 		return -EINVAL;
-	}
+
+	if ((*ppos > max_size) ||
+	    (*ppos + count > max_size) ||
+	    (*ppos + count < count) ||
+	    (count > uncopied_bytes))
+		return -EINVAL;
 
 	if (copy_from_user(buf + (*ppos), user_buf, count)) {
 		kfree(buf);
+		buf = NULL;
 		return -EFAULT;
 	}
 
@@ -59,6 +66,7 @@
 	if (!uncopied_bytes) {
 		status = acpi_install_method(buf);
 		kfree(buf);
+		buf = NULL;
 		if (ACPI_FAILURE(status))
 			return -EINVAL;
 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
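[Editor's note: the new checks in the debugfs write path above reject writes that start past the table, run past it, wrap around, or exceed the bytes still expected. A minimal sketch of that validation as a stand-alone predicate (hypothetical name write_range_ok(), user-space types) is:]

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Mirror of the bounds check added above: reject writes that start past
 * the buffer, run past the buffer, wrap around, or exceed the bytes
 * still expected.
 */
static bool write_range_ok(uint64_t ppos, uint32_t count,
			   uint32_t max_size, uint32_t uncopied_bytes)
{
	if (ppos > max_size)
		return false;
	if (ppos + count > max_size)
		return false;
	if (ppos + count < count)	/* overflow check */
		return false;
	if (count > uncopied_bytes)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", write_range_ok(0, 128, 4096, 4096));	/* 1: fits */
	printf("%d\n", write_range_ok(4000, 200, 4096, 96));	/* 0: runs past end */
	printf("%d\n", write_range_ok(UINT64_MAX - 10, 64, 4096, 4096)); /* 0: bogus offset */
	return 0;
}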
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 81514a4..1864ad3 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -725,7 +725,7 @@
 			complete_dock(ds);
 			dock_event(ds, event, DOCK_EVENT);
 			dock_lock(ds, 1);
-			acpi_update_gpes();
+			acpi_update_all_gpes();
 			break;
 		}
 		if (dock_present(ds) || dock_in_progress(ds))
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 302b31e..fa848c4 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -606,7 +606,8 @@
 	return 0;
 }
 
-static u32 acpi_ec_gpe_handler(void *data)
+static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+	u32 gpe_number, void *data)
 {
 	struct acpi_ec *ec = data;
 
@@ -618,7 +619,7 @@
 		wake_up(&ec->wait);
 		ec_check_sci(ec, acpi_ec_read_status(ec));
 	}
-	return ACPI_INTERRUPT_HANDLED;
+	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
 }
 
 /* --------------------------------------------------------------------------
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 6004908..467479f 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -86,7 +86,7 @@
 	if (!device)
 		return -EINVAL;
 
-	result = acpi_bus_get_power(device->handle, &acpi_state);
+	result = acpi_bus_update_power(device->handle, &acpi_state);
 	if (result)
 		return result;
 
@@ -123,7 +123,6 @@
 static int acpi_fan_add(struct acpi_device *device)
 {
 	int result = 0;
-	int state = 0;
 	struct thermal_cooling_device *cdev;
 
 	if (!device)
@@ -132,16 +131,12 @@
 	strcpy(acpi_device_name(device), "Fan");
 	strcpy(acpi_device_class(device), ACPI_FAN_CLASS);
 
-	result = acpi_bus_get_power(device->handle, &state);
+	result = acpi_bus_update_power(device->handle, NULL);
 	if (result) {
-		printk(KERN_ERR PREFIX "Reading power state\n");
+		printk(KERN_ERR PREFIX "Setting initial power state\n");
 		goto end;
 	}
 
-	device->flags.force_power_state = 1;
-	acpi_bus_set_power(device->handle, state);
-	device->flags.force_power_state = 0;
-
 	cdev = thermal_cooling_device_register("Fan", device,
 						&fan_cooling_ops);
 	if (IS_ERR(cdev)) {
@@ -200,22 +195,14 @@
 
 static int acpi_fan_resume(struct acpi_device *device)
 {
-	int result = 0;
-	int power_state = 0;
+	int result;
 
 	if (!device)
 		return -EINVAL;
 
-	result = acpi_bus_get_power(device->handle, &power_state);
-	if (result) {
-		printk(KERN_ERR PREFIX
-				  "Error reading fan power state\n");
-		return result;
-	}
-
-	device->flags.force_power_state = 1;
-	acpi_bus_set_power(device->handle, power_state);
-	device->flags.force_power_state = 0;
+	result = acpi_bus_update_power(device->handle, NULL);
+	if (result)
+		printk(KERN_ERR PREFIX "Error updating fan power state\n");
 
 	return result;
 }
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 78b0164..7c47ed5 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -167,11 +167,8 @@
 				"firmware_node");
 		ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
 				"physical_node");
-		if (acpi_dev->wakeup.flags.valid) {
+		if (acpi_dev->wakeup.flags.valid)
 			device_set_wakeup_capable(dev, true);
-			device_set_wakeup_enable(dev,
-						acpi_dev->wakeup.state.enabled);
-		}
 	}
 
 	return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a212bfe..b1cc81a 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,9 +41,10 @@
 int acpi_power_init(void);
 int acpi_device_sleep_wake(struct acpi_device *dev,
                            int enable, int sleep_state, int dev_state);
-int acpi_power_get_inferred_state(struct acpi_device *device);
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
+int acpi_power_on_resources(struct acpi_device *device, int state);
 int acpi_power_transition(struct acpi_device *device, int state);
-extern int acpi_power_nocheck;
+int acpi_bus_init_power(struct acpi_device *device);
 
 int acpi_wakeup_device_init(void);
 void acpi_early_processor_set_pdc(void);
@@ -82,8 +83,16 @@
 
 #ifdef CONFIG_ACPI_SLEEP
 int acpi_sleep_proc_init(void);
+int suspend_nvs_alloc(void);
+void suspend_nvs_free(void);
+int suspend_nvs_save(void);
+void suspend_nvs_restore(void);
 #else
 static inline int acpi_sleep_proc_init(void) { return 0; }
+static inline int suspend_nvs_alloc(void) { return 0; }
+static inline void suspend_nvs_free(void) {}
+static inline int suspend_nvs_save(void) { return 0; }
+static inline void suspend_nvs_restore(void) {}
 #endif
 
 #endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d9926af..5eb25eb 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -275,23 +275,19 @@
 int __init acpi_numa_init(void)
 {
 	int ret = 0;
-	int nr_cpu_entries = nr_cpu_ids;
 
-#ifdef CONFIG_X86
 	/*
 	 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
 	 * SRAT cpu entries could have different order with that in MADT.
 	 * So go over all cpu entries in SRAT to get apicid to node mapping.
 	 */
-	nr_cpu_entries = MAX_LOCAL_APIC;
-#endif
 
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				     acpi_parse_x2apic_affinity, nr_cpu_entries);
+				     acpi_parse_x2apic_affinity, 0);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				     acpi_parse_processor_affinity, nr_cpu_entries);
+				     acpi_parse_processor_affinity, 0);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);
diff --git a/kernel/power/nvs.c b/drivers/acpi/nvs.c
similarity index 87%
rename from kernel/power/nvs.c
rename to drivers/acpi/nvs.c
index 1836db6..fa5a1df 100644
--- a/kernel/power/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -1,7 +1,7 @@
 /*
- * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
+ * nvs.c - Routines for saving and restoring ACPI NVS memory region
  *
- * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  *
  * This file is released under the GPLv2.
  */
@@ -11,7 +11,9 @@
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
+#include <acpi/acpiosxf.h>
 
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
@@ -105,7 +107,7 @@
 /**
  *	suspend_nvs_save - save NVS memory regions
  */
-void suspend_nvs_save(void)
+int suspend_nvs_save(void)
 {
 	struct nvs_page *entry;
 
@@ -113,9 +115,16 @@
 
 	list_for_each_entry(entry, &nvs_list, node)
 		if (entry->data) {
-			entry->kaddr = ioremap(entry->phys_start, entry->size);
+			entry->kaddr = acpi_os_ioremap(entry->phys_start,
+						    entry->size);
+			if (!entry->kaddr) {
+				suspend_nvs_free();
+				return -ENOMEM;
+			}
 			memcpy(entry->data, entry->kaddr, entry->size);
 		}
+
+	return 0;
 }
 
 /**
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 055d7b7..c90c76a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -38,6 +38,7 @@
 #include <linux/workqueue.h>
 #include <linux/nmi.h>
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <linux/efi.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
@@ -302,9 +303,10 @@
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
 	struct acpi_ioremap *map, *tmp_map;
-	unsigned long flags, pg_sz;
+	unsigned long flags;
 	void __iomem *virt;
-	phys_addr_t pg_off;
+	acpi_physical_address pg_off;
+	acpi_size pg_sz;
 
 	if (phys > ULONG_MAX) {
 		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -320,7 +322,7 @@
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-	virt = ioremap(pg_off, pg_sz);
+	virt = acpi_os_ioremap(pg_off, pg_sz);
 	if (!virt) {
 		kfree(map);
 		return NULL;
@@ -634,17 +636,21 @@
 acpi_status
 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
 {
-	u32 dummy;
 	void __iomem *virt_addr;
-	int size = width / 8, unmap = 0;
+	unsigned int size = width / 8;
+	bool unmap = false;
+	u32 dummy;
 
 	rcu_read_lock();
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	rcu_read_unlock();
 	if (!virt_addr) {
-		virt_addr = ioremap(phys_addr, size);
-		unmap = 1;
+		rcu_read_unlock();
+		virt_addr = acpi_os_ioremap(phys_addr, size);
+		if (!virt_addr)
+			return AE_BAD_ADDRESS;
+		unmap = true;
 	}
+
 	if (!value)
 		value = &dummy;
 
@@ -664,6 +670,8 @@
 
 	if (unmap)
 		iounmap(virt_addr);
+	else
+		rcu_read_unlock();
 
 	return AE_OK;
 }
@@ -672,14 +680,17 @@
 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 {
 	void __iomem *virt_addr;
-	int size = width / 8, unmap = 0;
+	unsigned int size = width / 8;
+	bool unmap = false;
 
 	rcu_read_lock();
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	rcu_read_unlock();
 	if (!virt_addr) {
-		virt_addr = ioremap(phys_addr, size);
-		unmap = 1;
+		rcu_read_unlock();
+		virt_addr = acpi_os_ioremap(phys_addr, size);
+		if (!virt_addr)
+			return AE_BAD_ADDRESS;
+		unmap = true;
 	}
 
 	switch (width) {
@@ -698,6 +709,8 @@
 
 	if (unmap)
 		iounmap(virt_addr);
+	else
+		rcu_read_unlock();
 
 	return AE_OK;
 }
@@ -1233,8 +1246,7 @@
 int acpi_check_resource_conflict(const struct resource *res)
 {
 	struct acpi_res_list *res_list_elem;
-	int ioport;
-	int clash = 0;
+	int ioport = 0, clash = 0;
 
 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
 		return 0;
@@ -1264,9 +1276,13 @@
 	if (clash) {
 		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
 			printk(KERN_WARNING "ACPI: resource %s %pR"
-			       " conflicts with ACPI region %s %pR\n",
+			       " conflicts with ACPI region %s "
+			       "[%s 0x%zx-0x%zx]\n",
 			       res->name, res, res_list_elem->name,
-			       res_list_elem);
+			       (res_list_elem->resource_type ==
+				ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
+			       (size_t) res_list_elem->start,
+			       (size_t) res_list_elem->end);
 			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
 				printk(KERN_NOTICE "ACPI: This conflict may"
 				       " cause random problems and system"
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 96668ad..8524939 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
+#include <acpi/apei.h>
 
 #define PREFIX "ACPI: "
 
@@ -47,6 +48,11 @@
 static int acpi_pci_root_remove(struct acpi_device *device, int type);
 static int acpi_pci_root_start(struct acpi_device *device);
 
+#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
+				| OSC_ACTIVE_STATE_PWR_SUPPORT \
+				| OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
+				| OSC_MSI_SUPPORT)
+
 static const struct acpi_device_id root_device_ids[] = {
 	{"PNP0A03", 0},
 	{"", 0},
@@ -566,6 +572,33 @@
 	if (flags != base_flags)
 		acpi_pci_osc_support(root, flags);
 
+	if (!pcie_ports_disabled
+	    && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
+		flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+			| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+			| OSC_PCI_EXPRESS_PME_CONTROL;
+
+		if (pci_aer_available()) {
+			if (aer_acpi_firmware_first())
+				dev_dbg(root->bus->bridge,
+					"PCIe errors handled by BIOS.\n");
+			else
+				flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+		}
+
+		dev_info(root->bus->bridge,
+			"Requesting ACPI _OSC control (0x%02x)\n", flags);
+
+		status = acpi_pci_osc_control_set(device->handle, &flags,
+					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+		if (ACPI_SUCCESS(status))
+			dev_info(root->bus->bridge,
+				"ACPI _OSC control (0x%02x) granted\n", flags);
+		else
+			dev_dbg(root->bus->bridge,
+				"ACPI _OSC request failed (code %d)\n", status);
+	}
+
 	pci_acpi_add_bus_pm_notifier(device, root->bus);
 	if (device->wakeup.flags.run_wake)
 		device_set_run_wake(root->bus->bridge, true);
@@ -600,6 +633,8 @@
 
 static int __init acpi_pci_root_init(void)
 {
+	acpi_hest_init();
+
 	if (acpi_pci_disabled)
 		return 0;
 
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 4c9c2fb..9ac2a9f 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -56,9 +56,6 @@
 #define ACPI_POWER_RESOURCE_STATE_ON	0x01
 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
 
-int acpi_power_nocheck;
-module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
-
 static int acpi_power_add(struct acpi_device *device);
 static int acpi_power_remove(struct acpi_device *device, int type);
 static int acpi_power_resume(struct acpi_device *device);
@@ -148,9 +145,8 @@
 
 static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
 {
-	int result = 0, state1;
-	u32 i = 0;
-
+	int cur_state;
+	int i = 0;
 
 	if (!list || !state)
 		return -EINVAL;
@@ -158,25 +154,33 @@
 	/* The state of the list is 'on' IFF all resources are 'on'. */
 
 	for (i = 0; i < list->count; i++) {
-		/*
-		 * The state of the power resource can be obtained by
-		 * using the ACPI handle. In such case it is unnecessary to
-		 * get the Power resource first and then get its state again.
-		 */
-		result = acpi_power_get_state(list->handles[i], &state1);
+		struct acpi_power_resource *resource;
+		acpi_handle handle = list->handles[i];
+		int result;
+
+		result = acpi_power_get_context(handle, &resource);
 		if (result)
 			return result;
 
-		*state = state1;
+		mutex_lock(&resource->resource_lock);
 
-		if (*state != ACPI_POWER_RESOURCE_STATE_ON)
+		result = acpi_power_get_state(handle, &cur_state);
+
+		mutex_unlock(&resource->resource_lock);
+
+		if (result)
+			return result;
+
+		if (cur_state != ACPI_POWER_RESOURCE_STATE_ON)
 			break;
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
-			  *state ? "on" : "off"));
+			  cur_state ? "on" : "off"));
 
-	return result;
+	*state = cur_state;
+
+	return 0;
 }
 
 static int __acpi_power_on(struct acpi_power_resource *resource)
@@ -222,7 +226,7 @@
 	return result;
 }
 
-static int acpi_power_off_device(acpi_handle handle)
+static int acpi_power_off(acpi_handle handle)
 {
 	int result = 0;
 	acpi_status status = AE_OK;
@@ -266,6 +270,35 @@
 	return result;
 }
 
+static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res)
+{
+	int i;
+
+	for (i = num_res - 1; i >= 0; i--)
+		acpi_power_off(list->handles[i]);
+}
+
+static void acpi_power_off_list(struct acpi_handle_list *list)
+{
+	__acpi_power_off_list(list, list->count);
+}
+
+static int acpi_power_on_list(struct acpi_handle_list *list)
+{
+	int result = 0;
+	int i;
+
+	for (i = 0; i < list->count; i++) {
+		result = acpi_power_on(list->handles[i]);
+		if (result) {
+			__acpi_power_off_list(list, i);
+			break;
+		}
+	}
+
+	return result;
+}
+
 /**
  * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
  *                          ACPI 3.0) _PSW (Power State Wake)
@@ -404,8 +437,7 @@
 
 	/* Close power resource */
 	for (i = 0; i < dev->wakeup.resources.count; i++) {
-		int ret = acpi_power_off_device(
-				dev->wakeup.resources.handles[i]);
+		int ret = acpi_power_off(dev->wakeup.resources.handles[i]);
 		if (ret) {
 			printk(KERN_ERR PREFIX "Transition power state\n");
 			dev->wakeup.flags.valid = 0;
@@ -423,19 +455,16 @@
                              Device Power Management
    -------------------------------------------------------------------------- */
 
-int acpi_power_get_inferred_state(struct acpi_device *device)
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
 {
 	int result = 0;
 	struct acpi_handle_list *list = NULL;
 	int list_state = 0;
 	int i = 0;
 
-
-	if (!device)
+	if (!device || !state)
 		return -EINVAL;
 
-	device->power.state = ACPI_STATE_UNKNOWN;
-
 	/*
 	 * We know a device's inferred power state when all the resources
 	 * required for a given D-state are 'on'.
@@ -450,22 +479,26 @@
 			return result;
 
 		if (list_state == ACPI_POWER_RESOURCE_STATE_ON) {
-			device->power.state = i;
+			*state = i;
 			return 0;
 		}
 	}
 
-	device->power.state = ACPI_STATE_D3;
-
+	*state = ACPI_STATE_D3;
 	return 0;
 }
 
+int acpi_power_on_resources(struct acpi_device *device, int state)
+{
+	if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
+		return -EINVAL;
+
+	return acpi_power_on_list(&device->power.states[state].resources);
+}
+
 int acpi_power_transition(struct acpi_device *device, int state)
 {
-	int result = 0;
-	struct acpi_handle_list *cl = NULL;	/* Current Resources */
-	struct acpi_handle_list *tl = NULL;	/* Target Resources */
-	int i = 0;
+	int result;
 
 	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
 		return -EINVAL;
@@ -477,37 +510,20 @@
 	    || (device->power.state > ACPI_STATE_D3))
 		return -ENODEV;
 
-	cl = &device->power.states[device->power.state].resources;
-	tl = &device->power.states[state].resources;
-
 	/* TBD: Resources must be ordered. */
 
 	/*
 	 * First we reference all power resources required in the target list
-	 * (e.g. so the device doesn't lose power while transitioning).
+	 * (e.g. so the device doesn't lose power while transitioning).  Then,
+	 * we dereference all power resources used in the current list.
 	 */
-	for (i = 0; i < tl->count; i++) {
-		result = acpi_power_on(tl->handles[i]);
-		if (result)
-			goto end;
-	}
+	result = acpi_power_on_list(&device->power.states[state].resources);
+	if (!result)
+		acpi_power_off_list(
+			&device->power.states[device->power.state].resources);
 
-	/*
-	 * Then we dereference all power resources used in the current list.
-	 */
-	for (i = 0; i < cl->count; i++) {
-		result = acpi_power_off_device(cl->handles[i]);
-		if (result)
-			goto end;
-	}
-
-     end:
-	if (result)
-		device->power.state = ACPI_STATE_UNKNOWN;
-	else {
-	/* We shouldn't change the state till all above operations succeed */
-		device->power.state = state;
-	}
+	/* We shouldn't change the state unless the above operations succeed. */
+	device->power.state = result ? ACPI_STATE_UNKNOWN : state;
 
 	return result;
 }
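The acpi_power_on_list()/__acpi_power_off_list() pair added above turns the resources on in order and, if one of them fails, turns off only the ones already enabled, in reverse order, before propagating the error. A small standalone sketch of that rollback discipline, with made-up resource indices in place of ACPI handles:

#include <stdio.h>

/* Illustrative resources: resource 2 is made to fail so the rollback runs. */
static int power_on(int idx)
{
	if (idx == 2)
		return -1;
	printf("on  %d\n", idx);
	return 0;
}

static void power_off(int idx)
{
	printf("off %d\n", idx);
}

/* Turn off, in reverse order, only the first num_on resources. */
static void power_off_first(int num_on)
{
	for (int i = num_on - 1; i >= 0; i--)
		power_off(i);
}

/* Turn everything on; if one resource fails, undo what already succeeded. */
static int power_on_list(int count)
{
	for (int i = 0; i < count; i++) {
		int ret = power_on(i);
		if (ret) {
			power_off_first(i);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	printf("power_on_list() -> %d\n", power_on_list(4));
	return 0;
}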
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index afad677..f5f9869 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,7 +311,9 @@
 			   dev->pnp.bus_id,
 			   (u32) dev->wakeup.sleep_state,
 			   dev->wakeup.flags.run_wake ? '*' : ' ',
-			   dev->wakeup.state.enabled ? "enabled" : "disabled");
+			   (device_may_wakeup(&dev->dev)
+			     || (ldev && device_may_wakeup(ldev))) ?
+			       "enabled" : "disabled");
 		if (ldev)
 			seq_printf(seq, "%s:%s",
 				   ldev->bus ? ldev->bus->name : "no-bus",
@@ -328,8 +330,10 @@
 {
 	struct device *dev = acpi_get_physical_device(adev->handle);
 
-	if (dev && device_can_wakeup(dev))
-		device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+	if (dev && device_can_wakeup(dev)) {
+		bool enable = !device_may_wakeup(dev);
+		device_set_wakeup_enable(dev, enable);
+	}
 }
 
 static ssize_t
@@ -341,7 +345,6 @@
 	char strbuf[5];
 	char str[5] = "";
 	unsigned int len = count;
-	struct acpi_device *found_dev = NULL;
 
 	if (len > 4)
 		len = 4;
@@ -361,33 +364,13 @@
 			continue;
 
 		if (!strncmp(dev->pnp.bus_id, str, 4)) {
-			dev->wakeup.state.enabled =
-			    dev->wakeup.state.enabled ? 0 : 1;
-			found_dev = dev;
-			break;
-		}
-	}
-	if (found_dev) {
-		physical_device_enable_wakeup(found_dev);
-		list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-			struct acpi_device *dev = container_of(node,
-							       struct
-							       acpi_device,
-							       wakeup_list);
-
-			if ((dev != found_dev) &&
-			    (dev->wakeup.gpe_number ==
-			     found_dev->wakeup.gpe_number)
-			    && (dev->wakeup.gpe_device ==
-				found_dev->wakeup.gpe_device)) {
-				printk(KERN_WARNING
-				       "ACPI: '%s' and '%s' have the same GPE, "
-				       "can't disable/enable one separately\n",
-				       dev->pnp.bus_id, found_dev->pnp.bus_id);
-				dev->wakeup.state.enabled =
-				    found_dev->wakeup.state.enabled;
+			if (device_can_wakeup(&dev->dev)) {
+				bool enable = !device_may_wakeup(&dev->dev);
+				device_set_wakeup_enable(&dev->dev, enable);
+			} else {
 				physical_device_enable_wakeup(dev);
 			}
+			break;
 		}
 	}
 	mutex_unlock(&acpi_device_lock);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index bec561c..3c1a2fe 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -23,7 +23,7 @@
 {
 	printk(KERN_NOTICE PREFIX "%s detected - "
 		"disabling mwait for CPU C-states\n", id->ident);
-	idle_nomwait = 1;
+	boot_option_idle_override = IDLE_NOMWAIT;
 	return 0;
 }
 
@@ -283,7 +283,7 @@
 {
 	acpi_status status = AE_OK;
 
-	if (idle_nomwait) {
+	if (boot_option_idle_override == IDLE_NOMWAIT) {
 		/*
 		 * If mwait is disabled for CPU C-states, the C2C3_FFH access
 		 * mode will be disabled in the parameter of _PDC object.
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 85e4804..360a74e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -40,10 +40,6 @@
 #include <linux/pm.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/cpuidle.h>
@@ -246,53 +242,6 @@
 	return result;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
-static struct proc_dir_entry *acpi_processor_dir = NULL;
-
-static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
-{
-	struct proc_dir_entry *entry = NULL;
-
-
-	if (!acpi_device_dir(device)) {
-		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-						     acpi_processor_dir);
-		if (!acpi_device_dir(device))
-			return -ENODEV;
-	}
-
-	/* 'throttling' [R/W] */
-	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
-				 S_IFREG | S_IRUGO | S_IWUSR,
-				 acpi_device_dir(device),
-				 &acpi_processor_throttling_fops,
-				 acpi_driver_data(device));
-	if (!entry)
-		return -EIO;
-	return 0;
-}
-static int acpi_processor_remove_fs(struct acpi_device *device)
-{
-
-	if (acpi_device_dir(device)) {
-		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
-				  acpi_device_dir(device));
-		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
-		acpi_device_dir(device) = NULL;
-	}
-
-	return 0;
-}
-#else
-static inline int acpi_processor_add_fs(struct acpi_device *device)
-{
-	return 0;
-}
-static inline int acpi_processor_remove_fs(struct acpi_device *device)
-{
-	return 0;
-}
-#endif
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -478,8 +427,13 @@
 	if (action == CPU_ONLINE && pr) {
 		acpi_processor_ppc_has_changed(pr, 0);
 		acpi_processor_cst_has_changed(pr);
+		acpi_processor_reevaluate_tstate(pr, action);
 		acpi_processor_tstate_has_changed(pr);
 	}
+	if (action == CPU_DEAD && pr) {
+		/* invalidate flags.throttling when a CPU goes offline */
+		acpi_processor_reevaluate_tstate(pr, action);
+	}
 	return NOTIFY_OK;
 }
 
@@ -537,14 +491,10 @@
 
 	per_cpu(processors, pr->id) = pr;
 
-	result = acpi_processor_add_fs(device);
-	if (result)
-		goto err_free_cpumask;
-
 	sysdev = get_cpu_sysdev(pr->id);
 	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
 		result = -EFAULT;
-		goto err_remove_fs;
+		goto err_free_cpumask;
 	}
 
 #ifdef CONFIG_CPU_FREQ
@@ -590,8 +540,6 @@
 	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
 	acpi_processor_power_exit(pr, device);
-err_remove_fs:
-	acpi_processor_remove_fs(device);
 err_free_cpumask:
 	free_cpumask_var(pr->throttling.shared_cpu_map);
 
@@ -620,8 +568,6 @@
 
 	sysfs_remove_link(&device->dev.kobj, "sysdev");
 
-	acpi_processor_remove_fs(device);
-
 	if (pr->cdev) {
 		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
 		sysfs_remove_link(&pr->cdev->device.kobj, "device");
@@ -854,12 +800,6 @@
 
 	memset(&errata, 0, sizeof(errata));
 
-#ifdef CONFIG_ACPI_PROCFS
-	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-	if (!acpi_processor_dir)
-		return -ENOMEM;
-#endif
-
 	if (!cpuidle_register_driver(&acpi_idle_driver)) {
 		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
 			acpi_idle_driver.name);
@@ -885,10 +825,6 @@
 out_cpuidle:
 	cpuidle_unregister_driver(&acpi_idle_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
 	return result;
 }
 
@@ -907,10 +843,6 @@
 
 	cpuidle_unregister_driver(&acpi_idle_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
 	return;
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index a765b82..d615b7d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -79,6 +79,13 @@
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static int disabled_by_idle_boot_param(void)
+{
+	return boot_option_idle_override == IDLE_POLL ||
+		boot_option_idle_override == IDLE_FORCE_MWAIT ||
+		boot_option_idle_override == IDLE_HALT;
+}
+
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -455,7 +462,7 @@
 				continue;
 			}
 			if (cx.type == ACPI_STATE_C1 &&
-					(idle_halt || idle_nomwait)) {
+			    (boot_option_idle_override == IDLE_NOMWAIT)) {
 				/*
 				 * In most cases the C1 space_id obtained from
 				 * _CST object is FIXED_HARDWARE access mode.
@@ -1016,7 +1023,6 @@
 		state->flags = 0;
 		switch (cx->type) {
 			case ACPI_STATE_C1:
-			state->flags |= CPUIDLE_FLAG_SHALLOW;
 			if (cx->entry_method == ACPI_CSTATE_FFH)
 				state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
@@ -1025,16 +1031,13 @@
 			break;
 
 			case ACPI_STATE_C2:
-			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
 			dev->safe_state = state;
 			break;
 
 			case ACPI_STATE_C3:
-			state->flags |= CPUIDLE_FLAG_DEEP;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
-			state->flags |= CPUIDLE_FLAG_CHECK_BM;
 			state->enter = pr->flags.bm_check ?
 					acpi_idle_enter_bm :
 					acpi_idle_enter_simple;
@@ -1058,7 +1061,7 @@
 {
 	int ret = 0;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!pr)
@@ -1089,19 +1092,10 @@
 	acpi_status status = 0;
 	static int first_run;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!first_run) {
-		if (idle_halt) {
-			/*
-			 * When the boot option of "idle=halt" is added, halt
-			 * is used for CPU IDLE.
-			 * In such case C2/C3 is meaningless. So the max_cstate
-			 * is set to one.
-			 */
-			max_cstate = 1;
-		}
 		dmi_check_system(processor_power_dmi_table);
 		max_cstate = acpi_processor_cstate_check(max_cstate);
 		if (max_cstate < ACPI_C_STATES_MAX)
@@ -1142,7 +1136,7 @@
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	cpuidle_unregister_device(&pr->power.dev);
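The processor_idle.c hunks replace the old idle_halt/idle_nomwait flags with a single boot_option_idle_override value and one disabled_by_idle_boot_param() predicate that the init/exit paths all consult. A compact sketch of that consolidation; the enum names mirror the patch, while the string parser is invented purely for illustration:

#include <stdio.h>
#include <string.h>

/* Possible "idle=" overrides, mirroring the enum the hunks switch to. */
enum idle_override {
	IDLE_NO_OVERRIDE, IDLE_POLL, IDLE_HALT, IDLE_NOMWAIT, IDLE_FORCE_MWAIT
};

static enum idle_override idle_override = IDLE_NO_OVERRIDE;

/* Parse a hypothetical "idle=..." option into the single override value. */
static void parse_idle_option(const char *arg)
{
	if (!strcmp(arg, "poll"))
		idle_override = IDLE_POLL;
	else if (!strcmp(arg, "halt"))
		idle_override = IDLE_HALT;
	else if (!strcmp(arg, "nomwait"))
		idle_override = IDLE_NOMWAIT;
	else if (!strcmp(arg, "mwait"))
		idle_override = IDLE_FORCE_MWAIT;
}

/* One predicate replaces the scattered per-flag checks. */
static int disabled_by_idle_boot_param(void)
{
	return idle_override == IDLE_POLL ||
	       idle_override == IDLE_FORCE_MWAIT ||
	       idle_override == IDLE_HALT;
}

int main(void)
{
	parse_idle_option("halt");
	printf("C-state setup skipped: %d\n", disabled_by_idle_boot_param());
	return 0;
}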
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ff36327..fa84e97 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,10 +32,6 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/cpufreq.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -370,6 +366,58 @@
 }
 
 /*
+ * This function reevaluates whether the T-state is valid after a CPU
+ * is brought online or taken offline.
+ * Note that it does not reevaluate the following properties of
+ * the T-state:
+ *	1. Control method.
+ *	2. The number of supported T-states.
+ *	3. TSD domain.
+ */
+void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+					unsigned long action)
+{
+	int result = 0;
+
+	if (action == CPU_DEAD) {
+		/* When a CPU goes offline, its T-state throttling
+		 * is invalidated.
+		 */
+		pr->flags.throttling = 0;
+		return;
+	}
+	/* The following rechecks whether the T-state is still valid
+	 * for the newly onlined CPU.
+	 */
+	if (!pr->throttling.state_count) {
+		/* If the T-state count is invalid, throttling is
+		 * invalidated as well.
+		 */
+		pr->flags.throttling = 0;
+		return;
+	}
+	pr->flags.throttling = 1;
+
+	/* Disable throttling (if enabled).  We'll let subsequent
+	 * policy (e.g. thermal) decide to lower performance if it
+	 * so chooses, but for now we'll crank up the speed.
+	 */
+
+	result = acpi_processor_get_throttling(pr);
+	if (result)
+		goto end;
+
+	if (pr->throttling.state) {
+		result = acpi_processor_set_throttling(pr, 0, false);
+		if (result)
+			goto end;
+	}
+
+end:
+	if (result)
+		pr->flags.throttling = 0;
+}
+/*
  * _PTC - Processor Throttling Control (and status) register location
  */
 static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
@@ -876,7 +924,11 @@
 	 */
 	cpumask_copy(saved_mask, &current->cpus_allowed);
 	/* FIXME: use work_on_cpu() */
-	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+		/* Can't migrate to the target pr->id CPU. Exit */
+		free_cpumask_var(saved_mask);
+		return -ENODEV;
+	}
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
 	set_cpus_allowed_ptr(current, saved_mask);
@@ -1051,6 +1103,14 @@
 		return -ENOMEM;
 	}
 
+	if (cpu_is_offline(pr->id)) {
+		/*
+		 * The CPU referenced by pr->id is offline; there is no
+		 * need to change the throttling state any more.
+		 */
+		return -ENODEV;
+	}
+
 	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
@@ -1074,7 +1134,11 @@
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
 		/* FIXME: use work_on_cpu() */
-		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+			/* Can't migrate to the pr->id CPU. Exit */
+			ret = -ENODEV;
+			goto exit;
+		}
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 						t_state.target_state, force);
 	} else {
@@ -1106,7 +1170,8 @@
 			}
 			t_state.cpu = i;
 			/* FIXME: use work_on_cpu() */
-			set_cpus_allowed_ptr(current, cpumask_of(i));
+			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
+				continue;
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state, force);
@@ -1126,6 +1191,7 @@
 	/* restore the previous state */
 	/* FIXME: use work_on_cpu() */
 	set_cpus_allowed_ptr(current, saved_mask);
+exit:
 	free_cpumask_var(online_throttling_cpus);
 	free_cpumask_var(saved_mask);
 	return ret;
@@ -1216,113 +1282,3 @@
 	return result;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
-/* proc interface */
-static int acpi_processor_throttling_seq_show(struct seq_file *seq,
-					      void *offset)
-{
-	struct acpi_processor *pr = seq->private;
-	int i = 0;
-	int result = 0;
-
-	if (!pr)
-		goto end;
-
-	if (!(pr->throttling.state_count > 0)) {
-		seq_puts(seq, "<not supported>\n");
-		goto end;
-	}
-
-	result = acpi_processor_get_throttling(pr);
-
-	if (result) {
-		seq_puts(seq,
-			 "Could not determine current throttling state.\n");
-		goto end;
-	}
-
-	seq_printf(seq, "state count:             %d\n"
-		   "active state:            T%d\n"
-		   "state available: T%d to T%d\n",
-		   pr->throttling.state_count, pr->throttling.state,
-		   pr->throttling_platform_limit,
-		   pr->throttling.state_count - 1);
-
-	seq_puts(seq, "states:\n");
-	if (pr->throttling.acpi_processor_get_throttling ==
-			acpi_processor_get_throttling_fadt) {
-		for (i = 0; i < pr->throttling.state_count; i++)
-			seq_printf(seq, "   %cT%d:                  %02d%%\n",
-				   (i == pr->throttling.state ? '*' : ' '), i,
-				   (pr->throttling.states[i].performance ? pr->
-				    throttling.states[i].performance / 10 : 0));
-	} else {
-		for (i = 0; i < pr->throttling.state_count; i++)
-			seq_printf(seq, "   %cT%d:                  %02d%%\n",
-				   (i == pr->throttling.state ? '*' : ' '), i,
-				   (int)pr->throttling.states_tss[i].
-				   freqpercentage);
-	}
-
-      end:
-	return 0;
-}
-
-static int acpi_processor_throttling_open_fs(struct inode *inode,
-					     struct file *file)
-{
-	return single_open(file, acpi_processor_throttling_seq_show,
-			   PDE(inode)->data);
-}
-
-static ssize_t acpi_processor_write_throttling(struct file *file,
-					       const char __user * buffer,
-					       size_t count, loff_t * data)
-{
-	int result = 0;
-	struct seq_file *m = file->private_data;
-	struct acpi_processor *pr = m->private;
-	char state_string[5] = "";
-	char *charp = NULL;
-	size_t state_val = 0;
-	char tmpbuf[5] = "";
-
-	if (!pr || (count > sizeof(state_string) - 1))
-		return -EINVAL;
-
-	if (copy_from_user(state_string, buffer, count))
-		return -EFAULT;
-
-	state_string[count] = '\0';
-	if ((count > 0) && (state_string[count-1] == '\n'))
-		state_string[count-1] = '\0';
-
-	charp = state_string;
-	if ((state_string[0] == 't') || (state_string[0] == 'T'))
-		charp++;
-
-	state_val = simple_strtoul(charp, NULL, 0);
-	if (state_val >= pr->throttling.state_count)
-		return -EINVAL;
-
-	snprintf(tmpbuf, 5, "%zu", state_val);
-
-	if (strcmp(tmpbuf, charp) != 0)
-		return -EINVAL;
-
-	result = acpi_processor_set_throttling(pr, state_val, false);
-	if (result)
-		return result;
-
-	return count;
-}
-
-const struct file_operations acpi_processor_throttling_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_processor_throttling_open_fs,
-	.read = seq_read,
-	.write = acpi_processor_write_throttling,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-#endif
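Several throttling hunks above stop ignoring the return value of set_cpus_allowed_ptr() and bail out with -ENODEV when the task cannot be migrated to the target CPU, rather than reading or writing per-CPU throttling state from the wrong CPU. A userspace sketch of the same rule using sched_setaffinity(); the helper and the CPU numbers are illustrative:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

/*
 * Bind the calling thread to one CPU before doing per-CPU work, and treat
 * a failed affinity change as an error instead of assuming it worked.
 */
static int run_on_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);

	if (sched_setaffinity(0, sizeof(set), &set)) {
		/* e.g. the CPU is offline or does not exist */
		fprintf(stderr, "cannot move to CPU %d: %s\n",
			cpu, strerror(errno));
		return -ENODEV;
	}

	printf("now restricted to CPU %d, safe to touch its state\n", cpu);
	return 0;
}

int main(void)
{
	run_on_cpu(0);		/* normally succeeds */
	run_on_cpu(1023);	/* normally fails and is reported */
	return 0;
}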
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index e5dbedb..51ae379 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -484,6 +484,8 @@
 		const struct file_operations *state_fops,
 		const struct file_operations *alarm_fops, void *data)
 {
+	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
+			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
 	if (!*dir) {
 		*dir = proc_mkdir(dir_name, parent_dir);
 		if (!*dir) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 29ef505..b99e624 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -778,7 +778,7 @@
 		wakeup->resources.handles[i] = element->reference.handle;
 	}
 
-	acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number);
+	acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
 
  out:
 	kfree(buffer.pointer);
@@ -803,7 +803,7 @@
 	/* Power button, Lid switch always enable wakeup */
 	if (!acpi_match_device_ids(device, button_device_ids)) {
 		device->wakeup.flags.run_wake = 1;
-		device->wakeup.flags.always_enabled = 1;
+		device_set_wakeup_capable(&device->dev, true);
 		return;
 	}
 
@@ -815,16 +815,22 @@
 				!!(event_status & ACPI_EVENT_FLAG_HANDLE);
 }
 
-static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
+static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
 {
+	acpi_handle temp;
 	acpi_status status = 0;
 	int psw_error;
 
+	/* Presence of _PRW indicates wake capable */
+	status = acpi_get_handle(device->handle, "_PRW", &temp);
+	if (ACPI_FAILURE(status))
+		return;
+
 	status = acpi_bus_extract_wakeup_device_power_package(device->handle,
 							      &device->wakeup);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
-		goto end;
+		return;
 	}
 
 	device->wakeup.flags.valid = 1;
@@ -840,13 +846,10 @@
 	if (psw_error)
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				"error in _DSW or _PSW evaluation\n"));
-
-end:
-	if (ACPI_FAILURE(status))
-		device->flags.wake_capable = 0;
-	return 0;
 }
 
+static void acpi_bus_add_power_resource(acpi_handle handle);
+
 static int acpi_bus_get_power_flags(struct acpi_device *device)
 {
 	acpi_status status = 0;
@@ -875,8 +878,12 @@
 		acpi_evaluate_reference(device->handle, object_name, NULL,
 					&ps->resources);
 		if (ps->resources.count) {
+			int j;
+
 			device->power.flags.power_resources = 1;
 			ps->flags.valid = 1;
+			for (j = 0; j < ps->resources.count; j++)
+				acpi_bus_add_power_resource(ps->resources.handles[j]);
 		}
 
 		/* Evaluate "_PSx" to see if we can do explicit sets */
@@ -901,10 +908,7 @@
 	device->power.states[ACPI_STATE_D3].flags.valid = 1;
 	device->power.states[ACPI_STATE_D3].power = 0;
 
-	/* TBD: System wake support and resource requirements. */
-
-	device->power.state = ACPI_STATE_UNKNOWN;
-	acpi_bus_get_power(device->handle, &(device->power.state));
+	acpi_bus_init_power(device);
 
 	return 0;
 }
@@ -947,11 +951,6 @@
 	if (ACPI_SUCCESS(status))
 		device->flags.power_manageable = 1;
 
-	/* Presence of _PRW indicates wake capable */
-	status = acpi_get_handle(device->handle, "_PRW", &temp);
-	if (ACPI_SUCCESS(status))
-		device->flags.wake_capable = 1;
-
 	/* TBD: Performance management */
 
 	return 0;
@@ -1278,11 +1277,7 @@
 	 * Wakeup device management
 	 *-----------------------
 	 */
-	if (device->flags.wake_capable) {
-		result = acpi_bus_get_wakeup_device_flags(device);
-		if (result)
-			goto end;
-	}
+	acpi_bus_get_wakeup_device_flags(device);
 
 	/*
 	 * Performance Management
@@ -1326,6 +1321,20 @@
 #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
 			  ACPI_STA_DEVICE_UI      | ACPI_STA_DEVICE_FUNCTIONING)
 
+static void acpi_bus_add_power_resource(acpi_handle handle)
+{
+	struct acpi_bus_ops ops = {
+		.acpi_op_add = 1,
+		.acpi_op_start = 1,
+	};
+	struct acpi_device *device = NULL;
+
+	acpi_bus_get_device(handle, &device);
+	if (!device)
+		acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
+					ACPI_STA_DEFAULT, &ops);
+}
+
 static int acpi_bus_type_and_status(acpi_handle handle, int *type,
 				    unsigned long long *sta)
 {
@@ -1371,7 +1380,6 @@
 	struct acpi_bus_ops *ops = context;
 	int type;
 	unsigned long long sta;
-	struct acpi_device_wakeup wakeup;
 	struct acpi_device *device;
 	acpi_status status;
 	int result;
@@ -1382,7 +1390,13 @@
 
 	if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
 	    !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
-		acpi_bus_extract_wakeup_device_power_package(handle, &wakeup);
+		struct acpi_device_wakeup wakeup;
+		acpi_handle temp;
+
+		status = acpi_get_handle(handle, "_PRW", &temp);
+		if (ACPI_SUCCESS(status))
+			acpi_bus_extract_wakeup_device_power_package(handle,
+								     &wakeup);
 		return AE_CTRL_DEPTH;
 	}
 
@@ -1467,7 +1481,7 @@
 
 	result = acpi_bus_scan(device->handle, &ops, NULL);
 
-	acpi_update_gpes();
+	acpi_update_all_gpes();
 
 	return result;
 }
@@ -1573,6 +1587,8 @@
 		printk(KERN_ERR PREFIX "Could not register bus type\n");
 	}
 
+	acpi_power_init();
+
 	/*
 	 * Enumerate devices in the ACPI namespace.
 	 */
@@ -1584,7 +1600,7 @@
 	if (result)
 		acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
 	else
-		acpi_update_gpes();
+		acpi_update_all_gpes();
 
 	return result;
 }
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index febb153..d6a8cd1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -124,8 +124,7 @@
 static int acpi_pm_pre_suspend(void)
 {
 	acpi_pm_freeze();
-	suspend_nvs_save();
-	return 0;
+	return suspend_nvs_save();
 }
 
 /**
@@ -151,7 +150,7 @@
 {
 	int error = __acpi_pm_prepare();
 	if (!error)
-		acpi_pm_pre_suspend();
+		error = acpi_pm_pre_suspend();
 
 	return error;
 }
@@ -167,6 +166,7 @@
 	u32 acpi_state = acpi_target_sleep_state;
 
 	acpi_ec_unblock_transactions();
+	suspend_nvs_free();
 
 	if (acpi_state == ACPI_STATE_S0)
 		return;
@@ -187,7 +187,6 @@
  */
 static void acpi_pm_end(void)
 {
-	suspend_nvs_free();
 	/*
 	 * This is necessary in case acpi_pm_finish() is not called during a
 	 * failing transition to a sleep state.
@@ -319,7 +318,7 @@
 	}
 }
 
-static struct platform_suspend_ops acpi_suspend_ops = {
+static const struct platform_suspend_ops acpi_suspend_ops = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin,
 	.prepare_late = acpi_pm_prepare,
@@ -347,7 +346,7 @@
  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
  * been requested.
  */
-static struct platform_suspend_ops acpi_suspend_ops_old = {
+static const struct platform_suspend_ops acpi_suspend_ops_old = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin_old,
 	.prepare_late = acpi_pm_pre_suspend,
@@ -435,6 +434,14 @@
 		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
 		},
 	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Averatec AV1020-ED2",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
@@ -506,7 +513,7 @@
 	acpi_enable_all_runtime_gpes();
 }
 
-static struct platform_hibernation_ops acpi_hibernation_ops = {
+static const struct platform_hibernation_ops acpi_hibernation_ops = {
 	.begin = acpi_hibernation_begin,
 	.end = acpi_pm_end,
 	.pre_snapshot = acpi_pm_prepare,
@@ -549,7 +556,7 @@
  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
  * been requested.
  */
-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
 	.begin = acpi_hibernation_begin_old,
 	.end = acpi_pm_end,
 	.pre_snapshot = acpi_pm_pre_suspend,
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index f8588f8..61891e7 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -438,7 +438,7 @@
 	return;
 }
 
-void acpi_os_gpe_count(u32 gpe_number)
+static void gpe_count(u32 gpe_number)
 {
 	acpi_gpe_count++;
 
@@ -454,7 +454,7 @@
 	return;
 }
 
-void acpi_os_fixed_event_count(u32 event_number)
+static void fixed_event_count(u32 event_number)
 {
 	if (!all_counters)
 		return;
@@ -468,6 +468,16 @@
 	return;
 }
 
+static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
+	u32 event_number, void *context)
+{
+	if (event_type == ACPI_EVENT_TYPE_GPE)
+		gpe_count(event_number);
+
+	if (event_type == ACPI_EVENT_TYPE_FIXED)
+		fixed_event_count(event_number);
+}
+
 static int get_status(u32 index, acpi_event_status *status,
 		      acpi_handle *handle)
 {
@@ -601,6 +611,7 @@
 
 void acpi_irq_stats_init(void)
 {
+	acpi_status status;
 	int i;
 
 	if (all_counters)
@@ -619,6 +630,10 @@
 	if (all_counters == NULL)
 		goto fail;
 
+	status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
+	if (ACPI_FAILURE(status))
+		goto fail;
+
 	counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
 				GFP_KERNEL);
 	if (counter_attrs == NULL)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5a27b0a..2607e17 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1059,8 +1059,9 @@
 			break;
 		tz->trips.active[i].flags.enabled = 1;
 		for (j = 0; j < tz->trips.active[i].devices.count; j++) {
-			result = acpi_bus_get_power(tz->trips.active[i].devices.
-			    handles[j], &power_state);
+			result = acpi_bus_update_power(
+					tz->trips.active[i].devices.handles[j],
+					&power_state);
 			if (result || (power_state != ACPI_STATE_D0)) {
 				tz->trips.active[i].flags.enabled = 0;
 				break;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5cd0228..90f8f76 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -33,7 +33,6 @@
 #include <linux/input.h>
 #include <linux/backlight.h>
 #include <linux/thermal.h>
-#include <linux/video_output.h>
 #include <linux/sort.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
@@ -81,6 +80,13 @@
 static int allow_duplicates;
 module_param(allow_duplicates, bool, 0644);
 
+/*
+ * Some BIOSes claim they use minimum backlight at boot,
+ * and this may leave the screen dimmed after boot.
+ */
+static int use_bios_initial_backlight = 1;
+module_param(use_bios_initial_backlight, bool, 0644);
+
 static int register_count = 0;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -172,9 +178,6 @@
 	u8 _BQC:1;		/* Get current brightness level */
 	u8 _BCQ:1;		/* Some buggy BIOS uses _BCQ instead of _BQC */
 	u8 _DDC:1;		/*Return the EDID for this device */
-	u8 _DCS:1;		/*Return status of output device */
-	u8 _DGS:1;		/*Query graphics state */
-	u8 _DSS:1;		/*Device state set */
 };
 
 struct acpi_video_brightness_flags {
@@ -202,7 +205,6 @@
 	struct acpi_video_device_brightness *brightness;
 	struct backlight_device *backlight;
 	struct thermal_cooling_device *cooling_dev;
-	struct output_device *output_dev;
 };
 
 static const char device_decode[][30] = {
@@ -226,10 +228,6 @@
 				     u32 level_current, u32 event);
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
 					 int event);
-static int acpi_video_device_get_state(struct acpi_video_device *device,
-			    unsigned long long *state);
-static int acpi_video_output_get(struct output_device *od);
-static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
 
 /*backlight device sysfs support*/
 static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -260,35 +258,11 @@
 				vd->brightness->levels[request_level]);
 }
 
-static struct backlight_ops acpi_backlight_ops = {
+static const struct backlight_ops acpi_backlight_ops = {
 	.get_brightness = acpi_video_get_brightness,
 	.update_status  = acpi_video_set_brightness,
 };
 
-/*video output device sysfs support*/
-static int acpi_video_output_get(struct output_device *od)
-{
-	unsigned long long state;
-	struct acpi_video_device *vd =
-		(struct acpi_video_device *)dev_get_drvdata(&od->dev);
-	acpi_video_device_get_state(vd, &state);
-	return (int)state;
-}
-
-static int acpi_video_output_set(struct output_device *od)
-{
-	unsigned long state = od->request_state;
-	struct acpi_video_device *vd=
-		(struct acpi_video_device *)dev_get_drvdata(&od->dev);
-	return acpi_video_device_set_state(vd, state);
-}
-
-static struct output_properties acpi_output_properties = {
-	.set_state = acpi_video_output_set,
-	.get_status = acpi_video_output_get,
-};
-
-
 /* thermal cooling device callbacks */
 static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned
 			       long *state)
@@ -344,34 +318,6 @@
                                Video Management
    -------------------------------------------------------------------------- */
 
-/* device */
-
-static int
-acpi_video_device_get_state(struct acpi_video_device *device,
-			    unsigned long long *state)
-{
-	int status;
-
-	status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state);
-
-	return status;
-}
-
-static int
-acpi_video_device_set_state(struct acpi_video_device *device, int state)
-{
-	int status;
-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list args = { 1, &arg0 };
-	unsigned long long ret;
-
-
-	arg0.integer.value = state;
-	status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret);
-
-	return status;
-}
-
 static int
 acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
 				   union acpi_object **levels)
@@ -766,9 +712,11 @@
 		 * when invoked for the first time, i.e. level_old is invalid.
 		 * set the backlight to max_level in this case
 		 */
-		for (i = 2; i < br->count; i++)
-			if (level_old == br->levels[i])
-				level = level_old;
+		if (use_bios_initial_backlight) {
+			for (i = 2; i < br->count; i++)
+				if (level_old == br->levels[i])
+					level = level_old;
+		}
 		goto set_level;
 	}
 
@@ -831,15 +779,6 @@
 	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
 		device->cap._DDC = 1;
 	}
-	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) {
-		device->cap._DCS = 1;
-	}
-	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) {
-		device->cap._DGS = 1;
-	}
-	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) {
-		device->cap._DSS = 1;
-	}
 
 	if (acpi_video_backlight_support()) {
 		struct backlight_properties props;
@@ -904,21 +843,6 @@
 			printk(KERN_ERR PREFIX "Create sysfs link\n");
 
 	}
-
-	if (acpi_video_display_switch_support()) {
-
-		if (device->cap._DCS && device->cap._DSS) {
-			static int count;
-			char *name;
-			name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
-			if (!name)
-				return;
-			count++;
-			device->output_dev = video_output_register(name,
-					NULL, device, &acpi_output_properties);
-			kfree(name);
-		}
-	}
 }
 
 /*
@@ -1360,6 +1284,9 @@
 		if (!video_device)
 			continue;
 
+		if (!video_device->cap._DDC)
+			continue;
+
 		if (type) {
 			switch (type) {
 			case ACPI_VIDEO_DISPLAY_CRT:
@@ -1452,7 +1379,6 @@
 		thermal_cooling_device_unregister(device->cooling_dev);
 		device->cooling_dev = NULL;
 	}
-	video_output_unregister(device->output_dev);
 
 	return 0;
 }
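The video.c hunk introduces a writable module parameter, use_bios_initial_backlight, so users can opt out of trusting the BIOS-reported initial level. A minimal skeleton of that module-parameter pattern as a standalone kernel module sketch; the module name and parameter below are invented for illustration and are not part of the patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool use_bios_value = true;
module_param(use_bios_value, bool, 0644);
MODULE_PARM_DESC(use_bios_value, "Trust the firmware-reported initial value");

static int __init demo_init(void)
{
	/* The parameter can also be flipped at runtime via sysfs (0644). */
	pr_info("demo: use_bios_value=%d\n", use_bios_value);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");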
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b836761..5af3479 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -17,15 +17,14 @@
  * capabilities the graphics cards plugged in support. The check for general
  * video capabilities will be triggered by the first caller of
  * acpi_video_get_capabilities(NULL); which will happen when the first
- * backlight (or display output) switching supporting driver calls:
+ * driver supporting backlight switching calls:
  * acpi_video_backlight_support();
  *
  * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
  * are available, video.ko should be used to handle the device.
  *
  * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi,
- * sony_acpi,... can take care about backlight brightness and display output
- * switching.
+ * sony_acpi,... can take care of backlight brightness.
  *
  * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
  * this file will not be compiled, acpi_video_get_capabilities() and
@@ -161,8 +160,6 @@
 		 *
 		 *   if (dmi_name_in_vendors("XY")) {
 		 *	acpi_video_support |=
-		 *		ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR;
-		 *	acpi_video_support |=
 		 *		ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
 		 *}
 		 */
@@ -212,33 +209,8 @@
 EXPORT_SYMBOL(acpi_video_backlight_support);
 
 /*
- * Returns true if video.ko can do display output switching.
- * This does not work well/at all with binary graphics drivers
- * which disable system io ranges and do it on their own.
- */
-int acpi_video_display_switch_support(void)
-{
-	if (!acpi_video_caps_checked)
-		acpi_video_get_capabilities(NULL);
-
-	if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR)
-		return 0;
-	else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO)
-		return 1;
-
-	if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR)
-		return 0;
-	else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO)
-		return 1;
-
-	return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING;
-}
-EXPORT_SYMBOL(acpi_video_display_switch_support);
-
-/*
- * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video
- * To force that backlight or display output switching is processed by vendor
- * specific acpi drivers or video.ko driver.
+ * Use acpi_backlight=vendor/video to force backlight switching to be
+ * handled by vendor-specific ACPI drivers or by the video.ko driver.
  */
 static int __init acpi_backlight(char *str)
 {
@@ -255,19 +227,3 @@
 	return 1;
 }
 __setup("acpi_backlight=", acpi_backlight);
-
-static int __init acpi_display_output(char *str)
-{
-	if (str == NULL || *str == '\0')
-		return 1;
-	else {
-		if (!strcmp("vendor", str))
-			acpi_video_support |=
-				ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR;
-		if (!strcmp("video", str))
-			acpi_video_support |=
-				ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
-	}
-	return 1;
-}
-__setup("acpi_display_output=", acpi_display_output);
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index f62a50c..7bfbe40 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -37,15 +37,16 @@
 			container_of(node, struct acpi_device, wakeup_list);
 
 		if (!dev->wakeup.flags.valid
-		    || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
-		    || sleep_state > (u32) dev->wakeup.sleep_state)
+		    || sleep_state > (u32) dev->wakeup.sleep_state
+		    || !(device_may_wakeup(&dev->dev)
+		        || dev->wakeup.prepare_count))
 			continue;
 
-		if (dev->wakeup.state.enabled)
+		if (device_may_wakeup(&dev->dev))
 			acpi_enable_wakeup_device_power(dev, sleep_state);
 
 		/* The wake-up power should have been enabled already. */
-		acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+		acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
 				ACPI_GPE_ENABLE);
 	}
 }
@@ -63,14 +64,15 @@
 			container_of(node, struct acpi_device, wakeup_list);
 
 		if (!dev->wakeup.flags.valid
-		    || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
-		    || (sleep_state > (u32) dev->wakeup.sleep_state))
+		    || sleep_state > (u32) dev->wakeup.sleep_state
+		    || !(device_may_wakeup(&dev->dev)
+		        || dev->wakeup.prepare_count))
 			continue;
 
-		acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+		acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
 				ACPI_GPE_DISABLE);
 
-		if (dev->wakeup.state.enabled)
+		if (device_may_wakeup(&dev->dev))
 			acpi_disable_wakeup_device_power(dev);
 	}
 }
@@ -84,8 +86,12 @@
 		struct acpi_device *dev = container_of(node,
 						       struct acpi_device,
 						       wakeup_list);
-		if (dev->wakeup.flags.always_enabled)
-			dev->wakeup.state.enabled = 1;
+		if (device_can_wakeup(&dev->dev)) {
+			/* Button GPEs are supposed to be always enabled. */
+			acpi_enable_gpe(dev->wakeup.gpe_device,
+					dev->wakeup.gpe_number);
+			device_set_wakeup_enable(&dev->dev, true);
+		}
 	}
 	mutex_unlock(&acpi_device_lock);
 	return 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c6b298d..c2328ae 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -783,7 +783,7 @@
 
 config PATA_PLATFORM
 	tristate "Generic platform device PATA support"
-	depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
+	depends on EXPERT || PPC || HAVE_PATA_PLATFORM
 	help
 	  This option enables support for generic directly connected ATA
 	  devices commonly found on embedded systems.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 3288263..b8d96ce 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -260,6 +260,7 @@
 	{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
 	{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
+	{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -379,6 +380,8 @@
 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
 	{ PCI_DEVICE(0x1b4b, 0x9123),
+	  .class = PCI_CLASS_STORAGE_SATA_AHCI,
+	  .class_mask = 0xffffff,
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9128 */
 
 	/* Promise */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0a6a943..d4e52e2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2240,7 +2240,7 @@
 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
 				ata_dev_printk(dev, KERN_WARNING,
 					       "supports DRM functions and may "
-					       "not be fully accessable.\n");
+					       "not be fully accessible.\n");
 			snprintf(revbuf, 7, "CFA");
 		} else {
 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
@@ -2248,7 +2248,7 @@
 			if (ata_id_has_tpm(id))
 				ata_dev_printk(dev, KERN_WARNING,
 					       "supports DRM functions and may "
-					       "not be fully accessable.\n");
+					       "not be fully accessible.\n");
 		}
 
 		dev->n_sectors = ata_id_n_sectors(id);
@@ -4138,6 +4138,7 @@
 	 * device and controller are SATA.
 	 */
 	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-212D",	"1.28", ATA_HORKAGE_NOSETXFER },
 
 	/* End Marker */
 	{ }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5defc74..600f635 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1099,9 +1099,9 @@
 		struct request_queue *q = sdev->request_queue;
 		void *buf;
 
-		/* set the min alignment and padding */
-		blk_queue_update_dma_alignment(sdev->request_queue,
-					       ATA_DMA_PAD_SZ - 1);
+		sdev->sector_size = ATA_SECT_SIZE;
+
+		/* set DMA padding */
 		blk_queue_update_dma_pad(sdev->request_queue,
 					 ATA_DMA_PAD_SZ - 1);
 
@@ -1115,13 +1115,25 @@
 
 		blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
 	} else {
-		/* ATA devices must be sector aligned */
 		sdev->sector_size = ata_id_logical_sector_size(dev->id);
-		blk_queue_update_dma_alignment(sdev->request_queue,
-					       sdev->sector_size - 1);
 		sdev->manage_start_stop = 1;
 	}
 
+	/*
+	 * ata_pio_sectors() expects buffer for each sector to not cross
+	 * page boundary.  Enforce it by requiring buffers to be sector
+	 * aligned, which works iff sector_size is not larger than
+	 * PAGE_SIZE.  ATAPI devices also need the alignment as
+	 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
+	 */
+	if (sdev->sector_size > PAGE_SIZE)
+		ata_dev_printk(dev, KERN_WARNING,
+			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
+			sdev->sector_size);
+
+	blk_queue_update_dma_alignment(sdev->request_queue,
+				       sdev->sector_size - 1);
+
 	if (dev->flags & ATA_DFLAG_AN)
 		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
 
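The libata-scsi.c hunk above enforces sector-size alignment for both ATA and ATAPI buffers so that ata_pio_sectors() never sees a sector crossing a page boundary, and warns when sector_size exceeds PAGE_SIZE. A small userspace sketch of the alignment arithmetic involved; the 4096-byte sector size is just an example:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Buffers must be aligned to the logical sector size (a power of two). */
static int buffer_ok(const void *buf, size_t sector_size)
{
	return ((uintptr_t)buf & (sector_size - 1)) == 0;
}

int main(void)
{
	size_t sector_size = 4096;	/* illustrative 4Kn drive */
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf;

	/*
	 * A sector-aligned buffer of sector_size bytes cannot cross a page
	 * boundary as long as sector_size <= PAGE_SIZE; otherwise warn,
	 * which is the condition the hunk above reports.
	 */
	if ((long)sector_size > page_size)
		fprintf(stderr, "sector_size=%zu > PAGE_SIZE, PIO may malfunction\n",
			sector_size);

	if (posix_memalign(&buf, sector_size, sector_size))
		return 1;

	printf("aligned: %d\n", buffer_ok(buf, sector_size));
	free(buf);
	return 0;
}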
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index d7e57db..538ec38 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt366"
-#define DRV_VERSION	"0.6.9"
+#define DRV_VERSION	"0.6.10"
 
 struct hpt_clock {
 	u8	xfer_mode;
@@ -160,8 +160,8 @@
 
 	while (list[i] != NULL) {
 		if (!strcmp(list[i], model_num)) {
-			printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
-				modestr, list[i]);
+			pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+				   modestr, list[i]);
 			return 1;
 		}
 		i++;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index efdd18b..4c5b518 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -24,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt37x"
-#define DRV_VERSION	"0.6.18"
+#define DRV_VERSION	"0.6.22"
 
 struct hpt_clock {
 	u8	xfer_speed;
@@ -229,8 +229,8 @@
 
 	while (list[i] != NULL) {
 		if (!strcmp(list[i], model_num)) {
-			printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
-				modestr, list[i]);
+			pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+				   modestr, list[i]);
 			return 1;
 		}
 		i++;
@@ -642,7 +642,6 @@
 static struct ata_port_operations hpt374_fn1_port_ops = {
 	.inherits	= &hpt372_port_ops,
 	.cable_detect	= hpt374_fn1_cable_detect,
-	.prereset	= hpt37x_pre_reset,
 };
 
 /**
@@ -803,7 +802,7 @@
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &hpt302_port_ops
 	};
-	/* HPT374 - UDMA100, function 1 uses different prereset method */
+	/* HPT374 - UDMA100, function 1 uses different cable_detect method */
 	static const struct ata_port_info info_hpt374_fn0 = {
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = ATA_PIO4,
@@ -838,7 +837,8 @@
 	if (rc)
 		return rc;
 
-	if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
+	switch (dev->device) {
+	case PCI_DEVICE_ID_TTI_HPT366:
 		/* May be a later chip in disguise. Check */
 		/* Older chips are in the HPT366 driver. Ignore them */
 		if (rev < 3)
@@ -863,54 +863,50 @@
 			chip_table = &hpt372;
 			break;
 		default:
-			printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype, "
+			pr_err(DRV_NAME ": Unknown HPT366 subtype, "
 			       "please report (%d).\n", rev);
 			return -ENODEV;
 		}
-	} else {
-		switch (dev->device) {
-		case PCI_DEVICE_ID_TTI_HPT372:
-			/* 372N if rev >= 2 */
-			if (rev >= 2)
-				return -ENODEV;
-			ppi[0] = &info_hpt372;
-			chip_table = &hpt372a;
-			break;
-		case PCI_DEVICE_ID_TTI_HPT302:
-			/* 302N if rev > 1 */
-			if (rev > 1)
-				return -ENODEV;
-			ppi[0] = &info_hpt302;
-			/* Check this */
-			chip_table = &hpt302;
-			break;
-		case PCI_DEVICE_ID_TTI_HPT371:
-			if (rev > 1)
-				return -ENODEV;
-			ppi[0] = &info_hpt302;
-			chip_table = &hpt371;
-			/*
-			 * Single channel device, master is not present
-			 * but the BIOS (or us for non x86) must mark it
-			 * absent
-			 */
-			pci_read_config_byte(dev, 0x50, &mcr1);
-			mcr1 &= ~0x04;
-			pci_write_config_byte(dev, 0x50, mcr1);
-			break;
-		case PCI_DEVICE_ID_TTI_HPT374:
-			chip_table = &hpt374;
-			if (!(PCI_FUNC(dev->devfn) & 1))
-				*ppi = &info_hpt374_fn0;
-			else
-				*ppi = &info_hpt374_fn1;
-			break;
-		default:
-			printk(KERN_ERR
-			       "pata_hpt37x: PCI table is bogus, please report (%d).\n",
-			       dev->device);
-				return -ENODEV;
-		}
+		break;
+	case PCI_DEVICE_ID_TTI_HPT372:
+		/* 372N if rev >= 2 */
+		if (rev >= 2)
+			return -ENODEV;
+		ppi[0] = &info_hpt372;
+		chip_table = &hpt372a;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT302:
+		/* 302N if rev > 1 */
+		if (rev > 1)
+			return -ENODEV;
+		ppi[0] = &info_hpt302;
+		/* Check this */
+		chip_table = &hpt302;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT371:
+		if (rev > 1)
+			return -ENODEV;
+		ppi[0] = &info_hpt302;
+		chip_table = &hpt371;
+		/*
+		 * Single channel device, master is not present but the BIOS
+		 * (or us for non x86) must mark it absent
+		 */
+		pci_read_config_byte(dev, 0x50, &mcr1);
+		mcr1 &= ~0x04;
+		pci_write_config_byte(dev, 0x50, mcr1);
+		break;
+	case PCI_DEVICE_ID_TTI_HPT374:
+		chip_table = &hpt374;
+		if (!(PCI_FUNC(dev->devfn) & 1))
+			*ppi = &info_hpt374_fn0;
+		else
+			*ppi = &info_hpt374_fn1;
+		break;
+	default:
+		pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
+		       dev->device);
+		return -ENODEV;
 	}
 	/* Ok so this is a chip we support */
 
@@ -957,8 +953,7 @@
 		u8 sr;
 		u32 total = 0;
 
-		printk(KERN_WARNING
-		       "pata_hpt37x: BIOS has not set timing clocks.\n");
+		pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n");
 
 		/* This is the process the HPT371 BIOS is reported to use */
 		for (i = 0; i < 128; i++) {
@@ -1014,7 +1009,7 @@
 					       (f_high << 16) | f_low | 0x100);
 		}
 		if (adjust == 8) {
-			printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n");
+			pr_err(DRV_NAME ": DPLL did not stabilize!\n");
 			return -ENODEV;
 		}
 		if (dpll == 3)
@@ -1022,8 +1017,8 @@
 		else
 			private_data = (void *)hpt37x_timings_50;
 
-		printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n",
-		       MHz[clock_slot], MHz[dpll]);
+		pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n",
+			MHz[clock_slot], MHz[dpll]);
 	} else {
 		private_data = (void *)chip_table->clocks[clock_slot];
 		/*
@@ -1036,8 +1031,9 @@
 			ppi[0] = &info_hpt370_33;
 		if (clock_slot < 2 && ppi[0] == &info_hpt370a)
 			ppi[0] = &info_hpt370a_33;
-		printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n",
-		       chip_table->name, MHz[clock_slot]);
+
+		pr_info(DRV_NAME ": %s using %dMHz bus clock.\n",
+			chip_table->name, MHz[clock_slot]);
 	}
 
 	/* Now kick off ATA set up */
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index d2239bb..eca68ca 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x2n"
-#define DRV_VERSION	"0.3.13"
+#define DRV_VERSION	"0.3.14"
 
 enum {
 	HPT_PCI_FAST	=	(1 << 31),
@@ -418,7 +418,7 @@
 		u16 sr;
 		u32 total = 0;
 
-		printk(KERN_WARNING "pata_hpt3x2n: BIOS clock data not set.\n");
+		pr_warning(DRV_NAME ": BIOS clock data not set.\n");
 
 		/* This is the process the HPT371 BIOS is reported to use */
 		for (i = 0; i < 128; i++) {
@@ -528,8 +528,7 @@
 		ppi[0] = &info_hpt372n;
 		break;
 	default:
-		printk(KERN_ERR
-		       "pata_hpt3x2n: PCI table is bogus please report (%d).\n",
+		pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
 		       dev->device);
 		return -ENODEV;
 	}
@@ -579,12 +578,11 @@
 		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
 	}
 	if (adjust == 8) {
-		printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n");
+		pr_err(DRV_NAME ": DPLL did not stabilize!\n");
 		return -ENODEV;
 	}
 
-	printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n",
-	       pci_mhz);
+	pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz);
 
 	/*
 	 * Set our private data up. We only need a few flags
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 8cc536e..d7d8026 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -610,7 +610,7 @@
 };
 
 static struct ata_port_operations mpc52xx_ata_port_ops = {
-	.inherits		= &ata_sff_port_ops,
+	.inherits		= &ata_bmdma_port_ops,
 	.sff_dev_select		= mpc52xx_ata_dev_select,
 	.set_piomode		= mpc52xx_ata_set_piomode,
 	.set_dmamode		= mpc52xx_ata_set_dmamode,
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index b777176..e079cf2 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -370,7 +370,7 @@
 	if (pci_resource_len(pdev, 0) == 0)
 		return -ENODEV;
 
-	/* map IO regions and intialize host accordingly */
+	/* map IO regions and initialize host accordingly */
 	rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
 	if (rc == -EBUSY)
 		pcim_pin_device(pdev);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b65..9f47e86 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@
   const struct firmware *fw;
   unsigned long start_address;
   const struct ihex_binrec *rec;
+  const char *errmsg = 0;
   int res;
-  
+
   res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
   if (res) {
     PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@
   /* First record contains just the start address */
   rec = (const struct ihex_binrec *)fw->data;
   if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
-    PRINTK (KERN_ERR, "Bad microcode data (no start record)");
-    return -EINVAL;
+    errmsg = "no start record";
+    goto fail;
   }
   start_address = be32_to_cpup((__be32 *)rec->data);
 
@@ -1950,12 +1951,12 @@
     PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
 	    be16_to_cpu(rec->len));
     if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
-	    PRINTK (KERN_ERR, "Bad microcode data (record too long)");
-	    return -EINVAL;
+	    errmsg = "record too long";
+	    goto fail;
     }
     if (be16_to_cpu(rec->len) & 3) {
-	    PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
-	    return -EINVAL;
+	    errmsg = "odd number of bytes";
+	    goto fail;
     }
     res = loader_write(lb, dev, rec);
     if (res)
@@ -1970,6 +1971,10 @@
     res = loader_start(lb, dev, start_address);
 
   return res;
+fail:
+  release_firmware(fw);
+  PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+  return -EINVAL;
 }
 
 /********** give adapter parameters **********/
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index bca9cb8..487a547 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -151,7 +151,7 @@
 	spin_unlock_irqrestore(&idt77105_priv_lock, flags);
 	if (arg == NULL)
 		return 0;
-	return copy_to_user(arg, &PRIV(dev)->stats,
+	return copy_to_user(arg, &stats,
 		    sizeof(struct idt77105_stats)) ? -EFAULT : 0;
 }
 
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 5042bb2..f53a43a 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -572,7 +572,7 @@
 #define SAR_STAT_TSQF       0x00001000 /* Transmit Status Queue full      */
 #define SAR_STAT_TMROF      0x00000800 /* Timer overflow                  */
 #define SAR_STAT_PHYI       0x00000400 /* PHY device Interrupt flag       */
-#define SAR_STAT_CMDBZ      0x00000200 /* ABR SAR Comand Busy Flag        */
+#define SAR_STAT_CMDBZ      0x00000200 /* ABR SAR Command Busy Flag       */
 #define SAR_STAT_FBQ3A      0x00000100 /* Free Buffer Queue 3 Attention   */
 #define SAR_STAT_FBQ2A      0x00000080 /* Free Buffer Queue 2 Attention   */
 #define SAR_STAT_RSQF       0x00000040 /* Receive Status Queue full       */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7292540..d80d51b 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2063,7 +2063,7 @@
 		- UBR Table size is 4K  
 		- UBR wait queue is 4K  
 	   since the table and wait queues are contiguous, all the bytes   
-	   can be initialized by one memeset.  
+	   can be initialized by one memset.
 	*/  
         
         vcsize_sel = 0;
@@ -2089,7 +2089,7 @@
 		- ABR Table size is 2K  
 		- ABR wait queue is 2K  
 	   since the table and wait queues are contiguous, all the bytes   
-	   can be intialized by one memeset.  
+	   can be initialized by one memset.
 	*/  
         i = ABR_SCHED_TABLE * iadev->memSize;
         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4..25ef1a4 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@
 	}
 
 	skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-	if (!skb && net_ratelimit()) {
-		dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+	if (!skb) {
+		if (net_ratelimit())
+			dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
 		return -ENOMEM;
 	}
 	header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd96345..d57e8d0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -70,7 +70,7 @@
 	  If unsure say Y here.
 
 config FW_LOADER
-	tristate "Userspace firmware loading support" if EMBEDDED
+	tristate "Userspace firmware loading support" if EXPERT
 	default y
 	---help---
 	  This option is provided for the case where no in-kernel-tree modules
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index e243bd4..000e7b2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -975,7 +975,7 @@
 EXPORT_SYMBOL_GPL(bus_get_device_klist);
 
 /*
- * Yes, this forcably breaks the klist abstraction temporarily.  It
+ * Yes, this forcibly breaks the klist abstraction temporarily.  It
  * just wants to sort the klist, not change reference counts and
  * take/drop locks rapidly in the process.  It does all this while
  * holding the lock for the list, so objects can't otherwise be
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ce012a9..36b4305 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -117,12 +117,21 @@
 		       "Node %d WritebackTmp:   %8lu kB\n"
 		       "Node %d Slab:           %8lu kB\n"
 		       "Node %d SReclaimable:   %8lu kB\n"
-		       "Node %d SUnreclaim:     %8lu kB\n",
+		       "Node %d SUnreclaim:     %8lu kB\n"
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		       "Node %d AnonHugePages:  %8lu kB\n"
+#endif
+			,
 		       nid, K(node_page_state(nid, NR_FILE_DIRTY)),
 		       nid, K(node_page_state(nid, NR_WRITEBACK)),
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
+		       nid, K(node_page_state(nid, NR_ANON_PAGES)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
+			HPAGE_PMD_NR
+#endif
+		       ),
 		       nid, K(node_page_state(nid, NR_SHMEM)),
 		       nid, node_page_state(nid, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
@@ -133,7 +142,13 @@
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
 				node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
-		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			, nid,
+			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
+			HPAGE_PMD_NR)
+#endif
+		       );
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
 }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2a52270..8340497 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -8,7 +8,7 @@
  *
  *
  * The driver model core calls device_pm_add() when a device is registered.
- * This will intialize the embedded device_pm_info object in the device
+ * This will initialize the embedded device_pm_info object in the device
  * and add it to the list of power-controlled devices. sysfs entries for
  * controlling device power management will also be added.
  *
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 656493a..42615b4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -407,12 +407,15 @@
 		goto out;
 	}
 
+	/* Maybe the parent is now able to suspend. */
 	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
-		spin_unlock_irq(&dev->power.lock);
+		spin_unlock(&dev->power.lock);
 
-		pm_request_idle(parent);
+		spin_lock(&parent->power.lock);
+		rpm_idle(parent, RPM_ASYNC);
+		spin_unlock(&parent->power.lock);
 
-		spin_lock_irq(&dev->power.lock);
+		spin_lock(&dev->power.lock);
 	}
 
  out:
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4b9359a..83c32cb 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -464,6 +464,7 @@
 	tristate "Xen virtual block device support"
 	depends on XEN
 	default y
+	select XEN_XENBUS_FRONTEND
 	help
 	  This driver implements the front-end of the Xen virtual
 	  block device driver.  It communicates with a back-end driver
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index d7f463d..40528ba 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,4 +39,4 @@
 obj-$(CONFIG_BLK_DEV_DRBD)     += drbd/
 obj-$(CONFIG_BLK_DEV_RBD)     += rbd.o
 
-swim_mod-objs	:= swim.o swim_asm.o
+swim_mod-y	:= swim.o swim_asm.o
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
index e76d997..06ea82c 100644
--- a/drivers/block/aoe/Makefile
+++ b/drivers/block/aoe/Makefile
@@ -3,4 +3,4 @@
 #
 
 obj-$(CONFIG_ATA_OVER_ETH)	+= aoe.o
-aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
+aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8e0f925..9279272 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -238,9 +238,9 @@
 /*
  * Enqueuing and dequeuing functions for cmdlists.
  */
-static inline void addQ(struct hlist_head *list, CommandList_struct *c)
+static inline void addQ(struct list_head *list, CommandList_struct *c)
 {
-	hlist_add_head(&c->list, list);
+	list_add_tail(&c->list, list);
 }
 
 static inline void removeQ(CommandList_struct *c)
@@ -253,12 +253,12 @@
 	 * them off as 'stale' to prevent the driver from
 	 * falling over.
 	 */
-	if (WARN_ON(hlist_unhashed(&c->list))) {
+	if (WARN_ON(list_empty(&c->list))) {
 		c->cmd_type = CMD_MSG_STALE;
 		return;
 	}
 
-	hlist_del_init(&c->list);
+	list_del_init(&c->list);
 }
 
 static void enqueue_cmd_and_start_io(ctlr_info_t *h,
@@ -905,7 +905,7 @@
 
 	c->cmdindex = i;
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (__u32) cmd_dma_handle;
 	temp64.val = (__u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -944,7 +944,7 @@
 	}
 	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (__u32) cmd_dma_handle;
 	temp64.val = (__u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2833,7 +2833,7 @@
 	sector_t total_size;
 	InquiryData_struct *inq_buff = NULL;
 
-	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+	for (logvol = 0; logvol <= h->highest_lun; logvol++) {
 		if (!h->drv[logvol])
 			continue;
 		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
@@ -2888,8 +2888,8 @@
 {
 	CommandList_struct *c;
 
-	while (!hlist_empty(&h->reqQ)) {
-		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
+	while (!list_empty(&h->reqQ)) {
+		c = list_entry(h->reqQ.next, CommandList_struct, list);
 		/* can't do anything if fifo is full */
 		if ((h->access.fifo_full(h))) {
 			dev_warn(&h->pdev->dev, "fifo full\n");
@@ -3402,11 +3402,10 @@
 {
 	u32 tag;
 	CommandList_struct *c = NULL;
-	struct hlist_node *tmp;
 	__u32 busaddr_masked, tag_masked;
 
 	tag = cciss_tag_discard_error_bits(raw_tag);
-	hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+	list_for_each_entry(c, &h->cmpQ, list) {
 		busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
 		tag_masked = cciss_tag_discard_error_bits(tag);
 		if (busaddr_masked == tag_masked) {
@@ -4572,8 +4571,8 @@
 	h = hba[i];
 	h->pdev = pdev;
 	h->busy_initializing = 1;
-	INIT_HLIST_HEAD(&h->cmpQ);
-	INIT_HLIST_HEAD(&h->reqQ);
+	INIT_LIST_HEAD(&h->cmpQ);
+	INIT_LIST_HEAD(&h->reqQ);
 	mutex_init(&h->busy_shutting_down);
 
 	if (cciss_pci_init(h) != 0)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 4b8933d..579f749 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -103,8 +103,8 @@
 	struct access_method access;
 
 	/* queue and queue Info */ 
-	struct hlist_head reqQ;
-	struct hlist_head cmpQ;
+	struct list_head reqQ;
+	struct list_head cmpQ;
 	unsigned int Qdepth;
 	unsigned int maxQsinceinit;
 	unsigned int maxSG;
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index eb060f1..35463d2 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -195,7 +195,7 @@
   int			   ctlr;
   int			   cmd_type; 
   long			   cmdindex;
-  struct hlist_node list;
+  struct list_head list;
   struct request *	   rq;
   struct completion *waiting;
   int	 retry_count;
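
The cciss changes above swap the driver's hlist-based reqQ/cmpQ for ordinary list_head queues so commands can be appended at the tail and drained in FIFO order. As a rough sketch only (the struct and helper names below are illustrative, not taken from the driver), the idiom reduces to:

#include <linux/list.h>

struct example_cmd {
	struct list_head list;		/* links the command into a queue */
	int tag;
};

/* Append at the tail so the queue drains oldest-first, as addQ() now does. */
static void example_addQ(struct list_head *queue, struct example_cmd *c)
{
	list_add_tail(&c->list, queue);
}

/* Take the oldest command off the queue, mirroring the reqQ drain loop. */
static struct example_cmd *example_removeQ(struct list_head *queue)
{
	struct example_cmd *c;

	if (list_empty(queue))
		return NULL;
	c = list_entry(queue->next, struct example_cmd, list);
	list_del_init(&c->list);
	return c;
}
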
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 1ea1a34..3803a03 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -911,8 +911,6 @@
 struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
-	struct file *lo_file;
-	struct file *md_file;
 	struct drbd_md md;
 	struct disk_conf dc; /* The user provided config... */
 	sector_t known_size; /* last known size of that backing device */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6be5401..29cd0dc 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3372,11 +3372,8 @@
 	if (ldev == NULL)
 		return;
 
-	bd_release(ldev->backing_bdev);
-	bd_release(ldev->md_bdev);
-
-	fput(ldev->lo_file);
-	fput(ldev->md_file);
+	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 
 	kfree(ldev);
 }
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 29e5c70..8cbfaa6 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -855,7 +855,7 @@
 	sector_t max_possible_sectors;
 	sector_t min_md_device_sectors;
 	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
-	struct inode *inode, *inode2;
+	struct block_device *bdev;
 	struct lru_cache *resync_lru = NULL;
 	union drbd_state ns, os;
 	unsigned int max_seg_s;
@@ -907,46 +907,40 @@
 		}
 	}
 
-	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
-	if (IS_ERR(nbc->lo_file)) {
+	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
+	if (IS_ERR(bdev)) {
 		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
-		    PTR_ERR(nbc->lo_file));
-		nbc->lo_file = NULL;
+			PTR_ERR(bdev));
 		retcode = ERR_OPEN_DISK;
 		goto fail;
 	}
+	nbc->backing_bdev = bdev;
 
-	inode = nbc->lo_file->f_dentry->d_inode;
-
-	if (!S_ISBLK(inode->i_mode)) {
-		retcode = ERR_DISK_NOT_BDEV;
-		goto fail;
-	}
-
-	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
-	if (IS_ERR(nbc->md_file)) {
+	/*
+	 * meta_dev_idx >= 0: external fixed size, possibly multiple
+	 * drbd sharing one meta device.  TODO in that case, paranoia
+	 * check that [md_bdev, meta_dev_idx] is not yet used by some
+	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
+	 * should check it for you already; but if you don't, or
+	 * someone fooled it, we need to double check here)
+	 */
+	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
+				  (nbc->dc.meta_dev_idx < 0) ?
+				  (void *)mdev : (void *)drbd_m_holder);
+	if (IS_ERR(bdev)) {
 		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
-		    PTR_ERR(nbc->md_file));
-		nbc->md_file = NULL;
+			PTR_ERR(bdev));
 		retcode = ERR_OPEN_MD_DISK;
 		goto fail;
 	}
+	nbc->md_bdev = bdev;
 
-	inode2 = nbc->md_file->f_dentry->d_inode;
-
-	if (!S_ISBLK(inode2->i_mode)) {
-		retcode = ERR_MD_NOT_BDEV;
-		goto fail;
-	}
-
-	nbc->backing_bdev = inode->i_bdev;
-	if (bd_claim(nbc->backing_bdev, mdev)) {
-		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
-		       nbc->backing_bdev, mdev,
-		       nbc->backing_bdev->bd_holder,
-		       nbc->backing_bdev->bd_contains->bd_holder,
-		       nbc->backing_bdev->bd_holders);
-		retcode = ERR_BDCLAIM_DISK;
+	if ((nbc->backing_bdev == nbc->md_bdev) !=
+	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+		retcode = ERR_MD_IDX_INVALID;
 		goto fail;
 	}
 
@@ -955,28 +949,7 @@
 			offsetof(struct bm_extent, lce));
 	if (!resync_lru) {
 		retcode = ERR_NOMEM;
-		goto release_bdev_fail;
-	}
-
-	/* meta_dev_idx >= 0: external fixed size,
-	 * possibly multiple drbd sharing one meta device.
-	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
-	 * not yet used by some other drbd minor!
-	 * (if you use drbd.conf + drbdadm,
-	 * that should check it for you already; but if you don't, or someone
-	 * fooled it, we need to double check here) */
-	nbc->md_bdev = inode2->i_bdev;
-	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
-				: (void *) drbd_m_holder)) {
-		retcode = ERR_BDCLAIM_MD_DISK;
-		goto release_bdev_fail;
-	}
-
-	if ((nbc->backing_bdev == nbc->md_bdev) !=
-	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
-	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
-		retcode = ERR_MD_IDX_INVALID;
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
@@ -987,7 +960,7 @@
 			(unsigned long long) drbd_get_max_capacity(nbc),
 			(unsigned long long) nbc->dc.disk_size);
 		retcode = ERR_DISK_TO_SMALL;
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	if (nbc->dc.meta_dev_idx < 0) {
@@ -1004,7 +977,7 @@
 		dev_warn(DEV, "refusing attach: md-device too small, "
 		     "at least %llu sectors needed for this meta-disk type\n",
 		     (unsigned long long) min_md_device_sectors);
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	/* Make sure the new disk is big enough
@@ -1012,7 +985,7 @@
 	if (drbd_get_max_capacity(nbc) <
 	    drbd_get_capacity(mdev->this_bdev)) {
 		retcode = ERR_DISK_TO_SMALL;
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
@@ -1035,7 +1008,7 @@
 	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
 	drbd_resume_io(mdev);
 	if (retcode < SS_SUCCESS)
-		goto release_bdev2_fail;
+		goto fail;
 
 	if (!get_ldev_if_state(mdev, D_ATTACHING))
 		goto force_diskless;
@@ -1269,18 +1242,14 @@
  force_diskless:
 	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
- release_bdev2_fail:
-	if (nbc)
-		bd_release(nbc->md_bdev);
- release_bdev_fail:
-	if (nbc)
-		bd_release(nbc->backing_bdev);
  fail:
 	if (nbc) {
-		if (nbc->lo_file)
-			fput(nbc->lo_file);
-		if (nbc->md_file)
-			fput(nbc->md_file);
+		if (nbc->backing_bdev)
+			blkdev_put(nbc->backing_bdev,
+				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+		if (nbc->md_bdev)
+			blkdev_put(nbc->md_bdev,
+				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 		kfree(nbc);
 	}
 	lc_destroy(resync_lru);
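
The drbd attach path above (and the pktcdvd and raw changes later in this series) replaces the old filp_open()/bd_claim() pairing with the exclusive block-device open API. A minimal, hedged sketch of that claim/release pattern, with illustrative helper names only:

#include <linux/blkdev.h>
#include <linux/err.h>

/* Claim a block device exclusively; 'holder' identifies who owns the claim. */
static struct block_device *example_claim_bdev(const char *path, void *holder)
{
	/* Returns an IS_ERR() pointer on failure, e.g. -EBUSY if already claimed. */
	return blkdev_get_by_path(path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
}

/* The release must pass the same mode flags that were used for the claim. */
static void example_release_bdev(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
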
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 25e4dff..77fc76f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -597,6 +597,11 @@
 static unsigned char in_sector_offset;	/* offset within physical sector,
 					 * expressed in units of 512 bytes */
 
+static inline bool drive_no_geom(int drive)
+{
+	return !current_type[drive] && !ITYPE(UDRS->fd_device);
+}
+
 #ifndef fd_eject
 static inline int fd_eject(int drive)
 {
@@ -3276,7 +3281,7 @@
 			struct block_device *bdev = opened_bdev[cnt];
 			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
 				continue;
-			__invalidate_device(bdev);
+			__invalidate_device(bdev, true);
 		}
 		mutex_unlock(&open_lock);
 	} else {
@@ -3782,7 +3787,7 @@
 	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
 	    test_bit(drive, &fake_change) ||
-	    (!ITYPE(UDRS->fd_device) && !current_type[drive]))
+	    drive_no_geom(drive))
 		return 1;
 	return 0;
 }
@@ -3848,13 +3853,13 @@
 static int floppy_revalidate(struct gendisk *disk)
 {
 	int drive = (long)disk->private_data;
-#define NO_GEOM (!current_type[drive] && !ITYPE(UDRS->fd_device))
 	int cf;
 	int res = 0;
 
 	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
-	    test_bit(drive, &fake_change) || NO_GEOM) {
+	    test_bit(drive, &fake_change) ||
+	    drive_no_geom(drive)) {
 		if (WARN(atomic_read(&usage_count) == 0,
 			 "VFS: revalidate called on non-open device.\n"))
 			return -EFAULT;
@@ -3862,7 +3867,7 @@
 		lock_fdc(drive, false);
 		cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 		      test_bit(FD_VERIFY_BIT, &UDRS->flags));
-		if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)) {
+		if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
 			process_fd_request();	/*already done by another thread */
 			return 0;
 		}
@@ -3874,7 +3879,7 @@
 		clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
 		if (cf)
 			UDRS->generation++;
-		if (NO_GEOM) {
+		if (drive_no_geom(drive)) {
 			/* auto-sensing */
 			res = __floppy_read_block_0(opened_bdev[drive]);
 		} else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7ea0bea..dbf31ec 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,6 @@
 
 #include <asm/uaccess.h>
 
-static DEFINE_MUTEX(loop_mutex);
 static LIST_HEAD(loop_devices);
 static DEFINE_MUTEX(loop_devices_mutex);
 
@@ -395,11 +394,7 @@
 	struct loop_device *lo = p->lo;
 	struct page *page = buf->page;
 	sector_t IV;
-	int size, ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
+	int size;
 
 	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 							(buf->offset >> 9);
@@ -1505,11 +1500,9 @@
 {
 	struct loop_device *lo = bdev->bd_disk->private_data;
 
-	mutex_lock(&loop_mutex);
 	mutex_lock(&lo->lo_ctl_mutex);
 	lo->lo_refcnt++;
 	mutex_unlock(&lo->lo_ctl_mutex);
-	mutex_unlock(&loop_mutex);
 
 	return 0;
 }
@@ -1519,7 +1512,6 @@
 	struct loop_device *lo = disk->private_data;
 	int err;
 
-	mutex_lock(&loop_mutex);
 	mutex_lock(&lo->lo_ctl_mutex);
 
 	if (--lo->lo_refcnt)
@@ -1544,7 +1536,6 @@
 out:
 	mutex_unlock(&lo->lo_ctl_mutex);
 out_unlocked:
-	mutex_unlock(&loop_mutex);
 	return 0;
 }
 
@@ -1645,6 +1636,9 @@
 
 static void loop_free(struct loop_device *lo)
 {
+	if (!lo->lo_queue->queue_lock)
+		lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
+
 	blk_cleanup_queue(lo->lo_queue);
 	put_disk(lo->lo_disk);
 	list_del(&lo->lo_list);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a32fb41..e6fc716 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -53,7 +53,6 @@
 #define DBG_BLKDEV      0x0100
 #define DBG_RX          0x0200
 #define DBG_TX          0x0400
-static DEFINE_MUTEX(nbd_mutex);
 static unsigned int debugflags;
 #endif /* NDEBUG */
 
@@ -718,11 +717,9 @@
 	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
 			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
 
-	mutex_lock(&nbd_mutex);
 	mutex_lock(&lo->tx_lock);
 	error = __nbd_ioctl(bdev, lo, cmd, arg);
 	mutex_unlock(&lo->tx_lock);
-	mutex_unlock(&nbd_mutex);
 
 	return error;
 }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 19b3568..77d70ee 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2296,15 +2296,12 @@
 	 * so bdget() can't fail.
 	 */
 	bdget(pd->bdev->bd_dev);
-	if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
+	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
 		goto out;
 
-	if ((ret = bd_claim(pd->bdev, pd)))
-		goto out_putdev;
-
 	if ((ret = pkt_get_last_written(pd, &lba))) {
 		printk(DRIVER_NAME": pkt_get_last_written failed\n");
-		goto out_unclaim;
+		goto out_putdev;
 	}
 
 	set_capacity(pd->disk, lba << 2);
@@ -2314,7 +2311,7 @@
 	q = bdev_get_queue(pd->bdev);
 	if (write) {
 		if ((ret = pkt_open_write(pd)))
-			goto out_unclaim;
+			goto out_putdev;
 		/*
 		 * Some CDRW drives can not handle writes larger than one packet,
 		 * even if the size is a multiple of the packet size.
@@ -2329,23 +2326,21 @@
 	}
 
 	if ((ret = pkt_set_segment_merging(pd, q)))
-		goto out_unclaim;
+		goto out_putdev;
 
 	if (write) {
 		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
 			printk(DRIVER_NAME": not enough memory for buffers\n");
 			ret = -ENOMEM;
-			goto out_unclaim;
+			goto out_putdev;
 		}
 		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
 	}
 
 	return 0;
 
-out_unclaim:
-	bd_release(pd->bdev);
 out_putdev:
-	blkdev_put(pd->bdev, FMODE_READ);
+	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
 out:
 	return ret;
 }
@@ -2362,8 +2357,7 @@
 	pkt_lock_door(pd, 0);
 
 	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
-	bd_release(pd->bdev);
-	blkdev_put(pd->bdev, FMODE_READ);
+	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
 
 	pkt_shrink_pktlist(pd);
 }
@@ -2733,7 +2727,7 @@
 	bdev = bdget(dev);
 	if (!bdev)
 		return -ENOMEM;
-	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
+	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 008d4a0..e1e38b1 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1790,18 +1790,29 @@
 
 	rc = rbd_bus_add_dev(rbd_dev);
 	if (rc)
-		goto err_out_disk;
+		goto err_out_blkdev;
+
 	/* set up and announce blkdev mapping */
 	rc = rbd_init_disk(rbd_dev);
 	if (rc)
-		goto err_out_blkdev;
+		goto err_out_bus;
 
 	return count;
 
+err_out_bus:
+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+	list_del_init(&rbd_dev->node);
+	mutex_unlock(&ctl_mutex);
+
+	/* this will also clean up rest of rbd_dev stuff */
+
+	rbd_bus_del_dev(rbd_dev);
+	kfree(options);
+	kfree(mon_dev_name);
+	return rc;
+
 err_out_blkdev:
 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
-err_out_disk:
-	rbd_free_disk(rbd_dev);
 err_out_client:
 	rbd_put_client(rbd_dev);
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 949ed09..6dcd55a 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -39,6 +39,11 @@
 	/* Atheros AR3011 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3002) },
 
+	/* Atheros AR9285 Malbec with sflash firmware */
+	{ USB_DEVICE(0x03F0, 0x311D) },
+
+	/* Atheros AR5BBU12 with sflash firmware */
+	{ USB_DEVICE(0x0489, 0xE02C) },
 	{ }	/* Terminating entry */
 };
 
@@ -47,33 +52,16 @@
 #define USB_REQ_DFU_DNLOAD	1
 #define BULK_SIZE		4096
 
-struct ath3k_data {
-	struct usb_device *udev;
-	u8 *fw_data;
-	u32 fw_size;
-	u32 fw_sent;
-};
-
-static int ath3k_load_firmware(struct ath3k_data *data,
-				unsigned char *firmware,
-				int count)
+static int ath3k_load_firmware(struct usb_device *udev,
+				const struct firmware *firmware)
 {
 	u8 *send_buf;
 	int err, pipe, len, size, sent = 0;
+	int count = firmware->size;
 
-	BT_DBG("ath3k %p udev %p", data, data->udev);
+	BT_DBG("udev %p", udev);
 
-	pipe = usb_sndctrlpipe(data->udev, 0);
-
-	if ((usb_control_msg(data->udev, pipe,
-				USB_REQ_DFU_DNLOAD,
-				USB_TYPE_VENDOR, 0, 0,
-				firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
-		BT_ERR("Can't change to loading configuration err");
-		return -EBUSY;
-	}
-	sent += 20;
-	count -= 20;
+	pipe = usb_sndctrlpipe(udev, 0);
 
 	send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
 	if (!send_buf) {
@@ -81,12 +69,23 @@
 		return -ENOMEM;
 	}
 
+	memcpy(send_buf, firmware->data, 20);
+	if ((err = usb_control_msg(udev, pipe,
+				USB_REQ_DFU_DNLOAD,
+				USB_TYPE_VENDOR, 0, 0,
+				send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+		BT_ERR("Can't change to loading configuration err");
+		goto error;
+	}
+	sent += 20;
+	count -= 20;
+
 	while (count) {
 		size = min_t(uint, count, BULK_SIZE);
-		pipe = usb_sndbulkpipe(data->udev, 0x02);
-		memcpy(send_buf, firmware + sent, size);
+		pipe = usb_sndbulkpipe(udev, 0x02);
+		memcpy(send_buf, firmware->data + sent, size);
 
-		err = usb_bulk_msg(data->udev, pipe, send_buf, size,
+		err = usb_bulk_msg(udev, pipe, send_buf, size,
 					&len, 3000);
 
 		if (err || (len != size)) {
@@ -112,57 +111,28 @@
 {
 	const struct firmware *firmware;
 	struct usb_device *udev = interface_to_usbdev(intf);
-	struct ath3k_data *data;
-	int size;
 
 	BT_DBG("intf %p id %p", intf, id);
 
 	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
 		return -ENODEV;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	data->udev = udev;
-
 	if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
-		kfree(data);
 		return -EIO;
 	}
 
-	size = max_t(uint, firmware->size, 4096);
-	data->fw_data = kmalloc(size, GFP_KERNEL);
-	if (!data->fw_data) {
+	if (ath3k_load_firmware(udev, firmware)) {
 		release_firmware(firmware);
-		kfree(data);
-		return -ENOMEM;
-	}
-
-	memcpy(data->fw_data, firmware->data, firmware->size);
-	data->fw_size = firmware->size;
-	data->fw_sent = 0;
-	release_firmware(firmware);
-
-	usb_set_intfdata(intf, data);
-	if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
-		usb_set_intfdata(intf, NULL);
-		kfree(data->fw_data);
-		kfree(data);
 		return -EIO;
 	}
+	release_firmware(firmware);
 
 	return 0;
 }
 
 static void ath3k_disconnect(struct usb_interface *intf)
 {
-	struct ath3k_data *data = usb_get_intfdata(intf);
-
 	BT_DBG("ath3k_disconnect intf %p", intf);
-
-	kfree(data->fw_data);
-	kfree(data);
 }
 
 static struct usb_driver ath3k_driver = {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f8..700a384 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,12 @@
 	/* Atheros 3011 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
 
+	/* Atheros AR9285 Malbec with sflash firmware */
+	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+	/* Atheros AR5BBU12 with sflash firmware */
+	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
 	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -826,7 +832,7 @@
 
 	if (hdev->conn_hash.sco_num > 0) {
 		if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
-			err = usb_autopm_get_interface(data->isoc);
+			err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
 			if (err < 0) {
 				clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
 				usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -855,7 +861,7 @@
 
 		__set_isoc_interface(hdev, 0);
 		if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
-			usb_autopm_put_interface(data->isoc);
+			usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
 	}
 }
 
@@ -1038,8 +1044,6 @@
 
 	usb_set_intfdata(intf, data);
 
-	usb_enable_autosuspend(interface_to_usbdev(intf));
-
 	return 0;
 }
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index af13c62d..e2c48a7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -409,7 +409,8 @@
 	}
 
 	ENSURE(drive_status, CDC_DRIVE_STATUS );
-	ENSURE(media_changed, CDC_MEDIA_CHANGED);
+	if (cdo->check_events == NULL && cdo->media_changed == NULL)
+		*change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
 	ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
 	ENSURE(lock_door, CDC_LOCK);
 	ENSURE(select_speed, CDC_SELECT_SPEED);
@@ -1348,7 +1349,10 @@
 	if (!CDROM_CAN(CDC_SELECT_DISC))
 		return -EDRIVE_CANT_DO_THIS;
 
-	(void) cdi->ops->media_changed(cdi, slot);
+	if (cdi->ops->check_events)
+		cdi->ops->check_events(cdi, 0, slot);
+	else
+		cdi->ops->media_changed(cdi, slot);
 
 	if (slot == CDSL_NONE) {
 		/* set media changed bits, on both queues */
@@ -1392,6 +1396,42 @@
 	return slot;
 }
 
+/*
+ * As cdrom implements an extra ioctl consumer for media changed
+ * event, it needs to buffer ->check_events() output, such that event
+ * is not lost for both the usual VFS and ioctl paths.
+ * cdi->{vfs|ioctl}_events are used to buffer pending events for each
+ * path.
+ *
+ * XXX: Locking is non-existent.  cdi->ops->check_events() can be
+ * called in parallel and buffering fields are accessed without any
+ * exclusion.  The original media_changed code had the same problem.
+ * It might be better to simply deprecate CDROM_MEDIA_CHANGED ioctl
+ * and remove this cruft altogether.  It doesn't have much usefulness
+ * at this point.
+ */
+static void cdrom_update_events(struct cdrom_device_info *cdi,
+				unsigned int clearing)
+{
+	unsigned int events;
+
+	events = cdi->ops->check_events(cdi, clearing, CDSL_CURRENT);
+	cdi->vfs_events |= events;
+	cdi->ioctl_events |= events;
+}
+
+unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
+				unsigned int clearing)
+{
+	unsigned int events;
+
+	cdrom_update_events(cdi, clearing);
+	events = cdi->vfs_events;
+	cdi->vfs_events = 0;
+	return events;
+}
+EXPORT_SYMBOL(cdrom_check_events);
+
 /* We want to make media_changed accessible to the user through an
  * ioctl. The main problem now is that we must double-buffer the
  * low-level implementation, to assure that the VFS and the user both
@@ -1403,15 +1443,26 @@
 {
 	unsigned int mask = (1 << (queue & 1));
 	int ret = !!(cdi->mc_flags & mask);
+	bool changed;
 
 	if (!CDROM_CAN(CDC_MEDIA_CHANGED))
-	    return ret;
+		return ret;
+
 	/* changed since last call? */
-	if (cdi->ops->media_changed(cdi, CDSL_CURRENT)) {
+	if (cdi->ops->check_events) {
+		BUG_ON(!queue);	/* shouldn't be called from VFS path */
+		cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
+		changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
+		cdi->ioctl_events = 0;
+	} else
+		changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);
+
+	if (changed) {
 		cdi->mc_flags = 0x3;    /* set bit on both queues */
 		ret |= 1;
 		cdi->media_written = 0;
 	}
+
 	cdi->mc_flags &= ~mask;         /* clear bit */
 	return ret;
 }
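
The cdrom core above starts polling media state through the optional ->check_events() hook and double-buffers the result for the VFS and ioctl consumers. As a hedged illustration (a hypothetical driver, not part of this series), a minimal callback simply translates a hardware media-change flag into DISK_EVENT_MEDIA_CHANGE:

#include <linux/types.h>
#include <linux/cdrom.h>
#include <linux/genhd.h>

/* Hypothetical hardware poll; a real driver would read its status register. */
static bool example_media_changed_hw(struct cdrom_device_info *cdi)
{
	return false;
}

static unsigned int example_check_events(struct cdrom_device_info *cdi,
					 unsigned int clearing, int slot)
{
	/* Only the currently loaded disc is handled in this sketch. */
	if (slot != CDSL_CURRENT)
		return 0;

	return example_media_changed_hw(cdi) ? DISK_EVENT_MEDIA_CHANGE : 0;
}
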
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d4a7776..b7980a83 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -5,7 +5,7 @@
 menu "Character devices"
 
 config VT
-	bool "Virtual terminal" if EMBEDDED
+	bool "Virtual terminal" if EXPERT
 	depends on !S390
 	select INPUT
 	default y
@@ -39,13 +39,13 @@
 config CONSOLE_TRANSLATIONS
 	depends on VT
 	default y
-	bool "Enable character translations in console" if EMBEDDED
+	bool "Enable character translations in console" if EXPERT
 	---help---
 	  This enables support for font mapping and Unicode translation
 	  on virtual consoles.
 
 config VT_CONSOLE
-	bool "Support for console on virtual terminal" if EMBEDDED
+	bool "Support for console on virtual terminal" if EXPERT
 	depends on VT
 	default y
 	---help---
@@ -426,10 +426,10 @@
          If you have an SGI Altix with an attached SABrick
          say Y or M here, otherwise say N.
 
-source "drivers/serial/Kconfig"
+source "drivers/tty/serial/Kconfig"
 
 config UNIX98_PTYS
-	bool "Unix98 PTY support" if EMBEDDED
+	bool "Unix98 PTY support" if EXPERT
 	default y
 	---help---
 	  A pseudo terminal (PTY) is a software device consisting of two
@@ -495,7 +495,7 @@
 
 config TTY_PRINTK
 	bool "TTY driver to output user messages via printk"
-	depends on EMBEDDED
+	depends on EXPERT
 	default n
 	---help---
 	  If you say Y here, the support for writing user messages (i.e.
@@ -1047,15 +1047,6 @@
 	  pc8736x_gpio drivers.  If those drivers are built as
 	  modules, this one will be too, named nsc_gpio
 
-config CS5535_GPIO
-	tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
-	depends on X86_32
-	help
-	  Give userspace access to the GPIO pins on the AMD CS5535 and
-	  CS5536 Geode companion devices.
-
-	  If compiled as a module, it will be called cs5535_gpio.
-
 config RAW_DRIVER
 	tristate "RAW driver (/dev/raw/rawN)"
 	depends on BLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index fa0b824..8238f89 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,17 +30,6 @@
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
-obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
-obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
-obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
-obj-$(CONFIG_HVC_TILE)		+= hvc_tile.o
-obj-$(CONFIG_HVC_DCC)		+= hvc_dcc.o
-obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o
-obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
-obj-$(CONFIG_HVC_IRQ)		+= hvc_irq.o
-obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o
-obj-$(CONFIG_HVC_IUCV)		+= hvc_iucv.o
-obj-$(CONFIG_HVC_UDBG)		+= hvc_udbg.o
 obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
@@ -48,7 +37,6 @@
 obj-$(CONFIG_MMTIMER)		+= mmtimer.o
 obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
 obj-$(CONFIG_VIOTAPE)		+= viotape.o
-obj-$(CONFIG_HVCS)		+= hvcs.o
 obj-$(CONFIG_IBM_BSR)		+= bsr.o
 obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
 obj-$(CONFIG_BRIQ_PANEL)	+= briq_panel.o
@@ -82,7 +70,6 @@
 obj-$(CONFIG_SCx200_GPIO)	+= scx200_gpio.o
 obj-$(CONFIG_PC8736x_GPIO)	+= pc8736x_gpio.o
 obj-$(CONFIG_NSC_GPIO)		+= nsc_gpio.o
-obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio.o
 obj-$(CONFIG_GPIO_TB0219)	+= tb0219.o
 obj-$(CONFIG_TELCLOCK)		+= tlclk.o
 
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index fcd867d..d8b1b57 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -50,7 +50,7 @@
 
 config AGP_AMD
 	tristate "AMD Irongate, 761, and 762 chipset support"
-	depends on AGP && (X86_32 || ALPHA)
+	depends on AGP && X86_32
 	help
 	  This option gives you AGP support for the GLX component of
 	  X on AMD Irongate, 761, and 762 chipsets.
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b1b4362..45681c0 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -41,22 +41,8 @@
 	if (page_map->real == NULL)
 		return -ENOMEM;
 
-#ifndef CONFIG_X86
-	SetPageReserved(virt_to_page(page_map->real));
-	global_cache_flush();
-	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
-					    PAGE_SIZE);
-	if (page_map->remapped == NULL) {
-		ClearPageReserved(virt_to_page(page_map->real));
-		free_page((unsigned long) page_map->real);
-		page_map->real = NULL;
-		return -ENOMEM;
-	}
-	global_cache_flush();
-#else
 	set_memory_uc((unsigned long)page_map->real, 1);
 	page_map->remapped = page_map->real;
-#endif
 
 	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
 		writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -68,12 +54,7 @@
 
 static void amd_free_page_map(struct amd_page_map *page_map)
 {
-#ifndef CONFIG_X86
-	iounmap(page_map->remapped);
-	ClearPageReserved(virt_to_page(page_map->real));
-#else
 	set_memory_wb((unsigned long)page_map->real, 1);
-#endif
 	free_page((unsigned long) page_map->real);
 }
 
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85..780498d 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@
 #else
 			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
 #endif
+			pci_unregister_driver(&agp_amd64_pci_driver);
 			return -ENODEV;
 		}
 
 		/* First check that we have at least one AMD64 NB */
-		if (!pci_dev_present(amd_nb_misc_ids))
+		if (!pci_dev_present(amd_nb_misc_ids)) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
 			return -ENODEV;
+		}
 
 		/* Look for any AGP bridge */
 		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
 		err = driver_attach(&agp_amd64_pci_driver.driver);
-		if (err == 0 && agp_bridges_found == 0)
+		if (err == 0 && agp_bridges_found == 0) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
 			err = -ENODEV;
+		}
 	}
 	return err;
 }
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 07e9796..b0a0dcc 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -717,8 +717,8 @@
 	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
 	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
 	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
-	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
-	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
 	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
 	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
 	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
@@ -774,20 +774,14 @@
 	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
 
 	/*
-	* If the device has not been properly setup, the following will catch
-	* the problem and should stop the system from crashing.
-	* 20030610 - hamish@zot.org
-	*/
-	if (pci_enable_device(pdev)) {
-		dev_err(&pdev->dev, "can't enable PCI device\n");
-		agp_put_bridge(bridge);
-		return -ENODEV;
-	}
-
-	/*
 	* The following fixes the case where the BIOS has "forgotten" to
 	* provide an address range for the GART.
 	* 20030610 - hamish@zot.org
+	* This happens before pci_enable_device() intentionally;
+	* calling pci_enable_device() before assigning the resource
+	* will result in the GART being disabled on machines with such
+	* BIOSs (the GART ends up with a BAR starting at 0, which
+	* conflicts with a lot of other devices).
 	*/
 	r = &pdev->resource[0];
 	if (!r->start && r->end) {
@@ -798,6 +792,17 @@
 		}
 	}
 
+	/*
+	* If the device has not been properly setup, the following will catch
+	* the problem and should stop the system from crashing.
+	* 20030610 - hamish@zot.org
+	*/
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "can't enable PCI device\n");
+		agp_put_bridge(bridge);
+		return -ENODEV;
+	}
+
 	/* Fill in the mode register */
 	if (cap_ptr) {
 		pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 010e3de..5feebe2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -94,6 +94,8 @@
 #define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
 #define G4x_GMCH_SIZE_VT_2M	(0xc << 8)
 
+#define GFX_FLSH_CNTL		0x2170 /* 915+ */
+
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
 #define I810_DRAM_ROW_0_SDRAM	0x00000001
@@ -128,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 356f73e..0d09b53 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -68,13 +69,10 @@
 	phys_addr_t gma_bus_addr;
 	u32 PGETBL_save;
 	u32 __iomem *gtt;		/* I915G */
+	bool clear_fake_agp; /* on first access via agp, fill with scratch */
 	int num_dcache_entries;
-	union {
-		void __iomem *i9xx_flush_page;
-		void *i8xx_flush_page;
-	};
+	void __iomem *i9xx_flush_page;
 	char *i81x_gtt_table;
-	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
@@ -688,14 +686,14 @@
 
 	intel_private.base.stolen_size = intel_gtt_stolen_size();
 
+	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
 		intel_gtt_cleanup();
 		return ret;
 	}
 
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
-
 	return 0;
 }
 
@@ -721,28 +719,6 @@
 
 static void i830_cleanup(void)
 {
-	if (intel_private.i8xx_flush_page) {
-		kunmap(intel_private.i8xx_flush_page);
-		intel_private.i8xx_flush_page = NULL;
-	}
-
-	__free_page(intel_private.i8xx_page);
-	intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-	/* return if we've already set the flush mechanism up */
-	if (intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-	if (!intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-	if (!intel_private.i8xx_flush_page)
-		i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -757,14 +733,27 @@
  */
 static void i830_chipset_flush(void)
 {
-	unsigned int *pg = intel_private.i8xx_flush_page;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 
-	memset(pg, 0, 1024);
+	/* Forcibly evict everything from the CPU write buffers.
+	 * clflush appears to be insufficient.
+	 */
+	wbinvd_on_all_cpus();
 
-	if (cpu_has_clflush)
-		clflush_cache_range(pg, 1024);
-	else if (wbinvd_on_all_cpus() != 0)
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+	/* So far we have only seen documentation for this magic bit on 855GM;
+	 * we hope it exists for the other gen2 chipsets...
+	 *
+	 * Also works as advertised on my 845G.
+	 */
+	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+	       intel_private.registers+I830_HIC);
+
+	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+		if (time_after(jiffies, timeout))
+			break;
+
+		udelay(50);
+	}
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -814,6 +803,12 @@
 		}
 	}
 
+	/* On the resume path we may be adjusting the PGTBL value, so
+	 * be paranoid and flush all chipset write buffers...
+	 */
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
 	reg = intel_private.registers+I810_PGETBL_CTL;
 	writel(intel_private.PGETBL_save, reg);
 	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
@@ -823,6 +818,9 @@
 		return false;
 	}
 
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
 	return true;
 }
 
@@ -839,8 +837,6 @@
 
 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-	intel_i830_setup_flush();
-
 	return 0;
 }
 
@@ -860,21 +856,12 @@
 
 static int intel_fake_agp_configure(void)
 {
-	int i;
-
 	if (!intel_enable_gtt())
 	    return -EIO;
 
+	intel_private.clear_fake_agp = true;
 	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-	for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
-		intel_private.driver->write_entry(intel_private.scratch_page_dma,
-						  i, 0);
-	}
-	readl(intel_private.gtt+i-1);	/* PCI Posting. */
-
-	global_cache_flush();
-
 	return 0;
 }
 
@@ -936,6 +923,13 @@
 {
 	int ret = -EINVAL;
 
+	if (intel_private.clear_fake_agp) {
+		int start = intel_private.base.stolen_size / PAGE_SIZE;
+		int end = intel_private.base.gtt_mappable_entries;
+		intel_gtt_clear_range(start, end - start);
+		intel_private.clear_fake_agp = false;
+	}
+
 	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
 		return i810_insert_dcache_entries(mem, pg_start, type);
 
@@ -991,14 +985,14 @@
 	if (mem->page_count == 0)
 		return 0;
 
+	intel_gtt_clear_range(pg_start, mem->page_count);
+
 	if (intel_private.base.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
 	}
 
-	intel_gtt_clear_range(pg_start, mem->page_count);
-
 	return 0;
 }
 
@@ -1352,7 +1346,7 @@
 		&i81x_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
 		&i8xx_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
+	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
 		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
 		&i8xx_gtt_driver},
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index e397df3..16402445 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -183,16 +183,16 @@
 }
 
 #ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
-# define acquire_console_sem()
-# define release_console_sem()
+# define console_lock()
+# define console_unlock()
 #endif
 static int
 bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
 	int i;
-	acquire_console_sem();
+	console_lock();
 	i = bfin_jc_circ_write(buf, count);
-	release_console_sem();
+	console_unlock();
 	wake_up_process(bfin_jc_kthread);
 	return i;
 }
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 794aacb..d0387a8 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -24,6 +24,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/hw_random.h>
@@ -34,7 +35,6 @@
 #include <asm/i387.h>
 
 
-#define PFX	KBUILD_MODNAME ": "
 
 
 enum {
@@ -81,8 +81,7 @@
 	ts_state = irq_ts_save();
 
 	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
-		:"=m"(*addr), "=a"(eax_out)
-		:"D"(addr), "d"(edx_in));
+		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
 
 	irq_ts_restore(ts_state);
 	return eax_out;
@@ -90,8 +89,10 @@
 
 static int via_rng_data_present(struct hwrng *rng, int wait)
 {
+	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	u32 bytes_out;
-	u32 *via_rng_datum = (u32 *)(&rng->priv);
 	int i;
 
 	/* We choose the recommended 1-byte-per-instruction RNG rate,
@@ -115,6 +116,7 @@
 			break;
 		udelay(10);
 	}
+	rng->priv = *via_rng_datum;
 	return bytes_out ? 1 : 0;
 }
 
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 2fe72f8..38223e9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -970,6 +970,33 @@
 }
 EXPORT_SYMBOL(ipmi_create_user);
 
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+	int           rv = 0;
+	ipmi_smi_t    intf;
+	struct ipmi_smi_handlers *handlers;
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
+	}
+	/* Not found, return an error */
+	rv = -EINVAL;
+	mutex_unlock(&ipmi_interfaces_mutex);
+	return rv;
+
+found:
+	handlers = intf->handlers;
+	rv = -ENOSYS;
+	if (handlers->get_smi_info)
+		rv = handlers->get_smi_info(intf->send_info, data);
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
 static void free_user(struct kref *ref)
 {
 	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f27c04e..62787e3 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -57,6 +57,7 @@
 #include <asm/irq.h>
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
+#include <linux/ipmi.h>
 #include <linux/ipmi_smi.h>
 #include <asm/io.h>
 #include "ipmi_si_sm.h"
@@ -109,10 +110,6 @@
 };
 static char *si_to_str[] = { "kcs", "smic", "bt" };
 
-enum ipmi_addr_src {
-	SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
-	SI_PCI,	SI_DEVICETREE, SI_DEFAULT
-};
 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
 					"ACPI", "SMBIOS", "PCI",
 					"device-tree", "default" };
@@ -293,6 +290,7 @@
 	struct task_struct *thread;
 
 	struct list_head link;
+	union ipmi_smi_info_union addr_info;
 };
 
 #define smi_inc_stat(smi, stat) \
@@ -322,6 +320,7 @@
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
@@ -901,6 +900,14 @@
 	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
+	/*
+	 * last_timeout_jiffies is updated here to prevent the
+	 * smi_timeout() handler from passing a very large time_diff
+	 * value to smi_event_handler(), which would cause the send
+	 * command to abort.
+	 */
+	smi_info->last_timeout_jiffies = jiffies;
+
 	mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
 
 	if (smi_info->thread)
@@ -1188,6 +1195,18 @@
 	return 0;
 }
 
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+	struct smi_info *smi = send_info;
+
+	data->addr_src = smi->addr_source;
+	data->dev = smi->dev;
+	data->addr_info = smi->addr_info;
+	get_device(smi->dev);
+
+	return 0;
+}
+
 static void set_maintenance_mode(void *send_info, int enable)
 {
 	struct smi_info   *smi_info = send_info;
@@ -1199,6 +1218,7 @@
 static struct ipmi_smi_handlers handlers = {
 	.owner                  = THIS_MODULE,
 	.start_processing       = smi_start_processing,
+	.get_smi_info		= get_smi_info,
 	.sender			= sender,
 	.request_events		= request_events,
 	.set_maintenance_mode   = set_maintenance_mode,
@@ -1930,7 +1950,8 @@
 static int acpi_failure;
 
 /* For GPE-type interrupts. */
-static u32 ipmi_acpi_gpe(void *context)
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+	u32 gpe_number, void *context)
 {
 	struct smi_info *smi_info = context;
 	unsigned long   flags;
@@ -2158,6 +2179,7 @@
 	printk(KERN_INFO PFX "probing via ACPI\n");
 
 	handle = acpi_dev->handle;
+	info->addr_info.acpi_info.acpi_handle = handle;
 
 	/* _IFT tells us the interface type: KCS, BT, etc */
 	status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
@@ -3437,16 +3459,7 @@
 	mutex_lock(&smi_infos_lock);
 	if (unload_when_empty && list_empty(&smi_infos)) {
 		mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_PCI
-		if (pci_registered)
-			pci_unregister_driver(&ipmi_pci_driver);
-#endif
-
-#ifdef CONFIG_PPC_OF
-		if (of_registered)
-			of_unregister_platform_driver(&ipmi_of_platform_driver);
-#endif
-		driver_unregister(&ipmi_driver.driver);
+		cleanup_ipmi_si();
 		printk(KERN_WARNING PFX
 		       "Unable to find any System Interface(s)\n");
 		return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f4d334f..320668f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1081,7 +1081,7 @@
 {
 	struct die_args *args = data;
 
-	if (val != DIE_NMI)
+	if (val != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	/* Hack, if it's a memory or I/O error, ignore it. */
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a..bcbbc71 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@
 			    test_bit(IS_ANY_T1, &dev->flags))) {
 				DEBUGP(4, dev, "Perform AUTOPPS\n");
 				set_bit(IS_AUTOPPS_ACT, &dev->flags);
-				ptsreq.protocol = ptsreq.protocol =
-				    (0x01 << dev->proto);
+				ptsreq.protocol = (0x01 << dev->proto);
 				ptsreq.flags = 0x01;
 				ptsreq.pts1 = 0x00;
 				ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4..444155a 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@
 static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 {
 	struct ipw_dev *ipw = priv_data;
-	struct resource *io_resource;
 	int ret;
 
 	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@
 	if (ret)
 		return ret;
 
-	io_resource = request_region(p_dev->resource[0]->start,
-				resource_size(p_dev->resource[0]),
-				IPWIRELESS_PCCARD_NAME);
+	if (!request_region(p_dev->resource[0]->start,
+			    resource_size(p_dev->resource[0]),
+			    IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit;
+	}
 
 	p_dev->resource[2]->flags |=
 		WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@
 
 	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
 	if (ret != 0)
-		goto exit2;
+		goto exit1;
 
 	ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
 
-	ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+	ipw->common_memory = ioremap(p_dev->resource[2]->start,
 				resource_size(p_dev->resource[2]));
-	request_mem_region(p_dev->resource[2]->start,
-			resource_size(p_dev->resource[2]),
-			IPWIRELESS_PCCARD_NAME);
+	if (!request_mem_region(p_dev->resource[2]->start,
+				resource_size(p_dev->resource[2]),
+				IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit2;
+	}
 
 	p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
 					WIN_ENABLE;
 	p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
 	ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
 	if (ret != 0)
-		goto exit2;
+		goto exit3;
 
 	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
 	if (ret != 0)
@@ -128,23 +133,28 @@
 
 	ipw->attr_memory = ioremap(p_dev->resource[3]->start,
 				resource_size(p_dev->resource[3]));
-	request_mem_region(p_dev->resource[3]->start,
-			resource_size(p_dev->resource[3]),
-			IPWIRELESS_PCCARD_NAME);
+	if (!request_mem_region(p_dev->resource[3]->start,
+				resource_size(p_dev->resource[3]),
+				IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit4;
+	}
 
 	return 0;
 
+exit4:
+	iounmap(ipw->attr_memory);
 exit3:
+	release_mem_region(p_dev->resource[2]->start,
+			resource_size(p_dev->resource[2]));
 exit2:
-	if (ipw->common_memory) {
-		release_mem_region(p_dev->resource[2]->start,
-				resource_size(p_dev->resource[2]));
-		iounmap(ipw->common_memory);
-	}
+	iounmap(ipw->common_memory);
 exit1:
-	release_resource(io_resource);
+	release_region(p_dev->resource[0]->start,
+		       resource_size(p_dev->resource[0]));
+exit:
 	pcmcia_disable_device(p_dev);
-	return -1;
+	return ret;
 }
 
 static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@
 
 static void release_ipwireless(struct ipw_dev *ipw)
 {
+	release_region(ipw->link->resource[0]->start,
+		       resource_size(ipw->link->resource[0]));
 	if (ipw->common_memory) {
 		release_mem_region(ipw->link->resource[2]->start,
 				resource_size(ipw->link->resource[2]));
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index d3d63be..1a9f5f6 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -30,7 +30,7 @@
 
 #define RAMOOPS_KERNMSG_HDR "===="
 
-#define RECORD_SIZE 4096
+#define RECORD_SIZE 4096UL
 
 static ulong mem_address;
 module_param(mem_address, ulong, 0400);
@@ -68,11 +68,16 @@
 	char *buf, *buf_orig;
 	struct timeval timestamp;
 
+	if (reason != KMSG_DUMP_OOPS &&
+	    reason != KMSG_DUMP_PANIC &&
+	    reason != KMSG_DUMP_KEXEC)
+		return;
+
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
 
-	buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+	buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
 	buf_orig = buf;
 
 	memset(buf, '\0', RECORD_SIZE);
@@ -83,8 +88,8 @@
 	buf += res;
 
 	hdr_size = buf - buf_orig;
-	l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
-	l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
+	l2_cpy = min(l2, RECORD_SIZE - hdr_size);
+	l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
 
 	s2_start = l2 - l2_cpy;
 	s1_start = l1 - l1_cpy;
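
Making RECORD_SIZE an unsigned long constant is what lets the casts above disappear: the kernel's min() macro type-checks its two operands and complains when they differ. A small illustrative sketch (EXAMPLE_RECORD_SIZE and example_copy_len are hypothetical):

#include <linux/kernel.h>	/* min() */

#define EXAMPLE_RECORD_SIZE	4096UL	/* unsigned long, like RECORD_SIZE */

static unsigned long example_copy_len(unsigned long want,
				      unsigned long hdr_size)
{
	/*
	 * Both operands of min() are unsigned long here; with a plain
	 * 4096 the right-hand side would be int and min()'s type check
	 * would warn, which is what the removed casts worked around.
	 */
	return min(want, EXAMPLE_RECORD_SIZE - hdr_size);
}
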
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index bfe25ea..b4b9d5a 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -65,15 +65,12 @@
 	if (!bdev)
 		goto out;
 	igrab(bdev->bd_inode);
-	err = blkdev_get(bdev, filp->f_mode);
+	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
 	if (err)
 		goto out;
-	err = bd_claim(bdev, raw_open);
-	if (err)
-		goto out1;
 	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
 	if (err)
-		goto out2;
+		goto out1;
 	filp->f_flags |= O_DIRECT;
 	filp->f_mapping = bdev->bd_inode->i_mapping;
 	if (++raw_devices[minor].inuse == 1)
@@ -83,10 +80,8 @@
 	mutex_unlock(&raw_mutex);
 	return 0;
 
-out2:
-	bd_release(bdev);
 out1:
-	blkdev_put(bdev, filp->f_mode);
+	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
 out:
 	mutex_unlock(&raw_mutex);
 	return err;
@@ -110,8 +105,7 @@
 	}
 	mutex_unlock(&raw_mutex);
 
-	bd_release(bdev);
-	blkdev_put(bdev, filp->f_mode);
+	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
 	return 0;
 }
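
With bd_claim()/bd_release() gone, exclusive ownership is expressed entirely through the open mode: blkdev_get() with FMODE_EXCL claims the device for the given holder, and the matching blkdev_put() must pass FMODE_EXCL as well. A hedged sketch of the pairing (example_claim_bdev is hypothetical, error paths trimmed):

#include <linux/fs.h>

static int example_claim_bdev(struct block_device *bdev, fmode_t mode,
			      void *holder)
{
	int err;

	/* FMODE_EXCL makes blkdev_get() claim the device for 'holder'. */
	err = blkdev_get(bdev, mode | FMODE_EXCL, holder);
	if (err)
		return err;

	/* ... use the device ... */

	/* The put must carry FMODE_EXCL too, or the claim is leaked. */
	blkdev_put(bdev, mode | FMODE_EXCL);
	return 0;
}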
 
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c17a305..dd21df5 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -493,9 +493,6 @@
 		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
 		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
 
-	if (is_itpm(to_pnp_dev(dev)))
-		itpm = 1;
-
 	if (itpm)
 		dev_info(dev, "Intel iTPM workaround enabled\n");
 
@@ -637,6 +634,9 @@
 	else
 		interrupts = 0;
 
+	if (is_itpm(pnp_dev))
+		itpm = 1;
+
 	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
 }
 
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 896a2ce..84b164d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
- * Copyright (C) 2009, 2010 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,7 +32,7 @@
 #include <linux/virtio_console.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include "hvc_console.h"
+#include "../tty/hvc/hvc_console.h"
 
 /*
  * This is a global struct for storing common data for all the devices
@@ -387,6 +388,10 @@
 	unsigned int len;
 	int ret;
 
+	if (!port->portdev) {
+		/* Device has been unplugged.  vqs are already gone. */
+		return;
+	}
 	vq = port->in_vq;
 	if (port->inbuf)
 		buf = port->inbuf;
@@ -469,6 +474,10 @@
 	void *buf;
 	unsigned int len;
 
+	if (!port->portdev) {
+		/* Device has been unplugged.  vqs are already gone. */
+		return;
+	}
 	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
 		kfree(buf);
 		port->outvq_full = false;
@@ -1462,6 +1471,17 @@
 	spin_unlock(&portdev->cvq_lock);
 }
 
+static void out_intr(struct virtqueue *vq)
+{
+	struct port *port;
+
+	port = find_port_by_vq(vq->vdev->priv, vq);
+	if (!port)
+		return;
+
+	wake_up_interruptible(&port->waitqueue);
+}
+
 static void in_intr(struct virtqueue *vq)
 {
 	struct port *port;
@@ -1566,7 +1586,7 @@
 	 */
 	j = 0;
 	io_callbacks[j] = in_intr;
-	io_callbacks[j + 1] = NULL;
+	io_callbacks[j + 1] = out_intr;
 	io_names[j] = "input";
 	io_names[j + 1] = "output";
 	j += 2;
@@ -1580,7 +1600,7 @@
 		for (i = 1; i < nr_ports; i++) {
 			j += 2;
 			io_callbacks[j] = in_intr;
-			io_callbacks[j + 1] = NULL;
+			io_callbacks[j + 1] = out_intr;
 			io_names[j] = "input";
 			io_names[j + 1] = "output";
 		}
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index cfb0f52..effe797 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -202,17 +202,21 @@
 			printk(KERN_INFO "PM-Timer had inconsistent results:"
 			       " 0x%#llx, 0x%#llx - aborting.\n",
 			       value1, value2);
+			pmtmr_ioport = 0;
 			return -EINVAL;
 		}
 		if (i == ACPI_PM_READ_CHECKS) {
 			printk(KERN_INFO "PM-Timer failed consistency check "
 			       " (0x%#llx) - aborting.\n", value1);
+			pmtmr_ioport = 0;
 			return -ENODEV;
 		}
 	}
 
-	if (verify_pmtmr_rate() != 0)
+	if (verify_pmtmr_rate() != 0) {
+		pmtmr_ioport = 0;
 		return -ENODEV;
+	}
 
 	return clocksource_register_hz(&clocksource_acpi_pm,
 						PMTMR_TICKS_PER_SEC);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 01b886e..79c47e8 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -196,9 +196,9 @@
 	clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
-	setup_irq(irq, &tc_irqaction);
-
 	clockevents_register_device(&clkevt.clkevt);
+
+	setup_irq(irq, &tc_irqaction);
 }
 
 #else /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a8c8d9c..ca8ee80 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,7 +71,7 @@
 
 config CPU_FREQ_DEFAULT_GOV_POWERSAVE
 	bool "powersave"
-	depends on EMBEDDED
+	depends on EXPERT
 	select CPU_FREQ_GOV_POWERSAVE
 	help
 	  Use the CPUFreq governor 'powersave' as default. This sets
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f68..5cb4d09 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@
 
 	ret = sysdev_driver_register(&cpu_sysdev_class,
 					&cpufreq_sysdev_driver);
+	if (ret)
+		goto err_null_driver;
 
-	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
 		int i;
 		ret = -ENODEV;
 
@@ -1935,21 +1937,22 @@
 		if (ret) {
 			dprintk("no CPU initialized for driver %s\n",
 							driver_data->name);
-			sysdev_driver_unregister(&cpu_sysdev_class,
-						&cpufreq_sysdev_driver);
-
-			spin_lock_irqsave(&cpufreq_driver_lock, flags);
-			cpufreq_driver = NULL;
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			goto err_sysdev_unreg;
 		}
 	}
 
-	if (!ret) {
-		register_hotcpu_notifier(&cpufreq_cpu_notifier);
-		dprintk("driver %s up and running\n", driver_data->name);
-		cpufreq_debug_enable_ratelimit();
-	}
+	register_hotcpu_notifier(&cpufreq_cpu_notifier);
+	dprintk("driver %s up and running\n", driver_data->name);
+	cpufreq_debug_enable_ratelimit();
 
+	return 0;
+err_sysdev_unreg:
+	sysdev_driver_unregister(&cpu_sysdev_class,
+			&cpufreq_sysdev_driver);
+err_null_driver:
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	cpufreq_driver = NULL;
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 386888f..bf50924 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -96,7 +96,15 @@
 
 	/* enter the state and update stats */
 	dev->last_state = target_state;
+
+	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
+	trace_cpu_idle(next_state, dev->cpu);
+
 	dev->last_residency = target_state->enter(dev, target_state);
+
+	trace_power_end(dev->cpu);
+	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+
 	if (dev->last_state)
 		target_state = dev->last_state;
 
@@ -106,8 +114,6 @@
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /**
@@ -155,6 +161,45 @@
 
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+	ktime_t	t1, t2;
+	s64 diff;
+	int ret;
+
+	t1 = ktime_get();
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+
+	t2 = ktime_get();
+	diff = ktime_to_us(ktime_sub(t2, t1));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	ret = (int) diff;
+	return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+	struct cpuidle_state *state = &dev->states[0];
+
+	cpuidle_set_statedata(state, NULL);
+
+	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+	state->exit_latency = 0;
+	state->target_residency = 0;
+	state->power_usage = -1;
+	state->flags = 0;
+	state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
 /**
  * cpuidle_enable_device - enables idle PM for a CPU
  * @dev: the CPU
@@ -179,6 +224,8 @@
 			return ret;
 	}
 
+	poll_idle_init(dev);
+
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
 
@@ -233,45 +280,6 @@
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
-{
-	ktime_t	t1, t2;
-	s64 diff;
-	int ret;
-
-	t1 = ktime_get();
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-
-	t2 = ktime_get();
-	diff = ktime_to_us(ktime_sub(t2, t1));
-	if (diff > INT_MAX)
-		diff = INT_MAX;
-
-	ret = (int) diff;
-	return ret;
-}
-
-static void poll_idle_init(struct cpuidle_device *dev)
-{
-	struct cpuidle_state *state = &dev->states[0];
-
-	cpuidle_set_statedata(state, NULL);
-
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
-	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
-	state->exit_latency = 0;
-	state->target_residency = 0;
-	state->power_usage = -1;
-	state->flags = CPUIDLE_FLAG_POLL;
-	state->enter = poll_idle;
-}
-#else
-static void poll_idle_init(struct cpuidle_device *dev) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
 /**
  * __cpuidle_register_device - internal register function called before register
  * and enable routines
@@ -292,8 +300,6 @@
 
 	init_completion(&dev->kobj_unregister);
 
-	poll_idle_init(dev);
-
 	/*
 	 * cpuidle driver should set the dev->power_specified bit
 	 * before registering the device if the driver provides
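
poll_idle() measures its own residency with ktime and clamps the microsecond delta so it fits the int it must return. The same measurement idiom as a standalone sketch (example_measure_usecs and the busy callback are hypothetical):

#include <linux/hrtimer.h>	/* ktime_get() */
#include <linux/kernel.h>	/* INT_MAX */

/* Time an arbitrary busy-wait and return the elapsed usecs, clamped. */
static int example_measure_usecs(void (*busy)(void))
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	busy();
	t2 = ktime_get();

	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	return (int)diff;
}
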
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e5..c99305a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -857,7 +857,7 @@
 			printk(KERN_WARNING MV_CESA
 			       "Base driver '%s' could not be loaded!\n",
 			       base_hash_name);
-			err = PTR_ERR(fallback_tfm);
+			err = PTR_ERR(base_hash);
 			goto err_bad_base;
 		}
 	}
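
The mv_cesa one-liner is the classic IS_ERR()/PTR_ERR() mismatch: the error code has to come from the same pointer that was just tested. A minimal sketch with hypothetical names:

#include <linux/err.h>
#include <crypto/hash.h>

static int example_get_base_hash(struct crypto_shash **out, const char *name)
{
	struct crypto_shash *base = crypto_alloc_shash(name, 0, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);	/* the error of 'base', nothing else */

	*out = base;
	return 0;
}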
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 76141262..80dc094 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1542,7 +1542,7 @@
 	return err;
 }
 
-static void __exit n2_unregister_algs(void)
+static void __devexit n2_unregister_algs(void)
 {
 	mutex_lock(&spu_lock);
 	if (!--algs_registered)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 799ca51..add2a1a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -74,11 +74,9 @@
 #define FLAGS_CBC		BIT(1)
 #define FLAGS_GIV		BIT(2)
 
-#define FLAGS_NEW_KEY		BIT(4)
-#define FLAGS_NEW_IV		BIT(5)
-#define FLAGS_INIT		BIT(6)
-#define FLAGS_FAST		BIT(7)
-#define FLAGS_BUSY		8
+#define FLAGS_INIT		BIT(4)
+#define FLAGS_FAST		BIT(5)
+#define FLAGS_BUSY		BIT(6)
 
 struct omap_aes_ctx {
 	struct omap_aes_dev *dd;
@@ -98,19 +96,18 @@
 struct omap_aes_dev {
 	struct list_head	list;
 	unsigned long		phys_base;
-	void __iomem 		*io_base;
+	void __iomem		*io_base;
 	struct clk		*iclk;
 	struct omap_aes_ctx	*ctx;
 	struct device		*dev;
 	unsigned long		flags;
+	int			err;
 
-	u32			*iv;
-	u32			ctrl;
+	spinlock_t		lock;
+	struct crypto_queue	queue;
 
-	spinlock_t			lock;
-	struct crypto_queue		queue;
-
-	struct tasklet_struct		task;
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
 
 	struct ablkcipher_request	*req;
 	size_t				total;
@@ -179,9 +176,13 @@
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
-	int err = 0;
-
+	/*
+	 * Clocks are enabled when a request starts and disabled when it
+	 * finishes. There may be long delays between requests, during which
+	 * the device might go to off mode to save power.
+	 */
 	clk_enable(dd->iclk);
+
 	if (!(dd->flags & FLAGS_INIT)) {
 		/* is it necessary to reset before every operation? */
 		omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,39 +194,26 @@
 		__asm__ __volatile__("nop");
 		__asm__ __volatile__("nop");
 
-		err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
-				AES_REG_SYSSTATUS_RESETDONE);
-		if (!err)
-			dd->flags |= FLAGS_INIT;
+		if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
+				AES_REG_SYSSTATUS_RESETDONE))
+			return -ETIMEDOUT;
+
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
 	}
 
-	return err;
+	return 0;
 }
 
-static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
-{
-	clk_disable(dd->iclk);
-}
-
-static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
 	unsigned int key32;
-	int i;
+	int i, err;
 	u32 val, mask;
 
-	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
-	if (dd->flags & FLAGS_CBC)
-		val |= AES_REG_CTRL_CBC;
-	if (dd->flags & FLAGS_ENCRYPT)
-		val |= AES_REG_CTRL_DIRECTION;
-
-	if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
-		   !(dd->ctx->flags & FLAGS_NEW_KEY))
-		goto out;
-
-	/* only need to write control registers for new settings */
-
-	dd->ctrl = val;
+	err = omap_aes_hw_init(dd);
+	if (err)
+		return err;
 
 	val = 0;
 	if (dd->dma_lch_out >= 0)
@@ -237,30 +225,43 @@
 
 	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
 
-	pr_debug("Set key\n");
 	key32 = dd->ctx->keylen / sizeof(u32);
-	/* set a key */
+
+	/* it seems a key should always be set even if it has not changed */
 	for (i = 0; i < key32; i++) {
 		omap_aes_write(dd, AES_REG_KEY(i),
 			__le32_to_cpu(dd->ctx->key[i]));
 	}
-	dd->ctx->flags &= ~FLAGS_NEW_KEY;
 
-	if (dd->flags & FLAGS_NEW_IV) {
-		pr_debug("Set IV\n");
-		omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
-		dd->flags &= ~FLAGS_NEW_IV;
-	}
+	if ((dd->flags & FLAGS_CBC) && dd->req->info)
+		omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+
+	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+	if (dd->flags & FLAGS_CBC)
+		val |= AES_REG_CTRL_CBC;
+	if (dd->flags & FLAGS_ENCRYPT)
+		val |= AES_REG_CTRL_DIRECTION;
 
 	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
 			AES_REG_CTRL_KEY_SIZE;
 
-	omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
+	omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
 
-out:
-	/* start DMA or disable idle mode */
-	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
-			    AES_REG_MASK_START);
+	/* IN */
+	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+				 dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
+	/* OUT */
+	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+				dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+
+	return 0;
 }
 
 static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -288,8 +289,16 @@
 {
 	struct omap_aes_dev *dd = data;
 
-	if (lch == dd->dma_lch_out)
-		tasklet_schedule(&dd->task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	} else if (lch == dd->dma_lch_in) {
+		return;
+	}
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -339,18 +348,6 @@
 		goto err_dma_out;
 	}
 
-	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
-				 dd->phys_base + AES_REG_DATA, 0, 4);
-
-	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-
-	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
-				dd->phys_base + AES_REG_DATA, 0, 4);
-
-	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-
 	return 0;
 
 err_dma_out:
@@ -406,6 +403,11 @@
 		if (!count)
 			return off;
 
+		/*
+		 * buflen and total are AES_BLOCK_SIZE aligned,
+		 * so count should also be aligned
+		 */
+
 		sg_copy_buf(buf + off, *sg, *offset, count, out);
 
 		off += count;
@@ -461,7 +463,9 @@
 	omap_start_dma(dd->dma_lch_in);
 	omap_start_dma(dd->dma_lch_out);
 
-	omap_aes_write_ctrl(dd);
+	/* start DMA or disable idle mode */
+	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
+			    AES_REG_MASK_START);
 
 	return 0;
 }
@@ -488,8 +492,10 @@
 		count = min(dd->total, sg_dma_len(dd->in_sg));
 		count = min(count, sg_dma_len(dd->out_sg));
 
-		if (count != dd->total)
+		if (count != dd->total) {
+			pr_err("request length != buffer length\n");
 			return -EINVAL;
+		}
 
 		pr_debug("fast\n");
 
@@ -525,23 +531,25 @@
 
 	dd->total -= count;
 
-	err = omap_aes_hw_init(dd);
-
 	err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+	if (err) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	}
 
 	return err;
 }
 
 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 {
-	struct omap_aes_ctx *ctx;
+	struct ablkcipher_request *req = dd->req;
 
 	pr_debug("err: %d\n", err);
 
-	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
+	clk_disable(dd->iclk);
+	dd->flags &= ~FLAGS_BUSY;
 
-	if (!dd->total)
-		dd->req->base.complete(&dd->req->base, err);
+	req->base.complete(&req->base, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -553,8 +561,6 @@
 
 	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
 
-	omap_aes_hw_cleanup(dd);
-
 	omap_stop_dma(dd->dma_lch_in);
 	omap_stop_dma(dd->dma_lch_out);
 
@@ -574,40 +580,39 @@
 		}
 	}
 
-	if (err || !dd->total)
-		omap_aes_finish_req(dd, err);
-
 	return err;
 }
 
-static int omap_aes_handle_req(struct omap_aes_dev *dd)
+static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+			       struct ablkcipher_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_aes_ctx *ctx;
 	struct omap_aes_reqctx *rctx;
-	struct ablkcipher_request *req;
 	unsigned long flags;
-
-	if (dd->total)
-		goto start;
+	int err, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
+	if (async_req)
+		dd->flags |= FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
 
 	req = ablkcipher_request_cast(async_req);
 
-	pr_debug("get new req\n");
-
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
@@ -621,27 +626,22 @@
 	rctx->mode &= FLAGS_MODE_MASK;
 	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-	dd->iv = req->info;
-	if ((dd->flags & FLAGS_CBC) && dd->iv)
-		dd->flags |= FLAGS_NEW_IV;
-	else
-		dd->flags &= ~FLAGS_NEW_IV;
-
+	dd->ctx = ctx;
 	ctx->dd = dd;
-	if (dd->ctx != ctx) {
-		/* assign new context to device */
-		dd->ctx = ctx;
-		ctx->flags |= FLAGS_NEW_KEY;
+
+	err = omap_aes_write_ctrl(dd);
+	if (!err)
+		err = omap_aes_crypt_dma_start(dd);
+	if (err) {
+		/* done_task will not finish it, so do it here */
+		omap_aes_finish_req(dd, err);
+		tasklet_schedule(&dd->queue_task);
 	}
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
-		pr_err("request size is not exact amount of AES blocks\n");
-
-start:
-	return omap_aes_crypt_dma_start(dd);
+	return ret; /* return ret, which is enqueue return value */
 }
 
-static void omap_aes_task(unsigned long data)
+static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
 	int err;
@@ -650,40 +650,50 @@
 
 	err = omap_aes_crypt_dma_stop(dd);
 
-	err = omap_aes_handle_req(dd);
+	err = dd->err ? : err;
+
+	if (dd->total && !err) {
+		err = omap_aes_crypt_dma_start(dd);
+		if (!err)
+			return; /* DMA started. Not finishing. */
+	}
+
+	omap_aes_finish_req(dd, err);
+	omap_aes_handle_queue(dd, NULL);
 
 	pr_debug("exit\n");
 }
 
+static void omap_aes_queue_task(unsigned long data)
+{
+	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+
+	omap_aes_handle_queue(dd, NULL);
+}
+
 static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
 			crypto_ablkcipher_reqtfm(req));
 	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct omap_aes_dev *dd;
-	unsigned long flags;
-	int err;
 
 	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
 		  !!(mode & FLAGS_ENCRYPT),
 		  !!(mode & FLAGS_CBC));
 
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+		pr_err("request size is not exact amount of AES blocks\n");
+		return -EINVAL;
+	}
+
 	dd = omap_aes_find_dev(ctx);
 	if (!dd)
 		return -ENODEV;
 
 	rctx->mode = mode;
 
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ablkcipher_enqueue_request(&dd->queue, req);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
-		omap_aes_handle_req(dd);
-
-	pr_debug("exit\n");
-
-	return err;
+	return omap_aes_handle_queue(dd, req);
 }
 
 /* ********************** ALG API ************************************ */
@@ -701,7 +711,6 @@
 
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
-	ctx->flags |= FLAGS_NEW_KEY;
 
 	return 0;
 }
@@ -750,7 +759,7 @@
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask	 	= 0,
+	.cra_alignmask		= 0,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= omap_aes_cra_init,
@@ -770,7 +779,7 @@
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask	 	= 0,
+	.cra_alignmask		= 0,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= omap_aes_cra_init,
@@ -849,7 +858,8 @@
 		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
 	clk_disable(dd->iclk);
 
-	tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
+	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
 	err = omap_aes_dma_init(dd);
 	if (err)
@@ -876,7 +886,8 @@
 		crypto_unregister_alg(&algs[j]);
 	omap_aes_dma_cleanup(dd);
 err_dma:
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	iounmap(dd->io_base);
 err_io:
 	clk_put(dd->iclk);
@@ -903,7 +914,8 @@
 	for (i = 0; i < ARRAY_SIZE(algs); i++)
 		crypto_unregister_alg(&algs[i]);
 
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	omap_aes_dma_cleanup(dd);
 	iounmap(dd->io_base);
 	clk_put(dd->iclk);
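
omap_aes_handle_queue() now follows the pattern several crypto drivers use: the request is enqueued and the BUSY flag is tested and set under a single spinlock acquisition, so only one request at a time is dispatched to the hardware while later submissions simply queue up. A simplified sketch of that locking shape (struct example_dev and EXAMPLE_BUSY are hypothetical; hardware programming omitted):

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <crypto/algapi.h>

#define EXAMPLE_BUSY	BIT(0)

struct example_dev {
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;
};

static int example_handle_queue(struct example_dev *dd,
				struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, req);
	if (dd->flags & EXAMPLE_BUSY) {
		/* Hardware busy: the request just waits in the queue. */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= EXAMPLE_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* ... program the hardware with async_req here ... */

	return ret;	/* enqueue status, typically -EINPROGRESS */
}
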
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a081c7c..2e71123 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,10 +72,9 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL	HZ
 
-#define FLAGS_FIRST		0x0001
 #define FLAGS_FINUP		0x0002
 #define FLAGS_FINAL		0x0004
-#define FLAGS_FAST		0x0008
+#define FLAGS_SG		0x0008
 #define FLAGS_SHA1		0x0010
 #define FLAGS_DMA_ACTIVE	0x0020
 #define FLAGS_OUTPUT_READY	0x0040
@@ -83,13 +82,17 @@
 #define FLAGS_INIT		0x0100
 #define FLAGS_CPU		0x0200
 #define FLAGS_HMAC		0x0400
-
-/* 3rd byte */
-#define FLAGS_BUSY		16
+#define FLAGS_ERROR		0x0800
+#define FLAGS_BUSY		0x1000
 
 #define OP_UPDATE	1
 #define OP_FINAL	2
 
+#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
+#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN		PAGE_SIZE
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
@@ -97,8 +100,8 @@
 	unsigned long		flags;
 	unsigned long		op;
 
+	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
 	size_t			digcnt;
-	u8			*buffer;
 	size_t			bufcnt;
 	size_t			buflen;
 	dma_addr_t		dma_addr;
@@ -107,6 +110,8 @@
 	struct scatterlist	*sg;
 	unsigned int		offset;	/* offset in current sg */
 	unsigned int		total;	/* total request */
+
+	u8			buffer[0] OMAP_ALIGNED;
 };
 
 struct omap_sham_hmac_ctx {
@@ -136,6 +141,7 @@
 	int			irq;
 	struct clk		*iclk;
 	spinlock_t		lock;
+	int			err;
 	int			dma;
 	int			dma_lch;
 	struct tasklet_struct	done_task;
@@ -194,53 +200,68 @@
 static void omap_sham_copy_hash(struct ahash_request *req, int out)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)ctx->digest;
+	int i;
+
+	/* MD5 is almost unused. So copy sha1 size to reduce code */
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+		if (out)
+			hash[i] = omap_sham_read(ctx->dd,
+						SHA_REG_DIGEST(i));
+		else
+			omap_sham_write(ctx->dd,
+					SHA_REG_DIGEST(i), hash[i]);
+	}
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *in = (u32 *)ctx->digest;
 	u32 *hash = (u32 *)req->result;
 	int i;
 
+	if (!hash)
+		return;
+
 	if (likely(ctx->flags & FLAGS_SHA1)) {
 		/* SHA1 results are in big endian */
 		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-							cpu_to_be32(hash[i]));
+			hash[i] = be32_to_cpu(in[i]);
 	} else {
 		/* MD5 results are in little endian */
 		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-							cpu_to_le32(hash[i]));
+			hash[i] = le32_to_cpu(in[i]);
 	}
 }
 
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
+{
+	clk_enable(dd->iclk);
+
+	if (!(dd->flags & FLAGS_INIT)) {
+		omap_sham_write_mask(dd, SHA_REG_MASK,
+			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+					SHA_REG_SYSSTATUS_RESETDONE))
+			return -ETIMEDOUT;
+
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
 				 int final, int dma)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	u32 val = length << 5, mask;
 
-	if (unlikely(!ctx->digcnt)) {
-
-		clk_enable(dd->iclk);
-
-		if (!(dd->flags & FLAGS_INIT)) {
-			omap_sham_write_mask(dd, SHA_REG_MASK,
-				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
-
-			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
-						SHA_REG_SYSSTATUS_RESETDONE))
-				return -ETIMEDOUT;
-
-			dd->flags |= FLAGS_INIT;
-		}
-	} else {
+	if (likely(ctx->digcnt))
 		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
-	}
 
 	omap_sham_write_mask(dd, SHA_REG_MASK,
 		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -260,29 +281,26 @@
 			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 
 	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
-	return 0;
 }
 
 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, count, len32;
+	int count, len32;
 	const u32 *buffer = (const u32 *)buf;
 
 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	err = omap_sham_write_ctrl(dd, length, final, 0);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 0);
+
+	/* digcnt must be non-zero before the next lines so clocks get disabled later */
+	ctx->digcnt += length;
 
 	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
 		return -ETIMEDOUT;
 
-	ctx->digcnt += length;
-
 	if (final)
 		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
 
@@ -298,16 +316,11 @@
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, len32;
+	int len32;
 
 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	/* flush cache entries related to our page */
-	if (dma_addr == ctx->dma_addr)
-		dma_sync_single_for_device(dd->dev, dma_addr, length,
-					   DMA_TO_DEVICE);
-
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -317,9 +330,7 @@
 	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 				dma_addr, 0, 0);
 
-	err = omap_sham_write_ctrl(dd, length, final, 1);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 1);
 
 	ctx->digcnt += length;
 
@@ -371,15 +382,29 @@
 	return 0;
 }
 
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+					struct omap_sham_reqctx *ctx,
+					size_t length, int final)
+{
+	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+				       DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+		return -EINVAL;
+	}
+
+	ctx->flags &= ~FLAGS_SG;
+
+	/* the next call does not fail, so no unmap is needed on error */
+	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	unsigned int final;
 	size_t count;
 
-	if (!ctx->total)
-		return 0;
-
 	omap_sham_append_sg(ctx);
 
 	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -390,30 +415,68 @@
 	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 		count = ctx->bufcnt;
 		ctx->bufcnt = 0;
-		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+		return omap_sham_xmit_dma_map(dd, ctx, count, final);
 	}
 
 	return 0;
 }
 
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	unsigned int length;
+	unsigned int length, final, tail;
+	struct scatterlist *sg;
 
-	ctx->flags |= FLAGS_FAST;
+	if (!ctx->total)
+		return 0;
 
-	length = min(ctx->total, sg_dma_len(ctx->sg));
-	ctx->total = length;
+	if (ctx->bufcnt || ctx->offset)
+		return omap_sham_update_dma_slow(dd);
+
+	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+			ctx->digcnt, ctx->bufcnt, ctx->total);
+
+	sg = ctx->sg;
+
+	if (!SG_AA(sg))
+		return omap_sham_update_dma_slow(dd);
+
+	if (!sg_is_last(sg) && !SG_SA(sg))
+		/* size is not SHA1_BLOCK_SIZE aligned */
+		return omap_sham_update_dma_slow(dd);
+
+	length = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & FLAGS_FINUP)) {
+			/* data that is not final must be SHA1_MD5_BLOCK_SIZE aligned */
+			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+			/* without finup() we need one block to close the hash */
+			if (!tail)
+				tail = SHA1_MD5_BLOCK_SIZE;
+			length -= tail;
+		}
+	}
 
 	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 		dev_err(dd->dev, "dma_map_sg  error\n");
 		return -EINVAL;
 	}
 
-	ctx->total -= length;
+	ctx->flags |= FLAGS_SG;
 
-	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+	ctx->total -= length;
+	ctx->offset = length; /* offset where the slow path starts */
+
+	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+	/* the next call does not fail, so no unmap is needed on error */
+	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -433,8 +496,17 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
 	omap_stop_dma(dd->dma_lch);
-	if (ctx->flags & FLAGS_FAST)
+	if (ctx->flags & FLAGS_SG) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+	} else {
+		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+				 DMA_TO_DEVICE);
+	}
 
 	return 0;
 }
@@ -454,14 +526,7 @@
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (ctx->digcnt)
-		clk_disable(dd->iclk);
-
-	if (ctx->dma_addr)
-		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-				 DMA_TO_DEVICE);
-
-	if (ctx->buffer)
-		free_page((unsigned long)ctx->buffer);
+		omap_sham_copy_ready_hash(req);
 
 	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 }
@@ -489,8 +554,6 @@
 
 	ctx->flags = 0;
 
-	ctx->flags |= FLAGS_FIRST;
-
 	dev_dbg(dd->dev, "init: digest size: %d\n",
 		crypto_ahash_digestsize(tfm));
 
@@ -499,21 +562,7 @@
 
 	ctx->bufcnt = 0;
 	ctx->digcnt = 0;
-
-	ctx->buflen = PAGE_SIZE;
-	ctx->buffer = (void *)__get_free_page(
-				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-				GFP_KERNEL : GFP_ATOMIC);
-	if (!ctx->buffer)
-		return -ENOMEM;
-
-	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-					DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-		free_page((unsigned long)ctx->buffer);
-		return -EINVAL;
-	}
+	ctx->buflen = BUFLEN;
 
 	if (tctx->flags & FLAGS_HMAC) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -538,10 +587,8 @@
 
 	if (ctx->flags & FLAGS_CPU)
 		err = omap_sham_update_cpu(dd);
-	else if (ctx->flags & FLAGS_FAST)
-		err = omap_sham_update_dma_fast(dd);
 	else
-		err = omap_sham_update_dma_slow(dd);
+		err = omap_sham_update_dma_start(dd);
 
 	/* wait for dma completion before can take more data */
 	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -560,15 +607,12 @@
 		use_dma = 0;
 
 	if (use_dma)
-		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
 	else
 		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 
 	ctx->bufcnt = 0;
 
-	if (err != -EINPROGRESS)
-		omap_sham_cleanup(req);
-
 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
 
 	return err;
@@ -576,6 +620,7 @@
 
 static int omap_sham_finish_req_hmac(struct ahash_request *req)
 {
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_hmac_ctx *bctx = tctx->base;
 	int bs = crypto_shash_blocksize(bctx->shash);
@@ -590,48 +635,56 @@
 
 	return crypto_shash_init(&desc.shash) ?:
 	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
-	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+	       crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
 }
 
 static void omap_sham_finish_req(struct ahash_request *req, int err)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
 
 	if (!err) {
 		omap_sham_copy_hash(ctx->dd->req, 1);
 		if (ctx->flags & FLAGS_HMAC)
 			err = omap_sham_finish_req_hmac(req);
+	} else {
+		ctx->flags |= FLAGS_ERROR;
 	}
 
-	if (ctx->flags & FLAGS_FINAL)
+	if ((ctx->flags & FLAGS_FINAL) || err)
 		omap_sham_cleanup(req);
 
-	clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+	clk_disable(dd->iclk);
+	dd->flags &= ~FLAGS_BUSY;
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
 }
 
-static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+				  struct ahash_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_sham_reqctx *ctx;
-	struct ahash_request *req, *prev_req;
+	struct ahash_request *prev_req;
 	unsigned long flags;
-	int err = 0;
-
-	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
-		return 0;
+	int err = 0, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
+	if (async_req)
+		dd->flags |= FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -646,7 +699,22 @@
 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
 						ctx->op, req->nbytes);
 
-	if (req != prev_req && ctx->digcnt)
+
+	err = omap_sham_hw_init(dd);
+	if (err)
+		goto err1;
+
+	omap_set_dma_dest_params(dd->dma_lch, 0,
+			OMAP_DMA_AMODE_CONSTANT,
+			dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_16);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_4);
+
+	if (ctx->digcnt)
 		/* request has changed - restore hash */
 		omap_sham_copy_hash(req, 0);
 
@@ -658,7 +726,7 @@
 	} else if (ctx->op == OP_FINAL) {
 		err = omap_sham_final_req(dd);
 	}
-
+err1:
 	if (err != -EINPROGRESS) {
 		/* done_task will not finish it, so do it here */
 		omap_sham_finish_req(req, err);
@@ -667,7 +735,7 @@
 
 	dev_dbg(dd->dev, "exit, err: %d\n", err);
 
-	return err;
+	return ret;
 }
 
 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -675,18 +743,10 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_dev *dd = tctx->dd;
-	unsigned long flags;
-	int err;
 
 	ctx->op = op;
 
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ahash_enqueue_request(&dd->queue, req);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	omap_sham_handle_queue(dd);
-
-	return err;
+	return omap_sham_handle_queue(dd, req);
 }
 
 static int omap_sham_update(struct ahash_request *req)
@@ -709,21 +769,13 @@
 			*/
 			omap_sham_append_sg(ctx);
 			return 0;
-		} else if (ctx->bufcnt + ctx->total <= 64) {
+		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+			/*
+			 * faster to use CPU for short transfers
+			 */
 			ctx->flags |= FLAGS_CPU;
-		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
-			/* may be can use faster functions */
-			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
-								sizeof(u32));
-
-			if (aligned && (ctx->flags & FLAGS_FIRST))
-				/* digest: first and final */
-				ctx->flags |= FLAGS_FAST;
-
-			ctx->flags &= ~FLAGS_FIRST;
 		}
-	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
-		/* if not finaup -> not fast */
+	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 		omap_sham_append_sg(ctx);
 		return 0;
 	}
@@ -761,12 +813,14 @@
 
 	ctx->flags |= FLAGS_FINUP;
 
-	/* OMAP HW accel works only with buffers >= 9 */
-	/* HMAC is always >= 9 because of ipad */
-	if ((ctx->digcnt + ctx->bufcnt) < 9)
-		err = omap_sham_final_shash(req);
-	else if (ctx->bufcnt)
-		return omap_sham_enqueue(req, OP_FINAL);
+	if (!(ctx->flags & FLAGS_ERROR)) {
+		/* OMAP HW accel works only with buffers >= 9 */
+		/* HMAC is always >= 9 because of ipad */
+		if ((ctx->digcnt + ctx->bufcnt) < 9)
+			err = omap_sham_final_shash(req);
+		else if (ctx->bufcnt)
+			return omap_sham_enqueue(req, OP_FINAL);
+	}
 
 	omap_sham_cleanup(req);
 
@@ -836,6 +890,8 @@
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 	const char *alg_name = crypto_tfm_alg_name(tfm);
 
+	pr_info("enter\n");
+
 	/* Allocate a fallback and abort if it failed. */
 	tctx->fallback = crypto_alloc_shash(alg_name, 0,
 					    CRYPTO_ALG_NEED_FALLBACK);
@@ -846,7 +902,7 @@
 	}
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct omap_sham_reqctx));
+				 sizeof(struct omap_sham_reqctx) + BUFLEN);
 
 	if (alg_base) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -932,7 +988,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -956,7 +1012,7 @@
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_sha1_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -980,7 +1036,7 @@
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_md5_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -993,7 +1049,7 @@
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 	struct ahash_request *req = dd->req;
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	int ready = 1;
+	int ready = 0, err = 0;
 
 	if (ctx->flags & FLAGS_OUTPUT_READY) {
 		ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1003,15 +1059,18 @@
 	if (dd->flags & FLAGS_DMA_ACTIVE) {
 		dd->flags &= ~FLAGS_DMA_ACTIVE;
 		omap_sham_update_dma_stop(dd);
-		omap_sham_update_dma_slow(dd);
+		if (!dd->err)
+			err = omap_sham_update_dma_start(dd);
 	}
 
-	if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
-		dev_dbg(dd->dev, "update done\n");
+	err = dd->err ? : err;
+
+	if (err != -EINPROGRESS && (ready || err)) {
+		dev_dbg(dd->dev, "update done: err: %d\n", err);
 		/* finish current request */
-		omap_sham_finish_req(req, 0);
+		omap_sham_finish_req(req, err);
 		/* start new request */
-		omap_sham_handle_queue(dd);
+		omap_sham_handle_queue(dd, NULL);
 	}
 }
 
@@ -1019,7 +1078,7 @@
 {
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 
-	omap_sham_handle_queue(dd);
+	omap_sham_handle_queue(dd, NULL);
 }
 
 static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1041,6 +1100,7 @@
 	omap_sham_read(dd, SHA_REG_CTRL);
 
 	ctx->flags |= FLAGS_OUTPUT_READY;
+	dd->err = 0;
 	tasklet_schedule(&dd->done_task);
 
 	return IRQ_HANDLED;
@@ -1050,8 +1110,13 @@
 {
 	struct omap_sham_dev *dd = data;
 
-	if (likely(lch == dd->dma_lch))
-		tasklet_schedule(&dd->done_task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	}
+
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1066,15 +1131,6 @@
 		dev_err(dd->dev, "Unable to request DMA channel\n");
 		return err;
 	}
-	omap_set_dma_dest_params(dd->dma_lch, 0,
-			OMAP_DMA_AMODE_CONSTANT,
-			dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
-	omap_set_dma_dest_burst_mode(dd->dma_lch,
-			OMAP_DMA_DATA_BURST_16);
-
-	omap_set_dma_src_burst_mode(dd->dma_lch,
-			OMAP_DMA_DATA_BURST_4);
 
 	return 0;
 }
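
The SG_AA()/SG_SA() checks gate the zero-copy DMA path on scatterlist alignment: the start offset must be 32-bit aligned, and any entry that is not the last must cover a whole number of SHA1/MD5 blocks; anything else falls back to the bounce-buffer slow path. The decision, as a compact sketch with hypothetical names:

#include <linux/kernel.h>	/* IS_ALIGNED */
#include <linux/scatterlist.h>

#define EXAMPLE_BLOCK_SIZE	64	/* SHA1/MD5 block size */

/* Return true when 'sg' may be handed to the DMA engine directly. */
static bool example_sg_dma_ok(struct scatterlist *sg)
{
	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return false;	/* start address not word aligned */

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, EXAMPLE_BLOCK_SIZE))
		return false;	/* middle entries must be block aligned */

	return true;
}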
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a515ba..db33d30 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -9,6 +9,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -21,7 +22,6 @@
 #include <asm/byteorder.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
-#include "padlock.h"
 
 /*
  * Number of data blocks actually fetched for each xcrypt insn.
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d3a27e0..adf075b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -13,6 +13,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
 #include <crypto/sha.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -22,13 +23,6 @@
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/i387.h>
-#include "padlock.h"
-
-#ifdef CONFIG_64BIT
-#define STACK_ALIGN 16
-#else
-#define STACK_ALIGN 4
-#endif
 
 struct padlock_sha_desc {
 	struct shash_desc fallback;
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index b98c676..c461eda 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -110,8 +110,6 @@
 
 	/* at this point only one domain in the list is expected */
 	domain = list_first_entry(&dca_domains, struct dca_domain, node);
-	if (!domain)
-		return;
 
 	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
 		list_del(&dca->node);
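
The dropped check in dca-core could never fire: list_first_entry() is just container_of() on head->next and never yields NULL, so an empty list has to be caught with list_empty() before the call. Roughly (example_item and example_first are hypothetical):

#include <linux/list.h>

struct example_item {
	struct list_head	node;
	int			value;
};

static struct example_item *example_first(struct list_head *head)
{
	/* Guard with list_empty(); list_first_entry() cannot return NULL. */
	if (list_empty(head))
		return NULL;

	return list_first_entry(head, struct example_item, node);
}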
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee2359..1c28816 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
-	depends on PPC_MPC512x
+	depends on PPC_MPC512x || PPC_MPC831x
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
@@ -200,11 +200,16 @@
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Topcliff (Intel EG20T) PCH DMA support"
+	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
-	  Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver can also be used for the OKI SEMICONDUCTOR ML7213 IOH
+	  (Input/Output Hub), which is intended for IVI (In-Vehicle
+	  Infotainment) use. The ML7213 is a companion chip for the Intel
+	  Atom E6xx series and is fully compatible with the Intel EG20T PCH.
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9..07bca49 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59
  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  *
- * The full GNU General Public License is iin this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
  *
  * Documentation: ARM DDI 0196G == PL080
- * Documentation: ARM DDI 0218E	== PL081
+ * Documentation: ARM DDI 0218E == PL081
  *
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
  *
  * The PL080 has 8 channels available for simultaneous use, and the PL081
  * has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
  *
  * ASSUMES default (little) endianness for DMA transfers
  *
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ *  - DMAC flow control: the transfer size defines the number of transfers
+ *    which occur for the current LLI entry, and the DMAC raises TC at the
+ *    end of every LLI entry.  Observed behaviour shows the DMAC listening
+ *    to both the BREQ and SREQ signals (contrary to documented),
+ *    transferring data if either is active.  The LBREQ and LSREQ signals
+ *    are ignored.
+ *
+ *  - Peripheral flow control: the transfer size is ignored (and should be
+ *    zero).  The data is transferred from the current LLI entry, until
+ *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
+ *    will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method.  However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode, and these
+ * hardware restrictions therefore prevent them from using scatter DMA.
  *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
@@ -61,50 +77,40 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/dmapool.h>
-#include <linux/amba/bus.h>
 #include <linux/dmaengine.h>
+#include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
 #include <asm/hardware/pl080.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
 
 #define DRIVER_NAME	"pl08xdmac"
 
 /**
- * struct vendor_data - vendor-specific config parameters
- * for PL08x derivates
- * @name: the name of this specific variant
+ * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
- * @dualmaster: whether this version supports dual AHB masters
- * or not.
+ * @dualmaster: whether this version supports dual AHB masters or not.
  */
 struct vendor_data {
-	char *name;
 	u8 channels;
 	bool dualmaster;
 };
 
 /*
  * PL08X private data structures
- * An LLI struct - see pl08x TRM
- * Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info
- * is in cctl
+ * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info is in cctl.  Also note that these
+ * are fixed 32-bit quantities.
  */
-struct lli {
-	dma_addr_t src;
-	dma_addr_t dst;
-	dma_addr_t next;
+struct pl08x_lli {
+	u32 src;
+	u32 dst;
+	u32 lli;
 	u32 cctl;
 };
 
@@ -119,6 +125,8 @@
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to OR into the LLI pointer, selecting the AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
 struct pl08x_driver_data {
@@ -126,11 +134,13 @@
 	struct dma_device memcpy;
 	void __iomem *base;
 	struct amba_device *adev;
-	struct vendor_data *vd;
+	const struct vendor_data *vd;
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
 	int pool_ctr;
+	u8 lli_buses;
+	u8 mem_buses;
 	spinlock_t lock;
 };
 
@@ -152,9 +162,9 @@
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
 #define PL08X_MAX_ALLOCS	0x40
-#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
 #define PL08X_ALIGN		8
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +172,11 @@
 	return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
@@ -177,103 +192,63 @@
 
 /*
  * Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed.  Poke them into the hardware
+ * and start the transfer.
  */
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
-			    struct pl08x_phy_chan *ch)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+	struct pl08x_txd *txd)
 {
-	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=%08x, cdst=%08x, "
-		 "cctl=%08x, clli=%08x, ccfg=%08x\n",
-		ch->id,
-		ch->csrc,
-		ch->cdst,
-		ch->cctl,
-		ch->clli,
-		ch->ccfg);
-
-	writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
-	writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
-	writel(ch->clli, ch->base + PL080_CH_LLI);
-	writel(ch->cctl, ch->base + PL080_CH_CONTROL);
-	writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
-{
-	struct pl08x_channel_data *cd = plchan->cd;
+	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_txd *txd = plchan->at;
-
-	/* Copy the basic control register calculated at transfer config */
-	phychan->csrc = txd->csrc;
-	phychan->cdst = txd->cdst;
-	phychan->clli = txd->clli;
-	phychan->cctl = txd->cctl;
-
-	/* Assign the signal to the proper control registers */
-	phychan->ccfg = cd->ccfg;
-	phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
-	phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
-	/* If it wasn't set from AMBA, ignore it */
-	if (txd->direction == DMA_TO_DEVICE)
-		/* Select signal as destination */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
-	else if (txd->direction == DMA_FROM_DEVICE)
-		/* Select signal as source */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
-	/* Always enable error interrupts */
-	phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
-	/* Always enable terminal interrupts */
-	phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
-				  struct pl08x_phy_chan *ch)
-{
+	struct pl08x_lli *lli = &txd->llis_va[0];
 	u32 val;
 
-	/*
-	 * Do not access config register until channel shows as disabled
-	 */
-	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
-		;
+	plchan->at = txd;
 
-	/*
-	 * Do not access config register until channel shows as inactive
-	 */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	/* Wait for channel inactive */
+	while (pl08x_phy_channel_busy(phychan))
+		cpu_relax();
+
+	dev_vdbg(&pl08x->adev->dev,
+		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+		txd->ccfg);
+
+	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+	writel(lli->lli, phychan->base + PL080_CH_LLI);
+	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+	/* Enable the DMA channel */
+	/* Do not access config register until channel shows as disabled */
+	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+		cpu_relax();
+
+	/* Do not access config register until channel shows as inactive */
+	val = readl(phychan->base + PL080_CH_CONFIG);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(ch->base + PL080_CH_CONFIG);
+		val = readl(phychan->base + PL080_CH_CONFIG);
 
-	writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
 }
 
 /*
- * Overall DMAC remains enabled always.
+ * Pause the channel by setting the HALT bit.
  *
- * Disabling individual channels could lose data.
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still timeout if the DMAC FIFO never drains of data.)
  *
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
- *
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
 	u32 val;
+	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
 	val = readl(ch->base + PL080_CH_CONFIG);
@@ -281,8 +256,13 @@
 	writel(val, ch->base + PL080_CH_CONFIG);
 
 	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
+	for (timeout = 1000; timeout; timeout--) {
+		if (!pl08x_phy_channel_busy(ch))
+			break;
+		udelay(1);
+	}
+	if (pl08x_phy_channel_busy(ch))
+		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -296,19 +276,24 @@
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status.  This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+	struct pl08x_phy_chan *ch)
 {
-	u32 val;
+	u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-	pl08x_pause_phy_chan(ch);
+	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+	         PL080_CONFIG_TC_IRQ_MASK);
 
-	/* Disable channel */
-	val = readl(ch->base + PL080_CH_CONFIG);
-	val &= ~PL080_CONFIG_ENABLE;
-	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-	val &= ~PL080_CONFIG_TC_IRQ_MASK;
 	writel(val, ch->base + PL080_CH_CONFIG);
+
+	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -333,54 +318,56 @@
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
-	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *txd;
 	unsigned long flags;
-	u32 bytes = 0;
+	size_t bytes = 0;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
 	ch = plchan->phychan;
 	txd = plchan->at;
 
 	/*
-	 * Next follow the LLIs to get the number of pending bytes in the
-	 * currently active transaction.
+	 * Follow the LLIs to get the number of remaining
+	 * bytes in the currently active transaction.
 	 */
 	if (ch && txd) {
-		struct lli *llis_va = txd->llis_va;
-		struct lli *llis_bus = (struct lli *) txd->llis_bus;
-		u32 clli = readl(ch->base + PL080_CH_LLI);
+		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
-		/* First get the bytes in the current active LLI */
+		/* First get the remaining bytes in the active transfer */
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
 		if (clli) {
-			int i = 0;
+			struct pl08x_lli *llis_va = txd->llis_va;
+			dma_addr_t llis_bus = txd->llis_bus;
+			int index;
 
-			/* Forward to the LLI pointed to by clli */
-			while ((clli != (u32) &(llis_bus[i])) &&
-			       (i < MAX_NUM_TSFR_LLIS))
-				i++;
+			BUG_ON(clli < llis_bus || clli >= llis_bus +
+				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
 
-			while (clli) {
-				bytes += get_bytes_in_cctl(llis_va[i].cctl);
+			/*
+			 * Locate the next LLI - as this is an array,
+			 * it's simple maths to find.
+			 */
+			index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+
+			for (; index < MAX_NUM_TSFR_LLIS; index++) {
+				bytes += get_bytes_in_cctl(llis_va[index].cctl);
+
 				/*
-				 * A clli of 0x00000000 will terminate the
-				 * LLI list
+				 * An LLI pointer of 0 terminates the LLI list
 				 */
-				clli = llis_va[i].next;
-				i++;
+				if (!llis_va[index].lli)
+					break;
 			}
 		}
 	}
 
 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->desc_list)) {
-		list_for_each_entry(txdi, &plchan->desc_list, node) {
+	if (!list_empty(&plchan->pend_list)) {
+		struct pl08x_txd *txdi;
+		list_for_each_entry(txdi, &plchan->pend_list, node) {
 			bytes += txdi->len;
 		}
-
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
@@ -390,6 +377,10 @@
 
 /*
  * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
  */
 static struct pl08x_phy_chan *
 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +390,6 @@
 	unsigned long flags;
 	int i;
 
-	/*
-	 * Try to locate a physical channel to be used for
-	 * this transfer. If all are taken return NULL and
-	 * the requester will have to cope by using some fallback
-	 * PIO mode or retrying later.
-	 */
 	for (i = 0; i < pl08x->vd->channels; i++) {
 		ch = &pl08x->phy_chans[i];
 
@@ -433,13 +418,12 @@
 {
 	unsigned long flags;
 
+	spin_lock_irqsave(&ch->lock, flags);
+
 	/* Stop the channel and clear its interrupts */
-	pl08x_stop_phy_chan(ch);
-	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+	pl08x_terminate_phy_chan(pl08x, ch);
 
 	/* Mark it as free */
-	spin_lock_irqsave(&ch->lock, flags);
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -465,11 +449,11 @@
 }
 
 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
-				  u32 tsize)
+				  size_t tsize)
 {
 	u32 retbits = cctl;
 
-	/* Remove all src, dst and transfersize bits */
+	/* Remove all src, dst and transfer size bits */
 	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +493,87 @@
 	return retbits;
 }
 
+struct pl08x_lli_build_data {
+	struct pl08x_txd *txd;
+	struct pl08x_driver_data *pl08x;
+	struct pl08x_bus_data srcbus;
+	struct pl08x_bus_data dstbus;
+	size_t remainder;
+};
+
 /*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer. This prefers the
+ * destination bus if both are available; if there is a fixed address
+ * on one bus, the other will be chosen.
  */
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
-	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
-	struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = src_bus;
-		*sbus = dst_bus;
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-		*mbus = dst_bus;
-		*sbus = src_bus;
+		*mbus = &bd->dstbus;
+		*sbus = &bd->srcbus;
 	} else {
-		if (dst_bus->buswidth == 4) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 4) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
-		} else if (dst_bus->buswidth == 2) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 2) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
+		if (bd->dstbus.buswidth == 4) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 4) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
+		} else if (bd->dstbus.buswidth == 2) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 2) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		} else {
-			/* src_bus->buswidth == 1 */
-			*mbus = dst_bus;
-			*sbus = src_bus;
+			/* bd->srcbus.buswidth == 1 */
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
 		}
 	}
 }
 
 /*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fill in one LLI for a certain transfer descriptor and advance the counter
  */
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
-			    struct pl08x_txd *txd, int num_llis, int len,
-			    u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+	int num_llis, int len, u32 cctl)
 {
-	struct lli *llis_va = txd->llis_va;
-	struct lli *llis_bus = (struct lli *) txd->llis_bus;
+	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
-	llis_va[num_llis].cctl		= cctl;
-	llis_va[num_llis].src		= txd->srcbus.addr;
-	llis_va[num_llis].dst		= txd->dstbus.addr;
-
-	/*
-	 * On versions with dual masters, you can optionally AND on
-	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
-	 * in new LLIs with that controller, but we always try to
-	 * choose AHB1 to point into memory. The idea is to have AHB2
-	 * fixed on the peripheral and AHB1 messing around in the
-	 * memory. So we don't manipulate this bit currently.
-	 */
-
-	llis_va[num_llis].next =
-		(dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+	llis_va[num_llis].cctl = cctl;
+	llis_va[num_llis].src = bd->srcbus.addr;
+	llis_va[num_llis].dst = bd->dstbus.addr;
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	if (bd->pl08x->lli_buses & PL08X_AHB2)
+		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
-		txd->srcbus.addr += len;
+		bd->srcbus.addr += len;
 	if (cctl & PL080_CONTROL_DST_INCR)
-		txd->dstbus.addr += len;
+		bd->dstbus.addr += len;
 
-	*remainder -= len;
+	BUG_ON(bd->remainder < len);
 
-	return num_llis + 1;
+	bd->remainder -= len;
 }
 
 /*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
  */
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
 {
-	u32 boundary;
+	size_t boundary_len = PL08X_BOUNDARY_SIZE -
+			(addr & (PL08X_BOUNDARY_SIZE - 1));
 
-	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
-		<< PL08X_BOUNDARY_SHIFT;
-
-	if (boundary < addr + len)
-		return boundary - addr;
-	else
-		return len;
+	return min(boundary_len, len);
 }
 
 /*
@@ -608,20 +584,13 @@
 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			      struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = txd->cd;
 	struct pl08x_bus_data *mbus, *sbus;
-	u32 remainder;
+	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
 	u32 cctl;
-	int max_bytes_per_lli;
-	int total_bytes = 0;
-	struct lli *llis_va;
-	struct lli *llis_bus;
-
-	if (!txd) {
-		dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
-		return 0;
-	}
+	size_t max_bytes_per_lli;
+	size_t total_bytes = 0;
+	struct pl08x_lli *llis_va;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
 				      &txd->llis_bus);
@@ -632,121 +601,79 @@
 
 	pl08x->pool_ctr++;
 
-	/*
-	 * Initialize bus values for this transfer
-	 * from the passed optimal values
-	 */
-	if (!cd) {
-		dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
-		return 0;
-	}
+	/* Get the default CCTL */
+	cctl = txd->cctl;
 
-	/* Get the default CCTL from the platform data */
-	cctl = cd->cctl;
-
-	/*
-	 * On the PL080 we have two bus masters and we
-	 * should select one for source and one for
-	 * destination. We try to use AHB2 for the
-	 * bus which does not increment (typically the
-	 * peripheral) else we just choose something.
-	 */
-	cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
-	if (pl08x->vd->dualmaster) {
-		if (cctl & PL080_CONTROL_SRC_INCR)
-			/* Source increments, use AHB2 for destination */
-			cctl |= PL080_CONTROL_DST_AHB2;
-		else if (cctl & PL080_CONTROL_DST_INCR)
-			/* Destination increments, use AHB2 for source */
-			cctl |= PL080_CONTROL_SRC_AHB2;
-		else
-			/* Just pick something, source AHB1 dest AHB2 */
-			cctl |= PL080_CONTROL_DST_AHB2;
-	}
+	bd.txd = txd;
+	bd.pl08x = pl08x;
+	bd.srcbus.addr = txd->src_addr;
+	bd.dstbus.addr = txd->dst_addr;
 
 	/* Find maximum width of the source bus */
-	txd->srcbus.maxwidth =
+	bd.srcbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
 				       PL080_CONTROL_SWIDTH_SHIFT);
 
 	/* Find maximum width of the destination bus */
-	txd->dstbus.maxwidth =
+	bd.dstbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 				       PL080_CONTROL_DWIDTH_SHIFT);
 
 	/* Set up the bus widths to the maximum */
-	txd->srcbus.buswidth = txd->srcbus.maxwidth;
-	txd->dstbus.buswidth = txd->dstbus.maxwidth;
+	bd.srcbus.buswidth = bd.srcbus.maxwidth;
+	bd.dstbus.buswidth = bd.dstbus.maxwidth;
 	dev_vdbg(&pl08x->adev->dev,
 		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
-		 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
 
 
 	/*
 	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
 	 */
-	max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
 		PL080_CONTROL_TRANSFER_SIZE_MASK;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s max bytes per lli = %d\n",
+		 "%s max bytes per lli = %zu\n",
 		 __func__, max_bytes_per_lli);
 
 	/* We need to count this down to zero */
-	remainder = txd->len;
+	bd.remainder = txd->len;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s remainder = %d\n",
-		 __func__, remainder);
+		 "%s remainder = %zu\n",
+		 __func__, bd.remainder);
 
 	/*
 	 * Choose bus to align to
 	 * - prefers destination bus if both available
 	 * - if fixed address on one bus chooses other
-	 * - modifies cctl to choose an apropriate master
 	 */
-	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
-				&mbus, &sbus, cctl);
-
-
-	/*
-	 * The lowest bit of the LLI register
-	 * is also used to indicate which master to
-	 * use for reading the LLIs.
-	 */
+	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
 	if (txd->len < mbus->buswidth) {
-		/*
-		 * Less than a bus width available
-		 * - send as single bytes
-		 */
-		while (remainder) {
+		/* Less than a bus width available - send as single bytes */
+		while (bd.remainder) {
 			dev_vdbg(&pl08x->adev->dev,
 				 "%s single byte LLIs for a transfer of "
-				 "less than a bus width (remain %08x)\n",
-				 __func__, remainder);
+				 "less than a bus width (remain 0x%08zx)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis =
-				pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
-					cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	} else {
-		/*
-		 *  Make one byte LLIs until master bus is aligned
-		 *  - slave will then be aligned also
-		 */
+		/* Make one byte LLIs until master bus is aligned */
 		while ((mbus->addr) % (mbus->buswidth)) {
 			dev_vdbg(&pl08x->adev->dev,
 				"%s adjustment lli for less than bus width "
-				 "(remain %08x)\n",
-				 __func__, remainder);
+				 "(remain 0x%08zx)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis = pl08x_fill_lli_for_desc
-				(pl08x, txd, num_llis, 1, cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 
 		/*
-		 *  Master now aligned
+		 * Master now aligned
 		 * - if slave is not then we must set its width down
 		 */
 		if (sbus->addr % sbus->buswidth) {
@@ -761,63 +688,51 @@
 		 * Make largest possible LLIs until less than one bus
 		 * width left
 		 */
-		while (remainder > (mbus->buswidth - 1)) {
-			int lli_len, target_len;
-			int tsize;
-			int odd_bytes;
+		while (bd.remainder > (mbus->buswidth - 1)) {
+			size_t lli_len, target_len, tsize, odd_bytes;
 
 			/*
 			 * If enough left try to send max possible,
 			 * otherwise try to send the remainder
 			 */
-			target_len = remainder;
-			if (remainder > max_bytes_per_lli)
-				target_len = max_bytes_per_lli;
+			target_len = min(bd.remainder, max_bytes_per_lli);
 
 			/*
-			 * Set bus lengths for incrementing busses
-			 * to number of bytes which fill to next memory
-			 * boundary
+			 * Set bus lengths for incrementing buses to the
+			 * number of bytes which fill to next memory boundary,
+			 * limited to the target length calculated above.
 			 */
 			if (cctl & PL080_CONTROL_SRC_INCR)
-				txd->srcbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->srcbus.addr,
-						remainder);
+				bd.srcbus.fill_bytes =
+					pl08x_pre_boundary(bd.srcbus.addr,
+						target_len);
 			else
-				txd->srcbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.srcbus.fill_bytes = target_len;
 
 			if (cctl & PL080_CONTROL_DST_INCR)
-				txd->dstbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->dstbus.addr,
-						remainder);
+				bd.dstbus.fill_bytes =
+					pl08x_pre_boundary(bd.dstbus.addr,
+						target_len);
 			else
-				txd->dstbus.fill_bytes =
-						max_bytes_per_lli;
+				bd.dstbus.fill_bytes = target_len;
 
-			/*
-			 *  Find the nearest
-			 */
-			lli_len	= min(txd->srcbus.fill_bytes,
-				txd->dstbus.fill_bytes);
+			/* Find the nearest */
+			lli_len	= min(bd.srcbus.fill_bytes,
+				      bd.dstbus.fill_bytes);
 
-			BUG_ON(lli_len > remainder);
+			BUG_ON(lli_len > bd.remainder);
 
 			if (lli_len <= 0) {
 				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %d, <= 0\n",
+					"%s lli_len is %zu, <= 0\n",
 						__func__, lli_len);
 				return 0;
 			}
 
 			if (lli_len == target_len) {
 				/*
-				 * Can send what we wanted
-				 */
-				/*
-				 *  Maintain alignment
+				 * Can send what we wanted.
+				 * Maintain alignment
 				 */
 				lli_len	= (lli_len/mbus->buswidth) *
 							mbus->buswidth;
@@ -825,17 +740,14 @@
 			} else {
 				/*
 				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary
-				 * The next lli will past the boundary
-				 * - however we may be working to a boundary
-				 *   on the slave bus
-				 *   We need to ensure the master stays aligned
+				 * to get to the nearest boundary.  The next
+				 * LLI will be past the boundary.  However, we
+				 * may be working to a boundary on the slave
+				 * bus.  We need to ensure the master stays
+				 * aligned, and that we are working in
+				 * multiples of the bus widths.
 				 */
 				odd_bytes = lli_len % mbus->buswidth;
-				/*
-				 * - and that we are working in multiples
-				 *   of the bus widths
-				 */
 				lli_len -= odd_bytes;
 
 			}
@@ -855,41 +767,38 @@
 
 				if (target_len != lli_len) {
 					dev_vdbg(&pl08x->adev->dev,
-					"%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
 					__func__, target_len, lli_len, txd->len);
 				}
 
 				cctl = pl08x_cctl_bits(cctl,
-						       txd->srcbus.buswidth,
-						       txd->dstbus.buswidth,
+						       bd.srcbus.buswidth,
+						       bd.dstbus.buswidth,
 						       tsize);
 
 				dev_vdbg(&pl08x->adev->dev,
-					"%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
-					__func__, lli_len, remainder);
-				num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
-						num_llis, lli_len, cctl,
-						&remainder);
+					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+					__func__, lli_len, bd.remainder);
+				pl08x_fill_lli_for_desc(&bd, num_llis++,
+					lli_len, cctl);
 				total_bytes += lli_len;
 			}
 
 
 			if (odd_bytes) {
 				/*
-				 * Creep past the boundary,
-				 * maintaining master alignment
+				 * Creep past the boundary, maintaining
+				 * master alignment
 				 */
 				int j;
 				for (j = 0; (j < mbus->buswidth)
-						&& (remainder); j++) {
+						&& (bd.remainder); j++) {
 					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 					dev_vdbg(&pl08x->adev->dev,
-						"%s align with boundardy, single byte (remain %08x)\n",
-						__func__, remainder);
-					num_llis =
-						pl08x_fill_lli_for_desc(pl08x,
-							txd, num_llis, 1,
-							cctl, &remainder);
+						"%s align with boundary, single byte (remain 0x%08zx)\n",
+						__func__, bd.remainder);
+					pl08x_fill_lli_for_desc(&bd,
+						num_llis++, 1, cctl);
 					total_bytes++;
 				}
 			}
@@ -898,25 +807,18 @@
 		/*
 		 * Send any odd bytes
 		 */
-		if (remainder < 0) {
-			dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
-					__func__, remainder);
-			return 0;
-		}
-
-		while (remainder) {
+		while (bd.remainder) {
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundardy, single odd byte (remain %d)\n",
-				__func__, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
-					1, cctl, &remainder);
+				"%s align with boundary, single odd byte (remain %zu)\n",
+				__func__, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	}
 	if (total_bytes != txd->len) {
 		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+			"%s size of encoded LLIs doesn't match total txd, transferred 0x%08zx from size 0x%08zx\n",
 			__func__, total_bytes, txd->len);
 		return 0;
 	}
@@ -927,41 +829,12 @@
 			__func__, (u32) MAX_NUM_TSFR_LLIS);
 		return 0;
 	}
-	/*
-	 * Decide whether this is a loop or a terminated transfer
-	 */
+
 	llis_va = txd->llis_va;
-	llis_bus = (struct lli *) txd->llis_bus;
-
-	if (cd->circular_buffer) {
-		/*
-		 * Loop the circular buffer so that the next element
-		 * points back to the beginning of the LLI.
-		 */
-		llis_va[num_llis - 1].next =
-			(dma_addr_t)((unsigned int)&(llis_bus[0]));
-	} else {
-		/*
-		 * On non-circular buffers, the final LLI terminates
-		 * the LLI.
-		 */
-		llis_va[num_llis - 1].next = 0;
-		/*
-		 * The final LLI element shall also fire an interrupt
-		 */
-		llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
-	}
-
-	/* Now store the channel register values */
-	txd->csrc = llis_va[0].src;
-	txd->cdst = llis_va[0].dst;
-	if (num_llis > 1)
-		txd->clli = llis_va[0].next;
-	else
-		txd->clli = 0;
-
-	txd->cctl = llis_va[0].cctl;
-	/* ccfg will be set at physical channel allocation time */
+	/* The final LLI terminates the LLI list. */
+	llis_va[num_llis - 1].lli = 0;
+	/* The final LLI element shall also fire an interrupt. */
+	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
 
 #ifdef VERBOSE_DEBUG
 	{
@@ -969,13 +842,13 @@
 
 		for (i = 0; i < num_llis; i++) {
 			dev_vdbg(&pl08x->adev->dev,
-				 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
 				 i,
 				 &llis_va[i],
 				 llis_va[i].src,
 				 llis_va[i].dst,
 				 llis_va[i].cctl,
-				 llis_va[i].next
+				 llis_va[i].lli
 				);
 		}
 	}
@@ -988,14 +861,8 @@
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
-	if (!txd)
-		dev_err(&pl08x->adev->dev,
-			"%s no descriptor to free\n",
-			__func__);
-
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va,
-		      txd->llis_bus);
+	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
@@ -1008,13 +875,12 @@
 	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *next;
 
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
-					 next, &plchan->desc_list, node) {
+					 next, &plchan->pend_list, node) {
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
-
 	}
 }
 
@@ -1069,6 +935,12 @@
 			return -EBUSY;
 		}
 		ch->signal = ret;
+
+		/* Assign the flow control signal to this channel */
+		if (txd->direction == DMA_TO_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+		else if (txd->direction == DMA_FROM_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +948,54 @@
 		 ch->signal,
 		 plchan->name);
 
+	plchan->phychan_hold++;
 	plchan->phychan = ch;
 
 	return 0;
 }
 
+static void release_phy_channel(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
+		pl08x->pd->put_signal(plchan);
+		plchan->phychan->signal = -1;
+	}
+	pl08x_put_phy_channel(pl08x, plchan->phychan);
+	plchan->phychan = NULL;
+}
+
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+	struct pl08x_txd *txd = to_pl08x_txd(tx);
+	unsigned long flags;
 
-	atomic_inc(&plchan->last_issued);
-	tx->cookie = atomic_read(&plchan->last_issued);
-	/* This unlock follows the lock in the prep() function */
-	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+	spin_lock_irqsave(&plchan->lock, flags);
+
+	plchan->chan.cookie += 1;
+	if (plchan->chan.cookie < 0)
+		plchan->chan.cookie = 1;
+	tx->cookie = plchan->chan.cookie;
+
+	/* Put this onto the pending list */
+	list_add_tail(&txd->node, &plchan->pend_list);
+
+	/*
+	 * If there was no physical channel available for this memcpy,
+	 * stack the request up and indicate that the channel is waiting
+	 * for a free physical channel.
+	 */
+	if (!plchan->slave && !plchan->phychan) {
+		/* Do this memcpy whenever there is a channel ready */
+		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting = txd;
+	} else {
+		plchan->phychan_hold--;
+	}
+
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return tx->cookie;
 }
@@ -1102,10 +1009,9 @@
 }
 
 /*
- * Code accessing dma_async_is_complete() in a tight loop
- * may give problems - could schedule where indicated.
- * If slaves are relying on interrupts to signal completion this
- * function must not be called with interrupts disabled
+ * Code accessing dma_async_is_complete() in a tight loop may cause problems.
+ * If slaves are relying on interrupts to signal completion, this function
+ * must not be called with interrupts disabled.
  */
 static enum dma_status
 pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1024,7 @@
 	enum dma_status ret;
 	u32 bytesleft = 0;
 
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1034,9 @@
 	}
 
 	/*
-	 * schedule(); could be inserted here
-	 */
-
-	/*
 	 * This cookie not complete yet
 	 */
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	/* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1101,35 @@
 	},
 };
 
-static void dma_set_runtime_config(struct dma_chan *chan,
-			       struct dma_slave_config *config)
+static int dma_set_runtime_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_channel_data *cd = plchan->cd;
 	enum dma_slave_buswidth addr_width;
+	dma_addr_t addr;
 	u32 maxburst;
 	u32 cctl = 0;
-	/* Mask out all except src and dst channel */
-	u32 ccfg = cd->ccfg & 0x000003DEU;
-	int i = 0;
+	int i;
+
+	if (!plchan->slave)
+		return -EINVAL;
 
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
 	if (config->direction == DMA_TO_DEVICE) {
-		plchan->runtime_addr = config->dst_addr;
-		cctl |= PL080_CONTROL_SRC_INCR;
-		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
 	} else if (config->direction == DMA_FROM_DEVICE) {
-		plchan->runtime_addr = config->src_addr;
-		cctl |= PL080_CONTROL_DST_INCR;
-		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien transfer direction\n");
-		return;
+		return -EINVAL;
 	}
 
 	switch (addr_width) {
@@ -1248,42 +1148,40 @@
 	default:
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien address width\n");
-		return;
+		return -EINVAL;
 	}
 
 	/*
 	 * Now decide on a maxburst:
-	 * If this channel will only request single transfers, set
-	 * this down to ONE element.
+	 * If this channel will only request single transfers, set this
+	 * down to ONE element.  Also select one element if no maxburst
+	 * is specified.
 	 */
-	if (plchan->cd->single) {
+	if (plchan->cd->single || maxburst == 0) {
 		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
 			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
 	} else {
-		while (i < ARRAY_SIZE(burst_sizes)) {
+		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
 			if (burst_sizes[i].burstwords <= maxburst)
 				break;
-			i++;
-		}
 		cctl |= burst_sizes[i].reg;
 	}
 
-	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
-	cctl &= ~PL080_CONTROL_PROT_MASK;
-	cctl |= PL080_CONTROL_PROT_SYS;
+	plchan->runtime_addr = addr;
 
 	/* Modify the default channel data to fit PrimeCell request */
 	cd->cctl = cctl;
-	cd->ccfg = ccfg;
 
 	dev_dbg(&pl08x->adev->dev,
 		"configured channel %s (%s) for %s, data width %d, "
-		"maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+		"maxburst %d words, LE, CCTL=0x%08x\n",
 		dma_chan_name(chan), plchan->name,
 		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
 		addr_width,
 		maxburst,
-		cctl, ccfg);
+		cctl);
+
+	return 0;
 }
 
 /*
@@ -1293,35 +1191,26 @@
 static void pl08x_issue_pending(struct dma_chan *chan)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
 	unsigned long flags;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-	/* Something is already active */
-	if (plchan->at) {
-			spin_unlock_irqrestore(&plchan->lock, flags);
-			return;
+	/* Something is already active, or we're waiting for a channel... */
+	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
+		spin_unlock_irqrestore(&plchan->lock, flags);
+		return;
 	}
 
-	/* Didn't get a physical channel so waiting for it ... */
-	if (plchan->state == PL08X_CHAN_WAITING)
-		return;
-
 	/* Take the first element in the queue and execute it */
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *next;
 
-		next = list_first_entry(&plchan->desc_list,
+		next = list_first_entry(&plchan->pend_list,
 					struct pl08x_txd,
 					node);
 		list_del(&next->node);
-		plchan->at = next;
 		plchan->state = PL08X_CHAN_RUNNING;
 
-		/* Configure the physical channel for the active txd */
-		pl08x_config_phychan_for_txd(plchan);
-		pl08x_set_cregs(pl08x, plchan->phychan);
-		pl08x_enable_phy_chan(pl08x, plchan->phychan);
+		pl08x_start_txd(plchan, next);
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1330,30 +1219,17 @@
 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 					struct pl08x_txd *txd)
 {
-	int num_llis;
 	struct pl08x_driver_data *pl08x = plchan->host;
-	int ret;
+	unsigned long flags;
+	int num_llis, ret;
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-
-	if (!num_llis)
+	if (!num_llis) {
+		kfree(txd);
 		return -EINVAL;
+	}
 
-	spin_lock_irqsave(&plchan->lock, plchan->lockflags);
-
-	/*
-	 * If this device is not using a circular buffer then
-	 * queue this new descriptor for transfer.
-	 * The descriptor for a circular buffer continues
-	 * to be used until the channel is freed.
-	 */
-	if (txd->cd->circular_buffer)
-		dev_err(&pl08x->adev->dev,
-			"%s attempting to queue a circular buffer\n",
-			__func__);
-	else
-		list_add_tail(&txd->node,
-			      &plchan->desc_list);
+	spin_lock_irqsave(&plchan->lock, flags);
 
 	/*
 	 * See if we already have a physical channel allocated,
@@ -1362,45 +1238,74 @@
 	ret = prep_phy_channel(plchan, txd);
 	if (ret) {
 		/*
-		 * No physical channel available, we will
-		 * stack up the memcpy channels until there is a channel
-		 * available to handle it whereas slave transfers may
-		 * have been denied due to platform channel muxing restrictions
-		 * and since there is no guarantee that this will ever be
-		 * resolved, and since the signal must be aquired AFTER
-		 * aquiring the physical channel, we will let them be NACK:ed
-		 * with -EBUSY here. The drivers can alway retry the prep()
-		 * call if they are eager on doing this using DMA.
+		 * No physical channel was available.
+		 *
+		 * memcpy transfers can be sorted out at submission time.
+		 *
+		 * Slave transfers may have been denied due to platform
+		 * channel muxing restrictions.  Since there is no guarantee
+		 * that this will ever be resolved, and the signal must be
+		 * acquired AFTER acquiring the physical channel, we will let
+		 * them be NACK:ed with -EBUSY here. The drivers can retry
+		 * the prep() call if they are eager to do this using DMA.
 		 */
 		if (plchan->slave) {
 			pl08x_free_txd_list(pl08x, plchan);
-			spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+			pl08x_free_txd(pl08x, txd);
+			spin_unlock_irqrestore(&plchan->lock, flags);
 			return -EBUSY;
 		}
-		/* Do this memcpy whenever there is a channel ready */
-		plchan->state = PL08X_CHAN_WAITING;
-		plchan->waiting = txd;
 	} else
 		/*
-		 * Else we're all set, paused and ready to roll,
-		 * status will switch to PL08X_CHAN_RUNNING when
-		 * we call issue_pending(). If there is something
-		 * running on the channel already we don't change
-		 * its state.
+		 * Else we're all set, paused and ready to roll, status
+		 * will switch to PL08X_CHAN_RUNNING when we call
+		 * issue_pending(). If there is something running on the
+		 * channel already we don't change its state.
 		 */
 		if (plchan->state == PL08X_CHAN_IDLE)
 			plchan->state = PL08X_CHAN_PAUSED;
 
-	/*
-	 * Notice that we leave plchan->lock locked on purpose:
-	 * it will be unlocked in the subsequent tx_submit()
-	 * call. This is a consequence of the current API.
-	 */
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return 0;
 }
 
 /*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port.  We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
+{
+	u32 cctl = 0;
+
+	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+		cctl |= PL080_CONTROL_DST_AHB2;
+	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+		cctl |= PL080_CONTROL_SRC_AHB2;
+
+	return cctl;
+}
+
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
+	unsigned long flags)
+{
+	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+
+	if (txd) {
+		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
+		txd->tx.flags = flags;
+		txd->tx.tx_submit = pl08x_tx_submit;
+		INIT_LIST_HEAD(&txd->node);
+
+		/* Always enable error and terminal interrupts */
+		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
+			    PL080_CONFIG_TC_IRQ_MASK;
+	}
+	return txd;
+}
+
+/*
  * Initialize a descriptor to be used by memcpy submit
  */
 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
@@ -1412,40 +1317,38 @@
 	struct pl08x_txd *txd;
 	int ret;
 
-	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev,
 			"%s no memory for descriptor\n", __func__);
 		return NULL;
 	}
 
-	dma_async_tx_descriptor_init(&txd->tx, chan);
 	txd->direction = DMA_NONE;
-	txd->srcbus.addr = src;
-	txd->dstbus.addr = dest;
-
-	/* Set platform data for m2m */
-	txd->cd = &pl08x->pd->memcpy_channel;
-	/* Both to be incremented or the code will break */
-	txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
-	txd->tx.tx_submit = pl08x_tx_submit;
-	txd->tx.callback = NULL;
-	txd->tx.callback_param = NULL;
+	txd->src_addr = src;
+	txd->dst_addr = dest;
 	txd->len = len;
 
-	INIT_LIST_HEAD(&txd->node);
+	/* Set platform data for m2m */
+	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+	txd->cctl = pl08x->pd->memcpy_channel.cctl &
+			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
+
+	/* Both to be incremented or the code will break */
+	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+
+	if (pl08x->vd->dualmaster)
+		txd->cctl |= pl08x_select_bus(pl08x,
+					pl08x->mem_buses, pl08x->mem_buses);
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
-	/*
-	 * NB: the channel lock is held at this point so tx_submit()
-	 * must be called in direct succession.
-	 */
 
 	return &txd->tx;
 }
 
-struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_data_direction direction,
 		unsigned long flags)
@@ -1453,6 +1356,7 @@
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
+	u8 src_buses, dst_buses;
 	int ret;
 
 	/*
@@ -1467,14 +1371,12 @@
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 		__func__, sgl->length, plchan->name);
 
-	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
 		return NULL;
 	}
 
-	dma_async_tx_descriptor_init(&txd->tx, chan);
-
 	if (direction != plchan->runtime_direction)
 		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
 			"the direction configured for the PrimeCell\n",
@@ -1486,37 +1388,47 @@
 	 * channel target address dynamically at runtime.
 	 */
 	txd->direction = direction;
+	txd->len = sgl->length;
+
+	txd->cctl = plchan->cd->cctl &
+			~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+			  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+			  PL080_CONTROL_PROT_MASK);
+
+	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
+	txd->cctl |= PL080_CONTROL_PROT_SYS;
+
 	if (direction == DMA_TO_DEVICE) {
-		txd->srcbus.addr = sgl->dma_address;
+		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		txd->cctl |= PL080_CONTROL_SRC_INCR;
+		txd->src_addr = sgl->dma_address;
 		if (plchan->runtime_addr)
-			txd->dstbus.addr = plchan->runtime_addr;
+			txd->dst_addr = plchan->runtime_addr;
 		else
-			txd->dstbus.addr = plchan->cd->addr;
+			txd->dst_addr = plchan->cd->addr;
+		src_buses = pl08x->mem_buses;
+		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_FROM_DEVICE) {
+		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		txd->cctl |= PL080_CONTROL_DST_INCR;
 		if (plchan->runtime_addr)
-			txd->srcbus.addr = plchan->runtime_addr;
+			txd->src_addr = plchan->runtime_addr;
 		else
-			txd->srcbus.addr = plchan->cd->addr;
-		txd->dstbus.addr = sgl->dma_address;
+			txd->src_addr = plchan->cd->addr;
+		txd->dst_addr = sgl->dma_address;
+		src_buses = plchan->cd->periph_buses;
+		dst_buses = pl08x->mem_buses;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"%s direction unsupported\n", __func__);
 		return NULL;
 	}
-	txd->cd = plchan->cd;
-	txd->tx.tx_submit = pl08x_tx_submit;
-	txd->tx.callback = NULL;
-	txd->tx.callback_param = NULL;
-	txd->len = sgl->length;
-	INIT_LIST_HEAD(&txd->node);
+
+	txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
 
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
-	/*
-	 * NB: the channel lock is held at this point so tx_submit()
-	 * must be called in direct succession.
-	 */
 
 	return &txd->tx;
 }
@@ -1531,10 +1443,8 @@
 
 	/* Controls applicable to inactive channels */
 	if (cmd == DMA_SLAVE_CONFIG) {
-		dma_set_runtime_config(chan,
-				       (struct dma_slave_config *)
-				       arg);
-		return 0;
+		return dma_set_runtime_config(chan,
+					      (struct dma_slave_config *)arg);
 	}
 
 	/*
@@ -1552,22 +1462,14 @@
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_stop_phy_chan(plchan->phychan);
+			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
 
 			/*
 			 * Mark physical channel as free and free any slave
 			 * signal
 			 */
-			if ((plchan->phychan->signal >= 0) &&
-			    pl08x->pd->put_signal) {
-				pl08x->pd->put_signal(plchan);
-				plchan->phychan->signal = -1;
-			}
-			pl08x_put_phy_channel(pl08x, plchan->phychan);
-			plchan->phychan = NULL;
+			release_phy_channel(plchan);
 		}
-		/* Stop any pending tasklet */
-		tasklet_disable(&plchan->tasklet);
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
 			pl08x_free_txd(pl08x, plchan->at);
@@ -1609,10 +1511,9 @@
 
 /*
  * Just check that the device is there and active
- * TODO: turn this bit on/off depending on the number of
- * physical channels actually used, if it is zero... well
- * shut it off. That will save some power. Cut the clock
- * at the same time.
+ * TODO: turn this bit on/off depending on the number of physical channels
+ * actually used, if it is zero... well shut it off. That will save some
+ * power. Cut the clock at the same time.
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
@@ -1620,78 +1521,66 @@
 
 	val = readl(pl08x->base + PL080_CONFIG);
 	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
-	/* We implictly clear bit 1 and that means little-endian mode */
+	/* We implicitly clear bit 1 and that means little-endian mode */
 	val |= PL080_CONFIG_ENABLE;
 	writel(val, pl08x->base + PL080_CONFIG);
 }
 
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+	struct device *dev = txd->tx.chan->device->dev;
+
+	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			dma_unmap_single(dev, txd->src_addr, txd->len,
+				DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, txd->src_addr, txd->len,
+				DMA_TO_DEVICE);
+	}
+	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			dma_unmap_single(dev, txd->dst_addr, txd->len,
+				DMA_FROM_DEVICE);
+		else
+			dma_unmap_page(dev, txd->dst_addr, txd->len,
+				DMA_FROM_DEVICE);
+	}
+}
+
 static void pl08x_tasklet(unsigned long data)
 {
 	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
-	struct pl08x_phy_chan *phychan = plchan->phychan;
 	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_txd *txd;
+	unsigned long flags;
 
-	if (!plchan)
-		BUG();
+	spin_lock_irqsave(&plchan->lock, flags);
 
-	spin_lock(&plchan->lock);
+	txd = plchan->at;
+	plchan->at = NULL;
 
-	if (plchan->at) {
-		dma_async_tx_callback callback =
-			plchan->at->tx.callback;
-		void *callback_param =
-			plchan->at->tx.callback_param;
-
-		/*
-		 * Update last completed
-		 */
-		plchan->lc =
-			(plchan->at->tx.cookie);
-
-		/*
-		 * Callback to signal completion
-		 */
-		if (callback)
-			callback(callback_param);
-
-		/*
-		 * Device callbacks should NOT clear
-		 * the current transaction on the channel
-		 * Linus: sometimes they should?
-		 */
-		if (!plchan->at)
-			BUG();
-
-		/*
-		 * Free the descriptor if it's not for a device
-		 * using a circular buffer
-		 */
-		if (!plchan->at->cd->circular_buffer) {
-			pl08x_free_txd(pl08x, plchan->at);
-			plchan->at = NULL;
-		}
-		/*
-		 * else descriptor for circular
-		 * buffers only freed when
-		 * client has disabled dma
-		 */
+	if (txd) {
+		/* Update last completed */
+		plchan->lc = txd->tx.cookie;
 	}
-	/*
-	 * If a new descriptor is queued, set it up
-	 * plchan->at is NULL here
-	 */
-	if (!list_empty(&plchan->desc_list)) {
+
+	/* If a new descriptor is queued, set it up; plchan->at is NULL here */
+	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *next;
 
-		next = list_first_entry(&plchan->desc_list,
+		next = list_first_entry(&plchan->pend_list,
 					struct pl08x_txd,
 					node);
 		list_del(&next->node);
-		plchan->at = next;
-		/* Configure the physical channel for the next txd */
-		pl08x_config_phychan_for_txd(plchan);
-		pl08x_set_cregs(pl08x, plchan->phychan);
-		pl08x_enable_phy_chan(pl08x, plchan->phychan);
+
+		pl08x_start_txd(plchan, next);
+	} else if (plchan->phychan_hold) {
+		/*
+		 * This channel is still in use - we have a new txd being
+		 * prepared and will soon be queued.  Don't give up the
+		 * physical channel.
+		 */
 	} else {
 		struct pl08x_dma_chan *waiting = NULL;
 
@@ -1699,20 +1588,14 @@
 		 * No more jobs, so free up the physical channel
 		 * Free any allocated signal on slave transfers too
 		 */
-		if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
-			pl08x->pd->put_signal(plchan);
-			phychan->signal = -1;
-		}
-		pl08x_put_phy_channel(pl08x, phychan);
-		plchan->phychan = NULL;
+		release_phy_channel(plchan);
 		plchan->state = PL08X_CHAN_IDLE;
 
 		/*
-		 * And NOW before anyone else can grab that free:d
-		 * up physical channel, see if there is some memcpy
-		 * pending that seriously needs to start because of
-		 * being stacked up while we were choking the
-		 * physical channels with data.
+		 * And NOW before anyone else can grab that free:d up
+		 * physical channel, see if there is some memcpy pending
+		 * that seriously needs to start because of being stacked
+		 * up while we were choking the physical channels with data.
 		 */
 		list_for_each_entry(waiting, &pl08x->memcpy.channels,
 				    chan.device_node) {
@@ -1724,6 +1607,7 @@
 				ret = prep_phy_channel(waiting,
 						       waiting->waiting);
 				BUG_ON(ret);
+				waiting->phychan_hold--;
 				waiting->state = PL08X_CHAN_RUNNING;
 				waiting->waiting = NULL;
 				pl08x_issue_pending(&waiting->chan);
@@ -1732,7 +1616,25 @@
 		}
 	}
 
-	spin_unlock(&plchan->lock);
+	spin_unlock_irqrestore(&plchan->lock, flags);
+
+	if (txd) {
+		dma_async_tx_callback callback = txd->tx.callback;
+		void *callback_param = txd->tx.callback_param;
+
+		/* Don't try to unmap buffers on slave channels */
+		if (!plchan->slave)
+			pl08x_unmap_buffers(txd);
+
+		/* Free the descriptor */
+		spin_lock_irqsave(&plchan->lock, flags);
+		pl08x_free_txd(pl08x, txd);
+		spin_unlock_irqrestore(&plchan->lock, flags);
+
+		/* Callback to signal completion */
+		if (callback)
+			callback(callback_param);
+	}
 }
 
 static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1744,9 +1646,7 @@
 
 	val = readl(pl08x->base + PL080_ERR_STATUS);
 	if (val) {
-		/*
-		 * An error interrupt (on one or more channels)
-		 */
+		/* An error interrupt (on one or more channels) */
 		dev_err(&pl08x->adev->dev,
 			"%s error interrupt, register value 0x%08x\n",
 				__func__, val);
@@ -1770,9 +1670,7 @@
 			mask |= (1 << i);
 		}
 	}
-	/*
-	 * Clear only the terminal interrupts on channels we processed
-	 */
+	/* Clear only the terminal interrupts on channels we processed */
 	writel(mask, pl08x->base + PL080_TC_CLEAR);
 
 	return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1791,6 +1689,7 @@
 	int i;
 
 	INIT_LIST_HEAD(&dmadev->channels);
+
 	/*
 	 * Register as many memcpy channels as we have physical channels,
 	 * we won't always be able to use all but the code will have
@@ -1819,16 +1718,23 @@
 				return -ENOMEM;
 			}
 		}
+		if (chan->cd->circular_buffer) {
+			dev_err(&pl08x->adev->dev,
+				"channel %s: circular buffers not supported\n",
+				chan->name);
+			kfree(chan);
+			continue;
+		}
 		dev_info(&pl08x->adev->dev,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
 		chan->chan.device = dmadev;
-		atomic_set(&chan->last_issued, 0);
-		chan->lc = atomic_read(&chan->last_issued);
+		chan->chan.cookie = 0;
+		chan->lc = 0;
 
 		spin_lock_init(&chan->lock);
-		INIT_LIST_HEAD(&chan->desc_list);
+		INIT_LIST_HEAD(&chan->pend_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
 
@@ -1898,7 +1804,7 @@
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
 	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
-		seq_printf(s, "%s\t\t\%s\n", chan->name,
+		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}
 
@@ -1906,7 +1812,7 @@
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
 	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
-		seq_printf(s, "%s\t\t\%s\n", chan->name,
+		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}
 
@@ -1942,7 +1848,7 @@
 static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
 {
 	struct pl08x_driver_data *pl08x;
-	struct vendor_data *vd = id->data;
+	const struct vendor_data *vd = id->data;
 	int ret = 0;
 	int i;
 
@@ -1990,6 +1896,14 @@
 	pl08x->adev = adev;
 	pl08x->vd = vd;
 
+	/* By default, AHB1 only.  If dualmaster, from platform */
+	pl08x->lli_buses = PL08X_AHB1;
+	pl08x->mem_buses = PL08X_AHB1;
+	if (pl08x->vd->dualmaster) {
+		pl08x->lli_buses = pl08x->pd->lli_buses;
+		pl08x->mem_buses = pl08x->pd->mem_buses;
+	}
+
 	/* A DMA memory pool for LLIs, align on 1-byte boundary */
 	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
 			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
@@ -2009,14 +1923,12 @@
 	/* Turn on the PL08x */
 	pl08x_ensure_on(pl08x);
 
-	/*
-	 * Attach the interrupt handler
-	 */
+	/* Attach the interrupt handler */
 	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
 	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
 
 	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
-			  vd->name, pl08x);
+			  DRIVER_NAME, pl08x);
 	if (ret) {
 		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
 			__func__, adev->irq[0]);
@@ -2087,8 +1999,9 @@
 
 	amba_set_drvdata(adev, pl08x);
 	init_pl08x_debugfs(pl08x);
-	dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
-		vd->name, adev->res.start);
+	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
+		 amba_part(adev), amba_rev(adev),
+		 (unsigned long long)adev->res.start, adev->irq[0]);
 	return 0;
 
 out_no_slave_reg:
@@ -2115,13 +2028,11 @@
 
 /* The PL080 has 8 channels and the PL081 has just 2 */
 static struct vendor_data vendor_pl080 = {
-	.name = "PL080",
 	.channels = 8,
 	.dualmaster = true,
 };
 
 static struct vendor_data vendor_pl081 = {
-	.name = "PL081",
 	.channels = 2,
 	.dualmaster = false,
 };
@@ -2160,7 +2071,7 @@
 	retval = amba_driver_register(&pl08x_amba_driver);
 	if (retval)
 		printk(KERN_WARNING DRIVER_NAME
-		       "failed to register as an amba device (%d)\n",
+		       ": failed to register as an AMBA device (%d)\n",
 		       retval);
 	return retval;
 }
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ea0ee81..3d7d705 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -253,7 +253,7 @@
 	/* move myself to free_list */
 	list_move(&desc->desc_node, &atchan->free_list);
 
-	/* unmap dma addresses */
+	/* unmap dma addresses (not on slave channels) */
 	if (!atchan->chan_common.private) {
 		struct device *parent = chan2parent(&atchan->chan_common);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -583,7 +583,6 @@
 		desc->lli.ctrlb = ctrlb;
 
 		desc->txd.cookie = 0;
-		async_tx_ack(&desc->txd);
 
 		if (!first) {
 			first = desc;
@@ -604,7 +603,7 @@
 	/* set end-of-link to the last link descriptor of list */
 	set_desc_eol(desc);
 
-	desc->txd.flags = flags; /* client is in control of this ack */
+	first->txd.flags = flags; /* client is in control of this ack */
 
 	return &first->txd;
 
@@ -670,7 +669,7 @@
 			if (!desc)
 				goto err_desc_get;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 			mem_width = 2;
 			if (unlikely(mem & 3 || len & 3))
@@ -712,7 +711,7 @@
 			if (!desc)
 				goto err_desc_get;
 
-			mem = sg_phys(sg);
+			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 			mem_width = 2;
 			if (unlikely(mem & 3 || len & 3))
@@ -749,8 +748,8 @@
 	first->txd.cookie = -EBUSY;
 	first->len = total_len;
 
-	/* last link descriptor of list is responsible of flags */
-	prev->txd.flags = flags; /* client is in control of this ack */
+	/* first link descriptor of list is responsible for flags */
+	first->txd.flags = flags; /* client is in control of this ack */
 
 	return &first->txd;
 
@@ -854,11 +853,11 @@
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
+	spin_lock_bh(&atchan->lock);
 	if (!atc_chan_is_enabled(atchan)) {
-		spin_lock_bh(&atchan->lock);
 		atc_advance_work(atchan);
-		spin_unlock_bh(&atchan->lock);
 	}
+	spin_unlock_bh(&atchan->lock);
 }
 
 /**
@@ -1210,7 +1209,7 @@
 {
 	return platform_driver_probe(&at_dma_driver, at_dma_probe);
 }
-module_init(at_dma_init);
+subsys_initcall(at_dma_init);
 
 static void __exit at_dma_exit(void)
 {
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e5e172d2..4de947a 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1,7 +1,7 @@
 /*
  * Freescale MPC85xx, MPC83xx DMA Engine support
  *
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author:
  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
@@ -1324,6 +1324,8 @@
 	fdev->common.device_control = fsl_dma_device_control;
 	fdev->common.dev = &op->dev;
 
+	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+
 	dev_set_drvdata(&op->dev, fdev);
 
 	/*
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e53d438..e18eaab 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@
 
 struct imxdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
 	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
 };
@@ -242,6 +243,21 @@
 	else
 		dmamode = DMA_MODE_WRITE;
 
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		if (sgl->length & 3 || sgl->dma_address & 3)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		if (sgl->length & 1 || sgl->dma_address & 1)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		break;
+	default:
+		return NULL;
+	}
+
 	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
 		 dma_length, imxdmac->per_address, dmamode);
 	if (ret)
@@ -329,6 +345,9 @@
 
 	INIT_LIST_HEAD(&imxdma->dma_device.channels);
 
+	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@
 		imxdmac->imxdma = imxdma;
 		spin_lock_init(&imxdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
 		imxdmac->chan.device = &imxdma->dma_device;
-		imxdmac->chan.chan_id = i;
 		imxdmac->channel = i;
 
 		/* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@
 
 	platform_set_drvdata(pdev, imxdma);
 
+	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d..b6d1455 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id
+ * @channel		the channel number, matches dmaengine chan_id + 1
  * @direction		transfer type. Needed for setting SDMA script
  * @peripheral_type	Peripheral type. Needed for setting SDMA script
  * @event_id0		aka dma request line
@@ -301,6 +301,7 @@
 
 struct sdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
@@ -449,7 +450,7 @@
 		if (bd->mode.status & BD_RROR)
 			sdmac->status = DMA_ERROR;
 		else
-			sdmac->status = DMA_SUCCESS;
+			sdmac->status = DMA_IN_PROGRESS;
 
 		bd->mode.status |= BD_DONE;
 		sdmac->buf_tail++;
@@ -770,15 +771,15 @@
 	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 {
-	dma_cookie_t cookie = sdma->chan.cookie;
+	dma_cookie_t cookie = sdmac->chan.cookie;
 
 	if (++cookie < 0)
 		cookie = 1;
 
-	sdma->chan.cookie = cookie;
-	sdma->desc.cookie = cookie;
+	sdmac->chan.cookie = cookie;
+	sdmac->desc.cookie = cookie;
 
 	return cookie;
 }
@@ -798,7 +799,7 @@
 
 	cookie = sdma_assign_cookie(sdmac);
 
-	sdma_enable_channel(sdma, tx->chan->chan_id);
+	sdma_enable_channel(sdma, sdmac->channel);
 
 	spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@
 	struct imx_dma_data *data = chan->private;
 	int prio, ret;
 
-	/* No need to execute this for internal channel 0 */
-	if (chan->chan_id == 0)
-		return 0;
-
 	if (!data)
 		return -EINVAL;
 
@@ -879,7 +876,7 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	struct scatterlist *sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@
 			ret =  -EINVAL;
 			goto err_out;
 		}
-		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-		else
-			bd->mode.command = sdmac->word_size;
+			if (count & 3 || sg->dma_address & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if (count & 1 || sg->dma_address & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-		if (sdmac->flags & IMX_DMA_SG_LOOP) {
+		if (i + 1 == sg_len) {
 			param |= BD_INTR;
-			if (i + 1 == sg_len)
-				param |= BD_WRAP;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
 		}
 
-		if (i + 1 == sg_len)
-			param |= BD_INTR;
-
 		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
 				i, count, sg->dma_address,
 				param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@
 
 	return &sdmac->desc;
 err_out:
+	sdmac->status = DMA_ERROR;
 	return NULL;
 }
 
@@ -963,7 +972,7 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	int ret, i = 0, buf = 0;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	dma_cookie_t last_used;
-	enum dma_status ret;
 
 	last_used = chan->cookie;
 
-	ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
 	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
 
-	return ret;
+	return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@
 	/* download the RAM image for SDMA */
 	sdma_load_script(sdma, ram_code,
 			header->ram_code_size,
-			sdma->script_addrs->ram_code_start_addr);
+			addr->ram_code_start_addr);
 	clk_disable(sdma->clk);
 
 	sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@
 	struct resource *iores;
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
-	dma_cap_mask_t mask;
 	struct sdma_engine *sdma;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@
 
 	sdma->version = pdata->sdma_version;
 
+	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@
 		sdmac->sdma = sdma;
 		spin_lock_init(&sdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
 		sdmac->chan.device = &sdma->dma_device;
-		sdmac->chan.chan_id = i;
 		sdmac->channel = i;
 
-		/* Add the channel to the DMAC list */
-		list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+		/*
+		 * Add the channel to the DMAC list. Do not add channel 0 though
+		 * because we need it internally in the SDMA driver. This also means
+		 * that channel 0 in dmaengine counting matches sdma channel 1.
+		 */
+		if (i)
+			list_add_tail(&sdmac->chan.device_node,
+					&sdma->dma_device.channels);
 	}
 
 	ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
 	ret = dma_async_device_register(&sdma->dma_device);
 	if (ret) {
@@ -1324,13 +1337,6 @@
 		goto err_init;
 	}
 
-	/* request channel 0. This is an internal control channel
-	 * to the SDMA engine and not available to clients.
-	 */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	dma_request_channel(mask, NULL, NULL);
-
 	dev_info(sdma->dev, "initialized\n");
 
 	return 0;
@@ -1348,7 +1354,7 @@
 err_request_region:
 err_irq:
 	kfree(sdma);
-	return 0;
+	return ret;
 }
 
 static int __exit sdma_remove(struct platform_device *pdev)
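Elsewhere in this patch, sdma_prep_slave_sg() encodes the slave bus width into bd->mode.command (0 selects 32-bit accesses, 2 selects 16-bit, 1 selects 8-bit) and rejects misaligned segments. A hedged sketch of just that mapping, with a hypothetical helper name:

/* Sketch of the word-size -> SDMA command encoding used in the hunk above.
 * Returns a negative value for widths the engine cannot handle. */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int sdma_buswidth_to_command(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return 0;	/* 32-bit accesses */
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return 2;	/* 16-bit accesses */
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return 1;	/* 8-bit accesses */
	default:
		return -EINVAL;
	}
}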
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3109bd9..798f46a 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -664,11 +664,20 @@
 	/*calculate CTL_LO*/
 	ctl_lo.ctl_lo = 0;
 	ctl_lo.ctlx.int_en = 1;
-	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
-	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
 	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
 	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
 
+	/*
+	 * Here we need some translation from "enum dma_slave_buswidth"
+	 * to the format for our dma controller
+	 *		standard	intel_mid_dmac's format
+	 *		 1 Byte			0b000
+	 *		 2 Bytes		0b001
+	 *		 4 Bytes		0b010
+	 */
+	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
+	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
+
 	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
 		ctl_lo.ctlx.tt_fc = 0;
 		ctl_lo.ctlx.sinc = 0;
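The translation above works because the dmaengine bus-width constants are byte counts (1, 2, 4), so integer division by two yields the controller's 0b000/0b001/0b010 encoding. A sketch that spells the mapping out explicitly (helper name is hypothetical):

/* Sketch: explicit translation from enum dma_slave_buswidth (byte counts)
 * to the TR_WIDTH field format described in the comment above. Equivalent
 * to "width / 2" for the supported 1/2/4-byte widths. */
#include <linux/dmaengine.h>
#include <linux/types.h>

static u32 mid_dma_tr_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return 0x0;	/* 0b000 */
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return 0x1;	/* 0b001 */
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return 0x2;	/* 0b010 */
	default:
		return 0x0;	/* fall back to byte-wide transfers */
	}
}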
@@ -746,8 +755,18 @@
 	BUG_ON(!mids);
 
 	if (!midc->dma->pimr_mask) {
-		pr_debug("MDMA: SG list is not supported by this controller\n");
-		return  NULL;
+		/* We can still handle sg list with only one item */
+		if (sg_len == 1) {
+			txd = intel_mid_dma_prep_memcpy(chan,
+						mids->dma_slave.dst_addr,
+						mids->dma_slave.src_addr,
+						sgl->length,
+						flags);
+			return txd;
+		} else {
+			pr_warn("MDMA: SG list is not supported by this controller\n");
+			return  NULL;
+		}
 	}
 
 	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
@@ -758,6 +777,7 @@
 		pr_err("MDMA: Prep memcpy failed\n");
 		return NULL;
 	}
+
 	desc = to_intel_mid_dma_desc(txd);
 	desc->dirn = direction;
 	ctl_lo.ctl_lo = desc->ctl_lo;
@@ -1021,11 +1041,6 @@
 
 	/*DMA Interrupt*/
 	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
-	if (!mid) {
-		pr_err("ERR_MDMA:null pointer mid\n");
-		return -EINVAL;
-	}
-
 	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
 	tfr_status &= mid->intr_mask;
 	if (tfr_status) {
@@ -1060,8 +1075,8 @@
  * mid_setup_dma -	Setup the DMA controller
  * @pdev: Controller PCI device structure
  *
- * Initilize the DMA controller, channels, registers with DMA engine,
- * ISR. Initilize DMA controller channels.
+ * Initialize the DMA controller, channels, registers with DMA engine,
+ * ISR. Initialize DMA controller channels.
  */
 static int mid_setup_dma(struct pci_dev *pdev)
 {
@@ -1217,7 +1232,7 @@
  * @pdev: Controller PCI device structure
  * @id: pci device id structure
  *
- * Initilize the PCI device, map BARs, query driver data.
+ * Initialize the PCI device, map BARs, query driver data.
  * Call setup_dma to complete contoller and chan initilzation
  */
 static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 161c452..c6b01f5 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1261,7 +1261,7 @@
 	return err;
 }
 
-#ifdef CONFIG_MD_RAID6_PQ
+#ifdef CONFIG_RAID6_PQ
 static int __devinit
 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 {
@@ -1584,7 +1584,7 @@
 
 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
 	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
-		#ifdef CONFIG_MD_RAID6_PQ
+		#ifdef CONFIG_RAID6_PQ
 		ret = iop_adma_pq_zero_sum_self_test(adev);
 		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
 		#else
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb26ee9..c1a125e7 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1145,29 +1145,6 @@
 	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
 	idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
 
-	/*
-	 * Problem (observed with channel DMAIC_7): after enabling the channel
-	 * and initialising buffers, there comes an interrupt with current still
-	 * pointing at buffer 0, whereas it should use buffer 0 first and only
-	 * generate an interrupt when it is done, then current should already
-	 * point to buffer 1. This spurious interrupt also comes on channel
-	 * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
-	 * first interrupt, there comes the second with current correctly
-	 * pointing to buffer 1 this time. But sometimes this second interrupt
-	 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
-	 * the channel seems to prevent the channel from hanging, but it doesn't
-	 * prevent the spurious interrupt. This might also be unsafe. Think
-	 * about the IDMAC controller trying to switch to a buffer, when we
-	 * clear the ready bit, and re-enable it a moment later.
-	 */
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
-
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
-
 	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	return 0;
@@ -1246,33 +1223,6 @@
 
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
-	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-		     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
-		     !list_is_last(ichan->queue.next, &ichan->queue))) {
-		int i = 100;
-
-		/* This doesn't help. See comment in ipu_disable_channel() */
-		while (--i) {
-			curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
-			if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
-				break;
-			cpu_relax();
-		}
-
-		if (!i) {
-			spin_unlock(&ichan->lock);
-			dev_dbg(dev,
-				"IRQ on active buffer on channel %x, active "
-				"%d, ready %x, %x, current %x!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf);
-			return IRQ_NONE;
-		} else
-			dev_dbg(dev,
-				"Buffer deactivated on channel %x, active "
-				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf, i);
-	}
-
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 		     )) {
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf3..59c2701 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -70,6 +71,8 @@
 #define MPC_DMA_DMAES_SBE	(1 << 1)
 #define MPC_DMA_DMAES_DBE	(1 << 0)
 
+#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
+
 #define MPC_DMA_TSIZE_1		0x00
 #define MPC_DMA_TSIZE_2		0x01
 #define MPC_DMA_TSIZE_4		0x02
@@ -104,7 +107,10 @@
 	/* 0x30 */
 	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
 	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
-	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
+	union {
+		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
+		u32 dmagpor;	/* (General purpose register on MPC8308) */
+	};
 	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
 	/* 0x40 ~ 0xff */
 	u32 reserve0[48];	/* Reserved */
@@ -195,7 +201,9 @@
 	struct mpc_dma_regs __iomem	*regs;
 	struct mpc_dma_tcd __iomem	*tcd;
 	int				irq;
+	int				irq2;
 	uint				error_status;
+	int				is_mpc8308;
 
 	/* Lock for error_status field in this structure */
 	spinlock_t			error_status_lock;
@@ -252,11 +260,13 @@
 		prev = mdesc;
 	}
 
-	prev->tcd->start = 0;
 	prev->tcd->int_maj = 1;
 
 	/* Send first descriptor in chain into hardware */
 	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+
+	if (first != prev)
+		mdma->tcd[cid].e_sg = 1;
 	out_8(&mdma->regs->dmassrt, cid);
 }
 
@@ -274,6 +284,9 @@
 
 		spin_lock(&mchan->lock);
 
+		out_8(&mdma->regs->dmacint, ch + off);
+		out_8(&mdma->regs->dmacerr, ch + off);
+
 		/* Check error status */
 		if (es & (1 << ch))
 			list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +315,68 @@
 	spin_unlock(&mdma->error_status_lock);
 
 	/* Handle interrupt on each channel */
-	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+	if (mdma->dma.chancnt > 32) {
+		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
 					in_be32(&mdma->regs->dmaerrh), 32);
+	}
 	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
 					in_be32(&mdma->regs->dmaerrl), 0);
 
-	/* Ack interrupt on all channels */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
 	/* Schedule tasklet */
 	tasklet_schedule(&mdma->tasklet);
 
 	return IRQ_HANDLED;
 }
 
-/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+/* process completed descriptors */
+static void mpc_dma_process_completed(struct mpc_dma *mdma)
 {
-	struct mpc_dma *mdma = (void *)data;
 	dma_cookie_t last_cookie = 0;
 	struct mpc_dma_chan *mchan;
 	struct mpc_dma_desc *mdesc;
 	struct dma_async_tx_descriptor *desc;
 	unsigned long flags;
 	LIST_HEAD(list);
-	uint es;
 	int i;
 
+	for (i = 0; i < mdma->dma.chancnt; i++) {
+		mchan = &mdma->channels[i];
+
+		/* Get all completed descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		if (!list_empty(&mchan->completed))
+			list_splice_tail_init(&mchan->completed, &list);
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		if (list_empty(&list))
+			continue;
+
+		/* Execute callbacks and run dependencies */
+		list_for_each_entry(mdesc, &list, node) {
+			desc = &mdesc->desc;
+
+			if (desc->callback)
+				desc->callback(desc->callback_param);
+
+			last_cookie = desc->cookie;
+			dma_run_dependencies(desc);
+		}
+
+		/* Free descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		list_splice_tail_init(&list, &mchan->free);
+		mchan->completed_cookie = last_cookie;
+		spin_unlock_irqrestore(&mchan->lock, flags);
+	}
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+	struct mpc_dma *mdma = (void *)data;
+	unsigned long flags;
+	uint es;
+
 	spin_lock_irqsave(&mdma->error_status_lock, flags);
 	es = mdma->error_status;
 	mdma->error_status = 0;
@@ -370,35 +415,7 @@
 			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
 	}
 
-	for (i = 0; i < mdma->dma.chancnt; i++) {
-		mchan = &mdma->channels[i];
-
-		/* Get all completed descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		if (!list_empty(&mchan->completed))
-			list_splice_tail_init(&mchan->completed, &list);
-		spin_unlock_irqrestore(&mchan->lock, flags);
-
-		if (list_empty(&list))
-			continue;
-
-		/* Execute callbacks and run dependencies */
-		list_for_each_entry(mdesc, &list, node) {
-			desc = &mdesc->desc;
-
-			if (desc->callback)
-				desc->callback(desc->callback_param);
-
-			last_cookie = desc->cookie;
-			dma_run_dependencies(desc);
-		}
-
-		/* Free descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		list_splice_tail_init(&list, &mchan->free);
-		mchan->completed_cookie = last_cookie;
-		spin_unlock_irqrestore(&mchan->lock, flags);
-	}
+	mpc_dma_process_completed(mdma);
 }
 
 /* Submit descriptor to hardware */
@@ -563,6 +580,7 @@
 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 					size_t len, unsigned long flags)
 {
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	struct mpc_dma_desc *mdesc = NULL;
 	struct mpc_dma_tcd *tcd;
@@ -577,8 +595,11 @@
 	}
 	spin_unlock_irqrestore(&mchan->lock, iflags);
 
-	if (!mdesc)
+	if (!mdesc) {
+		/* try to free completed descriptors */
+		mpc_dma_process_completed(mdma);
 		return NULL;
+	}
 
 	mdesc->error = 0;
 	tcd = mdesc->tcd;
@@ -591,7 +612,8 @@
 		tcd->dsize = MPC_DMA_TSIZE_32;
 		tcd->soff = 32;
 		tcd->doff = 32;
-	} else if (IS_ALIGNED(src | dst | len, 16)) {
+	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
+		/* MPC8308 doesn't support 16 byte transfers */
 		tcd->ssize = MPC_DMA_TSIZE_16;
 		tcd->dsize = MPC_DMA_TSIZE_16;
 		tcd->soff = 16;
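The hunk above selects the widest transfer size that src, dst and len are all aligned to, and skips the 16-byte case on MPC8308. A hedged sketch of that selection; the 4/2/1-byte fall-through mirrors the driver's remaining (elided) branches and is an assumption, as is the helper name:

/* Sketch: pick the widest transfer width (in bytes) that src, dst and len
 * are all aligned to, skipping 16-byte bursts on MPC8308, which lacks them.
 * The driver maps the result onto its MPC_DMA_TSIZE_* codes. */
#include <linux/kernel.h>
#include <linux/types.h>

static unsigned int mpc_dma_pick_width(dma_addr_t src, dma_addr_t dst,
				       size_t len, bool is_mpc8308)
{
	if (IS_ALIGNED(src | dst | len, 32))
		return 32;
	if (!is_mpc8308 && IS_ALIGNED(src | dst | len, 16))
		return 16;
	if (IS_ALIGNED(src | dst | len, 4))
		return 4;
	if (IS_ALIGNED(src | dst | len, 2))
		return 2;
	return 1;
}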
@@ -651,6 +673,15 @@
 		return -EINVAL;
 	}
 
+	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
+		mdma->is_mpc8308 = 1;
+		mdma->irq2 = irq_of_parse_and_map(dn, 1);
+		if (mdma->irq2 == NO_IRQ) {
+			dev_err(dev, "Error mapping IRQ!\n");
+			return -EINVAL;
+		}
+	}
+
 	retval = of_address_to_resource(dn, 0, &res);
 	if (retval) {
 		dev_err(dev, "Error parsing memory region!\n");
@@ -681,11 +712,23 @@
 		return -EINVAL;
 	}
 
+	if (mdma->is_mpc8308) {
+		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
+				DRV_NAME, mdma);
+		if (retval) {
+			dev_err(dev, "Error requesting IRQ2!\n");
+			return -EINVAL;
+		}
+	}
+
 	spin_lock_init(&mdma->error_status_lock);
 
 	dma = &mdma->dma;
 	dma->dev = dev;
-	dma->chancnt = MPC_DMA_CHANNELS;
+	if (!mdma->is_mpc8308)
+		dma->chancnt = MPC_DMA_CHANNELS;
+	else
+		dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
 	dma->device_issue_pending = mpc_dma_issue_pending;
@@ -721,26 +764,40 @@
 	 * - Round-robin group arbitration,
 	 * - Round-robin channel arbitration.
 	 */
-	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
-				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+	if (!mdma->is_mpc8308) {
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
 
-	/* Disable hardware DMA requests */
-	out_be32(&mdma->regs->dmaerqh, 0);
-	out_be32(&mdma->regs->dmaerql, 0);
+		/* Disable hardware DMA requests */
+		out_be32(&mdma->regs->dmaerqh, 0);
+		out_be32(&mdma->regs->dmaerql, 0);
 
-	/* Disable error interrupts */
-	out_be32(&mdma->regs->dmaeeih, 0);
-	out_be32(&mdma->regs->dmaeeil, 0);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeih, 0);
+		out_be32(&mdma->regs->dmaeeil, 0);
 
-	/* Clear interrupts status */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
 
-	/* Route interrupts to IPIC */
-	out_be32(&mdma->regs->dmaihsa, 0);
-	out_be32(&mdma->regs->dmailsa, 0);
+		/* Route interrupts to IPIC */
+		out_be32(&mdma->regs->dmaihsa, 0);
+		out_be32(&mdma->regs->dmailsa, 0);
+	} else {
+		/* MPC8308 has 16 channels and lacks some registers */
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+		/* enable snooping */
+		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeil, 0);
+
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmaintl, 0xFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+	}
 
 	/* Register DMA engine */
 	dev_set_drvdata(dev, mdma);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c064c89..1c38418 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,6 +1,7 @@
 /*
  * Topcliff PCH DMA controller driver
  * Copyright (c) 2010 Intel Corporation
+ * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -921,12 +922,19 @@
 }
 
 /* PCI Device ID of DMA device */
-#define PCI_DEVICE_ID_PCH_DMA_8CH        0x8810
-#define PCI_DEVICE_ID_PCH_DMA_4CH        0x8815
+#define PCI_VENDOR_ID_ROHM             0x10DB
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH        0x8810
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH        0x8815
+#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
+#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
+#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
 
 static const struct pci_device_id pch_dma_id_table[] = {
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
 	{ 0, },
 };
 
@@ -954,6 +962,7 @@
 module_init(pch_dma_init);
 module_exit(pch_dma_exit);
 
-MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
+MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+		   "DMA controller driver");
 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fab68a5..6e1d46a 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) ST-Ericsson SA 2007-2010
+ * Copyright (C) Ericsson AB 2007-2008
+ * Copyright (C) ST-Ericsson SA 2008-2010
  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@
 	return d;
 }
 
-/* Support functions for logical channels */
+static int d40_psize_2_burst_size(bool is_log, int psize)
+{
+	if (is_log) {
+		if (psize == STEDMA40_PSIZE_LOG_1)
+			return 1;
+	} else {
+		if (psize == STEDMA40_PSIZE_PHY_1)
+			return 1;
+	}
 
+	return 2 << psize;
+}
+
+/*
+ * The dma only supports transmitting packages up to
+ * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
+ * dma elements required to send the entire sg list
+ */
+static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
+{
+	int dmalen;
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (!IS_ALIGNED(size, 1 << max_w))
+		return -EINVAL;
+
+	if (size <= seg_max)
+		dmalen = 1;
+	else {
+		dmalen = size / seg_max;
+		if (dmalen * seg_max < size)
+			dmalen++;
+	}
+	return dmalen;
+}
+
+static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
+			   u32 data_width1, u32 data_width2)
+{
+	struct scatterlist *sg;
+	int i;
+	int len = 0;
+	int ret;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		ret = d40_size_2_dmalen(sg_dma_len(sg),
+					data_width1, data_width2);
+		if (ret < 0)
+			return ret;
+		len += ret;
+	}
+	return len;
+}
+
+/* Support functions for logical channels */
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
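d40_size_2_dmalen() above computes the largest per-segment size both bus widths can address, then divides the transfer size by it, rounding up. A small worked sketch of that arithmetic, assuming a 0xffff element limit for STEDMA40_MAX_SEG_SIZE and log2-encoded data widths (both assumptions, taken from the surrounding code rather than the hardware spec):

/* Sketch of the dmalen arithmetic: with src width 2^0 = 1 byte and dst
 * width 2^2 = 4 bytes, seg_max becomes ALIGN(0xffff, 4) - 4 = 0xfffc bytes,
 * so a 200000-byte (4-byte aligned) buffer needs
 * DIV_ROUND_UP(200000, 0xfffc) = 4 linked-list items. */
#include <linux/kernel.h>

static int dmalen_example(void)
{
	u32 max_w = 2, min_w = 0;		/* log2 of 4-byte and 1-byte widths */
	u32 seg_max = ALIGN(0xffff << min_w, 1 << max_w);

	if (seg_max > 0xffff)
		seg_max -= 1 << max_w;		/* keep the segment representable */

	return DIV_ROUND_UP(200000, seg_max);	/* -> 4 segments */
}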
@@ -1241,6 +1300,21 @@
 		res = -EINVAL;
 	}
 
+	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
+	    (1 << conf->src_info.data_width) !=
+	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
+	    (1 << conf->dst_info.data_width)) {
+		/*
+		 * The DMAC hardware only supports
+		 * src (burst x width) == dst (burst x width)
+		 */
+
+		dev_err(&d40c->chan.dev->device,
+			"[%s] src (burst x width) != dst (burst x width)\n",
+			__func__);
+		res = -EINVAL;
+	}
+
 	return res;
 }
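The added check rejects configurations where source and destination would move a different number of bytes per burst, since the hardware requires both sides of a transfer to agree. A tiny illustration of the rule (helper and numbers are illustrative, not taken from the hardware spec):

/* Sketch: a configuration is only valid when
 *   src_burst_elems * src_width_bytes == dst_burst_elems * dst_width_bytes.
 * E.g. 8-element bursts of 2-byte items (16 bytes/burst) pair with
 * 4-element bursts of 4-byte items, but not with 8-element bursts of
 * 4-byte items. */
#include <linux/types.h>

static bool d40_burst_sizes_match(int src_burst, int src_width_bytes,
				  int dst_burst, int dst_width_bytes)
{
	return src_burst * src_width_bytes == dst_burst * dst_width_bytes;
}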
 
@@ -1638,13 +1712,21 @@
 	if (d40d == NULL)
 		goto err;
 
-	d40d->lli_len = sgl_len;
+	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 	d40d->lli_current = 0;
 	d40d->txd.flags = dma_flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1654,15 +1736,17 @@
 					 sgl_len,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
-					 d40c->dma_cfg.src_info.data_width);
+					 d40c->dma_cfg.src_info.data_width,
+					 d40c->dma_cfg.dst_info.data_width);
 
 		(void) d40_log_sg_to_lli(sgl_dst,
 					 sgl_len,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
-					 d40c->dma_cfg.dst_info.data_width);
+					 d40c->dma_cfg.dst_info.data_width,
+					 d40c->dma_cfg.src_info.data_width);
 	} else {
-		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
@@ -1675,6 +1759,7 @@
 					virt_to_phys(d40d->lli_phy.src),
 					d40c->src_def_cfg,
 					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width,
 					d40c->dma_cfg.src_info.psize);
 
 		if (res < 0)
@@ -1687,6 +1772,7 @@
 					virt_to_phys(d40d->lli_phy.dst),
 					d40c->dst_def_cfg,
 					d40c->dma_cfg.dst_info.data_width,
+					d40c->dma_cfg.src_info.data_width,
 					d40c->dma_cfg.dst_info.psize);
 
 		if (res < 0)
@@ -1826,7 +1912,6 @@
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
 	unsigned long flags;
-	int err = 0;
 
 	if (d40c->phy_chan == NULL) {
 		dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@
 	}
 
 	d40d->txd.flags = dma_flags;
+	d40d->lli_len = d40_size_2_dmalen(size,
+					  d40c->dma_cfg.src_info.data_width,
+					  d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		goto err;
+	}
+
 
 	dma_async_tx_descriptor_init(&d40d->txd, chan);
 
@@ -1851,37 +1945,40 @@
 
 	if (d40c->log_num != D40_PHY_CHAN) {
 
-		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
-		d40d->lli_len = 1;
 		d40d->lli_current = 0;
 
-		d40_log_fill_lli(d40d->lli_log.src,
-				 src,
-				 size,
-				 d40c->log_def.lcsp1,
-				 d40c->dma_cfg.src_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.src,
+				       src,
+				       size,
+				       d40c->log_def.lcsp1,
+				       d40c->dma_cfg.src_info.data_width,
+				       d40c->dma_cfg.dst_info.data_width,
+				       true) == NULL)
+			goto err;
 
-		d40_log_fill_lli(d40d->lli_log.dst,
-				 dst,
-				 size,
-				 d40c->log_def.lcsp3,
-				 d40c->dma_cfg.dst_info.data_width,
-				 true);
+		if (d40_log_buf_to_lli(d40d->lli_log.dst,
+				       dst,
+				       size,
+				       d40c->log_def.lcsp3,
+				       d40c->dma_cfg.dst_info.data_width,
+				       d40c->dma_cfg.src_info.data_width,
+				       true) == NULL)
+			goto err;
 
 	} else {
 
-		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 			dev_err(&d40c->chan.dev->device,
 				"[%s] Out of memory\n", __func__);
 			goto err;
 		}
 
-		err = d40_phy_fill_lli(d40d->lli_phy.src,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.src,
 				       src,
 				       size,
 				       d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@
 				       d40c->src_def_cfg,
 				       true,
 				       d40c->dma_cfg.src_info.data_width,
-				       false);
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.dst_info.data_width,
+				       false) == NULL)
+			goto err;
 
-		err = d40_phy_fill_lli(d40d->lli_phy.dst,
+		if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
 				       dst,
 				       size,
 				       d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@
 				       d40c->dst_def_cfg,
 				       true,
 				       d40c->dma_cfg.dst_info.data_width,
-				       false);
-
-		if (err)
-			goto err_fill_lli;
+				       d40c->dma_cfg.src_info.data_width,
+				       false) == NULL)
+			goto err;
 
 		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
 				      d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return &d40d->txd;
 
-err_fill_lli:
-	dev_err(&d40c->chan.dev->device,
-		"[%s] Failed filling in PHY LLI\n", __func__);
 err:
 	if (d40d)
 		d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@
 	dma_addr_t dev_addr = 0;
 	int total_size;
 
-	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sg_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@
 	dma_addr_t dst_dev_addr;
 	int res;
 
-	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
+					d40c->dma_cfg.src_info.data_width,
+					d40c->dma_cfg.dst_info.data_width);
+	if (d40d->lli_len < 0) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unaligned size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
 		dev_err(&d40c->chan.dev->device,
 			"[%s] Out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	d40d->lli_len = sgl_len;
 	d40d->lli_current = 0;
 
 	if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@
 				virt_to_phys(d40d->lli_phy.src),
 				d40c->src_def_cfg,
 				d40c->dma_cfg.src_info.data_width,
+				d40c->dma_cfg.dst_info.data_width,
 				d40c->dma_cfg.src_info.psize);
 	if (res < 0)
 		return res;
@@ -2035,6 +2145,7 @@
 				virt_to_phys(d40d->lli_phy.dst),
 				d40c->dst_def_cfg,
 				d40c->dma_cfg.dst_info.data_width,
+				d40c->dma_cfg.src_info.data_width,
 				d40c->dma_cfg.dst_info.psize);
 	if (res < 0)
 		return res;
@@ -2244,6 +2355,8 @@
 			psize = STEDMA40_PSIZE_PHY_8;
 		else if (config_maxburst >= 4)
 			psize = STEDMA40_PSIZE_PHY_4;
+		else if (config_maxburst >= 2)
+			psize = STEDMA40_PSIZE_PHY_2;
 		else
 			psize = STEDMA40_PSIZE_PHY_1;
 	}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 8557cb8..0b096a3 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
  */
@@ -122,15 +122,15 @@
 	*dst_cfg = dst;
 }
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
-		     dma_addr_t data,
-		     u32 data_size,
-		     int psize,
-		     dma_addr_t next_lli,
-		     u32 reg_cfg,
-		     bool term_int,
-		     u32 data_width,
-		     bool is_device)
+static int d40_phy_fill_lli(struct d40_phy_lli *lli,
+			    dma_addr_t data,
+			    u32 data_size,
+			    int psize,
+			    dma_addr_t next_lli,
+			    u32 reg_cfg,
+			    bool term_int,
+			    u32 data_width,
+			    bool is_device)
 {
 	int num_elems;
 
@@ -139,13 +139,6 @@
 	else
 		num_elems = 2 << psize;
 
-	/*
-	 * Size is 16bit. data_width is 8, 16, 32 or 64 bit
-	 * Block large than 64 KiB must be split.
-	 */
-	if (data_size > (0xffff << data_width))
-		return -EINVAL;
-
 	/* Must be aligned */
 	if (!IS_ALIGNED(data, 0x1 << data_width))
 		return -EINVAL;
@@ -187,55 +180,118 @@
 	return 0;
 }
 
+static int d40_seg_size(int size, int data_width1, int data_width2)
+{
+	u32 max_w = max(data_width1, data_width2);
+	u32 min_w = min(data_width1, data_width2);
+	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+	if (seg_max > STEDMA40_MAX_SEG_SIZE)
+		seg_max -= (1 << max_w);
+
+	if (size <= seg_max)
+		return size;
+
+	if (size <= 2 * seg_max)
+		return ALIGN(size / 2, 1 << max_w);
+
+	return seg_max;
+}
+
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t addr,
+				       u32 size,
+				       int psize,
+				       dma_addr_t lli_phys,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device)
+{
+	int err;
+	dma_addr_t next = lli_phys;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		if (term_int && size_rest == 0)
+			next = 0;
+		else
+			next = ALIGN(next + sizeof(struct d40_phy_lli),
+				     D40_LLI_ALIGN);
+
+		err = d40_phy_fill_lli(lli,
+				       addr,
+				       size_seg,
+				       psize,
+				       next,
+				       reg_cfg,
+				       !next,
+				       data_width1,
+				       is_device);
+
+		if (err)
+			goto err;
+
+		lli++;
+		if (!is_device)
+			addr += size_seg;
+	} while (size_rest);
+
+	return lli;
+
+ err:
+	return NULL;
+}
+
 int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      dma_addr_t target,
-		      struct d40_phy_lli *lli,
+		      struct d40_phy_lli *lli_sg,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize)
 {
 	int total_size = 0;
 	int i;
 	struct scatterlist *current_sg = sg;
-	dma_addr_t next_lli_phys;
 	dma_addr_t dst;
-	int err = 0;
+	struct d40_phy_lli *lli = lli_sg;
+	dma_addr_t l_phys = lli_phys;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 
 		total_size += sg_dma_len(current_sg);
 
-		/* If this scatter list entry is the last one, no next link */
-		if (sg_len - 1 == i)
-			next_lli_phys = 0;
-		else
-			next_lli_phys = ALIGN(lli_phys + (i + 1) *
-					      sizeof(struct d40_phy_lli),
-					      D40_LLI_ALIGN);
-
 		if (target)
 			dst = target;
 		else
 			dst = sg_phys(current_sg);
 
-		err = d40_phy_fill_lli(&lli[i],
-				       dst,
-				       sg_dma_len(current_sg),
-				       psize,
-				       next_lli_phys,
-				       reg_cfg,
-				       !next_lli_phys,
-				       data_width,
-				       target == dst);
-		if (err)
-			goto err;
+		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
+			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
+
+		lli = d40_phy_buf_to_lli(lli,
+					 dst,
+					 sg_dma_len(current_sg),
+					 psize,
+					 l_phys,
+					 reg_cfg,
+					 sg_len - 1 == i,
+					 data_width1,
+					 data_width2,
+					 target == dst);
+		if (lli == NULL)
+			return -EINVAL;
 	}
 
 	return total_size;
-err:
-	return err;
 }
 
 
@@ -315,17 +371,20 @@
 	writel(lli_dst->lcsp13, &lcla[1].lcsp13);
 }
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
-		      dma_addr_t data, u32 data_size,
-		      u32 reg_cfg,
-		      u32 data_width,
-		      bool addr_inc)
+static void d40_log_fill_lli(struct d40_log_lli *lli,
+			     dma_addr_t data, u32 data_size,
+			     u32 reg_cfg,
+			     u32 data_width,
+			     bool addr_inc)
 {
 	lli->lcsp13 = reg_cfg;
 
 	/* The number of elements to transfer */
 	lli->lcsp02 = ((data_size >> data_width) <<
 		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+
+	BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+
 	/* 16 LSBs address of the current element */
 	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
 	/* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli_src = lli->src;
+	struct d40_log_lli *lli_dst = lli->dst;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
 
 		if (direction == DMA_TO_DEVICE) {
-			d40_log_fill_lli(&lli->src[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 true);
-			d40_log_fill_lli(&lli->dst[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 false);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   true);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   false);
 		} else {
-			d40_log_fill_lli(&lli->dst[i],
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp3, dst_data_width,
-					 true);
-			d40_log_fill_lli(&lli->src[i],
-					 dev_addr,
-					 sg_dma_len(current_sg),
-					 lcsp->lcsp1, src_data_width,
-					 false);
+			lli_dst =
+				d40_log_buf_to_lli(lli_dst,
+						   sg_phys(current_sg),
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp3, dst_data_width,
+						   src_data_width,
+						   true);
+			lli_src =
+				d40_log_buf_to_lli(lli_src,
+						   dev_addr,
+						   sg_dma_len(current_sg),
+						   lcsp->lcsp1, src_data_width,
+						   dst_data_width,
+						   false);
 		}
 	}
 	return total_size;
 }
 
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst*/
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool addr_inc)
+{
+	struct d40_log_lli *lli = lli_sg;
+	int size_rest = size;
+	int size_seg = 0;
+
+	do {
+		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_rest -= size_seg;
+
+		d40_log_fill_lli(lli,
+				 addr,
+				 size_seg,
+				 lcsp13, data_width1,
+				 addr_inc);
+		if (addr_inc)
+			addr += size_seg;
+		lli++;
+	} while (size_rest);
+
+	return lli;
+}
+
 int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width)
+		      u32 data_width1, u32 data_width2)
 {
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
+	struct d40_log_lli *lli = lli_sg;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
-
-		d40_log_fill_lli(&lli_sg[i],
-				 sg_phys(current_sg),
-				 sg_dma_len(current_sg),
-				 lcsp13, data_width,
-				 true);
+		lli = d40_log_buf_to_lli(lli,
+					 sg_phys(current_sg),
+					 sg_dma_len(current_sg),
+					 lcsp13,
+					 data_width1, data_width2, true);
 	}
 	return total_size;
 }
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9e419b9..9cc4349 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -292,18 +292,20 @@
 		      struct d40_phy_lli *lli,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width,
+		      u32 data_width1,
+		      u32 data_width2,
 		      int psize);
 
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
-		     dma_addr_t data,
-		     u32 data_size,
-		     int psize,
-		     dma_addr_t next_lli,
-		     u32 reg_cfg,
-		     bool term_int,
-		     u32 data_width,
-		     bool is_device);
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+				       dma_addr_t data,
+				       u32 data_size,
+				       int psize,
+				       dma_addr_t next_lli,
+				       u32 reg_cfg,
+				       bool term_int,
+				       u32 data_width1,
+				       u32 data_width2,
+				       bool is_device);
 
 void d40_phy_lli_write(void __iomem *virtbase,
 		       u32 phy_chan_num,
@@ -312,12 +314,12 @@
 
 /* Logical channels */
 
-void d40_log_fill_lli(struct d40_log_lli *lli,
-		      dma_addr_t data,
-		      u32 data_size,
-		      u32 reg_cfg,
-		      u32 data_width,
-		      bool addr_inc);
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+				       dma_addr_t addr,
+				       int size,
+				       u32 lcsp13, /* src or dst*/
+				       u32 data_width1, u32 data_width2,
+				       bool addr_inc);
 
 int d40_log_sg_to_dev(struct scatterlist *sg,
 		      int sg_len,
@@ -332,7 +334,7 @@
 		      int sg_len,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
-		      u32 data_width);
+		      u32 data_width1, u32 data_width2);
 
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4a5ecc5..23e0355 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -826,8 +826,6 @@
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
-	int ganged;
-
 	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
 	debugf1("  NB two channel DRAM capable: %s\n",
@@ -851,28 +849,19 @@
 	debugf1("  DramHoleValid: %s\n",
 		(pvt->dhar & DHAR_VALID) ? "yes" : "no");
 
+	amd64_debug_display_dimm_sizes(0, pvt);
+
 	/* everything below this point is Fam10h and above */
-	if (boot_cpu_data.x86 == 0xf) {
-		amd64_debug_display_dimm_sizes(0, pvt);
+	if (boot_cpu_data.x86 == 0xf)
 		return;
-	}
+
+	amd64_debug_display_dimm_sizes(1, pvt);
 
 	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
 
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
 		amd64_dump_dramcfg_low(pvt->dclr1, 1);
-
-	/*
-	 * Determine if ganged and then dump memory sizes for first controller,
-	 * and if NOT ganged dump info for 2nd controller.
-	 */
-	ganged = dct_ganging_enabled(pvt);
-
-	amd64_debug_display_dimm_sizes(0, pvt);
-
-	if (!ganged)
-		amd64_debug_display_dimm_sizes(1, pvt);
 }
 
 /* Read in both of DBAM registers */
@@ -1644,11 +1633,10 @@
 		       WARN_ON(ctrl != 0);
 	}
 
-	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
-		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
+	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
+	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
 
-	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
 
 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
 
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
index 60e0d1c..6f8b071 100644
--- a/drivers/edac/amd8131_edac.h
+++ b/drivers/edac/amd8131_edac.h
@@ -99,7 +99,7 @@
 
 /*
  * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
- * Controler, and ATCA-6101 has two AMD8131 chipsets, so there are
+ * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are
  * four PCIX Bridges on ATCA-6101 altogether.
  *
  * These PCIX Bridges share the same PCI Device ID and are all of
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index c973004..db1df59 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -47,7 +47,7 @@
 	offset = address & ~PAGE_MASK;
 	syndrome = (ar & 0x000000001fe00000ul) >> 21;
 
-	/* TODO: Decoding of the error addresss */
+	/* TODO: Decoding of the error address */
 	edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
 			  syndrome, 0, chan, "");
 }
@@ -68,7 +68,7 @@
 	pfn = address >> PAGE_SHIFT;
 	offset = address & ~PAGE_MASK;
 
-	/* TODO: Decoding of the error addresss */
+	/* TODO: Decoding of the error address */
 	edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
 }
 
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index ff1eb7b..3d96534 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -259,7 +259,7 @@
  *			for single channel are 64 bits, for dual channel 128
  *			bits.
  *
- * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memmory.
+ * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
  *			Motherboards commonly drive two chip-select pins to
  *			a memory stick. A single-ranked stick, will occupy
  *			only one of those rows. The other will be unused.
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 362861c..81154ab 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1,6 +1,6 @@
 /* Intel i7 core/Nehalem Memory Controller kernel module
  *
- * This driver supports yhe memory controllers found on the Intel
+ * This driver supports the memory controllers found on the Intel
  * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
  * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
  * and Westmere-EP.
@@ -1271,7 +1271,7 @@
 	int i;
 
 	/*
-	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
+	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
 	 * aren't announced by acpi. So, we need to use a legacy scan probing
 	 * to detect them
 	 */
@@ -1864,7 +1864,7 @@
 	if (mce->mcgstatus & 1)
 		i7core_check_error(mci);
 
-	/* Advice mcelog that the error were handled */
+	/* Advise mcelog that the errors were handled */
 	return 1;
 }
 
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 070cea4..b9f0c20 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -873,7 +873,7 @@
 }
 
 /**
- * ppc4xx_edac_init_csrows - intialize driver instance rows
+ * ppc4xx_edac_init_csrows - initialize driver instance rows
  * @mci: A pointer to the EDAC memory controller instance
  *       associated with the ibm,sdram-4xx-ddr2 controller for which
  *       the csrows (i.e. banks/ranks) are being initialized.
@@ -881,7 +881,7 @@
  *          currently set for the controller, from which bank width
  *          and memory typ information is derived.
  *
- * This routine intializes the virtual "chip select rows" associated
+ * This routine initializes the virtual "chip select rows" associated
  * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
  * controller bank/rank is mapped to a row.
  *
@@ -992,7 +992,7 @@
 }
 
 /**
- * ppc4xx_edac_mc_init - intialize driver instance
+ * ppc4xx_edac_mc_init - initialize driver instance
  * @mci: A pointer to the EDAC memory controller instance being
  *       initialized.
  * @op: A pointer to the OpenFirmware device tree node associated
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 68f942c..0c56989 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -49,15 +49,13 @@
 	  configuration section.
 
 config FIREWIRE_NET
-	tristate "IP networking over 1394 (EXPERIMENTAL)"
-	depends on FIREWIRE && INET && EXPERIMENTAL
+	tristate "IP networking over 1394"
+	depends on FIREWIRE && INET
 	help
 	  This enables IPv4 over IEEE 1394, providing IP connectivity with
 	  other implementations of RFC 2734 as found on several operating
 	  systems.  Multicast support is currently limited.
 
-	  NOTE, this driver is not stable yet!
-
 	  To compile this driver as a module, say M here:  The module will be
 	  called firewire-net.
 
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index be04923..24ff355 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,8 @@
 #define BIB_IRMC		((1) << 31)
 #define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
 
+#define CANON_OUI		0x000085
+
 static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
 {
 	struct fw_descriptor *desc;
@@ -284,6 +286,7 @@
 	bool root_device_is_running;
 	bool root_device_is_cmc;
 	bool irm_is_1394_1995_only;
+	bool keep_this_irm;
 
 	spin_lock_irq(&card->lock);
 
@@ -305,6 +308,10 @@
 	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
 			(irm_device->config_rom[2] & 0x000000f0) == 0;
 
+	/* Canon MV5i works unreliably if it is not root node. */
+	keep_this_irm = irm_device && irm_device->config_rom &&
+			irm_device->config_rom[3] >> 8 == CANON_OUI;
+
 	root_id  = root_node->node_id;
 	irm_id   = card->irm_node->node_id;
 	local_id = card->local_node->node_id;
@@ -333,7 +340,7 @@
 			goto pick_me;
 		}
 
-		if (irm_is_1394_1995_only) {
+		if (irm_is_1394_1995_only && !keep_this_irm) {
 			new_root_id = local_id;
 			fw_notify("%s, making local node (%02x) root.\n",
 				  "IRM is not 1394a compliant", new_root_id);
@@ -382,7 +389,7 @@
 
 		spin_lock_irq(&card->lock);
 
-		if (rcode != RCODE_COMPLETE) {
+		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
 			/*
 			 * The lock request failed, maybe the IRM
 			 * isn't really IRM capable after all. Let's
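The Canon quirk keys off the vendor OUI in the bus-info block: config_rom[3] carries the 24-bit OUI in its upper bits and the high byte of the chip ID in its low byte, so shifting right by 8 isolates the OUI. A hedged sketch of that extraction (helper name is hypothetical):

/* Sketch: pull the 24-bit vendor OUI out of bus-info-block quadlet 3,
 * which holds OUI[23:0] in its upper 24 bits and the high byte of the
 * chip ID in its low byte. */
#include <linux/types.h>

static u32 config_rom_vendor_oui(const u32 *config_rom)
{
	return config_rom[3] >> 8;	/* e.g. 0x000085 for Canon */
}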
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index c2e194c..7ed08fd1 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -191,6 +191,7 @@
 	struct fwnet_device *dev;
 	u64 guid;
 	u64 fifo;
+	__be32 ip;
 
 	/* guarded by dev->lock */
 	struct list_head pd_list; /* received partial datagrams */
@@ -570,6 +571,8 @@
 				peer->speed = sspd;
 			if (peer->max_payload > max_payload)
 				peer->max_payload = max_payload;
+
+			peer->ip = arp1394->sip;
 		}
 		spin_unlock_irqrestore(&dev->lock, flags);
 
@@ -1470,6 +1473,7 @@
 	peer->dev = dev;
 	peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
 	peer->fifo = FWNET_NO_FIFO_ADDR;
+	peer->ip = 0;
 	INIT_LIST_HEAD(&peer->pd_list);
 	peer->pdg_size = 0;
 	peer->datagram_label = 0;
@@ -1589,10 +1593,13 @@
 
 	mutex_lock(&fwnet_device_mutex);
 
+	net = dev->netdev;
+	if (net && peer->ip)
+		arp_invalidate(net, peer->ip);
+
 	fwnet_remove_peer(peer, dev);
 
 	if (list_empty(&dev->peer_list)) {
-		net = dev->netdev;
 		unregister_netdev(net);
 
 		if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index d77d120..bd3c61b 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -961,7 +961,7 @@
 	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
 		pages[AR_BUFFERS + i] = ctx->pages[i];
 	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
-				 -1, PAGE_KERNEL_RO);
+				 -1, PAGE_KERNEL);
 	if (!ctx->buffer)
 		goto out_of_memory;
 
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index e8b6a13..e710424 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -27,7 +27,7 @@
 	  using the kernel parameter 'edd={on|skipmbr|off}'.
 
 config FIRMWARE_MEMMAP
-    bool "Add firmware-provided memory map to sysfs" if EMBEDDED
+    bool "Add firmware-provided memory map to sysfs" if EXPERT
     default X86
     help
       Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index e28e41668..bcb1126 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -378,10 +378,17 @@
 
 static void __init dmi_dump_ids(void)
 {
+	const char *board;	/* Board Name is optional */
+
 	printk(KERN_DEBUG "DMI: ");
-	print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
-	printk(KERN_CONT "/");
+	print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
+	printk(KERN_CONT " ");
 	print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+	if (board) {
+		printk(KERN_CONT "/");
+		print_filtered(board);
+	}
 	printk(KERN_CONT ", BIOS ");
 	print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
 	printk(KERN_CONT " ");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 082495b..664660e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -118,7 +118,7 @@
 
 config GPIO_VX855
 	tristate "VIA VX855/VX875 GPIO"
-	depends on GPIOLIB
+	depends on GPIOLIB && MFD_SUPPORT && PCI
 	select MFD_CORE
 	select MFD_VX855
 	help
@@ -295,7 +295,7 @@
 
 config GPIO_CS5535
 	tristate "AMD CS5535/CS5536 GPIO support"
-	depends on PCI && !CS5535_GPIO
+	depends on PCI && X86 && !CS5535_GPIO
 	help
 	  The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
 	  can be used for quite a number of things.  The CS5535/6 is found on
@@ -333,6 +333,15 @@
 	  which is an IOH(Input/Output Hub) for x86 embedded processor.
 	  This driver can access PCH GPIO device.
 
+config GPIO_ML_IOH
+	tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+	depends on PCI
+	help
+	  ML7213 is a companion chip for the Intel Atom E6xx series.
+	  This driver can be used for the OKI SEMICONDUCTOR ML7213 IOH
+	  (Input/Output Hub), which is intended for IVI (In-Vehicle
+	  Infotainment) use.
+	  This driver can access the IOH's GPIO device.
+
 config GPIO_TIMBERDALE
 	bool "Support for timberdale GPIO IP"
 	depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -342,6 +351,7 @@
 config GPIO_RDC321X
 	tristate "RDC R-321x GPIO support"
 	depends on PCI && GPIOLIB
+	select MFD_SUPPORT
 	select MFD_CORE
 	select MFD_RDC321X
 	help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 39bfd7a..3351cf8 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -41,3 +41,4 @@
 obj-$(CONFIG_GPIO_JANZ_TTL)	+= janz-ttl.o
 obj-$(CONFIG_GPIO_SX150X)	+= sx150x.o
 obj-$(CONFIG_GPIO_VX855)	+= vx855_gpio.o
+obj-$(CONFIG_GPIO_ML_IOH)	+= ml_ioh_gpio.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
index 0871f78..33fc685 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/adp5588-gpio.c
@@ -146,9 +146,10 @@
 	return dev->irq_base + off;
 }
 
-static void adp5588_irq_bus_lock(unsigned int irq)
+static void adp5588_irq_bus_lock(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+
 	mutex_lock(&dev->irq_lock);
 }
 
@@ -160,9 +161,9 @@
   * and unlocks the bus.
   */
 
-static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
 	int i;
 
 	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
@@ -175,31 +176,31 @@
 	mutex_unlock(&dev->irq_lock);
 }
 
-static void adp5588_irq_mask(unsigned int irq)
+static void adp5588_irq_mask(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	unsigned gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	unsigned gpio = d->irq - dev->irq_base;
 
 	dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
 }
 
-static void adp5588_irq_unmask(unsigned int irq)
+static void adp5588_irq_unmask(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	unsigned gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	unsigned gpio = d->irq - dev->irq_base;
 
 	dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
 }
 
-static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	uint16_t gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	uint16_t gpio = d->irq - dev->irq_base;
 	unsigned bank, bit;
 
 	if ((type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -222,11 +223,11 @@
 
 static struct irq_chip adp5588_irq_chip = {
 	.name			= "adp5588",
-	.mask			= adp5588_irq_mask,
-	.unmask			= adp5588_irq_unmask,
-	.bus_lock		= adp5588_irq_bus_lock,
-	.bus_sync_unlock	= adp5588_irq_bus_sync_unlock,
-	.set_type		= adp5588_irq_set_type,
+	.irq_mask		= adp5588_irq_mask,
+	.irq_unmask		= adp5588_irq_unmask,
+	.irq_bus_lock		= adp5588_irq_bus_lock,
+	.irq_bus_sync_unlock	= adp5588_irq_bus_sync_unlock,
+	.irq_set_type		= adp5588_irq_set_type,
 };
 
 static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
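The adp5588 conversion above moves from the legacy unsigned-int-irq callbacks to the struct irq_data based ones, retrieving the driver instance with irq_data_get_irq_chip_data(). A minimal sketch of the same pattern for a hypothetical chip, not tied to this driver:

/* Sketch: irq_data-based callback style. The chip data is whatever was
 * registered with set_irq_chip_data() for that interrupt. */
#include <linux/irq.h>

struct foo_gpio;			/* hypothetical driver state */

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_gpio *foo = irq_data_get_irq_chip_data(d);

	/* ... record d->irq as masked in 'foo', flush on bus_sync_unlock ... */
	(void)foo;
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
};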
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index d3e55a0..0d05ea7 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -11,13 +11,13 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
 #include <linux/cs5535.h>
+#include <asm/msr.h>
 
 #define DRV_NAME "cs5535-gpio"
-#define GPIO_BAR 1
 
 /*
  * Some GPIO pins
@@ -46,7 +46,7 @@
 	struct gpio_chip chip;
 	resource_size_t base;
 
-	struct pci_dev *pdev;
+	struct platform_device *pdev;
 	spinlock_t lock;
 } cs5535_gpio_chip;
 
@@ -144,6 +144,57 @@
 }
 EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
 
+int cs5535_gpio_set_irq(unsigned group, unsigned irq)
+{
+	uint32_t lo, hi;
+
+	if (group > 7 || irq > 15)
+		return -EINVAL;
+
+	rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+
+	lo &= ~(0xF << (group * 4));
+	lo |= (irq & 0xF) << (group * 4);
+
+	wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_set_irq);
+
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme)
+{
+	struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+	uint32_t shift = (offset % 8) * 4;
+	unsigned long flags;
+	uint32_t val;
+
+	if (offset >= 24)
+		offset = GPIO_MAP_W;
+	else if (offset >= 16)
+		offset = GPIO_MAP_Z;
+	else if (offset >= 8)
+		offset = GPIO_MAP_Y;
+	else
+		offset = GPIO_MAP_X;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	val = inl(chip->base + offset);
+
+	/* Clear whatever was there before */
+	val &= ~(0xF << shift);
+
+	/* Set the new value */
+	val |= ((pair & 7) << shift);
+
+	/* Set the PME bit if this is a PME event */
+	if (pme)
+		val |= (1 << (shift + 3));
+
+	outl(val, chip->base + offset);
+	spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_setup_event);
+
 /*
  * Generic gpio_chip API support.
  */
@@ -249,10 +300,10 @@
 	},
 };
 
-static int __init cs5535_gpio_probe(struct pci_dev *pdev,
-		const struct pci_device_id *pci_id)
+static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
 {
-	int err;
+	struct resource *res;
+	int err = -EIO;
 	ulong mask_orig = mask;
 
 	/* There are two ways to get the GPIO base address; one is by
@@ -262,25 +313,23 @@
 	 * it turns out to be unreliable in the face of crappy BIOSes, we
 	 * can always go back to using MSRs.. */
 
-	err = pci_enable_device_io(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "can't enable device IO\n");
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
 		goto done;
 	}
 
-	err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
+	if (!request_region(res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "can't request region\n");
 		goto done;
 	}
 
 	/* set up the driver-specific struct */
-	cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
+	cs5535_gpio_chip.base = res->start;
 	cs5535_gpio_chip.pdev = pdev;
 	spin_lock_init(&cs5535_gpio_chip.lock);
 
-	dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
-			(unsigned long long) cs5535_gpio_chip.base);
+	dev_info(&pdev->dev, "reserved resource region %pR\n", res);
 
 	/* mask out reserved pins */
 	mask &= 0x1F7FFFFF;
@@ -298,78 +347,49 @@
 	if (err)
 		goto release_region;
 
-	dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
+	dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
 	return 0;
 
 release_region:
-	pci_release_region(pdev, GPIO_BAR);
+	release_region(res->start, resource_size(res));
 done:
 	return err;
 }
 
-static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
+static int __devexit cs5535_gpio_remove(struct platform_device *pdev)
 {
+	struct resource *r;
 	int err;
 
 	err = gpiochip_remove(&cs5535_gpio_chip.chip);
 	if (err) {
 		/* uhh? */
 		dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
+		return err;
 	}
-	pci_release_region(pdev, GPIO_BAR);
+
+	r = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	release_region(r->start, resource_size(r));
+	return 0;
 }
 
-static struct pci_device_id cs5535_gpio_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
-	{ 0, },
+static struct platform_driver cs5535_gpio_drv = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = cs5535_gpio_probe,
+	.remove = __devexit_p(cs5535_gpio_remove),
 };
-MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
-
-/*
- * We can't use the standard PCI driver registration stuff here, since
- * that allows only one driver to bind to each PCI device (and we want
- * multiple drivers to be able to bind to the device).  Instead, manually
- * scan for the PCI device, request a single region, and keep track of the
- * devices that we're using.
- */
-
-static int __init cs5535_gpio_scan_pci(void)
-{
-	struct pci_dev *pdev;
-	int err = -ENODEV;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
-		pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
-				cs5535_gpio_pci_tbl[i].device, NULL);
-		if (pdev) {
-			err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
-			if (err)
-				pci_dev_put(pdev);
-
-			/* we only support a single CS5535/6 southbridge */
-			break;
-		}
-	}
-
-	return err;
-}
-
-static void __exit cs5535_gpio_free_pci(void)
-{
-	cs5535_gpio_remove(cs5535_gpio_chip.pdev);
-	pci_dev_put(cs5535_gpio_chip.pdev);
-}
 
 static int __init cs5535_gpio_init(void)
 {
-	return cs5535_gpio_scan_pci();
+	return platform_driver_register(&cs5535_gpio_drv);
 }
 
 static void __exit cs5535_gpio_exit(void)
 {
-	cs5535_gpio_free_pci();
+	platform_driver_unregister(&cs5535_gpio_drv);
 }
 
 module_init(cs5535_gpio_init);
@@ -378,3 +398,4 @@
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
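
The cs5535-gpio conversion above replaces the hand-rolled PCI scan with an ordinary platform driver: the probe receives its I/O range as a platform resource (supplied by an MFD driver for the CS5535/6 southbridge) and reserves it with request_region(). A condensed sketch of that shape, with foo_* placeholder names and the gpio_chip setup elided:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>

static int __devinit foo_gpio_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* The I/O range now arrives as a platform resource rather than
	 * being read out of a PCI BAR. */
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res)
		return -EIO;

	if (!request_region(res->start, resource_size(res), pdev->name))
		return -EIO;

	/* ... set up and register the gpio_chip using res->start ... */
	return 0;
}

static struct platform_driver foo_gpio_driver = {
	.driver = {
		.name	= "foo-gpio",	/* must match the platform device name */
		.owner	= THIS_MODULE,
	},
	.probe	= foo_gpio_probe,
};

Registration and teardown then go through platform_driver_register()/platform_driver_unregister(), exactly as in the new module init/exit above.
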
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 64db9dc..54d70a4 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -134,10 +134,10 @@
 	return lnw->irq_base + offset;
 }
 
-static int lnw_irq_type(unsigned irq, unsigned type)
+static int lnw_irq_type(struct irq_data *d, unsigned type)
 {
-	struct lnw_gpio *lnw = get_irq_chip_data(irq);
-	u32 gpio = irq - lnw->irq_base;
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = d->irq - lnw->irq_base;
 	unsigned long flags;
 	u32 value;
 	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
@@ -162,19 +162,19 @@
 	return 0;
 }
 
-static void lnw_irq_unmask(unsigned irq)
+static void lnw_irq_unmask(struct irq_data *d)
 {
 }
 
-static void lnw_irq_mask(unsigned irq)
+static void lnw_irq_mask(struct irq_data *d)
 {
 }
 
 static struct irq_chip lnw_irqchip = {
 	.name		= "LNW-GPIO",
-	.mask		= lnw_irq_mask,
-	.unmask		= lnw_irq_unmask,
-	.set_type	= lnw_irq_type,
+	.irq_mask	= lnw_irq_mask,
+	.irq_unmask	= lnw_irq_unmask,
+	.irq_set_type	= lnw_irq_type,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = {   /* pin number */
@@ -187,7 +187,7 @@
 
 static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
+	struct lnw_gpio *lnw = get_irq_data(irq);
 	u32 base, gpio;
 	void __iomem *gedr;
 	u32 gedr_v;
@@ -206,7 +206,12 @@
 		/* clear the edge detect status bit */
 		writel(gedr_v, gedr);
 	}
-	desc->chip->eoi(irq);
+
+	if (desc->chip->irq_eoi)
+		desc->chip->irq_eoi(irq_get_irq_data(irq));
+	else
+		dev_warn(lnw->chip.dev, "missing EOI handler for irq %d\n", irq);
+
 }
 
 static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index 9cad60f..9e1d01f 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -327,40 +327,40 @@
 	return chip->irq_base + off;
 }
 
-static void max732x_irq_mask(unsigned int irq)
+static void max732x_irq_mask(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+	chip->irq_mask_cur &= ~(1 << (d->irq - chip->irq_base));
 }
 
-static void max732x_irq_unmask(unsigned int irq)
+static void max732x_irq_unmask(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+	chip->irq_mask_cur |= 1 << (d->irq - chip->irq_base);
 }
 
-static void max732x_irq_bus_lock(unsigned int irq)
+static void max732x_irq_bus_lock(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&chip->irq_lock);
 	chip->irq_mask_cur = chip->irq_mask;
 }
 
-static void max732x_irq_bus_sync_unlock(unsigned int irq)
+static void max732x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	max732x_irq_update_mask(chip);
 	mutex_unlock(&chip->irq_lock);
 }
 
-static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+static int max732x_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
-	uint16_t off = irq - chip->irq_base;
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+	uint16_t off = d->irq - chip->irq_base;
 	uint16_t mask = 1 << off;
 
 	if (!(mask & chip->dir_input)) {
@@ -371,7 +371,7 @@
 
 	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -390,11 +390,11 @@
 
 static struct irq_chip max732x_irq_chip = {
 	.name			= "max732x",
-	.mask			= max732x_irq_mask,
-	.unmask			= max732x_irq_unmask,
-	.bus_lock		= max732x_irq_bus_lock,
-	.bus_sync_unlock	= max732x_irq_bus_sync_unlock,
-	.set_type		= max732x_irq_set_type,
+	.irq_mask		= max732x_irq_mask,
+	.irq_unmask		= max732x_irq_unmask,
+	.irq_bus_lock		= max732x_irq_bus_lock,
+	.irq_bus_sync_unlock	= max732x_irq_bus_sync_unlock,
+	.irq_set_type		= max732x_irq_set_type,
 };
 
 static uint8_t max732x_irq_pending(struct max732x_chip *chip)
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
new file mode 100644
index 0000000..7f6f01a
--- /dev/null
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCI_VENDOR_ID_ROHM             0x10DB
+
+struct ioh_reg_comn {
+	u32	ien;
+	u32	istatus;
+	u32	idisp;
+	u32	iclr;
+	u32	imask;
+	u32	imaskclr;
+	u32	po;
+	u32	pi;
+	u32	pm;
+	u32	im_0;
+	u32	im_1;
+	u32	reserved;
+};
+
+struct ioh_regs {
+	struct ioh_reg_comn regs[8];
+	u32 reserve1[16];
+	u32 ioh_sel_reg[4];
+	u32 reserve2[11];
+	u32 srst;
+};
+
+/**
+ * struct ioh_gpio_reg_data - The register store data.
+ * @po_reg:	To store contents of PO register.
+ * @pm_reg:	To store contents of PM register.
+ */
+struct ioh_gpio_reg_data {
+	u32 po_reg;
+	u32 pm_reg;
+};
+
+/**
+ * struct ioh_gpio - GPIO private data structure.
+ * @base:			PCI base address of memory mapped I/O registers.
+ * @reg:			Memory mapped IOH GPIO register list.
+ * @dev:			Pointer to device structure.
+ * @gpio:			Data for GPIO infrastructure.
+ * @ioh_gpio_reg:		Register contents saved here across suspend.
+ * @lock:			Protects register read-modify-write sequences.
+ * @ch:				GPIO channel number.
+ */
+struct ioh_gpio {
+	void __iomem *base;
+	struct ioh_regs __iomem *reg;
+	struct device *dev;
+	struct gpio_chip gpio;
+	struct ioh_gpio_reg_data ioh_gpio_reg;
+	struct mutex lock;
+	int ch;
+};
+
+static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
+
+static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+	u32 reg_val;
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+
+	mutex_lock(&chip->lock);
+	reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+	if (val)
+		reg_val |= (1 << nr);
+	else
+		reg_val &= ~(1 << nr);
+
+	iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+	mutex_unlock(&chip->lock);
+}
+
+static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+
+	return ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr);
+}
+
+static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+				     int val)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+	u32 pm;
+	u32 reg_val;
+
+	mutex_lock(&chip->lock);
+	pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+					((1 << num_ports[chip->ch]) - 1);
+	pm |= (1 << nr);
+	iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+
+	reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+	if (val)
+		reg_val |= (1 << nr);
+	else
+		reg_val &= ~(1 << nr);
+
+	iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+
+static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+	u32 pm;
+
+	mutex_lock(&chip->lock);
+	pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+				((1 << num_ports[chip->ch]) - 1);
+	pm &= ~(1 << nr);
+	iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+
+/*
+ * Save the PO and PM register configuration so it can be restored later.
+ */
+static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
+{
+	chip->ioh_gpio_reg.po_reg = ioread32(&chip->reg->regs[chip->ch].po);
+	chip->ioh_gpio_reg.pm_reg = ioread32(&chip->reg->regs[chip->ch].pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
+{
+	/* restore contents of PO register */
+	iowrite32(chip->ioh_gpio_reg.po_reg, &chip->reg->regs[chip->ch].po);
+	/* restore contents of PM register */
+	iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm);
+}
+
+static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
+{
+	struct gpio_chip *gpio = &chip->gpio;
+
+	gpio->label = dev_name(chip->dev);
+	gpio->owner = THIS_MODULE;
+	gpio->direction_input = ioh_gpio_direction_input;
+	gpio->get = ioh_gpio_get;
+	gpio->direction_output = ioh_gpio_direction_output;
+	gpio->set = ioh_gpio_set;
+	gpio->dbg_show = NULL;
+	gpio->base = -1;
+	gpio->ngpio = num_port;
+	gpio->can_sleep = 0;
+}
+
+static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *id)
+{
+	int ret;
+	int i;
+	struct ioh_gpio *chip;
+	void __iomem *base;
+	struct ioh_gpio *chip_save;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s : pci_enable_device failed", __func__);
+		goto err_pci_enable;
+	}
+
+	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_request_regions failed-%d", ret);
+		goto err_request_regions;
+	}
+
+	base = pci_iomap(pdev, 1, 0);
+	if (!base) {
+		dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
+		ret = -ENOMEM;
+		goto err_iomap;
+	}
+
+	chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL);
+	if (chip_save == NULL) {
+		dev_err(&pdev->dev, "%s : kzalloc failed", __func__);
+		ret = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	chip = chip_save;
+	for (i = 0; i < 8; i++, chip++) {
+		chip->dev = &pdev->dev;
+		chip->base = base;
+		chip->reg = chip->base;
+		chip->ch = i;
+		mutex_init(&chip->lock);
+		ioh_gpio_setup(chip, num_ports[i]);
+		ret = gpiochip_add(&chip->gpio);
+		if (ret) {
+			dev_err(&pdev->dev, "IOH gpio: Failed to register GPIO\n");
+			goto err_gpiochip_add;
+		}
+	}
+
+	chip = chip_save;
+	pci_set_drvdata(pdev, chip);
+
+	return 0;
+
+err_gpiochip_add:
+	for (; i != 0; i--) {
+		chip--;
+		ret = gpiochip_remove(&chip->gpio);
+		if (ret)
+			dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
+	}
+	kfree(chip_save);
+
+err_kzalloc:
+	pci_iounmap(pdev, base);
+
+err_iomap:
+	pci_release_regions(pdev);
+
+err_request_regions:
+	pci_disable_device(pdev);
+
+err_pci_enable:
+
+	dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+	return ret;
+}
+
+static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
+{
+	int err;
+	int i;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+	struct ioh_gpio *chip_save;
+
+	chip_save = chip;
+	for (i = 0; i < 8; i++, chip++) {
+		err = gpiochip_remove(&chip->gpio);
+		if (err)
+			dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+	}
+
+	chip = chip_save;
+	pci_iounmap(pdev, chip->base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	s32 ret;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+	ioh_gpio_save_reg_conf(chip);
+	ioh_gpio_restore_reg_conf(chip);
+
+	ret = pci_save_state(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+		return ret;
+	}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+	ret = pci_enable_wake(pdev, PCI_D0, 1);
+	if (ret)
+		dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+	return 0;
+}
+
+static int ioh_gpio_resume(struct pci_dev *pdev)
+{
+	s32 ret;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+	ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+	pci_set_power_state(pdev, PCI_D0);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+		return ret;
+	}
+	pci_restore_state(pdev);
+
+	iowrite32(0x01, &chip->reg->srst);
+	iowrite32(0x00, &chip->reg->srst);
+	ioh_gpio_restore_reg_conf(chip);
+
+	return 0;
+}
+#else
+#define ioh_gpio_suspend NULL
+#define ioh_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id);
+
+static struct pci_driver ioh_gpio_driver = {
+	.name = "ml_ioh_gpio",
+	.id_table = ioh_gpio_pcidev_id,
+	.probe = ioh_gpio_probe,
+	.remove = __devexit_p(ioh_gpio_remove),
+	.suspend = ioh_gpio_suspend,
+	.resume = ioh_gpio_resume
+};
+
+static int __init ioh_gpio_pci_init(void)
+{
+	return pci_register_driver(&ioh_gpio_driver);
+}
+module_init(ioh_gpio_pci_init);
+
+static void __exit ioh_gpio_pci_exit(void)
+{
+	pci_unregister_driver(&ioh_gpio_driver);
+}
+module_exit(ioh_gpio_pci_exit);
+
+MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 5018666..b473429e 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -60,6 +60,7 @@
 	unsigned gpio_start;
 	uint16_t reg_output;
 	uint16_t reg_direction;
+	struct mutex i2c_lock;
 
 #ifdef CONFIG_GPIO_PCA953X_IRQ
 	struct mutex irq_lock;
@@ -119,13 +120,17 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	reg_val = chip->reg_direction | (1u << off);
 	ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_direction = reg_val;
-	return 0;
+	ret = 0;
+exit:
+	mutex_unlock(&chip->i2c_lock);
+	return ret;
 }
 
 static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -137,6 +142,7 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	/* set output level */
 	if (val)
 		reg_val = chip->reg_output | (1u << off);
@@ -145,7 +151,7 @@
 
 	ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_output = reg_val;
 
@@ -153,10 +159,13 @@
 	reg_val = chip->reg_direction & ~(1u << off);
 	ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_direction = reg_val;
-	return 0;
+	ret = 0;
+exit:
+	mutex_unlock(&chip->i2c_lock);
+	return ret;
 }
 
 static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -167,7 +176,9 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+	mutex_unlock(&chip->i2c_lock);
 	if (ret < 0) {
 		/* NOTE:  diagnostic already emitted; that's all we should
 		 * do unless gpio_*_value_cansleep() calls become different
@@ -187,6 +198,7 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	if (val)
 		reg_val = chip->reg_output | (1u << off);
 	else
@@ -194,9 +206,11 @@
 
 	ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
 	if (ret)
-		return;
+		goto exit;
 
 	chip->reg_output = reg_val;
+exit:
+	mutex_unlock(&chip->i2c_lock);
 }
 
 static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -228,30 +242,30 @@
 	return chip->irq_base + off;
 }
 
-static void pca953x_irq_mask(unsigned int irq)
+static void pca953x_irq_mask(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask &= ~(1 << (irq - chip->irq_base));
+	chip->irq_mask &= ~(1 << (d->irq - chip->irq_base));
 }
 
-static void pca953x_irq_unmask(unsigned int irq)
+static void pca953x_irq_unmask(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask |= 1 << (irq - chip->irq_base);
+	chip->irq_mask |= 1 << (d->irq - chip->irq_base);
 }
 
-static void pca953x_irq_bus_lock(unsigned int irq)
+static void pca953x_irq_bus_lock(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&chip->irq_lock);
 }
 
-static void pca953x_irq_bus_sync_unlock(unsigned int irq)
+static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 	uint16_t new_irqs;
 	uint16_t level;
 
@@ -268,15 +282,15 @@
 	mutex_unlock(&chip->irq_lock);
 }
 
-static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
+static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
-	uint16_t level = irq - chip->irq_base;
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+	uint16_t level = d->irq - chip->irq_base;
 	uint16_t mask = 1 << level;
 
 	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -295,11 +309,11 @@
 
 static struct irq_chip pca953x_irq_chip = {
 	.name			= "pca953x",
-	.mask			= pca953x_irq_mask,
-	.unmask			= pca953x_irq_unmask,
-	.bus_lock		= pca953x_irq_bus_lock,
-	.bus_sync_unlock	= pca953x_irq_bus_sync_unlock,
-	.set_type		= pca953x_irq_set_type,
+	.irq_mask		= pca953x_irq_mask,
+	.irq_unmask		= pca953x_irq_unmask,
+	.irq_bus_lock		= pca953x_irq_bus_lock,
+	.irq_bus_sync_unlock	= pca953x_irq_bus_sync_unlock,
+	.irq_set_type		= pca953x_irq_set_type,
 };
 
 static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
@@ -517,6 +531,8 @@
 
 	chip->names = pdata->names;
 
+	mutex_init(&chip->i2c_lock);
+
 	/* initialize cached registers from their original values.
 	 * we can't share this chip with another i2c master.
 	 */
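
The i2c_lock introduced above serializes the read-modify-write cycles on the cached reg_output/reg_direction values against the I2C transfers that push them to the expander; every path takes the mutex, writes the register, and only then commits the cache. A generic sketch of that pattern with hypothetical bar_* names:

#include <linux/mutex.h>
#include <linux/types.h>

struct bar_chip {
	struct mutex lock;	/* serializes cache updates and bus access */
	u16 reg_output;		/* cached copy of the output register */
};

/* write_reg stands in for the driver's bus accessor (e.g. an I2C write). */
static int bar_set_output_bit(struct bar_chip *chip, unsigned off, int val,
			      int (*write_reg)(struct bar_chip *, u16))
{
	u16 reg_val;
	int ret;

	mutex_lock(&chip->lock);
	if (val)
		reg_val = chip->reg_output | (1u << off);
	else
		reg_val = chip->reg_output & ~(1u << off);

	ret = write_reg(chip, reg_val);
	if (!ret)
		chip->reg_output = reg_val;	/* commit the cache only on success */
	mutex_unlock(&chip->lock);

	return ret;
}
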
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index 0eba0a7..2c6af87 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -286,6 +286,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
 	{ 0, }
 };
+MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
 
 static struct pci_driver pch_gpio_driver = {
 	.name = "pch_gpio",
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 5005990..2975d22 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -129,10 +129,10 @@
 /*
  * PL061 GPIO IRQ
  */
-static void pl061_irq_disable(unsigned irq)
+static void pl061_irq_disable(struct irq_data *d)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpioie;
 
@@ -143,10 +143,10 @@
 	spin_unlock_irqrestore(&chip->irq_lock, flags);
 }
 
-static void pl061_irq_enable(unsigned irq)
+static void pl061_irq_enable(struct irq_data *d)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpioie;
 
@@ -157,10 +157,10 @@
 	spin_unlock_irqrestore(&chip->irq_lock, flags);
 }
 
-static int pl061_irq_type(unsigned irq, unsigned trigger)
+static int pl061_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpiois, gpioibe, gpioiev;
 
@@ -203,9 +203,9 @@
 
 static struct irq_chip pl061_irqchip = {
 	.name		= "GPIO",
-	.enable		= pl061_irq_enable,
-	.disable	= pl061_irq_disable,
-	.set_type	= pl061_irq_type,
+	.irq_enable	= pl061_irq_enable,
+	.irq_disable	= pl061_irq_disable,
+	.irq_set_type	= pl061_irq_type,
 };
 
 static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -214,7 +214,7 @@
 	struct list_head *ptr;
 	struct pl061_gpio *chip;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	list_for_each(ptr, chip_list) {
 		unsigned long pending;
 		int offset;
@@ -229,7 +229,7 @@
 		for_each_set_bit(offset, &pending, PL061_GPIO_NR)
 			generic_handle_irq(pl061_to_irq(&chip->gc, offset));
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static int pl061_probe(struct amba_device *dev, struct amba_id *id)
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c
index 7c9e6a0..eb2901f 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/stmpe-gpio.c
@@ -122,10 +122,10 @@
 	.can_sleep		= 1,
 };
 
-static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -145,16 +145,16 @@
 	return 0;
 }
 
-static void stmpe_gpio_irq_lock(unsigned int irq)
+static void stmpe_gpio_irq_lock(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&stmpe_gpio->irq_lock);
 }
 
-static void stmpe_gpio_irq_sync_unlock(unsigned int irq)
+static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
 	struct stmpe *stmpe = stmpe_gpio->stmpe;
 	int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
 	static const u8 regmap[] = {
@@ -180,20 +180,20 @@
 	mutex_unlock(&stmpe_gpio->irq_lock);
 }
 
-static void stmpe_gpio_irq_mask(unsigned int irq)
+static void stmpe_gpio_irq_mask(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	stmpe_gpio->regs[REG_IE][regoffset] &= ~mask;
 }
 
-static void stmpe_gpio_irq_unmask(unsigned int irq)
+static void stmpe_gpio_irq_unmask(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -202,11 +202,11 @@
 
 static struct irq_chip stmpe_gpio_irq_chip = {
 	.name			= "stmpe-gpio",
-	.bus_lock		= stmpe_gpio_irq_lock,
-	.bus_sync_unlock	= stmpe_gpio_irq_sync_unlock,
-	.mask			= stmpe_gpio_irq_mask,
-	.unmask			= stmpe_gpio_irq_unmask,
-	.set_type		= stmpe_gpio_irq_set_type,
+	.irq_bus_lock		= stmpe_gpio_irq_lock,
+	.irq_bus_sync_unlock	= stmpe_gpio_irq_sync_unlock,
+	.irq_mask		= stmpe_gpio_irq_mask,
+	.irq_unmask		= stmpe_gpio_irq_unmask,
+	.irq_set_type		= stmpe_gpio_irq_set_type,
 };
 
 static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index 823559a..e60be00 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -304,36 +304,36 @@
 	return chip->irq_base + offset;
 }
 
-static void sx150x_irq_mask(unsigned int irq)
+static void sx150x_irq_mask(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
 	sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
 }
 
-static void sx150x_irq_unmask(unsigned int irq)
+static void sx150x_irq_unmask(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
 	sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
 			 chip->irq_sense >> (n * 2));
 }
 
-static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n, val = 0;
 
@@ -341,7 +341,7 @@
 		return -EINVAL;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	if (flow_type & IRQ_TYPE_EDGE_RISING)
 		val |= 0x1;
@@ -386,9 +386,9 @@
 	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
 }
 
-static void sx150x_irq_bus_lock(unsigned int irq)
+static void sx150x_irq_bus_lock(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
@@ -396,9 +396,9 @@
 	mutex_lock(&chip->lock);
 }
 
-static void sx150x_irq_bus_sync_unlock(unsigned int irq)
+static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
@@ -437,16 +437,16 @@
 	if (pdata->oscio_is_gpo)
 		++chip->gpio_chip.ngpio;
 
-	chip->irq_chip.name            = client->name;
-	chip->irq_chip.mask            = sx150x_irq_mask;
-	chip->irq_chip.unmask          = sx150x_irq_unmask;
-	chip->irq_chip.set_type        = sx150x_irq_set_type;
-	chip->irq_chip.bus_lock        = sx150x_irq_bus_lock;
-	chip->irq_chip.bus_sync_unlock = sx150x_irq_bus_sync_unlock;
-	chip->irq_summary              = -1;
-	chip->irq_base                 = -1;
-	chip->irq_sense                = 0;
-	chip->irq_set_type_pending     = 0;
+	chip->irq_chip.name                = client->name;
+	chip->irq_chip.irq_mask            = sx150x_irq_mask;
+	chip->irq_chip.irq_unmask          = sx150x_irq_unmask;
+	chip->irq_chip.irq_set_type        = sx150x_irq_set_type;
+	chip->irq_chip.irq_bus_lock        = sx150x_irq_bus_lock;
+	chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+	chip->irq_summary                  = -1;
+	chip->irq_base                     = -1;
+	chip->irq_sense                    = 0;
+	chip->irq_set_type_pending         = 0;
 }
 
 static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/tc3589x-gpio.c
index 180d584..27200af 100644
--- a/drivers/gpio/tc3589x-gpio.c
+++ b/drivers/gpio/tc3589x-gpio.c
@@ -110,10 +110,10 @@
 	.can_sleep		= 1,
 };
 
-static int tc3589x_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -137,16 +137,16 @@
 	return 0;
 }
 
-static void tc3589x_gpio_irq_lock(unsigned int irq)
+static void tc3589x_gpio_irq_lock(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&tc3589x_gpio->irq_lock);
 }
 
-static void tc3589x_gpio_irq_sync_unlock(unsigned int irq)
+static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	static const u8 regmap[] = {
 		[REG_IBE]	= TC3589x_GPIOIBE0,
@@ -172,20 +172,20 @@
 	mutex_unlock(&tc3589x_gpio->irq_lock);
 }
 
-static void tc3589x_gpio_irq_mask(unsigned int irq)
+static void tc3589x_gpio_irq_mask(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
 }
 
-static void tc3589x_gpio_irq_unmask(unsigned int irq)
+static void tc3589x_gpio_irq_unmask(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -194,11 +194,11 @@
 
 static struct irq_chip tc3589x_gpio_irq_chip = {
 	.name			= "tc3589x-gpio",
-	.bus_lock		= tc3589x_gpio_irq_lock,
-	.bus_sync_unlock	= tc3589x_gpio_irq_sync_unlock,
-	.mask			= tc3589x_gpio_irq_mask,
-	.unmask			= tc3589x_gpio_irq_unmask,
-	.set_type		= tc3589x_gpio_irq_set_type,
+	.irq_bus_lock		= tc3589x_gpio_irq_lock,
+	.irq_bus_sync_unlock	= tc3589x_gpio_irq_sync_unlock,
+	.irq_mask		= tc3589x_gpio_irq_mask,
+	.irq_unmask		= tc3589x_gpio_irq_unmask,
+	.irq_set_type		= tc3589x_gpio_irq_set_type,
 };
 
 static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 4529366..58c8f30 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -109,10 +109,10 @@
 /*
  * GPIO IRQ
  */
-static void timbgpio_irq_disable(unsigned irq)
+static void timbgpio_irq_disable(struct irq_data *d)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgpio->lock, flags);
@@ -121,10 +121,10 @@
 	spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
-static void timbgpio_irq_enable(unsigned irq)
+static void timbgpio_irq_enable(struct irq_data *d)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgpio->lock, flags);
@@ -133,10 +133,10 @@
 	spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
-static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 	u32 lvr, flr, bflr = 0;
 	u32 ver;
@@ -199,7 +199,7 @@
 	unsigned long ipr;
 	int offset;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
 	ipr = ioread32(tgpio->membase + TGPIO_IPR);
 	iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
@@ -217,9 +217,9 @@
 
 static struct irq_chip timbgpio_irqchip = {
 	.name		= "GPIO",
-	.enable		= timbgpio_irq_enable,
-	.disable	= timbgpio_irq_disable,
-	.set_type	= timbgpio_irq_type,
+	.irq_enable	= timbgpio_irq_enable,
+	.irq_disable	= timbgpio_irq_disable,
+	.irq_set_type	= timbgpio_irq_type,
 };
 
 static int __devinit timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
index b16c9a8..cffa3bd 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/vr41xx_giu.c
@@ -111,69 +111,69 @@
 	return data;
 }
 
-static void ack_giuint_low(unsigned int irq)
+static void ack_giuint_low(struct irq_data *d)
 {
-	giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
-static void mask_giuint_low(unsigned int irq)
+static void mask_giuint_low(struct irq_data *d)
 {
-	giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
-static void mask_ack_giuint_low(unsigned int irq)
+static void mask_ack_giuint_low(struct irq_data *d)
 {
 	unsigned int pin;
 
-	pin = GPIO_PIN_OF_IRQ(irq);
+	pin = GPIO_PIN_OF_IRQ(d->irq);
 	giu_clear(GIUINTENL, 1 << pin);
 	giu_write(GIUINTSTATL, 1 << pin);
 }
 
-static void unmask_giuint_low(unsigned int irq)
+static void unmask_giuint_low(struct irq_data *d)
 {
-	giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
 static struct irq_chip giuint_low_irq_chip = {
 	.name		= "GIUINTL",
-	.ack		= ack_giuint_low,
-	.mask		= mask_giuint_low,
-	.mask_ack	= mask_ack_giuint_low,
-	.unmask		= unmask_giuint_low,
+	.irq_ack	= ack_giuint_low,
+	.irq_mask	= mask_giuint_low,
+	.irq_mask_ack	= mask_ack_giuint_low,
+	.irq_unmask	= unmask_giuint_low,
 };
 
-static void ack_giuint_high(unsigned int irq)
+static void ack_giuint_high(struct irq_data *d)
 {
 	giu_write(GIUINTSTATH,
-		  1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+		  1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
-static void mask_giuint_high(unsigned int irq)
+static void mask_giuint_high(struct irq_data *d)
 {
-	giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+	giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
-static void mask_ack_giuint_high(unsigned int irq)
+static void mask_ack_giuint_high(struct irq_data *d)
 {
 	unsigned int pin;
 
-	pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
+	pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
 	giu_clear(GIUINTENH, 1 << pin);
 	giu_write(GIUINTSTATH, 1 << pin);
 }
 
-static void unmask_giuint_high(unsigned int irq)
+static void unmask_giuint_high(struct irq_data *d)
 {
-	giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+	giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
 static struct irq_chip giuint_high_irq_chip = {
 	.name		= "GIUINTH",
-	.ack		= ack_giuint_high,
-	.mask		= mask_giuint_high,
-	.mask_ack	= mask_ack_giuint_high,
-	.unmask		= unmask_giuint_high,
+	.irq_ack	= ack_giuint_high,
+	.irq_mask	= mask_giuint_high,
+	.irq_mask_ack	= mask_ack_giuint_high,
+	.irq_unmask	= unmask_giuint_high,
 };
 
 static int giu_get_irq(unsigned int irq)
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 618398e..c822baa 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -35,6 +35,29 @@
 	return container_of(chip, struct wm8994_gpio, gpio_chip);
 }
 
+static int wm8994_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+	struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+	switch (wm8994->type) {
+	case WM8958:
+		switch (offset) {
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+		case 6:
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static int wm8994_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
 {
 	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -136,6 +159,7 @@
 static struct gpio_chip template_chip = {
 	.label			= "wm8994",
 	.owner			= THIS_MODULE,
+	.request		= wm8994_gpio_request,
 	.direction_input	= wm8994_gpio_direction_in,
 	.get			= wm8994_gpio_get,
 	.direction_output	= wm8994_gpio_direction_out,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7af4436..0902d44 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,7 +23,7 @@
 	tristate
 	depends on DRM
 	select FB
-	select FRAMEBUFFER_CONSOLE if !EMBEDDED
+	select FRAMEBUFFER_CONSOLE if !EXPERT
 	help
 	  FB and CRTC helpers for KMS drivers.
 
@@ -100,14 +100,16 @@
 config DRM_I915
 	tristate "i915 driver"
 	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
+	select TMPFS
 	select DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2baa670..654faa8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2674,3 +2674,23 @@
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->funcs->reset)
+			crtc->funcs->reset(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->funcs->reset)
+			encoder->funcs->reset(encoder);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->funcs->reset)
+			connector->funcs->reset(connector);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 952b3d4..92369655 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -343,13 +343,12 @@
 	struct drm_encoder *encoder;
 	bool ret = true;
 
-	adjusted_mode = drm_mode_duplicate(dev, mode);
-
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
-
 	if (!crtc->enabled)
 		return true;
 
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+
 	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
@@ -437,10 +436,9 @@
 	 */
 	drm_calc_timestamping_constants(crtc);
 
-	/* XXX free adjustedmode */
-	drm_mode_destroy(dev, adjusted_mode);
 	/* FIXME: add subpixel order */
 done:
+	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
 		crtc->hwmode = saved_hwmode;
 		crtc->mode = saved_mode;
@@ -497,14 +495,17 @@
 
 	crtc_funcs = set->crtc->helper_private;
 
+	if (!set->mode)
+		set->fb = NULL;
+
 	if (set->fb) {
 		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
 				set->crtc->base.id, set->fb->base.id,
 				(int)set->num_connectors, set->x, set->y);
 	} else {
-		DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
-				set->crtc->base.id, (int)set->num_connectors,
-				set->x, set->y);
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+		set->mode = NULL;
+		set->num_connectors = 0;
 	}
 
 	dev = set->crtc->dev;
@@ -649,8 +650,8 @@
 		mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = (set->mode != NULL);
-		if (set->mode != NULL) {
+		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+		if (set->crtc->enabled) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
@@ -665,6 +666,12 @@
 				ret = -EINVAL;
 				goto fail;
 			}
+			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+			for (i = 0; i < set->num_connectors; i++) {
+				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+					      drm_get_connector_name(set->connectors[i]));
+				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+			}
 		}
 		drm_helper_disable_unused_functions(dev);
 	} else if (fb_changed) {
@@ -681,12 +688,6 @@
 			goto fail;
 		}
 	}
-	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-	for (i = 0; i < set->num_connectors; i++) {
-		DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-			      drm_get_connector_name(set->connectors[i]));
-		set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
-	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0307d60..f73ef43 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,25 +607,6 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
-void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
-{
-	info->fix.type = FB_TYPE_PACKED_PIXELS;
-	info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
-		FB_VISUAL_TRUECOLOR;
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
-	info->fix.type_aux = 0;
-	info->fix.xpanstep = 1; /* doing it in hw */
-	info->fix.ypanstep = 1; /* doing it in hw */
-	info->fix.ywrapstep = 0;
-	info->fix.accel = FB_ACCEL_NONE;
-	info->fix.type_aux = 0;
-
-	info->fix.line_length = fb->pitch;
-	return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -691,7 +672,7 @@
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	u16 *red, *green, *blue, *transp;
 	struct drm_crtc *crtc;
-	int i, rc = 0;
+	int i, j, rc = 0;
 	int start;
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -704,7 +685,7 @@
 		transp = cmap->transp;
 		start = cmap->start;
 
-		for (i = 0; i < cmap->len; i++) {
+		for (j = 0; j < cmap->len; j++) {
 			u16 hred, hgreen, hblue, htransp = 0xffff;
 
 			hred = *red++;
@@ -835,7 +816,6 @@
 			mutex_unlock(&dev->mode_config.mutex);
 			return ret;
 		}
-		drm_fb_helper_fill_fix(info, fb_helper->fb);
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
@@ -973,7 +953,6 @@
 
 	if (new_fb) {
 		info->var.pixclock = 0;
-		drm_fb_helper_fill_fix(info, fb_helper->fb);
 		if (register_framebuffer(info) < 0) {
 			return -EINVAL;
 		}
@@ -1000,6 +979,26 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+			    uint32_t depth)
+{
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.type_aux = 0;
+
+	info->fix.line_length = pitch;
+	return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height)
 {
@@ -1534,11 +1533,11 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
  * but the module doesn't depend on any fb console symbols.  At least
  * attempt to load fbcon to avoid leaving the system without a usable console.
  */
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
 static int __init drm_fb_helper_modinit(void)
 {
 	const char *name = "fbcon";
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf3..be9a9c0 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -283,17 +283,18 @@
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, (u64)virt_to_phys(high_memory));
+		   high_memory, (void *)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
 		if (!vma)
 			continue;
 		seq_printf(m,
-			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-			   pt->pid, vma->vm_start, vma->vm_end,
+			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid,
+			   (void *)vma->vm_start, (void *)vma->vm_end,
 			   vma->vm_flags & VM_READ ? 'r' : '-',
 			   vma->vm_flags & VM_WRITE ? 'w' : '-',
 			   vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0054e95..28d1d3c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@
 	 * available. In that case we can't account for this and just
 	 * hope for the best.
 	 */
-	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	}
 
 	/* Invalidate all timestamps while vblank irq's are off. */
 	clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@
 	/* Dot clock in Hz: */
 	dotclock = (u64) crtc->hwmode.clock * 1000;
 
+	/* Fields of interlaced scanout modes are only half a frame duration.
+	 * Double the dotclock to get half the frame-/line-/pixel duration.
+	 */
+	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+		dotclock *= 2;
+
 	/* Valid dotclock? */
 	if (dotclock > 0) {
 		/* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@
 		return -EAGAIN;
 	}
 
-	/* Don't know yet how to handle interlaced or
-	 * double scan modes. Just no-op for now.
-	 */
-	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-		return -ENOTSUPP;
-	}
-
 	/* Get current scanout position with system timestamp.
 	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
 	 * if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@
 	if (rc) {
 		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
-		smp_wmb();
 	}
 
+	smp_mb__before_atomic_inc();
 	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
 }
 
 /**
@@ -1011,7 +1012,8 @@
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int crtc, ret = 0;
+	int ret = 0;
+	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1250,7 +1252,7 @@
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
 	u32 vblcount;
 	s64 diff_ns;
@@ -1258,7 +1260,7 @@
 	unsigned long irqflags;
 
 	if (!dev->num_crtcs)
-		return;
+		return false;
 
 	/* Need timestamp lock to prevent concurrent execution with
 	 * vblank enable/disable, as this would cause inconsistent
@@ -1269,7 +1271,7 @@
 	/* Vblank irq handling disabled. Nothing to do. */
 	if (!dev->vblank_enabled[crtc]) {
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-		return;
+		return false;
 	}
 
 	/* Fetch corresponding timestamp for this vblank interval from
@@ -1293,15 +1295,16 @@
 	 * e.g., due to spurious vblank interrupts. We need to
 	 * ignore those for accounting.
 	 */
-	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
 		/* Store new timestamp in ringbuffer. */
 		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-		smp_wmb();
 
 		/* Increment cooked vblank count. This also atomically commits
 		 * the timestamp computed above.
 		 */
+		smp_mb__before_atomic_inc();
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
@@ -1311,5 +1314,6 @@
 	drm_handle_vblank_events(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92f7578..4ff9b6c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -106,10 +106,19 @@
     }
 }
 
+static const char *agp_type_str(int type)
+{
+	switch (type) {
+	case 0: return " uncached";
+	case 1: return " snooped";
+	default: return "";
+	}
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -118,6 +127,7 @@
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
 		   obj->last_fenced_seqno,
+		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -276,6 +286,37 @@
 	return 0;
 }
 
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
@@ -456,8 +497,14 @@
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (IS_GEN6(dev)) {
+			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
+				   dev_priv->ring[i].name,
+				   I915_READ_IMR(&dev_priv->ring[i]));
+		}
 		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+	}
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -656,7 +703,7 @@
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -666,7 +713,8 @@
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
 			   purgeable_flag(err->purgeable),
-			   ring_str(err->ring));
+			   ring_str(err->ring),
+			   agp_type_str(err->agp_type));
 
 		if (err->name)
 			seq_printf(m, " (name: %d)", err->name);
@@ -744,7 +792,9 @@
 		if (error->batchbuffer[i]) {
 			struct drm_i915_error_object *obj = error->batchbuffer[i];
 
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -815,7 +865,7 @@
 		int max_freq;
 
 		/* RPSTAT1 is in the GT power well */
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -838,7 +888,7 @@
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   max_freq * 100);
 
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -890,7 +940,7 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u32 rstdbyctl = I915_READ(RSTDBYCTL);
 	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -913,6 +963,30 @@
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+	seq_printf(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_printf(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_printf(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_printf(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_printf(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_printf(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_printf(m, "RC3 (RC6+)\n");
+		break;
+	default:
+		seq_printf(m, "unknown\n");
+		break;
+	}
 
 	return 0;
 }
@@ -1187,6 +1261,7 @@
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0568dbd..e33d9be 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	int ret;
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -163,33 +163,22 @@
 	}
 
 	if (init->ring_size != 0) {
-		if (ring->obj != NULL) {
+		if (LP_RING(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		ring->size = init->ring_size;
-
-		ring->map.offset = init->ring_start;
-		ring->map.size = init->ring_size;
-		ring->map.type = 0;
-		ring->map.flags = 0;
-		ring->map.mtrr = 0;
-
-		drm_core_ioremap_wc(&ring->map, dev);
-
-		if (ring->map.handle == NULL) {
+		ret = intel_render_ring_init_dri(dev,
+						 init->ring_start,
+						 init->ring_size);
+		if (ret) {
 			i915_dma_cleanup(dev);
-			DRM_ERROR("can not ioremap virtual address for"
-				  " ring buffer\n");
-			return -ENOMEM;
+			return ret;
 		}
 	}
 
-	ring->virtual_start = ring->map.handle;
-
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
 	dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
 
-	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+	/* If we have more than one VGA card, we need to arbitrate access
+	 * to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and the
+	 * vga_client_register() fails with -ENODEV.
+	 */
 	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		goto cleanup_ringbuffer;
 
 	intel_register_dsm_handler();
@@ -1900,6 +1895,17 @@
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32-bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {
@@ -1962,13 +1968,6 @@
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
-	if (dev_priv->has_gem == 0 &&
-	    drm_core_check_feature(dev, DRIVER_MODESET)) {
-		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
-		ret = -ENODEV;
-		goto out_workqueue_free;
-	}
-
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
@@ -2055,7 +2054,6 @@
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
-out_workqueue_free:
 	destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);
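
A standalone sketch of the "benign error" pattern used in the vga_client_register() hunk above: only -ENODEV (arbitration does not apply to this device) is tolerated, while every other error still fails the setup path. register_client() and its behaviour are invented stand-ins, not the real VGA arbiter API.

#include <errno.h>
#include <stdio.h>

/* Invented stand-in for vga_client_register(): rejects non-VGA-class
 * devices with -ENODEV, succeeds otherwise. */
static int register_client(int is_vga_class)
{
	return is_vga_class ? 0 : -ENODEV;
}

static int setup_display(int is_vga_class)
{
	int ret = register_client(is_vga_class);

	/* -ENODEV only means "arbitration does not apply here";
	 * every other error still aborts the setup path. */
	if (ret && ret != -ENODEV)
		return ret;
	return 0;
}

int main(void)
{
	printf("primary: %d, secondary: %d\n",
	       setup_display(1), setup_display(0));
	return 0;
}
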
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8724933..22ec066 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,15 +46,27 @@
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
+unsigned int i915_panel_use_ssc = 1;
+module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+
+bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
 	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
-	.class_mask = 0xffff00,			\
+	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
 	.subvendor = PCI_ANY_ID,		\
@@ -245,7 +257,7 @@
 	}
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -261,12 +273,22 @@
 		udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int loop = 500;
+	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	while (fifo < 20 && loop--) {
+		udelay(10);
+		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -348,10 +370,14 @@
 		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
+
+		if (IS_IRONLAKE_M(dev))
+			ironlake_enable_rc6(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -475,6 +501,9 @@
 	bool need_display = true;
 	int ret;
 
+	if (!i915_try_reset)
+		return 0;
+
 	if (!mutex_trylock(&dev->struct_mutex))
 		return -EBUSY;
 
@@ -530,6 +559,7 @@
 
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 		mutex_lock(&dev->struct_mutex);
 	}
@@ -554,6 +584,14 @@
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. Nowadays this
+	 * only causes confusion, especially on systems where both
+	 * functions have the same PCI ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -740,6 +778,9 @@
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+	if (!(driver.driver_features & DRIVER_MODESET))
+		driver.get_vblank_timestamp = NULL;
+
 	return drm_init(&driver);
 }
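
The new __gen6_gt_wait_for_fifo() above is a bounded poll: re-read a free-entry count, back off briefly, and give up after a fixed number of retries instead of spinning forever. A self-contained sketch of that shape; read_free_entries() is a fake register read, not the GT_FIFO_FREE_ENTRIES access.

#include <stdio.h>
#include <unistd.h>

static unsigned int entries = 5;

/* Fake register read standing in for I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES). */
static unsigned int read_free_entries(void)
{
	return entries += 4;
}

static void wait_for_fifo(void)
{
	int loop = 500;
	unsigned int fifo = read_free_entries();

	/* Bounded retry: a wedged FIFO costs at most 500 * 10us here. */
	while (fifo < 20 && loop--) {
		usleep(10);
		fifo = read_free_entries();
	}
}

int main(void)
{
	wait_for_fifo();
	printf("free entries now: %u\n", entries);
	return 0;
}
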
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aac1bf3..456f404 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,20 +172,21 @@
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer[2];
+	} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
-		size_t size;
+		u32 size;
 		u32 name;
 		u32 seqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
-		u32 fence_reg;
+		s32 fence_reg:5;
 		s32 pinned:2;
 		u32 tiling:2;
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 ring:4;
+		u32 agp_type:1;
 	} *active_bo, *pinned_bo;
 	u32 active_bo_count, pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
@@ -332,6 +333,7 @@
 
 	/* LVDS info */
 	int backlight_level;  /* restore backlight to this value */
+	bool backlight_enabled;
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -541,8 +543,11 @@
 		/** List of all objects in gtt_space. Used to restore gtt
 		 * mappings on resume */
 		struct list_head gtt_list;
-		/** End of mappable part of GTT */
+
+		/** Usable portion of the GTT for GEM */
+		unsigned long gtt_start;
 		unsigned long gtt_mappable_end;
+		unsigned long gtt_end;
 
 		struct io_mapping *gtt_mapping;
 		int gtt_mtrr;
@@ -794,6 +799,7 @@
 	 */
 	struct hlist_node exec_node;
 	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
 
 	/**
 	 * Current offset of the object in GTT space.
@@ -950,7 +956,10 @@
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
+extern unsigned int i915_panel_use_ssc;
+extern unsigned int i915_enable_rc6;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1006,12 +1015,6 @@
 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
-		u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
-		u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1091,10 +1094,10 @@
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring,
-			 uint32_t invalidate_domains,
-			 uint32_t flush_domains);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1175,6 +1178,9 @@
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1265,6 +1271,7 @@
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1350,22 +1357,32 @@
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
 	if (dev_priv->info->gen >= 6) {
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 		val = I915_READ(reg);
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else
 		val = I915_READ(reg);
 
 	return val;
 }
 
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+				u32 reg, u32 val)
+{
+	if (dev_priv->info->gen >= 6)
+		__gen6_gt_wait_for_fifo(dev_priv);
+	I915_WRITE(reg, val);
+}
+
 static inline void
 i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
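
i915_gt_read()/i915_gt_write() above bracket a register access with force-wake handling only on gen6+ parts. A hypothetical standalone version of that conditional bracketing; fake_dev, wake_get() and wake_put() are illustrative stand-ins, not the driver's types.

#include <stdint.h>
#include <stdio.h>

struct fake_dev {
	int gen;
	int wake_count;		/* stands in for the force-wake reference */
	uint32_t regs[16];
};

static void wake_get(struct fake_dev *d) { d->wake_count++; }
static void wake_put(struct fake_dev *d) { d->wake_count--; }

static uint32_t gt_read(struct fake_dev *d, unsigned int reg)
{
	uint32_t val;

	if (d->gen >= 6) {
		/* Keep the GT awake only for the duration of the read. */
		wake_get(d);
		val = d->regs[reg];
		wake_put(d);
	} else {
		val = d->regs[reg];
	}
	return val;
}

int main(void)
{
	struct fake_dev d = { .gen = 6 };

	d.regs[3] = 0xdeadbeef;
	printf("reg3 = 0x%08x, wake_count = %d\n",
	       (unsigned int)gt_read(&d, 3), d.wake_count);
	return 0;
}
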
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c79c0b6..36e66cc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-					     bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-						     uint64_t offset,
-						     uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							  bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								  uint64_t offset,
+								  uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-				       unsigned alignment,
-				       bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -140,12 +140,16 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	drm_mm_init(&dev_priv->mm.gtt_space, start,
-		    end - start);
+	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
+	dev_priv->mm.gtt_start = start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
+	dev_priv->mm.gtt_end = end;
 	dev_priv->mm.gtt_total = end - start;
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-	dev_priv->mm.gtt_mappable_end = mappable_end;
+
+	/* Take over this portion of the GTT */
+	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
 
 int
@@ -1394,7 +1398,7 @@
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -1857,7 +1861,7 @@
 
 	seqno = ring->get_seqno(ring);
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
 			ring->sync_seqno[i] = 0;
 
@@ -1935,6 +1939,8 @@
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	bool idle;
+	int i;
 
 	dev_priv = container_of(work, drm_i915_private_t,
 				mm.retire_work.work);
@@ -1948,11 +1954,31 @@
 
 	i915_gem_retire_requests(dev);
 
-	if (!dev_priv->mm.suspended &&
-		(!list_empty(&dev_priv->ring[RCS].request_list) ||
-		 !list_empty(&dev_priv->ring[VCS].request_list) ||
-		 !list_empty(&dev_priv->ring[BCS].request_list)))
+	/* Send a periodic flush down the ring so we don't hold onto GEM
+	 * objects indefinitely.
+	 */
+	idle = true;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (!list_empty(&ring->gpu_write_list)) {
+			struct drm_i915_gem_request *request;
+			int ret;
+
+			ret = i915_gem_flush_ring(dev, ring, 0,
+						  I915_GEM_GPU_DOMAINS);
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (ret || request == NULL ||
+			    i915_add_request(dev, NULL, request, ring))
+			    kfree(request);
+		}
+
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -2142,25 +2168,37 @@
 	return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	ring->flush(ring, invalidate_domains, flush_domains);
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
 	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list))
-		i915_gem_flush_ring(dev, ring,
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
 				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2370,10 +2408,13 @@
 	int ret;
 
 	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(obj->base.dev,
-					    obj->last_fenced_ring,
-					    0, obj->base.write_domain);
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
 
 		obj->fenced_gpu_access = false;
 	}
@@ -2393,6 +2434,12 @@
 		obj->last_fenced_ring = NULL;
 	}
 
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+		mb();
+
 	return 0;
 }
 
@@ -2523,9 +2570,12 @@
 				return ret;
 		} else if (obj->tiling_changed) {
 			if (obj->fenced_gpu_access) {
-				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-					i915_gem_flush_ring(obj->base.dev, obj->ring,
-							    0, obj->base.write_domain);
+				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+					ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+								  0, obj->base.write_domain);
+					if (ret)
+						return ret;
+				}
 
 				obj->fenced_gpu_access = false;
 			}
@@ -2736,10 +2786,8 @@
 		obj->gtt_space = NULL;
 
 		if (ret == -ENOMEM) {
-			/* first try to clear up some space from the GTT */
-			ret = i915_gem_evict_something(dev, size,
-						       alignment,
-						       map_and_fenceable);
+			/* first try to reclaim some memory by clearing the GTT */
+			ret = i915_gem_evict_everything(dev, false);
 			if (ret) {
 				/* now try to shrink everyone else */
 				if (gfpmask) {
@@ -2747,7 +2795,7 @@
 					goto search_free;
 				}
 
-				return ret;
+				return -ENOMEM;
 			}
 
 			goto search_free;
@@ -2762,9 +2810,7 @@
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
 
-		ret = i915_gem_evict_something(dev, size,
-					       alignment, map_and_fenceable);
-		if (ret)
+		if (i915_gem_evict_everything(dev, false))
 			return ret;
 
 		goto search_free;
@@ -2811,17 +2857,16 @@
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-	BUG_ON(obj->base.write_domain);
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2833,10 +2878,16 @@
 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
-	/* No actual flushing is required for the GTT write domain.   Writes
+	/* No actual flushing is required for the GTT write domain.  Writes
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
 	 */
+	wmb();
+
 	i915_gem_release_mmap(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -2882,7 +2933,10 @@
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
@@ -2927,7 +2981,10 @@
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 
 	/* Currently, we are always called from a non-interruptible context. */
 	if (pipelined != obj->ring) {
@@ -2952,12 +3009,17 @@
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
 	return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2974,7 +3036,10 @@
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3069,7 +3134,10 @@
 	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3362,8 +3430,8 @@
 		 * flush earlier is beneficial.
 		 */
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			i915_gem_flush_ring(dev, obj->ring,
-					    0, obj->base.write_domain);
+			ret = i915_gem_flush_ring(dev, obj->ring,
+						  0, obj->base.write_domain);
 		} else if (obj->ring->outstanding_lazy_request ==
 			   obj->last_rendering_seqno) {
 			struct drm_i915_gem_request *request;
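
Much of the i915_gem.c churn above converts flush helpers from void to int and tags them __must_check so callers must propagate failures instead of hitting a BUG_ON() later. The annotation is GCC/clang's warn_unused_result; a minimal sketch with a dummy flush_ring(), not the driver function:

#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

/* Dummy flush that can fail, mirroring the int-returning ring->flush(). */
static __must_check int flush_ring(int fail)
{
	return fail ? -5 : 0;	/* -EIO-style error code */
}

static int set_domain(int fail)
{
	int ret = flush_ring(fail);	/* dropping this result would warn */

	if (ret)
		return ret;		/* propagate rather than assert */
	return 0;
}

int main(void)
{
	printf("%d %d\n", set_domain(0), set_domain(1));
	return 0;
}
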
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 78b8cf9..3d39005 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -127,9 +127,15 @@
 	}
 
 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry(obj, &unwind_list, exec_list) {
+	while (!list_empty(&unwind_list)) {
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+
 		ret = drm_mm_scan_remove_block(obj->gtt_space);
 		BUG_ON(ret);
+
+		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
 	}
 
@@ -162,6 +168,7 @@
 				       exec_list);
 		if (ret == 0)
 			ret = i915_gem_object_unbind(obj);
+
 		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
 	}
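
The eviction fix above replaces list_for_each_entry() with a while (!list_empty()) / list_first_entry() loop because each object is unlinked, and possibly freed, inside the body, which would leave the iterator dangling. The same shape on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static void drain(struct node **head)
{
	/* Always take the current first element; never keep a cursor into
	 * a node the body is about to free. */
	while (*head) {
		struct node *n = *head;

		*head = n->next;
		printf("releasing %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	drain(&head);
	return 0;
}
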
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 61129e6..50ab161 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -268,7 +268,6 @@
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_objects *eb,
-				   struct drm_i915_gem_exec_object2 *entry,
 				   struct drm_i915_gem_relocation_entry *reloc)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -411,10 +410,10 @@
 
 static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct drm_i915_gem_exec_object2 *entry)
+				    struct eb_objects *eb)
 {
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int i, ret;
 
 	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
@@ -426,7 +425,7 @@
 					      sizeof(reloc)))
 			return -EFAULT;
 
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
 		if (ret)
 			return ret;
 
@@ -442,13 +441,13 @@
 static int
 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 					 struct eb_objects *eb,
-					 struct drm_i915_gem_exec_object2 *entry,
 					 struct drm_i915_gem_relocation_entry *relocs)
 {
+	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
 		if (ret)
 			return ret;
 	}
@@ -459,16 +458,13 @@
 static int
 i915_gem_execbuffer_relocate(struct drm_device *dev,
 			     struct eb_objects *eb,
-			     struct list_head *objects,
-			     struct drm_i915_gem_exec_object2 *exec)
+			     struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
+		ret = i915_gem_execbuffer_relocate_object(obj, eb);
 		if (ret)
 			return ret;
 	}
@@ -479,13 +475,39 @@
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
-			    struct list_head *objects,
-			    struct drm_i915_gem_exec_object2 *exec)
+			    struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_exec_object2 *entry;
 	int ret, retry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	struct list_head ordered_objects;
+
+	INIT_LIST_HEAD(&ordered_objects);
+	while (!list_empty(objects)) {
+		struct drm_i915_gem_exec_object2 *entry;
+		bool need_fence, need_mappable;
+
+		obj = list_first_entry(objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		entry = obj->exec_entry;
+
+		need_fence =
+			has_fenced_gpu_access &&
+			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			obj->tiling_mode != I915_TILING_NONE;
+		need_mappable =
+			entry->relocation_count ? true : need_fence;
+
+		if (need_mappable)
+			list_move(&obj->exec_list, &ordered_objects);
+		else
+			list_move_tail(&obj->exec_list, &ordered_objects);
+
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+	}
+	list_splice(&ordered_objects, objects);
 
 	/* Attempt to pin all of the buffers into the GTT.
 	 * This is done in 3 phases:
@@ -504,14 +526,11 @@
 		ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		entry = exec;
 		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 			bool need_fence, need_mappable;
-
-			if (!obj->gtt_space) {
-				entry++;
+			if (!obj->gtt_space)
 				continue;
-			}
 
 			need_fence =
 				has_fenced_gpu_access &&
@@ -534,8 +553,8 @@
 		}
 
 		/* Bind fresh objects */
-		entry = exec;
 		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 			bool need_fence;
 
 			need_fence =
@@ -570,7 +589,6 @@
 			}
 
 			entry->offset = obj->gtt_offset;
-			entry++;
 		}
 
 		/* Decrement pin count for bound objects */
@@ -619,10 +637,11 @@
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct drm_i915_gem_object *obj;
+	int *reloc_offset;
 	int i, total, ret;
 
 	/* We may process another execbuffer during the unlock... */
-	while (list_empty(objects)) {
+	while (!list_empty(objects)) {
 		obj = list_first_entry(objects,
 				       struct drm_i915_gem_object,
 				       exec_list);
@@ -636,8 +655,11 @@
 	for (i = 0; i < count; i++)
 		total += exec[i].relocation_count;
 
+	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 	reloc = drm_malloc_ab(total, sizeof(*reloc));
-	if (reloc == NULL) {
+	if (reloc == NULL || reloc_offset == NULL) {
+		drm_free_large(reloc);
+		drm_free_large(reloc_offset);
 		mutex_lock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
@@ -655,6 +677,7 @@
 			goto err;
 		}
 
+		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}
 
@@ -665,7 +688,6 @@
 	}
 
 	/* reacquire the objects */
-	INIT_LIST_HEAD(objects);
 	eb_reset(eb);
 	for (i = 0; i < count; i++) {
 		struct drm_i915_gem_object *obj;
@@ -681,25 +703,20 @@
 
 		list_add_tail(&obj->exec_list, objects);
 		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
 		eb_add_object(eb, obj);
 	}
 
-	ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
+	ret = i915_gem_execbuffer_reserve(ring, file, objects);
 	if (ret)
 		goto err;
 
-	total = 0;
 	list_for_each_entry(obj, objects, exec_list) {
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
+		int offset = obj->exec_entry - exec;
 		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       exec,
-							       reloc + total);
+							       reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
-
-		total += exec->relocation_count;
-		exec++;
 	}
 
 	/* Leave the user relocations as are, this is the painfully slow path,
@@ -710,28 +727,38 @@
 
 err:
 	drm_free_large(reloc);
+	drm_free_large(reloc_offset);
 	return ret;
 }
 
-static void
+static int
 i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t invalidate_domains,
 			  uint32_t flush_domains,
 			  uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
 
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i))
-				i915_gem_flush_ring(dev, &dev_priv->ring[i],
-						    invalidate_domains,
-						    flush_domains);
+			if (flush_rings & (1 << i)) {
+				ret = i915_gem_flush_ring(dev,
+							  &dev_priv->ring[i],
+							  invalidate_domains,
+							  flush_domains);
+				if (ret)
+					return ret;
+			}
 	}
+
+	return 0;
 }
 
 static int
@@ -745,7 +772,8 @@
 	if (from == NULL || to == from)
 		return 0;
 
-	if (INTEL_INFO(obj->base.dev)->gen < 6)
+	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
 		return i915_gem_object_wait_rendering(obj, true);
 
 	idx = intel_ring_sync_index(from, to);
@@ -795,10 +823,12 @@
 			 cd.invalidate_domains,
 			 cd.flush_domains);
 #endif
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains,
-					  cd.flush_rings);
+		ret = i915_gem_execbuffer_flush(ring->dev,
+						cd.invalidate_domains,
+						cd.flush_domains,
+						cd.flush_rings);
+		if (ret)
+			return ret;
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -921,7 +951,7 @@
 				    struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_request *request;
-	u32 flush_domains;
+	u32 invalidate;
 
 	/*
 	 * Ensure that the commands in the batch buffer are
 	 * finished before the interrupt fires.
 	 *
 	 *
 	 * The sampler always gets flushed on i965 (sigh).
 	 */
-	flush_domains = 0;
+	invalidate = I915_GEM_DOMAIN_COMMAND;
 	if (INTEL_INFO(dev)->gen >= 4)
-		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
-	ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
+		invalidate |= I915_GEM_DOMAIN_SAMPLER;
+	if (ring->flush(ring, invalidate, 0)) {
+		i915_gem_next_request_seqno(dev, ring);
+		return;
+	}
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -1098,16 +1130,22 @@
 
 		list_add_tail(&obj->exec_list, &objects);
 		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
 		eb_add_object(eb, obj);
 	}
 
+	/* take note of the batch buffer before we might reorder the lists */
+	batch_obj = list_entry(objects.prev,
+			       struct drm_i915_gem_object,
+			       exec_list);
+
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
+	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
+	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
@@ -1121,9 +1159,6 @@
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
-	batch_obj = list_entry(objects.prev,
-			       struct drm_i915_gem_object,
-			       exec_list);
 	if (batch_obj->base.pending_write_domain) {
 		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
 		ret = -EINVAL;
@@ -1140,7 +1175,7 @@
 		goto err;
 
 	seqno = i915_gem_next_request_seqno(dev, ring);
-	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 		if (seqno < ring->sync_seqno[i]) {
 			/* The GPU can not handle its semaphore value wrapping,
 			 * so every billion or so execbuffers, we need to stall
@@ -1340,4 +1375,3 @@
 	drm_free_large(exec2_list);
 	return ret;
 }
-
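
One detail of the slow-relocation path above: reloc_offset[i] stores the running total of relocation counts, so each object's relocations can still be found in the flat reloc array after the object list has been reordered. The prefix-offset table in isolation, with made-up counts:

#include <stdio.h>

int main(void)
{
	int counts[4] = { 2, 0, 3, 1 };	/* relocation_count per object */
	int offset[4];
	int total = 0;

	for (int i = 0; i < 4; i++) {
		offset[i] = total;	/* index of object i's first reloc */
		total += counts[i];
	}

	/* Object 2's relocations occupy [offset[2], offset[2] + counts[2]). */
	printf("total=%d, object 2 starts at index %d\n", total, offset[2]);
	return 0;
}
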
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 86673e7..b0abdc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
+	/* First fill our portion of the GTT with scratch pages */
+	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 
@@ -85,15 +89,11 @@
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->mm.gtt->needs_dmar) {
-		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
-		obj->sg_list = NULL;
-		obj->num_sg = 0;
-	}
-
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
+
+	if (obj->sg_list) {
+		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+		obj->sg_list = NULL;
+	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9..d64843e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@
 			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-		obj->tiling_changed = true;
-		obj->tiling_mode = args->tiling_mode;
-		obj->stride = args->stride;
+		/* Rebind if we need a change of alignment */
+		if (!obj->map_and_fenceable) {
+			u32 unfenced_alignment =
+				i915_gem_get_unfenced_gtt_alignment(obj);
+			if (obj->gtt_offset & (unfenced_alignment - 1))
+				ret = i915_gem_object_unbind(obj);
+		}
+
+		if (ret == 0) {
+			obj->tiling_changed = true;
+			obj->tiling_mode = args->tiling_mode;
+			obj->stride = args->stride;
+		}
 	}
+	/* we have to maintain this existing ABI... */
+	args->stride = obj->stride;
+	args->tiling_mode = obj->tiling_mode;
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /**
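
The tiling change above only unbinds when the current GTT offset violates the new alignment, using the usual offset & (alignment - 1) test, which is valid only for power-of-two alignments. That test in isolation:

#include <stdint.h>
#include <stdio.h>

/* Valid only for power-of-two alignments, as GTT alignments are. */
static int misaligned(uint32_t offset, uint32_t align)
{
	return (offset & (align - 1)) != 0;
}

int main(void)
{
	printf("%d %d\n",
	       misaligned(0x11000, 0x1000),	/* aligned   -> 0 */
	       misaligned(0x11800, 0x1000));	/* unaligned -> 1 */
	return 0;
}
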
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0dadc02..8a9e08b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,26 +64,6 @@
 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
 					 DRM_I915_VBLANK_PIPE_B)
 
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != 0) {
-		dev_priv->gt_irq_mask &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != mask) {
-		dev_priv->gt_irq_mask |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -105,26 +85,6 @@
 	}
 }
 
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != 0) {
-		dev_priv->irq_mask &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != mask) {
-		dev_priv->irq_mask |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
 static inline u32
 i915_pipestat(int pipe)
 {
@@ -314,24 +274,35 @@
 	return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 			      int *max_error,
 			      struct timeval *vblank_time,
 			      unsigned flags)
 {
-	struct drm_crtc *drmcrtc;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
 
-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
 		return -EINVAL;
 	}
 
 	/* Get drm_crtc to timestamp: */
-	drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled) {
+		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+		return -EBUSY;
+	}
 
 	/* Helper routine in DRM core does all the work: */
-	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-						     vblank_time, flags, drmcrtc);
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     crtc);
 }
 
 /*
@@ -345,6 +316,8 @@
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 		if (encoder->hot_plug)
 			encoder->hot_plug(encoder);
@@ -388,10 +361,17 @@
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(ring);
-	ring->irq_seqno = seqno;
+	u32 seqno;
+
+	if (ring->obj == NULL)
+		return;
+
+	seqno = ring->get_seqno(ring);
 	trace_i915_gem_request_complete(dev, seqno);
+
+	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
+
 	dev_priv->hangcheck_count = 0;
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -435,6 +415,50 @@
 	I915_WRITE(GEN6_PMIIR, pm_iir);
 }
 
+static void pch_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 pch_iir;
+
+	pch_iir = I915_READ(SDEIIR);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
+				 SDE_AUDIO_POWER_SHIFT);
+
+	if (pch_iir & SDE_GMBUS)
+		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK) {
+		u32 fdia, fdib;
+
+		fdia = I915_READ(FDI_RXA_IIR);
+		fdib = I915_READ(FDI_RXB_IIR);
+		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
+	}
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
 static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -502,8 +526,11 @@
 		drm_handle_vblank(dev, 1);
 
 	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
-		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	if (de_iir & DE_PCH_EVENT) {
+		if (pch_iir & hotplug_mask)
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		pch_irq_handler(dev);
+	}
 
 	if (de_iir & DE_PCU_EVENT) {
 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -556,10 +583,9 @@
 
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_device *dev,
+i915_error_object_create(struct drm_i915_private *dev_priv,
 			 struct drm_i915_gem_object *src)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_object *dst;
 	int page, page_count;
 	u32 reloc_offset;
@@ -632,52 +658,6 @@
 	kfree(error);
 }
 
-static u32
-i915_get_bbaddr(struct drm_device *dev, u32 *ring)
-{
-	u32 cmd;
-
-	if (IS_I830(dev) || IS_845G(dev))
-		cmd = MI_BATCH_BUFFER;
-	else if (INTEL_INFO(dev)->gen >= 4)
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
-		       MI_BATCH_NON_SECURE_I965);
-	else
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
-
-	return ring[0] == cmd ? ring[1] : 0;
-}
-
-static u32
-i915_ringbuffer_last_batch(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 head, bbaddr;
-	u32 *val;
-
-	/* Locate the current position in the ringbuffer and walk back
-	 * to find the most recently dispatched batch buffer.
-	 */
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-
-	val = (u32 *)(ring->virtual_start + head);
-	while (--val >= (u32 *)ring->virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, val);
-		if (bbaddr)
-			return bbaddr;
-	}
-
-	val = (u32 *)(ring->virtual_start + ring->size);
-	while (--val >= (u32 *)ring->virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, val);
-		if (bbaddr)
-			return bbaddr;
-	}
-
-	return 0;
-}
-
 static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 			   int count,
 			   struct list_head *head)
@@ -702,6 +682,7 @@
 		err->dirty = obj->dirty;
 		err->purgeable = obj->madv != I915_MADV_WILLNEED;
 		err->ring = obj->ring ? obj->ring->id : 0;
+		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
 
 		if (++i == count)
 			break;
@@ -741,6 +722,36 @@
 	}
 }
 
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+			     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	u32 seqno;
+
+	if (!ring->get_seqno)
+		return NULL;
+
+	seqno = ring->get_seqno(ring);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->ring != ring)
+			continue;
+
+		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+			continue;
+
+		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+			continue;
+
+		/* We need to copy these to an anonymous buffer as the simplest
+		 * method to avoid being overwritten by userspace.
+		 */
+		return i915_error_object_create(dev_priv, obj);
+	}
+
+	return NULL;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -755,10 +766,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_error_state *error;
-	struct drm_i915_gem_object *batchbuffer[2];
 	unsigned long flags;
-	u32 bbaddr;
-	int count;
+	int i;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
@@ -817,83 +826,32 @@
 	}
 	i915_gem_record_fences(dev, error);
 
-	bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
-
-	/* Grab the current batchbuffer, most likely to have crashed. */
-	batchbuffer[0] = NULL;
-	batchbuffer[1] = NULL;
-	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (batchbuffer[0] == NULL &&
-		    bbaddr >= obj->gtt_offset &&
-		    bbaddr < obj->gtt_offset + obj->base.size)
-			batchbuffer[0] = obj;
-
-		if (batchbuffer[1] == NULL &&
-		    error->acthd >= obj->gtt_offset &&
-		    error->acthd < obj->gtt_offset + obj->base.size)
-			batchbuffer[1] = obj;
-
-		count++;
-	}
-	/* Scan the other lists for completeness for those bizarre errors. */
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj->gtt_offset &&
-			    bbaddr < obj->gtt_offset + obj->base.size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj->gtt_offset &&
-			    error->acthd < obj->gtt_offset + obj->base.size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj->gtt_offset &&
-			    bbaddr < obj->gtt_offset + obj->base.size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj->gtt_offset &&
-			    error->acthd < obj->gtt_offset + obj->base.size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-
-	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userspace.
-	 */
-	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
-	if (batchbuffer[1] != batchbuffer[0])
-		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
-	else
-		error->batchbuffer[1] = NULL;
+	/* Record the active batchbuffers */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		error->batchbuffer[i] =
+			i915_error_first_batchbuffer(dev_priv,
+						     &dev_priv->ring[i]);
 
 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev,
+	error->ringbuffer = i915_error_object_create(dev_priv,
 						     dev_priv->ring[RCS].obj);
 
 	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
 	error->pinned_bo = NULL;
 
-	error->active_bo_count = count;
+	i = 0;
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+		i++;
+	error->active_bo_count = i;
 	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
-		count++;
-	error->pinned_bo_count = count - error->active_bo_count;
+		i++;
+	error->pinned_bo_count = i - error->active_bo_count;
 
-	if (count) {
-		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+	error->active_bo = NULL;
+	error->pinned_bo = NULL;
+	if (i) {
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 					   GFP_ATOMIC);
 		if (error->active_bo)
 			error->pinned_bo =
@@ -1240,18 +1198,18 @@
 				intel_finish_page_flip_plane(dev, 1);
 		}
 
-		if (pipea_stats & vblank_status) {
+		if (pipea_stats & vblank_status &&
+		    drm_handle_vblank(dev, 0)) {
 			vblank++;
-			drm_handle_vblank(dev, 0);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 0);
 				intel_finish_page_flip(dev, 0);
 			}
 		}
 
-		if (pipeb_stats & vblank_status) {
+		if (pipeb_stats & vblank_status &&
+		    drm_handle_vblank(dev, 1)) {
 			vblank++;
-			drm_handle_vblank(dev, 1);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 1);
 				intel_finish_page_flip(dev, 1);
@@ -1339,12 +1297,12 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	ret = -ENODEV;
 	if (ring->irq_get(ring)) {
 		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 			    READ_BREADCRUMB(dev_priv) >= irq_nr);
 		ring->irq_put(ring);
-	}
+	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+		ret = -EBUSY;
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1673,11 +1631,6 @@
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
-		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
-		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
-	}
 
 	if (IS_GEN6(dev))
 		render_irqs =
@@ -1698,6 +1651,7 @@
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+		hotplug_mask |= SDE_AUX_MASK;
 	}
 
 	dev_priv->pch_irq_mask = ~hotplug_mask;
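
pch_irq_handler() above checks one SDEIIR bit or field after another and logs each event. A table-driven variant of the same decode pattern, reusing a few SDE_* bit positions from this patch (1<<24 GMBUS, 1<<19 poison, 3<<16 FDI RX); the struct and printf stand in for the DRM debug macros:

#include <stdint.h>
#include <stdio.h>

static const struct {
	uint32_t mask;
	const char *name;
} sde_bits[] = {
	{ 1u << 24, "GMBUS" },
	{ 1u << 19, "poison" },
	{ 3u << 16, "FDI RX" },
};

static void decode_sdeiir(uint32_t iir)
{
	for (unsigned int i = 0; i < sizeof(sde_bits) / sizeof(sde_bits[0]); i++)
		if (iir & sde_bits[i].mask)
			printf("PCH event: %s\n", sde_bits[i].name);
}

int main(void)
{
	decode_sdeiir((1u << 24) | (1u << 16));
	return 0;
}
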
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 8f948a6..2abe240 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -145,6 +145,8 @@
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
 #define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN	(1<<0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP		MI_INSTR(0x11,0)
 #define   MI_OVERLAY_CONTINUE	(0x0<<21)
@@ -159,6 +161,7 @@
 #define   MI_MM_SPACE_PHYSICAL		(0<<8)
 #define   MI_SAVE_EXT_STATE_EN		(1<<3)
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
+#define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
@@ -171,7 +174,9 @@
 *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
+#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_INVALIDATE_TLB	(1<<18)
+#define   MI_INVALIDATE_BSD	(1<<7)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE	(1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -288,6 +293,7 @@
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
 #define RING_ACTHD(base)	((base)+0x74)
 #define RING_NOPID(base)	((base)+0x94)
+#define RING_IMR(base)		((base)+0xa8)
 #define   TAIL_ADDR		0x001FFFF8
 #define   HEAD_WRAP_COUNT	0xFFE00000
 #define   HEAD_WRAP_ONE		0x00200000
@@ -509,6 +515,10 @@
 #define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
 #define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
 
+#define GEN6_BLITTER_ECOSKPD	0x221d0
+#define   GEN6_BLITTER_LOCK_SHIFT			16
+#define   GEN6_BLITTER_FBC_NOTIFY			(1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK	(1 << 16)
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE		(1 << 0)
@@ -1130,9 +1140,50 @@
 #define RCBMINAVG		0x111a0
 #define RCUPEI			0x111b0
 #define RCDNEI			0x111b4
-#define MCHBAR_RENDER_STANDBY		0x111b8
-#define   RCX_SW_EXIT		(1<<23)
-#define   RSX_STATUS_MASK	0x00700000
+#define RSTDBYCTL		0x111b8
+#define   RS1EN			(1<<31)
+#define   RS2EN			(1<<30)
+#define   RS3EN			(1<<29)
+#define   D3RS3EN		(1<<28) /* Display D3 implies RS3 */
+#define   SWPROMORSX		(1<<27) /* RSx promotion timers ignored */
+#define   RCWAKERW		(1<<26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN		(1<<25) /* Fast voltage ramp enable */
+#define   GFXTGHYST		(1<<24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT		(1<<23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK	(7<<20)
+#define   RSX_STATUS_ON		(0<<20)
+#define   RSX_STATUS_RC1	(1<<20)
+#define   RSX_STATUS_RC1E	(2<<20)
+#define   RSX_STATUS_RS1	(3<<20)
+#define   RSX_STATUS_RS2	(4<<20) /* aka rc6 */
+#define   RSX_STATUS_RSVD	(5<<20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3	(6<<20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2	(7<<20)
+#define   UWRCRSXE		(1<<19) /* wake counter limit prevents rsx */
+#define   RSCRP			(1<<18) /* rs requests control on rs1/2 reqs */
+#define   JRSC			(1<<17) /* rsx coupled to cpu c-state */
+#define   RS2INC0		(1<<16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK	(3<<14)
+#define   RS1CONTSAV_NO_RS1	(0<<14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD	(1<<14)
+#define   RS1CONTSAV_SAVE_RS1	(2<<14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1	(3<<14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK	(3<<12)
+#define   SLOW_RS123		(0<<12)
+#define   SLOW_RS23		(1<<12)
+#define   SLOW_RS3		(2<<12)
+#define   NORMAL_RS123		(3<<12)
+#define   RCMODE_TIMEOUT	(1<<11) /* 0 is eval interval method */
+#define   IMPROMOEN		(1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC		(1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK		(1<<7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK	(3<<4)
+#define   RS_CSTATE_C367_RS1	(0<<4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define   RS_CSTATE_RSVD	(2<<4)
+#define   RS_CSTATE_C367_RS2	(3<<4)
+#define   REDSAVES		(1<<3) /* no context save if it was idle during rs0 */
+#define   REDRESTORES		(1<<2) /* no restore if it was idle during rs0 */
 #define VIDCTL			0x111c0
 #define VIDSTS			0x111c8
 #define VIDSTART		0x111cc /* 8 bits */
@@ -2345,8 +2396,13 @@
 
 /* Memory latency timer register */
 #define MLTR_ILK		0x11222
+#define  MLTR_WM1_SHIFT		0
+#define  MLTR_WM2_SHIFT		8
 /* the unit of memory self-refresh latency time is 0.5us */
 #define  ILK_SRLT_MASK		0x3f
+#define ILK_LATENCY(shift)	(I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY()	ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY()	ILK_LATENCY(MLTR_WM2_SHIFT)
 
 /* define the fifo size on Ironlake */
 #define ILK_DISPLAY_FIFO	128
@@ -2576,6 +2632,8 @@
 #define DISPLAY_PORT_PLL_BIOS_2         0x46014
 
 #define PCH_DSPCLK_GATE_D	0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE		(1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE		(1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE		(1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
 
@@ -2728,12 +2786,41 @@
 /* PCH */
 
 /* south display engine interrupt */
+#define SDE_AUDIO_POWER_D	(1 << 27)
+#define SDE_AUDIO_POWER_C	(1 << 26)
+#define SDE_AUDIO_POWER_B	(1 << 25)
+#define SDE_AUDIO_POWER_SHIFT	(25)
+#define SDE_AUDIO_POWER_MASK	(7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS		(1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB	(1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA	(1 << 22)
+#define SDE_AUDIO_HDCP_MASK	(3 << 22)
+#define SDE_AUDIO_TRANSB	(1 << 21)
+#define SDE_AUDIO_TRANSA	(1 << 20)
+#define SDE_AUDIO_TRANS_MASK	(3 << 20)
+#define SDE_POISON		(1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB		(1 << 17)
+#define SDE_FDI_RXA		(1 << 16)
+#define SDE_FDI_MASK		(3 << 16)
+#define SDE_AUXD		(1 << 15)
+#define SDE_AUXC		(1 << 14)
+#define SDE_AUXB		(1 << 13)
+#define SDE_AUX_MASK		(7 << 13)
+/* 12 reserved */
 #define SDE_CRT_HOTPLUG         (1 << 11)
 #define SDE_PORTD_HOTPLUG       (1 << 10)
 #define SDE_PORTC_HOTPLUG       (1 << 9)
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
 #define SDE_HOTPLUG_MASK	(0xf << 8)
+#define SDE_TRANSB_CRC_DONE	(1 << 5)
+#define SDE_TRANSB_CRC_ERR	(1 << 4)
+#define SDE_TRANSB_FIFO_UNDER	(1 << 3)
+#define SDE_TRANSA_CRC_DONE	(1 << 2)
+#define SDE_TRANSA_CRC_ERR	(1 << 1)
+#define SDE_TRANSA_FIFO_UNDER	(1 << 0)
+#define SDE_TRANS_MASK		(0x3f)
 /* CPT */
 #define SDE_CRT_HOTPLUG_CPT	(1 << 19)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
@@ -3174,15 +3261,18 @@
 #define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
 #define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
 /* SNB B-stepping */
-#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
-#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
-#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
-#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B	(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B	(0x1<<22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B	(0x3a<<22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
 
 #define  FORCEWAKE				0xA18C
 #define  FORCEWAKE_ACK				0x130090
 
+#define  GT_FIFO_FREE_ENTRIES			0x120008
+
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
 #define   GEN6_FREQUENCY(x)			((x)<<25)
@@ -3239,6 +3329,7 @@
 
 #define GEN6_PCODE_MAILBOX			0x138124
 #define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_READ_OC_PARAMS			0xc
 #define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x9
 #define GEN6_PCODE_DATA				0x138128
 
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 4107724..0521ecf2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -740,7 +740,7 @@
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
-		I915_WRITE(MCHBAR_RENDER_STANDBY,
+		I915_WRITE(RSTDBYCTL,
 			   dev_priv->saveMCHBAR_RENDER_STANDBY);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
@@ -811,7 +811,7 @@
 		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
 		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
 		dev_priv->saveMCHBAR_RENDER_STANDBY =
-			I915_READ(MCHBAR_RENDER_STANDBY);
+			I915_READ(RSTDBYCTL);
 	} else {
 		dev_priv->saveIER = I915_READ(IER);
 		dev_priv->saveIMR = I915_READ(IMR);
@@ -822,10 +822,6 @@
 	if (IS_GEN6(dev))
 		gen6_disable_rps(dev);
 
-	/* XXX disabling the clock gating breaks suspend on gm45
-	intel_disable_clock_gating(dev);
-	 */
-
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b0b1200..0b44956 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -264,17 +264,12 @@
 		dev_priv->int_crt_support = general->int_crt_support;
 		dev_priv->lvds_use_ssc = general->enable_ssc;
 
-		if (dev_priv->lvds_use_ssc) {
-			if (IS_I85X(dev))
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 66 : 48;
-			else if (IS_GEN5(dev) || IS_GEN6(dev))
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 100 : 120;
-			else
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 100 : 96;
-		}
+		if (IS_I85X(dev))
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+		else if (IS_GEN5(dev) || IS_GEN6(dev))
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
+		else
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8df5743..8a77ff4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -287,8 +288,9 @@
 	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }
 
-static bool intel_crt_detect_ddc(struct intel_crt *crt)
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 
 	/* CRT should always be at 0, but check anyway */
@@ -301,8 +303,26 @@
 	}
 
 	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
-		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-		return true;
+		struct edid *edid;
+		bool is_digital = false;
+
+		edid = drm_get_edid(connector,
+			&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+		/*
+		 * This may be a DVI-I connector with a shared DDC
+		 * link between analog and digital outputs, so we
+		 * have to check the EDID input spec of the attached device.
+		 */
+		if (edid != NULL) {
+			is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		}
+
+		if (!is_digital) {
+			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			return true;
+		}
 	}
 
 	return false;
@@ -458,7 +478,7 @@
 		}
 	}
 
-	if (intel_crt_detect_ddc(crt))
+	if (intel_crt_detect_ddc(connector))
 		return connector_status_connected;
 
 	if (!force)
@@ -472,7 +492,7 @@
 		crtc = intel_get_load_detect_pipe(&crt->base, connector,
 						  NULL, &dpms_mode);
 		if (crtc) {
-			if (intel_crt_detect_ddc(crt))
+			if (intel_crt_detect_ddc(connector))
 				status = connector_status_connected;
 			else
 				status = intel_crt_load_detect(crtc, crt);
@@ -515,6 +535,15 @@
 	return 0;
 }
 
+static void intel_crt_reset(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+
+	if (HAS_PCH_SPLIT(dev))
+		crt->force_hotplug_required = 1;
+}
+
 /*
  * Routines for controlling stuff on the analog port
  */
@@ -528,6 +557,7 @@
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
+	.reset = intel_crt_reset,
 	.dpms = drm_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
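
The DDC probe in intel_crt_detect_ddc() now also parses the EDID it fetched: if the sink describes itself as digital, the analog CRT connector refuses to claim it, which keeps a DVI-I monitor on a shared DDC bus from showing up as a connected VGA output. The deciding bit is DRM_EDID_INPUT_DIGITAL in the EDID "video input definition" byte. A small sketch of the same check against a raw base EDID block (the 0x14 offset follows the standard EDID layout; the block contents are fabricated):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EDID_INPUT_OFFSET   0x14	/* "video input definition" byte */
#define EDID_INPUT_DIGITAL  (1 << 7)	/* corresponds to DRM_EDID_INPUT_DIGITAL */

/* Return true if the EDID block describes a digital sink (DVI/HDMI/DP). */
static bool edid_is_digital(const uint8_t *edid, size_t len)
{
	if (len < 128)
		return false;		/* not a full base EDID block */
	return edid[EDID_INPUT_OFFSET] & EDID_INPUT_DIGITAL;
}

int main(void)
{
	uint8_t fake_edid[128] = { 0 };

	fake_edid[EDID_INPUT_OFFSET] = 0x80;	/* hypothetical digital sink */
	printf("digital: %d\n", edid_is_digital(fake_edid, sizeof(fake_edid)));
	return 0;
}
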
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0abe79f..49fb54f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 blt_ecoskpd;
+
+	/* Make sure blitter notifies FBC of writes */
+	__gen6_gt_force_wake_get(dev_priv);
+	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+		GEN6_BLITTER_LOCK_SHIFT;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+			 GEN6_BLITTER_LOCK_SHIFT);
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	POSTING_READ(GEN6_BLITTER_ECOSKPD);
+	__gen6_gt_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		sandybridge_blit_fbc_update(dev);
 	}
 
 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -1609,19 +1630,19 @@
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
 		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		(void) ret;
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2024,6 +2045,31 @@
 		   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2032,6 +2078,7 @@
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
+	bool is_pch_port = false;
 
 	if (intel_crtc->active)
 		return;
@@ -2045,7 +2092,56 @@
 			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	ironlake_fdi_enable(crtc);
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else {
+		/* disable CPU FDI tx and PCH FDI rx */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+		POSTING_READ(reg);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(0x7 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+
+		/* Ironlake workaround, disable clock pointer after downing FDI */
+		if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe) &
+					     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+		/* still set train pattern 1 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
+		/* BPC in FDI rx is consistent with that in PIPECONF */
+		temp &= ~(0x07 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
@@ -2079,6 +2175,10 @@
 		intel_flush_display_plane(dev, plane);
 	}
 
+	/* Skip the PCH stuff if possible */
+	if (!is_pch_port)
+		goto done;
+
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
 		gen6_fdi_link_train(crtc);
@@ -2163,7 +2263,7 @@
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
 	intel_crtc_update_cursor(crtc, true);
@@ -3418,15 +3518,16 @@
 static bool ironlake_compute_wm0(struct drm_device *dev,
 				 int pipe,
 				 const struct intel_watermark_params *display,
-				 int display_latency,
+				 int display_latency_ns,
 				 const struct intel_watermark_params *cursor,
-				 int cursor_latency,
+				 int cursor_latency_ns,
 				 int *plane_wm,
 				 int *cursor_wm)
 {
 	struct drm_crtc *crtc;
-	int htotal, hdisplay, clock, pixel_size = 0;
-	int line_time_us, line_count, entries;
+	int htotal, hdisplay, clock, pixel_size;
+	int line_time_us, line_count;
+	int entries, tlb_miss;
 
 	crtc = intel_get_crtc_for_pipe(dev, pipe);
 	if (crtc->fb == NULL || !crtc->enabled)
@@ -3438,7 +3539,10 @@
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
-	entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000;
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
 	entries = DIV_ROUND_UP(entries, display->cacheline_size);
 	*plane_wm = entries + display->guard_size;
 	if (*plane_wm > (int)display->max_wm)
@@ -3446,8 +3550,11 @@
 
 	/* Use the large buffer method to calculate cursor watermark */
 	line_time_us = ((htotal * 1000) / clock);
-	line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000;
+	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
 	entries = line_count * 64 * pixel_size;
+	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
 	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
 	*cursor_wm = entries + cursor->guard_size;
 	if (*cursor_wm > (int)cursor->max_wm)
@@ -3456,14 +3563,109 @@
 	return true;
 }
 
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+				int fbc_wm, int display_wm, int cursor_wm,
+				const struct intel_watermark_params *display,
+				const struct intel_watermark_params *cursor)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+	if (fbc_wm > SNB_FBC_MAX_SRWM) {
+		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+			      fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* fbc has its own way to disable FBC WM */
+		I915_WRITE(DISP_ARB_CTL,
+			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+		return false;
+	}
+
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
+		return false;
+	}
+
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+		return false;
+	}
+
+	if (!(fbc_wm || display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3].
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level,
+				  int hdisplay, int htotal,
+				  int pixel_size, int clock, int latency_ns,
+				  const struct intel_watermark_params *display,
+				  const struct intel_watermark_params *cursor,
+				  int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*fbc_wm = *display_wm = *cursor_wm = 0;
+		return false;
+	}
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/*
+	 * Spec says:
+	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+	 */
+	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return ironlake_check_srwm(dev, level,
+				   *fbc_wm, *display_wm, *cursor_wm,
+				   display, cursor);
+}
+
 static void ironlake_update_wm(struct drm_device *dev,
 			       int planea_clock, int planeb_clock,
-			       int sr_hdisplay, int sr_htotal,
+			       int hdisplay, int htotal,
 			       int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int plane_wm, cursor_wm, enabled;
-	int tmp;
+	int fbc_wm, plane_wm, cursor_wm, enabled;
+	int clock;
 
 	enabled = 0;
 	if (ironlake_compute_wm0(dev, 0,
@@ -3498,152 +3700,49 @@
 	 * Calculate and update the self-refresh watermark only when one
 	 * display plane is used.
 	 */
-	tmp = 0;
-	if (enabled == 1) {
-		unsigned long line_time_us;
-		int small, large, plane_fbc;
-		int sr_clock, entries;
-		int line_count, line_size;
-		/* Read the self-refresh latency. The unit is 0.5us */
-		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = (sr_htotal * 1000) / sr_clock;
+	if (enabled != 1)
+		return;
 
-		/* Use ns/us then divide to preserve precision */
-		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
-			/ 1000;
-		line_size = sr_hdisplay * pixel_size;
+	clock = planea_clock ? planea_clock : planeb_clock;
 
-		/* Use the minimum of the small and large buffer method for primary */
-		small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
-		large = line_count * line_size;
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM1_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
 
-		entries = DIV_ROUND_UP(min(small, large),
-				       ironlake_display_srwm_info.cacheline_size);
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 
-		plane_fbc = entries * 64;
-		plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM2_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
 
-		plane_wm = entries + ironlake_display_srwm_info.guard_size;
-		if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
-			plane_wm = ironlake_display_srwm_info.max_wm;
-
-		/* calculate the self-refresh watermark for display cursor */
-		entries = line_count * pixel_size * 64;
-		entries = DIV_ROUND_UP(entries,
-				       ironlake_cursor_srwm_info.cacheline_size);
-
-		cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
-		if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
-			cursor_wm = ironlake_cursor_srwm_info.max_wm;
-
-		/* configure watermark and enable self-refresh */
-		tmp = (WM1_LP_SR_EN |
-		       (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
-		       (plane_fbc << WM1_LP_FBC_SHIFT) |
-		       (plane_wm << WM1_LP_SR_SHIFT) |
-		       cursor_wm);
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
-			      " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
-	}
-	I915_WRITE(WM1_LP_ILK, tmp);
-	/* XXX setup WM2 and WM3 */
-}
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- *
- * Also return true if all of those watermark values is 0, which is set by
- * sandybridge_compute_srwm, to indicate the latency is ZERO.
- */
-static bool sandybridge_check_srwm(struct drm_device *dev, int level,
-				   int fbc_wm, int display_wm, int cursor_wm)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
-		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
-	if (fbc_wm > SNB_FBC_MAX_SRWM) {
-		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
-				fbc_wm, SNB_FBC_MAX_SRWM, level);
-
-		/* fbc has it's own way to disable FBC WM */
-		I915_WRITE(DISP_ARB_CTL,
-			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
-		return false;
-	}
-
-	if (display_wm > SNB_DISPLAY_MAX_SRWM) {
-		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
-				display_wm, SNB_DISPLAY_MAX_SRWM, level);
-		return false;
-	}
-
-	if (cursor_wm > SNB_CURSOR_MAX_SRWM) {
-		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
-				cursor_wm, SNB_CURSOR_MAX_SRWM, level);
-		return false;
-	}
-
-	if (!(fbc_wm || display_wm || cursor_wm)) {
-		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
-				     int hdisplay, int htotal, int pixel_size,
-				     int clock, int latency_ns, int *fbc_wm,
-				     int *display_wm, int *cursor_wm)
-{
-
-	unsigned long line_time_us;
-	int small, large;
-	int entries;
-	int line_count, line_size;
-
-	if (!latency_ns) {
-		*fbc_wm = *display_wm = *cursor_wm = 0;
-		return false;
-	}
-
-	line_time_us = (htotal * 1000) / clock;
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = DIV_ROUND_UP(min(small, large),
-				sandybridge_display_srwm_info.cacheline_size);
-	*display_wm = entries + sandybridge_display_srwm_info.guard_size;
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 
 	/*
-	 * Spec said:
-	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+	 * WM3 is unsupported on ILK, probably because we don't have latency
+	 * data for that power state
 	 */
-	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
-	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * 64;
-	entries = DIV_ROUND_UP(entries,
-			       sandybridge_cursor_srwm_info.cacheline_size);
-	*cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
-
-	return sandybridge_check_srwm(dev, level,
-				      *fbc_wm, *display_wm, *cursor_wm);
 }
 
 static void sandybridge_update_wm(struct drm_device *dev,
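
The consolidated ironlake_compute_srwm() above reduces to a handful of integer formulas: the plane watermark takes the smaller of a "small buffer" estimate (bytes fetched during the latency window) and a "large buffer" estimate (whole scanout lines fetched in that window), the FBC watermark follows the spec formula ((plane WM * 64) / bytes per line) + 2, and the cursor watermark uses the line count against a 64-pixel-wide cursor. A standalone arithmetic sketch with made-up display parameters (the cacheline and guard sizes below are placeholders, not the driver's srwm_info tables):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define MIN(a, b)           ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical mode and latency values, just to exercise the math. */
	int clock = 148500;		/* pixel clock in kHz */
	int htotal = 2200, hdisplay = 1920;
	int pixel_size = 4;		/* bytes per pixel */
	int latency_ns = 12 * 500;	/* 6 us, from an MLTR-style field */
	int cacheline = 64, guard = 2;	/* placeholder srwm_info values */

	int line_time_us = (htotal * 1000) / clock;
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	int line_size = hdisplay * pixel_size;

	/* Small buffer: bytes streamed during the latency window. */
	int small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	/* Large buffer: whole lines streamed during the latency window. */
	int large = line_count * line_size;

	int entries = DIV_ROUND_UP(MIN(small, large), cacheline);
	int display_wm = entries + guard;
	int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;
	int cursor_wm = DIV_ROUND_UP(line_count * pixel_size * 64, cacheline) + guard;

	printf("display_wm=%d fbc_wm=%d cursor_wm=%d\n",
	       display_wm, fbc_wm, cursor_wm);
	return 0;
}
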
@@ -3652,7 +3751,7 @@
 			       int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = SNB_READ_WM0_LATENCY();
+	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
 	int fbc_wm, plane_wm, cursor_wm, enabled;
 	int clock;
 
@@ -3701,9 +3800,11 @@
 	clock = planea_clock ? planea_clock : planeb_clock;
 
 	/* WM1 */
-	if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
-				      clock, SNB_READ_WM1_LATENCY() * 500,
-				      &fbc_wm, &plane_wm, &cursor_wm))
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM1_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
 		return;
 
 	I915_WRITE(WM1_LP_ILK,
@@ -3714,10 +3815,12 @@
 		   cursor_wm);
 
 	/* WM2 */
-	if (!sandybridge_compute_srwm(dev, 2,
-				      hdisplay, htotal, pixel_size,
-				      clock, SNB_READ_WM2_LATENCY() * 500,
-				      &fbc_wm, &plane_wm, &cursor_wm))
+	if (!ironlake_compute_srwm(dev, 2,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM2_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
 		return;
 
 	I915_WRITE(WM2_LP_ILK,
@@ -3728,10 +3831,12 @@
 		   cursor_wm);
 
 	/* WM3 */
-	if (!sandybridge_compute_srwm(dev, 3,
-				      hdisplay, htotal, pixel_size,
-				      clock, SNB_READ_WM3_LATENCY() * 500,
-				      &fbc_wm, &plane_wm, &cursor_wm))
+	if (!ironlake_compute_srwm(dev, 3,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM3_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
 		return;
 
 	I915_WRITE(WM3_LP_ILK,
@@ -3817,6 +3922,11 @@
 				    sr_hdisplay, sr_htotal, pixel_size);
 }
 
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+}
+
 static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode,
@@ -3879,7 +3989,7 @@
 		num_connectors++;
 	}
 
-	if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 			      refclk / 1000);
@@ -3951,7 +4061,7 @@
 		int lane = 0, link_bw, bpp;
 		/* CPU eDP doesn't require FDI link, so just set DP M/N
 		   according to current link config */
-		if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 			target_clock = mode->clock;
 			intel_edp_link_config(has_edp_encoder,
 					      &lane, &link_bw);
@@ -4054,7 +4164,7 @@
 		udelay(200);
 
 		if (has_edp_encoder) {
-			if (dev_priv->lvds_use_ssc) {
+			if (intel_panel_use_ssc(dev_priv)) {
 				temp |= DREF_SSC1_ENABLE;
 				I915_WRITE(PCH_DREF_CONTROL, temp);
 
@@ -4065,13 +4175,13 @@
 
 			/* Enable CPU source on CPU attached eDP */
 			if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-				if (dev_priv->lvds_use_ssc)
+				if (intel_panel_use_ssc(dev_priv))
 					temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
 				else
 					temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 			} else {
 				/* Enable SSC on PCH eDP if needed */
-				if (dev_priv->lvds_use_ssc) {
+				if (intel_panel_use_ssc(dev_priv)) {
 					DRM_ERROR("enabling SSC on PCH\n");
 					temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
 				}
@@ -4099,7 +4209,7 @@
 		int factor = 21;
 
 		if (is_lvds) {
-			if ((dev_priv->lvds_use_ssc &&
+			if ((intel_panel_use_ssc(dev_priv) &&
 			     dev_priv->lvds_ssc_freq == 100) ||
 			    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
 				factor = 25;
@@ -4178,7 +4288,7 @@
 		/* XXX: just matching BIOS for now */
 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
 		dpll |= 3;
-	else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
+	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -5038,8 +5148,8 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dpll = I915_READ(dpll_reg);
+	int dpll_reg = DPLL(pipe);
+	int dpll;
 
 	if (HAS_PCH_SPLIT(dev))
 		return;
@@ -5047,17 +5157,19 @@
 	if (!dev_priv->lvds_downclock_avail)
 		return;
 
+	dpll = I915_READ(dpll_reg);
 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
 		DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
 		/* Unlock panel regs */
-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
-			   PANEL_UNLOCK_REGS);
+		I915_WRITE(PP_CONTROL,
+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
 
 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
-		dpll = I915_READ(dpll_reg);
+		POSTING_READ(dpll_reg);
 		intel_wait_for_vblank(dev, pipe);
+
 		dpll = I915_READ(dpll_reg);
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -5518,6 +5630,16 @@
 	return ret;
 }
 
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Reset flags back to the 'unknown' status so that they
+	 * will be correctly set on the initial modeset.
+	 */
+	intel_crtc->dpms_mode = -1;
+}
+
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.dpms = intel_crtc_dpms,
 	.mode_fixup = intel_crtc_mode_fixup,
@@ -5529,6 +5651,7 @@
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
+	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
@@ -5619,8 +5742,7 @@
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	intel_crtc->cursor_addr = 0;
-	intel_crtc->dpms_mode = -1;
+	intel_crtc_reset(&intel_crtc->base);
 	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -5802,6 +5924,8 @@
 		encoder->base.possible_clones =
 			intel_encoder_clones(dev, encoder->clone_mask);
 	}
+
+	intel_panel_setup_backlight(dev);
 }
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -6145,6 +6269,10 @@
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
+	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 pcu_mbox;
+	int cur_freq, min_freq, max_freq;
 	int i;
 
 	/* Here begins a magic sequence of register writes to enable
@@ -6154,7 +6282,7 @@
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6216,6 +6344,29 @@
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
+	min_freq = (rp_state_cap & 0xff0000) >> 16;
+	max_freq = rp_state_cap & 0xff;
+	cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+	/* Check for overclock support */
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+	if (pcu_mbox & (1<<31)) { /* OC supported */
+		max_freq = pcu_mbox & 0xff;
+		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 100);
+	}
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = max_freq;
+	dev_priv->min_delay = min_freq;
+	dev_priv->cur_delay = cur_freq;
+
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER,
 		   GEN6_PM_MBOX_EVENT |
@@ -6229,7 +6380,7 @@
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
@@ -6245,7 +6396,9 @@
 
 		if (IS_GEN5(dev)) {
 			/* Required for FBC */
-			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+			dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+				DPFCRUNIT_CLOCK_GATE_DISABLE |
+				DPFDUNIT_CLOCK_GATE_DISABLE;
 			/* Required for CxSR */
 			dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
@@ -6386,70 +6539,105 @@
 	} else if (IS_I830(dev)) {
 		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 	}
+}
+
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx) {
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(&dev_priv->renderctx->base);
+		dev_priv->renderctx = NULL;
+	}
+
+	if (dev_priv->pwrctx) {
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(&dev_priv->pwrctx->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
+
+		I915_WRITE(PWRCTXA, 0);
+		POSTING_READ(PWRCTXA);
+
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
+	}
+
+	ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx == NULL)
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!i915_enable_rc6)
+		return;
+
+	ret = ironlake_setup_rc6(dev);
+	if (ret)
+		return;
 
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
-		if (dev_priv->renderctx == NULL)
-			dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (dev_priv->renderctx) {
-			struct drm_i915_gem_object *obj = dev_priv->renderctx;
-			if (BEGIN_LP_RING(4) == 0) {
-				OUT_RING(MI_SET_CONTEXT);
-				OUT_RING(obj->gtt_offset |
-					 MI_MM_SPACE_GTT |
-					 MI_SAVE_EXT_STATE_EN |
-					 MI_RESTORE_EXT_STATE_EN |
-					 MI_RESTORE_INHIBIT);
-				OUT_RING(MI_NOOP);
-				OUT_RING(MI_FLUSH);
-				ADVANCE_LP_RING();
-			}
-		} else
-			DRM_DEBUG_KMS("Failed to allocate render context."
-				       "Disable RC6\n");
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		ironlake_teardown_rc6(dev);
+		return;
 	}
 
-	if (IS_GEN4(dev) && IS_MOBILE(dev)) {
-		if (dev_priv->pwrctx == NULL)
-			dev_priv->pwrctx = intel_alloc_context_page(dev);
-		if (dev_priv->pwrctx) {
-			struct drm_i915_gem_object *obj = dev_priv->pwrctx;
-			I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
-			I915_WRITE(MCHBAR_RENDER_STANDBY,
-				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
-		}
-	}
+	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	OUT_RING(MI_SET_CONTEXT);
+	OUT_RING(dev_priv->renderctx->gtt_offset |
+		 MI_MM_SPACE_GTT |
+		 MI_SAVE_EXT_STATE_EN |
+		 MI_RESTORE_EXT_STATE_EN |
+		 MI_RESTORE_INHIBIT);
+	OUT_RING(MI_SUSPEND_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_FLUSH);
+	ADVANCE_LP_RING();
+
+	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
-void intel_disable_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-		I915_WRITE(CCID, 0);
-		POSTING_READ(CCID);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
-		dev_priv->renderctx = NULL;
-	}
-
-	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
-
-		I915_WRITE(PWRCTXA, 0);
-		POSTING_READ(PWRCTXA);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
-		dev_priv->pwrctx = NULL;
-	}
-}
 
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
@@ -6665,12 +6853,7 @@
 		dev->mode_config.max_width = 8192;
 		dev->mode_config.max_height = 8192;
 	}
-
-	/* set memory base */
-	if (IS_GEN2(dev))
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
-	else
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+	dev->mode_config.fb_base = dev->agp->base;
 
 	if (IS_MOBILE(dev) || !IS_GEN2(dev))
 		dev_priv->num_pipe = 2;
@@ -6698,6 +6881,9 @@
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
+	if (IS_IRONLAKE_M(dev))
+		ironlake_enable_rc6(dev);
+
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
@@ -6734,7 +6920,8 @@
 	if (IS_GEN6(dev))
 		gen6_disable_rps(dev);
 
-	intel_disable_clock_gating(dev);
+	if (IS_IRONLAKE_M(dev))
+		ironlake_disable_rc6(dev);
 
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1dc6040..51cb4e3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1153,18 +1153,27 @@
 static uint32_t
 intel_gen6_edp_signal_levels(uint8_t train_set)
 {
-	switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-		return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
 	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
 	}
 }
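
intel_gen6_edp_signal_levels() now folds several requested (voltage swing, pre-emphasis) combinations onto one port setting, because the SNB B-stepping eDP port exposes fewer distinct drive levels than DisplayPort can request. A table-style sketch of the same grouping, using the EDP_LINK_TRAIN_*_SNB_B values introduced earlier in this patch (the millivolt/decibel interface of the helper is invented for illustration; the driver works directly on the DPCD train_set bits):

#include <stdint.h>
#include <stdio.h>

/* Register values from the EDP_LINK_TRAIN_*_SNB_B defines in this patch. */
#define SNB_B_400_600MV_0DB	(0x00u << 22)
#define SNB_B_400MV_3_5DB	(0x01u << 22)
#define SNB_B_400_600MV_6DB	(0x3au << 22)
#define SNB_B_600_800MV_3_5DB	(0x39u << 22)
#define SNB_B_800_1200MV_0DB	(0x38u << 22)

/* Mirror of the grouping in intel_gen6_edp_signal_levels(): several
 * requested (swing, pre-emphasis) pairs collapse onto one port setting. */
static uint32_t snb_edp_signal_levels(int swing_mv, int preemph_tenth_db)
{
	if (preemph_tenth_db == 0 && (swing_mv == 400 || swing_mv == 600))
		return SNB_B_400_600MV_0DB;
	if (preemph_tenth_db == 35 && swing_mv == 400)
		return SNB_B_400MV_3_5DB;
	if (preemph_tenth_db == 60 && (swing_mv == 400 || swing_mv == 600))
		return SNB_B_400_600MV_6DB;
	if (preemph_tenth_db == 35 && (swing_mv == 600 || swing_mv == 800))
		return SNB_B_600_800MV_3_5DB;
	if (preemph_tenth_db == 0 && (swing_mv == 800 || swing_mv == 1200))
		return SNB_B_800_1200MV_0DB;
	return SNB_B_400_600MV_0DB;	/* default, as in the switch above */
}

int main(void)
{
	printf("600 mV / 3.5 dB -> 0x%08x\n",
	       (unsigned)snb_edp_signal_levels(600, 35));
	return 0;
}
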
 
@@ -1334,17 +1343,24 @@
 	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool channel_eq = false;
-	int tries;
+	int tries, cr_tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
 	tries = 0;
+	cr_tries = 0;
 	channel_eq = false;
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
 		uint32_t    signal_levels;
 
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			intel_dp_link_down(intel_dp);
+			break;
+		}
+
 		if (IS_GEN6(dev) && is_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
@@ -1367,14 +1383,26 @@
 		if (!intel_dp_get_link_status(intel_dp))
 			break;
 
+		/* Make sure clock is still ok */
+		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			intel_dp_start_link_train(intel_dp);
+			cr_tries++;
+			continue;
+		}
+
 		if (intel_channel_eq_ok(intel_dp)) {
 			channel_eq = true;
 			break;
 		}
 
-		/* Try 5 times */
-		if (tries > 5)
-			break;
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp_link_down(intel_dp);
+			intel_dp_start_link_train(intel_dp);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
 
 		/* Compute new intel_dp->train_set as requested by target */
 		intel_get_adjust_train(intel_dp);
@@ -1611,6 +1639,24 @@
 	return 0;
 }
 
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_dp_set_property(struct drm_connector *connector,
 		      struct drm_property *property,
@@ -1624,17 +1670,23 @@
 		return ret;
 
 	if (property == intel_dp->force_audio_property) {
-		if (val == intel_dp->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
 			return 0;
 
-		intel_dp->force_audio = val;
+		intel_dp->force_audio = i;
 
-		if (val > 0 && intel_dp->has_audio)
-			return 0;
-		if (val < 0 && !intel_dp->has_audio)
+		if (i == 0)
+			has_audio = intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
 			return 0;
 
-		intel_dp->has_audio = val > 0;
+		intel_dp->has_audio = has_audio;
 		goto done;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d782ad9..2c43104 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -257,6 +257,9 @@
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern u32 intel_panel_get_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
 
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -295,7 +298,6 @@
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 701e830..5127827 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -62,6 +62,7 @@
 			  struct drm_fb_helper_surface_size *sizes)
 {
 	struct drm_device *dev = ifbdev->helper.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd mode_cmd;
@@ -77,7 +78,7 @@
 	mode_cmd.height = sizes->surface_height;
 
 	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
 	mode_cmd.depth = sizes->surface_depth;
 
 	size = mode_cmd.pitch * mode_cmd.height;
@@ -120,6 +121,11 @@
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
 	/* setup aperture base/size for vesafb takeover */
 	info->apertures = alloc_apertures(1);
 	if (!info->apertures) {
@@ -127,10 +133,8 @@
 		goto out_unpin;
 	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	if (!IS_GEN2(dev))
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
-	else
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+	info->apertures->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
 	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
 	info->fix.smem_len = size;
@@ -140,16 +144,11 @@
 		ret = -ENOSPC;
 		goto out_unpin;
 	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	info->screen_size = size;
 
 //	memset(info->screen_base, 0, size);
 
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	info->pixmap.size = 64*1024;
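
The pitch fix above replaces (bpp + 1) / 8 with (bpp + 7) / 8 before the 64-byte stride alignment: the former only rounds up when bpp is exactly one bit short of a byte boundary, while the latter is the proper bits-to-bytes ceiling. A quick comparison over a range of depths:

#include <stdio.h>

/* Correct bits -> bytes ceiling, as now used for the framebuffer pitch. */
#define BITS_TO_BYTES(bpp)  (((bpp) + 7) / 8)

int main(void)
{
	int bpp;

	/* The old (bpp + 1) / 8 only rounds up when bpp is 7 bits short of
	 * a byte boundary; (bpp + 7) / 8 rounds up for every partial byte. */
	for (bpp = 8; bpp <= 32; bpp += 2)
		printf("bpp=%2d  old=%d  new=%d\n",
		       bpp, (bpp + 1) / 8, BITS_TO_BYTES(bpp));
	return 0;
}
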
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e..c635c9e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@
 				   &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 }
 
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector,
+			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+	if (edid) {
+		if (edid->input & DRM_EDID_INPUT_DIGITAL)
+			has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_hdmi_set_property(struct drm_connector *connector,
 		      struct drm_property *property,
@@ -264,17 +285,23 @@
 		return ret;
 
 	if (property == intel_hdmi->force_audio_property) {
-		if (val == intel_hdmi->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_hdmi->force_audio)
 			return 0;
 
-		intel_hdmi->force_audio = val;
+		intel_hdmi->force_audio = i;
 
-		if (val > 0 && intel_hdmi->has_audio)
-			return 0;
-		if (val < 0 && !intel_hdmi->has_audio)
+		if (i == 0)
+			has_audio = intel_hdmi_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_hdmi->has_audio)
 			return 0;
 
-		intel_hdmi->has_audio = val > 0;
+		intel_hdmi->has_audio = has_audio;
 		goto done;
 	}
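
Both the HDMI and DP connectors now treat force_audio as a tri-state: a positive value forces audio on, a negative value forces it off, and zero means "auto", which re-reads the sink's EDID and asks drm_detect_monitor_audio() whether it advertises audio. A minimal sketch of that policy with the EDID probe stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for drm_detect_monitor_audio(drm_get_edid(...)); pretend the
 * attached sink advertises audio for the sake of the example. */
static bool probe_sink_audio(void)
{
	return true;
}

/* force_audio policy used by intel_dp/intel_hdmi set_property():
 * >0 forces audio on, <0 forces it off, 0 defers to the EDID. */
static bool resolve_has_audio(int force_audio)
{
	if (force_audio == 0)
		return probe_sink_audio();
	return force_audio > 0;
}

int main(void)
{
	printf("force=-1 -> %d\n", resolve_has_audio(-1));
	printf("force= 0 -> %d\n", resolve_has_audio(0));
	printf("force= 1 -> %d\n", resolve_has_audio(1));
	return 0;
}
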
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa230708..bcdba7b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
 	POSTING_READ(lvds_reg);
 
-	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	intel_panel_enable_backlight(dev);
 }
 
 static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -123,8 +123,7 @@
 		lvds_reg = LVDS;
 	}
 
-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-	intel_panel_set_backlight(dev, 0);
+	intel_panel_disable_backlight(dev);
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 
@@ -262,12 +261,6 @@
 		return true;
 	}
 
-	/* Make sure pre-965s set dither correctly */
-	if (INTEL_INFO(dev)->gen < 4) {
-		if (dev_priv->lvds_dither)
-			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-	}
-
 	/* Native modes don't need fitting */
 	if (adjusted_mode->hdisplay == mode->hdisplay &&
 	    adjusted_mode->vdisplay == mode->vdisplay)
@@ -375,6 +368,16 @@
 	}
 
 out:
+	/* If not enabling scaling, be consistent and always use 0. */
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
+
+	/* Make sure pre-965s set dither correctly */
+	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 	if (pfit_control != intel_lvds->pfit_control ||
 	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 		intel_lvds->pfit_control = pfit_control;
@@ -398,8 +401,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
 	/* We try to do the minimum that is necessary in order to unlock
 	 * the registers for mode setting.
 	 *
@@ -430,9 +431,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-	if (dev_priv->backlight_level == 0)
-		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
-
 	/* Undo any unlocking done in prepare to prevent accidental
 	 * adjustment of the registers.
 	 */
@@ -706,6 +704,14 @@
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen i915GMm-HFS",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Aopen i945GTt-VFA",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7a..64fd644 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>
 
 #include "drmP.h"
@@ -476,7 +477,7 @@
 		return -ENOTSUPP;
 	}
 
-	base = ioremap(asls, OPREGION_SIZE);
+	base = acpi_os_ioremap(asls, OPREGION_SIZE);
 	if (!base)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 7350ec2..f8f86e5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -208,7 +208,6 @@
 			val &= ~1;
 			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
 			val *= lbpc;
-			val >>= 1;
 		}
 	}
 
@@ -235,11 +234,11 @@
 
 	if (is_backlight_combination_mode(dev)){
 		u32 max = intel_panel_get_max_backlight(dev);
-		u8 lpbc;
+		u8 lbpc;
 
-		lpbc = level * 0xfe / max + 1;
-		level /= lpbc;
-		pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+		lbpc = level * 0xfe / max + 1;
+		level /= lbpc;
+		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
 	}
 
 	tmp = I915_READ(BLC_PWM_CTL);
@@ -250,3 +249,34 @@
 		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_enabled) {
+		dev_priv->backlight_level = intel_panel_get_backlight(dev);
+		dev_priv->backlight_enabled = false;
+	}
+
+	intel_panel_set_backlight(dev, 0);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_level == 0)
+		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	dev_priv->backlight_enabled = true;
+}
+
+void intel_panel_setup_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight_level = intel_panel_get_backlight(dev);
+	dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
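
In combination backlight mode the requested level is split across two controls: the legacy 8-bit LBPC byte in PCI config space and the PWM duty cycle in BLC_PWM_CTL, and reading the brightness back multiplies the two (the stray "val >>= 1" that halved the readback is gone, and the lpbc/lbpc spelling is now consistent). A standalone sketch of the split-and-recombine arithmetic with a made-up maximum duty cycle:

#include <stdio.h>

/* Split a requested level into the legacy LBPC byte and the PWM duty cycle,
 * the way intel_panel_set_backlight() does in combination mode, then
 * recombine them the way intel_panel_get_backlight() now reads it back. */
int main(void)
{
	unsigned int max = 4648;	/* hypothetical max duty cycle */
	unsigned int level = 1234;	/* requested brightness */

	unsigned int lbpc = level * 0xfe / max + 1;
	unsigned int duty = level / lbpc;

	/* Readback is approximate because both splits use integer division. */
	unsigned int readback = (duty & ~1u) * lbpc;

	printf("lbpc=%u duty=%u readback=%u (requested %u)\n",
	       lbpc, duty, readback, level);
	return 0;
}
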
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 56bc95c..445f27e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+	if (space < 0)
+		space += ring->size;
+	return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
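
The new ring_space() helper is the usual circular-buffer free-space formula: the distance from tail to head, minus 8 bytes of slack so a completely full ring is never mistaken for an empty one, wrapped by the ring size when the difference goes negative. (A later hunk also trims effective_size by 128 bytes on i830 so TAIL stays out of the last two cachelines.) A tiny standalone model of the same computation:

#include <stdio.h>

struct ring {
	unsigned int head, tail, size;
};

/* Free bytes between tail and head, keeping 8 bytes of slack so a full
 * ring is never confused with an empty one (mirrors ring_space() above). */
static int ring_space(const struct ring *r)
{
	int space = (int)r->head - (int)(r->tail + 8);

	if (space < 0)
		space += r->size;
	return space;
}

int main(void)
{
	struct ring r = { .head = 256, .tail = 1024, .size = 4096 };

	printf("space=%d\n", ring_space(&r));	/* 256 - 1032 + 4096 = 3320 */
	r.head = 2048;
	printf("space=%d\n", ring_space(&r));	/* 2048 - 1032 = 1016 */
	return 0;
}
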
@@ -48,7 +56,7 @@
 	return seqno;
 }
 
-static void
+static int
 render_ring_flush(struct intel_ring_buffer *ring,
 		  u32	invalidate_domains,
 		  u32	flush_domains)
@@ -56,6 +64,7 @@
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
+	int ret;
 
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -116,12 +125,16 @@
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		if (intel_ring_begin(ring, 2) == 0) {
-			intel_ring_emit(ring, cmd);
-			intel_ring_emit(ring, MI_NOOP);
-			intel_ring_advance(ring);
-		}
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, cmd);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
+
+	return 0;
 }
 
 static void ring_write_tail(struct intel_ring_buffer *ring,
@@ -199,11 +212,9 @@
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->head = I915_READ_HEAD(ring);
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->space = ring_space(ring);
 	}
 
 	return 0;
@@ -480,26 +491,56 @@
 	return pc->cpu_page[0];
 }
 
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
 static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv,
-						     GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+			ironlake_enable_irq(dev_priv,
+					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
@@ -508,20 +549,18 @@
 render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv,
-						      GT_USER_INTERRUPT |
-						      GT_PIPE_NOTIFY);
+			ironlake_disable_irq(dev_priv,
+					     GT_USER_INTERRUPT |
+					     GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -534,19 +573,24 @@
 	POSTING_READ(mmio);
 }
 
-static void
+static int
 bsd_ring_flush(struct intel_ring_buffer *ring,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+	int ret;
 
-	if (intel_ring_begin(ring, 2) == 0) {
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
@@ -577,18 +621,15 @@
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 	       return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_enable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
@@ -597,15 +638,47 @@
 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+}
 
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_disable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+	       return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
 	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
 }
 
 static bool
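
The gen6 helpers introduced above additionally keep a software shadow of the ring's interrupt mask: taking the first reference clears the ring's bit (unmasking it) and writes the mask back, and dropping the last reference sets the bit again. A standalone sketch of that shadow-mask bookkeeping; the bit position and write_imr() are illustrative stand-ins, not the real register layout or I915_WRITE_IMR():

#include <stdint.h>
#include <stdio.h>

#define RING_USER_INTERRUPT (1u << 12)   /* illustrative bit, not the real layout */

/* Stand-in for the IMR register write. */
static void write_imr(uint32_t val)
{
    printf("IMR <- 0x%08x\n", (unsigned)val);
}

int main(void)
{
    uint32_t irq_mask = ~0u;             /* every source masked by default */

    irq_mask &= ~RING_USER_INTERRUPT;    /* get_irq: unmask this ring's bit */
    write_imr(irq_mask);

    irq_mask |= RING_USER_INTERRUPT;     /* put_irq: mask it again */
    write_imr(irq_mask);
    return 0;
}
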
@@ -748,6 +821,9 @@
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	spin_lock_init(&ring->irq_lock);
+	ring->irq_mask = ~0;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -785,6 +861,14 @@
 	if (ret)
 		goto err_unmap;
 
+	/* Work around an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points within the last 2 cachelines of
+	 * the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
 	return 0;
 
 err_unmap:
@@ -827,8 +911,7 @@
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
-	int rem;
-	rem = ring->size - ring->tail;
+	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
 		int ret = intel_wait_ring_buffer(ring, rem);
@@ -844,7 +927,7 @@
 	}
 
 	ring->tail = 0;
-	ring->space = ring->head - 8;
+	ring->space = ring_space(ring);
 
 	return 0;
 }
@@ -856,20 +939,22 @@
 	unsigned long end;
 	u32 head;
 
+	/* If the reported head position has wrapped or hasn't advanced,
+	 * fall back to the slow and accurate path.
+	 */
+	head = intel_read_status_page(ring, 4);
+	if (head > ring->head) {
+		ring->head = head;
+		ring->space = ring_space(ring);
+		if (ring->space >= n)
+			return 0;
+	}
+
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		/* If the reported head position has wrapped or hasn't advanced,
-		 * fallback to the slow and accurate path.
-		 */
-		head = intel_read_status_page(ring, 4);
-		if (head < ring->actual_head)
-			head = I915_READ_HEAD(ring);
-		ring->actual_head = head;
-		ring->head = head & HEAD_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->head = I915_READ_HEAD(ring);
+		ring->space = ring_space(ring);
 		if (ring->space >= n) {
 			trace_i915_ring_wait_end(dev);
 			return 0;
@@ -895,7 +980,7 @@
 	int n = 4*num_dwords;
 	int ret;
 
-	if (unlikely(ring->tail + n > ring->size)) {
+	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
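
Two of the preceding hunks work together: the i830 workaround shortens the usable ring to effective_size (size minus 128 bytes), and the head/space bookkeeping is now funnelled through a single space calculation. Free space in such a circular buffer is head minus tail modulo the ring size, less a small guard so a full ring never looks empty. A self-contained sketch of that arithmetic (the 8-byte guard mirrors the old open-coded expression; it is illustrative rather than a claim about the helper's exact body):

#include <stdio.h>

struct ring {
    unsigned int head, tail;   /* byte offsets into the ring */
    unsigned int size;         /* total allocation */
    unsigned int effective;    /* size minus the i830-style guard band */
};

/* Free bytes between tail (producer) and head (consumer). */
static int ring_free_space(const struct ring *r)
{
    int space = (int)r->head - (int)(r->tail + 8);

    if (space < 0)
        space += r->size;
    return space;
}

int main(void)
{
    struct ring r = { .head = 256, .tail = 4096 - 64, .size = 4096,
                      .effective = 4096 - 128 };
    unsigned int n = 96;

    printf("free space: %d bytes\n", ring_free_space(&r));

    /* An emit of n bytes must wrap early when it would cross the
     * effective size, not the raw size, to honour the i830 erratum. */
    if (r.tail + n > r.effective)
        printf("would wrap before emitting %u bytes\n", n);
    return 0;
}
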
@@ -973,20 +1058,28 @@
 	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct intel_ring_buffer *ring,
-			    u32 invalidate_domains,
-			    u32 flush_domains)
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+			   u32 invalidate, u32 flush)
 {
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+	uint32_t cmd;
+	int ret;
 
-	if (intel_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
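
The rewritten flush above follows a fixed shape: return early when no relevant domain is dirty, reserve ring space and propagate any error, then OR optional invalidate bits into the MI_FLUSH_DW command word before emitting it. A compact sketch of that conditional command construction; the numeric values below are placeholders for illustration, not the hardware encodings:

#include <stdint.h>
#include <stdio.h>

#define MI_FLUSH_DW        0x26u      /* illustrative values only */
#define MI_INVALIDATE_TLB  (1u << 18)
#define MI_INVALIDATE_BSD  (1u << 7)
#define GPU_DOMAINS        0x3u

static uint32_t build_flush_cmd(uint32_t invalidate)
{
    uint32_t cmd = MI_FLUSH_DW;

    if (invalidate & GPU_DOMAINS)   /* only pay for the invalidate when asked */
        cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
    return cmd;
}

int main(void)
{
    printf("flush only:       0x%08x\n", (unsigned)build_flush_cmd(0));
    printf("flush+invalidate: 0x%08x\n", (unsigned)build_flush_cmd(GPU_DOMAINS));
    return 0;
}
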
@@ -1008,15 +1101,35 @@
 }
 
 static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 /* ring buffer for Video Codec for Gen6+ */
@@ -1040,13 +1153,17 @@
 static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
 
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
 
 
@@ -1115,20 +1232,28 @@
 		return intel_ring_begin(ring, 4);
 }
 
-static void blt_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+static int blt_ring_flush(struct intel_ring_buffer *ring,
+			  u32 invalidate, u32 flush)
 {
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
+	uint32_t cmd;
+	int ret;
 
-	if (blt_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = blt_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_DOMAIN_RENDER)
+		cmd |= MI_INVALIDATE_TLB;
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -1165,6 +1290,8 @@
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
@@ -1178,6 +1305,48 @@
 	return intel_init_ring_buffer(dev, ring);
 }
 
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
+	}
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+
+	ring->size = size;
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	ring->map.offset = start;
+	ring->map.size = size;
+	ring->map.type = 0;
+	ring->map.flags = 0;
+	ring->map.mtrr = 0;
+
+	drm_core_ioremap_wc(&ring->map, dev);
+	if (ring->map.handle == NULL) {
+		DRM_ERROR("cannot ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	ring->virtual_start = (void __force __iomem *)ring->map.handle;
+	return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8e2e357..3430686 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,23 +14,27 @@
 	struct		drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
 
-#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
 
-#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
+#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
 
-#define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD(ring->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
+#define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
 
-#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
+#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
 
 struct  intel_ring_buffer {
 	const char	*name;
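
Beyond switching the write macros to I915_RING_WRITE, the header hunk above wraps the macro argument in parentheses, e.g. RING_TAIL((ring)->mmio_base). Without the parentheses, passing a more complex expression as the argument can expand with unintended precedence. A tiny, generic illustration of the hazard (toy macros, unrelated to the i915 register definitions):

#include <stdio.h>

#define BAD_DOUBLE(x)  (x * 2)       /* argument not parenthesized */
#define GOOD_DOUBLE(x) ((x) * 2)

int main(void)
{
    /* 1 + 2 * 2 = 5 with the bad macro, (1 + 2) * 2 = 6 with the good one. */
    printf("BAD_DOUBLE(1 + 2)  = %d\n", BAD_DOUBLE(1 + 2));
    printf("GOOD_DOUBLE(1 + 2) = %d\n", GOOD_DOUBLE(1 + 2));
    return 0;
}
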
@@ -44,17 +48,19 @@
 	struct		drm_device *dev;
 	struct		drm_i915_gem_object *obj;
 
-	u32		actual_head;
 	u32		head;
 	u32		tail;
 	int		space;
 	int		size;
+	int		effective_size;
 	struct intel_hw_status_page status_page;
 
+	spinlock_t	irq_lock;
+	u32		irq_refcount;
+	u32		irq_mask;
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
-	atomic_t	irq_refcount;
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
 
@@ -62,9 +68,9 @@
 
 	void		(*write_tail)(struct intel_ring_buffer *ring,
 				      u32 value);
-	void		(*flush)(struct intel_ring_buffer *ring,
-				 u32	invalidate_domains,
-				 u32	flush_domains);
+	int __must_check (*flush)(struct intel_ring_buffer *ring,
+				  u32	invalidate_domains,
+				  u32	flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
 	u32		(*get_seqno)(struct intel_ring_buffer *ring);
@@ -161,4 +167,7 @@
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9d0af36..7c50cdc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
                          SDVO_TV_MASK)
 
 #define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
 
@@ -473,20 +474,6 @@
 		return false;
 	}
 
-	i = 3;
-	while (status == SDVO_CMD_STATUS_PENDING && i--) {
-		if (!intel_sdvo_read_byte(intel_sdvo,
-					  SDVO_I2C_CMD_STATUS,
-					  &status))
-			return false;
-	}
-	if (status != SDVO_CMD_STATUS_SUCCESS) {
-		DRM_DEBUG_KMS("command returns response %s [%d]\n",
-			      status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
-			      status);
-		return false;
-	}
-
 	return true;
 }
 
@@ -497,6 +484,8 @@
 	u8 status;
 	int i;
 
+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
 	/*
 	 * The documentation states that all commands will be
 	 * processed within 15µs, and that we need only poll
@@ -505,14 +494,19 @@
 	 *
 	 * Check 5 times in case the hardware failed to read the docs.
 	 */
-	do {
+	if (!intel_sdvo_read_byte(intel_sdvo,
+				  SDVO_I2C_CMD_STATUS,
+				  &status))
+		goto log_fail;
+
+	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+		udelay(15);
 		if (!intel_sdvo_read_byte(intel_sdvo,
 					  SDVO_I2C_CMD_STATUS,
 					  &status))
-			return false;
-	} while (status == SDVO_CMD_STATUS_PENDING && --retry);
+			goto log_fail;
+	}
 
-	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
 		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
 	else
@@ -533,7 +527,7 @@
 	return true;
 
 log_fail:
-	DRM_LOG_KMS("\n");
+	DRM_LOG_KMS("... failed\n");
 	return false;
 }
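
The restructured response path above performs one immediate status read, then polls a bounded number of times with a short delay while the device still reports PENDING, and treats any bus error as an immediate failure. A user-space sketch of that bounded-retry shape, with usleep() standing in for udelay() and read_status() as a made-up placeholder for the i2c status read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

enum { STATUS_PENDING = 2, STATUS_SUCCESS = 1 };

/* Stand-in for the real status read; pretends to need a couple of polls. */
static bool read_status(int *status)
{
    static int calls;

    *status = (++calls < 3) ? STATUS_PENDING : STATUS_SUCCESS;
    return true;
}

static bool wait_for_response(void)
{
    int status, retry = 5;

    if (!read_status(&status))
        return false;                 /* bus error: give up immediately */

    while (status == STATUS_PENDING && retry--) {
        usleep(15);                   /* spec says ~15us per command */
        if (!read_status(&status))
            return false;
    }
    return status == STATUS_SUCCESS;  /* still PENDING after 5 tries fails too */
}

int main(void)
{
    printf("response %s\n", wait_for_response() ? "ok" : "failed");
    return 0;
}
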
 
@@ -550,6 +544,7 @@
 static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
 					      u8 ddc_bus)
 {
+	/* This must be the immediately preceding write before the i2c xfer */
 	return intel_sdvo_write_cmd(intel_sdvo,
 				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
 				    &ddc_bus, 1);
@@ -557,7 +552,10 @@
 
 static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
 {
-	return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
 }
 
 static bool
@@ -859,18 +857,21 @@
 
 	intel_dip_infoframe_csum(&avi_if);
 
-	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+	if (!intel_sdvo_set_value(intel_sdvo,
+				  SDVO_CMD_SET_HBUF_INDEX,
 				  set_buf_index, 2))
 		return false;
 
 	for (i = 0; i < sizeof(avi_if); i += 8) {
-		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+		if (!intel_sdvo_set_value(intel_sdvo,
+					  SDVO_CMD_SET_HBUF_DATA,
 					  data, 8))
 			return false;
 		data++;
 	}
 
-	return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_HBUF_TXRATE,
 				    &tx_rate, 1);
 }
 
@@ -1024,9 +1025,13 @@
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (intel_sdvo->has_hdmi_monitor &&
-	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
-		return;
+	if (intel_sdvo->has_hdmi_monitor) {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		intel_sdvo_set_avi_infoframe(intel_sdvo);
+	} else
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
 	if (intel_sdvo->is_tv &&
 	    !intel_sdvo_set_tv_format(intel_sdvo))
@@ -1355,7 +1360,8 @@
 				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
 				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
 			}
-		}
+		} else
+			status = connector_status_disconnected;
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1398,12 +1404,30 @@
 
 	intel_sdvo->attached_output = response;
 
+	intel_sdvo->has_hdmi_monitor = false;
+	intel_sdvo->has_hdmi_audio = false;
+
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
-	else if (response & SDVO_TMDS_MASK)
+	else if (IS_TMDS(intel_sdvo_connector))
 		ret = intel_sdvo_hdmi_sink_detect(connector);
-	else
-		ret = connector_status_connected;
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (edid->input & DRM_EDID_INPUT_DIGITAL)
+				ret = connector_status_disconnected;
+			else
+				ret = connector_status_connected;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		} else
+			ret = connector_status_connected;
+	}
 
 	/* May update encoder flag for like clock for SDVO TV, etc.*/
 	if (ret == connector_status_connected) {
@@ -1439,10 +1463,15 @@
 		edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+		if (connector_is_digital == monitor_is_digital) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}
+
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1661,6 +1690,22 @@
 	kfree(connector);
 }
 
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	if (!intel_sdvo->is_hdmi)
+		return false;
+
+	edid = intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+
+	return has_audio;
+}
+
 static int
 intel_sdvo_set_property(struct drm_connector *connector,
 			struct drm_property *property,
@@ -1677,17 +1722,23 @@
 		return ret;
 
 	if (property == intel_sdvo_connector->force_audio_property) {
-		if (val == intel_sdvo_connector->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_sdvo_connector->force_audio)
 			return 0;
 
-		intel_sdvo_connector->force_audio = val;
+		intel_sdvo_connector->force_audio = i;
 
-		if (val > 0 && intel_sdvo->has_hdmi_audio)
-			return 0;
-		if (val < 0 && !intel_sdvo->has_hdmi_audio)
+		if (i == 0)
+			has_audio = intel_sdvo_detect_hdmi_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_sdvo->has_hdmi_audio)
 			return 0;
 
-		intel_sdvo->has_hdmi_audio = val > 0;
+		intel_sdvo->has_hdmi_audio = has_audio;
 		goto done;
 	}
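
The property handler above now treats force_audio as a tri-state: a positive value forces audio on, a negative value forces it off, and zero means auto, in which case the monitor's EDID is probed. A minimal sketch of resolving that tri-state to a boolean; detect_audio_from_edid() is a hypothetical stand-in for the driver's EDID probe:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical auto-detect hook; the real driver parses the EDID. */
static bool detect_audio_from_edid(void)
{
    return true;
}

static bool resolve_audio(int force_audio)
{
    if (force_audio == 0)                  /* auto: ask the monitor */
        return detect_audio_from_edid();
    return force_audio > 0;                /* >0 forces on, <0 forces off */
}

int main(void)
{
    printf("force=-1 -> %d\n", resolve_audio(-1));
    printf("force=0  -> %d\n", resolve_audio(0));
    printf("force=1  -> %d\n", resolve_audio(1));
    return 0;
}
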
 
@@ -1922,20 +1973,7 @@
 static bool
 intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	int is_hdmi;
-
-	if (!intel_sdvo_check_supp_encode(intel_sdvo))
-		return false;
-
-	if (!intel_sdvo_set_target_output(intel_sdvo,
-					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
-		return false;
-
-	is_hdmi = 0;
-	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
-		return false;
-
-	return !!is_hdmi;
+	return intel_sdvo_check_supp_encode(intel_sdvo);
 }
 
 static u8
@@ -2037,12 +2075,7 @@
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
 	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
-		/* enable hdmi encoding mode if supported */
-		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
-		intel_sdvo_set_colorimetry(intel_sdvo,
-					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-
 		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4..fe4a53a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+		      struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0,
-			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_disable_pipestat(dev_priv, 0,
+				      PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@
 	I915_WRITE(TV_CTL, save_tv_ctl);
 
 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0,
-			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_enable_pipestat(dev_priv, 0,
+				     PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	return type;
 }
@@ -1356,7 +1361,7 @@
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
 	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-		type = intel_tv_detect_type(intel_tv);
+		type = intel_tv_detect_type(intel_tv, connector);
 	} else if (force) {
 		struct drm_crtc *crtc;
 		int dpms_mode;
@@ -1364,7 +1369,7 @@
 		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
 						  &mode, &dpms_mode);
 		if (crtc) {
-			type = intel_tv_detect_type(intel_tv);
+			type = intel_tv_detect_type(intel_tv, connector);
 			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
@@ -1658,6 +1663,18 @@
 	intel_encoder = &intel_tv->base;
 	connector = &intel_connector->base;
 
+	/* The documentation, for the older chipsets at least, recommends
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because, in order to perform the hotplug detection, the
+	 * PLLs for the TV must be kept alive, increasing power drain and
+	 * starving bandwidth from other encoders. Notably, it causes pipe
+	 * underruns on Crestline when this encoder is supposedly idle.
+	 *
+	 * More recent chipsets favour HDMI rather than integrated S-Video.
+	 */
+	connector->polled =
+		DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 21d6c29..de70959 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -8,7 +8,7 @@
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select FB
-	select FRAMEBUFFER_CONSOLE if !EMBEDDED
+	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
 	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
 	help
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index b14c811..d3a9c6e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -59,7 +59,7 @@
 	return 0;
 }
 
-static struct backlight_ops nv40_bl_ops = {
+static const struct backlight_ops nv40_bl_ops = {
 	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = nv40_get_intensity,
 	.update_status = nv40_set_intensity,
@@ -82,7 +82,7 @@
 	return 0;
 }
 
-static struct backlight_ops nv50_bl_ops = {
+static const struct backlight_ops nv50_bl_ops = {
 	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = nv50_get_intensity,
 	.update_status = nv50_set_intensity,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d304655..6bdab89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1927,7 +1927,7 @@
 	 * offset      (8  bit): opcode
 	 * offset + 1  (16 bit): time
 	 *
-	 * Sleep for "time" miliseconds.
+	 * Sleep for "time" milliseconds.
 	 */
 
 	unsigned time = ROM16(bios->data[offset + 1]);
@@ -1935,7 +1935,7 @@
 	if (!iexec->execute)
 		return 3;
 
-	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n",
+	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
 		offset, time);
 
 	msleep(time);
@@ -6228,7 +6228,7 @@
 		entry->tvconf.has_component_output = false;
 		break;
 	case OUTPUT_LVDS:
-		if ((conn & 0x00003f00) != 0x10)
+		if ((conn & 0x00003f00) >> 8 != 0x10)
 			entry->lvdsconf.use_straps_for_mode = true;
 		entry->lvdsconf.use_power_scripts = true;
 		break;
@@ -6310,6 +6310,9 @@
 static bool
 apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+
 	/* Dell Precision M6300
 	 *   DCB entry 2: 02025312 00000010
 	 *   DCB entry 3: 02026312 00000020
@@ -6327,6 +6330,18 @@
 			return false;
 	}
 
+	/* GeForce3 Ti 200
+	 *
+	 * DCB reports an LVDS output that should be TMDS:
+	 *   DCB entry 1: f2005014 ffffffff
+	 */
+	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+			fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
+			return false;
+		}
+	}
+
 	return true;
 }
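
The added quirk above fabricates a TMDS output for one specific board, identified by its PCI device and subsystem IDs plus the exact bogus DCB words. Board quirks of this kind are commonly kept as a small match table over (device, subvendor, subdevice); a sketch of that pattern, reusing the IDs from the hunk purely as example data:

#include <stdint.h>
#include <stdio.h>

struct quirk {
    uint16_t device, subvendor, subdevice;
    const char *note;
};

static const struct quirk quirks[] = {
    { 0x0201, 0x1462, 0x8851, "GeForce3 Ti 200: DCB LVDS entry is really TMDS" },
};

static const struct quirk *match_quirk(uint16_t dev, uint16_t subv, uint16_t subd)
{
    for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
        if (quirks[i].device == dev &&
            quirks[i].subvendor == subv &&
            quirks[i].subdevice == subd)
            return &quirks[i];
    return NULL;
}

int main(void)
{
    const struct quirk *q = match_quirk(0x0201, 0x1462, 0x8851);

    printf("%s\n", q ? q->note : "no quirk");
    return 0;
}
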
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26..a521840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	nouveau_vm_put(&nvbo->vma);
+	if (nvbo->vma.node) {
+		nouveau_vm_unmap(&nvbo->vma);
+		nouveau_vm_put(&nvbo->vma);
+	}
 	kfree(nvbo);
 }
 
@@ -128,6 +131,7 @@
 		}
 	}
 
+	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -166,17 +170,17 @@
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
 
 	if (dev_priv->card_type == NV_10 &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->bo.mem.num_pages < vram_pages / 2) {
 		/*
 		 * Make sure that the color and depth buffers are handled
 		 * by independent memory controller units. Up to a 9x
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
 			nvbo->placement.fpfn = vram_pages / 2;
 			nvbo->placement.lpfn = ~0;
@@ -785,7 +789,7 @@
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
@@ -811,11 +815,11 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e000..390d82c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@
 	int high_w = 0, high_h = 0, high_v = 0;
 
 	list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
 		if (helper->mode_valid(connector, mode) != MODE_OK ||
 		    (mode->flags & DRM_MODE_FLAG_INTERLACE))
 			continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bf..b368ed7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@
 		return ret;
 
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+				     &chan->m2mf_ntfy);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 13bb672..f658a04 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -234,9 +234,9 @@
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
-	acquire_console_sem();
+	console_lock();
 	nouveau_fbcon_set_suspend(dev, 1);
-	release_console_sem();
+	console_unlock();
 	nouveau_fbcon_restore_accel(dev);
 	return 0;
 
@@ -359,9 +359,9 @@
 		nv_crtc->lut.depth = 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 	nouveau_fbcon_set_suspend(dev, 0);
-	release_console_sem();
+	console_unlock();
 
 	nouveau_fbcon_zfill_all(dev);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 46e3257..982d70b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -160,6 +160,7 @@
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
 #define NVOBJ_FLAG_VM			(1 << 3)
+#define NVOBJ_FLAG_VM_USER		(1 << 4)
 
 #define NVOBJ_CINST_GLOBAL	0xdeadbeef
 
@@ -847,14 +848,12 @@
 				     struct nouveau_fence *fence);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 
-/* nvc0_vram.c */
-extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
-
 /* nouveau_notifier.c */
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
 extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
-				   int cout, uint32_t *offset);
+				   int cout, uint32_t start, uint32_t end,
+				   uint32_t *offset);
 extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
 extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
 					 struct drm_file *);
@@ -1576,6 +1575,20 @@
 		dev->pdev->subsystem_device == sub_device;
 }
 
+/* Returns 1 if the device is one of the nv4x chips using the 0x4497 object
+ * class; helpful for determining a number of other hardware features.
+ */
+static inline int
+nv44_graph_class(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if ((dev_priv->chipset & 0xf0) == 0x60)
+		return 1;
+
+	return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
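
The helper above packs a per-chipset lookup table into a single integer: bit (chipset & 0x0f) of 0x0baf selects the group, with the 0x6x family special-cased. The sketch below replicates that expression in isolation so the idiom can be exercised directly; the chipset values in main() are arbitrary test inputs, not a statement about specific boards:

#include <stdio.h>

/* Same shape as the helper in the patch, shown purely to unpack the idiom. */
static int is_nv44_class(unsigned int chipset)
{
    if ((chipset & 0xf0) == 0x60)
        return 1;
    return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
    unsigned int chips[] = { 0x40, 0x44, 0x46, 0x47, 0x4a, 0x4e, 0x63 };

    for (unsigned int i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
        printf("chipset 0x%02x -> %s\n", chips[i],
               is_nv44_class(chips[i]) ? "0x4497 group" : "0x4097 group");
    return 0;
}
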
+
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO  1
 #define NV_MEM_ACCESS_WO  2
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a26d047..60769d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,13 +352,14 @@
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &nouveau_fbcon_sw_ops;
-	info->fix.smem_start = dev->mode_config.fb_base +
-			       (nvbo->bo.mem.start << PAGE_SHIFT);
+	info->fix.smem_start = nvbo->bo.mem.bus.base +
+			       nvbo->bo.mem.bus.offset;
 	info->fix.smem_len = size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
 	info->screen_size = size;
 
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* Set aperture base/size for vesafb takeover */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 69044eb..b0fb9bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@
 	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
 			mem->page_alignment << PAGE_SHIFT, size_nc,
 			(nvbo->tile_flags >> 8) & 0xff, &node);
-	if (ret)
-		return ret;
+	if (ret) {
+		mem->mm_node = NULL;
+		return (ret == -ENOSPC) ? 0 : ret;
+	}
 
 	node->page_shift = 12;
 	if (nvbo->vma.node)
@@ -742,30 +744,24 @@
 {
 	struct nouveau_mm *mm = man->priv;
 	struct nouveau_mm_node *r;
-	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
-	int i;
+	u32 total = 0, free = 0;
 
 	mutex_lock(&mm->mutex);
 	list_for_each_entry(r, &mm->nodes, nl_entry) {
-		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
-		       prefix, r->free ? "free" : "used", r->type,
-		       ((u64)r->offset << 12),
+		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+		       prefix, r->type, ((u64)r->offset << 12),
 		       (((u64)r->offset + r->length) << 12));
+
 		total += r->length;
-		ttotal[r->type] += r->length;
-		if (r->free)
-			tfree[r->type] += r->length;
-		else
-			tused[r->type] += r->length;
+		if (!r->type)
+			free += r->length;
 	}
 	mutex_unlock(&mm->mutex);
 
-	printk(KERN_DEBUG "%s  total: 0x%010llx\n", prefix, total << 12);
-	for (i = 0; i < 3; i++) {
-		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
-				  "used 0x%010llx, free 0x%010llx\n", prefix,
-		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
-	}
+	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
+	       prefix, (u64)total << 12, (u64)free << 12);
+	printk(KERN_DEBUG "%s  block: 0x%08x\n",
+	       prefix, mm->block_size << 12);
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11e..7609756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,181 +48,82 @@
 
 	b->offset = a->offset;
 	b->length = size;
-	b->free   = a->free;
 	b->type   = a->type;
 	a->offset += size;
 	a->length -= size;
 	list_add_tail(&b->nl_entry, &a->nl_entry);
-	if (b->free)
+	if (b->type == 0)
 		list_add_tail(&b->fl_entry, &a->fl_entry);
 	return b;
 }
 
-static struct nouveau_mm_node *
-nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
-{
-	struct nouveau_mm_node *prev, *next;
-
-	/* try to merge with free adjacent entries of same type */
-	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.prev != &rmm->nodes) {
-		if (prev->free && prev->type == this->type) {
-			prev->length += this->length;
-			region_put(rmm, this);
-			this = prev;
-		}
-	}
-
-	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.next != &rmm->nodes) {
-		if (next->free && next->type == this->type) {
-			next->offset  = this->offset;
-			next->length += this->length;
-			region_put(rmm, this);
-			this = next;
-		}
-	}
-
-	return this;
-}
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
 nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
 {
-	u32 block_s, block_l;
+	struct nouveau_mm_node *prev = node(this, prev);
+	struct nouveau_mm_node *next = node(this, next);
 
-	this->free = true;
 	list_add(&this->fl_entry, &rmm->free);
-	this = nouveau_mm_merge(rmm, this);
-
-	/* any entirely free blocks now?  we'll want to remove typing
-	 * on them now so they can be use for any memory allocation
-	 */
-	block_s = roundup(this->offset, rmm->block_size);
-	if (block_s + rmm->block_size > this->offset + this->length)
-		return;
-
-	/* split off any still-typed region at the start */
-	if (block_s != this->offset) {
-		if (!region_split(rmm, this, block_s - this->offset))
-			return;
-	}
-
-	/* split off the soon-to-be-untyped block(s) */
-	block_l = rounddown(this->length, rmm->block_size);
-	if (block_l != this->length) {
-		this = region_split(rmm, this, block_l);
-		if (!this)
-			return;
-	}
-
-	/* mark as having no type, and retry merge with any adjacent
-	 * untyped blocks
-	 */
 	this->type = 0;
-	nouveau_mm_merge(rmm, this);
+
+	if (prev && prev->type == 0) {
+		prev->length += this->length;
+		region_put(rmm, this);
+		this = prev;
+	}
+
+	if (next && next->type == 0) {
+		next->offset  = this->offset;
+		next->length += this->length;
+		region_put(rmm, this);
+	}
 }
 
 int
 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
-	struct nouveau_mm_node *this, *tmp, *next;
-	u32 splitoff, avail, alloc;
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 min = size_nc ? size_nc : size;
+	u32 align_mask = align - 1;
+	u32 splitoff;
+	u32 s, e;
 
-	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
-		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-		if (this->nl_entry.next == &rmm->nodes)
-			next = NULL;
+	list_for_each_entry(this, &rmm->free, fl_entry) {
+		e = this->offset + this->length;
+		s = this->offset;
 
-		/* skip wrongly typed blocks */
-		if (this->type && this->type != type)
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, rmm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type)
+			e = rounddown(e, rmm->block_size);
+
+		s  = (s + align_mask) & ~align_mask;
+		e &= ~align_mask;
+		if (s > e || e - s < min)
 			continue;
 
-		/* account for alignment */
-		splitoff = this->offset & (align - 1);
-		if (splitoff)
-			splitoff = align - splitoff;
-
-		if (this->length <= splitoff)
-			continue;
-
-		/* determine total memory available from this, and
-		 * the next block (if appropriate)
-		 */
-		avail = this->length;
-		if (next && next->free && (!next->type || next->type == type))
-			avail += next->length;
-
-		avail -= splitoff;
-
-		/* determine allocation size */
-		if (size_nc) {
-			alloc = min(avail, size);
-			alloc = rounddown(alloc, size_nc);
-			if (alloc == 0)
-				continue;
-		} else {
-			alloc = size;
-			if (avail < alloc)
-				continue;
-		}
-
-		/* untyped block, split off a chunk that's a multiple
-		 * of block_size and type it
-		 */
-		if (!this->type) {
-			u32 block = roundup(alloc + splitoff, rmm->block_size);
-			if (this->length < block)
-				continue;
-
-			this = region_split(rmm, this, block);
-			if (!this)
-				return -ENOMEM;
-
-			this->type = type;
-		}
-
-		/* stealing memory from adjacent block */
-		if (alloc > this->length) {
-			u32 amount = alloc - (this->length - splitoff);
-
-			if (!next->type) {
-				amount = roundup(amount, rmm->block_size);
-
-				next = region_split(rmm, next, amount);
-				if (!next)
-					return -ENOMEM;
-
-				next->type = type;
-			}
-
-			this->length += amount;
-			next->offset += amount;
-			next->length -= amount;
-			if (!next->length) {
-				list_del(&next->nl_entry);
-				list_del(&next->fl_entry);
-				kfree(next);
-			}
-		}
-
-		if (splitoff) {
-			if (!region_split(rmm, this, splitoff))
-				return -ENOMEM;
-		}
-
-		this = region_split(rmm, this, alloc);
-		if (this == NULL)
+		splitoff = s - this->offset;
+		if (splitoff && !region_split(rmm, this, splitoff))
 			return -ENOMEM;
 
-		this->free = false;
+		this = region_split(rmm, this, min(size, e - s));
+		if (!this)
+			return -ENOMEM;
+
+		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
 		return 0;
 	}
 
-	return -ENOMEM;
+	return -ENOSPC;
 }
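
The rewritten nouveau_mm_get() above is a first-fit walk over the free list: for each free node it forms a usable window [s, e), rounds the window in to block_size boundaries wherever a neighbour holds a different memory type, applies the power-of-two alignment, and splits off the request if at least the minimum size still fits, returning -ENOSPC only when no node qualifies. A simplified, self-contained sketch of that window computation, with a single free node and made-up sizes rather than the real list handling:

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up(uint32_t v, uint32_t m)   { return (v + m - 1) / m * m; }
static uint32_t round_down(uint32_t v, uint32_t m) { return v / m * m; }

/* Compute the usable [s, e) window inside one free node: shrink to block
 * boundaries next to foreign types, then apply the power-of-two alignment.
 * Returns 1 and the start offset on success, 0 when the request cannot fit. */
static int fit(uint32_t offset, uint32_t length, uint32_t block_size,
               int prev_foreign, int next_foreign,
               uint32_t size, uint32_t align, uint32_t *out_start)
{
    uint32_t s = offset, e = offset + length;
    uint32_t align_mask = align - 1;   /* align must be a power of two */

    if (prev_foreign)
        s = round_up(s, block_size);
    if (next_foreign)
        e = round_down(e, block_size);

    s = (s + align_mask) & ~align_mask;
    e &= ~align_mask;
    if (s > e || e - s < size)
        return 0;

    *out_start = s;
    return 1;
}

int main(void)
{
    uint32_t start;

    /* Free node at 0x30..0x230, block size 0x100, both neighbours of a
     * different type, request 0x80 units aligned to 0x40. */
    if (fit(0x30, 0x200, 0x100, 1, 1, 0x80, 0x40, &start))
        printf("allocated at 0x%x\n", start);
    else
        printf("no space (would return -ENOSPC)\n");
    return 0;
}
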
 
 int
@@ -234,7 +135,6 @@
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return -ENOMEM;
-	heap->free = true;
 	heap->offset = roundup(offset, block);
 	heap->length = rounddown(offset + length, block) - heap->offset;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index af38449..798eaf3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -30,9 +30,7 @@
 	struct list_head fl_entry;
 	struct list_head rl_entry;
 
-	bool free;
-	int  type;
-
+	u8  type;
 	u32 offset;
 	u32 length;
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d60..5ea1676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@
 
 int
 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
-		       int size, uint32_t *b_offset)
+		       int size, uint32_t start, uint32_t end,
+		       uint32_t *b_offset)
 {
 	struct drm_device *dev = chan->dev;
 	struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@
 	uint32_t offset;
 	int target, ret;
 
-	mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+					  start, end, 0);
 	if (mem)
-		mem = drm_mm_get_block(mem, size, 0);
+		mem = drm_mm_get_block_range(mem, size, 0, start, end);
 	if (!mem) {
 		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
 		return -ENOMEM;
@@ -177,7 +179,8 @@
 	if (IS_ERR(chan))
 		return PTR_ERR(chan);
 
-	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+	ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+				     &na->offset);
 	nouveau_channel_put(&chan);
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index fb846a3..4399e2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -443,7 +443,7 @@
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 
 	if (pm->hwmon) {
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+		sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
 		hwmon_device_unregister(pm->hwmon);
 	}
 #endif
@@ -543,7 +543,7 @@
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct nouveau_pm_level *perflvl;
 
-	if (pm->cur == &pm->boot)
+	if (!pm->cur || pm->cur == &pm->boot)
 		return;
 
 	perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 7ecc4ad..8d9968e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -265,8 +265,8 @@
 	struct i2c_board_info info[] = {
 		{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
 		{ I2C_BOARD_INFO("w83781d", 0x2d) },
-		{ I2C_BOARD_INFO("f75375", 0x2e) },
 		{ I2C_BOARD_INFO("adt7473", 0x2e) },
+		{ I2C_BOARD_INFO("f75375", 0x2e) },
 		{ I2C_BOARD_INFO("lm99", 0x4c) },
 		{ }
 	};
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550..c82db37 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@
 	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
 		bool duallink, dummy;
 
-		nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
-					      clock, &duallink, &dummy);
+		nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+					      &duallink, &dummy);
 		if (duallink)
 			regp->fp_control |= (8 << 28);
 	} else
@@ -518,8 +518,6 @@
 		return;
 
 	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
-		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
 		/* when removing an output, crtc may not be set, but PANEL_OFF
 		 * must still be run
 		 */
@@ -527,12 +525,8 @@
 			   nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
 
 		if (mode == DRM_MODE_DPMS_ON) {
-			if (!nv_connector->native_mode) {
-				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
-				return;
-			}
 			call_lvds_script(dev, nv_encoder->dcb, head,
-					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
+					 LVDS_PANEL_ON, nv_encoder->mode.clock);
 		} else
 			/* pxclk of 0 is fine for PANEL_OFF, and for a
 			 * disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 19ef92a..18d30c2 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,30 +211,35 @@
 	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
-	case 0x44:
-	case 0x4a:
+	case 0x40:
+	case 0x41: /* guess */
+	case 0x42:
+	case 0x43:
+	case 0x45: /* guess */
 	case 0x4e:
 		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-		break;
-
-	case 0x46:
-	case 0x47:
-	case 0x49:
-	case 0x4b:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
 		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
 		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
-
-	default:
+	case 0x44:
+	case 0x4a:
 		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		break;
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+	case 0x4c:
+	case 0x67:
+	default:
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
 		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
 		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
@@ -396,17 +401,20 @@
 		break;
 	default:
 		switch (dev_priv->chipset) {
-		case 0x46:
-		case 0x47:
-		case 0x49:
-		case 0x4b:
-			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
-			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
-			break;
-		default:
+		case 0x41:
+		case 0x42:
+		case 0x43:
+		case 0x45:
+		case 0x4e:
+		case 0x44:
+		case 0x4a:
 			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
 			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
 			break;
+		default:
+			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+			break;
 		}
 		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
 		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
@@ -451,8 +459,7 @@
 	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
 
 	/* curie */
-	if (dev_priv->chipset >= 0x60 ||
-	    0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+	if (nv44_graph_class(dev))
 		NVOBJ_CLASS(dev, 0x4497, GR);
 	else
 		NVOBJ_CLASS(dev, 0x4097, GR);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce58509..f70447d 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
  */
 
 static int
-nv40_graph_4097(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if ((dev_priv->chipset & 0xf0) == 0x60)
-		return 0;
-
-	return !!(0x0baf & (1 << dev_priv->chipset));
-}
-
-static int
 nv40_graph_vs_count(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@
 		gr_def(ctx, 0x4009dc, 0x80000000);
 	} else {
 		cp_ctx(ctx, 0x400840, 20);
-		if (!nv40_graph_4097(ctx->dev)) {
+		if (nv44_graph_class(ctx->dev)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
 		}
@@ -228,7 +217,7 @@
 		gr_def(ctx, 0x400888, 0x00000040);
 		cp_ctx(ctx, 0x400894, 11);
 		gr_def(ctx, 0x400894, 0x00000040);
-		if (nv40_graph_4097(ctx->dev)) {
+		if (!nv44_graph_class(ctx->dev)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
 		}
@@ -546,7 +535,7 @@
 static void
 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 {
-	int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+	int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
 
 	cp_out (ctx, 0x300000);
 	cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@
 	} else {
 		b0_offset = 0x1d40/4; /* 2200 */
 		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
-		vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+		vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
 	}
 
 	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
-	cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+	cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
 
 	offset = ctx->ctxvals_pos;
 	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c1..03c0d4c 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
 int
 nv40_mc_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t tmp;
-
 	/* Power up everything, resetting each individual unit will
 	 * be done later if needed.
 	 */
 	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
 
-	switch (dev_priv->chipset) {
-	case 0x44:
-	case 0x46: /* G72 */
-	case 0x4e:
-	case 0x4c: /* C51_G7X */
-		tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+	if (nv44_graph_class(dev)) {
+		u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		nv_wr32(dev, NV40_PMC_1700, tmp);
 		nv_wr32(dev, NV40_PMC_1704, 0);
 		nv_wr32(dev, NV40_PMC_1708, 0);
 		nv_wr32(dev, NV40_PMC_170C, tmp);
-		break;
-	default:
-		break;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 14e24e9..0ea090f 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -283,8 +283,7 @@
 			nv50_evo_channel_del(&dev_priv->evo);
 			return ret;
 		}
-	} else
-	if (dev_priv->chipset != 0x50) {
+	} else {
 		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
 					  0, 0xffffffff, 0x00010000);
 		if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 2d7ea75..37e21d2 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -256,6 +256,7 @@
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
 	unsigned long flags;
 
@@ -265,6 +266,7 @@
 		return;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
 	pgraph->fifo_access(dev, false);
 
 	if (pgraph->channel(dev) == chan)
@@ -275,6 +277,7 @@
 	dev_priv->engine.instmem.flush(dev);
 
 	pgraph->fifo_access(dev, true);
+	pfifo->reassign(dev, true);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 2e1b1cd..e57caa2 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -332,8 +332,11 @@
 	gpuobj->vinst = node->vram->offset;
 
 	if (gpuobj->flags & NVOBJ_FLAG_VM) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
-				     NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+		u32 flags = NV_MEM_ACCESS_RW;
+		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+			flags |= NV_MEM_ACCESS_SYS;
+
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
 				     &node->chan_vma);
 		if (ret) {
 			vram->put(dev, &node->vram);
@@ -400,16 +403,24 @@
 void
 nv50_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x00330c, 0x00000001);
 	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x070000, 0x00000001);
 	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 38e523e..6144156 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -45,11 +45,6 @@
 	}
 
 	if (phys & 1) {
-		if (dev_priv->vram_sys_base) {
-			phys += dev_priv->vram_sys_base;
-			phys |= 0x30;
-		}
-
 		if (coverage <= 32 * 1024 * 1024)
 			phys |= 0x60;
 		else if (coverage <= 64 * 1024 * 1024)
@@ -174,7 +169,11 @@
 void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
 	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock(&dev_priv->ramin_lock);
 }
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5feacd5..eb18a7e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -31,6 +31,7 @@
 #include "nvc0_graph.h"
 
 static void nvc0_graph_isr(struct drm_device *);
+static void nvc0_runk140_isr(struct drm_device *);
 static int  nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
 
 void
@@ -105,7 +106,8 @@
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
 				 &grch->unk418810);
 	if (ret)
 		return ret;
@@ -280,6 +282,7 @@
 		return;
 
 	nouveau_irq_unregister(dev, 12);
+	nouveau_irq_unregister(dev, 25);
 
 	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
 	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -389,6 +392,7 @@
 	}
 
 	nouveau_irq_register(dev, 12, nvc0_graph_isr);
+	nouveau_irq_register(dev, 25, nvc0_runk140_isr);
 	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
 	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
 	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
@@ -511,8 +515,8 @@
 			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
 			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
 			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
 		}
 		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -776,3 +780,19 @@
 
 	nv_wr32(dev, 0x400500, 0x00010001);
 }
+
+static void
+nvc0_runk140_isr(struct drm_device *dev)
+{
+	u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
+
+	while (units) {
+		u32 unit = ffs(units) - 1;
+		u32 reg = 0x140000 + unit * 0x2000;
+		u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
+		u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
+
+		NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
+		units &= ~(1 << unit);
+	}
+}
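
The new ISR above dispatches per-unit work from a single status word by repeatedly taking the lowest set bit with ffs() and clearing it once handled. A user-space sketch of the same loop, using the C library's ffs() from <strings.h>:

#include <stdio.h>
#include <strings.h>

int main(void)
{
    unsigned int units = 0x15;                  /* pretend units 0, 2 and 4 fired */

    while (units) {
        unsigned int unit = ffs(units) - 1;     /* lowest set bit */
        printf("servicing unit %u\n", unit);
        units &= ~(1u << unit);                 /* ack it and move on */
    }
    return 0;
}
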
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index b9e68b2..f880ff7 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1830,7 +1830,7 @@
 
 	for (tp = 0, id = 0; tp < 4; tp++) {
 		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tp <= priv->tp_nr[gpc]) {
+			if (tp < priv->tp_nr[gpc]) {
 				nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
 				nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
 				nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 4b9251b..e4e83c2 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -48,8 +48,8 @@
 	phys >>= 8;
 
 	phys |= 0x00000001; /* present */
-//	if (vma->access & NV_MEM_ACCESS_SYS)
-//		phys |= 0x00000002;
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= 0x00000002;
 
 	phys |= ((u64)target  << 32);
 	phys |= ((u64)memtype << 36);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 58a0cd0..04b269d 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1629,7 +1629,7 @@
 typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
 {
   USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
-  USHORT    usVRAMAddress;      //Adress in Frame Buffer where to pace raw EDID
+  USHORT    usVRAMAddress;      //Address in Frame Buffer where to place raw EDID
   USHORT    usStatus;           //When use output: lower byte EDID checksum, high byte hardware status
                                 //WHen use input:  lower byte as 'byte to read':currently limited to 128byte or 1byte
   UCHAR     ucSlaveAddr;        //Read from which slave
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b0ab185..a4e5e53 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@
 
 	switch (radeon_crtc->rmx_type) {
 	case RMX_CENTER:
-		args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
-		args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
 		break;
 	case RMX_ASPECT:
 		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
 		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
 
 		if (a1 > a2) {
-			args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
 		} else if (a2 > a1) {
-			args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
 		}
 		break;
 	case RMX_FULL:
 	default:
-		args.usOverscanRight = radeon_crtc->h_border;
-		args.usOverscanLeft = radeon_crtc->h_border;
-		args.usOverscanBottom = radeon_crtc->v_border;
-		args.usOverscanTop = radeon_crtc->v_border;
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
 		break;
 	}
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
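
The cpu_to_le16() wrappers added throughout this file matter because the ATOM BIOS parameter structs are laid out little-endian, as the card's command-table interpreter expects; on a big-endian host a bare 16-bit store would land byte-swapped. A host-independent sketch of the same conversion, using a hand-rolled to_le16() and a made-up two-field struct in place of the kernel macro and the real ATOM argument block:

/* Host-independent sketch of storing a CPU value into a little-endian
 * 16-bit field, as the cpu_to_le16() calls above do for ATOM structs. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t to_le16(uint16_t v)		/* stand-in for cpu_to_le16() */
{
	uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
	uint16_t out;

	memcpy(&out, b, sizeof(out));		/* raw little-endian byte order */
	return out;
}

struct fake_overscan_args {			/* assumed shape, for illustration */
	uint16_t usOverscanTop;
	uint16_t usOverscanBottom;
};

int main(void)
{
	struct fake_overscan_args args;

	args.usOverscanTop = to_le16((1080 - 1024) / 2);
	args.usOverscanBottom = to_le16((1080 - 1024) / 2);
	printf("top field bytes in memory: %02x %02x\n",
	       ((uint8_t *)&args.usOverscanTop)[0],
	       ((uint8_t *)&args.usOverscanTop)[1]);
	return 0;
}
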
@@ -419,23 +419,23 @@
 	memset(&args, 0, sizeof(args));
 
 	if (ASIC_IS_DCE5(rdev)) {
-		args.v3.usSpreadSpectrumAmountFrac = 0;
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
 		args.v3.ucSpreadSpectrumType = ss->type;
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
-			args.v3.usSpreadSpectrumAmount = 0;
-			args.v3.usSpreadSpectrumStep = 0;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -447,18 +447,18 @@
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
-			args.v2.usSpreadSpectrumAmount = 0;
-			args.v2.usSpreadSpectrumStep = 0;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -538,7 +538,6 @@
 			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
 			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,23 +554,28 @@
 					dp_clock = dig_connector->dp_clock;
 				}
 			}
-#if 0 /* doesn't work properly on some laptops */
+
 			/* use recommended ref_div for ss */
 			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 				if (ss_enabled) {
 					if (ss->refdiv) {
+						pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 						pll->flags |= RADEON_PLL_USE_REF_DIV;
 						pll->reference_div = ss->refdiv;
+						if (ASIC_IS_AVIVO(rdev))
+							pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					}
 				}
 			}
-#endif
+
 			if (ASIC_IS_AVIVO(rdev)) {
 				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
 					adjusted_clock = mode->clock * 2;
 				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
 					pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+					pll->flags |= RADEON_PLL_IS_LCD;
 			} else {
 				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -606,14 +610,9 @@
 				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
 				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
 				args.v1.ucEncodeMode = encoder_mode;
-				if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-					if (ss_enabled)
-						args.v1.ucConfig |=
-							ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-				} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+				if (ss_enabled)
 					args.v1.ucConfig |=
 						ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-				}
 
 				atom_execute_table(rdev->mode_info.atom_context,
 						   index, (uint32_t *)&args);
@@ -624,12 +623,12 @@
 				args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
 				args.v3.sInput.ucEncodeMode = encoder_mode;
 				args.v3.sInput.ucDispPllConfig = 0;
+				if (ss_enabled)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_SS_ENABLE;
 				if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-						if (ss_enabled)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_SS_ENABLE;
 						args.v3.sInput.ucDispPllConfig |=
 							DISPPLL_CONFIG_COHERENT_MODE;
 						/* 16200 or 27000 */
@@ -649,18 +648,11 @@
 					}
 				} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-						if (ss_enabled)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_SS_ENABLE;
 						args.v3.sInput.ucDispPllConfig |=
 							DISPPLL_CONFIG_COHERENT_MODE;
 						/* 16200 or 27000 */
 						args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-					} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
-						if (ss_enabled)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_SS_ENABLE;
-					} else {
+					} else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
 						if (mode->clock > 165000)
 							args.v3.sInput.ucDispPllConfig |=
 								DISPPLL_CONFIG_DUAL_LINK;
@@ -670,10 +662,12 @@
 						   index, (uint32_t *)&args);
 				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
 				if (args.v3.sOutput.ucRefDiv) {
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					pll->flags |= RADEON_PLL_USE_REF_DIV;
 					pll->reference_div = args.v3.sOutput.ucRefDiv;
 				}
 				if (args.v3.sOutput.ucPostDiv) {
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					pll->flags |= RADEON_PLL_USE_POST_DIV;
 					pll->post_div = args.v3.sOutput.ucPostDiv;
 				}
@@ -727,14 +721,14 @@
 			 * SetPixelClock provides the dividers
 			 */
 			args.v5.ucCRTC = ATOM_CRTC_INVALID;
-			args.v5.usPixelClock = dispclk;
+			args.v5.usPixelClock = cpu_to_le16(dispclk);
 			args.v5.ucPpll = ATOM_DCPLL;
 			break;
 		case 6:
 			/* if the default dcpll clock is specified,
 			 * SetPixelClock provides the dividers
 			 */
-			args.v6.ulDispEngClkFreq = dispclk;
+			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
 			args.v6.ucPpll = ATOM_DCPLL;
 			break;
 		default:
@@ -963,8 +957,12 @@
 	/* adjust pixel clock as needed */
 	adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
-	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-			   &ref_div, &post_div);
+	if (ASIC_IS_AVIVO(rdev))
+		radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+					 &ref_div, &post_div);
+	else
+		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+					  &ref_div, &post_div);
 
 	atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
 
@@ -993,9 +991,9 @@
 	}
 }
 
-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
-				      struct drm_framebuffer *fb,
-				      int x, int y, int atomic)
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -1006,6 +1004,7 @@
 	struct radeon_bo *rbo;
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
 	int r;
 
 	/* no fb bound */
@@ -1057,11 +1056,17 @@
 	case 16:
 		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
 			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
 		break;
 	case 24:
 	case 32:
 		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
 			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
 		break;
 	default:
 		DRM_ERROR("Unsupported screen depth %d\n",
@@ -1106,6 +1111,7 @@
 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
 	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
 	WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
 	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
 	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1127,12 +1133,6 @@
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       EVERGREEN_INTERLEAVE_EN);
-	else
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = radeon_fb->obj->driver_private;
@@ -1162,6 +1162,7 @@
 	struct drm_framebuffer *target_fb;
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
 	int r;
 
 	/* no fb bound */
@@ -1215,12 +1216,18 @@
 		fb_format =
 		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
 		    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
 		break;
 	case 24:
 	case 32:
 		fb_format =
 		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
 		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
 		break;
 	default:
 		DRM_ERROR("Unsupported screen depth %d\n",
@@ -1260,6 +1267,8 @@
 	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
 	       radeon_crtc->crtc_offset, (u32) fb_location);
 	WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
 	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
 	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1281,12 +1290,6 @@
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       AVIVO_D1MODE_INTERLEAVE_EN);
-	else
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = radeon_fb->obj->driver_private;
@@ -1310,7 +1313,7 @@
 	struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else
@@ -1325,7 +1328,7 @@
        struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
 	else
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4e7778d..695de9a 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -187,9 +187,9 @@
 int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
 {
 	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
-	int bw = dp_lanes_for_mode_clock(dpcd, mode_clock);
+	int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
 
-	if ((lanes == 0) || (bw == 0))
+	if ((lanes == 0) || (dp_clock == 0))
 		return MODE_CLOCK_HIGH;
 
 	return MODE_OK;
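
The dp_mode_valid() fix above removes a copy-paste slip: the bandwidth variable was being filled from the lane-count helper a second time, so an unattainable link clock could never reject a mode. A tiny sketch of the corrected shape of the check, with simplified stand-in helpers instead of the real DPCD-based lookups:

/* Sketch of the corrected dp_mode_valid() logic; the two helpers below are
 * simplified stand-ins for the real DPCD-based lookups. */
#include <stdio.h>

#define MODE_OK		0
#define MODE_CLOCK_HIGH	1

static int fake_lanes_for_clock(int mode_clock)		/* stand-in helper */
{
	return mode_clock <= 540000 ? 4 : 0;
}

static int fake_link_clock_for_clock(int mode_clock)	/* stand-in helper */
{
	return mode_clock <= 200000 ? 162000 :
	       mode_clock <= 330000 ? 270000 : 0;
}

static int mode_valid(int mode_clock)
{
	int lanes = fake_lanes_for_clock(mode_clock);
	int dp_clock = fake_link_clock_for_clock(mode_clock);	/* was the lane helper again */

	if (lanes == 0 || dp_clock == 0)
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}

int main(void)
{
	printf("148500 kHz -> %s\n", mode_valid(148500) ? "too fast" : "ok");
	printf("400000 kHz -> %s\n", mode_valid(400000) ? "too fast" : "ok");
	return 0;
}
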
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fe8ebd..6140ea1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -97,26 +97,29 @@
 }
 
 /* get temperature in millidegrees */
-u32 evergreen_get_temp(struct radeon_device *rdev)
+int evergreen_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
 	u32 actual_temp = 0;
 
-	if ((temp >> 10) & 1)
-		actual_temp = 0;
-	else if ((temp >> 9) & 1)
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
 		actual_temp = 255;
-	else
-		actual_temp = (temp >> 1) & 0xff;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
 
-	return actual_temp * 1000;
+	return (actual_temp * 1000) / 2;
 }
 
-u32 sumo_get_temp(struct radeon_device *rdev)
+int sumo_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
-	u32 actual_temp = (temp >> 1) & 0xff;
+	int actual_temp = temp - 49;
 
 	return actual_temp * 1000;
 }
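
The rewritten evergreen_get_temp() treats the thermal status field as a signed reading in half-degree units: bit 10 clamps to -256, bit 9 clamps to +255, and bit 8 triggers sign extension of the low nine bits. A standalone sketch of that decode, with a hard-coded raw value standing in for the CG_MULT_THERMAL_STATUS read:

/* Sketch of the signed half-degree decode used above; the raw value is
 * passed in instead of being read from CG_MULT_THERMAL_STATUS. */
#include <stdio.h>
#include <stdint.h>

static int decode_millideg(uint32_t temp)	/* temp = raw ASIC_T field */
{
	int actual_temp;

	if (temp & 0x400)			/* hard negative clamp */
		actual_temp = -256;
	else if (temp & 0x200)			/* hard positive clamp */
		actual_temp = 255;
	else if (temp & 0x100)			/* negative: sign-extend 9 bits */
		actual_temp = (int)(temp & 0x1ff) | ~0x1ff;
	else					/* plain positive reading */
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;	/* units are 0.5 degrees C */
}

int main(void)
{
	printf("0x03c -> %d mC\n", decode_millideg(0x03c));	/* +30 C */
	printf("0x1f6 -> %d mC\n", decode_millideg(0x1f6));	/* -5 C  */
	return 0;
}
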
@@ -1182,6 +1185,22 @@
 /*
  * CP.
  */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	/* set to DX10/11 mode */
+	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(rdev, 1);
+	/* FIXME: implement */
+	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(rdev, ib->length_dw);
+}
+
 
 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 {
@@ -1192,7 +1211,11 @@
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	fw_data = (const __be32 *)rdev->pfp_fw->data;
 	WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1233,7 +1256,7 @@
 	cp_me = 0xff;
 	WREG32(CP_ME_CNTL, cp_me);
 
-	r = radeon_ring_lock(rdev, evergreen_default_size + 15);
+	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
@@ -1266,6 +1289,11 @@
 	radeon_ring_write(rdev, 0xffffffff);
 	radeon_ring_write(rdev, 0xffffffff);
 
+	radeon_ring_write(rdev, 0xc0026900);
+	radeon_ring_write(rdev, 0x00000316);
+	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(rdev, 0x00000010); /*  */
+
 	radeon_ring_unlock_commit(rdev);
 
 	return 0;
@@ -1306,7 +1334,11 @@
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address wether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2072,6 +2104,7 @@
 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
 
 	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 
 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
@@ -2161,7 +2194,6 @@
 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	}
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
@@ -2201,6 +2233,9 @@
 	struct evergreen_mc_save save;
 	u32 grbm_reset = 0;
 
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return 0;
+
 	dev_info(rdev->dev, "GPU softreset \n");
 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
 		RREG32(GRBM_STATUS));
@@ -2603,8 +2638,8 @@
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id =  rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
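
The interrupt-handler hunk above wraps the ring reads in le32_to_cpu() because the IH ring is written by the GPU as little-endian dwords regardless of host byte order. A small sketch of pulling src_id/src_data out of such a ring, with a local le32() helper and a fabricated buffer in place of the kernel macro and the real IH ring:

/* Sketch of decoding little-endian dwords from a GPU-written ring, mirroring
 * the src_id/src_data extraction above; the buffer contents are fabricated. */
#include <stdio.h>
#include <stdint.h>

static uint32_t le32(const uint8_t *p)		/* stand-in for le32_to_cpu() */
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* One pending 16-byte ring entry, stored as little-endian dwords. */
	uint8_t ring[32] = { 0x01, 0x00, 0x00, 0x00,	/* src_id   = 1 (D1 vblank) */
			     0x03, 0x00, 0x00, 0x00 };	/* src_data = 3 */
	unsigned rptr = 0, wptr = 16;

	while (rptr != wptr) {
		uint32_t src_id   = le32(&ring[rptr])     & 0xff;
		uint32_t src_data = le32(&ring[rptr + 4]) & 0xfffffff;

		printf("src_id %u src_data %u\n", src_id, src_data);
		rptr = (rptr + 16) % sizeof(ring);	/* advance one entry */
	}
	return 0;
}
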
@@ -2898,7 +2933,7 @@
 	/* XXX: ontario has problems blitting to gart at the moment */
 	if (rdev->family == CHIP_PALM) {
 		rdev->asic->copy = NULL;
-		rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	}
 
 	/* allocate wb buffer */
@@ -3002,31 +3037,6 @@
 	return 0;
 }
 
-static bool evergreen_card_posted(struct radeon_device *rdev)
-{
-	u32 reg;
-
-	/* first check CRTCs */
-	if (rdev->flags & RADEON_IS_IGP)
-		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	else
-		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-	if (reg & EVERGREEN_CRTC_MASTER_EN)
-		return true;
-
-	/* then check MEM_SIZE, in case the crtcs are off */
-	if (RREG32(CONFIG_MEMSIZE))
-		return true;
-
-	return false;
-}
-
 /* Plan is to move initialization in that function and use
  * helper function so that radeon_device_init pretty much
  * do nothing more than calling asic specific function. This
@@ -3063,7 +3073,7 @@
 	if (radeon_asic_reset(rdev))
 		dev_warn(rdev->dev, "GPU reset failed !\n");
 	/* Post card if necessary */
-	if (!evergreen_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -3158,6 +3168,9 @@
 {
 	u32 link_width_cntl, speed_cntl;
 
+	if (radeon_pcie_gen2 == 0)
+		return;
+
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index b758dc7..2be698e 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 24));
+	cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -133,6 +133,9 @@
 
 	/* high addr, stride */
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 	/* xyzw swizzles */
 	sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
 
@@ -173,7 +176,7 @@
 	sq_tex_resource_word0 = (1 << 0); /* 2D */
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
 				  ((w - 1) << 18));
-	sq_tex_resource_word1 = ((h - 1) << 0);
+	sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
 	/* xyzw swizzles */
 	sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
 
@@ -221,7 +224,11 @@
 	radeon_ring_write(rdev, DI_PT_RECTLIST);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -232,7 +239,7 @@
 
 }
 
-/* emits 30 */
+/* emits 36 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -245,6 +252,8 @@
 	int num_hs_threads, num_ls_threads;
 	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
 	int num_hs_stack_entries, num_ls_stack_entries;
+	u64 gpu_addr;
+	int dwords;
 
 	switch (rdev->family) {
 	case CHIP_CEDAR:
@@ -497,6 +506,18 @@
 	radeon_ring_write(rdev, 0x00000000);
 	radeon_ring_write(rdev, 0x00000000);
 
+	/* set to DX10/11 mode */
+	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(rdev, 1);
+
+	/* emit an IB pointing at default state */
+	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(rdev, dwords);
+
 }
 
 static inline uint32_t i2f(uint32_t input)
@@ -527,8 +548,10 @@
 int evergreen_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r;
+	int i, r, dwords;
 	void *ptr;
+	u32 packet2s[16];
+	int num_packet2s = 0;
 
 	/* pin copy shader into vram if already initialized */
 	if (rdev->r600_blit.shader_obj)
@@ -536,8 +559,17 @@
 
 	mutex_init(&rdev->r600_blit.mutex);
 	rdev->r600_blit.state_offset = 0;
-	rdev->r600_blit.state_len = 0;
-	obj_size = 0;
+
+	rdev->r600_blit.state_len = evergreen_default_size;
+
+	dwords = rdev->r600_blit.state_len;
+	while (dwords & 0xf) {
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+		dwords++;
+	}
+
+	obj_size = dwords * 4;
+	obj_size = ALIGN(obj_size, 256);
 
 	rdev->r600_blit.vs_offset = obj_size;
 	obj_size += evergreen_vs_size * 4;
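
The hunk above now saves the default state into the blit shader object and pads it out to a 16-dword boundary with PACKET2 no-ops before it is later emitted as an indirect buffer. A sketch of that padding arithmetic, with an assumed 0x80000000 value standing in for the real PACKET2(0) encoding, purely for illustration:

/* Sketch of padding a command stream to a 16-dword boundary with filler
 * packets; 0x80000000 stands in for the real PACKET2(0) encoding. */
#include <stdio.h>
#include <stdint.h>

#define FAKE_PACKET2_NOP 0x80000000u	/* assumed encoding, illustration only */

int main(void)
{
	unsigned state_len = 1099;	/* hypothetical default-state length in dwords */
	uint32_t packet2s[16];
	unsigned num_packet2s = 0;
	unsigned dwords = state_len;

	while (dwords & 0xf) {		/* pad until the length is a multiple of 16 */
		packet2s[num_packet2s++] = FAKE_PACKET2_NOP;
		dwords++;
	}
	printf("state %u dwords, %u filler packets, buffer %u bytes\n",
	       state_len, num_packet2s, dwords * 4);
	return 0;
}
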
@@ -567,8 +599,16 @@
 		return r;
 	}
 
-	memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+	memcpy_toio(ptr + rdev->r600_blit.state_offset,
+		    evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+	if (num_packet2s)
+		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+			    packet2s, num_packet2s * 4);
+	for (i = 0; i < evergreen_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+	for (i = 0; i < evergreen_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
@@ -583,7 +623,7 @@
 		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 		return r;
 	}
-	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 	return 0;
 }
 
@@ -591,7 +631,7 @@
 {
 	int r;
 
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	if (rdev->r600_blit.shader_obj == NULL)
 		return;
 	/* If we can't reserve the bo, unref should be enough to destroy
@@ -652,7 +692,7 @@
 	/* calculate number of loops correctly */
 	ring_size = num_loops * dwords_per_loop;
 	/* set default  + shaders */
-	ring_size += 46; /* shaders + def state */
+	ring_size += 52; /* shaders + def state */
 	ring_size += 10; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
 	ring_size += 10; /* fence emit for done copy */
@@ -660,7 +700,7 @@
 	if (r)
 		return r;
 
-	set_default_state(rdev); /* 30 */
+	set_default_state(rdev); /* 36 */
 	set_shaders(rdev); /* 16 */
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c..3a10399 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@
 	0x00000000,
 	0x3c000000,
 	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 	0x1c000000,
 	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
 	0x00000008,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 36d32d8..eb4acf4 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
@@ -240,6 +241,7 @@
 #define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
 #define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
 #define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
 #define	PA_SC_LINE_STIPPLE_STATE			0x8B10
 
 #define	SCRATCH_REG0					0x8500
@@ -652,6 +654,7 @@
 #define	PACKET3_DISPATCH_DIRECT				0x15
 #define	PACKET3_DISPATCH_INDIRECT			0x16
 #define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
 #define	PACKET3_SET_PREDICATION				0x20
 #define	PACKET3_REG_RMW					0x21
 #define	PACKET3_COND_EXEC				0x22
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c..5a82b6b 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@
 	last_reg = strtol(last_reg_s, NULL, 16);
 
 	do {
-		if (fgets(buf, 1024, file) == NULL)
+		if (fgets(buf, 1024, file) == NULL) {
+			fclose(file);
 			return -1;
+		}
 		len = strlen(buf);
 		if (ftell(file) == end)
 			done = 1;
@@ -685,6 +687,7 @@
 				fprintf(stderr,
 					"Error matching regular expression %d in %s\n",
 					r, filename);
+				fclose(file);
 				return -1;
 			} else {
 				buf[match[0].rm_eo] = 0;
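
Both mkregtable.c hunks close the same leak: early return -1 paths that previously dropped the open FILE handle. A minimal sketch of the close-on-every-exit-path pattern, with a made-up parse step standing in for the regexec() matching:

/* Sketch of the fclose-on-every-error-path fix above; parse_line() is a
 * placeholder for the real regex matching in mkregtable.c. */
#include <stdio.h>

static int parse_line(const char *buf)
{
	return buf[0] != '#';		/* pretend comment lines are a parse error */
}

static int scan_file(const char *filename)
{
	char buf[1024];
	FILE *file = fopen(filename, "r");

	if (!file)
		return -1;

	while (fgets(buf, sizeof(buf), file)) {
		if (!parse_line(buf)) {
			fclose(file);	/* do not leak the handle on error */
			return -1;
		}
	}
	fclose(file);
	return 0;
}

int main(int argc, char **argv)
{
	return argc > 1 ? -scan_file(argv[1]) : 0;
}
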
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f637595..e372f9e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -70,23 +70,6 @@
 
 void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
 {
-	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
-	u32 tmp;
-
-	/* make sure flip is at vb rather than hb */
-	tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
-	tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
-	/* make sure pending bit is asserted */
-	tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
-	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
-
-	/* set pageflip to happen as late as possible in the vblank interval.
-	 * same field for crtc1/2
-	 */
-	tmp = RREG32(RADEON_CRTC_GEN_CNTL);
-	tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
-	WREG32(RADEON_CRTC_GEN_CNTL, tmp);
-
 	/* enable the pflip int */
 	radeon_irq_kms_pflip_irq_get(rdev, crtc);
 }
@@ -1031,8 +1014,8 @@
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
 	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
-	WREG32(0x718, 0);
-	WREG32(0x744, 0x00004D4D);
+	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
 	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
 	radeon_ring_start(rdev);
 	r = radeon_ring_test(rdev);
@@ -1041,7 +1024,7 @@
 		return r;
 	}
 	rdev->cp.ready = true;
-	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 	return 0;
 }
 
@@ -1059,7 +1042,7 @@
 void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	rdev->cp.ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -1427,6 +1410,7 @@
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1423,7 @@
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1439,7 @@
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T0_0:
 	case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1457,7 @@
 		track->textures[0].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[0].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T1_0:
 	case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1475,7 @@
 		track->textures[1].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T2_0:
 	case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1493,12 @@
 		track->textures[2].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1519,11 @@
 		ib[idx] = tmp;
 
 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1548,8 @@
 			return -EINVAL;
 		}
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -1572,6 +1567,7 @@
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1584,7 @@
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -1602,12 +1599,14 @@
 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TEX_PITCH_0:
 	case RADEON_PP_TEX_PITCH_1:
 	case RADEON_PP_TEX_PITCH_2:
 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFILTER_0:
 	case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1620,7 @@
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFORMAT_0:
 	case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1673,7 @@
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_FACES_0:
 	case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1684,7 @@
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2086,12 +2088,13 @@
 {
 	struct r100_mc_save save;
 	u32 status, tmp;
+	int ret = 0;
 
-	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -2131,11 +2134,11 @@
 		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeeded\n");
 	r100_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 void r100_set_common_regs(struct radeon_device *rdev)
@@ -2309,7 +2312,6 @@
 	/* FIXME we don't use the second aperture yet when we could use it */
 	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
 		rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
 	if (rdev->flags & RADEON_IS_IGP) {
 		uint32_t tom;
@@ -2346,10 +2348,10 @@
 
 	temp = RREG32(RADEON_CONFIG_CNTL);
 	if (state == false) {
-		temp &= ~(1<<8);
-		temp |= (1<<9);
+		temp &= ~RADEON_CFG_VGA_RAM_EN;
+		temp |= RADEON_CFG_VGA_IO_DIS;
 	} else {
-		temp &= ~(1<<9);
+		temp &= ~RADEON_CFG_VGA_IO_DIS;
 	}
 	WREG32(RADEON_CONFIG_CNTL, temp);
 }
@@ -3317,9 +3319,9 @@
 	unsigned long size;
 	unsigned prim_walk;
 	unsigned nverts;
-	unsigned num_cb = track->num_cb;
+	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
-	if (!track->zb_cb_clear && !track->color_channel_mask &&
+	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
 	    !track->blend_read_enable)
 		num_cb = 0;
 
@@ -3340,7 +3342,9 @@
 			return -EINVAL;
 		}
 	}
-	if (track->z_enabled) {
+	track->cb_dirty = false;
+
+	if (track->zb_dirty && track->z_enabled) {
 		if (track->zb.robj == NULL) {
 			DRM_ERROR("[drm] No buffer for z buffer !\n");
 			return -EINVAL;
@@ -3357,6 +3361,28 @@
 			return -EINVAL;
 		}
 	}
+	track->zb_dirty = false;
+
+	if (track->aa_dirty && track->aaresolve) {
+		if (track->aa.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+			return -EINVAL;
+		}
+		/* I believe the format comes from colorbuffer0. */
+		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+		size += track->aa.offset;
+		if (size > radeon_bo_size(track->aa.robj)) {
+			DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->aa.robj));
+			DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+				  i, track->aa.pitch, track->cb[0].cpp,
+				  track->aa.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->aa_dirty = false;
+
 	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
 	if (track->vap_vf_cntl & (1 << 14)) {
 		nverts = track->vap_alt_nverts;
@@ -3416,13 +3442,23 @@
 			  prim_walk);
 		return -EINVAL;
 	}
-	return r100_cs_track_texture_check(rdev, track);
+
+	if (track->tex_dirty) {
+		track->tex_dirty = false;
+		return r100_cs_track_texture_check(rdev, track);
+	}
+	return 0;
 }
 
 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
 {
 	unsigned i, face;
 
+	track->cb_dirty = true;
+	track->zb_dirty = true;
+	track->tex_dirty = true;
+	track->aa_dirty = true;
+
 	if (rdev->family < CHIP_R300) {
 		track->num_cb = 1;
 		if (rdev->family <= CHIP_RS200)
@@ -3436,6 +3472,8 @@
 		track->num_texture = 16;
 		track->maxy = 4096;
 		track->separate_cube = 0;
+		track->aaresolve = false;
+		track->aa.robj = NULL;
 	}
 
 	for (i = 0; i < track->num_cb; i++) {
@@ -3521,7 +3559,7 @@
 	if (i < rdev->usec_timeout) {
 		DRM_INFO("ring test succeeded in %d usecs\n", i);
 	} else {
-		DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
+		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
@@ -3745,8 +3783,6 @@
 	r100_mc_program(rdev);
 	/* Resume clock */
 	r100_clock_startup(rdev);
-	/* Initialize GPU configuration (# pipes, ...) */
-//	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600..2fef9de 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@
 	unsigned                compress_format;
 };
 
-struct r100_cs_track_limits {
-	unsigned num_cb;
-	unsigned num_texture;
-	unsigned max_levels;
-};
-
 struct r100_cs_track {
-	struct radeon_device *rdev;
 	unsigned			num_cb;
 	unsigned                        num_texture;
 	unsigned			maxy;
@@ -73,11 +66,17 @@
 	struct r100_cs_track_array	arrays[11];
 	struct r100_cs_track_cb 	cb[R300_MAX_CB];
 	struct r100_cs_track_cb 	zb;
+	struct r100_cs_track_cb 	aa;
 	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
 	bool				z_enabled;
 	bool                            separate_cube;
 	bool				zb_cb_clear;
 	bool				blend_read_enable;
+	bool				cb_dirty;
+	bool				zb_dirty;
+	bool				tex_dirty;
+	bool				aa_dirty;
+	bool				aaresolve;
 };
 
 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
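
The new cb_dirty/zb_dirty/tex_dirty/aa_dirty flags let r100_cs_track_check() skip whole validation passes when the matching state has not been touched, and the r100.c/r200.c/r300.c hunks set them at every register write that affects that state. A tiny standalone sketch of the same dirty-flag gating, reduced to one register and two flags:

/* Sketch of dirty-flag gating as used by the CS tracker above: expensive
 * checks run only when the matching state was touched since the last call. */
#include <stdio.h>
#include <stdbool.h>

struct track {
	unsigned maxy;
	bool cb_dirty, zb_dirty;
};

static void write_reg_re_width_height(struct track *t, unsigned val)
{
	t->maxy = (val >> 16) & 0x7ff;
	t->cb_dirty = true;		/* both buffers depend on maxy */
	t->zb_dirty = true;
}

static void track_check(struct track *t)
{
	if (t->cb_dirty) {
		printf("validating color buffer (maxy=%u)\n", t->maxy);
		t->cb_dirty = false;
	}
	if (t->zb_dirty) {
		printf("validating depth buffer\n");
		t->zb_dirty = false;
	}
}

int main(void)
{
	struct track t = { .cb_dirty = true, .zb_dirty = true };

	track_check(&t);		/* first call validates everything */
	track_check(&t);		/* nothing dirty: both checks skipped */
	write_reg_re_width_height(&t, 768 << 16);
	track_check(&t);		/* re-validates after the state write */
	return 0;
}
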
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c3..f240583 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_OFFSET_F1_0:
 	case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@
 		track->textures[i].cube_info[face - 1].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].cube_info[face - 1].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@
 		ib[idx] = tmp;
 
 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@
 		}
 
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@
 		i = (reg - R200_PP_TXSIZE_0) / 32;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXPITCH_0:
 	case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@
 	case R200_PP_TXPITCH_5:
 		i = (reg - R200_PP_TXPITCH_0) / 32;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFILTER_0:
 	case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXMULTI_CTL_0:
 	case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@
 			track->textures[i].tex_coord_type = 1;
 			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFORMAT_0:
 	case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_FACES_0:
 	case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fae5e70..069efa8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,6 +69,9 @@
 	mb();
 }
 
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE  (1 << 3)
+
 int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
 	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
@@ -78,7 +81,7 @@
 	}
 	addr = (lower_32_bits(addr) >> 8) |
 	       ((upper_32_bits(addr) & 0xff) << 24) |
-	       0xc;
+	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
 	/* on x86 we want this to be CPU endian, on powerpc
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
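
The new R300_PTE_WRITEABLE/R300_PTE_READABLE defines name the bare 0xc that used to be OR'd into the GART entry: a 40-bit, page-aligned DMA address stored in 256-byte units, with the two permission bits sitting in the low nibble that page alignment leaves free. A standalone sketch of that packing:

/* Sketch of the RV370 GART entry packing above: a 40-bit, page-aligned DMA
 * address in 256-byte units plus the two permission bits. */
#include <stdio.h>
#include <stdint.h>

#define R300_PTE_WRITEABLE (1u << 2)
#define R300_PTE_READABLE  (1u << 3)

static uint32_t gart_entry(uint64_t addr)
{
	uint32_t lo = (uint32_t)addr;
	uint32_t hi = (uint32_t)(addr >> 32);

	/* page alignment guarantees bits 2 and 3 are free for the flags */
	return (lo >> 8) | ((hi & 0xff) << 24) |
	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
}

int main(void)
{
	printf("entry for 0x123456000 = 0x%08x\n", gart_entry(0x123456000ull));
	return 0;
}
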
@@ -135,7 +138,7 @@
 	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
 	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
 	/* Clear error */
-	WREG32_PCIE(0x18, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_EN;
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
@@ -405,12 +408,13 @@
 {
 	struct r100_mc_save save;
 	u32 status, tmp;
+	int ret = 0;
 
-	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	r100_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -451,11 +455,11 @@
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeeded\n");
 	r100_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 /*
@@ -663,6 +667,7 @@
 		}
 		track->cb[i].robj = reloc->robj;
 		track->cb[i].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_ZB_DEPTHOFFSET:
@@ -675,6 +680,7 @@
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_TX_OFFSET_0:
@@ -713,6 +719,7 @@
 		tmp |= tile_flags;
 		ib[idx] = tmp;
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	/* Tracked registers */
 	case 0x2084:
@@ -739,6 +746,8 @@
 		if (p->rdev->family < CHIP_RV515) {
 			track->maxy -= 1440;
 		}
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case 0x4E00:
 		/* RB3D_CCTL */
@@ -748,6 +757,7 @@
 			return -EINVAL;
 		}
 		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+		track->cb_dirty = true;
 		break;
 	case 0x4E38:
 	case 0x4E3C:
@@ -810,6 +820,7 @@
 				  ((idx_value >> 21) & 0xF));
 			return -EINVAL;
 		}
+		track->cb_dirty = true;
 		break;
 	case 0x4F00:
 		/* ZB_CNTL */
@@ -818,6 +829,7 @@
 		} else {
 			track->z_enabled = false;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F10:
 		/* ZB_FORMAT */
@@ -834,6 +846,7 @@
 				  (idx_value & 0xF));
 			return -EINVAL;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F24:
 		/* ZB_DEPTHPITCH */
@@ -857,14 +870,17 @@
 		ib[idx] = tmp;
 
 		track->zb.pitch = idx_value & 0x3FFC;
+		track->zb_dirty = true;
 		break;
 	case 0x4104:
+		/* TX_ENABLE */
 		for (i = 0; i < 16; i++) {
 			bool enabled;
 
 			enabled = !!(idx_value & (1 << i));
 			track->textures[i].enabled = enabled;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x44C0:
 	case 0x44C4:
@@ -894,6 +910,7 @@
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_FL_I16:
 		case R300_TX_FORMAT_Y8X8:
 		case R300_TX_FORMAT_Z5Y6X5:
 		case R300_TX_FORMAT_Z6Y5X5:
@@ -906,6 +923,7 @@
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_FL_I16A16:
 		case R300_TX_FORMAT_Z11Y11X10:
 		case R300_TX_FORMAT_Z10Y11X11:
 		case R300_TX_FORMAT_W8Z8Y8X8:
@@ -947,8 +965,8 @@
 			DRM_ERROR("Invalid texture format %u\n",
 				  (idx_value & 0x1F));
 			return -EINVAL;
-			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4400:
 	case 0x4404:
@@ -976,6 +994,7 @@
 		if (tmp == 2 || tmp == 4 || tmp == 6) {
 			track->textures[i].roundup_h = false;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4500:
 	case 0x4504:
@@ -1013,6 +1032,7 @@
 			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
 			return -EINVAL;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4480:
 	case 0x4484:
@@ -1042,6 +1062,7 @@
 		track->textures[i].use_pitch = !!tmp;
 		tmp = (idx_value >> 22) & 0xF;
 		track->textures[i].txdepth = tmp;
+		track->tex_dirty = true;
 		break;
 	case R300_ZB_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1056,6 +1077,7 @@
 	case 0x4e0c:
 		/* RB3D_COLOR_CHANNEL_MASK */
 		track->color_channel_mask = idx_value;
+		track->cb_dirty = true;
 		break;
 	case 0x43a4:
 		/* SC_HYPERZ_EN */
@@ -1069,6 +1091,8 @@
 	case 0x4f1c:
 		/* ZB_BW_CNTL */
 		track->zb_cb_clear = !!(idx_value & (1 << 5));
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		if (p->rdev->hyperz_filp != p->filp) {
 			if (idx_value & (R300_HIZ_ENABLE |
 					 R300_RD_COMP_ENABLE |
@@ -1080,8 +1104,28 @@
 	case 0x4e04:
 		/* RB3D_BLENDCNTL */
 		track->blend_read_enable = !!(idx_value & (1 << 2));
+		track->cb_dirty = true;
 		break;
-	case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+	case R300_RB3D_AARESOLVE_OFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->aa.robj = reloc->robj;
+		track->aa.offset = idx_value;
+		track->aa_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_RB3D_AARESOLVE_PITCH:
+		track->aa.pitch = idx_value & 0x3FFE;
+		track->aa_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_CTL:
+		track->aaresolve = idx_value & 0x1;
+		track->aa_dirty = true;
 		break;
 	case 0x4f30: /* ZB_MASK_OFFSET */
 	case 0x4f34: /* ZB_ZMASK_PITCH */
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d536..f0bce39 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
 #define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
 #define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
 
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
 #define R300_RB3D_AARESOLVE_CTL             0x4E88
 /* gap */
 
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c387346..0b59ed7 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -96,7 +96,7 @@
 		       "programming pipes. Bad things might happen.\n");
 	}
 	/* get max number of pipes */
-	gb_pipe_select = RREG32(0x402C);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
 	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
 
 	/* SE chips have 1 pipe */
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c8677f..2ce80d9 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -79,8 +79,8 @@
 		WREG32(0x4128, 0xFF);
 	}
 	r420_pipes_init(rdev);
-	gb_pipe_select = RREG32(0x402C);
-	tmp = RREG32(0x170C);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
 	pipe_select_current = (tmp >> 2) & 3;
 	tmp = (1 << pipe_select_current) |
 	      (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6b50716..9b3fad2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,12 +97,16 @@
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 
 /* get temperature in millidegrees */
-u32 rv6xx_get_temp(struct radeon_device *rdev)
+int rv6xx_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
+	int actual_temp = temp & 0xff;
 
-	return temp * 1000;
+	if (temp & 0x100)
+		actual_temp -= 256;
+
+	return actual_temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -1251,7 +1255,6 @@
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r600_vram_gtt_location(rdev, &rdev->mc);
 
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -1287,6 +1290,9 @@
 			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
 	u32 tmp;
 
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return 0;
+
 	dev_info(rdev->dev, "GPU softreset \n");
 	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
 		RREG32(R_008010_GRBM_STATUS));
@@ -1930,7 +1936,7 @@
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	WREG32(SCRATCH_UMSK, 0);
 }
@@ -2098,7 +2104,11 @@
 
 	r600_cp_stop(rdev);
 
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2185,7 +2195,11 @@
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address whether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2358,24 +2372,6 @@
 	/* FIXME: implement */
 }
 
-
-bool r600_card_posted(struct radeon_device *rdev)
-{
-	uint32_t reg;
-
-	/* first check CRTCs */
-	reg = RREG32(D1CRTC_CONTROL) |
-		RREG32(D2CRTC_CONTROL);
-	if (reg & CRTC_EN)
-		return true;
-
-	/* then check MEM_SIZE, in case the crtcs are off */
-	if (RREG32(CONFIG_MEMSIZE))
-		return true;
-
-	return false;
-}
-
 int r600_startup(struct radeon_device *rdev)
 {
 	int r;
@@ -2536,7 +2532,7 @@
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -2639,7 +2635,11 @@
 {
 	/* FIXME: implement */
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, ib->length_dw);
 }
@@ -3308,8 +3308,8 @@
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id =  rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
@@ -3658,6 +3658,9 @@
 	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
 	u16 link_cntl2;
 
+	if (radeon_pcie_gen2 == 0)
+		return;
+
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f..7f10434 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@
 	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
 
 	for (i = 0; i < r6xx_vs_size; i++)
-		vs[i] = r6xx_vs[i];
+		vs[i] = cpu_to_le32(r6xx_vs[i]);
 	for (i = 0; i < r6xx_ps_size; i++)
-		ps[i] = r6xx_ps[i];
+		ps[i] = cpu_to_le32(r6xx_ps[i]);
 
 	dev_priv->blit_vb->used = 512;
 
@@ -192,6 +192,9 @@
 	DRM_DEBUG("\n");
 
 	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	BEGIN_RING(9);
 	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@
 	OUT_RING(DI_PT_RECTLIST);
 
 	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
 	OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
 
 	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
 	OUT_RING(1);
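
The r600_blit.c hunks above swap each shader dword with cpu_to_le32() and add the big-endian fetch flags, because the CP consumes the vertex/pixel programs as little-endian dwords while the r6xx_vs/r6xx_ps arrays are kept in CPU byte order. A host-side sketch of the per-word conversion, with store_le32() standing in for cpu_to_le32() and a plain byte buffer in place of the mapped vertex buffer:

/* Sketch of the per-dword little-endian shader upload above; store_le32()
 * stands in for cpu_to_le32() plus the write into the mapped buffer. */
#include <stdio.h>
#include <stdint.h>

static void store_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = v >> 24;
}

int main(void)
{
	const uint32_t fake_vs[2] = { 0x00000004, 0x81000000 };	/* made-up words */
	uint8_t bo[8];							/* fake mapping */
	unsigned i;

	for (i = 0; i < 2; i++)
		store_le32(bo + i * 4, fake_vs[i]);

	printf("first shader dword as bytes: %02x %02x %02x %02x\n",
	       bo[0], bo[1], bo[2], bo[3]);
	return 0;
}
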
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 86e5aa0..df68d91 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 27));
+	cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -165,6 +165,9 @@
 	u32 sq_vtx_constant_word2;
 
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
 	radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@
 	if (h < 1)
 		h = 1;
 
-	sq_tex_resource_word0 = (1 << 0);
+	sq_tex_resource_word0 = (1 << 0) | (1 << 3);
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
 				  ((w - 1) << 19));
 
@@ -253,7 +256,11 @@
 	radeon_ring_write(rdev, DI_PT_RECTLIST);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, dwords);
 
@@ -467,7 +478,7 @@
 int r600_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r, dwords;
+	int i, r, dwords;
 	void *ptr;
 	u32 packet2s[16];
 	int num_packet2s = 0;
@@ -486,7 +497,7 @@
 
 	dwords = rdev->r600_blit.state_len;
 	while (dwords & 0xf) {
-		packet2s[num_packet2s++] = PACKET2(0);
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
 		dwords++;
 	}
 
@@ -529,8 +540,10 @@
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
+	for (i = 0; i < r6xx_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
@@ -545,7 +558,7 @@
 		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 		return r;
 	}
-	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 	return 0;
 }
 
@@ -553,7 +566,7 @@
 {
 	int r;
 
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	if (rdev->r600_blit.shader_obj == NULL)
 		return;
 	/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1..2d1f6c5 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@
 	0x00000000,
 	0x3c000000,
 	0x68cd1000,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b..c3ab959 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
 		     R600_RB_BLKSZ(15) |
 		     R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
-		     (15 << 8) |
-		     (3 << 0));
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@
 
 	if (!dev_priv->writeback_works) {
 		/* Disable writeback to avoid unnecessary bus master transfer */
-		RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) |
-			     RADEON_RB_NO_UPDATE);
+		RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+			     R600_BUF_SWAP_32BIT |
+#endif
+			     RADEON_READ(R600_CP_RB_CNTL) |
+			     R600_RB_NO_UPDATE);
 		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
 	}
 }
@@ -575,7 +585,11 @@
 
 	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
 	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
-	RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA);
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_RPTR_WR_ENA);
 
 	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
 	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@
 			+ dev_priv->gart_vm_start;
 	}
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-		     rptr_addr & 0xffffffff);
+#ifdef __BIG_ENDIAN
+		     (2 << 0) |
+#endif
+		     (rptr_addr & 0xfffffffc));
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
 		     upper_32_bits(rptr_addr));
 
@@ -1889,7 +1906,7 @@
 	{
 		u64 scratch_addr;
 
-		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR);
+		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
 		scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
 		scratch_addr += R600_SCRATCH_REG_OFFSET;
 		scratch_addr >>= 8;
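
The scratch-address hunk above is the other half of the RB_RPTR_ADDR change earlier in this file: once the low two bits of the register encode a swap mode rather than address bits, any code that reads the address back has to mask them off. A hedged sketch of that pairing, reusing the UMS driver's RADEON_READ/RADEON_WRITE accessors and register names; the helper names are invented:

/* Sketch only: the low two bits of R600_CP_RB_RPTR_ADDR now carry the
 * writeback swap mode, so they are set when the address is programmed
 * and masked back out when the address is recovered. */
static void sketch_set_rptr_addr(drm_radeon_private_t *dev_priv, u64 rptr_addr)
{
	u32 lo = lower_32_bits(rptr_addr) & 0xfffffffc;

#ifdef __BIG_ENDIAN
	lo |= (2 << 0);			/* 32-bit swap of the writeback dword */
#endif
	RADEON_WRITE(R600_CP_RB_RPTR_ADDR, lo);
	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
}

static u64 sketch_get_rptr_addr(drm_radeon_private_t *dev_priv)
{
	u64 addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xfffffffc;

	return addr | ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI) << 32);
}

The KMS side gains the matching RB_RPTR_SWAP(x) define in the r600d.h hunk below.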
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7831e08..153095f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -295,17 +295,18 @@
 	}
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(height, height_align)) {
-		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-			 __func__, __LINE__, height);
+		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+			 base_offset, base_align, array_mode);
 		return -EINVAL;
 	}
 
@@ -320,7 +321,10 @@
 			 * broken userspace.
 			 */
 		} else {
-			dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+				 array_mode,
+				 track->cb_color_bo_offset[i], tmp,
+				 radeon_bo_size(track->cb_color_bo[i]));
 			return -EINVAL;
 		}
 	}
@@ -455,17 +459,18 @@
 			}
 
 			if (!IS_ALIGNED(pitch, pitch_align)) {
-				dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-					 __func__, __LINE__, pitch);
+				dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+					 __func__, __LINE__, pitch, pitch_align, array_mode);
 				return -EINVAL;
 			}
 			if (!IS_ALIGNED(height, height_align)) {
-				dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-					 __func__, __LINE__, height);
+				dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+					 __func__, __LINE__, height, height_align, array_mode);
 				return -EINVAL;
 			}
 			if (!IS_ALIGNED(base_offset, base_align)) {
-				dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+				dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+					 base_offset, base_align, array_mode);
 				return -EINVAL;
 			}
 
@@ -473,9 +478,10 @@
 			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
 			tmp = ntiles * bpe * 64 * nviews;
 			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
-				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
-						track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
-						radeon_bo_size(track->db_bo));
+				dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+					 array_mode,
+					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+					 radeon_bo_size(track->db_bo));
 				return -EINVAL;
 			}
 		}
@@ -1227,18 +1233,18 @@
 	/* XXX check height as well... */
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
-			 __func__, __LINE__, base_offset);
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(mip_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
-			 __func__, __LINE__, mip_offset);
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 33cda01..f869897 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -81,7 +81,11 @@
 #define R600_MEDIUM_VID_LOWER_GPIO_CNTL                            0x720
 #define R600_LOW_VID_LOWER_GPIO_CNTL                               0x724
 
-
+#define R600_D1GRPH_SWAP_CONTROL                               0x610C
+#       define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
 
 #define R600_HDP_NONSURFACE_BASE                                0x2c04
 
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a5d898b..04bac0b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
 #define		ROQ_IB2_START(x)				((x) << 8)
 #define	CP_RB_BASE					0xC100
 #define	CP_RB_CNTL					0xC104
-#define		RB_BUFSZ(x)					((x)<<0)
-#define		RB_BLKSZ(x)					((x)<<8)
-#define		RB_NO_UPDATE					(1<<27)
-#define		RB_RPTR_WR_ENA					(1<<31)
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e948663..6b34294 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -92,6 +92,7 @@
 extern int radeon_audio;
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -178,10 +179,10 @@
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
 void rs690_pm_info(struct radeon_device *rdev);
-extern u32 rv6xx_get_temp(struct radeon_device *rdev);
-extern u32 rv770_get_temp(struct radeon_device *rdev);
-extern u32 evergreen_get_temp(struct radeon_device *rdev);
-extern u32 sumo_get_temp(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
 
 /*
  * Fences.
@@ -344,7 +345,6 @@
 	 * about vram size near mc fb location */
 	u64			mc_vram_size;
 	u64			visible_vram_size;
-	u64			active_vram_size;
 	u64			gtt_size;
 	u64			gtt_start;
 	u64			gtt_end;
@@ -811,8 +811,7 @@
 	fixed20_12		sclk;
 	fixed20_12		mclk;
 	fixed20_12		needed_bandwidth;
-	/* XXX: use a define for num power modes */
-	struct radeon_power_state power_state[8];
+	struct radeon_power_state *power_state;
 	/* number of valid power states */
 	int                     num_power_states;
 	int                     current_power_state_index;
@@ -1448,6 +1447,7 @@
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
 /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
 extern bool r600_card_posted(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 3a1b161..793c5e6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -759,7 +759,7 @@
 	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
+	.ring_ib_execute = &evergreen_ring_ib_execute,
 	.irq_set = &evergreen_irq_set,
 	.irq_process = &evergreen_irq_process,
 	.get_vblank_counter = &evergreen_get_vblank_counter,
@@ -805,7 +805,7 @@
 	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
+	.ring_ib_execute = &evergreen_ring_ib_execute,
 	.irq_set = &evergreen_irq_set,
 	.irq_process = &evergreen_irq_process,
 	.get_vblank_counter = &evergreen_get_vblank_counter,
@@ -834,6 +834,9 @@
 	.pm_finish = &evergreen_pm_finish,
 	.pm_init_profile = &rs780_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &evergreen_pre_page_flip,
+	.page_flip = &evergreen_page_flip,
+	.post_page_flip = &evergreen_post_page_flip,
 };
 
 static struct radeon_asic btc_asic = {
@@ -848,7 +851,7 @@
 	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
+	.ring_ib_execute = &evergreen_ring_ib_execute,
 	.irq_set = &evergreen_irq_set,
 	.irq_process = &evergreen_irq_process,
 	.get_vblank_counter = &evergreen_get_vblank_counter,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e01f0771..c59bd98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -355,6 +355,7 @@
 bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
 int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
 			uint64_t src_offset, uint64_t dst_offset,
 			unsigned num_pages, struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1573202..02d5c41 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@
 			/* some evergreen boards have bad data for this entry */
 			if (ASIC_IS_DCE4(rdev)) {
 				if ((i == 7) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 				    (gpio->sucI2cId.ucAccess == 0)) {
 					gpio->sucI2cId.ucAccess = 0x97;
 					gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@
 			/* some DCE3 boards have bad data for this entry */
 			if (ASIC_IS_DCE3(rdev)) {
 				if ((i == 4) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 				    (gpio->sucI2cId.ucAccess == 0x94))
 					gpio->sucI2cId.ucAccess = 0x14;
 			}
@@ -172,7 +172,7 @@
 			/* some evergreen boards have bad data for this entry */
 			if (ASIC_IS_DCE4(rdev)) {
 				if ((i == 7) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 				    (gpio->sucI2cId.ucAccess == 0)) {
 					gpio->sucI2cId.ucAccess = 0x97;
 					gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@
 			/* some DCE3 boards have bad data for this entry */
 			if (ASIC_IS_DCE3(rdev)) {
 				if ((i == 4) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 				    (gpio->sucI2cId.ucAccess == 0x94))
 					gpio->sucI2cId.ucAccess = 0x14;
 			}
@@ -252,7 +252,7 @@
 			pin = &gpio_info->asGPIO_Pin[i];
 			if (id == pin->ucGPIO_ID) {
 				gpio.id = pin->ucGPIO_ID;
-				gpio.reg = pin->usGpioPin_AIndex * 4;
+				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
 				gpio.mask = (1 << pin->ucGpioPinBitShift);
 				gpio.valid = true;
 				break;
@@ -387,15 +387,11 @@
 			*line_mux = 0x90;
 	}
 
-	/* mac rv630 */
-	if ((dev->pdev->device == 0x9588) &&
-	    (dev->pdev->subsystem_vendor == 0x106b) &&
-	    (dev->pdev->subsystem_device == 0x00a6)) {
-		if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
-		    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
-			*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
-			*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
-		}
+	/* mac rv630, rv730, others */
+	if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+	    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+		*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+		*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
 	}
 
 	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -1167,16 +1163,6 @@
 				p1pll->pll_out_min = 64800;
 			else
 				p1pll->pll_out_min = 20000;
-		} else if (p1pll->pll_out_min > 64800) {
-			/* Limiting the pll output range is a good thing generally as
-			 * it limits the number of possible pll combinations for a given
-			 * frequency presumably to the ones that work best on each card.
-			 * However, certain duallink DVI monitors seem to like
-			 * pll combinations that would be limited by this at least on
-			 * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
-			 * family.
-			 */
-			p1pll->pll_out_min = 64800;
 		}
 
 		p1pll->pll_in_min =
@@ -1288,11 +1274,11 @@
 				      data_offset);
 		switch (crev) {
 		case 1:
-			if (igp_info->info.ulBootUpMemoryClock)
+			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
 				return true;
 			break;
 		case 2:
-			if (igp_info->info_2.ulBootUpSidePortClock)
+			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
 				return true;
 			break;
 		default:
@@ -1456,7 +1442,7 @@
 
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1456,7 @@
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1484,7 +1470,7 @@
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1567,8 +1553,8 @@
 		if (misc & ATOM_DOUBLE_CLOCK_MODE)
 			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
-		lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
-		lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+		lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+		lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
 
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1583,13 +1569,13 @@
 			lvds->linkb = false;
 
 		/* parse the lcd record table */
-		if (lvds_info->info.usModePatchTableOffset) {
+		if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
 			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
 			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
 			bool bad_record = false;
 			u8 *record = (u8 *)(mode_info->atom_context->bios +
 					    data_offset +
-					    lvds_info->info.usModePatchTableOffset);
+					    le16_to_cpu(lvds_info->info.usModePatchTableOffset));
 			while (*record != ATOM_RECORD_END_TYPE) {
 				switch (*record) {
 				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -1991,6 +1977,9 @@
 	num_modes = power_info->info.ucNumOfPowerModeEntries;
 	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
 		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
 	/* last mode is usually default, array is low to high */
 	for (i = 0; i < num_modes; i++) {
 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
@@ -2200,7 +2189,7 @@
 		firmware_info =
 			(union firmware_info *)(mode_info->atom_context->bios +
 						data_offset);
-		vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+		vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
 	}
 
 	return vddc;
@@ -2295,7 +2284,7 @@
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->evergreen.usVDDC;
+			le16_to_cpu(clock_info->evergreen.usVDDC);
 	} else {
 		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
 		sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2306,7 +2295,7 @@
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->r600.usVDDC;
+			le16_to_cpu(clock_info->r600.usVDDC);
 	}
 
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -2342,6 +2331,10 @@
 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
 	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+				       power_info->pplib.ucNumStates, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
 	/* first mode is usually default, followed by low to high */
 	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
 		mode_index = 0;
@@ -2415,13 +2408,17 @@
 	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
 	state_array = (struct StateArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usStateArrayOffset);
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
 	clock_info_array = (struct ClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
 	non_clock_info_array = (struct NonClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usNonClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+				       state_array->ucNumEntries, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
 	for (i = 0; i < state_array->ucNumEntries; i++) {
 		mode_index = 0;
 		power_state = (union pplib_power_state *)&state_array->states[i];
@@ -2495,19 +2492,22 @@
 			break;
 		}
 	} else {
-		/* add the default mode */
-		rdev->pm.power_state[state_index].type =
-			POWER_STATE_TYPE_DEFAULT;
-		rdev->pm.power_state[state_index].num_clock_modes = 1;
-		rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
-		rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
-		rdev->pm.power_state[state_index].default_clock_mode =
-			&rdev->pm.power_state[state_index].clock_info[0];
-		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-		rdev->pm.power_state[state_index].pcie_lanes = 16;
-		rdev->pm.default_power_state_index = state_index;
-		rdev->pm.power_state[state_index].flags = 0;
-		state_index++;
+		rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+		if (rdev->pm.power_state) {
+			/* add the default mode */
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_DEFAULT;
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+			rdev->pm.power_state[state_index].default_clock_mode =
+				&rdev->pm.power_state[state_index].clock_info[0];
+			rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+			rdev->pm.power_state[state_index].pcie_lanes = 16;
+			rdev->pm.default_power_state_index = state_index;
+			rdev->pm.power_state[state_index].flags = 0;
+			state_index++;
+		}
 	}
 
 	rdev->pm.num_power_states = state_index;
@@ -2533,7 +2533,7 @@
 	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnEngineClock;
+	return le32_to_cpu(args.ulReturnEngineClock);
 }
 
 uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@
 	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnMemoryClock;
+	return le32_to_cpu(args.ulReturnMemoryClock);
 }
 
 void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@
 	SET_ENGINE_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 
-	args.ulTargetEngineClock = eng_clock;	/* 10 khz */
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2565,7 +2565,7 @@
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
-	args.ulTargetMemoryClock = mem_clock;	/* 10 khz */
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2623,7 +2623,7 @@
 	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
 
 	/* tell the bios not to handle mode switching */
-	bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
+	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
 	if (rdev->family >= CHIP_R600) {
 		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -2674,10 +2674,13 @@
 	else
 		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
 
-	if (lock)
+	if (lock) {
 		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
-	else
+		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+	} else {
 		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch |= ATOM_S6_ACC_MODE;
+	}
 
 	if (rdev->family >= CHIP_R600)
 		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
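
Every radeon_atombios.c hunk above is the same class of fix: ATOM BIOS tables store their multi-byte fields little-endian, so usXxx/ulXxx values must pass through le16_to_cpu()/le32_to_cpu() before being compared or scaled, and values written into command-table argument structures go the other way with cpu_to_le16()/cpu_to_le32(). A minimal sketch of the convention; the struct and field names below are hypothetical, not real ATOM definitions:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical ATOM-style table entry: multi-byte fields are
 * little-endian in the BIOS image regardless of host byte order. */
struct atom_example_entry {
	__le16 usClock;		/* in 10 kHz units */
	__le32 ulFlags;
};

static u32 example_read_clock_khz(const struct atom_example_entry *e)
{
	return (u32)le16_to_cpu(e->usClock) * 10;	/* table units -> kHz */
}

static void example_write_clock(struct atom_example_entry *e, u32 khz)
{
	e->usClock = cpu_to_le16(khz / 10);		/* kHz -> table units */
}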
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 591fcae..cf7c8d5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,6 +1504,11 @@
 			   (rdev->pdev->subsystem_device == 0x4a48)) {
 			/* Mac X800 */
 			rdev->mode_info.connector_table = CT_MAC_X800;
+		} else if ((rdev->pdev->device == 0x4150) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4150)) {
+			/* Mac G5 9600 */
+			rdev->mode_info.connector_table = CT_MAC_G5_9600;
 		} else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@
 					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
 					    &hpd);
 		break;
+	case CT_MAC_G5_9600:
+		DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* ADC - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);
@@ -2442,6 +2489,17 @@
 
 	rdev->pm.default_power_state_index = -1;
 
+	/* allocate 2 power states */
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+	if (!rdev->pm.power_state) {
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.num_power_states = 0;
+
+		rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+		rdev->pm.current_clock_mode_index = 0;
+		return;
+	}
+
 	if (rdev->flags & RADEON_IS_MOBILITY) {
 		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
 		if (offset) {
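
The allocation added above pairs with the radeon.h change (the fixed power_state[8] array becomes a pointer), the kzalloc calls in radeon_atombios.c, and the kfree() added to radeon_pm_fini() further down: each BIOS parser now allocates exactly as many states as it will fill, falls back to zero states when the allocation fails, and teardown frees the array. A hedged sketch of that allocate/fallback/free pairing; the function names are invented and the types come from the driver's radeon.h:

#include <linux/slab.h>
#include <linux/errno.h>

/* Sketch: size the state array from the BIOS table, and fall back to
 * "no states" if the kzalloc fails so later code never indexes NULL. */
static int example_alloc_power_states(struct radeon_device *rdev, int num_states)
{
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_states,
				       GFP_KERNEL);
	if (!rdev->pm.power_state) {
		rdev->pm.num_power_states = 0;
		return -ENOMEM;
	}
	rdev->pm.num_power_states = num_states;
	return 0;
}

static void example_free_power_states(struct radeon_device *rdev)
{
	kfree(rdev->pm.power_state);	/* kfree(NULL) is a no-op */
	rdev->pm.power_state = NULL;
}

(The if (rdev->pm.power_state) guard around the kfree() in the radeon_pm.c hunk is harmless but not strictly needed, since kfree(NULL) is a no-op.)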
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 26091d6..4954e2d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -891,9 +891,9 @@
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
-	acquire_console_sem();
+	console_lock();
 	radeon_fbdev_set_suspend(rdev, 1);
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 
@@ -905,11 +905,11 @@
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev)) {
-		release_console_sem();
+		console_unlock();
 		return -1;
 	}
 	pci_set_master(dev->pdev);
@@ -920,7 +920,7 @@
 	radeon_restore_bios_scratch_regs(rdev);
 
 	radeon_fbdev_set_suspend(rdev, 0);
-	release_console_sem();
+	console_unlock();
 
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
@@ -936,8 +936,11 @@
 int radeon_gpu_reset(struct radeon_device *rdev)
 {
 	int r;
+	int resched;
 
 	radeon_save_bios_scratch_regs(rdev);
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 	radeon_suspend(rdev);
 
 	r = radeon_asic_reset(rdev);
@@ -946,6 +949,7 @@
 		radeon_resume(rdev);
 		radeon_restore_bios_scratch_regs(rdev);
 		drm_helper_resume_force_mode(rdev->ddev);
+		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 		return 0;
 	}
 	/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d26dabf..3e7e7f9e 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -780,6 +780,125 @@
 	return ret;
 }
 
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+			     u32 target_clock,
+			     u32 post_div,
+			     u32 ref_div,
+			     u32 *fb_div,
+			     u32 *frac_fb_div)
+{
+	u32 tmp = post_div * ref_div;
+
+	tmp *= target_clock;
+	*fb_div = tmp / pll->reference_freq;
+	*frac_fb_div = tmp % pll->reference_freq;
+
+	if (*fb_div > pll->max_feedback_div)
+		*fb_div = pll->max_feedback_div;
+	else if (*fb_div < pll->min_feedback_div)
+		*fb_div = pll->min_feedback_div;
+}
+
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+			      u32 target_clock)
+{
+	u32 vco, post_div, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		return pll->post_div;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_min;
+		else
+			vco = pll->pll_out_min;
+	} else {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_max;
+		else
+			vco = pll->pll_out_max;
+	}
+
+	post_div = vco / target_clock;
+	tmp = vco % target_clock;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (tmp)
+			post_div++;
+	} else {
+		if (!tmp)
+			post_div--;
+	}
+
+	if (post_div > pll->max_post_div)
+		post_div = pll->max_post_div;
+	else if (post_div < pll->min_post_div)
+		post_div = pll->min_post_div;
+
+	return post_div;
+}
+
+#define MAX_TOLERANCE 10
+
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      u32 freq,
+			      u32 *dot_clock_p,
+			      u32 *fb_div_p,
+			      u32 *frac_fb_div_p,
+			      u32 *ref_div_p,
+			      u32 *post_div_p)
+{
+	u32 target_clock = freq / 10;
+	u32 post_div = avivo_get_post_div(pll, target_clock);
+	u32 ref_div = pll->min_ref_div;
+	u32 fb_div = 0, frac_fb_div = 0, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		ref_div = pll->reference_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
+		frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
+		if (frac_fb_div >= 5) {
+			frac_fb_div -= 5;
+			frac_fb_div = frac_fb_div / 10;
+			frac_fb_div++;
+		}
+		if (frac_fb_div >= 10) {
+			fb_div++;
+			frac_fb_div = 0;
+		}
+	} else {
+		while (ref_div <= pll->max_ref_div) {
+			avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+					 &fb_div, &frac_fb_div);
+			if (frac_fb_div >= (pll->reference_freq / 2))
+				fb_div++;
+			frac_fb_div = 0;
+			tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+			tmp = (tmp * 10000) / target_clock;
+
+			if (tmp > (10000 + MAX_TOLERANCE))
+				ref_div++;
+			else if (tmp >= (10000 - MAX_TOLERANCE))
+				break;
+			else
+				ref_div++;
+		}
+	}
+
+	*dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+		(ref_div * post_div * 10);
+	*fb_div_p = fb_div;
+	*frac_fb_div_p = frac_fb_div;
+	*ref_div_p = ref_div;
+	*post_div_p = post_div;
+	DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+}
+
+/* pre-avivo */
 static inline uint32_t radeon_div(uint64_t n, uint32_t d)
 {
 	uint64_t mod;
@@ -790,13 +909,13 @@
 	return n;
 }
 
-void radeon_compute_pll(struct radeon_pll *pll,
-			uint64_t freq,
-			uint32_t *dot_clock_p,
-			uint32_t *fb_div_p,
-			uint32_t *frac_fb_div_p,
-			uint32_t *ref_div_p,
-			uint32_t *post_div_p)
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+			       uint64_t freq,
+			       uint32_t *dot_clock_p,
+			       uint32_t *fb_div_p,
+			       uint32_t *frac_fb_div_p,
+			       uint32_t *ref_div_p,
+			       uint32_t *post_div_p)
 {
 	uint32_t min_ref_div = pll->min_ref_div;
 	uint32_t max_ref_div = pll->max_ref_div;
@@ -826,6 +945,9 @@
 		pll_out_max = pll->pll_out_max;
 	}
 
+	if (pll_out_min > 64800)
+		pll_out_min = 64800;
+
 	if (pll->flags & RADEON_PLL_USE_REF_DIV)
 		min_ref_div = max_ref_div = pll->reference_div;
 	else {
@@ -965,6 +1087,10 @@
 	*frac_fb_div_p = best_frac_feedback_div;
 	*ref_div_p = best_ref_div;
 	*post_div_p = best_post_div;
+	DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+		      best_ref_div, best_post_div);
+
 }
 
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
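
radeon_compute_pll_avivo() above picks the post divider from the VCO limits first, then walks the reference divider until the computed clock lands within MAX_TOLERANCE (10 parts in 10000, i.e. 0.1%) of the target; the fractional feedback divider is carried in tenths. The output frequency it solves for, restated as a sketch of the same arithmetic used in the *dot_clock_p assignment above:

/* Sketch of the relationship the new helper solves: choose
 * (ref_div, fb_div.frac_fb_div, post_div) such that
 *
 *     f_out = f_ref * (fb_div + frac_fb_div / 10) / (ref_div * post_div)
 *
 * lands within 0.1% of the requested clock.  The parameter names
 * mirror the function's own. */
static u32 example_pll_output(u32 ref_freq, u32 fb_div, u32 frac_fb_div,
			      u32 ref_div, u32 post_div)
{
	return (ref_freq * fb_div * 10 + ref_freq * frac_fb_div) /
	       (ref_div * post_div * 10);
}

The legacy exhaustive search is kept as radeon_compute_pll_legacy() for pre-AVIVO parts, which is why the radeon_legacy_crtc.c and radeon_mode.h hunks below only rename the existing callers and prototype.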
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index be5cb4f..275b26a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,7 +48,7 @@
  * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
  * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
  *   2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
- *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
+ *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
  */
 #define KMS_DRIVER_MAJOR	2
 #define KMS_DRIVER_MINOR	8
@@ -104,6 +104,7 @@
 int radeon_audio = 1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -147,6 +148,9 @@
 MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
 module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
 
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba8..5cba46b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@
 #define R600_CP_RB_CNTL                                        0xc104
 #       define R600_RB_BUFSZ(x)                                ((x) << 0)
 #       define R600_RB_BLKSZ(x)                                ((x) << 8)
+#	define R600_BUF_SWAP_32BIT		               (2 << 16)
 #       define R600_RB_NO_UPDATE                               (1 << 27)
 #       define R600_RB_RPTR_WR_ENA                             (1 << 31)
 #define R600_CP_RB_RPTR_WR                                     0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 8fd1842..b427488 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-		if (drm_detect_monitor_audio(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@
 	case DRM_MODE_CONNECTOR_DVID:
 	case DRM_MODE_CONNECTOR_HDMIA:
 	default:
-		if (drm_detect_monitor_audio(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
-		else if (drm_detect_monitor_audio(radeon_connector->edid)) {
+		else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -910,7 +910,7 @@
 
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-		args.v1.usInitInfo = connector_object_id;
+		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
 	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
 		args.v1.asMode.ucLaneSel = lane_num;
 		args.v1.asMode.ucLaneSet = lane_set;
@@ -1063,7 +1063,7 @@
 	if (!ASIC_IS_DCE4(rdev))
 		return;
 
-	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) ||
+	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
 	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
 		return;
 
@@ -1140,7 +1140,7 @@
 		case 3:
 			args.v3.sExtEncoder.ucAction = action;
 			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
-				args.v3.sExtEncoder.usConnectorId = connector_object_id;
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
 			else
 				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@
 	}
 
 	/* set scaler clears this on some chips */
-	/* XXX check DCE4 */
-	if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
-		if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
-			WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-			       AVIVO_D1MODE_INTERLEAVE_EN);
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ca32e9c..cc44bdf 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@
 	u32 tiling_flags = 0;
 	int ret;
 	int aligned_size, size;
+	int height = mode_cmd->height;
 
 	/* need to align pitch with crtc limits */
 	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-	size = mode_cmd->pitch * mode_cmd->height;
+	if (rdev->family >= CHIP_R600)
+		height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitch * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,
@@ -225,6 +228,8 @@
 
 	strcpy(info->fix.id, "radeondrmfb");
 
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &radeonfb_ops;
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index df95eb8..1fe95df 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -156,9 +156,12 @@
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
+	struct ttm_mem_type_manager *man;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
 	args->vram_size = rdev->mc.real_vram_size;
-	args->vram_visible = rdev->mc.real_vram_size;
+	args->vram_visible = (u64)man->size << PAGE_SHIFT;
 	if (rdev->stollen_vga_memory)
 		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
 	args->vram_visible -= radeon_fbdev_total_size(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a289646..9ec830c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -110,11 +110,14 @@
 
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
+	int i;
 	int r = 0;
 
 	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 
 	spin_lock_init(&rdev->irq.sw_lock);
+	for (i = 0; i < rdev->num_crtc; i++)
+		spin_lock_init(&rdev->irq.pflip_lock[i]);
 	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
 	if (r) {
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 28a53e4a..8387d32 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,6 +201,10 @@
 		}
 		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
 		break;
+	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+		/* return clock value in KHz */
+		value = rdev->clock.spll.reference_freq * 10;
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
@@ -243,6 +247,8 @@
 	struct radeon_device *rdev = dev->dev_private;
 	if (rdev->hyperz_filp == file_priv)
 		rdev->hyperz_filp = NULL;
+	if (rdev->cmask_filp == file_priv)
+		rdev->cmask_filp = NULL;
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index ace2e63..78968b7 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -443,7 +443,7 @@
 		       (target_fb->bits_per_pixel * 8));
 	crtc_pitch |= crtc_pitch << 16;
 
-
+	crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
 	if (tiling_flags & RADEON_TILING_MACRO) {
 		if (ASIC_IS_R300(rdev))
 			crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,6 +502,7 @@
 	gen_cntl_val = RREG32(gen_cntl_reg);
 	gen_cntl_val &= ~(0xf << 8);
 	gen_cntl_val |= (format << 8);
+	gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
 	WREG32(gen_cntl_reg, gen_cntl_val);
 
 	crtc_offset = (u32)base;
@@ -778,9 +779,9 @@
 	DRM_DEBUG_KMS("\n");
 
 	if (!use_bios_divs) {
-		radeon_compute_pll(pll, mode->clock,
-				   &freq, &feedback_div, &frac_fb_div,
-				   &reference_div, &post_divider);
+		radeon_compute_pll_legacy(pll, mode->clock,
+					  &freq, &feedback_div, &frac_fb_div,
+					  &reference_div, &post_divider);
 
 		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
 			if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 12bdeab..a670caa 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -149,6 +149,7 @@
 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
 #define RADEON_PLL_USE_POST_DIV         (1 << 12)
 #define RADEON_PLL_IS_LCD               (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
 
 struct radeon_pll {
 	/* reference frequency */
@@ -208,6 +209,7 @@
 	CT_EMAC,
 	CT_RN50_POWER,
 	CT_MAC_X800,
+	CT_MAC_G5_9600,
 };
 
 enum radeon_dvo_chip {
@@ -510,13 +512,21 @@
 					     struct radeon_atom_ss *ss,
 					     int id, u32 clock);
 
-extern void radeon_compute_pll(struct radeon_pll *pll,
-			       uint64_t freq,
-			       uint32_t *dot_clock_p,
-			       uint32_t *fb_div_p,
-			       uint32_t *frac_fb_div_p,
-			       uint32_t *ref_div_p,
-			       uint32_t *post_div_p);
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+				      uint64_t freq,
+				      uint32_t *dot_clock_p,
+				      uint32_t *fb_div_p,
+				      uint32_t *frac_fb_div_p,
+				      uint32_t *ref_div_p,
+				      uint32_t *post_div_p);
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     u32 freq,
+				     u32 *dot_clock_p,
+				     u32 *fb_div_p,
+				     u32 *frac_fb_div_p,
+				     u32 *ref_div_p,
+				     u32 *post_div_p);
 
 extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3b1b2bf..2aed03b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -430,7 +430,7 @@
 {
 	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
 	struct radeon_device *rdev = ddev->dev_private;
-	u32 temp;
+	int temp;
 
 	switch (rdev->pm.int_thermal_type) {
 	case THERMAL_TYPE_RV6XX:
@@ -646,6 +646,9 @@
 #endif
 	}
 
+	if (rdev->pm.power_state)
+		kfree(rdev->pm.power_state);
+
 	radeon_hwmon_fini(rdev);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 3cd4dac..ec93a75 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -375,6 +375,8 @@
 #define RADEON_CONFIG_APER_SIZE             0x0108
 #define RADEON_CONFIG_BONDS                 0x00e8
 #define RADEON_CONFIG_CNTL                  0x00e0
+#       define RADEON_CFG_VGA_RAM_EN        (1 << 8)
+#       define RADEON_CFG_VGA_IO_DIS        (1 << 9)
 #       define RADEON_CFG_ATI_REV_A11       (0   << 16)
 #       define RADEON_CFG_ATI_REV_A12       (1   << 16)
 #       define RADEON_CFG_ATI_REV_A13       (2   << 16)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1272e4b..8389b4c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -589,6 +589,20 @@
 	DRM_INFO("radeon: ttm finalized\n");
 }
 
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+	struct ttm_mem_type_manager *man;
+
+	if (!rdev->mman.initialized)
+		return;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
+	man->size = size >> PAGE_SHIFT;
+}
+
 static struct vm_operations_struct radeon_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
@@ -787,9 +801,9 @@
 		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
 		radeon_mem_types_list[i].driver_features = 0;
 		if (i == 0)
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
 
 	}
 	/* Add ttm page pool to debugfs */
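
radeon_ttm_set_active_vram_size() replaces the rdev->mc.active_vram_size field removed from radeon.h: instead of tracking a separate number, the driver resizes the TTM VRAM manager itself, which both caps where new buffers may be placed and supplies the figure the GEM info ioctl (radeon_gem.c hunk above) now reports to userspace. A sketch of the reporting side, mirroring that hunk; the helper name is invented:

/* Sketch: derive the userspace-visible VRAM figure from the TTM
 * manager size, which is kept in pages (hence the PAGE_SHIFT). */
static u64 example_visible_vram(struct radeon_device *rdev)
{
	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	return (u64)man->size << PAGE_SHIFT;
}

r600_blit_init() widens the active size to real_vram_size once the blitter used for buffer moves is ready, and r600_blit_fini() narrows it back to visible_vram_size, matching the r600_blit_kms.c hunks earlier in the series.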
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index ac40fd3..9177f91 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -439,7 +439,7 @@
 0x000286EC SPI_COMPUTE_NUM_THREAD_X
 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
-0x000286F8 GDS_ADDR_SIZE
+0x00028724 GDS_ADDR_SIZE
 0x00028780 CB_BLEND0_CONTROL
 0x00028784 CB_BLEND1_CONTROL
 0x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1..e8a1786 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c..722074e 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4200 GA_POINT_S0
 0x4204 GA_POINT_T0
@@ -750,9 +749,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80..d9f6286 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index ef422bb..911a8fb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4114 SU_TEX_WRAP_PS3
 0x4118 PS3_ENABLE
@@ -461,9 +460,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
 0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 5512e4e..c76283d 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -203,6 +203,9 @@
 	radeon_gart_table_ram_free(rdev);
 }
 
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE  (1 << 3)
+
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
 	uint32_t entry;
@@ -213,7 +216,7 @@
 
 	entry = (lower_32_bits(addr) & PAGE_MASK) |
 		((upper_32_bits(addr) & 0xff) << 4) |
-		0xc;
+		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
 	entry = cpu_to_le32(entry);
 	rdev->gart.table.ram.ptr[i] = entry;
 	return 0;
@@ -226,8 +229,8 @@
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
-		tmp = RREG32(0x0150);
-		if (tmp & (1 << 2)) {
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
 			return 0;
 		}
 		DRM_UDELAY(1);
@@ -241,7 +244,7 @@
 	r420_pipes_init(rdev);
 	if (rs400_mc_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
+		       "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
 	}
 }
 
@@ -300,9 +303,9 @@
 		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
 		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
 		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
-		tmp = RREG32_MC(0x100);
+		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
 		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
-		tmp = RREG32(0x134);
+		tmp = RREG32(RS690_HDP_FB_LOCATION);
 		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
 	} else {
 		tmp = RREG32(RADEON_AGP_BASE);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b4192ac..8af4679 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -339,16 +339,16 @@
 
 int rs600_asic_reset(struct radeon_device *rdev)
 {
-	u32 status, tmp;
-
 	struct rv515_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
 
-	/* Stops all mc clients */
-	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -392,11 +392,11 @@
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeeded\n");
 	rv515_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 /*
@@ -751,7 +751,6 @@
 	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	base = RREG32_MC(R_000004_MC_FB_LOCATION);
 	base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e..66c949b 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@
 		switch (crev) {
 		case 1:
 			tmp.full = dfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			if (info->info.usK8MemoryClock)
+			if (le16_to_cpu(info->info.usK8MemoryClock))
 				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
 			else if (rdev->clock.default_mclk) {
 				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@
 			break;
 		case 2:
 			tmp.full = dfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			if (info->info_v2.ulBootUpUMAClock)
-				rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
 			else if (rdev->clock.default_mclk)
 				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
 			else
 				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
 			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-			rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
 			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
 			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
 			break;
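
The le16_to_cpu()/le32_to_cpu() conversions only change behaviour on big-endian hosts, where the ATOM tables read from the video BIOS are stored little-endian and would otherwise be interpreted byte-swapped. A minimal standalone sketch of what the wrappers guarantee (the struct and raw bytes are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical firmware record, stored little-endian as in the BIOS. */
struct demo_igp_info {
	uint8_t raw_mclk[4];	/* stands in for ulBootUpMemoryClock */
};

static uint32_t demo_le32_to_cpu(const uint8_t b[4])
{
	/* Assemble explicitly from bytes so the result is the same on
	 * little- and big-endian hosts, which is what le32_to_cpu()
	 * provides in the kernel. */
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	struct demo_igp_info info = { { 0x40, 0x0d, 0x03, 0x00 } };

	/* prints 200000 regardless of host endianness */
	printf("boot-up mclk = %u\n", (unsigned)demo_le32_to_cpu(info.raw_mclk));
	return 0;
}
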
@@ -157,7 +157,6 @@
 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d569f4..64b57af 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -69,13 +69,13 @@
 			  ISYNC_CPSCRATCH_IDLEGUI);
 	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
 	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(0x170C, 0));
-	radeon_ring_write(rdev, 1 << 31);
+	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
 	radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
 	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(0x42C8, 0));
+	radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
 	radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
 	radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
 	radeon_ring_write(rdev, 0);
@@ -153,8 +153,8 @@
 	}
 	rv515_vga_render_disable(rdev);
 	r420_pipes_init(rdev);
-	gb_pipe_select = RREG32(0x402C);
-	tmp = RREG32(0x170C);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
 	pipe_select_current = (tmp >> 2) & 3;
 	tmp = (1 << pipe_select_current) |
 	      (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a264aa..714ad45 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -78,18 +78,23 @@
 }
 
 /* get temperature in millidegrees */
-u32 rv770_get_temp(struct radeon_device *rdev)
+int rv770_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
+	int actual_temp;
 
-	if ((temp >> 9) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
+		actual_temp = 255;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
 
-	return actual_temp * 1000;
+	return (actual_temp * 1000) / 2;
 }
 
 void rv770_pm_misc(struct radeon_device *rdev)
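
The reworked conversion treats the raw ASIC_T field as a signed reading in 0.5 degree steps rather than an unsigned value, which is why the return type changes to int. A standalone decode using the same bit tests (the sample raw values are invented) shows the resulting millidegree values:

#include <stdint.h>
#include <stdio.h>

/* Same decode as the new rv770_get_temp(), applied to the ASIC_T field
 * already shifted down to bit 0. */
static int rv770_demo_decode_temp(uint32_t temp)
{
	int actual_temp;

	if (temp & 0x400)
		actual_temp = -256;
	else if (temp & 0x200)
		actual_temp = 255;
	else if (temp & 0x100) {
		/* 9-bit negative value: sign-extend */
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;	/* millidegrees C */
}

int main(void)
{
	/* 90 -> 45000 mC, 0x1f6 -> -5000 mC (raw values are examples) */
	printf("%d %d\n", rv770_demo_decode_temp(90),
	       rv770_demo_decode_temp(0x1f6));
	return 0;
}
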
@@ -302,7 +307,7 @@
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 	WREG32(SCRATCH_UMSK, 0);
 }
@@ -316,7 +321,11 @@
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
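
For reference, the value written above is the old (15 << 8) | (3 << 0) literal spelled with the RB_BLKSZ()/RB_BUFSZ() helpers from the rv770d.h hunk further down, with the byte swap added only on big-endian builds. A throwaway sketch of what it evaluates to (DEMO_BIG_ENDIAN merely stands in for __BIG_ENDIAN here):

#include <stdint.h>
#include <stdio.h>

#define RB_BUFSZ(x)	((x) << 0)
#define RB_BLKSZ(x)	((x) << 8)
#define RB_NO_UPDATE	(1 << 27)
#define BUF_SWAP_32BIT	(2 << 16)

int main(void)
{
	uint32_t cntl = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);

#ifdef DEMO_BIG_ENDIAN
	cntl |= BUF_SWAP_32BIT;
#endif
	/* prints 0x08000f03 (0x08020f03 with the swap enabled) */
	printf("CP_RB_CNTL = 0x%08x\n", (unsigned)cntl);
	return 0;
}
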
@@ -1114,7 +1123,6 @@
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
@@ -1268,7 +1276,7 @@
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -1372,6 +1380,9 @@
 	u32 link_width_cntl, lanes, speed_cntl, tmp;
 	u16 link_cntl2;
 
+	if (radeon_pcie_gen2 == 0)
+		return;
+
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index abc8cf5..79fa588 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
 #define		ROQ_IB1_START(x)				((x) << 0)
 #define		ROQ_IB2_START(x)				((x) << 8)
 #define	CP_RB_CNTL					0xC104
-#define		RB_BUFSZ(x)					((x)<<0)
-#define		RB_BLKSZ(x)					((x)<<8)
-#define		RB_NO_UPDATE					(1<<27)
-#define		RB_RPTR_WR_ENA					(1<<31)
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 0e1edd7..70e60a4 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -1,12 +1,13 @@
 config STUB_POULSBO
 	tristate "Intel GMA500 Stub Driver"
 	depends on PCI
+	depends on NET # for THERMAL
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
+	select THERMAL if ACPI
 	help
 	  Choose this option if you have a system that has Intel GMA500
 	  (Poulsbo) integrated graphics. If M is selected, the module will
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 8d0e31a..96c83a9 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,5 +1,5 @@
 config VGA_ARB
-	bool "VGA Arbitration" if EMBEDDED
+	bool "VGA Arbitration" if EXPERT
 	default y
 	depends on PCI
 	help
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c380c65..ace2b16 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -636,7 +636,7 @@
 			void (*irq_set_state)(void *cookie, bool state),
 			unsigned int (*set_vga_decode)(void *cookie, bool decode))
 {
-	int ret = -1;
+	int ret = -ENODEV;
 	struct vga_device *vgadev;
 	unsigned long flags;
 
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ffbc278..2560f01 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -62,9 +62,9 @@
 	Support for 3M PCT touch screens.
 
 config HID_A4TECH
-	tristate "A4 tech mice" if EMBEDDED
+	tristate "A4 tech mice" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for A4 tech X5 and WOP-35 / Trust 450L mice.
 
@@ -77,9 +77,9 @@
 	game controllers.
 
 config HID_APPLE
-	tristate "Apple {i,Power,Mac}Books" if EMBEDDED
+	tristate "Apple {i,Power,Mac}Books" if EXPERT
 	depends on (USB_HID || BT_HIDP)
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for some Apple devices which less or more break
 	HID specification.
@@ -88,9 +88,9 @@
 	MacBooks, MacBook Pros and Apple Aluminum.
 
 config HID_BELKIN
-	tristate "Belkin Flip KVM and Wireless keyboard" if EMBEDDED
+	tristate "Belkin Flip KVM and Wireless keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Belkin Flip KVM and Wireless keyboard.
 
@@ -101,16 +101,16 @@
 	Support for Cando dual touch panel.
 
 config HID_CHERRY
-	tristate "Cherry Cymotion keyboard" if EMBEDDED
+	tristate "Cherry Cymotion keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Cherry Cymotion keyboard.
 
 config HID_CHICONY
-	tristate "Chicony Tactical pad" if EMBEDDED
+	tristate "Chicony Tactical pad" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Chicony Tactical pad.
 
@@ -130,9 +130,9 @@
 	  and some additional multimedia keys.
 
 config HID_CYPRESS
-	tristate "Cypress mouse and barcode readers" if EMBEDDED
+	tristate "Cypress mouse and barcode readers" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for cypress mouse and barcode readers.
 
@@ -174,16 +174,16 @@
 	Support for the ELECOM BM084 (bluetooth mouse).
 
 config HID_EZKEY
-	tristate "Ezkey BTC 8193 keyboard" if EMBEDDED
+	tristate "Ezkey BTC 8193 keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Ezkey BTC 8193 keyboard.
 
 config HID_KYE
-	tristate "Kye/Genius Ergo Mouse" if EMBEDDED
+	tristate "Kye/Genius Ergo Mouse" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Kye/Genius Ergo Mouse.
 
@@ -212,16 +212,16 @@
 	Support for Twinhan IR remote control.
 
 config HID_KENSINGTON
-	tristate "Kensington Slimblade Trackball" if EMBEDDED
+	tristate "Kensington Slimblade Trackball" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Kensington Slimblade Trackball.
 
 config HID_LOGITECH
-	tristate "Logitech devices" if EMBEDDED
+	tristate "Logitech devices" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Logitech devices that are not fully compliant with HID standard.
 
@@ -276,9 +276,9 @@
 	Apple Wireless "Magic" Mouse.
 
 config HID_MICROSOFT
-	tristate "Microsoft non-fully HID-compliant devices" if EMBEDDED
+	tristate "Microsoft non-fully HID-compliant devices" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Microsoft devices that are not fully compliant with HID standard.
 
@@ -289,12 +289,29 @@
 	Support for MosArt dual-touch panels.
 
 config HID_MONTEREY
-	tristate "Monterey Genius KB29E keyboard" if EMBEDDED
+	tristate "Monterey Genius KB29E keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Monterey Genius KB29E.
 
+config HID_MULTITOUCH
+	tristate "HID Multitouch panels"
+	depends on USB_HID
+	---help---
+	  Generic support for HID multitouch panels.
+
+	  Say Y here if you have one of the following devices:
+	  - Cypress TrueTouch panels
+	  - Hanvon dual touch panels
+	  - Pixcir dual touch panels
+	  - 'Sensing Win7-TwoFinger' panel by GeneralTouch
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hid-multitouch.
+
 config HID_NTRIG
 	tristate "N-Trig touch screen"
 	depends on USB_HID
@@ -348,8 +365,8 @@
 	  - IR
 
 config HID_PICOLCD_FB
-	bool "Framebuffer support" if EMBEDDED
-	default !EMBEDDED
+	bool "Framebuffer support" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=FB || FB=y
 	select FB_DEFERRED_IO
@@ -362,8 +379,8 @@
 	  frambuffer device.
 
 config HID_PICOLCD_BACKLIGHT
-	bool "Backlight control" if EMBEDDED
-	default !EMBEDDED
+	bool "Backlight control" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
 	---help---
@@ -371,16 +388,16 @@
 	  class.
 
 config HID_PICOLCD_LCD
-	bool "Contrast control" if EMBEDDED
-	default !EMBEDDED
+	bool "Contrast control" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
 	---help---
 	  Provide access to PicoLCD's LCD contrast via lcd class.
 
 config HID_PICOLCD_LEDS
-	bool "GPO via leds class" if EMBEDDED
-	default !EMBEDDED
+	bool "GPO via leds class" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
 	---help---
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6eae9a9..6efc2a0 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -47,6 +47,7 @@
 obj-$(CONFIG_HID_MICROSOFT)	+= hid-microsoft.o
 obj-$(CONFIG_HID_MONTEREY)	+= hid-monterey.o
 obj-$(CONFIG_HID_MOSART)	+= hid-mosart.o
+obj-$(CONFIG_HID_MULTITOUCH)	+= hid-multitouch.o
 obj-$(CONFIG_HID_NTRIG)		+= hid-ntrig.o
 obj-$(CONFIG_HID_ORTEK)		+= hid-ortek.o
 obj-$(CONFIG_HID_PRODIKEYS)	+= hid-prodikeys.o
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
index 375b509..1ea066c 100644
--- a/drivers/hid/hid-cando.c
+++ b/drivers/hid/hid-cando.c
@@ -236,6 +236,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 		USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2611686..d678cf3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1312,7 +1312,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1324,6 +1326,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
@@ -1335,11 +1338,13 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1422,6 +1427,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -1640,7 +1646,6 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f65cace..92a0d61 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -140,7 +140,9 @@
 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2	0x5577
 
 #define USB_VENDOR_ID_CANDO		0x2087
+#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH	0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
 
@@ -186,6 +188,7 @@
 #define USB_DEVICE_ID_CYPRESS_BARCODE_1	0xde61
 #define USB_DEVICE_ID_CYPRESS_BARCODE_2	0xde64
 #define USB_DEVICE_ID_CYPRESS_BARCODE_3	0xbca1
+#define USB_DEVICE_ID_CYPRESS_TRUETOUCH	0xc001
 
 #define USB_VENDOR_ID_DEALEXTREAME	0x10c5
 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701	0x819a
@@ -236,6 +239,7 @@
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR	0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH	0x0dfc
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
 
 #define USB_VENDOR_ID_GLAB		0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30	0x0038
@@ -318,6 +322,9 @@
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST	0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST	0x8fff
 
+#define USB_VENDOR_ID_HANVON		0x20b3
+#define USB_DEVICE_ID_HANVON_MULTITOUCH	0x0a18
+
 #define USB_VENDOR_ID_HAPP		0x078b
 #define USB_DEVICE_ID_UGCI_DRIVING	0x0010
 #define USB_DEVICE_ID_UGCI_FLYING	0x0020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e60fdb8..7f552bf 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -290,6 +290,14 @@
 		goto ignore;
 	}
 
+	if (field->report_type == HID_FEATURE_REPORT) {
+		if (device->driver->feature_mapping) {
+			device->driver->feature_mapping(device, hidinput, field,
+				usage);
+		}
+		goto ignore;
+	}
+
 	if (device->driver->input_mapping) {
 		int ret = device->driver->input_mapping(device, hidinput, field,
 				usage, &bit, &max);
@@ -839,7 +847,6 @@
 	struct hid_input *hidinput = NULL;
 	struct input_dev *input_dev;
 	int i, j, k;
-	int max_report_type = HID_OUTPUT_REPORT;
 
 	INIT_LIST_HEAD(&hid->inputs);
 
@@ -856,10 +863,11 @@
 			return -1;
 	}
 
-	if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
-		max_report_type = HID_INPUT_REPORT;
+	for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) {
+		if (k == HID_OUTPUT_REPORT &&
+			hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
+			continue;
 
-	for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
 		list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
 
 			if (!report->maxfield)
@@ -912,6 +920,7 @@
 				hidinput = NULL;
 			}
 		}
+	}
 
 	if (hidinput && input_register_device(hidinput->input))
 		goto out_cleanup;
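
The new feature_mapping hook lets a driver inspect every HID_FEATURE_REPORT usage while hidinput sets up, before the usage is excluded from input mapping. A minimal, hypothetical callback in that style (demo_priv is an invented name; the mt_feature_mapping() added later in this diff does exactly this with HID_DG_INPUTMODE) might just record a report id for later writes:

#include <linux/hid.h>

/* Hypothetical driver state, for illustration only. */
struct demo_priv {
	int feature_report_id;
};

static void demo_feature_mapping(struct hid_device *hdev,
		struct hid_input *hi, struct hid_field *field,
		struct hid_usage *usage)
{
	struct demo_priv *priv = hid_get_drvdata(hdev);

	/* remember which feature report carries the InputMode usage */
	if (usage->hid == HID_DG_INPUTMODE)
		priv->feature_report_id = field->report->id;
}

A driver opts in by setting .feature_mapping = demo_feature_mapping in its struct hid_driver, as hid-multitouch does below.
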
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index 9fb050c..aed7ffe 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -257,6 +257,7 @@
 static const struct hid_device_id mosart_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
new file mode 100644
index 0000000..07d3183
--- /dev/null
+++ b/drivers/hid/hid-multitouch.c
@@ -0,0 +1,516 @@
+/*
+ *  HID driver for multitouch panels
+ *
+ *  Copyright (c) 2010-2011 Stephane Chatty <chatty@enac.fr>
+ *  Copyright (c) 2010-2011 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ *  Copyright (c) 2010-2011 Ecole Nationale de l'Aviation Civile, France
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/input/mt.h>
+#include "usbhid/usbhid.h"
+
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("HID multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+/* quirks to control the device */
+#define MT_QUIRK_NOT_SEEN_MEANS_UP	(1 << 0)
+#define MT_QUIRK_SLOT_IS_CONTACTID	(1 << 1)
+#define MT_QUIRK_CYPRESS		(1 << 2)
+#define MT_QUIRK_SLOT_IS_CONTACTNUMBER	(1 << 3)
+#define MT_QUIRK_VALID_IS_INRANGE	(1 << 4)
+#define MT_QUIRK_VALID_IS_CONFIDENCE	(1 << 5)
+
+struct mt_slot {
+	__s32 x, y, p, w, h;
+	__s32 contactid;	/* the device ContactID assigned to this slot */
+	bool touch_state;	/* is the touch valid? */
+	bool seen_in_this_frame;/* has this slot been updated */
+};
+
+struct mt_device {
+	struct mt_slot curdata;	/* placeholder of incoming data */
+	struct mt_class *mtclass;	/* our mt device class */
+	unsigned last_field_index;	/* last field index of the report */
+	unsigned last_slot_field;	/* the last field of a slot */
+	__s8 inputmode;		/* InputMode HID feature, -1 if non-existent */
+	__u8 num_received;	/* how many contacts we received */
+	__u8 num_expected;	/* expected last contact index */
+	bool curvalid;		/* is the current contact valid? */
+	struct mt_slot slots[0];	/* first slot */
+};
+
+struct mt_class {
+	__s32 name;	/* MT_CLS */
+	__s32 quirks;
+	__s32 sn_move;	/* Signal/noise ratio for move events */
+	__s32 sn_pressure;	/* Signal/noise ratio for pressure events */
+	__u8 maxcontacts;
+};
+
+/* classes of device behavior */
+#define MT_CLS_DEFAULT	1
+#define MT_CLS_DUAL1	2
+#define MT_CLS_DUAL2	3
+#define MT_CLS_CYPRESS	4
+
+/*
+ * these device-dependent functions determine what slot corresponds
+ * to a valid contact that was just read.
+ */
+
+static int cypress_compute_slot(struct mt_device *td)
+{
+	if (td->curdata.contactid != 0 || td->num_received == 0)
+		return td->curdata.contactid;
+	else
+		return -1;
+}
+
+static int find_slot_from_contactid(struct mt_device *td)
+{
+	int i;
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		if (td->slots[i].contactid == td->curdata.contactid &&
+			td->slots[i].touch_state)
+			return i;
+	}
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		if (!td->slots[i].seen_in_this_frame &&
+			!td->slots[i].touch_state)
+			return i;
+	}
+	/* This should not occur. If it happens, the device sent more
+	 * touches than it declared in its report descriptor; the extra
+	 * contact is ignored. */
+	return -1;
+}
+
+struct mt_class mt_classes[] = {
+	{ .name = MT_CLS_DEFAULT,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE,
+		.maxcontacts = 10 },
+	{ .name = MT_CLS_DUAL1,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE |
+			MT_QUIRK_SLOT_IS_CONTACTID,
+		.maxcontacts = 2 },
+	{ .name = MT_CLS_DUAL2,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE |
+			MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+		.maxcontacts = 2 },
+	{ .name = MT_CLS_CYPRESS,
+		.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+			MT_QUIRK_CYPRESS,
+		.maxcontacts = 10 },
+
+	{ }
+};
+
+static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage)
+{
+	if (usage->hid == HID_DG_INPUTMODE) {
+		struct mt_device *td = hid_get_drvdata(hdev);
+		td->inputmode = field->report->id;
+	}
+}
+
+static void set_abs(struct input_dev *input, unsigned int code,
+		struct hid_field *field, int snratio)
+{
+	int fmin = field->logical_minimum;
+	int fmax = field->logical_maximum;
+	int fuzz = snratio ? (fmax - fmin) / snratio : 0;
+	input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+}
+
+static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	struct mt_class *cls = td->mtclass;
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			set_abs(hi->input, ABS_MT_POSITION_X, field,
+				cls->sn_move);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_X, field, cls->sn_move);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			set_abs(hi->input, ABS_MT_POSITION_Y, field,
+				cls->sn_move);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_Y, field, cls->sn_move);
+			td->last_slot_field = usage->hid;
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONFIDENCE:
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_TIPSWITCH:
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONTACTID:
+			input_mt_init_slots(hi->input,
+					td->mtclass->maxcontacts);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_WIDTH:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MAJOR);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_HEIGHT:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MINOR);
+			field->logical_maximum = 1;
+			field->logical_minimum = 0;
+			set_abs(hi->input, ABS_MT_ORIENTATION, field, 0);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_TIPPRESSURE:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_PRESSURE);
+			set_abs(hi->input, ABS_MT_PRESSURE, field,
+				cls->sn_pressure);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_PRESSURE, field,
+				cls->sn_pressure);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONTACTCOUNT:
+			td->last_field_index = field->report->maxfield - 1;
+			return 1;
+		case HID_DG_CONTACTMAX:
+			/* we don't set td->last_slot_field as contactcount and
+			 * contact max are global to the report */
+			return -1;
+		}
+		/* let hid-input decide for the others */
+		return 0;
+
+	case 0xff000000:
+		/* we do not want to map these: no input-oriented meaning */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		set_bit(usage->type, hi->input->evbit);
+
+	return -1;
+}
+
+static int mt_compute_slot(struct mt_device *td)
+{
+	__s32 quirks = td->mtclass->quirks;
+
+	if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
+		return td->curdata.contactid;
+
+	if (quirks & MT_QUIRK_CYPRESS)
+		return cypress_compute_slot(td);
+
+	if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER)
+		return td->num_received;
+
+	return find_slot_from_contactid(td);
+}
+
+/*
+ * this function is called when a whole contact has been processed,
+ * so that it can assign it to a slot and store the data there
+ */
+static void mt_complete_slot(struct mt_device *td)
+{
+	td->curdata.seen_in_this_frame = true;
+	if (td->curvalid) {
+		int slotnum = mt_compute_slot(td);
+
+		if (slotnum >= 0 && slotnum < td->mtclass->maxcontacts)
+			td->slots[slotnum] = td->curdata;
+	}
+	td->num_received++;
+}
+
+
+/*
+ * this function is called when a whole packet has been received and processed,
+ * so that it can decide what to send to the input layer.
+ */
+static void mt_emit_event(struct mt_device *td, struct input_dev *input)
+{
+	int i;
+
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		struct mt_slot *s = &(td->slots[i]);
+		if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+			!s->seen_in_this_frame) {
+			s->touch_state = false;
+		}
+
+		input_mt_slot(input, i);
+		input_mt_report_slot_state(input, MT_TOOL_FINGER,
+			s->touch_state);
+		if (s->touch_state) {
+			input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
+			input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
+			input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, s->w);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, s->h);
+		}
+		s->seen_in_this_frame = false;
+
+	}
+
+	input_mt_report_pointer_emulation(input, true);
+	input_sync(input);
+	td->num_received = 0;
+}
+
+
+
+static int mt_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct mt_device *td = hid_get_drvdata(hid);
+	__s32 quirks = td->mtclass->quirks;
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+				td->curvalid = value;
+			break;
+		case HID_DG_TIPSWITCH:
+			if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+				td->curvalid = value;
+			td->curdata.touch_state = value;
+			break;
+		case HID_DG_CONFIDENCE:
+			if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+				td->curvalid = value;
+			break;
+		case HID_DG_CONTACTID:
+			td->curdata.contactid = value;
+			break;
+		case HID_DG_TIPPRESSURE:
+			td->curdata.p = value;
+			break;
+		case HID_GD_X:
+			td->curdata.x = value;
+			break;
+		case HID_GD_Y:
+			td->curdata.y = value;
+			break;
+		case HID_DG_WIDTH:
+			td->curdata.w = value;
+			break;
+		case HID_DG_HEIGHT:
+			td->curdata.h = value;
+			break;
+		case HID_DG_CONTACTCOUNT:
+			/*
+			 * Includes multi-packet support where subsequent
+			 * packets are sent with zero contactcount.
+			 */
+			if (value)
+				td->num_expected = value;
+			break;
+
+		default:
+			/* fallback to the generic hidinput handling */
+			return 0;
+		}
+
+		if (usage->hid == td->last_slot_field)
+			mt_complete_slot(td);
+
+		if (field->index == td->last_field_index
+			&& td->num_received >= td->num_expected)
+			mt_emit_event(td, field->hidinput->input);
+
+	}
+
+	/* we have handled the hidinput part, now remains hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static void mt_set_input_mode(struct hid_device *hdev)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	struct hid_report *r;
+	struct hid_report_enum *re;
+
+	if (td->inputmode < 0)
+		return;
+
+	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+	r = re->report_id_hash[td->inputmode];
+	if (r) {
+		r->field[0]->value[0] = 0x02;
+		usbhid_submit_report(hdev, r, USB_DIR_OUT);
+	}
+}
+
+static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret, i;
+	struct mt_device *td;
+	struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+
+	for (i = 0; mt_classes[i].name ; i++) {
+		if (id->driver_data == mt_classes[i].name) {
+			mtclass = &(mt_classes[i]);
+			break;
+		}
+	}
+
+	/* This allows the driver to correctly support devices
+	 * that emit events over several HID messages.
+	 */
+	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+	td = kzalloc(sizeof(struct mt_device) +
+				mtclass->maxcontacts * sizeof(struct mt_slot),
+				GFP_KERNEL);
+	if (!td) {
+		dev_err(&hdev->dev, "cannot allocate multitouch data\n");
+		return -ENOMEM;
+	}
+	td->mtclass = mtclass;
+	td->inputmode = -1;
+	hid_set_drvdata(hdev, td);
+
+	ret = hid_parse(hdev);
+	if (ret != 0)
+		goto fail;
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret)
+		goto fail;
+
+	mt_set_input_mode(hdev);
+
+	return 0;
+
+fail:
+	kfree(td);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int mt_reset_resume(struct hid_device *hdev)
+{
+	mt_set_input_mode(hdev);
+	return 0;
+}
+#endif
+
+static void mt_remove(struct hid_device *hdev)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	hid_hw_stop(hdev);
+	kfree(td);
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id mt_devices[] = {
+
+	/* Cypress panel */
+	{ .driver_data = MT_CLS_CYPRESS,
+		HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS,
+			USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+
+	/* GeneralTouch panel */
+	{ .driver_data = MT_CLS_DUAL2,
+		HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
+
+	/* PixCir-based panels */
+	{ .driver_data = MT_CLS_DUAL1,
+		HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
+			USB_DEVICE_ID_HANVON_MULTITOUCH) },
+	{ .driver_data = MT_CLS_DUAL1,
+		HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, mt_devices);
+
+static const struct hid_usage_id mt_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver mt_driver = {
+	.name = "hid-multitouch",
+	.id_table = mt_devices,
+	.probe = mt_probe,
+	.remove = mt_remove,
+	.input_mapping = mt_input_mapping,
+	.input_mapped = mt_input_mapped,
+	.feature_mapping = mt_feature_mapping,
+	.usage_table = mt_grabbed_usages,
+	.event = mt_event,
+#ifdef CONFIG_PM
+	.reset_resume = mt_reset_resume,
+#endif
+};
+
+static int __init mt_init(void)
+{
+	return hid_register_driver(&mt_driver);
+}
+
+static void __exit mt_exit(void)
+{
+	hid_unregister_driver(&mt_driver);
+}
+
+module_init(mt_init);
+module_exit(mt_exit);
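
The part of the new driver that is easiest to get wrong is the contact-to-slot routing, so here is find_slot_from_contactid() lifted into a standalone sketch that can be compiled and traced on its own; the two-contact scenario in main() is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAXCONTACTS 2

struct demo_slot {
	int contactid;
	bool touch_state;
	bool seen_in_this_frame;
};

static struct demo_slot slots[DEMO_MAXCONTACTS];

/* Same policy as find_slot_from_contactid(): reuse the slot already
 * tracking this contact id, otherwise take a free one; -1 means the
 * device reported more contacts than it declared. */
static int demo_find_slot(int contactid)
{
	int i;

	for (i = 0; i < DEMO_MAXCONTACTS; i++)
		if (slots[i].contactid == contactid && slots[i].touch_state)
			return i;
	for (i = 0; i < DEMO_MAXCONTACTS; i++)
		if (!slots[i].seen_in_this_frame && !slots[i].touch_state)
			return i;
	return -1;
}

int main(void)
{
	/* frame 1: contact id 5 arrives, then contact id 9 */
	int a = demo_find_slot(5);
	slots[a] = (struct demo_slot){ 5, true, true };
	int b = demo_find_slot(9);
	slots[b] = (struct demo_slot){ 9, true, true };

	/* frame 2: id 9 is reported first, yet keeps its slot */
	slots[a].seen_in_this_frame = slots[b].seen_in_this_frame = false;
	printf("id5 -> slot %d, id9 -> slot %d, id9 again -> slot %d\n",
	       a, b, demo_find_slot(9));
	return 0;
}
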
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 4edb3be..0f20fd1 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -45,7 +45,7 @@
 	  If unsure, say Y.
 
 menu "USB HID Boot Protocol drivers"
-	depends on USB!=n && USB_HID!=y && EMBEDDED
+	depends on USB!=n && USB_HID!=y && EXPERT
 
 config USB_KBD
 	tristate "USB HIDBP Keyboard (simple Boot) support"
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 76b9a14..9a94b64 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -35,7 +35,6 @@
 	{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
-	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bdc13d2..297bc9a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@
 	  will be called k8temp.
 
 config SENSORS_K10TEMP
-	tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+	tristate "AMD Family 10h/11h/12h/14h temperature sensor"
 	depends on X86 && PCI
 	help
 	  If you say yes here you get support for the temperature
 	  sensor(s) inside your CPU. Supported are later revisions of
-	  the AMD Family 10h and all revisions of the AMD Family 11h
-	  microarchitectures.
+	  the AMD Family 10h and all revisions of the AMD Family 11h,
+	  12h (Llano), and 14h (Brazos) microarchitectures.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called k10temp.
@@ -455,13 +455,14 @@
 	  called jz4740-hwmon.
 
 config SENSORS_JC42
-	tristate "JEDEC JC42.4 compliant temperature sensors"
+	tristate "JEDEC JC42.4 compliant memory module temperature sensors"
 	depends on I2C
 	help
-	  If you say yes here you get support for Jedec JC42.4 compliant
-	  temperature sensors. Support will include, but not be limited to,
-	  ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
-	  MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+	  If you say yes here, you get support for JEDEC JC42.4 compliant
+	  temperature sensors, which are used on many DDR3 memory modules for
+	  mobile devices and servers.  Support will include, but not be limited
+	  to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+	  MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called jc42.
@@ -574,7 +575,7 @@
 	help
 	  If you say yes here you get support for National Semiconductor LM85
 	  sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
-	  EMC6D101 and EMC6D102.
+	  EMC6D101, EMC6D102, and EMC6D103.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm85.
@@ -618,8 +619,8 @@
 	depends on I2C
 	select HWMON_VID
 	help
-	  If you say yes here you get support for National Semiconductor LM93
-	  sensor chips.
+	  If you say yes here you get support for National Semiconductor LM93,
+	  LM94, and compatible sensor chips.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm93.
@@ -809,10 +810,10 @@
 	  will be called dme1737.
 
 config SENSORS_EMC1403
-	tristate "SMSC EMC1403 thermal sensor"
+	tristate "SMSC EMC1403/23 thermal sensor"
 	depends on I2C
 	help
-	  If you say yes here you get support for the SMSC EMC1403
+	  If you say yes here you get support for the SMSC EMC1403/23
 	  temperature monitoring chip.
 
 	  Threshold values can be configured using sysfs.
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822a..d46c0c7 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@
 	{ "ad7414", 0 },
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
 
 static struct i2c_driver ad7414_driver = {
 	.driver = {
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 0727ad2..9e234b9 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -20,7 +20,7 @@
  * Alarms	16-bit map of active alarms
  * Analog Out	0..1250 mV output
  *
- * Chassis Intrusion: clear CI latch with 'echo 1 > chassis_clear'
+ * Chassis Intrusion: clear CI latch with 'echo 0 > intrusion0_alarm'
  *
  * Test hardware: Intel SE440BX-2 desktop motherboard --Grant
  *
@@ -476,13 +476,16 @@
 static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
 
 /* chassis_clear */
-static ssize_t chassis_clear(struct device *dev,
+static ssize_t chassis_clear_legacy(struct device *dev,
 		struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	unsigned long val = simple_strtol(buf, NULL, 10);
 
+	dev_warn(dev, "Attribute chassis_clear is deprecated, "
+		 "use intrusion0_alarm instead\n");
+
 	if (val == 1) {
 		i2c_smbus_write_byte_data(client,
 				ADM9240_REG_CHASSIS_CLEAR, 0x80);
@@ -490,7 +493,29 @@
 	}
 	return count;
 }
-static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear);
+static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear_legacy);
+
+static ssize_t chassis_clear(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adm9240_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80);
+	data->valid = 0;		/* Force cache refresh */
+	mutex_unlock(&data->update_lock);
+	dev_dbg(&client->dev, "chassis intrusion latch cleared\n");
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm,
+		chassis_clear, 12);
 
 static struct attribute *adm9240_attributes[] = {
 	&sensor_dev_attr_in0_input.dev_attr.attr,
@@ -532,6 +557,7 @@
 	&dev_attr_alarms.attr,
 	&dev_attr_aout_output.attr,
 	&dev_attr_chassis_clear.attr,
+	&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
 	&dev_attr_cpu0_vid.attr,
 	NULL
 };
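
From userspace the new attribute follows the generic hwmon intrusion convention: writing 0 to intrusion0_alarm clears the latch, replacing the old write-1-to-chassis_clear scheme. A rough userspace sketch, with the caveat that the sysfs path shown is purely hypothetical (the hwmon index, and whether the attribute sits directly under the hwmon class directory or under its device link, depend on the system and kernel version):

#include <stdio.h>

int main(void)
{
	/* hypothetical path; on a real system locate the adm9240 by
	 * reading the name attribute of each hwmon device */
	const char *path = "/sys/class/hwmon/hwmon1/device/intrusion0_alarm";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);	/* 0 clears the intrusion latch */
	fclose(f);
	return 0;
}
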
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index aac85f3..c42c5a6 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -4,7 +4,7 @@
 
 	This driver is based on the lm75 and other lm_sensors/hwmon drivers
 
-	Written by Steve Hardy <steve@linuxrealtime.co.uk>
+	Written by Steve Hardy <shardy@redhat.com>
 
 	Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads7828.pdf
 
@@ -271,7 +271,7 @@
 	i2c_del_driver(&ads7828_driver);
 }
 
-MODULE_AUTHOR("Steve Hardy <steve@linuxrealtime.co.uk>");
+MODULE_AUTHOR("Steve Hardy <shardy@redhat.com>");
 MODULE_DESCRIPTION("ADS7828 driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843..5cc3e37 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@
 	{ "adt7411", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
 
 static struct i2c_driver adt7411_driver = {
 	.driver		= {
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index ce0372f0..4c07436 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -1072,6 +1072,7 @@
 			node->sda.dev_attr.show = grp->show;
 			node->sda.dev_attr.store = grp->store;
 			attr = &node->sda.dev_attr.attr;
+			sysfs_attr_init(attr);
 			attr->name = node->name;
 			attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0);
 			ret = sysfs_create_file(&pdev->dev.kobj, attr);
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 2d68cf3..b5e8920 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -13,6 +13,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/dmi.h>
 
 #include <acpi/acpi.h>
 #include <acpi/acpixf.h>
@@ -22,6 +23,21 @@
 
 #define ATK_HID "ATK0110"
 
+static bool new_if;
+module_param(new_if, bool, 0);
+MODULE_PARM_DESC(new_if, "Override detection heuristic and force the use of the new ATK0110 interface");
+
+static const struct dmi_system_id __initconst atk_force_new_if[] = {
+	{
+		/* Old interface has broken MCH temp monitoring */
+		.ident = "Asus Sabertooth X58",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
+		}
+	},
+	{ }
+};
+
 /* Minimum time between readings, enforced in order to avoid
  * hogging the CPU.
  */
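
Boards with the same problem can be handled either at load time (the new new_if module parameter) or by extending the DMI table above. A hypothetical extra table with an invented board name, shown only to illustrate the shape of an entry:

/* sketch only: table and board name are invented for illustration */
static const struct dmi_system_id __initconst demo_force_new_if[] = {
	{
		.ident = "Asus Example Board",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE BOARD")
		}
	},
	{ }
};
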
@@ -1302,7 +1318,9 @@
 	 * analysis of multiple DSDTs indicates that when both interfaces
 	 * are present the new one (GGRP/GITM) is not functional.
 	 */
-	if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle)
+	if (new_if)
+		dev_info(dev, "Overriding interface detection\n");
+	if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle && !new_if)
 		data->old_interface = true;
 	else if (data->enumerate_handle && data->read_handle &&
 			data->write_handle)
@@ -1420,6 +1438,9 @@
 		return -EBUSY;
 	}
 
+	if (dmi_check_system(atk_force_new_if))
+		new_if = true;
+
 	ret = acpi_bus_register_driver(&atk_driver);
 	if (ret)
 		pr_info("acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index e9a610b..d9c5927 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -77,12 +77,14 @@
  * in4   +12V
  * in5   VTR   (+3.3V stby)
  * in6   Vbat
+ * in7   Vtrip (sch5127 only)
  *
  * --------------------------------------------------------------------- */
 
-/* Voltages (in) numbered 0-6 (ix) */
-#define	DME1737_REG_IN(ix)		((ix) < 5 ? 0x20 + (ix) \
-						  : 0x94 + (ix))
+/* Voltages (in) numbered 0-7 (ix) */
+#define	DME1737_REG_IN(ix)		((ix) < 5 ? 0x20 + (ix) : \
+					 (ix) < 7 ? 0x94 + (ix) : \
+						    0x1f)
 #define	DME1737_REG_IN_MIN(ix)		((ix) < 5 ? 0x44 + (ix) * 2 \
 						  : 0x91 + (ix) * 2)
 #define	DME1737_REG_IN_MAX(ix)		((ix) < 5 ? 0x45 + (ix) * 2 \
@@ -101,10 +103,11 @@
  *    IN_TEMP_LSB(1) = [temp3, temp1]
  *    IN_TEMP_LSB(2) = [in4, temp2]
  *    IN_TEMP_LSB(3) = [in3, in0]
- *    IN_TEMP_LSB(4) = [in2, in1] */
+ *    IN_TEMP_LSB(4) = [in2, in1]
+ *    IN_TEMP_LSB(5) = [res, in7] */
 #define DME1737_REG_IN_TEMP_LSB(ix)	(0x84 + (ix))
-static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0};
-static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4};
+static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0, 5};
+static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4, 4};
 static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1};
 static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0};
 
@@ -145,7 +148,7 @@
 #define DME1737_REG_ALARM1		0x41
 #define DME1737_REG_ALARM2		0x42
 #define DME1737_REG_ALARM3		0x83
-static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17};
+static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17, 18};
 static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6};
 static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
 
@@ -190,6 +193,7 @@
 #define HAS_PWM_MIN		(1 << 4)		/* bit 4 */
 #define HAS_FAN(ix)		(1 << ((ix) + 5))	/* bits 5-10 */
 #define HAS_PWM(ix)		(1 << ((ix) + 11))	/* bits 11-16 */
+#define HAS_IN7			(1 << 17)		/* bit 17 */
 
 /* ---------------------------------------------------------------------
  * Data structures and manipulation thereof
@@ -213,9 +217,9 @@
 	u32 has_features;
 
 	/* Register values */
-	u16 in[7];
-	u8  in_min[7];
-	u8  in_max[7];
+	u16 in[8];
+	u8  in_min[8];
+	u8  in_max[8];
 	s16 temp[3];
 	s8  temp_min[3];
 	s8  temp_max[3];
@@ -247,7 +251,7 @@
 static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
 					 3300};
 static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
-					 3300};
+					 3300, 1500};
 #define IN_NOMINAL(type)	((type) == sch311x ? IN_NOMINAL_SCH311x : \
 				 (type) == sch5027 ? IN_NOMINAL_SCH5027 : \
 				 (type) == sch5127 ? IN_NOMINAL_SCH5127 : \
@@ -580,7 +584,7 @@
 {
 	struct dme1737_data *data = dev_get_drvdata(dev);
 	int ix;
-	u8 lsb[5];
+	u8 lsb[6];
 
 	mutex_lock(&data->update_lock);
 
@@ -603,6 +607,9 @@
 			/* Voltage inputs are stored as 16 bit values even
 			 * though they have only 12 bits resolution. This is
 			 * to make it consistent with the temp inputs. */
+			if (ix == 7 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			data->in[ix] = dme1737_read(data,
 					DME1737_REG_IN(ix)) << 8;
 			data->in_min[ix] = dme1737_read(data,
@@ -635,10 +642,16 @@
 		 * which the registers are read (MSB first, then LSB) is
 		 * important! */
 		for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) {
+			if (ix == 5 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			lsb[ix] = dme1737_read(data,
 					DME1737_REG_IN_TEMP_LSB(ix));
 		}
 		for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+			if (ix == 7 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] <<
 					DME1737_REG_IN_LSB_SHL[ix]) & 0xf0;
 		}
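
in7 follows the same 12-bit split as the other voltages: the MSB comes from DME1737_REG_IN(7) (now 0x1f) and the low nibble from IN_TEMP_LSB(5), per the updated DME1737_REG_IN_LSB[]/DME1737_REG_IN_LSB_SHL[] tables. A standalone sketch of the combine step above (the raw register values are invented):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the MSB/LSB combine in dme1737_update_device(): the MSB
 * register supplies bits 15-8 and the LSB register contributes a nibble,
 * shifted per DME1737_REG_IN_LSB_SHL[] and masked to 0xf0. */
static uint16_t demo_combine_in(uint8_t msb, uint8_t lsb, int shl)
{
	return (uint16_t)(msb << 8) | ((uint16_t)(lsb << shl) & 0xf0);
}

int main(void)
{
	/* invented readings for in7: MSB 0xc0, LSB register 0x0a,
	 * shift of 4 as in DME1737_REG_IN_LSB_SHL[7]; prints 0xc0a0 */
	printf("in7 raw = 0x%04x\n", demo_combine_in(0xc0, 0x0a, 4));
	return 0;
}
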
@@ -762,7 +775,7 @@
 
 /* ---------------------------------------------------------------------
  * Voltage sysfs attributes
- * ix = [0-5]
+ * ix = [0-7]
  * --------------------------------------------------------------------- */
 
 #define SYS_IN_INPUT	0
@@ -1439,7 +1452,7 @@
  * Sysfs device attribute defines and structs
  * --------------------------------------------------------------------- */
 
-/* Voltages 0-6 */
+/* Voltages 0-7 */
 
 #define SENSOR_DEVICE_ATTR_IN(ix) \
 static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \
@@ -1458,6 +1471,7 @@
 SENSOR_DEVICE_ATTR_IN(4);
 SENSOR_DEVICE_ATTR_IN(5);
 SENSOR_DEVICE_ATTR_IN(6);
+SENSOR_DEVICE_ATTR_IN(7);
 
 /* Temperatures 1-3 */
 
@@ -1576,7 +1590,7 @@
  * created unconditionally. The attributes that need modification of their
  * permissions are created read-only and write permissions are added or removed
  * on the fly when required */
-static struct attribute *dme1737_attr[] ={
+static struct attribute *dme1737_attr[] = {
 	/* Voltages */
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1681,7 +1695,7 @@
 };
 
 
-/* The following struct holds temp zone hysteresis  related attributes, which
+/* The following struct holds temp zone hysteresis related attributes, which
  * are not available in all chips. The following chips support them:
  * DME1737, SCH311x */
 static struct attribute *dme1737_zone_hyst_attr[] = {
@@ -1695,6 +1709,21 @@
 	.attrs = dme1737_zone_hyst_attr,
 };
 
+/* The following struct holds voltage in7 related attributes, which
+ * are not available in all chips. The following chips support them:
+ * SCH5127 */
+static struct attribute *dme1737_in7_attr[] = {
+	&sensor_dev_attr_in7_input.dev_attr.attr,
+	&sensor_dev_attr_in7_min.dev_attr.attr,
+	&sensor_dev_attr_in7_max.dev_attr.attr,
+	&sensor_dev_attr_in7_alarm.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group dme1737_in7_group = {
+	.attrs = dme1737_in7_attr,
+};
+
 /* The following structs hold the PWM attributes, some of which are optional.
  * Their creation depends on the chip configuration which is determined during
  * module load. */
@@ -1986,6 +2015,9 @@
 	if (data->has_features & HAS_ZONE_HYST) {
 		sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
 	}
+	if (data->has_features & HAS_IN7) {
+		sysfs_remove_group(&dev->kobj, &dme1737_in7_group);
+	}
 	sysfs_remove_group(&dev->kobj, &dme1737_group);
 
 	if (!data->client) {
@@ -1999,43 +2031,58 @@
 	int err, ix;
 
 	/* Create a name attribute for ISA devices */
-	if (!data->client &&
-	    (err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr))) {
-		goto exit;
+	if (!data->client) {
+		err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr);
+		if (err) {
+			goto exit;
+		}
 	}
 
 	/* Create standard sysfs attributes */
-	if ((err = sysfs_create_group(&dev->kobj, &dme1737_group))) {
+	err = sysfs_create_group(&dev->kobj, &dme1737_group);
+	if (err) {
 		goto exit_remove;
 	}
 
 	/* Create chip-dependent sysfs attributes */
-	if ((data->has_features & HAS_TEMP_OFFSET) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_temp_offset_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_TEMP_OFFSET) {
+		err = sysfs_create_group(&dev->kobj,
+					 &dme1737_temp_offset_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_VID) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_vid_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_VID) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_vid_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_ZONE3) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_zone3_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_ZONE3) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_zone3_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_ZONE_HYST) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_zone_hyst_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_ZONE_HYST) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_zone_hyst_group);
+		if (err) {
+			goto exit_remove;
+		}
+	}
+	if (data->has_features & HAS_IN7) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_in7_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
 
 	/* Create fan sysfs attributes */
 	for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
 		if (data->has_features & HAS_FAN(ix)) {
-			if ((err = sysfs_create_group(&dev->kobj,
-						&dme1737_fan_group[ix]))) {
+			err = sysfs_create_group(&dev->kobj,
+						 &dme1737_fan_group[ix]);
+			if (err) {
 				goto exit_remove;
 			}
 		}
@@ -2044,14 +2091,17 @@
 	/* Create PWM sysfs attributes */
 	for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
 		if (data->has_features & HAS_PWM(ix)) {
-			if ((err = sysfs_create_group(&dev->kobj,
-						&dme1737_pwm_group[ix]))) {
+			err = sysfs_create_group(&dev->kobj,
+						 &dme1737_pwm_group[ix]);
+			if (err) {
 				goto exit_remove;
 			}
-			if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
-			    (err = sysfs_create_file(&dev->kobj,
-					dme1737_auto_pwm_min_attr[ix]))) {
-				goto exit_remove;
+			if ((data->has_features & HAS_PWM_MIN) && (ix < 3)) {
+				err = sysfs_create_file(&dev->kobj,
+						dme1737_auto_pwm_min_attr[ix]);
+				if (err) {
+					goto exit_remove;
+				}
 			}
 		}
 	}
@@ -2188,7 +2238,7 @@
 		data->has_features |= HAS_ZONE3;
 		break;
 	case sch5127:
-		data->has_features |= HAS_FAN(2) | HAS_PWM(2);
+		data->has_features |= HAS_FAN(2) | HAS_PWM(2) | HAS_IN7;
 		break;
 	default:
 		break;
@@ -2281,8 +2331,9 @@
 	dme1737_sio_outb(sio_cip, 0x07, 0x0a);
 
 	/* Get the base address of the runtime registers */
-	if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
-		      dme1737_sio_inb(sio_cip, 0x61))) {
+	addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+		dme1737_sio_inb(sio_cip, 0x61);
+	if (!addr) {
 		err = -ENODEV;
 		goto exit;
 	}
@@ -2363,13 +2414,15 @@
 	mutex_init(&data->update_lock);
 
 	/* Initialize the DME1737 chip */
-	if ((err = dme1737_init_device(dev))) {
+	err = dme1737_init_device(dev);
+	if (err) {
 		dev_err(dev, "Failed to initialize device.\n");
 		goto exit_kfree;
 	}
 
 	/* Create sysfs files */
-	if ((err = dme1737_create_files(dev))) {
+	err = dme1737_create_files(dev);
+	if (err) {
 		dev_err(dev, "Failed to create sysfs files.\n");
 		goto exit_kfree;
 	}
@@ -2446,8 +2499,9 @@
 	dme1737_sio_outb(sio_cip, 0x07, 0x0a);
 
 	/* Get the base address of the runtime registers */
-	if (!(base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
-			   dme1737_sio_inb(sio_cip, 0x61))) {
+	base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+		     dme1737_sio_inb(sio_cip, 0x61);
+	if (!base_addr) {
 		pr_err("Base address not set\n");
 		err = -ENODEV;
 		goto exit;
@@ -2476,18 +2530,21 @@
 	if (err)
 		goto exit;
 
-	if (!(pdev = platform_device_alloc("dme1737", addr))) {
+	pdev = platform_device_alloc("dme1737", addr);
+	if (!pdev) {
 		pr_err("Failed to allocate device\n");
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	if ((err = platform_device_add_resources(pdev, &res, 1))) {
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
 		pr_err("Failed to add device resource (err = %d)\n", err);
 		goto exit_device_put;
 	}
 
-	if ((err = platform_device_add(pdev))) {
+	err = platform_device_add(pdev);
+	if (err) {
 		pr_err("Failed to add device (err = %d)\n", err);
 		goto exit_device_put;
 	}
@@ -2514,11 +2571,12 @@
 		dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
 			(unsigned short)res->start,
 			(unsigned short)res->start + DME1737_EXTENT - 1);
-                err = -EBUSY;
-                goto exit;
-        }
+		err = -EBUSY;
+		goto exit;
+	}
 
-	if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) {
+	data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL);
+	if (!data) {
 		err = -ENOMEM;
 		goto exit_release_region;
 	}
@@ -2565,13 +2623,15 @@
 		 data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
 
 	/* Initialize the chip */
-	if ((err = dme1737_init_device(dev))) {
+	err = dme1737_init_device(dev);
+	if (err) {
 		dev_err(dev, "Failed to initialize device.\n");
 		goto exit_kfree;
 	}
 
 	/* Create sysfs files */
-	if ((err = dme1737_create_files(dev))) {
+	err = dme1737_create_files(dev);
+	if (err) {
 		dev_err(dev, "Failed to create sysfs files.\n");
 		goto exit_kfree;
 	}
@@ -2628,7 +2688,8 @@
 	int err;
 	unsigned short addr;
 
-	if ((err = i2c_add_driver(&dme1737_i2c_driver))) {
+	err = i2c_add_driver(&dme1737_i2c_driver);
+	if (err) {
 		goto exit;
 	}
 
@@ -2641,12 +2702,14 @@
 		return 0;
 	}
 
-	if ((err = platform_driver_register(&dme1737_isa_driver))) {
+	err = platform_driver_register(&dme1737_isa_driver);
+	if (err) {
 		goto exit_del_i2c_driver;
 	}
 
 	/* Sets global pdev as a side effect */
-	if ((err = dme1737_isa_device_add(addr))) {
+	err = dme1737_isa_device_add(addr);
+	if (err) {
 		goto exit_del_isa_driver;
 	}
 
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 8dee3f3..cd2a6e4 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -269,23 +269,30 @@
 			struct i2c_board_info *info)
 {
 	int id;
-	/* Check if thermal chip is SMSC and EMC1403 */
+	/* Check if thermal chip is SMSC and EMC1403 or EMC1423 */
 
 	id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
 	if (id != 0x5d)
 		return -ENODEV;
 
+	id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+	switch (id) {
+	case 0x21:
+		strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+		break;
+	case 0x23:
+		strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
+		break;
 	/* Note: 0x25 is the 1404 which is very similar and this
 	   driver could be extended */
-	id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
-	if (id != 0x21)
+	default:
 		return -ENODEV;
+	}
 
 	id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
 	if (id != 0x01)
 		return -ENODEV;
 
-	strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
 	return 0;
 }
 
@@ -337,11 +344,12 @@
 }
 
 static const unsigned short emc1403_address_list[] = {
-	0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+	0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
 };
 
 static const struct i2c_device_id emc1403_idtable[] = {
 	{ "emc1403", 0 },
+	{ "emc1423", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 3f49dd3..6e06019 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -37,7 +37,7 @@
 #define SIO_F71858FG_LD_HWM	0x02	/* Hardware monitor logical device */
 #define SIO_F71882FG_LD_HWM	0x04	/* Hardware monitor logical device */
 #define SIO_UNLOCK_KEY		0x87	/* Key to enable Super-I/O */
-#define SIO_LOCK_KEY		0xAA	/* Key to diasble Super-I/O */
+#define SIO_LOCK_KEY		0xAA	/* Key to disable Super-I/O */
 
 #define SIO_REG_LDSEL		0x07	/* Logical device select */
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
@@ -2111,7 +2111,6 @@
 	int nr_fans = (data->type == f71882fg) ? 4 : 3;
 	u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
 
-	platform_set_drvdata(pdev, NULL);
 	if (data->hwmon_dev)
 		hwmon_device_unregister(data->hwmon_dev);
 
@@ -2178,6 +2177,7 @@
 		}
 	}
 
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index d4d4ca6..aa6d8b6 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -49,7 +49,6 @@
 #include <linux/kref.h>
 
 /* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
 static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
 
 /* Insmod parameters */
@@ -850,7 +849,7 @@
 
 static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-	static struct watchdog_info ident = {
+	struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
 				WDIOF_CARDRESET,
 		.identity = "FSC watchdog"
@@ -858,7 +857,6 @@
 	int i, ret = 0;
 	struct fschmd_data *data = filp->private_data;
 
-	mutex_lock(&watchdog_mutex);
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		ident.firmware_version = data->revision;
@@ -915,7 +913,6 @@
 	default:
 		ret = -ENOTTY;
 	}
-	mutex_unlock(&watchdog_mutex);
 	return ret;
 }
 
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index a428a92..316b648 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -38,6 +38,8 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1570,26 +1572,25 @@
 	case 0xffff:	/* No device at all */
 		goto exit;
 	default:
-		pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n",
-			 chip_type);
+		pr_debug("Unsupported chip (DEVID=0x%x)\n", chip_type);
 		goto exit;
 	}
 
 	superio_select(PME);
 	if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
-		pr_info("it87: Device not activated, skipping\n");
+		pr_info("Device not activated, skipping\n");
 		goto exit;
 	}
 
 	*address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1);
 	if (*address == 0) {
-		pr_info("it87: Base address not set, skipping\n");
+		pr_info("Base address not set, skipping\n");
 		goto exit;
 	}
 
 	err = 0;
 	sio_data->revision = superio_inb(DEVREV) & 0x0f;
-	pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n",
+	pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
 		chip_type, *address, sio_data->revision);
 
 	/* in8 (Vbat) is always internal */
@@ -1615,7 +1616,7 @@
 		} else {
 			/* We need at least 4 VID pins */
 			if (reg & 0x0f) {
-				pr_info("it87: VID is disabled (pins used for GPIO)\n");
+				pr_info("VID is disabled (pins used for GPIO)\n");
 				sio_data->skip_vid = 1;
 			}
 		}
@@ -1651,7 +1652,7 @@
 		if (sio_data->type == it8720 && !(reg & (1 << 1))) {
 			reg |= (1 << 1);
 			superio_outb(IT87_SIO_PINX2_REG, reg);
-			pr_notice("it87: Routing internal VCCH to in7\n");
+			pr_notice("Routing internal VCCH to in7\n");
 		}
 		if (reg & (1 << 0))
 			sio_data->internal |= (1 << 0);
@@ -1661,7 +1662,7 @@
 		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	}
 	if (sio_data->beep_pin)
-		pr_info("it87: Beeping is supported\n");
+		pr_info("Beeping is supported\n");
 
 	/* Disable specific features based on DMI strings */
 	board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
@@ -1675,8 +1676,7 @@
 			   the PWM2 duty cycle, so we disable it.
 			   I use the board name string as the trigger in case
 			   the same board is ever used in other systems. */
-			pr_info("it87: Disabling pwm2 due to "
-				"hardware constraints\n");
+			pr_info("Disabling pwm2 due to hardware constraints\n");
 			sio_data->skip_pwm = (1 << 1);
 		}
 	}
@@ -2189,28 +2189,26 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct it87_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78..9349912 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@
 
 /* Configuration register defines */
 #define JC42_CFG_CRIT_ONLY	(1 << 2)
+#define JC42_CFG_TCRIT_LOCK	(1 << 6)
+#define JC42_CFG_EVENT_LOCK	(1 << 7)
 #define JC42_CFG_SHUTDOWN	(1 << 8)
 #define JC42_CFG_HYST_SHIFT	9
 #define JC42_CFG_HYST_MASK	0x03
@@ -332,7 +334,7 @@
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct jc42_data *data = i2c_get_clientdata(client);
-	long val;
+	unsigned long val;
 	int diff, hyst;
 	int err;
 	int ret = count;
@@ -380,14 +382,14 @@
 
 static DEVICE_ATTR(temp1_input, S_IRUGO,
 		   show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IRUGO,
 		   show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IRUGO,
 		   show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IRUGO,
 		   show_temp_max, set_temp_max);
 
-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
 		   show_temp_crit_hyst, set_temp_crit_hyst);
 static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
 		   show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@
 	NULL
 };
 
+static mode_t jc42_attribute_mode(struct kobject *kobj,
+				  struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct jc42_data *data = i2c_get_clientdata(client);
+	unsigned int config = data->config;
+	bool readonly;
+
+	if (attr == &dev_attr_temp1_crit.attr)
+		readonly = config & JC42_CFG_TCRIT_LOCK;
+	else if (attr == &dev_attr_temp1_min.attr ||
+		 attr == &dev_attr_temp1_max.attr)
+		readonly = config & JC42_CFG_EVENT_LOCK;
+	else if (attr == &dev_attr_temp1_crit_hyst.attr)
+		readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
+	else
+		readonly = true;
+
+	return S_IRUGO | (readonly ? 0 : S_IWUSR);
+}
+
 static const struct attribute_group jc42_group = {
 	.attrs = jc42_attributes,
+	.is_visible = jc42_attribute_mode,
 };
 
 /* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a240..82bf65a 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
 /*
- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
  *
  * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
  *
@@ -25,7 +25,7 @@
 #include <linux/pci.h>
 #include <asm/processor.h>
 
-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_LICENSE("GPL");
 
@@ -208,6 +208,7 @@
 static const struct pci_device_id k10temp_id_table[] = {
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 1b674b7..d805e8e 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -957,7 +957,7 @@
 
 	/* bail if we did not get an IRQ from the bus layer */
 	if (!dev->irq) {
-		pr_err("No IRQ. Disabling /dev/freefall\n");
+		pr_debug("No IRQ. Disabling /dev/freefall\n");
 		goto out;
 	}
 
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 776aeb3..508cb29 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -98,6 +98,9 @@
  * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
  * For remote temperature, low and high limits, it uses signed 11-bit values
  * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
+ * For the LM64, the actual remote diode temperature is 16 degrees Celsius
+ * higher than the register reading. Remote temperature setpoints have to be
+ * adapted accordingly.
  */
 
 #define FAN_FROM_REG(reg)	((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -165,6 +168,8 @@
 	struct mutex update_lock;
 	char valid; /* zero until following fields are valid */
 	unsigned long last_updated; /* in jiffies */
+	int kind;
+	int temp2_offset;
 
 	/* registers values */
 	u8 config, config_fan;
@@ -247,16 +252,34 @@
 	return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
 }
 
-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
-			  char *buf)
+/*
+ * There are 8-bit registers for both the local (temp1) and remote (temp2)
+ * sensors. For the remote sensor registers, temp2_offset has to be taken
+ * into account; for the local sensor it must not.
+ * So we need separate 8-bit accessors for the local and remote sensors.
+ */
+static ssize_t show_local_temp8(struct device *dev,
+				struct device_attribute *devattr,
+				char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 	struct lm63_data *data = lm63_update_device(dev);
 	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
 }
 
-static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
-			 const char *buf, size_t count)
+static ssize_t show_remote_temp8(struct device *dev,
+				 struct device_attribute *devattr,
+				 char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct lm63_data *data = lm63_update_device(dev);
+	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+		       + data->temp2_offset);
+}
+
+static ssize_t set_local_temp8(struct device *dev,
+			       struct device_attribute *dummy,
+			       const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lm63_data *data = i2c_get_clientdata(client);
@@ -274,7 +297,8 @@
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 	struct lm63_data *data = lm63_update_device(dev);
-	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
+	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
+		       + data->temp2_offset);
 }
 
 static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -294,7 +318,7 @@
 	int nr = attr->index;
 
 	mutex_lock(&data->update_lock);
-	data->temp11[nr] = TEMP11_TO_REG(val);
+	data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
 	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
 				  data->temp11[nr] >> 8);
 	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -310,6 +334,7 @@
 {
 	struct lm63_data *data = lm63_update_device(dev);
 	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+		       + data->temp2_offset
 		       - TEMP8_FROM_REG(data->temp2_crit_hyst));
 }
 
@@ -324,7 +349,7 @@
 	long hyst;
 
 	mutex_lock(&data->update_lock);
-	hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
+	hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
 	i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
 				  HYST_TO_REG(hyst));
 	mutex_unlock(&data->update_lock);
@@ -355,16 +380,21 @@
 static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
 static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
 
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
+	set_local_temp8, 1);
 
 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
 	set_temp11, 1);
 static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
 	set_temp11, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
+/*
+ * On the LM63, temp2_crit can be set only once, which should be the job
+ * of the bootloader.
+ */
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
+	NULL, 2);
 static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
 	set_temp2_crit_hyst);
 
@@ -479,7 +509,12 @@
 	data->valid = 0;
 	mutex_init(&data->update_lock);
 
-	/* Initialize the LM63 chip */
+	/* Set the device type */
+	data->kind = id->driver_data;
+	if (data->kind == lm64)
+		data->temp2_offset = 16000;
+
+	/* Initialize chip */
 	lm63_init_client(new_client);
 
 	/* Register sysfs hooks */
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 72ff2c4..4cb24ea 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -19,6 +19,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -858,7 +860,7 @@
 	 * individually for the probing phase. */
 	for (port = address; port < address + LM78_EXTENT; port++) {
 		if (!request_region(port, 1, "lm78")) {
-			pr_debug("lm78: Failed to request port 0x%x\n", port);
+			pr_debug("Failed to request port 0x%x\n", port);
 			goto release;
 		}
 	}
@@ -920,7 +922,7 @@
 		found = 1;
 
 	if (found)
-		pr_info("lm78: Found an %s chip at %#x\n",
+		pr_info("Found an %s chip at %#x\n",
 			val & 0x80 ? "LM79" : "LM78", (int)address);
 
  release:
@@ -942,21 +944,19 @@
 	pdev = platform_device_alloc("lm78", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "lm78: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "lm78: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "lm78: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e22984..d2cc286 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@
 enum chips {
 	any_chip, lm85b, lm85c,
 	adm1027, adt7463, adt7468,
-	emc6d100, emc6d102
+	emc6d100, emc6d102, emc6d103
 };
 
 /* The LM85 registers */
@@ -90,6 +90,9 @@
 #define	LM85_VERSTEP_EMC6D100_A0        0x60
 #define	LM85_VERSTEP_EMC6D100_A1        0x61
 #define	LM85_VERSTEP_EMC6D102		0x65
+#define	LM85_VERSTEP_EMC6D103_A0	0x68
+#define	LM85_VERSTEP_EMC6D103_A1	0x69
+#define	LM85_VERSTEP_EMC6D103S		0x6A	/* Also known as EMC6D103:A2 */
 
 #define	LM85_REG_CONFIG			0x40
 
@@ -348,6 +351,7 @@
 	{ "emc6d100", emc6d100 },
 	{ "emc6d101", emc6d100 },
 	{ "emc6d102", emc6d102 },
+	{ "emc6d103", emc6d103 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@
 		case LM85_VERSTEP_EMC6D102:
 			type_name = "emc6d102";
 			break;
+		case LM85_VERSTEP_EMC6D103_A0:
+		case LM85_VERSTEP_EMC6D103_A1:
+			type_name = "emc6d103";
+			break;
+		/*
+		 * Registers apparently missing in EMC6D103S/EMC6D103:A2
+		 * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
+		 * (according to the data sheets), but used unconditionally
+		 * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
+		 * So skip EMC6D103S for now.
+		case LM85_VERSTEP_EMC6D103S:
+			type_name = "emc6d103s";
+			break;
+		 */
 		}
 	} else {
 		dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@
 	case adt7468:
 	case emc6d100:
 	case emc6d102:
+	case emc6d103:
 		data->freq_map = adm1027_freq_map;
 		break;
 	default:
@@ -1468,7 +1487,7 @@
 			/* More alarm bits */
 			data->alarms |= lm85_read_value(client,
 						EMC6D100_REG_ALARM3) << 16;
-		} else if (data->type == emc6d102) {
+		} else if (data->type == emc6d102 || data->type == emc6d103) {
 			/* Have to read LSB bits after the MSB ones because
 			   the reading of the MSB bits has frozen the
 			   LSBs (backward from the ADM1027).
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index c9ed14e..3b43df4 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -135,6 +135,11 @@
 #define LM93_MFR_ID		0x73
 #define LM93_MFR_ID_PROTOTYPE	0x72
 
+/* LM94 REGISTER VALUES */
+#define LM94_MFR_ID_2		0x7a
+#define LM94_MFR_ID		0x79
+#define LM94_MFR_ID_PROTOTYPE	0x78
+
 /* SMBus capabilities */
 #define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \
 		I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA)
@@ -2504,6 +2509,7 @@
 {
 	struct i2c_adapter *adapter = client->adapter;
 	int mfr, ver;
+	const char *name;
 
 	if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN))
 		return -ENODEV;
@@ -2517,13 +2523,23 @@
 	}
 
 	ver = lm93_read_byte(client, LM93_REG_VER);
-	if (ver != LM93_MFR_ID && ver != LM93_MFR_ID_PROTOTYPE) {
+	switch (ver) {
+	case LM93_MFR_ID:
+	case LM93_MFR_ID_PROTOTYPE:
+		name = "lm93";
+		break;
+	case LM94_MFR_ID_2:
+	case LM94_MFR_ID:
+	case LM94_MFR_ID_PROTOTYPE:
+		name = "lm94";
+		break;
+	default:
 		dev_dbg(&adapter->dev,
 			"detect failed, bad version id 0x%02x!\n", ver);
 		return -ENODEV;
 	}
 
-	strlcpy(info->type, "lm93", I2C_NAME_SIZE);
+	strlcpy(info->type, name, I2C_NAME_SIZE);
 	dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n",
 		client->name, i2c_adapter_id(client->adapter),
 		client->addr);
@@ -2602,6 +2618,7 @@
 
 static const struct i2c_device_id lm93_id[] = {
 	{ "lm93", 0 },
+	{ "lm94", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, lm93_id);
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 68e69a4..3d99b88 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -33,6 +33,8 @@
  *  the standard Super-I/O addresses is used (0x2E/0x2F or 0x4E/0x4F).
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1031,16 +1033,15 @@
 
 		val = superio_inb(sioaddr, ACT);
 		if (!(val & 0x01)) {
-			printk(KERN_INFO "pc87360: Device 0x%02x not "
-			       "activated\n", logdev[i]);
+			pr_info("Device 0x%02x not activated\n", logdev[i]);
 			continue;
 		}
 
 		val = (superio_inb(sioaddr, BASE) << 8)
 		    | superio_inb(sioaddr, BASE + 1);
 		if (!val) {
-			printk(KERN_INFO "pc87360: Base address not set for "
-			       "device 0x%02x\n", logdev[i]);
+			pr_info("Base address not set for device 0x%02x\n",
+				logdev[i]);
 			continue;
 		}
 
@@ -1050,17 +1051,15 @@
 			confreg[0] = superio_inb(sioaddr, 0xF0);
 			confreg[1] = superio_inb(sioaddr, 0xF1);
 
-#ifdef DEBUG
-			printk(KERN_DEBUG "pc87360: Fan 1: mon=%d "
-			       "ctrl=%d inv=%d\n", (confreg[0]>>2)&1,
-			       (confreg[0]>>3)&1, (confreg[0]>>4)&1);
-			printk(KERN_DEBUG "pc87360: Fan 2: mon=%d "
-			       "ctrl=%d inv=%d\n", (confreg[0]>>5)&1,
-			       (confreg[0]>>6)&1, (confreg[0]>>7)&1);
-			printk(KERN_DEBUG "pc87360: Fan 3: mon=%d "
-			       "ctrl=%d inv=%d\n", confreg[1]&1,
-			       (confreg[1]>>1)&1, (confreg[1]>>2)&1);
-#endif
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
+				 (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
+				 (confreg[0] >> 4) & 1);
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
+				 (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
+				 (confreg[0] >> 7) & 1);
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
+				 confreg[1] & 1, (confreg[1] >> 1) & 1,
+				 (confreg[1] >> 2) & 1);
 		} else if (i==1) { /* Voltages */
 			/* Are we using thermistors? */
 			if (*devid == 0xE9) { /* PC87366 */
@@ -1071,14 +1070,12 @@
 				confreg[3] = superio_inb(sioaddr, 0x25);
 
 				if (confreg[2] & 0x40) {
-					printk(KERN_INFO "pc87360: Using "
-					       "thermistors for temperature "
-					       "monitoring\n");
+					pr_info("Using thermistors for "
+						"temperature monitoring\n");
 				}
 				if (confreg[3] & 0xE0) {
-					printk(KERN_INFO "pc87360: VID "
-					       "inputs routed (mode %u)\n",
-					       confreg[3] >> 5);
+					pr_info("VID inputs routed (mode %u)\n",
+						confreg[3] >> 5);
 				}
 			}
 		}
@@ -1616,7 +1613,7 @@
 	pdev = platform_device_alloc("pc87360", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "pc87360: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
@@ -1639,15 +1636,13 @@
 
 	err = platform_device_add_resources(pdev, res, res_count);
 	if (err) {
-		printk(KERN_ERR "pc87360: Device resources addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resources addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "pc87360: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -1666,8 +1661,7 @@
 
 	if (pc87360_find(0x2e, &devid, extra_isa)
 	 && pc87360_find(0x4e, &devid, extra_isa)) {
-		printk(KERN_WARNING "pc87360: PC8736x not detected, "
-		       "module not inserted.\n");
+		pr_warn("PC8736x not detected, module not inserted\n");
 		return -ENODEV;
 	}
 
@@ -1680,8 +1674,7 @@
 	}
 
 	if (address == 0x0000) {
-		printk(KERN_WARNING "pc87360: No active logical device, "
-		       "module not inserted.\n");
+		pr_warn("No active logical device, module not inserted\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 9ec4daa..8da2181 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -22,6 +22,8 @@
  *  mode, and voltages aren't supported at all.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1077,7 +1079,7 @@
 	data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL);
 	if (!data) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Out of memory\n");
+		pr_err("Out of memory\n");
 		goto exit;
 	}
 
@@ -1196,28 +1198,26 @@
 	pdev = platform_device_alloc(DRVNAME, res[0].start);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, res, res_count);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct pc87427_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -1249,23 +1249,23 @@
 
 		val = superio_inb(sioaddr, SIOREG_ACT);
 		if (!(val & 0x01)) {
-			printk(KERN_INFO DRVNAME ": Logical device 0x%02x "
-			       "not activated\n", logdev[i]);
+			pr_info("Logical device 0x%02x not activated\n",
+				logdev[i]);
 			continue;
 		}
 
 		val = superio_inb(sioaddr, SIOREG_MAP);
 		if (val & 0x01) {
-			printk(KERN_WARNING DRVNAME ": Logical device 0x%02x "
-			       "is memory-mapped, can't use\n", logdev[i]);
+			pr_warn("Logical device 0x%02x is memory-mapped, "
+				"can't use\n", logdev[i]);
 			continue;
 		}
 
 		val = (superio_inb(sioaddr, SIOREG_IOBASE) << 8)
 		    | superio_inb(sioaddr, SIOREG_IOBASE + 1);
 		if (!val) {
-			printk(KERN_INFO DRVNAME ": I/O base address not set "
-			       "for logical device 0x%02x\n", logdev[i]);
+			pr_info("I/O base address not set for logical device "
+				"0x%02x\n", logdev[i]);
 			continue;
 		}
 		sio_data->address[i] = val;
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 13e8d21..25e9166 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -689,6 +689,13 @@
 	return 0;
 }
 
+static void via686a_update_fan_div(struct via686a_data *data)
+{
+	int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
+	data->fan_div[0] = (reg >> 4) & 0x03;
+	data->fan_div[1] = reg >> 6;
+}
+
 static void __devinit via686a_init_device(struct via686a_data *data)
 {
 	u8 reg;
@@ -702,6 +709,9 @@
 	via686a_write_value(data, VIA686A_REG_TEMP_MODE,
 			    (reg & ~VIA686A_TEMP_MODE_MASK)
 			    | VIA686A_TEMP_MODE_CONTINUOUS);
+
+	/* Pre-read fan clock divisor values */
+	via686a_update_fan_div(data);
 }
 
 static struct via686a_data *via686a_update_device(struct device *dev)
@@ -753,9 +763,7 @@
 		    (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
 		     0xc0) >> 6;
 
-		i = via686a_read_value(data, VIA686A_REG_FANDIV);
-		data->fan_div[0] = (i >> 4) & 0x03;
-		data->fan_div[1] = i >> 6;
+		via686a_update_fan_div(data);
 		data->alarms =
 		    via686a_read_value(data,
 				       VIA686A_REG_ALARM1) |
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index c84b9b4..eed43a0 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -33,6 +33,8 @@
 
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1798,8 +1800,7 @@
 	 * individually for the probing phase. */
 	for (port = address; port < address + W83781D_EXTENT; port++) {
 		if (!request_region(port, 1, "w83781d")) {
-			pr_debug("w83781d: Failed to request port 0x%x\n",
-				 port);
+			pr_debug("Failed to request port 0x%x\n", port);
 			goto release;
 		}
 	}
@@ -1811,7 +1812,7 @@
 	if (inb_p(address + 2) != val
 	 || inb_p(address + 3) != val
 	 || inb_p(address + 7) != val) {
-		pr_debug("w83781d: Detection failed at step 1\n");
+		pr_debug("Detection failed at step %d\n", 1);
 		goto release;
 	}
 #undef REALLY_SLOW_IO
@@ -1820,14 +1821,14 @@
 	   MSB (busy flag) should be clear initially, set after the write. */
 	save = inb_p(address + W83781D_ADDR_REG_OFFSET);
 	if (save & 0x80) {
-		pr_debug("w83781d: Detection failed at step 2\n");
+		pr_debug("Detection failed at step %d\n", 2);
 		goto release;
 	}
 	val = ~save & 0x7f;
 	outb_p(val, address + W83781D_ADDR_REG_OFFSET);
 	if (inb_p(address + W83781D_ADDR_REG_OFFSET) != (val | 0x80)) {
 		outb_p(save, address + W83781D_ADDR_REG_OFFSET);
-		pr_debug("w83781d: Detection failed at step 3\n");
+		pr_debug("Detection failed at step %d\n", 3);
 		goto release;
 	}
 
@@ -1835,7 +1836,7 @@
 	outb_p(W83781D_REG_CONFIG, address + W83781D_ADDR_REG_OFFSET);
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if (val & 0x80) {
-		pr_debug("w83781d: Detection failed at step 4\n");
+		pr_debug("Detection failed at step %d\n", 4);
 		goto release;
 	}
 	outb_p(W83781D_REG_BANK, address + W83781D_ADDR_REG_OFFSET);
@@ -1844,19 +1845,19 @@
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if ((!(save & 0x80) && (val != 0xa3))
 	 || ((save & 0x80) && (val != 0x5c))) {
-		pr_debug("w83781d: Detection failed at step 5\n");
+		pr_debug("Detection failed at step %d\n", 5);
 		goto release;
 	}
 	outb_p(W83781D_REG_I2C_ADDR, address + W83781D_ADDR_REG_OFFSET);
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if (val < 0x03 || val > 0x77) {	/* Not a valid I2C address */
-		pr_debug("w83781d: Detection failed at step 6\n");
+		pr_debug("Detection failed at step %d\n", 6);
 		goto release;
 	}
 
 	/* The busy flag should be clear again */
 	if (inb_p(address + W83781D_ADDR_REG_OFFSET) & 0x80) {
-		pr_debug("w83781d: Detection failed at step 7\n");
+		pr_debug("Detection failed at step %d\n", 7);
 		goto release;
 	}
 
@@ -1871,7 +1872,7 @@
 		found = 1;
 
 	if (found)
-		pr_info("w83781d: Found a %s chip at %#x\n",
+		pr_info("Found a %s chip at %#x\n",
 			val == 0x30 ? "W83782D" : "W83781D", (int)address);
 
  release:
@@ -1894,21 +1895,19 @@
 	pdev = platform_device_alloc("w83781d", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "w83781d: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "w83781d: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "w83781d: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 679718e..63841f8 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -691,7 +691,7 @@
 }
 
 static ssize_t
-show_regs_chassis(struct device *dev, struct device_attribute *attr,
+show_chassis(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	struct w83792d_data *data = w83792d_update_device(dev);
@@ -699,6 +699,16 @@
 }
 
 static ssize_t
+show_regs_chassis(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	dev_warn(dev,
+		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+		 "chassis");
+	return show_chassis(dev, attr, buf);
+}
+
+static ssize_t
 show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct w83792d_data *data = w83792d_update_device(dev);
@@ -706,7 +716,7 @@
 }
 
 static ssize_t
-store_chassis_clear(struct device *dev, struct device_attribute *attr,
+store_chassis_clear_legacy(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
@@ -714,6 +724,10 @@
 	u32 val;
 	u8 temp1 = 0, temp2 = 0;
 
+	dev_warn(dev,
+		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+		 "chassis_clear");
+
 	val = simple_strtoul(buf, NULL, 10);
 	mutex_lock(&data->update_lock);
 	data->chassis_clear = SENSORS_LIMIT(val, 0 ,1);
@@ -726,6 +740,27 @@
 	return count;
 }
 
+static ssize_t
+store_chassis_clear(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct w83792d_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+	u8 reg;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
+	w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
+	data->valid = 0;		/* Force cache refresh */
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
 /* For Smart Fan I / Thermal Cruise */
 static ssize_t
 show_thermal_cruise(struct device *dev, struct device_attribute *attr,
@@ -1012,7 +1047,9 @@
 static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23);
 static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL);
 static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR,
-			show_chassis_clear, store_chassis_clear);
+			show_chassis_clear, store_chassis_clear_legacy);
+static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
+			show_chassis, store_chassis_clear);
 static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
@@ -1214,6 +1251,7 @@
 	&dev_attr_alarms.attr,
 	&dev_attr_chassis.attr,
 	&dev_attr_chassis_clear.attr,
+	&dev_attr_intrusion0_alarm.attr,
 	&sensor_dev_attr_tolerance1.dev_attr.attr,
 	&sensor_dev_attr_thermal_cruise1.dev_attr.attr,
 	&sensor_dev_attr_tolerance2.dev_attr.attr,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 8e540ad..e3bdedf 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -51,7 +51,6 @@
 #define WATCHDOG_TIMEOUT 2	/* 2 minute default timeout */
 
 /* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
 static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
 						I2C_CLIENT_END };
 
@@ -421,18 +420,43 @@
 
 /* Write any value to clear chassis alarm */
 static ssize_t
+store_chassis_clear_legacy(struct device *dev,
+			   struct device_attribute *attr, const char *buf,
+			   size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct w83793_data *data = i2c_get_clientdata(client);
+	u8 val;
+
+	dev_warn(dev, "Attribute chassis is deprecated, "
+		 "use intrusion0_alarm instead\n");
+
+	mutex_lock(&data->update_lock);
+	val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
+	val |= 0x80;
+	w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+/* Write 0 to clear chassis alarm */
+static ssize_t
 store_chassis_clear(struct device *dev,
 		    struct device_attribute *attr, const char *buf,
 		    size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct w83793_data *data = i2c_get_clientdata(client);
-	u8 val;
+	unsigned long val;
+	u8 reg;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
-	val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
-	val |= 0x80;
-	w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
+	reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
+	w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
+	data->valid = 0;		/* Force cache refresh */
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -1102,6 +1126,8 @@
 
 static struct sensor_device_attribute_2 sda_single_files[] = {
 	SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
+		      store_chassis_clear_legacy, ALARM_STATUS, 30),
+	SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
 		      store_chassis_clear, ALARM_STATUS, 30),
 	SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
 		      store_beep_enable, NOT_USED, NOT_USED),
@@ -1323,7 +1349,7 @@
 static long watchdog_ioctl(struct file *filp, unsigned int cmd,
 			   unsigned long arg)
 {
-	static struct watchdog_info ident = {
+	struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING |
 			   WDIOF_SETTIMEOUT |
 			   WDIOF_CARDRESET,
@@ -1333,7 +1359,6 @@
 	int val, ret = 0;
 	struct w83793_data *data = filp->private_data;
 
-	mutex_lock(&watchdog_mutex);
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		if (!nowayout)
@@ -1387,7 +1412,6 @@
 	default:
 		ret = -ENOTTY;
 	}
-	mutex_unlock(&watchdog_mutex);
 	return ret;
 }
 
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index cdbc744..845232d 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -458,6 +458,7 @@
 {
 	struct w83795_data *data = i2c_get_clientdata(client);
 	int i, limit;
+	u8 lsb;
 
 	/* Read the voltage limits */
 	for (i = 0; i < ARRAY_SIZE(data->in); i++) {
@@ -479,9 +480,8 @@
 	}
 
 	/* Read the fan limits */
+	lsb = 0; /* Silence false gcc warning */
 	for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
-		u8 lsb;
-
 		/* Each register contains LSB for 2 fans, but we want to
 		 * read it only once to save time */
 		if ((i & 1) == 0 && (data->has_fan & (3 << i)))
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3a6321c..113505a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -638,6 +638,14 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called xilinx_i2c.
 
+config I2C_EG20T
+        tristate "PCH I2C of Intel EG20T"
+        depends on PCI
+        help
+          This driver is for the PCH (Platform Controller Hub) I2C of the
+          Intel EG20T, which is an IOH (Input/Output Hub) for x86 embedded
+          processors. This driver can access the PCH I2C bus device.
+
 comment "External I2C/SMBus adapter drivers"
 
 config I2C_PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 84cb16a..9d2d0ec 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -61,6 +61,7 @@
 obj-$(CONFIG_I2C_VERSATILE)	+= i2c-versatile.o
 obj-$(CONFIG_I2C_OCTEON)	+= i2c-octeon.o
 obj-$(CONFIG_I2C_XILINX)	+= i2c-xiic.o
+obj-$(CONFIG_I2C_EG20T)         += i2c-eg20t.o
 
 # External I2C/SMBus adapter drivers
 obj-$(CONFIG_I2C_PARPORT)	+= i2c-parport.o
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index fb26e5c..52b545a 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -20,6 +20,7 @@
 #include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/delay.h>
 
 #include <asm/blackfin.h>
 #include <asm/portmux.h>
@@ -159,6 +160,27 @@
 		if (mast_stat & BUFWRERR)
 			dev_dbg(&iface->adap.dev, "Buffer Write Error\n");
 
+		/* Faulty slave devices may drive SDA low after a transfer
+		 * finishes. To release the bus, this code generates up to 9
+		 * extra clocks until SDA is released.
+		 */
+
+		if (read_MASTER_STAT(iface) & SDASEN) {
+			int cnt = 9;
+			do {
+				write_MASTER_CTL(iface, SCLOVR);
+				udelay(6);
+				write_MASTER_CTL(iface, 0);
+				udelay(6);
+			} while ((read_MASTER_STAT(iface) & SDASEN) && cnt--);
+
+			write_MASTER_CTL(iface, SDAOVR | SCLOVR);
+			udelay(6);
+			write_MASTER_CTL(iface, SDAOVR);
+			udelay(6);
+			write_MASTER_CTL(iface, 0);
+		}
+
 		/* If it is a quick transfer, only address without data,
 		 * not an err, return 1.
 		 */
@@ -760,7 +782,7 @@
 	platform_driver_unregister(&i2c_bfin_twi_driver);
 }
 
-module_init(i2c_bfin_twi_init);
+subsys_initcall(i2c_bfin_twi_init);
 module_exit(i2c_bfin_twi_exit);
 
 MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
new file mode 100644
index 0000000..50ea1f4
--- /dev/null
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -0,0 +1,901 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+
+#define PCH_EVENT_SET	0	/* I2C Interrupt Event Set Status */
+#define PCH_EVENT_NONE	1	/* I2C Interrupt Event Clear Status */
+#define PCH_MAX_CLK		100000	/* Maximum clock speed in KHz */
+#define PCH_BUFFER_MODE_ENABLE	0x0002	/* flag for Buffer mode enable */
+#define PCH_EEPROM_SW_RST_MODE_ENABLE	0x0008	/* EEPROM SW RST enable flag */
+
+#define PCH_I2CSADR	0x00	/* I2C slave address register */
+#define PCH_I2CCTL	0x04	/* I2C control register */
+#define PCH_I2CSR	0x08	/* I2C status register */
+#define PCH_I2CDR	0x0C	/* I2C data register */
+#define PCH_I2CMON	0x10	/* I2C bus monitor register */
+#define PCH_I2CBC	0x14	/* I2C bus transfer rate setup counter */
+#define PCH_I2CMOD	0x18	/* I2C mode register */
+#define PCH_I2CBUFSLV	0x1C	/* I2C buffer mode slave address register */
+#define PCH_I2CBUFSUB	0x20	/* I2C buffer mode subaddress register */
+#define PCH_I2CBUFFOR	0x24	/* I2C buffer mode format register */
+#define PCH_I2CBUFCTL	0x28	/* I2C buffer mode control register */
+#define PCH_I2CBUFMSK	0x2C	/* I2C buffer mode interrupt mask register */
+#define PCH_I2CBUFSTA	0x30	/* I2C buffer mode status register */
+#define PCH_I2CBUFLEV	0x34	/* I2C buffer mode level register */
+#define PCH_I2CESRFOR	0x38	/* EEPROM software reset mode format register */
+#define PCH_I2CESRCTL	0x3C	/* EEPROM software reset mode ctrl register */
+#define PCH_I2CESRMSK	0x40	/* EEPROM software reset mode */
+#define PCH_I2CESRSTA	0x44	/* EEPROM software reset mode status register */
+#define PCH_I2CTMR	0x48	/* I2C timer register */
+#define PCH_I2CSRST	0xFC	/* I2C reset register */
+#define PCH_I2CNF	0xF8	/* I2C noise filter register */
+
+#define BUS_IDLE_TIMEOUT	20
+#define PCH_I2CCTL_I2CMEN	0x0080
+#define TEN_BIT_ADDR_DEFAULT	0xF000
+#define TEN_BIT_ADDR_MASK	0xF0
+#define PCH_START		0x0020
+#define PCH_ESR_START		0x0001
+#define PCH_BUFF_START		0x1
+#define PCH_REPSTART		0x0004
+#define PCH_ACK			0x0008
+#define PCH_GETACK		0x0001
+#define CLR_REG			0x0
+#define I2C_RD			0x1
+#define I2CMCF_BIT		0x0080
+#define I2CMIF_BIT		0x0002
+#define I2CMAL_BIT		0x0010
+#define I2CBMFI_BIT		0x0001
+#define I2CBMAL_BIT		0x0002
+#define I2CBMNA_BIT		0x0004
+#define I2CBMTO_BIT		0x0008
+#define I2CBMIS_BIT		0x0010
+#define I2CESRFI_BIT		0X0001
+#define I2CESRTO_BIT		0x0002
+#define I2CESRFIIE_BIT		0x1
+#define I2CESRTOIE_BIT		0x2
+#define I2CBMDZ_BIT		0x0040
+#define I2CBMAG_BIT		0x0020
+#define I2CMBB_BIT		0x0020
+#define BUFFER_MODE_MASK	(I2CBMFI_BIT | I2CBMAL_BIT | I2CBMNA_BIT | \
+				I2CBMTO_BIT | I2CBMIS_BIT)
+#define I2C_ADDR_MSK		0xFF
+#define I2C_MSB_2B_MSK		0x300
+#define FAST_MODE_CLK		400
+#define FAST_MODE_EN		0x0001
+#define SUB_ADDR_LEN_MAX	4
+#define BUF_LEN_MAX		32
+#define PCH_BUFFER_MODE		0x1
+#define EEPROM_SW_RST_MODE	0x0002
+#define NORMAL_INTR_ENBL	0x0300
+#define EEPROM_RST_INTR_ENBL	(I2CESRFIIE_BIT | I2CESRTOIE_BIT)
+#define EEPROM_RST_INTR_DISBL	0x0
+#define BUFFER_MODE_INTR_ENBL	0x001F
+#define BUFFER_MODE_INTR_DISBL	0x0
+#define NORMAL_MODE		0x0
+#define BUFFER_MODE		0x1
+#define EEPROM_SR_MODE		0x2
+#define I2C_TX_MODE		0x0010
+#define PCH_BUF_TX		0xFFF7
+#define PCH_BUF_RD		0x0008
+#define I2C_ERROR_MASK	(I2CESRTO_EVENT | I2CBMIS_EVENT | I2CBMTO_EVENT | \
+			I2CBMNA_EVENT | I2CBMAL_EVENT | I2CMAL_EVENT)
+#define I2CMAL_EVENT		0x0001
+#define I2CMCF_EVENT		0x0002
+#define I2CBMFI_EVENT		0x0004
+#define I2CBMAL_EVENT		0x0008
+#define I2CBMNA_EVENT		0x0010
+#define I2CBMTO_EVENT		0x0020
+#define I2CBMIS_EVENT		0x0040
+#define I2CESRFI_EVENT		0x0080
+#define I2CESRTO_EVENT		0x0100
+#define PCI_DEVICE_ID_PCH_I2C	0x8817
+
+#define pch_dbg(adap, fmt, arg...)  \
+	dev_dbg(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg)
+
+#define pch_err(adap, fmt, arg...)  \
+	dev_err(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg)
+
+#define pch_pci_err(pdev, fmt, arg...)  \
+	dev_err(&pdev->dev, "%s :" fmt, __func__, ##arg)
+
+#define pch_pci_dbg(pdev, fmt, arg...)  \
+	dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg)
+
+/**
+ * struct i2c_algo_pch_data - for I2C driver functionalities
+ * @pch_adapter:		stores the reference to i2c_adapter structure
+ * @p_adapter_info:		stores the reference to adapter_info structure
+ * @pch_base_address:		specifies the remapped base address
+ * @pch_buff_mode_en:		specifies if buffer mode is enabled
+ * @pch_event_flag:		specifies occurrence of interrupt events
+ * @pch_i2c_xfer_in_progress:	specifies whether a transfer is in progress
+ */
+struct i2c_algo_pch_data {
+	struct i2c_adapter pch_adapter;
+	struct adapter_info *p_adapter_info;
+	void __iomem *pch_base_address;
+	int pch_buff_mode_en;
+	u32 pch_event_flag;
+	bool pch_i2c_xfer_in_progress;
+};
+
+/**
+ * struct adapter_info - This structure holds the adapter information for the
+			 PCH i2c controller
+ * @pch_data:		stores a list of i2c_algo_pch_data
+ * @pch_i2c_suspended:	specifies whether the system is suspended or not
+ *
+ * pch_data has as many elements as maximum I2C channels
+ */
+struct adapter_info {
+	struct i2c_algo_pch_data pch_data;
+	bool pch_i2c_suspended;
+};
+
+
+static int pch_i2c_speed = 100; /* I2C bus speed in Kbps */
+static int pch_clk = 50000;	/* specifies I2C clock speed in KHz */
+static wait_queue_head_t pch_event;
+static DEFINE_MUTEX(pch_mutex);
+
+static struct pci_device_id __devinitdata pch_pcidev_id[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)},
+	{0,}
+};
+
+static irqreturn_t pch_i2c_handler(int irq, void *pData);
+
+static inline void pch_setbit(void __iomem *addr, u32 offset, u32 bitmask)
+{
+	u32 val;
+	val = ioread32(addr + offset);
+	val |= bitmask;
+	iowrite32(val, addr + offset);
+}
+
+static inline void pch_clrbit(void __iomem *addr, u32 offset, u32 bitmask)
+{
+	u32 val;
+	val = ioread32(addr + offset);
+	val &= (~bitmask);
+	iowrite32(val, addr + offset);
+}
+
+/**
+ * pch_i2c_init() - hardware initialization of I2C module
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_init(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	u32 pch_i2cbc;
+	u32 pch_i2ctmr;
+	u32 reg_value;
+
+	/* reset I2C controller */
+	iowrite32(0x01, p + PCH_I2CSRST);
+	msleep(20);
+	iowrite32(0x0, p + PCH_I2CSRST);
+
+	/* Initialize I2C registers */
+	iowrite32(0x21, p + PCH_I2CNF);
+
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL,
+			  PCH_I2CCTL_I2CMEN);
+
+	if (pch_i2c_speed != 400)
+		pch_i2c_speed = 100;
+
+	reg_value = PCH_I2CCTL_I2CMEN;
+	if (pch_i2c_speed == FAST_MODE_CLK) {
+		reg_value |= FAST_MODE_EN;
+		pch_dbg(adap, "Fast mode enabled\n");
+	}
+
+	if (pch_clk > PCH_MAX_CLK)
+		pch_clk = 62500;
+
+	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+	/* Set transfer speed in I2CBC */
+	iowrite32(pch_i2cbc, p + PCH_I2CBC);
+
+	pch_i2ctmr = (pch_clk) / 8;
+	iowrite32(pch_i2ctmr, p + PCH_I2CTMR);
+
+	reg_value |= NORMAL_INTR_ENBL;	/* Enable interrupts in normal mode */
+	iowrite32(reg_value, p + PCH_I2CCTL);
+
+	pch_dbg(adap,
+		"I2CCTL=%x pch_i2cbc=%x pch_i2ctmr=%x Enable interrupts\n",
+		ioread32(p + PCH_I2CCTL), pch_i2cbc, pch_i2ctmr);
+
+	init_waitqueue_head(&pch_event);
+}
+
+static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
+{
+	return cmp1.tv64 < cmp2.tv64;
+}
+
+/**
+ * pch_i2c_wait_for_bus_idle() - check the status of the bus.
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ * @timeout:	waiting time counter (ms).
+ */
+static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
+				 s32 timeout)
+{
+	void __iomem *p = adap->pch_base_address;
+
+	/* MAX timeout value is timeout*1000*1000nsec */
+	ktime_t ns_val = ktime_add_ns(ktime_get(), timeout*1000*1000);
+	do {
+		if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
+			break;
+		msleep(20);
+	} while (ktime_lt(ktime_get(), ns_val));
+
+	pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR));
+
+	if (ioread32(p + PCH_I2CSR) & I2CMBB_BIT) {
+		pch_err(adap, "%s: Timeout Error. return %d\n", __func__, -ETIME);
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+/**
+ * pch_i2c_start() - Generate I2C start condition in normal mode.
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ *
+ * Generate I2C start condition in normal mode by setting I2CCTL.I2CMSTA to 1.
+ */
+static void pch_i2c_start(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
+}
+
+/**
+ * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
+{
+	s32 ret;
+	ret = wait_event_timeout(pch_event,
+			(adap->pch_event_flag != 0), msecs_to_jiffies(50));
+	if (ret < 0) {
+		pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+		return ret;
+	}
+
+	if (ret == 0) {
+		pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+		return -ETIMEDOUT;
+	}
+
+	if (adap->pch_event_flag & I2C_ERROR_MASK) {
+		pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
+		return -EIO;
+	}
+
+	adap->pch_event_flag = 0;
+
+	return 0;
+}
+
+/**
+ * pch_i2c_getack() - to confirm ACK/NACK
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static s32 pch_i2c_getack(struct i2c_algo_pch_data *adap)
+{
+	u32 reg_val;
+	void __iomem *p = adap->pch_base_address;
+	reg_val = ioread32(p + PCH_I2CSR) & PCH_GETACK;
+
+	if (reg_val != 0) {
+		pch_err(adap, "return%d\n", -EPROTO);
+		return -EPROTO;
+	}
+
+	return 0;
+}
+
+/**
+ * pch_i2c_stop() - generate stop condition in normal mode.
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	/* clear the start bit */
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
+}
+
+/**
+ * pch_i2c_repstart() - generate repeated start condition in normal mode
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_repstart(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_REPSTART);
+}
+
+/**
+ * pch_i2c_writebytes() - write data to I2C bus in normal mode
+ * @i2c_adap:	Pointer to the struct i2c_adapter.
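+ * @msgs:	Pointer to i2c_msg structure.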
+ * @last:	specifies whether last message or not.
+ *		In the case of compound mode it will be 1 for last message,
+ *		otherwise 0.
+ * @first:	specifies whether first message or not.
+ *		1 for first message otherwise 0.
+ */
+static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
+			      struct i2c_msg *msgs, u32 last, u32 first)
+{
+	struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+	u8 *buf;
+	u32 length;
+	u32 addr;
+	u32 addr_2_msb;
+	u32 addr_8_lsb;
+	s32 wrcount;
+	void __iomem *p = adap->pch_base_address;
+
+	length = msgs->len;
+	buf = msgs->buf;
+	addr = msgs->addr;
+
+	/* enable master tx */
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
+
+	pch_dbg(adap, "I2CCTL = %x msgs->len = %d\n", ioread32(p + PCH_I2CCTL),
+		length);
+
+	if (first) {
+		if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME)
+			return -ETIME;
+	}
+
+	if (msgs->flags & I2C_M_TEN) {
+		addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7);
+		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+		if (first)
+			pch_i2c_start(adap);
+		if (pch_i2c_wait_for_xfer_complete(adap) == 0 &&
+		    pch_i2c_getack(adap) == 0) {
+			addr_8_lsb = (addr & I2C_ADDR_MSK);
+			iowrite32(addr_8_lsb, p + PCH_I2CDR);
+		} else {
+			pch_i2c_stop(adap);
+			return -ETIME;
+		}
+	} else {
+		/* set 7 bit slave address and R/W bit as 0 */
+		iowrite32(addr << 1, p + PCH_I2CDR);
+		if (first)
+			pch_i2c_start(adap);
+	}
+
+	if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
+	    (pch_i2c_getack(adap) == 0)) {
+		for (wrcount = 0; wrcount < length; ++wrcount) {
+			/* write buffer value to I2C data register */
+			iowrite32(buf[wrcount], p + PCH_I2CDR);
+			pch_dbg(adap, "writing %x to Data register\n",
+				buf[wrcount]);
+
+			if (pch_i2c_wait_for_xfer_complete(adap) != 0)
+				return -ETIME;
+
+			if (pch_i2c_getack(adap))
+				return -EIO;
+		}
+
+		/* check if this is the last message */
+		if (last)
+			pch_i2c_stop(adap);
+		else
+			pch_i2c_repstart(adap);
+	} else {
+		pch_i2c_stop(adap);
+		return -EIO;
+	}
+
+	pch_dbg(adap, "return=%d\n", wrcount);
+
+	return wrcount;
+}
+
+/**
+ * pch_i2c_sendack() - send ACK
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_sendack(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK);
+}
+
+/**
+ * pch_i2c_sendnack() - send NACK
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK);
+}
+
+/**
+ * pch_i2c_readbytes() - read data from I2C bus in normal mode.
+ * @i2c_adap:	Pointer to the struct i2c_adapter.
+ * @msgs:	Pointer to i2c_msg structure.
+ * @last:	specifies whether last message or not.
+ * @first:	specifies whether first message or not.
+ */
+static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap,
+			     struct i2c_msg *msgs, u32 last, u32 first)
+{
+	struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+
+	u8 *buf;
+	u32 count;
+	u32 length;
+	u32 addr;
+	u32 addr_2_msb;
+	void __iomem *p = adap->pch_base_address;
+
+	length = msgs->len;
+	buf = msgs->buf;
+	addr = msgs->addr;
+
+	/* enable master reception */
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
+
+	if (first) {
+		if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME)
+			return -ETIME;
+	}
+
+	if (msgs->flags & I2C_M_TEN) {
+		addr_2_msb = (((addr & I2C_MSB_2B_MSK) >> 7) | (I2C_RD));
+		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+
+	} else {
+		/* 7 address bits + R/W bit */
+		addr = (((addr) << 1) | (I2C_RD));
+		iowrite32(addr, p + PCH_I2CDR);
+	}
+
+	/* check if it is the first message */
+	if (first)
+		pch_i2c_start(adap);
+
+	if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
+	    (pch_i2c_getack(adap) == 0)) {
+		pch_dbg(adap, "return %d\n", 0);
+
+		if (length == 0) {
+			pch_i2c_stop(adap);
+			ioread32(p + PCH_I2CDR); /* dummy read needed */
+
+			count = length;
+		} else {
+			int read_index;
+			int loop;
+			pch_i2c_sendack(adap);
+
+			/* Dummy read */
+			for (loop = 1, read_index = 0; loop < length; loop++) {
+				buf[read_index] = ioread32(p + PCH_I2CDR);
+
+				if (loop != 1)
+					read_index++;
+
+				if (pch_i2c_wait_for_xfer_complete(adap) != 0) {
+					pch_i2c_stop(adap);
+					return -ETIME;
+				}
+			}	/* end for */
+
+			pch_i2c_sendnack(adap);
+
+			buf[read_index] = ioread32(p + PCH_I2CDR);
+
+			if (length != 1)
+				read_index++;
+
+			if (pch_i2c_wait_for_xfer_complete(adap) == 0) {
+				if (last)
+					pch_i2c_stop(adap);
+				else
+					pch_i2c_repstart(adap);
+
+				buf[read_index++] = ioread32(p + PCH_I2CDR);
+				count = read_index;
+			} else {
+				count = -ETIME;
+			}
+
+		}
+	} else {
+		count = -ETIME;
+		pch_i2c_stop(adap);
+	}
+
+	return count;
+}
+
+/**
+ * pch_i2c_cb_ch0() - interrupt handler callback function
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
+{
+	u32 sts;
+	void __iomem *p = adap->pch_base_address;
+
+	sts = ioread32(p + PCH_I2CSR);
+	sts &= (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT);
+	if (sts & I2CMAL_BIT)
+		adap->pch_event_flag |= I2CMAL_EVENT;
+
+	if (sts & I2CMCF_BIT)
+		adap->pch_event_flag |= I2CMCF_EVENT;
+
+	/* clear the applicable bits */
+	pch_clrbit(adap->pch_base_address, PCH_I2CSR, sts);
+
+	pch_dbg(adap, "PCH_I2CSR = %x\n", ioread32(p + PCH_I2CSR));
+
+	wake_up(&pch_event);
+}
+
+/**
+ * pch_i2c_handler() - interrupt handler for the PCH I2C controller
+ * @irq:	irq number.
+ * @pData:	cookie passed back to the handler function.
+ */
+static irqreturn_t pch_i2c_handler(int irq, void *pData)
+{
+	s32 reg_val;
+
+	struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData;
+	void __iomem *p = adap_data->pch_base_address;
+	u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE);
+
+	if (mode != NORMAL_MODE) {
+		pch_err(adap_data, "I2C mode is not supported\n");
+		return IRQ_NONE;
+	}
+
+	reg_val = ioread32(p + PCH_I2CSR);
+	if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT))
+		pch_i2c_cb_ch0(adap_data);
+	else
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * pch_i2c_xfer() - read and write data through the I2C bus
+ * @i2c_adap:	Pointer to the struct i2c_adapter.
+ * @msgs:	Pointer to i2c_msg structure.
+ * @num:	number of messages.
+ */
+static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
+		    struct i2c_msg *msgs, s32 num)
+{
+	struct i2c_msg *pmsg;
+	u32 i = 0;
+	u32 status;
+	u32 msglen;
+	u32 subaddrlen;
+	s32 ret;
+
+	struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+
+	ret = mutex_lock_interruptible(&pch_mutex);
+	if (ret)
+		return -ERESTARTSYS;
+
+	if (adap->p_adapter_info->pch_i2c_suspended) {
+		mutex_unlock(&pch_mutex);
+		return -EBUSY;
+	}
+
+	pch_dbg(adap, "adap->p_adapter_info->pch_i2c_suspended is %d\n",
+		adap->p_adapter_info->pch_i2c_suspended);
+	/* transfer not completed */
+	adap->pch_i2c_xfer_in_progress = true;
+
+	pmsg = &msgs[0];
+	pmsg->flags |= adap->pch_buff_mode_en;
+	status = pmsg->flags;
+	pch_dbg(adap,
+		"After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
+	/* calculate sub address length and message length */
+	/* these are applicable only for buffer mode */
+	subaddrlen = pmsg->buf[0];
+	/* calculate actual message length excluding
+	 * the sub address fields */
+	msglen = (pmsg->len) - (subaddrlen + 1);
+	if (status & (I2C_M_RD)) {
+		pch_dbg(adap, "invoking pch_i2c_readbytes\n");
+		ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
+				   (i == 0));
+	} else {
+		pch_dbg(adap, "invoking pch_i2c_writebytes\n");
+		ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
+				    (i == 0));
+	}
+
+	adap->pch_i2c_xfer_in_progress = false;	/* transfer completed */
+
+	mutex_unlock(&pch_mutex);
+
+	return ret;
+}
+
+/**
+ * pch_i2c_func() - return the functionality of the I2C driver
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static u32 pch_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
+}
+
+static struct i2c_algorithm pch_algorithm = {
+	.master_xfer = pch_i2c_xfer,
+	.functionality = pch_i2c_func
+};
+
+/**
+ * pch_i2c_disbl_int() - Disable PCH I2C interrupts
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, NORMAL_INTR_ENBL);
+
+	iowrite32(EEPROM_RST_INTR_DISBL, p + PCH_I2CESRMSK);
+
+	iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK);
+}
+
+static int __devinit pch_i2c_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *id)
+{
+	void __iomem *base_addr;
+	s32 ret;
+	struct adapter_info *adap_info;
+
+	pch_pci_dbg(pdev, "Entered.\n");
+
+	adap_info = kzalloc((sizeof(struct adapter_info)), GFP_KERNEL);
+	if (adap_info == NULL) {
+		pch_pci_err(pdev, "Memory allocation FAILED\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		pch_pci_err(pdev, "pci_enable_device FAILED\n");
+		goto err_pci_enable;
+	}
+
+	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (ret) {
+		pch_pci_err(pdev, "pci_request_regions FAILED\n");
+		goto err_pci_req;
+	}
+
+	base_addr = pci_iomap(pdev, 1, 0);
+
+	if (base_addr == NULL) {
+		pch_pci_err(pdev, "pci_iomap FAILED\n");
+		ret = -ENOMEM;
+		goto err_pci_iomap;
+	}
+
+	adap_info->pch_i2c_suspended = false;
+
+	adap_info->pch_data.p_adapter_info = adap_info;
+
+	adap_info->pch_data.pch_adapter.owner = THIS_MODULE;
+	adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON;
+	strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME);
+	adap_info->pch_data.pch_adapter.algo = &pch_algorithm;
+	adap_info->pch_data.pch_adapter.algo_data =
+						&adap_info->pch_data;
+
+	/* (i * 0x80) + base_addr; */
+	adap_info->pch_data.pch_base_address = base_addr;
+
+	adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev;
+
+	ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter));
+
+	if (ret) {
+		pch_pci_err(pdev, "i2c_add_adapter FAILED\n");
+		goto err_i2c_add_adapter;
+	}
+
+	pch_i2c_init(&adap_info->pch_data);
+	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+		  KBUILD_MODNAME, &adap_info->pch_data);
+	if (ret) {
+		pch_pci_err(pdev, "request_irq FAILED\n");
+		goto err_request_irq;
+	}
+
+	pci_set_drvdata(pdev, adap_info);
+	pch_pci_dbg(pdev, "returns %d.\n", ret);
+	return 0;
+
+err_request_irq:
+	i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+err_i2c_add_adapter:
+	pci_iounmap(pdev, base_addr);
+err_pci_iomap:
+	pci_release_regions(pdev);
+err_pci_req:
+	pci_disable_device(pdev);
+err_pci_enable:
+	kfree(adap_info);
+	return ret;
+}
+
+static void __devexit pch_i2c_remove(struct pci_dev *pdev)
+{
+	struct adapter_info *adap_info = pci_get_drvdata(pdev);
+
+	pch_i2c_disbl_int(&adap_info->pch_data);
+	free_irq(pdev->irq, &adap_info->pch_data);
+	i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+
+	if (adap_info->pch_data.pch_base_address) {
+		pci_iounmap(pdev, adap_info->pch_data.pch_base_address);
+		adap_info->pch_data.pch_base_address = 0;
+	}
+
+	pci_set_drvdata(pdev, NULL);
+
+	pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+	kfree(adap_info);
+}
+
+#ifdef CONFIG_PM
+static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int ret;
+	struct adapter_info *adap_info = pci_get_drvdata(pdev);
+	void __iomem *p = adap_info->pch_data.pch_base_address;
+
+	adap_info->pch_i2c_suspended = true;
+
+	while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) {
+		/* Wait until all channel transfers are completed */
+		msleep(20);
+	}
+	/* Disable the i2c interrupts */
+	pch_i2c_disbl_int(&adap_info->pch_data);
+
+	pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x "
+		"invoked function pch_i2c_disbl_int successfully\n",
+		ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA),
+		ioread32(p + PCH_I2CESRSTA));
+
+	ret = pci_save_state(pdev);
+
+	if (ret) {
+		pch_pci_err(pdev, "pci_save_state FAILED\n");
+		return ret;
+	}
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int pch_i2c_resume(struct pci_dev *pdev)
+{
+	struct adapter_info *adap_info = pci_get_drvdata(pdev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	if (pci_enable_device(pdev) < 0) {
+		pch_pci_err(pdev, "pch_i2c_resume:pci_enable_device FAILED\n");
+		return -EIO;
+	}
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+
+	pch_i2c_init(&adap_info->pch_data);
+
+	adap_info->pch_i2c_suspended = false;
+
+	return 0;
+}
+#else
+#define pch_i2c_suspend NULL
+#define pch_i2c_resume NULL
+#endif
+
+static struct pci_driver pch_pcidriver = {
+	.name = KBUILD_MODNAME,
+	.id_table = pch_pcidev_id,
+	.probe = pch_i2c_probe,
+	.remove = __devexit_p(pch_i2c_remove),
+	.suspend = pch_i2c_suspend,
+	.resume = pch_i2c_resume
+};
+
+static int __init pch_pci_init(void)
+{
+	return pci_register_driver(&pch_pcidriver);
+}
+module_init(pch_pci_init);
+
+static void __exit pch_pci_exit(void)
+{
+	pci_unregister_driver(&pch_pcidriver);
+}
+module_exit(pch_pci_exit);
+
+MODULE_DESCRIPTION("PCH I2C PCI Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
+module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
+module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
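
For reference, a hedged client-side sketch (not part of the patch) of the combined transfer that the first/last parameters above are built around: i2c_transfer() hands an array of i2c_msg to the adapter's master_xfer, and an algorithm that honours first/last issues a START for the first message, a repeated START between messages, and a STOP only after the last. The device address and one-byte register layout below are made up for illustration.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/i2c.h>

/* Read one byte back from a hypothetical register of a hypothetical chip. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{
			.addr	= client->addr,		/* e.g. a made-up 0x50 */
			.flags	= 0,			/* write: register index */
			.len	= 1,
			.buf	= &reg,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,		/* read after repeated START */
			.len	= 1,
			.buf	= val,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}
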
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 112c61f..f09c931 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -409,7 +409,7 @@
 		IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE);
 	__raw_writel(cr, adapter_data->ioaddr + CR_OFFSET);
 
-	iounmap((void __iomem*)adapter_data->ioaddr);
+	iounmap(adapter_data->ioaddr);
 	release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
 	kfree(adapter_data);
 	kfree(padapter);
@@ -453,7 +453,7 @@
 	/* set the adapter enumeration # */
 	adapter_data->id = i2c_id++;
 
-	adapter_data->ioaddr = (u32)ioremap(res->start, IOP3XX_I2C_IO_SIZE);
+	adapter_data->ioaddr = ioremap(res->start, IOP3XX_I2C_IO_SIZE);
 	if (!adapter_data->ioaddr) {
 		ret = -ENOMEM;
 		goto release_region;
@@ -498,7 +498,7 @@
 	return 0;
 
 unmap:
-	iounmap((void __iomem*)adapter_data->ioaddr);
+	iounmap(adapter_data->ioaddr);
 
 release_region:
 	release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
diff --git a/drivers/i2c/busses/i2c-iop3xx.h b/drivers/i2c/busses/i2c-iop3xx.h
index 8485861..097e270 100644
--- a/drivers/i2c/busses/i2c-iop3xx.h
+++ b/drivers/i2c/busses/i2c-iop3xx.h
@@ -97,7 +97,7 @@
 #define	IOP3XX_I2C_IO_SIZE	0x18
 
 struct i2c_algo_iop3xx_data {
-	u32 ioaddr;
+	void __iomem *ioaddr;
 	wait_queue_head_t waitq;
 	spinlock_t lock;
 	u32 SR_enabled, SR_received;
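
A hedged aside (not from the patch) on what the ioaddr type change buys: keeping the mapping as void __iomem * lets sparse type-check MMIO accesses and makes the iounmap() casts removed above unnecessary. The register offset, enable bit and mapping size below are invented for illustration.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_CTRL_OFFSET	0x10	/* hypothetical register */
#define EXAMPLE_ENABLE_BIT	0x01	/* hypothetical bit */

static int example_enable_block(resource_size_t phys, unsigned long size)
{
	void __iomem *base;
	u32 val;

	base = ioremap(phys, size);
	if (!base)
		return -ENOMEM;

	val = readl(base + EXAMPLE_CTRL_OFFSET);
	writel(val | EXAMPLE_ENABLE_BIT, base + EXAMPLE_CTRL_OFFSET);

	iounmap(base);			/* no cast needed: base is __iomem */
	return 0;
}
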
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 1624206..a9941c6 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -59,6 +59,7 @@
 	MV64XXX_I2C_STATE_INVALID,
 	MV64XXX_I2C_STATE_IDLE,
 	MV64XXX_I2C_STATE_WAITING_FOR_START_COND,
+	MV64XXX_I2C_STATE_WAITING_FOR_RESTART,
 	MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK,
 	MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK,
 	MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK,
@@ -70,6 +71,7 @@
 	MV64XXX_I2C_ACTION_INVALID,
 	MV64XXX_I2C_ACTION_CONTINUE,
 	MV64XXX_I2C_ACTION_SEND_START,
+	MV64XXX_I2C_ACTION_SEND_RESTART,
 	MV64XXX_I2C_ACTION_SEND_ADDR_1,
 	MV64XXX_I2C_ACTION_SEND_ADDR_2,
 	MV64XXX_I2C_ACTION_SEND_DATA,
@@ -91,6 +93,7 @@
 	u32			addr2;
 	u32			bytes_left;
 	u32			byte_posn;
+	u32			send_stop;
 	u32			block;
 	int			rc;
 	u32			freq_m;
@@ -159,8 +162,15 @@
 		if ((drv_data->bytes_left == 0)
 				|| (drv_data->aborting
 					&& (drv_data->byte_posn != 0))) {
-			drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
-			drv_data->state = MV64XXX_I2C_STATE_IDLE;
+			if (drv_data->send_stop) {
+				drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
+				drv_data->state = MV64XXX_I2C_STATE_IDLE;
+			} else {
+				drv_data->action =
+					MV64XXX_I2C_ACTION_SEND_RESTART;
+				drv_data->state =
+					MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
+			}
 		} else {
 			drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA;
 			drv_data->state =
@@ -228,6 +238,15 @@
 mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
 {
 	switch(drv_data->action) {
+	case MV64XXX_I2C_ACTION_SEND_RESTART:
+		drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
+		drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
+		writel(drv_data->cntl_bits,
+			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+		drv_data->block = 0;
+		wake_up_interruptible(&drv_data->waitq);
+		break;
+
 	case MV64XXX_I2C_ACTION_CONTINUE:
 		writel(drv_data->cntl_bits,
 			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
@@ -386,7 +405,8 @@
 }
 
 static int
-mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg)
+mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
+				int is_first, int is_last)
 {
 	unsigned long	flags;
 
@@ -406,10 +426,18 @@
 			drv_data->bytes_left--;
 		}
 	} else {
-		drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
-		drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+		if (is_first) {
+			drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+			drv_data->state =
+				MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+		} else {
+			drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_1;
+			drv_data->state =
+				MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK;
+		}
 	}
 
+	drv_data->send_stop = is_last;
 	drv_data->block = 1;
 	mv64xxx_i2c_do_action(drv_data);
 	spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -437,9 +465,12 @@
 	struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
 	int	i, rc;
 
-	for (i=0; i<num; i++)
-		if ((rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i])) < 0)
+	for (i = 0; i < num; i++) {
+		rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i],
+						i == 0, i + 1 == num);
+		if (rc < 0)
 			return rc;
+	}
 
 	return num;
 }
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index c9fffd0..594ed50 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -434,7 +434,7 @@
 	}
 
 	if (timeout == 0) {
-		/* controler has timedout, re-init the h/w */
+		/* controller has timed out, re-init the h/w */
 		dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
 		(void) init_hw(dev);
 		status = -ETIMEDOUT;
@@ -498,7 +498,7 @@
 	}
 
 	if (timeout == 0) {
-		/* controler has timedout, re-init the h/w */
+		/* controller has timed out, re-init the h/w */
 		dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
 		(void) init_hw(dev);
 		status = -ETIMEDOUT;
@@ -872,6 +872,8 @@
 	adap->owner	= THIS_MODULE;
 	adap->class	= I2C_CLASS_HWMON | I2C_CLASS_SPD;
 	adap->algo	= &nmk_i2c_algo;
+	snprintf(adap->name, sizeof(adap->name),
+		 "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);
 
 	/* fetch the controller id */
 	adap->nr	= pdev->id;
@@ -891,8 +893,8 @@
 		goto err_init_hw;
 	}
 
-	dev_dbg(&pdev->dev, "initialize I2C%d bus on virtual "
-		"base %p\n", pdev->id, dev->virtbase);
+	dev_info(&pdev->dev, "initialize %s on virtual "
+		"base %p\n", adap->name, dev->virtbase);
 
 	ret = i2c_add_numbered_adapter(adap);
 	if (ret) {
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 0070371..61653f0 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -9,6 +9,41 @@
  * kind, whether express or implied.
  */
 
+/*
+ * Device tree configuration:
+ *
+ * Required properties:
+ * - compatible      : "opencores,i2c-ocores"
+ * - reg             : bus address start and address range size of device
+ * - interrupts      : interrupt number
+ * - regstep         : size of device registers in bytes
+ * - clock-frequency : frequency of bus clock in Hz
+ *
+ * Example:
+ *
+ *  i2c0: ocores@a0000000 {
+ *              compatible = "opencores,i2c-ocores";
+ *              reg = <0xa0000000 0x8>;
+ *              interrupts = <10>;
+ *
+ *              regstep = <1>;
+ *              clock-frequency = <20000000>;
+ *
+ * -- Devices connected on this I2C bus get
+ * -- defined here; address- and size-cells
+ * -- apply to these child devices
+ *
+ *              #address-cells = <1>;
+ *              #size-cells = <0>;
+ *
+ *              dummy@60 {
+ *                     compatible = "dummy";
+ *                     reg = <60>;
+ *              };
+ *  };
+ *
+ */
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -210,6 +245,32 @@
 	.algo		= &ocores_algorithm,
 };
 
+#ifdef CONFIG_OF
+static int ocores_i2c_of_probe(struct platform_device *pdev,
+				struct ocores_i2c *i2c)
+{
+	const __be32 *val;
+
+	val = of_get_property(pdev->dev.of_node, "regstep", NULL);
+	if (!val) {
+		dev_err(&pdev->dev, "Missing required parameter 'regstep'");
+		return -ENODEV;
+	}
+	i2c->regstep = be32_to_cpup(val);
+
+	val = of_get_property(pdev->dev.of_node, "clock-frequency", NULL);
+	if (!val) {
+		dev_err(&pdev->dev,
+			"Missing required parameter 'clock-frequency'");
+		return -ENODEV;
+	}
+	i2c->clock_khz = be32_to_cpup(val) / 1000;
+
+	return 0;
+}
+#else
+#define ocores_i2c_of_probe(pdev, i2c) -ENODEV
+#endif
 
 static int __devinit ocores_i2c_probe(struct platform_device *pdev)
 {
@@ -227,37 +288,41 @@
 	if (!res2)
 		return -ENODEV;
 
-	pdata = (struct ocores_i2c_platform_data*) pdev->dev.platform_data;
-	if (!pdata)
-		return -ENODEV;
-
-	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
 	if (!i2c)
 		return -ENOMEM;
 
-	if (!request_mem_region(res->start, resource_size(res),
-				pdev->name)) {
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+				     resource_size(res), pdev->name)) {
 		dev_err(&pdev->dev, "Memory region busy\n");
-		ret = -EBUSY;
-		goto request_mem_failed;
+		return -EBUSY;
 	}
 
-	i2c->base = ioremap(res->start, resource_size(res));
+	i2c->base = devm_ioremap_nocache(&pdev->dev, res->start,
+					 resource_size(res));
 	if (!i2c->base) {
 		dev_err(&pdev->dev, "Unable to map registers\n");
-		ret = -EIO;
-		goto map_failed;
+		return -EIO;
 	}
 
-	i2c->regstep = pdata->regstep;
-	i2c->clock_khz = pdata->clock_khz;
+	pdata = pdev->dev.platform_data;
+	if (pdata) {
+		i2c->regstep = pdata->regstep;
+		i2c->clock_khz = pdata->clock_khz;
+	} else {
+		ret = ocores_i2c_of_probe(pdev, i2c);
+		if (ret)
+			return ret;
+	}
+
 	ocores_init(i2c);
 
 	init_waitqueue_head(&i2c->wait);
-	ret = request_irq(res2->start, ocores_isr, 0, pdev->name, i2c);
+	ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0,
+			       pdev->name, i2c);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot claim IRQ\n");
-		goto request_irq_failed;
+		return ret;
 	}
 
 	/* hook up driver to tree */
@@ -265,36 +330,29 @@
 	i2c->adap = ocores_adapter;
 	i2c_set_adapdata(&i2c->adap, i2c);
 	i2c->adap.dev.parent = &pdev->dev;
+#ifdef CONFIG_OF
+	i2c->adap.dev.of_node = pdev->dev.of_node;
+#endif
 
 	/* add i2c adapter to i2c tree */
 	ret = i2c_add_adapter(&i2c->adap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to add adapter\n");
-		goto add_adapter_failed;
+		return ret;
 	}
 
 	/* add in known devices to the bus */
-	for (i = 0; i < pdata->num_devices; i++)
-		i2c_new_device(&i2c->adap, pdata->devices + i);
+	if (pdata) {
+		for (i = 0; i < pdata->num_devices; i++)
+			i2c_new_device(&i2c->adap, pdata->devices + i);
+	}
 
 	return 0;
-
-add_adapter_failed:
-	free_irq(res2->start, i2c);
-request_irq_failed:
-	iounmap(i2c->base);
-map_failed:
-	release_mem_region(res->start, resource_size(res));
-request_mem_failed:
-	kfree(i2c);
-
-	return ret;
 }
 
 static int __devexit ocores_i2c_remove(struct platform_device* pdev)
 {
 	struct ocores_i2c *i2c = platform_get_drvdata(pdev);
-	struct resource *res;
 
 	/* disable i2c logic */
 	oc_setreg(i2c, OCI2C_CONTROL, oc_getreg(i2c, OCI2C_CONTROL)
@@ -304,18 +362,6 @@
 	i2c_del_adapter(&i2c->adap);
 	platform_set_drvdata(pdev, NULL);
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res)
-		free_irq(res->start, i2c);
-
-	iounmap(i2c->base);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
-		release_mem_region(res->start, resource_size(res));
-
-	kfree(i2c);
-
 	return 0;
 }
 
@@ -344,6 +390,16 @@
 #define ocores_i2c_resume	NULL
 #endif
 
+#ifdef CONFIG_OF
+static struct of_device_id ocores_i2c_match[] = {
+	{
+		.compatible = "opencores,i2c-ocores",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ocores_i2c_match);
+#endif
+
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:ocores-i2c");
 
@@ -355,6 +411,9 @@
 	.driver  = {
 		.owner = THIS_MODULE,
 		.name = "ocores-i2c",
+#ifdef CONFIG_OF
+		.of_match_table = ocores_i2c_match,
+#endif
 	},
 };
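
A hedged sketch (not part of the patch) of the devm_* pattern the ocores conversion leans on: resources registered against &pdev->dev are released automatically when probe fails or the device goes away, which is why the old add_adapter_failed/request_irq_failed/... unwind labels and most of the manual cleanup in ocores_i2c_remove() could be dropped. The "example" names, private struct and no-op ISR are hypothetical.

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/io.h>

struct example_priv {
	void __iomem *base;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	struct example_priv *priv;

	if (!mem || irq < 0)
		return -ENODEV;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
				     resource_size(mem), pdev->name))
		return -EBUSY;

	priv->base = devm_ioremap_nocache(&pdev->dev, mem->start,
					  resource_size(mem));
	if (!priv->base)
		return -EIO;

	/* region, mapping, IRQ and priv are all freed by the driver core */
	return devm_request_irq(&pdev->dev, irq, example_isr, 0,
				pdev->name, priv);
}
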
 
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 9d09083..58a58c7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -378,9 +378,7 @@
 			 * REVISIT: Some wkup sources might not be needed.
 			 */
 			dev->westate = OMAP_I2C_WE_ALL;
-			if (dev->rev < OMAP_I2C_REV_ON_4430)
-				omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
-								dev->westate);
+			omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
 		}
 	}
 	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -598,12 +596,8 @@
 	 * REVISIT: We should abort the transfer on signals, but the bus goes
 	 * into arbitration and we're currently unable to recover from it.
 	 */
-	if (dev->set_mpu_wkup_lat != NULL)
-		dev->set_mpu_wkup_lat(dev->dev, dev->latency);
 	r = wait_for_completion_timeout(&dev->cmd_complete,
 					OMAP_I2C_TIMEOUT);
-	if (dev->set_mpu_wkup_lat != NULL)
-		dev->set_mpu_wkup_lat(dev->dev, -1);
 	dev->buf_len = 0;
 	if (r < 0)
 		return r;
@@ -654,12 +648,18 @@
 	if (r < 0)
 		goto out;
 
+	if (dev->set_mpu_wkup_lat != NULL)
+		dev->set_mpu_wkup_lat(dev->dev, dev->latency);
+
 	for (i = 0; i < num; i++) {
 		r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
 		if (r != 0)
 			break;
 	}
 
+	if (dev->set_mpu_wkup_lat != NULL)
+		dev->set_mpu_wkup_lat(dev->dev, -1);
+
 	if (r == 0)
 		r = num;
 
@@ -845,11 +845,15 @@
 			dev_err(dev->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
 		}
+		/*
+		 * ProDB0017052: Clear ARDY bit twice
+		 */
 		if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
 					OMAP_I2C_STAT_AL)) {
 			omap_i2c_ack_stat(dev, stat &
 				(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
-				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+				OMAP_I2C_STAT_ARDY));
 			omap_i2c_complete_cmd(dev, err);
 			return IRQ_HANDLED;
 		}
@@ -1135,12 +1139,41 @@
 	return 0;
 }
 
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+			dev->bus->pm->runtime_suspend(dev);
+
+	return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+			dev->bus->pm->runtime_resume(dev);
+
+	return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+	.suspend = omap_i2c_suspend,
+	.resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
 static struct platform_driver omap_i2c_driver = {
 	.probe		= omap_i2c_probe,
 	.remove		= omap_i2c_remove,
 	.driver		= {
 		.name	= "omap_i2c",
 		.owner	= THIS_MODULE,
+		.pm	= OMAP_I2C_PM_OPS,
 	},
 };
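
A hedged sketch (not from the patch) of the dev_pm_ops shape the OMAP change introduces: the callbacks hang off .driver.pm instead of the legacy platform_driver .suspend/.resume pair, so the PM core and runtime PM find them in one place. The "example" names and empty callbacks are placeholders.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* quiesce the controller, save context */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore context, re-init the hardware */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend	= example_suspend,
	.resume		= example_resume,
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,
	},
};
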
 
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be45..266135d 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@
 	adap->owner = THIS_MODULE;
 	/* DDC class but actually often used for more generic I2C */
 	adap->class = I2C_CLASS_DDC;
-	strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+	strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
 		sizeof(adap->name));
 	adap->nr = bus_nr;
 	adap->algo = &stu300_algo;
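
A hedged note (not from the patch) on the strncpy() to strlcpy() switch above: strncpy() does not guarantee a trailing NUL when the source fills the destination, while strlcpy() always terminates, truncating if necessary, which is what the fixed-size adapter name buffer needs. The helper below is purely illustrative.

#include <linux/string.h>

static void example_set_label(char *dst, size_t dst_size, const char *src)
{
	/* always NUL-terminated, silently truncated if src is too long */
	strlcpy(dst, src, dst_size);
}
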
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 53fab51..986e5f6 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -40,6 +41,7 @@
 
 MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
 MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver");
+MODULE_ALIAS("platform:cs5535-smb");
 MODULE_LICENSE("GPL");
 
 #define MAX_DEVICES 4
@@ -84,10 +86,6 @@
 	u8 *ptr;
 	char needs_reset;
 	unsigned len;
-
-	/* PCI device info */
-	struct pci_dev *pdev;
-	int bar;
 };
 
 /* Register Definitions */
@@ -391,7 +389,7 @@
 static struct scx200_acb_iface *scx200_acb_list;
 static DEFINE_MUTEX(scx200_acb_list_mutex);
 
-static __init int scx200_acb_probe(struct scx200_acb_iface *iface)
+static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
 {
 	u8 val;
 
@@ -427,7 +425,7 @@
 	return 0;
 }
 
-static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
+static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
 		struct device *dev, int index)
 {
 	struct scx200_acb_iface *iface;
@@ -452,7 +450,7 @@
 	return iface;
 }
 
-static int __init scx200_acb_create(struct scx200_acb_iface *iface)
+static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
 {
 	struct i2c_adapter *adapter;
 	int rc;
@@ -472,67 +470,31 @@
 		return -ENODEV;
 	}
 
-	mutex_lock(&scx200_acb_list_mutex);
-	iface->next = scx200_acb_list;
-	scx200_acb_list = iface;
-	mutex_unlock(&scx200_acb_list_mutex);
+	if (!adapter->dev.parent) {
+		/* If there's no dev, we're tracking (ISA) ifaces manually */
+		mutex_lock(&scx200_acb_list_mutex);
+		iface->next = scx200_acb_list;
+		scx200_acb_list = iface;
+		mutex_unlock(&scx200_acb_list_mutex);
+	}
 
 	return 0;
 }
 
-static __init int scx200_create_pci(const char *text, struct pci_dev *pdev,
-		int bar)
+static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
+		unsigned long base, int index, struct device *dev)
 {
 	struct scx200_acb_iface *iface;
 	int rc;
 
-	iface = scx200_create_iface(text, &pdev->dev, 0);
+	iface = scx200_create_iface(text, dev, index);
 
 	if (iface == NULL)
-		return -ENOMEM;
-
-	iface->pdev = pdev;
-	iface->bar = bar;
-
-	rc = pci_enable_device_io(iface->pdev);
-	if (rc)
-		goto errout_free;
-
-	rc = pci_request_region(iface->pdev, iface->bar, iface->adapter.name);
-	if (rc) {
-		printk(KERN_ERR NAME ": can't allocate PCI BAR %d\n",
-				iface->bar);
-		goto errout_free;
-	}
-
-	iface->base = pci_resource_start(iface->pdev, iface->bar);
-	rc = scx200_acb_create(iface);
-
-	if (rc == 0)
-		return 0;
-
-	pci_release_region(iface->pdev, iface->bar);
-	pci_dev_put(iface->pdev);
- errout_free:
-	kfree(iface);
-	return rc;
-}
-
-static int __init scx200_create_isa(const char *text, unsigned long base,
-		int index)
-{
-	struct scx200_acb_iface *iface;
-	int rc;
-
-	iface = scx200_create_iface(text, NULL, index);
-
-	if (iface == NULL)
-		return -ENOMEM;
+		return NULL;
 
 	if (!request_region(base, 8, iface->adapter.name)) {
 		printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
 		       base, base + 8 - 1);
-		rc = -EBUSY;
 		goto errout_free;
 	}
 
@@ -540,115 +502,113 @@
 	rc = scx200_acb_create(iface);
 
 	if (rc == 0)
-		return 0;
+		return iface;
 
 	release_region(base, 8);
  errout_free:
 	kfree(iface);
-	return rc;
+	return NULL;
 }
 
-/* Driver data is an index into the scx200_data array that indicates
- * the name and the BAR where the I/O address resource is located.  ISA
- * devices are flagged with a bar value of -1 */
+static int __devinit scx200_probe(struct platform_device *pdev)
+{
+	struct scx200_acb_iface *iface;
+	struct resource *res;
 
-static const struct pci_device_id scx200_pci[] __initconst = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
-	  .driver_data = 0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
-	  .driver_data = 0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
-	  .driver_data = 1 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
-	  .driver_data = 2 },
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
+		return -ENODEV;
+	}
+
+	iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
+	if (!iface)
+		return -EIO;
+
+	dev_info(&pdev->dev, "SCx200 device '%s' registered\n",
+			iface->adapter.name);
+	platform_set_drvdata(pdev, iface);
+
+	return 0;
+}
+
+static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface)
+{
+	i2c_del_adapter(&iface->adapter);
+	release_region(iface->base, 8);
+	kfree(iface);
+}
+
+static int __devexit scx200_remove(struct platform_device *pdev)
+{
+	struct scx200_acb_iface *iface;
+
+	iface = platform_get_drvdata(pdev);
+	platform_set_drvdata(pdev, NULL);
+	scx200_cleanup_iface(iface);
+
+	return 0;
+}
+
+static struct platform_driver scx200_pci_drv = {
+	.driver = {
+		.name = "cs5535-smb",
+		.owner = THIS_MODULE,
+	},
+	.probe = scx200_probe,
+	.remove = __devexit_p(scx200_remove),
+};
+
+static const struct pci_device_id scx200_isa[] __initconst = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
 	{ 0, }
 };
 
-static struct {
-	const char *name;
-	int bar;
-} scx200_data[] = {
-	{ "SCx200", -1 },
-	{ "CS5535",  0 },
-	{ "CS5536",  0 }
-};
-
-static __init int scx200_scan_pci(void)
+static __init void scx200_scan_isa(void)
 {
-	int data, dev;
-	int rc = -ENODEV;
-	struct pci_dev *pdev;
+	int i;
 
-	for(dev = 0; dev < ARRAY_SIZE(scx200_pci); dev++) {
-		pdev = pci_get_device(scx200_pci[dev].vendor,
-				scx200_pci[dev].device, NULL);
+	if (!pci_dev_present(scx200_isa))
+		return;
 
-		if (pdev == NULL)
+	for (i = 0; i < MAX_DEVICES; ++i) {
+		if (base[i] == 0)
 			continue;
 
-		data = scx200_pci[dev].driver_data;
-
-		/* if .bar is greater or equal to zero, this is a
-		 * PCI device - otherwise, we assume
-		   that the ports are ISA based
-		*/
-
-		if (scx200_data[data].bar >= 0)
-			rc = scx200_create_pci(scx200_data[data].name, pdev,
-					scx200_data[data].bar);
-		else {
-			int i;
-
-			pci_dev_put(pdev);
-			for (i = 0; i < MAX_DEVICES; ++i) {
-				if (base[i] == 0)
-					continue;
-
-				rc = scx200_create_isa(scx200_data[data].name,
-						base[i],
-						i);
-			}
-		}
-
-		break;
+		/* XXX: should we care about failures? */
+		scx200_create_dev("SCx200", base[i], i, NULL);
 	}
-
-	return rc;
 }
 
 static int __init scx200_acb_init(void)
 {
-	int rc;
-
 	pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n");
 
-	rc = scx200_scan_pci();
+	/* First scan for ISA-based devices */
+	scx200_scan_isa();	/* XXX: should we care about errors? */
 
 	/* If at least one bus was created, init must succeed */
 	if (scx200_acb_list)
 		return 0;
-	return rc;
+
+	/* No ISA devices; register the platform driver for PCI-based devices */
+	return platform_driver_register(&scx200_pci_drv);
 }
 
 static void __exit scx200_acb_cleanup(void)
 {
 	struct scx200_acb_iface *iface;
 
+	platform_driver_unregister(&scx200_pci_drv);
+
 	mutex_lock(&scx200_acb_list_mutex);
 	while ((iface = scx200_acb_list) != NULL) {
 		scx200_acb_list = iface->next;
 		mutex_unlock(&scx200_acb_list_mutex);
 
-		i2c_del_adapter(&iface->adapter);
+		scx200_cleanup_iface(iface);
 
-		if (iface->pdev) {
-			pci_release_region(iface->pdev, iface->bar);
-			pci_dev_put(iface->pdev);
-		}
-		else
-			release_region(iface->base, 8);
-
-		kfree(iface);
 		mutex_lock(&scx200_acb_list_mutex);
 	}
 	mutex_unlock(&scx200_acb_list_mutex);
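
A hedged sketch (not part of the patch) of the minimal platform-driver skeleton scx200_acb now uses for the CS5535/CS5536 path: the real driver binds to a "cs5535-smb" platform device registered elsewhere in the kernel, and the MODULE_ALIAS("platform:...") line added above is what lets udev autoload the module when such a device appears. The "example-smb" name below is hypothetical.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound\n");
	return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_smb_driver = {
	.driver = {
		.name	= "example-smb",
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),
};

static int __init example_init(void)
{
	return platform_driver_register(&example_smb_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_smb_driver);
}
module_exit(example_exit);

MODULE_ALIAS("platform:example-smb");
MODULE_LICENSE("GPL");
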
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c7db698..f0bd5bc 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -196,88 +196,60 @@
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->suspend ? pm->suspend(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_SUSPEND);
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_SUSPEND);
 }
 
 static int i2c_device_pm_resume(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
 
 	if (pm)
-		ret = pm->resume ? pm->resume(dev) : 0;
+		return pm_generic_resume(dev);
 	else
-		ret = i2c_legacy_resume(dev);
-
-	return ret;
+		return i2c_legacy_resume(dev);
 }
 
 static int i2c_device_pm_freeze(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->freeze ? pm->freeze(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_FREEZE);
+	if (pm)
+		return pm_generic_freeze(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_FREEZE);
 }
 
 static int i2c_device_pm_thaw(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->thaw ? pm->thaw(dev) : 0;
-	}
-
-	return i2c_legacy_resume(dev);
+	if (pm)
+		return pm_generic_thaw(dev);
+	else
+		return i2c_legacy_resume(dev);
 }
 
 static int i2c_device_pm_poweroff(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->poweroff ? pm->poweroff(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
+	if (pm)
+		return pm_generic_poweroff(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
 }
 
 static int i2c_device_pm_restore(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
 
 	if (pm)
-		ret = pm->restore ? pm->restore(dev) : 0;
+		return pm_generic_restore(dev);
 	else
-		ret = i2c_legacy_resume(dev);
-
-	if (!ret) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
-	return ret;
+		return i2c_legacy_resume(dev);
 }
 #else /* !CONFIG_PM_SLEEP */
 #define i2c_device_pm_suspend	NULL
@@ -1021,6 +993,14 @@
 static int __unregister_client(struct device *dev, void *dummy)
 {
 	struct i2c_client *client = i2c_verify_client(dev);
+	if (client && strcmp(client->name, "dummy"))
+		i2c_unregister_device(client);
+	return 0;
+}
+
+static int __unregister_dummy(struct device *dev, void *dummy)
+{
+	struct i2c_client *client = i2c_verify_client(dev);
 	if (client)
 		i2c_unregister_device(client);
 	return 0;
@@ -1075,8 +1055,12 @@
 	mutex_unlock(&adap->userspace_clients_lock);
 
 	/* Detach any active clients. This can't fail, thus we do not
-	   checking the returned value. */
+	 * check the returned value. This is a two-pass process, because
+	 * we can't remove the dummy devices during the first pass: they
+	 * could have been instantiated by real devices wishing to clean
+	 * them up properly, so we give them a chance to do that first. */
 	res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+	res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
 
 #ifdef CONFIG_I2C_COMPAT
 	class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
@@ -1140,6 +1124,14 @@
 	if (res)
 		return res;
 
+	/* Drivers should switch to dev_pm_ops instead. */
+	if (driver->suspend)
+		pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
+			driver->driver.name);
+	if (driver->resume)
+		pr_warn("i2c-core: driver [%s] using legacy resume method\n",
+			driver->driver.name);
+
 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
 	INIT_LIST_HEAD(&driver->clients);
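
A hedged sketch (not from the patch) of the situation the two-pass removal above protects: a client driver that claims a neighbouring address with i2c_new_dummy() is expected to unregister that dummy itself in its remove() path, so the core removes real clients first and only sweeps up leftover "dummy" clients in the second pass. The chip layout, names and the addr + 1 convention are invented for illustration.

#include <linux/i2c.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_chip {
	struct i2c_client *second_page;	/* dummy client at addr + 1 */
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_chip *chip;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->second_page = i2c_new_dummy(client->adapter, client->addr + 1);
	if (!chip->second_page) {
		kfree(chip);
		return -EBUSY;
	}

	i2c_set_clientdata(client, chip);
	return 0;
}

static int example_remove(struct i2c_client *client)
{
	struct example_chip *chip = i2c_get_clientdata(client);

	i2c_unregister_device(chip->second_page);	/* clean up our dummy */
	kfree(chip);
	return 0;
}
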
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 98ccfeb..9827c5e 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -134,7 +134,7 @@
 	  module will be called ide-cd.
 
 config BLK_DEV_IDECD_VERBOSE_ERRORS
-	bool "Verbose error logging for IDE/ATAPI CDROM driver" if EMBEDDED
+	bool "Verbose error logging for IDE/ATAPI CDROM driver" if EXPERT
 	depends on BLK_DEV_IDECD
 	default y
 	help
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 56ac09d..4a5c4a4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -59,7 +59,10 @@
 #include <linux/hrtimer.h>	/* ktime_get_real() */
 #include <trace/events/power.h>
 #include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include <asm/mwait.h>
+#include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
@@ -73,6 +76,7 @@
 
 static unsigned int mwait_substates;
 
+#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
 /* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
 static unsigned int lapic_timer_reliable_states = (1 << 1);	 /* Default to only C1 */
 
@@ -82,6 +86,20 @@
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
+ * Set this flag for states where the HW flushes the TLB for us
+ * and so we don't need cross-calls to keep it consistent.
+ * If this flag is set, SW flushes the TLB, so even if the
+ * HW doesn't do the flushing, this flag is safe to use.
+ */
+#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000
+
+/*
  * States are indexed by the cstate number,
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
@@ -122,7 +140,7 @@
 		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
-		.target_residency = 4,
+		.target_residency = 1,
 		.enter = &intel_idle },
 	{ /* MWAIT C2 */
 		.name = "SNB-C3",
@@ -130,7 +148,7 @@
 		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 80,
-		.target_residency = 160,
+		.target_residency = 211,
 		.enter = &intel_idle },
 	{ /* MWAIT C3 */
 		.name = "SNB-C6",
@@ -138,7 +156,7 @@
 		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 104,
-		.target_residency = 208,
+		.target_residency = 345,
 		.enter = &intel_idle },
 	{ /* MWAIT C4 */
 		.name = "SNB-C7",
@@ -146,7 +164,7 @@
 		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 109,
-		.target_residency = 300,
+		.target_residency = 345,
 		.enter = &intel_idle },
 };
 
@@ -220,8 +238,6 @@
 	kt_before = ktime_get_real();
 
 	stop_critical_timings();
-	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
-	trace_cpu_idle((eax >> 4) + 1, cpu);
 	if (!need_resched()) {
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -243,6 +259,44 @@
 	return usec_delta;
 }
 
+static void __setup_broadcast_timer(void *arg)
+{
+	unsigned long reason = (unsigned long)arg;
+	int cpu = smp_processor_id();
+
+	reason = reason ?
+		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+	clockevents_notify(reason, &cpu);
+}
+
+static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	int hotcpu = (unsigned long)hcpu;
+
+	switch (action & 0xf) {
+	case CPU_ONLINE:
+		smp_call_function_single(hotcpu, __setup_broadcast_timer,
+			(void *)true, 1);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block setup_broadcast_notifier = {
+	.notifier_call = setup_broadcast_cpuhp_notify,
+};
+
+static void auto_demotion_disable(void *dummy)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+	msr_bits &= ~auto_demotion_disable_flags;
+	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
 /*
  * intel_idle_probe()
  */
@@ -286,11 +340,17 @@
 	case 0x25:	/* Westmere */
 	case 0x2C:	/* Westmere */
 		cpuidle_state_table = nehalem_cstates;
+		auto_demotion_disable_flags =
+			(NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
 		break;
 
 	case 0x1C:	/* 28 - Atom Processor */
+		cpuidle_state_table = atom_cstates;
+		break;
+
 	case 0x26:	/* 38 - Lincroft Atom Processor */
 		cpuidle_state_table = atom_cstates;
+		auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
 		break;
 
 	case 0x2A:	/* SNB */
@@ -305,7 +365,11 @@
 	}
 
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
-		lapic_timer_reliable_states = 0xFFFFFFFF;
+		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+	else {
+		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+		register_cpu_notifier(&setup_broadcast_notifier);
+	}
 
 	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
 		" model 0x%X\n", boot_cpu_data.x86_model);
@@ -394,6 +458,8 @@
 			return -EIO;
 		}
 	}
+	if (auto_demotion_disable_flags)
+		smp_call_function(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
@@ -403,6 +469,10 @@
 {
 	int retval;
 
+	/* Do not load intel_idle at all for now if idle= is passed */
+	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+		return -ENODEV;
+
 	retval = intel_idle_probe();
 	if (retval)
 		return retval;
@@ -428,6 +498,11 @@
 	intel_idle_cpuidle_devices_uninit();
 	cpuidle_unregister_driver(&intel_idle_driver);
 
+	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+		unregister_cpu_notifier(&setup_broadcast_notifier);
+	}
+
 	return;
 }
 
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 6888356..f9ba7d7 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@
 			INIT_WORK(&work->work, ib_cache_task);
 			work->device   = event->device;
 			work->port_num = event->element.port_num;
-			schedule_work(&work->work);
+			queue_work(ib_wq, &work->work);
 		}
 	}
 }
@@ -368,7 +368,7 @@
 	int p;
 
 	ib_unregister_event_handler(&device->cache.event_handler);
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a19effa..f793bf2 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
@@ -52,6 +51,9 @@
 	void *            data;
 };
 
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
@@ -718,6 +720,10 @@
 {
 	int ret;
 
+	ib_wq = alloc_workqueue("infiniband", 0, 0);
+	if (!ib_wq)
+		return -ENOMEM;
+
 	ret = ib_sysfs_setup();
 	if (ret)
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
 		ib_sysfs_cleanup();
+		destroy_workqueue(ib_wq);
 	}
 
 	return ret;
@@ -736,7 +743,7 @@
 	ib_cache_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
-	flush_scheduled_work();
+	destroy_workqueue(ib_wq);
 }
 
 module_init(ib_core_init);
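
A hedged sketch (not part of the patch) of the private-workqueue pattern the InfiniBand core adopts here: work is queued on its own ib_wq instead of the shared kernel queue, so teardown can flush or destroy exactly this queue rather than calling flush_scheduled_work() and waiting on unrelated work. The "example" names and no-op handler are placeholders.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	/* deferred processing goes here */
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_wq_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_wq_exit(void)
{
	/* waits for pending work, then frees the queue */
	destroy_workqueue(example_wq);
}
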
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 91a6603..fbbfa24 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@
 		port->sm_ah = NULL;
 		spin_unlock_irqrestore(&port->ah_lock, flags);
 
-		schedule_work(&sa_dev->port[event->element.port_num -
+		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
 					    sa_dev->start_port].update_task);
 	}
 }
@@ -1079,7 +1079,7 @@
 
 	ib_unregister_event_handler(&sa_dev->event_handler);
 
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
 		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ca12acf..ec1e9da 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -636,6 +636,16 @@
 	}
 }
 
+static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
+			       struct rdma_route *route)
+{
+	struct rdma_dev_addr *dev_addr;
+
+	dev_addr = &route->addr.dev_addr;
+	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
+	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
+}
+
 static ssize_t ucma_query_route(struct ucma_file *file,
 				const char __user *inbuf,
 				int in_len, int out_len)
@@ -670,8 +680,10 @@
 
 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
-	if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
-		switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+	case RDMA_TRANSPORT_IB:
+		switch (rdma_port_get_link_layer(ctx->cm_id->device,
+			ctx->cm_id->port_num)) {
 		case IB_LINK_LAYER_INFINIBAND:
 			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
 			break;
@@ -681,6 +693,12 @@
 		default:
 			break;
 		}
+		break;
+	case RDMA_TRANSPORT_IWARP:
+		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+		break;
+	default:
+		break;
 	}
 
 out:
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 415e186..b645e55 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@
 			umem->mm   = mm;
 			umem->diff = diff;
 
-			schedule_work(&umem->work);
+			queue_work(ib_wq, &umem->work);
 			return;
 		}
 	} else
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 85cfae4..8c81992 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -459,13 +459,12 @@
 	     IB_DEVICE_MEM_WINDOW);
 
 	/* Allocate the qptr_array */
-	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+	c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
 	if (!c2dev->qptr_array) {
 		return -ENOMEM;
 	}
 
-	/* Inialize the qptr_array */
-	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+	/* Initialize the qptr_array */
 	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
 	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
 	c2dev->qptr_array[2] = (void *) &c2dev->aeq;
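
A hedged aside (not from the patch): vzalloc() is vmalloc() plus zeroing, so conversions like the one above (and the ehca and ipath ones later in this diff) can drop the follow-up memset() without changing behaviour. The helper and element count below are arbitrary.

#include <linux/types.h>
#include <linux/vmalloc.h>

static void **example_alloc_ptr_table(size_t nr_entries)
{
	/* zeroed on success, NULL on failure - no memset() required */
	return vzalloc(nr_entries * sizeof(void *));
}
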
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 9ce7819b..2ec716f 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -107,7 +107,7 @@
 	r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
 	if (r) {
 		init_waitqueue_head(&r->wait_object);
-		r->reply_msg = (u64) NULL;
+		r->reply_msg = 0;
 		r->event = 0;
 		r->cm_id = NULL;
 		r->qp = NULL;
@@ -123,7 +123,7 @@
  */
 void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
-	r->reply_msg = (u64) NULL;
+	r->reply_msg = 0;
 	if (atomic_dec_and_test(&r->refcnt)) {
 		kfree(r);
 	}
@@ -151,7 +151,7 @@
 void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
 	if (atomic_dec_and_test(&r->refcnt)) {
-		if (r->reply_msg != (u64) NULL)
+		if (r->reply_msg != 0)
 			vq_repbuf_free(c2dev,
 				       (void *) (unsigned long) r->reply_msg);
 		kfree(r);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b..c3f5aca 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 4bb997a..83d2e19 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -689,7 +689,7 @@
  * A T3 WQ implements both the SQ and RQ.
  */
 struct t3_wq {
-	union t3_wr *queue;		/* DMA accessable memory */
+	union t3_wr *queue;		/* DMA accessible memory */
 	dma_addr_t dma_addr;		/* DMA address for HW */
 	DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
 	u32 error;			/* 1 once we go to ERROR */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49..c5406da 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@
 int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 		      struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137..1b4cd09 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0dc62b1..8b00e6c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -380,7 +380,7 @@
 					  16)) | FW_WR_FLOWID(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-	flowc->mnemval[0].val = cpu_to_be32(0);
+	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
 	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cd..2fe19ec 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -46,7 +46,6 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <linux/kfifo.h>
-#include <linux/mutex.h>
 
 #include <asm/byteorder.h>
 
@@ -760,7 +759,6 @@
 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb25..4f0be25 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -220,7 +220,7 @@
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
 		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(3) |
+		V_FW_RI_RES_WR_FBMAX(2) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
 		V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -243,7 +243,7 @@
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
 		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(3) |
+		V_FW_RI_RES_WR_FBMAX(2) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
 		V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -892,36 +892,6 @@
 	}
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-	union t4_wr *wqe;
-	struct sk_buff *skb;
-	u8 len16;
-
-	PDBG("%s enter\n", __func__);
-	skb = alloc_skb(40, GFP_KERNEL);
-	if (!skb) {
-		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-	memset(wqe, 0, sizeof wqe->read);
-	wqe->read.r2 = cpu_to_be64(0);
-	wqe->read.stag_sink = cpu_to_be32(1);
-	wqe->read.to_sink_hi = cpu_to_be32(0);
-	wqe->read.to_sink_lo = cpu_to_be32(1);
-	wqe->read.stag_src = cpu_to_be32(1);
-	wqe->read.plen = cpu_to_be32(0);
-	wqe->read.to_src_hi = cpu_to_be32(0);
-	wqe->read.to_src_lo = cpu_to_be32(1);
-	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   gfp_t gfp)
 {
@@ -1029,7 +999,6 @@
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	c4iw_init_wr_wait(&ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
@@ -1125,7 +1094,6 @@
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 1596e30..1898d6e 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -222,15 +222,14 @@
 	queue->small_page = NULL;
 
 	/* allocate queue page pointers */
-	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
-		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
 		if (!queue->queue_pages) {
 			ehca_gen_err("Couldn't allocate queue page list");
 			return 0;
 		}
 	}
-	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
 	/* allocate actual queue pages */
 	if (is_small) {
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc..47db4bf 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -199,12 +199,11 @@
 		goto bail;
 	}
 
-	dd = vmalloc(sizeof(*dd));
+	dd = vzalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
-	memset(dd, 0, sizeof(*dd));
 	dd->ipath_unit = -1;
 
 	spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -530,9 +529,8 @@
 	for (j = 0; j < 6; j++) {
 		if (!pdev->resource[j].start)
 			continue;
-		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
-			   j, (unsigned long long)pdev->resource[j].start,
-			   (unsigned long long)pdev->resource[j].end,
+		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+			   j, &pdev->resource[j],
 			   (unsigned long long)pci_resource_len(pdev, j));
 	}
 
@@ -757,7 +755,7 @@
 	 */
 	ipath_shutdown_device(dd);
 
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	if (dd->verbs_dev)
 		ipath_unregister_ib_device(dd->verbs_dev);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 9292a15..6d4b29c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1530,7 +1530,7 @@
 	}
 
 	num_subports = uinfo->spu_subport_cnt;
-	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
 	if (!pd->subport_uregbase) {
 		ret = -ENOMEM;
 		goto bail;
@@ -1538,13 +1538,13 @@
 	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
 	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
 		     sizeof(u32), PAGE_SIZE) * num_subports;
-	pd->subport_rcvhdr_base = vmalloc(size);
+	pd->subport_rcvhdr_base = vzalloc(size);
 	if (!pd->subport_rcvhdr_base) {
 		ret = -ENOMEM;
 		goto bail_ureg;
 	}
 
-	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
 					pd->port_rcvegrbuf_size *
 					num_subports);
 	if (!pd->subport_rcvegrbuf) {
@@ -1556,11 +1556,6 @@
 	pd->port_subport_id = uinfo->spu_subport_id;
 	pd->active_slaves = 1;
 	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
-	memset(pd->subport_rcvhdr_base, 0, size);
-	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
-				         pd->port_rcvegrbuf_size *
-				         num_subports);
 	goto bail;
 
 bail_rhdr:
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 7769382..fef0f42 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -442,7 +442,7 @@
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+	pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
 			sizeof(struct page *));
 	if (!pages) {
 		ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -461,9 +461,6 @@
 		return;
 	}
 
-	memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-	       sizeof(struct page *));
-
 	dd->ipath_pageshadow = pages;
 	dd->ipath_physshadow = addrs;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 5e86d73..bab9f74 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@
 	work->mm = mm;
 	work->num_pages = num_pages;
 
-	schedule_work(&work->work);
+	queue_work(ib_wq, &work->work);
 	return;
 
 bail_mm:
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2..e8df155 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@
 		cq->resize_buf = NULL;
 		cq->resize_umem = NULL;
 	} else {
+		struct mlx4_ib_cq_buf tmp_buf;
+		int tmp_cqe = 0;
+
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
 			mlx4_ib_cq_resize_copy_cqes(cq);
-			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			tmp_buf = cq->buf;
+			tmp_cqe = cq->ibcq.cqe;
 			cq->buf      = cq->resize_buf->buf;
 			cq->ibcq.cqe = cq->resize_buf->cqe;
 
@@ -408,6 +412,9 @@
 			cq->resize_buf = NULL;
 		}
 		spin_unlock_irq(&cq->lock);
+
+		if (tmp_cqe)
+			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
 	}
 
 	goto out;
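The cq.c hunk above moves mlx4_ib_free_cq_buf() out from under cq->lock: the old buffer and CQE count are stashed in locals while the spinlock is held, and the actual free runs only after spin_unlock_irq(), since the buffer teardown may sleep. A generic sketch of that stash-then-free-outside-the-lock shape, with hypothetical types and kfree() standing in for the real teardown:

	#include <linux/spinlock.h>
	#include <linux/slab.h>

	struct resizable {
		spinlock_t lock;	/* assumed spin_lock_init()'d at setup */
		void *buf;		/* current buffer */
		void *pending;		/* replacement prepared elsewhere */
	};

	static void commit_resize(struct resizable *r)
	{
		void *old = NULL;

		spin_lock_irq(&r->lock);
		if (r->pending) {
			old = r->buf;		/* remember, don't free yet */
			r->buf = r->pending;
			r->pending = NULL;
		}
		spin_unlock_irq(&r->lock);

		/* Teardown that may sleep must happen here, outside the
		 * spinlock; kfree(NULL) is a no-op if nothing was pending. */
		kfree(old);
	}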
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd6..57ffa50 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4c85224..c7a6213 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -623,8 +623,9 @@
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 
-	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
-				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+				    MLX4_PROTOCOL_IB);
 	if (err)
 		return err;
 
@@ -635,7 +636,7 @@
 	return 0;
 
 err_add:
-	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
 	return err;
 }
 
@@ -665,7 +666,7 @@
 	struct mlx4_ib_gid_entry *ge;
 
 	err = mlx4_multicast_detach(mdev->dev,
-				    &mqp->mqp, gid->raw);
+				    &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
 	if (err)
 		return err;
 
@@ -1005,7 +1006,8 @@
 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
 		goto err_pd;
 
-	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+				 PAGE_SIZE);
 	if (!ibdev->uar_map)
 		goto err_uar;
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index 03efc07..da314c3 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -7,7 +7,7 @@
 	  ("Tavor") and the MT25208 PCI Express HCA ("Arbel").
 
 config INFINIBAND_MTHCA_DEBUG
-	bool "Verbose debugging output" if EMBEDDED
+	bool "Verbose debugging output" if EXPERT
 	depends on INFINIBAND_MTHCA
 	default y
 	---help---
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 0aa0110..e4a08c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -146,7 +146,7 @@
 
 void mthca_start_catas_poll(struct mthca_dev *dev)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 
 	init_timer(&dev->catas_err.timer);
 	dev->catas_err.map  = NULL;
@@ -158,7 +158,8 @@
 	dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
 	if (!dev->catas_err.map) {
 		mthca_warn(dev, "couldn't map catastrophic error region "
-			   "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
+			   "at 0x%llx/0x%x\n", (unsigned long long) addr,
+			   dev->catas_err.size * 4);
 		return;
 	}
 
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f4ceecd..7bfa2a1 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -713,7 +713,7 @@
 
 static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 	u16 max_off = 0;
 	int i;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 8e8c728..76785c6 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -653,7 +653,7 @@
 			 unsigned long offset, unsigned long size,
 			 void __iomem **map)
 {
-	unsigned long base = pci_resource_start(dev->pdev, 0);
+	phys_addr_t base = pci_resource_start(dev->pdev, 0);
 
 	*map = ioremap(base + offset, size);
 	if (!*map)
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659..03a59534 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 5eee666..8a40cd53 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -790,7 +790,7 @@
 		goto err_uar_table_free;
 	}
 
-	dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!dev->kar) {
 		mthca_err(dev, "Couldn't map kernel access region, "
 			  "aborting.\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 065b208..44045c8 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -853,7 +853,7 @@
 
 int mthca_init_mr_table(struct mthca_dev *dev)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 	int mpts, mtts, err, i;
 
 	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 0c9f0aa..3b4ec32 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -144,6 +144,7 @@
 	struct nes_device *nesdev;
 	struct net_device *netdev;
 	struct nes_vnic *nesvnic;
+	unsigned int is_bonded;
 
 	nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n",
 		  &ifa->ifa_address, &ifa->ifa_mask);
@@ -152,7 +153,8 @@
 				nesdev, nesdev->netdev[0]->name);
 		netdev = nesdev->netdev[0];
 		nesvnic = netdev_priv(netdev);
-		if (netdev == event_netdev) {
+		is_bonded = (netdev->master == event_netdev);
+		if ((netdev == event_netdev) || is_bonded) {
 			if (nesvnic->rdma_enabled == 0) {
 				nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
 						" RDMA is not enabled.\n",
@@ -169,7 +171,10 @@
 					nes_manage_arp_cache(netdev, netdev->dev_addr,
 							ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
 					nesvnic->local_ipaddr = 0;
-					return NOTIFY_OK;
+					if (is_bonded)
+						continue;
+					else
+						return NOTIFY_OK;
 					break;
 				case NETDEV_UP:
 					nes_debug(NES_DBG_NETDEV, "event:UP\n");
@@ -178,15 +183,24 @@
 						nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
 						return NOTIFY_OK;
 					}
+					/* fall through */
+				case NETDEV_CHANGEADDR:
 					/* Add the address to the IP table */
-					nesvnic->local_ipaddr = ifa->ifa_address;
+					if (netdev->master)
+						nesvnic->local_ipaddr =
+							((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
+					else
+						nesvnic->local_ipaddr = ifa->ifa_address;
 
 					nes_write_indexed(nesdev,
 							NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
-							ntohl(ifa->ifa_address));
+							ntohl(nesvnic->local_ipaddr));
 					nes_manage_arp_cache(netdev, netdev->dev_addr,
 							ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
-					return NOTIFY_OK;
+					if (is_bonded)
+						continue;
+					else
+						return NOTIFY_OK;
 					break;
 				default:
 					break;
@@ -660,6 +674,8 @@
 	}
 	nes_notifiers_registered++;
 
+	INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
+
 	/* Initialize network devices */
 	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
 		goto bail7;
@@ -742,6 +758,7 @@
 	struct nes_device *nesdev = pci_get_drvdata(pcidev);
 	struct net_device *netdev;
 	int netdev_index = 0;
+	unsigned long flags;
 
 		if (nesdev->netdev_count) {
 			netdev = nesdev->netdev[netdev_index];
@@ -768,6 +785,14 @@
 	free_irq(pcidev->irq, nesdev);
 	tasklet_kill(&nesdev->dpc_tasklet);
 
+	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+	if (nesdev->link_recheck) {
+		spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+		cancel_delayed_work_sync(&nesdev->work);
+	} else {
+		spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+	}
+
 	/* Deallocate the Adapter Structure */
 	nes_destroy_adapter(nesdev->nesadapter);
 
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index b3d145e..6fe7987 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -268,6 +268,9 @@
 	u8                     napi_isr_ran;
 	u8                     disable_rx_flow_control;
 	u8                     disable_tx_flow_control;
+
+	struct delayed_work    work;
+	u8                     link_recheck;
 };
 
 
@@ -507,6 +510,7 @@
 void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
 int nes_destroy_cqp(struct nes_device *);
 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+void nes_recheck_link_status(struct work_struct *work);
 
 /* nes_nic.c */
 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 25ad0f9..009ec81 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1107,6 +1107,7 @@
 	struct flowi fl;
 	struct neighbour *neigh;
 	int rc = arpindex;
+	struct net_device *netdev;
 	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
 
 	memset(&fl, 0, sizeof fl);
@@ -1117,7 +1118,12 @@
 		return rc;
 	}
 
-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
+	if (nesvnic->netdev->master)
+		netdev = nesvnic->netdev->master;
+	else
+		netdev = nesvnic->netdev;
+
+	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
 	if (neigh) {
 		if (neigh->nud_state & NUD_VALID) {
 			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 1980a46..08c1948 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2608,6 +2608,15 @@
 						netif_start_queue(nesvnic->netdev);
 					nesvnic->linkup = 1;
 					netif_carrier_on(nesvnic->netdev);
+
+					spin_lock(&nesvnic->port_ibevent_lock);
+					if (nesvnic->of_device_registered) {
+						if (nesdev->iw_status == 0) {
+							nesdev->iw_status = 1;
+							nes_port_ibevent(nesvnic);
+						}
+					}
+					spin_unlock(&nesvnic->port_ibevent_lock);
 				}
 			}
 		} else {
@@ -2633,9 +2642,25 @@
 						netif_stop_queue(nesvnic->netdev);
 					nesvnic->linkup = 0;
 					netif_carrier_off(nesvnic->netdev);
+
+					spin_lock(&nesvnic->port_ibevent_lock);
+					if (nesvnic->of_device_registered) {
+						if (nesdev->iw_status == 1) {
+							nesdev->iw_status = 0;
+							nes_port_ibevent(nesvnic);
+						}
+					}
+					spin_unlock(&nesvnic->port_ibevent_lock);
 				}
 			}
 		}
+		if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
+			if (nesdev->link_recheck)
+				cancel_delayed_work(&nesdev->work);
+			nesdev->link_recheck = 1;
+			schedule_delayed_work(&nesdev->work,
+					      NES_LINK_RECHECK_DELAY);
+		}
 	}
 
 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
@@ -2643,6 +2668,84 @@
 	nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
 }
 
+void nes_recheck_link_status(struct work_struct *work)
+{
+	unsigned long flags;
+	struct nes_device *nesdev = container_of(work, struct nes_device, work.work);
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
+	struct nes_vnic *nesvnic;
+	u32 mac_index = nesdev->mac_index;
+	u16 phy_data;
+	u16 temp_phy_data;
+
+	spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+	/* check link status */
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
+	temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+	nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+	phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+	phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+
+	nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+		__func__, phy_data,
+		nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+
+	if (phy_data & 0x0004) {
+		nesadapter->mac_link_down[mac_index] = 0;
+		list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+			if (nesvnic->linkup == 0) {
+				printk(PFX "The Link is now up for port %s, netdev %p.\n",
+						nesvnic->netdev->name, nesvnic->netdev);
+				if (netif_queue_stopped(nesvnic->netdev))
+					netif_start_queue(nesvnic->netdev);
+				nesvnic->linkup = 1;
+				netif_carrier_on(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 0) {
+						nesdev->iw_status = 1;
+						nes_port_ibevent(nesvnic);
+					}
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
+			}
+		}
+
+	} else {
+		nesadapter->mac_link_down[mac_index] = 1;
+		list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+			if (nesvnic->linkup == 1) {
+				printk(PFX "The Link is now down for port %s, netdev %p.\n",
+						nesvnic->netdev->name, nesvnic->netdev);
+				if (!(netif_queue_stopped(nesvnic->netdev)))
+					netif_stop_queue(nesvnic->netdev);
+				nesvnic->linkup = 0;
+				netif_carrier_off(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 1) {
+						nesdev->iw_status = 0;
+						nes_port_ibevent(nesvnic);
+					}
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
+			}
+		}
+	}
+	if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX)
+		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+	else
+		nesdev->link_recheck = 0;
+
+	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
 
 
 static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
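nes_recheck_link_status() above is a self-rescheduling delayed work item with a bounded retry budget: each run re-reads the PHY, updates carrier state, and re-arms itself until NES_LINK_RECHECK_MAX attempts have passed. A stripped-down sketch of that shape, with hypothetical names and constants (the real handler additionally takes phy_lock and raises IB port events):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define RECHECK_DELAY	msecs_to_jiffies(50)	/* hypothetical */
	#define RECHECK_MAX	60			/* hypothetical */

	struct link_monitor {
		struct delayed_work work;	/* INIT_DELAYED_WORK()'d at probe */
		unsigned int tries;
	};

	static void link_recheck_fn(struct work_struct *work)
	{
		struct link_monitor *m =
			container_of(work, struct link_monitor, work.work);

		/* ... poll PHY registers and update carrier state here ... */

		if (m->tries++ < RECHECK_MAX)
			schedule_delayed_work(&m->work, RECHECK_DELAY);
		else
			m->tries = 0;	/* budget spent; stop until re-armed */
	}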
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 1204c34..d2abe07 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1193,6 +1193,8 @@
 
 struct nes_ib_device;
 
+#define NES_EVENT_DELAY msecs_to_jiffies(100)
+
 struct nes_vnic {
 	struct nes_ib_device *nesibdev;
 	u64 sq_full;
@@ -1247,6 +1249,10 @@
 	u32 lro_max_aggr;
 	struct net_lro_mgr lro_mgr;
 	struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
+	struct timer_list event_timer;
+	enum ib_event_type delayed_event;
+	enum ib_event_type last_dispatched_event;
+	spinlock_t port_ibevent_lock;
 };
 
 struct nes_ib_device {
@@ -1348,6 +1354,10 @@
 #define BAD_FRAME_OFFSET	64
 #define CQE_MAJOR_DRV		0x8000
 
+/* Used for link status recheck after interrupt processing */
+#define NES_LINK_RECHECK_DELAY	msecs_to_jiffies(50)
+#define NES_LINK_RECHECK_MAX	60
+
 #define nes_vlan_rx vlan_hwaccel_receive_skb
 #define nes_netif_rx netif_receive_skb
 
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c..2c9c193 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -144,6 +144,7 @@
 	u32 nic_active_bit;
 	u32 nic_active;
 	struct list_head *list_pos, *list_temp;
+	unsigned long flags;
 
 	assert(nesdev != NULL);
 
@@ -233,18 +234,36 @@
 		first_nesvnic = nesvnic;
 	}
 
-	if (nesvnic->of_device_registered) {
-		nesdev->iw_status = 1;
-		nesdev->nesadapter->send_term_ok = 1;
-		nes_port_ibevent(nesvnic);
-	}
-
 	if (first_nesvnic->linkup) {
 		/* Enable network packets */
 		nesvnic->linkup = 1;
 		netif_start_queue(netdev);
 		netif_carrier_on(netdev);
 	}
+
+	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+	if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
+		if (nesdev->link_recheck)
+			cancel_delayed_work(&nesdev->work);
+		nesdev->link_recheck = 1;
+		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+	}
+	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
+	if (nesvnic->of_device_registered) {
+		nesdev->nesadapter->send_term_ok = 1;
+		if (nesvnic->linkup == 1) {
+			if (nesdev->iw_status == 0) {
+				nesdev->iw_status = 1;
+				nes_port_ibevent(nesvnic);
+			}
+		} else {
+			nesdev->iw_status = 0;
+		}
+	}
+	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
 	napi_enable(&nesvnic->napi);
 	nesvnic->netdev_open = 1;
 
@@ -263,6 +282,7 @@
 	u32 nic_active;
 	struct nes_vnic *first_nesvnic = NULL;
 	struct list_head *list_pos, *list_temp;
+	unsigned long flags;
 
 	nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
 			nesvnic, nesdev, netdev, netdev->name);
@@ -315,12 +335,17 @@
 	nic_active &= nic_active_mask;
 	nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
 
-
+	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
 	if (nesvnic->of_device_registered) {
 		nesdev->nesadapter->send_term_ok = 0;
 		nesdev->iw_status = 0;
-		nes_port_ibevent(nesvnic);
+		if (nesvnic->linkup == 1)
+			nes_port_ibevent(nesvnic);
 	}
+	del_timer_sync(&nesvnic->event_timer);
+	nesvnic->event_timer.function = NULL;
+	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
 	nes_destroy_nic_qp(nesvnic);
 
 	nesvnic->netdev_open = 0;
@@ -908,8 +933,8 @@
 					nesvnic->nic_index &&
 					mc_index < max_pft_entries_avaiable) {
 						nes_debug(NES_DBG_NIC_RX,
-					"mc_index=%d skipping nic_index=%d,\
-					used for=%d \n", mc_index,
+					"mc_index=%d skipping nic_index=%d, "
+					"used for=%d \n", mc_index,
 					nesvnic->nic_index,
 					nesadapter->pft_mcast_map[mc_index]);
 				mc_index++;
@@ -1750,7 +1775,10 @@
 		nesvnic->rdma_enabled = 0;
 	}
 	nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
+	init_timer(&nesvnic->event_timer);
+	nesvnic->event_timer.function = NULL;
 	spin_lock_init(&nesvnic->tx_lock);
+	spin_lock_init(&nesvnic->port_ibevent_lock);
 	nesdev->netdev[nesdev->netdev_count] = netdev;
 
 	nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
@@ -1763,8 +1791,11 @@
 	      (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
 	       ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
 		u32 u32temp;
-		u32 link_mask;
-		u32 link_val;
+		u32 link_mask = 0;
+		u32 link_val = 0;
+		u16 temp_phy_data;
+		u16 phy_data = 0;
+		unsigned long flags;
 
 		u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
 				(0x200 * (nesdev->mac_index & 1)));
@@ -1786,6 +1817,23 @@
 				link_val = 0x02020000;
 			}
 			break;
+		case NES_PHY_TYPE_SFP_D:
+			spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+			nes_read_10G_phy_reg(nesdev,
+					     nesdev->nesadapter->phy_index[nesdev->mac_index],
+					     1, 0x9003);
+			temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			nes_read_10G_phy_reg(nesdev,
+					     nesdev->nesadapter->phy_index[nesdev->mac_index],
+					     3, 0x0021);
+			nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			nes_read_10G_phy_reg(nesdev,
+					     nesdev->nesadapter->phy_index[nesdev->mac_index],
+					     3, 0x0021);
+			phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+			phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+			break;
 		default:
 			link_mask = 0x0f1f0000;
 			link_val = 0x0f0f0000;
@@ -1795,8 +1843,14 @@
 		u32temp = nes_read_indexed(nesdev,
 					   NES_IDX_PHY_PCS_CONTROL_STATUS0 +
 					   (0x200 * (nesdev->mac_index & 1)));
-		if ((u32temp & link_mask) == link_val)
-			nesvnic->linkup = 1;
+
+		if (phy_type == NES_PHY_TYPE_SFP_D) {
+			if (phy_data & 0x0004)
+				nesvnic->linkup = 1;
+		} else {
+			if ((u32temp & link_mask) == link_val)
+				nesvnic->linkup = 1;
+		}
 
 		/* clear the MAC interrupt status, assumes direct logical to physical mapping */
 		u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 99933e4..26d8018 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3936,6 +3936,30 @@
 	return nesibdev;
 }
 
+
+/**
+ * nes_handle_delayed_event
+ */
+static void nes_handle_delayed_event(unsigned long data)
+{
+	struct nes_vnic *nesvnic = (void *) data;
+
+	if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
+		struct ib_event event;
+
+		event.device = &nesvnic->nesibdev->ibdev;
+		if (!event.device)
+			goto stop_timer;
+		event.event = nesvnic->delayed_event;
+		event.element.port_num = nesvnic->logical_port + 1;
+		ib_dispatch_event(&event);
+	}
+
+stop_timer:
+	nesvnic->event_timer.function = NULL;
+}
+
+
 void  nes_port_ibevent(struct nes_vnic *nesvnic)
 {
 	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
@@ -3944,7 +3968,18 @@
 	event.device = &nesibdev->ibdev;
 	event.element.port_num = nesvnic->logical_port + 1;
 	event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
-	ib_dispatch_event(&event);
+
+	if (!nesvnic->event_timer.function) {
+		ib_dispatch_event(&event);
+		nesvnic->last_dispatched_event = event.event;
+		nesvnic->event_timer.function = nes_handle_delayed_event;
+		nesvnic->event_timer.data = (unsigned long) nesvnic;
+		nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
+		add_timer(&nesvnic->event_timer);
+	} else {
+		mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY);
+	}
+	nesvnic->delayed_event = event.event;
 }
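The nes_port_ibevent() change above debounces rapid link flaps: the first event is dispatched immediately and a timer is armed, later events arriving within NES_EVENT_DELAY only update delayed_event, and the timer handler reports the latest event only if it differs from the one last dispatched. A reduced sketch of the same idea, with hypothetical names, using the pre-timer_setup() timer API of this kernel generation (the timer is assumed init_timer()'d during setup):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct port_state {
		struct timer_list event_timer;	/* init_timer()'d at setup */
		int delayed_event;		/* most recently requested */
		int last_dispatched_event;	/* what was actually reported */
	};

	static void dispatch(int event)
	{
		/* stand-in for ib_dispatch_event() */
	}

	static void delayed_event_fn(unsigned long data)
	{
		struct port_state *ps = (struct port_state *)data;

		if (ps->delayed_event != ps->last_dispatched_event)
			dispatch(ps->delayed_event);
		ps->event_timer.function = NULL;	/* mark timer idle */
	}

	/* Report an event, coalescing bursts that land within 'delay' jiffies. */
	static void report_event(struct port_state *ps, int event,
				 unsigned long delay)
	{
		if (!ps->event_timer.function) {
			dispatch(event);	/* first event goes out now */
			ps->last_dispatched_event = event;
			ps->event_timer.function = delayed_event_fn;
			ps->event_timer.data = (unsigned long)ps;
			ps->event_timer.expires = jiffies + delay;
			add_timer(&ps->event_timer);
		} else {
			mod_timer(&ps->event_timer, jiffies + delay);
		}
		ps->delayed_event = event;
	}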
 
 
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d..73225ee 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@
 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
 	void (*f_sdma_init_early)(struct qib_pportdata *);
 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
-	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf8..5246aa4 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+	    (cq->notify == IB_CQ_SOLICITED &&
+	     (solicited || entry->status != IB_WC_SUCCESS))) {
 		cq->notify = IB_CQ_NONE;
 		cq->triggered++;
 		/*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd1936..23e584f 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs preemptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid  = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
@@ -335,7 +473,7 @@
 		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
@@ -444,7 +585,7 @@
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971..75bfad1 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@
 		/* find device (with ACTIVE ports) with fewest ctxts in use */
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
-			unsigned cused = 0, cfree = 0;
+			unsigned cused = 0, cfree = 0, pusable = 0;
 			if (!dd)
 				continue;
 			if (port && port <= dd->num_pports &&
 			    usable(dd->pport + port - 1))
-				dusable = 1;
+				pusable = 1;
 			else
 				for (i = 0; i < dd->num_pports; i++)
 					if (usable(dd->pport + i))
-						dusable++;
-			if (!dusable)
+						pusable++;
+			if (!pusable)
 				continue;
 			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
 			     ctxt++)
@@ -1397,7 +1397,7 @@
 					cused++;
 				else
 					cfree++;
-			if (cfree && cused < inuse) {
+			if (pusable && cfree && cused < inuse) {
 				udd = dd;
 				inuse = cused;
 			}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29db..774dea8 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@
 }
 
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74..de799f1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
 	shutdown_7220_relock_poll(ppd->dd);
 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -2297,7 +2296,7 @@
 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1) {
-		dd->qpn_mask = 0x3f;
+		dd->qpn_mask = 0x3e;
 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
 			dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2702,7 @@
 }
 
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -3515,8 +3514,8 @@
 
 	toggle_7220_rclkrls(ppd->dd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443..b01809a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@
 
 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
 
 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
 
@@ -111,6 +114,21 @@
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
 /* for read back, default index is ~5m copper cable */
 static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -314,7 +332,7 @@
 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
 
 /*
- * Per-context kernel registers.  Acess only with qib_read_kreg_ctxt()
+ * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
  * or qib_write_kreg_ctxt()
  */
 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
@@ -544,6 +562,7 @@
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
@@ -1677,6 +1696,8 @@
 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
 		force_h1(ppd);
 		ppd->cpspec->qdr_reforce = 1;
+		if (!ppd->dd->cspec->r1)
+			serdes_7322_los_enable(ppd, 0);
 	} else if (ppd->cpspec->qdr_reforce &&
 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1713,37 @@
 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
 		adj_tx_serdes(ppd);
 
-	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
-	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
-		ppd->cpspec->qdr_dfe_on = 1;
-		ppd->cpspec->qdr_dfe_time = 0;
-		/* On link down, reenable QDR adaptation */
-		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
-			ppd->dd->cspec->r1 ?
-				    QDR_STATIC_ADAPT_DOWN_R1 :
-				    QDR_STATIC_ADAPT_DOWN);
+	if (ibclt != IB_7322_LT_STATE_LINKUP) {
+		u8 ltstate = qib_7322_phys_portstate(ibcst);
+		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+					  LinkTrainingState);
+		if (!ppd->dd->cspec->r1 &&
+		    pibclt == IB_7322_LT_STATE_LINKUP &&
+		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+			/* If the link went down (but not into recovery),
+			 * turn LOS back on */
+			serdes_7322_los_enable(ppd, 1);
+		if (!ppd->cpspec->qdr_dfe_on &&
+		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+			ppd->cpspec->qdr_dfe_on = 1;
+			ppd->cpspec->qdr_dfe_time = 0;
+			/* On link down, reenable QDR adaptation */
+			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+					    ppd->dd->cspec->r1 ?
+					    QDR_STATIC_ADAPT_DOWN_R1 :
+					    QDR_STATIC_ADAPT_DOWN);
+			printk(KERN_INFO QIB_DRV_NAME
+				" IB%u:%u re-enabled QDR adaptation "
+				"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+		}
 	}
 }
 
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
 /*
  * This is per-pport error handling.
  * will likely get it's own MSIx interrupt (one for each port,
@@ -2323,6 +2363,11 @@
 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 
+	/* Hold the link state machine for mezz boards */
+	if (IS_QMH(dd) || IS_QME(dd))
+		qib_set_ib_7322_lstate(ppd, 0,
+				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
 	/* Also enable IBSTATUSCHG interrupt.  */
 	val = qib_read_kreg_port(ppd, krp_errmask);
 	qib_write_kreg_port(ppd, krp_errmask,
@@ -2348,10 +2393,9 @@
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 	if (ppd->dd->cspec->r1)
-		cancel_delayed_work(&ppd->cpspec->ipg_work);
-	flush_scheduled_work();
+		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
 	ppd->cpspec->chase_end = 0;
 	if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2648,7 +2692,7 @@
 			if (!(pins & mask)) {
 				++handled;
 				qd->t_insert = get_jiffies_64();
-				schedule_work(&qd->work);
+				queue_work(ib_wq, &qd->work);
 			}
 		}
 	}
@@ -2785,7 +2829,6 @@
 				ctxtrbits &= ~rmask;
 				if (dd->rcd[i]) {
 					qib_kreceive(dd->rcd[i], NULL, &npkts);
-					adjust_rcv_timeout(dd->rcd[i], npkts);
 				}
 			}
 			rmask <<= 1;
@@ -2835,7 +2878,6 @@
 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
 
 	qib_kreceive(rcd, NULL, &npkts);
-	adjust_rcv_timeout(rcd, npkts);
 
 	return IRQ_HANDLED;
 }
@@ -3157,6 +3199,10 @@
 	case BOARD_QME7342:
 		n = "InfiniPath_QME7342";
 		break;
+	case 8:
+		n = "InfiniPath_QME7362";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
 	case 15:
 		n = "InfiniPath_QLE7342_TEST";
 		dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3521,6 @@
 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
-		/*
-		 * Set the mask for which bits from the QPN are used
-		 * to select a context number.
-		 */
-		dd->qpn_mask = 0x3f;
 		dd->first_user_ctxt = NUM_IB_PORTS +
 			(qib_n_krcv_queues - 1) * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3571,11 @@
 
 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
-				dd->num_pports > 1 ? 1024U : 2048U);
+	if (qib_rcvhdrcnt)
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
+	else
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+				    dd->num_pports > 1 ? 1024U : 2048U);
 }
 
 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4046,14 @@
 }
 
 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
+	/*
+	 * Need to write timeout register before updating rcvhdrhead to ensure
+	 * that the timer is enabled on reception of a packet.
+	 */
+	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+		adjust_rcv_timeout(rcd, npkts);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -4926,8 +4976,8 @@
 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 	qib_7322_mini_pcs_reset(ppd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
@@ -5057,7 +5107,8 @@
 		ib_free_send_mad(send_buf);
 retry:
 	delay = 2 << ppd->cpspec->ipg_tries;
-	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+			   msecs_to_jiffies(delay));
 }
 
 /*
@@ -5522,7 +5573,7 @@
 		u64 now = get_jiffies_64();
 		if (time_after64(now, pwrup))
 			break;
-		msleep(1);
+		msleep(20);
 	}
 	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
 	/*
@@ -5579,6 +5630,7 @@
 	u32 pidx, unit, port, deflt, h1;
 	unsigned long val;
 	int any = 0, seth1;
+	int txdds_size;
 
 	str = txselect_list;
 
@@ -5587,6 +5639,10 @@
 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
 		dd->pport[pidx].cpspec->no_eep = deflt;
 
+	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
+	if (IS_QME(dd) || IS_QMH(dd))
+		txdds_size += TXDDS_MFG_SZ;
+
 	while (*nxt && nxt[1]) {
 		str = ++nxt;
 		unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5665,7 @@
 				;
 			continue;
 		}
-		if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+		if (val >= txdds_size)
 			continue;
 		seth1 = 0;
 		h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5633,6 +5689,11 @@
 				ppd->cpspec->h1_val = h1;
 			/* now change the IBC and serdes, overriding generic */
 			init_txdds_table(ppd, 1);
+			/* Re-enable the physical state machine on mezz boards
+			 * now that the correct settings have been set. */
+			if (IS_QMH(dd) || IS_QME(dd))
+				qib_set_ib_7322_lstate(ppd, 0,
+					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
 			any++;
 		}
 		if (*nxt == '\n')
@@ -5661,10 +5722,11 @@
 		return -ENOSPC;
 	}
 	val = simple_strtoul(str, &n, 0);
-	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+				TXDDS_MFG_SZ)) {
 		printk(KERN_INFO QIB_DRV_NAME
 		       "txselect_values must start with a number < %d\n",
-			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
 		return -EINVAL;
 	}
 	strcpy(txselect_list, str);
@@ -5810,7 +5872,8 @@
 		unsigned n, regno;
 		unsigned long flags;
 
-		if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+		if (dd->n_krcv_queues < 2 ||
+			!dd->pport[pidx].link_speed_supported)
 			continue;
 
 		ppd = &dd->pport[pidx];
@@ -6097,8 +6160,10 @@
 		ppd++;
 	}
 
-	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
-	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rcvhdrentsize = qib_rcvhdrentsize ?
+		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = qib_rcvhdrsize ?
+		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
 
 	/* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6560,7 @@
 		/* make sure we see an updated copy next time around */
 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
 		sleeps++;
-		msleep(1);
+		msleep(20);
 	}
 
 	switch (which) {
@@ -6993,6 +7058,12 @@
 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
 };
 
+static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
+	/* amp, pre, main, post */
+	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
+	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
+};
+
 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
 					       unsigned atten)
 {
@@ -7066,6 +7137,16 @@
 		*sdr_dds = &txdds_extra_sdr[idx];
 		*ddr_dds = &txdds_extra_ddr[idx];
 		*qdr_dds = &txdds_extra_qdr[idx];
+	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
+		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+					  TXDDS_MFG_SZ)) {
+		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+		printk(KERN_INFO QIB_DRV_NAME
+			" IB%u:%u use idx %u into txdds_mfg\n",
+			ppd->dd->unit, ppd->port, idx);
+		*sdr_dds = &txdds_extra_mfg[idx];
+		*ddr_dds = &txdds_extra_mfg[idx];
+		*qdr_dds = &txdds_extra_mfg[idx];
 	} else {
 		/* this shouldn't happen, it's range checked */
 		*sdr_dds = txdds_sdr + qib_long_atten;
@@ -7210,9 +7291,30 @@
 	}
 }
 
+static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
+{
+	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
+		ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
+	if (enable)
+		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	else
+		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	qib_write_kreg_port(ppd, krp_serdesctrl, data);
+}
+
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
-	u64 data;
+	int ret = 0;
+	if (ppd->dd->cspec->r1)
+		ret = serdes_7322_init_old(ppd);
+	else
+		ret = serdes_7322_init_new(ppd);
+	return ret;
+}
+
+static int serdes_7322_init_old(struct qib_pportdata *ppd)
+{
 	u32 le_val;
 
 	/*
@@ -7270,11 +7372,7 @@
 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
 
-	data = qib_read_kreg_port(ppd, krp_serdesctrl);
-	/* Turn off IB latency mode */
-	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
-	qib_write_kreg_port(ppd, krp_serdesctrl, data |
-		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+	serdes_7322_los_enable(ppd, 1);
 
 	/* rxbistena; set 0 to avoid effects of it switch later */
 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7412,205 @@
 	return 0;
 }
 
+static int serdes_7322_init_new(struct qib_pportdata *ppd)
+{
+	u64 tstart;
+	u32 le_val, rxcaldone;
+	int chan, chan_done = (1 << SERDES_CHANS) - 1;
+
+	/*
+	 * Initialize the Tx DDS tables.  Also done every QSFP event,
+	 * for adapters with QSFP
+	 */
+	init_txdds_table(ppd, 0);
+
+	/* Clear cmode-override, may be set from older driver */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+	/* ensure no tx overrides from earlier driver loads */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
+	/* START OF LSI SUGGESTED SERDES BRINGUP */
+	/* Reset - Calibration Setup */
+	/*       Stop DFE adaptation */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
+	/*       Disable LE1 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
+	/*       Disable autoadapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
+	/*       Disable LE2 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
+	/*       Disable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	/*       Disable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
+	/*       Disable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
+	/*       Disable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
+	/*       Disable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
+	/*       Disable RX Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	/*       Disable RX Offset Calibration */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
+	/*       Select BB CDR */
+	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
+	/*       CDR Step Size */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
+	/*       Enable phase Calibration */
+	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
+	/*       DFE Bandwidth [2:14-12] */
+	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
+	/*       DFE Config (4 taps only) */
+	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
+	/*       Gain Loop Bandwidth */
+	if (!ppd->dd->cspec->r1) {
+		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
+		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
+	} else {
+		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
+	}
+	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
+	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
+	/*       Data Rate Select [5:7-6] (leave as default) */
+	/*       RX Parallel Word Width [3:10-8] (leave as default) */
+
+	/* RX RESET */
+	/*       Single- or Multi-channel reset */
+	/*       RX Analog reset */
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
+	msleep(20);
+	/*       RX Analog reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
+	msleep(20);
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
+	msleep(20);
+
+	/* setup LoS params; these are subsystem, so chan == 5 */
+	/* LoS filter threshold_count on, ch 0-3, set to 8 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+	/* LoS filter threshold_count off, ch 0-3, set to 4 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+	/* LoS filter select enabled */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
+	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+	/* Turn on LOS on initial SERDES init */
+	serdes_7322_los_enable(ppd, 1);
+	/* FLoop LOS gate: PPM filter  enabled */
+	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+	/* RX LATCH CALIBRATION */
+	/*       Enable Eyefinder Phase Calibration latch */
+	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
+	/*       Enable RX Offset Calibration latch */
+	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
+	msleep(20);
+	/*       Start Calibration */
+	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
+	tstart = get_jiffies_64();
+	while (chan_done &&
+	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+		msleep(20);
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
+			    (~chan_done & (1 << chan)) == 0)
+				chan_done &= ~(1 << chan);
+		}
+	}
+	if (chan_done) {
+		printk(KERN_INFO QIB_DRV_NAME
+			 " Serdes %d calibration not done after .5 sec: 0x%x\n",
+			 IBSD(ppd->hw_pidx), chan_done);
+	} else {
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
+				printk(KERN_INFO QIB_DRV_NAME
+					 " Serdes %d chan %d calibration "
+					 "failed\n", IBSD(ppd->hw_pidx), chan);
+		}
+	}
+
+	/*       Turn off Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	msleep(20);
+
+	/* BRING RX UP */
+	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
+	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+	/*       Set LE2 Loop bandwidth */
+	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
+	/*       Enable LE2 */
+	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
+	msleep(20);
+	/*       Enable H0 only */
+	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
+	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+	/*       Enable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	msleep(20);
+	/*       Set Frequency Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
+	/*       Enable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
+	/*       Set Timing Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+	/*       Enable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
+	msleep(50);
+	/*       Enable DFE
+	 *       Set receive adaptation mode.  SDR and DDR adaptation are
+	 *       always on, and QDR is initially enabled; later disabled.
+	 */
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+			    ppd->dd->cspec->r1 ?
+			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+	ppd->cpspec->qdr_dfe_on = 1;
+	/*       Disable LE1  */
+	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
+	/*       Disable auto adapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
+	msleep(20);
+	/*       Enable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
+	/*       Enable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
+	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+	/* VGA output common mode */
+	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+
+	return 0;
+}
+
 /* start adjust QMH serdes parameters */
 
 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b5039..ffefb78 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -92,9 +91,11 @@
 /* set number of contexts we'll actually use */
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
-	if (!qib_cfgctxts)
+	if (!qib_cfgctxts) {
 		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
-	else if (qib_cfgctxts < dd->num_pports)
+		if (dd->cfgctxts > dd->ctxtcnt)
+			dd->cfgctxts = dd->ctxtcnt;
+	} else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
 		dd->cfgctxts = qib_cfgctxts;
@@ -268,23 +269,20 @@
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
 		qib_dev_err(dd, "failed to allocate shadow page * "
 			    "array, no expected sends!\n");
 		goto bail;
 	}
 
-	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
 		qib_dev_err(dd, "failed to allocate shadow dma handle "
 			    "array, no expected sends!\n");
 		goto bail_free;
 	}
 
-	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
 	return;
@@ -1045,24 +1043,10 @@
 	if (ret)
 		goto bail;
 
-	/*
-	 * We create our own workqueue mainly because we want to be
-	 * able to flush it when devices are being removed.  We can't
-	 * use schedule_work()/flush_scheduled_work() because both
-	 * unregister_netdev() and linkwatch_event take the rtnl lock,
-	 * so flush_scheduled_work() can deadlock during device
-	 * removal.
-	 */
-	qib_wq = create_workqueue("qib");
-	if (!qib_wq) {
-		ret = -ENOMEM;
-		goto bail_dev;
-	}
-
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_wq;
+		goto bail_dev;
 	}
 
 	/*
@@ -1092,8 +1076,6 @@
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
-bail_wq:
-	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
@@ -1117,7 +1099,6 @@
 
 	pci_unregister_driver(&qib_driver);
 
-	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);
 
 	qib_cpulist_count = 0;
@@ -1290,7 +1271,7 @@
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
@@ -1339,8 +1320,8 @@
 
 	qib_stop_timers(dd);
 
-	/* wait until all of our (qsfp) schedule_work() calls complete */
-	flush_scheduled_work();
+	/* wait until all of our (qsfp) queue_work() calls complete */
+	flush_workqueue(ib_wq);
 
 	ret = qibfs_remove(dd);
 	if (ret)
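
The allocation changes in this file are mechanical: each vmalloc() followed by memset(..., 0, ...) becomes a single vzalloc(), which returns already-zeroed memory, and the error paths are unchanged. A minimal hedged sketch of the pattern, using a hypothetical helper rather than the driver's own code:

#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>

/* hypothetical helper: a zeroed shadow array of n page pointers */
static struct page **alloc_page_shadow(size_t n)
{
	/* vzalloc(sz) is equivalent to vmalloc(sz) followed by memset(p, 0, sz) */
	return vzalloc(n * sizeof(struct page *));
}
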
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a4082..a693c56 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@
 			/* start a 75msec timer to clear symbol errors */
 			mod_timer(&ppd->symerr_clear_timer,
 				  msecs_to_jiffies(75));
-		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
+			   !(ppd->lflags & QIBL_LINKACTIVE)) {
 			/* active, but not active defered */
 			qib_hol_up(ppd); /* useful only for 6120 now */
 			*ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb1..8fd19a4 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -152,6 +151,8 @@
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		isge->mr = dev->dma_mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
@@ -170,19 +171,34 @@
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		 * Page sizes are a uniform power of 2, so no loop is
+		 * necessary: entries_spanned_by_off is the number of times
+		 * the loop below would have executed.
+		 */
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	isge->mr = mr;
 	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@
 	isge->m = m;
 	isge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /**
@@ -214,7 +230,6 @@
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -231,6 +246,8 @@
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		sge->mr = dev->dma_mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
@@ -248,19 +265,34 @@
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		 * Page sizes are a uniform power of 2, so no loop is
+		 * necessary: entries_spanned_by_off is the number of times
+		 * the loop below would have executed.
+		 */
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	sge->mr = mr;
 	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@
 	sge->m = m;
 	sge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /*
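
The page_shift fast path added in both lookup routines above replaces the segment-walk loop with plain shift/divide arithmetic. As a worked example with illustrative values (4 KiB pages, an offset of 0x6800, and QIB_SEGSZ assumed to be 2 purely for readability): entries_spanned_by_off = 0x6800 >> 12 = 6, the residual offset is 0x800, m = 3 and n = 0. The same arithmetic as a standalone check:

#include <stdio.h>
#include <stddef.h>

#define SEGSZ 2	/* stand-in for QIB_SEGSZ; the real constant differs */

int main(void)
{
	size_t off = 0x6800;		/* byte offset into the region */
	unsigned int page_shift = 12;	/* 4 KiB pages */
	size_t entries = off >> page_shift;

	off -= entries << page_shift;	/* remaining offset within the page */
	printf("m=%zu n=%zu off=0x%zx\n",
	       entries / SEGSZ, entries % SEGSZ, off);
	/* prints: m=3 n=0 off=0x800 */
	return 0;
}
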
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f..5ad224e 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@
 	lid = be16_to_cpu(pip->lid);
 	/* Must be a valid unicast LID address. */
 	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
 		if (ppd->lid != lid)
 			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
 		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@
 	msl = pip->neighbormtu_mastersmsl & 0xF;
 	/* Must be a valid unicast LID address. */
 	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
 		spin_lock_irqsave(&ibp->lock, flags);
 		if (ibp->sm_ah) {
 			if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@
 		if (lwe == 0xFF)
 			lwe = ppd->link_width_supported;
 		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
-			goto err;
-		set_link_width_enabled(ppd, lwe);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lwe != ppd->link_width_enabled)
+			set_link_width_enabled(ppd, lwe);
 	}
 
 	lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@
 		if (lse == 15)
 			lse = ppd->link_speed_supported;
 		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
-			goto err;
-		set_link_speed_enabled(ppd, lse);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lse != ppd->link_speed_enabled)
+			set_link_speed_enabled(ppd, lse);
 	}
 
 	/* Set link down default state. */
@@ -738,7 +740,7 @@
 					IB_LINKINITCMD_POLL);
 		break;
 	default:
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
 	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@
 
 	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
 	if (mtu == -1)
-		goto err;
-	qib_set_mtu(ppd, mtu);
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else
+		qib_set_mtu(ppd, mtu);
 
 	/* Set operational VLs */
 	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
 	if (vls) {
 		if (vls > ppd->vls_supported)
-			goto err;
-		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else
+			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
 	}
 
 	if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@
 
 	ore = pip->localphyerrors_overrunerrors;
 	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	if (set_overrunthreshold(ppd, (ore & 0xF)))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
 
@@ -792,7 +796,7 @@
 	state = pip->linkspeed_portstate & 0xF;
 	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
 	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	/*
 	 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@
 			lstate = QIB_IB_LINKDOWN;
 		else if (lstate == 3)
 			lstate = QIB_IB_LINKDOWN_DISABLE;
-		else
-			goto err;
+		else {
+			smp->status |= IB_SMP_INVALID_FIELD;
+			break;
+		}
 		spin_lock_irqsave(&ppd->lflags_lock, flags);
 		ppd->lflags &= ~QIBL_LINKV;
 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@
 		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
 		break;
 	default:
-		/* XXX We have already partially updated our state! */
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
 	ret = subn_get_portinfo(smp, ibdev, port);
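
The rework above changes the PortInfo set path from bailing out on the first bad field ("goto err") to OR-ing IB_SMP_INVALID_FIELD into the reply status and continuing, so every field that does validate is still applied and the old partially-updated-then-fail behaviour (the removed XXX comment) goes away. A tiny standalone illustration of that validate-and-flag style; the constant below is only a stand-in for IB_SMP_INVALID_FIELD:

#include <stdio.h>

#define EX_INVALID_FIELD 0x1c	/* illustrative stand-in, not the real definition */

int main(void)
{
	unsigned int status = 0;
	int mtu = -1;			/* invalid field */
	int vls = 2, vls_supported = 4;	/* valid field */
	int applied = 0;

	if (mtu < 0)
		status |= EX_INVALID_FIELD;	/* flag it, keep going */
	else
		applied++;

	if (vls > vls_supported)
		status |= EX_INVALID_FIELD;
	else
		applied++;

	printf("status=0x%x applied=%d\n", status, applied);	/* 0x1c, 1 */
	return 0;
}
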
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f..08944e2 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
 /* Fast memory region */
 struct qib_fmr {
 	struct ib_fmr ibfmr;
-	u8 page_shift;
 	struct qib_mregion mr;        /* must be last */
 };
 
@@ -107,6 +106,7 @@
 			goto bail;
 	}
 	mr->mr.mapsz = m;
+	mr->mr.page_shift = 0;
 	mr->mr.max_segs = count;
 
 	/*
@@ -231,6 +231,8 @@
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;
 
+	if (is_power_of_2(umem->page_size))
+		mr->mr.page_shift = ilog2(umem->page_size);
 	m = 0;
 	n = 0;
 	list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@
 	fmr->mr.offset = 0;
 	fmr->mr.access_flags = mr_access_flags;
 	fmr->mr.max_segs = fmr_attr->max_pages;
-	fmr->page_shift = fmr_attr->page_shift;
+	fmr->mr.page_shift = fmr_attr->page_shift;
 
 	atomic_set(&fmr->mr.refcount, 0);
 	ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@
 	spin_lock_irqsave(&rkt->lock, flags);
 	fmr->mr.user_base = iova;
 	fmr->mr.iova = iova;
-	ps = 1 << fmr->page_shift;
+	ps = 1 << fmr->mr.page_shift;
 	fmr->mr.length = list_len * ps;
 	m = 0;
 	n = 0;
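
mr->mr.page_shift is recorded only when umem->page_size is a power of two, because the fast lookup path divides by shifting; otherwise it stays 0 and the slow segment walk is used. A standalone illustration of the is_power_of_2()/ilog2() pair (re-implemented here so it compiles in user space; the kernel provides both helpers):

#include <stdio.h>

static int is_pow2(unsigned long x)
{
	return x && (x & (x - 1)) == 0;
}

static unsigned int log2_exact(unsigned long x)
{
	unsigned int s = 0;

	while (x >>= 1)
		s++;
	return s;
}

int main(void)
{
	unsigned long page_size = 4096;
	unsigned int page_shift = is_pow2(page_size) ? log2_exact(page_size) : 0;

	printf("page_shift = %u\n", page_shift);	/* 12 for 4 KiB pages */
	return 0;
}
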
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851..e16751f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@
 
 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
 					struct qpn_map *map, unsigned off,
-					unsigned r)
+					unsigned n)
 {
 	if (qpt->mask) {
 		off++;
-		if ((off & qpt->mask) >> 1 != r)
-			off = ((off & qpt->mask) ?
-				(off | qpt->mask) + 1 : off) | (r << 1);
+		if (((off & qpt->mask) >> 1) >= n)
+			off = (off | qpt->mask) + 2;
 	} else
 		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
 	return off;
@@ -123,7 +122,6 @@
 	u32 i, offset, max_scan, qpn;
 	struct qpn_map *map;
 	u32 ret;
-	int r;
 
 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
 		unsigned n;
@@ -139,15 +137,11 @@
 		goto bail;
 	}
 
-	r = smp_processor_id();
-	if (r >= dd->n_krcv_queues)
-		r %= dd->n_krcv_queues;
-	qpn = qpt->last + 1;
+	qpn = qpt->last + 2;
 	if (qpn >= QPN_MAX)
 		qpn = 2;
-	if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
-		qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
-			(r << 1);
+	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+		qpn = (qpn | qpt->mask) + 2;
 	offset = qpn & BITS_PER_PAGE_MASK;
 	map = &qpt->map[qpn / BITS_PER_PAGE];
 	max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@
 				ret = qpn;
 				goto bail;
 			}
-			offset = find_next_offset(qpt, map, offset, r);
+			offset = find_next_offset(qpt, map, offset,
+				dd->n_krcv_queues);
 			qpn = mk_qpn(qpt, map, offset);
 			/*
 			 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@
 			if (qpt->nmaps == QPNMAP_ENTRIES)
 				break;
 			map = &qpt->map[qpt->nmaps++];
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else if (map < &qpt->map[qpt->nmaps]) {
 			++map;
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else {
 			map = &qpt->map[0];
-			offset = qpt->mask ? (r << 1) : 2;
+			offset = 2;
 		}
 		qpn = mk_qpn(qpt, map, offset);
 	}
@@ -468,6 +463,10 @@
 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
 		del_timer(&qp->s_timer);
 	}
+
+	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
+		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
+
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
 		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@
 		}
 		qp->ibqp.qp_num = err;
 		qp->port_num = init_attr->port_num;
-		qp->processor_id = smp_processor_id();
 		qib_reset_qp(qp, init_attr->qp_type);
 		break;
 
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 35b3604..3374a52 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@
 		goto bail;
 	/* We see a module, but it may be unwise to look yet. Just schedule */
 	qd->t_insert = get_jiffies_64();
-	schedule_work(&qd->work);
+	queue_work(ib_wq, &qd->work);
 bail:
 	return;
 }
@@ -493,10 +493,9 @@
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
 	/*
-	 * There is nothing to do here for now.  our
-	 * work is scheduled with schedule_work(), and
-	 * flush_scheduled_work() from remove_one will
-	 * block until all work ssetup with schedule_work()
+	 * There is nothing to do here for now.  Our work is scheduled
+	 * with queue_work(), and flush_workqueue() from remove_one
+	 * will block until all work set up with queue_work()
 	 * completes.
 	 */
 }
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb71..eca0c41 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@
 	 * there are still requests that haven't been acked.
 	 */
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
@@ -1407,6 +1408,7 @@
 			    struct qib_ctxtdata *rcd)
 {
 	struct qib_swqe *wqe;
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	enum ib_wc_status status;
 	unsigned long flags;
 	int diff;
@@ -1414,7 +1416,32 @@
 	u32 aeth;
 	u64 val;
 
+	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+		/*
+		 * If ACK'd PSN on SDMA busy list try to make progress to
+		 * reclaim SDMA credits.
+		 */
+		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+			/*
+			 * If send tasklet not running attempt to progress
+			 * SDMA queue.
+			 */
+			if (!(qp->s_flags & QIB_S_BUSY)) {
+				/* Acquire SDMA Lock */
+				spin_lock_irqsave(&ppd->sdma_lock, flags);
+				/* Invoke sdma make progress */
+				qib_sdma_make_progress(ppd);
+				/* Release SDMA Lock */
+				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			}
+		}
+	}
+
 	spin_lock_irqsave(&qp->s_lock, flags);
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto ack_done;
 
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2..4a51fd1 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@
 	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
 	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
 
-	/* Get the number of bytes the message was padded by. */
+	/*
+	 * Get the number of bytes the message was padded by
+	 * and drop incomplete packets.
+	 */
 	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-	if (unlikely(tlen < (hdrsize + pad + 4))) {
-		/* Drop incomplete packets. */
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	if (unlikely(tlen < (hdrsize + pad + 4)))
+		goto drop;
+
 	tlen -= hdrsize + pad + 4;
 
 	/*
@@ -460,10 +461,8 @@
 	 */
 	if (qp->ibqp.qp_num) {
 		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			     hdr->lrh[3] == IB_LID_PERMISSIVE))
+			goto drop;
 		if (qp->ibqp.qp_num > 1) {
 			u16 pkey1, pkey2;
 
@@ -476,7 +475,7 @@
 						0xF,
 					      src_qp, qp->ibqp.qp_num,
 					      hdr->lrh[3], hdr->lrh[1]);
-				goto bail;
+				return;
 			}
 		}
 		if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@
 				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
 				      src_qp, qp->ibqp.qp_num,
 				      hdr->lrh[3], hdr->lrh[1]);
-			goto bail;
+			return;
 		}
 		/* Drop invalid MAD packets (see 13.5.3.1). */
 		if (unlikely(qp->ibqp.qp_num == 1 &&
 			     (tlen != 256 ||
-			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+			goto drop;
 	} else {
 		struct ib_smp *smp;
 
 		/* Drop invalid MAD packets (see 13.5.3.1). */
-		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
+			goto drop;
 		smp = (struct ib_smp *) data;
 		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
 		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
-		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			goto drop;
 	}
 
 	/*
@@ -519,14 +512,12 @@
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		hdrsize += sizeof(u32);
+		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
-	} else {
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	} else
+		goto drop;
 
 	/*
 	 * A GRH is expected to preceed the data even if not
@@ -556,8 +547,7 @@
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
-		ibp->n_pkt_drops++;
-		return;
+		goto drop;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail:;
+	return;
+
+drop:
+	ibp->n_pkt_drops++;
 }
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06..66208bc 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@
 
 		kmem_cache_free(pq->pkt_slab, pkt);
 	}
+	INIT_LIST_HEAD(list);
 }
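
The single added INIT_LIST_HEAD(list) is the interesting part of this hunk: the loop above frees every packet without unlinking it, so after the loop the list head still points at freed memory, and any later walk or list_empty() test on the same head would be a use-after-free. Reinitializing leaves a valid empty list. A hedged kernel-style sketch of the pattern, with a hypothetical element type:

#include <linux/list.h>
#include <linux/slab.h>

struct pkt {				/* hypothetical element type */
	struct list_head list;
};

/* free every packet on @head and leave @head as a valid empty list */
static void free_pkt_list(struct kmem_cache *slab, struct list_head *head)
{
	struct pkt *pkt, *next;

	list_for_each_entry_safe(pkt, next, head, list)
		kmem_cache_free(slab, pkt);

	/* without this, head->next still points at freed memory */
	INIT_LIST_HEAD(head);
}
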
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c12..95e5b47 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@
 	int access_flags;
 	u32 max_segs;           /* number of qib_segs in all the arrays */
 	u32 mapsz;              /* size of the map array */
+	u8  page_shift;         /* 0 - non-uniform or non-power-of-2 page sizes */
 	atomic_t refcount;
 	struct qib_segarray *map[0];    /* the segments */
 };
@@ -435,7 +436,6 @@
 	spinlock_t r_lock;      /* used for APM */
 	spinlock_t s_lock;
 	atomic_t s_dma_busy;
-	unsigned processor_id;	/* Processor ID QP is bound to */
 	u32 s_flags;
 	u32 s_cur_size;         /* size of send packet in bytes */
 	u32 s_len;              /* total length of s_sge */
@@ -805,7 +805,6 @@
 		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -813,13 +812,8 @@
  */
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
-	if (qib_send_ok(qp)) {
-		if (qp->processor_id == smp_processor_id())
-			queue_work(qib_wq, &qp->s_work);
-		else
-			queue_work_on(qp->processor_id,
-				      qib_wq, &qp->s_work);
-	}
+	if (qib_send_ok(qp))
+		queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc..cda8eac 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
 config INFINIBAND_IPOIB
 	tristate "IP-over-InfiniBand"
 	depends on NETDEVICES && INET && (IPV6 || IPV6=n)
-	select INET_LRO
 	---help---
 	  Support for the IP-over-InfiniBand protocol (IPoIB). This
 	  transports IP packets over InfiniBand so you can use your IB
@@ -25,7 +24,7 @@
 	  unless you limit mtu for these destinations to 2044.
 
 config INFINIBAND_IPOIB_DEBUG
-	bool "IP-over-InfiniBand debugging" if EMBEDDED
+	bool "IP-over-InfiniBand debugging" if EXPERT
 	depends on INFINIBAND_IPOIB
 	default y
 	---help---
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983..ab97f92 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
-#include <linux/inet_lro.h>
+#include <linux/sched.h>
 
 /* constants */
 
@@ -100,9 +100,6 @@
 	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
 	IPOIB_MCAST_FLAG_ATTACHED = 3,
 
-	IPOIB_MAX_LRO_DESCRIPTORS = 8,
-	IPOIB_LRO_MAX_AGGR 	  = 64,
-
 	MAX_SEND_CQE		  = 16,
 	IPOIB_CM_COPYBREAK	  = 256,
 };
@@ -262,11 +259,6 @@
 	u16     max_coalesced_frames;
 };
 
-struct ipoib_lro {
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
-};
-
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else.  lock nests inside
@@ -352,8 +344,6 @@
 	int	hca_caps;
 	struct ipoib_ethtool_st ethtool;
 	struct timer_list poll_timer;
-
-	struct ipoib_lro lro;
 };
 
 struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb10041..93d5580 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -352,15 +352,13 @@
 	int ret;
 	int i;
 
-	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
 	if (!rx->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
 		return -ENOMEM;
 	}
 
-	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
-
 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
 		ret = -ENOMEM;
@@ -1097,13 +1095,12 @@
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	int ret;
 
-	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
 	if (!p->tx_ring) {
 		ipoib_warn(priv, "failed to allocate tx ring\n");
 		ret = -ENOMEM;
 		goto err_tx;
 	}
-	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	if (IS_ERR(p->qp)) {
@@ -1480,6 +1477,7 @@
 
 		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
 			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+			priv->dev->features |= NETIF_F_GRO;
 			if (priv->hca_caps & IB_DEVICE_UD_TSO)
 				dev->features |= NETIF_F_TSO;
 		}
@@ -1520,7 +1518,7 @@
 		return;
 	}
 
-	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
@@ -1529,7 +1527,6 @@
 		return;
 	}
 
-	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c..19f7f52 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@
 	return 0;
 }
 
-static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
-	"LRO aggregated", "LRO flushed",
-	"LRO avg aggr", "LRO no desc"
-};
-
-static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
-	switch (stringset) {
-	case ETH_SS_STATS:
-		memcpy(data, *ipoib_stats_keys,	sizeof(ipoib_stats_keys));
-		break;
-	}
-}
-
-static int ipoib_get_sset_count(struct net_device *dev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return ARRAY_SIZE(ipoib_stats_keys);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void ipoib_get_ethtool_stats(struct net_device *dev,
-				struct ethtool_stats *stats, uint64_t *data)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int index = 0;
-
-	/* Get LRO statistics */
-	data[index++] = priv->lro.lro_mgr.stats.aggregated;
-	data[index++] = priv->lro.lro_mgr.stats.flushed;
-	if (priv->lro.lro_mgr.stats.flushed)
-		data[index++] = priv->lro.lro_mgr.stats.aggregated /
-				priv->lro.lro_mgr.stats.flushed;
-	else
-		data[index++] = 0;
-	data[index++] = priv->lro.lro_mgr.stats.no_desc;
-}
-
-static int ipoib_set_flags(struct net_device *dev, u32 flags)
-{
-	return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
-}
-
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
 	.get_rx_csum		= ipoib_get_rx_csum,
 	.set_tso		= ipoib_set_tso,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
-	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ipoib_set_flags,
-	.get_strings		= ipoib_get_strings,
-	.get_sset_count		= ipoib_get_sset_count,
-	.get_ethtool_stats	= ipoib_get_ethtool_stats,
 };
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa7190..806d029 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@
 	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	if (dev->features & NETIF_F_LRO)
-		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
-	else
-		netif_receive_skb(skb);
+	napi_gro_receive(&priv->napi, skb);
 
 repost:
 	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@
 	}
 
 	if (done < budget) {
-		if (dev->features & NETIF_F_LRO)
-			lro_flush_all(&priv->lro.lro_mgr);
-
 		napi_complete(napi);
 		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |
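
With the LRO-to-GRO conversion above, received skbs are handed to napi_gro_receive() and the core network stack does the aggregation, which is why the per-driver lro_mgr state and the lro_flush_all() call at the end of the poll routine disappear. A minimal hedged sketch of the resulting NAPI poll shape; example_poll() and example_next_rx_skb() are hypothetical names, not IPoIB functions:

#include <linux/netdevice.h>

/* assumed to be provided by the driver: dequeue the next completed rx skb */
struct sk_buff *example_next_rx_skb(struct napi_struct *napi);

/* hypothetical poll routine showing where napi_gro_receive() now fits */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = example_next_rx_skb(napi)) != NULL) {
		napi_gro_receive(napi, skb);	/* the stack aggregates (GRO) */
		done++;
	}

	if (done < budget)
		napi_complete(napi);	/* done for now; re-arm the CQ/IRQ here */

	return done;
}
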
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc7..aca3b44 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro,  "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
-		"(default = 64)");
-
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -925,13 +916,12 @@
 		goto out;
 	}
 
-	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
 		       ca->name, ipoib_sendq_size);
 		goto out_rx_ring_cleanup;
 	}
-	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
 
 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
 
@@ -976,54 +966,6 @@
 	.create	= ipoib_hard_header,
 };
 
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-		       void **tcph, u64 *hdr_flags, void *priv)
-{
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	if (unlikely(skb->protocol != htons(ETH_P_IP)))
-		return -1;
-
-	/*
-	 * In the future we may add an else clause that verifies the
-	 * checksum and allows devices which do not calculate checksum
-	 * to use LRO.
-	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
-		return -1;
-
-	/* Check for non-TCP packet */
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-	*tcph = tcp_hdr(skb);
-
-	/* check if IP header and TCP header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*iphdr = iph;
-
-	return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
-	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
-	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
-	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
-	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
-	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
-	priv->lro.lro_mgr.dev		 = priv->dev;
-	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
 static const struct net_device_ops ipoib_netdev_ops = {
 	.ndo_open		 = ipoib_open,
 	.ndo_stop		 = ipoib_stop,
@@ -1067,8 +1009,6 @@
 
 	priv->dev = dev;
 
-	ipoib_lro_setup(priv);
-
 	spin_lock_init(&priv->lock);
 
 	mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1158,7 @@
 		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	}
 
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
+	priv->dev->features |= NETIF_F_GRO;
 
 	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
 		priv->dev->features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347..83664ed 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@
 	wait_for_completion(&target->done);
 }
 
+static bool srp_change_state(struct srp_target_port *target,
+			    enum srp_target_state old,
+			    enum srp_target_state new)
+{
+	bool changed = false;
+
+	spin_lock_irq(&target->lock);
+	if (target->state == old) {
+		target->state = new;
+		changed = true;
+	}
+	spin_unlock_irq(&target->lock);
+	return changed;
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
 		container_of(work, struct srp_target_port, work);
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_DEAD) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 		return;
-	}
-	target->state = SRP_TARGET_REMOVED;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	spin_lock(&target->srp_host->target_lock);
 	list_del(&target->list);
@@ -539,33 +549,34 @@
 			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+			   struct srp_request *req, s32 req_lim_delta)
 {
+	unsigned long flags;
+
 	srp_unmap_data(req->scmnd, target, req);
-	list_move_tail(&req->list, &target->free_reqs);
+	spin_lock_irqsave(&target->lock, flags);
+	target->req_lim += req_lim_delta;
+	req->scmnd = NULL;
+	list_add_tail(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
 	req->scmnd->result = DID_RESET << 16;
 	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req);
+	srp_remove_req(target, req, 0);
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_qp_attr qp_attr;
-	struct srp_request *req, *tmp;
 	struct ib_wc wc;
-	int ret;
+	int i, ret;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_LIVE) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 		return -EAGAIN;
-	}
-	target->state = SRP_TARGET_CONNECTING;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	srp_disconnect_target(target);
 	/*
@@ -590,27 +601,23 @@
 	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		srp_reset_req(target, req);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd)
+			srp_reset_req(target, req);
+	}
 
-	target->rx_head	 = 0;
-	target->tx_head	 = 0;
-	target->tx_tail  = 0;
+	INIT_LIST_HEAD(&target->free_tx);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret)
 		goto err;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state == SRP_TARGET_CONNECTING) {
-		ret = 0;
-		target->state = SRP_TARGET_LIVE;
-	} else
+	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 		ret = -EAGAIN;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	return ret;
 
@@ -620,17 +627,20 @@
 
 	/*
 	 * We couldn't reconnect, so kill our target port off.
-	 * However, we have to defer the real removal because we might
-	 * be in the context of the SCSI error handler now, which
-	 * would deadlock if we call scsi_remove_host().
+	 * However, we have to defer the real removal because we
+	 * are in the context of the SCSI error handler now, which
+	 * will deadlock if we call scsi_remove_host().
+	 *
+	 * Schedule our work inside the lock to avoid a race with
+	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
-		schedule_work(&target->work);
+		queue_work(ib_wq, &target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -758,7 +768,7 @@
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
-		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@
 			buf->desc_list[i].va  =
 				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
-				cpu_to_be32(dev->mr->rkey);
+				cpu_to_be32(target->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
 			datalen += dma_len;
 		}
@@ -796,7 +806,7 @@
 		buf->table_desc.va  =
 			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
 		buf->table_desc.key =
-			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+			cpu_to_be32(target->rkey);
 		buf->table_desc.len =
 			cpu_to_be32(count * sizeof (struct srp_direct_buf));
 
@@ -812,9 +822,23 @@
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and possibly a credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+			  enum srp_iu_type iu_type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&iu->list, &target->free_tx);
+	if (iu_type != SRP_IU_RSP)
+		++target->req_lim;
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@
 
 	srp_send_completion(target->send_cq, target);
 
-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+	if (list_empty(&target->free_tx))
 		return NULL;
 
 	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
+	if (iu_type != SRP_IU_RSP) {
+		if (target->req_lim <= rsv) {
+			++target->zero_req_lim;
+			return NULL;
+		}
+
+		--target->req_lim;
 	}
 
-	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-	iu->type = iu_type;
+	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+	list_del(&iu->list);
 	return iu;
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+			 struct srp_iu *iu, int len)
 {
 	struct ib_sge list;
 	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
 
 	list.addr   = iu->dma;
 	list.length = len;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next       = NULL;
-	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+	wr.wr_id      = (uintptr_t) iu;
 	wr.sg_list    = &list;
 	wr.num_sge    = 1;
 	wr.opcode     = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
+	return ib_post_send(target->qp, &wr, &bad_wr);
 }
 
-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
 {
-	unsigned long flags;
-	struct srp_iu *iu;
-	struct ib_sge list;
 	struct ib_recv_wr wr, *bad_wr;
-	unsigned int next;
-	int ret;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	next	 = target->rx_head & SRP_RQ_MASK;
-	wr.wr_id = next;
-	iu	 = target->rx_ring[next];
+	struct ib_sge list;
 
 	list.addr   = iu->dma;
 	list.length = iu->size;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next     = NULL;
+	wr.wr_id    = (uintptr_t) iu;
 	wr.sg_list  = &list;
 	wr.num_sge  = 1;
 
-	ret = ib_post_recv(target->qp, &wr, &bad_wr);
-	if (!ret)
-		++target->rx_head;
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
-	return ret;
+	return ib_post_recv(target->qp, &wr, &bad_wr);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
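
The comments above spell out the new TX IU discipline: __srp_get_tx_iu() must run under target->lock (it dequeues from free_tx and, for non-response IUs, consumes a request-limit credit), the send is posted outside the lock, and a failed post must return the IU and its credit through srp_put_tx_iu(). A hedged sketch of a caller following that contract, modelled on (but not copied from) the converted callers in this patch; the srp_* types and helpers are the driver's, DMA syncs are omitted:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/spinlock.h>

/* hedged sketch: send one request IU while honouring the credit rules above */
static int example_send(struct srp_target_port *target, void *payload, int len)
{
	struct srp_iu *iu;
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);	/* dequeues and takes a credit */
	spin_unlock_irqrestore(&target->lock, flags);
	if (!iu)
		return -ENOMEM;

	memcpy(iu->buf, payload, len);
	if (srp_post_send(target, iu, len)) {
		/* not posted: the IU and its credit must go back */
		srp_put_tx_iu(target, iu, SRP_IU_CMD);
		return -EIO;
	}
	return 0;
}
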
@@ -917,23 +917,18 @@
 	struct srp_request *req;
 	struct scsi_cmnd *scmnd;
 	unsigned long flags;
-	s32 delta;
-
-	delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	target->req_lim += delta;
-
-	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
 
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		if (be32_to_cpu(rsp->resp_data_len) < 4)
-			req->tsk_status = -1;
-		else
-			req->tsk_status = rsp->data[3];
-		complete(&req->done);
+		spin_lock_irqsave(&target->lock, flags);
+		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+		spin_unlock_irqrestore(&target->lock, flags);
+
+		target->tsk_mgmt_status = -1;
+		if (be32_to_cpu(rsp->resp_data_len) >= 4)
+			target->tsk_mgmt_status = rsp->data[3];
+		complete(&target->tsk_mgmt_done);
 	} else {
+		req = &target->req_ring[rsp->tag];
 		scmnd = req->scmnd;
 		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		if (!req->tsk_mgmt) {
-			scmnd->host_scribble = (void *) -1L;
-			scmnd->scsi_done(scmnd);
-
-			srp_remove_req(target, req);
-		} else
-			req->cmd_done = 1;
+		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		scmnd->host_scribble = NULL;
+		scmnd->scsi_done(scmnd);
 	}
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 			       void *rsp, int len)
 {
-	struct ib_device *dev;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	unsigned long flags;
 	struct srp_iu *iu;
-	int err = 1;
+	int err;
 
-	dev = target->srp_host->srp_dev->dev;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
-
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "no IU available to send response\n");
-		goto out;
+		return 1;
 	}
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
 
-	err = __srp_post_send(target, iu, len);
-	if (err)
+	err = srp_post_send(target, iu, len);
+	if (err) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "unable to post response: %d\n", err);
+		srp_put_tx_iu(target, iu, SRP_IU_RSP);
+	}
 
-out:
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	return err;
 }
 
@@ -1032,14 +1020,11 @@
 
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
-	struct ib_device *dev;
-	struct srp_iu *iu;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
+	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
 	int res;
 	u8 opcode;
 
-	iu = target->rx_ring[wc->wr_id];
-
-	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
 				   DMA_FROM_DEVICE);
 
@@ -1080,7 +1065,7 @@
 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
 				      DMA_FROM_DEVICE);
 
-	res = srp_post_recv(target);
+	res = srp_post_recv(target, iu);
 	if (res != 0)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
+	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
@@ -1119,18 +1105,19 @@
 			break;
 		}
 
-		++target->tx_tail;
+		iu = (struct srp_iu *) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-			    void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,23 +1126,25 @@
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (!iu)
-		goto err;
+		goto err_unlock;
+
+	req = list_first_entry(&target->free_reqs, struct srp_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done     = done;
 	scmnd->result        = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1156,40 @@
 
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;
 
 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}
 
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}
 
-	list_move_tail(&req->list, &target->req_queue);
-
 	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, target, req);
 
+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&req->list, &target->free_reqs);
+
+err_unlock:
+	spin_unlock_irqrestore(&target->lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
@@ -1216,6 +1208,8 @@
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
+
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 	}
 
 	return 0;
@@ -1377,7 +1371,8 @@
 			break;
 
 		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			target->status = srp_post_recv(target);
+			struct srp_iu *iu = target->rx_ring[i];
+			target->status = srp_post_recv(target, iu);
 			if (target->status)
 				break;
 		}
@@ -1442,25 +1437,24 @@
 }
 
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
-			     struct srp_request *req, u8 func)
+			     u64 req_tag, unsigned int lun, u8 func)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
 	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		req->scmnd->result = DID_BAD_TARGET << 16;
-		goto out;
-	}
+	    target->state == SRP_TARGET_REMOVED)
+		return -1;
 
-	init_completion(&req->done);
+	init_completion(&target->tsk_mgmt_done);
 
+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+	spin_unlock_irq(&target->lock);
+
 	if (!iu)
-		goto out;
+		return -1;
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
@@ -1468,70 +1462,46 @@
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
-	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
-	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
+	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag 	= req->index;
+	tsk_mgmt->task_tag	= req_tag;
 
 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
-	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-		goto out;
+	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
+		return -1;
+	}
 
-	req->tsk_mgmt = iu;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
-
-	if (!wait_for_completion_timeout(&req->done,
+	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;
 
 	return 0;
-
-out:
-	spin_unlock_irq(target->scsi_host->host_lock);
-	return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
-			struct scsi_cmnd *scmnd,
-			struct srp_request **req)
-{
-	if (scmnd->host_scribble == (void *) -1L)
-		return -1;
-
-	*req = &target->req_ring[(long) scmnd->host_scribble];
-
-	return 0;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
+	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
 	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (target->qp_in_error)
+	if (!req || target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
-		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
-	if (req->cmd_done) {
-		srp_remove_req(target, req);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req);
-		scmnd->result = DID_ABORT << 16;
-	} else
-		ret = FAILED;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req, 0);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}
 
 	return ret;
 }
@@ -1539,26 +1509,23 @@
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req, *tmp;
+	int i;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
 	if (target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+			      SRP_TSK_LUN_RESET))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
-		return FAILED;
-	if (req->tsk_status)
+	if (target->tsk_mgmt_status)
 		return FAILED;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device)
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_reset_req(target, req);
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	}
 
 	return SUCCESS;
 }
@@ -1987,9 +1954,12 @@
 	target->io_class   = SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host  = target_host;
 	target->srp_host   = host;
+	target->lkey	   = host->srp_dev->mr->lkey;
+	target->rkey	   = host->srp_dev->mr->rkey;
 
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
-	INIT_LIST_HEAD(&target->req_queue);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2187,9 @@
 		 */
 		spin_lock(&host->target_lock);
 		list_for_each_entry(target, &host->target_list, list) {
-			spin_lock_irq(target->scsi_host->host_lock);
+			spin_lock_irq(&target->lock);
 			target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irq(target->scsi_host->host_lock);
+			spin_unlock_irq(&target->lock);
 		}
 		spin_unlock(&host->target_lock);
 
@@ -2228,7 +2198,7 @@
 		 * started before we marked our target ports as
 		 * removed, and any target port removal tasks.
 		 */
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 
 		list_for_each_entry_safe(target, tmp_target,
 					 &host->target_list, list) {
@@ -2258,8 +2228,7 @@
 {
 	int ret;
 
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9..9dc6fc3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@
 
 	SRP_RQ_SHIFT    	= 6,
 	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
-	SRP_RQ_MASK		= SRP_RQ_SIZE - 1,
 
 	SRP_SQ_SIZE		= SRP_RQ_SIZE,
-	SRP_SQ_MASK		= SRP_SQ_SIZE - 1,
 	SRP_RSP_SQ_SIZE		= 1,
 	SRP_REQ_SQ_SIZE		= SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
 	SRP_TSK_MGMT_SQ_SIZE	= 1,
 	SRP_CMD_SQ_SIZE		= SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1),
+	SRP_TAG_NO_REQ		= ~0U,
+	SRP_TAG_TSK_MGMT	= 1U << 31,
 
 	SRP_FMR_SIZE		= 256,
 	SRP_FMR_POOL_SIZE	= 1024,
@@ -113,15 +112,29 @@
 	struct list_head	list;
 	struct scsi_cmnd       *scmnd;
 	struct srp_iu	       *cmd;
-	struct srp_iu	       *tsk_mgmt;
 	struct ib_pool_fmr     *fmr;
-	struct completion	done;
 	short			index;
-	u8			cmd_done;
-	u8			tsk_status;
 };
 
 struct srp_target_port {
+	/* These are RW in the hot path, and commonly used together */
+	struct list_head	free_tx;
+	struct list_head	free_reqs;
+	spinlock_t		lock;
+	s32			req_lim;
+
+	/* These are read-only in the hot path */
+	struct ib_cq	       *send_cq ____cacheline_aligned_in_smp;
+	struct ib_cq	       *recv_cq;
+	struct ib_qp	       *qp;
+	u32			lkey;
+	u32			rkey;
+	enum srp_target_state	state;
+
+	/* Everything above this point is used in the hot path of
+	 * command processing. Try to keep them packed into cachelines.
+	 */
+
 	__be64			id_ext;
 	__be64			ioc_guid;
 	__be64			service_id;
@@ -138,24 +151,13 @@
 	int			path_query_id;
 
 	struct ib_cm_id	       *cm_id;
-	struct ib_cq	       *recv_cq;
-	struct ib_cq	       *send_cq;
-	struct ib_qp	       *qp;
 
 	int			max_ti_iu_len;
-	s32			req_lim;
 
 	int			zero_req_lim;
 
-	unsigned		rx_head;
-	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
-
-	unsigned		tx_head;
-	unsigned		tx_tail;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];
-
-	struct list_head	free_reqs;
-	struct list_head	req_queue;
+	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
 	struct srp_request	req_ring[SRP_CMD_SQ_SIZE];
 
 	struct work_struct	work;
@@ -163,16 +165,18 @@
 	struct list_head	list;
 	struct completion	done;
 	int			status;
-	enum srp_target_state	state;
 	int			qp_in_error;
+
+	struct completion	tsk_mgmt_done;
+	u8			tsk_mgmt_status;
 };
 
 struct srp_iu {
+	struct list_head	list;
 	u64			dma;
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
-	enum srp_iu_type	type;
 };
 
 #endif /* IB_SRP_H */
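
The srp_target_port reshuffle above groups the fields every command writes (free_tx, free_reqs, lock, req_lim) at the front and starts the read-mostly block on its own cache line via ____cacheline_aligned_in_smp, so per-I/O writers do not bounce the line that holds the QP/CQ pointers and keys. A generic hedged sketch of the same hot/cold layout idea, with hypothetical field names:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/cache.h>

/* hypothetical context split into hot (written per-I/O) and read-mostly
 * parts so the two groups live on different cache lines */
struct example_ctx {
	/* hot: touched and written on every request */
	spinlock_t		lock;
	struct list_head	free_reqs;
	s32			credits;

	/* read-mostly from here on; starts a new cache line on SMP builds */
	void __iomem	       *regs ____cacheline_aligned_in_smp;
	u32			features;
};
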
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 07c2cd4..1903c0f 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -6,7 +6,7 @@
 	depends on !S390
 
 config INPUT
-	tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED
+	tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
 	default y
 	help
 	  Say Y here if you have any input device (mouse, keyboard, tablet,
@@ -67,7 +67,7 @@
 comment "Userland interfaces"
 
 config INPUT_MOUSEDEV
-	tristate "Mouse interface" if EMBEDDED
+	tristate "Mouse interface" if EXPERT
 	default y
 	help
 	  Say Y here if you want your mouse to be accessible as char devices
@@ -150,7 +150,7 @@
 	  module will be called evbug.
 
 config INPUT_APMPOWER
-	tristate "Input Power Event -> APM Bridge" if EMBEDDED
+	tristate "Input Power Event -> APM Bridge" if EXPERT
 	depends on INPUT && APM_EMULATION
 	help
 	  Say Y here if you want suspend key events to trigger a user
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc..5b8f59d 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@
 	event->owner = owner;
 
 	list_add_tail(&event->node, &gameport_event_list);
-	schedule_work(&gameport_event_work);
+	queue_work(system_long_wq, &gameport_event_work);
 
 out:
 	spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7985114..11905b6 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -75,7 +75,6 @@
  * dev->event_lock held and interrupts disabled.
  */
 static void input_pass_event(struct input_dev *dev,
-			     struct input_handler *src_handler,
 			     unsigned int type, unsigned int code, int value)
 {
 	struct input_handler *handler;
@@ -94,15 +93,6 @@
 				continue;
 
 			handler = handle->handler;
-
-			/*
-			 * If this is the handler that injected this
-			 * particular event we want to skip it to avoid
-			 * filters firing again and again.
-			 */
-			if (handler == src_handler)
-				continue;
-
 			if (!handler->filter) {
 				if (filtered)
 					break;
@@ -132,7 +122,7 @@
 	if (test_bit(dev->repeat_key, dev->key) &&
 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
 
-		input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
+		input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
 
 		if (dev->sync) {
 			/*
@@ -141,7 +131,7 @@
 			 * Otherwise assume that the driver will send
 			 * SYN_REPORT once it's done.
 			 */
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 		}
 
 		if (dev->rep[REP_PERIOD])
@@ -174,7 +164,6 @@
 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
 
 static int input_handle_abs_event(struct input_dev *dev,
-				  struct input_handler *src_handler,
 				  unsigned int code, int *pval)
 {
 	bool is_mt_event;
@@ -218,15 +207,13 @@
 	/* Flush pending "slot" event */
 	if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
 		input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
-		input_pass_event(dev, src_handler,
-				 EV_ABS, ABS_MT_SLOT, dev->slot);
+		input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
 	}
 
 	return INPUT_PASS_TO_HANDLERS;
 }
 
 static void input_handle_event(struct input_dev *dev,
-			       struct input_handler *src_handler,
 			       unsigned int type, unsigned int code, int value)
 {
 	int disposition = INPUT_IGNORE_EVENT;
@@ -279,8 +266,7 @@
 
 	case EV_ABS:
 		if (is_event_supported(code, dev->absbit, ABS_MAX))
-			disposition = input_handle_abs_event(dev, src_handler,
-							     code, &value);
+			disposition = input_handle_abs_event(dev, code, &value);
 
 		break;
 
@@ -338,7 +324,7 @@
 		dev->event(dev, type, code, value);
 
 	if (disposition & INPUT_PASS_TO_HANDLERS)
-		input_pass_event(dev, src_handler, type, code, value);
+		input_pass_event(dev, type, code, value);
 }
 
 /**
@@ -367,7 +353,7 @@
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		add_input_randomness(type, code, value);
-		input_handle_event(dev, NULL, type, code, value);
+		input_handle_event(dev, type, code, value);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 }
@@ -397,8 +383,7 @@
 		rcu_read_lock();
 		grab = rcu_dereference(dev->grab);
 		if (!grab || grab == handle)
-			input_handle_event(dev, handle->handler,
-					   type, code, value);
+			input_handle_event(dev, type, code, value);
 		rcu_read_unlock();
 
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -611,10 +596,10 @@
 		for (code = 0; code <= KEY_MAX; code++) {
 			if (is_event_supported(code, dev->keybit, KEY_MAX) &&
 			    __test_and_clear_bit(code, dev->key)) {
-				input_pass_event(dev, NULL, EV_KEY, code, 0);
+				input_pass_event(dev, EV_KEY, code, 0);
 			}
 		}
-		input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
@@ -889,9 +874,9 @@
 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
 	    __test_and_clear_bit(old_keycode, dev->key)) {
 
-		input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
+		input_pass_event(dev, EV_KEY, old_keycode, 0);
 		if (dev->sync)
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 
  out:
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 5b59616..56eb471 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -255,6 +255,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called amijoy.
 
+config JOYSTICK_AS5011
+	tristate "Austria Microsystem AS5011 joystick"
+	depends on I2C
+	help
+	  Say Y here if you have an AS5011 digital joystick connected to your
+	  system.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called as5011.
+
 config JOYSTICK_JOYDUMP
 	tristate "Gameport data dumper"
 	select GAMEPORT
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index f3a8cbe..92dc0de 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_JOYSTICK_A3D)		+= a3d.o
 obj-$(CONFIG_JOYSTICK_ADI)		+= adi.o
 obj-$(CONFIG_JOYSTICK_AMIGA)		+= amijoy.o
+obj-$(CONFIG_JOYSTICK_AS5011)		+= as5011.o
 obj-$(CONFIG_JOYSTICK_ANALOG)		+= analog.o
 obj-$(CONFIG_JOYSTICK_COBRA)		+= cobra.o
 obj-$(CONFIG_JOYSTICK_DB9)		+= db9.o
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
new file mode 100644
index 0000000..f6732b5
--- /dev/null
+++ b/drivers/input/joystick/as5011.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ * Sponsored by ARMadeus Systems
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Driver for Austria Microsystems joysticks AS5011
+ *
+ * TODO:
+ *	- Power on the chip when open() and power down when close()
+ *	- Manage power mode
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/as5011.h>
+#include <linux/slab.h>
+
+#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick"
+#define MODULE_DEVICE_ALIAS "as5011"
+
+MODULE_AUTHOR("Fabien Marteau <fabien.marteau@armadeus.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/* registers */
+#define AS5011_CTRL1		0x76
+#define AS5011_CTRL2		0x75
+#define AS5011_XP		0x43
+#define AS5011_XN		0x44
+#define AS5011_YP		0x53
+#define AS5011_YN		0x54
+#define AS5011_X_REG		0x41
+#define AS5011_Y_REG		0x42
+#define AS5011_X_RES_INT	0x51
+#define AS5011_Y_RES_INT	0x52
+
+/* CTRL1 bits */
+#define AS5011_CTRL1_LP_PULSED		0x80
+#define AS5011_CTRL1_LP_ACTIVE		0x40
+#define AS5011_CTRL1_LP_CONTINUE	0x20
+#define AS5011_CTRL1_INT_WUP_EN		0x10
+#define AS5011_CTRL1_INT_ACT_EN		0x08
+#define AS5011_CTRL1_EXT_CLK_EN		0x04
+#define AS5011_CTRL1_SOFT_RST		0x02
+#define AS5011_CTRL1_DATA_VALID		0x01
+
+/* CTRL2 bits */
+#define AS5011_CTRL2_EXT_SAMPLE_EN	0x08
+#define AS5011_CTRL2_RC_BIAS_ON		0x04
+#define AS5011_CTRL2_INV_SPINNING	0x02
+
+#define AS5011_MAX_AXIS	80
+#define AS5011_MIN_AXIS	(-80)
+#define AS5011_FUZZ	8
+#define AS5011_FLAT	40
+
+struct as5011_device {
+	struct input_dev *input_dev;
+	struct i2c_client *i2c_client;
+	unsigned int button_gpio;
+	unsigned int button_irq;
+	unsigned int axis_irq;
+};
+
+static int as5011_i2c_write(struct i2c_client *client,
+			    uint8_t aregaddr,
+			    uint8_t avalue)
+{
+	uint8_t data[2] = { aregaddr, avalue };
+	struct i2c_msg msg = {
+		client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data
+	};
+	int error;
+
+	error = i2c_transfer(client->adapter, &msg, 1);
+	return error < 0 ? error : 0;
+}
+
+static int as5011_i2c_read(struct i2c_client *client,
+			   uint8_t aregaddr, signed char *value)
+{
+	uint8_t data[2] = { aregaddr };
+	struct i2c_msg msg_set[2] = {
+		{ client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
+		{ client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
+	};
+	int error;
+
+	error = i2c_transfer(client->adapter, msg_set, 2);
+	if (error < 0)
+		return error;
+
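+	/* The register holds an 8-bit two's complement value; convert it to a signed char */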
+	*value = data[0] & 0x80 ? -1 * (1 + ~data[0]) : data[0];
+	return 0;
+}
+
+static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
+{
+	struct as5011_device *as5011 = dev_id;
+	int val = gpio_get_value_cansleep(as5011->button_gpio);
+
+	input_report_key(as5011->input_dev, BTN_JOYSTICK, !val);
+	input_sync(as5011->input_dev);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id)
+{
+	struct as5011_device *as5011 = dev_id;
+	int error;
+	signed char x, y;
+
+	error = as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x);
+	if (error < 0)
+		goto out;
+
+	error = as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y);
+	if (error < 0)
+		goto out;
+
+	input_report_abs(as5011->input_dev, ABS_X, x);
+	input_report_abs(as5011->input_dev, ABS_Y, y);
+	input_sync(as5011->input_dev);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static int __devinit as5011_configure_chip(struct as5011_device *as5011,
+				const struct as5011_platform_data *plat_dat)
+{
+	struct i2c_client *client = as5011->i2c_client;
+	int error;
+	signed char value;
+
+	/* chip soft reset */
+	error = as5011_i2c_write(client, AS5011_CTRL1,
+				 AS5011_CTRL1_SOFT_RST);
+	if (error < 0) {
+		dev_err(&client->dev, "Soft reset failed\n");
+		return error;
+	}
+
+	mdelay(10);
+
+	error = as5011_i2c_write(client, AS5011_CTRL1,
+				 AS5011_CTRL1_LP_PULSED |
+				 AS5011_CTRL1_LP_ACTIVE |
+				 AS5011_CTRL1_INT_ACT_EN);
+	if (error < 0) {
+		dev_err(&client->dev, "Power config failed\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_CTRL2,
+				 AS5011_CTRL2_INV_SPINNING);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't invert spinning\n");
+		return error;
+	}
+
+	/* write threshold */
+	error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_YN, plat_dat->yn);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	/* Read once to release the chip's interrupt (GPIO) line */
+	error = as5011_i2c_read(client, AS5011_X_RES_INT, &value);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't read i2c X resolution value\n");
+		return error;
+	}
+
+	return 0;
+}
+
+static int __devinit as5011_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	const struct as5011_platform_data *plat_data;
+	struct as5011_device *as5011;
+	struct input_dev *input_dev;
+	int irq;
+	int error;
+
+	plat_data = client->dev.platform_data;
+	if (!plat_data)
+		return -EINVAL;
+
+	if (!plat_data->axis_irq) {
+		dev_err(&client->dev, "No axis IRQ?\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_PROTOCOL_MANGLING)) {
+		dev_err(&client->dev,
+			"need i2c bus that supports protocol mangling\n");
+		return -ENODEV;
+	}
+
+	as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!as5011 || !input_dev) {
+		dev_err(&client->dev,
+			"Can't allocate memory for device structure\n");
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	as5011->i2c_client = client;
+	as5011->input_dev = input_dev;
+	as5011->button_gpio = plat_data->button_gpio;
+	as5011->axis_irq = plat_data->axis_irq;
+
+	input_dev->name = "Austria Microsystem as5011 joystick";
+	input_dev->id.bustype = BUS_I2C;
+	input_dev->dev.parent = &client->dev;
+
+	__set_bit(EV_KEY, input_dev->evbit);
+	__set_bit(EV_ABS, input_dev->evbit);
+	__set_bit(BTN_JOYSTICK, input_dev->keybit);
+
+	input_set_abs_params(input_dev, ABS_X,
+		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+	input_set_abs_params(as5011->input_dev, ABS_Y,
+		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+
+	error = gpio_request(as5011->button_gpio, "AS5011 button");
+	if (error < 0) {
+		dev_err(&client->dev, "Failed to request button gpio\n");
+		goto err_free_mem;
+	}
+
+	irq = gpio_to_irq(as5011->button_gpio);
+	if (irq < 0) {
+		dev_err(&client->dev,
+			"Failed to get irq number for button gpio\n");
+		goto err_free_button_gpio;
+	}
+
+	as5011->button_irq = irq;
+
+	error = request_threaded_irq(as5011->button_irq,
+				     NULL, as5011_button_interrupt,
+				     IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				     "as5011_button", as5011);
+	if (error < 0) {
+		dev_err(&client->dev,
+			"Can't allocate button irq %d\n", as5011->button_irq);
+		goto err_free_button_gpio;
+	}
+
+	error = as5011_configure_chip(as5011, plat_data);
+	if (error)
+		goto err_free_button_irq;
+
+	error = request_threaded_irq(as5011->axis_irq, NULL,
+				     as5011_axis_interrupt,
+				     plat_data->axis_irqflags,
+				     "as5011_joystick", as5011);
+	if (error) {
+		dev_err(&client->dev,
+			"Can't allocate axis irq %d\n", plat_data->axis_irq);
+		goto err_free_button_irq;
+	}
+
+	error = input_register_device(as5011->input_dev);
+	if (error) {
+		dev_err(&client->dev, "Failed to register input device\n");
+		goto err_free_axis_irq;
+	}
+
+	i2c_set_clientdata(client, as5011);
+
+	return 0;
+
+err_free_axis_irq:
+	free_irq(as5011->axis_irq, as5011);
+err_free_button_irq:
+	free_irq(as5011->button_irq, as5011);
+err_free_button_gpio:
+	gpio_free(as5011->button_gpio);
+err_free_mem:
+	input_free_device(input_dev);
+	kfree(as5011);
+
+	return error;
+}
+
+static int __devexit as5011_remove(struct i2c_client *client)
+{
+	struct as5011_device *as5011 = i2c_get_clientdata(client);
+
+	free_irq(as5011->axis_irq, as5011);
+	free_irq(as5011->button_irq, as5011);
+	gpio_free(as5011->button_gpio);
+
+	input_unregister_device(as5011->input_dev);
+	kfree(as5011);
+
+	return 0;
+}
+
+static const struct i2c_device_id as5011_id[] = {
+	{ MODULE_DEVICE_ALIAS, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, as5011_id);
+
+static struct i2c_driver as5011_driver = {
+	.driver = {
+		.name = "as5011",
+	},
+	.probe		= as5011_probe,
+	.remove		= __devexit_p(as5011_remove),
+	.id_table	= as5011_id,
+};
+
+static int __init as5011_init(void)
+{
+	return i2c_add_driver(&as5011_driver);
+}
+module_init(as5011_init);
+
+static void __exit as5011_exit(void)
+{
+	i2c_del_driver(&as5011_driver);
+}
+module_exit(as5011_exit);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f829998..c7a9202 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
 # Input core configuration
 #
 menuconfig INPUT_KEYBOARD
-	bool "Keyboards" if EMBEDDED || !X86
+	bool "Keyboards" if EXPERT || !X86
 	default y
 	help
 	  Say Y here, and a list of supported keyboards will be displayed.
@@ -12,18 +12,6 @@
 
 if INPUT_KEYBOARD
 
-config KEYBOARD_AAED2000
-	tristate "AAED-2000 keyboard"
-	depends on MACH_AAED2000
-	select INPUT_POLLDEV
-	default y
-	help
-	  Say Y here to enable the keyboard on the Agilent AAED-2000
-	  development board.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called aaed2000_kbd.
-
 config KEYBOARD_ADP5520
 	tristate "Keypad Support for ADP5520 PMIC"
 	depends on PMIC_ADP5520
@@ -69,7 +57,7 @@
 	  module will be called atakbd.
 
 config KEYBOARD_ATKBD
-	tristate "AT keyboard" if EMBEDDED || !X86
+	tristate "AT keyboard" if EXPERT || !X86
 	default y
 	select SERIO
 	select SERIO_LIBPS2
@@ -355,6 +343,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called nmk-ske-keypad.
 
+config KEYBOARD_TEGRA
+	tristate "NVIDIA Tegra internal matrix keyboard controller support"
+	depends on ARCH_TEGRA
+	help
+	  Say Y here if you want to use a matrix keyboard connected directly
+	  to the internal keyboard controller on Tegra SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called tegra-kbc.
+
 config KEYBOARD_OPENCORES
 	tristate "OpenCores Keyboard Controller"
 	help
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 8933e9c..468c627 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,7 +4,6 @@
 
 # Each configuration option enables a list of files.
 
-obj-$(CONFIG_KEYBOARD_AAED2000)		+= aaed2000_kbd.o
 obj-$(CONFIG_KEYBOARD_ADP5520)		+= adp5520-keys.o
 obj-$(CONFIG_KEYBOARD_ADP5588)		+= adp5588-keys.o
 obj-$(CONFIG_KEYBOARD_AMIGA)		+= amikbd.o
@@ -43,6 +42,7 @@
 obj-$(CONFIG_KEYBOARD_STOWAWAY)		+= stowaway.o
 obj-$(CONFIG_KEYBOARD_SUNKBD)		+= sunkbd.o
 obj-$(CONFIG_KEYBOARD_TC3589X)		+= tc3589x-keypad.o
+obj-$(CONFIG_KEYBOARD_TEGRA)		+= tegra-kbc.o
 obj-$(CONFIG_KEYBOARD_TNETV107X)	+= tnetv107x-keypad.o
 obj-$(CONFIG_KEYBOARD_TWL4030)		+= twl4030_keypad.o
 obj-$(CONFIG_KEYBOARD_XTKBD)		+= xtkbd.o
diff --git a/drivers/input/keyboard/aaed2000_kbd.c b/drivers/input/keyboard/aaed2000_kbd.c
deleted file mode 100644
index 18222a6..0000000
--- a/drivers/input/keyboard/aaed2000_kbd.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *  Keyboard driver for the AAED-2000 dev board
- *
- *  Copyright (c) 2006 Nicolas Bellido Y Ortega
- *
- *  Based on corgikbd.c
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input-polldev.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h>
-#include <mach/aaed2000.h>
-
-#define KB_ROWS			12
-#define KB_COLS			8
-#define KB_ROWMASK(r)		(1 << (r))
-#define SCANCODE(r,c)		(((c) * KB_ROWS) + (r))
-#define NR_SCANCODES		(KB_COLS * KB_ROWS)
-
-#define SCAN_INTERVAL		(50) /* ms */
-#define KB_ACTIVATE_DELAY	(20) /* us */
-
-static unsigned char aaedkbd_keycode[NR_SCANCODES] = {
-	KEY_9, KEY_0, KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, 0, KEY_SPACE, KEY_KP6, 0, KEY_KPDOT, 0, 0,
-	KEY_K, KEY_M, KEY_O, KEY_DOT, KEY_SLASH, 0, KEY_F, 0, 0, 0, KEY_LEFTSHIFT, 0,
-	KEY_I, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH, 0, 0, 0, 0, 0, KEY_RIGHTSHIFT, 0,
-	KEY_8, KEY_L, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_ENTER, 0, 0, 0, 0, 0, 0, 0,
-	KEY_J, KEY_H, KEY_B, KEY_KP8, KEY_KP4, 0, KEY_C, KEY_D, KEY_S, KEY_A, 0, KEY_CAPSLOCK,
-	KEY_Y, KEY_U, KEY_N, KEY_T, 0, 0, KEY_R, KEY_E, KEY_W, KEY_Q, 0, KEY_TAB,
-	KEY_7, KEY_6, KEY_G, 0, KEY_5, 0, KEY_4, KEY_3, KEY_2, KEY_1, 0, KEY_GRAVE,
-	0, 0, KEY_COMMA, 0, KEY_KP2, 0, KEY_V, KEY_LEFTALT, KEY_X, KEY_Z, 0, KEY_LEFTCTRL
-};
-
-struct aaedkbd {
-	unsigned char keycode[ARRAY_SIZE(aaedkbd_keycode)];
-	struct input_polled_dev *poll_dev;
-	int kbdscan_state[KB_COLS];
-	int kbdscan_count[KB_COLS];
-};
-
-#define KBDSCAN_STABLE_COUNT 2
-
-static void aaedkbd_report_col(struct aaedkbd *aaedkbd,
-				unsigned int col, unsigned int rowd)
-{
-	unsigned int scancode, pressed;
-	unsigned int row;
-
-	for (row = 0; row < KB_ROWS; row++) {
-		scancode = SCANCODE(row, col);
-		pressed = rowd & KB_ROWMASK(row);
-
-		input_report_key(aaedkbd->poll_dev->input,
-				 aaedkbd->keycode[scancode], pressed);
-	}
-}
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void aaedkbd_poll(struct input_polled_dev *dev)
-{
-	struct aaedkbd *aaedkbd = dev->private;
-	unsigned int col, rowd;
-
-	col = 0;
-	do {
-		AAEC_GPIO_KSCAN = col + 8;
-		udelay(KB_ACTIVATE_DELAY);
-		rowd = AAED_EXT_GPIO & AAED_EGPIO_KBD_SCAN;
-
-		if (rowd != aaedkbd->kbdscan_state[col]) {
-			aaedkbd->kbdscan_count[col] = 0;
-			aaedkbd->kbdscan_state[col] = rowd;
-		} else if (++aaedkbd->kbdscan_count[col] >= KBDSCAN_STABLE_COUNT) {
-			aaedkbd_report_col(aaedkbd, col, rowd);
-			col++;
-		}
-	} while (col < KB_COLS);
-
-	AAEC_GPIO_KSCAN = 0x07;
-	input_sync(dev->input);
-}
-
-static int __devinit aaedkbd_probe(struct platform_device *pdev)
-{
-	struct aaedkbd *aaedkbd;
-	struct input_polled_dev *poll_dev;
-	struct input_dev *input_dev;
-	int i;
-	int error;
-
-	aaedkbd = kzalloc(sizeof(struct aaedkbd), GFP_KERNEL);
-	poll_dev = input_allocate_polled_device();
-	if (!aaedkbd || !poll_dev) {
-		error = -ENOMEM;
-		goto fail;
-	}
-
-	platform_set_drvdata(pdev, aaedkbd);
-
-	aaedkbd->poll_dev = poll_dev;
-	memcpy(aaedkbd->keycode, aaedkbd_keycode, sizeof(aaedkbd->keycode));
-
-	poll_dev->private = aaedkbd;
-	poll_dev->poll = aaedkbd_poll;
-	poll_dev->poll_interval = SCAN_INTERVAL;
-
-	input_dev = poll_dev->input;
-	input_dev->name = "AAED-2000 Keyboard";
-	input_dev->phys = "aaedkbd/input0";
-	input_dev->id.bustype = BUS_HOST;
-	input_dev->id.vendor = 0x0001;
-	input_dev->id.product = 0x0001;
-	input_dev->id.version = 0x0100;
-	input_dev->dev.parent = &pdev->dev;
-
-	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
-	input_dev->keycode = aaedkbd->keycode;
-	input_dev->keycodesize = sizeof(unsigned char);
-	input_dev->keycodemax = ARRAY_SIZE(aaedkbd_keycode);
-
-	for (i = 0; i < ARRAY_SIZE(aaedkbd_keycode); i++)
-		set_bit(aaedkbd->keycode[i], input_dev->keybit);
-	clear_bit(0, input_dev->keybit);
-
-	error = input_register_polled_device(aaedkbd->poll_dev);
-	if (error)
-		goto fail;
-
-	return 0;
-
- fail:	kfree(aaedkbd);
-	input_free_polled_device(poll_dev);
-	return error;
-}
-
-static int __devexit aaedkbd_remove(struct platform_device *pdev)
-{
-	struct aaedkbd *aaedkbd = platform_get_drvdata(pdev);
-
-	input_unregister_polled_device(aaedkbd->poll_dev);
-	input_free_polled_device(aaedkbd->poll_dev);
-	kfree(aaedkbd);
-
-	return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:aaed2000-keyboard");
-
-static struct platform_driver aaedkbd_driver = {
-	.probe		= aaedkbd_probe,
-	.remove		= __devexit_p(aaedkbd_remove),
-	.driver		= {
-		.name	= "aaed2000-keyboard",
-		.owner	= THIS_MODULE,
-	},
-};
-
-static int __init aaedkbd_init(void)
-{
-	return platform_driver_register(&aaedkbd_driver);
-}
-
-static void __exit aaedkbd_exit(void)
-{
-	platform_driver_unregister(&aaedkbd_driver);
-}
-
-module_init(aaedkbd_init);
-module_exit(aaedkbd_exit);
-
-MODULE_AUTHOR("Nicolas Bellido Y Ortega");
-MODULE_DESCRIPTION("AAED-2000 Keyboard Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6069abe..eb30063 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -322,7 +322,7 @@
 	struct gpio_keys_button *button = bdata->button;
 	struct input_dev *input = bdata->input;
 	unsigned int type = button->type ?: EV_KEY;
-	int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low;
+	int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
 
 	input_event(input, type, button->code, !!state);
 	input_sync(input);
@@ -410,8 +410,8 @@
 	if (!button->can_disable)
 		irqflags |= IRQF_SHARED;
 
-	error = request_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
-	if (error) {
+	error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
+	if (error < 0) {
 		dev_err(dev, "Unable to claim irq %d; error %d\n",
 			irq, error);
 		goto fail3;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
new file mode 100644
index 0000000..99ce903
--- /dev/null
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -0,0 +1,783 @@
+/*
+ * Keyboard class input driver for the NVIDIA Tegra SoC internal matrix
+ * keyboard controller
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <mach/clk.h>
+#include <mach/kbc.h>
+
+#define KBC_MAX_DEBOUNCE_CNT	0x3ffu
+
+/* KBC row scan time and delay for beginning the row scan. */
+#define KBC_ROW_SCAN_TIME	16
+#define KBC_ROW_SCAN_DLY	5
+
+/* KBC uses a 32 kHz clock, so one cycle = 1/32 kHz */
+#define KBC_CYCLE_USEC	32
+
+/* KBC Registers */
+
+/* KBC Control Register */
+#define KBC_CONTROL_0	0x0
+#define KBC_FIFO_TH_CNT_SHIFT(cnt)	(cnt << 14)
+#define KBC_DEBOUNCE_CNT_SHIFT(cnt)	(cnt << 4)
+#define KBC_CONTROL_FIFO_CNT_INT_EN	(1 << 3)
+#define KBC_CONTROL_KBC_EN		(1 << 0)
+
+/* KBC Interrupt Register */
+#define KBC_INT_0	0x4
+#define KBC_INT_FIFO_CNT_INT_STATUS	(1 << 2)
+
+#define KBC_ROW_CFG0_0	0x8
+#define KBC_COL_CFG0_0	0x18
+#define KBC_INIT_DLY_0	0x28
+#define KBC_RPT_DLY_0	0x2c
+#define KBC_KP_ENT0_0	0x30
+#define KBC_KP_ENT1_0	0x34
+#define KBC_ROW0_MASK_0	0x38
+
+#define KBC_ROW_SHIFT	3
+
+struct tegra_kbc {
+	void __iomem *mmio;
+	struct input_dev *idev;
+	unsigned int irq;
+	unsigned int wake_enable_rows;
+	unsigned int wake_enable_cols;
+	spinlock_t lock;
+	unsigned int repoll_dly;
+	unsigned long cp_dly_jiffies;
+	bool use_fn_map;
+	const struct tegra_kbc_platform_data *pdata;
+	unsigned short keycode[KBC_MAX_KEY * 2];
+	unsigned short current_keys[KBC_MAX_KPENT];
+	unsigned int num_pressed_keys;
+	struct timer_list timer;
+	struct clk *clk;
+};
+
+static const u32 tegra_kbc_default_keymap[] = {
+	KEY(0, 2, KEY_W),
+	KEY(0, 3, KEY_S),
+	KEY(0, 4, KEY_A),
+	KEY(0, 5, KEY_Z),
+	KEY(0, 7, KEY_FN),
+
+	KEY(1, 7, KEY_LEFTMETA),
+
+	KEY(2, 6, KEY_RIGHTALT),
+	KEY(2, 7, KEY_LEFTALT),
+
+	KEY(3, 0, KEY_5),
+	KEY(3, 1, KEY_4),
+	KEY(3, 2, KEY_R),
+	KEY(3, 3, KEY_E),
+	KEY(3, 4, KEY_F),
+	KEY(3, 5, KEY_D),
+	KEY(3, 6, KEY_X),
+
+	KEY(4, 0, KEY_7),
+	KEY(4, 1, KEY_6),
+	KEY(4, 2, KEY_T),
+	KEY(4, 3, KEY_H),
+	KEY(4, 4, KEY_G),
+	KEY(4, 5, KEY_V),
+	KEY(4, 6, KEY_C),
+	KEY(4, 7, KEY_SPACE),
+
+	KEY(5, 0, KEY_9),
+	KEY(5, 1, KEY_8),
+	KEY(5, 2, KEY_U),
+	KEY(5, 3, KEY_Y),
+	KEY(5, 4, KEY_J),
+	KEY(5, 5, KEY_N),
+	KEY(5, 6, KEY_B),
+	KEY(5, 7, KEY_BACKSLASH),
+
+	KEY(6, 0, KEY_MINUS),
+	KEY(6, 1, KEY_0),
+	KEY(6, 2, KEY_O),
+	KEY(6, 3, KEY_I),
+	KEY(6, 4, KEY_L),
+	KEY(6, 5, KEY_K),
+	KEY(6, 6, KEY_COMMA),
+	KEY(6, 7, KEY_M),
+
+	KEY(7, 1, KEY_EQUAL),
+	KEY(7, 2, KEY_RIGHTBRACE),
+	KEY(7, 3, KEY_ENTER),
+	KEY(7, 7, KEY_MENU),
+
+	KEY(8, 4, KEY_RIGHTSHIFT),
+	KEY(8, 5, KEY_LEFTSHIFT),
+
+	KEY(9, 5, KEY_RIGHTCTRL),
+	KEY(9, 7, KEY_LEFTCTRL),
+
+	KEY(11, 0, KEY_LEFTBRACE),
+	KEY(11, 1, KEY_P),
+	KEY(11, 2, KEY_APOSTROPHE),
+	KEY(11, 3, KEY_SEMICOLON),
+	KEY(11, 4, KEY_SLASH),
+	KEY(11, 5, KEY_DOT),
+
+	KEY(12, 0, KEY_F10),
+	KEY(12, 1, KEY_F9),
+	KEY(12, 2, KEY_BACKSPACE),
+	KEY(12, 3, KEY_3),
+	KEY(12, 4, KEY_2),
+	KEY(12, 5, KEY_UP),
+	KEY(12, 6, KEY_PRINT),
+	KEY(12, 7, KEY_PAUSE),
+
+	KEY(13, 0, KEY_INSERT),
+	KEY(13, 1, KEY_DELETE),
+	KEY(13, 3, KEY_PAGEUP),
+	KEY(13, 4, KEY_PAGEDOWN),
+	KEY(13, 5, KEY_RIGHT),
+	KEY(13, 6, KEY_DOWN),
+	KEY(13, 7, KEY_LEFT),
+
+	KEY(14, 0, KEY_F11),
+	KEY(14, 1, KEY_F12),
+	KEY(14, 2, KEY_F8),
+	KEY(14, 3, KEY_Q),
+	KEY(14, 4, KEY_F4),
+	KEY(14, 5, KEY_F3),
+	KEY(14, 6, KEY_1),
+	KEY(14, 7, KEY_F7),
+
+	KEY(15, 0, KEY_ESC),
+	KEY(15, 1, KEY_GRAVE),
+	KEY(15, 2, KEY_F5),
+	KEY(15, 3, KEY_TAB),
+	KEY(15, 4, KEY_F1),
+	KEY(15, 5, KEY_F2),
+	KEY(15, 6, KEY_CAPSLOCK),
+	KEY(15, 7, KEY_F6),
+
+	/* Software Handled Function Keys */
+	KEY(20, 0, KEY_KP7),
+
+	KEY(21, 0, KEY_KP9),
+	KEY(21, 1, KEY_KP8),
+	KEY(21, 2, KEY_KP4),
+	KEY(21, 4, KEY_KP1),
+
+	KEY(22, 1, KEY_KPSLASH),
+	KEY(22, 2, KEY_KP6),
+	KEY(22, 3, KEY_KP5),
+	KEY(22, 4, KEY_KP3),
+	KEY(22, 5, KEY_KP2),
+	KEY(22, 7, KEY_KP0),
+
+	KEY(27, 1, KEY_KPASTERISK),
+	KEY(27, 3, KEY_KPMINUS),
+	KEY(27, 4, KEY_KPPLUS),
+	KEY(27, 5, KEY_KPDOT),
+
+	KEY(28, 5, KEY_VOLUMEUP),
+
+	KEY(29, 3, KEY_HOME),
+	KEY(29, 4, KEY_END),
+	KEY(29, 5, KEY_BRIGHTNESSDOWN),
+	KEY(29, 6, KEY_VOLUMEDOWN),
+	KEY(29, 7, KEY_BRIGHTNESSUP),
+
+	KEY(30, 0, KEY_NUMLOCK),
+	KEY(30, 1, KEY_SCROLLLOCK),
+	KEY(30, 2, KEY_MUTE),
+
+	KEY(31, 4, KEY_HELP),
+};
+
+static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+	.keymap		= tegra_kbc_default_keymap,
+	.keymap_size	= ARRAY_SIZE(tegra_kbc_default_keymap),
+};
+
+static void tegra_kbc_report_released_keys(struct input_dev *input,
+					   unsigned short old_keycodes[],
+					   unsigned int old_num_keys,
+					   unsigned short new_keycodes[],
+					   unsigned int new_num_keys)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < old_num_keys; i++) {
+		for (j = 0; j < new_num_keys; j++)
+			if (old_keycodes[i] == new_keycodes[j])
+				break;
+
+		if (j == new_num_keys)
+			input_report_key(input, old_keycodes[i], 0);
+	}
+}
+
+static void tegra_kbc_report_pressed_keys(struct input_dev *input,
+					  unsigned char scancodes[],
+					  unsigned short keycodes[],
+					  unsigned int num_pressed_keys)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_pressed_keys; i++) {
+		input_event(input, EV_MSC, MSC_SCAN, scancodes[i]);
+		input_report_key(input, keycodes[i], 1);
+	}
+}
+
+static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
+{
+	unsigned char scancodes[KBC_MAX_KPENT];
+	unsigned short keycodes[KBC_MAX_KPENT];
+	u32 val = 0;
+	unsigned int i;
+	unsigned int num_down = 0;
+	unsigned long flags;
+	bool fn_keypress = false;
+
+	spin_lock_irqsave(&kbc->lock, flags);
+	for (i = 0; i < KBC_MAX_KPENT; i++) {
+		if ((i % 4) == 0)
+			val = readl(kbc->mmio + KBC_KP_ENT0_0 + i);
+
+		if (val & 0x80) {
+			unsigned int col = val & 0x07;
+			unsigned int row = (val >> 3) & 0x0f;
+			unsigned char scancode =
+				MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
+
+			scancodes[num_down] = scancode;
+			keycodes[num_down] = kbc->keycode[scancode];
+			/* If driver uses Fn map, do not report the Fn key. */
+			if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+				fn_keypress = true;
+			else
+				num_down++;
+		}
+
+		val >>= 8;
+	}
+
+	/*
+	 * If the platform uses Fn keymaps, translate keys on an Fn keypress.
+	 * Function keycodes are offset by KBC_MAX_KEY from the plain keycodes.
+	 */
+	if (fn_keypress) {
+		for (i = 0; i < num_down; i++) {
+			scancodes[i] += KBC_MAX_KEY;
+			keycodes[i] = kbc->keycode[scancodes[i]];
+		}
+	}
+
+	spin_unlock_irqrestore(&kbc->lock, flags);
+
+	tegra_kbc_report_released_keys(kbc->idev,
+				       kbc->current_keys, kbc->num_pressed_keys,
+				       keycodes, num_down);
+	tegra_kbc_report_pressed_keys(kbc->idev, scancodes, keycodes, num_down);
+	input_sync(kbc->idev);
+
+	memcpy(kbc->current_keys, keycodes, sizeof(kbc->current_keys));
+	kbc->num_pressed_keys = num_down;
+}
+
+static void tegra_kbc_keypress_timer(unsigned long data)
+{
+	struct tegra_kbc *kbc = (struct tegra_kbc *)data;
+	unsigned long flags;
+	u32 val;
+	unsigned int i;
+
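+	/* Bits [7:4] of KBC_INT_0 give the number of entries currently in the key FIFO */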
+	val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
+	if (val) {
+		unsigned long dly;
+
+		tegra_kbc_report_keys(kbc);
+
+		/*
+		 * If more than one key is pressed, we need not wait
+		 * for the repoll delay.
+		 */
+		dly = (val == 1) ? kbc->repoll_dly : 1;
+		mod_timer(&kbc->timer, jiffies + msecs_to_jiffies(dly));
+	} else {
+		/* Release any pressed keys and exit the polling loop */
+		for (i = 0; i < kbc->num_pressed_keys; i++)
+			input_report_key(kbc->idev, kbc->current_keys[i], 0);
+		input_sync(kbc->idev);
+
+		kbc->num_pressed_keys = 0;
+
+		/* All keys are released so enable the keypress interrupt */
+		spin_lock_irqsave(&kbc->lock, flags);
+		val = readl(kbc->mmio + KBC_CONTROL_0);
+		val |= KBC_CONTROL_FIFO_CNT_INT_EN;
+		writel(val, kbc->mmio + KBC_CONTROL_0);
+		spin_unlock_irqrestore(&kbc->lock, flags);
+	}
+}
+
+static irqreturn_t tegra_kbc_isr(int irq, void *args)
+{
+	struct tegra_kbc *kbc = args;
+	u32 val, ctl;
+
+	/*
+	 * Until all keys are released, defer further processing to
+	 * the polling loop in tegra_kbc_keypress_timer
+	 */
+	ctl = readl(kbc->mmio + KBC_CONTROL_0);
+	ctl &= ~KBC_CONTROL_FIFO_CNT_INT_EN;
+	writel(ctl, kbc->mmio + KBC_CONTROL_0);
+
+	/*
+	 * Quickly bail out & reenable interrupts if the fifo threshold
+	 * count interrupt wasn't the interrupt source
+	 */
+	val = readl(kbc->mmio + KBC_INT_0);
+	writel(val, kbc->mmio + KBC_INT_0);
+
+	if (val & KBC_INT_FIFO_CNT_INT_STATUS) {
+		/*
+		 * Schedule timer to run when hardware is in continuous
+		 * polling mode.
+		 */
+		mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+	} else {
+		ctl |= KBC_CONTROL_FIFO_CNT_INT_EN;
+		writel(ctl, kbc->mmio + KBC_CONTROL_0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
+{
+	const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+	int i;
+	unsigned int rst_val;
+
+	BUG_ON(pdata->wake_cnt > KBC_MAX_KEY);
+	rst_val = (filter && pdata->wake_cnt) ? ~0 : 0;
+
+	for (i = 0; i < KBC_MAX_ROW; i++)
+		writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
+
+	if (filter) {
+		for (i = 0; i < pdata->wake_cnt; i++) {
+			u32 val, addr;
+			addr = pdata->wake_cfg[i].row * 4 + KBC_ROW0_MASK_0;
+			val = readl(kbc->mmio + addr);
+			val &= ~(1 << pdata->wake_cfg[i].col);
+			writel(val, kbc->mmio + addr);
+		}
+	}
+}
+
+static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
+{
+	const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+	int i;
+
+	for (i = 0; i < KBC_MAX_GPIO; i++) {
+		u32 r_shft = 5 * (i % 6);
+		u32 c_shft = 4 * (i % 8);
+		u32 r_mask = 0x1f << r_shft;
+		u32 c_mask = 0x0f << c_shft;
+		u32 r_offs = (i / 6) * 4 + KBC_ROW_CFG0_0;
+		u32 c_offs = (i / 8) * 4 + KBC_COL_CFG0_0;
+		u32 row_cfg = readl(kbc->mmio + r_offs);
+		u32 col_cfg = readl(kbc->mmio + c_offs);
+
+		row_cfg &= ~r_mask;
+		col_cfg &= ~c_mask;
+
+		if (pdata->pin_cfg[i].is_row)
+			row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
+		else
+			col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
+
+		writel(row_cfg, kbc->mmio + r_offs);
+		writel(col_cfg, kbc->mmio + c_offs);
+	}
+}
+
+static int tegra_kbc_start(struct tegra_kbc *kbc)
+{
+	const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+	unsigned long flags;
+	unsigned int debounce_cnt;
+	u32 val = 0;
+
+	clk_enable(kbc->clk);
+
+	/* Reset the KBC controller to clear all previous status. */
+	tegra_periph_reset_assert(kbc->clk);
+	udelay(100);
+	tegra_periph_reset_deassert(kbc->clk);
+	udelay(100);
+
+	tegra_kbc_config_pins(kbc);
+	tegra_kbc_setup_wakekeys(kbc, false);
+
+	writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
+
+	/* Keyboard debounce count is maximum of 12 bits. */
+	debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+	val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
+	val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
+	val |= KBC_CONTROL_FIFO_CNT_INT_EN;  /* interrupt on FIFO threshold */
+	val |= KBC_CONTROL_KBC_EN;     /* enable */
+	writel(val, kbc->mmio + KBC_CONTROL_0);
+
+	/*
+	 * Compute the delay (in microseconds) from interrupt mode to continuous
+	 * polling mode so the timer routine is scheduled appropriately.
+	 */
+	val = readl(kbc->mmio + KBC_INIT_DLY_0);
+	kbc->cp_dly_jiffies = usecs_to_jiffies((val & 0xfffff) * 32);
+
+	kbc->num_pressed_keys = 0;
+
+	/*
+	 * Atomically clear out any remaining entries in the key FIFO
+	 * and enable keyboard interrupts.
+	 */
+	spin_lock_irqsave(&kbc->lock, flags);
+	while (1) {
+		val = readl(kbc->mmio + KBC_INT_0);
+		val >>= 4;
+		if (!val)
+			break;
+
+		val = readl(kbc->mmio + KBC_KP_ENT0_0);
+		val = readl(kbc->mmio + KBC_KP_ENT1_0);
+	}
+	writel(0x7, kbc->mmio + KBC_INT_0);
+	spin_unlock_irqrestore(&kbc->lock, flags);
+
+	enable_irq(kbc->irq);
+
+	return 0;
+}
+
+static void tegra_kbc_stop(struct tegra_kbc *kbc)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&kbc->lock, flags);
+	val = readl(kbc->mmio + KBC_CONTROL_0);
+	val &= ~1;
+	writel(val, kbc->mmio + KBC_CONTROL_0);
+	spin_unlock_irqrestore(&kbc->lock, flags);
+
+	disable_irq(kbc->irq);
+	del_timer_sync(&kbc->timer);
+
+	clk_disable(kbc->clk);
+}
+
+static int tegra_kbc_open(struct input_dev *dev)
+{
+	struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+	return tegra_kbc_start(kbc);
+}
+
+static void tegra_kbc_close(struct input_dev *dev)
+{
+	struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+	return tegra_kbc_stop(kbc);
+}
+
+static bool __devinit
+tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
+			struct device *dev, unsigned int *num_rows)
+{
+	int i;
+
+	*num_rows = 0;
+
+	for (i = 0; i < KBC_MAX_GPIO; i++) {
+		const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
+
+		if (pin_cfg->is_row) {
+			if (pin_cfg->num >= KBC_MAX_ROW) {
+				dev_err(dev,
+					"pin_cfg[%d]: invalid row number %d\n",
+					i, pin_cfg->num);
+				return false;
+			}
+			(*num_rows)++;
+		} else {
+			if (pin_cfg->num >= KBC_MAX_COL) {
+				dev_err(dev,
+					"pin_cfg[%d]: invalid column number %d\n",
+					i, pin_cfg->num);
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+static int __devinit tegra_kbc_probe(struct platform_device *pdev)
+{
+	const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
+	const struct matrix_keymap_data *keymap_data;
+	struct tegra_kbc *kbc;
+	struct input_dev *input_dev;
+	struct resource *res;
+	int irq;
+	int err;
+	int i;
+	int num_rows = 0;
+	unsigned int debounce_cnt;
+	unsigned int scan_time_rows;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get I/O memory\n");
+		return -ENXIO;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
+		return -ENXIO;
+	}
+
+	kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!kbc || !input_dev) {
+		err = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	kbc->pdata = pdata;
+	kbc->idev = input_dev;
+	kbc->irq = irq;
+	spin_lock_init(&kbc->lock);
+	setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
+
+	res = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to request I/O memory\n");
+		err = -EBUSY;
+		goto err_free_mem;
+	}
+
+	kbc->mmio = ioremap(res->start, resource_size(res));
+	if (!kbc->mmio) {
+		dev_err(&pdev->dev, "failed to remap I/O memory\n");
+		err = -ENXIO;
+		goto err_free_mem_region;
+	}
+
+	kbc->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(kbc->clk)) {
+		dev_err(&pdev->dev, "failed to get keyboard clock\n");
+		err = PTR_ERR(kbc->clk);
+		goto err_iounmap;
+	}
+
+	kbc->wake_enable_rows = 0;
+	kbc->wake_enable_cols = 0;
+	for (i = 0; i < pdata->wake_cnt; i++) {
+		kbc->wake_enable_rows |= (1 << pdata->wake_cfg[i].row);
+		kbc->wake_enable_cols |= (1 << pdata->wake_cfg[i].col);
+	}
+
+	/*
+	 * The time delay between two consecutive reads of the FIFO is
+	 * the sum of the repeat time and the time taken for scanning
+	 * the rows. There is an additional delay before the row scanning
+	 * starts. The repoll delay is computed in milliseconds.
+	 */
+	debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+	scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
+	kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
+	kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000;
+
+	input_dev->name = pdev->name;
+	input_dev->id.bustype = BUS_HOST;
+	input_dev->dev.parent = &pdev->dev;
+	input_dev->open = tegra_kbc_open;
+	input_dev->close = tegra_kbc_close;
+
+	input_set_drvdata(input_dev, kbc);
+
+	input_dev->evbit[0] = BIT_MASK(EV_KEY);
+	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+
+	input_dev->keycode = kbc->keycode;
+	input_dev->keycodesize = sizeof(kbc->keycode[0]);
+	input_dev->keycodemax = KBC_MAX_KEY;
+	if (pdata->use_fn_map)
+		input_dev->keycodemax *= 2;
+
+	kbc->use_fn_map = pdata->use_fn_map;
+	keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
+	matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
+				   input_dev->keycode, input_dev->keybit);
+
+	err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
+			  pdev->name, kbc);
+	if (err) {
+		dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
+		goto err_put_clk;
+	}
+
+	disable_irq(kbc->irq);
+
+	err = input_register_device(kbc->idev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register input device\n");
+		goto err_free_irq;
+	}
+
+	platform_set_drvdata(pdev, kbc);
+	device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+	return 0;
+
+err_free_irq:
+	free_irq(kbc->irq, pdev);
+err_put_clk:
+	clk_put(kbc->clk);
+err_iounmap:
+	iounmap(kbc->mmio);
+err_free_mem_region:
+	release_mem_region(res->start, resource_size(res));
+err_free_mem:
+	input_free_device(kbc->idev);
+	kfree(kbc);
+
+	return err;
+}
+
+static int __devexit tegra_kbc_remove(struct platform_device *pdev)
+{
+	struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	free_irq(kbc->irq, pdev);
+	clk_put(kbc->clk);
+
+	input_unregister_device(kbc->idev);
+	iounmap(kbc->mmio);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	kfree(kbc);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_kbc_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+
+	if (device_may_wakeup(&pdev->dev)) {
+		tegra_kbc_setup_wakekeys(kbc, true);
+		enable_irq_wake(kbc->irq);
+		/* Forcefully clear the interrupt status */
+		writel(0x7, kbc->mmio + KBC_INT_0);
+		msleep(30);
+	} else {
+		mutex_lock(&kbc->idev->mutex);
+		if (kbc->idev->users)
+			tegra_kbc_stop(kbc);
+		mutex_unlock(&kbc->idev->mutex);
+	}
+
+	return 0;
+}
+
+static int tegra_kbc_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+	int err = 0;
+
+	if (device_may_wakeup(&pdev->dev)) {
+		disable_irq_wake(kbc->irq);
+		tegra_kbc_setup_wakekeys(kbc, false);
+	} else {
+		mutex_lock(&kbc->idev->mutex);
+		if (kbc->idev->users)
+			err = tegra_kbc_start(kbc);
+		mutex_unlock(&kbc->idev->mutex);
+	}
+
+	return err;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
+
+static struct platform_driver tegra_kbc_driver = {
+	.probe		= tegra_kbc_probe,
+	.remove		= __devexit_p(tegra_kbc_remove),
+	.driver	= {
+		.name	= "tegra-kbc",
+		.owner  = THIS_MODULE,
+		.pm	= &tegra_kbc_pm_ops,
+	},
+};
+
+static void __exit tegra_kbc_exit(void)
+{
+	platform_driver_unregister(&tegra_kbc_driver);
+}
+module_exit(tegra_kbc_exit);
+
+static int __init tegra_kbc_init(void)
+{
+	return platform_driver_register(&tegra_kbc_driver);
+}
+module_init(tegra_kbc_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
+MODULE_DESCRIPTION("Tegra matrix keyboard controller driver");
+MODULE_ALIAS("platform:tegra-kbc");
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index b4a81eb..c8f097a 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
@@ -219,9 +220,9 @@
 	}
 
 	kp->clk = clk_get(dev, NULL);
-	if (!kp->clk) {
+	if (IS_ERR(kp->clk)) {
 		dev_err(dev, "cannot claim device clock\n");
-		error = -EINVAL;
+		error = PTR_ERR(kp->clk);
 		goto error_clk;
 	}
 
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 9dfd6e5..1f38302 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -69,11 +69,7 @@
 	}
 
 	if (value > 20 && value < 32767)
-#ifndef FREQ
-		count = (ixp4xx_get_board_tick_rate() / (value * 4)) - 1;
-#else
-		count = (FREQ / (value * 4)) - 1;
-#endif
+		count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
 
 	ixp4xx_spkr_control(pin, count);
 
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 1f8e010..7e64d01 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -176,7 +176,7 @@
 
 	/* request the IRQs */
 	err = request_irq(encoder->irq_a, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@
 	}
 
 	err = request_irq(encoder->irq_b, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index bf5fd7f..9c1e6ee 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -39,7 +39,7 @@
 	  module will be called psmouse.
 
 config MOUSE_PS2_ALPS
-	bool "ALPS PS/2 mouse protocol extension" if EMBEDDED
+	bool "ALPS PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
@@ -49,7 +49,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_LOGIPS2PP
-	bool "Logitech PS/2++ mouse protocol extension" if EMBEDDED
+	bool "Logitech PS/2++ mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
@@ -59,7 +59,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_SYNAPTICS
-	bool "Synaptics PS/2 mouse protocol extension" if EMBEDDED
+	bool "Synaptics PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
@@ -69,7 +69,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_LIFEBOOK
-	bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
+	bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2 && X86 && DMI
 	help
@@ -79,7 +79,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_TRACKPOINT
-	bool "IBM Trackpoint PS/2 mouse protocol extension" if EMBEDDED
+	bool "IBM Trackpoint PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index da392c2..aa186cf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -755,23 +755,26 @@
 {
 	struct synaptics_data *priv = psmouse->private;
 	struct synaptics_data old_priv = *priv;
+	int retry = 0;
+	int error;
 
-	psmouse_reset(psmouse);
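+	/* Retry the reset/detect sequence up to three times before giving up */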
+	do {
+		psmouse_reset(psmouse);
+		error = synaptics_detect(psmouse, 0);
+	} while (error && ++retry < 3);
 
-	if (synaptics_detect(psmouse, 0))
+	if (error)
 		return -1;
 
+	if (retry > 1)
+		printk(KERN_DEBUG "Synaptics reconnected after %d tries\n",
+			retry);
+
 	if (synaptics_query_hardware(psmouse)) {
 		printk(KERN_ERR "Unable to query Synaptics hardware.\n");
 		return -1;
 	}
 
-	if (old_priv.identity != priv->identity ||
-	    old_priv.model_id != priv->model_id ||
-	    old_priv.capabilities != priv->capabilities ||
-	    old_priv.ext_cap != priv->ext_cap)
-		return -1;
-
 	if (synaptics_set_absolute_mode(psmouse)) {
 		printk(KERN_ERR "Unable to initialize Synaptics hardware.\n");
 		return -1;
@@ -782,6 +785,19 @@
 		return -1;
 	}
 
+	if (old_priv.identity != priv->identity ||
+	    old_priv.model_id != priv->model_id ||
+	    old_priv.capabilities != priv->capabilities ||
+	    old_priv.ext_cap != priv->ext_cap) {
+		printk(KERN_ERR "Synaptics hardware appears to be different: "
+			"id(%ld-%ld), model(%ld-%ld), caps(%lx-%lx), ext(%lx-%lx).\n",
+			old_priv.identity, priv->identity,
+			old_priv.model_id, priv->model_id,
+			old_priv.capabilities, priv->capabilities,
+			old_priv.ext_cap, priv->ext_cap);
+		return -1;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d04..7453938 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
 #define SYN_EXT_CAP_REQUESTS(c)		(((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)	(((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)		(((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte	mask	name			meaning
+ * ----	----	-------			------------
+ * 1	0x01	adjustable threshold	capacitive button sensitivity
+ *					can be adjusted
+ * 1	0x02	report max		query 0x0d gives max coord reported
+ * 1	0x04	clearpad		sensor is ClearPad product
+ * 1	0x08	advanced gesture	not particularly meaningful
+ * 1	0x10	clickpad bit 0		1-button ClickPad
+ * 1	0x60	multifinger mode	identifies firmware finger counting
+ *					(not reporting!) algorithm.
+ *					Not particularly meaningful
+ * 1	0x80    covered pad		W clipped to 14, 15 == pad mostly covered
+ * 2	0x01    clickpad bit 1		2-button ClickPad
+ * 2	0x02    deluxe LED controls	touchpad supports LED commands
+ *					a la multimedia control bar
+ * 2	0x04	reduced filtering	firmware does less filtering on
+ *					position data, driver should watch
+ *					for noise.
+ */
 #define SYN_CAP_CLICKPAD(ex0c)		((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)	((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c)	((ex0c) & 0x020000)
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index bcb1fde..55f2c22 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
 # Input core configuration
 #
 config SERIO
-	tristate "Serial I/O support" if EMBEDDED || !X86
+	tristate "Serial I/O support" if EXPERT || !X86
 	default y
 	help
 	  Say Yes here if you have any input device that uses serial I/O to
@@ -19,7 +19,7 @@
 if SERIO
 
 config SERIO_I8042
-	tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
+	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
 	default y
 	depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
 		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
@@ -168,7 +168,7 @@
 	  module will be called maceps2.
 
 config SERIO_LIBPS2
-	tristate "PS/2 driver library" if EMBEDDED
+	tristate "PS/2 driver library" if EXPERT
 	depends on SERIO_I8042 || SERIO_I8042=n
 	help
 	  Say Y here if you are using a driver for device connected
@@ -229,7 +229,7 @@
 	tristate "TQC PS/2 multiplexer"
 	help
 	  Say Y here if you have the PS/2 line multiplexer like the one
-	  present on TQC boads.
+	  present on TQC boards.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ps2mult.
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 448c772..8528165 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -111,9 +111,11 @@
 static int ct82c710_open(struct serio *serio)
 {
 	unsigned char status;
+	int err;
 
-	if (request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL))
-		return -1;
+	err = request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL);
+	if (err)
+		return err;
 
 	status = inb_p(CT82C710_STATUS);
 
@@ -131,7 +133,7 @@
 		status &= ~(CT82C710_ENABLE | CT82C710_INTS_ON);
 		outb_p(status, CT82C710_STATUS);
 		free_irq(CT82C710_IRQ, NULL);
-		return -1;
+		return -EBUSY;
 	}
 
 	return 0;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5ae0fc4..bb9f5d3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -424,6 +424,13 @@
 			DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
 		},
 	},
+	{
+		/* Dell Vostro V13 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+		},
+	},
 	{ }
 };
 
@@ -545,6 +552,17 @@
 };
 #endif
 
+static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+	{
+		/* Dell Vostro V13 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+		},
+	},
+	{ }
+};
+
 /*
  * Some Wistron based laptops need us to explicitly enable the 'Dritek
  * keyboard extension' to make their extra keys start generating scancodes.
@@ -896,6 +914,9 @@
 	if (dmi_check_system(i8042_dmi_nomux_table))
 		i8042_nomux = true;
 
+	if (dmi_check_system(i8042_dmi_notimeout_table))
+		i8042_notimeout = true;
+
 	if (dmi_check_system(i8042_dmi_dritek_table))
 		i8042_dritek = true;
 #endif /* CONFIG_X86 */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index c04ff00..ac4c936 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -63,6 +63,10 @@
 module_param_named(noloop, i8042_noloop, bool, 0);
 MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port");
 
+static bool i8042_notimeout;
+module_param_named(notimeout, i8042_notimeout, bool, 0);
+MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+
 #ifdef CONFIG_X86
 static bool i8042_dritek;
 module_param_named(dritek, i8042_dritek, bool, 0);
@@ -504,7 +508,7 @@
 	} else {
 
 		dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
-		      ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
+		      ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
 
 		port_no = (str & I8042_STR_AUXDATA) ?
 				I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index db5b0bc..ba70058 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -188,7 +188,8 @@
 	kfree(event);
 }
 
-static void serio_remove_duplicate_events(struct serio_event *event)
+static void serio_remove_duplicate_events(void *object,
+					  enum serio_event_type type)
 {
 	struct serio_event *e, *next;
 	unsigned long flags;
@@ -196,13 +197,13 @@
 	spin_lock_irqsave(&serio_event_lock, flags);
 
 	list_for_each_entry_safe(e, next, &serio_event_list, node) {
-		if (event->object == e->object) {
+		if (object == e->object) {
 			/*
 			 * If this event is of different type we should not
 			 * look further - we only suppress duplicate events
 			 * that were sent back-to-back.
 			 */
-			if (event->type != e->type)
+			if (type != e->type)
 				break;
 
 			list_del_init(&e->node);
@@ -245,7 +246,7 @@
 			break;
 		}
 
-		serio_remove_duplicate_events(event);
+		serio_remove_duplicate_events(event->object, event->type);
 		serio_free_event(event);
 	}
 
@@ -298,7 +299,7 @@
 	event->owner = owner;
 
 	list_add_tail(&event->node, &serio_event_list);
-	schedule_work(&serio_event_work);
+	queue_work(system_long_wq, &serio_event_work);
 
 out:
 	spin_unlock_irqrestore(&serio_event_lock, flags);
@@ -436,10 +437,12 @@
 	} else if (!strncmp(buf, "rescan", count)) {
 		serio_disconnect_port(serio);
 		serio_find_driver(serio);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
 		serio_disconnect_port(serio);
 		error = serio_bind_driver(serio, to_serio_driver(drv));
 		put_driver(drv);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else {
 		error = -EINVAL;
 	}
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 6e362de..8755f5f 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -116,14 +116,15 @@
 
 /*
  * serport_ldisc_receive() is called by the low level tty driver when characters
- * are ready for us. We forward the characters, one by one to the 'interrupt'
- * routine.
+ * are ready for us. We forward the characters and flags, one by one to the
+ * 'interrupt' routine.
  */
 
 static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
 {
 	struct serport *serport = (struct serport*) tty->disc_data;
 	unsigned long flags;
+	unsigned int ch_flags;
 	int i;
 
 	spin_lock_irqsave(&serport->lock, flags);
@@ -131,8 +132,23 @@
 	if (!test_bit(SERPORT_ACTIVE, &serport->flags))
 		goto out;
 
-	for (i = 0; i < count; i++)
-		serio_interrupt(serport->serio, cp[i], 0);
+	for (i = 0; i < count; i++) {
+		switch (fp[i]) {
+		case TTY_FRAME:
+			ch_flags = SERIO_FRAME;
+			break;
+
+		case TTY_PARITY:
+			ch_flags = SERIO_PARITY;
+			break;
+
+		default:
+			ch_flags = 0;
+			break;
+		}
+
+		serio_interrupt(serport->serio, cp[i], ch_flags);
+	}
 
 out:
 	spin_unlock_irqrestore(&serport->lock, flags);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index a29a7812..7729e54 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -201,6 +201,7 @@
 			break;
 
 		case KE_SW:
+		case KE_VSW:
 			__set_bit(EV_SW, dev->evbit);
 			__set_bit(entry->sw.code, dev->swbit);
 			break;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index fc38149..cf8fb9f 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -519,7 +519,7 @@
 	/* Retrieve the physical and logical size for OEM devices */
 	error = wacom_retrieve_hid_descriptor(intf, features);
 	if (error)
-		goto fail2;
+		goto fail3;
 
 	wacom_setup_device_quirks(features);
 
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 5187829..367fa82 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1101,6 +1101,13 @@
 	}
 }
 
+static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
+					      unsigned int physical_max)
+{
+	/* Touch physical dimensions are in 100ths of a mm */
+	return (logical_max * 100) / physical_max;
+}
+
 void wacom_setup_input_capabilities(struct input_dev *input_dev,
 				    struct wacom_wac *wacom_wac)
 {
@@ -1228,8 +1235,12 @@
 	case TABLETPC:
 		if (features->device_type == BTN_TOOL_DOUBLETAP ||
 		    features->device_type == BTN_TOOL_TRIPLETAP) {
-			input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
-			input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
+			input_abs_set_res(input_dev, ABS_X,
+				wacom_calculate_touch_res(features->x_max,
+							features->x_phy));
+			input_abs_set_res(input_dev, ABS_Y,
+				wacom_calculate_touch_res(features->y_max,
+							features->y_phy));
 			__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
 		}
 
@@ -1272,6 +1283,12 @@
 			input_set_abs_params(input_dev, ABS_MT_PRESSURE,
 					     0, features->pressure_max,
 					     features->pressure_fuzz, 0);
+			input_abs_set_res(input_dev, ABS_X,
+				wacom_calculate_touch_res(features->x_max,
+							features->x_phy));
+			input_abs_set_res(input_dev, ABS_Y,
+				wacom_calculate_touch_res(features->y_max,
+							features->y_phy));
 		} else if (features->device_type == BTN_TOOL_PEN) {
 			__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
 			__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1426,6 +1443,10 @@
 	{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN,     21648, 13530, 1023, 63, BAMBOO_PT };
 static const struct wacom_features wacom_features_0xD4 =
 	{ "Wacom Bamboo Pen",     WACOM_PKGLEN_BBFUN,     14720,  9200,  255, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD6 =
+	{ "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN,   14720,  9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD7 =
+	{ "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720,  9200, 1023, 63, BAMBOO_PT };
 static struct wacom_features wacom_features_0xD8 =
 	{ "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN,   21648, 13530, 1023, 63, BAMBOO_PT };
 static struct wacom_features wacom_features_0xDA =
@@ -1507,6 +1528,8 @@
 	{ USB_DEVICE_WACOM(0xD2) },
 	{ USB_DEVICE_WACOM(0xD3) },
 	{ USB_DEVICE_WACOM(0xD4) },
+	{ USB_DEVICE_WACOM(0xD6) },
+	{ USB_DEVICE_WACOM(0xD7) },
 	{ USB_DEVICE_WACOM(0xD8) },
 	{ USB_DEVICE_WACOM(0xDA) },
 	{ USB_DEVICE_WACOM(0xDB) },
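For reference, the wacom_calculate_touch_res() helper added above turns a logical range and a physical size given in hundredths of a millimetre into a resolution in counts per millimetre. A small user-space sketch with hypothetical numbers:

#include <stdio.h>

/* Same arithmetic as wacom_calculate_touch_res(): physical_max is in
 * hundredths of a millimetre, so the result is counts per mm. */
static unsigned int touch_res(unsigned int logical_max, unsigned int physical_max)
{
	return (logical_max * 100) / physical_max;
}

int main(void)
{
	/* hypothetical panel: 21648 counts across 216.48 mm */
	printf("%u counts/mm\n", touch_res(21648, 21648));	/* prints 100 */
	return 0;
}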
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 07ac77d..61834ae 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -540,62 +540,62 @@
 
 config TOUCHSCREEN_USB_EGALAX
 	default y
-	bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
+	bool "eGalax, eTurboTouch CT-410/510/700 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_PANJIT
 	default y
-	bool "PanJit device support" if EMBEDDED
+	bool "PanJit device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_3M
 	default y
-	bool "3M/Microtouch EX II series device support" if EMBEDDED
+	bool "3M/Microtouch EX II series device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_ITM
 	default y
-	bool "ITM device support" if EMBEDDED
+	bool "ITM device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_ETURBO
 	default y
-	bool "eTurboTouch (non-eGalax compatible) device support" if EMBEDDED
+	bool "eTurboTouch (non-eGalax compatible) device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_GUNZE
 	default y
-	bool "Gunze AHL61 device support" if EMBEDDED
+	bool "Gunze AHL61 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_DMC_TSC10
 	default y
-	bool "DMC TSC-10/25 device support" if EMBEDDED
+	bool "DMC TSC-10/25 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_IRTOUCH
 	default y
-	bool "IRTOUCHSYSTEMS/UNITOP device support" if EMBEDDED
+	bool "IRTOUCHSYSTEMS/UNITOP device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_IDEALTEK
 	default y
-	bool "IdealTEK URTC1000 device support" if EMBEDDED
+	bool "IdealTEK URTC1000 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_GENERAL_TOUCH
 	default y
-	bool "GeneralTouch Touchscreen device support" if EMBEDDED
+	bool "GeneralTouch Touchscreen device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_GOTOP
 	default y
-	bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EMBEDDED
+	bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_JASTEC
 	default y
-	bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EMBEDDED
+	bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_E2I
@@ -605,17 +605,17 @@
 
 config TOUCHSCREEN_USB_ZYTRONIC
 	default y
-	bool "Zytronic controller" if EMBEDDED
+	bool "Zytronic controller" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_ETT_TC45USB
 	default y
-	bool "ET&T USB series TC4UM/TC5UH touchscreen controler support" if EMBEDDED
+	bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_NEXIO
 	default y
-	bool "NEXIO/iNexio device support" if EMBEDDED
+	bool "NEXIO/iNexio device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_TOUCHIT213
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index d82a38e..4e4e58c 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -10,14 +10,16 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/pm.h>
 
 #include "ad7879.h"
 
 #define AD7879_DEVID		0x79	/* AD7879-1/AD7889-1 */
 
 #ifdef CONFIG_PM
-static int ad7879_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int ad7879_i2c_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ad7879 *ts = i2c_get_clientdata(client);
 
 	ad7879_suspend(ts);
@@ -25,17 +27,17 @@
 	return 0;
 }
 
-static int ad7879_i2c_resume(struct i2c_client *client)
+static int ad7879_i2c_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ad7879 *ts = i2c_get_clientdata(client);
 
 	ad7879_resume(ts);
 
 	return 0;
 }
-#else
-# define ad7879_i2c_suspend NULL
-# define ad7879_i2c_resume  NULL
+
+static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
 #endif
 
 /* All registers are word-sized.
@@ -117,11 +119,12 @@
 	.driver = {
 		.name	= "ad7879",
 		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &ad7879_i2c_pm,
+#endif
 	},
 	.probe		= ad7879_i2c_probe,
 	.remove		= __devexit_p(ad7879_i2c_remove),
-	.suspend	= ad7879_i2c_suspend,
-	.resume		= ad7879_i2c_resume,
 	.id_table	= ad7879_id,
 };
 
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 14ea54b..4bf2316 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -941,28 +941,29 @@
 	struct ads7846_platform_data *pdata = spi->dev.platform_data;
 	int err;
 
-	/* REVISIT when the irq can be triggered active-low, or if for some
+	/*
+	 * REVISIT when the irq can be triggered active-low, or if for some
 	 * reason the touchscreen isn't hooked up, we don't need to access
 	 * the pendown state.
 	 */
-	if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
-		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
-		return -EINVAL;
-	}
 
 	if (pdata->get_pendown_state) {
 		ts->get_pendown_state = pdata->get_pendown_state;
-		return 0;
-	}
+	} else if (gpio_is_valid(pdata->gpio_pendown)) {
 
-	err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
-	if (err) {
-		dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
-			pdata->gpio_pendown);
-		return err;
-	}
+		err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+		if (err) {
+			dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
+				pdata->gpio_pendown);
+			return err;
+		}
 
-	ts->gpio_pendown = pdata->gpio_pendown;
+		ts->gpio_pendown = pdata->gpio_pendown;
+
+	} else {
+		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1353,7 +1354,7 @@
  err_put_regulator:
 	regulator_put(ts->reg);
  err_free_gpio:
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state)
 		gpio_free(ts->gpio_pendown);
  err_cleanup_filter:
 	if (ts->filter_cleanup)
@@ -1383,8 +1384,13 @@
 	regulator_disable(ts->reg);
 	regulator_put(ts->reg);
 
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state) {
+		/*
+		 * If we are not using a specialized pendown method we must
+		 * have been relying on a GPIO we set up ourselves.
+		 */
 		gpio_free(ts->gpio_pendown);
+	}
 
 	if (ts->filter_cleanup)
 		ts->filter_cleanup(ts->filter_data);
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f7fa9ef..1507ce1 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -12,6 +12,7 @@
 #include <linux/input.h>
 #include <linux/input/bu21013.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
 
 #define PEN_DOWN_INTR	0
 #define MAX_FINGERS	2
@@ -139,6 +140,7 @@
  * @chip: pointer to the touch panel controller
  * @in_dev: pointer to the input device structure
  * @intr_pin: interrupt pin value
+ * @regulator: pointer to the Regulator used for touch screen
  *
  * Touch panel device data structure
  */
@@ -149,6 +151,7 @@
 	const struct bu21013_platform_device *chip;
 	struct input_dev *in_dev;
 	unsigned int intr_pin;
+	struct regulator *regulator;
 };
 
 /**
@@ -456,6 +459,20 @@
 	bu21013_data->in_dev = in_dev;
 	bu21013_data->chip = pdata;
 	bu21013_data->client = client;
+
+	bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+	if (IS_ERR(bu21013_data->regulator)) {
+		dev_err(&client->dev, "regulator_get failed\n");
+		error = PTR_ERR(bu21013_data->regulator);
+		goto err_free_mem;
+	}
+
+	error = regulator_enable(bu21013_data->regulator);
+	if (error < 0) {
+		dev_err(&client->dev, "regulator enable failed\n");
+		goto err_put_regulator;
+	}
+
 	bu21013_data->touch_stopped = false;
 	init_waitqueue_head(&bu21013_data->wait);
 
@@ -464,7 +481,7 @@
 		error = pdata->cs_en(pdata->cs_pin);
 		if (error < 0) {
 			dev_err(&client->dev, "chip init failed\n");
-			goto err_free_mem;
+			goto err_disable_regulator;
 		}
 	}
 
@@ -485,9 +502,9 @@
 	__set_bit(EV_ABS, in_dev->evbit);
 
 	input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
-						pdata->x_max_res, 0, 0);
+						pdata->touch_x_max, 0, 0);
 	input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
-						pdata->y_max_res, 0, 0);
+						pdata->touch_y_max, 0, 0);
 	input_set_drvdata(in_dev, bu21013_data);
 
 	error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
@@ -513,6 +530,10 @@
 	bu21013_free_irq(bu21013_data);
 err_cs_disable:
 	pdata->cs_dis(pdata->cs_pin);
+err_disable_regulator:
+	regulator_disable(bu21013_data->regulator);
+err_put_regulator:
+	regulator_put(bu21013_data->regulator);
 err_free_mem:
 	input_free_device(in_dev);
 	kfree(bu21013_data);
@@ -535,6 +556,10 @@
 	bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
 
 	input_unregister_device(bu21013_data->in_dev);
+
+	regulator_disable(bu21013_data->regulator);
+	regulator_put(bu21013_data->regulator);
+
 	kfree(bu21013_data);
 
 	device_init_wakeup(&client->dev, false);
@@ -561,6 +586,8 @@
 	else
 		disable_irq(bu21013_data->chip->irq);
 
+	regulator_disable(bu21013_data->regulator);
+
 	return 0;
 }
 
@@ -577,6 +604,12 @@
 	struct i2c_client *client = bu21013_data->client;
 	int retval;
 
+	retval = regulator_enable(bu21013_data->regulator);
+	if (retval < 0) {
+		dev_err(&client->dev, "bu21013 regulator enable failed\n");
+		return retval;
+	}
+
 	retval = bu21013_init_chip(bu21013_data);
 	if (retval < 0) {
 		dev_err(&client->dev, "bu21013 controller config failed\n");
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index d0c3a72..a93c5c2 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -280,8 +280,9 @@
 }
 
 #ifdef CONFIG_PM
-static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg)
+static int cy8ctmg110_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct cy8ctmg110 *ts = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(&client->dev))
@@ -293,8 +294,9 @@
 	return 0;
 }
 
-static int cy8ctmg110_resume(struct i2c_client *client)
+static int cy8ctmg110_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct cy8ctmg110 *ts = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(&client->dev))
@@ -305,6 +307,8 @@
 	}
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
 #endif
 
 static int __devexit cy8ctmg110_remove(struct i2c_client *client)
@@ -335,14 +339,13 @@
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= CY8CTMG110_DRIVER_NAME,
+#ifdef CONFIG_PM
+		.pm	= &cy8ctmg110_pm,
+#endif
 	},
 	.id_table	= cy8ctmg110_idtable,
 	.probe		= cy8ctmg110_probe,
 	.remove		= __devexit_p(cy8ctmg110_remove),
-#ifdef CONFIG_PM
-	.suspend	= cy8ctmg110_suspend,
-	.resume		= cy8ctmg110_resume,
-#endif
 };
 
 static int __init cy8ctmg110_init(void)
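The ad7879 and cy8ctmg110 hunks above, and the eeti_ts, mcs5000 and migor_ts ones below, all apply the same conversion from legacy i2c suspend/resume callbacks to dev_pm_ops. A generic sketch of the pattern with placeholder driver names:

#include <linux/i2c.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	/* dev_pm_ops callbacks get a struct device; recover the client */
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "suspending\n");
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "resuming\n");
	return 0;
}

/* bundles the pair into a const struct dev_pm_ops named foo_pm */
static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);
#endif

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
#ifdef CONFIG_PM
		.pm	= &foo_pm,
#endif
	},
	/* .probe, .remove and .id_table unchanged */
};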
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 7a3a916..7f8f538 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -261,8 +261,9 @@
 }
 
 #ifdef CONFIG_PM
-static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int eeti_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
 	struct input_dev *input_dev = priv->input;
 
@@ -279,8 +280,9 @@
 	return 0;
 }
 
-static int eeti_ts_resume(struct i2c_client *client)
+static int eeti_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
 	struct input_dev *input_dev = priv->input;
 
@@ -296,9 +298,8 @@
 
 	return 0;
 }
-#else
-#define eeti_ts_suspend NULL
-#define eeti_ts_resume NULL
+
+static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
 #endif
 
 static const struct i2c_device_id eeti_ts_id[] = {
@@ -310,11 +311,12 @@
 static struct i2c_driver eeti_ts_driver = {
 	.driver = {
 		.name = "eeti_ts",
+#ifdef CONFIG_PM
+		.pm = &eeti_ts_pm,
+#endif
 	},
 	.probe = eeti_ts_probe,
 	.remove = __devexit_p(eeti_ts_remove),
-	.suspend = eeti_ts_suspend,
-	.resume = eeti_ts_resume,
 	.id_table = eeti_ts_id,
 };
 
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index 6ee9940..2d84c80 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -261,25 +261,27 @@
 }
 
 #ifdef CONFIG_PM
-static int mcs5000_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int mcs5000_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
+
 	/* Touch sleep mode */
 	i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP);
 
 	return 0;
 }
 
-static int mcs5000_ts_resume(struct i2c_client *client)
+static int mcs5000_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct mcs5000_ts_data *data = i2c_get_clientdata(client);
 
 	mcs5000_ts_phys_init(data);
 
 	return 0;
 }
-#else
-#define mcs5000_ts_suspend	NULL
-#define mcs5000_ts_resume	NULL
+
+static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume);
 #endif
 
 static const struct i2c_device_id mcs5000_ts_id[] = {
@@ -291,10 +293,11 @@
 static struct i2c_driver mcs5000_ts_driver = {
 	.probe		= mcs5000_ts_probe,
 	.remove		= __devexit_p(mcs5000_ts_remove),
-	.suspend	= mcs5000_ts_suspend,
-	.resume		= mcs5000_ts_resume,
 	.driver = {
 		.name = "mcs5000_ts",
+#ifdef CONFIG_PM
+		.pm   = &mcs5000_ts_pm,
+#endif
 	},
 	.id_table	= mcs5000_ts_id,
 };
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index defe5dd..5803bd0 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <linux/slab.h>
 #include <asm/io.h>
 #include <linux/i2c.h>
@@ -226,8 +227,9 @@
 	return 0;
 }
 
-static int migor_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int migor_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
 
 	if (device_may_wakeup(&client->dev))
@@ -236,8 +238,9 @@
 	return 0;
 }
 
-static int migor_ts_resume(struct i2c_client *client)
+static int migor_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
 
 	if (device_may_wakeup(&client->dev))
@@ -246,6 +249,8 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(migor_ts_pm, migor_ts_suspend, migor_ts_resume);
+
 static const struct i2c_device_id migor_ts_id[] = {
 	{ "migor_ts", 0 },
 	{ }
@@ -255,11 +260,10 @@
 static struct i2c_driver migor_ts_driver = {
 	.driver = {
 		.name = "migor_ts",
+		.pm = &migor_ts_pm,
 	},
 	.probe = migor_ts_probe,
 	.remove = migor_ts_remove,
-	.suspend = migor_ts_suspend,
-	.resume = migor_ts_resume,
 	.id_table = migor_ts_id,
 };
 
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index cf1dba2..22a3411 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
@@ -289,9 +290,9 @@
 	}
 
 	ts->clk = clk_get(dev, NULL);
-	if (!ts->clk) {
+	if (IS_ERR(ts->clk)) {
 		dev_err(dev, "cannot claim device clock\n");
-		error = -EINVAL;
+		error = PTR_ERR(ts->clk);
 		goto error_clk;
 	}
 
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 8ed53ad..c14412e 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -3,6 +3,7 @@
  *
  * Copyright (c) 2008 Jaya Kumar
  * Copyright (c) 2010 Red Hat, Inc.
+ * Copyright (c) 2010 - 2011 Ping Cheng, Wacom. <pingc@wacom.com>
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file COPYING in the main directory of this archive for
@@ -50,6 +51,10 @@
 #define W8001_PKTLEN_TPCCTL	11	/* control packet */
 #define W8001_PKTLEN_TOUCH2FG	13
 
+/* resolution in points/mm */
+#define W8001_PEN_RESOLUTION    100
+#define W8001_TOUCH_RESOLUTION  10
+
 struct w8001_coord {
 	u8 rdy;
 	u8 tsw;
@@ -64,11 +69,11 @@
 
 /* touch query reply packet */
 struct w8001_touch_query {
+	u16 x;
+	u16 y;
 	u8 panel_res;
 	u8 capacity_res;
 	u8 sensor_id;
-	u16 x;
-	u16 y;
 };
 
 /*
@@ -87,9 +92,14 @@
 	char phys[32];
 	int type;
 	unsigned int pktlen;
+	u16 max_touch_x;
+	u16 max_touch_y;
+	u16 max_pen_x;
+	u16 max_pen_y;
+	char name[64];
 };
 
-static void parse_data(u8 *data, struct w8001_coord *coord)
+static void parse_pen_data(u8 *data, struct w8001_coord *coord)
 {
 	memset(coord, 0, sizeof(*coord));
 
@@ -113,11 +123,30 @@
 	coord->tilt_y = data[8] & 0x7F;
 }
 
-static void parse_touch(struct w8001 *w8001)
+static void parse_single_touch(u8 *data, struct w8001_coord *coord)
+{
+	coord->x = (data[1] << 7) | data[2];
+	coord->y = (data[3] << 7) | data[4];
+	coord->tsw = data[0] & 0x01;
+}
+
+static void scale_touch_coordinates(struct w8001 *w8001,
+				    unsigned int *x, unsigned int *y)
+{
+	if (w8001->max_pen_x && w8001->max_touch_x)
+		*x = *x * w8001->max_pen_x / w8001->max_touch_x;
+
+	if (w8001->max_pen_y && w8001->max_touch_y)
+		*y = *y * w8001->max_pen_y / w8001->max_touch_y;
+}
+
+static void parse_multi_touch(struct w8001 *w8001)
 {
 	struct input_dev *dev = w8001->dev;
 	unsigned char *data = w8001->data;
+	unsigned int x, y;
 	int i;
+	int count = 0;
 
 	for (i = 0; i < 2; i++) {
 		bool touch = data[0] & (1 << i);
@@ -125,15 +154,29 @@
 		input_mt_slot(dev, i);
 		input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
 		if (touch) {
-			int x = (data[6 * i + 1] << 7) | (data[6 * i + 2]);
-			int y = (data[6 * i + 3] << 7) | (data[6 * i + 4]);
+			x = (data[6 * i + 1] << 7) | data[6 * i + 2];
+			y = (data[6 * i + 3] << 7) | data[6 * i + 4];
 			/* data[5,6] and [11,12] is finger capacity */
 
+			/* scale to pen maximum */
+			scale_touch_coordinates(w8001, &x, &y);
+
 			input_report_abs(dev, ABS_MT_POSITION_X, x);
 			input_report_abs(dev, ABS_MT_POSITION_Y, y);
+			count++;
 		}
 	}
 
+	/*
+	 * Emulate single-touch events when the stylus is out of proximity,
+	 * keeping single-touch behaviour consistent across Wacom devices.
+	 */
+	if (w8001->type != BTN_TOOL_PEN &&
+			    w8001->type != BTN_TOOL_RUBBER) {
+		w8001->type = count == 1 ? BTN_TOOL_FINGER : KEY_RESERVED;
+		input_mt_report_pointer_emulation(dev, true);
+	}
+
 	input_sync(dev);
 }
 
@@ -152,6 +195,15 @@
 	query->y = data[5] << 9;
 	query->y |= data[6] << 2;
 	query->y |= (data[2] >> 3) & 0x3;
+
+	/* Early single-finger touch models need the following defaults */
+	if (!query->x && !query->y) {
+		query->x = 1024;
+		query->y = 1024;
+		if (query->panel_res)
+			query->x = query->y = (1 << query->panel_res);
+		query->panel_res = W8001_TOUCH_RESOLUTION;
+	}
 }
 
 static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
@@ -161,16 +213,15 @@
 	/*
 	 * We have 1 bit for proximity (rdy) and 3 bits for tip, side,
 	 * side2/eraser. If rdy && f2 are set, this can be either pen + side2,
-	 * or eraser. assume
+	 * or eraser. Assume:
 	 * - if dev is already in proximity and f2 is toggled → pen + side2
 	 * - if dev comes into proximity with f2 set → eraser
 	 * If f2 disappears after assuming eraser, fake proximity out for
 	 * eraser and in for pen.
 	 */
 
-	if (!w8001->type) {
-		w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
-	} else if (w8001->type == BTN_TOOL_RUBBER) {
+	switch (w8001->type) {
+	case BTN_TOOL_RUBBER:
 		if (!coord->f2) {
 			input_report_abs(dev, ABS_PRESSURE, 0);
 			input_report_key(dev, BTN_TOUCH, 0);
@@ -180,8 +231,21 @@
 			input_sync(dev);
 			w8001->type = BTN_TOOL_PEN;
 		}
-	} else {
+		break;
+
+	case BTN_TOOL_FINGER:
+		input_report_key(dev, BTN_TOUCH, 0);
+		input_report_key(dev, BTN_TOOL_FINGER, 0);
+		input_sync(dev);
+		/* fall through */
+
+	case KEY_RESERVED:
+		w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+		break;
+
+	default:
 		input_report_key(dev, BTN_STYLUS2, coord->f2);
+		break;
 	}
 
 	input_report_abs(dev, ABS_X, coord->x);
@@ -193,7 +257,26 @@
 	input_sync(dev);
 
 	if (!coord->rdy)
-		w8001->type = 0;
+		w8001->type = KEY_RESERVED;
+}
+
+static void report_single_touch(struct w8001 *w8001, struct w8001_coord *coord)
+{
+	struct input_dev *dev = w8001->dev;
+	unsigned int x = coord->x;
+	unsigned int y = coord->y;
+
+	/* scale to pen maximum */
+	scale_touch_coordinates(w8001, &x, &y);
+
+	input_report_abs(dev, ABS_X, x);
+	input_report_abs(dev, ABS_Y, y);
+	input_report_key(dev, BTN_TOUCH, coord->tsw);
+	input_report_key(dev, BTN_TOOL_FINGER, coord->tsw);
+
+	input_sync(dev);
+
+	w8001->type = coord->tsw ? BTN_TOOL_FINGER : KEY_RESERVED;
 }
 
 static irqreturn_t w8001_interrupt(struct serio *serio,
@@ -214,9 +297,18 @@
 
 	case W8001_PKTLEN_TOUCH93 - 1:
 	case W8001_PKTLEN_TOUCH9A - 1:
-		/* ignore one-finger touch packet. */
-		if (w8001->pktlen == w8001->idx)
+		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
+		if (tmp != W8001_TOUCH_BYTE)
+			break;
+
+		if (w8001->pktlen == w8001->idx) {
 			w8001->idx = 0;
+			if (w8001->type != BTN_TOOL_PEN &&
+			    w8001->type != BTN_TOOL_RUBBER) {
+				parse_single_touch(w8001->data, &coord);
+				report_single_touch(w8001, &coord);
+			}
+		}
 		break;
 
 	/* Pen coordinates packet */
@@ -225,18 +317,18 @@
 		if (unlikely(tmp == W8001_TAB_BYTE))
 			break;
 
-		tmp = (w8001->data[0] & W8001_TOUCH_BYTE);
+		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
 		if (tmp == W8001_TOUCH_BYTE)
 			break;
 
 		w8001->idx = 0;
-		parse_data(w8001->data, &coord);
+		parse_pen_data(w8001->data, &coord);
 		report_pen_events(w8001, &coord);
 		break;
 
 	/* control packet */
 	case W8001_PKTLEN_TPCCTL - 1:
-		tmp = (w8001->data[0] & W8001_TOUCH_MASK);
+		tmp = w8001->data[0] & W8001_TOUCH_MASK;
 		if (tmp == W8001_TOUCH_BYTE)
 			break;
 
@@ -249,7 +341,7 @@
 	/* 2 finger touch packet */
 	case W8001_PKTLEN_TOUCH2FG - 1:
 		w8001->idx = 0;
-		parse_touch(w8001);
+		parse_multi_touch(w8001);
 		break;
 	}
 
@@ -279,6 +371,7 @@
 {
 	struct input_dev *dev = w8001->dev;
 	struct w8001_coord coord;
+	struct w8001_touch_query touch;
 	int error;
 
 	error = w8001_command(w8001, W8001_CMD_STOP, false);
@@ -287,22 +380,33 @@
 
 	msleep(250);	/* wait 250ms before querying the device */
 
+	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
+
 	/* penabled? */
 	error = w8001_command(w8001, W8001_CMD_QUERY, true);
 	if (!error) {
+		__set_bit(BTN_TOUCH, dev->keybit);
 		__set_bit(BTN_TOOL_PEN, dev->keybit);
 		__set_bit(BTN_TOOL_RUBBER, dev->keybit);
 		__set_bit(BTN_STYLUS, dev->keybit);
 		__set_bit(BTN_STYLUS2, dev->keybit);
-		parse_data(w8001->response, &coord);
+
+		parse_pen_data(w8001->response, &coord);
+		w8001->max_pen_x = coord.x;
+		w8001->max_pen_y = coord.y;
 
 		input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
+		input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
+		input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
 		input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
 		if (coord.tilt_x && coord.tilt_y) {
 			input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
 			input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0);
 		}
+		w8001->id = 0x90;
+		strlcat(w8001->name, " Penabled", sizeof(w8001->name));
 	}
 
 	/* Touch enabled? */
@@ -313,24 +417,41 @@
 	 * second byte is empty, which indicates touch is not supported.
 	 */
 	if (!error && w8001->response[1]) {
-		struct w8001_touch_query touch;
+		__set_bit(BTN_TOUCH, dev->keybit);
+		__set_bit(BTN_TOOL_FINGER, dev->keybit);
 
 		parse_touchquery(w8001->response, &touch);
+		w8001->max_touch_x = touch.x;
+		w8001->max_touch_y = touch.y;
+
+		if (w8001->max_pen_x && w8001->max_pen_y) {
+			/* if pen is supported scale to pen maximum */
+			touch.x = w8001->max_pen_x;
+			touch.y = w8001->max_pen_y;
+			touch.panel_res = W8001_PEN_RESOLUTION;
+		}
 
 		input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
-		__set_bit(BTN_TOOL_FINGER, dev->keybit);
+		input_abs_set_res(dev, ABS_X, touch.panel_res);
+		input_abs_set_res(dev, ABS_Y, touch.panel_res);
 
 		switch (touch.sensor_id) {
 		case 0:
 		case 2:
 			w8001->pktlen = W8001_PKTLEN_TOUCH93;
+			w8001->id = 0x93;
+			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
 			break;
+
 		case 1:
 		case 3:
 		case 4:
 			w8001->pktlen = W8001_PKTLEN_TOUCH9A;
+			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
+			w8001->id = 0x9a;
 			break;
+
 		case 5:
 			w8001->pktlen = W8001_PKTLEN_TOUCH2FG;
 
@@ -341,10 +462,18 @@
 						0, touch.y, 0, 0);
 			input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
 						0, MT_TOOL_MAX, 0, 0);
+
+			strlcat(w8001->name, " 2FG", sizeof(w8001->name));
+			if (w8001->max_pen_x && w8001->max_pen_y)
+				w8001->id = 0xE3;
+			else
+				w8001->id = 0xE2;
 			break;
 		}
 	}
 
+	strlcat(w8001->name, " Touchscreen", sizeof(w8001->name));
+
 	return w8001_command(w8001, W8001_CMD_START, false);
 }
 
@@ -384,22 +513,10 @@
 	}
 
 	w8001->serio = serio;
-	w8001->id = serio->id.id;
 	w8001->dev = input_dev;
 	init_completion(&w8001->cmd_done);
 	snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys);
 
-	input_dev->name = "Wacom W8001 Penabled Serial TouchScreen";
-	input_dev->phys = w8001->phys;
-	input_dev->id.bustype = BUS_RS232;
-	input_dev->id.vendor = SERIO_W8001;
-	input_dev->id.product = w8001->id;
-	input_dev->id.version = 0x0100;
-	input_dev->dev.parent = &serio->dev;
-
-	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-	__set_bit(BTN_TOUCH, input_dev->keybit);
-
 	serio_set_drvdata(serio, w8001);
 	err = serio_open(serio, drv);
 	if (err)
@@ -409,6 +526,14 @@
 	if (err)
 		goto fail3;
 
+	input_dev->name = w8001->name;
+	input_dev->phys = w8001->phys;
+	input_dev->id.product = w8001->id;
+	input_dev->id.bustype = BUS_RS232;
+	input_dev->id.vendor = 0x056a;
+	input_dev->id.version = 0x0100;
+	input_dev->dev.parent = &serio->dev;
+
 	err = input_register_device(w8001->dev);
 	if (err)
 		goto fail3;
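The scale_touch_coordinates() helper introduced above maps raw touch samples into the pen's coordinate space so both tools report over the same axis range. A standalone sketch with hypothetical maxima (the driver queries the real ones from the device):

#include <stdio.h>

int main(void)
{
	unsigned int max_pen_x = 26976, max_touch_x = 4096;	/* hypothetical */
	unsigned int x = 2048;					/* raw touch sample */

	/* same scaling as scale_touch_coordinates() */
	x = x * max_pen_x / max_touch_x;
	printf("scaled x = %u\n", x);				/* 13488, mid-axis */
	return 0;
}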
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 178942a..8a3c5cf 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2318,7 +2318,7 @@
 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
 		 le16_to_cpu(udev->descriptor.idProduct));
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
 			    GIGASET_MODULENAME);
 	if (!cs)
@@ -2576,7 +2576,7 @@
 {
 	int result;
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &gigops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index d151dcb..0ef09d0 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -513,7 +513,7 @@
 		return -ENODEV;
 	}
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
 	if (!cs)
 		goto error;
@@ -771,7 +771,7 @@
 		return rc;
 	}
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 					  GIGASET_MODULENAME, GIGASET_DEVNAME,
 					  &ops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4a66338..5e3300d 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -695,7 +695,7 @@
 
 	dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
 	if (!cs)
 		return -ENODEV;
@@ -894,7 +894,7 @@
 {
 	int result;
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &ops, THIS_MODULE);
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798..7bd5baa 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@
   stream interface.
   If synchronous service was requested, then function
   does return amount of data written to stream.
-  'final' does indicate that pice of data to be written is
+  'final' does indicate that piece of data to be written is
   final part of frame (necessary only by structured datatransfer)
   return  0 if zero lengh packet was written
   return -1 if stream is full
diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h
index 74a6ccf..8121e04 100644
--- a/drivers/isdn/hardware/mISDN/ipac.h
+++ b/drivers/isdn/hardware/mISDN/ipac.h
@@ -29,7 +29,7 @@
 	u32			type;
 	u32			off;		/* offset to isac regs */
 	char			*name;
-	spinlock_t		*hwlock;	/* lock HW acccess */
+	spinlock_t		*hwlock;	/* lock HW access */
 	read_reg_func		*read_reg;
 	write_reg_func		*write_reg;
 	fifo_func		*read_fifo;
@@ -70,7 +70,7 @@
 	struct hscx_hw		hscx[2];
 	char			*name;
 	void			*hw;
-	spinlock_t		*hwlock;	/* lock HW acccess */
+	spinlock_t		*hwlock;	/* lock HW access */
 	struct module		*owner;
 	u32			type;
 	read_reg_func		*read_reg;
diff --git a/drivers/isdn/hardware/mISDN/isar.h b/drivers/isdn/hardware/mISDN/isar.h
index 4a134ac..9962bdf 100644
--- a/drivers/isdn/hardware/mISDN/isar.h
+++ b/drivers/isdn/hardware/mISDN/isar.h
@@ -44,7 +44,7 @@
 struct isar_hw {
 	struct	isar_ch	ch[2];
 	void		*hw;
-	spinlock_t	*hwlock;	/* lock HW acccess */
+	spinlock_t	*hwlock;	/* lock HW access */
 	char		*name;
 	struct module	*owner;
 	read_reg_func	*read_reg;
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791..cfff0c4 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
-	struct sk_buff *skb, *oskb;
+	struct sk_buff *skb;
 	struct Layer2 *l2 = &st->l2;
 	u_char header[MAX_HEADER_LEN];
-	int i;
+	int i, hdr_space_needed;
 	int unsigned p1;
 	u_long flags;
 
@@ -1261,6 +1261,16 @@
 	if (!skb)
 		return;
 
+	hdr_space_needed = l2headersize(l2, 0);
+	if (hdr_space_needed > skb_headroom(skb)) {
+		struct sk_buff *orig_skb = skb;
+
+		skb = skb_realloc_headroom(skb, hdr_space_needed);
+		if (!skb) {
+			dev_kfree_skb(orig_skb);
+			return;
+		}
+	}
 	spin_lock_irqsave(&l2->lock, flags);
 	if(test_bit(FLG_MOD128, &l2->flag))
 		p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@
 		l2->vs = (l2->vs + 1) % 8;
 	}
 	spin_unlock_irqrestore(&l2->lock, flags);
-	p1 = skb->data - skb->head;
-	if (p1 >= i)
-		memcpy(skb_push(skb, i), header, i);
-	else {
-		printk(KERN_WARNING
-		"isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
-		oskb = skb;
-		skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
-		memcpy(skb_put(skb, i), header, i);
-		skb_copy_from_linear_data(oskb,
-					  skb_put(skb, oskb->len), oskb->len);
-		dev_kfree_skb(oskb);
-	}
+	memcpy(skb_push(skb, i), header, i);
 	st->l2.l2l1(st, PH_PULL | INDICATION, skb);
 	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
 	if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h
index 729df40..18b801a 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,7 +227,6 @@
 /*************************/
 /* im/exported functions */
 /*************************/
-extern char *hysdn_getrev(const char *);
 
 /* hysdn_procconf.c */
 extern int hysdn_procconf_init(void);	/* init proc config filesys */
@@ -259,7 +258,6 @@
 
 /* hysdn_net.c */
 extern unsigned int hynet_enable; 
-extern char *hysdn_net_revision;
 extern int hysdn_net_create(hysdn_card *);	/* create a new net device */
 extern int hysdn_net_release(hysdn_card *);	/* delete the device */
 extern char *hysdn_net_getname(hysdn_card *);	/* get name of net interface */
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b7cc5c2..0ab42ac 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -36,7 +36,6 @@
 MODULE_AUTHOR("Werner Cornelius");
 MODULE_LICENSE("GPL");
 
-static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
 static int cardmax;		/* number of found cards */
 hysdn_card *card_root = NULL;	/* pointer to first card */
 static hysdn_card *card_last = NULL;	/* pointer to first card */
@@ -49,25 +48,6 @@
 /* Additionally newer versions may be activated without rebooting.          */
 /****************************************************************************/
 
-/******************************************************/
-/* extract revision number from string for log output */
-/******************************************************/
-char *
-hysdn_getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "???";
-	return rev;
-}
-
-
 /****************************************************************************/
 /* init_module is called once when the module is loaded to do all necessary */
 /* things like autodetect...                                                */
@@ -175,13 +155,9 @@
 static int __init
 hysdn_init(void)
 {
-	char tmp[50];
 	int rc;
 
-	strcpy(tmp, hysdn_init_revision);
-	printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
-	strcpy(tmp, hysdn_net_revision);
-	printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
+	printk(KERN_NOTICE "HYSDN: module loaded\n");
 
 	rc = pci_register_driver(&hysdn_pci_driver);
 	if (rc)
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index feec8d8..11f2cce 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -26,9 +26,6 @@
 unsigned int hynet_enable = 0xffffffff; 
 module_param(hynet_enable, uint, 0);
 
-/* store the actual version for log reporting */
-char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
-
 #define MAX_SKB_BUFFERS 20	/* number of buffers for keeping TX-data */
 
 /****************************************************************************/
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 96b3e39..5fe83bd 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -23,7 +23,6 @@
 #include "hysdn_defs.h"
 
 static DEFINE_MUTEX(hysdn_conf_mutex);
-static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
 
 #define INFO_OUT_LEN 80		/* length of info line including lf */
 
@@ -404,7 +403,7 @@
 		card = card->next;	/* next entry */
 	}
 
-	printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision));
+	printk(KERN_NOTICE "HYSDN: procfs initialised\n");
 	return (0);
 }				/* hysdn_procconf_init */
 
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index f2b5bab..1f355bb 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@
 static int __init icn_init(void)
 {
 	char *p;
-	char rev[20];
+	char rev[21];
 
 	memset(&dev, 0, sizeof(icn_dev));
 	dev.memaddr = (membase & 0x0ffc000);
@@ -1638,6 +1638,7 @@
 
 	if ((p = strchr(revision, ':'))) {
 		strncpy(rev, p + 1, 20);
+		rev[20] = '\0';
 		p = strchr(rev, '$');
 		if (p)
 			*p = 0;
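The icn change above exists because strncpy() does not NUL-terminate the destination when the source fills the whole count; growing the buffer by one byte and terminating it explicitly is the usual fix. A generic sketch of that idiom:

#include <string.h>

/* copy at most dst_len - 1 characters and always terminate */
static void copy_bounded(char *dst, size_t dst_len, const char *src)
{
	strncpy(dst, src, dst_len - 1);
	dst[dst_len - 1] = '\0';
}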
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 76d9e67..309bacf 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -112,7 +112,7 @@
  * Disable rx-data:
  * If cmx is realized in hardware, rx data will be disabled if requested by
  * the upper layer. If dtmf decoding is done by software and enabled, rx data
- * will not be diabled but blocked to the upper layer.
+ * will not be disabled but blocked to the upper layer.
  *
  * HFC conference engine:
  * If it is possible to realize all features using hardware, hardware will be
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 33facd0..80a3ae3 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -98,7 +98,6 @@
 #define LP5521_EXT_CLK_USED		0x08
 
 struct lp5521_engine {
-	const struct attribute_group *attributes;
 	int		id;
 	u8		mode;
 	u8		prog_page;
@@ -225,25 +224,22 @@
 		    curr);
 }
 
-static void lp5521_init_engine(struct lp5521_chip *chip,
-			const struct attribute_group *attr_group)
+static void lp5521_init_engine(struct lp5521_chip *chip)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
 		chip->engines[i].id = i + 1;
 		chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
 		chip->engines[i].prog_page = i;
-		chip->engines[i].attributes = &attr_group[i];
 	}
 }
 
-static int lp5521_configure(struct i2c_client *client,
-			const struct attribute_group *attr_group)
+static int lp5521_configure(struct i2c_client *client)
 {
 	struct lp5521_chip *chip = i2c_get_clientdata(client);
 	int ret;
 
-	lp5521_init_engine(chip, attr_group);
+	lp5521_init_engine(chip);
 
 	/* Set all PWMs to direct control mode */
 	ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
@@ -329,9 +325,6 @@
 /* Set engine mode and create appropriate sysfs attributes, if required. */
 static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
 {
-	struct lp5521_chip *chip = engine_to_lp5521(engine);
-	struct i2c_client *client = chip->client;
-	struct device *dev = &client->dev;
 	int ret = 0;
 
 	/* if in that mode already do nothing, except for run */
@@ -343,18 +336,10 @@
 	} else if (mode == LP5521_CMD_LOAD) {
 		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
 		lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
-
-		ret = sysfs_create_group(&dev->kobj, engine->attributes);
-		if (ret)
-			return ret;
 	} else if (mode == LP5521_CMD_DISABLED) {
 		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
 	}
 
-	/* remove load attribute from sysfs if not in load mode */
-	if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
-		sysfs_remove_group(&dev->kobj, engine->attributes);
-
 	engine->mode = mode;
 
 	return ret;
@@ -373,6 +358,8 @@
 	while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
 		/* separate sscanfs because length is working only for %s */
 		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+		if (ret != 2)
+			goto fail;
 		ret = sscanf(c, "%2x", &cmd);
 		if (ret != 1)
 			goto fail;
@@ -387,7 +374,10 @@
 		goto fail;
 
 	mutex_lock(&chip->lock);
-	ret = lp5521_load_program(engine, pattern);
+	if (engine->mode == LP5521_CMD_LOAD)
+		ret = lp5521_load_program(engine, pattern);
+	else
+		ret = -EINVAL;
 	mutex_unlock(&chip->lock);
 
 	if (ret) {
@@ -574,20 +564,8 @@
 	&dev_attr_engine2_mode.attr,
 	&dev_attr_engine3_mode.attr,
 	&dev_attr_selftest.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine1_attributes[] = {
 	&dev_attr_engine1_load.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine2_attributes[] = {
 	&dev_attr_engine2_load.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine3_attributes[] = {
 	&dev_attr_engine3_load.attr,
 	NULL
 };
@@ -596,12 +574,6 @@
 	.attrs = lp5521_attributes,
 };
 
-static const struct attribute_group lp5521_engine_group[] = {
-	{.attrs = lp5521_engine1_attributes },
-	{.attrs = lp5521_engine2_attributes },
-	{.attrs = lp5521_engine3_attributes },
-};
-
 static int lp5521_register_sysfs(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;
@@ -616,12 +588,6 @@
 
 	sysfs_remove_group(&dev->kobj, &lp5521_group);
 
-	for (i = 0; i <  ARRAY_SIZE(chip->engines); i++) {
-		if (chip->engines[i].mode == LP5521_CMD_LOAD)
-			sysfs_remove_group(&dev->kobj,
-					chip->engines[i].attributes);
-	}
-
 	for (i = 0; i < chip->num_leds; i++)
 		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
 				&lp5521_led_attribute_group);
@@ -651,7 +617,8 @@
 		return -EINVAL;
 	}
 
-	snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+	snprintf(name, sizeof(name), "%s:channel%d",
+			pdata->label ?: client->name, chan);
 	led->cdev.brightness_set = lp5521_set_brightness;
 	led->cdev.name = name;
 	res = led_classdev_register(dev, &led->cdev);
@@ -723,7 +690,7 @@
 
 	dev_info(&client->dev, "%s programmable led chip found\n", id->name);
 
-	ret = lp5521_configure(client, lp5521_engine_group);
+	ret = lp5521_configure(client);
 	if (ret < 0) {
 		dev_err(&client->dev, "error configuring chip\n");
 		goto fail2;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 0cc4ead..d0c4068 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -105,7 +105,6 @@
 #define SHIFT_MASK(id)			(((id) - 1) * 2)
 
 struct lp5523_engine {
-	const struct attribute_group *attributes;
 	int		id;
 	u8		mode;
 	u8		prog_page;
@@ -403,14 +402,23 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lp5523_chip *chip = i2c_get_clientdata(client);
 	u16 mux = 0;
+	ssize_t ret;
 
 	if (lp5523_mux_parse(buf, &mux, len))
 		return -EINVAL;
 
-	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
-		return -EINVAL;
+	mutex_lock(&chip->lock);
+	ret = -EINVAL;
+	if (chip->engines[nr - 1].mode != LP5523_CMD_LOAD)
+		goto leave;
 
-	return len;
+	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
+		goto leave;
+
+	ret = len;
+leave:
+	mutex_unlock(&chip->lock);
+	return ret;
 }
 
 #define store_leds(nr)						\
@@ -556,7 +564,11 @@
 
 	mutex_lock(&chip->lock);
 
-	ret = lp5523_load_program(engine, pattern);
+	if (engine->mode == LP5523_CMD_LOAD)
+		ret = lp5523_load_program(engine, pattern);
+	else
+		ret = -EINVAL;
+
 	mutex_unlock(&chip->lock);
 
 	if (ret) {
@@ -737,37 +749,18 @@
 	&dev_attr_engine2_mode.attr,
 	&dev_attr_engine3_mode.attr,
 	&dev_attr_selftest.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine1_attributes[] = {
 	&dev_attr_engine1_load.attr,
 	&dev_attr_engine1_leds.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine2_attributes[] = {
 	&dev_attr_engine2_load.attr,
 	&dev_attr_engine2_leds.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine3_attributes[] = {
 	&dev_attr_engine3_load.attr,
 	&dev_attr_engine3_leds.attr,
-	NULL
 };
 
 static const struct attribute_group lp5523_group = {
 	.attrs = lp5523_attributes,
 };
 
-static const struct attribute_group lp5523_engine_group[] = {
-	{.attrs = lp5523_engine1_attributes },
-	{.attrs = lp5523_engine2_attributes },
-	{.attrs = lp5523_engine3_attributes },
-};
-
 static int lp5523_register_sysfs(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;
@@ -788,10 +781,6 @@
 
 	sysfs_remove_group(&dev->kobj, &lp5523_group);
 
-	for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
-		if (chip->engines[i].mode == LP5523_CMD_LOAD)
-			sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
-
 	for (i = 0; i < chip->num_leds; i++)
 		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
 				&lp5523_led_attribute_group);
@@ -802,10 +791,6 @@
 /*--------------------------------------------------------------*/
 static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
 {
-	/*  engine to chip */
-	struct lp5523_chip *chip = engine_to_lp5523(engine);
-	struct i2c_client *client = chip->client;
-	struct device *dev = &client->dev;
 	int ret = 0;
 
 	/* if in that mode already do nothing, except for run */
@@ -817,18 +802,10 @@
 	} else if (mode == LP5523_CMD_LOAD) {
 		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
 		lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
-
-		ret = sysfs_create_group(&dev->kobj, engine->attributes);
-		if (ret)
-			return ret;
 	} else if (mode == LP5523_CMD_DISABLED) {
 		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
 	}
 
-	/* remove load attribute from sysfs if not in load mode */
-	if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
-		sysfs_remove_group(&dev->kobj, engine->attributes);
-
 	engine->mode = mode;
 
 	return ret;
@@ -845,7 +822,6 @@
 	engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
 	engine->prog_page = id - 1;
 	engine->mux_page = id + 2;
-	engine->attributes = &lp5523_engine_group[id - 1];
 
 	return 0;
 }
@@ -870,7 +846,8 @@
 			return -EINVAL;
 		}
 
-		snprintf(name, 32, "lp5523:channel%d", chan);
+		snprintf(name, sizeof(name), "%s:channel%d",
+			pdata->label ?: "lp5523", chan);
 
 		led->cdev.name = name;
 		led->cdev.brightness_set = lp5523_set_brightness;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 43d0875..afac338 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -200,6 +200,32 @@
 	pca9532_setled(led);
 }
 
+static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
+{
+	int i = n_devs;
+
+	if (!data)
+		return;
+
+	while (--i >= 0) {
+		switch (data->leds[i].type) {
+		case PCA9532_TYPE_NONE:
+			break;
+		case PCA9532_TYPE_LED:
+			led_classdev_unregister(&data->leds[i].ldev);
+			cancel_work_sync(&data->leds[i].work);
+			break;
+		case PCA9532_TYPE_N2100_BEEP:
+			if (data->idev != NULL) {
+				input_unregister_device(data->idev);
+				cancel_work_sync(&data->work);
+				data->idev = NULL;
+			}
+			break;
+		}
+	}
+}
+
 static int pca9532_configure(struct i2c_client *client,
 	struct pca9532_data *data, struct pca9532_platform_data *pdata)
 {
@@ -274,25 +300,7 @@
 	return 0;
 
 exit:
-	if (i > 0)
-		for (i = i - 1; i >= 0; i--)
-			switch (data->leds[i].type) {
-			case PCA9532_TYPE_NONE:
-				break;
-			case PCA9532_TYPE_LED:
-				led_classdev_unregister(&data->leds[i].ldev);
-				cancel_work_sync(&data->leds[i].work);
-				break;
-			case PCA9532_TYPE_N2100_BEEP:
-				if (data->idev != NULL) {
-					input_unregister_device(data->idev);
-					input_free_device(data->idev);
-					cancel_work_sync(&data->work);
-					data->idev = NULL;
-				}
-				break;
-			}
-
+	pca9532_destroy_devices(data, i);
 	return err;
 }
 
@@ -329,25 +337,7 @@
 static int pca9532_remove(struct i2c_client *client)
 {
 	struct pca9532_data *data = i2c_get_clientdata(client);
-	int i;
-	for (i = 0; i < 16; i++)
-		switch (data->leds[i].type) {
-		case PCA9532_TYPE_NONE:
-			break;
-		case PCA9532_TYPE_LED:
-			led_classdev_unregister(&data->leds[i].ldev);
-			cancel_work_sync(&data->leds[i].work);
-			break;
-		case PCA9532_TYPE_N2100_BEEP:
-			if (data->idev != NULL) {
-				input_unregister_device(data->idev);
-				input_free_device(data->idev);
-				cancel_work_sync(&data->work);
-				data->idev = NULL;
-			}
-			break;
-		}
-
+	pca9532_destroy_devices(data, 16);
 	kfree(data);
 	return 0;
 }
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index da3fa8d..666daf7 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -69,6 +69,7 @@
 		led_dat->pwm = pwm_request(cur_led->pwm_id,
 				cur_led->name);
 		if (IS_ERR(led_dat->pwm)) {
+			ret = PTR_ERR(led_dat->pwm);
 			dev_err(&pdev->dev, "unable to request PWM %d\n",
 					cur_led->pwm_id);
 			goto err;
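Both the clk_get() fix in the tnetv107x hunk earlier and the pwm_request() fix above deal with the same convention: these allocators report failure through an ERR_PTR()-encoded errno rather than NULL, so callers must test with IS_ERR() and propagate PTR_ERR(). A minimal sketch, using clk_get() as the example:

#include <linux/clk.h>
#include <linux/err.h>

static int claim_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* e.g. -ENOENT; never a NULL check */

	*out = clk;
	return 0;
}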
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index f948e57..2b513a2 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -26,6 +26,7 @@
 	int brightness;
 	int old_status;
 	struct notifier_block notifier;
+	unsigned invert;
 };
 
 static int fb_notifier_callback(struct notifier_block *p,
@@ -36,23 +37,64 @@
 	struct led_classdev *led = n->led;
 	struct fb_event *fb_event = data;
 	int *blank = fb_event->data;
+	int new_status = *blank ? BLANK : UNBLANK;
 
 	switch (event) {
 	case FB_EVENT_BLANK :
-		if (*blank && n->old_status == UNBLANK) {
+		if (new_status == n->old_status)
+			break;
+
+		if ((n->old_status == UNBLANK) ^ n->invert) {
 			n->brightness = led->brightness;
 			led_set_brightness(led, LED_OFF);
-			n->old_status = BLANK;
-		} else if (!*blank && n->old_status == BLANK) {
+		} else {
 			led_set_brightness(led, n->brightness);
-			n->old_status = UNBLANK;
 		}
+
+		n->old_status = new_status;
+
 		break;
 	}
 
 	return 0;
 }
 
+static ssize_t bl_trig_invert_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led = dev_get_drvdata(dev);
+	struct bl_trig_notifier *n = led->trigger_data;
+
+	return sprintf(buf, "%u\n", n->invert);
+}
+
+static ssize_t bl_trig_invert_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t num)
+{
+	struct led_classdev *led = dev_get_drvdata(dev);
+	struct bl_trig_notifier *n = led->trigger_data;
+	unsigned long invert;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &invert);
+	if (ret < 0)
+		return ret;
+
+	if (invert > 1)
+		return -EINVAL;
+
+	n->invert = invert;
+
+	/* After inverting, we need to update the LED. */
+	if ((n->old_status == BLANK) ^ n->invert)
+		led_set_brightness(led, LED_OFF);
+	else
+		led_set_brightness(led, n->brightness);
+
+	return num;
+}
+static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store);
+
 static void bl_trig_activate(struct led_classdev *led)
 {
 	int ret;
@@ -66,6 +108,10 @@
 		return;
 	}
 
+	ret = device_create_file(led->dev, &dev_attr_inverted);
+	if (ret)
+		goto err_invert;
+
 	n->led = led;
 	n->brightness = led->brightness;
 	n->old_status = UNBLANK;
@@ -74,6 +120,12 @@
 	ret = fb_register_client(&n->notifier);
 	if (ret)
 		dev_err(led->dev, "unable to register backlight trigger\n");
+
+	return;
+
+err_invert:
+	led->trigger_data = NULL;
+	kfree(n);
 }
 
 static void bl_trig_deactivate(struct led_classdev *led)
@@ -82,6 +134,7 @@
 		(struct bl_trig_notifier *) led->trigger_data;
 
 	if (n) {
+		device_remove_file(led->dev, &dev_attr_inverted);
 		fb_unregister_client(&n->notifier);
 		kfree(n);
 	}
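The new "inverted" attribute above flips the relationship between backlight state and LED: with invert = 0 the LED mirrors the backlight, with invert = 1 it lights while the display is blanked, which suits a standby-style indicator. A sketch of the decision, using the trigger's own BLANK/UNBLANK values; the helper name is made up:

#include <linux/types.h>

/* BLANK/UNBLANK are the trigger's own constants; returns true when the
 * LED should be off for the new framebuffer state */
static bool led_off_for(int new_status, unsigned int invert)
{
	/* matches the notifier: XOR the state test with the invert flag */
	return (new_status == BLANK) ^ invert;
}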
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 991d93b..ecc4bf3 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -99,7 +99,7 @@
 	struct led_classdev *led = dev_get_drvdata(dev);
 	struct gpio_trig_data *gpio_data = led->trigger_data;
 
-	return sprintf(buf, "%s\n", gpio_data->inverted ? "yes" : "no");
+	return sprintf(buf, "%u\n", gpio_data->inverted);
 }
 
 static ssize_t gpio_trig_inverted_store(struct device *dev,
@@ -107,16 +107,17 @@
 {
 	struct led_classdev *led = dev_get_drvdata(dev);
 	struct gpio_trig_data *gpio_data = led->trigger_data;
-	unsigned inverted;
+	unsigned long inverted;
 	int ret;
 
-	ret = sscanf(buf, "%u", &inverted);
-	if (ret < 1) {
-		dev_err(dev, "invalid value\n");
-		return -EINVAL;
-	}
+	ret = strict_strtoul(buf, 10, &inverted);
+	if (ret < 0)
+		return ret;
 
-	gpio_data->inverted = !!inverted;
+	if (inverted > 1)
+		return -EINVAL;
+
+	gpio_data->inverted = inverted;
 
 	/* After inverting, we need to update the LED. */
 	schedule_work(&gpio_data->work);
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 04b2212..d21578e 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1137,7 +1137,7 @@
  */
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
-	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
+	pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
 	pte_t regs_pte;
 
 #ifdef CONFIG_X86_PAE
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index b4eb675..9f1659c 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -90,8 +90,8 @@
 	 * meanwhile).  If that's not the case, we pretend everything in the
 	 * Guest has changed.
 	 */
-	if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
-		__get_cpu_var(lg_last_cpu) = cpu;
+	if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+		__this_cpu_write(lg_last_cpu, cpu);
 		cpu->last_pages = pages;
 		cpu->changed = CHANGED_ALL;
 	}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6e7ddc..4daf9e5 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,11 +387,10 @@
 	/* Set the DMA ops to the ones from the PCI device, this could be
 	 * fishy if we didn't know that on PowerMac it's always direct ops
 	 * or iommu ops that will work fine
+	 *
+	 * To get all the fields, copy all archdata
 	 */
-	dev->ofdev.dev.archdata.dma_ops =
-		chip->lbus.pdev->dev.archdata.dma_ops;
-	dev->ofdev.dev.archdata.dma_data =
-		chip->lbus.pdev->dev.archdata.dma_data;
+	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
 #endif /* CONFIG_PCI */
 
 #ifdef DEBUG
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 4454927..f3a29f2 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -443,7 +443,7 @@
 	tries = 0;
 	for (;;) {
 		nr = i2c_master_recv(fcu, buf, nb);
-		if (nr > 0 || (nr < 0 && nr != ENODEV) || tries >= 100)
+		if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
 			break;
 		msleep(10);
 		++tries;
@@ -464,7 +464,7 @@
 	tries = 0;
 	for (;;) {
 		nw = i2c_master_send(fcu, buf, nb);
-		if (nw > 0 || (nw < 0 && nw != EIO) || tries >= 100)
+		if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
 			break;
 		msleep(10);
 		++tries;
@@ -2213,6 +2213,9 @@
 static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
 {
 	state = state_detached;
+	of_dev = dev;
+
+	dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
 
 	/* Lookup the fans in the device tree */
 	fcu_lookup_fans(dev->dev.of_node);
@@ -2235,6 +2238,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, fcu_match);
 
 static struct of_platform_driver fcu_of_platform_driver = 
 {
@@ -2252,8 +2256,6 @@
  */
 static int __init therm_pm72_init(void)
 {
-	struct device_node *np;
-
 	rackmac = of_machine_is_compatible("RackMac3,1");
 
 	if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -2261,34 +2263,12 @@
 	    !rackmac)
 	    	return -ENODEV;
 
-	printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION);
-
-	np = of_find_node_by_type(NULL, "fcu");
-	if (np == NULL) {
-		/* Some machines have strangely broken device-tree */
-		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
-		if (np == NULL) {
-			    printk(KERN_ERR "Can't find FCU in device-tree !\n");
-			    return -ENODEV;
-		}
-	}
-	of_dev = of_platform_device_create(np, "temperature", NULL);
-	if (of_dev == NULL) {
-		printk(KERN_ERR "Can't register FCU platform device !\n");
-		return -ENODEV;
-	}
-
-	of_register_platform_driver(&fcu_of_platform_driver);
-	
-	return 0;
+	return of_register_platform_driver(&fcu_of_platform_driver);
 }
 
 static void __exit therm_pm72_exit(void)
 {
 	of_unregister_platform_driver(&fcu_of_platform_driver);
-
-	if (of_dev)
-		of_device_unregister(of_dev);
 }
 
 module_init(therm_pm72_init);
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 1cec02f..ade1e65 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -15,7 +15,7 @@
 
 #define MAX_PMU_LEVEL 0xFF
 
-static struct backlight_ops pmu_backlight_data;
+static const struct backlight_ops pmu_backlight_data;
 static DEFINE_SPINLOCK(pmu_backlight_lock);
 static int sleeping, uses_pmu_bl;
 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
@@ -115,7 +115,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops pmu_backlight_data = {
+static const struct backlight_ops pmu_backlight_data = {
 	.get_brightness	= pmu_backlight_get_brightness,
 	.update_status	= pmu_backlight_update_status,
 
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index cd29c82..8b021eb 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2257,7 +2257,7 @@
 		&& (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
 }
 
-static struct platform_suspend_ops pmu_pm_ops = {
+static const struct platform_suspend_ops pmu_pm_ops = {
 	.enter = powerbook_sleep,
 	.valid = pmu_sleep_valid,
 };
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index bf1a95e3..98d9ec8 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,6 +240,30 @@
          Allow volume managers to mirror logical volumes, also
          needed for live data migration tools such as 'pvmove'.
 
+config DM_RAID
+       tristate "RAID 4/5/6 target (EXPERIMENTAL)"
+       depends on BLK_DEV_DM && EXPERIMENTAL
+       select MD_RAID456
+       select BLK_DEV_MD
+       ---help---
+	 A dm target that supports RAID4, RAID5 and RAID6 mappings
+
+	 A RAID-5 set of N drives with a capacity of C MB per drive provides
+	 the capacity of C * (N - 1) MB, and protects against a failure
+	 of a single drive. For a given sector (row) number, (N - 1) drives
+	 contain data sectors, and one drive contains the parity protection.
+	 For a RAID-4 set, the parity blocks are present on a single drive,
+	 while a RAID-5 set distributes the parity across the drives in one
+	 of the available parity distribution methods.
+
+	 A RAID-6 set of N drives with a capacity of C MB per drive
+	 provides the capacity of C * (N - 2) MB, and protects
+	 against a failure of any two drives. For a given sector
+	 (row) number, (N - 2) drives contain data sectors, and two
+	 drives contain two independent redundancy syndromes.  Like
+	 RAID-5, RAID-6 distributes the syndromes across the drives
+	 in one of the available parity distribution methods.
+
 config DM_LOG_USERSPACE
 	tristate "Mirror userspace logging (EXPERIMENTAL)"
 	depends on DM_MIRROR && EXPERIMENTAL && NET
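
A quick worked example of the capacity arithmetic in the DM_RAID help text above (the drive count and size are arbitrary, chosen only for illustration): with N = 4 drives of C = 500 MB each, a RAID-5 set provides 500 * (4 - 1) = 1500 MB and survives one drive failure, while a RAID-6 set provides 500 * (4 - 2) = 1000 MB and survives any two drive failures.
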
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5e3aac4..d013860 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -36,6 +36,7 @@
 obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o dm-region-hash.o
 obj-$(CONFIG_DM_LOG_USERSPACE)	+= dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
+obj-$(CONFIG_DM_RAID)	+= dm-raid.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs			+= dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5a1ffe3..9a35320 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -210,11 +210,11 @@
 		    || test_bit(Faulty, &rdev->flags))
 			continue;
 
-		target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
+		target = offset + index * (PAGE_SIZE/512);
 
 		if (sync_page_io(rdev, target,
 				 roundup(size, bdev_logical_block_size(rdev->bdev)),
-				 page, READ)) {
+				 page, READ, true)) {
 			page->index = index;
 			attach_page_buffers(page, NULL); /* so that free_buffer will
 							  * quietly no-op */
@@ -264,14 +264,18 @@
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
 	mdk_rdev_t *rdev = NULL;
+	struct block_device *bdev;
 	mddev_t *mddev = bitmap->mddev;
 
 	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 		int size = PAGE_SIZE;
 		loff_t offset = mddev->bitmap_info.offset;
+
+		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
+
 		if (page->index == bitmap->file_pages-1)
 			size = roundup(bitmap->last_page_size,
-				       bdev_logical_block_size(rdev->bdev));
+				       bdev_logical_block_size(bdev));
 		/* Just make sure we aren't corrupting data or
 		 * metadata
 		 */
@@ -1542,7 +1546,7 @@
 	wait_event(bitmap->mddev->recovery_wait,
 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
-	bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
+	bitmap->mddev->curr_resync_completed = sector;
 	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
 	sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
 	s = 0;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d5b0e4c..4e054bd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,10 +18,14 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
 
 #include <linux/device-mapper.h>
 
@@ -63,6 +67,7 @@
 	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
+	sector_t iv_sector;
 };
 
 struct crypt_config;
@@ -73,11 +78,13 @@
 	void (*dtr)(struct crypt_config *cc);
 	int (*init)(struct crypt_config *cc);
 	int (*wipe)(struct crypt_config *cc);
-	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
+	int (*generator)(struct crypt_config *cc, u8 *iv,
+			 struct dm_crypt_request *dmreq);
+	int (*post)(struct crypt_config *cc, u8 *iv,
+		    struct dm_crypt_request *dmreq);
 };
 
 struct iv_essiv_private {
-	struct crypto_cipher *tfm;
 	struct crypto_hash *hash_tfm;
 	u8 *salt;
 };
@@ -86,11 +93,32 @@
 	int shift;
 };
 
+#define LMK_SEED_SIZE 64 /* hash + 0 */
+struct iv_lmk_private {
+	struct crypto_shash *hash_tfm;
+	u8 *seed;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+
+/*
+ * Duplicated per-CPU state for cipher.
+ */
+struct crypt_cpu {
+	struct ablkcipher_request *req;
+	/* ESSIV: struct crypto_cipher *essiv_tfm */
+	void *iv_private;
+	struct crypto_ablkcipher *tfms[0];
+};
+
+/*
+ * The fields in here must be read only after initialization;
+ * changing state should be in crypt_cpu.
+ */
 struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
@@ -108,17 +136,25 @@
 	struct workqueue_struct *crypt_queue;
 
 	char *cipher;
-	char *cipher_mode;
+	char *cipher_string;
 
 	struct crypt_iv_operations *iv_gen_ops;
 	union {
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
+		struct iv_lmk_private lmk;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
 	/*
+	 * Duplicated per cpu state. Access through
+	 * per_cpu_ptr() only.
+	 */
+	struct crypt_cpu __percpu *cpu;
+	unsigned tfms_count;
+
+	/*
 	 * Layout of each crypto request:
 	 *
 	 *   struct ablkcipher_request
@@ -132,11 +168,10 @@
 	 * correctly aligned.
 	 */
 	unsigned int dmreq_start;
-	struct ablkcipher_request *req;
 
-	struct crypto_ablkcipher *tfm;
 	unsigned long flags;
 	unsigned int key_size;
+	unsigned int key_parts;
 	u8 key[0];
 };
 
@@ -148,6 +183,20 @@
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
+
+static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
+{
+	return this_cpu_ptr(cc->cpu);
+}
+
+/*
+ * Use this to access cipher attributes that are the same for each CPU.
+ */
+static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+{
+	return __this_cpu_ptr(cc->cpu)->tfms[0];
+}
 
 /*
  * Different IV generation algorithms:
@@ -168,23 +217,38 @@
  * null: the initial vector is always zero.  Provides compatibility with
  *       obsolete loop_fish2 devices.  Do not use for new devices.
  *
+ * lmk:  Compatible implementation of the block chaining mode used
+ *       by the Loop-AES block device encryption system
+ *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
+ *       It operates on full 512-byte sectors and uses CBC
+ *       with an IV derived from the sector number, the data and
+ *       optionally an extra IV seed.
+ *       This means that after decryption the first block of the
+ *       sector must be tweaked according to the decrypted data.
+ *       Loop-AES can use three encryption schemes:
+ *         version 1: plain aes-cbc mode
+ *         version 2: uses a 64-key (multikey) scheme with the lmk
+ *                    IV generator
+ *         version 3: the same as version 2, with an additional IV seed
+ *                    (it uses 65 keys; the last key is used as the IV seed)
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
 
-static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
+	*(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
 
 	return 0;
 }
 
 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
-				sector_t sector)
+				struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
 
 	return 0;
 }
@@ -195,7 +259,8 @@
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	struct hash_desc desc;
 	struct scatterlist sg;
-	int err;
+	struct crypto_cipher *essiv_tfm;
+	int err, cpu;
 
 	sg_init_one(&sg, cc->key, cc->key_size);
 	desc.tfm = essiv->hash_tfm;
@@ -205,8 +270,16 @@
 	if (err)
 		return err;
 
-	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+
+		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
 				    crypto_hash_digestsize(essiv->hash_tfm));
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 /* Wipe salt and reset key derived from volume key */
@@ -214,24 +287,76 @@
 {
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+	struct crypto_cipher *essiv_tfm;
+	int cpu, r, err = 0;
 
 	memset(essiv->salt, 0, salt_size);
 
-	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+		if (r)
+			err = r;
+	}
+
+	return err;
+}
+
+/* Set up per cpu cipher state */
+static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
+					     struct dm_target *ti,
+					     u8 *salt, unsigned saltsize)
+{
+	struct crypto_cipher *essiv_tfm;
+	int err;
+
+	/* Setup the essiv_tfm with the given salt */
+	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(essiv_tfm)) {
+		ti->error = "Error allocating crypto tfm for ESSIV";
+		return essiv_tfm;
+	}
+
+	if (crypto_cipher_blocksize(essiv_tfm) !=
+	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
+		ti->error = "Block size of ESSIV cipher does "
+			    "not match IV size of block cipher";
+		crypto_free_cipher(essiv_tfm);
+		return ERR_PTR(-EINVAL);
+	}
+
+	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+	if (err) {
+		ti->error = "Failed to set key for ESSIV cipher";
+		crypto_free_cipher(essiv_tfm);
+		return ERR_PTR(err);
+	}
+
+	return essiv_tfm;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
+	int cpu;
+	struct crypt_cpu *cpu_cc;
+	struct crypto_cipher *essiv_tfm;
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-	crypto_free_cipher(essiv->tfm);
-	essiv->tfm = NULL;
-
 	crypto_free_hash(essiv->hash_tfm);
 	essiv->hash_tfm = NULL;
 
 	kzfree(essiv->salt);
 	essiv->salt = NULL;
+
+	for_each_possible_cpu(cpu) {
+		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+		essiv_tfm = cpu_cc->iv_private;
+
+		if (essiv_tfm)
+			crypto_free_cipher(essiv_tfm);
+
+		cpu_cc->iv_private = NULL;
+	}
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -240,7 +365,7 @@
 	struct crypto_cipher *essiv_tfm = NULL;
 	struct crypto_hash *hash_tfm = NULL;
 	u8 *salt = NULL;
-	int err;
+	int err, cpu;
 
 	if (!opts) {
 		ti->error = "Digest algorithm missing for ESSIV mode";
@@ -262,48 +387,44 @@
 		goto bad;
 	}
 
-	/* Allocate essiv_tfm */
-	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(essiv_tfm)) {
-		ti->error = "Error allocating crypto tfm for ESSIV";
-		err = PTR_ERR(essiv_tfm);
-		goto bad;
-	}
-	if (crypto_cipher_blocksize(essiv_tfm) !=
-	    crypto_ablkcipher_ivsize(cc->tfm)) {
-		ti->error = "Block size of ESSIV cipher does "
-			    "not match IV size of block cipher";
-		err = -EINVAL;
-		goto bad;
-	}
-
 	cc->iv_gen_private.essiv.salt = salt;
-	cc->iv_gen_private.essiv.tfm = essiv_tfm;
 	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+					crypto_hash_digestsize(hash_tfm));
+		if (IS_ERR(essiv_tfm)) {
+			crypt_iv_essiv_dtr(cc);
+			return PTR_ERR(essiv_tfm);
+		}
+		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
+	}
+
 	return 0;
 
 bad:
-	if (essiv_tfm && !IS_ERR(essiv_tfm))
-		crypto_free_cipher(essiv_tfm);
 	if (hash_tfm && !IS_ERR(hash_tfm))
 		crypto_free_hash(hash_tfm);
 	kfree(salt);
 	return err;
 }
 
-static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
+	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
-	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
+
 	return 0;
 }
 
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
+	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
 	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
@@ -328,25 +449,177 @@
 {
 }
 
-static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	__be64 val;
 
 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
 
-	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
+	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
 
 	return 0;
 }
 
-static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
 
 	return 0;
 }
 
+static void crypt_iv_lmk_dtr(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
+		crypto_free_shash(lmk->hash_tfm);
+	lmk->hash_tfm = NULL;
+
+	kzfree(lmk->seed);
+	lmk->seed = NULL;
+}
+
+static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
+			    const char *opts)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(lmk->hash_tfm)) {
+		ti->error = "Error initializing LMK hash";
+		return PTR_ERR(lmk->hash_tfm);
+	}
+
+	/* No seed in LMK version 2 */
+	if (cc->key_parts == cc->tfms_count) {
+		lmk->seed = NULL;
+		return 0;
+	}
+
+	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
+	if (!lmk->seed) {
+		crypt_iv_lmk_dtr(cc);
+		ti->error = "Error kmallocing seed storage in LMK";
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int crypt_iv_lmk_init(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	int subkey_size = cc->key_size / cc->key_parts;
+
+	/* The LMK seed is stored at the position of key LMK_KEYS + 1 */
+	if (lmk->seed)
+		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
+		       crypto_shash_digestsize(lmk->hash_tfm));
+
+	return 0;
+}
+
+static int crypt_iv_lmk_wipe(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->seed)
+		memset(lmk->seed, 0, LMK_SEED_SIZE);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq,
+			    u8 *data)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	struct {
+		struct shash_desc desc;
+		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
+	} sdesc;
+	struct md5_state md5state;
+	u32 buf[4];
+	int i, r;
+
+	sdesc.desc.tfm = lmk->hash_tfm;
+	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	r = crypto_shash_init(&sdesc.desc);
+	if (r)
+		return r;
+
+	if (lmk->seed) {
+		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
+		if (r)
+			return r;
+	}
+
+	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
+	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
+	if (r)
+		return r;
+
+	/* Sector is cropped to 56 bits here */
+	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
+	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
+	buf[2] = cpu_to_le32(4024);
+	buf[3] = 0;
+	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
+	if (r)
+		return r;
+
+	/* No MD5 padding here */
+	r = crypto_shash_export(&sdesc.desc, &md5state);
+	if (r)
+		return r;
+
+	for (i = 0; i < MD5_HASH_WORDS; i++)
+		__cpu_to_le32s(&md5state.hash[i]);
+	memcpy(iv, &md5state.hash, cc->iv_size);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq)
+{
+	u8 *src;
+	int r = 0;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
+		kunmap_atomic(src, KM_USER0);
+	} else
+		memset(iv, 0, cc->iv_size);
+
+	return r;
+}
+
+static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
+{
+	u8 *dst;
+	int r;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
+		return 0;
+
+	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
+
+	/* Tweak the first block of plaintext sector */
+	if (!r)
+		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
+
+	kunmap_atomic(dst, KM_USER0);
+	return r;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
@@ -373,6 +646,15 @@
 	.generator = crypt_iv_null_gen
 };
 
+static struct crypt_iv_operations crypt_iv_lmk_ops = {
+	.ctr	   = crypt_iv_lmk_ctr,
+	.dtr	   = crypt_iv_lmk_dtr,
+	.init	   = crypt_iv_lmk_init,
+	.wipe	   = crypt_iv_lmk_wipe,
+	.generator = crypt_iv_lmk_gen,
+	.post	   = crypt_iv_lmk_post
+};
+
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -400,6 +682,13 @@
 	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
+static u8 *iv_of_dmreq(struct crypt_config *cc,
+		       struct dm_crypt_request *dmreq)
+{
+	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
+		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+}
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -411,9 +700,9 @@
 	int r = 0;
 
 	dmreq = dmreq_of_req(cc, req);
-	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
-			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
+	iv = iv_of_dmreq(cc, dmreq);
 
+	dmreq->iv_sector = ctx->sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -436,7 +725,7 @@
 	}
 
 	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
+		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
 		if (r < 0)
 			return r;
 	}
@@ -449,21 +738,28 @@
 	else
 		r = crypto_ablkcipher_decrypt(req);
 
+	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		r = cc->iv_gen_ops->post(cc, iv, dmreq);
+
 	return r;
 }
 
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error);
+
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	if (!cc->req)
-		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
-	ablkcipher_request_set_tfm(cc->req, cc->tfm);
-	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-					CRYPTO_TFM_REQ_MAY_SLEEP,
-					kcryptd_async_done,
-					dmreq_of_req(cc, cc->req));
+	struct crypt_cpu *this_cc = this_crypt_config(cc);
+	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
+
+	if (!this_cc->req)
+		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+
+	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
+	ablkcipher_request_set_callback(this_cc->req,
+	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
 }
 
 /*
@@ -472,6 +768,7 @@
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
+	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->pending, 1);
@@ -483,7 +780,7 @@
 
 		atomic_inc(&ctx->pending);
 
-		r = crypt_convert_block(cc, ctx, cc->req);
+		r = crypt_convert_block(cc, ctx, this_cc->req);
 
 		switch (r) {
 		/* async */
@@ -492,7 +789,7 @@
 			INIT_COMPLETION(ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			cc->req = NULL;
+			this_cc->req = NULL;
 			ctx->sector++;
 			continue;
 
@@ -651,6 +948,9 @@
  * They must be separated as otherwise the final stages could be
  * starved by new requests which can block in the first stages due
  * to memory allocation.
+ *
+ * The work is done per-CPU, globally for all dm-crypt instances.
+ * They should not depend on each other and do not block.
  */
 static void crypt_endio(struct bio *clone, int error)
 {
@@ -691,26 +991,30 @@
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_io_read(struct dm_crypt_io *io)
+static void kcryptd_unplug(struct crypt_config *cc)
+{
+	blk_unplug(bdev_get_queue(cc->dev->bdev));
+}
+
+static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	crypt_inc_pending(io);
-
 	/*
 	 * The block layer might modify the bvec array, so always
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
-	if (unlikely(!clone)) {
-		io->error = -ENOMEM;
-		crypt_dec_pending(io);
-		return;
+	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+	if (!clone) {
+		kcryptd_unplug(cc);
+		return 1;
 	}
 
+	crypt_inc_pending(io);
+
 	clone_init(io, clone);
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
@@ -720,6 +1024,7 @@
 	       sizeof(struct bio_vec) * clone->bi_vcnt);
 
 	generic_make_request(clone);
+	return 0;
 }
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
@@ -732,9 +1037,12 @@
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_io_read(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		crypt_inc_pending(io);
+		if (kcryptd_io_read(io, GFP_NOIO))
+			io->error = -ENOMEM;
+		crypt_dec_pending(io);
+	} else
 		kcryptd_io_write(io);
 }
 
@@ -901,6 +1209,9 @@
 		return;
 	}
 
+	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+
 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
@@ -971,34 +1282,84 @@
 	}
 }
 
+static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+
+	for (i = 0; i < cc->tfms_count; i++)
+		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
+			crypto_free_ablkcipher(cpu_cc->tfms[i]);
+			cpu_cc->tfms[i] = NULL;
+		}
+}
+
+static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+	int err;
+
+	for (i = 0; i < cc->tfms_count; i++) {
+		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+		if (IS_ERR(cpu_cc->tfms[i])) {
+			err = PTR_ERR(cpu_cc->tfms[i]);
+			crypt_free_tfms(cc, cpu);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int crypt_setkey_allcpus(struct crypt_config *cc)
+{
+	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+	int cpu, err = 0, i, r;
+
+	for_each_possible_cpu(cpu) {
+		for (i = 0; i < cc->tfms_count; i++) {
+			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
+						     cc->key + (i * subkey_size), subkey_size);
+			if (r)
+				err = r;
+		}
+	}
+
+	return err;
+}
+
 static int crypt_set_key(struct crypt_config *cc, char *key)
 {
-	unsigned key_size = strlen(key) >> 1;
-
-	if (cc->key_size && cc->key_size != key_size)
+	/* The key size may not be changed. */
+	if (cc->key_size != (strlen(key) >> 1))
 		return -EINVAL;
 
-	cc->key_size = key_size; /* initial settings */
+	/* Hyphen (which gives a key_size of zero) means there is no key. */
+	if (!cc->key_size && strcmp(key, "-"))
+		return -EINVAL;
 
-	if ((!key_size && strcmp(key, "-")) ||
-	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
+	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
 		return -EINVAL;
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
-	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+	return crypt_setkey_allcpus(cc);
 }
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
-	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+
+	return crypt_setkey_allcpus(cc);
 }
 
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
+	struct crypt_cpu *cpu_cc;
+	int cpu;
 
 	ti->private = NULL;
 
@@ -1010,6 +1371,14 @@
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
+	if (cc->cpu)
+		for_each_possible_cpu(cpu) {
+			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+			if (cpu_cc->req)
+				mempool_free(cpu_cc->req, cc->req_pool);
+			crypt_free_tfms(cc, cpu);
+		}
+
 	if (cc->bs)
 		bioset_free(cc->bs);
 
@@ -1023,14 +1392,14 @@
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 
-	if (cc->tfm && !IS_ERR(cc->tfm))
-		crypto_free_ablkcipher(cc->tfm);
-
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
+	if (cc->cpu)
+		free_percpu(cc->cpu);
+
 	kzfree(cc->cipher);
-	kzfree(cc->cipher_mode);
+	kzfree(cc->cipher_string);
 
 	/* Must zero key material before freeing */
 	kzfree(cc);
@@ -1040,9 +1409,9 @@
 			    char *cipher_in, char *key)
 {
 	struct crypt_config *cc = ti->private;
-	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
 	char *cipher_api = NULL;
-	int ret = -EINVAL;
+	int cpu, ret = -EINVAL;
 
 	/* Convert to crypto api definition? */
 	if (strchr(cipher_in, '(')) {
@@ -1050,23 +1419,31 @@
 		return -EINVAL;
 	}
 
+	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
+	if (!cc->cipher_string)
+		goto bad_mem;
+
 	/*
 	 * Legacy dm-crypt cipher specification
-	 * cipher-mode-iv:ivopts
+	 * cipher[:keycount]-mode-iv:ivopts
 	 */
 	tmp = cipher_in;
-	cipher = strsep(&tmp, "-");
+	keycount = strsep(&tmp, "-");
+	cipher = strsep(&keycount, ":");
+
+	if (!keycount)
+		cc->tfms_count = 1;
+	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
+		 !is_power_of_2(cc->tfms_count)) {
+		ti->error = "Bad cipher key count specification";
+		return -EINVAL;
+	}
+	cc->key_parts = cc->tfms_count;
 
 	cc->cipher = kstrdup(cipher, GFP_KERNEL);
 	if (!cc->cipher)
 		goto bad_mem;
 
-	if (tmp) {
-		cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
-		if (!cc->cipher_mode)
-			goto bad_mem;
-	}
-
 	chainmode = strsep(&tmp, "-");
 	ivopts = strsep(&tmp, "-");
 	ivmode = strsep(&ivopts, ":");
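
To make the extended cipher specification concrete, here is a hedged example of the cipher[:keycount]-mode-iv syntax parsed above (illustrative only, not taken from this series; the key count must be a power of two):

	aes:64-cbc-lmk

This requests 64 AES keys in CBC mode with the lmk IV generator, matching Loop-AES multikey (version 2) volumes; supplying one extra key selects the version 3 behaviour, where the last key is used as the IV seed.
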
@@ -1074,10 +1451,19 @@
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	/* Compatibility mode for old dm-crypt mappings */
+	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
+				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+				 __alignof__(struct crypt_cpu));
+	if (!cc->cpu) {
+		ti->error = "Cannot allocate per cpu state";
+		goto bad_mem;
+	}
+
+	/*
+	 * For compatibility with the original dm-crypt mapping format, if
+	 * only the cipher name is supplied, use cbc-plain.
+	 */
 	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
-		kfree(cc->cipher_mode);
-		cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
 		chainmode = "cbc";
 		ivmode = "plain";
 	}
@@ -1099,11 +1485,12 @@
 	}
 
 	/* Allocate cipher */
-	cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
-	if (IS_ERR(cc->tfm)) {
-		ret = PTR_ERR(cc->tfm);
-		ti->error = "Error allocating crypto tfm";
-		goto bad;
+	for_each_possible_cpu(cpu) {
+		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
+		if (ret < 0) {
+			ti->error = "Error allocating crypto tfm";
+			goto bad;
+		}
 	}
 
 	/* Initialize and set key */
@@ -1114,7 +1501,7 @@
 	}
 
 	/* Initialize IV */
-	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(cc->iv_size,
@@ -1137,7 +1524,15 @@
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
-	else {
+	else if (strcmp(ivmode, "lmk") == 0) {
+		cc->iv_gen_ops = &crypt_iv_lmk_ops;
+		/* Versions 2 and 3 are recognised according
+		 * to the length of the provided multi-key string.
+		 * If present (version 3), the last key is used as the IV seed.
+		 */
+		if (cc->key_size % cc->key_parts)
+			cc->key_parts++;
+	} else {
 		ret = -EINVAL;
 		ti->error = "Invalid IV mode";
 		goto bad;
@@ -1194,6 +1589,7 @@
 		ti->error = "Cannot allocate encryption context";
 		return -ENOMEM;
 	}
+	cc->key_size = key_size;
 
 	ti->private = cc;
 	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
@@ -1208,9 +1604,9 @@
 	}
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
-	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
+	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
+	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
 			   ~(crypto_tfm_ctx_alignment() - 1);
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1219,7 +1615,6 @@
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
-	cc->req = NULL;
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
@@ -1252,13 +1647,20 @@
 	cc->start = tmpll;
 
 	ret = -ENOMEM;
-	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+	cc->io_queue = alloc_workqueue("kcryptd_io",
+				       WQ_NON_REENTRANT|
+				       WQ_MEM_RECLAIM,
+				       1);
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
-	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+	cc->crypt_queue = alloc_workqueue("kcryptd",
+					  WQ_NON_REENTRANT|
+					  WQ_CPU_INTENSIVE|
+					  WQ_MEM_RECLAIM,
+					  1);
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
@@ -1286,9 +1688,10 @@
 
 	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_queue_io(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		if (kcryptd_io_read(io, GFP_NOWAIT))
+			kcryptd_queue_io(io);
+	} else
 		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
@@ -1306,10 +1709,7 @@
 		break;
 
 	case STATUSTYPE_TABLE:
-		if (cc->cipher_mode)
-			DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
-		else
-			DMEMIT("%s ", cc->cipher);
+		DMEMIT("%s ", cc->cipher_string);
 
 		if (cc->key_size > 0) {
 			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1421,7 +1821,7 @@
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 7, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index baa1191..f18375d 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -352,7 +352,7 @@
 {
 	int r = -ENOMEM;
 
-	kdelayd_wq = create_workqueue("kdelayd");
+	kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
 	if (!kdelayd_wq) {
 		DMERR("Couldn't start kdelayd");
 		goto bad_queue;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4b54618..6d12775 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -295,19 +295,55 @@
 		DMWARN("remove_all left %d open device(s)", dev_skipped);
 }
 
+/*
+ * Set the uuid of a hash_cell whose uuid isn't already set.
+ */
+static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
+{
+	mutex_lock(&dm_hash_cells_mutex);
+	hc->uuid = new_uuid;
+	mutex_unlock(&dm_hash_cells_mutex);
+
+	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
+}
+
+/*
+ * Changes the name of a hash_cell and returns the old name for
+ * the caller to free.
+ */
+static char *__change_cell_name(struct hash_cell *hc, char *new_name)
+{
+	char *old_name;
+
+	/*
+	 * Rename and move the name cell.
+	 */
+	list_del(&hc->name_list);
+	old_name = hc->name;
+
+	mutex_lock(&dm_hash_cells_mutex);
+	hc->name = new_name;
+	mutex_unlock(&dm_hash_cells_mutex);
+
+	list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+
+	return old_name;
+}
+
 static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 					    const char *new)
 {
-	char *new_name, *old_name;
+	char *new_data, *old_name = NULL;
 	struct hash_cell *hc;
 	struct dm_table *table;
 	struct mapped_device *md;
+	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
 
 	/*
 	 * duplicate new.
 	 */
-	new_name = kstrdup(new, GFP_KERNEL);
-	if (!new_name)
+	new_data = kstrdup(new, GFP_KERNEL);
+	if (!new_data)
 		return ERR_PTR(-ENOMEM);
 
 	down_write(&_hash_lock);
@@ -315,13 +351,19 @@
 	/*
 	 * Is new free ?
 	 */
-	hc = __get_name_cell(new);
+	if (change_uuid)
+		hc = __get_uuid_cell(new);
+	else
+		hc = __get_name_cell(new);
+
 	if (hc) {
-		DMWARN("asked to rename to an already-existing name %s -> %s",
+		DMWARN("Unable to change %s on mapped device %s to one that "
+		       "already exists: %s",
+		       change_uuid ? "uuid" : "name",
 		       param->name, new);
 		dm_put(hc->md);
 		up_write(&_hash_lock);
-		kfree(new_name);
+		kfree(new_data);
 		return ERR_PTR(-EBUSY);
 	}
 
@@ -330,22 +372,30 @@
 	 */
 	hc = __get_name_cell(param->name);
 	if (!hc) {
-		DMWARN("asked to rename a non-existent device %s -> %s",
-		       param->name, new);
+		DMWARN("Unable to rename non-existent device, %s to %s%s",
+		       param->name, change_uuid ? "uuid " : "", new);
 		up_write(&_hash_lock);
-		kfree(new_name);
+		kfree(new_data);
 		return ERR_PTR(-ENXIO);
 	}
 
 	/*
-	 * rename and move the name cell.
+	 * Does this device already have a uuid?
 	 */
-	list_del(&hc->name_list);
-	old_name = hc->name;
-	mutex_lock(&dm_hash_cells_mutex);
-	hc->name = new_name;
-	mutex_unlock(&dm_hash_cells_mutex);
-	list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+	if (change_uuid && hc->uuid) {
+		DMWARN("Unable to change uuid of mapped device %s to %s "
+		       "because uuid is already set to %s",
+		       param->name, new, hc->uuid);
+		dm_put(hc->md);
+		up_write(&_hash_lock);
+		kfree(new_data);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (change_uuid)
+		__set_cell_uuid(hc, new_data);
+	else
+		old_name = __change_cell_name(hc, new_data);
 
 	/*
 	 * Wake up any dm event waiters.
@@ -729,7 +779,7 @@
 	hc = __find_device_hash_cell(param);
 
 	if (!hc) {
-		DMWARN("device doesn't appear to be in the dev hash table.");
+		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
 		up_write(&_hash_lock);
 		return -ENXIO;
 	}
@@ -741,7 +791,7 @@
 	 */
 	r = dm_lock_for_deletion(md);
 	if (r) {
-		DMWARN("unable to remove open device %s", hc->name);
+		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
 		up_write(&_hash_lock);
 		dm_put(md);
 		return r;
@@ -774,21 +824,24 @@
 static int dev_rename(struct dm_ioctl *param, size_t param_size)
 {
 	int r;
-	char *new_name = (char *) param + param->data_start;
+	char *new_data = (char *) param + param->data_start;
 	struct mapped_device *md;
+	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
 
-	if (new_name < param->data ||
-	    invalid_str(new_name, (void *) param + param_size) ||
-	    strlen(new_name) > DM_NAME_LEN - 1) {
-		DMWARN("Invalid new logical volume name supplied.");
+	if (new_data < param->data ||
+	    invalid_str(new_data, (void *) param + param_size) ||
+	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
+		DMWARN("Invalid new mapped device name or uuid string supplied.");
 		return -EINVAL;
 	}
 
-	r = check_name(new_name);
-	if (r)
-		return r;
+	if (!change_uuid) {
+		r = check_name(new_data);
+		if (r)
+			return r;
+	}
 
-	md = dm_hash_rename(param, new_name);
+	md = dm_hash_rename(param, new_data);
 	if (IS_ERR(md))
 		return PTR_ERR(md);
 
@@ -885,7 +938,7 @@
 
 	hc = __find_device_hash_cell(param);
 	if (!hc) {
-		DMWARN("device doesn't appear to be in the dev hash table.");
+		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
 		up_write(&_hash_lock);
 		return -ENXIO;
 	}
@@ -1212,7 +1265,7 @@
 
 	hc = __find_device_hash_cell(param);
 	if (!hc) {
-		DMWARN("device doesn't appear to be in the dev hash table.");
+		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
 		up_write(&_hash_lock);
 		return -ENXIO;
 	}
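
The DM_UUID_FLAG handling above means the existing rename ioctl can now also set a previously unset uuid; trying to change an existing uuid fails with -EINVAL. A hedged userspace sketch (the dmsetup option name is an assumption about a matching userspace release, not something defined by this series; <dev_name> and <uuid> are placeholders):

	dmsetup rename <dev_name> --setuuid <uuid>
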
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d8587ba..924f5f0 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,6 +37,13 @@
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
+	/*
+	 * Block devices to unplug.
+	 * Non-NULL pointer means that a block device has some pending requests
+	 * and needs to be unplugged.
+	 */
+	struct block_device *unplug[2];
+
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
@@ -308,6 +315,31 @@
 	return 0;
 }
 
+/*
+ * Unplug the block device at the specified index.
+ */
+static void unplug(struct dm_kcopyd_client *kc, int rw)
+{
+	if (kc->unplug[rw] != NULL) {
+		blk_unplug(bdev_get_queue(kc->unplug[rw]));
+		kc->unplug[rw] = NULL;
+	}
+}
+
+/*
+ * Prepare block device unplug. If there's another device
+ * to be unplugged at the same array index, we unplug that
+ * device first.
+ */
+static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
+			   struct block_device *bdev)
+{
+	if (likely(kc->unplug[rw] == bdev))
+		return;
+	unplug(kc, rw);
+	kc->unplug[rw] = bdev;
+}
+
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -345,7 +377,7 @@
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
+		.bi_rw = job->rw,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
@@ -354,10 +386,16 @@
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ)
+	if (job->rw == READ) {
 		r = dm_io(&io_req, 1, &job->source, NULL);
-	else
+		prepare_unplug(job->kc, READ, job->source.bdev);
+	} else {
+		if (job->num_dests > 1)
+			io_req.bi_rw |= REQ_UNPLUG;
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+		if (!(io_req.bi_rw & REQ_UNPLUG))
+			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
+	}
 
 	return r;
 }
@@ -435,10 +473,18 @@
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
+	 *
+	 * Note that io_jobs add block devices to the unplug array;
+	 * this array is cleared with "unplug" calls. It is thus
+	 * forbidden to run complete_jobs after io_jobs and before
+	 * unplug, because the block device could be destroyed in
+	 * the job completion callback.
 	 */
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
+	unplug(kc, READ);
+	unplug(kc, WRITE);
 }
 
 /*
@@ -619,12 +665,15 @@
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
+	memset(kc->unplug, 0, sizeof(kc->unplug));
+
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;
 
 	INIT_WORK(&kc->kcopyd_work, do_work);
-	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
+	kc->kcopyd_wq = alloc_workqueue("kcopyd",
+					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
 	if (!kc->kcopyd_wq)
 		goto bad_workqueue;
 
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 1ed0094..aa2e0c3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -12,12 +12,22 @@
 
 #include "dm-log-userspace-transfer.h"
 
+#define DM_LOG_USERSPACE_VSN "1.1.0"
+
 struct flush_entry {
 	int type;
 	region_t region;
 	struct list_head list;
 };
 
+/*
+ * This limit on the number of mark and clear requests is, to a degree,
+ * arbitrary.  However, there is some basis for the choice in the limits
+ * imposed on the size of the data payload by dm-log-userspace-transfer.c:
+ * dm_consult_userspace().
+ */
+#define MAX_FLUSH_GROUP_COUNT 32
+
 struct log_c {
 	struct dm_target *ti;
 	uint32_t region_size;
@@ -37,8 +47,15 @@
 	 */
 	uint64_t in_sync_hint;
 
+	/*
+	 * Mark and clear requests are held until a flush is issued
+	 * so that we can group, and thereby limit, the amount of
+	 * network traffic between kernel and userspace.  The 'flush_lock'
+	 * is used to protect these lists.
+	 */
 	spinlock_t flush_lock;
-	struct list_head flush_list;  /* only for clear and mark requests */
+	struct list_head mark_list;
+	struct list_head clear_list;
 };
 
 static mempool_t *flush_entry_pool;
@@ -169,7 +186,8 @@
 
 	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
 	spin_lock_init(&lc->flush_lock);
-	INIT_LIST_HEAD(&lc->flush_list);
+	INIT_LIST_HEAD(&lc->mark_list);
+	INIT_LIST_HEAD(&lc->clear_list);
 
 	str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
 	if (str_size < 0) {
@@ -181,8 +199,11 @@
 	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
 				 ctr_str, str_size, NULL, NULL);
 
-	if (r == -ESRCH) {
-		DMERR("Userspace log server not found");
+	if (r < 0) {
+		if (r == -ESRCH)
+			DMERR("Userspace log server not found");
+		else
+			DMERR("Userspace log server failed to create log");
 		goto out;
 	}
 
@@ -214,10 +235,9 @@
 
 static void userspace_dtr(struct dm_dirty_log *log)
 {
-	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
+	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -338,6 +358,71 @@
 	return (r) ? 0 : (int)in_sync;
 }
 
+static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
+{
+	int r = 0;
+	struct flush_entry *fe;
+
+	list_for_each_entry(fe, flush_list, list) {
+		r = userspace_do_request(lc, lc->uuid, fe->type,
+					 (char *)&fe->region,
+					 sizeof(fe->region),
+					 NULL, NULL);
+		if (r)
+			break;
+	}
+
+	return r;
+}
+
+static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
+{
+	int r = 0;
+	int count;
+	uint32_t type = 0;
+	struct flush_entry *fe, *tmp_fe;
+	LIST_HEAD(tmp_list);
+	uint64_t group[MAX_FLUSH_GROUP_COUNT];
+
+	/*
+	 * Group process the requests
+	 */
+	while (!list_empty(flush_list)) {
+		count = 0;
+
+		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
+			group[count] = fe->region;
+			count++;
+
+			list_del(&fe->list);
+			list_add(&fe->list, &tmp_list);
+
+			type = fe->type;
+			if (count >= MAX_FLUSH_GROUP_COUNT)
+				break;
+		}
+
+		r = userspace_do_request(lc, lc->uuid, type,
+					 (char *)(group),
+					 count * sizeof(uint64_t),
+					 NULL, NULL);
+		if (r) {
+			/* Group send failed.  Attempt one-by-one. */
+			list_splice_init(&tmp_list, flush_list);
+			r = flush_one_by_one(lc, flush_list);
+			break;
+		}
+	}
+
+	/*
+	 * Must collect the flush_entry structures that were successfully
+	 * processed as a group so that they will be freed by the caller.
+	 */
+	list_splice_init(&tmp_list, flush_list);
+
+	return r;
+}
+
 /*
  * userspace_flush
  *
@@ -360,31 +445,25 @@
 	int r = 0;
 	unsigned long flags;
 	struct log_c *lc = log->context;
-	LIST_HEAD(flush_list);
+	LIST_HEAD(mark_list);
+	LIST_HEAD(clear_list);
 	struct flush_entry *fe, *tmp_fe;
 
 	spin_lock_irqsave(&lc->flush_lock, flags);
-	list_splice_init(&lc->flush_list, &flush_list);
+	list_splice_init(&lc->mark_list, &mark_list);
+	list_splice_init(&lc->clear_list, &clear_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
-	if (list_empty(&flush_list))
+	if (list_empty(&mark_list) && list_empty(&clear_list))
 		return 0;
 
-	/*
-	 * FIXME: Count up requests, group request types,
-	 * allocate memory to stick all requests in and
-	 * send to server in one go.  Failing the allocation,
-	 * do it one by one.
-	 */
+	r = flush_by_group(lc, &mark_list);
+	if (r)
+		goto fail;
 
-	list_for_each_entry(fe, &flush_list, list) {
-		r = userspace_do_request(lc, lc->uuid, fe->type,
-					 (char *)&fe->region,
-					 sizeof(fe->region),
-					 NULL, NULL);
-		if (r)
-			goto fail;
-	}
+	r = flush_by_group(lc, &clear_list);
+	if (r)
+		goto fail;
 
 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
 				 NULL, 0, NULL, NULL);
@@ -395,7 +474,11 @@
 	 * Calling code will receive an error and will know that
 	 * the log facility has failed.
 	 */
-	list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
+	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
+		list_del(&fe->list);
+		mempool_free(fe, flush_entry_pool);
+	}
+	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
 		list_del(&fe->list);
 		mempool_free(fe, flush_entry_pool);
 	}
@@ -425,7 +508,7 @@
 	spin_lock_irqsave(&lc->flush_lock, flags);
 	fe->type = DM_ULOG_MARK_REGION;
 	fe->region = region;
-	list_add(&fe->list, &lc->flush_list);
+	list_add(&fe->list, &lc->mark_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
 	return;
@@ -462,7 +545,7 @@
 	spin_lock_irqsave(&lc->flush_lock, flags);
 	fe->type = DM_ULOG_CLEAR_REGION;
 	fe->region = region;
-	list_add(&fe->list, &lc->flush_list);
+	list_add(&fe->list, &lc->clear_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
 	return;
@@ -684,7 +767,7 @@
 		return r;
 	}
 
-	DMINFO("version 1.0.0 loaded");
+	DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
 	return 0;
 }
 
@@ -694,7 +777,7 @@
 	dm_ulog_tfr_exit();
 	mempool_destroy(flush_entry_pool);
 
-	DMINFO("version 1.0.0 unloaded");
+	DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
 	return;
 }
 
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 075cbcf..049eaf1 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -198,6 +198,7 @@
 
 	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+	tfr->version = DM_ULOG_REQUEST_VERSION;
 	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;
 
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 33420e6..6951536 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -455,7 +455,7 @@
 			r = PTR_ERR(lc->io_req.client);
 			DMWARN("couldn't allocate disk io client");
 			kfree(lc);
-			return -ENOMEM;
+			return r;
 		}
 
 		lc->disk_header = vmalloc(buf_size);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 487ecda..b82d288 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -23,6 +23,8 @@
 
 #define DM_MSG_PREFIX "multipath"
 #define MESG_STR(x) x, sizeof(x)
+#define DM_PG_INIT_DELAY_MSECS 2000
+#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
 
 /* Path properties */
 struct pgpath {
@@ -33,8 +35,7 @@
 	unsigned fail_count;		/* Cumulative failure count */
 
 	struct dm_path path;
-	struct work_struct deactivate_path;
-	struct work_struct activate_path;
+	struct delayed_work activate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -65,11 +66,15 @@
 
 	const char *hw_handler_name;
 	char *hw_handler_params;
+
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
+
+	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+
 	unsigned pg_init_required;	/* pg_init needs calling? */
 	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
-	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -82,6 +87,7 @@
 	unsigned saved_queue_if_no_path;/* Saved state during suspension */
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_count;		/* Number of times pg_init called */
+	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
 	struct work_struct process_queued_ios;
 	struct list_head queued_ios;
@@ -116,7 +122,6 @@
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -129,8 +134,7 @@
 
 	if (pgpath) {
 		pgpath->is_active = 1;
-		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
-		INIT_WORK(&pgpath->activate_path, activate_path);
+		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
 	}
 
 	return pgpath;
@@ -141,14 +145,6 @@
 	kfree(pgpath);
 }
 
-static void deactivate_path(struct work_struct *work)
-{
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, deactivate_path);
-
-	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
 static struct priority_group *alloc_priority_group(void)
 {
 	struct priority_group *pg;
@@ -199,6 +195,7 @@
 		INIT_LIST_HEAD(&m->queued_ios);
 		spin_lock_init(&m->lock);
 		m->queue_io = 1;
+		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
 		init_waitqueue_head(&m->pg_init_wait);
@@ -238,14 +235,19 @@
 static void __pg_init_all_paths(struct multipath *m)
 {
 	struct pgpath *pgpath;
+	unsigned long pg_init_delay = 0;
 
 	m->pg_init_count++;
 	m->pg_init_required = 0;
+	if (m->pg_init_delay_retry)
+		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
+						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 		/* Skip failed paths */
 		if (!pgpath->is_active)
 			continue;
-		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
+				       pg_init_delay))
 			m->pg_init_in_progress++;
 	}
 }
@@ -793,8 +795,9 @@
 	const char *param_name;
 
 	static struct param _params[] = {
-		{0, 3, "invalid number of feature args"},
+		{0, 5, "invalid number of feature args"},
 		{1, 50, "pg_init_retries must be between 1 and 50"},
+		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
 	};
 
 	r = read_param(_params, shift(as), &argc, &ti->error);
@@ -821,6 +824,14 @@
 			continue;
 		}
 
+		if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
+		    (argc >= 1)) {
+			r = read_param(_params + 2, shift(as),
+				       &m->pg_init_delay_msecs, &ti->error);
+			argc--;
+			continue;
+		}
+
 		ti->error = "Unrecognised multipath feature request";
 		r = -EINVAL;
 	} while (argc && !r);
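
For reference, a hedged multipath table features block exercising the new argument (the 2000 ms value is arbitrary; the leading count is the total number of feature words, so "pg_init_delay_msecs 2000" contributes two):

	3 queue_if_no_path pg_init_delay_msecs 2000

This delays path-group initialization retries by two seconds, the same value as the DM_PG_INIT_DELAY_MSECS default used when delay_retry is set without an explicit argument.
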
@@ -931,7 +942,7 @@
 	flush_workqueue(kmpath_handlerd);
 	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
-	flush_scheduled_work();
+	flush_work_sync(&m->trigger_event);
 }
 
 static void multipath_dtr(struct dm_target *ti)
@@ -995,7 +1006,6 @@
 		      pgpath->path.dev->name, m->nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
-	queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -1034,7 +1044,7 @@
 		m->current_pgpath = NULL;
 		queue_work(kmultipathd, &m->process_queued_ios);
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
-		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
 	}
 
@@ -1169,6 +1179,7 @@
 	struct priority_group *pg = pgpath->pg;
 	struct multipath *m = pg->m;
 	unsigned long flags;
+	unsigned delay_retry = 0;
 
 	/* device or driver problems */
 	switch (errors) {
@@ -1193,8 +1204,9 @@
 		 */
 		bypass_pg(m, pg, 1);
 		break;
-	/* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
 	case SCSI_DH_RETRY:
+		/* Wait before retrying. */
+		delay_retry = 1;
 	case SCSI_DH_IMM_RETRY:
 	case SCSI_DH_RES_TEMP_UNAVAIL:
 		if (pg_init_limit_reached(m, pgpath))
@@ -1227,6 +1239,7 @@
 	if (!m->pg_init_required)
 		m->queue_io = 0;
 
+	m->pg_init_delay_retry = delay_retry;
 	queue_work(kmultipathd, &m->process_queued_ios);
 
 	/*
@@ -1241,7 +1254,7 @@
 static void activate_path(struct work_struct *work)
 {
 	struct pgpath *pgpath =
-		container_of(work, struct pgpath, activate_path);
+		container_of(work, struct pgpath, activate_path.work);
 
 	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
 				pg_init_done, pgpath);
@@ -1382,11 +1395,14 @@
 		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
 	else {
 		DMEMIT("%u ", m->queue_if_no_path +
-			      (m->pg_init_retries > 0) * 2);
+			      (m->pg_init_retries > 0) * 2 +
+			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
 		if (m->queue_if_no_path)
 			DMEMIT("queue_if_no_path ");
 		if (m->pg_init_retries)
 			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
+		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
+			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
 	}
 
 	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1655,7 +1671,7 @@
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 1, 1},
+	.version = {1, 2, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
@@ -1687,7 +1703,7 @@
 		return -EINVAL;
 	}
 
-	kmultipathd = create_workqueue("kmpathd");
+	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
 	if (!kmultipathd) {
 		DMERR("failed to create workqueue kmpathd");
 		dm_unregister_target(&multipath_target);
@@ -1701,7 +1717,8 @@
 	 * old workqueue would also create a bottleneck in the
 	 * path of the storage hardware device activation.
 	 */
-	kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
+						  WQ_MEM_RECLAIM);
 	if (!kmpath_handlerd) {
 		DMERR("failed to create workqueue kmpath_handlerd");
 		destroy_workqueue(kmultipathd);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
new file mode 100644
index 0000000..b9e1e15
--- /dev/null
+++ b/drivers/md/dm-raid.c
@@ -0,0 +1,697 @@
+/*
+ * Copyright (C) 2010-2011 Neil Brown
+ * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/slab.h>
+
+#include "md.h"
+#include "raid5.h"
+#include "dm.h"
+#include "bitmap.h"
+
+#define DM_MSG_PREFIX "raid"
+
+/*
+ * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then
+ * make it so the flag doesn't set anything.
+ */
+#ifndef MD_SYNC_STATE_FORCED
+#define MD_SYNC_STATE_FORCED 0
+#endif
+
+struct raid_dev {
+	/*
+	 * Two DM devices, one to hold metadata and one to hold the
+	 * actual data/parity.  The reason for this is to not confuse
+	 * ti->len and give more flexibility in altering size and
+	 * characteristics.
+	 *
+	 * While it is possible for this device to be associated
+	 * with a different physical device than the data_dev, it
+	 * is intended for it to be the same.
+	 *    |--------- Physical Device ---------|
+	 *    |- meta_dev -|------ data_dev ------|
+	 */
+	struct dm_dev *meta_dev;
+	struct dm_dev *data_dev;
+	struct mdk_rdev_s rdev;
+};
+
+/*
+ * Flags for rs->print_flags field.
+ */
+#define DMPF_DAEMON_SLEEP      0x1
+#define DMPF_MAX_WRITE_BEHIND  0x2
+#define DMPF_SYNC              0x4
+#define DMPF_NOSYNC            0x8
+#define DMPF_STRIPE_CACHE      0x10
+#define DMPF_MIN_RECOVERY_RATE 0x20
+#define DMPF_MAX_RECOVERY_RATE 0x40
+
+struct raid_set {
+	struct dm_target *ti;
+
+	uint64_t print_flags;
+
+	struct mddev_s md;
+	struct raid_type *raid_type;
+	struct dm_target_callbacks callbacks;
+
+	struct raid_dev dev[0];
+};
+
+/* Supported raid types and properties. */
+static struct raid_type {
+	const char *name;		/* RAID algorithm. */
+	const char *descr;		/* Descriptor text for logging. */
+	const unsigned parity_devs;	/* # of parity devices. */
+	const unsigned minimal_devs;	/* minimal # of devices in set. */
+	const unsigned level;		/* RAID level. */
+	const unsigned algorithm;	/* RAID algorithm. */
+} raid_types[] = {
+	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
+	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
+	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
+	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
+	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
+	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
+	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
+	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
+};
+
+static struct raid_type *get_raid_type(char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
+		if (!strcmp(raid_types[i].name, name))
+			return &raid_types[i];
+
+	return NULL;
+}
+
+static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
+{
+	unsigned i;
+	struct raid_set *rs;
+	sector_t sectors_per_dev;
+
+	if (raid_devs <= raid_type->parity_devs) {
+		ti->error = "Insufficient number of devices";
+		return ERR_PTR(-EINVAL);
+	}
+
+	sectors_per_dev = ti->len;
+	if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+		ti->error = "Target length not divisible by number of data devices";
+		return ERR_PTR(-EINVAL);
+	}
+
+	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
+	if (!rs) {
+		ti->error = "Cannot allocate raid context";
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mddev_init(&rs->md);
+
+	rs->ti = ti;
+	rs->raid_type = raid_type;
+	rs->md.raid_disks = raid_devs;
+	rs->md.level = raid_type->level;
+	rs->md.new_level = rs->md.level;
+	rs->md.dev_sectors = sectors_per_dev;
+	rs->md.layout = raid_type->algorithm;
+	rs->md.new_layout = rs->md.layout;
+	rs->md.delta_disks = 0;
+	rs->md.recovery_cp = 0;
+
+	for (i = 0; i < raid_devs; i++)
+		md_rdev_init(&rs->dev[i].rdev);
+
+	/*
+	 * Remaining items to be initialized by further RAID params:
+	 *  rs->md.persistent
+	 *  rs->md.external
+	 *  rs->md.chunk_sectors
+	 *  rs->md.new_chunk_sectors
+	 */
+
+	return rs;
+}
+
+static void context_free(struct raid_set *rs)
+{
+	int i;
+
+	for (i = 0; i < rs->md.raid_disks; i++)
+		if (rs->dev[i].data_dev)
+			dm_put_device(rs->ti, rs->dev[i].data_dev);
+
+	kfree(rs);
+}
+
+/*
+ * For every device we have two words
+ *  <meta_dev>: meta device name or '-' if missing
+ *  <data_dev>: data device name or '-' if missing
+ *
+ * This code parses those words.
+ */
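+/*
+ * Illustrative example (device names are hypothetical): with metadata
+ * devices not yet supported, each slot is given as the pair "- <data_dev>",
+ * e.g. "- /dev/sda1 - /dev/sdb1 - /dev/sdc1" for a three-device set; a
+ * slot with no data device is written as "- -".
+ */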
+static int dev_parms(struct raid_set *rs, char **argv)
+{
+	int i;
+	int rebuild = 0;
+	int metadata_available = 0;
+	int ret = 0;
+
+	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
+		rs->dev[i].rdev.raid_disk = i;
+
+		rs->dev[i].meta_dev = NULL;
+		rs->dev[i].data_dev = NULL;
+
+		/*
+		 * There are no offsets, since there is a separate device
+		 * for data and metadata.
+		 */
+		rs->dev[i].rdev.data_offset = 0;
+		rs->dev[i].rdev.mddev = &rs->md;
+
+		if (strcmp(argv[0], "-")) {
+			rs->ti->error = "Metadata devices not supported";
+			return -EINVAL;
+		}
+
+		if (!strcmp(argv[1], "-")) {
+			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
+			    (!rs->dev[i].rdev.recovery_offset)) {
+				rs->ti->error = "Drive designated for rebuild not specified";
+				return -EINVAL;
+			}
+
+			continue;
+		}
+
+		ret = dm_get_device(rs->ti, argv[1],
+				    dm_table_get_mode(rs->ti->table),
+				    &rs->dev[i].data_dev);
+		if (ret) {
+			rs->ti->error = "RAID device lookup failure";
+			return ret;
+		}
+
+		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
+		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
+		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+			rebuild++;
+	}
+
+	if (metadata_available) {
+		rs->md.external = 0;
+		rs->md.persistent = 1;
+		rs->md.major_version = 2;
+	} else if (rebuild && !rs->md.recovery_cp) {
+		/*
+		 * Without metadata, we will not be able to tell if the array
+		 * is in-sync or not - we must assume it is not.  Therefore,
+		 * it is impossible to rebuild a drive.
+		 *
+		 * Even if there is metadata, the on-disk information may
+		 * indicate that the array is not in-sync and it will then
+		 * fail at that time.
+		 *
+		 * User could specify 'nosync' option if desperate.
+		 */
+		DMERR("Unable to rebuild drive while array is not in-sync");
+		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Possible arguments are...
+ * RAID456:
+ *	<chunk_size> [optional_args]
+ *
+ * Optional args:
+ *    [[no]sync]			Force or prevent recovery of the entire array
+ *    [rebuild <idx>]			Rebuild the drive indicated by the index
+ *    [daemon_sleep <ms>]		Time between bitmap daemon runs to clear bits
+ *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
+ *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
+ *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
+ *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
+ */
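+/*
+ * Illustrative <raid_params> fragment (values are hypothetical):
+ *   128 sync min_recovery_rate 1000
+ * counts as 4 raid parameters in the table line (<#raid_params> = 4):
+ * the 128-sector chunk size, the "sync" flag, and the
+ * "min_recovery_rate 1000" key/value pair.
+ */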
+static int parse_raid_params(struct raid_set *rs, char **argv,
+			     unsigned num_raid_params)
+{
+	unsigned i, rebuild_cnt = 0;
+	unsigned long value;
+	char *key;
+
+	/*
+	 * First, parse the in-order required arguments
+	 */
+	if ((strict_strtoul(argv[0], 10, &value) < 0) ||
+	    !is_power_of_2(value) || (value < 8)) {
+		rs->ti->error = "Bad chunk size";
+		return -EINVAL;
+	}
+
+	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
+	argv++;
+	num_raid_params--;
+
+	/*
+	 * Second, parse the unordered optional arguments
+	 */
+	for (i = 0; i < rs->md.raid_disks; i++)
+		set_bit(In_sync, &rs->dev[i].rdev.flags);
+
+	for (i = 0; i < num_raid_params; i++) {
+		if (!strcmp(argv[i], "nosync")) {
+			rs->md.recovery_cp = MaxSector;
+			rs->print_flags |= DMPF_NOSYNC;
+			rs->md.flags |= MD_SYNC_STATE_FORCED;
+			continue;
+		}
+		if (!strcmp(argv[i], "sync")) {
+			rs->md.recovery_cp = 0;
+			rs->print_flags |= DMPF_SYNC;
+			rs->md.flags |= MD_SYNC_STATE_FORCED;
+			continue;
+		}
+
+		/* The rest of the optional arguments come in key/value pairs */
+		if ((i + 1) >= num_raid_params) {
+			rs->ti->error = "Wrong number of raid parameters given";
+			return -EINVAL;
+		}
+
+		key = argv[i++];
+		if (strict_strtoul(argv[i], 10, &value) < 0) {
+			rs->ti->error = "Bad numerical argument given in raid params";
+			return -EINVAL;
+		}
+
+		if (!strcmp(key, "rebuild")) {
+			if (++rebuild_cnt > rs->raid_type->parity_devs) {
+				rs->ti->error = "Too many rebuild drives given";
+				return -EINVAL;
+			}
+			if (value > rs->md.raid_disks) {
+				rs->ti->error = "Invalid rebuild index given";
+				return -EINVAL;
+			}
+			clear_bit(In_sync, &rs->dev[value].rdev.flags);
+			rs->dev[value].rdev.recovery_offset = 0;
+		} else if (!strcmp(key, "max_write_behind")) {
+			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
+
+			/*
+			 * In device-mapper, we specify things in sectors, but
+			 * MD records this value in kB
+			 */
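+			/* e.g. 256 sectors -> 128 KiB */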
+			value /= 2;
+			if (value > COUNTER_MAX) {
+				rs->ti->error = "Max write-behind limit out of range";
+				return -EINVAL;
+			}
+			rs->md.bitmap_info.max_write_behind = value;
+		} else if (!strcmp(key, "daemon_sleep")) {
+			rs->print_flags |= DMPF_DAEMON_SLEEP;
+			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
+				rs->ti->error = "daemon sleep period out of range";
+				return -EINVAL;
+			}
+			rs->md.bitmap_info.daemon_sleep = value;
+		} else if (!strcmp(key, "stripe_cache")) {
+			rs->print_flags |= DMPF_STRIPE_CACHE;
+
+			/*
+			 * In device-mapper, we specify things in sectors, but
+			 * MD records this value in kB
+			 */
+			value /= 2;
+
+			if (rs->raid_type->level < 5) {
+				rs->ti->error = "Inappropriate argument: stripe_cache";
+				return -EINVAL;
+			}
+			if (raid5_set_cache_size(&rs->md, (int)value)) {
+				rs->ti->error = "Bad stripe_cache size";
+				return -EINVAL;
+			}
+		} else if (!strcmp(key, "min_recovery_rate")) {
+			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
+			if (value > INT_MAX) {
+				rs->ti->error = "min_recovery_rate out of range";
+				return -EINVAL;
+			}
+			rs->md.sync_speed_min = (int)value;
+		} else if (!strcmp(key, "max_recovery_rate")) {
+			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
+			if (value > INT_MAX) {
+				rs->ti->error = "max_recovery_rate out of range";
+				return -EINVAL;
+			}
+			rs->md.sync_speed_max = (int)value;
+		} else {
+			DMERR("Unable to parse RAID parameter: %s", key);
+			rs->ti->error = "Unable to parse RAID parameters";
+			return -EINVAL;
+		}
+	}
+
+	/* Assume there are no metadata devices until the drives are parsed */
+	rs->md.persistent = 0;
+	rs->md.external = 1;
+
+	return 0;
+}
+
+static void do_table_event(struct work_struct *ws)
+{
+	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
+
+	dm_table_event(rs->ti->table);
+}
+
+static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
+{
+	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+
+	return md_raid5_congested(&rs->md, bits);
+}
+
+static void raid_unplug(struct dm_target_callbacks *cb)
+{
+	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+
+	md_raid5_unplug_device(rs->md.private);
+}
+
+/*
+ * Construct a RAID4/5/6 mapping:
+ * Args:
+ *	<raid_type> <#raid_params> <raid_params>		\
+ *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
+ *
+ * ** metadata devices are not supported yet, use '-' instead **
+ *
+ * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
+ * details on possible <raid_params>.
+ */
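+/*
+ * Illustrative only (the dmsetup invocation, device names and sizes are
+ * hypothetical):
+ *
+ *   dmsetup create r5 --table \
+ *     "0 2097152 raid raid5_ls 2 64 nosync 3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1"
+ *
+ * creates a left-symmetric RAID5 set with a 64-sector (32KiB) chunk over
+ * three devices and skips the initial resynchronisation; the 2097152-sector
+ * target length must be divisible by the number of data devices (3 - 1 = 2).
+ */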
+static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+	int ret;
+	struct raid_type *rt;
+	unsigned long num_raid_params, num_raid_devs;
+	struct raid_set *rs = NULL;
+
+	/* Must have at least <raid_type> <#raid_params> */
+	if (argc < 2) {
+		ti->error = "Too few arguments";
+		return -EINVAL;
+	}
+
+	/* raid type */
+	rt = get_raid_type(argv[0]);
+	if (!rt) {
+		ti->error = "Unrecognised raid_type";
+		return -EINVAL;
+	}
+	argc--;
+	argv++;
+
+	/* number of RAID parameters */
+	if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
+		ti->error = "Cannot understand number of RAID parameters";
+		return -EINVAL;
+	}
+	argc--;
+	argv++;
+
+	/* Skip over RAID params for now and find out # of devices */
+	if (num_raid_params + 1 > argc) {
+		ti->error = "Arguments do not agree with counts given";
+		return -EINVAL;
+	}
+
+	if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
+	    (num_raid_devs >= INT_MAX)) {
+		ti->error = "Cannot understand number of raid devices";
+		return -EINVAL;
+	}
+
+	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
+	if (IS_ERR(rs))
+		return PTR_ERR(rs);
+
+	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
+	if (ret)
+		goto bad;
+
+	ret = -EINVAL;
+
+	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
+	argv += num_raid_params + 1;
+
+	if (argc != (num_raid_devs * 2)) {
+		ti->error = "Number of supplied RAID devices does not match the count given";
+		goto bad;
+	}
+
+	ret = dev_parms(rs, argv);
+	if (ret)
+		goto bad;
+
+	INIT_WORK(&rs->md.event_work, do_table_event);
+	ti->split_io = rs->md.chunk_sectors;
+	ti->private = rs;
+
+	mutex_lock(&rs->md.reconfig_mutex);
+	ret = md_run(&rs->md);
+	rs->md.in_sync = 0; /* Assume already marked dirty */
+	mutex_unlock(&rs->md.reconfig_mutex);
+
+	if (ret) {
+		ti->error = "Failed to run raid array";
+		goto bad;
+	}
+
+	rs->callbacks.congested_fn = raid_is_congested;
+	rs->callbacks.unplug_fn = raid_unplug;
+	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
+
+	return 0;
+
+bad:
+	context_free(rs);
+
+	return ret;
+}
+
+static void raid_dtr(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	list_del_init(&rs->callbacks.list);
+	md_stop(&rs->md);
+	context_free(rs);
+}
+
+static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
+{
+	struct raid_set *rs = ti->private;
+	mddev_t *mddev = &rs->md;
+
+	mddev->pers->make_request(mddev, bio);
+
+	return DM_MAPIO_SUBMITTED;
+}
+
+static int raid_status(struct dm_target *ti, status_type_t type,
+		       char *result, unsigned maxlen)
+{
+	struct raid_set *rs = ti->private;
+	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
+	unsigned sz = 0;
+	int i;
+	sector_t sync;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
+
+		for (i = 0; i < rs->md.raid_disks; i++) {
+			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
+				DMEMIT("D");
+			else if (test_bit(In_sync, &rs->dev[i].rdev.flags))
+				DMEMIT("A");
+			else
+				DMEMIT("a");
+		}
+
+		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
+			sync = rs->md.curr_resync_completed;
+		else
+			sync = rs->md.recovery_cp;
+
+		if (sync > rs->md.resync_max_sectors)
+			sync = rs->md.resync_max_sectors;
+
+		DMEMIT(" %llu/%llu",
+		       (unsigned long long) sync,
+		       (unsigned long long) rs->md.resync_max_sectors);
+
+		break;
+	case STATUSTYPE_TABLE:
+		/* The string you would use to construct this array */
+		for (i = 0; i < rs->md.raid_disks; i++)
+			if (rs->dev[i].data_dev &&
+			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
+				raid_param_cnt++; /* for rebuilds */
+
+		raid_param_cnt += (hweight64(rs->print_flags) * 2);
+		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
+			raid_param_cnt--;
+
+		DMEMIT("%s %u %u", rs->raid_type->name,
+		       raid_param_cnt, rs->md.chunk_sectors);
+
+		if ((rs->print_flags & DMPF_SYNC) &&
+		    (rs->md.recovery_cp == MaxSector))
+			DMEMIT(" sync");
+		if (rs->print_flags & DMPF_NOSYNC)
+			DMEMIT(" nosync");
+
+		for (i = 0; i < rs->md.raid_disks; i++)
+			if (rs->dev[i].data_dev &&
+			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
+				DMEMIT(" rebuild %u", i);
+
+		if (rs->print_flags & DMPF_DAEMON_SLEEP)
+			DMEMIT(" daemon_sleep %lu",
+			       rs->md.bitmap_info.daemon_sleep);
+
+		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
+			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
+
+		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
+			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
+
+		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
+			DMEMIT(" max_write_behind %lu",
+			       rs->md.bitmap_info.max_write_behind);
+
+		if (rs->print_flags & DMPF_STRIPE_CACHE) {
+			raid5_conf_t *conf = rs->md.private;
+
+			/* convert from kiB to sectors */
+			DMEMIT(" stripe_cache %d",
+			       conf ? conf->max_nr_stripes * 2 : 0);
+		}
+
+		DMEMIT(" %d", rs->md.raid_disks);
+		for (i = 0; i < rs->md.raid_disks; i++) {
+			DMEMIT(" -"); /* metadata device */
+
+			if (rs->dev[i].data_dev)
+				DMEMIT(" %s", rs->dev[i].data_dev->name);
+			else
+				DMEMIT(" -");
+		}
+	}
+
+	return 0;
+}
+
+static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+{
+	struct raid_set *rs = ti->private;
+	unsigned i;
+	int ret = 0;
+
+	for (i = 0; !ret && i < rs->md.raid_disks; i++)
+		if (rs->dev[i].data_dev)
+			ret = fn(ti,
+				 rs->dev[i].data_dev,
+				 0, /* No offset on data devs */
+				 rs->md.dev_sectors,
+				 data);
+
+	return ret;
+}
+
+static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	struct raid_set *rs = ti->private;
+	unsigned chunk_size = rs->md.chunk_sectors << 9;
+	raid5_conf_t *conf = rs->md.private;
+
+	blk_limits_io_min(limits, chunk_size);
+	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
+}
+
+static void raid_presuspend(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	md_stop_writes(&rs->md);
+}
+
+static void raid_postsuspend(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	mddev_suspend(&rs->md);
+}
+
+static void raid_resume(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	mddev_resume(&rs->md);
+}
+
+static struct target_type raid_target = {
+	.name = "raid",
+	.version = {1, 0, 0},
+	.module = THIS_MODULE,
+	.ctr = raid_ctr,
+	.dtr = raid_dtr,
+	.map = raid_map,
+	.status = raid_status,
+	.iterate_devices = raid_iterate_devices,
+	.io_hints = raid_io_hints,
+	.presuspend = raid_presuspend,
+	.postsuspend = raid_postsuspend,
+	.resume = raid_resume,
+};
+
+static int __init dm_raid_init(void)
+{
+	return dm_register_target(&raid_target);
+}
+
+static void __exit dm_raid_exit(void)
+{
+	dm_unregister_target(&raid_target);
+}
+
+module_init(dm_raid_init);
+module_exit(dm_raid_exit);
+
+MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_ALIAS("dm-raid4");
+MODULE_ALIAS("dm-raid5");
+MODULE_ALIAS("dm-raid6");
+MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 19a59b0..dee3267 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -261,7 +261,7 @@
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE_FLUSH,
 		.mem.type = DM_IO_KMEM,
-		.mem.ptr.bvec = NULL,
+		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
 	};
 
@@ -637,6 +637,12 @@
 		.client = ms->io_client,
 	};
 
+	if (bio->bi_rw & REQ_DISCARD) {
+		io_req.bi_rw |= REQ_DISCARD;
+		io_req.mem.type = DM_IO_KMEM;
+		io_req.mem.ptr.addr = NULL;
+	}
+
 	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
 		map_region(dest++, m, bio);
 
@@ -670,7 +676,8 @@
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
-		if (bio->bi_rw & REQ_FLUSH) {
+		if ((bio->bi_rw & REQ_FLUSH) ||
+		    (bio->bi_rw & REQ_DISCARD)) {
 			bio_list_add(&sync, bio);
 			continue;
 		}
@@ -1076,8 +1083,10 @@
 	ti->private = ms;
 	ti->split_io = dm_rh_get_region_size(ms->rh);
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 
-	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
+	ms->kmirrord_wq = alloc_workqueue("kmirrord",
+					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
 	if (!ms->kmirrord_wq) {
 		DMERR("couldn't start kmirrord");
 		r = -ENOMEM;
@@ -1130,7 +1139,7 @@
 
 	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
-	flush_scheduled_work();
+	flush_work_sync(&ms->trigger_event);
 	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);
@@ -1406,7 +1415,7 @@
 
 static struct target_type mirror_target = {
 	.name	 = "mirror",
-	.version = {1, 12, 0},
+	.version = {1, 12, 1},
 	.module	 = THIS_MODULE,
 	.ctr	 = mirror_ctr,
 	.dtr	 = mirror_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2129cdb..95891df 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@
 	 */
 	INIT_WORK_ONSTACK(&req.work, do_metadata);
 	queue_work(ps->metadata_wq, &req.work);
-	flush_workqueue(ps->metadata_wq);
+	flush_work(&req.work);
 
 	return req.result;
 }
@@ -818,7 +818,7 @@
 	atomic_set(&ps->pending_count, 0);
 	ps->callbacks = NULL;
 
-	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
+	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
 	if (!ps->metadata_wq) {
 		kfree(ps);
 		DMERR("couldn't start header metadata update thread");
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 53cf79d..fdde53c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,7 +19,6 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
-#include <linux/workqueue.h>
 
 #include "dm-exception-store.h"
 
@@ -80,9 +79,6 @@
 	/* Origin writes don't trigger exceptions until this is set */
 	int active;
 
-	/* Whether or not owning mapped_device is suspended */
-	int suspended;
-
 	atomic_t pending_exceptions_count;
 
 	mempool_t *pending_pool;
@@ -106,10 +102,6 @@
 
 	struct dm_kcopyd_client *kcopyd_client;
 
-	/* Queue of snapshot writes for ksnapd to flush */
-	struct bio_list queued_bios;
-	struct work_struct queued_bios_work;
-
 	/* Wait for events based on state_bits */
 	unsigned long state_bits;
 
@@ -160,9 +152,6 @@
 }
 EXPORT_SYMBOL(dm_snap_cow);
 
-static struct workqueue_struct *ksnapd;
-static void flush_queued_bios(struct work_struct *work);
-
 static sector_t chunk_to_sector(struct dm_exception_store *store,
 				chunk_t chunk)
 {
@@ -1110,7 +1099,6 @@
 	s->ti = ti;
 	s->valid = 1;
 	s->active = 0;
-	s->suspended = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
 	INIT_LIST_HEAD(&s->list);
@@ -1153,9 +1141,6 @@
 
 	spin_lock_init(&s->tracked_chunk_lock);
 
-	bio_list_init(&s->queued_bios);
-	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
 	ti->private = s;
 	ti->num_flush_requests = num_flush_requests;
 
@@ -1279,8 +1264,6 @@
 	struct dm_snapshot *s = ti->private;
 	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
-	flush_workqueue(ksnapd);
-
 	down_read(&_origins_lock);
 	/* Check whether exception handover must be cancelled */
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
@@ -1342,20 +1325,6 @@
 	}
 }
 
-static void flush_queued_bios(struct work_struct *work)
-{
-	struct dm_snapshot *s =
-		container_of(work, struct dm_snapshot, queued_bios_work);
-	struct bio *queued_bios;
-	unsigned long flags;
-
-	spin_lock_irqsave(&s->pe_lock, flags);
-	queued_bios = bio_list_get(&s->queued_bios);
-	spin_unlock_irqrestore(&s->pe_lock, flags);
-
-	flush_bios(queued_bios);
-}
-
 static int do_origin(struct dm_dev *origin, struct bio *bio);
 
 /*
@@ -1760,15 +1729,6 @@
 	stop_merge(s);
 }
 
-static void snapshot_postsuspend(struct dm_target *ti)
-{
-	struct dm_snapshot *s = ti->private;
-
-	down_write(&s->lock);
-	s->suspended = 1;
-	up_write(&s->lock);
-}
-
 static int snapshot_preresume(struct dm_target *ti)
 {
 	int r = 0;
@@ -1783,7 +1743,7 @@
 			DMERR("Unable to resume snapshot source until "
 			      "handover completes.");
 			r = -EINVAL;
-		} else if (!snap_src->suspended) {
+		} else if (!dm_suspended(snap_src->ti)) {
 			DMERR("Unable to perform snapshot handover until "
 			      "source is suspended.");
 			r = -EINVAL;
@@ -1816,7 +1776,6 @@
 
 	down_write(&s->lock);
 	s->active = 1;
-	s->suspended = 0;
 	up_write(&s->lock);
 }
 
@@ -2194,7 +2153,7 @@
 
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 7, 0},
+	.version = {1, 7, 1},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
@@ -2207,13 +2166,12 @@
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 9, 0},
+	.version = {1, 10, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
 	.map     = snapshot_map,
 	.end_io  = snapshot_end_io,
-	.postsuspend = snapshot_postsuspend,
 	.preresume  = snapshot_preresume,
 	.resume  = snapshot_resume,
 	.status  = snapshot_status,
@@ -2222,14 +2180,13 @@
 
 static struct target_type merge_target = {
 	.name    = dm_snapshot_merge_target_name,
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
 	.map     = snapshot_merge_map,
 	.end_io  = snapshot_end_io,
 	.presuspend = snapshot_merge_presuspend,
-	.postsuspend = snapshot_postsuspend,
 	.preresume  = snapshot_preresume,
 	.resume  = snapshot_merge_resume,
 	.status  = snapshot_status,
@@ -2291,17 +2248,8 @@
 		goto bad_tracked_chunk_cache;
 	}
 
-	ksnapd = create_singlethread_workqueue("ksnapd");
-	if (!ksnapd) {
-		DMERR("Failed to create ksnapd workqueue.");
-		r = -ENOMEM;
-		goto bad_pending_pool;
-	}
-
 	return 0;
 
-bad_pending_pool:
-	kmem_cache_destroy(tracked_chunk_cache);
 bad_tracked_chunk_cache:
 	kmem_cache_destroy(pending_cache);
 bad_pending_cache:
@@ -2322,8 +2270,6 @@
 
 static void __exit dm_snapshot_exit(void)
 {
-	destroy_workqueue(ksnapd);
-
 	dm_unregister_target(&snapshot_target);
 	dm_unregister_target(&origin_target);
 	dm_unregister_target(&merge_target);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index f0371b4..dddfa14 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -39,23 +39,20 @@
 	struct dm_target *ti;
 
 	/* Work struct used for triggering events*/
-	struct work_struct kstriped_ws;
+	struct work_struct trigger_event;
 
 	struct stripe stripe[0];
 };
 
-static struct workqueue_struct *kstriped;
-
 /*
  * An event is triggered whenever a drive
  * drops out of a stripe volume.
  */
 static void trigger_event(struct work_struct *work)
 {
-	struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws);
-
+	struct stripe_c *sc = container_of(work, struct stripe_c,
+					   trigger_event);
 	dm_table_event(sc->ti->table);
-
 }
 
 static inline struct stripe_c *alloc_context(unsigned int stripes)
@@ -160,7 +157,7 @@
 		return -ENOMEM;
 	}
 
-	INIT_WORK(&sc->kstriped_ws, trigger_event);
+	INIT_WORK(&sc->trigger_event, trigger_event);
 
 	/* Set pointer to dm target; used in trigger_event */
 	sc->ti = ti;
@@ -211,7 +208,7 @@
 	for (i = 0; i < sc->stripes; i++)
 		dm_put_device(ti, sc->stripe[i].dev);
 
-	flush_workqueue(kstriped);
+	flush_work_sync(&sc->trigger_event);
 	kfree(sc);
 }
 
@@ -367,7 +364,7 @@
 			atomic_inc(&(sc->stripe[i].error_count));
 			if (atomic_read(&(sc->stripe[i].error_count)) <
 			    DM_IO_ERROR_THRESHOLD)
-				queue_work(kstriped, &sc->kstriped_ws);
+				schedule_work(&sc->trigger_event);
 		}
 
 	return error;
@@ -401,7 +398,7 @@
 
 static struct target_type stripe_target = {
 	.name   = "striped",
-	.version = {1, 3, 0},
+	.version = {1, 3, 1},
 	.module = THIS_MODULE,
 	.ctr    = stripe_ctr,
 	.dtr    = stripe_dtr,
@@ -422,20 +419,10 @@
 		return r;
 	}
 
-	kstriped = create_singlethread_workqueue("kstriped");
-	if (!kstriped) {
-		DMERR("failed to create workqueue kstriped");
-		dm_unregister_target(&stripe_target);
-		return -ENOMEM;
-	}
-
 	return r;
 }
 
 void dm_stripe_exit(void)
 {
 	dm_unregister_target(&stripe_target);
-	destroy_workqueue(kstriped);
-
-	return;
 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4d705ce..38e4eb1 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -71,6 +71,8 @@
 	void *event_context;
 
 	struct dm_md_mempools *mempools;
+
+	struct list_head target_callbacks;
 };
 
 /*
@@ -204,6 +206,7 @@
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&t->devices);
+	INIT_LIST_HEAD(&t->target_callbacks);
 	atomic_set(&t->holders, 0);
 	t->discards_supported = 1;
 
@@ -325,15 +328,18 @@
 
 	BUG_ON(d->dm_dev.bdev);
 
-	bdev = open_by_devnum(dev, d->dm_dev.mode);
+	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
-	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
-	if (r)
-		blkdev_put(bdev, d->dm_dev.mode);
-	else
-		d->dm_dev.bdev = bdev;
-	return r;
+
+	r = bd_link_disk_holder(bdev, dm_disk(md));
+	if (r) {
+		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
+		return r;
+	}
+
+	d->dm_dev.bdev = bdev;
+	return 0;
 }
 
 /*
@@ -344,8 +350,8 @@
 	if (!d->dm_dev.bdev)
 		return;
 
-	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
-	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
+	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
+	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
 	d->dm_dev.bdev = NULL;
 }
 
@@ -1223,10 +1229,17 @@
 	return 0;
 }
 
+void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
+{
+	list_add(&cb->list, &t->target_callbacks);
+}
+EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
+
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_target_callbacks *cb;
 	int r = 0;
 
 	list_for_each_entry(dd, devices, list) {
@@ -1241,6 +1254,10 @@
 				     bdevname(dd->dm_dev.bdev, b));
 	}
 
+	list_for_each_entry(cb, &t->target_callbacks, list)
+		if (cb->congested_fn)
+			r |= cb->congested_fn(cb, bdi_bits);
+
 	return r;
 }
 
@@ -1262,6 +1279,7 @@
 {
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_target_callbacks *cb;
 
 	list_for_each_entry(dd, devices, list) {
 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
@@ -1274,6 +1292,10 @@
 				     dm_device_name(t->md),
 				     bdevname(dd->dm_dev.bdev, b));
 	}
+
+	list_for_each_entry(cb, &t->target_callbacks, list)
+		if (cb->unplug_fn)
+			cb->unplug_fn(cb);
 }
 
 struct mapped_device *dm_table_get_md(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7cb1352..eaa3af0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -32,7 +32,6 @@
 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
 #define DM_COOKIE_LENGTH 24
 
-static DEFINE_MUTEX(dm_mutex);
 static const char *_name = DM_NAME;
 
 static unsigned int major = 0;
@@ -328,7 +327,6 @@
 {
 	struct mapped_device *md;
 
-	mutex_lock(&dm_mutex);
 	spin_lock(&_minor_lock);
 
 	md = bdev->bd_disk->private_data;
@@ -346,7 +344,6 @@
 
 out:
 	spin_unlock(&_minor_lock);
-	mutex_unlock(&dm_mutex);
 
 	return md ? 0 : -ENXIO;
 }
@@ -355,10 +352,12 @@
 {
 	struct mapped_device *md = disk->private_data;
 
-	mutex_lock(&dm_mutex);
+	spin_lock(&_minor_lock);
+
 	atomic_dec(&md->open_count);
 	dm_put(md);
-	mutex_unlock(&dm_mutex);
+
+	spin_unlock(&_minor_lock);
 
 	return 0;
 }
@@ -630,7 +629,7 @@
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
-			trace_block_bio_complete(md->queue, bio);
+			trace_block_bio_complete(md->queue, bio, io_error);
 			bio_endio(bio, io_error);
 		}
 	}
@@ -990,8 +989,8 @@
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
-		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
-				    tio->io->bio->bi_bdev->bd_dev, sector);
+		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
+				      tio->io->bio->bi_bdev->bd_dev, sector);
 
 		generic_make_request(clone);
 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
@@ -1638,13 +1637,15 @@
 		if (map_request(ti, clone, md))
 			goto requeued;
 
-		spin_lock_irq(q->queue_lock);
+		BUG_ON(!irqs_disabled());
+		spin_lock(q->queue_lock);
 	}
 
 	goto out;
 
 requeued:
-	spin_lock_irq(q->queue_lock);
+	BUG_ON(!irqs_disabled());
+	spin_lock(q->queue_lock);
 
 plug_and_out:
 	if (!elv_queue_empty(q))
@@ -1884,7 +1885,8 @@
 	add_disk(md->disk);
 	format_dev_t(md->name, MKDEV(_major, minor));
 
-	md->wq = create_singlethread_workqueue("kdmflush");
+	md->wq = alloc_workqueue("kdmflush",
+				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
 	if (!md->wq)
 		goto bad_thread;
 
@@ -1992,13 +1994,14 @@
 	wake_up(&md->eventq);
 }
 
+/*
+ * Protected by md->suspend_lock obtained by dm_swap_table().
+ */
 static void __set_size(struct mapped_device *md, sector_t size)
 {
 	set_capacity(md->disk, size);
 
-	mutex_lock(&md->bdev->bd_inode->i_mutex);
 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-	mutex_unlock(&md->bdev->bd_inode->i_mutex);
 }
 
 /*
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767..0ed7f6b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 	conf = linear_conf(mddev, mddev->raid_disks);
 
 	if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 175c424f..818313e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,11 +287,14 @@
 	mddev_t *mddev = q->queuedata;
 	int rv;
 	int cpu;
+	unsigned int sectors;
 
-	if (mddev == NULL || mddev->pers == NULL) {
+	if (mddev == NULL || mddev->pers == NULL
+	    || !mddev->ready) {
 		bio_io_error(bio);
 		return 0;
 	}
+	smp_rmb(); /* Ensure implications of 'ready' are visible */
 	rcu_read_lock();
 	if (mddev->suspended) {
 		DEFINE_WAIT(__wait);
@@ -309,12 +312,16 @@
 	atomic_inc(&mddev->active_io);
 	rcu_read_unlock();
 
+	/*
+	 * save the sectors now since our bio can
+	 * go away inside make_request
+	 */
+	sectors = bio_sectors(bio);
 	rv = mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -546,6 +553,9 @@
 {
 	mddev_t *mddev, *new = NULL;
 
+	if (unit && MAJOR(unit) != MD_MAJOR)
+		unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
 	spin_lock(&all_mddevs_lock);
 
@@ -703,9 +713,9 @@
 }
 
 /* return the offset of the super block in 512byte sectors */
-static inline sector_t calc_dev_sboffset(struct block_device *bdev)
+static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
 {
-	sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
+	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
 	return MD_NEW_SIZE_SECTORS(num_sectors);
 }
 
@@ -763,7 +773,7 @@
 	 */
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
-	bio->bi_bdev = rdev->bdev;
+	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
 	bio->bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
@@ -793,7 +803,7 @@
 }
 
 int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
-		 struct page *page, int rw)
+		 struct page *page, int rw, bool metadata_op)
 {
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
 	struct completion event;
@@ -801,8 +811,12 @@
 
 	rw |= REQ_SYNC | REQ_UNPLUG;
 
-	bio->bi_bdev = rdev->bdev;
-	bio->bi_sector = sector;
+	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
+		rdev->meta_bdev : rdev->bdev;
+	if (metadata_op)
+		bio->bi_sector = sector + rdev->sb_start;
+	else
+		bio->bi_sector = sector + rdev->data_offset;
 	bio_add_page(bio, page, size, 0);
 	init_completion(&event);
 	bio->bi_private = &event;
@@ -827,7 +841,7 @@
 		return 0;
 
 
-	if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ))
+	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
 		goto fail;
 	rdev->sb_loaded = 1;
 	return 0;
@@ -989,7 +1003,7 @@
 	 *
 	 * It also happens to be a multiple of 4Kb.
 	 */
-	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+	rdev->sb_start = calc_dev_sboffset(rdev);
 
 	ret = read_disk_sb(rdev, MD_SB_BYTES);
 	if (ret) return ret;
@@ -1330,7 +1344,7 @@
 		return 0; /* component must fit device */
 	if (rdev->mddev->bitmap_info.offset)
 		return 0; /* can't move bitmap */
-	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+	rdev->sb_start = calc_dev_sboffset(rdev);
 	if (!num_sectors || num_sectors > rdev->sb_start)
 		num_sectors = rdev->sb_start;
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -1879,7 +1893,7 @@
 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
 
 	list_add_rcu(&rdev->same_set, &mddev->disks);
-	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
+	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
 
 	/* May as well allow recovery to be retried once */
 	mddev->recovery_disabled = 0;
@@ -1906,7 +1920,7 @@
 		MD_BUG();
 		return;
 	}
-	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
+	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
 	list_del_rcu(&rdev->same_set);
 	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
 	rdev->mddev = NULL;
@@ -1934,21 +1948,13 @@
 	struct block_device *bdev;
 	char b[BDEVNAME_SIZE];
 
-	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				 shared ? (mdk_rdev_t *)lock_rdev : rdev);
 	if (IS_ERR(bdev)) {
 		printk(KERN_ERR "md: could not open %s.\n",
 			__bdevname(dev, b));
 		return PTR_ERR(bdev);
 	}
-	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
-	if (err) {
-		printk(KERN_ERR "md: could not bd_claim %s.\n",
-			bdevname(bdev, b));
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
-		return err;
-	}
-	if (!shared)
-		set_bit(AllReserved, &rdev->flags);
 	rdev->bdev = bdev;
 	return err;
 }
@@ -1959,8 +1965,7 @@
 	rdev->bdev = NULL;
 	if (!bdev)
 		MD_BUG();
-	bd_release(bdev);
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 void md_autodetect_dev(dev_t dev);
@@ -2466,6 +2471,9 @@
 		if (rdev->raid_disk != -1)
 			return -EBUSY;
 
+		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
+			return -EBUSY;
+
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
@@ -2473,6 +2481,10 @@
 			if (rdev2->raid_disk == slot)
 				return -EEXIST;
 
+		if (slot >= rdev->mddev->raid_disks &&
+		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
+			return -ENOSPC;
+
 		rdev->raid_disk = slot;
 		if (test_bit(In_sync, &rdev->flags))
 			rdev->saved_raid_disk = slot;
@@ -2490,7 +2502,8 @@
 			/* failure here is OK */;
 		/* don't wakeup anyone, leave that to userspace. */
 	} else {
-		if (slot >= rdev->mddev->raid_disks)
+		if (slot >= rdev->mddev->raid_disks &&
+		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
 			return -ENOSPC;
 		rdev->raid_disk = slot;
 		/* assume it is working */
@@ -2606,12 +2619,11 @@
 
 			mddev_lock(mddev);
 			list_for_each_entry(rdev2, &mddev->disks, same_set)
-				if (test_bit(AllReserved, &rdev2->flags) ||
-				    (rdev->bdev == rdev2->bdev &&
-				     rdev != rdev2 &&
-				     overlaps(rdev->data_offset, rdev->sectors,
-					      rdev2->data_offset,
-					      rdev2->sectors))) {
+				if (rdev->bdev == rdev2->bdev &&
+				    rdev != rdev2 &&
+				    overlaps(rdev->data_offset, rdev->sectors,
+					     rdev2->data_offset,
+					     rdev2->sectors)) {
 					overlap = 1;
 					break;
 				}
@@ -3115,7 +3127,7 @@
 		char nm[20];
 		if (rdev->raid_disk < 0)
 			continue;
-		if (rdev->new_raid_disk > mddev->raid_disks)
+		if (rdev->new_raid_disk >= mddev->raid_disks)
 			rdev->new_raid_disk = -1;
 		if (rdev->new_raid_disk == rdev->raid_disk)
 			continue;
@@ -3744,6 +3756,8 @@
 	return sprintf(page, "%s\n", type);
 }
 
+static void reap_sync_thread(mddev_t *mddev);
+
 static ssize_t
 action_store(mddev_t *mddev, const char *page, size_t len)
 {
@@ -3758,9 +3772,7 @@
 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_unregister_thread(mddev->sync_thread);
-			mddev->sync_thread = NULL;
-			mddev->recovery = 0;
+			reap_sync_thread(mddev);
 		}
 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -3912,7 +3924,7 @@
 static ssize_t
 sync_completed_show(mddev_t *mddev, char *page)
 {
-	unsigned long max_sectors, resync;
+	unsigned long long max_sectors, resync;
 
 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return sprintf(page, "none\n");
@@ -3923,7 +3935,7 @@
 		max_sectors = mddev->dev_sectors;
 
 	resync = mddev->curr_resync_completed;
-	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
+	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
 }
 
 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
@@ -4010,19 +4022,24 @@
 {
 	char *e;
 	unsigned long long new = simple_strtoull(buf, &e, 10);
+	unsigned long long old = mddev->suspend_lo;
 
 	if (mddev->pers == NULL || 
 	    mddev->pers->quiesce == NULL)
 		return -EINVAL;
 	if (buf == e || (*e && *e != '\n'))
 		return -EINVAL;
-	if (new >= mddev->suspend_hi ||
-	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
-		mddev->suspend_lo = new;
+
+	mddev->suspend_lo = new;
+	if (new >= old)
+		/* Shrinking suspended region */
 		mddev->pers->quiesce(mddev, 2);
-		return len;
-	} else
-		return -EINVAL;
+	else {
+		/* Expanding suspended region - need to wait */
+		mddev->pers->quiesce(mddev, 1);
+		mddev->pers->quiesce(mddev, 0);
+	}
+	return len;
 }
 static struct md_sysfs_entry md_suspend_lo =
 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
@@ -4039,20 +4056,24 @@
 {
 	char *e;
 	unsigned long long new = simple_strtoull(buf, &e, 10);
+	unsigned long long old = mddev->suspend_hi;
 
 	if (mddev->pers == NULL ||
 	    mddev->pers->quiesce == NULL)
 		return -EINVAL;
 	if (buf == e || (*e && *e != '\n'))
 		return -EINVAL;
-	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
-	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
-		mddev->suspend_hi = new;
+
+	mddev->suspend_hi = new;
+	if (new <= old)
+		/* Shrinking suspended region */
+		mddev->pers->quiesce(mddev, 2);
+	else {
+		/* Expanding suspended region - need to wait */
 		mddev->pers->quiesce(mddev, 1);
 		mddev->pers->quiesce(mddev, 0);
-		return len;
-	} else
-		return -EINVAL;
+	}
+	return len;
 }
 static struct md_sysfs_entry md_suspend_hi =
 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
@@ -4120,10 +4141,10 @@
 	}
 
 	mddev->array_sectors = sectors;
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	if (mddev->pers)
+	if (mddev->pers) {
+		set_capacity(mddev->gendisk, mddev->array_sectors);
 		revalidate_disk(mddev->gendisk);
-
+	}
 	return len;
 }
 
@@ -4430,7 +4451,9 @@
 		 * We don't want the data to overlap the metadata,
 		 * Internal Bitmap issues have been handled elsewhere.
 		 */
-		if (rdev->data_offset < rdev->sb_start) {
+		if (rdev->meta_bdev) {
+			/* Nothing to check */;
+		} else if (rdev->data_offset < rdev->sb_start) {
 			if (mddev->dev_sectors &&
 			    rdev->data_offset + mddev->dev_sectors
 			    > rdev->sb_start) {
@@ -4564,7 +4587,8 @@
 	mddev->safemode_timer.data = (unsigned long) mddev;
 	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
 	mddev->in_sync = 1;
-
+	smp_wmb();
+	mddev->ready = 1;
 	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk >= 0) {
 			char nm[20];
@@ -4603,6 +4627,7 @@
 	}
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
 	return err;
@@ -4691,6 +4716,7 @@
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
+	mddev->changed = 0;
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;
@@ -4701,13 +4727,12 @@
 	mddev->plug = NULL;
 }
 
-void md_stop_writes(mddev_t *mddev)
+static void __md_stop_writes(mddev_t *mddev)
 {
 	if (mddev->sync_thread) {
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		md_unregister_thread(mddev->sync_thread);
-		mddev->sync_thread = NULL;
+		reap_sync_thread(mddev);
 	}
 
 	del_timer_sync(&mddev->safemode_timer);
@@ -4721,10 +4746,18 @@
 		md_update_sb(mddev, 1);
 	}
 }
+
+void md_stop_writes(mddev_t *mddev)
+{
+	mddev_lock(mddev);
+	__md_stop_writes(mddev);
+	mddev_unlock(mddev);
+}
 EXPORT_SYMBOL_GPL(md_stop_writes);
 
 void md_stop(mddev_t *mddev)
 {
+	mddev->ready = 0;
 	mddev->pers->stop(mddev);
 	if (mddev->pers->sync_request && mddev->to_remove == NULL)
 		mddev->to_remove = &md_redundancy_group;
@@ -4744,7 +4777,7 @@
 		goto out;
 	}
 	if (mddev->pers) {
-		md_stop_writes(mddev);
+		__md_stop_writes(mddev);
 
 		err  = -ENXIO;
 		if (mddev->ro==1)
@@ -4781,7 +4814,7 @@
 		if (mddev->ro)
 			set_disk_ro(disk, 0);
 
-		md_stop_writes(mddev);
+		__md_stop_writes(mddev);
 		md_stop(mddev);
 		mddev->queue->merge_bvec_fn = NULL;
 		mddev->queue->unplug_fn = NULL;
@@ -4799,6 +4832,7 @@
 
 		set_capacity(disk, 0);
 		mutex_unlock(&mddev->open_mutex);
+		mddev->changed = 1;
 		revalidate_disk(disk);
 
 		if (mddev->ro)
@@ -5159,9 +5193,10 @@
 		/* set saved_raid_disk if appropriate */
 		if (!mddev->persistent) {
 			if (info->state & (1<<MD_DISK_SYNC)  &&
-			    info->raid_disk < mddev->raid_disks)
+			    info->raid_disk < mddev->raid_disks) {
 				rdev->raid_disk = info->raid_disk;
-			else
+				set_bit(In_sync, &rdev->flags);
+			} else
 				rdev->raid_disk = -1;
 		} else
 			super_types[mddev->major_version].
@@ -5238,7 +5273,7 @@
 			printk(KERN_INFO "md: nonpersistent superblock ...\n");
 			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 		} else
-			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+			rdev->sb_start = calc_dev_sboffset(rdev);
 		rdev->sectors = rdev->sb_start;
 
 		err = bind_rdev_to_array(rdev, mddev);
@@ -5305,7 +5340,7 @@
 	}
 
 	if (mddev->persistent)
-		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+		rdev->sb_start = calc_dev_sboffset(rdev);
 	else
 		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 
@@ -5518,7 +5553,6 @@
 	 * sb_start or, if that is <data_offset, it must fit before the size
 	 * of each device.  If num_sectors is zero, we find the largest size
 	 * that fits.
-
 	 */
 	if (mddev->sync_thread)
 		return -EBUSY;
@@ -5555,6 +5589,8 @@
 	mddev->delta_disks = raid_disks - mddev->raid_disks;
 
 	rv = mddev->pers->check_reshape(mddev);
+	if (rv < 0)
+		mddev->delta_disks = 0;
 	return rv;
 }
 
@@ -5981,7 +6017,7 @@
 	atomic_inc(&mddev->openers);
 	mutex_unlock(&mddev->open_mutex);
 
-	check_disk_size_change(mddev->gendisk, bdev);
+	check_disk_change(bdev);
  out:
 	return err;
 }
@@ -5996,6 +6032,21 @@
 
 	return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	mddev->changed = 0;
+	return 0;
+}
 static const struct block_device_operations md_fops =
 {
 	.owner		= THIS_MODULE,
@@ -6006,6 +6057,8 @@
 	.compat_ioctl	= md_compat_ioctl,
 #endif
 	.getgeo		= md_getgeo,
+	.media_changed  = md_media_changed,
+	.revalidate_disk= md_revalidate,
 };
 
 static int md_thread(void * arg)
@@ -6041,7 +6094,8 @@
 			 || kthread_should_stop(),
 			 thread->timeout);
 
-		if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags))
+		clear_bit(THREAD_WAKEUP, &thread->flags);
+		if (!kthread_should_stop())
 			thread->run(thread->mddev);
 	}
 
@@ -6807,7 +6861,7 @@
 		       desc, mdname(mddev));
 		mddev->curr_resync = j;
 	}
-	mddev->curr_resync_completed = mddev->curr_resync;
+	mddev->curr_resync_completed = j;
 
 	while (j < max_sectors) {
 		sector_t sectors;
@@ -6825,8 +6879,7 @@
 			md_unplug(mddev);
 			wait_event(mddev->recovery_wait,
 				   atomic_read(&mddev->recovery_active) == 0);
-			mddev->curr_resync_completed =
-				mddev->curr_resync;
+			mddev->curr_resync_completed = j;
 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
@@ -6962,9 +7015,6 @@
 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		mddev->resync_min = mddev->curr_resync_completed;
 	mddev->curr_resync = 0;
-	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-		mddev->curr_resync_completed = 0;
-	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	wake_up(&resync_wait);
 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
@@ -7005,7 +7055,7 @@
 			}
 		}
 
-	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+	if (mddev->degraded && !mddev->recovery_disabled) {
 		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -7031,6 +7081,45 @@
 	}
 	return spares;
 }
+
+static void reap_sync_thread(mddev_t *mddev)
+{
+	mdk_rdev_t *rdev;
+
+	/* resync has finished, collect result */
+	md_unregister_thread(mddev->sync_thread);
+	mddev->sync_thread = NULL;
+	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+		/* success...*/
+		/* activate any spares */
+		if (mddev->pers->spare_active(mddev))
+			sysfs_notify(&mddev->kobj, NULL,
+				     "degraded");
+	}
+	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+	    mddev->pers->finish_reshape)
+		mddev->pers->finish_reshape(mddev);
+	md_update_sb(mddev, 1);
+
+	/* if array is no-longer degraded, then any saved_raid_disk
+	 * information must be scrapped
+	 */
+	if (!mddev->degraded)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			rdev->saved_raid_disk = -1;
+
+	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	/* flag recovery needed just to double check */
+	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	sysfs_notify_dirent_safe(mddev->sysfs_action);
+	md_new_event(mddev);
+}
+
 /*
  * This routine is regularly called by all per-raid-array threads to
  * deal with generic issues like resync and super-block update.
@@ -7055,9 +7144,6 @@
  */
 void md_check_recovery(mddev_t *mddev)
 {
-	mdk_rdev_t *rdev;
-
-
 	if (mddev->bitmap)
 		bitmap_daemon_work(mddev);
 
@@ -7092,7 +7178,20 @@
 			/* Only thing we do on a ro array is remove
 			 * failed devices.
 			 */
-			remove_and_add_spares(mddev);
+			mdk_rdev_t *rdev;
+			list_for_each_entry(rdev, &mddev->disks, same_set)
+				if (rdev->raid_disk >= 0 &&
+				    !test_bit(Blocked, &rdev->flags) &&
+				    test_bit(Faulty, &rdev->flags) &&
+				    atomic_read(&rdev->nr_pending)==0) {
+					if (mddev->pers->hot_remove_disk(
+						    mddev, rdev->raid_disk)==0) {
+						char nm[20];
+						sprintf(nm,"rd%d", rdev->raid_disk);
+						sysfs_remove_link(&mddev->kobj, nm);
+						rdev->raid_disk = -1;
+					}
+				}
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			goto unlock;
 		}
@@ -7125,34 +7224,7 @@
 			goto unlock;
 		}
 		if (mddev->sync_thread) {
-			/* resync has finished, collect result */
-			md_unregister_thread(mddev->sync_thread);
-			mddev->sync_thread = NULL;
-			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
-				/* success...*/
-				/* activate any spares */
-				if (mddev->pers->spare_active(mddev))
-					sysfs_notify(&mddev->kobj, NULL,
-						     "degraded");
-			}
-			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-			    mddev->pers->finish_reshape)
-				mddev->pers->finish_reshape(mddev);
-			md_update_sb(mddev, 1);
-
-			/* if array is no-longer degraded, then any saved_raid_disk
-			 * information must be scrapped
-			 */
-			if (!mddev->degraded)
-				list_for_each_entry(rdev, &mddev->disks, same_set)
-					rdev->saved_raid_disk = -1;
-
-			mddev->recovery = 0;
-			/* flag recovery needed just to double check */
-			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			sysfs_notify_dirent_safe(mddev->sysfs_action);
-			md_new_event(mddev);
+			reap_sync_thread(mddev);
 			goto unlock;
 		}
 		/* Set RUNNING before clearing NEEDED to avoid
@@ -7210,7 +7282,11 @@
 					" thread...\n", 
 					mdname(mddev));
 				/* leave the spares where they are, it shouldn't hurt */
-				mddev->recovery = 0;
+				clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+				clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+				clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+				clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+				clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 			} else
 				md_wakeup_thread(mddev->sync_thread);
 			sysfs_notify_dirent_safe(mddev->sysfs_action);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d05bab5..12215d4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -60,6 +60,12 @@
 	mddev_t *mddev;			/* RAID array if running */
 	int last_events;		/* IO event timestamp */
 
+	/*
+	 * If meta_bdev is non-NULL, it means that a separate device is
+	 * being used to store the metadata (superblock/bitmap) which
+	 * would otherwise be contained on the same device as the data (bdev).
+	 */
+	struct block_device *meta_bdev;
 	struct block_device *bdev;	/* block device handle */
 
 	struct page	*sb_page;
@@ -87,8 +93,6 @@
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	AllReserved	6		/* If whole device is reserved for
-					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
 #define Blocked		8		/* An error occurred on an externally
 					 * managed array, don't allow writes
@@ -148,7 +152,8 @@
 						       * are happening, so run/
 						       * takeover/stop are not safe
 						       */
-
+	int				ready; /* Set once it is safe to pass
+						* IO requests down */
 	struct gendisk			*gendisk;
 
 	struct kobject			kobj;
@@ -269,6 +274,8 @@
 	atomic_t			active;		/* general refcount */
 	atomic_t			openers;	/* number of active opens */
 
+	int				changed;	/* True if we might need to
+							 * reread partition info */
 	int				degraded;	/* whether md should consider
 							 * adding a spare
 							 */
@@ -497,8 +504,8 @@
 extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 			   sector_t sector, int size, struct page *page);
 extern void md_super_wait(mddev_t *mddev);
-extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
-			struct page *page, int rw);
+extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, 
+			struct page *page, int rw, bool metadata_op);
 extern void md_do_sync(mddev_t *mddev);
 extern void md_new_event(mddev_t *mddev);
 extern int md_allow_write(mddev_t *mddev);
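
Two related md.h changes meet in sync_page_io(): it grows a metadata_op argument, and the rdev gains an optional meta_bdev for superblock/bitmap storage. The raid1/raid10 hunks further down stop adding rdev->data_offset at the call sites, which suggests the helper now applies the offset (or picks meta_bdev) itself depending on the flag. The sketch below is a guess at that dispatch with invented names (sketch_sync_page_io, fake_rdev); it is not the kernel implementation.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_rdev {
    const char *bdev;        /* data device */
    const char *meta_bdev;   /* optional separate metadata device */
    uint64_t data_offset;    /* where the data area starts, in sectors */
};

static void sketch_sync_page_io(struct fake_rdev *rdev, uint64_t sector,
                                bool metadata_op)
{
    const char *target;

    if (metadata_op && rdev->meta_bdev) {
        target = rdev->meta_bdev;          /* metadata I/O, no data offset */
    } else {
        target = rdev->bdev;
        if (!metadata_op)
            sector += rdev->data_offset;   /* callers no longer add this */
    }
    printf("I/O to %s at sector %llu\n", target,
           (unsigned long long)sector);
}

int main(void)
{
    struct fake_rdev r = { "/dev/sda", "/dev/sdb", 2048 };

    sketch_sync_page_io(&r, 16, false);    /* data read: offset applied */
    sketch_sync_page_io(&r, 16, true);     /* metadata read: meta_bdev used */
    return 0;
}
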
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf3..3a62d44 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@
 	 * bookkeeping area. [whatever we allocate in multipath_run(),
 	 * should be freed in multipath_stop()]
 	 */
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
 	mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a39f4c3..c0ac457 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -179,6 +179,14 @@
 			rdev1->new_raid_disk = j;
 		}
 
+		if (mddev->level == 1) {
+			/* taking over a raid1 array -
+			 * we have only one active disk
+			 */
+			j = 0;
+			rdev1->new_raid_disk = j;
+		}
+
 		if (j < 0 || j >= mddev->raid_disks) {
 			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
 			       "aborting!\n", mdname(mddev), j);
@@ -353,7 +361,6 @@
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
@@ -644,12 +651,39 @@
 	return priv_conf;
 }
 
+static void *raid0_takeover_raid1(mddev_t *mddev)
+{
+	raid0_conf_t *priv_conf;
+
+	/* Check layout:
+	 *  - (N - 1) mirror drives must be already faulty
+	 */
+	if ((mddev->raid_disks - 1) != mddev->degraded) {
+		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
+		       mdname(mddev));
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Set new parameters */
+	mddev->new_level = 0;
+	mddev->new_layout = 0;
+	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+	mddev->delta_disks = 1 - mddev->raid_disks;
+	mddev->raid_disks = 1;
+	/* make sure it will not be marked as dirty */
+	mddev->recovery_cp = MaxSector;
+
+	create_strip_zones(mddev, &priv_conf);
+	return priv_conf;
+}
+
 static void *raid0_takeover(mddev_t *mddev)
 {
 	/* raid0 can take over:
 	 *  raid4 - if all data disks are active.
 	 *  raid5 - providing it is Raid4 layout and one disk is faulty
 	 *  raid10 - assuming we have all necessary active disks
+	 *  raid1 - with (N - 1) mirror drives faulty
 	 */
 	if (mddev->level == 4)
 		return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@
 	if (mddev->level == 10)
 		return raid0_takeover_raid10(mddev);
 
+	if (mddev->level == 1)
+		return raid0_takeover_raid1(mddev);
+
+	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+		mddev->level);
+
 	return ERR_PTR(-EINVAL);
 }
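
raid0_takeover_raid1() above accepts the conversion only when exactly one mirror is still usable, i.e. degraded == raid_disks - 1, and then picks a 128-sector (64 KiB) default chunk. The toy program below just replays that arithmetic; the function and variable names are illustrative.

#include <stdio.h>

static int raid1_to_raid0_ok(int raid_disks, int degraded)
{
    /* all but one mirror must already be faulty */
    return degraded == raid_disks - 1;
}

int main(void)
{
    printf("2 mirrors, 1 failed: %s\n",
           raid1_to_raid0_ok(2, 1) ? "takeover allowed" : "refused");
    printf("3 mirrors, 1 failed: %s\n",
           raid1_to_raid0_ok(3, 1) ? "takeover allowed" : "refused");

    /* 128 sectors * 512 bytes = 64 KiB default chunk after takeover */
    printf("default chunk: %d KiB\n", 128 * 512 / 1024);
    return 0;
}
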
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 845cf95..06cd712 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
+		/* Only take the spinlock to quiet a warning */
+		spin_lock(conf->mddev->queue->queue_lock);
 		blk_remove_plug(conf->mddev->queue);
+		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to
 		 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@
 		atomic_inc(&r1_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -1027,8 +1030,9 @@
 	} else
 		set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n"
-	       KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n",
+	printk(KERN_ALERT
+	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
+	       "md/raid1:%s: Operation continuing on %d devices.\n",
 	       mdname(mddev), bdevname(rdev->bdev, b),
 	       mdname(mddev), conf->raid_disks - mddev->degraded);
 }
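
The printk() fix above is worth spelling out: a log-level macro such as KERN_ALERT is just a short prefix string ("<1>" in kernels of this era) that belongs once at the start of the format, so repeating it in the middle of adjacent string literals only embeds those bytes in the message text. A userspace illustration with a stand-in macro:

#include <stdio.h>

#define FAKE_KERN_ALERT "<1>"   /* stand-in for the real macro */

int main(void)
{
    /* Old form: the second prefix ends up as literal characters mid-message. */
    printf(FAKE_KERN_ALERT "md/raid1:md0: Disk failure on sdb1\n"
           FAKE_KERN_ALERT "md/raid1:md0: Operation continuing on 1 devices.\n");

    /* Fixed form: one prefix for the whole multi-line message. */
    printf(FAKE_KERN_ALERT
           "md/raid1:md0: Disk failure on sdb1\n"
           "md/raid1:md0: Operation continuing on 1 devices.\n");
    return 0;
}
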
@@ -1364,10 +1368,10 @@
 					 */
 					rdev = conf->mirrors[d].rdev;
 					if (sync_page_io(rdev,
-							 sect + rdev->data_offset,
+							 sect,
 							 s<<9,
 							 bio->bi_io_vec[idx].bv_page,
-							 READ)) {
+							 READ, false)) {
 						success = 1;
 						break;
 					}
@@ -1390,10 +1394,10 @@
 					rdev = conf->mirrors[d].rdev;
 					atomic_add(s, &rdev->corrected_errors);
 					if (sync_page_io(rdev,
-							 sect + rdev->data_offset,
+							 sect,
 							 s<<9,
 							 bio->bi_io_vec[idx].bv_page,
-							 WRITE) == 0)
+							 WRITE, false) == 0)
 						md_error(mddev, rdev);
 				}
 				d = start;
@@ -1405,10 +1409,10 @@
 						continue;
 					rdev = conf->mirrors[d].rdev;
 					if (sync_page_io(rdev,
-							 sect + rdev->data_offset,
+							 sect,
 							 s<<9,
 							 bio->bi_io_vec[idx].bv_page,
-							 READ) == 0)
+							 READ, false) == 0)
 						md_error(mddev, rdev);
 				}
 			} else {
@@ -1488,10 +1492,8 @@
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    test_bit(In_sync, &rdev->flags) &&
-			    sync_page_io(rdev,
-					 sect + rdev->data_offset,
-					 s<<9,
-					 conf->tmppage, READ))
+			    sync_page_io(rdev, sect, s<<9,
+					 conf->tmppage, READ, false))
 				success = 1;
 			else {
 				d++;
@@ -1514,9 +1516,8 @@
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    test_bit(In_sync, &rdev->flags)) {
-				if (sync_page_io(rdev,
-						 sect + rdev->data_offset,
-						 s<<9, conf->tmppage, WRITE)
+				if (sync_page_io(rdev, sect, s<<9,
+						 conf->tmppage, WRITE, false)
 				    == 0)
 					/* Well, this device is dead */
 					md_error(mddev, rdev);
@@ -1531,9 +1532,8 @@
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    test_bit(In_sync, &rdev->flags)) {
-				if (sync_page_io(rdev,
-						 sect + rdev->data_offset,
-						 s<<9, conf->tmppage, READ)
+				if (sync_page_io(rdev, sect, s<<9,
+						 conf->tmppage, READ, false)
 				    == 0)
 					/* Well, this device is dead */
 					md_error(mddev, rdev);
@@ -2024,7 +2024,6 @@
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);
 
-	mddev->queue->queue_lock = &conf->device_lock;
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0641674..747d061 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
+		/* Spinlock only taken to quiet a warning */
+		spin_lock(conf->mddev->queue->queue_lock);
 		blk_remove_plug(conf->mddev->queue);
+		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to disk
 		 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
@@ -1051,8 +1054,9 @@
 	}
 	set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
-	       KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+	printk(KERN_ALERT
+	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
+	       "md/raid10:%s: Operation continuing on %d devices.\n",
 	       mdname(mddev), bdevname(rdev->bdev, b),
 	       mdname(mddev), conf->raid_disks - mddev->degraded);
 }
@@ -1559,9 +1563,9 @@
 				rcu_read_unlock();
 				success = sync_page_io(rdev,
 						       r10_bio->devs[sl].addr +
-						       sect + rdev->data_offset,
+						       sect,
 						       s<<9,
-						       conf->tmppage, READ);
+						       conf->tmppage, READ, false);
 				rdev_dec_pending(rdev, mddev);
 				rcu_read_lock();
 				if (success)
@@ -1598,8 +1602,8 @@
 				atomic_add(s, &rdev->corrected_errors);
 				if (sync_page_io(rdev,
 						 r10_bio->devs[sl].addr +
-						 sect + rdev->data_offset,
-						 s<<9, conf->tmppage, WRITE)
+						 sect,
+						 s<<9, conf->tmppage, WRITE, false)
 				    == 0) {
 					/* Well, this device is dead */
 					printk(KERN_NOTICE
@@ -1635,9 +1639,9 @@
 				rcu_read_unlock();
 				if (sync_page_io(rdev,
 						 r10_bio->devs[sl].addr +
-						 sect + rdev->data_offset,
+						 sect,
 						 s<<9, conf->tmppage,
-						 READ) == 0) {
+						 READ, false) == 0) {
 					/* Well, this device is dead */
 					printk(KERN_NOTICE
 					       "md/raid10:%s: unable to read back "
@@ -2303,8 +2307,6 @@
 	if (!conf)
 		goto out;
 
-	mddev->queue->queue_lock = &conf->device_lock;
-
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
@@ -2462,11 +2464,13 @@
 	mddev->recovery_cp = MaxSector;
 
 	conf = setup_conf(mddev);
-	if (!IS_ERR(conf))
+	if (!IS_ERR(conf)) {
 		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0)
 				rdev->new_raid_disk = rdev->raid_disk * 2;
-		
+		conf->barrier = 1;
+	}
+
 	return conf;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc574f3..78536fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1721,7 +1721,6 @@
 		set_bit(Faulty, &rdev->flags);
 		printk(KERN_ALERT
 		       "md/raid:%s: Disk failure on %s, disabling device.\n"
-		       KERN_ALERT
 		       "md/raid:%s: Operation continuing on %d devices.\n",
 		       mdname(mddev),
 		       bdevname(rdev->bdev, b),
@@ -4237,7 +4236,7 @@
 		wait_event(conf->wait_for_overlap,
 			   atomic_read(&conf->reshape_stripes)==0);
 		mddev->reshape_position = conf->reshape_progress;
-		mddev->curr_resync_completed = mddev->curr_resync;
+		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		md_wakeup_thread(mddev->thread);
@@ -4338,7 +4337,7 @@
 		wait_event(conf->wait_for_overlap,
 			   atomic_read(&conf->reshape_stripes) == 0);
 		mddev->reshape_position = conf->reshape_progress;
-		mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
+		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		md_wakeup_thread(mddev->thread);
@@ -5205,7 +5204,6 @@
 
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-		mddev->queue->queue_lock = &conf->device_lock;
 		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
@@ -5339,7 +5337,7 @@
 		    && !test_bit(Faulty, &tmp->rdev->flags)
 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
 			count++;
-			sysfs_notify_dirent(tmp->rdev->sysfs_state);
+			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
 		}
 	}
 	spin_lock_irqsave(&conf->device_lock, flags);
@@ -5518,7 +5516,6 @@
 	raid5_conf_t *conf = mddev->private;
 	mdk_rdev_t *rdev;
 	int spares = 0;
-	int added_devices = 0;
 	unsigned long flags;
 
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5528,8 +5525,8 @@
 		return -ENOSPC;
 
 	list_for_each_entry(rdev, &mddev->disks, same_set)
-		if (rdev->raid_disk < 0 &&
-		    !test_bit(Faulty, &rdev->flags))
+		if (!test_bit(In_sync, &rdev->flags)
+		    && !test_bit(Faulty, &rdev->flags))
 			spares++;
 
 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5572,29 +5569,35 @@
 	 * to correctly record the "partially reconstructed" state of
 	 * such devices during the reshape and confusion could result.
 	 */
-	if (mddev->delta_disks >= 0)
-	    list_for_each_entry(rdev, &mddev->disks, same_set)
-		if (rdev->raid_disk < 0 &&
-		    !test_bit(Faulty, &rdev->flags)) {
-			if (raid5_add_disk(mddev, rdev) == 0) {
-				char nm[20];
-				if (rdev->raid_disk >= conf->previous_raid_disks) {
-					set_bit(In_sync, &rdev->flags);
-					added_devices++;
-				} else
-					rdev->recovery_offset = 0;
-				sprintf(nm, "rd%d", rdev->raid_disk);
-				if (sysfs_create_link(&mddev->kobj,
-						      &rdev->kobj, nm))
-					/* Failure here is OK */;
-			} else
-				break;
-		}
+	if (mddev->delta_disks >= 0) {
+		int added_devices = 0;
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			if (rdev->raid_disk < 0 &&
+			    !test_bit(Faulty, &rdev->flags)) {
+				if (raid5_add_disk(mddev, rdev) == 0) {
+					char nm[20];
+					if (rdev->raid_disk
+					    >= conf->previous_raid_disks) {
+						set_bit(In_sync, &rdev->flags);
+						added_devices++;
+					} else
+						rdev->recovery_offset = 0;
+					sprintf(nm, "rd%d", rdev->raid_disk);
+					if (sysfs_create_link(&mddev->kobj,
+							      &rdev->kobj, nm))
+						/* Failure here is OK */;
+				}
+			} else if (rdev->raid_disk >= conf->previous_raid_disks
+				   && !test_bit(Faulty, &rdev->flags)) {
+				/* This is a spare that was manually added */
+				set_bit(In_sync, &rdev->flags);
+				added_devices++;
+			}
 
-	/* When a reshape changes the number of devices, ->degraded
-	 * is measured against the larger of the pre and post number of
-	 * devices.*/
-	if (mddev->delta_disks > 0) {
+		/* When a reshape changes the number of devices,
+		 * ->degraded is measured against the larger of the
+		 * pre and post number of devices.
+		 */
 		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
 			- added_devices;
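
The reworked reshape bookkeeping above now also counts manually added spares beyond the previous disk count as In_sync, and the closing comment explains the accounting: when growing, ->degraded is charged with the new slots and credited for every device actually added. A rough numeric model with made-up values:

#include <stdio.h>

int main(void)
{
    int previous_raid_disks = 4, raid_disks = 6;  /* grow 4 -> 6 */
    int added_devices = 1;                        /* only one new disk present */
    int degraded = 0;

    degraded += (raid_disks - previous_raid_disks) - added_devices;
    printf("degraded after reshape start: %d\n", degraded);  /* 1 */
    return 0;
}
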
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 982f000..9f47e38 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -452,7 +452,7 @@
 	INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
 	dev->ext = ext;
 
-	mutex_init(&dev->lock);
+	mutex_init(&dev->v4l2_lock);
 	spin_lock_init(&dev->int_slock);
 	spin_lock_init(&dev->slock);
 
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index e3fedc6..1bd3dd7 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -15,18 +15,15 @@
 	}
 
 	/* is it free? */
-	mutex_lock(&dev->lock);
 	if (vv->resources & bit) {
 		DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
 		/* no, someone else uses it */
-		mutex_unlock(&dev->lock);
 		return 0;
 	}
 	/* it's free, grab it */
 	fh->resources  |= bit;
 	vv->resources |= bit;
 	DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
-	mutex_unlock(&dev->lock);
 	return 1;
 }
 
@@ -37,11 +34,9 @@
 
 	BUG_ON((fh->resources & bits) != bits);
 
-	mutex_lock(&dev->lock);
 	fh->resources  &= ~bits;
 	vv->resources &= ~bits;
 	DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
-	mutex_unlock(&dev->lock);
 }
 
 
@@ -396,7 +391,7 @@
 	.write		= fops_write,
 	.poll		= fops_poll,
 	.mmap		= fops_mmap,
-	.ioctl		= video_ioctl2,
+	.unlocked_ioctl	= video_ioctl2,
 };
 
 static void vv_callback(struct saa7146_dev *dev, unsigned long status)
@@ -505,6 +500,7 @@
 	vfd->fops = &video_fops;
 	vfd->ioctl_ops = &dev->ext_vv_data->ops;
 	vfd->release = video_device_release;
+	vfd->lock = &dev->v4l2_lock;
 	vfd->tvnorms = 0;
 	for (i = 0; i < dev->ext_vv_data->num_stds; i++)
 		vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 2d4533a..afe8580 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -412,7 +412,7 @@
 			    V4L2_BUF_TYPE_VBI_CAPTURE,
 			    V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
 			    sizeof(struct saa7146_buf),
-			    file, NULL);
+			    file, &dev->v4l2_lock);
 
 	init_timer(&fh->vbi_read_timeout);
 	fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 0ac5c61..9aafa4e 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -553,8 +553,6 @@
 		}
 	}
 
-	mutex_lock(&dev->lock);
-
 	/* ok, accept it */
 	vv->ov_fb = *fb;
 	vv->ov_fmt = fmt;
@@ -563,8 +561,6 @@
 		vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
 		DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
 	}
-
-	mutex_unlock(&dev->lock);
 	return 0;
 }
 
@@ -649,8 +645,6 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&dev->lock);
-
 	switch (ctrl->type) {
 	case V4L2_CTRL_TYPE_BOOLEAN:
 	case V4L2_CTRL_TYPE_MENU:
@@ -693,7 +687,6 @@
 		/* fixme: we can support changing VFLIP and HFLIP here... */
 		if (IS_CAPTURE_ACTIVE(fh) != 0) {
 			DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
-			mutex_unlock(&dev->lock);
 			return -EBUSY;
 		}
 		vv->hflip = c->value;
@@ -701,16 +694,13 @@
 	case V4L2_CID_VFLIP:
 		if (IS_CAPTURE_ACTIVE(fh) != 0) {
 			DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
-			mutex_unlock(&dev->lock);
 			return -EBUSY;
 		}
 		vv->vflip = c->value;
 		break;
 	default:
-		mutex_unlock(&dev->lock);
 		return -EINVAL;
 	}
-	mutex_unlock(&dev->lock);
 
 	if (IS_OVERLAY_ACTIVE(fh) != 0) {
 		saa7146_stop_preview(fh);
@@ -902,22 +892,18 @@
 	err = vidioc_try_fmt_vid_overlay(file, fh, f);
 	if (0 != err)
 		return err;
-	mutex_lock(&dev->lock);
 	fh->ov.win    = f->fmt.win;
 	fh->ov.nclips = f->fmt.win.clipcount;
 	if (fh->ov.nclips > 16)
 		fh->ov.nclips = 16;
 	if (copy_from_user(fh->ov.clips, f->fmt.win.clips,
 				sizeof(struct v4l2_clip) * fh->ov.nclips)) {
-		mutex_unlock(&dev->lock);
 		return -EFAULT;
 	}
 
 	/* fh->ov.fh is used to indicate that we have valid overlay informations, too */
 	fh->ov.fh = fh;
 
-	mutex_unlock(&dev->lock);
-
 	/* check if our current overlay is active */
 	if (IS_OVERLAY_ACTIVE(fh) != 0) {
 		saa7146_stop_preview(fh);
@@ -976,8 +962,6 @@
 		}
 	}
 
-	mutex_lock(&dev->lock);
-
 	for (i = 0; i < dev->ext_vv_data->num_stds; i++)
 		if (*id & dev->ext_vv_data->stds[i].id)
 			break;
@@ -988,8 +972,6 @@
 		found = 1;
 	}
 
-	mutex_unlock(&dev->lock);
-
 	if (vv->ov_suspend != NULL) {
 		saa7146_start_preview(vv->ov_suspend);
 		vv->ov_suspend = NULL;
@@ -1354,7 +1336,7 @@
 			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 			    V4L2_FIELD_INTERLACED,
 			    sizeof(struct saa7146_buf),
-			    file, NULL);
+			    file, &dev->v4l2_lock);
 
 	return 0;
 }
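
The saa7146 changes in this and the previous files all serve one conversion: the handlers stop taking the driver's own dev->lock, the file operations switch to unlocked_ioctl, and vfd->lock (plus the mutex handed to videobuf) lets the V4L2 core serialize calls instead. The userspace sketch below only models that shift of responsibility, with a pthread mutex standing in for v4l2_lock; build with -pthread.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t v4l2_lock = PTHREAD_MUTEX_INITIALIZER;

static int handler_s_ctrl(int value)
{
    /* No locking here any more -- the caller already holds v4l2_lock. */
    printf("s_ctrl(%d) under the core's lock\n", value);
    return 0;
}

static int dispatch_ioctl(int (*handler)(int), int arg)
{
    int ret;

    pthread_mutex_lock(&v4l2_lock);    /* what unlocked_ioctl + vfd->lock buys */
    ret = handler(arg);
    pthread_mutex_unlock(&v4l2_lock);
    return ret;
}

int main(void)
{
    return dispatch_ioctl(handler_s_ctrl, 1);
}
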
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 78b0895..6fc79f1 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -34,7 +34,7 @@
 config MEDIA_TUNER_CUSTOMISE
 	bool "Customize analog and hybrid tuner modules to build"
 	depends on MEDIA_TUNER
-	default y if EMBEDDED
+	default y if EXPERT
 	help
 	  This allows the user to deselect tuner drivers unnecessary
 	  for their hardware from the build. Use this option with care
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c9062ce..8c48521 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -95,8 +95,7 @@
 		msleep(20);
 	} else {
 		msg = disable;
-		tuner_i2c_xfer_send(&priv->i2c_props, msg, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &msg[1], 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1);
 
 		buf[2] = msg[1];
 		buf[2] &= ~0x04;
@@ -233,19 +232,22 @@
 		tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
 	}
 
+
 	tda8290_i2c_bridge(fe, 1);
 
 	if (fe->ops.tuner_ops.set_analog_params)
 		fe->ops.tuner_ops.set_analog_params(fe, params);
 
 	for (i = 0; i < 3; i++) {
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_pll_stat, 1, &pll_stat, 1);
 		if (pll_stat & 0x80) {
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_adc_sat, 1,
+						 &adc_sat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_agc_stat, 1,
+						 &agc_stat, 1);
 			tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
 			break;
 		} else {
@@ -259,20 +261,22 @@
 			   agc_stat, adc_sat, pll_stat & 0x80);
 		tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2);
 		msleep(100);
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_agc_stat, 1, &agc_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_pll_stat, 1, &pll_stat, 1);
 		if ((agc_stat > 115) || !(pll_stat & 0x80)) {
 			tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
 				   agc_stat, pll_stat & 0x80);
 			if (priv->cfg.agcf)
 				priv->cfg.agcf(fe);
 			msleep(100);
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_agc_stat, 1,
+						 &agc_stat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_pll_stat, 1,
+						 &pll_stat, 1);
 			if((agc_stat > 115) || !(pll_stat & 0x80)) {
 				tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat);
 				tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2);
@@ -284,10 +288,12 @@
 
 	/* l/ l' deadlock? */
 	if(priv->tda8290_easy_mode & 0x60) {
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_adc_sat, 1,
+					 &adc_sat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_pll_stat, 1,
+					 &pll_stat, 1);
 		if ((adc_sat > 20) || !(pll_stat & 0x80)) {
 			tuner_dbg("trying to resolve SECAM L deadlock\n");
 			tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2);
@@ -307,8 +313,7 @@
 	struct tda8290_priv *priv = fe->analog_demod_priv;
 	unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
 
 	if (enable)
 		buf[1] = 0x01;
@@ -323,8 +328,7 @@
 	struct tda8290_priv *priv = fe->analog_demod_priv;
 	unsigned char buf[] = { 0x01, 0x00 };
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
 
 	if (enable)
 		buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */
@@ -353,8 +357,7 @@
 	struct tda8290_priv *priv = fe->analog_demod_priv;
 	unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
 
 	if (enable)
 		buf[1] &= ~0x40;
@@ -370,10 +373,10 @@
 	unsigned char set_gpio_cf[]    = { 0x44, 0x00 };
 	unsigned char set_gpio_val[]   = { 0x46, 0x00 };
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_cf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_cf[1], 1);
-	tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_val[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_val[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 &set_gpio_cf[0], 1, &set_gpio_cf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 &set_gpio_val[0], 1, &set_gpio_val[1], 1);
 
 	set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */
 
@@ -392,8 +395,7 @@
 	unsigned char hvpll_stat = 0x26;
 	unsigned char ret;
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &hvpll_stat, 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &ret, 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1);
 	return (ret & 0x01) ? 65535 : 0;
 }
 
@@ -413,8 +415,8 @@
 	tda8295_power(fe, 1);
 	tda8295_agc1_out(fe, 1);
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &blanking_mode[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &blanking_mode[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 &blanking_mode[0], 1, &blanking_mode[1], 1);
 
 	tda8295_set_video_std(fe);
 
@@ -447,8 +449,8 @@
 	unsigned char i2c_get_afc[1] = { 0x1B };
 	unsigned char afc = 0;
 
-	tuner_i2c_xfer_send(&priv->i2c_props, i2c_get_afc, ARRAY_SIZE(i2c_get_afc));
-	tuner_i2c_xfer_recv(&priv->i2c_props, &afc, 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1);
 	return (afc & 0x80)? 65535:0;
 }
 
@@ -654,20 +656,26 @@
 static int tda8290_probe(struct tuner_i2c_props *i2c_props)
 {
 #define TDA8290_ID 0x89
-	unsigned char tda8290_id[] = { 0x1f, 0x00 };
+	u8 reg = 0x1f, id;
+	struct i2c_msg msg_read[] = {
+		{ .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
+	};
 
 	/* detect tda8290 */
-	tuner_i2c_xfer_send(i2c_props, &tda8290_id[0], 1);
-	tuner_i2c_xfer_recv(i2c_props, &tda8290_id[1], 1);
+	if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+		printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
+			       __func__, reg);
+		return -ENODEV;
+	}
 
-	if (tda8290_id[1] == TDA8290_ID) {
+	if (id == TDA8290_ID) {
 		if (debug)
 			printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n",
 			       __func__, i2c_adapter_id(i2c_props->adap),
 			       i2c_props->addr);
 		return 0;
 	}
-
 	return -ENODEV;
 }
 
@@ -675,16 +683,23 @@
 {
 #define TDA8295_ID 0x8a
 #define TDA8295C2_ID 0x8b
-	unsigned char tda8295_id[] = { 0x2f, 0x00 };
+	u8 reg = 0x2f, id;
+	struct i2c_msg msg_read[] = {
+		{ .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
+	};
 
 	/* detect tda8295 */
-	tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1);
-	tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1);
+	if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+		printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
+			       __func__, reg);
+		return -ENODEV;
+	}
 
-	if ((tda8295_id[1] & 0xfe) == TDA8295_ID) {
+	if ((id & 0xfe) == TDA8295_ID) {
 		if (debug)
 			printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
-			       __func__, (tda8295_id[1] == TDA8295_ID) ?
+			       __func__, (id == TDA8295_ID) ?
 			       "tda8295c1" : "tda8295c2",
 			       i2c_adapter_id(i2c_props->adap),
 			       i2c_props->addr);
@@ -740,9 +755,11 @@
 		       sizeof(struct analog_demod_ops));
 	}
 
-	if ((!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) &&
-	    (tda829x_find_tuner(fe) < 0))
-		goto fail;
+	if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) {
+		tda8295_power(fe, 1);
+		if (tda829x_find_tuner(fe) < 0)
+			goto fail;
+	}
 
 	switch (priv->ver) {
 	case TDA8290:
@@ -786,6 +803,8 @@
 	return fe;
 
 fail:
+	memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops));
+
 	tda829x_release(fe);
 	return NULL;
 }
@@ -809,8 +828,8 @@
 	int i;
 
 	/* rule out tda9887, which would return the same byte repeatedly */
-	tuner_i2c_xfer_send(&i2c_props, soft_reset, 1);
-	tuner_i2c_xfer_recv(&i2c_props, buf, PROBE_BUFFER_SIZE);
+	tuner_i2c_xfer_send_recv(&i2c_props,
+				 soft_reset, 1, buf, PROBE_BUFFER_SIZE);
 	for (i = 1; i < PROBE_BUFFER_SIZE; i++) {
 		if (buf[i] != buf[0])
 			break;
@@ -827,13 +846,12 @@
 	/* fall back to old probing method */
 	tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2);
 	tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
-	tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
-	tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+	tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1);
 	if (data == 0) {
 		tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2);
 		tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
-		tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
-		tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+		tuner_i2c_xfer_send_recv(&i2c_props,
+					 &addr_dto_lsb, 1, &data, 1);
 		if (data == 0x7b) {
 			return 0;
 		}
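
Throughout tda8290.c the separate send-then-receive pairs become one combined transfer, and the rewritten probe functions show what that means at the bus level: a single i2c_transfer() with a write message followed by an I2C_M_RD message, giving a repeated start between the register address and the read. The userspace analogue below uses the i2c-dev I2C_RDWR ioctl; the bus path, slave address, and register are made-up example values.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
    int fd = open("/dev/i2c-0", O_RDWR);   /* hypothetical bus */
    uint8_t reg = 0x1f, id = 0;
    struct i2c_msg msgs[2] = {
        { .addr = 0x4b, .flags = 0,        .len = 1, .buf = &reg },
        { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id  },
    };
    struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

    if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
        perror("combined transfer failed");
        return 1;
    }
    printf("register 0x%02x reads 0x%02x\n", reg, id);
    close(fd);
    return 0;
}
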
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 8ca48f7..98ffb40 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -514,8 +514,8 @@
 	union {
 		u16 system16;
 		struct {
-			u8 system;
 			u8 not_system;
+			u8 system;
 		};
 	};
 	u8 data;
@@ -575,7 +575,7 @@
 		if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
 			deb_data("NEC extended protocol\n");
 			/* NEC extended code - 24 bits */
-			keycode = poll_reply->system16 << 8 | poll_reply->data;
+			keycode = be16_to_cpu(poll_reply->system16) << 8 | poll_reply->data;
 		} else {
 			deb_data("NEC normal protocol\n");
 			/* normal NEC code - 16 bits */
@@ -587,7 +587,7 @@
 		deb_data("RC5 protocol\n");
 		/* RC5 Protocol */
 		toggle = poll_reply->report_id;
-		keycode = poll_reply->system16 << 8 | poll_reply->data;
+		keycode = poll_reply->system << 8 | poll_reply->data;
 
 		break;
 	}
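
The dib0700 remote fix swaps the two bytes inside the union and converts the 16-bit overlay with be16_to_cpu, so extended NEC keycodes come out the same on little- and big-endian hosts. The toy below shows the general technique — overlaying a u16 on two wire-order bytes and byte-swapping before use — with invented byte values and a layout that only mirrors this sketch, not the device's actual report.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs() behaves like a be16-to-cpu conversion */

union poll_reply {
    uint16_t system16;
    struct {
        uint8_t not_system;  /* first byte in this toy layout */
        uint8_t system;
    };
};

int main(void)
{
    const uint8_t wire[2] = { 0xfb, 0x04 };  /* ~system, system (toy values) */
    union poll_reply r;

    memcpy(&r, wire, sizeof(r));
    printf("system16 read as big-endian: 0x%04x\n", ntohs(r.system16));
    printf("system ^ not_system = 0x%02x (0xff means plain NEC)\n",
           (unsigned)(r.system ^ r.not_system));
    return 0;
}
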
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index defd839..193cdb7 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -870,6 +870,23 @@
 	return 0;
 }
 
+static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
+		u16 pid, int onoff)
+{
+	struct dib0700_state *st = adapter->dev->priv;
+	if (st->is_dib7000pc)
+		return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
+	return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
+}
+
+static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
+{
+	struct dib0700_state *st = adapter->dev->priv;
+	if (st->is_dib7000pc)
+		return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
+	return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
+}
+
 static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
 {
     return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
@@ -1875,8 +1892,8 @@
 			{
 				.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
 				.pid_filter_count = 32,
-				.pid_filter       = stk70x0p_pid_filter,
-				.pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+				.pid_filter       = stk7700p_pid_filter,
+				.pid_filter_ctrl  = stk7700p_pid_filter_ctrl,
 				.frontend_attach  = stk7700p_frontend_attach,
 				.tuner_attach     = stk7700p_tuner_attach,
 
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 9eea418..46ccd01 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -659,7 +659,7 @@
 }
 
 /* Default firmware for LME2510C */
-const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
+char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
 
 static void lme_coldreset(struct usb_device *dev)
 {
@@ -1006,7 +1006,7 @@
 	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
 	.usb_ctrl = DEVICE_SPECIFIC,
 	.download_firmware = lme2510_download_firmware,
-	.firmware = lme_firmware,
+	.firmware = (const char *)&lme_firmware,
 	.size_of_priv = sizeof(struct lme2510_state),
 	.num_adapters = 1,
 	.adapter = {
@@ -1109,5 +1109,5 @@
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.74");
+MODULE_VERSION("1.75");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
index fcf3828..f82d4a9 100644
--- a/drivers/media/dvb/firewire/firedtv-rc.c
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -172,7 +172,8 @@
 
 void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
 {
-	u16 *keycode = fdtv->remote_ctrl_dev->keycode;
+	struct input_dev *idev = fdtv->remote_ctrl_dev;
+	u16 *keycode = idev->keycode;
 
 	if (code >= 0x0300 && code <= 0x031f)
 		code = keycode[code - 0x0300];
@@ -188,6 +189,8 @@
 		return;
 	}
 
-	input_report_key(fdtv->remote_ctrl_dev, code, 1);
-	input_report_key(fdtv->remote_ctrl_dev, code, 0);
+	input_report_key(idev, code, 1);
+	input_sync(idev);
+	input_report_key(idev, code, 0);
+	input_sync(idev);
 }
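
The added input_sync() calls matter because the input core groups everything up to an EV_SYN/SYN_REPORT into one frame; without a sync between them, the press and the release of the same key can be delivered together and effectively cancel out. The sketch below just serializes the four events (to stdout rather than a real input device) to show the intended ordering; the exact struct layout varies with libc/kernel headers, so treat it as illustrative.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <linux/input.h>

static void emit(int fd, unsigned short type, unsigned short code, int value)
{
    struct input_event ev;

    memset(&ev, 0, sizeof(ev));
    gettimeofday(&ev.time, NULL);
    ev.type = type;
    ev.code = code;
    ev.value = value;
    if (write(fd, &ev, sizeof(ev)) < 0)
        perror("write");
}

int main(void)
{
    int fd = STDOUT_FILENO;          /* a driver would report to the input core */

    emit(fd, EV_KEY, KEY_OK, 1);     /* press */
    emit(fd, EV_SYN, SYN_REPORT, 0); /* flush the press as its own frame */
    emit(fd, EV_KEY, KEY_OK, 0);     /* release */
    emit(fd, EV_SYN, SYN_REPORT, 0); /* flush the release */
    return 0;
}
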
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index ef3e43a..b8519ba 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -1,7 +1,7 @@
 config DVB_FE_CUSTOMISE
 	bool "Customise the frontend modules to build"
 	depends on DVB_CORE
-	default y if EMBEDDED
+	default y if EXPERT
 	help
 	  This allows the user to select/deselect frontend drivers for their
 	  hardware from the build.
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index ce22205..ba25fa0 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -334,11 +334,11 @@
 				if_sample_freq = 3300000; /* 3.3 MHz */
 				break;
 			case BANDWIDTH_7_MHZ:
-				if_sample_freq = 3800000; /* 3.8 MHz */
+				if_sample_freq = 3500000; /* 3.5 MHz */
 				break;
 			case BANDWIDTH_8_MHZ:
 			default:
-				if_sample_freq = 4300000; /* 4.3 MHz */
+				if_sample_freq = 4000000; /* 4.0 MHz */
 				break;
 			}
 		} else if (state->config.tuner == AF9013_TUNER_TDA18218) {
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index c7f5ccf..289a798 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -1285,6 +1285,25 @@
 }
 EXPORT_SYMBOL(dib7000m_get_i2c_master);
 
+int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
+{
+	struct dib7000m_state *state = fe->demodulator_priv;
+	u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
+	val |= (onoff & 0x1) << 4;
+	dprintk("PID filter enabled %d", onoff);
+	return dib7000m_write_word(state, 294 + state->reg_offs, val);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
+
+int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
+{
+	struct dib7000m_state *state = fe->demodulator_priv;
+	dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+	return dib7000m_write_word(state, 300 + state->reg_offs + id,
+			onoff ? (1 << 13) | pid : 0);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter);
+
 #if 0
 /* used with some prototype boards */
 int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
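
The new dib7000m_pid_filter_ctrl() is a textbook read-modify-write of a single register bit: read the word at 294 + reg_offs, clear bit 4 with the 0xffef mask, then set it from the onoff argument. The standalone illustration below applies the same mask dance to a fake register; nothing here is driver code.

#include <stdio.h>
#include <stdint.h>

static uint16_t fake_reg = 0x1234;   /* pretend register contents */

int main(void)
{
    for (int onoff = 1; onoff >= 0; onoff--) {
        uint16_t val = fake_reg & 0xffef;   /* clear the filter-enable bit */
        val |= (onoff & 0x1) << 4;          /* set it as requested */
        fake_reg = val;
        printf("onoff=%d -> reg=0x%04x (bit4=%d)\n",
               onoff, fake_reg, (fake_reg >> 4) & 1);
    }
    return 0;
}
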
diff --git a/drivers/media/dvb/frontends/dib7000m.h b/drivers/media/dvb/frontends/dib7000m.h
index 113819c..81fcf22 100644
--- a/drivers/media/dvb/frontends/dib7000m.h
+++ b/drivers/media/dvb/frontends/dib7000m.h
@@ -46,6 +46,8 @@
 extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *,
 						   enum dibx000_i2c_interface,
 						   int);
+extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
+extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
 #else
 static inline
 struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
@@ -63,6 +65,19 @@
 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
 	return NULL;
 }
+static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id,
+						u16 pid, u8 onoff)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return -ENODEV;
+}
+
+static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe,
+						uint8_t onoff)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return -ENODEV;
+}
 #endif
 
 /* TODO
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
index 6360c68..6c2e929 100644
--- a/drivers/media/dvb/frontends/ix2505v.c
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -311,7 +311,7 @@
 	return fe;
 
 error:
-	ix2505v_release(fe);
+	kfree(state);
 	return NULL;
 }
 EXPORT_SYMBOL(ix2505v_attach);
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index d3ad3e7..cc4acd2 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -43,6 +43,8 @@
 	const struct mb86a20s_config *config;
 
 	struct dvb_frontend frontend;
+
+	bool need_init;
 };
 
 struct regdata {
@@ -318,7 +320,7 @@
 
 	rc = i2c_transfer(state->i2c, &msg, 1);
 	if (rc != 1) {
-		printk("%s: writereg rcor(rc == %i, reg == 0x%02x,"
+		printk("%s: writereg error (rc == %i, reg == 0x%02x,"
 			 " data == 0x%02x)\n", __func__, rc, reg, data);
 		return rc;
 	}
@@ -353,7 +355,7 @@
 	rc = i2c_transfer(state->i2c, msg, 2);
 
 	if (rc != 2) {
-		rc("%s: reg=0x%x (rcor=%d)\n", __func__, reg, rc);
+		rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc);
 		return rc;
 	}
 
@@ -382,23 +384,31 @@
 	/* Initialize the frontend */
 	rc = mb86a20s_writeregdata(state, mb86a20s_init);
 	if (rc < 0)
-		return rc;
+		goto err;
 
 	if (!state->config->is_serial) {
 		regD5 &= ~1;
 
 		rc = mb86a20s_writereg(state, 0x50, 0xd5);
 		if (rc < 0)
-			return rc;
+			goto err;
 		rc = mb86a20s_writereg(state, 0x51, regD5);
 		if (rc < 0)
-			return rc;
+			goto err;
 	}
 
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 1);
 
-	return 0;
+err:
+	if (rc < 0) {
+		state->need_init = true;
+		printk(KERN_INFO "mb86a20s: Init failed. Will try again later\n");
+	} else {
+		state->need_init = false;
+		dprintk("Initialization succeeded.\n");
+	}
+	return rc;
 }
 
 static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
@@ -485,8 +495,22 @@
 
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 1);
+	dprintk("Calling tuner set parameters\n");
 	fe->ops.tuner_ops.set_params(fe, p);
 
+	/*
+	 * Make it more reliable: if, for some reason, the initial
+	 * device initialization doesn't happen, initialize it when
+	 * the SBTVD parameters are adjusted.
+	 *
+	 * Unfortunately, due to a hard-to-track bug in tda829x/tda18271,
+	 * the agc callback logic is not called during DVB attach time,
+	 * causing mb86a20s to not be initialized with the Kworld SBTVD.
+	 * So, this hack is needed in order to make the Kworld SBTVD work.
+	 */
+	if (state->need_init)
+		mb86a20s_initfe(fe);
+
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 0);
 	rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
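
The need_init flag added to mb86a20s implements a lazy-retry pattern: when the attach-time initialization fails (the comment blames the tuner's AGC callback not running yet), the failure is remembered and the next set_frontend call re-runs the init before tuning. A minimal model of that pattern, with invented names and a deliberately failing first attempt:

#include <stdio.h>
#include <stdbool.h>

struct demod_state {
    bool need_init;
    int init_attempts;   /* lets the fake init fail exactly once */
};

static int demod_init(struct demod_state *s)
{
    int rc = (s->init_attempts++ == 0) ? -1 : 0;  /* fail the first time */

    s->need_init = (rc < 0);
    printf("init %s\n", rc < 0 ? "failed, will retry later" : "ok");
    return rc;
}

static int demod_set_params(struct demod_state *s)
{
    if (s->need_init && demod_init(s) < 0)
        return -1;
    printf("tuning with initialized hardware\n");
    return 0;
}

int main(void)
{
    struct demod_state s = { 0 };

    demod_init(&s);        /* attach-time init fails (no AGC callback yet) */
    demod_set_params(&s);  /* first tune retries the init and then proceeds */
    return 0;
}
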
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c
index 59feeb8..10a432a 100644
--- a/drivers/media/dvb/mantis/mantis_pci.c
+++ b/drivers/media/dvb/mantis/mantis_pci.c
@@ -22,7 +22,6 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <asm/io.h>
-#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <linux/kmod.h>
 #include <linux/vmalloc.h>
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index 122c728..9fc1dd0 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -277,7 +277,7 @@
 	{
 		ca_slot_info_t *info=(ca_slot_info_t *)parg;
 
-		if (info->num > 1)
+		if (info->num < 0 || info->num > 1)
 			return -EINVAL;
 		av7110->ci_slot[info->num].num = info->num;
 		av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 3c5a473..ecdffa6 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -151,20 +151,6 @@
 	  following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and
 	  0x28c.
 
-config RADIO_GEMTEK_PCI
-	tristate "GemTek PCI Radio Card support"
-	depends on VIDEO_V4L2 && PCI
-	---help---
-	  Choose Y here if you have this PCI FM radio card.
-
-	  In order to control your radio card, you will need to use programs
-	  that are compatible with the Video for Linux API.  Information on
-	  this API and pointers to "v4l" programs may be found at
-	  <file:Documentation/video4linux/API.html>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called radio-gemtek-pci.
-
 config RADIO_MAXIRADIO
 	tristate "Guillemot MAXI Radio FM 2000 radio"
 	depends on VIDEO_V4L2 && PCI
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index d297074..717656d 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -13,7 +13,6 @@
 obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
 obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
 obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
-obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o
 obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
 obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
 obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 6cc5d13..4ce10db 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -31,6 +31,7 @@
 #include <linux/module.h>	/* Modules 			*/
 #include <linux/init.h>		/* Initdata			*/
 #include <linux/ioport.h>	/* request_region		*/
+#include <linux/delay.h>	/* msleep			*/
 #include <linux/videodev2.h>	/* kernel radio structs		*/
 #include <linux/version.h>	/* for KERNEL_VERSION MACRO	*/
 #include <linux/io.h>		/* outb, outb_p			*/
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
deleted file mode 100644
index 28fa85b..0000000
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- ***************************************************************************
- *
- *     radio-gemtek-pci.c - Gemtek PCI Radio driver
- *     (C) 2001 Vladimir Shebordaev <vshebordaev@mail.ru>
- *
- ***************************************************************************
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of
- *     the License, or (at your option) any later version.
- *
- *     This program is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- *     You should have received a copy of the GNU General Public
- *     License along with this program; if not, write to the Free
- *     Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- *     USA.
- *
- ***************************************************************************
- *
- *     Gemtek Corp still silently refuses to release any specifications
- *     of their multimedia devices, so the protocol still has to be
- *     reverse engineered.
- *
- *     The v4l code was inspired by Jonas Munsin's  Gemtek serial line
- *     radio device driver.
- *
- *     Please, let me know if this piece of code was useful :)
- *
- *     TODO: multiple device support and portability were not tested
- *
- *     Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
- *
- ***************************************************************************
- */
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/videodev2.h>
-#include <linux/errno.h>
-#include <linux/version.h>      /* for KERNEL_VERSION MACRO     */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-MODULE_AUTHOR("Vladimir Shebordaev <vshebordaev@mail.ru>");
-MODULE_DESCRIPTION("The video4linux driver for the Gemtek PCI Radio Card");
-MODULE_LICENSE("GPL");
-
-static int nr_radio = -1;
-static int mx = 1;
-
-module_param(mx, bool, 0);
-MODULE_PARM_DESC(mx, "single digit: 1 - turn off the turner upon module exit (default), 0 - do not");
-module_param(nr_radio, int, 0);
-MODULE_PARM_DESC(nr_radio, "video4linux device number to use");
-
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
-#ifndef PCI_VENDOR_ID_GEMTEK
-#define PCI_VENDOR_ID_GEMTEK 0x5046
-#endif
-
-#ifndef PCI_DEVICE_ID_GEMTEK_PR103
-#define PCI_DEVICE_ID_GEMTEK_PR103 0x1001
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_LOW
-#define GEMTEK_PCI_RANGE_LOW (87*16000)
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_HIGH
-#define GEMTEK_PCI_RANGE_HIGH (108*16000)
-#endif
-
-struct gemtek_pci {
-	struct v4l2_device v4l2_dev;
-	struct video_device vdev;
-	struct mutex lock;
-	struct pci_dev *pdev;
-
-	u32 iobase;
-	u32 length;
-
-	u32 current_frequency;
-	u8  mute;
-};
-
-static inline struct gemtek_pci *to_gemtek_pci(struct v4l2_device *v4l2_dev)
-{
-	return container_of(v4l2_dev, struct gemtek_pci, v4l2_dev);
-}
-
-static inline u8 gemtek_pci_out(u16 value, u32 port)
-{
-	outw(value, port);
-
-	return (u8)value;
-}
-
-#define _b0(v) (*((u8 *)&v))
-
-static void __gemtek_pci_cmd(u16 value, u32 port, u8 *last_byte, int keep)
-{
-	u8 byte = *last_byte;
-
-	if (!value) {
-		if (!keep)
-			value = (u16)port;
-		byte &= 0xfd;
-	} else
-		byte |= 2;
-
-	_b0(value) = byte;
-	outw(value, port);
-	byte |= 1;
-	_b0(value) = byte;
-	outw(value, port);
-	byte &= 0xfe;
-	_b0(value) = byte;
-	outw(value, port);
-
-	*last_byte = byte;
-}
-
-static inline void gemtek_pci_nil(u32 port, u8 *last_byte)
-{
-	__gemtek_pci_cmd(0x00, port, last_byte, false);
-}
-
-static inline void gemtek_pci_cmd(u16 cmd, u32 port, u8 *last_byte)
-{
-	__gemtek_pci_cmd(cmd, port, last_byte, true);
-}
-
-static void gemtek_pci_setfrequency(struct gemtek_pci *card, unsigned long frequency)
-{
-	int i;
-	u32 value = frequency / 200 + 856;
-	u16 mask = 0x8000;
-	u8 last_byte;
-	u32 port = card->iobase;
-
-	mutex_lock(&card->lock);
-	card->current_frequency = frequency;
-	last_byte = gemtek_pci_out(0x06, port);
-
-	i = 0;
-	do {
-		gemtek_pci_nil(port, &last_byte);
-		i++;
-	} while (i < 9);
-
-	i = 0;
-	do {
-		gemtek_pci_cmd(value & mask, port, &last_byte);
-		mask >>= 1;
-		i++;
-	} while (i < 16);
-
-	outw(0x10, port);
-	mutex_unlock(&card->lock);
-}
-
-
-static void gemtek_pci_mute(struct gemtek_pci *card)
-{
-	mutex_lock(&card->lock);
-	outb(0x1f, card->iobase);
-	card->mute = true;
-	mutex_unlock(&card->lock);
-}
-
-static void gemtek_pci_unmute(struct gemtek_pci *card)
-{
-	if (card->mute) {
-		gemtek_pci_setfrequency(card, card->current_frequency);
-		card->mute = false;
-	}
-}
-
-static int gemtek_pci_getsignal(struct gemtek_pci *card)
-{
-	int sig;
-
-	mutex_lock(&card->lock);
-	sig = (inb(card->iobase) & 0x08) ? 0 : 1;
-	mutex_unlock(&card->lock);
-	return sig;
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
-					struct v4l2_capability *v)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	strlcpy(v->driver, "radio-gemtek-pci", sizeof(v->driver));
-	strlcpy(v->card, "GemTek PCI Radio", sizeof(v->card));
-	snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(card->pdev));
-	v->version = RADIO_VERSION;
-	v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
-	return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
-					struct v4l2_tuner *v)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	if (v->index > 0)
-		return -EINVAL;
-
-	strlcpy(v->name, "FM", sizeof(v->name));
-	v->type = V4L2_TUNER_RADIO;
-	v->rangelow = GEMTEK_PCI_RANGE_LOW;
-	v->rangehigh = GEMTEK_PCI_RANGE_HIGH;
-	v->rxsubchans = V4L2_TUNER_SUB_MONO;
-	v->capability = V4L2_TUNER_CAP_LOW;
-	v->audmode = V4L2_TUNER_MODE_MONO;
-	v->signal = 0xffff * gemtek_pci_getsignal(card);
-	return 0;
-}
-
-static int vidioc_s_tuner(struct file *file, void *priv,
-					struct v4l2_tuner *v)
-{
-	return v->index ? -EINVAL : 0;
-}
-
-static int vidioc_s_frequency(struct file *file, void *priv,
-					struct v4l2_frequency *f)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
-		return -EINVAL;
-	if (f->frequency < GEMTEK_PCI_RANGE_LOW ||
-	    f->frequency > GEMTEK_PCI_RANGE_HIGH)
-		return -EINVAL;
-	gemtek_pci_setfrequency(card, f->frequency);
-	card->mute = false;
-	return 0;
-}
-
-static int vidioc_g_frequency(struct file *file, void *priv,
-					struct v4l2_frequency *f)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	if (f->tuner != 0)
-		return -EINVAL;
-	f->type = V4L2_TUNER_RADIO;
-	f->frequency = card->current_frequency;
-	return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
-					struct v4l2_queryctrl *qc)
-{
-	switch (qc->id) {
-	case V4L2_CID_AUDIO_MUTE:
-		return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
-	case V4L2_CID_AUDIO_VOLUME:
-		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535, 65535);
-	}
-	return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
-					struct v4l2_control *ctrl)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_MUTE:
-		ctrl->value = card->mute;
-		return 0;
-	case V4L2_CID_AUDIO_VOLUME:
-		if (card->mute)
-			ctrl->value = 0;
-		else
-			ctrl->value = 65535;
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
-					struct v4l2_control *ctrl)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_MUTE:
-		if (ctrl->value)
-			gemtek_pci_mute(card);
-		else
-			gemtek_pci_unmute(card);
-		return 0;
-	case V4L2_CID_AUDIO_VOLUME:
-		if (ctrl->value)
-			gemtek_pci_unmute(card);
-		else
-			gemtek_pci_mute(card);
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
-	*i = 0;
-	return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
-	return i ? -EINVAL : 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
-					struct v4l2_audio *a)
-{
-	a->index = 0;
-	strlcpy(a->name, "Radio", sizeof(a->name));
-	a->capability = V4L2_AUDCAP_STEREO;
-	return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
-					struct v4l2_audio *a)
-{
-	return a->index ? -EINVAL : 0;
-}
-
-enum {
-	GEMTEK_PR103
-};
-
-static char *card_names[] __devinitdata = {
-	"GEMTEK_PR103"
-};
-
-static struct pci_device_id gemtek_pci_id[] =
-{
-	{ PCI_VENDOR_ID_GEMTEK, PCI_DEVICE_ID_GEMTEK_PR103,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, GEMTEK_PR103 },
-	{ 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, gemtek_pci_id);
-
-static const struct v4l2_file_operations gemtek_pci_fops = {
-	.owner		= THIS_MODULE,
-	.unlocked_ioctl	= video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = {
-	.vidioc_querycap    = vidioc_querycap,
-	.vidioc_g_tuner     = vidioc_g_tuner,
-	.vidioc_s_tuner     = vidioc_s_tuner,
-	.vidioc_g_audio     = vidioc_g_audio,
-	.vidioc_s_audio     = vidioc_s_audio,
-	.vidioc_g_input     = vidioc_g_input,
-	.vidioc_s_input     = vidioc_s_input,
-	.vidioc_g_frequency = vidioc_g_frequency,
-	.vidioc_s_frequency = vidioc_s_frequency,
-	.vidioc_queryctrl   = vidioc_queryctrl,
-	.vidioc_g_ctrl      = vidioc_g_ctrl,
-	.vidioc_s_ctrl      = vidioc_s_ctrl,
-};
-
-static int __devinit gemtek_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
-{
-	struct gemtek_pci *card;
-	struct v4l2_device *v4l2_dev;
-	int res;
-
-	card = kzalloc(sizeof(struct gemtek_pci), GFP_KERNEL);
-	if (card == NULL) {
-		dev_err(&pdev->dev, "out of memory\n");
-		return -ENOMEM;
-	}
-
-	v4l2_dev = &card->v4l2_dev;
-	mutex_init(&card->lock);
-	card->pdev = pdev;
-
-	strlcpy(v4l2_dev->name, "gemtek_pci", sizeof(v4l2_dev->name));
-
-	res = v4l2_device_register(&pdev->dev, v4l2_dev);
-	if (res < 0) {
-		v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
-		kfree(card);
-		return res;
-	}
-
-	if (pci_enable_device(pdev))
-		goto err_pci;
-
-	card->iobase = pci_resource_start(pdev, 0);
-	card->length = pci_resource_len(pdev, 0);
-
-	if (request_region(card->iobase, card->length, card_names[pci_id->driver_data]) == NULL) {
-		v4l2_err(v4l2_dev, "i/o port already in use\n");
-		goto err_pci;
-	}
-
-	strlcpy(card->vdev.name, v4l2_dev->name, sizeof(card->vdev.name));
-	card->vdev.v4l2_dev = v4l2_dev;
-	card->vdev.fops = &gemtek_pci_fops;
-	card->vdev.ioctl_ops = &gemtek_pci_ioctl_ops;
-	card->vdev.release = video_device_release_empty;
-	video_set_drvdata(&card->vdev, card);
-
-	gemtek_pci_mute(card);
-
-	if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0)
-		goto err_video;
-
-	v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n",
-		pdev->revision, card->iobase, card->iobase + card->length - 1);
-
-	return 0;
-
-err_video:
-	release_region(card->iobase, card->length);
-
-err_pci:
-	v4l2_device_unregister(v4l2_dev);
-	kfree(card);
-	return -ENODEV;
-}
-
-static void __devexit gemtek_pci_remove(struct pci_dev *pdev)
-{
-	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
-	struct gemtek_pci *card = to_gemtek_pci(v4l2_dev);
-
-	video_unregister_device(&card->vdev);
-	v4l2_device_unregister(v4l2_dev);
-
-	release_region(card->iobase, card->length);
-
-	if (mx)
-		gemtek_pci_mute(card);
-
-	kfree(card);
-}
-
-static struct pci_driver gemtek_pci_driver = {
-	.name		= "gemtek_pci",
-	.id_table	= gemtek_pci_id,
-	.probe		= gemtek_pci_probe,
-	.remove		= __devexit_p(gemtek_pci_remove),
-};
-
-static int __init gemtek_pci_init(void)
-{
-	return pci_register_driver(&gemtek_pci_driver);
-}
-
-static void __exit gemtek_pci_exit(void)
-{
-	pci_unregister_driver(&gemtek_pci_driver);
-}
-
-module_init(gemtek_pci_init);
-module_exit(gemtek_pci_exit);
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 6459a22..5c2a905 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -77,8 +77,8 @@
 /* TEA5757 pin mappings */
 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
 
-#define FREQ_LO		(50 * 16000)
-#define FREQ_HI		(150 * 16000)
+#define FREQ_LO		(87 * 16000)
+#define FREQ_HI		(108 * 16000)
 
 #define FREQ_IF         171200 /* 10.7*16000   */
 #define FREQ_STEP       200    /* 12.5*16      */
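
The new maxiradio limits are simply the FM broadcast band expressed in the 62.5 Hz units a V4L2 low-band tuner uses (1/16000 of a MHz), so 87 * 16000 and 108 * 16000 bound 87-108 MHz. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
    const double unit_hz = 62.5;   /* V4L2 low-band tuner unit */

    printf("FREQ_LO = %d (%.1f MHz)\n",
           87 * 16000, 87 * 16000 * unit_hz / 1e6);
    printf("FREQ_HI = %d (%.1f MHz)\n",
           108 * 16000, 108 * 16000 * unit_hz / 1e6);
    return 0;
}
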
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index dd6bd36..7ecc8e6 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1407,7 +1407,7 @@
 	.read		= wl1273_fm_fops_read,
 	.write		= wl1273_fm_fops_write,
 	.poll		= wl1273_fm_fops_poll,
-	.ioctl		= video_ioctl2,
+	.unlocked_ioctl	= video_ioctl2,
 	.open		= wl1273_fm_fops_open,
 	.release	= wl1273_fm_fops_release,
 };
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index ac76dfe..60c176f 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -357,7 +357,8 @@
 		goto done;
 
 	/* sysconfig 1 */
-	radio->registers[SYSCONFIG1] = SYSCONFIG1_DE;
+	radio->registers[SYSCONFIG1] =
+		(de << 11) & SYSCONFIG1_DE;		/* DE*/
 	retval = si470x_set_register(radio, SYSCONFIG1);
 	if (retval < 0)
 		goto done;
@@ -687,12 +688,8 @@
 	/* driver constants */
 	strcpy(tuner->name, "FM");
 	tuner->type = V4L2_TUNER_RADIO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
 	tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
 			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
-#else
-	tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
-#endif
 
 	/* range limits */
 	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
@@ -718,12 +715,10 @@
 		tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
 	else
 		tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
 	/* If there is a reliable method of detecting an RDS channel,
 	   then this code should check for that before setting this
 	   RDS subchannel. */
 	tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
-#endif
 
 	/* mono/stereo selector */
 	if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0)
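
The de-emphasis setting is now taken from the de parameter and shifted into SYSCONFIG1 rather than hard-coded. A minimal standalone sketch of that bit packing, assuming SYSCONFIG1_DE is the single bit at position 11 (0x0800):

#include <stdio.h>

/* Assumption: SYSCONFIG1_DE is the single de-emphasis bit at position 11. */
#define SYSCONFIG1_DE	0x0800

/* 'de' is the 0/1 module parameter selecting the de-emphasis time constant. */
static unsigned int sysconfig1_de_field(unsigned int de)
{
	return (de << 11) & SYSCONFIG1_DE;
}

int main(void)
{
	printf("de=0 -> 0x%04x\n", sysconfig1_de_field(0));	/* 0x0000 */
	printf("de=1 -> 0x%04x\n", sysconfig1_de_field(1));	/* 0x0800 */
	return 0;
}
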
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 80b3c31..1ac4913 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -446,27 +446,27 @@
 
 select_timeout:
 	if (dev->rx_fan_input_inuse) {
-		dev->rdev->rx_resolution = MS_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
+		dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
 
 		/* Fan input doesn't support timeouts, it just ends the
 			input with a maximum sample */
 		dev->rdev->min_timeout = dev->rdev->max_timeout =
-			MS_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
+			US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
 				ENE_FW_SAMPLE_PERIOD_FAN);
 	} else {
-		dev->rdev->rx_resolution = MS_TO_NS(sample_period);
+		dev->rdev->rx_resolution = US_TO_NS(sample_period);
 
 		/* Theoretically timeout is unlimited, but we cap it
 		 * because it was seen that on one device, it
 		 * would stop sending spaces after around 250 msec.
 		 * Besides, this is close to 2^32 anyway and timeout is u32.
 		 */
-		dev->rdev->min_timeout = MS_TO_NS(127 * sample_period);
-		dev->rdev->max_timeout = MS_TO_NS(200000);
+		dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
+		dev->rdev->max_timeout = US_TO_NS(200000);
 	}
 
 	if (dev->hw_learning_and_tx_capable)
-		dev->rdev->tx_resolution = MS_TO_NS(sample_period);
+		dev->rdev->tx_resolution = US_TO_NS(sample_period);
 
 	if (dev->rdev->timeout > dev->rdev->max_timeout)
 		dev->rdev->timeout = dev->rdev->max_timeout;
@@ -801,7 +801,7 @@
 
 		dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
 
-		ev.duration = MS_TO_NS(hw_sample);
+		ev.duration = US_TO_NS(hw_sample);
 		ev.pulse = pulse;
 		ir_raw_event_store_with_filter(dev->rdev, &ev);
 	}
@@ -821,7 +821,7 @@
 	dev->learning_mode_enabled = learning_mode_force;
 
 	/* Set reasonable default timeout */
-	dev->rdev->timeout = MS_TO_NS(150000);
+	dev->rdev->timeout = US_TO_NS(150000);
 }
 
 /* Upload all hardware settings at once. Used at load and resume time */
@@ -1004,6 +1004,10 @@
 	/* validate resources */
 	error = -ENODEV;
 
+	/* init these to -1, as 0 is valid for both */
+	dev->hw_io = -1;
+	dev->irq = -1;
+
 	if (!pnp_port_valid(pnp_dev, 0) ||
 	    pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
 		goto error;
@@ -1072,6 +1076,8 @@
 		rdev->input_name = "ENE eHome Infrared Remote Transceiver";
 	}
 
+	dev->rdev = rdev;
+
 	ene_rx_setup_hw_buffer(dev);
 	ene_setup_default_settings(dev);
 	ene_setup_hw_settings(dev);
@@ -1083,7 +1089,6 @@
 	if (error < 0)
 		goto error;
 
-	dev->rdev = rdev;
 	ene_notice("driver has been succesfully loaded");
 	return 0;
 error:
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index c179baf..337a41d 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -201,8 +201,6 @@
 #define dbg_verbose(format, ...)	__dbg(2, format, ## __VA_ARGS__)
 #define dbg_regs(format, ...)		__dbg(3, format, ## __VA_ARGS__)
 
-#define MS_TO_NS(msec) ((msec) * 1000)
-
 struct ene_device {
 	struct pnp_dev *pnp_dev;
 	struct rc_dev *rdev;
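
The removed macro was misnamed: it only multiplied by 1000, which is a microsecond-to-nanosecond conversion, and the samples it was applied to are in microseconds. A standalone sketch contrasting the two conversions, assuming the usual meaning of US_TO_NS/MS_TO_NS:

#include <stdio.h>

/*
 * Assumed helpers with their conventional meaning.  The macro removed
 * above was called MS_TO_NS but only multiplied by 1000, i.e. it really
 * performed the microsecond-to-nanosecond conversion used here.
 */
#define US_TO_NS(usec)	((usec) * 1000)
#define MS_TO_NS(msec)	((msec) * 1000000)

int main(void)
{
	unsigned int sample_period = 50;	/* hardware sample period, us */

	printf("US_TO_NS(%u) = %u ns\n", sample_period, US_TO_NS(sample_period));
	printf("MS_TO_NS(%u) = %u ns\n", sample_period, MS_TO_NS(sample_period));
	return 0;
}
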
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6811512..e7dc6b4 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -988,7 +988,6 @@
 	int retval;
 	struct imon_context *ictx = rc->priv;
 	struct device *dev = ictx->dev;
-	bool pad_mouse;
 	unsigned char ir_proto_packet[] = {
 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
 
@@ -1000,29 +999,20 @@
 	case RC_TYPE_RC6:
 		dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
 		ir_proto_packet[0] = 0x01;
-		pad_mouse = false;
 		break;
 	case RC_TYPE_UNKNOWN:
 	case RC_TYPE_OTHER:
 		dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
-		if (pad_stabilize && !nomouse)
-			pad_mouse = true;
-		else {
+		if (!pad_stabilize)
 			dev_dbg(dev, "PAD stabilize functionality disabled\n");
-			pad_mouse = false;
-		}
 		/* ir_proto_packet[0] = 0x00; // already the default */
 		rc_type = RC_TYPE_OTHER;
 		break;
 	default:
 		dev_warn(dev, "Unsupported IR protocol specified, overriding "
 			 "to iMON IR protocol\n");
-		if (pad_stabilize && !nomouse)
-			pad_mouse = true;
-		else {
+		if (!pad_stabilize)
 			dev_dbg(dev, "PAD stabilize functionality disabled\n");
-			pad_mouse = false;
-		}
 		/* ir_proto_packet[0] = 0x00; // already the default */
 		rc_type = RC_TYPE_OTHER;
 		break;
@@ -1035,7 +1025,7 @@
 		goto out;
 
 	ictx->rc_type = rc_type;
-	ictx->pad_mouse = pad_mouse;
+	ictx->pad_mouse = false;
 
 out:
 	return retval;
@@ -1517,7 +1507,7 @@
 			spin_unlock_irqrestore(&ictx->kc_lock, flags);
 			return;
 		} else {
-			ictx->pad_mouse = 0;
+			ictx->pad_mouse = false;
 			dev_dbg(dev, "mouse mode disabled, passing key value\n");
 		}
 	}
@@ -1756,7 +1746,6 @@
 	printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
 
 	ictx->display_type = detected_display_type;
-	ictx->rdev->allowed_protos = allowed_protos;
 	ictx->rc_type = allowed_protos;
 }
 
@@ -1839,10 +1828,6 @@
 	rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
 	rdev->change_protocol = imon_ir_change_protocol;
 	rdev->driver_name = MOD_NAME;
-	if (ictx->rc_type == RC_TYPE_RC6)
-		rdev->map_name = RC_MAP_IMON_MCE;
-	else
-		rdev->map_name = RC_MAP_IMON_PAD;
 
 	/* Enable front-panel buttons and/or knobs */
 	memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
@@ -1851,11 +1836,18 @@
 	if (ret)
 		dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
 
-	if (ictx->product == 0xffdc)
+	if (ictx->product == 0xffdc) {
 		imon_get_ffdc_type(ictx);
+		rdev->allowed_protos = ictx->rc_type;
+	}
 
 	imon_set_display_type(ictx);
 
+	if (ictx->rc_type == RC_TYPE_RC6)
+		rdev->map_name = RC_MAP_IMON_MCE;
+	else
+		rdev->map_name = RC_MAP_IMON_PAD;
+
 	ret = rc_register_device(rdev);
 	if (ret < 0) {
 		dev_err(ictx->dev, "remote input dev register failed\n");
@@ -2108,18 +2100,6 @@
 		goto find_endpoint_failed;
 	}
 
-	ictx->idev = imon_init_idev(ictx);
-	if (!ictx->idev) {
-		dev_err(dev, "%s: input device setup failed\n", __func__);
-		goto idev_setup_failed;
-	}
-
-	ictx->rdev = imon_init_rdev(ictx);
-	if (!ictx->rdev) {
-		dev_err(dev, "%s: rc device setup failed\n", __func__);
-		goto rdev_setup_failed;
-	}
-
 	usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
 		usb_rcvintpipe(ictx->usbdev_intf0,
 			ictx->rx_endpoint_intf0->bEndpointAddress),
@@ -2133,13 +2113,25 @@
 		goto urb_submit_failed;
 	}
 
+	ictx->idev = imon_init_idev(ictx);
+	if (!ictx->idev) {
+		dev_err(dev, "%s: input device setup failed\n", __func__);
+		goto idev_setup_failed;
+	}
+
+	ictx->rdev = imon_init_rdev(ictx);
+	if (!ictx->rdev) {
+		dev_err(dev, "%s: rc device setup failed\n", __func__);
+		goto rdev_setup_failed;
+	}
+
 	return ictx;
 
-urb_submit_failed:
-	rc_unregister_device(ictx->rdev);
 rdev_setup_failed:
 	input_unregister_device(ictx->idev);
 idev_setup_failed:
+	usb_kill_urb(ictx->rx_urb_intf0);
+urb_submit_failed:
 find_endpoint_failed:
 	mutex_unlock(&ictx->lock);
 	usb_free_urb(tx_urb);
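
The reordered error labels follow the usual rule that unwinding happens in the reverse order of setup, so a failed urb submission no longer tries to tear down devices that were never created. A generic, runnable sketch of that goto-unwind pattern (the acquire/release helpers are stand-ins, not imon functions):

#include <stdbool.h>
#include <stdio.h>

/*
 * Generic illustration only: a failure at step N unwinds exactly the
 * steps that already succeeded, in reverse order, which is the property
 * the reordered labels above restore.
 */
static bool acquire_a(void) { return true; }
static bool acquire_b(void) { return true; }
static bool acquire_c(void) { return false; }	/* pretend this step fails */
static void release_b(void) { puts("undo b"); }
static void release_a(void) { puts("undo a"); }

static int probe(void)
{
	if (!acquire_a())
		goto err_a;
	if (!acquire_b())
		goto err_b;
	if (!acquire_c())
		goto err_c;
	return 0;

err_c:
	release_b();
err_b:
	release_a();
err_a:
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}
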
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index f011c5d..1c5cc65 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -1,4 +1,4 @@
-/* ir-lirc-codec.c - ir-core to classic lirc interface bridge
+/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
  *
  * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
  *
@@ -47,6 +47,7 @@
 	/* Carrier reports */
 	if (ev.carrier_report) {
 		sample = LIRC_FREQUENCY(ev.carrier);
+		IR_dprintk(2, "carrier report (freq: %d)\n", sample);
 
 	/* Packet end */
 	} else if (ev.timeout) {
@@ -62,6 +63,7 @@
 			return 0;
 
 		sample = LIRC_TIMEOUT(ev.duration / 1000);
+		IR_dprintk(2, "timeout report (duration: %d)\n", sample);
 
 	/* Normal sample */
 	} else {
@@ -85,6 +87,8 @@
 
 		sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
 					LIRC_SPACE(ev.duration / 1000);
+		IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
+			   TO_US(ev.duration), TO_STR(ev.pulse));
 	}
 
 	lirc_buffer_write(dev->raw->lirc.drv->rbuf,
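
The samples handed to lirc_dev pack a type code and a microsecond value into one 32-bit word, which is why the nanosecond durations from rc-core are divided by 1000 first. A standalone sketch of that packing, assuming the usual lirc.h-style encoding:

#include <stdio.h>

/* Assumed lirc.h-style packing: the low 24 bits carry the value (here a
 * duration in microseconds), the top byte carries the sample type. */
#define LIRC_VALUE_MASK		0x00ffffff
#define LIRC_MODE2_SPACE	0x00000000
#define LIRC_MODE2_PULSE	0x01000000

#define LIRC_SPACE(val)		(((val) & LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
#define LIRC_PULSE(val)		(((val) & LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)

int main(void)
{
	unsigned int duration_ns = 900000;	/* 900us pulse from rc-core */

	/* rc-core durations are in ns, lirc samples in us, hence the /1000 */
	printf("pulse sample: 0x%08x\n", LIRC_PULSE(duration_ns / 1000));
	printf("space sample: 0x%08x\n", LIRC_SPACE(duration_ns / 1000));
	return 0;
}
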
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 185badd..01f258a 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -112,7 +112,7 @@
 {
 	ktime_t			now;
 	s64			delta; /* ns */
-	struct ir_raw_event	ev;
+	DEFINE_IR_RAW_EVENT(ev);
 	int			rc = 0;
 
 	if (!dev->raw)
@@ -125,7 +125,6 @@
 	 * being called for the first time, note that delta can't
 	 * possibly be negative.
 	 */
-	ev.duration = 0;
 	if (delta > IR_MAX_DURATION || !dev->raw->last_type)
 		type |= IR_START_EVENT;
 	else
@@ -233,7 +232,7 @@
 
 /* used internally by the sysfs interface */
 u64
-ir_raw_get_allowed_protocols()
+ir_raw_get_allowed_protocols(void)
 {
 	u64 protocols;
 	mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index c59851b..7a5f530 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -19,35 +19,35 @@
 
 static struct rc_map_table dib0700_nec_table[] = {
 	/* Key codes for the Pixelview SBTVD remote */
-	{ 0x8613, KEY_MUTE },
-	{ 0x8612, KEY_POWER },
-	{ 0x8601, KEY_1 },
-	{ 0x8602, KEY_2 },
-	{ 0x8603, KEY_3 },
-	{ 0x8604, KEY_4 },
-	{ 0x8605, KEY_5 },
-	{ 0x8606, KEY_6 },
-	{ 0x8607, KEY_7 },
-	{ 0x8608, KEY_8 },
-	{ 0x8609, KEY_9 },
-	{ 0x8600, KEY_0 },
-	{ 0x860d, KEY_CHANNELUP },
-	{ 0x8619, KEY_CHANNELDOWN },
-	{ 0x8610, KEY_VOLUMEUP },
-	{ 0x860c, KEY_VOLUMEDOWN },
+	{ 0x866b13, KEY_MUTE },
+	{ 0x866b12, KEY_POWER },
+	{ 0x866b01, KEY_1 },
+	{ 0x866b02, KEY_2 },
+	{ 0x866b03, KEY_3 },
+	{ 0x866b04, KEY_4 },
+	{ 0x866b05, KEY_5 },
+	{ 0x866b06, KEY_6 },
+	{ 0x866b07, KEY_7 },
+	{ 0x866b08, KEY_8 },
+	{ 0x866b09, KEY_9 },
+	{ 0x866b00, KEY_0 },
+	{ 0x866b0d, KEY_CHANNELUP },
+	{ 0x866b19, KEY_CHANNELDOWN },
+	{ 0x866b10, KEY_VOLUMEUP },
+	{ 0x866b0c, KEY_VOLUMEDOWN },
 
-	{ 0x860a, KEY_CAMERA },
-	{ 0x860b, KEY_ZOOM },
-	{ 0x861b, KEY_BACKSPACE },
-	{ 0x8615, KEY_ENTER },
+	{ 0x866b0a, KEY_CAMERA },
+	{ 0x866b0b, KEY_ZOOM },
+	{ 0x866b1b, KEY_BACKSPACE },
+	{ 0x866b15, KEY_ENTER },
 
-	{ 0x861d, KEY_UP },
-	{ 0x861e, KEY_DOWN },
-	{ 0x860e, KEY_LEFT },
-	{ 0x860f, KEY_RIGHT },
+	{ 0x866b1d, KEY_UP },
+	{ 0x866b1e, KEY_DOWN },
+	{ 0x866b0e, KEY_LEFT },
+	{ 0x866b0f, KEY_RIGHT },
 
-	{ 0x8618, KEY_RECORD },
-	{ 0x861a, KEY_STOP },
+	{ 0x866b18, KEY_RECORD },
+	{ 0x866b1a, KEY_STOP },
 
 	/* Key codes for the EvolutePC TVWay+ remote */
 	{ 0x7a00, KEY_MENU },
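
The updated Pixelview entries carry the full extended-NEC scancode (address, not-address and command bytes) instead of the short 16-bit form. A standalone sketch of the composition the new table values imply (illustrative, not decoder code):

#include <stdio.h>

/*
 * Illustration of the extended-NEC layout the new entries use: address
 * in bits 23..16, the "not address" byte in bits 15..8, command in bits
 * 7..0.
 */
static unsigned int nec_ext_scancode(unsigned char addr,
				     unsigned char not_addr,
				     unsigned char cmd)
{
	return (unsigned int)addr << 16 | (unsigned int)not_addr << 8 | cmd;
}

int main(void)
{
	/* Pixelview SBTVD mute key: 0x86, 0x6b, 0x13 -> 0x866b13 */
	printf("0x%06x\n", nec_ext_scancode(0x86, 0x6b, 0x13));
	return 0;
}
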
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 3bf3337..2f5dc06 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -3,6 +3,9 @@
  *
  * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
  *
+ * See http://mediacenterguides.com/book/export/html/31 for details on
+ * key mappings.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@
 	{ 0x800f0426, KEY_EPG },		/* Guide */
 	{ 0x800f0427, KEY_ZOOM },		/* Aspect */
 
+	{ 0x800f0432, KEY_MODE },		/* Visualization */
+	{ 0x800f0433, KEY_PRESENTATION },	/* Slide Show */
+	{ 0x800f0434, KEY_EJECTCD },
 	{ 0x800f043a, KEY_BRIGHTNESSUP },
 
 	{ 0x800f0446, KEY_TV },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0fef6ef..e4f8eac 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -48,7 +48,6 @@
 #define USB_BUFLEN		32 /* USB reception buffer length */
 #define USB_CTRL_MSG_SZ		2  /* Size of usb ctrl msg on gen1 hw */
 #define MCE_G1_INIT_MSGS	40 /* Init messages on gen1 hw to throw out */
-#define MS_TO_NS(msec)		((msec) * 1000)
 
 /* MCE constants */
 #define MCE_CMDBUF_SIZE		384  /* MCE Command buffer length */
@@ -149,6 +148,7 @@
 	MCE_GEN2_TX_INV,
 	POLARIS_EVK,
 	CX_HYBRID_TV,
+	MULTIFUNCTION,
 };
 
 struct mceusb_model {
@@ -156,9 +156,10 @@
 	u32 mce_gen2:1;
 	u32 mce_gen3:1;
 	u32 tx_mask_normal:1;
-	u32 is_polaris:1;
 	u32 no_tx:1;
 
+	int ir_intfnum;
+
 	const char *rc_map;	/* Allow specify a per-board map */
 	const char *name;	/* per-board name */
 };
@@ -180,7 +181,6 @@
 		.tx_mask_normal = 1,
 	},
 	[POLARIS_EVK] = {
-		.is_polaris = 1,
 		/*
 		 * In fact, the EVK is shipped without
 		 * remotes, but we should have something handy,
@@ -190,10 +190,13 @@
 		.name = "Conexant Hybrid TV (cx231xx) MCE IR",
 	},
 	[CX_HYBRID_TV] = {
-		.is_polaris = 1,
 		.no_tx = 1, /* tx isn't wired up at all */
 		.name = "Conexant Hybrid TV (cx231xx) MCE IR",
 	},
+	[MULTIFUNCTION] = {
+		.mce_gen2 = 1,
+		.ir_intfnum = 2,
+	},
 };
 
 static struct usb_device_id mceusb_dev_table[] = {
@@ -217,8 +220,9 @@
 	{ USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
 	/* Philips/Spinel plus IR transceiver for ASUS */
 	{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
-	/* Realtek MCE IR Receiver */
-	{ USB_DEVICE(VENDOR_REALTEK, 0x0161) },
+	/* Realtek MCE IR Receiver and card reader */
+	{ USB_DEVICE(VENDOR_REALTEK, 0x0161),
+	  .driver_info = MULTIFUNCTION },
 	/* SMK/Toshiba G83C0004D410 */
 	{ USB_DEVICE(VENDOR_SMK, 0x031d),
 	  .driver_info = MCE_GEN2_TX_INV },
@@ -817,7 +821,7 @@
 	switch (ir->buf_in[index]) {
 	/* 2-byte return value commands */
 	case MCE_CMD_S_TIMEOUT:
-		ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2);
+		ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
 		break;
 
 	/* 1-byte return value commands */
@@ -856,9 +860,10 @@
 			break;
 		case PARSE_IRDATA:
 			ir->rem--;
+			init_ir_raw_event(&rawir);
 			rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
 			rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
-					 * MS_TO_NS(MCE_TIME_UNIT);
+					 * US_TO_NS(MCE_TIME_UNIT);
 
 			dev_dbg(ir->dev, "Storing %s with duration %d\n",
 				rawir.pulse ? "pulse" : "space",
@@ -884,6 +889,8 @@
 					     i, ir->rem + 1, false);
 			if (ir->rem)
 				ir->parser_state = PARSE_IRDATA;
+			else
+				ir_raw_event_reset(ir->rc);
 			break;
 		}
 
@@ -1061,7 +1068,7 @@
 	rc->priv = ir;
 	rc->driver_type = RC_DRIVER_IR_RAW;
 	rc->allowed_protos = RC_TYPE_ALL;
-	rc->timeout = MS_TO_NS(1000);
+	rc->timeout = US_TO_NS(1000);
 	if (!ir->flags.no_tx) {
 		rc->s_tx_mask = mceusb_set_tx_mask;
 		rc->s_tx_carrier = mceusb_set_tx_carrier;
@@ -1099,7 +1106,7 @@
 	bool is_gen3;
 	bool is_microsoft_gen1;
 	bool tx_mask_normal;
-	bool is_polaris;
+	int ir_intfnum;
 
 	dev_dbg(&intf->dev, "%s called\n", __func__);
 
@@ -1108,13 +1115,11 @@
 	is_gen3 = mceusb_model[model].mce_gen3;
 	is_microsoft_gen1 = mceusb_model[model].mce_gen1;
 	tx_mask_normal = mceusb_model[model].tx_mask_normal;
-	is_polaris = mceusb_model[model].is_polaris;
+	ir_intfnum = mceusb_model[model].ir_intfnum;
 
-	if (is_polaris) {
-		/* Interface 0 is IR */
-		if (idesc->desc.bInterfaceNumber)
-			return -ENODEV;
-	}
+	/* There are multi-function devices with non-IR interfaces */
+	if (idesc->desc.bInterfaceNumber != ir_intfnum)
+		return -ENODEV;
 
 	/* step through the endpoints to find first bulk in and out endpoint */
 	for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
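
Replacing the Polaris flag with a per-model ir_intfnum lets any composite device declare which interface carries the IR function; models that never set it default to 0, which is also what plain single-function receivers use. A standalone sketch of that match with stand-in types:

#include <stdio.h>

/*
 * Stand-in types, not driver code.  Only composite devices need to set
 * ir_intfnum explicitly; everything else keeps the default of 0.
 */
struct model_info {
	int ir_intfnum;
};

static const struct model_info models[] = {
	{ .ir_intfnum = 0 },	/* generic receiver: IR on interface 0 */
	{ .ir_intfnum = 2 },	/* combo device: IR on interface 2 */
};

static int probe(int model, int intf_number)
{
	if (intf_number != models[model].ir_intfnum)
		return -1;	/* not the IR interface, decline it */
	return 0;
}

int main(void)
{
	printf("model 0, intf 0: %d\n", probe(0, 0));	/* bound */
	printf("model 1, intf 0: %d\n", probe(1, 0));	/* declined */
	printf("model 1, intf 2: %d\n", probe(1, 2));	/* bound */
	return 0;
}
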
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dd4caf8..d4d6449 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -385,8 +385,9 @@
 
 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
 {
-	/* set number of bytes needed for wake key comparison (default 67) */
-	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);
+	/* set number of bytes needed for wake from s3 (default 65) */
+	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
+			       CIR_WAKE_FIFO_CMP_DEEP);
 
 	/* set tolerance/variance allowed per byte during wake compare */
 	nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
@@ -460,7 +461,7 @@
 		return 0;
 	}
 
-	carrier = (count * 1000000) / duration;
+	carrier = MS_TO_NS(count) / duration;
 
 	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
 		nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +613,8 @@
 		sample = nvt->buf[i];
 
 		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
-		rawir.duration = (sample & BUF_LEN_MASK)
-					* SAMPLE_PERIOD * 1000;
+		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
+					  * SAMPLE_PERIOD);
 
 		if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
 			if (nvt->rawir.pulse == rawir.pulse)
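
The carrier estimate divides the number of observed cycles by the measured duration; with the duration in microseconds, count * 1,000,000 / duration yields Hz, which the patch now spells with the timekeeping macro. A standalone sketch of the arithmetic (assuming a microsecond duration):

#include <stdio.h>

/* 'count' cycles observed over 'duration_us' microseconds gives
 * count * 1,000,000 / duration_us cycles per second. */
static unsigned int carrier_hz(unsigned int count, unsigned int duration_us)
{
	if (!duration_us)
		return 0;
	return (unsigned int)((unsigned long long)count * 1000000 / duration_us);
}

int main(void)
{
	/* e.g. 40 cycles over 1053us is roughly a 38 kHz carrier */
	printf("%u Hz\n", carrier_hz(40, 1053));
	return 0;
}
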
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1df8235..048135e 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -305,8 +305,11 @@
 #define CIR_WAKE_IRFIFOSTS_RX_EMPTY	0x20
 #define CIR_WAKE_IRFIFOSTS_RX_FULL	0x10
 
-/* CIR Wake FIFO buffer is 67 bytes long */
-#define CIR_WAKE_FIFO_LEN		67
+/*
+ * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes
+ * the system comparing only 65 bytes (fails with this set to 67)
+ */
+#define CIR_WAKE_FIFO_CMP_BYTES		65
 /* CIR Wake byte comparison tolerance */
 #define CIR_WAKE_CMP_TOLERANCE		5
 
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 72be8a0..5b4422e 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -458,21 +458,27 @@
 		index = ir_lookup_by_scancode(rc_map, scancode);
 	}
 
-	if (index >= rc_map->len) {
-		if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
-			IR_dprintk(1, "unknown key for scancode 0x%04x\n",
-				   scancode);
+	if (index < rc_map->len) {
+		entry = &rc_map->scan[index];
+
+		ke->index = index;
+		ke->keycode = entry->keycode;
+		ke->len = sizeof(entry->scancode);
+		memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
+
+	} else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
+		/*
+		 * We do not really know the valid range of scancodes
+		 * so let's respond with KEY_RESERVED to anything we
+		 * do not have mapping for [yet].
+		 */
+		ke->index = index;
+		ke->keycode = KEY_RESERVED;
+	} else {
 		retval = -EINVAL;
 		goto out;
 	}
 
-	entry = &rc_map->scan[index];
-
-	ke->index = index;
-	ke->keycode = entry->keycode;
-	ke->len = sizeof(entry->scancode);
-	memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
-
 	retval = 0;
 
 out:
@@ -844,7 +850,7 @@
 			count++;
 		} else {
 			for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
-				if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) {
+				if (!strcasecmp(tmp, proto_names[i].name)) {
 					tmp += strlen(proto_names[i].name);
 					mask = proto_names[i].type;
 					break;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 6e2911c..e435d94 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -164,7 +164,7 @@
 				sz->signal_start.tv_usec -
 				sz->signal_last.tv_usec);
 			rawir.duration -= sz->sum;
-			rawir.duration *= 1000;
+			rawir.duration = US_TO_NS(rawir.duration);
 			rawir.duration &= IR_MAX_DURATION;
 		}
 		sz_push(sz, rawir);
@@ -177,7 +177,7 @@
 	rawir.duration = ((int) value) * SZ_RESOLUTION;
 	rawir.duration += SZ_RESOLUTION / 2;
 	sz->sum += rawir.duration;
-	rawir.duration *= 1000;
+	rawir.duration = US_TO_NS(rawir.duration);
 	rawir.duration &= IR_MAX_DURATION;
 	sz_push(sz, rawir);
 }
@@ -197,7 +197,7 @@
 	rawir.duration = ((int) value) * SZ_RESOLUTION;
 	rawir.duration += SZ_RESOLUTION / 2;
 	sz->sum += rawir.duration;
-	rawir.duration *= 1000;
+	rawir.duration = US_TO_NS(rawir.duration);
 	sz_push(sz, rawir);
 }
 
@@ -273,6 +273,7 @@
 				if (sz->timeout_enabled)
 					sz_push(sz, rawir);
 				ir_raw_event_handle(sz->rdev);
+				ir_raw_event_reset(sz->rdev);
 			} else {
 				sz_push_full_space(sz, sz->buf_in[i]);
 			}
@@ -290,6 +291,7 @@
 		}
 	}
 
+	ir_raw_event_handle(sz->rdev);
 	usb_submit_urb(urb, GFP_ATOMIC);
 
 	return;
@@ -430,13 +432,13 @@
 	sz->decoder_state = PulseSpace;
 	/* FIXME: don't yet have a way to set this */
 	sz->timeout_enabled = true;
-	sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
+	sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
 				IR_MAX_DURATION) | 0x03000000);
 	#if 0
 	/* not yet supported, depends on patches from maxim */
 	/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
-	sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
-	sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+	sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
+	sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
 	#endif
 
 	do_gettimeofday(&sz->signal_start);
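
Each Streamzap byte is scaled by the device resolution, rounded to mid-step, and only then converted to nanoseconds and capped, so the cap applies to the value rc-core actually consumes. A standalone sketch with assumed values for SZ_RESOLUTION and IR_MAX_DURATION:

#include <stdio.h>

/* Assumed values: SZ_RESOLUTION is the device step size in microseconds,
 * IR_MAX_DURATION the cap rc-core accepts (in nanoseconds). */
#define SZ_RESOLUTION	256
#define US_TO_NS(usec)	((usec) * 1000)
#define IR_MAX_DURATION	0x7fffffff

static unsigned int sz_duration_ns(unsigned char value)
{
	unsigned int us = (unsigned int)value * SZ_RESOLUTION + SZ_RESOLUTION / 2;

	/* scale to mid-step in us, then convert to ns and cap */
	return US_TO_NS(us) & IR_MAX_DURATION;
}

int main(void)
{
	printf("raw 0x03 -> %u ns\n", sz_duration_ns(0x03));	/* 896000 */
	return 0;
}
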
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index eb875af..aa02160 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -78,7 +78,7 @@
 
 config VIDEO_HELPER_CHIPS_AUTO
 	bool "Autoselect pertinent encoders/decoders and other helper chips"
-	default y if !EMBEDDED
+	default y if !EXPERT
 	---help---
 	  Most video cards may require additional modules to encode or
 	  decode audio/video standards. This option will autoselect
@@ -141,15 +141,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called tda9840.
 
-config VIDEO_TDA9875
-	tristate "Philips TDA9875 audio processor"
-	depends on VIDEO_V4L2 && I2C
-	---help---
-	  Support for tda9875 audio decoder chip found on some bt8xx boards.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called tda9875.
-
 config VIDEO_TEA6415C
 	tristate "Philips TEA6415C audio processor"
 	depends on I2C
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 81e38cb..a509d31 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -27,7 +27,6 @@
 obj-$(CONFIG_VIDEO_TUNER) += tuner.o
 obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
 obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
-obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
 obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
 obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
 obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f318b51..d2327db 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -303,11 +303,22 @@
 	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7175, 0);
 }
 
+static int adv7175_s_power(struct v4l2_subdev *sd, int on)
+{
+	if (on)
+		adv7175_write(sd, 0x01, 0x00);
+	else
+		adv7175_write(sd, 0x01, 0x78);
+
+	return 0;
+}
+
 /* ----------------------------------------------------------------------- */
 
 static const struct v4l2_subdev_core_ops adv7175_core_ops = {
 	.g_chip_ident = adv7175_g_chip_ident,
 	.init = adv7175_init,
+	.s_power = adv7175_s_power,
 };
 
 static const struct v4l2_subdev_video_ops adv7175_video_ops = {
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index e41e4ad..9c475c6 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -1758,7 +1758,12 @@
 	if (rc < 0)
 		return rc;
 
-	return videobuf_reqbufs(&fh->vb_vidq, rb);
+	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		rc = videobuf_reqbufs(&fh->vb_vidq, rb);
+	else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+		rc = videobuf_reqbufs(&fh->vb_vbiq, rb);
+
+	return rc;
 }
 
 static int vidioc_querybuf(struct file *file, void *priv,
@@ -1772,7 +1777,12 @@
 	if (rc < 0)
 		return rc;
 
-	return videobuf_querybuf(&fh->vb_vidq, b);
+	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		rc = videobuf_querybuf(&fh->vb_vidq, b);
+	else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+		rc = videobuf_querybuf(&fh->vb_vbiq, b);
+
+	return rc;
 }
 
 static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1785,7 +1795,12 @@
 	if (rc < 0)
 		return rc;
 
-	return videobuf_qbuf(&fh->vb_vidq, b);
+	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		rc = videobuf_qbuf(&fh->vb_vidq, b);
+	else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+		rc = videobuf_qbuf(&fh->vb_vbiq, b);
+
+	return rc;
 }
 
 static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1806,7 +1821,12 @@
 		dev->greenscreen_detected = 0;
 	}
 
-	return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+	else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+		rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK);
+
+	return rc;
 }
 
 static struct v4l2_file_operations au0828_v4l_fops = {
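
Each of the buffer ioctls above now dispatches on fh->type to pick the video or VBI queue. The patch open-codes that test in every handler; a helper along these lines would factor it out (hypothetical helper, field names as used by the driver):

/*
 * Hypothetical helper, not part of the patch: roughly what factoring out
 * the repeated fh->type test would look like.
 */
static struct videobuf_queue *au0828_queue_for_type(struct au0828_fh *fh)
{
	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return &fh->vb_vidq;
	if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
		return &fh->vb_vbiq;
	return NULL;
}
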
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 49efcf6..7f58756 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -1373,7 +1373,6 @@
 		.gpiomute 	= 0x1800,
 		.audio_mode_gpio= fv2000s_audio,
 		.no_msp34xx	= 1,
-		.no_tda9875	= 1,
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL,
@@ -1511,7 +1510,6 @@
 		.gpiomute 	= 0x09,
 		.needs_tvaudio  = 1,
 		.no_msp34xx	= 1,
-		.no_tda9875	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -1550,7 +1548,6 @@
 		.gpiomask2      = 0x07ff,
 		.muxsel         = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.muxsel_hook    = rv605_muxsel,
@@ -1686,7 +1683,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY1x0_848] = {
@@ -1699,7 +1695,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 
@@ -1714,7 +1709,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY1x1] = {
@@ -1727,7 +1721,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY1x1_SVID] = {
@@ -1740,7 +1733,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY2xx] = {
@@ -1753,7 +1745,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 
@@ -1768,7 +1759,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY2x0] = {
@@ -1781,7 +1771,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY500] = {
@@ -1794,7 +1783,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY540] = {
@@ -1805,7 +1793,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 
@@ -1820,7 +1807,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,      /* must avoid, conflicts with the bt860 */
 	},
 	[BTTV_BOARD_IDS_EAGLE] = {
@@ -1835,7 +1821,6 @@
 		.muxsel         = MUXSEL(2, 2, 2, 2),
 		.muxsel_hook    = eagle_muxsel,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.pll            = PLL_28,
 	},
 	[BTTV_BOARD_PINNACLESAT] = {
@@ -1846,7 +1831,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.muxsel         = MUXSEL(3, 1),
 		.pll            = PLL_28,
@@ -1897,7 +1881,6 @@
 		.svhs           = 2,
 		.gpiomask       = 0,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.muxsel         = MUXSEL(2, 0, 1),
 		.pll            = PLL_28,
@@ -1970,7 +1953,6 @@
 		/* Tuner, CVid, SVid, CVid over SVid connector */
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomask       = 0,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type     = TUNER_PHILIPS_PAL_I,
 		.tuner_addr	= ADDR_UNSET,
@@ -2017,7 +1999,6 @@
 		.muxsel         = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0),
 		.muxsel_hook    = xguard_muxsel,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 	},
@@ -2029,7 +2010,6 @@
 		.svhs           = NO_SVHS,
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -2134,7 +2114,6 @@
 		.svhs           = NO_SVHS,   /* card has no svhs */
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
@@ -2156,7 +2135,6 @@
 	[BTTV_BOARD_TWINHAN_DST] = {
 		.name           = "Twinhan DST + clones",
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2171,7 +2149,6 @@
 		/* Vid In, SVid In, Vid over SVid in connector */
 		.muxsel		= MUXSEL(3, 1, 1, 3),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2226,7 +2203,6 @@
 		.svhs           = NO_SVHS,
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.needs_tvaudio  = 0,
 		.tuner_type     = TUNER_ABSENT,
@@ -2278,7 +2254,6 @@
 		.gpiomask       = 0,
 		.gpiomask2      = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		/*878A input is always MUX0, see above.*/
 		.muxsel         = MUXSEL(2, 2, 2, 2),
@@ -2302,7 +2277,6 @@
 		.tuner_type     = TUNER_TEMIC_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 	},
 	[BTTV_BOARD_AVDVBT_771] = {
 		/* Wolfram Joost <wojo@frokaschwei.de> */
@@ -2313,7 +2287,6 @@
 		.tuner_addr	= ADDR_UNSET,
 		.muxsel         = MUXSEL(3, 3),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.has_dvb        = 1,
@@ -2329,7 +2302,6 @@
 		.svhs           = 1,
 		.muxsel         = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? */
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -2393,7 +2365,6 @@
 		/* Chris Pascoe <c.pascoe@itee.uq.edu.au> */
 		.name           = "DViCO FusionHDTV DVB-T Lite",
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.no_video       = 1,
@@ -2440,7 +2411,6 @@
 		.muxsel         = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
 		.pll		= PLL_28,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432	= 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2478,7 +2448,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook	= kodicom4400r_muxsel,
 	},
 	[BTTV_BOARD_KODICOM_4400R_SL] = {
@@ -2500,7 +2469,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook	= kodicom4400r_muxsel,
 	},
 		/* ---- card 0x86---------------------------------- */
@@ -2530,7 +2498,6 @@
 		.gpiomux        = { 0x00400005, 0, 0x00000001, 0 },
 		.gpiomute 	= 0x00c00007,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.has_dvb        = 1,
 	},
@@ -2630,7 +2597,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 		/* ---- card 0x8d ---------------------------------- */
@@ -2658,7 +2624,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 100000, 100002, 100002, 100000 },
 		.no_msp34xx	= 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_TNF_5335MF,
@@ -2674,7 +2639,6 @@
 		.gpiomask	= 0x0f, /* old: 7 */
 		.muxsel		= MUXSEL(0, 1, 3, 2), /* Composite 0-3 */
 		.no_msp34xx	= 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2732,7 +2696,6 @@
 		.gpiomux        = { 0x00400005, 0, 0x00000001, 0 },
 		.gpiomute 	= 0x00c00007,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	/* ---- card 0x95---------------------------------- */
@@ -2874,7 +2837,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook    = gv800s_muxsel,
 	},
 		[BTTV_BOARD_GEOVISION_GV800S_SL] = {
@@ -2899,7 +2861,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook    = gv800s_muxsel,
 	},
 	[BTTV_BOARD_PV183] = {
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index fd62bf1..c633359 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -234,7 +234,6 @@
 
 	/* i2c audio flags */
 	unsigned int no_msp34xx:1;
-	unsigned int no_tda9875:1;
 	unsigned int no_tda7432:1;
 	unsigned int needs_tvaudio:1;
 	unsigned int msp34xx_alt:1;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 789087c..55ffd60 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2001,6 +2001,11 @@
 		.min_width = 320,
 		.min_height = 240,
 	};
+	struct i2c_board_info ov7670_info = {
+		.type = "ov7670",
+		.addr = 0x42,
+		.platform_data = &sensor_cfg,
+	};
 
 	/*
 	 * Start putting together one of our big camera structures.
@@ -2062,9 +2067,9 @@
 	if (dmi_check_system(olpc_xo1_dmi))
 		sensor_cfg.clock_speed = 45;
 
-	cam->sensor_addr = 0x42;
-	cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter,
-			"ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL);
+	cam->sensor_addr = ov7670_info.addr;
+	cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
+			&ov7670_info, NULL);
 	if (cam->sensor == NULL) {
 		ret = -ENODEV;
 		goto out_smbus;
@@ -2184,9 +2189,7 @@
 	struct cafe_camera *cam = to_cam(v4l2_dev);
 	int ret = 0;
 
-	ret = pci_restore_state(pdev);
-	if (ret)
-		return ret;
+	pci_restore_state(pdev);
 	ret = pci_enable_device(pdev);
 
 	if (ret) {
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 916c13d..6d6d184 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -378,7 +378,7 @@
 
 struct camera_data {
 	/* locks */
-	struct mutex busy_lock;	/* guard against SMP multithreading */
+	struct mutex v4l2_lock;	/* serialize file operations */
 	struct v4l2_prio_state prio;
 
 	/* camera status */
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 9606bc0..aaffca8 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -2247,7 +2247,7 @@
 
 
 	cam->present = 1;
-	mutex_init(&cam->busy_lock);
+	mutex_init(&cam->v4l2_lock);
 	init_waitqueue_head(&cam->wq_stream);
 
 	return cam;
@@ -2365,9 +2365,9 @@
 		char __user *buf, unsigned long count, int noblock)
 {
 	struct framebuf *frame;
-	if (!count) {
+
+	if (!count)
 		return 0;
-	}
 
 	if (!buf) {
 		ERR("%s: buffer NULL\n",__func__);
@@ -2379,17 +2379,12 @@
 		return -EINVAL;
 	}
 
-	/* make this _really_ smp and multithread-safe */
-	if (mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
-
 	if (!cam->present) {
 		LOG("%s: camera removed\n",__func__);
-		mutex_unlock(&cam->busy_lock);
 		return 0;	/* EOF */
 	}
 
-	if(!cam->streaming) {
+	if (!cam->streaming) {
 		/* Start streaming */
 		cpia2_usb_stream_start(cam,
 				       cam->params.camera_state.stream_mode);
@@ -2398,42 +2393,31 @@
 	/* Copy cam->curbuff in case it changes while we're processing */
 	frame = cam->curbuff;
 	if (noblock && frame->status != FRAME_READY) {
-		mutex_unlock(&cam->busy_lock);
 		return -EAGAIN;
 	}
 
-	if(frame->status != FRAME_READY) {
-		mutex_unlock(&cam->busy_lock);
+	if (frame->status != FRAME_READY) {
+		mutex_unlock(&cam->v4l2_lock);
 		wait_event_interruptible(cam->wq_stream,
 			       !cam->present ||
 			       (frame = cam->curbuff)->status == FRAME_READY);
+		mutex_lock(&cam->v4l2_lock);
 		if (signal_pending(current))
 			return -ERESTARTSYS;
-		/* make this _really_ smp and multithread-safe */
-		if (mutex_lock_interruptible(&cam->busy_lock)) {
-			return -ERESTARTSYS;
-		}
-		if(!cam->present) {
-			mutex_unlock(&cam->busy_lock);
+		if (!cam->present)
 			return 0;
-		}
 	}
 
 	/* copy data to user space */
-	if (frame->length > count) {
-		mutex_unlock(&cam->busy_lock);
+	if (frame->length > count)
 		return -EFAULT;
-	}
-	if (copy_to_user(buf, frame->data, frame->length)) {
-		mutex_unlock(&cam->busy_lock);
+	if (copy_to_user(buf, frame->data, frame->length))
 		return -EFAULT;
-	}
 
 	count = frame->length;
 
 	frame->status = FRAME_EMPTY;
 
-	mutex_unlock(&cam->busy_lock);
 	return count;
 }
 
@@ -2447,17 +2431,13 @@
 {
 	unsigned int status=0;
 
-	if(!cam) {
+	if (!cam) {
 		ERR("%s: Internal error, camera_data not found!\n",__func__);
 		return POLLERR;
 	}
 
-	mutex_lock(&cam->busy_lock);
-
-	if(!cam->present) {
-		mutex_unlock(&cam->busy_lock);
+	if (!cam->present)
 		return POLLHUP;
-	}
 
 	if(!cam->streaming) {
 		/* Start streaming */
@@ -2465,16 +2445,13 @@
 				       cam->params.camera_state.stream_mode);
 	}
 
-	mutex_unlock(&cam->busy_lock);
 	poll_wait(filp, &cam->wq_stream, wait);
-	mutex_lock(&cam->busy_lock);
 
 	if(!cam->present)
 		status = POLLHUP;
 	else if(cam->curbuff->status == FRAME_READY)
 		status = POLLIN | POLLRDNORM;
 
-	mutex_unlock(&cam->busy_lock);
 	return status;
 }
 
@@ -2496,29 +2473,19 @@
 
 	DBG("mmap offset:%ld size:%ld\n", start_offset, size);
 
-	/* make this _really_ smp-safe */
-	if (mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
-
-	if (!cam->present) {
-		mutex_unlock(&cam->busy_lock);
+	if (!cam->present)
 		return -ENODEV;
-	}
 
 	if (size > cam->frame_size*cam->num_frames  ||
 	    (start_offset % cam->frame_size) != 0 ||
-	    (start_offset+size > cam->frame_size*cam->num_frames)) {
-		mutex_unlock(&cam->busy_lock);
+	    (start_offset+size > cam->frame_size*cam->num_frames))
 		return -EINVAL;
-	}
 
 	pos = ((unsigned long) (cam->frame_buffer)) + start_offset;
 	while (size > 0) {
 		page = kvirt_to_pa(pos);
-		if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) {
-			mutex_unlock(&cam->busy_lock);
+		if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED))
 			return -EAGAIN;
-		}
 		start += PAGE_SIZE;
 		pos += PAGE_SIZE;
 		if (size > PAGE_SIZE)
@@ -2528,7 +2495,5 @@
 	}
 
 	cam->mmapped = true;
-	mutex_unlock(&cam->busy_lock);
 	return 0;
 }
-
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 7edf80b..9bad398 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -238,59 +238,40 @@
 static int cpia2_open(struct file *file)
 {
 	struct camera_data *cam = video_drvdata(file);
-	int retval = 0;
+	struct cpia2_fh *fh;
 
 	if (!cam) {
 		ERR("Internal error, camera_data not found!\n");
 		return -ENODEV;
 	}
 
-	if(mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
+	if (!cam->present)
+		return -ENODEV;
 
-	if(!cam->present) {
-		retval = -ENODEV;
-		goto err_return;
+	if (cam->open_count == 0) {
+		if (cpia2_allocate_buffers(cam))
+			return -ENOMEM;
+
+		/* reset the camera */
+		if (cpia2_reset_camera(cam) < 0)
+			return -EIO;
+
+		cam->APP_len = 0;
+		cam->COM_len = 0;
 	}
 
-	if (cam->open_count > 0) {
-		goto skip_init;
-	}
-
-	if (cpia2_allocate_buffers(cam)) {
-		retval = -ENOMEM;
-		goto err_return;
-	}
-
-	/* reset the camera */
-	if (cpia2_reset_camera(cam) < 0) {
-		retval = -EIO;
-		goto err_return;
-	}
-
-	cam->APP_len = 0;
-	cam->COM_len = 0;
-
-skip_init:
-	{
-		struct cpia2_fh *fh = kmalloc(sizeof(*fh),GFP_KERNEL);
-		if(!fh) {
-			retval = -ENOMEM;
-			goto err_return;
-		}
-		file->private_data = fh;
-		fh->prio = V4L2_PRIORITY_UNSET;
-		v4l2_prio_open(&cam->prio, &fh->prio);
-		fh->mmapped = 0;
-	}
+	fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh)
+		return -ENOMEM;
+	file->private_data = fh;
+	fh->prio = V4L2_PRIORITY_UNSET;
+	v4l2_prio_open(&cam->prio, &fh->prio);
+	fh->mmapped = 0;
 
 	++cam->open_count;
 
 	cpia2_dbg_dump_registers(cam);
-
-err_return:
-	mutex_unlock(&cam->busy_lock);
-	return retval;
+	return 0;
 }
 
 /******************************************************************************
@@ -304,15 +285,11 @@
 	struct camera_data *cam = video_get_drvdata(dev);
 	struct cpia2_fh *fh = file->private_data;
 
-	mutex_lock(&cam->busy_lock);
-
 	if (cam->present &&
-	    (cam->open_count == 1
-	     || fh->prio == V4L2_PRIORITY_RECORD
-	    )) {
+	    (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) {
 		cpia2_usb_stream_stop(cam);
 
-		if(cam->open_count == 1) {
+		if (cam->open_count == 1) {
 			/* save camera state for later open */
 			cpia2_save_camera_state(cam);
 
@@ -321,26 +298,21 @@
 		}
 	}
 
-	{
-		if(fh->mmapped)
-			cam->mmapped = 0;
-		v4l2_prio_close(&cam->prio, fh->prio);
-		file->private_data = NULL;
-		kfree(fh);
-	}
+	if (fh->mmapped)
+		cam->mmapped = 0;
+	v4l2_prio_close(&cam->prio, fh->prio);
+	file->private_data = NULL;
+	kfree(fh);
 
 	if (--cam->open_count == 0) {
 		cpia2_free_buffers(cam);
 		if (!cam->present) {
 			video_unregister_device(dev);
-			mutex_unlock(&cam->busy_lock);
 			kfree(cam);
 			return 0;
 		}
 	}
 
-	mutex_unlock(&cam->busy_lock);
-
 	return 0;
 }
 
@@ -405,11 +377,11 @@
 			return 0;
 		}
 
-		mutex_unlock(&cam->busy_lock);
+		mutex_unlock(&cam->v4l2_lock);
 		wait_event_interruptible(cam->wq_stream,
 					 !cam->streaming ||
 					 frame->status == FRAME_READY);
-		mutex_lock(&cam->busy_lock);
+		mutex_lock(&cam->v4l2_lock);
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 		if(!cam->present)
@@ -1293,11 +1265,11 @@
 	if(frame < 0) {
 		/* Wait for a frame to become available */
 		struct framebuf *cb=cam->curbuff;
-		mutex_unlock(&cam->busy_lock);
+		mutex_unlock(&cam->v4l2_lock);
 		wait_event_interruptible(cam->wq_stream,
 					 !cam->present ||
 					 (cb=cam->curbuff)->status == FRAME_READY);
-		mutex_lock(&cam->busy_lock);
+		mutex_lock(&cam->v4l2_lock);
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 		if(!cam->present)
@@ -1337,14 +1309,8 @@
 	if (!cam)
 		return -ENOTTY;
 
-	/* make this _really_ smp-safe */
-	if (mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
-
-	if (!cam->present) {
-		mutex_unlock(&cam->busy_lock);
+	if (!cam->present)
 		return -ENODEV;
-	}
 
 	/* Priority check */
 	switch (cmd) {
@@ -1352,10 +1318,8 @@
 	{
 		struct cpia2_fh *fh = file->private_data;
 		retval = v4l2_prio_check(&cam->prio, fh->prio);
-		if(retval) {
-			mutex_unlock(&cam->busy_lock);
+		if (retval)
 			return retval;
-		}
 		break;
 	}
 	default:
@@ -1529,7 +1493,6 @@
 		break;
 	}
 
-	mutex_unlock(&cam->busy_lock);
 	return retval;
 }
 
@@ -1596,7 +1559,7 @@
 	.release	= cpia2_close,
 	.read		= cpia2_v4l_read,
 	.poll		= cpia2_v4l_poll,
-	.ioctl		= cpia2_ioctl,
+	.unlocked_ioctl	= cpia2_ioctl,
 	.mmap		= cpia2_mmap,
 };
 
@@ -1620,6 +1583,7 @@
 
 	memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
 	video_set_drvdata(cam->vdev, cam);
+	cam->vdev->lock = &cam->v4l2_lock;
 
 	reset_camera_struct_v4l(cam);
 
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 8717773..68ad196 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -95,6 +95,53 @@
 	.i2c = &cx18_i2c_std,
 };
 
+static const struct cx18_card cx18_card_hvr1600_s5h1411 = {
+	.type = CX18_CARD_HVR_1600_S5H1411,
+	.name = "Hauppauge HVR-1600",
+	.comment = "Simultaneous Digital and Analog TV capture supported\n",
+	.v4l2_capabilities = CX18_CAP_ENCODER,
+	.hw_audio_ctrl = CX18_HW_418_AV,
+	.hw_muxer = CX18_HW_CS5345,
+	.hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
+		  CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
+		  CX18_HW_Z8F0811_IR_HAUP,
+	.video_inputs = {
+		{ CX18_CARD_INPUT_VID_TUNER,  0, CX18_AV_COMPOSITE7 },
+		{ CX18_CARD_INPUT_SVIDEO1,    1, CX18_AV_SVIDEO1    },
+		{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
+		{ CX18_CARD_INPUT_SVIDEO2,    2, CX18_AV_SVIDEO2    },
+		{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
+	},
+	.audio_inputs = {
+		{ CX18_CARD_INPUT_AUD_TUNER,
+		  CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
+		{ CX18_CARD_INPUT_LINE_IN1,
+		  CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
+		{ CX18_CARD_INPUT_LINE_IN2,
+		  CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
+	},
+	.radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+			 CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
+	.ddr = {
+		/* ESMT M13S128324A-5B memory */
+		.chip_config = 0x003,
+		.refresh = 0x30c,
+		.timing1 = 0x44220e82,
+		.timing2 = 0x08,
+		.tune_lane = 0,
+		.initial_emrs = 0,
+	},
+	.gpio_init.initial_value = 0x3001,
+	.gpio_init.direction = 0x3001,
+	.gpio_i2c_slave_reset = {
+		.active_lo_mask = 0x3001,
+		.msecs_asserted = 10,
+		.msecs_recovery = 40,
+		.ir_reset_mask  = 0x0001,
+	},
+	.i2c = &cx18_i2c_std,
+};
+
 static const struct cx18_card cx18_card_hvr1600_samsung = {
 	.type = CX18_CARD_HVR_1600_SAMSUNG,
 	.name = "Hauppauge HVR-1600 (Preproduction)",
@@ -523,7 +570,8 @@
 	&cx18_card_toshiba_qosmio_dvbt,
 	&cx18_card_leadtek_pvr2100,
 	&cx18_card_leadtek_dvr3100h,
-	&cx18_card_gotview_dvd3
+	&cx18_card_gotview_dvd3,
+	&cx18_card_hvr1600_s5h1411
 };
 
 const struct cx18_card *cx18_get_card(u16 index)
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 133ec2b..b1c3cbd 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -157,6 +157,7 @@
 		 "\t\t\t 7 = Leadtek WinFast PVR2100\n"
 		 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
 		 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n"
+		 "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n"
 		 "\t\t\t 0 = Autodetect (default)\n"
 		 "\t\t\t-1 = Ignore this card\n\t\t");
 MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
@@ -337,6 +338,7 @@
 	switch (cx->card->type) {
 	case CX18_CARD_HVR_1600_ESMT:
 	case CX18_CARD_HVR_1600_SAMSUNG:
+	case CX18_CARD_HVR_1600_S5H1411:
 		tveeprom_hauppauge_analog(&c, tv, eedata);
 		break;
 	case CX18_CARD_YUAN_MPC718:
@@ -365,7 +367,25 @@
 	   from the model number. Use the cardtype module option if you
 	   have one of these preproduction models. */
 	switch (tv.model) {
-	case 74000 ... 74999:
+	case 74301: /* Retail models */
+	case 74321:
+	case 74351: /* OEM models */
+	case 74361:
+		/* Digital side is s5h1411/tda18271 */
+		cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411);
+		break;
+	case 74021: /* Retail models */
+	case 74031:
+	case 74041:
+	case 74141:
+	case 74541: /* OEM models */
+	case 74551:
+	case 74591:
+	case 74651:
+	case 74691:
+	case 74751:
+	case 74891:
+		/* Digital side is s5h1409/mxl5005s */
 		cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
 		break;
 	case 0x718:
@@ -377,7 +397,8 @@
 		CX18_ERR("Invalid EEPROM\n");
 		return;
 	default:
-		CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model);
+		CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
+			 "(cardtype=1)\n", tv.model);
 		cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
 		break;
 	}
@@ -664,7 +685,7 @@
 {
 	snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
 		 cx->v4l2_dev.name);
-	cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
+	cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0);
 	if (cx->in_work_queue == NULL) {
 		CX18_ERR("Unable to create incoming mailbox handler thread\n");
 		return -ENOMEM;
@@ -672,18 +693,6 @@
 	return 0;
 }
 
-static int __devinit cx18_create_out_workq(struct cx18 *cx)
-{
-	snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
-		 cx->v4l2_dev.name);
-	cx->out_work_queue = create_workqueue(cx->out_workq_name);
-	if (cx->out_work_queue == NULL) {
-		CX18_ERR("Unable to create outgoing mailbox handler threads\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
 static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
 {
 	int i;
@@ -710,16 +719,10 @@
 	mutex_init(&cx->epu2apu_mb_lock);
 	mutex_init(&cx->epu2cpu_mb_lock);
 
-	ret = cx18_create_out_workq(cx);
+	ret = cx18_create_in_workq(cx);
 	if (ret)
 		return ret;
 
-	ret = cx18_create_in_workq(cx);
-	if (ret) {
-		destroy_workqueue(cx->out_work_queue);
-		return ret;
-	}
-
 	cx18_init_in_work_orders(cx);
 
 	/* start counting open_id at 1 */
@@ -1107,7 +1110,6 @@
 	release_mem_region(cx->base_addr, CX18_MEM_SIZE);
 free_workqueues:
 	destroy_workqueue(cx->in_work_queue);
-	destroy_workqueue(cx->out_work_queue);
 err:
 	if (retval == 0)
 		retval = -ENODEV;
@@ -1259,7 +1261,6 @@
 	cx18_halt_firmware(cx);
 
 	destroy_workqueue(cx->in_work_queue);
-	destroy_workqueue(cx->out_work_queue);
 
 	cx18_streams_cleanup(cx, 1);
 
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index f6f3e50..f736679 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -85,7 +85,8 @@
 #define CX18_CARD_LEADTEK_PVR2100     6 /* Leadtek WinFast PVR2100 */
 #define CX18_CARD_LEADTEK_DVR3100H    7 /* Leadtek WinFast DVR3100 H */
 #define CX18_CARD_GOTVIEW_PCI_DVD3    8 /* GoTView PCI DVD3 Hybrid */
-#define CX18_CARD_LAST		      8
+#define CX18_CARD_HVR_1600_S5H1411    9 /* Hauppauge HVR 1600 s5h1411/tda18271*/
+#define CX18_CARD_LAST		      9
 
 #define CX18_ENC_STREAM_TYPE_MPG  0
 #define CX18_ENC_STREAM_TYPE_TS   1
@@ -617,9 +618,6 @@
 	struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
 	char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
 
-	struct workqueue_struct *out_work_queue;
-	char out_workq_name[12]; /* "cx18-NN-out" */
-
 	/* i2c */
 	struct i2c_adapter i2c_adap[2];
 	struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index f0381d6..f41922b 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -29,6 +29,8 @@
 #include "cx18-gpio.h"
 #include "s5h1409.h"
 #include "mxl5005s.h"
+#include "s5h1411.h"
+#include "tda18271.h"
 #include "zl10353.h"
 
 #include <linux/firmware.h>
@@ -77,6 +79,32 @@
 };
 
 /*
+ * CX18_CARD_HVR_1600_S5H1411
+ */
+static struct s5h1411_config hcw_s5h1411_config = {
+	.output_mode   = S5H1411_SERIAL_OUTPUT,
+	.gpio          = S5H1411_GPIO_OFF,
+	.vsb_if        = S5H1411_IF_44000,
+	.qam_if        = S5H1411_IF_4000,
+	.inversion     = S5H1411_INVERSION_ON,
+	.status_mode   = S5H1411_DEMODLOCKING,
+	.mpeg_timing   = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct tda18271_std_map hauppauge_tda18271_std_map = {
+	.atsc_6   = { .if_freq = 5380, .agc_mode = 3, .std = 3,
+		      .if_lvl = 6, .rfagc_top = 0x37 },
+	.qam_6    = { .if_freq = 4000, .agc_mode = 3, .std = 0,
+		      .if_lvl = 6, .rfagc_top = 0x37 },
+};
+
+static struct tda18271_config hauppauge_tda18271_config = {
+	.std_map = &hauppauge_tda18271_std_map,
+	.gate    = TDA18271_GATE_DIGITAL,
+	.output_opt = TDA18271_OUTPUT_LT_OFF,
+};
+
+/*
  * CX18_CARD_LEADTEK_DVR3100H
  */
 /* Information/confirmation of proper config values provided by Terry Wu */
@@ -244,6 +272,7 @@
 	switch (cx->card->type) {
 	case CX18_CARD_HVR_1600_ESMT:
 	case CX18_CARD_HVR_1600_SAMSUNG:
+	case CX18_CARD_HVR_1600_S5H1411:
 		v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
 		v |= 0x00400000; /* Serial Mode */
 		v |= 0x00002000; /* Data Length - Byte */
@@ -455,6 +484,15 @@
 			ret = 0;
 		}
 		break;
+	case CX18_CARD_HVR_1600_S5H1411:
+		dvb->fe = dvb_attach(s5h1411_attach,
+				     &hcw_s5h1411_config,
+				     &cx->i2c_adap[0]);
+		if (dvb->fe != NULL)
+			dvb_attach(tda18271_attach, dvb->fe,
+				   0x60, &cx->i2c_adap[0],
+				   &hauppauge_tda18271_config);
+		break;
 	case CX18_CARD_LEADTEK_DVR3100H:
 		dvb->fe = dvb_attach(zl10353_attach,
 				     &leadtek_dvr3100h_demod,
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 51765eb..713b0e6 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -42,8 +42,7 @@
 /* Related to submission of mdls to firmware */
 static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
 {
-	struct cx18 *cx = s->cx;
-	queue_work(cx->out_work_queue, &s->out_work_order);
+	schedule_work(&s->out_work_order);
 }
 
 static inline void cx18_stream_put_mdl_fw(struct cx18_stream *s,
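
Dropping the dedicated outgoing workqueue means the per-stream work item now runs on the kernel's shared workqueue via schedule_work(). A minimal sketch of that pattern with illustrative names:

/*
 * Minimal sketch: work queued via schedule_work() runs on the kernel's
 * shared workqueue, so no driver-private workqueue has to be created and
 * destroyed just for this work item.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_stream {
	struct work_struct out_work_order;
};

static void out_work_handler(struct work_struct *work)
{
	struct my_stream *s =
		container_of(work, struct my_stream, out_work_order);

	/* ... hand queued buffers for 's' to the firmware ... */
	(void)s;
}

static void my_stream_init(struct my_stream *s)
{
	INIT_WORK(&s->out_work_order, out_work_handler);
}

static void my_stream_kick(struct my_stream *s)
{
	schedule_work(&s->out_work_order);	/* was queue_work(private wq, ...) */
}
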
diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h
index 2c00980..7e40035 100644
--- a/drivers/media/video/cx18/cx23418.h
+++ b/drivers/media/video/cx18/cx23418.h
@@ -177,7 +177,7 @@
    IN[0] - Task handle.
    IN[1] - luma type: 0 = disable, 1 = 1D horizontal only, 2 = 1D vertical only,
 		      3 = 2D H/V separable, 4 = 2D symmetric non-separable
-   IN[2] - chroma type: 0 - diable, 1 = 1D horizontal
+   IN[2] - chroma type: 0 - disable, 1 = 1D horizontal
    ReturnCode - One of the ERR_CAPTURE_... */
 #define CX18_CPU_SET_SPATIAL_FILTER_TYPE     	(CPU_CMD_MASK_CAPTURE | 0x000C)
 
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index fe59a1c..363aa60 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -28,7 +28,6 @@
 #include <media/videobuf-vmalloc.h>
 
 #include "xc5000.h"
-#include "dvb_dummy_fe.h"
 #include "s5h1432.h"
 #include "tda18271.h"
 #include "s5h1411.h"
@@ -619,7 +618,7 @@
 
 		if (dev->dvb->frontend == NULL) {
 			printk(DRIVER_NAME
-			       ": Failed to attach dummy front end\n");
+			       ": Failed to attach s5h1411 front end\n");
 			result = -EINVAL;
 			goto out_free;
 		}
@@ -665,7 +664,7 @@
 
 		if (dev->dvb->frontend == NULL) {
 			printk(DRIVER_NAME
-			       ": Failed to attach dummy front end\n");
+			       ": Failed to attach s5h1411 front end\n");
 			result = -EINVAL;
 			goto out_free;
 		}
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index ed3d8f5..307ff54 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -122,10 +122,6 @@
 
 	if (!i2c_wait_done(i2c_adap))
 		goto eio;
-	if (!i2c_slave_did_ack(i2c_adap)) {
-		retval = -ENXIO;
-		goto err;
-	}
 	if (i2c_debug) {
 		printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
 		if (!(ctrl & I2C_NOSTOP))
@@ -158,7 +154,6 @@
 
  eio:
 	retval = -EIO;
- err:
 	if (i2c_debug)
 		printk(KERN_ERR " ERR: %d\n", retval);
 	return retval;
@@ -209,10 +204,6 @@
 
 		if (!i2c_wait_done(i2c_adap))
 			goto eio;
-		if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) {
-			retval = -ENXIO;
-			goto err;
-		}
 		msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
 		if (i2c_debug) {
 			dprintk(1, " %02x", msg->buf[cnt]);
@@ -224,7 +215,6 @@
 
  eio:
 	retval = -EIO;
- err:
 	if (i2c_debug)
 		printk(KERN_ERR " ERR: %d\n", retval);
 	return retval;
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f164618..35796e0 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1682,20 +1682,6 @@
 	return 0;
 }
 
-static int cx25840_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
-{
-	struct cx25840_state *state = to_state(sd);
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-
-	if (platform_data) {
-		struct cx25840_platform_data *pdata = platform_data;
-
-		state->pvr150_workaround = pdata->pvr150_workaround;
-		set_input(client, state->vid_input, state->aud_input);
-	}
-	return 0;
-}
-
 static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status,
 			       bool *handled)
 {
@@ -1787,7 +1773,6 @@
 
 static const struct v4l2_subdev_core_ops cx25840_core_ops = {
 	.log_status = cx25840_log_status,
-	.s_config = cx25840_s_config,
 	.g_chip_ident = cx25840_g_chip_ident,
 	.g_ctrl = v4l2_subdev_g_ctrl,
 	.s_ctrl = v4l2_subdev_s_ctrl,
@@ -1974,7 +1959,6 @@
 	state->vid_input = CX25840_COMPOSITE7;
 	state->aud_input = CX25840_AUDIO8;
 	state->audclk_freq = 48000;
-	state->pvr150_workaround = 0;
 	state->audmode = V4L2_TUNER_MODE_LANG1;
 	state->vbi_line_offset = 8;
 	state->id = id;
@@ -2031,9 +2015,16 @@
 		kfree(state);
 		return err;
 	}
-	v4l2_ctrl_cluster(2, &state->volume);
+	if (!is_cx2583x(state))
+		v4l2_ctrl_cluster(2, &state->volume);
 	v4l2_ctrl_handler_setup(&state->hdl);
 
+	if (client->dev.platform_data) {
+		struct cx25840_platform_data *pdata = client->dev.platform_data;
+
+		state->pvr150_workaround = pdata->pvr150_workaround;
+	}
+
 	cx25840_ir_probe(sd);
 	return 0;
 }
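
With the .s_config hook gone, a bridge driver that needs the PVR-150 audio workaround has to hand the cx25840 its platform data at attach time instead. A minimal sketch of such a call site, assuming the platform-data struct is the one declared in media/cx25840.h and that the bridge already owns a struct v4l2_device and an i2c_adapter; the helper name mirrors the em28xx/mt9v011 conversion further down in this series, and the static pdata and I2C address are illustrative only:

#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/cx25840.h>

/* Sketch only: attach cx25840 with platform data instead of calling s_config. */
static struct v4l2_subdev *attach_cx25840(struct v4l2_device *v4l2_dev,
					  struct i2c_adapter *adap)
{
	static struct cx25840_platform_data pdata = {
		.pvr150_workaround = 1,		/* what s_config used to copy */
	};
	struct i2c_board_info info = {
		.type		= "cx25840",
		.addr		= 0x88 >> 1,	/* usual 7-bit address; assumption */
		.platform_data	= &pdata,
	};

	/* Same four-argument helper the em28xx hunk below uses for mt9v011. */
	return v4l2_i2c_new_subdev_board(v4l2_dev, adap, &info, NULL);
}
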
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index 627926f..7eb79af 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -261,7 +261,7 @@
 	u32 rem;
 
 	/*
-	 * The 2 lsb's of the pulse width timer count are not accessable, hence
+	 * The 2 lsb's of the pulse width timer count are not accessible, hence
 	 * the (1 << 2)
 	 */
 	n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 1f532e3..9f3bfc1 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -41,6 +41,183 @@
 
 void __iomem *vpif_base;
 
+/**
+ * ch_params: video standard configuration parameters for vpif
+ * The table must include all presets from supported subdevices.
+ */
+const struct vpif_channel_config_params ch_params[] = {
+	/* HDTV formats */
+	{
+		.name = "480p59_94",
+		.width = 720,
+		.height = 480,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 138-8,
+		.sav2eav = 720,
+		.l1 = 1,
+		.l3 = 43,
+		.l5 = 523,
+		.vsize = 525,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_480P59_94,
+	},
+	{
+		.name = "576p50",
+		.width = 720,
+		.height = 576,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 144-8,
+		.sav2eav = 720,
+		.l1 = 1,
+		.l3 = 45,
+		.l5 = 621,
+		.vsize = 625,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_576P50,
+	},
+	{
+		.name = "720p50",
+		.width = 1280,
+		.height = 720,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 700-8,
+		.sav2eav = 1280,
+		.l1 = 1,
+		.l3 = 26,
+		.l5 = 746,
+		.vsize = 750,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_720P50,
+	},
+	{
+		.name = "720p60",
+		.width = 1280,
+		.height = 720,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 370 - 8,
+		.sav2eav = 1280,
+		.l1 = 1,
+		.l3 = 26,
+		.l5 = 746,
+		.vsize = 750,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_720P60,
+	},
+	{
+		.name = "1080I50",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 0,
+		.ycmux_mode = 0,
+		.eav2sav = 720 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 21,
+		.l5 = 561,
+		.l7 = 563,
+		.l9 = 584,
+		.l11 = 1124,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080I50,
+	},
+	{
+		.name = "1080I60",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 0,
+		.ycmux_mode = 0,
+		.eav2sav = 280 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 21,
+		.l5 = 561,
+		.l7 = 563,
+		.l9 = 584,
+		.l11 = 1124,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080I60,
+	},
+	{
+		.name = "1080p60",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 280 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 42,
+		.l5 = 1122,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080P60,
+	},
+
+	/* SDTV formats */
+	{
+		.name = "NTSC_M",
+		.width = 720,
+		.height = 480,
+		.frm_fmt = 0,
+		.ycmux_mode = 1,
+		.eav2sav = 268,
+		.sav2eav = 1440,
+		.l1 = 1,
+		.l3 = 23,
+		.l5 = 263,
+		.l7 = 266,
+		.l9 = 286,
+		.l11 = 525,
+		.vsize = 525,
+		.capture_format = 0,
+		.vbi_supported = 1,
+		.hd_sd = 0,
+		.stdid = V4L2_STD_525_60,
+	},
+	{
+		.name = "PAL_BDGHIK",
+		.width = 720,
+		.height = 576,
+		.frm_fmt = 0,
+		.ycmux_mode = 1,
+		.eav2sav = 280,
+		.sav2eav = 1440,
+		.l1 = 1,
+		.l3 = 23,
+		.l5 = 311,
+		.l7 = 313,
+		.l9 = 336,
+		.l11 = 624,
+		.vsize = 625,
+		.capture_format = 0,
+		.vbi_supported = 1,
+		.hd_sd = 0,
+		.stdid = V4L2_STD_625_50,
+	},
+};
+
+const unsigned int vpif_ch_params_count = ARRAY_SIZE(ch_params);
+
 static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
 {
 	if (val)
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 188841b..10550bd 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -33,7 +33,7 @@
 #define regr(reg)               readl((reg) + vpif_base)
 #define regw(value, reg)        writel(value, (reg + vpif_base))
 
-/* Register Addresss Offsets */
+/* Register Address Offsets */
 #define VPIF_PID			(0x0000)
 #define VPIF_CH0_CTRL			(0x0004)
 #define VPIF_CH1_CTRL			(0x0008)
@@ -577,12 +577,10 @@
 	char name[VPIF_MAX_NAME];	/* Name of the mode */
 	u16 width;			/* Indicates width of the image */
 	u16 height;			/* Indicates height of the image */
-	u8 fps;
-	u8 frm_fmt;			/* Indicates whether this is interlaced
-					 * or progressive format */
-	u8 ycmux_mode;			/* Indicates whether this mode requires
-					 * single or two channels */
-	u16 eav2sav;			/* length of sav 2 eav */
+	u8 frm_fmt;			/* Interlaced (0) or progressive (1) */
+	u8 ycmux_mode;			/* This mode requires one (0) or two (1)
+					   channels */
+	u16 eav2sav;			/* length of eav 2 sav */
 	u16 sav2eav;			/* length of sav 2 eav */
 	u16 l1, l3, l5, l7, l9, l11;	/* Other parameter configurations */
 	u16 vsize;			/* Vertical size of the image */
@@ -590,10 +588,14 @@
 					 * is in BT or in CCD/CMOS */
 	u8  vbi_supported;		/* Indicates whether this mode
 					 * supports capturing vbi or not */
-	u8 hd_sd;
-	v4l2_std_id stdid;
+	u8 hd_sd;			/* HDTV (1) or SDTV (0) format */
+	v4l2_std_id stdid;		/* SDTV format */
+	u32 dv_preset;			/* HDTV format */
 };
 
+extern const unsigned int vpif_ch_params_count;
+extern const struct vpif_channel_config_params ch_params[];
+
 struct vpif_video_params;
 struct vpif_params;
 struct vpif_vbi_params;
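
With hd_sd, stdid and dv_preset now carried in every entry, the shared ch_params[] table can be searched with one rule: SD entries match when their std bitmask intersects the requested standard, HD entries when their preset equals the requested one, exactly as the capture and display update functions below do. A stand-alone, compilable sketch of that rule, using simplified stand-in types rather than the real vpif structures:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel types; values are arbitrary tokens. */
typedef uint64_t std_id_t;
struct cfg {
	const char *name;
	int hd_sd;		/* 0 = SDTV entry, 1 = HDTV entry */
	std_id_t stdid;		/* SD: analog standard bitmask */
	uint32_t dv_preset;	/* HD: digital video preset */
};

#define STD_525_60	(1ULL << 0)
#define STD_625_50	(1ULL << 1)
#define DV_720P60	7	/* stands in for V4L2_DV_720P60 */

static const struct cfg params[] = {
	{ "NTSC_M", 0, STD_525_60, 0 },
	{ "PAL",    0, STD_625_50, 0 },
	{ "720p60", 1, 0,          DV_720P60 },
};

/* SD entries match by bitmask intersection, HD entries by exact preset. */
static const struct cfg *find_cfg(std_id_t stdid, uint32_t preset)
{
	size_t i;

	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
		const struct cfg *c = &params[i];

		if (c->hd_sd == 0 ? (c->stdid & stdid) != 0
				  : c->dv_preset == preset)
			return c;
	}
	return NULL;	/* the "standard not found" (-EINVAL) path */
}

int main(void)
{
	printf("%s\n", find_cfg(STD_625_50, 0)->name);	/* PAL */
	printf("%s\n", find_cfg(0, DV_720P60)->name);	/* 720p60 */
	return 0;
}
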
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 193abab..d93ad74 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -37,6 +37,7 @@
 #include <linux/slab.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
 
 #include "vpif_capture.h"
 #include "vpif.h"
@@ -81,20 +82,6 @@
 static struct device *vpif_dev;
 
 /**
- * ch_params: video standard configuration parameters for vpif
- */
-static const struct vpif_channel_config_params ch_params[] = {
-	{
-		"NTSC_M", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
-		286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
-	},
-	{
-		"PAL_BDGHIK", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
-		336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
-	},
-};
-
-/**
  * vpif_uservirt_to_phys : translate user/virtual address to phy address
  * @virtp: user/virtual address
  *
@@ -342,7 +329,7 @@
  * @dev_id: dev_id ptr
  *
  * It changes status of the captured buffer, takes next buffer from the queue
- * and sets its address in VPIF  registers
+ * and sets its address in VPIF registers
  */
 static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
 {
@@ -435,24 +422,31 @@
 	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	struct vpif_params *vpifparams = &ch->vpifparams;
 	const struct vpif_channel_config_params *config;
-	struct vpif_channel_config_params *std_info;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
 	struct video_obj *vid_ch = &ch->video;
 	int index;
 
 	vpif_dbg(2, debug, "vpif_update_std_info\n");
 
-	std_info = &vpifparams->std_info;
-
-	for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
+	for (index = 0; index < vpif_ch_params_count; index++) {
 		config = &ch_params[index];
-		if (config->stdid & vid_ch->stdid) {
-			memcpy(std_info, config, sizeof(*config));
-			break;
+		if (config->hd_sd == 0) {
+			vpif_dbg(2, debug, "SD format\n");
+			if (config->stdid & vid_ch->stdid) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		} else {
+			vpif_dbg(2, debug, "HD format\n");
+			if (config->dv_preset == vid_ch->dv_preset) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
 		}
 	}
 
 	/* standard not found */
-	if (index == ARRAY_SIZE(ch_params))
+	if (index == vpif_ch_params_count)
 		return -EINVAL;
 
 	common->fmt.fmt.pix.width = std_info->width;
@@ -462,6 +456,7 @@
 	common->fmt.fmt.pix.bytesperline = std_info->width;
 	vpifparams->video_params.hpitch = std_info->width;
 	vpifparams->video_params.storage_mode = std_info->frm_fmt;
+
 	return 0;
 }
 
@@ -757,7 +752,7 @@
 	struct video_obj *vid_ch;
 	struct channel_obj *ch;
 	struct vpif_fh *fh;
-	int i, ret = 0;
+	int i;
 
 	vpif_dbg(2, debug, "vpif_open\n");
 
@@ -766,9 +761,6 @@
 	vid_ch = &ch->video;
 	common = &ch->common[VPIF_VIDEO_INDEX];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	if (NULL == ch->curr_subdev_info) {
 		/**
 		 * search through the sub device to see a registered
@@ -785,8 +777,7 @@
 		}
 		if (i == config->subdev_count) {
 			vpif_err("No sub device registered\n");
-			ret = -ENOENT;
-			goto exit;
+			return -ENOENT;
 		}
 	}
 
@@ -794,8 +785,7 @@
 	fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
 	if (NULL == fh) {
 		vpif_err("unable to allocate memory for file handle object\n");
-		ret = -ENOMEM;
-		goto exit;
+		return -ENOMEM;
 	}
 
 	/* store pointer to fh in private_data member of filep */
@@ -815,9 +805,7 @@
 	/* Initialize priority of this instance to default priority */
 	fh->prio = V4L2_PRIORITY_UNSET;
 	v4l2_prio_open(&ch->prio, &fh->prio);
-exit:
-	mutex_unlock(&common->lock);
-	return ret;
+	return 0;
 }
 
 /**
@@ -837,9 +825,6 @@
 
 	common = &ch->common[VPIF_VIDEO_INDEX];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* if this instance is doing IO */
 	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
 		/* Reset io_usrs member of channel object */
@@ -863,9 +848,6 @@
 	/* Decrement channel usrs counter */
 	ch->usrs--;
 
-	/* unlock mutex on channel object */
-	mutex_unlock(&common->lock);
-
 	/* Close the priority */
 	v4l2_prio_close(&ch->prio, fh->prio);
 
@@ -890,7 +872,6 @@
 	struct channel_obj *ch = fh->channel;
 	struct common_obj *common;
 	u8 index = 0;
-	int ret = 0;
 
 	vpif_dbg(2, debug, "vpif_reqbufs\n");
 
@@ -913,13 +894,8 @@
 
 	common = &ch->common[index];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
-	if (0 != common->io_usrs) {
-		ret = -EBUSY;
-		goto reqbuf_exit;
-	}
+	if (0 != common->io_usrs)
+		return -EBUSY;
 
 	/* Initialize videobuf queue as per the buffer type */
 	videobuf_queue_dma_contig_init(&common->buffer_queue,
@@ -928,7 +904,7 @@
 					    reqbuf->type,
 					    common->fmt.fmt.pix.field,
 					    sizeof(struct videobuf_buffer), fh,
-					    NULL);
+					    &common->lock);
 
 	/* Set io allowed member of file handle to TRUE */
 	fh->io_allowed[index] = 1;
@@ -939,11 +915,7 @@
 	INIT_LIST_HEAD(&common->dma_queue);
 
 	/* Allocate buffers */
-	ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
-	mutex_unlock(&common->lock);
-	return ret;
+	return videobuf_reqbufs(&common->buffer_queue, reqbuf);
 }
 
 /**
@@ -1157,11 +1129,6 @@
 		return ret;
 	}
 
-	if (mutex_lock_interruptible(&common->lock)) {
-		ret = -ERESTARTSYS;
-		goto streamoff_exit;
-	}
-
 	/* If buffer queue is empty, return error */
 	if (list_empty(&common->dma_queue)) {
 		vpif_dbg(1, debug, "buffer queue is empty\n");
@@ -1240,13 +1207,10 @@
 		enable_channel1(1);
 	}
 	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
-	mutex_unlock(&common->lock);
 	return ret;
 
 exit:
-	mutex_unlock(&common->lock);
-streamoff_exit:
-	ret = videobuf_streamoff(&common->buffer_queue);
+	videobuf_streamoff(&common->buffer_queue);
 	return ret;
 }
 
@@ -1284,9 +1248,6 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* disable channel */
 	if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
 		enable_channel0(0);
@@ -1304,8 +1265,6 @@
 	if (ret && (ret != -ENOIOCTLCMD))
 		vpif_dbg(1, debug, "stream off failed in subdev\n");
 
-	mutex_unlock(&common->lock);
-
 	return videobuf_streamoff(&common->buffer_queue);
 }
 
@@ -1381,21 +1340,16 @@
 {
 	struct vpif_fh *fh = priv;
 	struct channel_obj *ch = fh->channel;
-	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	int ret = 0;
 
 	vpif_dbg(2, debug, "vpif_querystd\n");
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* Call querystd function of decoder device */
 	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
 				querystd, std_id);
 	if (ret < 0)
 		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
 
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1451,16 +1405,14 @@
 	fh->initialized = 1;
 
 	/* Call encoder subdevice function to set the standard */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	ch->video.stdid = *std_id;
+	ch->video.dv_preset = V4L2_DV_INVALID;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
 
 	/* Get the information about the standard */
 	if (vpif_update_std_info(ch)) {
-		ret = -EINVAL;
 		vpif_err("Error getting the standard info\n");
-		goto s_std_exit;
+		return -EINVAL;
 	}
 
 	/* Configure the default format information */
@@ -1471,9 +1423,6 @@
 				s_std, *std_id);
 	if (ret < 0)
 		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
-
-s_std_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1567,9 +1516,6 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* first setup input path from sub device to vpif */
 	if (config->setup_input_path) {
 		ret = config->setup_input_path(ch->channel_id,
@@ -1578,7 +1524,7 @@
 			vpif_dbg(1, debug, "couldn't setup input path for the"
 				" sub device %s, for input index %d\n",
 				subdev_info->name, index);
-			goto exit;
+			return ret;
 		}
 	}
 
@@ -1589,7 +1535,7 @@
 					input, output, 0);
 		if (ret < 0) {
 			vpif_dbg(1, debug, "Failed to set input\n");
-			goto exit;
+			return ret;
 		}
 	}
 	vid_ch->input_idx = index;
@@ -1600,9 +1546,6 @@
 
 	/* update tvnorms from the sub device input info */
 	ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
-
-exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1671,11 +1614,7 @@
 		return -EINVAL;
 
 	/* Fill in the information about format */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	*fmt = common->fmt;
-	mutex_unlock(&common->lock);
 	return 0;
 }
 
@@ -1694,7 +1633,7 @@
 	struct v4l2_pix_format *pixfmt;
 	int ret = 0;
 
-	vpif_dbg(2, debug, "VIDIOC_S_FMT\n");
+	vpif_dbg(2, debug, "%s\n", __func__);
 
 	/* If streaming is started, return error */
 	if (common->started) {
@@ -1723,12 +1662,7 @@
 	if (ret)
 		return ret;
 	/* store the format in the channel object */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	common->fmt = *fmt;
-	mutex_unlock(&common->lock);
-
 	return 0;
 }
 
@@ -1807,6 +1741,306 @@
 	return 0;
 }
 
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+		struct v4l2_dv_enum_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+			video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_query_dv_preset() - QUERY_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_query_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+		       video, query_dv_preset, preset);
+}
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret = 0;
+
+	if (common->started) {
+		vpif_dbg(1, debug, "streaming in progress\n");
+		return -EBUSY;
+	}
+
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (ret)
+		return ret;
+
+	fh->initialized = 1;
+
+	/* Call encoder subdevice function to set the standard */
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+
+	ch->video.dv_preset = preset->preset;
+	ch->video.stdid = V4L2_STD_UNKNOWN;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_std_info(ch)) {
+		vpif_dbg(1, debug, "Error getting the standard info\n");
+		ret = -EINVAL;
+	} else {
+		/* Configure the default format information */
+		vpif_config_format(ch);
+
+		ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+				video, s_dv_preset, preset);
+	}
+
+	mutex_unlock(&common->lock);
+
+	return ret;
+}
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	preset->preset = ch->video.dv_preset;
+
+	return 0;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+	int ret;
+
+	if (timings->type != V4L2_DV_BT_656_1120) {
+		vpif_dbg(2, debug, "Timing type not defined\n");
+		return -EINVAL;
+	}
+
+	/* Configure subdevice timings, if any */
+	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+			video, s_dv_timings, timings);
+	if (ret == -ENOIOCTLCMD) {
+		vpif_dbg(2, debug, "Custom DV timings not supported by "
+				"subdevice\n");
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		vpif_dbg(2, debug, "Error setting custom DV timings\n");
+		return ret;
+	}
+
+	if (!(timings->bt.width && timings->bt.height &&
+				(timings->bt.hbackporch ||
+				 timings->bt.hfrontporch ||
+				 timings->bt.hsync) &&
+				timings->bt.vfrontporch &&
+				(timings->bt.vbackporch ||
+				 timings->bt.vsync))) {
+		vpif_dbg(2, debug, "Timings for width, height, "
+				"horizontal back porch, horizontal sync, "
+				"horizontal front porch, vertical back porch, "
+				"vertical sync and vertical front porch "
+				"must be defined\n");
+		return -EINVAL;
+	}
+
+	*bt = timings->bt;
+
+	/* Configure video port timings */
+
+	std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+		bt->hsync - 8;
+	std_info->sav2eav = bt->width;
+
+	std_info->l1 = 1;
+	std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+	if (bt->interlaced) {
+		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+			std_info->vsize = bt->height * 2 +
+				bt->vfrontporch + bt->vsync + bt->vbackporch +
+				bt->il_vfrontporch + bt->il_vsync +
+				bt->il_vbackporch;
+			std_info->l5 = std_info->vsize/2 -
+				(bt->vfrontporch - 1);
+			std_info->l7 = std_info->vsize/2 + 1;
+			std_info->l9 = std_info->l7 + bt->il_vsync +
+				bt->il_vbackporch + 1;
+			std_info->l11 = std_info->vsize -
+				(bt->il_vfrontporch - 1);
+		} else {
+			vpif_dbg(2, debug, "Required timing values for "
+					"interlaced BT format missing\n");
+			return -EINVAL;
+		}
+	} else {
+		std_info->vsize = bt->height + bt->vfrontporch +
+			bt->vsync + bt->vbackporch;
+		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+	}
+	strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+	std_info->width = bt->width;
+	std_info->height = bt->height;
+	std_info->frm_fmt = bt->interlaced ? 0 : 1;
+	std_info->ycmux_mode = 0;
+	std_info->capture_format = 0;
+	std_info->vbi_supported = 0;
+	std_info->hd_sd = 1;
+	std_info->stdid = 0;
+	std_info->dv_preset = V4L2_DV_INVALID;
+
+	vid_ch->stdid = 0;
+	vid_ch->dv_preset = V4L2_DV_INVALID;
+	return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+	timings->bt = *bt;
+
+	return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+			chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+		vpif_dbg(2, debug, "match_type is invalid.\n");
+		return -EINVAL;
+	}
+
+	return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+			g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+			g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+			s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+	/* status for sub devices */
+	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+	return 0;
+}
+
 /* vpif capture ioctl operations */
 static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
 	.vidioc_querycap        	= vpif_querycap,
@@ -1829,6 +2063,18 @@
 	.vidioc_streamon        	= vpif_streamon,
 	.vidioc_streamoff       	= vpif_streamoff,
 	.vidioc_cropcap         	= vpif_cropcap,
+	.vidioc_enum_dv_presets         = vpif_enum_dv_presets,
+	.vidioc_s_dv_preset             = vpif_s_dv_preset,
+	.vidioc_g_dv_preset             = vpif_g_dv_preset,
+	.vidioc_query_dv_preset         = vpif_query_dv_preset,
+	.vidioc_s_dv_timings            = vpif_s_dv_timings,
+	.vidioc_g_dv_timings            = vpif_g_dv_timings,
+	.vidioc_g_chip_ident		= vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= vpif_dbg_g_register,
+	.vidioc_s_register		= vpif_dbg_s_register,
+#endif
+	.vidioc_log_status		= vpif_log_status,
 };
 
 /* vpif file operations */
@@ -1836,7 +2082,7 @@
 	.owner = THIS_MODULE,
 	.open = vpif_open,
 	.release = vpif_release,
-	.ioctl = video_ioctl2,
+	.unlocked_ioctl = video_ioctl2,
 	.mmap = vpif_mmap,
 	.poll = vpif_poll
 };
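
From user space the handlers above are reached through the standard DV-preset ioctls of this kernel generation (they were later superseded by the DV-timings API). A quick test sketch against a capture node; the /dev/video0 path and the chosen preset are assumptions, and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_enum_preset ep;
	struct v4l2_dv_preset p;
	unsigned int i;
	int fd = open("/dev/video0", O_RDWR);	/* device node: assumption */

	if (fd < 0)
		return 1;

	/* List the presets the connected subdevice reports. */
	for (i = 0; ; i++) {
		memset(&ep, 0, sizeof(ep));
		ep.index = i;
		if (ioctl(fd, VIDIOC_ENUM_DV_PRESETS, &ep) < 0)
			break;
		printf("%u: %s (%ux%u)\n", ep.preset,
		       (const char *)ep.name, ep.width, ep.height);
	}

	/* Select 720p60, then ask the decoder what it actually detects. */
	memset(&p, 0, sizeof(p));
	p.preset = V4L2_DV_720P60;
	if (ioctl(fd, VIDIOC_S_DV_PRESET, &p) < 0)
		perror("VIDIOC_S_DV_PRESET");
	if (ioctl(fd, VIDIOC_QUERY_DV_PRESET, &p) == 0)
		printf("detected preset %u\n", p.preset);

	close(fd);
	return 0;
}
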
@@ -1979,6 +2225,7 @@
 		common = &(ch->common[VPIF_VIDEO_INDEX]);
 		spin_lock_init(&common->irqlock);
 		mutex_init(&common->lock);
+		ch->video_dev->lock = &common->lock;
 		/* Initialize prio member of channel object */
 		v4l2_prio_init(&ch->prio);
 		err = video_register_device(ch->video_dev,
@@ -2026,9 +2273,9 @@
 		if (vpif_obj.sd[i])
 			vpif_obj.sd[i]->grp_id = 1 << i;
 	}
-	v4l2_info(&vpif_obj.v4l2_dev, "DM646x VPIF Capture driver"
-		  " initialized\n");
 
+	v4l2_info(&vpif_obj.v4l2_dev,
+			"DM646x VPIF capture driver initialized\n");
 	return 0;
 
 probe_subdev_out:
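
The lock-handling changes in this file follow the core-assisted locking model: publish one mutex in video_device->lock, switch the file operations to .unlocked_ioctl, and hand the same mutex to videobuf as ext_lock, after which the per-ioctl mutex_lock_interruptible() calls become redundant. A condensed, driver-agnostic sketch of that wiring, with invented names and a NULL allocation device purely for brevity:

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf-dma-contig.h>

struct my_chan {
	struct mutex lock;		/* serializes everything on this node */
	spinlock_t irqlock;
	struct video_device *vdev;	/* assumed allocated elsewhere */
	struct videobuf_queue queue;
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,	/* core takes vdev->lock for us */
};

static void my_chan_setup(struct my_chan *ch, void *priv,
			  const struct videobuf_queue_ops *qops)
{
	mutex_init(&ch->lock);
	spin_lock_init(&ch->irqlock);

	ch->vdev->fops = &my_fops;
	ch->vdev->lock = &ch->lock;	/* v4l2 core serializes file ops */

	/* videobuf releases and retakes the same mutex while it sleeps. */
	videobuf_queue_dma_contig_init(&ch->queue, qops, NULL /* dev */,
				       &ch->irqlock,
				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
				       V4L2_FIELD_NONE,
				       sizeof(struct videobuf_buffer),
				       priv, &ch->lock);
}
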
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 4e12ec8..7a4196d 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -59,6 +59,8 @@
 	enum v4l2_field buf_field;
 	/* Currently selected or default standard */
 	v4l2_std_id stdid;
+	u32 dv_preset;
+	struct v4l2_bt_timings bt_timings;
 	/* This is to track the last input that is passed to application */
 	u32 input_idx;
 };
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 412c65d..cdf659a 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -38,6 +38,7 @@
 #include <media/adv7343.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
 
 #include <mach/dm646x.h>
 
@@ -84,17 +85,6 @@
 static struct vpif_device vpif_obj = { {NULL} };
 static struct device *vpif_dev;
 
-static const struct vpif_channel_config_params ch_params[] = {
-	{
-		"NTSC", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
-		286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
-	},
-	{
-		"PAL", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
-		336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
-	},
-};
-
 /*
  * vpif_uservirt_to_phys: This function is used to convert user
  * space virtual address to physical address.
@@ -373,30 +363,54 @@
 	return IRQ_HANDLED;
 }
 
-static int vpif_get_std_info(struct channel_obj *ch)
+static int vpif_update_std_info(struct channel_obj *ch)
 {
-	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	struct video_obj *vid_ch = &ch->video;
 	struct vpif_params *vpifparams = &ch->vpifparams;
 	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
 	const struct vpif_channel_config_params *config;
 
-	int index;
+	int i;
 
-	std_info->stdid = vid_ch->stdid;
-	if (!std_info->stdid)
-		return -1;
-
-	for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
-		config = &ch_params[index];
-		if (config->stdid & std_info->stdid) {
-			memcpy(std_info, config, sizeof(*config));
-			break;
+	for (i = 0; i < vpif_ch_params_count; i++) {
+		config = &ch_params[i];
+		if (config->hd_sd == 0) {
+			vpif_dbg(2, debug, "SD format\n");
+			if (config->stdid & vid_ch->stdid) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		} else {
+			vpif_dbg(2, debug, "HD format\n");
+			if (config->dv_preset == vid_ch->dv_preset) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
 		}
 	}
 
-	if (index == ARRAY_SIZE(ch_params))
-		return -1;
+	if (i == vpif_ch_params_count) {
+		vpif_dbg(1, debug, "Format not found\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vpif_update_resolution(struct channel_obj *ch)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct video_obj *vid_ch = &ch->video;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+
+	if (!vid_ch->stdid && !vid_ch->dv_preset && !vid_ch->bt_timings.height)
+		return -EINVAL;
+
+	if (vid_ch->stdid || vid_ch->dv_preset) {
+		if (vpif_update_std_info(ch))
+			return -EINVAL;
+	}
 
 	common->fmt.fmt.pix.width = std_info->width;
 	common->fmt.fmt.pix.height = std_info->height;
@@ -404,8 +418,8 @@
 			common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
 
 	/* Set height and width parameters */
-	ch->common[VPIF_VIDEO_INDEX].height = std_info->height;
-	ch->common[VPIF_VIDEO_INDEX].width = std_info->width;
+	common->height = std_info->height;
+	common->width = std_info->width;
 
 	return 0;
 }
@@ -516,10 +530,8 @@
 	else
 		sizeimage = config_params.channel_bufsize[ch->channel_id];
 
-	if (vpif_get_std_info(ch)) {
-		vpif_err("Error getting the standard info\n");
+	if (vpif_update_resolution(ch))
 		return -EINVAL;
-	}
 
 	hpitch = pixfmt->bytesperline;
 	vpitch = sizeimage / (hpitch * 2);
@@ -568,7 +580,10 @@
 static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
 {
 	struct vpif_fh *fh = filep->private_data;
-	struct common_obj *common = &fh->channel->common[VPIF_VIDEO_INDEX];
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+	vpif_dbg(2, debug, "vpif_mmap\n");
 
 	return videobuf_mmap_mapper(&common->buffer_queue, vma);
 }
@@ -637,9 +652,6 @@
 	struct channel_obj *ch = fh->channel;
 	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* if this instance is doing IO */
 	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
 		/* Reset io_usrs member of channel object */
@@ -662,8 +674,6 @@
 		    config_params.numbuffers[ch->channel_id];
 	}
 
-	mutex_unlock(&common->lock);
-
 	/* Decrement channel usrs counter */
 	atomic_dec(&ch->usrs);
 	/* If this file handle has initialize encoder device, reset it */
@@ -680,7 +690,12 @@
 }
 
 /* functions implementing ioctls */
-
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
 static int vpif_querycap(struct file *file, void  *priv,
 				struct v4l2_capability *cap)
 {
@@ -722,17 +737,9 @@
 	if (common->fmt.type != fmt->type)
 		return -EINVAL;
 
-	/* Fill in the information about format */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
-	if (vpif_get_std_info(ch)) {
-		vpif_err("Error getting the standard info\n");
+	if (vpif_update_resolution(ch))
 		return -EINVAL;
-	}
-
 	*fmt = common->fmt;
-	mutex_unlock(&common->lock);
 	return 0;
 }
 
@@ -773,12 +780,7 @@
 	/* store the pix format in the channel object */
 	common->fmt.fmt.pix = *pixfmt;
 	/* store the format in the channel object */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	common->fmt = *fmt;
-	mutex_unlock(&common->lock);
-
 	return 0;
 }
 
@@ -808,7 +810,6 @@
 	struct common_obj *common;
 	enum v4l2_field field;
 	u8 index = 0;
-	int ret = 0;
 
 	/* This file handle has not initialized the channel,
 	   It is not allowed to do settings */
@@ -826,18 +827,12 @@
 	index = VPIF_VIDEO_INDEX;
 
 	common = &ch->common[index];
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
 
-	if (common->fmt.type != reqbuf->type) {
-		ret = -EINVAL;
-		goto reqbuf_exit;
-	}
+	if (common->fmt.type != reqbuf->type)
+		return -EINVAL;
 
-	if (0 != common->io_usrs) {
-		ret = -EBUSY;
-		goto reqbuf_exit;
-	}
+	if (0 != common->io_usrs)
+		return -EBUSY;
 
 	if (reqbuf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		if (common->fmt.fmt.pix.field == V4L2_FIELD_ANY)
@@ -854,7 +849,7 @@
 					    &common->irqlock,
 					    reqbuf->type, field,
 					    sizeof(struct videobuf_buffer), fh,
-					    NULL);
+					    &common->lock);
 
 	/* Set io allowed member of file handle to TRUE */
 	fh->io_allowed[index] = 1;
@@ -865,11 +860,7 @@
 	INIT_LIST_HEAD(&common->dma_queue);
 
 	/* Allocate buffers */
-	ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
-	mutex_unlock(&common->lock);
-	return ret;
+	return videobuf_reqbufs(&common->buffer_queue, reqbuf);
 }
 
 static int vpif_querybuf(struct file *file, void *priv,
@@ -990,22 +981,19 @@
 	}
 
 	/* Call encoder subdevice function to set the standard */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	ch->video.stdid = *std_id;
+	ch->video.dv_preset = V4L2_DV_INVALID;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
 	/* Get the information about the standard */
-	if (vpif_get_std_info(ch)) {
-		vpif_err("Error getting the standard info\n");
+	if (vpif_update_resolution(ch))
 		return -EINVAL;
-	}
 
 	if ((ch->vpifparams.std_info.width *
 		ch->vpifparams.std_info.height * 2) >
 		config_params.channel_bufsize[ch->channel_id]) {
 		vpif_err("invalid std for this size\n");
-		ret = -EINVAL;
-		goto s_std_exit;
+		return -EINVAL;
 	}
 
 	common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
@@ -1016,16 +1004,13 @@
 						s_std_output, *std_id);
 	if (ret < 0) {
 		vpif_err("Failed to set output standard\n");
-		goto s_std_exit;
+		return ret;
 	}
 
 	ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, core,
 							s_std, *std_id);
 	if (ret < 0)
 		vpif_err("Failed to set standard for sub devices\n");
-
-s_std_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1090,21 +1075,17 @@
 	if (ret < 0)
 		return ret;
 
-	/* Call videobuf_streamon to start streaming  in videobuf */
+	/* Call videobuf_streamon to start streaming in videobuf */
 	ret = videobuf_streamon(&common->buffer_queue);
 	if (ret < 0) {
 		vpif_err("videobuf_streamon\n");
 		return ret;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* If buffer queue is empty, return error */
 	if (list_empty(&common->dma_queue)) {
 		vpif_err("buffer queue is empty\n");
-		ret = -EIO;
-		goto streamon_exit;
+		return -EIO;
 	}
 
 	/* Get the next frame from the buffer queue */
@@ -1130,8 +1111,7 @@
 			|| (!ch->vpifparams.std_info.frm_fmt
 			&& (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
 			vpif_err("conflict in field format and std format\n");
-			ret = -EINVAL;
-			goto streamon_exit;
+			return -EINVAL;
 		}
 
 		/* clock settings */
@@ -1140,13 +1120,13 @@
 						ch->vpifparams.std_info.hd_sd);
 		if (ret < 0) {
 			vpif_err("can't set clock\n");
-			goto streamon_exit;
+			return ret;
 		}
 
 		/* set the parameters and addresses */
 		ret = vpif_set_video_params(vpif, ch->channel_id + 2);
 		if (ret < 0)
-			goto streamon_exit;
+			return ret;
 
 		common->started = ret;
 		vpif_config_addr(ch, ret);
@@ -1171,9 +1151,6 @@
 		}
 		channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
 	}
-
-streamon_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1199,9 +1176,6 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		/* disable channel */
 		if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
@@ -1216,8 +1190,6 @@
 	}
 
 	common->started = 0;
-	mutex_unlock(&common->lock);
-
 	return videobuf_streamoff(&common->buffer_queue);
 }
 
@@ -1264,13 +1236,9 @@
 	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	int ret = 0;
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	if (common->started) {
 		vpif_err("Streaming in progress\n");
-		ret = -EBUSY;
-		goto s_output_exit;
+		return -EBUSY;
 	}
 
 	ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
@@ -1280,9 +1248,6 @@
 		vpif_err("Failed to set output standard\n");
 
 	vid_ch->output_id = i;
-
-s_output_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1315,6 +1280,287 @@
 	return v4l2_prio_change(&ch->prio, &fh->prio, p);
 }
 
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+		struct v4l2_dv_enum_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+			video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct video_obj *vid_ch = &ch->video;
+	int ret = 0;
+
+	if (common->started) {
+		vpif_dbg(1, debug, "streaming in progress\n");
+		return -EBUSY;
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (ret != 0)
+		return ret;
+
+	fh->initialized = 1;
+
+	/* Call encoder subdevice function to set the standard */
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+
+	ch->video.dv_preset = preset->preset;
+	ch->video.stdid = V4L2_STD_UNKNOWN;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_resolution(ch)) {
+		ret = -EINVAL;
+	} else {
+		/* Configure the default format information */
+		vpif_config_format(ch);
+
+		ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+				video, s_dv_preset, preset);
+	}
+
+	mutex_unlock(&common->lock);
+
+	return ret;
+}
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	preset->preset = ch->video.dv_preset;
+
+	return 0;
+}
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+	int ret;
+
+	if (timings->type != V4L2_DV_BT_656_1120) {
+		vpif_dbg(2, debug, "Timing type not defined\n");
+		return -EINVAL;
+	}
+
+	/* Configure subdevice timings, if any */
+	ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+			video, s_dv_timings, timings);
+	if (ret == -ENOIOCTLCMD) {
+		vpif_dbg(2, debug, "Custom DV timings not supported by "
+				"subdevice\n");
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		vpif_dbg(2, debug, "Error setting custom DV timings\n");
+		return ret;
+	}
+
+	if (!(timings->bt.width && timings->bt.height &&
+				(timings->bt.hbackporch ||
+				 timings->bt.hfrontporch ||
+				 timings->bt.hsync) &&
+				timings->bt.vfrontporch &&
+				(timings->bt.vbackporch ||
+				 timings->bt.vsync))) {
+		vpif_dbg(2, debug, "Timings for width, height, "
+				"horizontal back porch, horizontal sync, "
+				"horizontal front porch, vertical back porch, "
+				"vertical sync and vertical front porch "
+				"must be defined\n");
+		return -EINVAL;
+	}
+
+	*bt = timings->bt;
+
+	/* Configure video port timings */
+
+	std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+		bt->hsync - 8;
+	std_info->sav2eav = bt->width;
+
+	std_info->l1 = 1;
+	std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+	if (bt->interlaced) {
+		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+			std_info->vsize = bt->height * 2 +
+				bt->vfrontporch + bt->vsync + bt->vbackporch +
+				bt->il_vfrontporch + bt->il_vsync +
+				bt->il_vbackporch;
+			std_info->l5 = std_info->vsize/2 -
+				(bt->vfrontporch - 1);
+			std_info->l7 = std_info->vsize/2 + 1;
+			std_info->l9 = std_info->l7 + bt->il_vsync +
+				bt->il_vbackporch + 1;
+			std_info->l11 = std_info->vsize -
+				(bt->il_vfrontporch - 1);
+		} else {
+			vpif_dbg(2, debug, "Required timing values for "
+					"interlaced BT format missing\n");
+			return -EINVAL;
+		}
+	} else {
+		std_info->vsize = bt->height + bt->vfrontporch +
+			bt->vsync + bt->vbackporch;
+		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+	}
+	strncpy(std_info->name, "Custom timings BT656/1120",
+			VPIF_MAX_NAME);
+	std_info->width = bt->width;
+	std_info->height = bt->height;
+	std_info->frm_fmt = bt->interlaced ? 0 : 1;
+	std_info->ycmux_mode = 0;
+	std_info->capture_format = 0;
+	std_info->vbi_supported = 0;
+	std_info->hd_sd = 1;
+	std_info->stdid = 0;
+	std_info->dv_preset = V4L2_DV_INVALID;
+
+	vid_ch->stdid = 0;
+	vid_ch->dv_preset = V4L2_DV_INVALID;
+
+	return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+	timings->bt = *bt;
+
+	return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+			chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+		vpif_dbg(2, debug, "match_type is invalid.\n");
+		return -EINVAL;
+	}
+
+	return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+			g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+			g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+			s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+	/* status for sub devices */
+	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+	return 0;
+}
+
 /* vpif display ioctl operations */
 static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
 	.vidioc_querycap        	= vpif_querycap,
@@ -1336,13 +1582,24 @@
 	.vidioc_s_output		= vpif_s_output,
 	.vidioc_g_output		= vpif_g_output,
 	.vidioc_cropcap         	= vpif_cropcap,
+	.vidioc_enum_dv_presets         = vpif_enum_dv_presets,
+	.vidioc_s_dv_preset             = vpif_s_dv_preset,
+	.vidioc_g_dv_preset             = vpif_g_dv_preset,
+	.vidioc_s_dv_timings            = vpif_s_dv_timings,
+	.vidioc_g_dv_timings            = vpif_g_dv_timings,
+	.vidioc_g_chip_ident		= vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= vpif_dbg_g_register,
+	.vidioc_s_register		= vpif_dbg_s_register,
+#endif
+	.vidioc_log_status		= vpif_log_status,
 };
 
 static const struct v4l2_file_operations vpif_fops = {
 	.owner		= THIS_MODULE,
 	.open		= vpif_open,
 	.release	= vpif_release,
-	.ioctl		= video_ioctl2,
+	.unlocked_ioctl	= video_ioctl2,
 	.mmap		= vpif_mmap,
 	.poll		= vpif_poll
 };
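
Both S_DV_TIMINGS handlers derive the VPIF line parameters from the BT.656/1120 blanking fields; for a progressive mode the arithmetic reduces to eav2sav = hbackporch + hfrontporch + hsync - 8, vsize = height + vfrontporch + vsync + vbackporch, l3 = vsync + vbackporch + 1 and l5 = vsize - (vfrontporch - 1). A stand-alone check that plugs in the standard CEA-861 720p60 blanking values (110/40/220 horizontal, 5/5/20 vertical) and reproduces the fixed "720p60" table entry added to vpif.c earlier (362, 750, 26, 746; the table stores eav2sav as 370 - 8):

#include <stdio.h>

/* CEA-861 720p60 active size and blanking; the struct is a local stand-in. */
struct bt {
	int width, height;
	int hfrontporch, hsync, hbackporch;
	int vfrontporch, vsync, vbackporch;
};

int main(void)
{
	struct bt t = { 1280, 720, 110, 40, 220, 5, 5, 20 };
	int eav2sav = t.hbackporch + t.hfrontporch + t.hsync - 8;
	int vsize = t.height + t.vfrontporch + t.vsync + t.vbackporch;
	int l3 = t.vsync + t.vbackporch + 1;
	int l5 = vsize - (t.vfrontporch - 1);

	/* Prints: eav2sav=362 vsize=750 l3=26 l5=746 */
	printf("eav2sav=%d vsize=%d l3=%d l5=%d\n", eav2sav, vsize, l3, l5);
	return 0;
}
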
@@ -1526,6 +1783,7 @@
 		v4l2_prio_init(&ch->prio);
 		ch->common[VPIF_VIDEO_INDEX].fmt.type =
 						V4L2_BUF_TYPE_VIDEO_OUTPUT;
+		ch->video_dev->lock = &common->lock;
 
 		/* register video device */
 		vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
@@ -1565,6 +1823,8 @@
 			vpif_obj.sd[i]->grp_id = 1 << i;
 	}
 
+	v4l2_info(&vpif_obj.v4l2_dev,
+			"DM646x VPIF display driver initialized\n");
 	return 0;
 
 probe_subdev_out:
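
The new log-status and chip-ident hooks are also reachable from user space; a quick sketch, where the device node, the "adv7343" match name and the presence of VIDIOC_DBG_G_CHIP_IDENT in the running kernel are all assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dbg_chip_ident chip;
	int fd = open("/dev/video3", O_RDWR);	/* display node: assumption */

	if (fd < 0)
		return 1;

	/* Dumps the status of all registered subdevices into the kernel log. */
	ioctl(fd, VIDIOC_LOG_STATUS);

	memset(&chip, 0, sizeof(chip));
	chip.match.type = V4L2_CHIP_MATCH_I2C_DRIVER;
	strncpy(chip.match.name, "adv7343", sizeof(chip.match.name) - 1);
	if (ioctl(fd, VIDIOC_DBG_G_CHIP_IDENT, &chip) == 0)
		printf("ident %u, revision 0x%x\n", chip.ident, chip.revision);

	close(fd);
	return 0;
}
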
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index a2a7cd1..b53aaa8 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -67,6 +67,8 @@
 					 * most recent displayed frame only */
 	v4l2_std_id stdid;		/* Currently selected or default
 					 * standard */
+	u32 dv_preset;
+	struct v4l2_bt_timings bt_timings;
 	u32 output_id;			/* Current output id */
 };
 
diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c
index 7918680..3e5cf27 100644
--- a/drivers/media/video/davinci/vpss.c
+++ b/drivers/media/video/davinci/vpss.c
@@ -85,7 +85,7 @@
 /*
  * vpss operations. Depends on platform. Not all functions are available
  * on all platforms. The api, first check if a function is available before
- * invoking it. In the probe, the function ptrs are intialized based on
+ * invoking it. In the probe, the function ptrs are initialized based on
  * vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc.
  */
 struct vpss_hw_ops {
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 099d5df..87f77a3 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -33,6 +33,7 @@
 #include <media/saa7115.h>
 #include <media/tvp5150.h>
 #include <media/tvaudio.h>
+#include <media/mt9v011.h>
 #include <media/i2c-addr.h>
 #include <media/tveeprom.h>
 #include <media/v4l2-common.h>
@@ -1917,11 +1918,6 @@
 	I2C_CLIENT_END
 };
 
-static unsigned short mt9v011_addrs[] = {
-	0xba >> 1,
-	I2C_CLIENT_END
-};
-
 static unsigned short msp3400_addrs[] = {
 	0x80 >> 1,
 	0x88 >> 1,
@@ -2437,6 +2433,7 @@
 		dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
 		dev->init_data.get_key = em28xx_get_key_em_haup;
 		dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
+		break;
 	case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
 		dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
 		dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
@@ -2623,11 +2620,17 @@
 			"tvp5150", 0, tvp5150_addrs);
 
 	if (dev->em28xx_sensor == EM28XX_MT9V011) {
+		struct mt9v011_platform_data pdata;
+		struct i2c_board_info mt9v011_info = {
+			.type = "mt9v011",
+			.addr = 0xba >> 1,
+			.platform_data = &pdata,
+		};
 		struct v4l2_subdev *sd;
 
-		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
-			 &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs);
-		v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
+		pdata.xtal = dev->sensor_xtal;
+		sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
+				&mt9v011_info, NULL);
 	}
 
 
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index cc77d14..bf66189 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -59,31 +59,7 @@
 /*****************************************************************************/
 
 static const struct usb_device_id et61x251_id_table[] = {
-	{ USB_DEVICE(0x102c, 0x6151), },
 	{ USB_DEVICE(0x102c, 0x6251), },
-	{ USB_DEVICE(0x102c, 0x6253), },
-	{ USB_DEVICE(0x102c, 0x6254), },
-	{ USB_DEVICE(0x102c, 0x6255), },
-	{ USB_DEVICE(0x102c, 0x6256), },
-	{ USB_DEVICE(0x102c, 0x6257), },
-	{ USB_DEVICE(0x102c, 0x6258), },
-	{ USB_DEVICE(0x102c, 0x6259), },
-	{ USB_DEVICE(0x102c, 0x625a), },
-	{ USB_DEVICE(0x102c, 0x625b), },
-	{ USB_DEVICE(0x102c, 0x625c), },
-	{ USB_DEVICE(0x102c, 0x625d), },
-	{ USB_DEVICE(0x102c, 0x625e), },
-	{ USB_DEVICE(0x102c, 0x625f), },
-	{ USB_DEVICE(0x102c, 0x6260), },
-	{ USB_DEVICE(0x102c, 0x6261), },
-	{ USB_DEVICE(0x102c, 0x6262), },
-	{ USB_DEVICE(0x102c, 0x6263), },
-	{ USB_DEVICE(0x102c, 0x6264), },
-	{ USB_DEVICE(0x102c, 0x6265), },
-	{ USB_DEVICE(0x102c, 0x6266), },
-	{ USB_DEVICE(0x102c, 0x6267), },
-	{ USB_DEVICE(0x102c, 0x6268), },
-	{ USB_DEVICE(0x102c, 0x6269), },
 	{ }
 };
 
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 6290439..a09c470 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -276,7 +276,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04a5, 0x3035)},
 	{}
 };
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 1eacb6c..8b39849 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1040,14 +1040,14 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0572, 0x0041)},
 	{}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index c1ae05f..4bf2cab 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2088,7 +2088,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0553, 0x0002)},
 	{USB_DEVICE(0x0813, 0x0001)},
 	{}
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index a594b36..4b2c483 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -864,7 +864,7 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
 #if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
 	{USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
@@ -875,7 +875,7 @@
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 		    const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index d782264..987b4b69d 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -229,7 +229,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04cb, 0x0104)},
 	{USB_DEVICE(0x04cb, 0x0109)},
 	{USB_DEVICE(0x04cb, 0x010b)},
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index b05bec7..9908303 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -488,7 +488,7 @@
 
 /*=================== USB driver structure initialisation ==================*/
 
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x05e3, 0x0503)},
 	{USB_DEVICE(0x05e3, 0xf191)},
 	{}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 4429700..f21f2a2 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -55,7 +55,7 @@
 MODULE_DESCRIPTION("GSPCA USB Camera Driver");
 MODULE_LICENSE("GPL");
 
-#define DRIVER_VERSION_NUMBER	KERNEL_VERSION(2, 11, 0)
+#define DRIVER_VERSION_NUMBER	KERNEL_VERSION(2, 12, 0)
 
 #ifdef GSPCA_DEBUG
 int gspca_debug = D_ERR | D_PROBE;
@@ -508,8 +508,8 @@
 	return 0;
 }
 
-static int frame_alloc(struct gspca_dev *gspca_dev,
-			unsigned int count)
+static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
+			enum v4l2_memory memory, unsigned int count)
 {
 	struct gspca_frame *frame;
 	unsigned int frsz;
@@ -519,7 +519,6 @@
 	frsz = gspca_dev->cam.cam_mode[i].sizeimage;
 	PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
 	frsz = PAGE_ALIGN(frsz);
-	gspca_dev->frsz = frsz;
 	if (count >= GSPCA_MAX_FRAMES)
 		count = GSPCA_MAX_FRAMES - 1;
 	gspca_dev->frbuf = vmalloc_32(frsz * count);
@@ -527,6 +526,9 @@
 		err("frame alloc failed");
 		return -ENOMEM;
 	}
+	gspca_dev->capt_file = file;
+	gspca_dev->memory = memory;
+	gspca_dev->frsz = frsz;
 	gspca_dev->nframes = count;
 	for (i = 0; i < count; i++) {
 		frame = &gspca_dev->frame[i];
@@ -535,7 +537,7 @@
 		frame->v4l2_buf.flags = 0;
 		frame->v4l2_buf.field = V4L2_FIELD_NONE;
 		frame->v4l2_buf.length = frsz;
-		frame->v4l2_buf.memory = gspca_dev->memory;
+		frame->v4l2_buf.memory = memory;
 		frame->v4l2_buf.sequence = 0;
 		frame->data = gspca_dev->frbuf + i * frsz;
 		frame->v4l2_buf.m.offset = i * frsz;
@@ -558,6 +560,9 @@
 			gspca_dev->frame[i].data = NULL;
 	}
 	gspca_dev->nframes = 0;
+	gspca_dev->frsz = 0;
+	gspca_dev->capt_file = NULL;
+	gspca_dev->memory = GSPCA_MEMORY_NO;
 }
 
 static void destroy_urbs(struct gspca_dev *gspca_dev)
@@ -1210,29 +1215,15 @@
 static int dev_open(struct file *file)
 {
 	struct gspca_dev *gspca_dev;
-	int ret;
 
 	PDEBUG(D_STREAM, "[%s] open", current->comm);
 	gspca_dev = (struct gspca_dev *) video_devdata(file);
-	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
-		return -ERESTARTSYS;
-	if (!gspca_dev->present) {
-		ret = -ENODEV;
-		goto out;
-	}
-
-	if (gspca_dev->users > 4) {	/* (arbitrary value) */
-		ret = -EBUSY;
-		goto out;
-	}
+	if (!gspca_dev->present)
+		return -ENODEV;
 
 	/* protect the subdriver against rmmod */
-	if (!try_module_get(gspca_dev->module)) {
-		ret = -ENODEV;
-		goto out;
-	}
-
-	gspca_dev->users++;
+	if (!try_module_get(gspca_dev->module))
+		return -ENODEV;
 
 	file->private_data = gspca_dev;
 #ifdef GSPCA_DEBUG
@@ -1244,14 +1235,7 @@
 		gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
 					| V4L2_DEBUG_IOCTL_ARG);
 #endif
-	ret = 0;
-out:
-	mutex_unlock(&gspca_dev->queue_lock);
-	if (ret != 0)
-		PDEBUG(D_ERR|D_STREAM, "open failed err %d", ret);
-	else
-		PDEBUG(D_STREAM, "open done");
-	return ret;
+	return 0;
 }
 
 static int dev_close(struct file *file)
@@ -1261,7 +1245,6 @@
 	PDEBUG(D_STREAM, "[%s] close", current->comm);
 	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
 		return -ERESTARTSYS;
-	gspca_dev->users--;
 
 	/* if the file did the capture, free the streaming resources */
 	if (gspca_dev->capt_file == file) {
@@ -1272,8 +1255,6 @@
 			mutex_unlock(&gspca_dev->usb_lock);
 		}
 		frame_free(gspca_dev);
-		gspca_dev->capt_file = NULL;
-		gspca_dev->memory = GSPCA_MEMORY_NO;
 	}
 	file->private_data = NULL;
 	module_put(gspca_dev->module);
@@ -1516,6 +1497,7 @@
 		return -ERESTARTSYS;
 
 	if (gspca_dev->memory != GSPCA_MEMORY_NO
+	    && gspca_dev->memory != GSPCA_MEMORY_READ
 	    && gspca_dev->memory != rb->memory) {
 		ret = -EBUSY;
 		goto out;
@@ -1544,19 +1526,18 @@
 		gspca_stream_off(gspca_dev);
 		mutex_unlock(&gspca_dev->usb_lock);
 	}
+	/* Don't restart the stream when switching from read to mmap mode */
+	if (gspca_dev->memory == GSPCA_MEMORY_READ)
+		streaming = 0;
 
 	/* free the previous allocated buffers, if any */
-	if (gspca_dev->nframes != 0) {
+	if (gspca_dev->nframes != 0)
 		frame_free(gspca_dev);
-		gspca_dev->capt_file = NULL;
-	}
 	if (rb->count == 0)			/* unrequest */
 		goto out;
-	gspca_dev->memory = rb->memory;
-	ret = frame_alloc(gspca_dev, rb->count);
+	ret = frame_alloc(gspca_dev, file, rb->memory, rb->count);
 	if (ret == 0) {
 		rb->count = gspca_dev->nframes;
-		gspca_dev->capt_file = file;
 		if (streaming)
 			ret = gspca_init_transfer(gspca_dev);
 	}
@@ -1630,11 +1611,15 @@
 
 	if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
-	if (!gspca_dev->streaming)
-		return 0;
+
 	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
 		return -ERESTARTSYS;
 
+	if (!gspca_dev->streaming) {
+		ret = 0;
+		goto out;
+	}
+
 	/* check the capture file */
 	if (gspca_dev->capt_file != file) {
 		ret = -EBUSY;
@@ -1649,6 +1634,8 @@
 	gspca_dev->usb_err = 0;
 	gspca_stream_off(gspca_dev);
 	mutex_unlock(&gspca_dev->usb_lock);
+	/* In case another thread is waiting in dqbuf */
+	wake_up_interruptible(&gspca_dev->wq);
 
 	/* empty the transfer queues */
 	atomic_set(&gspca_dev->fr_q, 0);
@@ -1827,43 +1814,29 @@
 	return ret;
 }
 
-/*
- * wait for a video frame
- *
- * If a frame is ready, its index is returned.
- */
-static int frame_wait(struct gspca_dev *gspca_dev,
-			int nonblock_ing)
+static int frame_ready_nolock(struct gspca_dev *gspca_dev, struct file *file,
+				enum v4l2_memory memory)
 {
-	int i, ret;
+	if (!gspca_dev->present)
+		return -ENODEV;
+	if (gspca_dev->capt_file != file || gspca_dev->memory != memory ||
+			!gspca_dev->streaming)
+		return -EINVAL;
 
 	/* check if a frame is ready */
-	i = gspca_dev->fr_o;
-	if (i == atomic_read(&gspca_dev->fr_i)) {
-		if (nonblock_ing)
-			return -EAGAIN;
+	return gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i);
+}
 
-		/* wait till a frame is ready */
-		ret = wait_event_interruptible_timeout(gspca_dev->wq,
-			i != atomic_read(&gspca_dev->fr_i) ||
-			!gspca_dev->streaming || !gspca_dev->present,
-			msecs_to_jiffies(3000));
-		if (ret < 0)
-			return ret;
-		if (ret == 0 || !gspca_dev->streaming || !gspca_dev->present)
-			return -EIO;
-	}
+static int frame_ready(struct gspca_dev *gspca_dev, struct file *file,
+			enum v4l2_memory memory)
+{
+	int ret;
 
-	gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
-
-	if (gspca_dev->sd_desc->dq_callback) {
-		mutex_lock(&gspca_dev->usb_lock);
-		gspca_dev->usb_err = 0;
-		if (gspca_dev->present)
-			gspca_dev->sd_desc->dq_callback(gspca_dev);
-		mutex_unlock(&gspca_dev->usb_lock);
-	}
-	return gspca_dev->fr_queue[i];
+	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+		return -ERESTARTSYS;
+	ret = frame_ready_nolock(gspca_dev, file, memory);
+	mutex_unlock(&gspca_dev->queue_lock);
+	return ret;
 }
 
 /*
@@ -1876,33 +1849,57 @@
 {
 	struct gspca_dev *gspca_dev = priv;
 	struct gspca_frame *frame;
-	int i, ret;
+	int i, j, ret;
 
 	PDEBUG(D_FRAM, "dqbuf");
-	if (v4l2_buf->memory != gspca_dev->memory)
-		return -EINVAL;
 
-	if (!gspca_dev->present)
-		return -ENODEV;
-
-	/* if not streaming, be sure the application will not loop forever */
-	if (!(file->f_flags & O_NONBLOCK)
-	    && !gspca_dev->streaming && gspca_dev->users == 1)
-		return -EINVAL;
-
-	/* only the capturing file may dequeue */
-	if (gspca_dev->capt_file != file)
-		return -EINVAL;
-
-	/* only one dequeue / read at a time */
-	if (mutex_lock_interruptible(&gspca_dev->read_lock))
+	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
 		return -ERESTARTSYS;
 
-	ret = frame_wait(gspca_dev, file->f_flags & O_NONBLOCK);
-	if (ret < 0)
-		goto out;
-	i = ret;				/* frame index */
-	frame = &gspca_dev->frame[i];
+	for (;;) {
+		ret = frame_ready_nolock(gspca_dev, file, v4l2_buf->memory);
+		if (ret < 0)
+			goto out;
+		if (ret > 0)
+			break;
+
+		mutex_unlock(&gspca_dev->queue_lock);
+
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		/* wait till a frame is ready */
+		ret = wait_event_interruptible_timeout(gspca_dev->wq,
+			frame_ready(gspca_dev, file, v4l2_buf->memory),
+			msecs_to_jiffies(3000));
+		if (ret < 0)
+			return ret;
+		if (ret == 0)
+			return -EIO;
+
+		if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+			return -ERESTARTSYS;
+	}
+
+	i = gspca_dev->fr_o;
+	j = gspca_dev->fr_queue[i];
+	frame = &gspca_dev->frame[j];
+
+	gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
+
+	if (gspca_dev->sd_desc->dq_callback) {
+		mutex_lock(&gspca_dev->usb_lock);
+		gspca_dev->usb_err = 0;
+		if (gspca_dev->present)
+			gspca_dev->sd_desc->dq_callback(gspca_dev);
+		mutex_unlock(&gspca_dev->usb_lock);
+	}
+
+	frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
+	memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
+	PDEBUG(D_FRAM, "dqbuf %d", j);
+	ret = 0;
+
 	if (gspca_dev->memory == V4L2_MEMORY_USERPTR) {
 		if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr,
 				 frame->data,
@@ -1910,15 +1907,10 @@
 			PDEBUG(D_ERR|D_STREAM,
 				"dqbuf cp to user failed");
 			ret = -EFAULT;
-			goto out;
 		}
 	}
-	frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
-	memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
-	PDEBUG(D_FRAM, "dqbuf %d", i);
-	ret = 0;
 out:
-	mutex_unlock(&gspca_dev->read_lock);
+	mutex_unlock(&gspca_dev->queue_lock);
 	return ret;
 }
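The rewritten dqbuf uses a check-then-sleep loop: evaluate the ready predicate under queue_lock, and when nothing is queued drop the lock and sleep in wait_event_interruptible_timeout() on a helper that re-takes the lock just to evaluate the predicate, with a 3 s timeout bounding the wait. A stripped-down sketch of that loop shape (dev_state, lock, wq and ready are placeholders for the driver's own fields):

	#include <linux/mutex.h>
	#include <linux/wait.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct dev_state {
		struct mutex lock;
		wait_queue_head_t wq;
		int ready;
	};

	/* evaluate the condition with the lock held, as frame_ready() does */
	static int cond_locked(struct dev_state *s)
	{
		int ret;

		if (mutex_lock_interruptible(&s->lock))
			return -ERESTARTSYS;
		ret = s->ready;
		mutex_unlock(&s->lock);
		return ret;
	}

	static int wait_for_it(struct dev_state *s, int nonblock)
	{
		long t;

		if (mutex_lock_interruptible(&s->lock))
			return -ERESTARTSYS;
		for (;;) {
			if (s->ready)
				break;		/* something is queued */
			mutex_unlock(&s->lock);

			if (nonblock)
				return -EAGAIN;
			t = wait_event_interruptible_timeout(s->wq,
					cond_locked(s), msecs_to_jiffies(3000));
			if (t < 0)
				return t;	/* interrupted by a signal */
			if (t == 0)
				return -EIO;	/* timed out */
			if (mutex_lock_interruptible(&s->lock))
				return -ERESTARTSYS;
		}
		/* consume the queued item here, still under the lock */
		mutex_unlock(&s->lock);
		return 0;
	}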
 
@@ -2033,9 +2025,7 @@
 	poll_wait(file, &gspca_dev->wq, wait);
 
 	/* if reqbufs is not done, the user would use read() */
-	if (gspca_dev->nframes == 0) {
-		if (gspca_dev->memory != GSPCA_MEMORY_NO)
-			return POLLERR;		/* not the 1st time */
+	if (gspca_dev->memory == GSPCA_MEMORY_NO) {
 		ret = read_alloc(gspca_dev, file);
 		if (ret != 0)
 			return POLLERR;
@@ -2067,18 +2057,10 @@
 	PDEBUG(D_FRAM, "read (%zd)", count);
 	if (!gspca_dev->present)
 		return -ENODEV;
-	switch (gspca_dev->memory) {
-	case GSPCA_MEMORY_NO:			/* first time */
+	if (gspca_dev->memory == GSPCA_MEMORY_NO) { /* first time ? */
 		ret = read_alloc(gspca_dev, file);
 		if (ret != 0)
 			return ret;
-		break;
-	case GSPCA_MEMORY_READ:
-		if (gspca_dev->capt_file == file)
-			break;
-		/* fall thru */
-	default:
-		return -EINVAL;
 	}
 
 	/* get a frame */
@@ -2266,7 +2248,6 @@
 		goto out;
 
 	mutex_init(&gspca_dev->usb_lock);
-	mutex_init(&gspca_dev->read_lock);
 	mutex_init(&gspca_dev->queue_lock);
 	init_waitqueue_head(&gspca_dev->wq);
 
@@ -2341,12 +2322,11 @@
 	PDEBUG(D_PROBE, "%s disconnect",
 		video_device_node_name(&gspca_dev->vdev));
 	mutex_lock(&gspca_dev->usb_lock);
-	gspca_dev->present = 0;
 
-	if (gspca_dev->streaming) {
-		destroy_urbs(gspca_dev);
-		wake_up_interruptible(&gspca_dev->wq);
-	}
+	gspca_dev->present = 0;
+	wake_up_interruptible(&gspca_dev->wq);
+
+	destroy_urbs(gspca_dev);
 
 #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
 	gspca_input_destroy_urb(gspca_dev);
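The disconnect hunk changes the teardown order: mark the device absent first, wake every sleeper on the wait queue so blocked dqbuf/read callers notice, then destroy the URBs unconditionally rather than only while streaming. Reduced to its three steps (locking as in the driver):

	mutex_lock(&gspca_dev->usb_lock);

	gspca_dev->present = 0;			/* 1. no new I/O may start */
	wake_up_interruptible(&gspca_dev->wq);	/* 2. kick anyone sleeping in dqbuf */

	destroy_urbs(gspca_dev);		/* 3. then drop the transfer machinery */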
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 97b77a2..4175522 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -205,14 +205,12 @@
 
 	wait_queue_head_t wq;		/* wait queue */
 	struct mutex usb_lock;		/* usb exchange protection */
-	struct mutex read_lock;		/* read protection */
 	struct mutex queue_lock;	/* ISOC queue protection */
 	int usb_err;			/* USB error - protected by usb_lock */
 	u16 pkt_size;			/* ISOC packet size */
 #ifdef CONFIG_PM
 	char frozen;			/* suspend - resume */
 #endif
-	char users;			/* number of opens */
 	char present;			/* device connected */
 	char nbufread;			/* number of buffers for read() */
 	char memory;			/* memory type (V4L2_MEMORY_xxx) */
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index a35e87b..06b777f 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -314,7 +314,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0979, 0x0280)},
 	{}
 };
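This and the following subdrivers drop __devinitdata/__devinitconst from their USB id tables (and __devinit from sd_probe), presumably because those tables and probe paths can be referenced after module init, e.g. when a camera is hot-plugged, so they must not live in an init-discarded section. The resulting declaration pattern is simply (sd_desc and struct sd are the subdriver's own, as in the hunks):

	/* plain, non-__devinitdata table: it must outlive module init */
	static const struct usb_device_id device_table[] = {
		{USB_DEVICE(0x0979, 0x0280)},
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, device_table);

	static int sd_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
	{
		return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
				       THIS_MODULE);
	}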
diff --git a/drivers/media/video/gspca/jpeg.h b/drivers/media/video/gspca/jpeg.h
index de63c36..ab54910 100644
--- a/drivers/media/video/gspca/jpeg.h
+++ b/drivers/media/video/gspca/jpeg.h
@@ -141,9 +141,9 @@
 	memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
 #ifndef CONEX_CAM
 	jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
-	jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height & 0xff;
+	jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
 	jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
-	jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width & 0xff;
+	jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
 	jpeg_hdr[JPEG_HEIGHT_OFFSET + 6] = samplesY;
 #endif
 }
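The jpeg.h change relies on the destination being a byte array: assigning a 16-bit height or width to a u8 element already keeps only the low 8 bits, so the explicit "& 0xff" was redundant, while the high byte still needs the shift. For example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t hdr[4];
		unsigned height = 480, width = 640;

		hdr[0] = height >> 8;	/* 0x01 */
		hdr[1] = height;	/* 0xe0 - implicit truncation to 8 bits */
		hdr[2] = width >> 8;	/* 0x02 */
		hdr[3] = width;		/* 0x80 */
		printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
		return 0;
	}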
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index d2ce65d..5964691 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -607,7 +607,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */
 	{}
 };
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index c872b93..a7722b1 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -28,7 +28,7 @@
 static int dump_bridge;
 int dump_sensor;
 
-static const __devinitdata struct usb_device_id m5602_table[] = {
+static const struct usb_device_id m5602_table[] = {
 	{USB_DEVICE(0x0402, 0x5602)},
 	{}
 };
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index a81536e..cb4d0bf 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -490,7 +490,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x093a, 0x050f)},
 	{}
 };
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 7607a28..3884c9d 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1229,7 +1229,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x08ca, 0x0110)},	/* Trust Spyc@m 100 */
 	{USB_DEVICE(0x08ca, 0x0111)},	/* Aiptek Pencam VGA+ */
 	{USB_DEVICE(0x093a, 0x010f)},	/* All other known MR97310A VGA cams */
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index e1c3b93..8ab2c45 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -488,7 +488,6 @@
 #define R511_SNAP_PXDIV			0x1c
 #define R511_SNAP_LNDIV			0x1d
 #define R511_SNAP_UV_EN			0x1e
-#define R511_SNAP_UV_EN			0x1e
 #define R511_SNAP_OPTS			0x1f
 
 #define R511_DRAM_FLOW_CTL		0x20
@@ -1847,8 +1846,7 @@
 	{ 0x6c, 0x0a },
 	{ 0x6d, 0x55 },
 	{ 0x6e, 0x11 },
-	{ 0x6f, 0x9f },
-					/* "9e for advance AWB" */
+	{ 0x6f, 0x9f },			/* "9e for advance AWB" */
 	{ 0x6a, 0x40 },
 	{ OV7670_R01_BLUE, 0x40 },
 	{ OV7670_R02_RED, 0x60 },
@@ -3054,7 +3052,7 @@
 {
 	static const struct ov_regvals init_519[] = {
 		{ 0x5a, 0x6d }, /* EnableSystem */
-		{ 0x53, 0x9b },
+		{ 0x53, 0x9b }, /* don't enable the microcontroller */
 		{ OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */
 		{ 0x5d, 0x03 },
 		{ 0x49, 0x01 },
@@ -4747,7 +4745,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
 	{USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
 	{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 0edf939..04da228 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -479,15 +479,20 @@
 	struct usb_device *udev = gspca_dev->dev;
 	int ret;
 
-	PDEBUG(D_USBO, "reg=0x%04x, val=0%02x", reg, val);
+	if (gspca_dev->usb_err < 0)
+		return;
+
+	PDEBUG(D_USBO, "SET 01 0000 %04x %02x", reg, val);
 	gspca_dev->usb_buf[0] = val;
 	ret = usb_control_msg(udev,
 			      usb_sndctrlpipe(udev, 0),
 			      0x01,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
-	if (ret < 0)
+	if (ret < 0) {
 		err("write failed %d", ret);
+		gspca_dev->usb_err = ret;
+	}
 }
 
 static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -495,14 +500,18 @@
 	struct usb_device *udev = gspca_dev->dev;
 	int ret;
 
+	if (gspca_dev->usb_err < 0)
+		return 0;
 	ret = usb_control_msg(udev,
 			      usb_rcvctrlpipe(udev, 0),
 			      0x01,
 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
-	PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]);
-	if (ret < 0)
+	PDEBUG(D_USBI, "GET 01 0000 %04x %02x", reg, gspca_dev->usb_buf[0]);
+	if (ret < 0) {
 		err("read failed %d", ret);
+		gspca_dev->usb_err = ret;
+	}
 	return gspca_dev->usb_buf[0];
 }
 
@@ -558,13 +567,15 @@
 
 static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
 {
-	PDEBUG(D_USBO, "reg: 0x%02x, val: 0x%02x", reg, val);
+	PDEBUG(D_USBO, "sccb write: %02x %02x", reg, val);
 	ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
 	ov534_reg_write(gspca_dev, OV534_REG_WRITE, val);
 	ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
 
-	if (!sccb_check_status(gspca_dev))
+	if (!sccb_check_status(gspca_dev)) {
 		err("sccb_reg_write failed");
+		gspca_dev->usb_err = -EIO;
+	}
 }
 
 static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -885,7 +896,7 @@
 	ov534_set_led(gspca_dev, 0);
 	set_frame_rate(gspca_dev);
 
-	return 0;
+	return gspca_dev->usb_err;
 }
 
 static int sd_start(struct gspca_dev *gspca_dev)
@@ -920,7 +931,7 @@
 
 	ov534_set_led(gspca_dev, 1);
 	ov534_reg_write(gspca_dev, 0xe0, 0x00);
-	return 0;
+	return gspca_dev->usb_err;
 }
 
 static void sd_stopN(struct gspca_dev *gspca_dev)
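The ov534 hunks adopt the sticky-error convention used elsewhere in gspca: once one control transfer fails, gspca_dev->usb_err is latched, every later register access returns early, and sd_init()/sd_start() simply return gspca_dev->usb_err instead of checking each write. A reduced sketch of the pattern; xfer() below is a hypothetical stand-in for the real usb_control_msg() call:

	/* sketch: error latching around register writes */
	struct dev {
		int usb_err;			/* 0, or first USB error seen */
	};

	static int xfer(struct dev *d, unsigned reg, unsigned val)
	{
		(void)d; (void)reg; (void)val;
		return 0;			/* stub; the driver does a control transfer */
	}

	static void reg_write(struct dev *d, unsigned reg, unsigned val)
	{
		int ret;

		if (d->usb_err < 0)
			return;			/* a previous access already failed */
		ret = xfer(d, reg, val);
		if (ret < 0)
			d->usb_err = ret;	/* latch the first error */
	}

	static int dev_start(struct dev *d)
	{
		reg_write(d, 0xe0, 0x00);
		/* ... many more writes, no per-call checks ... */
		return d->usb_err;		/* report the first failure, if any */
	}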
@@ -1289,7 +1300,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x1415, 0x2000)},
 	{}
 };
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index c5244b4..aaf5428 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1429,7 +1429,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x06f8, 0x3003)},
 	{}
 };
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 96f9986..81739a2 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -530,7 +530,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x4028)},
 	{USB_DEVICE(0x093a, 0x2460)},
 	{USB_DEVICE(0x093a, 0x2461)},
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 2700975..5615d7b 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1184,7 +1184,7 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x06f8, 0x3009)},
 	{USB_DEVICE(0x093a, 0x2620)},
 	{USB_DEVICE(0x093a, 0x2621)},
@@ -1201,7 +1201,7 @@
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 6820f5d..f8801b5 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -837,7 +837,7 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x093a, 0x2600)},
 	{USB_DEVICE(0x093a, 0x2601)},
 	{USB_DEVICE(0x093a, 0x2603)},
@@ -849,7 +849,7 @@
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 40a0668..4271f86 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -703,7 +703,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */
 	/* The Genius Smart is untested. I can't find an owner ! */
 	/* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index cb08d00..fcf2989 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2470,7 +2470,7 @@
 			| (SENSOR_ ## sensor << 8) \
 			| (i2c_addr)
 
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
 	{USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
 	{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 73504a3..c6cd68d 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -23,8 +23,15 @@
 /* Some documentation on known sonixb registers:
 
 Reg	Use
+sn9c101 / sn9c102:
 0x10	high nibble red gain low nibble blue gain
 0x11	low nibble green gain
+sn9c103:
+0x05	red gain 0-127
+0x06	blue gain 0-127
+0x07	green gain 0-127
+all:
+0x08-0x0f i2c / 3wire registers
 0x12	hstart
 0x13	vstart
 0x15	hsize (hsize = register-value * 16)
@@ -88,12 +95,9 @@
 typedef const __u8 sensor_init_t[8];
 
 struct sensor_data {
-	const __u8 *bridge_init[2];
-	int bridge_init_size[2];
+	const __u8 *bridge_init;
 	sensor_init_t *sensor_init;
 	int sensor_init_size;
-	sensor_init_t *sensor_bridge_init[2];
-	int sensor_bridge_init_size[2];
 	int flags;
 	unsigned ctrl_dis;
 	__u8 sensor_addr;
@@ -114,7 +118,6 @@
 #define NO_FREQ (1 << FREQ_IDX)
 #define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX)
 
-#define COMP2 0x8f
 #define COMP 0xc7		/* 0x87 //0x07 */
 #define COMP1 0xc9		/* 0x89 //0x09 */
 
@@ -123,15 +126,11 @@
 
 #define SYS_CLK 0x04
 
-#define SENS(bridge_1, bridge_3, sensor, sensor_1, \
-	sensor_3, _flags, _ctrl_dis, _sensor_addr) \
+#define SENS(bridge, sensor, _flags, _ctrl_dis, _sensor_addr) \
 { \
-	.bridge_init = { bridge_1, bridge_3 }, \
-	.bridge_init_size = { sizeof(bridge_1), sizeof(bridge_3) }, \
+	.bridge_init = bridge, \
 	.sensor_init = sensor, \
 	.sensor_init_size = sizeof(sensor), \
-	.sensor_bridge_init = { sensor_1, sensor_3,}, \
-	.sensor_bridge_init_size = { sizeof(sensor_1), sizeof(sensor_3)}, \
 	.flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \
 }
 
@@ -311,7 +310,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
 	0x28, 0x1e, 0x60, 0x8e, 0x42,
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
 };
 static const __u8 hv7131d_sensor_init[][8] = {
 	{0xa0, 0x11, 0x01, 0x04, 0x00, 0x00, 0x00, 0x17},
@@ -326,7 +324,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x02, 0x01, 0x00,
 	0x28, 0x1e, 0x60, 0x8a, 0x20,
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
 };
 static const __u8 hv7131r_sensor_init[][8] = {
 	{0xc0, 0x11, 0x31, 0x38, 0x2a, 0x2e, 0x00, 0x10},
@@ -339,7 +336,7 @@
 	0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
 	0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 	0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
-	0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07
+	0x10,
 };
 static const __u8 ov6650_sensor_init[][8] = {
 	/* Bright, contrast, etc are set through SCBB interface.
@@ -378,24 +375,13 @@
 	0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* r09 .. r10 */
 	0x00, 0x01, 0x01, 0x0a,				/* r11 .. r14 */
 	0x28, 0x1e,			/* H & V sizes     r15 .. r16 */
-	0x68, COMP2, MCK_INIT1,				/* r17 .. r19 */
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c		/* r1a .. r1f */
-};
-static const __u8 initOv7630_3[] = {
-	0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80,	/* r01 .. r08 */
-	0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* r09 .. r10 */
-	0x00, 0x02, 0x01, 0x0a,				/* r11 .. r14 */
-	0x28, 0x1e,			/* H & V sizes     r15 .. r16 */
 	0x68, 0x8f, MCK_INIT1,				/* r17 .. r19 */
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c, 0x00,	/* r1a .. r20 */
-	0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80, /* r21 .. r28 */
-	0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff  /* r29 .. r30 */
 };
 static const __u8 ov7630_sensor_init[][8] = {
 	{0xa0, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10},
 	{0xb0, 0x21, 0x01, 0x77, 0x3a, 0x00, 0x00, 0x10},
 /*	{0xd0, 0x21, 0x12, 0x7c, 0x01, 0x80, 0x34, 0x10},	   jfm */
-	{0xd0, 0x21, 0x12, 0x1c, 0x00, 0x80, 0x34, 0x10},	/* jfm */
+	{0xd0, 0x21, 0x12, 0x5c, 0x00, 0x80, 0x34, 0x10},	/* jfm */
 	{0xa0, 0x21, 0x1b, 0x04, 0x00, 0x80, 0x34, 0x10},
 	{0xa0, 0x21, 0x20, 0x44, 0x00, 0x80, 0x34, 0x10},
 	{0xa0, 0x21, 0x23, 0xee, 0x00, 0x80, 0x34, 0x10},
@@ -413,16 +399,11 @@
 	{0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10},
 };
 
-static const __u8 ov7630_sensor_init_3[][8] = {
-	{0xa0, 0x21, 0x13, 0x80, 0x00,	0x00, 0x00, 0x10},
-};
-
 static const __u8 initPas106[] = {
 	0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00,
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
 	0x16, 0x12, 0x24, COMP1, MCK_INIT1,
-	0x18, 0x10, 0x02, 0x02, 0x09, 0x07
 };
 /* compression 0x86 mckinit1 0x2b */
 
@@ -496,7 +477,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x06, 0x03, 0x0a,
 	0x28, 0x1e, 0x20, 0x89, 0x20,
-	0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c
 };
 
 /* "Known" PAS202BCB registers:
@@ -537,7 +517,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x45, 0x09, 0x0a,
 	0x16, 0x12, 0x60, 0x86, 0x2b,
-	0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
 };
 /* Same as above, except a different hstart */
 static const __u8 initTas5110d[] = {
@@ -545,12 +524,19 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x41, 0x09, 0x0a,
 	0x16, 0x12, 0x60, 0x86, 0x2b,
-	0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
 };
-static const __u8 tas5110_sensor_init[][8] = {
+/* tas5110c is 3 wire, tas5110d is 2 wire (regular i2c) */
+static const __u8 tas5110c_sensor_init[][8] = {
 	{0x30, 0x11, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x10},
 	{0x30, 0x11, 0x02, 0x20, 0xa9, 0x00, 0x00, 0x10},
-	{0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17},
+};
+/* Known TAS5110D registers
+ * reg02: gain, bit order reversed!! 0 == max gain, 255 == min gain
+ * reg03: bit3: vflip, bit4: ~hflip, bit7: ~gainboost (~ == inverted)
+ *        Note: writing reg03 seems to only work when written together with 02
+ */
+static const __u8 tas5110d_sensor_init[][8] = {
+	{0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17}, /* reset */
 };
 
 static const __u8 initTas5130[] = {
@@ -558,7 +544,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x68, 0x0c, 0x0a,
 	0x28, 0x1e, 0x60, COMP, MCK_INIT,
-	0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
 };
 static const __u8 tas5130_sensor_init[][8] = {
 /*	{0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
@@ -569,21 +554,18 @@
 };
 
 static struct sensor_data sensor_data[] = {
-SENS(initHv7131d, NULL, hv7131d_sensor_init, NULL, NULL, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initHv7131r, NULL, hv7131r_sensor_init, NULL, NULL, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
-SENS(initOv6650, NULL, ov6650_sensor_init, NULL, NULL, F_GAIN|F_SIF, 0, 0x60),
-SENS(initOv7630, initOv7630_3, ov7630_sensor_init, NULL, ov7630_sensor_init_3,
-	F_GAIN, 0, 0x21),
-SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_GAIN|F_SIF, NO_FREQ,
-	0),
-SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, F_GAIN,
-	NO_FREQ, 0),
-SENS(initTas5110c, NULL, tas5110_sensor_init, NULL, NULL,
-	F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5110d, NULL, tas5110_sensor_init, NULL, NULL,
-	F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5130, NULL, tas5130_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ,
-	0),
+SENS(initHv7131d, hv7131d_sensor_init, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initHv7131r, hv7131r_sensor_init, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
+SENS(initOv6650, ov6650_sensor_init, F_GAIN|F_SIF, 0, 0x60),
+SENS(initOv7630, ov7630_sensor_init, F_GAIN, 0, 0x21),
+SENS(initPas106, pas106_sensor_init, F_GAIN|F_SIF, NO_FREQ, 0),
+SENS(initPas202, pas202_sensor_init, F_GAIN, NO_FREQ, 0),
+SENS(initTas5110c, tas5110c_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+	NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5110d, tas5110d_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+	NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5130, tas5130_sensor_init, F_GAIN,
+	NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
 };
 
 /* get one byte in gspca_dev->usb_buf */
@@ -655,7 +637,6 @@
 static void setbrightness(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
-	__u8 value;
 
 	switch (sd->sensor) {
 	case  SENSOR_OV6650:
@@ -697,17 +678,6 @@
 			goto err;
 		break;
 	    }
-	case SENSOR_TAS5130CXX: {
-		__u8 i2c[] =
-			{0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
-
-		value = 0xff - sd->brightness;
-		i2c[4] = value;
-		PDEBUG(D_CONF, "brightness %d : %d", value, i2c[4]);
-		if (i2c_w(gspca_dev, i2c) < 0)
-			goto err;
-		break;
-	    }
 	}
 	return;
 err:
@@ -733,7 +703,7 @@
 		break;
 	    }
 	case SENSOR_TAS5110C:
-	case SENSOR_TAS5110D: {
+	case SENSOR_TAS5130CXX: {
 		__u8 i2c[] =
 			{0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
 
@@ -742,6 +712,23 @@
 			goto err;
 		break;
 	    }
+	case SENSOR_TAS5110D: {
+		__u8 i2c[] = {
+			0xb0, 0x61, 0x02, 0x00, 0x10, 0x00, 0x00, 0x17 };
+		gain = 255 - gain;
+		/* The bits in the register are the wrong way around!! */
+		i2c[3] |= (gain & 0x80) >> 7;
+		i2c[3] |= (gain & 0x40) >> 5;
+		i2c[3] |= (gain & 0x20) >> 3;
+		i2c[3] |= (gain & 0x10) >> 1;
+		i2c[3] |= (gain & 0x08) << 1;
+		i2c[3] |= (gain & 0x04) << 3;
+		i2c[3] |= (gain & 0x02) << 5;
+		i2c[3] |= (gain & 0x01) << 7;
+		if (i2c_w(gspca_dev, i2c) < 0)
+			goto err;
+		break;
+	    }
 
 	case SENSOR_OV6650:
 		gain >>= 1;
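The TAS5110D case in the hunk above deals with a gain register that stores its bits in reverse order and treats 0 as maximum gain, so the code first inverts the value (255 - gain) and then mirrors the eight bits with one mask-and-shift per bit. The same reversal can be written as a small loop, for instance:

	#include <stdint.h>

	static uint8_t reverse8(uint8_t v)
	{
		uint8_t r = 0;
		int i;

		for (i = 0; i < 8; i++) {
			r = (uint8_t)(r << 1) | (v & 1);	/* move the lowest bit up */
			v >>= 1;
		}
		return r;
	}

	/* e.g. a requested gain of 200 becomes reverse8(255 - 200) = reverse8(55) = 0xec */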
@@ -796,7 +783,7 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 	__u8 gain;
-	__u8 buf[2] = { 0, 0 };
+	__u8 buf[3] = { 0, 0, 0 };
 
 	if (sensor_data[sd->sensor].flags & F_GAIN) {
 		/* Use the sensor gain to do the actual gain */
@@ -804,13 +791,18 @@
 		return;
 	}
 
-	gain = sd->gain >> 4;
-
-	/* red and blue gain */
-	buf[0] = gain << 4 | gain;
-	/* green gain */
-	buf[1] = gain;
-	reg_w(gspca_dev, 0x10, buf, 2);
+	if (sd->bridge == BRIDGE_103) {
+		gain = sd->gain >> 1;
+		buf[0] = gain; /* Red */
+		buf[1] = gain; /* Green */
+		buf[2] = gain; /* Blue */
+		reg_w(gspca_dev, 0x05, buf, 3);
+	} else {
+		gain = sd->gain >> 4;
+		buf[0] = gain << 4 | gain; /* Red and blue */
+		buf[1] = gain; /* Green */
+		reg_w(gspca_dev, 0x10, buf, 2);
+	}
 }
 
 static void setexposure(struct gspca_dev *gspca_dev)
@@ -1049,7 +1041,7 @@
 		desired_avg_lum = 5000;
 	} else {
 		deadzone = 1500;
-		desired_avg_lum = 18000;
+		desired_avg_lum = 13000;
 	}
 
 	if (sensor_data[sd->sensor].flags & F_COARSE_EXPO)
@@ -1127,53 +1119,91 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 	struct cam *cam = &gspca_dev->cam;
-	int mode, l;
-	const __u8 *sn9c10x;
-	__u8 reg12_19[8];
+	int i, mode;
+	__u8 regs[0x31];
 
 	mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07;
-	sn9c10x = sensor_data[sd->sensor].bridge_init[sd->bridge];
-	l = sensor_data[sd->sensor].bridge_init_size[sd->bridge];
-	memcpy(reg12_19, &sn9c10x[0x12 - 1], 8);
-	reg12_19[6] = sn9c10x[0x18 - 1] | (mode << 4);
-	/* Special cases where reg 17 and or 19 value depends on mode */
+	/* Copy registers 0x01 - 0x19 from the template */
+	memcpy(&regs[0x01], sensor_data[sd->sensor].bridge_init, 0x19);
+	/* Set the mode */
+	regs[0x18] |= mode << 4;
+
+	/* Set bridge gain to 1.0 */
+	if (sd->bridge == BRIDGE_103) {
+		regs[0x05] = 0x20; /* Red */
+		regs[0x06] = 0x20; /* Green */
+		regs[0x07] = 0x20; /* Blue */
+	} else {
+		regs[0x10] = 0x00; /* Red and blue */
+		regs[0x11] = 0x00; /* Green */
+	}
+
+	/* Setup pixel numbers and auto exposure window */
+	if (sensor_data[sd->sensor].flags & F_SIF) {
+		regs[0x1a] = 0x14; /* HO_SIZE 640, makes no sense */
+		regs[0x1b] = 0x0a; /* VO_SIZE 320, makes no sense */
+		regs[0x1c] = 0x02; /* AE H-start 64 */
+		regs[0x1d] = 0x02; /* AE V-start 64 */
+		regs[0x1e] = 0x09; /* AE H-end 288 */
+		regs[0x1f] = 0x07; /* AE V-end 224 */
+	} else {
+		regs[0x1a] = 0x1d; /* HO_SIZE 960, makes no sense */
+		regs[0x1b] = 0x10; /* VO_SIZE 512, makes no sense */
+		regs[0x1c] = 0x05; /* AE H-start 160 */
+		regs[0x1d] = 0x03; /* AE V-start 96 */
+		regs[0x1e] = 0x0f; /* AE H-end 480 */
+		regs[0x1f] = 0x0c; /* AE V-end 384 */
+	}
+
+	/* Setup the gamma table (only used with the sn9c103 bridge) */
+	for (i = 0; i < 16; i++)
+		regs[0x20 + i] = i * 16;
+	regs[0x20 + i] = 255;
+
+	/* Special cases where some regs depend on mode or bridge */
 	switch (sd->sensor) {
 	case SENSOR_TAS5130CXX:
-		/* probably not mode specific at all most likely the upper
+		/* FIXME / TESTME
+		   probably not mode specific at all; most likely the upper
 		   nibble of 0x19 is exposure (clock divider) just as with
 		   the tas5110, we need someone to test this. */
-		reg12_19[7] = mode ? 0x23 : 0x43;
+		regs[0x19] = mode ? 0x23 : 0x43;
 		break;
+	case SENSOR_OV7630:
+		/* FIXME / TESTME for some reason with the 101/102 bridge the
+		   clock is set to 12 MHz (reg1 == 0x04), rather than 24.
+		   Also the hstart needs to go from 1 to 2 when using a 103,
+		   which is likely related. This does not seem right. */
+		if (sd->bridge == BRIDGE_103) {
+			regs[0x01] = 0x44; /* Select 24 Mhz clock */
+			regs[0x12] = 0x02; /* Set hstart to 2 */
+		}
 	}
 	/* Disable compression when the raw bayer format has been selected */
 	if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
-		reg12_19[6] &= ~0x80;
+		regs[0x18] &= ~0x80;
 
 	/* Vga mode emulation on SIF sensor? */
 	if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) {
-		reg12_19[0] += 16; /* 0x12: hstart adjust */
-		reg12_19[1] += 24; /* 0x13: vstart adjust */
-		reg12_19[3] = 320 / 16; /* 0x15: hsize */
-		reg12_19[4] = 240 / 16; /* 0x16: vsize */
+		regs[0x12] += 16;	/* hstart adjust */
+		regs[0x13] += 24;	/* vstart adjust */
+		regs[0x15]  = 320 / 16; /* hsize */
+		regs[0x16]  = 240 / 16; /* vsize */
 	}
 
 	/* reg 0x01 bit 2 video transfert on */
-	reg_w(gspca_dev, 0x01, &sn9c10x[0x01 - 1], 1);
+	reg_w(gspca_dev, 0x01, &regs[0x01], 1);
 	/* reg 0x17 SensorClk enable inv Clk 0x60 */
-	reg_w(gspca_dev, 0x17, &sn9c10x[0x17 - 1], 1);
+	reg_w(gspca_dev, 0x17, &regs[0x17], 1);
 	/* Set the registers from the template */
-	reg_w(gspca_dev, 0x01, sn9c10x, l);
+	reg_w(gspca_dev, 0x01, &regs[0x01],
+	      (sd->bridge == BRIDGE_103) ? 0x30 : 0x1f);
 
 	/* Init the sensor */
 	i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init,
 			sensor_data[sd->sensor].sensor_init_size);
-	if (sensor_data[sd->sensor].sensor_bridge_init[sd->bridge])
-		i2c_w_vector(gspca_dev,
-			sensor_data[sd->sensor].sensor_bridge_init[sd->bridge],
-			sensor_data[sd->sensor].sensor_bridge_init_size[
-				sd->bridge]);
 
-	/* Mode specific sensor setup */
+	/* Mode / bridge specific sensor setup */
 	switch (sd->sensor) {
 	case SENSOR_PAS202: {
 		const __u8 i2cpclockdiv[] =
@@ -1181,27 +1211,37 @@
 		/* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */
 		if (mode)
 			i2c_w(gspca_dev, i2cpclockdiv);
+		break;
 	    }
+	case SENSOR_OV7630:
+		/* FIXME / TESTME We should be able to handle this identically
+		   for the 101/102 and the 103 case */
+		if (sd->bridge == BRIDGE_103) {
+			const __u8 i2c[] = { 0xa0, 0x21, 0x13,
+					     0x80, 0x00, 0x00, 0x00, 0x10 };
+			i2c_w(gspca_dev, i2c);
+		}
+		break;
 	}
 	/* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */
-	reg_w(gspca_dev, 0x15, &reg12_19[3], 2);
+	reg_w(gspca_dev, 0x15, &regs[0x15], 2);
 	/* compression register */
-	reg_w(gspca_dev, 0x18, &reg12_19[6], 1);
+	reg_w(gspca_dev, 0x18, &regs[0x18], 1);
 	/* H_start */
-	reg_w(gspca_dev, 0x12, &reg12_19[0], 1);
+	reg_w(gspca_dev, 0x12, &regs[0x12], 1);
 	/* V_START */
-	reg_w(gspca_dev, 0x13, &reg12_19[1], 1);
+	reg_w(gspca_dev, 0x13, &regs[0x13], 1);
 	/* reset 0x17 SensorClk enable inv Clk 0x60 */
 				/*fixme: ov7630 [17]=68 8f (+20 if 102)*/
-	reg_w(gspca_dev, 0x17, &reg12_19[5], 1);
+	reg_w(gspca_dev, 0x17, &regs[0x17], 1);
 	/*MCKSIZE ->3 */	/*fixme: not ov7630*/
-	reg_w(gspca_dev, 0x19, &reg12_19[7], 1);
+	reg_w(gspca_dev, 0x19, &regs[0x19], 1);
 	/* AE_STRX AE_STRY AE_ENDX AE_ENDY */
-	reg_w(gspca_dev, 0x1c, &sn9c10x[0x1c - 1], 4);
+	reg_w(gspca_dev, 0x1c, &regs[0x1c], 4);
 	/* Enable video transfert */
-	reg_w(gspca_dev, 0x01, &sn9c10x[0], 1);
+	reg_w(gspca_dev, 0x01, &regs[0x01], 1);
 	/* Compression */
-	reg_w(gspca_dev, 0x18, &reg12_19[6], 2);
+	reg_w(gspca_dev, 0x18, &regs[0x18], 2);
 	msleep(20);
 
 	sd->reg11 = -1;
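The rewritten bridge setup builds a full regs[0x01..0x30] image: 0x19 bytes copied from the sensor template, then the mode bits, unity bridge gain, the auto-exposure window, and a 17-entry linear gamma ramp that only the sn9c103 consumes, all sent in one bulk register write. The ramp itself is just a 16-step linear table capped at 255:

	#include <stdio.h>

	int main(void)
	{
		unsigned char gamma[17];
		int i;

		for (i = 0; i < 16; i++)
			gamma[i] = i * 16;	/* 0x00, 0x10, ... 0xf0 */
		gamma[16] = 255;		/* top entry clamped to full scale */

		for (i = 0; i < 17; i++)
			printf("%02x ", gamma[i]);
		printf("\n");
		return 0;
	}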
@@ -1525,15 +1565,15 @@
 	.driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
 
 
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */
 	{USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */
 	{USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */
 	{USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)},
 	{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
 	{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
+#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
 	{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
 #endif
@@ -1544,18 +1584,22 @@
 	{USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
 	{USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
 	{USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
-	/* {USB_DEVICE(0x0c45, 0x602b), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+	/* {USB_DEVICE(0x0c45, 0x6030), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+	/* {USB_DEVICE(0x0c45, 0x6082), SB(MI03XX, 103)}, */ /* MI0343 MI0360 */
+	{USB_DEVICE(0x0c45, 0x6083), SB(HV7131D, 103)},
+	{USB_DEVICE(0x0c45, 0x608c), SB(HV7131R, 103)},
+	/* {USB_DEVICE(0x0c45, 0x608e), SB(CISVF10, 103)}, */
 	{USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+	{USB_DEVICE(0x0c45, 0x60a8), SB(PAS106, 103)},
+	{USB_DEVICE(0x0c45, 0x60aa), SB(TAS5130CXX, 103)},
 	{USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)},
-#endif
 	{USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)},
 	{}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 2d0bb17..d6f39ce 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -25,12 +25,12 @@
 #include "gspca.h"
 #include "jpeg.h"
 
-#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
-
 MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
 MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
 MODULE_LICENSE("GPL");
 
+static int starcam;
+
 /* controls */
 enum e_ctrl {
 	BRIGHTNESS,
@@ -43,7 +43,7 @@
 	HFLIP,
 	VFLIP,
 	SHARPNESS,
-	INFRARED,
+	ILLUM,
 	FREQ,
 	NCTRLS		/* number of controls */
 };
@@ -100,7 +100,8 @@
 };
 
 /* device flags */
-#define PDN_INV	1		/* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_PDN_INV	0x01	/* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_ILLUM		0x02	/* presence of illuminator */
 
 /* sn9c1xx definitions */
 /* register 0x01 */
@@ -124,7 +125,7 @@
 static void setautogain(struct gspca_dev *gspca_dev);
 static void sethvflip(struct gspca_dev *gspca_dev);
 static void setsharpness(struct gspca_dev *gspca_dev);
-static void setinfrared(struct gspca_dev *gspca_dev);
+static void setillum(struct gspca_dev *gspca_dev);
 static void setfreq(struct gspca_dev *gspca_dev);
 
 static const struct ctrl sd_ctrls[NCTRLS] = {
@@ -251,18 +252,17 @@
 	    },
 	    .set_control = setsharpness
 	},
-/* mt9v111 only */
-[INFRARED] = {
+[ILLUM] = {
 	    {
-		.id      = V4L2_CID_INFRARED,
+		.id      = V4L2_CID_ILLUMINATORS_1,
 		.type    = V4L2_CTRL_TYPE_BOOLEAN,
-		.name    = "Infrared",
+		.name    = "Illuminator / infrared",
 		.minimum = 0,
 		.maximum = 1,
 		.step    = 1,
 		.default_value = 0,
 	    },
-	    .set_control = setinfrared
+	    .set_control = setillum
 	},
 /* ov7630/ov7648/ov7660 only */
 [FREQ] = {
@@ -282,32 +282,26 @@
 /* table of the disabled controls */
 static const __u32 ctrl_dis[] = {
 [SENSOR_ADCM1700] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_GC0307] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_GC0307] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_HV7131R] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_HV7131R] =	(1 << HFLIP) |
 			(1 << FREQ),
 
-[SENSOR_MI0360] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_MI0360] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_MI0360B] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_MI0360B] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_MO4000] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_MO4000] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
@@ -315,40 +309,32 @@
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_OM6802] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_OM6802] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_OV7630] =	(1 << INFRARED) |
-			(1 << HFLIP),
+[SENSOR_OV7630] =	(1 << HFLIP),
 
-[SENSOR_OV7648] =	(1 << INFRARED) |
-			(1 << HFLIP),
+[SENSOR_OV7648] =	(1 << HFLIP),
 
 [SENSOR_OV7660] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP),
 
 [SENSOR_PO1030] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
 [SENSOR_PO2030N] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << FREQ),
 
 [SENSOR_SOI768] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
 [SENSOR_SP80708] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
@@ -1822,44 +1808,46 @@
 	PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
 	switch (sd->bridge) {
 	case BRIDGE_SN9C102P:
-		if (regF1 != 0x11)
-			return -ENODEV;
-		reg_w1(gspca_dev, 0x02, regGpio[1]);
-		break;
 	case BRIDGE_SN9C105:
 		if (regF1 != 0x11)
 			return -ENODEV;
-		if (sd->sensor == SENSOR_MI0360)
-			mi0360_probe(gspca_dev);
-		reg_w(gspca_dev, 0x01, regGpio, 2);
-		break;
-	case BRIDGE_SN9C120:
-		if (regF1 != 0x12)
-			return -ENODEV;
-		switch (sd->sensor) {
-		case SENSOR_MI0360:
-			mi0360_probe(gspca_dev);
-			break;
-		case SENSOR_OV7630:
-			ov7630_probe(gspca_dev);
-			break;
-		case SENSOR_OV7648:
-			ov7648_probe(gspca_dev);
-			break;
-		case SENSOR_PO2030N:
-			po2030n_probe(gspca_dev);
-			break;
-		}
-		regGpio[1] = 0x70;		/* no audio */
-		reg_w(gspca_dev, 0x01, regGpio, 2);
 		break;
 	default:
 /*	case BRIDGE_SN9C110: */
-/*	case BRIDGE_SN9C325: */
+/*	case BRIDGE_SN9C120: */
 		if (regF1 != 0x12)
 			return -ENODEV;
+	}
+
+	switch (sd->sensor) {
+	case SENSOR_MI0360:
+		mi0360_probe(gspca_dev);
+		break;
+	case SENSOR_OV7630:
+		ov7630_probe(gspca_dev);
+		break;
+	case SENSOR_OV7648:
+		ov7648_probe(gspca_dev);
+		break;
+	case SENSOR_PO2030N:
+		po2030n_probe(gspca_dev);
+		break;
+	}
+
+	switch (sd->bridge) {
+	case BRIDGE_SN9C102P:
+		reg_w1(gspca_dev, 0x02, regGpio[1]);
+		break;
+	case BRIDGE_SN9C105:
+		reg_w(gspca_dev, 0x01, regGpio, 2);
+		break;
+	case BRIDGE_SN9C110:
 		reg_w1(gspca_dev, 0x02, 0x62);
 		break;
+	case BRIDGE_SN9C120:
+		regGpio[1] = 0x70;		/* no audio */
+		reg_w(gspca_dev, 0x01, regGpio, 2);
+		break;
 	}
 
 	if (sd->sensor == SENSOR_OM6802)
@@ -1874,6 +1862,8 @@
 	sd->i2c_addr = sn9c1xx[9];
 
 	gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
+	if (!(sd->flags & F_ILLUM))
+		gspca_dev->ctrl_dis |= (1 << ILLUM);
 
 	return gspca_dev->usb_err;
 }
@@ -2197,16 +2187,28 @@
 	reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val);
 }
 
-static void setinfrared(struct gspca_dev *gspca_dev)
+static void setillum(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	if (gspca_dev->ctrl_dis & (1 << INFRARED))
+	if (gspca_dev->ctrl_dis & (1 << ILLUM))
 		return;
-/*fixme: different sequence for StarCam Clip and StarCam 370i */
-/* Clip */
-	i2c_w1(gspca_dev, 0x02,				/* gpio */
-		sd->ctrls[INFRARED].val ? 0x66 : 0x64);
+	switch (sd->sensor) {
+	case SENSOR_ADCM1700:
+		reg_w1(gspca_dev, 0x02,				/* gpio */
+			sd->ctrls[ILLUM].val ? 0x64 : 0x60);
+		break;
+	case SENSOR_MT9V111:
+		if (starcam)
+			reg_w1(gspca_dev, 0x02,
+				sd->ctrls[ILLUM].val ?
+						0x55 : 0x54);	/* 370i */
+		else
+			reg_w1(gspca_dev, 0x02,
+				sd->ctrls[ILLUM].val ?
+						0x66 : 0x64);	/* Clip */
+		break;
+	}
 }
 
 static void setfreq(struct gspca_dev *gspca_dev)
@@ -2344,7 +2346,7 @@
 	/* sensor clock already enabled in sd_init */
 	/* reg_w1(gspca_dev, 0xf1, 0x00); */
 	reg01 = sn9c1xx[1];
-	if (sd->flags & PDN_INV)
+	if (sd->flags & F_PDN_INV)
 		reg01 ^= S_PDN_INV;		/* power down inverted */
 	reg_w1(gspca_dev, 0x01, reg01);
 
@@ -2907,13 +2909,11 @@
 	.driver_info = (BRIDGE_ ## bridge << 16) \
 			| (SENSOR_ ## sensor << 8) \
 			| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
 	{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
-#endif
-	{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)},
-	{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
+	{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
+	{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
 	{USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
 	{USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
 	{USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
@@ -2925,7 +2925,7 @@
 /*	{USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */
 	{USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)},
 /*	{USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */
-	{USB_DEVICE(0x0c45, 0x60c0), BS(SN9C105, MI0360)},
+	{USB_DEVICE(0x0c45, 0x60c0), BSF(SN9C105, MI0360, F_ILLUM)},
 						/* or MT9V111 */
 /*	{USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */
 /*	{USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */
@@ -2936,10 +2936,8 @@
 /*	{USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */
 /*	{USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */
 	{USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)},
 	{USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)},
-#endif
 	{USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)},	/*sn9c128*/
 	{USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)},	/* /GC0305*/
 /*	{USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */
@@ -2962,16 +2960,15 @@
 /*	{USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */
 	{USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)},
 	{USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)},
-#endif
 	{USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
 	{USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
 	{USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)},	/*sn9c120b*/
 						/* or GC0305 / GC0307 */
 	{USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)},	/*sn9c120b*/
 	{USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)},	/*sn9c120b*/
-	{USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)},	/*sn9c120b*/
+	{USB_DEVICE(0x0c45, 0x614a), BSF(SN9C120, ADCM1700, F_ILLUM)},
+/*	{USB_DEVICE(0x0c45, 0x614c), BS(SN9C120, GC0306)}, */	/*sn9c120b*/
 	{}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
@@ -3007,3 +3004,7 @@
 
 module_init(sd_mod_init);
 module_exit(sd_mod_exit);
+
+module_param(starcam, int, 0644);
+MODULE_PARM_DESC(starcam,
+	"StarCam model. 0: Clip, 1: 370i");
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index e643386..76c006b 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -555,7 +555,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04fc, 0x1528)},
 	{}
 };
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 8e202b9..45552c3 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1051,7 +1051,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200},
 	{USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300},
 	{USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler},
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 642839a..f7ef282 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2155,7 +2155,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325},
 	{USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera},
 	{USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite},
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index bc9dd90..e5bf865 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -786,7 +786,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra},
 	{USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro},
 /*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 7307638..3483193 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1509,7 +1509,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
 	{USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
 	{USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 3a162c6..e836e77 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1061,7 +1061,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A},
 	{USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A},
 	{USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A},
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 4040677..2e9c061 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -396,7 +396,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x2770, 0x9120)},
 	{}
 };
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 8ba1995..457563b 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -298,7 +298,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x2770, 0x905c)},
 	{USB_DEVICE(0x2770, 0x9050)},
 	{USB_DEVICE(0x2770, 0x9051)},
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index a4a9881..8215d5d 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1163,7 +1163,7 @@
 #define ST(sensor, type) \
 	.driver_info = (SENSOR_ ## sensor << 8) \
 			| (type)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)},
 	{USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)},
 	{USB_DEVICE(0x041e, 0x403d), ST(LZ24BP, 0)},
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 11a192b..87be52b 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -495,7 +495,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x05e1, 0x0893)},
 	{}
 };
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index b199ad4..e2ef41c 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -327,7 +327,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0553, 0x0202)},
 	{USB_DEVICE(0x041e, 0x4007)},
 	{}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 28ea417..7e06614 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -564,7 +564,7 @@
 
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	/* QuickCam Express */
 	{USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
 	/* LEGO cam / QuickCam Web */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index a9cbcd6..543542a 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1162,7 +1162,7 @@
 #define BS(bridge, subtype) \
 	.driver_info = (BRIDGE_ ## bridge << 8) \
 			| (subtype)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x400b), BS(SPCA504C, 0)},
 	{USB_DEVICE(0x041e, 0x4012), BS(SPCA504C, 0)},
 	{USB_DEVICE(0x041e, 0x4013), BS(SPCA504C, 0)},
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 8f0c331..a3eccd8 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1416,7 +1416,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x17a1, 0x0128)},
 	{}
 };
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 38c22f0..933ef2c 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -388,7 +388,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x046d, 0x0920)},
 	{USB_DEVICE(0x046d, 0x0921)},
 	{USB_DEVICE(0x0545, 0x808b)},
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 9b2ae1b..6caed73 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4192,7 +4192,7 @@
 #define BF(bridge, flags) \
 	.driver_info = (BRIDGE_ ## bridge << 8) \
 		| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x405b), BF(VC0323, FL_VFLIP)},
 	{USB_DEVICE(0x046d, 0x0892), BF(VC0321, 0)},
 	{USB_DEVICE(0x046d, 0x0896), BF(VC0321, 0)},
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 5b5039a..c089a0f 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3270,7 +3270,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{ USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 },
 	{ USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 },
 	{ USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 14b85d4..47236a5 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5793,7 +5793,7 @@
 			break;
 		default:
 /*		case 0xdd:	 * delay */
-			msleep(action->val / 64 + 10);
+			msleep(action->idx);
 			break;
 		}
 		action++;
@@ -5830,7 +5830,7 @@
 		[SENSOR_GC0305] =	gc0305_matrix,
 		[SENSOR_HDCS2020b] =	NULL,
 		[SENSOR_HV7131B] =	NULL,
-		[SENSOR_HV7131R] =	NULL,
+		[SENSOR_HV7131R] =	po2030_matrix,
 		[SENSOR_ICM105A] =	po2030_matrix,
 		[SENSOR_MC501CB] =	NULL,
 		[SENSOR_MT9V111_1] =	gc0305_matrix,
@@ -5936,6 +5936,7 @@
 	case SENSOR_ADCM2700:
 	case SENSOR_GC0305:
 	case SENSOR_HV7131B:
+	case SENSOR_HV7131R:
 	case SENSOR_OV7620:
 	case SENSOR_PAS202B:
 	case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@
 		reg_w(gspca_dev, 0x02, 0x003b);
 		reg_w(gspca_dev, 0x00, 0x0038);
 		break;
+	case SENSOR_HV7131R:
 	case SENSOR_PAS202B:
 		reg_w(gspca_dev, 0x03, 0x003b);
 		reg_w(gspca_dev, 0x0c, 0x003a);
 		reg_w(gspca_dev, 0x0b, 0x0039);
-		reg_w(gspca_dev, 0x0b, 0x0038);
+		if (sensor == SENSOR_PAS202B)
+			reg_w(gspca_dev, 0x0b, 0x0038);
 		break;
 	}
 }
@@ -6704,10 +6707,13 @@
 		reg_w(gspca_dev, 0x02, 0x003b);
 		reg_w(gspca_dev, 0x00, 0x0038);
 		break;
+	case SENSOR_HV7131R:
 	case SENSOR_PAS202B:
 		reg_w(gspca_dev, 0x03, 0x003b);
 		reg_w(gspca_dev, 0x0c, 0x003a);
 		reg_w(gspca_dev, 0x0b, 0x0039);
+		if (sd->sensor == SENSOR_HV7131R)
+			reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
 		break;
 	}
 
@@ -6720,6 +6726,7 @@
 		break;
 	case SENSOR_PAS202B:
 	case SENSOR_GC0305:
+	case SENSOR_HV7131R:
 	case SENSOR_TAS5130C:
 		reg_r(gspca_dev, 0x0008);
 		/* fall thru */
@@ -6760,6 +6767,12 @@
 						/* ms-win + */
 		reg_w(gspca_dev, 0x40, 0x0117);
 		break;
+	case SENSOR_HV7131R:
+		i2c_write(gspca_dev, 0x25, 0x04, 0x00);	/* exposure */
+		i2c_write(gspca_dev, 0x26, 0x93, 0x00);
+		i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
+		reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
+		break;
 	case SENSOR_GC0305:
 	case SENSOR_TAS5130C:
 		reg_w(gspca_dev, 0x09, 0x01ad);	/* (from win traces) */
@@ -6808,9 +6821,17 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	if (data[0] == 0xff && data[1] == 0xd8) {	/* start of frame */
+	/* check the JPEG end of frame */
+	if (len >= 3
+	 && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
+/*fixme: what does the last byte mean?*/
 		gspca_frame_add(gspca_dev, LAST_PACKET,
-					NULL, 0);
+					data, len - 1);
+		return;
+	}
+
+	/* check the JPEG start of a frame */
+	if (data[0] == 0xff && data[1] == 0xd8) {
 		/* put the JPEG header in the new frame */
 		gspca_frame_add(gspca_dev, FIRST_PACKET,
 			sd->jpeg_hdr, JPEG_HDR_SZ);
@@ -6909,7 +6930,7 @@
 #endif
 };
 
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x041e)},
 	{USB_DEVICE(0x041e, 0x4017)},
 	{USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106},
diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile
index e0230fc..3baa9f6 100644
--- a/drivers/media/video/hdpvr/Makefile
+++ b/drivers/media/video/hdpvr/Makefile
@@ -1,6 +1,4 @@
-hdpvr-objs	:= hdpvr-control.o hdpvr-core.o hdpvr-video.o
-
-hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o
+hdpvr-objs	:= hdpvr-control.o hdpvr-core.o hdpvr-video.o hdpvr-i2c.o
 
 obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o
 
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index f7d1ee5..a27d93b 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -283,6 +283,7 @@
 	struct hdpvr_device *dev;
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor *endpoint;
+	struct i2c_client *client;
 	size_t buffer_size;
 	int i;
 	int retval = -ENOMEM;
@@ -378,25 +379,35 @@
 		goto error;
 	}
 
-#ifdef CONFIG_I2C
-	/* until i2c is working properly */
-	retval = 0; /* hdpvr_register_i2c_adapter(dev); */
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	retval = hdpvr_register_i2c_adapter(dev);
 	if (retval < 0) {
-		v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
+		v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
 		goto error;
 	}
 
-	/* until i2c is working properly */
-	retval = 0; /* hdpvr_register_i2c_ir(dev); */
-	if (retval < 0)
-		v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n");
-#endif /* CONFIG_I2C */
+	client = hdpvr_register_ir_rx_i2c(dev);
+	if (!client) {
+		v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+		goto reg_fail;
+	}
+
+	client = hdpvr_register_ir_tx_i2c(dev);
+	if (!client) {
+		v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+		goto reg_fail;
+	}
+#endif
 
 	/* let the user know what node this device is now attached to */
 	v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
 		  video_device_node_name(dev->video_dev));
 	return 0;
 
+reg_fail:
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_adapter(&dev->i2c_adapter);
+#endif
 error:
 	if (dev) {
 		/* Destroy single thread */
@@ -426,6 +437,9 @@
 	mutex_lock(&dev->io_mutex);
 	hdpvr_cancel_queue(dev);
 	mutex_unlock(&dev->io_mutex);
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_adapter(&dev->i2c_adapter);
+#endif
 	video_unregister_device(dev->video_dev);
 	atomic_dec(&dev_nr);
 }
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 24966aa..e53fa55 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -13,6 +13,8 @@
  *
  */
 
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+
 #include <linux/i2c.h>
 #include <linux/slab.h>
 
@@ -28,106 +30,86 @@
 #define Z8F0811_IR_TX_I2C_ADDR	0x70
 #define Z8F0811_IR_RX_I2C_ADDR	0x71
 
-static const u8 ir_i2c_addrs[] = {
-	Z8F0811_IR_TX_I2C_ADDR,
-	Z8F0811_IR_RX_I2C_ADDR,
-};
 
-static const char * const ir_devicenames[] = {
-	"ir_tx_z8f0811_hdpvr",
-	"ir_rx_z8f0811_hdpvr",
-};
-
-static int hdpvr_new_i2c_ir(struct hdpvr_device *dev, struct i2c_adapter *adap,
-			    const char *type, u8 addr)
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
 {
-	struct i2c_board_info info;
 	struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
-	unsigned short addr_list[2] = { addr, I2C_CLIENT_END };
+	struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
+		I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
+	};
 
-	memset(&info, 0, sizeof(struct i2c_board_info));
-	strlcpy(info.type, type, I2C_NAME_SIZE);
+	init_data->name = "HD-PVR";
+	hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
+
+	return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
+}
+
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
+{
+	struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+	struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
+		I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
+	};
 
 	/* Our default information for ir-kbd-i2c.c to use */
-	switch (addr) {
-	case Z8F0811_IR_RX_I2C_ADDR:
-		init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
-		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
-		init_data->type = RC_TYPE_RC5;
-		init_data->name = "HD PVR";
-		info.platform_data = init_data;
-		break;
-	}
+	init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+	init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+	init_data->type = RC_TYPE_RC5;
+	init_data->name = "HD-PVR";
+	hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
 
-	return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
-	       -1 : 0;
+	return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
 }
 
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
-{
-	int i;
-	int ret = 0;
-
-	for (i = 0; i < ARRAY_SIZE(ir_i2c_addrs); i++)
-		ret += hdpvr_new_i2c_ir(dev, dev->i2c_adapter,
-					ir_devicenames[i], ir_i2c_addrs[i]);
-
-	return ret;
-}
-
-static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr,
-			  char *data, int len)
+static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
+			  unsigned char addr, char *data, int len)
 {
 	int ret;
-	char *buf = kmalloc(len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
+
+	if (len > sizeof(dev->i2c_buf))
+		return -EINVAL;
 
 	ret = usb_control_msg(dev->udev,
 			      usb_rcvctrlpipe(dev->udev, 0),
 			      REQTYPE_I2C_READ, CTRL_READ_REQUEST,
-			      0x100|addr, 0, buf, len, 1000);
+			      (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
 
 	if (ret == len) {
-		memcpy(data, buf, len);
+		memcpy(data, &dev->i2c_buf, len);
 		ret = 0;
 	} else if (ret >= 0)
 		ret = -EIO;
 
-	kfree(buf);
-
 	return ret;
 }
 
-static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr,
-			   char *data, int len)
+static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus,
+			   unsigned char addr, char *data, int len)
 {
 	int ret;
-	char *buf = kmalloc(len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
 
-	memcpy(buf, data, len);
+	if (len > sizeof(dev->i2c_buf))
+		return -EINVAL;
+
+	memcpy(&dev->i2c_buf, data, len);
 	ret = usb_control_msg(dev->udev,
 			      usb_sndctrlpipe(dev->udev, 0),
 			      REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
-			      0x100|addr, 0, buf, len, 1000);
+			      (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
 
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	ret = usb_control_msg(dev->udev,
 			      usb_rcvctrlpipe(dev->udev, 0),
 			      REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
-			      0, 0, buf, 2, 1000);
+			      0, 0, &dev->i2c_buf, 2, 1000);
 
-	if (ret == 2)
+	if ((ret == 2) && (dev->i2c_buf[1] == (len - 1)))
 		ret = 0;
 	else if (ret >= 0)
 		ret = -EIO;
 
-error:
-	kfree(buf);
 	return ret;
 }
 
@@ -146,10 +128,10 @@
 		addr = msgs[i].addr << 1;
 
 		if (msgs[i].flags & I2C_M_RD)
-			retval = hdpvr_i2c_read(dev, addr, msgs[i].buf,
+			retval = hdpvr_i2c_read(dev, 1, addr, msgs[i].buf,
 						msgs[i].len);
 		else
-			retval = hdpvr_i2c_write(dev, addr, msgs[i].buf,
+			retval = hdpvr_i2c_write(dev, 1, addr, msgs[i].buf,
 						 msgs[i].len);
 	}
 
@@ -168,30 +150,47 @@
 	.functionality = hdpvr_functionality,
 };
 
+static struct i2c_adapter hdpvr_i2c_adapter_template = {
+	.name   = "Hauppauge HD PVR I2C",
+	.owner  = THIS_MODULE,
+	.algo   = &hdpvr_algo,
+};
+
+static int hdpvr_activate_ir(struct hdpvr_device *dev)
+{
+	char buffer[8];
+
+	mutex_lock(&dev->i2c_mutex);
+
+	hdpvr_i2c_read(dev, 0, 0x54, buffer, 1);
+
+	buffer[0] = 0;
+	buffer[1] = 0x8;
+	hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+	buffer[1] = 0x18;
+	hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+	mutex_unlock(&dev->i2c_mutex);
+
+	return 0;
+}
+
 int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
 {
-	struct i2c_adapter *i2c_adap;
 	int retval = -ENOMEM;
 
-	i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
-	if (i2c_adap == NULL)
-		goto error;
+	hdpvr_activate_ir(dev);
 
-	strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
-		sizeof(i2c_adap->name));
-	i2c_adap->algo  = &hdpvr_algo;
-	i2c_adap->owner = THIS_MODULE;
-	i2c_adap->dev.parent = &dev->udev->dev;
+	memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
+	       sizeof(struct i2c_adapter));
+	dev->i2c_adapter.dev.parent = &dev->udev->dev;
 
-	i2c_set_adapdata(i2c_adap, dev);
+	i2c_set_adapdata(&dev->i2c_adapter, dev);
 
-	retval = i2c_add_adapter(i2c_adap);
+	retval = i2c_add_adapter(&dev->i2c_adapter);
 
-	if (!retval)
-		dev->i2c_adapter = i2c_adap;
-	else
-		kfree(i2c_adap);
-
-error:
 	return retval;
 }
+
+#endif
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index d38fe10..514aea7 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -1220,12 +1220,9 @@
 	v4l2_device_unregister(&dev->v4l2_dev);
 
 	/* deregister I2C adapter */
-#ifdef CONFIG_I2C
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
 	mutex_lock(&dev->i2c_mutex);
-	if (dev->i2c_adapter)
-		i2c_del_adapter(dev->i2c_adapter);
-	kfree(dev->i2c_adapter);
-	dev->i2c_adapter = NULL;
+	i2c_del_adapter(&dev->i2c_adapter);
 	mutex_unlock(&dev->i2c_mutex);
 #endif /* CONFIG_I2C */
 
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 37f1e4c..072f23c 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -25,6 +25,7 @@
 	KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
 
 #define HDPVR_MAX 8
+#define HDPVR_I2C_MAX_SIZE 128
 
 /* Define these values to match your devices */
 #define HD_PVR_VENDOR_ID	0x2040
@@ -106,9 +107,11 @@
 	struct work_struct	worker;
 
 	/* I2C adapter */
-	struct i2c_adapter	*i2c_adapter;
+	struct i2c_adapter	i2c_adapter;
 	/* I2C lock */
 	struct mutex		i2c_mutex;
+	/* I2C message buffer space */
+	char			i2c_buf[HDPVR_I2C_MAX_SIZE];
 
 	/* For passing data to ir-kbd-i2c */
 	struct IR_i2c_init_data	ir_i2c_init_data;
@@ -310,7 +313,8 @@
 /* i2c adapter registration */
 int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
 
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
 
 /*========================================================================*/
 /* buffer management */
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index c87b6bc..a221ad6 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -128,6 +128,19 @@
 
 static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
 {
+	int ret;
+	unsigned char buf[1] = { 0 };
+
+	/*
+	 * This is the same apparent "are you ready?" poll command observed
+	 * watching Windows driver traffic and implemented in lirc_zilog. With
+	 * this added, we get far saner remote behavior with z8 chips on usb
+	 * connected devices, even with the default polling interval of 100ms.
+	 */
+	ret = i2c_master_send(ir->c, buf, 1);
+	if (ret != 1)
+		return (ret < 0) ? ret : -EINVAL;
+
 	return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
 }
 
@@ -244,15 +257,17 @@
 	static u32 ir_key, ir_raw;
 	int rc;
 
-	dprintk(2,"ir_poll_key\n");
+	dprintk(3, "%s\n", __func__);
 	rc = ir->get_key(ir, &ir_key, &ir_raw);
 	if (rc < 0) {
 		dprintk(2,"error\n");
 		return;
 	}
 
-	if (rc)
+	if (rc) {
+		dprintk(1, "%s: keycode = 0x%04x\n", __func__, ir_key);
 		rc_keydown(ir->rc, ir_key, 0);
+	}
 }
 
 static void ir_work(struct work_struct *work)
@@ -321,6 +336,12 @@
 		rc_type     = RC_TYPE_OTHER;
 		ir_codes    = RC_MAP_AVERMEDIA_CARDBUS;
 		break;
+	case 0x71:
+		name        = "Hauppauge/Zilog Z8";
+		ir->get_key = get_key_haup_xvr;
+		rc_type     = RC_TYPE_RC5;
+		ir_codes    = hauppauge ? RC_MAP_HAUPPAUGE_NEW : RC_MAP_RC5_TV;
+		break;
 	}
 
 	/* Let the caller override settings */
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index e103b8f..9fb86a0 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -300,10 +300,15 @@
 				adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
 	} else if (hw == IVTV_HW_CX25840) {
 		struct cx25840_platform_data pdata;
+		struct i2c_board_info cx25840_info = {
+			.type = "cx25840",
+			.addr = hw_addrs[idx],
+			.platform_data = &pdata,
+		};
 
 		pdata.pvr150_workaround = itv->pvr150_workaround;
-		sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
-				adap, type, 0, &pdata, hw_addrs[idx], NULL);
+		sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
+				&cx25840_info, NULL);
 	} else {
 		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
 				adap, type, hw_addrs[idx], NULL);
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9b4faf0..9c29e96 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -628,22 +628,66 @@
 static void ivtv_irq_dma_err(struct ivtv *itv)
 {
 	u32 data[CX2341X_MBOX_MAX_DATA];
+	u32 status;
 
 	del_timer(&itv->dma_timer);
+
 	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
+	status = read_reg(IVTV_REG_DMASTATUS);
 	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
-				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
-	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+				status, itv->cur_dma_stream);
+	/*
+	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
+	 * clear the error status, if either the encoder write (0x02) or
+	 * decoder read (0x01) bus master DMA operation does not indicate
+	 * completion.  We can race with the DMA engine, which may have
+	 * transitioned to completed status *after* we read the register.
+	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status, after the
+	 * DMA engine has completed, will cause the DMA engine to stop working.
+	 */
+	status &= 0x3;
+	if (status == 0x3)
+		write_reg(status, IVTV_REG_DMASTATUS);
+
 	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
 	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
 		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
 
-		/* retry */
-		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
+		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+			/* retry */
+			/*
+			 * FIXME - handle cases of DMA error similar to
+			 * encoder below, except conditioned on status & 0x1
+			 */
 			ivtv_dma_dec_start(s);
-		else
-			ivtv_dma_enc_start(s);
-		return;
+			return;
+		} else {
+			if ((status & 0x2) == 0) {
+				/*
+				 * CX2341x Bus Master DMA write is ongoing.
+				 * Reset the timer and let it complete.
+				 */
+				itv->dma_timer.expires =
+						jiffies + msecs_to_jiffies(600);
+				add_timer(&itv->dma_timer);
+				return;
+			}
+
+			if (itv->dma_retries < 3) {
+				/*
+				 * CX2341x Bus Master DMA write has ended.
+				 * Retry the write, starting with the first
+				 * xfer segment. Just retrying the current
+				 * segment is not sufficient.
+				 */
+				s->sg_processed = 0;
+				itv->dma_retries++;
+				ivtv_dma_enc_start_xfer(s);
+				return;
+			}
+			/* Too many retries, give up on this one */
+		}
+
 	}
 	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
 		ivtv_udma_start(itv);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index c179041..e7e7178 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -1011,7 +1011,6 @@
 	v4l2_m2m_release(dev->m2m_dev);
 	del_timer_sync(&dev->timer);
 	video_unregister_device(dev->vfd);
-	video_device_release(dev->vfd);
 	v4l2_device_unregister(&dev->v4l2_dev);
 	kfree(dev);
 
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 209ff97..4904d25 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -12,17 +12,41 @@
 #include <asm/div64.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-chip-ident.h>
-#include "mt9v011.h"
+#include <media/mt9v011.h>
 
 MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
 MODULE_LICENSE("GPL");
 
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
+#define R00_MT9V011_CHIP_VERSION	0x00
+#define R01_MT9V011_ROWSTART		0x01
+#define R02_MT9V011_COLSTART		0x02
+#define R03_MT9V011_HEIGHT		0x03
+#define R04_MT9V011_WIDTH		0x04
+#define R05_MT9V011_HBLANK		0x05
+#define R06_MT9V011_VBLANK		0x06
+#define R07_MT9V011_OUT_CTRL		0x07
+#define R09_MT9V011_SHUTTER_WIDTH	0x09
+#define R0A_MT9V011_CLK_SPEED		0x0a
+#define R0B_MT9V011_RESTART		0x0b
+#define R0C_MT9V011_SHUTTER_DELAY	0x0c
+#define R0D_MT9V011_RESET		0x0d
+#define R1E_MT9V011_DIGITAL_ZOOM	0x1e
+#define R20_MT9V011_READ_MODE		0x20
+#define R2B_MT9V011_GREEN_1_GAIN	0x2b
+#define R2C_MT9V011_BLUE_GAIN		0x2c
+#define R2D_MT9V011_RED_GAIN		0x2d
+#define R2E_MT9V011_GREEN_2_GAIN	0x2e
+#define R35_MT9V011_GLOBAL_GAIN		0x35
+#define RF1_MT9V011_CHIP_ENABLE		0xf1
+
+#define MT9V011_VERSION			0x8232
+#define MT9V011_REV_B_VERSION		0x8243
+
 /* supported controls */
 static struct v4l2_queryctrl mt9v011_qctrl[] = {
 	{
@@ -469,23 +493,6 @@
 	return 0;
 }
 
-static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
-	struct mt9v011 *core = to_mt9v011(sd);
-	unsigned *xtal = data;
-
-	v4l2_dbg(1, debug, sd, "s_config called\n");
-
-	if (xtal) {
-		core->xtal = *xtal;
-		v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
-			 *xtal / 1000000, (*xtal / 1000) % 1000);
-	}
-
-	return 0;
-}
-
-
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 static int mt9v011_g_register(struct v4l2_subdev *sd,
 			      struct v4l2_dbg_register *reg)
@@ -536,7 +543,6 @@
 	.g_ctrl = mt9v011_g_ctrl,
 	.s_ctrl = mt9v011_s_ctrl,
 	.reset = mt9v011_reset,
-	.s_config = mt9v011_s_config,
 	.g_chip_ident = mt9v011_g_chip_ident,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = mt9v011_g_register,
@@ -596,6 +602,14 @@
 	core->height = 480;
 	core->xtal = 27000000;	/* Hz */
 
+	if (c->dev.platform_data) {
+		struct mt9v011_platform_data *pdata = c->dev.platform_data;
+
+		core->xtal = pdata->xtal;
+		v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
+			core->xtal / 1000000, (core->xtal / 1000) % 1000);
+	}
+
 	v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
 		 c->addr << 1, c->adapter->name, version);
 
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
deleted file mode 100644
index 3350fd6..0000000
--- a/drivers/media/video/mt9v011.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor
- *
- * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
- * This code is placed under the terms of the GNU General Public License v2
- */
-
-#ifndef MT9V011_H_
-#define MT9V011_H_
-
-#define R00_MT9V011_CHIP_VERSION	0x00
-#define R01_MT9V011_ROWSTART		0x01
-#define R02_MT9V011_COLSTART		0x02
-#define R03_MT9V011_HEIGHT		0x03
-#define R04_MT9V011_WIDTH		0x04
-#define R05_MT9V011_HBLANK		0x05
-#define R06_MT9V011_VBLANK		0x06
-#define R07_MT9V011_OUT_CTRL		0x07
-#define R09_MT9V011_SHUTTER_WIDTH	0x09
-#define R0A_MT9V011_CLK_SPEED		0x0a
-#define R0B_MT9V011_RESTART		0x0b
-#define R0C_MT9V011_SHUTTER_DELAY	0x0c
-#define R0D_MT9V011_RESET		0x0d
-#define R1E_MT9V011_DIGITAL_ZOOM	0x1e
-#define R20_MT9V011_READ_MODE		0x20
-#define R2B_MT9V011_GREEN_1_GAIN	0x2b
-#define R2C_MT9V011_BLUE_GAIN		0x2c
-#define R2D_MT9V011_RED_GAIN		0x2d
-#define R2E_MT9V011_GREEN_2_GAIN	0x2e
-#define R35_MT9V011_GLOBAL_GAIN		0x35
-#define RF1_MT9V011_CHIP_ENABLE		0xf1
-
-#define MT9V011_VERSION			0x8232
-#define MT9V011_REV_B_VERSION		0x8243
-
-#endif
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 83de97a..029a4ba 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1286,7 +1286,7 @@
 	videobuf_mmap_free(q);
 
 	/* Even if apply changes fails we should continue
-	   freeing allocated memeory */
+	   freeing allocated memory */
 	if (vout->streaming) {
 		u32 mask = 0;
 
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index c881a64..d4e7c11 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -1449,47 +1449,6 @@
 	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0);
 }
 
-static int ov7670_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct ov7670_config *config = data;
-	struct ov7670_info *info = to_state(sd);
-	int ret;
-
-	info->clock_speed = 30; /* default: a guess */
-
-	/*
-	 * Must apply configuration before initializing device, because it
-	 * selects I/O method.
-	 */
-	if (config) {
-		info->min_width = config->min_width;
-		info->min_height = config->min_height;
-		info->use_smbus = config->use_smbus;
-
-		if (config->clock_speed)
-			info->clock_speed = config->clock_speed;
-	}
-
-	/* Make sure it's an ov7670 */
-	ret = ov7670_detect(sd);
-	if (ret) {
-		v4l_dbg(1, debug, client,
-			"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
-			client->addr << 1, client->adapter->name);
-		kfree(info);
-		return ret;
-	}
-	v4l_info(client, "chip found @ 0x%02x (%s)\n",
-			client->addr << 1, client->adapter->name);
-
-	info->fmt = &ov7670_formats[0];
-	info->sat = 128;	/* Review this */
-	info->clkrc = info->clock_speed / 30;
-
-	return 0;
-}
-
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
 {
@@ -1528,7 +1487,6 @@
 	.s_ctrl = ov7670_s_ctrl,
 	.queryctrl = ov7670_queryctrl,
 	.reset = ov7670_reset,
-	.s_config = ov7670_s_config,
 	.init = ov7670_init,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = ov7670_g_register,
@@ -1558,6 +1516,7 @@
 {
 	struct v4l2_subdev *sd;
 	struct ov7670_info *info;
+	int ret;
 
 	info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL);
 	if (info == NULL)
@@ -1565,6 +1524,37 @@
 	sd = &info->sd;
 	v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
 
+	info->clock_speed = 30; /* default: a guess */
+	if (client->dev.platform_data) {
+		struct ov7670_config *config = client->dev.platform_data;
+
+		/*
+		 * Must apply configuration before initializing device, because it
+		 * selects I/O method.
+		 */
+		info->min_width = config->min_width;
+		info->min_height = config->min_height;
+		info->use_smbus = config->use_smbus;
+
+		if (config->clock_speed)
+			info->clock_speed = config->clock_speed;
+	}
+
+	/* Make sure it's an ov7670 */
+	ret = ov7670_detect(sd);
+	if (ret) {
+		v4l_dbg(1, debug, client,
+			"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
+			client->addr << 1, client->adapter->name);
+		kfree(info);
+		return ret;
+	}
+	v4l_info(client, "chip found @ 0x%02x (%s)\n",
+			client->addr << 1, client->adapter->name);
+
+	info->fmt = &ov7670_formats[0];
+	info->sat = 128;	/* Review this */
+	info->clkrc = info->clock_speed / 30;
 	return 0;
 }
 
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index ac94a8b..305e6aa 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -40,6 +40,7 @@
 #include "pvrusb2-io.h"
 #include <media/v4l2-device.h>
 #include <media/cx2341x.h>
+#include <media/ir-kbd-i2c.h>
 #include "pvrusb2-devattr.h"
 
 /* Legal values for PVR2_CID_HSM */
@@ -202,6 +203,7 @@
 
 	/* IR related */
 	unsigned int ir_scheme_active; /* IR scheme as seen from the outside */
+	struct IR_i2c_init_data ir_init_data; /* params passed to IR modules */
 
 	/* Frequency table */
 	unsigned int freqTable[FREQTABLE_SIZE];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 7cbe18c..451ecd4 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/i2c.h>
+#include <media/ir-kbd-i2c.h>
 #include "pvrusb2-i2c-core.h"
 #include "pvrusb2-hdw-internal.h"
 #include "pvrusb2-debug.h"
@@ -48,13 +49,6 @@
 MODULE_PARM_DESC(disable_autoload_ir_video,
 		 "1=do not try to autoload ir_video IR receiver");
 
-/* Mapping of IR schemes to known I2C addresses - if any */
-static const unsigned char ir_video_addresses[] = {
-	[PVR2_IR_SCHEME_ZILOG] = 0x71,
-	[PVR2_IR_SCHEME_29XXX] = 0x18,
-	[PVR2_IR_SCHEME_24XXX] = 0x18,
-};
-
 static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
 			  u8 i2c_addr,      /* I2C address we're talking to */
 			  u8 *data,         /* Data to write */
@@ -574,26 +568,55 @@
 static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
 {
 	struct i2c_board_info info;
-	unsigned char addr = 0;
+	struct IR_i2c_init_data *init_data = &hdw->ir_init_data;
 	if (pvr2_disable_ir_video) {
 		pvr2_trace(PVR2_TRACE_INFO,
 			   "Automatic binding of ir_video has been disabled.");
 		return;
 	}
-	if (hdw->ir_scheme_active < ARRAY_SIZE(ir_video_addresses)) {
-		addr = ir_video_addresses[hdw->ir_scheme_active];
-	}
-	if (!addr) {
+	memset(&info, 0, sizeof(struct i2c_board_info));
+	switch (hdw->ir_scheme_active) {
+	case PVR2_IR_SCHEME_24XXX: /* FX2-controlled IR */
+	case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
+		init_data->ir_codes              = RC_MAP_HAUPPAUGE_NEW;
+		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
+		init_data->type                  = RC_TYPE_RC5;
+		init_data->name                  = hdw->hdw_desc->description;
+		init_data->polling_interval      = 100; /* ms, from ir-kbd-i2c */
+		/* IR Receiver */
+		info.addr          = 0x18;
+		info.platform_data = init_data;
+		strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+			   info.type, info.addr);
+		i2c_new_device(&hdw->i2c_adap, &info);
+		break;
+	case PVR2_IR_SCHEME_ZILOG:     /* HVR-1950 style */
+	case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
+		init_data->ir_codes              = RC_MAP_HAUPPAUGE_NEW;
+		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+		init_data->type                  = RC_TYPE_RC5;
+		init_data->name                  = hdw->hdw_desc->description;
+		/* IR Receiver */
+		info.addr          = 0x71;
+		info.platform_data = init_data;
+		strlcpy(info.type, "ir_rx_z8f0811_haup", I2C_NAME_SIZE);
+		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+			   info.type, info.addr);
+		i2c_new_device(&hdw->i2c_adap, &info);
+		/* IR Transmitter */
+		info.addr          = 0x70;
+		info.platform_data = init_data;
+		strlcpy(info.type, "ir_tx_z8f0811_haup", I2C_NAME_SIZE);
+		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+			   info.type, info.addr);
+		i2c_new_device(&hdw->i2c_adap, &info);
+		break;
+	default:
 		/* The device either doesn't support I2C-based IR or we
 		   don't know (yet) how to operate IR on the device. */
-		return;
+		break;
 	}
-	pvr2_trace(PVR2_TRACE_INFO,
-		   "Binding ir_video to i2c address 0x%02x.", addr);
-	memset(&info, 0, sizeof(struct i2c_board_info));
-	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-	info.addr = addr;
-	i2c_new_device(&hdw->i2c_adap, &info);
 }
 
 void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index b63f8ca..561909b 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -57,7 +57,7 @@
 #include <linux/usb.h>
 
 #define S2255_MAJOR_VERSION	1
-#define S2255_MINOR_VERSION	20
+#define S2255_MINOR_VERSION	21
 #define S2255_RELEASE		0
 #define S2255_VERSION		KERNEL_VERSION(S2255_MAJOR_VERSION, \
 					       S2255_MINOR_VERSION, \
@@ -312,9 +312,9 @@
 };
 
 /* current cypress EEPROM firmware version */
-#define S2255_CUR_USB_FWVER	((3 << 8) | 6)
+#define S2255_CUR_USB_FWVER	((3 << 8) | 11)
 /* current DSP FW version */
-#define S2255_CUR_DSP_FWVER     8
+#define S2255_CUR_DSP_FWVER     10102
 /* Need DSP version 5+ for video status feature */
 #define S2255_MIN_DSP_STATUS      5
 #define S2255_MIN_DSP_COLORFILTER 8
@@ -492,9 +492,11 @@
 
 static void s2255_reset_dsppower(struct s2255_dev *dev)
 {
-	s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1);
+	s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
 	msleep(10);
 	s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
+	msleep(600);
+	s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
 	return;
 }
 
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f35459d..0db9092 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1565,7 +1565,7 @@
 	chip_id = name[5];
 
 	/* Check whether this chip is part of the saa711x series */
-	if (memcmp(name, "1f711", 5)) {
+	if (memcmp(name + 1, "f711", 4)) {
 		v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
 			client->addr << 1, name);
 		return -ENODEV;
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e7aa588..deb8fcf 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5179,18 +5179,8 @@
 	[SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG] = {
 		.name           = "Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid",
 		.audio_clock    = 0x00187de7,
-#if 0
-	/*
-	 * FIXME: Analog mode doesn't work, if digital is enabled. The proper
-	 * fix is to use tda8290 driver, but Kworld seems to use an
-	 * unsupported version of tda8295.
-	 */
-		.tuner_type     = TUNER_NXP_TDA18271,	/* TUNER_PHILIPS_TDA8290 */
-		.tuner_addr     = 0x60,
-#else
-		.tuner_type     = UNSET,
+		.tuner_type     = TUNER_PHILIPS_TDA8290,
 		.tuner_addr     = ADDR_UNSET,
-#endif
 		.radio_type     = UNSET,
 		.radio_addr	= ADDR_UNSET,
 		.gpiomask       = 0x8e054000,
@@ -6932,10 +6922,17 @@
 	/* toggle AGC switch through GPIO 27 */
 	switch (mode) {
 	case TDA18271_ANALOG:
-		saa7134_set_gpio(dev, 27, 0);
+		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
+		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
+		msleep(20);
 		break;
 	case TDA18271_DIGITAL:
-		saa7134_set_gpio(dev, 27, 1);
+		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
+		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
+		msleep(20);
+		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
+		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
+		msleep(30);
 		break;
 	default:
 		return -EINVAL;
@@ -6993,6 +6990,7 @@
 int saa7134_tuner_callback(void *priv, int component, int command, int arg)
 {
 	struct saa7134_dev *dev = priv;
+
 	if (dev != NULL) {
 		switch (dev->tuner_type) {
 		case TUNER_PHILIPS_TDA8290:
@@ -7659,36 +7657,11 @@
 		break;
 	}
 	case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
-	{
-		struct i2c_msg msg = { .addr = 0x4b, .flags = 0 };
-		int i;
-		static u8 buffer[][2] = {
-			{0x30, 0x31},
-			{0xff, 0x00},
-			{0x41, 0x03},
-			{0x41, 0x1a},
-			{0xff, 0x02},
-			{0x34, 0x00},
-			{0x45, 0x97},
-			{0x45, 0xc1},
-		};
 		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
 		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
 
-		/*
-		 * FIXME: identify what device is at addr 0x4b and what means
-		 * this initialization
-		 */
-		for (i = 0; i < ARRAY_SIZE(buffer); i++) {
-			msg.buf = &buffer[i][0];
-			msg.len = ARRAY_SIZE(buffer[0]);
-			if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
-				printk(KERN_WARNING
-				       "%s: Unable to enable tuner(%i).\n",
-				       dev->name, i);
-		}
+		saa7134_set_gpio(dev, 27, 0);
 		break;
-	}
 	} /* switch() */
 
 	/* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 3315a48..f65cad2 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -237,12 +237,39 @@
 static struct tda18271_config kworld_tda18271_config = {
 	.std_map = &mb86a20s_tda18271_std_map,
 	.gate    = TDA18271_GATE_DIGITAL,
+	.config  = 3,	/* Use tuner callback for AGC */
+
 };
 
 static const struct mb86a20s_config kworld_mb86a20s_config = {
 	.demod_address = 0x10,
 };
 
+static int kworld_sbtvd_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+	struct saa7134_dev *dev = fe->dvb->priv;
+
+	unsigned char initmsg[] = {0x45, 0x97};
+	unsigned char msg_enable[] = {0x45, 0xc1};
+	unsigned char msg_disable[] = {0x45, 0x81};
+	struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
+
+	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+		wprintk("could not access the I2C gate\n");
+		return -EIO;
+	}
+	if (enable)
+		msg.buf = msg_enable;
+	else
+		msg.buf = msg_disable;
+	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+		wprintk("could not access the I2C gate\n");
+		return -EIO;
+	}
+	msleep(20);
+	return 0;
+}
+
 /* ==================================================================
  * tda1004x based DVB-T cards, helper functions
  */
@@ -623,37 +650,6 @@
 
 /* ------------------------------------------------------------------ */
 
-static int __kworld_sbtvd_i2c_gate_ctrl(struct saa7134_dev *dev, int enable)
-{
-	unsigned char initmsg[] = {0x45, 0x97};
-	unsigned char msg_enable[] = {0x45, 0xc1};
-	unsigned char msg_disable[] = {0x45, 0x81};
-	struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
-
-	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
-		wprintk("could not access the I2C gate\n");
-		return -EIO;
-	}
-	if (enable)
-		msg.buf = msg_enable;
-	else
-		msg.buf = msg_disable;
-	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
-		wprintk("could not access the I2C gate\n");
-		return -EIO;
-	}
-	msleep(20);
-	return 0;
-}
-static int kworld_sbtvd_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
-	struct saa7134_dev *dev = fe->dvb->priv;
-
-	return __kworld_sbtvd_i2c_gate_ctrl(dev, enable);
-}
-
-/* ------------------------------------------------------------------ */
-
 static struct tda1004x_config tda827x_lifeview_config = {
 	.demod_address = 0x08,
 	.invert        = 1,
@@ -1660,27 +1656,23 @@
 		}
 		break;
 	case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
-		__kworld_sbtvd_i2c_gate_ctrl(dev, 0);
-		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
-		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
-		msleep(20);
-		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
-		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
-		msleep(20);
+		/* Switch to digital mode */
+		saa7134_tuner_callback(dev, 0,
+				       TDA18271_CALLBACK_CMD_AGC_ENABLE, 1);
 		fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
 					       &kworld_mb86a20s_config,
 					       &dev->i2c_adap);
-		__kworld_sbtvd_i2c_gate_ctrl(dev, 1);
 		if (fe0->dvb.frontend != NULL) {
+			dvb_attach(tda829x_attach, fe0->dvb.frontend,
+				   &dev->i2c_adap, 0x4b,
+				   &tda829x_no_probe);
 			dvb_attach(tda18271_attach, fe0->dvb.frontend,
 				   0x60, &dev->i2c_adap,
 				   &kworld_tda18271_config);
-			/*
-			 * Only after success, it can initialize the gate, otherwise
-			 * an OOPS will hit, due to kfree(fe0->dvb.frontend)
-			 */
-			fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_i2c_gate_ctrl;
+			fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_gate_ctrl;
 		}
+
+		/* mb86a20s needs to use the I2C gateway */
 		break;
 	default:
 		wprintk("Huh? unknown DVB card?\n");
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
index d6bf3f8..58af67f 100644
--- a/drivers/media/video/saa7164/saa7164-core.c
+++ b/drivers/media/video/saa7164/saa7164-core.c
@@ -655,8 +655,8 @@
 		goto out;
 	}
 
-	/* Check that the hardware is accessable. If the status bytes are
-	 * 0xFF then the device is not accessable, the the IRQ belongs
+	/* Check that the hardware is accessible. If the status bytes are
+	 * 0xFF then the device is not accessible, and the IRQ belongs
 	 * to another driver.
 	 * 4 x u32 interrupt registers.
 	 */
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index 41064c7..b3d2cc7 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -47,8 +47,8 @@
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
-#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
+#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
 #if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
@@ -56,78 +56,68 @@
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
 #endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
+	{ SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, /* not in sonixb */
 #if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), },
 #endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), },
+	{ SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, /* not in sonixb */
 	/* SN9C103 */
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), }, non existent ? */
+	{ SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), }, /* not in sonixb */
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, HY7131D/E */
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), }, non existent ? */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
-#endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), }, non existent ? */
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, PAS106 */
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, TAS5130 */
-/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5130 */
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5110, non existent */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), }, non existent ? */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), }, non existent ? */
 #endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
 	/* SN9C105 */
 #if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
 	{ SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), },
-#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), }, PO1030 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, OM6801 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), }, HV7131GP */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), }, MO4000 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), }, ICM105C */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), }, OV7648 */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
 	/* SN9C120 */
 	{ SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), },
-#endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), }, po2030 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), }, om6801 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), }, S5K53BEB */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
-#endif
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
-#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
 #endif
diff --git a/drivers/media/video/sn9c102/sn9c102_sensor.h b/drivers/media/video/sn9c102/sn9c102_sensor.h
index 494957b..7f38549 100644
--- a/drivers/media/video/sn9c102/sn9c102_sensor.h
+++ b/drivers/media/video/sn9c102/sn9c102_sensor.h
@@ -147,7 +147,7 @@
 
 struct sn9c102_sensor {
 	char name[32], /* sensor name */
-	     maintainer[64]; /* name of the mantainer <email> */
+	     maintainer[64]; /* name of the maintainer <email> */
 
 	enum sn9c102_bridge supported_bridge; /* supported SN9C1xx bridges */
 
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index 864696b..c901721 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -714,15 +714,6 @@
 	return ret;
 }
 
-static int sr030pc30_s_config(struct v4l2_subdev *sd,
-			      int irq, void *platform_data)
-{
-	struct sr030pc30_info *info = to_sr030pc30(sd);
-
-	info->pdata = platform_data;
-	return 0;
-}
-
 static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
 {
 	return 0;
@@ -763,7 +754,6 @@
 }
 
 static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
-	.s_config	= sr030pc30_s_config,
 	.s_power	= sr030pc30_s_power,
 	.queryctrl	= sr030pc30_queryctrl,
 	.s_ctrl		= sr030pc30_s_ctrl,
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
deleted file mode 100644
index 35b6ff5..0000000
--- a/drivers/media/video/tda9875.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * For the TDA9875 chip
- * (The TDA9875 is used on the Diamond DTV2000 french version
- * Other cards probably use these chips as well.)
- * This driver will not complain if used with any
- * other i2c device with the same address.
- *
- * Copyright (c) 2000 Guillaume Delvit based on Gerd Knorr source and
- * Eric Sandeen
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
- * This code is placed under the terms of the GNU General Public License
- * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
- * Which was based on tda8425.c by Greg Alexander (c) 1998
- *
- * OPTIONS:
- * debug   - set to 1 if you'd like to see debug messages
- *
- *  Revision: 0.1 - original version
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-device.h>
-#include <media/i2c-addr.h>
-
-static int debug; /* insmod parameter */
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_LICENSE("GPL");
-
-
-/* This is a superset of the TDA9875 */
-struct tda9875 {
-	struct v4l2_subdev sd;
-	int rvol, lvol;
-	int bass, treble;
-};
-
-static inline struct tda9875 *to_state(struct v4l2_subdev *sd)
-{
-	return container_of(sd, struct tda9875, sd);
-}
-
-#define dprintk  if (debug) printk
-
-/* The TDA9875 is made by Philips Semiconductor
- * http://www.semiconductors.philips.com
- * TDA9875: I2C-bus controlled DSP audio processor, FM demodulator
- *
- */
-
-		/* subaddresses for TDA9875 */
-#define TDA9875_MUT         0x12  /*General mute  (value --> 0b11001100*/
-#define TDA9875_CFG         0x01  /* Config register (value --> 0b00000000 */
-#define TDA9875_DACOS       0x13  /*DAC i/o select (ADC) 0b0000100*/
-#define TDA9875_LOSR        0x16  /*Line output select regirter 0b0100 0001*/
-
-#define TDA9875_CH1V        0x0c  /*Channel 1 volume (mute)*/
-#define TDA9875_CH2V        0x0d  /*Channel 2 volume (mute)*/
-#define TDA9875_SC1         0x14  /*SCART 1 in (mono)*/
-#define TDA9875_SC2         0x15  /*SCART 2 in (mono)*/
-
-#define TDA9875_ADCIS       0x17  /*ADC input select (mono) 0b0110 000*/
-#define TDA9875_AER         0x19  /*Audio effect (AVL+Pseudo) 0b0000 0110*/
-#define TDA9875_MCS         0x18  /*Main channel select (DAC) 0b0000100*/
-#define TDA9875_MVL         0x1a  /* Main volume gauche */
-#define TDA9875_MVR         0x1b  /* Main volume droite */
-#define TDA9875_MBA         0x1d  /* Main Basse */
-#define TDA9875_MTR         0x1e  /* Main treble */
-#define TDA9875_ACS         0x1f  /* Auxilary channel select (FM) 0b0000000*/
-#define TDA9875_AVL         0x20  /* Auxilary volume gauche */
-#define TDA9875_AVR         0x21  /* Auxilary volume droite */
-#define TDA9875_ABA         0x22  /* Auxilary Basse */
-#define TDA9875_ATR         0x23  /* Auxilary treble */
-
-#define TDA9875_MSR         0x02  /* Monitor select register */
-#define TDA9875_C1MSB       0x03  /* Carrier 1 (FM) frequency register MSB */
-#define TDA9875_C1MIB       0x04  /* Carrier 1 (FM) frequency register (16-8]b */
-#define TDA9875_C1LSB       0x05  /* Carrier 1 (FM) frequency register LSB */
-#define TDA9875_C2MSB       0x06  /* Carrier 2 (nicam) frequency register MSB */
-#define TDA9875_C2MIB       0x07  /* Carrier 2 (nicam) frequency register (16-8]b */
-#define TDA9875_C2LSB       0x08  /* Carrier 2 (nicam) frequency register LSB */
-#define TDA9875_DCR         0x09  /* Demodulateur configuration regirter*/
-#define TDA9875_DEEM        0x0a  /* FM de-emphasis regirter*/
-#define TDA9875_FMAT        0x0b  /* FM Matrix regirter*/
-
-/* values */
-#define TDA9875_MUTE_ON	    0xff /* general mute */
-#define TDA9875_MUTE_OFF    0xcc /* general no mute */
-
-
-
-/* Begin code */
-
-static int tda9875_write(struct v4l2_subdev *sd, int subaddr, unsigned char val)
-{
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	unsigned char buffer[2];
-
-	v4l2_dbg(1, debug, sd, "Writing %d 0x%x\n", subaddr, val);
-	buffer[0] = subaddr;
-	buffer[1] = val;
-	if (2 != i2c_master_send(client, buffer, 2)) {
-		v4l2_warn(sd, "I/O error, trying (write %d 0x%x)\n",
-		       subaddr, val);
-		return -1;
-	}
-	return 0;
-}
-
-
-static int i2c_read_register(struct i2c_client *client, int addr, int reg)
-{
-	unsigned char write[1];
-	unsigned char read[1];
-	struct i2c_msg msgs[2] = {
-		{ addr, 0,        1, write },
-		{ addr, I2C_M_RD, 1, read  }
-	};
-
-	write[0] = reg;
-
-	if (2 != i2c_transfer(client->adapter, msgs, 2)) {
-		v4l_warn(client, "I/O error (read2)\n");
-		return -1;
-	}
-	v4l_dbg(1, debug, client, "chip_read2: reg%d=0x%x\n", reg, read[0]);
-	return read[0];
-}
-
-static void tda9875_set(struct v4l2_subdev *sd)
-{
-	struct tda9875 *tda = to_state(sd);
-	unsigned char a;
-
-	v4l2_dbg(1, debug, sd, "tda9875_set(%04x,%04x,%04x,%04x)\n",
-		tda->lvol, tda->rvol, tda->bass, tda->treble);
-
-	a = tda->lvol & 0xff;
-	tda9875_write(sd, TDA9875_MVL, a);
-	a =tda->rvol & 0xff;
-	tda9875_write(sd, TDA9875_MVR, a);
-	a =tda->bass & 0xff;
-	tda9875_write(sd, TDA9875_MBA, a);
-	a =tda->treble  & 0xff;
-	tda9875_write(sd, TDA9875_MTR, a);
-}
-
-static void do_tda9875_init(struct v4l2_subdev *sd)
-{
-	struct tda9875 *t = to_state(sd);
-
-	v4l2_dbg(1, debug, sd, "In tda9875_init\n");
-	tda9875_write(sd, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/
-	tda9875_write(sd, TDA9875_MSR, 0x03);    /* Monitor 0b00000XXX*/
-	tda9875_write(sd, TDA9875_C1MSB, 0x00);  /*Car1(FM) MSB XMHz*/
-	tda9875_write(sd, TDA9875_C1MIB, 0x00);  /*Car1(FM) MIB XMHz*/
-	tda9875_write(sd, TDA9875_C1LSB, 0x00);  /*Car1(FM) LSB XMHz*/
-	tda9875_write(sd, TDA9875_C2MSB, 0x00);  /*Car2(NICAM) MSB XMHz*/
-	tda9875_write(sd, TDA9875_C2MIB, 0x00);  /*Car2(NICAM) MIB XMHz*/
-	tda9875_write(sd, TDA9875_C2LSB, 0x00);  /*Car2(NICAM) LSB XMHz*/
-	tda9875_write(sd, TDA9875_DCR, 0x00);    /*Demod config 0x00*/
-	tda9875_write(sd, TDA9875_DEEM, 0x44);   /*DE-Emph 0b0100 0100*/
-	tda9875_write(sd, TDA9875_FMAT, 0x00);   /*FM Matrix reg 0x00*/
-	tda9875_write(sd, TDA9875_SC1, 0x00);    /* SCART 1 (SC1)*/
-	tda9875_write(sd, TDA9875_SC2, 0x01);    /* SCART 2 (sc2)*/
-
-	tda9875_write(sd, TDA9875_CH1V, 0x10);  /* Channel volume 1 mute*/
-	tda9875_write(sd, TDA9875_CH2V, 0x10);  /* Channel volume 2 mute */
-	tda9875_write(sd, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/
-	tda9875_write(sd, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/
-	tda9875_write(sd, TDA9875_LOSR, 0x00);  /* line out (in:mono)*/
-	tda9875_write(sd, TDA9875_AER, 0x00);   /*06 Effect (AVL+PSEUDO) */
-	tda9875_write(sd, TDA9875_MCS, 0x44);   /* Main ch select (DAC) */
-	tda9875_write(sd, TDA9875_MVL, 0x03);   /* Vol Main left 10dB */
-	tda9875_write(sd, TDA9875_MVR, 0x03);   /* Vol Main right 10dB*/
-	tda9875_write(sd, TDA9875_MBA, 0x00);   /* Main Bass Main 0dB*/
-	tda9875_write(sd, TDA9875_MTR, 0x00);   /* Main Treble Main 0dB*/
-	tda9875_write(sd, TDA9875_ACS, 0x44);   /* Aux chan select (dac)*/
-	tda9875_write(sd, TDA9875_AVL, 0x00);   /* Vol Aux left 0dB*/
-	tda9875_write(sd, TDA9875_AVR, 0x00);   /* Vol Aux right 0dB*/
-	tda9875_write(sd, TDA9875_ABA, 0x00);   /* Aux Bass Main 0dB*/
-	tda9875_write(sd, TDA9875_ATR, 0x00);   /* Aux Aigus Main 0dB*/
-
-	tda9875_write(sd, TDA9875_MUT, 0xcc);   /* General mute  */
-
-	t->lvol = t->rvol = 0;  	/* 0dB */
-	t->bass = 0; 			/* 0dB */
-	t->treble = 0;  		/* 0dB */
-	tda9875_set(sd);
-}
-
-
-static int tda9875_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
-	struct tda9875 *t = to_state(sd);
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_VOLUME:
-	{
-		int left = (t->lvol+84)*606;
-		int right = (t->rvol+84)*606;
-
-		ctrl->value=max(left,right);
-		return 0;
-	}
-	case V4L2_CID_AUDIO_BALANCE:
-	{
-		int left = (t->lvol+84)*606;
-		int right = (t->rvol+84)*606;
-		int volume = max(left,right);
-		int balance = (32768*min(left,right))/
-			      (volume ? volume : 1);
-		ctrl->value=(left<right)?
-			(65535-balance) : balance;
-		return 0;
-	}
-	case V4L2_CID_AUDIO_BASS:
-		ctrl->value = (t->bass+12)*2427;    /* min -12 max +15 */
-		return 0;
-	case V4L2_CID_AUDIO_TREBLE:
-		ctrl->value = (t->treble+12)*2730;/* min -12 max +12 */
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static int tda9875_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
-	struct tda9875 *t = to_state(sd);
-	int chvol = 0, volume = 0, balance = 0, left, right;
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_VOLUME:
-		left = (t->lvol+84)*606;
-		right = (t->rvol+84)*606;
-
-		volume = max(left,right);
-		balance = (32768*min(left,right))/
-			      (volume ? volume : 1);
-		balance =(left<right)?
-			(65535-balance) : balance;
-
-		volume = ctrl->value;
-
-		chvol=1;
-		break;
-	case V4L2_CID_AUDIO_BALANCE:
-		left = (t->lvol+84)*606;
-		right = (t->rvol+84)*606;
-
-		volume=max(left,right);
-
-		balance = ctrl->value;
-
-		chvol=1;
-		break;
-	case V4L2_CID_AUDIO_BASS:
-		t->bass = ((ctrl->value/2400)-12) & 0xff;
-		if (t->bass > 15)
-			t->bass = 15;
-		if (t->bass < -12)
-			t->bass = -12 & 0xff;
-		break;
-	case V4L2_CID_AUDIO_TREBLE:
-		t->treble = ((ctrl->value/2700)-12) & 0xff;
-		if (t->treble > 12)
-			t->treble = 12;
-		if (t->treble < -12)
-			t->treble = -12 & 0xff;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (chvol) {
-		left = (min(65536 - balance,32768) *
-			volume) / 32768;
-		right = (min(balance,32768) *
-				volume) / 32768;
-		t->lvol = ((left/606)-84) & 0xff;
-		if (t->lvol > 24)
-			t->lvol = 24;
-		if (t->lvol < -84)
-			t->lvol = -84 & 0xff;
-
-		t->rvol = ((right/606)-84) & 0xff;
-		if (t->rvol > 24)
-			t->rvol = 24;
-		if (t->rvol < -84)
-			t->rvol = -84 & 0xff;
-	}
-
-	tda9875_set(sd);
-	return 0;
-}
-
-static int tda9875_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
-	switch (qc->id) {
-	case V4L2_CID_AUDIO_VOLUME:
-		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
-	case V4L2_CID_AUDIO_BASS:
-	case V4L2_CID_AUDIO_TREBLE:
-		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
-	}
-	return -EINVAL;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static const struct v4l2_subdev_core_ops tda9875_core_ops = {
-	.queryctrl = tda9875_queryctrl,
-	.g_ctrl = tda9875_g_ctrl,
-	.s_ctrl = tda9875_s_ctrl,
-};
-
-static const struct v4l2_subdev_ops tda9875_ops = {
-	.core = &tda9875_core_ops,
-};
-
-/* ----------------------------------------------------------------------- */
-
-
-/* *********************** *
- * i2c interface functions *
- * *********************** */
-
-static int tda9875_checkit(struct i2c_client *client, int addr)
-{
-	int dic, rev;
-
-	dic = i2c_read_register(client, addr, 254);
-	rev = i2c_read_register(client, addr, 255);
-
-	if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */
-		v4l_info(client, "tda9875%s rev. %d detected at 0x%02x\n",
-			dic == 0 ? "" : "A", rev, addr << 1);
-		return 1;
-	}
-	v4l_info(client, "no such chip at 0x%02x (dic=0x%x rev=0x%x)\n",
-			addr << 1, dic, rev);
-	return 0;
-}
-
-static int tda9875_probe(struct i2c_client *client,
-			const struct i2c_device_id *id)
-{
-	struct tda9875 *t;
-	struct v4l2_subdev *sd;
-
-	v4l_info(client, "chip found @ 0x%02x (%s)\n",
-			client->addr << 1, client->adapter->name);
-
-	if (!tda9875_checkit(client, client->addr))
-		return -ENODEV;
-
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
-	if (!t)
-		return -ENOMEM;
-	sd = &t->sd;
-	v4l2_i2c_subdev_init(sd, client, &tda9875_ops);
-
-	do_tda9875_init(sd);
-	return 0;
-}
-
-static int tda9875_remove(struct i2c_client *client)
-{
-	struct v4l2_subdev *sd = i2c_get_clientdata(client);
-
-	do_tda9875_init(sd);
-	v4l2_device_unregister_subdev(sd);
-	kfree(to_state(sd));
-	return 0;
-}
-
-static const struct i2c_device_id tda9875_id[] = {
-	{ "tda9875", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, tda9875_id);
-
-static struct i2c_driver tda9875_driver = {
-	.driver = {
-		.owner	= THIS_MODULE,
-		.name	= "tda9875",
-	},
-	.probe		= tda9875_probe,
-	.remove		= tda9875_remove,
-	.id_table	= tda9875_id,
-};
-
-static __init int init_tda9875(void)
-{
-	return i2c_add_driver(&tda9875_driver);
-}
-
-static __exit void exit_tda9875(void)
-{
-	i2c_del_driver(&tda9875_driver);
-}
-
-module_init(init_tda9875);
-module_exit(exit_tda9875);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index a1ffe18..df33a1d 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -512,19 +512,20 @@
 			int buf_size, gfp_t gfp_flags,
 			usb_complete_t complete_fn, void *context)
 {
-	struct urb *urb;
-	void *mem;
-	int i;
+	int i = 0;
 
-	for (i = 0; i < num; i++) {
-		urb = usb_alloc_urb(0, gfp_flags);
+	for (; i < num; i++) {
+		void *mem;
+		struct urb *urb = usb_alloc_urb(0, gfp_flags);
 		if (urb == NULL)
 			return i;
 
 		mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
 					 &urb->transfer_dma);
-		if (mem == NULL)
+		if (mem == NULL) {
+			usb_free_urb(urb);
 			return i;
+		}
 
 		usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
 				mem, buf_size, complete_fn, context);
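
The hunk above stops leaking the freshly allocated URB when usb_alloc_coherent() fails and keeps returning the number of URBs that were actually set up, so the caller can unwind the partial allocation. A minimal sketch of such cleanup, assuming the caller keeps the URB pointers in an array; the function and parameter names here are hypothetical, not taken from the tlg2300 driver:

static void free_prepared_urbs(struct usb_device *udev, struct urb **urbs,
			       int count, int buf_size)
{
	int i;

	for (i = 0; i < count; i++) {
		/* undo the usb_alloc_coherent() + usb_alloc_urb() pair */
		usb_free_coherent(udev, buf_size,
				  urbs[i]->transfer_buffer,
				  urbs[i]->transfer_dma);
		usb_free_urb(urbs[i]);
	}
}
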
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index e63b40f..c799e4e 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -789,7 +789,7 @@
  * Get the value of a TVP7002 decoder device register.
  * Returns zero when successful, -EINVAL if register read fails or
  * access to I2C client fails, -EPERM if the call is not allowed
- * by diabled CAP_SYS_ADMIN.
+ * by disabled CAP_SYS_ADMIN.
  */
 static int tvp7002_g_register(struct v4l2_subdev *sd,
 						struct v4l2_dbg_register *reg)
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 3f0871b..810eef4 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -407,18 +407,6 @@
 	/* Decrease the module use count to match the first try_module_get. */
 	module_put(client->driver->driver.owner);
 
-	if (sd) {
-		/* We return errors from v4l2_subdev_call only if we have the
-		   callback as the .s_config is not mandatory */
-		int err = v4l2_subdev_call(sd, core, s_config,
-				info->irq, info->platform_data);
-
-		if (err && err != -ENOIOCTLCMD) {
-			v4l2_device_unregister_subdev(sd);
-			sd = NULL;
-		}
-	}
-
 error:
 	/* If we have a client but no subdev, then something went wrong and
 	   we must unregister the client. */
@@ -428,9 +416,8 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
 
-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
 		struct i2c_adapter *adapter, const char *client_type,
-		int irq, void *platform_data,
 		u8 addr, const unsigned short *probe_addrs)
 {
 	struct i2c_board_info info;
@@ -440,12 +427,10 @@
 	memset(&info, 0, sizeof(info));
 	strlcpy(info.type, client_type, sizeof(info.type));
 	info.addr = addr;
-	info.irq = irq;
-	info.platform_data = platform_data;
 
 	return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
 }
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
 
 /* Return i2c client address of v4l2_subdev. */
 unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
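
With this rename, v4l2_i2c_new_subdev() no longer takes irq and platform_data; bridge drivers that still need to pass such data would presumably fill in a struct i2c_board_info and call v4l2_i2c_new_subdev_board() directly. A hedged sketch of the simplified call, where dev->v4l2_dev, dev->i2c_adap and the 0x60 address are made-up placeholders:

	struct v4l2_subdev *sd;

	sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
				 "tuner", 0x60, NULL);
	if (sd == NULL)
		/* subdev module missing or chip not detected */
		return -ENODEV;
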
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 8f81efc..ef66d2a 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -569,7 +569,7 @@
 	int ret;
 	u32 size;
 
-	ctrl->has_new = 1;
+	ctrl->is_new = 1;
 	switch (ctrl->type) {
 	case V4L2_CTRL_TYPE_INTEGER64:
 		ctrl->val64 = c->value64;
@@ -1280,8 +1280,12 @@
 		if (ctrl->done)
 			continue;
 
-		for (i = 0; i < master->ncontrols; i++)
-			cur_to_new(master->cluster[i]);
+		for (i = 0; i < master->ncontrols; i++) {
+			if (master->cluster[i]) {
+				cur_to_new(master->cluster[i]);
+				master->cluster[i]->is_new = 1;
+			}
+		}
 
 		/* Skip button controls and read-only controls. */
 		if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
@@ -1340,12 +1344,15 @@
 
 	ctrl = ref->ctrl;
 	memset(qc, 0, sizeof(*qc));
-	qc->id = ctrl->id;
+	if (id >= V4L2_CID_PRIVATE_BASE)
+		qc->id = id;
+	else
+		qc->id = ctrl->id;
 	strlcpy(qc->name, ctrl->name, sizeof(qc->name));
 	qc->minimum = ctrl->minimum;
 	qc->maximum = ctrl->maximum;
 	qc->default_value = ctrl->default_value;
-	if (qc->type == V4L2_CTRL_TYPE_MENU)
+	if (ctrl->type == V4L2_CTRL_TYPE_MENU)
 		qc->step = 1;
 	else
 		qc->step = ctrl->step;
@@ -1645,7 +1652,7 @@
 		if (ctrl == NULL)
 			continue;
 
-		if (ctrl->has_new) {
+		if (ctrl->is_new) {
 			/* Double check this: it may have changed since the
 			   last check in try_or_set_ext_ctrls(). */
 			if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
@@ -1719,13 +1726,13 @@
 
 		v4l2_ctrl_lock(ctrl);
 
-		/* Reset the 'has_new' flags of the cluster */
+		/* Reset the 'is_new' flags of the cluster */
 		for (j = 0; j < master->ncontrols; j++)
 			if (master->cluster[j])
-				master->cluster[j]->has_new = 0;
+				master->cluster[j]->is_new = 0;
 
 		/* Copy the new caller-supplied control values.
-		   user_to_new() sets 'has_new' to 1. */
+		   user_to_new() sets 'is_new' to 1. */
 		ret = cluster_walk(i, cs, helpers, user_to_new);
 
 		if (!ret)
@@ -1820,15 +1827,18 @@
 	int ret;
 	int i;
 
+	if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+		return -EACCES;
+
 	v4l2_ctrl_lock(ctrl);
 
-	/* Reset the 'has_new' flags of the cluster */
+	/* Reset the 'is_new' flags of the cluster */
 	for (i = 0; i < master->ncontrols; i++)
 		if (master->cluster[i])
-			master->cluster[i]->has_new = 0;
+			master->cluster[i]->is_new = 0;
 
 	ctrl->val = *val;
-	ctrl->has_new = 1;
+	ctrl->is_new = 1;
 	ret = try_or_set_control_cluster(master, false);
 	if (!ret)
 		ret = try_or_set_control_cluster(master, true);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 359e232..341764a 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -419,6 +419,10 @@
  *	The registration code assigns minor numbers and device node numbers
  *	based on the requested type and registers the new device node with
  *	the kernel.
+ *
+ *	This function assumes that struct video_device was zeroed when it
+ *	was allocated and does not contain any stale data.
+ *
  *	An error is returned if no free minor or device node number could be
  *	found, or if the registration of the device node failed.
  *
@@ -440,7 +444,6 @@
 	int minor_offset = 0;
 	int minor_cnt = VIDEO_NUM_DEVICES;
 	const char *name_base;
-	void *priv = vdev->dev.p;
 
 	/* A minor value of -1 marks this video device as never
 	   having been registered */
@@ -559,10 +562,6 @@
 	}
 
 	/* Part 4: register the device with sysfs */
-	memset(&vdev->dev, 0, sizeof(vdev->dev));
-	/* The memset above cleared the device's device_private, so
-	   put back the copy we made earlier. */
-	vdev->dev.p = priv;
 	vdev->dev.class = &video_class;
 	vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
 	if (vdev->parent)
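
Since the memset of vdev->dev is gone, video_register_device() now relies on the caller handing in zeroed memory, as the new kernel-doc comment above spells out. A minimal sketch of the two usual ways to satisfy that (foo_* names are invented); the zoran change further down in this series switches to the first one:

struct foo_dev {			/* hypothetical driver state */
	struct video_device vdev;	/* zeroed together with foo_dev */
};

	/* option 1: let the V4L2 core allocate a zeroed video_device */
	struct video_device *vdev = video_device_alloc();

	/* option 2: embed it in a kzalloc'ed driver state structure */
	struct foo_dev *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
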
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 7fe6f92..ce64fe1 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -100,6 +100,7 @@
 			   is a platform bus, then it is never deleted. */
 			if (client)
 				i2c_unregister_device(client);
+			continue;
 		}
 #endif
 #if defined(CONFIG_SPI)
@@ -108,6 +109,7 @@
 
 			if (spi)
 				spi_unregister_device(spi);
+			continue;
 		}
 #endif
 	}
@@ -126,11 +128,19 @@
 	WARN_ON(sd->v4l2_dev != NULL);
 	if (!try_module_get(sd->owner))
 		return -ENODEV;
+	sd->v4l2_dev = v4l2_dev;
+	if (sd->internal_ops && sd->internal_ops->registered) {
+		err = sd->internal_ops->registered(sd);
+		if (err)
+			return err;
+	}
 	/* This just returns 0 if either of the two args is NULL */
 	err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
-	if (err)
+	if (err) {
+		if (sd->internal_ops && sd->internal_ops->unregistered)
+			sd->internal_ops->unregistered(sd);
 		return err;
-	sd->v4l2_dev = v4l2_dev;
+	}
 	spin_lock(&v4l2_dev->lock);
 	list_add_tail(&sd->list, &v4l2_dev->subdevs);
 	spin_unlock(&v4l2_dev->lock);
@@ -146,6 +156,8 @@
 	spin_lock(&sd->v4l2_dev->lock);
 	list_del(&sd->list);
 	spin_unlock(&sd->v4l2_dev->lock);
+	if (sd->internal_ops && sd->internal_ops->unregistered)
+		sd->internal_ops->unregistered(sd);
 	sd->v4l2_dev = NULL;
 	module_put(sd->owner);
 }
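
The registration path now invokes the subdev's optional internal_ops hooks: ->registered() right after the subdev is attached to the v4l2_device (with the registration rolled back if it fails), and ->unregistered() on removal. A minimal sketch of a subdev driver wiring these up, assuming the usual struct v4l2_subdev_internal_ops and with foo_* names made up:

static int foo_registered(struct v4l2_subdev *sd)
{
	/* runs once sd->v4l2_dev is valid; may fail the registration */
	return 0;
}

static void foo_unregistered(struct v4l2_subdev *sd)
{
	/* runs while the subdev is being detached from the v4l2_device */
}

static const struct v4l2_subdev_internal_ops foo_internal_ops = {
	.registered	= foo_registered,
	.unregistered	= foo_unregistered,
};

	/* in probe, before v4l2_device_register_subdev(): */
	sd->internal_ops = &foo_internal_ops;
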
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7e47f15..f51327e 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1659,20 +1659,24 @@
 	{
 		struct v4l2_dbg_register *p = arg;
 
-		if (!capable(CAP_SYS_ADMIN))
-			ret = -EPERM;
-		else if (ops->vidioc_g_register)
-			ret = ops->vidioc_g_register(file, fh, p);
+		if (ops->vidioc_g_register) {
+			if (!capable(CAP_SYS_ADMIN))
+				ret = -EPERM;
+			else
+				ret = ops->vidioc_g_register(file, fh, p);
+		}
 		break;
 	}
 	case VIDIOC_DBG_S_REGISTER:
 	{
 		struct v4l2_dbg_register *p = arg;
 
-		if (!capable(CAP_SYS_ADMIN))
-			ret = -EPERM;
-		else if (ops->vidioc_s_register)
-			ret = ops->vidioc_s_register(file, fh, p);
+		if (ops->vidioc_s_register) {
+			if (!capable(CAP_SYS_ADMIN))
+				ret = -EPERM;
+			else
+				ret = ops->vidioc_s_register(file, fh, p);
+		}
 		break;
 	}
 #endif
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index e25aca5..2f973cd 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -13,14 +13,12 @@
 #include <linux/pci.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/videodev2.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-chip-ident.h>
 #include <media/videobuf-dma-sg.h>
-#include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_qos_params.h>
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 019ee20..fa35639 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -937,6 +937,7 @@
 		parport_unregister_device(cam->pdev);
 		w9966_set_state(cam, W9966_STATE_PDEV, 0);
 	}
+	memset(cam, 0, sizeof(*cam));
 }
 
 
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 9cdc3bb..9f2bac5 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1041,7 +1041,7 @@
 	/* allocate memory *before* doing anything to the hardware
 	 * in case allocation fails */
 	zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL);
-	zr->video_dev = kmalloc(sizeof(struct video_device), GFP_KERNEL);
+	zr->video_dev = video_device_alloc();
 	if (!zr->stat_com || !zr->video_dev) {
 		dprintk(1,
 			KERN_ERR
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index c00fe82..8c1d85e 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -465,6 +465,7 @@
 		if (!host->card) {
 			host->card = card;
 			if (device_register(&card->dev)) {
+				put_device(&card->dev);
 				kfree(host->card);
 				host->card = NULL;
 			}
@@ -510,14 +511,18 @@
 {
 	int rc;
 
-	if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
-		return -ENOMEM;
+	while (1) {
+		if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
+			return -ENOMEM;
 
-	spin_lock(&memstick_host_lock);
-	rc = idr_get_new(&memstick_host_idr, host, &host->id);
-	spin_unlock(&memstick_host_lock);
-	if (rc)
-		return rc;
+		spin_lock(&memstick_host_lock);
+		rc = idr_get_new(&memstick_host_idr, host, &host->id);
+		spin_unlock(&memstick_host_lock);
+		if (!rc)
+			break;
+		else if (rc != -EAGAIN)
+			return rc;
+	}
 
 	dev_set_name(&host->dev, "memstick%u", host->id);
 
@@ -616,7 +621,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("kmemstick");
+	workqueue = create_freezable_workqueue("kmemstick");
 	if (!workqueue)
 		return -ENOMEM;
 
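The new loop follows the standard idr idiom of this era: idr_pre_get() preallocates memory outside the lock, and idr_get_new() may still report -EAGAIN under the lock, in which case the caller simply preallocates again. A generic sketch of the pattern (the foo_* names are invented):

static int foo_assign_id(struct idr *idr, spinlock_t *lock,
			 void *ptr, int *id)
{
	int rc;

	do {
		if (!idr_pre_get(idr, GFP_KERNEL))
			return -ENOMEM;		/* preallocation failed */
		spin_lock(lock);
		rc = idr_get_new(idr, ptr, id);
		spin_unlock(lock);
	} while (rc == -EAGAIN);		/* raced, preallocate again */

	return rc;
}
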
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 02362ec..57b42bf 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -23,7 +23,6 @@
 
 #define DRIVER_NAME "mspro_block"
 
-static DEFINE_MUTEX(mspro_block_mutex);
 static int major;
 module_param(major, int, 0644);
 
@@ -160,6 +159,13 @@
 	int                   (*mrq_handler)(struct memstick_dev *card,
 					     struct memstick_request **mrq);
 
+
+	/* Default request setup function for data access method preferred by
+	 * this host instance.
+	 */
+	void                  (*setup_transfer)(struct memstick_dev *card,
+						u64 offset, size_t length);
+
 	struct attribute_group attr_group;
 
 	struct scatterlist    req_sg[MSPRO_BLOCK_MAX_SEGS];
@@ -181,7 +187,6 @@
 	struct mspro_block_data *msb = disk->private_data;
 	int rc = -ENXIO;
 
-	mutex_lock(&mspro_block_mutex);
 	mutex_lock(&mspro_block_disk_lock);
 
 	if (msb && msb->card) {
@@ -193,7 +198,6 @@
 	}
 
 	mutex_unlock(&mspro_block_disk_lock);
-	mutex_unlock(&mspro_block_mutex);
 
 	return rc;
 }
@@ -225,11 +229,7 @@
 
 static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
 {
-	int ret;
-	mutex_lock(&mspro_block_mutex);
-	ret = mspro_block_disk_release(disk);
-	mutex_unlock(&mspro_block_mutex);
-	return ret;
+	return mspro_block_disk_release(disk);
 }
 
 static int mspro_block_bd_getgeo(struct block_device *bdev,
@@ -663,14 +663,43 @@
 	}
 }
 
+/*** Transfer setup functions for different access methods. ***/
+
+/** Setup data transfer request for SET_CMD TPC with arguments in card
+ *  registers.
+ *
+ *  @card    Current media instance
+ *  @offset  Target data offset in bytes
+ *  @length  Required transfer length in bytes.
+ */
+static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
+				    size_t length)
+{
+	struct mspro_block_data *msb = memstick_get_drvdata(card);
+	struct mspro_param_register param = {
+		.system = msb->system,
+		.data_count = cpu_to_be16((uint16_t)(length / msb->page_size)),
+		/* ISO C90 warning precludes direct initialization for now. */
+		.data_address = 0,
+		.tpc_param = 0
+	};
+
+	do_div(offset, msb->page_size);
+	param.data_address = cpu_to_be32((uint32_t)offset);
+
+	card->next_request = h_mspro_block_req_init;
+	msb->mrq_handler = h_mspro_block_transfer_data;
+	memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
+			  &param, sizeof(param));
+}
+
 /*** Data transfer ***/
 
 static int mspro_block_issue_req(struct memstick_dev *card, int chunk)
 {
 	struct mspro_block_data *msb = memstick_get_drvdata(card);
-	sector_t t_sec;
+	u64 t_off;
 	unsigned int count;
-	struct mspro_param_register param;
 
 try_again:
 	while (chunk) {
@@ -685,30 +714,17 @@
 			continue;
 		}
 
-		t_sec = blk_rq_pos(msb->block_req) << 9;
-		sector_div(t_sec, msb->page_size);
-
+		t_off = blk_rq_pos(msb->block_req);
+		t_off <<= 9;
 		count = blk_rq_bytes(msb->block_req);
-		count /= msb->page_size;
 
-		param.system = msb->system;
-		param.data_count = cpu_to_be16(count);
-		param.data_address = cpu_to_be32((uint32_t)t_sec);
-		param.tpc_param = 0;
+		msb->setup_transfer(card, t_off, count);
 
 		msb->data_dir = rq_data_dir(msb->block_req);
 		msb->transfer_cmd = msb->data_dir == READ
 				    ? MSPRO_CMD_READ_DATA
 				    : MSPRO_CMD_WRITE_DATA;
 
-		dev_dbg(&card->dev, "data transfer: cmd %x, "
-			"lba %x, count %x\n", msb->transfer_cmd,
-			be32_to_cpu(param.data_address), count);
-
-		card->next_request = h_mspro_block_req_init;
-		msb->mrq_handler = h_mspro_block_transfer_data;
-		memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
-				  &param, sizeof(param));
 		memstick_new_req(card->host);
 		return 0;
 	}
@@ -963,18 +979,16 @@
 static int mspro_block_read_attributes(struct memstick_dev *card)
 {
 	struct mspro_block_data *msb = memstick_get_drvdata(card);
-	struct mspro_param_register param = {
-		.system = msb->system,
-		.data_count = cpu_to_be16(1),
-		.data_address = 0,
-		.tpc_param = 0
-	};
 	struct mspro_attribute *attr = NULL;
 	struct mspro_sys_attr *s_attr = NULL;
 	unsigned char *buffer = NULL;
 	int cnt, rc, attr_count;
-	unsigned int addr;
-	unsigned short page_count;
+	/* While physical device offsets, represented here by attr_offset and
+	 * attr_len, would normally need large numeric types, we can be sure
+	 * that the attributes are close enough to the beginning of the
+	 * device to save ourselves some trouble.
+	 */
+	unsigned int addr, attr_offset = 0, attr_len = msb->page_size;
 
 	attr = kmalloc(msb->page_size, GFP_KERNEL);
 	if (!attr)
@@ -987,10 +1001,8 @@
 	msb->data_dir = READ;
 	msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
 
-	card->next_request = h_mspro_block_req_init;
-	msb->mrq_handler = h_mspro_block_transfer_data;
-	memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param,
-			  sizeof(param));
+	msb->setup_transfer(card, attr_offset, attr_len);
+
 	memstick_new_req(card->host);
 	wait_for_completion(&card->mrq_complete);
 	if (card->current_mrq.error) {
@@ -1021,13 +1033,12 @@
 	}
 	msb->attr_group.name = "media_attributes";
 
-	buffer = kmalloc(msb->page_size, GFP_KERNEL);
+	buffer = kmalloc(attr_len, GFP_KERNEL);
 	if (!buffer) {
 		rc = -ENOMEM;
 		goto out_free_attr;
 	}
-	memcpy(buffer, (char *)attr, msb->page_size);
-	page_count = 1;
+	memcpy(buffer, (char *)attr, attr_len);
 
 	for (cnt = 0; cnt < attr_count; ++cnt) {
 		s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL);
@@ -1038,9 +1049,10 @@
 
 		msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr;
 		addr = be32_to_cpu(attr->entries[cnt].address);
-		rc = be32_to_cpu(attr->entries[cnt].size);
+		s_attr->size = be32_to_cpu(attr->entries[cnt].size);
 		dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, "
-			"size %x\n", cnt, attr->entries[cnt].id, addr, rc);
+			"size %zx\n", cnt, attr->entries[cnt].id, addr,
+			s_attr->size);
 		s_attr->id = attr->entries[cnt].id;
 		if (mspro_block_attr_name(s_attr->id))
 			snprintf(s_attr->name, sizeof(s_attr->name), "%s",
@@ -1054,57 +1066,47 @@
 		s_attr->dev_attr.attr.mode = S_IRUGO;
 		s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
 
-		if (!rc)
+		if (!s_attr->size)
 			continue;
 
-		s_attr->size = rc;
-		s_attr->data = kmalloc(rc, GFP_KERNEL);
+		s_attr->data = kmalloc(s_attr->size, GFP_KERNEL);
 		if (!s_attr->data) {
 			rc = -ENOMEM;
 			goto out_free_buffer;
 		}
 
-		if (((addr / msb->page_size)
-		     == be32_to_cpu(param.data_address))
-		    && (((addr + rc - 1) / msb->page_size)
-			== be32_to_cpu(param.data_address))) {
+		if (((addr / msb->page_size) == (attr_offset / msb->page_size))
+		    && (((addr + s_attr->size - 1) / msb->page_size)
+			== (attr_offset / msb->page_size))) {
 			memcpy(s_attr->data, buffer + addr % msb->page_size,
-			       rc);
+			       s_attr->size);
 			continue;
 		}
 
-		if (page_count <= (rc / msb->page_size)) {
+		attr_offset = (addr / msb->page_size) * msb->page_size;
+
+		if ((attr_offset + attr_len) < (addr + s_attr->size)) {
 			kfree(buffer);
-			page_count = (rc / msb->page_size) + 1;
-			buffer = kmalloc(page_count * msb->page_size,
-					 GFP_KERNEL);
+			attr_len = (((addr + s_attr->size) / msb->page_size)
+				    + 1) * msb->page_size - attr_offset;
+			buffer = kmalloc(attr_len, GFP_KERNEL);
 			if (!buffer) {
 				rc = -ENOMEM;
 				goto out_free_attr;
 			}
 		}
 
-		param.system = msb->system;
-		param.data_count = cpu_to_be16((rc / msb->page_size) + 1);
-		param.data_address = cpu_to_be32(addr / msb->page_size);
-		param.tpc_param = 0;
-
-		sg_init_one(&msb->req_sg[0], buffer,
-			    be16_to_cpu(param.data_count) * msb->page_size);
+		sg_init_one(&msb->req_sg[0], buffer, attr_len);
 		msb->seg_count = 1;
 		msb->current_seg = 0;
 		msb->current_page = 0;
 		msb->data_dir = READ;
 		msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
 
-		dev_dbg(&card->dev, "reading attribute pages %x, %x\n",
-			be32_to_cpu(param.data_address),
-			be16_to_cpu(param.data_count));
+		dev_dbg(&card->dev, "reading attribute range %x, %x\n",
+			attr_offset, attr_len);
 
-		card->next_request = h_mspro_block_req_init;
-		msb->mrq_handler = h_mspro_block_transfer_data;
-		memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
-				  (char *)&param, sizeof(param));
+		msb->setup_transfer(card, attr_offset, attr_len);
 		memstick_new_req(card->host);
 		wait_for_completion(&card->mrq_complete);
 		if (card->current_mrq.error) {
@@ -1112,7 +1114,8 @@
 			goto out_free_buffer;
 		}
 
-		memcpy(s_attr->data, buffer + addr % msb->page_size, rc);
+		memcpy(s_attr->data, buffer + addr % msb->page_size,
+		       s_attr->size);
 	}
 
 	rc = 0;
@@ -1130,6 +1133,8 @@
 	int rc = 0;
 
 	msb->system = MEMSTICK_SYS_SERIAL;
+	msb->setup_transfer = h_mspro_block_setup_cmd;
+
 	card->reg_addr.r_offset = offsetof(struct mspro_register, status);
 	card->reg_addr.r_length = sizeof(struct ms_status_register);
 	card->reg_addr.w_offset = offsetof(struct mspro_register, param);
@@ -1206,10 +1211,12 @@
 
 	msb->page_size = be16_to_cpu(sys_info->unit_size);
 
-	if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL))
-		return -ENOMEM;
-
 	mutex_lock(&mspro_block_disk_lock);
+	if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
+		mutex_unlock(&mspro_block_disk_lock);
+		return -ENOMEM;
+	}
+
 	rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
 	mutex_unlock(&mspro_block_disk_lock);
 
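h_mspro_block_setup_cmd() above converts the 64-bit byte offset to a page number with do_div(). Worth noting, since it regularly trips people up: do_div() divides its 64-bit first argument in place and returns the 32-bit remainder, so the quotient is what is left in the variable. A tiny illustrative sketch with made-up variable names:

	u64 off = byte_offset;			/* hypothetical 64-bit byte offset */
	u32 rem = do_div(off, page_size);	/* off /= page_size, in place */
	/* 'off' now holds the page number, 'rem' the offset within the page */
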
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index f2b894c..d89d925 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -61,6 +61,7 @@
 	struct memstick_request *req;
 	unsigned char           cmd_flags;
 	unsigned char           io_pos;
+	unsigned char           ifmode;
 	unsigned int            io_word[2];
 };
 
@@ -136,15 +137,14 @@
 #define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
 #define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
 
+#define CLOCK_CONTROL_BY_MMIO 0x00000008
 #define CLOCK_CONTROL_40MHZ   0x00000001
-#define CLOCK_CONTROL_50MHZ   0x0000000a
-#define CLOCK_CONTROL_60MHZ   0x00000008
-#define CLOCK_CONTROL_62_5MHZ 0x0000000c
+#define CLOCK_CONTROL_50MHZ   0x00000002
+#define CLOCK_CONTROL_60MHZ   0x00000010
+#define CLOCK_CONTROL_62_5MHZ 0x00000004
 #define CLOCK_CONTROL_OFF     0x00000000
 
 #define PCI_CTL_CLOCK_DLY_ADDR   0x000000b0
-#define PCI_CTL_CLOCK_DLY_MASK_A 0x00000f00
-#define PCI_CTL_CLOCK_DLY_MASK_B 0x0000f000
 
 enum {
 	CMD_READY    = 0x01,
@@ -390,8 +390,13 @@
 
 	if (host->req->data_dir == READ)
 		cmd |= TPC_DIR;
-	if (host->req->need_card_int)
-		cmd |= TPC_WAIT_INT;
+
+	if (host->req->need_card_int) {
+		if (host->ifmode == MEMSTICK_SERIAL)
+			cmd |= TPC_GET_INT;
+		else
+			cmd |= TPC_WAIT_INT;
+	}
 
 	data = host->req->data;
 
@@ -529,7 +534,10 @@
 		if (irq_status & INT_STATUS_ANY_ERR) {
 			if (irq_status & INT_STATUS_CRC_ERR)
 				host->req->error = -EILSEQ;
-			else
+			else if (irq_status & INT_STATUS_TPC_ERR) {
+				dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n");
+				jmb38x_ms_complete_cmd(msh, 0);
+			} else
 				host->req->error = -ETIME;
 		} else {
 			if (host->cmd_flags & DMA_DATA) {
@@ -644,7 +652,6 @@
 		ndelay(20);
 	}
 	dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n");
-	/* return -EIO; */
 
 reset_next:
 	writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
@@ -675,7 +682,7 @@
 {
 	struct jmb38x_ms_host *host = memstick_priv(msh);
 	unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
-	unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0;
+	unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0;
 	int rc = 0;
 
 	switch (param) {
@@ -687,9 +694,7 @@
 
 			host_ctl = 7;
 			host_ctl |= HOST_CONTROL_POWER_EN
-				    | HOST_CONTROL_CLOCK_EN
-				    | HOST_CONTROL_HW_OC_P
-				    | HOST_CONTROL_TDELAY_EN;
+				    | HOST_CONTROL_CLOCK_EN;
 			writel(host_ctl, host->addr + HOST_CONTROL);
 
 			writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
@@ -712,46 +717,88 @@
 			return -EINVAL;
 		break;
 	case MEMSTICK_INTERFACE:
+		dev_dbg(&host->chip->pdev->dev,
+			"Set Host Interface Mode to %d\n", value);
+		host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI |
+			      HOST_CONTROL_REO);
+		host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P;
 		host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
-		pci_read_config_dword(host->chip->pdev,
-				      PCI_CTL_CLOCK_DLY_ADDR,
-				      &clock_delay);
-		clock_delay &= host->id ? ~PCI_CTL_CLOCK_DLY_MASK_B
-					: ~PCI_CTL_CLOCK_DLY_MASK_A;
 
 		if (value == MEMSTICK_SERIAL) {
-			host_ctl &= ~HOST_CONTROL_FAST_CLK;
-			host_ctl &= ~HOST_CONTROL_REO;
 			host_ctl |= HOST_CONTROL_IF_SERIAL
 				    << HOST_CONTROL_IF_SHIFT;
 			host_ctl |= HOST_CONTROL_REI;
-			clock_ctl = CLOCK_CONTROL_40MHZ;
+			clock_ctl |= CLOCK_CONTROL_40MHZ;
+			clock_delay = 0;
 		} else if (value == MEMSTICK_PAR4) {
-			host_ctl |= HOST_CONTROL_FAST_CLK | HOST_CONTROL_REO;
+			host_ctl |= HOST_CONTROL_FAST_CLK;
 			host_ctl |= HOST_CONTROL_IF_PAR4
 				    << HOST_CONTROL_IF_SHIFT;
-			host_ctl &= ~HOST_CONTROL_REI;
-			clock_ctl = CLOCK_CONTROL_40MHZ;
-			clock_delay |= host->id ? (4 << 12) : (4 << 8);
+			host_ctl |= HOST_CONTROL_REO;
+			clock_ctl |= CLOCK_CONTROL_40MHZ;
+			clock_delay = 4;
 		} else if (value == MEMSTICK_PAR8) {
 			host_ctl |= HOST_CONTROL_FAST_CLK;
 			host_ctl |= HOST_CONTROL_IF_PAR8
 				    << HOST_CONTROL_IF_SHIFT;
-			host_ctl &= ~(HOST_CONTROL_REI | HOST_CONTROL_REO);
-			clock_ctl = CLOCK_CONTROL_50MHZ;
+			clock_ctl |= CLOCK_CONTROL_50MHZ;
+			clock_delay = 0;
 		} else
 			return -EINVAL;
 
 		writel(host_ctl, host->addr + HOST_CONTROL);
+		writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL);
 		writel(clock_ctl, host->addr + CLOCK_CONTROL);
-		pci_write_config_dword(host->chip->pdev,
-				       PCI_CTL_CLOCK_DLY_ADDR,
-				       clock_delay);
+		pci_write_config_byte(host->chip->pdev,
+				      PCI_CTL_CLOCK_DLY_ADDR + 1,
+				      clock_delay);
+		host->ifmode = value;
 		break;
 	};
 	return 0;
 }
 
+#define PCI_PMOS0_CONTROL		0xae
+#define  PMOS0_ENABLE			0x01
+#define  PMOS0_OVERCURRENT_LEVEL_2_4V	0x06
+#define  PMOS0_EN_OVERCURRENT_DEBOUNCE	0x40
+#define  PMOS0_SW_LED_POLARITY_ENABLE	0x80
+#define  PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \
+			    PMOS0_OVERCURRENT_LEVEL_2_4V)
+#define PCI_PMOS1_CONTROL		0xbd
+#define  PMOS1_ACTIVE_BITS		0x4a
+#define PCI_CLOCK_CTL			0xb9
+
+static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag)
+{
+	unsigned char val;
+
+	pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val);
+	if (flag)
+		val |= PMOS0_ACTIVE_BITS;
+	else
+		val &= ~PMOS0_ACTIVE_BITS;
+	pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val);
+	dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val);
+
+	if (pci_resource_flags(pdev, 1)) {
+		pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val);
+		if (flag)
+			val |= PMOS1_ACTIVE_BITS;
+		else
+			val &= ~PMOS1_ACTIVE_BITS;
+		pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val);
+		dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val);
+	}
+
+	pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val);
+	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f);
+	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01);
+	dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n");
+
+	return 0;
+}
+
 #ifdef CONFIG_PM
 
 static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state)
@@ -784,8 +831,7 @@
 		return rc;
 	pci_set_master(dev);
 
-	pci_read_config_dword(dev, 0xac, &rc);
-	pci_write_config_dword(dev, 0xac, rc | 0x00470000);
+	jmb38x_ms_pmos(dev, 1);
 
 	for (rc = 0; rc < jm->host_cnt; ++rc) {
 		if (!jm->hosts[rc])
@@ -894,8 +940,7 @@
 		goto err_out;
 	}
 
-	pci_read_config_dword(pdev, 0xac, &rc);
-	pci_write_config_dword(pdev, 0xac, rc | 0x00470000);
+	jmb38x_ms_pmos(pdev, 1);
 
 	cnt = jmb38x_ms_count_slots(pdev);
 	if (!cnt) {
@@ -976,6 +1021,8 @@
 		jmb38x_ms_free_host(jm->hosts[cnt]);
 	}
 
+	jmb38x_ms_pmos(dev, 0);
+
 	pci_set_drvdata(dev, NULL);
 	pci_release_regions(dev);
 	pci_disable_device(dev);
@@ -983,8 +1030,9 @@
 }
 
 static struct pci_device_id jmb38x_ms_id_tbl [] = {
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS, PCI_ANY_ID,
-	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) },
 	{ }
 };
 
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index 691620d..8b04810 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -268,7 +268,7 @@
 
 /* Compatibility Error : IR Disabled */
 #define IR_LOGINFO_COMPAT_ERROR_RAID_DISABLED                  (0x00010030)
-/* Compatibility Error : Inquiry Comand failed */
+/* Compatibility Error : Inquiry Command failed */
 #define IR_LOGINFO_COMPAT_ERROR_INQUIRY_FAILED                 (0x00010031)
 /* Compatibility Error : Device not direct access device */
 #define IR_LOGINFO_COMPAT_ERROR_NOT_DIRECT_ACCESS              (0x00010032)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3e57b61..3358c0a 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7977,7 +7977,7 @@
 		NULL,						/* 2Eh */
 		NULL,						/* 2Fh */
 		"Compatibility Error: IR Disabled",		/* 30h */
-		"Compatibility Error: Inquiry Comand Failed",	/* 31h */
+		"Compatibility Error: Inquiry Command Failed",	/* 31h */
 		"Compatibility Error: Device not Direct Access "
 		    "Device ",					/* 32h */
 		"Compatibility Error: Removable Device Found",	/* 33h */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f229..1735c84 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.17"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON	"3.04.18"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.18"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed..e8deb8e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@
 }
 
 static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+	fasync_helper(-1, filep, 0, &async_queue);
+	return 0;
+}
+
+static int
 mptctl_fasync(int fd, struct file *filep, int mode)
 {
 	MPT_ADAPTER	*ioc;
@@ -2815,6 +2822,7 @@
 	.llseek =	no_llseek,
 	.fasync = 	mptctl_fasync,
 	.unlocked_ioctl = mptctl_ioctl,
+	.release =	mptctl_release,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = compat_mpctl_ioctl,
 #endif
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index d48c2c6..8aefb18 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1146,7 +1146,7 @@
  *
  * This function will delete scheduled target reset from the list and
  * try to send next target reset. This will be called from completion
- * context of any Task managment command.
+ * context of any Task management command.
  */
 
 void
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53..0d9b82a 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@
 	}
 
  out:
-	printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+	printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+	    SCpnt, SCpnt->serial_number);
 
 	return retval;
 }
@@ -1911,7 +1912,7 @@
 
 	vdevice = SCpnt->device->hostdata;
 	if (!vdevice || !vdevice->vtarget) {
-		retval = SUCCESS;
+		retval = 0;
 		goto out;
 	}
 
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f87a9d4..ae7cad1 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -309,7 +309,7 @@
  *	@ireq: I2O block request
  *	@mptr: message body pointer
  *
- *	Builds the SG list and map it to be accessable by the controller.
+ *	Builds the SG list and map it to be accessible by the controller.
  *
  *	Returns 0 on failure or 1 on success.
  */
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 20895e7..793300c 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -361,12 +361,6 @@
 	},
 };
 
-static inline struct pm860x_irq_data *irq_to_pm860x(struct pm860x_chip *chip,
-						    int irq)
-{
-	return &pm860x_irqs[irq - chip->irq_base];
-}
-
 static irqreturn_t pm860x_irq(int irq, void *data)
 {
 	struct pm860x_chip *chip = data;
@@ -388,16 +382,16 @@
 	return IRQ_HANDLED;
 }
 
-static void pm860x_irq_lock(unsigned int irq)
+static void pm860x_irq_lock(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&chip->irq_lock);
 }
 
-static void pm860x_irq_sync_unlock(unsigned int irq)
+static void pm860x_irq_sync_unlock(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
 	struct pm860x_irq_data *irq_data;
 	struct i2c_client *i2c;
 	static unsigned char cached[3] = {0x0, 0x0, 0x0};
@@ -439,25 +433,25 @@
 	mutex_unlock(&chip->irq_lock);
 }
 
-static void pm860x_irq_enable(unsigned int irq)
+static void pm860x_irq_enable(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
-	pm860x_irqs[irq - chip->irq_base].enable
-		= pm860x_irqs[irq - chip->irq_base].offs;
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
+	pm860x_irqs[data->irq - chip->irq_base].enable
+		= pm860x_irqs[data->irq - chip->irq_base].offs;
 }
 
-static void pm860x_irq_disable(unsigned int irq)
+static void pm860x_irq_disable(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
-	pm860x_irqs[irq - chip->irq_base].enable = 0;
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
+	pm860x_irqs[data->irq - chip->irq_base].enable = 0;
 }
 
 static struct irq_chip pm860x_irq_chip = {
 	.name		= "88pm860x",
-	.bus_lock	= pm860x_irq_lock,
-	.bus_sync_unlock = pm860x_irq_sync_unlock,
-	.enable		= pm860x_irq_enable,
-	.disable	= pm860x_irq_disable,
+	.irq_bus_lock	= pm860x_irq_lock,
+	.irq_bus_sync_unlock = pm860x_irq_sync_unlock,
+	.irq_enable	= pm860x_irq_enable,
+	.irq_disable	= pm860x_irq_disable,
 };
 
 static int __devinit device_gpadc_init(struct pm860x_chip *chip,
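
This is part of the tree-wide conversion to the struct irq_data based irq_chip callbacks: the handlers now receive an irq_data pointer, fetch their driver data with irq_data_get_irq_chip_data(), and derive the local index from data->irq. A generic sketch of the new-style chip, independent of 88pm860x (foo_* names are invented):

static void foo_irq_mask(struct irq_data *data)
{
	struct foo_chip *chip = irq_data_get_irq_chip_data(data);
	int offset = data->irq - chip->irq_base;

	chip->mask[offset / 8] |= 1 << (offset % 8);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_chip *chip = irq_data_get_irq_chip_data(data);
	int offset = data->irq - chip->irq_base;

	chip->mask[offset / 8] &= ~(1 << (offset % 8));
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};
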
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index da9d297..fd01836 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -496,13 +496,13 @@
 
 config AB8500_CORE
 	bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
-	depends on GENERIC_HARDIRQS && ABX500_CORE && SPI_MASTER && ARCH_U8500
+	depends on GENERIC_HARDIRQS && ABX500_CORE
 	select MFD_CORE
 	help
 	  Select this option to enable access to AB8500 power management
-	  chip. This connects to U8500 either on the SSP/SPI bus
-	  or the I2C bus via PRCMU. It also adds the irq_chip
-	  parts for handling the Mixed Signal chip events.
+	  chip. This connects to U8500 either on the SSP/SPI bus (deprecated
+	  since hardware version v1.0) or the I2C bus via PRCMU. It also adds
+	  the irq_chip parts for handling the Mixed Signal chip events.
 	  This chip embeds various other multimedia funtionalities as well.
 
 config AB8500_I2C_CORE
@@ -537,6 +537,14 @@
 	  LEDs, vibrator, system power and temperature, power management
 	  and ALSA sound.
 
+config MFD_CS5535
+	tristate "Support for CS5535 and CS5536 southbridge core functions"
+	select MFD_CORE
+	depends on PCI
+	---help---
+	  This is the core driver for CS5535/CS5536 MFD functions.  This is
+	  necessary for using the board's GPIO and MFGPT functionality.
+
 config MFD_TIMBERDALE
 	tristate "Support for the Timberdale FPGA"
 	select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 848e7ea..a54e2c7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -70,7 +70,7 @@
 obj-$(CONFIG_AB3100_CORE)	+= ab3100-core.o
 obj-$(CONFIG_AB3100_OTP)	+= ab3100-otp.o
 obj-$(CONFIG_AB3550_CORE)	+= ab3550-core.o
-obj-$(CONFIG_AB8500_CORE)	+= ab8500-core.o ab8500-spi.o
+obj-$(CONFIG_AB8500_CORE)	+= ab8500-core.o
 obj-$(CONFIG_AB8500_I2C_CORE)	+= ab8500-i2c.o
 obj-$(CONFIG_AB8500_DEBUG)	+= ab8500-debugfs.o
 obj-$(CONFIG_MFD_TIMBERDALE)    += timberdale.o
@@ -82,3 +82,4 @@
 obj-$(CONFIG_MFD_TPS6586X)	+= tps6586x.o
 obj-$(CONFIG_MFD_VX855)		+= vx855.o
 obj-$(CONFIG_MFD_WL1273_CORE)	+= wl1273-core.o
+obj-$(CONFIG_MFD_CS5535)	+= cs5535-mfd.o
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 8a98739..5fbca34 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1159,15 +1159,16 @@
 	}
 }
 
-static void ab3550_mask(unsigned int irq)
+static void ab3550_mask(struct irq_data *data)
 {
 	unsigned long flags;
 	struct ab3550 *ab;
 	struct ab3550_platform_data *plf_data;
+	int irq;
 
-	ab = get_irq_chip_data(irq);
+	ab = irq_data_get_irq_chip_data(data);
 	plf_data = ab->i2c_client[0]->dev.platform_data;
-	irq -= plf_data->irq.base;
+	irq = data->irq - plf_data->irq.base;
 
 	spin_lock_irqsave(&ab->event_lock, flags);
 	ab->event_mask[irq / 8] |= BIT(irq % 8);
@@ -1176,15 +1177,16 @@
 	schedule_work(&ab->mask_work);
 }
 
-static void ab3550_unmask(unsigned int irq)
+static void ab3550_unmask(struct irq_data *data)
 {
 	unsigned long flags;
 	struct ab3550 *ab;
 	struct ab3550_platform_data *plf_data;
+	int irq;
 
-	ab = get_irq_chip_data(irq);
+	ab = irq_data_get_irq_chip_data(data);
 	plf_data = ab->i2c_client[0]->dev.platform_data;
-	irq -= plf_data->irq.base;
+	irq = data->irq - plf_data->irq.base;
 
 	spin_lock_irqsave(&ab->event_lock, flags);
 	ab->event_mask[irq / 8] &= ~BIT(irq % 8);
@@ -1193,20 +1195,16 @@
 	schedule_work(&ab->mask_work);
 }
 
-static void noop(unsigned int irq)
+static void noop(struct irq_data *data)
 {
 }
 
 static struct irq_chip ab3550_irq_chip = {
 	.name		= "ab3550-core", /* Keep the same name as the request */
-	.startup	= NULL, /* defaults to enable */
-	.shutdown	= NULL, /* defaults to disable */
-	.enable		= NULL, /* defaults to unmask */
-	.disable	= ab3550_mask, /* No default to mask in chip.c */
-	.ack		= noop,
-	.mask		= ab3550_mask,
-	.unmask		= ab3550_unmask,
-	.end		= NULL,
+	.irq_disable	= ab3550_mask, /* No default to mask in chip.c */
+	.irq_ack	= noop,
+	.irq_mask	= ab3550_mask,
+	.irq_unmask	= ab3550_unmask,
 };
 
 struct ab_family_id {
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index d9640a6..b688701 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -52,6 +52,7 @@
 #define AB8500_IT_LATCH8_REG		0x27
 #define AB8500_IT_LATCH9_REG		0x28
 #define AB8500_IT_LATCH10_REG		0x29
+#define AB8500_IT_LATCH12_REG		0x2B
 #define AB8500_IT_LATCH19_REG		0x32
 #define AB8500_IT_LATCH20_REG		0x33
 #define AB8500_IT_LATCH21_REG		0x34
@@ -98,13 +99,17 @@
  * offset 0.
  */
 static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
-	0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21,
+	0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21,
 };
 
 static int ab8500_get_chip_id(struct device *dev)
 {
-	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
-	return (int)ab8500->chip_id;
+	struct ab8500 *ab8500;
+
+	if (!dev)
+		return -EINVAL;
+	ab8500 = dev_get_drvdata(dev->parent);
+	return ab8500 ? (int)ab8500->chip_id : -EINVAL;
 }
 
 static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -228,16 +233,16 @@
 	.startup_irq_enabled = NULL,
 };
 
-static void ab8500_irq_lock(unsigned int irq)
+static void ab8500_irq_lock(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&ab8500->irq_lock);
 }
 
-static void ab8500_irq_sync_unlock(unsigned int irq)
+static void ab8500_irq_sync_unlock(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
@@ -248,6 +253,10 @@
 		if (new == old)
 			continue;
 
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+		if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+			continue;
+
 		ab8500->oldmask[i] = new;
 
 		reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
@@ -257,20 +266,20 @@
 	mutex_unlock(&ab8500->irq_lock);
 }
 
-static void ab8500_irq_mask(unsigned int irq)
+static void ab8500_irq_mask(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
-	int offset = irq - ab8500->irq_base;
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - ab8500->irq_base;
 	int index = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	ab8500->mask[index] |= mask;
 }
 
-static void ab8500_irq_unmask(unsigned int irq)
+static void ab8500_irq_unmask(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
-	int offset = irq - ab8500->irq_base;
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - ab8500->irq_base;
 	int index = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -279,10 +288,10 @@
 
 static struct irq_chip ab8500_irq_chip = {
 	.name			= "ab8500",
-	.bus_lock		= ab8500_irq_lock,
-	.bus_sync_unlock	= ab8500_irq_sync_unlock,
-	.mask			= ab8500_irq_mask,
-	.unmask			= ab8500_irq_unmask,
+	.irq_bus_lock		= ab8500_irq_lock,
+	.irq_bus_sync_unlock	= ab8500_irq_sync_unlock,
+	.irq_mask		= ab8500_irq_mask,
+	.irq_unmask		= ab8500_irq_unmask,
 };
 
 static irqreturn_t ab8500_irq(int irq, void *dev)
@@ -297,6 +306,10 @@
 		int status;
 		u8 value;
 
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+		if (regoffset == 11 && ab8500->chip_id < 0x20)
+			continue;
+
 		status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
 			AB8500_IT_LATCH1_REG + regoffset, &value);
 		if (status < 0 || value == 0)
@@ -393,13 +406,195 @@
 	},
 };
 
+static struct resource ab8500_bm_resources[] = {
+	{
+		.name = "MAIN_EXT_CH_NOT_OK",
+		.start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BATT_OVV",
+		.start = AB8500_INT_BATT_OVV,
+		.end = AB8500_INT_BATT_OVV,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "MAIN_CH_UNPLUG_DET",
+		.start = AB8500_INT_MAIN_CH_UNPLUG_DET,
+		.end = AB8500_INT_MAIN_CH_UNPLUG_DET,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "MAIN_CHARGE_PLUG_DET",
+		.start = AB8500_INT_MAIN_CH_PLUG_DET,
+		.end = AB8500_INT_MAIN_CH_PLUG_DET,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_F",
+		.start = AB8500_INT_VBUS_DET_F,
+		.end = AB8500_INT_VBUS_DET_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_R",
+		.start = AB8500_INT_VBUS_DET_R,
+		.end = AB8500_INT_VBUS_DET_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BAT_CTRL_INDB",
+		.start = AB8500_INT_BAT_CTRL_INDB,
+		.end = AB8500_INT_BAT_CTRL_INDB,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "CH_WD_EXP",
+		.start = AB8500_INT_CH_WD_EXP,
+		.end = AB8500_INT_CH_WD_EXP,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_OVV",
+		.start = AB8500_INT_VBUS_OVV,
+		.end = AB8500_INT_VBUS_OVV,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "NCONV_ACCU",
+		.start = AB8500_INT_CCN_CONV_ACC,
+		.end = AB8500_INT_CCN_CONV_ACC,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "LOW_BAT_F",
+		.start = AB8500_INT_LOW_BAT_F,
+		.end = AB8500_INT_LOW_BAT_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "LOW_BAT_R",
+		.start = AB8500_INT_LOW_BAT_R,
+		.end = AB8500_INT_LOW_BAT_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BTEMP_LOW",
+		.start = AB8500_INT_BTEMP_LOW,
+		.end = AB8500_INT_BTEMP_LOW,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BTEMP_HIGH",
+		.start = AB8500_INT_BTEMP_HIGH,
+		.end = AB8500_INT_BTEMP_HIGH,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CHARGER_NOT_OKR",
+		.start = AB8500_INT_USB_CHARGER_NOT_OK,
+		.end = AB8500_INT_USB_CHARGER_NOT_OK,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CHARGE_DET_DONE",
+		.start = AB8500_INT_USB_CHG_DET_DONE,
+		.end = AB8500_INT_USB_CHG_DET_DONE,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CH_TH_PROT_R",
+		.start = AB8500_INT_USB_CH_TH_PROT_R,
+		.end = AB8500_INT_USB_CH_TH_PROT_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "MAIN_CH_TH_PROT_R",
+		.start = AB8500_INT_MAIN_CH_TH_PROT_R,
+		.end = AB8500_INT_MAIN_CH_TH_PROT_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CHARGER_NOT_OKF",
+		.start = AB8500_INT_USB_CHARGER_NOT_OKF,
+		.end = AB8500_INT_USB_CHARGER_NOT_OKF,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct resource ab8500_debug_resources[] = {
+	{
+		.name	= "IRQ_FIRST",
+		.start	= AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.end	= AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.name	= "IRQ_LAST",
+		.start	= AB8500_INT_USB_CHARGER_NOT_OKF,
+		.end	= AB8500_INT_USB_CHARGER_NOT_OKF,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct resource ab8500_usb_resources[] = {
+	{
+		.name = "ID_WAKEUP_R",
+		.start = AB8500_INT_ID_WAKEUP_R,
+		.end = AB8500_INT_ID_WAKEUP_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "ID_WAKEUP_F",
+		.start = AB8500_INT_ID_WAKEUP_F,
+		.end = AB8500_INT_ID_WAKEUP_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_F",
+		.start = AB8500_INT_VBUS_DET_F,
+		.end = AB8500_INT_VBUS_DET_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_R",
+		.start = AB8500_INT_VBUS_DET_R,
+		.end = AB8500_INT_VBUS_DET_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_LINK_STATUS",
+		.start = AB8500_INT_USB_LINK_STATUS,
+		.end = AB8500_INT_USB_LINK_STATUS,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct resource ab8500_temp_resources[] = {
+	{
+		.name  = "AB8500_TEMP_WARM",
+		.start = AB8500_INT_TEMP_WARM,
+		.end   = AB8500_INT_TEMP_WARM,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
 static struct mfd_cell ab8500_devs[] = {
 #ifdef CONFIG_DEBUG_FS
 	{
 		.name = "ab8500-debug",
+		.num_resources = ARRAY_SIZE(ab8500_debug_resources),
+		.resources = ab8500_debug_resources,
 	},
 #endif
 	{
+		.name = "ab8500-sysctrl",
+	},
+	{
+		.name = "ab8500-regulator",
+	},
+	{
 		.name = "ab8500-gpadc",
 		.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
 		.resources = ab8500_gpadc_resources,
@@ -410,6 +605,22 @@
 		.resources = ab8500_rtc_resources,
 	},
 	{
+		.name = "ab8500-bm",
+		.num_resources = ARRAY_SIZE(ab8500_bm_resources),
+		.resources = ab8500_bm_resources,
+	},
+	{ .name = "ab8500-codec", },
+	{
+		.name = "ab8500-usb",
+		.num_resources = ARRAY_SIZE(ab8500_usb_resources),
+		.resources = ab8500_usb_resources,
+	},
+	{
+		.name = "ab8500-poweron-key",
+		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+		.resources = ab8500_poweronkey_db_resources,
+	},
+	{
 		.name = "ab8500-pwm",
 		.id = 1,
 	},
@@ -421,15 +632,35 @@
 		.name = "ab8500-pwm",
 		.id = 3,
 	},
-	{ .name = "ab8500-charger", },
-	{ .name = "ab8500-audio", },
-	{ .name = "ab8500-usb", },
-	{ .name = "ab8500-regulator", },
+	{ .name = "ab8500-leds", },
 	{
-		.name = "ab8500-poweron-key",
-		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
-		.resources = ab8500_poweronkey_db_resources,
+		.name = "ab8500-denc",
 	},
+	{
+		.name = "ab8500-temp",
+		.num_resources = ARRAY_SIZE(ab8500_temp_resources),
+		.resources = ab8500_temp_resources,
+	},
+};
+
+static ssize_t show_chip_id(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ab8500 *ab8500;
+
+	ab8500 = dev_get_drvdata(dev);
+	return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
+
+static struct attribute *ab8500_sysfs_entries[] = {
+	&dev_attr_chip_id.attr,
+	NULL,
+};
+
+static struct attribute_group ab8500_attr_group = {
+	.attrs	= ab8500_sysfs_entries,
 };
 
 int __devinit ab8500_init(struct ab8500 *ab8500)
@@ -454,8 +685,9 @@
 	 * 0x0 - Early Drop
 	 * 0x10 - Cut 1.0
 	 * 0x11 - Cut 1.1
+	 * 0x20 - Cut 2.0
 	 */
-	if (value == 0x0 || value == 0x10 || value == 0x11) {
+	if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) {
 		ab8500->revision = value;
 		dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
 	} else {
@@ -468,18 +700,16 @@
 		plat->init(ab8500);
 
 	/* Clear and mask all interrupts */
-	for (i = 0; i < 10; i++) {
-		get_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_LATCH1_REG + i, &value);
-		set_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_MASK1_REG + i, 0xff);
-	}
+	for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+		if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+			continue;
 
-	for (i = 18; i < 24; i++) {
 		get_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_LATCH1_REG + i, &value);
+			AB8500_IT_LATCH1_REG + ab8500_irq_regoffset[i],
+			&value);
 		set_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_MASK1_REG + i, 0xff);
+			AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i], 0xff);
 	}
 
 	ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
@@ -495,7 +725,8 @@
 			return ret;
 
 		ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
-					   IRQF_ONESHOT, "ab8500", ab8500);
+					   IRQF_ONESHOT | IRQF_NO_SUSPEND,
+					   "ab8500", ab8500);
 		if (ret)
 			goto out_removeirq;
 	}
@@ -506,6 +737,10 @@
 	if (ret)
 		goto out_freeirq;
 
+	ret = sysfs_create_group(&ab8500->dev->kobj, &ab8500_attr_group);
+	if (ret)
+		dev_err(ab8500->dev, "error creating sysfs entries\n");
+
 	return ret;
 
 out_freeirq:
@@ -519,6 +754,7 @@
 
 int __devexit ab8500_exit(struct ab8500 *ab8500)
 {
+	sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
 	mfd_remove_devices(ab8500->dev);
 	if (ab8500->irq_base) {
 		free_irq(ab8500->irq, ab8500);
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 8d1e05a..3c1541a 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -24,9 +24,9 @@
  * @perm: access permissions for the range
  */
 struct ab8500_reg_range {
-       u8 first;
-       u8 last;
-       u8 perm;
+	u8 first;
+	u8 last;
+	u8 perm;
 };
 
 /**
@@ -36,9 +36,9 @@
  * @range: the list of register ranges
  */
 struct ab8500_i2c_ranges {
-       u8 num_ranges;
-       u8 bankid;
-       const struct ab8500_reg_range *range;
+	u8 num_ranges;
+	u8 bankid;
+	const struct ab8500_reg_range *range;
 };
 
 #define AB8500_NAME_STRING "ab8500"
@@ -47,521 +47,521 @@
 #define AB8500_REV_REG 0x80
 
 static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
-       [0x0] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_SYS_CTRL1_BLOCK] = {
-               .num_ranges = 3,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x02,
-                       },
-                       {
-                               .first = 0x42,
-                               .last = 0x42,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x81,
-                       },
-               },
-       },
-       [AB8500_SYS_CTRL2_BLOCK] = {
-               .num_ranges = 4,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x0D,
-                       },
-                       {
-                               .first = 0x0F,
-                               .last = 0x17,
-                       },
-                       {
-                               .first = 0x30,
-                               .last = 0x30,
-                       },
-                       {
-                               .first = 0x32,
-                               .last = 0x33,
-                       },
-               },
-       },
-       [AB8500_REGU_CTRL1] = {
-               .num_ranges = 3,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x00,
-                       },
-                       {
-                               .first = 0x03,
-                               .last = 0x10,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x84,
-                       },
-               },
-       },
-       [AB8500_REGU_CTRL2] = {
-               .num_ranges = 5,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x15,
-                       },
-                       {
-                               .first = 0x17,
-                               .last = 0x19,
-                       },
-                       {
-                               .first = 0x1B,
-                               .last = 0x1D,
-                       },
-                       {
-                               .first = 0x1F,
-                               .last = 0x22,
-                       },
-                       {
-                               .first = 0x40,
-                               .last = 0x44,
-                       },
-                       /* 0x80-0x8B is SIM registers and should
-                        * not be accessed from here */
-               },
-       },
-       [AB8500_USB] = {
-               .num_ranges = 2,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x80,
-                               .last = 0x83,
-                       },
-                       {
-                               .first = 0x87,
-                               .last = 0x8A,
-                       },
-               },
-       },
-       [AB8500_TVOUT] = {
-               .num_ranges = 9,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x12,
-                       },
-                       {
-                               .first = 0x15,
-                               .last = 0x17,
-                       },
-                       {
-                               .first = 0x19,
-                               .last = 0x21,
-                       },
-                       {
-                               .first = 0x27,
-                               .last = 0x2C,
-                       },
-                       {
-                               .first = 0x41,
-                               .last = 0x41,
-                       },
-                       {
-                               .first = 0x45,
-                               .last = 0x5B,
-                       },
-                       {
-                               .first = 0x5D,
-                               .last = 0x5D,
-                       },
-                       {
-                               .first = 0x69,
-                               .last = 0x69,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x81,
-                       },
-               },
-       },
-       [AB8500_DBI] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_ECI_AV_ACC] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x80,
-                               .last = 0x82,
-                       },
-               },
-       },
-       [0x9] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_GPADC] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x08,
-                       },
-               },
-       },
-       [AB8500_CHARGER] = {
-               .num_ranges = 8,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x03,
-                       },
-                       {
-                               .first = 0x05,
-                               .last = 0x05,
-                       },
-                       {
-                               .first = 0x40,
-                               .last = 0x40,
-                       },
-                       {
-                               .first = 0x42,
-                               .last = 0x42,
-                       },
-                       {
-                               .first = 0x44,
-                               .last = 0x44,
-                       },
-                       {
-                               .first = 0x50,
-                               .last = 0x55,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x82,
-                       },
-                       {
-                               .first = 0xC0,
-                               .last = 0xC2,
-                       },
-               },
-       },
-       [AB8500_GAS_GAUGE] = {
-               .num_ranges = 3,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x00,
-                       },
-                       {
-                               .first = 0x07,
-                               .last = 0x0A,
-                       },
-                       {
-                               .first = 0x10,
-                               .last = 0x14,
-                       },
-               },
-       },
-       [AB8500_AUDIO] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x6F,
-                       },
-               },
-       },
-       [AB8500_INTERRUPT] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_RTC] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x0F,
-                       },
-               },
-       },
-       [AB8500_MISC] = {
-               .num_ranges = 8,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x05,
-                       },
-                       {
-                               .first = 0x10,
-                               .last = 0x15,
-                       },
-                       {
-                               .first = 0x20,
-                               .last = 0x25,
-                       },
-                       {
-                               .first = 0x30,
-                               .last = 0x35,
-                       },
-                       {
-                               .first = 0x40,
-                               .last = 0x45,
-                       },
-                       {
-                               .first = 0x50,
-                               .last = 0x50,
-                       },
-                       {
-                               .first = 0x60,
-                               .last = 0x67,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x80,
-                       },
-               },
-       },
-       [0x11] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [0x12] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [0x13] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [0x14] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_OTP_EMUL] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x01,
-                               .last = 0x0F,
-                       },
-               },
-       },
+	[0x0] = {
+		.num_ranges = 0,
+		.range = 0,
+	},
+	[AB8500_SYS_CTRL1_BLOCK] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x02,
+			},
+			{
+				.first = 0x42,
+				.last = 0x42,
+			},
+			{
+				.first = 0x80,
+				.last = 0x81,
+			},
+		},
+	},
+	[AB8500_SYS_CTRL2_BLOCK] = {
+		.num_ranges = 4,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x0D,
+			},
+			{
+				.first = 0x0F,
+				.last = 0x17,
+			},
+			{
+				.first = 0x30,
+				.last = 0x30,
+			},
+			{
+				.first = 0x32,
+				.last = 0x33,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL1] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x03,
+				.last = 0x10,
+			},
+			{
+				.first = 0x80,
+				.last = 0x84,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL2] = {
+		.num_ranges = 5,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x15,
+			},
+			{
+				.first = 0x17,
+				.last = 0x19,
+			},
+			{
+				.first = 0x1B,
+				.last = 0x1D,
+			},
+			{
+				.first = 0x1F,
+				.last = 0x22,
+			},
+			{
+				.first = 0x40,
+				.last = 0x44,
+			},
+			/* 0x80-0x8B are SIM registers and should
+			 * not be accessed from here */
+		},
+	},
+	[AB8500_USB] = {
+		.num_ranges = 2,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x80,
+				.last = 0x83,
+			},
+			{
+				.first = 0x87,
+				.last = 0x8A,
+			},
+		},
+	},
+	[AB8500_TVOUT] = {
+		.num_ranges = 9,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x12,
+			},
+			{
+				.first = 0x15,
+				.last = 0x17,
+			},
+			{
+				.first = 0x19,
+				.last = 0x21,
+			},
+			{
+				.first = 0x27,
+				.last = 0x2C,
+			},
+			{
+				.first = 0x41,
+				.last = 0x41,
+			},
+			{
+				.first = 0x45,
+				.last = 0x5B,
+			},
+			{
+				.first = 0x5D,
+				.last = 0x5D,
+			},
+			{
+				.first = 0x69,
+				.last = 0x69,
+			},
+			{
+				.first = 0x80,
+				.last = 0x81,
+			},
+		},
+	},
+	[AB8500_DBI] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_ECI_AV_ACC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+		},
+	},
+	[0x9] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_GPADC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x08,
+			},
+		},
+	},
+	[AB8500_CHARGER] = {
+		.num_ranges = 8,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x03,
+			},
+			{
+				.first = 0x05,
+				.last = 0x05,
+			},
+			{
+				.first = 0x40,
+				.last = 0x40,
+			},
+			{
+				.first = 0x42,
+				.last = 0x42,
+			},
+			{
+				.first = 0x44,
+				.last = 0x44,
+			},
+			{
+				.first = 0x50,
+				.last = 0x55,
+			},
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+			{
+				.first = 0xC0,
+				.last = 0xC2,
+			},
+		},
+	},
+	[AB8500_GAS_GAUGE] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x07,
+				.last = 0x0A,
+			},
+			{
+				.first = 0x10,
+				.last = 0x14,
+			},
+		},
+	},
+	[AB8500_AUDIO] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x6F,
+			},
+		},
+	},
+	[AB8500_INTERRUPT] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_RTC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x0F,
+			},
+		},
+	},
+	[AB8500_MISC] = {
+		.num_ranges = 8,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x05,
+			},
+			{
+				.first = 0x10,
+				.last = 0x15,
+			},
+			{
+				.first = 0x20,
+				.last = 0x25,
+			},
+			{
+				.first = 0x30,
+				.last = 0x35,
+			},
+			{
+				.first = 0x40,
+				.last = 0x45,
+			},
+			{
+				.first = 0x50,
+				.last = 0x50,
+			},
+			{
+				.first = 0x60,
+				.last = 0x67,
+			},
+			{
+				.first = 0x80,
+				.last = 0x80,
+			},
+		},
+	},
+	[0x11] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[0x12] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[0x13] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[0x14] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_OTP_EMUL] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x01,
+				.last = 0x0F,
+			},
+		},
+	},
 };
 
 static int ab8500_registers_print(struct seq_file *s, void *p)
 {
-       struct device *dev = s->private;
-       unsigned int i;
-       u32 bank = debug_bank;
+	struct device *dev = s->private;
+	unsigned int i;
+	u32 bank = debug_bank;
 
-       seq_printf(s, AB8500_NAME_STRING " register values:\n");
+	seq_printf(s, AB8500_NAME_STRING " register values:\n");
 
-       seq_printf(s, " bank %u:\n", bank);
-       for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
-               u32 reg;
+	seq_printf(s, " bank %u:\n", bank);
+	for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
+		u32 reg;
 
-               for (reg = debug_ranges[bank].range[i].first;
-                       reg <= debug_ranges[bank].range[i].last;
-                       reg++) {
-                       u8 value;
-                       int err;
+		for (reg = debug_ranges[bank].range[i].first;
+			reg <= debug_ranges[bank].range[i].last;
+			reg++) {
+			u8 value;
+			int err;
 
-                       err = abx500_get_register_interruptible(dev,
-                               (u8)bank, (u8)reg, &value);
-                       if (err < 0) {
-                               dev_err(dev, "ab->read fail %d\n", err);
-                               return err;
-                       }
+			err = abx500_get_register_interruptible(dev,
+				(u8)bank, (u8)reg, &value);
+			if (err < 0) {
+				dev_err(dev, "ab->read fail %d\n", err);
+				return err;
+			}
 
-                       err = seq_printf(s, "  [%u/0x%02X]: 0x%02X\n", bank,
-                               reg, value);
-                       if (err < 0) {
-                               dev_err(dev, "seq_printf overflow\n");
-                               /* Error is not returned here since
-                                * the output is wanted in any case */
-                               return 0;
-                       }
-               }
-       }
-       return 0;
+			err = seq_printf(s, "  [%u/0x%02X]: 0x%02X\n", bank,
+				reg, value);
+			if (err < 0) {
+				dev_err(dev, "seq_printf overflow\n");
+				/* Error is not returned here since
+				 * the output is wanted in any case */
+				return 0;
+			}
+		}
+	}
+	return 0;
 }
 
 static int ab8500_registers_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_registers_print, inode->i_private);
+	return single_open(file, ab8500_registers_print, inode->i_private);
 }
 
 static const struct file_operations ab8500_registers_fops = {
-       .open = ab8500_registers_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_registers_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static int ab8500_bank_print(struct seq_file *s, void *p)
 {
-       return seq_printf(s, "%d\n", debug_bank);
+	return seq_printf(s, "%d\n", debug_bank);
 }
 
 static int ab8500_bank_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_bank_print, inode->i_private);
+	return single_open(file, ab8500_bank_print, inode->i_private);
 }
 
 static ssize_t ab8500_bank_write(struct file *file,
-       const char __user *user_buf,
-       size_t count, loff_t *ppos)
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
 {
-       struct device *dev = ((struct seq_file *)(file->private_data))->private;
-       char buf[32];
-       int buf_size;
-       unsigned long user_bank;
-       int err;
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	char buf[32];
+	int buf_size;
+	unsigned long user_bank;
+	int err;
 
-       /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf) - 1));
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       buf[buf_size] = 0;
+	/* Get userspace string and assure termination */
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
 
-       err = strict_strtoul(buf, 0, &user_bank);
-       if (err)
-               return -EINVAL;
+	err = strict_strtoul(buf, 0, &user_bank);
+	if (err)
+		return -EINVAL;
 
-       if (user_bank >= AB8500_NUM_BANKS) {
-               dev_err(dev, "debugfs error input > number of banks\n");
-               return -EINVAL;
-       }
+	if (user_bank >= AB8500_NUM_BANKS) {
+		dev_err(dev, "debugfs error input > number of banks\n");
+		return -EINVAL;
+	}
 
-       debug_bank = user_bank;
+	debug_bank = user_bank;
 
-       return buf_size;
+	return buf_size;
 }
 
 static int ab8500_address_print(struct seq_file *s, void *p)
 {
-       return seq_printf(s, "0x%02X\n", debug_address);
+	return seq_printf(s, "0x%02X\n", debug_address);
 }
 
 static int ab8500_address_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_address_print, inode->i_private);
+	return single_open(file, ab8500_address_print, inode->i_private);
 }
 
 static ssize_t ab8500_address_write(struct file *file,
-       const char __user *user_buf,
-       size_t count, loff_t *ppos)
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
 {
-       struct device *dev = ((struct seq_file *)(file->private_data))->private;
-       char buf[32];
-       int buf_size;
-       unsigned long user_address;
-       int err;
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	char buf[32];
+	int buf_size;
+	unsigned long user_address;
+	int err;
 
-       /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf) - 1));
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       buf[buf_size] = 0;
+	/* Get userspace string and assure termination */
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
 
-       err = strict_strtoul(buf, 0, &user_address);
-       if (err)
-               return -EINVAL;
-       if (user_address > 0xff) {
-               dev_err(dev, "debugfs error input > 0xff\n");
-               return -EINVAL;
-       }
-       debug_address = user_address;
-       return buf_size;
+	err = strict_strtoul(buf, 0, &user_address);
+	if (err)
+		return -EINVAL;
+	if (user_address > 0xff) {
+		dev_err(dev, "debugfs error input > 0xff\n");
+		return -EINVAL;
+	}
+	debug_address = user_address;
+	return buf_size;
 }
 
 static int ab8500_val_print(struct seq_file *s, void *p)
 {
-       struct device *dev = s->private;
-       int ret;
-       u8 regvalue;
+	struct device *dev = s->private;
+	int ret;
+	u8 regvalue;
 
-       ret = abx500_get_register_interruptible(dev,
-               (u8)debug_bank, (u8)debug_address, &regvalue);
-       if (ret < 0) {
-               dev_err(dev, "abx500_get_reg fail %d, %d\n",
-                       ret, __LINE__);
-               return -EINVAL;
-       }
-       seq_printf(s, "0x%02X\n", regvalue);
+	ret = abx500_get_register_interruptible(dev,
+		(u8)debug_bank, (u8)debug_address, &regvalue);
+	if (ret < 0) {
+		dev_err(dev, "abx500_get_reg fail %d, %d\n",
+			ret, __LINE__);
+		return -EINVAL;
+	}
+	seq_printf(s, "0x%02X\n", regvalue);
 
-       return 0;
+	return 0;
 }
 
 static int ab8500_val_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_val_print, inode->i_private);
+	return single_open(file, ab8500_val_print, inode->i_private);
 }
 
 static ssize_t ab8500_val_write(struct file *file,
-       const char __user *user_buf,
-       size_t count, loff_t *ppos)
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
 {
-       struct device *dev = ((struct seq_file *)(file->private_data))->private;
-       char buf[32];
-       int buf_size;
-       unsigned long user_val;
-       int err;
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	char buf[32];
+	int buf_size;
+	unsigned long user_val;
+	int err;
 
-       /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf)-1));
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       buf[buf_size] = 0;
+	/* Get userspace string and assure termination */
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
 
-       err = strict_strtoul(buf, 0, &user_val);
-       if (err)
-               return -EINVAL;
-       if (user_val > 0xff) {
-               dev_err(dev, "debugfs error input > 0xff\n");
-               return -EINVAL;
-       }
-       err = abx500_set_register_interruptible(dev,
-               (u8)debug_bank, debug_address, (u8)user_val);
-       if (err < 0) {
-               printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
-               return -EINVAL;
-       }
+	err = strict_strtoul(buf, 0, &user_val);
+	if (err)
+		return -EINVAL;
+	if (user_val > 0xff) {
+		dev_err(dev, "debugfs error input > 0xff\n");
+		return -EINVAL;
+	}
+	err = abx500_set_register_interruptible(dev,
+		(u8)debug_bank, debug_address, (u8)user_val);
+	if (err < 0) {
+		printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
+		return -EINVAL;
+	}
 
-       return buf_size;
+	return buf_size;
 }
 
 static const struct file_operations ab8500_bank_fops = {
-       .open = ab8500_bank_open,
-       .write = ab8500_bank_write,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_bank_open,
+	.write = ab8500_bank_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static const struct file_operations ab8500_address_fops = {
-       .open = ab8500_address_open,
-       .write = ab8500_address_write,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_address_open,
+	.write = ab8500_address_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static const struct file_operations ab8500_val_fops = {
-       .open = ab8500_val_open,
-       .write = ab8500_val_write,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_val_open,
+	.write = ab8500_val_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static struct dentry *ab8500_dir;
@@ -572,77 +572,77 @@
 
 static int __devinit ab8500_debug_probe(struct platform_device *plf)
 {
-       debug_bank = AB8500_MISC;
-       debug_address = AB8500_REV_REG & 0x00FF;
+	debug_bank = AB8500_MISC;
+	debug_address = AB8500_REV_REG & 0x00FF;
 
-       ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
-       if (!ab8500_dir)
-               goto exit_no_debugfs;
+	ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
+	if (!ab8500_dir)
+		goto exit_no_debugfs;
 
-       ab8500_reg_file = debugfs_create_file("all-bank-registers",
-               S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
-       if (!ab8500_reg_file)
-               goto exit_destroy_dir;
+	ab8500_reg_file = debugfs_create_file("all-bank-registers",
+		S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
+	if (!ab8500_reg_file)
+		goto exit_destroy_dir;
 
-       ab8500_bank_file = debugfs_create_file("register-bank",
-               (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
-       if (!ab8500_bank_file)
-               goto exit_destroy_reg;
+	ab8500_bank_file = debugfs_create_file("register-bank",
+		(S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
+	if (!ab8500_bank_file)
+		goto exit_destroy_reg;
 
-       ab8500_address_file = debugfs_create_file("register-address",
-               (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
-               &ab8500_address_fops);
-       if (!ab8500_address_file)
-               goto exit_destroy_bank;
+	ab8500_address_file = debugfs_create_file("register-address",
+		(S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
+		&ab8500_address_fops);
+	if (!ab8500_address_file)
+		goto exit_destroy_bank;
 
-       ab8500_val_file = debugfs_create_file("register-value",
-               (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
-       if (!ab8500_val_file)
-               goto exit_destroy_address;
+	ab8500_val_file = debugfs_create_file("register-value",
+		(S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
+	if (!ab8500_val_file)
+		goto exit_destroy_address;
 
-       return 0;
+	return 0;
 
 exit_destroy_address:
-       debugfs_remove(ab8500_address_file);
+	debugfs_remove(ab8500_address_file);
 exit_destroy_bank:
-       debugfs_remove(ab8500_bank_file);
+	debugfs_remove(ab8500_bank_file);
 exit_destroy_reg:
-       debugfs_remove(ab8500_reg_file);
+	debugfs_remove(ab8500_reg_file);
 exit_destroy_dir:
-       debugfs_remove(ab8500_dir);
+	debugfs_remove(ab8500_dir);
 exit_no_debugfs:
-       dev_err(&plf->dev, "failed to create debugfs entries.\n");
-       return -ENOMEM;
+	dev_err(&plf->dev, "failed to create debugfs entries.\n");
+	return -ENOMEM;
 }
 
 static int __devexit ab8500_debug_remove(struct platform_device *plf)
 {
-       debugfs_remove(ab8500_val_file);
-       debugfs_remove(ab8500_address_file);
-       debugfs_remove(ab8500_bank_file);
-       debugfs_remove(ab8500_reg_file);
-       debugfs_remove(ab8500_dir);
+	debugfs_remove(ab8500_val_file);
+	debugfs_remove(ab8500_address_file);
+	debugfs_remove(ab8500_bank_file);
+	debugfs_remove(ab8500_reg_file);
+	debugfs_remove(ab8500_dir);
 
-       return 0;
+	return 0;
 }
 
 static struct platform_driver ab8500_debug_driver = {
-       .driver = {
-               .name = "ab8500-debug",
-               .owner = THIS_MODULE,
-       },
-       .probe  = ab8500_debug_probe,
-       .remove = __devexit_p(ab8500_debug_remove)
+	.driver = {
+		.name = "ab8500-debug",
+		.owner = THIS_MODULE,
+	},
+	.probe  = ab8500_debug_probe,
+	.remove = __devexit_p(ab8500_debug_remove)
 };
 
 static int __init ab8500_debug_init(void)
 {
-       return platform_driver_register(&ab8500_debug_driver);
+	return platform_driver_register(&ab8500_debug_driver);
 }
 
 static void __exit ab8500_debug_exit(void)
 {
-       platform_driver_unregister(&ab8500_debug_driver);
+	platform_driver_unregister(&ab8500_debug_driver);
 }
 subsys_initcall(ab8500_debug_init);
 module_exit(ab8500_debug_exit);
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
deleted file mode 100644
index b165342..0000000
--- a/drivers/mfd/ab8500-spi.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ab8500.h>
-
-/*
- * This funtion writes to any AB8500 registers using
- * SPI protocol &  before it writes it packs the data
- * in the below 24 bit frame format
- *
- *	 *|------------------------------------|
- *	 *| 23|22...18|17.......10|9|8|7......0|
- *	 *| r/w  bank       adr          data  |
- *	 * ------------------------------------
- *
- * This function shouldn't be called from interrupt
- * context
- */
-static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
-	struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
-					      dev);
-	unsigned long spi_data = addr << 10 | data;
-	struct spi_transfer xfer;
-	struct spi_message msg;
-
-	ab8500->tx_buf[0] = spi_data;
-	ab8500->rx_buf[0] = 0;
-
-	xfer.tx_buf	= ab8500->tx_buf;
-	xfer.rx_buf	= NULL;
-	xfer.len	= sizeof(unsigned long);
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	return spi_sync(spi, &msg);
-}
-
-static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
-{
-	struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
-					      dev);
-	unsigned long spi_data = 1 << 23 | addr << 10;
-	struct spi_transfer xfer;
-	struct spi_message msg;
-	int ret;
-
-	ab8500->tx_buf[0] = spi_data;
-	ab8500->rx_buf[0] = 0;
-
-	xfer.tx_buf	= ab8500->tx_buf;
-	xfer.rx_buf	= ab8500->rx_buf;
-	xfer.len	= sizeof(unsigned long);
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	ret = spi_sync(spi, &msg);
-	if (!ret)
-		/*
-		 * Only the 8 lowermost bytes are
-		 * defined with value, the rest may
-		 * vary depending on chip/board noise.
-		 */
-		ret = ab8500->rx_buf[0] & 0xFFU;
-
-	return ret;
-}
-
-static int __devinit ab8500_spi_probe(struct spi_device *spi)
-{
-	struct ab8500 *ab8500;
-	int ret;
-
-	spi->bits_per_word = 24;
-	ret = spi_setup(spi);
-	if (ret < 0)
-		return ret;
-
-	ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
-	if (!ab8500)
-		return -ENOMEM;
-
-	ab8500->dev = &spi->dev;
-	ab8500->irq = spi->irq;
-
-	ab8500->read = ab8500_spi_read;
-	ab8500->write = ab8500_spi_write;
-
-	spi_set_drvdata(spi, ab8500);
-
-	ret = ab8500_init(ab8500);
-	if (ret)
-		kfree(ab8500);
-
-	return ret;
-}
-
-static int __devexit ab8500_spi_remove(struct spi_device *spi)
-{
-	struct ab8500 *ab8500 = spi_get_drvdata(spi);
-
-	ab8500_exit(ab8500);
-	kfree(ab8500);
-
-	return 0;
-}
-
-static struct spi_driver ab8500_spi_driver = {
-	.driver = {
-		.name = "ab8500-spi",
-		.owner = THIS_MODULE,
-	},
-	.probe	= ab8500_spi_probe,
-	.remove	= __devexit_p(ab8500_spi_remove)
-};
-
-static int __init ab8500_spi_init(void)
-{
-	return spi_register_driver(&ab8500_spi_driver);
-}
-subsys_initcall(ab8500_spi_init);
-
-static void __exit ab8500_spi_exit(void)
-{
-	spi_unregister_driver(&ab8500_spi_driver);
-}
-module_exit(ab8500_spi_exit);
-
-MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
-MODULE_DESCRIPTION("AB8500 SPI");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 7de708d..c45e630 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -57,7 +57,7 @@
 		.rate = _rate,			\
 	}
 
-struct asic3_clk asic3_clk_init[] __initdata = {
+static struct asic3_clk asic3_clk_init[] __initdata = {
 	INIT_CDEX(SPI, 0),
 	INIT_CDEX(OWM, 5000000),
 	INIT_CDEX(PWM0, 0),
@@ -102,7 +102,7 @@
 			(reg >> asic->bus_shift));
 }
 
-void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
+static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
 {
 	unsigned long flags;
 	u32 val;
@@ -143,9 +143,9 @@
 	unsigned long flags;
 	struct asic3 *asic;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-	asic = desc->handler_data;
+	asic = get_irq_data(irq);
 
 	for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
 		u32 status;
@@ -226,14 +226,14 @@
 	return (irq - asic->irq_base) & 0xf;
 }
 
-static void asic3_mask_gpio_irq(unsigned int irq)
+static void asic3_mask_gpio_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	u32 val, bank, index;
 	unsigned long flags;
 
-	bank = asic3_irq_to_bank(asic, irq);
-	index = asic3_irq_to_index(asic, irq);
+	bank = asic3_irq_to_bank(asic, data->irq);
+	index = asic3_irq_to_index(asic, data->irq);
 
 	spin_lock_irqsave(&asic->lock, flags);
 	val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -242,9 +242,9 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static void asic3_mask_irq(unsigned int irq)
+static void asic3_mask_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	int regval;
 	unsigned long flags;
 
@@ -254,7 +254,7 @@
 				     ASIC3_INTR_INT_MASK);
 
 	regval &= ~(ASIC3_INTMASK_MASK0 <<
-		    (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+		    (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
 
 	asic3_write_register(asic,
 			     ASIC3_INTR_BASE +
@@ -263,14 +263,14 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static void asic3_unmask_gpio_irq(unsigned int irq)
+static void asic3_unmask_gpio_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	u32 val, bank, index;
 	unsigned long flags;
 
-	bank = asic3_irq_to_bank(asic, irq);
-	index = asic3_irq_to_index(asic, irq);
+	bank = asic3_irq_to_bank(asic, data->irq);
+	index = asic3_irq_to_index(asic, data->irq);
 
 	spin_lock_irqsave(&asic->lock, flags);
 	val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -279,9 +279,9 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static void asic3_unmask_irq(unsigned int irq)
+static void asic3_unmask_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	int regval;
 	unsigned long flags;
 
@@ -291,7 +291,7 @@
 				     ASIC3_INTR_INT_MASK);
 
 	regval |= (ASIC3_INTMASK_MASK0 <<
-		   (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+		   (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
 
 	asic3_write_register(asic,
 			     ASIC3_INTR_BASE +
@@ -300,15 +300,15 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
+static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	u32 bank, index;
 	u16 trigger, level, edge, bit;
 	unsigned long flags;
 
-	bank = asic3_irq_to_bank(asic, irq);
-	index = asic3_irq_to_index(asic, irq);
+	bank = asic3_irq_to_bank(asic, data->irq);
+	index = asic3_irq_to_index(asic, data->irq);
 	bit = 1<<index;
 
 	spin_lock_irqsave(&asic->lock, flags);
@@ -318,7 +318,7 @@
 				   bank + ASIC3_GPIO_EDGE_TRIGGER);
 	trigger = asic3_read_register(asic,
 				      bank + ASIC3_GPIO_TRIGGER_TYPE);
-	asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit;
+	asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] &= ~bit;
 
 	if (type == IRQ_TYPE_EDGE_RISING) {
 		trigger |= bit;
@@ -328,11 +328,11 @@
 		edge &= ~bit;
 	} else if (type == IRQ_TYPE_EDGE_BOTH) {
 		trigger |= bit;
-		if (asic3_gpio_get(&asic->gpio, irq - asic->irq_base))
+		if (asic3_gpio_get(&asic->gpio, data->irq - asic->irq_base))
 			edge &= ~bit;
 		else
 			edge |= bit;
-		asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit;
+		asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] |= bit;
 	} else if (type == IRQ_TYPE_LEVEL_LOW) {
 		trigger &= ~bit;
 		level &= ~bit;
@@ -359,17 +359,17 @@
 
 static struct irq_chip asic3_gpio_irq_chip = {
 	.name		= "ASIC3-GPIO",
-	.ack		= asic3_mask_gpio_irq,
-	.mask		= asic3_mask_gpio_irq,
-	.unmask		= asic3_unmask_gpio_irq,
-	.set_type	= asic3_gpio_irq_type,
+	.irq_ack	= asic3_mask_gpio_irq,
+	.irq_mask	= asic3_mask_gpio_irq,
+	.irq_unmask	= asic3_unmask_gpio_irq,
+	.irq_set_type	= asic3_gpio_irq_type,
 };
 
 static struct irq_chip asic3_irq_chip = {
 	.name		= "ASIC3",
-	.ack		= asic3_mask_irq,
-	.mask		= asic3_mask_irq,
-	.unmask		= asic3_unmask_irq,
+	.irq_ack	= asic3_mask_irq,
+	.irq_mask	= asic3_mask_irq,
+	.irq_unmask	= asic3_unmask_irq,
 };
 
 static int __init asic3_irq_probe(struct platform_device *pdev)
@@ -635,7 +635,7 @@
 	},
 	{
 		.start = ASIC3_IRQ_OWM,
-		.start = ASIC3_IRQ_OWM,
+		.end   = ASIC3_IRQ_OWM,
 		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
 	},
 };
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
new file mode 100644
index 0000000..59ca6f1
--- /dev/null
+++ b/drivers/mfd/cs5535-mfd.c
@@ -0,0 +1,151 @@
+/*
+ * cs5535-mfd.c - core MFD driver for CS5535/CS5536 southbridges
+ *
+ * The CS5535 and CS5536 have an ISA bridge on the PCI bus that is
+ * used for accessing GPIOs, MFGPTs, ACPI, etc.  Each subdevice has
+ * an IO range that's specified in a single BAR.  The BAR order is
+ * hardcoded in the CS553x specifications.
+ *
+ * Copyright (c) 2010  Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define DRV_NAME "cs5535-mfd"
+
+enum cs5535_mfd_bars {
+	SMB_BAR = 0,
+	GPIO_BAR = 1,
+	MFGPT_BAR = 2,
+	PMS_BAR = 4,
+	ACPI_BAR = 5,
+	NR_BARS,
+};
+
+static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
+
+static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
+	{
+		.id = SMB_BAR,
+		.name = "cs5535-smb",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[SMB_BAR],
+	},
+	{
+		.id = GPIO_BAR,
+		.name = "cs5535-gpio",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[GPIO_BAR],
+	},
+	{
+		.id = MFGPT_BAR,
+		.name = "cs5535-mfgpt",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[MFGPT_BAR],
+	},
+	{
+		.id = PMS_BAR,
+		.name = "cs5535-pms",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[PMS_BAR],
+	},
+	{
+		.id = ACPI_BAR,
+		.name = "cs5535-acpi",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[ACPI_BAR],
+	},
+};
+
+static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	int err, i;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	/* fill in IO range for each cell; subdrivers handle the region */
+	for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) {
+		int bar = cs5535_mfd_cells[i].id;
+		struct resource *r = &cs5535_mfd_resources[bar];
+
+		r->flags = IORESOURCE_IO;
+		r->start = pci_resource_start(pdev, bar);
+		r->end = pci_resource_end(pdev, bar);
+
+		/* id is used for temporarily storing BAR; unset it now */
+		cs5535_mfd_cells[i].id = 0;
+	}
+
+	err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
+			ARRAY_SIZE(cs5535_mfd_cells), NULL, 0);
+	if (err) {
+		dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
+		goto err_disable;
+	}
+
+	dev_info(&pdev->dev, "%zu devices registered.\n",
+			ARRAY_SIZE(cs5535_mfd_cells));
+
+	return 0;
+
+err_disable:
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void __devexit cs5535_mfd_remove(struct pci_dev *pdev)
+{
+	mfd_remove_devices(&pdev->dev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id cs5535_mfd_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
+
+static struct pci_driver cs5535_mfd_drv = {
+	.name = DRV_NAME,
+	.id_table = cs5535_mfd_pci_tbl,
+	.probe = cs5535_mfd_probe,
+	.remove = __devexit_p(cs5535_mfd_remove),
+};
+
+static int __init cs5535_mfd_init(void)
+{
+	return pci_register_driver(&cs5535_mfd_drv);
+}
+
+static void __exit cs5535_mfd_exit(void)
+{
+	pci_unregister_driver(&cs5535_mfd_drv);
+}
+
+module_init(cs5535_mfd_init);
+module_exit(cs5535_mfd_exit);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
+MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d..fdd8a1b 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,12 +118,12 @@
 
 	/* Voice codec interface client */
 	cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
-	cell->name = "davinci_vcif";
+	cell->name = "davinci-vcif";
 	cell->driver_data = davinci_vc;
 
 	/* Voice codec CQ93VC client */
 	cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
-	cell->name = "cq93vc";
+	cell->name = "cq93vc-codec";
 	cell->driver_data = davinci_vc;
 
 	ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index c2b698d..9e2d8dd 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -144,26 +144,26 @@
 }
 EXPORT_SYMBOL_GPL(pcap_to_irq);
 
-static void pcap_mask_irq(unsigned int irq)
+static void pcap_mask_irq(struct irq_data *d)
 {
-	struct pcap_chip *pcap = get_irq_chip_data(irq);
+	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
 
-	pcap->msr |= 1 << irq_to_pcap(pcap, irq);
+	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
 	queue_work(pcap->workqueue, &pcap->msr_work);
 }
 
-static void pcap_unmask_irq(unsigned int irq)
+static void pcap_unmask_irq(struct irq_data *d)
 {
-	struct pcap_chip *pcap = get_irq_chip_data(irq);
+	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
 
-	pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
+	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
 	queue_work(pcap->workqueue, &pcap->msr_work);
 }
 
 static struct irq_chip pcap_irq_chip = {
-	.name	= "pcap",
-	.mask	= pcap_mask_irq,
-	.unmask	= pcap_unmask_irq,
+	.name		= "pcap",
+	.irq_mask	= pcap_mask_irq,
+	.irq_unmask	= pcap_unmask_irq,
 };
 
 static void pcap_msr_work(struct work_struct *work)
@@ -199,8 +199,7 @@
 			if (service & 1) {
 				struct irq_desc *desc = irq_to_desc(irq);
 
-				if (WARN(!desc, KERN_WARNING
-						"Invalid PCAP IRQ %d\n", irq))
+				if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq))
 					break;
 
 				if (desc->status & IRQ_DISABLED)
@@ -218,7 +217,7 @@
 {
 	struct pcap_chip *pcap = get_irq_data(irq);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	queue_work(pcap->workqueue, &pcap->isr_work);
 	return;
 }
@@ -282,7 +281,7 @@
 	mutex_lock(&pcap->adc_mutex);
 	req = pcap->adc_queue[pcap->adc_head];
 
-	if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
+	if (WARN(!req, "adc irq without pending request\n")) {
 		mutex_unlock(&pcap->adc_mutex);
 		return IRQ_HANDLED;
 	}
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index d3e74f8..d00b6d1 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -70,31 +70,32 @@
 			ei->ack_write, ei->ack_register << ei->bus_shift);
 }
 
-static void egpio_ack(unsigned int irq)
+static void egpio_ack(struct irq_data *data)
 {
 }
 
 /* There does not appear to be a way to proactively mask interrupts
  * on the egpio chip itself.  So, we simply ignore interrupts that
  * aren't desired. */
-static void egpio_mask(unsigned int irq)
+static void egpio_mask(struct irq_data *data)
 {
-	struct egpio_info *ei = get_irq_chip_data(irq);
-	ei->irqs_enabled &= ~(1 << (irq - ei->irq_start));
-	pr_debug("EGPIO mask %d %04x\n", irq, ei->irqs_enabled);
+	struct egpio_info *ei = irq_data_get_irq_chip_data(data);
+	ei->irqs_enabled &= ~(1 << (data->irq - ei->irq_start));
+	pr_debug("EGPIO mask %d %04x\n", data->irq, ei->irqs_enabled);
 }
-static void egpio_unmask(unsigned int irq)
+
+static void egpio_unmask(struct irq_data *data)
 {
-	struct egpio_info *ei = get_irq_chip_data(irq);
-	ei->irqs_enabled |= 1 << (irq - ei->irq_start);
-	pr_debug("EGPIO unmask %d %04x\n", irq, ei->irqs_enabled);
+	struct egpio_info *ei = irq_data_get_irq_chip_data(data);
+	ei->irqs_enabled |= 1 << (data->irq - ei->irq_start);
+	pr_debug("EGPIO unmask %d %04x\n", data->irq, ei->irqs_enabled);
 }
 
 static struct irq_chip egpio_muxed_chip = {
-	.name   = "htc-egpio",
-	.ack	= egpio_ack,
-	.mask   = egpio_mask,
-	.unmask = egpio_unmask,
+	.name		= "htc-egpio",
+	.irq_ack	= egpio_ack,
+	.irq_mask	= egpio_mask,
+	.irq_unmask	= egpio_unmask,
 };
 
 static void egpio_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 594c9a8..296ad15 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -82,25 +82,25 @@
 /* There does not appear to be a way to proactively mask interrupts
  * on the htcpld chip itself.  So, we simply ignore interrupts that
  * aren't desired. */
-static void htcpld_mask(unsigned int irq)
+static void htcpld_mask(struct irq_data *data)
 {
-	struct htcpld_chip *chip = get_irq_chip_data(irq);
-	chip->irqs_enabled &= ~(1 << (irq - chip->irq_start));
-	pr_debug("HTCPLD mask %d %04x\n", irq, chip->irqs_enabled);
+	struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
+	chip->irqs_enabled &= ~(1 << (data->irq - chip->irq_start));
+	pr_debug("HTCPLD mask %d %04x\n", data->irq, chip->irqs_enabled);
 }
-static void htcpld_unmask(unsigned int irq)
+static void htcpld_unmask(struct irq_data *data)
 {
-	struct htcpld_chip *chip = get_irq_chip_data(irq);
-	chip->irqs_enabled |= 1 << (irq - chip->irq_start);
-	pr_debug("HTCPLD unmask %d %04x\n", irq, chip->irqs_enabled);
+	struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
+	chip->irqs_enabled |= 1 << (data->irq - chip->irq_start);
+	pr_debug("HTCPLD unmask %d %04x\n", data->irq, chip->irqs_enabled);
 }
 
-static int htcpld_set_type(unsigned int irq, unsigned int flags)
+static int htcpld_set_type(struct irq_data *data, unsigned int flags)
 {
-	struct irq_desc *d = irq_to_desc(irq);
+	struct irq_desc *d = irq_to_desc(data->irq);
 
 	if (!d) {
-		pr_err("HTCPLD invalid IRQ: %d\n", irq);
+		pr_err("HTCPLD invalid IRQ: %d\n", data->irq);
 		return -EINVAL;
 	}
 
@@ -118,10 +118,10 @@
 }
 
 static struct irq_chip htcpld_muxed_chip = {
-	.name     = "htcpld",
-	.mask     = htcpld_mask,
-	.unmask   = htcpld_unmask,
-	.set_type = htcpld_set_type,
+	.name         = "htcpld",
+	.irq_mask     = htcpld_mask,
+	.irq_unmask   = htcpld_unmask,
+	.irq_set_type = htcpld_set_type,
 };
 
 /* To properly dispatch IRQ events, we need to read from the
@@ -235,7 +235,7 @@
  * and that work is scheduled in the set routine.  The kernel can then run
  * the I2C functions, which will sleep, in process context.
  */
-void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
+static void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
 {
 	struct i2c_client *client;
 	struct htcpld_chip *chip_data;
@@ -259,7 +259,7 @@
 	schedule_work(&(chip_data->set_val_work));
 }
 
-void htcpld_chip_set_ni(struct work_struct *work)
+static void htcpld_chip_set_ni(struct work_struct *work)
 {
 	struct htcpld_chip *chip_data;
 	struct i2c_client *client;
@@ -269,7 +269,7 @@
 	i2c_smbus_read_byte_data(client, chip_data->cache_out);
 }
 
-int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
+static int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct htcpld_chip *chip_data;
 	int val = 0;
@@ -316,7 +316,7 @@
 	return (offset < chip->ngpio) ? 0 : -EINVAL;
 }
 
-int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
+static int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct htcpld_chip *chip_data;
 
@@ -328,7 +328,7 @@
 		return -EINVAL;
 }
 
-void htcpld_chip_reset(struct i2c_client *client)
+static void htcpld_chip_reset(struct i2c_client *client)
 {
 	struct htcpld_chip *chip_data = i2c_get_clientdata(client);
 	if (!chip_data)
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 9dd1b33..0cc5979 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -84,31 +84,30 @@
 	spin_unlock_irqrestore(&adc->lock, flags);
 }
 
-static void jz4740_adc_irq_mask(unsigned int irq)
+static void jz4740_adc_irq_mask(struct irq_data *data)
 {
-	struct jz4740_adc *adc = get_irq_chip_data(irq);
-	jz4740_adc_irq_set_masked(adc, irq, true);
+	struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+	jz4740_adc_irq_set_masked(adc, data->irq, true);
 }
 
-static void jz4740_adc_irq_unmask(unsigned int irq)
+static void jz4740_adc_irq_unmask(struct irq_data *data)
 {
-	struct jz4740_adc *adc = get_irq_chip_data(irq);
-	jz4740_adc_irq_set_masked(adc, irq, false);
+	struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+	jz4740_adc_irq_set_masked(adc, data->irq, false);
 }
 
-static void jz4740_adc_irq_ack(unsigned int irq)
+static void jz4740_adc_irq_ack(struct irq_data *data)
 {
-	struct jz4740_adc *adc = get_irq_chip_data(irq);
-
-	irq -= adc->irq_base;
+	struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+	unsigned int irq = data->irq - adc->irq_base;
 	writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
 }
 
 static struct irq_chip jz4740_adc_irq_chip = {
 	.name = "jz4740-adc",
-	.mask = jz4740_adc_irq_mask,
-	.unmask = jz4740_adc_irq_unmask,
-	.ack = jz4740_adc_irq_ack,
+	.irq_mask = jz4740_adc_irq_mask,
+	.irq_unmask = jz4740_adc_irq_unmask,
+	.irq_ack = jz4740_adc_irq_ack,
 };
 
 static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 44695f5..0e998dc 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -407,16 +407,16 @@
 	return IRQ_HANDLED;
 }
 
-static void max8925_irq_lock(unsigned int irq)
+static void max8925_irq_lock(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&chip->irq_lock);
 }
 
-static void max8925_irq_sync_unlock(unsigned int irq)
+static void max8925_irq_sync_unlock(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
 	struct max8925_irq_data *irq_data;
 	static unsigned char cache_chg[2] = {0xff, 0xff};
 	static unsigned char cache_on[2] = {0xff, 0xff};
@@ -492,25 +492,25 @@
 	mutex_unlock(&chip->irq_lock);
 }
 
-static void max8925_irq_enable(unsigned int irq)
+static void max8925_irq_enable(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
-	max8925_irqs[irq - chip->irq_base].enable
-		= max8925_irqs[irq - chip->irq_base].offs;
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+	max8925_irqs[data->irq - chip->irq_base].enable
+		= max8925_irqs[data->irq - chip->irq_base].offs;
 }
 
-static void max8925_irq_disable(unsigned int irq)
+static void max8925_irq_disable(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
-	max8925_irqs[irq - chip->irq_base].enable = 0;
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+	max8925_irqs[data->irq - chip->irq_base].enable = 0;
 }
 
 static struct irq_chip max8925_irq_chip = {
 	.name		= "max8925",
-	.bus_lock	= max8925_irq_lock,
-	.bus_sync_unlock = max8925_irq_sync_unlock,
-	.enable		= max8925_irq_enable,
-	.disable	= max8925_irq_disable,
+	.irq_bus_lock	= max8925_irq_lock,
+	.irq_bus_sync_unlock = max8925_irq_sync_unlock,
+	.irq_enable	= max8925_irq_enable,
+	.irq_disable	= max8925_irq_disable,
 };
 
 static int max8925_irq_init(struct max8925_chip *chip, int irq,
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 45bfe77..3903e1f 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -102,16 +102,16 @@
 	return &max8998_irqs[irq - max8998->irq_base];
 }
 
-static void max8998_irq_lock(unsigned int irq)
+static void max8998_irq_lock(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&max8998->irqlock);
 }
 
-static void max8998_irq_sync_unlock(unsigned int irq)
+static void max8998_irq_sync_unlock(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(max8998->irq_masks_cur); i++) {
@@ -129,28 +129,30 @@
 	mutex_unlock(&max8998->irqlock);
 }
 
-static void max8998_irq_unmask(unsigned int irq)
+static void max8998_irq_unmask(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
+							       data->irq);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
 
-static void max8998_irq_mask(unsigned int irq)
+static void max8998_irq_mask(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
+							       data->irq);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
 
 static struct irq_chip max8998_irq_chip = {
 	.name = "max8998",
-	.bus_lock = max8998_irq_lock,
-	.bus_sync_unlock = max8998_irq_sync_unlock,
-	.mask = max8998_irq_mask,
-	.unmask = max8998_irq_unmask,
+	.irq_bus_lock = max8998_irq_lock,
+	.irq_bus_sync_unlock = max8998_irq_sync_unlock,
+	.irq_mask = max8998_irq_mask,
+	.irq_unmask = max8998_irq_unmask,
 };
 
 static irqreturn_t max8998_irq_thread(int irq, void *data)
@@ -181,6 +183,13 @@
 	return IRQ_HANDLED;
 }
 
+int max8998_irq_resume(struct max8998_dev *max8998)
+{
+	if (max8998->irq && max8998->irq_base)
+		max8998_irq_thread(max8998->irq_base, max8998);
+	return 0;
+}
+
 int max8998_irq_init(struct max8998_dev *max8998)
 {
 	int i;
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index bb9977b..bbfe867 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -25,6 +25,8 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <linux/mutex.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/max8998.h>
@@ -40,6 +42,14 @@
 	},
 };
 
+static struct mfd_cell lp3974_devs[] = {
+	{
+		.name = "lp3974-pmic",
+	}, {
+		.name = "lp3974-rtc",
+	},
+};
+
 int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
 {
 	struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
@@ -135,6 +145,7 @@
 	if (pdata) {
 		max8998->ono = pdata->ono;
 		max8998->irq_base = pdata->irq_base;
+		max8998->wakeup = pdata->wakeup;
 	}
 	mutex_init(&max8998->iolock);
 
@@ -143,9 +154,23 @@
 
 	max8998_irq_init(max8998);
 
-	ret = mfd_add_devices(max8998->dev, -1,
-			      max8998_devs, ARRAY_SIZE(max8998_devs),
-			      NULL, 0);
+	pm_runtime_set_active(max8998->dev);
+
+	switch (id->driver_data) {
+	case TYPE_LP3974:
+		ret = mfd_add_devices(max8998->dev, -1,
+				lp3974_devs, ARRAY_SIZE(lp3974_devs),
+				NULL, 0);
+		break;
+	case TYPE_MAX8998:
+		ret = mfd_add_devices(max8998->dev, -1,
+				max8998_devs, ARRAY_SIZE(max8998_devs),
+				NULL, 0);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
 	if (ret < 0)
 		goto err;
 
@@ -178,10 +203,113 @@
 };
 MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
 
+static int max8998_suspend(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+
+	if (max8998->wakeup)
+		set_irq_wake(max8998->irq, 1);
+	return 0;
+}
+
+static int max8998_resume(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+
+	if (max8998->wakeup)
+		set_irq_wake(max8998->irq, 0);
+	/*
+	 * In LP3974, if IRQ registers are not "read & clear"
+	 * while they are set during sleep, the interrupt becomes
+	 * disabled.
+	 */
+	return max8998_irq_resume(i2c_get_clientdata(i2c));
+}
+
+struct max8998_reg_dump {
+	u8	addr;
+	u8	val;
+};
+#define SAVE_ITEM(x)	{ .addr = (x), .val = 0x0, }
+struct max8998_reg_dump max8998_dump[] = {
+	SAVE_ITEM(MAX8998_REG_IRQM1),
+	SAVE_ITEM(MAX8998_REG_IRQM2),
+	SAVE_ITEM(MAX8998_REG_IRQM3),
+	SAVE_ITEM(MAX8998_REG_IRQM4),
+	SAVE_ITEM(MAX8998_REG_STATUSM1),
+	SAVE_ITEM(MAX8998_REG_STATUSM2),
+	SAVE_ITEM(MAX8998_REG_CHGR1),
+	SAVE_ITEM(MAX8998_REG_CHGR2),
+	SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
+	SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
+	SAVE_ITEM(MAX8998_REG_BUCK_ACTIVE_DISCHARGE3),
+	SAVE_ITEM(MAX8998_REG_ONOFF1),
+	SAVE_ITEM(MAX8998_REG_ONOFF2),
+	SAVE_ITEM(MAX8998_REG_ONOFF3),
+	SAVE_ITEM(MAX8998_REG_ONOFF4),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE1),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE2),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE3),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE4),
+	SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE1),
+	SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE2),
+	SAVE_ITEM(MAX8998_REG_LDO2_LDO3),
+	SAVE_ITEM(MAX8998_REG_LDO4),
+	SAVE_ITEM(MAX8998_REG_LDO5),
+	SAVE_ITEM(MAX8998_REG_LDO6),
+	SAVE_ITEM(MAX8998_REG_LDO7),
+	SAVE_ITEM(MAX8998_REG_LDO8_LDO9),
+	SAVE_ITEM(MAX8998_REG_LDO10_LDO11),
+	SAVE_ITEM(MAX8998_REG_LDO12),
+	SAVE_ITEM(MAX8998_REG_LDO13),
+	SAVE_ITEM(MAX8998_REG_LDO14),
+	SAVE_ITEM(MAX8998_REG_LDO15),
+	SAVE_ITEM(MAX8998_REG_LDO16),
+	SAVE_ITEM(MAX8998_REG_LDO17),
+	SAVE_ITEM(MAX8998_REG_BKCHR),
+	SAVE_ITEM(MAX8998_REG_LBCNFG1),
+	SAVE_ITEM(MAX8998_REG_LBCNFG2),
+};
+/* Save registers before hibernation */
+static int max8998_freeze(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
+		max8998_read_reg(i2c, max8998_dump[i].addr,
+				&max8998_dump[i].val);
+
+	return 0;
+}
+
+/* Restore registers after hibernation */
+static int max8998_restore(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
+		max8998_write_reg(i2c, max8998_dump[i].addr,
+				max8998_dump[i].val);
+
+	return 0;
+}
+
+const struct dev_pm_ops max8998_pm = {
+	.suspend = max8998_suspend,
+	.resume = max8998_resume,
+	.freeze = max8998_freeze,
+	.restore = max8998_restore,
+};
+
 static struct i2c_driver max8998_i2c_driver = {
 	.driver = {
 		   .name = "max8998",
 		   .owner = THIS_MODULE,
+		   .pm = &max8998_pm,
 	},
 	.probe = max8998_i2c_probe,
 	.remove = max8998_i2c_remove,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index a2ac2ed..b9fcaf0 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -749,7 +749,7 @@
 	if (ret) {
 err_mask:
 err_revision:
-		mutex_unlock(&mc13xxx->lock);
+		mc13xxx_unlock(mc13xxx);
 		dev_set_drvdata(&spi->dev, NULL);
 		kfree(mc13xxx);
 		return ret;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index ec99f68..d83ad0f 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
 #include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 static int mfd_add_device(struct device *parent, int id,
@@ -82,6 +83,9 @@
 	if (ret)
 		goto fail_res;
 
+	if (cell->pm_runtime_no_callbacks)
+		pm_runtime_no_callbacks(&pdev->dev);
+
 	kfree(res);
 
 	return 0;
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index f1714f9..0a7df44 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -131,11 +131,17 @@
 	 */
 	mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
 
+	/*
+	 * All SDHI blocks support SDIO IRQ signalling.
+	 */
+	mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
 	if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
 		priv->param_tx.slave_id = p->dma_slave_tx;
 		priv->param_rx.slave_id = p->dma_slave_rx;
 		priv->dma_priv.chan_priv_tx = &priv->param_tx;
 		priv->dma_priv.chan_priv_rx = &priv->param_rx;
+		priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
 		mmc_data->dma = &priv->dma_priv;
 	}
 
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index bc9275c..5de3a76 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -26,7 +26,7 @@
 #include <linux/sm501-regs.h>
 #include <linux/serial_8250.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 struct sm501_device {
 	struct list_head		list;
@@ -745,11 +745,8 @@
 	int ret;
 
 	for (ptr = 0; ptr < pdev->num_resources; ptr++) {
-		printk(KERN_DEBUG "%s[%d] flags %08lx: %08llx..%08llx\n",
-		       pdev->name, ptr,
-		       pdev->resource[ptr].flags,
-		       (unsigned long long)pdev->resource[ptr].start,
-		       (unsigned long long)pdev->resource[ptr].end);
+		printk(KERN_DEBUG "%s[%d] %pR\n",
+		       pdev->name, ptr, &pdev->resource[ptr]);
 	}
 
 	ret = platform_device_register(pdev);
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index b11487f..3e5732b 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -699,16 +699,16 @@
 	return IRQ_HANDLED;
 }
 
-static void stmpe_irq_lock(unsigned int irq)
+static void stmpe_irq_lock(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&stmpe->irq_lock);
 }
 
-static void stmpe_irq_sync_unlock(unsigned int irq)
+static void stmpe_irq_sync_unlock(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
 	struct stmpe_variant_info *variant = stmpe->variant;
 	int num = DIV_ROUND_UP(variant->num_irqs, 8);
 	int i;
@@ -727,20 +727,20 @@
 	mutex_unlock(&stmpe->irq_lock);
 }
 
-static void stmpe_irq_mask(unsigned int irq)
+static void stmpe_irq_mask(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
-	int offset = irq - stmpe->irq_base;
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - stmpe->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	stmpe->ier[regoffset] &= ~mask;
 }
 
-static void stmpe_irq_unmask(unsigned int irq)
+static void stmpe_irq_unmask(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
-	int offset = irq - stmpe->irq_base;
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - stmpe->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -749,10 +749,10 @@
 
 static struct irq_chip stmpe_irq_chip = {
 	.name			= "stmpe",
-	.bus_lock		= stmpe_irq_lock,
-	.bus_sync_unlock	= stmpe_irq_sync_unlock,
-	.mask			= stmpe_irq_mask,
-	.unmask			= stmpe_irq_unmask,
+	.irq_bus_lock		= stmpe_irq_lock,
+	.irq_bus_sync_unlock	= stmpe_irq_sync_unlock,
+	.irq_mask		= stmpe_irq_mask,
+	.irq_unmask		= stmpe_irq_unmask,
 };
 
 static int __devinit stmpe_irq_init(struct stmpe *stmpe)
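Note: the stmpe change above is the first of several identical conversions in this pull (t7l66xb, tc6393xb, tps6586x, twl4030, wm831x, wm8350 and wm8994 follow); irq_chip callbacks now take a struct irq_data pointer instead of a bare irq number. A minimal sketch of the pattern, using an invented "foo" driver rather than any of the chips touched here:

#include <linux/irq.h>
#include <linux/bitops.h>

struct foo_chip {
	unsigned int irq_base;
	u8 mask_cache;
};

/* Both the chip data and the Linux irq number come from irq_data now */
static void foo_irq_mask(struct irq_data *data)
{
	struct foo_chip *foo = irq_data_get_irq_chip_data(data);
	int offset = data->irq - foo->irq_base;

	foo->mask_cache |= BIT(offset);
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,	/* was .mask, taking unsigned int irq */
};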
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 006c121..9caeb4a 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -199,37 +199,37 @@
 				generic_handle_irq(irq_base + i);
 }
 
-static void t7l66xb_irq_mask(unsigned int irq)
+static void t7l66xb_irq_mask(struct irq_data *data)
 {
-	struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+	struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
 	unsigned long			flags;
 	u8 imr;
 
 	spin_lock_irqsave(&t7l66xb->lock, flags);
 	imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
-	imr |= 1 << (irq - t7l66xb->irq_base);
+	imr |= 1 << (data->irq - t7l66xb->irq_base);
 	tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&t7l66xb->lock, flags);
 }
 
-static void t7l66xb_irq_unmask(unsigned int irq)
+static void t7l66xb_irq_unmask(struct irq_data *data)
 {
-	struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+	struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 	u8 imr;
 
 	spin_lock_irqsave(&t7l66xb->lock, flags);
 	imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
-	imr &= ~(1 << (irq - t7l66xb->irq_base));
+	imr &= ~(1 << (data->irq - t7l66xb->irq_base));
 	tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&t7l66xb->lock, flags);
 }
 
 static struct irq_chip t7l66xb_chip = {
-	.name	= "t7l66xb",
-	.ack	= t7l66xb_irq_mask,
-	.mask	= t7l66xb_irq_mask,
-	.unmask	= t7l66xb_irq_unmask,
+	.name		= "t7l66xb",
+	.irq_ack	= t7l66xb_irq_mask,
+	.irq_mask	= t7l66xb_irq_mask,
+	.irq_unmask	= t7l66xb_irq_unmask,
 };
 
 /*--------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1ea80d8a..9a23863 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -527,41 +527,41 @@
 		}
 }
 
-static void tc6393xb_irq_ack(unsigned int irq)
+static void tc6393xb_irq_ack(struct irq_data *data)
 {
 }
 
-static void tc6393xb_irq_mask(unsigned int irq)
+static void tc6393xb_irq_mask(struct irq_data *data)
 {
-	struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+	struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 	u8 imr;
 
 	spin_lock_irqsave(&tc6393xb->lock, flags);
 	imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
-	imr |= 1 << (irq - tc6393xb->irq_base);
+	imr |= 1 << (data->irq - tc6393xb->irq_base);
 	tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&tc6393xb->lock, flags);
 }
 
-static void tc6393xb_irq_unmask(unsigned int irq)
+static void tc6393xb_irq_unmask(struct irq_data *data)
 {
-	struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+	struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 	u8 imr;
 
 	spin_lock_irqsave(&tc6393xb->lock, flags);
 	imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
-	imr &= ~(1 << (irq - tc6393xb->irq_base));
+	imr &= ~(1 << (data->irq - tc6393xb->irq_base));
 	tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&tc6393xb->lock, flags);
 }
 
 static struct irq_chip tc6393xb_chip = {
-	.name	= "tc6393xb",
-	.ack	= tc6393xb_irq_ack,
-	.mask	= tc6393xb_irq_mask,
-	.unmask	= tc6393xb_irq_unmask,
+	.name		= "tc6393xb",
+	.irq_ack	= tc6393xb_irq_ack,
+	.irq_mask	= tc6393xb_irq_mask,
+	.irq_unmask	= tc6393xb_irq_unmask,
 };
 
 static void tc6393xb_attach_irq(struct platform_device *dev)
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 90187fe..93d5fdf 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -34,7 +34,7 @@
 
 #include <linux/i2c/tps65010.h>
 
-#include <asm/gpio.h>
+#include <linux/gpio.h>
 
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b4931ab..e9018d1 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -46,8 +46,6 @@
 
 /* device id */
 #define TPS6586X_VERSIONCRC	0xcd
-#define TPS658621A_VERSIONCRC	0x15
-#define TPS658621C_VERSIONCRC	0x2c
 
 struct tps6586x_irq_data {
 	u8	mask_reg;
@@ -152,12 +150,12 @@
 static inline int __tps6586x_writes(struct i2c_client *client, int reg,
 				  int len, uint8_t *val)
 {
-	int ret;
+	int ret, i;
 
-	ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
-	if (ret < 0) {
-		dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
-		return ret;
+	for (i = 0; i < len; i++) {
+		ret = __tps6586x_write(client, reg + i, *(val + i));
+		if (ret < 0)
+			return ret;
 	}
 
 	return 0;
@@ -325,37 +323,37 @@
 	return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
 }
 
-static void tps6586x_irq_lock(unsigned int irq)
+static void tps6586x_irq_lock(struct irq_data *data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&tps6586x->irq_lock);
 }
 
-static void tps6586x_irq_enable(unsigned int irq)
+static void tps6586x_irq_enable(struct irq_data *irq_data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
-	unsigned int __irq = irq - tps6586x->irq_base;
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
+	unsigned int __irq = irq_data->irq - tps6586x->irq_base;
 	const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
 
 	tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
 	tps6586x->irq_en |= (1 << __irq);
 }
 
-static void tps6586x_irq_disable(unsigned int irq)
+static void tps6586x_irq_disable(struct irq_data *irq_data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
 
-	unsigned int __irq = irq - tps6586x->irq_base;
+	unsigned int __irq = irq_data->irq - tps6586x->irq_base;
 	const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
 
 	tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
 	tps6586x->irq_en &= ~(1 << __irq);
 }
 
-static void tps6586x_irq_sync_unlock(unsigned int irq)
+static void tps6586x_irq_sync_unlock(struct irq_data *data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
@@ -421,10 +419,10 @@
 	tps6586x->irq_base = irq_base;
 
 	tps6586x->irq_chip.name = "tps6586x";
-	tps6586x->irq_chip.enable = tps6586x_irq_enable;
-	tps6586x->irq_chip.disable = tps6586x_irq_disable;
-	tps6586x->irq_chip.bus_lock = tps6586x_irq_lock;
-	tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock;
+	tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
+	tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
+	tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
+	tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
 
 	for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
 		int __irq = i + tps6586x->irq_base;
@@ -498,11 +496,7 @@
 		return -EIO;
 	}
 
-	if ((ret != TPS658621A_VERSIONCRC) &&
-	    (ret != TPS658621C_VERSIONCRC)) {
-		dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
-		return -ENODEV;
-	}
+	dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
 
 	tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
 	if (tps6586x == NULL)
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 12abd5b..a35fa7dc 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1003,7 +1003,7 @@
 }
 
 /* NOTE:  this driver only handles a single twl4030/tps659x0 chip */
-static int __init
+static int __devinit
 twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
 	int				status;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d3a147..63a30e8 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -599,38 +599,38 @@
  * completion, potentially including some re-ordering, of these requests.
  */
 
-static void twl4030_sih_mask(unsigned irq)
+static void twl4030_sih_mask(struct irq_data *data)
 {
-	struct sih_agent *sih = get_irq_chip_data(irq);
+	struct sih_agent *sih = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 
 	spin_lock_irqsave(&sih_agent_lock, flags);
-	sih->imr |= BIT(irq - sih->irq_base);
+	sih->imr |= BIT(data->irq - sih->irq_base);
 	sih->imr_change_pending = true;
 	queue_work(wq, &sih->mask_work);
 	spin_unlock_irqrestore(&sih_agent_lock, flags);
 }
 
-static void twl4030_sih_unmask(unsigned irq)
+static void twl4030_sih_unmask(struct irq_data *data)
 {
-	struct sih_agent *sih = get_irq_chip_data(irq);
+	struct sih_agent *sih = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 
 	spin_lock_irqsave(&sih_agent_lock, flags);
-	sih->imr &= ~BIT(irq - sih->irq_base);
+	sih->imr &= ~BIT(data->irq - sih->irq_base);
 	sih->imr_change_pending = true;
 	queue_work(wq, &sih->mask_work);
 	spin_unlock_irqrestore(&sih_agent_lock, flags);
 }
 
-static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
+static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
 {
-	struct sih_agent *sih = get_irq_chip_data(irq);
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct sih_agent *sih = irq_data_get_irq_chip_data(data);
+	struct irq_desc *desc = irq_to_desc(data->irq);
 	unsigned long flags;
 
 	if (!desc) {
-		pr_err("twl4030: Invalid IRQ: %d\n", irq);
+		pr_err("twl4030: Invalid IRQ: %d\n", data->irq);
 		return -EINVAL;
 	}
 
@@ -641,7 +641,7 @@
 	if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
 		desc->status &= ~IRQ_TYPE_SENSE_MASK;
 		desc->status |= trigger;
-		sih->edge_change |= BIT(irq - sih->irq_base);
+		sih->edge_change |= BIT(data->irq - sih->irq_base);
 		queue_work(wq, &sih->edge_work);
 	}
 	spin_unlock_irqrestore(&sih_agent_lock, flags);
@@ -650,9 +650,9 @@
 
 static struct irq_chip twl4030_sih_irq_chip = {
 	.name		= "twl4030",
-	.mask		= twl4030_sih_mask,
-	.unmask		= twl4030_sih_unmask,
-	.set_type	= twl4030_sih_set_type,
+	.irq_mask      	= twl4030_sih_mask,
+	.irq_unmask	= twl4030_sih_unmask,
+	.irq_set_type	= twl4030_sih_set_type,
 };
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 06c8955..4082ed7 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -332,7 +332,7 @@
 	 */
 	twl6030_irq_chip = dummy_irq_chip;
 	twl6030_irq_chip.name = "twl6030";
-	twl6030_irq_chip.set_type = NULL;
+	twl6030_irq_chip.irq_set_type = NULL;
 
 	for (i = irq_base; i < irq_end; i++) {
 		set_irq_chip_and_handler(i, &twl6030_irq_chip,
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb41..92b85e2 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@
 	idev->close      = ucb1x00_ts_close;
 
 	__set_bit(EV_ABS, idev->evbit);
-	__set_bit(ABS_X, idev->absbit);
-	__set_bit(ABS_Y, idev->absbit);
-	__set_bit(ABS_PRESSURE, idev->absbit);
 
 	input_set_drvdata(idev, ts);
 
+	ucb1x00_adc_enable(ts->ucb);
+	ts->x_res = ucb1x00_ts_read_xres(ts);
+	ts->y_res = ucb1x00_ts_read_yres(ts);
+	ucb1x00_adc_disable(ts->ucb);
+
+	input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+	input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+	input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
 	err = input_register_device(idev);
 	if (err)
 		goto fail;
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index ebb0597..348052a 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -112,7 +112,7 @@
 	return ret;
 }
 
-static void vx855_remove(struct pci_dev *pdev)
+static void __devexit vx855_remove(struct pci_dev *pdev)
 {
 	mfd_remove_devices(&pdev->dev);
 	pci_disable_device(pdev);
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 76cadcf..3fe9a58 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1541,6 +1541,12 @@
 		dev_info(wm831x->dev, "WM8325 revision %c\n", 'A' + rev);
 		break;
 
+	case WM8326:
+		parent = WM8326;
+		wm831x->num_gpio = 12;
+		dev_info(wm831x->dev, "WM8326 revision %c\n", 'A' + rev);
+		break;
+
 	default:
 		dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret);
 		ret = -EINVAL;
@@ -1610,18 +1616,9 @@
 		break;
 
 	case WM8320:
-		ret = mfd_add_devices(wm831x->dev, -1,
-				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-				      NULL, 0);
-		break;
-
 	case WM8321:
-		ret = mfd_add_devices(wm831x->dev, -1,
-				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-				      NULL, 0);
-		break;
-
 	case WM8325:
+	case WM8326:
 		ret = mfd_add_devices(wm831x->dev, -1,
 				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
 				      NULL, wm831x->irq_base);
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 156b198..3853fa8 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -94,9 +94,9 @@
 	return 0;
 }
 
-static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+static int wm831x_i2c_suspend(struct device *dev)
 {
-	struct wm831x *wm831x = i2c_get_clientdata(i2c);
+	struct wm831x *wm831x = dev_get_drvdata(dev);
 
 	return wm831x_device_suspend(wm831x);
 }
@@ -108,19 +108,23 @@
 	{ "wm8320", WM8320 },
 	{ "wm8321", WM8321 },
 	{ "wm8325", WM8325 },
+	{ "wm8326", WM8326 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
 
+static const struct dev_pm_ops wm831x_pm_ops = {
+	.suspend = wm831x_i2c_suspend,
+};
 
 static struct i2c_driver wm831x_i2c_driver = {
 	.driver = {
-		   .name = "wm831x",
-		   .owner = THIS_MODULE,
+		.name = "wm831x",
+		.owner = THIS_MODULE,
+		.pm = &wm831x_pm_ops,
 	},
 	.probe = wm831x_i2c_probe,
 	.remove = wm831x_i2c_remove,
-	.suspend = wm831x_i2c_suspend,
 	.id_table = wm831x_i2c_id,
 };
 
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 294183b..f7192d4 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -345,16 +345,16 @@
 	return &wm831x_irqs[irq - wm831x->irq_base];
 }
 
-static void wm831x_irq_lock(unsigned int irq)
+static void wm831x_irq_lock(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&wm831x->irq_lock);
 }
 
-static void wm831x_irq_sync_unlock(unsigned int irq)
+static void wm831x_irq_sync_unlock(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
@@ -371,28 +371,30 @@
 	mutex_unlock(&wm831x->irq_lock);
 }
 
-static void wm831x_irq_unmask(unsigned int irq)
+static void wm831x_irq_unmask(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
-	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
+							     data->irq);
 
 	wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
 
-static void wm831x_irq_mask(unsigned int irq)
+static void wm831x_irq_mask(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
-	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
+							     data->irq);
 
 	wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
 
-static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
+static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
-	int val;
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+	int val, irq;
 
-	irq = irq - wm831x->irq_base;
+	irq = data->irq - wm831x->irq_base;
 
 	if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
 		/* Ignore internal-only IRQs */
@@ -421,12 +423,12 @@
 }
 
 static struct irq_chip wm831x_irq_chip = {
-	.name = "wm831x",
-	.bus_lock = wm831x_irq_lock,
-	.bus_sync_unlock = wm831x_irq_sync_unlock,
-	.mask = wm831x_irq_mask,
-	.unmask = wm831x_irq_unmask,
-	.set_type = wm831x_irq_set_type,
+	.name			= "wm831x",
+	.irq_bus_lock		= wm831x_irq_lock,
+	.irq_bus_sync_unlock	= wm831x_irq_sync_unlock,
+	.irq_mask		= wm831x_irq_mask,
+	.irq_unmask		= wm831x_irq_unmask,
+	.irq_set_type		= wm831x_irq_set_type,
 };
 
 /* The processing of the primary interrupt occurs in a thread so that
@@ -515,6 +517,17 @@
 		return 0;
 	}
 
+	/* Try to flag /IRQ as a wake source; there are a number of
+	 * unconditional wake sources in the PMIC so this isn't
+	 * conditional but we don't actually care *too* much if it
+	 * fails.
+	 */
+	ret = enable_irq_wake(irq);
+	if (ret != 0) {
+		dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
+			 ret);
+	}
+
 	wm831x->irq = irq;
 	wm831x->irq_base = pdata->irq_base;
 
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 2789b15..0a8f772 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -81,6 +81,8 @@
 		type = WM8321;
 	else if (strcmp(spi->modalias, "wm8325") == 0)
 		type = WM8325;
+	else if (strcmp(spi->modalias, "wm8326") == 0)
+		type = WM8326;
 	else {
 		dev_err(&spi->dev, "Unknown device type\n");
 		return -EINVAL;
@@ -184,6 +186,17 @@
 	.suspend	= wm831x_spi_suspend,
 };
 
+static struct spi_driver wm8326_spi_driver = {
+	.driver = {
+		.name	= "wm8326",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= wm831x_spi_probe,
+	.remove		= __devexit_p(wm831x_spi_remove),
+	.suspend	= wm831x_spi_suspend,
+};
+
 static int __init wm831x_spi_init(void)
 {
 	int ret;
@@ -212,12 +225,17 @@
 	if (ret != 0)
 		pr_err("Failed to register WM8325 SPI driver: %d\n", ret);
 
+	ret = spi_register_driver(&wm8326_spi_driver);
+	if (ret != 0)
+		pr_err("Failed to register WM8326 SPI driver: %d\n", ret);
+
 	return 0;
 }
 subsys_initcall(wm831x_spi_init);
 
 static void __exit wm831x_spi_exit(void)
 {
+	spi_unregister_driver(&wm8326_spi_driver);
 	spi_unregister_driver(&wm8325_spi_driver);
 	spi_unregister_driver(&wm8321_spi_driver);
 	spi_unregister_driver(&wm8320_spi_driver);
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index f56c9ad..5839966 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -417,16 +417,16 @@
 	return IRQ_HANDLED;
 }
 
-static void wm8350_irq_lock(unsigned int irq)
+static void wm8350_irq_lock(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&wm8350->irq_lock);
 }
 
-static void wm8350_irq_sync_unlock(unsigned int irq)
+static void wm8350_irq_sync_unlock(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) {
@@ -442,28 +442,30 @@
 	mutex_unlock(&wm8350->irq_lock);
 }
 
-static void wm8350_irq_enable(unsigned int irq)
+static void wm8350_irq_enable(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
-	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
+	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
+							     data->irq);
 
 	wm8350->irq_masks[irq_data->reg] &= ~irq_data->mask;
 }
 
-static void wm8350_irq_disable(unsigned int irq)
+static void wm8350_irq_disable(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
-	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
+	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
+							     data->irq);
 
 	wm8350->irq_masks[irq_data->reg] |= irq_data->mask;
 }
 
 static struct irq_chip wm8350_irq_chip = {
-	.name = "wm8350",
-	.bus_lock = wm8350_irq_lock,
-	.bus_sync_unlock = wm8350_irq_sync_unlock,
-	.disable = wm8350_irq_disable,
-	.enable = wm8350_irq_enable,
+	.name			= "wm8350",
+	.irq_bus_lock		= wm8350_irq_lock,
+	.irq_bus_sync_unlock	= wm8350_irq_sync_unlock,
+	.irq_disable		= wm8350_irq_disable,
+	.irq_enable		= wm8350_irq_enable,
 };
 
 int wm8350_irq_init(struct wm8350 *wm8350, int irq,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index b3b2aaf8..f4016a0 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -18,6 +18,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/machine.h>
 
@@ -169,8 +170,16 @@
 EXPORT_SYMBOL_GPL(wm8994_set_bits);
 
 static struct mfd_cell wm8994_regulator_devs[] = {
-	{ .name = "wm8994-ldo", .id = 1 },
-	{ .name = "wm8994-ldo", .id = 2 },
+	{
+		.name = "wm8994-ldo",
+		.id = 1,
+		.pm_runtime_no_callbacks = true,
+	},
+	{
+		.name = "wm8994-ldo",
+		.id = 2,
+		.pm_runtime_no_callbacks = true,
+	},
 };
 
 static struct resource wm8994_codec_resources[] = {
@@ -200,6 +209,7 @@
 		.name = "wm8994-gpio",
 		.num_resources = ARRAY_SIZE(wm8994_gpio_resources),
 		.resources = wm8994_gpio_resources,
+		.pm_runtime_no_callbacks = true,
 	},
 };
 
@@ -218,12 +228,34 @@
 	"SPKVDD2",
 };
 
+static const char *wm8958_main_supplies[] = {
+	"DBVDD1",
+	"DBVDD2",
+	"DBVDD3",
+	"DCVDD",
+	"AVDD1",
+	"AVDD2",
+	"CPVDD",
+	"SPKVDD1",
+	"SPKVDD2",
+};
+
 #ifdef CONFIG_PM
-static int wm8994_device_suspend(struct device *dev)
+static int wm8994_suspend(struct device *dev)
 {
 	struct wm8994 *wm8994 = dev_get_drvdata(dev);
 	int ret;
 
+	/* Don't actually go through with the suspend if the CODEC is
+	 * still active (e.g. for audio passthrough from CP). */
+	ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+	if (ret < 0) {
+		dev_err(dev, "Failed to read power status: %d\n", ret);
+	} else if (ret & WM8994_VMID_SEL_MASK) {
+		dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+		return 0;
+	}
+
 	/* GPIO configuration state is saved here since we may be configuring
 	 * the GPIO alternate functions even if we're not using the gpiolib
 	 * driver for them.
@@ -239,7 +271,9 @@
 	if (ret < 0)
 		dev_err(dev, "Failed to save LDO registers: %d\n", ret);
 
-	ret = regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	wm8994->suspended = true;
+
+	ret = regulator_bulk_disable(wm8994->num_supplies,
 				     wm8994->supplies);
 	if (ret != 0) {
 		dev_err(dev, "Failed to disable supplies: %d\n", ret);
@@ -249,12 +283,16 @@
 	return 0;
 }
 
-static int wm8994_device_resume(struct device *dev)
+static int wm8994_resume(struct device *dev)
 {
 	struct wm8994 *wm8994 = dev_get_drvdata(dev);
 	int ret;
 
-	ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+	/* We may have lied to the PM core about suspending */
+	if (!wm8994->suspended)
+		return 0;
+
+	ret = regulator_bulk_enable(wm8994->num_supplies,
 				    wm8994->supplies);
 	if (ret != 0) {
 		dev_err(dev, "Failed to enable supplies: %d\n", ret);
@@ -276,6 +314,8 @@
 	if (ret < 0)
 		dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
 
+	wm8994->suspended = false;
+
 	return 0;
 }
 #endif
@@ -305,9 +345,10 @@
 /*
  * Instantiate the generic non-control parts of the device.
  */
-static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
+static int wm8994_device_init(struct wm8994 *wm8994, int irq)
 {
 	struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+	const char *devname;
 	int ret, i;
 
 	mutex_init(&wm8994->io_lock);
@@ -323,25 +364,48 @@
 		goto err;
 	}
 
+	switch (wm8994->type) {
+	case WM8994:
+		wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
+		break;
+	case WM8958:
+		wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies);
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
 	wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
-				   ARRAY_SIZE(wm8994_main_supplies),
+				   wm8994->num_supplies,
 				   GFP_KERNEL);
 	if (!wm8994->supplies) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
-		wm8994->supplies[i].supply = wm8994_main_supplies[i];
-
-	ret = regulator_bulk_get(wm8994->dev, ARRAY_SIZE(wm8994_main_supplies),
+	switch (wm8994->type) {
+	case WM8994:
+		for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
+			wm8994->supplies[i].supply = wm8994_main_supplies[i];
+		break;
+	case WM8958:
+		for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++)
+			wm8994->supplies[i].supply = wm8958_main_supplies[i];
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
 				 wm8994->supplies);
 	if (ret != 0) {
 		dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
 		goto err_supplies;
 	}
 
-	ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+	ret = regulator_bulk_enable(wm8994->num_supplies,
 				    wm8994->supplies);
 	if (ret != 0) {
 		dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
@@ -353,7 +417,22 @@
 		dev_err(wm8994->dev, "Failed to read ID register\n");
 		goto err_enable;
 	}
-	if (ret != 0x8994) {
+	switch (ret) {
+	case 0x8994:
+		devname = "WM8994";
+		if (wm8994->type != WM8994)
+			dev_warn(wm8994->dev, "Device registered as type %d\n",
+				 wm8994->type);
+		wm8994->type = WM8994;
+		break;
+	case 0x8958:
+		devname = "WM8958";
+		if (wm8994->type != WM8958)
+			dev_warn(wm8994->dev, "Device registered as type %d\n",
+				 wm8994->type);
+		wm8994->type = WM8958;
+		break;
+	default:
 		dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n",
 			ret);
 		ret = -EINVAL;
@@ -370,14 +449,16 @@
 	switch (ret) {
 	case 0:
 	case 1:
-		dev_warn(wm8994->dev, "revision %c not fully supported\n",
-			'A' + ret);
+		if (wm8994->type == WM8994)
+			dev_warn(wm8994->dev,
+				 "revision %c not fully supported\n",
+				 'A' + ret);
 		break;
 	default:
-		dev_info(wm8994->dev, "revision %c\n", 'A' + ret);
 		break;
 	}
 
+	dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);
 
 	if (pdata) {
 		wm8994->irq_base = pdata->irq_base;
@@ -418,15 +499,18 @@
 		goto err_irq;
 	}
 
+	pm_runtime_enable(wm8994->dev);
+	pm_runtime_resume(wm8994->dev);
+
 	return 0;
 
 err_irq:
 	wm8994_irq_exit(wm8994);
 err_enable:
-	regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	regulator_bulk_disable(wm8994->num_supplies,
 			       wm8994->supplies);
 err_get:
-	regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
 err_supplies:
 	kfree(wm8994->supplies);
 err:
@@ -437,11 +521,12 @@
 
 static void wm8994_device_exit(struct wm8994 *wm8994)
 {
+	pm_runtime_disable(wm8994->dev);
 	mfd_remove_devices(wm8994->dev);
 	wm8994_irq_exit(wm8994);
-	regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	regulator_bulk_disable(wm8994->num_supplies,
 			       wm8994->supplies);
-	regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
 	kfree(wm8994->supplies);
 	kfree(wm8994);
 }
@@ -506,8 +591,9 @@
 	wm8994->read_dev = wm8994_i2c_read_device;
 	wm8994->write_dev = wm8994_i2c_write_device;
 	wm8994->irq = i2c->irq;
+	wm8994->type = id->driver_data;
 
-	return wm8994_device_init(wm8994, id->driver_data, i2c->irq);
+	return wm8994_device_init(wm8994, i2c->irq);
 }
 
 static int wm8994_i2c_remove(struct i2c_client *i2c)
@@ -519,36 +605,23 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int wm8994_i2c_suspend(struct i2c_client *i2c, pm_message_t state)
-{
-	return wm8994_device_suspend(&i2c->dev);
-}
-
-static int wm8994_i2c_resume(struct i2c_client *i2c)
-{
-	return wm8994_device_resume(&i2c->dev);
-}
-#else
-#define wm8994_i2c_suspend NULL
-#define wm8994_i2c_resume NULL
-#endif
-
 static const struct i2c_device_id wm8994_i2c_id[] = {
-	{ "wm8994", 0 },
+	{ "wm8994", WM8994 },
+	{ "wm8958", WM8958 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
 
+UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume, NULL);
+
 static struct i2c_driver wm8994_i2c_driver = {
 	.driver = {
-		   .name = "wm8994",
-		   .owner = THIS_MODULE,
+		.name = "wm8994",
+		.owner = THIS_MODULE,
+		.pm = &wm8994_pm_ops,
 	},
 	.probe = wm8994_i2c_probe,
 	.remove = wm8994_i2c_remove,
-	.suspend = wm8994_i2c_suspend,
-	.resume = wm8994_i2c_resume,
 	.id_table = wm8994_i2c_id,
 };
 
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 8400eb1..29e8faf 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -156,16 +156,16 @@
 	return &wm8994_irqs[irq - wm8994->irq_base];
 }
 
-static void wm8994_irq_lock(unsigned int irq)
+static void wm8994_irq_lock(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&wm8994->irq_lock);
 }
 
-static void wm8994_irq_sync_unlock(unsigned int irq)
+static void wm8994_irq_sync_unlock(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
@@ -182,28 +182,30 @@
 	mutex_unlock(&wm8994->irq_lock);
 }
 
-static void wm8994_irq_unmask(unsigned int irq)
+static void wm8994_irq_unmask(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
-	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
+	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
+							     data->irq);
 
 	wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
 
-static void wm8994_irq_mask(unsigned int irq)
+static void wm8994_irq_mask(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
-	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
+	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
+							     data->irq);
 
 	wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
 
 static struct irq_chip wm8994_irq_chip = {
-	.name = "wm8994",
-	.bus_lock = wm8994_irq_lock,
-	.bus_sync_unlock = wm8994_irq_sync_unlock,
-	.mask = wm8994_irq_mask,
-	.unmask = wm8994_irq_unmask,
+	.name			= "wm8994",
+	.irq_bus_lock		= wm8994_irq_lock,
+	.irq_bus_sync_unlock	= wm8994_irq_sync_unlock,
+	.irq_mask		= wm8994_irq_mask,
+	.irq_unmask		= wm8994_irq_unmask,
 };
 
 /* The processing of the primary interrupt occurs in a thread so that
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4d073f1..cc8e49d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -64,7 +64,7 @@
 
 config AB8500_PWM
 	bool "AB8500 PWM support"
-	depends on AB8500_CORE
+	depends on AB8500_CORE && ARCH_U8500
 	select HAVE_PWM
 	help
 	  This driver exports functions to enable/disable/config/free Pulse
@@ -402,7 +402,7 @@
 	  DAC7512 16-bit digital-to-analog converter.
 
 	  This driver can also be built as a module. If so, the module
-	  will be calles ti_dac7512.
+	  will be called ti_dac7512.
 
 config VMWARE_BALLOON
 	tristate "VMware Balloon Driver"
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index 9e3879e..fe8616a 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -313,7 +313,7 @@
 	INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work);
 	schedule_delayed_work(&lcd->init_work, 0);
 
-	dev_info(&pdev->dev, "initalized ARM character LCD at %08x\n",
+	dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n",
 		lcd->phybase);
 
 	return 0;
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 63ee4c1..b6e1c9a 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -449,6 +449,7 @@
 	{ "bmp085", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, bmp085_id);
 
 static struct i2c_driver bmp085_driver = {
 	.driver = {
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index 6f62180..d02d302 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -16,12 +16,11 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/cs5535.h>
 #include <linux/slab.h>
 
 #define DRV_NAME "cs5535-mfgpt"
-#define MFGPT_BAR 2
 
 static int mfgpt_reset_timers;
 module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
@@ -37,7 +36,7 @@
 	DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
 	resource_size_t base;
 
-	struct pci_dev *pdev;
+	struct platform_device *pdev;
 	spinlock_t lock;
 	int initialized;
 } cs5535_mfgpt_chip;
@@ -290,10 +289,10 @@
 	return timers;
 }
 
-static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
-		const struct pci_device_id *pci_id)
+static int __devinit cs5535_mfgpt_probe(struct platform_device *pdev)
 {
-	int err, t;
+	struct resource *res;
+	int err = -EIO, t;
 
 	/* There are two ways to get the MFGPT base address; one is by
 	 * fetching it from MSR_LBAR_MFGPT, the other is by reading the
@@ -302,29 +301,27 @@
 	 * it turns out to be unreliable in the face of crappy BIOSes, we
 	 * can always go back to using MSRs.. */
 
-	err = pci_enable_device_io(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "can't enable device IO\n");
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
 		goto done;
 	}
 
-	err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
+	if (!request_region(res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "can't request region\n");
 		goto done;
 	}
 
 	/* set up the driver-specific struct */
-	cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR);
+	cs5535_mfgpt_chip.base = res->start;
 	cs5535_mfgpt_chip.pdev = pdev;
 	spin_lock_init(&cs5535_mfgpt_chip.lock);
 
-	dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR,
-			(unsigned long long) cs5535_mfgpt_chip.base);
+	dev_info(&pdev->dev, "reserved resource region %pR\n", res);
 
 	/* detect the available timers */
 	t = scan_timers(&cs5535_mfgpt_chip);
-	dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t);
+	dev_info(&pdev->dev, "%d MFGPT timers available\n", t);
 	cs5535_mfgpt_chip.initialized = 1;
 	return 0;
 
@@ -332,47 +329,18 @@
 	return err;
 }
 
-static struct pci_device_id cs5535_mfgpt_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
-	{ 0, },
+static struct platform_driver cs5535_mfgpt_drv = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = cs5535_mfgpt_probe,
 };
-MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
 
-/*
- * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
- * registration stuff.  It only allows only one driver to bind to each PCI
- * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
- * device.  Instead, we manually scan for the PCI device, request a single
- * region, and keep track of the devices that we're using.
- */
-
-static int __init cs5535_mfgpt_scan_pci(void)
-{
-	struct pci_dev *pdev;
-	int err = -ENODEV;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
-		pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
-				cs5535_mfgpt_pci_tbl[i].device, NULL);
-		if (pdev) {
-			err = cs5535_mfgpt_probe(pdev,
-					&cs5535_mfgpt_pci_tbl[i]);
-			if (err)
-				pci_dev_put(pdev);
-
-			/* we only support a single CS5535/6 southbridge */
-			break;
-		}
-	}
-
-	return err;
-}
 
 static int __init cs5535_mfgpt_init(void)
 {
-	return cs5535_mfgpt_scan_pci();
+	return platform_driver_register(&cs5535_mfgpt_drv);
 }
 
 module_init(cs5535_mfgpt_init);
@@ -380,3 +348,4 @@
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
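Note: with the PCI scan removed, the driver depends on something else registering a "cs5535-mfgpt" platform device that carries the MFGPT I/O range. A sketch of what such a registration could look like; the 0x6300 base and the helper below are invented for illustration and are not part of this series (in practice the resource would come from an MFD parent driver for the southbridge):

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource cs5535_mfgpt_resource = {
	.start	= 0x6300,		/* hypothetical I/O base */
	.end	= 0x6300 + 0x40 - 1,
	.flags	= IORESOURCE_IO,
};

static struct platform_device *mfgpt_pdev;

static int __init example_register_mfgpt(void)
{
	/* The name must match DRV_NAME for cs5535_mfgpt_probe() to bind */
	mfgpt_pdev = platform_device_register_simple("cs5535-mfgpt", -1,
						     &cs5535_mfgpt_resource, 1);
	return IS_ERR(mfgpt_pdev) ? PTR_ERR(mfgpt_pdev) : 0;
}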
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852d..44d4475 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("tifm");
+	workqueue = create_freezable_workqueue("tifm");
 	if (!workqueue)
 		return -ENOMEM;
 
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804..6df5a55 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.1-k");
+MODULE_VERSION("1.2.1.2-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -315,7 +315,8 @@
  * fear that guest will need it. Host may reject some pages, we need to
  * check the return value and maybe submit a different page.
  */
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+				     unsigned int *hv_status)
 {
 	unsigned long status, dummy;
 	u32 pfn32;
@@ -326,7 +327,7 @@
 
 	STATS_INC(b->stats.lock);
 
-	status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
 	if (vmballoon_check_status(b, status))
 		return true;
 
@@ -410,6 +411,7 @@
 {
 	struct page *page;
 	gfp_t flags;
+	unsigned int hv_status;
 	bool locked = false;
 
 	do {
@@ -429,11 +431,12 @@
 		}
 
 		/* inform monitor */
-		locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
 		if (!locked) {
 			STATS_INC(b->stats.refused_alloc);
 
-			if (b->reset_required) {
+			if (hv_status == VMW_BALLOON_ERROR_RESET ||
+			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
 				__free_page(page);
 				return -EIO;
 			}
@@ -782,7 +785,7 @@
 	if (x86_hyper != &x86_hyper_vmware)
 		return -ENODEV;
 
-	vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+	vmballoon_wq = create_freezable_workqueue("vmmemctl");
 	if (!vmballoon_wq) {
 		pr_err("failed to create workqueue\n");
 		return -ENOMEM;
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416..2a876c4 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@
 
 config MMC_BLOCK_MINORS
 	int "Number of minors per block device"
+	depends on MMC_BLOCK
 	range 4 256
 	default 8
 	help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 217f820..bfc8a8a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -257,7 +257,7 @@
 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err)
-		printk(KERN_ERR "%s: error %d sending status comand",
+		printk(KERN_ERR "%s: error %d sending status command",
 		       req->rq_disk->disk_name, err);
 	return cmd.resp[0];
 }
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd..ef10387 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@
 
 	  This option sets a default which can be overridden by the
 	  module parameter "removable=0" or "removable=1".
+
+config MMC_CLKGATE
+	bool "MMC host clock gating (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This saves power by gating off the logic and bus noise when the
+	  MMC card is not in use. Your host driver has to
+	  support handling this in order for it to be of any use.
+
+	  If unsure, say N.
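Note on the new MMC_CLKGATE option: the core only communicates the gating to a host driver through set_ios(), with ios->clock dropping to 0 when the clock is gated and returning to the old rate when it is ungated. A rough sketch of how a host driver might honour that, assuming an invented foo_host with a single "mclk" block clock (nothing here is taken from the patch):

#include <linux/clk.h>
#include <linux/mmc/host.h>

struct foo_host {
	struct clk	*mclk;
	bool		mclk_enabled;
};

static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct foo_host *host = mmc_priv(mmc);

	if (ios->clock == 0) {
		/* Core gated the card clock: stop the block clock too */
		if (host->mclk_enabled) {
			clk_disable(host->mclk);
			host->mclk_enabled = false;
		}
	} else {
		if (!host->mclk_enabled) {
			clk_enable(host->mclk);
			host->mclk_enabled = true;
		}
		clk_set_rate(host->mclk, ios->clock);
	}
}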
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a..63667a8 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,14 +303,14 @@
 			type, card->rca);
 	}
 
-	ret = device_add(&card->dev);
-	if (ret)
-		return ret;
-
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_card_debugfs(card);
 #endif
 
+	ret = device_add(&card->dev);
+	if (ret)
+		return ret;
+
 	mmc_card_set_present(card);
 
 	return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a3a780f..150b5f3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
 #include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -130,6 +131,8 @@
 
 		if (mrq->done)
 			mrq->done(mrq);
+
+		mmc_host_clk_gate(host);
 	}
 }
 
@@ -190,6 +193,7 @@
 			mrq->stop->mrq = mrq;
 		}
 	}
+	mmc_host_clk_ungate(host);
 	host->ops->request(host, mrq);
 }
 
@@ -295,8 +299,9 @@
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		timeout_us += data->timeout_clks * 1000 /
-			(card->host->ios.clock / 1000);
+		if (mmc_host_clk_rate(card->host))
+			timeout_us += data->timeout_clks * 1000 /
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -614,6 +619,8 @@
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
 }
 
@@ -641,6 +648,61 @@
 	mmc_set_ios(host);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		BUG_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		mmc_set_clock(host, host->clk_old);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
 /*
  * Change the bus mode (open drain/push-pull) of a host.
  */
@@ -1424,35 +1486,57 @@
 }
 EXPORT_SYMBOL(mmc_set_blocklen);
 
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+	host->f_init = freq;
+
+#ifdef CONFIG_MMC_DEBUG
+	pr_info("%s: %s: trying to init card at %u Hz\n",
+		mmc_hostname(host), __func__, host->f_init);
+#endif
+	mmc_power_up(host);
+	sdio_reset(host);
+	mmc_go_idle(host);
+
+	mmc_send_if_cond(host, host->ocr_avail);
+
+	/* Order's important: probe SDIO, then SD, then MMC */
+	if (!mmc_attach_sdio(host))
+		return 0;
+	if (!mmc_attach_sd(host))
+		return 0;
+	if (!mmc_attach_mmc(host))
+		return 0;
+
+	mmc_power_off(host);
+	return -EIO;
+}
+
 void mmc_rescan(struct work_struct *work)
 {
+	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
-	u32 ocr;
-	int err;
-	unsigned long flags;
 	int i;
-	const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 
-	spin_lock_irqsave(&host->lock, flags);
-
-	if (host->rescan_disable) {
-		spin_unlock_irqrestore(&host->lock, flags);
+	if (host->rescan_disable)
 		return;
-	}
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
 
 	mmc_bus_get(host);
 
-	/* if there is a card registered, check whether it is still present */
-	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+	/*
+	 * if there is a _removable_ card registered, check whether it is
+	 * still present
+	 */
+	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+	    && !(host->caps & MMC_CAP_NONREMOVABLE))
 		host->bus_ops->detect(host);
 
+	/*
+	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
+	 * the card is no longer present.
+	 */
 	mmc_bus_put(host);
-
-
 	mmc_bus_get(host);
 
 	/* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@
 		goto out;
 	}
 
-	/* detect a newly inserted card */
-
 	/*
 	 * Only we can add a new handler, so it's safe to
 	 * release the lock here.
@@ -1472,72 +1554,16 @@
 	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
 		goto out;
 
+	mmc_claim_host(host);
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		mmc_claim_host(host);
-
-		if (freqs[i] >= host->f_min)
-			host->f_init = freqs[i];
-		else if (!i || freqs[i-1] > host->f_min)
-			host->f_init = host->f_min;
-		else {
-			mmc_release_host(host);
-			goto out;
-		}
-#ifdef CONFIG_MMC_DEBUG
-		pr_info("%s: %s: trying to init card at %u Hz\n",
-			mmc_hostname(host), __func__, host->f_init);
-#endif
-		mmc_power_up(host);
-		sdio_reset(host);
-		mmc_go_idle(host);
-
-		mmc_send_if_cond(host, host->ocr_avail);
-
-		/*
-		 * First we search for SDIO...
-		 */
-		err = mmc_send_io_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sdio(host, ocr)) {
-				mmc_claim_host(host);
-				/*
-				 * Try SDMEM (but not MMC) even if SDIO
-				 * is broken.
-				 */
-				if (mmc_send_app_op_cond(host, 0, &ocr))
-					goto out_fail;
-
-				if (mmc_attach_sd(host, ocr))
-					mmc_power_off(host);
-			}
-			goto out;
-		}
-
-		/*
-		 * ...then normal SD...
-		 */
-		err = mmc_send_app_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sd(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-		/*
-		 * ...and finally MMC.
-		 */
-		err = mmc_send_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_mmc(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-out_fail:
-		mmc_release_host(host);
-		mmc_power_off(host);
+		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+			break;
+		if (freqs[i] < host->f_min)
+			break;
 	}
-out:
+	mmc_release_host(host);
+
+ out:
 	if (host->caps & MMC_CAP_NEEDS_POLL)
 		mmc_schedule_delayed_work(&host->detect, HZ);
 }
@@ -1721,6 +1747,18 @@
 		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
 			mmc_power_up(host);
 			mmc_select_voltage(host, host->ocr);
+			/*
+			 * Tell runtime PM core we just powered up the card,
+			 * since it still believes the card is powered off.
+			 * Note that currently runtime PM is only enabled
+			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+			 */
+			if (mmc_card_sdio(host->card) &&
+			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
+				pm_runtime_disable(&host->card->dev);
+				pm_runtime_set_active(&host->card->dev);
+				pm_runtime_enable(&host->card->dev);
+			}
 		}
 		BUG_ON(!host->bus_ops->resume);
 		err = host->bus_ops->resume(host);
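Note: to make the retry loop above concrete, with host->f_min at, say, 250 kHz mmc_rescan() now tries 400 kHz, then 300 kHz, then max(200 kHz, 250 kHz) = 250 kHz, and stops after that third attempt because 200 kHz is already below f_min; a host whose f_min is at or below 100 kHz walks all four entries of freqs[].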
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd..ca1fdde 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@
 void mmc_start_host(struct mmc_host *host);
 void mmc_stop_host(struct mmc_host *host);
 
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
-int mmc_attach_sd(struct mmc_host *host, u32 ocr);
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
 
 /* Module parameters */
 extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405..998797e 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@
 			&mmc_clock_fops))
 		goto err_node;
 
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+				root, &host->clk_delay))
+		goto err_node;
+#endif
 	return;
 
 err_node:
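Note: clk_delay is writable, so the eight-cycle default used by the clock gating code can be tuned at run time once debugfs is mounted, e.g. (path assumes the usual /sys/kernel/debug mount point and a host named mmc0):

	echo 16 > /sys/kernel/debug/mmc0/clk_delay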
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af2..b3ac6c5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2003 Russell King, All Rights Reserved.
  *  Copyright (C) 2007-2008 Pierre Ossman
+ *  Copyright (C) 2010 Linus Walleij
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
 #include <linux/suspend.h>
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
 
 #include "core.h"
 #include "host.h"
@@ -50,6 +52,205 @@
 static DEFINE_IDR(mmc_host_idr);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
+#ifdef CONFIG_MMC_CLKGATE
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+	unsigned long tick_ns;
+	unsigned long freq = host->ios.clock;
+	unsigned long flags;
+
+	if (!freq) {
+		pr_debug("%s: frequency set to 0 in disable function, "
+			 "this means the clock is already disabled.\n",
+			 mmc_hostname(host));
+		return;
+	}
+	/*
+	 * New requests may have appeared while we were scheduling,
+	 * in which case there is no reason to delay the check before
+	 * clk_disable().
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+
+	/*
+	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
+	 * to disable the MCI block clock. The reference count may have
+	 * gone up again after this delay due to rescheduling!
+	 */
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		tick_ns = DIV_ROUND_UP(1000000000, freq);
+		ndelay(host->clk_delay * tick_ns);
+	} else {
+		/* New users appeared while waiting for this work */
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		return;
+	}
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		/* This will set host->ios.clock to 0 */
+		mmc_gate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+	}
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+	struct mmc_host *host = container_of(work, struct mmc_host,
+					      clk_gate_work);
+
+	mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ *	mmc_host_clk_ungate - ungate hardware MCI clocks
+ *	@host: host to ungate.
+ *
+ *	Makes sure the host ios.clock is restored to a non-zero value
+ *	past this call.	Increase clock reference count and ungate clock
+ *	if we're the first user.
+ */
+void mmc_host_clk_ungate(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_ungate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+	}
+	host->clk_requests++;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_may_gate_card - check if this card may be gated
+ *	@card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	/* If there is no card we may gate it */
+	if (!card)
+		return true;
+	/*
+	 * Don't gate SDIO cards! These need to be clocked at all times
+	 * since they may be independent systems generating interrupts
+	 * and other events. The clock requests counter from the core will
+	 * go down to zero since the core does not need it, but we will not
+	 * gate the clock, because there is somebody out there that may still
+	 * be using it.
+	 */
+	if (mmc_card_sdio(card))
+		return false;
+
+	return true;
+}
+
+/**
+ *	mmc_host_clk_gate - gate off hardware MCI clocks
+ *	@host: host to gate.
+ *
+ *	Calls the host driver with ios.clock set to zero as often as possible
+ *	in order to gate off hardware MCI clocks. Decrease clock reference
+ *	count and schedule disabling of clock.
+ */
+void mmc_host_clk_gate(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_requests--;
+	if (mmc_host_may_gate_card(host->card) &&
+	    !host->clk_requests)
+		schedule_work(&host->clk_gate_work);
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ *	mmc_host_clk_rate - get current clock frequency setting
+ *	@host: host to get the clock frequency for.
+ *
+ *	Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	unsigned long freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated)
+		freq = host->clk_old;
+	else
+		freq = host->ios.clock;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return freq;
+}
+
+/**
+ *	mmc_host_clk_init - set up clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+	host->clk_requests = 0;
+	/* Hold MCI clock for 8 cycles by default */
+	host->clk_delay = 8;
+	host->clk_gated = false;
+	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_clk_exit - shut down clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+	/*
+	 * Wait for any outstanding gate and then make sure we're
+	 * ungated before exiting.
+	 */
+	if (cancel_work_sync(&host->clk_gate_work))
+		mmc_host_clk_gate_delayed(host);
+	if (host->clk_gated)
+		mmc_host_clk_ungate(host);
+	/* There should be only one user now */
+	WARN_ON(host->clk_requests > 1);
+}
+
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+#endif
+
 /**
  *	mmc_alloc_host - initialise the per-host structure.
  *	@extra: sizeof private data structure
@@ -82,6 +283,8 @@
 	host->class_dev.class = &mmc_host_class;
 	device_initialize(&host->class_dev);
 
+	mmc_host_clk_init(host);
+
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
+
+	mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@
 }
 
 EXPORT_SYMBOL(mmc_free_host);
-
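A note on the new CONFIG_MMC_CLKGATE core above. The MCI clock is managed by reference counting: mmc_host_clk_ungate() restores the clock and bumps host->clk_requests, while mmc_host_clk_gate() drops the count and only schedules the gating work once it reaches zero and mmc_host_may_gate_card() allows it (never for SDIO cards). The following is a minimal stand-alone model of that accounting, not kernel code; the names and output are made up, and the spinlock, mutex and workqueue details are deliberately omitted.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical user-space model of the clk_requests accounting. */
	struct fake_host {
		int  clk_requests;	/* users that need the MCI clock running */
		bool clk_gated;		/* mirrors host->clk_gated */
		bool card_is_sdio;	/* SDIO cards are never gated */
	};

	static void model_ungate(struct fake_host *h)
	{
		if (h->clk_gated) {
			h->clk_gated = false;	/* driver restores ios.clock here */
			printf("ungated\n");
		}
		h->clk_requests++;
	}

	static void model_gate(struct fake_host *h)
	{
		h->clk_requests--;
		/* Gate only when nobody needs the clock and the card allows it. */
		if (h->clk_requests == 0 && !h->card_is_sdio) {
			h->clk_gated = true;	/* the real code defers this to clk_gate_work */
			printf("gated\n");
		}
	}

	int main(void)
	{
		struct fake_host h = { .clk_requests = 0, .clk_gated = true };

		model_ungate(&h);	/* first user: clock comes back on */
		model_ungate(&h);	/* nested user: no state change */
		model_gate(&h);		/* one user still left, stays ungated */
		model_gate(&h);		/* last user gone: clock is gated again */
		return 0;
	}

The same invariant is what mmc_host_clk_exit() checks with WARN_ON(host->clk_requests > 1) before the host structure is torn down.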
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e11..de199f9 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
  */
 #ifndef _MMC_CORE_HOST_H
 #define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
 
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_ungate(struct mmc_host *host);
+void mmc_host_clk_gate(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_ungate(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_gate(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif
+
 void mmc_host_deeper_disable(struct work_struct *work);
 
 #endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3..16006ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -534,39 +534,57 @@
 	 */
 	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
 	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
-		unsigned ext_csd_bit, bus_width;
+		static unsigned ext_csd_bits[][2] = {
+			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
+			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
+			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
+		};
+		static unsigned bus_widths[] = {
+			MMC_BUS_WIDTH_8,
+			MMC_BUS_WIDTH_4,
+			MMC_BUS_WIDTH_1
+		};
+		unsigned idx, bus_width = 0;
 
-		if (host->caps & MMC_CAP_8_BIT_DATA) {
-			if (ddr)
-				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
-			else
-				ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
-			bus_width = MMC_BUS_WIDTH_8;
-		} else {
-			if (ddr)
-				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
-			else
-				ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
-			bus_width = MMC_BUS_WIDTH_4;
+		if (host->caps & MMC_CAP_8_BIT_DATA)
+			idx = 0;
+		else
+			idx = 1;
+		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+			bus_width = bus_widths[idx];
+			if (bus_width == MMC_BUS_WIDTH_1)
+				ddr = 0; /* no DDR for 1-bit width */
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 EXT_CSD_BUS_WIDTH,
+					 ext_csd_bits[idx][0]);
+			if (!err) {
+				mmc_set_bus_width_ddr(card->host,
+						      bus_width, MMC_SDR_MODE);
+				/*
+				 * If controller can't handle bus width test,
+				 * use the highest bus width to maintain
+				 * compatibility with previous MMC behavior.
+				 */
+				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+					break;
+				err = mmc_bus_test(card, bus_width);
+				if (!err)
+					break;
+			}
 		}
 
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_BUS_WIDTH, ext_csd_bit);
-
-		if (err && err != -EBADMSG)
-			goto free_card;
-
+		if (!err && ddr) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_BUS_WIDTH,
+					ext_csd_bits[idx][1]);
+		}
 		if (err) {
 			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
-			       "failed\n", mmc_hostname(card->host),
-			       1 << bus_width, ddr);
-			err = 0;
-		} else {
-			if (ddr)
-				mmc_card_set_ddr_mode(card);
-			else
-				ddr = MMC_SDR_MODE;
-
+				"failed\n", mmc_hostname(card->host),
+				1 << bus_width, ddr);
+			goto free_card;
+		} else if (ddr) {
+			mmc_card_set_ddr_mode(card);
 			mmc_set_bus_width_ddr(card->host, bus_width, ddr);
 		}
 	}
@@ -737,14 +755,21 @@
 /*
  * Starting point for MMC card init.
  */
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
+int mmc_attach_mmc(struct mmc_host *host)
 {
 	int err;
+	u32 ocr;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_attach_bus_ops(host);
+	if (host->ocr_avail_mmc)
+		host->ocr_avail = host->ocr_avail_mmc;
 
 	/*
 	 * We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@
 		goto err;
 
 	mmc_release_host(host);
-
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_card;
 
 	return 0;
 
 remove_card:
+	mmc_release_host(host);
 	mmc_remove_card(host->card);
-	host->card = NULL;
 	mmc_claim_host(host);
+	host->card = NULL;
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
 		mmc_hostname(host), err);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c..60842f8 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -462,3 +462,104 @@
 	return 0;
 }
 
+static int
+mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
+		  u8 len)
+{
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+	struct scatterlist sg;
+	u8 *data_buf;
+	u8 *test_buf;
+	int i, err;
+	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
+	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
+
+	/* DMA onto the stack or onto static data is unsafe/nonportable,
+	 * so bounce the test pattern through a kmalloc'd buffer.
+	 */
+	data_buf = kmalloc(len, GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	if (len == 8)
+		test_buf = testdata_8bit;
+	else if (len == 4)
+		test_buf = testdata_4bit;
+	else {
+		printk(KERN_ERR "%s: Invalid bus_width %d\n",
+		       mmc_hostname(host), len);
+		kfree(data_buf);
+		return -EINVAL;
+	}
+
+	if (opcode == MMC_BUS_TEST_W)
+		memcpy(data_buf, test_buf, len);
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	cmd.opcode = opcode;
+	cmd.arg = 0;
+
+	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
+	 * rely on callers to never use this with "native" calls for reading
+	 * CSD or CID.  Native versions of those commands use the R2 type,
+	 * not R1 plus a data block.
+	 */
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	data.blksz = len;
+	data.blocks = 1;
+	if (opcode == MMC_BUS_TEST_R)
+		data.flags = MMC_DATA_READ;
+	else
+		data.flags = MMC_DATA_WRITE;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	sg_init_one(&sg, data_buf, len);
+	mmc_wait_for_req(host, &mrq);
+	err = 0;
+	if (opcode == MMC_BUS_TEST_R) {
+		for (i = 0; i < len / 4; i++)
+			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
+				err = -EIO;
+				break;
+			}
+	}
+	kfree(data_buf);
+
+	if (cmd.error)
+		return cmd.error;
+	if (data.error)
+		return data.error;
+
+	return err;
+}
+
+int mmc_bus_test(struct mmc_card *card, u8 bus_width)
+{
+	int err, width;
+
+	if (bus_width == MMC_BUS_WIDTH_8)
+		width = 8;
+	else if (bus_width == MMC_BUS_WIDTH_4)
+		width = 4;
+	else if (bus_width == MMC_BUS_WIDTH_1)
+		return 0; /* no need for test */
+	else
+		return -EINVAL;
+
+	/*
+	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
+	 * is a problem.  This improves chances that the test will work.
+	 */
+	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
+	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
+	return err;
+}
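For reference, mmc_bus_test() above probes a candidate bus width by writing a known pattern with BUS_TEST_W and reading it back with BUS_TEST_R; the card is expected to return the pattern inverted on the working data lines, which is why mmc_send_bus_test() accepts the data only while test_buf[i] ^ data_buf[i] equals 0xff for each of the len / 4 bytes it checks. A stand-alone sketch of just that comparison, with hypothetical buffers standing in for the transferred data:

	#include <stdint.h>
	#include <stdio.h>

	/* Model of the inverted-pattern check in mmc_send_bus_test(); on real
	 * hardware "received" would come back from the BUS_TEST_R transfer. */
	static int check_bus_test(const uint8_t *sent, const uint8_t *received, int len)
	{
		int i;

		/* As in the code above, only len / 4 bytes carry the pattern. */
		for (i = 0; i < len / 4; i++)
			if ((sent[i] ^ received[i]) != 0xff)
				return -1;	/* a data line is stuck or miswired */
		return 0;
	}

	int main(void)
	{
		uint8_t sent[8]    = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
		uint8_t good_rx[8] = { 0xaa, 0x55, 0, 0, 0, 0, 0, 0 };
		uint8_t bad_rx[8]  = { 0xaa, 0x57, 0, 0, 0, 0, 0, 0 };

		printf("good: %d\n", check_bus_test(sent, good_rx, 8));	/* 0  */
		printf("bad:  %d\n", check_bus_test(sent, bad_rx, 8));	/* -1 */
		return 0;
	}

This is the test that lets the mmc.c hunk above walk down from 8-bit to 4-bit to 1-bit until a width actually passes on hosts that advertise MMC_CAP_BUS_WIDTH_TEST.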
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e..e6d44b8 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
 int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
 int mmc_card_sleepawake(struct mmc_host *host, int sleep);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
 
 #endif
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4df..d18c32b 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,14 +764,21 @@
 /*
  * Starting point for SD card init.
  */
-int mmc_attach_sd(struct mmc_host *host, u32 ocr)
+int mmc_attach_sd(struct mmc_host *host)
 {
 	int err;
+	u32 ocr;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_app_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_sd_attach_bus_ops(host);
+	if (host->ocr_avail_sd)
+		host->ocr_avail = host->ocr_avail_sd;
 
 	/*
 	 * We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@
 		ocr &= ~0x7F;
 	}
 
-	if (ocr & MMC_VDD_165_195) {
+	if ((ocr & MMC_VDD_165_195) &&
+	    !(host->ocr_avail_sd & MMC_VDD_165_195)) {
 		printk(KERN_WARNING "%s: SD card claims to support the "
 		       "incompletely defined 'low voltage range'. This "
 		       "will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@
 		goto err;
 
 	mmc_release_host(host);
-
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_card;
 
 	return 0;
 
 remove_card:
+	mmc_release_host(host);
 	mmc_remove_card(host->card);
 	host->card = NULL;
 	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
 		mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f9..ebc62ad 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -627,15 +627,27 @@
 
 static int mmc_sdio_resume(struct mmc_host *host)
 {
-	int i, err;
+	int i, err = 0;
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	/* Basic card reinitialization. */
 	mmc_claim_host(host);
-	err = mmc_sdio_init_card(host, host->ocr, host->card,
+
+	/* No need to reinitialize powered-resumed nonremovable cards */
+	if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
+		err = mmc_sdio_init_card(host, host->ocr, host->card,
 				 (host->pm_flags & MMC_PM_KEEP_POWER));
+	else if (mmc_card_is_powered_resumed(host)) {
+		/* We may have switched to 1-bit mode during suspend */
+		err = sdio_enable_4bit_bus(host->card);
+		if (err > 0) {
+			mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+			err = 0;
+		}
+	}
+
 	if (!err && host->sdio_irqs)
 		mmc_signal_sdio_irq(host);
 	mmc_release_host(host);
@@ -690,16 +702,22 @@
 /*
  * Starting point for SDIO card init.
  */
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
+int mmc_attach_sdio(struct mmc_host *host)
 {
-	int err;
-	int i, funcs;
+	int err, i, funcs;
+	u32 ocr;
 	struct mmc_card *card;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_io_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_attach_bus(host, &mmc_sdio_ops);
+	if (host->ocr_avail_sdio)
+		host->ocr_avail = host->ocr_avail_sdio;
 
 	/*
 	 * Sanity check the voltages that the card claims to
@@ -769,11 +787,10 @@
 			pm_runtime_enable(&card->sdio_func[i]->dev);
 	}
 
-	mmc_release_host(host);
-
 	/*
 	 * First add the card to the driver model...
 	 */
+	mmc_release_host(host);
 	err = mmc_add_card(host->card);
 	if (err)
 		goto remove_added;
@@ -787,6 +804,7 @@
 			goto remove_added;
 	}
 
+	mmc_claim_host(host);
 	return 0;
 
 
@@ -796,11 +814,12 @@
 	mmc_claim_host(host);
 remove:
 	/* And with lock if it hasn't been added. */
+	mmc_release_host(host);
 	if (host->card)
 		mmc_sdio_remove(host);
+	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
 		mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da44..d29b9c3 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@
 
 #ifdef CONFIG_PM_RUNTIME
 
-static int sdio_bus_pm_prepare(struct device *dev)
-{
-	struct sdio_func *func = dev_to_sdio_func(dev);
-
-	/*
-	 * Resume an SDIO device which was suspended at run time at this
-	 * point, in order to allow standard SDIO suspend/resume paths
-	 * to keep working as usual.
-	 *
-	 * Ultimately, the SDIO driver itself will decide (in its
-	 * suspend handler, or lack thereof) whether the card should be
-	 * removed or kept, and if kept, at what power state.
-	 *
-	 * At this point, PM core have increased our use count, so it's
-	 * safe to directly resume the device. After system is resumed
-	 * again, PM core will drop back its runtime PM use count, and if
-	 * needed device will be suspended again.
-	 *
-	 * The end result is guaranteed to be a power state that is
-	 * coherent with the device's runtime PM use count.
-	 *
-	 * The return value of pm_runtime_resume is deliberately unchecked
-	 * since there is little point in failing system suspend if a
-	 * device can't be resumed.
-	 */
-	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
-		pm_runtime_resume(dev);
-
-	return 0;
-}
-
 static const struct dev_pm_ops sdio_bus_pm_ops = {
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
 		pm_generic_runtime_idle
 	)
-	.prepare = sdio_bus_pm_prepare,
 };
 
 #define SDIO_PM_OPS_PTR	(&sdio_bus_pm_ops)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e960a93..afe8c6f 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -142,6 +142,27 @@
 
 	  If unsure, say N.
 
+config MMC_SDHCI_DOVE
+	bool "SDHCI support on Marvell's Dove SoC"
+	depends on ARCH_DOVE
+	depends on MMC_SDHCI_PLTFM
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the Secure Digital Host Controller Interface in
+	  Marvell's Dove SoC.
+
+	  If unsure, say N.
+
+config MMC_SDHCI_TEGRA
+	tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+	depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the Tegra SD/MMC controller. If you have a Tegra
+	  platform with SD or MMC devices, say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_SDHCI_S3C
 	tristate "SDHCI support on Samsung S3C SoC"
 	depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -460,11 +481,27 @@
 	help
 	  If you say yes here SD-Cards may work on the EZkit.
 
+config MMC_DW
+	tristate "Synopsys DesignWare Memory Card Interface"
+	depends on ARM
+	help
+	  This selects support for the Synopsys DesignWare Mobile Storage IP
+	  block; it provides host support for SD and MMC interfaces, in both
+	  PIO and external DMA modes.
+
+config MMC_DW_IDMAC
+	bool "Internal DMAC interface"
+	depends on MMC_DW
+	help
+	  This selects support for the internal DMAC block within the Synopsys
+	  DesignWare Mobile Storage IP block. This disables the external DMA
+	  interface.
+
 config MMC_SH_MMCIF
 	tristate "SuperH Internal MMCIF support"
 	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
 	help
-	  This selects the MMC Host Interface controler (MMCIF).
+	  This selects the MMC Host Interface controller (MMCIF).
 
 	  This driver supports MMCIF in sh7724/sh7757/sh7372.
 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff..e834fb2 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@
 obj-$(CONFIG_MMC_CB710)	+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
+obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
 obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
 obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o
 obj-$(CONFIG_MMC_USHC)		+= ushc.o
@@ -39,6 +40,8 @@
 sdhci-platform-y				:= sdhci-pltfm.o
 sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX)	+= sdhci-cns3xxx.o
 sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX)	+= sdhci-esdhc-imx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE)		+= sdhci-dove.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA)	+= sdhci-tegra.o
 
 obj-$(CONFIG_MMC_SDHCI_OF)	+= sdhci-of.o
 sdhci-of-y				:= sdhci-of-core.o
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 41e5a60..ef72e87 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -192,7 +192,7 @@
 	au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
 	au_sync();
 
-	/* Send the stop commmand */
+	/* Send the stop command */
 	au_writel(STOP_CMD, HOST_CMD(host));
 }
 
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index bac7d62..0371bf5 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -462,7 +462,7 @@
 		goto out;
 	}
 
-	mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
+	mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
 	if (!mmc) {
 		ret = -ENOMEM;
 		goto out;
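The one-line bfin_sdh change above matters because the first argument of mmc_alloc_host() is the size of the driver-private area allocated behind struct mmc_host and later reached via mmc_priv(); passing sizeof(*mmc) reserved room for another mmc_host instead of for struct sdh_host. A stand-alone model of that layout follows (hypothetical names, not the core's actual implementation, which also takes care of alignment):

	#include <stdio.h>
	#include <stdlib.h>

	struct model_host {			/* stands in for struct mmc_host */
		int core_state;
		/* driver-private area follows the structure */
	};

	struct model_priv {			/* stands in for struct sdh_host */
		int irq;
		int bus_width;
	};

	static struct model_host *model_alloc_host(size_t extra)
	{
		/* core allocates its own state plus "extra" private bytes */
		return calloc(1, sizeof(struct model_host) + extra);
	}

	static void *model_priv(struct model_host *host)
	{
		return host + 1;		/* first byte after the host struct */
	}

	int main(void)
	{
		/* Passing sizeof(*host) here is the kind of bug the patch fixes. */
		struct model_host *host = model_alloc_host(sizeof(struct model_priv));
		struct model_priv *priv = model_priv(host);

		priv->irq = 42;
		printf("private irq = %d\n", priv->irq);
		free(host);
		return 0;
	}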
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547c..0076c74 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
 #define DAVINCI_MMCBLNC      0x60
 #define DAVINCI_SDIOCTL      0x64
 #define DAVINCI_SDIOST0      0x68
-#define DAVINCI_SDIOEN       0x6C
-#define DAVINCI_SDIOST       0x70
+#define DAVINCI_SDIOIEN      0x6C
+#define DAVINCI_SDIOIST      0x70
 #define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */
 
 /* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
 #define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
 #define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */
 
+/* DAVINCI_SDIOST0 definitions */
+#define SDIOST0_DAT1_HI       BIT(0)
+
+/* DAVINCI_SDIOIEN definitions */
+#define SDIOIEN_IOINTEN       BIT(0)
+
+/* DAVINCI_SDIOIST definitions */
+#define SDIOIST_IOINT         BIT(0)
 
 /* MMCSD Init clock in Hz in opendrain mode */
 #define MMCSD_INIT_CLOCK		200000
@@ -164,7 +172,7 @@
 	unsigned int mmc_input_clk;
 	void __iomem *base;
 	struct resource *mem_res;
-	int irq;
+	int mmc_irq, sdio_irq;
 	unsigned char bus_mode;
 
 #define DAVINCI_MMC_DATADIR_NONE	0
@@ -184,6 +192,7 @@
 	u32 rxdma, txdma;
 	bool use_dma;
 	bool do_dma;
+	bool sdio_int;
 
 	/* Scatterlist DMA uses one or more parameter RAM entries:
 	 * the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@
 	struct scatterlist	*sg;
 	unsigned		sg_len;
 	unsigned		bytes_left = host->bytes_left;
-	const unsigned		shift = ffs(rw_threshold) - 1;;
+	const unsigned		shift = ffs(rw_threshold) - 1;
 
 	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
 		template = &host->tx_template;
@@ -866,6 +875,19 @@
 {
 	host->data = NULL;
 
+	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+		/*
+		 * SDIO Interrupt Detection work-around as suggested by
+		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
+		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
+		 */
+		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
+					SDIOST0_DAT1_HI)) {
+			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+			mmc_signal_sdio_irq(host->mmc);
+		}
+	}
+
 	if (host->do_dma) {
 		davinci_abort_dma(host);
 
@@ -932,6 +954,21 @@
 	mmc_davinci_reset_ctrl(host, 0);
 }
 
+static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
+{
+	struct mmc_davinci_host *host = dev_id;
+	unsigned int status;
+
+	status = readl(host->base + DAVINCI_SDIOIST);
+	if (status & SDIOIST_IOINT) {
+		dev_dbg(mmc_dev(host->mmc),
+			"SDIO interrupt status %x\n", status);
+		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+		mmc_signal_sdio_irq(host->mmc);
+	}
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
 {
 	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@
 	return config->get_ro(pdev->id);
 }
 
+static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct mmc_davinci_host *host = mmc_priv(mmc);
+
+	if (enable) {
+		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
+			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+			mmc_signal_sdio_irq(host->mmc);
+		} else {
+			host->sdio_int = true;
+			writel(readl(host->base + DAVINCI_SDIOIEN) |
+			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
+		}
+	} else {
+		host->sdio_int = false;
+		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
+		       host->base + DAVINCI_SDIOIEN);
+	}
+}
+
 static struct mmc_host_ops mmc_davinci_ops = {
 	.request	= mmc_davinci_request,
 	.set_ios	= mmc_davinci_set_ios,
 	.get_cd		= mmc_davinci_get_cd,
 	.get_ro		= mmc_davinci_get_ro,
+	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
 };
 
 /*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@
 		host->nr_sg = MAX_NR_SG;
 
 	host->use_dma = use_dma;
-	host->irq = irq;
+	host->mmc_irq = irq;
+	host->sdio_irq = platform_get_irq(pdev, 1);
 
 	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
 		host->use_dma = 0;
@@ -1270,6 +1329,13 @@
 	if (ret)
 		goto out;
 
+	if (host->sdio_irq >= 0) {
+		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
+				  mmc_hostname(mmc), host);
+		if (!ret)
+			mmc->caps |= MMC_CAP_SDIO_IRQ;
+	}
+
 	rename_region(mem, mmc_hostname(mmc));
 
 	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@
 		mmc_davinci_cpufreq_deregister(host);
 
 		mmc_remove_host(host->mmc);
-		free_irq(host->irq, host);
+		free_irq(host->mmc_irq, host);
+		if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+			free_irq(host->sdio_irq, host);
 
 		davinci_release_dma_channels(host);
 
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 0000000..2fcc825
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/bitops.h>
+
+#include "dw_mmc.h"
+
+/* Common flag combinations */
+#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DTO | SDMMC_INT_DCRC | \
+				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
+				 SDMMC_INT_EBE)
+#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
+				 SDMMC_INT_RESP_ERR)
+#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
+				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
+#define DW_MCI_SEND_STATUS	1
+#define DW_MCI_RECV_STATUS	2
+#define DW_MCI_DMA_THRESHOLD	16
+
+#ifdef CONFIG_MMC_DW_IDMAC
+struct idmac_desc {
+	u32		des0;	/* Control Descriptor */
+#define IDMAC_DES0_DIC	BIT(1)
+#define IDMAC_DES0_LD	BIT(2)
+#define IDMAC_DES0_FD	BIT(3)
+#define IDMAC_DES0_CH	BIT(4)
+#define IDMAC_DES0_ER	BIT(5)
+#define IDMAC_DES0_CES	BIT(30)
+#define IDMAC_DES0_OWN	BIT(31)
+
+	u32		des1;	/* Buffer sizes */
+#define IDMAC_SET_BUFFER1_SIZE(d, s) \
+	((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+
+	u32		des2;	/* buffer 1 physical address */
+
+	u32		des3;	/* buffer 2 physical address */
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ *	processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ *	&struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct dw_mci_slot {
+	struct mmc_host		*mmc;
+	struct dw_mci		*host;
+
+	u32			ctype;
+
+	struct mmc_request	*mrq;
+	struct list_head	queue_node;
+
+	unsigned int		clock;
+	unsigned long		flags;
+#define DW_MMC_CARD_PRESENT	0
+#define DW_MMC_CARD_NEED_INIT	1
+	int			id;
+	int			last_detect_state;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static int dw_mci_req_show(struct seq_file *s, void *v)
+{
+	struct dw_mci_slot *slot = s->private;
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_command *stop;
+	struct mmc_data	*data;
+
+	/* Make sure we get a consistent snapshot */
+	spin_lock_bh(&slot->host->lock);
+	mrq = slot->mrq;
+
+	if (mrq) {
+		cmd = mrq->cmd;
+		data = mrq->data;
+		stop = mrq->stop;
+
+		if (cmd)
+			seq_printf(s,
+				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+				   cmd->opcode, cmd->arg, cmd->flags,
+				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
+				   cmd->resp[3], cmd->error);
+		if (data)
+			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+				   data->bytes_xfered, data->blocks,
+				   data->blksz, data->flags, data->error);
+		if (stop)
+			seq_printf(s,
+				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+				   stop->opcode, stop->arg, stop->flags,
+				   stop->resp[0], stop->resp[1], stop->resp[2],
+				   stop->resp[3], stop->error);
+	}
+
+	spin_unlock_bh(&slot->host->lock);
+
+	return 0;
+}
+
+static int dw_mci_req_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_mci_req_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_req_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dw_mci_req_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dw_mci_regs_show(struct seq_file *s, void *v)
+{
+	struct dw_mci *host = s->private;
+
+	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
+	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
+	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
+	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
+	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
+	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
+
+	return 0;
+}
+
+static int dw_mci_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_mci_regs_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_regs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dw_mci_regs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
+{
+	struct mmc_host	*mmc = slot->mmc;
+	struct dw_mci *host = slot->host;
+	struct dentry *root;
+	struct dentry *node;
+
+	root = mmc->debugfs_root;
+	if (!root)
+		return;
+
+	node = debugfs_create_file("regs", S_IRUSR, root, host,
+				   &dw_mci_regs_fops);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_file("req", S_IRUSR, root, slot,
+				   &dw_mci_req_fops);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_x32("pending_events", S_IRUSR, root,
+				  (u32 *)&host->pending_events);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_x32("completed_events", S_IRUSR, root,
+				  (u32 *)&host->completed_events);
+	if (!node)
+		goto err;
+
+	return;
+
+err:
+	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+static void dw_mci_set_timeout(struct dw_mci *host)
+{
+	/* timeout (maximum) */
+	mci_writel(host, TMOUT, 0xffffffff);
+}
+
+static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+	struct mmc_data	*data;
+	u32 cmdr;
+	cmd->error = -EINPROGRESS;
+
+	cmdr = cmd->opcode;
+
+	if (cmdr == MMC_STOP_TRANSMISSION)
+		cmdr |= SDMMC_CMD_STOP;
+	else
+		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		/* We expect a response, so set this bit */
+		cmdr |= SDMMC_CMD_RESP_EXP;
+		if (cmd->flags & MMC_RSP_136)
+			cmdr |= SDMMC_CMD_RESP_LONG;
+	}
+
+	if (cmd->flags & MMC_RSP_CRC)
+		cmdr |= SDMMC_CMD_RESP_CRC;
+
+	data = cmd->data;
+	if (data) {
+		cmdr |= SDMMC_CMD_DAT_EXP;
+		if (data->flags & MMC_DATA_STREAM)
+			cmdr |= SDMMC_CMD_STRM_MODE;
+		if (data->flags & MMC_DATA_WRITE)
+			cmdr |= SDMMC_CMD_DAT_WR;
+	}
+
+	return cmdr;
+}
+
+static void dw_mci_start_command(struct dw_mci *host,
+				 struct mmc_command *cmd, u32 cmd_flags)
+{
+	host->cmd = cmd;
+	dev_vdbg(&host->pdev->dev,
+		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
+		 cmd->arg, cmd_flags);
+
+	mci_writel(host, CMDARG, cmd->arg);
+	wmb();
+
+	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+}
+
+static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
+{
+	dw_mci_start_command(host, data->stop, host->stop_cmdr);
+}
+
+/* DMA interface functions */
+static void dw_mci_stop_dma(struct dw_mci *host)
+{
+	if (host->use_dma) {
+		host->dma_ops->stop(host);
+		host->dma_ops->cleanup(host);
+	} else {
+		/* Data transfer was stopped by the interrupt handler */
+		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+	}
+}
+
+#ifdef CONFIG_MMC_DW_IDMAC
+static void dw_mci_dma_cleanup(struct dw_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (data)
+		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+			     ((data->flags & MMC_DATA_WRITE)
+			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+static void dw_mci_idmac_stop_dma(struct dw_mci *host)
+{
+	u32 temp;
+
+	/* Disable and reset the IDMAC interface */
+	temp = mci_readl(host, CTRL);
+	temp &= ~SDMMC_CTRL_USE_IDMAC;
+	temp |= SDMMC_CTRL_DMA_RESET;
+	mci_writel(host, CTRL, temp);
+
+	/* Stop the IDMAC running */
+	temp = mci_readl(host, BMOD);
+	temp &= ~SDMMC_IDMAC_ENABLE;
+	mci_writel(host, BMOD, temp);
+}
+
+static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+	host->dma_ops->cleanup(host);
+
+	/*
+	 * If the card was removed, data will be NULL. No point in trying to
+	 * send the stop command or waiting for NBUSY in this case.
+	 */
+	if (data) {
+		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+		tasklet_schedule(&host->tasklet);
+	}
+}
+
+static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
+				    unsigned int sg_len)
+{
+	int i;
+	struct idmac_desc *desc = host->sg_cpu;
+
+	for (i = 0; i < sg_len; i++, desc++) {
+		unsigned int length = sg_dma_len(&data->sg[i]);
+		u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+		/* Set the OWN bit and disable interrupts for this descriptor */
+		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
+
+		/* Buffer length */
+		IDMAC_SET_BUFFER1_SIZE(desc, length);
+
+		/* Physical address to DMA to/from */
+		desc->des2 = mem_addr;
+	}
+
+	/* Set first descriptor */
+	desc = host->sg_cpu;
+	desc->des0 |= IDMAC_DES0_FD;
+
+	/* Set last descriptor */
+	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
+	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+	desc->des0 |= IDMAC_DES0_LD;
+
+	wmb();
+}
+
+static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+{
+	u32 temp;
+
+	dw_mci_translate_sglist(host, host->data, sg_len);
+
+	/* Select IDMAC interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_USE_IDMAC;
+	mci_writel(host, CTRL, temp);
+
+	wmb();
+
+	/* Enable the IDMAC */
+	temp = mci_readl(host, BMOD);
+	temp |= SDMMC_IDMAC_ENABLE;
+	mci_writel(host, BMOD, temp);
+
+	/* Start it running */
+	mci_writel(host, PLDMND, 1);
+}
+
+static int dw_mci_idmac_init(struct dw_mci *host)
+{
+	struct idmac_desc *p;
+	int i;
+
+	/* Number of descriptors in the ring buffer */
+	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+
+	/* Forward link the descriptor list */
+	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
+
+	/* Set the last descriptor as the end-of-ring descriptor */
+	p->des3 = host->sg_dma;
+	p->des0 = IDMAC_DES0_ER;
+
+	/* Mask out interrupts - get Tx & Rx complete only */
+	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
+		   SDMMC_IDMAC_INT_TI);
+
+	/* Set the descriptor base address */
+	mci_writel(host, DBADDR, host->sg_dma);
+	return 0;
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+	.init = dw_mci_idmac_init,
+	.start = dw_mci_idmac_start_dma,
+	.stop = dw_mci_idmac_stop_dma,
+	.complete = dw_mci_idmac_complete_dma,
+	.cleanup = dw_mci_dma_cleanup,
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
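To summarise the IDMAC setup above: dw_mci_idmac_init() links the page of descriptors into a ring through des3 and marks the final entry as end-of-ring, and dw_mci_translate_sglist() then fills one descriptor per scatterlist segment, flagging the first and last used entries (FD/LD) and handing them to the DMA engine (OWN). A stand-alone sketch of that chaining, using array indices where des3 really holds DMA bus addresses; the flag names are hypothetical, not the driver's:

	#include <stdio.h>

	#define RING_SIZE 4

	/* Simplified descriptor: the real idmac_desc keeps control bits in des0,
	 * the buffer size in des1 and bus addresses in des2/des3. */
	struct model_desc {
		unsigned int flags;
	#define F_OWN	(1u << 0)	/* owned by the DMA engine */
	#define F_FIRST	(1u << 1)	/* FD: first descriptor of the transfer */
	#define F_LAST	(1u << 2)	/* LD: last descriptor of the transfer */
	#define F_EOR	(1u << 3)	/* ER: end of ring, wrap to descriptor 0 */
		unsigned int len;
		int next;		/* stands in for the des3 forward link */
	};

	int main(void)
	{
		struct model_desc ring[RING_SIZE] = { { 0 } };
		unsigned int seg_len[2] = { 512, 512 };	/* two scatterlist segments */
		int i;

		/* Forward-link the ring, as dw_mci_idmac_init() does via des3. */
		for (i = 0; i < RING_SIZE - 1; i++)
			ring[i].next = i + 1;
		ring[RING_SIZE - 1].next = 0;
		ring[RING_SIZE - 1].flags |= F_EOR;

		/* One descriptor per segment, as dw_mci_translate_sglist() does. */
		for (i = 0; i < 2; i++) {
			ring[i].flags |= F_OWN;
			ring[i].len = seg_len[i];
		}
		ring[0].flags |= F_FIRST;
		ring[1].flags |= F_LAST;

		for (i = 0; i < RING_SIZE; i++)
			printf("desc %d: flags=%#x len=%u next=%d\n",
			       i, ring[i].flags, ring[i].len, ring[i].next);
		return 0;
	}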
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+	struct scatterlist *sg;
+	unsigned int i, direction, sg_len;
+	u32 temp;
+
+	/* If we don't have a channel, we can't do DMA */
+	if (!host->use_dma)
+		return -ENODEV;
+
+	/*
+	 * We don't do DMA on "complex" transfers, i.e. with
+	 * non-word-aligned buffers or lengths. Also, we don't bother
+	 * with all the DMA setup overhead for short transfers.
+	 */
+	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
+		return -EINVAL;
+	if (data->blksz & 3)
+		return -EINVAL;
+
+	for_each_sg(data->sg, sg, data->sg_len, i) {
+		if (sg->offset & 3 || sg->length & 3)
+			return -EINVAL;
+	}
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+			    direction);
+
+	dev_vdbg(&host->pdev->dev,
+		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
+		 sg_len);
+
+	/* Enable the DMA interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_DMA_ENABLE;
+	mci_writel(host, CTRL, temp);
+
+	/* Disable RX/TX IRQs, let DMA handle it */
+	temp = mci_readl(host, INTMASK);
+	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+	mci_writel(host, INTMASK, temp);
+
+	host->dma_ops->start(host, sg_len);
+
+	return 0;
+}
+
+static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
+{
+	u32 temp;
+
+	data->error = -EINPROGRESS;
+
+	WARN_ON(host->data);
+	host->sg = NULL;
+	host->data = data;
+
+	if (dw_mci_submit_data_dma(host, data)) {
+		host->sg = data->sg;
+		host->pio_offset = 0;
+		if (data->flags & MMC_DATA_READ)
+			host->dir_status = DW_MCI_RECV_STATUS;
+		else
+			host->dir_status = DW_MCI_SEND_STATUS;
+
+		temp = mci_readl(host, INTMASK);
+		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
+		mci_writel(host, INTMASK, temp);
+
+		temp = mci_readl(host, CTRL);
+		temp &= ~SDMMC_CTRL_DMA_ENABLE;
+		mci_writel(host, CTRL, temp);
+	}
+}
+
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
+{
+	struct dw_mci *host = slot->host;
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	unsigned int cmd_status = 0;
+
+	mci_writel(host, CMDARG, arg);
+	wmb();
+	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+
+	while (time_before(jiffies, timeout)) {
+		cmd_status = mci_readl(host, CMD);
+		if (!(cmd_status & SDMMC_CMD_START))
+			return;
+	}
+	dev_err(&slot->mmc->class_dev,
+		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
+		cmd, arg, cmd_status);
+}
+
+static void dw_mci_setup_bus(struct dw_mci_slot *slot)
+{
+	struct dw_mci *host = slot->host;
+	u32 div;
+
+	if (slot->clock != host->current_speed) {
+		if (host->bus_hz % slot->clock)
+			/*
+			 * move the + 1 after the divide to prevent
+			 * over-clocking the card.
+			 */
+			div = ((host->bus_hz / slot->clock) >> 1) + 1;
+		else
+			div = (host->bus_hz  / slot->clock) >> 1;
+
+		dev_info(&slot->mmc->class_dev,
+			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz"
+			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
+			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+
+		/* disable clock */
+		mci_writel(host, CLKENA, 0);
+		mci_writel(host, CLKSRC, 0);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* set clock to desired speed */
+		mci_writel(host, CLKDIV, div);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* enable clock */
+		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		host->current_speed = slot->clock;
+	}
+
+	/* Set the current slot bus width */
+	mci_writel(host, CTYPE, slot->ctype);
+}
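In dw_mci_setup_bus() above, the dev_info message shows that the resulting card clock is bus_hz / (2 * div), or bus_hz itself when div is 0, and the "+ 1 after the divide" in the comment rounds the divider up whenever bus_hz is not an exact multiple of the requested rate so the card is not over-clocked. A stand-alone sketch of that arithmetic with hypothetical example rates:

	#include <stdio.h>

	/* Model of the CLKDIV calculation in dw_mci_setup_bus(). */
	static unsigned int pick_div(unsigned int bus_hz, unsigned int want_hz)
	{
		if (bus_hz % want_hz)
			return ((bus_hz / want_hz) >> 1) + 1;	/* round up */
		return (bus_hz / want_hz) >> 1;
	}

	int main(void)
	{
		unsigned int bus_hz = 100000000;	/* hypothetical 100 MHz CIU clock */
		unsigned int want[] = { 50000000, 400000, 300000 };
		int i;

		for (i = 0; i < 3; i++) {
			unsigned int div = pick_div(bus_hz, want[i]);
			unsigned int actual = div ? bus_hz / (2 * div) : bus_hz;

			printf("want %8u Hz -> CLKDIV %3u, actual %8u Hz\n",
			       want[i], div, actual);
		}
		return 0;
	}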
+
+static void dw_mci_start_request(struct dw_mci *host,
+				 struct dw_mci_slot *slot)
+{
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_data	*data;
+	u32 cmdflags;
+
+	mrq = slot->mrq;
+	if (host->pdata->select_slot)
+		host->pdata->select_slot(slot->id);
+
+	/* Slot specific timing and width adjustment */
+	dw_mci_setup_bus(slot);
+
+	host->cur_slot = slot;
+	host->mrq = mrq;
+
+	host->pending_events = 0;
+	host->completed_events = 0;
+	host->data_status = 0;
+
+	data = mrq->data;
+	if (data) {
+		dw_mci_set_timeout(host);
+		mci_writel(host, BYTCNT, data->blksz*data->blocks);
+		mci_writel(host, BLKSIZ, data->blksz);
+	}
+
+	cmd = mrq->cmd;
+	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
+
+	/* this is the first command, send the initialization clock */
+	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
+		cmdflags |= SDMMC_CMD_INIT;
+
+	if (data) {
+		dw_mci_submit_data(host, data);
+		wmb();
+	}
+
+	dw_mci_start_command(host, cmd, cmdflags);
+
+	if (mrq->stop)
+		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+}
+
+static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
+				 struct mmc_request *mrq)
+{
+	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+		 host->state);
+
+	spin_lock_bh(&host->lock);
+	slot->mrq = mrq;
+
+	if (host->state == STATE_IDLE) {
+		host->state = STATE_SENDING_CMD;
+		dw_mci_start_request(host, slot);
+	} else {
+		list_add_tail(&slot->queue_node, &host->queue);
+	}
+
+	spin_unlock_bh(&host->lock);
+}
+
+static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci *host = slot->host;
+
+	WARN_ON(slot->mrq);
+
+	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+		mrq->cmd->error = -ENOMEDIUM;
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+
+	/* We don't support multiple blocks of weird lengths. */
+	dw_mci_queue_request(host, slot, mrq);
+}
+
+static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+
+	/* set default 1 bit mode */
+	slot->ctype = SDMMC_CTYPE_1BIT;
+
+	switch (ios->bus_width) {
+	case MMC_BUS_WIDTH_1:
+		slot->ctype = SDMMC_CTYPE_1BIT;
+		break;
+	case MMC_BUS_WIDTH_4:
+		slot->ctype = SDMMC_CTYPE_4BIT;
+		break;
+	}
+
+	if (ios->clock) {
+		/*
+		 * Use mirror of ios->clock to prevent race with mmc
+		 * core ios update when finding the minimum.
+		 */
+		slot->clock = ios->clock;
+	}
+
+	switch (ios->power_mode) {
+	case MMC_POWER_UP:
+		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+		break;
+	default:
+		break;
+	}
+}
+
+static int dw_mci_get_ro(struct mmc_host *mmc)
+{
+	int read_only;
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci_board *brd = slot->host->pdata;
+
+	/* Use platform get_ro function, else try on board write protect */
+	if (brd->get_ro)
+		read_only = brd->get_ro(slot->id);
+	else
+		read_only =
+			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
+
+	dev_dbg(&mmc->class_dev, "card is %s\n",
+		read_only ? "read-only" : "read-write");
+
+	return read_only;
+}
+
+static int dw_mci_get_cd(struct mmc_host *mmc)
+{
+	int present;
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci_board *brd = slot->host->pdata;
+
+	/* Use platform get_cd function, else try onboard card detect */
+	if (brd->get_cd)
+		present = !brd->get_cd(slot->id);
+	else
+		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
+			== 0 ? 1 : 0;
+
+	if (present)
+		dev_dbg(&mmc->class_dev, "card is present\n");
+	else
+		dev_dbg(&mmc->class_dev, "card is not present\n");
+
+	return present;
+}
+
+static const struct mmc_host_ops dw_mci_ops = {
+	.request	= dw_mci_request,
+	.set_ios	= dw_mci_set_ios,
+	.get_ro		= dw_mci_get_ro,
+	.get_cd		= dw_mci_get_cd,
+};
+
+static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
+	__releases(&host->lock)
+	__acquires(&host->lock)
+{
+	struct dw_mci_slot *slot;
+	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
+
+	WARN_ON(host->cmd || host->data);
+
+	host->cur_slot->mrq = NULL;
+	host->mrq = NULL;
+	if (!list_empty(&host->queue)) {
+		slot = list_entry(host->queue.next,
+				  struct dw_mci_slot, queue_node);
+		list_del(&slot->queue_node);
+		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
+			 mmc_hostname(slot->mmc));
+		host->state = STATE_SENDING_CMD;
+		dw_mci_start_request(host, slot);
+	} else {
+		dev_vdbg(&host->pdev->dev, "list empty\n");
+		host->state = STATE_IDLE;
+	}
+
+	spin_unlock(&host->lock);
+	mmc_request_done(prev_mmc, mrq);
+	spin_lock(&host->lock);
+}
+
+static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+{
+	u32 status = host->cmd_status;
+
+	host->cmd_status = 0;
+
+	/* Read the response from the card (up to 16 bytes) */
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136) {
+			cmd->resp[3] = mci_readl(host, RESP0);
+			cmd->resp[2] = mci_readl(host, RESP1);
+			cmd->resp[1] = mci_readl(host, RESP2);
+			cmd->resp[0] = mci_readl(host, RESP3);
+		} else {
+			cmd->resp[0] = mci_readl(host, RESP0);
+			cmd->resp[1] = 0;
+			cmd->resp[2] = 0;
+			cmd->resp[3] = 0;
+		}
+	}
+
+	if (status & SDMMC_INT_RTO)
+		cmd->error = -ETIMEDOUT;
+	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+		cmd->error = -EILSEQ;
+	else if (status & SDMMC_INT_RESP_ERR)
+		cmd->error = -EIO;
+	else
+		cmd->error = 0;
+
+	if (cmd->error) {
+		/* newer ip versions need a delay between retries */
+		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
+			mdelay(20);
+
+		if (cmd->data) {
+			host->data = NULL;
+			dw_mci_stop_dma(host);
+		}
+	}
+}
+
+static void dw_mci_tasklet_func(unsigned long priv)
+{
+	struct dw_mci *host = (struct dw_mci *)priv;
+	struct mmc_data	*data;
+	struct mmc_command *cmd;
+	enum dw_mci_state state;
+	enum dw_mci_state prev_state;
+	u32 status;
+
+	spin_lock(&host->lock);
+
+	state = host->state;
+	data = host->data;
+
+	do {
+		prev_state = state;
+
+		switch (state) {
+		case STATE_IDLE:
+			break;
+
+		case STATE_SENDING_CMD:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			cmd = host->cmd;
+			host->cmd = NULL;
+			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
+			dw_mci_command_complete(host, host->mrq->cmd);
+			if (!host->mrq->data || cmd->error) {
+				dw_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_DATA;
+			/* fall through */
+
+		case STATE_SENDING_DATA:
+			if (test_and_clear_bit(EVENT_DATA_ERROR,
+					       &host->pending_events)) {
+				dw_mci_stop_dma(host);
+				if (data->stop)
+					send_stop_cmd(host, data);
+				state = STATE_DATA_ERROR;
+				break;
+			}
+
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+
+			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+			prev_state = state = STATE_DATA_BUSY;
+			/* fall through */
+
+		case STATE_DATA_BUSY:
+			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->data = NULL;
+			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
+			status = host->data_status;
+
+			if (status & DW_MCI_DATA_ERROR_FLAGS) {
+				if (status & SDMMC_INT_DTO) {
+					dev_err(&host->pdev->dev,
+						"data timeout error\n");
+					data->error = -ETIMEDOUT;
+				} else if (status & SDMMC_INT_DCRC) {
+					dev_err(&host->pdev->dev,
+						"data CRC error\n");
+					data->error = -EILSEQ;
+				} else {
+					dev_err(&host->pdev->dev,
+						"data FIFO error "
+						"(status=%08x)\n",
+						status);
+					data->error = -EIO;
+				}
+			} else {
+				data->bytes_xfered = data->blocks * data->blksz;
+				data->error = 0;
+			}
+
+			if (!data->stop) {
+				dw_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_STOP;
+			if (!data->error)
+				send_stop_cmd(host, data);
+			/* fall through */
+
+		case STATE_SENDING_STOP:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->cmd = NULL;
+			dw_mci_command_complete(host, host->mrq->stop);
+			dw_mci_request_end(host, host->mrq);
+			goto unlock;
+
+		case STATE_DATA_ERROR:
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+
+			state = STATE_DATA_BUSY;
+			break;
+		}
+	} while (state != prev_state);
+
+	host->state = state;
+unlock:
+	spin_unlock(&host->lock);
+
+}
+
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = (u16 *)buf;
+
+	WARN_ON(cnt % 2 != 0);
+
+	cnt = cnt >> 1;
+	while (cnt > 0) {
+		mci_writew(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = (u16 *)buf;
+
+	WARN_ON(cnt % 2 != 0);
+
+	cnt = cnt >> 1;
+	while (cnt > 0) {
+		*pdata++ = mci_readw(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = (u32 *)buf;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	cnt = cnt >> 2;
+	while (cnt > 0) {
+		mci_writel(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = (u32 *)buf;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	cnt = cnt >> 2;
+	while (cnt > 0) {
+		*pdata++ = mci_readl(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = (u64 *)buf;
+
+	WARN_ON(cnt % 8 != 0);
+
+	cnt = cnt >> 3;
+	while (cnt > 0) {
+		mci_writeq(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = (u64 *)buf;
+
+	WARN_ON(cnt % 8 != 0);
+
+	cnt = cnt >> 3;
+	while (cnt > 0) {
+		*pdata++ = mci_readq(host, DATA);
+		cnt--;
+	}
+}
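The push/pull helpers above move FIFO data in 16-, 32- or 64-bit units, and the PIO routines that follow turn the controller's FIFO word count into a byte count with "count << host->data_shift". The binding of data_shift to a particular helper pair happens in probe code outside this hunk, so the 1/2/3 mapping in this small illustration is an assumption, not something shown by the patch:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed pairing of FIFO width and data_shift (bytes = words << shift). */
		struct { int width_bits; int data_shift; } modes[] = {
			{ 16, 1 }, { 32, 2 }, { 64, 3 },
		};
		unsigned int fifo_words = 8;	/* e.g. SDMMC_GET_FCNT(STATUS) */
		int i;

		for (i = 0; i < 3; i++)
			printf("%2d-bit FIFO: %u words = %u bytes\n",
			       modes[i].width_bits, fifo_words,
			       fifo_words << modes[i].data_shift);
		return 0;
	}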
+
+static void dw_mci_read_data_pio(struct dw_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data	*data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len, old_len, count = 0;
+
+	do {
+		len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+		if (count == 0)
+			old_len = len;
+
+		if (offset + len <= sg->length) {
+			host->pull_data(host, (void *)(buf + offset), len);
+
+			offset += len;
+			nbytes += len;
+
+			if (offset == sg->length) {
+				flush_dcache_page(sg_page(sg));
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
+			unsigned int remaining = sg->length - offset;
+			host->pull_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+
+			flush_dcache_page(sg_page(sg));
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->pull_data(host, buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+		if (status & DW_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+			smp_wmb();
+
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+		count++;
+	} while (status & SDMMC_INT_RXDR); /* if the RXDR is ready, read again */
+	len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_write_data_pio(struct dw_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data	*data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len;
+
+	do {
+		len = SDMMC_FIFO_SZ -
+			(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+		if (offset + len <= sg->length) {
+			host->push_data(host, (void *)(buf + offset), len);
+
+			offset += len;
+			nbytes += len;
+			if (offset == sg->length) {
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
+			unsigned int remaining = sg->length - offset;
+
+			host->push_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->push_data(host, (void *)buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+		if (status & DW_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+
+			smp_wmb();
+
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
+
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
+{
+	if (!host->cmd_status)
+		host->cmd_status = status;
+
+	smp_wmb();
+
+	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+	tasklet_schedule(&host->tasklet);
+}
+
+static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
+{
+	struct dw_mci *host = dev_id;
+	u32 status, pending;
+	unsigned int pass_count = 0;
+
+	do {
+		status = mci_readl(host, RINTSTS);
+		pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+		/*
+		 * DTO fix - version 2.10a and below, and only if internal DMA
+		 * is configured.
+		 */
+		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+			if (!pending &&
+			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+				pending |= SDMMC_INT_DATA_OVER;
+		}
+
+		if (!pending)
+			break;
+
+		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
+			host->cmd_status = status;
+			smp_wmb();
+			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+			/* if there is an error report DATA_ERROR */
+			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
+			host->data_status = status;
+			smp_wmb();
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & SDMMC_INT_DATA_OVER) {
+			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+			if (!host->data_status)
+				host->data_status = status;
+			smp_wmb();
+			if (host->dir_status == DW_MCI_RECV_STATUS) {
+				if (host->sg != NULL)
+					dw_mci_read_data_pio(host);
+			}
+			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & SDMMC_INT_RXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+			if (host->sg)
+				dw_mci_read_data_pio(host);
+		}
+
+		if (pending & SDMMC_INT_TXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+			if (host->sg)
+				dw_mci_write_data_pio(host);
+		}
+
+		if (pending & SDMMC_INT_CMD_DONE) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+			dw_mci_cmd_interrupt(host, status);
+		}
+
+		if (pending & SDMMC_INT_CD) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CD);
+			tasklet_schedule(&host->card_tasklet);
+		}
+
+	} while (pass_count++ < 5);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+	/* Handle DMA interrupts */
+	pending = mci_readl(host, IDSTS);
+	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
+		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
+		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
+		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+		host->dma_ops->complete(host);
+	}
+#endif
+
+	return IRQ_HANDLED;
+}
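dw_mci_interrupt() above never completes a request in IRQ context: it acknowledges the RINTSTS bits, records the raw status, sets a bit in host->pending_events and schedules the tasklet, and dw_mci_tasklet_func() later consumes those bits with test_and_clear_bit() to advance the state machine. A stand-alone model of that hand-off, with plain flag operations standing in for the kernel's atomic bitops and tasklet (the names here are hypothetical):

	#include <stdbool.h>
	#include <stdio.h>

	enum { EV_CMD_DONE, EV_DATA_ERR, EV_DATA_DONE };

	static unsigned long pending;

	static void fake_irq(int event)
	{
		pending |= 1ul << event;	/* set_bit(event, &pending_events) */
		/* tasklet_schedule(&host->tasklet) would go here */
	}

	static bool consume(int event)
	{
		if (!(pending & (1ul << event)))
			return false;
		pending &= ~(1ul << event);	/* test_and_clear_bit() */
		return true;
	}

	int main(void)
	{
		fake_irq(EV_CMD_DONE);
		fake_irq(EV_DATA_DONE);

		if (consume(EV_CMD_DONE))
			printf("command phase complete\n");
		if (consume(EV_DATA_ERR))
			printf("data error\n");
		else if (consume(EV_DATA_DONE))
			printf("data phase complete\n");
		return 0;
	}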
+
+static void dw_mci_tasklet_card(unsigned long data)
+{
+	struct dw_mci *host = (struct dw_mci *)data;
+	int i;
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		struct mmc_host *mmc = slot->mmc;
+		struct mmc_request *mrq;
+		int present;
+		u32 ctrl;
+
+		present = dw_mci_get_cd(mmc);
+		while (present != slot->last_detect_state) {
+			spin_lock(&host->lock);
+
+			dev_dbg(&slot->mmc->class_dev, "card %s\n",
+				present ? "inserted" : "removed");
+
+			/* Card change detected */
+			slot->last_detect_state = present;
+
+			/* Power up slot */
+			if (present != 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id,
+							      mmc->ocr_avail);
+
+				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+			}
+
+			/* Clean up queue if present */
+			mrq = slot->mrq;
+			if (mrq) {
+				if (mrq == host->mrq) {
+					host->data = NULL;
+					host->cmd = NULL;
+
+					switch (host->state) {
+					case STATE_IDLE:
+						break;
+					case STATE_SENDING_CMD:
+						mrq->cmd->error = -ENOMEDIUM;
+						if (!mrq->data)
+							break;
+						/* fall through */
+					case STATE_SENDING_DATA:
+						mrq->data->error = -ENOMEDIUM;
+						dw_mci_stop_dma(host);
+						break;
+					case STATE_DATA_BUSY:
+					case STATE_DATA_ERROR:
+						if (mrq->data->error == -EINPROGRESS)
+							mrq->data->error = -ENOMEDIUM;
+						if (!mrq->stop)
+							break;
+						/* fall through */
+					case STATE_SENDING_STOP:
+						mrq->stop->error = -ENOMEDIUM;
+						break;
+					}
+
+					dw_mci_request_end(host, mrq);
+				} else {
+					list_del(&slot->queue_node);
+					mrq->cmd->error = -ENOMEDIUM;
+					if (mrq->data)
+						mrq->data->error = -ENOMEDIUM;
+					if (mrq->stop)
+						mrq->stop->error = -ENOMEDIUM;
+
+					spin_unlock(&host->lock);
+					mmc_request_done(slot->mmc, mrq);
+					spin_lock(&host->lock);
+				}
+			}
+
+			/* Power down slot */
+			if (present == 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id, 0);
+				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+				/*
+				 * Clear down the FIFO - doing so generates a
+				 * block interrupt, hence the scatter-gather
+				 * pointer is set to NULL here first.
+				 */
+				host->sg = NULL;
+
+				ctrl = mci_readl(host, CTRL);
+				ctrl |= SDMMC_CTRL_FIFO_RESET;
+				mci_writel(host, CTRL, ctrl);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+				ctrl = mci_readl(host, BMOD);
+				ctrl |= 0x01; /* Software reset of DMA */
+				mci_writel(host, BMOD, ctrl);
+#endif
+
+			}
+
+			spin_unlock(&host->lock);
+			present = dw_mci_get_cd(mmc);
+		}
+
+		mmc_detect_change(slot->mmc,
+			msecs_to_jiffies(host->pdata->detect_delay_ms));
+	}
+}
+
+static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+{
+	struct mmc_host *mmc;
+	struct dw_mci_slot *slot;
+
+	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
+	if (!mmc)
+		return -ENOMEM;
+
+	slot = mmc_priv(mmc);
+	slot->id = id;
+	slot->mmc = mmc;
+	slot->host = host;
+
+	mmc->ops = &dw_mci_ops;
+	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
+	mmc->f_max = host->bus_hz;
+
+	if (host->pdata->get_ocr)
+		mmc->ocr_avail = host->pdata->get_ocr(id);
+	else
+		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+	/*
+	 * Start with slot power disabled, it will be enabled when a card
+	 * is detected.
+	 */
+	if (host->pdata->setpower)
+		host->pdata->setpower(id, 0);
+
+	mmc->caps = 0;
+	if (host->pdata->get_bus_wd)
+		if (host->pdata->get_bus_wd(slot->id) >= 4)
+			mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+#ifdef CONFIG_MMC_DW_IDMAC
+	mmc->max_segs = host->ring_size;
+	mmc->max_blk_size = 65536;
+	mmc->max_blk_count = host->ring_size;
+	mmc->max_seg_size = 0x1000;
+	mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
+#else
+	if (host->pdata->blk_settings) {
+		mmc->max_segs = host->pdata->blk_settings->max_segs;
+		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
+		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
+		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
+		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
+	} else {
+		/* Useful defaults if platform data is unset. */
+		mmc->max_segs = 64;
+		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+		mmc->max_blk_count = 512;
+		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+		mmc->max_seg_size = mmc->max_req_size;
+	}
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+	if (dw_mci_get_cd(mmc))
+		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+	else
+		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+	host->slot[id] = slot;
+	mmc_add_host(mmc);
+
+#if defined(CONFIG_DEBUG_FS)
+	dw_mci_init_debugfs(slot);
+#endif
+
+	/* Card initially undetected */
+	slot->last_detect_state = 0;
+
+	return 0;
+}
+
+static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
+{
+	/* Shutdown detect IRQ */
+	if (slot->host->pdata->exit)
+		slot->host->pdata->exit(id);
+
+	/* Debugfs stuff is cleaned up by mmc core */
+	mmc_remove_host(slot->mmc);
+	slot->host->slot[id] = NULL;
+	mmc_free_host(slot->mmc);
+}
+
+static void dw_mci_init_dma(struct dw_mci *host)
+{
+	/* Alloc memory for sg translation */
+	host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
+					  &host->sg_dma, GFP_KERNEL);
+	if (!host->sg_cpu) {
+		dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
+			__func__);
+		goto no_dma;
+	}
+
+	/* Determine which DMA interface to use */
+#ifdef CONFIG_MMC_DW_IDMAC
+	host->dma_ops = &dw_mci_idmac_ops;
+	dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
+#endif
+
+	if (!host->dma_ops)
+		goto no_dma;
+
+	if (host->dma_ops->init) {
+		if (host->dma_ops->init(host)) {
+			dev_err(&host->pdev->dev, "%s: Unable to initialize "
+				"DMA Controller.\n", __func__);
+			goto no_dma;
+		}
+	} else {
+		dev_err(&host->pdev->dev, "DMA initialization not found.\n");
+		goto no_dma;
+	}
+
+	host->use_dma = 1;
+	return;
+
+no_dma:
+	dev_info(&host->pdev->dev, "Using PIO mode.\n");
+	host->use_dma = 0;
+	return;
+}
+
+static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	unsigned int ctrl;
+
+	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+				SDMMC_CTRL_DMA_RESET));
+
+	/* wait till resets clear */
+	do {
+		ctrl = mci_readl(host, CTRL);
+		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+			      SDMMC_CTRL_DMA_RESET)))
+			return true;
+	} while (time_before(jiffies, timeout));
+
+	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+
+	return false;
+}
+
+static int dw_mci_probe(struct platform_device *pdev)
+{
+	struct dw_mci *host;
+	struct resource	*regs;
+	struct dw_mci_board *pdata;
+	int irq, ret, i, width;
+	u32 fifo_size;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->pdev = pdev;
+	host->pdata = pdata = pdev->dev.platform_data;
+	if (!pdata || !pdata->init) {
+		dev_err(&pdev->dev,
+			"Platform data must supply init function\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	if (!pdata->select_slot && pdata->num_slots > 1) {
+		dev_err(&pdev->dev,
+			"Platform data must supply select_slot function\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	if (!pdata->bus_hz) {
+		dev_err(&pdev->dev,
+			"Platform data must supply bus speed\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	host->bus_hz = pdata->bus_hz;
+	host->quirks = pdata->quirks;
+
+	spin_lock_init(&host->lock);
+	INIT_LIST_HEAD(&host->queue);
+
+	ret = -ENOMEM;
+	host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!host->regs)
+		goto err_freehost;
+
+	host->dma_ops = pdata->dma_ops;
+	dw_mci_init_dma(host);
+
+	/*
+	 * Get the host data width - this assumes that HCON has been set with
+	 * the correct values.
+	 */
+	i = (mci_readl(host, HCON) >> 7) & 0x7;
+	if (!i) {
+		host->push_data = dw_mci_push_data16;
+		host->pull_data = dw_mci_pull_data16;
+		width = 16;
+		host->data_shift = 1;
+	} else if (i == 2) {
+		host->push_data = dw_mci_push_data64;
+		host->pull_data = dw_mci_pull_data64;
+		width = 64;
+		host->data_shift = 3;
+	} else {
+		/* Check for a reserved value, and warn if it is one */
+		WARN((i != 1),
+		     "HCON reports a reserved host data width!\n"
+		     "Defaulting to 32-bit access.\n");
+		host->push_data = dw_mci_push_data32;
+		host->pull_data = dw_mci_pull_data32;
+		width = 32;
+		host->data_shift = 2;
+	}
+
+	/* Reset all blocks */
+	if (!mci_wait_reset(&pdev->dev, host)) {
+		ret = -ENODEV;
+		goto err_dmaunmap;
+	}
+
+	/* Clear the interrupts for the host controller */
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
+
+	/* Put in max timeout */
+	mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+	/*
+	 * FIFO threshold settings:
+	 *	RX mark  = fifo_size / 2 - 1
+	 *	TX mark  = fifo_size / 2
+	 *	DMA size = 8
+	 */
+	fifo_size = mci_readl(host, FIFOTH);
+	fifo_size = (fifo_size >> 16) & 0x7ff;
+	mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+				  ((fifo_size/2) << 0)));
+
+	/* disable clock to CIU */
+	mci_writel(host, CLKENA, 0);
+	mci_writel(host, CLKSRC, 0);
+
+	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+	tasklet_init(&host->card_tasklet,
+		     dw_mci_tasklet_card, (unsigned long)host);
+
+	ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
+	if (ret)
+		goto err_dmaunmap;
+
+	platform_set_drvdata(pdev, host);
+
+	if (host->pdata->num_slots)
+		host->num_slots = host->pdata->num_slots;
+	else
+		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+
+	/* We need at least one slot to succeed */
+	for (i = 0; i < host->num_slots; i++) {
+		ret = dw_mci_init_slot(host, i);
+		if (ret) {
+			ret = -ENODEV;
+			goto err_init_slot;
+		}
+	}
+
+	/*
+	 * Enable interrupts for command done, data over, data empty, card
+	 * detect, receive ready, and errors such as transmit/receive timeout
+	 * and CRC error.
+	 */
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+	dev_info(&pdev->dev, "DW MMC controller at irq %d, "
+		 "%d bit host data width\n", irq, width);
+	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
+		dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
+
+	return 0;
+
+err_init_slot:
+	/* De-init any initialized slots */
+	while (i > 0) {
+		i--;
+		if (host->slot[i])
+			dw_mci_cleanup_slot(host->slot[i], i);
+	}
+	free_irq(irq, host);
+
+err_dmaunmap:
+	if (host->use_dma && host->dma_ops->exit)
+		host->dma_ops->exit(host);
+	dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
+			  host->sg_cpu, host->sg_dma);
+	iounmap(host->regs);
+
+err_freehost:
+	kfree(host);
+	return ret;
+}
+
+static int __exit dw_mci_remove(struct platform_device *pdev)
+{
+	struct dw_mci *host = platform_get_drvdata(pdev);
+	int i;
+
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
+
+	platform_set_drvdata(pdev, NULL);
+
+	for (i = 0; i < host->num_slots; i++) {
+		dev_dbg(&pdev->dev, "remove slot %d\n", i);
+		if (host->slot[i])
+			dw_mci_cleanup_slot(host->slot[i], i);
+	}
+
+	/* disable clock to CIU */
+	mci_writel(host, CLKENA, 0);
+	mci_writel(host, CLKSRC, 0);
+
+	free_irq(platform_get_irq(pdev, 0), host);
+	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+	if (host->use_dma && host->dma_ops->exit)
+		host->dma_ops->exit(host);
+
+	iounmap(host->regs);
+
+	kfree(host);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	int i, ret;
+	struct dw_mci *host = platform_get_drvdata(pdev);
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		if (!slot)
+			continue;
+		ret = mmc_suspend_host(slot->mmc);
+		if (ret < 0) {
+			while (--i >= 0) {
+				slot = host->slot[i];
+				if (slot)
+					mmc_resume_host(host->slot[i]->mmc);
+			}
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int dw_mci_resume(struct platform_device *pdev)
+{
+	int i, ret;
+	struct dw_mci *host = platform_get_drvdata(pdev);
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		if (!slot)
+			continue;
+		ret = mmc_resume_host(host->slot[i]->mmc);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+#else
+#define dw_mci_suspend	NULL
+#define dw_mci_resume	NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver dw_mci_driver = {
+	.remove		= __exit_p(dw_mci_remove),
+	.suspend	= dw_mci_suspend,
+	.resume		= dw_mci_resume,
+	.driver		= {
+		.name		= "dw_mmc",
+	},
+};
+
+static int __init dw_mci_init(void)
+{
+	return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
+}
+
+static void __exit dw_mci_exit(void)
+{
+	platform_driver_unregister(&dw_mci_driver);
+}
+
+module_init(dw_mci_init);
+module_exit(dw_mci_exit);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 0000000..5dd55a7
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define SDMMC_CTRL		0x000
+#define SDMMC_PWREN		0x004
+#define SDMMC_CLKDIV		0x008
+#define SDMMC_CLKSRC		0x00c
+#define SDMMC_CLKENA		0x010
+#define SDMMC_TMOUT		0x014
+#define SDMMC_CTYPE		0x018
+#define SDMMC_BLKSIZ		0x01c
+#define SDMMC_BYTCNT		0x020
+#define SDMMC_INTMASK		0x024
+#define SDMMC_CMDARG		0x028
+#define SDMMC_CMD		0x02c
+#define SDMMC_RESP0		0x030
+#define SDMMC_RESP1		0x034
+#define SDMMC_RESP2		0x038
+#define SDMMC_RESP3		0x03c
+#define SDMMC_MINTSTS		0x040
+#define SDMMC_RINTSTS		0x044
+#define SDMMC_STATUS		0x048
+#define SDMMC_FIFOTH		0x04c
+#define SDMMC_CDETECT		0x050
+#define SDMMC_WRTPRT		0x054
+#define SDMMC_GPIO		0x058
+#define SDMMC_TCBCNT		0x05c
+#define SDMMC_TBBCNT		0x060
+#define SDMMC_DEBNCE		0x064
+#define SDMMC_USRID		0x068
+#define SDMMC_VERID		0x06c
+#define SDMMC_HCON		0x070
+#define SDMMC_BMOD		0x080
+#define SDMMC_PLDMND		0x084
+#define SDMMC_DBADDR		0x088
+#define SDMMC_IDSTS		0x08c
+#define SDMMC_IDINTEN		0x090
+#define SDMMC_DSCADDR		0x094
+#define SDMMC_BUFADDR		0x098
+#define SDMMC_DATA		0x100
+#define SDMMC_DATA_ADR		0x100
+
+/* shift bit field */
+#define _SBF(f, v)		((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC		BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN		BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD		BIT(10)
+#define SDMMC_CTRL_SEND_CCSD		BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA	BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP	BIT(7)
+#define SDMMC_CTRL_READ_WAIT		BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE		BIT(5)
+#define SDMMC_CTRL_INT_ENABLE		BIT(4)
+#define SDMMC_CTRL_DMA_RESET		BIT(2)
+#define SDMMC_CTRL_FIFO_RESET		BIT(1)
+#define SDMMC_CTRL_RESET		BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR		BIT(16)
+#define SDMMC_CLKEN_ENABLE		BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n)		_SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK		0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n)		((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK		0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT		BIT(16)
+#define SDMMC_CTYPE_4BIT		BIT(0)
+#define SDMMC_CTYPE_1BIT		0
+/* Interrupt status & mask register defines */
+#define SDMMC_INT_SDIO			BIT(16)
+#define SDMMC_INT_EBE			BIT(15)
+#define SDMMC_INT_ACD			BIT(14)
+#define SDMMC_INT_SBE			BIT(13)
+#define SDMMC_INT_HLE			BIT(12)
+#define SDMMC_INT_FRUN			BIT(11)
+#define SDMMC_INT_HTO			BIT(10)
+#define SDMMC_INT_DTO			BIT(9)
+#define SDMMC_INT_RTO			BIT(8)
+#define SDMMC_INT_DCRC			BIT(7)
+#define SDMMC_INT_RCRC			BIT(6)
+#define SDMMC_INT_RXDR			BIT(5)
+#define SDMMC_INT_TXDR			BIT(4)
+#define SDMMC_INT_DATA_OVER		BIT(3)
+#define SDMMC_INT_CMD_DONE		BIT(2)
+#define SDMMC_INT_RESP_ERR		BIT(1)
+#define SDMMC_INT_CD			BIT(0)
+#define SDMMC_INT_ERROR			0xbfc2
+/* Command register defines */
+#define SDMMC_CMD_START			BIT(31)
+#define SDMMC_CMD_CCS_EXP		BIT(23)
+#define SDMMC_CMD_CEATA_RD		BIT(22)
+#define SDMMC_CMD_UPD_CLK		BIT(21)
+#define SDMMC_CMD_INIT			BIT(15)
+#define SDMMC_CMD_STOP			BIT(14)
+#define SDMMC_CMD_PRV_DAT_WAIT		BIT(13)
+#define SDMMC_CMD_SEND_STOP		BIT(12)
+#define SDMMC_CMD_STRM_MODE		BIT(11)
+#define SDMMC_CMD_DAT_WR		BIT(10)
+#define SDMMC_CMD_DAT_EXP		BIT(9)
+#define SDMMC_CMD_RESP_CRC		BIT(8)
+#define SDMMC_CMD_RESP_LONG		BIT(7)
+#define SDMMC_CMD_RESP_EXP		BIT(6)
+#define SDMMC_CMD_INDX(n)		((n) & 0x1F)
+/* Status register defines */
+#define SDMMC_GET_FCNT(x)		(((x)>>17) & 0x1FF)
+#define SDMMC_FIFO_SZ			32
+/* Internal DMAC interrupt defines */
+#define SDMMC_IDMAC_INT_AI		BIT(9)
+#define SDMMC_IDMAC_INT_NI		BIT(8)
+#define SDMMC_IDMAC_INT_CES		BIT(5)
+#define SDMMC_IDMAC_INT_DU		BIT(4)
+#define SDMMC_IDMAC_INT_FBE		BIT(2)
+#define SDMMC_IDMAC_INT_RI		BIT(1)
+#define SDMMC_IDMAC_INT_TI		BIT(0)
+/* Internal DMAC bus mode bits */
+#define SDMMC_IDMAC_ENABLE		BIT(7)
+#define SDMMC_IDMAC_FB			BIT(1)
+#define SDMMC_IDMAC_SWRESET		BIT(0)
+
+/* Register access macros */
+#define mci_readl(dev, reg)			\
+	__raw_readl(dev->regs + SDMMC_##reg)
+#define mci_writel(dev, reg, value)			\
+	__raw_writel((value), dev->regs + SDMMC_##reg)
+
+/* 16-bit FIFO access macros */
+#define mci_readw(dev, reg)			\
+	__raw_readw(dev->regs + SDMMC_##reg)
+#define mci_writew(dev, reg, value)			\
+	__raw_writew((value), dev->regs + SDMMC_##reg)
+
+/* 64-bit FIFO access macros */
+#ifdef readq
+#define mci_readq(dev, reg)			\
+	__raw_readq(dev->regs + SDMMC_##reg)
+#define mci_writeq(dev, reg, value)			\
+	__raw_writeq((value), dev->regs + SDMMC_##reg)
+#else
+/*
+ * Dummy readq implementation for architectures that don't define it.
+ *
+ * We assume that none of these architectures configures the IP
+ * block with a 64-bit FIFO width, so this code will never be
+ * executed on those machines. Defining these macros here keeps the
+ * rest of the code free from ifdefs.
+ */
+#define mci_readq(dev, reg)			\
+	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+#define mci_writeq(dev, reg, value)			\
+	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+#endif
+
+#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b3a0ab0..74218ad 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/mmc/host.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
@@ -827,8 +828,8 @@
 	}
 
 	host->clk = clk_get(&pdev->dev, "mmc");
-	if (!host->clk) {
-		ret = -ENOENT;
+	if (IS_ERR(host->clk)) {
+		ret = PTR_ERR(host->clk);
 		dev_err(&pdev->dev, "Failed to get mmc clock\n");
 		goto err_free_host;
 	}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5630228..2d6de3e 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -14,6 +14,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
@@ -46,10 +47,6 @@
  *	      is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  *		  is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- *		and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- *		using DMA.
  * @sdio: variant supports SDIO
  * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
@@ -59,8 +56,6 @@
 	unsigned int		datalength_bits;
 	unsigned int		fifosize;
 	unsigned int		fifohalfsize;
-	bool			broken_blockend;
-	bool			broken_blockend_dma;
 	bool			sdio;
 	bool			st_clkdiv;
 };
@@ -76,7 +71,6 @@
 	.fifohalfsize		= 8 * 4,
 	.clkreg_enable		= 1 << 13, /* HWFCEN */
 	.datalength_bits	= 16,
-	.broken_blockend_dma	= true,
 	.sdio			= true,
 };
 
@@ -86,7 +80,6 @@
 	.clkreg			= MCI_CLK_ENABLE,
 	.clkreg_enable		= 1 << 14, /* HWFCEN */
 	.datalength_bits	= 24,
-	.broken_blockend	= true,
 	.sdio			= true,
 	.st_clkdiv		= true,
 };
@@ -210,8 +203,6 @@
 	host->data = data;
 	host->size = data->blksz * data->blocks;
 	host->data_xfered = 0;
-	host->blockend = false;
-	host->dataend = false;
 
 	mmci_init_sg(host, data);
 
@@ -288,21 +279,26 @@
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	struct variant_data *variant = host->variant;
-
 	/* First check for errors */
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
-		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
-		if (status & MCI_DATACRCFAIL)
-			data->error = -EILSEQ;
-		else if (status & MCI_DATATIMEOUT)
-			data->error = -ETIMEDOUT;
-		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
-			data->error = -EIO;
+		u32 remain, success;
 
-		/* Force-complete the transaction */
-		host->blockend = true;
-		host->dataend = true;
+		/* Calculate how far we are into the transfer */
+		remain = readl(host->base + MMCIDATACNT);
+		success = data->blksz * data->blocks - remain;
+
+		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+		if (status & MCI_DATACRCFAIL) {
+			/* Last block was not successful */
+			host->data_xfered = round_down(success - 1, data->blksz);
+			data->error = -EILSEQ;
+		} else if (status & MCI_DATATIMEOUT) {
+			host->data_xfered = round_down(success, data->blksz);
+			data->error = -ETIMEDOUT;
+		} else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+			host->data_xfered = round_down(success, data->blksz);
+			data->error = -EIO;
+		}
 
 		/*
 		 * We hit an error condition.  Ensure that any data
@@ -321,61 +317,14 @@
 		}
 	}
 
-	/*
-	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
-	 * is always sent first, and we increase the
-	 * transfered number of bytes for that IRQ. Then
-	 * MCI_DATAEND follows and we conclude the transaction.
-	 *
-	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
-	 * doesn't seem to immediately clear from the status,
-	 * so we can't use it keep count when only one irq is
-	 * used because the irq will hit for other reasons, and
-	 * then the flag is still up. So we use the MCI_DATAEND
-	 * IRQ at the end of the entire transfer because
-	 * MCI_DATABLOCKEND is broken.
-	 *
-	 * In the U300, the IRQs can arrive out-of-order,
-	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
-	 * so for this case we use the flags "blockend" and
-	 * "dataend" to make sure both IRQs have arrived before
-	 * concluding the transaction. (This does not apply
-	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
-	 * at all.) In DMA mode it suffers from the same problem
-	 * as the Ux500.
-	 */
-	if (status & MCI_DATABLOCKEND) {
-		/*
-		 * Just being a little over-cautious, we do not
-		 * use this progressive update if the hardware blockend
-		 * flag is unreliable: since it can stay high between
-		 * IRQs it will corrupt the transfer counter.
-		 */
-		if (!variant->broken_blockend)
-			host->data_xfered += data->blksz;
-		host->blockend = true;
-	}
+	if (status & MCI_DATABLOCKEND)
+		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
-	if (status & MCI_DATAEND)
-		host->dataend = true;
-
-	/*
-	 * On variants with broken blockend we shall only wait for dataend,
-	 * on others we must sync with the blockend signal since they can
-	 * appear out-of-order.
-	 */
-	if (host->dataend && (host->blockend || variant->broken_blockend)) {
+	if (status & MCI_DATAEND || data->error) {
 		mmci_stop_data(host);
 
-		/* Reset these flags */
-		host->blockend = false;
-		host->dataend = false;
-
-		/*
-		 * Variants with broken blockend flags need to handle the
-		 * end of the entire transfer here.
-		 */
-		if (variant->broken_blockend && !data->error)
+		if (!data->error)
+			/* The error clause is handled above, success! */
 			host->data_xfered += data->blksz * data->blocks;
 
 		if (!data->stop) {
@@ -394,15 +343,15 @@
 
 	host->cmd = NULL;
 
-	cmd->resp[0] = readl(base + MMCIRESPONSE0);
-	cmd->resp[1] = readl(base + MMCIRESPONSE1);
-	cmd->resp[2] = readl(base + MMCIRESPONSE2);
-	cmd->resp[3] = readl(base + MMCIRESPONSE3);
-
 	if (status & MCI_CMDTIMEOUT) {
 		cmd->error = -ETIMEDOUT;
 	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
 		cmd->error = -EILSEQ;
+	} else {
+		cmd->resp[0] = readl(base + MMCIRESPONSE0);
+		cmd->resp[1] = readl(base + MMCIRESPONSE1);
+		cmd->resp[2] = readl(base + MMCIRESPONSE2);
+		cmd->resp[3] = readl(base + MMCIRESPONSE3);
 	}
 
 	if (!cmd->data || cmd->error) {
@@ -770,7 +719,6 @@
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
-	unsigned int mask;
 	int ret;
 
 	/* must have platform data */
@@ -951,12 +899,7 @@
 			goto irq0_free;
 	}
 
-	mask = MCI_IRQENABLE;
-	/* Don't use the datablockend flag if it's broken */
-	if (variant->broken_blockend)
-		mask &= ~MCI_DATABLOCKEND;
-
-	writel(mask, host->base + MMCIMASK0);
+	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
 	amba_set_drvdata(dev, mmc);
 
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index df06f01..c1df7b8 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -137,7 +137,7 @@
 #define MCI_IRQENABLE	\
 	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
-	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
 
 /* These interrupts are directed to IRQ1 when two IRQ lines are available */
 #define MCI_IRQ1MASK \
@@ -177,9 +177,6 @@
 	struct timer_list	timer;
 	unsigned int		oldstat;
 
-	bool			blockend;
-	bool			dataend;
-
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 5decfd0..153ab97 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -383,14 +383,30 @@
 	host->curr.user_pages = 0;
 
 	box = &nc->cmd[0];
-	for (i = 0; i < host->dma.num_ents; i++) {
+
+	/* location of command block must be 64 bit aligned */
+	BUG_ON(host->dma.cmd_busaddr & 0x07);
+
+	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+
+	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+			host->dma.num_ents, host->dma.dir);
+	if (n == 0) {
+		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+			mmc_hostname(host->mmc));
+		host->dma.sg = NULL;
+		host->dma.num_ents = 0;
+		return -ENOMEM;
+	}
+
+	for_each_sg(host->dma.sg, sg, n, i) {
+
 		box->cmd = CMD_MODE_BOX;
 
-	/* Initialize sg dma address */
-	sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
-				+ sg->offset;
-
-	if (i == (host->dma.num_ents - 1))
+		if (i == n - 1)
 			box->cmd |= CMD_LC;
 		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
 			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -418,27 +434,6 @@
 			box->cmd |= CMD_DST_CRCI(crci);
 		}
 		box++;
-		sg++;
-	}
-
-	/* location of command block must be 64 bit aligned */
-	BUG_ON(host->dma.cmd_busaddr & 0x07);
-
-	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
-	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
-			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
-	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
-	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-			host->dma.num_ents, host->dma.dir);
-/* dsb inside dma_map_sg will write nc out to mem as well */
-
-	if (n != host->dma.num_ents) {
-		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
-			mmc_hostname(host->mmc));
-		host->dma.sg = NULL;
-		host->dma.num_ents = 0;
-		return -ENOMEM;
 	}
 
 	return 0;
@@ -1331,9 +1326,6 @@
 	if (host->timer.function)
 		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
 
-#if BUSCLK_PWRSAVE
-	msmsdcc_disable_clocks(host, 1);
-#endif
 	return 0;
  cmd_irq_free:
 	free_irq(cmd_irqres->start, host);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb..4428594 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/dma.h>
 #include <asm/irq.h>
@@ -141,10 +142,49 @@
 
 	struct work_struct	datawork;
 	spinlock_t		lock;
+
+	struct regulator	*vcc;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 
+static inline void mxcmci_init_ocr(struct mxcmci_host *host)
+{
+	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+	if (IS_ERR(host->vcc)) {
+		host->vcc = NULL;
+	} else {
+		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+		if (host->pdata && host->pdata->ocr_avail)
+			dev_warn(mmc_dev(host->mmc),
+				"pdata->ocr_avail will not be used\n");
+	}
+
+	if (host->vcc == NULL) {
+		/* fall-back to platform data */
+		if (host->pdata && host->pdata->ocr_avail)
+			host->mmc->ocr_avail = host->pdata->ocr_avail;
+		else
+			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	}
+}
+
+static inline void mxcmci_set_power(struct mxcmci_host *host,
+				    unsigned char power_mode,
+				    unsigned int vdd)
+{
+	if (host->vcc) {
+		if (power_mode == MMC_POWER_UP)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+		else if (power_mode == MMC_POWER_OFF)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+	}
+
+	if (host->pdata && host->pdata->setpower)
+		host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
 static inline int mxcmci_use_dma(struct mxcmci_host *host)
 {
 	return host->do_dma;
@@ -680,9 +720,9 @@
 		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 
 	if (host->power_mode != ios->power_mode) {
-		if (host->pdata && host->pdata->setpower)
-			host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 		host->power_mode = ios->power_mode;
+
 		if (ios->power_mode == MMC_POWER_ON)
 			host->cmdat |= CMD_DAT_CONT_INIT;
 	}
@@ -807,10 +847,7 @@
 	host->pdata = pdev->dev.platform_data;
 	spin_lock_init(&host->lock);
 
-	if (host->pdata && host->pdata->ocr_avail)
-		mmc->ocr_avail = host->pdata->ocr_avail;
-	else
-		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	mxcmci_init_ocr(host);
 
 	if (host->pdata && host->pdata->dat3_card_detect)
 		host->default_irq_mask =
@@ -915,6 +952,9 @@
 
 	mmc_remove_host(mmc);
 
+	if (host->vcc)
+		regulator_put(host->vcc);
+
 	if (host->pdata && host->pdata->exit)
 		host->pdata->exit(&pdev->dev, mmc);
 
@@ -927,7 +967,6 @@
 	clk_put(host->clk);
 
 	release_mem_region(host->res->start, resource_size(host->res));
-	release_resource(host->res);
 
 	mmc_free_host(mmc);
 
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 0000000..2aeef4f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
+/*
+ * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
+ *
+ * Author: Saeed Bishara <saeed@marvell.com>
+ *	   Mike Rapoport <mike@compulab.co.il>
+ * Based on sdhci-cns3xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
+{
+	u16 ret;
+
+	switch (reg) {
+	case SDHCI_HOST_VERSION:
+	case SDHCI_SLOT_INT_STATUS:
+		/* those registers don't exist */
+		return 0;
+	default:
+		ret = readw(host->ioaddr + reg);
+	}
+	return ret;
+}
+
+static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
+{
+	u32 ret;
+
+	switch (reg) {
+	case SDHCI_CAPABILITIES:
+		ret = readl(host->ioaddr + reg);
+		/* Mask the support for 3.0V */
+		ret &= ~SDHCI_CAN_VDD_300;
+		break;
+	default:
+		ret = readl(host->ioaddr + reg);
+	}
+	return ret;
+}
+
+static struct sdhci_ops sdhci_dove_ops = {
+	.read_w	= sdhci_dove_readw,
+	.read_l	= sdhci_dove_readl,
+};
+
+struct sdhci_pltfm_data sdhci_dove_pdata = {
+	.ops	= &sdhci_dove_ops,
+	.quirks	= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+		  SDHCI_QUIRK_NO_BUSY_IRQ |
+		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		  SDHCI_QUIRK_FORCE_DMA,
+};
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index fa19d84..dd84124f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
  * your option) any later version.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,8 +21,12 @@
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/mmc/host.h>
+#ifdef CONFIG_PPC
 #include <asm/machdep.h>
+#endif
 #include "sdhci-of.h"
 #include "sdhci.h"
 
@@ -112,7 +117,11 @@
 		return true;
 
 	/* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
 	return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+	return false;
+#endif
 }
 
 static int __devinit sdhci_of_probe(struct platform_device *ofdev,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c246..0dc905b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 };
 
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP		0xD3
+#define O2_SD_MULTI_VCC3V	0xEE
+#define O2_SD_CLKREQ		0xEC
+#define O2_SD_CAPS		0xE0
+#define O2_SD_ADMA1		0xE2
+#define O2_SD_ADMA2		0xE7
+#define O2_SD_INF_MOD		0xF1
+
+static int o2_probe(struct sdhci_pci_chip *chip)
+{
+	int ret;
+	u8 scratch;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_O2_8220:
+	case PCI_DEVICE_ID_O2_8221:
+	case PCI_DEVICE_ID_O2_8320:
+	case PCI_DEVICE_ID_O2_8321:
+		/* This extra setup is required due to broken ADMA. */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* Set Multi 3 to VCC3V# */
+		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+		/* Disable CLK_REQ# support after media DET */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x20;
+		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+		/* Choose capabilities, enable SDMA.  We have to write 0x01
+		 * to the capabilities register first to unlock it.
+		 */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x01;
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+		/* Disable ADMA1/2 */
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+		/* Disable the infinite transfer mode */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x08;
+		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+	}
+
+	return 0;
+}
+
 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
 {
 	u8 scratch;
@@ -204,6 +272,7 @@
 static int jmicron_probe(struct sdhci_pci_chip *chip)
 {
 	int ret;
+	u16 mmcdev = 0;
 
 	if (chip->pdev->revision == 0) {
 		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@
 	 * 2. The MMC interface has a lower subfunction number
 	 *    than the SD interface.
 	 */
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+	if (mmcdev) {
 		struct pci_dev *sd_dev;
 
 		sd_dev = NULL;
 		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
-			PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
+						mmcdev, sd_dev)) != NULL) {
 			if ((PCI_SLOT(chip->pdev->devfn) ==
 				PCI_SLOT(sd_dev->devfn)) &&
 				(chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@
 			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 	}
 
+	/* JM388 MMC doesn't support 1.8V while SD supports it */
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31 |
+			MMC_VDD_165_195; /* allow 1.8V */
+		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+	}
+
 	/*
 	 * The secondary interface requires a bit set to get the
 	 * interrupts.
 	 */
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 1);
 
+	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
 	return 0;
 }
 
@@ -305,7 +391,8 @@
 	if (dead)
 		return;
 
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 0);
 }
 
@@ -313,7 +400,8 @@
 {
 	int i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 0);
 	}
@@ -325,7 +413,8 @@
 {
 	int ret, i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 1);
 	}
@@ -339,6 +428,10 @@
 	return 0;
 }
 
+static const struct sdhci_pci_fixes sdhci_o2 = {
+	.probe		= o2_probe,
+};
+
 static const struct sdhci_pci_fixes sdhci_jmicron = {
 	.probe		= jmicron_probe,
 
@@ -510,6 +603,22 @@
 	},
 
 	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_SD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
 		.vendor		= PCI_VENDOR_ID_SYSKONNECT,
 		.device		= 0x8000,
 		.subvendor	= PCI_ANY_ID,
@@ -589,6 +698,46 @@
 		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
 	},
 
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8120,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8220,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8221,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8320,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8321,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
 	{	/* Generic SD host controller */
 		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
 	},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89..dbab040 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@
 #ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
 	{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
 #endif
+#ifdef CONFIG_MMC_SDHCI_DOVE
+	{ "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
+#endif
+#ifdef CONFIG_MMC_SDHCI_TEGRA
+	{ "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
+#endif
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48..ea2e44d 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -22,5 +22,7 @@
 
 extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
 extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
+extern struct sdhci_pltfm_data sdhci_dove_pdata;
+extern struct sdhci_pltfm_data sdhci_tegra_pdata;
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862..5309ab9 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@
 	if (!clksrc)
 		return UINT_MAX;
 
+	/*
+	 * The external clock divider steps by 1, unlike the host
+	 * controller's divider, when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL.
+	 */
+	if (ourhost->pdata->clk_type) {
+		rate = clk_round_rate(clksrc, wanted);
+		return wanted - rate;
+	}
+
 	rate = clk_get_rate(clksrc);
 
 	for (div = 1; div < 256; div *= 2) {
@@ -232,10 +241,79 @@
 	return min;
 }
 
+/* sdhci_cmu_get_max_clock - callback to get the maximum clock frequency. */
+static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
+}
+
+/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
+static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/*
+	 * The initial clock must be in the 100 kHz-400 kHz range,
+	 * so use the upper bound of that range.
+	 */
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
+}
+
+/* sdhci_cmu_set_clock - callback on clock change. */
+static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/* don't bother if the clock is going off */
+	if (clock == 0)
+		return;
+
+	sdhci_s3c_set_clock(host, clock);
+
+	clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+	host->clock = clock;
+}
+
+/**
+ * sdhci_s3c_platform_8bit_width - support 8bit buswidth
+ * @host: The SDHCI host being queried
+ * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
+ *
+ * The controller supports an 8-bit bus but is not a v3 controller,
+ * so provide platform_8bit_width() to enable 8-bit transfers.
+ */
+static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
+{
+	u8 ctrl;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+	switch (width) {
+	case MMC_BUS_WIDTH_8:
+		ctrl |= SDHCI_CTRL_8BITBUS;
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+		break;
+	case MMC_BUS_WIDTH_4:
+		ctrl |= SDHCI_CTRL_4BITBUS;
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+		break;
+	default:
+		break;
+	}
+
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+	return 0;
+}
+
 static struct sdhci_ops sdhci_s3c_ops = {
 	.get_max_clock		= sdhci_s3c_get_max_clk,
 	.set_clock		= sdhci_s3c_set_clock,
 	.get_min_clock		= sdhci_s3c_get_min_clock,
+	.platform_8bit_width	= sdhci_s3c_platform_8bit_width,
 };
 
 static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -361,6 +439,13 @@
 
 		clks++;
 		sc->clk_bus[ptr] = clk;
+
+		/*
+		 * Save the current clock index so the overriding
+		 * callbacks know which bus clock is in use.
+		 */
+		sc->cur_clk = ptr;
+
 		clk_enable(clk);
 
 		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -421,12 +506,29 @@
 	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
 		host->mmc->caps = MMC_CAP_NONREMOVABLE;
 
+	if (pdata->host_caps)
+		host->mmc->caps |= pdata->host_caps;
+
 	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
 			 SDHCI_QUIRK_32BIT_DMA_SIZE);
 
 	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
 	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
 
+	/*
+	 * If the controller does not have an internal clock divider,
+	 * use the overriding callbacks instead of the defaults.
+	 */
+	if (pdata->clk_type) {
+		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+	}
+
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 0000000..4823ee9
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <mach/gpio.h>
+#include <mach/sdhci.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
+{
+	u32 val;
+
+	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+		/* Use wp_gpio here instead? */
+		val = readl(host->ioaddr + reg);
+		return val | SDHCI_WRITE_PROTECT;
+	}
+
+	return readl(host->ioaddr + reg);
+}
+
+static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+{
+	if (unlikely(reg == SDHCI_HOST_VERSION)) {
+		/* Erratum: Version register is invalid in HW. */
+		return SDHCI_SPEC_200;
+	}
+
+	return readw(host->ioaddr + reg);
+}
+
+static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+	/* Seems like we're getting spurious timeout and crc errors, so
+	 * disable signalling of them. In case of real errors software
+	 * timers should take care of eventually detecting them.
+	 */
+	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
+		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
+
+	writel(val, host->ioaddr + reg);
+
+	if (unlikely(reg == SDHCI_INT_ENABLE)) {
+		/* Erratum: Must enable block gap interrupt detection */
+		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+		if (val & SDHCI_INT_CARD_INT)
+			gap_ctrl |= 0x8;
+		else
+			gap_ctrl &= ~0x8;
+		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+	}
+}
+
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
+	struct tegra_sdhci_platform_data *plat;
+
+	plat = pdev->dev.platform_data;
+
+	if (!gpio_is_valid(plat->wp_gpio))
+		return -1;
+
+	return gpio_get_value(plat->wp_gpio);
+}
+
+static irqreturn_t carddetect_irq(int irq, void *data)
+{
+	struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+	tasklet_schedule(&sdhost->card_tasklet);
+	return IRQ_HANDLED;
+}
+
+static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+	u32 ctrl;
+
+	plat = pdev->dev.platform_data;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+		ctrl |= SDHCI_CTRL_8BITBUS;
+	} else {
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+		if (bus_width == MMC_BUS_WIDTH_4)
+			ctrl |= SDHCI_CTRL_4BITBUS;
+		else
+			ctrl &= ~SDHCI_CTRL_4BITBUS;
+	}
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	return 0;
+}
+
+
+static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
+				  struct sdhci_pltfm_data *pdata)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+	struct clk *clk;
+	int rc;
+
+	plat = pdev->dev.platform_data;
+	if (plat == NULL) {
+		dev_err(mmc_dev(host->mmc), "missing platform data\n");
+		return -ENXIO;
+	}
+
+	if (gpio_is_valid(plat->power_gpio)) {
+		rc = gpio_request(plat->power_gpio, "sdhci_power");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate power gpio\n");
+			goto out;
+		}
+		tegra_gpio_enable(plat->power_gpio);
+		gpio_direction_output(plat->power_gpio, 1);
+	}
+
+	if (gpio_is_valid(plat->cd_gpio)) {
+		rc = gpio_request(plat->cd_gpio, "sdhci_cd");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate cd gpio\n");
+			goto out_power;
+		}
+		tegra_gpio_enable(plat->cd_gpio);
+		gpio_direction_input(plat->cd_gpio);
+
+		rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+				 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+				 mmc_hostname(host->mmc), host);
+
+		if (rc)	{
+			dev_err(mmc_dev(host->mmc), "request irq error\n");
+			goto out_cd;
+		}
+
+	}
+
+	if (gpio_is_valid(plat->wp_gpio)) {
+		rc = gpio_request(plat->wp_gpio, "sdhci_wp");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate wp gpio\n");
+			goto out_cd;
+		}
+		tegra_gpio_enable(plat->wp_gpio);
+		gpio_direction_input(plat->wp_gpio);
+	}
+
+	clk = clk_get(mmc_dev(host->mmc), NULL);
+	if (IS_ERR(clk)) {
+		dev_err(mmc_dev(host->mmc), "clk err\n");
+		rc = PTR_ERR(clk);
+		goto out_wp;
+	}
+	clk_enable(clk);
+	pltfm_host->clk = clk;
+
+	if (plat->is_8bit)
+		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+	return 0;
+
+out_wp:
+	if (gpio_is_valid(plat->wp_gpio)) {
+		tegra_gpio_disable(plat->wp_gpio);
+		gpio_free(plat->wp_gpio);
+	}
+
+out_cd:
+	if (gpio_is_valid(plat->cd_gpio)) {
+		tegra_gpio_disable(plat->cd_gpio);
+		gpio_free(plat->cd_gpio);
+	}
+
+out_power:
+	if (gpio_is_valid(plat->power_gpio)) {
+		tegra_gpio_disable(plat->power_gpio);
+		gpio_free(plat->power_gpio);
+	}
+
+out:
+	return rc;
+}
+
+static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+
+	plat = pdev->dev.platform_data;
+
+	if (gpio_is_valid(plat->wp_gpio)) {
+		tegra_gpio_disable(plat->wp_gpio);
+		gpio_free(plat->wp_gpio);
+	}
+
+	if (gpio_is_valid(plat->cd_gpio)) {
+		tegra_gpio_disable(plat->cd_gpio);
+		gpio_free(plat->cd_gpio);
+	}
+
+	if (gpio_is_valid(plat->power_gpio)) {
+		tegra_gpio_disable(plat->power_gpio);
+		gpio_free(plat->power_gpio);
+	}
+
+	clk_disable(pltfm_host->clk);
+	clk_put(pltfm_host->clk);
+}
+
+static struct sdhci_ops tegra_sdhci_ops = {
+	.get_ro     = tegra_sdhci_get_ro,
+	.read_l     = tegra_sdhci_readl,
+	.read_w     = tegra_sdhci_readw,
+	.write_l    = tegra_sdhci_writel,
+	.platform_8bit_width = tegra_sdhci_8bit,
+};
+
+struct sdhci_pltfm_data sdhci_tegra_pdata = {
+	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
+		  SDHCI_QUIRK_NO_HISPD_BIT |
+		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+	.ops  = &tegra_sdhci_ops,
+	.init = tegra_sdhci_pltfm_init,
+	.exit = tegra_sdhci_pltfm_exit,
+};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db42..9e15f41 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
 
 #include <linux/leds.h>
 
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/host.h>
 
 #include "sdhci.h"
@@ -77,8 +78,11 @@
 	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
 		sdhci_readw(host, SDHCI_ACMD12_ERR),
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
+	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
+		sdhci_readl(host, SDHCI_CAPABILITIES_1));
+	printk(KERN_DEBUG DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
+		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
 
 	if (host->flags & SDHCI_USE_ADMA)
@@ -1518,7 +1522,11 @@
 
 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
 		host->data->error = -ETIMEDOUT;
-	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+	else if (intmask & SDHCI_INT_DATA_END_BIT)
+		host->data->error = -EILSEQ;
+	else if ((intmask & SDHCI_INT_DATA_CRC) &&
+		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+			!= MMC_BUS_TEST_R)
 		host->data->error = -EILSEQ;
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1736,7 +1744,7 @@
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
-	unsigned int caps;
+	unsigned int caps, ocr_avail;
 	int ret;
 
 	WARN_ON(host == NULL);
@@ -1890,13 +1898,26 @@
 	    mmc_card_is_removable(mmc))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-	mmc->ocr_avail = 0;
+	ocr_avail = 0;
 	if (caps & SDHCI_CAN_VDD_330)
-		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
+		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
 	if (caps & SDHCI_CAN_VDD_300)
-		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
+		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
 	if (caps & SDHCI_CAN_VDD_180)
-		mmc->ocr_avail |= MMC_VDD_165_195;
+		ocr_avail |= MMC_VDD_165_195;
+
+	mmc->ocr_avail = ocr_avail;
+	mmc->ocr_avail_sdio = ocr_avail;
+	if (host->ocr_avail_sdio)
+		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
+	mmc->ocr_avail_sd = ocr_avail;
+	if (host->ocr_avail_sd)
+		mmc->ocr_avail_sd &= host->ocr_avail_sd;
+	else /* normal SD controllers don't support 1.8V */
+		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
+	mmc->ocr_avail_mmc = ocr_avail;
+	if (host->ocr_avail_mmc)
+		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
 
 	if (mmc->ocr_avail == 0) {
 		printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1928,10 +1949,14 @@
 	 * of bytes. When doing hardware scatter/gather, each entry cannot
 	 * be larger than 64 KiB though.
 	 */
-	if (host->flags & SDHCI_USE_ADMA)
-		mmc->max_seg_size = 65536;
-	else
+	if (host->flags & SDHCI_USE_ADMA) {
+		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+			mmc->max_seg_size = 65535;
+		else
+			mmc->max_seg_size = 65536;
+	} else {
 		mmc->max_seg_size = mmc->max_req_size;
+	}
 
 	/*
 	 * Maximum block size. This varies from controller to controller and
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f0..6e0969e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -52,6 +52,7 @@
 #define  SDHCI_CMD_RESP_SHORT_BUSY 0x03
 
 #define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
+#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
 
 #define SDHCI_RESPONSE		0x10
 
@@ -165,7 +166,7 @@
 #define  SDHCI_CAN_VDD_180	0x04000000
 #define  SDHCI_CAN_64BIT	0x10000000
 
-/* 44-47 reserved for more caps */
+#define SDHCI_CAPABILITIES_1	0x44
 
 #define SDHCI_MAX_CURRENT	0x48
 
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f472c27..bbc298f 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -446,7 +446,7 @@
 	mmc->max_seg_size = 1024 * 512;
 	mmc->max_blk_size = 512;
 
-	/* reset the controler */
+	/* reset the controller */
 	if (sdricoh_reset(host)) {
 		dev_dbg(dev, "could not reset\n");
 		result = -EIO;
@@ -478,7 +478,7 @@
 	dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
 		" %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
 
-	/* search pci cardbus bridge that contains the mmc controler */
+	/* search pci cardbus bridge that contains the mmc controller */
 	/* the io region is already claimed by yenta_socket... */
 	while ((pci_dev =
 		pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a8..e3c6ef2 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -25,16 +25,261 @@
  *   double buffer support
  *
  */
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/device.h>
+
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/dmaengine.h>
-#include <linux/mmc/host.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
-#include "tmio_mmc.h"
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+#define CTL_STATUS 0x1c
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_RESET_SD 0xe0
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND    0x00000001
+#define TMIO_STAT_DATAEND       0x00000004
+#define TMIO_STAT_CARD_REMOVE   0x00000008
+#define TMIO_STAT_CARD_INSERT   0x00000010
+#define TMIO_STAT_SIGSTATE      0x00000020
+#define TMIO_STAT_WRPROTECT     0x00000080
+#define TMIO_STAT_CARD_REMOVE_A 0x00000100
+#define TMIO_STAT_CARD_INSERT_A 0x00000200
+#define TMIO_STAT_SIGSTATE_A    0x00000400
+#define TMIO_STAT_CMD_IDX_ERR   0x00010000
+#define TMIO_STAT_CRCFAIL       0x00020000
+#define TMIO_STAT_STOPBIT_ERR   0x00040000
+#define TMIO_STAT_DATATIMEOUT   0x00080000
+#define TMIO_STAT_RXOVERFLOW    0x00100000
+#define TMIO_STAT_TXUNDERRUN    0x00200000
+#define TMIO_STAT_CMDTIMEOUT    0x00400000
+#define TMIO_STAT_RXRDY         0x01000000
+#define TMIO_STAT_TXRQ          0x02000000
+#define TMIO_STAT_ILL_FUNC      0x20000000
+#define TMIO_STAT_CMD_BUSY      0x40000000
+#define TMIO_STAT_ILL_ACCESS    0x80000000
+
+/* Definitions for values the CTL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ	0x0001
+#define TMIO_SDIO_STAT_EXPUB52	0x4000
+#define TMIO_SDIO_STAT_EXWT	0x8000
+#define TMIO_SDIO_MASK_ALL	0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL           0x837f031d
+#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
+#define enable_mmc_irqs(host, i) \
+	do { \
+		u32 mask;\
+		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+		mask &= ~((i) & TMIO_MASK_IRQ); \
+		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+	} while (0)
+
+#define disable_mmc_irqs(host, i) \
+	do { \
+		u32 mask;\
+		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+		mask |= ((i) & TMIO_MASK_IRQ); \
+		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+	} while (0)
+
+#define ack_mmc_irqs(host, i) \
+	do { \
+		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
+	} while (0)
+
+/* This is arbitrary; no one has needed any higher alignment yet */
+#define MAX_ALIGN 4
+
+struct tmio_mmc_host {
+	void __iomem *ctl;
+	unsigned long bus_shift;
+	struct mmc_command      *cmd;
+	struct mmc_request      *mrq;
+	struct mmc_data         *data;
+	struct mmc_host         *mmc;
+	int                     irq;
+	unsigned int		sdio_irq_enabled;
+
+	/* Callbacks for clock / power control */
+	void (*set_pwr)(struct platform_device *host, int state);
+	void (*set_clk_div)(struct platform_device *host, int state);
+
+	/* pio related stuff */
+	struct scatterlist      *sg_ptr;
+	struct scatterlist      *sg_orig;
+	unsigned int            sg_len;
+	unsigned int            sg_off;
+
+	struct platform_device *pdev;
+
+	/* DMA support */
+	struct dma_chan		*chan_rx;
+	struct dma_chan		*chan_tx;
+	struct tasklet_struct	dma_complete;
+	struct tasklet_struct	dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+	unsigned int            dma_sglen;
+	u8			bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
+	struct scatterlist	bounce_sg;
+#endif
+
+	/* Track lost interrupts */
+	struct delayed_work	delayed_reset_work;
+	spinlock_t		lock;
+	unsigned long		last_req_ts;
+};
+
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
+
+static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+	return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+		u16 *buf, int count)
+{
+	readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+	return readw(host->ctl + (addr << host->bus_shift)) |
+	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+	writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+		u16 *buf, int count)
+{
+	writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+	writew(val, host->ctl + (addr << host->bus_shift));
+	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
+{
+	host->sg_len = data->sg_len;
+	host->sg_ptr = data->sg;
+	host->sg_orig = data->sg;
+	host->sg_off = 0;
+}
+
+static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
+{
+	host->sg_ptr = sg_next(host->sg_ptr);
+	host->sg_off = 0;
+	return --host->sg_len;
+}
+
+static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+	local_irq_save(*flags);
+	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
+{
+	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+	local_irq_restore(*flags);
+}
+
+#ifdef CONFIG_MMC_DEBUG
+
+#define STATUS_TO_TEXT(a) \
+	do { \
+		if (status & TMIO_STAT_##a) \
+			printk(#a); \
+	} while (0)
+
+void pr_debug_status(u32 status)
+{
+	printk(KERN_DEBUG "status: %08x = ", status);
+	STATUS_TO_TEXT(CARD_REMOVE);
+	STATUS_TO_TEXT(CARD_INSERT);
+	STATUS_TO_TEXT(SIGSTATE);
+	STATUS_TO_TEXT(WRPROTECT);
+	STATUS_TO_TEXT(CARD_REMOVE_A);
+	STATUS_TO_TEXT(CARD_INSERT_A);
+	STATUS_TO_TEXT(SIGSTATE_A);
+	STATUS_TO_TEXT(CMD_IDX_ERR);
+	STATUS_TO_TEXT(STOPBIT_ERR);
+	STATUS_TO_TEXT(ILL_FUNC);
+	STATUS_TO_TEXT(CMD_BUSY);
+	STATUS_TO_TEXT(CMDRESPEND);
+	STATUS_TO_TEXT(DATAEND);
+	STATUS_TO_TEXT(CRCFAIL);
+	STATUS_TO_TEXT(DATATIMEOUT);
+	STATUS_TO_TEXT(CMDTIMEOUT);
+	STATUS_TO_TEXT(RXOVERFLOW);
+	STATUS_TO_TEXT(TXUNDERRUN);
+	STATUS_TO_TEXT(RXRDY);
+	STATUS_TO_TEXT(TXRQ);
+	STATUS_TO_TEXT(ILL_ACCESS);
+	printk("\n");
+}
+
+#else
+#define pr_debug_status(s)  do { } while (0)
+#endif
+
+static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+
+	if (enable) {
+		host->sdio_irq_enabled = 1;
+		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
+			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
+	} else {
+		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
+		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+		host->sdio_irq_enabled = 0;
+	}
+}
 
 static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
 {
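A note on the accessor block that the hunk above brings in from the old header: the controller exposes only 16-bit ports, so 32-bit registers such as CTL_STATUS are read as two halves and recombined, with bus_shift stretching the offsets for buses that space the ports further apart. A small standalone sketch of that arithmetic (the half-words below are invented for illustration, and compose32 is a hypothetical helper mirroring sd_ctrl_read32):

#include <stdint.h>
#include <stdio.h>

#define TMIO_STAT_CMDRESPEND 0x00000001u
#define TMIO_STAT_RXRDY      0x01000000u

/* Combine two 16-bit halves into one 32-bit register value, the way
 * sd_ctrl_read32() does with two consecutive readw() calls. */
static uint32_t compose32(uint16_t lo, uint16_t hi)
{
	return (uint32_t)lo | ((uint32_t)hi << 16);
}

int main(void)
{
	/* Pretend the two halves of CTL_STATUS read back as follows. */
	uint32_t status = compose32(0x0001, 0x0100);

	if (status & TMIO_STAT_CMDRESPEND)
		printf("command response end\n");
	if (status & TMIO_STAT_RXRDY)
		printf("read data available\n");
	return 0;
}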
@@ -55,8 +300,23 @@
 
 static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
 {
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+
+	/*
+	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
+	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
+	 * device IRQ here and restore the SDIO IRQ mask before
+	 * re-enabling the device IRQ.
+	 */
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		disable_irq(host->irq);
 	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
 	msleep(10);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+		enable_irq(host->irq);
+	}
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
 	msleep(10);
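To make the quirk explicit: writing CTL_CLK_AND_WAIT_CTL clobbers the SDIO IRQ mask, so the code above parks the parent interrupt line, does the write, then reinstates whatever mask matches the logical sdio_irq_enabled state before re-enabling the line. A toy model of that save-and-restore discipline, with the register side effect simulated rather than taken from hardware:

#include <stdbool.h>
#include <stdio.h>

/* Simulated registers: writing the clock control has the side effect of
 * unmasking SDIO IRQs, which is the quirk being worked around. */
static unsigned short sdio_irq_mask = 0xc007;	/* everything masked */
static unsigned short clk_ctl;

static void write_clk_ctl(unsigned short val)
{
	clk_ctl = val;
	sdio_irq_mask = 0x0000;		/* side effect: all unmasked */
}

static void gate_clock(bool sdio_irq_enabled)
{
	/* disable_irq() would go here in the driver */
	write_clk_ctl(0x0000);
	/* restore the mask to match the logical enable state */
	sdio_irq_mask = sdio_irq_enabled ? 0xc006 : 0xc007;
	/* enable_irq() would go here */
}

int main(void)
{
	gate_clock(false);
	printf("mask after gating: 0x%04x\n", (unsigned)sdio_irq_mask);
	return 0;
}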
@@ -64,11 +324,21 @@
 
 static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
 {
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
 	msleep(10);
+	/* see comment in tmio_mmc_clk_stop above */
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		disable_irq(host->irq);
 	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
 	msleep(10);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+		enable_irq(host->irq);
+	}
 }
 
 static void reset(struct tmio_mmc_host *host)
@@ -82,15 +352,60 @@
 	msleep(10);
 }
 
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+						  delayed_reset_work.work);
+	struct mmc_request *mrq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	mrq = host->mrq;
+
+	/* request already finished */
+	if (!mrq
+	    || time_is_after_jiffies(host->last_req_ts +
+		msecs_to_jiffies(2000))) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	dev_warn(&host->pdev->dev,
+		"timeout waiting for hardware interrupt (CMD%u)\n",
+		mrq->cmd->opcode);
+
+	if (host->data)
+		host->data->error = -ETIMEDOUT;
+	else if (host->cmd)
+		host->cmd->error = -ETIMEDOUT;
+	else
+		mrq->cmd->error = -ETIMEDOUT;
+
+	host->cmd = NULL;
+	host->data = NULL;
+	host->mrq = NULL;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	reset(host);
+
+	mmc_request_done(host->mmc, mrq);
+}
+
 static void
 tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
 	struct mmc_request *mrq = host->mrq;
 
+	if (!mrq)
+		return;
+
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
 
+	cancel_delayed_work(&host->delayed_reset_work);
+
 	mmc_request_done(host->mmc, mrq);
 }
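The watchdog introduced in this hunk is a timestamp comparison at heart: each request records jiffies in last_req_ts and schedules a 2-second delayed work; when the work fires it only declares a timeout if that timestamp is older than 2 seconds, since a newer one means another request is already in flight. A userspace sketch of the same check, using a monotonic clock in place of jiffies (request_timed_out is a hypothetical stand-in for the time_is_after_jiffies() test):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define REQ_TIMEOUT_MS 2000

/* True if the request started at 'start' has exceeded the timeout by the
 * time 'now' was sampled. */
static bool request_timed_out(struct timespec start, struct timespec now)
{
	long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			  (now.tv_nsec - start.tv_nsec) / 1000000;
	return elapsed_ms >= REQ_TIMEOUT_MS;
}

int main(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	clock_gettime(CLOCK_MONOTONIC, &now);
	printf("timed out: %d\n", request_timed_out(start, now));
	return 0;
}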
 
@@ -200,6 +515,7 @@
 	return;
 }
 
+/* needs to be called with host->lock held */
 static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
@@ -233,6 +549,8 @@
 	if (data->flags & MMC_DATA_READ) {
 		if (!host->chan_rx)
 			disable_mmc_irqs(host, TMIO_MASK_READOP);
+		else
+			tmio_check_bounce_buffer(host);
 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
 			host->mrq);
 	} else {
@@ -254,10 +572,12 @@
 
 static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 {
-	struct mmc_data *data = host->data;
+	struct mmc_data *data;
+	spin_lock(&host->lock);
+	data = host->data;
 
 	if (!data)
-		return;
+		goto out;
 
 	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
 		/*
@@ -278,6 +598,8 @@
 	} else {
 		tmio_mmc_do_data_irq(host);
 	}
+out:
+	spin_unlock(&host->lock);
 }
 
 static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -286,9 +608,11 @@
 	struct mmc_command *cmd = host->cmd;
 	int i, addr;
 
+	spin_lock(&host->lock);
+
 	if (!host->cmd) {
 		pr_debug("Spurious CMD irq\n");
-		return;
+		goto out;
 	}
 
 	host->cmd = NULL;
@@ -324,8 +648,7 @@
 			if (!host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
 		} else {
-			struct dma_chan *chan = host->chan_tx;
-			if (!chan)
+			if (!host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 			else
 				tasklet_schedule(&host->dma_issue);
@@ -334,13 +657,19 @@
 		tmio_mmc_finish_request(host);
 	}
 
+out:
+	spin_unlock(&host->lock);
+
 	return;
 }
 
 static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
 	unsigned int ireg, irq_mask, status;
+	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
 
 	pr_debug("MMC IRQ begin\n");
 
@@ -348,6 +677,29 @@
 	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
 	ireg = status & TMIO_MASK_IRQ & ~irq_mask;
 
+	sdio_ireg = 0;
+	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
+		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
+
+		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
+
+		if (sdio_ireg && !host->sdio_irq_enabled) {
+			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
+				   sdio_status, sdio_irq_mask, sdio_ireg);
+			tmio_mmc_enable_sdio_irq(host->mmc, 0);
+			goto out;
+		}
+
+		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
+			mmc_signal_sdio_irq(host->mmc);
+
+		if (sdio_ireg)
+			goto out;
+	}
+
 	pr_debug_status(status);
 	pr_debug_status(ireg);
 
@@ -375,8 +727,10 @@
  */
 
 		/* Command completion */
-		if (ireg & TMIO_MASK_CMD) {
-			ack_mmc_irqs(host, TMIO_MASK_CMD);
+		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+			ack_mmc_irqs(host,
+				     TMIO_STAT_CMDRESPEND |
+				     TMIO_STAT_CMDTIMEOUT);
 			tmio_mmc_cmd_irq(host, status);
 		}
 
@@ -407,6 +761,16 @@
 }
 
 #ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+	if (host->sg_ptr == &host->bounce_sg) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+	}
+}
+
 static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 {
 #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -427,12 +791,39 @@
 		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 }
 
-static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
-	int ret;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	/* Only a single sg element may be unaligned; use our bounce buffer in that case */
+	if (!aligned) {
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0) {
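The eligibility test above reduces to mask arithmetic: align is the set of low bits the DMA engine cannot tolerate ((1 << alignment_shift) - 1), every element's length must be a multiple of that granularity, and at most one lone element may have a misaligned offset, in which case it is routed through the page-sized bounce buffer. A simplified standalone sketch of that decision (it omits the MAX_ALIGN cap, and dma_ok plus the segment layout are invented for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct seg { size_t offset, length; };

/* True if the list can go to DMA directly or via a single bounce buffer;
 * false means fall back to PIO.  '*use_bounce' reports which path. */
static bool dma_ok(const struct seg *sg, int n, unsigned shift,
		   size_t bounce_size, bool *use_bounce)
{
	size_t align = (1u << shift) - 1;
	bool aligned = true;
	int i;

	*use_bounce = false;
	for (i = 0; i < n; i++) {
		if (sg[i].offset & align)
			aligned = false;
		if (sg[i].length & align)
			return false;	/* length not a multiple: PIO */
	}
	if (aligned)
		return true;
	/* A single, not-too-large unaligned element may be bounced. */
	if (n == 1 && sg[0].length <= bounce_size) {
		*use_bounce = true;
		return true;
	}
	return false;
}

int main(void)
{
	struct seg sg[] = { { .offset = 2, .length = 512 } };
	bool bounce;
	bool ok = dma_ok(sg, 1, 2, 4096, &bounce);

	printf("dma=%d bounce=%d\n", ok, bounce);
	return 0;
}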
@@ -442,21 +833,21 @@
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		} else {
 			chan->device->device_issue_pending(chan);
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+pio:
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -471,24 +862,49 @@
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, host->cookie, host->sg_len);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie, host->sg_len);
 }
 
-static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
-	int ret;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	/* Only a single sg element may be unaligned; use our bounce buffer in that case */
+	if (!aligned) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0) {
@@ -498,19 +914,19 @@
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+pio:
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -525,30 +941,22 @@
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
-		desc, host->cookie);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie);
 }
 
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
 	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
-			return tmio_mmc_start_dma_rx(host);
+			tmio_mmc_start_dma_rx(host);
 	} else {
 		if (host->chan_tx)
-			return tmio_mmc_start_dma_tx(host);
+			tmio_mmc_start_dma_tx(host);
 	}
-
-	return 0;
 }
 
 static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -562,6 +970,12 @@
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!host->data)
+		goto out;
 
 	if (host->data->flags & MMC_DATA_READ)
 		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -571,6 +985,8 @@
 			     DMA_TO_DEVICE);
 
 	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 /* It might be necessary to make filter MFD specific */
@@ -584,9 +1000,6 @@
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
 				 struct tmio_mmc_data *pdata)
 {
-	host->cookie = -EINVAL;
-	host->desc = NULL;
-
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
 	if (pdata->dma) {
 		dma_cap_mask_t mask;
@@ -632,15 +1045,15 @@
 		host->chan_rx = NULL;
 		dma_release_channel(chan);
 	}
-
-	host->cookie = -EINVAL;
-	host->desc = NULL;
 }
 #else
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+}
+
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
-	return 0;
 }
 
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +1095,9 @@
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
-	return tmio_mmc_start_dma(host, data);
+	tmio_mmc_start_dma(host, data);
+
+	return 0;
 }
 
 /* Process requests from the MMC layer */
@@ -694,6 +1109,8 @@
 	if (host->mrq)
 		pr_debug("request not null\n");
 
+	host->last_req_ts = jiffies;
+	wmb();
 	host->mrq = mrq;
 
 	if (mrq->data) {
@@ -703,10 +1120,14 @@
 	}
 
 	ret = tmio_mmc_start_command(host, mrq->cmd);
-	if (!ret)
+	if (!ret) {
+		schedule_delayed_work(&host->delayed_reset_work,
+				      msecs_to_jiffies(2000));
 		return;
+	}
 
 fail:
+	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }
@@ -780,6 +1201,7 @@
 	.set_ios	= tmio_mmc_set_ios,
 	.get_ro         = tmio_mmc_get_ro,
 	.get_cd		= tmio_mmc_get_cd,
+	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
 };
 
 #ifdef CONFIG_PM
@@ -864,10 +1286,15 @@
 		goto host_free;
 
 	mmc->ops = &tmio_mmc_ops;
-	mmc->caps = MMC_CAP_4_BIT_DATA;
-	mmc->caps |= pdata->capabilities;
+	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
 	mmc->f_max = pdata->hclk;
 	mmc->f_min = mmc->f_max / 512;
+	mmc->max_segs = 32;
+	mmc->max_blk_size = 512;
+	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+		mmc->max_segs;
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
 	if (pdata->ocr_mask)
 		mmc->ocr_avail = pdata->ocr_mask;
 	else
@@ -890,12 +1317,19 @@
 		goto cell_disable;
 
 	disable_mmc_irqs(host, TMIO_MASK_ALL);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		tmio_mmc_enable_sdio_irq(mmc, 0);
 
 	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
 		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
 	if (ret)
 		goto cell_disable;
 
+	spin_lock_init(&host->lock);
+
+	/* Init delayed work for request timeouts */
+	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
+
 	/* See if we also get DMA */
 	tmio_mmc_request_dma(host, pdata);
 
@@ -934,6 +1368,7 @@
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
 		mmc_remove_host(mmc);
+		cancel_delayed_work_sync(&host->delayed_reset_work);
 		tmio_mmc_release_dma(host);
 		free_irq(host->irq, host);
 		if (cell->disable)
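As a footnote to the probe changes earlier in this file, the new transfer limits are plain products of the bounce-buffer geometry: assuming 4 KiB pages, 512-byte blocks and 32 segments, max_blk_count works out to (4096 / 512) * 32 = 256 and max_req_size to 256 * 512 = 128 KiB. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096, blk_size = 512, max_segs = 32;
	unsigned max_blk_count = (page_size / blk_size) * max_segs;

	printf("max_blk_count=%u max_req_size=%u\n",
	       max_blk_count, max_blk_count * blk_size);
	return 0;
}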
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
deleted file mode 100644
index 0fedc78..0000000
--- a/drivers/mmc/host/tmio_mmc.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* Definitons for use with the tmio_mmc.c
- *
- * (c) 2004 Ian Molton <spyro@f2s.com>
- * (c) 2007 Ian Molton <spyro@f2s.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_RESET_SD 0xe0
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND    0x00000001
-#define TMIO_STAT_DATAEND       0x00000004
-#define TMIO_STAT_CARD_REMOVE   0x00000008
-#define TMIO_STAT_CARD_INSERT   0x00000010
-#define TMIO_STAT_SIGSTATE      0x00000020
-#define TMIO_STAT_WRPROTECT     0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A    0x00000400
-#define TMIO_STAT_CMD_IDX_ERR   0x00010000
-#define TMIO_STAT_CRCFAIL       0x00020000
-#define TMIO_STAT_STOPBIT_ERR   0x00040000
-#define TMIO_STAT_DATATIMEOUT   0x00080000
-#define TMIO_STAT_RXOVERFLOW    0x00100000
-#define TMIO_STAT_TXUNDERRUN    0x00200000
-#define TMIO_STAT_CMDTIMEOUT    0x00400000
-#define TMIO_STAT_RXRDY         0x01000000
-#define TMIO_STAT_TXRQ          0x02000000
-#define TMIO_STAT_ILL_FUNC      0x20000000
-#define TMIO_STAT_CMD_BUSY      0x40000000
-#define TMIO_STAT_ILL_ACCESS    0x80000000
-
-/* Define some IRQ masks */
-/* This is the mask used at reset by the chip */
-#define TMIO_MASK_ALL           0x837f031d
-#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
-#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
-#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
-		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
-#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-
-
-#define enable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask &= ~((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define disable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask |= ((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define ack_mmc_irqs(host, i) \
-	do { \
-		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
-	} while (0)
-
-
-struct tmio_mmc_host {
-	void __iomem *ctl;
-	unsigned long bus_shift;
-	struct mmc_command      *cmd;
-	struct mmc_request      *mrq;
-	struct mmc_data         *data;
-	struct mmc_host         *mmc;
-	int                     irq;
-
-	/* Callbacks for clock / power control */
-	void (*set_pwr)(struct platform_device *host, int state);
-	void (*set_clk_div)(struct platform_device *host, int state);
-
-	/* pio related stuff */
-	struct scatterlist      *sg_ptr;
-	unsigned int            sg_len;
-	unsigned int            sg_off;
-
-	struct platform_device *pdev;
-
-	/* DMA support */
-	struct dma_chan		*chan_rx;
-	struct dma_chan		*chan_tx;
-	struct tasklet_struct	dma_complete;
-	struct tasklet_struct	dma_issue;
-#ifdef CONFIG_TMIO_MMC_DMA
-	struct dma_async_tx_descriptor *desc;
-	unsigned int            dma_sglen;
-	dma_cookie_t		cookie;
-#endif
-};
-
-#include <linux/io.h>
-
-static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift)) |
-	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
-		u16 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
-		u32 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
-#include <linux/scatterlist.h>
-#include <linux/blkdev.h>
-
-static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
-	struct mmc_data *data)
-{
-	host->sg_len = data->sg_len;
-	host->sg_ptr = data->sg;
-	host->sg_off = 0;
-}
-
-static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
-{
-	host->sg_ptr = sg_next(host->sg_ptr);
-	host->sg_off = 0;
-	return --host->sg_len;
-}
-
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
-	unsigned long *flags)
-{
-	local_irq_save(*flags);
-	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-}
-
-static inline void tmio_mmc_kunmap_atomic(void *virt,
-	unsigned long *flags)
-{
-	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
-	local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a) \
-	do { \
-		if (status & TMIO_STAT_##a) \
-			printk(#a); \
-	} while (0)
-
-void pr_debug_status(u32 status)
-{
-	printk(KERN_DEBUG "status: %08x = ", status);
-	STATUS_TO_TEXT(CARD_REMOVE);
-	STATUS_TO_TEXT(CARD_INSERT);
-	STATUS_TO_TEXT(SIGSTATE);
-	STATUS_TO_TEXT(WRPROTECT);
-	STATUS_TO_TEXT(CARD_REMOVE_A);
-	STATUS_TO_TEXT(CARD_INSERT_A);
-	STATUS_TO_TEXT(SIGSTATE_A);
-	STATUS_TO_TEXT(CMD_IDX_ERR);
-	STATUS_TO_TEXT(STOPBIT_ERR);
-	STATUS_TO_TEXT(ILL_FUNC);
-	STATUS_TO_TEXT(CMD_BUSY);
-	STATUS_TO_TEXT(CMDRESPEND);
-	STATUS_TO_TEXT(DATAEND);
-	STATUS_TO_TEXT(CRCFAIL);
-	STATUS_TO_TEXT(DATATIMEOUT);
-	STATUS_TO_TEXT(CMDTIMEOUT);
-	STATUS_TO_TEXT(RXOVERFLOW);
-	STATUS_TO_TEXT(TXUNDERRUN);
-	STATUS_TO_TEXT(RXRDY);
-	STATUS_TO_TEXT(TXRQ);
-	STATUS_TO_TEXT(ILL_ACCESS);
-	printk("\n");
-}
-
-#else
-#define pr_debug_status(s)  do { } while (0)
-#endif
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f8f65df..f08f944 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/kernel.h>
-#include <linux/usb.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/mmc/host.h>
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index b1f7689..7741470 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -53,9 +53,10 @@
 	  devices. Partitioning on NFTL 'devices' is a different - that's the
 	  'normal' form of partitioning used on a block device.
 
+if MTD_PARTITIONS
+
 config MTD_REDBOOT_PARTS
 	tristate "RedBoot partition table parsing"
-	depends on MTD_PARTITIONS
 	---help---
 	  RedBoot is a ROM monitor and bootloader which deals with multiple
 	  'images' in flash devices by putting a table one of the erase
@@ -72,9 +73,10 @@
 	  SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
 	  example.
 
+if MTD_REDBOOT_PARTS
+
 config MTD_REDBOOT_DIRECTORY_BLOCK
 	int "Location of RedBoot partition table"
-	depends on MTD_REDBOOT_PARTS
 	default "-1"
 	---help---
 	  This option is the Linux counterpart to the
@@ -91,18 +93,18 @@
 
 config MTD_REDBOOT_PARTS_UNALLOCATED
 	bool "Include unallocated flash regions"
-	depends on MTD_REDBOOT_PARTS
 	help
 	  If you need to register each unallocated flash region as a MTD
 	  'partition', enable this option.
 
 config MTD_REDBOOT_PARTS_READONLY
 	bool "Force read-only for RedBoot system images"
-	depends on MTD_REDBOOT_PARTS
 	help
 	  If you need to force read-only for 'RedBoot', 'RedBoot Config' and
 	  'FIS directory' images, enable this option.
 
+endif # MTD_REDBOOT_PARTS
+
 config MTD_CMDLINE_PARTS
 	bool "Command line partition table parsing"
 	depends on MTD_PARTITIONS = "y" && MTD = "y"
@@ -142,7 +144,7 @@
 
 config MTD_AFS_PARTS
 	tristate "ARM Firmware Suite partition parsing"
-	depends on ARM && MTD_PARTITIONS
+	depends on ARM
 	---help---
 	  The ARM Firmware Suite allows the user to divide flash devices into
 	  multiple 'images'. Each such image has a header containing its name
@@ -158,8 +160,8 @@
 	  example.
 
 config MTD_OF_PARTS
-	tristate "Flash partition map based on OF description"
-	depends on OF && MTD_PARTITIONS
+	def_bool y
+	depends on OF
 	help
 	  This provides a partition parsing function which derives
 	  the partition map from the children of the flash node,
@@ -167,10 +169,11 @@
 
 config MTD_AR7_PARTS
 	tristate "TI AR7 partitioning support"
-	depends on MTD_PARTITIONS
 	---help---
 	  TI AR7 partitioning support
 
+endif # MTD_PARTITIONS
+
 comment "User Modules And Translation Layers"
 
 config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 760abc5..d4e7f25 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,13 +6,13 @@
 obj-$(CONFIG_MTD)		+= mtd.o
 mtd-y				:= mtdcore.o mtdsuper.o
 mtd-$(CONFIG_MTD_PARTITIONS)	+= mtdpart.o
+mtd-$(CONFIG_MTD_OF_PARTS)	+= ofpart.o
 
 obj-$(CONFIG_MTD_CONCAT)	+= mtdconcat.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS)	+= afs.o
 obj-$(CONFIG_MTD_AR7_PARTS)	+= ar7part.o
-obj-$(CONFIG_MTD_OF_PARTS)      += ofpart.o
 
 # 'Users' - code which presents functionality to userspace.
 obj-$(CONFIG_MTD_CHAR)		+= mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index ad9268b..4aaa88f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -162,7 +162,7 @@
 #endif
 
 /* Atmel chips don't use the same PRI format as Intel chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -202,7 +202,7 @@
 	cfi->cfiq->BufWriteTimeoutMax = 0;
 }
 
-static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
+static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -214,7 +214,7 @@
 
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
-static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
+static void fixup_intel_strataflash(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -227,7 +227,7 @@
 #endif
 
 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
-static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
+static void fixup_no_write_suspend(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -240,7 +240,7 @@
 }
 #endif
 
-static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320ct(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -249,7 +249,7 @@
 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
 }
 
-static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320cb(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -259,7 +259,7 @@
 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
 };
 
-static void fixup_use_point(struct mtd_info *mtd, void *param)
+static void fixup_use_point(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	if (!mtd->point && map_is_linear(map)) {
@@ -268,7 +268,7 @@
 	}
 }
 
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@
 /*
  * Some chips power-up with all sectors locked by default.
  */
-static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -295,31 +295,31 @@
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
-	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
-	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
-	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
+	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
 #endif
 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
 #endif
 #if !FORCE_WORD_WRITE
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
 #endif
-	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
-	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
-	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
+	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
+	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+	{ 0, 0, NULL }
 };
 
 static struct cfi_fixup jedec_fixup_table[] = {
-	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
+	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
+	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
+	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
+	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
+	{ 0, 0, NULL }
 };
 static struct cfi_fixup fixup_table[] = {
 	/* The CFI vendor ids and the JEDEC vendor IDs appear
@@ -327,8 +327,8 @@
 	 * well.  This table is to pick all cases where
 	 * we know that is the case.
 	 */
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
+	{ 0, 0, NULL }
 };
 
 static void cfi_fixup_major_minor(struct cfi_private *cfi,
@@ -455,6 +455,7 @@
 	mtd->flags   = MTD_CAP_NORFLASH;
 	mtd->name    = map->name;
 	mtd->writesize = 1;
+	mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
 
 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
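The writebufsize field added here (and in the 0002 and 0020 command sets below) comes straight from the CFI query structure, where MaxBufWriteSize is stored as a power of two: a chip reporting 0x06 advertises a 64-byte write buffer. For instance:

#include <stdio.h>

int main(void)
{
	unsigned char max_buf_write_size = 0x06;	/* log2, from the CFI query */

	printf("write buffer: %u bytes\n", 1u << max_buf_write_size);
	return 0;
}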
 
@@ -1229,10 +1230,32 @@
 	sleep_time = chip_op_time / 2;
 
 	for (;;) {
+		if (chip->state != chip_state) {
+			/* Someone's suspended the operation: sleep */
+			DECLARE_WAITQUEUE(wait, current);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->wq, &wait);
+			mutex_unlock(&chip->mutex);
+			schedule();
+			remove_wait_queue(&chip->wq, &wait);
+			mutex_lock(&chip->mutex);
+			continue;
+		}
+
 		status = map_read(map, cmd_adr);
 		if (map_word_andequal(map, status, status_OK, status_OK))
 			break;
 
+		if (chip->erase_suspended && chip_state == FL_ERASING)  {
+			/* Erase suspend occurred while sleeping: reset timeout */
+			timeo = reset_timeo;
+			chip->erase_suspended = 0;
+		}
+		if (chip->write_suspended && chip_state == FL_WRITING)  {
+			/* Write suspend occurred while sleeping: reset timeout */
+			timeo = reset_timeo;
+			chip->write_suspended = 0;
+		}
 		if (!timeo) {
 			map_write(map, CMD(0x70), cmd_adr);
 			chip->state = FL_STATUS;
@@ -1256,27 +1279,6 @@
 			timeo--;
 		}
 		mutex_lock(&chip->mutex);
-
-		while (chip->state != chip_state) {
-			/* Someone's suspended the operation: sleep */
-			DECLARE_WAITQUEUE(wait, current);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			add_wait_queue(&chip->wq, &wait);
-			mutex_unlock(&chip->mutex);
-			schedule();
-			remove_wait_queue(&chip->wq, &wait);
-			mutex_lock(&chip->mutex);
-		}
-		if (chip->erase_suspended && chip_state == FL_ERASING)  {
-			/* Erase suspend occured while sleep: reset timeout */
-			timeo = reset_timeo;
-			chip->erase_suspended = 0;
-		}
-		if (chip->write_suspended && chip_state == FL_WRITING)  {
-			/* Write suspend occured while sleep: reset timeout */
-			timeo = reset_timeo;
-			chip->write_suspended = 0;
-		}
 	}
 
 	/* Done and happy. */
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3b8e32d..f072fcf 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -134,7 +134,7 @@
 
 #ifdef AMD_BOOTLOC_BUG
 /* Wheee. Bring me the head of someone at AMD. */
-static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
+static void fixup_amd_bootblock(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -186,7 +186,7 @@
 }
 #endif
 
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -197,7 +197,7 @@
 }
 
 /* Atmel chips don't use the same PRI format as AMD chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -228,14 +228,14 @@
 	cfi->cfiq->BufWriteTimeoutMax = 0;
 }
 
-static void fixup_use_secsi(struct mtd_info *mtd, void *param)
+static void fixup_use_secsi(struct mtd_info *mtd)
 {
 	/* Setup for chips with a secsi area */
 	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
 	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
 }
 
-static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
+static void fixup_use_erase_chip(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -250,7 +250,7 @@
  * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
  * locked by default.
  */
-static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_atmel_lock(struct mtd_info *mtd)
 {
 	mtd->lock = cfi_atmel_lock;
 	mtd->unlock = cfi_atmel_unlock;
@@ -271,7 +271,7 @@
 	cfi->cfiq->NumEraseRegions = 1;
 }
 
-static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@
 	cfi->addr_unlock2 = 0x2AAA;
 }
 
-static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -295,12 +295,12 @@
 	cfi->sector_erase_cmd = CMD(0x50);
 }
 
-static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 
-	fixup_sst39vf_rev_b(mtd, param);
+	fixup_sst39vf_rev_b(mtd);
 
 	/*
 	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
@@ -310,7 +310,7 @@
 	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
 }
 
-static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -321,7 +321,7 @@
 	}
 }
 
-static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -334,47 +334,47 @@
 
 /* Used to fix CFI-Tables of chips without Extended Query Tables */
 static struct cfi_fixup cfi_nopri_fixup_table[] = {
-	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
-	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
-	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
-	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
-	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
-	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
-	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
-	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
+	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
+	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
+	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
+	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
+	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
+	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
+	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
+	{ 0, 0, NULL }
 };
 
 static struct cfi_fixup cfi_fixup_table[] = {
-	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
 #ifdef AMD_BOOTLOC_BUG
-	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
-	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
+	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
 #endif
-	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
-	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
-	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
-	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
-	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
-	{ CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
-	{ CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
-	{ CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
-	{ CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
+	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
+	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
+	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
+	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
+	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
+	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
+	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
+	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
 #if !FORCE_WORD_WRITE
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
 #endif
-	{ 0, 0, NULL, NULL }
+	{ 0, 0, NULL }
 };
 static struct cfi_fixup jedec_fixup_table[] = {
-	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
-	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
+	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
+	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
+	{ 0, 0, NULL }
 };
 
 static struct cfi_fixup fixup_table[] = {
@@ -383,18 +383,30 @@
 	 * well.  This table is to pick all cases where
 	 * we know that is the case.
 	 */
-	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
-	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
-	{ 0, 0, NULL, NULL }
+	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
+	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
+	{ 0, 0, NULL }
 };
 
 
 static void cfi_fixup_major_minor(struct cfi_private *cfi,
 				  struct cfi_pri_amdstd *extp)
 {
-	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
-	    extp->MajorVersion == '0')
-		extp->MajorVersion = '1';
+	if (cfi->mfr == CFI_MFR_SAMSUNG) {
+		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
+		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+			/*
+			 * Samsung K8P2815UQB and K8D6x16UxM chips
+			 * report major=0 / minor=0.
+			 * K8D3x16UxC chips report major=3 / minor=3.
+			 */
+			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
+			       " Extended Query version to 1.%c\n",
+			       extp->MinorVersion);
+			extp->MajorVersion = '1';
+		}
+	}
+
 	/*
 	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
 	 */
@@ -428,6 +440,10 @@
 	mtd->flags   = MTD_CAP_NORFLASH;
 	mtd->name    = map->name;
 	mtd->writesize = 1;
+	mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
+		__func__, mtd->writebufsize);
 
 	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
 
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 314af1f..c04b765 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -238,6 +238,7 @@
 	mtd->resume = cfi_staa_resume;
 	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
 	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
+	mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
 	map->fldrv = &cfi_staa_chipdrv;
 	__module_get(THIS_MODULE);
 	mtd->name = map->name;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 360525c..6ae3d11 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -156,7 +156,7 @@
 	for (f=fixups; f->fixup; f++) {
 		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
 		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
-			f->fixup(mtd, f->param);
+			f->fixup(mtd);
 		}
 	}
 }
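With the unused param argument gone, the fixup machinery is a plain table of callbacks matched on manufacturer and device id, with CFI_MFR_ANY and CFI_ID_ANY acting as wildcards, exactly as the cfi_fixup() loop above walks it. A minimal standalone rendering of that pattern (the ids, struct names and fixup_example below are invented for illustration):

#include <stdio.h>

#define MFR_ANY 0xffffu
#define ID_ANY  0xffffu

struct chip { unsigned mfr, id; };
struct fixup {
	unsigned mfr, id;
	void (*fixup)(struct chip *);
};

static void fixup_example(struct chip *c)
{
	printf("applying fixup to %04x:%04x\n", c->mfr, c->id);
}

/* Walk the NULL-terminated table and apply every matching entry. */
static void run_fixups(struct chip *c, const struct fixup *f)
{
	for (; f->fixup; f++)
		if ((f->mfr == MFR_ANY || f->mfr == c->mfr) &&
		    (f->id == ID_ANY || f->id == c->id))
			f->fixup(c);
}

int main(void)
{
	static const struct fixup table[] = {
		{ 0x0001, ID_ANY, fixup_example },	/* hypothetical ids */
		{ 0, 0, NULL }
	};
	struct chip c = { .mfr = 0x0001, .id = 0x22ab };

	run_fixups(&c, table);
	return 0;
}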
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index d180649..5e3cc80 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -98,7 +98,7 @@
 	return ret;
 }
 
-static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_fwh_lock(struct mtd_info *mtd)
 {
 	printk(KERN_NOTICE "using fwh lock/unlock method\n");
 	/* Setup for the chips with the fwh lock method */
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index d72a5fb..4e1be51 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1935,14 +1935,14 @@
 }
 
 
-static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
+static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
 {
 	int i,num_erase_regions;
 	uint8_t uaddr;
 
-	if (! (jedec_table[index].devtypes & p_cfi->device_type)) {
+	if (!(jedec_table[index].devtypes & cfi->device_type)) {
 		DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
-		      jedec_table[index].name, 4 * (1<<p_cfi->device_type));
+		      jedec_table[index].name, 4 * (1<<cfi->device_type));
 		return 0;
 	}
 
@@ -1950,27 +1950,28 @@
 
 	num_erase_regions = jedec_table[index].nr_regions;
 
-	p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
-	if (!p_cfi->cfiq) {
+	cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+	if (!cfi->cfiq) {
 		//xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
 		return 0;
 	}
 
-	memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
+	memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
 
-	p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
-	p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
-	p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
-	p_cfi->cfi_mode = CFI_MODE_JEDEC;
+	cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+	cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+	cfi->cfiq->DevSize = jedec_table[index].dev_size;
+	cfi->cfi_mode = CFI_MODE_JEDEC;
+	cfi->sector_erase_cmd = CMD(0x30);
 
 	for (i=0; i<num_erase_regions; i++){
-		p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
+		cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
 	}
-	p_cfi->cmdset_priv = NULL;
+	cfi->cmdset_priv = NULL;
 
 	/* This may be redundant for some cases, but it doesn't hurt */
-	p_cfi->mfr = jedec_table[index].mfr_id;
-	p_cfi->id = jedec_table[index].dev_id;
+	cfi->mfr = jedec_table[index].mfr_id;
+	cfi->id = jedec_table[index].dev_id;
 
 	uaddr = jedec_table[index].uaddr;
 
@@ -1978,8 +1979,8 @@
 	   our brains explode when we see the datasheets talking about address
 	   lines numbered from A-1 to A18. The CFI table has unlock addresses
 	   in device-words according to the mode the device is connected in */
-	p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
-	p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
+	cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
+	cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
 
 	return 1;	/* ok */
 }
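The device-word remark in the hunk above boils down to a division by the device width: the classic JEDEC unlock cycle at 0x5555/0x2AAA stays put on an x8 part (device_type 1) but becomes 0x2AAA/0x1555 on an x16 part (device_type 2). Worked out:

#include <stdio.h>

int main(void)
{
	unsigned addr1 = 0x5555, addr2 = 0x2aaa;
	unsigned device_type;

	for (device_type = 1; device_type <= 2; device_type <<= 1)
		printf("x%u: unlock1=0x%04x unlock2=0x%04x\n",
		       device_type * 8, addr1 / device_type,
		       addr2 / device_type);
	return 0;
}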
@@ -2175,7 +2176,7 @@
 				       "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
 				       __func__, cfi->mfr, cfi->id,
 				       cfi->addr_unlock1, cfi->addr_unlock2 );
-				if (!cfi_jedec_setup(cfi, i))
+				if (!cfi_jedec_setup(map, cfi, i))
 					return 0;
 				goto ok_out;
 			}
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 2cf0cc6..f29a6f9 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -224,7 +224,7 @@
 	if (dev->blkdev) {
 		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
 					0, -1);
-		close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
+		blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 	}
 
 	kfree(dev);
@@ -234,6 +234,7 @@
 /* FIXME: ensure that mtd->size % erase_size == 0 */
 static struct block2mtd_dev *add_device(char *devname, int erase_size)
 {
+	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
 	struct block_device *bdev;
 	struct block2mtd_dev *dev;
 	char *name;
@@ -246,7 +247,7 @@
 		return NULL;
 
 	/* Get a handle on the device */
-	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
+	bdev = blkdev_get_by_path(devname, mode, dev);
 #ifndef MODULE
 	if (IS_ERR(bdev)) {
 
@@ -254,9 +255,8 @@
 		   to resolve the device name by other means. */
 
 		dev_t devt = name_to_dev_t(devname);
-		if (devt) {
-			bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
-		}
+		if (devt)
+			bdev = blkdev_get_by_dev(devt, mode, dev);
 	}
 #endif
 
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index bf5a002..e4eba6c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -51,6 +51,10 @@
 #define	OPCODE_WRDI		0x04	/* Write disable */
 #define	OPCODE_AAI_WP		0xad	/* Auto address increment word program */
 
+/* Used for Macronix flashes only. */
+#define	OPCODE_EN4B		0xb7	/* Enter 4-byte mode */
+#define	OPCODE_EX4B		0xe9	/* Exit 4-byte mode */
+
 /* Status Register bits. */
 #define	SR_WIP			1	/* Write in progress */
 #define	SR_WEL			2	/* Write enable latch */
@@ -62,7 +66,7 @@
 
 /* Define max times to check status register before we give up. */
 #define	MAX_READY_WAIT_JIFFIES	(40 * HZ)	/* M25P16 specs 40s max chip erase */
-#define	MAX_CMD_SIZE		4
+#define	MAX_CMD_SIZE		5
 
 #ifdef CONFIG_M25PXX_USE_FAST_READ
 #define OPCODE_READ 	OPCODE_FAST_READ
@@ -152,6 +156,16 @@
 }
 
 /*
+ * Enable/disable 4-byte addressing mode.
+ */
+static inline int set_4byte(struct m25p *flash, int enable)
+{
+	u8	code = enable ? OPCODE_EN4B : OPCODE_EX4B;
+
+	return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+}
+
+/*
  * Service routine to read status register until ready, or timeout occurs.
  * Returns non-zero if error.
  */
@@ -207,6 +221,7 @@
 	cmd[1] = addr >> (flash->addr_width * 8 -  8);
 	cmd[2] = addr >> (flash->addr_width * 8 - 16);
 	cmd[3] = addr >> (flash->addr_width * 8 - 24);
+	cmd[4] = addr >> (flash->addr_width * 8 - 32);
 }
 
 static int m25p_cmdsz(struct m25p *flash)
@@ -482,6 +497,10 @@
 	size_t actual;
 	int cmd_sz, ret;
 
+	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+			dev_name(&flash->spi->dev), __func__, "to",
+			(u32)to, len);
+
 	*retlen = 0;
 
 	/* sanity checks */
@@ -607,7 +626,6 @@
 		.sector_size = (_sector_size),				\
 		.n_sectors = (_n_sectors),				\
 		.page_size = 256,					\
-		.addr_width = 3,					\
 		.flags = (_flags),					\
 	})
 
@@ -635,7 +653,7 @@
 	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
 	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
 	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
-	{ "at26df321",  INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
 
 	/* EON -- en25pxx */
 	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024,  64, 0) },
@@ -653,6 +671,8 @@
 	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, 0) },
 	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
 	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
 
 	/* Spansion -- single (large) sector size only, at least
 	 * for the chips listed here (without boot sectors).
@@ -764,6 +784,7 @@
 			return &m25p_ids[tmp];
 		}
 	}
+	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
 	return ERR_PTR(-ENODEV);
 }
 
@@ -883,7 +904,17 @@
 
 	flash->mtd.dev.parent = &spi->dev;
 	flash->page_size = info->page_size;
-	flash->addr_width = info->addr_width;
+
+	if (info->addr_width)
+		flash->addr_width = info->addr_width;
+	else {
+		/* enable 4-byte addressing if the device exceeds 16MiB */
+		if (flash->mtd.size > 0x1000000) {
+			flash->addr_width = 4;
+			set_4byte(flash, 1);
+		} else
+			flash->addr_width = 3;
+	}
 
 	dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
 			(long long)flash->mtd.size >> 10);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 684247a..c163e61 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -335,7 +335,7 @@
 	return ret;
 }
 
-static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
+static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
 {
 	struct flash_info *flash_info = NULL;
 	struct spi_message m;
@@ -375,7 +375,7 @@
 	return flash_info;
 }
 
-static int __init sst25l_probe(struct spi_device *spi)
+static int __devinit sst25l_probe(struct spi_device *spi)
 {
 	struct flash_info *flash_info;
 	struct sst25l_flash *flash;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 19fe92d..92de7e3 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -149,11 +149,9 @@
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
 		printk(KERN_ERR MOD_NAME
-			" %s(): Unable to register resource"
-			" 0x%.16llx-0x%.16llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		       " %s(): Unable to register resource %pR - kernel bug?\n",
+		       __func__, &window->rsrc);
+		return -EBUSY;
 	}
 
 
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index d175c12..1f30495 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -196,10 +196,15 @@
 	bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
 	if (!bcm963xx_mtd_info) {
 		dev_err(&pdev->dev, "failed to probe using CFI\n");
+		bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
+		if (bcm963xx_mtd_info)
+			goto probe_ok;
+		dev_err(&pdev->dev, "failed to probe using JEDEC\n");
 		err = -EIO;
 		goto err_probe;
 	}
 
+probe_ok:
 	bcm963xx_mtd_info->owner = THIS_MODULE;
 
 	/* This is mutually exclusive */
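
The control flow added above is a plain probe fallback: try CFI first, fall back to JEDEC, and only give up when both fail. A tiny sketch of the same pattern, with stand-in probe functions and no MTD API involved:

	#include <stdio.h>
	#include <stddef.h>

	/* stand-in for do_map_probe(); here both probes "fail" */
	static void *fake_probe(const char *method) { (void)method; return NULL; }

	int main(void)
	{
		void *mtd = fake_probe("cfi_probe");
		if (!mtd) {
			fprintf(stderr, "failed to probe using CFI\n");
			mtd = fake_probe("jedec_probe");
		}
		if (!mtd) {
			fprintf(stderr, "failed to probe using JEDEC\n");
			return 1;		/* the driver returns -EIO here */
		}
		printf("flash found\n");
		return 0;
	}
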
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index ddb462b..5fdb7b2 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -178,11 +178,8 @@
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
 		printk(KERN_ERR MOD_NAME
-			" %s(): Unable to register resource"
-			" 0x%.016llx-0x%.016llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		       " %s(): Unable to register resource %pR - kernel bug?\n",
+			__func__, &window->rsrc);
 	}
 
 
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index d12c93d..4feb750 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -242,12 +242,9 @@
 	window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
-		printk(KERN_DEBUG MOD_NAME
-			": %s(): Unable to register resource"
-			" 0x%.08llx-0x%.08llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		printk(KERN_DEBUG MOD_NAME ": "
+		       "%s(): Unable to register resource %pR - kernel bug?\n",
+			__func__, &window->rsrc);
 	}
 
 	/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index f102bf2..1337a41 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -175,12 +175,9 @@
 	window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	if (request_resource(&iomem_resource, &window->rsrc)) {
 		window->rsrc.parent = NULL;
-		printk(KERN_DEBUG MOD_NAME
-			": %s(): Unable to register resource"
-			" 0x%.16llx-0x%.16llx - kernel bug?\n",
-			__func__,
-			(unsigned long long)window->rsrc.start,
-			(unsigned long long)window->rsrc.end);
+		printk(KERN_DEBUG MOD_NAME ": "
+		       "%s(): Unable to register resource %pR - kernel bug?\n",
+		       __func__, &window->rsrc);
 	}
 
 	/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 9861814..8506578 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -274,9 +274,7 @@
 			continue;
 		}
 
-		dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
-			(unsigned long long)res.start,
-			(unsigned long long)res.end);
+		dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
 
 		err = -EBUSY;
 		res_size = resource_size(&res);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index b5391eb..027e628 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -166,9 +166,8 @@
 		outl(pmr, scx200_cb_base + SCx200_PMR);
 	}
 
-       	printk(KERN_INFO NAME ": DOCCS mapped at 0x%llx-0x%llx, width %d\n",
-			(unsigned long long)docmem.start,
-			(unsigned long long)docmem.end, width);
+	printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
+	       &docmem, width);
 
 	scx200_docflash_map.size = size;
 	if (width == 8)
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 6014698..c08e140 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -139,7 +139,7 @@
 			goto error_mem;
 		}
 
-		map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
+		map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
 
 		if (!map_banks[idx]->name) {
 			ret = -ENOMEM;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index cb20c67..e0a2373 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -413,7 +413,6 @@
 error2:
 	list_del(&new->list);
 error1:
-	kfree(new);
 	return ret;
 }
 
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f511dd1..145b3d0d 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -522,10 +522,6 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	/* Only master mtd device must be used to control partitions */
-	if (!mtd_is_master(mtd))
-		return -EINVAL;
-
 	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
 		return -EFAULT;
 
@@ -535,6 +531,10 @@
 	switch (a.op) {
 	case BLKPG_ADD_PARTITION:
 
+		/* Only master mtd device must be used to add partitions */
+		if (mtd_is_partition(mtd))
+			return -EINVAL;
+
 		return mtd_add_partition(mtd, p.devname, p.start, p.length);
 
 	case BLKPG_DEL_PARTITION:
@@ -601,6 +601,7 @@
 	}
 
 	case MEMGETINFO:
+		memset(&info, 0, sizeof(info));
 		info.type	= mtd->type;
 		info.flags	= mtd->flags;
 		info.size	= mtd->size;
@@ -609,7 +610,6 @@
 		info.oobsize	= mtd->oobsize;
 		/* The below fields are obsolete */
 		info.ecctype	= -1;
-		info.eccsize	= 0;
 		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
 			return -EFAULT;
 		break;
@@ -1134,7 +1134,7 @@
 static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
 				int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC);
+	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
 }
 
 static struct file_system_type mtd_inodefs_type = {
@@ -1201,7 +1201,7 @@
 static void __exit cleanup_mtdchar(void)
 {
 	unregister_mtd_user(&mtdchar_notifier);
-	mntput_long(mtd_inode_mnt);
+	mntput(mtd_inode_mnt);
 	unregister_filesystem(&mtd_inodefs_type);
 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }
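
Among the mtdchar.c changes, two stand out: the partition ioctl now rejects only BLKPG_ADD_PARTITION on partition devices (deletion is no longer gated on the master), and MEMGETINFO zeroes the whole structure before filling it, so padding and the no-longer-written obsolete fields cannot leak kernel stack bytes to user space. A stand-alone illustration of the memset idiom; the struct layout is invented and printf stands in for copy_to_user():

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	struct info_user {			/* invented layout, padding included */
		uint8_t  type;
		uint32_t flags;
		uint64_t size;
		uint32_t eccsize;		/* obsolete field, must read back as 0 */
	};

	int main(void)
	{
		struct info_user info;

		memset(&info, 0, sizeof(info));	/* zero padding + obsolete fields */
		info.type  = 4;
		info.flags = 0x400;
		info.size  = 1ULL << 20;

		/* everything not explicitly set, including padding, is now 0 */
		printf("eccsize=%u size=%llu\n", info.eccsize,
		       (unsigned long long)info.size);
		return 0;
	}
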
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index bf8de09..5f5777b 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -776,6 +776,7 @@
 	concat->mtd.size = subdev[0]->size;
 	concat->mtd.erasesize = subdev[0]->erasesize;
 	concat->mtd.writesize = subdev[0]->writesize;
+	concat->mtd.writebufsize = subdev[0]->writebufsize;
 	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
 	concat->mtd.oobsize = subdev[0]->oobsize;
 	concat->mtd.oobavail = subdev[0]->oobavail;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1ee72f3..e3e40f4 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -307,6 +307,11 @@
 	unsigned long l1_cpy, l2_cpy;
 	char *dst;
 
+	if (reason != KMSG_DUMP_OOPS &&
+	    reason != KMSG_DUMP_PANIC &&
+	    reason != KMSG_DUMP_KEXEC)
+		return;
+
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
@@ -396,7 +401,8 @@
 		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
 
 	cxt->mtd = NULL;
-	flush_scheduled_work();
+	flush_work_sync(&cxt->work_erase);
+	flush_work_sync(&cxt->work_write);
 }
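
mtdoops.c picks up two behavioural changes: the dumper now returns early for every kmsg_dump reason other than oops, panic and kexec, and teardown flushes the two specific work items with flush_work_sync() instead of flushing the whole global workqueue. A compact sketch of the reason filter, with a simplified local enum standing in for the kernel's kmsg_dump_reason:

	#include <stdio.h>

	/* simplified stand-in for <linux/kmsg_dump.h> */
	enum kmsg_dump_reason { KMSG_DUMP_OOPS, KMSG_DUMP_PANIC, KMSG_DUMP_KEXEC,
				KMSG_DUMP_RESTART, KMSG_DUMP_HALT };

	static int mtdoops_wants_dump(enum kmsg_dump_reason reason, int dump_oops)
	{
		if (reason != KMSG_DUMP_OOPS &&
		    reason != KMSG_DUMP_PANIC &&
		    reason != KMSG_DUMP_KEXEC)
			return 0;		/* the new early return */
		if (reason == KMSG_DUMP_OOPS && !dump_oops)
			return 0;		/* existing dump_oops gate */
		return 1;
	}

	int main(void)
	{
		printf("restart=%d oops(dump_oops=0)=%d panic=%d\n",
		       mtdoops_wants_dump(KMSG_DUMP_RESTART, 1),
		       mtdoops_wants_dump(KMSG_DUMP_OOPS, 0),
		       mtdoops_wants_dump(KMSG_DUMP_PANIC, 0));
		return 0;
	}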
 
 
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 79e3689..0a47601 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -120,8 +120,25 @@
 		return -EINVAL;
 	if (ops->datbuf && from + ops->len > mtd->size)
 		return -EINVAL;
-	res = part->master->read_oob(part->master, from + part->offset, ops);
 
+	/*
+	 * If OOB is also requested, make sure that we do not read past the end
+	 * of this partition.
+	 */
+	if (ops->oobbuf) {
+		size_t len, pages;
+
+		if (ops->mode == MTD_OOB_AUTO)
+			len = mtd->oobavail;
+		else
+			len = mtd->oobsize;
+		pages = mtd_div_by_ws(mtd->size, mtd);
+		pages -= mtd_div_by_ws(from, mtd);
+		if (ops->ooboffs + ops->ooblen > pages * len)
+			return -EINVAL;
+	}
+
+	res = part->master->read_oob(part->master, from + part->offset, ops);
 	if (unlikely(res)) {
 		if (res == -EUCLEAN)
 			mtd->ecc_stats.corrected++;
@@ -384,6 +401,7 @@
 	slave->mtd.flags = master->flags & ~part->mask_flags;
 	slave->mtd.size = part->size;
 	slave->mtd.writesize = master->writesize;
+	slave->mtd.writebufsize = master->writebufsize;
 	slave->mtd.oobsize = master->oobsize;
 	slave->mtd.oobavail = master->oobavail;
 	slave->mtd.subpage_sft = master->subpage_sft;
@@ -720,19 +738,19 @@
 }
 EXPORT_SYMBOL_GPL(parse_mtd_partitions);
 
-int mtd_is_master(struct mtd_info *mtd)
+int mtd_is_partition(struct mtd_info *mtd)
 {
 	struct mtd_part *part;
-	int nopart = 0;
+	int ispart = 0;
 
 	mutex_lock(&mtd_partitions_mutex);
 	list_for_each_entry(part, &mtd_partitions, list)
 		if (&part->mtd == mtd) {
-			nopart = 1;
+			ispart = 1;
 			break;
 		}
 	mutex_unlock(&mtd_partitions_mutex);
 
-	return nopart;
+	return ispart;
 }
-EXPORT_SYMBOL_GPL(mtd_is_master);
+EXPORT_SYMBOL_GPL(mtd_is_partition);
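
Besides renaming mtd_is_master() to the more accurately named mtd_is_partition(), part_read_oob() now refuses OOB requests that would run past the end of the partition: it counts how many pages remain from 'from' to the end of the partition and compares that with the OOB space the request needs. A stand-alone version of the arithmetic with invented geometry:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* invented geometry */
		uint64_t part_size = 4ULL * 1024 * 1024;	/* 4 MiB partition */
		uint32_t writesize = 2048;			/* page size */
		uint32_t oob_per_page = 64;			/* oobavail in AUTO mode */

		uint64_t from = part_size - 2 * writesize;	/* 2 pages before the end */
		uint32_t ooboffs = 16, ooblen = 200;		/* requested OOB window */

		uint64_t pages = part_size / writesize - from / writesize;
		int fits = (uint64_t)ooboffs + ooblen <= pages * oob_per_page;

		printf("%llu pages of OOB left (%llu bytes), request %s\n",
		       (unsigned long long)pages,
		       (unsigned long long)(pages * oob_per_page),
		       fits ? "fits" : "rejected with -EINVAL");
		return 0;
	}
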
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8229802..c895922 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,6 +96,7 @@
 config MTD_NAND_AMS_DELTA
 	tristate "NAND Flash device on Amstrad E3"
 	depends on MACH_AMS_DELTA
+	default y
 	help
 	  Support for NAND flash on Amstrad E3 (Delta).
 
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2548e10..a067d09 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -4,6 +4,8 @@
  *  Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
  *
  *  Derived from drivers/mtd/toto.c
+ *  Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *  Partially stolen from drivers/mtd/nand/plat_nand.c
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -62,9 +64,10 @@
 static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
 {
 	struct nand_chip *this = mtd->priv;
+	void __iomem *io_base = this->priv;
 
-	omap_writew(0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
-	omap_writew(byte, this->IO_ADDR_W);
+	writew(0, io_base + OMAP_MPUIO_IO_CNTL);
+	writew(byte, this->IO_ADDR_W);
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
 	ndelay(40);
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
@@ -75,11 +78,12 @@
 {
 	u_char res;
 	struct nand_chip *this = mtd->priv;
+	void __iomem *io_base = this->priv;
 
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
 	ndelay(40);
-	omap_writew(~0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
-	res = omap_readw(this->IO_ADDR_R);
+	writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
+	res = readw(this->IO_ADDR_R);
 	ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
 			       AMS_DELTA_LATCH2_NAND_NRE);
 
@@ -151,11 +155,16 @@
 /*
  * Main initialization routine
  */
-static int __init ams_delta_init(void)
+static int __devinit ams_delta_init(struct platform_device *pdev)
 {
 	struct nand_chip *this;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	void __iomem *io_base;
 	int err = 0;
 
+	if (!res)
+		return -ENXIO;
+
 	/* Allocate memory for MTD device structure and private data */
 	ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
 				sizeof(struct nand_chip), GFP_KERNEL);
@@ -177,9 +186,25 @@
 	/* Link the private data with the MTD structure */
 	ams_delta_mtd->priv = this;
 
+	if (!request_mem_region(res->start, resource_size(res),
+			dev_name(&pdev->dev))) {
+		dev_err(&pdev->dev, "request_mem_region failed\n");
+		err = -EBUSY;
+		goto out_free;
+	}
+
+	io_base = ioremap(res->start, resource_size(res));
+	if (io_base == NULL) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -EIO;
+		goto out_release_io;
+	}
+
+	this->priv = io_base;
+
 	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = (OMAP1_MPUIO_BASE + OMAP_MPUIO_INPUT_LATCH);
-	this->IO_ADDR_W = (OMAP1_MPUIO_BASE + OMAP_MPUIO_OUTPUT);
+	this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+	this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
 	this->read_byte = ams_delta_read_byte;
 	this->write_buf = ams_delta_write_buf;
 	this->read_buf = ams_delta_read_buf;
@@ -195,6 +220,8 @@
 	this->chip_delay = 30;
 	this->ecc.mode = NAND_ECC_SOFT;
 
+	platform_set_drvdata(pdev, io_base);
+
 	/* Set chip enabled, but  */
 	ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
 					  AMS_DELTA_LATCH2_NAND_NWE |
@@ -214,25 +241,56 @@
 	goto out;
 
  out_mtd:
+	platform_set_drvdata(pdev, NULL);
+	iounmap(io_base);
+out_release_io:
+	release_mem_region(res->start, resource_size(res));
+out_free:
 	kfree(ams_delta_mtd);
  out:
 	return err;
 }
 
-module_init(ams_delta_init);
-
 /*
  * Clean up routine
  */
-static void __exit ams_delta_cleanup(void)
+static int __devexit ams_delta_cleanup(struct platform_device *pdev)
 {
+	void __iomem *io_base = platform_get_drvdata(pdev);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
 	/* Release resources, unregister device */
 	nand_release(ams_delta_mtd);
 
+	iounmap(io_base);
+	release_mem_region(res->start, resource_size(res));
+
 	/* Free the MTD device structure */
 	kfree(ams_delta_mtd);
+
+	return 0;
 }
-module_exit(ams_delta_cleanup);
+
+static struct platform_driver ams_delta_nand_driver = {
+	.probe		= ams_delta_init,
+	.remove		= __devexit_p(ams_delta_cleanup),
+	.driver		= {
+		.name	= "ams-delta-nand",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init ams_delta_nand_init(void)
+{
+	return platform_driver_register(&ams_delta_nand_driver);
+}
+module_init(ams_delta_nand_init);
+
+static void __exit ams_delta_nand_exit(void)
+{
+	platform_driver_unregister(&ams_delta_nand_driver);
+}
+module_exit(ams_delta_nand_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c141b07..7a13d42 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -388,6 +388,8 @@
 		         "page_addr: 0x%x, column: 0x%x.\n",
 		         page_addr, column);
 
+		elbc_fcm_ctrl->column = column;
+		elbc_fcm_ctrl->oob = 0;
 		elbc_fcm_ctrl->use_mdr = 1;
 
 		fcr = (NAND_CMD_STATUS   << FCR_CMD1_SHIFT) |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 02edfba..205b10b 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/mtd/fsmc.h>
+#include <linux/amba/bus.h>
 #include <mtd/mtd-abi.h>
 
 static struct nand_ecclayout fsmc_ecc1_layout = {
@@ -119,21 +120,36 @@
 	}
 };
 
+
+#ifdef CONFIG_MTD_PARTITIONS
 /*
  * Default partition tables to be used if the partition information not
- * provided through platform data
- */
-#define PARTITION(n, off, sz)	{.name = n, .offset = off, .size = sz}
-
-/*
+ * provided through platform data.
+ *
  * Default partition layout for small page(= 512 bytes) devices
  * Size for "Root file system" is updated in driver based on actual device size
  */
 static struct mtd_partition partition_info_16KB_blk[] = {
-	PARTITION("X-loader", 0, 4 * 0x4000),
-	PARTITION("U-Boot", 0x10000, 20 * 0x4000),
-	PARTITION("Kernel", 0x60000, 256 * 0x4000),
-	PARTITION("Root File System", 0x460000, 0),
+	{
+		.name = "X-loader",
+		.offset = 0,
+		.size = 4*0x4000,
+	},
+	{
+		.name = "U-Boot",
+		.offset = 0x10000,
+		.size = 20*0x4000,
+	},
+	{
+		.name = "Kernel",
+		.offset = 0x60000,
+		.size = 256*0x4000,
+	},
+	{
+		.name = "Root File System",
+		.offset = 0x460000,
+		.size = 0,
+	},
 };
 
 /*
@@ -141,19 +157,37 @@
  * Size for "Root file system" is updated in driver based on actual device size
  */
 static struct mtd_partition partition_info_128KB_blk[] = {
-	PARTITION("X-loader", 0, 4 * 0x20000),
-	PARTITION("U-Boot", 0x80000, 12 * 0x20000),
-	PARTITION("Kernel", 0x200000, 48 * 0x20000),
-	PARTITION("Root File System", 0x800000, 0),
+	{
+		.name = "X-loader",
+		.offset = 0,
+		.size = 4*0x20000,
+	},
+	{
+		.name = "U-Boot",
+		.offset = 0x80000,
+		.size = 12*0x20000,
+	},
+	{
+		.name = "Kernel",
+		.offset = 0x200000,
+		.size = 48*0x20000,
+	},
+	{
+		.name = "Root File System",
+		.offset = 0x800000,
+		.size = 0,
+	},
 };
 
 #ifdef CONFIG_MTD_CMDLINE_PARTS
 const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
+#endif
 
 /**
- * struct fsmc_nand_data - atructure for FSMC NAND device state
+ * struct fsmc_nand_data - structure for FSMC NAND device state
  *
+ * @pid:		Part ID on the AMBA PrimeCell format
  * @mtd:		MTD info for a NAND flash.
  * @nand:		Chip related info for a NAND flash.
  * @partitions:		Partition info for a NAND Flash.
@@ -169,6 +203,7 @@
  * @regs_va:		FSMC regs base address.
  */
 struct fsmc_nand_data {
+	u32			pid;
 	struct mtd_info		mtd;
 	struct nand_chip	nand;
 	struct mtd_partition	*partitions;
@@ -508,7 +543,9 @@
 	struct nand_chip *nand;
 	struct fsmc_regs *regs;
 	struct resource *res;
-	int nr_parts, ret = 0;
+	int ret = 0;
+	u32 pid;
+	int i;
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "platform data is NULL\n");
@@ -598,6 +635,18 @@
 	if (ret)
 		goto err_probe1;
 
+	/*
+	 * This device ID is actually a common AMBA ID as used on the
+	 * AMBA PrimeCell bus. However it is not a PrimeCell.
+	 */
+	for (pid = 0, i = 0; i < 4; i++)
+		pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+	host->pid = pid;
+	dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
+		 "revision %02x, config %02x\n",
+		 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+		 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
 	host->bank = pdata->bank;
 	host->select_chip = pdata->select_bank;
 	regs = host->regs_va;
@@ -625,7 +674,7 @@
 
 	fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
 
-	if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+	if (AMBA_REV_BITS(host->pid) >= 8) {
 		nand->ecc.read_page = fsmc_read_page_hwecc;
 		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
 		nand->ecc.correct = fsmc_correct_data;
@@ -645,7 +694,7 @@
 		goto err_probe;
 	}
 
-	if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+	if (AMBA_REV_BITS(host->pid) >= 8) {
 		if (host->mtd.writesize == 512) {
 			nand->ecc.layout = &fsmc_ecc4_sp_layout;
 			host->ecc_place = &fsmc_ecc4_sp_place;
@@ -676,11 +725,9 @@
 	 * Check if partition info passed via command line
 	 */
 	host->mtd.name = "nand";
-	nr_parts = parse_mtd_partitions(&host->mtd, part_probes,
+	host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
 			&host->partitions, 0);
-	if (nr_parts > 0) {
-		host->nr_partitions = nr_parts;
-	} else {
+	if (host->nr_partitions <= 0) {
 #endif
 		/*
 		 * Check if partition info passed via command line
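
The fsmc_nand.c changes replace the PARTITION() macro with ordinary designated initializers, guard the default tables with CONFIG_MTD_PARTITIONS, and read an AMBA-PrimeCell-style peripheral ID out of the last 0x20 bytes of the register window so the ECC capabilities can be keyed off AMBA_REV_BITS() instead of a private version register. Below is a sketch of assembling that 32-bit ID from four byte-wide ID registers; the register values are invented, and the bit-field split follows my reading of the kernel's AMBA_*_BITS macros:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* invented contents of the four ID registers at the top of the window */
		uint32_t id_reg[4] = { 0x40, 0x08, 0x18, 0x00 };
		uint32_t pid = 0;

		for (int i = 0; i < 4; i++)
			pid |= (id_reg[i] & 255) << (i * 8);

		printf("pid=0x%08x part=0x%03x manf=0x%02x rev=%u config=%u\n",
		       pid,
		       pid & 0xfff,		/* AMBA_PART_BITS   */
		       (pid >> 12) & 0xff,	/* AMBA_MANF_BITS   */
		       (pid >> 20) & 0x0f,	/* AMBA_REV_BITS    */
		       (pid >> 24) & 0xff);	/* AMBA_CONFIG_BITS */
		return 0;
	}
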
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 67343fc..cea38a5 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,58 +251,6 @@
 	return 0;
 }
 
-
-/* Copy paste of nand_read_page_hwecc_oob_first except for different eccpos
- * handling. The ecc area is for 4k chips 72 bytes long and thus does not fit
- * into the eccpos array. */
-static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
-	struct nand_chip *chip, uint8_t *buf, int page)
-{
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	unsigned int ecc_offset = chip->page_shift;
-
-	/* Read the OOB area first */
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		int stat;
-
-		chip->ecc.hwctl(mtd, NAND_ECC_READ);
-		chip->read_buf(mtd, p, eccsize);
-
-		stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL);
-		if (stat < 0)
-			mtd->ecc_stats.failed++;
-		else
-			mtd->ecc_stats.corrected += stat;
-	}
-	return 0;
-}
-
-/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. */
-static void jz_nand_write_page_hwecc(struct mtd_info *mtd,
-	struct nand_chip *chip, const uint8_t *buf)
-{
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	const uint8_t *p = buf;
-	unsigned int ecc_offset = chip->page_shift;
-
-	for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
-		chip->write_buf(mtd, p, eccsize);
-		chip->ecc.calculate(mtd, p, &chip->oob_poi[i]);
-	}
-
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
-
 #ifdef CONFIG_MTD_CMDLINE_PARTS
 static const char *part_probes[] = {"cmdline", NULL};
 #endif
@@ -393,9 +341,6 @@
 	chip->ecc.size		= 512;
 	chip->ecc.bytes		= 9;
 
-	chip->ecc.read_page	= jz_nand_read_page_hwecc_oob_first;
-	chip->ecc.write_page	= jz_nand_write_page_hwecc;
-
 	if (pdata)
 		chip->ecc.layout = pdata->ecc_layout;
 
@@ -489,7 +434,7 @@
 	return 0;
 }
 
-struct platform_driver jz_nand_driver = {
+static struct platform_driver jz_nand_driver = {
 	.probe = jz_nand_probe,
 	.remove = __devexit_p(jz_nand_remove),
 	.driver = {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 214b03a..ef932ba 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1009,7 +1009,7 @@
 	struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
 	struct mxc_nand_host *host;
 	struct resource *res;
-	int err = 0, nr_parts = 0;
+	int err = 0, __maybe_unused nr_parts = 0;
 	struct nand_ecclayout *oob_smallpage, *oob_largepage;
 
 	/* Allocate memory for MTD device structure and private data */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1f75a1b..a9c6ce7 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -821,7 +821,7 @@
  *
  * Wait for command done. This is a helper function for nand_wait used when
  * we are in interrupt context. May happen when in panic and trying to write
- * an oops trough mtdoops.
+ * an oops through mtdoops.
  */
 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
 			    unsigned long timeo)
@@ -2865,20 +2865,24 @@
 
 	/* check version */
 	val = le16_to_cpu(p->revision);
-	if (val == 1 || val > (1 << 4)) {
-		printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
-								__func__, val);
-		return 0;
-	}
-
-	if (val & (1 << 4))
+	if (val & (1 << 5))
+		chip->onfi_version = 23;
+	else if (val & (1 << 4))
 		chip->onfi_version = 22;
 	else if (val & (1 << 3))
 		chip->onfi_version = 21;
 	else if (val & (1 << 2))
 		chip->onfi_version = 20;
-	else
+	else if (val & (1 << 1))
 		chip->onfi_version = 10;
+	else
+		chip->onfi_version = 0;
+
+	if (!chip->onfi_version) {
+		printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
+								__func__, val);
+		return 0;
+	}
 
 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
 	sanitize_string(p->model, sizeof(p->model));
@@ -2887,7 +2891,7 @@
 	mtd->writesize = le32_to_cpu(p->byte_per_page);
 	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-	chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+	chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
 	busw = 0;
 	if (le16_to_cpu(p->features) & 1)
 		busw = NAND_BUSWIDTH_16;
@@ -3157,7 +3161,7 @@
 	printk(KERN_INFO "NAND device: Manufacturer ID:"
 		" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
 		nand_manuf_ids[maf_idx].name,
-	chip->onfi_version ? type->name : chip->onfi_params.model);
+		chip->onfi_version ? chip->onfi_params.model : type->name);
 
 	return type;
 }
@@ -3435,6 +3439,7 @@
 	mtd->resume = nand_resume;
 	mtd->block_isbad = nand_block_isbad;
 	mtd->block_markbad = nand_block_markbad;
+	mtd->writebufsize = mtd->writesize;
 
 	/* propagate ecc.layout to mtd_info */
 	mtd->ecclayout = chip->ecc.layout;
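
In nand_base.c the ONFI parameter-page handling is reworked: the revision field is treated as the bitmask it really is (bit 1 = 1.0 up to bit 5 = 2.3), chipsize is computed in 64 bits so parts over 4 GiB do not overflow, the probe printout is fixed so ONFI chips report the ONFI model string, and every NAND mtd now advertises writebufsize = writesize. A stand-alone sketch of the revision decoding:

	#include <stdio.h>

	/* bit 1 = ONFI 1.0, bit 2 = 2.0, bit 3 = 2.1, bit 4 = 2.2, bit 5 = 2.3 */
	static int onfi_version(unsigned int rev_field)
	{
		if (rev_field & (1 << 5))
			return 23;
		if (rev_field & (1 << 4))
			return 22;
		if (rev_field & (1 << 3))
			return 21;
		if (rev_field & (1 << 2))
			return 20;
		if (rev_field & (1 << 1))
			return 10;
		return 0;			/* unsupported */
	}

	int main(void)
	{
		unsigned int samples[] = { 0x02, 0x0e, 0x3e, 0x01 };

		for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("revision field 0x%02x -> ONFI %d\n",
			       samples[i], onfi_version(samples[i]));
		return 0;
	}
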
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 586b981..6ebd869 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1092,7 +1092,8 @@
 
 /**
  * verify_bbt_descr - verify the bad block description
- * @bd:			the table to verify
+ * @mtd:	MTD device structure
+ * @bd:		the table to verify
  *
  * This functions performs a few sanity checks on the bad block description
  * table.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a6a73aa..a5aa99f 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -210,12 +210,12 @@
 #define STATE_CMD_READ0        0x00000001 /* read data from the beginning of page */
 #define STATE_CMD_READ1        0x00000002 /* read data from the second half of page */
 #define STATE_CMD_READSTART    0x00000003 /* read data second command (large page devices) */
-#define STATE_CMD_PAGEPROG     0x00000004 /* start page programm */
+#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
 #define STATE_CMD_READOOB      0x00000005 /* read OOB area */
 #define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
 #define STATE_CMD_STATUS       0x00000007 /* read status */
 #define STATE_CMD_STATUS_M     0x00000008 /* read multi-plane status (isn't implemented) */
-#define STATE_CMD_SEQIN        0x00000009 /* sequential data imput */
+#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
 #define STATE_CMD_READID       0x0000000A /* read ID */
 #define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
 #define STATE_CMD_RESET        0x0000000C /* reset */
@@ -230,7 +230,7 @@
 #define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
 #define STATE_ADDR_MASK        0x00000070 /* address states mask */
 
-/* Durind data input/output the simulator is in these states */
+/* During data input/output the simulator is in these states */
 #define STATE_DATAIN           0x00000100 /* waiting for data input */
 #define STATE_DATAIN_MASK      0x00000100 /* data input states mask */
 
@@ -248,7 +248,7 @@
 
 /* Simulator's actions bit masks */
 #define ACTION_CPY       0x00100000 /* copy page/OOB to the internal buffer */
-#define ACTION_PRGPAGE   0x00200000 /* programm the internal buffer to flash */
+#define ACTION_PRGPAGE   0x00200000 /* program the internal buffer to flash */
 #define ACTION_SECERASE  0x00300000 /* erase sector */
 #define ACTION_ZEROOFF   0x00400000 /* don't add any offset to address */
 #define ACTION_HALFOFF   0x00500000 /* add to address half of page */
@@ -263,18 +263,18 @@
 #define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
 #define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
 #define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR     0x00000020 /* page number auto inctimentation is possible */
+#define OPT_AUTOINCR     0x00000020 /* page number auto incrementation is possible */
 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
 #define OPT_SMALLPAGE    (OPT_PAGE256  | OPT_PAGE512)  /* 256 and 512-byte page chips */
 
-/* Remove action bits ftom state */
+/* Remove action bits from state */
 #define NS_STATE(x) ((x) & ~ACTION_MASK)
 
 /*
  * Maximum previous states which need to be saved. Currently saving is
- * only needed for page programm operation with preceeded read command
+ * only needed for page program operation with preceded read command
  * (which is only valid for 512-byte pages).
  */
 #define NS_MAX_PREVSTATES 1
@@ -380,16 +380,16 @@
 	/* Read OOB */
 	{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
 			STATE_DATAOUT, STATE_READY}},
-	/* Programm page starting from the beginning */
+	/* Program page starting from the beginning */
 	{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
 			STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Programm page starting from the beginning */
+	/* Program page starting from the beginning */
 	{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
 			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Programm page starting from the second half */
+	/* Program page starting from the second half */
 	{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
 			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Programm OOB */
+	/* Program OOB */
 	{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
 			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
 	/* Erase sector */
@@ -470,7 +470,7 @@
 			err = -EINVAL;
 			goto err_close;
 		}
-		ns->pages_written = vmalloc(ns->geom.pgnum);
+		ns->pages_written = vzalloc(ns->geom.pgnum);
 		if (!ns->pages_written) {
 			NS_ERR("alloc_device: unable to allocate pages written array\n");
 			err = -ENOMEM;
@@ -483,7 +483,6 @@
 			goto err_free;
 		}
 		ns->cfile = cfile;
-		memset(ns->pages_written, 0, ns->geom.pgnum);
 		return 0;
 	}
 
@@ -1171,9 +1170,9 @@
  * of supported operations.
  *
  * Operation can be unknown because of the following.
- *   1. New command was accepted and this is the firs call to find the
+ *   1. New command was accepted and this is the first call to find the
  *      correspondent states chain. In this case ns->npstates = 0;
- *   2. There is several operations which begin with the same command(s)
+ *   2. There are several operations which begin with the same command(s)
  *      (for example program from the second half and read from the
  *      second half operations both begin with the READ1 command). In this
  *      case the ns->pstates[] array contains previous states.
@@ -1186,7 +1185,7 @@
  * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
  * zeroed).
  *
- * If there are several maches, the current state is pushed to the
+ * If there are several matches, the current state is pushed to the
  * ns->pstates.
  *
  * The operation can be unknown only while commands are input to the chip.
@@ -1195,10 +1194,10 @@
  * operation is searched using the following pattern:
  *     ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
  *
- * It is supposed that this pattern must either match one operation on
+ * It is supposed that this pattern must either match one operation or
  * none. There can't be ambiguity in that case.
  *
- * If no matches found, the functions does the following:
+ * If no matches found, the function does the following:
  *   1. if there are saved states present, try to ignore them and search
  *      again only using the last command. If nothing was found, switch
  *      to the STATE_READY state.
@@ -1668,7 +1667,7 @@
 
 	case ACTION_PRGPAGE:
 		/*
-		 * Programm page - move internal buffer data to the page.
+		 * Program page - move internal buffer data to the page.
 		 */
 
 		if (ns->lines.wp) {
@@ -1933,7 +1932,7 @@
 		NS_DBG("read_byte: all bytes were read\n");
 
 		/*
-		 * The OPT_AUTOINCR allows to read next conseqitive pages without
+		 * The OPT_AUTOINCR allows to read next consecutive pages without
 		 * new read operation cycle.
 		 */
 		if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 15682ec..28af71c 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -968,6 +968,6 @@
 module_init(omap_nand_init);
 module_exit(omap_nand_exit);
 
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 6ddb246..bb277a5 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -107,7 +107,7 @@
 	if (pasemi_nand_mtd)
 		return -ENODEV;
 
-	pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);
+	pr_debug("pasemi_nand at %pR\n", &res);
 
 	/* Allocate memory for MTD device structure and private data */
 	pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 17f8518..ea2c288 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -885,6 +885,7 @@
 	/* set info fields needed to __readid */
 	info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
 	info->reg_ndcr = ndcr;
+	info->cmdset = &default_cmdset;
 
 	if (__readid(info, &id))
 		return -ENODEV;
@@ -915,7 +916,6 @@
 
 	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
 	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-	info->cmdset = &default_cmdset;
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efb..6322d1f 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@
 
 	init_completion(&dev->dma_done);
 
-	dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
 	if (!dev->card_workqueue)
 		goto error9;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 054a41c..ca270a4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -277,8 +277,9 @@
 	ret = nand_scan_ident(mtd, 1, NULL);
 	if (!ret) {
 		if (mtd->writesize >= 512) {
-			chip->ecc.size = mtd->writesize;
-			chip->ecc.bytes = 3 * (mtd->writesize / 256);
+			/* Hardware ECC 6 byte ECC per 512 Byte data */
+			chip->ecc.size = 512;
+			chip->ecc.bytes = 6;
 		}
 		ret = nand_scan_tail(mtd);
 	}
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index e789149..ac08750 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -131,7 +131,7 @@
 	.remove		= __devexit_p(generic_onenand_remove),
 };
 
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 
 static int __init generic_onenand_init(void)
 {
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d0894ca7..c849cac 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -35,6 +35,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/mach/flash.h>
 #include <plat/gpmc.h>
@@ -63,8 +64,13 @@
 	int dma_channel;
 	int freq;
 	int (*setup)(void __iomem *base, int freq);
+	struct regulator *regulator;
 };
 
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL,  };
+#endif
+
 static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
 {
 	struct omap2_onenand *c = data;
@@ -108,8 +114,9 @@
 static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 {
 	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+	struct onenand_chip *this = mtd->priv;
 	unsigned int intr = 0;
-	unsigned int ctrl;
+	unsigned int ctrl, ctrl_mask;
 	unsigned long timeout;
 	u32 syscfg;
 
@@ -180,7 +187,8 @@
 			if (result == 0) {
 				/* Timeout after 20ms */
 				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
-				if (ctrl & ONENAND_CTRL_ONGO) {
+				if (ctrl & ONENAND_CTRL_ONGO &&
+				    !this->ongoing) {
 					/*
 					 * The operation seems to be still going
 					 * so give it some more time.
@@ -269,7 +277,11 @@
 		return -EIO;
 	}
 
-	if (ctrl & 0xFE9F)
+	ctrl_mask = 0xFE9F;
+	if (this->ongoing)
+		ctrl_mask &= ~0x8000;
+
+	if (ctrl & ctrl_mask)
 		wait_warn("unexpected controller status", state, ctrl, intr);
 
 	return 0;
@@ -591,6 +603,30 @@
 	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
 }
 
+static int omap2_onenand_enable(struct mtd_info *mtd)
+{
+	int ret;
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+	ret = regulator_enable(c->regulator);
+	if (ret != 0)
+		dev_err(&c->pdev->dev, "can't enable regulator\n");
+
+	return ret;
+}
+
+static int omap2_onenand_disable(struct mtd_info *mtd)
+{
+	int ret;
+	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+	ret = regulator_disable(c->regulator);
+	if (ret != 0)
+		dev_err(&c->pdev->dev, "can't disable regulator\n");
+
+	return ret;
+}
+
 static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 {
 	struct omap_onenand_platform_data *pdata;
@@ -705,8 +741,18 @@
 		}
 	}
 
+	if (pdata->regulator_can_sleep) {
+		c->regulator = regulator_get(&pdev->dev, "vonenand");
+		if (IS_ERR(c->regulator)) {
+			dev_err(&pdev->dev,  "Failed to get regulator\n");
+			goto err_release_dma;
+		}
+		c->onenand.enable = omap2_onenand_enable;
+		c->onenand.disable = omap2_onenand_disable;
+	}
+
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
-		goto err_release_dma;
+		goto err_release_regulator;
 
 	switch ((c->onenand.version_id >> 4) & 0xf) {
 	case 0:
@@ -727,13 +773,15 @@
 	}
 
 #ifdef CONFIG_MTD_PARTITIONS
-	if (pdata->parts != NULL)
-		r = add_mtd_partitions(&c->mtd, pdata->parts,
-				       pdata->nr_parts);
+	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
+	if (r > 0)
+		r = add_mtd_partitions(&c->mtd, c->parts, r);
+	else if (pdata->parts != NULL)
+		r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
 	else
 #endif
 		r = add_mtd_device(&c->mtd);
-	if (r < 0)
+	if (r)
 		goto err_release_onenand;
 
 	platform_set_drvdata(pdev, c);
@@ -742,6 +790,8 @@
 
 err_release_onenand:
 	onenand_release(&c->mtd);
+err_release_regulator:
+	regulator_put(c->regulator);
 err_release_dma:
 	if (c->dma_channel != -1)
 		omap_free_dma(c->dma_channel);
@@ -757,6 +807,7 @@
 err_free_cs:
 	gpmc_cs_free(c->gpmc_cs);
 err_kfree:
+	kfree(c->parts);
 	kfree(c);
 
 	return r;
@@ -766,18 +817,8 @@
 {
 	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
 
-	BUG_ON(c == NULL);
-
-#ifdef CONFIG_MTD_PARTITIONS
-	if (c->parts)
-		del_mtd_partitions(&c->mtd);
-	else
-		del_mtd_device(&c->mtd);
-#else
-	del_mtd_device(&c->mtd);
-#endif
-
 	onenand_release(&c->mtd);
+	regulator_put(c->regulator);
 	if (c->dma_channel != -1)
 		omap_free_dma(c->dma_channel);
 	omap2_onenand_shutdown(pdev);
@@ -789,6 +830,7 @@
 	iounmap(c->onenand.base);
 	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
 	gpmc_cs_free(c->gpmc_cs);
+	kfree(c->parts);
 	kfree(c);
 
 	return 0;
@@ -818,7 +860,7 @@
 module_init(omap2_onenand_init);
 module_exit(omap2_onenand_exit);
 
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
 MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 6b3a875..bac41ca 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -400,8 +400,7 @@
 		value = onenand_bufferram_address(this, block);
 		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
 
-		if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
-		    ONENAND_IS_4KB_PAGE(this))
+		if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
 			/* It is always BufferRAM0 */
 			ONENAND_SET_BUFFERRAM0(this);
 		else
@@ -430,7 +429,7 @@
 		case FLEXONENAND_CMD_RECOVER_LSB:
 		case ONENAND_CMD_READ:
 		case ONENAND_CMD_READOOB:
-			if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+			if (ONENAND_IS_4KB_PAGE(this))
 				/* It is always BufferRAM0 */
 				dataram = ONENAND_SET_BUFFERRAM0(this);
 			else
@@ -949,6 +948,8 @@
 		if (this->state == FL_READY) {
 			this->state = new_state;
 			spin_unlock(&this->chip_lock);
+			if (new_state != FL_PM_SUSPENDED && this->enable)
+				this->enable(mtd);
 			break;
 		}
 		if (new_state == FL_PM_SUSPENDED) {
@@ -975,6 +976,8 @@
 {
 	struct onenand_chip *this = mtd->priv;
 
+	if (this->state != FL_PM_SUSPENDED && this->disable)
+		this->disable(mtd);
 	/* Release the chip */
 	spin_lock(&this->chip_lock);
 	this->state = FL_READY;
@@ -1353,7 +1356,7 @@
 
 	stats = mtd->ecc_stats;
 
-	readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+	readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
 	while (read < len) {
 		cond_resched();
@@ -1429,7 +1432,7 @@
 	int ret;
 
 	onenand_get_device(mtd, FL_READING);
-	ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+	ret = ONENAND_IS_4KB_PAGE(this) ?
 		onenand_mlc_read_ops_nolock(mtd, from, &ops) :
 		onenand_read_ops_nolock(mtd, from, &ops);
 	onenand_release_device(mtd);
@@ -1464,7 +1467,7 @@
 
 	onenand_get_device(mtd, FL_READING);
 	if (ops->datbuf)
-		ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+		ret = ONENAND_IS_4KB_PAGE(this) ?
 			onenand_mlc_read_ops_nolock(mtd, from, ops) :
 			onenand_read_ops_nolock(mtd, from, ops);
 	else
@@ -1485,8 +1488,7 @@
 {
 	struct onenand_chip *this = mtd->priv;
 	unsigned long timeout;
-	unsigned int interrupt;
-	unsigned int ctrl;
+	unsigned int interrupt, ctrl, ecc, addr1, addr8;
 
 	/* The 20 msec is enough */
 	timeout = jiffies + msecs_to_jiffies(20);
@@ -1498,25 +1500,28 @@
 	/* To get correct interrupt status in timeout case */
 	interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
 	ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+	addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
+	addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
 
 	if (interrupt & ONENAND_INT_READ) {
-		int ecc = onenand_read_ecc(this);
+		ecc = onenand_read_ecc(this);
 		if (ecc & ONENAND_ECC_2BIT_ALL) {
-			printk(KERN_WARNING "%s: ecc error = 0x%04x, "
-				"controller error 0x%04x\n",
-				__func__, ecc, ctrl);
+			printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
+			       "intr 0x%04x addr1 %#x addr8 %#x\n",
+			       __func__, ecc, ctrl, interrupt, addr1, addr8);
 			return ONENAND_BBT_READ_ECC_ERROR;
 		}
 	} else {
-		printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
-			__func__, ctrl, interrupt);
+		printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
+		       "intr 0x%04x addr1 %#x addr8 %#x\n",
+		       __func__, ctrl, interrupt, addr1, addr8);
 		return ONENAND_BBT_READ_FATAL_ERROR;
 	}
 
 	/* Initial bad block case: 0x2400 or 0x0400 */
 	if (ctrl & ONENAND_CTRL_ERROR) {
-		printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
-			__func__, ctrl);
+		printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
+		       "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
 		return ONENAND_BBT_READ_ERROR;
 	}
 
@@ -1558,7 +1563,7 @@
 
 	column = from & (mtd->oobsize - 1);
 
-	readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+	readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
 	while (read < len) {
 		cond_resched();
@@ -1612,7 +1617,7 @@
 	u_char *oob_buf = this->oob_buf;
 	int status, i, readcmd;
 
-	readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+	readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
 	this->command(mtd, readcmd, to, mtd->oobsize);
 	onenand_update_bufferram(mtd, to, 0);
@@ -1845,7 +1850,7 @@
 	const u_char *buf = ops->datbuf;
 	const u_char *oob = ops->oobbuf;
 	u_char *oobbuf;
-	int ret = 0;
+	int ret = 0, cmd;
 
 	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
 		__func__, (unsigned int) to, (int) len);
@@ -1954,7 +1959,19 @@
 			ONENAND_SET_NEXT_BUFFERRAM(this);
 		}
 
-		this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+		this->ongoing = 0;
+		cmd = ONENAND_CMD_PROG;
+
+		/* Exclude 1st OTP and OTP blocks for cache program feature */
+		if (ONENAND_IS_CACHE_PROGRAM(this) &&
+		    likely(onenand_block(this, to) != 0) &&
+		    ONENAND_IS_4KB_PAGE(this) &&
+		    ((written + thislen) < len)) {
+			cmd = ONENAND_CMD_2X_CACHE_PROG;
+			this->ongoing = 1;
+		}
+
+		this->command(mtd, cmd, to, mtd->writesize);
 
 		/*
 		 * 2 PLANE, MLC, and Flex-OneNAND wait here
@@ -2067,7 +2084,7 @@
 
 	oobbuf = this->oob_buf;
 
-	oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+	oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
 
 	/* Loop until all data write */
 	while (written < len) {
@@ -2086,7 +2103,7 @@
 			memcpy(oobbuf + column, buf, thislen);
 		this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
 
-		if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
+		if (ONENAND_IS_4KB_PAGE(this)) {
 			/* Set main area of DataRAM to 0xff*/
 			memset(this->page_buf, 0xff, mtd->writesize);
 			this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -2481,7 +2498,8 @@
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_ERASING);
 
-	if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+	if (ONENAND_IS_4KB_PAGE(this) || region ||
+	    instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
 		/* region is set for Flex-OneNAND (no mb erase) */
 		ret = onenand_block_by_block_erase(mtd, instr,
 						   region, block_size);
@@ -3029,7 +3047,7 @@
 	this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
 	this->wait(mtd, FL_OTPING);
 
-	ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+	ret = ONENAND_IS_4KB_PAGE(this) ?
 		onenand_mlc_read_ops_nolock(mtd, from, &ops) :
 		onenand_read_ops_nolock(mtd, from, &ops);
 
@@ -3377,8 +3395,10 @@
 	case ONENAND_DEVICE_DENSITY_4Gb:
 		if (ONENAND_IS_DDP(this))
 			this->options |= ONENAND_HAS_2PLANE;
-		else if (numbufs == 1)
+		else if (numbufs == 1) {
 			this->options |= ONENAND_HAS_4KB_PAGE;
+			this->options |= ONENAND_HAS_CACHE_PROGRAM;
+		}
 
 	case ONENAND_DEVICE_DENSITY_2Gb:
 		/* 2Gb DDP does not have 2 plane */
@@ -3399,7 +3419,11 @@
 		break;
 	}
 
-	if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+	/* The MLC has 4KiB pagesize. */
+	if (ONENAND_IS_MLC(this))
+		this->options |= ONENAND_HAS_4KB_PAGE;
+
+	if (ONENAND_IS_4KB_PAGE(this))
 		this->options &= ~ONENAND_HAS_2PLANE;
 
 	if (FLEXONENAND(this)) {
@@ -3415,6 +3439,8 @@
 		printk(KERN_DEBUG "Chip has 2 plane\n");
 	if (this->options & ONENAND_HAS_4KB_PAGE)
 		printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
+	if (this->options & ONENAND_HAS_CACHE_PROGRAM)
+		printk(KERN_DEBUG "Chip has cache program feature\n");
 }
 
 /**
@@ -3831,7 +3857,7 @@
 	/* The data buffer size is equal to page size */
 	mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
 	/* We use the full BufferRAM */
-	if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+	if (ONENAND_IS_4KB_PAGE(this))
 		mtd->writesize <<= 1;
 
 	mtd->oobsize = mtd->writesize >> 5;
@@ -4054,6 +4080,7 @@
 	mtd->block_isbad = onenand_block_isbad;
 	mtd->block_markbad = onenand_block_markbad;
 	mtd->owner = THIS_MODULE;
+	mtd->writebufsize = mtd->writesize;
 
 	/* Unlock whole block */
 	this->unlock_all(mtd);
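
The onenand_base.c changes fold ONENAND_IS_MLC() into ONENAND_IS_4KB_PAGE() (MLC parts simply get the 4KiB-page option set once in the feature-detection code), wire the new enable/disable hooks into device get/release, and add 2X cache programming for 4KiB-page chips that advertise the feature. The cache command is used for every chunk of a write except the first (OTP) block and the very last chunk, which must complete as a normal program. A stand-alone sketch of that decision:

	#include <stdio.h>
	#include <stddef.h>

	static int use_cache_prog(int has_cache_prog, int is_4kb_page,
				  unsigned int block, size_t written,
				  size_t thislen, size_t len)
	{
		return has_cache_prog && block != 0 && is_4kb_page &&
		       (written + thislen) < len;
	}

	int main(void)
	{
		/* two-page write into block 5 of a feature-capable 4KiB-page chip */
		printf("first chunk: %d (2X cache program)\n",
		       use_cache_prog(1, 1, 5, 0, 4096, 8192));
		printf("last chunk:  %d (normal program)\n",
		       use_cache_prog(1, 1, 5, 4096, 4096, 8192));
		return 0;
	}
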
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 01ab5b3..fc2c16a 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -91,16 +91,18 @@
 		for (j = 0; j < len; j++) {
 			/* No need to read pages fully,
 			 * just read required OOB bytes */
-			ret = onenand_bbt_read_oob(mtd, from + j * mtd->writesize + bd->offs, &ops);
+			ret = onenand_bbt_read_oob(mtd,
+				from + j * this->writesize + bd->offs, &ops);
 
 			/* If it is a initial bad block, just ignore it */
 			if (ret == ONENAND_BBT_READ_FATAL_ERROR)
 				return -EIO;
 
-			if (ret || check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
+			if (ret || check_short_pattern(&buf[j * scanlen],
+					       scanlen, this->writesize, bd)) {
 				bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
-				printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
-					i >> 1, (unsigned int) from);
+				printk(KERN_INFO "OneNAND eraseblock %d is an "
+					"initial bad block\n", i >> 1);
 				mtd->ecc_stats.badblocks++;
 				break;
 			}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 0de7a05..a4c74a9 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -651,7 +651,7 @@
 	void __iomem *p;
 	void *buf = (void *) buffer;
 	dma_addr_t dma_src, dma_dst;
-	int err, page_dma = 0;
+	int err, ofs, page_dma = 0;
 	struct device *dev = &onenand->pdev->dev;
 
 	p = this->base + area;
@@ -677,10 +677,13 @@
 		if (!page)
 			goto normal;
 
+		/* Page offset */
+		ofs = ((size_t) buf & ~PAGE_MASK);
 		page_dma = 1;
+
 		/* DMA routine */
 		dma_src = onenand->phys_base + (p - this->base);
-		dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
+		dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
 	} else {
 		/* DMA routine */
 		dma_src = onenand->phys_base + (p - this->base);
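
The samsung.c hunk fixes the DMA read path for buffers that do not start on a page boundary: dma_map_page() is now given the buffer's offset inside its page instead of a hard-coded 0, so the transfer lands where the caller's buffer actually is. The offset is just the low bits of the virtual address; a stand-alone sketch with PAGE_SIZE assumed to be 4 KiB and an invented address:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		uintptr_t buf = 0x12345a30;		/* invented buffer address */
		uintptr_t ofs = buf & ~PAGE_MASK;	/* offset within its page */

		printf("buffer 0x%lx maps at page offset 0x%lx\n",
		       (unsigned long)buf, (unsigned long)ofs);
		return 0;
	}
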
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf..ac0d6a8 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@
 static __init int sm_module_init(void)
 {
 	int error = 0;
-	cache_flush_workqueue = create_freezeable_workqueue("smflush");
+	cache_flush_workqueue = create_freezable_workqueue("smflush");
 
 	if (IS_ERR(cache_flush_workqueue))
 		return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fcdb7f6..0b8141f 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -425,12 +425,11 @@
 
 	/* Read both LEB 0 and LEB 1 into memory */
 	ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-		leb[seb->lnum] = vmalloc(ubi->vtbl_size);
+		leb[seb->lnum] = vzalloc(ubi->vtbl_size);
 		if (!leb[seb->lnum]) {
 			err = -ENOMEM;
 			goto out_free;
 		}
-		memset(leb[seb->lnum], 0, ubi->vtbl_size);
 
 		err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
 				       ubi->vtbl_size);
@@ -516,10 +515,9 @@
 	int i;
 	struct ubi_vtbl_record *vtbl;
 
-	vtbl = vmalloc(ubi->vtbl_size);
+	vtbl = vzalloc(ubi->vtbl_size);
 	if (!vtbl)
 		return ERR_PTR(-ENOMEM);
-	memset(vtbl, 0, ubi->vtbl_size);
 
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a..0382332 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
 	  controller on some Motorola ColdFire and Freescale i.MX processors.
 
-config FEC2
-	bool "Second FEC ethernet controller (on some ColdFire CPUs)"
-	depends on FEC
-	help
-	  Say Y here if you want to use the second built-in 10/100 Fast
-	  ethernet controller on some Motorola ColdFire processors.
-
 config FEC_MPC52xx
 	tristate "MPC52xx FEC driver"
 	depends on PPC_MPC52xx && PPC_BESTCOMM
@@ -2871,7 +2864,7 @@
 	default n
 
 config MLX4_DEBUG
-	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
 	depends on MLX4_CORE
 	default y
 	---help---
@@ -2970,6 +2963,7 @@
 config XEN_NETDEV_FRONTEND
 	tristate "Xen network device frontend driver"
 	depends on XEN
+	select XEN_XENBUS_FRONTEND
 	default y
 	help
 	  The network device frontend driver allows the kernel to
@@ -3395,8 +3389,7 @@
 
 config NETCONSOLE_DYNAMIC
 	bool "Dynamic reconfiguration of logging targets"
-	depends on NETCONSOLE && SYSFS
-	select CONFIGFS_FS
+	depends on NETCONSOLE && SYSFS && CONFIGFS_FS
 	help
 	  This option enables the ability to dynamically reconfigure target
 	  parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 39214e5..7ca0ede 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -425,11 +425,6 @@
     int csr0, boguscnt;
     int handled = 0;
 
-    if (dev == NULL) {
-	printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
-	return IRQ_NONE;
-    }
-
     lance->RAP = CSR0;			/* PCnet-ISA Controller Status */
 
     if (!(lance->RDP & INTR))		/* Check if any interrupt has been */
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d84..aa07657 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@
 }
 
 /**
- *	ks8695_get_settings - Get device-specific settings.
+ *	ks8695_wan_get_settings - Get device-specific settings.
  *	@ndev: The network device to read settings from
  *	@cmd: The ethtool structure to read into
  */
 static int
-ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
@@ -870,69 +870,50 @@
 			  SUPPORTED_TP | SUPPORTED_MII);
 	cmd->transceiver = XCVR_INTERNAL;
 
-	/* Port specific extras */
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		cmd->phy_address = 0;
-		/* not supported for HPNA */
+	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+	cmd->port = PORT_MII;
+	cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+	cmd->phy_address = 0;
+
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+	if ((ctrl & WMC_WAND) == 0) {
+		/* auto-negotiation is enabled */
+		cmd->advertising |= ADVERTISED_Autoneg;
+		if (ctrl & WMC_WANA100F)
+			cmd->advertising |= ADVERTISED_100baseT_Full;
+		if (ctrl & WMC_WANA100H)
+			cmd->advertising |= ADVERTISED_100baseT_Half;
+		if (ctrl & WMC_WANA10F)
+			cmd->advertising |= ADVERTISED_10baseT_Full;
+		if (ctrl & WMC_WANA10H)
+			cmd->advertising |= ADVERTISED_10baseT_Half;
+		if (ctrl & WMC_WANAP)
+			cmd->advertising |= ADVERTISED_Pause;
+		cmd->autoneg = AUTONEG_ENABLE;
+
+		cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+		cmd->duplex = (ctrl & WMC_WDS) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+	} else {
+		/* auto-negotiation is disabled */
 		cmd->autoneg = AUTONEG_DISABLE;
 
-		/* BUG: Erm, dtype hpna implies no phy regs */
-		/*
-		ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
-		cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
-		cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
-		*/
-		return -EOPNOTSUPP;
-	case KS8695_DTYPE_WAN:
-		cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
-		cmd->port = PORT_MII;
-		cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
-		cmd->phy_address = 0;
-
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-		if ((ctrl & WMC_WAND) == 0) {
-			/* auto-negotiation is enabled */
-			cmd->advertising |= ADVERTISED_Autoneg;
-			if (ctrl & WMC_WANA100F)
-				cmd->advertising |= ADVERTISED_100baseT_Full;
-			if (ctrl & WMC_WANA100H)
-				cmd->advertising |= ADVERTISED_100baseT_Half;
-			if (ctrl & WMC_WANA10F)
-				cmd->advertising |= ADVERTISED_10baseT_Full;
-			if (ctrl & WMC_WANA10H)
-				cmd->advertising |= ADVERTISED_10baseT_Half;
-			if (ctrl & WMC_WANAP)
-				cmd->advertising |= ADVERTISED_Pause;
-			cmd->autoneg = AUTONEG_ENABLE;
-
-			cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
-			cmd->duplex = (ctrl & WMC_WDS) ?
-				DUPLEX_FULL : DUPLEX_HALF;
-		} else {
-			/* auto-negotiation is disabled */
-			cmd->autoneg = AUTONEG_DISABLE;
-
-			cmd->speed = (ctrl & WMC_WANF100) ?
-				SPEED_100 : SPEED_10;
-			cmd->duplex = (ctrl & WMC_WANFF) ?
-				DUPLEX_FULL : DUPLEX_HALF;
-		}
-		break;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
+		cmd->speed = (ctrl & WMC_WANF100) ?
+			SPEED_100 : SPEED_10;
+		cmd->duplex = (ctrl & WMC_WANFF) ?
+			DUPLEX_FULL : DUPLEX_HALF;
 	}
 
 	return 0;
 }
 
 /**
- *	ks8695_set_settings - Set device-specific settings.
+ *	ks8695_wan_set_settings - Set device-specific settings.
  *	@ndev: The network device to configure
  *	@cmd: The settings to configure
  */
 static int
-ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
@@ -956,171 +937,85 @@
 				ADVERTISED_100baseT_Full)) == 0)
 			return -EINVAL;
 
-		switch (ksp->dtype) {
-		case KS8695_DTYPE_HPNA:
-			/* HPNA does not support auto-negotiation. */
-			return -EINVAL;
-		case KS8695_DTYPE_WAN:
-			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-			ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
-				  WMC_WANA10F | WMC_WANA10H);
-			if (cmd->advertising & ADVERTISED_100baseT_Full)
-				ctrl |= WMC_WANA100F;
-			if (cmd->advertising & ADVERTISED_100baseT_Half)
-				ctrl |= WMC_WANA100H;
-			if (cmd->advertising & ADVERTISED_10baseT_Full)
-				ctrl |= WMC_WANA10F;
-			if (cmd->advertising & ADVERTISED_10baseT_Half)
-				ctrl |= WMC_WANA10H;
+		ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+			  WMC_WANA10F | WMC_WANA10H);
+		if (cmd->advertising & ADVERTISED_100baseT_Full)
+			ctrl |= WMC_WANA100F;
+		if (cmd->advertising & ADVERTISED_100baseT_Half)
+			ctrl |= WMC_WANA100H;
+		if (cmd->advertising & ADVERTISED_10baseT_Full)
+			ctrl |= WMC_WANA10F;
+		if (cmd->advertising & ADVERTISED_10baseT_Half)
+			ctrl |= WMC_WANA10H;
 
-			/* force a re-negotiation */
-			ctrl |= WMC_WANR;
-			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
-			break;
-		case KS8695_DTYPE_LAN:
-			return -EOPNOTSUPP;
-		}
-
+		/* force a re-negotiation */
+		ctrl |= WMC_WANR;
+		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
 	} else {
-		switch (ksp->dtype) {
-		case KS8695_DTYPE_HPNA:
-			/* BUG: dtype_hpna implies no phy registers */
-			/*
-			ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
+		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-			ctrl &= ~(HMC_HSS | HMC_HDS);
-			if (cmd->speed == SPEED_100)
-				ctrl |= HMC_HSS;
-			if (cmd->duplex == DUPLEX_FULL)
-				ctrl |= HMC_HDS;
+		/* disable auto-negotiation */
+		ctrl |= WMC_WAND;
+		ctrl &= ~(WMC_WANF100 | WMC_WANFF);
 
-			__raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
-			*/
-			return -EOPNOTSUPP;
-		case KS8695_DTYPE_WAN:
-			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+		if (cmd->speed == SPEED_100)
+			ctrl |= WMC_WANF100;
+		if (cmd->duplex == DUPLEX_FULL)
+			ctrl |= WMC_WANFF;
 
-			/* disable auto-negotiation */
-			ctrl |= WMC_WAND;
-			ctrl &= ~(WMC_WANF100 | WMC_WANFF);
-
-			if (cmd->speed == SPEED_100)
-				ctrl |= WMC_WANF100;
-			if (cmd->duplex == DUPLEX_FULL)
-				ctrl |= WMC_WANFF;
-
-			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
-			break;
-		case KS8695_DTYPE_LAN:
-			return -EOPNOTSUPP;
-		}
+		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
 	}
 
 	return 0;
 }
 
 /**
- *	ks8695_nwayreset - Restart the autonegotiation on the port.
+ *	ks8695_wan_nwayreset - Restart the autonegotiation on the port.
 *	@ndev: The network device to restart autonegotiation on
  */
 static int
-ks8695_nwayreset(struct net_device *ndev)
+ks8695_wan_nwayreset(struct net_device *ndev)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
 
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* No phy means no autonegotiation on hpna */
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+	if ((ctrl & WMC_WAND) == 0)
+		writel(ctrl | WMC_WANR,
+		       ksp->phyiface_regs + KS8695_WMC);
+	else
+		/* auto-negotiation not enabled */
 		return -EINVAL;
-	case KS8695_DTYPE_WAN:
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
-		if ((ctrl & WMC_WAND) == 0)
-			writel(ctrl | WMC_WANR,
-			       ksp->phyiface_regs + KS8695_WMC);
-		else
-			/* auto-negotiation not enabled */
-			return -EINVAL;
-		break;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
-	}
 
 	return 0;
 }
 
 /**
- *	ks8695_get_link - Retrieve link status of network interface
- *	@ndev: The network interface to retrive the link status of.
- */
-static u32
-ks8695_get_link(struct net_device *ndev)
-{
-	struct ks8695_priv *ksp = netdev_priv(ndev);
-	u32 ctrl;
-
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* HPNA always has link */
-		return 1;
-	case KS8695_DTYPE_WAN:
-		/* WAN we can read the PHY for */
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-		return ctrl & WMC_WLS;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
-	}
-	return 0;
-}
-
-/**
- *	ks8695_get_pause - Retrieve network pause/flow-control advertising
+ *	ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
  *	@ndev: The device to retrieve settings from
  *	@param: The structure to fill out with the information
  */
 static void
-ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
 
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* No phy link on hpna to configure */
-		return;
-	case KS8695_DTYPE_WAN:
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-		/* advertise Pause */
-		param->autoneg = (ctrl & WMC_WANAP);
+	/* advertise Pause */
+	param->autoneg = (ctrl & WMC_WANAP);
 
-		/* current Rx Flow-control */
-		ctrl = ks8695_readreg(ksp, KS8695_DRXC);
-		param->rx_pause = (ctrl & DRXC_RFCE);
+	/* current Rx Flow-control */
+	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+	param->rx_pause = (ctrl & DRXC_RFCE);
 
-		/* current Tx Flow-control */
-		ctrl = ks8695_readreg(ksp, KS8695_DTXC);
-		param->tx_pause = (ctrl & DTXC_TFCE);
-		break;
-	case KS8695_DTYPE_LAN:
-		/* The LAN's "phy" is a direct-attached switch */
-		return;
-	}
-}
-
-/**
- *	ks8695_set_pause - Configure pause/flow-control
- *	@ndev: The device to configure
- *	@param: The pause parameters to set
- *
- *	TODO: Implement this
- */
-static int
-ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
-{
-	return -EOPNOTSUPP;
+	/* current Tx Flow-control */
+	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+	param->tx_pause = (ctrl & DTXC_TFCE);
 }
 
 /**
@@ -1140,12 +1035,17 @@
 static const struct ethtool_ops ks8695_ethtool_ops = {
 	.get_msglevel	= ks8695_get_msglevel,
 	.set_msglevel	= ks8695_set_msglevel,
-	.get_settings	= ks8695_get_settings,
-	.set_settings	= ks8695_set_settings,
-	.nway_reset	= ks8695_nwayreset,
-	.get_link	= ks8695_get_link,
-	.get_pauseparam = ks8695_get_pause,
-	.set_pauseparam = ks8695_set_pause,
+	.get_drvinfo	= ks8695_get_drvinfo,
+};
+
+static const struct ethtool_ops ks8695_wan_ethtool_ops = {
+	.get_msglevel	= ks8695_get_msglevel,
+	.set_msglevel	= ks8695_set_msglevel,
+	.get_settings	= ks8695_wan_get_settings,
+	.set_settings	= ks8695_wan_set_settings,
+	.nway_reset	= ks8695_wan_nwayreset,
+	.get_link	= ethtool_op_get_link,
+	.get_pauseparam = ks8695_wan_get_pause,
 	.get_drvinfo	= ks8695_get_drvinfo,
 };
 
@@ -1541,7 +1441,6 @@
 
 	/* driver system setup */
 	ndev->netdev_ops = &ks8695_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	ndev->watchdog_timeo	 = msecs_to_jiffies(watchdog);
 
 	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@
 	if (ksp->phyiface_regs && ksp->link_irq == -1) {
 		ks8695_init_switch(ksp);
 		ksp->dtype = KS8695_DTYPE_LAN;
+		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
 		ks8695_init_wan_phy(ksp);
 		ksp->dtype = KS8695_DTYPE_WAN;
+		SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
 	} else {
 		/* No initialisation since HPNA does not have a PHY */
 		ksp->dtype = KS8695_DTYPE_HPNA;
+		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	}
 
 	/* And bring up the net_device with the net core */
@@ -1742,7 +1644,7 @@
 module_init(ks8695_init);
 module_exit(ks8695_cleanup);
 
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf..3824382 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
 	/* required last entry */
 	{ 0 }
 };
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811f..a179cc6 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1786,6 +1786,10 @@
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = nonemb_cmd->va;
 	sge = nonembedded_sgl(wrb);
 
@@ -1801,6 +1805,7 @@
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b..28a32a6 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -312,11 +312,9 @@
 	if (adapter->link_up != link_up) {
 		adapter->link_speed = -1;
 		if (link_up) {
-			netif_start_queue(netdev);
 			netif_carrier_on(netdev);
 			printk(KERN_INFO "%s: Link up\n", netdev->name);
 		} else {
-			netif_stop_queue(netdev);
 			netif_carrier_off(netdev);
 			printk(KERN_INFO "%s: Link down\n", netdev->name);
 		}
@@ -2628,8 +2626,6 @@
 
 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
 		BE_NAPI_WEIGHT);
-
-	netif_stop_queue(netdev);
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9..22abfb3 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
  * Licensed under the GPL-2 or later.
  */
 
+#define DRV_VERSION	"1.1"
+#define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -41,12 +46,7 @@
 
 #include "bfin_mac.h"
 
-#define DRV_NAME	"bfin_mac"
-#define DRV_VERSION	"1.1"
-#define DRV_AUTHOR	"Bryan Wu, Luke Yang"
-#define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
-
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@
 		/* allocate a new skb for next time receive */
 		new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 		if (!new_skb) {
-			printk(KERN_NOTICE DRV_NAME
-			       ": init: low on mem - packet dropped\n");
+			pr_notice("init: low on mem - packet dropped\n");
 			goto init_error;
 		}
 		skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@
 
 init_error:
 	desc_list_free();
-	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+	pr_err("kmalloc failed\n");
 	return -ENOMEM;
 }
 
@@ -259,8 +258,7 @@
 	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
 		udelay(1);
 		if (timeout_cnt-- < 0) {
-			printk(KERN_ERR DRV_NAME
-			": wait MDC/MDIO transaction to complete timeout\n");
+			pr_err("wait MDC/MDIO transaction to complete timeout\n");
 			return -ETIMEDOUT;
 		}
 	}
@@ -350,9 +348,9 @@
 					opmode &= ~RMII_10;
 					break;
 				default:
-					printk(KERN_WARNING
-						"%s: Ack!  Speed (%d) is not 10/100!\n",
-						DRV_NAME, phydev->speed);
+					netdev_warn(dev,
+						"Ack! Speed (%d) is not 10/100!\n",
+						phydev->speed);
 					break;
 				}
 				bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@
 
 	/* now we are supposed to have a proper phydev, to attach to... */
 	if (!phydev) {
-		printk(KERN_INFO "%s: Don't found any phy device at all\n",
-			dev->name);
+		netdev_err(dev, "no phy device found\n");
 		return -ENODEV;
 	}
 
 	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
 		phy_mode != PHY_INTERFACE_MODE_MII) {
-		printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+		netdev_err(dev, "invalid phy interface mode\n");
 		return -EINVAL;
 	}
 
@@ -432,7 +429,7 @@
 			0, phy_mode);
 
 	if (IS_ERR(phydev)) {
-		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		netdev_err(dev, "could not attach PHY\n");
 		return PTR_ERR(phydev);
 	}
 
@@ -453,11 +450,10 @@
 	lp->old_duplex = -1;
 	lp->phydev = phydev;
 
-	printk(KERN_INFO "%s: attached PHY driver [%s] "
-	       "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
-	       "@sclk=%dMHz)\n",
-	       DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
-	       MDC_CLK, mdc_div, sclk/1000000);
+	pr_info("attached PHY driver [%s] "
+	        "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+	        phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+	        MDC_CLK, mdc_div, sclk/1000000);
 
 	return 0;
 }
@@ -502,7 +498,7 @@
 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
+	strcpy(info->driver, KBUILD_MODNAME);
 	strcpy(info->version, DRV_VERSION);
 	strcpy(info->fw_version, "N/A");
 	strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@
 };
 
 /**************************************************************************/
-void setup_system_regs(struct net_device *dev)
+static void setup_system_regs(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	int i;
@@ -592,6 +588,10 @@
 
 	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
 
+	/* Set vlan regs to let 1522-byte-long packets pass through */
+	bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+	bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
 	/* Initialize the TX DMA channel registers */
 	bfin_write_DMA2_X_COUNT(0);
 	bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@
 		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
 			udelay(1);
 		if (timeout_cnt == 0)
-			printk(KERN_ERR DRV_NAME
-					": fails to timestamp the TX packet\n");
+			netdev_err(netdev, "failed to timestamp the TX packet\n");
 		else {
 			struct skb_shared_hwtstamps shhwtstamps;
 			u64 ns;
@@ -1083,8 +1082,7 @@
 	 * in which case we simply drop the packet
 	 */
 	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
-		printk(KERN_NOTICE DRV_NAME
-		       ": rx: receive error - packet dropped\n");
+		netdev_notice(dev, "rx: receive error - packet dropped\n");
 		dev->stats.rx_dropped++;
 		goto out;
 	}
@@ -1094,8 +1092,7 @@
 
 	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 	if (!new_skb) {
-		printk(KERN_NOTICE DRV_NAME
-		       ": rx: low on mem - packet dropped\n");
+		netdev_notice(dev, "rx: low on mem - packet dropped\n");
 		dev->stats.rx_dropped++;
 		goto out;
 	}
@@ -1213,7 +1210,7 @@
 	int ret;
 	u32 opmode;
 
-	pr_debug("%s: %s\n", DRV_NAME, __func__);
+	pr_debug("%s\n", __func__);
 
 	/* Set RX DMA */
 	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1287,19 +1284,12 @@
 {
 	u32 emac_hashhi, emac_hashlo;
 	struct netdev_hw_addr *ha;
-	char *addrs;
 	u32 crc;
 
 	emac_hashhi = emac_hashlo = 0;
 
 	netdev_for_each_mc_addr(ha, dev) {
-		addrs = ha->addr;
-
-		/* skip non-multicast addresses */
-		if (!(*addrs & 1))
-			continue;
-
-		crc = ether_crc(ETH_ALEN, addrs);
+		crc = ether_crc(ETH_ALEN, ha->addr);
 		crc >>= 26;
 
 		if (crc & 0x20)
@@ -1323,7 +1313,7 @@
 	u32 sysctl;
 
 	if (dev->flags & IFF_PROMISC) {
-		printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+		netdev_info(dev, "set promisc mode\n");
 		sysctl = bfin_read_EMAC_OPMODE();
 		sysctl |= PR;
 		bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1383,7 @@
 	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
 	 */
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+		netdev_warn(dev, "no valid ethernet hw addr\n");
 		return -EINVAL;
 	}
 
@@ -1527,6 +1517,9 @@
 		goto out_err_mii_probe;
 	}
 
+	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
 	/* Fill in the fields of the device structure with ethernet values. */
 	ether_setup(ndev);
 
@@ -1558,7 +1551,7 @@
 	bfin_mac_hwtstamp_init(ndev);
 
 	/* now, print out the card info, in a short format.. */
-	dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
 	return 0;
 
@@ -1650,7 +1643,7 @@
 	 * so set the GPIO pins to Ethernet mode
 	 */
 	pin_req = mii_bus_pd->mac_peripherals;
-	rc = peripheral_request_list(pin_req, DRV_NAME);
+	rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
 	if (rc) {
 		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
 		return rc;
@@ -1739,7 +1732,7 @@
 	.resume = bfin_mac_resume,
 	.suspend = bfin_mac_suspend,
 	.driver = {
-		.name = DRV_NAME,
+		.name = KBUILD_MODNAME,
 		.owner	= THIS_MODULE,
 	},
 };
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68be..f8559ac 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
 #include <linux/etherdevice.h>
 #include <linux/bfin_mac.h>
 
+/*
+ * Disable hardware checksum for bug #5600 if the writeback cache is
+ * enabled. Otherwise, corrupted RX packets will be sent up the stack
+ * without an error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
 #define BFIN_MAC_CSUM_OFFLOAD
+#endif
 
 #define TX_RECLAIM_JIFFIES (HZ / 5)
 
@@ -68,7 +75,6 @@
 	 */
 	struct net_device_stats stats;
 
-	unsigned char Mac[6];	/* MAC address of the board */
 	spinlock_t lock;
 
 	int wol;		/* Wake On Lan */
@@ -76,6 +82,9 @@
 	struct timer_list tx_reclaim_timer;
 	struct net_device *ndev;
 
+	/* Data for EMAC_VLAN1 regs */
+	u16 vlan1_mask, vlan2_mask;
+
 	/* MII and PHY stuffs */
 	int old_link;          /* used by bf537_adjust_link */
 	int old_speed;
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae..142d604 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@
 
 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
 	if (ioc_attr) {
-		memset(ioc_attr, 0, sizeof(*ioc_attr));
 		spin_lock_irqsave(&bnad->bna_lock, flags);
 		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf..0ba59d5 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -7553,6 +7553,10 @@
 	    !(data & ETH_FLAG_RXVLAN))
 		return -EINVAL;
 
+	/* TSO with VLAN tag won't work with current firmware */
+	if (!(data & ETH_FLAG_TXVLAN))
+		return -EINVAL;
+
 	rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
 				  ETH_FLAG_TXVLAN);
 	if (rc)
@@ -7962,11 +7966,8 @@
 
 		/* AER (Advanced Error Reporting) hooks */
 		err = pci_enable_pcie_error_reporting(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-					    "failed 0x%x\n", err);
-			/* non-fatal, continue */
-		}
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
 
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8230,10 @@
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	if (bp->regview) {
 		iounmap(bp->regview);
@@ -8418,8 +8421,10 @@
 
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	free_netdev(dev);
 
@@ -8535,7 +8540,7 @@
 	}
 	rtnl_unlock();
 
-	if (!(bp->flags & BNX2_FLAG_PCIE))
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
 		return result;
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e8..f459fb2 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6741,6 +6741,7 @@
 #define BNX2_FLAG_JUMBO_BROKEN		0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
 #define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
 
 	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
 
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d..8849699 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-3"
-#define DRV_MODULE_RELDATE      "2010/12/21"
+#define DRV_MODULE_VERSION      "1.62.00-6"
+#define DRV_MODULE_RELDATE      "2011/01/30"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -636,6 +636,7 @@
 
 #define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
 #define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
+#define CHIP_PARITY_ENABLED(bp)	(CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
 	int			flash_size;
 #define NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
@@ -1210,6 +1211,7 @@
 	/* DCBX Negotiation results */
 	struct dcbx_features			dcbx_local_feat;
 	u32					dcbx_error;
+	u32					pending_max;
 };
 
 /**
@@ -1414,12 +1416,12 @@
 		else
 
 /* skip rx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
  */
 #define skip_rx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
 /* skip tx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
  */
 #define skip_tx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
@@ -1612,19 +1614,23 @@
 #define BNX2X_BTR			4
 #define MAX_SPQ_PENDING			8
 
-
-/* CMNG constants
-   derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE			100
-/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC	100
-/* resolution of fairness algorithm in usecs -
-   coefficient for calculating the actual t fair */
-#define T_FAIR_COEF			10000000
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE					100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC			400
 /* number of bytes in single QM arbitration cycle -
-   coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES			40000
-#define FAIR_MEM			2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES					160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES						100
+/* how many bytes above threshold for the minimal credit of Min algorithm */
+#define MIN_ABOVE_THRESH				32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF	((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM					2
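+/* For reference, with the constants above this evaluates to:
+ * T_FAIR_COEF = (32768 + 160000) * 8 * 100 = 154214400
+ */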
 
 
 #define ATTN_NIG_FOR_FUNC		(1L << 8)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d..a71b329 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@
 #endif
 }
 
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *		nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN	12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using its first packet.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ *		     aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+				    u16 len_on_bd)
+{
+	/* A TPA aggregation won't have IP options or TCP options
+	 * other than the timestamp.
+	 */
+	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+	/* Check if there was a TCP timestamp; if there is, it will
+	 * always be 12 bytes long: nop nop kind length echo val.
+	 *
+	 * Otherwise FW would close the aggregation.
+	 */
+	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+		hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+	return len_on_bd - hdrs_len;
+}
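+
+/* Worked example (assuming the usual 14-byte Ethernet, 20-byte IP and
+ * 20-byte TCP headers): a 1514-byte first frame gives hdrs_len = 54 and
+ * an estimated MSS of 1460; with the TCP timestamp option present,
+ * hdrs_len = 66 and the estimate drops to 1448.
+ */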
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       struct sk_buff *skb,
 			       struct eth_fast_path_rx_cqe *fp_cqe,
-			       u16 cqe_idx)
+			       u16 cqe_idx, u16 parsing_flags)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-					       max(frag_size, (u32)len_on_bd));
+		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+							      len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
 		/* (no need to map the new skb) */
+		u16 parsing_flags =
+			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@
 		}
 
 		if (!bnx2x_fill_frag_skb(bp, fp, skb,
-					 &cqe->fast_path_cqe, cqe_idx)) {
-			if ((le16_to_cpu(cqe->fast_path_cqe.
-			    pars_flags.flags) & PARSING_FLAGS_VLAN))
+					 &cqe->fast_path_cqe, cqe_idx,
+					 parsing_flags)) {
+			if (parsing_flags & PARSING_FLAGS_VLAN)
 				__vlan_hwaccel_put_tag(skb,
 						 le16_to_cpu(cqe->fast_path_cqe.
 							     vlan_tag));
@@ -703,19 +739,20 @@
 {
 	u16 line_speed = bp->link_vars.line_speed;
 	if (IS_MF(bp)) {
-		u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
-						FUNC_MF_CFG_MAX_BW_MASK) >>
-						FUNC_MF_CFG_MAX_BW_SHIFT;
-		/* Calculate the current MAX line speed limit for the DCC
-		 * capable devices
+		u16 maxCfg = bnx2x_extract_max_cfg(bp,
+						   bp->mf_config[BP_VN(bp)]);
+
+		/* Calculate the current MAX line speed limit for the MF
+		 * devices
 		 */
-		if (IS_MF_SD(bp)) {
+		if (IS_MF_SI(bp))
+			line_speed = (line_speed * maxCfg) / 100;
+		else { /* SD mode */
 			u16 vn_max_rate = maxCfg * 100;
 
 			if (vn_max_rate < line_speed)
 				line_speed = vn_max_rate;
-		} else /* IS_MF_SI(bp)) */
-			line_speed = (line_speed * maxCfg) / 100;
+		}
 	}
 
 	return line_speed;
@@ -959,6 +996,23 @@
 	bnx2x_free_rx_skbs(bp);
 }
 
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+	/* load old values */
+	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+		/* leave all but MAX value */
+		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+		/* set new MAX value */
+		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+				& FUNC_MF_CFG_MAX_BW_MASK;
+
+		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+	}
+}
+
 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
 {
 	int i, offset = 1;
@@ -1427,6 +1481,11 @@
 
 	bnx2x_set_eth_mac(bp, 1);
 
+	if (bp->pending_max) {
+		bnx2x_update_max_mf_config(bp, bp->pending_max);
+		bp->pending_max = 0;
+	}
+
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);
 
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d6..85ea7f2 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -341,6 +341,15 @@
  */
 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
 
+/**
+ * Updates MAX part of MF configuration in HW
+ * (if required)
+ *
+ * @param bp
+ * @param value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
 
@@ -1044,4 +1053,24 @@
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+			      FUNC_MF_CFG_MAX_BW_SHIFT;
+	if (!max_cfg) {
+		BNX2X_ERR("Illegal configuration detected for Max BW - "
+			  "using 100 instead\n");
+		max_cfg = 100;
+	}
+	return max_cfg;
+}
+
 #endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25..fb3ff7c 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
 /* bnx2x_dump.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2009 Broadcom Corporation
+ * Copyright (c) 2011 Broadcom Corporation
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
  */
 
 
@@ -17,53 +23,53 @@
 #define BNX2X_DUMP_H
 
 
+
+/* definitions */
+#define XSTORM_WAITP_ADDR    0x2b8a80
+#define TSTORM_WAITP_ADDR    0x1b8a80
+#define USTORM_WAITP_ADDR    0x338a80
+#define CSTORM_WAITP_ADDR    0x238a80
+#define TSTORM_CAM_MODE         0x1B1440
+
+#define MAX_TIMER_PENDING      200
+#define TIMER_SCAN_DONT_CARE   0xFF
+#define RI_E1			0x1
+#define RI_E1H			0x2
+#define RI_E2			0x4
+#define RI_ONLINE		0x100
+#define RI_PATH0_DUMP		0x200
+#define RI_PATH1_DUMP		0x400
+#define RI_E1_OFFLINE		(RI_E1)
+#define RI_E1_ONLINE		(RI_E1 | RI_ONLINE)
+#define RI_E1H_OFFLINE		(RI_E1H)
+#define RI_E1H_ONLINE		(RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE		(RI_E2)
+#define RI_E2_ONLINE		(RI_E2 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE	(RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE		(RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1HE2_OFFLINE	(RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE		(RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE		(RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE		(RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_ALL_OFFLINE         (RI_E1 | RI_E1H | RI_E2)
+#define RI_ALL_ONLINE          (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+
 struct dump_sign {
 	u32 time_stamp;
 	u32 diag_ver;
 	u32 grc_dump_ver;
 };
 
-#define TSTORM_WAITP_ADDR		0x1b8a80
-#define CSTORM_WAITP_ADDR		0x238a80
-#define XSTORM_WAITP_ADDR		0x2b8a80
-#define USTORM_WAITP_ADDR		0x338a80
-#define TSTORM_CAM_MODE			0x1b1440
-
-#define RI_E1				0x1
-#define RI_E1H				0x2
-#define RI_E2			0x4
-#define RI_ONLINE			0x100
-#define RI_PATH0_DUMP		0x200
-#define RI_PATH1_DUMP		0x400
-#define RI_E1_OFFLINE			(RI_E1)
-#define RI_E1_ONLINE			(RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE			(RI_E1H)
-#define RI_E1H_ONLINE			(RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE			(RI_E2)
-#define RI_E2_ONLINE			(RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE		(RI_E1 | RI_E1H)
-#define RI_E1E1H_ONLINE			(RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE		(RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE			(RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE			(RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE			(RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE			(RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE			(RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-
-#define MAX_TIMER_PENDING		200
-#define TIMER_SCAN_DONT_CARE		0xFF
-
-
 struct dump_hdr {
-	u32		 hdr_size;	/* in dwords, excluding this field */
-	struct dump_sign dump_sign;
-	u32		 xstorm_waitp;
-	u32		 tstorm_waitp;
-	u32		 ustorm_waitp;
-	u32		 cstorm_waitp;
-	u16		 info;
-	u8		 idle_chk;
-	u8		 reserved;
+	u32  hdr_size;	/* in dwords, excluding this field */
+	struct dump_sign	dump_sign;
+	u32  xstorm_waitp;
+	u32  tstorm_waitp;
+	u32  ustorm_waitp;
+	u32  cstorm_waitp;
+	u16  info;
+	u8   idle_chk;
+	u8   reserved;
 };
 
 struct reg_addr {
@@ -80,202 +86,185 @@
 	u16 info;
 };
 
-
-#define REGS_COUNT			558
+#define REGS_COUNT			834
 static const struct reg_addr reg_addrs[REGS_COUNT] = {
 	{ 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
 	{ 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
-	{ 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
-	{ 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
-	{ 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
-	{ 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
-	{ 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
-	{ 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
-	{ 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
-	{ 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
-	{ 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
-	{ 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
-	{ 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
-	{ 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
-	{ 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
-	{ 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
-	{ 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
-	{ 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
-	{ 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
-	{ 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
-	{ 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
-	{ 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
-	{ 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
-	{ 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
-	{ 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
-	{ 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
-	{ 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
-	{ 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
-	{ 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
-	{ 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
-	{ 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
-	{ 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
-	{ 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
-	{ 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
-	{ 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
-	{ 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
-	{ 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
-	{ 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
-	{ 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
-	{ 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
-	{ 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
-	{ 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
-	{ 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
-	{ 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
-	{ 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
-	{ 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
-	{ 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
+	{ 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
+	{ 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
+	{ 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
+	{ 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
+	{ 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
+	{ 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
+	{ 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
+	{ 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
+	{ 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
+	{ 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
+	{ 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
+	{ 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
+	{ 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
+	{ 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
+	{ 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
+	{ 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
+	{ 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
+	{ 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
+	{ 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
+	{ 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
+	{ 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
+	{ 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
+	{ 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
+	{ 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
+	{ 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
+	{ 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
+	{ 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
+	{ 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
+	{ 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
+	{ 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
+	{ 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
+	{ 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
+	{ 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
+	{ 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
+	{ 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
+	{ 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
+	{ 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
+	{ 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
+	{ 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
+	{ 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
+	{ 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
+	{ 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
+	{ 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
+	{ 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
+	{ 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
+	{ 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
+	{ 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
+	{ 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
+	{ 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
+	{ 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
+	{ 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
+	{ 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
+	{ 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
+	{ 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
+	{ 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
+	{ 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
+	{ 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
 	{ 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
 	{ 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
-	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
-	{ 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
-	{ 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
-	{ 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
-	{ 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
-	{ 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
-	{ 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
-	{ 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
-	{ 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
-	{ 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
+	{ 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
+	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
+	{ 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
+	{ 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
+	{ 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
+	{ 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
+	{ 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
+	{ 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
+	{ 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
+	{ 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
+	{ 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
+	{ 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
+	{ 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
 	{ 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
 	{ 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
-	{ 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
-	{ 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
-	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
+	{ 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
+	{ 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
+	{ 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
+	{ 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
+	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
+	{ 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
 	{ 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
-	{ 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
-	{ 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
-	{ 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
-	{ 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
-	{ 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
-	{ 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
-	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
-	{ 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
-	{ 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
-	{ 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
+	{ 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
+	{ 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
+	{ 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
+	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
+	{ 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
+	{ 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
 	{ 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
 	{ 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
-	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
-	{ 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
-	{ 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
-	{ 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
+	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
+	{ 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
+	{ 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
+	{ 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
 	{ 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
 	{ 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
-	{ 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
-	{ 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
-	{ 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
-	{ 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
-	{ 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
-	{ 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
-	{ 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
-	{ 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
-	{ 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
-	{ 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
+	{ 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
+	{ 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
+	{ 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
+	{ 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
+	{ 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
+	{ 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
+	{ 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
+	{ 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
+	{ 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
+	{ 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
 	{ 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
 	{ 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
 	{ 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
 	{ 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
-	{ 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
-	{ 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
-	{ 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
-	{ 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
+	{ 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
+	{ 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
+	{ 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
 	{ 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
-	{ 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
-	{ 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
-	{ 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
+	{ 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
+	{ 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
+	{ 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
+	{ 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
 	{ 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
-	{ 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
-	{ 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
-	{ 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
-	{ 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
-	{ 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
-	{ 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
-	{ 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
-	{ 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
-	{ 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
-	{ 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
-	{ 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
-	{ 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
-	{ 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
-	{ 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
-	{ 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
-	{ 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
-	{ 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
-	{ 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
-	{ 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
-	{ 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
-	{ 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
-	{ 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
-	{ 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
-	{ 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
-	{ 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
-	{ 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
-	{ 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
-	{ 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
-	{ 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
-	{ 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
-	{ 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
-	{ 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
-	{ 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
-	{ 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
-	{ 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
-	{ 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
-	{ 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
-	{ 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
-	{ 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
-	{ 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
-	{ 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
-	{ 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
-	{ 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
-	{ 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
-	{ 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
-	{ 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
-	{ 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
-	{ 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
-	{ 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
-	{ 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
-	{ 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
-	{ 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
-	{ 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
-	{ 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
-	{ 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
-	{ 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
-	{ 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
-	{ 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
-	{ 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
-	{ 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
-	{ 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
-	{ 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
-	{ 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
-	{ 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
-	{ 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
-	{ 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
-	{ 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
-	{ 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
-	{ 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
-	{ 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
-	{ 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
-	{ 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
-	{ 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
-	{ 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
-	{ 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
-	{ 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
-	{ 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
-	{ 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
-	{ 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
-	{ 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
-	{ 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
-	{ 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
-	{ 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
-	{ 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
-	{ 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
-	{ 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
-	{ 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
+	{ 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
+	{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
+	{ 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
+	{ 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
+	{ 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
+	{ 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
+	{ 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
+	{ 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
+	{ 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
+	{ 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
+	{ 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
+	{ 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
+	{ 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
+	{ 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
+	{ 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
+	{ 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
+	{ 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
+	{ 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
+	{ 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
+	{ 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
+	{ 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
+	{ 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
+	{ 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
+	{ 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
+	{ 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
+	{ 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
+	{ 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
+	{ 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
+	{ 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
+	{ 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
+	{ 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
+	{ 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
+	{ 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
+	{ 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
+	{ 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
+	{ 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
+	{ 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
+	{ 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
+	{ 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
+	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
+	{ 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
+	{ 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
+	{ 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
+	{ 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
+	{ 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
+	{ 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
+	{ 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
+	{ 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
+	{ 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
+	{ 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
+	{ 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
+	{ 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
 	{ 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
 	{ 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
 	{ 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@
 	{ 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
 	{ 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
 	{ 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
-	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
-	{ 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
+	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
 	{ 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
-	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
-	{ 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
-	{ 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
-	{ 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
-	{ 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
-	{ 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
-	{ 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
-	{ 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
-	{ 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
-	{ 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
-	{ 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
-	{ 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
-	{ 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
-	{ 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
-	{ 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
-	{ 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
+	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
+	{ 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
+	{ 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
+	{ 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
+	{ 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
+	{ 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
+	{ 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
+	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
+	{ 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
+	{ 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
+	{ 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
+	{ 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
+	{ 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
+	{ 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
+	{ 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
+	{ 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
+	{ 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
+	{ 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
+	{ 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
+	{ 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
+	{ 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
+	{ 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
+	{ 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
+	{ 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
+	{ 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
+	{ 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
 	{ 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
-	{ 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
-	{ 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
-	{ 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
-	{ 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
-	{ 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
-	{ 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
-	{ 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
+	{ 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
+	{ 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
+	{ 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
+	{ 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
+	{ 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
+	{ 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
+	{ 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
+	{ 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
+	{ 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
+	{ 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
+	{ 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
+	{ 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
+	{ 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
+	{ 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
 	{ 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
-	{ 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
-	{ 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
-	{ 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
-	{ 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
-	{ 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
-	{ 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
-	{ 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
-	{ 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
-	{ 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
-	{ 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
-	{ 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
-	{ 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
-	{ 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
-	{ 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
-	{ 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
-	{ 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
-	{ 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
-	{ 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
-	{ 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
-	{ 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
-	{ 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
-	{ 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
-	{ 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
-	{ 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
-	{ 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
-	{ 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
-	{ 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
-	{ 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
-	{ 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
-	{ 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
-	{ 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
-	{ 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
-	{ 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
-	{ 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
-	{ 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
-	{ 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
-	{ 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
+	{ 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
+	{ 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
+	{ 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
+	{ 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
+	{ 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
+	{ 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
+	{ 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
+	{ 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
+	{ 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
+	{ 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
+	{ 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
+	{ 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
+	{ 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
+	{ 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
+	{ 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
+	{ 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
+	{ 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
+	{ 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
+	{ 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
+	{ 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
+	{ 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
+	{ 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
+	{ 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
+	{ 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
+	{ 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
+	{ 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
+	{ 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
+	{ 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
+	{ 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
+	{ 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
+	{ 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
+	{ 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
+	{ 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
+	{ 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
+	{ 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
+	{ 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
+	{ 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
+	{ 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
+	{ 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
+	{ 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
+	{ 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
+	{ 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
+	{ 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
+	{ 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
+	{ 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
+	{ 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
+	{ 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+	{ 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+	{ 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
+	{ 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
+	{ 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
+	{ 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
+	{ 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
+	{ 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
+	{ 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
+	{ 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
+	{ 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
+	{ 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
+	{ 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
+	{ 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
+	{ 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
+	{ 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
+	{ 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
+	{ 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
+	{ 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
+	{ 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
+	{ 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
+	{ 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
+	{ 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
+	{ 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
+	{ 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
+	{ 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
+	{ 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
+	{ 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
+	{ 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
+	{ 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
+	{ 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
+	{ 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
+	{ 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
+	{ 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
+	{ 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
+	{ 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
+	{ 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
+	{ 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
+	{ 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
+	{ 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
+	{ 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
+	{ 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
+	{ 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
+	{ 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
+	{ 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
+	{ 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
+	{ 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
+	{ 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
+	{ 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
+	{ 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
+	{ 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
+	{ 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+	{ 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
+	{ 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
+	{ 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
+	{ 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
+	{ 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
+	{ 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
+	{ 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
+	{ 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
+	{ 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
+	{ 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
+	{ 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
+	{ 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
+	{ 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
+	{ 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
+	{ 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
+	{ 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
+	{ 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
+	{ 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
+	{ 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
+	{ 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
+	{ 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
 	{ 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
-	{ 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
+	{ 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
+	{ 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
+	{ 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
+	{ 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
 	{ 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
-	{ 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
-	{ 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
-	{ 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
-	{ 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
-	{ 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
-	{ 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
-	{ 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
+	{ 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
+	{ 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
+	{ 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
+	{ 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
+	{ 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
+	{ 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
+	{ 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
+	{ 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
+	{ 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
+	{ 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
+	{ 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
+	{ 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
+	{ 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
+	{ 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
+	{ 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
+	{ 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
 	{ 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
-	{ 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
-	{ 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
+	{ 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
+	{ 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
+	{ 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
+	{ 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
+	{ 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
+	{ 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
+	{ 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
+	{ 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
+	{ 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
+	{ 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
+	{ 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
+	{ 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
+	{ 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
+	{ 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
+	{ 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
+	{ 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
+	{ 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
+	{ 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
+	{ 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
+	{ 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
+	{ 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
+	{ 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
+	{ 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
+	{ 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
+	{ 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
+	{ 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
 };
 
-
-#define IDLE_REGS_COUNT			277
+#define IDLE_REGS_COUNT			237
 static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
-	{ 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
-	{ 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
-	{ 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
+	{ 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
+	{ 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
+	{ 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
+	{ 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
+	{ 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
+	{ 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
+	{ 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
+	{ 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
 	{ 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
-	{ 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
-	{ 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
-	{ 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
-	{ 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
-	{ 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
-	{ 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
-	{ 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
-	{ 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
-	{ 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
-	{ 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
-	{ 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
-	{ 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
-	{ 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
-	{ 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
-	{ 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
-	{ 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
-	{ 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
-	{ 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
-	{ 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
-	{ 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
-	{ 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
-	{ 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
-	{ 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
-	{ 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
-	{ 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
-	{ 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
-	{ 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
-	{ 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
-	{ 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
-	{ 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
-	{ 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
-	{ 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
-	{ 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
+	{ 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
+	{ 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
+	{ 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
+	{ 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
+	{ 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
+	{ 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
+	{ 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
+	{ 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
+	{ 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
+	{ 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
+	{ 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
+	{ 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
+	{ 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
+	{ 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
+	{ 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
+	{ 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
+	{ 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
+	{ 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
+	{ 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
+	{ 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
+	{ 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
+	{ 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
+	{ 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
+	{ 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
+	{ 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
+	{ 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
+	{ 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
+	{ 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
+	{ 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
+	{ 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
+	{ 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
+	{ 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
+	{ 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
+	{ 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
+	{ 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
+	{ 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
+	{ 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
 	{ 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
 	{ 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
 	{ 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
 	{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
-	{ 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
-	{ 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
-	{ 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
-	{ 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
-	{ 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
-	{ 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
-	{ 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
-	{ 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
-	{ 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
-	{ 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
-	{ 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
-	{ 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
-	{ 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
-	{ 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
-	{ 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
-	{ 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
-	{ 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
-	{ 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
-	{ 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
-	{ 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
-	{ 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
-	{ 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
-	{ 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
-	{ 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
-	{ 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
-	{ 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
-	{ 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
-	{ 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
-	{ 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
-	{ 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
-	{ 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
-	{ 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
-	{ 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
-	{ 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
-	{ 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
-	{ 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
-	{ 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
-	{ 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
-	{ 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
+	{ 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
+	{ 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
+	{ 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
+	{ 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
+	{ 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
+	{ 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
+	{ 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
+	{ 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
 	{ 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
 	{ 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
 	{ 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@
 	{ 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
 	{ 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
 	{ 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
-	{ 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
-	{ 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
-	{ 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
-	{ 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
-	{ 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
-	{ 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
-	{ 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
-	{ 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
-	{ 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
-	{ 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
-	{ 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
-	{ 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
-	{ 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
-	{ 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
+	{ 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
+	{ 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
+	{ 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
+	{ 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
+	{ 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
+	{ 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
+	{ 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
+	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
+	{ 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
+	{ 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
+	{ 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
+	{ 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
+	{ 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
+	{ 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
+	{ 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
+	{ 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
+	{ 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
+	{ 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
+	{ 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
 	{ 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
 	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
-	{ 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
-	{ 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
-	{ 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
-	{ 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
-	{ 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
-	{ 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
-	{ 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
-	{ 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
-	{ 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
-	{ 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
-	{ 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
-	{ 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
-	{ 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
-	{ 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
-	{ 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
-	{ 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
-	{ 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
-	{ 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
-	{ 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
-	{ 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
-	{ 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
-	{ 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
+	{ 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
+	{ 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
+	{ 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
+	{ 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
+	{ 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
+	{ 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
+	{ 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
+	{ 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
+	{ 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
+	{ 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
+	{ 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
+	{ 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
+	{ 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+	{ 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+	{ 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
+	{ 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
+	{ 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
+	{ 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+	{ 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
 	{ 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
 	{ 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
-	{ 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
-	{ 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
 	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
 	{ 0x3380c0, 1, RI_ALL_ONLINE }
 };
@@ -515,7 +635,6 @@
 	{ 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
 };
 
-
 #define WREGS_COUNT_E1H			1
 static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
 
@@ -530,22 +649,53 @@
 	{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
 };
 
-static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
-
+static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
 
 #define TIMER_REGS_COUNT_E1		2
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
-	{ 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
-	{ 0x1640d0, 0x1640d4 };
 
+static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
+	0x1640d0, 0x1640d4 };
 
 #define TIMER_REGS_COUNT_E1H		2
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
-	{ 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
-	{ 0x1640d0, 0x1640d4 };
 
+static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+	0x1640d0, 0x1640d4 };
+
+#define TIMER_REGS_COUNT_E2		2
+
+static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
+	0x1640d0, 0x1640d4 };
+
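+/* E1 and E1H have no page-mode registers; the zero-sized stubs below are
+ * assumed to exist only so the dump code can treat all chip families
+ * uniformly.
+ */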
+#define PAGE_MODE_VALUES_E1 0
+
+#define PAGE_READ_REGS_E1 0
+
+#define PAGE_WRITE_REGS_E1 0
+
+static const u32 page_vals_e1[] = { 0 };
+
+static const u32 page_write_regs_e1[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
+
+#define PAGE_MODE_VALUES_E1H 0
+
+#define PAGE_READ_REGS_E1H 0
+
+#define PAGE_WRITE_REGS_E1H 0
+
+static const u32 page_vals_e1h[] = { 0 };
+
+static const u32 page_write_regs_e1h[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1h[] = {
+	{ 0x0, 0, RI_E1H_ONLINE } };
 
 #define PAGE_MODE_VALUES_E2 2
 
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 99c672d..7e92f9d 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,7 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
+#include "bnx2x_init.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -237,7 +238,7 @@
 	speed |= (cmd->speed_hi << 16);
 
 	if (IS_MF_SI(bp)) {
-		u32 param = 0;
+		u32 part;
 		u32 line_speed = bp->link_vars.line_speed;
 
 		/* use 10G if no link detected */
@@ -250,23 +251,22 @@
 				       REQ_BC_VER_4_SET_MF_BW);
 			return -EINVAL;
 		}
-		if (line_speed < speed) {
-			BNX2X_DEV_INFO("New speed should be less or equal "
-				       "to actual line speed\n");
+
+		part = (speed * 100) / line_speed;
+
+		if (line_speed < speed || !part) {
+			BNX2X_DEV_INFO("Speed setting should be in a range "
+				       "from 1%% to 100%% "
+				       "of actual line speed\n");
 			return -EINVAL;
 		}
-		/* load old values */
-		param = bp->mf_config[BP_VN(bp)];
 
-		/* leave only MIN value */
-		param &= FUNC_MF_CFG_MIN_BW_MASK;
+		if (bp->state != BNX2X_STATE_OPEN)
+			/* store value for following "load" */
+			bp->pending_max = part;
+		else
+			bnx2x_update_max_mf_config(bp, part);
 
-		/* set new MAX value */
-		param |= (((speed * 100) / line_speed)
-				 << FUNC_MF_CFG_MAX_BW_SHIFT)
-				  & FUNC_MF_CFG_MAX_BW_MASK;
-
-		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
 		return 0;
 	}
 
@@ -472,7 +472,7 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	int regdump_len = 0;
-	int i;
+	int i, j, k;
 
 	if (CHIP_IS_E1(bp)) {
 		for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +502,15 @@
 			if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
 				regdump_len += wreg_addrs_e2[i].size *
 					(1 + wreg_addrs_e2[i].read_regs_count);
+
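+		/* Account for the E2 page-mode registers: each online page
+		 * register block is read once per page value and per
+		 * page-write register.
+		 */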
+		for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
+			for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+				for (k = 0; k < PAGE_READ_REGS_E2; k++)
+					if (IS_E2_ONLINE(page_read_regs_e2[k].
+							 info))
+						regdump_len +=
+						page_read_regs_e2[k].size;
+			}
 	}
 	regdump_len *= 4;
 	regdump_len += sizeof(struct dump_hdr);
@@ -539,6 +548,12 @@
 	if (!netif_running(bp->dev))
 		return;
 
+	/* Disable parity attentions, since the following dump may
+	 * cause false alarms by reading registers that were never
+	 * written. We will re-enable parity attentions right after
+	 * the dump.
+	 */
+	bnx2x_disable_blocks_parity(bp);
+
 	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
 	dump_hdr.dump_sign = dump_sign_all;
 	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +595,10 @@
 
 		bnx2x_read_pages_regs_e2(bp, p);
 	}
+	/* Re-enable parity attentions */
+	bnx2x_clear_blocks_parity(bp);
+	if (CHIP_PARITY_ENABLED(bp))
+		bnx2x_enable_blocks_parity(bp);
 }
 
 #define PHY_FW_VER_LEN			20
@@ -1761,9 +1780,7 @@
 		{ 0x100, 0x350 }, /* manuf_info */
 		{ 0x450,  0xf0 }, /* feature_info */
 		{ 0x640,  0x64 }, /* upgrade_key_info */
-		{ 0x6a4,  0x64 },
 		{ 0x708,  0x70 }, /* manuf_key_info */
-		{ 0x778,  0x70 },
 		{     0,     0 }
 	};
 	__be32 buf[0x350 / 4];
@@ -1913,11 +1930,11 @@
 		buf[4] = 1;
 		etest->flags |= ETH_TEST_FL_FAILED;
 	}
-	if (bp->port.pmf)
-		if (bnx2x_link_test(bp, is_serdes) != 0) {
-			buf[5] = 1;
-			etest->flags |= ETH_TEST_FL_FAILED;
-		}
+
+	if (bnx2x_link_test(bp, is_serdes) != 0) {
+		buf[5] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
 
 #ifdef BNX2X_EXTRA_DEBUG
 	bnx2x_panic_dump(bp);
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f..548f563 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -352,6 +352,10 @@
 #define PORT_HW_CFG_LANE_SWAP_CFG_31203120	    0x0000d8d8
 	/* forced only */
 #define PORT_HW_CFG_LANE_SWAP_CFG_32103210	    0x0000e4e4
+	/* Indicate whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK	       0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED	    0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED	    0x00010000
 
 	u32 external_phy_config;
 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK	    0xff000000
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d5487..fa6dbe3 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@
 	u64 next;
 };
 
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK, \
+	block##_REG_##block##_PRTY_STS_CLR, \
+	en_mask, {m1, m1h, m2}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK_0, \
+	block##_REG_##block##_PRTY_STS_CLR_0, \
+	en_mask, {m1, m1h, m2}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK_1, \
+	block##_REG_##block##_PRTY_STS_CLR_1, \
+	en_mask, {m1, m1h, m2}, #block"_1" \
+}
+
+static const struct {
+	u32 mask_addr;
+	u32 sts_clr_addr;
+	u32 en_mask;		/* Mask to enable parity attentions */
+	struct {
+		u32 e1;		/* 57710 */
+		u32 e1h;	/* 57711 */
+		u32 e2;		/* 57712 */
+	} reg_mask;		/* Register mask (all valid bits) */
+	char name[7];		/* Block's longest name is 6 characters long
+				 * (name + suffix)
+				 */
+} bnx2x_blocks_parity_data[] = {
+	/* bit 19 masked */
+	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+	/* bit 5,18,20-31 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+	/* bit 5 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
+	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+	/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+	 * want to handle "system kill" flow at the moment.
+	 */
+	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(PXP2,	0x7ff, 0x7f, 0x7f, 0x7ff),
+	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
+	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
+	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf}, "UPB"},
+	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf}, "XPB"},
+	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
+	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
+	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+	MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_0,
+	MISC_REG_AEU_ENABLE4_PXP_0,
+	MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_1,
+	MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+	int i;
+	u32 reg_val;
+
+	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+		reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+		if (enable)
+			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+		else
+			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+		REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+	}
+}
+
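+/* Return the parity register mask valid for the running chip:
+ * E1 (57710), E1H (57711) or E2 (57712).
+ */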
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+	if (CHIP_IS_E1(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+	else if (CHIP_IS_E1H(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+	else
+		return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+}
+
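+/* Write the full valid-bits mask to each block's PRTY_MASK register so that
+ * all parity attentions for this chip are masked; MCP parity attentions are
+ * disabled as well.
+ */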
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (dis_mask) {
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+			       dis_mask);
+			DP(NETIF_MSG_HW, "Setting parity mask "
+						 "for %s to\t\t0x%x\n",
+				    bnx2x_blocks_parity_data[i].name, dis_mask);
+		}
+	}
+
+	/* Disable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+	u32 reg_val, mcp_aeu_bits =
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+	/* Clear SEM_FAST parities */
+	REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask) {
+			reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+					 sts_clr_addr);
+			if (reg_val & reg_mask)
+				DP(NETIF_MSG_HW,
+					    "Parity errors in %s: 0x%x\n",
+					    bnx2x_blocks_parity_data[i].name,
+					    reg_val & reg_mask);
+		}
+	}
+
+	/* Check if there were parity attentions in MCP */
+	reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+	if (reg_val & mcp_aeu_bits)
+		DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+		   reg_val & mcp_aeu_bits);
+
+	/* Clear parity attentions in MCP:
+	 * [7]  clears Latched rom_parity
+	 * [8]  clears Latched ump_rx_parity
+	 * [9]  clears Latched ump_tx_parity
+	 * [10] clears Latched scpad_parity (both ports)
+	 */
+	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
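+/* Unmask parity attentions: write each block's enable mask, limited to the
+ * bits valid for this chip, and re-enable MCP parity attentions.
+ */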
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask)
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+				bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+	}
+
+	/* Enable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, true);
+}
+
+
 #endif /* BNX2X_INIT_H */
 
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de2..dd1210f 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1573,7 +1573,7 @@
 
 	offset = phy->addr + ser_lane;
 	if (CHIP_IS_E2(bp))
-		aer_val = 0x2800 + offset - 1;
+		aer_val = 0x3800 + offset - 1;
 	else
 		aer_val = 0x3800 + offset;
 	CL45_WR_OVER_CL22(bp, phy,
@@ -3166,7 +3166,23 @@
 		if (!vars->link_up)
 			break;
 	case LED_MODE_ON:
-		if (SINGLE_MEDIA_DIRECT(params)) {
+		if (params->phy[EXT_PHY1].type ==
+		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+		    CHIP_IS_E2(bp) && params->num_phys == 2) {
+			/*
+			 * This is a work-around for E2+8727 configurations.
+			 */
+			if (mode == LED_MODE_ON ||
+			    speed == SPEED_10000) {
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+				EMAC_WR(bp, EMAC_REG_EMAC_LED,
+					(tmp | EMAC_LED_OVERRIDE));
+				return rc;
+			}
+		} else if (SINGLE_MEDIA_DIRECT(params)) {
 			/**
 			* This is a work-around for HW issue found when link
 			* is up in CL73
@@ -3854,11 +3870,14 @@
 			   pause_result);
 	}
 }
-
-static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 					      struct bnx2x_phy *phy,
 					      u8 port)
 {
+	u32 count = 0;
+	u16 fw_ver1, fw_msgout;
+	u8 rc = 0;
+
 	/* Boot port from external ROM  */
 	/* EDC grst */
 	bnx2x_cl45_write(bp, phy,
@@ -3888,56 +3907,45 @@
 		       MDIO_PMA_REG_GEN_CTRL,
 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
-	/* wait for 120ms for code download via SPI port */
-	msleep(120);
+	/* Delay 100ms per the PHY specifications */
+	msleep(100);
+
+	/* The 8073 sometimes takes longer to download */
+	do {
+		count++;
+		if (count > 300) {
+			DP(NETIF_MSG_LINK,
+				 "bnx2x_8073_8727_external_rom_boot port %x: "
+				 "Download failed. fw version = 0x%x\n",
+				 port, fw_ver1);
+			rc = -EINVAL;
+			break;
+		}
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+		msleep(1);
+	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+			((fw_msgout & 0xff) != 0x03 && (phy->type ==
+			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
 
 	/* Clear ser_boot_ctl bit */
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 	bnx2x_save_bcm_spirom_ver(bp, phy, port);
-}
 
-static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
-					       struct bnx2x_phy *phy)
-{
-	u16 val;
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+	DP(NETIF_MSG_LINK,
+		 "bnx2x_8073_8727_external_rom_boot port %x: "
+		 "Download complete. fw version = 0x%x\n",
+		 port, fw_ver1);
 
-	if (val == 0) {
-		/* Mustn't set low power mode in 8073 A0 */
-		return;
-	}
-
-	/* Disable PLL sequencer (use read-modify-write to clear bit 13) */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-	val &= ~(1<<13);
-	bnx2x_cl45_write(bp, phy,
-		       MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
-	/* PLL controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
-
-	/* Tx Controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
-	/* Rx Controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
-
-	/* Enable PLL sequencer  (use read-modify-write to set bit 13) */
-	bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-	val |= (1<<13);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+	return rc;
 }
 
 /******************************************************************/
@@ -4098,8 +4106,6 @@
 
 	bnx2x_8073_set_pause_cl37(params, phy, vars);
 
-	bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 
@@ -4108,6 +4114,25 @@
 
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
+	/*
+	 * If this is forced speed, set to KR or KX (all others are not
+	 * supported)
+	 */
+	/* Swap polarity if required - Must be done only in non-1G mode */
+	if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+		/* Configure the 8073 to swap _P and _N of the KR lines */
+		DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+		/* 10G Rx/Tx and 1G Tx signal polarity swap */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+				 (val | (3<<9)));
+	}
+
+
 	/* Enable CL37 BAM */
 	if (REG_RD(bp, params->shmem_base +
 			 offsetof(struct shmem_region, dev_info.
@@ -4314,8 +4339,32 @@
 	}
 
 	if (link_up) {
+		/* Swap polarity if required */
+		if (params->lane_config &
+		    PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+			/* Configure the 8073 to swap P and N of the KR lines */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_XS_DEVAD,
+					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+			/*
+			 * Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it's in 10G mode.
+			 */
+			if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Swapping 1G polarity for "
+					      "the 8073\n");
+				val1 |= (1<<3);
+			} else
+				val1 &= ~(1<<3);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_XS_DEVAD,
+					 MDIO_XS_REG_8073_RX_CTRL_PCIE,
+					 val1);
+		}
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 		bnx2x_8073_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -5062,6 +5111,7 @@
 		else
 			vars->line_speed = SPEED_10000;
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -5758,8 +5808,11 @@
 		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 			   params->port);
 	}
-	if (link_up)
+	if (link_up) {
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+	}
 
 	if ((DUAL_MEDIA(params)) &&
 	    (phy->req_line_speed == SPEED_1000)) {
@@ -5875,10 +5928,26 @@
 			 MDIO_PMA_REG_8481_LED2_MASK,
 			 0x18);
 
+	/* Select activity source by Tx and Rx, as suggested by PHY AE */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_8481_LED3_MASK,
-			 0x0040);
+			 0x0006);
+
+	/* Select the closest activity blink rate to that in 10/100/1000 */
+	bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LED3_BLINK,
+			0);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3 */
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
 
 	/* 'Interrupt Mask' */
 	bnx2x_cl45_write(bp, phy,
@@ -6126,6 +6195,7 @@
 	/* Check link 10G */
 	if (val2 & (1<<11)) {
 		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
 		link_up = 1;
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 	} else { /* Check Legacy speed link */
@@ -6405,6 +6475,18 @@
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x80);
+
+			/* Tell LED3 to blink on source */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+			val &= ~(7<<6);
+			val |= (1<<6); /* A83B[8:6]= 1 */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
 		}
 		break;
 	}
@@ -6489,6 +6571,7 @@
 				MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
 				&val2);
 		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
 		DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
 			   val2, (val2 & (1<<14)));
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -7605,10 +7688,13 @@
 	struct bnx2x_phy phy[PORT_MAX];
 	struct bnx2x_phy *phy_blk[PORT_MAX];
 	u16 val;
-	s8 port;
+	s8 port = 0;
 	s8 port_of_path = 0;
-
-	bnx2x_ext_phy_hw_reset(bp, 0);
+	u32 swap_val, swap_override;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	port ^= (swap_val && swap_override);
+	bnx2x_ext_phy_hw_reset(bp, port);
 	/* PART1 - Reset both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u32 shmem_base, shmem2_base;
@@ -7663,7 +7749,6 @@
 
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		u16 fw_ver1;
 		if (CHIP_IS_E2(bp))
 			port_of_path = 0;
 		else
@@ -7671,19 +7756,9 @@
 
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 			   phy_blk[port]->addr);
-		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
-						  port_of_path);
-
-		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
-		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
-			DP(NETIF_MSG_LINK,
-				 "bnx2x_8073_common_init_phy port %x:"
-				 "Download failed. fw version = 0x%x\n",
-				 port, fw_ver1);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
 			return -EINVAL;
-		}
 
 		/* Only set bit 10 = 1 (Tx power down) */
 		bnx2x_cl45_read(bp, phy_blk[port],
@@ -7848,27 +7923,17 @@
 	}
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		u16 fw_ver1;
 		 if (CHIP_IS_E2(bp))
 			port_of_path = 0;
 		else
 			port_of_path = port;
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 			   phy_blk[port]->addr);
-		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
-						  port_of_path);
-		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
-		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
-			DP(NETIF_MSG_LINK,
-				 "bnx2x_8727_common_init_phy port %x:"
-				 "Download failed. fw version = 0x%x\n",
-				 port, fw_ver1);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
 			return -EINVAL;
-		}
-	}
 
+	}
 	return 0;
 }
 
@@ -7916,6 +7981,7 @@
 			 u32 shmem2_base_path[], u32 chip_id)
 {
 	u8 rc = 0;
+	u32 phy_ver;
 	u8 phy_index;
 	u32 ext_phy_type, ext_phy_config;
 	DP(NETIF_MSG_LINK, "Begin common phy init\n");
@@ -7923,6 +7989,16 @@
 	if (CHIP_REV_IS_EMUL(bp))
 		return 0;
 
+	/* Check if common init was already done */
+	phy_ver = REG_RD(bp, shmem_base_path[0] +
+			 offsetof(struct shmem_region,
+				  port_mb[PORT_0].ext_phy_fw_version));
+	if (phy_ver) {
+		DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+			       phy_ver);
+		return 0;
+	}
+
 	/* Read the ext_phy_type for arbitrary port(0) */
 	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
 	      phy_index++) {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 489a5512..aa03233 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1974,13 +1974,22 @@
 		vn_max_rate = 0;
 
 	} else {
+		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
 		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		/* If min rate is zero - set it to 1 */
+		/* If fairness is enabled (not all min rates are zero) and
+		   the current min rate is zero, set it to 1.
+		   This is a requirement of the algorithm. */
 		if (bp->vn_weight_sum && (vn_min_rate == 0))
 			vn_min_rate = DEF_MIN_RATE;
-		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
-				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+		if (IS_MF_SI(bp))
+			/* maxCfg is a percentage of the link speed */
+			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+		else
+			/* maxCfg is absolute, in units of 100 Mb */
+			vn_max_rate = maxCfg * 100;
 	}
 
 	DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@
 		m_fair_vn.vn_credit_delta =
 			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
 						   (8 * bp->vn_weight_sum))),
-			      (bp->cmng.fair_vars.fair_threshold * 2));
+			      (bp->cmng.fair_vars.fair_threshold +
+							MIN_ABOVE_THRESH));
 		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
 		   m_fair_vn.vn_credit_delta);
 	}
@@ -2082,8 +2092,9 @@
 		bnx2x_calc_vn_weight_sum(bp);
 
 		/* calculate and set min-max rate for each vn */
-		for (vn = VN_0; vn < E1HVN_MAX; vn++)
-			bnx2x_init_vn_minmax(bp, vn);
+		if (bp->port.pmf)
+			for (vn = VN_0; vn < E1HVN_MAX; vn++)
+				bnx2x_init_vn_minmax(bp, vn);
 
 		/* always enable rate shaping and fairness */
 		bp->cmng.flags.cmng_enables |=
@@ -2152,13 +2163,6 @@
 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 	}
 
-	/* indicate link status only if link status actually changed */
-	if (prev_link_status != bp->link_vars.link_status)
-		bnx2x_link_report(bp);
-
-	if (IS_MF(bp))
-		bnx2x_link_sync_notify(bp);
-
 	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
 		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
 
@@ -2170,6 +2174,13 @@
 			DP(NETIF_MSG_IFUP,
 			   "single function mode without fairness\n");
 	}
+
+	if (IS_MF(bp))
+		bnx2x_link_sync_notify(bp);
+
+	/* indicate link status only if link status actually changed */
+	if (prev_link_status != bp->link_vars.link_status)
+		bnx2x_link_report(bp);
 }
 
 void bnx2x__link_status_update(struct bnx2x *bp)
@@ -2301,15 +2312,10 @@
 		/* accept matched ucast */
 		drop_all_ucast = 0;
 	}
-	if (filters & BNX2X_ACCEPT_MULTICAST) {
+	if (filters & BNX2X_ACCEPT_MULTICAST)
 		/* accept matched mcast */
 		drop_all_mcast = 0;
-		if (IS_MF_SI(bp))
-			/* since mcast addresses won't arrive with ovlan,
-			 * fw needs to accept all of them in
-			 * switch-independent mode */
-			accp_all_mcast = 1;
-	}
+
 	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 		/* accept all mcast */
 		drop_all_ucast = 0;
@@ -3152,7 +3158,6 @@
 #define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
 #define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
 #define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
 /*
  * should be run under rtnl lock
@@ -3527,7 +3532,7 @@
 	   try to handle this event */
 	bnx2x_acquire_alr(bp);
 
-	if (bnx2x_chk_parity_attn(bp)) {
+	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
 		bp->recovery_state = BNX2X_RECOVERY_INIT;
 		bnx2x_set_reset_in_progress(bp);
 		schedule_delayed_work(&bp->reset_task, 0);
@@ -4282,9 +4287,12 @@
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 				BNX2X_ACCEPT_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST |
+						  BNX2X_ACCEPT_MULTICAST);
+		}
 #endif
 		break;
 
@@ -4292,18 +4300,29 @@
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 				BNX2X_ACCEPT_ALL_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 *  Prevent duplication of multicast packets by configuring FCoE
+		 *  L2 Client to receive only matched unicast frames.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST);
+		}
 #endif
 		break;
 
 	case BNX2X_RX_MODE_PROMISC:
 		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 *  Prevent packet duplication by configuring DROP_ALL for FCoE
+		 *  L2 Client.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+		}
 #endif
 		/* pass management unicast packets as well */
 		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -4754,7 +4773,7 @@
 	return 0; /* OK */
 }
 
-static void enable_blocks_attention(struct bnx2x *bp)
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
 {
 	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 	if (CHIP_IS_E2(bp))
@@ -4808,53 +4827,9 @@
 	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
 	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
 /*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
-	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
+	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
 }
 
-static const struct {
-	u32 addr;
-	u32 mask;
-} bnx2x_parity_mask[] = {
-	{PXP_REG_PXP_PRTY_MASK,		0x3ffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_0,	0xffffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_1,	0x7f},
-	{HC_REG_HC_PRTY_MASK,		0x7},
-	{MISC_REG_MISC_PRTY_MASK,	0x1},
-	{QM_REG_QM_PRTY_MASK,		0x0},
-	{DORQ_REG_DORQ_PRTY_MASK,	0x0},
-	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{SRC_REG_SRC_PRTY_MASK,		0x4}, /* bit 2 */
-	{CDU_REG_CDU_PRTY_MASK,		0x0},
-	{CFC_REG_CFC_PRTY_MASK,		0x0},
-	{DBG_REG_DBG_PRTY_MASK,		0x0},
-	{DMAE_REG_DMAE_PRTY_MASK,	0x0},
-	{BRB1_REG_BRB1_PRTY_MASK,	0x0},
-	{PRS_REG_PRS_PRTY_MASK,		(1<<6)},/* bit 6 */
-	{TSDM_REG_TSDM_PRTY_MASK,	0x18},	/* bit 3,4 */
-	{CSDM_REG_CSDM_PRTY_MASK,	0x8},	/* bit 3 */
-	{USDM_REG_USDM_PRTY_MASK,	0x38},  /* bit 3,4,5 */
-	{XSDM_REG_XSDM_PRTY_MASK,	0x8},	/* bit 3 */
-	{TSEM_REG_TSEM_PRTY_MASK_0,	0x0},
-	{TSEM_REG_TSEM_PRTY_MASK_1,	0x0},
-	{USEM_REG_USEM_PRTY_MASK_0,	0x0},
-	{USEM_REG_USEM_PRTY_MASK_1,	0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_0,	0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_1,	0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_0,	0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_1,	0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
-		REG_WR(bp, bnx2x_parity_mask[i].addr,
-			bnx2x_parity_mask[i].mask);
-}
-
-
 static void bnx2x_reset_common(struct bnx2x *bp)
 {
 	/* reset_common */
@@ -5082,7 +5057,7 @@
 		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 		memset(&ilt, 0, sizeof(struct bnx2x_ilt));
 
-		/* initalize dummy TM client */
+		/* initialize dummy TM client */
 		ilt_cli.start = 0;
 		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 		ilt_cli.client_num = ILT_CLIENT_TM;
@@ -5341,18 +5316,14 @@
 		}
 	}
 
-	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-						       bp->common.shmem_base,
-						       bp->common.shmem2_base);
-
 	bnx2x_setup_fan_failure_detection(bp);
 
 	/* clear PXP2 attentions */
 	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 
-	enable_blocks_attention(bp);
-	if (CHIP_PARITY_SUPPORTED(bp))
-		enable_blocks_parity(bp);
+	bnx2x_enable_blocks_attention(bp);
+	if (CHIP_PARITY_ENABLED(bp))
+		bnx2x_enable_blocks_parity(bp);
 
 	if (!BP_NOMCP(bp)) {
 		/* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -5548,9 +5519,6 @@
 
 	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
 	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-						       bp->common.shmem_base,
-						       bp->common.shmem2_base);
 	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
 				      bp->common.shmem2_base, port)) {
 		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -8424,6 +8392,17 @@
 		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
 		bp->mdio.prtad =
 			XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+	/*
+	 * Check if a HW lock is required to access the MDC/MDIO bus to the PHY(s).
+	 * In MF mode, it is set to cover the self-test cases.
+	 */
+	if (IS_MF(bp))
+		bp->port.need_hw_lock = 1;
+	else
+		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+							bp->common.shmem_base,
+							bp->common.shmem2_base);
 }
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
@@ -8751,13 +8730,6 @@
 		dev_err(&bp->pdev->dev, "MCP disabled, "
 					"must load devices in order!\n");
 
-	/* Set multi queue mode */
-	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
-	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
-		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
-					"requested is not MSI-X\n");
-		multi_mode = ETH_RSS_MODE_DISABLED;
-	}
 	bp->multi_mode = multi_mode;
 	bp->int_mode = int_mode;
 
@@ -9560,9 +9532,15 @@
 	/* Delete all NAPI objects */
 	bnx2x_del_all_napi(bp);
 
+	/* Power on: we can't let the PCI layer write to us while we are in D3 */
+	bnx2x_set_power_state(bp, PCI_D0);
+
 	/* Disable MSI/MSI-X */
 	bnx2x_disable_msi(bp);
 
+	/* Power off */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
 	/* Make sure RESET task is not scheduled before continuing */
 	cancel_delayed_work_sync(&bp->reset_task);
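For illustration (hypothetical numbers, not part of the patch): in the vn_max_rate hunk earlier in this file's changes, the two branches use the same configuration value in different units. With maxCfg = 30, the switch-independent path at line_speed = 10000 Mb/s gives vn_max_rate = 10000 * 30 / 100 = 3000 Mb/s, while the other path treats 30 as absolute units of 100 Mb and gives vn_max_rate = 30 * 100 = 3000 Mb/s.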
 
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index bfd875b..e01330b 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
  * WR - Write Clear (write 1 to clear the bit)
  *
  */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
 
 #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR			 (0x1<<0)
 #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS		 (0x1<<2)
@@ -39,6 +41,8 @@
 #define BRB1_REG_BRB1_PRTY_MASK 				 0x60138
 /* [R 4] Parity register #0 read */
 #define BRB1_REG_BRB1_PRTY_STS					 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR				 0x60130
 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
  * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
  * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
 #define CCM_REG_CCM_INT_MASK					 0xd01e4
 /* [R 11] Interrupt register #0 read */
 #define CCM_REG_CCM_INT_STS					 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK					 0xd01f4
 /* [R 27] Parity register #0 read */
 #define CCM_REG_CCM_PRTY_STS					 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR				 0xd01ec
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
 #define CDU_REG_CDU_PRTY_MASK					 0x10104c
 /* [R 5] Parity register #0 read */
 #define CDU_REG_CDU_PRTY_STS					 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR				 0x101044
 /* [RC 32] logging of error data in case of a CDU load error:
    {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
    ype_error; ctual_active; ctual_compressed_context}; */
@@ -381,6 +391,8 @@
 #define CFC_REG_CFC_PRTY_MASK					 0x104118
 /* [R 4] Parity register #0 read */
 #define CFC_REG_CFC_PRTY_STS					 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR				 0x104110
 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */
 #define CFC_REG_CID_CAM 					 0x104800
 #define CFC_REG_CONTROL0					 0x104028
@@ -466,6 +478,8 @@
 #define CSDM_REG_CSDM_PRTY_MASK 				 0xc22bc
 /* [R 11] Parity register #0 read */
 #define CSDM_REG_CSDM_PRTY_STS					 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR				 0xc22b4
 #define CSDM_REG_ENABLE_IN1					 0xc2238
 #define CSDM_REG_ENABLE_IN2					 0xc223c
 #define CSDM_REG_ENABLE_OUT1					 0xc2240
@@ -556,6 +570,9 @@
 /* [R 32] Parity register #0 read */
 #define CSEM_REG_CSEM_PRTY_STS_0				 0x200124
 #define CSEM_REG_CSEM_PRTY_STS_1				 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0				 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1				 0x200138
 #define CSEM_REG_ENABLE_IN					 0x2000a4
 #define CSEM_REG_ENABLE_OUT					 0x2000a8
 /* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
 #define DBG_REG_DBG_PRTY_MASK					 0xc0a8
 /* [R 1] Parity register #0 read */
 #define DBG_REG_DBG_PRTY_STS					 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR				 0xc0a0
 /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
  * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
  * 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
 #define DMAE_REG_DMAE_PRTY_MASK 				 0x102064
 /* [R 4] Parity register #0 read */
 #define DMAE_REG_DMAE_PRTY_STS					 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR				 0x10205c
 /* [RW 1] Command 0 go. */
 #define DMAE_REG_GO_C0						 0x102080
 /* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
 #define DORQ_REG_DORQ_PRTY_MASK 				 0x170190
 /* [R 2] Parity register #0 read */
 #define DORQ_REG_DORQ_PRTY_STS					 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR				 0x170188
 /* [RW 8] The address to write the DPM CID to STORM. */
 #define DORQ_REG_DPM_CID_ADDR					 0x170044
 /* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
 /* [R 1] data availble for error memory. If this bit is clear do not red
  * from error_handling_memory. */
 #define IGU_REG_ERROR_HANDLING_DATA_VALID			 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK					 0x1300a8
 /* [R 11] Parity register #0 read */
 #define IGU_REG_IGU_PRTY_STS					 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR				 0x1300a0
 /* [R 4] Debug: int_handle_fsm */
 #define IGU_REG_INT_HANDLE_FSM					 0x130050
 #define IGU_REG_LEADING_EDGE_LATCH				 0x130134
@@ -1501,6 +1528,8 @@
 #define MISC_REG_MISC_PRTY_MASK 				 0xa398
 /* [R 1] Parity register #0 read */
 #define MISC_REG_MISC_PRTY_STS					 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR				 0xa390
 #define MISC_REG_NIG_WOL_P0					 0xa270
 #define MISC_REG_NIG_WOL_P1					 0xa274
 /* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -1604,7 +1633,7 @@
    (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
 #define MISC_REG_SW_TIMER_RELOAD_VAL_4				 0xa2fc
 /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
-   in this register. addres 0 - timer 1; address 1 - timer 2, ...  address 7 -
+   in this register. address 0 - timer 1; address 1 - timer 2, ...  address 7 -
    timer 8 */
 #define MISC_REG_SW_TIMER_VAL					 0xa5c0
 /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2082,6 +2111,10 @@
 #define PBF_REG_PBF_INT_MASK					 0x1401d4
 /* [R 5] Interrupt register #0 read */
 #define PBF_REG_PBF_INT_STS					 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK					 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR				 0x1401dc
 #define PB_REG_CONTROL						 0
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK					 0x28
@@ -2091,6 +2124,8 @@
 #define PB_REG_PB_PRTY_MASK					 0x38
 /* [R 4] Parity register #0 read */
 #define PB_REG_PB_PRTY_STS					 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR					 0x30
 #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR		 (0x1<<0)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW	 (0x1<<8)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR	 (0x1<<1)
@@ -2446,6 +2481,8 @@
 #define PRS_REG_PRS_PRTY_MASK					 0x401a4
 /* [R 8] Parity register #0 read */
 #define PRS_REG_PRS_PRTY_STS					 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR				 0x4019c
 /* [RW 8] Context region for pure acknowledge packets. Used in CFC load
    request message */
 #define PRS_REG_PURE_REGIONS					 0x40024
@@ -2599,6 +2636,9 @@
 /* [R 32] Parity register #0 read */
 #define PXP2_REG_PXP2_PRTY_STS_0				 0x12057c
 #define PXP2_REG_PXP2_PRTY_STS_1				 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0				 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1				 0x120590
 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives
    indication about backpressure) */
 #define PXP2_REG_RD_ALMOST_FULL_0				 0x120424
@@ -3001,6 +3041,8 @@
 #define PXP_REG_PXP_PRTY_MASK					 0x103094
 /* [R 26] Parity register #0 read */
 #define PXP_REG_PXP_PRTY_STS					 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR				 0x10308c
 /* [RW 4] The activity counter initial increment value sent in the load
    request */
 #define QM_REG_ACTCTRINITVAL_0					 0x168040
@@ -3157,6 +3199,8 @@
 #define QM_REG_QM_PRTY_MASK					 0x168454
 /* [R 12] Parity register #0 read */
 #define QM_REG_QM_PRTY_STS					 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR					 0x16844c
 /* [R 32] Current queues in pipeline: Queues from 32 to 63 */
 #define QM_REG_QSTATUS_HIGH					 0x16802c
 /* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
 #define QM_REG_WRRWEIGHTS_9					 0x168848
 /* [R 6] Keep the fill level of the fifo from write client 1 */
 #define QM_REG_XQM_WRC_FIFOLVL					 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST					 0x18840
 #define SRC_REG_COUNTFREE0					 0x40500
 /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
    ports. If set the searcher support 8 functions. */
@@ -3470,6 +3516,8 @@
 #define SRC_REG_SRC_PRTY_MASK					 0x404c8
 /* [R 3] Parity register #0 read */
 #define SRC_REG_SRC_PRTY_STS					 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR				 0x404c0
 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
 #define TCM_REG_CAM_OCCUP					 0x5017c
 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
 #define TCM_REG_TCM_INT_MASK					 0x501dc
 /* [R 11] Interrupt register #0 read */
 #define TCM_REG_TCM_INT_STS					 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK					 0x501ec
 /* [R 27] Parity register #0 read */
 #define TCM_REG_TCM_PRTY_STS					 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR				 0x501e4
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
 #define TM_REG_TM_INT_MASK					 0x1640fc
 /* [R 1] Interrupt register #0 read */
 #define TM_REG_TM_INT_STS					 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK					 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR					 0x164104
 /* [RW 8] The event id for aggregated interrupt 0 */
 #define TSDM_REG_AGG_INT_EVENT_0				 0x42038
 #define TSDM_REG_AGG_INT_EVENT_1				 0x4203c
@@ -3835,6 +3891,8 @@
 #define TSDM_REG_TSDM_PRTY_MASK 				 0x422bc
 /* [R 11] Parity register #0 read */
 #define TSDM_REG_TSDM_PRTY_STS					 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR				 0x422b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define TSEM_REG_ARB_CYCLE_SIZE 				 0x180034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
 #define TSEM_REG_SLOW_EXT_STORE_EMPTY				 0x1802a0
 /* [RW 8] List of free threads . There is a bit per thread. */
 #define TSEM_REG_THREADS_LIST					 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0				 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1				 0x180128
 /* [RW 3] The arbitration scheme of time_slot 0 */
 #define TSEM_REG_TS_0_AS					 0x180038
 /* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
 #define UCM_REG_UCM_INT_STS					 0xe01c8
 /* [R 27] Parity register #0 read */
 #define UCM_REG_UCM_PRTY_STS					 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR				 0xe01dc
 /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
 #define USDM_REG_USDM_PRTY_MASK 				 0xc42c0
 /* [R 11] Parity register #0 read */
 #define USDM_REG_USDM_PRTY_STS					 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR				 0xc42b8
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define USEM_REG_ARB_CYCLE_SIZE 				 0x300034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
 /* [R 32] Parity register #0 read */
 #define USEM_REG_USEM_PRTY_STS_0				 0x300124
 #define USEM_REG_USEM_PRTY_STS_1				 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0				 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1				 0x300138
 /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
  * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
 #define USEM_REG_VFPF_ERR_NUM					 0x300380
@@ -4797,6 +4865,8 @@
 #define XSDM_REG_XSDM_PRTY_MASK 				 0x1662bc
 /* [R 11] Parity register #0 read */
 #define XSDM_REG_XSDM_PRTY_STS					 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR				 0x1662b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define XSEM_REG_ARB_CYCLE_SIZE 				 0x280034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
 /* [R 32] Parity register #0 read */
 #define XSEM_REG_XSEM_PRTY_STS_0				 0x280124
 #define XSEM_REG_XSEM_PRTY_STS_1				 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0				 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1				 0x280138
 #define MCPR_NVM_ACCESS_ENABLE_EN				 (1L<<0)
 #define MCPR_NVM_ACCESS_ENABLE_WR_EN				 (1L<<1)
 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE				 (0xffffffL<<0)
@@ -6121,7 +6194,11 @@
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER	0x0000
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER		0x0100
 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G			0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG		0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS		0x0080
 
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1		0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN		0x0080
 
 #define IGU_FUNC_BASE			0x0400
 
@@ -6316,3 +6393,4 @@
 }
 
 
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6e4d9b1..3445ded 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,6 +158,11 @@
 
 		spin_lock_bh(&bp->stats_lock);
 
+		if (bp->stats_pending) {
+			spin_unlock_bh(&bp->stats_lock);
+			return;
+		}
+
 		ramrod_data.drv_counter = bp->stats_counter++;
 		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
 		for_each_eth_queue(bp, i)
@@ -1234,14 +1239,14 @@
 	if (unlikely(bp->panic))
 		return;
 
+	bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
 	/* Protect a state change flow */
 	spin_lock_bh(&bp->stats_lock);
 	state = bp->stats_state;
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 	spin_unlock_bh(&bp->stats_lock);
 
-	bnx2x_stats_stm[state][event].action(bp);
-
 	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
 		   state, event, bp->stats_state);
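The stats hunks above add an early return when bp->stats_pending is already set (checked under bp->stats_lock) and run the state-machine action before taking the lock for the state transition. Below is a minimal editorial sketch of the "drop the request if one is already pending" guard, in plain user-space C with a pthread mutex standing in for the kernel spinlock; all names here (demo_stats, demo_request) are hypothetical and not part of the driver.

#include <pthread.h>
#include <stdbool.h>

struct demo_stats {
	pthread_mutex_t lock;	/* stands in for the stats spinlock */
	bool pending;		/* a request is already in flight */
	unsigned int counter;	/* request sequence number */
};

static void demo_request(struct demo_stats *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->pending) {
		/* a request is already pending: drop this one */
		pthread_mutex_unlock(&s->lock);
		return;
	}
	s->pending = true;
	s->counter++;		/* build the request under the lock */
	pthread_mutex_unlock(&s->lock);
	/* ... issue the request to the hardware outside the lock ... */
}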
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 48cf24f..a5d5d0b 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -281,23 +281,23 @@
 }
 
 /**
- * __get_rx_machine_lock - lock the port's RX machine
+ * __get_state_machine_lock - lock the port's state machines
  * @port: the port we're looking at
  *
  */
-static inline void __get_rx_machine_lock(struct port *port)
+static inline void __get_state_machine_lock(struct port *port)
 {
-	spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+	spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 /**
- * __release_rx_machine_lock - unlock the port's RX machine
+ * __release_state_machine_lock - unlock the port's state machines
  * @port: the port we're looking at
  *
  */
-static inline void __release_rx_machine_lock(struct port *port)
+static inline void __release_state_machine_lock(struct port *port)
 {
-	spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+	spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 /**
@@ -388,14 +388,14 @@
 }
 
 /**
- * __initialize_port_locks - initialize a port's RX machine spinlock
+ * __initialize_port_locks - initialize a port's STATE machine spinlock
  * @port: the port we're looking at
  *
  */
 static inline void __initialize_port_locks(struct port *port)
 {
 	// make sure it isn't called twice
-	spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+	spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
 }
 
 //conversions
@@ -840,7 +840,7 @@
 	lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
 
 	memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-	/* Note: source addres is set to be the member's PERMANENT address,
+	/* Note: source address is set to be the member's PERMANENT address,
 	   because we use it to identify loopback lacpdus in receive. */
 	memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -881,7 +881,7 @@
 	marker_header = (struct bond_marker_header *)skb_put(skb, length);
 
 	memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-	/* Note: source addres is set to be the member's PERMANENT address,
+	/* Note: source address is set to be the member's PERMANENT address,
 	   because we use it to identify loopback MARKERs in receive. */
 	memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -1025,9 +1025,6 @@
 {
 	rx_states_t last_state;
 
-	// Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
-	__get_rx_machine_lock(port);
-
 	// keep current State Machine state to compare later if it was changed
 	last_state = port->sm_rx_state;
 
@@ -1133,7 +1130,6 @@
 				pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
 				       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
 				       port->slave->dev->master->name, port->slave->dev->name);
-				__release_rx_machine_lock(port);
 				return;
 			}
 			__update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@
 			break;
 		}
 	}
-	__release_rx_machine_lock(port);
 }
 
 /**
@@ -1916,7 +1911,7 @@
 		return -1;
 	}
 
-	//check that the slave has not been intialized yet.
+	//check that the slave has not been initialized yet.
 	if (SLAVE_AD_INFO(slave).port.slave != slave) {
 
 		// port initialization
@@ -2155,6 +2150,12 @@
 			goto re_arm;
 		}
 
+		/* Lock around state machines to protect data accessed
+		 * by all (e.g., port->sm_vars).  ad_rx_machine may run
+		 * concurrently due to incoming LACPDU.
+		 */
+		__get_state_machine_lock(port);
+
 		ad_rx_machine(NULL, port);
 		ad_periodic_machine(port);
 		ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@
 		// turn off the BEGIN bit, since we already handled it
 		if (port->sm_vars & AD_PORT_BEGIN)
 			port->sm_vars &= ~AD_PORT_BEGIN;
+
+		__release_state_machine_lock(port);
 	}
 
 re_arm:
@@ -2200,7 +2203,10 @@
 		case AD_TYPE_LACPDU:
 			pr_debug("Received LACPDU on port %d\n",
 				 port->actor_port_number);
+			/* Protect against concurrent state machines */
+			__get_state_machine_lock(port);
 			ad_rx_machine(lacpdu, port);
+			__release_state_machine_lock(port);
 			break;
 
 		case AD_TYPE_MARKER:
@@ -2470,6 +2476,10 @@
 	if (!(dev->flags & IFF_MASTER))
 		goto out;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
 	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
 		goto out;
 
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 2c46a154..b28baff 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -264,7 +264,8 @@
 struct ad_slave_info {
 	struct aggregator aggregator;	    // 802.3ad aggregator structure
 	struct port port;		    // 802.3ad port structure
-	spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt
+	spinlock_t state_machine_lock; /* serializes state machines vs.
+					  incoming LACPDUs */
 	u16 id;
 };
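The bond_3ad hunks above replace the per-RX-machine lock with a single state_machine_lock taken around all of a port's state machines, both in the periodic worker and in the LACPDU receive path. Below is a minimal editorial sketch of that pattern, in plain user-space C with a pthread mutex standing in for the kernel spinlock; the names (demo_port, rx_machine, periodic_machine, on_lacpdu) are hypothetical.

#include <pthread.h>

struct demo_port {
	pthread_mutex_t state_machine_lock;	/* one lock for all machines */
	unsigned int sm_vars;			/* data shared by every machine */
};

static void rx_machine(struct demo_port *p)       { p->sm_vars |= 0x1; }
static void periodic_machine(struct demo_port *p) { p->sm_vars &= ~0x2; }

/* periodic worker: run every state machine under the same lock */
static void periodic_worker(struct demo_port *p)
{
	pthread_mutex_lock(&p->state_machine_lock);
	rx_machine(p);
	periodic_machine(p);
	pthread_mutex_unlock(&p->state_machine_lock);
}

/* receive path: the same lock keeps the RX machine from racing the worker */
static void on_lacpdu(struct demo_port *p)
{
	pthread_mutex_lock(&p->state_machine_lock);
	rx_machine(p);
	pthread_mutex_unlock(&p->state_machine_lock);
}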
 
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c..5c6fba8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@
 		goto out;
 	}
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
 	if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
 		goto out;
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b8..163e0b0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2733,6 +2733,10 @@
 	if (!slave || !slave_do_arp_validate(bond, slave))
 		goto out_unlock;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out_unlock;
+
 	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
 		goto out_unlock;
 
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db6..5dec456 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@
 
 	  As only the sending and receiving of CAN frames is implemented, this
 	  driver should work with the (serial/USB) CAN hardware from:
-	  www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+	  www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
 
 	  Userspace tools to attach the SLCAN line discipline (slcan_attach,
 	  slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -117,6 +117,8 @@
 
 source "drivers/net/can/usb/Kconfig"
 
+source "drivers/net/can/softing/Kconfig"
+
 config CAN_DEBUG_DEVICES
 	bool "CAN devices debugging messages"
 	depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159..53c82a7 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,6 +9,7 @@
 can-dev-y			:= dev.o
 
 obj-y				+= usb/
+obj-y				+= softing/
 
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
 obj-$(CONFIG_CAN_MSCAN)		+= mscan/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d0..57d2ffb 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
  * at91_can.c - CAN network driver for AT91 SoC CAN controller
  *
  * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
  *
  * This software may be distributed under the terms of the GNU General
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -40,22 +41,23 @@
 
 #include <mach/board.h>
 
-#define AT91_NAPI_WEIGHT	12
+#define AT91_NAPI_WEIGHT	11
 
 /*
  * RX/TX Mailbox split
  * don't dare to touch
  */
-#define AT91_MB_RX_NUM		12
+#define AT91_MB_RX_NUM		11
 #define AT91_MB_TX_SHIFT	2
 
-#define AT91_MB_RX_FIRST	0
+#define AT91_MB_RX_FIRST	1
 #define AT91_MB_RX_LAST		(AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
 
 #define AT91_MB_RX_MASK(i)	((1 << (i)) - 1)
 #define AT91_MB_RX_SPLIT	8
 #define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
+				 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
 
 #define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
 #define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@
 
 	struct clk		*clk;
 	struct at91_can_data	*pdata;
+
+	canid_t			mb0_id;
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@
 	set_mb_mode_prio(priv, mb, mode, 0);
 }
 
+static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
+{
+	u32 reg_mid;
+
+	if (can_id & CAN_EFF_FLAG)
+		reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+	else
+		reg_mid = (can_id & CAN_SFF_MASK) << 18;
+
+	return reg_mid;
+}
+
 /*
  * Swtich transceiver on or off
  */
@@ -233,12 +249,22 @@
 {
 	struct at91_priv *priv = netdev_priv(dev);
 	unsigned int i;
+	u32 reg_mid;
 
 	/*
-	 * The first 12 mailboxes are used as a reception FIFO. The
-	 * last mailbox is configured with overwrite option. The
-	 * overwrite flag indicates a FIFO overflow.
+	 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+	 * mailbox is disabled. The next 11 mailboxes are used as a
+	 * reception FIFO. The last mailbox is configured with the
+	 * overwrite option. The overwrite flag indicates a FIFO
+	 * overflow.
 	 */
+	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
+	for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
+		at91_write(priv, AT91_MID(i), reg_mid);
+		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
+	}
+
 	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
 		set_mb_mode(priv, i, AT91_MB_MODE_RX);
 	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 
 	/* Reset tx and rx helper pointers */
-	priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+	priv->tx_next = priv->tx_echo = 0;
+	priv->rx_next = AT91_MB_RX_FIRST;
 }
 
 static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@
 		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
-
-	if (cf->can_id & CAN_EFF_FLAG)
-		reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
-	else
-		reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
-
+	reg_mid = at91_can_id_to_reg_mid(cf->can_id);
 	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
 		(cf->can_dlc << 16) | AT91_MCR_MTCR;
 
@@ -539,27 +561,31 @@
  *
  * Theory of Operation:
  *
- * 12 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ * 11 of the 16 mailboxes on the chip are reserved for RX. We split
+ * them into 2 groups: the lower group holds 7 mailboxes, the upper 4.
  *
  * Like it or not, but the chip always saves a received CAN message
  * into the first free mailbox it finds (starting with the
  * lowest). This makes it very difficult to read the messages in the
  * right order from the chip. This is how we work around that problem:
  *
- * The first message goes into mb nr. 0 and issues an interrupt. All
+ * The first message goes into mb nr. 1 and issues an interrupt. All
  * rx ints are disabled in the interrupt handler and a napi poll is
  * scheduled. We read the mailbox, but do _not_ reenable the mb (to
  * receive another message).
  *
  *    lower mbxs      upper
- *   ______^______    __^__
- *  /             \  /     \
+ *     ____^______    __^__
+ *    /           \  /     \
  * +-+-+-+-+-+-+-+-++-+-+-+-+
- * |x|x|x|x|x|x|x|x|| | | | |
+ * | |x|x|x|x|x|x|x|| | | | |
  * +-+-+-+-+-+-+-+-++-+-+-+-+
  *  0 0 0 0 0 0  0 0 0 0 1 1  \ mail
  *  0 1 2 3 4 5  6 7 8 9 0 1  / box
+ *  ^
+ *  |
+ *   \
+ *     unused, due to chip bug
  *
  * The variable priv->rx_next points to the next mailbox to read a
  * message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@
 			"order of incoming frames cannot be guaranteed\n");
 
  again:
-	for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
-	     mb < AT91_MB_RX_NUM && quota > 0;
+	for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
+	     mb < AT91_MB_RX_LAST + 1 && quota > 0;
 	     reg_sr = at91_read(priv, AT91_SR),
-	     mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+	     mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
 		at91_read_msg(dev, mb);
 
 		/* reactivate mailboxes */
@@ -610,8 +636,8 @@
 
 	/* upper group completed, look again in lower */
 	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    quota > 0 && mb >= AT91_MB_RX_NUM) {
-		priv->rx_next = 0;
+	    quota > 0 && mb > AT91_MB_RX_LAST) {
+		priv->rx_next = AT91_MB_RX_FIRST;
 		goto again;
 	}
 
@@ -1037,6 +1063,64 @@
 	.ndo_start_xmit	= at91_start_xmit,
 };
 
+static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct at91_priv *priv = netdev_priv(to_net_dev(dev));
+
+	if (priv->mb0_id & CAN_EFF_FLAG)
+		return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+	else
+		return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+}
+
+static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct at91_priv *priv = netdev_priv(ndev);
+	unsigned long can_id;
+	ssize_t ret;
+	int err;
+
+	rtnl_lock();
+
+	if (ndev->flags & IFF_UP) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	err = strict_strtoul(buf, 0, &can_id);
+	if (err) {
+		ret = err;
+		goto out;
+	}
+
+	if (can_id & CAN_EFF_FLAG)
+		can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+	else
+		can_id &= CAN_SFF_MASK;
+
+	priv->mb0_id = can_id;
+	ret = count;
+
+ out:
+	rtnl_unlock();
+	return ret;
+}
+
+static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
+	at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+
+static struct attribute *at91_sysfs_attrs[] = {
+	&dev_attr_mb0_id.attr,
+	NULL,
+};
+
+static struct attribute_group at91_sysfs_attr_group = {
+	.attrs = at91_sysfs_attrs,
+};
+
 static int __devinit at91_can_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
@@ -1082,6 +1166,7 @@
 	dev->netdev_ops	= &at91_netdev_ops;
 	dev->irq = irq;
 	dev->flags |= IFF_ECHO;
+	dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
 	priv = netdev_priv(dev);
 	priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@
 	priv->dev = dev;
 	priv->clk = clk;
 	priv->pdata = pdev->dev.platform_data;
+	priv->mb0_id = 0x7ff;
 
 	netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
 
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a..366f5cc 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1618,7 +1618,7 @@
 	return count;
 }
 
-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
 						   ican3_sysfs_set_term);
 
 static struct attribute *ican3_sysfs_attrs[] = {
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534a..7513c45 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@
 		goto open_unlock;
 	}
 
-	priv->wq = create_freezeable_workqueue("mcp251x_wq");
+	priv->wq = create_freezable_workqueue("mcp251x_wq");
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d39..d387069 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
 config CAN_MSCAN
-	depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+	depends on CAN_DEV && (PPC || M68K)
 	tristate "Support for Freescale MSCAN based chips"
 	---help---
 	  The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e972..e54712b 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@
 
 static struct can_bittiming_const pch_can_bittiming_const = {
 	.name = KBUILD_MODNAME,
-	.tseg1_min = 1,
+	.tseg1_min = 2,
 	.tseg1_max = 16,
 	.tseg2_min = 1,
 	.tseg2_max = 8,
@@ -959,13 +959,13 @@
 	struct pch_can_priv *priv = netdev_priv(ndev);
 
 	unregister_candev(priv->ndev);
-	pci_iounmap(pdev, priv->regs);
 	if (priv->use_msi)
 		pci_disable_msi(priv->dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	pch_can_reset(priv);
+	pci_iounmap(pdev, priv->regs);
 	free_candev(priv->ndev);
 }
 
@@ -1238,6 +1238,7 @@
 		priv->use_msi = 0;
 	} else {
 		netdev_err(ndev, "PCH CAN opened with MSI\n");
+		pci_set_master(pdev);
 		priv->use_msi = 1;
 	}
 
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 0000000..5de46a9
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
+config CAN_SOFTING
+	tristate "Softing GmbH CAN generic support"
+	depends on CAN_DEV && HAS_IOMEM
+	---help---
+	  Support for CAN cards from Softing GmbH & some cards
+	  from Vector GmbH.
+	  Softing GmbH CAN cards come with 1 or 2 physical buses.
+	  These cards typically use dual-port RAM to communicate
+	  with the host CPU. The interface is therefore identical for PCI
+	  and PCMCIA cards. This driver operates on a platform device,
+	  which is created by the softing_cs or softing_pci driver.
+	  Warning:
+	  The API of the card does not allow fine-grained control per bus;
+	  it controls the 2 buses on the card together.
+	  As such, some actions (start/stop/busoff recovery) on 1 bus
+	  must temporarily bring down the other bus as well.
+
+config CAN_SOFTING_CS
+	tristate "Softing GmbH CAN PCMCIA cards"
+	depends on PCMCIA
+	depends on CAN_SOFTING
+	---help---
+	  Support for PCMCIA cards from Softing GmbH & some cards
+	  from Vector GmbH.
+	  You need firmware for these, which you can get at
+	  http://developer.berlios.de/projects/socketcan/
+	  This version of the driver is written against
+	  firmware version 4.6 (softing-fw-4.6-binaries.tar.gz).
+	  In order to use the card as a CAN device, you need the Softing
+	  generic support too.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 0000000..c5e5016
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
+
+softing-y := softing_main.o softing_fw.o
+obj-$(CONFIG_CAN_SOFTING) += softing.o
+obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 0000000..7ec9f4d
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
+/*
+ * softing common interfaces
+ *
+ * by Kurt Van Dijck, 2008-2010
+ */
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "softing_platform.h"
+
+struct softing;
+
+struct softing_priv {
+	struct can_priv can; /* must be the first member! */
+	struct net_device *netdev;
+	struct softing *card;
+	struct {
+		int pending;
+		/* variables which hold the circular buffer */
+		int echo_put;
+		int echo_get;
+	} tx;
+	struct can_bittiming_const btr_const;
+	int index;
+	uint8_t output;
+	uint16_t chip;
+};
+#define netdev2softing(netdev)	((struct softing_priv *)netdev_priv(netdev))
+
+struct softing {
+	const struct softing_platform_data *pdat;
+	struct platform_device *pdev;
+	struct net_device *net[2];
+	spinlock_t spin; /* protect this structure & DPRAM access */
+	ktime_t ts_ref;
+	ktime_t ts_overflow; /* timestamp overflow value, in ktime */
+
+	struct {
+		/* indication of firmware status */
+		int up;
+		/* protection of the 'up' variable */
+		struct mutex lock;
+	} fw;
+	struct {
+		int nr;
+		int requested;
+		int svc_count;
+		unsigned int dpram_position;
+	} irq;
+	struct {
+		int pending;
+		int last_bus;
+		/*
+		 * keep the bus that last tx'd a message,
+		 * in order to let every netdev queue resume
+		 */
+	} tx;
+	__iomem uint8_t *dpram;
+	unsigned long dpram_phys;
+	unsigned long dpram_size;
+	struct {
+		uint16_t fw_version, hw_version, license, serial;
+		uint16_t chip[2];
+		unsigned int freq; /* remote cpu's operating frequency */
+	} id;
+};
+
+extern int softing_default_output(struct net_device *netdev);
+
+extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+
+extern int softing_chip_poweron(struct softing *card);
+
+extern int softing_bootloader_command(struct softing *card, int16_t cmd,
+		const char *msg);
+
+/* Load firmware after reset */
+extern int softing_load_fw(const char *file, struct softing *card,
+			__iomem uint8_t *virt, unsigned int size, int offset);
+
+/* Load final application firmware after bootloader */
+extern int softing_load_app_fw(const char *file, struct softing *card);
+
+/*
+ * enable or disable irq
+ * only called with fw.lock locked
+ */
+extern int softing_enable_irq(struct softing *card, int enable);
+
+/* start/stop 1 bus on card */
+extern int softing_startstop(struct net_device *netdev, int up);
+
+/* netif_rx() */
+extern int softing_netdev_rx(struct net_device *netdev,
+		const struct can_frame *msg, ktime_t ktime);
+
+/* SOFTING DPRAM mappings */
+#define DPRAM_RX		0x0000
+	#define DPRAM_RX_SIZE	32
+	#define DPRAM_RX_CNT	16
+#define DPRAM_RX_RD		0x0201	/* uint8_t */
+#define DPRAM_RX_WR		0x0205	/* uint8_t */
+#define DPRAM_RX_LOST		0x0207	/* uint8_t */
+
+#define DPRAM_FCT_PARAM		0x0300	/* int16_t [20] */
+#define DPRAM_FCT_RESULT	0x0328	/* int16_t */
+#define DPRAM_FCT_HOST		0x032b	/* uint16_t */
+
+#define DPRAM_INFO_BUSSTATE	0x0331	/* uint16_t */
+#define DPRAM_INFO_BUSSTATE2	0x0335	/* uint16_t */
+#define DPRAM_INFO_ERRSTATE	0x0339	/* uint16_t */
+#define DPRAM_INFO_ERRSTATE2	0x033d	/* uint16_t */
+#define DPRAM_RESET		0x0341	/* uint16_t */
+#define DPRAM_CLR_RECV_FIFO	0x0345	/* uint16_t */
+#define DPRAM_RESET_TIME	0x034d	/* uint16_t */
+#define DPRAM_TIME		0x0350	/* uint64_t */
+#define DPRAM_WR_START		0x0358	/* uint8_t */
+#define DPRAM_WR_END		0x0359	/* uint8_t */
+#define DPRAM_RESET_RX_FIFO	0x0361	/* uint16_t */
+#define DPRAM_RESET_TX_FIFO	0x0364	/* uint8_t */
+#define DPRAM_READ_FIFO_LEVEL	0x0365	/* uint8_t */
+#define DPRAM_RX_FIFO_LEVEL	0x0366	/* uint16_t */
+#define DPRAM_TX_FIFO_LEVEL	0x0366	/* uint16_t */
+
+#define DPRAM_TX		0x0400	/* uint16_t */
+	#define DPRAM_TX_SIZE	16
+	#define DPRAM_TX_CNT	32
+#define DPRAM_TX_RD		0x0601	/* uint8_t */
+#define DPRAM_TX_WR		0x0605	/* uint8_t */
+
+#define DPRAM_COMMAND		0x07e0	/* uint16_t */
+#define DPRAM_RECEIPT		0x07f0	/* uint16_t */
+#define DPRAM_IRQ_TOHOST	0x07fe	/* uint8_t */
+#define DPRAM_IRQ_TOCARD	0x07ff	/* uint8_t */
+
+#define DPRAM_V2_RESET		0x0e00	/* uint8_t */
+#define DPRAM_V2_IRQ_TOHOST	0x0e02	/* uint8_t */
+
+#define TXMAX	(DPRAM_TX_CNT - 1)
+
+/* DPRAM return codes */
+#define RES_NONE	0
+#define RES_OK		1
+#define RES_NOK		2
+#define RES_UNKNOWN	3
+/* DPRAM flags */
+#define CMD_TX		0x01
+#define CMD_ACK		0x02
+#define CMD_XTD		0x04
+#define CMD_RTR		0x08
+#define CMD_ERR		0x10
+#define CMD_BUS2	0x80
+
+/* returned fifo entry bus state masks */
+#define SF_MASK_BUSOFF		0x80
+#define SF_MASK_EPASSIVE	0x60
+
+/* bus states */
+#define STATE_BUSOFF	2
+#define STATE_EPASSIVE	1
+#define STATE_EACTIVE	0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 0000000..c11bb4d
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "softing_platform.h"
+
+static int softingcs_index;
+static spinlock_t softingcs_index_lock;
+
+static int softingcs_reset(struct platform_device *pdev, int v);
+static int softingcs_enable_irq(struct platform_device *pdev, int v);
+
+/*
+ * platform_data descriptions
+ */
+#define MHZ (1000*1000)
+static const struct softing_platform_data softingcs_platform_data[] = {
+{
+	.name = "CANcard",
+	.manf = 0x0168, .prod = 0x001,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "CANcard-NEC",
+	.manf = 0x0168, .prod = 0x002,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "CANcard-SJA",
+	.manf = 0x0168, .prod = 0x004,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "CANcard-2",
+	.manf = 0x0168, .prod = 0x005,
+	.generation = 2,
+	.nbus = 2,
+	.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x1000,
+	.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = NULL,
+}, {
+	.name = "Vector-CANcard",
+	.manf = 0x0168, .prod = 0x081,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "Vector-CANcard-SJA",
+	.manf = 0x0168, .prod = 0x084,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "Vector-CANcard-2",
+	.manf = 0x0168, .prod = 0x085,
+	.generation = 2,
+	.nbus = 2,
+	.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x1000,
+	.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = NULL,
+}, {
+	.name = "EDICcard-NEC",
+	.manf = 0x0168, .prod = 0x102,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "EDICcard-2",
+	.manf = 0x0168, .prod = 0x105,
+	.generation = 2,
+	.nbus = 2,
+	.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x1000,
+	.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = NULL,
+}, {
+	0, 0,
+},
+};
+
+MODULE_FIRMWARE(fw_dir "bcard.bin");
+MODULE_FIRMWARE(fw_dir "ldcard.bin");
+MODULE_FIRMWARE(fw_dir "cancard.bin");
+MODULE_FIRMWARE(fw_dir "cansja.bin");
+
+MODULE_FIRMWARE(fw_dir "bcard2.bin");
+MODULE_FIRMWARE(fw_dir "ldcard2.bin");
+MODULE_FIRMWARE(fw_dir "cancrd2.bin");
+
+static __devinit const struct softing_platform_data
+*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
+{
+	const struct softing_platform_data *lp;
+
+	for (lp = softingcs_platform_data; lp->manf; ++lp) {
+		if ((lp->manf == manf) && (lp->prod == prod))
+			return lp;
+	}
+	return NULL;
+}
+
+/*
+ * platformdata callbacks
+ */
+static int softingcs_reset(struct platform_device *pdev, int v)
+{
+	struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+	dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
+	return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
+}
+
+static int softingcs_enable_irq(struct platform_device *pdev, int v)
+{
+	struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+	dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
+	return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
+}
+
+/*
+ * pcmcia check
+ */
+static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
+		void *priv_data)
+{
+	struct softing_platform_data *pdat = priv_data;
+	struct resource *pres;
+	int memspeed = 0;
+
+	WARN_ON(!pdat);
+	pres = pcmcia->resource[PCMCIA_IOMEM_0];
+	if (resource_size(pres) < 0x1000)
+		return -ERANGE;
+
+	pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+	if (pdat->generation < 2) {
+		pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
+		memspeed = 3;
+	} else {
+		pres->flags |= WIN_DATA_WIDTH_16;
+	}
+	return pcmcia_request_window(pcmcia, pres, memspeed);
+}
+
+static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
+{
+	struct platform_device *pdev = pcmcia->priv;
+
+	/* free bits */
+	platform_device_unregister(pdev);
+	/* release pcmcia stuff */
+	pcmcia_disable_device(pcmcia);
+}
+
+/*
+ * platform_device wrapper
+ * pdev->resource has 2 entries: io & irq
+ */
+static void softingcs_pdev_release(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	kfree(pdev);
+}
+
+static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
+{
+	int ret;
+	struct platform_device *pdev;
+	const struct softing_platform_data *pdat;
+	struct resource *pres;
+	struct dev {
+		struct platform_device pdev;
+		struct resource res[2];
+	} *dev;
+
+	/* find matching platform_data */
+	pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
+	if (!pdat)
+		return -ENOTTY;
+
+	/* setup pcmcia device */
+	pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
+		CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
+	ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
+	if (ret)
+		goto pcmcia_failed;
+
+	ret = pcmcia_enable_device(pcmcia);
+	if (ret < 0)
+		goto pcmcia_failed;
+
+	pres = pcmcia->resource[PCMCIA_IOMEM_0];
+	if (!pres) {
+		ret = -EBADF;
+		goto pcmcia_bad;
+	}
+
+	/* create softing platform device */
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto mem_failed;
+	}
+	dev->pdev.resource = dev->res;
+	dev->pdev.num_resources = ARRAY_SIZE(dev->res);
+	dev->pdev.dev.release = softingcs_pdev_release;
+
+	pdev = &dev->pdev;
+	pdev->dev.platform_data = (void *)pdat;
+	pdev->dev.parent = &pcmcia->dev;
+	pcmcia->priv = pdev;
+
+	/* platform device resources */
+	pdev->resource[0].flags = IORESOURCE_MEM;
+	pdev->resource[0].start = pres->start;
+	pdev->resource[0].end = pres->end;
+
+	pdev->resource[1].flags = IORESOURCE_IRQ;
+	pdev->resource[1].start = pcmcia->irq;
+	pdev->resource[1].end = pdev->resource[1].start;
+
+	/* platform device setup */
+	spin_lock(&softingcs_index_lock);
+	pdev->id = softingcs_index++;
+	spin_unlock(&softingcs_index_lock);
+	pdev->name = "softing";
+	dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
+	ret = platform_device_register(pdev);
+	if (ret < 0)
+		goto platform_failed;
+
+	dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
+	return 0;
+
+platform_failed:
+	kfree(dev);
+mem_failed:
+pcmcia_bad:
+pcmcia_failed:
+	pcmcia_disable_device(pcmcia);
+	pcmcia->priv = NULL;
+	return ret ?: -ENODEV;
+}
+
+static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
+	/* softing */
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
+	/* vector, manufacturer? */
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
+	/* EDIC */
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
+	PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
+
+static struct pcmcia_driver softingcs_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "softingcs",
+	.id_table	= softingcs_ids,
+	.probe		= softingcs_probe,
+	.remove		= __devexit_p(softingcs_remove),
+};
+
+static int __init softingcs_start(void)
+{
+	spin_lock_init(&softingcs_index_lock);
+	return pcmcia_register_driver(&softingcs_driver);
+}
+
+static void __exit softingcs_stop(void)
+{
+	pcmcia_unregister_driver(&softingcs_driver);
+}
+
+module_init(softingcs_start);
+module_exit(softingcs_stop);
+
+MODULE_DESCRIPTION("softing CANcard driver"
+		", links PCMCIA card to softing driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 0000000..b520784
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <asm/div64.h>
+
+#include "softing.h"
+
+/*
+ * low level DPRAM command.
+ * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
+ */
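+/*
+ * Handshake as implemented below: the host writes the command word to
+ * DPRAM_FCT_PARAM and a request vector to DPRAM_FCT_HOST; the card signals
+ * completion by replacing the vector in DPRAM_FCT_HOST with RES_OK and
+ * leaves its return value in DPRAM_FCT_RESULT.  We poll for up to 1 second.
+ */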
+static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
+		const char *msg)
+{
+	int ret;
+	unsigned long stamp;
+
+	iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
+	iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
+	iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
+	/* be sure to flush this to the card */
+	wmb();
+	stamp = jiffies + 1 * HZ;
+	/* wait for card */
+	do {
+		/* DPRAM_FCT_HOST is _not_ aligned */
+		ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
+			(ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
+		/* don't have any cached variables */
+		rmb();
+		if (ret == RES_OK)
+			/* read return-value now */
+			return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
+
+		if ((ret != vector) || time_after(jiffies, stamp))
+			break;
+		/* process context => relax */
+		usleep_range(500, 10000);
+	} while (1);
+
+	ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+	dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
+	return ret;
+}
+
+static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
+{
+	int ret;
+
+	ret = _softing_fct_cmd(card, cmd, 0, msg);
+	if (ret > 0) {
+		dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
+		ret = -EIO;
+	}
+	return ret;
+}
+
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+		const char *msg)
+{
+	int ret;
+	unsigned long stamp;
+
+	iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
+	iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
+	/* be sure to flush this to the card */
+	wmb();
+	stamp = jiffies + 3 * HZ;
+	/* wait for card */
+	do {
+		ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
+		/* don't have any cached variables */
+		rmb();
+		if (ret == RES_OK)
+			return 0;
+		if (time_after(jiffies, stamp))
+			break;
+		/* process context => relax */
+		usleep_range(500, 10000);
+	} while (!signal_pending(current));
+
+	ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+	dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
+	return ret;
+}
+
+static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
+		uint16_t *plen, const uint8_t **pdat)
+{
+	uint16_t checksum[2];
+	const uint8_t *mem;
+	const uint8_t *end;
+
+	/*
+	 * firmware records are a binary, unaligned stream composed of:
+	 * uint16_t type;
+	 * uint32_t addr;
+	 * uint16_t len;
+	 * uint8_t dat[len];
+	 * uint16_t checksum;
+	 * all values in little endian.
+	 * We could define a struct for this, with __attribute__((packed)),
+	 * but would that solve the alignment in _all_ cases (cf. the
+	 * struct itself may be at an odd address)?
+	 *
+	 * I chose to use leXX_to_cpup() since this solves both
+	 * endianness & alignment.
+	 */
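+	/*
+	 * Illustrative example (not taken from a real firmware file):
+	 * a record with type=0, addr=0x100, len=2, dat={0xaa, 0xbb} would be
+	 * stored as the 12 bytes
+	 *   00 00  00 01 00 00  02 00  aa bb  68 01
+	 * where 0x0168 is the 16-bit byte-sum of the preceding 10 bytes.
+	 */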
+	mem = *pmem;
+	*ptype = le16_to_cpup((void *)&mem[0]);
+	*paddr = le32_to_cpup((void *)&mem[2]);
+	*plen = le16_to_cpup((void *)&mem[6]);
+	*pdat = &mem[8];
+	/* verify checksum */
+	end = &mem[8 + *plen];
+	checksum[0] = le16_to_cpup((void *)end);
+	for (checksum[1] = 0; mem < end; ++mem)
+		checksum[1] += *mem;
+	if (checksum[0] != checksum[1])
+		return -EINVAL;
+	/* increment */
+	*pmem += 10 + *plen;
+	return 0;
+}
+
+int softing_load_fw(const char *file, struct softing *card,
+		__iomem uint8_t *dpram, unsigned int size, int offset)
+{
+	const struct firmware *fw;
+	int ret;
+	const uint8_t *mem, *end, *dat;
+	uint16_t type, len;
+	uint32_t addr;
+	uint8_t *buf = NULL;
+	int buflen = 0;
+	int8_t type_end = 0;
+
+	ret = request_firmware(&fw, file, &card->pdev->dev);
+	if (ret < 0)
+		return ret;
+	dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
+		", offset %c0x%04x\n",
+		card->pdat->name, file, (unsigned int)fw->size,
+		(offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
+	/* parse the firmware */
+	mem = fw->data;
+	end = &mem[fw->size];
+	/* look for header record */
+	ret = fw_parse(&mem, &type, &addr, &len, &dat);
+	if (ret < 0)
+		goto failed;
+	if (type != 0xffff)
+		goto failed;
+	if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+		ret = -EINVAL;
+		goto failed;
+	}
+	/* ok, we had a header */
+	while (mem < end) {
+		ret = fw_parse(&mem, &type, &addr, &len, &dat);
+		if (ret < 0)
+			goto failed;
+		if (type == 3) {
+			/* start address, not used here */
+			continue;
+		} else if (type == 1) {
+			/* eof */
+			type_end = 1;
+			break;
+		} else if (type != 0) {
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		if ((addr + len + offset) > size)
+			goto failed;
+		memcpy_toio(&dpram[addr + offset], dat, len);
+		/* be sure to flush caches from IO space */
+		mb();
+		if (len > buflen) {
+			/* align buflen */
+			buflen = (len + (1024-1)) & ~(1024-1);
+			buf = krealloc(buf, buflen, GFP_KERNEL);
+			if (!buf) {
+				ret = -ENOMEM;
+				goto failed;
+			}
+		}
+		/* verify record data */
+		memcpy_fromio(buf, &dpram[addr + offset], len);
+		if (memcmp(buf, dat, len)) {
+			/* is not ok */
+			dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
+			ret = -EIO;
+			goto failed;
+		}
+	}
+	if (!type_end)
+		/* no end record seen */
+		goto failed;
+	ret = 0;
+failed:
+	kfree(buf);
+	release_firmware(fw);
+	if (ret < 0)
+		dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+	return ret;
+}
+
+int softing_load_app_fw(const char *file, struct softing *card)
+{
+	const struct firmware *fw;
+	const uint8_t *mem, *end, *dat;
+	int ret, j;
+	uint16_t type, len;
+	uint32_t addr, start_addr = 0;
+	unsigned int sum, rx_sum;
+	int8_t type_end = 0, type_entrypoint = 0;
+
+	ret = request_firmware(&fw, file, &card->pdev->dev);
+	if (ret) {
+		dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
+			file, ret);
+		return ret;
+	}
+	dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
+		file, (unsigned long)fw->size);
+	/* parse the firmware */
+	mem = fw->data;
+	end = &mem[fw->size];
+	/* look for header record */
+	ret = fw_parse(&mem, &type, &addr, &len, &dat);
+	if (ret)
+		goto failed;
+	ret = -EINVAL;
+	if (type != 0xffff) {
+		dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
+			type);
+		goto failed;
+	}
+	if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+		dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
+				len, dat);
+		goto failed;
+	}
+	/* ok, we had a header */
+	while (mem < end) {
+		ret = fw_parse(&mem, &type, &addr, &len, &dat);
+		if (ret)
+			goto failed;
+
+		if (type == 3) {
+			/* start address */
+			start_addr = addr;
+			type_entrypoint = 1;
+			continue;
+		} else if (type == 1) {
+			/* eof */
+			type_end = 1;
+			break;
+		} else if (type != 0) {
+			dev_alert(&card->pdev->dev,
+					"unknown record type 0x%04x\n", type);
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		/* regular data */
+		for (sum = 0, j = 0; j < len; ++j)
+			sum += dat[j];
+		/* work in 16bit (target) */
+		sum &= 0xffff;
+
+		memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
+		iowrite32(card->pdat->app.offs + card->pdat->app.addr,
+				&card->dpram[DPRAM_COMMAND + 2]);
+		iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
+		iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
+		iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
+		ret = softing_bootloader_command(card, 1, "loading app.");
+		if (ret < 0)
+			goto failed;
+		/* verify checksum */
+		rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
+		if (rx_sum != sum) {
+			dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
+				", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
+			ret = -EIO;
+			goto failed;
+		}
+	}
+	if (!type_end || !type_entrypoint)
+		goto failed;
+	/* start application in card */
+	iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
+	iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
+	ret = softing_bootloader_command(card, 3, "start app.");
+	if (ret < 0)
+		goto failed;
+	ret = 0;
+failed:
+	release_firmware(fw);
+	if (ret < 0)
+		dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+	return ret;
+}
+
+static int softing_reset_chip(struct softing *card)
+{
+	int ret;
+
+	do {
+		/* reset chip */
+		iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
+		iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
+		iowrite8(1, &card->dpram[DPRAM_RESET]);
+		iowrite8(0, &card->dpram[DPRAM_RESET+1]);
+
+		ret = softing_fct_cmd(card, 0, "reset_can");
+		if (!ret)
+			break;
+		if (signal_pending(current))
+			/* don't wait any longer */
+			break;
+	} while (1);
+	card->tx.pending = 0;
+	return ret;
+}
+
+int softing_chip_poweron(struct softing *card)
+{
+	int ret;
+	/* sync */
+	ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
+	if (ret < 0)
+		goto failed;
+
+	ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
+	if (ret < 0)
+		goto failed;
+
+	ret = softing_reset_chip(card);
+	if (ret < 0)
+		goto failed;
+	/* get_serial */
+	ret = softing_fct_cmd(card, 43, "get_serial_number");
+	if (ret < 0)
+		goto failed;
+	card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
+	/* get_version */
+	ret = softing_fct_cmd(card, 12, "get_version");
+	if (ret < 0)
+		goto failed;
+	card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
+	card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
+	card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
+	card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
+	card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
+	return 0;
+failed:
+	return ret;
+}
+
+static void softing_initialize_timestamp(struct softing *card)
+{
+	uint64_t ovf;
+
+	card->ts_ref = ktime_get();
+
+	/* 16MHz is the reference */
+	ovf = 0x100000000ULL * 16;
+	do_div(ovf, card->pdat->freq ?: 16);
+
+	card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+}
+
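+/*
+ * Convert a raw 32-bit card timestamp into a ktime_t.
+ * The raw counter is scaled to the 16MHz reference (see
+ * softing_initialize_timestamp()) and wraps every ts_overflow; whenever the
+ * reconstructed time lags more than one wrap period behind 'now', ts_ref is
+ * advanced by whole wrap periods.  The real-time offset is added at the end
+ * so the returned timestamp is in wall-clock time.
+ */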
+ktime_t softing_raw2ktime(struct softing *card, u32 raw)
+{
+	uint64_t rawl;
+	ktime_t now, real_offset;
+	ktime_t target;
+	ktime_t tmp;
+
+	now = ktime_get();
+	real_offset = ktime_sub(ktime_get_real(), now);
+
+	/* find nsec from card */
+	rawl = raw * 16;
+	do_div(rawl, card->pdat->freq ?: 16);
+	target = ktime_add_us(card->ts_ref, rawl);
+	/* test for overflows */
+	tmp = ktime_add(target, card->ts_overflow);
+	while (unlikely(ktime_to_ns(tmp) < ktime_to_ns(now))) {
+		card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
+		target = tmp;
+		tmp = ktime_add(target, card->ts_overflow);
+	}
+	return ktime_add(target, real_offset);
+}
+
+static inline int softing_error_reporting(struct net_device *netdev)
+{
+	struct softing_priv *priv = netdev_priv(netdev);
+
+	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+		? 1 : 0;
+}
+
+int softing_startstop(struct net_device *dev, int up)
+{
+	int ret;
+	struct softing *card;
+	struct softing_priv *priv;
+	struct net_device *netdev;
+	int bus_bitmask_start;
+	int j, error_reporting;
+	struct can_frame msg;
+	const struct can_bittiming *bt;
+
+	priv = netdev_priv(dev);
+	card = priv->card;
+
+	if (!card->fw.up)
+		return -EIO;
+
+	ret = mutex_lock_interruptible(&card->fw.lock);
+	if (ret)
+		return ret;
+
+	bus_bitmask_start = 0;
+	if (dev && up)
+		/* prepare to start this bus as well */
+		bus_bitmask_start |= (1 << priv->index);
+	/* bring netdevs down */
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		netdev = card->net[j];
+		if (!netdev)
+			continue;
+		priv = netdev_priv(netdev);
+
+		if (dev != netdev)
+			netif_stop_queue(netdev);
+
+		if (netif_running(netdev)) {
+			if (dev != netdev)
+				bus_bitmask_start |= (1 << j);
+			priv->tx.pending = 0;
+			priv->tx.echo_put = 0;
+			priv->tx.echo_get = 0;
+			/*
+			 * this bus may just have called open_candev(),
+			 * so calling close_candev() right away looks odd;
+			 * but we may also come here from busoff recovery,
+			 * in which case the echo_skb _needs_ flushing too.
+			 * just be sure to call open_candev() again afterwards
+			 */
+			close_candev(netdev);
+		}
+		priv->can.state = CAN_STATE_STOPPED;
+	}
+	card->tx.pending = 0;
+
+	softing_enable_irq(card, 0);
+	ret = softing_reset_chip(card);
+	if (ret)
+		goto failed;
+	if (!bus_bitmask_start)
+		/* no busses to be brought up */
+		goto card_done;
+
+	if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
+			&& (softing_error_reporting(card->net[0])
+				!= softing_error_reporting(card->net[1]))) {
+		dev_alert(&card->pdev->dev,
+				"err_reporting flag differs for busses\n");
+		goto invalid;
+	}
+	error_reporting = 0;
+	if (bus_bitmask_start & 1) {
+		netdev = card->net[0];
+		priv = netdev_priv(netdev);
+		error_reporting += softing_error_reporting(netdev);
+		/* init chip 1 */
+		bt = &priv->can.bittiming;
+		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		iowrite16(bt->phase_seg1 + bt->prop_seg,
+				&card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+				&card->dpram[DPRAM_FCT_PARAM + 10]);
+		ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
+		if (ret < 0)
+			goto failed;
+		/* set mode */
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		ret = softing_fct_cmd(card, 3, "set_mode[0]");
+		if (ret < 0)
+			goto failed;
+		/* set filter */
+		/* 11bit id & mask */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+		ret = softing_fct_cmd(card, 7, "set_filter[0]");
+		if (ret < 0)
+			goto failed;
+		/* set output control */
+		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		ret = softing_fct_cmd(card, 5, "set_output[0]");
+		if (ret < 0)
+			goto failed;
+	}
+	if (bus_bitmask_start & 2) {
+		netdev = card->net[1];
+		priv = netdev_priv(netdev);
+		error_reporting += softing_error_reporting(netdev);
+		/* init chip2 */
+		bt = &priv->can.bittiming;
+		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		iowrite16(bt->phase_seg1 + bt->prop_seg,
+				&card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+				&card->dpram[DPRAM_FCT_PARAM + 10]);
+		ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
+		if (ret < 0)
+			goto failed;
+		/* set mode2 */
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		ret = softing_fct_cmd(card, 4, "set_mode[1]");
+		if (ret < 0)
+			goto failed;
+		/* set filter2 */
+		/* 11bit id & mask */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+		ret = softing_fct_cmd(card, 8, "set_filter[1]");
+		if (ret < 0)
+			goto failed;
+		/* set output control2 */
+		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		ret = softing_fct_cmd(card, 6, "set_output[1]");
+		if (ret < 0)
+			goto failed;
+	}
+	/* enable_error_frame */
+	/*
+	 * Error reporting is switched off at the moment since
+	 * the receiving of them is not yet 100% verified
+	 * This should be enabled sooner or later
+	 *
+	if (error_reporting) {
+		ret = softing_fct_cmd(card, 51, "enable_error_frame");
+		if (ret < 0)
+			goto failed;
+	}
+	*/
+	/* initialize interface */
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
+	ret = softing_fct_cmd(card, 17, "initialize_interface");
+	if (ret < 0)
+		goto failed;
+	/* enable_fifo */
+	ret = softing_fct_cmd(card, 36, "enable_fifo");
+	if (ret < 0)
+		goto failed;
+	/* enable fifo tx ack */
+	ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
+	if (ret < 0)
+		goto failed;
+	/* enable fifo tx ack2 */
+	ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
+	if (ret < 0)
+		goto failed;
+	/* start_chip */
+	ret = softing_fct_cmd(card, 11, "start_chip");
+	if (ret < 0)
+		goto failed;
+	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
+	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
+	if (card->pdat->generation < 2) {
+		iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+		/* flush the DPRAM caches */
+		wmb();
+	}
+
+	softing_initialize_timestamp(card);
+
+	/*
+	 * do socketcan notifications/status changes
+	 * from here, no errors should occur, or the failed: part
+	 * must be reviewed
+	 */
+	memset(&msg, 0, sizeof(msg));
+	msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
+	msg.can_dlc = CAN_ERR_DLC;
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (!(bus_bitmask_start & (1 << j)))
+			continue;
+		netdev = card->net[j];
+		if (!netdev)
+			continue;
+		priv = netdev_priv(netdev);
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		open_candev(netdev);
+		if (dev != netdev) {
+			/* notify other busses on the restart */
+			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+			++priv->can.can_stats.restarts;
+		}
+		netif_wake_queue(netdev);
+	}
+
+	/* enable interrupts */
+	ret = softing_enable_irq(card, 1);
+	if (ret)
+		goto failed;
+card_done:
+	mutex_unlock(&card->fw.lock);
+	return 0;
+invalid:
+	ret = -EINVAL;
+failed:
+	softing_enable_irq(card, 0);
+	softing_reset_chip(card);
+	mutex_unlock(&card->fw.lock);
+	/* bring all other interfaces down */
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		netdev = card->net[j];
+		if (!netdev)
+			continue;
+		dev_close(netdev);
+	}
+	return ret;
+}
+
+int softing_default_output(struct net_device *netdev)
+{
+	struct softing_priv *priv = netdev_priv(netdev);
+	struct softing *card = priv->card;
+
+	switch (priv->chip) {
+	case 1000:
+		return (card->pdat->generation < 2) ? 0xfb : 0xfa;
+	case 5:
+		return 0x60;
+	default:
+		return 0x40;
+	}
+}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 0000000..aeea9f9
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "softing.h"
+
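+/* per-bus tx echo budget: roughly half of the card-wide TXMAX slots */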
+#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
+
+/*
+ * test if a specific CAN netdev
+ * is online (i.e. up and running, not sleeping, not busoff)
+ */
+static inline int canif_is_active(struct net_device *netdev)
+{
+	struct can_priv *can = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return 0;
+	return (can->state <= CAN_STATE_ERROR_PASSIVE);
+}
+
+/* reset DPRAM */
+static inline void softing_set_reset_dpram(struct softing *card)
+{
+	if (card->pdat->generation >= 2) {
+		spin_lock_bh(&card->spin);
+		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
+				&card->dpram[DPRAM_V2_RESET]);
+		spin_unlock_bh(&card->spin);
+	}
+}
+
+static inline void softing_clr_reset_dpram(struct softing *card)
+{
+	if (card->pdat->generation >= 2) {
+		spin_lock_bh(&card->spin);
+		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
+				&card->dpram[DPRAM_V2_RESET]);
+		spin_unlock_bh(&card->spin);
+	}
+}
+
+/* trigger the tx queue-ing */
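+/*
+ * Layout of one DPRAM_TX_SIZE record, as assembled below:
+ * byte 0:  command flags (CMD_TX, plus CMD_RTR/CMD_XTD/CMD_BUS2 as needed)
+ * byte 1:  dlc
+ * then:    the can_id, little endian; 2 bytes plus 1 pad byte for standard
+ *          ids, 4 bytes for extended ids
+ * then:    can_dlc data bytes (none for RTR frames); the rest of the
+ *          record stays zeroed
+ */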
+static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
+		struct net_device *dev)
+{
+	struct softing_priv *priv = netdev_priv(dev);
+	struct softing *card = priv->card;
+	int ret;
+	uint8_t *ptr;
+	uint8_t fifo_wr, fifo_rd;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	uint8_t buf[DPRAM_TX_SIZE];
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	spin_lock(&card->spin);
+
+	ret = NETDEV_TX_BUSY;
+	if (!card->fw.up ||
+			(card->tx.pending >= TXMAX) ||
+			(priv->tx.pending >= TX_ECHO_SKB_MAX))
+		goto xmit_done;
+	fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
+	fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
+	if (fifo_wr == fifo_rd)
+		/* fifo full */
+		goto xmit_done;
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	*ptr = CMD_TX;
+	if (cf->can_id & CAN_RTR_FLAG)
+		*ptr |= CMD_RTR;
+	if (cf->can_id & CAN_EFF_FLAG)
+		*ptr |= CMD_XTD;
+	if (priv->index)
+		*ptr |= CMD_BUS2;
+	++ptr;
+	*ptr++ = cf->can_dlc;
+	*ptr++ = (cf->can_id >> 0);
+	*ptr++ = (cf->can_id >> 8);
+	if (cf->can_id & CAN_EFF_FLAG) {
+		*ptr++ = (cf->can_id >> 16);
+		*ptr++ = (cf->can_id >> 24);
+	} else {
+		/* increment 1, not 2 as you might think */
+		ptr += 1;
+	}
+	if (!(cf->can_id & CAN_RTR_FLAG))
+		memcpy(ptr, &cf->data[0], cf->can_dlc);
+	memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
+			buf, DPRAM_TX_SIZE);
+	if (++fifo_wr >= DPRAM_TX_CNT)
+		fifo_wr = 0;
+	iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
+	card->tx.last_bus = priv->index;
+	++card->tx.pending;
+	++priv->tx.pending;
+	can_put_echo_skb(skb, dev, priv->tx.echo_put);
+	++priv->tx.echo_put;
+	if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
+		priv->tx.echo_put = 0;
+	/* can_put_echo_skb() saves the skb, safe to return TX_OK */
+	ret = NETDEV_TX_OK;
+xmit_done:
+	spin_unlock(&card->spin);
+	if (card->tx.pending >= TXMAX) {
+		int j;
+		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+			if (card->net[j])
+				netif_stop_queue(card->net[j]);
+		}
+	}
+	if (ret != NETDEV_TX_OK)
+		netif_stop_queue(dev);
+
+	return ret;
+}
+
+/*
+ * shortcut for skb delivery
+ */
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+		ktime_t ktime)
+{
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	skb = alloc_can_skb(netdev, &cf);
+	if (!skb)
+		return -ENOMEM;
+	memcpy(cf, msg, sizeof(*msg));
+	skb->tstamp = ktime;
+	return netif_rx(skb);
+}
+
+/*
+ * softing_handle_1
+ * pop 1 entry from the DPRAM queue, and process
+ */
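+/*
+ * Layout of one DPRAM_RX_SIZE record, as decoded below:
+ * byte 0:  command flags; 0xff means no valid record (card probably gone),
+ *          otherwise CMD_BUS2, CMD_ERR, CMD_RTR, CMD_XTD, CMD_ACK
+ * error records:    1 state byte, then a 4-byte little endian timestamp
+ * data/ack records: 1 dlc byte, the can_id (2 or 4 bytes, little endian),
+ *                   a 4-byte timestamp and 8 data bytes
+ */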
+static int softing_handle_1(struct softing *card)
+{
+	struct net_device *netdev;
+	struct softing_priv *priv;
+	ktime_t ktime;
+	struct can_frame msg;
+	int cnt = 0, lost_msg;
+	uint8_t fifo_rd, fifo_wr, cmd;
+	uint8_t *ptr;
+	uint32_t tmp_u32;
+	uint8_t buf[DPRAM_RX_SIZE];
+
+	memset(&msg, 0, sizeof(msg));
+	/* test for lost msgs */
+	lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
+	if (lost_msg) {
+		int j;
+		/* reset condition */
+		iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
+		/* prepare msg */
+		msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+		msg.can_dlc = CAN_ERR_DLC;
+		msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+		/*
+		 * service all busses, as we don't know which one it applies to,
+		 * but only service busses that are online
+		 */
+		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+			netdev = card->net[j];
+			if (!netdev)
+				continue;
+			if (!canif_is_active(netdev))
+				/* a dead bus has no overflows */
+				continue;
+			++netdev->stats.rx_over_errors;
+			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+		}
+		/* prepare for other use */
+		memset(&msg, 0, sizeof(msg));
+		++cnt;
+	}
+
+	fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
+	fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
+
+	if (++fifo_rd >= DPRAM_RX_CNT)
+		fifo_rd = 0;
+	if (fifo_wr == fifo_rd)
+		return cnt;
+
+	memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
+			DPRAM_RX_SIZE);
+	mb();
+	/* trigger dual port RAM */
+	iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
+
+	ptr = buf;
+	cmd = *ptr++;
+	if (cmd == 0xff)
+		/* not very useful, the card has probably been removed */
+		return 0;
+	netdev = card->net[0];
+	if (cmd & CMD_BUS2)
+		netdev = card->net[1];
+	priv = netdev_priv(netdev);
+
+	if (cmd & CMD_ERR) {
+		uint8_t can_state, state;
+
+		state = *ptr++;
+
+		msg.can_id = CAN_ERR_FLAG;
+		msg.can_dlc = CAN_ERR_DLC;
+
+		if (state & SF_MASK_BUSOFF) {
+			can_state = CAN_STATE_BUS_OFF;
+			msg.can_id |= CAN_ERR_BUSOFF;
+			state = STATE_BUSOFF;
+		} else if (state & SF_MASK_EPASSIVE) {
+			can_state = CAN_STATE_ERROR_PASSIVE;
+			msg.can_id |= CAN_ERR_CRTL;
+			msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+			state = STATE_EPASSIVE;
+		} else {
+			can_state = CAN_STATE_ERROR_ACTIVE;
+			msg.can_id |= CAN_ERR_CRTL;
+			state = STATE_EACTIVE;
+		}
+		/* update DPRAM */
+		iowrite8(state, &card->dpram[priv->index ?
+				DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
+		/* timestamp */
+		tmp_u32 = le32_to_cpup((void *)ptr);
+		ptr += 4;
+		ktime = softing_raw2ktime(card, tmp_u32);
+
+		++netdev->stats.rx_errors;
+		/* update internal status */
+		if (can_state != priv->can.state) {
+			priv->can.state = can_state;
+			if (can_state == CAN_STATE_ERROR_PASSIVE)
+				++priv->can.can_stats.error_passive;
+			else if (can_state == CAN_STATE_BUS_OFF) {
+				/* this calls can_close_cleanup() */
+				can_bus_off(netdev);
+				netif_stop_queue(netdev);
+			}
+			/* trigger socketcan */
+			softing_netdev_rx(netdev, &msg, ktime);
+		}
+
+	} else {
+		if (cmd & CMD_RTR)
+			msg.can_id |= CAN_RTR_FLAG;
+		msg.can_dlc = get_can_dlc(*ptr++);
+		if (cmd & CMD_XTD) {
+			msg.can_id |= CAN_EFF_FLAG;
+			msg.can_id |= le32_to_cpup((void *)ptr);
+			ptr += 4;
+		} else {
+			msg.can_id |= le16_to_cpup((void *)ptr);
+			ptr += 2;
+		}
+		/* timestamp */
+		tmp_u32 = le32_to_cpup((void *)ptr);
+		ptr += 4;
+		ktime = softing_raw2ktime(card, tmp_u32);
+		if (!(msg.can_id & CAN_RTR_FLAG))
+			memcpy(&msg.data[0], ptr, 8);
+		ptr += 8;
+		/* update socket */
+		if (cmd & CMD_ACK) {
+			/* acknowledge, was tx msg */
+			struct sk_buff *skb;
+			skb = priv->can.echo_skb[priv->tx.echo_get];
+			if (skb)
+				skb->tstamp = ktime;
+			can_get_echo_skb(netdev, priv->tx.echo_get);
+			++priv->tx.echo_get;
+			if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
+				priv->tx.echo_get = 0;
+			if (priv->tx.pending)
+				--priv->tx.pending;
+			if (card->tx.pending)
+				--card->tx.pending;
+			++netdev->stats.tx_packets;
+			if (!(msg.can_id & CAN_RTR_FLAG))
+				netdev->stats.tx_bytes += msg.can_dlc;
+		} else {
+			int ret;
+
+			ret = softing_netdev_rx(netdev, &msg, ktime);
+			if (ret == NET_RX_SUCCESS) {
+				++netdev->stats.rx_packets;
+				if (!(msg.can_id & CAN_RTR_FLAG))
+					netdev->stats.rx_bytes += msg.can_dlc;
+			} else {
+				++netdev->stats.rx_dropped;
+			}
+		}
+	}
+	++cnt;
+	return cnt;
+}
+
+/*
+ * real interrupt handler
+ */
+static irqreturn_t softing_irq_thread(int irq, void *dev_id)
+{
+	struct softing *card = (struct softing *)dev_id;
+	struct net_device *netdev;
+	struct softing_priv *priv;
+	int j, offset, work_done;
+
+	work_done = 0;
+	spin_lock_bh(&card->spin);
+	while (softing_handle_1(card) > 0) {
+		++card->irq.svc_count;
+		++work_done;
+	}
+	spin_unlock_bh(&card->spin);
+	/* resume tx queues */
+	offset = card->tx.last_bus;
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (card->tx.pending >= TXMAX)
+			break;
+		netdev = card->net[(j + offset + 1) % card->pdat->nbus];
+		if (!netdev)
+			continue;
+		priv = netdev_priv(netdev);
+		if (!canif_is_active(netdev))
+			/* it makes no sense to wake dead busses */
+			continue;
+		if (priv->tx.pending >= TX_ECHO_SKB_MAX)
+			continue;
+		++work_done;
+		netif_wake_queue(netdev);
+	}
+	return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * interrupt routines:
+ * schedule the 'real interrupt handler'
+ */
+static irqreturn_t softing_irq_v2(int irq, void *dev_id)
+{
+	struct softing *card = (struct softing *)dev_id;
+	uint8_t ir;
+
+	ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
+	iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+	return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static irqreturn_t softing_irq_v1(int irq, void *dev_id)
+{
+	struct softing *card = (struct softing *)dev_id;
+	uint8_t ir;
+
+	ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
+	iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
+	return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+/*
+ * netdev/candev inter-operability
+ */
+static int softing_netdev_open(struct net_device *ndev)
+{
+	int ret;
+
+	/* check or determine and set bittime */
+	ret = open_candev(ndev);
+	if (!ret)
+		ret = softing_startstop(ndev, 1);
+	return ret;
+}
+
+static int softing_netdev_stop(struct net_device *ndev)
+{
+	int ret;
+
+	netif_stop_queue(ndev);
+
+	/* softing cycle does close_candev() */
+	ret = softing_startstop(ndev, 0);
+	return ret;
+}
+
+static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+	int ret;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		/* softing_startstop does close_candev() */
+		ret = softing_startstop(ndev, 1);
+		return ret;
+	case CAN_MODE_STOP:
+	case CAN_MODE_SLEEP:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * Softing device management helpers
+ */
+int softing_enable_irq(struct softing *card, int enable)
+{
+	int ret;
+
+	if (!card->irq.nr) {
+		return 0;
+	} else if (card->irq.requested && !enable) {
+		free_irq(card->irq.nr, card);
+		card->irq.requested = 0;
+	} else if (!card->irq.requested && enable) {
+		ret = request_threaded_irq(card->irq.nr,
+				(card->pdat->generation >= 2) ?
+					softing_irq_v2 : softing_irq_v1,
+				softing_irq_thread, IRQF_SHARED,
+				dev_name(&card->pdev->dev), card);
+		if (ret) {
+			dev_alert(&card->pdev->dev,
+					"request_threaded_irq(%u) failed\n",
+					card->irq.nr);
+			return ret;
+		}
+		card->irq.requested = 1;
+	}
+	return 0;
+}
+
+static void softing_card_shutdown(struct softing *card)
+{
+	int fw_up = 0;
+
+	if (mutex_lock_interruptible(&card->fw.lock))
+		/* return -ERESTARTSYS */;
+	fw_up = card->fw.up;
+	card->fw.up = 0;
+
+	if (card->irq.requested && card->irq.nr) {
+		free_irq(card->irq.nr, card);
+		card->irq.requested = 0;
+	}
+	if (fw_up) {
+		if (card->pdat->enable_irq)
+			card->pdat->enable_irq(card->pdev, 0);
+		softing_set_reset_dpram(card);
+		if (card->pdat->reset)
+			card->pdat->reset(card->pdev, 1);
+	}
+	mutex_unlock(&card->fw.lock);
+}
+
+static __devinit int softing_card_boot(struct softing *card)
+{
+	int ret, j;
+	static const uint8_t stream[] = {
+		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
+	unsigned char back[sizeof(stream)];
+
+	if (mutex_lock_interruptible(&card->fw.lock))
+		return -ERESTARTSYS;
+	if (card->fw.up) {
+		mutex_unlock(&card->fw.lock);
+		return 0;
+	}
+	/* reset board */
+	if (card->pdat->enable_irq)
+		card->pdat->enable_irq(card->pdev, 1);
+	/* boot card */
+	softing_set_reset_dpram(card);
+	if (card->pdat->reset)
+		card->pdat->reset(card->pdev, 1);
+	for (j = 0; (j + sizeof(stream)) < card->dpram_size;
+			j += sizeof(stream)) {
+
+		memcpy_toio(&card->dpram[j], stream, sizeof(stream));
+		/* flush IO cache */
+		mb();
+		memcpy_fromio(back, &card->dpram[j], sizeof(stream));
+
+		if (!memcmp(back, stream, sizeof(stream)))
+			continue;
+		/* memory is not equal */
+		dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
+		ret = -EIO;
+		goto failed;
+	}
+	wmb();
+	/* load boot firmware */
+	ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
+				card->dpram_size,
+				card->pdat->boot.offs - card->pdat->boot.addr);
+	if (ret < 0)
+		goto failed;
+	/* load loader firmware */
+	ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
+				card->dpram_size,
+				card->pdat->load.offs - card->pdat->load.addr);
+	if (ret < 0)
+		goto failed;
+
+	if (card->pdat->reset)
+		card->pdat->reset(card->pdev, 0);
+	softing_clr_reset_dpram(card);
+	ret = softing_bootloader_command(card, 0, "card boot");
+	if (ret < 0)
+		goto failed;
+	ret = softing_load_app_fw(card->pdat->app.fw, card);
+	if (ret < 0)
+		goto failed;
+
+	ret = softing_chip_poweron(card);
+	if (ret < 0)
+		goto failed;
+
+	card->fw.up = 1;
+	mutex_unlock(&card->fw.lock);
+	return 0;
+failed:
+	card->fw.up = 0;
+	if (card->pdat->enable_irq)
+		card->pdat->enable_irq(card->pdev, 0);
+	softing_set_reset_dpram(card);
+	if (card->pdat->reset)
+		card->pdat->reset(card->pdev, 1);
+	mutex_unlock(&card->fw.lock);
+	return ret;
+}
+
+/*
+ * netdev sysfs
+ */
+static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+
+	return sprintf(buf, "%i\n", priv->index);
+}
+
+static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+
+	return sprintf(buf, "%i\n", priv->chip);
+}
+
+static ssize_t show_output(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+
+	return sprintf(buf, "0x%02x\n", priv->output);
+}
+
+static ssize_t store_output(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+	struct softing *card = priv->card;
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	val &= 0xFF;
+
+	ret = mutex_lock_interruptible(&card->fw.lock);
+	if (ret)
+		return -ERESTARTSYS;
+	if (netif_running(ndev)) {
+		mutex_unlock(&card->fw.lock);
+		return -EBUSY;
+	}
+	priv->output = val;
+	mutex_unlock(&card->fw.lock);
+	return count;
+}
+
+static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
+static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
+static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+
+static const struct attribute *const netdev_sysfs_attrs[] = {
+	&dev_attr_channel.attr,
+	&dev_attr_chip.attr,
+	&dev_attr_output.attr,
+	NULL,
+};
+static const struct attribute_group netdev_sysfs_group = {
+	.name = NULL,
+	.attrs = (struct attribute **)netdev_sysfs_attrs,
+};
+
+static const struct net_device_ops softing_netdev_ops = {
+	.ndo_open = softing_netdev_open,
+	.ndo_stop = softing_netdev_stop,
+	.ndo_start_xmit	= softing_netdev_start_xmit,
+};
+
+static const struct can_bittiming_const softing_btr_const = {
+	.name = "softing",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4, /* overruled */
+	.brp_min = 1,
+	.brp_max = 32, /* overruled */
+	.brp_inc = 1,
+};
+
+static __devinit struct net_device *softing_netdev_create(struct softing *card,
+		uint16_t chip_id)
+{
+	struct net_device *netdev;
+	struct softing_priv *priv;
+
+	netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
+	if (!netdev) {
+		dev_alert(&card->pdev->dev, "alloc_candev failed\n");
+		return NULL;
+	}
+	priv = netdev_priv(netdev);
+	priv->netdev = netdev;
+	priv->card = card;
+	memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
+	priv->btr_const.brp_max = card->pdat->max_brp;
+	priv->btr_const.sjw_max = card->pdat->max_sjw;
+	priv->can.bittiming_const = &priv->btr_const;
+	priv->can.clock.freq = 8000000;
+	priv->chip = chip_id;
+	priv->output = softing_default_output(netdev);
+	SET_NETDEV_DEV(netdev, &card->pdev->dev);
+
+	netdev->flags |= IFF_ECHO;
+	netdev->netdev_ops = &softing_netdev_ops;
+	priv->can.do_set_mode = softing_candev_set_mode;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+	return netdev;
+}
+
+static __devinit int softing_netdev_register(struct net_device *netdev)
+{
+	int ret;
+
+	netdev->sysfs_groups[0] = &netdev_sysfs_group;
+	ret = register_candev(netdev);
+	if (ret) {
+		dev_alert(&netdev->dev, "register failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+static void softing_netdev_cleanup(struct net_device *netdev)
+{
+	unregister_candev(netdev);
+	free_candev(netdev);
+}
+
+/*
+ * sysfs for Platform device
+ */
+#define DEV_ATTR_RO(name, member) \
+static ssize_t show_##name(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+	return sprintf(buf, "%u\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+#define DEV_ATTR_RO_STR(name, member) \
+static ssize_t show_##name(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+	return sprintf(buf, "%s\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+DEV_ATTR_RO(serial, id.serial);
+DEV_ATTR_RO_STR(firmware, pdat->app.fw);
+DEV_ATTR_RO(firmware_version, id.fw_version);
+DEV_ATTR_RO_STR(hardware, pdat->name);
+DEV_ATTR_RO(hardware_version, id.hw_version);
+DEV_ATTR_RO(license, id.license);
+DEV_ATTR_RO(frequency, id.freq);
+DEV_ATTR_RO(txpending, tx.pending);
+
+static struct attribute *softing_pdev_attrs[] = {
+	&dev_attr_serial.attr,
+	&dev_attr_firmware.attr,
+	&dev_attr_firmware_version.attr,
+	&dev_attr_hardware.attr,
+	&dev_attr_hardware_version.attr,
+	&dev_attr_license.attr,
+	&dev_attr_frequency.attr,
+	&dev_attr_txpending.attr,
+	NULL,
+};
+
+static const struct attribute_group softing_pdev_group = {
+	.name = NULL,
+	.attrs = softing_pdev_attrs,
+};
+
+/*
+ * platform driver
+ */
+static __devexit int softing_pdev_remove(struct platform_device *pdev)
+{
+	struct softing *card = platform_get_drvdata(pdev);
+	int j;
+
+	/* first, disable the card */
+	softing_card_shutdown(card);
+
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (!card->net[j])
+			continue;
+		softing_netdev_cleanup(card->net[j]);
+		card->net[j] = NULL;
+	}
+	sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+
+	iounmap(card->dpram);
+	kfree(card);
+	return 0;
+}
+
+static __devinit int softing_pdev_probe(struct platform_device *pdev)
+{
+	const struct softing_platform_data *pdat = pdev->dev.platform_data;
+	struct softing *card;
+	struct net_device *netdev;
+	struct softing_priv *priv;
+	struct resource *pres;
+	int ret;
+	int j;
+
+	if (!pdat) {
+		dev_warn(&pdev->dev, "no platform data\n");
+		return -EINVAL;
+	}
+	if (pdat->nbus > ARRAY_SIZE(card->net)) {
+		dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
+		return -EINVAL;
+	}
+
+	card = kzalloc(sizeof(*card), GFP_KERNEL);
+	if (!card)
+		return -ENOMEM;
+	card->pdat = pdat;
+	card->pdev = pdev;
+	platform_set_drvdata(pdev, card);
+	mutex_init(&card->fw.lock);
+	spin_lock_init(&card->spin);
+
+	ret = -EINVAL;
+	pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!pres)
+		goto platform_resource_failed;
+	card->dpram_phys = pres->start;
+	card->dpram_size = pres->end - pres->start + 1;
+	card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+	if (!card->dpram) {
+		dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
+		goto ioremap_failed;
+	}
+
+	pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (pres)
+		card->irq.nr = pres->start;
+
+	/* reset card */
+	ret = softing_card_boot(card);
+	if (ret < 0) {
+		dev_alert(&pdev->dev, "failed to boot\n");
+		goto boot_failed;
+	}
+
+	/* only now the chips are known */
+	card->id.freq = card->pdat->freq;
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
+	if (ret < 0) {
+		dev_alert(&card->pdev->dev, "sysfs failed\n");
+		goto sysfs_failed;
+	}
+
+	ret = -ENOMEM;
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		card->net[j] = netdev =
+			softing_netdev_create(card, card->id.chip[j]);
+		if (!netdev) {
+			dev_alert(&pdev->dev, "failed to make can[%i]\n", j);
+			goto netdev_failed;
+		}
+		priv = netdev_priv(card->net[j]);
+		priv->index = j;
+		ret = softing_netdev_register(netdev);
+		if (ret) {
+			free_candev(netdev);
+			card->net[j] = NULL;
+			dev_alert(&card->pdev->dev,
+					"failed to register can[%i]\n", j);
+			goto netdev_failed;
+		}
+	}
+	dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
+	return 0;
+
+netdev_failed:
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (!card->net[j])
+			continue;
+		softing_netdev_cleanup(card->net[j]);
+	}
+	sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+sysfs_failed:
+	softing_card_shutdown(card);
+boot_failed:
+	iounmap(card->dpram);
+ioremap_failed:
+platform_resource_failed:
+	kfree(card);
+	return ret;
+}
+
+static struct platform_driver softing_driver = {
+	.driver = {
+		.name = "softing",
+		.owner = THIS_MODULE,
+	},
+	.probe = softing_pdev_probe,
+	.remove = __devexit_p(softing_pdev_remove),
+};
+
+MODULE_ALIAS("platform:softing");
+
+static int __init softing_start(void)
+{
+	return platform_driver_register(&softing_driver);
+}
+
+static void __exit softing_stop(void)
+{
+	platform_driver_unregister(&softing_driver);
+}
+
+module_init(softing_start);
+module_exit(softing_stop);
+
+MODULE_DESCRIPTION("Softing DPRAM CAN driver");
+MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 0000000..ebbf698
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
+
+#include <linux/platform_device.h>
+
+#ifndef _SOFTING_DEVICE_H_
+#define _SOFTING_DEVICE_H_
+
+/* softing firmware directory prefix */
+#define fw_dir "softing-4.6/"
+
+struct softing_platform_data {
+	unsigned int manf;
+	unsigned int prod;
+	/*
+	 * generation:
+	 * 1st: with NEC or SJA1000; 8bit, exclusive interrupt, ...
+	 * 2nd: only SJA1000; 16bit, shared interrupt
+	 */
+	int generation;
+	int nbus; /* # busses on device */
+	unsigned int freq; /* operating frequency in Hz */
+	unsigned int max_brp;
+	unsigned int max_sjw;
+	unsigned long dpram_size;
+	const char *name;
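+	/*
+	 * firmware images: 'fw' is the filename (below fw_dir); 'offs' and
+	 * 'addr' relate the DPRAM staging area to the load addresses used
+	 * in the image (see the loaders in softing_fw.c for the details)
+	 */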
+	struct {
+		unsigned long offs;
+		unsigned long addr;
+		const char *fw;
+	} boot, load, app;
+	/*
+	 * reset() function
+	 * bring pdev in or out of reset, depending on value
+	 */
+	int (*reset)(struct platform_device *pdev, int value);
+	int (*enable_irq)(struct platform_device *pdev, int value);
+};
+
+#endif
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 7206ab2..3437613 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3203,7 +3203,7 @@
 	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
 	int mac_off  = 0;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	const unsigned char *addr;
 #endif
 
@@ -3354,7 +3354,7 @@
 	if (found & VPD_FOUND_MAC)
 		goto done;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
 	if (addr != NULL) {
 		memcpy(dev_addr, addr, 6);
@@ -5031,7 +5031,7 @@
 	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
 	  cassini_debug;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	cp->of_node = pci_device_to_OF_node(pdev);
 #endif
 
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 63ebf76..8a43c7e 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -556,7 +556,7 @@
 #define EEPROM_MAX_POLL   4
 
 /*
- * Read SEEPROM. A zero is written to the flag register when the addres is
+ * Read SEEPROM. A zero is written to the flag register when the address is
  * written to the Control register. The hardware device will set the flag to a
  * one when 4B have been transferred to the Data register.
  */
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a294..302be4a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -699,13 +699,13 @@
 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in big endian format. */
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
-		*page_table = (u32) dma->pg_map_arr[i];
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
 	}
 }
@@ -713,13 +713,13 @@
 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in little endian format. */
-		*page_table = dma->pg_map_arr[i] & 0xffffffff;
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
 	}
 }
@@ -2760,6 +2760,8 @@
 	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
 	int kcqe_cnt;
 
+	/* status block index must be read before reading other fields */
+	rmb();
 	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
 
 	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@
 		barrier();
 		if (status_idx != *cp->kcq1.status_idx_ptr) {
 			status_idx = (u16) *cp->kcq1.status_idx_ptr;
+			/* status block index must be read first */
+			rmb();
 			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
 		} else
 			break;
@@ -2888,6 +2892,8 @@
 	u32 last_status = *info->status_idx_ptr;
 	int kcqe_cnt;
 
+	/* status block index must be read before reading the KCQ */
+	rmb();
 	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
 
 		service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@
 			break;
 
 		last_status = *info->status_idx_ptr;
+		/* status block index must be read before reading the KCQ */
+		rmb();
 	}
 	return last_status;
 }
@@ -2906,26 +2914,35 @@
 {
 	struct cnic_dev *dev = (struct cnic_dev *) data;
 	struct cnic_local *cp = dev->cnic_priv;
-	u32 status_idx;
+	u32 status_idx, new_status_idx;
 
 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
 		return;
 
-	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+	while (1) {
+		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
 
-	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+		CNIC_WR16(dev, cp->kcq1.io_addr,
+			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+		if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+					   status_idx, IGU_INT_ENABLE, 1);
+			break;
+		}
+
+		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+		if (new_status_idx != status_idx)
+			continue;
 
 		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
 			  MAX_KCQ_IDX);
 
 		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
 				status_idx, IGU_INT_ENABLE, 1);
-	} else {
-		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
-				   status_idx, IGU_INT_ENABLE, 1);
+
+		break;
 	}
 }
 
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index a8766fb..e13b7fe 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -318,7 +318,7 @@
 
 /*
  * Initialization that requires the OS and protocol layers to already
- * be intialized goes here.
+ * be initialized goes here.
  */
 int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
 		unsigned int nroutes)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ec8579a..d55db6b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -607,7 +607,7 @@
  *
  *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
  *	VPD ROM capability.  A zero is written to the flag bit when the
- *	addres is written to the control register.  The hardware device will
+ *	address is written to the control register.  The hardware device will
  *	set the flag to 1 when 4 bytes have been read into the data register.
  */
 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1ee..ec35d45 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2710,6 +2710,8 @@
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
+	netif_carrier_off(dev);
+
 	if (!(adapter->flags & FULL_INIT_DONE)) {
 		err = cxgb_up(adapter);
 		if (err < 0)
@@ -3661,7 +3663,6 @@
 		pi->xact_addr_filt = -1;
 		pi->rx_offload = RX_CSO;
 		pi->port_id = i;
-		netif_carrier_off(netdev);
 		netdev->irq = pdev->irq;
 
 		netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 3c403f8..6aad64d 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -749,13 +749,19 @@
 	netif_set_real_num_tx_queues(dev, pi->nqsets);
 	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
 	if (err)
-		return err;
-	set_bit(pi->port_id, &adapter->open_device_map);
+		goto err_unwind;
 	err = link_start(dev);
 	if (err)
-		return err;
+		goto err_unwind;
+
 	netif_tx_start_all_queues(dev);
+	set_bit(pi->port_id, &adapter->open_device_map);
 	return 0;
+
+err_unwind:
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return err;
 }
 
 /*
@@ -764,13 +770,12 @@
  */
 static int cxgb4vf_stop(struct net_device *dev)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+	t4vf_enable_vi(adapter, pi->viid, false, false);
 	pi->link_cfg.link_ok = 0;
 
 	clear_bit(pi->port_id, &adapter->open_device_map);
@@ -2035,7 +2040,7 @@
 {
 	int i;
 
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Debugfs support is best effort.
@@ -2056,7 +2061,7 @@
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2484,17 +2489,6 @@
 	struct net_device *netdev;
 
 	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-			MSI_MSI);
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/*
 	 * Print our driver banner the first time we're called to initialize a
 	 * device.
 	 */
@@ -2706,11 +2700,11 @@
 	/*
 	 * Set up our debugfs entries.
 	 */
-	if (cxgb4vf_debugfs_root) {
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
 		adapter->debugfs_root =
 			debugfs_create_dir(pci_name(pdev),
 					   cxgb4vf_debugfs_root);
-		if (adapter->debugfs_root == NULL)
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
 			dev_warn(&pdev->dev, "could not create debugfs"
 				 " directory");
 		else
@@ -2765,7 +2759,7 @@
 	 */
 
 err_free_debugfs:
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2797,7 +2791,6 @@
 err_disable_device:
 	pci_disable_device(pdev);
 
-err_out:
 	return err;
 }
 
@@ -2835,7 +2828,7 @@
 		/*
 		 * Tear down our debugfs entries.
 		 */
-		if (adapter->debugfs_root) {
+		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 			cleanup_debugfs(adapter);
 			debugfs_remove_recursive(adapter->debugfs_root);
 		}
@@ -2869,6 +2862,46 @@
 }
 
 /*
+ * "Shutdown" quiesces the device, stopping ingress packet and interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/*
+	 * Disable all Virtual Interfaces.  This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *netdev;
+		struct port_info *pi;
+
+		if (!test_bit(pidx, &adapter->registered_device_map))
+			continue;
+
+		netdev = adapter->port[pidx];
+		if (!netdev)
+			continue;
+
+		pi = netdev_priv(netdev);
+		t4vf_enable_vi(adapter, pi->viid, false, false);
+	}
+
+	/*
+	 * Free up all Queues, which will prevent further DMA and
+	 * Interrupts, allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
+}
+
+/*
  * PCI Device registration data structures.
  */
 #define CH_DEVICE(devid, idx) \
@@ -2901,6 +2934,7 @@
 	.id_table	= cxgb4vf_pci_tbl,
 	.probe		= cxgb4vf_pci_probe,
 	.remove		= __devexit_p(cxgb4vf_pci_remove),
+	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2910,14 +2944,25 @@
 {
 	int ret;
 
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		printk(KERN_WARNING KBUILD_MODNAME
+		       ": bad module parameter msi=%d; must be %d"
+		       " (MSI-X or MSI) or %d (MSI)\n",
+		       msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!cxgb4vf_debugfs_root)
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
 		       " debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0)
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		debugfs_remove(cxgb4vf_debugfs_root);
 	return ret;
 }
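
The debugfs hunks above replace bare NULL checks with IS_ERR_OR_NULL() because debugfs_create_dir() can hand back an ERR_PTR (for instance when debugfs support is compiled out) rather than NULL. A minimal userspace model of that pointer-encoding convention, with illustrative values only:

/*
 * Userspace sketch of ERR_PTR()/IS_ERR_OR_NULL(): error codes live in the
 * last page of the pointer space, so a bare NULL test misses them.
 */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int object;
	void *ok = &object;                /* a real pointer               */
	void *compiled_out = ERR_PTR(-19); /* -ENODEV style error pointer  */
	void *failed = NULL;               /* outright creation failure    */

	printf("%d %d %d\n", IS_ERR_OR_NULL(ok),
	       IS_ERR_OR_NULL(compiled_out), IS_ERR_OR_NULL(failed));
	return 0;
}
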
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e4bec78..192db22 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -147,9 +147,20 @@
 	/*
 	 * Write the command array into the Mailbox Data register array and
 	 * transfer ownership of the mailbox to the firmware.
+	 *
+	 * For the VFs, the Mailbox Data "registers" are actually backed by
+	 * T4's "MA" interface rather than PL Registers (as is the case for
+	 * the PFs).  Because these are in different coherency domains, the
+	 * write to the VF's PL-register-backed Mailbox Control can race in
+	 * front of the writes to the MA-backed VF Mailbox Data "registers".
+	 * So we need to do a read-back on at least one byte of the VF Mailbox
+	 * Data registers before doing the write to the VF Mailbox Control
+	 * register.
 	 */
 	for (i = 0, p = cmd; i < size; i += 8)
 		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+	t4_read_reg(adapter, mbox_data);         /* flush write */
+
 	t4_write_reg(adapter, mbox_ctl,
 		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
 	t4_read_reg(adapter, mbox_ctl);          /* flush write */
@@ -160,7 +171,7 @@
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < 500; i += ms) {
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
 		if (sleep_ok) {
 			ms = delay[delay_idx];
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
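
The comment added in the hunk above describes a posted-write hazard: data writes and the control ("doorbell") write sit in different coherency domains, so a read-back from the data region is used to order them. A hedged, kernel-style sketch of the general pattern; DATA_OFF, CTL_OFF and CTL_GO are hypothetical names, not registers from this driver:

/*
 * Sketch only: order a block of MMIO data writes ahead of the "go" write
 * by reading back from the data region first.
 */
static void post_command(void __iomem *regs, const u32 *cmd, int words)
{
	int i;

	for (i = 0; i < words; i++)
		writel(cmd[i], regs + DATA_OFF + 4 * i);
	readl(regs + DATA_OFF);         /* read-back orders the data writes */
	writel(CTL_GO, regs + CTL_OFF); /* hand ownership to the firmware   */
	readl(regs + CTL_OFF);          /* flush the doorbell write as well */
}
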
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d1..7018bfe 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@
 	int			ret;
 
 	/* free and bail if we are shutting down */
-	if (unlikely(!netif_running(ndev))) {
+	if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
 		dev_kfree_skb_any(skb);
 		return;
 	}
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68..8b0084d 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@
 				}
 			}
 			/* Change buffer ownership for this last frame, back to the adapter */
-			for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) {
+			for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
 				writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
 			}
 			writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@
 		/*
 		   ** Update entry information
 		 */
-		lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
+		lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
 	}
 
 	return 0;
@@ -1148,7 +1148,7 @@
 		}
 
 		/* Update all the pointers */
-		lp->tx_old = (++lp->tx_old) & lp->txRingMask;
+		lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
 	}
 
 	return 0;
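
The depca hunks above replace assignments of the form x = (++x) & mask, which modify x twice without an intervening sequence point and are therefore undefined behaviour in C (newer gcc warns with -Wsequence-point). A small standalone example of the well-defined replacement:

#include <stdio.h>

int main(void)
{
	unsigned int idx = 3, mask = 0x3;	/* 4-entry ring */

	/* idx = (++idx) & mask;   undefined: idx is written twice */
	idx = (idx + 1) & mask;		/* well defined: wraps 3 -> 0 */
	printf("%u\n", idx);
	return 0;
}
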
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216..c05db60 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@
 
 	/* Free all the skbuffs in the queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		np->rx_ring[i].status = 0;
-		np->rx_ring[i].fraginfo = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
 		}
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].fraginfo = 0;
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc..461dd6f 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@
 	/* Checksum mode */
 	dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
 
-	/* GPIO0 on pre-activate PHY */
-	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
-	iow(db, DM9000_GPR, 0);	/* Enable PHY */
 
 	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
@@ -852,8 +849,8 @@
 	unsigned long flags;
 
 	/* Save previous register address */
-	reg_save = readb(db->io_addr);
 	spin_lock_irqsave(&db->lock, flags);
+	reg_save = readb(db->io_addr);
 
 	netif_stop_queue(dev);
 	dm9000_reset(db);
@@ -1194,6 +1191,10 @@
 	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
 		return -EAGAIN;
 
+	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
+	mdelay(1); /* delay needed by DM9000B */
+
 	/* Initialize DM9000 board */
 	dm9000_reset(db);
 	dm9000_init_dm9000(dev);
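
The dm9000 timeout hunk above moves the read of the chip's index register inside the spinlock: state a lock protects has to be sampled under that lock, or another context can change it between the read and the critical section. A rough pthread analogue of the save/restore-under-lock pattern; the names are hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int selected_reg;		/* shared "index register" */

static void timeout_handler(void)
{
	pthread_mutex_lock(&lock);
	int reg_save = selected_reg;	/* sample under the lock            */
	selected_reg = 0x1f;		/* ... reset/reprogram the chip ... */
	selected_reg = reg_save;	/* restore what we actually saw     */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	timeout_handler();
	printf("%d\n", selected_reg);
	return 0;
}
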
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b..8318ea0 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		bp->mii_bus->irq[i] = PHY_POLL;
 
-	platform_set_drvdata(bp->dev, bp->mii_bus);
-
 	if (mdiobus_register(bp->mii_bus)) {
 		err = -ENXIO;
 		goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@
 	bp = netdev_priv(dev);
 	bp->dev = dev;
 
+	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	spin_lock_init(&bp->lock);
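
The dnet change above restores the usual driver-data pairing: probe() stores the net_device on the platform device, so remove() (and bus code) can read the same pointer back. A hedged minimal sketch of that pairing; example_probe, example_remove and example_priv are hypothetical, not part of this driver:

static int example_probe(struct platform_device *pdev)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));
	int err;

	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);	/* what remove() will look up */
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}

static int example_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);
	return 0;
}
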
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 77d08e6..7501d97 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,16 +124,22 @@
 	case M88E1000_I_PHY_ID:
 	case M88E1011_I_PHY_ID:
 	case M88E1111_I_PHY_ID:
+	case M88E1118_E_PHY_ID:
 		hw->phy_type = e1000_phy_m88;
 		break;
 	case IGP01E1000_I_PHY_ID:
 		if (hw->mac_type == e1000_82541 ||
 		    hw->mac_type == e1000_82541_rev_2 ||
 		    hw->mac_type == e1000_82547 ||
-		    hw->mac_type == e1000_82547_rev_2) {
+		    hw->mac_type == e1000_82547_rev_2)
 			hw->phy_type = e1000_phy_igp;
-			break;
-		}
+		break;
+	case RTL8211B_PHY_ID:
+		hw->phy_type = e1000_phy_8211;
+		break;
+	case RTL8201N_PHY_ID:
+		hw->phy_type = e1000_phy_8201;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		hw->phy_type = e1000_phy_undefined;
@@ -318,6 +324,9 @@
 	case E1000_DEV_ID_82547GI:
 		hw->mac_type = e1000_82547_rev_2;
 		break;
+	case E1000_DEV_ID_INTEL_CE4100_GBE:
+		hw->mac_type = e1000_ce4100;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		return -E1000_ERR_MAC_TYPE;
@@ -372,6 +381,9 @@
 		case e1000_82542_rev2_1:
 			hw->media_type = e1000_media_type_fiber;
 			break;
+		case e1000_ce4100:
+			hw->media_type = e1000_media_type_copper;
+			break;
 		default:
 			status = er32(STATUS);
 			if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +472,7 @@
 		/* Reset is performed on a shadow of the control register */
 		ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
 		break;
+	case e1000_ce4100:
 	default:
 		ew32(CTRL, (ctrl | E1000_CTRL_RST));
 		break;
@@ -952,6 +965,67 @@
 }
 
 /**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	/* SW reset the PHY so all changes take effect */
+	ret_val = e1000_phy_reset(hw);
+	if (ret_val) {
+		e_dbg("Error Resetting the PHY\n");
+		return ret_val;
+	}
+
+	return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 ctrl_aux;
+
+	switch (hw->phy_type) {
+	case e1000_phy_8211:
+		ret_val = e1000_copper_link_rtl_setup(hw);
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	case e1000_phy_8201:
+		/* Set RMII mode */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= E1000_CTL_AUX_RMII;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+
+		/* Disable the J/K bits required for receive */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= 0x4;
+		ctrl_aux &= ~0x2;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+		ret_val = e1000_copper_link_rtl_setup(hw);
+
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	default:
+		e_dbg("Error Resetting the PHY\n");
+		return E1000_ERR_PHY_TYPE;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
  * e1000_copper_link_preconfig - early configuration for copper
  * @hw: Struct containing variables accessed by shared code
  *
@@ -1286,6 +1360,10 @@
 	if (hw->autoneg_advertised == 0)
 		hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
+	/* IFE/RTL8201N PHY only supports 10/100 */
+	if (hw->phy_type == e1000_phy_8201)
+		hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
 	e_dbg("Reconfiguring auto-neg advertisement params\n");
 	ret_val = e1000_phy_setup_autoneg(hw);
 	if (ret_val) {
@@ -1341,7 +1419,7 @@
 	s32 ret_val;
 	e_dbg("e1000_copper_link_postconfig");
 
-	if (hw->mac_type >= e1000_82544) {
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
 		e1000_config_collision_dist(hw);
 	} else {
 		ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1473,12 @@
 		ret_val = e1000_copper_link_mgp_setup(hw);
 		if (ret_val)
 			return ret_val;
+	} else {
+		ret_val = gbe_dhg_phy_setup(hw);
+		if (ret_val) {
+			e_dbg("gbe_dhg_phy_setup failed!\n");
+			return ret_val;
+		}
 	}
 
 	if (hw->autoneg) {
@@ -1461,10 +1545,11 @@
 		return ret_val;
 
 	/* Read the MII 1000Base-T Control Register (Address 9). */
-	ret_val =
-	    e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+	ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
 	if (ret_val)
 		return ret_val;
+	else if (hw->phy_type == e1000_phy_8201)
+		mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
 
 	/* Need to parse both autoneg_advertised and fc and set up
 	 * the appropriate PHY registers.  First we will parse for
@@ -1577,9 +1662,14 @@
 
 	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-	ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
-	if (ret_val)
-		return ret_val;
+	if (hw->phy_type == e1000_phy_8201) {
+		mii_1000t_ctrl_reg = 0;
+	} else {
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+		                              mii_1000t_ctrl_reg);
+		if (ret_val)
+			return ret_val;
+	}
 
 	return E1000_SUCCESS;
 }
@@ -1860,7 +1950,7 @@
 
 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.*/
-	if (hw->mac_type >= e1000_82544)
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
 		return E1000_SUCCESS;
 
 	/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1960,49 @@
 	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
 
-	/* Set up duplex in the Device Control and Transmit Control
-	 * registers depending on negotiated values.
-	 */
-	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-	if (ret_val)
-		return ret_val;
+	switch (hw->phy_type) {
+	case e1000_phy_8201:
+		ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
 
-	if (phy_data & M88E1000_PSSR_DPLX)
-		ctrl |= E1000_CTRL_FD;
-	else
-		ctrl &= ~E1000_CTRL_FD;
+		if (phy_data & RTL_PHY_CTRL_FD)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
 
-	e1000_config_collision_dist(hw);
+		if (phy_data & RTL_PHY_CTRL_SPD_100)
+			ctrl |= E1000_CTRL_SPD_100;
+		else
+			ctrl |= E1000_CTRL_SPD_10;
 
-	/* Set up speed in the Device Control register depending on
-	 * negotiated values.
-	 */
-	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
-		ctrl |= E1000_CTRL_SPD_1000;
-	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
-		ctrl |= E1000_CTRL_SPD_100;
+		e1000_config_collision_dist(hw);
+		break;
+	default:
+		/* Set up duplex in the Device Control and Transmit Control
+		 * registers depending on negotiated values.
+		 */
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+		                             &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		if (phy_data & M88E1000_PSSR_DPLX)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
+
+		e1000_config_collision_dist(hw);
+
+		/* Set up speed in the Device Control register depending on
+		 * negotiated values.
+		 */
+		if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+			ctrl |= E1000_CTRL_SPD_1000;
+		else if ((phy_data & M88E1000_PSSR_SPEED) ==
+		         M88E1000_PSSR_100MBS)
+			ctrl |= E1000_CTRL_SPD_100;
+	}
 
 	/* Write the configured values back to the Device Control Reg. */
 	ew32(CTRL, ctrl);
@@ -2401,7 +2513,8 @@
 		 * speed/duplex on the MAC to the current PHY speed/duplex
 		 * settings.
 		 */
-		if (hw->mac_type >= e1000_82544)
+		if ((hw->mac_type >= e1000_82544) &&
+		    (hw->mac_type != e1000_ce4100))
 			e1000_config_collision_dist(hw);
 		else {
 			ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2851,7 @@
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_read_phy_reg_ex");
 
@@ -2752,28 +2865,61 @@
 		 * Control register.  The MAC will take care of interfacing with the
 		 * PHY to retrieve the desired data.
 		 */
-		mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_READ));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_READ) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 64; i++) {
-			udelay(50);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+
+			mdic = readl(E1000_MDIO_STS);
+			if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+				e_dbg("MDI Read Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
+		} else {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_READ));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+			if (mdic & E1000_MDIC_ERROR) {
+				e_dbg("MDI Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
 		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Read did not complete\n");
-			return -E1000_ERR_PHY;
-		}
-		if (mdic & E1000_MDIC_ERROR) {
-			e_dbg("MDI Error\n");
-			return -E1000_ERR_PHY;
-		}
-		*phy_data = (u16) mdic;
 	} else {
 		/* We must first send a preamble through the MDIO pin to signal the
 		 * beginning of an MII instruction.  This is done by sending 32
@@ -2840,7 +2986,7 @@
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_write_phy_reg_ex");
 
@@ -2850,27 +2996,54 @@
 	}
 
 	if (hw->mac_type > e1000_82543) {
-		/* Set up Op-code, Phy Address, register address, and data intended
-		 * for the PHY register in the MDI Control register.  The MAC will take
-		 * care of interfacing with the PHY to send the desired data.
+		/* Set up Op-code, Phy Address, register address, and data
+		 * intended for the PHY register in the MDI Control register.
+		 * The MAC will take care of interfacing with the PHY to send
+		 * the desired data.
 		 */
-		mdic = (((u32) phy_data) |
-			(reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_WRITE));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_WRITE) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 641; i++) {
-			udelay(5);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
-		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Write did not complete\n");
-			return -E1000_ERR_PHY;
+			/* Poll the ready bit to see if the MDI write
+			 * completed
+			 */
+			for (i = 0; i < 640; i++) {
+				udelay(5);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+		} else {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_WRITE));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI write
+			 * completed
+			 */
+			for (i = 0; i < 641; i++) {
+				udelay(5);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
 		}
 	} else {
 		/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3221,12 @@
 		if (hw->phy_id == M88E1011_I_PHY_ID)
 			match = true;
 		break;
+	case e1000_ce4100:
+		if ((hw->phy_id == RTL8211B_PHY_ID) ||
+		    (hw->phy_id == RTL8201N_PHY_ID) ||
+		    (hw->phy_id == M88E1118_E_PHY_ID))
+			match = true;
+		break;
 	case e1000_82541:
 	case e1000_82541_rev_2:
 	case e1000_82547:
@@ -3291,6 +3470,9 @@
 
 	if (hw->phy_type == e1000_phy_igp)
 		return e1000_phy_igp_get_info(hw, phy_info);
+	else if ((hw->phy_type == e1000_phy_8211) ||
+	         (hw->phy_type == e1000_phy_8201))
+		return E1000_SUCCESS;
 	else
 		return e1000_phy_m88_get_info(hw, phy_info);
 }
@@ -3742,6 +3924,12 @@
 
 	e_dbg("e1000_read_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+		                      data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
@@ -3904,6 +4092,12 @@
 
 	e_dbg("e1000_write_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+		                       data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
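
The CE4100 MDIO paths added above issue a command and then poll a GO bit a bounded number of times, failing if the bit never clears. A standalone illustration of that bounded-poll shape with a faked register; the constants and helper names are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GO_BIT 0x80000000u

static uint32_t cmd_reg = GO_BIT;	/* faked hardware command register */
static int polls;

static uint32_t read_cmd_reg(void)
{
	if (++polls >= 3)		/* pretend the engine finishes after 3 polls */
		cmd_reg &= ~GO_BIT;
	return cmd_reg;
}

static bool wait_for_go_clear(int max_tries)
{
	while (max_tries--) {
		if (!(read_cmd_reg() & GO_BIT))
			return true;	/* command completed */
		/* a real driver would udelay() between polls */
	}
	return false;			/* timed out */
}

int main(void)
{
	printf("completed: %d\n", wait_for_go_clear(64));
	return 0;
}
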
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c..c70b23d 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -41,7 +41,7 @@
 struct e1000_hw_stats;
 
 /* Enumerated types specific to the e1000 hardware */
-/* Media Access Controlers */
+/* Media Access Controllers */
 typedef enum {
 	e1000_undefined = 0,
 	e1000_82542_rev2_0,
@@ -52,6 +52,7 @@
 	e1000_82545,
 	e1000_82545_rev_3,
 	e1000_82546,
+	e1000_ce4100,
 	e1000_82546_rev_3,
 	e1000_82541,
 	e1000_82541_rev_2,
@@ -209,9 +210,11 @@
 } e1000_1000t_rx_status;
 
 typedef enum {
-    e1000_phy_m88 = 0,
-    e1000_phy_igp,
-    e1000_phy_undefined = 0xFF
+	e1000_phy_m88 = 0,
+	e1000_phy_igp,
+	e1000_phy_8211,
+	e1000_phy_8201,
+	e1000_phy_undefined = 0xFF
 } e1000_phy_type;
 
 typedef enum {
@@ -442,6 +445,7 @@
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82547EI_MOBILE      0x101A
 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@
 #define E1000_CTRL_EXT 0x00018	/* Extended Device Control - RW */
 #define E1000_FLA      0x0001C	/* Flash Access - RW */
 #define E1000_MDIC     0x00020	/* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE    (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD   (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
 #define E1000_SCTL     0x00024	/* SerDes Control - RW */
 #define E1000_FEXTNVM  0x00028	/* Future Extended NVM register */
 #define E1000_FCAL     0x00028	/* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@
 #define E1000_IMS      0x000D0	/* Interrupt Mask Set - RW */
 #define E1000_IMC      0x000D8	/* Interrupt Mask Clear - WO */
 #define E1000_IAM      0x000E0	/* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific;
+ * the RMII/RGMII function is switched by this register - RW.
+ * The following are the bit definitions of the Auxiliary Control Register.
+ */
+#define E1000_CTL_AUX  0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT     10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT   8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT  0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT   (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES       (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT       (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL       (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII     (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII      (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE  (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE  (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE  (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE  (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
 #define E1000_RCTL     0x00100	/* RX Control - RW */
 #define E1000_RDTR1    0x02820	/* RX Delay Timer (1) - RW */
 #define E1000_RDBAL1   0x02900	/* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@
  * in more current versions of the 8254x. Despite the difference in location,
  * the registers function in the same manner.
  */
+#define E1000_82542_CTL_AUX  E1000_CTL_AUX
 #define E1000_82542_CTRL     E1000_CTRL
 #define E1000_82542_CTRL_DUP E1000_CTRL_DUP
 #define E1000_82542_STATUS   E1000_STATUS
@@ -1571,6 +1614,11 @@
 #define E1000_MDIC_INT_EN    0x20000000
 #define E1000_MDIC_ERROR     0x40000000
 
+#define INTEL_CE_GBE_MDIC_OP_WRITE      0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ       0x00000000
+#define INTEL_CE_GBE_MDIC_GO            0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR    0x80000000
+
 #define E1000_KUMCTRLSTA_MASK           0x0000FFFF
 #define E1000_KUMCTRLSTA_OFFSET         0x001F0000
 #define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
@@ -2869,8 +2917,14 @@
 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
 #define M88E1011_I_REV_4   0x04
 #define M88E1111_I_PHY_ID  0x01410CC0
+#define M88E1118_E_PHY_ID  0x01410E40
 #define L1LXT971A_PHY_ID   0x001378E0
 
+#define RTL8211B_PHY_ID    0x001CC910
+#define RTL8201N_PHY_ID    0x8200
+#define RTL_PHY_CTRL_FD    0x0100 /* Full duplex: 0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100    0x200000 /* Force 100Mb */
+
 /* Bits...
  * 15-5: page
  * 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 340e12d..bfab140 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
 
 #include "e1000.h"
 #include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
 	/* required last entry */
 	{0,}
 };
@@ -459,6 +466,7 @@
 		case e1000_82545:
 		case e1000_82545_rev_3:
 		case e1000_82546:
+		case e1000_ce4100:
 		case e1000_82546_rev_3:
 		case e1000_82541:
 		case e1000_82541_rev_2:
@@ -573,6 +581,7 @@
 	case e1000_82545:
 	case e1000_82545_rev_3:
 	case e1000_82546:
+	case e1000_ce4100:
 	case e1000_82546_rev_3:
 		pba = E1000_PBA_48K;
 		break;
@@ -894,6 +903,7 @@
 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
 	int i, err, pci_using_dac;
 	u16 eeprom_data = 0;
+	u16 tmp = 0;
 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 	int bars, need_ioport;
 
@@ -996,6 +1006,14 @@
 		goto err_sw_init;
 
 	err = -EIO;
+	if (hw->mac_type == e1000_ce4100) {
+		ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+		ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+		                                pci_resource_len(pdev, BAR_1));
+
+		if (!ce4100_gbe_mdio_base_virt)
+			goto err_mdio_ioremap;
+	}
 
 	if (hw->mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@
 	adapter->wol = adapter->eeprom_wol;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+	/* Auto detect PHY address */
+	if (hw->mac_type == e1000_ce4100) {
+		for (i = 0; i < 32; i++) {
+			hw->phy_addr = i;
+			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+			if (tmp == 0 || tmp == 0xFF) {
+				if (i == 31)
+					goto err_eeprom;
+				continue;
+			} else
+				break;
+		}
+	}
+
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
@@ -1171,6 +1203,8 @@
 	kfree(adapter->rx_ring);
 err_dma:
 err_sw_init:
+err_mdio_ioremap:
+	iounmap(ce4100_gbe_mdio_base_virt);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -1409,6 +1443,7 @@
 	/* First rev 82545 and 82546 need to not allow any memory
 	 * write location to cross 64k boundary due to errata 23 */
 	if (hw->mac_type == e1000_82545 ||
+	    hw->mac_type == e1000_ce4100 ||
 	    hw->mac_type == e1000_82546) {
 		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
 	}
@@ -2198,7 +2233,7 @@
 	 * addresses take precedence to avoid disabling unicast filtering
 	 * when possible.
 	 *
-	 * RAR 0 is used for the station MAC adddress
+	 * RAR 0 is used for the station MAC address
 	 * if there are not 14 addresses, go ahead and clear the filters
 	 */
 	i = 1;
@@ -3443,9 +3478,17 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+	if (unlikely((!icr)))
 		return IRQ_NONE;  /* Not our interrupt */
 
+	/*
+	 * We might have caused the interrupt, but the above read cleared
+	 * it, and if the driver is down there is nothing to do anyway,
+	 * so return handled.
+	 */
+	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
+		return IRQ_HANDLED;
+
 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		/* guard against interrupt when we're going down */
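
The ce4100 case added to the errata-23 check above reuses the existing XOR test: a buffer stays inside a single 64 KiB region exactly when begin and end - 1 agree in every bit above bit 15. A small standalone check of that arithmetic:

#include <stdbool.h>
#include <stdio.h>

/* True when [begin, begin + len) stays inside one 64 KiB region. */
static bool fits_in_64k(unsigned long begin, unsigned long len)
{
	unsigned long end = begin + len;

	return ((begin ^ (end - 1)) >> 16) == 0;
}

int main(void)
{
	printf("%d\n", fits_in_64k(0x1F000, 0x0800)); /* 1: ends at 0x1F7FF */
	printf("%d\n", fits_in_64k(0x1F000, 0x2000)); /* 0: crosses 0x20000 */
	return 0;
}
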
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75..33e7c45a 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,22 @@
 #ifndef _E1000_OSDEP_H_
 #define _E1000_OSDEP_H_
 
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
 #include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE         0x60000
+#define GBE_CONFIG_OFFSET       0x0
+
+#define GBE_CONFIG_RAM_BASE \
+	((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT \
+	((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+	(iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+	(ioread16_rep(base + (offset << 1), data, count))
 
 #define er32(reg)							\
 	(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543)		\
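
The GBE_CONFIG_FLASH_READ macro above turns a 16-bit word offset into a byte offset with offset << 1, since the config words are 16 bits wide while the window is byte-addressed. A tiny standalone illustration of that scaling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t config[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
	const uint8_t *base = (const uint8_t *)config;	/* byte-addressed */
	unsigned int word_offset = 2;
	uint16_t value;

	/* word 2 lives at byte offset (2 << 1) == 4 */
	memcpy(&value, base + (word_offset << 1), sizeof(value));
	printf("0x%04x\n", value);	/* prints 0x3333 */
	return 0;
}
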
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index e57e409..89a6903 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -78,6 +78,8 @@
 static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
 static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@
 		phy->type		 = e1000_phy_bm;
 		phy->ops.acquire = e1000_get_hw_semaphore_82574;
 		phy->ops.release = e1000_put_hw_semaphore_82574;
+		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@
 
 	/* This can only be done after all function pointers are setup. */
 	ret_val = e1000_get_phy_id_82571(hw);
+	if (ret_val) {
+		e_dbg("Error getting PHY ID\n");
+		return ret_val;
+	}
 
 	/* Verify phy id */
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
 		if (phy->id != IGP01E1000_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82573:
 		if (phy->id != M88E1111_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82574:
 	case e1000_82583:
 		if (phy->id != BME1000_E_PHY_ID_R2)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	default:
-		return -E1000_ERR_PHY;
+		ret_val = -E1000_ERR_PHY;
 		break;
 	}
 
-	return 0;
+	if (ret_val)
+		e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+	return ret_val;
 }
 
 /**
@@ -317,7 +328,7 @@
 
 	/*
 	 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
-	 * first NVM or PHY acess. This should be done for single-port
+	 * first NVM or PHY access. This should be done for single-port
 	 * devices, and for one port only on dual-port devices so that
 	 * for those devices we can still use the SMBI lock to synchronize
 	 * inter-port accesses to the PHY & NVM.
@@ -649,6 +660,58 @@
 }
 
 /**
+ *  e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.
+ *  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (active)
+		data |= E1000_PHY_CTRL_D0A_LPLU;
+	else
+		data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  when active is true, else clear lplu for D3. LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (!active) {
+		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+	} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_PHY_CTRL_NOND0A_LPLU;
+	}
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
  *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
  *  @hw: pointer to the HW structure
  *
@@ -956,7 +1019,7 @@
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext, icr;
+	u32 ctrl, ctrl_ext;
 	s32 ret_val;
 
 	/*
@@ -1040,7 +1103,7 @@
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	if (hw->mac.type == e1000_82571) {
 		/* Install any alternate MAC address into RAR0 */
@@ -1247,7 +1310,7 @@
 		 * apply workaround for hardware errata documented in errata
 		 * docs Fixes issue where some error prone or unreliable PCIe
 		 * completions are occurring, particularly with ASPM enabled.
-		 * Without fix, issue can cause tx timeouts.
+		 * Without fix, issue can cause Tx timeouts.
 		 */
 		reg = er32(GCR2);
 		reg |= 1;
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c913..28519ac 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2008 Intel Corporation.
+# Copyright(c) 1999 - 2011 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 7245dc2..1314998 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 2c913b8..e610e136 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/crc32.h>
 
 #include "hw.h"
 
@@ -496,6 +497,8 @@
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index b18c644..2fefa82 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -784,7 +784,7 @@
  **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-	u32 ctrl, icr;
+	u32 ctrl;
 	s32 ret_val;
 
 	/*
@@ -818,7 +818,7 @@
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	ret_val = e1000_check_alt_mac_addr_generic(hw);
 
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index affcacf..fa08b63 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -624,20 +624,24 @@
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver,  e1000e_driver_name, 32);
-	strncpy(drvinfo->version, e1000e_driver_version, 32);
+	strncpy(drvinfo->driver,  e1000e_driver_name,
+		sizeof(drvinfo->driver) - 1);
+	strncpy(drvinfo->version, e1000e_driver_version,
+		sizeof(drvinfo->version) - 1);
 
 	/*
 	 * EEPROM image version # is reported as firmware version # for
 	 * PCI-E controllers
 	 */
-	sprintf(firmware_version, "%d.%d-%d",
+	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
 		(adapter->eeprom_vers & 0xF000) >> 12,
 		(adapter->eeprom_vers & 0x0FF0) >> 4,
 		(adapter->eeprom_vers & 0x000F));
 
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version) - 1);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info) - 1);
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
@@ -1704,6 +1708,19 @@
 	bool if_running = netif_running(netdev);
 
 	set_bit(__E1000_TESTING, &adapter->state);
+
+	if (!if_running) {
+		/* Get control of and reset hardware */
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_get_hw_control(adapter);
+
+		e1000e_power_up_phy(adapter);
+
+		adapter->hw.phy.autoneg_wait_to_complete = 1;
+		e1000e_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = 0;
+	}
+
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
@@ -1717,8 +1734,6 @@
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
-		else
-			e1000e_reset(adapter);
 
 		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000e_reset(adapter);
-		/* make sure the phy is powered up */
-		e1000e_power_up_phy(adapter);
 		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1755,28 +1768,29 @@
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
-			clear_bit(__E1000_TESTING, &adapter->state);
-			dev_open(netdev);
-			set_bit(__E1000_TESTING, &adapter->state);
-		}
+		/* Online tests */
 
 		e_info("online testing starting\n");
-		/* Online tests */
-		if (e1000_link_test(adapter, &data[4]))
-			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		/* Online tests aren't run; pass by default */
+		/* register, eeprom, intr and loopback tests not run online */
 		data[0] = 0;
 		data[1] = 0;
 		data[2] = 0;
 		data[3] = 0;
 
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT))
-			dev_close(netdev);
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 	}
+
+	if (!if_running) {
+		e1000e_reset(adapter);
+
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_release_hw_control(adapter);
+	}
+
 	msleep_interruptible(4 * 1000);
 }
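
The get_drvinfo hunk above moves to size-bounded string handling: snprintf() truncates instead of overflowing, and strncpy() with sizeof - 1 leaves the final byte free for the terminating NUL. A minimal standalone version of the pattern, with made-up strings:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fw[8];
	char drv[8] = { 0 };	/* zero-filled so the last byte stays NUL */

	/* bounded formatting: output is truncated, never overflows fw[] */
	snprintf(fw, sizeof(fw), "%d.%d-%d", 5, 255, 1023);

	/* copy at most sizeof - 1 bytes, preserving the trailing NUL */
	strncpy(drv, "e1000e-driver-name", sizeof(drv) - 1);

	printf("%s %s\n", fw, drv);
	return 0;
}
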
 
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5..bc0860a 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -83,6 +83,7 @@
 	E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
 	E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
 	E1000_PHY_CTRL     = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
 	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
 	E1000_PBS      = 0x01008, /* Packet Buffer Size */
 	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
@@ -101,7 +102,7 @@
 	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
 	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
 #define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
-	E1000_RADV     = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
+	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
 
 /* Convenience macros
  *
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d86cc08..fb46974 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -321,7 +321,7 @@
 	}
 
 	/*
-	 * Reset the PHY before any acccess to it.  Doing so, ensures that
+	 * Reset the PHY before any access to it.  Doing so, ensures that
 	 * the PHY is in a known good state before we read/write PHY registers.
 	 * The generic reset is sufficient here, because we haven't determined
 	 * the PHY type yet.
@@ -1395,22 +1395,6 @@
 	}
 }
 
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
-	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
-	u32 i, j, mask, crc;
-
-	crc = 0xffffffff;
-	for (i = 0; i < 6; i++) {
-		crc = crc ^ mac[i];
-		for (j = 8; j > 0; j--) {
-			mask = (crc & 1) * (-1);
-			crc = (crc >> 1) ^ (poly & mask);
-		}
-	}
-	return ~crc;
-}
-
 /**
  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
  *  with 82579 PHY
@@ -1453,8 +1437,7 @@
 			mac_addr[4] = (addr_high & 0xFF);
 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
 
-			ew32(PCH_RAICC(i),
-					e1000_calc_rx_da_crc(mac_addr));
+			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
 		}
 
 		/* Write Rx addresses to the PHY */
@@ -2977,7 +2960,7 @@
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u16 reg;
-	u32 ctrl, icr, kab;
+	u32 ctrl, kab;
 	s32 ret_val;
 
 	/*
@@ -3067,7 +3050,7 @@
 		ew32(CRC_OFFSET, 0x65656565);
 
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	kab = er32(KABGTXD);
 	kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		hw->phy.ops.read_reg(hw, BM_WUC, &i);
+		e1e_rphy(hw, BM_WUC, &i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -3276,9 +3259,8 @@
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
-		ret_val = hw->phy.ops.write_reg(hw,
-		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
-		                             hw->fc.pause_time);
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
 		if (ret_val)
 			return ret_val;
 	}
@@ -3342,8 +3324,7 @@
 			return ret_val;
 		break;
 	case e1000_phy_ife:
-		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                               &reg_data);
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
 		if (ret_val)
 			return ret_val;
 
@@ -3361,8 +3342,7 @@
 			reg_data |= IFE_PMC_AUTO_MDIX;
 			break;
 		}
-		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                                reg_data);
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
 		if (ret_val)
 			return ret_val;
 		break;
@@ -3646,7 +3626,8 @@
 {
 	if (hw->phy.type == e1000_phy_ife)
 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
 
 	ew32(LEDCTL, hw->mac.ledctl_mode1);
 	return 0;
@@ -3660,8 +3641,7 @@
  **/
 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_mode1);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
 }
 
 /**
@@ -3672,8 +3652,7 @@
  **/
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_default);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
 }
 
 /**
@@ -3704,7 +3683,7 @@
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3735,7 +3714,7 @@
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3844,20 +3823,20 @@
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
 	}
 }
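
The ich8lan hunk above drops the open-coded e1000_calc_rx_da_crc() in favour of ~ether_crc_le(ETH_ALEN, mac_addr); ether_crc_le() computes the same bit-reflected CRC-32 without the final inversion, which is why the caller now applies the ~ explicitly. A standalone copy of the removed helper, for reference, with a made-up MAC address:

#include <stdint.h>
#include <stdio.h>

/*
 * Bit-reflected CRC-32 over a MAC address, polynomial 0xEDB88320, initial
 * value ~0, final inversion applied, as in the removed helper.
 */
static uint32_t rx_da_crc(const uint8_t mac[6])
{
	uint32_t poly = 0xEDB88320, crc = 0xFFFFFFFF;
	int i, j;

	for (i = 0; i < 6; i++) {
		crc ^= mac[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ (poly & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

	printf("0x%08x\n", rx_da_crc(mac));
	return 0;
}
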
 
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7e55170..68aa174 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -533,7 +533,7 @@
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -556,7 +556,7 @@
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -598,7 +598,7 @@
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -621,7 +621,7 @@
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -800,9 +800,9 @@
 	 * The possible values of the "fc" parameter are:
 	 *      0:  Flow control is completely disabled
 	 *      1:  Rx flow control is enabled (we can receive pause frames,
-	 *	  but not send pause frames).
+	 *          but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames but we
-	 *	  do not support receiving pause frames).
+	 *          do not support receiving pause frames).
 	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
 	 */
 	switch (hw->fc.current_mode) {
@@ -1031,9 +1031,9 @@
 	 * The possible values of the "fc" parameter are:
 	 *      0:  Flow control is completely disabled
 	 *      1:  Rx flow control is enabled (we can receive pause
-	 *	  frames but not send pause frames).
+	 *          frames but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames
-	 *	  frames but we do not receive pause frames).
+	 *          frames but we do not receive pause frames).
 	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
 	 *  other:  No other values should be possible at this point.
 	 */
@@ -1135,7 +1135,8 @@
 		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
 		if (ret_val)
 			return ret_val;
-		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+		ret_val =
+		    e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
 		if (ret_val)
 			return ret_val;
 
@@ -1188,7 +1189,7 @@
 			} else {
 				hw->fc.current_mode = e1000_fc_rx_pause;
 				e_dbg("Flow Control = "
-					 "RX PAUSE frames only.\r\n");
+				      "Rx PAUSE frames only.\r\n");
 			}
 		}
 		/*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fe50242..2e50228 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -77,17 +77,17 @@
 	char *name;
 };
 
-#define E1000_RDFH	0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT	0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS	0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS	0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC	0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
+#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
 
-#define E1000_TDFH	0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT	0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS	0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS	0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC	0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
+#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
 
 static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 
@@ -99,7 +99,7 @@
 	/* Interrupt Registers */
 	{E1000_ICR, "ICR"},
 
-	/* RX Registers */
+	/* Rx Registers */
 	{E1000_RCTL, "RCTL"},
 	{E1000_RDLEN, "RDLEN"},
 	{E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@
 	{E1000_RDFTS, "RDFTS"},
 	{E1000_RDFPC, "RDFPC"},
 
-	/* TX Registers */
+	/* Tx Registers */
 	{E1000_TCTL, "TCTL"},
 	{E1000_TDBAL, "TDBAL"},
 	{E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@
 		break;
 	default:
 		printk(KERN_INFO "%-15s %08x\n",
-			reginfo->name, __er32(hw, reginfo->ofs));
+		       reginfo->name, __er32(hw, reginfo->ofs));
 		return;
 	}
 
@@ -171,9 +171,8 @@
 	printk(KERN_CONT "\n");
 }
 
-
 /*
- * e1000e_dump - Print registers, tx-ring and rx-ring
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
  */
 static void e1000e_dump(struct e1000_adapter *adapter)
 {
@@ -182,12 +181,20 @@
 	struct e1000_reg_info *reginfo;
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	struct e1000_tx_desc *tx_desc;
-	struct my_u0 { u64 a; u64 b; } *u0;
+	struct my_u0 {
+		u64 a;
+		u64 b;
+	} *u0;
 	struct e1000_buffer *buffer_info;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	union e1000_rx_desc_packet_split *rx_desc_ps;
 	struct e1000_rx_desc *rx_desc;
-	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+	struct my_u1 {
+		u64 a;
+		u64 b;
+		u64 c;
+		u64 d;
+	} *u1;
 	u32 staterr;
 	int i = 0;
 
@@ -198,12 +205,10 @@
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
 		printk(KERN_INFO "Device Name     state            "
-			"trans_start      last_rx\n");
+		       "trans_start      last_rx\n");
 		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-			netdev->name,
-			netdev->state,
-			netdev->trans_start,
-			netdev->last_rx);
+		       netdev->name, netdev->state, netdev->trans_start,
+		       netdev->last_rx);
 	}
 
 	/* Print Registers */
@@ -214,26 +219,26 @@
 		e1000_regdump(hw, reginfo);
 	}
 
-	/* Print TX Ring Summary */
+	/* Print Tx Ring Summary */
 	if (!netdev || !netif_running(netdev))
 		goto exit;
 
-	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
 	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-		" leng ntw timestamp\n");
+	       " leng ntw timestamp\n");
 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
-		0, tx_ring->next_to_use, tx_ring->next_to_clean,
-		(unsigned long long)buffer_info->dma,
-		buffer_info->length,
-		buffer_info->next_to_watch,
-		(unsigned long long)buffer_info->time_stamp);
+	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
+	       (unsigned long long)buffer_info->dma,
+	       buffer_info->length,
+	       buffer_info->next_to_watch,
+	       (unsigned long long)buffer_info->time_stamp);
 
-	/* Print TX Rings */
+	/* Print Tx Ring */
 	if (!netif_msg_tx_done(adapter))
 		goto rx_ring_summary;
 
-	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
 
 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 	 *
@@ -263,22 +268,22 @@
 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 	 */
 	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Legacy format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Legacy format\n");
 	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Ext Context format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Context format\n");
 	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Ext Data format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Data format\n");
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		tx_desc = E1000_TX_DESC(*tx_ring, i);
 		buffer_info = &tx_ring->buffer_info[i];
 		u0 = (struct my_u0 *)tx_desc;
 		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
-			"%04X  %3X %016llX %p",
-		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
-			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+		       "%04X  %3X %016llX %p",
+		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
 		       (unsigned long long)le64_to_cpu(u0->a),
 		       (unsigned long long)le64_to_cpu(u0->b),
 		       (unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@
 
 		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
-					buffer_info->length, true);
+				       16, 1, phys_to_virt(buffer_info->dma),
+				       buffer_info->length, true);
 	}
 
-	/* Print RX Rings Summary */
+	/* Print Rx Ring Summary */
 rx_ring_summary:
-	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
 	printk(KERN_INFO "Queue [NTU] [NTC]\n");
 	printk(KERN_INFO " %5d %5X %5X\n", 0,
-		rx_ring->next_to_use, rx_ring->next_to_clean);
+	       rx_ring->next_to_use, rx_ring->next_to_clean);
 
-	/* Print RX Rings */
+	/* Print Rx Ring */
 	if (!netif_msg_rx_status(adapter))
 		goto exit;
 
-	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
 	switch (adapter->rx_ps_pages) {
 	case 1:
 	case 2:
@@ -329,7 +334,7 @@
 		 *    +-----------------------------------------------------+
 		 */
 		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
-			"[buffer 1 63:0 ] "
+		       "[buffer 1 63:0 ] "
 		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
 		       "[bi->skb] <-- Ext Pkt Split format\n");
 		/* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@
 		 *   63       48 47    32 31            20 19               0
 		 */
 		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
-			"[vl   l0 ee  es] "
+		       "[vl   l0 ee  es] "
 		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
 		       "[bi->skb] <-- Ext Rx Write-Back format\n");
 		for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@
 			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 			u1 = (struct my_u1 *)rx_desc_ps;
 			staterr =
-				le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
 			if (staterr & E1000_RXD_STAT_DD) {
 				/* Descriptor Done */
 				printk(KERN_INFO "RWB[0x%03X]     %016llX "
-					"%016llX %016llX %016llX "
-					"---------------- %p", i,
-					(unsigned long long)le64_to_cpu(u1->a),
-					(unsigned long long)le64_to_cpu(u1->b),
-					(unsigned long long)le64_to_cpu(u1->c),
-					(unsigned long long)le64_to_cpu(u1->d),
-					buffer_info->skb);
+				       "%016llX %016llX %016llX "
+				       "---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       buffer_info->skb);
 			} else {
 				printk(KERN_INFO "R  [0x%03X]     %016llX "
-					"%016llX %016llX %016llX %016llX %p", i,
-					(unsigned long long)le64_to_cpu(u1->a),
-					(unsigned long long)le64_to_cpu(u1->b),
-					(unsigned long long)le64_to_cpu(u1->c),
-					(unsigned long long)le64_to_cpu(u1->d),
-					(unsigned long long)buffer_info->dma,
-					buffer_info->skb);
+				       "%016llX %016llX %016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
 
 				if (netif_msg_pktdata(adapter))
 					print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@
 		 * 63       48 47    40 39      32 31         16 15      0
 		 */
 		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
-			"[vl er S cks ln] [bi->dma       ] [bi->skb] "
-			"<-- Legacy format\n");
+		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
+		       "<-- Legacy format\n");
 		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 			rx_desc = E1000_RX_DESC(*rx_ring, i);
 			buffer_info = &rx_ring->buffer_info[i];
 			u0 = (struct my_u0 *)rx_desc;
 			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
-				"%016llX %p", i,
-				(unsigned long long)le64_to_cpu(u0->a),
-				(unsigned long long)le64_to_cpu(u0->b),
-				(unsigned long long)buffer_info->dma,
-				buffer_info->skb);
+			       "%016llX %p", i,
+			       (unsigned long long)le64_to_cpu(u0->a),
+			       (unsigned long long)le64_to_cpu(u0->b),
+			       (unsigned long long)buffer_info->dma,
+			       buffer_info->skb);
 			if (i == rx_ring->next_to_use)
 				printk(KERN_CONT " NTU\n");
 			else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@
 
 			if (netif_msg_pktdata(adapter))
 				print_hex_dump(KERN_INFO, "",
-					DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
-					adapter->rx_buffer_len, true);
+					       DUMP_PREFIX_ADDRESS,
+					       16, 1,
+					       phys_to_virt(buffer_info->dma),
+					       adapter->rx_buffer_len, true);
 		}
 	}
 
@@ -450,8 +456,7 @@
  * @skb: pointer to sk_buff to be indicated to stack
  **/
 static void e1000_receive_skb(struct e1000_adapter *adapter,
-			      struct net_device *netdev,
-			      struct sk_buff *skb,
+			      struct net_device *netdev, struct sk_buff *skb,
 			      u8 status, __le16 vlan)
 {
 	skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@
 }
 
 /**
- * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * e1000_rx_checksum - Receive Checksum Offload
  * @adapter:     board private structure
  * @status_err:  receive descriptor status and error fields
  * @csum:	receive descriptor csum field
@@ -548,7 +553,7 @@
 						  adapter->rx_buffer_len,
 						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-			dev_err(&pdev->dev, "RX DMA map failed\n");
+			dev_err(&pdev->dev, "Rx DMA map failed\n");
 			adapter->rx_dma_failed++;
 			break;
 		}
@@ -601,7 +606,8 @@
 			ps_page = &buffer_info->ps_pages[j];
 			if (j >= adapter->rx_ps_pages) {
 				/* all unused desc entries get hw null ptr */
-				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
+				rx_desc->read.buffer_addr[j + 1] =
+				    ~cpu_to_le64(0);
 				continue;
 			}
 			if (!ps_page->page) {
@@ -617,7 +623,7 @@
 				if (dma_mapping_error(&pdev->dev,
 						      ps_page->dma)) {
 					dev_err(&adapter->pdev->dev,
-					  "RX DMA page map failed\n");
+						"Rx DMA page map failed\n");
 					adapter->rx_dma_failed++;
 					goto no_buffers;
 				}
@@ -627,8 +633,8 @@
 			 * didn't change because each write-back
 			 * erases this info.
 			 */
-			rx_desc->read.buffer_addr[j+1] =
-			     cpu_to_le64(ps_page->dma);
+			rx_desc->read.buffer_addr[j + 1] =
+			    cpu_to_le64(ps_page->dma);
 		}
 
 		skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@
 						  adapter->rx_ps_bsize0,
 						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-			dev_err(&pdev->dev, "RX DMA map failed\n");
+			dev_err(&pdev->dev, "Rx DMA map failed\n");
 			adapter->rx_dma_failed++;
 			/* cleanup skb */
 			dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@
 			 * such as IA-64).
 			 */
 			wmb();
-			writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
 		}
 
 		i++;
@@ -931,6 +937,9 @@
 	u16 phy_status, phy_1000t_status, phy_ext_status;
 	u16 pci_status;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1e_rphy(hw, PHY_STATUS, &phy_status);
 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
 	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1106,11 +1115,10 @@
 		cleaned = 1;
 		cleaned_count++;
 		dma_unmap_single(&pdev->dev, buffer_info->dma,
-				 adapter->rx_ps_bsize0,
-				 DMA_FROM_DEVICE);
+				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
-		/* see !EOP comment in other rx routine */
+		/* see !EOP comment in other Rx routine */
 		if (!(staterr & E1000_RXD_STAT_EOP))
 			adapter->flags2 |= FLAG2_IS_DISCARDING;
 
@@ -1501,6 +1509,9 @@
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, downshift_task);
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
 
@@ -1980,15 +1991,15 @@
 }
 
 /**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
  *
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that
  * the driver is loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is open.
  **/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2005,16 +2016,16 @@
 }
 
 /**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
  * @adapter: address of board private structure
  *
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that the
  * driver is no longer loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is closed.
  *
  **/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2445,7 +2456,7 @@
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id)) {
 		/* release control to f/w */
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 		return;
 	}
 
@@ -2610,7 +2621,7 @@
 }
 
 /**
- * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * e1000_configure_tx - Configure Transmit Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Tx unit of the MAC after a reset.
@@ -2663,7 +2674,7 @@
 		 * hthresh = 1 ==> prefetch when one or more available
 		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
 		 * BEWARE: this seems to work but should be considered first if
-		 * there are tx hangs or other tx related bugs
+		 * there are Tx hangs or other Tx related bugs
 		 */
 		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
 		ew32(TXDCTL(0), txdctl);
@@ -2734,6 +2745,9 @@
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
 		else
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to enable jumbo frame workaround mode\n");
 	}
 
 	/* Program MC offset vector base */
@@ -2874,7 +2888,7 @@
 	if (adapter->rx_ps_pages) {
 		/* this is a 32 byte descriptor */
 		rdlen = rx_ring->count *
-			sizeof(union e1000_rx_desc_packet_split);
+		    sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2897,7 +2911,7 @@
 		/*
 		 * set the writeback threshold (only takes effect if the RDTR
 		 * is set). set GRAN=1 and write back up to 0x4 worth, and
-		 * enable prefetching of 0x20 rx descriptors
+		 * enable prefetching of 0x20 Rx descriptors
 		 * granularity = 01
 		 * wthresh = 04,
 		 * hthresh = 04,
@@ -2978,12 +2992,10 @@
 			 * excessive C-state transition latencies result in
 			 * dropped transactions.
 			 */
-			pm_qos_update_request(
-				&adapter->netdev->pm_qos_req, 55);
+			pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
 		} else {
-			pm_qos_update_request(
-				&adapter->netdev->pm_qos_req,
-				PM_QOS_DEFAULT_VALUE);
+			pm_qos_update_request(&adapter->netdev->pm_qos_req,
+					      PM_QOS_DEFAULT_VALUE);
 		}
 	}
 
@@ -3149,7 +3161,7 @@
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
 		pba &= 0xffff;
 		/*
-		 * the Tx fifo also stores 16 bytes of information about the tx
+		 * the Tx fifo also stores 16 bytes of information about the Tx
 		 * but don't include ethernet FCS because hardware appends it
 		 */
 		min_tx_space = (adapter->max_frame_size +
@@ -3172,7 +3184,7 @@
 			pba -= min_tx_space - tx_space;
 
 			/*
-			 * if short on Rx space, Rx wins and must trump tx
+			 * if short on Rx space, Rx wins and must trump Tx
 			 * adjustment or use Early Receive if available
 			 */
 			if ((pba < min_rx_space) &&
@@ -3184,7 +3196,6 @@
 		ew32(PBA, pba);
 	}
 
-
 	/*
 	 * flow control settings
 	 *
@@ -3272,7 +3283,7 @@
 	 * that the network interface is in control
 	 */
 	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	ew32(WUC, 0);
 
@@ -3285,6 +3296,13 @@
 	ew32(VET, ETH_P_8021Q);
 
 	e1000e_reset_adaptive(hw);
+
+	if (!netif_running(adapter->netdev) &&
+	    !test_bit(__E1000_TESTING, &adapter->state)) {
+		e1000_power_down_phy(adapter);
+		return;
+	}
+
 	e1000_get_phy_info(hw);
 
 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3326,6 +3344,21 @@
 	return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags2 & FLAG2_DMA_BURST))
+		return;
+
+	/* flush pending descriptor writebacks to memory */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
+}
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -3365,6 +3398,9 @@
 
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
+
+	e1000e_flush_descriptors(adapter);
+
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
@@ -3570,7 +3606,7 @@
 	 * interface is now open and reset the part to a known state.
 	 */
 	if (adapter->flags & FLAG_HAS_AMT) {
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 		e1000e_reset(adapter);
 	}
 
@@ -3634,7 +3670,7 @@
 	return 0;
 
 err_req_irq:
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter);
 err_setup_rx:
@@ -3689,8 +3725,9 @@
 	 * If AMT is enabled, let the firmware know that the network
 	 * interface is now closed
 	 */
-	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_release_hw_control(adapter);
+	if ((adapter->flags & FLAG_HAS_AMT) &&
+	    !test_bit(__E1000_TESTING, &adapter->state))
+		e1000e_release_hw_control(adapter);
 
 	if ((adapter->flags & FLAG_HAS_ERT) ||
 	    (adapter->hw.mac.type == e1000_pch2lan))
@@ -3752,6 +3789,10 @@
 {
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, update_phy_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000_get_phy_info(&adapter->hw);
 }
 
@@ -3762,6 +3803,10 @@
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	schedule_work(&adapter->update_phy_task);
 }
 
@@ -4029,11 +4074,11 @@
 	       adapter->netdev->name,
 	       adapter->link_speed,
 	       (adapter->link_duplex == FULL_DUPLEX) ?
-	                        "Full Duplex" : "Half Duplex",
+	       "Full Duplex" : "Half Duplex",
 	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-	                        "RX/TX" :
-	       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
-	       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+	       "Rx/Tx" :
+	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
 }
 
 static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4136,6 +4181,9 @@
 	u32 link, tctl;
 	int tx_pending = 0;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
 		/* Cancel scheduled suspend requests. */
@@ -4296,7 +4344,6 @@
 			 * to get done, so reset controller to flush Tx.
 			 * (Do the reset outside of interrupt context).
 			 */
-			adapter->tx_timeout_count++;
 			schedule_work(&adapter->reset_task);
 			/* return immediately since reset is imminent */
 			return;
@@ -4325,19 +4372,12 @@
 	else
 		ew32(ICS, E1000_ICS_RXDMT0);
 
+	/* flush pending descriptors to memory before detecting Tx hang */
+	e1000e_flush_descriptors(adapter);
+
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* flush partial descriptors to memory before detecting tx hang */
-	if (adapter->flags2 & FLAG2_DMA_BURST) {
-		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-		ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-		/*
-		 * no need to flush the writes because the timeout code does
-		 * an er32 first thing
-		 */
-	}
-
 	/*
 	 * With 82571 controllers, LAA may be overwritten due to controller
 	 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4519,7 +4559,7 @@
 		buffer_info->next_to_watch = i;
 		buffer_info->dma = dma_map_single(&pdev->dev,
 						  skb->data + offset,
-						  size,	DMA_TO_DEVICE);
+						  size, DMA_TO_DEVICE);
 		buffer_info->mapped_as_page = false;
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
@@ -4566,7 +4606,7 @@
 		}
 	}
 
-	segs = skb_shinfo(skb)->gso_segs ?: 1;
+	segs = skb_shinfo(skb)->gso_segs ? : 1;
 	/* multiply data chunks by size of headers */
 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
 
@@ -4578,13 +4618,13 @@
 	return count;
 
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	dev_err(&pdev->dev, "Tx DMA map failed\n");
 	buffer_info->dma = 0;
 	if (count)
 		count--;
 
 	while (count--) {
-		if (i==0)
+		if (i == 0)
 			i += tx_ring->count;
 		i--;
 		buffer_info = &tx_ring->buffer_info[i];
@@ -4875,6 +4915,10 @@
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
+	/* don't run the task if already down */
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
 	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
 		e1000e_dump(adapter);
@@ -5209,7 +5253,7 @@
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
 
@@ -5366,7 +5410,7 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	return 0;
 }
@@ -5613,7 +5657,7 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 }
 
@@ -5636,7 +5680,7 @@
 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
 						E1000_PBANUM_LENGTH);
 	if (ret_val)
-		strcpy(pba_str, "Unknown");
+		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
 	       hw->mac.type, hw->phy.type, pba_str);
 }
@@ -5923,7 +5967,8 @@
 		/* APME bit in EEPROM is mapped to WUC.APME */
 		eeprom_data = er32(WUC);
 		eeprom_apme_mask = E1000_WUC_APME;
-		if (eeprom_data & E1000_WUC_PHY_WAKE)
+		if ((hw->mac.type > e1000_ich10lan) &&
+		    (eeprom_data & E1000_WUC_PHY_WAKE))
 			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
 	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
 		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
@@ -5963,9 +6008,9 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
-	strcpy(netdev->name, "eth%d");
+	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
@@ -5982,12 +6027,11 @@
 
 err_register:
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 err_eeprom:
 	if (!e1000_check_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
-
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 err_sw_init:
@@ -6053,7 +6097,7 @@
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	e1000e_reset_interrupt_capability(adapter);
 	kfree(adapter->tx_ring);
@@ -6184,7 +6228,7 @@
 	int ret;
 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 		e1000e_driver_version);
-	pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
+	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a9612b0..4dd9b63 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@
 	module_param_array_named(X, X, int, &num_##X, 0);	\
 	MODULE_PARM_DESC(X, desc);
 
-
 /*
  * Transmit Interrupt Delay in units of 1.024 microseconds
- * Tx interrupt delay needs to typically be set to something non zero
+ * Tx interrupt delay needs to typically be set to something non-zero
  *
  * Valid Range: 0-65535
  */
@@ -112,6 +111,7 @@
 #define DEFAULT_ITR 3
 #define MAX_ITR 100000
 #define MIN_ITR 100
+
 /* IntMode (Interrupt Mode)
  *
  * Valid Range: 0 - 2
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 1781efe..6bea051 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -637,12 +637,11 @@
  **/
 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 	u16 phy_data;
 
-	/* Enable CRS on TX. This must be set for half-duplex operation. */
-	ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
 	if (ret_val)
 		goto out;
 
@@ -651,7 +650,7 @@
 	/* Enable downshift */
 	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
-	ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
 
 out:
 	return ret_val;
@@ -774,16 +773,14 @@
 	}
 
 	if (phy->type == e1000_phy_82578) {
-		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                            &phy_data);
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
 		if (ret_val)
 			return ret_val;
 
 		/* 82578 PHY - set the downshift count to 1x. */
 		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
 		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
-		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                             phy_data);
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
 		if (ret_val)
 			return ret_val;
 	}
@@ -1319,9 +1316,8 @@
 				 * We didn't get link.
 				 * Reset the DSP and cross our fingers.
 				 */
-				ret_val = e1e_wphy(hw,
-						M88E1000_PHY_PAGE_SELECT,
-						0x001d);
+				ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+						   0x001d);
 				if (ret_val)
 					return ret_val;
 				ret_val = e1000e_phy_reset_dsp(hw);
@@ -2990,7 +2986,7 @@
 }
 
 /**
- *  e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
+ *  e1000_get_phy_addr_for_hv_page - Get PHY address based on page
  *  @page: page to be accessed
  **/
 static u32 e1000_get_phy_addr_for_hv_page(u32 page)
@@ -3071,12 +3067,12 @@
 		goto out;
 
 	/* Do not apply workaround if in PHY loopback bit 14 set */
-	hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+	e1e_rphy(hw, PHY_CONTROL, &data);
 	if (data & PHY_CONTROL_LB)
 		goto out;
 
 	/* check if link is up and at 1Gbps */
-	ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
 	if (ret_val)
 		goto out;
 
@@ -3092,14 +3088,12 @@
 	mdelay(200);
 
 	/* flush the packets in the fifo buffer */
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-	                                HV_MUX_DATA_CTRL_GEN_TO_MAC |
-	                                HV_MUX_DATA_CTRL_FORCE_SPEED);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
 	if (ret_val)
 		goto out;
 
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-	                                HV_MUX_DATA_CTRL_GEN_TO_MAC);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
 
 out:
 	return ret_val;
@@ -3119,7 +3113,7 @@
 	s32 ret_val;
 	u16 data;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 
 	if (!ret_val)
 		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@
 	u16 phy_data;
 	bool link;
 
-	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
 		goto out;
 
 	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
 
-	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
 		goto out;
 
@@ -3212,7 +3206,7 @@
 	if (ret_val)
 		goto out;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 	if (ret_val)
 		goto out;
 
@@ -3224,7 +3218,7 @@
 		if (ret_val)
 			goto out;
 
-		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
 		if (ret_val)
 			goto out;
 
@@ -3258,7 +3252,7 @@
 	s32 ret_val;
 	u16 phy_data, length;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
 	if (ret_val)
 		goto out;
 
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 4fa8d2a..eb35951 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1761,7 +1761,7 @@
 module_param_array(irq, int, NULL, 0);
 module_param_array(mem, int, NULL, 0);
 module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
 MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
 MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
 MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a724a2d..6c7257b 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0106"
+#define DRV_VERSION	"EHEA_0107"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1032b5b..f75d314 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -437,7 +437,7 @@
 		}
 	}
 	/* Ring doorbell */
-	ehea_update_rq1a(pr->qp, i);
+	ehea_update_rq1a(pr->qp, i - 1);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@
 	int ret;
 	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
 
-	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
-			       - init_attr->act_nr_rwqes_rq2
-			       - init_attr->act_nr_rwqes_rq3 - 1);
+	ehea_init_fill_rq1(pr, pr->rq1_skba.len);
 
 	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
 
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa..907b05a 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
 			 endptr + 1);
-	enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv);
+	enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
 }
 
 static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d4..cd0282d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
  *
  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
  * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
  */
 
 #include <linux/module.h>
@@ -45,29 +47,42 @@
 
 #include <asm/cacheflush.h>
 
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #endif
 
 #include "fec.h"
 
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define FEC_ALIGNMENT	0xf
 #else
 #define FEC_ALIGNMENT	0x3
 #endif
 
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
+#define DRIVER_NAME	"fec"
 
-static unsigned char	fec_mac_default[] = {
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC		(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
+
+static struct platform_device_id fec_devtype[] = {
+	{
+		.name = DRIVER_NAME,
+		.driver_data = 0,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+	},
+	{ }
 };
 
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
 /*
  * Some hardware gets its MAC address out of local flash memory.
  * If this is non-zero then assume it is the address to get the MAC from.
@@ -133,7 +148,8 @@
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
 #else
 #define	OPT_FRAME_SIZE	0
@@ -186,7 +202,6 @@
 	int     mii_timeout;
 	uint    phy_speed;
 	phy_interface_t	phy_interface;
-	int	index;
 	int	link;
 	int	full_duplex;
 	struct	completion mdio_done;
@@ -213,10 +228,23 @@
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static void *swap_buffer(void *bufaddr, int len)
+{
+	int i;
+	unsigned int *buf = bufaddr;
+
+	for (i = 0; i < (len + 3) / 4; i++, buf++)
+		*buf = cpu_to_be32(*buf);
+
+	return bufaddr;
+}
+
 static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short	status;
@@ -261,6 +289,14 @@
 		bufaddr = fep->tx_bounce[index];
 	}
 
+	/*
+	 * Some designs made an incorrect assumption about the endian mode
+	 * of the system they run on. As a result, the driver has to
+	 * swap every frame going to and coming from the controller.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+		swap_buffer(bufaddr, skb->len);
+
 	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;
 
@@ -429,6 +465,8 @@
 fec_enet_rx(struct net_device *dev)
 {
 	struct	fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
@@ -492,6 +530,9 @@
 	        dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
         			DMA_FROM_DEVICE);
 
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(data, pkt_len);
+
 		/* This does 16 byte alignment, exactly what we need.
 		 * The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
@@ -538,37 +579,50 @@
 }
 
 /* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
 static void __inline__ fec_get_mac(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
 	unsigned char *iap, tmpaddr[ETH_ALEN];
 
-	if (FEC_FLASHMAC) {
-		/*
-		 * Get MAC address from FLASH.
-		 * If it is all 1's or 0's, use the default.
-		 */
-		iap = (unsigned char *)FEC_FLASHMAC;
-		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
-		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
-			iap = fec_mac_default;
-		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
-		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
-			iap = fec_mac_default;
-	} else {
-		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
-		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+	/*
+	 * try to get the MAC address in the following order:
+	 *
+	 * 1) module parameter via the kernel command line in the form
+	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+	 */
+	iap = macaddr;
+
+	/*
+	 * 2) from flash or fuse (via platform data)
+	 */
+	if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+		if (FEC_FLASHMAC)
+			iap = (unsigned char *)FEC_FLASHMAC;
+#else
+		if (pdata)
+			memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+	}
+
+	/*
+	 * 3) FEC mac registers set by bootloader
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		*((unsigned long *) &tmpaddr[0]) =
+			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+		*((unsigned short *) &tmpaddr[4]) =
+			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
 		iap = &tmpaddr[0];
 	}
 
 	memcpy(dev->dev_addr, iap, ETH_ALEN);
 
-	/* Adjust MAC if using default MAC address */
-	if (iap == fec_mac_default)
-		 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+	/* Adjust MAC if using macaddr */
+	if (iap == macaddr)
+		 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
-#endif
 
 /* ------------------------------------------------------------------------- */
 
@@ -651,8 +705,8 @@
 	fep->mii_timeout = 0;
 	init_completion(&fep->mdio_done);
 
-	/* start a read op */
-	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+	/* start a write op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
 		fep->hwp + FEC_MII_DATA);
@@ -681,6 +735,7 @@
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
 	int phy_id;
+	int dev_id = fep->pdev->id;
 
 	fep->phy_dev = NULL;
 
@@ -692,6 +747,8 @@
 			continue;
 		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
 			continue;
+		if (dev_id--)
+			continue;
 		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
 		break;
 	}
@@ -729,10 +786,35 @@
 
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
+	static struct mii_bus *fec0_mii_bus;
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int err = -ENXIO, i;
 
+	/*
+	 * The dual fec interfaces are not equivalent with enet-mac.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is slave
+	 *  - external phys can only be configured by fec0
+	 *
+	 * That is to say, fec1 cannot work independently. It only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface was added primarily for Switch mode.
+	 *
+	 * Because of the last point above, both phys are attached to the
+	 * fec0 mdio interface in the board design, and need to be
+	 * configured through the fec0 mii_bus.
+	 */
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+		/* fec1 uses fec0 mii_bus */
+		fep->mii_bus = fec0_mii_bus;
+		return 0;
+	}
+
 	fep->mii_timeout = 0;
 
 	/*
@@ -769,6 +851,10 @@
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
+	/* save fec0 mii_bus */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+		fec0_mii_bus = fep->mii_bus;
+
 	return 0;
 
 err_out_free_mdio_irq:
@@ -1067,9 +1153,8 @@
  /*
   * XXX:  We need to clean up on failure exits here.
   *
-  * index is only used in legacy code
   */
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;
@@ -1086,26 +1171,11 @@
 
 	spin_lock_init(&fep->hw_lock);
 
-	fep->index = index;
 	fep->hwp = (void __iomem *)dev->base_addr;
 	fep->netdev = dev;
 
-	/* Set the Ethernet address */
-#ifdef CONFIG_M5272
+	/* Get the Ethernet address */
 	fec_get_mac(dev);
-#else
-	{
-		unsigned long l;
-		l = readl(fep->hwp + FEC_ADDR_LOW);
-		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
-		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
-		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
-		l = readl(fep->hwp + FEC_ADDR_HIGH);
-		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
-	}
-#endif
 
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
@@ -1156,12 +1226,25 @@
 fec_restart(struct net_device *dev, int duplex)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int i;
+	u32 val, temp_mac[2];
 
 	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
 
+	/*
+	 * enet-mac reset will reset the MAC address registers too,
+	 * so they need to be reconfigured.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
+		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+	}
+
 	/* Clear any outstanding interrupt. */
 	writel(0xffc00000, fep->hwp + FEC_IEVENT);
 
@@ -1208,20 +1291,45 @@
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+	/*
+	 * The phy interface and speed need to get configured
+	 * differently on enet-mac.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		val = readl(fep->hwp + FEC_R_CNTRL);
+
+		/* MII or RMII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			val |= (1 << 8);
+		else
+			val &= ~(1 << 8);
+
+		/* 10M or 100M */
+		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+			val &= ~(1 << 9);
+		else
+			val |= (1 << 9);
+
+		writel(val, fep->hwp + FEC_R_CNTRL);
+	} else {
 #ifdef FEC_MIIGSK_ENR
-	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-		/* disable the gasket and wait */
-		writel(0, fep->hwp + FEC_MIIGSK_ENR);
-		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-			udelay(1);
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
 
-		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
-		writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+			/*
+			 * configure the gasket:
+			 *   RMII, 50 MHz, no loopback, no echo
+			 */
+			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
 
-		/* re-enable the gasket */
-		writel(2, fep->hwp + FEC_MIIGSK_ENR);
-	}
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
 #endif
+	}
 
 	/* And last, enable the transmit and receive processing */
 	writel(2, fep->hwp + FEC_ECNTRL);
@@ -1316,7 +1424,7 @@
 	}
 	clk_enable(fep->clk);
 
-	ret = fec_enet_init(ndev, 0);
+	ret = fec_enet_init(ndev);
 	if (ret)
 		goto failed_init;
 
@@ -1380,8 +1488,10 @@
 
 	if (ndev) {
 		fep = netdev_priv(ndev);
-		if (netif_running(ndev))
-			fec_enet_close(ndev);
+		if (netif_running(ndev)) {
+			fec_stop(ndev);
+			netif_device_detach(ndev);
+		}
 		clk_disable(fep->clk);
 	}
 	return 0;
@@ -1396,8 +1506,10 @@
 	if (ndev) {
 		fep = netdev_priv(ndev);
 		clk_enable(fep->clk);
-		if (netif_running(ndev))
-			fec_enet_open(ndev);
+		if (netif_running(ndev)) {
+			fec_restart(ndev, fep->full_duplex);
+			netif_device_attach(ndev);
+		}
 	}
 	return 0;
 }
@@ -1414,12 +1526,13 @@
 
 static struct platform_driver fec_driver = {
 	.driver	= {
-		.name	= "fec",
+		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &fec_pm_ops,
 #endif
 	},
+	.id_table = fec_devtype,
 	.probe	= fec_probe,
 	.remove	= __devexit_p(fec_drv_remove),
 };
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25..ace318d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
 /****************************************************************************/
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 /*
  *	Just figures, Motorola would have to change the offsets for
  *	registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
 /*
  *	Define the buffer descriptor structure.
  */
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 struct bufdesc {
 	unsigned short cbd_datlen;	/* Data length */
 	unsigned short cbd_sc;	/* Control and status info */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cd2d72d..9c0b1ba 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3949,6 +3949,7 @@
 		writel(flags, base + NvRegWakeUpFlags);
 		spin_unlock_irq(&np->lock);
 	}
+	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
 	return 0;
 }
 
@@ -5488,14 +5489,10 @@
 	/* set mac address */
 	nv_copy_mac_to_hw(dev);
 
-	/* Workaround current PCI init glitch:  wakeup bits aren't
-	 * being set from PCI PM capability.
-	 */
-	device_init_wakeup(&pci_dev->dev, 1);
-
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
+	device_set_wakeup_enable(&pci_dev->dev, false);
 
 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
 
@@ -5648,6 +5645,8 @@
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
@@ -5746,8 +5745,9 @@
 }
 
 #ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+static int nv_suspend(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5763,17 @@
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
-	pci_save_state(pdev);
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	return 0;
 }
 
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i, rc = 0;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, PCI_D0, 0);
-
 	/* restore non-pci configuration space */
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5792,9 @@
 	return rc;
 }
 
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
 static void nv_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5817,13 @@
 	 * only put the device into D3 if we really go for poweroff.
 	 */
 	if (system_state == SYSTEM_POWER_OFF) {
-		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
-			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+		pci_wake_from_d3(pdev, np->wolenabled);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 }
 #else
-#define nv_suspend NULL
+#define NV_PM_OPS NULL
 #define nv_shutdown NULL
-#define nv_resume NULL
 #endif /* CONFIG_PM */
 
 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5995,8 @@
 	.id_table	= pci_tbl,
 	.probe		= nv_probe,
 	.remove		= __devexit_p(nv_remove),
-	.suspend	= nv_suspend,
-	.resume		= nv_resume,
 	.shutdown	= nv_shutdown,
+	.driver.pm	= NV_PM_OPS,
 };
 
 static int __init init_nic(void)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6de4675..5ed8f9f9 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -434,7 +434,6 @@
 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct netdev_queue *txq;
 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 	unsigned long tx_packets = 0, tx_bytes = 0;
 	int i = 0;
@@ -450,9 +449,8 @@
 	dev->stats.rx_dropped = rx_dropped;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		tx_bytes += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
+		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+		tx_packets += priv->tx_queue[i]->stats.tx_packets;
 	}
 
 	dev->stats.tx_bytes = tx_bytes;
@@ -1922,7 +1920,7 @@
 		if (err) {
 			for (j = 0; j < i; j++)
 				free_grp_irqs(&priv->gfargrp[j]);
-				goto irq_fail;
+			goto irq_fail;
 		}
 	}
 
@@ -2109,8 +2107,8 @@
 	}
 
 	/* Update transmit stats */
-	txq->tx_bytes += skb->len;
-	txq->tx_packets ++;
+	tx_queue->stats.tx_bytes += skb->len;
+	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
 	lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb..54de413 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@
 	MQ_MG_MODE
 };
 
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+};
+
 /**
  *	struct gfar_priv_tx_q - per tx queue structure
  *	@txlock: per queue tx spin lock
  *	@tx_skbuff:skb pointers
  *	@skb_curtx: to be used skb pointer
  *	@skb_dirtytx:the last used skb pointer
+ *	@stats: bytes/packets stats
  *	@qindex: index of this queue
  *	@dev: back pointer to the dev structure
  *	@grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@
 	struct	txbd8 *tx_bd_base;
 	struct	txbd8 *cur_tx;
 	struct	txbd8 *dirty_tx;
+	struct tx_q_stats stats;
 	struct	net_device *dev;
 	struct gfar_priv_grp *grp;
 	u16	skb_curtx;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960..fdb0333 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@
 		dev_dbg(&dev->dev, " starting queue\n");
 	netif_start_queue(dev);
 
+	GRETH_REGSAVE(greth->regs->status, 0xFF);
+
 	napi_enable(&greth->napi);
 
 	greth_enable_irqs(greth);
@@ -371,7 +373,9 @@
 
 	napi_disable(&greth->napi);
 
+	greth_disable_irqs(greth);
 	greth_disable_tx(greth);
+	greth_disable_rx(greth);
 
 	netif_stop_queue(dev);
 
@@ -388,12 +392,20 @@
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
 	int err = NETDEV_TX_OK;
-	u32 status, dma_addr;
+	u32 status, dma_addr, ctrl;
+	unsigned long flags;
 
-	bdp = greth->tx_bd_base + greth->tx_next;
+	/* Clean TX Ring */
+	greth_clean_tx(greth->netdev);
 
 	if (unlikely(greth->tx_free <= 0)) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -406,13 +418,14 @@
 		goto out;
 	}
 
+	bdp = greth->tx_bd_base + greth->tx_next;
 	dma_addr = greth_read_bd(&bdp->addr);
 
 	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 
 	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
-	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
 
 	/* Wrap around descriptor ring */
 	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@
 	greth->tx_next = NEXT_TX(greth->tx_next);
 	greth->tx_free--;
 
-	/* No more descriptors */
-	if (unlikely(greth->tx_free == 0)) {
-
-		/* Free transmitted descriptors */
-		greth_clean_tx(dev);
-
-		/* If nothing was cleaned, stop queue & wait for irq */
-		if (unlikely(greth->tx_free == 0)) {
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
-	}
-
 	/* Write descriptor control word and enable transmission */
 	greth_write_bd(&bdp->stat, status);
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 out:
 	dev_kfree_skb(skb);
@@ -450,13 +452,23 @@
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr;
+	u32 status = 0, dma_addr, ctrl;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+	unsigned long flags;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* Clean TX Ring */
+	greth_clean_tx_gbit(dev);
+
 	if (greth->tx_free < nr_frags + 1) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -499,7 +511,7 @@
 		greth->tx_skbuff[curr_tx] = NULL;
 		bdp = greth->tx_bd_base + curr_tx;
 
-		status = GRETH_TXBD_CSALL;
+		status = GRETH_TXBD_CSALL | GRETH_BD_EN;
 		status |= frag->size & GRETH_BD_LEN;
 
 		/* Wrap around descriptor ring */
@@ -509,14 +521,8 @@
 		/* More fragments left */
 		if (i < nr_frags - 1)
 			status |= GRETH_TXBD_MORE;
-
-		/* ... last fragment, check if out of descriptors  */
-		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
-			/* Enable interrupts and stop queue */
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
+		else
+			status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 
 		greth_write_bd(&bdp->stat, status);
 
@@ -536,26 +542,29 @@
 
 	wmb();
 
-	/* Enable the descriptors that we configured ...  */
-	for (i = 0; i < nr_frags + 1; i++) {
-		bdp = greth->tx_bd_base + greth->tx_next;
-		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-		greth->tx_next = NEXT_TX(greth->tx_next);
-		greth->tx_free--;
-	}
+	/* Enable the descriptor chain by enabling the first descriptor */
+	bdp = greth->tx_bd_base + greth->tx_next;
+	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+	greth->tx_next = curr_tx;
+	greth->tx_free -= nr_frags + 1;
 
+	wmb();
+
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
 
 frag_map_error:
-	/* Unmap SKB mappings that succeeded */
+	/* Unmap SKB mappings that succeeded and disable descriptor */
 	for (i = 0; greth->tx_next + i != curr_tx; i++) {
 		bdp = greth->tx_bd_base + greth->tx_next + i;
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
 				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
 				 DMA_TO_DEVICE);
+		greth_write_bd(&bdp->stat, 0);
 	}
 map_error:
 	if (net_ratelimit())
@@ -565,12 +574,11 @@
 	return err;
 }
 
-
 static irqreturn_t greth_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct greth_private *greth;
-	u32 status;
+	u32 status, ctrl;
 	irqreturn_t retval = IRQ_NONE;
 
 	greth = netdev_priv(dev);
@@ -580,13 +588,15 @@
 	/* Get the interrupt events that caused us to be here. */
 	status = GRETH_REGLOAD(greth->regs->status);
 
+	/* Must also check whether interrupts are enabled: the INT_TX|INT_RX
+	 * flags may be set regardless of whether the IRQ is enabled. This is
+	 * especially important with a shared IRQ.
+	 */
+	ctrl = GRETH_REGLOAD(greth->regs->control);
+
 	/* Handle rx and tx interrupts through poll */
-	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
-		/* Clear interrupt status */
-		GRETH_REGORIN(greth->regs->status,
-			      status & (GRETH_INT_RX | GRETH_INT_TX));
-
+	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 		retval = IRQ_HANDLED;
 
 		/* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@
 
 	while (1) {
 		bdp = greth->tx_bd_base + greth->tx_last;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
 		stat = greth_read_bd(&bdp->stat);
 
 		if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@
 
 		/* We only clean fully completed SKBs */
 		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
-		stat = bdp_last_frag->stat;
+
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
+		stat = greth_read_bd(&bdp_last_frag->stat);
 
 		if (stat & GRETH_BD_EN)
 			break;
@@ -702,21 +717,9 @@
 		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
-	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
-		netif_wake_queue(dev);
-	}
-}
 
-static int greth_pending_packets(struct greth_private *greth)
-{
-	struct greth_bd *bdp;
-	u32 status;
-	bdp = greth->rx_bd_base + greth->rx_cur;
-	status = greth_read_bd(&bdp->stat);
-	if (status & GRETH_BD_EN)
-		return 0;
-	else
-		return 1;
+	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+		netif_wake_queue(dev);
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@
 	int pkt_len;
 	int bad, count;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
 	for (count = 0; count < limit; ++count) {
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
-		dma_addr = greth_read_bd(&bdp->addr);
-		bad = 0;
 
 		if (unlikely(status & GRETH_BD_EN)) {
 			break;
 		}
 
+		dma_addr = greth_read_bd(&bdp->addr);
+		bad = 0;
+
 		/* Check status for errors. */
 		if (unlikely(status & GRETH_RXBD_STATUS)) {
 			if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@
 
 		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 
+		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
@@ -836,6 +845,7 @@
 	int pkt_len;
 	int bad, count = 0;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
@@ -843,6 +853,8 @@
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
 		skb = greth->rx_skbuff[greth->rx_cur];
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
 		bad = 0;
 
@@ -865,10 +877,9 @@
 			}
 		}
 
-		/* Allocate new skb to replace current */
-		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
-		if (!bad && newskb) {
+		/* Allocate new skb to replace current, not needed if the
+		 * current skb can be reused */
+		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
 			skb_reserve(newskb, NET_IP_ALIGN);
 
 			dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@
 				if (net_ratelimit())
 					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
 				dev_kfree_skb(newskb);
+				/* reusing current skb, so it is a drop */
 				dev->stats.rx_dropped++;
 			}
+		} else if (bad) {
+			/* Bad Frame transfer, the skb is reused */
+			dev->stats.rx_dropped++;
 		} else {
+			/* Failed to allocate a new skb, so the current
+			 * "filled" skb is reused, as if the transfer had
+			 * failed. One could argue that RX descriptor table
+			 * handling should be split into cleaning and filling,
+			 * like the TX part of the driver.
+			 */
 			if (net_ratelimit())
 				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+			/* reusing current skb, so it is a drop */
 			dev->stats.rx_dropped++;
 		}
 
@@ -920,7 +942,9 @@
 
 		wmb();
 		greth_write_bd(&bdp->stat, status);
+		spin_lock_irqsave(&greth->devlock, flags);
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
 
@@ -932,15 +956,18 @@
 {
 	struct greth_private *greth;
 	int work_done = 0;
+	unsigned long flags;
+	u32 mask, ctrl;
 	greth = container_of(napi, struct greth_private, napi);
 
-	if (greth->gbit_mac) {
-		greth_clean_tx_gbit(greth->netdev);
-	} else {
-		greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+	if (netif_queue_stopped(greth->netdev)) {
+		if (greth->gbit_mac)
+			greth_clean_tx_gbit(greth->netdev);
+		else
+			greth_clean_tx(greth->netdev);
 	}
 
-restart_poll:
 	if (greth->gbit_mac) {
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
@@ -949,15 +976,29 @@
 
 	if (work_done < budget) {
 
-		napi_complete(napi);
+		spin_lock_irqsave(&greth->devlock, flags);
 
-		if (greth_pending_packets(greth)) {
-			napi_reschedule(napi);
-			goto restart_poll;
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		if (netif_queue_stopped(greth->netdev)) {
+			GRETH_REGSAVE(greth->regs->control,
+					ctrl | GRETH_TXI | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE |
+			       GRETH_INT_TX | GRETH_INT_TE;
+		} else {
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE;
+		}
+
+		if (GRETH_REGLOAD(greth->regs->status) & mask) {
+			GRETH_REGSAVE(greth->regs->control, ctrl);
+			spin_unlock_irqrestore(&greth->devlock, flags);
+			goto restart_txrx_poll;
+		} else {
+			__napi_complete(napi);
+			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}
 
-	greth_enable_irqs(greth);
 	return work_done;
 }
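
The poll routine above avoids losing events by re-enabling the interrupt sources and then re-reading the status register under the device lock before calling __napi_complete(); if anything raced in while interrupts were off it backs out and polls again. A minimal stand-alone sketch of that check-after-enable pattern (the register file and bit names below are made up, not the GRETH layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy registers standing in for the MAC's status/control registers;
 * names and bits are made up and are not the GRETH register layout. */
static uint32_t reg_status;	/* event-pending bits, set by "hardware" */
static uint32_t reg_control;	/* interrupt-enable bits */
#define IRQ_ENABLE	0x1u
#define EVT_PENDING	0x2u

/* The check-after-enable pattern used by greth_poll() above: turn the
 * interrupt sources back on, then re-read status; only complete polling
 * if nothing slipped in while interrupts were off.  (The real driver
 * does this under greth->devlock and calls __napi_complete().) */
static bool try_complete_poll(void)
{
	reg_control |= IRQ_ENABLE;
	if (reg_status & EVT_PENDING) {
		reg_control &= ~IRQ_ENABLE;	/* keep polling instead */
		return false;
	}
	return true;
}

int main(void)
{
	reg_status = EVT_PENDING;	/* an event raced in during the poll */
	printf("complete? %d\n", try_complete_poll());	/* 0: poll again */

	reg_status = 0;
	printf("complete? %d\n", try_complete_poll());	/* 1: NAPI done */
	return 0;
}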
 
@@ -1152,11 +1193,11 @@
 };
 
 static struct net_device_ops greth_netdev_ops = {
-	.ndo_open = greth_open,
-	.ndo_stop = greth_close,
-	.ndo_start_xmit = greth_start_xmit,
-	.ndo_set_mac_address = greth_set_mac_add,
-	.ndo_validate_addr 	= eth_validate_addr,
+	.ndo_open		= greth_open,
+	.ndo_stop		= greth_close,
+	.ndo_start_xmit		= greth_start_xmit,
+	.ndo_set_mac_address	= greth_set_mac_add,
+	.ndo_validate_addr	= eth_validate_addr,
 };
 
 static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@
 	struct greth_private *greth = netdev_priv(dev);
 	struct phy_device *phydev = greth->phy;
 	unsigned long flags;
-
 	int status_change = 0;
+	u32 ctrl;
 
 	spin_lock_irqsave(&greth->devlock, flags);
 
 	if (phydev->link) {
 
 		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
-			GRETH_REGANDIN(greth->regs->control,
-				       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+			ctrl = GRETH_REGLOAD(greth->regs->control) &
+			       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
 
 			if (phydev->duplex)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
+				ctrl |= GRETH_CTRL_FD;
 
-			if (phydev->speed == SPEED_100) {
-
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
-			}
-
+			if (phydev->speed == SPEED_100)
+				ctrl |= GRETH_CTRL_SP;
 			else if (phydev->speed == SPEED_1000)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+				ctrl |= GRETH_CTRL_GB;
 
+			GRETH_REGSAVE(greth->regs->control, ctrl);
 			greth->speed = phydev->speed;
 			greth->duplex = phydev->duplex;
 			status_change = 1;
@@ -1600,6 +1638,9 @@
 	{
 	 .name = "GAISLER_ETHMAC",
 	 },
+	{
+	 .name = "01_01d",
+	 },
 	{},
 };
 
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903..be0f206 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
 #define GRETH_BD_LEN 0x7FF
 
 #define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
 #define GRETH_INT_TX 0x8
 #define GRETH_TXI 0x4
 #define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
 #define GRETH_TXBD_ERR_UE 0x4000
 #define GRETH_TXBD_ERR_AL 0x8000
 
+#define GRETH_INT_RE         0x1
 #define GRETH_INT_RX         0x4
 #define GRETH_RXEN           0x2
 #define GRETH_RXI            0x8
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0..7d9ced0 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@
 	while (p) {
 		if (p->bitrate == bitrate) {
 			memcpy(p->bits, bits, YAM_FPGA_SIZE);
-			return p->bits;
+			goto out;
 		}
 		p = p->next;
 	}
@@ -411,7 +411,7 @@
 	p->bitrate = bitrate;
 	p->next = yam_data;
 	yam_data = p;
-
+ out:
 	release_firmware(fw);
 	return p->bits;
 }
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8..af3822f 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@
  *  The parameter rar_count will usually be hw->mac.rar_entry_count
  *  unless there are workarounds that change this.
  **/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
                                   u8 *mc_addr_list, u32 mc_addr_count,
                                   u32 rar_used_count, u32 rar_count)
 {
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 4dc39e5..77fcf44 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -30,7 +30,7 @@
  *     or the type-DO IR port.
  *
  * IrDA chip set list from Toshiba Computer Engineering Corp.
- * model			method	maker	controler		Version 
+ * model			method	maker	controller		Version 
  * Portege 320CT	FIR,SIR Toshiba Oboe(Triangle) 
  * Portege 3010CT	FIR,SIR Toshiba Oboe(Sydney) 
  * Portege 3015CT	FIR,SIR Toshiba Oboe(Sydney) 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f5..4488bd5 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@
 
 	ret = sh_irda_set_baudrate(self, speed);
 	if (ret < 0)
-		return ret;
+		goto sh_irda_hard_xmit_end;
 
 	self->tx_buff.len = 0;
 	if (skb->len) {
@@ -652,11 +652,21 @@
 
 		sh_irda_write(self, IRTFLR, self->tx_buff.len);
 		sh_irda_write(self, IRTCTR, ARMOD | TE);
-	}
+	} else
+		goto sh_irda_hard_xmit_end;
 
 	dev_kfree_skb(skb);
 
 	return 0;
+
+sh_irda_hard_xmit_end:
+	sh_irda_set_baudrate(self, 9600);
+	netif_wake_queue(self->ndev);
+	sh_irda_rcv_ctrl(self, 1);
+	dev_kfree_skb(skb);
+
+	return ret;
+
 }
 
 static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8..3b8c924 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                 struct ixgbe_atr_input *input,
+						 union ixgbe_atr_hash_dword input,
+						 union ixgbe_atr_hash_dword common,
                                                  u8 queue);
 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_atr_input *input,
+                                      union ixgbe_atr_input *input,
                                       struct ixgbe_atr_input_masks *input_masks,
                                       u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
-                                       u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 l4type);
 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *ring);
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index bfd3c22..a21f581 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1003,7 +1003,7 @@
 		udelay(10);
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw ,"Flow Director previous command isn't complete, "
+		hw_dbg(hw, "Flow Director previous command isn't complete, "
 		       "aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
@@ -1079,7 +1079,7 @@
 
 	/*
 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * intialized to zero for non DCB mode otherwise actual total RX PB
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
 	 * would be bigger than programmed and filter space would run into
 	 * the PB 0 region.
 	 */
@@ -1113,13 +1113,10 @@
 	/* Move the flexible bytes to use the ethertype - shift 6 words */
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
-	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1170,7 +1167,7 @@
 
 	/*
 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * intialized to zero for non DCB mode otherwise actual total RX PB
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
 	 * would be bigger than programmed and filter space would run into
 	 * the PB 0 region.
 	 */
@@ -1209,10 +1206,8 @@
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1251,8 +1246,8 @@
  *  @stream: input bitstream to compute the hash on
  *  @key: 32-bit hash key
  **/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
-                                        u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+					u32 key)
 {
 	/*
 	 * The algorithm is as follows:
@@ -1272,410 +1267,250 @@
 	 *    To simplify for programming, the algorithm is implemented
 	 *    in software this way:
 	 *
-	 *    Key[31:0], Stream[335:0]
+	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
 	 *
-	 *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
-	 *    int_key[350:0] = tmp_key[351:1]
-	 *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+	 *    for (i = 0; i < 352; i+=32)
+	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
 	 *
-	 *    hash[15:0] = 0;
-	 *    for (i = 0; i < 351; i++) {
-	 *        if (int_key[i])
-	 *            hash ^= int_stream[(i + 15):i];
+	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
+	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
+	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+	 *
+	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
+	 *
+	 *    if(key[0])
+	 *        hash[15:0] ^= Stream[15:0];
+	 *
+	 *    for (i = 0; i < 16; i++) {
+	 *        if (key[i])
+	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
+	 *        if (key[i + 16])
+	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
 	 *    }
+	 *
 	 */
+	__be32 common_hash_dword = 0;
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 hash_result = 0;
+	u8 i;
 
-	union {
-		u64    fill[6];
-		u32    key[11];
-		u8     key_stream[44];
-	} tmp_key;
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
 
-	u8   *stream = (u8 *)atr_input;
-	u8   int_key[44];      /* upper-most bit unused */
-	u8   hash_str[46];     /* upper-most 2 bits unused */
-	u16  hash_result = 0;
-	int  i, j, k, h;
+	/* generate common hash dword */
+	for (i = 10; i; i -= 2)
+		common_hash_dword ^= atr_input->dword_stream[i] ^
+				     atr_input->dword_stream[i - 1];
+
+	hi_hash_dword = ntohl(common_hash_dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	if (key & 0x0001) hash_result ^= lo_hash_dword;
+	if (key & 0x00010000) hash_result ^= hi_hash_dword;
 
 	/*
-	 * Initialize the fill member to prevent warnings
-	 * on some compilers
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
 	 */
-	 tmp_key.fill[0] = 0;
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-	/* First load the temporary key stream */
-	for (i = 0; i < 6; i++) {
-		u64 fillkey = ((u64)key << 32) | key;
-		tmp_key.fill[i] = fillkey;
+
+	/* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i--) {
+		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
 	}
 
-	/*
-	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
-	 * shift and compensate when building the key.
-	 */
-
-	int_key[0] = tmp_key.key_stream[0] >> 1;
-	for (i = 1, j = 0; i < 44; i++) {
-		unsigned int this_key = tmp_key.key_stream[j] << 7;
-		j++;
-		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
-	}
-
-	/*
-	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
-	 * unused, so shift and compensate when building the string.
-	 */
-	hash_str[0] = (stream[40] & 0x7f) >> 1;
-	for (i = 1, j = 40; i < 46; i++) {
-		unsigned int this_str = stream[j] << 7;
-		j++;
-		if (j > 41)
-			j = 0;
-		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
-	}
-
-	/*
-	 * Now compute the hash.  i is the index into hash_str, j is into our
-	 * key stream, k is counting the number of bits, and h interates within
-	 * each byte.
-	 */
-	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
-		for (h = 0; h < 8 && k < 351; h++, k++) {
-			if (int_key[j] & (1 << h)) {
-				/*
-				 * Key bit is set, XOR in the current 16-bit
-				 * string.  Example of processing:
-				 *    h = 0,
-				 *      tmp = (hash_str[i - 2] & 0 << 16) |
-				 *            (hash_str[i - 1] & 0xff << 8) |
-				 *            (hash_str[i] & 0xff >> 0)
-				 *      So tmp = hash_str[15 + k:k], since the
-				 *      i + 2 clause rolls off the 16-bit value
-				 *    h = 7,
-				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
-				 *            (hash_str[i - 1] & 0xff << 1) |
-				 *            (hash_str[i] & 0x80 >> 7)
-				 */
-				int tmp = (hash_str[i] >> h);
-				tmp |= (hash_str[i - 1] << (8 - h));
-				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
-				             << (16 - h);
-				hash_result ^= (u16)tmp;
-			}
-		}
-	}
-
-	return hash_result;
+	return hash_result & IXGBE_ATR_HASH_MASK;
 }
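
The comment block above gives the folding in pseudo-code; the sketch below makes the data flow concrete as a stand-alone program. The key and stream contents are arbitrary and the ntohl() byte-order handling is omitted, so it illustrates the structure of the hash rather than reproducing the driver's exact values:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone sketch of the dword-folding hash described in the comment
 * above; key, stream contents and byte order are illustrative only. */
static uint16_t atr_hash_sketch(const uint32_t stream[11], uint32_t key)
{
	uint32_t hi = 0, lo, hash = 0;
	int i;

	/* fold dwords 1..10 into one "hi" dword (dword 0 holds flow/VM/VLAN) */
	for (i = 10; i; i -= 2)
		hi ^= stream[i] ^ stream[i - 1];

	/* the low dword is the word-swapped copy of the folded value */
	lo = (hi >> 16) | (hi << 16);

	/* mix the flow/VM/VLAN dword into the high dword */
	hi ^= stream[0] ^ (stream[0] >> 16);

	/* process key bits 0 and 16 before the VLAN bits reach "lo" */
	if (key & 0x00000001u)
		hash ^= lo;
	if (key & 0x00010000u)
		hash ^= hi;

	lo ^= stream[0] ^ (stream[0] << 16);

	/* remaining 30 key bits, two per iteration */
	for (i = 15; i; i--) {
		if (key & (0x00000001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}

	return hash & 0x7fff;	/* keep a 15-bit result, like the bucket hash */
}

int main(void)
{
	const uint32_t stream[11] = { 0x00000abc, 0xc0a80001, 0xc0a80002,
				      0x13881389, 0, 0, 0, 0, 0, 0, 0 };

	printf("hash = 0x%04x\n", atr_hash_sketch(stream, 0x9e3779b9u));
	return 0;
}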
 
-/**
- *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- *  @input: input stream to modify
- *  @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
-	return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
 
 /**
- *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- *  @input: input stream to modify
- *  @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
-	                                               (src_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
-	                                                (src_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- *  @input: input stream to modify
- *  @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
-	                                               (dst_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
-	                                                (dst_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_src_port_82599 - Sets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
-	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- *  @input: input stream to search
- *  @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
-	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
-	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- *  @input: input stream to search
- *  @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr)
-{
-	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- *  @input: input stream to search
- *  @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *dst_addr)
-{
-	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- *  @input: input stream to search
- *  @src_addr_1: the first 4 bytes of the IP address to load
- *  @src_addr_2: the second 4 bytes of the IP address to load
- *  @src_addr_3: the third 4 bytes of the IP address to load
- *  @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr_1, u32 *src_addr_2,
-                                        u32 *src_addr_3, u32 *src_addr_4)
-{
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_port_82599 - Gets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
+ *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ *  @stream: input bitstream to compute the hash on
  *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
+ *  This function is almost identical to the function above but contains
+ *  several optimizations such as unrolling all of the loops, letting the
+ *  compiler work out all of the conditional ifs since the keys are static
+ *  defines, and computing two keys at once since the hashed dword stream
+ *  will be the same for both keys.
  **/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *src_port)
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+					    union ixgbe_atr_hash_dword common)
 {
-	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
-	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
 
-	return 0;
-}
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(input.dword);
 
-/**
- *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *dst_port)
-{
-	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
-	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+	/* generate common hash dword */
+	hi_hash_dword = ntohl(common.dword);
 
-	return 0;
-}
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
 
-/**
- *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 *flex_byte)
-{
-	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
-	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
 
-	return 0;
-}
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
 
-/**
- *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 *l4type)
-{
-	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-	return 0;
+	/* Process remaining 30 bit of the key */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	sig_hash ^= common_hash << 16;
+	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
 }
 
 /**
  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
  *  @hw: pointer to hardware structure
- *  @stream: input bitstream
+ *  @input: unique input dword
+ *  @common: compressed common input dword
  *  @queue: queue index to direct traffic to
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_atr_input *input,
+                                          union ixgbe_atr_hash_dword input,
+                                          union ixgbe_atr_hash_dword common,
                                           u8 queue)
 {
 	u64  fdirhashcmd;
-	u64  fdircmd;
-	u32  fdirhash;
-	u16  bucket_hash, sig_hash;
-	u8   l4type;
+	u32  fdircmd;
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
+	/*
+	 * Get the flow_type in order to program FDIRCMD properly
+	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+	 */
+	switch (input.formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TCPV6:
+	case IXGBE_ATR_FLOW_TYPE_UDPV6:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		hw_dbg(hw, " Error on flow type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
 
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	sig_hash = ixgbe_atr_compute_hash_82599(input,
-	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	/* Get the l4type in order to program FDIRCMD properly */
-	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
 	/*
 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
 	 */
-	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
-		break;
-	default:
-		hw_dbg(hw, "Error on l4type input\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-
-	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
-	fdirhashcmd = ((fdircmd << 32) | fdirhash);
+	fdirhashcmd = (u64)fdircmd << 32;
+	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
 
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
 
+	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
 	return 0;
 }
 
 /**
+ *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ *  @input_mask: mask to be bit swapped
+ *
+ *  The source and destination port masks for flow director are bit swapped
+ *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
+ *  generate a correctly swapped value we need to bit swap the mask and that
+ *  is what is accomplished by this function.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+	u32 mask = ntohs(input_masks->dst_port_mask);
+	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+	mask |= ntohs(input_masks->src_port_mask);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
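
A quick host-side illustration of the round trip the comment above describes: composing ntohl() with the unconditional byte swap leaves the stored big-endian bit pattern untouched on a little-endian machine, while on big-endian machines the register write itself supplies the final swap. POSIX htonl()/ntohl() stand in for the kernel byte-order helpers here:

#include <arpa/inet.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as IXGBE_STORE_AS_BE32 above: an unconditional byte swap. */
#define STORE_AS_BE32(v) \
	(((uint32_t)(v) >> 24) | (((uint32_t)(v) & 0x00FF0000) >> 8) | \
	 (((uint32_t)(v) & 0x0000FF00) << 8) | ((uint32_t)(v) << 24))

int main(void)
{
	uint32_t ip_be = htonl(0xC0A80001);	/* 192.168.0.1, network order */

	/* On a little-endian host ntohl() swaps and the macro swaps back, so
	 * the stored bit pattern is unchanged; in the driver the register
	 * write itself supplies the final swap on big-endian hosts. */
	printf("0x%08" PRIx32 " -> 0x%08" PRIx32 "\n",
	       ip_be, (uint32_t)STORE_AS_BE32(ntohl(ip_be)));
	return 0;
}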
+/**
  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
  *  @hw: pointer to hardware structure
  *  @input: input bitstream
@@ -1687,135 +1522,139 @@
  *  hardware writes must be protected from one another.
  **/
 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_atr_input *input,
+                                      union ixgbe_atr_input *input,
                                       struct ixgbe_atr_input_masks *input_masks,
                                       u16 soft_id, u8 queue)
 {
-	u32 fdircmd = 0;
 	u32 fdirhash;
-	u32 src_ipv4 = 0, dst_ipv4 = 0;
-	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
-	u16 src_port, dst_port, vlan_id, flex_bytes;
-	u16 bucket_hash;
-	u8  l4type;
-	u8  fdirm = 0;
-
-	/* Get our input values */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	u32 fdircmd;
+	u32 fdirport, fdirtcpm;
+	u32 fdirvlan;
+	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+		    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 
 	/*
-	 * Check l4type formatting, and bail out before we touch the hardware
+	 * Check flow_type formatting, and bail out before we touch the hardware
 	 * if there's a configuration issue
 	 */
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+	switch (input->formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_IPV4:
+		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+		fdirm |= IXGBE_FDIRM_L4P;
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+			hw_dbg(hw, " Error on src/dst port mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
 		break;
 	default:
-		hw_dbg(hw, "Error on l4type input\n");
+		hw_dbg(hw, " Error on flow type input\n");
 		return IXGBE_ERR_CONFIG;
 	}
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
-
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
-	ixgbe_atr_get_src_port_82599(input, &src_port);
-	ixgbe_atr_get_dst_port_82599(input, &dst_port);
-	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
-	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	/* Now figure out if we're IPv4 or IPv6 */
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
-		/* IPv6 */
-		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
-	                                     &src_ipv6_3, &src_ipv6_4);
-
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
-		/* The last 4 bytes is the same register as IPv4 */
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
-	} else {
-		/* IPv4 */
-		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-	}
-
-	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
-	                            (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
-	              (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
 	/*
-	 * Program the relevant mask registers.  L4type cannot be
-	 * masked out in this implementation.
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field.  Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
 	 *
 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
 	 * point in time.
 	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
 
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
-				 (input_masks->dst_port_mask << 16)));
+	/* Program FDIRM */
+	switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+	case 0xEFFF:
+		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+	case 0xE000:
+		/* Unmask VLAN prio - bit 1 */
+		fdirm &= ~IXGBE_FDIRM_VLANP;
 		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
-				 (input_masks->src_port_mask << 16)));
+	case 0x0FFF:
+		/* Unmask VLAN ID - bit 0 */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+		break;
+	case 0x0000:
+		/* do nothing, vlans already masked */
 		break;
 	default:
-		/* this already would have failed above */
-		break;
+		hw_dbg(hw, " Error on VLAN mask\n");
+		return IXGBE_ERR_CONFIG;
 	}
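
The switch above accepts exactly four VLAN mask patterns once the CFI bit is dropped; anything else is a partial ID or priority mask the hardware cannot express. A minimal sketch of that rule, kept separate from the driver's types and byte order:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the rule enforced by the switch above: after dropping the CFI
 * bit, only these four VLAN mask patterns are programmable (shown in host
 * order; the driver applies ntohs() first). */
static bool vlan_mask_supported(uint16_t vlan_mask)
{
	switch (vlan_mask & 0xEFFF) {
	case 0x0000:	/* VLAN fully masked out */
	case 0x0FFF:	/* match VLAN ID only */
	case 0xE000:	/* match VLAN priority only */
	case 0xEFFF:	/* match VLAN ID and priority */
		return true;
	default:
		return false;	/* partial ID/priority masks are rejected */
	}
}

int main(void)
{
	printf("%d %d\n", vlan_mask_supported(0x0FFF),	/* 1 */
	       vlan_mask_supported(0x00F0));		/* 0 */
	return 0;
}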
 
-	/* Program the last mask register, FDIRM */
-	if (input_masks->vlan_id_mask)
-		/* Mask both VLAN and VLANP - bits 0 and 1 */
-		fdirm |= 0x3;
-
-	if (input_masks->data_mask)
-		/* Flex bytes need masking, so mask the whole thing - bit 4 */
-		fdirm |= 0x10;
+	if (input_masks->flex_mask & 0xFFFF) {
+		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+			hw_dbg(hw, " Error on flexible byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		/* Unmask Flex Bytes - bit 4 */
+		fdirm &= ~IXGBE_FDIRM_FLEX;
+	}
 
 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
-	fdirm |= 0x24;
-
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
-	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
-	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
-	fdircmd |= IXGBE_FDIRCMD_LAST;
-	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	/* store the TCP/UDP port masks, bit reversed from port layout */
+	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+	/* write both the same so that UDP and TCP use the same mask */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+			     ~input_masks->src_ip_mask[0]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+			     ~input_masks->dst_ip_mask[0]);
+
+	/* Apply masks to input data */
+	input->formatted.vlan_id &= input_masks->vlan_id_mask;
+	input->formatted.flex_bytes &= input_masks->flex_mask;
+	input->formatted.src_port &= input_masks->src_port_mask;
+	input->formatted.dst_port &= input_masks->dst_port_mask;
+	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+	/* record vlan (little-endian) and flex_bytes (big-endian) */
+	fdirvlan =
+		IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= ntohs(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* record source and destination port (little-endian)*/
+	fdirport = ntohs(input->formatted.dst_port);
+	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+	fdirport |= ntohs(input->formatted.src_port);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+	/* record the first 32 bits of the destination address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+	/* record the source address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	/* we only want the bucket hash so drop the upper 16 bits */
+	fdirhash = ixgbe_atr_compute_hash_82599(input,
+						IXGBE_ATR_BUCKET_HASH_KEY);
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
 
 	return 0;
 }
+
 /**
  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2d..ebbda7d 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@
 		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
 
 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+		/*  clear VMDq pool/queue selection for RAR 0 */
+		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
 	}
 	hw->addr_ctrl.overflow_promisc = 0;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e..2002ea8 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
-	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+	ixgbe_disable_rx_queue(adapter, rx_ring);
 
 	/* now Tx */
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@@ -2279,10 +2277,11 @@
                                struct ethtool_rx_ntuple *cmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
-	struct ixgbe_atr_input input_struct;
+	struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+	union ixgbe_atr_input input_struct;
 	struct ixgbe_atr_input_masks input_masks;
 	int target_queue;
+	int err;
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 		return -EOPNOTSUPP;
@@ -2291,67 +2290,122 @@
 	 * Don't allow programming if the action is a queue greater than
 	 * the number of online Tx queues.
 	 */
-	if ((fs.action >= adapter->num_tx_queues) ||
-	    (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+	if ((fs->action >= adapter->num_tx_queues) ||
+	    (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
 		return -EINVAL;
 
-	memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+	memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
 	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
 
-	input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
-	input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
-	input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
-	input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
-	input_masks.vlan_id_mask = fs.vlan_tag_mask;
-	/* only use the lowest 2 bytes for flex bytes */
-	input_masks.data_mask = (fs.data_mask & 0xffff);
-
-	switch (fs.flow_type) {
+	/* record flow type */
+	switch (fs->flow_type) {
+	case IPV4_FLOW:
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+		break;
 	case TCP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 		break;
 	case UDP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
 		break;
 	case SCTP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
 		break;
 	default:
 		return -1;
 	}
 
-	/* Mask bits from the inputs based on user-supplied mask */
-	ixgbe_atr_set_src_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
-	ixgbe_atr_set_dst_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
-	/* 82599 expects these to be byte-swapped for perfect filtering */
-	ixgbe_atr_set_src_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
-	ixgbe_atr_set_dst_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+	/* copy vlan tag minus the CFI bit */
+	if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+		input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+		if (!fs->vlan_tag_mask) {
+			input_masks.vlan_id_mask = htons(0xEFFF);
+		} else {
+			switch (~fs->vlan_tag_mask & 0xEFFF) {
+			/* all of these are valid vlan-mask values */
+			case 0xEFFF:
+			case 0xE000:
+			case 0x0FFF:
+			case 0x0000:
+				input_masks.vlan_id_mask =
+					htons(~fs->vlan_tag_mask);
+				break;
+			/* exit with error if vlan-mask is invalid */
+			default:
+				e_err(drv, "Partial VLAN ID or "
+				      "priority mask in vlan-mask is not "
+				      "supported by hardware\n");
+				return -1;
+			}
+		}
+	}
 
-	/* VLAN and Flex bytes are either completely masked or not */
-	if (!fs.vlan_tag_mask)
-		ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+	/* make sure we only use the first 2 bytes of user data */
+	if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+		input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+		if (!(fs->data_mask & 0xFFFF)) {
+			input_masks.flex_mask = 0xFFFF;
+		} else if (~fs->data_mask & 0xFFFF) {
+			e_err(drv, "Partial user-def-mask is not "
+			      "supported by hardware\n");
+			return -1;
+		}
+	}
 
-	if (!input_masks.data_mask)
-		/* make sure we only use the first 2 bytes of user data */
-		ixgbe_atr_set_flex_byte_82599(&input_struct,
-		                              (fs.data & 0xffff));
+	/*
+	 * Copy input into formatted structures
+	 *
+	 * These assignments are based on the following logic:
+	 * If neither input nor mask is set, assume the value is masked out.
+	 * If input is set but mask is not, the mask should default to accept all.
+	 * If input is not set but mask is, the mask likely results in 0.
+	 * If both input and mask are set, assign both.
+	 */
+	if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+		input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+		if (!fs->m_u.tcp_ip4_spec.ip4src)
+			input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.src_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4src;
+	}
+	if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+		input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+		if (!fs->m_u.tcp_ip4_spec.ip4dst)
+			input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.dst_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4dst;
+	}
+	if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+		input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+		if (!fs->m_u.tcp_ip4_spec.psrc)
+			input_masks.src_port_mask = 0xFFFF;
+		else
+			input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+	}
+	if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+		input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+		if (!fs->m_u.tcp_ip4_spec.pdst)
+			input_masks.dst_port_mask = 0xFFFF;
+		else
+			input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+	}
 
 	/* determine if we need to drop or route the packet */
-	if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+	if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
 		target_queue = MAX_RX_QUEUES - 1;
 	else
-		target_queue = fs.action;
+		target_queue = fs->action;
 
 	spin_lock(&adapter->fdir_perfect_lock);
-	ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
-	                                    &input_masks, 0, target_queue);
+	err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+						  &input_struct,
+						  &input_masks, 0,
+						  target_queue);
 	spin_unlock(&adapter->fdir_perfect_lock);
 
-	return 0;
+	return err ? -1 : 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d48..c54a882 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,13 +159,13 @@
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
-	static const unsigned int bufflen = 4096;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
 	unsigned int firstoff = 0;
 	unsigned int lastsize;
 	unsigned int thisoff = 0;
 	unsigned int thislen = 0;
 	u32 fcbuff, fcdmarw, fcfltrw;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -254,6 +254,24 @@
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
+	/*
+	 * lastsize can not be buffer len.
+	 * If it is then adding another buffer with lastsize = 1.
+	 */
+	if (lastsize == bufflen) {
+		if (j >= IXGBE_BUFFCNT_MAX) {
+			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+				"not enough user buffers. We need an extra "
+				"buffer because lastsize is bufflen.\n",
+				xid, i, j, dmacount, (u64)addr);
+			goto out_noddp_free;
+		}
+
+		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+		j++;
+		lastsize = 1;
+	}
+
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@
 			e_err(drv, "failed to allocated FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
+
+		/* Extra buffer to be shared by all DDPs for HW work around */
+		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+		if (fcoe->extra_ddp_buffer == NULL) {
+			e_err(drv, "failed to allocate extra DDP buffer\n");
+			goto out_extra_ddp_buffer_alloc;
+		}
+
+		fcoe->extra_ddp_buffer_dma =
+			dma_map_single(&adapter->pdev->dev,
+				       fcoe->extra_ddp_buffer,
+				       IXGBE_FCBUFF_MIN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev,
+				      fcoe->extra_ddp_buffer_dma)) {
+			e_err(drv, "failed to map extra DDP buffer\n");
+			goto out_extra_ddp_buffer_dma;
+		}
 	}
 
 	/* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@
 		}
 	}
 #endif
+
+	return;
+
+out_extra_ddp_buffer_dma:
+	kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+	pci_pool_destroy(fcoe->pool);
+	fcoe->pool = NULL;
 }
 
 /**
@@ -600,6 +644,11 @@
 	if (fcoe->pool) {
 		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 			ixgbe_fcoe_ddp_put(adapter->netdev, i);
+		dma_unmap_single(&adapter->pdev->dev,
+				 fcoe->extra_ddp_buffer_dma,
+				 IXGBE_FCBUFF_MIN,
+				 DMA_FROM_DEVICE);
+		kfree(fcoe->extra_ddp_buffer);
 		pci_pool_destroy(fcoe->pool);
 		fcoe->pool = NULL;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c55..65cc8fb 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@
 	spinlock_t lock;
 	struct pci_pool *pool;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+	unsigned char *extra_ddp_buffer;
+	dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3..30f9ccf 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@
 static const char ixgbe_driver_string[] =
 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "3.0.12-k2"
+#define DRV_VERSION "3.2.9-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
@@ -3024,6 +3024,36 @@
 	}
 }
 
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+		      "the polling period\n", reg_idx);
+	}
+}
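
ixgbe_disable_rx_queue() above is an instance of the usual bounded-poll pattern: request the disable, then give the hardware a limited number of delay-and-re-read iterations to acknowledge before warning. A self-contained sketch with a simulated register read (the bit value and poll count are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_ENABLE	0x02000000u	/* made-up bit, not the real RXDCTL */

static int hw_busy = 3;	/* "hardware" needs a few polls to stop the queue */

static uint32_t read_rxdctl(void)
{
	/* the enable bit reads back as set until the queue has drained */
	return hw_busy-- > 0 ? QUEUE_ENABLE : 0;
}

/* Bounded-poll pattern used by ixgbe_disable_rx_queue() above: request the
 * disable, then wait a limited number of iterations for the hardware to
 * acknowledge (the driver sleeps with udelay(10) between reads). */
static bool disable_queue(unsigned int max_polls)
{
	while (max_polls--) {
		if (!(read_rxdctl() & QUEUE_ENABLE))
			return true;	/* queue really stopped */
	}
	return false;	/* caller logs a warning, as the driver does */
}

int main(void)
{
	printf("disabled: %d\n", disable_queue(10));
	return 0;
}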
+
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
@@ -3034,9 +3064,7 @@
 
 	/* disable queue to avoid issues while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
-			rxdctl & ~IXGBE_RXDCTL_ENABLE);
-	IXGBE_WRITE_FLUSH(hw);
+	ixgbe_disable_rx_queue(adapter, ring);
 
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -3148,9 +3176,16 @@
 	u32 mhadd, hlreg0;
 
 	/* Decide whether to use packet split mode or not */
+	/* On by default */
+	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
 	/* Do not use packet split if we're in SR-IOV Mode */
-	if (!adapter->num_vfs)
-		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	if (adapter->num_vfs)
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+	/* Disable packet split due to 82599 erratum #45 */
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3693,7 +3728,8 @@
 			 * We need to try and force an autonegotiation
 			 * session, then bring up link.
 			 */
-			hw->mac.ops.setup_sfp(hw);
+			if (hw->mac.ops.setup_sfp)
+				hw->mac.ops.setup_sfp(hw);
 			if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 				schedule_work(&adapter->multispeed_fiber_task);
 		} else {
@@ -4064,7 +4100,11 @@
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	IXGBE_WRITE_FLUSH(hw);
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		/* this call also flushes the previous write */
+		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
 	msleep(10);
 
 	netif_tx_stop_all_queues(netdev);
@@ -4789,6 +4829,12 @@
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+	if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+			      IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+		e_err(probe,
+		      "Flow Director is not supported while multiple "
+		      "queues are disabled.  Disabling Flow Director\n");
+	}
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
@@ -4825,16 +4871,13 @@
 {
 	int q_idx, num_q_vectors;
 	struct ixgbe_q_vector *q_vector;
-	int napi_vectors;
 	int (*poll)(struct napi_struct *, int);
 
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		napi_vectors = adapter->num_rx_queues;
 		poll = &ixgbe_clean_rxtx_many;
 	} else {
 		num_q_vectors = 1;
-		napi_vectors = 1;
 		poll = &ixgbe_poll;
 	}
 
@@ -5094,16 +5137,11 @@
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-		if (dev->features & NETIF_F_NTUPLE) {
-			/* Flow Director perfect filter enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-			adapter->atr_sample_rate = 0;
-			spin_lock_init(&adapter->fdir_perfect_lock);
-		} else {
-			/* Flow Director hash filters enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-			adapter->atr_sample_rate = 20;
-		}
+		/* n-tuple support exists, always init our spinlock */
+		spin_lock_init(&adapter->fdir_perfect_lock);
+		/* Flow Director hash filters enabled */
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->atr_sample_rate = 20;
 		adapter->ring_feature[RING_F_FDIR].indices =
 							 IXGBE_MAX_FDIR_INDICES;
 		adapter->fdir_pballoc = 0;
@@ -5931,7 +5969,8 @@
 		unregister_netdev(adapter->netdev);
 		return;
 	}
-	hw->mac.ops.setup_sfp(hw);
+	if (hw->mac.ops.setup_sfp)
+		hw->mac.ops.setup_sfp(hw);
 
 	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 		/* This will also work for DA Twinax connections */
@@ -6474,38 +6513,92 @@
 	writel(i, tx_ring->tail);
 }
 
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      u8 queue, u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+		      u32 tx_flags, __be16 protocol)
 {
-	struct ixgbe_atr_input atr_input;
-	struct iphdr *iph = ip_hdr(skb);
-	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct ixgbe_q_vector *q_vector = ring->q_vector;
+	union ixgbe_atr_hash_dword input = { .dword = 0 };
+	union ixgbe_atr_hash_dword common = { .dword = 0 };
+	union {
+		unsigned char *network;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
 	struct tcphdr *th;
-	u16 vlan_id;
+	__be16 vlan_id;
 
-	/* Right now, we support IPv4 w/ TCP only */
-	if (protocol != htons(ETH_P_IP) ||
-	    iph->protocol != IPPROTO_TCP)
+	/* if ring doesn't have an interrupt vector, cannot perform ATR */
+	if (!q_vector)
 		return;
 
-	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+	/* do nothing if sampling is disabled */
+	if (!ring->atr_sample_rate)
+		return;
 
-	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
-		   IXGBE_TX_FLAGS_VLAN_SHIFT;
+	ring->atr_count++;
+
+	/* snag network header to get L4 type and address */
+	hdr.network = skb_network_header(skb);
+
+	/* Currently only IPv4/IPv6 with TCP is supported */
+	if ((protocol != __constant_htons(ETH_P_IPV6) ||
+	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+	    (protocol != __constant_htons(ETH_P_IP) ||
+	     hdr.ipv4->protocol != IPPROTO_TCP))
+		return;
 
 	th = tcp_hdr(skb);
 
-	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-	ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
-	ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
-	ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
-	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
-	/* src and dst are inverted, think how the receiver sees them */
-	ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
-	ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
+	/* skip this packet since the socket is closing */
+	if (th->fin)
+		return;
+
+	/* sample on all syn packets or once every atr sample count */
+	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+		return;
+
+	/* reset sample count */
+	ring->atr_count = 0;
+
+	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+	/*
+	 * src and dst are inverted, think how the receiver sees them
+	 *
+	 * The input is broken into two sections: a non-compressed section
+	 * containing vm_pool, vlan_id, and flow_type, and a compressed
+	 * section in which the rest of the data is XORed into a single dword.
+	 */
+	input.formatted.vlan_id = vlan_id;
+
+	/*
+	 * since src port and flex bytes occupy the same word, XOR them together
+	 * and write the value to the source port portion of the compressed dword
+	 */
+	if (vlan_id)
+		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+	else
+		common.port.src ^= th->dest ^ protocol;
+	common.port.dst ^= th->source;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+	} else {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+			     hdr.ipv6->saddr.s6_addr32[1] ^
+			     hdr.ipv6->saddr.s6_addr32[2] ^
+			     hdr.ipv6->saddr.s6_addr32[3] ^
+			     hdr.ipv6->daddr.s6_addr32[0] ^
+			     hdr.ipv6->daddr.s6_addr32[1] ^
+			     hdr.ipv6->daddr.s6_addr32[2] ^
+			     hdr.ipv6->daddr.s6_addr32[3];
+	}
 
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
-	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+					      input, common, ring->queue_index);
 }
 
 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@@ -6580,8 +6673,6 @@
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	struct net_device *netdev = tx_ring->netdev;
-	struct netdev_queue *txq;
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
@@ -6676,19 +6767,8 @@
 	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
 	if (count) {
 		/* add the ATR filter if ATR is on */
-		if (tx_ring->atr_sample_rate) {
-			++tx_ring->atr_count;
-			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-			     test_bit(__IXGBE_TX_FDIR_INIT_DONE,
-				      &tx_ring->state)) {
-				ixgbe_atr(adapter, skb, tx_ring->queue_index,
-					  tx_flags, protocol);
-				tx_ring->atr_count = 0;
-			}
-		}
-		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
-		txq->tx_bytes += skb->len;
-		txq->tx_packets++;
+		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
 		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
 		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
@@ -6846,8 +6926,6 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int i;
 
-	/* accurate rx/tx bytes/packets stats */
-	dev_txq_stats_fold(netdev, stats);
 	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6864,6 +6942,22 @@
 			stats->rx_bytes   += bytes;
 		}
 	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes   += bytes;
+		}
+	}
 	rcu_read_unlock();
 	/* following stats updated by ixgbe_watchdog_task() */
 	stats->multicast	= netdev->stats.multicast;
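
The Tx loop added above mirrors the existing Rx one: each ring keeps 64-bit counters behind a u64_stats_sync sequence, and the reader retries a snapshot that raced with an update. A minimal sketch of both sides of that API follows; the struct and function names are illustrative, not from this patch.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_ring_stats {		/* illustrative, not from the patch */
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* writer side: called from the datapath after handling one packet */
static void example_ring_account(struct example_ring_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader side: take a consistent {packets, bytes} snapshot */
static void example_ring_snapshot(struct example_ring_stats *s,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes   = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}
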
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b1573..187b3a1 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@
 	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
-
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
 	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 	vmolr |= (IXGBE_VMOLR_ROMPE |
-		  IXGBE_VMOLR_ROPE |
 		  IXGBE_VMOLR_BAM);
 	if (aupe)
 		vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 446f3467..fd3358f 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1947,10 +1947,9 @@
 #define IXGBE_FDIRM_VLANID                      0x00000001
 #define IXGBE_FDIRM_VLANP                       0x00000002
 #define IXGBE_FDIRM_POOL                        0x00000004
-#define IXGBE_FDIRM_L3P                         0x00000008
-#define IXGBE_FDIRM_L4P                         0x00000010
-#define IXGBE_FDIRM_FLEX                        0x00000020
-#define IXGBE_FDIRM_DIPv6                       0x00000040
+#define IXGBE_FDIRM_L4P                         0x00000008
+#define IXGBE_FDIRM_FLEX                        0x00000010
+#define IXGBE_FDIRM_DIPv6                       0x00000020
 
 #define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
 #define IXGBE_FDIRFREE_FREE_SHIFT               0
@@ -1990,6 +1989,7 @@
 #define IXGBE_FDIRCMD_LAST                      0x00000800
 #define IXGBE_FDIRCMD_COLLISION                 0x00001000
 #define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5
 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
 #define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
 #define IXGBE_FDIR_INIT_DONE_POLL               10
@@ -2147,51 +2147,80 @@
 #define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
 
 /* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
+#define IXGBE_ATR_BUCKET_HASH_KEY    0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
 
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET       0
-#define IXGBE_ATR_SRC_IPV6_OFFSET   2
-#define IXGBE_ATR_SRC_IPV4_OFFSET  14
-#define IXGBE_ATR_DST_IPV6_OFFSET  18
-#define IXGBE_ATR_DST_IPV4_OFFSET  30
-#define IXGBE_ATR_SRC_PORT_OFFSET  34
-#define IXGBE_ATR_DST_PORT_OFFSET  36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET   40
-#define IXGBE_ATR_L4TYPE_OFFSET    41
-
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK     0x7fff
 #define IXGBE_ATR_L4TYPE_MASK      0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
 #define IXGBE_ATR_L4TYPE_UDP       0x1
 #define IXGBE_ATR_L4TYPE_TCP       0x2
 #define IXGBE_ATR_L4TYPE_SCTP      0x3
-#define IXGBE_ATR_HASH_MASK     0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+	IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0,
+	IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1,
+	IXGBE_ATR_FLOW_TYPE_TCPV4  = 0x2,
+	IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	IXGBE_ATR_FLOW_TYPE_IPV6   = 0x4,
+	IXGBE_ATR_FLOW_TYPE_UDPV6  = 0x5,
+	IXGBE_ATR_FLOW_TYPE_TCPV6  = 0x6,
+	IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
 
 /* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
-	/* Byte layout in order, all values with MSB first:
+union ixgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
 	 *
+	 * vm_pool    - 1 byte
+	 * flow_type  - 1 byte
 	 * vlan_id    - 2 bytes
 	 * src_ip     - 16 bytes
 	 * dst_ip     - 16 bytes
 	 * src_port   - 2 bytes
 	 * dst_port   - 2 bytes
 	 * flex_bytes - 2 bytes
-	 * vm_pool    - 1 byte
-	 * l4type     - 1 byte
+	 * rsvd0      - 2 bytes - space reserved must be 0.
 	 */
-	u8 byte_stream[42];
+	struct {
+		u8     vm_pool;
+		u8     flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 rsvd0;
+	} formatted;
+	__be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
 };
 
 struct ixgbe_atr_input_masks {
-	u32 src_ip_mask;
-	u32 dst_ip_mask;
-	u16 src_port_mask;
-	u16 dst_port_mask;
-	u16 vlan_id_mask;
-	u16 data_mask;
+	__be16 rsvd0;
+	__be16 vlan_id_mask;
+	__be32 dst_ip_mask[4];
+	__be32 src_ip_mask[4];
+	__be16 src_port_mask;
+	__be16 dst_port_mask;
+	__be16 flex_mask;
 };
 
 enum ixgbe_eeprom_type {
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a89239..f2518b0 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@
 	}
 
 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Poll for reset bit to self-clear indicating reset is complete */
 	for (i = 0; i < 10; i++) {
 		udelay(1);
 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST))
+		if (!(ctrl & reset_bit))
 			break;
 	}
-	if (ctrl & IXGBE_CTRL_RST) {
+	if (ctrl & reset_bit) {
 		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 183765c..f35554d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -238,7 +238,7 @@
 		goto out;
 	}
 	/* allocate the tx and rx ring buffer descriptors. */
-	/* returns a virtual addres and a physical address. */
+	/* returns a virtual address and a physical address. */
 	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 					 &lp->tx_bd_p, GFP_KERNEL);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e..79ccb54 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		bp->mii_bus->irq[i] = PHY_POLL;
 
-	platform_set_drvdata(bp->dev, bp->mii_bus);
+	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
 	if (mdiobus_register(bp->mii_bus))
 		goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845af..fc27a99 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -528,8 +528,9 @@
 		vnet_hdr_len = q->vnet_hdr_sz;
 
 		err = -EINVAL;
-		if ((len -= vnet_hdr_len) < 0)
+		if (len < vnet_hdr_len)
 			goto err;
+		len -= vnet_hdr_len;
 
 		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
 					   sizeof(vnet_hdr));
@@ -585,7 +586,7 @@
 	rcu_read_lock_bh();
 	vlan = rcu_dereference(q->vlan);
 	if (vlan)
-		netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+		vlan->dev->stats.tx_dropped++;
 	rcu_read_unlock_bh();
 
 	return err;
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f..3a4277f 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@
 	} else {
 		int i;
 
+		buf->direct.buf  = NULL;
 		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages      = buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
@@ -229,7 +230,7 @@
 		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
 				  buf->direct.map);
 	else {
-		if (BITS_PER_LONG == 64)
+		if (BITS_PER_LONG == 64 && buf->direct.buf)
 			vunmap(buf->direct.buf);
 
 		for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 68aaa42..32f9471 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -113,7 +113,7 @@
 void mlx4_start_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	unsigned long addr;
+	phys_addr_t addr;
 
 	INIT_LIST_HEAD(&priv->catas_err.list);
 	init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@
 
 	priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
 	if (!priv->catas_err.map) {
-		mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
-			  addr);
+		mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+			  (unsigned long long) addr);
 		return;
 	}
 
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index f6e0d40..1ff6ca6 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -202,7 +202,8 @@
 	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
 		goto err_pd;
 
-	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+				PAGE_SIZE);
 	if (!mdev->uar_map)
 		goto err_uar;
 	spin_lock_init(&mdev->uar_lock);
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b..897f576 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@
 	int i;
 	int err;
 
-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
+	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+	    prof->tx_ring_num, prof->rx_ring_num);
 	if (dev == NULL) {
 		mlx4_err(mdev, "Net device allocation failed\n");
 		return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18b..5de1db8 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
 		dev_cap->bf_reg_size = 1 << (field & 0x1f);
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
-		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
-			mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
 			field = 3;
-		}
 		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
 		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
 			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 782f11d..2765a3c 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -829,7 +829,7 @@
 		goto err_uar_table_free;
 	}
 
-	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!priv->kar) {
 		mlx4_err(dev, "Couldn't map kernel access region, "
 			 "aborting.\n");
@@ -1286,6 +1286,21 @@
 	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
 	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
 	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
+	{ PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
+	{ PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
+	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
 	{ 0, }
 };
 
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c4f88b7..79cf42d 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -95,7 +95,8 @@
  * entry in hash chain and *mgm holds end of hash chain.
  */
 static int find_mgm(struct mlx4_dev *dev,
-		    u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+		    u8 *gid, enum mlx4_protocol protocol,
+		    struct mlx4_cmd_mailbox *mgm_mailbox,
 		    u16 *hash, int *prev, int *index)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -134,7 +135,8 @@
 			return err;
 		}
 
-		if (!memcmp(mgm->gid, gid, 16))
+		if (!memcmp(mgm->gid, gid, 16) &&
+		    be32_to_cpu(mgm->members_count) >> 30 == protocol)
 			return err;
 
 		*prev = *index;
@@ -146,7 +148,7 @@
 }
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback)
+			  int block_mcast_loopback, enum mlx4_protocol protocol)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
@@ -165,7 +167,7 @@
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
 
@@ -187,7 +189,7 @@
 		memcpy(mgm->gid, gid, 16);
 	}
 
-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	if (members_count == MLX4_QP_PER_MGM) {
 		mlx4_err(dev, "MGM at index %x is full.\n", index);
 		err = -ENOMEM;
@@ -207,7 +209,7 @@
 	else
 		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
 
-	mgm->members_count       = cpu_to_be32(members_count);
+	mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
 
 	err = mlx4_WRITE_MCG(dev, index, mailbox);
 	if (err)
@@ -242,7 +244,8 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol protocol)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
@@ -260,7 +263,7 @@
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
 
@@ -270,7 +273,7 @@
 		goto out;
 	}
 
-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	for (loc = -1, i = 0; i < members_count; ++i)
 		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
 			loc = i;
@@ -282,7 +285,7 @@
 	}
 
 
-	mgm->members_count = cpu_to_be32(--members_count);
+	mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
 	mgm->qp[loc]       = mgm->qp[i - 1];
 	mgm->qp[i - 1]     = 0;
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a37fcf1..ea5cfe2 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3403,9 +3403,7 @@
 		return -EIO;
 	}
 
-	status = pci_restore_state(pdev);
-	if (status)
-		return status;
+	pci_restore_state(pdev);
 
 	status = pci_enable_device(pdev);
 	if (status) {
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321..9fb59d3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@
 {
 	struct niu_parent *parent = np->parent;
 	int first_rx_channel, first_tx_channel;
+	int num_rx_rings, num_tx_rings;
+	struct rx_ring_info *rx_rings;
+	struct tx_ring_info *tx_rings;
 	int i, port, err;
 
 	port = np->port;
@@ -4498,18 +4501,21 @@
 		first_tx_channel += parent->txchan_per_port[i];
 	}
 
-	np->num_rx_rings = parent->rxchan_per_port[port];
-	np->num_tx_rings = parent->txchan_per_port[port];
+	num_rx_rings = parent->rxchan_per_port[port];
+	num_tx_rings = parent->txchan_per_port[port];
 
-	netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
-	netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
-
-	np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
-			       GFP_KERNEL);
+	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+			   GFP_KERNEL);
 	err = -ENOMEM;
-	if (!np->rx_rings)
+	if (!rx_rings)
 		goto out_err;
 
+	np->num_rx_rings = num_rx_rings;
+	smp_wmb();
+	np->rx_rings = rx_rings;
+
+	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
 	for (i = 0; i < np->num_rx_rings; i++) {
 		struct rx_ring_info *rp = &np->rx_rings[i];
 
@@ -4538,12 +4544,18 @@
 			return err;
 	}
 
-	np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
-			       GFP_KERNEL);
+	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+			   GFP_KERNEL);
 	err = -ENOMEM;
-	if (!np->tx_rings)
+	if (!tx_rings)
 		goto out_err;
 
+	np->num_tx_rings = num_tx_rings;
+	smp_wmb();
+	np->tx_rings = tx_rings;
+
+	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
 	for (i = 0; i < np->num_tx_rings; i++) {
 		struct tx_ring_info *rp = &np->tx_rings[i];
 
@@ -6246,11 +6258,17 @@
 static void niu_get_rx_stats(struct niu *np)
 {
 	unsigned long pkts, dropped, errors, bytes;
+	struct rx_ring_info *rx_rings;
 	int i;
 
 	pkts = dropped = errors = bytes = 0;
+
+	rx_rings = ACCESS_ONCE(np->rx_rings);
+	if (!rx_rings)
+		goto no_rings;
+
 	for (i = 0; i < np->num_rx_rings; i++) {
-		struct rx_ring_info *rp = &np->rx_rings[i];
+		struct rx_ring_info *rp = &rx_rings[i];
 
 		niu_sync_rx_discard_stats(np, rp, 0);
 
@@ -6259,6 +6277,8 @@
 		dropped += rp->rx_dropped;
 		errors += rp->rx_errors;
 	}
+
+no_rings:
 	np->dev->stats.rx_packets = pkts;
 	np->dev->stats.rx_bytes = bytes;
 	np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@
 static void niu_get_tx_stats(struct niu *np)
 {
 	unsigned long pkts, errors, bytes;
+	struct tx_ring_info *tx_rings;
 	int i;
 
 	pkts = errors = bytes = 0;
+
+	tx_rings = ACCESS_ONCE(np->tx_rings);
+	if (!tx_rings)
+		goto no_rings;
+
 	for (i = 0; i < np->num_tx_rings; i++) {
-		struct tx_ring_info *rp = &np->tx_rings[i];
+		struct tx_ring_info *rp = &tx_rings[i];
 
 		pkts += rp->tx_packets;
 		bytes += rp->tx_bytes;
 		errors += rp->tx_errors;
 	}
+
+no_rings:
 	np->dev->stats.tx_packets = pkts;
 	np->dev->stats.tx_bytes = bytes;
 	np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@
 {
 	struct niu *np = netdev_priv(dev);
 
-	niu_get_rx_stats(np);
-	niu_get_tx_stats(np);
-
+	if (netif_running(dev)) {
+		niu_get_rx_stats(np);
+		niu_get_tx_stats(np);
+	}
 	return &dev->stats;
 }
 
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c7..a41b2cf 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@
 	}
 
 	ndev = alloc_etherdev(sizeof(struct ns83820));
-	dev = PRIV(ndev);
-
 	err = -ENOMEM;
-	if (!dev)
+	if (!ndev)
 		goto out;
 
+	dev = PRIV(ndev);
 	dev->ndev = ndev;
 
 	spin_lock_init(&dev->rx_info.lock);
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a9..e1e33c8 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@
 	struct pch_gbe_regs_mac_adr mac_adr[16];
 	u32 ADDR_MASK;
 	u32 MIIM;
-	u32 reserve2;
+	u32 MAC_ADDR_LOAD;
 	u32 RGMII_ST;
 	u32 RGMII_CTRL;
 	u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d735530..b99e90a 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@
 #define PCH_GBE_SHORT_PKT		64
 #define DSC_INIT16			0xC000
 #define PCH_GBE_DMA_ALIGN		0
+#define PCH_GBE_DMA_PADDING		2
 #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT	256
 #define PCH_GBE_PCI_BAR			1
@@ -88,6 +89,12 @@
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 			       int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw:	            Pointer to the HW structure
@@ -519,7 +526,9 @@
 	struct pch_gbe_adapter *adapter;
 	adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 
+	rtnl_lock();
 	pch_gbe_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
@@ -528,14 +537,8 @@
  */
 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
-	rtnl_lock();
-	if (netif_running(netdev)) {
-		pch_gbe_down(adapter);
-		pch_gbe_up(adapter);
-	}
-	rtnl_unlock();
+	pch_gbe_down(adapter);
+	pch_gbe_up(adapter);
 }
 
 /**
@@ -1369,16 +1372,13 @@
 	struct pch_gbe_buffer *buffer_info;
 	struct pch_gbe_rx_desc *rx_desc;
 	u32 length;
-	unsigned char tmp_packet[ETH_HLEN];
 	unsigned int i;
 	unsigned int cleaned_count = 0;
 	bool cleaned = false;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *new_skb;
 	u8 dma_status;
 	u16 gbec_status;
 	u32 tcp_ip_status;
-	u8 skb_copy_flag = 0;
-	u8 skb_padding_flag = 0;
 
 	i = rx_ring->next_to_clean;
 
@@ -1422,55 +1422,70 @@
 			pr_err("Receive CRC Error\n");
 		} else {
 			/* get receive length */
-			/* length convert[-3], padding[-2] */
-			length = (rx_desc->rx_words_eob) - 3 - 2;
+			/* length convert[-3] */
+			length = (rx_desc->rx_words_eob) - 3;
 
 			/* Decide the data conversion method */
 			if (!adapter->rx_csum) {
 				/* [Header:14][payload] */
-				skb_padding_flag = 0;
-				skb_copy_flag = 1;
-			} else {
-				/* [Header:14][padding:2][payload] */
-				skb_padding_flag = 1;
-				if (length < copybreak)
-					skb_copy_flag = 1;
-				else
-					skb_copy_flag = 0;
-			}
-
-			/* Data conversion */
-			if (skb_copy_flag) {	/* recycle  skb */
-				struct sk_buff *new_skb;
-				new_skb =
-				    netdev_alloc_skb(netdev,
-						     length + NET_IP_ALIGN);
-				if (new_skb) {
-					if (!skb_padding_flag) {
-						skb_reserve(new_skb,
-								NET_IP_ALIGN);
+				if (NET_IP_ALIGN) {
+					/* Because the required alignment
+					 * differs, allocate a new skb and
+					 * copy the data into it. */
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
 					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
 					memcpy(new_skb->data, skb->data,
-						length);
-					/* save the skb
-					 * in buffer_info as good */
+					       length);
 					skb = new_skb;
-				} else if (!skb_padding_flag) {
-					/* dorrop error */
-					pr_err("New skb allocation Error\n");
-					goto dorrop;
+				} else {
+					/* DMA buffer is used as SKB as it is.*/
+					buffer_info->skb = NULL;
 				}
 			} else {
-				buffer_info->skb = NULL;
+				/* [Header:14][padding:2][payload] */
+				/* The length includes padding length */
+				length = length - PCH_GBE_DMA_PADDING;
+				if ((length < copybreak) ||
+				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+					/* Because the required alignment
+					 * differs, allocate a new skb and
+					 * copy the data into it.  The
+					 * padding bytes are dropped as
+					 * part of the copy. */
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
+					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
+					memcpy(new_skb->data, skb->data,
+					       ETH_HLEN);
+					memcpy(&new_skb->data[ETH_HLEN],
+					       &skb->data[ETH_HLEN +
+					       PCH_GBE_DMA_PADDING],
+					       length - ETH_HLEN);
+					skb = new_skb;
+				} else {
+					/* Padding data is deleted
+					 * by moving header data.*/
+					memmove(&skb->data[PCH_GBE_DMA_PADDING],
+						&skb->data[0], ETH_HLEN);
+					skb_reserve(skb, NET_IP_ALIGN);
+					buffer_info->skb = NULL;
+				}
 			}
-			if (skb_padding_flag) {
-				memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-				memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-					ETH_HLEN);
-				skb_reserve(skb, NET_IP_ALIGN);
-
-			}
-
+			/* The length includes FCS length */
+			length = length - ETH_FCS_LEN;
 			/* update status of driver */
 			adapter->stats.rx_bytes += length;
 			adapter->stats.rx_packets++;
@@ -2247,7 +2262,7 @@
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
 	unregister_netdev(netdev);
 
 	pch_gbe_hal_phy_hw_reset(&adapter->hw);
@@ -2322,6 +2337,7 @@
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 	pch_gbe_set_ethtool_ops(netdev);
 
+	pch_gbe_mac_load_mac_addr(&adapter->hw);
 	pch_gbe_mac_reset_hw(&adapter->hw);
 
 	/* setup the private structure */
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6a..d3cb772 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@
     
 	/* 
 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
-	 * Early datasheets said to poll the reset bit, but now they say that
-	 * it "is not a reliable indicator and subsequently should be ignored."
-	 * We wait at least 10ms.
+	 * We wait at least 2ms.
 	 */
 
-	mdelay(10);
+	mdelay(2);
 
 	/*
 	 * Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda..530ab5a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
 	PCMCIA_DEVICE_NULL,
 };
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2c15891..e953793 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@
 	PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
 	PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
 	PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
+	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
 	PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6..a1b82c9 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <asm/uaccess.h>
 #include <asm/string.h>
 
@@ -542,7 +543,7 @@
 	data = ap->tpkt->data;
 	count = ap->tpkt->len;
 	fcs = ap->tfcs;
-	proto = (data[0] << 8) + data[1];
+	proto = get_unaligned_be16(data);
 
 	/*
 	 * LCP packets with code values between 1 (configure-reqest)
@@ -963,7 +964,7 @@
 	code = data[0];
 	if (code != CONFACK && code != CONFREQ)
 		return;
-	dlen = (data[2] << 8) + data[3];
+	dlen = get_unaligned_be16(data + 2);
 	if (len < dlen)
 		return;		/* packet got truncated or length is bogus */
 
@@ -997,15 +998,14 @@
 	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
 		switch (data[0]) {
 		case LCP_MRU:
-			val = (data[2] << 8) + data[3];
+			val = get_unaligned_be16(data + 2);
 			if (inbound)
 				ap->mru = val;
 			else
 				ap->chan.mtu = val;
 			break;
 		case LCP_ASYNCMAP:
-			val = (data[2] << 24) + (data[3] << 16)
-				+ (data[4] << 8) + data[5];
+			val = get_unaligned_be32(data + 2);
 			if (inbound)
 				ap->raccm = val;
 			else
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83..4358330 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
 #include <linux/ppp-comp.h>
 
 #include <linux/zlib.h>
+#include <asm/unaligned.h>
 
 /*
  * State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@
 	 */
 	wptr[0] = PPP_ADDRESS(rptr);
 	wptr[1] = PPP_CONTROL(rptr);
-	wptr[2] = PPP_COMP >> 8;
-	wptr[3] = PPP_COMP;
+	put_unaligned_be16(PPP_COMP, wptr + 2);
 	wptr += PPP_HDRLEN;
-	wptr[0] = state->seqno >> 8;
-	wptr[1] = state->seqno;
+	put_unaligned_be16(state->seqno, wptr);
 	wptr += DEFLATE_OVHD;
 	olen = PPP_HDRLEN + DEFLATE_OVHD;
 	state->strm.next_out = wptr;
@@ -451,7 +450,7 @@
 	}
 
 	/* Check the sequence number. */
-	seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1];
+	seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
 	if (seq != (state->seqno & 0xffff)) {
 		if (state->debug)
 			printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6456484..c7a6c44 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <net/slhc_vj.h>
 #include <asm/atomic.h>
 
@@ -210,7 +211,7 @@
 };
 
 /* Get the PPP protocol number from a skb */
-#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])
+#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
 
 /* We limit the length of ppp->file.rq to this (arbitrary) value */
 #define PPP_MAX_RQLEN	32
@@ -964,8 +965,7 @@
 
 	pp = skb_push(skb, 2);
 	proto = npindex_to_proto[npi];
-	pp[0] = proto >> 8;
-	pp[1] = proto;
+	put_unaligned_be16(proto, pp);
 
 	netif_stop_queue(dev);
 	skb_queue_tail(&ppp->file.xq, skb);
@@ -1473,8 +1473,7 @@
 		q = skb_put(frag, flen + hdrlen);
 
 		/* make the MP header */
-		q[0] = PPP_MP >> 8;
-		q[1] = PPP_MP;
+		put_unaligned_be16(PPP_MP, q);
 		if (ppp->flags & SC_MP_XSHORTSEQ) {
 			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
 			q[3] = ppp->nxseq;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b8..9a1849a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
 #include <linux/ppp_defs.h>
 #include <linux/ppp-comp.h>
 #include <linux/scatterlist.h>
+#include <asm/unaligned.h>
 
 #include "ppp_mppe.h"
 
@@ -395,16 +396,14 @@
 	 */
 	obuf[0] = PPP_ADDRESS(ibuf);
 	obuf[1] = PPP_CONTROL(ibuf);
-	obuf[2] = PPP_COMP >> 8;	/* isize + MPPE_OVHD + 1 */
-	obuf[3] = PPP_COMP;	/* isize + MPPE_OVHD + 2 */
+	put_unaligned_be16(PPP_COMP, obuf + 2);
 	obuf += PPP_HDRLEN;
 
 	state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
 	if (state->debug >= 7)
 		printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
 		       state->ccount);
-	obuf[0] = state->ccount >> 8;
-	obuf[1] = state->ccount & 0xff;
+	put_unaligned_be16(state->ccount, obuf);
 
 	if (!state->stateful ||	/* stateless mode     */
 	    ((state->ccount & 0xff) == 0xff) ||	/* "flag" packet      */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3..4e6b72f 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
 #include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <asm/uaccess.h>
 
 #define PPP_VERSION	"2.4.2"
@@ -563,7 +564,7 @@
 	int islcp;
 
 	data  = skb->data;
-	proto = (data[0] << 8) + data[1];
+	proto = get_unaligned_be16(data);
 
 	/* LCP packets with codes between 1 (configure-request)
 	 * and 7 (code-reject) must be sent as though no options
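
The PPP changes above replace open-coded shift-and-or byte assembly with the helpers from <asm/unaligned.h>, which are safe on architectures that fault on misaligned loads. A tiny sketch of the equivalence; the function name and buffer are illustrative only.

#include <linux/types.h>
#include <asm/unaligned.h>

static void example_unaligned(u8 *data)
{
	u16 proto;

	/* old style: assemble a big-endian 16-bit field by hand */
	proto = (data[0] << 8) + data[1];

	/* new style: same value, but safe for any alignment */
	proto = get_unaligned_be16(data);

	/* writing works the same way */
	put_unaligned_be16(proto, data);
}
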
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9c2a02d..44e316f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -34,8 +34,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 14
-#define QLCNIC_LINUX_VERSIONID  "5.0.14"
+#define _QLCNIC_LINUX_SUBVERSION 15
+#define QLCNIC_LINUX_VERSIONID  "5.0.15"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -289,6 +289,26 @@
 	u32	reserved[5];
 };
 
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION	0x3F1000
+#define QLCNIC_FW_IMAGE_REGION	0x74
+struct qlcnic_flt_header {
+	u16 version;
+	u16 len;
+	u16 checksum;
+	u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+	u8 region;
+	u8 reserved0;
+	u8 attrib;
+	u8 reserved1;
+	u32 size;
+	u32 start_addr;
+	u32 end_addr;
+};
+
 /* Magic number to let user know flash is programmed */
 #define	QLCNIC_BDINFO_MAGIC 0x12345678
 
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 1e7af70..4c14510 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -672,7 +672,7 @@
 	if (data[1])
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 
-	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+	if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
 		data[2] = qlcnic_irq_test(dev);
 		if (data[2])
 			eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9b9c7c3..a7f1d5b 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -627,12 +627,73 @@
 	return 0;
 }
 
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+				struct qlcnic_flt_entry *region_entry)
+{
+	struct qlcnic_flt_header flt_hdr;
+	struct qlcnic_flt_entry *flt_entry;
+	int i = 0, ret;
+	u32 entry_size;
+
+	memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+	ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+					 (u8 *)&flt_hdr,
+					 sizeof(struct qlcnic_flt_header));
+	if (ret) {
+		dev_warn(&adapter->pdev->dev,
+			 "error reading flash layout header\n");
+		return -EIO;
+	}
+
+	entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+	flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
+	if (flt_entry == NULL) {
+		dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+		return -EIO;
+	}
+
+	ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+					 sizeof(struct qlcnic_flt_header),
+					 (u8 *)flt_entry, entry_size);
+	if (ret) {
+		dev_warn(&adapter->pdev->dev,
+			 "error reading flash layout entries\n");
+		goto err_out;
+	}
+
+	while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+		if (flt_entry[i].region == region)
+			break;
+		i++;
+	}
+	if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+		dev_warn(&adapter->pdev->dev,
+			 "region=%x not found in %d regions\n", region, i);
+		ret = -EIO;
+		goto err_out;
+	}
+	memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+	vfree(flt_entry);
+	return ret;
+}
+
 int
 qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_flt_entry fw_entry;
 	u32 ver = -1, min_ver;
+	int ret;
 
-	qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+	ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
+	if (!ret)
+		/* 0-4:-signature,  4-8:-fw version */
+		qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+				     (int *)&ver);
+	else
+		qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+				     (int *)&ver);
 
 	ver = QLCNIC_DECODE_VERSION(ver);
 	min_ver = QLCNIC_MIN_FW_VERSION;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 11e3a46..37c04b4 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -31,15 +31,15 @@
 
 static struct workqueue_struct *qlcnic_wq;
 static int qlcnic_mac_learn;
-module_param(qlcnic_mac_learn, int, 0644);
+module_param(qlcnic_mac_learn, int, 0444);
 MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
 
 static int use_msi = 1;
-module_param(use_msi, int, 0644);
+module_param(use_msi, int, 0444);
 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
 
 static int use_msi_x = 1;
-module_param(use_msi_x, int, 0644);
+module_param(use_msi_x, int, 0444);
 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
 
 static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@@ -47,11 +47,11 @@
 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
 
 static int load_fw_file;
-module_param(load_fw_file, int, 0644);
+module_param(load_fw_file, int, 0444);
 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
 
 static int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0644);
+module_param(qlcnic_config_npars, int, 0444);
 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
 
 static int __devinit qlcnic_probe(struct pci_dev *pdev,
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 27e6f6d..e3ebd90 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
 #include <asm/processor.h>
 
 #define DRV_NAME	"r6040"
-#define DRV_VERSION	"0.26"
-#define DRV_RELDATE	"30May2010"
+#define DRV_VERSION	"0.27"
+#define DRV_RELDATE	"23Feb2011"
 
 /* PHY CHIP Address */
 #define PHY1_ADDR	1	/* For MAC1 */
@@ -69,6 +69,8 @@
 
 /* MAC registers */
 #define MCR0		0x00	/* Control register 0 */
+#define  MCR0_PROMISC	0x0020	/* Promiscuous mode */
+#define  MCR0_HASH_EN	0x0100	/* Enable multicast hash table function */
 #define MCR1		0x04	/* Control register 1 */
 #define  MAC_RST	0x0001	/* Reset the MAC */
 #define MBCR		0x08	/* Bus control */
@@ -851,77 +853,92 @@
 {
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
-	u16 *adrp;
-	u16 reg;
 	unsigned long flags;
 	struct netdev_hw_addr *ha;
 	int i;
+	u16 *adrp;
+	u16 hash_table[4] = { 0 };
 
-	/* MAC Address */
+	spin_lock_irqsave(&lp->lock, flags);
+
+	/* Keep our MAC Address */
 	adrp = (u16 *)dev->dev_addr;
 	iowrite16(adrp[0], ioaddr + MID_0L);
 	iowrite16(adrp[1], ioaddr + MID_0M);
 	iowrite16(adrp[2], ioaddr + MID_0H);
 
-	/* Promiscous Mode */
-	spin_lock_irqsave(&lp->lock, flags);
-
 	/* Clear AMCP & PROM bits */
-	reg = ioread16(ioaddr) & ~0x0120;
-	if (dev->flags & IFF_PROMISC) {
-		reg |= 0x0020;
-		lp->mcr0 |= 0x0020;
-	}
-	/* Too many multicast addresses
-	 * accept all traffic */
-	else if ((netdev_mc_count(dev) > MCAST_MAX) ||
-		 (dev->flags & IFF_ALLMULTI))
-		reg |= 0x0020;
+	lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
 
-	iowrite16(reg, ioaddr);
-	spin_unlock_irqrestore(&lp->lock, flags);
+	/* Promiscuous mode */
+	if (dev->flags & IFF_PROMISC)
+		lp->mcr0 |= MCR0_PROMISC;
 
-	/* Build the hash table */
-	if (netdev_mc_count(dev) > MCAST_MAX) {
-		u16 hash_table[4];
-		u32 crc;
+	/* Enable multicast hash table function to
+	 * receive all multicast packets. */
+	else if (dev->flags & IFF_ALLMULTI) {
+		lp->mcr0 |= MCR0_HASH_EN;
+
+		for (i = 0; i < MCAST_MAX ; i++) {
+			iowrite16(0, ioaddr + MID_1L + 8 * i);
+			iowrite16(0, ioaddr + MID_1M + 8 * i);
+			iowrite16(0, ioaddr + MID_1H + 8 * i);
+		}
 
 		for (i = 0; i < 4; i++)
-			hash_table[i] = 0;
-
+			hash_table[i] = 0xffff;
+	}
+	/* Use internal multicast address registers if the number of
+	 * multicast addresses is not greater than MCAST_MAX. */
+	else if (netdev_mc_count(dev) <= MCAST_MAX) {
+		i = 0;
 		netdev_for_each_mc_addr(ha, dev) {
-			char *addrs = ha->addr;
-
-			if (!(*addrs & 1))
-				continue;
-
-			crc = ether_crc_le(6, addrs);
-			crc >>= 26;
-			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+			u16 *adrp = (u16 *) ha->addr;
+			iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+			iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+			iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+			i++;
 		}
-		/* Fill the MAC hash tables with their values */
+		while (i < MCAST_MAX) {
+			iowrite16(0, ioaddr + MID_1L + 8 * i);
+			iowrite16(0, ioaddr + MID_1M + 8 * i);
+			iowrite16(0, ioaddr + MID_1H + 8 * i);
+			i++;
+		}
+	}
+	/* Otherwise, enable the multicast hash table function. */
+	else {
+		u32 crc;
+
+		lp->mcr0 |= MCR0_HASH_EN;
+
+		for (i = 0; i < MCAST_MAX ; i++) {
+			iowrite16(0, ioaddr + MID_1L + 8 * i);
+			iowrite16(0, ioaddr + MID_1M + 8 * i);
+			iowrite16(0, ioaddr + MID_1H + 8 * i);
+		}
+
+		/* Build multicast hash table */
+		netdev_for_each_mc_addr(ha, dev) {
+			u8 *addrs = ha->addr;
+
+			crc = ether_crc(ETH_ALEN, addrs);
+			crc >>= 26;
+			hash_table[crc >> 4] |= 1 << (crc & 0xf);
+		}
+	}
+
+	iowrite16(lp->mcr0, ioaddr + MCR0);
+
+	/* Fill the MAC hash tables with their values */
+	if (lp->mcr0 & MCR0_HASH_EN) {
 		iowrite16(hash_table[0], ioaddr + MAR0);
 		iowrite16(hash_table[1], ioaddr + MAR1);
 		iowrite16(hash_table[2], ioaddr + MAR2);
 		iowrite16(hash_table[3], ioaddr + MAR3);
 	}
-	/* Multicast Address 1~4 case */
-	i = 0;
-	netdev_for_each_mc_addr(ha, dev) {
-		if (i >= MCAST_MAX)
-			break;
-		adrp = (u16 *) ha->addr;
-		iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
-		iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
-		iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
-		i++;
-	}
-	while (i < MCAST_MAX) {
-		iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
-		iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
-		iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
-		i++;
-	}
+
+	spin_unlock_irqrestore(&lp->lock, flags);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev,
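
The rewritten multicast path above derives each filter bit from the Ethernet CRC of the address (top six bits: high nibble selects one of the four 16-bit MAR registers, low nibble selects the bit). A sketch of just that mapping, mirroring the hunk; the helper name is illustrative.

#include <linux/crc32.h>
#include <linux/if_ether.h>

/* Map one multicast MAC address to a bit in four 16-bit hash registers. */
static void example_hash_set(u16 hash_table[4], const u8 *addr)
{
	u32 crc = ether_crc(ETH_ALEN, addr);

	crc >>= 26;				  /* keep the 6 most significant bits */
	hash_table[crc >> 4] |= 1 << (crc & 0xf); /* word index / bit index */
}
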
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 27a7c20..7ffdb80 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
+#include <linux/pci-aspm.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -554,6 +555,8 @@
 	struct mii_if_info mii;
 	struct rtl8169_counters counters;
 	u32 saved_wolopts;
+
+	const struct firmware *fw;
 };
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -615,8 +618,9 @@
 	}
 }
 
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	int i;
 
 	RTL_W8(ERIDR, cmd);
@@ -628,7 +632,7 @@
 			break;
 	}
 
-	ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+	ocp_write(tp, 0x1, 0x30, 0x00000001);
 }
 
 #define OOB_CMD_RESET		0x00
@@ -971,7 +975,8 @@
 		if (pm)
 			pm_request_resume(&tp->pci_dev->dev);
 		netif_carrier_on(dev);
-		netif_info(tp, ifup, dev, "link up\n");
+		if (net_ratelimit())
+			netif_info(tp, ifup, dev, "link up\n");
 	} else {
 		netif_carrier_off(dev);
 		netif_info(tp, ifdown, dev, "link down\n");
@@ -1632,42 +1637,163 @@
 {
 	__le32 *phytable = (__le32 *)fw->data;
 	struct net_device *dev = tp->dev;
-	size_t i;
+	size_t index, fw_size = fw->size / sizeof(*phytable);
+	u32 predata, count;
 
 	if (fw->size % sizeof(*phytable)) {
 		netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
 		return;
 	}
 
-	for (i = 0; i < fw->size / sizeof(*phytable); i++) {
-		u32 action = le32_to_cpu(phytable[i]);
+	for (index = 0; index < fw_size; index++) {
+		u32 action = le32_to_cpu(phytable[index]);
+		u32 regno = (action & 0x0fff0000) >> 16;
 
-		if (!action)
+		switch(action & 0xf0000000) {
+		case PHY_READ:
+		case PHY_DATA_OR:
+		case PHY_DATA_AND:
+		case PHY_READ_EFUSE:
+		case PHY_CLEAR_READCOUNT:
+		case PHY_WRITE:
+		case PHY_WRITE_PREVIOUS:
+		case PHY_DELAY_MS:
 			break;
 
-		if ((action & 0xf0000000) != PHY_WRITE) {
-			netif_err(tp, probe, dev,
-				  "unknown action 0x%08x\n", action);
+		case PHY_BJMPN:
+			if (regno > index) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+		case PHY_READCOUNT_EQ_SKIP:
+			if (index + 2 >= fw_size) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+		case PHY_COMP_EQ_SKIPN:
+		case PHY_COMP_NEQ_SKIPN:
+		case PHY_SKIPN:
+			if (index + 1 + regno >= fw_size) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+
+		case PHY_READ_MAC_BYTE:
+		case PHY_WRITE_MAC_BYTE:
+		case PHY_WRITE_ERI_WORD:
+		default:
+			netif_err(tp, probe, tp->dev,
+				  "Invalid action 0x%08x\n", action);
 			return;
 		}
 	}
 
-	while (i-- != 0) {
-		u32 action = le32_to_cpu(*phytable);
+	predata = 0;
+	count = 0;
+
+	for (index = 0; index < fw_size; ) {
+		u32 action = le32_to_cpu(phytable[index]);
 		u32 data = action & 0x0000ffff;
-		u32 reg = (action & 0x0fff0000) >> 16;
+		u32 regno = (action & 0x0fff0000) >> 16;
+
+		if (!action)
+			break;
 
 		switch(action & 0xf0000000) {
-		case PHY_WRITE:
-			rtl_writephy(tp, reg, data);
-			phytable++;
+		case PHY_READ:
+			predata = rtl_readphy(tp, regno);
+			count++;
+			index++;
 			break;
+		case PHY_DATA_OR:
+			predata |= data;
+			index++;
+			break;
+		case PHY_DATA_AND:
+			predata &= data;
+			index++;
+			break;
+		case PHY_BJMPN:
+			index -= regno;
+			break;
+		case PHY_READ_EFUSE:
+			predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+			index++;
+			break;
+		case PHY_CLEAR_READCOUNT:
+			count = 0;
+			index++;
+			break;
+		case PHY_WRITE:
+			rtl_writephy(tp, regno, data);
+			index++;
+			break;
+		case PHY_READCOUNT_EQ_SKIP:
+			if (count == data)
+				index += 2;
+			else
+				index += 1;
+			break;
+		case PHY_COMP_EQ_SKIPN:
+			if (predata == data)
+				index += regno;
+			index++;
+			break;
+		case PHY_COMP_NEQ_SKIPN:
+			if (predata != data)
+				index += regno;
+			index++;
+			break;
+		case PHY_WRITE_PREVIOUS:
+			rtl_writephy(tp, regno, predata);
+			index++;
+			break;
+		case PHY_SKIPN:
+			index += regno + 1;
+			break;
+		case PHY_DELAY_MS:
+			mdelay(data);
+			index++;
+			break;
+
+		case PHY_READ_MAC_BYTE:
+		case PHY_WRITE_MAC_BYTE:
+		case PHY_WRITE_ERI_WORD:
 		default:
 			BUG();
 		}
 	}
 }
 
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+	release_firmware(tp->fw);
+	tp->fw = NULL;
+}
+
+static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+{
+	const struct firmware **fw = &tp->fw;
+	int rc = !*fw;
+
+	if (rc) {
+		rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
+		if (rc < 0)
+			goto out;
+	}
+
+	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
+	rtl_phy_write_fw(tp, *fw);
+out:
+	return rc;
+}
+
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
 {
 	static const struct phy_reg phy_reg_init[] = {
@@ -2041,7 +2167,6 @@
 		{ 0x0d, 0xf880 }
 	};
 	void __iomem *ioaddr = tp->mmio_addr;
-	const struct firmware *fw;
 
 	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
@@ -2105,11 +2230,8 @@
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if (rtl_readphy(tp, 0x06) == 0xbf00 &&
-	    request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) {
-		rtl_phy_write_fw(tp, fw);
-		release_firmware(fw);
-	} else {
+	if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
+	    (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
 		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
 	}
 
@@ -2159,7 +2281,6 @@
 		{ 0x0d, 0xf880 }
 	};
 	void __iomem *ioaddr = tp->mmio_addr;
-	const struct firmware *fw;
 
 	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
@@ -2214,11 +2335,8 @@
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if (rtl_readphy(tp, 0x06) == 0xb300 &&
-	    request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) {
-		rtl_phy_write_fw(tp, fw);
-		release_firmware(fw);
-	} else {
+	if ((rtl_readphy(tp, 0x06) != 0xb300) ||
+	    (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
 		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
 	}
 
@@ -2752,8 +2870,11 @@
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+	if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+	     (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+	    (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
 		return;
+	}
 
 	if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
 	     (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2775,6 +2896,8 @@
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
+	case RTL_GIGA_MAC_VER_27:
+	case RTL_GIGA_MAC_VER_28:
 		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
 		break;
 	}
@@ -2784,12 +2907,17 @@
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+	if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+	     (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+	    (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
 		return;
+	}
 
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
+	case RTL_GIGA_MAC_VER_27:
+	case RTL_GIGA_MAC_VER_28:
 		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
 		break;
 	}
@@ -2893,6 +3021,11 @@
 	mii->reg_num_mask = 0x1f;
 	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
 
+	/* Disable ASPM completely as it causes random devices to stop working
+	 * and full system hangs for some PCIe device users. */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+				     PCIE_LINK_STATE_CLKPM);
+
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
 	rc = pci_enable_device(pdev);
 	if (rc < 0) {
@@ -2926,7 +3059,7 @@
 		goto err_out_mwi_2;
 	}
 
-	tp->cp_cmd = PCIMulRW | RxChkSum;
+	tp->cp_cmd = RxChkSum;
 
 	if ((sizeof(dma_addr_t) > 4) &&
 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3069,20 +3202,13 @@
 		rtl8168_driver_start(tp);
 	}
 
-	rtl8169_init_phy(dev, tp);
-
-	/*
-	 * Pretend we are using VLANs; This bypasses a nasty bug where
-	 * Interrupts stop flowing on high load on 8110SCd controllers.
-	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
-
 	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
 
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_put_noidle(&pdev->dev);
 
+	netif_carrier_off(dev);
+
 out:
 	return rc;
 
@@ -3111,6 +3237,8 @@
 
 	cancel_delayed_work_sync(&tp->task);
 
+	rtl_release_firmware(tp);
+
 	unregister_netdev(dev);
 
 	if (pci_dev_run_wake(pdev))
@@ -3127,6 +3255,7 @@
 static int rtl8169_open(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 	int retval = -ENOMEM;
 
@@ -3162,6 +3291,15 @@
 
 	napi_enable(&tp->napi);
 
+	rtl8169_init_phy(dev, tp);
+
+	/*
+	 * Pretend we are using VLANs; This bypasses a nasty bug where
+	 * Interrupts stop flowing on high load on 8110SCd controllers.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+
 	rtl_pll_power_up(tp);
 
 	rtl_hw_start(dev);
@@ -3171,7 +3309,7 @@
 	tp->saved_wolopts = 0;
 	pm_runtime_put_noidle(&pdev->dev);
 
-	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+	rtl8169_check_link_status(dev, tp, ioaddr);
 out:
 	return retval;
 
@@ -3197,7 +3335,8 @@
 	/* Disable interrupts */
 	rtl8169_irq_mask_and_ack(ioaddr);
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_28) {
 		while (RTL_R8(TxPoll) & NPQ)
 			udelay(20);
 
@@ -3639,7 +3778,8 @@
 	RTL_W16(IntrMitigate, 0x5151);
 
 	/* Work around for RxFIFO overflow. */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+	if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
 		tp->intr_event |= RxFIFOOver | PCSTimeout;
 		tp->intr_event &= ~RxOverflow;
 	}
@@ -3725,8 +3865,7 @@
 	Cxpl_dbg_sel | \
 	ASF | \
 	PktCntrDisable | \
-	PCIDAC | \
-	PCIMulRW)
+	Mac_dbgo_sel)
 
 static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 {
@@ -3756,8 +3895,6 @@
 	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
 		RTL_W8(Config1, cfg1 & ~LEDS0);
 
-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
 	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 }
 
@@ -3769,8 +3906,6 @@
 
 	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
 }
 
 static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3796,6 +3931,8 @@
 		}
 	}
 
+	RTL_W8(Cfg9346, Cfg9346_Unlock);
+
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_07:
 		rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3810,14 +3947,13 @@
 		break;
 	}
 
-	RTL_W8(Cfg9346, Cfg9346_Unlock);
+	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 
-	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
 	RTL_W16(CPlusCmd, tp->cp_cmd);
 
 	RTL_W16(IntrMitigate, 0x0000);
@@ -3827,14 +3963,10 @@
 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 	rtl_set_rx_tx_config_registers(tp);
 
-	RTL_W8(Cfg9346, Cfg9346_Lock);
-
 	RTL_R8(IntrMask);
 
 	rtl_set_rx_mode(dev);
 
-	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
 
 	RTL_W16(IntrMask, tp->intr_event);
@@ -4521,12 +4653,33 @@
 			break;
 		}
 
-		/* Work around for rx fifo overflow */
-		if (unlikely(status & RxFIFOOver) &&
-		(tp->mac_version == RTL_GIGA_MAC_VER_11)) {
-			netif_stop_queue(dev);
-			rtl8169_tx_timeout(dev);
-			break;
+		if (unlikely(status & RxFIFOOver)) {
+			switch (tp->mac_version) {
+			/* Work around for rx fifo overflow */
+			case RTL_GIGA_MAC_VER_11:
+			case RTL_GIGA_MAC_VER_22:
+			case RTL_GIGA_MAC_VER_26:
+				netif_stop_queue(dev);
+				rtl8169_tx_timeout(dev);
+				goto done;
+			/* Testers needed. */
+			case RTL_GIGA_MAC_VER_17:
+			case RTL_GIGA_MAC_VER_19:
+			case RTL_GIGA_MAC_VER_20:
+			case RTL_GIGA_MAC_VER_21:
+			case RTL_GIGA_MAC_VER_23:
+			case RTL_GIGA_MAC_VER_24:
+			case RTL_GIGA_MAC_VER_27:
+			case RTL_GIGA_MAC_VER_28:
+			/* Experimental science. Pktgen proof. */
+			case RTL_GIGA_MAC_VER_12:
+			case RTL_GIGA_MAC_VER_25:
+				if (status == RxFIFOOver)
+					goto done;
+				break;
+			default:
+				break;
+			}
 		}
 
 		if (unlikely(status & SYSErr)) {
@@ -4562,7 +4715,7 @@
 			(status & RxFIFOOver) ? (status | RxOverflow) : status);
 		status = RTL_R16(IntrStatus);
 	}
-
+done:
 	return IRQ_RETVAL(handled);
 }
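One subtlety in the new RxFIFOOver handling: the branch is entered with the bit test `status & RxFIFOOver`, but the inner `status == RxFIFOOver` check only passes when the overflow bit is the sole bit pending, so other work queued in the same interrupt is not silently skipped. A tiny stand-alone check of that distinction, using illustrative bit values rather than the driver's enums:

/* Quick illustration of the two tests used above (bit values illustrative). */
#include <assert.h>

#define RxFIFOOver	0x0040
#define RxOK		0x0001

int main(void)
{
	unsigned short only_overflow = RxFIFOOver;
	unsigned short overflow_plus_rx = RxFIFOOver | RxOK;

	assert(only_overflow == RxFIFOOver);	/* exact match: nothing else pending */
	assert(overflow_plus_rx != RxFIFOOver);	/* exact match fails here ... */
	assert(overflow_plus_rx & RxFIFOOver);	/* ... but the bit test still hits */
	return 0;
}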
 
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 711449c..002bac7 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1153,6 +1153,9 @@
 	int count;
 	int cpu;
 
+	if (rss_cpus)
+		return rss_cpus;
+
 	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
@@ -1266,27 +1269,18 @@
 	efx->legacy_irq = 0;
 }
 
-struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
-	unsigned tx_channel_offset =
-		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
-			    type >= EFX_TXQ_TYPES);
-	return &efx->channel[tx_channel_offset + index]->tx_queue[type];
-}
-
 static void efx_set_channels(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
-	unsigned tx_channel_offset =
+
+	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
 	/* Channel pointers were set in efx_init_struct() but we now
 	 * need to clear them for TX queues in any RX-only channels. */
 	efx_for_each_channel(channel, efx) {
-		if (channel->channel - tx_channel_offset >=
+		if (channel->channel - efx->tx_channel_offset >=
 		    efx->n_tx_channels) {
 			efx_for_each_channel_tx_queue(tx_queue, channel)
 				tx_queue->channel = NULL;
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19..ca886d9 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -569,9 +569,14 @@
 				  struct ethtool_test *test, u64 *data)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_self_tests efx_tests;
+	struct efx_self_tests *efx_tests;
 	int already_up;
-	int rc;
+	int rc = -ENOMEM;
+
+	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+	if (!efx_tests)
+		goto fail;
+
 
 	ASSERT_RTNL();
 	if (efx->state != STATE_RUNNING) {
@@ -589,13 +594,11 @@
 		if (rc) {
 			netif_err(efx, drv, efx->net_dev,
 				  "failed opening device.\n");
-			goto fail2;
+			goto fail1;
 		}
 	}
 
-	memset(&efx_tests, 0, sizeof(efx_tests));
-
-	rc = efx_selftest(efx, &efx_tests, test->flags);
+	rc = efx_selftest(efx, efx_tests, test->flags);
 
 	if (!already_up)
 		dev_close(efx->net_dev);
@@ -604,10 +607,11 @@
 		   rc == 0 ? "passed" : "failed",
 		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
 
- fail2:
- fail1:
+fail1:
 	/* Fill ethtool results structures */
-	efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+	kfree(efx_tests);
+fail:
 	if (rc)
 		test->flags |= ETH_TEST_FL_FAILED;
 }
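struct efx_self_tests is large enough that keeping it on the kernel stack is risky, so the self-test path now kzalloc()s it, frees it on the way out, and jumps straight to the final error handling when the allocation itself fails. A hedged sketch of the same heap-instead-of-stack pattern; struct big_results and do_selftest() are placeholders, not sfc symbols:

/* Sketch of moving a large result structure off the kernel stack. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct big_results { u64 lane[64][16]; };	/* too big for the kernel stack */

static int do_selftest(void)
{
	struct big_results *res;
	int rc = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		goto out;

	rc = 0;			/* run the tests here, filling *res */
	kfree(res);
out:
	return rc;
}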
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 70e4f7d..61ddd2c 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1107,22 +1107,9 @@
 
 	/* Restore PCI configuration if needed */
 	if (method == RESET_TYPE_WORLD) {
-		if (efx_nic_is_dual_func(efx)) {
-			rc = pci_restore_state(nic_data->pci_dev2);
-			if (rc) {
-				netif_err(efx, drv, efx->net_dev,
-					  "failed to restore PCI config for "
-					  "the secondary function\n");
-				goto fail3;
-			}
-		}
-		rc = pci_restore_state(efx->pci_dev);
-		if (rc) {
-			netif_err(efx, drv, efx->net_dev,
-				  "failed to restore PCI config for the "
-				  "primary function\n");
-			goto fail4;
-		}
+		if (efx_nic_is_dual_func(efx))
+			pci_restore_state(nic_data->pci_dev2);
+		pci_restore_state(efx->pci_dev);
 		netif_dbg(efx, drv, efx->net_dev,
 			  "successfully restored PCI config\n");
 	}
@@ -1133,7 +1120,7 @@
 		rc = -ETIMEDOUT;
 		netif_err(efx, hw, efx->net_dev,
 			  "timed out waiting for hardware reset\n");
-		goto fail5;
+		goto fail3;
 	}
 	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
 
@@ -1141,11 +1128,9 @@
 
 	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
 fail2:
-fail3:
 	pci_restore_state(efx->pci_dev);
 fail1:
-fail4:
-fail5:
+fail3:
 	return rc;
 }
 
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index bdce66d..28df866 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -735,6 +735,7 @@
 	unsigned next_buffer_table;
 	unsigned n_channels;
 	unsigned n_rx_channels;
+	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
@@ -929,8 +930,13 @@
 	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
 		     (_efx)->channel[_channel->channel + 1] : NULL)
 
-extern struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+			    type >= EFX_TXQ_TYPES);
+	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
 
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5818368..640e368 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -36,7 +36,7 @@
    Rev 1.07.06 Nov.  7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
    Rev 1.07.05 Nov.  6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig
    Rev 1.07.04 Sep.  6 2000 Lei-Chun Chang added ICS1893 PHY support
-   Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E eqaulizer workaround rule
+   Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
    Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
    Rev 1.07    Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
    Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
@@ -1777,6 +1777,7 @@
 					      "cur_rx:%4.4d, dirty_rx:%4.4d\n",
 					      net_dev->name, sis_priv->cur_rx,
 					      sis_priv->dirty_rx);
+				dev_kfree_skb(skb);
 				break;
 			}
 
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98..35b28f4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@
 	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-	/* device is off until link detection */
-	netif_carrier_off(dev);
-
 	return dev;
 }
 
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 39996bf..7d85a38 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
 
 #include <asm/irq.h>
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define SKY2_VLAN_TAG_USED 1
-#endif
-
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
@@ -1326,39 +1322,34 @@
 	return err;
 }
 
-#ifdef SKY2_VLAN_TAG_USED
-static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
-{
-	if (onoff) {
-		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
-			     RX_VLAN_STRIP_ON);
-		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-			     TX_VLAN_TAG_ON);
-	} else {
-		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
-			     RX_VLAN_STRIP_OFF);
-		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-			     TX_VLAN_TAG_OFF);
-	}
-}
+#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
 
-static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+static void sky2_vlan_mode(struct net_device *dev)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
 	u16 port = sky2->port;
 
-	netif_tx_lock_bh(dev);
-	napi_disable(&hw->napi);
+	if (dev->features & NETIF_F_HW_VLAN_RX)
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_ON);
+	else
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_OFF);
 
-	sky2->vlgrp = grp;
-	sky2_set_vlan_mode(hw, port, grp != NULL);
+	dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
+	if (dev->features & NETIF_F_HW_VLAN_TX)
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_ON);
+	else {
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_OFF);
 
-	sky2_read32(hw, B0_Y2_SP_LISR);
-	napi_enable(&hw->napi);
-	netif_tx_unlock_bh(dev);
+		/* Can't do transmit offload of vlan without hw vlan */
+		dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
+					| NETIF_F_ALL_CSUM);
+	}
 }
-#endif
 
 /* Amount of required worst case padding in rx buffer */
 static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
@@ -1635,9 +1626,7 @@
 	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
 			   sky2->tx_ring_size - 1);
 
-#ifdef SKY2_VLAN_TAG_USED
-	sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
-#endif
+	sky2_vlan_mode(sky2->netdev);
 
 	sky2_rx_start(sky2);
 }
@@ -1780,7 +1769,7 @@
 	}
 
 	ctrl = 0;
-#ifdef SKY2_VLAN_TAG_USED
+
 	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
 	if (vlan_tx_tag_present(skb)) {
 		if (!le) {
@@ -1792,7 +1781,6 @@
 		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
 		ctrl |= INS_VLAN;
 	}
-#endif
 
 	/* Handle TCP checksum offload */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2432,11 +2420,8 @@
 	struct sk_buff *skb = NULL;
 	u16 count = (status & GMR_FS_LEN) >> 16;
 
-#ifdef SKY2_VLAN_TAG_USED
-	/* Account for vlan tag */
-	if (sky2->vlgrp && (status & GMR_FS_VLAN))
-		count -= VLAN_HLEN;
-#endif
+	if (status & GMR_FS_VLAN)
+		count -= VLAN_HLEN;	/* Account for vlan tag */
 
 	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
 		     "rx slot %u status 0x%x len %d\n",
@@ -2504,17 +2489,9 @@
 static inline void sky2_skb_rx(const struct sky2_port *sky2,
 			       u32 status, struct sk_buff *skb)
 {
-#ifdef SKY2_VLAN_TAG_USED
-	u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
-	if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
-		if (skb->ip_summed == CHECKSUM_NONE)
-			vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
-		else
-			vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
-					 vlan_tag, skb);
-		return;
-	}
-#endif
+	if (status & GMR_FS_VLAN)
+		__vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
 	if (skb->ip_summed == CHECKSUM_NONE)
 		netif_receive_skb(skb);
 	else
@@ -2631,7 +2608,6 @@
 				goto exit_loop;
 			break;
 
-#ifdef SKY2_VLAN_TAG_USED
 		case OP_RXVLAN:
 			sky2->rx_tag = length;
 			break;
@@ -2639,7 +2615,6 @@
 		case OP_RXCHKSVLAN:
 			sky2->rx_tag = length;
 			/* fall through */
-#endif
 		case OP_RXCHKS:
 			if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
 				sky2_rx_checksum(sky2, status);
@@ -3042,6 +3017,10 @@
 			| SKY2_HW_NEW_LE
 			| SKY2_HW_AUTO_TX_SUM
 			| SKY2_HW_ADV_POWER_CTL;
+
+		/* The workaround for FE+ status conflicts with VLAN tag detection. */
+		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			hw->flags |= SKY2_HW_VLAN_BROKEN;
 		break;
 
 	case CHIP_ID_YUKON_SUPR:
@@ -3411,18 +3390,15 @@
 		u32 modes = SUPPORTED_10baseT_Half
 			| SUPPORTED_10baseT_Full
 			| SUPPORTED_100baseT_Half
-			| SUPPORTED_100baseT_Full
-			| SUPPORTED_Autoneg | SUPPORTED_TP;
+			| SUPPORTED_100baseT_Full;
 
 		if (hw->flags & SKY2_HW_GIGABIT)
 			modes |= SUPPORTED_1000baseT_Half
 				| SUPPORTED_1000baseT_Full;
 		return modes;
 	} else
-		return  SUPPORTED_1000baseT_Half
-			| SUPPORTED_1000baseT_Full
-			| SUPPORTED_Autoneg
-			| SUPPORTED_FIBRE;
+		return SUPPORTED_1000baseT_Half
+			| SUPPORTED_1000baseT_Full;
 }
 
 static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3436,9 +3412,11 @@
 	if (sky2_is_copper(hw)) {
 		ecmd->port = PORT_TP;
 		ecmd->speed = sky2->speed;
+		ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_TP;
 	} else {
 		ecmd->speed = SPEED_1000;
 		ecmd->port = PORT_FIBRE;
+		ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_FIBRE;
 	}
 
 	ecmd->advertising = sky2->advertising;
@@ -3455,8 +3433,19 @@
 	u32 supported = sky2_supported_modes(hw);
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		if (ecmd->advertising & ~supported)
+			return -EINVAL;
+
+		if (sky2_is_copper(hw))
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_TP |
+					    ADVERTISED_Autoneg;
+		else
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_FIBRE |
+					    ADVERTISED_Autoneg;
+
 		sky2->flags |= SKY2_FLAG_AUTO_SPEED;
-		ecmd->advertising = supported;
 		sky2->duplex = -1;
 		sky2->speed = -1;
 	} else {
@@ -3500,8 +3489,6 @@
 		sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
 	}
 
-	sky2->advertising = ecmd->advertising;
-
 	if (netif_running(dev)) {
 		sky2_phy_reinit(sky2);
 		sky2_set_multicast(dev);
@@ -4229,15 +4216,28 @@
 static int sky2_set_flags(struct net_device *dev, u32 data)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
-	u32 supported =
-		(sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+	unsigned long old_feat = dev->features;
+	u32 supported = 0;
 	int rc;
 
+	if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
+		supported |= ETH_FLAG_RXHASH;
+
+	if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
+		supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+
+	printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
+	       supported, data);
+
 	rc = ethtool_op_set_flags(dev, data, supported);
 	if (rc)
 		return rc;
 
-	rx_set_rss(dev);
+	if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
+		rx_set_rss(dev);
+
+	if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
+		sky2_vlan_mode(dev);
 
 	return 0;
 }
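With the vlan_group plumbing gone, sky2 derives everything from dev->features: ethtool_op_set_flags() applies the request against the advertised `supported` mask, and XOR-ing the old and new feature words tells the driver exactly which capabilities flipped so only the affected hardware state is reprogrammed. A small sketch of that XOR-delta idiom; the helper and the pr_debug strings are illustrative, not driver code:

/* Sketch of reacting only to feature bits that actually changed. */
#include <linux/netdevice.h>

static void react_to_feature_change(struct net_device *dev,
				    unsigned long old_feat)
{
	unsigned long changed = old_feat ^ dev->features;

	if (changed & NETIF_F_RXHASH)
		pr_debug("%s: RXHASH toggled, reprogram RSS\n", dev->name);

	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
		pr_debug("%s: VLAN offload toggled, rewrite MAC registers\n",
			 dev->name);
}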
@@ -4273,6 +4273,7 @@
 	.get_sset_count = sky2_get_sset_count,
 	.get_ethtool_stats = sky2_get_ethtool_stats,
 	.set_flags	= sky2_set_flags,
+	.get_flags	= ethtool_op_get_flags,
 };
 
 #ifdef CONFIG_SKY2_DEBUG
@@ -4554,9 +4555,6 @@
 	.ndo_change_mtu		= sky2_change_mtu,
 	.ndo_tx_timeout		= sky2_tx_timeout,
 	.ndo_get_stats64	= sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= sky2_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= sky2_netpoll,
 #endif
@@ -4572,9 +4570,6 @@
 	.ndo_change_mtu		= sky2_change_mtu,
 	.ndo_tx_timeout		= sky2_tx_timeout,
 	.ndo_get_stats64	= sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= sky2_vlan_rx_register,
-#endif
   },
 };
 
@@ -4625,7 +4620,8 @@
 	sky2->port = port;
 
 	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
-		| NETIF_F_TSO  | NETIF_F_GRO;
+		| NETIF_F_TSO | NETIF_F_GRO;
+
 	if (highmem)
 		dev->features |= NETIF_F_HIGHDMA;
 
@@ -4633,13 +4629,8 @@
 	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
 		dev->features |= NETIF_F_RXHASH;
 
-#ifdef SKY2_VLAN_TAG_USED
-	/* The workaround for FE+ status conflicts with VLAN tag detection. */
-	if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
-	      sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+	if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
 		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	}
-#endif
 
 	/* read the mac address */
 	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 80bdc40..6861b0e 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2236,11 +2236,8 @@
 	u16		     rx_pending;
 	u16		     rx_data_size;
 	u16		     rx_nfrags;
-
-#ifdef SKY2_VLAN_TAG_USED
 	u16		     rx_tag;
-	struct vlan_group    *vlgrp;
-#endif
+
 	struct {
 		unsigned long last;
 		u32	mac_rp;
@@ -2284,6 +2281,7 @@
 #define SKY2_HW_AUTO_TX_SUM	0x00000040	/* new IP decode for Tx */
 #define SKY2_HW_ADV_POWER_CTL	0x00000080	/* additional PHY power regs */
 #define SKY2_HW_RSS_BROKEN	0x00000100
+#define SKY2_HW_VLAN_BROKEN     0x00000200
 
 	u8	     	     chip_id;
 	u8		     chip_rev;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 64bfdae..d70bde9 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1178,6 +1178,11 @@
 	smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
 	smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
 
+	/* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
+	spin_lock_irq(&pdata->mac_lock);
+	smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
+	spin_unlock_irq(&pdata->mac_lock);
+
 	/* Make sure EEPROM has finished loading before setting GPIO_CFG */
 	timeout = 50;
 	while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3..0e5f031 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@
 
 	priv->hw = device;
 
-	if (device_can_wakeup(priv->device))
+	if (device_can_wakeup(priv->device)) {
 		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+		enable_irq_wake(dev->irq);
+	}
 
 	return 0;
 }
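Pairing device_can_wakeup() with enable_irq_wake() marks the MAC interrupt as a system wakeup source, so a Wake-on-LAN magic frame arriving while the host sleeps can resume it; the default WoL policy is recorded in priv->wolopts. A hedged sketch of that pairing, with struct my_priv and my_setup_wol() as hypothetical names:

/* Sketch of arming an IRQ as a wakeup source when the platform allows it. */
#include <linux/device.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>

struct my_priv {
	struct device *device;
	unsigned int wolopts;
};

static void my_setup_wol(struct my_priv *priv, int irq)
{
	if (!device_can_wakeup(priv->device))
		return;

	priv->wolopts = WAKE_MAGIC;	/* default to magic-packet wake */
	if (enable_irq_wake(irq))
		dev_warn(priv->device, "IRQ %d cannot wake the system\n", irq);
}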
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 296000b..3397618 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -12,7 +12,7 @@
 /*
  * RX HW/SW interaction overview
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of RX communication channels betwean driver and NIC.
+ * There are 2 types of RX communication channels between driver and NIC.
  * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
  * traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds
  * info about buffer's location, size and ID. An ID field is used to identify a
@@ -821,7 +821,7 @@
 		}
 
 		/* use PMF to accept first MAC_MCST_NUM (15) addresses */
-		/* TBD: sort addreses and write them in ascending order
+		/* TBD: sort addresses and write them in ascending order
 		 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
 		 * multicast frames throu IMF */
 		/* accept the rest of addresses throu IMF */
@@ -1346,7 +1346,7 @@
 /*
  * TX HW/SW interaction overview
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of TX communication channels betwean driver and NIC.
+ * There are 2 types of TX communication channels between driver and NIC.
  * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
  * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
  *
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f..06c0e503 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -60,12 +60,6 @@
 #define BAR_0	0
 #define BAR_2	2
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define TG3_VLAN_TAG_USED 1
-#else
-#define TG3_VLAN_TAG_USED 0
-#endif
-
 #include "tg3.h"
 
 #define DRV_MODULE_NAME		"tg3"
@@ -134,9 +128,6 @@
 				 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
-#define TG3_RX_DMA_ALIGN		16
-#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
-
 #define TG3_DMA_BYTE_ENAB		64
 
 #define TG3_RX_STD_DMA_SZ		1536
@@ -4722,8 +4713,6 @@
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
 		u32 opaque_key, desc_idx, *post_ptr;
-		bool hw_vlan __maybe_unused = false;
-		u16 vtag __maybe_unused = 0;
 
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4771,12 @@
 			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
-			copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+			copy_skb = netdev_alloc_skb(tp->dev, len +
 						    TG3_RAW_IP_ALIGN);
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
-			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
+			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 			skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4803,11 @@
 		}
 
 		if (desc->type_flags & RXD_FLAG_VLAN &&
-		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
-			vtag = desc->err_vlan & RXD_VLAN_MASK;
-#if TG3_VLAN_TAG_USED
-			if (tp->vlgrp)
-				hw_vlan = true;
-			else
-#endif
-			{
-				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
-						    __skb_push(skb, VLAN_HLEN);
+		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+			__vlan_hwaccel_put_tag(skb,
+					       desc->err_vlan & RXD_VLAN_MASK);
 
-				memmove(ve, skb->data + VLAN_HLEN,
-					ETH_ALEN * 2);
-				ve->h_vlan_proto = htons(ETH_P_8021Q);
-				ve->h_vlan_TCI = htons(vtag);
-			}
-		}
-
-#if TG3_VLAN_TAG_USED
-		if (hw_vlan)
-			vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
-		else
-#endif
-			napi_gro_receive(&tnapi->napi, skb);
+		napi_gro_receive(&tnapi->napi, skb);
 
 		received++;
 		budget--;
@@ -5740,11 +5710,9 @@
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
 	}
 
-#if TG3_VLAN_TAG_USED
 	if (vlan_tx_tag_present(skb))
 		base_flags |= (TXD_FLAG_VLAN |
 			       (vlan_tx_tag_get(skb) << 16));
-#endif
 
 	len = skb_headlen(skb);
 
@@ -5986,11 +5954,10 @@
 			}
 		}
 	}
-#if TG3_VLAN_TAG_USED
+
 	if (vlan_tx_tag_present(skb))
 		base_flags |= (TXD_FLAG_VLAN |
 			       (vlan_tx_tag_get(skb) << 16));
-#endif
 
 	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -9532,17 +9499,10 @@
 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
 				  RX_MODE_KEEP_VLAN_TAG);
 
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
 	 * flag clear.
 	 */
-#if TG3_VLAN_TAG_USED
-	if (!tp->vlgrp &&
-	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
-		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
-#else
-	/* By definition, VLAN is disabled always in this
-	 * case.
-	 */
 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
 #endif
@@ -11198,7 +11158,9 @@
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -11214,7 +11176,9 @@
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -11230,31 +11194,6 @@
 	return -EOPNOTSUPP;
 }
 
-#if TG3_VLAN_TAG_USED
-static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-	struct tg3 *tp = netdev_priv(dev);
-
-	if (!netif_running(dev)) {
-		tp->vlgrp = grp;
-		return;
-	}
-
-	tg3_netif_stop(tp);
-
-	tg3_full_lock(tp, 0);
-
-	tp->vlgrp = grp;
-
-	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
-	__tg3_set_rx_mode(dev);
-
-	tg3_netif_start(tp);
-
-	tg3_full_unlock(tp);
-}
-#endif
-
 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 {
 	struct tg3 *tp = netdev_priv(dev);
@@ -13066,9 +13005,7 @@
 
 static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
 {
-#if TG3_VLAN_TAG_USED
 	dev->vlan_features |= flags;
-#endif
 }
 
 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13861,11 +13798,11 @@
 	else
 		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
 
-	tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+	tp->rx_offset = NET_IP_ALIGN;
 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
-		tp->rx_offset -= NET_IP_ALIGN;
+		tp->rx_offset = 0;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 		tp->rx_copy_thresh = ~(u16)0;
 #endif
@@ -14629,9 +14566,6 @@
 	.ndo_do_ioctl		= tg3_ioctl,
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -14648,9 +14582,6 @@
 	.ndo_do_ioctl		= tg3_ioctl,
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -14700,9 +14631,7 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
 
 	tp = netdev_priv(dev);
 	tp->pdev = pdev;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d9..f528243 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2808,9 +2808,6 @@
 	u32				rx_std_max_post;
 	u32				rx_offset;
 	u32				rx_pkt_map_sz;
-#if TG3_VLAN_TAG_USED
-	struct vlan_group		*vlgrp;
-#endif
 
 
 	/* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5..7cb301d 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -142,14 +142,6 @@
 MODULE_AUTHOR("Tilera");
 MODULE_LICENSE("GPL");
 
-
-#define IS_MULTICAST(mac_addr) \
-	(((u8 *)(mac_addr))[0] & 0x01)
-
-#define IS_BROADCAST(mac_addr) \
-	(((u16 *)(mac_addr))[0] == 0xffff)
-
-
 /*
  * Queue of incoming packets for a specific cpu and device.
  *
@@ -795,7 +787,7 @@
 		/*
 		 * FIXME: Implement HW multicast filter.
 		 */
-		if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+		if (is_unicast_ether_addr(buf)) {
 			/* Filter packets not for our address. */
 			const u8 *mine = dev->dev_addr;
 			filter = compare_ether_addr(mine, buf);
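The hand-rolled IS_MULTICAST/IS_BROADCAST macros give way to the generic helpers in <linux/etherdevice.h>: is_unicast_ether_addr() already rules out both multicast and broadcast destinations, and compare_ether_addr() returns 0 on a match. A short illustration of those helpers; frame_is_for_us() is a made-up wrapper, not tilepro code:

/* Small demonstration of the etherdevice.h address helpers used above. */
#include <linux/types.h>
#include <linux/etherdevice.h>

static bool frame_is_for_us(const u8 *dst, const u8 *our_addr)
{
	if (!is_unicast_ether_addr(dst))
		return true;			/* accept multicast/broadcast */

	return compare_ether_addr(dst, our_addr) == 0;	/* 0 means equal */
}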
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7599c45..b100bd5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1309,7 +1309,7 @@
 		break;
 
 	case SIOCGIFHWADDR:
-		/* Get hw addres */
+		/* Get hw address */
 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
 		if (copy_to_user(argp, &ifr, ifreq_len))
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 73a3e0d..715e7b4 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2032,7 +2032,7 @@
 			netdev_for_each_mc_addr(ha, dev) {
 				/* Only support group multicast for now.
 				 */
-				if (!(ha->addr[0] & 1))
+				if (!is_multicast_ether_addr(ha->addr))
 					continue;
 
 				/* Ask CPM to run CRC and set bit in
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 593c104..7113168 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
 /*
  * cdc_ncm.c
  *
- * Copyright (C) ST-Ericsson 2010
+ * Copyright (C) ST-Ericsson 2010-2011
  * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
  * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
  *
@@ -54,7 +54,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define	DRIVER_VERSION				"30-Nov-2010"
+#define	DRIVER_VERSION				"7-Feb-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
@@ -77,6 +77,9 @@
  */
 #define	CDC_NCM_DPT_DATAGRAMS_MAX		32
 
+/* Maximum number of IN datagrams in NTB */
+#define	CDC_NCM_DPT_DATAGRAMS_IN_MAX		0 /* unlimited */
+
 /* Restart the timer, if amount of datagrams is less than given value */
 #define	CDC_NCM_RESTART_TIMER_DATAGRAM_CNT	3
 
@@ -85,11 +88,6 @@
 	(sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
 	(CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
 
-struct connection_speed_change {
-	__le32	USBitRate; /* holds 3GPP downlink value, bits per second */
-	__le32	DSBitRate; /* holds 3GPP uplink value, bits per second */
-} __attribute__ ((packed));
-
 struct cdc_ncm_data {
 	struct usb_cdc_ncm_nth16 nth16;
 	struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@
 {
 	struct usb_cdc_notification req;
 	u32 val;
-	__le16 max_datagram_size;
 	u8 flags;
 	u8 iface_no;
 	int err;
+	u16 ntb_fmt_supported;
 
 	iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
 
@@ -223,6 +221,9 @@
 	ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
 	ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
 	ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+	/* devices prior to NCM Errata shall set this field to zero */
+	ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+	ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
 
 	if (ctx->func_desc != NULL)
 		flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@
 
 	pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
 		 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
-		 "wNdpOutAlignment=%u flags=0x%x\n",
+		 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
 		 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
-		 ctx->tx_ndp_modulus, flags);
+		 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
 
-	/* max count of tx datagrams without terminating NULL entry */
-	ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+	/* max count of tx datagrams */
+	if ((ctx->tx_max_datagrams == 0) ||
+			(ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
+		ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
 
 	/* verify maximum size of received NTB in bytes */
-	if ((ctx->rx_max <
-	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
-	    (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+	if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
+		pr_debug("Using min receive length=%d\n",
+						USB_CDC_NCM_NTB_MIN_IN_SIZE);
+		ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+	}
+
+	if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
 		pr_debug("Using default maximum receive length=%d\n",
 						CDC_NCM_NTB_MAX_SIZE_RX);
 		ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
 	}
 
+	/* inform device about NTB input size changes */
+	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
+		req.wValue = 0;
+		req.wIndex = cpu_to_le16(iface_no);
+
+		if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
+			struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
+
+			req.wLength = 8;
+			ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+			ndp_in_sz.wNtbInMaxDatagrams =
+					cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
+			ndp_in_sz.wReserved = 0;
+			err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
+									1000);
+		} else {
+			__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+			req.wLength = 4;
+			err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
+								NULL, 1000);
+		}
+
+		if (err)
+			pr_debug("Setting NTB Input Size failed\n");
+	}
+
 	/* verify maximum size of transmitted NTB in bytes */
 	if ((ctx->tx_max <
 	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@
 	/* additional configuration */
 
 	/* set CRC Mode */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_SET_CRC_MODE;
-	req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = 0;
+	if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_CRC_MODE;
+		req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = 0;
 
-	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-	if (err)
-		pr_debug("Setting CRC mode off failed\n");
+		err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+		if (err)
+			pr_debug("Setting CRC mode off failed\n");
+	}
 
-	/* set NTB format */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
-	req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = 0;
+	/* set NTB format, if both formats are supported */
+	if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+		req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = 0;
 
-	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-	if (err)
-		pr_debug("Setting NTB format to 16-bit failed\n");
+		err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+		if (err)
+			pr_debug("Setting NTB format to 16-bit failed\n");
+	}
+
+	ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
 
 	/* set Max Datagram Size (MTU) */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
-	req.wValue = 0;
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = cpu_to_le16(2);
+	if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
+		__le16 max_datagram_size;
+		u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
 
-	err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
-	if (err) {
-		pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
-			 CDC_NCM_MIN_DATAGRAM_SIZE);
-		/* use default */
-		ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-	} else {
-		ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+		req.wValue = 0;
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = cpu_to_le16(2);
 
-		if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
-			ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-		else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-			ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+		err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
+									1000);
+		if (err) {
+			pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
+						CDC_NCM_MIN_DATAGRAM_SIZE);
+		} else {
+			ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+			/* Check Eth descriptor value */
+			if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
+				if (ctx->max_datagram_size > eth_max_sz)
+					ctx->max_datagram_size = eth_max_sz;
+			} else {
+				if (ctx->max_datagram_size >
+						CDC_NCM_MAX_DATAGRAM_SIZE)
+					ctx->max_datagram_size =
+						CDC_NCM_MAX_DATAGRAM_SIZE;
+			}
+
+			if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+				ctx->max_datagram_size =
+					CDC_NCM_MIN_DATAGRAM_SIZE;
+
+			/* if value changed, update device */
+			req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+			req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
+			req.wValue = 0;
+			req.wIndex = cpu_to_le16(iface_no);
+			req.wLength = 2;
+			max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+
+			err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
+								0, NULL, 1000);
+			if (err)
+				pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
+		}
+
 	}
 
 	if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
@@ -466,19 +540,13 @@
 
 			ctx->ether_desc =
 					(const struct usb_cdc_ether_desc *)buf;
-
 			dev->hard_mtu =
 				le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
 
-			if (dev->hard_mtu <
-			    (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
-				dev->hard_mtu =
-					CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
-
-			else if (dev->hard_mtu >
-				 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
-				dev->hard_mtu =
-					CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+			if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
+				dev->hard_mtu =	CDC_NCM_MIN_DATAGRAM_SIZE;
+			else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
+				dev->hard_mtu =	CDC_NCM_MAX_DATAGRAM_SIZE;
 			break;
 
 		case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@
 	u32 offset;
 	u32 last_offset;
 	u16 n = 0;
-	u8 timeout = 0;
+	u8 ready2send = 0;
 
 	/* if there is a remaining skb, it gets priority */
 	if (skb != NULL)
 		swap(skb, ctx->tx_rem_skb);
 	else
-		timeout = 1;
+		ready2send = 1;
 
 	/*
 	 * +----------------+
@@ -682,9 +750,10 @@
 
 	for (; n < ctx->tx_max_datagrams; n++) {
 		/* check if end of transmit buffer is reached */
-		if (offset >= ctx->tx_max)
+		if (offset >= ctx->tx_max) {
+			ready2send = 1;
 			break;
-
+		}
 		/* compute maximum buffer size */
 		rem = ctx->tx_max - offset;
 
@@ -711,9 +780,7 @@
 				}
 				ctx->tx_rem_skb = skb;
 				skb = NULL;
-
-				/* loop one more time */
-				timeout = 1;
+				ready2send = 1;
 			}
 			break;
 		}
@@ -756,7 +823,7 @@
 		ctx->tx_curr_last_offset = last_offset;
 		goto exit_no_skb;
 
-	} else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+	} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
 		/* wait for more frames */
 		/* push variables */
 		ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@
 					cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
 	ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
 	ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
-	ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+	ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
 							ctx->tx_ndp_modulus);
 
 	memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@
 	rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
 					sizeof(struct usb_cdc_ncm_dpe16));
 	ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
-	ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+	ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
 
-	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
+	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
 						&(ctx->tx_ncm.ndp16),
 						sizeof(ctx->tx_ncm.ndp16));
 
-	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
+	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
 					sizeof(ctx->tx_ncm.ndp16),
 					&(ctx->tx_ncm.dpe16),
 					(ctx->tx_curr_frame_num + 1) *
@@ -868,15 +935,19 @@
 	if (ctx->tx_timer_pending != 0) {
 		ctx->tx_timer_pending--;
 		restart = 1;
-	} else
+	} else {
 		restart = 0;
+	}
 
 	spin_unlock(&ctx->mtx);
 
-	if (restart)
+	if (restart) {
+		spin_lock(&ctx->mtx);
 		cdc_ncm_tx_timeout_start(ctx);
-	else if (ctx->netdev != NULL)
+		spin_unlock(&ctx->mtx);
+	} else if (ctx->netdev != NULL) {
 		usbnet_start_xmit(NULL, ctx->netdev);
+	}
 }
 
 static struct sk_buff *
@@ -900,7 +971,6 @@
 	skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
 	if (ctx->tx_curr_skb != NULL)
 		need_timer = 1;
-	spin_unlock(&ctx->mtx);
 
 	/* Start timer, if there is a remaining skb */
 	if (need_timer)
@@ -908,6 +978,8 @@
 
 	if (skb_out)
 		dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+	spin_unlock(&ctx->mtx);
 	return skb_out;
 
 error:
@@ -956,7 +1028,7 @@
 		goto error;
 	}
 
-	temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+	temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
 	if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
 		pr_debug("invalid DPT16 index\n");
 		goto error;
@@ -1020,14 +1092,16 @@
 		if (((offset + temp) > actlen) ||
 		    (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
 			pr_debug("invalid frame detected (ignored)"
-				"offset[%u]=%u, length=%u, skb=%p\n",
-							x, offset, temp, skb);
+					"offset[%u]=%u, length=%u, skb=%p\n",
+					x, offset, temp, skb_in);
 			if (!x)
 				goto error;
 			break;
 
 		} else {
 			skb = skb_clone(skb_in, GFP_ATOMIC);
+			if (!skb)
+				goto error;
 			skb->len = temp;
 			skb->data = ((u8 *)skb_in->data) + offset;
 			skb_set_tail_pointer(skb, temp);
@@ -1041,10 +1115,10 @@
 
 static void
 cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
-		     struct connection_speed_change *data)
+		     struct usb_cdc_speed_change *data)
 {
-	uint32_t rx_speed = le32_to_cpu(data->USBitRate);
-	uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+	uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+	uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
 	/*
 	 * Currently the USB-NET API does not support reporting the actual
@@ -1085,7 +1159,7 @@
 	/* test for split data in 8-byte chunks */
 	if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
 		cdc_ncm_speed_change(ctx,
-		      (struct connection_speed_change *)urb->transfer_buffer);
+		      (struct usb_cdc_speed_change *)urb->transfer_buffer);
 		return;
 	}
 
@@ -1113,12 +1187,12 @@
 		break;
 
 	case USB_CDC_NOTIFY_SPEED_CHANGE:
-		if (urb->actual_length <
-		    (sizeof(*event) + sizeof(struct connection_speed_change)))
+		if (urb->actual_length < (sizeof(*event) +
+					sizeof(struct usb_cdc_speed_change)))
 			set_bit(EVENT_STS_SPLIT, &dev->flags);
 		else
 			cdc_ncm_speed_change(ctx,
-				(struct connection_speed_change *) &event[1]);
+				(struct usb_cdc_speed_change *) &event[1]);
 		break;
 
 	default:
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e..5002f5b 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@
 	.driver_info = (unsigned long)&dm9601_info,
 	 },
 	{
+	 USB_DEVICE(0x0fe6, 0x9700),	/* DM9601 USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	 },
+	{
 	 USB_DEVICE(0x0a46, 0x9000),	/* DM9000E */
 	 .driver_info = (unsigned long)&dm9601_info,
 	 },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fce..6d83812 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-	struct hso_tiocmget *tiocmget = serial->tiocmget;
+	struct hso_tiocmget *tiocmget;
+	if (!serial)
+		return;
+	tiocmget = serial->tiocmget;
 	if (tiocmget) {
-		if (tiocmget->urb) {
-			usb_free_urb(tiocmget->urb);
-			tiocmget->urb = NULL;
-		}
+		usb_free_urb(tiocmget->urb);
+		tiocmget->urb = NULL;
 		serial->tiocmget = NULL;
 		kfree(tiocmget);
-
 	}
 }
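The reworked hso_free_tiomget() leans on the fact that several kernel free routines are NULL-safe: usb_free_urb(NULL) and kfree(NULL) are both no-ops, so only the container pointer itself needs a guard. A hedged sketch of a teardown helper built on that property; struct my_ctx is a hypothetical container:

/* Sketch of a teardown helper relying on NULL-safe free routines. */
#include <linux/slab.h>
#include <linux/usb.h>

struct my_ctx {
	struct urb *urb;
	void *buf;
};

static void my_ctx_free(struct my_ctx *ctx)
{
	if (!ctx)
		return;

	usb_free_urb(ctx->urb);	/* fine even if ctx->urb is NULL */
	kfree(ctx->buf);	/* kfree(NULL) is a no-op */
	kfree(ctx);
}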
 
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643..7dc8497 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@
 
 	if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
 		err("Firmware too big: %zu", fw->size);
+		release_firmware(fw);
 		return -ENOSPC;
 	}
 	data_len = fw->size;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a416..95c41d5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@
 		if (urb != NULL) {
 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
 			status = usb_autopm_get_interface(dev->intf);
-			if (status < 0)
+			if (status < 0) {
+				usb_free_urb(urb);
 				goto fail_lowmem;
+			}
 			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
 				resched = 0;
 			usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index cab96ad..09cac70 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -898,7 +898,7 @@
 	set_mii_flow_control(vptr);
 
 	/*
-	   Check if new status is consisent with current status
+	   Check if new status is consistent with current status
 	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
 	       (mii_status==curr_status)) {
 	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e4..82dba5a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@
 	}
 }
 
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+	napi_enable(&vi->napi);
+
+	/* If all buffers were filled by other side before we napi_enabled, we
+	 * won't get another interrupt, so process any outstanding packets
+	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
+	 * We synchronize against interrupts via NAPI_STATE_SCHED */
+	if (napi_schedule_prep(&vi->napi)) {
+		virtqueue_disable_cb(vi->rvq);
+		__napi_schedule(&vi->napi);
+	}
+}
+
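virtnet_napi_enable() closes a classic edge-triggered race: if the other side filled every buffer before NAPI came up, no further interrupt will arrive, so one poll is scheduled by hand right after enabling, with napi_schedule_prep() doubling as the NAPI_STATE_SCHED synchronisation against a genuine interrupt racing in. A condensed restatement of that pattern with hypothetical names; my_mask_notifications() stands in for virtqueue_disable_cb():

/* Generic "enable, then kick one poll" pattern, restating the helper above. */
#include <linux/netdevice.h>

static void my_mask_notifications(void *hw)
{
	/* device-specific: suppress further event notifications */
}

static void my_napi_enable(struct napi_struct *napi, void *hw)
{
	napi_enable(napi);

	/* An event that arrived while NAPI was off produced no interrupt, so
	 * force one poll.  napi_schedule_prep() also wins any race with a
	 * genuine interrupt via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(napi)) {
		my_mask_notifications(hw);
		__napi_schedule(napi);
	}
}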
 static void refill_work(struct work_struct *work)
 {
 	struct virtnet_info *vi;
@@ -454,7 +468,7 @@
 	vi = container_of(work, struct virtnet_info, refill.work);
 	napi_disable(&vi->napi);
 	still_empty = !try_fill_recv(vi, GFP_KERNEL);
-	napi_enable(&vi->napi);
+	virtnet_napi_enable(vi);
 
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
-	napi_enable(&vi->napi);
-
-	/* If all buffers were filled by other side before we napi_enabled, we
-	 * won't get another interrupt, so process any outstanding packets
-	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
-	 * We synchronize against interrupts via NAPI_STATE_SCHED */
-	if (napi_schedule_prep(&vi->napi)) {
-		virtqueue_disable_cb(vi->rvq);
-		__napi_schedule(&vi->napi);
-	}
+	virtnet_napi_enable(vi);
 	return 0;
 }
 
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b..cc14b4a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@
 static int enable_mq = 1;
 static int irq_share_mode;
 
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+
 /*
  *    Enable/Disable the given intr
  */
@@ -139,9 +142,13 @@
 {
 	u32 ret;
 	int i;
+	unsigned long flags;
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
 	adapter->link_speed = ret >> 16;
 	if (ret & 1) { /* Link is up. */
 		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@
 
 	/* Check if there is an error on xmit/recv queues */
 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+		spin_lock(&adapter->cmd_lock);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_GET_QUEUE_STATUS);
+		spin_unlock(&adapter->cmd_lock);
 
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@
 				   skb_transport_header(skb))->doff * 4;
 		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
 	} else {
-		unsigned int pull_size;
-
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
 			if (ctx->ipv4) {
 				struct iphdr *iph = (struct iphdr *)
 						    skb_network_header(skb);
-				if (iph->protocol == IPPROTO_TCP) {
-					pull_size = ctx->eth_ip_hdr_size +
-						    sizeof(struct tcphdr);
-
-					if (unlikely(!pskb_may_pull(skb,
-								pull_size))) {
-						goto err;
-					}
+				if (iph->protocol == IPPROTO_TCP)
 					ctx->l4_hdr_size = ((struct tcphdr *)
 					   skb_transport_header(skb))->doff * 4;
-				} else if (iph->protocol == IPPROTO_UDP) {
+				else if (iph->protocol == IPPROTO_UDP)
+					/*
+					 * Use tcp header size so that bytes to
+					 * be copied are more than required by
+					 * the device.
+					 */
 					ctx->l4_hdr_size =
-							sizeof(struct udphdr);
-				} else {
+							sizeof(struct tcphdr);
+				else
 					ctx->l4_hdr_size = 0;
-				}
 			} else {
 				/* for simplicity, don't copy L4 headers */
 				ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct Vmxnet3_DriverShared *shared = adapter->shared;
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	if (grp) {
 		/* add vlan rx stripping. */
 		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
 			int i;
-			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 			adapter->vlan_grp = grp;
 
-			/* update FEATURES to device */
-			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-					       VMXNET3_CMD_UPDATE_FEATURE);
 			/*
 			 *  Clear entire vfTable; then enable untagged pkts.
 			 *  Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@
 				vfTable[i] = 0;
 
 			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		} else {
 			printk(KERN_ERR "%s: vlan_rx_register when device has "
 			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@
 				 */
 				vfTable[i] = 0;
 			}
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
-			/* update FEATURES to device */
-			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
-			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-					       VMXNET3_CMD_UPDATE_FEATURE);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		}
 	}
 }
@@ -1939,10 +1938,13 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1951,10 +1953,13 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1985,6 +1990,7 @@
 vmxnet3_set_mc(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 	struct Vmxnet3_RxFilterConf *rxConf =
 					&adapter->shared->devRead.rxFilterConf;
 	u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@
 		rxConf->mfTablePA = 0;
 	}
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	if (new_mode != rxConf->rxMode) {
 		rxConf->rxMode = cpu_to_le32(new_mode);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	kfree(new_table);
 }
@@ -2080,10 +2088,8 @@
 		devRead->misc.uptFeatures |= UPT1_F_LRO;
 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
-	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
-	    adapter->vlan_grp) {
+	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-	}
 
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@
 	/* rx filter settings */
 	devRead->rxFilterConf.rxMode = 0;
 	vmxnet3_restore_vlan(adapter);
+	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
 	/* the rest are already zeroed */
 }
 
@@ -2177,6 +2185,7 @@
 {
 	int err, i;
 	u32 ret;
+	unsigned long flags;
 
 	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
 		" ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@
 			       adapter->shared_pa));
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
 			       adapter->shared_pa));
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	if (ret != 0) {
 		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@
 void
 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -2263,12 +2277,15 @@
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
 {
 	int i;
+	unsigned long flags;
 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
 		return 0;
 
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_QUIESCE_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	vmxnet3_disable_all_intrs(adapter);
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@
 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
 	ring0_size = (ring0_size + sz - 1) / sz * sz;
-	ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
 			   sz * sz);
 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
 	comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@
 			break;
 		} else {
 			/* If fails to enable required number of MSI-x vectors
-			 * try enabling 3 of them. One each for rx, tx and event
+			 * try enabling the minimum number of vectors required.
 			 */
 			vectors = vector_threshold;
 			printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@
 	u32 cfg;
 
 	/* intr settings */
+	spin_lock(&adapter->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_GET_CONF_INTR);
 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock(&adapter->cmd_lock);
 	adapter->intr.type = cfg & 0x3;
 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
 
@@ -2755,7 +2774,7 @@
 		 */
 		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
-			    || adapter->num_rx_queues != 2) {
+			    || adapter->num_rx_queues != 1) {
 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
 				printk(KERN_ERR "Number of rx queues : 1\n");
 				adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 
+	spin_lock_init(&adapter->cmd_lock);
 	adapter->shared = pci_alloc_consistent(adapter->pdev,
 			  sizeof(struct Vmxnet3_DriverShared),
 			  &adapter->shared_pa);
@@ -3108,11 +3128,15 @@
 	u8 *arpreq;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
+	unsigned long flags;
 	int i = 0;
 
 	if (!netif_running(netdev))
 		return 0;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_queue[i].napi);
+
 	vmxnet3_disable_all_intrs(adapter);
 	vmxnet3_free_irqs(adapter);
 	vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@
 	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
 								 pmConf));
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	pci_save_state(pdev);
 	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@
 static int
 vmxnet3_resume(struct device *device)
 {
-	int err;
+	int err, i = 0;
+	unsigned long flags;
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@
 
 	pci_enable_wake(pdev, PCI_D0, 0);
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	vmxnet3_alloc_intr_resources(adapter);
 	vmxnet3_request_irqs(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_queue[i].napi);
 	vmxnet3_enable_all_intrs(adapter);
 
 	return 0;
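
The vmxnet3_drv.c hunks above wrap every access to VMXNET3_REG_CMD in the new
adapter->cmd_lock, so concurrent callers (VLAN filtering, multicast updates,
power management, the reset and quiesce paths) cannot interleave a command
write with another caller's result read. A minimal sketch of the pattern,
using a hypothetical helper that the driver does not actually define (it
open-codes the lock at each call site):

	static u32 vmxnet3_issue_cmd(struct vmxnet3_adapter *adapter, u32 cmd)
	{
		unsigned long flags;
		u32 ret;

		/* the _irqsave variant keeps the pattern safe in any context */
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
		ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		return ret;
	}

Commands that return nothing simply skip the read; the locking is the same.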
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8..81254be 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@
 vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 
 	if (adapter->rxcsum != val) {
 		adapter->rxcsum = val;
@@ -56,8 +57,10 @@
 				adapter->shared->devRead.misc.uptFeatures &=
 				~UPT1_F_RXCSUM;
 
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		}
 	}
 	return 0;
@@ -68,76 +71,78 @@
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_dev_stats[] = {
 	/* description,         offset */
-	{ "TSO pkts tx",        offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
-	{ "TSO bytes tx",       offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
-	{ "ucast pkts tx",      offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
-	{ "ucast bytes tx",     offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
-	{ "mcast pkts tx",      offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
-	{ "mcast bytes tx",     offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
-	{ "bcast pkts tx",      offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
-	{ "bcast bytes tx",     offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
-	{ "pkts tx err",        offsetof(struct UPT1_TxStats, pktsTxError) },
-	{ "pkts tx discard",    offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+	{ "Tx Queue#",        0 },
+	{ "  TSO pkts tx",	offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+	{ "  TSO bytes tx",	offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+	{ "  ucast pkts tx",	offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+	{ "  ucast bytes tx",	offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+	{ "  mcast pkts tx",	offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+	{ "  mcast bytes tx",	offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+	{ "  bcast pkts tx",	offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+	{ "  bcast bytes tx",	offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+	{ "  pkts tx err",	offsetof(struct UPT1_TxStats, pktsTxError) },
+	{ "  pkts tx discard",	offsetof(struct UPT1_TxStats, pktsTxDiscard) },
 };
 
 /* per tq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_driver_stats[] = {
 	/* description,         offset */
-	{"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
-					drop_total) },
-	{ "   too many frags",  offsetof(struct vmxnet3_tq_driver_stats,
-					drop_too_many_frags) },
-	{ "   giant hdr",       offsetof(struct vmxnet3_tq_driver_stats,
-					drop_oversized_hdr) },
-	{ "   hdr err",         offsetof(struct vmxnet3_tq_driver_stats,
-					drop_hdr_inspect_err) },
-	{ "   tso",             offsetof(struct vmxnet3_tq_driver_stats,
-					drop_tso) },
-	{ "ring full",          offsetof(struct vmxnet3_tq_driver_stats,
-					tx_ring_full) },
-	{ "pkts linearized",    offsetof(struct vmxnet3_tq_driver_stats,
-					linearized) },
-	{ "hdr cloned",         offsetof(struct vmxnet3_tq_driver_stats,
-					copy_skb_header) },
-	{ "giant hdr",          offsetof(struct vmxnet3_tq_driver_stats,
-					oversized_hdr) },
+	{"  drv dropped tx total",	offsetof(struct vmxnet3_tq_driver_stats,
+						 drop_total) },
+	{ "     too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+					  drop_too_many_frags) },
+	{ "     giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
+					 drop_oversized_hdr) },
+	{ "     hdr err",	offsetof(struct vmxnet3_tq_driver_stats,
+					 drop_hdr_inspect_err) },
+	{ "     tso",		offsetof(struct vmxnet3_tq_driver_stats,
+					 drop_tso) },
+	{ "  ring full",	offsetof(struct vmxnet3_tq_driver_stats,
+					 tx_ring_full) },
+	{ "  pkts linearized",	offsetof(struct vmxnet3_tq_driver_stats,
+					 linearized) },
+	{ "  hdr cloned",	offsetof(struct vmxnet3_tq_driver_stats,
+					 copy_skb_header) },
+	{ "  giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
+					 oversized_hdr) },
 };
 
 /* per rq stats maintained by the device */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_dev_stats[] = {
-	{ "LRO pkts rx",        offsetof(struct UPT1_RxStats, LROPktsRxOK) },
-	{ "LRO byte rx",        offsetof(struct UPT1_RxStats, LROBytesRxOK) },
-	{ "ucast pkts rx",      offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
-	{ "ucast bytes rx",     offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
-	{ "mcast pkts rx",      offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
-	{ "mcast bytes rx",     offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
-	{ "bcast pkts rx",      offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
-	{ "bcast bytes rx",     offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
-	{ "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
-	{ "pkts rx err",        offsetof(struct UPT1_RxStats, pktsRxError) },
+	{ "Rx Queue#",        0 },
+	{ "  LRO pkts rx",	offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+	{ "  LRO byte rx",	offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+	{ "  ucast pkts rx",	offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+	{ "  ucast bytes rx",	offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+	{ "  mcast pkts rx",	offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+	{ "  mcast bytes rx",	offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+	{ "  bcast pkts rx",	offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+	{ "  bcast bytes rx",	offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+	{ "  pkts rx OOB",	offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+	{ "  pkts rx err",	offsetof(struct UPT1_RxStats, pktsRxError) },
 };
 
 /* per rq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_driver_stats[] = {
 	/* description,         offset */
-	{ "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
-					   drop_total) },
-	{ "   err",            offsetof(struct vmxnet3_rq_driver_stats,
-					drop_err) },
-	{ "   fcs",            offsetof(struct vmxnet3_rq_driver_stats,
-					drop_fcs) },
-	{ "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
-					rx_buf_alloc_failure) },
+	{ "  drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+					     drop_total) },
+	{ "     err",		offsetof(struct vmxnet3_rq_driver_stats,
+					 drop_err) },
+	{ "     fcs",		offsetof(struct vmxnet3_rq_driver_stats,
+					 drop_fcs) },
+	{ "  rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+					  rx_buf_alloc_failure) },
 };
 
 /* global stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
 	/* description,         offset */
-	{ "tx timeout count",   offsetof(struct vmxnet3_adapter,
+	{ "tx timeout count",	offsetof(struct vmxnet3_adapter,
 					 tx_timeout_count) }
 };
 
@@ -151,12 +156,15 @@
 	struct UPT1_TxStats *devTxStats;
 	struct UPT1_RxStats *devRxStats;
 	struct net_device_stats *net_stats = &netdev->stats;
+	unsigned long flags;
 	int i;
 
 	adapter = netdev_priv(netdev);
 
 	/* Collect the dev stats into the shared area */
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	memset(net_stats, 0, sizeof(*net_stats));
 	for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@
 static int
 vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 {
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
-			ARRAY_SIZE(vmxnet3_tq_driver_stats) +
-			ARRAY_SIZE(vmxnet3_rq_dev_stats) +
-			ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+		       adapter->num_tx_queues +
+		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+		       adapter->num_rx_queues +
 			ARRAY_SIZE(vmxnet3_global_stats);
 	default:
 		return -EOPNOTSUPP;
@@ -206,10 +217,16 @@
 }
 
 
+/* Should be multiple of 4 */
+#define NUM_TX_REGS	8
+#define NUM_RX_REGS	12
+
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
-	return 20 * sizeof(u32);
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+		adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
 }
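
With the multiqueue-aware accounting above, the ethtool sizes scale with the
configured queue counts. A worked example, using the entry counts from the
stat tables above (11 + 9 per-TX entries, 11 + 4 per-RX entries, 1 global
entry) and a hypothetical 4-TX/4-RX configuration:

	/*
	 * ETH_SS_STATS count: (11 + 9) * 4 + (11 + 4) * 4 + 1 = 141 counters
	 * register dump:      (NUM_TX_REGS * 4 + NUM_RX_REGS * 4) * sizeof(u32)
	 *                     = (8 * 4 + 12 * 4) * 4 = 320 bytes
	 *
	 * For a single TX/RX queue pair the dump is (8 + 12) * sizeof(u32)
	 * = 80 bytes, matching the old fixed "20 * sizeof(u32)".
	 */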
 
 
@@ -240,29 +257,37 @@
 static void
 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 {
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	if (stringset == ETH_SS_STATS) {
-		int i;
+		int i, j;
+		for (j = 0; j < adapter->num_tx_queues; j++) {
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+				memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+			     i++) {
+				memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+		}
 
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
-			memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
+		for (j = 0; j < adapter->num_rx_queues; j++) {
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+				memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+			     i++) {
+				memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
 		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
-			memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
-			memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
-			memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
+
 		for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
 			memcpy(buf, vmxnet3_global_stats[i].desc,
 				ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
 	u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+	unsigned long flags;
 
 	if (data & ~ETH_FLAG_LRO)
 		return -EOPNOTSUPP;
@@ -292,8 +318,10 @@
 		else
 			adapter->shared->devRead.misc.uptFeatures &=
 							~UPT1_F_LRO;
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	}
 	return 0;
 }
@@ -303,30 +331,41 @@
 			  struct ethtool_stats *stats, u64  *buf)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 	u8 *base;
 	int i;
 	int j = 0;
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	/* this does assume each counter is 64-bit wide */
-/* TODO change this for multiple queues */
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		base = (u8 *)&adapter->tqd_start[j].stats;
+		*buf++ = (u64)j;
+		for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_tq_dev_stats[i].offset);
 
-	base = (u8 *)&adapter->tqd_start[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
+		base = (u8 *)&adapter->tx_queue[j].stats;
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_tq_driver_stats[i].offset);
+	}
 
-	base = (u8 *)&adapter->tx_queue[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		base = (u8 *)&adapter->rqd_start[j].stats;
+		*buf++ = (u64) j;
+		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_rq_dev_stats[i].offset);
 
-	base = (u8 *)&adapter->rqd_start[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
-
-	base = (u8 *)&adapter->rx_queue[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+		base = (u8 *)&adapter->rx_queue[j].stats;
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_rq_driver_stats[i].offset);
+	}
 
 	base = (u8 *)adapter;
 	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *buf = p;
-	int i = 0;
+	int i = 0, j = 0;
 
 	memset(p, 0, vmxnet3_get_regs_len(netdev));
 
@@ -348,31 +387,35 @@
 	/* Update vmxnet3_get_regs_len if we want to dump more registers */
 
 	/* make each ring use multiple of 16 bytes */
-/* TODO change this for multiple queues */
-	buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
-	buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
-	buf[2] = adapter->tx_queue[i].tx_ring.gen;
-	buf[3] = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+		buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+		buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+		buf[j++] = 0;
 
-	buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
-	buf[5] = adapter->tx_queue[i].comp_ring.gen;
-	buf[6] = adapter->tx_queue[i].stopped;
-	buf[7] = 0;
+		buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+		buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+		buf[j++] = adapter->tx_queue[i].stopped;
+		buf[j++] = 0;
+	}
 
-	buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
-	buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
-	buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
-	buf[11] = 0;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+		buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+		buf[j++] = 0;
 
-	buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
-	buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
-	buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
-	buf[15] = 0;
+		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+		buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+		buf[j++] = 0;
 
-	buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
-	buf[17] = adapter->rx_queue[i].comp_ring.gen;
-	buf[18] = 0;
-	buf[19] = 0;
+		buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+		buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+		buf[j++] = 0;
+		buf[j++] = 0;
+	}
+
 }
 
 
@@ -574,6 +617,7 @@
 		      const struct ethtool_rxfh_indir *p)
 {
 	unsigned int i;
+	unsigned long flags;
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
@@ -592,8 +636,10 @@
 	for (i = 0; i < rssConf->indTableSize; i++)
 		rssConf->indTable[i] = p->ring_index[i];
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_RSSIDT);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	return 0;
 
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed..fb5d245 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.0.25.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01001000
+#define VMXNET3_DRIVER_VERSION_NUM      0x01001900
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
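
As the comment above notes, each byte of VMXNET3_DRIVER_VERSION_NUM encodes
one component of the version string, so "1.0.25.0-k" becomes 0x01001900
(0x19 == 25) and the previous "1.0.16.0-k" was 0x01001000. A hypothetical
macro showing the encoding (the driver hard-codes the constant instead):

	#define VMXNET3_VERSION_NUM(a, b, c, d) \
		(((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

	/* VMXNET3_VERSION_NUM(1, 0, 25, 0) == 0x01001900 */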
@@ -289,7 +289,7 @@
 
 #define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
 					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT     3    /* 1 for each : tx, rx and event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */
 
 
 struct vmxnet3_intr {
@@ -317,6 +317,7 @@
 	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
 	struct vlan_group		*vlan_grp;
 	struct vmxnet3_intr		intr;
+	spinlock_t			cmd_lock;
 	struct Vmxnet3_DriverShared	*shared;
 	struct Vmxnet3_PMConf		*pm_conf;
 	struct Vmxnet3_TxQueueDesc	*tqd_start;     /* all tx queue desc */
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f5..228d4f7 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -3690,7 +3690,7 @@
 	if (status != VXGE_HW_OK)
 		goto exit;
 
-	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
+	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
 	    (rts_table !=
 	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
 		*data1 = 0;
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 1ac9b56..c81a651 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4120,6 +4120,7 @@
 	       "hotplug event.\n");
 
 out:
+	release_firmware(fw);
 	return ret;
 }
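
The vxge fix above frees the firmware image on the common exit label, so it is
released on success as well as on every error path. The general lifecycle,
sketched with illustrative names:

	#include <linux/firmware.h>

	static int load_blob(struct device *dev, const char *name)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, name, dev);
		if (ret)
			return ret;

		/* ... consume fw->data / fw->size ... */

		release_firmware(fw);	/* required on every path that got the blob */
		return 0;
	}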
 
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 8c3103f..d48486d 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1695,7 +1695,7 @@
  * struct vxge_hw_device_stats - Contains HW per-device statistics,
  * including hw.
  * @devh: HW device handle.
- * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats.
+ * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
  * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
  *                space.
  * @hw_info_dma_acch: One more DMA handle used subsequently to free the
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 34cff6c..4578e5b 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -125,7 +125,7 @@
 /* Module parameters */
 
 MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
-MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
 MODULE_LICENSE("GPL");
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug,"Enable/disable extra messages");
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f060332..65bc334 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -232,7 +232,7 @@
 			result);
 		goto error;
 	}
-	/* Extract MAC addresss */
+	/* Extract MAC address */
 	ddi = (void *) skb->data;
 	BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
 	d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 17ecaa4..030cbfd 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -186,7 +186,7 @@
  * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
  *
  * This structure will be used to create a device specific poke table
- * to put the device in a consistant state at boot time.
+ * to put the device in a consistent state at boot time.
  *
  * @address: The device address to poke
  *
@@ -703,7 +703,7 @@
  * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
  *     rom after reading the MAC address. This is quite a dirty hack,
  *     if you ask me -- the device requires the bootrom to be
- *     intialized after reading the MAC address.
+ *     initialized after reading the MAC address.
  */
 enum i2400m_bri {
 	I2400M_BRI_SOFT       = 1 << 1,
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 019a74d..09ae4ef 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2294,6 +2294,8 @@
 	int i;
 	bool needreset = false;
 
+	mutex_lock(&sc->lock);
+
 	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
 		if (sc->txqs[i].setup) {
 			txq = &sc->txqs[i];
@@ -2321,6 +2323,8 @@
 		ath5k_reset(sc, NULL, true);
 	}
 
+	mutex_unlock(&sc->lock);
+
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
 		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
 }
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 0064be7..21091c2 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -838,9 +838,9 @@
 	for (i = 0; i < qmax; i++) {
 		err = ath5k_hw_stop_tx_dma(ah, i);
 		/* -EINVAL -> queue inactive */
-		if (err != -EINVAL)
+		if (err && err != -EINVAL)
 			return err;
 	}
 
-	return err;
+	return 0;
 }
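
Before this ath5k change, the loop bailed out after the first queue whose stop
attempt did not return -EINVAL, including the success case (err == 0), so the
remaining queues were never stopped, and an adapter with only inactive queues
ended up returning -EINVAL. The fixed shape, as a self-contained sketch with
an illustrative name:

	static int stop_all_tx_queues(struct ath5k_hw *ah, unsigned int qmax)
	{
		unsigned int i;
		int err;

		for (i = 0; i < qmax; i++) {
			err = ath5k_hw_stop_tx_dma(ah, i);
			/* -EINVAL just means "queue not active" */
			if (err && err != -EINVAL)
				return err;
		}

		return 0;
	}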
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index e5f2b96..a702817 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -86,7 +86,7 @@
 	if (!ah->ah_bwmode) {
 		dur = ieee80211_generic_frame_duration(sc->hw,
 						NULL, len, rate);
-		return dur;
+		return le16_to_cpu(dur);
 	}
 
 	bitrate = rate->bitrate;
@@ -265,8 +265,6 @@
 		 * what rate we should choose to TX ACKs. */
 		tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
 
-		tx_time = le16_to_cpu(tx_time);
-
 		ath5k_hw_reg_write(ah, tx_time, reg);
 
 		if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fd..62ce2f4 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@
 	return 0;
 }
 
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+			struct ieee80211_channel *channel)
+{
+	/*
+	 * On 5211+ read activation -> rx delay
+	 * and use it (100ns steps).
+	 */
+	if (ah->ah_version != AR5K_AR5210) {
+		u32 delay;
+		delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+			AR5K_PHY_RX_DELAY_M;
+		delay = (channel->hw_value & CHANNEL_CCK) ?
+			((delay << 2) / 22) : (delay / 10);
+		if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+			delay = delay << 1;
+		if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+			delay = delay << 2;
+		/* XXX: /2 on turbo ? Let's be safe
+		 * for now */
+		udelay(100 + delay);
+	} else {
+		mdelay(1);
+	}
+}
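
The new ath5k_hw_wait_for_synth() helper converts the activation-to-RX delay
register (100 ns units, per the comment) into microseconds and pads it with a
fixed 100 us. A worked example for a hypothetical AR5K_PHY_RX_DELAY field of
160 (i.e. 16 us):

	/*
	 * OFDM channel:  delay = 160 / 10        = 16  ->  udelay(116)
	 * CCK channel:   delay = (160 << 2) / 22 = 29  ->  udelay(129)
	 * 10 MHz mode doubles the delay, 5 MHz mode quadruples it.
	 */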
+
 
 /**********************\
 * RF Gain optimization *
@@ -1253,6 +1281,7 @@
 	case AR5K_RF5111:
 		ret = ath5k_hw_rf5111_channel(ah, channel);
 		break;
+	case AR5K_RF2317:
 	case AR5K_RF2425:
 		ret = ath5k_hw_rf2425_channel(ah, channel);
 		break;
@@ -3237,6 +3266,13 @@
 		/* Failed */
 		if (i >= 100)
 			return -EIO;
+
+		/* Set channel and wait for synth */
+		ret = ath5k_hw_channel(ah, channel);
+		if (ret)
+			return ret;
+
+		ath5k_hw_wait_for_synth(ah, channel);
 	}
 
 	/*
@@ -3251,13 +3287,53 @@
 	if (ret)
 		return ret;
 
+	/* Write OFDM timings on 5212 */
+	if (ah->ah_version == AR5K_AR5212 &&
+		channel->hw_value & CHANNEL_OFDM) {
+
+		ret = ath5k_hw_write_ofdm_timings(ah, channel);
+		if (ret)
+			return ret;
+
+		/* Spur info is available only from EEPROM versions
+		 * greater than 5.3, but the EEPROM routines will use
+		 * static values for older versions */
+		if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+			ath5k_hw_set_spur_mitigation_filter(ah,
+							    channel);
+	}
+
+	/* If we used fast channel switching
+	 * we are done, release RF bus and
+	 * fire up NF calibration.
+	 *
+	 * Note: Only NF calibration due to
+	 * channel change, not AGC calibration
+	 * since AGC is still running!
+	 */
+	if (fast) {
+		/*
+		 * Release RF Bus grant
+		 */
+		AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+				    AR5K_PHY_RFBUS_REQ_REQUEST);
+
+		/*
+		 * Start NF calibration
+		 */
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+					AR5K_PHY_AGCCTL_NF);
+
+		return ret;
+	}
+
 	/*
 	 * For 5210 we do all initialization using
 	 * initvals, so we don't have to modify
 	 * any settings (5210 also only supports
 	 * a/aturbo modes)
 	 */
-	if ((ah->ah_version != AR5K_AR5210) && !fast) {
+	if (ah->ah_version != AR5K_AR5210) {
 
 		/*
 		 * Write initial RF gain settings
@@ -3276,22 +3352,6 @@
 		if (ret)
 			return ret;
 
-		/* Write OFDM timings on 5212*/
-		if (ah->ah_version == AR5K_AR5212 &&
-			channel->hw_value & CHANNEL_OFDM) {
-
-			ret = ath5k_hw_write_ofdm_timings(ah, channel);
-			if (ret)
-				return ret;
-
-			/* Spur info is available only from EEPROM versions
-			 * greater than 5.3, but the EEPROM routines will use
-			 * static values for older versions */
-			if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
-				ath5k_hw_set_spur_mitigation_filter(ah,
-								    channel);
-		}
-
 		/*Enable/disable 802.11b mode on 5111
 		(enable 2111 frequency converter + CCK)*/
 		if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@
 	 */
 	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
 
-	/*
-	 * On 5211+ read activation -> rx delay
-	 * and use it.
-	 */
-	if (ah->ah_version != AR5K_AR5210) {
-		u32 delay;
-		delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
-			AR5K_PHY_RX_DELAY_M;
-		delay = (channel->hw_value & CHANNEL_CCK) ?
-			((delay << 2) / 22) : (delay / 10);
-		if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
-			delay = delay << 1;
-		if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
-			delay = delay << 2;
-		/* XXX: /2 on turbo ? Let's be safe
-		 * for now */
-		udelay(100 + delay);
-	} else {
-		mdelay(1);
-	}
+	ath5k_hw_wait_for_synth(ah, channel);
 
-	if (fast)
-		/*
-		 * Release RF Bus grant
-		 */
-		AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
-				    AR5K_PHY_RFBUS_REQ_REQUEST);
-	else {
-		/*
-		 * Perform ADC test to see if baseband is ready
-		 * Set tx hold and check adc test register
-		 */
-		phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
-		ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
-		for (i = 0; i <= 20; i++) {
-			if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
-				break;
-			udelay(200);
-		}
-		ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+	/*
+	 * Perform ADC test to see if baseband is ready
+	 * Set tx hold and check adc test register
+	 */
+	phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+	ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+	for (i = 0; i <= 20; i++) {
+		if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+			break;
+		udelay(200);
 	}
+	ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
 
 	/*
 	 * Start automatic gain control calibration
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 7ad05d4..fd14b91 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1064,7 +1064,7 @@
 /*
  * EEPROM command register
  */
-#define AR5K_EEPROM_CMD		0x6008			/* Register Addres */
+#define AR5K_EEPROM_CMD		0x6008			/* Register Address */
 #define AR5K_EEPROM_CMD_READ	0x00000001	/* EEPROM read */
 #define AR5K_EEPROM_CMD_WRITE	0x00000002	/* EEPROM write */
 #define AR5K_EEPROM_CMD_RESET	0x00000004	/* EEPROM reset */
@@ -1084,7 +1084,7 @@
 /*
  * EEPROM config register
  */
-#define AR5K_EEPROM_CFG			0x6010			/* Register Addres */
+#define AR5K_EEPROM_CFG			0x6010			/* Register Address */
 #define AR5K_EEPROM_CFG_SIZE		0x00000003		/* Size determination override */
 #define AR5K_EEPROM_CFG_SIZE_AUTO	0
 #define AR5K_EEPROM_CFG_SIZE_4KBIT	1
@@ -1126,7 +1126,7 @@
  * Second station id register (Upper 16 bits of MAC address + PCU settings)
  */
 #define AR5K_STA_ID1			0x8004			/* Register Address */
-#define	AR5K_STA_ID1_ADDR_U16		0x0000ffff	/* Upper 16 bits of MAC addres */
+#define	AR5K_STA_ID1_ADDR_U16		0x0000ffff	/* Upper 16 bits of MAC address */
 #define AR5K_STA_ID1_AP			0x00010000	/* Set AP mode */
 #define AR5K_STA_ID1_ADHOC		0x00020000	/* Set Ad-Hoc mode */
 #define AR5K_STA_ID1_PWR_SV		0x00040000	/* Power save reporting */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 01880aa..5e300bd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -679,10 +679,6 @@
 
 	/* Do NF cal only at longer intervals */
 	if (longcal || nfcal_pending) {
-		/* Do periodic PAOffset Cal */
-		ar9002_hw_pa_cal(ah, false);
-		ar9002_hw_olc_temp_compensation(ah);
-
 		/*
 		 * Get the value from the previous NF cal and update
 		 * history buffer.
@@ -697,8 +693,12 @@
 			ath9k_hw_loadnf(ah, ah->curchan);
 		}
 
-		if (longcal)
+		if (longcal) {
 			ath9k_hw_start_nfcal(ah, false);
+			/* Do periodic PAOffset Cal */
+			ar9002_hw_pa_cal(ah, false);
+			ar9002_hw_olc_temp_compensation(ah);
+		}
 	}
 
 	return iscaldone;
@@ -954,6 +954,9 @@
 				&adc_dc_cal_multi_sample;
 		}
 		ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+		if (AR_SREV_9287(ah))
+			ah->supp_cals &= ~ADC_GAIN_CAL;
 	}
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f8a7771..f44c84a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -426,9 +426,8 @@
 		}
 
 		/* WAR for ASPM system hang */
-		if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+		if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
 			val |= (AR_WA_BIT6 | AR_WA_BIT7);
-		}
 
 		if (AR_SREV_9285E_20(ah))
 			val |= AR_WA_BIT23;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 81f9cf2..9ecca93 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1842,7 +1842,7 @@
 
 static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
 	/* Addr      allmodes  */
-	{0x00004040, 0x08212e5e},
+	{0x00004040, 0x0821265e},
 	{0x00004040, 0x0008003b},
 	{0x00004044, 0x00000000},
 };
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 6137634..06fb2c8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -146,8 +146,8 @@
 		/* Sleep Setting */
 
 		INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-				ar9300PciePhy_clkreq_enable_L1_2p2,
-				ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
+				ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
+				ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
 				2);
 
 		/* Fast clock modal settings */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3681caf5..1a7fa6e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
 #include <linux/device.h>
 #include <linux/leds.h>
 #include <linux/completion.h>
-#include <linux/pm_qos_params.h>
 
 #include "debug.h"
 #include "common.h"
@@ -57,8 +56,6 @@
 
 #define A_MAX(a, b) ((a) > (b) ? (a) : (b))
 
-#define ATH9K_PM_QOS_DEFAULT_VALUE	55
-
 #define TSF_TO_TU(_h,_l) \
 	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
@@ -218,6 +215,7 @@
 struct ath_buf_state {
 	u8 bf_type;
 	u8 bfs_paprd;
+	unsigned long bfs_paprd_timestamp;
 	enum ath9k_internal_frame_type bfs_ftype;
 };
 
@@ -593,7 +591,6 @@
 	struct work_struct paprd_work;
 	struct work_struct hw_check_work;
 	struct completion paprd_complete;
-	bool paprd_pending;
 
 	u32 intrstatus;
 	u32 sc_flags; /* SC_OP_* */
@@ -633,8 +630,6 @@
 	struct ath_descdma txsdma;
 
 	struct ath_ant_comb ant_comb;
-
-	struct pm_qos_request_list pm_qos_req;
 };
 
 struct ath_wiphy {
@@ -666,7 +661,6 @@
 extern struct ieee80211_ops ath9k_ops;
 extern int ath9k_modparam_nohwcrypt;
 extern int led_blink;
-extern int ath9k_pm_qos_value;
 extern bool is_ath9k_unloaded;
 
 irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 088f141..749a936 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -226,6 +226,10 @@
 	    eep->baseEepHeader.pwdclkind == 0)
 		ah->need_an_top2_fixup = 1;
 
+	if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+	    (AR_SREV_9280(ah)))
+		eep->modalHeader[0].xpaBiasLvl = 0;
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084..07b1633 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -219,8 +219,9 @@
 	struct tx_buf *tx_buf = NULL;
 	struct sk_buff *nskb = NULL;
 	int ret = 0, i;
-	u16 *hdr, tx_skb_cnt = 0;
+	u16 tx_skb_cnt = 0;
 	u8 *buf;
+	__le16 *hdr;
 
 	if (hif_dev->tx.tx_skb_cnt == 0)
 		return 0;
@@ -245,9 +246,9 @@
 
 		buf = tx_buf->buf;
 		buf += tx_buf->offset;
-		hdr = (u16 *)buf;
-		*hdr++ = nskb->len;
-		*hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+		hdr = (__le16 *)buf;
+		*hdr++ = cpu_to_le16(nskb->len);
+		*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
 		buf += 4;
 		memcpy(buf, nskb->data, nskb->len);
 		tx_buf->len = nskb->len + 4;
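
The hif_usb change above stores the stream-mode header through __le16 pointers
with cpu_to_le16(), so the two 16-bit words (frame length, then the stream
tag) hit the wire in little-endian order on any host, and sparse can check the
annotation. A small sketch with an illustrative helper name:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void put_stream_hdr(__le16 *hdr, u16 frame_len, u16 tag)
	{
		hdr[0] = cpu_to_le16(frame_len);	/* low byte first on the wire */
		hdr[1] = cpu_to_le16(tag);
	}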
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index a099b3e..780ac5e 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -78,7 +78,7 @@
 	u8 node_idx;
 	u8 vif_idx;
 	u8 tidno;
-	u32 flags; /* ATH9K_HTC_TX_* */
+	__be32 flags; /* ATH9K_HTC_TX_* */
 	u8 key_type;
 	u8 keyix;
 	u8 reserved[26];
@@ -433,6 +433,7 @@
 void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
 			enum htc_endpoint_id ep_id, bool txok);
 
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
 void ath9k_htc_station_work(struct work_struct *work);
 void ath9k_htc_aggr_work(struct work_struct *work);
 void ath9k_ani_work(struct work_struct *work);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 38433f9..0352f09 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -142,9 +142,6 @@
 {
 	ath9k_htc_exit_debug(priv->ah);
 	ath9k_hw_deinit(priv->ah);
-	tasklet_kill(&priv->swba_tasklet);
-	tasklet_kill(&priv->rx_tasklet);
-	tasklet_kill(&priv->tx_tasklet);
 	kfree(priv->ah);
 	priv->ah = NULL;
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 845b4c9..6bb5995 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -301,6 +301,16 @@
 
 	priv->nstations++;
 
+	/*
+	 * Set chainmask etc. on the target.
+	 */
+	ret = ath9k_htc_update_cap_target(priv);
+	if (ret)
+		ath_dbg(common, ATH_DBG_CONFIG,
+			"Failed to update capability in target\n");
+
+	priv->ah->is_monitoring = true;
+
 	return 0;
 
 err_vif:
@@ -328,6 +338,7 @@
 	}
 
 	priv->nstations--;
+	priv->ah->is_monitoring = false;
 
 	return 0;
 }
@@ -419,7 +430,7 @@
 	return 0;
 }
 
-static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
 {
 	struct ath9k_htc_cap_target tcap;
 	int ret;
@@ -1014,12 +1025,6 @@
 	int ret = 0;
 	u8 cmd_rsp;
 
-	/* Cancel all the running timers/work .. */
-	cancel_work_sync(&priv->fatal_work);
-	cancel_work_sync(&priv->ps_work);
-	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
-	ath9k_led_stop_brightness(priv);
-
 	mutex_lock(&priv->mutex);
 
 	if (priv->op_flags & OP_INVALID) {
@@ -1033,8 +1038,23 @@
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
 	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
 	WMI_CMD(WMI_STOP_RECV_CMDID);
+
+	tasklet_kill(&priv->swba_tasklet);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+
 	skb_queue_purge(&priv->tx_queue);
 
+	mutex_unlock(&priv->mutex);
+
+	/* Cancel all the running timers/work .. */
+	cancel_work_sync(&priv->fatal_work);
+	cancel_work_sync(&priv->ps_work);
+	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+	ath9k_led_stop_brightness(priv);
+
+	mutex_lock(&priv->mutex);
+
 	/* Remove monitor interface here */
 	if (ah->opmode == NL80211_IFTYPE_MONITOR) {
 		if (ath9k_htc_remove_monitor_interface(priv))
@@ -1186,6 +1206,20 @@
 		}
 	}
 
+	/*
+	 * Monitor interface should be added before
+	 * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+	 */
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (conf->flags & IEEE80211_CONF_MONITOR) {
+			if (ath9k_htc_add_monitor_interface(priv))
+				ath_err(common, "Failed to set monitor mode\n");
+			else
+				ath_dbg(common, ATH_DBG_CONFIG,
+					"HW opmode set to Monitor mode\n");
+		}
+	}
+
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 		struct ieee80211_channel *curchan = hw->conf.channel;
 		int pos = curchan->hw_value;
@@ -1221,16 +1255,6 @@
 		ath_update_txpow(priv);
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-		if (conf->flags & IEEE80211_CONF_MONITOR) {
-			if (ath9k_htc_add_monitor_interface(priv))
-				ath_err(common, "Failed to set monitor mode\n");
-			else
-				ath_dbg(common, ATH_DBG_CONFIG,
-					"HW opmode set to Monitor mode\n");
-		}
-	}
-
 	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
 		mutex_lock(&priv->htc_pm_lock);
 		if (!priv->ps_idle) {
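
In the reworked ath9k_htc stop path above, priv->mutex is dropped before the
cancel_work_sync()/cancel_delayed_work_sync() calls and re-taken afterwards.
cancel_*_sync() blocks until a running handler finishes, so it must not be
called while holding a lock the handler itself may take; presumably that is
why the cancellations moved outside the mutex. Generic shape of the pattern,
with illustrative names:

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct my_priv {
		struct mutex mutex;
		struct work_struct maint_work;	/* handler takes my_priv.mutex */
	};

	static void my_stop(struct my_priv *priv)
	{
		mutex_lock(&priv->mutex);
		/* ... tell the firmware/hardware to stop ... */
		mutex_unlock(&priv->mutex);

		cancel_work_sync(&priv->maint_work);	/* may sleep; no locks held */

		mutex_lock(&priv->mutex);
		/* ... finish tearing down driver state ... */
		mutex_unlock(&priv->mutex);
	}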
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 33f3602..7a5ffca 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -113,6 +113,7 @@
 
 	if (ieee80211_is_data(fc)) {
 		struct tx_frame_hdr tx_hdr;
+		u32 flags = 0;
 		u8 *qc;
 
 		memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
@@ -136,13 +137,14 @@
 		/* Check for RTS protection */
 		if (priv->hw->wiphy->rts_threshold != (u32) -1)
 			if (skb->len > priv->hw->wiphy->rts_threshold)
-				tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+				flags |= ATH9K_HTC_TX_RTSCTS;
 
 		/* CTS-to-self */
-		if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
+		if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
 		    (priv->op_flags & OP_PROTECT_ENABLE))
-			tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+			flags |= ATH9K_HTC_TX_CTSONLY;
 
+		tx_hdr.flags = cpu_to_be32(flags);
 		tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
 		if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
 			tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fde9786..9f01e50 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -369,6 +369,9 @@
 	else
 		ah->config.ht_enable = 0;
 
+	/* PAPRD needs some more work to be enabled */
+	ah->config.paprd_disable = 1;
+
 	ah->config.rx_intr_mitigation = true;
 	ah->config.pcieSerDesWrite = true;
 
@@ -436,9 +439,10 @@
 
 static int ath9k_hw_post_init(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int ecode;
 
-	if (!AR_SREV_9271(ah)) {
+	if (common->bus_ops->ath_bus_type != ATH_USB) {
 		if (!ath9k_hw_chip_test(ah))
 			return -ENODEV;
 	}
@@ -1213,7 +1217,7 @@
 	ah->txchainmask = common->tx_chainmask;
 	ah->rxchainmask = common->rx_chainmask;
 
-	if (!ah->chip_fullsleep) {
+	if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
 		ath9k_hw_abortpcurecv(ah);
 		if (!ath9k_hw_stopdmarecv(ah)) {
 			ath_dbg(common, ATH_DBG_XMIT,
@@ -1932,7 +1936,8 @@
 		pCap->rx_status_len = sizeof(struct ar9003_rxs);
 		pCap->tx_desc_len = sizeof(struct ar9003_txc);
 		pCap->txs_len = sizeof(struct ar9003_txs);
-		if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+		if (!ah->config.paprd_disable &&
+		    ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
 			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
 	} else {
 		pCap->tx_desc_len = sizeof(struct ath_desc);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 5a3dfec..ea9fde6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -225,6 +225,7 @@
 	u32 pcie_waen;
 	u8 analog_shiftreg;
 	u8 ht_enable;
+	u8 paprd_disable;
 	u32 ofdm_trig_low;
 	u32 ofdm_trig_high;
 	u32 cck_trig_high;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 767d8b8..a033d01 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -41,10 +41,6 @@
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
 
@@ -598,8 +594,6 @@
 err_queues:
 	ath9k_hw_deinit(ah);
 err_hw:
-	tasklet_kill(&sc->intr_tq);
-	tasklet_kill(&sc->bcon_tasklet);
 
 	kfree(ah);
 	sc->sc_ah = NULL;
@@ -764,9 +758,6 @@
 	ath_init_leds(sc);
 	ath_start_rfkill_poll(sc);
 
-	pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-			   PM_QOS_DEFAULT_VALUE);
-
 	return 0;
 
 error_world:
@@ -807,9 +798,6 @@
 
 	ath9k_hw_deinit(sc->sc_ah);
 
-	tasklet_kill(&sc->intr_tq);
-	tasklet_kill(&sc->bcon_tasklet);
-
 	kfree(sc->sc_ah);
 	sc->sc_ah = NULL;
 }
@@ -824,6 +812,8 @@
 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
 	ath_deinit_leds(sc);
 
+	ath9k_ps_restore(sc);
+
 	for (i = 0; i < sc->num_sec_wiphy; i++) {
 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
 		if (aphy == NULL)
@@ -834,7 +824,6 @@
 	}
 
 	ieee80211_unregister_hw(hw);
-	pm_qos_remove_request(&sc->pm_qos_req);
 	ath_rx_cleanup(sc);
 	ath_tx_cleanup(sc);
 	ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d..2915b11 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -885,7 +885,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!(ints & ATH9K_INT_GLOBAL))
-		ath9k_hw_enable_interrupts(ah);
+		ath9k_hw_disable_interrupts(ah);
 
 	ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
 
@@ -963,7 +963,8 @@
 			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
 	}
 
-	ath9k_hw_enable_interrupts(ah);
+	if (ints & ATH9K_INT_GLOBAL)
+		ath9k_hw_enable_interrupts(ah);
 
 	return;
 }
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f90a6ca..a09d15f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -325,6 +325,8 @@
 {
 	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_tx_control txctl;
 	int time_left;
 
@@ -340,14 +342,16 @@
 	tx_info->control.rates[1].idx = -1;
 
 	init_completion(&sc->paprd_complete);
-	sc->paprd_pending = true;
 	txctl.paprd = BIT(chain);
-	if (ath_tx_start(hw, skb, &txctl) != 0)
+
+	if (ath_tx_start(hw, skb, &txctl) != 0) {
+		ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
+		dev_kfree_skb_any(skb);
 		return false;
+	}
 
 	time_left = wait_for_completion_timeout(&sc->paprd_complete,
 			msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
-	sc->paprd_pending = false;
 
 	if (!time_left)
 		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@ -592,14 +596,12 @@
 	u32 status = sc->intrstatus;
 	u32 rxmask;
 
-	ath9k_ps_wakeup(sc);
-
 	if (status & ATH9K_INT_FATAL) {
 		ath_reset(sc, true);
-		ath9k_ps_restore(sc);
 		return;
 	}
 
+	ath9k_ps_wakeup(sc);
 	spin_lock(&sc->sc_pcu_lock);
 
 	if (!ath9k_hw_check_alive(ah))
@@ -955,8 +957,6 @@
 
 	spin_unlock_bh(&sc->sc_pcu_lock);
 	ath9k_ps_restore(sc);
-
-	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 }
 
 int ath_reset(struct ath_softc *sc, bool retry_tx)
@@ -969,6 +969,7 @@
 	/* Stop ANI */
 	del_timer_sync(&common->ani.timer);
 
+	ath9k_ps_wakeup(sc);
 	spin_lock_bh(&sc->sc_pcu_lock);
 
 	ieee80211_stop_queues(hw);
@@ -1015,6 +1016,7 @@
 
 	/* Start ANI */
 	ath_start_ani(common);
+	ath9k_ps_restore(sc);
 
 	return r;
 }
@@ -1171,12 +1173,6 @@
 			ath9k_btcoex_timer_resume(sc);
 	}
 
-	/* User has the option to provide pm-qos value as a module
-	 * parameter rather than using the default value of
-	 * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
-	 */
-	pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
 	if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
 		common->bus_ops->extn_synch_en(common);
 
@@ -1309,6 +1305,9 @@
 
 	spin_lock_bh(&sc->sc_pcu_lock);
 
+	/* prevent tasklets from enabling interrupts once we disable them */
+	ah->imask &= ~ATH9K_INT_GLOBAL;
+
 	/* make sure h/w will not generate any interrupt
 	 * before setting the invalid flag. */
 	ath9k_hw_disable_interrupts(ah);
@@ -1326,6 +1325,12 @@
 
 	spin_unlock_bh(&sc->sc_pcu_lock);
 
+	/* we can now sync the irq and kill any running tasklets, since we have
+	 * already disabled interrupts and are not holding a spin lock */
+	synchronize_irq(sc->irq);
+	tasklet_kill(&sc->intr_tq);
+	tasklet_kill(&sc->bcon_tasklet);
+
 	ath9k_ps_restore(sc);
 
 	sc->ps_idle = true;
@@ -1334,8 +1339,6 @@
 
 	sc->sc_flags |= SC_OP_INVALID;
 
-	pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
 	mutex_unlock(&sc->mutex);
 
 	ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
@@ -1701,7 +1704,9 @@
 skip_chan_change:
 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
 		sc->config.txpowlimit = 2 * conf->power_level;
+		ath9k_ps_wakeup(sc);
 		ath_update_txpow(sc);
+		ath9k_ps_restore(sc);
 	}
 
 	spin_lock_bh(&sc->wiphy_lock);
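
The ath9k_stop() changes above impose a strict teardown order: clear
ATH9K_INT_GLOBAL from ah->imask so a tasklet cannot re-enable interrupts, mask
the hardware, then (after releasing the PCU lock) wait out any in-flight ISR
with synchronize_irq() before killing the tasklets it could have scheduled.
The same order, condensed into an illustrative helper that the driver does not
define:

	static void ath9k_quiesce_irqs(struct ath_softc *sc, struct ath_hw *ah)
	{
		ah->imask &= ~ATH9K_INT_GLOBAL;	  /* tasklets cannot re-enable IRQs */
		ath9k_hw_disable_interrupts(ah);  /* mask at the hardware */

		synchronize_irq(sc->irq);	  /* wait for a running ISR to return */
		tasklet_kill(&sc->intr_tq);	  /* nothing can reschedule them now */
		tasklet_kill(&sc->bcon_tasklet);
	}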
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 332d1fe..07b7804 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,6 +1725,9 @@
 			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
 						   bf->bf_state.bfs_paprd);
 
+		if (txctl->paprd)
+			bf->bf_state.bfs_paprd_timestamp = jiffies;
+
 		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
 	}
 
@@ -1886,7 +1889,9 @@
 	bf->bf_buf_addr = 0;
 
 	if (bf->bf_state.bfs_paprd) {
-		if (!sc->paprd_pending)
+		if (time_after(jiffies,
+				bf->bf_state.bfs_paprd_timestamp +
+				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
 			dev_kfree_skb_any(skb);
 		else
 			complete(&sc->paprd_complete);
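
The replacement of the paprd_pending flag with a jiffies timestamp relies on
time_after(), which stays correct across jiffies wrap-around where a plain '>'
comparison would not. A hypothetical predicate for the check done above:

	#include <linux/jiffies.h>

	static bool paprd_frame_expired(unsigned long sent_jiffies)
	{
		return time_after(jiffies, sent_jiffies +
					   msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
	}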
@@ -2113,9 +2118,7 @@
 	if (needreset) {
 		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
 			"tx hung, resetting the chip\n");
-		ath9k_ps_wakeup(sc);
 		ath_reset(sc, true);
-		ath9k_ps_restore(sc);
 	}
 
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 939a0e9..84866a4 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -564,7 +564,7 @@
 	cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
 
 	/* 2. Maybe the AP wants to send multicast/broadcast data? */
-	cam = !!(tim_ie->bitmap_ctrl & 0x01);
+	cam |= !!(tim_ie->bitmap_ctrl & 0x01);
 
 	if (!cam) {
 		/* back to low-power land. */
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e..f82c400 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@
 	{ USB_DEVICE(0x057c, 0x8402) },
 	/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
 	{ USB_DEVICE(0x1668, 0x1200) },
+	/* Airlive X.USB a/b/g/n */
+	{ USB_DEVICE(0x1b75, 0x9170) },
 
 	/* terminate */
 	{}
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 0dc33b6..be48281 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1919,7 +1919,7 @@
 	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
 }
 
-/* Intialize B/G PHY power control */
+/* Initialize B/G PHY power control */
 static void b43_phy_init_pctl(struct b43_wldev *dev)
 {
 	struct ssb_bus *bus = dev->dev->bus;
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 35033dd..28e477d 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -153,7 +153,7 @@
 	phy->calibrated = 1;
 }
 
-/* intialize B PHY power control
+/* initialize B PHY power control
  * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
  */
 static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a413..2176ede 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@
 	hw_priv->link = link;
 
 	/*
-	 * Make sure the IRQ handler cannot proceed until at least
-	 * dev->base_addr is initialized.
+	 * We enable the IRQ here, but the IRQ handler will not proceed
+	 * until dev->base_addr is set below. This protects us from
+	 * receiving interrupts while the driver is not initialized.
 	 */
-	spin_lock_irqsave(&local->irq_init_lock, flags);
-
 	ret = pcmcia_request_irq(link, prism2_interrupt);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
 	ret = pcmcia_enable_device(link);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
+	spin_lock_irqsave(&local->irq_init_lock, flags);
 	dev->irq = link->irq;
 	dev->base_addr = link->resource[0]->start;
-
 	spin_unlock_irqrestore(&local->irq_init_lock, flags);
 
 	local->shutdown = 0;
@@ -546,8 +545,6 @@
 
 	return ret;
 
- failed_unlock:
-	spin_unlock_irqrestore(&local->irq_init_lock, flags);
  failed:
 	kfree(hw_priv);
 	prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f..ae438ed 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1973,6 +1973,13 @@
 
 	inta = ipw_read32(priv, IPW_INTA_RW);
 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+		/* Only handle the cached INTA values */
+		inta = 0;
+	}
 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
 
 	/* Add any cached INTA values that need to be handled */
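
Reading 0xFFFFFFFF from PCI memory-mapped I/O is the usual sign that the
device has dropped off the bus (for example after a surprise removal); acting
on such a value would mean handling interrupt bits that were never set. A tiny
illustrative predicate for the check added above:

	static bool pci_mmio_read_valid(u32 val)
	{
		/* all-ones usually means the device is gone */
		return val != 0xFFFFFFFF;
	}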
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852b..39b6f16 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-				struct iwl_rx_packet *pkt)
-{
-	bool rc = true;
-	struct iwl3945_notif_statistics current_stat;
-	int combined_plcp_delta;
-	unsigned int plcp_msec;
-	unsigned long plcp_received_jiffies;
-
-	if (priv->cfg->base_params->plcp_delta_threshold ==
-	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-		return rc;
-	}
-	memcpy(&current_stat, pkt->u.raw, sizeof(struct
-			iwl3945_notif_statistics));
-	/*
-	 * check for plcp_err and trigger radio reset if it exceeds
-	 * the plcp error threshold plcp_delta.
-	 */
-	plcp_received_jiffies = jiffies;
-	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-					(long) priv->plcp_jiffies);
-	priv->plcp_jiffies = plcp_received_jiffies;
-	/*
-	 * check to make sure plcp_msec is not 0 to prevent division
-	 * by zero.
-	 */
-	if (plcp_msec) {
-		combined_plcp_delta =
-			(le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-			le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-		if ((combined_plcp_delta > 0) &&
-			((combined_plcp_delta * 100) / plcp_msec) >
-			priv->cfg->base_params->plcp_delta_threshold) {
-			/*
-			 * if plcp_err exceed the threshold, the following
-			 * data is printed in csv format:
-			 *    Text: plcp_err exceeded %d,
-			 *    Received ofdm.plcp_err,
-			 *    Current ofdm.plcp_err,
-			 *    combined_plcp_delta,
-			 *    plcp_msec
-			 */
-			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-				"%u, %d, %u mSecs\n",
-				priv->cfg->base_params->plcp_delta_threshold,
-				le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-				combined_plcp_delta, plcp_msec);
-			/*
-			 * Reset the RF radio due to the high plcp
-			 * error rate
-			 */
-			rc = false;
-		}
-	}
-	return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
 		struct iwl_rx_mem_buffer *rxb)
 {
@@ -2734,7 +2668,6 @@
 	.isr_ops = {
 		.isr = iwl_isr_legacy,
 	},
-	.check_plcp_health = iwl3945_good_plcp_health,
 
 	.debugfs_ops = {
 		.rx_stats_read = iwl3945_ucode_rx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3f1e5f1..91a9f52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2624,6 +2624,7 @@
 	.fw_name_pre = IWL4965_FW_PRE,
 	.ucode_api_max = IWL4965_UCODE_API_MAX,
 	.ucode_api_min = IWL4965_UCODE_API_MIN,
+	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_ABC,
 	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6..537fb8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
 #include "iwl-agn-debugfs.h"
 
 /* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
 /* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af505bc..ef36aff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -681,6 +681,8 @@
 	.fw_name_pre = IWL6050_FW_PRE,				\
 	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
 	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.valid_tx_ant = ANT_AB,		/* .cfg overwrite */	\
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */	\
 	.ops = &iwl6050_ops,					\
 	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,	\
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index 97906dd..27b5a3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -152,11 +152,14 @@
 
 	eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
 
-	priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
+	if (!priv->cfg->sku) {
+		/* not using sku overwrite */
+		priv->cfg->sku =
+			((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
 			EEPROM_SKU_CAP_BAND_POS);
-	if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
-		priv->cfg->sku |= IWL_SKU_N;
-
+		if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
+			priv->cfg->sku |= IWL_SKU_N;
+	}
 	if (!priv->cfg->sku) {
 		IWL_ERR(priv, "Invalid device sku\n");
 		return -EINVAL;
@@ -168,7 +171,7 @@
 		/* not using .cfg overwrite */
 		radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 		priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
-		priv->cfg->valid_rx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+		priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
 		if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
 			IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
 				priv->cfg->valid_tx_ant,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index a5dbfea..b5cb3be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -197,7 +197,7 @@
 
  none:
 	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if diabled by irq  and no schedules tasklet. */
+	/* only Re-enable if disabled by irq and no scheduled tasklet. */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
 		iwl_enable_interrupts(priv);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f13a83a..c1cfd99 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1154,9 +1154,12 @@
 	}
 
 	/* Re-enable all interrupts */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1368,9 +1371,12 @@
 	}
 
 	/* Re-enable all interrupts */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(priv);
 }
 
 /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
index a08b4e5..bb1a742 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -619,7 +619,7 @@
 
 none:
 	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4776323..49493d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -107,7 +107,7 @@
 	/*
 	 * XXX: The MAC address in the command buffer is often changed from
 	 * the original sent to the device. That is, the MAC address
-	 * written to the command buffer often is not the same MAC adress
+	 * written to the command buffer often is not the same MAC address
 	 * read from the command buffer when the command returns. This
 	 * issue has not yet been resolved and this debugging is left to
 	 * observe the problem.
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 13a69eb..5091d77 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -126,6 +126,7 @@
 	ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
 	if (!ndev) {
 		dev_err(dev, "no memory for network device instance\n");
+		ret = -ENOMEM;
 		goto out_priv;
 	}
 
@@ -138,6 +139,7 @@
 				    GFP_KERNEL);
 	if (!iwm->umac_profile) {
 		dev_err(dev, "Couldn't alloc memory for profile\n");
+		ret = -ENOMEM;
 		goto out_profile;
 	}
 
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4..0494d7b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@
 	while (i != idx) {
 		u16 len;
 		struct sk_buff *skb;
+		dma_addr_t dma_addr;
 		desc = &ring[i];
 		len = le16_to_cpu(desc->len);
 		skb = rx_buf[i];
@@ -216,17 +217,20 @@
 
 			len = priv->common.rx_mtu;
 		}
+		dma_addr = le32_to_cpu(desc->host_addr);
+		pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+			priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 		skb_put(skb, len);
 
 		if (p54_rx(dev, skb)) {
-			pci_unmap_single(priv->pdev,
-					 le32_to_cpu(desc->host_addr),
-					 priv->common.rx_mtu + 32,
-					 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(priv->pdev, dma_addr,
+				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 			rx_buf[i] = NULL;
-			desc->host_addr = 0;
+			desc->host_addr = cpu_to_le32(0);
 		} else {
 			skb_trim(skb, 0);
+			pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
 		}
 
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7..9b344a9 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@
 	{USB_DEVICE(0x1413, 0x5400)},   /* Telsey 802.11g USB2.0 Adapter */
 	{USB_DEVICE(0x1435, 0x0427)},	/* Inventel UR054G */
 	{USB_DEVICE(0x1668, 0x1050)},	/* Actiontec 802UIG-1 */
+	{USB_DEVICE(0x1740, 0x1000)},	/* Senao NUB-350 */
 	{USB_DEVICE(0x2001, 0x3704)},	/* DLink DWL-G122 rev A2 */
 	{USB_DEVICE(0x2001, 0x3705)},	/* D-Link DWL-G120 rev C1 */
 	{USB_DEVICE(0x413c, 0x5513)},	/* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a..f618b96 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -618,7 +618,7 @@
 	else
 		*burst_possible = false;
 
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 		*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
 
 	if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 2c8cc95..ec2c75d 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -630,7 +630,7 @@
 	printk(KERN_DEBUG "islpci_alloc_memory\n");
 #endif
 
-	/* remap the PCI device base address to accessable */
+	/* remap the PCI device base address to accessible */
 	if (!(priv->device_base =
 	      ioremap(pci_resource_start(priv->pdev, 0),
 		      ISL38XX_PCI_MEM_SIZE))) {
@@ -709,7 +709,7 @@
 				   PCI_DMA_FROMDEVICE);
 		if (!priv->pci_map_rx_address[counter]) {
 			/* error mapping the buffer to device
-			   accessable memory address */
+			   accessible memory address */
 			printk(KERN_ERR "failed to map skb DMA'able\n");
 			goto out_free;
 		}
@@ -773,7 +773,7 @@
 		priv->data_low_rx[counter] = NULL;
 	}
 
-	/* Free the acces control list and the WPA list */
+	/* Free the access control list and the WPA list */
 	prism54_acl_clean(&priv->acl);
 	prism54_wpa_bss_ie_clean(priv);
 	mgt_clean(priv);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 2fc52bc..d44f8e2 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -450,7 +450,7 @@
 				   MAX_FRAGMENT_SIZE_RX + 2,
 				   PCI_DMA_FROMDEVICE);
 		if (unlikely(!priv->pci_map_rx_address[index])) {
-			/* error mapping the buffer to device accessable memory address */
+			/* error mapping the buffer to device accessible memory address */
 			DEBUG(SHOW_ERROR_MESSAGES,
 			      "Error mapping DMA address\n");
 
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2c..518542b 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@
 	__le32 mode;
 	int ret;
 
+	if (priv->device_type != RNDIS_BCM4320B)
+		return -ENOTSUPP;
+
 	netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
 				enabled ? "enabled" : "disabled",
 				timeout);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971..3b3f1e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -652,6 +652,12 @@
 		 */
 		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+		/*
+		 * The hardware has already checked the Michael Mic and has
+		 * stripped it from the frame. Signal this to mac80211.
+		 */
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
 		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
 			rxdesc->flags |= RX_FLAG_DECRYPTED;
 		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -1065,6 +1071,8 @@
 	{ PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
 #endif
 #ifdef CONFIG_RT2800PCI_RT35XX
+	{ PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
 	{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
 	{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
 	{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a5..197a36c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -486,6 +486,12 @@
 		 */
 		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+		/*
+		 * The hardware has already checked the Michael Mic and has
+		 * stripped it from the frame. Signal this to mac80211.
+		 */
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
 		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
 			rxdesc->flags |= RX_FLAG_DECRYPTED;
 		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index f0e1eb7..be0ff78 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -58,6 +58,7 @@
 
 	if (!fw || !fw->size || !fw->data) {
 		ERROR(rt2x00dev, "Failed to read Firmware.\n");
+		release_firmware(fw);
 		return -ENOENT;
 	}
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 658542d..f3da051 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -273,7 +273,7 @@
 	intf->beacon = entry;
 
 	/*
-	 * The MAC adddress must be configured after the device
+	 * The MAC address must be configured after the device
 	 * has been initialized. Otherwise the device can reset
 	 * the MAC registers.
 	 * The BSSID address must only be configured in AP mode,
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 73631c6..ace0b66 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -363,12 +363,12 @@
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 
 	if (pci_set_power_state(pci_dev, PCI_D0) ||
-	    pci_enable_device(pci_dev) ||
-	    pci_restore_state(pci_dev)) {
+	    pci_enable_device(pci_dev)) {
 		ERROR(rt2x00dev, "Failed to resume device.\n");
 		return -EIO;
 	}
 
+	pci_restore_state(pci_dev);
 	return rt2x00lib_resume(rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_resume);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 0b4e859..029be3c 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2446,6 +2446,7 @@
 	{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Qcom */
 	{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index b8433f3..62876cd 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -726,9 +726,9 @@
 }
 
 static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
-				u8 efuse_data, u8 offset, int *bcontinual,
-				u8 *write_state, struct pgpkt_struct target_pkt,
-				int *repeat_times, int *bresult, u8 word_en)
+			u8 efuse_data, u8 offset, int *bcontinual,
+			u8 *write_state, struct pgpkt_struct *target_pkt,
+			int *repeat_times, int *bresult, u8 word_en)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct pgpkt_struct tmp_pkt;
@@ -744,8 +744,8 @@
 	tmp_pkt.word_en = tmp_header & 0x0F;
 	tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
 
-	if (tmp_pkt.offset != target_pkt.offset) {
-		efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+	if (tmp_pkt.offset != target_pkt->offset) {
+		*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
 		*write_state = PG_STATE_HEADER;
 	} else {
 		for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
@@ -756,23 +756,23 @@
 		}
 
 		if (bdataempty == false) {
-			efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+			*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
 			*write_state = PG_STATE_HEADER;
 		} else {
 			match_word_en = 0x0F;
-			if (!((target_pkt.word_en & BIT(0)) |
+			if (!((target_pkt->word_en & BIT(0)) |
 			     (tmp_pkt.word_en & BIT(0))))
 				match_word_en &= (~BIT(0));
 
-			if (!((target_pkt.word_en & BIT(1)) |
+			if (!((target_pkt->word_en & BIT(1)) |
 			     (tmp_pkt.word_en & BIT(1))))
 				match_word_en &= (~BIT(1));
 
-			if (!((target_pkt.word_en & BIT(2)) |
+			if (!((target_pkt->word_en & BIT(2)) |
 			     (tmp_pkt.word_en & BIT(2))))
 				match_word_en &= (~BIT(2));
 
-			if (!((target_pkt.word_en & BIT(3)) |
+			if (!((target_pkt->word_en & BIT(3)) |
 			     (tmp_pkt.word_en & BIT(3))))
 				match_word_en &= (~BIT(3));
 
@@ -780,7 +780,7 @@
 				badworden = efuse_word_enable_data_write(
 							    hw, *efuse_addr + 1,
 							    tmp_pkt.word_en,
-							    target_pkt.data);
+							    target_pkt->data);
 
 				if (0x0F != (badworden & 0x0F)) {
 					u8 reorg_offset = offset;
@@ -791,26 +791,26 @@
 				}
 
 				tmp_word_en = 0x0F;
-				if ((target_pkt.word_en & BIT(0)) ^
+				if ((target_pkt->word_en & BIT(0)) ^
 				    (match_word_en & BIT(0)))
 					tmp_word_en &= (~BIT(0));
 
-				if ((target_pkt.word_en & BIT(1)) ^
+				if ((target_pkt->word_en & BIT(1)) ^
 				    (match_word_en & BIT(1)))
 					tmp_word_en &= (~BIT(1));
 
-				if ((target_pkt.word_en & BIT(2)) ^
+				if ((target_pkt->word_en & BIT(2)) ^
 					(match_word_en & BIT(2)))
 					tmp_word_en &= (~BIT(2));
 
-				if ((target_pkt.word_en & BIT(3)) ^
+				if ((target_pkt->word_en & BIT(3)) ^
 				    (match_word_en & BIT(3)))
 					tmp_word_en &= (~BIT(3));
 
 				if ((tmp_word_en & 0x0F) != 0x0F) {
 					*efuse_addr = efuse_get_current_size(hw);
-					target_pkt.offset = offset;
-					target_pkt.word_en = tmp_word_en;
+					target_pkt->offset = offset;
+					target_pkt->word_en = tmp_word_en;
 				} else
 					*bcontinual = false;
 				*write_state = PG_STATE_HEADER;
@@ -821,8 +821,8 @@
 				}
 			} else {
 				*efuse_addr += (2 * tmp_word_cnts) + 1;
-				target_pkt.offset = offset;
-				target_pkt.word_en = word_en;
+				target_pkt->offset = offset;
+				target_pkt->word_en = word_en;
 				*write_state = PG_STATE_HEADER;
 			}
 		}
@@ -938,7 +938,7 @@
 				efuse_write_data_case1(hw, &efuse_addr,
 						       efuse_data, offset,
 						       &bcontinual,
-						       &write_state, target_pkt,
+						       &write_state, &target_pkt,
 						       &repeat_times, &bresult,
 						       word_en);
 			else
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0fa36aa..1758d44 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -619,6 +619,13 @@
 					struct sk_buff *uskb = NULL;
 					u8 *pdata;
 					uskb = dev_alloc_skb(skb->len + 128);
+					if (!uskb) {
+						RT_TRACE(rtlpriv,
+							(COMP_INTR | COMP_RECV),
+							DBG_EMERG,
+							("can't alloc rx skb\n"));
+						goto done;
+					}
 					memcpy(IEEE80211_SKB_RXCB(uskb),
 							&rx_status,
 							sizeof(rx_status));
@@ -641,7 +648,7 @@
 			new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
 			if (unlikely(!new_skb)) {
 				RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-					 DBG_DMESG,
+					 DBG_EMERG,
 					 ("can't alloc skb for rx\n"));
 				goto done;
 			}
@@ -1066,9 +1073,9 @@
 			struct sk_buff *skb =
 			    dev_alloc_skb(rtlpci->rxbuffersize);
 			u32 bufferaddress;
-			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
 			if (!skb)
 				return 0;
+			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
 
 			/*skb->dev = dev; */
 
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index e54b21a..efcc3aa 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -1272,10 +1272,10 @@
 /* OBSOLETE */
 #define WL1251_ACX_INTR_WAKE_ON_HOST	BIT(6)
 
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
 #define WL1251_ACX_INTR_TRACE_A		BIT(7)
 
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
 #define WL1251_ACX_INTR_TRACE_B		BIT(8)
 
 /* Command processing completion */
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 012e1a4..40372ba 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -1039,6 +1039,9 @@
 
 	if (changed & BSS_CHANGED_BEACON) {
 		beacon = ieee80211_beacon_get(hw, vif);
+		if (!beacon)
+			goto out_sleep;
+
 		ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
 					      beacon->len);
 
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index 13fbeec..c0ce2c8 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -419,7 +419,7 @@
 #define WL1251_FW_NAME "wl1251-fw.bin"
 #define WL1251_NVS_NAME "wl1251-nvs.bin"
 
-#define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */
+#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
 
 #define WL1251_PART_DOWN_MEM_START	0x0
 #define WL1251_PART_DOWN_MEM_SIZE	0x16800
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 9cbc3f4..7bd8e4d 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -47,9 +47,9 @@
 #define WL1271_ACX_INTR_HW_AVAILABLE       BIT(5)
 /* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */
 #define WL1271_ACX_INTR_DATA               BIT(6)
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
 #define WL1271_ACX_INTR_TRACE_A            BIT(7)
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
 #define WL1271_ACX_INTR_TRACE_B            BIT(8)
 
 #define WL1271_ACX_INTR_ALL		   0xFFFFFFFF
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 4671491..7145ea5 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,9 +110,8 @@
 	spi_message_add_tail(&t, &m);
 
 	spi_sync(wl_to_spi(wl), &m);
-	kfree(cmd);
-
 	wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+	kfree(cmd);
 }
 
 static void wl1271_spi_init(struct wl1271 *wl)
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index ce3d31f..9050dd9 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -416,8 +416,8 @@
 
 /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
    on in case is has been shut down shortly before */
-#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
-#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
+#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
 
 /* Macros to handle wl1271.sta_rate_set */
 #define HW_BG_RATES_MASK	0xffff
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ee82df6..3e5befe4 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -192,7 +192,7 @@
 }
 
 /*
- * Get Ethernet MAC addresss.
+ * Get Ethernet MAC address.
  *
  * WARNING: We switch to FPAGE0 and switc back again.
  *          Making sure there is no other WL function beening called by ISR.
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9..da1f121 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+	/* Statistics */
+	int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -488,7 +491,7 @@
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
-		     netif_needs_gso(dev, skb))) {
+		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 		spin_unlock_irq(&np->tx_lock);
 		goto drop;
 	}
@@ -770,11 +773,29 @@
 	return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
 	struct iphdr *iph;
 	unsigned char *th;
 	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/*
+	 * A GSO SKB must be CHECKSUM_PARTIAL. However, some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		struct netfront_info *np = netdev_priv(dev);
+		np->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto out;
@@ -788,9 +809,23 @@
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
 		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = (struct tcphdr *)th;
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_TCP, 0);
+		}
 		break;
 	case IPPROTO_UDP:
 		skb->csum_offset = offsetof(struct udphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = (struct udphdr *)th;
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_UDP, 0);
+		}
 		break;
 	default:
 		if (net_ratelimit())
@@ -829,13 +864,11 @@
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb_checksum_setup(skb)) {
-				kfree_skb(skb);
-				packets_dropped++;
-				dev->stats.rx_errors++;
-				continue;
-			}
+		if (checksum_setup(dev, skb)) {
+			kfree_skb(skb);
+			packets_dropped++;
+			dev->stats.rx_errors++;
+			continue;
 		}
 
 		dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@
 	}
 }
 
+static const struct xennet_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} xennet_stats[] = {
+	{
+		"rx_gso_checksum_fixup",
+		offsetof(struct netfront_info, rx_gso_checksum_fixup)
+	},
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xennet_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 * data)
+{
+	void *np = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+		data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       xennet_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.set_sg = xennet_set_sg,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
+
+	.get_sset_count = xennet_get_sset_count,
+	.get_ethtool_stats = xennet_get_ethtool_stats,
+	.get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
new file mode 100644
index 0000000..ea15800
--- /dev/null
+++ b/drivers/nfc/Kconfig
@@ -0,0 +1,30 @@
+#
+# Near Field Communication (NFC) devices
+#
+
+menuconfig NFC_DEVICES
+	bool "Near Field Communication (NFC) devices"
+	default n
+	---help---
+	  You'll have to say Y if your computer contains an NFC device that
+	  you want to use under Linux.
+
+	  You can say N here if you don't have any Near Field Communication
+	  devices connected to your computer.
+
+if NFC_DEVICES
+
+config PN544_NFC
+	tristate "PN544 NFC driver"
+	depends on I2C
+	select CRC_CCITT
+	default n
+	---help---
+	  Say Y here if you want the PN544 Near Field Communication driver.
+	  This is for the i2c-connected version. If unsure, say N here.
+
+	  To compile this driver as a module, choose m here. The module will
+	  be called pn544.
+
+
+endif # NFC_DEVICES
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
new file mode 100644
index 0000000..a4efb16
--- /dev/null
+++ b/drivers/nfc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for nfc devices
+#
+
+obj-$(CONFIG_PN544_NFC)		+= pn544.o
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
new file mode 100644
index 0000000..724f65d
--- /dev/null
+++ b/drivers/nfc/pn544.c
@@ -0,0 +1,893 @@
+/*
+ * Driver for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/completion.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nfc/pn544.h>
+#include <linux/poll.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serial_core.h> /* for TCGETS */
+#include <linux/slab.h>
+
+#define DRIVER_CARD	"PN544 NFC"
+#define DRIVER_DESC	"NFC driver for PN544"
+
+static struct i2c_device_id pn544_id_table[] = {
+	{ PN544_DRIVER_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, pn544_id_table);
+
+#define HCI_MODE	0
+#define FW_MODE		1
+
+enum pn544_state {
+	PN544_ST_COLD,
+	PN544_ST_FW_READY,
+	PN544_ST_READY,
+};
+
+enum pn544_irq {
+	PN544_NONE,
+	PN544_INT,
+};
+
+struct pn544_info {
+	struct miscdevice miscdev;
+	struct i2c_client *i2c_dev;
+	struct regulator_bulk_data regs[3];
+
+	enum pn544_state state;
+	wait_queue_head_t read_wait;
+	loff_t read_offset;
+	enum pn544_irq read_irq;
+	struct mutex read_mutex; /* Serialize read_irq access */
+	struct mutex mutex; /* Serialize info struct access */
+	u8 *buf;
+	size_t buflen;
+};
+
+static const char reg_vdd_io[]	= "Vdd_IO";
+static const char reg_vbat[]	= "VBat";
+static const char reg_vsim[]	= "VSim";
+
+/* sysfs interface */
+static ssize_t pn544_test(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct pn544_info *info = dev_get_drvdata(dev);
+	struct i2c_client *client = info->i2c_dev;
+	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
+}
+
+static int pn544_enable(struct pn544_info *info, int mode)
+{
+	struct pn544_nfc_platform_data *pdata;
+	struct i2c_client *client = info->i2c_dev;
+
+	int r;
+
+	r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
+	if (r < 0)
+		return r;
+
+	pdata = client->dev.platform_data;
+	info->read_irq = PN544_NONE;
+	if (pdata->enable)
+		pdata->enable(mode);
+
+	if (mode) {
+		info->state = PN544_ST_FW_READY;
+		dev_dbg(&client->dev, "now in FW-mode\n");
+	} else {
+		info->state = PN544_ST_READY;
+		dev_dbg(&client->dev, "now in HCI-mode\n");
+	}
+
+	usleep_range(10000, 15000);
+
+	return 0;
+}
+
+static void pn544_disable(struct pn544_info *info)
+{
+	struct pn544_nfc_platform_data *pdata;
+	struct i2c_client *client = info->i2c_dev;
+
+	pdata = client->dev.platform_data;
+	if (pdata->disable)
+		pdata->disable();
+
+	info->state = PN544_ST_COLD;
+
+	dev_dbg(&client->dev, "Now in OFF-mode\n");
+
+	msleep(PN544_RESETVEN_TIME);
+
+	info->read_irq = PN544_NONE;
+	regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+	u8 len;
+	u16 crc;
+
+	len = buf[0] + 1;
+	if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
+		pr_err(PN544_DRIVER_NAME
+		       ": CRC; corrupt packet len %u (%d)\n", len, buflen);
+		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+			       16, 2, buf, buflen, false);
+		return -EPERM;
+	}
+	crc = crc_ccitt(0xffff, buf, len - 2);
+	crc = ~crc;
+
+	if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
+		pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
+		       crc, buf[len-1], buf[len-2]);
+
+		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+			       16, 2, buf, buflen, false);
+		return -EPERM;
+	}
+	return 0;
+}
+
+static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
+{
+	int r;
+
+	if (len < 4 || len != (buf[0] + 1)) {
+		dev_err(&client->dev, "%s: Illegal message length: %d\n",
+			__func__, len);
+		return -EINVAL;
+	}
+
+	if (check_crc(buf, len))
+		return -EINVAL;
+
+	usleep_range(3000, 6000);
+
+	r = i2c_master_send(client, buf, len);
+	dev_dbg(&client->dev, "send: %d\n", r);
+
+	if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+		usleep_range(6000, 10000);
+		r = i2c_master_send(client, buf, len);
+		dev_dbg(&client->dev, "send2: %d\n", r);
+	}
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	return r;
+}
+
+static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+	int r;
+	u8 len;
+
+	/*
+	 * You could read a packet in one go, but then you'd need to read the
+	 * maximum size and the rest would be 0xff fill, so we do split reads.
+	 */
+	r = i2c_master_recv(client, &len, 1);
+	dev_dbg(&client->dev, "recv1: %d\n", r);
+
+	if (r != 1)
+		return -EREMOTEIO;
+
+	if (len < PN544_LLC_HCI_OVERHEAD)
+		len = PN544_LLC_HCI_OVERHEAD;
+	else if (len > (PN544_MSG_MAX_SIZE - 1))
+		len = PN544_MSG_MAX_SIZE - 1;
+
+	if (1 + len > buflen) /* len+(data+crc16) */
+		return -EMSGSIZE;
+
+	buf[0] = len;
+
+	r = i2c_master_recv(client, buf + 1, len);
+	dev_dbg(&client->dev, "recv2: %d\n", r);
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	usleep_range(3000, 6000);
+
+	return r + 1;
+}
+
+static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
+{
+	int r;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	if (len < PN544_FW_HEADER_SIZE ||
+	    (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
+		return -EINVAL;
+
+	r = i2c_master_send(client, buf, len);
+	dev_dbg(&client->dev, "fw send: %d\n", r);
+
+	if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+		usleep_range(6000, 10000);
+		r = i2c_master_send(client, buf, len);
+		dev_dbg(&client->dev, "fw send2: %d\n", r);
+	}
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	return r;
+}
+
+static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+	int r, len;
+
+	if (buflen < PN544_FW_HEADER_SIZE)
+		return -EINVAL;
+
+	r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
+	dev_dbg(&client->dev, "FW recv1: %d\n", r);
+
+	if (r < 0)
+		return r;
+
+	if (r < PN544_FW_HEADER_SIZE)
+		return -EINVAL;
+
+	len = (buf[1] << 8) + buf[2];
+	if (len == 0) /* just header, no additional data */
+		return r;
+
+	if (len > buflen - PN544_FW_HEADER_SIZE)
+		return -EMSGSIZE;
+
+	r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
+	dev_dbg(&client->dev, "fw recv2: %d\n", r);
+
+	if (r != len)
+		return -EINVAL;
+
+	return r + PN544_FW_HEADER_SIZE;
+}
+
+static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
+{
+	struct pn544_info *info = dev_id;
+	struct i2c_client *client = info->i2c_dev;
+
+	BUG_ON(!info);
+	BUG_ON(irq != info->i2c_dev->irq);
+
+	dev_dbg(&client->dev, "IRQ\n");
+
+	mutex_lock(&info->read_mutex);
+	info->read_irq = PN544_INT;
+	mutex_unlock(&info->read_mutex);
+
+	wake_up_interruptible(&info->read_wait);
+
+	return IRQ_HANDLED;
+}
+
+static enum pn544_irq pn544_irq_state(struct pn544_info *info)
+{
+	enum pn544_irq irq;
+
+	mutex_lock(&info->read_mutex);
+	irq = info->read_irq;
+	mutex_unlock(&info->read_mutex);
+	/*
+	 * XXX: should we check GPIO-line status directly?
+	 * return pdata->irq_status() ? PN544_INT : PN544_NONE;
+	 */
+
+	return irq;
+}
+
+static ssize_t pn544_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *offset)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	enum pn544_irq irq;
+	size_t len;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
+		info, count);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	irq = pn544_irq_state(info);
+	if (irq == PN544_NONE) {
+		if (file->f_flags & O_NONBLOCK) {
+			r = -EAGAIN;
+			goto out;
+		}
+
+		if (wait_event_interruptible(info->read_wait,
+					     (info->read_irq == PN544_INT))) {
+			r = -ERESTARTSYS;
+			goto out;
+		}
+	}
+
+	if (info->state == PN544_ST_FW_READY) {
+		len = min(count, info->buflen);
+
+		mutex_lock(&info->read_mutex);
+		r = pn544_fw_read(info->i2c_dev, info->buf, len);
+		info->read_irq = PN544_NONE;
+		mutex_unlock(&info->read_mutex);
+
+		if (r < 0) {
+			dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, r, false);
+
+		*offset += r;
+		if (copy_to_user(buf, info->buf, r)) {
+			r = -EFAULT;
+			goto out;
+		}
+	} else {
+		len = min(count, info->buflen);
+
+		mutex_lock(&info->read_mutex);
+		r = pn544_i2c_read(info->i2c_dev, info->buf, len);
+		info->read_irq = PN544_NONE;
+		mutex_unlock(&info->read_mutex);
+
+		if (r < 0) {
+			dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
+			goto out;
+		}
+		print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, r, false);
+
+		*offset += r;
+		if (copy_to_user(buf, info->buf, r)) {
+			r = -EFAULT;
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static unsigned int pn544_poll(struct file *file, poll_table *wait)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	poll_wait(file, &info->read_wait, wait);
+
+	if (pn544_irq_state(info) == PN544_INT) {
+		r = POLLIN | POLLRDNORM;
+		goto out;
+	}
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static ssize_t pn544_write(struct file *file, const char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	ssize_t	len;
+	int r;
+
+	dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
+		info, count);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * XXX: should we detect reset writes and clear any pending
+	 * read_irq state?
+	 */
+	if (info->state == PN544_ST_FW_READY) {
+		size_t fw_len;
+
+		if (count < PN544_FW_HEADER_SIZE) {
+			r = -EINVAL;
+			goto out;
+		}
+
+		len = min(count, info->buflen);
+		if (copy_from_user(info->buf, buf, len)) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, len, false);
+
+		fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
+			info->buf[2];
+
+		if (len > fw_len) /* 1 msg at a time */
+			len = fw_len;
+
+		r = pn544_fw_write(info->i2c_dev, info->buf, len);
+	} else {
+		if (count < PN544_LLC_MIN_SIZE) {
+			r = -EINVAL;
+			goto out;
+		}
+
+		len = min(count, info->buflen);
+		if (copy_from_user(info->buf, buf, len)) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, len, false);
+
+		if (len > (info->buf[0] + 1)) /* 1 msg at a time */
+			len  = info->buf[0] + 1;
+
+		r = pn544_i2c_write(info->i2c_dev, info->buf, len);
+	}
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+
+}
+
+static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	struct pn544_nfc_platform_data *pdata;
+	unsigned int val;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	pdata = info->i2c_dev->dev.platform_data;
+	switch (cmd) {
+	case PN544_GET_FW_MODE:
+		dev_dbg(&client->dev, "%s:  PN544_GET_FW_MODE\n", __func__);
+
+		val = (info->state == PN544_ST_FW_READY);
+		if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		break;
+
+	case PN544_SET_FW_MODE:
+		dev_dbg(&client->dev, "%s:  PN544_SET_FW_MODE\n", __func__);
+
+		if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		if (val) {
+			if (info->state == PN544_ST_FW_READY)
+				break;
+
+			pn544_disable(info);
+			r = pn544_enable(info, FW_MODE);
+			if (r < 0)
+				goto out;
+		} else {
+			if (info->state == PN544_ST_READY)
+				break;
+			pn544_disable(info);
+			r = pn544_enable(info, HCI_MODE);
+			if (r < 0)
+				goto out;
+		}
+		file->f_pos = info->read_offset;
+		break;
+
+	case TCGETS:
+		dev_dbg(&client->dev, "%s:  TCGETS\n", __func__);
+
+		r = -ENOIOCTLCMD;
+		break;
+
+	default:
+		dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
+		r = -ENOIOCTLCMD;
+		break;
+	}
+
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static int pn544_open(struct inode *inode, struct file *file)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+		info, info->i2c_dev);
+
+	mutex_lock(&info->mutex);
+
+	/*
+	 * Only 1 at a time.
+	 * XXX: maybe user (counter) would work better
+	 */
+	if (info->state != PN544_ST_COLD) {
+		r = -EBUSY;
+		goto out;
+	}
+
+	file->f_pos = info->read_offset;
+	r = pn544_enable(info, HCI_MODE);
+
+out:
+	mutex_unlock(&info->mutex);
+	return r;
+}
+
+static int pn544_close(struct inode *inode, struct file *file)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n",
+		__func__, info, info->i2c_dev);
+
+	mutex_lock(&info->mutex);
+	pn544_disable(info);
+	mutex_unlock(&info->mutex);
+
+	return 0;
+}
+
+static const struct file_operations pn544_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.read		= pn544_read,
+	.write		= pn544_write,
+	.poll		= pn544_poll,
+	.open		= pn544_open,
+	.release	= pn544_close,
+	.unlocked_ioctl	= pn544_ioctl,
+};
+
+#ifdef CONFIG_PM
+static int pn544_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct pn544_info *info;
+	int r = 0;
+
+	dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
+
+	info = i2c_get_clientdata(client);
+	dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
+		 info, client);
+
+	mutex_lock(&info->mutex);
+
+	switch (info->state) {
+	case PN544_ST_FW_READY:
+		/* Do not suspend while upgrading FW, please! */
+		r = -EPERM;
+		break;
+
+	case PN544_ST_READY:
+		/*
+		 * CHECK: Device should be in standby-mode. No way to check?
+		 * Allowing low power mode for the regulator is potentially
+		 * dangerous if the pn544 does not actually enter suspend.
+		 */
+		break;
+
+	case PN544_ST_COLD:
+		break;
+	}
+
+	mutex_unlock(&info->mutex);
+	return r;
+}
+
+static int pn544_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct pn544_info *info = i2c_get_clientdata(client);
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+		info, client);
+
+	mutex_lock(&info->mutex);
+
+	switch (info->state) {
+	case PN544_ST_READY:
+		/*
+		 * CHECK: If regulator low power mode is allowed in
+		 * pn544_suspend, we should go back to normal mode
+		 * here.
+		 */
+		break;
+
+	case PN544_ST_COLD:
+		break;
+
+	case PN544_ST_FW_READY:
+		break;
+	}
+
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
+#endif
+
+static struct device_attribute pn544_attr =
+	__ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
+
+static int __devinit pn544_probe(struct i2c_client *client,
+				 const struct i2c_device_id *id)
+{
+	struct pn544_info *info;
+	struct pn544_nfc_platform_data *pdata;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+	/* private data allocation */
+	info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(&client->dev,
+			"Cannot allocate memory for pn544_info.\n");
+		r = -ENOMEM;
+		goto err_info_alloc;
+	}
+
+	info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
+	info->buf = kzalloc(info->buflen, GFP_KERNEL);
+	if (!info->buf) {
+		dev_err(&client->dev,
+			"Cannot allocate memory for pn544_info->buf.\n");
+		r = -ENOMEM;
+		goto err_buf_alloc;
+	}
+
+	info->regs[0].supply = reg_vdd_io;
+	info->regs[1].supply = reg_vbat;
+	info->regs[2].supply = reg_vsim;
+	r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
+				 info->regs);
+	if (r < 0)
+		goto err_kmalloc;
+
+	info->i2c_dev = client;
+	info->state = PN544_ST_COLD;
+	info->read_irq = PN544_NONE;
+	mutex_init(&info->read_mutex);
+	mutex_init(&info->mutex);
+	init_waitqueue_head(&info->read_wait);
+	i2c_set_clientdata(client, info);
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		dev_err(&client->dev, "No platform data\n");
+		r = -EINVAL;
+		goto err_reg;
+	}
+
+	if (!pdata->request_resources) {
+		dev_err(&client->dev, "request_resources() missing\n");
+		r = -EINVAL;
+		goto err_reg;
+	}
+
+	r = pdata->request_resources(client);
+	if (r) {
+		dev_err(&client->dev, "Cannot get platform resources\n");
+		goto err_reg;
+	}
+
+	r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
+				 IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
+				 info);
+	if (r < 0) {
+		dev_err(&client->dev, "Unable to register IRQ handler\n");
+		goto err_res;
+	}
+
+	/* If we don't have the test we don't need the sysfs file */
+	if (pdata->test) {
+		r = device_create_file(&client->dev, &pn544_attr);
+		if (r) {
+			dev_err(&client->dev,
+				"sysfs registration failed, error %d\n", r);
+			goto err_irq;
+		}
+	}
+
+	info->miscdev.minor = MISC_DYNAMIC_MINOR;
+	info->miscdev.name = PN544_DRIVER_NAME;
+	info->miscdev.fops = &pn544_fops;
+	info->miscdev.parent = &client->dev;
+	r = misc_register(&info->miscdev);
+	if (r < 0) {
+		dev_err(&client->dev, "Device registration failed\n");
+		goto err_sysfs;
+	}
+
+	dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
+		__func__, info, pdata, client);
+
+	return 0;
+
+err_sysfs:
+	if (pdata->test)
+		device_remove_file(&client->dev, &pn544_attr);
+err_irq:
+	free_irq(client->irq, info);
+err_res:
+	if (pdata->free_resources)
+		pdata->free_resources();
+err_reg:
+	regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+err_kmalloc:
+	kfree(info->buf);
+err_buf_alloc:
+	kfree(info);
+err_info_alloc:
+	return r;
+}
+
+static __devexit int pn544_remove(struct i2c_client *client)
+{
+	struct pn544_info *info = i2c_get_clientdata(client);
+	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	misc_deregister(&info->miscdev);
+	if (pdata->test)
+		device_remove_file(&client->dev, &pn544_attr);
+
+	if (info->state != PN544_ST_COLD) {
+		if (pdata->disable)
+			pdata->disable();
+
+		info->read_irq = PN544_NONE;
+	}
+
+	free_irq(client->irq, info);
+	if (pdata->free_resources)
+		pdata->free_resources();
+
+	regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+	kfree(info->buf);
+	kfree(info);
+
+	return 0;
+}
+
+static struct i2c_driver pn544_driver = {
+	.driver = {
+		.name = PN544_DRIVER_NAME,
+#ifdef CONFIG_PM
+		.pm = &pn544_pm_ops,
+#endif
+	},
+	.probe = pn544_probe,
+	.id_table = pn544_id_table,
+	.remove = __devexit_p(pn544_remove),
+};
+
+static int __init pn544_init(void)
+{
+	int r;
+
+	pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+	r = i2c_add_driver(&pn544_driver);
+	if (r) {
+		pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+static void __exit pn544_exit(void)
+{
+	i2c_del_driver(&pn544_driver);
+	pr_info(DRIVER_DESC ", Exiting.\n");
+}
+
+module_init(pn544_init);
+module_exit(pn544_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c787c3d..af824e7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -692,12 +692,6 @@
 	return 1;
 }
 
-static void *__init early_device_tree_alloc(u64 size, u64 align)
-{
-	unsigned long mem = early_init_dt_alloc_memory_arch(size, align);
-	return __va(mem);
-}
-
 /**
  * unflatten_device_tree - create tree of device_nodes from flat blob
  *
@@ -709,7 +703,7 @@
 void __init unflatten_device_tree(void)
 {
 	__unflatten_device_tree(initial_boot_params, &allnodes,
-				early_device_tree_alloc);
+				early_init_dt_alloc_memory_arch);
 
 	/* Get pointer to OF "/chosen" node for use everywhere */
 	of_chosen = of_find_node_by_path("/chosen");
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0..4d87b5d 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@
 	(p)->unique_id = of_pdt_unique_id++; \
 } while (0)
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-	return dp->path_component_name;
+	int len, ourlen, plen;
+	char *n;
+
+	dp->path_component_name = build_path_component(dp);
+
+	plen = strlen(dp->parent->full_name);
+	ourlen = strlen(dp->path_component_name);
+	len = ourlen + plen + 2;
+
+	n = prom_early_alloc(len);
+	strcpy(n, dp->parent->full_name);
+	if (!of_node_is_root(dp->parent)) {
+		strcpy(n + plen, "/");
+		plen++;
+	}
+	strcpy(n + plen, dp->path_component_name);
+
+	return n;
 }
 
-#else
+#else /* CONFIG_SPARC */
 
 static inline void of_pdt_incr_unique_id(void *p) { }
 static inline void irq_trans_init(struct device_node *dp) { }
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-	return dp->name;
+	static int failsafe_id; /* for generating unique names on failure */
+	char *buf;
+	int len;
+
+	if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+		goto failsafe;
+
+	buf = prom_early_alloc(len + 1);
+	if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+		goto failsafe;
+	return buf;
+
+ failsafe:
+	buf = prom_early_alloc(strlen(dp->parent->full_name) +
+			       strlen(dp->name) + 16);
+	sprintf(buf, "%s/%s@unknown%i",
+		of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+		dp->name, failsafe_id++);
+	pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+	return buf;
 }
 
 #endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@
 	return buf;
 }
 
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
-	char *res, *buf = NULL;
-	int len;
-
-	if (!of_pdt_prom_ops->pkg2path)
-		return NULL;
-
-	if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
-		return NULL;
-	buf = prom_early_alloc(len + 1);
-	if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
-		pr_err("%s: package-to-path failed\n", __func__);
-		return NULL;
-	}
-
-	res = strrchr(buf, '/');
-	if (!res) {
-		pr_err("%s: couldn't find / in %s\n", __func__, buf);
-		return NULL;
-	}
-	return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
-	char *buf;
-
-	buf = of_pdt_try_pkg2path(node);
-	if (!buf)
-		buf = of_pdt_get_one_property(node, "name");
-
-	return buf;
-}
-
 static struct device_node * __init of_pdt_create_node(phandle node,
 						    struct device_node *parent)
 {
@@ -187,7 +182,7 @@
 
 	kref_init(&dp->kref);
 
-	dp->name = of_pdt_build_name(node);
+	dp->name = of_pdt_get_one_property(node, "name");
 	dp->type = of_pdt_get_one_property(node, "device_type");
 	dp->phandle = node;
 
@@ -198,26 +193,6 @@
 	return dp;
 }
 
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
-	int len, ourlen, plen;
-	char *n;
-
-	plen = strlen(dp->parent->full_name);
-	ourlen = strlen(of_pdt_node_name(dp));
-	len = ourlen + plen + 2;
-
-	n = prom_early_alloc(len);
-	strcpy(n, dp->parent->full_name);
-	if (!of_node_is_root(dp->parent)) {
-		strcpy(n + plen, "/");
-		plen++;
-	}
-	strcpy(n + plen, of_pdt_node_name(dp));
-
-	return n;
-}
-
 static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
 						   phandle node,
 						   struct device_node ***nextp)
@@ -240,9 +215,6 @@
 		*(*nextp) = dp;
 		*nextp = &dp->allnext;
 
-#if defined(CONFIG_SPARC)
-		dp->path_component_name = build_path_component(dp);
-#endif
 		dp->full_name = of_pdt_build_full_name(dp);
 
 		dp->child = of_pdt_build_tree(dp,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index a2d9d1e..a848e02 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -678,7 +678,7 @@
 
 	/* Make sure we haven't left any pointers around in the wait
 	 * list. */
-	spin_lock (&port->waitlist_lock);
+	spin_lock_irq(&port->waitlist_lock);
 	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
 		if (dev->waitprev)
 			dev->waitprev->waitnext = dev->waitnext;
@@ -689,7 +689,7 @@
 		else
 			port->waittail = dev->waitprev;
 	}
-	spin_unlock (&port->waitlist_lock);
+	spin_unlock_irq(&port->waitlist_lock);
 
 	kfree(dev->state);
 	kfree(dev);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5b1630e..a9523fd 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -45,6 +45,7 @@
         depends on PCI && X86 && XEN
         select HOTPLUG
         select PCI_XEN
+	select XEN_XENBUS_FRONTEND
         default y
         help
           The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7c24dce..44b0aee 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -168,8 +168,9 @@
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
-	mask_bits &= ~1;
-	mask_bits |= flag;
+	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	if (flag)
+		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
 	writel(mask_bits, desc->mask_base + offset);
 
 	return mask_bits;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index feff3be..65c42f8 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,12 +6,6 @@
 #ifndef MSI_H
 #define MSI_H
 
-#define PCI_MSIX_ENTRY_SIZE		16
-#define  PCI_MSIX_ENTRY_LOWER_ADDR	0
-#define  PCI_MSIX_ENTRY_UPPER_ADDR	4
-#define  PCI_MSIX_ENTRY_DATA		8
-#define  PCI_MSIX_ENTRY_VECTOR_CTRL	12
-
 #define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
 #define msi_lower_address_reg(base)	(base + PCI_MSI_ADDRESS_LO)
 #define msi_upper_address_reg(base)	(base + PCI_MSI_ADDRESS_HI)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 24e19c5..6fe0772 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -46,9 +46,9 @@
 	struct pci_dev *pci_dev = context;
 
 	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+		pci_wakeup_event(pci_dev);
 		pci_check_pme_status(pci_dev);
 		pm_runtime_resume(&pci_dev->dev);
-		pci_wakeup_event(pci_dev);
 		if (pci_dev->subordinate)
 			pci_pme_wakeup_bus(pci_dev->subordinate);
 	}
@@ -399,6 +399,7 @@
 
 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
 		printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
+		pcie_clear_aspm();
 		pcie_no_aspm();
 	}
 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8a6f797..88246dd 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -338,7 +338,7 @@
 }
 
 /**
- * __pci_device_probe()
+ * __pci_device_probe - check if a driver wants to claim a specific PCI device
  * @drv: driver to call to check if it wants the PCI device
  * @pci_dev: PCI device being probed
  * 
@@ -449,7 +449,8 @@
 			return error;
 	}
 
-	return pci_restore_state(pci_dev);
+	pci_restore_state(pci_dev);
+	return 0;
 }
 
 static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index f7b68ca..775e933 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -47,6 +47,10 @@
 	if (rc)
 		return rc;
 
+	/* no ids were actually passed */
+	if (ids[0] == '\0')
+		return 0;
+
 	/* add ids specified in the module parameter */
 	p = ids;
 	while ((id = strsep(&p, ","))) {
@@ -54,6 +58,9 @@
 			subdevice = PCI_ANY_ID, class=0, class_mask=0;
 		int fields;
 
+		if (!strlen(id))
+			continue;
+
 		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
 				&vendor, &device, &subvendor, &subdevice,
 				&class, &class_mask);
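
For reference, the sscanf() format above means the ids= module parameter takes comma-separated entries of the form vendor:device[:subvendor[:subdevice[:class[:class_mask]]]], all in hexadecimal, with omitted fields defaulting to PCI_ANY_ID (or 0 for class/class_mask). The two checks added in this hunk simply skip an empty ids= string and empty entries left by stray commas instead of handing them to sscanf().
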
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 63d5042..ea25e5b 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include "pci.h"
@@ -368,7 +369,7 @@
 	u8 *data = (u8*) buf;
 
 	/* Several chips lock up trying to read undefined config space */
-	if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
+	if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
 		size = dev->cfg_size;
 	} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
 		size = 128;
@@ -1149,7 +1150,7 @@
 		sysfs_bin_attr_init(attr);
 		attr->size = rom_size;
 		attr->attr.name = "rom";
-		attr->attr.mode = S_IRUSR;
+		attr->attr.mode = S_IRUSR | S_IWUSR;
 		attr->read = pci_read_rom;
 		attr->write = pci_write_rom;
 		retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 710c8a2..b714d78 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -937,14 +937,13 @@
  * pci_restore_state - Restore the saved state of a PCI device
  * @dev: - PCI device that we're dealing with
  */
-int 
-pci_restore_state(struct pci_dev *dev)
+void pci_restore_state(struct pci_dev *dev)
 {
 	int i;
 	u32 val;
 
 	if (!dev->state_saved)
-		return 0;
+		return;
 
 	/* PCI Express register must be restored first */
 	pci_restore_pcie_state(dev);
@@ -968,8 +967,6 @@
 	pci_restore_iov_state(dev);
 
 	dev->state_saved = false;
-
-	return 0;
 }
 
 static int do_pci_enable_device(struct pci_dev *dev, int bars)
@@ -1300,22 +1297,6 @@
 	return ret;
 }
 
-/*
- * Time to wait before the system can be put into a sleep state after reporting
- * a wakeup event signaled by a PCI device.
- */
-#define PCI_WAKEUP_COOLDOWN	100
-
-/**
- * pci_wakeup_event - Report a wakeup event related to a given PCI device.
- * @dev: Device to report the wakeup event for.
- */
-void pci_wakeup_event(struct pci_dev *dev)
-{
-	if (device_may_wakeup(&dev->dev))
-		pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
-}
-
 /**
  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
  * @dev: Device to handle.
@@ -1327,8 +1308,8 @@
 static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
 {
 	if (pci_check_pme_status(dev)) {
-		pm_request_resume(&dev->dev);
 		pci_wakeup_event(dev);
+		pm_request_resume(&dev->dev);
 	}
 	return 0;
 }
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7d33f66..f69d6e0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -74,6 +74,12 @@
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
 
+static inline void pci_wakeup_event(struct pci_dev *dev)
+{
+	/* Wait 100 ms before the system can be put into a sleep state. */
+	pm_wakeup_event(&dev->dev, 100);
+}
+
 static inline bool pci_is_bridge(struct pci_dev *pci_dev)
 {
 	return !!(pci_dev->subordinate);
@@ -140,14 +146,6 @@
 static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
 #endif
 
-#ifdef CONFIG_PCIEAER
-void pci_no_aer(void);
-bool pci_aer_available(void);
-#else
-static inline void pci_no_aer(void) { }
-static inline bool pci_aer_available(void) { return false; }
-#endif
-
 static inline int pci_no_d1d2(struct pci_dev *dev)
 {
 	unsigned int parent_dstates = 0;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dda7098..dc29348 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -31,7 +31,7 @@
 # PCI Express ASPM
 #
 config PCIEASPM
-	bool "PCI Express ASPM control" if EMBEDDED
+	bool "PCI Express ASPM control" if EXPERT
 	depends on PCI && PCIEPORTBUS
 	default y
 	help
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 2b2b650..58ad791 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -17,6 +17,7 @@
 
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pci-acpi.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 9656e30..80c11d1 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -132,7 +132,6 @@
 
 #ifdef CONFIG_ACPI_APEI
 extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
-extern bool aer_acpi_firmware_first(void);
 #else
 static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
 {
@@ -140,8 +139,6 @@
 		return pci_dev->__aer_firmware_first;
 	return 0;
 }
-
-static inline bool aer_acpi_firmware_first(void) { return false; }
 #endif
 
 static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7122281..3188cd9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -68,7 +68,7 @@
 	struct aspm_latency acceptable[8];
 };
 
-static int aspm_disabled, aspm_force;
+static int aspm_disabled, aspm_force, aspm_clear_state;
 static DEFINE_MUTEX(aspm_lock);
 static LIST_HEAD(link_list);
 
@@ -139,7 +139,7 @@
 {
 	/* Don't enable Clock PM if the link is not Clock PM capable */
 	if (!link->clkpm_capable && enable)
-		return;
+		enable = 0;
 	/* Need nothing if the specified equals to current state */
 	if (link->clkpm_enabled == enable)
 		return;
@@ -498,6 +498,10 @@
 	struct pci_dev *child;
 	int pos;
 	u32 reg32;
+
+	if (aspm_clear_state)
+		return -EINVAL;
+
 	/*
 	 * Some functions in a slot might not all be PCIe functions,
 	 * very strange. Disable ASPM for the whole slot
@@ -563,12 +567,15 @@
 	struct pcie_link_state *link;
 	int blacklist = !!pcie_aspm_sanity_check(pdev);
 
-	if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state)
+	if (!pci_is_pcie(pdev) || pdev->link_state)
 		return;
 	if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
 	    pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
 		return;
 
+	if (aspm_disabled && !aspm_clear_state)
+		return;
+
 	/* VIA has a strange chipset, root port is under a bridge */
 	if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
 	    pdev->bus->self)
@@ -641,7 +648,7 @@
 	struct pci_dev *parent = pdev->bus->self;
 	struct pcie_link_state *link, *root, *parent_link;
 
-	if (aspm_disabled || !pci_is_pcie(pdev) ||
+	if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
 	    !parent || !parent->link_state)
 		return;
 	if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
@@ -899,6 +906,12 @@
 
 __setup("pcie_aspm=", pcie_aspm_disable);
 
+void pcie_clear_aspm(void)
+{
+	if (!aspm_force)
+		aspm_clear_state = 1;
+}
+
 void pcie_no_aspm(void)
 {
 	if (!aspm_force)
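
Note the distinction between the two entry points touched here: pcie_no_aspm() still only prevents the kernel from enabling ASPM, while the new pcie_clear_aspm() sets aspm_clear_state so that pcie_aspm_init_link_state() runs even when aspm_disabled is set and pcie_aspm_sanity_check() returns -EINVAL, treating every link as blacklisted. Together with the pci-acpi.c hunk earlier in this series, a FADT that declares ASPM unsupported now causes the kernel to actively clear ASPM state the firmware may have left enabled, rather than merely leaving it untouched (the existing blacklist handling elsewhere in this file is what disables ASPM on such links).
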
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 2f3c904..0057344 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -26,9 +26,6 @@
 #include "../pci.h"
 #include "portdrv.h"
 
-#define PCI_EXP_RTSTA_PME	0x10000 /* PME status */
-#define PCI_EXP_RTSTA_PENDING	0x20000 /* PME pending */
-
 /*
  * If this switch is set, MSI will not be used for PCIe PME signaling.  This
  * causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -74,22 +71,6 @@
 }
 
 /**
- * pcie_pme_clear_status - Clear root port PME interrupt status.
- * @dev: PCIe root port or event collector.
- */
-static void pcie_pme_clear_status(struct pci_dev *dev)
-{
-	int rtsta_pos;
-	u32 rtsta;
-
-	rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
-
-	pci_read_config_dword(dev, rtsta_pos, &rtsta);
-	rtsta |= PCI_EXP_RTSTA_PME;
-	pci_write_config_dword(dev, rtsta_pos, rtsta);
-}
-
-/**
  * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
  * @bus: PCI bus to scan.
  *
@@ -103,8 +84,8 @@
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		/* Skip PCIe devices in case we started from a root port. */
 		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
-			pm_request_resume(&dev->dev);
 			pci_wakeup_event(dev);
+			pm_request_resume(&dev->dev);
 			ret = true;
 		}
 
@@ -206,8 +187,8 @@
 		/* The device is there, but we have to check its PME status. */
 		found = pci_check_pme_status(dev);
 		if (found) {
-			pm_request_resume(&dev->dev);
 			pci_wakeup_event(dev);
+			pm_request_resume(&dev->dev);
 		}
 		pci_dev_put(dev);
 	} else if (devfn) {
@@ -253,7 +234,7 @@
 			 * Clear PME status of the port.  If there are other
 			 * pending PMEs, the status will be set again.
 			 */
-			pcie_pme_clear_status(port);
+			pcie_clear_root_pme_status(port);
 
 			spin_unlock_irq(&data->lock);
 			pcie_pme_handle_request(port, rtsta & 0xffff);
@@ -378,7 +359,7 @@
 
 	port = srv->port;
 	pcie_pme_interrupt_enable(port, false);
-	pcie_pme_clear_status(port);
+	pcie_clear_root_pme_status(port);
 
 	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
 	if (ret) {
@@ -402,7 +383,7 @@
 
 	spin_lock_irq(&data->lock);
 	pcie_pme_interrupt_enable(port, false);
-	pcie_pme_clear_status(port);
+	pcie_clear_root_pme_status(port);
 	data->noirq = true;
 	spin_unlock_irq(&data->lock);
 
@@ -422,7 +403,7 @@
 
 	spin_lock_irq(&data->lock);
 	data->noirq = false;
-	pcie_pme_clear_status(port);
+	pcie_clear_root_pme_status(port);
 	pcie_pme_interrupt_enable(port, true);
 	spin_unlock_irq(&data->lock);
 
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 7b5aba0..bd00a01 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,9 +20,6 @@
 
 #define get_descriptor_id(type, service) (((type - 4) << 4) | service)
 
-extern bool pcie_ports_disabled;
-extern bool pcie_ports_auto;
-
 extern struct bus_type pcie_port_bus_type;
 extern int pcie_port_device_register(struct pci_dev *dev);
 #ifdef CONFIG_PM
@@ -35,6 +32,8 @@
 
 struct pci_dev;
 
+extern void pcie_clear_root_pme_status(struct pci_dev *dev);
+
 #ifdef CONFIG_PCIE_PME
 extern bool pcie_pme_msi_disabled;
 
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index 5982b6a..a86b56e 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -33,7 +33,7 @@
  */
 int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
 {
-	acpi_status status;
+	struct acpi_pci_root *root;
 	acpi_handle handle;
 	u32 flags;
 
@@ -44,26 +44,11 @@
 	if (!handle)
 		return -EINVAL;
 
-	flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
-		| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
-		| OSC_PCI_EXPRESS_PME_CONTROL;
-
-	if (pci_aer_available()) {
-		if (aer_acpi_firmware_first())
-			dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
-		else
-			flags |= OSC_PCI_EXPRESS_AER_CONTROL;
-	}
-
-	status = acpi_pci_osc_control_set(handle, &flags,
-					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-	if (ACPI_FAILURE(status)) {
-		dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
-			status);
+	root = acpi_pci_find_root(handle);
+	if (!root)
 		return -ENODEV;
-	}
 
-	dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+	flags = root->osc_control_set;
 
 	*srv_mask = PCIE_PORT_SERVICE_VC;
 	if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index a9c222d..5130d0d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -241,17 +241,17 @@
 	int cap_mask;
 	int err;
 
+	if (pcie_ports_disabled)
+		return 0;
+
 	err = pcie_port_platform_notify(dev, &cap_mask);
-	if (pcie_ports_auto) {
-		if (err) {
-			pcie_no_aspm();
-			return 0;
-		}
-	} else {
+	if (!pcie_ports_auto) {
 		cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
 				| PCIE_PORT_SERVICE_VC;
 		if (pci_aer_available())
 			cap_mask |= PCIE_PORT_SERVICE_AER;
+	} else if (err) {
+		return 0;
 	}
 
 	pos = pci_pcie_cap(dev);
@@ -349,15 +349,18 @@
 	int status, capabilities, i, nr_service;
 	int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
 
-	/* Get and check PCI Express port services */
-	capabilities = get_port_device_capability(dev);
-	if (!capabilities)
-		return -ENODEV;
-
 	/* Enable PCI Express port device */
 	status = pci_enable_device(dev);
 	if (status)
 		return status;
+
+	/* Get and check PCI Express port services */
+	capabilities = get_port_device_capability(dev);
+	if (!capabilities) {
+		pcie_no_aspm();
+		return 0;
+	}
+
 	pci_set_master(dev);
 	/*
 	 * Initialize service irqs. Don't use service devices that
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f9033e1..e0610bd 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -57,6 +57,22 @@
 
 /* global data */
 
+/**
+ * pcie_clear_root_pme_status - Clear root port PME interrupt status.
+ * @dev: PCIe root port or event collector.
+ */
+void pcie_clear_root_pme_status(struct pci_dev *dev)
+{
+	int rtsta_pos;
+	u32 rtsta;
+
+	rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
+
+	pci_read_config_dword(dev, rtsta_pos, &rtsta);
+	rtsta |= PCI_EXP_RTSTA_PME;
+	pci_write_config_dword(dev, rtsta_pos, rtsta);
+}
+
 static int pcie_portdrv_restore_config(struct pci_dev *dev)
 {
 	int retval;
@@ -69,6 +85,20 @@
 }
 
 #ifdef CONFIG_PM
+static int pcie_port_resume_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	/*
+	 * Some BIOSes forget to clear Root PME Status bits after system wakeup
+	 * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
+	 * bits now just in case (shouldn't hurt).
+	 */
+	if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+		pcie_clear_root_pme_status(pdev);
+	return 0;
+}
+
 static const struct dev_pm_ops pcie_portdrv_pm_ops = {
 	.suspend	= pcie_port_device_suspend,
 	.resume		= pcie_port_device_resume,
@@ -76,6 +106,7 @@
 	.thaw		= pcie_port_device_resume,
 	.poweroff	= pcie_port_device_suspend,
 	.restore	= pcie_port_device_resume,
+	.resume_noirq	= pcie_port_resume_noirq,
 };
 
 #define PCIE_PORTDRV_PM_OPS	(&pcie_portdrv_pm_ops)
@@ -327,10 +358,8 @@
 {
 	int retval;
 
-	if (pcie_ports_disabled) {
-		pcie_no_aspm();
-		return -EACCES;
-	}
+	if (pcie_ports_disabled)
+		return pci_register_driver(&pcie_portdriver);
 
 	dmi_check_system(pcie_portdrv_dmi_table);
 
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index de886f3..6e318ce 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,7 +69,7 @@
 config YENTA
 	tristate "CardBus yenta-compatible bridge support"
 	depends on PCI
-	select CARDBUS if !EMBEDDED
+	select CARDBUS if !EXPERT
 	select PCCARD_NONSTATIC if PCMCIA != n
 	---help---
 	  This option enables support for CardBus host bridges.  Virtually
@@ -84,27 +84,27 @@
 
 config YENTA_O2
 	default y
-	bool "Special initialization for O2Micro bridges" if EMBEDDED
+	bool "Special initialization for O2Micro bridges" if EXPERT
 	depends on YENTA
 
 config YENTA_RICOH
 	default y
-	bool "Special initialization for Ricoh bridges" if EMBEDDED
+	bool "Special initialization for Ricoh bridges" if EXPERT
 	depends on YENTA
 
 config YENTA_TI
 	default y
-	bool "Special initialization for TI and EnE bridges" if EMBEDDED
+	bool "Special initialization for TI and EnE bridges" if EXPERT
 	depends on YENTA
 
 config YENTA_ENE_TUNE
 	default y
-	bool "Auto-tune EnE bridges for CB cards" if EMBEDDED
+	bool "Auto-tune EnE bridges for CB cards" if EXPERT
 	depends on YENTA_TI && CARDBUS
 
 config YENTA_TOSHIBA
 	default y
-	bool "Special initialization for Toshiba ToPIC bridges" if EMBEDDED
+	bool "Special initialization for Toshiba ToPIC bridges" if EXPERT
 	depends on YENTA
 
 config PD6729
diff --git a/drivers/pcmcia/m32r_cfc.h b/drivers/pcmcia/m32r_cfc.h
index 8146e3b..f558e1a 100644
--- a/drivers/pcmcia/m32r_cfc.h
+++ b/drivers/pcmcia/m32r_cfc.h
@@ -9,7 +9,7 @@
 #endif
 
 /*
- * M32R PC Card Controler
+ * M32R PC Card Controller
  */
 #define M32R_PCC0_BASE        0x00ef7000
 #define M32R_PCC1_BASE        0x00ef7020
diff --git a/drivers/pcmcia/m32r_pcc.h b/drivers/pcmcia/m32r_pcc.h
index e4fffe4..f95c585 100644
--- a/drivers/pcmcia/m32r_pcc.h
+++ b/drivers/pcmcia/m32r_pcc.h
@@ -5,7 +5,7 @@
 #define M32R_MAX_PCC	2
 
 /*
- * M32R PC Card Controler
+ * M32R PC Card Controller
  */
 #define M32R_PCC0_BASE        0x00ef7000
 #define M32R_PCC1_BASE        0x00ef7020
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 99d4f23..0db4827 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1198,7 +1198,7 @@
 	out_be32(M8XX_PGCRX(1),
 		 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
 
-	/* intialize the fixed memory windows */
+	/* initialize the fixed memory windows */
 
 	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
 		for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b3..42fbf1a 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@
 		flags |= CONF_ENABLE_IOCARD;
 	if (flags & CONF_ENABLE_IOCARD)
 		s->socket.flags |= SS_IOCARD;
+	if (flags & CONF_ENABLE_ZVCARD)
+		s->socket.flags |= SS_ZVCARD | SS_IOCARD;
 	if (flags & CONF_ENABLE_SPKR) {
 		s->socket.flags |= SS_SPKR_ENA;
 		status = CCSR_AUDIO_ENA;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c..2c54054 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@
 }
 #endif
 
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
 {
 	struct pcmcia_low_level *ops = dev->platform_data;
 	/*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea8..b609b45 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
 
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index c3f7219..a520395 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -181,6 +181,9 @@
 {
 	int ret;
 
+	if (!machine_is_colibri() && !machine_is_colibri320())
+		return -ENODEV;
+
 	colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
 	if (!colibri_pcmcia_device)
 		return -ENOMEM;
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8f..25afe63 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@
 		lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
 
 		pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+		pxa2xx_configure_sockets(&sadev->dev);
 		ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
 				pxa2xx_drv_pcmcia_add_one);
 	}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d163bc2..a59af5b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -227,7 +227,7 @@
 config IDEAPAD_LAPTOP
 	tristate "Lenovo IdeaPad Laptop Extras"
 	depends on ACPI
-	depends on RFKILL
+	depends on RFKILL && INPUT
 	select INPUT_SPARSEKMAP
 	help
 	  This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ee40d68..38b34a7 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -84,7 +84,7 @@
  */
 #define AMW0_GUID1		"67C3371D-95A3-4C37-BB61-DD47B491DAAB"
 #define AMW0_GUID2		"431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1		"6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1		"6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
 #define WMID_GUID2		"95764E09-FB56-4e83-B31A-37761F60994A"
 #define WMID_GUID3		"61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
 
@@ -1021,7 +1021,7 @@
 	return 0;
 }
 
-static struct backlight_ops acer_bl_ops = {
+static const struct backlight_ops acer_bl_ops = {
 	.get_brightness = read_brightness,
 	.update_status = update_bl_status,
 };
@@ -1280,7 +1280,7 @@
 			return -EINVAL;
 	return count;
 }
-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
 	set_bool_threeg);
 
 static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d235f44..f3aa6a7 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -640,7 +640,7 @@
 	return asus_lcd_set(asus, value);
 }
 
-static struct backlight_ops asusbl_ops = {
+static const struct backlight_ops asusbl_ops = {
 	.get_brightness = asus_read_brightness,
 	.update_status = update_bl_status,
 };
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ca05aef..fe49593 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@
 	struct proc_dir_entry *proc;
 	mode_t mode;
 
-	/*
-	 * If parameter uid or gid is not changed, keep the default setting for
-	 * our proc entries (-rw-rw-rw-) else, it means we care about security,
-	 * and then set to -rw-rw----
-	 */
-
 	if ((asus_uid == 0) && (asus_gid == 0)) {
-		mode = S_IFREG | S_IRUGO | S_IWUGO;
+		mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
 	} else {
 		mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
 		printk(KERN_WARNING "  asus_uid and asus_gid parameters are "
@@ -1467,7 +1461,7 @@
 	return 0;
 }
 
-static struct backlight_ops asus_backlight_data = {
+static const struct backlight_ops asus_backlight_data = {
 	.get_brightness = read_brightness,
 	.update_status  = set_brightness_status,
 };
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index cf8a89a..ad24ef3 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@
 	dell_send_request(buffer, 17, 11);
 
 	/* If the hardware switch controls this radio, and the hardware
-	   switch is disabled, don't allow changing the software state */
+	   switch is disabled, don't allow changing the software state.
+	   If the hardware switch is reported as not supported, always
+	   fire the SMI to toggle the killswitch. */
 	if ((hwswitch_state & BIT(hwswitch_bit)) &&
-	    !(buffer->output[1] & BIT(16))) {
+	    !(buffer->output[1] & BIT(16)) &&
+	    (buffer->output[1] & BIT(0))) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -398,6 +401,23 @@
 
 static void dell_update_rfkill(struct work_struct *ignored)
 {
+	int status;
+
+	get_buffer();
+	dell_send_request(buffer, 17, 11);
+	status = buffer->output[1];
+	release_buffer();
+
+	/* if hardware rfkill is not supported, set it explicitly */
+	if (!(status & BIT(0))) {
+		if (wifi_rfkill)
+			dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
+		if (bluetooth_rfkill)
+			dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
+		if (wwan_rfkill)
+			dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
+	}
+
 	if (wifi_rfkill)
 		dell_rfkill_query(wifi_rfkill, (void *)1);
 	if (bluetooth_rfkill)
@@ -546,7 +566,7 @@
 	return buffer->output[1];
 }
 
-static struct backlight_ops dell_ops = {
+static const struct backlight_ops dell_ops = {
 	.get_brightness = dell_get_intensity,
 	.update_status  = dell_send_intensity,
 };
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e9fc530..49d9ad7 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1126,7 +1126,7 @@
 	return set_brightness(bd, bd->props.brightness);
 }
 
-static struct backlight_ops eeepcbl_ops = {
+static const struct backlight_ops eeepcbl_ops = {
 	.get_brightness = read_brightness,
 	.update_status = update_bl_status,
 };
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ad88b2e..95e3b09 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -437,7 +437,7 @@
 	return ret;
 }
 
-static struct backlight_ops fujitsubl_ops = {
+static const struct backlight_ops fujitsubl_ops = {
 	.get_brightness = bl_get_brightness,
 	.update_status = bl_update_status,
 };
@@ -689,7 +689,7 @@
 	if (error)
 		goto err_free_input_dev;
 
-	result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
+	result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
 	if (result) {
 		printk(KERN_ERR "Error reading power state\n");
 		goto err_unregister_input_dev;
@@ -857,7 +857,7 @@
 	if (error)
 		goto err_free_input_dev;
 
-	result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
+	result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
 	if (result) {
 		printk(KERN_ERR "Error reading power state\n");
 		goto err_unregister_input_dev;
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 930e627..61433d4 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@
 #define GPOSW_DOU 0x08
 #define GPOSW_RDRV 0x30
 
+#define GPIO_UPDATE_TYPE	0x80000000
 
 #define NUM_GPIO 24
 
-struct pmic_gpio_irq {
-	spinlock_t lock;
-	u32 trigger[NUM_GPIO];
-	u32 dirty;
-	struct work_struct work;
-};
-
-
 struct pmic_gpio {
+	struct mutex		buslock;
 	struct gpio_chip	chip;
-	struct pmic_gpio_irq	irqtypes;
 	void			*gpiointr;
 	int			irq;
 	unsigned		irq_base;
+	unsigned int		update_type;
+	u32			trigger_type;
 };
 
-static void pmic_program_irqtype(int gpio, int type)
-{
-	if (type & IRQ_TYPE_EDGE_RISING)
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
-	else
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
-	if (type & IRQ_TYPE_EDGE_FALLING)
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
-	else
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static void pmic_irqtype_work(struct work_struct *work)
-{
-	struct pmic_gpio_irq *t =
-		container_of(work, struct pmic_gpio_irq, work);
-	unsigned long flags;
-	int i;
-	u16 type;
-
-	spin_lock_irqsave(&t->lock, flags);
-	/* As we drop the lock, we may need multiple scans if we race the
-	   pmic_irq_type function */
-	while (t->dirty) {
-		/*
-		 *	For each pin that has the dirty bit set send an IPC
-		 *	message to configure the hardware via the PMIC
-		 */
-		for (i = 0; i < NUM_GPIO; i++) {
-			if (!(t->dirty & (1 << i)))
-				continue;
-			t->dirty &= ~(1 << i);
-			/* We can't trust the array entry or dirty
-			   once the lock is dropped */
-			type = t->trigger[i];
-			spin_unlock_irqrestore(&t->lock, flags);
-			pmic_program_irqtype(i, type);
-			spin_lock_irqsave(&t->lock, flags);
-		}
-	}
-	spin_unlock_irqrestore(&t->lock, flags);
-}
-
 static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
 	if (offset > 8) {
@@ -190,25 +141,24 @@
 			1 << (offset - 16));
 }
 
-static int pmic_irq_type(unsigned irq, unsigned type)
+/*
+ * This is called from genirq with pg->buslock locked and
+ * irq_desc->lock held. We cannot access the SCU bus here, so we
+ * store the change and apply it in the bus_sync_unlock() function below.
+ */
+static int pmic_irq_type(struct irq_data *data, unsigned type)
 {
-	struct pmic_gpio *pg = get_irq_chip_data(irq);
-	u32 gpio = irq - pg->irq_base;
-	unsigned long flags;
+	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+	u32 gpio = data->irq - pg->irq_base;
 
 	if (gpio >= pg->chip.ngpio)
 		return -EINVAL;
 
-	spin_lock_irqsave(&pg->irqtypes.lock, flags);
-	pg->irqtypes.trigger[gpio] = type;
-	pg->irqtypes.dirty |=  (1 << gpio);
-	spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
-	schedule_work(&pg->irqtypes.work);
+	pg->trigger_type = type;
+	pg->update_type = gpio | GPIO_UPDATE_TYPE;
 	return 0;
 }
 
-
-
 static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@
 }
 
 /* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(unsigned irq)
-{
-};
+static void pmic_irq_unmask(struct irq_data *data) { }
 
-static void pmic_irq_mask(unsigned irq)
-{
-};
+static void pmic_irq_mask(struct irq_data *data) { }
 
 static struct irq_chip pmic_irqchip = {
 	.name		= "PMIC-GPIO",
-	.mask		= pmic_irq_mask,
-	.unmask		= pmic_irq_unmask,
-	.set_type	= pmic_irq_type,
+	.irq_mask	= pmic_irq_mask,
+	.irq_unmask	= pmic_irq_unmask,
+	.irq_set_type	= pmic_irq_type,
 };
 
-static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
+static irqreturn_t pmic_irq_handler(int irq, void *data)
 {
-	struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
+	struct pmic_gpio *pg = data;
 	u8 intsts = *((u8 *)pg->gpiointr + 4);
 	int gpio;
+	irqreturn_t ret = IRQ_NONE;
 
 	for (gpio = 0; gpio < 8; gpio++) {
 		if (intsts & (1 << gpio)) {
 			pr_debug("pmic pin %d triggered\n", gpio);
 			generic_handle_irq(pg->irq_base + gpio);
+			ret = IRQ_HANDLED;
 		}
 	}
-
-	if (desc->chip->irq_eoi)
-		desc->chip->irq_eoi(irq_get_irq_data(irq));
-	else
-		dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
+	return ret;
 }
 
 static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@
 	pg->chip.can_sleep = 1;
 	pg->chip.dev = dev;
 
-	INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
-	spin_lock_init(&pg->irqtypes.lock);
+	mutex_init(&pg->buslock);
 
 	pg->chip.dev = dev;
 	retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@
 		printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
 		goto err;
 	}
-	set_irq_data(pg->irq, pg);
-	set_irq_chained_handler(pg->irq, pmic_irq_handler);
+
+	retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
+	if (retval) {
+		printk(KERN_WARNING "pmic: Interrupt request failed\n");
+		goto err;
+	}
+
 	for (i = 0; i < 8; i++) {
 		set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
 					handle_simple_irq, "demux");
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 1752ef0..a91d510 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -26,7 +26,6 @@
 #include <linux/sfi.h>
 #include <asm/mrst.h>
 #include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
 
 /* IPC defines the following message types */
 #define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
@@ -161,7 +160,7 @@
 {
 	int i, nc, bytes, d;
 	u32 offset = 0;
-	u32 err = 0;
+	int err;
 	u8 cbuf[IPC_WWBUF_SIZE] = { };
 	u32 *wbuf = (u32 *)&cbuf;
 
@@ -404,7 +403,7 @@
  */
 int intel_scu_ipc_simple_command(int cmd, int sub)
 {
-	u32 err = 0;
+	int err;
 
 	mutex_lock(&ipclock);
 	if (ipcdev.pdev == NULL) {
@@ -434,8 +433,7 @@
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
 							u32 *out, int outlen)
 {
-	u32 err = 0;
-	int i = 0;
+	int i, err;
 
 	mutex_lock(&ipclock);
 	if (ipcdev.pdev == NULL) {
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index ba3231d..b93a032 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -128,6 +128,6 @@
 module_init(ipc_module_init);
 module_exit(ipc_module_exit);
 
-MODULE_LICENSE("GPL V2");
+MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Utility driver for intel scu ipc");
 MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index b4a95bb..5e83370 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -858,7 +858,7 @@
 }
 
 static struct backlight_device *sony_backlight_device;
-static struct backlight_ops sony_backlight_ops = {
+static const struct backlight_ops sony_backlight_ops = {
 	.update_status = sony_backlight_update_status,
 	.get_brightness = sony_backlight_get_brightness,
 };
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1f..865ef78 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@
 			return -EINVAL; \
 	return count; \
 } \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
 	show_bool_##value, set_bool_##value);
 
 show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a974ca3..eb99223 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2275,16 +2275,12 @@
 	if (keycode != KEY_RESERVED) {
 		mutex_lock(&tpacpi_inputdev_send_mutex);
 
+		input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
 		input_report_key(tpacpi_inputdev, keycode, 1);
-		if (keycode == KEY_UNKNOWN)
-			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-				    scancode);
 		input_sync(tpacpi_inputdev);
 
+		input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
 		input_report_key(tpacpi_inputdev, keycode, 0);
-		if (keycode == KEY_UNKNOWN)
-			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-				    scancode);
 		input_sync(tpacpi_inputdev);
 
 		mutex_unlock(&tpacpi_inputdev_send_mutex);
@@ -6110,7 +6106,7 @@
 			       BACKLIGHT_UPDATE_HOTKEY);
 }
 
-static struct backlight_ops ibm_backlight_data = {
+static const struct backlight_ops ibm_backlight_data = {
 	.get_brightness = brightness_get,
 	.update_status  = brightness_update_status,
 };
@@ -7194,7 +7190,7 @@
  * 		TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
  *
  *	FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
- *	boot. Apparently the EC does not intialize it, so unless ACPI DSDT
+ *	boot. Apparently the EC does not initialize it, so unless ACPI DSDT
  *	does so, its initial value is meaningless (0x07).
  *
  *	For firmware bugs, refer to:
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 4276da7..209cced 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -841,7 +841,7 @@
 	remove_proc_entry("version", toshiba_proc_dir);
 }
 
-static struct backlight_ops toshiba_backlight_data = {
+static const struct backlight_ops toshiba_backlight_data = {
         .get_brightness = get_lcd,
         .update_status  = set_lcd_status,
 };
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index 8de3775..bfba893 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -2,11 +2,13 @@
 # Makefile for the Linux Plug-and-Play Support.
 #
 
-obj-y		:= core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
+obj-y		:= pnp.o
+
+pnp-y		:= core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
 
 obj-$(CONFIG_PNPACPI)		+= pnpacpi/
 obj-$(CONFIG_PNPBIOS)		+= pnpbios/
 obj-$(CONFIG_ISAPNP)		+= isapnp/
 
 # pnp_system_init goes after pnpacpi/pnpbios init
-obj-y				+= system.o
+pnp-y				+= system.o
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 0f34d96..cb6ce42 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -220,10 +220,5 @@
 int pnp_debug;
 
 #if defined(CONFIG_PNP_DEBUG_MESSAGES)
-static int __init pnp_debug_setup(char *__unused)
-{
-	pnp_debug = 1;
-	return 1;
-}
-__setup("pnp.debug", pnp_debug_setup);
+module_param_named(debug, pnp_debug, int, 0644);
 #endif
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index d1dbb9d..00e9403 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -189,8 +189,11 @@
 	if (!pnp_drv)
 		return 0;
 
-	if (pnp_dev->protocol->resume)
-		pnp_dev->protocol->resume(pnp_dev);
+	if (pnp_dev->protocol->resume) {
+		error = pnp_dev->protocol->resume(pnp_dev);
+		if (error)
+			return error;
+	}
 
 	if (pnp_can_write(pnp_dev)) {
 		error = pnp_start_dev(pnp_dev);
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile
index cac18bb..6e607aa 100644
--- a/drivers/pnp/isapnp/Makefile
+++ b/drivers/pnp/isapnp/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for the kernel ISAPNP driver.
 #
+obj-y			+= pnp.o
+pnp-y			:= core.o compat.o
 
-isapnp-proc-$(CONFIG_PROC_FS) = proc.o
-
-obj-y := core.o compat.o $(isapnp-proc-y)
+pnp-$(CONFIG_PROC_FS)	+= proc.o
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile
index 905326f..40c93da 100644
--- a/drivers/pnp/pnpacpi/Makefile
+++ b/drivers/pnp/pnpacpi/Makefile
@@ -1,5 +1,6 @@
 #
 # Makefile for the kernel PNPACPI driver.
 #
+obj-y += pnp.o
 
-obj-y := core.o rsparser.o
+pnp-y := core.o rsparser.o
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 57313f4..ca84d50 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -81,12 +81,19 @@
 
 static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
 	struct acpi_buffer buffer;
 	int ret;
 
 	pnp_dbg(&dev->dev, "set resources\n");
+
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return -ENODEV;
+	}
+
 	ret = pnpacpi_build_resource_template(dev, &buffer);
 	if (ret)
 		return ret;
@@ -105,12 +112,18 @@
 
 static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
 	int ret;
 
 	dev_dbg(&dev->dev, "disable resources\n");
 
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return 0;
+	}
+
 	/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
 	ret = 0;
 	if (acpi_bus_power_manageable(handle))
@@ -124,46 +137,74 @@
 #ifdef CONFIG_ACPI_SLEEP
 static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
+
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return false;
+	}
 
 	return acpi_bus_can_wakeup(handle);
 }
 
 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
-	int power_state;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
+	int error = 0;
+
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return 0;
+	}
 
 	if (device_can_wakeup(&dev->dev)) {
-		int rc = acpi_pm_device_sleep_wake(&dev->dev,
+		error = acpi_pm_device_sleep_wake(&dev->dev,
 				device_may_wakeup(&dev->dev));
-
-		if (rc)
-			return rc;
+		if (error)
+			return error;
 	}
-	power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
-	if (power_state < 0)
-		power_state = (state.event == PM_EVENT_ON) ?
-				ACPI_STATE_D0 : ACPI_STATE_D3;
 
-	/* acpi_bus_set_power() often fails (keyboard port can't be
-	 * powered-down?), and in any case, our return value is ignored
-	 * by pnp_bus_suspend().  Hence we don't revert the wakeup
-	 * setting if the set_power fails.
-	 */
-	return acpi_bus_set_power(handle, power_state);
+	if (acpi_bus_power_manageable(handle)) {
+		int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
+
+		if (power_state < 0)
+			power_state = (state.event == PM_EVENT_ON) ?
+					ACPI_STATE_D0 : ACPI_STATE_D3;
+
+		/*
+		 * acpi_bus_set_power() often fails (keyboard port can't be
+		 * powered-down?), and in any case, our return value is ignored
+		 * by pnp_bus_suspend().  Hence we don't revert the wakeup
+		 * setting if the set_power fails.
+		 */
+		error = acpi_bus_set_power(handle, power_state);
+	}
+
+	return error;
 }
 
 static int pnpacpi_resume(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	int error = 0;
+
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return -ENODEV;
+	}
 
 	if (device_may_wakeup(&dev->dev))
 		acpi_pm_device_sleep_wake(&dev->dev, false);
-	return acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+	if (acpi_bus_power_manageable(handle))
+		error = acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+	return error;
 }
 #endif
 
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile
index 3cd3ed7..240b0ff 100644
--- a/drivers/pnp/pnpbios/Makefile
+++ b/drivers/pnp/pnpbios/Makefile
@@ -1,7 +1,8 @@
 #
 # Makefile for the kernel PNPBIOS driver.
 #
+obj-y := pnp.o
 
-pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
+pnp-y := core.o bioscalls.o rsparser.o
 
-obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
+pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 60d83d9..61bf5d7 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -136,6 +136,16 @@
 	  in handheld and portable equipment. The MAX17040 is configured
 	  to operate with a single lithium cell
 
+config BATTERY_MAX17042
+	tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+	depends on I2C
+	help
+	  The MAX17042 is a fuel-gauge system for lithium-ion (Li+)
+	  batteries used in handheld and portable equipment. It is
+	  configured to operate with a single lithium cell. The MAX8997
+	  and MAX8966 are multi-function devices that include fuel
+	  gauges compatible with the MAX17042.
+
 config BATTERY_Z2
 	tristate "Z2 battery driver"
 	depends on I2C && MACH_ZIPIT2
@@ -185,4 +195,14 @@
 	help
 	  Say Y here to enable support for TWL4030 Battery Charge Interface.
 
+config CHARGER_GPIO
+	tristate "GPIO charger"
+	depends on GPIOLIB
+	help
+	  Say Y to include support for chargers which report their online status
+	  through a GPIO pin.
+
+	  This driver can be built as a module. If so, the module will be
+	  called gpio-charger.
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index c75772e..8385bfa 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_BATTERY_BQ27x00)	+= bq27x00_battery.o
 obj-$(CONFIG_BATTERY_DA9030)	+= da9030_battery.o
 obj-$(CONFIG_BATTERY_MAX17040)	+= max17040_battery.o
+obj-$(CONFIG_BATTERY_MAX17042)	+= max17042_battery.o
 obj-$(CONFIG_BATTERY_Z2)	+= z2_battery.o
 obj-$(CONFIG_BATTERY_S3C_ADC)	+= s3c_adc_battery.o
 obj-$(CONFIG_CHARGER_PCF50633)	+= pcf50633-charger.o
@@ -32,3 +33,4 @@
 obj-$(CONFIG_BATTERY_INTEL_MID)	+= intel_mid_battery.o
 obj-$(CONFIG_CHARGER_ISP1704)	+= isp1704_charger.o
 obj-$(CONFIG_CHARGER_TWL4030)	+= twl4030_charger.o
+obj-$(CONFIG_CHARGER_GPIO)	+= gpio-charger.o
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 039f41a..548d263 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -295,7 +295,7 @@
 static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_scheduled_work();
+	flush_work_sync(&bat_work);
 	return 0;
 }
 
@@ -362,7 +362,7 @@
 err_psy_reg_main:
 
 	/* see comment in collie_bat_remove */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	i--;
 err_gpio:
@@ -382,12 +382,11 @@
 	power_supply_unregister(&collie_bat_main.psy);
 
 	/*
-	 * now flush all pending work.
-	 * we won't get any more schedules, since all
-	 * sources (isr and external_power_changed)
-	 * are unregistered now.
+	 * Now cancel the bat_work.  We won't get any more schedules,
+	 * since all sources (isr and external_power_changed) are
+	 * unregistered now.
 	 */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
 		gpio_free(gpios[i].gpio);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index e7f8978..e534290 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -212,7 +212,7 @@
 	if (di->rem_capacity > 100)
 		di->rem_capacity = 100;
 
-	if (di->current_uA >= 100L)
+	if (di->current_uA < -100L)
 		di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
 					/ (di->current_uA / 100L);
 	else
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
new file mode 100644
index 0000000..25b88ac
--- /dev/null
+++ b/drivers/power/gpio-charger.c
@@ -0,0 +1,188 @@
+/*
+ *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *  Driver for chargers which report their online status through a GPIO pin
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include <linux/power/gpio-charger.h>
+
+struct gpio_charger {
+	const struct gpio_charger_platform_data *pdata;
+	unsigned int irq;
+
+	struct power_supply charger;
+};
+
+static irqreturn_t gpio_charger_irq(int irq, void *devid)
+{
+	struct power_supply *charger = devid;
+
+	power_supply_changed(charger);
+
+	return IRQ_HANDLED;
+}
+
+static inline struct gpio_charger *psy_to_gpio_charger(struct power_supply *psy)
+{
+	return container_of(psy, struct gpio_charger, charger);
+}
+
+static int gpio_charger_get_property(struct power_supply *psy,
+		enum power_supply_property psp, union power_supply_propval *val)
+{
+	struct gpio_charger *gpio_charger = psy_to_gpio_charger(psy);
+	const struct gpio_charger_platform_data *pdata = gpio_charger->pdata;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = gpio_get_value(pdata->gpio);
+		val->intval ^= pdata->gpio_active_low;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum power_supply_property gpio_charger_properties[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int __devinit gpio_charger_probe(struct platform_device *pdev)
+{
+	const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data;
+	struct gpio_charger *gpio_charger;
+	struct power_supply *charger;
+	int ret;
+	int irq;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data\n");
+		return -EINVAL;
+	}
+
+	if (!gpio_is_valid(pdata->gpio)) {
+		dev_err(&pdev->dev, "Invalid gpio pin\n");
+		return -EINVAL;
+	}
+
+	gpio_charger = kzalloc(sizeof(*gpio_charger), GFP_KERNEL);
+	if (!gpio_charger) {
+		dev_err(&pdev->dev, "Failed to alloc driver structure\n");
+		return -ENOMEM;
+	}
+
+	charger = &gpio_charger->charger;
+
+	charger->name = pdata->name ? pdata->name : "gpio-charger";
+	charger->type = pdata->type;
+	charger->properties = gpio_charger_properties;
+	charger->num_properties = ARRAY_SIZE(gpio_charger_properties);
+	charger->get_property = gpio_charger_get_property;
+	charger->supplied_to = pdata->supplied_to;
+	charger->num_supplicants = pdata->num_supplicants;
+
+	ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret);
+		goto err_free;
+	}
+	ret = gpio_direction_input(pdata->gpio);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret);
+		goto err_gpio_free;
+	}
+
+	gpio_charger->pdata = pdata;
+
+	ret = power_supply_register(&pdev->dev, charger);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register power supply: %d\n",
+			ret);
+		goto err_gpio_free;
+	}
+
+	irq = gpio_to_irq(pdata->gpio);
+	if (irq > 0) {
+		ret = request_any_context_irq(irq, gpio_charger_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				dev_name(&pdev->dev), charger);
+		if (ret)
+			dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
+		else
+			gpio_charger->irq = irq;
+	}
+
+	platform_set_drvdata(pdev, gpio_charger);
+
+	return 0;
+
+err_gpio_free:
+	gpio_free(pdata->gpio);
+err_free:
+	kfree(gpio_charger);
+	return ret;
+}
+
+static int __devexit gpio_charger_remove(struct platform_device *pdev)
+{
+	struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+
+	if (gpio_charger->irq)
+		free_irq(gpio_charger->irq, &gpio_charger->charger);
+
+	power_supply_unregister(&gpio_charger->charger);
+
+	gpio_free(gpio_charger->pdata->gpio);
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(gpio_charger);
+
+	return 0;
+}
+
+static struct platform_driver gpio_charger_driver = {
+	.probe = gpio_charger_probe,
+	.remove = __devexit_p(gpio_charger_remove),
+	.driver = {
+		.name = "gpio-charger",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init gpio_charger_init(void)
+{
+	return platform_driver_register(&gpio_charger_driver);
+}
+module_init(gpio_charger_init);
+
+static void __exit gpio_charger_exit(void)
+{
+	platform_driver_unregister(&gpio_charger_driver);
+}
+module_exit(gpio_charger_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-charger");
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index 36cf402..bce3a01 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -765,7 +765,7 @@
 	power_supply_unregister(&pbi->usb);
 	power_supply_unregister(&pbi->batt);
 
-	flush_scheduled_work();
+	cancel_work_sync(&pbi->handler);
 	kfree(pbi);
 	return 0;
 }
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 7251218..2ad9b14 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -59,11 +59,61 @@
 	struct notifier_block	nb;
 	struct work_struct	work;
 
-	char			model[7];
+	/* properties */
+	char			model[8];
 	unsigned		present:1;
+	unsigned		online:1;
+	unsigned		current_max;
+
+	/* temp storage variables */
+	unsigned long		event;
+	unsigned		max_power;
 };
 
 /*
+ * Determine whether the charging port is DCP (dedicated charger) or CDP
+ * (Host/HUB charger).
+ *
+ * REVISIT: The method is defined in the Battery Charging Specification and
+ * is applicable to any ULPI transceiver. Nothing isp170x-specific here.
+ */
+static inline int isp1704_charger_type(struct isp1704_charger *isp)
+{
+	u8 reg;
+	u8 func_ctrl;
+	u8 otg_ctrl;
+	int type = POWER_SUPPLY_TYPE_USB_DCP;
+
+	func_ctrl = otg_io_read(isp->otg, ULPI_FUNC_CTRL);
+	otg_ctrl = otg_io_read(isp->otg, ULPI_OTG_CTRL);
+
+	/* disable pulldowns */
+	reg = ULPI_OTG_CTRL_DM_PULLDOWN | ULPI_OTG_CTRL_DP_PULLDOWN;
+	otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), reg);
+
+	/* full speed */
+	otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
+			ULPI_FUNC_CTRL_XCVRSEL_MASK);
+	otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL),
+			ULPI_FUNC_CTRL_FULL_SPEED);
+
+	/* Enable strong pull-up on DP (1.5K) and reset */
+	reg = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET;
+	otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), reg);
+	usleep_range(1000, 2000);
+
+	reg = otg_io_read(isp->otg, ULPI_DEBUG);
+	if ((reg & 3) != 3)
+		type = POWER_SUPPLY_TYPE_USB_CDP;
+
+	/* recover original state */
+	otg_io_write(isp->otg, ULPI_FUNC_CTRL, func_ctrl);
+	otg_io_write(isp->otg, ULPI_OTG_CTRL, otg_ctrl);
+
+	return type;
+}
+
+/*
  * ISP1704 detects PS/2 adapters as charger. To make sure the detected charger
  * is actually a dedicated charger, the following steps need to be taken.
  */
@@ -127,16 +177,19 @@
 static inline int isp1704_charger_detect(struct isp1704_charger *isp)
 {
 	unsigned long	timeout;
-	u8		r;
+	u8		pwr_ctrl;
 	int		ret = 0;
 
+	pwr_ctrl = otg_io_read(isp->otg, ISP1704_PWR_CTRL);
+
 	/* set SW control bit in PWR_CTRL register */
 	otg_io_write(isp->otg, ISP1704_PWR_CTRL,
 			ISP1704_PWR_CTRL_SWCTRL);
 
 	/* enable manual charger detection */
-	r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN);
-	otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r);
+	otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL),
+			ISP1704_PWR_CTRL_SWCTRL
+			| ISP1704_PWR_CTRL_DPVSRC_EN);
 	usleep_range(1000, 2000);
 
 	timeout = jiffies + msecs_to_jiffies(300);
@@ -147,7 +200,10 @@
 			ret = isp1704_charger_verify(isp);
 			break;
 		}
-	} while (!time_after(jiffies, timeout));
+	} while (!time_after(jiffies, timeout) && isp->online);
+
+	/* recover original state */
+	otg_io_write(isp->otg, ISP1704_PWR_CTRL, pwr_ctrl);
 
 	return ret;
 }
@@ -155,52 +211,92 @@
 static void isp1704_charger_work(struct work_struct *data)
 {
 	int			detect;
+	unsigned long		event;
+	unsigned		power;
 	struct isp1704_charger	*isp =
 		container_of(data, struct isp1704_charger, work);
+	static DEFINE_MUTEX(lock);
 
-	/*
-	 * FIXME Only supporting dedicated chargers even though isp1704 can
-	 * detect HUB and HOST chargers. If the device has already been
-	 * enumerated, the detection will break the connection.
-	 */
-	if (isp->otg->state != OTG_STATE_B_IDLE)
-		return;
+	event = isp->event;
+	power = isp->max_power;
 
-	/* disable data pullups */
-	if (isp->otg->gadget)
-		usb_gadget_disconnect(isp->otg->gadget);
+	mutex_lock(&lock);
 
-	/* detect charger */
-	detect = isp1704_charger_detect(isp);
-	if (detect) {
-		isp->present = detect;
-		power_supply_changed(&isp->psy);
+	switch (event) {
+	case USB_EVENT_VBUS:
+		isp->online = true;
+
+		/* detect charger */
+		detect = isp1704_charger_detect(isp);
+
+		if (detect) {
+			isp->present = detect;
+			isp->psy.type = isp1704_charger_type(isp);
+		}
+
+		switch (isp->psy.type) {
+		case POWER_SUPPLY_TYPE_USB_DCP:
+			isp->current_max = 1800;
+			break;
+		case POWER_SUPPLY_TYPE_USB_CDP:
+			/*
+			 * Only 500mA here or high speed chirp
+			 * handshaking may break
+			 */
+			isp->current_max = 500;
+			/* FALLTHROUGH */
+		case POWER_SUPPLY_TYPE_USB:
+		default:
+			/* enable data pullups */
+			if (isp->otg->gadget)
+				usb_gadget_connect(isp->otg->gadget);
+		}
+		break;
+	case USB_EVENT_NONE:
+		isp->online = false;
+		isp->current_max = 0;
+		isp->present = 0;
+		isp->current_max = 0;
+		isp->psy.type = POWER_SUPPLY_TYPE_USB;
+
+		/*
+		 * Disable data pullups. We need to prevent the controller from
+		 * enumerating.
+		 *
+		 * FIXME: This is here to allow charger detection with Host/HUB
+		 * chargers. The pullups may be enabled elsewhere, so this can
+		 * not be the final solution.
+		 */
+		if (isp->otg->gadget)
+			usb_gadget_disconnect(isp->otg->gadget);
+		break;
+	case USB_EVENT_ENUMERATED:
+		if (isp->present)
+			isp->current_max = 1800;
+		else
+			isp->current_max = power;
+		break;
+	default:
+		goto out;
 	}
 
-	/* enable data pullups */
-	if (isp->otg->gadget)
-		usb_gadget_connect(isp->otg->gadget);
+	power_supply_changed(&isp->psy);
+out:
+	mutex_unlock(&lock);
 }
 
 static int isp1704_notifier_call(struct notifier_block *nb,
-		unsigned long event, void *unused)
+		unsigned long event, void *power)
 {
 	struct isp1704_charger *isp =
 		container_of(nb, struct isp1704_charger, nb);
 
-	switch (event) {
-	case USB_EVENT_VBUS:
-		schedule_work(&isp->work);
-		break;
-	case USB_EVENT_NONE:
-		if (isp->present) {
-			isp->present = 0;
-			power_supply_changed(&isp->psy);
-		}
-		break;
-	default:
-		return NOTIFY_DONE;
-	}
+	isp->event = event;
+
+	if (power)
+		isp->max_power = *((unsigned *)power);
+
+	schedule_work(&isp->work);
 
 	return NOTIFY_OK;
 }
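The notifier above only records the event and the advertised current before deferring to the workqueue, so the heavy charger detection runs in process context. A minimal sketch of that record-and-defer pattern, with hypothetical names (demo_charger, demo_notifier_call) rather than the driver's own:

#include <linux/notifier.h>
#include <linux/workqueue.h>

struct demo_charger {
	struct notifier_block	nb;
	struct work_struct	work;
	unsigned long		event;		/* last USB event seen */
	unsigned int		max_power;	/* mA offered by the host */
};

static int demo_notifier_call(struct notifier_block *nb,
		unsigned long event, void *power)
{
	struct demo_charger *dc = container_of(nb, struct demo_charger, nb);

	dc->event = event;
	if (power)
		dc->max_power = *(unsigned int *)power;

	/* heavy lifting (ULPI reads, detection) happens in dc->work */
	schedule_work(&dc->work);

	return NOTIFY_OK;
}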
@@ -216,6 +312,12 @@
 	case POWER_SUPPLY_PROP_PRESENT:
 		val->intval = isp->present;
 		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = isp->online;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = isp->current_max;
+		break;
 	case POWER_SUPPLY_PROP_MODEL_NAME:
 		val->strval = isp->model;
 		break;
@@ -230,6 +332,8 @@
 
 static enum power_supply_property power_props[] = {
 	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
 };
@@ -287,13 +391,13 @@
 	if (!isp->otg)
 		goto fail0;
 
+	isp->dev = &pdev->dev;
+	platform_set_drvdata(pdev, isp);
+
 	ret = isp1704_test_ulpi(isp);
 	if (ret < 0)
 		goto fail1;
 
-	isp->dev = &pdev->dev;
-	platform_set_drvdata(pdev, isp);
-
 	isp->psy.name		= "isp1704";
 	isp->psy.type		= POWER_SUPPLY_TYPE_USB;
 	isp->psy.properties	= power_props;
@@ -318,6 +422,23 @@
 
 	dev_info(isp->dev, "registered with product id %s\n", isp->model);
 
+	/*
+	 * Taking over the D+ pullup.
+	 *
+	 * FIXME: The device will be disconnected if it was already
+	 * enumerated. The charger driver should always be loaded before any
+	 * gadget is loaded.
+	 */
+	if (isp->otg->gadget)
+		usb_gadget_disconnect(isp->otg->gadget);
+
+	/* Detect charger if VBUS is valid (the cable was already plugged). */
+	ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
+	if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
+		isp->event = USB_EVENT_VBUS;
+		schedule_work(&isp->work);
+	}
+
 	return 0;
 fail2:
 	power_supply_unregister(&isp->psy);
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index a8108a7..02414db 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/delay.h>
 #include <linux/gpio.h>
@@ -47,6 +48,8 @@
 
 	struct power_supply battery;
 	struct delayed_work work;
+
+	struct mutex lock;
 };
 
 static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy)
@@ -68,6 +71,8 @@
 	unsigned long val;
 	long voltage;
 
+	mutex_lock(&battery->lock);
+
 	INIT_COMPLETION(battery->read_completion);
 
 	enable_irq(battery->irq);
@@ -91,6 +96,8 @@
 	battery->cell->disable(battery->pdev);
 	disable_irq(battery->irq);
 
+	mutex_unlock(&battery->lock);
+
 	return voltage;
 }
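The hunk above serializes the one-shot ADC read with a mutex so concurrent callers cannot re-arm the completion under each other. A sketch of the same pattern under assumed names (demo_adc, demo_adc_read), not the driver's actual code:

#include <linux/mutex.h>
#include <linux/completion.h>

struct demo_adc {
	struct mutex		lock;
	struct completion	done;	/* completed from the ADC irq handler */
	unsigned long		value;
};

static unsigned long demo_adc_read(struct demo_adc *adc)
{
	unsigned long val;

	mutex_lock(&adc->lock);

	INIT_COMPLETION(adc->done);	/* re-arm before starting a sample */
	/* ...start the conversion and enable the irq here... */
	wait_for_completion(&adc->done);
	val = adc->value;

	mutex_unlock(&adc->lock);

	return val;
}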
 
@@ -240,6 +247,11 @@
 	struct jz_battery *jz_battery;
 	struct power_supply *battery;
 
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform_data supplied\n");
+		return -ENXIO;
+	}
+
 	jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL);
 	if (!jz_battery) {
 		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
@@ -291,6 +303,7 @@
 	jz_battery->pdev = pdev;
 
 	init_completion(&jz_battery->read_completion);
+	mutex_init(&jz_battery->lock);
 
 	INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work);
 
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
new file mode 100644
index 0000000..c5c8805
--- /dev/null
+++ b/drivers/power/max17042_battery.c
@@ -0,0 +1,239 @@
+/*
+ * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ *  Note that Maxim 8966 and 8997 are MFDs and this is their fuel-gauge subdevice.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max17040_battery.c
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/power_supply.h>
+#include <linux/power/max17042_battery.h>
+
+enum max17042_register {
+	MAX17042_STATUS		= 0x00,
+	MAX17042_VALRT_Th	= 0x01,
+	MAX17042_TALRT_Th	= 0x02,
+	MAX17042_SALRT_Th	= 0x03,
+	MAX17042_AtRate		= 0x04,
+	MAX17042_RepCap		= 0x05,
+	MAX17042_RepSOC		= 0x06,
+	MAX17042_Age		= 0x07,
+	MAX17042_TEMP		= 0x08,
+	MAX17042_VCELL		= 0x09,
+	MAX17042_Current	= 0x0A,
+	MAX17042_AvgCurrent	= 0x0B,
+	MAX17042_Qresidual	= 0x0C,
+	MAX17042_SOC		= 0x0D,
+	MAX17042_AvSOC		= 0x0E,
+	MAX17042_RemCap		= 0x0F,
+	MAX17402_FullCAP	= 0x10,
+	MAX17042_TTE		= 0x11,
+	MAX17042_V_empty	= 0x12,
+
+	MAX17042_RSLOW		= 0x14,
+
+	MAX17042_AvgTA		= 0x16,
+	MAX17042_Cycles		= 0x17,
+	MAX17042_DesignCap	= 0x18,
+	MAX17042_AvgVCELL	= 0x19,
+	MAX17042_MinMaxTemp	= 0x1A,
+	MAX17042_MinMaxVolt	= 0x1B,
+	MAX17042_MinMaxCurr	= 0x1C,
+	MAX17042_CONFIG		= 0x1D,
+	MAX17042_ICHGTerm	= 0x1E,
+	MAX17042_AvCap		= 0x1F,
+	MAX17042_ManName	= 0x20,
+	MAX17042_DevName	= 0x21,
+	MAX17042_DevChem	= 0x22,
+
+	MAX17042_TempNom	= 0x24,
+	MAX17042_TempCold	= 0x25,
+	MAX17042_TempHot	= 0x26,
+	MAX17042_AIN		= 0x27,
+	MAX17042_LearnCFG	= 0x28,
+	MAX17042_SHFTCFG	= 0x29,
+	MAX17042_RelaxCFG	= 0x2A,
+	MAX17042_MiscCFG	= 0x2B,
+	MAX17042_TGAIN		= 0x2C,
+	MAx17042_TOFF		= 0x2D,
+	MAX17042_CGAIN		= 0x2E,
+	MAX17042_COFF		= 0x2F,
+
+	MAX17042_Q_empty	= 0x33,
+	MAX17042_T_empty	= 0x34,
+
+	MAX17042_RCOMP0		= 0x38,
+	MAX17042_TempCo		= 0x39,
+	MAX17042_Rx		= 0x3A,
+	MAX17042_T_empty0	= 0x3B,
+	MAX17042_TaskPeriod	= 0x3C,
+	MAX17042_FSTAT		= 0x3D,
+
+	MAX17042_SHDNTIMER	= 0x3F,
+
+	MAX17042_VFRemCap	= 0x4A,
+
+	MAX17042_QH		= 0x4D,
+	MAX17042_QL		= 0x4E,
+};
+
+struct max17042_chip {
+	struct i2c_client *client;
+	struct power_supply battery;
+	struct max17042_platform_data *pdata;
+};
+
+static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
+{
+	int ret = i2c_smbus_write_word_data(client, reg, value);
+
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static int max17042_read_reg(struct i2c_client *client, u8 reg)
+{
+	int ret = i2c_smbus_read_word_data(client, reg);
+
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static enum power_supply_property max17042_battery_props[] = {
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int max17042_get_property(struct power_supply *psy,
+			    enum power_supply_property psp,
+			    union power_supply_propval *val)
+{
+	struct max17042_chip *chip = container_of(psy,
+				struct max17042_chip, battery);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = max17042_read_reg(chip->client,
+				MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+		val->intval = max17042_read_reg(chip->client,
+				MAX17042_AvgVCELL) * 83;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = max17042_read_reg(chip->client,
+				MAX17042_SOC) / 256;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __devinit max17042_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+	struct max17042_chip *chip;
+	int ret;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+		return -EIO;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->pdata = client->dev.platform_data;
+
+	i2c_set_clientdata(client, chip);
+
+	chip->battery.name		= "max17042_battery";
+	chip->battery.type		= POWER_SUPPLY_TYPE_BATTERY;
+	chip->battery.get_property	= max17042_get_property;
+	chip->battery.properties	= max17042_battery_props;
+	chip->battery.num_properties	= ARRAY_SIZE(max17042_battery_props);
+
+	ret = power_supply_register(&client->dev, &chip->battery);
+	if (ret) {
+		dev_err(&client->dev, "failed: power supply register\n");
+		i2c_set_clientdata(client, NULL);
+		kfree(chip);
+		return ret;
+	}
+
+	if (!chip->pdata->enable_current_sense) {
+		max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
+		max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
+		max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+	}
+
+	return 0;
+}
+
+static int __devexit max17042_remove(struct i2c_client *client)
+{
+	struct max17042_chip *chip = i2c_get_clientdata(client);
+
+	power_supply_unregister(&chip->battery);
+	i2c_set_clientdata(client, NULL);
+	kfree(chip);
+	return 0;
+}
+
+static const struct i2c_device_id max17042_id[] = {
+	{ "max17042", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, max17042_id);
+
+static struct i2c_driver max17042_i2c_driver = {
+	.driver	= {
+		.name	= "max17042",
+	},
+	.probe		= max17042_probe,
+	.remove		= __devexit_p(max17042_remove),
+	.id_table	= max17042_id,
+};
+
+static int __init max17042_init(void)
+{
+	return i2c_add_driver(&max17042_i2c_driver);
+}
+module_init(max17042_init);
+
+static void __exit max17042_exit(void)
+{
+	i2c_del_driver(&max17042_i2c_driver);
+}
+module_exit(max17042_exit);
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
+MODULE_LICENSE("GPL");
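The property callbacks above scale raw registers with fixed factors: VCELL and AvgVCELL are multiplied by 83 to give microvolts, and SOC is divided by 256 to give percent. A self-contained arithmetic sketch with made-up raw readings (the scaling factors are taken from the code above, not from the datasheet):

#include <stdio.h>

static int vcell_to_uV(int reg)    { return reg * 83; }
static int soc_to_percent(int reg) { return reg / 256; }

int main(void)
{
	int vcell = 45000;	/* hypothetical raw VCELL reading */
	int soc = 24832;	/* hypothetical raw SOC reading */

	/* prints "3735000 uV, 97 %" */
	printf("%d uV, %d %%\n", vcell_to_uV(vcell), soc_to_percent(soc));
	return 0;
}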
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 5bc1dcf..0b0ff3a 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -201,6 +201,72 @@
 	return ret;
 }
 
+static int olpc_bat_get_charge_full_design(union power_supply_propval *val)
+{
+	uint8_t ec_byte;
+	union power_supply_propval tech;
+	int ret, mfr;
+
+	ret = olpc_bat_get_tech(&tech);
+	if (ret)
+		return ret;
+
+	ec_byte = BAT_ADDR_MFR_TYPE;
+	ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
+	if (ret)
+		return ret;
+
+	mfr = ec_byte >> 4;
+
+	switch (tech.intval) {
+	case POWER_SUPPLY_TECHNOLOGY_NiMH:
+		switch (mfr) {
+		case 1: /* Gold Peak */
+			val->intval = 3000000*.8;
+			break;
+		default:
+			return -EIO;
+		}
+		break;
+
+	case POWER_SUPPLY_TECHNOLOGY_LiFe:
+		switch (mfr) {
+		case 1: /* Gold Peak */
+			val->intval = 2800000;
+			break;
+		case 2: /* BYD */
+			val->intval = 3100000;
+			break;
+		default:
+			return -EIO;
+		}
+		break;
+
+	default:
+		return -EIO;
+	}
+
+	return ret;
+}
+
+static int olpc_bat_get_charge_now(union power_supply_propval *val)
+{
+	uint8_t soc;
+	union power_supply_propval full;
+	int ret;
+
+	ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &soc, 1);
+	if (ret)
+		return ret;
+
+	ret = olpc_bat_get_charge_full_design(&full);
+	if (ret)
+		return ret;
+
+	val->intval = soc * (full.intval / 100);
+	return 0;
+}
+
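olpc_bat_get_charge_now() above multiplies the EC's state-of-charge percentage by one hundredth of the design capacity. A worked example with assumed values:

#include <stdio.h>

int main(void)
{
	int soc = 57;			/* hypothetical EC_BAT_SOC reading, percent */
	int full_design = 2800000;	/* uAh, Gold Peak LiFe value from above */

	/* same arithmetic as olpc_bat_get_charge_now(): prints 1596000 uAh */
	printf("charge_now = %d uAh\n", soc * (full_design / 100));
	return 0;
}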
 /*********************************************************************
  *		Battery properties
  *********************************************************************/
@@ -267,6 +333,7 @@
 			return ret;
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
 			return ret;
@@ -274,6 +341,7 @@
 		val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_AVG:
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
 		ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
 			return ret;
@@ -294,6 +362,16 @@
 		else
 			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		ret = olpc_bat_get_charge_full_design(val);
+		if (ret)
+			return ret;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		ret = olpc_bat_get_charge_now(val);
+		if (ret)
+			return ret;
+		break;
 	case POWER_SUPPLY_PROP_TEMP:
 		ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
@@ -331,16 +409,20 @@
 	return ret;
 }
 
-static enum power_supply_property olpc_bat_props[] = {
+static enum power_supply_property olpc_xo1_bat_props[] = {
 	POWER_SUPPLY_PROP_STATUS,
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_HEALTH,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CAPACITY,
 	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT,
 	POWER_SUPPLY_PROP_MANUFACTURER,
@@ -348,6 +430,27 @@
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 };
 
+/* XO-1.5 does not have ambient temperature property */
+static enum power_supply_property olpc_xo15_bat_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_MANUFACTURER,
+	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+};
+
 /* EEPROM reading goes completely around the power_supply API, sadly */
 
 #define EEPROM_START	0x20
@@ -419,8 +522,6 @@
 static struct platform_device *bat_pdev;
 
 static struct power_supply olpc_bat = {
-	.properties = olpc_bat_props,
-	.num_properties = ARRAY_SIZE(olpc_bat_props),
 	.get_property = olpc_bat_get_property,
 	.use_for_apm = 1,
 };
@@ -466,6 +567,13 @@
 		goto ac_failed;
 
 	olpc_bat.name = bat_pdev->name;
+	if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
+		olpc_bat.properties = olpc_xo15_bat_props;
+		olpc_bat.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
+	} else { /* XO-1 */
+		olpc_bat.properties = olpc_xo1_bat_props;
+		olpc_bat.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
+	}
 
 	ret = power_supply_register(&bat_pdev->dev, &olpc_bat);
 	if (ret)
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 91606bb..970f733 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -190,10 +190,10 @@
 	goto success;
 
 create_triggers_failed:
-	device_unregister(psy->dev);
+	device_del(dev);
 kobject_set_name_failed:
 device_add_failed:
-	kfree(dev);
+	put_device(dev);
 success:
 	return rc;
 }
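The error-path change above follows the usual driver-core rule: once device_register() has been attempted, the struct device is refcounted and must be dropped with put_device() so its release() callback frees it; a bare kfree() would bypass that. A sketch of the pattern with hypothetical names (demo, demo_release, demo_add):

#include <linux/device.h>
#include <linux/slab.h>

struct demo {
	struct device dev;
};

static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo, dev));
}

static int demo_add(struct device *parent)
{
	struct demo *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int rc;

	if (!d)
		return -ENOMEM;

	d->dev.parent = parent;
	d->dev.release = demo_release;
	dev_set_name(&d->dev, "demo0");

	rc = device_register(&d->dev);
	if (rc) {
		/* the refcount now owns the memory: drop it with
		 * put_device() so demo_release() runs, never kfree() */
		put_device(&d->dev);
		return rc;
	}
	return 0;
}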
@@ -201,7 +201,7 @@
 
 void power_supply_unregister(struct power_supply *psy)
 {
-	flush_scheduled_work();
+	cancel_work_sync(&psy->changed_work);
 	power_supply_remove_triggers(psy);
 	device_unregister(psy->dev);
 }
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index fe16b48..4255f23 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -1,5 +1,5 @@
 /*
- *	iPAQ h1930/h1940/rx1950 battery controler driver
+ *	iPAQ h1930/h1940/rx1950 battery controller driver
  *	Copyright (c) Vasily Khoruzhick
  *	Based on h1940_battery.c by Arnaud Patard
  *
@@ -112,6 +112,13 @@
 	return volt_val + cur_val * impedance / 1000;
 }
 
+static int charge_finished(struct s3c_adc_bat *bat)
+{
+	return bat->pdata->gpio_inverted ?
+		!gpio_get_value(bat->pdata->gpio_charge_finished) :
+		gpio_get_value(bat->pdata->gpio_charge_finished);
+}
+
 static int s3c_adc_bat_get_property(struct power_supply *psy,
 				    enum power_supply_property psp,
 				    union power_supply_propval *val)
@@ -140,7 +147,7 @@
 
 	if (bat->cable_plugged &&
 		((bat->pdata->gpio_charge_finished < 0) ||
-		!gpio_get_value(bat->pdata->gpio_charge_finished))) {
+		!charge_finished(bat))) {
 		lut = bat->pdata->lut_acin;
 		lut_size = bat->pdata->lut_acin_cnt;
 	}
@@ -236,8 +243,7 @@
 		}
 	} else {
 		if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) {
-			is_charged = gpio_get_value(
-				main_bat.pdata->gpio_charge_finished);
+			is_charged = charge_finished(&main_bat);
 			if (is_charged) {
 				if (bat->pdata->disable_charger)
 					bat->pdata->disable_charger();
@@ -427,5 +433,5 @@
 module_exit(s3c_adc_bat_exit);
 
 MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
-MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controler driver");
+MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index ee04936..53f0d35 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -332,7 +332,7 @@
 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_scheduled_work();
+	flush_work_sync(&bat_work);
 	return 0;
 }
 
@@ -422,7 +422,7 @@
 err_psy_reg_main:
 
 	/* see comment in tosa_bat_remove */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	i--;
 err_gpio:
@@ -445,12 +445,11 @@
 	power_supply_unregister(&tosa_bat_main.psy);
 
 	/*
-	 * now flush all pending work.
-	 * we won't get any more schedules, since all
-	 * sources (isr and external_power_changed)
-	 * are unregistered now.
+	 * Now cancel the bat_work.  We won't get any more schedules,
+	 * since all sources (isr and external_power_changed) are
+	 * unregistered now.
 	 */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
 		gpio_free(gpios[i].gpio);
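The conversions above (and in the wm97xx and z2 drivers below) replace flush_scheduled_work(), which drains the entire shared workqueue, with calls that only touch the driver's own work item. A sketch of the two replacements, assuming bat_work is a driver-owned struct work_struct:

#include <linux/workqueue.h>

static void demo_suspend(struct work_struct *bat_work)
{
	flush_work_sync(bat_work);	/* wait for a pending run to finish */
}

static void demo_remove(struct work_struct *bat_work)
{
	cancel_work_sync(bat_work);	/* cancel it, waiting if it is running */
}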
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 5071d85..156559e 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -147,7 +147,7 @@
 #ifdef CONFIG_PM
 static int wm97xx_bat_suspend(struct device *dev)
 {
-	flush_scheduled_work();
+	flush_work_sync(&bat_work);
 	return 0;
 }
 
@@ -273,7 +273,7 @@
 		free_irq(gpio_to_irq(pdata->charge_gpio), dev);
 		gpio_free(pdata->charge_gpio);
 	}
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 	power_supply_unregister(&bat_ps);
 	kfree(prop);
 	return 0;
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 85064a9..e5ed52d 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -254,7 +254,7 @@
 	struct z2_charger *charger = i2c_get_clientdata(client);
 	struct z2_battery_info *info = charger->info;
 
-	flush_scheduled_work();
+	cancel_work_sync(&charger->bat_work);
 	power_supply_unregister(&charger->batt_ps);
 
 	kfree(charger->batt_ps.properties);
@@ -271,7 +271,9 @@
 #ifdef CONFIG_PM
 static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
 {
-	flush_scheduled_work();
+	struct z2_charger *charger = i2c_get_clientdata(client);
+
+	flush_work_sync(&charger->bat_work);
 	return 0;
 }
 
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 1afe4e0..f0d3376 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -30,6 +30,17 @@
 	  messages to the system log.  Select this if you are having a
 	  problem with PPS support and want to see more of what is going on.
 
+config NTP_PPS
+	bool "PPS kernel consumer support"
+	depends on PPS && !NO_HZ
+	help
+	  This option adds support for direct in-kernel time
+	  synchronization using an external PPS signal.
+
+	  It doesn't work on tickless systems at the moment.
+
 source drivers/pps/clients/Kconfig
 
+source drivers/pps/generators/Kconfig
+
 endmenu
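With CONFIG_NTP_PPS enabled, a user-space program can hand a PPS source to the in-kernel consumer through the PPS_KC_BIND ioctl from <linux/pps.h>. A hedged sketch, assuming the source is exposed as /dev/pps0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_bind_args bind = {
		.tsformat = PPS_TSFMT_TSPEC,
		.edge     = PPS_CAPTUREASSERT,	/* feed assert edges to hardpps() */
		.consumer = PPS_KC_HARDPPS,
	};
	int fd = open("/dev/pps0", O_RDWR);	/* device node is an assumption */

	if (fd < 0 || ioctl(fd, PPS_KC_BIND, &bind) < 0) {
		perror("PPS_KC_BIND");
		return 1;
	}
	close(fd);
	return 0;
}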
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
index 98960dd..4483eaa 100644
--- a/drivers/pps/Makefile
+++ b/drivers/pps/Makefile
@@ -3,7 +3,8 @@
 #
 
 pps_core-y			:= pps.o kapi.o sysfs.o
+pps_core-$(CONFIG_NTP_PPS)	+= kc.o
 obj-$(CONFIG_PPS)		:= pps_core.o
-obj-y				+= clients/
+obj-y				+= clients/ generators/
 
 ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 4e801bd7..8520a7f 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -22,4 +22,11 @@
 	  If you say yes here you get support for a PPS source connected
 	  with the CD (Carrier Detect) pin of your serial port.
 
+config PPS_CLIENT_PARPORT
+	tristate "Parallel port PPS client"
+	depends on PPS && PARPORT
+	help
+	  If you say yes here you get support for a PPS source connected
+	  with the interrupt pin of your parallel port.
+
 endif
diff --git a/drivers/pps/clients/Makefile b/drivers/pps/clients/Makefile
index 812c9b1..42517da 100644
--- a/drivers/pps/clients/Makefile
+++ b/drivers/pps/clients/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_PPS_CLIENT_KTIMER)	+= pps-ktimer.o
 obj-$(CONFIG_PPS_CLIENT_LDISC)	+= pps-ldisc.o
+obj-$(CONFIG_PPS_CLIENT_PARPORT) += pps_parport.o
 
 ifeq ($(CONFIG_PPS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index e7ef5b8..82583b0 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -19,6 +19,7 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -31,7 +32,7 @@
  * Global variables
  */
 
-static int source;
+static struct pps_device *pps;
 static struct timer_list ktimer;
 
 /*
@@ -40,19 +41,12 @@
 
 static void pps_ktimer_event(unsigned long ptr)
 {
-	struct timespec __ts;
-	struct pps_ktime ts;
+	struct pps_event_time ts;
 
 	/* First of all we get the time stamp... */
-	getnstimeofday(&__ts);
+	pps_get_ts(&ts);
 
-	pr_info("PPS event at %lu\n", jiffies);
-
-	/* ... and translate it to PPS time data struct */
-	ts.sec = __ts.tv_sec;
-	ts.nsec = __ts.tv_nsec;
-
-	pps_event(source, &ts, PPS_CAPTUREASSERT, NULL);
+	pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
 
 	mod_timer(&ktimer, jiffies + HZ);
 }
@@ -61,12 +55,11 @@
  * The echo function
  */
 
-static void pps_ktimer_echo(int source, int event, void *data)
+static void pps_ktimer_echo(struct pps_device *pps, int event, void *data)
 {
-	pr_info("echo %s %s for source %d\n",
+	dev_info(pps->dev, "echo %s %s\n",
 		event & PPS_CAPTUREASSERT ? "assert" : "",
-		event & PPS_CAPTURECLEAR ? "clear" : "",
-		source);
+		event & PPS_CAPTURECLEAR ? "clear" : "");
 }
 
 /*
@@ -89,30 +82,27 @@
 
 static void __exit pps_ktimer_exit(void)
 {
-	del_timer_sync(&ktimer);
-	pps_unregister_source(source);
+	dev_info(pps->dev, "ktimer PPS source unregistered\n");
 
-	pr_info("ktimer PPS source unregistered\n");
+	del_timer_sync(&ktimer);
+	pps_unregister_source(pps);
 }
 
 static int __init pps_ktimer_init(void)
 {
-	int ret;
-
-	ret = pps_register_source(&pps_ktimer_info,
+	pps = pps_register_source(&pps_ktimer_info,
 				PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
-	if (ret < 0) {
-		printk(KERN_ERR "cannot register ktimer source\n");
-		return ret;
+	if (pps == NULL) {
+		pr_err("cannot register PPS source\n");
+		return -ENOMEM;
 	}
-	source = ret;
 
 	setup_timer(&ktimer, pps_ktimer_event, 0);
 	mod_timer(&ktimer, jiffies + HZ);
 
-	pr_info("ktimer PPS source registered at %d\n", source);
+	dev_info(pps->dev, "ktimer PPS source registered\n");
 
-	return  0;
+	return 0;
 }
 
 module_init(pps_ktimer_init);
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 8e1932d..79451f2 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -19,6 +19,8 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/serial_core.h>
 #include <linux/tty.h>
@@ -27,30 +29,18 @@
 #define PPS_TTY_MAGIC		0x0001
 
 static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
-				struct timespec *ts)
+				struct pps_event_time *ts)
 {
-	int id = (long)tty->disc_data;
-	struct timespec __ts;
-	struct pps_ktime pps_ts;
+	struct pps_device *pps = (struct pps_device *)tty->disc_data;
 
-	/* First of all we get the time stamp... */
-	getnstimeofday(&__ts);
-
-	/* Does caller give us a timestamp? */
-	if (ts) {	/* Yes. Let's use it! */
-		pps_ts.sec = ts->tv_sec;
-		pps_ts.nsec = ts->tv_nsec;
-	} else {	/* No. Do it ourself! */
-		pps_ts.sec = __ts.tv_sec;
-		pps_ts.nsec = __ts.tv_nsec;
-	}
+	BUG_ON(pps == NULL);
 
 	/* Now do the PPS event report */
-	pps_event(id, &pps_ts, status ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR,
-			NULL);
+	pps_event(pps, ts, status ? PPS_CAPTUREASSERT :
+			PPS_CAPTURECLEAR, NULL);
 
-	pr_debug("PPS %s at %lu on source #%d\n",
-			status ? "assert" : "clear", jiffies, id);
+	dev_dbg(pps->dev, "PPS %s at %lu\n",
+			status ? "assert" : "clear", jiffies);
 }
 
 static int (*alias_n_tty_open)(struct tty_struct *tty);
@@ -60,6 +50,7 @@
 	struct pps_source_info info;
 	struct tty_driver *drv = tty->driver;
 	int index = tty->index + drv->name_base;
+	struct pps_device *pps;
 	int ret;
 
 	info.owner = THIS_MODULE;
@@ -70,34 +61,42 @@
 			PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
 			PPS_CANWAIT | PPS_TSFMT_TSPEC;
 
-	ret = pps_register_source(&info, PPS_CAPTUREBOTH | \
+	pps = pps_register_source(&info, PPS_CAPTUREBOTH | \
 				PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
-	if (ret < 0) {
+	if (pps == NULL) {
 		pr_err("cannot register PPS source \"%s\"\n", info.path);
-		return ret;
+		return -ENOMEM;
 	}
-	tty->disc_data = (void *)(long)ret;
+	tty->disc_data = pps;
 
 	/* Should open N_TTY ldisc too */
 	ret = alias_n_tty_open(tty);
-	if (ret < 0)
-		pps_unregister_source((long)tty->disc_data);
+	if (ret < 0) {
+		pr_err("cannot open tty ldisc \"%s\"\n", info.path);
+		goto err_unregister;
+	}
 
-	pr_info("PPS source #%d \"%s\" added\n", ret, info.path);
+	dev_info(pps->dev, "source \"%s\" added\n", info.path);
 
 	return 0;
+
+err_unregister:
+	tty->disc_data = NULL;
+	pps_unregister_source(pps);
+	return ret;
 }
 
 static void (*alias_n_tty_close)(struct tty_struct *tty);
 
 static void pps_tty_close(struct tty_struct *tty)
 {
-	int id = (long)tty->disc_data;
+	struct pps_device *pps = (struct pps_device *)tty->disc_data;
 
-	pps_unregister_source(id);
 	alias_n_tty_close(tty);
 
-	pr_info("PPS source #%d removed\n", id);
+	tty->disc_data = NULL;
+	dev_info(pps->dev, "removed\n");
+	pps_unregister_source(pps);
 }
 
 static struct tty_ldisc_ops pps_ldisc_ops;
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
new file mode 100644
index 0000000..c571d6d
--- /dev/null
+++ b/drivers/pps/clients/pps_parport.c
@@ -0,0 +1,258 @@
+/*
+ * pps_parport.c -- kernel parallel port PPS client
+ *
+ *
+ * Copyright (C) 2009   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * implement echo over SEL pin
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irqnr.h>
+#include <linux/time.h>
+#include <linux/parport.h>
+#include <linux/pps_kernel.h>
+
+#define DRVDESC "parallel port PPS client"
+
+/* module parameters */
+
+#define CLEAR_WAIT_MAX		100
+#define CLEAR_WAIT_MAX_ERRORS	5
+
+static unsigned int clear_wait = 100;
+MODULE_PARM_DESC(clear_wait,
+	"Maximum number of port reads when polling for signal clear,"
+	" zero turns clear edge capture off entirely");
+module_param(clear_wait, uint, 0);
+
+
+/* internal per port structure */
+struct pps_client_pp {
+	struct pardevice *pardev;	/* parport device */
+	struct pps_device *pps;		/* PPS device */
+	unsigned int cw;		/* port clear timeout */
+	unsigned int cw_err;		/* number of timeouts */
+};
+
+static inline int signal_is_set(struct parport *port)
+{
+	return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0;
+}
+
+/* parport interrupt handler */
+static void parport_irq(void *handle)
+{
+	struct pps_event_time ts_assert, ts_clear;
+	struct pps_client_pp *dev = handle;
+	struct parport *port = dev->pardev->port;
+	unsigned int i;
+	unsigned long flags;
+
+	/* first of all we get the time stamp... */
+	pps_get_ts(&ts_assert);
+
+	if (dev->cw == 0)
+		/* clear edge capture disabled */
+		goto out_assert;
+
+	/* try capture the clear edge */
+
+	/* We have to disable interrupts here. The idea is to prevent
+	 * other interrupts on the same processor from introducing random
+	 * lags while polling the port. Reading from an IO port is known
+	 * to take approximately 1us, while other interrupt handlers can
+	 * potentially take much longer.
+	 *
+	 * Interrupts won't be disabled for long because the number of
+	 * polls is limited by the clear_wait parameter, which is kept
+	 * rather low, so it should never be an issue.
+	 */
+	local_irq_save(flags);
+	/* check the signal (no signal means the pulse is lost this time) */
+	if (!signal_is_set(port)) {
+		local_irq_restore(flags);
+		dev_err(dev->pps->dev, "lost the signal\n");
+		goto out_assert;
+	}
+
+	/* poll the port until the signal is unset */
+	for (i = dev->cw; i; i--)
+		if (!signal_is_set(port)) {
+			pps_get_ts(&ts_clear);
+			local_irq_restore(flags);
+			dev->cw_err = 0;
+			goto out_both;
+		}
+	local_irq_restore(flags);
+
+	/* timeout */
+	dev->cw_err++;
+	if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
+		dev_err(dev->pps->dev, "disabled clear edge capture after %d"
+				" timeouts\n", dev->cw_err);
+		dev->cw = 0;
+		dev->cw_err = 0;
+	}
+
+out_assert:
+	/* fire assert event */
+	pps_event(dev->pps, &ts_assert,
+			PPS_CAPTUREASSERT, NULL);
+	return;
+
+out_both:
+	/* fire assert event */
+	pps_event(dev->pps, &ts_assert,
+			PPS_CAPTUREASSERT, NULL);
+	/* fire clear event */
+	pps_event(dev->pps, &ts_clear,
+			PPS_CAPTURECLEAR, NULL);
+	return;
+}
+
+/* the PPS echo function */
+static void pps_echo(struct pps_device *pps, int event, void *data)
+{
+	dev_info(pps->dev, "echo %s %s\n",
+		event & PPS_CAPTUREASSERT ? "assert" : "",
+		event & PPS_CAPTURECLEAR ? "clear" : "");
+}
+
+static void parport_attach(struct parport *port)
+{
+	struct pps_client_pp *device;
+	struct pps_source_info info = {
+		.name		= KBUILD_MODNAME,
+		.path		= "",
+		.mode		= PPS_CAPTUREBOTH | \
+				  PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
+				  PPS_ECHOASSERT | PPS_ECHOCLEAR | \
+				  PPS_CANWAIT | PPS_TSFMT_TSPEC,
+		.echo		= pps_echo,
+		.owner		= THIS_MODULE,
+		.dev		= NULL
+	};
+
+	device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
+	if (!device) {
+		pr_err("memory allocation failed, not attaching\n");
+		return;
+	}
+
+	device->pardev = parport_register_device(port, KBUILD_MODNAME,
+			NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
+	if (!device->pardev) {
+		pr_err("couldn't register with %s\n", port->name);
+		goto err_free;
+	}
+
+	if (parport_claim_or_block(device->pardev) < 0) {
+		pr_err("couldn't claim %s\n", port->name);
+		goto err_unregister_dev;
+	}
+
+	device->pps = pps_register_source(&info,
+			PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
+	if (device->pps == NULL) {
+		pr_err("couldn't register PPS source\n");
+		goto err_release_dev;
+	}
+
+	device->cw = clear_wait;
+
+	port->ops->enable_irq(port);
+
+	pr_info("attached to %s\n", port->name);
+
+	return;
+
+err_release_dev:
+	parport_release(device->pardev);
+err_unregister_dev:
+	parport_unregister_device(device->pardev);
+err_free:
+	kfree(device);
+}
+
+static void parport_detach(struct parport *port)
+{
+	struct pardevice *pardev = port->cad;
+	struct pps_client_pp *device;
+
+	/* FIXME: oooh, this is ugly! */
+	if (strcmp(pardev->name, KBUILD_MODNAME))
+		/* not our port */
+		return;
+
+	device = pardev->private;
+
+	port->ops->disable_irq(port);
+	pps_unregister_source(device->pps);
+	parport_release(pardev);
+	parport_unregister_device(pardev);
+	kfree(device);
+}
+
+static struct parport_driver pps_parport_driver = {
+	.name = KBUILD_MODNAME,
+	.attach = parport_attach,
+	.detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_parport_init(void)
+{
+	int ret;
+
+	pr_info(DRVDESC "\n");
+
+	if (clear_wait > CLEAR_WAIT_MAX) {
+		pr_err("clear_wait value must not be greater"
+				" than %d\n", CLEAR_WAIT_MAX);
+		return -EINVAL;
+	}
+
+	ret = parport_register_driver(&pps_parport_driver);
+	if (ret) {
+		pr_err("unable to register with parport\n");
+		return ret;
+	}
+
+	return  0;
+}
+
+static void __exit pps_parport_exit(void)
+{
+	parport_unregister_driver(&pps_parport_driver);
+}
+
+module_init(pps_parport_init);
+module_exit(pps_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
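Events reported by a client like the one above can be read from user space through the RFC 2783 char-device interface. A sketch using the PPS_FETCH ioctl from <linux/pps.h>, assuming the source shows up as /dev/pps0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_fdata fdata = {
		.timeout = { .flags = PPS_TIME_INVALID },	/* wait indefinitely */
	};
	int fd = open("/dev/pps0", O_RDWR);	/* device node is an assumption */

	if (fd < 0 || ioctl(fd, PPS_FETCH, &fdata) < 0) {
		perror("PPS_FETCH");
		return 1;
	}
	printf("assert #%u at %lld.%09d\n", fdata.info.assert_sequence,
	       (long long)fdata.info.assert_tu.sec, fdata.info.assert_tu.nsec);
	close(fd);
	return 0;
}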
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
new file mode 100644
index 0000000..e4c4f3d
--- /dev/null
+++ b/drivers/pps/generators/Kconfig
@@ -0,0 +1,13 @@
+#
+# PPS generators configuration
+#
+
+comment "PPS generators support"
+
+config PPS_GENERATOR_PARPORT
+	tristate "Parallel port PPS signal generator"
+	depends on PARPORT && BROKEN
+	help
+	  If you say yes here you get support for a PPS signal generator which
+	  utilizes the STROBE pin of a parallel port to send PPS signals. It uses
+	  the parport abstraction layer and hrtimers to precisely control the signal.
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
new file mode 100644
index 0000000..303304a6
--- /dev/null
+++ b/drivers/pps/generators/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for PPS generators.
+#
+
+obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
+
+ifeq ($(CONFIG_PPS_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
new file mode 100644
index 0000000..b93af3e
--- /dev/null
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -0,0 +1,282 @@
+/*
+ * pps_gen_parport.c -- kernel parallel port PPS signal generator
+ *
+ *
+ * Copyright (C) 2009   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * fix issues when realtime clock is adjusted in a leap
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/parport.h>
+
+#define DRVDESC "parallel port PPS signal generator"
+
+#define SIGNAL		0
+#define NO_SIGNAL	PARPORT_CONTROL_STROBE
+
+/* module parameters */
+
+#define SEND_DELAY_MAX		100000
+
+static unsigned int send_delay = 30000;
+MODULE_PARM_DESC(delay,
+	"Delay between setting and dropping the signal (ns)");
+module_param_named(delay, send_delay, uint, 0);
+
+
+#define SAFETY_INTERVAL	3000	/* set the hrtimer earlier for safety (ns) */
+
+/* internal per port structure */
+struct pps_generator_pp {
+	struct pardevice *pardev;	/* parport device */
+	struct hrtimer timer;
+	long port_write_time;		/* calibrated port write time (ns) */
+};
+
+static struct pps_generator_pp device = {
+	.pardev = NULL,
+};
+
+static int attached;
+
+/* calibrated time between a hrtimer event and the reaction */
+static long hrtimer_error = SAFETY_INTERVAL;
+
+/* the kernel hrtimer event */
+static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
+{
+	struct timespec expire_time, ts1, ts2, ts3, dts;
+	struct pps_generator_pp *dev;
+	struct parport *port;
+	long lim, delta;
+	unsigned long flags;
+
+	/* We have to disable interrupts here. The idea is to prevent
+	 * other interrupts on the same processor from introducing random
+	 * lags while polling the clock. getnstimeofday() takes <1us on
+	 * most machines, while other interrupt handlers can potentially
+	 * take much longer.
+	 *
+	 * NB: approx time with blocked interrupts =
+	 * send_delay + 3 * SAFETY_INTERVAL
+	 */
+	local_irq_save(flags);
+
+	/* first of all we get the time stamp... */
+	getnstimeofday(&ts1);
+	expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer));
+	dev = container_of(timer, struct pps_generator_pp, timer);
+	lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
+
+	/* check if we are late */
+	if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
+		local_irq_restore(flags);
+		pr_err("we are late this time %ld.%09ld\n",
+				ts1.tv_sec, ts1.tv_nsec);
+		goto done;
+	}
+
+	/* busy loop until the time is right for an assert edge */
+	do {
+		getnstimeofday(&ts2);
+	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+	/* set the signal */
+	port = dev->pardev->port;
+	port->ops->write_control(port, SIGNAL);
+
+	/* busy loop until the time is right for a clear edge */
+	lim = NSEC_PER_SEC - dev->port_write_time;
+	do {
+		getnstimeofday(&ts2);
+	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+	/* unset the signal */
+	port->ops->write_control(port, NO_SIGNAL);
+
+	getnstimeofday(&ts3);
+
+	local_irq_restore(flags);
+
+	/* update calibrated port write time */
+	dts = timespec_sub(ts3, ts2);
+	dev->port_write_time =
+		(dev->port_write_time + timespec_to_ns(&dts)) >> 1;
+
+done:
+	/* update calibrated hrtimer error */
+	dts = timespec_sub(ts1, expire_time);
+	delta = timespec_to_ns(&dts);
+	/* If the new error value is bigger than the old one, use the new
+	 * value; otherwise slowly move towards it. This way it should be
+	 * safe in bad conditions and efficient in good conditions.
+	 */
+	if (delta >= hrtimer_error)
+		hrtimer_error = delta;
+	else
+		hrtimer_error = (3 * hrtimer_error + delta) >> 2;
+
+	/* update the hrtimer expire time */
+	hrtimer_set_expires(timer,
+			ktime_set(expire_time.tv_sec + 1,
+				NSEC_PER_SEC - (send_delay +
+				dev->port_write_time + SAFETY_INTERVAL +
+				2 * hrtimer_error)));
+
+	return HRTIMER_RESTART;
+}
+
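The handler above adapts its wake-up margin with an asymmetric filter: a larger error is adopted immediately, while a smaller one only pulls the estimate down by a quarter per event. A standalone sketch of that update rule with assumed numbers:

#include <stdio.h>

/* same update rule as described in the comment above */
static long update_error(long err, long delta)
{
	if (delta >= err)
		return delta;
	return (3 * err + delta) >> 2;
}

int main(void)
{
	long err = 3000;		/* assumed starting error, ns */

	err = update_error(err, 9000);	/* spike: adopted at once -> 9000 */
	err = update_error(err, 1000);	/* calm:  (3*9000 + 1000)/4 -> 7000 */
	printf("calibrated error: %ld ns\n", err);
	return 0;
}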
+/* calibrate port write time */
+#define PORT_NTESTS_SHIFT	5
+static void calibrate_port(struct pps_generator_pp *dev)
+{
+	struct parport *port = dev->pardev->port;
+	int i;
+	long acc = 0;
+
+	for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
+		struct timespec a, b;
+		unsigned long irq_flags;
+
+		local_irq_save(irq_flags);
+		getnstimeofday(&a);
+		port->ops->write_control(port, NO_SIGNAL);
+		getnstimeofday(&b);
+		local_irq_restore(irq_flags);
+
+		b = timespec_sub(b, a);
+		acc += timespec_to_ns(&b);
+	}
+
+	dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
+	pr_info("port write takes %ldns\n", dev->port_write_time);
+}
+
+static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
+{
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+
+	return ktime_set(ts.tv_sec +
+			((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),
+			NSEC_PER_SEC - (send_delay +
+			dev->port_write_time + 3 * SAFETY_INTERVAL));
+}
+
+static void parport_attach(struct parport *port)
+{
+	if (attached) {
+		/* we already have a port */
+		return;
+	}
+
+	device.pardev = parport_register_device(port, KBUILD_MODNAME,
+			NULL, NULL, NULL, PARPORT_FLAG_EXCL, &device);
+	if (!device.pardev) {
+		pr_err("couldn't register with %s\n", port->name);
+		return;
+	}
+
+	if (parport_claim_or_block(device.pardev) < 0) {
+		pr_err("couldn't claim %s\n", port->name);
+		goto err_unregister_dev;
+	}
+
+	pr_info("attached to %s\n", port->name);
+	attached = 1;
+
+	calibrate_port(&device);
+
+	hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	device.timer.function = hrtimer_event;
+#ifdef CONFIG_PREEMPT_RT
+	/* hrtimer interrupt will run in the interrupt context with this */
+	device.timer.irqsafe = 1;
+#endif
+
+	hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
+
+	return;
+
+err_unregister_dev:
+	parport_unregister_device(device.pardev);
+}
+
+static void parport_detach(struct parport *port)
+{
+	if (port->cad != device.pardev)
+		return;	/* not our port */
+
+	hrtimer_cancel(&device.timer);
+	parport_release(device.pardev);
+	parport_unregister_device(device.pardev);
+}
+
+static struct parport_driver pps_gen_parport_driver = {
+	.name = KBUILD_MODNAME,
+	.attach = parport_attach,
+	.detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_gen_parport_init(void)
+{
+	int ret;
+
+	pr_info(DRVDESC "\n");
+
+	if (send_delay > SEND_DELAY_MAX) {
+		pr_err("delay value must not be greater"
+				" than %d\n", SEND_DELAY_MAX);
+		return -EINVAL;
+	}
+
+	ret = parport_register_driver(&pps_gen_parport_driver);
+	if (ret) {
+		pr_err("unable to register with parport\n");
+		return ret;
+	}
+
+	return  0;
+}
+
+static void __exit pps_gen_parport_exit(void)
+{
+	parport_unregister_driver(&pps_gen_parport_driver);
+	pr_info("hrtimer avg error is %ldns\n", hrtimer_error);
+}
+
+module_init(pps_gen_parport_init);
+module_exit(pps_gen_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 1aa02db..a4e8eb9 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -19,24 +19,20 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/time.h>
+#include <linux/timex.h>
 #include <linux/spinlock.h>
-#include <linux/idr.h>
 #include <linux/fs.h>
 #include <linux/pps_kernel.h>
 #include <linux/slab.h>
 
-/*
- * Global variables
- */
-
-DEFINE_SPINLOCK(pps_idr_lock);
-DEFINE_IDR(pps_idr);
+#include "kc.h"
 
 /*
  * Local functions
@@ -60,60 +56,6 @@
  * Exported functions
  */
 
-/* pps_get_source - find a PPS source
- * @source: the PPS source ID.
- *
- * This function is used to find an already registered PPS source into the
- * system.
- *
- * The function returns NULL if found nothing, otherwise it returns a pointer
- * to the PPS source data struct (the refcounter is incremented by 1).
- */
-
-struct pps_device *pps_get_source(int source)
-{
-	struct pps_device *pps;
-	unsigned long flags;
-
-	spin_lock_irqsave(&pps_idr_lock, flags);
-
-	pps = idr_find(&pps_idr, source);
-	if (pps != NULL)
-		atomic_inc(&pps->usage);
-
-	spin_unlock_irqrestore(&pps_idr_lock, flags);
-
-	return pps;
-}
-
-/* pps_put_source - free the PPS source data
- * @pps: a pointer to the PPS source.
- *
- * This function is used to free a PPS data struct if its refcount is 0.
- */
-
-void pps_put_source(struct pps_device *pps)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&pps_idr_lock, flags);
-	BUG_ON(atomic_read(&pps->usage) == 0);
-
-	if (!atomic_dec_and_test(&pps->usage)) {
-		pps = NULL;
-		goto exit;
-	}
-
-	/* No more reference to the PPS source. We can safely remove the
-	 * PPS data struct.
-	 */
-	idr_remove(&pps_idr, pps->id);
-
-exit:
-	spin_unlock_irqrestore(&pps_idr_lock, flags);
-	kfree(pps);
-}
-
 /* pps_register_source - add a PPS source in the system
  * @info: the PPS info struct
  * @default_params: the default PPS parameters of the new source
@@ -122,31 +64,31 @@
  * source is described by info's fields and it will have, as default PPS
  * parameters, the ones specified into default_params.
  *
- * The function returns, in case of success, the PPS source ID.
+ * The function returns, in case of success, the PPS device. Otherwise NULL.
  */
 
-int pps_register_source(struct pps_source_info *info, int default_params)
+struct pps_device *pps_register_source(struct pps_source_info *info,
+		int default_params)
 {
 	struct pps_device *pps;
-	int id;
 	int err;
 
 	/* Sanity checks */
 	if ((info->mode & default_params) != default_params) {
-		printk(KERN_ERR "pps: %s: unsupported default parameters\n",
+		pr_err("%s: unsupported default parameters\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
 	}
 	if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
 			info->echo == NULL) {
-		printk(KERN_ERR "pps: %s: echo function is not defined\n",
+		pr_err("%s: echo function is not defined\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
 	}
 	if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
-		printk(KERN_ERR "pps: %s: unspecified time format\n",
+		pr_err("%s: unspecified time format\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
@@ -168,94 +110,48 @@
 
 	init_waitqueue_head(&pps->queue);
 	spin_lock_init(&pps->lock);
-	atomic_set(&pps->usage, 1);
-
-	/* Get new ID for the new PPS source */
-	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
-		err = -ENOMEM;
-		goto kfree_pps;
-	}
-
-	spin_lock_irq(&pps_idr_lock);
-
-	/* Now really allocate the PPS source.
-	 * After idr_get_new() calling the new source will be freely available
-	 * into the kernel.
-	 */
-	err = idr_get_new(&pps_idr, pps, &id);
-	if (err < 0) {
-		spin_unlock_irq(&pps_idr_lock);
-		goto kfree_pps;
-	}
-
-	id = id & MAX_ID_MASK;
-	if (id >= PPS_MAX_SOURCES) {
-		spin_unlock_irq(&pps_idr_lock);
-
-		printk(KERN_ERR "pps: %s: too many PPS sources in the system\n",
-					info->name);
-		err = -EBUSY;
-		goto free_idr;
-	}
-	pps->id = id;
-
-	spin_unlock_irq(&pps_idr_lock);
 
 	/* Create the char device */
 	err = pps_register_cdev(pps);
 	if (err < 0) {
-		printk(KERN_ERR "pps: %s: unable to create char device\n",
+		pr_err("%s: unable to create char device\n",
 					info->name);
-		goto free_idr;
+		goto kfree_pps;
 	}
 
-	pr_info("new PPS source %s at ID %d\n", info->name, id);
+	dev_info(pps->dev, "new PPS source %s\n", info->name);
 
-	return id;
-
-free_idr:
-	spin_lock_irq(&pps_idr_lock);
-	idr_remove(&pps_idr, id);
-	spin_unlock_irq(&pps_idr_lock);
+	return pps;
 
 kfree_pps:
 	kfree(pps);
 
 pps_register_source_exit:
-	printk(KERN_ERR "pps: %s: unable to register source\n", info->name);
+	pr_err("%s: unable to register source\n", info->name);
 
-	return err;
+	return NULL;
 }
 EXPORT_SYMBOL(pps_register_source);
 
 /* pps_unregister_source - remove a PPS source from the system
- * @source: the PPS source ID
+ * @pps: the PPS source
  *
  * This function is used to remove a previously registered PPS source from
  * the system.
  */
 
-void pps_unregister_source(int source)
+void pps_unregister_source(struct pps_device *pps)
 {
-	struct pps_device *pps;
-
-	spin_lock_irq(&pps_idr_lock);
-	pps = idr_find(&pps_idr, source);
-
-	if (!pps) {
-		BUG();
-		spin_unlock_irq(&pps_idr_lock);
-		return;
-	}
-	spin_unlock_irq(&pps_idr_lock);
-
+	pps_kc_remove(pps);
 	pps_unregister_cdev(pps);
-	pps_put_source(pps);
+
+	/* don't have to kfree(pps) here because it will be done on
+	 * device destruction */
 }
 EXPORT_SYMBOL(pps_unregister_source);
 
 /* pps_event - register a PPS event into the system
- * @source: the PPS source ID
+ * @pps: the PPS device
  * @ts: the event timestamp
  * @event: the event type
  * @data: userdef pointer
@@ -263,78 +159,72 @@
  * This function is used by each PPS client in order to register a new
  * PPS event into the system (it's usually called inside an IRQ handler).
  *
- * If an echo function is associated with the PPS source it will be called
+ * If an echo function is associated with the PPS device it will be called
  * as:
- *	pps->info.echo(source, event, data);
+ *	pps->info.echo(pps, event, data);
  */
-
-void pps_event(int source, struct pps_ktime *ts, int event, void *data)
+void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+		void *data)
 {
-	struct pps_device *pps;
 	unsigned long flags;
 	int captured = 0;
+	struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
 
-	if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) {
-		printk(KERN_ERR "pps: unknown event (%x) for source %d\n",
-			event, source);
-		return;
-	}
+	/* check event type */
+	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
 
-	pps = pps_get_source(source);
-	if (!pps)
-		return;
+	dev_dbg(pps->dev, "PPS event at %ld.%09ld\n",
+			ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
 
-	pr_debug("PPS event on source %d at %llu.%06u\n",
-			pps->id, (unsigned long long) ts->sec, ts->nsec);
+	timespec_to_pps_ktime(&ts_real, ts->ts_real);
 
 	spin_lock_irqsave(&pps->lock, flags);
 
 	/* Must call the echo function? */
 	if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)))
-		pps->info.echo(source, event, data);
+		pps->info.echo(pps, event, data);
 
 	/* Check the event */
 	pps->current_mode = pps->params.mode;
-	if ((event & PPS_CAPTUREASSERT) &
-			(pps->params.mode & PPS_CAPTUREASSERT)) {
+	if (event & pps->params.mode & PPS_CAPTUREASSERT) {
 		/* We have to add an offset? */
 		if (pps->params.mode & PPS_OFFSETASSERT)
-			pps_add_offset(ts, &pps->params.assert_off_tu);
+			pps_add_offset(&ts_real,
+					&pps->params.assert_off_tu);
 
 		/* Save the time stamp */
-		pps->assert_tu = *ts;
+		pps->assert_tu = ts_real;
 		pps->assert_sequence++;
-		pr_debug("capture assert seq #%u for source %d\n",
-			pps->assert_sequence, source);
+		dev_dbg(pps->dev, "capture assert seq #%u\n",
+			pps->assert_sequence);
 
 		captured = ~0;
 	}
-	if ((event & PPS_CAPTURECLEAR) &
-			(pps->params.mode & PPS_CAPTURECLEAR)) {
+	if (event & pps->params.mode & PPS_CAPTURECLEAR) {
 		/* We have to add an offset? */
 		if (pps->params.mode & PPS_OFFSETCLEAR)
-			pps_add_offset(ts, &pps->params.clear_off_tu);
+			pps_add_offset(&ts_real,
+					&pps->params.clear_off_tu);
 
 		/* Save the time stamp */
-		pps->clear_tu = *ts;
+		pps->clear_tu = ts_real;
 		pps->clear_sequence++;
-		pr_debug("capture clear seq #%u for source %d\n",
-			pps->clear_sequence, source);
+		dev_dbg(pps->dev, "capture clear seq #%u\n",
+			pps->clear_sequence);
 
 		captured = ~0;
 	}
 
-	/* Wake up iif captured somthing */
+	pps_kc_event(pps, ts, event);
+
+	/* Wake up if captured something */
 	if (captured) {
-		pps->go = ~0;
-		wake_up_interruptible(&pps->queue);
+		pps->last_ev++;
+		wake_up_interruptible_all(&pps->queue);
 
 		kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
 	}
 
 	spin_unlock_irqrestore(&pps->lock, flags);
-
-	/* Now we can release the PPS source for (possible) deregistration */
-	pps_put_source(pps);
 }
 EXPORT_SYMBOL(pps_event);
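The hunk above replaces the single pps->go flag with a monotonically increasing last_ev counter and wake_up_interruptible_all(), so several waiters can each see the same event instead of racing for one flag. A sketch of that counter-based wait under hypothetical names:

#include <linux/wait.h>

struct demo_src {
	wait_queue_head_t	queue;
	unsigned int		last_ev;	/* bumped on every event */
};

static int demo_wait_for_event(struct demo_src *s)
{
	unsigned int ev = s->last_ev;	/* snapshot before sleeping */

	return wait_event_interruptible(s->queue, ev != s->last_ev);
}

static void demo_fire_event(struct demo_src *s)
{
	s->last_ev++;
	wake_up_interruptible_all(&s->queue);	/* every waiter sees it */
}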
diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
new file mode 100644
index 0000000..079e930
--- /dev/null
+++ b/drivers/pps/kc.c
@@ -0,0 +1,122 @@
+/*
+ * PPS kernel consumer API
+ *
+ * Copyright (C) 2009-2010   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pps_kernel.h>
+
+#include "kc.h"
+
+/*
+ * Global variables
+ */
+
+/* state variables to bind kernel consumer */
+DEFINE_SPINLOCK(pps_kc_hardpps_lock);
+/* PPS API (RFC 2783): current source and mode for kernel consumer */
+struct pps_device *pps_kc_hardpps_dev;	/* unique pointer to device */
+int pps_kc_hardpps_mode;		/* mode bits for kernel consumer */
+
+/* pps_kc_bind - control PPS kernel consumer binding
+ * @pps: the PPS source
+ * @bind_args: kernel consumer bind parameters
+ *
+ * This function is used to bind or unbind PPS kernel consumer according to
+ * supplied parameters. Should not be called in interrupt context.
+ */
+int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+{
+	/* Check if another consumer is already bound */
+	spin_lock_irq(&pps_kc_hardpps_lock);
+
+	if (bind_args->edge == 0)
+		if (pps_kc_hardpps_dev == pps) {
+			pps_kc_hardpps_mode = 0;
+			pps_kc_hardpps_dev = NULL;
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_info(pps->dev, "unbound kernel"
+					" consumer\n");
+		} else {
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_err(pps->dev, "selected kernel consumer"
+					" is not bound\n");
+			return -EINVAL;
+		}
+	else
+		if (pps_kc_hardpps_dev == NULL ||
+				pps_kc_hardpps_dev == pps) {
+			pps_kc_hardpps_mode = bind_args->edge;
+			pps_kc_hardpps_dev = pps;
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_info(pps->dev, "bound kernel consumer: "
+				"edge=0x%x\n", bind_args->edge);
+		} else {
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_err(pps->dev, "another kernel consumer"
+					" is already bound\n");
+			return -EINVAL;
+		}
+
+	return 0;
+}
+
+/* pps_kc_remove - unbind kernel consumer on PPS source removal
+ * @pps: the PPS source
+ *
+ * This function is used to disable kernel consumer on PPS source removal
+ * if this source was bound to PPS kernel consumer. Can be called on any
+ * source safely. Should not be called in interrupt context.
+ */
+void pps_kc_remove(struct pps_device *pps)
+{
+	spin_lock_irq(&pps_kc_hardpps_lock);
+	if (pps == pps_kc_hardpps_dev) {
+		pps_kc_hardpps_mode = 0;
+		pps_kc_hardpps_dev = NULL;
+		spin_unlock_irq(&pps_kc_hardpps_lock);
+		dev_info(pps->dev, "unbound kernel consumer"
+				" on device removal\n");
+	} else
+		spin_unlock_irq(&pps_kc_hardpps_lock);
+}
+
+/* pps_kc_event - call hardpps() on PPS event
+ * @pps: the PPS source
+ * @ts: PPS event timestamp
+ * @event: PPS event edge
+ *
+ * This function calls hardpps() when an event from a bound PPS source occurs.
+ */
+void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts,
+		int event)
+{
+	unsigned long flags;
+
+	/* Pass some events to kernel consumer if activated */
+	spin_lock_irqsave(&pps_kc_hardpps_lock, flags);
+	if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode)
+		hardpps(&ts->ts_real, &ts->ts_raw);
+	spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags);
+}
diff --git a/drivers/pps/kc.h b/drivers/pps/kc.h
new file mode 100644
index 0000000..d296fcd
--- /dev/null
+++ b/drivers/pps/kc.h
@@ -0,0 +1,46 @@
+/*
+ * PPS kernel consumer API header
+ *
+ * Copyright (C) 2009-2010   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_PPS_KC_H
+#define LINUX_PPS_KC_H
+
+#include <linux/errno.h>
+#include <linux/pps_kernel.h>
+
+#ifdef CONFIG_NTP_PPS
+
+extern int pps_kc_bind(struct pps_device *pps,
+		struct pps_bind_args *bind_args);
+extern void pps_kc_remove(struct pps_device *pps);
+extern void pps_kc_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event);
+
+
+#else /* CONFIG_NTP_PPS */
+
+static inline int pps_kc_bind(struct pps_device *pps,
+		struct pps_bind_args *bind_args) { return -EOPNOTSUPP; }
+static inline void pps_kc_remove(struct pps_device *pps) {}
+static inline void pps_kc_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event) {}
+
+#endif /* CONFIG_NTP_PPS */
+
+#endif /* LINUX_PPS_KC_H */
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index ca5183b..2baadd2 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -19,6 +19,7 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -26,9 +27,13 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 #include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/pps_kernel.h>
+#include <linux/slab.h>
+
+#include "kc.h"
 
 /*
  * Local variables
@@ -37,6 +42,9 @@
 static dev_t pps_devt;
 static struct class *pps_class;
 
+static DEFINE_MUTEX(pps_idr_lock);
+static DEFINE_IDR(pps_idr);
+
 /*
  * Char device methods
  */
@@ -61,15 +69,13 @@
 {
 	struct pps_device *pps = file->private_data;
 	struct pps_kparams params;
-	struct pps_fdata fdata;
-	unsigned long ticks;
 	void __user *uarg = (void __user *) arg;
 	int __user *iuarg = (int __user *) arg;
 	int err;
 
 	switch (cmd) {
 	case PPS_GETPARAMS:
-		pr_debug("PPS_GETPARAMS: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_GETPARAMS\n");
 
 		spin_lock_irq(&pps->lock);
 
@@ -85,7 +91,7 @@
 		break;
 
 	case PPS_SETPARAMS:
-		pr_debug("PPS_SETPARAMS: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_SETPARAMS\n");
 
 		/* Check the capabilities */
 		if (!capable(CAP_SYS_TIME))
@@ -95,14 +101,14 @@
 		if (err)
 			return -EFAULT;
 		if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
-			pr_debug("capture mode unspecified (%x)\n",
+			dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
 								params.mode);
 			return -EINVAL;
 		}
 
 		/* Check for supported capabilities */
 		if ((params.mode & ~pps->info.mode) != 0) {
-			pr_debug("unsupported capabilities (%x)\n",
+			dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
 								params.mode);
 			return -EINVAL;
 		}
@@ -115,7 +121,7 @@
 		/* Restore the read only parameters */
 		if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
 			/* section 3.3 of RFC 2783 interpreted */
-			pr_debug("time format unspecified (%x)\n",
+			dev_dbg(pps->dev, "time format unspecified (%x)\n",
 								params.mode);
 			pps->params.mode |= PPS_TSFMT_TSPEC;
 		}
@@ -128,7 +134,7 @@
 		break;
 
 	case PPS_GETCAP:
-		pr_debug("PPS_GETCAP: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_GETCAP\n");
 
 		err = put_user(pps->info.mode, iuarg);
 		if (err)
@@ -136,20 +142,26 @@
 
 		break;
 
-	case PPS_FETCH:
-		pr_debug("PPS_FETCH: source %d\n", pps->id);
+	case PPS_FETCH: {
+		struct pps_fdata fdata;
+		unsigned int ev;
+
+		dev_dbg(pps->dev, "PPS_FETCH\n");
 
 		err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
 		if (err)
 			return -EFAULT;
 
-		pps->go = 0;
+		ev = pps->last_ev;
 
 		/* Manage the timeout */
 		if (fdata.timeout.flags & PPS_TIME_INVALID)
-			err = wait_event_interruptible(pps->queue, pps->go);
+			err = wait_event_interruptible(pps->queue,
+					ev != pps->last_ev);
 		else {
-			pr_debug("timeout %lld.%09d\n",
+			unsigned long ticks;
+
+			dev_dbg(pps->dev, "timeout %lld.%09d\n",
 					(long long) fdata.timeout.sec,
 					fdata.timeout.nsec);
 			ticks = fdata.timeout.sec * HZ;
@@ -157,7 +169,9 @@
 
 			if (ticks != 0) {
 				err = wait_event_interruptible_timeout(
-						pps->queue, pps->go, ticks);
+						pps->queue,
+						ev != pps->last_ev,
+						ticks);
 				if (err == 0)
 					return -ETIMEDOUT;
 			}
@@ -165,7 +179,7 @@
 
 		/* Check for pending signals */
 		if (err == -ERESTARTSYS) {
-			pr_debug("pending signal caught\n");
+			dev_dbg(pps->dev, "pending signal caught\n");
 			return -EINTR;
 		}
 
@@ -185,10 +199,44 @@
 			return -EFAULT;
 
 		break;
+	}
+	case PPS_KC_BIND: {
+		struct pps_bind_args bind_args;
 
+		dev_dbg(pps->dev, "PPS_KC_BIND\n");
+
+		/* Check the capabilities */
+		if (!capable(CAP_SYS_TIME))
+			return -EPERM;
+
+		if (copy_from_user(&bind_args, uarg,
+					sizeof(struct pps_bind_args)))
+			return -EFAULT;
+
+		/* Check for supported capabilities */
+		if ((bind_args.edge & ~pps->info.mode) != 0) {
+			dev_err(pps->dev, "unsupported capabilities (%x)\n",
+					bind_args.edge);
+			return -EINVAL;
+		}
+
+		/* Validate parameters roughly */
+		if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
+				(bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
+				bind_args.consumer != PPS_KC_HARDPPS) {
+			dev_err(pps->dev, "invalid kernel consumer bind"
+					" parameters (%x)\n", bind_args.edge);
+			return -EINVAL;
+		}
+
+		err = pps_kc_bind(pps, &bind_args);
+		if (err < 0)
+			return err;
+
+		break;
+	}
 	default:
 		return -ENOTTY;
-		break;
 	}
 
 	return 0;
@@ -198,12 +246,6 @@
 {
 	struct pps_device *pps = container_of(inode->i_cdev,
 						struct pps_device, cdev);
-	int found;
-
-	found = pps_get_source(pps->id) != 0;
-	if (!found)
-		return -ENODEV;
-
 	file->private_data = pps;
 
 	return 0;
@@ -211,11 +253,6 @@
 
 static int pps_cdev_release(struct inode *inode, struct file *file)
 {
-	struct pps_device *pps = file->private_data;
-
-	/* Free the PPS source and wake up (possible) deregistration */
-	pps_put_source(pps);
-
 	return 0;
 }
 
@@ -233,25 +270,67 @@
 	.release	= pps_cdev_release,
 };
 
+static void pps_device_destruct(struct device *dev)
+{
+	struct pps_device *pps = dev_get_drvdata(dev);
+
+	/* Release the id only here so that it cannot be reused
+	 * while the device is still in use */
+	mutex_lock(&pps_idr_lock);
+	idr_remove(&pps_idr, pps->id);
+	mutex_unlock(&pps_idr_lock);
+
+	kfree(dev);
+	kfree(pps);
+}
+
 int pps_register_cdev(struct pps_device *pps)
 {
 	int err;
+	dev_t devt;
 
-	pps->devno = MKDEV(MAJOR(pps_devt), pps->id);
+	mutex_lock(&pps_idr_lock);
+	/* Get new ID for the new PPS source */
+	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
+		mutex_unlock(&pps_idr_lock);
+		return -ENOMEM;
+	}
+
+	/* Now really allocate the PPS source.
+	 * After the idr_get_new() call the new source will be freely
+	 * available within the kernel.
+	 */
+	err = idr_get_new(&pps_idr, pps, &pps->id);
+	mutex_unlock(&pps_idr_lock);
+
+	if (err < 0)
+		return err;
+
+	pps->id &= MAX_ID_MASK;
+	if (pps->id >= PPS_MAX_SOURCES) {
+		pr_err("%s: too many PPS sources in the system\n",
+					pps->info.name);
+		err = -EBUSY;
+		goto free_idr;
+	}
+
+	devt = MKDEV(MAJOR(pps_devt), pps->id);
+
 	cdev_init(&pps->cdev, &pps_cdev_fops);
 	pps->cdev.owner = pps->info.owner;
 
-	err = cdev_add(&pps->cdev, pps->devno, 1);
+	err = cdev_add(&pps->cdev, devt, 1);
 	if (err) {
-		printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n",
+		pr_err("%s: failed to add char device %d:%d\n",
 				pps->info.name, MAJOR(pps_devt), pps->id);
-		return err;
+		goto free_idr;
 	}
-	pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
+	pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
 							"pps%d", pps->id);
 	if (IS_ERR(pps->dev))
 		goto del_cdev;
-	dev_set_drvdata(pps->dev, pps);
+
+	pps->dev->release = pps_device_destruct;
 
 	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
 			MAJOR(pps_devt), pps->id);
@@ -261,12 +340,17 @@
 del_cdev:
 	cdev_del(&pps->cdev);
 
+free_idr:
+	mutex_lock(&pps_idr_lock);
+	idr_remove(&pps_idr, pps->id);
+	mutex_unlock(&pps_idr_lock);
+
 	return err;
 }
 
 void pps_unregister_cdev(struct pps_device *pps)
 {
-	device_destroy(pps_class, pps->devno);
+	device_destroy(pps_class, pps->dev->devt);
 	cdev_del(&pps->cdev);
 }
 
@@ -286,14 +370,14 @@
 
 	pps_class = class_create(THIS_MODULE, "pps");
 	if (!pps_class) {
-		printk(KERN_ERR "pps: failed to allocate class\n");
+		pr_err("failed to allocate class\n");
 		return -ENOMEM;
 	}
 	pps_class->dev_attrs = pps_attrs;
 
 	err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
 	if (err < 0) {
-		printk(KERN_ERR "pps: failed to allocate char device region\n");
+		pr_err("failed to allocate char device region\n");
 		goto remove_class;
 	}
 
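The PPS_FETCH rework above drops the racy pps->go flag in favour of an event
counter: the caller snapshots last_ev before sleeping and the wait condition
compares against that snapshot, so an event that fires in between cannot be
lost. A sketch of the pattern with hypothetical names (ex_src stands in for
struct pps_device, not the real definition):

	#include <linux/wait.h>

	struct ex_src {
		wait_queue_head_t queue;
		unsigned int last_ev;		/* incremented once per captured event */
	};

	static int ex_wait_for_event(struct ex_src *src)
	{
		unsigned int ev = src->last_ev;	/* snapshot before sleeping */

		/* An event arriving after the snapshot already satisfies the
		 * condition, so it cannot be missed the way a cleared "go"
		 * flag could be. */
		return wait_event_interruptible(src->queue, ev != src->last_ev);
	}

	static void ex_report_event(struct ex_src *src)
	{
		src->last_ev++;			/* producer side */
		wake_up_all(&src->queue);
	}
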
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index ccea15c..50cb1e1 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
 obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
-ps3av_mod-objs		+= ps3av.o ps3av_cmd.o
+ps3av_mod-y		:= ps3av.o ps3av_cmd.o
 obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
 obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
 obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 1eb82c4..a50391b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -46,7 +46,6 @@
 DEFINE_SPINLOCK(rio_global_list_lock);
 
 static int next_destid = 0;
-static int next_switchid = 0;
 static int next_net = 0;
 static int next_comptag = 1;
 
@@ -378,12 +377,30 @@
 	struct rio_dev *rdev;
 	struct rio_switch *rswitch = NULL;
 	int result, rdid;
+	size_t size;
+	u32 swpinfo = 0;
 
-	rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL);
+	size = sizeof(struct rio_dev);
+	if (rio_mport_read_config_32(port, destid, hopcount,
+				     RIO_PEF_CAR, &result))
+		return NULL;
+
+	if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_SWP_INFO_CAR, &swpinfo);
+		if (result & RIO_PEF_SWITCH) {
+			size += (RIO_GET_TOTAL_PORTS(swpinfo) *
+				sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+		}
+	}
+
+	rdev = kzalloc(size, GFP_KERNEL);
 	if (!rdev)
 		return NULL;
 
 	rdev->net = net;
+	rdev->pef = result;
+	rdev->swpinfo = swpinfo;
 	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
 				 &result);
 	rdev->did = result >> 16;
@@ -397,8 +414,6 @@
 	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR,
 				 &result);
 	rdev->asm_rev = result >> 16;
-	rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
-				 &rdev->pef);
 	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
 		rdev->efptr = result & 0xffff;
 		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
@@ -408,11 +423,6 @@
 						hopcount, RIO_EFB_ERR_MGMNT);
 	}
 
-	if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
-		rio_mport_read_config_32(port, destid, hopcount,
-					 RIO_SWP_INFO_CAR, &rdev->swpinfo);
-	}
-
 	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
 				 &rdev->src_ops);
 	rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
@@ -427,6 +437,10 @@
 		rio_mport_write_config_32(port, destid, hopcount,
 					  RIO_COMPONENT_TAG_CSR, next_comptag);
 		rdev->comp_tag = next_comptag++;
+	} else {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_COMPONENT_TAG_CSR,
+					 &rdev->comp_tag);
 	}
 
 	if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
@@ -437,21 +451,20 @@
 				next_destid++;
 		} else
 			rdev->destid = rio_get_device_id(port, destid, hopcount);
-	} else
-		/* Switch device has an associated destID */
-		rdev->destid = RIO_INVALID_DESTID;
+
+		rdev->hopcount = 0xff;
+	} else {
+		/* Switch device has an associated destID which
+		 * will be adjusted later
+		 */
+		rdev->destid = destid;
+		rdev->hopcount = hopcount;
+	}
 
 	/* If a PE has both switch and other functions, show it as a switch */
 	if (rio_is_switch(rdev)) {
-		rswitch = kzalloc(sizeof(*rswitch) +
-				  RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
-				  sizeof(rswitch->nextdev[0]),
-				  GFP_KERNEL);
-		if (!rswitch)
-			goto cleanup;
-		rswitch->switchid = next_switchid;
-		rswitch->hopcount = hopcount;
-		rswitch->destid = destid;
+		rswitch = rdev->rswitch;
+		rswitch->switchid = rdev->comp_tag & RIO_CTAG_UDEVID;
 		rswitch->port_ok = 0;
 		rswitch->route_table = kzalloc(sizeof(u8)*
 					RIO_MAX_ROUTE_ENTRIES(port->sys_size),
@@ -462,15 +475,13 @@
 		for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
 				rdid++)
 			rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
-		rdev->rswitch = rswitch;
-		rswitch->rdev = rdev;
 		dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
-			     rdev->rswitch->switchid);
+			     rswitch->switchid);
 		rio_switch_init(rdev, do_enum);
 
-		if (do_enum && rdev->rswitch->clr_table)
-			rdev->rswitch->clr_table(port, destid, hopcount,
-						 RIO_GLOBAL_TABLE);
+		if (do_enum && rswitch->clr_table)
+			rswitch->clr_table(port, destid, hopcount,
+					   RIO_GLOBAL_TABLE);
 
 		list_add_tail(&rswitch->node, &rio_switches);
 
@@ -506,10 +517,9 @@
 	return rdev;
 
 cleanup:
-	if (rswitch) {
+	if (rswitch->route_table)
 		kfree(rswitch->route_table);
-		kfree(rswitch);
-	}
+
 	kfree(rdev);
 	return NULL;
 }
@@ -632,8 +642,7 @@
 
 /**
  * rio_route_add_entry- Add a route entry to a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Port number to be routed
@@ -647,31 +656,31 @@
  * on failure.
  */
 static int
-rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
+rio_route_add_entry(struct rio_dev *rdev,
 		    u16 table, u16 route_destid, u8 route_port, int lock)
 {
 	int rc;
 
 	if (lock) {
-		rc = rio_lock_device(mport, rswitch->destid,
-				     rswitch->hopcount, 1000);
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
 		if (rc)
 			return rc;
 	}
 
-	rc = rswitch->add_entry(mport, rswitch->destid,
-					rswitch->hopcount, table,
-					route_destid, route_port);
+	rc = rdev->rswitch->add_entry(rdev->net->hport, rdev->destid,
+				      rdev->hopcount, table,
+				      route_destid, route_port);
 	if (lock)
-		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
 
 	return rc;
 }
 
 /**
  * rio_route_get_entry- Read a route entry in a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Pointer to read port number into
@@ -685,23 +694,24 @@
  * on failure.
  */
 static int
-rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
+rio_route_get_entry(struct rio_dev *rdev, u16 table,
 		    u16 route_destid, u8 *route_port, int lock)
 {
 	int rc;
 
 	if (lock) {
-		rc = rio_lock_device(mport, rswitch->destid,
-				     rswitch->hopcount, 1000);
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
 		if (rc)
 			return rc;
 	}
 
-	rc = rswitch->get_entry(mport, rswitch->destid,
-					rswitch->hopcount, table,
-					route_destid, route_port);
+	rc = rdev->rswitch->get_entry(rdev->net->hport, rdev->destid,
+				      rdev->hopcount, table,
+				      route_destid, route_port);
 	if (lock)
-		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
 
 	return rc;
 }
@@ -809,16 +819,15 @@
 		return -1;
 
 	if (rio_is_switch(rdev)) {
-		next_switchid++;
 		sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
-		rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+		rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 				    port->host_deviceid, sw_inport, 0);
 		rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
 
 		for (destid = 0; destid < next_destid; destid++) {
 			if (destid == port->host_deviceid)
 				continue;
-			rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+			rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 					    destid, sw_inport, 0);
 			rdev->rswitch->route_table[destid] = sw_inport;
 		}
@@ -850,8 +859,7 @@
 				    "RIO: scanning device on port %d\n",
 				    port_num);
 				rdev->rswitch->port_ok |= (1 << port_num);
-				rio_route_add_entry(port, rdev->rswitch,
-						RIO_GLOBAL_TABLE,
+				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 						RIO_ANY_DESTID(port->sys_size),
 						port_num, 0);
 
@@ -865,7 +873,7 @@
 					     destid < next_destid; destid++) {
 						if (destid == port->host_deviceid)
 							continue;
-						rio_route_add_entry(port, rdev->rswitch,
+						rio_route_add_entry(rdev,
 								    RIO_GLOBAL_TABLE,
 								    destid,
 								    port_num,
@@ -904,7 +912,7 @@
 				next_destid++;
 		}
 
-		rdev->rswitch->destid = sw_destid;
+		rdev->destid = sw_destid;
 	} else
 		pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",
 		    rio_name(rdev), rdev->vid, rdev->did);
@@ -935,13 +943,15 @@
  * @port: Master port to send transactions
  * @destid: Current destination ID in network
  * @hopcount: Number of hops into the network
+ * @prev: previous rio_dev
+ * @prev_port: previous port number
  *
  * Recursively discovers a RIO network.  Transactions are sent via the
  * master port passed in @port.
  */
 static int __devinit
 rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
-	      u8 hopcount)
+	      u8 hopcount, struct rio_dev *prev, int prev_port)
 {
 	u8 port_num, route_port;
 	struct rio_dev *rdev;
@@ -951,14 +961,15 @@
 	if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) {
 		/* Add device to the global and bus/net specific list. */
 		list_add_tail(&rdev->net_list, &net->devices);
+		rdev->prev = prev;
+		if (prev && rio_is_switch(prev))
+			prev->rswitch->nextdev[prev_port] = rdev;
 	} else
 		return -1;
 
 	if (rio_is_switch(rdev)) {
-		next_switchid++;
-
 		/* Associated destid is how we accessed this switch */
-		rdev->rswitch->destid = destid;
+		rdev->destid = destid;
 
 		pr_debug(
 		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
@@ -981,7 +992,7 @@
 				for (ndestid = 0;
 				     ndestid < RIO_ANY_DESTID(port->sys_size);
 				     ndestid++) {
-					rio_route_get_entry(port, rdev->rswitch,
+					rio_route_get_entry(rdev,
 							    RIO_GLOBAL_TABLE,
 							    ndestid,
 							    &route_port, 0);
@@ -992,8 +1003,8 @@
 				if (ndestid == RIO_ANY_DESTID(port->sys_size))
 					continue;
 				rio_unlock_device(port, destid, hopcount);
-				if (rio_disc_peer
-				    (net, port, ndestid, hopcount + 1) < 0)
+				if (rio_disc_peer(net, port, ndestid,
+					hopcount + 1, rdev, port_num) < 0)
 					return -1;
 			}
 		}
@@ -1069,14 +1080,14 @@
  */
 static void rio_update_route_tables(struct rio_mport *port)
 {
-	struct rio_dev *rdev;
+	struct rio_dev *rdev, *swrdev;
 	struct rio_switch *rswitch;
 	u8 sport;
 	u16 destid;
 
 	list_for_each_entry(rdev, &rio_devices, global_list) {
 
-		destid = (rio_is_switch(rdev))?rdev->rswitch->destid:rdev->destid;
+		destid = rdev->destid;
 
 		list_for_each_entry(rswitch, &rio_switches, node) {
 
@@ -1084,14 +1095,16 @@
 				continue;
 
 			if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
+				swrdev = sw_to_rio_dev(rswitch);
+
 				/* Skip if destid ends in empty switch*/
-				if (rswitch->destid == destid)
+				if (swrdev->destid == destid)
 					continue;
 
-				sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
+				sport = RIO_GET_PORT_NUM(swrdev->swpinfo);
 
 				if (rswitch->add_entry)	{
-					rio_route_add_entry(port, rswitch,
+					rio_route_add_entry(swrdev,
 						RIO_GLOBAL_TABLE, destid,
 						sport, 0);
 					rswitch->route_table[destid] = sport;
@@ -1203,21 +1216,20 @@
 
 	list_for_each_entry(rdev, &rio_devices, global_list)
 		if (rio_is_switch(rdev)) {
-			rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
-					rdev->rswitch->hopcount, 1000);
+			rio_lock_device(rdev->net->hport, rdev->destid,
+					rdev->hopcount, 1000);
 			for (i = 0;
 			     i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
 			     i++) {
-				if (rio_route_get_entry
-				    (rdev->net->hport, rdev->rswitch,
-				     RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
+				if (rio_route_get_entry(rdev,
+					RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
 					continue;
 				rdev->rswitch->route_table[i] = sport;
 			}
 
 			rio_unlock_device(rdev->net->hport,
-					  rdev->rswitch->destid,
-					  rdev->rswitch->hopcount);
+					  rdev->destid,
+					  rdev->hopcount);
 		}
 }
 
@@ -1284,7 +1296,7 @@
 						   mport->host_deviceid);
 
 		if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
-					0) < 0) {
+					0, NULL, 0) < 0) {
 			printk(KERN_INFO
 			       "RIO: master port %d device has failed discovery\n",
 			       mport->id);
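
rio_setup_device() above now sizes a single kzalloc() from RIO_PEF_CAR and
RIO_SWP_INFO_CAR so the switch data and its per-port nextdev[] pointers live in
the same allocation as the rio_dev, removing the separate rswitch allocation
and its error unwinding. A simplified sketch of that layout, with stand-in
structures rather than the exact <linux/rio.h> definitions:

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/rio_regs.h>

	struct ex_dev;

	struct ex_switch {
		u16 switchid;
		struct ex_dev *nextdev[0];	/* one slot per switch port */
	};

	struct ex_dev {
		u32 pef;
		u32 swpinfo;
		struct ex_switch rswitch[0];	/* present only for switches */
	};

	static struct ex_dev *ex_alloc_dev(u32 pef, u32 swpinfo)
	{
		size_t size = sizeof(struct ex_dev);

		if (pef & RIO_PEF_SWITCH)
			size += sizeof(struct ex_switch) +
				RIO_GET_TOTAL_PORTS(swpinfo) *
				sizeof(((struct ex_switch *)0)->nextdev[0]);

		return kzalloc(size, GFP_KERNEL);	/* one allocation, one kfree() */
	}
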
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 137ed93..1269fbd 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@
 
 	/* Several chips lock up trying to read undefined config space */
 	if (capable(CAP_SYS_ADMIN))
-		size = 0x200000;
+		size = RIO_MAINT_SPACE_SZ;
 
-	if (off > size)
+	if (off >= size)
 		return 0;
 	if (off + count > size) {
 		size -= off;
@@ -147,10 +147,10 @@
 	loff_t init_off = off;
 	u8 *data = (u8 *) buf;
 
-	if (off > 0x200000)
+	if (off >= RIO_MAINT_SPACE_SZ)
 		return 0;
-	if (off + count > 0x200000) {
-		size = 0x200000 - off;
+	if (off + count > RIO_MAINT_SPACE_SZ) {
+		size = RIO_MAINT_SPACE_SZ - off;
 		count = size;
 	}
 
@@ -200,7 +200,7 @@
 		 .name = "config",
 		 .mode = S_IRUGO | S_IWUSR,
 		 },
-	.size = 0x200000,
+	.size = RIO_MAINT_SPACE_SZ,
 	.read = rio_read_config,
 	.write = rio_write_config,
 };
@@ -217,7 +217,7 @@
 
 	err = device_create_bin_file(&rdev->dev, &rio_config_attr);
 
-	if (!err && rdev->rswitch) {
+	if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
 		err = device_create_file(&rdev->dev, &dev_attr_routes);
 		if (!err && rdev->rswitch->sw_sysfs)
 			err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
@@ -239,7 +239,7 @@
 void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
 {
 	device_remove_bin_file(&rdev->dev, &rio_config_attr);
-	if (rdev->rswitch) {
+	if (rdev->pef & RIO_PEF_SWITCH) {
 		device_remove_file(&rdev->dev, &dev_attr_routes);
 		if (rdev->rswitch->sw_sysfs)
 			rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
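
The rio-sysfs.c change replaces the hard-coded maintenance-space size with the
RIO_MAINT_SPACE_SZ constant and tightens the start-of-range check to
off >= size, so a read beginning exactly at the end of the space returns 0
instead of slipping through. A sketch of the clamp, assuming RIO_MAINT_SPACE_SZ
comes from the RapidIO register headers updated elsewhere in this series;
ex_do_read() is a placeholder for the real config-space access:

	#include <linux/types.h>
	#include <linux/rio_regs.h>

	static ssize_t ex_do_read(char *buf, loff_t off, size_t count);

	static ssize_t ex_maint_read(char *buf, loff_t off, size_t count)
	{
		size_t size = RIO_MAINT_SPACE_SZ;

		if (off >= size)		/* "off > size" let off == size through */
			return 0;
		if (off + count > size)
			count = size - off;	/* truncate a read crossing the end */

		return ex_do_read(buf, off, count);
	}
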
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 7b5080c..cc2a3b7 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -471,16 +471,9 @@
  */
 int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
 	u32 regval;
 
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
+	rio_read_config_32(rdev,
 				 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				 &regval);
 	if (lock)
@@ -488,7 +481,7 @@
 	else
 		regval &= ~RIO_PORT_N_CTL_LOCKOUT;
 
-	rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
+	rio_write_config_32(rdev,
 				  rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				  regval);
 	return 0;
@@ -507,7 +500,7 @@
 rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
 {
 	u32 result;
-	int p_port, dstid, rc = -EIO;
+	int p_port, rc = -EIO;
 	struct rio_dev *prev = NULL;
 
 	/* Find switch with failed RIO link */
@@ -522,9 +515,7 @@
 	if (prev == NULL)
 		goto err_out;
 
-	dstid = (rdev->pef & RIO_PEF_SWITCH) ?
-			rdev->rswitch->destid : rdev->destid;
-	p_port = prev->rswitch->route_table[dstid];
+	p_port = prev->rswitch->route_table[rdev->destid];
 
 	if (p_port != RIO_INVALID_ROUTE) {
 		pr_debug("RIO: link failed on [%s]-P%d\n",
@@ -567,15 +558,8 @@
  */
 static int rio_chk_dev_access(struct rio_dev *rdev)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+	return rio_mport_chk_dev_access(rdev->net->hport,
+					rdev->destid, rdev->hopcount);
 }
 
 /**
@@ -588,23 +572,20 @@
 static int
 rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int checkcount;
 
 	if (lnkresp) {
 		/* Read from link maintenance response register
 		 * to clear valid bit */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		udelay(50);
 	}
 
 	/* Issue Input-status command */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
 		RIO_MNT_REQ_CMD_IS);
 
@@ -615,7 +596,7 @@
 	checkcount = 3;
 	while (checkcount--) {
 		udelay(50);
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
@@ -635,15 +616,12 @@
  */
 static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
 	u32 regval;
 	u32 far_ackid, far_linkstat, near_ackid;
 
 	if (err_status == 0)
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 
@@ -661,7 +639,7 @@
 			 pnum, regval);
 		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
 		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
 			&regval);
 		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
@@ -679,9 +657,8 @@
 			/* Align near outstanding/outbound ackIDs with
 			 * far inbound.
 			 */
-			rio_mport_write_config_32(mport, destid,
-				hopcount, rdev->phys_efptr +
-					RIO_PORT_N_ACK_STS_CSR(pnum),
+			rio_write_config_32(rdev,
+				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
 				(near_ackid << 24) |
 					(far_ackid << 8) | far_ackid);
 			/* Align far outstanding/outbound ackIDs with
@@ -698,7 +675,7 @@
 				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
 		}
 rd_err:
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -710,7 +687,7 @@
 				     RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
 		udelay(50);
 
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -730,13 +707,10 @@
 int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
 {
 	struct rio_dev *rdev;
-	struct rio_mport *mport;
-	u8 hopcount;
-	u16 destid;
 	u32 err_status, em_perrdet, em_ltlerrdet;
 	int rc, portnum;
 
-	rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
+	rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
 	if (rdev == NULL) {
 		/* Device removed or enumeration error */
 		pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
@@ -800,17 +774,13 @@
 		return 0;
 	}
 
-	mport = rdev->net->hport;
-	destid = rdev->rswitch->destid;
-	hopcount = rdev->rswitch->hopcount;
-
 	/*
 	 * Process the port-write notification from switch
 	 */
 	if (rdev->rswitch->em_handle)
 		rdev->rswitch->em_handle(rdev, portnum);
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			&err_status);
 	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
@@ -840,7 +810,7 @@
 			rdev->rswitch->port_ok &= ~(1 << portnum);
 			rio_set_port_lockout(rdev, portnum, 1);
 
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ACK_STS_CSR(portnum),
 				RIO_PORT_N_ACK_CLEAR);
@@ -851,28 +821,28 @@
 		}
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
 	if (em_perrdet) {
 		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
 			 portnum, em_perrdet);
 		/* Clear EM Port N Error Detect CSR */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
 	if (em_ltlerrdet) {
 		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
 			 em_ltlerrdet);
 		/* Clear EM L/T Layer Error Detect CSR */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
 	}
 
 	/* Clear remaining error bits and Port-Write Pending bit */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			err_status);
 
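rio.c and the switch drivers below move from
rio_mport_{read,write}_config_32(mport, destid, hopcount, ...) to per-device
rio_{read,write}_config_32(rdev, ...) accessors, which pick up the destid and
hopcount now stored in struct rio_dev. The real helpers come from the RapidIO
driver headers; this sketch with ex_* names only shows the shape of the change:

	#include <linux/rio.h>
	#include <linux/rio_drv.h>

	static inline int ex_read_config_32(struct rio_dev *rdev, u32 offset,
					    u32 *data)
	{
		return rio_mport_read_config_32(rdev->net->hport, rdev->destid,
						rdev->hopcount, offset, data);
	}

	static inline int ex_write_config_32(struct rio_dev *rdev, u32 offset,
					     u32 data)
	{
		return rio_mport_write_config_32(rdev->net->hport, rdev->destid,
						 rdev->hopcount, offset, data);
	}
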
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 0bb871c..095016a 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -209,9 +209,6 @@
 static int
 idtg2_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int i, tmp;
 
@@ -220,29 +217,25 @@
 	 * All standard EM configuration should be performed at upper level.
 	 */
 
-	pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	/* Set Port-Write info CSR: PRIO=3 and CRF=1 */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PW_INFO_CSR, 0x0000e000);
+	rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000);
 
 	/*
 	 * Configure LT LAYER error reporting.
 	 */
 
 	/* Enable standard (RIO.p8) error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LT_ERR_REPORT_EN,
+	rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN,
 			REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
 			REM_LTL_ERR_UNSUPTR);
 
 	/* Use Port-Writes for LT layer error reporting.
 	 * Enable per-port reset
 	 */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1,
+	rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval);
+	rio_write_config_32(rdev, IDT_DEV_CTRL_1,
 			regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
 
 	/*
@@ -250,45 +243,40 @@
 	 */
 
 	/* Report all RIO.p8 errors supported by device */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+	rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
 
 	/* Configure reporting of implementation specific errors/events */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+	rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC,
+			    IDT_PORT_INIT_TX_ACQUIRED);
 
 	/* Use Port-Writes for port error reporting and enable error logging */
 	tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_PORT_OPS(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval);
+		rio_write_config_32(rdev,
 				IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
 				IDT_PORT_OPS_PL_ELOG |
 				IDT_PORT_OPS_LL_ELOG |
 				IDT_PORT_OPS_LT_ELOG);
 	}
 	/* Overwrite error log if full */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+	rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
 
 	/*
 	 * Configure LANE error reporting.
 	 */
 
 	/* Disable line error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LANE_ERR_REPORT_EN_BC, 0);
+	rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0);
 
 	/* Use Port-Writes for lane error reporting (when enabled)
 	 * (do per-lane update because lanes may have different configuration)
 	 */
 	tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+		rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval);
+		rio_write_config_32(rdev, IDT_LANE_CTRL(i),
+				    regval | IDT_LANE_CTRL_GENPW);
 	}
 
 	/*
@@ -296,41 +284,32 @@
 	 */
 
 	/* Disable JTAG and I2C Error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_PORT_ERR_CAP_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0);
 
 	/* Disable JTAG and I2C Error reporting/logging */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_ERR_REPORT_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0);
 
 	/* Disable Port-Write notification from JTAG */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_JTAG_CTRL, 0);
+	rio_write_config_32(rdev, IDT_JTAG_CTRL, 0);
 
 	/* Disable Port-Write notification from I2C */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL,
-			regval & ~IDT_I2C_MCTRL_GENPW);
+	rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval);
+	rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW);
 
 	/*
 	 * Configure CFG_BLK error reporting.
 	 */
 
 	/* Disable Configuration Block error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0);
 
 	/* Disable Port-Writes for Configuration Block error reporting */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT,
-			regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+	rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT,
+			    regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
 
 	/* set TVAL = ~50us */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
 
 	return 0;
@@ -339,18 +318,15 @@
 static int
 idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval, em_perrdet, em_ltlerrdet;
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
 	if (em_ltlerrdet) {
 		/* Service Logical/Transport Layer Error(s) */
 		if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
 			/* Implementation specific error reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
 					IDT_ISLTL_ADDRESS_CAP, &regval);
 
 			pr_debug("RIO: %s Implementation Specific LTL errors" \
@@ -358,13 +334,12 @@
 				 rio_name(rdev), em_ltlerrdet, regval);
 
 			/* Clear implementation specific address capture CSR */
-			rio_mport_write_config_32(mport, destid, hopcount,
-					IDT_ISLTL_ADDRESS_CAP, 0);
+			rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0);
 
 		}
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
 	if (em_perrdet) {
 		/* Service Port-Level Error(s) */
@@ -372,14 +347,14 @@
 			/* Implementation Specific port error reported */
 
 			/* Get IS errors reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
 					IDT_PORT_ISERR_DET(portnum), &regval);
 
 			pr_debug("RIO: %s Implementation Specific Port" \
 				 " errors 0x%x\n", rio_name(rdev), regval);
 
 			/* Clear all implementation specific events */
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 					IDT_PORT_ISERR_DET(portnum), 0);
 		}
 	}
@@ -391,14 +366,10 @@
 idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct rio_dev *rdev = to_rio_dev(dev);
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	ssize_t len = 0;
 	u32 regval;
 
-	while (!rio_mport_read_config_32(mport, destid, hopcount,
-					 IDT_ERR_RD, &regval)) {
+	while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) {
 		if (!regval)    /* 0 = end of log */
 			break;
 		len += snprintf(buf + len, PAGE_SIZE - len,
@@ -445,3 +416,5 @@
 
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index fc9f637..3a97107 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -117,10 +117,6 @@
 
 static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
-
 	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
 	rdev->rswitch->add_entry = idtcps_route_add_entry;
 	rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -132,7 +128,7 @@
 
 	if (do_enum) {
 		/* set TVAL = ~50us */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
 	}
 
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index b9a389b..3994c00 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -113,22 +113,17 @@
 static int
 tsi568_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int portnum;
 
-	pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	/* Make sure that Port-Writes are disabled (for all ports) */
 	for (portnum = 0;
 	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				TSI568_SP_MODE(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
-				TSI568_SP_MODE(portnum),
-				regval | TSI568_SP_MODE_PW_DIS);
+		rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
+		rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
+				    regval | TSI568_SP_MODE_PW_DIS);
 	}
 
 	return 0;
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 2003fb6..1a62934 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -158,48 +158,45 @@
 static int
 tsi57x_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int portnum;
 
-	pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	for (portnum = 0;
 	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
 		/* Make sure that Port-Writes are enabled (for all ports) */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_MODE(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_MODE(portnum),
 				regval & ~TSI578_SP_MODE_PW_DIS);
 
 		/* Clear all pending interrupts */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ERR_STS_CSR(portnum),
 				&regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ERR_STS_CSR(portnum),
 				regval & 0x07120214);
 
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_INT_STATUS(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_INT_STATUS(portnum),
 				regval & 0x000700bd);
 
 		/* Enable all interrupts to allow ports to send a port-write */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_CTL_INDEP(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_CTL_INDEP(portnum),
 				regval | 0x000b0000);
 
 		/* Skip next (odd) port if the current port is in x4 mode */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				&regval);
 		if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
@@ -207,7 +204,7 @@
 	}
 
 	/* set TVAL = ~50us */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
 
 	return 0;
@@ -217,14 +214,12 @@
 tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
 {
 	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 intstat, err_status;
 	int sendcount, checkcount;
 	u8 route_port;
 	u32 regval;
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			&err_status);
 
@@ -232,15 +227,15 @@
 	    (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
 			  RIO_PORT_N_ERR_STS_PW_INP_ES))) {
 		/* Remove any queued packets by locking/unlocking port */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 			&regval);
 		if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				regval | RIO_PORT_N_CTL_LOCKOUT);
 			udelay(50);
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				regval);
 		}
@@ -248,7 +243,7 @@
 		/* Read from link maintenance response register to clear
 		 * valid bit
 		 */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
 			&regval);
 
@@ -257,13 +252,12 @@
 		 */
 		sendcount = 3;
 		while (sendcount) {
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 					  TSI578_SP_CS_TX(portnum), 0x40fc8000);
 			checkcount = 3;
 			while (checkcount--) {
 				udelay(50);
-				rio_mport_read_config_32(
-					mport, destid, hopcount,
+				rio_read_config_32(rdev,
 					rdev->phys_efptr +
 						RIO_PORT_N_MNT_RSP_CSR(portnum),
 					&regval);
@@ -277,25 +271,23 @@
 
 exit_es:
 	/* Clear implementation specific error status bits */
-	rio_mport_read_config_32(mport, destid, hopcount,
-				 TSI578_SP_INT_STATUS(portnum), &intstat);
+	rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);
 	pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
-		 destid, hopcount, portnum, intstat);
+		 rdev->destid, rdev->hopcount, portnum, intstat);
 
 	if (intstat & 0x10000) {
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_LUT_PEINF(portnum), &regval);
 		regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
 		route_port = rdev->rswitch->route_table[regval];
 		pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
 			rio_name(rdev), portnum, regval);
-		tsi57x_route_add_entry(mport, destid, hopcount,
+		tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,
 				RIO_GLOBAL_TABLE, regval, route_port);
 	}
 
-	rio_mport_write_config_32(mport, destid, hopcount,
-				  TSI578_SP_INT_STATUS(portnum),
-				  intstat & 0x000700bd);
+	rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum),
+			    intstat & 0x000700bd);
 
 	return 0;
 }
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 2ce2eb7..dd63084 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -249,7 +249,7 @@
 }
 
 static int pm8607_set_voltage(struct regulator_dev *rdev,
-			      int min_uV, int max_uV)
+			      int min_uV, int max_uV, unsigned *selector)
 {
 	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
 	uint8_t val, mask;
@@ -263,6 +263,7 @@
 	ret = choose_voltage(rdev, min_uV, max_uV);
 	if (ret < 0)
 		return -EINVAL;
+	*selector = ret;
 	val = (uint8_t)(ret << info->vol_shift);
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
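The regulator changes here and below update the set_voltage operation, which
gains an "unsigned *selector" out-parameter through which the driver reports
the index of the voltage-table entry it programmed. A sketch of the new
callback shape; the ex_* names and the table are illustrative, not a real
driver:

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/regulator/driver.h>

	static const int ex_voltages[] = { 1200000, 1800000, 2500000, 3300000 };

	static int ex_set_voltage(struct regulator_dev *rdev,
				  int min_uV, int max_uV, unsigned *selector)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(ex_voltages); i++) {
			if (ex_voltages[i] >= min_uV && ex_voltages[i] <= max_uV) {
				*selector = i;	/* new: report the chosen index */
				/* ...program index i into the hardware here... */
				return 0;
			}
		}
		return -EINVAL;
	}
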
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index dd30e88..e1d9436 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -186,13 +186,25 @@
 	 This driver provides support for the voltage regulators of the
 	 PCAP2 PMIC.
 
+config REGULATOR_MC13XXX_CORE
+	tristate
+
 config REGULATOR_MC13783
 	tristate "Support regulators on Freescale MC13783 PMIC"
 	depends on MFD_MC13783
+	select REGULATOR_MC13XXX_CORE
 	help
 	  Say y here to support the regulators found on the Freescale MC13783
 	  PMIC.
 
+config REGULATOR_MC13892
+	tristate "Support regulators on Freescale MC13892 PMIC"
+	depends on MFD_MC13XXX
+	select REGULATOR_MC13XXX_CORE
+	help
+	  Say y here to support the regulators found on the Freescale MC13892
+	  PMIC.
+
 config REGULATOR_AB3100
 	tristate "ST-Ericsson AB3100 Regulator functions"
 	depends on AB3100_CORE
@@ -250,5 +262,15 @@
 	help
 	  This driver supports TPS6586X voltage regulator chips.
 
+config REGULATOR_TPS6524X
+	tristate "TI TPS6524X Power regulators"
+	depends on SPI
+	help
+	  This driver supports TPS6524X voltage regulator chips. TPS6524X
+	  provides three step-down converters and two general-purpose LDO
+	  voltage regulators.  This device is interfaced using a customized
+	  serial interface currently supported on the sequencer serial
+	  port controller.
+
 endif
 
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bff8157..0b5e88c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -30,10 +30,13 @@
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
+obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
+obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
 
 obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
 obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_AB8500)	+= ab8500.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b349266..ed6feaf 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -362,7 +362,8 @@
 }
 
 static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
-					int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct ab3100_regulator *abreg = reg->reg_data;
 	u8 regval;
@@ -373,6 +374,8 @@
 	if (bestindex < 0)
 		return bestindex;
 
+	*selector = bestindex;
+
 	err = abx500_get_register_interruptible(abreg->dev, 0,
 						abreg->regreg, &regval);
 	if (err) {
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index db6b70f..d9a052c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -3,18 +3,13 @@
  *
  * License Terms: GNU General Public License v2
  *
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ *          Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
  *
  * AB8500 peripheral regulators
  *
- * AB8500 supports the following regulators,
- * LDOs - VAUDIO, VANAMIC2/2, VDIGMIC, VINTCORE12, VTVOUT,
- *        VAUX1/2/3, VANA
- *
- * for DB8500 cut 1.0 and previous versions of the silicon, all accesses
- * to registers are through the DB8500 SPI. In cut 1.1 onwards, these
- * accesses are through the DB8500 PRCMU I2C
- *
+ * AB8500 supports the following regulators:
+ *   VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -28,38 +23,37 @@
 
 /**
  * struct ab8500_regulator_info - ab8500 regulator information
+ * @dev: device pointer
  * @desc: regulator description
- * @ab8500: ab8500 parent
  * @regulator_dev: regulator device
  * @max_uV: maximum voltage (for variable voltage supplies)
  * @min_uV: minimum voltage (for variable voltage supplies)
  * @fixed_uV: typical voltage (for fixed voltage supplies)
  * @update_bank: bank to control on/off
  * @update_reg: register to control on/off
- * @mask: mask to enable/disable regulator
- * @enable: bits to enable the regulator in normal(high power) mode
+ * @update_mask: mask to enable/disable regulator
+ * @update_val_enable: bits to enable the regulator in normal (high power) mode
  * @voltage_bank: bank to control regulator voltage
  * @voltage_reg: register to control regulator voltage
  * @voltage_mask: mask to control regulator voltage
- * @supported_voltages: supported voltage table
+ * @voltages: supported voltage table
  * @voltages_len: number of supported voltages for the regulator
  */
 struct ab8500_regulator_info {
 	struct device		*dev;
 	struct regulator_desc	desc;
-	struct ab8500		*ab8500;
 	struct regulator_dev	*regulator;
 	int max_uV;
 	int min_uV;
 	int fixed_uV;
 	u8 update_bank;
 	u8 update_reg;
-	u8 mask;
-	u8 enable;
+	u8 update_mask;
+	u8 update_val_enable;
 	u8 voltage_bank;
 	u8 voltage_reg;
 	u8 voltage_mask;
-	int const *supported_voltages;
+	int const *voltages;
 	int voltages_len;
 };
 
@@ -83,6 +77,17 @@
 	3300000,
 };
 
+static const int ldo_vaux3_voltages[] = {
+	1200000,
+	1500000,
+	1800000,
+	2100000,
+	2500000,
+	2750000,
+	2790000,
+	2910000,
+};
+
 static const int ldo_vintcore_voltages[] = {
 	1200000,
 	1225000,
@@ -95,57 +100,80 @@
 
 static int ab8500_regulator_enable(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, info->mask, info->enable);
+		info->update_bank, info->update_reg,
+		info->update_mask, info->update_val_enable);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 			"couldn't set enable bits for regulator\n");
+
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, info->update_val_enable);
+
 	return ret;
 }
 
 static int ab8500_regulator_disable(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, info->mask, 0x0);
+		info->update_bank, info->update_reg,
+		info->update_mask, 0x0);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 			"couldn't set disable bits for regulator\n");
+
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, 0x0);
+
 	return ret;
 }
 
 static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-	u8 value;
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_get_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, &value);
+		info->update_bank, info->update_reg, &regval);
 	if (ret < 0) {
 		dev_err(rdev_get_dev(rdev),
 			"couldn't read 0x%x register\n", info->update_reg);
 		return ret;
 	}
 
-	if (value & info->mask)
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-is_enabled (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, regval);
+
+	if (regval & info->update_mask)
 		return true;
 	else
 		return false;
@@ -153,12 +181,12 @@
 
 static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
 {
-	int regulator_id;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	/* return the uV for the fixed regulators */
 	if (info->fixed_uV)
@@ -167,33 +195,40 @@
 	if (selector >= info->voltages_len)
 		return -EINVAL;
 
-	return info->supported_voltages[selector];
+	return info->voltages[selector];
 }
 
 static int ab8500_regulator_get_voltage(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret, val;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-	u8 value;
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
-	ret = abx500_get_register_interruptible(info->dev, info->voltage_bank,
-		info->voltage_reg, &value);
+	ret = abx500_get_register_interruptible(info->dev,
+			info->voltage_bank, info->voltage_reg, &regval);
 	if (ret < 0) {
 		dev_err(rdev_get_dev(rdev),
 			"couldn't read voltage reg for regulator\n");
 		return ret;
 	}
 
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-get_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->voltage_bank, info->voltage_reg,
+		info->voltage_mask, regval);
+
 	/* vintcore has a different layout */
-	value &= info->voltage_mask;
-	if (regulator_id == AB8500_LDO_INTCORE)
-		ret = info->supported_voltages[value >> 0x3];
+	val = regval & info->voltage_mask;
+	if (info->desc.id == AB8500_LDO_INTCORE)
+		ret = info->voltages[val >> 0x3];
 	else
-		ret = info->supported_voltages[value];
+		ret = info->voltages[val];
 
 	return ret;
 }
@@ -206,8 +241,8 @@
 
 	/* check the supported voltage */
 	for (i = 0; i < info->voltages_len; i++) {
-		if ((info->supported_voltages[i] >= min_uV) &&
-		    (info->supported_voltages[i] <= max_uV))
+		if ((info->voltages[i] >= min_uV) &&
+		    (info->voltages[i] <= max_uV))
 			return i;
 	}
 
@@ -215,14 +250,17 @@
 }
 
 static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
-		int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	/* get the appropriate voltages within the range */
 	ret = ab8500_get_best_voltage_index(rdev, min_uV, max_uV);
@@ -232,14 +270,23 @@
 		return ret;
 	}
 
+	*selector = ret;
+
 	/* set the registers for the request */
+	regval = (u8)ret;
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->voltage_bank, info->voltage_reg,
-		info->voltage_mask, (u8)ret);
+			info->voltage_bank, info->voltage_reg,
+			info->voltage_mask, regval);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 		"couldn't set voltage reg for regulator\n");
 
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->voltage_bank, info->voltage_reg,
+		info->voltage_mask, regval);
+
 	return ret;
 }
 
@@ -254,17 +301,17 @@
 
 static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
 {
-	int regulator_id;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	return info->fixed_uV;
 }
 
-static struct regulator_ops ab8500_ldo_fixed_ops = {
+static struct regulator_ops ab8500_regulator_fixed_ops = {
 	.enable		= ab8500_regulator_enable,
 	.disable	= ab8500_regulator_disable,
 	.is_enabled	= ab8500_regulator_is_enabled,
@@ -272,89 +319,198 @@
 	.list_voltage	= ab8500_list_voltage,
 };
 
-#define AB8500_LDO(_id, min, max, bank, reg, reg_mask,		\
-		reg_enable, volt_bank, volt_reg, volt_mask,	\
-		voltages, len_volts)				\
-{								\
-	.desc	= {						\
-		.name	= "LDO-" #_id,				\
-		.ops	= &ab8500_regulator_ops,		\
-		.type	= REGULATOR_VOLTAGE,			\
-		.id	= AB8500_LDO_##_id,			\
-		.owner	= THIS_MODULE,				\
-	},							\
-	.min_uV		= (min) * 1000,				\
-	.max_uV		= (max) * 1000,				\
-	.update_bank	= bank,					\
-	.update_reg	= reg,					\
-	.mask		= reg_mask,				\
-	.enable		= reg_enable,				\
-	.voltage_bank	= volt_bank,				\
-	.voltage_reg	= volt_reg,				\
-	.voltage_mask	= volt_mask,				\
-	.supported_voltages = voltages,				\
-	.voltages_len	= len_volts,				\
-	.fixed_uV	= 0,					\
-}
-
-#define AB8500_FIXED_LDO(_id, fixed, bank, reg,		\
-			reg_mask, reg_enable)		\
-{							\
-	.desc	= {					\
-		.name	= "LDO-" #_id,			\
-		.ops	= &ab8500_ldo_fixed_ops,	\
-		.type	= REGULATOR_VOLTAGE,		\
-		.id	= AB8500_LDO_##_id,		\
-		.owner	= THIS_MODULE,			\
-	},						\
-	.fixed_uV	= fixed * 1000,			\
-	.update_bank	= bank,				\
-	.update_reg	= reg,				\
-	.mask		= reg_mask,			\
-	.enable		= reg_enable,			\
-}
-
-static struct ab8500_regulator_info ab8500_regulator_info[] = {
+static struct ab8500_regulator_info
+		ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
 	/*
-	 * Variable Voltage LDOs
-	 * name, min uV, max uV, ctrl bank, ctrl reg, reg mask, enable mask,
-	 *      volt ctrl bank, volt ctrl reg, volt ctrl mask, volt table,
-	 *      num supported volts
+	 * Variable Voltage Regulators
+	 *   name, min mV, max mV,
+	 *   update bank, reg, mask, enable val
+	 *   volt bank, reg, mask, table, table length
 	 */
-	AB8500_LDO(AUX1, 1100, 3300, 0x04, 0x09, 0x3, 0x1, 0x04, 0x1f, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(AUX2, 1100, 3300, 0x04, 0x09, 0xc, 0x4, 0x04, 0x20, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(AUX3, 1100, 3300, 0x04, 0x0a, 0x3, 0x1, 0x04, 0x21, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(INTCORE, 1100, 3300, 0x03, 0x80, 0x4, 0x4, 0x03, 0x80, 0x38,
-		ldo_vintcore_voltages, ARRAY_SIZE(ldo_vintcore_voltages)),
+	[AB8500_LDO_AUX1] = {
+		.desc = {
+			.name		= "LDO-AUX1",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX1,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vauxn_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x09,
+		.update_mask		= 0x03,
+		.update_val_enable	= 0x01,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x1f,
+		.voltage_mask		= 0x0f,
+		.voltages		= ldo_vauxn_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vauxn_voltages),
+	},
+	[AB8500_LDO_AUX2] = {
+		.desc = {
+			.name		= "LDO-AUX2",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX2,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vauxn_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x09,
+		.update_mask		= 0x0c,
+		.update_val_enable	= 0x04,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x20,
+		.voltage_mask		= 0x0f,
+		.voltages		= ldo_vauxn_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vauxn_voltages),
+	},
+	[AB8500_LDO_AUX3] = {
+		.desc = {
+			.name		= "LDO-AUX3",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX3,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vaux3_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x0a,
+		.update_mask		= 0x03,
+		.update_val_enable	= 0x01,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x21,
+		.voltage_mask		= 0x07,
+		.voltages		= ldo_vaux3_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vaux3_voltages),
+	},
+	[AB8500_LDO_INTCORE] = {
+		.desc = {
+			.name		= "LDO-INTCORE",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_INTCORE,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vintcore_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x80,
+		.update_mask		= 0x44,
+		.update_val_enable	= 0x04,
+		.voltage_bank		= 0x03,
+		.voltage_reg		= 0x80,
+		.voltage_mask		= 0x38,
+		.voltages		= ldo_vintcore_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vintcore_voltages),
+	},
 
 	/*
-	 * Fixed Voltage LDOs
-	 *		 name,	o/p uV, ctrl bank, ctrl reg, enable, disable
+	 * Fixed Voltage Regulators
+	 *   name, fixed mV,
+	 *   update bank, reg, mask, enable val
 	 */
-	AB8500_FIXED_LDO(TVOUT,	  2000, 0x03,      0x80,     0x2,    0x2),
-	AB8500_FIXED_LDO(AUDIO,   2000, 0x03,      0x83,     0x2,    0x2),
-	AB8500_FIXED_LDO(ANAMIC1, 2050, 0x03,      0x83,     0x4,    0x4),
-	AB8500_FIXED_LDO(ANAMIC2, 2050, 0x03,      0x83,     0x8,    0x8),
-	AB8500_FIXED_LDO(DMIC,    1800, 0x03,      0x83,     0x10,   0x10),
-	AB8500_FIXED_LDO(ANA,     1200, 0x03,      0x83,     0xc,    0x4),
+	[AB8500_LDO_TVOUT] = {
+		.desc = {
+			.name		= "LDO-TVOUT",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_TVOUT,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2000000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x80,
+		.update_mask		= 0x82,
+		.update_val_enable	= 0x02,
+	},
+	[AB8500_LDO_AUDIO] = {
+		.desc = {
+			.name		= "LDO-AUDIO",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUDIO,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2000000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x02,
+		.update_val_enable	= 0x02,
+	},
+	[AB8500_LDO_ANAMIC1] = {
+		.desc = {
+			.name		= "LDO-ANAMIC1",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANAMIC1,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2050000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x08,
+		.update_val_enable	= 0x08,
+	},
+	[AB8500_LDO_ANAMIC2] = {
+		.desc = {
+			.name		= "LDO-ANAMIC2",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANAMIC2,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2050000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x10,
+		.update_val_enable	= 0x10,
+	},
+	[AB8500_LDO_DMIC] = {
+		.desc = {
+			.name		= "LDO-DMIC",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_DMIC,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 1800000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x04,
+		.update_val_enable	= 0x04,
+	},
+	[AB8500_LDO_ANA] = {
+		.desc = {
+			.name		= "LDO-ANA",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANA,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 1200000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x06,
+		.update_mask		= 0x0c,
+		.update_val_enable	= 0x04,
+	},
 };
 
-static inline struct ab8500_regulator_info *find_regulator_info(int id)
-{
-	struct ab8500_regulator_info *info;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
-		info = &ab8500_regulator_info[i];
-		if (info->desc.id == id)
-			return info;
-	}
-	return NULL;
-}
-
 static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
 {
 	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
@@ -366,6 +522,16 @@
 		return -EINVAL;
 	}
 	pdata = dev_get_platdata(ab8500->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "null pdata\n");
+		return -EINVAL;
+	}
+
+	/* make sure the platform data has the correct size */
+	if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
+		dev_err(&pdev->dev, "platform configuration error\n");
+		return -EINVAL;
+	}
 
 	/* register all regulators */
 	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -374,10 +540,22 @@
 		/* assign per-regulator data */
 		info = &ab8500_regulator_info[i];
 		info->dev = &pdev->dev;
-		info->ab8500 = ab8500;
 
+		/* fix for hardware before ab8500v2.0 */
+		if (abx500_get_chip_id(info->dev) < 0x20) {
+			if (info->desc.id == AB8500_LDO_AUX3) {
+				info->desc.n_voltages =
+					ARRAY_SIZE(ldo_vauxn_voltages);
+				info->voltages = ldo_vauxn_voltages;
+				info->voltages_len =
+					ARRAY_SIZE(ldo_vauxn_voltages);
+				info->voltage_mask = 0xf;
+			}
+		}
+
+		/* register regulator with framework */
 		info->regulator = regulator_register(&info->desc, &pdev->dev,
-				pdata->regulator[i], info);
+				&pdata->regulator[i], info);
 		if (IS_ERR(info->regulator)) {
 			err = PTR_ERR(info->regulator);
 			dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -389,6 +567,9 @@
 			}
 			return err;
 		}
+
+		dev_vdbg(rdev_get_dev(info->regulator),
+			"%s-probed\n", info->desc.name);
 	}
 
 	return 0;
@@ -401,6 +582,10 @@
 	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
 		struct ab8500_regulator_info *info = NULL;
 		info = &ab8500_regulator_info[i];
+
+		dev_vdbg(rdev_get_dev(info->regulator),
+			"%s-remove\n", info->desc.name);
+
 		regulator_unregister(info->regulator);
 	}
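
For context on the restructuring above: because ab8500_regulator_info[] is now sized AB8500_NUM_REGULATORS and populated with designated initializers keyed by regulator ID, the old find_regulator_info() linear search can be dropped. A minimal lookup sketch under that assumption (illustrative only, not part of the patch; ab8500_lookup_info() is a hypothetical name):

static struct ab8500_regulator_info *ab8500_lookup_info(int id)
{
	/* The table is indexed directly by regulator ID, so a lookup is
	 * just a bounds check plus an array access. */
	if (id < 0 || id >= AB8500_NUM_REGULATORS)
		return NULL;
	return &ab8500_regulator_info[id];
}

In practice the driver needs no lookup at all: each entry is handed to regulator_register() as driver data and recovered in the ops with rdev_get_drvdata(), which is what the new null-pointer checks in the hunks above guard.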
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ba521f0..9fa2095 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -13,8 +13,11 @@
  *
  */
 
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -25,16 +28,30 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/regulator.h>
+
 #include "dummy.h"
 
-#define REGULATOR_VERSION "0.5"
+#define rdev_err(rdev, fmt, ...)					\
+	pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_warn(rdev, fmt, ...)					\
+	pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_info(rdev, fmt, ...)					\
+	pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_dbg(rdev, fmt, ...)					\
+	pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
 
 static DEFINE_MUTEX(regulator_list_mutex);
 static LIST_HEAD(regulator_list);
 static LIST_HEAD(regulator_map_list);
-static int has_full_constraints;
+static bool has_full_constraints;
 static bool board_wants_dummy_regulator;
 
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_root;
+#endif
+
 /*
  * struct regulator_map
  *
@@ -71,6 +88,8 @@
 static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
 static void _notifier_call_chain(struct regulator_dev *rdev,
 				  unsigned long event, void *data);
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+				     int min_uV, int max_uV);
 
 static const char *rdev_get_name(struct regulator_dev *rdev)
 {
@@ -111,13 +130,11 @@
 	BUG_ON(*min_uV > *max_uV);
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 
@@ -132,6 +149,27 @@
 	return 0;
 }
 
+/* Make sure we select a voltage that suits the needs of all
+ * regulator consumers
+ */
+static int regulator_check_consumers(struct regulator_dev *rdev,
+				     int *min_uV, int *max_uV)
+{
+	struct regulator *regulator;
+
+	list_for_each_entry(regulator, &rdev->consumer_list, list) {
+		if (*max_uV > regulator->max_uV)
+			*max_uV = regulator->max_uV;
+		if (*min_uV < regulator->min_uV)
+			*min_uV = regulator->min_uV;
+	}
+
+	if (*min_uV > *max_uV)
+		return -EINVAL;
+
+	return 0;
+}
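
regulator_check_consumers() narrows the requested range to the intersection of every consumer's request before anything is written to hardware. A small self-contained sketch of the same rule (illustrative plain C, not kernel code; the names are hypothetical):

struct request {
	int min_uV;
	int max_uV;
};

/* Narrow *min_uV/*max_uV to the intersection of all requests;
 * return -1 if the requests cannot all be satisfied at once. */
static int intersect_requests(const struct request *reqs, int n,
			      int *min_uV, int *max_uV)
{
	int i;

	for (i = 0; i < n; i++) {
		if (reqs[i].max_uV < *max_uV)
			*max_uV = reqs[i].max_uV;
		if (reqs[i].min_uV > *min_uV)
			*min_uV = reqs[i].min_uV;
	}
	return (*min_uV > *max_uV) ? -1 : 0;
}

For example, consumers asking for 1200000-1800000 and 1500000-3300000 narrow a 1200000-3300000 request down to 1500000-1800000; non-overlapping requests fail (with -EINVAL in the kernel helper above).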
+
 /* current constraint check */
 static int regulator_check_current_limit(struct regulator_dev *rdev,
 					int *min_uA, int *max_uA)
@@ -139,13 +177,11 @@
 	BUG_ON(*min_uA > *max_uA);
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 
@@ -174,18 +210,15 @@
 	}
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	if (!(rdev->constraints->valid_modes_mask & mode)) {
-		printk(KERN_ERR "%s: invalid mode %x for %s\n",
-		       __func__, mode, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid mode %x\n", mode);
 		return -EINVAL;
 	}
 	return 0;
@@ -195,13 +228,11 @@
 static int regulator_check_drms(struct regulator_dev *rdev)
 {
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	return 0;
@@ -553,18 +584,21 @@
 
 	err = regulator_check_drms(rdev);
 	if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
-	    !rdev->desc->ops->get_voltage || !rdev->desc->ops->set_mode)
+	    (!rdev->desc->ops->get_voltage &&
+	     !rdev->desc->ops->get_voltage_sel) ||
+	    !rdev->desc->ops->set_mode)
 		return;
 
 	/* get output voltage */
-	output_uV = rdev->desc->ops->get_voltage(rdev);
+	output_uV = _regulator_get_voltage(rdev);
 	if (output_uV <= 0)
 		return;
 
 	/* get input voltage */
-	if (rdev->supply && rdev->supply->desc->ops->get_voltage)
-		input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
-	else
+	input_uV = 0;
+	if (rdev->supply)
+		input_uV = _regulator_get_voltage(rdev);
+	if (input_uV <= 0)
 		input_uV = rdev->constraints->input_uV;
 	if (input_uV <= 0)
 		return;
@@ -598,20 +632,17 @@
 	 */
 	if (!rstate->enabled && !rstate->disabled) {
 		if (can_set_state)
-			printk(KERN_WARNING "%s: No configuration for %s\n",
-			       __func__, rdev_get_name(rdev));
+			rdev_warn(rdev, "No configuration\n");
 		return 0;
 	}
 
 	if (rstate->enabled && rstate->disabled) {
-		printk(KERN_ERR "%s: invalid configuration for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid configuration\n");
 		return -EINVAL;
 	}
 
 	if (!can_set_state) {
-		printk(KERN_ERR "%s: no way to set suspend state\n",
-			__func__);
+		rdev_err(rdev, "no way to set suspend state\n");
 		return -EINVAL;
 	}
 
@@ -620,15 +651,14 @@
 	else
 		ret = rdev->desc->ops->set_suspend_disable(rdev);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to enabled/disable\n", __func__);
+		rdev_err(rdev, "failed to enable/disable\n");
 		return ret;
 	}
 
 	if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
 		ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set voltage\n",
-				__func__);
+			rdev_err(rdev, "failed to set voltage\n");
 			return ret;
 		}
 	}
@@ -636,7 +666,7 @@
 	if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
 		ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set mode\n", __func__);
+			rdev_err(rdev, "failed to set mode\n");
 			return ret;
 		}
 	}
@@ -714,29 +744,27 @@
 	if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
 		count += sprintf(buf + count, "standby");
 
-	printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
+	rdev_info(rdev, "%s\n", buf);
 }
 
 static int machine_constraints_voltage(struct regulator_dev *rdev,
 	struct regulation_constraints *constraints)
 {
 	struct regulator_ops *ops = rdev->desc->ops;
-	const char *name = rdev_get_name(rdev);
 	int ret;
 
 	/* do we need to apply the constraint voltage */
 	if (rdev->constraints->apply_uV &&
-		rdev->constraints->min_uV == rdev->constraints->max_uV &&
-		ops->set_voltage) {
-		ret = ops->set_voltage(rdev,
-			rdev->constraints->min_uV, rdev->constraints->max_uV);
-			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
-				       __func__,
-				       rdev->constraints->min_uV, name);
-				rdev->constraints = NULL;
-				return ret;
-			}
+	    rdev->constraints->min_uV == rdev->constraints->max_uV) {
+		ret = _regulator_do_set_voltage(rdev,
+						rdev->constraints->min_uV,
+						rdev->constraints->max_uV);
+		if (ret < 0) {
+			rdev_err(rdev, "failed to apply %duV constraint\n",
+				 rdev->constraints->min_uV);
+			rdev->constraints = NULL;
+			return ret;
+		}
 	}
 
 	/* constrain machine-level voltage specs to fit
@@ -765,8 +793,7 @@
 
 		/* else require explicit machine-level constraints */
 		if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
-			pr_err("%s: %s '%s' voltage constraints\n",
-				       __func__, "invalid", name);
+			rdev_err(rdev, "invalid voltage constraints\n");
 			return -EINVAL;
 		}
 
@@ -787,22 +814,19 @@
 
 		/* final: [min_uV..max_uV] valid iff constraints valid */
 		if (max_uV < min_uV) {
-			pr_err("%s: %s '%s' voltage constraints\n",
-				       __func__, "unsupportable", name);
+			rdev_err(rdev, "unsupportable voltage constraints\n");
 			return -EINVAL;
 		}
 
 		/* use regulator's subset of machine constraints */
 		if (constraints->min_uV < min_uV) {
-			pr_debug("%s: override '%s' %s, %d -> %d\n",
-				       __func__, name, "min_uV",
-					constraints->min_uV, min_uV);
+			rdev_dbg(rdev, "override min_uV, %d -> %d\n",
+				 constraints->min_uV, min_uV);
 			constraints->min_uV = min_uV;
 		}
 		if (constraints->max_uV > max_uV) {
-			pr_debug("%s: override '%s' %s, %d -> %d\n",
-				       __func__, name, "max_uV",
-					constraints->max_uV, max_uV);
+			rdev_dbg(rdev, "override max_uV, %d -> %d\n",
+				 constraints->max_uV, max_uV);
 			constraints->max_uV = max_uV;
 		}
 	}
@@ -822,26 +846,25 @@
  * set_mode.
  */
 static int set_machine_constraints(struct regulator_dev *rdev,
-	struct regulation_constraints *constraints)
+	const struct regulation_constraints *constraints)
 {
 	int ret = 0;
-	const char *name;
 	struct regulator_ops *ops = rdev->desc->ops;
 
-	rdev->constraints = constraints;
+	rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+				    GFP_KERNEL);
+	if (!rdev->constraints)
+		return -ENOMEM;
 
-	name = rdev_get_name(rdev);
-
-	ret = machine_constraints_voltage(rdev, constraints);
+	ret = machine_constraints_voltage(rdev, rdev->constraints);
 	if (ret != 0)
 		goto out;
 
 	/* do we need to setup our suspend state */
 	if (constraints->initial_state) {
-		ret = suspend_prepare(rdev, constraints->initial_state);
+		ret = suspend_prepare(rdev, rdev->constraints->initial_state);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set suspend state for %s\n",
-			       __func__, name);
+			rdev_err(rdev, "failed to set suspend state\n");
 			rdev->constraints = NULL;
 			goto out;
 		}
@@ -849,17 +872,14 @@
 
 	if (constraints->initial_mode) {
 		if (!ops->set_mode) {
-			printk(KERN_ERR "%s: no set_mode operation for %s\n",
-			       __func__, name);
+			rdev_err(rdev, "no set_mode operation\n");
 			ret = -EINVAL;
 			goto out;
 		}
 
-		ret = ops->set_mode(rdev, constraints->initial_mode);
+		ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
 		if (ret < 0) {
-			printk(KERN_ERR
-			       "%s: failed to set initial mode for %s: %d\n",
-			       __func__, name, ret);
+			rdev_err(rdev, "failed to set initial mode: %d\n", ret);
 			goto out;
 		}
 	}
@@ -867,11 +887,11 @@
 	/* If the constraints say the regulator should be on at this point
 	 * and we have control then make sure it is enabled.
 	 */
-	if ((constraints->always_on || constraints->boot_on) && ops->enable) {
+	if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
+	    ops->enable) {
 		ret = ops->enable(rdev);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to enable %s\n",
-			       __func__, name);
+			rdev_err(rdev, "failed to enable\n");
 			rdev->constraints = NULL;
 			goto out;
 		}
@@ -899,9 +919,8 @@
 	err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
 				"supply");
 	if (err) {
-		printk(KERN_ERR
-		       "%s: could not add device link %s err %d\n",
-		       __func__, supply_rdev->dev.kobj.name, err);
+		rdev_err(rdev, "could not add device link %s err %d\n",
+			 supply_rdev->dev.kobj.name, err);
 		       goto out;
 	}
 	rdev->supply = supply_rdev;
@@ -957,10 +976,10 @@
 			continue;
 
 		dev_dbg(consumer_dev, "%s/%s is '%s' supply; fail %s/%s\n",
-				dev_name(&node->regulator->dev),
-				node->regulator->desc->name,
-				supply,
-				dev_name(&rdev->dev), rdev_get_name(rdev));
+			dev_name(&node->regulator->dev),
+			node->regulator->desc->name,
+			supply,
+			dev_name(&rdev->dev), rdev_get_name(rdev));
 		return -EBUSY;
 	}
 
@@ -1031,8 +1050,7 @@
 		regulator->dev_attr.show = device_requested_uA_show;
 		err = device_create_file(dev, &regulator->dev_attr);
 		if (err < 0) {
-			printk(KERN_WARNING "%s: could not add regulator_dev"
-				" load sysfs\n", __func__);
+			rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n");
 			goto attr_name_err;
 		}
 
@@ -1049,9 +1067,8 @@
 		err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
 					buf);
 		if (err) {
-			printk(KERN_WARNING
-			       "%s: could not add device link %s err %d\n",
-			       __func__, dev->kobj.name, err);
+			rdev_warn(rdev, "could not add device link %s err %d\n",
+				  dev->kobj.name, err);
 			goto link_name_err;
 		}
 	}
@@ -1088,7 +1105,7 @@
 	int ret;
 
 	if (id == NULL) {
-		printk(KERN_ERR "regulator: get() with no identifier\n");
+		pr_err("get() with no identifier\n");
 		return regulator;
 	}
 
@@ -1122,8 +1139,8 @@
 	 * substitute in a dummy regulator so consumers can continue.
 	 */
 	if (!has_full_constraints) {
-		pr_warning("%s supply %s not found, using dummy regulator\n",
-			   devname, id);
+		pr_warn("%s supply %s not found, using dummy regulator\n",
+			devname, id);
 		rdev = dummy_regulator_rdev;
 		goto found;
 	}
@@ -1274,8 +1291,7 @@
 			ret = _regulator_enable(rdev->supply);
 			mutex_unlock(&rdev->supply->mutex);
 			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to enable %s: %d\n",
-				       __func__, rdev_get_name(rdev), ret);
+				rdev_err(rdev, "failed to enable: %d\n", ret);
 				return ret;
 			}
 		}
@@ -1302,13 +1318,13 @@
 			if (ret >= 0) {
 				delay = ret;
 			} else {
-				printk(KERN_WARNING
-					"%s: enable_time() failed for %s: %d\n",
-					__func__, rdev_get_name(rdev),
-					ret);
+				rdev_warn(rdev, "enable_time() failed: %d\n",
+					   ret);
 				delay = 0;
 			}
 
+			trace_regulator_enable(rdev_get_name(rdev));
+
 			/* Allow the regulator to ramp; it would be useful
 			 * to extend this for bulk operations so that the
 			 * regulators can ramp together.  */
@@ -1316,6 +1332,8 @@
 			if (ret < 0)
 				return ret;
 
+			trace_regulator_enable_delay(rdev_get_name(rdev));
+
 			if (delay >= 1000) {
 				mdelay(delay / 1000);
 				udelay(delay % 1000);
@@ -1323,9 +1341,10 @@
 				udelay(delay);
 			}
 
+			trace_regulator_enable_complete(rdev_get_name(rdev));
+
 		} else if (ret < 0) {
-			printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
-			       __func__, rdev_get_name(rdev), ret);
+			rdev_err(rdev, "is_enabled() failed: %d\n", ret);
 			return ret;
 		}
 		/* Fallthrough on positive return values - already enabled */
@@ -1367,8 +1386,7 @@
 	*supply_rdev_ptr = NULL;
 
 	if (WARN(rdev->use_count <= 0,
-			"unbalanced disables for %s\n",
-			rdev_get_name(rdev)))
+		 "unbalanced disables for %s\n", rdev_get_name(rdev)))
 		return -EIO;
 
 	/* are we the last user and permitted to disable ? */
@@ -1378,13 +1396,16 @@
 		/* we are last user */
 		if (_regulator_can_change_status(rdev) &&
 		    rdev->desc->ops->disable) {
+			trace_regulator_disable(rdev_get_name(rdev));
+
 			ret = rdev->desc->ops->disable(rdev);
 			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to disable %s\n",
-				       __func__, rdev_get_name(rdev));
+				rdev_err(rdev, "failed to disable\n");
 				return ret;
 			}
 
+			trace_regulator_disable_complete(rdev_get_name(rdev));
+
 			_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
 					     NULL);
 		}
@@ -1451,8 +1472,7 @@
 		/* ah well, who wants to live forever... */
 		ret = rdev->desc->ops->disable(rdev);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to force disable %s\n",
-			       __func__, rdev_get_name(rdev));
+			rdev_err(rdev, "failed to force disable\n");
 			return ret;
 		}
 		/* notify other consumers that power has been forced off */
@@ -1605,6 +1625,62 @@
 	return 0;
 }
 
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+				     int min_uV, int max_uV)
+{
+	int ret;
+	unsigned int selector;
+
+	trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
+
+	if (rdev->desc->ops->set_voltage) {
+		ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
+						   &selector);
+
+		if (rdev->desc->ops->list_voltage)
+			selector = rdev->desc->ops->list_voltage(rdev,
+								 selector);
+		else
+			selector = -1;
+	} else if (rdev->desc->ops->set_voltage_sel) {
+		int best_val = INT_MAX;
+		int i;
+
+		selector = 0;
+
+		/* Find the smallest voltage that falls within the specified
+		 * range.
+		 */
+		for (i = 0; i < rdev->desc->n_voltages; i++) {
+			ret = rdev->desc->ops->list_voltage(rdev, i);
+			if (ret < 0)
+				continue;
+
+			if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+				best_val = ret;
+				selector = i;
+			}
+		}
+
+		if (best_val != INT_MAX) {
+			ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
+			selector = best_val;
+		} else {
+			ret = -EINVAL;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (ret == 0)
+		_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
+				     NULL);
+
+	trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector);
+
+	return ret;
+}
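
With _regulator_do_set_voltage() in place, a driver can expose either the extended set_voltage() (reporting a selector, as the driver conversions later in this pull do) or the new set_voltage_sel()/list_voltage() pair and let the core pick the lowest in-range selector. A minimal selector-based driver sketch, assuming a hypothetical LDO ("foo") with sixteen 50 mV steps from 900 mV (all foo_* names are invented for illustration):

#include <linux/errno.h>
#include <linux/regulator/driver.h>

#define FOO_MIN_UV	900000
#define FOO_STEP_UV	50000
#define FOO_N_VOLTAGES	16	/* would also go into regulator_desc.n_voltages */

struct foo_chip {
	unsigned vsel;	/* stands in for the chip's voltage-select register */
};

static int foo_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	if (selector >= FOO_N_VOLTAGES)
		return -EINVAL;
	return FOO_MIN_UV + selector * FOO_STEP_UV;
}

static int foo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
	struct foo_chip *chip = rdev_get_drvdata(rdev);

	if (selector >= FOO_N_VOLTAGES)
		return -EINVAL;
	chip->vsel = selector;	/* a real driver writes a chip register here */
	return 0;
}

static int foo_get_voltage_sel(struct regulator_dev *rdev)
{
	struct foo_chip *chip = rdev_get_drvdata(rdev);

	return chip->vsel;
}

static struct regulator_ops foo_ops = {
	.list_voltage		= foo_list_voltage,
	.set_voltage_sel	= foo_set_voltage_sel,
	.get_voltage_sel	= foo_get_voltage_sel,
};

Note the sanity checks added to regulator_register() later in this file: a driver must not provide both get_voltage() and get_voltage_sel() (or both set_voltage() and set_voltage_sel()), and the selector-based ops require list_voltage().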
+
 /**
  * regulator_set_voltage - set regulator output voltage
  * @regulator: regulator source
@@ -1626,12 +1702,20 @@
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
 {
 	struct regulator_dev *rdev = regulator->rdev;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&rdev->mutex);
 
+	/* If we're setting the same range as last time the change
+	 * should be a noop (some cpufreq implementations use the same
+	 * voltage for multiple frequencies, for example).
+	 */
+	if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+		goto out;
+
 	/* sanity check */
-	if (!rdev->desc->ops->set_voltage) {
+	if (!rdev->desc->ops->set_voltage &&
+	    !rdev->desc->ops->set_voltage_sel) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1642,18 +1726,76 @@
 		goto out;
 	regulator->min_uV = min_uV;
 	regulator->max_uV = max_uV;
-	ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV);
+
+	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
 
 out:
-	_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL);
 	mutex_unlock(&rdev->mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_voltage);
 
+/**
+ * regulator_sync_voltage - re-apply last regulator output voltage
+ * @regulator: regulator source
+ *
+ * Re-apply the last configured voltage.  This is intended to be used
+ * where some external control source the consumer is cooperating with
+ * has caused the configured voltage to change.
+ */
+int regulator_sync_voltage(struct regulator *regulator)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret, min_uV, max_uV;
+
+	mutex_lock(&rdev->mutex);
+
+	if (!rdev->desc->ops->set_voltage &&
+	    !rdev->desc->ops->set_voltage_sel) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* This is only going to work if we've had a voltage configured. */
+	if (!regulator->min_uV && !regulator->max_uV) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	min_uV = regulator->min_uV;
+	max_uV = regulator->max_uV;
+
+	/* This should be a paranoia check... */
+	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+
+out:
+	mutex_unlock(&rdev->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_sync_voltage);
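
A consumer-side usage sketch for the new export (hypothetical supply name and voltages; assumes the matching regulator_sync_voltage() declaration in consumer.h from this series):

#include <linux/regulator/consumer.h>

static int example_resync(struct regulator *vdd)
{
	int ret;

	/* Configure the rail as usual. */
	ret = regulator_set_voltage(vdd, 1200000, 1300000);
	if (ret)
		return ret;

	/*
	 * ... an external control source cooperating with this consumer
	 * may change the physical voltage here ...
	 */

	/* Re-apply the last requested 1.2-1.3 V range. */
	return regulator_sync_voltage(vdd);
}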
+
 static int _regulator_get_voltage(struct regulator_dev *rdev)
 {
-	/* sanity check */
+	int sel;
+
+	if (rdev->desc->ops->get_voltage_sel) {
+		sel = rdev->desc->ops->get_voltage_sel(rdev);
+		if (sel < 0)
+			return sel;
+		return rdev->desc->ops->list_voltage(rdev, sel);
+	}
 	if (rdev->desc->ops->get_voltage)
 		return rdev->desc->ops->get_voltage(rdev);
 	else
@@ -1880,21 +2022,20 @@
 		goto out;
 
 	/* get output voltage */
-	output_uV = rdev->desc->ops->get_voltage(rdev);
+	output_uV = _regulator_get_voltage(rdev);
 	if (output_uV <= 0) {
-		printk(KERN_ERR "%s: invalid output voltage found for %s\n",
-			__func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid output voltage found\n");
 		goto out;
 	}
 
 	/* get input voltage */
-	if (rdev->supply && rdev->supply->desc->ops->get_voltage)
-		input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
-	else
+	input_uV = 0;
+	if (rdev->supply)
+		input_uV = _regulator_get_voltage(rdev->supply);
+	if (input_uV <= 0)
 		input_uV = rdev->constraints->input_uV;
 	if (input_uV <= 0) {
-		printk(KERN_ERR "%s: invalid input voltage found for %s\n",
-			__func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid input voltage found\n");
 		goto out;
 	}
 
@@ -1907,16 +2048,14 @@
 						 total_uA_load);
 	ret = regulator_check_mode(rdev, mode);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to get optimum mode for %s @"
-			" %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
-			total_uA_load, input_uV, output_uV);
+		rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
+			 total_uA_load, input_uV, output_uV);
 		goto out;
 	}
 
 	ret = rdev->desc->ops->set_mode(rdev, mode);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
-			__func__, mode, rdev_get_name(rdev));
+		rdev_err(rdev, "failed to set optimum mode %x\n", mode);
 		goto out;
 	}
 	ret = mode;
@@ -2047,7 +2186,7 @@
 	return 0;
 
 err:
-	printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
+	pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
 	for (--i; i >= 0; --i)
 		regulator_disable(consumers[i].consumer);
 
@@ -2082,8 +2221,7 @@
 	return 0;
 
 err:
-	printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
-	       ret);
+	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
 	for (--i; i >= 0; --i)
 		regulator_enable(consumers[i].consumer);
 
@@ -2166,7 +2304,7 @@
 	int			status = 0;
 
 	/* some attributes need specific methods to be displayed */
-	if (ops->get_voltage) {
+	if (ops->get_voltage || ops->get_voltage_sel) {
 		status = device_create_file(dev, &dev_attr_microvolts);
 		if (status < 0)
 			return status;
@@ -2207,7 +2345,7 @@
 		return status;
 
 	/* constraints need specific supporting methods */
-	if (ops->set_voltage) {
+	if (ops->set_voltage || ops->set_voltage_sel) {
 		status = device_create_file(dev, &dev_attr_min_microvolts);
 		if (status < 0)
 			return status;
@@ -2271,6 +2409,23 @@
 	return status;
 }
 
+static void rdev_init_debugfs(struct regulator_dev *rdev)
+{
+#ifdef CONFIG_DEBUG_FS
+	rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
+	if (IS_ERR(rdev->debugfs) || !rdev->debugfs) {
+		rdev_warn(rdev, "Failed to create debugfs directory\n");
+		rdev->debugfs = NULL;
+		return;
+	}
+
+	debugfs_create_u32("use_count", 0444, rdev->debugfs,
+			   &rdev->use_count);
+	debugfs_create_u32("open_count", 0444, rdev->debugfs,
+			   &rdev->open_count);
+#endif
+}
+
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
@@ -2282,7 +2437,7 @@
  * Returns 0 on success.
  */
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, struct regulator_init_data *init_data,
+	struct device *dev, const struct regulator_init_data *init_data,
 	void *driver_data)
 {
 	static atomic_t regulator_no = ATOMIC_INIT(0);
@@ -2302,6 +2457,22 @@
 	if (!init_data)
 		return ERR_PTR(-EINVAL);
 
+	/* Only one of each should be implemented */
+	WARN_ON(regulator_desc->ops->get_voltage &&
+		regulator_desc->ops->get_voltage_sel);
+	WARN_ON(regulator_desc->ops->set_voltage &&
+		regulator_desc->ops->set_voltage_sel);
+
+	/* If we're using selectors we must implement list_voltage. */
+	if (regulator_desc->ops->get_voltage_sel &&
+	    !regulator_desc->ops->list_voltage) {
+		return ERR_PTR(-EINVAL);
+	}
+	if (regulator_desc->ops->set_voltage_sel &&
+	    !regulator_desc->ops->list_voltage) {
+		return ERR_PTR(-EINVAL);
+	}
+
 	rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
 	if (rdev == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -2399,6 +2570,8 @@
 	}
 
 	list_add(&rdev->list, &regulator_list);
+
+	rdev_init_debugfs(rdev);
 out:
 	mutex_unlock(&regulator_list_mutex);
 	return rdev;
@@ -2431,12 +2604,16 @@
 		return;
 
 	mutex_lock(&regulator_list_mutex);
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(rdev->debugfs);
+#endif
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
 	if (rdev->supply)
 		sysfs_remove_link(&rdev->dev.kobj, "supply");
 	device_unregister(&rdev->dev);
+	kfree(rdev->constraints);
 	mutex_unlock(&regulator_list_mutex);
 }
 EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -2465,8 +2642,7 @@
 		mutex_unlock(&rdev->mutex);
 
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to prepare %s\n",
-				__func__, rdev_get_name(rdev));
+			rdev_err(rdev, "failed to prepare\n");
 			goto out;
 		}
 	}
@@ -2572,10 +2748,16 @@
 {
 	int ret;
 
-	printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
-
 	ret = class_register(&regulator_class);
 
+#ifdef CONFIG_DEBUG_FS
+	debugfs_root = debugfs_create_dir("regulator", NULL);
+	if (IS_ERR(debugfs_root) || !debugfs_root) {
+		pr_warn("regulator: Failed to create debugfs directory\n");
+		debugfs_root = NULL;
+	}
+#endif
+
 	regulator_dummy_init();
 
 	return ret;
@@ -2590,7 +2772,6 @@
 	struct regulator_ops *ops;
 	struct regulation_constraints *c;
 	int enabled, ret;
-	const char *name;
 
 	mutex_lock(&regulator_list_mutex);
 
@@ -2602,8 +2783,6 @@
 		ops = rdev->desc->ops;
 		c = rdev->constraints;
 
-		name = rdev_get_name(rdev);
-
 		if (!ops->disable || (c && c->always_on))
 			continue;
 
@@ -2624,13 +2803,10 @@
 		if (has_full_constraints) {
 			/* We log since this may kill the system if it
 			 * goes wrong. */
-			printk(KERN_INFO "%s: disabling %s\n",
-			       __func__, name);
+			rdev_info(rdev, "disabling\n");
 			ret = ops->disable(rdev);
 			if (ret != 0) {
-				printk(KERN_ERR
-				       "%s: couldn't disable %s: %d\n",
-				       __func__, name, ret);
+				rdev_err(rdev, "couldn't disable: %d\n", ret);
 			}
 		} else {
 			/* The intention is that in future we will
@@ -2638,9 +2814,7 @@
 			 * so warn even if we aren't going to do
 			 * anything here.
 			 */
-			printk(KERN_WARNING
-			       "%s: incomplete constraints, leaving %s on\n",
-			       __func__, name);
+			rdev_warn(rdev, "incomplete constraints, leaving on\n");
 		}
 
 unlock:
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index f8c4661..362e082 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -107,7 +107,7 @@
 
 /* DA9030/DA9034 common operations */
 static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -119,6 +119,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -187,7 +188,8 @@
 
 /* DA9030 specific operations */
 static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
-				       int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da903x_dev = to_da903x_dev(rdev);
@@ -200,6 +202,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 	val |= DA9030_LDO_UNLOCK; /* have to set UNLOCK bits */
@@ -214,7 +217,8 @@
 }
 
 static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				    int min_uV, int max_uV,
+				    unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da903x_dev = to_da903x_dev(rdev);
@@ -234,6 +238,7 @@
 		val = (min_uV - thresh + info->step_uV - 1) / info->step_uV;
 	}
 
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -263,7 +268,7 @@
 
 /* DA9034 specific operations */
 static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -276,6 +281,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -289,7 +295,7 @@
 }
 
 static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -302,6 +308,7 @@
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
 	val = (val >= 20) ? val - 12 : ((val > 7) ? 8 : val);
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index b8cc638..e4b3592 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -58,7 +58,9 @@
 	return data;
 }
 
-static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV)
+static int isl6271a_set_voltage(struct regulator_dev *dev,
+				int minuV, int maxuV,
+				unsigned *selector)
 {
 	struct isl_pmic *pmic = rdev_get_drvdata(dev);
 	int vsel, err, data;
@@ -78,6 +80,8 @@
 	/* Convert the microvolts to data for the chip */
 	data = (vsel - ISL6271A_VOLTAGE_MIN) / ISL6271A_VOLTAGE_STEP;
 
+	*selector = data;
+
 	mutex_lock(&pmic->mtx);
 
 	err = i2c_smbus_write_byte(pmic->client, data);
@@ -169,7 +173,7 @@
 						init_data, pmic);
 		if (IS_ERR(pmic->rdev[i])) {
 			dev_err(&i2c->dev, "failed to register %s\n", id->name);
-			err = PTR_ERR(pmic->rdev);
+			err = PTR_ERR(pmic->rdev[i]);
 			goto error;
 		}
 	}
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 3bb82b6..0f22ef1 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -168,7 +168,8 @@
 }
 
 static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV,
+				  unsigned int *selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3971_LDO1;
@@ -187,6 +188,8 @@
 	if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
 			LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo),
 			val << LDO_VOL_CONTR_SHIFT(ldo));
@@ -256,7 +259,8 @@
 }
 
 static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV,
+				   unsigned int *selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3971_DCDC1;
@@ -277,6 +281,8 @@
 	if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
 	       BUCK_TARGET_VOL_MASK, val);
 	if (ret)
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index e07062f..6aa1b50 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -292,7 +292,8 @@
 }
 
 static int lp3972_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV,
+				  unsigned int *selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3972_LDO1;
@@ -313,6 +314,8 @@
 	if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo);
 	ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo),
 		LP3972_LDO_VOL_MASK(ldo) << shift, val << shift);
@@ -416,7 +419,8 @@
 }
 
 static int lp3972_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV,
+				   unsigned int *selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3972_DCDC1;
@@ -438,6 +442,8 @@
 	    vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck),
 				LP3972_BUCK_VOL_MASK, val);
 	if (ret)
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 559cfa2..3f49512 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -63,12 +63,12 @@
 	return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL);
 }
 
-static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			  unsigned *selector)
 {
 	struct max1586_data *max1586 = rdev_get_drvdata(rdev);
 	struct i2c_client *client = max1586->client;
 	unsigned range_uV = max1586->max_uV - max1586->min_uV;
-	unsigned selector;
 	u8 v3_prog;
 
 	if (min_uV > max1586->max_uV || max_uV < max1586->min_uV)
@@ -76,15 +76,15 @@
 	if (min_uV < max1586->min_uV)
 		min_uV = max1586->min_uV;
 
-	selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
+	*selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
 			range_uV - 1) / range_uV;
-	if (max1586_v3_calc_voltage(max1586, selector) > max_uV)
+	if (max1586_v3_calc_voltage(max1586, *selector) > max_uV)
 		return -EINVAL;
 
 	dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
-		max1586_v3_calc_voltage(max1586, selector) / 1000);
+		max1586_v3_calc_voltage(max1586, *selector) / 1000);
 
-	v3_prog = I2C_V3_SELECT | (u8) selector;
+	v3_prog = I2C_V3_SELECT | (u8) *selector;
 	return i2c_smbus_write_byte(client, v3_prog);
 }
 
@@ -110,10 +110,10 @@
 	return voltages_uv[selector];
 }
 
-static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			  unsigned int *selector)
 {
 	struct i2c_client *client = rdev_get_drvdata(rdev);
-	unsigned selector;
 	u8 v6_prog;
 
 	if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV)
@@ -122,21 +122,21 @@
 		return -EINVAL;
 
 	if (min_uV < 1800000)
-		selector = 0;
+		*selector = 0;
 	else if (min_uV < 2500000)
-		selector = 1;
+		*selector = 1;
 	else if (min_uV < 3000000)
-		selector = 2;
+		*selector = 2;
 	else if (min_uV >= 3000000)
-		selector = 3;
+		*selector = 3;
 
-	if (max1586_v6_calc_voltage(selector) > max_uV)
+	if (max1586_v6_calc_voltage(*selector) > max_uV)
 		return -EINVAL;
 
 	dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
-		max1586_v6_calc_voltage(selector) / 1000);
+		max1586_v6_calc_voltage(*selector) / 1000);
 
-	v6_prog = I2C_V6_SELECT | (u8) selector;
+	v6_prog = I2C_V6_SELECT | (u8) *selector;
 	return i2c_smbus_write_byte(client, v6_prog);
 }
 
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 6b60a9c..30eb9e5 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -155,7 +155,7 @@
 }
 
 static int max8649_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
 	unsigned char data, mask;
@@ -168,6 +168,7 @@
 	data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1)
 		/ MAX8649_DCDC_STEP;
 	mask = MAX8649_VOL_MASK;
+	*selector = data & mask;
 
 	return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
 }
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index c570e6e..33f5d9a 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -141,7 +141,8 @@
 	return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
 }
 
-static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			    unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 reg, selector, bits;
@@ -154,6 +155,7 @@
 
 	selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
 			/ MAX8660_DCDC_STEP;
+	*s = selector;
 
 	ret = max8660_dcdc_list(rdev, selector);
 	if (ret < 0 || ret > max_uV)
@@ -196,7 +198,8 @@
 	return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
 }
 
-static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			    unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 selector;
@@ -213,6 +216,8 @@
 	if (ret < 0 || ret > max_uV)
 		return -EINVAL;
 
+	*s = selector;
+
 	ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
 	if (ret)
 		return ret;
@@ -270,7 +275,8 @@
 	return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
 }
 
-static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV,
+			     int max_uV, unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 selector;
@@ -288,6 +294,8 @@
 	if (ret < 0 || ret > max_uV)
 		return -EINVAL;
 
+	*s = selector;
+
 	if (rdev_get_id(rdev) == MAX8660_V6)
 		return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
 	else
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 552cad8..8ae1475 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -55,7 +55,7 @@
 }
 
 static int max8925_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned int *selector)
 {
 	struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 	unsigned char data, mask;
@@ -66,6 +66,7 @@
 		return -EINVAL;
 	}
 	data = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = data;
 	data <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
 
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 0d5dda4..a8f4ecf 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -133,7 +133,7 @@
 }
 
 static int max8952_set_voltage(struct regulator_dev *rdev,
-				int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8952_data *max8952 = rdev_get_drvdata(rdev);
 	s8 vid = -1, i;
@@ -156,6 +156,7 @@
 	if (vid >= 0 && vid < MAX8952_NUM_DVS_MODE) {
 		max8952->vid0 = (vid % 2 == 1);
 		max8952->vid1 = (((vid >> 1) % 2) == 1);
+		*selector = vid;
 		gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
 		gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
 	} else
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 5c20756..0ec49ca 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -304,7 +304,7 @@
 }
 
 static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
-				int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
 	struct i2c_client *i2c = max8998->iodev->i2c;
@@ -331,6 +331,8 @@
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
 
+	*selector = i;
+
 	ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
 	if (ret)
 		return ret;
@@ -352,7 +354,7 @@
 }
 
 static int max8998_set_voltage_buck(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
 	struct max8998_platform_data *pdata =
@@ -384,6 +386,8 @@
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
 
+	*selector = i;
+
 	ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
 	if (ret)
 		return ret;
@@ -420,6 +424,9 @@
 				}
 			}
 
+			if (pdata->buck_voltage_lock)
+				return -EINVAL;
+
 			/* no predefined regulator found */
 			max8998->buck1_idx = (buck1_last_val % 2) + 2;
 			dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n",
@@ -447,18 +454,26 @@
 			"BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n"
 			, i, max8998->buck2_vol[0], max8998->buck2_vol[1]);
 		if (gpio_is_valid(pdata->buck2_set3)) {
-			if (max8998->buck2_vol[0] == i) {
-				max8998->buck1_idx = 0;
-				buck2_gpio_set(pdata->buck2_set3, 0);
-			} else {
-				max8998->buck1_idx = 1;
-				ret = max8998_get_voltage_register(rdev, &reg,
-								   &shift,
-								   &mask);
-				ret = max8998_write_reg(i2c, reg, i);
-				max8998->buck2_vol[1] = i;
-				buck2_gpio_set(pdata->buck2_set3, 1);
+
+			/* check if the requested voltage value is already defined */
+			for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
+				if (max8998->buck2_vol[j] == i) {
+					max8998->buck2_idx = j;
+					buck2_gpio_set(pdata->buck2_set3, j);
+					goto buck2_exit;
+				}
 			}
+
+			if (pdata->buck_voltage_lock)
+				return -EINVAL;
+
+			max8998_get_voltage_register(rdev,
+					&reg, &shift, &mask);
+			ret = max8998_write_reg(i2c, reg, i);
+			max8998->buck2_vol[max8998->buck2_idx] = i;
+			buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
+buck2_exit:
 			dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
 				gpio_get_value(pdata->buck2_set3));
 		} else {
@@ -703,6 +718,9 @@
 	platform_set_drvdata(pdev, max8998);
 	i2c = max8998->iodev->i2c;
 
+	max8998->buck1_idx = pdata->buck1_default_idx;
+	max8998->buck2_idx = pdata->buck2_default_idx;
+
 	/* NOTE: */
 	/* For unused GPIOs not marked as -1 (and therefore equal to 0), */
 	/* a WARN_ON will be displayed */
@@ -735,23 +753,46 @@
 		i = 0;
 		while (buck12_voltage_map_desc.min +
 		       buck12_voltage_map_desc.step*i
-		       != (pdata->buck1_max_voltage1 / 1000))
+		       < (pdata->buck1_voltage1 / 1000))
 			i++;
-		printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
 		max8998->buck1_vol[0] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
+		if (ret)
+			return ret;
 
 		/* Set predefined value for BUCK1 register 2 */
 		i = 0;
 		while (buck12_voltage_map_desc.min +
 		       buck12_voltage_map_desc.step*i
-		       != (pdata->buck1_max_voltage2 / 1000))
+		       < (pdata->buck1_voltage2 / 1000))
 			i++;
 
 		max8998->buck1_vol[1] = i;
-		printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
-		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i)
-			+ ret;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
+		if (ret)
+			return ret;
+
+		/* Set predefined value for BUCK1 register 3 */
+		i = 0;
+		while (buck12_voltage_map_desc.min +
+		       buck12_voltage_map_desc.step*i
+		       < (pdata->buck1_voltage3 / 1000))
+			i++;
+
+		max8998->buck1_vol[2] = i;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
+		if (ret)
+			return ret;
+
+		/* Set predefined value for BUCK1 register 4 */
+		i = 0;
+		while (buck12_voltage_map_desc.min +
+		       buck12_voltage_map_desc.step*i
+		       < (pdata->buck1_voltage4 / 1000))
+			i++;
+
+		max8998->buck1_vol[3] = i;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
 		if (ret)
 			return ret;
 
@@ -768,18 +809,28 @@
 		gpio_direction_output(pdata->buck2_set3,
 				      max8998->buck2_idx & 0x1);
 
-		/* BUCK2 - set preset default voltage value to buck2_vol[0] */
+		/* BUCK2 register 1 */
 		i = 0;
 		while (buck12_voltage_map_desc.min +
 		       buck12_voltage_map_desc.step*i
-		       != (pdata->buck2_max_voltage / 1000))
+		       < (pdata->buck2_voltage1 / 1000))
 			i++;
-		printk(KERN_ERR "i:%d, buck2_idx:%d\n", i, max8998->buck2_idx);
 		max8998->buck2_vol[0] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
 		if (ret)
 			return ret;
 
+		/* BUCK2 register 2 */
+		i = 0;
+		while (buck12_voltage_map_desc.min +
+		       buck12_voltage_map_desc.step*i
+		       < (pdata->buck2_voltage2 / 1000))
+			i++;
+		max8998->buck2_vol[1] = i;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
+		if (ret)
+			return ret;
 	}
 
 	for (i = 0; i < pdata->num_regulators; i++) {
@@ -831,6 +882,12 @@
 	return 0;
 }
 
+static const struct platform_device_id max8998_pmic_id[] = {
+	{ "max8998-pmic", TYPE_MAX8998 },
+	{ "lp3974-pmic", TYPE_LP3974 },
+	{ }
+};
+
 static struct platform_driver max8998_pmic_driver = {
 	.driver = {
 		.name = "max8998-pmic",
@@ -838,6 +895,7 @@
 	},
 	.probe = max8998_pmic_probe,
 	.remove = __devexit_p(max8998_pmic_remove),
+	.id_table = max8998_pmic_id,
 };
 
 static int __init max8998_pmic_init(void)
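
With the id_table in place, the same platform driver now binds both "max8998-pmic" and "lp3974-pmic" devices. A short sketch of how a probe path could distinguish them via the per-entry driver_data (illustrative only; this hunk itself does not change the probe, and example_get_device_type() is a hypothetical name):

#include <linux/platform_device.h>

static int example_get_device_type(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* TYPE_MAX8998 or TYPE_LP3974, as set in max8998_pmic_id[] above */
	return (int)id->driver_data;
}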
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index ecd99f5..3e5d0c3 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -1,6 +1,7 @@
 /*
  * Regulator Driver for Freescale MC13783 PMIC
  *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
  * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
  *
@@ -17,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include "mc13xxx.h"
 
 #define MC13783_REG_SWITCHERS5			29
 #define MC13783_REG_SWITCHERS5_SW3EN			(1 << 20)
@@ -89,154 +91,106 @@
 #define MC13783_REG_POWERMISC_PWGTSPI_M			(3 << 15)
 
 
-struct mc13783_regulator {
-	struct regulator_desc desc;
-	int reg;
-	int enable_bit;
-	int vsel_reg;
-	int vsel_shift;
-	int vsel_mask;
-	int const *voltages;
-};
-
 /* Voltage Values */
-static const int const mc13783_sw3_val[] = {
+static const int mc13783_sw3_val[] = {
 	5000000, 5000000, 5000000, 5500000,
 };
 
-static const int const mc13783_vaudio_val[] = {
+static const int mc13783_vaudio_val[] = {
 	2775000,
 };
 
-static const int const mc13783_viohi_val[] = {
+static const int mc13783_viohi_val[] = {
 	2775000,
 };
 
-static const int const mc13783_violo_val[] = {
+static const int mc13783_violo_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 };
 
-static const int const mc13783_vdig_val[] = {
+static const int mc13783_vdig_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 };
 
-static const int const mc13783_vgen_val[] = {
+static const int mc13783_vgen_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 	1100000, 2000000, 2775000, 2400000,
 };
 
-static const int const mc13783_vrfdig_val[] = {
+static const int mc13783_vrfdig_val[] = {
 	1200000, 1500000, 1800000, 1875000,
 };
 
-static const int const mc13783_vrfref_val[] = {
+static const int mc13783_vrfref_val[] = {
 	2475000, 2600000, 2700000, 2775000,
 };
 
-static const int const mc13783_vrfcp_val[] = {
+static const int mc13783_vrfcp_val[] = {
 	2700000, 2775000,
 };
 
-static const int const mc13783_vsim_val[] = {
+static const int mc13783_vsim_val[] = {
 	1800000, 2900000, 3000000,
 };
 
-static const int const mc13783_vesim_val[] = {
+static const int mc13783_vesim_val[] = {
 	1800000, 2900000,
 };
 
-static const int const mc13783_vcam_val[] = {
+static const int mc13783_vcam_val[] = {
 	1500000, 1800000, 2500000, 2550000,
 	2600000, 2750000, 2800000, 3000000,
 };
 
-static const int const mc13783_vrfbg_val[] = {
+static const int mc13783_vrfbg_val[] = {
 	1250000,
 };
 
-static const int const mc13783_vvib_val[] = {
+static const int mc13783_vvib_val[] = {
 	1300000, 1800000, 2000000, 3000000,
 };
 
-static const int const mc13783_vmmc_val[] = {
+static const int mc13783_vmmc_val[] = {
 	1600000, 1800000, 2000000, 2600000,
 	2700000, 2800000, 2900000, 3000000,
 };
 
-static const int const mc13783_vrf_val[] = {
+static const int mc13783_vrf_val[] = {
 	1500000, 1875000, 2700000, 2775000,
 };
 
-static const int const mc13783_gpo_val[] = {
+static const int mc13783_gpo_val[] = {
 	3100000,
 };
 
-static const int const mc13783_pwgtdrv_val[] = {
+static const int mc13783_pwgtdrv_val[] = {
 	5500000,
 };
 
-static struct regulator_ops mc13783_regulator_ops;
-static struct regulator_ops mc13783_fixed_regulator_ops;
 static struct regulator_ops mc13783_gpo_regulator_ops;
 
-#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages)	\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_regulator_ops,			\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.vsel_reg = MC13783_REG_ ## _vsel_reg,			\
-		.vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
-		.vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \
+			mc13xxx_regulator_ops)
 
-#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages)		\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_fixed_regulator_ops,		\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages)		\
+	MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \
+			mc13xxx_fixed_regulator_ops)
 
-#define MC13783_GPO_DEFINE(prefix, _name, _reg,  _voltages)		\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_gpo_regulator_ops,		\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_GPO_DEFINE(prefix, name, reg, voltages)		\
+	MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \
+			mc13783_gpo_regulator_ops)
 
 #define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages)		\
-	MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
+	MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
 #define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages)		\
-	MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
+	MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
 
-static struct mc13783_regulator mc13783_regulators[] = {
+static struct mc13xxx_regulator mc13783_regulators[] = {
 	MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
 
-	MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
-	MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
+	MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
+	MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
 	MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0,	\
 			    mc13783_violo_val),
 	MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,	\
@@ -255,7 +209,7 @@
 			    mc13783_vesim_val),
 	MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,	\
 			    mc13783_vcam_val),
-	MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
+	MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
 	MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1,	\
 			    mc13783_vvib_val),
 	MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1,	\
@@ -266,215 +220,24 @@
 			    mc13783_vmmc_val),
 	MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1,	\
 			    mc13783_vmmc_val),
-	MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
-	MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
+	MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
+	MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
 };
 
-struct mc13783_regulator_priv {
-	struct mc13783 *mc13783;
-	u32 powermisc_pwgt_state;
-	struct regulator_dev *regulators[];
-};
-
-static int mc13783_regulator_enable(struct regulator_dev *rdev)
+static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+		u32 val)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
-			mc13783_regulators[id].enable_bit,
-			mc13783_regulators[id].enable_bit);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_disable(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
-			mc13783_regulators[id].enable_bit, 0);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int ret, id = rdev_get_id(rdev);
-	unsigned int val;
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
-	mc13783_unlock(priv->mc13783);
-
-	if (ret)
-		return ret;
-
-	return (val & mc13783_regulators[id].enable_bit) != 0;
-}
-
-static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
-						unsigned selector)
-{
-	int id = rdev_get_id(rdev);
-
-	if (selector >= mc13783_regulators[id].desc.n_voltages)
-		return -EINVAL;
-
-	return mc13783_regulators[id].voltages[selector];
-}
-
-static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	int reg_id = rdev_get_id(rdev);
-	int i;
-	int bestmatch;
-	int bestindex;
-
-	/*
-	 * Locate the minimum voltage fitting the criteria on
-	 * this regulator. The switchable voltages are not
-	 * in strict falling order so we need to check them
-	 * all for the best match.
-	 */
-	bestmatch = INT_MAX;
-	bestindex = -1;
-	for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
-		if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
-		    mc13783_regulators[reg_id].voltages[i] < bestmatch) {
-			bestmatch = mc13783_regulators[reg_id].voltages[i];
-			bestindex = i;
-		}
-	}
-
-	if (bestindex < 0 || bestmatch > max_uV) {
-		dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
-				min_uV, max_uV);
-		return -EINVAL;
-	}
-	return bestindex;
-}
-
-static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int value, id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	/* Find the best index */
-	value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
-	dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value);
-	if (value < 0)
-		return value;
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
-			mc13783_regulators[id].vsel_mask,
-			value << mc13783_regulators[id].vsel_shift);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int ret, id = rdev_get_id(rdev);
-	unsigned int val;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783,
-				mc13783_regulators[id].vsel_reg, &val);
-	mc13783_unlock(priv->mc13783);
-
-	if (ret)
-		return ret;
-
-	val = (val & mc13783_regulators[id].vsel_mask)
-		>> mc13783_regulators[id].vsel_shift;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
-
-	BUG_ON(val < 0 || val > mc13783_regulators[id].desc.n_voltages);
-
-	return mc13783_regulators[id].voltages[val];
-}
-
-static struct regulator_ops mc13783_regulator_ops = {
-	.enable = mc13783_regulator_enable,
-	.disable = mc13783_regulator_disable,
-	.is_enabled = mc13783_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_regulator_set_voltage,
-	.get_voltage = mc13783_regulator_get_voltage,
-};
-
-static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	int id = rdev_get_id(rdev);
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	if (min_uV >= mc13783_regulators[id].voltages[0] &&
-	    max_uV <= mc13783_regulators[id].voltages[0])
-		return 0;
-	else
-		return -EINVAL;
-}
-
-static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
-{
-	int id = rdev_get_id(rdev);
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	return mc13783_regulators[id].voltages[0];
-}
-
-static struct regulator_ops mc13783_fixed_regulator_ops = {
-	.enable = mc13783_regulator_enable,
-	.disable = mc13783_regulator_disable,
-	.is_enabled = mc13783_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_fixed_regulator_set_voltage,
-	.get_voltage = mc13783_fixed_regulator_get_voltage,
-};
-
-static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
-				 u32 val)
-{
-	struct mc13783 *mc13783 = priv->mc13783;
+	struct mc13xxx *mc13783 = priv->mc13xxx;
 	int ret;
 	u32 valread;
 
 	BUG_ON(val & ~mask);
 
-	ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
+	ret = mc13xxx_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
 	if (ret)
 		return ret;
 
@@ -489,34 +252,36 @@
 	valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
 						priv->powermisc_pwgt_state;
 
-	return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
+	return mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
 }
 
 static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int id = rdev_get_id(rdev);
 	int ret;
-	u32 en_val = mc13783_regulators[id].enable_bit;
+	u32 en_val = mc13xxx_regulators[id].enable_bit;
 
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
 
 	/* Power Gate enable value is 0 */
-	if (id == MC13783_REGU_PWGT1SPI ||
-	    id == MC13783_REGU_PWGT2SPI)
+	if (id == MC13783_REG_PWGT1SPI ||
+	    id == MC13783_REG_PWGT2SPI)
 		en_val = 0;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
 					en_val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
 }
 
 static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int id = rdev_get_id(rdev);
 	int ret;
 	u32 dis_val = 0;
@@ -524,27 +289,28 @@
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
 
 	/* Power Gate disable value is 1 */
-	if (id == MC13783_REGU_PWGT1SPI ||
-	    id == MC13783_REGU_PWGT2SPI)
-		dis_val = mc13783_regulators[id].enable_bit;
+	if (id == MC13783_REG_PWGT1SPI ||
+	    id == MC13783_REG_PWGT2SPI)
+		dis_val = mc13xxx_regulators[id].enable_bit;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
 					dis_val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
 }
 
 static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int ret, id = rdev_get_id(rdev);
 	unsigned int val;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	if (ret)
 		return ret;
@@ -554,22 +320,22 @@
 	val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
 	      (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);
 
-	return (val & mc13783_regulators[id].enable_bit) != 0;
+	return (val & mc13xxx_regulators[id].enable_bit) != 0;
 }
 
 static struct regulator_ops mc13783_gpo_regulator_ops = {
 	.enable = mc13783_gpo_regulator_enable,
 	.disable = mc13783_gpo_regulator_disable,
 	.is_enabled = mc13783_gpo_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_fixed_regulator_set_voltage,
-	.get_voltage = mc13783_fixed_regulator_get_voltage,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
 };
 
 static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
 {
-	struct mc13783_regulator_priv *priv;
-	struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
+	struct mc13xxx_regulator_priv *priv;
+	struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
 	struct mc13783_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	struct mc13783_regulator_init_data *init_data;
@@ -583,7 +349,8 @@
 	if (!priv)
 		return -ENOMEM;
 
-	priv->mc13783 = mc13783;
+	priv->mc13xxx_regulators = mc13783_regulators;
+	priv->mc13xxx = mc13783;
 
 	for (i = 0; i < pdata->num_regulators; i++) {
 		init_data = &pdata->regulators[i];
@@ -613,7 +380,7 @@
 
 static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
 {
-	struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
+	struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
 	struct mc13783_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	int i;
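Both the mc13783_powermisc_rmw() kept above and the mc13892 equivalent added below share the same POWERMISC bookkeeping: apply the caller's mask/value, but always force the power-gate field from a cached copy, because those bits use inverted logic and must never be clobbered by unrelated updates. A hedged sketch of that merge rule (uint32_t stands in for the kernel's u32; pwgt_mask is the chip's PWGTxSPIEN field):

#include <stdint.h>

/* Illustrative sketch of the POWERMISC update rule used by
 * mc13783_powermisc_rmw()/mc13892_powermisc_rmw(): apply the requested
 * mask/val, then overwrite the power-gate field with the cached
 * pwgt_state so the inverted-logic PWGT bits are preserved.
 */
static uint32_t powermisc_merge(uint32_t reg, uint32_t mask, uint32_t val,
				uint32_t *pwgt_state, uint32_t pwgt_mask)
{
	*pwgt_state = (*pwgt_state & ~mask) | val;
	*pwgt_state &= pwgt_mask;

	reg = (reg & ~mask) | val;
	return (reg & ~pwgt_mask) | *pwgt_state;
}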
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
new file mode 100644
index 0000000..1b8f739
--- /dev/null
+++ b/drivers/regulator/mc13892-regulator.c
@@ -0,0 +1,635 @@
+/*
+ * Regulator Driver for Freescale MC13892 PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13892.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+#define MC13892_REVISION			7
+
+#define MC13892_POWERCTL0			13
+#define MC13892_POWERCTL0_USEROFFSPI		3
+#define MC13892_POWERCTL0_VCOINCELLVSEL		20
+#define MC13892_POWERCTL0_VCOINCELLVSEL_M	(7<<20)
+#define MC13892_POWERCTL0_VCOINCELLEN		(1<<23)
+
+#define MC13892_SWITCHERS0_SWxHI		(1<<23)
+
+#define MC13892_SWITCHERS0			24
+#define MC13892_SWITCHERS0_SW1VSEL		0
+#define MC13892_SWITCHERS0_SW1VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS0_SW1HI		(1<<23)
+#define MC13892_SWITCHERS0_SW1EN		0
+
+#define MC13892_SWITCHERS1			25
+#define MC13892_SWITCHERS1_SW2VSEL		0
+#define MC13892_SWITCHERS1_SW2VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS1_SW2HI		(1<<23)
+#define MC13892_SWITCHERS1_SW2EN		0
+
+#define MC13892_SWITCHERS2			26
+#define MC13892_SWITCHERS2_SW3VSEL		0
+#define MC13892_SWITCHERS2_SW3VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS2_SW3HI		(1<<23)
+#define MC13892_SWITCHERS2_SW3EN		0
+
+#define MC13892_SWITCHERS3			27
+#define MC13892_SWITCHERS3_SW4VSEL		0
+#define MC13892_SWITCHERS3_SW4VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS3_SW4HI		(1<<23)
+#define MC13892_SWITCHERS3_SW4EN		0
+
+#define MC13892_SWITCHERS4			28
+#define MC13892_SWITCHERS4_SW1MODE		0
+#define MC13892_SWITCHERS4_SW1MODE_AUTO		(8<<0)
+#define MC13892_SWITCHERS4_SW1MODE_M		(0xf<<0)
+#define MC13892_SWITCHERS4_SW2MODE		10
+#define MC13892_SWITCHERS4_SW2MODE_AUTO		(8<<10)
+#define MC13892_SWITCHERS4_SW2MODE_M		(0xf<<10)
+
+#define MC13892_SWITCHERS5			29
+#define MC13892_SWITCHERS5_SW3MODE		0
+#define MC13892_SWITCHERS5_SW3MODE_AUTO		(8<<0)
+#define MC13892_SWITCHERS5_SW3MODE_M		(0xf<<0)
+#define MC13892_SWITCHERS5_SW4MODE		8
+#define MC13892_SWITCHERS5_SW4MODE_AUTO		(8<<8)
+#define MC13892_SWITCHERS5_SW4MODE_M		(0xf<<8)
+#define MC13892_SWITCHERS5_SWBSTEN		(1<<20)
+
+#define MC13892_REGULATORSETTING0		30
+#define MC13892_REGULATORSETTING0_VGEN1VSEL	0
+#define MC13892_REGULATORSETTING0_VDIGVSEL	4
+#define MC13892_REGULATORSETTING0_VGEN2VSEL	6
+#define MC13892_REGULATORSETTING0_VPLLVSEL	9
+#define MC13892_REGULATORSETTING0_VUSB2VSEL	11
+#define MC13892_REGULATORSETTING0_VGEN3VSEL	14
+#define MC13892_REGULATORSETTING0_VCAMVSEL	16
+
+#define MC13892_REGULATORSETTING0_VGEN1VSEL_M	(3<<0)
+#define MC13892_REGULATORSETTING0_VDIGVSEL_M	(3<<4)
+#define MC13892_REGULATORSETTING0_VGEN2VSEL_M	(7<<6)
+#define MC13892_REGULATORSETTING0_VPLLVSEL_M	(3<<9)
+#define MC13892_REGULATORSETTING0_VUSB2VSEL_M	(3<<11)
+#define MC13892_REGULATORSETTING0_VGEN3VSEL_M	(1<<14)
+#define MC13892_REGULATORSETTING0_VCAMVSEL_M	(3<<16)
+
+#define MC13892_REGULATORSETTING1		31
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL	2
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL	4
+#define MC13892_REGULATORSETTING1_VSDVSEL	6
+
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL_M	(3<<2)
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL_M	(3<<4)
+#define MC13892_REGULATORSETTING1_VSDVSEL_M	(7<<6)
+
+#define MC13892_REGULATORMODE0			32
+#define MC13892_REGULATORMODE0_VGEN1EN		(1<<0)
+#define MC13892_REGULATORMODE0_VGEN1STDBY	(1<<1)
+#define MC13892_REGULATORMODE0_VGEN1MODE	(1<<2)
+#define MC13892_REGULATORMODE0_VIOHIEN		(1<<3)
+#define MC13892_REGULATORMODE0_VIOHISTDBY	(1<<4)
+#define MC13892_REGULATORMODE0_VIOHIMODE	(1<<5)
+#define MC13892_REGULATORMODE0_VDIGEN		(1<<9)
+#define MC13892_REGULATORMODE0_VDIGSTDBY	(1<<10)
+#define MC13892_REGULATORMODE0_VDIGMODE		(1<<11)
+#define MC13892_REGULATORMODE0_VGEN2EN		(1<<12)
+#define MC13892_REGULATORMODE0_VGEN2STDBY	(1<<13)
+#define MC13892_REGULATORMODE0_VGEN2MODE	(1<<14)
+#define MC13892_REGULATORMODE0_VPLLEN		(1<<15)
+#define MC13892_REGULATORMODE0_VPLLSTDBY	(1<<16)
+#define MC13892_REGULATORMODE0_VPLLMODE		(1<<17)
+#define MC13892_REGULATORMODE0_VUSB2EN		(1<<18)
+#define MC13892_REGULATORMODE0_VUSB2STDBY	(1<<19)
+#define MC13892_REGULATORMODE0_VUSB2MODE	(1<<20)
+
+#define MC13892_REGULATORMODE1			33
+#define MC13892_REGULATORMODE1_VGEN3EN		(1<<0)
+#define MC13892_REGULATORMODE1_VGEN3STDBY	(1<<1)
+#define MC13892_REGULATORMODE1_VGEN3MODE	(1<<2)
+#define MC13892_REGULATORMODE1_VCAMEN		(1<<6)
+#define MC13892_REGULATORMODE1_VCAMSTDBY	(1<<7)
+#define MC13892_REGULATORMODE1_VCAMMODE		(1<<8)
+#define MC13892_REGULATORMODE1_VCAMCONFIGEN	(1<<9)
+#define MC13892_REGULATORMODE1_VVIDEOEN		(1<<12)
+#define MC13892_REGULATORMODE1_VVIDEOSTDBY	(1<<13)
+#define MC13892_REGULATORMODE1_VVIDEOMODE	(1<<14)
+#define MC13892_REGULATORMODE1_VAUDIOEN		(1<<15)
+#define MC13892_REGULATORMODE1_VAUDIOSTDBY	(1<<16)
+#define MC13892_REGULATORMODE1_VAUDIOMODE	(1<<17)
+#define MC13892_REGULATORMODE1_VSDEN		(1<<18)
+#define MC13892_REGULATORMODE1_VSDSTDBY		(1<<19)
+#define MC13892_REGULATORMODE1_VSDMODE		(1<<20)
+
+#define MC13892_POWERMISC			34
+#define MC13892_POWERMISC_GPO1EN		(1<<6)
+#define MC13892_POWERMISC_GPO2EN		(1<<8)
+#define MC13892_POWERMISC_GPO3EN		(1<<10)
+#define MC13892_POWERMISC_GPO4EN		(1<<12)
+#define MC13892_POWERMISC_PWGT1SPIEN		(1<<15)
+#define MC13892_POWERMISC_PWGT2SPIEN		(1<<16)
+#define MC13892_POWERMISC_GPO4ADINEN		(1<<21)
+
+#define MC13892_POWERMISC_PWGTSPI_M		(3 << 15)
+
+#define MC13892_USB1				50
+#define MC13892_USB1_VUSBEN			(1<<3)
+
+static const int mc13892_vcoincell[] = {
+	2500000, 2700000, 2800000, 2900000, 3000000, 3100000,
+	3200000, 3300000,
+};
+
+static const int mc13892_sw1[] = {
+	600000,   625000,  650000,  675000,  700000,  725000,
+	750000,   775000,  800000,  825000,  850000,  875000,
+	900000,   925000,  950000,  975000, 1000000, 1025000,
+	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+	1350000, 1375000
+};
+
+static const int mc13892_sw[] = {
+	600000,   625000,  650000,  675000,  700000,  725000,
+	750000,   775000,  800000,  825000,  850000,  875000,
+	900000,   925000,  950000,  975000, 1000000, 1025000,
+	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+	1350000, 1375000, 1400000, 1425000, 1450000, 1475000,
+	1500000, 1525000, 1550000, 1575000, 1600000, 1625000,
+	1650000, 1675000, 1700000, 1725000, 1750000, 1775000,
+	1800000, 1825000, 1850000, 1875000
+};
+
+static const int mc13892_swbst[] = {
+	5000000,
+};
+
+static const int mc13892_viohi[] = {
+	2775000,
+};
+
+static const int mc13892_vpll[] = {
+	1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vdig[] = {
+	1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vsd[] = {
+	1800000, 2000000, 2600000, 2700000,
+	2800000, 2900000, 3000000, 3150000,
+};
+
+static const int mc13892_vusb2[] = {
+	2400000, 2600000, 2700000, 2775000,
+};
+
+static const int mc13892_vvideo[] = {
+	2700000, 2775000, 2500000, 2600000,
+};
+
+static const int mc13892_vaudio[] = {
+	2300000, 2500000, 2775000, 3000000,
+};
+
+static const int mc13892_vcam[] = {
+	2500000, 2600000, 2750000, 3000000,
+};
+
+static const int mc13892_vgen1[] = {
+	1200000, 1500000, 2775000, 3150000,
+};
+
+static const int mc13892_vgen2[] = {
+	1200000, 1500000, 1600000, 1800000,
+	2700000, 2800000, 3000000, 3150000,
+};
+
+static const int mc13892_vgen3[] = {
+	1800000, 2900000,
+};
+
+static const int mc13892_vusb[] = {
+	3300000,
+};
+
+static const int mc13892_gpo[] = {
+	2750000,
+};
+
+static const int mc13892_pwgtdrv[] = {
+	5000000,
+};
+
+static struct regulator_ops mc13892_gpo_regulator_ops;
+/* sw regulators need special care due to the "hi bit" */
+static struct regulator_ops mc13892_sw_regulator_ops;
+
+
+#define MC13892_FIXED_DEFINE(name, reg, voltages)		\
+	MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages,	\
+			mc13xxx_fixed_regulator_ops)
+
+#define MC13892_GPO_DEFINE(name, reg, voltages)			\
+	MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages,	\
+			mc13892_gpo_regulator_ops)
+
+#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+			mc13892_sw_regulator_ops)
+
+#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+			mc13xxx_regulator_ops)
+
+static struct mc13xxx_regulator mc13892_regulators[] = {
+	MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell),
+	MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
+	MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw),
+	MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw),
+	MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
+	MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
+	MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
+	MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vpll),
+	MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vdig),
+	MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vsd),
+	MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vusb2),
+	MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vvideo),
+	MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vaudio),
+	MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,	\
+		mc13892_vcam),
+	MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vgen1),
+	MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vgen2),
+	MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,	\
+		mc13892_vgen3),
+	MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
+	MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv),
+	MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv),
+};
+
+static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+				 u32 val)
+{
+	struct mc13xxx *mc13892 = priv->mc13xxx;
+	int ret;
+	u32 valread;
+
+	BUG_ON(val & ~mask);
+
+	ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread);
+	if (ret)
+		return ret;
+
+	/* Update the stored state for Power Gates. */
+	priv->powermisc_pwgt_state =
+		(priv->powermisc_pwgt_state & ~mask) | val;
+	priv->powermisc_pwgt_state &= MC13892_POWERMISC_PWGTSPI_M;
+
+	/* Construct the new register value */
+	valread = (valread & ~mask) | val;
+	/* Overwrite the PWGTxEN with the stored version */
+	valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) |
+		priv->powermisc_pwgt_state;
+
+	return mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread);
+}
+
+static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	int ret;
+	u32 en_val = mc13892_regulators[id].enable_bit;
+	u32 mask = mc13892_regulators[id].enable_bit;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	/* Power Gate enable value is 0 */
+	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+		en_val = 0;
+
+	if (id == MC13892_GPO4)
+		mask |= MC13892_POWERMISC_GPO4ADINEN;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13892_powermisc_rmw(priv, mask, en_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	int ret;
+	u32 dis_val = 0;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	/* Power Gate disable value is 1 */
+	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+		dis_val = mc13892_regulators[id].enable_bit;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit,
+		dis_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	/* The Power Gate state is stored in powermisc_pwgt_state,
+	 * where the bits have inverted meaning */
+	val = (val & ~MC13892_POWERMISC_PWGTSPI_M) |
+		(priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M);
+
+	return (val & mc13892_regulators[id].enable_bit) != 0;
+}
+
+
+static struct regulator_ops mc13892_gpo_regulator_ops = {
+	.enable = mc13892_gpo_regulator_enable,
+	.disable = mc13892_gpo_regulator_disable,
+	.is_enabled = mc13892_gpo_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+
+static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val, hi;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+		mc13892_regulators[id].vsel_reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+	if (ret)
+		return ret;
+
+	hi  = val & MC13892_SWITCHERS0_SWxHI;
+	val = (val & mc13892_regulators[id].vsel_mask)
+		>> mc13892_regulators[id].vsel_shift;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+	if (hi)
+		val = (25000 * val) + 1100000;
+	else
+		val = (25000 * val) + 600000;
+
+	return val;
+}
+
+static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int hi, value, val, mask, id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	/* Find the best index */
+	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+	if (value < 0)
+		return value;
+
+	value = mc13892_regulators[id].voltages[value];
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+		mc13892_regulators[id].vsel_reg, &val);
+	if (ret)
+		goto err;
+
+	hi  = val & MC13892_SWITCHERS0_SWxHI;
+	if (value > 1375000)
+		hi = 1;
+	if (value < 1100000)
+		hi = 0;
+
+	if (hi) {
+		value = (value - 1100000) / 25000;
+		value |= MC13892_SWITCHERS0_SWxHI;
+	} else
+		value = (value - 600000) / 25000;
+
+	mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+			mask, value << mc13892_regulators[id].vsel_shift);
+err:
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static struct regulator_ops mc13892_sw_regulator_ops = {
+	.is_enabled = mc13xxx_sw_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13892_sw_regulator_set_voltage,
+	.get_voltage = mc13892_sw_regulator_get_voltage,
+};
+
+static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	unsigned int en_val = 0;
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+
+	if (mode == REGULATOR_MODE_FAST)
+		en_val = MC13892_REGULATORMODE1_VCAMCONFIGEN;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg,
+		MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN)
+		return REGULATOR_MODE_FAST;
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+
+static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
+{
+	struct mc13xxx_regulator_priv *priv;
+	struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
+	struct mc13xxx_regulator_platform_data *pdata =
+		dev_get_platdata(&pdev->dev);
+	struct mc13xxx_regulator_init_data *init_data;
+	int i, ret;
+	u32 val;
+
+	priv = kzalloc(sizeof(*priv) +
+		pdata->num_regulators * sizeof(priv->regulators[0]),
+		GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->mc13xxx_regulators = mc13892_regulators;
+	priv->mc13xxx = mc13892;
+
+	mc13xxx_lock(mc13892);
+	ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
+	if (ret)
+		goto err_free;
+
+	/* enable switch auto mode */
+	if ((val & 0x0000FFFF) == 0x45d0) {
+		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
+			MC13892_SWITCHERS4_SW1MODE_M |
+			MC13892_SWITCHERS4_SW2MODE_M,
+			MC13892_SWITCHERS4_SW1MODE_AUTO |
+			MC13892_SWITCHERS4_SW2MODE_AUTO);
+		if (ret)
+			goto err_free;
+
+		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
+			MC13892_SWITCHERS5_SW3MODE_M |
+			MC13892_SWITCHERS5_SW4MODE_M,
+			MC13892_SWITCHERS5_SW3MODE_AUTO |
+			MC13892_SWITCHERS5_SW4MODE_AUTO);
+		if (ret)
+			goto err_free;
+	}
+	mc13xxx_unlock(mc13892);
+
+	mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+		= mc13892_vcam_set_mode;
+	mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+		= mc13892_vcam_get_mode;
+	for (i = 0; i < pdata->num_regulators; i++) {
+		init_data = &pdata->regulators[i];
+		priv->regulators[i] = regulator_register(
+			&mc13892_regulators[init_data->id].desc,
+			&pdev->dev, init_data->init_data, priv);
+
+		if (IS_ERR(priv->regulators[i])) {
+			dev_err(&pdev->dev, "failed to register regulator %s\n",
+				mc13892_regulators[init_data->id].desc.name);
+			ret = PTR_ERR(priv->regulators[i]);
+			goto err;
+		}
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+err:
+	while (--i >= 0)
+		regulator_unregister(priv->regulators[i]);
+
+err_free:
+	mc13xxx_unlock(mc13892);
+	kfree(priv);
+
+	return ret;
+}
+
+static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
+{
+	struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+	struct mc13xxx_regulator_platform_data *pdata =
+		dev_get_platdata(&pdev->dev);
+	int i;
+
+	platform_set_drvdata(pdev, NULL);
+
+	for (i = 0; i < pdata->num_regulators; i++)
+		regulator_unregister(priv->regulators[i]);
+
+	kfree(priv);
+	return 0;
+}
+
+static struct platform_driver mc13892_regulator_driver = {
+	.driver	= {
+		.name	= "mc13892-regulator",
+		.owner	= THIS_MODULE,
+	},
+	.remove	= __devexit_p(mc13892_regulator_remove),
+	.probe	= mc13892_regulator_probe,
+};
+
+static int __init mc13892_regulator_init(void)
+{
+	return platform_driver_register(&mc13892_regulator_driver);
+}
+subsys_initcall(mc13892_regulator_init);
+
+static void __exit mc13892_regulator_exit(void)
+{
+	platform_driver_unregister(&mc13892_regulator_driver);
+}
+module_exit(mc13892_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC");
+MODULE_ALIAS("platform:mc13892-regulator");
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
new file mode 100644
index 0000000..2bb5de1
--- /dev/null
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -0,0 +1,241 @@
+/*
+ * Regulator Driver for Freescale MC13xxx PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on mc13783 regulator driver :
+ * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Register information taken from Freescale's mc13xxx drivers and the
+ * Freescale mc13xxx.pdf datasheet
+ */
+
+#include <linux/mfd/mc13xxx.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+			mc13xxx_regulators[id].enable_bit,
+			mc13xxx_regulators[id].enable_bit);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_disable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+			mc13xxx_regulators[id].enable_bit, 0);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	return (val & mc13xxx_regulators[id].enable_bit) != 0;
+}
+
+int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+						unsigned selector)
+{
+	int id = rdev_get_id(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+
+	if (selector >= mc13xxx_regulators[id].desc.n_voltages)
+		return -EINVAL;
+
+	return mc13xxx_regulators[id].voltages[selector];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
+
+int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+						int min_uV, int max_uV)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int reg_id = rdev_get_id(rdev);
+	int i;
+	int bestmatch;
+	int bestindex;
+
+	/*
+	 * Locate the minimum voltage fitting the criteria on
+	 * this regulator. The switchable voltages are not
+	 * in strict falling order so we need to check them
+	 * all for the best match.
+	 */
+	bestmatch = INT_MAX;
+	bestindex = -1;
+	for (i = 0; i < mc13xxx_regulators[reg_id].desc.n_voltages; i++) {
+		if (mc13xxx_regulators[reg_id].voltages[i] >= min_uV &&
+		    mc13xxx_regulators[reg_id].voltages[i] < bestmatch) {
+			bestmatch = mc13xxx_regulators[reg_id].voltages[i];
+			bestindex = i;
+		}
+	}
+
+	if (bestindex < 0 || bestmatch > max_uV) {
+		dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
+				min_uV, max_uV);
+		return -EINVAL;
+	}
+	return bestindex;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_get_best_voltage_index);
+
+static int mc13xxx_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+		int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int value, id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	/* Find the best index */
+	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+	if (value < 0)
+		return value;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg,
+			mc13xxx_regulators[id].vsel_mask,
+			value << mc13xxx_regulators[id].vsel_shift);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+				mc13xxx_regulators[id].vsel_reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	val = (val & mc13xxx_regulators[id].vsel_mask)
+		>> mc13xxx_regulators[id].vsel_shift;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+	BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
+
+	return mc13xxx_regulators[id].voltages[val];
+}
+
+struct regulator_ops mc13xxx_regulator_ops = {
+	.enable = mc13xxx_regulator_enable,
+	.disable = mc13xxx_regulator_disable,
+	.is_enabled = mc13xxx_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_regulator_set_voltage,
+	.get_voltage = mc13xxx_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
+
+int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+	       int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	if (min_uV >= mc13xxx_regulators[id].voltages[0] &&
+	    max_uV <= mc13xxx_regulators[id].voltages[0])
+		return 0;
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
+
+int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	return mc13xxx_regulators[id].voltages[0];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage);
+
+struct regulator_ops mc13xxx_fixed_regulator_ops = {
+	.enable = mc13xxx_regulator_enable,
+	.disable = mc13xxx_regulator_disable,
+	.is_enabled = mc13xxx_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
+
+int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	return 1;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
+MODULE_ALIAS("mc13xxx-regulator-core");
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
new file mode 100644
index 0000000..2775826
--- /dev/null
+++ b/drivers/regulator/mc13xxx.h
@@ -0,0 +1,101 @@
+/*
+ * mc13xxx.h - regulators for the Freescale mc13xxx PMIC
+ *
+ *  Copyright (C) 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_REGULATOR_MC13XXX_H
+#define __LINUX_REGULATOR_MC13XXX_H
+
+#include <linux/regulator/driver.h>
+
+struct mc13xxx_regulator {
+	struct regulator_desc desc;
+	int reg;
+	int enable_bit;
+	int vsel_reg;
+	int vsel_shift;
+	int vsel_mask;
+	int hi_bit;
+	int const *voltages;
+};
+
+struct mc13xxx_regulator_priv {
+	struct mc13xxx *mc13xxx;
+	u32 powermisc_pwgt_state;
+	struct mc13xxx_regulator *mc13xxx_regulators;
+	struct regulator_dev *regulators[];
+};
+
+extern int mc13xxx_sw_regulator(struct regulator_dev *rdev);
+extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev);
+extern int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+						int min_uV, int max_uV);
+extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+						unsigned selector);
+extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector);
+extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
+
+extern struct regulator_ops mc13xxx_regulator_ops;
+extern struct regulator_ops mc13xxx_fixed_regulator_ops;
+
+#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,			\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.vsel_reg = prefix ## _vsel_reg,			\
+		.vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\
+		.vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,		\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_GPO_DEFINE(prefix, _name, _reg,  _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,		\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops)	\
+	MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops)	\
+	MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops)
+
+#endif
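For context, this is roughly how a consumer driver reaches the enable/set_voltage callbacks these PMIC drivers implement; the "vcam" supply name and the device pointer are placeholders, not something defined by this series. regulator_set_voltage() passes the min/max window straight down to the driver's set_voltage op (e.g. mc13xxx_regulator_set_voltage), which picks the best selector from its voltage table.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer sketch: supply name and dev are placeholders. */
static int example_power_up_camera(struct device *dev)
{
	struct regulator *vcam;
	int ret;

	vcam = regulator_get(dev, "vcam");
	if (IS_ERR(vcam))
		return PTR_ERR(vcam);

	ret = regulator_set_voltage(vcam, 2600000, 2750000);
	if (!ret)
		ret = regulator_enable(vcam);
	if (ret)
		regulator_put(vcam);

	return ret;
}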
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 29d0566..31f6e11 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -151,7 +151,8 @@
 };
 
 static int pcap_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
 	void *pcap = rdev_get_drvdata(rdev);
@@ -170,10 +171,12 @@
 			i = 0;
 
 		uV = vreg->voltage_table[i] * 1000;
-		if (min_uV <= uV && uV <= max_uV)
+		if (min_uV <= uV && uV <= max_uV) {
+			*selector = i;
 			return ezx_pcap_set_bits(pcap, vreg->reg,
 					(vreg->n_voltages - 1) << vreg->index,
 					i << vreg->index);
+		}
 
 		if (i == 0 && rdev_get_id(rdev) == V1)
 			i = vreg->n_voltages - 1;
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index c8f41dc..69a11d9 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -108,7 +108,8 @@
 }
 
 static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
+					  int min_uV, int max_uV,
+					  unsigned *selector)
 {
 	struct pcf50633 *pcf;
 	int regulator_id, millivolts;
@@ -147,6 +148,8 @@
 		return -EINVAL;
 	}
 
+	*selector = volt_bits;
+
 	return pcf50633_reg_write(pcf, regnr, volt_bits);
 }
 
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index cd6d4fc..60a7ca5 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -321,7 +321,8 @@
 }
 
 static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+				     int min_uV, int max_uV,
+				     unsigned *selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
 	int dcdc = rdev_get_id(dev);
@@ -346,6 +347,8 @@
 			break;
 	}
 
+	*selector = vsel;
+
 	/* write to the register in case we found a match */
 	if (vsel == tps->info[dcdc]->table_len)
 		return -EINVAL;
@@ -371,7 +374,7 @@
 }
 
 static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, ldo = rdev_get_id(dev);
@@ -396,6 +399,8 @@
 	if (vsel == tps->info[ldo]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
 	if (data < 0)
 		return data;
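The pcap, pcf50633 and tps65023 hunks above (and the tps6507x/tps6524x ones below) all adapt to the new set_voltage prototype, which reports the chosen table index back to the regulator core through an unsigned *selector out-parameter. A minimal table-driven sketch of the new callback shape (illustrative only; example_table and my_chip_write are placeholders, not part of any driver in this series):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/regulator/driver.h>

static const int example_table[] = { 1800000, 2500000, 2800000, 3300000 };

/* Placeholder for the device-specific register write. */
static int my_chip_write(struct regulator_dev *rdev, int sel)
{
	return 0;
}

/* The driver still picks the table entry itself, but must now also
 * report the chosen index through *selector.
 */
static int example_set_voltage(struct regulator_dev *rdev,
			       int min_uV, int max_uV, unsigned *selector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_table); i++) {
		if (example_table[i] >= min_uV && example_table[i] <= max_uV) {
			*selector = i;
			return my_chip_write(rdev, i);
		}
	}

	return -EINVAL;
}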
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 020f587..0647552 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -369,7 +369,8 @@
 }
 
 static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+					  int min_uV, int max_uV,
+					  unsigned *selector)
 {
 	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, dcdc = rdev_get_id(dev);
@@ -415,6 +416,8 @@
 	if (vsel == tps->info[dcdc]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps6507x_pmic_reg_read(tps, reg);
 	if (data < 0)
 		return data;
@@ -450,7 +453,8 @@
 }
 
 static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+					 int min_uV, int max_uV,
+					 unsigned *selector)
 {
 	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, ldo = rdev_get_id(dev);
@@ -483,6 +487,8 @@
 	if (vsel == tps->info[ldo]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps6507x_pmic_reg_read(tps, reg);
 	if (data < 0)
 		return data;
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
new file mode 100644
index 0000000..176a6be
--- /dev/null
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -0,0 +1,693 @@
+/*
+ * Regulator driver for TPS6524x PMIC
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#define REG_LDO_SET		0x0
+#define LDO_ILIM_MASK		1	/* 0 = 400-800 mA, 1 = 900-1500 mA */
+#define LDO_VSEL_MASK		0x0f
+#define LDO2_ILIM_SHIFT		12
+#define LDO2_VSEL_SHIFT		4
+#define LDO1_ILIM_SHIFT		8
+#define LDO1_VSEL_SHIFT		0
+
+#define REG_BLOCK_EN		0x1
+#define BLOCK_MASK		1
+#define BLOCK_LDO1_SHIFT	0
+#define BLOCK_LDO2_SHIFT	1
+#define BLOCK_LCD_SHIFT		2
+#define BLOCK_USB_SHIFT		3
+
+#define REG_DCDC_SET		0x2
+#define DCDC_VDCDC_MASK		0x1f
+#define DCDC_VDCDC1_SHIFT	0
+#define DCDC_VDCDC2_SHIFT	5
+#define DCDC_VDCDC3_SHIFT	10
+
+#define REG_DCDC_EN		0x3
+#define DCDCDCDC_EN_MASK	0x1
+#define DCDCDCDC1_EN_SHIFT	0
+#define DCDCDCDC1_PG_MSK	BIT(1)
+#define DCDCDCDC2_EN_SHIFT	2
+#define DCDCDCDC2_PG_MSK	BIT(3)
+#define DCDCDCDC3_EN_SHIFT	4
+#define DCDCDCDC3_PG_MSK	BIT(5)
+
+#define REG_USB			0x4
+#define USB_ILIM_SHIFT		0
+#define USB_ILIM_MASK		0x3
+#define USB_TSD_SHIFT		2
+#define USB_TSD_MASK		0x3
+#define USB_TWARN_SHIFT		4
+#define USB_TWARN_MASK		0x3
+#define USB_IWARN_SD		BIT(6)
+#define USB_FAST_LOOP		BIT(7)
+
+#define REG_ALARM		0x5
+#define ALARM_LDO1		BIT(0)
+#define ALARM_DCDC1		BIT(1)
+#define ALARM_DCDC2		BIT(2)
+#define ALARM_DCDC3		BIT(3)
+#define ALARM_LDO2		BIT(4)
+#define ALARM_USB_WARN		BIT(5)
+#define ALARM_USB_ALARM		BIT(6)
+#define ALARM_LCD		BIT(9)
+#define ALARM_TEMP_WARM		BIT(10)
+#define ALARM_TEMP_HOT		BIT(11)
+#define ALARM_NRST		BIT(14)
+#define ALARM_POWERUP		BIT(15)
+
+#define REG_INT_ENABLE		0x6
+#define INT_LDO1		BIT(0)
+#define INT_DCDC1		BIT(1)
+#define INT_DCDC2		BIT(2)
+#define INT_DCDC3		BIT(3)
+#define INT_LDO2		BIT(4)
+#define INT_USB_WARN		BIT(5)
+#define INT_USB_ALARM		BIT(6)
+#define INT_LCD			BIT(9)
+#define INT_TEMP_WARM		BIT(10)
+#define INT_TEMP_HOT		BIT(11)
+#define INT_GLOBAL_EN		BIT(15)
+
+#define REG_INT_STATUS		0x7
+#define STATUS_LDO1		BIT(0)
+#define STATUS_DCDC1		BIT(1)
+#define STATUS_DCDC2		BIT(2)
+#define STATUS_DCDC3		BIT(3)
+#define STATUS_LDO2		BIT(4)
+#define STATUS_USB_WARN		BIT(5)
+#define STATUS_USB_ALARM	BIT(6)
+#define STATUS_LCD		BIT(9)
+#define STATUS_TEMP_WARM	BIT(10)
+#define STATUS_TEMP_HOT		BIT(11)
+
+#define REG_SOFTWARE_RESET	0xb
+#define REG_WRITE_ENABLE	0xd
+#define REG_REV_ID		0xf
+
+#define N_DCDC			3
+#define N_LDO			2
+#define N_SWITCH		2
+#define N_REGULATORS		(3 /* DCDC */ + \
+				 2 /* LDO */  + \
+				 2 /* switch */)
+
+#define FIXED_ILIMSEL		BIT(0)
+#define FIXED_VOLTAGE		BIT(1)
+
+#define CMD_READ(reg)		((reg) << 6)
+#define CMD_WRITE(reg)		(BIT(5) | (reg) << 6)
+#define STAT_CLK		BIT(3)
+#define STAT_WRITE		BIT(2)
+#define STAT_INVALID		BIT(1)
+#define STAT_WP			BIT(0)
+
+struct field {
+	int		reg;
+	int		shift;
+	int		mask;
+};
+
+struct supply_info {
+	const char	*name;
+	int		n_voltages;
+	const int	*voltages;
+	int		fixed_voltage;
+	int		n_ilimsels;
+	const int	*ilimsels;
+	int		fixed_ilimsel;
+	int		flags;
+	struct field	enable, voltage, ilimsel;
+};
+
+struct tps6524x {
+	struct device		*dev;
+	struct spi_device	*spi;
+	struct mutex		lock;
+	struct regulator_desc	desc[N_REGULATORS];
+	struct regulator_dev	*rdev[N_REGULATORS];
+};
+
+static int __read_reg(struct tps6524x *hw, int reg)
+{
+	int error = 0;
+	u16 cmd = CMD_READ(reg), in;
+	u8 status;
+	struct spi_message m;
+	struct spi_transfer t[3];
+
+	spi_message_init(&m);
+	memset(t, 0, sizeof(t));
+
+	t[0].tx_buf = &cmd;
+	t[0].len = 2;
+	t[0].bits_per_word = 12;
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].rx_buf = &in;
+	t[1].len = 2;
+	t[1].bits_per_word = 16;
+	spi_message_add_tail(&t[1], &m);
+
+	t[2].rx_buf = &status;
+	t[2].len = 1;
+	t[2].bits_per_word = 4;
+	spi_message_add_tail(&t[2], &m);
+
+	error = spi_sync(hw->spi, &m);
+	if (error < 0)
+		return error;
+
+	dev_dbg(hw->dev, "read reg %d, data %x, status %x\n",
+		reg, in, status);
+
+	if (!(status & STAT_CLK) || (status & STAT_WRITE))
+		return -EIO;
+
+	if (status & STAT_INVALID)
+		return -EINVAL;
+
+	return in;
+}
+
+static int read_reg(struct tps6524x *hw, int reg)
+{
+	int ret;
+
+	mutex_lock(&hw->lock);
+	ret = __read_reg(hw, reg);
+	mutex_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int __write_reg(struct tps6524x *hw, int reg, int val)
+{
+	int error = 0;
+	u16 cmd = CMD_WRITE(reg), out = val;
+	u8 status;
+	struct spi_message m;
+	struct spi_transfer t[3];
+
+	spi_message_init(&m);
+	memset(t, 0, sizeof(t));
+
+	t[0].tx_buf = &cmd;
+	t[0].len = 2;
+	t[0].bits_per_word = 12;
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].tx_buf = &out;
+	t[1].len = 2;
+	t[1].bits_per_word = 16;
+	spi_message_add_tail(&t[1], &m);
+
+	t[2].rx_buf = &status;
+	t[2].len = 1;
+	t[2].bits_per_word = 4;
+	spi_message_add_tail(&t[2], &m);
+
+	error = spi_sync(hw->spi, &m);
+	if (error < 0)
+		return error;
+
+	dev_dbg(hw->dev, "wrote reg %d, data %x, status %x\n",
+		reg, out, status);
+
+	if (!(status & STAT_CLK) || !(status & STAT_WRITE))
+		return -EIO;
+
+	if (status & (STAT_INVALID | STAT_WP))
+		return -EINVAL;
+
+	return error;
+}
+
+static int __rmw_reg(struct tps6524x *hw, int reg, int mask, int val)
+{
+	int ret;
+
+	ret = __read_reg(hw, reg);
+	if (ret < 0)
+		return ret;
+
+	ret &= ~mask;
+	ret |= val;
+
+	ret = __write_reg(hw, reg, ret);
+
+	return (ret < 0) ? ret : 0;
+}
+
+static int rmw_protect(struct tps6524x *hw, int reg, int mask, int val)
+{
+	int ret, err;
+
+	mutex_lock(&hw->lock);
+
+	ret = __write_reg(hw, REG_WRITE_ENABLE, 1);
+	if (ret) {
+		dev_err(hw->dev, "failed to set write enable\n");
+		goto error;
+	}
+
+	ret = __rmw_reg(hw, reg, mask, val);
+	if (ret)
+		dev_err(hw->dev, "failed to rmw register %d\n", reg);
+
+	err = __write_reg(hw, REG_WRITE_ENABLE, 0);
+	if (err) {
+		dev_err(hw->dev, "failed to clear write enable\n");
+		if (!ret)
+			ret = err;
+	}
+
+error:
+	mutex_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int read_field(struct tps6524x *hw, const struct field *field)
+{
+	int tmp;
+
+	tmp = read_reg(hw, field->reg);
+	if (tmp < 0)
+		return tmp;
+
+	return (tmp >> field->shift) & field->mask;
+}
+
+static int write_field(struct tps6524x *hw, const struct field *field,
+		       int val)
+{
+	if (val & ~field->mask)
+		return -EOVERFLOW;
+
+	return rmw_protect(hw, field->reg,
+				    field->mask << field->shift,
+				    val << field->shift);
+}
+
+static const int dcdc1_voltages[] = {
+	 800000,  825000,  850000,  875000,
+	 900000,  925000,  950000,  975000,
+	1000000, 1025000, 1050000, 1075000,
+	1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000,
+	1300000, 1325000, 1350000, 1375000,
+	1400000, 1425000, 1450000, 1475000,
+	1500000, 1525000, 1550000, 1575000,
+};
+
+static const int dcdc2_voltages[] = {
+	1400000, 1450000, 1500000, 1550000,
+	1600000, 1650000, 1700000, 1750000,
+	1800000, 1850000, 1900000, 1950000,
+	2000000, 2050000, 2100000, 2150000,
+	2200000, 2250000, 2300000, 2350000,
+	2400000, 2450000, 2500000, 2550000,
+	2600000, 2650000, 2700000, 2750000,
+	2800000, 2850000, 2900000, 2950000,
+};
+
+static const int dcdc3_voltages[] = {
+	2400000, 2450000, 2500000, 2550000, 2600000,
+	2650000, 2700000, 2750000, 2800000, 2850000,
+	2900000, 2950000, 3000000, 3050000, 3100000,
+	3150000, 3200000, 3250000, 3300000, 3350000,
+	3400000, 3450000, 3500000, 3550000, 3600000,
+};
+
+static const int ldo1_voltages[] = {
+	4300000, 4350000, 4400000, 4450000,
+	4500000, 4550000, 4600000, 4650000,
+	4700000, 4750000, 4800000, 4850000,
+	4900000, 4950000, 5000000, 5050000,
+};
+
+static const int ldo2_voltages[] = {
+	1100000, 1150000, 1200000, 1250000,
+	1300000, 1700000, 1750000, 1800000,
+	1850000, 1900000, 3150000, 3200000,
+	3250000, 3300000, 3350000, 3400000,
+};
+
+static const int ldo_ilimsel[] = {
+	400000, 1500000
+};
+
+static const int usb_ilimsel[] = {
+	200000, 400000, 800000, 1000000
+};
+
+#define __MK_FIELD(_reg, _mask, _shift) \
+	{ .reg = (_reg), .mask = (_mask), .shift = (_shift), }
+
+static const struct supply_info supply_info[N_REGULATORS] = {
+	{
+		.name		= "DCDC1",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc1_voltages),
+		.voltages	= dcdc1_voltages,
+		.fixed_ilimsel	= 2400000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					     DCDCDCDC1_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC1_SHIFT),
+	},
+	{
+		.name		= "DCDC2",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc2_voltages),
+		.voltages	= dcdc2_voltages,
+		.fixed_ilimsel	= 1200000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					     DCDCDCDC2_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC2_SHIFT),
+	},
+	{
+		.name		= "DCDC3",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc3_voltages),
+		.voltages	= dcdc3_voltages,
+		.fixed_ilimsel	= 1200000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					DCDCDCDC3_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC3_SHIFT),
+	},
+	{
+		.name		= "LDO1",
+		.n_voltages	= ARRAY_SIZE(ldo1_voltages),
+		.voltages	= ldo1_voltages,
+		.n_ilimsels	= ARRAY_SIZE(ldo_ilimsel),
+		.ilimsels	= ldo_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LDO1_SHIFT),
+		.voltage	= __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+					     LDO1_VSEL_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+					     LDO1_ILIM_SHIFT),
+	},
+	{
+		.name		= "LDO2",
+		.n_voltages	= ARRAY_SIZE(ldo2_voltages),
+		.voltages	= ldo2_voltages,
+		.n_ilimsels	= ARRAY_SIZE(ldo_ilimsel),
+		.ilimsels	= ldo_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LDO2_SHIFT),
+		.voltage	= __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+					     LDO2_VSEL_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+					     LDO2_ILIM_SHIFT),
+	},
+	{
+		.name		= "USB",
+		.flags		= FIXED_VOLTAGE,
+		.fixed_voltage	= 5000000,
+		.n_ilimsels	= ARRAY_SIZE(usb_ilimsel),
+		.ilimsels	= usb_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_USB_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_USB, USB_ILIM_MASK,
+					     USB_ILIM_SHIFT),
+	},
+	{
+		.name		= "LCD",
+		.flags		= FIXED_VOLTAGE | FIXED_ILIMSEL,
+		.fixed_voltage	= 5000000,
+		.fixed_ilimsel	=  400000,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LCD_SHIFT),
+	},
+};
+
+static int list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return selector ? -EINVAL : info->fixed_voltage;
+
+	return ((selector < info->n_voltages) ?
+		info->voltages[selector] : -EINVAL);
+}
+
+static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	unsigned i;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return -EINVAL;
+
+	for (i = 0; i < info->n_voltages; i++)
+		if (min_uV <= info->voltages[i] &&
+		    max_uV >= info->voltages[i])
+			break;
+
+	if (i >= info->n_voltages)
+		i = info->n_voltages - 1;
+
+	*selector = i;
+
+	return write_field(hw, &info->voltage, i);
+}
+
+static int get_voltage(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int ret;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return info->fixed_voltage;
+
+	ret = read_field(hw, &info->voltage);
+	if (ret < 0)
+		return ret;
+	if (WARN_ON(ret >= info->n_voltages))
+		return -EIO;
+
+	return info->voltages[ret];
+}
+
+static int set_current_limit(struct regulator_dev *rdev, int min_uA,
+			     int max_uA)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int i;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_ILIMSEL)
+		return -EINVAL;
+
+	for (i = 0; i < info->n_ilimsels; i++)
+		if (min_uA <= info->ilimsels[i] &&
+		    max_uA >= info->ilimsels[i])
+			break;
+
+	if (i >= info->n_ilimsels)
+		return -EINVAL;
+
+	return write_field(hw, &info->ilimsel, i);
+}
+
+static int get_current_limit(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int ret;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_ILIMSEL)
+		return info->fixed_ilimsel;
+
+	ret = read_field(hw, &info->ilimsel);
+	if (ret < 0)
+		return ret;
+	if (WARN_ON(ret >= info->n_ilimsels))
+		return -EIO;
+
+	return info->ilimsels[ret];
+}
+
+static int enable_supply(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return write_field(hw, &info->enable, 1);
+}
+
+static int disable_supply(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return write_field(hw, &info->enable, 0);
+}
+
+static int is_supply_enabled(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return read_field(hw, &info->enable);
+}
+
+static struct regulator_ops regulator_ops = {
+	.is_enabled		= is_supply_enabled,
+	.enable			= enable_supply,
+	.disable		= disable_supply,
+	.get_voltage		= get_voltage,
+	.set_voltage		= set_voltage,
+	.list_voltage		= list_voltage,
+	.set_current_limit	= set_current_limit,
+	.get_current_limit	= get_current_limit,
+};
+
+static int __devexit pmic_remove(struct spi_device *spi)
+{
+	struct tps6524x *hw = spi_get_drvdata(spi);
+	int i;
+
+	if (!hw)
+		return 0;
+	for (i = 0; i < N_REGULATORS; i++) {
+		if (hw->rdev[i])
+			regulator_unregister(hw->rdev[i]);
+		hw->rdev[i] = NULL;
+	}
+	spi_set_drvdata(spi, NULL);
+	kfree(hw);
+	return 0;
+}
+
+static int __devinit pmic_probe(struct spi_device *spi)
+{
+	struct tps6524x *hw;
+	struct device *dev = &spi->dev;
+	const struct supply_info *info = supply_info;
+	struct regulator_init_data *init_data;
+	int ret = 0, i;
+
+	init_data = dev->platform_data;
+	if (!init_data) {
+		dev_err(dev, "could not find regulator platform data\n");
+		return -EINVAL;
+	}
+
+	hw = kzalloc(sizeof(struct tps6524x), GFP_KERNEL);
+	if (!hw) {
+		dev_err(dev, "cannot allocate regulator private data\n");
+		return -ENOMEM;
+	}
+	spi_set_drvdata(spi, hw);
+
+	memset(hw, 0, sizeof(struct tps6524x));
+	hw->dev = dev;
+	hw->spi = spi_dev_get(spi);
+	mutex_init(&hw->lock);
+
+	for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
+		hw->desc[i].name	= info->name;
+		hw->desc[i].id		= i;
+		hw->desc[i].n_voltages	= info->n_voltages;
+		hw->desc[i].ops		= &regulator_ops;
+		hw->desc[i].type	= REGULATOR_VOLTAGE;
+		hw->desc[i].owner	= THIS_MODULE;
+
+		if (info->flags & FIXED_VOLTAGE)
+			hw->desc[i].n_voltages = 1;
+
+		hw->rdev[i] = regulator_register(&hw->desc[i], dev,
+						 init_data, hw);
+		if (IS_ERR(hw->rdev[i])) {
+			ret = PTR_ERR(hw->rdev[i]);
+			hw->rdev[i] = NULL;
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	pmic_remove(spi);
+	return ret;
+}
+
+static struct spi_driver pmic_driver = {
+	.probe		= pmic_probe,
+	.remove		= __devexit_p(pmic_remove),
+	.driver		= {
+		.name	= "tps6524x",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pmic_driver_init(void)
+{
+	return spi_register_driver(&pmic_driver);
+}
+module_init(pmic_driver_init);
+
+static void __exit pmic_driver_exit(void)
+{
+	spi_unregister_driver(&pmic_driver);
+}
+module_exit(pmic_driver_exit);
+
+MODULE_DESCRIPTION("TPS6524X PMIC Driver");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tps6524x");
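
The pmic_probe() above walks dev->platform_data as an array of struct regulator_init_data, one entry per supply in supply_info[] order (DCDC1-3, LDO1, LDO2, USB, LCD). A minimal board-code sketch of hooking the chip up over SPI follows; the bus number, chip select, clock rate and constraints are illustrative assumptions, not part of this patch.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/regulator/machine.h>

/* one init_data entry per regulator, in supply_info[] order */
static struct regulator_init_data tps6524x_reg_data[7] = {
	[0] = {	/* DCDC1 */
		.constraints = {
			.min_uV		= 1000000,
			.max_uV		= 1500000,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		},
	},
	/* entries [1]..[6]: DCDC2, DCDC3, LDO1, LDO2, USB, LCD */
};

static struct spi_board_info example_board_spi[] __initdata = {
	{
		.modalias	= "tps6524x",	/* matches pmic_driver name */
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= tps6524x_reg_data,
	},
};

Board init code would then pass this table to spi_register_board_info().
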
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 6d20b04..bb04a75 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -85,7 +85,8 @@
 
 static int __tps6586x_ldo_set_voltage(struct device *parent,
 				      struct tps6586x_regulator *ri,
-				      int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	int val, uV;
 	uint8_t mask;
@@ -100,6 +101,8 @@
 		/* use the first in-range value */
 		if (min_uV <= uV && uV <= max_uV) {
 
+			*selector = val;
+
 			val <<= ri->volt_shift;
 			mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
 
@@ -111,12 +114,13 @@
 }
 
 static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
 
-	return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+	return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+					  selector);
 }
 
 static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
@@ -140,13 +144,14 @@
 }
 
 static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
 	int ret;
 
-	ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+	ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+					 selector);
 	if (ret)
 		return ret;
 
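The new selector out-parameter threaded through these set_voltage() implementations lets the regulator core report the voltage it actually programmed without re-reading the hardware. A simplified caller-side sketch (assumed logic, not the core's verbatim code):

#include <linux/regulator/driver.h>

/* Conceptual sketch only: report the voltage that was actually programmed. */
static int example_set_and_report(struct regulator_dev *rdev,
				  int min_uV, int max_uV)
{
	unsigned selector;
	int ret;

	ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, &selector);
	if (ret < 0)
		return ret;

	/* microvolts actually configured, derived from the returned selector */
	if (rdev->desc->ops->list_voltage)
		return rdev->desc->ops->list_voltage(rdev, selector);

	return 0;
}
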
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index a57262a..bd332cf 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -329,7 +329,8 @@
 }
 
 static int
-twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			vsel;
@@ -345,9 +346,11 @@
 		/* REVISIT for VAUX2, first match may not be best/lowest */
 
 		/* use the first in-range value */
-		if (min_uV <= uV && uV <= max_uV)
+		if (min_uV <= uV && uV <= max_uV) {
+			*selector = vsel;
 			return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
 							VREG_VOLTAGE, vsel);
+		}
 	}
 
 	return -EDOM;
@@ -389,7 +392,8 @@
 }
 
 static int
-twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			vsel;
@@ -402,6 +406,7 @@
 	 * mV = 1000mv + 100mv * (vsel - 1)
 	 */
 	vsel = (min_uV/1000 - 1000)/100 + 1;
+	*selector = vsel;
 	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
 
 }
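
As a worked example of the twl6030ldo_set_voltage() formula above (the request value is illustrative): min_uV = 1800000 gives vsel = (1800000/1000 - 1000)/100 + 1 = (1800 - 1000)/100 + 1 = 9, which the inverse relation mV = 1000 + 100 * (vsel - 1) maps back to 1800 mV.
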
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index dbfaf59..06df898 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@
 		return REGULATOR_MODE_IDLE;
 	default:
 		BUG();
+		return -EINVAL;
 	}
 }
 
@@ -302,7 +303,7 @@
 }
 
 static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -314,6 +315,8 @@
 	if (vsel < 0)
 		return vsel;
 
+	*selector = vsel;
+
 	/* If this value is already set then do a GPIO update if we can */
 	if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
 		return wm831x_buckv_set_dvs(rdev, 0);
@@ -375,14 +378,14 @@
 	return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
 }
 
-static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 
 	if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
-		return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
+		return dcdc->dvs_vsel;
 	else
-		return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
+		return dcdc->on_vsel;
 }
 
 /* Current limit options */
@@ -424,7 +427,7 @@
 
 static struct regulator_ops wm831x_buckv_ops = {
 	.set_voltage = wm831x_buckv_set_voltage,
-	.get_voltage = wm831x_buckv_get_voltage,
+	.get_voltage_sel = wm831x_buckv_get_voltage_sel,
 	.list_voltage = wm831x_buckv_list_voltage,
 	.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
 	.set_current_limit = wm831x_buckv_set_current_limit,
@@ -636,7 +639,7 @@
 }
 
 static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg,
-					int min_uV, int max_uV)
+					int min_uV, int max_uV, int *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -650,16 +653,20 @@
 	if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_buckp_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV,
+				    unsigned *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
 
-	return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV,
+					    selector);
 }
 
 static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev,
@@ -667,11 +674,12 @@
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
+	unsigned selector;
 
-	return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_buckp_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckp_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -682,12 +690,12 @@
 	if (val < 0)
 		return val;
 
-	return wm831x_buckp_list_voltage(rdev, val & WM831X_DC3_ON_VSEL_MASK);
+	return val & WM831X_DC3_ON_VSEL_MASK;
 }
 
 static struct regulator_ops wm831x_buckp_ops = {
 	.set_voltage = wm831x_buckp_set_voltage,
-	.get_voltage = wm831x_buckp_get_voltage,
+	.get_voltage_sel = wm831x_buckp_get_voltage_sel,
 	.list_voltage = wm831x_buckp_list_voltage,
 	.set_suspend_voltage = wm831x_buckp_set_suspend_voltage,
 
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 9edf8f6..c94fc5b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -113,7 +113,8 @@
 }
 
 static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
-					 int min_uV, int max_uV)
+					 int min_uV, int max_uV,
+					 unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -133,16 +134,20 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+				     int min_uV, int max_uV,
+				     unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
 
-	return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+					     selector);
 }
 
 static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -150,11 +155,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+	unsigned int selector;
 
-	return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_gp_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -167,7 +173,7 @@
 
 	ret &= WM831X_LDO1_ON_VSEL_MASK;
 
-	return wm831x_gp_ldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
@@ -287,7 +293,7 @@
 
 static struct regulator_ops wm831x_gp_ldo_ops = {
 	.list_voltage = wm831x_gp_ldo_list_voltage,
-	.get_voltage = wm831x_gp_ldo_get_voltage,
+	.get_voltage_sel = wm831x_gp_ldo_get_voltage_sel,
 	.set_voltage = wm831x_gp_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
 	.get_mode = wm831x_gp_ldo_get_mode,
@@ -413,7 +419,8 @@
 }
 
 static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
-					 int min_uV, int max_uV)
+				       int min_uV, int max_uV,
+				       unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -433,16 +440,19 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_aldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
 
-	return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+					   selector);
 }
 
 static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -450,11 +460,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+	unsigned int selector;
 
-	return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_aldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_aldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -467,7 +478,7 @@
 
 	ret &= WM831X_LDO7_ON_VSEL_MASK;
 
-	return wm831x_aldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
@@ -548,7 +559,7 @@
 
 static struct regulator_ops wm831x_aldo_ops = {
 	.list_voltage = wm831x_aldo_list_voltage,
-	.get_voltage = wm831x_aldo_get_voltage,
+	.get_voltage_sel = wm831x_aldo_get_voltage_sel,
 	.set_voltage = wm831x_aldo_set_voltage,
 	.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
 	.get_mode = wm831x_aldo_get_mode,
@@ -666,7 +677,8 @@
 
 static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
 					    int reg,
-					    int min_uV, int max_uV)
+					    int min_uV, int max_uV,
+					    unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -680,16 +692,20 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
 
-	return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+						selector);
 }
 
 static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -697,11 +713,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
+	unsigned selector;
 
-	return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_alive_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -714,7 +731,7 @@
 
 	ret &= WM831X_LDO11_ON_VSEL_MASK;
 
-	return wm831x_alive_ldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
@@ -736,7 +753,7 @@
 
 static struct regulator_ops wm831x_alive_ldo_ops = {
 	.list_voltage = wm831x_alive_ldo_list_voltage,
-	.get_voltage = wm831x_alive_ldo_get_voltage,
+	.get_voltage_sel = wm831x_alive_ldo_get_voltage_sel,
 	.set_voltage = wm831x_alive_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
 	.get_status = wm831x_alive_ldo_get_status,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index fe4b8a8..1bcb22c 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -360,7 +360,7 @@
 EXPORT_SYMBOL_GPL(wm8350_isink_set_flash);
 
 static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV,
-	int max_uV)
+				   int max_uV, unsigned *selector)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, dcdc = rdev_get_id(rdev), mV,
@@ -397,17 +397,18 @@
 		return -EINVAL;
 	}
 
+	*selector = mV;
+
 	/* all DCDCs have same mV bits */
 	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
 	wm8350_reg_write(wm8350, volt_reg, val | mV);
 	return 0;
 }
 
-static int wm8350_dcdc_get_voltage(struct regulator_dev *rdev)
+static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, dcdc = rdev_get_id(rdev);
-	u16 val;
 
 	switch (dcdc) {
 	case WM8350_DCDC_1:
@@ -429,8 +430,7 @@
 	}
 
 	/* all DCDCs have same mV bits */
-	val = wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
-	return wm8350_dcdc_val_to_mvolts(val) * 1000;
+	return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
 }
 
 static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev,
@@ -754,7 +754,7 @@
 }
 
 static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV,
-	int max_uV)
+				  int max_uV, unsigned *selector)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000,
@@ -797,17 +797,18 @@
 		return -EINVAL;
 	}
 
+	*selector = mV;
+
 	/* all LDOs have same mV bits */
 	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
 	wm8350_reg_write(wm8350, volt_reg, val | mV);
 	return 0;
 }
 
-static int wm8350_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, ldo = rdev_get_id(rdev);
-	u16 val;
 
 	switch (ldo) {
 	case WM8350_LDO_1:
@@ -827,8 +828,7 @@
 	}
 
 	/* all LDOs have same mV bits */
-	val = wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
-	return wm8350_ldo_val_to_mvolts(val) * 1000;
+	return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
 }
 
 static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
@@ -1225,7 +1225,7 @@
 
 static struct regulator_ops wm8350_dcdc_ops = {
 	.set_voltage = wm8350_dcdc_set_voltage,
-	.get_voltage = wm8350_dcdc_get_voltage,
+	.get_voltage_sel = wm8350_dcdc_get_voltage_sel,
 	.list_voltage = wm8350_dcdc_list_voltage,
 	.enable = wm8350_dcdc_enable,
 	.disable = wm8350_dcdc_disable,
@@ -1249,7 +1249,7 @@
 
 static struct regulator_ops wm8350_ldo_ops = {
 	.set_voltage = wm8350_ldo_set_voltage,
-	.get_voltage = wm8350_ldo_get_voltage,
+	.get_voltage_sel = wm8350_ldo_get_voltage_sel,
 	.list_voltage = wm8350_ldo_list_voltage,
 	.enable = wm8350_ldo_enable,
 	.disable = wm8350_ldo_disable,
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 924c7eb..b42d01c 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -67,7 +67,7 @@
 }
 
 static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -93,6 +93,8 @@
 		val += 0xf;
 	}
 
+	*selector = val;
+
 	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
 			       WM8400_LDO1_VSEL_MASK, val);
 }
@@ -156,7 +158,7 @@
 }
 
 static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -171,6 +173,8 @@
 		return -EINVAL;
 	BUG_ON(850000 + (25000 * val) < min_uV);
 
+	*selector = val;
+
 	return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
 			       WM8400_DC1_VSEL_MASK, val);
 }
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 03713bc..35b2958 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -86,7 +86,7 @@
 	return (selector * 100000) + 2400000;
 }
 
-static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo1_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int val;
@@ -95,13 +95,11 @@
 	if (val < 0)
 		return val;
 
-	val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
-
-	return wm8994_ldo1_list_voltage(rdev, val);
+	return (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
 }
 
 static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *s)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int selector, v;
@@ -111,6 +109,7 @@
 	if (v < 0 || v > max_uV)
 		return -EINVAL;
 
+	*s = selector;
 	selector <<= WM8994_LDO1_VSEL_SHIFT;
 
 	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
@@ -124,20 +123,29 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo1_list_voltage,
-	.get_voltage = wm8994_ldo1_get_voltage,
+	.get_voltage_sel = wm8994_ldo1_get_voltage_sel,
 	.set_voltage = wm8994_ldo1_set_voltage,
 };
 
 static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
 				    unsigned int selector)
 {
+	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
 	if (selector > WM8994_LDO2_MAX_SELECTOR)
 		return -EINVAL;
 
-	return (selector * 100000) + 900000;
+	switch (ldo->wm8994->type) {
+	case WM8994:
+		return (selector * 100000) + 900000;
+	case WM8958:
+		return (selector * 100000) + 1000000;
+	default:
+		return -EINVAL;
+	}
 }
 
-static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo2_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int val;
@@ -146,22 +154,31 @@
 	if (val < 0)
 		return val;
 
-	val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
-
-	return wm8994_ldo2_list_voltage(rdev, val);
+	return (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
 }
 
 static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *s)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int selector, v;
 
-	selector = (min_uV - 900000) / 100000;
+	switch (ldo->wm8994->type) {
+	case WM8994:
+		selector = (min_uV - 900000) / 100000;
+		break;
+	case WM8958:
+		selector = (min_uV - 1000000) / 100000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	v = wm8994_ldo2_list_voltage(rdev, selector);
 	if (v < 0 || v > max_uV)
 		return -EINVAL;
 
+	*s = selector;
 	selector <<= WM8994_LDO2_VSEL_SHIFT;
 
 	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
@@ -175,7 +192,7 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo2_list_voltage,
-	.get_voltage = wm8994_ldo2_get_voltage,
+	.get_voltage_sel = wm8994_ldo2_get_voltage_sel,
 	.set_voltage = wm8994_ldo2_set_voltage,
 };
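
Several drivers in this series (wm831x, wm8350, wm8994) move from .get_voltage to .get_voltage_sel, returning the raw VSEL bit-field and leaving the microvolt conversion to the core. A simplified sketch of the core-side pairing (assumed, not copied from the framework) shows why the two are equivalent:

#include <linux/regulator/driver.h>

static int example_get_voltage(struct regulator_dev *rdev)
{
	struct regulator_ops *ops = rdev->desc->ops;
	int sel = ops->get_voltage_sel(rdev);

	if (sel < 0)
		return sel;

	/* convert the selector with the driver's existing list_voltage op */
	return ops->list_voltage(rdev, sel);
}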
 
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e6539cb..c404b61 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -16,6 +16,7 @@
 #include <linux/kdev_t.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "rtc-core.h"
 
@@ -142,6 +143,7 @@
 	rtc->id = id;
 	rtc->ops = ops;
 	rtc->owner = owner;
+	rtc->irq_freq = 1;
 	rtc->max_user_freq = 64;
 	rtc->dev.parent = dev;
 	rtc->dev.class = rtc_class;
@@ -152,6 +154,18 @@
 	spin_lock_init(&rtc->irq_task_lock);
 	init_waitqueue_head(&rtc->irq_queue);
 
+	/* Init timerqueue */
+	timerqueue_init_head(&rtc->timerqueue);
+	INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+	/* Init aie timer */
+	rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
+	/* Init uie timer */
+	rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
+	/* Init pie timer */
+	hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rtc->pie_timer.function = rtc_pie_update_irq;
+	rtc->pie_enabled = 0;
+
 	strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
 	dev_set_name(&rtc->dev, "rtc%d", id);
 
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c8162..cb2f072 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,6 +14,24 @@
 #include <linux/rtc.h>
 #include <linux/sched.h>
 #include <linux/log2.h>
+#include <linux/workqueue.h>
+
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+
+static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+{
+	int err;
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->read_time)
+		err = -EINVAL;
+	else {
+		memset(tm, 0, sizeof(struct rtc_time));
+		err = rtc->ops->read_time(rtc->dev.parent, tm);
+	}
+	return err;
+}
 
 int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 {
@@ -23,15 +41,7 @@
 	if (err)
 		return err;
 
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->read_time)
-		err = -EINVAL;
-	else {
-		memset(tm, 0, sizeof(struct rtc_time));
-		err = rtc->ops->read_time(rtc->dev.parent, tm);
-	}
-
+	err = __rtc_read_time(rtc, tm);
 	mutex_unlock(&rtc->ops_lock);
 	return err;
 }
@@ -106,189 +116,61 @@
 }
 EXPORT_SYMBOL_GPL(rtc_set_mmss);
 
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	int err;
 
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
 	if (rtc->ops == NULL)
 		err = -ENODEV;
 	else if (!rtc->ops->read_alarm)
 		err = -EINVAL;
 	else {
 		memset(alarm, 0, sizeof(struct rtc_wkalrm));
-		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
+		alarm->enabled = rtc->aie_timer.enabled;
+		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
 	}
-
 	mutex_unlock(&rtc->ops_lock);
+
 	return err;
 }
-
-int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
-{
-	int err;
-	struct rtc_time before, now;
-	int first_time = 1;
-	unsigned long t_now, t_alm;
-	enum { none, day, month, year } missing = none;
-	unsigned days;
-
-	/* The lower level RTC driver may return -1 in some fields,
-	 * creating invalid alarm->time values, for reasons like:
-	 *
-	 *   - The hardware may not be capable of filling them in;
-	 *     many alarms match only on time-of-day fields, not
-	 *     day/month/year calendar data.
-	 *
-	 *   - Some hardware uses illegal values as "wildcard" match
-	 *     values, which non-Linux firmware (like a BIOS) may try
-	 *     to set up as e.g. "alarm 15 minutes after each hour".
-	 *     Linux uses only oneshot alarms.
-	 *
-	 * When we see that here, we deal with it by using values from
-	 * a current RTC timestamp for any missing (-1) values.  The
-	 * RTC driver prevents "periodic alarm" modes.
-	 *
-	 * But this can be racey, because some fields of the RTC timestamp
-	 * may have wrapped in the interval since we read the RTC alarm,
-	 * which would lead to us inserting inconsistent values in place
-	 * of the -1 fields.
-	 *
-	 * Reading the alarm and timestamp in the reverse sequence
-	 * would have the same race condition, and not solve the issue.
-	 *
-	 * So, we must first read the RTC timestamp,
-	 * then read the RTC alarm value,
-	 * and then read a second RTC timestamp.
-	 *
-	 * If any fields of the second timestamp have changed
-	 * when compared with the first timestamp, then we know
-	 * our timestamp may be inconsistent with that used by
-	 * the low-level rtc_read_alarm_internal() function.
-	 *
-	 * So, when the two timestamps disagree, we just loop and do
-	 * the process again to get a fully consistent set of values.
-	 *
-	 * This could all instead be done in the lower level driver,
-	 * but since more than one lower level RTC implementation needs it,
-	 * then it's probably best best to do it here instead of there..
-	 */
-
-	/* Get the "before" timestamp */
-	err = rtc_read_time(rtc, &before);
-	if (err < 0)
-		return err;
-	do {
-		if (!first_time)
-			memcpy(&before, &now, sizeof(struct rtc_time));
-		first_time = 0;
-
-		/* get the RTC alarm values, which may be incomplete */
-		err = rtc_read_alarm_internal(rtc, alarm);
-		if (err)
-			return err;
-		if (!alarm->enabled)
-			return 0;
-
-		/* full-function RTCs won't have such missing fields */
-		if (rtc_valid_tm(&alarm->time) == 0)
-			return 0;
-
-		/* get the "after" timestamp, to detect wrapped fields */
-		err = rtc_read_time(rtc, &now);
-		if (err < 0)
-			return err;
-
-		/* note that tm_sec is a "don't care" value here: */
-	} while (   before.tm_min   != now.tm_min
-		 || before.tm_hour  != now.tm_hour
-		 || before.tm_mon   != now.tm_mon
-		 || before.tm_year  != now.tm_year);
-
-	/* Fill in the missing alarm fields using the timestamp; we
-	 * know there's at least one since alarm->time is invalid.
-	 */
-	if (alarm->time.tm_sec == -1)
-		alarm->time.tm_sec = now.tm_sec;
-	if (alarm->time.tm_min == -1)
-		alarm->time.tm_min = now.tm_min;
-	if (alarm->time.tm_hour == -1)
-		alarm->time.tm_hour = now.tm_hour;
-
-	/* For simplicity, only support date rollover for now */
-	if (alarm->time.tm_mday == -1) {
-		alarm->time.tm_mday = now.tm_mday;
-		missing = day;
-	}
-	if (alarm->time.tm_mon == -1) {
-		alarm->time.tm_mon = now.tm_mon;
-		if (missing == none)
-			missing = month;
-	}
-	if (alarm->time.tm_year == -1) {
-		alarm->time.tm_year = now.tm_year;
-		if (missing == none)
-			missing = year;
-	}
-
-	/* with luck, no rollover is needed */
-	rtc_tm_to_time(&now, &t_now);
-	rtc_tm_to_time(&alarm->time, &t_alm);
-	if (t_now < t_alm)
-		goto done;
-
-	switch (missing) {
-
-	/* 24 hour rollover ... if it's now 10am Monday, an alarm that
-	 * that will trigger at 5am will do so at 5am Tuesday, which
-	 * could also be in the next month or year.  This is a common
-	 * case, especially for PCs.
-	 */
-	case day:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
-		t_alm += 24 * 60 * 60;
-		rtc_time_to_tm(t_alm, &alarm->time);
-		break;
-
-	/* Month rollover ... if it's the 31th, an alarm on the 3rd will
-	 * be next month.  An alarm matching on the 30th, 29th, or 28th
-	 * may end up in the month after that!  Many newer PCs support
-	 * this type of alarm.
-	 */
-	case month:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
-		do {
-			if (alarm->time.tm_mon < 11)
-				alarm->time.tm_mon++;
-			else {
-				alarm->time.tm_mon = 0;
-				alarm->time.tm_year++;
-			}
-			days = rtc_month_days(alarm->time.tm_mon,
-					alarm->time.tm_year);
-		} while (days < alarm->time.tm_mday);
-		break;
-
-	/* Year rollover ... easy except for leap years! */
-	case year:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
-		do {
-			alarm->time.tm_year++;
-		} while (rtc_valid_tm(&alarm->time) != 0);
-		break;
-
-	default:
-		dev_warn(&rtc->dev, "alarm rollover not handled\n");
-	}
-
-done:
-	return 0;
-}
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
+int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+	struct rtc_time tm;
+	long now, scheduled;
+	int err;
+
+	err = rtc_valid_tm(&alarm->time);
+	if (err)
+		return err;
+	rtc_tm_to_time(&alarm->time, &scheduled);
+
+	/* Make sure we're not setting alarms in the past */
+	err = __rtc_read_time(rtc, &tm);
+	rtc_tm_to_time(&tm, &now);
+	if (scheduled <= now)
+		return -ETIME;
+	/*
+	 * XXX - We just checked to make sure the alarm time is not
+	 * in the past, but there is still a race window where if
+	 * the alarm is set for the next second and the second ticks
+	 * over right here, before we set the alarm.
+	 */
+
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->set_alarm)
+		err = -EINVAL;
+	else
+		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+	return err;
+}
+
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	int err;
@@ -300,14 +182,14 @@
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->set_alarm)
-		err = -EINVAL;
-	else
-		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
+	if (rtc->aie_timer.enabled) {
+		rtc_timer_remove(rtc, &rtc->aie_timer);
+	}
+	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+	rtc->aie_timer.period = ktime_set(0, 0);
+	if (alarm->enabled) {
+		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+	}
 	mutex_unlock(&rtc->ops_lock);
 	return err;
 }
@@ -319,7 +201,16 @@
 	if (err)
 		return err;
 
-	if (!rtc->ops)
+	if (rtc->aie_timer.enabled != enabled) {
+		if (enabled)
+			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+		else
+			rtc_timer_remove(rtc, &rtc->aie_timer);
+	}
+
+	if (err)
+		/* nothing */;
+	else if (!rtc->ops)
 		err = -ENODEV;
 	else if (!rtc->ops->alarm_irq_enable)
 		err = -EINVAL;
@@ -340,19 +231,28 @@
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
 	if (enabled == 0 && rtc->uie_irq_active) {
 		mutex_unlock(&rtc->ops_lock);
-		return rtc_dev_update_irq_enable_emul(rtc, enabled);
+		return rtc_dev_update_irq_enable_emul(rtc, 0);
 	}
 #endif
+	/* make sure we're changing state */
+	if (rtc->uie_rtctimer.enabled == enabled)
+		goto out;
 
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->update_irq_enable)
-		err = -EINVAL;
-	else
-		err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
+	if (enabled) {
+		struct rtc_time tm;
+		ktime_t now, onesec;
 
+		__rtc_read_time(rtc, &tm);
+		onesec = ktime_set(1, 0);
+		now = rtc_tm_to_ktime(tm);
+		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+		rtc->uie_rtctimer.period = ktime_set(1, 0);
+		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+	} else
+		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
+
+out:
 	mutex_unlock(&rtc->ops_lock);
-
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
 	/*
 	 * Enable emulation if the driver did not provide
@@ -364,11 +264,91 @@
 		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
 #endif
 	return err;
+
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
 
+
 /**
- * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
+ * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
+ * @rtc: pointer to the rtc device
+ *
+ * This function is called when an AIE, UIE or PIE mode interrupt
+ * has occured (or been emulated).
+ *
+ * Triggers the registered irq_task function callback.
+ */
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+{
+	unsigned long flags;
+
+	/* mark one irq of the appropriate mode */
+	spin_lock_irqsave(&rtc->irq_lock, flags);
+	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
+	spin_unlock_irqrestore(&rtc->irq_lock, flags);
+
+	/* call the task func */
+	spin_lock_irqsave(&rtc->irq_task_lock, flags);
+	if (rtc->irq_task)
+		rtc->irq_task->func(rtc->irq_task->private_data);
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+
+	wake_up_interruptible(&rtc->irq_queue);
+	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
+}
+
+
+/**
+ * rtc_aie_update_irq - AIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the aie_timer expires.
+ */
+void rtc_aie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
+}
+
+
+/**
+ * rtc_uie_update_irq - UIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the uie_timer expires.
+ */
+void rtc_uie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1,  RTC_UF);
+}
+
+
+/**
+ * rtc_pie_update_irq - PIE mode hrtimer hook
+ * @timer: pointer to the pie mode hrtimer
+ *
+ * This function is used to emulate PIE mode interrupts
+ * using an hrtimer. This function is called when the periodic
+ * hrtimer expires.
+ */
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
+{
+	struct rtc_device *rtc;
+	ktime_t period;
+	int count;
+	rtc = container_of(timer, struct rtc_device, pie_timer);
+
+	period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+	count = hrtimer_forward_now(timer, period);
+
+	rtc_handle_legacy_irq(rtc, count, RTC_PF);
+
+	return HRTIMER_RESTART;
+}
+
+/**
+ * rtc_update_irq - Triggered when an RTC interrupt occurs.
  * @rtc: the rtc device
  * @num: how many irqs are being reported (usually one)
  * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
@@ -377,19 +357,7 @@
 void rtc_update_irq(struct rtc_device *rtc,
 		unsigned long num, unsigned long events)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc->irq_lock, flags);
-	rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
-	spin_unlock_irqrestore(&rtc->irq_lock, flags);
-
-	spin_lock_irqsave(&rtc->irq_task_lock, flags);
-	if (rtc->irq_task)
-		rtc->irq_task->func(rtc->irq_task->private_data);
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
-	wake_up_interruptible(&rtc->irq_queue);
-	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
+	schedule_work(&rtc->irqwork);
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq);
 
@@ -477,18 +445,20 @@
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_state == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
-	if (err == 0)
-		err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+	if (enabled) {
+		ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+	} else {
+		hrtimer_cancel(&rtc->pie_timer);
+	}
+	rtc->pie_enabled = enabled;
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
 	return err;
 }
@@ -509,21 +479,206 @@
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_freq == NULL)
-		return -ENXIO;
+	if (freq <= 0)
+		return -EINVAL;
 
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
 	if (err == 0) {
-		err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
-		if (err == 0)
-			rtc->irq_freq = freq;
+		rtc->irq_freq = freq;
+		if (rtc->pie_enabled) {
+			ktime_t period;
+			hrtimer_cancel(&rtc->pie_timer);
+			period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+			hrtimer_start(&rtc->pie_timer, period,
+					HRTIMER_MODE_REL);
+		}
 	}
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 	return err;
 }
 EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+
+/**
+ * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being added.
+ *
+ * Enqueues a timer onto the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Sets the enabled bit on the added timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	timer->enabled = 1;
+	timerqueue_add(&rtc->timerqueue, &timer->node);
+	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(timer->node.expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+		else if (err) {
+			timerqueue_del(&rtc->timerqueue, &timer->node);
+			timer->enabled = 0;
+			return err;
+		}
+	}
+	return 0;
+}
+
+/**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being removed.
+ *
+ * Removes a timer from the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Clears the enabled bit on the removed timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+	timerqueue_del(&rtc->timerqueue, &timer->node);
+	timer->enabled = 0;
+	if (next == &timer->node) {
+		struct rtc_wkalrm alarm;
+		int err;
+		next = timerqueue_getnext(&rtc->timerqueue);
+		if (!next)
+			return;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @work: the irqwork work_struct embedded in the rtc_device
+ *
+ * Expires rtc timers. Reprograms the next alarm event if needed.
+ * Called via the system work queue.
+ *
+ * Serializes access to timerqueue via ops_lock mutex
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+	struct rtc_timer *timer;
+	struct timerqueue_node *next;
+	ktime_t now;
+	struct rtc_time tm;
+
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, irqwork);
+
+	mutex_lock(&rtc->ops_lock);
+again:
+	__rtc_read_time(rtc, &tm);
+	now = rtc_tm_to_ktime(tm);
+	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+		if (next->expires.tv64 > now.tv64)
+			break;
+
+		/* expire timer */
+		timer = container_of(next, struct rtc_timer, node);
+		timerqueue_del(&rtc->timerqueue, &timer->node);
+		timer->enabled = 0;
+		if (timer->task.func)
+			timer->task.func(timer->task.private_data);
+
+		/* Re-add/fwd periodic timers */
+		if (ktime_to_ns(timer->period)) {
+			timer->node.expires = ktime_add(timer->node.expires,
+							timer->period);
+			timer->enabled = 1;
+			timerqueue_add(&rtc->timerqueue, &timer->node);
+		}
+	}
+
+	/* Set next alarm */
+	if (next) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			goto again;
+	}
+
+	mutex_unlock(&rtc->ops_lock);
+}
+
+
+/* rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be initialized
+ * @f: function pointer to be called when timer fires
+ * @data: private data passed to function pointer
+ *
+ * Kernel interface to initializing an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data)
+{
+	timerqueue_init(&timer->node);
+	timer->enabled = 0;
+	timer->task.func = f;
+	timer->task.private_data = data;
+}
+
+/* rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @rtc: rtc device to be used
+ * @timer: timer being set
+ * @expires: time at which to expire the timer
+ * @period: period at which the timer will recur (zero for a one-shot timer)
+ *
+ * Kernel interface to set an rtc_timer
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
+			ktime_t expires, ktime_t period)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+
+	timer->node.expires = expires;
+	timer->period = period;
+
+	ret = rtc_timer_enqueue(rtc, timer);
+
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+/* rtc_timer_cancel - Stops an rtc_timer
+ * @rtc: rtc device to be used
+ * @timer: timer being cancelled
+ *
+ * Kernel interface to cancel an rtc_timer
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+
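The rtc_timer_init()/rtc_timer_start()/rtc_timer_cancel() interface added above can be driven from other kernel code roughly as follows; the callback, the ten-second delay and the one-shot (zero) period are illustrative assumptions, not taken from this patch:

#include <linux/kernel.h>
#include <linux/rtc.h>

static struct rtc_timer example_timer;

static void example_timer_fn(void *data)
{
	pr_info("rtc timer expired\n");
}

static int example_arm_timer(struct rtc_device *rtc)
{
	struct rtc_time tm;
	ktime_t expires;
	int err;

	rtc_timer_init(&example_timer, example_timer_fn, NULL);

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;

	/* fire once, ten seconds from the current RTC time */
	expires = ktime_add(rtc_tm_to_ktime(tm), ktime_set(10, 0));
	return rtc_timer_start(rtc, &example_timer, expires, ktime_set(0, 0));
}
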
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index b2752b6..e725d51 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -134,36 +134,29 @@
 	return ret;
 }
 
-static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
 	int ret = 0;
 
 	spin_lock_irq(&rtc->lock);
 
-	switch (cmd) {
-	case RTC_AIE_ON:
+	if (enabled) {
 		if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
 			ret = -EINVAL;
-			break;
+			goto out;
 		}
 		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
 				| RTC_BIT(CTRL_TOPEN));
 		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
 		rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
-		break;
-	case RTC_AIE_OFF:
+	} else {
 		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
 				& ~RTC_BIT(CTRL_TOPEN));
 		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
 		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
-		break;
-	default:
-		ret = -ENOIOCTLCMD;
-		break;
 	}
-
+out:
 	spin_unlock_irq(&rtc->lock);
 
 	return ret;
@@ -195,11 +188,11 @@
 }
 
 static struct rtc_class_ops at32_rtc_ops = {
-	.ioctl		= at32_rtc_ioctl,
 	.read_time	= at32_rtc_readtime,
 	.set_time	= at32_rtc_settime,
 	.read_alarm	= at32_rtc_readalarm,
 	.set_alarm	= at32_rtc_setalarm,
+	.alarm_irq_enable = at32_rtc_alarm_irq_enable,
 };
 
 static int __init at32_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index bc8bbca..26d1cf5 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -195,13 +195,6 @@
 
 	/* important:  scrub old status before enabling IRQs */
 	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
-		break;
-	case RTC_AIE_ON:	/* alarm on */
-		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
-		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
-		break;
 	case RTC_UIE_OFF:	/* update off */
 		at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
 		break;
@@ -217,6 +210,18 @@
 	return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	pr_debug("%s(): cmd=%08x\n", __func__, enabled);
+
+	if (enabled) {
+		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+	} else
+		at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+
+	return 0;
+}
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -270,6 +275,7 @@
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
 	.proc		= at91_rtc_proc,
+	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f677e07..5469c52 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -229,12 +229,6 @@
 	dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
 
 	switch (cmd) {
-	case RTC_AIE_OFF:		/* alarm off */
-		rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
-		break;
-	case RTC_AIE_ON:		/* alarm on */
-		rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
-		break;
 	case RTC_UIE_OFF:		/* update off */
 		rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
 		break;
@@ -249,6 +243,19 @@
 	return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct sam9_rtc *rtc = dev_get_drvdata(dev);
+	u32 mr = rtt_readl(rtc, MR);
+
+	dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
+	if (enabled)
+		rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+	else
+		rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+	return 0;
+}
+
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -302,6 +309,7 @@
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
 	.proc		= at91_rtc_proc,
+	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index b4b6087..17971d9 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -259,15 +259,6 @@
 		bfin_rtc_int_clear(~RTC_ISTAT_SEC);
 		break;
 
-	case RTC_AIE_ON:
-		dev_dbg_stamp(dev);
-		bfin_rtc_int_set_alarm(rtc);
-		break;
-	case RTC_AIE_OFF:
-		dev_dbg_stamp(dev);
-		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
-		break;
-
 	default:
 		dev_dbg_stamp(dev);
 		ret = -ENOIOCTLCMD;
@@ -276,6 +267,17 @@
 	return ret;
 }
 
+static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct bfin_rtc *rtc = dev_get_drvdata(dev);
+
+	dev_dbg_stamp(dev);
+	if (enabled)
+		bfin_rtc_int_set_alarm(rtc);
+	else
+		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+	return 0;
+}
+
 static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct bfin_rtc *rtc = dev_get_drvdata(dev);
@@ -362,6 +364,7 @@
 	.read_alarm    = bfin_rtc_read_alarm,
 	.set_alarm     = bfin_rtc_set_alarm,
 	.proc          = bfin_rtc_proc,
+	.alarm_irq_enable = bfin_rtc_alarm_irq_enable,
 };
 
 static int __devinit bfin_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5856167..c7ff8df 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,6 +36,7 @@
 #include <linux/platform_device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/log2.h>
+#include <linux/pm.h>
 
 /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
 #include <asm-generic/rtc.h>
@@ -687,7 +688,8 @@
 #if	defined(CONFIG_ATARI)
 	address_space = 64;
 #elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
-			|| defined(__sparc__) || defined(__mips__)
+			|| defined(__sparc__) || defined(__mips__) \
+			|| defined(__powerpc__)
 	address_space = 128;
 #else
 #warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -850,7 +852,7 @@
 
 #ifdef	CONFIG_PM
 
-static int cmos_suspend(struct device *dev, pm_message_t mesg)
+static int cmos_suspend(struct device *dev)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 	unsigned char	tmp;
@@ -898,7 +900,7 @@
  */
 static inline int cmos_poweroff(struct device *dev)
 {
-	return cmos_suspend(dev, PMSG_HIBERNATE);
+	return cmos_suspend(dev);
 }
 
 static int cmos_resume(struct device *dev)
@@ -945,9 +947,9 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+
 #else
-#define	cmos_suspend	NULL
-#define	cmos_resume	NULL
 
 static inline int cmos_poweroff(struct device *dev)
 {
@@ -1077,7 +1079,7 @@
 
 static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
 {
-	return cmos_suspend(&pnp->dev, mesg);
+	return cmos_suspend(&pnp->dev);
 }
 
 static int cmos_pnp_resume(struct pnp_dev *pnp)
@@ -1157,8 +1159,9 @@
 	.shutdown	= cmos_platform_shutdown,
 	.driver = {
 		.name		= (char *) driver_name,
-		.suspend	= cmos_suspend,
-		.resume		= cmos_resume,
+#ifdef CONFIG_PM
+		.pm		= &cmos_pm_ops,
+#endif
 	}
 };
 
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0cc0984..d0e06ed 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -76,7 +76,7 @@
 	}
 	spin_unlock_irq(&rtc->irq_lock);
 	if (num)
-		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
+		rtc_handle_legacy_irq(rtc, num, RTC_UF);
 }
 static void rtc_uie_timer(unsigned long data)
 {
@@ -104,7 +104,7 @@
 		}
 		if (rtc->uie_task_active) {
 			spin_unlock_irq(&rtc->irq_lock);
-			flush_work_sync(&rtc->uie_task);
+			flush_scheduled_work();
 			spin_lock_irq(&rtc->irq_lock);
 		}
 		rtc->uie_irq_active = 0;
@@ -253,19 +253,7 @@
 	if (err)
 		goto done;
 
-	/* try the driver's ioctl interface */
-	if (ops->ioctl) {
-		err = ops->ioctl(rtc->dev.parent, cmd, arg);
-		if (err != -ENOIOCTLCMD) {
-			mutex_unlock(&rtc->ops_lock);
-			return err;
-		}
-	}
-
-	/* if the driver does not provide the ioctl interface
-	 * or if that particular ioctl was not implemented
-	 * (-ENOIOCTLCMD), we will try to emulate here.
-	 *
+	/*
 	 * Drivers *SHOULD NOT* provide ioctl implementations
 	 * for these requests.  Instead, provide methods to
 	 * support the following code, so that the RTC's main
@@ -428,7 +416,12 @@
 		return err;
 
 	default:
-		err = -ENOTTY;
+		/* Finally try the driver's ioctl interface */
+		if (ops->ioctl) {
+			err = ops->ioctl(rtc->dev.parent, cmd, arg);
+			if (err == -ENOIOCTLCMD)
+				err = -ENOTTY;
+		}
 		break;
 	}
 
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index bf430f9..60ce696 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -40,6 +40,26 @@
 	__raw_writel(data, &priv->rtcregs[reg]);
 }
 
+
+static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct ds1286_priv *priv = dev_get_drvdata(dev);
+	unsigned long flags;
+	unsigned char val;
+
+	/* Allow or mask alarm interrupts */
+	spin_lock_irqsave(&priv->lock, flags);
+	val = ds1286_rtc_read(priv, RTC_CMD);
+	if (enabled)
+		val &=  ~RTC_TDM;
+	else
+		val |=  RTC_TDM;
+	ds1286_rtc_write(priv, val, RTC_CMD);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
 #ifdef CONFIG_RTC_INTF_DEV
 
 static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -49,22 +69,6 @@
 	unsigned char val;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-		/* Mask alarm int. enab. bit	*/
-		spin_lock_irqsave(&priv->lock, flags);
-		val = ds1286_rtc_read(priv, RTC_CMD);
-		val |=  RTC_TDM;
-		ds1286_rtc_write(priv, val, RTC_CMD);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		break;
-	case RTC_AIE_ON:
-		/* Allow alarm interrupts.	*/
-		spin_lock_irqsave(&priv->lock, flags);
-		val = ds1286_rtc_read(priv, RTC_CMD);
-		val &=  ~RTC_TDM;
-		ds1286_rtc_write(priv, val, RTC_CMD);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		break;
 	case RTC_WIE_OFF:
 		/* Mask watchdog int. enab. bit	*/
 		spin_lock_irqsave(&priv->lock, flags);
@@ -316,12 +320,13 @@
 }
 
 static const struct rtc_class_ops ds1286_ops = {
-	.ioctl   	= ds1286_ioctl,
-	.proc   	= ds1286_proc,
+	.ioctl		= ds1286_ioctl,
+	.proc		= ds1286_proc,
 	.read_time	= ds1286_read_time,
 	.set_time	= ds1286_set_time,
 	.read_alarm	= ds1286_read_alarm,
 	.set_alarm	= ds1286_set_alarm,
+	.alarm_irq_enable = ds1286_alarm_irq_enable,
 };
 
 static int __devinit ds1286_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 077af1d..57fbcc1 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -139,49 +139,32 @@
  * Interface to RTC framework
  */
 
-#ifdef CONFIG_RTC_INTF_DEV
-
-/*
- * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
- */
-static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct ds1305	*ds1305 = dev_get_drvdata(dev);
 	u8		buf[2];
-	int		status = -ENOIOCTLCMD;
+	long		err = -EINVAL;
 
 	buf[0] = DS1305_WRITE | DS1305_CONTROL;
 	buf[1] = ds1305->ctrl[0];
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		status = 0;
-		if (!(buf[1] & DS1305_AEI0))
-			goto done;
-		buf[1] &= ~DS1305_AEI0;
-		break;
-
-	case RTC_AIE_ON:
-		status = 0;
+	if (enabled) {
 		if (ds1305->ctrl[0] & DS1305_AEI0)
 			goto done;
 		buf[1] |= DS1305_AEI0;
-		break;
+	} else {
+		if (!(buf[1] & DS1305_AEI0))
+			goto done;
+		buf[1] &= ~DS1305_AEI0;
 	}
-	if (status == 0) {
-		status = spi_write_then_read(ds1305->spi, buf, sizeof buf,
-				NULL, 0);
-		if (status >= 0)
-			ds1305->ctrl[0] = buf[1];
-	}
-
+	err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0);
+	if (err >= 0)
+		ds1305->ctrl[0] = buf[1];
 done:
-	return status;
+	return err;
+
 }
 
-#else
-#define ds1305_ioctl	NULL
-#endif
 
 /*
  * Get/set of date and time is pretty normal.
@@ -460,12 +443,12 @@
 #endif
 
 static const struct rtc_class_ops ds1305_ops = {
-	.ioctl		= ds1305_ioctl,
 	.read_time	= ds1305_get_time,
 	.set_time	= ds1305_set_time,
 	.read_alarm	= ds1305_get_alarm,
 	.set_alarm	= ds1305_set_alarm,
 	.proc		= ds1305_proc,
+	.alarm_irq_enable = ds1305_alarm_irq_enable,
 };
 
 static void ds1305_work(struct work_struct *work)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 0d559b6..4724ba3 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -495,50 +495,27 @@
 	return 0;
 }
 
-static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client	*client = to_i2c_client(dev);
 	struct ds1307		*ds1307 = i2c_get_clientdata(client);
 	int			ret;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		if (!test_bit(HAS_ALARM, &ds1307->flags))
-			return -ENOTTY;
+	if (!test_bit(HAS_ALARM, &ds1307->flags))
+		return -ENOTTY;
 
-		ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-		if (ret < 0)
-			return ret;
+	ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+	if (ret < 0)
+		return ret;
 
+	if (enabled)
+		ret |= DS1337_BIT_A1IE;
+	else
 		ret &= ~DS1337_BIT_A1IE;
 
-		ret = i2c_smbus_write_byte_data(client,
-						DS1337_REG_CONTROL, ret);
-		if (ret < 0)
-			return ret;
-
-		break;
-
-	case RTC_AIE_ON:
-		if (!test_bit(HAS_ALARM, &ds1307->flags))
-			return -ENOTTY;
-
-		ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-		if (ret < 0)
-			return ret;
-
-		ret |= DS1337_BIT_A1IE;
-
-		ret = i2c_smbus_write_byte_data(client,
-						DS1337_REG_CONTROL, ret);
-		if (ret < 0)
-			return ret;
-
-		break;
-
-	default:
-		return -ENOIOCTLCMD;
-	}
+	ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
@@ -548,7 +525,7 @@
 	.set_time	= ds1307_set_time,
 	.read_alarm	= ds1337_read_alarm,
 	.set_alarm	= ds1337_set_alarm,
-	.ioctl		= ds1307_ioctl,
+	.alarm_irq_enable = ds1307_alarm_irq_enable,
 };
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 47fb635..d834a63 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -307,42 +307,25 @@
 	mutex_unlock(&ds1374->mutex);
 }
 
-static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct ds1374 *ds1374 = i2c_get_clientdata(client);
-	int ret = -ENOIOCTLCMD;
+	int ret;
 
 	mutex_lock(&ds1374->mutex);
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-		if (ret < 0)
-			goto out;
+	ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+	if (ret < 0)
+		goto out;
 
-		ret &= ~DS1374_REG_CR_WACE;
-
-		ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-		if (ret < 0)
-			goto out;
-
-		break;
-
-	case RTC_AIE_ON:
-		ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-		if (ret < 0)
-			goto out;
-
+	if (enabled) {
 		ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
 		ret &= ~DS1374_REG_CR_WDALM;
-
-		ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-		if (ret < 0)
-			goto out;
-
-		break;
+	} else {
+		ret &= ~DS1374_REG_CR_WACE;
 	}
+	ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
 
 out:
 	mutex_unlock(&ds1374->mutex);
@@ -354,7 +337,7 @@
 	.set_time = ds1374_set_time,
 	.read_alarm = ds1374_read_alarm,
 	.set_alarm = ds1374_set_alarm,
-	.ioctl = ds1374_ioctl,
+	.alarm_irq_enable = ds1374_alarm_irq_enable,
 };
 
 static int ds1374_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee1..9507354 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
 /*
  * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
  *
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
  * Author: Jack Lan <jack.lan@freescale.com>
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -141,9 +141,11 @@
 		time->tm_hour = bcd2bin(hour);
 	}
 
-	time->tm_wday = bcd2bin(week);
+	/* Day of the week: Linux uses 0-6, the RTC chip uses 1-7 */
+	time->tm_wday = bcd2bin(week) - 1;
 	time->tm_mday = bcd2bin(day);
-	time->tm_mon = bcd2bin(month & 0x7F);
+	/* Month: Linux tm_mon is 0-11, the RTC chip uses 1-12 */
+	time->tm_mon = bcd2bin(month & 0x7F) - 1;
 	if (century)
 		add_century = 100;
 
@@ -162,9 +164,11 @@
 	buf[0] = bin2bcd(time->tm_sec);
 	buf[1] = bin2bcd(time->tm_min);
 	buf[2] = bin2bcd(time->tm_hour);
-	buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+	/* Day of the week: Linux uses 0-6, the RTC chip uses 1-7 */
+	buf[3] = bin2bcd(time->tm_wday + 1);
 	buf[4] = bin2bcd(time->tm_mday); /* Date */
-	buf[5] = bin2bcd(time->tm_mon);
+	/* Month: Linux tm_mon is 0-11, the RTC chip uses 1-12 */
+	buf[5] = bin2bcd(time->tm_mon + 1);
 	if (time->tm_year >= 100) {
 		buf[5] |= 0x80;
 		buf[6] = bin2bcd(time->tm_year - 100);
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 773851f..075f170 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,32 @@
 }
 EXPORT_SYMBOL(rtc_tm_to_time);
 
+/*
+ * Convert rtc_time to ktime
+ */
+ktime_t rtc_tm_to_ktime(struct rtc_time tm)
+{
+	time_t time;
+	rtc_tm_to_time(&tm, &time);
+	return ktime_set(time, 0);
+}
+EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
+
+/*
+ * Convert ktime to rtc_time
+ */
+struct rtc_time rtc_ktime_to_tm(ktime_t kt)
+{
+	struct timespec ts;
+	struct rtc_time ret;
+
+	ts = ktime_to_timespec(kt);
+	/* Round up any ns */
+	if (ts.tv_nsec)
+		ts.tv_sec++;
+	rtc_time_to_tm(ts.tv_sec, &ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
+
 MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5a8daa3..69fe664 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -213,41 +213,27 @@
 	return m41t80_set_datetime(to_i2c_client(dev), tm);
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-static int
-m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	int rc;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
-
 	rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
 	if (rc < 0)
 		goto err;
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		rc &= ~M41T80_ALMON_AFE;
-		break;
-	case RTC_AIE_ON:
+
+	if (enabled)
 		rc |= M41T80_ALMON_AFE;
-		break;
-	}
+	else
+		rc &= ~M41T80_ALMON_AFE;
+
 	if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
 		goto err;
+
 	return 0;
 err:
 	return -EIO;
 }
-#else
-#define	m41t80_rtc_ioctl NULL
-#endif
 
 static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
@@ -374,7 +360,7 @@
 	.read_alarm = m41t80_rtc_read_alarm,
 	.set_alarm = m41t80_rtc_set_alarm,
 	.proc = m41t80_rtc_proc,
-	.ioctl = m41t80_rtc_ioctl,
+	.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index a99a0b5..3978f4c 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -263,30 +263,21 @@
 /*
  * Handle commands from user-space
  */
-static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	unsigned long flags;
-	int ret = 0;
 
 	spin_lock_irqsave(&m48t59->lock, flags);
-	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm interrupt off */
-		M48T59_WRITE(0x00, M48T59_INTR);
-		break;
-	case RTC_AIE_ON:	/* alarm interrupt on */
+	if (enabled)
 		M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
-		break;
-	default:
-		ret = -ENOIOCTLCMD;
-		break;
-	}
+	else
+		M48T59_WRITE(0x00, M48T59_INTR);
 	spin_unlock_irqrestore(&m48t59->lock, flags);
 
-	return ret;
+	return 0;
 }
 
 static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -330,12 +321,12 @@
 }
 
 static const struct rtc_class_ops m48t59_rtc_ops = {
-	.ioctl		= m48t59_rtc_ioctl,
 	.read_time	= m48t59_rtc_read_time,
 	.set_time	= m48t59_rtc_set_time,
 	.read_alarm	= m48t59_rtc_readalarm,
 	.set_alarm	= m48t59_rtc_setalarm,
 	.proc		= m48t59_rtc_proc,
+	.alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
 };
 
 static const struct rtc_class_ops m48t02_rtc_ops = {
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 657403e..0ec3f588 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -139,12 +139,13 @@
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
 
+	dev_set_drvdata(&spi->dev, rtc);
 	return 0;
 }
 
 static int __devexit max6902_remove(struct spi_device *spi)
 {
-	struct rtc_device *rtc = platform_get_drvdata(spi);
+	struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
 
 	rtc_device_unregister(rtc);
 	return 0;
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index f22dee3..3f7bc6b 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -20,6 +20,7 @@
 #include <linux/platform_device.h>
 #include <linux/mfd/max8998.h>
 #include <linux/mfd/max8998-private.h>
+#include <linux/delay.h>
 
 #define MAX8998_RTC_SEC			0x00
 #define MAX8998_RTC_MIN			0x01
@@ -73,6 +74,7 @@
 	struct i2c_client	*rtc;
 	struct rtc_device	*rtc_dev;
 	int irq;
+	bool lp3974_bug_workaround;
 };
 
 static void max8998_data_to_tm(u8 *data, struct rtc_time *tm)
@@ -124,10 +126,16 @@
 {
 	struct max8998_rtc_info *info = dev_get_drvdata(dev);
 	u8 data[8];
+	int ret;
 
 	max8998_tm_to_data(tm, data);
 
-	return max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
+	ret = max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
+
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
+
+	return ret;
 }
 
 static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -163,12 +171,29 @@
 
 static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info)
 {
-	return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
+	int ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
+
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
+
+	return ret;
 }
 
 static int max8998_rtc_start_alarm(struct max8998_rtc_info *info)
 {
-	return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0x77);
+	int ret;
+	u8 alarm0_conf = 0x77;
+
+	/* LP3974 chips with the delay bug have RTC alarm issues in the "MONTH" field */
+	if (info->lp3974_bug_workaround)
+		alarm0_conf = 0x57;
+
+	ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, alarm0_conf);
+
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
+
+	return ret;
 }
 
 static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -187,10 +212,13 @@
 	if (ret < 0)
 		return ret;
 
-	if (alrm->enabled)
-		return max8998_rtc_start_alarm(info);
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
 
-	return 0;
+	if (alrm->enabled)
+		ret = max8998_rtc_start_alarm(info);
+
+	return ret;
 }
 
 static int max8998_rtc_alarm_irq_enable(struct device *dev,
@@ -224,6 +252,7 @@
 static int __devinit max8998_rtc_probe(struct platform_device *pdev)
 {
 	struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent);
+	struct max8998_platform_data *pdata = dev_get_platdata(max8998->dev);
 	struct max8998_rtc_info *info;
 	int ret;
 
@@ -249,10 +278,18 @@
 
 	ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
 			"rtc-alarm0", info);
+
 	if (ret < 0)
 		dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
 			info->irq, ret);
 
+	dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
+	if (pdata->rtc_delay) {
+		info->lp3974_bug_workaround = true;
+		dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
+				" RTC updates will be extremely slow.\n");
+	}
+
 	return 0;
 
 out_rtc:
@@ -273,6 +310,12 @@
 	return 0;
 }
 
+static const struct platform_device_id max8998_rtc_id[] = {
+	{ "max8998-rtc", TYPE_MAX8998 },
+	{ "lp3974-rtc", TYPE_LP3974 },
+	{ }
+};
+
 static struct platform_driver max8998_rtc_driver = {
 	.driver		= {
 		.name	= "max8998-rtc",
@@ -280,6 +323,7 @@
 	},
 	.probe		= max8998_rtc_probe,
 	.remove		= __devexit_p(max8998_rtc_remove),
+	.id_table	= max8998_rtc_id,
 };
 
 static int __init max8998_rtc_init(void)
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bcd0cf6..1db62db 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -255,42 +255,21 @@
 	return 0;
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
 /* Currently, the vRTC doesn't support UIE ON/OFF */
-static int
-mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned long	flags;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		if (!mrst->irq)
-			return -EINVAL;
-		break;
-	default:
-		/* PIE ON/OFF is handled by mrst_irq_set_state() */
-		return -ENOIOCTLCMD;
-	}
-
 	spin_lock_irqsave(&rtc_lock, flags);
-	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		mrst_irq_disable(mrst, RTC_AIE);
-		break;
-	case RTC_AIE_ON:	/* alarm on */
+	if (enabled)
 		mrst_irq_enable(mrst, RTC_AIE);
-		break;
-	}
+	else
+		mrst_irq_disable(mrst, RTC_AIE);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 	return 0;
 }
 
-#else
-#define	mrst_rtc_ioctl	NULL
-#endif
 
 #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
 
@@ -317,13 +296,13 @@
 #endif
 
 static const struct rtc_class_ops mrst_rtc_ops = {
-	.ioctl		= mrst_rtc_ioctl,
 	.read_time	= mrst_read_time,
 	.set_time	= mrst_set_time,
 	.read_alarm	= mrst_read_alarm,
 	.set_alarm	= mrst_set_alarm,
 	.proc		= mrst_procfs,
 	.irq_set_state	= mrst_irq_set_state,
+	.alarm_irq_enable = mrst_rtc_alarm_irq_enable,
 };
 
 static struct mrst_rtc	mrst_rtc;
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index b2fff0c..6782062 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -82,7 +82,7 @@
 static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
 				unsigned int reg)
 {
-	return __raw_writel(val, &priv->regs[reg]);
+	__raw_writel(val, &priv->regs[reg]);
 }
 
 static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index bcca472..60627a7 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -169,25 +169,19 @@
 	return 0;
 }
 
-static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 	void __iomem *ioaddr = pdata->ioaddr;
 
 	if (pdata->irq < 0)
-		return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-		break;
-	case RTC_AIE_ON:
+		return -EINVAL; /* fall back into rtc-dev's emulation */
+
+	if (enabled)
 		writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
+	else
+		writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
 	return 0;
 }
 
@@ -216,7 +210,7 @@
 	.set_time	= mv_rtc_set_time,
 	.read_alarm	= mv_rtc_read_alarm,
 	.set_alarm	= mv_rtc_set_alarm,
-	.ioctl		= mv_rtc_ioctl,
+	.alarm_irq_enable = mv_rtc_alarm_irq_enable,
 };
 
 static int __devinit mv_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 73377b0..b4dbf3a3 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -143,8 +143,6 @@
 	u8 reg;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
 	case RTC_UIE_OFF:
 	case RTC_UIE_ON:
 		break;
@@ -156,13 +154,6 @@
 	rtc_wait_not_busy();
 	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
 	switch (cmd) {
-	/* AIE = Alarm Interrupt Enable */
-	case RTC_AIE_OFF:
-		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
-		break;
-	case RTC_AIE_ON:
-		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
-		break;
 	/* UIE = Update Interrupt Enable (1/second) */
 	case RTC_UIE_OFF:
 		reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
@@ -182,6 +173,24 @@
 #define	omap_rtc_ioctl	NULL
 #endif
 
+static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	u8 reg;
+
+	local_irq_disable();
+	rtc_wait_not_busy();
+	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+	if (enabled)
+		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+	else
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+	rtc_wait_not_busy();
+	rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+	local_irq_enable();
+
+	return 0;
+}
+
 /* this hardware doesn't support "don't care" alarm fields */
 static int tm2bcd(struct rtc_time *tm)
 {
@@ -309,6 +318,7 @@
 	.set_time	= omap_rtc_set_time,
 	.read_alarm	= omap_rtc_read_alarm,
 	.set_alarm	= omap_rtc_set_alarm,
+	.alarm_irq_enable = omap_rtc_alarm_irq_enable,
 };
 
 static int omap_rtc_alarm;
@@ -429,13 +439,14 @@
 fail0:
 	iounmap(rtc_base);
 fail:
-	release_resource(mem);
+	release_mem_region(mem->start, resource_size(mem));
 	return -EIO;
 }
 
 static int __exit omap_rtc_remove(struct platform_device *pdev)
 {
 	struct rtc_device	*rtc = platform_get_drvdata(pdev);
+	struct resource		*mem = dev_get_drvdata(&rtc->dev);
 
 	device_init_wakeup(&pdev->dev, 0);
 
@@ -447,8 +458,9 @@
 	if (omap_rtc_timer != omap_rtc_alarm)
 		free_irq(omap_rtc_alarm, rtc);
 
-	release_resource(dev_get_drvdata(&rtc->dev));
 	rtc_device_unregister(rtc);
+	iounmap(rtc_base);
+	release_mem_region(mem->start, resource_size(mem));
 	return 0;
 }
 
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index c086fc3..242bbf8 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -81,12 +81,16 @@
 
 static int rtc_proc_open(struct inode *inode, struct file *file)
 {
+	int ret;
 	struct rtc_device *rtc = PDE(inode)->data;
 
 	if (!try_module_get(THIS_MODULE))
 		return -ENODEV;
 
-	return single_open(file, rtc_proc_show, rtc);
+	ret = single_open(file, rtc_proc_show, rtc);
+	if (ret)
+		module_put(THIS_MODULE);
+	return ret;
 }
 
 static int rtc_proc_release(struct inode *inode, struct file *file)
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 36eb661..694da39 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -76,7 +76,7 @@
 static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
 				unsigned int reg)
 {
-	return __raw_writel(val, &priv->regs[reg]);
+	__raw_writel(val, &priv->regs[reg]);
 }
 
 static void rp5c01_lock(struct rp5c01_priv *priv)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index dd14e20..6aaa155 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -299,14 +299,6 @@
 		if (rs5c->type == rtc_rs5c372a
 				&& (buf & RS5C372A_CTRL1_SL1))
 			return -ENOIOCTLCMD;
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		/* these irq management calls only make sense for chips
-		 * which are wired up to an IRQ.
-		 */
-		if (!rs5c->has_irq)
-			return -ENOIOCTLCMD;
-		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
@@ -317,12 +309,6 @@
 
 	addr = RS5C_ADDR(RS5C_REG_CTRL1);
 	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		buf &= ~RS5C_CTRL1_AALE;
-		break;
-	case RTC_AIE_ON:	/* alarm on */
-		buf |= RS5C_CTRL1_AALE;
-		break;
 	case RTC_UIE_OFF:	/* update off */
 		buf &= ~RS5C_CTRL1_CT_MASK;
 		break;
@@ -347,6 +333,39 @@
 #endif
 
 
+static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct i2c_client	*client = to_i2c_client(dev);
+	struct rs5c372		*rs5c = i2c_get_clientdata(client);
+	unsigned char		buf;
+	int			status, addr;
+
+	buf = rs5c->regs[RS5C_REG_CTRL1];
+
+	if (!rs5c->has_irq)
+		return -EINVAL;
+
+	status = rs5c_get_regs(rs5c);
+	if (status < 0)
+		return status;
+
+	addr = RS5C_ADDR(RS5C_REG_CTRL1);
+	if (enabled)
+		buf |= RS5C_CTRL1_AALE;
+	else
+		buf &= ~RS5C_CTRL1_AALE;
+
+	if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
+		printk(KERN_WARNING "%s: can't update alarm\n",
+			rs5c->rtc->name);
+		status = -EIO;
+	} else
+		rs5c->regs[RS5C_REG_CTRL1] = buf;
+
+	return status;
+}
+
+
 /* NOTE:  Since RTC_WKALM_{RD,SET} were originally defined for EFI,
  * which only exposes a polled programming interface; and since
  * these calls map directly to those EFI requests; we don't demand
@@ -466,6 +485,7 @@
 	.set_time	= rs5c372_rtc_set_time,
 	.read_alarm	= rs5c_read_alarm,
 	.set_alarm	= rs5c_set_alarm,
+	.alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index cf953ec..b80fa28 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -77,18 +77,20 @@
 }
 
 /* Update control registers */
-static void s3c_rtc_setaie(int to)
+static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
 {
 	unsigned int tmp;
 
-	pr_debug("%s: aie=%d\n", __func__, to);
+	pr_debug("%s: aie=%d\n", __func__, enabled);
 
 	tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
 
-	if (to)
+	if (enabled)
 		tmp |= S3C2410_RTCALM_ALMEN;
 
 	writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
+
+	return 0;
 }
 
 static int s3c_rtc_setpie(struct device *dev, int enabled)
@@ -308,7 +310,7 @@
 
 	writeb(alrm_en, base + S3C2410_RTCALM);
 
-	s3c_rtc_setaie(alrm->enabled);
+	s3c_rtc_setaie(dev, alrm->enabled);
 
 	return 0;
 }
@@ -440,7 +442,7 @@
 	rtc_device_unregister(rtc);
 
 	s3c_rtc_setpie(&dev->dev, 0);
-	s3c_rtc_setaie(0);
+	s3c_rtc_setaie(&dev->dev, 0);
 
 	clk_disable(rtc_clk);
 	clk_put(rtc_clk);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 88ea52b..5dfe5ff 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -314,16 +314,6 @@
 		unsigned long arg)
 {
 	switch (cmd) {
-	case RTC_AIE_OFF:
-		spin_lock_irq(&sa1100_rtc_lock);
-		RTSR &= ~RTSR_ALE;
-		spin_unlock_irq(&sa1100_rtc_lock);
-		return 0;
-	case RTC_AIE_ON:
-		spin_lock_irq(&sa1100_rtc_lock);
-		RTSR |= RTSR_ALE;
-		spin_unlock_irq(&sa1100_rtc_lock);
-		return 0;
 	case RTC_UIE_OFF:
 		spin_lock_irq(&sa1100_rtc_lock);
 		RTSR &= ~RTSR_HZE;
@@ -338,6 +328,17 @@
 	return -ENOIOCTLCMD;
 }
 
+static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	spin_lock_irq(&sa1100_rtc_lock);
+	if (enabled)
+		RTSR |= RTSR_ALE;
+	else
+		RTSR &= ~RTSR_ALE;
+	spin_unlock_irq(&sa1100_rtc_lock);
+	return 0;
+}
+
 static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	rtc_time_to_tm(RCNR, tm);
@@ -410,6 +411,7 @@
 	.proc = sa1100_rtc_proc,
 	.irq_set_freq = sa1100_irq_set_freq,
 	.irq_set_state = sa1100_irq_set_state,
+	.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
 };
 
 static int sa1100_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 06e41ed..93314a9 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -350,10 +350,6 @@
 	unsigned int ret = 0;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		sh_rtc_setaie(dev, cmd == RTC_AIE_ON);
-		break;
 	case RTC_UIE_OFF:
 		rtc->periodic_freq &= ~PF_OXS;
 		sh_rtc_setcie(dev, 0);
@@ -369,6 +365,12 @@
 	return ret;
 }
 
+static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	sh_rtc_setaie(dev, enabled);
+	return 0;
+}
+
 static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -604,6 +606,7 @@
 	.irq_set_state	= sh_rtc_irq_set_state,
 	.irq_set_freq	= sh_rtc_irq_set_freq,
 	.proc		= sh_rtc_proc,
+	.alarm_irq_enable = sh_rtc_alarm_irq_enable,
 };
 
 static int __init sh_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 51725f7..a82d6fe 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,24 +50,9 @@
 	return 0;
 }
 
-static int test_rtc_ioctl(struct device *dev, unsigned int cmd,
-	unsigned long arg)
+static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
-	/* We do support interrupts, they're generated
-	 * using the sysfs interface.
-	 */
-	switch (cmd) {
-	case RTC_PIE_ON:
-	case RTC_PIE_OFF:
-	case RTC_UIE_ON:
-	case RTC_UIE_OFF:
-	case RTC_AIE_ON:
-	case RTC_AIE_OFF:
-		return 0;
-
-	default:
-		return -ENOIOCTLCMD;
-	}
+	return 0;
 }
 
 static const struct rtc_class_ops test_rtc_ops = {
@@ -76,7 +61,7 @@
 	.read_alarm = test_rtc_read_alarm,
 	.set_alarm = test_rtc_set_alarm,
 	.set_mmss = test_rtc_set_mmss,
-	.ioctl = test_rtc_ioctl,
+	.alarm_irq_enable = test_rtc_alarm_irq_enable,
 };
 
 static ssize_t test_irq_show(struct device *dev,
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c324424..769190a 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -240,26 +240,6 @@
 static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
-	case RTC_AIE_ON:
-		spin_lock_irq(&rtc_lock);
-
-		if (!alarm_enabled) {
-			enable_irq(aie_irq);
-			alarm_enabled = 1;
-		}
-
-		spin_unlock_irq(&rtc_lock);
-		break;
-	case RTC_AIE_OFF:
-		spin_lock_irq(&rtc_lock);
-
-		if (alarm_enabled) {
-			disable_irq(aie_irq);
-			alarm_enabled = 0;
-		}
-
-		spin_unlock_irq(&rtc_lock);
-		break;
 	case RTC_EPOCH_READ:
 		return put_user(epoch, (unsigned long __user *)arg);
 	case RTC_EPOCH_SET:
@@ -275,6 +255,24 @@
 	return 0;
 }
 
+static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	spin_lock_irq(&rtc_lock);
+	if (enabled) {
+		if (!alarm_enabled) {
+			enable_irq(aie_irq);
+			alarm_enabled = 1;
+		}
+	} else {
+		if (alarm_enabled) {
+			disable_irq(aie_irq);
+			alarm_enabled = 0;
+		}
+	}
+	spin_unlock_irq(&rtc_lock);
+	return 0;
+}
+
 static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
 {
 	struct platform_device *pdev = (struct platform_device *)dev_id;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805..2b771f1 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -319,6 +319,9 @@
 
 	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
+	/* nothing to do if already disconnected */
+	if (!lcu)
+		return;
 	device->discipline->get_uid(device, &uid);
 	spin_lock_irqsave(&lcu->lock, flags);
 	list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@
 
 	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
+	/* nothing to do if already removed */
+	if (!lcu)
+		return 0;
 	spin_lock_irqsave(&lcu->lock, flags);
 	_remove_device_from_lcu(lcu, device);
 	spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d..a9fe23d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@
 static struct ccw_device_id dasd_eckd_ids[] = {
 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
-	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 30a1ca3..5505bc0 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -103,7 +103,7 @@
 	struct block_device *bdev;
 
 	bdev = bdget_disk(block->gdp, 0);
-	if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
+	if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
 		return -ENODEV;
 	/*
 	 * See fs/partition/check.c:register_disk,rescan_partitions
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14..1f6a4d8 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@
 /*
  * Parameter parsing functions.
  */
-static int __initdata devs = XPRAM_DEVS;
-static char __initdata *sizes[XPRAM_MAX_DEVS];
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
 
 module_param(devs, int, 0);
 module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e4..5ad44da 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -460,7 +460,8 @@
 	  unsigned int cmd, unsigned long arg)
 {
 	void __user *argp;
-	int ct, perm;
+	unsigned int ct;
+	int perm;
 
 	argp = (void __user *)arg;
 
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 7a242f0..267b54e 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -280,6 +280,14 @@
 	return rc;
 }
 
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+	request->callback = (void *) tape_free_request;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
 extern int tape_oper_handler(int irq, int status);
 extern void tape_noper_handler(int irq, int status);
 extern int tape_open(struct tape_device *);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c17f35b..c265111 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -53,23 +53,11 @@
  * Medium sense for 34xx tapes. There is no 'real' medium sense call.
  * So we just do a normal sense.
  */
-static int
-tape_34xx_medium_sense(struct tape_device *device)
+static void __tape_34xx_medium_sense(struct tape_request *request)
 {
-	struct tape_request *request;
-	unsigned char       *sense;
-	int                  rc;
+	struct tape_device *device = request->device;
+	unsigned char *sense;
 
-	request = tape_alloc_request(1, 32);
-	if (IS_ERR(request)) {
-		DBF_EXCEPTION(6, "MSEN fail\n");
-		return PTR_ERR(request);
-	}
-
-	request->op = TO_MSEN;
-	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
-
-	rc = tape_do_io_interruptible(device, request);
 	if (request->rc == 0) {
 		sense = request->cpdata;
 
@@ -88,15 +76,47 @@
 			device->tape_generic_status |= GMT_WR_PROT(~0);
 		else
 			device->tape_generic_status &= ~GMT_WR_PROT(~0);
-	} else {
+	} else
 		DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
 			request->rc);
-	}
 	tape_free_request(request);
+}
 
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return PTR_ERR(request);
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	rc = tape_do_io_interruptible(device, request);
+	__tape_34xx_medium_sense(request);
 	return rc;
 }
 
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return;
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	request->callback = (void *) __tape_34xx_medium_sense;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
 struct tape_34xx_work {
 	struct tape_device	*device;
 	enum tape_op		 op;
@@ -109,6 +129,9 @@
  * is inserted but cannot call tape_do_io* from an interrupt context.
  * Maybe that's useful for other actions we want to start from the
  * interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
  */
 static void
 tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@
 
 	switch(p->op) {
 		case TO_MSEN:
-			tape_34xx_medium_sense(device);
+			tape_34xx_medium_sense_async(device);
 			break;
 		default:
 			DBF_EVENT(3, "T34XX: internal error: unknown work\n");
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fbe361f..de2e99e 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -329,17 +329,17 @@
 /*
  * Enable encryption
  */
-static int tape_3592_enable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
 {
 	struct tape_request *request;
 	char *data;
 
 	DBF_EVENT(6, "tape_3592_enable_crypt\n");
 	if (!crypt_supported(device))
-		return -ENOSYS;
+		return ERR_PTR(-ENOSYS);
 	request = tape_alloc_request(2, 72);
 	if (IS_ERR(request))
-		return PTR_ERR(request);
+		return request;
 	data = request->cpdata;
 	memset(data,0,72);
 
@@ -354,23 +354,42 @@
 	request->op = TO_CRYPT_ON;
 	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
 	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+	return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
 	return tape_do_io_free(device, request);
 }
 
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
 /*
  * Disable encryption
  */
-static int tape_3592_disable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
 {
 	struct tape_request *request;
 	char *data;
 
 	DBF_EVENT(6, "tape_3592_disable_crypt\n");
 	if (!crypt_supported(device))
-		return -ENOSYS;
+		return ERR_PTR(-ENOSYS);
 	request = tape_alloc_request(2, 72);
 	if (IS_ERR(request))
-		return PTR_ERR(request);
+		return request;
 	data = request->cpdata;
 	memset(data,0,72);
 
@@ -383,9 +402,28 @@
 	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
 	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
 
+	return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
 	return tape_do_io_free(device, request);
 }
 
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
 /*
  * IOCTL: Set encryption status
  */
@@ -457,8 +495,7 @@
 /*
  * SENSE Medium: Get Sense data about medium state
  */
-static int
-tape_3590_sense_medium(struct tape_device *device)
+static int tape_3590_sense_medium(struct tape_device *device)
 {
 	struct tape_request *request;
 
@@ -470,6 +507,18 @@
 	return tape_do_io_free(device, request);
 }
 
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 128);
+	if (IS_ERR(request))
+		return;
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+	tape_do_io_async_free(device, request);
+}
+
 /*
  * MTTELL: Tell block. Return the number of block relative to current file.
  */
@@ -546,15 +595,14 @@
  * 2. The attention msg is written to the "read subsystem data" buffer.
  * In this case we probably should print it to the console.
  */
-static int
-tape_3590_read_attmsg(struct tape_device *device)
+static void tape_3590_read_attmsg_async(struct tape_device *device)
 {
 	struct tape_request *request;
 	char *buf;
 
 	request = tape_alloc_request(3, 4096);
 	if (IS_ERR(request))
-		return PTR_ERR(request);
+		return;
 	request->op = TO_READ_ATTMSG;
 	buf = request->cpdata;
 	buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@
 	tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
 	tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
 	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
-	return tape_do_io_free(device, request);
+	tape_do_io_async_free(device, request);
 }
 
 /*
  * These functions are used to schedule follow-up actions from within an
  * interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
  */
 struct work_handler_data {
 	struct tape_device *device;
@@ -583,16 +634,16 @@
 
 	switch (p->op) {
 	case TO_MSEN:
-		tape_3590_sense_medium(p->device);
+		tape_3590_sense_medium_async(p->device);
 		break;
 	case TO_READ_ATTMSG:
-		tape_3590_read_attmsg(p->device);
+		tape_3590_read_attmsg_async(p->device);
 		break;
 	case TO_CRYPT_ON:
-		tape_3592_enable_crypt(p->device);
+		tape_3592_enable_crypt_async(p->device);
 		break;
 	case TO_CRYPT_OFF:
-		tape_3592_disable_crypt(p->device);
+		tape_3592_disable_crypt_async(p->device);
 		break;
 	default:
 		DBF_EVENT(3, "T3590: work handler undefined for "
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e8391b89..b7eaff9 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1835,6 +1835,7 @@
 	 * available again. Kick re-detection.
 	 */
 	cdev->private->flags.resuming = 1;
+	cdev->private->path_new_mask = LPM_ANYPATH;
 	css_schedule_eval(sch->schid);
 	spin_unlock_irq(sch->lock);
 	css_complete_work();
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e9fff2b..5640c89 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,7 +476,7 @@
 static int get_inbound_buffer_frontier(struct qdio_q *q)
 {
 	int count, stop;
-	unsigned char state;
+	unsigned char state = 0;
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@
 static int get_outbound_buffer_frontier(struct qdio_q *q)
 {
 	int count, stop;
-	unsigned char state;
+	unsigned char state = 0;
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 09e7a05..30b2a82 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -841,7 +841,7 @@
 }
 
 /**
- * Emit buffer of a lan comand.
+ * Emit buffer of a lan command.
  */
 static void
 lcs_lancmd_timeout(unsigned long data)
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 65ebee0..b6a6356 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -565,7 +565,7 @@
 	struct iucv_event ev;
 	int rc;
 
-	if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
+	if (memcmp(iucvMagic, ipuser, 16))
 		/* ipuser must match iucvMagic. */
 		return -EINVAL;
 	rc = -EINVAL;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29f848b..019ae58 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -988,16 +988,30 @@
 	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
 	if (chp_dsc != NULL) {
 		/* CHPP field bit 6 == 1 -> single queue */
-		if ((chp_dsc->chpp & 0x02) == 0x02)
+		if ((chp_dsc->chpp & 0x02) == 0x02) {
+			if ((atomic_read(&card->qdio.state) !=
+				QETH_QDIO_UNINITIALIZED) &&
+			    (card->qdio.no_out_queues == 4))
+				/* change from 4 to 1 outbound queues */
+				qeth_free_qdio_buffers(card);
 			card->qdio.no_out_queues = 1;
+			if (card->qdio.default_out_queue != 0)
+				dev_info(&card->gdev->dev,
+					"Priority Queueing not supported\n");
+			card->qdio.default_out_queue = 0;
+		} else {
+			if ((atomic_read(&card->qdio.state) !=
+				QETH_QDIO_UNINITIALIZED) &&
+			    (card->qdio.no_out_queues == 1)) {
+				/* change from 1 to 4 outbound queues */
+				qeth_free_qdio_buffers(card);
+				card->qdio.default_out_queue = 2;
+			}
+			card->qdio.no_out_queues = 4;
+		}
 		card->info.func_level = 0x4100 + chp_dsc->desc;
 		kfree(chp_dsc);
 	}
-	if (card->qdio.no_out_queues == 1) {
-		card->qdio.default_out_queue = 0;
-		dev_info(&card->gdev->dev,
-			"Priority Queueing not supported\n");
-	}
 	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
 	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
 	return;
@@ -1832,33 +1846,6 @@
 	}
 }
 
-static inline int qeth_get_max_mtu_for_card(int cardtype)
-{
-	switch (cardtype) {
-
-	case QETH_CARD_TYPE_UNKNOWN:
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSN:
-	case QETH_CARD_TYPE_OSM:
-	case QETH_CARD_TYPE_OSX:
-		return 61440;
-	case QETH_CARD_TYPE_IQD:
-		return 57344;
-	default:
-		return 1500;
-	}
-}
-
-static inline int qeth_get_mtu_out_of_mpc(int cardtype)
-{
-	switch (cardtype) {
-	case QETH_CARD_TYPE_IQD:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
 static inline int qeth_get_mtu_outof_framesize(int framesize)
 {
 	switch (framesize) {
@@ -1881,10 +1868,9 @@
 	case QETH_CARD_TYPE_OSD:
 	case QETH_CARD_TYPE_OSM:
 	case QETH_CARD_TYPE_OSX:
-		return ((mtu >= 576) && (mtu <= 61440));
 	case QETH_CARD_TYPE_IQD:
 		return ((mtu >= 576) &&
-			(mtu <= card->info.max_mtu + 4096 - 32));
+			(mtu <= card->info.max_mtu));
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_UNKNOWN:
 	default:
@@ -1907,7 +1893,7 @@
 	memcpy(&card->token.ulp_filter_r,
 	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
 	       QETH_MPC_TOKEN_LENGTH);
-	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
 		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
 		mtu = qeth_get_mtu_outof_framesize(framesize);
 		if (!mtu) {
@@ -1915,12 +1901,21 @@
 			QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
 			return 0;
 		}
-		card->info.max_mtu = mtu;
+		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
+			/* frame size has changed */
+			if (card->dev &&
+			    ((card->dev->mtu == card->info.initial_mtu) ||
+			     (card->dev->mtu > mtu)))
+				card->dev->mtu = mtu;
+			qeth_free_qdio_buffers(card);
+		}
 		card->info.initial_mtu = mtu;
+		card->info.max_mtu = mtu;
 		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
 	} else {
 		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
-		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
+		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
+			iob->data);
 		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
 	}
 
@@ -3775,6 +3770,47 @@
 	}
 }
 
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+	int rc;
+	int length;
+	char *prcd;
+	struct ccw_device *ddev;
+	int ddev_offline = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "detcapab");
+	ddev = CARD_DDEV(card);
+	if (!ddev->online) {
+		ddev_offline = 1;
+		rc = ccw_device_set_online(ddev);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+			goto out;
+		}
+	}
+
+	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+			dev_name(&card->gdev->dev), rc);
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_offline;
+	}
+	qeth_configure_unitaddr(card, prcd);
+	qeth_configure_blkt_default(card, prcd);
+	kfree(prcd);
+
+	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+	if (ddev_offline == 1)
+		ccw_device_set_offline(ddev);
+out:
+	return;
+}
+
 static int qeth_qdio_establish(struct qeth_card *card)
 {
 	struct qdio_initialize init_data;
@@ -3905,6 +3941,7 @@
 
 	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
 	atomic_set(&card->force_alloc_skb, 0);
+	qeth_get_channel_path_desc(card);
 retry:
 	if (retries)
 		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -3933,6 +3970,7 @@
 		else
 			goto retry;
 	}
+	qeth_determine_capabilities(card);
 	qeth_init_tokens(card);
 	qeth_init_func_level(card);
 	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -4202,41 +4240,6 @@
 	card->discipline.ccwgdriver = NULL;
 }
 
-static void qeth_determine_capabilities(struct qeth_card *card)
-{
-	int rc;
-	int length;
-	char *prcd;
-
-	QETH_DBF_TEXT(SETUP, 2, "detcapab");
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-		goto out;
-	}
-
-
-	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
-	if (rc) {
-		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
-			dev_name(&card->gdev->dev), rc);
-		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
-		goto out_offline;
-	}
-	qeth_configure_unitaddr(card, prcd);
-	qeth_configure_blkt_default(card, prcd);
-	kfree(prcd);
-
-	rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
-	if (rc)
-		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
-
-out_offline:
-	ccw_device_set_offline(CARD_DDEV(card));
-out:
-	return;
-}
-
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7a7a1b6..ada0fe7 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -573,13 +573,13 @@
 		case IPA_RC_L2_DUP_LAYER3_MAC:
 			dev_warn(&card->gdev->dev,
 				"MAC address %pM already exists\n",
-				card->dev->dev_addr);
+				cmd->data.setdelmac.mac);
 			break;
 		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
 		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
 			dev_warn(&card->gdev->dev,
 				"MAC address %pM is not authorized\n",
-				card->dev->dev_addr);
+				cmd->data.setdelmac.mac);
 			break;
 		default:
 			break;
@@ -831,12 +831,14 @@
 	return NETDEV_TX_OK;
 }
 
-static int qeth_l2_open(struct net_device *dev)
+static int __qeth_l2_open(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
 	if (card->state != CARD_STATE_SOFTSETUP)
 		return -ENODEV;
 
@@ -857,6 +859,18 @@
 	return rc;
 }
 
+static int qeth_l2_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l2_open(dev);
+}
+
 static int qeth_l2_stop(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -1046,7 +1060,7 @@
 	if (recover_flag == CARD_STATE_RECOVER) {
 		if (recovery_mode &&
 		    card->info.type != QETH_CARD_TYPE_OSN) {
-			qeth_l2_open(card->dev);
+			__qeth_l2_open(card->dev);
 		} else {
 			rtnl_lock();
 			dev_open(card->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e227e46..d09b0c4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2998,7 +2998,9 @@
 	 */
 	if (iph->protocol == IPPROTO_UDP)
 		hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
-	hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+	hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
+		QETH_HDR_EXT_CSUM_HDR_REQ;
+	iph->check = 0;
 	if (card->options.performance_stats)
 		card->perf_stats.tx_csum++;
 }
@@ -3240,12 +3242,14 @@
 	return NETDEV_TX_OK;
 }
 
-static int qeth_l3_open(struct net_device *dev)
+static int __qeth_l3_open(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
 	if (card->state != CARD_STATE_SOFTSETUP)
 		return -ENODEV;
 	card->data.state = CH_STATE_UP;
@@ -3260,6 +3264,18 @@
 	return rc;
 }
 
+static int qeth_l3_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l3_open(dev);
+}
+
 static int qeth_l3_stop(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -3564,7 +3580,7 @@
 		netif_carrier_off(card->dev);
 	if (recover_flag == CARD_STATE_RECOVER) {
 		if (recovery_mode)
-			qeth_l3_open(card->dev);
+			__qeth_l3_open(card->dev);
 		else {
 			rtnl_lock();
 			dev_open(card->dev);
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 65e1cf1..207b7d7 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -60,7 +60,7 @@
 static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
 			     u8 ipuser[16])
 {
-	if (strncmp(ipvmid, "*MSG    ", sizeof(ipvmid)) != 0)
+	if (strncmp(ipvmid, "*MSG    ", 8) != 0)
 		return -EINVAL;
 	/* Path pending from *MSG. */
 	return iucv_path_accept(path, &smsg_handler, "SMSGIUCV        ", NULL);
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 46342fe..303dde0 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -317,7 +317,7 @@
 
 /**
  * zfcp_cfdc_port_denied - Process "access denied" for port
- * @port: The port where the acces has been denied
+ * @port: The port where the access has been denied
  * @qual: The FSF status qualifier for the access denied FSF status
  */
 void zfcp_cfdc_port_denied(struct zfcp_port *port,
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index dc5ac6e..a391090 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -416,7 +416,7 @@
 	/* Go back and check they match */
 
 	outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL);	/* Reset program count 0 */
-	bios_addr -= 0x1000;	/* Reset the BIOS adddress      */
+	bios_addr -= 0x1000;	/* Reset the BIOS address */
 	for (i = 0, data32_ptr = (u8 *) & data32;	/* Check the code       */
 	     i < 0x1000;	/* Firmware code size = 4K      */
 	     i++, bios_addr++) {
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index afc9aeb..060ac4b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -91,7 +91,7 @@
  *	aac_fib_setup	-	setup the fibs
  *	@dev: Adapter to set up
  *
- *	Allocate the PCI space for the fibs, map it and then intialise the
+ *	Allocate the PCI space for the fibs, map it and then initialise the
  *	fib area, the unmapped fib data and also the free list
  */
 
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq
index 5997e7c..1565be9 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx.seq
+++ b/drivers/scsi/aic7xxx_old/aic7xxx.seq
@@ -1178,7 +1178,7 @@
 /*
  * Retrieve an SCB by SCBID first searching the disconnected list falling
  * back to DMA'ing the SCB down from the host.  This routine assumes that
- * ARG_1 is the SCBID of interrest and that SINDEX is the position in the
+ * ARG_1 is the SCBID of interest and that SINDEX is the position in the
  * disconnected list to start the search from.  If SINDEX is SCB_LIST_NULL,
  * we go directly to the host for the SCB.
  */
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
index 28aaf34..40273a7 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg_def.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -1689,7 +1689,7 @@
 #define		PHY_START_CAL		0x01
 
 /*
- * HST_PCIX2 Registers, Addresss Range: (0x00-0xFC)
+ * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
  */
 #define PCIX_REG_BASE_ADR		0xB8040000
 
@@ -1802,7 +1802,7 @@
 #define PCIC_TP_CTRL	0xFC
 
 /*
- * EXSI Registers, Addresss Range: (0x00-0xFC)
+ * EXSI Registers, Address Range: (0x00-0xFC)
  */
 #define EXSI_REG_BASE_ADR		REG_BASE_ADDR_EXSI
 
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index c43698b..2959327 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -867,7 +867,7 @@
  * resources they have with this SCB, and then call this one at the
  * end of their timeout function.  To do this, one should initialize
  * the ascb->timer.{function, data, expires} prior to calling the post
- * funcion.  The timer is started by the post function.
+ * function. The timer is started by the post function.
  */
 void asd_ascb_timedout(unsigned long data)
 {
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 7437461..390168f 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -797,7 +797,7 @@
 		int j;
 		/* Start from Page 1 of Mode 0 and 1. */
 		moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
-		/* All the fields of page 1 can be intialized to 0. */
+		/* All the fields of page 1 can be initialized to 0. */
 		for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
 			asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
 	}
@@ -938,7 +938,7 @@
 	asd_write_reg_dword(asd_ha, SCBPRO, 0);
 	asd_write_reg_dword(asd_ha, CSEQCON, 0);
 
-	/* Intialize CSEQ Mode 11 Interrupt Vectors.
+	/* Initialize CSEQ Mode 11 Interrupt Vectors.
 	 * The addresses are 16 bit wide and in dword units.
 	 * The values of their macros are in byte units.
 	 * Thus we have to divide by 4. */
@@ -961,7 +961,7 @@
 	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
 
 	for (i = 0; i < 8; i++) {
-		/* Intialize Mode n Link m Interrupt Enable. */
+		/* Initialize Mode n Link m Interrupt Enable. */
 		asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
 		/* Initialize Mode n Request Mailbox. */
 		asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 475c31a..77b26f5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr.h
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: SCSI RAID Device Driver for
 **                ARECA RAID Host adapter
 *******************************************************************************
@@ -46,8 +46,12 @@
 struct device_attribute;
 /*The limit of outstanding scsi command that firmware can handle*/
 #define ARCMSR_MAX_OUTSTANDING_CMD						256
-#define ARCMSR_MAX_FREECCB_NUM							320
-#define ARCMSR_DRIVER_VERSION		     "Driver Version 1.20.00.15 2010/02/02"
+#ifdef CONFIG_XEN
+	#define ARCMSR_MAX_FREECCB_NUM	160
+#else
+	#define ARCMSR_MAX_FREECCB_NUM	320
+#endif
+#define ARCMSR_DRIVER_VERSION		     "Driver Version 1.20.00.15 2010/08/05"
 #define ARCMSR_SCSI_INITIATOR_ID						255
 #define ARCMSR_MAX_XFER_SECTORS							512
 #define ARCMSR_MAX_XFER_SECTORS_B						4096
@@ -60,7 +64,6 @@
 #define ARCMSR_MAX_HBB_POSTQUEUE						264
 #define ARCMSR_MAX_XFER_LEN							0x26000 /* 152K */
 #define ARCMSR_CDB_SG_PAGE_LENGTH						256 
-#define SCSI_CMD_ARECA_SPECIFIC						0xE1
 #ifndef PCI_DEVICE_ID_ARECA_1880
 #define PCI_DEVICE_ID_ARECA_1880 0x1880
  #endif
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c5..acdae33 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr_attr.c
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: attributes exported to sysfs and device host
 *******************************************************************************
 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 1cadcd6..984bd52 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr_hba.c
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: SCSI RAID Device Driver for
 **                ARECA RAID Host adapter
 *******************************************************************************
@@ -76,7 +76,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 static int sleeptime = 10;
-static int retrycount = 30;
+static int retrycount = 12;
 wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 					struct scsi_cmnd *cmd);
@@ -187,7 +187,6 @@
 		if (isleep > 0) {
 			msleep(isleep*1000);
 		}
-		printk(KERN_NOTICE "wake-up\n");
 		return 0;
 }
 
@@ -921,7 +920,6 @@
 }
 
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
-
 {
 	int id, lun;
 	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
@@ -948,7 +946,7 @@
 				, pCCB->startdone
 				, atomic_read(&acb->ccboutstandingcount));
 		  return;
-		}
+	}
 	arcmsr_report_ccb_state(acb, pCCB, error);
 }
 
@@ -981,7 +979,7 @@
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg = acb->pmuB;
 		/*clear all outbound posted Q*/
-		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
+		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
 			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
 				writel(0, &reg->done_qbuffer[i]);
@@ -1511,7 +1509,6 @@
 		arcmsr_drain_donequeue(acb, pCCB, error);
 	}
 }
-
 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t index;
@@ -2106,10 +2103,6 @@
 	if (atomic_read(&acb->ccboutstandingcount) >=
 			ARCMSR_MAX_OUTSTANDING_CMD)
 		return SCSI_MLQUEUE_HOST_BUSY;
-	if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
-		printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
-		return 0;
-	}
 	ccb = arcmsr_get_freeccb(acb);
 	if (!ccb)
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2393,6 +2386,7 @@
 	int index, rtn;
 	bool error;
 	polling_hbb_ccb_retry:
+
 	poll_count++;
 	/* clear doorbell interrupt */
 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -2663,6 +2657,7 @@
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2670,8 +2665,10 @@
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2682,15 +2679,18 @@
 {
 	struct MessageUnit_B __iomem *reg = acb->pmuB;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
 		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
-			atomic_set(&acb->rq_map_token,16);
+			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if(atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2701,6 +2701,7 @@
 {
 	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2708,8 +2709,10 @@
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
@@ -2897,6 +2900,8 @@
 	uint32_t intmask_org;
 	uint8_t rtnval = 0x00;
 	int i = 0;
+	unsigned long flags;
+
 	if (atomic_read(&acb->ccboutstandingcount) != 0) {
 		/* disable all outbound interrupt */
 		intmask_org = arcmsr_disable_outbound_ints(acb);
@@ -2907,7 +2912,12 @@
 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 			ccb = acb->pccb_pool[i];
 			if (ccb->startdone == ARCMSR_CCB_START) {
-				arcmsr_ccb_complete(ccb);
+				scsi_dma_unmap(ccb->pcmd);
+				ccb->startdone = ARCMSR_CCB_DONE;
+				ccb->ccb_flags = 0;
+				spin_lock_irqsave(&acb->ccblist_lock, flags);
+				list_add_tail(&ccb->list, &acb->ccb_free_list);
+				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
 			}
 		}
 		atomic_set(&acb->ccboutstandingcount, 0);
@@ -2920,8 +2930,7 @@
 
 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
 {
-	struct AdapterControlBlock *acb =
-		(struct AdapterControlBlock *)cmd->device->host->hostdata;
+	struct AdapterControlBlock *acb;
 	uint32_t intmask_org, outbound_doorbell;
 	int retry_count = 0;
 	int rtn = FAILED;
@@ -2971,31 +2980,16 @@
 				atomic_set(&acb->rq_map_token, 16);
 				atomic_set(&acb->ante_token_value, 16);
 				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
 				rtn = SUCCESS;
 				printk(KERN_ERR "arcmsr: scsi  bus reset eh returns with success\n");
 			} else {
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
-				if (atomic_read(&acb->rq_map_token) == 0) {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					init_timer(&acb->eternal_timer);
-						acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-					acb->eternal_timer.data = (unsigned long) acb;
-					acb->eternal_timer.function = &arcmsr_request_device_map;
-					add_timer(&acb->eternal_timer);
-				} else {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-				}
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 				rtn = SUCCESS;
 			}
 			break;
@@ -3007,21 +3001,10 @@
 				rtn = FAILED;
 			} else {
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
-				if (atomic_read(&acb->rq_map_token) == 0) {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					init_timer(&acb->eternal_timer);
-						acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-					acb->eternal_timer.data = (unsigned long) acb;
-					acb->eternal_timer.function = &arcmsr_request_device_map;
-					add_timer(&acb->eternal_timer);
-				} else {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-				}
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 				rtn = SUCCESS;
 			}
 			break;
@@ -3067,31 +3050,16 @@
 				atomic_set(&acb->rq_map_token, 16);
 				atomic_set(&acb->ante_token_value, 16);
 				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
 				rtn = SUCCESS;
 				printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
 			} else {
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
-				if (atomic_read(&acb->rq_map_token) == 0) {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					init_timer(&acb->eternal_timer);
-						acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-					acb->eternal_timer.data = (unsigned long) acb;
-					acb->eternal_timer.function = &arcmsr_request_device_map;
-					add_timer(&acb->eternal_timer);
-				} else {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-				}
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 				rtn = SUCCESS;
 			}
 			break;
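The arcmsr hunks above add a mod_timer() call in front of each early return in the per-adapter device-map polling helpers, so the 6-second poll keeps rearming even while the adapter is resetting or the token count reaches zero; previously those paths returned without rearming and the polling silently stopped. A compact sketch of the "re-arm on every exit path" rule, with an invented struct adapter and a rearm() stub standing in for mod_timer():

    #include <stdio.h>

    struct adapter {
        int resetting;      /* analogous to ACB_F_BUS_RESET / ACB_F_ABORT */
        int token;          /* analogous to rq_map_token */
        int timer_armed;
    };

    static void rearm(struct adapter *a)   /* stand-in for mod_timer(..., 6 * HZ) */
    {
        a->timer_armed = 1;
    }

    /* periodic callback: every path out of here must re-arm the timer,
     * otherwise the device-map polling simply stops */
    static void request_device_map(struct adapter *a)
    {
        a->timer_armed = 0;

        if (a->resetting) {
            rearm(a);       /* early return still re-arms */
            return;
        }
        if (--a->token == 0) {
            rearm(a);       /* early return still re-arms */
            return;
        }
        /* ... post the GET_CONFIG request to the firmware here ... */
        rearm(a);           /* normal path, as before the fix */
    }

    int main(void)
    {
        struct adapter a = { .resetting = 1, .token = 5 };

        request_device_map(&a);
        printf("timer re-armed on the early-return path: %d\n", a.timer_armed);
        return 0;
    }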
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 9c410b2..c0353cd 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1838,7 +1838,7 @@
 
 	case BFA_IOIM_SM_ABORT:
 		/*
-		 * IO is alraedy being cleaned up implicitly
+		 * IO is already being cleaned up implicitly
 		 */
 		ioim->io_cbfn = __bfa_cb_ioim_abort;
 		break;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 4e2eb92..43fa986b 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5646,7 +5646,7 @@
 	switch (status) {
 	case BFA_STATUS_OK:
 		/*
-		 * Initialiaze the V-Port fields
+		 * Initialize the V-Port fields
 		 */
 		__vport_fcid(vport) = vport->lps->lp_pid;
 		vport->vport_stats.fdisc_accepts++;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8f1b5c8..b0f8523 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3796,7 +3796,7 @@
  * adapter_add_device - Adds the device instance to the adaptor instance.
  *
  * @acb: The adapter device to be updated
- * @dcb: A newly created and intialised device instance to add.
+ * @dcb: A newly created and initialised device instance to add.
  **/
 static void adapter_add_device(struct AdapterCtlBlk *acb,
 		struct DeviceCtlBlk *dcb)
@@ -4498,7 +4498,7 @@
  * init_adapter - Grab the resource for the card, setup the adapter
  * information, set the card into a known state, create the various
  * tables etc etc. This basically gets all adapter information all up
- * to date, intialised and gets the chip in sync with it.
+ * to date, initialised and gets the chip in sync with it.
  *
  * @host:	This hosts adapter structure
  * @io_port:	The base I/O port
@@ -4789,7 +4789,7 @@
  * that it finds in the system. The pci_dev strcuture indicates which
  * instance we are being called from.
  * 
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
  * @id: Looks like a pointer to the entry in our pci device table
  * that was actually matched by the PCI subsystem.
  *
@@ -4860,7 +4860,7 @@
  * dc395x_remove_one - Called to remove a single instance of the
  * adapter.
  *
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
  **/
 static void __devexit dc395x_remove_one(struct pci_dev *dev)
 {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d3c5905..9c5c8be 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7515,16 +7515,10 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	volatile u32 int_reg;
-	int rc;
 
 	ENTER;
 	ioa_cfg->pdev->state_saved = true;
-	rc = pci_restore_state(ioa_cfg->pdev);
-
-	if (rc != PCIBIOS_SUCCESSFUL) {
-		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
-		return IPR_RC_JOB_CONTINUE;
-	}
+	pci_restore_state(ioa_cfg->pdev);
 
 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index cdc06cd..5962d1a 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1250,7 +1250,7 @@
 /**
  * fc_lun_reset() - Send a LUN RESET command to a device
  *		    and wait for the reply
- * @lport: The local port to sent the comand on
+ * @lport: The local port to send the command on
  * @fsp:   The FCP packet that identifies the LUN to be reset
  * @id:	   The SCSI command ID
  * @lun:   The LUN ID to be reset
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 5815cbe..9a7aaf5 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -646,6 +646,7 @@
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+	shost->host_eh_scheduled = 0;
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	SAS_DPRINTK("Enter %s\n", __func__);
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c06491b..3512abb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1335,7 +1335,7 @@
 }
 
 /**
- * lpfc_param_init - Intializes a cfg attribute
+ * lpfc_param_init - Initializes a cfg attribute
  *
  * Description:
  * Macro that given an attr e.g. hba_queue_depth expands
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f9f160a..bb01596 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2852,7 +2852,7 @@
 			if (unlikely(!fcf_record)) {
 				lpfc_printf_log(phba, KERN_ERR,
 					LOG_MBOX | LOG_SLI,
-					"2554 Could not allocate memmory for "
+					"2554 Could not allocate memory for "
 					"fcf record\n");
 				rc = -ENODEV;
 				goto out;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 462242d..6d0b36a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8071,7 +8071,7 @@
 	 * the HBA.
 	 */
 
-	/* HBA interrupt will be diabled after this call */
+	/* HBA interrupt will be disabled after this call */
 	lpfc_sli_hba_down(phba);
 	/* Stop kthread signal shall trigger work_done one more time */
 	kthread_stop(phba->worker_thread);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 634b2fe..a359d2b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -10172,7 +10172,7 @@
  * lpfc_sli4_queue_free - free a queue structure and associated memory
  * @queue: The queue structure to free.
  *
- * This function frees a queue structure and the DMAable memeory used for
+ * This function frees a queue structure and the DMAable memory used for
  * the host resident queue. This function must be called after destroying the
  * queue on the HBA.
  **/
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index f564474..8534119 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -13,7 +13,7 @@
  */
 
 /*
- * Comand coalescing - This feature allows the driver to be able to combine
+ * Command coalescing - This feature allows the driver to be able to combine
  * two or more commands and issue as one command in order to boost I/O
  * performance. Useful if the nature of the I/O is sequential. It is not very
  * useful for random natured I/Os.
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index a7008c0..25506c7 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -224,7 +224,7 @@
 {
 	int err;
 
-	/* inconsistant: mraid_mm_compat_ioctl doesn't take the BKL */
+	/* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
 	mutex_lock(&mraid_mm_mutex);
 	err = mraid_mm_ioctl(filep, cmd, arg);
 	mutex_unlock(&mraid_mm_mutex);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b2a8170..9ead039 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2176,9 +2176,9 @@
 		/* adjust hba_queue_depth, reply_free_queue_depth,
 		 * and queue_size
 		 */
-		ioc->hba_queue_depth -= queue_diff;
-		ioc->reply_free_queue_depth -= queue_diff;
-		queue_size -= queue_diff;
+		ioc->hba_queue_depth -= (queue_diff / 2);
+		ioc->reply_free_queue_depth -= (queue_diff / 2);
+		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
 	}
 	ioc->reply_post_queue_depth = queue_size;
 
@@ -3941,6 +3941,8 @@
 static void
 _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 {
+	mpt2sas_scsih_reset_handler(ioc, reset_phase);
+	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 	switch (reset_phase) {
 	case MPT2_IOC_PRE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -3971,8 +3973,6 @@
 		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
 		break;
 	}
-	mpt2sas_scsih_reset_handler(ioc, reset_phase);
-	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 }
 
 /**
@@ -4026,6 +4026,7 @@
 {
 	int r;
 	unsigned long flags;
+	u8 pe_complete = ioc->wait_for_port_enable_to_complete;
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -4068,6 +4069,14 @@
 	if (r)
 		goto out;
 	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+	/* If this hard reset is called while port enable is active, then
+	 * there is no reason to call make_ioc_operational
+	 */
+	if (pe_complete) {
+		r = -EFAULT;
+		goto out;
+	}
 	r = _base_make_ioc_operational(ioc, sleep_flag);
 	if (!r)
 		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index eda347c..5ded3db 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -819,7 +819,7 @@
 }
 
 /**
- * mptscsih_get_scsi_lookup - returns scmd entry
+ * _scsih_scsi_lookup_get - returns scmd entry
  * @ioc: per adapter object
  * @smid: system request message index
  *
@@ -832,6 +832,28 @@
 }
 
 /**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then clears the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+	unsigned long flags;
+	struct scsi_cmnd *scmd;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	scmd = ioc->scsi_lookup[smid - 1].scmd;
+	ioc->scsi_lookup[smid - 1].scmd = NULL;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+	return scmd;
+}
+
+/**
  * _scsih_scsi_lookup_find_by_scmd - scmd lookup
  * @ioc: per adapter object
  * @smid: system request message index
@@ -2981,9 +3003,6 @@
 	u16 handle;
 
 	for (i = 0 ; i < event_data->NumEntries; i++) {
-		if (event_data->PHY[i].PhyStatus &
-		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
-			continue;
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
 		if (!handle)
 			continue;
@@ -3210,7 +3229,7 @@
 	u16 count = 0;
 
 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
-		scmd = _scsih_scsi_lookup_get(ioc, smid);
+		scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 		if (!scmd)
 			continue;
 		count++;
@@ -3804,7 +3823,7 @@
 	u32 response_code = 0;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
-	scmd = _scsih_scsi_lookup_get(ioc, smid);
+	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 	if (scmd == NULL)
 		return 1;
 
@@ -5005,6 +5024,12 @@
 		     event_data);
 #endif
 
+	/* In MPI Revision K (0xC), the internal device reset complete was
+	 * implemented, so avoid setting tm_busy flag for older firmware.
+	 */
+	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+		return;
+
 	if (event_data->ReasonCode !=
 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
 	   event_data->ReasonCode !=
@@ -5099,6 +5124,7 @@
     struct fw_event_work *fw_event)
 {
 	struct scsi_cmnd *scmd;
+	struct scsi_device *sdev;
 	u16 smid, handle;
 	u32 lun;
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -5109,12 +5135,17 @@
 	Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
 #endif
 	u16 ioc_status;
+	unsigned long flags;
+	int r;
+
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
 	    "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
 	    event_data->PortWidth));
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
 
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	ioc->broadcast_aen_busy = 0;
 	termination_count = 0;
 	query_count = 0;
 	mpi_reply = ioc->tm_cmds.reply;
@@ -5122,7 +5153,8 @@
 		scmd = _scsih_scsi_lookup_get(ioc, smid);
 		if (!scmd)
 			continue;
-		sas_device_priv_data = scmd->device->hostdata;
+		sdev = scmd->device;
+		sas_device_priv_data = sdev->hostdata;
 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
 			continue;
 		 /* skip hidden raid components */
@@ -5138,6 +5170,7 @@
 		lun = sas_device_priv_data->lun;
 		query_count++;
 
+		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
 		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -5147,14 +5180,20 @@
 		    (mpi_reply->ResponseCode ==
 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
 		     mpi_reply->ResponseCode ==
-		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 			continue;
-
-		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
+		}
+		r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+		    scmd);
+		if (r == FAILED)
+			sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+			    "scmd(%p)\n", scmd);
 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	}
-	ioc->broadcast_aen_busy = 0;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
 	    "%s - exit, query_count = %d termination_count = %d\n",
@@ -6626,6 +6665,7 @@
 		destroy_workqueue(wq);
 
 	/* release all the volumes */
+	_scsih_ir_shutdown(ioc);
 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
 	    list) {
 		if (raid_device->starget) {
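The new _scsih_scsi_lookup_get_clear() above looks up the scmd for a smid and NULLs the table slot inside a single scsi_lookup_lock critical section, so a completing I/O and the reset/flush path can no longer both pick up the same command. The same fetch-and-clear idea in standalone C with a pthread mutex; struct lookup and the slot array are illustrative stand-ins, not the mpt2sas structures:

    #include <pthread.h>
    #include <stdio.h>

    #define DEPTH 4

    struct lookup {
        pthread_mutex_t lock;
        void *slot[DEPTH];          /* slot[smid - 1] holds the command pointer */
    };

    /* fetch the entry and clear it in one critical section: a second
     * caller for the same smid gets NULL instead of a stale pointer */
    static void *lookup_get_clear(struct lookup *l, unsigned int smid)
    {
        void *p;

        pthread_mutex_lock(&l->lock);
        p = l->slot[smid - 1];
        l->slot[smid - 1] = NULL;
        pthread_mutex_unlock(&l->lock);
        return p;
    }

    int main(void)
    {
        static struct lookup l = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int cmd = 42;

        l.slot[0] = &cmd;
        printf("first claim:  %p\n", lookup_get_clear(&l, 1));   /* the command */
        printf("second claim: %p\n", lookup_get_clear(&l, 1));   /* NULL */
        return 0;
    }

Whoever gets a non-NULL pointer back owns the command; a later caller simply sees NULL and skips it, which is how the flush loop above tolerates racing completions.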
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c86b2..b95285f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -603,7 +603,7 @@
 #endif
 
 intx:
-	/* intialize the INT-X interrupt */
+	/* initialize the INT-X interrupt */
 	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
 		SHOST_TO_SAS_HA(pm8001_ha->shost));
 	return rc;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 300d59f..321cf3a 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2228,12 +2228,7 @@
 		/* Once either bist or pci reset is done, restore PCI config
 		 * space. If this fails, proceed with hard reset again
 		 */
-		if (pci_restore_state(pinstance->pdev)) {
-			pmcraid_info("config-space error resetting again\n");
-			pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
-			pmcraid_reset_alert(cmd);
-			break;
-		}
+		pci_restore_state(pinstance->pdev);
 
 		/* fail all pending commands */
 		pmcraid_fail_outstanding_cmds(pinstance);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b5..d3e58d7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@
 {
 	struct Scsi_Host *host = rport_to_shost(rport);
 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+	unsigned long flags;
 
 	if (!fcport)
 		return;
@@ -1573,10 +1574,10 @@
 	 * Transport has effectively 'deleted' the rport, clear
 	 * all local references.
 	 */
-	spin_lock_irq(host->host_lock);
+	spin_lock_irqsave(host->host_lock, flags);
 	fcport->rport = fcport->drport = NULL;
 	*((fc_port_t **)rport->dd_data) = NULL;
-	spin_unlock_irq(host->host_lock);
+	spin_unlock_irqrestore(host->host_lock, flags);
 
 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
 		return;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a..d9479c3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@
 {
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
+	unsigned long flags;
 
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 	if (rport)
 		fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@
 	struct fc_rport_identifiers rport_ids;
 	struct fc_rport *rport;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@
 		    "Unable to allocate fc remote port!\n");
 		return;
 	}
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	*((fc_port_t **)rport->dd_data) = fcport;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
 	rport->supported_classes = fcport->supported_classes;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23..f27724d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@
 	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-			atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
 			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
@@ -2513,6 +2512,7 @@
 {
 	struct fc_rport *rport;
 	scsi_qla_host_t *base_vha;
+	unsigned long flags;
 
 	if (!fcport->rport)
 		return;
@@ -2520,9 +2520,9 @@
 	rport = fcport->rport;
 	if (defer) {
 		base_vha = pci_get_drvdata(vha->hw->pdev);
-		spin_lock_irq(vha->host->host_lock);
+		spin_lock_irqsave(vha->host->host_lock, flags);
 		fcport->drport = rport;
-		spin_unlock_irq(vha->host->host_lock);
+		spin_unlock_irqrestore(vha->host->host_lock, flags);
 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
 	} else
@@ -3282,10 +3282,10 @@
 
 	set_user_nice(current, -20);
 
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		__set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@
 		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
+		set_current_state(TASK_INTERRUPTIBLE);
 	} /* End of while(1) */
+	__set_current_state(TASK_RUNNING);
 
 	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b31093..a6b2d72 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@
 			    unsigned long long lba, unsigned int num, int write)
 {
 	int ret;
-	unsigned int block, rest = 0;
+	unsigned long long block, rest = 0;
 	int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
 	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 501f67b..fb2bb35 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@
 					&sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
+		__blk_run_queue(sdev->request_queue, false);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
@@ -1977,8 +1977,7 @@
  *		in.
  *
  *	Returns zero if unsuccessful or an error if TUR failed.  For
- *	removable media, a return of NOT_READY or UNIT_ATTENTION is
- *	translated to success, with the ->changed flag updated.
+ *	removable media, a UNIT_ATTENTION sets the ->changed flag.
  **/
 int
 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
@@ -2005,16 +2004,6 @@
 	} while (scsi_sense_valid(sshdr) &&
 		 sshdr->sense_key == UNIT_ATTENTION && --retries);
 
-	if (!sshdr)
-		/* could not allocate sense buffer, so can't process it */
-		return result;
-
-	if (sdev->removable && scsi_sense_valid(sshdr) &&
-	    (sshdr->sense_key == UNIT_ATTENTION ||
-	     sshdr->sense_key == NOT_READY)) {
-		sdev->changed = 1;
-		result = 0;
-	}
 	if (!sshdr_external)
 		kfree(sshdr);
 	return result;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index d53e650..a2ed201 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -477,7 +477,7 @@
 
 
 /**
- * scsi_netlink_init - Called by SCSI subsystem to intialize
+ * scsi_netlink_init - Called by SCSI subsystem to initialize
  * 	the SCSI transport netlink interface
  *
  **/
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4c68d36..490ce21 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -864,13 +864,15 @@
 
 	error = device_add(&sdev->sdev_gendev);
 	if (error) {
-		printk(KERN_INFO "error 1\n");
+		sdev_printk(KERN_INFO, sdev,
+				"failed to add device: %d\n", error);
 		return error;
 	}
 	device_enable_async_suspend(&sdev->sdev_dev);
 	error = device_add(&sdev->sdev_dev);
 	if (error) {
-		printk(KERN_INFO "error 2\n");
+		sdev_printk(KERN_INFO, sdev,
+				"failed to add class device: %d\n", error);
 		device_del(&sdev->sdev_gendev);
 		return error;
 	}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01b..5c3ccfc 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@
 		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q);
+	__blk_run_queue(rport->rqst_q, false);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
 	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 365024b0..e567302 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -990,30 +990,51 @@
 
 static void set_media_not_present(struct scsi_disk *sdkp)
 {
-	sdkp->media_present = 0;
-	sdkp->capacity = 0;
-	sdkp->device->changed = 1;
+	if (sdkp->media_present)
+		sdkp->device->changed = 1;
+
+	if (sdkp->device->removable) {
+		sdkp->media_present = 0;
+		sdkp->capacity = 0;
+	}
+}
+
+static int media_not_present(struct scsi_disk *sdkp,
+			     struct scsi_sense_hdr *sshdr)
+{
+	if (!scsi_sense_valid(sshdr))
+		return 0;
+
+	/* not invoked for commands that could return deferred errors */
+	switch (sshdr->sense_key) {
+	case UNIT_ATTENTION:
+	case NOT_READY:
+		/* medium not present */
+		if (sshdr->asc == 0x3A) {
+			set_media_not_present(sdkp);
+			return 1;
+		}
+	}
+	return 0;
 }
 
 /**
- *	sd_media_changed - check if our medium changed
- *	@disk: kernel device descriptor 
+ *	sd_check_events - check media events
+ *	@disk: kernel device descriptor
+ *	@clearing: disk events currently being cleared
  *
- *	Returns 0 if not applicable or no change; 1 if change
+ *	Returns mask of DISK_EVENT_*.
  *
  *	Note: this function is invoked from the block subsystem.
  **/
-static int sd_media_changed(struct gendisk *disk)
+static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_sense_hdr *sshdr = NULL;
 	int retval;
 
-	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
-
-	if (!sdp->removable)
-		return 0;
+	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
 
 	/*
 	 * If the device is offline, don't send any commands - just pretend as
@@ -1043,48 +1064,32 @@
 					      sshdr);
 	}
 
-	/*
-	 * Unable to test, unit probably not ready.   This usually
-	 * means there is no disc in the drive.  Mark as changed,
-	 * and we will figure it out later once the drive is
-	 * available again.
-	 */
-	if (retval || (scsi_sense_valid(sshdr) &&
-		       /* 0x3a is medium not present */
-		       sshdr->asc == 0x3a)) {
+	/* failed to execute TUR, assume media not present */
+	if (host_byte(retval)) {
 		set_media_not_present(sdkp);
 		goto out;
 	}
 
+	if (media_not_present(sdkp, sshdr))
+		goto out;
+
 	/*
 	 * For removable scsi disk we have to recognise the presence
-	 * of a disk in the drive. This is kept in the struct scsi_disk
-	 * struct and tested at open !  Daniel Roche (dan@lectra.fr)
+	 * of a disk in the drive.
 	 */
+	if (!sdkp->media_present)
+		sdp->changed = 1;
 	sdkp->media_present = 1;
-
 out:
 	/*
-	 * Report a media change under the following conditions:
+	 * sdp->changed is set under the following conditions:
 	 *
-	 *	Medium is present now and wasn't present before.
-	 *	Medium wasn't present before and is present now.
-	 *	Medium was present at all times, but it changed while
-	 *		we weren't looking (sdp->changed is set).
-	 *
-	 * If there was no medium before and there is no medium now then
-	 * don't report a change, even if a medium was inserted and removed
-	 * while we weren't looking.
+	 *	Medium present state has changed in either direction.
+	 *	Device has indicated UNIT_ATTENTION.
 	 */
-	retval = (sdkp->media_present != sdkp->previous_state ||
-			(sdkp->media_present && sdp->changed));
-	if (retval)
-		sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
-	sdkp->previous_state = sdkp->media_present;
-
-	/* sdp->changed indicates medium was changed or is not present */
-	sdp->changed = !sdkp->media_present;
 	kfree(sshdr);
+	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+	sdp->changed = 0;
 	return retval;
 }
 
@@ -1177,7 +1182,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= sd_compat_ioctl,
 #endif
-	.media_changed		= sd_media_changed,
+	.check_events		= sd_check_events,
 	.revalidate_disk	= sd_revalidate_disk,
 	.unlock_native_capacity	= sd_unlock_native_capacity,
 };
@@ -1320,23 +1325,6 @@
 	return good_bytes;
 }
 
-static int media_not_present(struct scsi_disk *sdkp,
-			     struct scsi_sense_hdr *sshdr)
-{
-
-	if (!scsi_sense_valid(sshdr))
-		return 0;
-	/* not invoked for commands that could return deferred errors */
-	if (sshdr->sense_key != NOT_READY &&
-	    sshdr->sense_key != UNIT_ATTENTION)
-		return 0;
-	if (sshdr->asc != 0x3A) /* medium not present */
-		return 0;
-
-	set_media_not_present(sdkp);
-	return 1;
-}
-
 /*
  * spinup disk - called only in sd_revalidate_disk()
  */
@@ -1511,7 +1499,7 @@
 	 */
 	if (sdp->removable &&
 	    sense_valid && sshdr->sense_key == NOT_READY)
-		sdp->changed = 1;
+		set_media_not_present(sdkp);
 
 	/*
 	 * We used to set media_present to 0 here to indicate no media
@@ -2397,8 +2385,10 @@
 
 	gd->driverfs_dev = &sdp->sdev_gendev;
 	gd->flags = GENHD_FL_EXT_DEVT;
-	if (sdp->removable)
+	if (sdp->removable) {
 		gd->flags |= GENHD_FL_REMOVABLE;
+		gd->events |= DISK_EVENT_MEDIA_CHANGE;
+	}
 
 	add_disk(gd);
 	sd_dif_config_host(sdkp);
@@ -2480,7 +2470,6 @@
 	sdkp->disk = gd;
 	sdkp->index = index;
 	atomic_set(&sdkp->openers, 0);
-	sdkp->previous_state = 1;
 
 	if (!sdp->request_queue->rq_timeout) {
 		if (sdp->type != TYPE_MOD)
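The sd.c conversion above swaps the boolean ->media_changed hook for ->check_events: sd_check_events() returns a DISK_EVENT_* mask, derives "changed" from a presence transition in either direction or a pending sdp->changed flag, and clears that flag once the event has been reported. A reduced model of that event-mask bookkeeping in standalone C; struct disk is invented for the sketch and probe_medium() stands in for the TEST UNIT READY round trip:

    #include <stdio.h>

    #define DISK_EVENT_MEDIA_CHANGE 0x1u

    struct disk {
        int media_present;      /* what we reported last time */
        int changed;            /* set by unit-attention style notifications */
    };

    /* stand-in for the TEST UNIT READY probe: 1 if a medium is in the drive */
    static int probe_medium(void)
    {
        return 1;
    }

    static unsigned int check_events(struct disk *d)
    {
        unsigned int events = 0;
        int present = probe_medium();

        /* a presence transition in either direction counts as a change */
        if (present != d->media_present)
            d->changed = 1;
        d->media_present = present;

        if (d->changed) {
            events |= DISK_EVENT_MEDIA_CHANGE;
            d->changed = 0;     /* the event is consumed once reported */
        }
        return events;
    }

    int main(void)
    {
        struct disk d = { .media_present = 0 };

        printf("first poll:  0x%x\n", check_events(&d));   /* reports a change */
        printf("second poll: 0x%x\n", check_events(&d));   /* nothing new */
        return 0;
    }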
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 55488fa..c9d8f6c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -55,7 +55,6 @@
 	u8		media_present;
 	u8		write_prot;
 	u8		protection_type;/* Data Integrity Field */
-	unsigned	previous_state : 1;
 	unsigned	ATO : 1;	/* state of disk ATO bit */
 	unsigned	WCE : 1;	/* state of disk WCE bit */
 	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d7b383c..aefadc6 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -104,14 +104,15 @@
 static void get_sectorsize(struct scsi_cd *);
 static void get_capabilities(struct scsi_cd *);
 
-static int sr_media_change(struct cdrom_device_info *, int);
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+				    unsigned int clearing, int slot);
 static int sr_packet(struct cdrom_device_info *, struct packet_command *);
 
 static struct cdrom_device_ops sr_dops = {
 	.open			= sr_open,
 	.release	 	= sr_release,
 	.drive_status	 	= sr_drive_status,
-	.media_changed		= sr_media_change,
+	.check_events		= sr_check_events,
 	.tray_move		= sr_tray_move,
 	.lock_door		= sr_lock_door,
 	.select_speed		= sr_select_speed,
@@ -165,90 +166,92 @@
 	mutex_unlock(&sr_ref_mutex);
 }
 
-/* identical to scsi_test_unit_ready except that it doesn't
- * eat the NOT_READY returns for removable media */
-int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
+static unsigned int sr_get_events(struct scsi_device *sdev)
 {
-	int retries = MAX_RETRIES;
-	int the_result;
-	u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+	u8 buf[8];
+	u8 cmd[] = { GET_EVENT_STATUS_NOTIFICATION,
+		     1,			/* polled */
+		     0, 0,		/* reserved */
+		     1 << 4,		/* notification class: media */
+		     0, 0,		/* reserved */
+		     0, sizeof(buf),	/* allocation length */
+		     0,			/* control */
+	};
+	struct event_header *eh = (void *)buf;
+	struct media_event_desc *med = (void *)(buf + 4);
+	struct scsi_sense_hdr sshdr;
+	int result;
 
-	/* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
-	 * conditions are gone, or a timeout happens
-	 */
-	do {
-		the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
-					      0, sshdr, SR_TIMEOUT,
-					      retries--, NULL);
-		if (scsi_sense_valid(sshdr) &&
-		    sshdr->sense_key == UNIT_ATTENTION)
-			sdev->changed = 1;
+	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, sizeof(buf),
+				  &sshdr, SR_TIMEOUT, MAX_RETRIES, NULL);
+	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
+		return DISK_EVENT_MEDIA_CHANGE;
 
-	} while (retries > 0 &&
-		 (!scsi_status_is_good(the_result) ||
-		  (scsi_sense_valid(sshdr) &&
-		   sshdr->sense_key == UNIT_ATTENTION)));
-	return the_result;
+	if (result || be16_to_cpu(eh->data_len) < sizeof(*med))
+		return 0;
+
+	if (eh->nea || eh->notification_class != 0x4)
+		return 0;
+
+	if (med->media_event_code == 1)
+		return DISK_EVENT_EJECT_REQUEST;
+	else if (med->media_event_code == 2)
+		return DISK_EVENT_MEDIA_CHANGE;
+	return 0;
 }
 
 /*
- * This function checks to see if the media has been changed in the
- * CDROM drive.  It is possible that we have already sensed a change,
- * or the drive may have sensed one and not yet reported it.  We must
- * be ready for either case. This function always reports the current
- * value of the changed bit.  If flag is 0, then the changed bit is reset.
- * This function could be done as an ioctl, but we would need to have
- * an inode for that to work, and we do not always have one.
+ * This function checks to see if the media has been changed or eject
+ * button has been pressed.  It is possible that we have already
+ * sensed a change, or the drive may have sensed one and not yet
+ * reported it.  The past events are accumulated in sdev->changed and
+ * returned together with the current state.
  */
-
-static int sr_media_change(struct cdrom_device_info *cdi, int slot)
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+				    unsigned int clearing, int slot)
 {
 	struct scsi_cd *cd = cdi->handle;
-	int retval;
-	struct scsi_sense_hdr *sshdr;
+	bool last_present;
+	struct scsi_sense_hdr sshdr;
+	unsigned int events;
+	int ret;
 
-	if (CDSL_CURRENT != slot) {
-		/* no changer support */
-		return -EINVAL;
+	/* no changer support */
+	if (CDSL_CURRENT != slot)
+		return 0;
+
+	events = sr_get_events(cd->device);
+	/*
+	 * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE
+	 * is being cleared.  Note that there are devices which hang
+	 * if asked to execute TUR repeatedly.
+	 */
+	if (!(clearing & DISK_EVENT_MEDIA_CHANGE))
+		goto skip_tur;
+
+	/* let's see whether the media is there with TUR */
+	last_present = cd->media_present;
+	ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
+
+	/*
+	 * Media is considered to be present if TUR succeeds or fails with
+	 * sense data indicating something other than media-not-present
+	 * (ASC 0x3a).
+	 */
+	cd->media_present = scsi_status_is_good(ret) ||
+		(scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a);
+
+	if (last_present != cd->media_present)
+		events |= DISK_EVENT_MEDIA_CHANGE;
+skip_tur:
+	if (cd->device->changed) {
+		events |= DISK_EVENT_MEDIA_CHANGE;
+		cd->device->changed = 0;
 	}
 
-	sshdr =  kzalloc(sizeof(*sshdr), GFP_KERNEL);
-	retval = sr_test_unit_ready(cd->device, sshdr);
-	if (retval || (scsi_sense_valid(sshdr) &&
-		       /* 0x3a is medium not present */
-		       sshdr->asc == 0x3a)) {
-		/* Media not present or unable to test, unit probably not
-		 * ready. This usually means there is no disc in the drive.
-		 * Mark as changed, and we will figure it out later once
-		 * the drive is available again.
-		 */
-		cd->device->changed = 1;
-		/* This will force a flush, if called from check_disk_change */
-		retval = 1;
-		goto out;
-	};
-
-	retval = cd->device->changed;
-	cd->device->changed = 0;
-	/* If the disk changed, the capacity will now be different,
-	 * so we force a re-read of this information */
-	if (retval) {
-		/* check multisession offset etc */
-		sr_cd_check(cdi);
-		get_sectorsize(cd);
-	}
-
-out:
-	/* Notify userspace, that media has changed. */
-	if (retval != cd->previous_state)
-		sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
-				     GFP_KERNEL);
-	cd->previous_state = retval;
-	kfree(sshdr);
-
-	return retval;
+	return events;
 }
- 
+
 /*
  * sr_done is the interrupt routine for the device driver.
  *
@@ -533,10 +536,25 @@
 	return ret;
 }
 
-static int sr_block_media_changed(struct gendisk *disk)
+static unsigned int sr_block_check_events(struct gendisk *disk,
+					  unsigned int clearing)
 {
 	struct scsi_cd *cd = scsi_cd(disk);
-	return cdrom_media_changed(&cd->cdi);
+	return cdrom_check_events(&cd->cdi, clearing);
+}
+
+static int sr_block_revalidate_disk(struct gendisk *disk)
+{
+	struct scsi_cd *cd = scsi_cd(disk);
+	struct scsi_sense_hdr sshdr;
+
+	/* if the unit is not ready, nothing more to do */
+	if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+		return 0;
+
+	sr_cd_check(&cd->cdi);
+	get_sectorsize(cd);
+	return 0;
 }
 
 static const struct block_device_operations sr_bdops =
@@ -545,7 +563,8 @@
 	.open		= sr_block_open,
 	.release	= sr_block_release,
 	.ioctl		= sr_block_ioctl,
-	.media_changed	= sr_block_media_changed,
+	.check_events	= sr_block_check_events,
+	.revalidate_disk = sr_block_revalidate_disk,
 	/* 
 	 * No compat_ioctl for now because sr_block_ioctl never
 	 * seems to pass arbitary ioctls down to host drivers.
@@ -618,6 +637,7 @@
 	sprintf(disk->disk_name, "sr%d", minor);
 	disk->fops = &sr_bdops;
 	disk->flags = GENHD_FL_CD;
+	disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
 
 	blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
 
@@ -627,7 +647,7 @@
 	cd->disk = disk;
 	cd->capacity = 0x1fffff;
 	cd->device->changed = 1;	/* force recheck CD type */
-	cd->previous_state = 1;
+	cd->media_present = 1;
 	cd->use = 1;
 	cd->readcd_known = 0;
 	cd->readcd_cdda = 0;
@@ -780,7 +800,7 @@
 	}
 
 	/* eat unit attentions */
-	sr_test_unit_ready(cd->device, &sshdr);
+	scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
 
 	/* ask for mode page 0x2a */
 	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
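sr_get_events() above issues GET EVENT STATUS NOTIFICATION with the media class selected and, after checking the reported data length, the NEA bit and the notification class, maps media_event_code 1 and 2 onto DISK_EVENT_EJECT_REQUEST and DISK_EVENT_MEDIA_CHANGE. A standalone parser for such an 8-byte reply, following the layout the hunk relies on (header at offset 0, media descriptor at offset 4); the structs and field names below are illustrative, not the kernel's cdrom definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DISK_EVENT_MEDIA_CHANGE  0x1u
    #define DISK_EVENT_EJECT_REQUEST 0x2u

    /* 4-byte event header followed by a 4-byte media event descriptor,
     * matching the 8-byte buffer the driver allocates */
    struct gesn_header {
        uint8_t data_len[2];        /* big endian */
        uint8_t nea_class;          /* bit 7: NEA, bits 2..0: class */
        uint8_t supported_classes;
    };

    struct gesn_media_desc {
        uint8_t event_code;         /* low nibble: 1 = eject request, 2 = new media */
        uint8_t status;
        uint8_t start_slot;
        uint8_t end_slot;
    };

    static unsigned int parse_gesn(const uint8_t *buf, size_t len)
    {
        struct gesn_header h;
        struct gesn_media_desc d;
        uint16_t data_len;

        if (len < sizeof(h) + sizeof(d))
            return 0;
        memcpy(&h, buf, sizeof(h));
        memcpy(&d, buf + sizeof(h), sizeof(d));

        data_len = (uint16_t)((h.data_len[0] << 8) | h.data_len[1]);
        if (data_len < sizeof(d))
            return 0;                     /* no descriptor reported */
        if ((h.nea_class & 0x80) || (h.nea_class & 0x07) != 0x04)
            return 0;                     /* NEA set, or not the media class */

        switch (d.event_code & 0x0f) {
        case 1: return DISK_EVENT_EJECT_REQUEST;
        case 2: return DISK_EVENT_MEDIA_CHANGE;
        default: return 0;
        }
    }

    int main(void)
    {
        /* data_len = 6, media class (4), event code 2 = new media */
        const uint8_t reply[8] = { 0x00, 0x06, 0x04, 0x10, 0x02, 0x02, 0x00, 0x00 };

        printf("events: 0x%x\n", parse_gesn(reply, sizeof(reply)));
        return 0;
    }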
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 1e144df..e036f1d 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -40,7 +40,7 @@
 	unsigned xa_flag:1;	/* CD has XA sectors ? */
 	unsigned readcd_known:1;	/* drive supports READ_CD (0xbe) */
 	unsigned readcd_cdda:1;	/* reading audio data using READ_CD */
-	unsigned previous_state:1;	/* media has changed */
+	unsigned media_present:1;	/* media is present */
 	struct cdrom_device_info cdi;
 	/* We hold gendisk and scsi_device references on probe and use
 	 * the refs on this kref to decide when to release them */
@@ -61,7 +61,6 @@
 int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
 
 int sr_is_xa(Scsi_CD *);
-int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr);
 
 /* sr_vendor.c */
 void sr_vendor_init(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 3cd8ffb..8be3055 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -307,7 +307,7 @@
 		/* we have no changer support */
 		return -EINVAL;
 	}
-	if (0 == sr_test_unit_ready(cd->device, &sshdr))
+	if (!scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
 		return CDS_DISC_OK;
 
 	/* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 6b97ded..b4543f5 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1866,7 +1866,7 @@
  *
  * This routine is similar to sym_set_workarounds(), except
  * that, at this point, we already know that the device was
- * successfully intialized at least once before, and so most
+ * successfully initialized at least once before, and so most
  * of the steps taken there are un-needed here.
  */
 static void sym2_reset_workarounds(struct pci_dev *pdev)
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index ceba593..04113e5 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -101,7 +101,7 @@
 		return NULL;
 
 	if (sfi_use_ioremap)
-		return ioremap(phys, size);
+		return ioremap_cache(phys, size);
 	else
 		return early_ioremap(phys, size);
 }
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index de885a0..f33e2dd 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -173,7 +173,8 @@
 	return 0;
 }
 
-#define VALID(x) (x | 0x80)
+#define SENSE_VALID_FLAG 0x80
+#define VALID(x) ((x) | SENSE_VALID_FLAG)
 
 static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
 	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
@@ -201,7 +202,8 @@
 	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
 	if (ihp) {
 		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
-		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
+		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
+						    value & ~SENSE_VALID_FLAG);
 	}
 
 	return 0;
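The intc change above names the 0x80 marker SENSE_VALID_FLAG and masks it off before the value reaches the hardware register; the flag only tells a populated table entry apart from an all-zero slot. A tiny standalone illustration of a lookup table tagged with a validity bit that must be stripped on use (the register write is just a printf here):

    #include <stdio.h>

    #define SENSE_VALID_FLAG 0x80
    #define VALID(x)         ((x) | SENSE_VALID_FLAG)

    /* table indexed by trigger type; an all-zero entry means "unsupported" */
    static const unsigned char sense_table[4] = {
        [0] = VALID(0),     /* falling edge -> hardware value 0 */
        [1] = VALID(1),     /* rising edge  -> hardware value 1 */
        [2] = VALID(2),     /* low level    -> hardware value 2 */
    };

    static int set_sense(unsigned int type)
    {
        unsigned char value = sense_table[type];

        if (!(value & SENSE_VALID_FLAG))
            return -1;                 /* no entry for this trigger type */

        /* the flag is bookkeeping only: mask it off before "writing" */
        printf("write 0x%x to the sense register\n", value & ~SENSE_VALID_FLAG);
        return 0;
    }

    int main(void)
    {
        set_sense(1);   /* writes 0x1, not 0x81 */
        set_sense(3);   /* rejected: empty table slot */
        return 0;
    }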
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1906840..bb233a9c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -53,6 +53,14 @@
 
 comment "SPI Master Controller Drivers"
 
+config SPI_ATH79
+	tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
+	depends on ATH79 && GENERIC_GPIO
+	select SPI_BITBANG
+	help
+	  This enables support for the SPI controller present on the
+	  Atheros AR71XX/AR724X/AR913X SoCs.
+
 config SPI_ATMEL
 	tristate "Atmel SPI Controller"
 	depends on (ARCH_AT91 || AVR32)
@@ -156,10 +164,10 @@
 	def_bool y if ARCH_MX31
 
 config SPI_IMX_VER_0_7
-	def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51
+	def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
 
 config SPI_IMX_VER_2_3
-	def_bool y if ARCH_MX51
+	def_bool y if ARCH_MX51 || ARCH_MX53
 
 config SPI_IMX
 	tristate "Freescale i.MX SPI controllers"
@@ -310,8 +318,8 @@
 
 config SPI_S3C64XX
 	tristate "Samsung S3C64XX series type SPI"
-	depends on ARCH_S3C64XX && EXPERIMENTAL
-	select S3C64XX_DMA
+	depends on (ARCH_S3C64XX || ARCH_S5P64X0)
+	select S3C64XX_DMA if ARCH_S3C64XX
 	help
 	  SPI driver for Samsung S3C64XX and newer SoCs.
 
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3a42463..86d1b5f 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@
 
 # SPI master controller drivers (bus)
 obj-$(CONFIG_SPI_ATMEL)			+= atmel_spi.o
+obj-$(CONFIG_SPI_ATH79)			+= ath79_spi.o
 obj-$(CONFIG_SPI_BFIN)			+= spi_bfin5xx.o
 obj-$(CONFIG_SPI_BITBANG)		+= spi_bitbang.o
 obj-$(CONFIG_SPI_AU1550)		+= au1550_spi.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index a2a5921..71a1219 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1795,7 +1795,7 @@
 {
 	struct pl022_config_chip const *chip_info;
 	struct chip_data *chip;
-	struct ssp_clock_params clk_freq;
+	struct ssp_clock_params clk_freq = {0, };
 	int status = 0;
 	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
 	unsigned int bits = spi->bits_per_word;
diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/ath79_spi.c
new file mode 100644
index 0000000..fcff810
--- /dev/null
+++ b/drivers/spi/ath79_spi.c
@@ -0,0 +1,292 @@
+/*
+ * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs
+ *
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This driver is based on spi-gpio.c:
+ *	Copyright (C) 2006,2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79_spi_platform.h>
+
+#define DRV_NAME	"ath79-spi"
+
+struct ath79_spi {
+	struct spi_bitbang	bitbang;
+	u32			ioc_base;
+	u32			reg_ctrl;
+	void __iomem		*base;
+};
+
+static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
+{
+	return ioread32(sp->base + reg);
+}
+
+static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val)
+{
+	iowrite32(val, sp->base + reg);
+}
+
+static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
+{
+	return spi_master_get_devdata(spi->master);
+}
+
+static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
+{
+	struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+	int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
+
+	if (is_active) {
+		/* set initial clock polarity */
+		if (spi->mode & SPI_CPOL)
+			sp->ioc_base |= AR71XX_SPI_IOC_CLK;
+		else
+			sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
+
+		ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+	}
+
+	if (spi->chip_select) {
+		struct ath79_spi_controller_data *cdata = spi->controller_data;
+
+		/* SPI is normally active-low */
+		gpio_set_value(cdata->gpio, cs_high);
+	} else {
+		if (cs_high)
+			sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+		else
+			sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+
+		ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+	}
+
+}
+
+static int ath79_spi_setup_cs(struct spi_device *spi)
+{
+	struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+	struct ath79_spi_controller_data *cdata;
+
+	cdata = spi->controller_data;
+	if (spi->chip_select && !cdata)
+		return -EINVAL;
+
+	/* enable GPIO mode */
+	ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+	/* save CTRL register */
+	sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
+	sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
+
+	/* TODO: setup speed? */
+	ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
+
+	if (spi->chip_select) {
+		int status = 0;
+
+		status = gpio_request(cdata->gpio, dev_name(&spi->dev));
+		if (status)
+			return status;
+
+		status = gpio_direction_output(cdata->gpio,
+					       spi->mode & SPI_CS_HIGH);
+		if (status) {
+			gpio_free(cdata->gpio);
+			return status;
+		}
+	} else {
+		if (spi->mode & SPI_CS_HIGH)
+			sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+		else
+			sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+		ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+	}
+
+	return 0;
+}
+
+static void ath79_spi_cleanup_cs(struct spi_device *spi)
+{
+	struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+
+	if (spi->chip_select) {
+		struct ath79_spi_controller_data *cdata = spi->controller_data;
+		gpio_free(cdata->gpio);
+	}
+
+	/* restore CTRL register */
+	ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
+	/* disable GPIO mode */
+	ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+}
+
+static int ath79_spi_setup(struct spi_device *spi)
+{
+	int status = 0;
+
+	if (spi->bits_per_word > 32)
+		return -EINVAL;
+
+	if (!spi->controller_state) {
+		status = ath79_spi_setup_cs(spi);
+		if (status)
+			return status;
+	}
+
+	status = spi_bitbang_setup(spi);
+	if (status && !spi->controller_state)
+		ath79_spi_cleanup_cs(spi);
+
+	return status;
+}
+
+static void ath79_spi_cleanup(struct spi_device *spi)
+{
+	ath79_spi_cleanup_cs(spi);
+	spi_bitbang_cleanup(spi);
+}
+
+static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
+			       u32 word, u8 bits)
+{
+	struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+	u32 ioc = sp->ioc_base;
+
+	/* clock starts at inactive polarity */
+	for (word <<= (32 - bits); likely(bits); bits--) {
+		u32 out;
+
+		if (word & (1 << 31))
+			out = ioc | AR71XX_SPI_IOC_DO;
+		else
+			out = ioc & ~AR71XX_SPI_IOC_DO;
+
+		/* setup MSB (to slave) on trailing edge */
+		ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+		ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
+
+		word <<= 1;
+	}
+
+	return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
+}
+
+static __devinit int ath79_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct ath79_spi *sp;
+	struct ath79_spi_platform_data *pdata;
+	struct resource	*r;
+	int ret;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(*sp));
+	if (master == NULL) {
+		dev_err(&pdev->dev, "failed to allocate spi master\n");
+		return -ENOMEM;
+	}
+
+	sp = spi_master_get_devdata(master);
+	platform_set_drvdata(pdev, sp);
+
+	pdata = pdev->dev.platform_data;
+
+	master->setup = ath79_spi_setup;
+	master->cleanup = ath79_spi_cleanup;
+	if (pdata) {
+		master->bus_num = pdata->bus_num;
+		master->num_chipselect = pdata->num_chipselect;
+	} else {
+		master->bus_num = -1;
+		master->num_chipselect = 1;
+	}
+
+	sp->bitbang.master = spi_master_get(master);
+	sp->bitbang.chipselect = ath79_spi_chipselect;
+	sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
+	sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
+	sp->bitbang.flags = SPI_CS_HIGH;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		ret = -ENOENT;
+		goto err_put_master;
+	}
+
+	sp->base = ioremap(r->start, r->end - r->start + 1);
+	if (!sp->base) {
+		ret = -ENXIO;
+		goto err_put_master;
+	}
+
+	ret = spi_bitbang_start(&sp->bitbang);
+	if (ret)
+		goto err_unmap;
+
+	return 0;
+
+err_unmap:
+	iounmap(sp->base);
+err_put_master:
+	platform_set_drvdata(pdev, NULL);
+	spi_master_put(sp->bitbang.master);
+
+	return ret;
+}
+
+static __devexit int ath79_spi_remove(struct platform_device *pdev)
+{
+	struct ath79_spi *sp = platform_get_drvdata(pdev);
+
+	spi_bitbang_stop(&sp->bitbang);
+	iounmap(sp->base);
+	platform_set_drvdata(pdev, NULL);
+	spi_master_put(sp->bitbang.master);
+
+	return 0;
+}
+
+static struct platform_driver ath79_spi_driver = {
+	.probe		= ath79_spi_probe,
+	.remove		= __devexit_p(ath79_spi_remove),
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static __init int ath79_spi_init(void)
+{
+	return platform_driver_register(&ath79_spi_driver);
+}
+module_init(ath79_spi_init);
+
+static __exit void ath79_spi_exit(void)
+{
+	platform_driver_unregister(&ath79_spi_driver);
+}
+module_exit(ath79_spi_exit);
+
+MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index a067046..1a478bf 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -341,9 +341,9 @@
 /*
  * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
  *  - The buffer is either valid for CPU access, else NULL
- *  - If the buffer is valid, so is its DMA addresss
+ *  - If the buffer is valid, so is its DMA address
  *
- * This driver manages the dma addresss unless message->is_dma_mapped.
+ * This driver manages the dma address unless message->is_dma_mapped.
  */
 static int
 atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
index db35bd9..2fa012c 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/dw_spi_mmio.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -68,8 +69,8 @@
 	}
 
 	dwsmmio->clk = clk_get(&pdev->dev, NULL);
-	if (!dwsmmio->clk) {
-		ret = -ENODEV;
+	if (IS_ERR(dwsmmio->clk)) {
+		ret = PTR_ERR(dwsmmio->clk);
 		goto err_irq;
 	}
 	clk_enable(dwsmmio->clk);
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 351d8a375..19752b0 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
 #include <linux/of_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 
-struct awesome_struct {
+struct ce4100_info {
 	struct ssp_device ssp;
-	struct platform_device spi_pdev;
-	struct pxa2xx_spi_master spi_pdata;
+	struct platform_device *spi_pdev;
 };
 
 static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@
 }
 EXPORT_SYMBOL_GPL(pxa_ssp_free);
 
-static void plat_dev_release(struct device *dev)
-{
-	struct awesome_struct *as = container_of(dev,
-			struct awesome_struct, spi_pdev.dev);
-
-	of_device_node_put(&as->spi_pdev.dev);
-}
-
 static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 		const struct pci_device_id *ent)
 {
 	int ret;
 	resource_size_t phys_beg;
 	resource_size_t phys_len;
-	struct awesome_struct *spi_info;
+	struct ce4100_info *spi_info;
 	struct platform_device *pdev;
-	struct pxa2xx_spi_master *spi_pdata;
+	struct pxa2xx_spi_master spi_pdata;
 	struct ssp_device *ssp;
 
 	ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@
 		return ret;
 	}
 
+	pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
 	spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
-	if (!spi_info) {
+	if (!pdev || !spi_info) {
 		ret = -ENOMEM;
-		goto err_kz;
+		goto err_nomem;
 	}
-	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-	spi_pdata =  &spi_info->spi_pdata;
+	memset(&spi_pdata, 0, sizeof(spi_pdata));
+	spi_pdata.num_chipselect = dev->devfn;
 
-	pdev->name = "pxa2xx-spi";
-	pdev->id = dev->devfn;
+	ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+	if (ret)
+		goto err_nomem;
+
 	pdev->dev.parent = &dev->dev;
-	pdev->dev.platform_data = &spi_info->spi_pdata;
-
 #ifdef CONFIG_OF
 	pdev->dev.of_node = dev->dev.of_node;
 #endif
-	pdev->dev.release = plat_dev_release;
-
-	spi_pdata->num_chipselect = dev->devfn;
-
+	ssp = &spi_info->ssp;
 	ssp->phys_base = pci_resource_start(dev, 0);
 	ssp->mmio_base = ioremap(phys_beg, phys_len);
 	if (!ssp->mmio_base) {
 		dev_err(&pdev->dev, "failed to ioremap() registers\n");
 		ret = -EIO;
-		goto err_remap;
+		goto err_nomem;
 	}
 	ssp->irq = dev->irq;
 	ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@
 
 	pci_set_drvdata(dev, spi_info);
 
-	ret = platform_device_register(pdev);
+	ret = platform_device_add(pdev);
 	if (ret)
 		goto err_dev_add;
 
@@ -135,27 +123,21 @@
 	mutex_unlock(&ssp_lock);
 	iounmap(ssp->mmio_base);
 
-err_remap:
-	kfree(spi_info);
-
-err_kz:
+err_nomem:
 	release_mem_region(phys_beg, phys_len);
-
+	platform_device_put(pdev);
+	kfree(spi_info);
 	return ret;
 }
 
 static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 {
-	struct awesome_struct *spi_info;
-	struct platform_device *pdev;
+	struct ce4100_info *spi_info;
 	struct ssp_device *ssp;
 
 	spi_info = pci_get_drvdata(dev);
-
 	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-
-	platform_device_unregister(pdev);
+	platform_device_unregister(spi_info->spi_pdev);
 
 	iounmap(ssp->mmio_base);
 	release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@
 }
 
 static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
 	{ },
 };
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 9469564..1cf9d5f 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -743,6 +743,12 @@
 		.name = "imx51-ecspi",
 		.driver_data = SPI_IMX_VER_2_3,
 	}, {
+		.name = "imx53-cspi",
+		.driver_data = SPI_IMX_VER_0_7,
+	}, {
+		.name = "imx53-ecspi",
+		.driver_data = SPI_IMX_VER_2_3,
+	}, {
 		/* sentinel */
 	}
 };
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index d93b667..2c665fcea 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -509,9 +509,11 @@
 	bytes_done = 0;
 
 	while (bytes_done < t->len) {
+		void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL;
+		const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL;
 		n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
-					   t->tx_buf + bytes_done,
-					   t->rx_buf + bytes_done,
+					   tx_buf,
+					   rx_buf,
 					   words, bits);
 		if (n < 0)
 			break;
@@ -635,7 +637,7 @@
 	ret = spi_bitbang_stop(&p->bitbang);
 	if (!ret) {
 		pm_runtime_disable(&pdev->dev);
-		free_irq(platform_get_irq(pdev, 0), sh_msiof_spi_irq);
+		free_irq(platform_get_irq(pdev, 0), p);
 		iounmap(p->mapbase);
 		clk_put(p->clk);
 		spi_master_put(p->bitbang.master);
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
index bb7df02..891e590 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi_tegra.c
@@ -513,7 +513,7 @@
 	}
 
 	tspi->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR_OR_NULL(tspi->clk)) {
+	if (IS_ERR(tspi->clk)) {
 		dev_err(&pdev->dev, "can not get clock\n");
 		ret = PTR_ERR(tspi->clk);
 		goto err2;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 4e6245e..6034282 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -38,7 +38,7 @@
 
 
 /*
- * This supports acccess to SPI devices using normal userspace I/O calls.
+ * This supports access to SPI devices using normal userspace I/O calls.
  * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
  * and often mask message boundaries, full SPI support requires full duplex
  * transfers.  There are several kinds of internal message boundaries to
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2d8cc45..42cdaa9 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -82,7 +82,7 @@
 
 config SSB_SILENT
 	bool "No SSB kernel messages"
-	depends on SSB && EMBEDDED
+	depends on SSB && EXPERT
 	help
 	  This option turns off all Sonics Silicon Backplane printks.
 	  Note that you won't be able to identify problems, once
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index c7345db..f853379 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -733,7 +733,7 @@
 
 	/* Fetch the vendor specific tuples. */
 	res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
-				ssb_pcmcia_do_get_invariants, sprom);
+				ssb_pcmcia_do_get_invariants, iv);
 	if ((res == 0) || (res == -ENOSPC))
 		return 0;
 
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 5a0985d..29884c0 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -420,6 +420,16 @@
 			bus->pcicore.dev = dev;
 #endif /* CONFIG_SSB_DRIVER_PCICORE */
 			break;
+		case SSB_DEV_ETHERNET:
+			if (bus->bustype == SSB_BUSTYPE_PCI) {
+				if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
+				    (bus->host_pci->device & 0xFF00) == 0x4300) {
+					/* This is a dangling ethernet core on a
+					 * wireless device. Ignore it. */
+					continue;
+				}
+			}
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index e2d5869..5c8fcfc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -123,6 +123,8 @@
 
 source "drivers/staging/iio/Kconfig"
 
+source "drivers/staging/cs5535_gpio/Kconfig"
+
 source "drivers/staging/zram/Kconfig"
 
 source "drivers/staging/wlags49_h2/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index c7d2224..d538863 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,6 +44,7 @@
 obj-$(CONFIG_MRST_RAR_HANDLER)	+= memrar/
 obj-$(CONFIG_DX_SEP)            += sep/
 obj-$(CONFIG_IIO)		+= iio/
+obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio/
 obj-$(CONFIG_ZRAM)		+= zram/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
index 0e298db..29b8ab4 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
@@ -360,8 +360,8 @@
         	status = 1;
         	goto complete;
     	}
-        len = (firmware->size > MAX_BDADDR_FORMAT_LENGTH)? MAX_BDADDR_FORMAT_LENGTH: firmware->size;
-	memcpy(config_bdaddr, firmware->data,len);
+	len = min(firmware->size, MAX_BDADDR_FORMAT_LENGTH - 1);
+	memcpy(config_bdaddr, firmware->data, len);
 	config_bdaddr[len] = '\0';
 	write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING);
        	A_RELEASE_FIRMWARE(firmware);
diff --git a/drivers/staging/autofs/dirhash.c b/drivers/staging/autofs/dirhash.c
index d3f42c8..a08bd73 100644
--- a/drivers/staging/autofs/dirhash.c
+++ b/drivers/staging/autofs/dirhash.c
@@ -88,14 +88,13 @@
 		}
 		path.mnt = mnt;
 		path_get(&path);
-		if (!follow_down(&path)) {
+		if (!follow_down_one(&path)) {
 			path_put(&path);
 			DPRINTK(("autofs: not expirable\
 			(not a mounted directory): %s\n", ent->name));
 			continue;
 		}
-		while (d_mountpoint(path.dentry) && follow_down(&path))
-			;
+		follow_down(&path, false);  // TODO: need to check error
 		umount_ok = may_umount(path.mnt);
 		path_put(&path);
 
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 8ce4536..feade94 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -359,12 +359,11 @@
 
 		if(PacketToDrop)
 		{
-			struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, iIndex);
 			if (netif_msg_tx_err(Adapter))
 				pr_info(PFX "%s: tx queue %d overlimit\n", 
 					Adapter->dev->name, iIndex);
 
-			txq->tx_dropped++;
+			netstats->tx_dropped++;
 
 			DEQUEUEPACKET(Adapter->PackInfo[iIndex].FirstTxQueue,
 						Adapter->PackInfo[iIndex].LastTxQueue);
@@ -404,7 +403,7 @@
 //	down(&Adapter->data_packet_queue_lock);
 	for(iQIndex=LowPriority; iQIndex<HiPriority; iQIndex++)
 	{
-		struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, iQIndex);
+		struct net_device_stats *netstats = &Adapter->dev->stats;
 
 		spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
 		while(Adapter->PackInfo[iQIndex].FirstTxQueue)
@@ -413,7 +412,7 @@
 			if(PacketToDrop)
 			{
 				uiTotalPacketLength = PacketToDrop->len;
-				txq->tx_dropped++;
+				netstats->tx_dropped++;
 			}
 			else
 				uiTotalPacketLength = 0;
diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c
index 0f70009..d5e4a74 100644
--- a/drivers/staging/bcm/Transmit.c
+++ b/drivers/staging/bcm/Transmit.c
@@ -157,11 +157,11 @@
 	}
 	else
 	{
-		struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, QueueIndex);
+		struct net_device_stats *netstats = &Adapter->dev->stats;
 		Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength;
 
-		txq->tx_bytes += Leader.PLength;
-		++txq->tx_packets;
+		netstats->tx_bytes += Leader.PLength;
+		++netstats->tx_packets;
 
 		Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3;
 		Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len);
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index bdd629d..cd8392b 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -209,11 +209,8 @@
 	struct wl_info *wl = hw->priv;
 	ASSERT(wl);
 	WL_LOCK(wl);
-	wl_down(wl);
 	ieee80211_stop_queues(hw);
 	WL_UNLOCK(wl);
-
-	return;
 }
 
 static int
@@ -246,7 +243,14 @@
 static void
 wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-	return;
+	struct wl_info *wl;
+
+	wl = HW_TO_WL(hw);
+
+	/* put driver in down state */
+	WL_LOCK(wl);
+	wl_down(wl);
+	WL_UNLOCK(wl);
 }
 
 static int
@@ -259,9 +263,7 @@
 	switch (type) {
 	case NL80211_CHAN_HT20:
 	case NL80211_CHAN_NO_HT:
-		WL_LOCK(wl);
 		err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
-		WL_UNLOCK(wl);
 		break;
 	case NL80211_CHAN_HT40MINUS:
 	case NL80211_CHAN_HT40PLUS:
@@ -281,6 +283,7 @@
 	int err = 0;
 	int new_int;
 
+	WL_LOCK(wl);
 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
 		WL_NONE("%s: Setting listen interval to %d\n",
 			__func__, conf->listen_interval);
@@ -337,6 +340,7 @@
 	}
 
  config_out:
+	WL_UNLOCK(wl);
 	return err;
 }
 
@@ -455,13 +459,21 @@
 
 static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
 {
+	struct wl_info *wl = hw->priv;
 	WL_NONE("Scan Start\n");
+	WL_LOCK(wl);
+	wlc_scan_start(wl->wlc);
+	WL_UNLOCK(wl);
 	return;
 }
 
 static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
 {
+	struct wl_info *wl = hw->priv;
 	WL_NONE("Scan Complete\n");
+	WL_LOCK(wl);
+	wlc_scan_stop(wl->wlc);
+	WL_UNLOCK(wl);
 	return;
 }
 
@@ -779,7 +791,7 @@
 	wl_found++;
 	return wl;
 
- fail:
+fail:
 	wl_free(wl);
 fail1:
 	return NULL;
@@ -1090,7 +1102,6 @@
 	return 0;
 }
 
-#ifdef LINUXSTA_PS
 static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct wl_info *wl;
@@ -1105,11 +1116,12 @@
 		return -ENODEV;
 	}
 
+	/* only need to flag hw is down for proper resume */
 	WL_LOCK(wl);
-	wl_down(wl);
 	wl->pub->hw_up = false;
 	WL_UNLOCK(wl);
-	pci_save_state(pdev, wl->pci_psstate);
+
+	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	return pci_set_power_state(pdev, PCI_D3hot);
 }
@@ -1133,7 +1145,7 @@
 	if (err)
 		return err;
 
-	pci_restore_state(pdev, wl->pci_psstate);
+	pci_restore_state(pdev);
 
 	err = pci_enable_device(pdev);
 	if (err)
@@ -1145,13 +1157,12 @@
 	if ((val & 0x0000ff00) != 0)
 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-	WL_LOCK(wl);
-	err = wl_up(wl);
-	WL_UNLOCK(wl);
-
+	/*
+	 * Done. The driver will be put in the up state
+	 * in the wl_ops_add_interface() call.
+	 */
 	return err;
 }
-#endif				/* LINUXSTA_PS */
 
 static void wl_remove(struct pci_dev *pdev)
 {
@@ -1184,14 +1195,12 @@
 }
 
 static struct pci_driver wl_pci_driver = {
- .name  = "brcm80211",
- .probe = wl_pci_probe,
-#ifdef LINUXSTA_PS
- .suspend = wl_suspend,
- .resume  = wl_resume,
-#endif				/* LINUXSTA_PS */
- .remove   = __devexit_p(wl_remove),
- .id_table = wl_id_table,
+	.name = "brcm80211",
+	.probe = wl_pci_probe,
+	.suspend = wl_suspend,
+	.resume = wl_resume,
+	.remove = __devexit_p(wl_remove),
+	.id_table = wl_id_table,
 };
 
 /**
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index 1d5d01a..e37e805 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -5126,7 +5126,6 @@
 	fifo = prio2fifo[prio];
 
 	ASSERT((uint) skb_headroom(sdu) >= TXOFF);
-	ASSERT(!(sdu->cloned));
 	ASSERT(!(sdu->next));
 	ASSERT(!(sdu->prev));
 	ASSERT(fifo < NFIFO);
@@ -8462,3 +8461,16 @@
 
 	kfree(qi);
 }
+
+/*
+ * Flag 'scan in progress' to withhold dynamic phy calibration
+ */
+void wlc_scan_start(struct wlc_info *wlc)
+{
+	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
+}
+
+void wlc_scan_stop(struct wlc_info *wlc)
+{
+	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
+}
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
index 146a690..aff4130 100644
--- a/drivers/staging/brcm80211/sys/wlc_pub.h
+++ b/drivers/staging/brcm80211/sys/wlc_pub.h
@@ -570,6 +570,8 @@
 extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
 extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
 extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
+extern void wlc_scan_start(struct wlc_info *wlc);
+extern void wlc_scan_stop(struct wlc_info *wlc);
 
 static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
 				    uint *arg)
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index aad4732..1502d80 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -439,6 +439,7 @@
 config COMEDI_NI_ATMIO
 	tristate "NI AT-MIO E series ISA-PNP card support"
 	depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
 	default N
 	---help---
 	  Enable support for National Instruments AT-MIO E series cards
@@ -1040,6 +1041,8 @@
 config COMEDI_NI_PCIMIO
 	tristate "NI PCI-MIO-E series and M series support"
 	depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
+	select COMEDI_FC
 	default N
 	---help---
 	  Enable support for National Instruments PCI-MIO-E series and M series
@@ -1164,6 +1167,7 @@
 config COMEDI_NI_MIO_CS
 	tristate "NI DAQCard E series PCMCIA support"
 	depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
 	select COMEDI_FC
 	default N
 	---help---
@@ -1268,7 +1272,6 @@
 config COMEDI_NI_TIO
 	tristate "NI general purpose counter support"
 	depends on COMEDI_MITE
-	select COMEDI_8255
 	default N
 	---help---
 	  Enable support for National Instruments general purpose counters.
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index cd25b24..fd274e9 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -61,8 +61,6 @@
 #define PCI_DAQ_SIZE		4096
 #define PCI_DAQ_SIZE_660X       8192
 
-MODULE_LICENSE("GPL");
-
 struct mite_struct *mite_devices;
 EXPORT_SYMBOL(mite_devices);
 
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 14e716e..54741c9 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -527,3 +527,7 @@
 
 module_init(driver_ni6527_init_module);
 module_exit(driver_ni6527_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 8b8e2aa..403fc09 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -871,3 +871,7 @@
 
 module_init(driver_ni_65xx_init_module);
 module_exit(driver_ni_65xx_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6612b08..ca2aeaa 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1421,3 +1421,7 @@
 	};
 	return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e9f034e..d8d91f9 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -384,3 +384,7 @@
 	mite_list_devices();
 	return -EIO;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 4d1868d..0728c3c 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -575,7 +575,8 @@
 	/* grab our IRQ */
 	if (irq) {
 		isr_flags = 0;
-		if (thisboard->bustype == pci_bustype)
+		if (thisboard->bustype == pci_bustype
+		    || thisboard->bustype == pcmcia_bustype)
 			isr_flags |= IRQF_SHARED;
 		if (request_irq(irq, labpc_interrupt, isr_flags,
 				driver_labpc.driver_name, dev)) {
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 84a15c3..005d2fe 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1354,3 +1354,7 @@
 
 module_init(driver_pcidio_init_module);
 module_exit(driver_pcidio_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 23a3812..9148abd 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1853,3 +1853,7 @@
 
 	return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/cs5535_gpio/Kconfig b/drivers/staging/cs5535_gpio/Kconfig
new file mode 100644
index 0000000..a1b3a8d
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Kconfig
@@ -0,0 +1,11 @@
+config CS5535_GPIO
+	tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
+	depends on X86_32
+	help
+	  Note: this driver is DEPRECATED.  Please use the cs5535-gpio module
+	  in the GPIO section instead (CONFIG_GPIO_CS5535).
+
+	  Give userspace access to the GPIO pins on the AMD CS5535 and
+	  CS5536 Geode companion devices.
+
+	  If compiled as a module, it will be called cs5535_gpio.
diff --git a/drivers/staging/cs5535_gpio/Makefile b/drivers/staging/cs5535_gpio/Makefile
new file mode 100644
index 0000000..d67c4b8
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio.o
diff --git a/drivers/staging/cs5535_gpio/TODO b/drivers/staging/cs5535_gpio/TODO
new file mode 100644
index 0000000..98d1cd1
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/TODO
@@ -0,0 +1,6 @@
+This is an obsolete driver for some of the CS5535 and CS5536 southbridge GPIOs.
+It has been replaced by a driver that makes use of the Linux GPIO subsystem.
+Please switch to that driver, and let dilinger@queued.net know if there's
+anything missing from the new driver.
+
+This driver is scheduled for removal in 2.6.40.
diff --git a/drivers/char/cs5535_gpio.c b/drivers/staging/cs5535_gpio/cs5535_gpio.c
similarity index 100%
rename from drivers/char/cs5535_gpio.c
rename to drivers/staging/cs5535_gpio/cs5535_gpio.c
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index b3d05fc..4fb8094 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -368,6 +368,7 @@
 		blkdev->gd->first_minor = 0;
 	blkdev->gd->fops = &block_ops;
 	blkdev->gd->private_data = blkdev;
+	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
 	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
 
 	blkvsc_do_inquiry(blkdev);
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index df9cd13..0edbe74 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -1279,7 +1279,7 @@
 	/* ASSERT(device); */
 
 	packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
-			 GFP_KERNEL);
+			 GFP_ATOMIC);
 	if (!packet)
 		return;
 	buffer = packet;
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 0147b40..b41c964 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -236,6 +236,7 @@
 	if (status == 1) {
 		netif_carrier_on(net);
 		netif_wake_queue(net);
+		netif_notify_peers(net);
 	} else {
 		netif_carrier_off(net);
 		netif_stop_queue(net);
@@ -358,7 +359,6 @@
 
 	/* Set initial state */
 	netif_carrier_off(net);
-	netif_stop_queue(net);
 
 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = device_ctx;
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index deb68c8..b8b54da 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -68,7 +68,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7476_show_scale, NULL, 0);
 
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 6859089..5d85efa 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -68,7 +68,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7887_show_scale, NULL, 0);
 
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 6309d52..89ccf37 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -432,7 +432,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad799x_show_scale, NULL, 0);
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index e3387cd..0f87eca 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -87,7 +87,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
 
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index e38e89d..e2f6d6a 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -874,7 +874,10 @@
 		sc_access[3].reg_addr = 0x109;
 		sc_access[3].mask = MASK6;
 		sc_access[3].value = 0x00;
-		num_val = 4;
+		sc_access[4].reg_addr = 0x104;
+		sc_access[4].value = 0x3C;
+		sc_access[4].mask = 0xff;
+		num_val = 5;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/staging/lirc/TODO.lirc_zilog b/drivers/staging/lirc/TODO.lirc_zilog
index 6aa312d..2d0263f 100644
--- a/drivers/staging/lirc/TODO.lirc_zilog
+++ b/drivers/staging/lirc/TODO.lirc_zilog
@@ -1,13 +1,37 @@
-The binding between hdpvr and lirc_zilog is currently disabled,
+1. Both ir-kbd-i2c and lirc_zilog provide support for RX events.
+The 'tx_only' lirc_zilog module parameter will allow ir-kbd-i2c
+and lirc_zilog to coexist in the kernel, if the user requires such a set-up.
+However, the IR unit will not work well without coordination between the
+two modules.  A shared mutex, for transceiver access locking, needs to be
+supplied by bridge drivers, in struct IR_i2c_init_data, to both ir-kbd-i2c
+and lirc_zilog, before they will coexist usefully.  This should be fixed
+before moving out of staging; see the sketch after this TODO list.
+
+2. References and locking need careful examination.  For cx18 and ivtv PCI
+cards, which are not easily "hot unplugged", the imperfect state of reference
+counting and locking is acceptable if not correct.  For USB connected units
+like HD PVR, PVR USB2, HVR-1900, and HVR-1950, the likelihood of an Oops on
+unplug is probably great.  Proper reference counting and locking needs to be
+implemented before this module is moved out of staging.
+
+3. The binding between hdpvr and lirc_zilog is currently disabled,
 due to an OOPS reported a few years ago when both the hdpvr and cx18
 drivers were loaded on the reporter's system. More details can be seen at:
 	http://www.mail-archive.com/linux-media@vger.kernel.org/msg09163.html
 More testing is needed in order to fix the reported issue.
 
-There's a conflict between ir-kbd-i2c: Both provide support for RX events.
-Such conflict needs to be fixed, before moving it out of staging.
+4. In addition to providing a shared mutex for transceiver access
+locking, bridge drivers, if able, should provide a chip reset() callback
+to lirc_zilog via struct IR_i2c_init_data.  cx18 and ivtv already have routines
+to perform Z8 chip resets via GPIO manipulations.  This will allow lirc_zilog
+to bring the chip back to normal when it hangs, in the same places the
+original lirc_pvr150 driver code does.  This is not strictly needed, so it
+is not required to move lirc_zilog out of staging.
 
-The way I2C probe works, it will try to register the driver twice, one
-for RX and another for TX. The logic needs to be fixed to avoid such
-issue.
+5. Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
+and installed on Hauppauge products.  When working on either module, developers
+must consider at least the following bridge drivers which mention an IR Rx unit
+at address 0x71 (indicative of a Z8):
+
+	ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
 
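Item 1 calls for bridge drivers to hand a single transceiver mutex to both
ir-kbd-i2c and lirc_zilog through struct IR_i2c_init_data. A minimal sketch of
that hand-off, assuming a hypothetical transceiver_lock field (no such field
exists in struct IR_i2c_init_data today) and a made-up bridge_ir_state
container, could look like this:

/*
 * Hypothetical sketch only: struct IR_i2c_init_data has no transceiver_lock
 * field today; it is the field TODO item 1 asks for.  bridge_ir_state is a
 * made-up bridge-driver private struct used purely for illustration.
 */
#include <linux/mutex.h>
#include <media/ir-kbd-i2c.h>

struct bridge_ir_state {
	struct mutex ir_xcvr_lock;		/* one lock for Rx and Tx users */
	struct IR_i2c_init_data init_data;	/* passed via i2c board info */
};

static void bridge_setup_ir(struct bridge_ir_state *st)
{
	mutex_init(&st->ir_xcvr_lock);

	/*
	 * Proposed: give the same lock to both i2c client drivers, so that
	 * ir-kbd-i2c Rx polling and lirc_zilog Tx never hit the Z8 at once.
	 *
	 * st->init_data.transceiver_lock = &st->ir_xcvr_lock;
	 */
}

Both modules would then take that one mutex around every i2c transaction with
the Z8, which is the coordination the TODO says is currently missing.
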
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 0da6b95..235cab0 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -447,6 +447,7 @@
 
 exit:
 	mutex_unlock(&context->ctx_lock);
+	kfree(data_buf);
 
 	return (!retval) ? n_bytes : retval;
 }
diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index 929ae57..5938616 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -232,6 +232,7 @@
 		i++;
 	}
 	terminate_send(tx_buf[i - 1]);
+	kfree(tx_buf);
 	return n;
 }
 
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index dfd2c44..3a9c098 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -376,6 +376,7 @@
 	unsigned long flags;
 	int counttimer;
 	int *wbuf;
+	ssize_t ret;
 
 	if (!is_claimed)
 		return -EBUSY;
@@ -393,8 +394,10 @@
 	if (timer == 0) {
 		/* try again if device is ready */
 		timer = init_lirc_timer();
-		if (timer == 0)
-			return -EIO;
+		if (timer == 0) {
+			ret = -EIO;
+			goto out;
+		}
 	}
 
 	/* adjust values from usecs */
@@ -420,7 +423,8 @@
 			if (check_pselecd && (in(1) & LP_PSELECD)) {
 				lirc_off();
 				local_irq_restore(flags);
-				return -EIO;
+				ret = -EIO;
+				goto out;
 			}
 		} while (counttimer < wbuf[i]);
 		i++;
@@ -436,7 +440,8 @@
 			level = newlevel;
 			if (check_pselecd && (in(1) & LP_PSELECD)) {
 				local_irq_restore(flags);
-				return -EIO;
+				ret = -EIO;
+				goto out;
 			}
 		} while (counttimer < wbuf[i]);
 		i++;
@@ -445,7 +450,11 @@
 #else
 	/* place code that handles write without external timer here */
 #endif
-	return n;
+	ret = n;
+out:
+	kfree(wbuf);
+
+	return ret;
 }
 
 static unsigned int lirc_poll(struct file *file, poll_table *wait)
diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c
index 998485e..925eabe 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/lirc/lirc_sasem.c
@@ -448,6 +448,7 @@
 exit:
 
 	mutex_unlock(&context->ctx_lock);
+	kfree(data_buf);
 
 	return (!retval) ? n_bytes : retval;
 }
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 9bcf149..1c3099b 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -966,7 +966,7 @@
 	if (n % sizeof(int) || count % 2 == 0)
 		return -EINVAL;
 	wbuf = memdup_user(buf, n);
-	if (PTR_ERR(wbuf))
+	if (IS_ERR(wbuf))
 		return PTR_ERR(wbuf);
 	spin_lock_irqsave(&hardware[type].lock, flags);
 	if (type == LIRC_IRDEO) {
@@ -981,6 +981,7 @@
 	}
 	off();
 	spin_unlock_irqrestore(&hardware[type].lock, flags);
+	kfree(wbuf);
 	return n;
 }
 
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index c553ab6..76be7b8 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -330,6 +330,7 @@
 	/* enable receiver */
 	Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE;
 #endif
+	kfree(tx_buf);
 	return count;
 }
 
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index ad29bb1..0aad0d7 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -20,6 +20,9 @@
  *
  * parts are cut&pasted from the lirc_i2c.c driver
  *
+ * Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
+ * Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
+ *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  the Free Software Foundation; either version 2 of the License, or
@@ -60,38 +63,44 @@
 #include <media/lirc_dev.h>
 #include <media/lirc.h>
 
-struct IR {
-	struct lirc_driver l;
-
-	/* Device info */
-	struct mutex ir_lock;
-	int open;
-	bool is_hdpvr;
-
+struct IR_rx {
 	/* RX device */
-	struct i2c_client c_rx;
-	int have_rx;
+	struct i2c_client *c;
 
 	/* RX device buffer & lock */
 	struct lirc_buffer buf;
 	struct mutex buf_lock;
 
 	/* RX polling thread data */
-	struct completion *t_notify;
-	struct completion *t_notify2;
-	int shutdown;
 	struct task_struct *task;
 
 	/* RX read data */
 	unsigned char b[3];
+	bool hdpvr_data_fmt;
+};
 
+struct IR_tx {
 	/* TX device */
-	struct i2c_client c_tx;
+	struct i2c_client *c;
+
+	/* TX additional actions needed */
 	int need_boot;
-	int have_tx;
+	bool post_tx_ready_poll;
+};
+
+struct IR {
+	struct lirc_driver l;
+
+	struct mutex ir_lock;
+	int open;
+
+	struct i2c_adapter *adapter;
+	struct IR_rx *rx;
+	struct IR_tx *tx;
 };
 
 /* Minor -> data mapping */
+static struct mutex ir_devices_lock;
 static struct IR *ir_devices[MAX_IRCTL_DEVICES];
 
 /* Block size for IR transmitter */
@@ -124,14 +133,11 @@
 #define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
 					## args)
 #define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-
-#define ZILOG_HAUPPAUGE_IR_RX_NAME "Zilog/Hauppauge IR RX"
-#define ZILOG_HAUPPAUGE_IR_TX_NAME "Zilog/Hauppauge IR TX"
+#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
 
 /* module parameters */
 static int debug;	/* debug output */
-static int disable_rx;	/* disable RX device */
-static int disable_tx;	/* disable TX device */
+static int tx_only;	/* only handle the IR Tx function */
 static int minor = -1;	/* minor number */
 
 #define dprintk(fmt, args...)						\
@@ -150,8 +156,12 @@
 	int ret;
 	int failures = 0;
 	unsigned char sendbuf[1] = { 0 };
+	struct IR_rx *rx = ir->rx;
 
-	if (lirc_buffer_full(&ir->buf)) {
+	if (rx == NULL)
+		return -ENXIO;
+
+	if (lirc_buffer_full(&rx->buf)) {
 		dprintk("buffer overflow\n");
 		return -EOVERFLOW;
 	}
@@ -161,17 +171,25 @@
 	 * data and we have space
 	 */
 	do {
+		if (kthread_should_stop())
+			return -ENODATA;
+
 		/*
 		 * Lock i2c bus for the duration.  RX/TX chips interfere so
 		 * this is worth it
 		 */
 		mutex_lock(&ir->ir_lock);
 
+		if (kthread_should_stop()) {
+			mutex_unlock(&ir->ir_lock);
+			return -ENODATA;
+		}
+
 		/*
 		 * Send random "poll command" (?)  Windows driver does this
 		 * and it is a good point to detect chip failure.
 		 */
-		ret = i2c_master_send(&ir->c_rx, sendbuf, 1);
+		ret = i2c_master_send(rx->c, sendbuf, 1);
 		if (ret != 1) {
 			zilog_error("i2c_master_send failed with %d\n",	ret);
 			if (failures >= 3) {
@@ -186,45 +204,53 @@
 				    "trying reset\n");
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (kthread_should_stop()) {
+				mutex_unlock(&ir->ir_lock);
+				return -ENODATA;
+			}
 			schedule_timeout((100 * HZ + 999) / 1000);
-			ir->need_boot = 1;
+			ir->tx->need_boot = 1;
 
 			++failures;
 			mutex_unlock(&ir->ir_lock);
 			continue;
 		}
 
-		ret = i2c_master_recv(&ir->c_rx, keybuf, sizeof(keybuf));
+		if (kthread_should_stop()) {
+			mutex_unlock(&ir->ir_lock);
+			return -ENODATA;
+		}
+		ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
 		mutex_unlock(&ir->ir_lock);
 		if (ret != sizeof(keybuf)) {
 			zilog_error("i2c_master_recv failed with %d -- "
 				    "keeping last read buffer\n", ret);
 		} else {
-			ir->b[0] = keybuf[3];
-			ir->b[1] = keybuf[4];
-			ir->b[2] = keybuf[5];
-			dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]);
+			rx->b[0] = keybuf[3];
+			rx->b[1] = keybuf[4];
+			rx->b[2] = keybuf[5];
+			dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
 		}
 
 		/* key pressed ? */
-		if (ir->is_hdpvr) {
+		if (rx->hdpvr_data_fmt) {
 			if (got_data && (keybuf[0] == 0x80))
 				return 0;
 			else if (got_data && (keybuf[0] == 0x00))
 				return -ENODATA;
-		} else if ((ir->b[0] & 0x80) == 0)
+		} else if ((rx->b[0] & 0x80) == 0)
 			return got_data ? 0 : -ENODATA;
 
 		/* look what we have */
-		code = (((__u16)ir->b[0] & 0x7f) << 6) | (ir->b[1] >> 2);
+		code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
 
 		codes[0] = (code >> 8) & 0xff;
 		codes[1] = code & 0xff;
 
 		/* return it */
-		lirc_buffer_write(&ir->buf, codes);
+		lirc_buffer_write(&rx->buf, codes);
 		++got_data;
-	} while (!lirc_buffer_full(&ir->buf));
+	} while (!lirc_buffer_full(&rx->buf));
 
 	return 0;
 }
@@ -242,46 +268,35 @@
 static int lirc_thread(void *arg)
 {
 	struct IR *ir = arg;
-
-	if (ir->t_notify != NULL)
-		complete(ir->t_notify);
+	struct IR_rx *rx = ir->rx;
 
 	dprintk("poll thread started\n");
 
-	do {
-		if (ir->open) {
-			set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-			/*
-			 * This is ~113*2 + 24 + jitter (2*repeat gap +
-			 * code length).  We use this interval as the chip
-			 * resets every time you poll it (bad!).  This is
-			 * therefore just sufficient to catch all of the
-			 * button presses.  It makes the remote much more
-			 * responsive.  You can see the difference by
-			 * running irw and holding down a button.  With
-			 * 100ms, the old polling interval, you'll notice
-			 * breaks in the repeat sequence corresponding to
-			 * lost keypresses.
-			 */
-			schedule_timeout((260 * HZ) / 1000);
-			if (ir->shutdown)
-				break;
-			if (!add_to_buf(ir))
-				wake_up_interruptible(&ir->buf.wait_poll);
-		} else {
-			/* if device not opened so we can sleep half a second */
-			set_current_state(TASK_INTERRUPTIBLE);
+		/* if device not opened, we can sleep half a second */
+		if (!ir->open) {
 			schedule_timeout(HZ/2);
+			continue;
 		}
-	} while (!ir->shutdown);
 
-	if (ir->t_notify2 != NULL)
-		wait_for_completion(ir->t_notify2);
-
-	ir->task = NULL;
-	if (ir->t_notify != NULL)
-		complete(ir->t_notify);
+		/*
+		 * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
+		 * We use this interval as the chip resets every time you poll
+		 * it (bad!).  This is therefore just sufficient to catch all
+		 * of the button presses.  It makes the remote much more
+		 * responsive.  You can see the difference by running irw and
+		 * holding down a button.  With 100ms, the old polling
+		 * interval, you'll notice breaks in the repeat sequence
+		 * corresponding to lost keypresses.
+		 */
+		schedule_timeout((260 * HZ) / 1000);
+		if (kthread_should_stop())
+			break;
+		if (!add_to_buf(ir))
+			wake_up_interruptible(&rx->buf.wait_poll);
+	}
 
 	dprintk("poll thread ended\n");
 	return 0;
@@ -299,10 +314,10 @@
 	 * this is completely broken code. lirc_unregister_driver()
 	 * must be possible even when the device is open
 	 */
-	if (ir->c_rx.addr)
-		i2c_use_client(&ir->c_rx);
-	if (ir->c_tx.addr)
-		i2c_use_client(&ir->c_tx);
+	if (ir->rx != NULL)
+		i2c_use_client(ir->rx->c);
+	if (ir->tx != NULL)
+		i2c_use_client(ir->tx->c);
 
 	return 0;
 }
@@ -311,10 +326,10 @@
 {
 	struct IR *ir = data;
 
-	if (ir->c_rx.addr)
-		i2c_release_client(&ir->c_rx);
-	if (ir->c_tx.addr)
-		i2c_release_client(&ir->c_tx);
+	if (ir->rx)
+		i2c_release_client(ir->rx->c);
+	if (ir->tx)
+		i2c_release_client(ir->tx->c);
 	if (ir->l.owner != NULL)
 		module_put(ir->l.owner);
 }
@@ -453,7 +468,7 @@
 }
 
 /* send a block of data to the IR TX device */
-static int send_data_block(struct IR *ir, unsigned char *data_block)
+static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
 {
 	int i, j, ret;
 	unsigned char buf[5];
@@ -467,7 +482,7 @@
 			buf[1 + j] = data_block[i + j];
 		dprintk("%02x %02x %02x %02x %02x",
 			buf[0], buf[1], buf[2], buf[3], buf[4]);
-		ret = i2c_master_send(&ir->c_tx, buf, tosend + 1);
+		ret = i2c_master_send(tx->c, buf, tosend + 1);
 		if (ret != tosend + 1) {
 			zilog_error("i2c_master_send failed with %d\n", ret);
 			return ret < 0 ? ret : -EFAULT;
@@ -478,38 +493,50 @@
 }
 
 /* send boot data to the IR TX device */
-static int send_boot_data(struct IR *ir)
+static int send_boot_data(struct IR_tx *tx)
 {
-	int ret;
+	int ret, i;
 	unsigned char buf[4];
 
 	/* send the boot block */
-	ret = send_data_block(ir, tx_data->boot_data);
+	ret = send_data_block(tx, tx_data->boot_data);
 	if (ret != 0)
 		return ret;
 
-	/* kick it off? */
+	/* Hit the go button to activate the new boot data */
 	buf[0] = 0x00;
 	buf[1] = 0x20;
-	ret = i2c_master_send(&ir->c_tx, buf, 2);
+	ret = i2c_master_send(tx->c, buf, 2);
 	if (ret != 2) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
-	ret = i2c_master_send(&ir->c_tx, buf, 1);
+
+	/*
+	 * Wait for the Zilog to settle after hitting go, once the boot block
+	 * has been uploaded.  Without this delay, the HD-PVR and HVR-1950 both
+	 * return an -EIO upon attempting to get the firmware revision, and the
+	 * tx probe thus fails.
+	 */
+	for (i = 0; i < 10; i++) {
+		ret = i2c_master_send(tx->c, buf, 1);
+		if (ret == 1)
+			break;
+		udelay(100);
+	}
+
 	if (ret != 1) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
 
 	/* Here comes the firmware version... (hopefully) */
-	ret = i2c_master_recv(&ir->c_tx, buf, 4);
+	ret = i2c_master_recv(tx->c, buf, 4);
 	if (ret != 4) {
 		zilog_error("i2c_master_recv failed with %d\n", ret);
 		return 0;
 	}
-	if (buf[0] != 0x80) {
-		zilog_error("unexpected IR TX response: %02x\n", buf[0]);
+	if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
+		zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
 		return 0;
 	}
 	zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -543,7 +570,7 @@
 }
 
 /* load "firmware" for the IR TX device */
-static int fw_load(struct IR *ir)
+static int fw_load(struct IR_tx *tx)
 {
 	int ret;
 	unsigned int i;
@@ -558,7 +585,7 @@
 	}
 
 	/* Request codeset data file */
-	ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &ir->c_tx.dev);
+	ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &tx->c->dev);
 	if (ret != 0) {
 		zilog_error("firmware haup-ir-blaster.bin not available "
 			    "(%d)\n", ret);
@@ -685,20 +712,20 @@
 }
 
 /* initialise the IR TX device */
-static int tx_init(struct IR *ir)
+static int tx_init(struct IR_tx *tx)
 {
 	int ret;
 
 	/* Load 'firmware' */
-	ret = fw_load(ir);
+	ret = fw_load(tx);
 	if (ret != 0)
 		return ret;
 
 	/* Send boot block */
-	ret = send_boot_data(ir);
+	ret = send_boot_data(tx);
 	if (ret != 0)
 		return ret;
-	ir->need_boot = 0;
+	tx->need_boot = 0;
 
 	/* Looks good */
 	return 0;
@@ -714,20 +741,20 @@
 static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
 {
 	struct IR *ir = filep->private_data;
-	unsigned char buf[ir->buf.chunk_size];
+	struct IR_rx *rx = ir->rx;
 	int ret = 0, written = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
 	dprintk("read called\n");
-	if (ir->c_rx.addr == 0)
+	if (rx == NULL)
 		return -ENODEV;
 
-	if (mutex_lock_interruptible(&ir->buf_lock))
+	if (mutex_lock_interruptible(&rx->buf_lock))
 		return -ERESTARTSYS;
 
-	if (n % ir->buf.chunk_size) {
+	if (n % rx->buf.chunk_size) {
 		dprintk("read result = -EINVAL\n");
-		mutex_unlock(&ir->buf_lock);
+		mutex_unlock(&rx->buf_lock);
 		return -EINVAL;
 	}
 
@@ -736,7 +763,7 @@
 	 * to avoid losing scan code (in case when queue is awaken somewhere
 	 * between while condition checking and scheduling)
 	 */
-	add_wait_queue(&ir->buf.wait_poll, &wait);
+	add_wait_queue(&rx->buf.wait_poll, &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	/*
@@ -744,7 +771,7 @@
 	 * mode and 'copy_to_user' is happy, wait for data.
 	 */
 	while (written < n && ret == 0) {
-		if (lirc_buffer_empty(&ir->buf)) {
+		if (lirc_buffer_empty(&rx->buf)) {
 			/*
 			 * According to the read(2) man page, 'written' can be
 			 * returned as less than 'n', instead of blocking
@@ -764,16 +791,17 @@
 			schedule();
 			set_current_state(TASK_INTERRUPTIBLE);
 		} else {
-			lirc_buffer_read(&ir->buf, buf);
+			unsigned char buf[rx->buf.chunk_size];
+			lirc_buffer_read(&rx->buf, buf);
 			ret = copy_to_user((void *)outbuf+written, buf,
-					   ir->buf.chunk_size);
-			written += ir->buf.chunk_size;
+					   rx->buf.chunk_size);
+			written += rx->buf.chunk_size;
 		}
 	}
 
-	remove_wait_queue(&ir->buf.wait_poll, &wait);
+	remove_wait_queue(&rx->buf.wait_poll, &wait);
 	set_current_state(TASK_RUNNING);
-	mutex_unlock(&ir->buf_lock);
+	mutex_unlock(&rx->buf_lock);
 
 	dprintk("read result = %s (%d)\n",
 		ret ? "-EFAULT" : "OK", ret);
@@ -782,7 +810,7 @@
 }
 
 /* send a keypress to the IR TX device */
-static int send_code(struct IR *ir, unsigned int code, unsigned int key)
+static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
 {
 	unsigned char data_block[TX_BLOCK_SIZE];
 	unsigned char buf[2];
@@ -799,26 +827,34 @@
 		return ret;
 
 	/* Send the data block */
-	ret = send_data_block(ir, data_block);
+	ret = send_data_block(tx, data_block);
 	if (ret != 0)
 		return ret;
 
 	/* Send data block length? */
 	buf[0] = 0x00;
 	buf[1] = 0x40;
-	ret = i2c_master_send(&ir->c_tx, buf, 2);
+	ret = i2c_master_send(tx->c, buf, 2);
 	if (ret != 2) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
-	ret = i2c_master_send(&ir->c_tx, buf, 1);
+
+	/* Give the z8 a moment to process data block */
+	for (i = 0; i < 10; i++) {
+		ret = i2c_master_send(tx->c, buf, 1);
+		if (ret == 1)
+			break;
+		udelay(100);
+	}
+
 	if (ret != 1) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
 
 	/* Send finished download? */
-	ret = i2c_master_recv(&ir->c_tx, buf, 1);
+	ret = i2c_master_recv(tx->c, buf, 1);
 	if (ret != 1) {
 		zilog_error("i2c_master_recv failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
@@ -832,7 +868,7 @@
 	/* Send prepare command? */
 	buf[0] = 0x00;
 	buf[1] = 0x80;
-	ret = i2c_master_send(&ir->c_tx, buf, 2);
+	ret = i2c_master_send(tx->c, buf, 2);
 	if (ret != 2) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
@@ -843,7 +879,7 @@
 	 * last i2c_master_recv always fails with a -5, so for now, we're
 	 * going to skip this whole mess and say we're done on the HD PVR
 	 */
-	if (ir->is_hdpvr) {
+	if (!tx->post_tx_ready_poll) {
 		dprintk("sent code %u, key %u\n", code, key);
 		return 0;
 	}
@@ -857,7 +893,7 @@
 	for (i = 0; i < 20; ++i) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout((50 * HZ + 999) / 1000);
-		ret = i2c_master_send(&ir->c_tx, buf, 1);
+		ret = i2c_master_send(tx->c, buf, 1);
 		if (ret == 1)
 			break;
 		dprintk("NAK expected: i2c_master_send "
@@ -870,7 +906,7 @@
 	}
 
 	/* Seems to be an 'ok' response */
-	i = i2c_master_recv(&ir->c_tx, buf, 1);
+	i = i2c_master_recv(tx->c, buf, 1);
 	if (i != 1) {
 		zilog_error("i2c_master_recv failed with %d\n", ret);
 		return -EFAULT;
@@ -895,10 +931,11 @@
 			  loff_t *ppos)
 {
 	struct IR *ir = filep->private_data;
+	struct IR_tx *tx = ir->tx;
 	size_t i;
 	int failures = 0;
 
-	if (ir->c_tx.addr == 0)
+	if (tx == NULL)
 		return -ENODEV;
 
 	/* Validate user parameters */
@@ -919,15 +956,15 @@
 		}
 
 		/* Send boot data first if required */
-		if (ir->need_boot == 1) {
-			ret = send_boot_data(ir);
+		if (tx->need_boot == 1) {
+			ret = send_boot_data(tx);
 			if (ret == 0)
-				ir->need_boot = 0;
+				tx->need_boot = 0;
 		}
 
 		/* Send the code */
 		if (ret == 0) {
-			ret = send_code(ir, (unsigned)command >> 16,
+			ret = send_code(tx, (unsigned)command >> 16,
 					    (unsigned)command & 0xFFFF);
 			if (ret == -EPROTO) {
 				mutex_unlock(&ir->ir_lock);
@@ -952,7 +989,7 @@
 			}
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_timeout((100 * HZ + 999) / 1000);
-			ir->need_boot = 1;
+			tx->need_boot = 1;
 			++failures;
 		} else
 			i += sizeof(int);
@@ -969,22 +1006,23 @@
 static unsigned int poll(struct file *filep, poll_table *wait)
 {
 	struct IR *ir = filep->private_data;
+	struct IR_rx *rx = ir->rx;
 	unsigned int ret;
 
 	dprintk("poll called\n");
-	if (ir->c_rx.addr == 0)
+	if (rx == NULL)
 		return -ENODEV;
 
-	mutex_lock(&ir->buf_lock);
+	mutex_lock(&rx->buf_lock);
 
-	poll_wait(filep, &ir->buf.wait_poll, wait);
+	poll_wait(filep, &rx->buf.wait_poll, wait);
 
 	dprintk("poll result = %s\n",
-		lirc_buffer_empty(&ir->buf) ? "0" : "POLLIN|POLLRDNORM");
+		lirc_buffer_empty(&rx->buf) ? "0" : "POLLIN|POLLRDNORM");
 
-	ret = lirc_buffer_empty(&ir->buf) ? 0 : (POLLIN|POLLRDNORM);
+	ret = lirc_buffer_empty(&rx->buf) ? 0 : (POLLIN|POLLRDNORM);
 
-	mutex_unlock(&ir->buf_lock);
+	mutex_unlock(&rx->buf_lock);
 	return ret;
 }
 
@@ -994,10 +1032,9 @@
 	int result;
 	unsigned long mode, features = 0;
 
-	if (ir->c_rx.addr != 0)
+	features |= LIRC_CAN_SEND_PULSE;
+	if (ir->rx != NULL)
 		features |= LIRC_CAN_REC_LIRCCODE;
-	if (ir->c_tx.addr != 0)
-		features |= LIRC_CAN_SEND_PULSE;
 
 	switch (cmd) {
 	case LIRC_GET_LENGTH:
@@ -1024,15 +1061,9 @@
 			result = -EINVAL;
 		break;
 	case LIRC_GET_SEND_MODE:
-		if (!(features&LIRC_CAN_SEND_MASK))
-			return -ENOSYS;
-
 		result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
 		break;
 	case LIRC_SET_SEND_MODE:
-		if (!(features&LIRC_CAN_SEND_MASK))
-			return -ENOSYS;
-
 		result = get_user(mode, (unsigned long *) arg);
 		if (!result && mode != LIRC_MODE_PULSE)
 			return -EINVAL;
@@ -1043,6 +1074,15 @@
 	return result;
 }
 
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_minor(unsigned int minor)
+{
+	if (minor >= MAX_IRCTL_DEVICES)
+		return NULL;
+
+	return ir_devices[minor];
+}
+
 /*
  * Open the IR device.  Get hold of our IR structure and
  * stash it in private_data for the file
@@ -1051,15 +1091,15 @@
 {
 	struct IR *ir;
 	int ret;
+	unsigned int minor = MINOR(node->i_rdev);
 
 	/* find our IR struct */
-	unsigned minor = MINOR(node->i_rdev);
-	if (minor >= MAX_IRCTL_DEVICES) {
-		dprintk("minor %d: open result = -ENODEV\n",
-			minor);
+	mutex_lock(&ir_devices_lock);
+	ir = find_ir_device_by_minor(minor);
+	mutex_unlock(&ir_devices_lock);
+
+	if (ir == NULL)
 		return -ENODEV;
-	}
-	ir = ir_devices[minor];
 
 	/* increment in use count */
 	mutex_lock(&ir->ir_lock);
@@ -1106,7 +1146,6 @@
 
 static int ir_remove(struct i2c_client *client);
 static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg);
 
 #define ID_FLAG_TX	0x01
 #define ID_FLAG_HDPVR	0x02
@@ -1126,7 +1165,6 @@
 	},
 	.probe		= ir_probe,
 	.remove		= ir_remove,
-	.command	= ir_command,
 	.id_table	= ir_transceiver_id,
 };
 
@@ -1144,214 +1182,253 @@
 	.release	= close
 };
 
+static void destroy_rx_kthread(struct IR_rx *rx)
+{
+	/* end up polling thread */
+	if (rx != NULL && !IS_ERR_OR_NULL(rx->task)) {
+		kthread_stop(rx->task);
+		rx->task = NULL;
+	}
+}
+
+/* ir_devices_lock must be held */
+static int add_ir_device(struct IR *ir)
+{
+	int i;
+
+	for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+		if (ir_devices[i] == NULL) {
+			ir_devices[i] = ir;
+			break;
+		}
+
+	return i == MAX_IRCTL_DEVICES ? -ENOMEM : i;
+}
+
+/* ir_devices_lock must be held */
+static void del_ir_device(struct IR *ir)
+{
+	int i;
+
+	for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+		if (ir_devices[i] == ir) {
+			ir_devices[i] = NULL;
+			break;
+		}
+}
+
 static int ir_remove(struct i2c_client *client)
 {
 	struct IR *ir = i2c_get_clientdata(client);
 
-	mutex_lock(&ir->ir_lock);
+	mutex_lock(&ir_devices_lock);
 
-	if (ir->have_rx || ir->have_tx) {
-		DECLARE_COMPLETION(tn);
-		DECLARE_COMPLETION(tn2);
-
-		/* end up polling thread */
-		if (ir->task && !IS_ERR(ir->task)) {
-			ir->t_notify = &tn;
-			ir->t_notify2 = &tn2;
-			ir->shutdown = 1;
-			wake_up_process(ir->task);
-			complete(&tn2);
-			wait_for_completion(&tn);
-			ir->t_notify = NULL;
-			ir->t_notify2 = NULL;
-		}
-
-	} else {
-		mutex_unlock(&ir->ir_lock);
-		zilog_error("%s: detached from something we didn't "
-			    "attach to\n", __func__);
-		return -ENODEV;
+	if (ir == NULL) {
+		/* We destroyed everything when the first client came through */
+		mutex_unlock(&ir_devices_lock);
+		return 0;
 	}
 
-	/* unregister lirc driver */
-	if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
-		lirc_unregister_driver(ir->l.minor);
-		ir_devices[ir->l.minor] = NULL;
+	/* Good-bye LIRC */
+	lirc_unregister_driver(ir->l.minor);
+
+	/* Good-bye Rx */
+	destroy_rx_kthread(ir->rx);
+	if (ir->rx != NULL) {
+		if (ir->rx->buf.fifo_initialized)
+			lirc_buffer_free(&ir->rx->buf);
+		i2c_set_clientdata(ir->rx->c, NULL);
+		kfree(ir->rx);
 	}
 
-	/* free memory */
-	lirc_buffer_free(&ir->buf);
-	mutex_unlock(&ir->ir_lock);
+	/* Good-bye Tx */
+	i2c_set_clientdata(ir->tx->c, NULL);
+	kfree(ir->tx);
+
+	/* Good-bye IR */
+	del_ir_device(ir);
 	kfree(ir);
 
+	mutex_unlock(&ir_devices_lock);
 	return 0;
 }
 
+
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_adapter(struct i2c_adapter *adapter)
+{
+	int i;
+	struct IR *ir = NULL;
+
+	for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+		if (ir_devices[i] != NULL &&
+		    ir_devices[i]->adapter == adapter) {
+			ir = ir_devices[i];
+			break;
+		}
+
+	return ir;
+}
+
 static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-	struct IR *ir = NULL;
+	struct IR *ir;
 	struct i2c_adapter *adap = client->adapter;
-	char buf;
 	int ret;
-	int have_rx = 0, have_tx = 0;
+	bool tx_probe = false;
 
-	dprintk("%s: adapter name (%s) nr %d, i2c_device_id name (%s), "
-		"client addr=0x%02x\n",
-		__func__, adap->name, adap->nr, id->name, client->addr);
+	dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
+		__func__, id->name, adap->nr, adap->name, client->addr);
 
 	/*
-	 * FIXME - This probe function probes both the Tx and Rx
-	 * addresses of the IR microcontroller.
-	 *
-	 * However, the I2C subsystem is passing along one I2C client at a
-	 * time, based on matches to the ir_transceiver_id[] table above.
-	 * The expectation is that each i2c_client address will be probed
-	 * individually by drivers so the I2C subsystem can mark all client
-	 * addresses as claimed or not.
-	 *
-	 * This probe routine causes only one of the client addresses, TX or RX,
-	 * to be claimed.  This will cause a problem if the I2C subsystem is
-	 * subsequently triggered to probe unclaimed clients again.
+	 * The IR receiver    is at i2c address 0x71.
+	 * The IR transmitter is at i2c address 0x70.
 	 */
-	/*
-	 * The external IR receiver is at i2c address 0x71.
-	 * The IR transmitter is at 0x70.
-	 */
-	client->addr = 0x70;
 
-	if (!disable_tx) {
-		if (i2c_master_recv(client, &buf, 1) == 1)
-			have_tx = 1;
-		dprintk("probe 0x70 @ %s: %s\n",
-			adap->name, have_tx ? "success" : "failed");
+	if (id->driver_data & ID_FLAG_TX)
+		tx_probe = true;
+	else if (tx_only) /* module option */
+		return -ENXIO;
+
+	zilog_info("probing IR %s on %s (i2c-%d)\n",
+		   tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+
+	mutex_lock(&ir_devices_lock);
+
+	/* Use a single struct IR instance for both the Rx and Tx functions */
+	ir = find_ir_device_by_adapter(adap);
+	if (ir == NULL) {
+		ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
+		if (ir == NULL) {
+			ret = -ENOMEM;
+			goto out_no_ir;
+		}
+		/* store for use in ir_probe() again, and open() later on */
+		ret = add_ir_device(ir);
+		if (ret)
+			goto out_free_ir;
+
+		ir->adapter = adap;
+		mutex_init(&ir->ir_lock);
+
+		/* set lirc_dev stuff */
+		memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+		ir->l.minor       = minor; /* module option */
+		ir->l.code_length = 13;
+		ir->l.rbuf	  = NULL;
+		ir->l.fops	  = &lirc_fops;
+		ir->l.data	  = ir;
+		ir->l.dev         = &adap->dev;
+		ir->l.sample_rate = 0;
 	}
 
-	if (!disable_rx) {
-		client->addr = 0x71;
-		if (i2c_master_recv(client, &buf, 1) == 1)
-			have_rx = 1;
-		dprintk("probe 0x71 @ %s: %s\n",
-			adap->name, have_rx ? "success" : "failed");
+	if (tx_probe) {
+		/* Set up a struct IR_tx instance */
+		ir->tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
+		if (ir->tx == NULL) {
+			ret = -ENOMEM;
+			goto out_free_xx;
+		}
+
+		ir->tx->c = client;
+		ir->tx->need_boot = 1;
+		ir->tx->post_tx_ready_poll =
+			       (id->driver_data & ID_FLAG_HDPVR) ? false : true;
+	} else {
+		/* Set up a struct IR_rx instance */
+		ir->rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
+		if (ir->rx == NULL) {
+			ret = -ENOMEM;
+			goto out_free_xx;
+		}
+
+		ret = lirc_buffer_init(&ir->rx->buf, 2, BUFLEN / 2);
+		if (ret)
+			goto out_free_xx;
+
+		mutex_init(&ir->rx->buf_lock);
+		ir->rx->c = client;
+		ir->rx->hdpvr_data_fmt =
+			       (id->driver_data & ID_FLAG_HDPVR) ? true : false;
+
+		/* set lirc_dev stuff */
+		ir->l.rbuf = &ir->rx->buf;
 	}
 
-	if (!(have_rx || have_tx)) {
-		zilog_error("%s: no devices found\n", adap->name);
-		goto out_nodev;
-	}
-
-	printk(KERN_INFO "lirc_zilog: chip found with %s\n",
-		have_rx && have_tx ? "RX and TX" :
-			have_rx ? "RX only" : "TX only");
-
-	ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
-
-	if (!ir)
-		goto out_nomem;
-
-	ret = lirc_buffer_init(&ir->buf, 2, BUFLEN / 2);
-	if (ret)
-		goto out_nomem;
-
-	mutex_init(&ir->ir_lock);
-	mutex_init(&ir->buf_lock);
-	ir->need_boot = 1;
-	ir->is_hdpvr = (id->driver_data & ID_FLAG_HDPVR) ? true : false;
-
-	memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
-	ir->l.minor = -1;
-
-	/* I2C attach to device */
 	i2c_set_clientdata(client, ir);
 
+	/* Proceed only if we have the required Tx and Rx clients ready to go */
+	if (ir->tx == NULL ||
+	    (ir->rx == NULL && !tx_only)) {
+		zilog_info("probe of IR %s on %s (i2c-%d) done. Waiting on "
+			   "IR %s.\n", tx_probe ? "Tx" : "Rx", adap->name,
+			   adap->nr, tx_probe ? "Rx" : "Tx");
+		goto out_ok;
+	}
+
 	/* initialise RX device */
-	if (have_rx) {
-		DECLARE_COMPLETION(tn);
-		memcpy(&ir->c_rx, client, sizeof(struct i2c_client));
-
-		ir->c_rx.addr = 0x71;
-		strlcpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME,
-			I2C_NAME_SIZE);
-
+	if (ir->rx != NULL) {
 		/* try to fire up polling thread */
-		ir->t_notify = &tn;
-		ir->task = kthread_run(lirc_thread, ir, "lirc_zilog");
-		if (IS_ERR(ir->task)) {
-			ret = PTR_ERR(ir->task);
-			zilog_error("lirc_register_driver: cannot run "
-				    "poll thread %d\n", ret);
-			goto err;
+		ir->rx->task = kthread_run(lirc_thread, ir,
+					   "zilog-rx-i2c-%d", adap->nr);
+		if (IS_ERR(ir->rx->task)) {
+			ret = PTR_ERR(ir->rx->task);
+			zilog_error("%s: could not start IR Rx polling thread"
+				    "\n", __func__);
+			goto out_free_xx;
 		}
-		wait_for_completion(&tn);
-		ir->t_notify = NULL;
-		ir->have_rx = 1;
 	}
 
-	/* initialise TX device */
-	if (have_tx) {
-		memcpy(&ir->c_tx, client, sizeof(struct i2c_client));
-		ir->c_tx.addr = 0x70;
-		strlcpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME,
-			I2C_NAME_SIZE);
-		ir->have_tx = 1;
-	}
-
-	/* set lirc_dev stuff */
-	ir->l.code_length = 13;
-	ir->l.rbuf	  = &ir->buf;
-	ir->l.fops	  = &lirc_fops;
-	ir->l.data	  = ir;
-	ir->l.minor       = minor;
-	ir->l.dev         = &adap->dev;
-	ir->l.sample_rate = 0;
-
 	/* register with lirc */
 	ir->l.minor = lirc_register_driver(&ir->l);
 	if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
-		zilog_error("ir_attach: \"minor\" must be between 0 and %d "
-			    "(%d)!\n", MAX_IRCTL_DEVICES-1, ir->l.minor);
+		zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
+			    __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
 		ret = -EBADRQC;
-		goto err;
+		goto out_free_thread;
 	}
 
-	/* store this for getting back in open() later on */
-	ir_devices[ir->l.minor] = ir;
-
 	/*
 	 * if we have the tx device, load the 'firmware'.  We do this
 	 * after registering with lirc as otherwise hotplug seems to take
 	 * 10s to create the lirc device.
 	 */
-	if (have_tx) {
-		/* Special TX init */
-		ret = tx_init(ir);
-		if (ret != 0)
-			goto err;
+	ret = tx_init(ir->tx);
+	if (ret != 0)
+		goto out_unregister;
+
+	zilog_info("probe of IR %s on %s (i2c-%d) done. IR unit ready.\n",
+		   tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+out_ok:
+	mutex_unlock(&ir_devices_lock);
+	return 0;
+
+out_unregister:
+	lirc_unregister_driver(ir->l.minor);
+out_free_thread:
+	destroy_rx_kthread(ir->rx);
+out_free_xx:
+	if (ir->rx != NULL) {
+		if (ir->rx->buf.fifo_initialized)
+			lirc_buffer_free(&ir->rx->buf);
+		if (ir->rx->c != NULL)
+			i2c_set_clientdata(ir->rx->c, NULL);
+		kfree(ir->rx);
 	}
-
-	return 0;
-
-err:
-	/* undo everything, hopefully... */
-	if (ir->c_rx.addr)
-		ir_remove(&ir->c_rx);
-	if (ir->c_tx.addr)
-		ir_remove(&ir->c_tx);
-	return ret;
-
-out_nodev:
-	zilog_error("no device found\n");
-	return -ENODEV;
-
-out_nomem:
-	zilog_error("memory allocation failure\n");
+	if (ir->tx != NULL) {
+		if (ir->tx->c != NULL)
+			i2c_set_clientdata(ir->tx->c, NULL);
+		kfree(ir->tx);
+	}
+out_free_ir:
+	del_ir_device(ir);
 	kfree(ir);
-	return -ENOMEM;
-}
-
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg)
-{
-	/* nothing */
-	return 0;
+out_no_ir:
+	zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
+		    __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
+		   ret);
+	mutex_unlock(&ir_devices_lock);
+	return ret;
 }
 
 static int __init zilog_init(void)
@@ -1361,6 +1438,7 @@
 	zilog_notify("Zilog/Hauppauge IR driver initializing\n");
 
 	mutex_init(&tx_data_lock);
+	mutex_init(&ir_devices_lock);
 
 	request_module("firmware_class");
 
@@ -1386,7 +1464,8 @@
 
 MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
 MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
-	      "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver");
+	      "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
+	      "Andy Walls");
 MODULE_LICENSE("GPL");
 /* for compat with old name, which isn't all that accurate anymore */
 MODULE_ALIAS("lirc_pvr150");
@@ -1397,8 +1476,5 @@
 module_param(debug, bool, 0644);
 MODULE_PARM_DESC(debug, "Enable debugging messages");
 
-module_param(disable_rx, bool, 0644);
-MODULE_PARM_DESC(disable_rx, "Disable the IR receiver device");
-
-module_param(disable_tx, bool, 0644);
-MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device");
+module_param(tx_only, bool, 0644);
+MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
diff --git a/drivers/staging/msm/msm_fb.c b/drivers/staging/msm/msm_fb.c
index 23fa049..a2f29d4 100644
--- a/drivers/staging/msm/msm_fb.c
+++ b/drivers/staging/msm/msm_fb.c
@@ -347,7 +347,7 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(mfd->fbi, 1);
 
 	ret = msm_fb_suspend_sub(mfd);
@@ -358,7 +358,7 @@
 		pdev->dev.power.power_state = state;
 	}
 
-	release_console_sem();
+	console_unlock();
 	return ret;
 }
 #else
@@ -431,11 +431,11 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	ret = msm_fb_resume_sub(mfd);
 	pdev->dev.power.power_state = PMSG_ON;
 	fb_set_suspend(mfd->fbi, 1);
-	release_console_sem();
+	console_unlock();
 
 	return ret;
 }
diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c
index 033fc94..2a80775 100644
--- a/drivers/staging/msm/msm_fb_bl.c
+++ b/drivers/staging/msm/msm_fb_bl.c
@@ -42,7 +42,7 @@
 	return 0;
 }
 
-static struct backlight_ops msm_fb_bl_ops = {
+static const struct backlight_ops msm_fb_bl_ops = {
 	.get_brightness = msm_fb_bl_get_brightness,
 	.update_status = msm_fb_bl_update_status,
 };
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index ac2d3d0..35f9cda 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,6 +1,5 @@
 TODO:
 	- checkpatch.pl cleanups
-	- port geode gpio calls to newer cs5535 API
 	- see if vx855 gpio API can be made similar enough to cs5535 so we can
 	  share more code
 	- allow simultaneous XO-1 and XO-1.5 support
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 4ca45ec..56a283d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -27,7 +27,6 @@
 #include <asm/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/reboot.h>
-#include <linux/gpio.h>
 #include <asm/tsc.h>
 #include <asm/olpc.h>
 
@@ -49,7 +48,7 @@
 	int (*init)(void);
 	void (*bus_stabilize_wiggle)(void);
 	void (*set_dconload)(int);
-	int (*read_status)(void);
+	u8 (*read_status)(void);
 };
 
 static struct dcon_platform_data *pdata;
@@ -374,17 +373,17 @@
 		 *
 		 * For now, we just hope..
 		 */
-		acquire_console_sem();
+		console_lock();
 		ignore_fb_events = 1;
 		if (fb_blank(fbinfo, FB_BLANK_UNBLANK)) {
 			ignore_fb_events = 0;
-			release_console_sem();
+			console_unlock();
 			printk(KERN_ERR "olpc-dcon:  Failed to enter CPU mode\n");
 			dcon_pending = DCON_SOURCE_DCON;
 			return;
 		}
 		ignore_fb_events = 0;
-		release_console_sem();
+		console_unlock();
 
 		/* And turn off the DCON */
 		pdata->set_dconload(1);
@@ -436,12 +435,12 @@
 			}
 		}
 
-		acquire_console_sem();
+		console_lock();
 		ignore_fb_events = 1;
 		if (fb_blank(fbinfo, FB_BLANK_POWERDOWN))
 			printk(KERN_ERR "olpc-dcon:  couldn't blank fb!\n");
 		ignore_fb_events = 0;
-		release_console_sem();
+		console_unlock();
 
 		printk(KERN_INFO "olpc-dcon: The DCON has control\n");
 		break;
@@ -615,7 +614,7 @@
 	__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
 };
 
-static struct backlight_ops dcon_bl_ops = {
+static const struct backlight_ops dcon_bl_ops = {
 	.get_brightness = dconbl_get,
 	.update_status = dconbl_set
 };
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 6453ca4..e566d21 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -29,26 +29,6 @@
 #define DCON_REG_SCAN_INT	9
 #define DCON_REG_BRIGHT		10
 
-/* GPIO registers (CS5536) */
-
-#define MSR_LBAR_GPIO		0x5140000C
-
-#define GPIOx_OUT_VAL     0x00
-#define GPIOx_OUT_EN      0x04
-#define GPIOx_IN_EN       0x20
-#define GPIOx_INV_EN      0x24
-#define GPIOx_IN_FLTR_EN  0x28
-#define GPIOx_EVNTCNT_EN  0x2C
-#define GPIOx_READ_BACK   0x30
-#define GPIOx_EVNT_EN     0x38
-#define GPIOx_NEGEDGE_EN  0x44
-#define GPIOx_NEGEDGE_STS 0x4C
-#define GPIO_FLT7_AMNT    0xD8
-#define GPIO_MAP_X        0xE0
-#define GPIO_MAP_Y        0xE4
-#define GPIO_FE7_SEL      0xF7
-
-
 /* Status values */
 
 #define DCONSTAT_SCANINT	0
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 779fb7d..043198d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -10,54 +10,70 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
-
+#include <linux/cs5535.h>
+#include <linux/gpio.h>
 #include <asm/olpc.h>
 
 #include "olpc_dcon.h"
 
-/* Base address of the GPIO registers */
-static unsigned long gpio_base;
-
-/*
- * List of GPIOs that we care about:
- * (in)  GPIO12   -- DCONBLANK
- * (in)  GPIO[56] -- DCONSTAT[01]
- * (out) GPIO11   -- DCONLOAD
- */
-
-#define IN_GPIOS ((1<<5) | (1<<6) | (1<<7) | (1<<12))
-#define OUT_GPIOS (1<<11)
-
 static int dcon_init_xo_1(void)
 {
-	unsigned long lo, hi;
 	unsigned char lob;
 
-	rdmsr(MSR_LBAR_GPIO, lo, hi);
-
-	/* Check the mask and whether GPIO is enabled (sanity check) */
-	if (hi != 0x0000f001) {
-		printk(KERN_ERR "GPIO not enabled -- cannot use DCON\n");
-		return -ENODEV;
+	if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request STAT0 GPIO\n");
+		return -EIO;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request STAT1 GPIO\n");
+		goto err_gp_stat1;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request IRQ GPIO\n");
+		goto err_gp_irq;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request LOAD GPIO\n");
+		goto err_gp_load;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request BLANK GPIO\n");
+		goto err_gp_blank;
 	}
 
-	/* Mask off the IO base address */
-	gpio_base = lo & 0x0000ff00;
-
 	/* Turn off the event enable for GPIO7 just to be safe */
-	outl(1 << (16+7), gpio_base + GPIOx_EVNT_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+
+	/*
+	 * Determine the current state by reading the GPIO bit; earlier
+	 * stages of the boot process have established the state.
+	 *
+	 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
+	 * this is because OFW will disable input for the pin and set a value.
+	 * READ_BACK will only contain a valid value if input is enabled and
+	 * then a value is set.  So, future readings of the pin can use
+	 * READ_BACK, but the first one cannot.  Awesome, huh?
+	 */
+	dcon_source = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
+		? DCON_SOURCE_CPU
+		: DCON_SOURCE_DCON;
+	dcon_pending = dcon_source;
 
 	/* Set the directions for the GPIO pins */
-	outl(OUT_GPIOS | (IN_GPIOS << 16), gpio_base + GPIOx_OUT_EN);
-	outl(IN_GPIOS | (OUT_GPIOS << 16), gpio_base + GPIOx_IN_EN);
+	gpio_direction_input(OLPC_GPIO_DCON_STAT0);
+	gpio_direction_input(OLPC_GPIO_DCON_STAT1);
+	gpio_direction_input(OLPC_GPIO_DCON_IRQ);
+	gpio_direction_input(OLPC_GPIO_DCON_BLANK);
+	gpio_direction_output(OLPC_GPIO_DCON_LOAD,
+			dcon_source == DCON_SOURCE_CPU);
 
 	/* Set up the interrupt mappings */
 
 	/* Set the IRQ to pair 2 */
-	geode_gpio_event_irq(OLPC_GPIO_DCON_IRQ, 2);
+	cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
 
 	/* Enable group 2 to trigger the DCON interrupt */
-	geode_gpio_set_irq(2, DCON_IRQ);
+	cs5535_gpio_set_irq(2, DCON_IRQ);
 
 	/* Select edge level for interrupt (in PIC) */
 	lob = inb(0x4d0);
@@ -65,52 +81,61 @@
 	outb(lob, 0x4d0);
 
 	/* Register the interrupt handler */
-	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver))
-		return -EIO;
+	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver)) {
+		printk(KERN_ERR "olpc-dcon: failed to request DCON's irq\n");
+		goto err_req_irq;
+	}
 
 	/* Clear INV_EN for GPIO7 (DCONIRQ) */
-	outl((1<<(16+7)), gpio_base + GPIOx_INV_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
 
 	/* Enable filter for GPIO12 (DCONBLANK) */
-	outl(1<<(12), gpio_base + GPIOx_IN_FLTR_EN);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
 
 	/* Disable filter for GPIO7 */
-	outl(1<<(16+7), gpio_base + GPIOx_IN_FLTR_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
 
 	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-
-	outl(1<<(16+7), gpio_base + GPIOx_EVNTCNT_EN);
-	outl(1<<(16+12), gpio_base + GPIOx_EVNTCNT_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
 
 	/* Add GPIO12 to the Filter Event Pair #7 */
-	outb(12, gpio_base + GPIO_FE7_SEL);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
 
 	/* Turn off negative Edge Enable for GPIO12 */
-	outl(1<<(16+12), gpio_base + GPIOx_NEGEDGE_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
 
 	/* Enable negative Edge Enable for GPIO7 */
-	outl(1<<7, gpio_base + GPIOx_NEGEDGE_EN);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
 
 	/* Zero the filter amount for Filter Event Pair #7 */
-	outw(0, gpio_base + GPIO_FLT7_AMNT);
+	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
 
 	/* Clear the negative edge status for GPIO7 and GPIO12 */
-	outl((1<<7) | (1<<12), gpio_base+0x4c);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
 
 	/* FIXME:  Clear the positive status as well, just to be sure */
-	outl((1<<7) | (1<<12), gpio_base+0x48);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
 
 	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-	outl((1<<(7))|(1<<12), gpio_base + GPIOx_EVNT_EN);
-
-	/* Determine the current state by reading the GPIO bit */
-	/* Earlier stages of the boot process have established the state */
-	dcon_source = inl(gpio_base + GPIOx_OUT_VAL) & (1<<11)
-		? DCON_SOURCE_CPU
-		: DCON_SOURCE_DCON;
-	dcon_pending = dcon_source;
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
 
 	return 0;
+
+err_req_irq:
+	gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+	gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+	gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+	gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+	gpio_free(OLPC_GPIO_DCON_STAT0);
+	return -EIO;
 }
 
 static void dcon_wiggle_xo_1(void)
@@ -128,37 +153,44 @@
 	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
 	 * GPIO15.
  	 */
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
 
 	for (x = 0; x < 16; x++) {
 		udelay(5);
-		geode_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
 		udelay(5);
-		geode_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
 	}
 	udelay(5);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
 }
 
 static void dcon_set_dconload_1(int val)
 {
-	if (val)	
-		outl(1<<11, gpio_base + GPIOx_OUT_VAL);
-	else
-		outl(1<<(11 + 16), gpio_base + GPIOx_OUT_VAL);
+	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
 }
 
-static int dcon_read_status_xo_1(void)
+static u8 dcon_read_status_xo_1(void)
 {
-	int status = inl(gpio_base + GPIOx_READ_BACK) >> 5;
-	
+	u8 status;
+
+	status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+	status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
 	/* Clear the negative edge status for GPIO7 */
-	outl(1 << 7, gpio_base + GPIOx_NEGEDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
 
 	return status;
 }
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index cca6a23..4f56098 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -195,9 +195,9 @@
 	}
 }
 
-static int dcon_read_status_xo_1_5(void) 
+static u8 dcon_read_status_xo_1_5(void)
 {
-	int status;
+	u8 status;
 	
 	if (!dcon_was_irq())
 		return -1;
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c
index 9279897..b2e9186 100644
--- a/drivers/staging/pohmelfs/net.c
+++ b/drivers/staging/pohmelfs/net.c
@@ -413,7 +413,7 @@
 				if (dentry) {
 					alias = d_materialise_unique(dentry, &npi->vfs_inode);
 					if (alias)
-						dput(dentry);
+						dput(alias);
 				}
 
 				dput(dentry);
diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
index 701561d..236dd36 100644
--- a/drivers/staging/rt2860/rt_main_dev.c
+++ b/drivers/staging/rt2860/rt_main_dev.c
@@ -484,8 +484,6 @@
 	net_dev->ml_priv = (void *)pAd;
 	pAd->net_dev = net_dev;
 
-	netif_stop_queue(net_dev);
-
 	return net_dev;
 
 }
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ee68d51..322bf49 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -106,6 +106,7 @@
 	{USB_DEVICE(0x0411, 0x016f)},	/* MelCo.,Inc. WLI-UC-G301N */
 	{USB_DEVICE(0x1737, 0x0070)},	/* Linksys WUSB100 */
 	{USB_DEVICE(0x1737, 0x0071)},	/* Linksys WUSB600N */
+	{USB_DEVICE(0x1737, 0x0078)},	/* Linksys WUSB100v2 */
 	{USB_DEVICE(0x0411, 0x00e8)},	/* Buffalo WLI-UC-G300N */
 	{USB_DEVICE(0x050d, 0x815c)},	/* Belkin F5D8053 */
 	{USB_DEVICE(0x100D, 0x9031)},	/* Motorola 2770 */
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 32088a6..84be383 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -128,12 +128,13 @@
 	u8 *ptmpchar = NULL, *ppayload, *ptr;
 	struct tx_desc *ptx_desc;
 	u32 txdscp_sz = sizeof(struct tx_desc);
+	u8 ret = _FAIL;
 
 	ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw);
 	if (pmappedfw && (ulfilelength > 0)) {
 		update_fwhdr(&fwhdr, pmappedfw);
 		if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
-			goto exit_fail;
+			goto firmware_rel;
 		fill_fwpriv(padapter, &fwhdr.fwpriv);
 		/* firmware check ok */
 		maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
@@ -141,7 +142,7 @@
 		maxlen += txdscp_sz;
 		ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
 		if (ptmpchar == NULL)
-			return _FAIL;
+			goto firmware_rel;
 
 		ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
 			    ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
@@ -273,11 +274,13 @@
 			goto exit_fail;
 	} else
 		goto exit_fail;
-	return _SUCCESS;
+	ret = _SUCCESS;
 
 exit_fail:
 	kfree(ptmpchar);
-	return _FAIL;
+firmware_rel:
+	release_firmware((struct firmware *)phfwfile_hdl);
+	return ret;
 }
 
 uint rtl8712_hal_init(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index a692ee8..21ce2af 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -47,54 +47,123 @@
 static void r871xu_dev_remove(struct usb_interface *pusb_intf);
 
 static struct usb_device_id rtl871x_usb_id_tbl[] = {
-	/*92SU
-	 * Realtek */
-	{USB_DEVICE(0x0bda, 0x8171)},
-	{USB_DEVICE(0x0bda, 0x8172)},
+
+/* RTL8188SU */
+	/* Realtek */
+	{USB_DEVICE(0x0BDA, 0x8171)},
 	{USB_DEVICE(0x0bda, 0x8173)},
-	{USB_DEVICE(0x0bda, 0x8174)},
 	{USB_DEVICE(0x0bda, 0x8712)},
 	{USB_DEVICE(0x0bda, 0x8713)},
 	{USB_DEVICE(0x0bda, 0xC512)},
-	/* Abocom  */
+	/* Abocom */
 	{USB_DEVICE(0x07B8, 0x8188)},
-	/* Corega */
-	{USB_DEVICE(0x07aa, 0x0047)},
-	/* Dlink */
-	{USB_DEVICE(0x07d1, 0x3303)},
-	{USB_DEVICE(0x07d1, 0x3302)},
-	{USB_DEVICE(0x07d1, 0x3300)},
-	/* Dlink for Skyworth */
-	{USB_DEVICE(0x14b2, 0x3300)},
-	{USB_DEVICE(0x14b2, 0x3301)},
-	{USB_DEVICE(0x14b2, 0x3302)},
-	/* EnGenius */
-	{USB_DEVICE(0x1740, 0x9603)},
-	{USB_DEVICE(0x1740, 0x9605)},
+	/* ASUS */
+	{USB_DEVICE(0x0B05, 0x1786)},
+	{USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
 	/* Belkin */
-	{USB_DEVICE(0x050d, 0x815F)},
-	{USB_DEVICE(0x050d, 0x945A)},
-	{USB_DEVICE(0x050d, 0x845A)},
-	/* Guillemot */
-	{USB_DEVICE(0x06f8, 0xe031)},
+	{USB_DEVICE(0x050D, 0x945A)},
+	/* Corega */
+	{USB_DEVICE(0x07AA, 0x0047)},
+	/* D-Link */
+	{USB_DEVICE(0x2001, 0x3306)},
+	{USB_DEVICE(0x07D1, 0x3306)}, /* 11n mode disable */
 	/* Edimax */
 	{USB_DEVICE(0x7392, 0x7611)},
-	{USB_DEVICE(0x7392, 0x7612)},
-	{USB_DEVICE(0x7392, 0x7622)},
+	/* EnGenius */
+	{USB_DEVICE(0x1740, 0x9603)},
+	/* Hawking */
+	{USB_DEVICE(0x0E66, 0x0016)},
+	/* Hercules */
+	{USB_DEVICE(0x06F8, 0xE034)},
+	{USB_DEVICE(0x06F8, 0xE032)},
+	/* Logitec */
+	{USB_DEVICE(0x0789, 0x0167)},
+	/* PCI */
+	{USB_DEVICE(0x2019, 0xAB28)},
+	{USB_DEVICE(0x2019, 0xED16)},
 	/* Sitecom */
+	{USB_DEVICE(0x0DF6, 0x0057)},
 	{USB_DEVICE(0x0DF6, 0x0045)},
+	{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
+	{USB_DEVICE(0x0DF6, 0x004B)},
+	{USB_DEVICE(0x0DF6, 0x0063)},
+	/* Sweex */
+	{USB_DEVICE(0x177F, 0x0154)},
+	/* Thinkware */
+	{USB_DEVICE(0x0BDA, 0x5077)},
+	/* Toshiba */
+	{USB_DEVICE(0x1690, 0x0752)},
+	/* - */
+	{USB_DEVICE(0x20F4, 0x646B)},
+	{USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8191SU */
+	/* Realtek */
+	{USB_DEVICE(0x0BDA, 0x8172)},
+	/* Amigo */
+	{USB_DEVICE(0x0EB0, 0x9061)},
+	/* ASUS/EKB */
+	{USB_DEVICE(0x0BDA, 0x8172)},
+	{USB_DEVICE(0x13D3, 0x3323)},
+	{USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
+	{USB_DEVICE(0x13D3, 0x3342)},
+	/* ASUS/EKBLenovo */
+	{USB_DEVICE(0x13D3, 0x3333)},
+	{USB_DEVICE(0x13D3, 0x3334)},
+	{USB_DEVICE(0x13D3, 0x3335)}, /* 11n mode disable */
+	{USB_DEVICE(0x13D3, 0x3336)}, /* 11n mode disable */
+	/* ASUS/Media BOX */
+	{USB_DEVICE(0x13D3, 0x3309)},
+	/* Belkin */
+	{USB_DEVICE(0x050D, 0x815F)},
+	/* D-Link */
+	{USB_DEVICE(0x07D1, 0x3302)},
+	{USB_DEVICE(0x07D1, 0x3300)},
+	{USB_DEVICE(0x07D1, 0x3303)},
+	/* Edimax */
+	{USB_DEVICE(0x7392, 0x7612)},
+	/* EnGenius */
+	{USB_DEVICE(0x1740, 0x9605)},
+	/* Guillemot */
+	{USB_DEVICE(0x06F8, 0xE031)},
 	/* Hawking */
 	{USB_DEVICE(0x0E66, 0x0015)},
-	{USB_DEVICE(0x0E66, 0x0016)},
-	{USB_DEVICE(0x0b05, 0x1786)},
-	{USB_DEVICE(0x0b05, 0x1791)},    /* 11n mode disable */
-
+	/* Mediao */
 	{USB_DEVICE(0x13D3, 0x3306)},
-	{USB_DEVICE(0x13D3, 0x3309)},
+	/* PCI */
+	{USB_DEVICE(0x2019, 0xED18)},
+	{USB_DEVICE(0x2019, 0x4901)},
+	/* Sitecom */
+	{USB_DEVICE(0x0DF6, 0x0058)},
+	{USB_DEVICE(0x0DF6, 0x0049)},
+	{USB_DEVICE(0x0DF6, 0x004C)},
+	{USB_DEVICE(0x0DF6, 0x0064)},
+	/* Skyworth */
+	{USB_DEVICE(0x14b2, 0x3300)},
+	{USB_DEVICE(0x14b2, 0x3301)},
+	{USB_DEVICE(0x14B2, 0x3302)},
+	/* - */
+	{USB_DEVICE(0x04F2, 0xAFF2)},
+	{USB_DEVICE(0x04F2, 0xAFF5)},
+	{USB_DEVICE(0x04F2, 0xAFF6)},
+	{USB_DEVICE(0x13D3, 0x3339)},
+	{USB_DEVICE(0x13D3, 0x3340)}, /* 11n mode disable */
+	{USB_DEVICE(0x13D3, 0x3341)}, /* 11n mode disable */
 	{USB_DEVICE(0x13D3, 0x3310)},
-	{USB_DEVICE(0x13D3, 0x3311)},    /* 11n mode disable */
 	{USB_DEVICE(0x13D3, 0x3325)},
-	{USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8192SU */
+	/* Realtek */
+	{USB_DEVICE(0x0BDA, 0x8174)},
+	{USB_DEVICE(0x0BDA, 0x8174)},
+	/* Belkin */
+	{USB_DEVICE(0x050D, 0x845A)},
+	/* Corega */
+	{USB_DEVICE(0x07AA, 0x0051)},
+	/* Edimax */
+	{USB_DEVICE(0x7392, 0x7622)},
+	/* NEC */
+	{USB_DEVICE(0x0409, 0x02B6)},
 	{}
 };
 
@@ -103,8 +172,20 @@
 static struct specific_device_id specific_device_id_tbl[] = {
 	{.idVendor = 0x0b05, .idProduct = 0x1791,
 		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x0df6, .idProduct = 0x0059,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3306,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
 	{.idVendor = 0x13D3, .idProduct = 0x3311,
 		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3335,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3336,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3340,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3341,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
 	{}
 };
 
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index ac2bf11..701e8d5 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -269,7 +269,7 @@
 	return 0;
 }
 
-static struct backlight_ops backlight_ops = {
+static const struct backlight_ops backlight_ops = {
 	.get_brightness	= get_brightness,
 	.update_status	= update_status,
 };
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index f4b163f..d007e4a 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -1044,9 +1044,9 @@
 
 	/* when doing suspend, call fb apis and pci apis */
 	if (msg.event == PM_EVENT_SUSPEND) {
-		acquire_console_sem();
+		console_lock();
 		fb_set_suspend(&sfb->fb, 1);
-		release_console_sem();
+		console_unlock();
 		retv = pci_save_state(pdev);
 		pci_disable_device(pdev);
 		retv = pci_choose_state(pdev, msg);
@@ -1071,7 +1071,7 @@
 	/* when resuming, restore pci data and fb cursor */
 	if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
 		retv = pci_set_power_state(pdev, PCI_D0);
-		retv = pci_restore_state(pdev);
+		pci_restore_state(pdev);
 		if (pci_enable_device(pdev))
 			return -1;
 		pci_set_master(pdev);
@@ -1105,9 +1105,9 @@
 
 	smtcfb_setmode(sfb);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(&sfb->fb, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index dd612f5..f204d33 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -283,7 +283,7 @@
 		unsigned int, const char *, const struct qstr *);
 static int smb_delete_dentry(const struct dentry *);
 
-static const struct dentry_operations smbfs_dentry_operations =
+const struct dentry_operations smbfs_dentry_operations =
 {
 	.d_revalidate	= smb_lookup_validate,
 	.d_hash		= smb_hash_dentry,
@@ -291,7 +291,7 @@
 	.d_delete	= smb_delete_dentry,
 };
 
-static const struct dentry_operations smbfs_dentry_operations_case =
+const struct dentry_operations smbfs_dentry_operations_case =
 {
 	.d_revalidate	= smb_lookup_validate,
 	.d_delete	= smb_delete_dentry,
@@ -403,12 +403,6 @@
 void
 smb_new_dentry(struct dentry *dentry)
 {
-	struct smb_sb_info *server = server_from_dentry(dentry);
-
-	if (server->mnt->flags & SMB_MOUNT_CASE)
-		d_set_d_op(dentry, &smbfs_dentry_operations_case);
-	else
-		d_set_d_op(dentry, &smbfs_dentry_operations);
 	dentry->d_time = jiffies;
 }
 
@@ -440,7 +434,6 @@
 	struct smb_fattr finfo;
 	struct inode *inode;
 	int error;
-	struct smb_sb_info *server;
 
 	error = -ENAMETOOLONG;
 	if (dentry->d_name.len > SMB_MAXNAMELEN)
@@ -468,12 +461,6 @@
 		inode = smb_iget(dir->i_sb, &finfo);
 		if (inode) {
 	add_entry:
-			server = server_from_dentry(dentry);
-			if (server->mnt->flags & SMB_MOUNT_CASE)
-				d_set_d_op(dentry, &smbfs_dentry_operations_case);
-			else
-				d_set_d_op(dentry, &smbfs_dentry_operations);
-
 			d_add(dentry, inode);
 			smb_renew_times(dentry);
 			error = 0;
diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c
index 244319d..0778589 100644
--- a/drivers/staging/smbfs/inode.c
+++ b/drivers/staging/smbfs/inode.c
@@ -614,6 +614,10 @@
 		printk(KERN_ERR "smbfs: failed to start smbiod\n");
 		goto out_no_smbiod;
 	}
+	if (server->mnt->flags & SMB_MOUNT_CASE)
+		sb->s_d_op = &smbfs_dentry_operations_case;
+	else
+		sb->s_d_op = &smbfs_dentry_operations;
 
 	/*
 	 * Keep the super block locked while we get the root inode.
diff --git a/drivers/staging/smbfs/proto.h b/drivers/staging/smbfs/proto.h
index 05939a6..3883cb1 100644
--- a/drivers/staging/smbfs/proto.h
+++ b/drivers/staging/smbfs/proto.h
@@ -38,6 +38,8 @@
 extern const struct file_operations smb_dir_operations;
 extern const struct inode_operations smb_dir_inode_operations;
 extern const struct inode_operations smb_dir_inode_operations_unix;
+extern const struct dentry_operations smbfs_dentry_operations_case;
+extern const struct dentry_operations smbfs_dentry_operations;
 extern void smb_new_dentry(struct dentry *dentry);
 extern void smb_renew_times(struct dentry *dentry);
 /* cache.c */
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 408bb9b..07a7f54 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -332,7 +332,7 @@
 	unsigned long flags;
 
 	len = strlen(buf);
-	if (len > 0 || len < 3) {
+	if (len > 0 && len < 3) {
 		ch = buf[0];
 		if (ch == '\n')
 			ch = '0';
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index e8f047e..80183a7 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -986,12 +986,6 @@
 	input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
 						MAX_TOUCH_MAJOR, 0, 0);
 
-	retval = input_register_device(rmi4_data->input_dev);
-	if (retval) {
-		dev_err(&client->dev, "%s:input register failed\n", __func__);
-		goto err_input_register;
-	}
-
 	/* Clear interrupts */
 	synaptics_rmi4_i2c_block_read(rmi4_data,
 			rmi4_data->fn01_data_base_addr + 1, intr_status,
@@ -1003,15 +997,20 @@
 	if (retval) {
 		dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
 				__func__, platformdata->irq_number);
-		goto err_request_irq;
+		goto err_unset_clientdata;
+	}
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(&client->dev, "%s:input register failed\n", __func__);
+		goto err_free_irq;
 	}
 
 	return retval;
 
-err_request_irq:
+err_free_irq:
 	free_irq(platformdata->irq_number, rmi4_data);
-	input_unregister_device(rmi4_data->input_dev);
-err_input_register:
+err_unset_clientdata:
 	i2c_set_clientdata(client, NULL);
 err_query_dev:
 	if (platformdata->regulator_en) {
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 5718645..27e0aa8 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -949,7 +949,7 @@
  *      Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
  *      schedules a DPC to dispatch I/O.
  */
-void io_mbox_msg(u32 msg)
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
 {
 	struct io_mgr *pio_mgr;
 	struct dev_object *dev_obj;
@@ -959,9 +959,9 @@
 	dev_get_io_mgr(dev_obj, &pio_mgr);
 
 	if (!pio_mgr)
-		return;
+		return NOTIFY_BAD;
 
-	pio_mgr->intr_val = (u16)msg;
+	pio_mgr->intr_val = (u16)((u32)msg);
 	if (pio_mgr->intr_val & MBX_PM_CLASS)
 		io_dispatch_pm(pio_mgr);
 
@@ -973,7 +973,7 @@
 		spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
 		tasklet_schedule(&pio_mgr->dpc_tasklet);
 	}
-	return;
+	return NOTIFY_OK;
 }
 
 /*
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index a3b0a18..a3f69f6 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -223,6 +223,10 @@
 	bridge_msg_set_queue_id,
 };
 
+static struct notifier_block dsp_mbox_notifier = {
+	.notifier_call = io_mbox_msg,
+};
+
 static inline void flush_all(struct bridge_dev_context *dev_context)
 {
 	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
@@ -553,7 +557,7 @@
 		 * Enable Mailbox events and also drain any pending
 		 * stale messages.
 		 */
-		dev_context->mbox = omap_mbox_get("dsp");
+		dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
 		if (IS_ERR(dev_context->mbox)) {
 			dev_context->mbox = NULL;
 			pr_err("%s: Failed to get dsp mailbox handle\n",
@@ -563,8 +567,6 @@
 
 	}
 	if (!status) {
-		dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
-
 /*PM_IVA2GRPSEL_PER = 0xC0;*/
 		temp = readl(resources->dw_per_pm_base + 0xA8);
 		temp = (temp & 0xFFFFFF30) | 0xC0;
@@ -685,7 +687,7 @@
 	/* Disable the mailbox interrupts */
 	if (dev_context->mbox) {
 		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
-		omap_mbox_put(dev_context->mbox);
+		omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
 		dev_context->mbox = NULL;
 	}
 	/* Reset IVA2 clocks*/
@@ -786,10 +788,7 @@
 
 	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
 	if (pt_attrs != NULL) {
-		/* Assuming that we use only DSP's memory map
-		 * until 0x4000:0000 , we would need only 1024
-		 * L1 enties i.e L1 size = 4K */
-		pt_attrs->l1_size = 0x1000;
+		pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */
 		align_size = pt_attrs->l1_size;
 		/* Align sizes are expected to be power of 2 */
 		/* we like to get aligned on L1 table size */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index 18aec55..8242c70 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -72,22 +72,17 @@
 /*
  *  ======== io_mbox_msg ========
  *  Purpose:
- *      Main interrupt handler for the shared memory Bridge channel manager.
- *      Calls the Bridge's chnlsm_isr to determine if this interrupt is ours,
- *      then schedules a DPC to dispatch I/O.
+ *	Main message handler for the shared memory Bridge channel manager.
+ *	Determines if this message is ours, then schedules a DPC to
+ *	dispatch I/O.
  *  Parameters:
- *      ref_data:   Pointer to the channel manager object for this board.
- *                  Set in an initial call to ISR_Install().
+ *	self:	Pointer to its own notifier_block struct.
+ *	len:	Length of message.
+ *	msg:	Message code received.
  *  Returns:
- *      TRUE if interrupt handled; FALSE otherwise.
- *  Requires:
- *      Must be in locked memory if executing in kernel mode.
- *      Must only call functions which are in locked memory if Kernel mode.
- *      Must only call asynchronous services.
- *      Interrupts are disabled and EOI for this interrupt has been sent.
- *  Ensures:
+ *	NOTIFY_OK if handled; NOTIFY_BAD otherwise.
  */
-void io_mbox_msg(u32 msg);
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg);
 
 /*
  *  ======== io_request_chnl ========
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index 8fe017c..eb9b9f1 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -1450,29 +1450,55 @@
  * ------------------------------------------------------------------
  */
 
-int tm6000_v4l2_register(struct tm6000_core *dev)
+static struct video_device *vdev_init(struct tm6000_core *dev,
+		const struct video_device
+		*template, const char *type_name)
 {
-	int ret = -1;
 	struct video_device *vfd;
 
 	vfd = video_device_alloc();
-	if(!vfd) {
+	if (NULL == vfd)
+		return NULL;
+
+	*vfd = *template;
+	vfd->v4l2_dev = &dev->v4l2_dev;
+	vfd->release = video_device_release;
+	vfd->debug = tm6000_debug;
+	vfd->lock = &dev->lock;
+
+	snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
+
+	video_set_drvdata(vfd, dev);
+	return vfd;
+}
+
+int tm6000_v4l2_register(struct tm6000_core *dev)
+{
+	int ret = -1;
+
+	dev->vfd = vdev_init(dev, &tm6000_template, "video");
+
+	if (!dev->vfd) {
+		printk(KERN_INFO "%s: can't register video device\n",
+		       dev->name);
 		return -ENOMEM;
 	}
-	dev->vfd = vfd;
 
 	/* init video dma queues */
 	INIT_LIST_HEAD(&dev->vidq.active);
 	INIT_LIST_HEAD(&dev->vidq.queued);
 
-	memcpy(dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
-	dev->vfd->debug = tm6000_debug;
-	dev->vfd->lock = &dev->lock;
-
-	vfd->v4l2_dev = &dev->v4l2_dev;
-	video_set_drvdata(vfd, dev);
-
 	ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
+
+	if (ret < 0) {
+		printk(KERN_INFO "%s: can't register video device\n",
+		       dev->name);
+		return ret;
+	}
+
+	printk(KERN_INFO "%s: registered device %s\n",
+	       dev->name, video_device_node_name(dev->vfd));
+
 	printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
 	return ret;
 }
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index 30dbfb6..d732679 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -32,6 +32,7 @@
 
 struct stub_device {
 	struct usb_interface *interface;
+	struct usb_device *udev;
 	struct list_head list;
 
 	struct usbip_device ud;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index b186b5f..a7ce51c 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -258,10 +258,11 @@
 static void stub_device_reset(struct usbip_device *ud)
 {
 	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
-	struct usb_device *udev = interface_to_usbdev(sdev->interface);
+	struct usb_device *udev = sdev->udev;
 	int ret;
 
 	usbip_udbg("device reset");
+
 	ret = usb_lock_device_for_reset(udev, sdev->interface);
 	if (ret < 0) {
 		dev_err(&udev->dev, "lock for reset\n");
@@ -309,7 +310,8 @@
  *
  * Allocates and initializes a new stub_device struct.
  */
-static struct stub_device *stub_device_alloc(struct usb_interface *interface)
+static struct stub_device *stub_device_alloc(struct usb_device *udev,
+					     struct usb_interface *interface)
 {
 	struct stub_device *sdev;
 	int busnum = interface_to_busnum(interface);
@@ -324,7 +326,8 @@
 		return NULL;
 	}
 
-	sdev->interface = interface;
+	sdev->interface = usb_get_intf(interface);
+	sdev->udev = usb_get_dev(udev);
 
 	/*
 	 * devid is defined with devnum when this driver is first allocated.
@@ -450,11 +453,12 @@
 			return err;
 		}
 
+		usb_get_intf(interface);
 		return 0;
 	}
 
 	/* ok. this is my device. */
-	sdev = stub_device_alloc(interface);
+	sdev = stub_device_alloc(udev, interface);
 	if (!sdev)
 		return -ENOMEM;
 
@@ -476,6 +480,8 @@
 		dev_err(&interface->dev, "create sysfs files for %s\n",
 			udev_busid);
 		usb_set_intfdata(interface, NULL);
+		usb_put_intf(interface);
+
 		busid_priv->interf_count = 0;
 
 		busid_priv->sdev = NULL;
@@ -545,6 +551,7 @@
 	if (busid_priv->interf_count > 1) {
 		busid_priv->interf_count--;
 		shutdown_busid(busid_priv);
+		usb_put_intf(interface);
 		return;
 	}
 
@@ -554,6 +561,9 @@
 	/* 1. shutdown the current connection */
 	shutdown_busid(busid_priv);
 
+	usb_put_dev(sdev->udev);
+	usb_put_intf(interface);
+
 	/* 3. free sdev */
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 3de6fd2..ae6ac82 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -364,7 +364,7 @@
 
 static int get_pipe(struct stub_device *sdev, int epnum, int dir)
 {
-	struct usb_device *udev = interface_to_usbdev(sdev->interface);
+	struct usb_device *udev = sdev->udev;
 	struct usb_host_endpoint *ep;
 	struct usb_endpoint_descriptor *epd = NULL;
 
@@ -484,7 +484,7 @@
 	int ret;
 	struct stub_priv *priv;
 	struct usbip_device *ud = &sdev->ud;
-	struct usb_device *udev = interface_to_usbdev(sdev->interface);
+	struct usb_device *udev = sdev->udev;
 	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
 
 
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index 41a1fe5..afc3b1a 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -100,9 +100,6 @@
 	 * But, the index of this array begins from 0.
 	 */
 	struct vhci_device vdev[VHCI_NPORTS];
-
-	/* vhci_device which has not been assiged its address yet */
-	int pending_port;
 };
 
 
@@ -119,6 +116,9 @@
 void vhci_rx_loop(struct usbip_task *ut);
 void vhci_tx_loop(struct usbip_task *ut);
 
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+					    __u32 seqnum);
+
 #define hardware		(&the_controller->pdev.dev)
 
 static inline struct vhci_device *port_to_vdev(__u32 port)
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 08bd26a..a35fe61 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -138,8 +138,6 @@
 	 * the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
 	 * spin_unlock(&the_controller->vdev[rhport].ud.lock); */
 
-	the_controller->pending_port = rhport;
-
 	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
@@ -559,6 +557,7 @@
 	struct device *dev = &urb->dev->dev;
 	int ret = 0;
 	unsigned long flags;
+	struct vhci_device *vdev;
 
 	usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
 		    hcd, urb, mem_flags);
@@ -574,6 +573,18 @@
 		return urb->status;
 	}
 
+	vdev = port_to_vdev(urb->dev->portnum-1);
+
+	/* refuse enqueue for dead connection */
+	spin_lock(&vdev->ud.lock);
+	if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
+		usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
+		spin_unlock(&vdev->ud.lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
+		return -ENODEV;
+	}
+	spin_unlock(&vdev->ud.lock);
+
 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
 	if (ret)
 		goto no_need_unlink;
@@ -592,8 +603,6 @@
 		__u8 type = usb_pipetype(urb->pipe);
 		struct usb_ctrlrequest *ctrlreq =
 				(struct usb_ctrlrequest *) urb->setup_packet;
-		struct vhci_device *vdev =
-				port_to_vdev(the_controller->pending_port);
 
 		if (type != PIPE_CONTROL || !ctrlreq) {
 			dev_err(dev, "invalid request to devnum 0\n");
@@ -607,7 +616,9 @@
 			dev_info(dev, "SetAddress Request (%d) to port %d\n",
 				 ctrlreq->wValue, vdev->rhport);
 
-			vdev->udev = urb->dev;
+			if (vdev->udev)
+				usb_put_dev(vdev->udev);
+			vdev->udev = usb_get_dev(urb->dev);
 
 			spin_lock(&vdev->ud.lock);
 			vdev->ud.status = VDEV_ST_USED;
@@ -627,8 +638,9 @@
 						"Get_Descriptor to device 0 "
 						"(get max pipe size)\n");
 
-			/* FIXME: reference count? (usb_get_dev()) */
-			vdev->udev = urb->dev;
+			if (vdev->udev)
+				usb_put_dev(vdev->udev);
+			vdev->udev = usb_get_dev(urb->dev);
 			goto out;
 
 		default:
@@ -805,7 +817,6 @@
 	return 0;
 }
 
-
 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 {
 	struct vhci_unlink *unlink, *tmp;
@@ -813,11 +824,34 @@
 	spin_lock(&vdev->priv_lock);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
+		usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
 		list_del(&unlink->list);
 		kfree(unlink);
 	}
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
+		struct urb *urb;
+
+		/* give back URB of unanswered unlink request */
+		usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
+
+		urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+		if (!urb) {
+			usbip_uinfo("the urb (seqnum %lu) was already given back\n",
+							unlink->unlink_seqnum);
+			list_del(&unlink->list);
+			kfree(unlink);
+			continue;
+		}
+
+		urb->status = -ENODEV;
+
+		spin_lock(&the_controller->lock);
+		usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+		spin_unlock(&the_controller->lock);
+
+		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+
 		list_del(&unlink->list);
 		kfree(unlink);
 	}
@@ -887,6 +921,10 @@
 	vdev->speed  = 0;
 	vdev->devid  = 0;
 
+	if (vdev->udev)
+		usb_put_dev(vdev->udev);
+	vdev->udev = NULL;
+
 	ud->tcp_socket = NULL;
 
 	ud->status = VDEV_ST_NULL;
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 8147d72..bf69914 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -23,16 +23,14 @@
 #include "vhci.h"
 
 
-/* get URB from transmitted urb queue */
-static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
 					    __u32 seqnum)
 {
 	struct vhci_priv *priv, *tmp;
 	struct urb *urb = NULL;
 	int status;
 
-	spin_lock(&vdev->priv_lock);
-
 	list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
 		if (priv->seqnum == seqnum) {
 			urb = priv->urb;
@@ -63,8 +61,6 @@
 		}
 	}
 
-	spin_unlock(&vdev->priv_lock);
-
 	return urb;
 }
 
@@ -74,9 +70,11 @@
 	struct usbip_device *ud = &vdev->ud;
 	struct urb *urb;
 
+	spin_lock(&vdev->priv_lock);
 
 	urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
 
+	spin_unlock(&vdev->priv_lock);
 
 	if (!urb) {
 		usbip_uerr("cannot find a urb of seqnum %u\n",
@@ -161,7 +159,12 @@
 		return;
 	}
 
+	spin_lock(&vdev->priv_lock);
+
 	urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+
+	spin_unlock(&vdev->priv_lock);
+
 	if (!urb) {
 		/*
 		 * I get the result of a unlink request. But, it seems that I
@@ -190,6 +193,19 @@
 	return;
 }
 
+static int vhci_priv_tx_empty(struct vhci_device *vdev)
+{
+	int empty = 0;
+
+	spin_lock(&vdev->priv_lock);
+
+	empty = list_empty(&vdev->priv_rx);
+
+	spin_unlock(&vdev->priv_lock);
+
+	return empty;
+}
+
 /* recv a pdu */
 static void vhci_rx_pdu(struct usbip_device *ud)
 {
@@ -202,11 +218,29 @@
 
 	memset(&pdu, 0, sizeof(pdu));
 
-
 	/* 1. receive a pdu header */
 	ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+	if (ret < 0) {
+		if (ret == -ECONNRESET)
+			usbip_uinfo("connection reset by peer\n");
+		else if (ret == -EAGAIN) {
+			/* ignore if connection was idle */
+			if (vhci_priv_tx_empty(vdev))
+				return;
+			usbip_uinfo("connection timed out with pending urbs\n");
+		} else if (ret != -ERESTARTSYS)
+			usbip_uinfo("xmit failed %d\n", ret);
+
+		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+		return;
+	}
+	if (ret == 0) {
+		usbip_uinfo("connection closed");
+		usbip_event_add(ud, VDEV_EVENT_DOWN);
+		return;
+	}
 	if (ret != sizeof(pdu)) {
-		usbip_uerr("receiving pdu failed! size is %d, should be %d\n",
+		usbip_uerr("received pdu size is %d, should be %d\n",
 					ret, (unsigned int)sizeof(pdu));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
diff --git a/drivers/staging/vme/bridges/Module.symvers b/drivers/staging/vme/bridges/Module.symvers
deleted file mode 100644
index e69de29..0000000
--- a/drivers/staging/vme/bridges/Module.symvers
+++ /dev/null
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 7016fdd..e19b932 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -3954,8 +3954,8 @@
 unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
 {
 
-	if ((((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA))
-			&& (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
+	if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+			(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
 		return 1;
 
 	return 0;
@@ -8773,7 +8773,7 @@
 
 	if (pVBInfo->IF_DEF_LVDS == 0) {
 		CRT2Index = CRT2Index >> 6; /*  for LCD */
-		if (((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA)) { /*301b*/
+		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
 			if (pVBInfo->LCDResInfo != Panel1024x768)
 				VCLKIndex = LCDXlat2VCLK[CRT2Index];
 			else
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 5415712..4bd8cbd 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -227,6 +227,7 @@
 
 		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
 			handle_zero_page(page);
+			index++;
 			continue;
 		}
 
@@ -235,12 +236,14 @@
 			pr_debug("Read before write: sector=%lu, size=%u",
 				(ulong)(bio->bi_sector), bio->bi_size);
 			/* Do nothing */
+			index++;
 			continue;
 		}
 
 		/* Page is stored uncompressed since it's incompressible */
 		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 			handle_uncompressed_page(zram, page, index);
+			index++;
 			continue;
 		}
 
@@ -320,6 +323,7 @@
 			mutex_unlock(&zram->lock);
 			zram_stat_inc(&zram->stats.pages_zero);
 			zram_set_flag(zram, index, ZRAM_ZERO);
+			index++;
 			continue;
 		}
 
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 0000000..2fac3be
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,32 @@
+
+menuconfig TARGET_CORE
+	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+	depends on SCSI && BLOCK
+	select CONFIGFS_FS
+	default n
+	help
+	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
+	control path for target_core_mod.  This includes built-in TCM RAMDISK
+	subsystem logic for virtual LUN 0 access
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	help
+	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+	access to Linux/Block devices using BIO
+
+config TCM_FILEIO
+	tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+	help
+	Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+	access to Linux/VFS struct file or struct block_device
+
+config TCM_PSCSI
+	tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+	help
+	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+	passthrough access to Linux/SCSI device
+
+endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 0000000..973bb19
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,23 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
+
+target_core_mod-y		:= target_core_configfs.o \
+				   target_core_device.o \
+				   target_core_fabric_configfs.o \
+				   target_core_fabric_lib.o \
+				   target_core_hba.o \
+				   target_core_pr.o \
+				   target_core_alua.o \
+				   target_core_scdb.o \
+				   target_core_tmr.o \
+				   target_core_tpg.o \
+				   target_core_transport.o \
+				   target_core_cdb.o \
+				   target_core_ua.o \
+				   target_core_rd.o
+
+obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
+obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 0000000..2c5fcfe
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,1991 @@
+/*******************************************************************************
+ * Filename:  target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
+ *
+ * Copyright (c) 2009-2010 Rising Tide Systems
+ * Copyright (c) 2009-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_ua.h"
+
+static int core_alua_check_transition(int state, int *primary);
+static int core_alua_set_tg_pt_secondary_state(
+		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+		struct se_port *port, int explict, int offline);
+
+/*
+ * REPORT_TARGET_PORT_GROUPS
+ *
+ * See spc4r17 section 6.27
+ */
+int core_emulate_report_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
+				    Target port group descriptor */
+
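+	/*
+	 * The payload built below is a 4 byte RETURN DATA LENGTH header
+	 * followed by one 8 byte descriptor per target port group, each
+	 * trailed by a 4 byte entry per target port member (see spc4r17
+	 * section 6.27 for the field layout).
+	 */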
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		/*
+		 * PREF: Preferred target port bit, determine if this
+		 * bit should be set for port group.
+		 */
+		if (tg_pt_gp->tg_pt_gp_pref)
+			buf[off] = 0x80;
+		/*
+		 * Set the ASYMMETRIC ACCESS State
+		 */
+		buf[off++] |= (atomic_read(
+			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+		/*
+		 * Set supported ASYMMETRIC ACCESS State bits
+		 */
+		buf[off] = 0x80; /* T_SUP */
+		buf[off] |= 0x40; /* O_SUP */
+		buf[off] |= 0x8; /* U_SUP */
+		buf[off] |= 0x4; /* S_SUP */
+		buf[off] |= 0x2; /* AN_SUP */
+		buf[off++] |= 0x1; /* AO_SUP */
+		/*
+		 * TARGET PORT GROUP
+		 */
+		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+
+		off++; /* Skip over Reserved */
+		/*
+		 * STATUS CODE
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+		/*
+		 * Vendor Specific field
+		 */
+		buf[off++] = 0x00;
+		/*
+		 * TARGET PORT COUNT
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+		rd_len += 8;
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+			port = tg_pt_gp_mem->tg_pt;
+			/*
+			 * Start Target Port descriptor format
+			 *
+			 * See spc4r17 section 6.2.7 Table 247
+			 */
+			off += 2; /* Skip over Obsolete */
+			/*
+			 * Set RELATIVE TARGET PORT IDENTIFIER
+			 */
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+			rd_len += 4;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	/*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
+	 */
+	buf[0] = ((rd_len >> 24) & 0xff);
+	buf[1] = ((rd_len >> 16) & 0xff);
+	buf[2] = ((rd_len >> 8) & 0xff);
+	buf[3] = (rd_len & 0xff);
+
+	return 0;
+}
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
+ */
+int core_emulate_set_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
+	struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+	u32 len = 4; /* Skip over RESERVED area in header */
+	int alua_access_state, primary = 0, rc;
+	u16 tg_pt_id, rtpi;
+
+	if (!(l_port))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	/*
+	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+	 * for the local tg_pt_gp.
+	 */
+	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
+	if (!(l_tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+	if (!(l_tg_pt_gp)) {
+		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
+	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	if (!(rc)) {
+		printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+				" while TPGS_EXPLICT_ALUA is disabled\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
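+	/*
+	 * Walk the 4 byte set target port group descriptors: byte 0 carries
+	 * the requested asymmetric access state, bytes 2-3 carry either the
+	 * TARGET PORT GROUP id (primary states) or the RELATIVE TARGET PORT
+	 * IDENTIFIER (secondary states).
+	 */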
+	while (len < cmd->data_length) {
+		alua_access_state = (ptr[0] & 0x0f);
+		/*
+		 * Check the received ALUA access state, and determine if
+		 * the state is a primary or secondary target port asymmetric
+		 * access state.
+		 */
+		rc = core_alua_check_transition(alua_access_state, &primary);
+		if (rc != 0) {
+			/*
+			 * If the SET TARGET PORT GROUPS attempts to establish
+			 * an invalid combination of target port asymmetric
+			 * access states or attempts to establish an
+			 * unsupported target port asymmetric access state,
+			 * then the command shall be terminated with CHECK
+			 * CONDITION status, with the sense key set to ILLEGAL
+			 * REQUEST, and the additional sense code set to INVALID
+			 * FIELD IN PARAMETER LIST.
+			 */
+			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+		rc = -1;
+		/*
+		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
+		 * specifies a primary target port asymmetric access state,
+		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
+		 * a primary target port group for which the primary target
+		 * port asymmetric access state shall be changed. If the
+		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
+		 * port asymmetric access state, then the TARGET PORT GROUP OR
+		 * TARGET PORT field specifies the relative target port
+		 * identifier (see 3.1.120) of the target port for which the
+		 * secondary target port asymmetric access state shall be
+		 * changed.
+		 */
+		if (primary) {
+			tg_pt_id = ((ptr[2] << 8) & 0xff00);
+			tg_pt_id |= (ptr[3] & 0xff);
+			/*
+			 * Locate the matching target port group ID from
+			 * the global tg_pt_gp list
+			 */
+			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			list_for_each_entry(tg_pt_gp,
+					&T10_ALUA(su_dev)->tg_pt_gps_list,
+					tg_pt_gp_list) {
+				if (!(tg_pt_gp->tg_pt_gp_valid_id))
+					continue;
+
+				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
+					continue;
+
+				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_inc();
+				spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+				rc = core_alua_do_port_transition(tg_pt_gp,
+						dev, l_port, nacl,
+						alua_access_state, 1);
+
+				spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_dec();
+				break;
+			}
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			/*
+			 * If no matching target port group ID can be located,
+			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0)
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		} else {
+			/*
+			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+			 * the Target Port in question for the incoming
+			 * SET_TARGET_PORT_GROUPS op.
+			 */
+			rtpi = ((ptr[2] << 8) & 0xff00);
+			rtpi |= (ptr[3] & 0xff);
+			/*
+			 * Locate the matching relative target port identifier
+			 * for the struct se_device storage object.
+			 */
+			spin_lock(&dev->se_port_lock);
+			list_for_each_entry(port, &dev->dev_sep_list,
+							sep_list) {
+				if (port->sep_rtpi != rtpi)
+					continue;
+
+				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+				spin_unlock(&dev->se_port_lock);
+
+				rc = core_alua_set_tg_pt_secondary_state(
+						tg_pt_gp_mem, port, 1, 1);
+
+				spin_lock(&dev->se_port_lock);
+				break;
+			}
+			spin_unlock(&dev->se_port_lock);
+			/*
+			 * If no matching relative target port identifier can
+			 * be located, throw an exception with ASCQ:
+			 * INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0)
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+
+		ptr += 4;
+		len += 4;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_nonoptimized(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	int nonop_delay_msecs,
+	u8 *alua_ascq)
+{
+	/*
+	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
+	 * later to determine if processing of this cmd needs to be
+	 * temporarily delayed for the Active/NonOptimized primary access state.
+	 */
+	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
+	cmd->alua_nonop_delay = nonop_delay_msecs;
+	return 0;
+}
+
+static inline int core_alua_state_standby(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
+	 * spc4r17 section 5.9.2.4.4
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case LOG_SELECT:
+	case LOG_SENSE:
+	case MODE_SELECT:
+	case MODE_SENSE:
+	case REPORT_LUNS:
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_unavailable(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+	 * spc4r17 section 5.9.2.4.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_transition(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+	 * spc4r17 section 5.9.2.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
+ * in transport_cmd_sequencer().  This function is assigned to
+ * struct t10_alua *->state_check() in core_setup_alua()
+ */
+static int core_alua_state_check_nop(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
+ * This function is assigned to struct t10_alua *->state_check() in
+ * core_setup_alua()
+ *
+ * Also, this function can return three different return codes to
+ * signal transport_generic_cmd_sequencer()
+ *
+ * return 1: Used to signal LUN not accessible, and check condition/not ready
+ * return 0: Used to signal success
+ * return -1: Used to signal failure, and invalid cdb field
+ */
+static int core_alua_state_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_port *port = lun->lun_sep;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	int out_alua_state, nonop_delay_msecs;
+
+	if (!(port))
+		return 0;
+	/*
+	 * First, check for a struct se_port specific secondary ALUA target port
+	 * access state: OFFLINE
+	 */
+	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+		printk(KERN_INFO "ALUA: Got secondary offline status for local"
+				" target port\n");
+		return 1;
+	}
+	/*
+	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
+	 * ALUA target port group, to obtain current ALUA access state.
+	 * Otherwise look for the underlying struct se_device association with
+	 * an ALUA logical unit group.
+	 */
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+	 * statement so the compiler knows explicitly to check this case first.
+	 * For the Optimized ALUA access state case, we want to process the
+	 * incoming fabric cmd ASAP.
+	 */
+	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+		return 0;
+
+	switch (out_alua_state) {
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return core_alua_state_nonoptimized(cmd, cdb,
+					nonop_delay_msecs, alua_ascq);
+	case ALUA_ACCESS_STATE_STANDBY:
+		return core_alua_state_standby(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return core_alua_state_transition(cmd, cdb, alua_ascq);
+	/*
+	 * OFFLINE is a secondary ALUA target port group access state, that is
+	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+	 */
+	case ALUA_ACCESS_STATE_OFFLINE:
+	default:
+		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+				out_alua_state);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Check implicit and explicit ALUA state change requests.
+ */
+static int core_alua_check_transition(int state, int *primary)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+	case ALUA_ACCESS_STATE_STANDBY:
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		/*
+		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+		 * defined as primary target port asymmetric access states.
+		 */
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_OFFLINE:
+		/*
+		 * OFFLINE state is defined as a secondary target port
+		 * asymmetric access state.
+		 */
+		*primary = 0;
+		break;
+	default:
+		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
+		return -1;
+	}
+
+	return 0;
+}
+
+static char *core_alua_dump_state(int state)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+		return "Active/Optimized";
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_STANDBY:
+		return "Standby";
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return "Unavailable";
+	case ALUA_ACCESS_STATE_OFFLINE:
+		return "Offline";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+char *core_alua_dump_status(int status)
+{
+	switch (status) {
+	case ALUA_STATUS_NONE:
+		return "None";
+	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
+		return "Altered by Explict STPG";
+	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
+		return "Altered by Implict ALUA";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+/*
+ * Used by fabric modules to determine when we need to delay processing
+ * for the Active/NonOptimized paths..
+ */
+int core_alua_check_nonop_delay(
+	struct se_cmd *cmd)
+{
+	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
+		return 0;
+	if (in_interrupt())
+		return 0;
+	/*
+	 * The ALUA Active/NonOptimized access state delay can be disabled
+	 * via configfs with a value of zero
+	 */
+	if (!(cmd->alua_nonop_delay))
+		return 0;
+	/*
+	 * struct se_cmd->alua_nonop_delay gets set by a target port group
+	 * defined interval in core_alua_state_nonoptimized()
+	 */
+	msleep_interruptible(cmd->alua_nonop_delay);
+	return 0;
+}
+EXPORT_SYMBOL(core_alua_check_nonop_delay);
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
+ */
+static int core_alua_write_tpg_metadata(
+	const char *path,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	mm_segment_t old_fs;
+	struct file *file;
+	struct iovec iov[1];
+	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+			path);
+		return -ENODEV;
+	}
+
+	iov[0].iov_base = &md_buf[0];
+	iov[0].iov_len = md_buf_len;
+
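+	/*
+	 * md_buf is a kernel buffer, so temporarily lift the user address
+	 * space limit around vfs_writev().
+	 */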
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -EIO;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ */
+static int core_alua_update_tpg_primary_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int primary_state,
+	unsigned char *md_buf)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_wwn *wwn = &su_dev->t10_wwn;
+	char path[ALUA_METADATA_PATH_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+			"tg_pt_gp_id=%hu\n"
+			"alua_access_state=0x%02x\n"
+			"alua_access_status=0x%02x\n",
+			tg_pt_gp->tg_pt_gp_id, primary_state,
+			tg_pt_gp->tg_pt_gp_alua_access_status);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN,
+		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	struct se_port *l_port,
+	struct se_node_acl *nacl,
+	unsigned char *md_buf,
+	int new_state,
+	int explict)
+{
+	struct se_dev_entry *se_deve;
+	struct se_lun_acl *lacl;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp_member *mem;
+	int old_state = 0;
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+			ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
+				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+		port = mem->tg_pt;
+		/*
+		 * After an implicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition for the initiator port associated with every I_T
+		 * nexus with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED.
+		 *
+		 * After an explicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED for the initiator port associated with
+		 * every I_T nexus other than the I_T nexus on which the SET
+		 * TARGET PORT GROUPS command was received.
+		 */
+		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(se_deve, &port->sep_alua_list,
+					alua_port_list) {
+			lacl = se_deve->se_lun_acl;
+			/*
+			 * se_deve->se_lun_acl pointer may be NULL for an
+			 * entry created without explicit Node+MappedLUN ACLs
+			 */
+			if (!(lacl))
+				continue;
+
+			if (explict &&
+			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
+			   (l_port != NULL) && (l_port == port))
+				continue;
+
+			core_scsi3_ua_allocate(lacl->se_lun_nacl,
+				se_deve->mapped_lun, 0x2A,
+				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	/*
+	 * Update the ALUA metadata buf that has been allocated in
+	 * core_alua_do_port_transition(); this metadata will be written
+	 * to struct file.
+	 *
+	 * Note that there is the case where we do not want to update the
+	 * metadata when the saved metadata is being parsed in userspace
+	 * when setting the existing port access state and access status.
+	 *
+	 * Also note that the failure to write out the ALUA metadata to
+	 * struct file does NOT affect the actual ALUA transition.
+	 */
+	if (tg_pt_gp->tg_pt_gp_write_metadata) {
+		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp,
+					new_state, md_buf);
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+	}
+	/*
+	 * Set the current primary ALUA access state to the requested new state
+	 */
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+
+	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" from primary access state %s to %s\n", (explict) ? "explict" :
+		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
+		core_alua_dump_state(new_state));
+
+	return 0;
+}
+
+int core_alua_do_port_transition(
+	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
+	struct se_device *l_dev,
+	struct se_port *l_port,
+	struct se_node_acl *l_nacl,
+	int new_state,
+	int explict)
+{
+	struct se_device *dev;
+	struct se_port *port;
+	struct se_subsystem_dev *su_dev;
+	struct se_node_acl *nacl;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	int primary;
+
+	if (core_alua_check_transition(new_state, &primary) != 0)
+		return -EINVAL;
+
+	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
+	if (!(md_buf)) {
+		printk("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
+
+	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
+	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = local_lu_gp_mem->lu_gp;
+	atomic_inc(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
+	/*
+	 * For storage objects that are members of the 'default_lu_gp',
+	 * we only do transition on the passed *l_tg_pt_gp, and not
+	 * on all of the matching target port groups IDs in default_lu_gp.
+	 */
+	if (!(lu_gp->lu_gp_id)) {
+		/*
+		 * core_alua_do_transition_tg_pt() will always return
+		 * success.
+		 */
+		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
+					md_buf, new_state, explict);
+		atomic_dec(&lu_gp->lu_gp_ref_cnt);
+		smp_mb__after_atomic_dec();
+		kfree(md_buf);
+		return 0;
+	}
+	/*
+	 * For all other LU groups aside from 'default_lu_gp', walk all of
+	 * the associated storage objects looking for a matching target port
+	 * group ID from the local target port group.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
+				lu_gp_mem_list) {
+
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&lu_gp->lu_gp_lock);
+
+		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		list_for_each_entry(tg_pt_gp,
+				&T10_ALUA(su_dev)->tg_pt_gps_list,
+				tg_pt_gp_list) {
+
+			if (!(tg_pt_gp->tg_pt_gp_valid_id))
+				continue;
+			/*
+			 * If the target port asymmetric access state is
+			 * changed for any target port group accessible via
+			 * a logical unit within a LU group, the target port
+			 * asymmetric access states for the same
+			 * target port group accessible via other logical units
+			 * in that LU group will also change.
+			 */
+			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
+				continue;
+
+			if (l_tg_pt_gp == tg_pt_gp) {
+				port = l_port;
+				nacl = l_nacl;
+			} else {
+				port = NULL;
+				nacl = NULL;
+			}
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			/*
+			 * core_alua_do_transition_tg_pt() will always return
+			 * success.
+			 */
+			core_alua_do_transition_tg_pt(tg_pt_gp, port,
+					nacl, md_buf, new_state, explict);
+
+			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_dec();
+		}
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+		" Group IDs: %hu %s transition to primary state: %s\n",
+		config_item_name(&lu_gp->lu_gp_group.cg_item),
+		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
+		core_alua_dump_state(new_state));
+
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_dec();
+	kfree(md_buf);
+	return 0;
+}
+
+/*
+ * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
+ */
+static int core_alua_update_tpg_secondary_metadata(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	struct se_portal_group *se_tpg = port->sep_tpg;
+	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
+			TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+
+	if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
+				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+			"alua_tg_pt_status=0x%02x\n",
+			atomic_read(&port->sep_tg_pt_secondary_offline),
+			port->sep_tg_pt_secondary_stat);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+			TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+			port->sep_lun->unpacked_lun);
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
+static int core_alua_set_tg_pt_secondary_state(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	int explict,
+	int offline)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	u32 md_buf_len;
+	int trans_delay_msecs;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (!(tg_pt_gp)) {
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_ERR "Unable to complete secondary state"
+				" transition\n");
+		return -1;
+	}
+	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
+	/*
+	 * Set the secondary ALUA target port access state to OFFLINE
+	 * or release the previously set secondary state for struct se_port
+	 */
+	if (offline)
+		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+	else
+		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+
+	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
+	port->sep_tg_pt_secondary_stat = (explict) ?
+			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+
+	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" to secondary access state: %s\n", (explict) ? "explict" :
+		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
+
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Do the optional transition delay after we set the secondary
+	 * ALUA access state.
+	 */
+	if (trans_delay_msecs != 0)
+		msleep_interruptible(trans_delay_msecs);
+	/*
+	 * See if we need to update the ALUA fabric port metadata for
+	 * secondary state and status
+	 */
+	if (port->sep_tg_pt_secondary_write_md) {
+		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
+		if (!(md_buf)) {
+			printk(KERN_ERR "Unable to allocate md_buf for"
+				" secondary ALUA access metadata\n");
+			return -1;
+		}
+		mutex_lock(&port->sep_tg_pt_md_mutex);
+		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
+				md_buf, md_buf_len);
+		mutex_unlock(&port->sep_tg_pt_md_mutex);
+
+		kfree(md_buf);
+	}
+
+	return 0;
+}
+
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+	struct t10_alua_lu_gp *lu_gp;
+
+	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+	if (!(lu_gp)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+	spin_lock_init(&lu_gp->lu_gp_lock);
+	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+	if (def_group) {
+		lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+		lu_gp->lu_gp_valid_id = 1;
+		se_global->alua_lu_gps_count++;
+	}
+
+	return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+	struct t10_alua_lu_gp *lu_gp_tmp;
+	u16 lu_gp_id_tmp;
+	/*
+	 * The lu_gp->lu_gp_id may only be set once..
+	 */
+	if (lu_gp->lu_gp_valid_id) {
+		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+			" ignoring request\n");
+		return -1;
+	}
+
+	spin_lock(&se_global->lu_gps_lock);
+	if (se_global->alua_lu_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+				" 0x0000ffff reached\n");
+		spin_unlock(&se_global->lu_gps_lock);
+		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+		return -1;
+	}
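+	/*
+	 * If no lu_gp_id was requested, keep taking the next value from
+	 * alua_lu_gps_counter until one not already on g_lu_gps_list is
+	 * found; an explicitly requested ID that collides is rejected below.
+	 */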
+again:
+	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+				se_global->alua_lu_gps_counter++;
+
+	list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+			if (!(lu_gp_id))
+				goto again;
+
+			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+				" already exists, ignoring request\n",
+				lu_gp_id);
+			spin_unlock(&se_global->lu_gps_lock);
+			return -1;
+		}
+	}
+
+	lu_gp->lu_gp_id = lu_gp_id_tmp;
+	lu_gp->lu_gp_valid_id = 1;
+	list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
+	se_global->alua_lu_gps_count++;
+	spin_unlock(&se_global->lu_gps_lock);
+
+	return 0;
+}
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+	if (!(lu_gp_mem)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+	lu_gp_mem->lu_gp_mem_dev = dev;
+	dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+	return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has
+	 * already been called from target_core_alua_drop_lu_gp().
+	 *
+	 * Here, we remove the *lu_gp from the global list so that
+	 * no associations can be made while we are releasing
+	 * struct t10_alua_lu_gp.
+	 */
+	spin_lock(&se_global->lu_gps_lock);
+	atomic_set(&lu_gp->lu_gp_shutdown, 1);
+	list_del(&lu_gp->lu_gp_list);
+	se_global->alua_lu_gps_count--;
+	spin_unlock(&se_global->lu_gps_lock);
+	/*
+	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
+	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
+	 * released with core_alua_put_lu_gp_from_name()
+	 */
+	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_lu_gp * from all associated
+	 * struct se_device.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
+				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		/*
+		 * lu_gp_mem is associated with a single
+		 * struct se_device->dev_alua_lu_gp_mem, and is released when
+		 * struct se_device is released via core_alua_free_lu_gp_mem().
+		 *
+		 * If the passed lu_gp does NOT match the default_lu_gp, assume
+		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
+		 */
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		if (lu_gp != se_global->default_lu_gp)
+			__core_alua_attach_lu_gp_mem(lu_gp_mem,
+					se_global->default_lu_gp);
+		else
+			lu_gp_mem->lu_gp = NULL;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+}
+
+void core_alua_free_lu_gp_mem(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem))
+		return;
+
+	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		spin_lock(&lu_gp->lu_gp_lock);
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		lu_gp_mem->lu_gp = NULL;
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
+}
+
+struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_item *ci;
+
+	spin_lock(&se_global->lu_gps_lock);
+	list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
+		if (!(lu_gp->lu_gp_valid_id))
+			continue;
+		ci = &lu_gp->lu_gp_group.cg_item;
+		if (!(strcmp(config_item_name(ci), name))) {
+			atomic_inc(&lu_gp->lu_gp_ref_cnt);
+			spin_unlock(&se_global->lu_gps_lock);
+			return lu_gp;
+		}
+	}
+	spin_unlock(&se_global->lu_gps_lock);
+
+	return NULL;
+}
+
+void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&se_global->lu_gps_lock);
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	spin_unlock(&se_global->lu_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_attach_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	lu_gp_mem->lu_gp = lu_gp;
+	lu_gp_mem->lu_gp_assoc = 1;
+	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
+	lu_gp->lu_gp_members++;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_drop_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_del(&lu_gp_mem->lu_gp_mem_list);
+	lu_gp_mem->lu_gp = NULL;
+	lu_gp_mem->lu_gp_assoc = 0;
+	lu_gp->lu_gp_members--;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+	struct se_subsystem_dev *su_dev,
+	const char *name,
+	int def_group)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
+	if (!(tg_pt_gp)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
+	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+	/*
+	 * Enable both explicit and implicit ALUA support by default
+	 */
+	tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+	/*
+	 * Set the default Active/NonOptimized Delay in milliseconds
+	 */
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+
+	if (def_group) {
+		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		tg_pt_gp->tg_pt_gp_id =
+				T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+		tg_pt_gp->tg_pt_gp_valid_id = 1;
+		T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			      &T10_ALUA(su_dev)->tg_pt_gps_list);
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	}
+
+	return tg_pt_gp;
+}
+
+int core_alua_set_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u16 tg_pt_gp_id)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
+	u16 tg_pt_gp_id_tmp;
+	/*
+	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
+	 */
+	if (tg_pt_gp->tg_pt_gp_valid_id) {
+		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+			" ignoring request\n");
+		return -1;
+	}
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+			" 0x0000ffff reached\n");
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+		return -1;
+	}
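+	/*
+	 * Same scheme as core_alua_set_lu_gp_id(): auto-generated IDs are
+	 * retried on collision, while an explicitly requested tg_pt_gp_id
+	 * that already exists is rejected below.
+	 */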
+again:
+	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
+			T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+
+	list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
+			if (!(tg_pt_gp_id))
+				goto again;
+
+			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+				" exists, ignoring request\n", tg_pt_gp_id);
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			return -1;
+		}
+	}
+
+	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
+	tg_pt_gp->tg_pt_gp_valid_id = 1;
+	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			&T10_ALUA(su_dev)->tg_pt_gps_list);
+	T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+	return 0;
+}
+
+struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+	struct se_port *port)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
+				GFP_KERNEL);
+	if (!(tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
+
+	tg_pt_gp_mem->tg_pt = port;
+	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
+	atomic_set(&port->sep_tg_pt_gp_active, 1);
+
+	return tg_pt_gp_mem;
+}
+
+void core_alua_free_tg_pt_gp(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has already
+	 * been called from target_core_alua_drop_tg_pt_gp().
+	 *
+	 * Here we remove *tg_pt_gp from the global list so that
+	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
+	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
+	 */
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_del(&tg_pt_gp->tg_pt_gp_list);
+	T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	/*
+	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
+	 * core_alua_get_tg_pt_gp_by_name() in
+	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
+	 * to be released with core_alua_put_tg_pt_gp_from_name().
+	 */
+	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_tg_pt_gp from all associated
+	 * struct se_port.
+	 */
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
+			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		/*
+		 * tg_pt_gp_mem is associated with a single
+		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
+		 * core_alua_free_tg_pt_gp_mem().
+		 *
+		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
+		 * assume we want to re-associate a given tg_pt_gp_mem with
+		 * default_tg_pt_gp.
+		 */
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					T10_ALUA(su_dev)->default_tg_pt_gp);
+		} else
+			tg_pt_gp_mem->tg_pt_gp = NULL;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+}
+
+void core_alua_free_tg_pt_gp_mem(struct se_port *port)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem))
+		return;
+
+	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		tg_pt_gp_mem->tg_pt_gp = NULL;
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
+}
+
+static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
+	struct se_subsystem_dev *su_dev,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct config_item *ci;
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (!(tg_pt_gp->tg_pt_gp_valid_id))
+			continue;
+		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		if (!(strcmp(config_item_name(ci), name))) {
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			return tg_pt_gp;
+		}
+	}
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+	return NULL;
+}
+
+static void core_alua_put_tg_pt_gp_from_name(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+void __core_alua_attach_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
+	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
+			&tg_pt_gp->tg_pt_gp_mem_list);
+	tg_pt_gp->tg_pt_gp_members++;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+static void __core_alua_drop_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	tg_pt_gp_mem->tg_pt_gp = NULL;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+	tg_pt_gp->tg_pt_gp_members--;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct config_item *tg_pt_ci;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem))
+		return len;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
+			" %hu\nTG Port Primary Access State: %s\nTG Port "
+			"Primary Access Status: %s\nTG Port Secondary Access"
+			" State: %s\nTG Port Secondary Access Status: %s\n",
+			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
+			core_alua_dump_state(atomic_read(
+					&tg_pt_gp->tg_pt_gp_alua_access_state)),
+			core_alua_dump_status(
+				tg_pt_gp->tg_pt_gp_alua_access_status),
+			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+			"Offline" : "None",
+			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	return len;
+}
+
+ssize_t core_alua_store_tg_pt_gp_info(
+	struct se_port *port,
+	const char *page,
+	size_t count)
+{
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+	int move = 0;
+
+	tpg = port->sep_tpg;
+	lun = port->sep_lun;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
+			" %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		return -EINVAL;
+	}
+
+	if (count > TG_PT_GROUP_NAME_BUF) {
+		printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA target port group alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
+		 * struct t10_alua_tg_pt_gp.  This reference is released with
+		 * core_alua_put_tg_pt_gp_from_name() below.
+		 */
+		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+					strstrip(buf));
+		if (!(tg_pt_gp_new))
+			return -ENODEV;
+	}
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem)) {
+		if (tg_pt_gp_new)
+			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+		printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		/*
+		 * Clearing an existing tg_pt_gp association, and replacing
+		 * with the default_tg_pt_gp.
+		 */
+		if (!(tg_pt_gp_new)) {
+			printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
+				" alua/%s, ID: %hu back to"
+				" default_tg_pt_gp\n",
+				TPG_TFO(tpg)->tpg_get_wwn(tpg),
+				TPG_TFO(tpg)->tpg_get_tag(tpg),
+				config_item_name(&lun->lun_group.cg_item),
+				config_item_name(
+					&tg_pt_gp->tg_pt_gp_group.cg_item),
+				tg_pt_gp->tg_pt_gp_id);
+
+			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					T10_ALUA(su_dev)->default_tg_pt_gp);
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
+		 */
+		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+		move = 1;
+	}
+	/*
+	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
+	 */
+	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
+		"Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		config_item_name(&lun->lun_group.cg_item),
+		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
+		tg_pt_gp_new->tg_pt_gp_id);
+
+	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+	return count;
+}
+
+ssize_t core_alua_show_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
+	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
+		return sprintf(page, "Implict and Explict\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
+		return sprintf(page, "Implict\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
+		return sprintf(page, "Explict\n");
+	else
+		return sprintf(page, "None\n");
+}
+
+ssize_t core_alua_store_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_access_type\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
+		printk(KERN_ERR "Illegal value for alua_access_type:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	if (tmp == 3)
+		tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+	else if (tmp == 2)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+	else if (tmp == 1)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+	else
+		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
+
+	return count;
+}
+
+ssize_t core_alua_show_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
+}
+
+ssize_t core_alua_store_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
+		printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_NONOP_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+}
+
+ssize_t core_alua_store_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
+		printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_TRANS_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
+}
+
+ssize_t core_alua_store_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
+{
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return sprintf(page, "%d\n",
+		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+}
+
+ssize_t core_alua_store_offline_bit(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned long tmp;
+	int ret;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
+			lun->lun_sep, 0, (int)tmp);
+	if (ret < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_status(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+}
+
+ssize_t core_alua_store_secondary_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+		return -EINVAL;
+	}
+	if ((tmp != ALUA_STATUS_NONE) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_write_metadata(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+			lun->lun_sep->sep_tg_pt_secondary_write_md);
+}
+
+ssize_t core_alua_store_secondary_write_metadata(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+
+	return count;
+}
+
+int core_setup_alua(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
+	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but emulate SCSI logic themselves.
+	 */
+	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+		alua->alua_type = SPC_ALUA_PASSTHROUGH;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+			" emulation\n", TRANSPORT(dev)->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated ALUA.
+	 */
+	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+		printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
+			" device\n", TRANSPORT(dev)->name);
+		/*
+		 * Associate this struct se_device with the default ALUA
+		 * LUN Group.
+		 */
+		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
+		if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
+			return -1;
+
+		alua->alua_type = SPC3_ALUA_EMULATED;
+		alua->alua_state_check = &core_alua_state_check;
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		__core_alua_attach_lu_gp_mem(lu_gp_mem,
+				se_global->default_lu_gp);
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+			" core/alua/lu_gps/default_lu_gp\n",
+			TRANSPORT(dev)->name);
+	} else {
+		alua->alua_type = SPC2_ALUA_DISABLED;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
+			" device\n", TRANSPORT(dev)->name);
+	}
+
+	return 0;
+}
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 0000000..c86f97a
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
+#ifndef TARGET_CORE_ALUA_H
+#define TARGET_CORE_ALUA_H
+
+/*
+ * INQUIRY response data, TPGS Field
+ *
+ * from spc4r17 section 6.4.2 Table 135
+ */
+#define TPGS_NO_ALUA				0x00
+#define TPGS_IMPLICT_ALUA			0x10
+#define TPGS_EXPLICT_ALUA			0x20
+
+/*
+ * ASYMMETRIC ACCESS STATE field
+ *
+ * from spc4r17 section 6.27 Table 245
+ */
+#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED	0x0
+#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
+#define ALUA_ACCESS_STATE_STANDBY		0x2
+#define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_OFFLINE		0xe
+#define ALUA_ACCESS_STATE_TRANSITION		0xf
+
+/*
+ * REPORT_TARGET_PORT_GROUP STATUS CODE
+ *
+ * from spc4r17 section 6.27 Table 246
+ */
+#define ALUA_STATUS_NONE				0x00
+#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG		0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA		0x02
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_04H_ALUA_STATE_TRANSITION			0x0a
+#define ASCQ_04H_ALUA_TG_PT_STANDBY			0x0b
+#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE			0x0c
+#define ASCQ_04H_ALUA_OFFLINE				0x12
+
+/*
+ * Used as the default for the Active/NonOptimized delay (in milliseconds).
+ * This can also be changed via configfs on a per target port group basis.
+ */
+#define ALUA_DEFAULT_NONOP_DELAY_MSECS			100
+#define ALUA_MAX_NONOP_DELAY_MSECS			10000 /* 10 seconds */
+/*
+ * Used for the implicit and explicit ALUA transitional delay, which is
+ * disabled by default and is intended for debugging client side ALUA code.
+ */
+#define ALUA_DEFAULT_TRANS_DELAY_MSECS			0
+#define ALUA_MAX_TRANS_DELAY_MSECS			30000 /* 30 seconds */
+/*
+ * Used by core_alua_update_tpg_primary_metadata() and
+ * core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_METADATA_PATH_LEN				512
+/*
+ * Used by core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_SECONDARY_METADATA_WWN_LEN			256
+
+extern struct kmem_cache *t10_alua_lu_gp_cache;
+extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+extern int core_emulate_report_target_port_groups(struct se_cmd *);
+extern int core_emulate_set_target_port_groups(struct se_cmd *);
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
+				struct se_device *, struct se_port *,
+				struct se_node_acl *, int, int);
+extern char *core_alua_dump_status(int);
+extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
+extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
+extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
+extern void core_alua_free_lu_gp_mem(struct se_device *);
+extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
+extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
+extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void core_alua_drop_lu_gp_dev(struct se_device *);
+extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+			struct se_subsystem_dev *, const char *, int);
+extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
+extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+					struct se_port *);
+extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
+extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
+extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
+					struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+						size_t);
+extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
+extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+						char *);
+extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
+extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
+					size_t);
+extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
+extern ssize_t core_alua_store_secondary_status(struct se_lun *,
+					const char *, size_t);
+extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
+					char *);
+extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
+					const char *, size_t);
+extern int core_setup_alua(struct se_device *, int);
+
+#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
new file mode 100644
index 0000000..366080b
--- /dev/null
+++ b/drivers/target/target_core_cdb.c
@@ -0,0 +1,1131 @@
+/*
+ * CDB emulation for non-READ/WRITE commands.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include "target_core_ua.h"
+
+static void
+target_fill_alua_data(struct se_port *port, unsigned char *buf)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	/*
+	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+	 */
+	buf[5]	= 0x80;
+
+	/*
+	 * Set the TPGS field for explicit and/or implicit ALUA access type
+	 * and operation.
+	 *
+	 * See spc4r17 section 6.4.2 Table 135
+	 */
+	if (!port)
+		return;
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem)
+		return;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (tg_pt_gp)
+		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+}
+
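+/*
+ * Worked example (illustrative): with both TPGS_IMPLICT_ALUA (0x10) and
+ * TPGS_EXPLICT_ALUA (0x20) set in tg_pt_gp_alua_access_type, the helper
+ * above leaves standard INQUIRY byte 5 at 0x80 | 0x30 = 0xb0, i.e. SCCS
+ * set plus TPGS = 11b as described in spc4r17 Table 135.
+ */
+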
+static int
+target_emulate_inquiry_std(struct se_cmd *cmd)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+
+	/*
+	 * Make sure we at least have 6 bytes of INQUIRY response
+	 * payload going back for EVPD=0
+	 */
+	if (cmd->data_length < 6) {
+		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+			" too small for EVPD=0\n", cmd->data_length);
+		return -1;
+	}
+
+	buf[0] = dev->transport->get_device_type(dev);
+	if (buf[0] == TYPE_TAPE)
+		buf[1] = 0x80;
+	buf[2] = dev->transport->get_device_rev(dev);
+
+	/*
+	 * Enable SCCS and TPGS fields for Emulated ALUA
+	 */
+	if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+		target_fill_alua_data(lun->lun_sep, buf);
+
+	if (cmd->data_length < 8) {
+		buf[4] = 1; /* Set additional length to 1 */
+		return 0;
+	}
+
+	buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
+
+	/*
+	 * Do not include vendor, product, or revision info in the INQUIRY
+	 * response payload for CDBs with a small allocation length.
+	 */
+	if (cmd->data_length < 36) {
+		buf[4] = 3; /* Set additional length to 3 */
+		return 0;
+	}
+
+	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
+	snprintf((unsigned char *)&buf[16], 16, "%s",
+		 &DEV_T10_WWN(dev)->model[0]);
+	snprintf((unsigned char *)&buf[32], 4, "%s",
+		 &DEV_T10_WWN(dev)->revision[0]);
+	buf[4] = 31; /* Set additional length to 31 */
+	return 0;
+}
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+	buf[1] = 0x00;
+	if (cmd->data_length < 8)
+		return 0;
+
+	buf[4] = 0x0;
+	/*
+	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
+	 * Registered Extended LUN WWN has been set via ConfigFS
+	 * during device creation/restart.
+	 */
+	if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		buf[3] = 3;
+		buf[5] = 0x80;
+		buf[6] = 0x83;
+		buf[7] = 0x86;
+	}
+
+	return 0;
+}
+
+/* unit serial number */
+static int
+target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	u16 len = 0;
+
+	buf[1] = 0x80;
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		u32 unit_serial_len;
+
+		unit_serial_len =
+			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		if (((len + 4) + unit_serial_len) > cmd->data_length) {
+			len += unit_serial_len;
+			buf[2] = ((len >> 8) & 0xff);
+			buf[3] = (len & 0xff);
+			return 0;
+		}
+		len += sprintf((unsigned char *)&buf[4], "%s",
+			&DEV_T10_WWN(dev)->unit_serial[0]);
+		len++; /* Extra Byte for NULL Terminator */
+		buf[3] = len;
+	}
+	return 0;
+}
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+static int
+target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_port *port = NULL;
+	struct se_portal_group *tpg = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char binary, binary_new;
+	unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+	u32 prod_len;
+	u32 unit_serial_len, off = 0;
+	int i;
+	u16 len = 0, id_len;
+
+	buf[1] = 0x83;
+	off = 4;
+
+	/*
+	 * NAA IEEE Registered Extended Assigned designator format, see
+	 * spc4r17 section 7.7.3.6.5
+	 *
+	 * We depend upon a target_core_mod/ConfigFS provided
+	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+	 * value in order to return the NAA id.
+	 */
+	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+		goto check_t10_vend_desc;
+
+	if (off + 20 > cmd->data_length)
+		goto check_t10_vend_desc;
+
+	/* CODE SET == Binary */
+	buf[off++] = 0x1;
+
+	/* Set ASSOCIATION == addressed logical unit: 00b */
+	buf[off] = 0x00;
+
+	/* Identifier/Designator type == NAA identifier */
+	buf[off++] = 0x3;
+	off++;
+
+	/* Identifier/Designator length */
+	buf[off++] = 0x10;
+
+	/*
+	 * Start NAA IEEE Registered Extended Identifier/Designator
+	 */
+	buf[off++] = (0x6 << 4);
+
+	/*
+	 * Use OpenFabrics IEEE Company ID: 00 14 05
+	 */
+	buf[off++] = 0x01;
+	buf[off++] = 0x40;
+	buf[off] = (0x5 << 4);
+
+	/*
+	 * Return ConfigFS Unit Serial Number information for
+	 * VENDOR_SPECIFIC_IDENTIFIER and
+	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
+	 */
+	binary = transport_asciihex_to_binaryhex(
+				&DEV_T10_WWN(dev)->unit_serial[0]);
+	buf[off++] |= (binary & 0xf0) >> 4;
+	for (i = 0; i < 24; i += 2) {
+		binary_new = transport_asciihex_to_binaryhex(
+			&DEV_T10_WWN(dev)->unit_serial[i+2]);
+		buf[off] = (binary & 0x0f) << 4;
+		buf[off++] |= (binary_new & 0xf0) >> 4;
+		binary = binary_new;
+	}
+	len = 20;
+	off = (len + 4);
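+	/*
+	 * Illustrative byte layout of the identifier built above, assuming
+	 * transport_asciihex_to_binaryhex() packs two ASCII hex digits of
+	 * vpd_unit_serial into one byte: for a unit serial beginning with
+	 * "23456789..." the 16 byte NAA body becomes 60 01 40 52 34 56 78 ...,
+	 * i.e. NAA type 6, the IEEE Company ID 00 14 05, and then 100 bits
+	 * taken from the unit serial.
+	 */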
+
+check_t10_vend_desc:
+	/*
+	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+	 */
+	id_len = 8; /* For Vendor field */
+	prod_len = 4; /* For VPD Header */
+	prod_len += 8; /* For Vendor field */
+	prod_len += strlen(prod);
+	prod_len++; /* For : */
+
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		unit_serial_len =
+			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		if ((len + (id_len + 4) +
+		    (prod_len + unit_serial_len)) >
+				cmd->data_length) {
+			len += (prod_len + unit_serial_len);
+			goto check_port;
+		}
+		id_len += sprintf((unsigned char *)&buf[off+12],
+				"%s:%s", prod,
+				&DEV_T10_WWN(dev)->unit_serial[0]);
+	}
+	buf[off] = 0x2; /* ASCII */
+	buf[off+1] = 0x1; /* T10 Vendor ID */
+	buf[off+2] = 0x0;
+	memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+	/* Extra Byte for NULL Terminator */
+	id_len++;
+	/* Identifier Length */
+	buf[off+3] = id_len;
+	/* Header size for Designation descriptor */
+	len += (id_len + 4);
+	off += (id_len + 4);
+	/*
+	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
+	 */
+check_port:
+	port = lun->lun_sep;
+	if (port) {
+		struct t10_alua_lu_gp *lu_gp;
+		u32 padding, scsi_name_len;
+		u16 lu_gp_id = 0;
+		u16 tg_pt_gp_id = 0;
+		u16 tpgt;
+
+		tpg = port->sep_tpg;
+		/*
+		 * Relative target port identifier, see spc4r17
+		 * section 7.7.3.7
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_tpgi;
+		}
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Relative target port identifier */
+		buf[off++] |= 0x4;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		/* Skip over Obsolete field in RTPI payload
+		 * in Table 472 */
+		off += 2;
+		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+		buf[off++] = (port->sep_rtpi & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Target port group identifier, see spc4r17
+		 * section 7.7.3.8
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_tpgi:
+		if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+				SPC3_ALUA_EMULATED)
+			goto check_scsi_name;
+
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_lu_gp;
+		}
+		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+		if (!tg_pt_gp_mem)
+			goto check_lu_gp;
+
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+		if (!(tg_pt_gp)) {
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+			goto check_lu_gp;
+		}
+		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Target port group identifier */
+		buf[off++] |= 0x5;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Logical Unit Group identifier, see spc4r17
+		 * section 7.7.3.8
+		 */
+check_lu_gp:
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_scsi_name;
+		}
+		lu_gp_mem = dev->dev_alua_lu_gp_mem;
+		if (!(lu_gp_mem))
+			goto check_scsi_name;
+
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		lu_gp = lu_gp_mem->lu_gp;
+		if (!(lu_gp)) {
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+			goto check_scsi_name;
+		}
+		lu_gp_id = lu_gp->lu_gp_id;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		/* DESIGNATOR TYPE == Logical Unit Group identifier */
+		buf[off++] |= 0x6;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((lu_gp_id >> 8) & 0xff);
+		buf[off++] = (lu_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * SCSI name string designator, see spc4r17
+		 * section 7.7.3.11
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_scsi_name:
+		scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
+		scsi_name_len += 10;
+		/* Check for 4-byte padding */
+		padding = ((-scsi_name_len) & 3);
+		if (padding != 0)
+			scsi_name_len += padding;
+		/* Header size + Designation descriptor */
+		scsi_name_len += 4;
+
+		if (((len + 4) + scsi_name_len) > cmd->data_length) {
+			len += scsi_name_len;
+			goto set_len;
+		}
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == SCSI name string */
+		buf[off++] |= 0x8;
+		off += 2; /* Skip over Reserved and length */
+		/*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and iSCSI
+		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+		 * UTF-8 encoding.
+		 */
+		tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+					TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+		scsi_name_len += 1 /* Include  NULL terminator */;
+		/*
+		 * The null-terminated, null-padded (see 4.4.2) SCSI
+		 * NAME STRING field contains a UTF-8 format string.
+		 * The number of bytes in the SCSI NAME STRING field
+		 * (i.e., the value in the DESIGNATOR LENGTH field)
+		 * shall be no larger than 256 and shall be a multiple
+		 * of four.
+		 */
+		if (padding)
+			scsi_name_len += padding;
+
+		buf[off-1] = scsi_name_len;
+		off += scsi_name_len;
+		/* Header size + Designation descriptor */
+		len += (scsi_name_len + 4);
+	}
+set_len:
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+	return 0;
+}
+
+/* Extended INQUIRY Data VPD Page */
+static int
+target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+	if (cmd->data_length < 60)
+		return 0;
+
+	buf[1] = 0x86;
+	buf[2] = 0x3c;
+	/* Set HEADSUP, ORDSUP, SIMPSUP */
+	buf[5] = 0x07;
+
+	/* If WriteCache emulation is enabled, set V_SUP */
+	if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+		buf[6] = 0x01;
+	return 0;
+}
+
+/* Block Limits VPD page */
+static int
+target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	int have_tp = 0;
+
+	/*
+	 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+	 * emulate_tpu=1 or emulate_tpws=1 we expect a
+	 * different page length for Thin Provisioning.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		have_tp = 1;
+
+	if (cmd->data_length < (0x10 + 4)) {
+		printk(KERN_INFO "Received data_length: %u"
+			" too small for EVPD 0xb0\n",
+			cmd->data_length);
+		return -1;
+	}
+
+	if (have_tp && cmd->data_length < (0x3c + 4)) {
+		printk(KERN_INFO "Received data_length: %u"
+			" too small for TPE=1 EVPD 0xb0\n",
+			cmd->data_length);
+		have_tp = 0;
+	}
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[1] = 0xb0;
+	buf[3] = have_tp ? 0x3c : 0x10;
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+	 */
+	put_unaligned_be16(1, &buf[6]);
+
+	/*
+	 * Set MAXIMUM TRANSFER LENGTH
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+
+	/*
+	 * Exit now if we don't support TP or the initiator sent a too
+	 * short buffer.
+	 */
+	if (!have_tp || cmd->data_length < (0x3c + 4))
+		return 0;
+
+	/*
+	 * Set MAXIMUM UNMAP LBA COUNT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+
+	/*
+	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+			   &buf[24]);
+
+	/*
+	 * Set OPTIMAL UNMAP GRANULARITY
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+
+	/*
+	 * UNMAP GRANULARITY ALIGNMENT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+			   &buf[32]);
+	if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+		buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+	return 0;
+}
+
+/* Thin Provisioning VPD */
+static int
+target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	/*
+	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+	 *
+	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+	 * zero, then the page length shall be set to 0004h.  If the DP bit
+	 * is set to one, then the page length shall be set to the value
+	 * defined in table 162.
+	 */
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[1] = 0xb2;
+
+	/*
+	 * Set Hardcoded length mentioned above for DP=0
+	 */
+	put_unaligned_be16(0x0004, &buf[2]);
+
+	/*
+	 * The THRESHOLD EXPONENT field indicates the threshold set size in
+	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
+	 * 2^(threshold exponent)).
+	 *
+	 * Note that this is currently set to 0x00 as mkp says it will be
+	 * changing again.  We can enable this once it has settled in T10
+	 * and is actually used by Linux/SCSI ML code.
+	 */
+	buf[4] = 0x00;
+
+	/*
+	 * A TPU bit set to one indicates that the device server supports
+	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+	 * that the device server does not support the UNMAP command.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+		buf[5] = 0x80;
+
+	/*
+	 * A TPWS bit set to one indicates that the device server supports
+	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+	 * A TPWS bit set to zero indicates that the device server does not
+	 * support the use of the WRITE SAME (16) command to unmap LBAs.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+		buf[5] |= 0x40;
+
+	return 0;
+}
+
+static int
+target_emulate_inquiry(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *cdb = cmd->t_task->t_task_cdb;
+
+	if (!(cdb[1] & 0x1))
+		return target_emulate_inquiry_std(cmd);
+
+	/*
+	 * Make sure we at least have 4 bytes of INQUIRY response
+	 * payload for 0x00 going back for EVPD=1.  Note that 0x80
+	 * and 0x83 will check for enough payload data length and
+	 * jump to set_len: label when there is not enough inquiry EVPD
+	 * payload length left for the next outgoing EVPD metadata
+	 */
+	if (cmd->data_length < 4) {
+		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+			" too small for EVPD=1\n", cmd->data_length);
+		return -1;
+	}
+	buf[0] = dev->transport->get_device_type(dev);
+
+	switch (cdb[2]) {
+	case 0x00:
+		return target_emulate_evpd_00(cmd, buf);
+	case 0x80:
+		return target_emulate_evpd_80(cmd, buf);
+	case 0x83:
+		return target_emulate_evpd_83(cmd, buf);
+	case 0x86:
+		return target_emulate_evpd_86(cmd, buf);
+	case 0xb0:
+		return target_emulate_evpd_b0(cmd, buf);
+	case 0xb2:
+		return target_emulate_evpd_b2(cmd, buf);
+	default:
+		printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+target_emulate_readcapacity(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	u32 blocks = dev->transport->get_blocks(dev);
+
+	buf[0] = (blocks >> 24) & 0xff;
+	buf[1] = (blocks >> 16) & 0xff;
+	buf[2] = (blocks >> 8) & 0xff;
+	buf[3] = blocks & 0xff;
+	buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+	buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+	buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+	buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+	/*
+	 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+
+	return 0;
+}
+
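+/*
+ * Worked example (illustrative): with get_blocks() returning 0x10000 and a
+ * block_size of 512, the payload built above is 00 01 00 00 00 00 02 00,
+ * i.e. the block value in bytes 0-3 and the block length in bytes 4-7, both
+ * big-endian.  When emulate_tpu or emulate_tpws is enabled, bytes 0-3 are
+ * forced to 0xFFFFFFFF to signal that READ CAPACITY (16) should be used
+ * instead.
+ */
+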
+static int
+target_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned long long blocks = dev->transport->get_blocks(dev);
+
+	buf[0] = (blocks >> 56) & 0xff;
+	buf[1] = (blocks >> 48) & 0xff;
+	buf[2] = (blocks >> 40) & 0xff;
+	buf[3] = (blocks >> 32) & 0xff;
+	buf[4] = (blocks >> 24) & 0xff;
+	buf[5] = (blocks >> 16) & 0xff;
+	buf[6] = (blocks >> 8) & 0xff;
+	buf[7] = blocks & 0xff;
+	buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+	buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+	buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+	buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+	/*
+	 * Set Thin Provisioning Enable bit following sbc3r22 in section
+	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		buf[14] = 0x80;
+
+	return 0;
+}
+
+static int
+target_modesense_rwrecovery(unsigned char *p)
+{
+	p[0] = 0x01;
+	p[1] = 0x0a;
+
+	return 12;
+}
+
+static int
+target_modesense_control(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x0a;
+	p[1] = 0x0a;
+	p[2] = 2;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+	 *
+	 * 00b: The logical unit shall clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when a
+	 * command is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+	 * status.
+	 *
+	 * 10b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when
+	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+	 * CONFLICT status.
+	 *
+	 * 11b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall establish a unit attention condition for the
+	 * initiator port associated with the I_T nexus on which the BUSY,
+	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+	 * Depending on the status, the additional sense code shall be set to
+	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+	 * command, a unit attention condition shall be established only once
+	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
+	 * to the number of commands completed with one of those status codes.
+	 */
+	p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Task Aborted Status (TAS) bit set to zero.
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+	p[8] = 0xff;
+	p[9] = 0xff;
+	p[11] = 30;
+
+	return 12;
+}
+
+static int
+target_modesense_caching(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x08;
+	p[1] = 0x12;
+	if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+		p[2] = 0x04; /* Write Cache Enable */
+	p[12] = 0x20; /* Disabled Read Ahead */
+
+	return 20;
+}
+
+static void
+target_modesense_write_protect(unsigned char *buf, int type)
+{
+	/*
+	 * I believe that the WP bit (bit 7) in the mode header is the same for
+	 * all device types.
+	 */
+	switch (type) {
+	case TYPE_DISK:
+	case TYPE_TAPE:
+	default:
+		buf[0] |= 0x80; /* WP bit */
+		break;
+	}
+}
+
+static void
+target_modesense_dpofua(unsigned char *buf, int type)
+{
+	switch (type) {
+	case TYPE_DISK:
+		buf[0] |= 0x10; /* DPOFUA bit */
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+target_emulate_modesense(struct se_cmd *cmd, int ten)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *rbuf = cmd->t_task->t_task_buf;
+	int type = dev->transport->get_device_type(dev);
+	int offset = (ten) ? 8 : 4;
+	int length = 0;
+	unsigned char buf[SE_MODE_PAGE_BUF];
+
+	memset(buf, 0, SE_MODE_PAGE_BUF);
+
+	switch (cdb[2] & 0x3f) {
+	case 0x01:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		break;
+	case 0x08:
+		length = target_modesense_caching(dev, &buf[offset]);
+		break;
+	case 0x0a:
+		length = target_modesense_control(dev, &buf[offset]);
+		break;
+	case 0x3f:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		length += target_modesense_caching(dev, &buf[offset+length]);
+		length += target_modesense_control(dev, &buf[offset+length]);
+		break;
+	default:
+		printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+				cdb[2] & 0x3f);
+		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+	}
+	offset += length;
+
+	if (ten) {
+		offset -= 2;
+		buf[0] = (offset >> 8) & 0xff;
+		buf[1] = offset & 0xff;
+
+		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[3], type);
+
+		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[3], type);
+
+		if ((offset + 2) > cmd->data_length)
+			offset = cmd->data_length;
+
+	} else {
+		offset -= 1;
+		buf[0] = offset & 0xff;
+
+		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[2], type);
+
+		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[2], type);
+
+		if ((offset + 1) > cmd->data_length)
+			offset = cmd->data_length;
+	}
+	memcpy(rbuf, buf, offset);
+
+	return 0;
+}
+
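+/*
+ * Worked example (illustrative): a MODE SENSE(10) for page 0x08 builds an
+ * 8 byte header plus the 20 byte caching page, so offset reaches 28 and the
+ * MODE DATA LENGTH written to buf[0..1] is 28 - 2 = 26, since that field
+ * excludes its own two bytes.  The 6 byte CDB variant uses a 4 byte header
+ * and subtracts one byte instead.
+ */
+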
+static int
+target_emulate_request_sense(struct se_cmd *cmd)
+{
+	unsigned char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	u8 ua_asc = 0, ua_ascq = 0;
+
+	if (cdb[1] & 0x01) {
+		printk(KERN_ERR "REQUEST_SENSE description emulation not"
+			" supported\n");
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+	if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+		/*
+		 * CURRENT ERROR, UNIT ATTENTION
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+		/*
+		 * Make sure request data length is enough for additional
+		 * sense data.
+		 */
+		if (cmd->data_length <= 18) {
+			buf[7] = 0x00;
+			return 0;
+		}
+		/*
+		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
+		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
+		buf[7] = 0x0A;
+	} else {
+		/*
+		 * CURRENT ERROR, NO SENSE
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
+		/*
+		 * Make sure request data length is enough for additional
+		 * sense data.
+		 */
+		if (cmd->data_length <= 18) {
+			buf[7] = 0x00;
+			return 0;
+		}
+		/*
+		 * NO ADDITIONAL SENSE INFORMATION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = 0x00;
+		buf[7] = 0x0A;
+	}
+
+	return 0;
+}
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_unmap(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	sector_t lba;
+	unsigned int size = cmd->data_length, range;
+	int ret, offset;
+	unsigned short dl, bd_dl;
+
+	/* First UNMAP block descriptor starts at 8 byte offset */
+	offset = 8;
+	size -= 8;
+	dl = get_unaligned_be16(&cdb[0]);
+	bd_dl = get_unaligned_be16(&cdb[2]);
+	ptr = &buf[offset];
+	printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+	while (size) {
+		lba = get_unaligned_be64(&ptr[0]);
+		range = get_unaligned_be32(&ptr[8]);
+		printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+				 (unsigned long long)lba, range);
+
+		ret = dev->transport->do_discard(dev, lba, range);
+		if (ret < 0) {
+			printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+					ret);
+			return -1;
+		}
+
+		ptr += 16;
+		size -= 16;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
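+/*
+ * Illustrative layout of the parameter data walked above: an 8 byte UNMAP
+ * parameter list header is skipped, followed by 16 byte block descriptors
+ * that each carry an 8 byte big-endian LBA, a 4 byte big-endian number of
+ * logical blocks and 4 unused bytes; every descriptor is handed to
+ * do_discard() in turn.
+ */
+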
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_write_same(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	sector_t lba = cmd->t_task->t_task_lba;
+	unsigned int range;
+	int ret;
+
+	range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+
+	printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
+			 (unsigned long long)lba, range);
+
+	ret = dev->transport->do_discard(dev, lba, range);
+	if (ret < 0) {
+		printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
+		return -1;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+int
+transport_emulate_control_cdb(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned short service_action;
+	int ret = 0;
+
+	switch (cmd->t_task->t_task_cdb[0]) {
+	case INQUIRY:
+		ret = target_emulate_inquiry(cmd);
+		break;
+	case READ_CAPACITY:
+		ret = target_emulate_readcapacity(cmd);
+		break;
+	case MODE_SENSE:
+		ret = target_emulate_modesense(cmd, 0);
+		break;
+	case MODE_SENSE_10:
+		ret = target_emulate_modesense(cmd, 1);
+		break;
+	case SERVICE_ACTION_IN:
+		switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+		case SAI_READ_CAPACITY_16:
+			ret = target_emulate_readcapacity_16(cmd);
+			break;
+		default:
+			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+				cmd->t_task->t_task_cdb[1] & 0x1f);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		break;
+	case REQUEST_SENSE:
+		ret = target_emulate_request_sense(cmd);
+		break;
+	case UNMAP:
+		if (!dev->transport->do_discard) {
+			printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+					dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_unmap(task);
+		break;
+	case WRITE_SAME_16:
+		if (!dev->transport->do_discard) {
+			printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+					" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_write_same(task);
+		break;
+	case VARIABLE_LENGTH_CMD:
+		service_action =
+			get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+		switch (service_action) {
+		case WRITE_SAME_32:
+			if (!dev->transport->do_discard) {
+				printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+					" supported for: %s\n",
+					dev->transport->name);
+				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+			}
+			ret = target_emulate_write_same(task);
+			break;
+		default:
+			printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+					" 0x%02x\n", service_action);
+			break;
+		}
+		break;
+	case SYNCHRONIZE_CACHE:
+	case 0x91: /* SYNCHRONIZE_CACHE_16: */
+		if (!dev->transport->do_sync_cache) {
+			printk(KERN_ERR
+				"SYNCHRONIZE_CACHE emulation not supported"
+				" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		dev->transport->do_sync_cache(task);
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case START_STOP:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+		break;
+	default:
+		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
+			cmd->t_task->t_task_cdb[0], dev->transport->name);
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
+	if (ret < 0)
+		return ret;
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 0000000..caf8dc18
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3240 @@
+/*******************************************************************************
+ * Filename:  target_core_configfs.c
+ *
+ * This file contains ConfigFS logic for the Generic Target Engine project.
+ *
+ * Copyright (c) 2008-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_rd.h"
+
+static struct list_head g_tf_list;
+static struct mutex g_tf_lock;
+
+struct target_core_configfs_attribute {
+	struct configfs_attribute attr;
+	ssize_t (*show)(void *, char *);
+	ssize_t (*store)(void *, const char *, size_t);
+};
+
+static inline struct se_hba *
+item_to_hba(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+/*
+ * Attributes for /sys/kernel/config/target/
+ */
+static ssize_t target_core_attr_show(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      char *page)
+{
+	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
+		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+		utsname()->sysname, utsname()->machine);
+}
+
+static struct configfs_item_operations target_core_fabric_item_ops = {
+	.show_attribute = target_core_attr_show,
+};
+
+static struct configfs_attribute target_core_item_attr_version = {
+	.ca_owner	= THIS_MODULE,
+	.ca_name	= "version",
+	.ca_mode	= S_IRUGO,
+};
+
+static struct target_fabric_configfs *target_core_get_fabric(
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!(name))
+		return NULL;
+
+	mutex_lock(&g_tf_lock);
+	list_for_each_entry(tf, &g_tf_list, tf_list) {
+		if (!(strcmp(tf->tf_name, name))) {
+			atomic_inc(&tf->tf_access_cnt);
+			mutex_unlock(&g_tf_lock);
+			return tf;
+		}
+	}
+	mutex_unlock(&g_tf_lock);
+
+	return NULL;
+}
+
+/*
+ * Called from struct target_core_group_ops->make_group()
+ */
+static struct config_group *target_core_register_fabric(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+			" %s\n", group, name);
+	/*
+	 * Ensure that TCM subsystem plugins are loaded at this point for
+	 * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
+	 * LUN symlinks.
+	 */
+	if (transport_subsystem_check_init() < 0)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Below are some hardcoded request_module() calls to automatically
+	 * load local fabric modules when the following is called:
+	 *
+	 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
+	 *
+	 * Note that this does not limit which TCM fabric module can be
+	 * registered, but simply provides auto loading logic for mkdir(2)
+	 * system calls with known TCM fabric module names.
+	 */
+	if (!(strncmp(name, "iscsi", 5))) {
+		/*
+		 * Automatically load the LIO Target fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/iscsi
+		 */
+		ret = request_module("iscsi_target_mod");
+		if (ret < 0) {
+			printk(KERN_ERR "request_module() failed for"
+				" iscsi_target_mod.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	} else if (!(strncmp(name, "loopback", 8))) {
+		/*
+		 * Automatically load the tcm_loop fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/loopback
+		 */
+		ret = request_module("tcm_loop");
+		if (ret < 0) {
+			printk(KERN_ERR "request_module() failed for"
+				" tcm_loop.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	tf = target_core_get_fabric(name);
+	if (!(tf)) {
+		printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+			name);
+		return ERR_PTR(-EINVAL);
+	}
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+			" %s\n", tf->tf_name);
+	/*
+	 * On a successful target_core_get_fabric() lookup, the returned
+	 * struct target_fabric_configfs *tf will contain a usage reference.
+	 */
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+
+	tf->tf_group.default_groups = tf->tf_default_groups;
+	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
+	tf->tf_group.default_groups[1] = NULL;
+
+	config_group_init_type_name(&tf->tf_group, name,
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
+			&TF_CIT_TMPL(tf)->tfc_discovery_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+			" %s\n", tf->tf_group.cg_item.ci_name);
+	/*
+	 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
+	 */
+	tf->tf_ops.tf_subsys = tf->tf_subsys;
+	tf->tf_fabric = &tf->tf_group.cg_item;
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+			" for %s\n", name);
+
+	return &tf->tf_group;
+}
+
+/*
+ * Called from struct target_core_group_ops->drop_item()
+ */
+static void target_core_deregister_fabric(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct target_fabric_configfs *tf = container_of(
+		to_config_group(item), struct target_fabric_configfs, tf_group);
+	struct config_group *tf_group;
+	struct config_item *df_item;
+	int i;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+		" tf list\n", config_item_name(item));
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+			" %s\n", tf->tf_name);
+	atomic_dec(&tf->tf_access_cnt);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+			" tf->tf_fabric for %s\n", tf->tf_name);
+	tf->tf_fabric = NULL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+			" %s\n", config_item_name(item));
+
+	tf_group = &tf->tf_group;
+	for (i = 0; tf_group->default_groups[i]; i++) {
+		df_item = &tf_group->default_groups[i]->cg_item;
+		tf_group->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_fabric_group_ops = {
+	.make_group	= &target_core_register_fabric,
+	.drop_item	= &target_core_deregister_fabric,
+};
+
+/*
+ * All item attributes appearing in /sys/kernel/target/ appear here.
+ */
+static struct configfs_attribute *target_core_fabric_item_attrs[] = {
+	&target_core_item_attr_version,
+	NULL,
+};
+
+/*
+ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
+ */
+static struct config_item_type target_core_fabrics_item = {
+	.ct_item_ops	= &target_core_fabric_item_ops,
+	.ct_group_ops	= &target_core_fabric_group_ops,
+	.ct_attrs	= target_core_fabric_item_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct configfs_subsystem target_core_fabrics = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "target",
+			.ci_type = &target_core_fabrics_item,
+		},
+	},
+};
+
+static struct configfs_subsystem *target_core_subsystem[] = {
+	&target_core_fabrics,
+	NULL,
+};
+
+/*##############################################################################
+// Start functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/*
+ * First function called by fabric modules to:
+ *
+ * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
+ * 2) Add struct target_fabric_configfs to g_tf_list
+ * 3) Return struct target_fabric_configfs to fabric module to be passed
+ *    into target_fabric_configfs_register().
+ */
+struct target_fabric_configfs *target_fabric_configfs_init(
+	struct module *fabric_mod,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!(fabric_mod)) {
+		printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
+		return NULL;
+	}
+	if (!(name)) {
+		printk(KERN_ERR "Unable to locate passed fabric name\n");
+		return NULL;
+	}
+	if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+		printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+			"_NAME_SIZE\n", name);
+		return NULL;
+	}
+
+	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+	if (!(tf))
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&tf->tf_list);
+	atomic_set(&tf->tf_access_cnt, 0);
+	/*
+	 * Setup the default generic struct config_item_type's (cits) in
+	 * struct target_fabric_configfs->tf_cit_tmpl
+	 */
+	tf->tf_module = fabric_mod;
+	target_fabric_setup_cits(tf);
+
+	tf->tf_subsys = target_core_subsystem[0];
+	snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
+
+	mutex_lock(&g_tf_lock);
+	list_add_tail(&tf->tf_list, &g_tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+			">>>>>>>>>>>>>>\n");
+	printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+			" %s\n", tf, tf->tf_name);
+	return tf;
+}
+EXPORT_SYMBOL(target_fabric_configfs_init);
+
+/*
+ * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
+ */
+void target_fabric_configfs_free(
+	struct target_fabric_configfs *tf)
+{
+	mutex_lock(&g_tf_lock);
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	kfree(tf);
+}
+EXPORT_SYMBOL(target_fabric_configfs_free);
+
+/*
+ * Perform a sanity check of the passed tf->tf_ops before completing
+ * TCM fabric module registration.
+ */
+static int target_fabric_tf_ops_check(
+	struct target_fabric_configfs *tf)
+{
+	struct target_core_fabric_ops *tfo = &tf->tf_ops;
+
+	if (!(tfo->get_fabric_name)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_fabric_proto_ident)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_wwn)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_tag)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_default_depth)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_pr_transport_id)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_pr_transport_id_len)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode_cache)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode_write_protect)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_prod_mode_write_protect)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_alloc_fabric_acl)) {
+		printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_release_fabric_acl)) {
+		printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_inst_index)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->release_cmd_to_pool)) {
+		printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->release_cmd_direct)) {
+		printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->shutdown_session)) {
+		printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->close_session)) {
+		printk(KERN_ERR "Missing tfo->close_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->stop_session)) {
+		printk(KERN_ERR "Missing tfo->stop_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fall_back_to_erl0)) {
+		printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->sess_logged_in)) {
+		printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->sess_get_index)) {
+		printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->write_pending)) {
+		printk(KERN_ERR "Missing tfo->write_pending()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->write_pending_status)) {
+		printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->set_default_node_attributes)) {
+		printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_task_tag)) {
+		printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_cmd_state)) {
+		printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->new_cmd_failure)) {
+		printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_data_in)) {
+		printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_status)) {
+		printk(KERN_ERR "Missing tfo->queue_status()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_tm_rsp)) {
+		printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->set_fabric_sense_len)) {
+		printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_fabric_sense_len)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->is_state_remove)) {
+		printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->pack_lun)) {
+		printk(KERN_ERR "Missing tfo->pack_lun()\n");
+		return -EINVAL;
+	}
+	/*
+	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
+	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+	 * target_core_fabric_configfs.c WWN+TPG group context code.
+	 */
+	if (!(tfo->fabric_make_wwn)) {
+		printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_drop_wwn)) {
+		printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_make_tpg)) {
+		printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_drop_tpg)) {
+		printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Called 2nd from fabric module with returned parameter of
+ * struct target_fabric_configfs * from target_fabric_configfs_init().
+ *
+ * Upon a successful registration, the new fabric's struct config_item is
+ * returned.  Also, a pointer to this struct is set in the passed
+ * struct target_fabric_configfs.
+ */
+int target_fabric_configfs_register(
+	struct target_fabric_configfs *tf)
+{
+	struct config_group *su_group;
+	int ret;
+
+	if (!(tf)) {
+		printk(KERN_ERR "Unable to locate target_fabric_configfs"
+			" pointer\n");
+		return -EINVAL;
+	}
+	if (!(tf->tf_subsys)) {
+		printk(KERN_ERR "Unable to locate target struct"
+			" configfs_subsystem pointer\n");
+		return -EINVAL;
+	}
+	su_group = &tf->tf_subsys->su_group;
+	if (!(su_group)) {
+		printk(KERN_ERR "Unable to locate target struct config_group"
+			" pointer\n");
+		return -EINVAL;
+	}
+	ret = target_fabric_tf_ops_check(tf);
+	if (ret < 0)
+		return ret;
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+		">>>>>>>>>>\n");
+	return 0;
+}
+EXPORT_SYMBOL(target_fabric_configfs_register);
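+
+/*
+ * Sketch of the expected fabric module flow (illustrative only; the argument
+ * list of target_fabric_configfs_init() is elided here, and error handling
+ * is minimal):
+ *
+ *	tf = target_fabric_configfs_init(...);
+ *	(populate every tf->tf_ops callback verified by
+ *	 target_fabric_tf_ops_check() above)
+ *	if (target_fabric_configfs_register(tf) < 0)
+ *		target_fabric_configfs_free(tf);
+ */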
+
+void target_fabric_configfs_deregister(
+	struct target_fabric_configfs *tf)
+{
+	struct config_group *su_group;
+	struct configfs_subsystem *su;
+
+	if (!(tf)) {
+		printk(KERN_ERR "Unable to locate passed target_fabric_"
+			"configfs\n");
+		return;
+	}
+	su = tf->tf_subsys;
+	if (!(su)) {
+		printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+			" pointer\n");
+		return;
+	}
+	su_group = &tf->tf_subsys->su_group;
+	if (!(su_group)) {
+		printk(KERN_ERR "Unable to locate target struct config_group"
+			" pointer\n");
+		return;
+	}
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+			">>>>>>>>>>>>\n");
+	mutex_lock(&g_tf_lock);
+	if (atomic_read(&tf->tf_access_cnt)) {
+		mutex_unlock(&g_tf_lock);
+		printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+			tf->tf_name);
+		BUG();
+	}
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+			" %s\n", tf->tf_name);
+	tf->tf_module = NULL;
+	tf->tf_subsys = NULL;
+	kfree(tf);
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+			">>>>>\n");
+	return;
+}
+EXPORT_SYMBOL(target_fabric_configfs_deregister);
+
+/*##############################################################################
+// Stop functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/* Start functions for struct config_item_type target_core_dev_attrib_cit */
+
+#define DEF_DEV_ATTRIB_SHOW(_name)					\
+static ssize_t target_core_dev_show_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	char *page)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	ssize_t rb;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev)) {							\
+		spin_unlock(&se_dev->se_dev_lock); 			\
+		return -ENODEV;						\
+	}								\
+	rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return rb;							\
+}
+
+#define DEF_DEV_ATTRIB_STORE(_name)					\
+static ssize_t target_core_dev_store_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	unsigned long val;						\
+	int ret;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev)) {							\
+		spin_unlock(&se_dev->se_dev_lock);			\
+		return -ENODEV;						\
+	}								\
+	ret = strict_strtoul(page, 0, &val);				\
+	if (ret < 0) {							\
+		spin_unlock(&se_dev->se_dev_lock);                      \
+		printk(KERN_ERR "strict_strtoul() failed with"		\
+			" ret: %d\n", ret);				\
+		return -EINVAL;						\
+	}								\
+	ret = se_dev_set_##_name(dev, (u32)val);			\
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return (!ret) ? count : -EINVAL;				\
+}
+
+#define DEF_DEV_ATTRIB(_name)						\
+DEF_DEV_ATTRIB_SHOW(_name);						\
+DEF_DEV_ATTRIB_STORE(_name);
+
+#define DEF_DEV_ATTRIB_RO(_name)					\
+DEF_DEV_ATTRIB_SHOW(_name);
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
+#define SE_DEV_ATTR(_name, _mode)					\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_show_attr_##_name,			\
+		target_core_dev_store_attr_##_name);
+
+#define SE_DEV_ATTR_RO(_name)						\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_show_attr_##_name);
+
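+/*
+ * Informal note on the macros above (not additional code): for example,
+ * DEF_DEV_ATTRIB(block_size) below expands to a show routine returning
+ * DEV_ATTRIB(dev)->block_size and a store routine that parses the user
+ * value and calls se_dev_set_block_size(); SE_DEV_ATTR(block_size, ...)
+ * then wraps both in a struct target_core_dev_attrib_attribute.
+ */
+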
+DEF_DEV_ATTRIB(emulate_dpo);
+SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_write);
+SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_read);
+SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_write_cache);
+SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
+SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tas);
+SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpu);
+SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpws);
+SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(enforce_pr_isids);
+SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_block_size);
+SE_DEV_ATTR_RO(hw_block_size);
+
+DEF_DEV_ATTRIB(block_size);
+SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_max_sectors);
+SE_DEV_ATTR_RO(hw_max_sectors);
+
+DEF_DEV_ATTRIB(max_sectors);
+SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(optimal_sectors);
+SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_queue_depth);
+SE_DEV_ATTR_RO(hw_queue_depth);
+
+DEF_DEV_ATTRIB(queue_depth);
+SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(task_timeout);
+SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_lba_count);
+SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_block_desc_count);
+SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity);
+SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity_alignment);
+SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+
+static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+	&target_core_dev_attrib_emulate_dpo.attr,
+	&target_core_dev_attrib_emulate_fua_write.attr,
+	&target_core_dev_attrib_emulate_fua_read.attr,
+	&target_core_dev_attrib_emulate_write_cache.attr,
+	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+	&target_core_dev_attrib_emulate_tas.attr,
+	&target_core_dev_attrib_emulate_tpu.attr,
+	&target_core_dev_attrib_emulate_tpws.attr,
+	&target_core_dev_attrib_enforce_pr_isids.attr,
+	&target_core_dev_attrib_hw_block_size.attr,
+	&target_core_dev_attrib_block_size.attr,
+	&target_core_dev_attrib_hw_max_sectors.attr,
+	&target_core_dev_attrib_max_sectors.attr,
+	&target_core_dev_attrib_optimal_sectors.attr,
+	&target_core_dev_attrib_hw_queue_depth.attr,
+	&target_core_dev_attrib_queue_depth.attr,
+	&target_core_dev_attrib_task_timeout.attr,
+	&target_core_dev_attrib_max_unmap_lba_count.attr,
+	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
+	&target_core_dev_attrib_unmap_granularity.attr,
+	&target_core_dev_attrib_unmap_granularity_alignment.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_attrib_ops = {
+	.show_attribute		= target_core_dev_attrib_attr_show,
+	.store_attribute	= target_core_dev_attrib_attr_store,
+};
+
+static struct config_item_type target_core_dev_attrib_cit = {
+	.ct_item_ops		= &target_core_dev_attrib_ops,
+	.ct_attrs		= target_core_dev_attrib_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_attrib_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_wwn_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
+#define SE_DEV_WWN_ATTR(_name, _mode)					\
+static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_wwn_show_attr_##_name,			\
+		target_core_dev_wwn_store_attr_##_name);
+
+#define SE_DEV_WWN_ATTR_RO(_name)					\
+static struct target_core_dev_wwn_attribute				\
+			target_core_dev_wwn_##_name =			\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_wwn_show_attr_##_name);
+
+/*
+ * VPD page 0x80 Unit serial
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
+		&t10_wwn->unit_serial[0]);
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+
+	/*
+	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
+	 * from the struct scsi_device level firmware, do not allow
+	 * VPD Unit Serial to be emulated.
+	 *
+	 * Note this struct scsi_device could also be emulating VPD
+	 * information from its drivers/scsi LLD.  But for now we assume
+	 * it is doing 'the right thing' wrt a world wide unique
+	 * VPD Unit Serial Number that OS dependent multipath can depend on.
+	 */
+	if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+		printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+			" Unit Serial, ignoring request\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+		printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
+		return -EOVERFLOW;
+	}
+	/*
+	 * Check to see if any active $FABRIC_MOD exports exist.  If they
+	 * do exist, fail here as changing this information on the fly
+	 * (underneath the initiator side OS dependent multipath code)
+	 * could cause negative effects.
+	 */
+	dev = su_dev->se_dev_ptr;
+	if ((dev)) {
+		if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+			printk(KERN_ERR "Unable to set VPD Unit Serial while"
+				" active %d $FABRIC_MOD exports exist\n",
+				atomic_read(&dev->dev_export_obj.obj_access_count));
+			return -EINVAL;
+		}
+	}
+	/*
+	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
+	 *
+	 * Also, strip any newline added from the userspace
+	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
+	 */
+	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
+	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
+	snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+			"%s", strstrip(buf));
+	su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+			" %s\n", su_dev->t10_wwn.unit_serial);
+
+	return count;
+}
+
+SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Protocol Identifier
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	struct t10_vpd *vpd;
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	ssize_t len = 0;
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	spin_lock(&t10_wwn->t10_vpd_lock);
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
+		if (!(vpd->protocol_identifier_set))
+			continue;
+
+		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
+
+		if ((len + strlen(buf) > PAGE_SIZE))
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+	}
+	spin_unlock(&t10_wwn->t10_vpd_lock);
+
+	return len;
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
+
+/*
+ * Generic wrapper for dumping VPD identifiers by association.
+ */
+#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
+static ssize_t target_core_dev_wwn_show_attr_##_name(			\
+	struct t10_wwn *t10_wwn,					\
+	char *page)							\
+{									\
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;		\
+	struct se_device *dev;						\
+	struct t10_vpd *vpd;							\
+	unsigned char buf[VPD_TMP_BUF_SIZE];				\
+	ssize_t len = 0;						\
+									\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev))							\
+		return -ENODEV;						\
+									\
+	spin_lock(&t10_wwn->t10_vpd_lock);				\
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
+		if (vpd->association != _assoc)				\
+			continue;					\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+	}								\
+	spin_unlock(&t10_wwn->t10_vpd_lock);				\
+									\
+	return len;							\
+}
+
+/*
+ * VPD page 0x83 Association: Logical Unit
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: Target Port
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: SCSI Target Device
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
+
+static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+	&target_core_dev_wwn_vpd_unit_serial.attr,
+	&target_core_dev_wwn_vpd_protocol_identifier.attr,
+	&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
+	&target_core_dev_wwn_vpd_assoc_target_port.attr,
+	&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_wwn_ops = {
+	.show_attribute		= target_core_dev_wwn_attr_show,
+	.store_attribute	= target_core_dev_wwn_attr_store,
+};
+
+static struct config_item_type target_core_dev_wwn_cit = {
+	.ct_item_ops		= &target_core_dev_wwn_ops,
+	.ct_attrs		= target_core_dev_wwn_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_wwn_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_pr_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+#define SE_DEV_PR_ATTR(_name, _mode)					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_dev_pr_show_attr_##_name,				\
+	target_core_dev_pr_store_attr_##_name);
+
+#define SE_DEV_PR_ATTR_RO(_name)					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name =	\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_pr_show_attr_##_name);
+
+/*
+ * res_holder
+ */
+static ssize_t target_core_dev_pr_show_spc3_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+	struct t10_pr_registration *pr_reg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+		TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+		se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_spc2_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+
+	spin_lock(&dev->dev_reservation_lock);
+	se_nacl = dev->dev_reserved_node_acl;
+	if (!(se_nacl)) {
+		*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
+		TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+		se_nacl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_attr_res_holder(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	switch (T10_RES(su_dev)->res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC2_RESERVATIONS:
+		target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC_PASSTHROUGH:
+		len += sprintf(page+len, "Passthrough\n");
+		break;
+	default:
+		len += sprintf(page+len, "Unknown\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_holder);
+
+/*
+ * res_pr_all_tgt_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	/*
+	 * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
+	 * Basic PERSISTENT RESERVE OUT parameter list, page 290
+	 */
+	if (pr_reg->pr_reg_all_tg_pt)
+		len = sprintf(page, "SPC-3 Reservation: All Target"
+			" Ports registration\n");
+	else
+		len = sprintf(page, "SPC-3 Reservation: Single"
+			" Target Port registration\n");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
+
+/*
+ * res_pr_generation
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_generation);
+
+/*
+ * res_pr_holder_tg_port
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_node_acl *se_nacl;
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg;
+	struct target_core_fabric_ops *tfo;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	se_tpg = se_nacl->se_tpg;
+	lun = pr_reg->pr_reg_tg_pt_lun;
+	tfo = TPG_TFO(se_tpg);
+
+	len += sprintf(page+len, "SPC-3 Reservation: %s"
+		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+		tfo->tpg_get_wwn(se_tpg));
+	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
+		" Identifier Tag: %hu %s Portal Group Tag: %hu"
+		" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
+		tfo->get_fabric_name(), lun->unpacked_lun);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
+
+/*
+ * res_pr_registered_i_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct target_core_fabric_ops *tfo;
+	struct t10_pr_registration *pr_reg;
+	unsigned char buf[384];
+	char i_buf[PR_REG_ISID_ID_LEN];
+	ssize_t len = 0;
+	int reg_count = 0, prf_isid;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
+
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+
+		memset(buf, 0, 384);
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+		prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+					PR_REG_ISID_ID_LEN);
+		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
+			tfo->get_fabric_name(),
+			pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", pr_reg->pr_res_key,
+			pr_reg->pr_res_generation);
+
+		if ((len + strlen(buf) > PAGE_SIZE))
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+		reg_count++;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	if (!(reg_count))
+		len += sprintf(page+len, "None\n");
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
+
+/*
+ * res_pr_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_type);
+
+/*
+ * res_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	switch (T10_RES(su_dev)->res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+		break;
+	case SPC2_RESERVATIONS:
+		len = sprintf(page, "SPC2_RESERVATIONS\n");
+		break;
+	case SPC_PASSTHROUGH:
+		len = sprintf(page, "SPC_PASSTHROUGH\n");
+		break;
+	default:
+		len = sprintf(page, "UNKNOWN\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_type);
+
+/*
+ * res_aptpl_active
+ */
+
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "APTPL Bit Status: %s\n",
+		(T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+}
+
+SE_DEV_PR_ATTR_RO(res_aptpl_active);
+
+/*
+ * res_aptpl_metadata
+ */
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "Ready to process PR APTPL metadata..\n");
+}
+
+enum {
+	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
+	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
+	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
+	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_initiator_fabric, "initiator_fabric=%s"},
+	{Opt_initiator_node, "initiator_node=%s"},
+	{Opt_initiator_sid, "initiator_sid=%s"},
+	{Opt_sa_res_key, "sa_res_key=%s"},
+	{Opt_res_holder, "res_holder=%d"},
+	{Opt_res_type, "res_type=%d"},
+	{Opt_res_scope, "res_scope=%d"},
+	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
+	{Opt_mapped_lun, "mapped_lun=%d"},
+	{Opt_target_fabric, "target_fabric=%s"},
+	{Opt_target_node, "target_node=%s"},
+	{Opt_tpgt, "tpgt=%d"},
+	{Opt_port_rtpi, "port_rtpi=%d"},
+	{Opt_target_lun, "target_lun=%d"},
+	{Opt_err, NULL}
+};
+
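+/*
+ * Illustrative (hypothetical) APTPL metadata written into res_aptpl_metadata;
+ * the key=value tokens follow the match_table above and are separated by
+ * commas on a single line, e.g.:
+ *
+ *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:init,
+ *	initiator_sid=10,sa_res_key=0x1234abcd,res_holder=1,res_type=3,
+ *	mapped_lun=0,target_fabric=iSCSI,target_node=iqn.2003-01.com.example:tgt,
+ *	tpgt=1,target_lun=0
+ *
+ * All names and values above are examples only.
+ */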
+static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
+	unsigned char *isid = NULL;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	unsigned long long tmp_ll;
+	u64 sa_res_key = 0;
+	u32 mapped_lun = 0, target_lun = 0;
+	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
+	u16 port_rpti = 0, tpgt = 0;
+	u8 type = 0, scope;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_INFO "Unable to process APTPL metadata while"
+			" active fabric exports exist\n");
+		return -EINVAL;
+	}
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_initiator_fabric:
+			i_fabric = match_strdup(&args[0]);
+			break;
+		case Opt_initiator_node:
+			i_port = match_strdup(&args[0]);
+			if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+				printk(KERN_ERR "APTPL metadata initiator_node="
+					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
+					PR_APTPL_MAX_IPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_initiator_sid:
+			isid = match_strdup(&args[0]);
+			if (strlen(isid) > PR_REG_ISID_LEN) {
+				printk(KERN_ERR "APTPL metadata initiator_isid"
+					"= exceeds PR_REG_ISID_LEN: %d\n",
+					PR_REG_ISID_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_sa_res_key:
+			arg_p = match_strdup(&args[0]);
+			ret = strict_strtoull(arg_p, 0, &tmp_ll);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoull() failed for"
+					" sa_res_key=\n");
+				goto out;
+			}
+			sa_res_key = (u64)tmp_ll;
+			break;
+		/*
+		 * PR APTPL Metadata for Reservation
+		 */
+		case Opt_res_holder:
+			match_int(args, &arg);
+			res_holder = arg;
+			break;
+		case Opt_res_type:
+			match_int(args, &arg);
+			type = (u8)arg;
+			break;
+		case Opt_res_scope:
+			match_int(args, &arg);
+			scope = (u8)arg;
+			break;
+		case Opt_res_all_tg_pt:
+			match_int(args, &arg);
+			all_tg_pt = (int)arg;
+			break;
+		case Opt_mapped_lun:
+			match_int(args, &arg);
+			mapped_lun = (u32)arg;
+			break;
+		/*
+		 * PR APTPL Metadata for Target Port
+		 */
+		case Opt_target_fabric:
+			t_fabric = match_strdup(&args[0]);
+			break;
+		case Opt_target_node:
+			t_port = match_strdup(&args[0]);
+			if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+				printk(KERN_ERR "APTPL metadata target_node="
+					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
+					PR_APTPL_MAX_TPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_tpgt:
+			match_int(args, &arg);
+			tpgt = (u16)arg;
+			break;
+		case Opt_port_rtpi:
+			match_int(args, &arg);
+			port_rpti = (u16)arg;
+			break;
+		case Opt_target_lun:
+			match_int(args, &arg);
+			target_lun = (u32)arg;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!(i_port) || !(t_port) || !(sa_res_key)) {
+		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (res_holder && !(type)) {
+		printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+				" holder\n", type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
+			res_holder, all_tg_pt, type);
+out:
+	kfree(orig);
+	return (ret == 0) ? count : ret;
+}
+
+SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+
+static struct configfs_attribute *target_core_dev_pr_attrs[] = {
+	&target_core_dev_pr_res_holder.attr,
+	&target_core_dev_pr_res_pr_all_tgt_pts.attr,
+	&target_core_dev_pr_res_pr_generation.attr,
+	&target_core_dev_pr_res_pr_holder_tg_port.attr,
+	&target_core_dev_pr_res_pr_registered_i_pts.attr,
+	&target_core_dev_pr_res_pr_type.attr,
+	&target_core_dev_pr_res_type.attr,
+	&target_core_dev_pr_res_aptpl_active.attr,
+	&target_core_dev_pr_res_aptpl_metadata.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_pr_ops = {
+	.show_attribute		= target_core_dev_pr_attr_show,
+	.store_attribute	= target_core_dev_pr_attr_store,
+};
+
+static struct config_item_type target_core_dev_pr_cit = {
+	.ct_item_ops		= &target_core_dev_pr_ops,
+	.ct_attrs		= target_core_dev_pr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_pr_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_cit */
+
+static ssize_t target_core_show_dev_info(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	int bl = 0;
+	ssize_t read_bytes = 0;
+
+	if (!(se_dev->se_dev_ptr))
+		return -ENODEV;
+
+	transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+	read_bytes += bl;
+	read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_info = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "info",
+		    .ca_mode = S_IRUGO },
+	.show	= target_core_show_dev_info,
+	.store	= NULL,
+};
+
+static ssize_t target_core_store_dev_control(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate struct se_subsystem_dev->"
+				"se_dev_su_ptr\n");
+		return -EINVAL;
+	}
+
+	return t->set_configfs_dev_params(hba, se_dev, page, count);
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_control = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "control",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_control,
+};
+
+static ssize_t target_core_show_dev_alias(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+}
+
+static ssize_t target_core_store_dev_alias(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_DEV_ALIAS_LEN-1)) {
+		printk(KERN_ERR "alias count: %d exceeds"
+			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
+			SE_DEV_ALIAS_LEN-1);
+		return -EINVAL;
+	}
+
+	se_dev->su_dev_flags |= SDF_USING_ALIAS;
+	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
+			"%s", page);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_alias);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alias = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alias",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_alias,
+	.store	= target_core_store_dev_alias,
+};
+
+static ssize_t target_core_show_dev_udev_path(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+}
+
+static ssize_t target_core_store_dev_udev_path(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_UDEV_PATH_LEN-1)) {
+		printk(KERN_ERR "udev_path count: %d exceeds"
+			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
+			SE_UDEV_PATH_LEN-1);
+		return -EINVAL;
+	}
+
+	se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+			"%s", page);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_udev_path);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "udev_path",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_udev_path,
+	.store	= target_core_store_dev_udev_path,
+};
+
+static ssize_t target_core_store_dev_enable(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_device *dev;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	char *ptr;
+
+	ptr = strstr(page, "1");
+	if (!(ptr)) {
+		printk(KERN_ERR "For dev_enable ops, only valid value"
+				" is \"1\"\n");
+		return -EINVAL;
+	}
+	if ((se_dev->se_dev_ptr)) {
+		printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+				" object\n");
+		return -EEXIST;
+	}
+
+	if (t->check_configfs_dev_params(hba, se_dev) < 0)
+		return -EINVAL;
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (!(dev) || IS_ERR(dev))
+		return -EINVAL;
+
+	se_dev->se_dev_ptr = dev;
+	printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+		" %p\n", se_dev->se_dev_ptr);
+
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_enable = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "enable",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_enable,
+};
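+
+/*
+ * Illustrative userspace sequence for bringing up a storage object, assuming
+ * configfs is mounted at the usual /sys/kernel/config location (paths are
+ * examples only):
+ *
+ *	echo "<backstore parameters>" > \
+ *		/sys/kernel/config/target/core/$HBA/$DEV/control
+ *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
+ */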
+
+static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct config_item *lu_ci;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem)) {
+		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		lu_ci = &lu_gp->lu_gp_group.cg_item;
+		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
+			config_item_name(lu_ci), lu_gp->lu_gp_id);
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	return len;
+}
+
+static ssize_t target_core_store_alua_lu_gp(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = su_dev->se_dev_hba;
+	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+	int move = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		return -EINVAL;
+	}
+	if (count > LU_GROUP_NAME_BUF) {
+		printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA logical unit alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_lu_gp_by_name() will increment the reference
+		 * to struct t10_alua_lu_gp.  This reference is released with
+		 * core_alua_put_lu_gp_from_name() below.
+		 */
+		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+		if (!(lu_gp_new))
+			return -ENODEV;
+	}
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem)) {
+		if (lu_gp_new)
+			core_alua_put_lu_gp_from_name(lu_gp_new);
+		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		/*
+		 * Clearing an existing lu_gp association, and replacing
+		 * with NULL
+		 */
+		if (!(lu_gp_new)) {
+			printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+				" %hu\n",
+				config_item_name(&hba->hba_group.cg_item),
+				config_item_name(&su_dev->se_dev_group.cg_item),
+				config_item_name(&lu_gp->lu_gp_group.cg_item),
+				lu_gp->lu_gp_id);
+
+			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of lu_gp_mem with lu_gp
+		 */
+		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+		move = 1;
+	}
+	/*
+	 * Associate lu_gp_mem with lu_gp_new.
+	 */
+	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+		" core/alua/lu_gps/%s, ID: %hu\n",
+		(move) ? "Moving" : "Adding",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&su_dev->se_dev_group.cg_item),
+		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+		lu_gp_new->lu_gp_id);
+
+	core_alua_put_lu_gp_from_name(lu_gp_new);
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alua_lu_gp",
+		    .ca_mode = S_IRUGO | S_IWUSR },
+	.show	= target_core_show_alua_lu_gp,
+	.store	= target_core_store_alua_lu_gp,
+};
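+
+/*
+ * Illustrative usage of the alua_lu_gp attribute: writing an existing LU
+ * group alias associates the device with that group, while writing the
+ * literal string "NULL" drops the current association:
+ *
+ *	echo some_lu_gp > $CONFIGFS/core/$HBA/$DEV/alua_lu_gp
+ *	echo NULL > $CONFIGFS/core/$HBA/$DEV/alua_lu_gp
+ *
+ * "some_lu_gp" is a placeholder for a group created under core/alua/lu_gps/.
+ */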
+
+static struct configfs_attribute *lio_core_dev_attrs[] = {
+	&target_core_attr_dev_info.attr,
+	&target_core_attr_dev_control.attr,
+	&target_core_attr_dev_alias.attr,
+	&target_core_attr_dev_udev_path.attr,
+	&target_core_attr_dev_enable.attr,
+	&target_core_attr_dev_alua_lu_gp.attr,
+	NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+	struct se_subsystem_api *t = hba->transport;
+	struct config_group *dev_cg = &se_dev->se_dev_group;
+
+	kfree(dev_cg->default_groups);
+	/*
+	 * This pointer will be set when the storage is enabled with:
+	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable`
+	 */
+	if (se_dev->se_dev_ptr) {
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+			"virtual_device() for se_dev_ptr: %p\n",
+			se_dev->se_dev_ptr);
+
+		se_free_virtual_device(se_dev->se_dev_ptr, hba);
+	} else {
+		/*
+		 * Release struct se_subsystem_dev->se_dev_su_ptr..
+		 */
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+			"device() for se_dev_su_ptr: %p\n",
+			se_dev->se_dev_su_ptr);
+
+		t->free_device(se_dev->se_dev_su_ptr);
+	}
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating struct"
+			" se_subsystem_dev: %p\n", se_dev);
+	kfree(se_dev);
+}
+
+static ssize_t target_core_dev_show(struct config_item *item,
+				     struct configfs_attribute *attr,
+				     char *page)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!(tc_attr->show))
+		return -EINVAL;
+
+	return tc_attr->show((void *)se_dev, page);
+}
+
+static ssize_t target_core_dev_store(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      const char *page, size_t count)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!(tc_attr->store))
+		return -EINVAL;
+
+	return tc_attr->store((void *)se_dev, page, count);
+}
+
+static struct configfs_item_operations target_core_dev_item_ops = {
+	.release		= target_core_dev_release,
+	.show_attribute		= target_core_dev_show,
+	.store_attribute	= target_core_dev_store,
+};
+
+static struct config_item_type target_core_dev_cit = {
+	.ct_item_ops		= &target_core_dev_item_ops,
+	.ct_attrs		= lio_core_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
+#define SE_DEV_ALUA_LU_ATTR(_name, _mode)				\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_lu_gp_show_attr_##_name,			\
+	target_core_alua_lu_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_LU_ATTR_RO(_name)					\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_lu_gp_show_attr_##_name);
+
+/*
+ * lu_gp_id
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	if (!(lu_gp->lu_gp_valid_id))
+		return 0;
+
+	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
+}
+
+static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	unsigned long lu_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &lu_gp_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+			" lu_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (lu_gp_id > 0x0000ffff) {
+		printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", lu_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s to ID: %hu\n",
+		config_item_name(&alua_lu_gp_cg->cg_item),
+		lu_gp->lu_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
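+
+/*
+ * Illustrative usage: an LU group only reports a lu_gp_id once a 16-bit ID
+ * has been assigned, e.g.:
+ *
+ *	mkdir $CONFIGFS/core/alua/lu_gps/some_lu_gp
+ *	echo 1 > $CONFIGFS/core/alua/lu_gps/some_lu_gp/lu_gp_id
+ *
+ * "some_lu_gp" is a placeholder name; IDs above 0x0000ffff are rejected.
+ */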
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_members(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_hba *hba;
+	struct se_subsystem_dev *su_dev;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		hba = su_dev->se_dev_hba;
+
+		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_LU_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
+
+static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
+	&target_core_alua_lu_gp_lu_gp_id.attr,
+	&target_core_alua_lu_gp_members.attr,
+	NULL,
+};
+
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	core_alua_free_lu_gp(lu_gp);
+}
+
+static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.release		= target_core_alua_lu_gp_release,
+	.show_attribute		= target_core_alua_lu_gp_attr_show,
+	.store_attribute	= target_core_alua_lu_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_lu_gp_cit = {
+	.ct_item_ops		= &target_core_alua_lu_gp_ops,
+	.ct_attrs		= target_core_alua_lu_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+static struct config_group *target_core_alua_create_lu_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_group *alua_lu_gp_cg = NULL;
+	struct config_item *alua_lu_gp_ci = NULL;
+
+	lu_gp = core_alua_allocate_lu_gp(name, 0);
+	if (IS_ERR(lu_gp))
+		return NULL;
+
+	alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_lu_gp_cg, name,
+			&target_core_alua_lu_gp_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s\n",
+		config_item_name(alua_lu_gp_ci));
+
+	return alua_lu_gp_cg;
+}
+
+static void target_core_alua_drop_lu_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s, ID: %hu\n",
+		config_item_name(item), lu_gp->lu_gp_id);
+	/*
+	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+	 * -> target_core_alua_lu_gp_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
+	.make_group		= &target_core_alua_create_lu_gp,
+	.drop_item		= &target_core_alua_drop_lu_gp,
+};
+
+static struct config_item_type target_core_alua_lu_gps_cit = {
+	.ct_item_ops		= NULL,
+	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
+#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name,			\
+	target_core_alua_tg_pt_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name);
+
+/*
+ * alua_access_state
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	unsigned long tmp;
+	int new_state, ret;
+
+	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to do implicit ALUA on non valid"
+			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract new ALUA access state"
+				" from %s\n", page);
+		return -EINVAL;
+	}
+	new_state = (int)tmp;
+
+	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Unable to process implicit configfs ALUA"
+			" transition while TPGS_IMPLICT_ALUA is disabled\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+					NULL, NULL, new_state, 0);
+	return (!ret) ? count : -EINVAL;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
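+
+/*
+ * Illustrative usage: writing a numeric ALUA primary access state (e.g.
+ * "echo 0 > .../alua_access_state") triggers an implicit transition via
+ * core_alua_do_port_transition(), provided the group has a valid ID and
+ * TPGS_IMPLICT_ALUA is enabled.  The path and value are examples only; see
+ * the store handler above for the exact preconditions.
+ */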
+
+/*
+ * alua_access_status
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%s\n",
+		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int new_status, ret;
+
+	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to set ALUA access status on non"
+			" valid tg_pt_gp ID: %hu\n",
+			tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract new ALUA access status"
+				" from %s\n", page);
+		return -EINVAL;
+	}
+	new_status = (int)tmp;
+
+	if ((new_status != ALUA_STATUS_NONE) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+				new_status);
+		return -EINVAL;
+	}
+
+	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_type
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_access_type(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_access_type(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_write_metadata
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+		return -EINVAL;
+	}
+
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_write_metadata:"
+			" %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
+
+/*
+ * nonop_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
+
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * trans_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * preferred
+ */
+
+static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_preferred_bit(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_preferred_bit(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
+
+/*
+ * tg_pt_gp_id
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if (!(tg_pt_gp->tg_pt_gp_valid_id))
+		return 0;
+
+	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	unsigned long tg_pt_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+			" tg_pt_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (tg_pt_gp_id > 0x0000ffff) {
+		printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", tg_pt_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+		"core/alua/tg_pt_gps/%s to ID: %hu\n",
+		config_item_name(&alua_tg_pt_gp_cg->cg_item),
+		tg_pt_gp->tg_pt_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	struct se_port *port;
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+			tg_pt_gp_mem_list) {
+		port = tg_pt_gp_mem->tg_pt;
+		tpg = port->sep_tpg;
+		lun = port->sep_lun;
+
+		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+			"/%s\n", TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			printk(KERN_WARNING "Ran out of tg_pt_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR_RO(members);
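+
+/*
+ * Editor's illustration, not part of this patch: a minimal userspace sketch
+ * that dumps the read-only members attribute above.  Each line of output is
+ * one "<fabric>/<wwn>/tpgt_<tag>/<lun>" entry per associated target port;
+ * the path below assumes the same hypothetical iblock_0/my_dev device.
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		const char *path = "/sys/kernel/config/target/core/iblock_0/"
+ *			"my_dev/alua/default_tg_pt_gp/members";
+ *		char line[256];
+ *		FILE *f = fopen(path, "r");
+ *
+ *		if (!f)
+ *			return 1;
+ *		while (fgets(line, sizeof(line), f))
+ *			fputs(line, stdout);	// one entry per port in the group
+ *		return fclose(f) ? 1 : 0;
+ *	}
+ */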
+
+CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
+			tg_pt_gp_group);
+
+static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
+	&target_core_alua_tg_pt_gp_alua_access_state.attr,
+	&target_core_alua_tg_pt_gp_alua_access_status.attr,
+	&target_core_alua_tg_pt_gp_alua_access_type.attr,
+	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
+	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_preferred.attr,
+	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
+	&target_core_alua_tg_pt_gp_members.attr,
+	NULL,
+};
+
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
+static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.release		= target_core_alua_tg_pt_gp_release,
+	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
+	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
+	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+static struct config_group *target_core_alua_create_tg_pt_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua *alua = container_of(group, struct t10_alua,
+					alua_tg_pt_gps_group);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
+	struct config_group *alua_tg_pt_gp_cg = NULL;
+	struct config_item *alua_tg_pt_gp_ci = NULL;
+
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+	if (!(tg_pt_gp))
+		return NULL;
+
+	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_tg_pt_gp_cg, name,
+			&target_core_alua_tg_pt_gp_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s\n",
+		config_item_name(alua_tg_pt_gp_ci));
+
+	return alua_tg_pt_gp_cg;
+}
+
+static void target_core_alua_drop_tg_pt_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
+		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
+	/*
+	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+	 * -> target_core_alua_tg_pt_gp_release().
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+	.make_group		= &target_core_alua_create_tg_pt_gp,
+	.drop_item		= &target_core_alua_drop_tg_pt_gp,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gps_cit = {
+	.ct_group_ops		= &target_core_alua_tg_pt_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
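+
+/*
+ * Editor's illustration, not part of this patch: creating and removing a
+ * target port group from userspace exercises ->make_group()/->drop_item()
+ * above.  The HBA/device names are hypothetical; the "alua" group name and
+ * the tg_pt_gp_id range (0..0xffff) come from this patch.
+ *
+ *	#include <stdio.h>
+ *	#include <sys/stat.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		const char *dir = "/sys/kernel/config/target/core/iblock_0/"
+ *			"my_dev/alua/my_tg_pt_gp";
+ *		char attr[256];
+ *		FILE *f;
+ *
+ *		if (mkdir(dir, 0755))		// -> target_core_alua_create_tg_pt_gp()
+ *			return 1;
+ *		snprintf(attr, sizeof(attr), "%s/tg_pt_gp_id", dir);
+ *		f = fopen(attr, "w");
+ *		if (!f)
+ *			return 1;
+ *		fputs("16\n", f);		// any id <= 0x0000ffff is accepted
+ *		fclose(f);
+ *		return rmdir(dir) ? 1 : 0;	// -> target_core_alua_drop_tg_pt_gp()
+ *	}
+ */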
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_cit */
+
+/*
+ * target_core_alua_cit is a ConfigFS group that lives under
+ * /sys/kernel/config/target/core/alua.  There are default groups
+ * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
+ * target_core_alua_cit in target_core_init_configfs() below.
+ */
+static struct config_item_type target_core_alua_cit = {
+	.ct_item_ops		= NULL,
+	.ct_attrs		= NULL,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_cit */
+
+/* Start functions for struct config_item_type target_core_hba_cit */
+
+static struct config_group *target_core_make_subdev(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *se_dev;
+	struct se_subsystem_api *t;
+	struct config_item *hba_ci = &group->cg_item;
+	struct se_hba *hba = item_to_hba(hba_ci);
+	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+
+	if (mutex_lock_interruptible(&hba->hba_access_mutex))
+		return NULL;
+
+	/*
+	 * Locate the struct se_subsystem_api from parent's struct se_hba.
+	 */
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!se_dev) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		goto unlock;
+	}
+	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_reservation.registration_lock);
+	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+
+	se_dev->se_dev_hba = hba;
+	dev_cg = &se_dev->se_dev_group;
+
+	dev_cg->default_groups = kzalloc(sizeof(struct config_group *) * 6,
+			GFP_KERNEL);
+	if (!(dev_cg->default_groups))
+		goto out;
+	/*
+	 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
+	 * for ->allocate_virtdevice()
+	 *
+	 * se_dev->se_dev_ptr will be set after ->create_virtdev()
+	 * has been called successfully in the next level up in the
+	 * configfs tree for device object's struct config_group.
+	 */
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		goto out;
+	}
+	spin_lock(&se_global->g_device_lock);
+	list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
+	spin_unlock(&se_global->g_device_lock);
+
+	config_group_init_type_name(&se_dev->se_dev_group, name,
+			&target_core_dev_cit);
+	config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+			&target_core_dev_attrib_cit);
+	config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+			&target_core_dev_pr_cit);
+	config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+			&target_core_dev_wwn_cit);
+	config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+			"alua", &target_core_alua_tg_pt_gps_cit);
+	dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
+	dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
+	dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
+	dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	dev_cg->default_groups[4] = NULL;
+	/*
+	 * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp
+	 */
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+	if (!(tg_pt_gp))
+		goto out;
+
+	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+	tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!(tg_pt_gp_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+				"default_groups\n");
+		goto out;
+	}
+
+	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
+			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
+	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
+	tg_pt_gp_cg->default_groups[1] = NULL;
+	T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+		" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+
+	mutex_unlock(&hba->hba_access_mutex);
+	return &se_dev->se_dev_group;
+out:
+	if (T10_ALUA(se_dev)->default_tg_pt_gp) {
+		core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+		T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+	}
+	if (tg_pt_gp_cg)
+		kfree(tg_pt_gp_cg->default_groups);
+	if (dev_cg)
+		kfree(dev_cg->default_groups);
+	if (se_dev->se_dev_su_ptr)
+		t->free_device(se_dev->se_dev_su_ptr);
+	kfree(se_dev);
+unlock:
+	mutex_unlock(&hba->hba_access_mutex);
+	return NULL;
+}
+
+static void target_core_drop_subdev(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct se_hba *hba;
+	struct se_subsystem_api *t;
+	struct config_item *df_item;
+	struct config_group *dev_cg, *tg_pt_gp_cg;
+	int i;
+
+	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+
+	mutex_lock(&hba->hba_access_mutex);
+	t = hba->transport;
+
+	spin_lock(&se_global->g_device_lock);
+	list_del(&se_dev->g_se_dev_list);
+	spin_unlock(&se_global->g_device_lock);
+
+	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
+		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
+		tg_pt_gp_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(tg_pt_gp_cg->default_groups);
+	/*
+	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
+	 * directly from target_core_alua_tg_pt_gp_release().
+	 */
+	T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+
+	dev_cg = &se_dev->se_dev_group;
+	for (i = 0; dev_cg->default_groups[i]; i++) {
+		df_item = &dev_cg->default_groups[i]->cg_item;
+		dev_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	/*
+	 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+	 * from target_core_dev_item_ops->release() ->target_core_dev_release().
+	 */
+	config_item_put(item);
+	mutex_unlock(&hba->hba_access_mutex);
+}
+
+static struct configfs_group_operations target_core_hba_group_ops = {
+	.make_group		= target_core_make_subdev,
+	.drop_item		= target_core_drop_subdev,
+};
+
+CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
+#define SE_HBA_ATTR(_name, _mode)				\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR(_name, _mode,			\
+		target_core_hba_show_attr_##_name,		\
+		target_core_hba_store_attr_##_name);
+
+#define SE_HBA_ATTR_RO(_name)					\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR_RO(_name,			\
+		target_core_hba_show_attr_##_name);
+
+static ssize_t target_core_hba_show_attr_hba_info(
+	struct se_hba *hba,
+	char *page)
+{
+	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
+			hba->hba_id, hba->transport->name,
+			TARGET_CORE_CONFIGFS_VERSION);
+}
+
+SE_HBA_ATTR_RO(hba_info);
+
+static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
+				char *page)
+{
+	int hba_mode = 0;
+
+	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
+		hba_mode = 1;
+
+	return sprintf(page, "%d\n", hba_mode);
+}
+
+static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
+				const char *page, size_t count)
+{
+	struct se_subsystem_api *transport = hba->transport;
+	unsigned long mode_flag;
+	int ret;
+
+	if (transport->pmode_enable_hba == NULL)
+		return -EINVAL;
+
+	ret = strict_strtoul(page, 0, &mode_flag);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+		return -EINVAL;
+	}
+
+	spin_lock(&hba->device_lock);
+	if (!(list_empty(&hba->hba_dev_list))) {
+		printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+		spin_unlock(&hba->device_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&hba->device_lock);
+
+	ret = transport->pmode_enable_hba(hba, mode_flag);
+	if (ret < 0)
+		return -EINVAL;
+	if (ret > 0)
+		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+	else if (ret == 0)
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+
+	return count;
+}
+
+SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+
+static void target_core_hba_release(struct config_item *item)
+{
+	struct se_hba *hba = container_of(to_config_group(item),
+				struct se_hba, hba_group);
+	core_delete_hba(hba);
+}
+
+static struct configfs_attribute *target_core_hba_attrs[] = {
+	&target_core_hba_hba_info.attr,
+	&target_core_hba_hba_mode.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_hba_item_ops = {
+	.release		= target_core_hba_release,
+	.show_attribute		= target_core_hba_attr_show,
+	.store_attribute	= target_core_hba_attr_store,
+};
+
+static struct config_item_type target_core_hba_cit = {
+	.ct_item_ops		= &target_core_hba_item_ops,
+	.ct_group_ops		= &target_core_hba_group_ops,
+	.ct_attrs		= target_core_hba_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *target_core_call_addhbatotarget(
+	struct config_group *group,
+	const char *name)
+{
+	char *se_plugin_str, *str, *str2;
+	struct se_hba *hba;
+	char buf[TARGET_CORE_NAME_MAX_LEN];
+	unsigned long plugin_dep_id = 0;
+	int ret;
+
+	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
+	if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+		printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
+			TARGET_CORE_NAME_MAX_LEN);
+		return ERR_PTR(-ENAMETOOLONG);
+	}
+	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
+
+	str = strstr(buf, "_");
+	if (!(str)) {
+		printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+		return ERR_PTR(-EINVAL);
+	}
+	se_plugin_str = buf;
+	/*
+	 * Special case for subsystem plugins that have "_" in their names.
+	 * Namely rd_direct and rd_mcp..
+	 */
+	str2 = strstr(str+1, "_");
+	if ((str2)) {
+		*str2 = '\0'; /* Terminate for *se_plugin_str */
+		str2++; /* Skip to start of plugin dependent ID */
+		str = str2;
+	} else {
+		*str = '\0'; /* Terminate for *se_plugin_str */
+		str++; /* Skip to start of plugin dependent ID */
+	}
+
+	ret = strict_strtoul(str, 0, &plugin_dep_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+				" plugin_dep_id\n", ret);
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * Load up TCM subsystem plugins if they have not already been loaded.
+	 */
+	if (transport_subsystem_check_init() < 0)
+		return ERR_PTR(-EINVAL);
+
+	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
+	if (IS_ERR(hba))
+		return ERR_CAST(hba);
+
+	config_group_init_type_name(&hba->hba_group, name,
+			&target_core_hba_cit);
+
+	return &hba->hba_group;
+}
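+
+/*
+ * Editor's illustration, not part of this patch: a standalone userspace
+ * sketch of the "$SUBSYSTEM_PLUGIN_$HOST_ID" parsing above, using plain
+ * strtoul() in place of strict_strtoul().  The plugin dependent ID follows
+ * the second '_' when the plugin name itself contains one (rd_direct,
+ * rd_mcp), otherwise the first.
+ *
+ *	#include <stdio.h>
+ *	#include <stdlib.h>
+ *	#include <string.h>
+ *
+ *	static int parse_hba_name(char *buf, char **plugin, unsigned long *id)
+ *	{
+ *		char *str = strstr(buf, "_"), *str2;
+ *
+ *		if (!str)
+ *			return -1;
+ *		*plugin = buf;
+ *		str2 = strstr(str + 1, "_");
+ *		if (str2) {
+ *			*str2 = '\0';	// keep the '_' inside rd_mcp, rd_direct
+ *			str = str2;
+ *		} else {
+ *			*str = '\0';
+ *		}
+ *		*id = strtoul(str + 1, NULL, 0);
+ *		return 0;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		char a[] = "iblock_0", b[] = "rd_mcp_2";
+ *		char *p;
+ *		unsigned long id;
+ *
+ *		if (!parse_hba_name(a, &p, &id))
+ *			printf("plugin %s, id %lu\n", p, id);	// iblock, 0
+ *		if (!parse_hba_name(b, &p, &id))
+ *			printf("plugin %s, id %lu\n", p, id);	// rd_mcp, 2
+ *		return 0;
+ *	}
+ */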
+
+static void target_core_call_delhbafromtarget(
+	struct config_group *group,
+	struct config_item *item)
+{
+	/*
+	 * core_delete_hba() is called from target_core_hba_item_ops->release()
+	 * -> target_core_hba_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_group_ops = {
+	.make_group	= target_core_call_addhbatotarget,
+	.drop_item	= target_core_call_delhbafromtarget,
+};
+
+static struct config_item_type target_core_cit = {
+	.ct_item_ops	= NULL,
+	.ct_group_ops	= &target_core_group_ops,
+	.ct_attrs	= NULL,
+	.ct_owner	= THIS_MODULE,
+};
+
+/* Stop functions for struct config_item_type target_core_hba_cit */
+
+static int target_core_init_configfs(void)
+{
+	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
+	struct config_group *lu_gp_cg = NULL;
+	struct configfs_subsystem *subsys;
+	struct t10_alua_lu_gp *lu_gp;
+	int ret;
+
+	printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
+		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
+
+	subsys = target_core_subsystem[0];
+	config_group_init(&subsys->su_group);
+	mutex_init(&subsys->su_mutex);
+
+	INIT_LIST_HEAD(&g_tf_list);
+	mutex_init(&g_tf_lock);
+	init_scsi_index_table();
+	ret = init_se_global();
+	if (ret < 0)
+		return -1;
+	/*
+	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
+	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
+	 */
+	target_cg = &subsys->su_group;
+	target_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!(target_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&se_global->target_core_hbagroup,
+			"core", &target_core_cit);
+	target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+	target_cg->default_groups[1] = NULL;
+	/*
+	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
+	 */
+	hba_cg = &se_global->target_core_hbagroup;
+	hba_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
+				GFP_KERNEL);
+	if (!(hba_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+		goto out_global;
+	}
+	config_group_init_type_name(&se_global->alua_group,
+			"alua", &target_core_alua_cit);
+	hba_cg->default_groups[0] = &se_global->alua_group;
+	hba_cg->default_groups[1] = NULL;
+	/*
+	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
+	 * groups under /sys/kernel/config/target/core/alua/
+	 */
+	alua_cg = &se_global->alua_group;
+	alua_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
+			GFP_KERNEL);
+	if (!(alua_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&se_global->alua_lu_gps_group,
+			"lu_gps", &target_core_alua_lu_gps_cit);
+	alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+	alua_cg->default_groups[1] = NULL;
+	/*
+	 * Add core/alua/lu_gps/default_lu_gp
+	 */
+	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+	if (IS_ERR(lu_gp))
+		goto out_global;
+
+	lu_gp_cg = &se_global->alua_lu_gps_group;
+	lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
+			GFP_KERNEL);
+	if (!(lu_gp_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
+				&target_core_alua_lu_gp_cit);
+	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
+	lu_gp_cg->default_groups[1] = NULL;
+	se_global->default_lu_gp = lu_gp;
+	/*
+	 * Register the target_core_mod subsystem with configfs.
+	 */
+	ret = configfs_register_subsystem(subsys);
+	if (ret < 0) {
+		printk(KERN_ERR "Error %d while registering subsystem %s\n",
+			ret, subsys->su_group.cg_item.ci_namebuf);
+		goto out_global;
+	}
+	printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+		" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
+	/*
+	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
+	 */
+	ret = rd_module_init();
+	if (ret < 0)
+		goto out;
+
+	if (core_dev_setup_virtual_lun0() < 0)
+		goto out;
+
+	return 0;
+
+out:
+	configfs_unregister_subsystem(subsys);
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+out_global:
+	if (se_global->default_lu_gp) {
+		core_alua_free_lu_gp(se_global->default_lu_gp);
+		se_global->default_lu_gp = NULL;
+	}
+	if (lu_gp_cg)
+		kfree(lu_gp_cg->default_groups);
+	if (alua_cg)
+		kfree(alua_cg->default_groups);
+	if (hba_cg)
+		kfree(hba_cg->default_groups);
+	kfree(target_cg->default_groups);
+	release_se_global();
+	return -1;
+}
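+
+/*
+ * Editor's illustration, not part of this patch: once target_core_mod is
+ * loaded and configfs is mounted on /sys/kernel/config, the default groups
+ * built above should be visible from userspace.  A minimal sketch that
+ * checks for them (paths taken from the comments in this function):
+ *
+ *	#include <stdio.h>
+ *	#include <sys/stat.h>
+ *
+ *	int main(void)
+ *	{
+ *		const char *paths[] = {
+ *			"/sys/kernel/config/target/core",
+ *			"/sys/kernel/config/target/core/alua/lu_gps/default_lu_gp",
+ *		};
+ *		struct stat st;
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++)
+ *			printf("%s: %s\n", paths[i],
+ *			       stat(paths[i], &st) ? "missing" : "present");
+ *		return 0;
+ *	}
+ */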
+
+static void target_core_exit_configfs(void)
+{
+	struct configfs_subsystem *subsys;
+	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
+	struct config_item *item;
+	int i;
+
+	se_global->in_shutdown = 1;
+	subsys = target_core_subsystem[0];
+
+	lu_gp_cg = &se_global->alua_lu_gps_group;
+	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
+		item = &lu_gp_cg->default_groups[i]->cg_item;
+		lu_gp_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(lu_gp_cg->default_groups);
+	lu_gp_cg->default_groups = NULL;
+
+	alua_cg = &se_global->alua_group;
+	for (i = 0; alua_cg->default_groups[i]; i++) {
+		item = &alua_cg->default_groups[i]->cg_item;
+		alua_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(alua_cg->default_groups);
+	alua_cg->default_groups = NULL;
+
+	hba_cg = &se_global->target_core_hbagroup;
+	for (i = 0; hba_cg->default_groups[i]; i++) {
+		item = &hba_cg->default_groups[i]->cg_item;
+		hba_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(hba_cg->default_groups);
+	hba_cg->default_groups = NULL;
+	/*
+	 * We expect subsys->su_group.default_groups to be released
+	 * by configfs subsystem provider logic..
+	 */
+	configfs_unregister_subsystem(subsys);
+	kfree(subsys->su_group.default_groups);
+
+	core_alua_free_lu_gp(se_global->default_lu_gp);
+	se_global->default_lu_gp = NULL;
+
+	printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+			" Infrastructure\n");
+
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+	release_se_global();
+
+	return;
+}
+
+MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(target_core_init_configfs);
+module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 0000000..5da051a
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1693 @@
+/*******************************************************************************
+ * Filename:  target_core_device.c (based on iscsi_target_device.c)
+ *
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+static void se_dev_start(struct se_device *dev);
+static void se_dev_stop(struct se_device *dev);
+
+int transport_get_lun_for_cmd(
+	struct se_cmd *se_cmd,
+	unsigned char *cdb,
+	u32 unpacked_lun)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	unsigned long flags;
+	int read_only = 0;
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	deve = se_cmd->se_deve =
+			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		if (se_cmd) {
+			deve->total_cmds++;
+			deve->total_bytes += se_cmd->data_length;
+
+			if (se_cmd->data_direction == DMA_TO_DEVICE) {
+				if (deve->lun_flags &
+						TRANSPORT_LUNFLAGS_READ_ONLY) {
+					read_only = 1;
+					goto out;
+				}
+				deve->write_bytes += se_cmd->data_length;
+			} else if (se_cmd->data_direction ==
+				   DMA_FROM_DEVICE) {
+				deve->read_bytes += se_cmd->data_length;
+			}
+		}
+		deve->deve_cmds++;
+
+		se_lun = se_cmd->se_lun = deve->se_lun;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+	}
+out:
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	if (!se_lun) {
+		if (read_only) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08x\n",
+				CMD_TFO(se_cmd)->get_fabric_name(),
+				unpacked_lun);
+			return -1;
+		} else {
+			/*
+			 * Use the se_portal_group->tpg_virt_lun0 to allow for
+			 * REPORT_LUNS, et al to be returned when no active
+			 * MappedLUN=0 exists for this Initiator Port.
+			 */
+			if (unpacked_lun != 0) {
+				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+					" Access for 0x%08x\n",
+					CMD_TFO(se_cmd)->get_fabric_name(),
+					unpacked_lun);
+				return -1;
+			}
+			/*
+			 * Force WRITE PROTECT for virtual LUN 0
+			 */
+			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+			    (se_cmd->data_direction != DMA_NONE)) {
+				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				return -1;
+			}
+#if 0
+			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
+				CMD_TFO(se_cmd)->get_fabric_name());
+#endif
+			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+			se_cmd->orig_fe_lun = 0;
+			se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+		}
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+
+	{
+	struct se_device *dev = se_lun->lun_se_dev;
+	spin_lock(&dev->stats_lock);
+	dev->num_cmds++;
+	if (se_cmd->data_direction == DMA_TO_DEVICE)
+		dev->write_bytes += se_cmd->data_length;
+	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+		dev->read_bytes += se_cmd->data_length;
+	spin_unlock(&dev->stats_lock);
+	}
+
+	/*
+	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list is used
+	 * for tracking state of struct se_cmds during LUN shutdown events.
+	 */
+	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
+	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
+	atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
+#if 0
+	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
+		CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
+#endif
+	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_cmd);
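+
+/*
+ * Editor's illustration, not part of this patch: a toy model of the access
+ * decision made above.  The flag values and names are made up for the
+ * example; only the rule (a DMA_TO_DEVICE command against a read-only
+ * MappedLUN is rejected as write protected) mirrors the code.
+ *
+ *	#include <stdio.h>
+ *
+ *	#define LUN_INITIATOR_ACCESS	0x01
+ *	#define LUN_READ_ONLY		0x02
+ *
+ *	enum dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };
+ *
+ *	static int check_lun_access(unsigned int lun_flags, enum dir d)
+ *	{
+ *		if (!(lun_flags & LUN_INITIATOR_ACCESS))
+ *			return -1;	// no mapping: NON_EXISTENT_LUN path
+ *		if (d == DIR_TO_DEVICE && (lun_flags & LUN_READ_ONLY))
+ *			return -1;	// write to read-only: WRITE_PROTECTED
+ *		return 0;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		printf("%d\n", check_lun_access(LUN_INITIATOR_ACCESS,
+ *						DIR_FROM_DEVICE));	// 0
+ *		printf("%d\n", check_lun_access(LUN_INITIATOR_ACCESS |
+ *						LUN_READ_ONLY,
+ *						DIR_TO_DEVICE));	// -1
+ *		return 0;
+ *	}
+ */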
+
+int transport_get_lun_for_tmr(
+	struct se_cmd *se_cmd,
+	u32 unpacked_lun)
+{
+	struct se_device *dev = NULL;
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	deve = se_cmd->se_deve =
+			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
+		dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+	}
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	if (!se_lun) {
+		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+			" Access for 0x%08x\n",
+			CMD_TFO(se_cmd)->get_fabric_name(),
+			unpacked_lun);
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+
+	spin_lock(&dev->se_tmr_lock);
+	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
+	spin_unlock(&dev->se_tmr_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_tmr);
+
+/*
+ * This function is called from core_scsi3_emulate_pro_register_and_move()
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * when a matching rtpi is found.
+ */
+struct se_dev_entry *core_get_se_deve_from_rtpi(
+	struct se_node_acl *nacl,
+	u16 rtpi)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_port *port;
+	struct se_portal_group *tpg = nacl->se_tpg;
+	u32 i;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		lun = deve->se_lun;
+		if (!(lun)) {
+			printk(KERN_ERR "%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		port = lun->lun_sep;
+		if (!(port)) {
+			printk(KERN_ERR "%s device entries port pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		if (port->sep_rtpi != rtpi)
+			continue;
+
+		atomic_inc(&deve->pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		return deve;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	return NULL;
+}
+
+int core_free_device_list_for_node(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	u32 i;
+
+	if (!nacl->device_list)
+		return 0;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			printk(KERN_ERR "%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		lun = deve->se_lun;
+
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+		spin_lock_irq(&nacl->device_list_lock);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	kfree(nacl->device_list);
+	nacl->device_list = NULL;
+
+	return 0;
+}
+
+void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
+{
+	struct se_dev_entry *deve;
+
+	spin_lock_irq(&se_nacl->device_list_lock);
+	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
+	deve->deve_cmds--;
+	spin_unlock_irq(&se_nacl->device_list_lock);
+
+	return;
+}
+
+void core_update_device_list_access(
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[mapped_lun];
+	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+	} else {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	return;
+}
+
+/*      core_update_device_list_for_node():
+ *
+ *
+ */
+int core_update_device_list_for_node(
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl,
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg,
+	int enable)
+{
+	struct se_port *port = lun->lun_sep;
+	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+	int trans = 0;
+	/*
+	 * If the MappedLUN entry is being disabled, the entry in
+	 * port->sep_alua_list must be removed now before clearing the
+	 * struct se_dev_entry pointers below as logic in
+	 * core_alua_do_transition_tg_pt() depends on these being present.
+	 */
+	if (!(enable)) {
+		/*
+		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
+		 * that have not been explicitly converted to MappedLUNs ->
+		 * struct se_lun_acl, but we remove deve->alua_port_list from
+		 * port->sep_alua_list. This also means that active UAs and
+		 * NodeACL context specific PR metadata for demo-mode
+		 * MappedLUN *deve will be released below..
+		 */
+		spin_lock_bh(&port->sep_alua_lock);
+		list_del(&deve->alua_port_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+	}
+
+	spin_lock_irq(&nacl->device_list_lock);
+	if (enable) {
+		/*
+		 * Check if the call is handling a demo mode -> explicit LUN ACL
+		 * transition.  This transition must be for the same struct se_lun
+		 * + mapped_lun that was set up in demo mode..
+		 */
+		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+			if (deve->se_lun_acl != NULL) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+					" already set for demo mode -> explicit"
+					" LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
+				return -1;
+			}
+			if (deve->se_lun != lun) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun does"
+					" not match passed struct se_lun for demo"
+					" mode -> explicit LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
+				return -1;
+			}
+			deve->se_lun_acl = lun_acl;
+			trans = 1;
+		} else {
+			deve->se_lun = lun;
+			deve->se_lun_acl = lun_acl;
+			deve->mapped_lun = mapped_lun;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+		}
+
+		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+		}
+
+		if (trans) {
+			spin_unlock_irq(&nacl->device_list_lock);
+			return 0;
+		}
+		deve->creation_time = get_jiffies_64();
+		deve->attach_count++;
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		return 0;
+	}
+	/*
+	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
+	 * PR operation to complete.
+	 */
+	spin_unlock_irq(&nacl->device_list_lock);
+	while (atomic_read(&deve->pr_ref_count) != 0)
+		cpu_relax();
+	spin_lock_irq(&nacl->device_list_lock);
+	/*
+	 * Disable struct se_dev_entry LUN ACL mapping
+	 */
+	core_scsi3_ua_release_all(deve);
+	deve->se_lun = NULL;
+	deve->se_lun_acl = NULL;
+	deve->lun_flags = 0;
+	deve->creation_time = 0;
+	deve->attach_count--;
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
+	return 0;
+}
+
+/*      core_clear_lun_from_tpg():
+ *
+ *
+ */
+void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
+{
+	struct se_node_acl *nacl;
+	struct se_dev_entry *deve;
+	u32 i;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
+		spin_unlock_bh(&tpg->acl_node_lock);
+
+		spin_lock_irq(&nacl->device_list_lock);
+		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+			deve = &nacl->device_list[i];
+			if (lun != deve->se_lun)
+				continue;
+			spin_unlock_irq(&nacl->device_list_lock);
+
+			core_update_device_list_for_node(lun, NULL,
+				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
+				nacl, tpg, 0);
+
+			spin_lock_irq(&nacl->device_list_lock);
+		}
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&tpg->acl_node_lock);
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return;
+}
+
+static struct se_port *core_alloc_port(struct se_device *dev)
+{
+	struct se_port *port, *port_tmp;
+
+	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
+	if (!(port)) {
+		printk(KERN_ERR "Unable to allocate struct se_port\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&port->sep_alua_list);
+	INIT_LIST_HEAD(&port->sep_list);
+	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+	spin_lock_init(&port->sep_alua_lock);
+	mutex_init(&port->sep_tg_pt_md_mutex);
+
+	spin_lock(&dev->se_port_lock);
+	if (dev->dev_port_count == 0x0000ffff) {
+		printk(KERN_WARNING "Reached dev->dev_port_count =="
+				" 0x0000ffff\n");
+		spin_unlock(&dev->se_port_lock);
+		return NULL;
+	}
+again:
+	/*
+	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
+	 * Here is the table from spc4r17 section 7.7.3.8.
+	 *
+	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
+	 *
+	 * Code      Description
+	 * 0h        Reserved
+	 * 1h        Relative port 1, historically known as port A
+	 * 2h        Relative port 2, historically known as port B
+	 * 3h to FFFFh    Relative port 3 through 65 535
+	 */
+	port->sep_rtpi = dev->dev_rpti_counter++;
+	if (!(port->sep_rtpi))
+		goto again;
+
+	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+		/*
+		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
+		 * for 16-bit wrap..
+		 */
+		if (port->sep_rtpi == port_tmp->sep_rtpi)
+			goto again;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return port;
+}
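+
+/*
+ * Editor's illustration, not part of this patch: a standalone toy model of
+ * the RELATIVE TARGET PORT IDENTIFIER allocation above -- a 16-bit counter
+ * that skips the reserved value 0h and retries on collision with
+ * identifiers already in use.
+ *
+ *	#include <stdio.h>
+ *
+ *	static unsigned short next_rtpi(unsigned short *counter,
+ *					const unsigned short *in_use, int n)
+ *	{
+ *		unsigned short rtpi;
+ *		int i;
+ *
+ *	again:
+ *		rtpi = (*counter)++;	// wraps naturally at 16 bits
+ *		if (!rtpi)
+ *			goto again;	// 0h is reserved by spc4r17
+ *		for (i = 0; i < n; i++)
+ *			if (in_use[i] == rtpi)
+ *				goto again;
+ *		return rtpi;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		unsigned short counter = 0xffff;
+ *		unsigned short in_use[] = { 0xffff, 0x0001 };
+ *
+ *		// 0xffff and 0x0001 are taken and 0x0000 is skipped, so the
+ *		// next two identifiers handed out are 0x0002 and 0x0003.
+ *		printf("0x%04hx\n", next_rtpi(&counter, in_use, 2));
+ *		printf("0x%04hx\n", next_rtpi(&counter, in_use, 2));
+ *		return 0;
+ *	}
+ */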
+
+static void core_export_port(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_port *port,
+	struct se_lun *lun)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
+
+	spin_lock(&dev->se_port_lock);
+	spin_lock(&lun->lun_sep_lock);
+	port->sep_tpg = tpg;
+	port->sep_lun = lun;
+	lun->lun_sep = port;
+	spin_unlock(&lun->lun_sep_lock);
+
+	list_add_tail(&port->sep_list, &dev->dev_sep_list);
+	spin_unlock(&dev->se_port_lock);
+
+	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
+		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
+			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+					"_gp_member_t\n");
+			return;
+		}
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+			T10_ALUA(su_dev)->default_tg_pt_gp);
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+			" Group: alua/default_tg_pt_gp\n",
+			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+	}
+
+	dev->dev_port_count++;
+	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
+}
+
+/*
+ *	Called with struct se_device->se_port_lock spinlock held.
+ */
+static void core_release_port(struct se_device *dev, struct se_port *port)
+{
+	/*
+	 * Wait for any port reference for PR ALL_TG_PT=1 operation
+	 * to complete in __core_scsi3_alloc_registration()
+	 */
+	spin_unlock(&dev->se_port_lock);
+	while (atomic_read(&port->sep_tg_pt_ref_cnt))
+		cpu_relax();
+	spin_lock(&dev->se_port_lock);
+
+	core_alua_free_tg_pt_gp_mem(port);
+
+	list_del(&port->sep_list);
+	dev->dev_port_count--;
+	kfree(port);
+
+	return;
+}
+
+int core_dev_export(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port;
+
+	port = core_alloc_port(dev);
+	if (!(port))
+		return -1;
+
+	lun->lun_se_dev = dev;
+	se_dev_start(dev);
+
+	atomic_inc(&dev->dev_export_obj.obj_access_count);
+	core_export_port(dev, tpg, port, lun);
+	return 0;
+}
+
+void core_dev_unexport(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port = lun->lun_sep;
+
+	spin_lock(&lun->lun_sep_lock);
+	if (lun->lun_se_dev == NULL) {
+		spin_unlock(&lun->lun_sep_lock);
+		return;
+	}
+	spin_unlock(&lun->lun_sep_lock);
+
+	spin_lock(&dev->se_port_lock);
+	atomic_dec(&dev->dev_export_obj.obj_access_count);
+	core_release_port(dev, port);
+	spin_unlock(&dev->se_port_lock);
+
+	se_dev_stop(dev);
+	lun->lun_se_dev = NULL;
+}
+
+int transport_core_report_lun_response(struct se_cmd *se_cmd)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	struct se_task *se_task;
+	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+	u32 cdb_offset = 0, lun_count = 0, offset = 8;
+	u64 i, lun;
+
+	list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+		break;
+
+	if (!(se_task)) {
+		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	/*
+	 * If no struct se_session pointer is present, this struct se_cmd is
+	 * coming via a target_core_mod PASSTHROUGH op, and not through
+	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
+	 */
+	if (!(se_sess)) {
+		lun = 0;
+		buf[offset++] = ((lun >> 56) & 0xff);
+		buf[offset++] = ((lun >> 48) & 0xff);
+		buf[offset++] = ((lun >> 40) & 0xff);
+		buf[offset++] = ((lun >> 32) & 0xff);
+		buf[offset++] = ((lun >> 24) & 0xff);
+		buf[offset++] = ((lun >> 16) & 0xff);
+		buf[offset++] = ((lun >> 8) & 0xff);
+		buf[offset++] = (lun & 0xff);
+		lun_count = 1;
+		goto done;
+	}
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &SE_NODE_ACL(se_sess)->device_list[i];
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+		se_lun = deve->se_lun;
+		/*
+		 * We determine the correct LUN LIST LENGTH even once we
+		 * have reached the initial allocation length.
+		 * See SPC2-R20 7.19.
+		 */
+		lun_count++;
+		if ((cdb_offset + 8) >= se_cmd->data_length)
+			continue;
+
+		lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
+		buf[offset++] = ((lun >> 56) & 0xff);
+		buf[offset++] = ((lun >> 48) & 0xff);
+		buf[offset++] = ((lun >> 40) & 0xff);
+		buf[offset++] = ((lun >> 32) & 0xff);
+		buf[offset++] = ((lun >> 24) & 0xff);
+		buf[offset++] = ((lun >> 16) & 0xff);
+		buf[offset++] = ((lun >> 8) & 0xff);
+		buf[offset++] = (lun & 0xff);
+		cdb_offset += 8;
+	}
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	/*
+	 * See SPC3 r07, page 159.
+	 */
+done:
+	lun_count *= 8;
+	buf[0] = ((lun_count >> 24) & 0xff);
+	buf[1] = ((lun_count >> 16) & 0xff);
+	buf[2] = ((lun_count >> 8) & 0xff);
+	buf[3] = (lun_count & 0xff);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
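+
+/*
+ * Editor's illustration, not part of this patch: a standalone toy model of
+ * the REPORT LUNS payload built above -- a 4-byte big-endian LUN LIST
+ * LENGTH (entries * 8), 4 reserved bytes, then one 8-byte big-endian LUN
+ * entry per mapped LUN starting at offset 8.  Entries that do not fit in
+ * the allocation length are counted in the header but not written.  The
+ * pre-packed LUN values in main() are made up for the example.
+ *
+ *	#include <stdio.h>
+ *	#include <stdint.h>
+ *	#include <string.h>
+ *
+ *	static void put_be64(unsigned char *p, uint64_t v)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < 8; i++)
+ *			p[i] = (v >> (56 - 8 * i)) & 0xff;
+ *	}
+ *
+ *	static size_t build_report_luns(unsigned char *buf, size_t alloc_len,
+ *					const uint64_t *luns, unsigned int n)
+ *	{
+ *		uint32_t list_len = n * 8;
+ *		size_t off = 8;
+ *		unsigned int i;
+ *
+ *		memset(buf, 0, alloc_len);
+ *		buf[0] = (list_len >> 24) & 0xff;
+ *		buf[1] = (list_len >> 16) & 0xff;
+ *		buf[2] = (list_len >> 8) & 0xff;
+ *		buf[3] = list_len & 0xff;
+ *		for (i = 0; i < n && off + 8 <= alloc_len; i++, off += 8)
+ *			put_be64(buf + off, luns[i]);
+ *		return off;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		unsigned char buf[32];
+ *		uint64_t luns[] = { 0, 1ULL << 48 };
+ *		size_t len = build_report_luns(buf, sizeof(buf), luns, 2), i;
+ *
+ *		for (i = 0; i < len; i++)
+ *			printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
+ *		return 0;
+ *	}
+ */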
+
+/*	se_release_device_for_hba():
+ *
+ *
+ */
+void se_release_device_for_hba(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
+		se_dev_stop(dev);
+
+	if (dev->dev_ptr) {
+		kthread_stop(dev->process_thread);
+		if (dev->transport->free_device)
+			dev->transport->free_device(dev->dev_ptr);
+	}
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	core_scsi3_free_all_registrations(dev);
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev->dev_status_queue_obj);
+	kfree(dev->dev_queue_obj);
+	kfree(dev);
+
+	return;
+}
+
+void se_release_vpd_for_dev(struct se_device *dev)
+{
+	struct t10_vpd *vpd, *vpd_tmp;
+
+	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+	list_for_each_entry_safe(vpd, vpd_tmp,
+			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+		list_del(&vpd->vpd_list);
+		kfree(vpd);
+	}
+	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+
+	return;
+}
+
+/*
+ * Called with struct se_hba->device_lock held.
+ */
+void se_clear_dev_ports(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	struct se_port *sep, *sep_tmp;
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+		spin_unlock(&dev->se_port_lock);
+		spin_unlock(&hba->device_lock);
+
+		lun = sep->sep_lun;
+		tpg = sep->sep_tpg;
+		spin_lock(&lun->lun_sep_lock);
+		if (lun->lun_se_dev == NULL) {
+			spin_unlock(&lun->lun_sep_lock);
+			continue;
+		}
+		spin_unlock(&lun->lun_sep_lock);
+
+		core_dev_del_lun(tpg, lun->unpacked_lun);
+
+		spin_lock(&hba->device_lock);
+		spin_lock(&dev->se_port_lock);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return;
+}
+
+/*	se_free_virtual_device():
+ *
+ *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
+ */
+int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
+{
+	spin_lock(&hba->device_lock);
+	se_clear_dev_ports(dev);
+	spin_unlock(&hba->device_lock);
+
+	core_alua_free_lu_gp_mem(dev);
+	se_release_device_for_hba(dev);
+
+	return 0;
+}
+
+static void se_dev_start(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	spin_lock(&hba->device_lock);
+	atomic_inc(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
+		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
+			dev->dev_status &=
+				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+}
+
+static void se_dev_stop(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	spin_lock(&hba->device_lock);
+	atomic_dec(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
+		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+}
+
+int se_dev_check_online(struct se_device *dev)
+{
+	int ret;
+
+	spin_lock_irq(&dev->dev_status_lock);
+	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
+	spin_unlock_irq(&dev->dev_status_lock);
+
+	return ret;
+}
+
+int se_dev_check_shutdown(struct se_device *dev)
+{
+	int ret;
+
+	spin_lock_irq(&dev->dev_status_lock);
+	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
+	spin_unlock_irq(&dev->dev_status_lock);
+
+	return ret;
+}
+
+void se_dev_set_default_attribs(
+	struct se_device *dev,
+	struct se_dev_limits *dev_limits)
+{
+	struct queue_limits *limits = &dev_limits->limits;
+
+	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
+	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
+	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
+	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
+	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
+	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
+	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
+	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
+	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+	/*
+	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
+	 * iblock_create_virtdevice() from struct queue_limits values
+	 * if blk_queue_discard()==1
+	 */
+	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
+				DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+	DEV_ATTRIB(dev)->unmap_granularity_alignment =
+				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+	/*
+	 * block_size is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
+	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+	/*
+	 * max_sectors is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
+	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+	/*
+	 * Set optimal_sectors from max_sectors, which can be lowered via
+	 * configfs.
+	 */
+	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+	/*
+	 * queue_depth is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
+	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+}
+
+int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
+{
+	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
+		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
+			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
+		return -1;
+	} else {
+		DEV_ATTRIB(dev)->task_timeout = task_timeout;
+		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+			dev, task_timeout);
+	}
+
+	return 0;
+}
+
+int se_dev_set_max_unmap_lba_count(
+	struct se_device *dev,
+	u32 max_unmap_lba_count)
+{
+	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
+	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
+			dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+	return 0;
+}
+
+int se_dev_set_max_unmap_block_desc_count(
+	struct se_device *dev,
+	u32 max_unmap_block_desc_count)
+{
+	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
+	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
+			dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity(
+	struct se_device *dev,
+	u32 unmap_granularity)
+{
+	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
+	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
+			dev, DEV_ATTRIB(dev)->unmap_granularity);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity_alignment(
+	struct se_device *dev,
+	u32 unmap_granularity_alignment)
+{
+	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
+	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
+			dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+	return 0;
+}
+
+int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->dpo_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_dpo = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
+			" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+	return 0;
+}
+
+int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_fua_write = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_fua_write);
+	return 0;
+}
+
+int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_fua_read = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_fua_read);
+	return 0;
+}
+
+int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_write_cache = flag;
+	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_write_cache);
+	return 0;
+}
+
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1) && (flag != 2)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
+			" exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
+	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+
+	return 0;
+}
+
+int se_dev_set_emulate_tas(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_tas = flag;
+	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+
+	return 0;
+}
+
+int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+		printk(KERN_ERR "Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	DEV_ATTRIB(dev)->emulate_tpu = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+		printk(KERN_ERR "Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	DEV_ATTRIB(dev)->emulate_tpws = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
+	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+	return 0;
+}
+
+/*
+ * Note, this can only be called on unexported SE Device Object.
+ */
+int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
+{
+	u32 orig_queue_depth = dev->queue_depth;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	if (!(queue_depth)) {
+		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+			"_depth\n", dev);
+		return -1;
+	}
+
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+				" exceeds TCM/SE_Device TCQ: %u\n",
+				dev, queue_depth,
+				DEV_ATTRIB(dev)->hw_queue_depth);
+			return -1;
+		}
+	} else {
+		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
+			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+					" %u exceeds TCM/SE_Device MAX"
+					" TCQ: %u\n", dev, queue_depth,
+					DEV_ATTRIB(dev)->hw_queue_depth);
+				return -1;
+			}
+		}
+	}
+
+	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+	if (queue_depth > orig_queue_depth)
+		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
+	else if (queue_depth < orig_queue_depth)
+		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
+
+	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+			dev, queue_depth);
+	return 0;
+}
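+
+/*
+ * Illustrative usage only (the exact configfs path depends on the HBA and
+ * device names chosen by the administrator):
+ *
+ *   echo 64 > /sys/kernel/config/target/core/$HBA/$DEV/attrib/queue_depth
+ *
+ * For non-pSCSI backstores the new value may be raised above the current
+ * queue_depth but never above hw_queue_depth; dev->depth_left is adjusted
+ * by the delta so that in-flight command accounting stays consistent.
+ */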
+
+int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+{
+	int force = 0; /* Force setting for VDEVS */
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" max_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	if (!(max_sectors)) {
+		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+			" max_sectors\n", dev);
+		return -1;
+	}
+	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MIN);
+		return -1;
+	}
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors:"
+				" %u\n", dev, max_sectors,
+				DEV_ATTRIB(dev)->hw_max_sectors);
+			 return -1;
+		}
+	} else {
+		if (!(force) && (max_sectors >
+				 DEV_ATTRIB(dev)->hw_max_sectors)) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors"
+				": %u, use force=1 to override.\n", dev,
+				max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
+			return -1;
+		}
+		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than DA_STATUS_MAX_SECTORS_MAX:"
+				" %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MAX);
+			return -1;
+		}
+	}
+
+	DEV_ATTRIB(dev)->max_sectors = max_sectors;
+	printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
+			dev, max_sectors);
+	return 0;
+}
+
+int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" optimal_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+				" changed for TCM/pSCSI\n", dev);
+		return -EINVAL;
+	}
+	if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
+		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+			" greater than max_sectors: %u\n", dev,
+			optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+		return -EINVAL;
+	}
+
+	DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
+	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+			dev, optimal_sectors);
+	return 0;
+}
+
+int se_dev_set_block_size(struct se_device *dev, u32 block_size)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+			" while dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+
+	if ((block_size != 512) &&
+	    (block_size != 1024) &&
+	    (block_size != 2048) &&
+	    (block_size != 4096)) {
+		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
+			" for SE device, must be 512, 1024, 2048 or 4096\n",
+			dev, block_size);
+		return -1;
+	}
+
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+			" Physical Device; use Linux/SCSI to change the"
+			" block_size of the underlying hardware\n", dev);
+		return -1;
+	}
+
+	DEV_ATTRIB(dev)->block_size = block_size;
+	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+			dev, block_size);
+	return 0;
+}
+
+struct se_lun *core_dev_add_lun(
+	struct se_portal_group *tpg,
+	struct se_hba *hba,
+	struct se_device *dev,
+	u32 lun)
+{
+	struct se_lun *lun_p;
+	u32 lun_access = 0;
+
+	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
+		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+			atomic_read(&dev->dev_access_obj.obj_access_count));
+		return NULL;
+	}
+
+	lun_p = core_tpg_pre_addlun(tpg, lun);
+	if ((IS_ERR(lun_p)) || !(lun_p))
+		return NULL;
+
+	if (dev->dev_flags & DF_READ_ONLY)
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	else
+		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+
+	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
+		return NULL;
+
+	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
+		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+	/*
+	 * Update LUN maps for dynamically added initiators when
+	 * generate_node_acl is enabled.
+	 */
+	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+		struct se_node_acl *acl;
+		spin_lock_bh(&tpg->acl_node_lock);
+		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+			if (acl->dynamic_node_acl) {
+				spin_unlock_bh(&tpg->acl_node_lock);
+				core_tpg_add_node_to_devs(acl, tpg);
+				spin_lock_bh(&tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_bh(&tpg->acl_node_lock);
+	}
+
+	return lun_p;
+}
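+
+/*
+ * core_dev_add_lun() is normally reached from the fabric configfs port
+ * link callback, i.e. when userspace symlinks a backstore device into a
+ * TPG LUN, for example (paths illustrative):
+ *
+ *   ln -s /sys/kernel/config/target/core/$HBA/$DEV \
+ *         /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/port
+ *
+ * When demo mode (generate_node_acls) is active, the new LUN is also
+ * mapped into every dynamically generated node ACL as shown above.
+ */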
+
+/*      core_dev_del_lun():
+ *
+ *
+ */
+int core_dev_del_lun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+	int ret = 0;
+
+	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
+	if (!(lun))
+		return ret;
+
+	core_tpg_post_dellun(tpg, lun);
+
+	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
+		TPG_TFO(tpg)->get_fabric_name());
+
+	return 0;
+}
+
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+/*      core_dev_get_lun():
+ *
+ *
+ */
+static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+			"_TPG-1: %u for Target Portal Group: %hu\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	u32 mapped_lun,
+	char *initiatorname,
+	int *ret)
+{
+	struct se_lun_acl *lacl;
+	struct se_node_acl *nacl;
+
+	if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
+			TPG_TFO(tpg)->get_fabric_name());
+		*ret = -EOVERFLOW;
+		return NULL;
+	}
+	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (!(nacl)) {
+		*ret = -EINVAL;
+		return NULL;
+	}
+	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+	if (!(lacl)) {
+		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+		*ret = -ENOMEM;
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&lacl->lacl_list);
+	lacl->mapped_lun = mapped_lun;
+	lacl->se_lun_nacl = nacl;
+	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+
+	return lacl;
+}
+
+int core_dev_add_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl,
+	u32 unpacked_lun,
+	u32 lun_access)
+{
+	struct se_lun *lun;
+	struct se_node_acl *nacl;
+
+	lun = core_dev_get_lun(tpg, unpacked_lun);
+	if (!(lun)) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return -EINVAL;
+	}
+
+	nacl = lacl->se_lun_nacl;
+	if (!(nacl))
+		return -EINVAL;
+
+	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
+	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+
+	lacl->se_lun = lun;
+
+	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
+			lun_access, nacl, tpg, 1) < 0)
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
+	atomic_inc(&lun->lun_acl_count);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&lun->lun_acl_lock);
+
+	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
+		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+		lacl->initiatorname);
+	/*
+	 * Check to see if there are any existing persistent reservation APTPL
+	 * pre-registrations that need to be enabled for this LUN ACL..
+	 */
+	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+	return 0;
+}
+
+/*      core_dev_del_initiator_node_lun_acl():
+ *
+ *
+ */
+int core_dev_del_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lacl)
+{
+	struct se_node_acl *nacl;
+
+	nacl = lacl->se_lun_nacl;
+	if (!(nacl))
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_del(&lacl->lacl_list);
+	atomic_dec(&lun->lun_acl_count);
+	smp_mb__after_atomic_dec();
+	spin_unlock(&lun->lun_acl_lock);
+
+	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
+		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+	lacl->se_lun = NULL;
+
+	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+		" InitiatorNode: %s Mapped LUN: %u\n",
+		TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+		lacl->initiatorname, lacl->mapped_lun);
+
+	return 0;
+}
+
+void core_dev_free_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl)
+{
+	printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		TPG_TFO(tpg)->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun);
+
+	kfree(lacl);
+}
+
+int core_dev_setup_virtual_lun0(void)
+{
+	struct se_hba *hba;
+	struct se_device *dev;
+	struct se_subsystem_dev *se_dev = NULL;
+	struct se_subsystem_api *t;
+	char buf[16];
+	int ret;
+
+	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+	if (IS_ERR(hba))
+		return PTR_ERR(hba);
+
+	se_global->g_lun0_hba = hba;
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!(se_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_reservation.registration_lock);
+	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+	se_dev->se_dev_hba = hba;
+
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	se_global->g_lun0_su_dev = se_dev;
+
+	memset(buf, 0, 16);
+	sprintf(buf, "rd_pages=8");
+	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (!(dev) || IS_ERR(dev)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	se_dev->se_dev_ptr = dev;
+	se_global->g_lun0_dev = dev;
+
+	return 0;
+out:
+	se_global->g_lun0_su_dev = NULL;
+	kfree(se_dev);
+	if (se_global->g_lun0_hba) {
+		core_delete_hba(se_global->g_lun0_hba);
+		se_global->g_lun0_hba = NULL;
+	}
+	return ret;
+}
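+
+/*
+ * The virtual LUN 0 set up above lives on an internal "rd_dr" ramdisk HBA
+ * configured with "rd_pages=8"; assuming 4 KiB pages that is a 32 KiB
+ * scratch device, intended as a default LUN 0 for initiators that do not
+ * yet have any real LUNs mapped.
+ */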
+
+void core_dev_release_virtual_lun0(void)
+{
+	struct se_hba *hba = se_global->g_lun0_hba;
+	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+
+	if (!(hba))
+		return;
+
+	if (se_global->g_lun0_dev)
+		se_free_virtual_device(se_global->g_lun0_dev, hba);
+
+	kfree(su_dev);
+	core_delete_hba(hba);
+}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 0000000..b65d1c8
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,1034 @@
+/*******************************************************************************
+ * Filename: target_core_fabric_configfs.c
+ *
+ * This file contains generic fabric module configfs infrastructure for
+ * TCM v4.x code
+ *
+ * Copyright (c) 2010 Rising Tide Systems
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{									\
+	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;	\
+	struct config_item_type *cit = &tfc->tfc_##_name##_cit;		\
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = _attrs;						\
+	cit->ct_owner = tf->tf_module;					\
+	printk("Setup generic %s\n", __stringify(_name));		\
+}
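+
+/*
+ * For reference, an invocation such as
+ *
+ *   TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+ *
+ * expands into a target_fabric_setup_tpg_np_cit() helper that fills in
+ * tf->tf_cit_tmpl.tfc_tpg_np_cit with the given item ops, group ops and
+ * attribute array, and sets ct_owner to the owning fabric module.
+ */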
+
+/* Start of tfc_tpg_mappedlun_cit */
+
+static int target_fabric_mappedlun_link(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg;
+	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+	int ret = 0, lun_access;
+	/*
+	 * Ensure that the source port exists
+	 */
+	if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
+		printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+				"_tpg does not exist\n");
+		return -EINVAL;
+	}
+	se_tpg = lun->lun_sep->sep_tpg;
+
+	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+	tpg_ci = &nacl_ci->ci_group->cg_item;
+	wwn_ci = &tpg_ci->ci_group->cg_item;
+	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
+	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
+	/*
+	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
+	 */
+	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
+		printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+			config_item_name(wwn_ci));
+		return -EINVAL;
+	}
+	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
+		printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+			" TPGT: %s\n", config_item_name(wwn_ci),
+			config_item_name(tpg_ci));
+		return -EINVAL;
+	}
+	/*
+	 * If this struct se_node_acl was dynamically generated with
+	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
+	 * which will be write protected (READ-ONLY) when
+	 * tpg_1/attrib/demo_mode_write_protect=1
+	 */
+	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
+	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+		lun_access = deve->lun_flags;
+	else
+		lun_access =
+			(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
+					   TRANSPORT_LUNFLAGS_READ_WRITE;
+	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+	/*
+	 * Determine the actual mapped LUN value the user wants.
+	 *
+	 * This is the LUN value that the SCSI Initiator actually sees for
+	 * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports.
+	 */
+	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
+			lun->unpacked_lun, lun_access);
+
+	return (ret < 0) ? -EINVAL : 0;
+}
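+
+/*
+ * The allow_link callback above fires when userspace symlinks a TPG LUN
+ * into a per-initiator mapped LUN directory, e.g. (paths illustrative):
+ *
+ *   ln -s ../../../../lun/lun_0 \
+ *      /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/acls/$INITIATOR/lun_0/link
+ *
+ * Both ends of the symlink must live under the same $FABRIC/$WWN/tpgt_$TPGT,
+ * which is what the wwn_ci/tpg_ci name comparisons above enforce.
+ */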
+
+static int target_fabric_mappedlun_unlink(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_lun *lun;
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
+	struct se_portal_group *se_tpg;
+	/*
+	 * Determine if the underlying MappedLUN has already been released..
+	 */
+	if (!(deve->se_lun))
+		return 0;
+
+	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+	se_tpg = lun->lun_sep->sep_tpg;
+
+	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
+	return 0;
+}
+
+CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
+#define TCM_MAPPEDLUN_ATTR(_name, _mode)				\
+static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_mappedlun_show_##_name,				\
+	target_fabric_mappedlun_store_##_name);
+
+static ssize_t target_fabric_mappedlun_show_write_protect(
+	struct se_lun_acl *lacl,
+	char *page)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t len;
+
+	spin_lock_irq(&se_nacl->device_list_lock);
+	deve = &se_nacl->device_list[lacl->mapped_lun];
+	len = sprintf(page, "%d\n",
+			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
+			1 : 0);
+	spin_unlock_irq(&se_nacl->device_list_lock);
+
+	return len;
+}
+
+static ssize_t target_fabric_mappedlun_store_write_protect(
+	struct se_lun_acl *lacl,
+	const char *page,
+	size_t count)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	unsigned long op;
+
+	if (strict_strtoul(page, 0, &op))
+		return -EINVAL;
+
+	if ((op != 1) && (op != 0))
+		return -EINVAL;
+
+	core_update_device_list_access(lacl->mapped_lun, (op) ?
+			TRANSPORT_LUNFLAGS_READ_ONLY :
+			TRANSPORT_LUNFLAGS_READ_WRITE,
+			lacl->se_lun_nacl);
+
+	printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+		" Mapped LUN: %u Write Protect bit to %s\n",
+		TPG_TFO(se_tpg)->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+
+	return count;
+
+}
+
+TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+				struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+	&target_fabric_mappedlun_write_protect.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.release		= target_fabric_mappedlun_release,
+	.show_attribute		= target_fabric_mappedlun_attr_show,
+	.store_attribute	= target_fabric_mappedlun_attr_store,
+	.allow_link		= target_fabric_mappedlun_link,
+	.drop_link		= target_fabric_mappedlun_unlink,
+};
+
+TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
+		target_fabric_mappedlun_attrs);
+
+/* End of tfc_tpg_mappedlun_cit */
+
+/* Start of tfc_tpg_nacl_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
+
+static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
+	.show_attribute		= target_fabric_nacl_attrib_attr_show,
+	.store_attribute	= target_fabric_nacl_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_attrib_cit */
+
+/* Start of tfc_tpg_nacl_auth_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
+
+static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
+	.show_attribute		= target_fabric_nacl_auth_attr_show,
+	.store_attribute	= target_fabric_nacl_auth_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_auth_cit */
+
+/* Start of tfc_tpg_nacl_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
+
+static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
+	.show_attribute		= target_fabric_nacl_param_attr_show,
+	.store_attribute	= target_fabric_nacl_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_param_cit */
+
+/* Start of tfc_tpg_nacl_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
+
+static struct config_group *target_fabric_make_mappedlun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl = container_of(group,
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_lun_acl *lacl;
+	struct config_item *acl_ci;
+	char *buf;
+	unsigned long mapped_lun;
+	int ret = 0;
+
+	acl_ci = &group->cg_item;
+	if (!(acl_ci)) {
+		printk(KERN_ERR "Unable to locate acl_ci\n");
+		return NULL;
+	}
+
+	buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for name buf\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	snprintf(buf, strlen(name) + 1, "%s", name);
+	/*
+	 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
+	 */
+	if (strstr(buf, "lun_") != buf) {
+		printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+			" name: %s\n", buf, name);
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * Determine the Mapped LUN value.  This is what the SCSI Initiator
+	 * Port will actually see.
+	 */
+	if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+			config_item_name(acl_ci), &ret);
+	if (!(lacl))
+		goto out;
+
+	config_group_init_type_name(&lacl->se_lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+
+	kfree(buf);
+	return &lacl->se_lun_group;
+out:
+	kfree(buf);
+	return ERR_PTR(ret);
+}
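+
+/*
+ * target_fabric_make_mappedlun() is reached via mkdir of a "lun_$ID"
+ * directory underneath an initiator ACL, e.g. (illustrative):
+ *
+ *   mkdir /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/acls/$INITIATOR/lun_0
+ *
+ * The numeric suffix becomes the mapped LUN value this initiator will see.
+ */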
+
+static void target_fabric_drop_mappedlun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	config_item_put(item);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+}
+
+static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.release		= target_fabric_nacl_base_release,
+	.show_attribute		= target_fabric_nacl_base_attr_show,
+	.store_attribute	= target_fabric_nacl_base_attr_store,
+};
+
+static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
+	.make_group		= target_fabric_make_mappedlun,
+	.drop_item		= target_fabric_drop_mappedlun,
+};
+
+TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+		&target_fabric_nacl_base_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_base_cit */
+
+/* Start of tfc_tpg_nacl_cit */
+
+static struct config_group *target_fabric_make_nodeacl(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_acl_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_node_acl *se_nacl;
+	struct config_group *nacl_cg;
+
+	if (!(tf->tf_ops.fabric_make_nodeacl)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+	if (IS_ERR(se_nacl))
+		return ERR_PTR(PTR_ERR(se_nacl));
+
+	nacl_cg = &se_nacl->acl_group;
+	nacl_cg->default_groups = se_nacl->acl_default_groups;
+	nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
+	nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
+	nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
+	nacl_cg->default_groups[3] = NULL;
+
+	config_group_init_type_name(&se_nacl->acl_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+	config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+	config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+	config_group_init_type_name(&se_nacl->acl_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+
+	return &se_nacl->acl_group;
+}
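+
+/*
+ * Each node ACL created above is populated with three default groups, so
+ * a mkdir of acls/$INITIATOR yields (illustrative layout):
+ *
+ *   acls/$INITIATOR/
+ *   |-- attrib/
+ *   |-- auth/
+ *   `-- param/
+ *
+ * plus any lun_$ID mapped LUN directories created later by the admin.
+ */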
+
+static void target_fabric_drop_nodeacl(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct config_item *df_item;
+	struct config_group *nacl_cg;
+	int i;
+
+	nacl_cg = &se_nacl->acl_group;
+	for (i = 0; nacl_cg->default_groups[i]; i++) {
+		df_item = &nacl_cg->default_groups[i]->cg_item;
+		nacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	/*
+	 * struct se_node_acl free is done in target_fabric_nacl_base_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_nacl_group_ops = {
+	.make_group	= target_fabric_make_nodeacl,
+	.drop_item	= target_fabric_drop_nodeacl,
+};
+
+TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_cit */
+
+/* Start of tfc_tpg_np_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+
+static void target_fabric_np_base_release(struct config_item *item)
+{
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+				struct se_tpg_np, tpg_np_group);
+	struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
+static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.release		= target_fabric_np_base_release,
+	.show_attribute		= target_fabric_np_base_attr_show,
+	.store_attribute	= target_fabric_np_base_attr_store,
+};
+
+TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_np_base_cit */
+
+/* Start of tfc_tpg_np_cit */
+
+static struct config_group *target_fabric_make_np(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+				struct se_portal_group, tpg_np_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_tpg_np *se_tpg_np;
+
+	if (!(tf->tf_ops.fabric_make_np)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+	if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+		return ERR_PTR(-EINVAL);
+
+	se_tpg_np->tpg_np_parent = se_tpg;
+	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+
+	return &se_tpg_np->tpg_np_group;
+}
+
+static void target_fabric_drop_np(
+	struct config_group *group,
+	struct config_item *item)
+{
+	/*
+	 * struct se_tpg_np is released via target_fabric_np_base_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_np_group_ops = {
+	.make_group	= &target_fabric_make_np,
+	.drop_item	= &target_fabric_drop_np,
+};
+
+TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+
+/* End of tfc_tpg_np_cit */
+
+/* Start of tfc_tpg_port_cit */
+
+CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
+#define TCM_PORT_ATTR(_name, _mode)					\
+static struct target_fabric_port_attribute target_fabric_port_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_port_show_attr_##_name,				\
+	target_fabric_port_store_attr_##_name);
+
+#define TCM_PORT_ATTR_RO(_name)					\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_fabric_port_show_attr_##_name);
+
+/*
+ * alua_tg_pt_gp
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_offline
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_offline_bit(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_offline_bit(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_status
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_secondary_status(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_secondary_status(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_write_md
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_secondary_write_metadata(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_secondary_write_metadata(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *target_fabric_port_attrs[] = {
+	&target_fabric_port_alua_tg_pt_gp.attr,
+	&target_fabric_port_alua_tg_pt_offline.attr,
+	&target_fabric_port_alua_tg_pt_status.attr,
+	&target_fabric_port_alua_tg_pt_write_md.attr,
+	NULL,
+};
+
+CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
+
+static int target_fabric_port_link(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct config_item *tpg_ci;
+	struct se_device *dev;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_lun *lun_p;
+	struct se_portal_group *se_tpg;
+	struct se_subsystem_dev *se_dev = container_of(
+				to_config_group(se_dev_ci), struct se_subsystem_dev,
+				se_dev_group);
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+	se_tpg = container_of(to_config_group(tpg_ci),
+				struct se_portal_group, tpg_group);
+	tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (lun->lun_se_dev != NULL) {
+		printk(KERN_ERR "Port Symlink already exists\n");
+		return -EEXIST;
+	}
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev)) {
+		printk(KERN_ERR "Unable to locate struct se_device pointer from"
+			" %s\n", config_item_name(se_dev_ci));
+		ret = -ENODEV;
+		goto out;
+	}
+
+	lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
+				lun->unpacked_lun);
+	if ((IS_ERR(lun_p)) || !(lun_p)) {
+		printk(KERN_ERR "core_dev_add_lun() failed\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (tf->tf_ops.fabric_post_link) {
+		/*
+		 * Call the optional fabric_post_link() to allow a
+		 * fabric module to setup any additional state once
+		 * core_dev_add_lun() has been called..
+		 */
+		tf->tf_ops.fabric_post_link(se_tpg, lun);
+	}
+
+	return 0;
+out:
+	return ret;
+}
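+
+/*
+ * This is the backstore to fabric link: the se_dev_ci end of the symlink
+ * is a /sys/kernel/config/target/core/$HBA/$DEV group, the lun_ci end is
+ * a tpgt_$TAG/lun/lun_$ID group (paths illustrative).  On success
+ * core_dev_add_lun() activates the LUN, and the optional fabric_post_link()
+ * hook lets the fabric module perform any transport specific setup.
+ */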
+
+static int target_fabric_port_unlink(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (tf->tf_ops.fabric_pre_unlink) {
+		/*
+		 * Call the optional fabric_pre_unlink() to allow a
+		 * fabric module to release any additional state before
+		 * core_dev_del_lun() is called.
+		 */
+		tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+	}
+
+	core_dev_del_lun(se_tpg, lun->unpacked_lun);
+	return 0;
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+	.show_attribute		= target_fabric_port_attr_show,
+	.store_attribute	= target_fabric_port_attr_store,
+	.allow_link		= target_fabric_port_link,
+	.drop_link		= target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_lun_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	unsigned long unpacked_lun;
+
+	if (strstr(name, "lun_") != name) {
+		printk(KERN_ERR "Unable to locate \"lun_\" in"
+				" \"lun_$LUN_NUMBER\"\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
+	if (!(lun))
+		return ERR_PTR(-EINVAL);
+
+	config_group_init_type_name(&lun->lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+
+	return &lun->lun_group;
+}
+
+static void target_fabric_drop_lun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+	.make_group	= &target_fabric_make_lun,
+	.drop_item	= &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+/* Start of tfc_tpg_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
+
+static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
+	.show_attribute		= target_fabric_tpg_attrib_attr_show,
+	.store_attribute	= target_fabric_tpg_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_attrib_cit */
+
+/* Start of tfc_tpg_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
+
+static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
+	.show_attribute		= target_fabric_tpg_param_attr_show,
+	.store_attribute	= target_fabric_tpg_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_param_cit */
+
+/* Start of tfc_tpg_base_cit */
+/*
+ * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+
+static void target_fabric_tpg_release(struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+			struct se_portal_group, tpg_group);
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.release		= target_fabric_tpg_release,
+	.show_attribute		= target_fabric_tpg_attr_show,
+	.store_attribute	= target_fabric_tpg_attr_store,
+};
+
+TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_base_cit */
+
+/* Start of tfc_tpg_cit */
+
+static struct config_group *target_fabric_make_tpg(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+	struct se_portal_group *se_tpg;
+
+	if (!(tf->tf_ops.fabric_make_tpg)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+	if (!(se_tpg) || IS_ERR(se_tpg))
+		return ERR_PTR(-EINVAL);
+	/*
+	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
+	 */
+	se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
+	se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
+	se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
+	se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
+	se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
+	se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
+	se_tpg->tpg_group.default_groups[5] = NULL;
+
+	config_group_init_type_name(&se_tpg->tpg_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
+			&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+	config_group_init_type_name(&se_tpg->tpg_np_group, "np",
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+
+	return &se_tpg->tpg_group;
+}
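+
+/*
+ * A successful mkdir of tpgt_$TAG therefore produces the following default
+ * layout (illustrative):
+ *
+ *   $FABRIC/$WWN/tpgt_$TAG/
+ *   |-- lun/      (tfc_tpg_lun_cit)
+ *   |-- np/       (tfc_tpg_np_cit)
+ *   |-- acls/     (tfc_tpg_nacl_cit)
+ *   |-- attrib/   (tfc_tpg_attrib_cit)
+ *   `-- param/    (tfc_tpg_param_cit)
+ */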
+
+static void target_fabric_drop_tpg(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+				struct se_portal_group, tpg_group);
+	struct config_group *tpg_cg = &se_tpg->tpg_group;
+	struct config_item *df_item;
+	int i;
+	/*
+	 * Release default groups, but do not release tpg_cg->default_groups
+	 * memory as it is statically allocated at se_tpg->tpg_default_groups.
+	 */
+	for (i = 0; tpg_cg->default_groups[i]; i++) {
+		df_item = &tpg_cg->default_groups[i]->cg_item;
+		tpg_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+}
+
+static void target_fabric_release_wwn(struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+	.release	= target_fabric_release_wwn,
+};
+
+static struct configfs_group_operations target_fabric_tpg_group_ops = {
+	.make_group	= target_fabric_make_tpg,
+	.drop_item	= target_fabric_drop_tpg,
+};
+
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+		NULL);
+
+/* End of tfc_tpg_cit */
+
+/* Start of tfc_wwn_cit */
+
+static struct config_group *target_fabric_make_wwn(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf = container_of(group,
+				struct target_fabric_configfs, tf_group);
+	struct se_wwn *wwn;
+
+	if (!(tf->tf_ops.fabric_make_wwn)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+	if (!(wwn) || IS_ERR(wwn))
+		return ERR_PTR(-EINVAL);
+
+	wwn->wwn_tf = tf;
+	config_group_init_type_name(&wwn->wwn_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_cit);
+
+	return &wwn->wwn_group;
+}
+
+static void target_fabric_drop_wwn(
+	struct config_group *group,
+	struct config_item *item)
+{
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_wwn_group_ops = {
+	.make_group	= target_fabric_make_wwn,
+	.drop_item	= target_fabric_drop_wwn,
+};
+/*
+ * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
+
+static struct configfs_item_operations target_fabric_wwn_item_ops = {
+	.show_attribute		= target_fabric_wwn_attr_show,
+	.store_attribute	= target_fabric_wwn_attr_store,
+};
+
+TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+
+/* End of tfc_wwn_cit */
+
+/* Start of tfc_discovery_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
+		tf_disc_group);
+
+static struct configfs_item_operations target_fabric_discovery_item_ops = {
+	.show_attribute		= target_fabric_discovery_attr_show,
+	.store_attribute	= target_fabric_discovery_attr_store,
+};
+
+TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+
+/* End of tfc_discovery_cit */
+
+int target_fabric_setup_cits(struct target_fabric_configfs *tf)
+{
+	target_fabric_setup_discovery_cit(tf);
+	target_fabric_setup_wwn_cit(tf);
+	target_fabric_setup_tpg_cit(tf);
+	target_fabric_setup_tpg_base_cit(tf);
+	target_fabric_setup_tpg_port_cit(tf);
+	target_fabric_setup_tpg_lun_cit(tf);
+	target_fabric_setup_tpg_np_cit(tf);
+	target_fabric_setup_tpg_np_base_cit(tf);
+	target_fabric_setup_tpg_attrib_cit(tf);
+	target_fabric_setup_tpg_param_cit(tf);
+	target_fabric_setup_tpg_nacl_cit(tf);
+	target_fabric_setup_tpg_nacl_base_cit(tf);
+	target_fabric_setup_tpg_nacl_attrib_cit(tf);
+	target_fabric_setup_tpg_nacl_auth_cit(tf);
+	target_fabric_setup_tpg_nacl_param_cit(tf);
+	target_fabric_setup_tpg_mappedlun_cit(tf);
+
+	return 0;
+}
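+
+/*
+ * Taken together, the config_item_types initialized above back a per-fabric
+ * configfs hierarchy roughly of the following shape (illustrative):
+ *
+ *   /sys/kernel/config/target/$FABRIC/   (wwn_cit: mkdir $WWN)
+ *   `-- $WWN/                            (tpg_cit: mkdir tpgt_$TAG)
+ *       `-- tpgt_$TAG/                   (tpg_base_cit)
+ *           |-- lun/lun_$ID/             (tpg_lun_cit -> tpg_port_cit)
+ *           |-- np/                      (tpg_np_cit -> tpg_np_base_cit)
+ *           |-- attrib/, param/          (tpg_attrib_cit, tpg_param_cit)
+ *           `-- acls/$INITIATOR/         (tpg_nacl_cit -> tpg_nacl_base_cit)
+ *               |-- attrib/, auth/, param/
+ *               `-- lun_$ID/             (tpg_mappedlun_cit)
+ */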
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 0000000..2628564
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,451 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_lib.c
+ *
+ * This file contains generic high level protocol identifier and PR
+ * handlers for TCM fabric modules
+ *
+ * Copyright (c) 2010 Rising Tide Systems, Inc.
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+/*
+ * Handlers for Serial Attached SCSI (SAS)
+ */
+u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * Return a SAS Serial SCSI Protocol identifier for loopback operations
+	 * This is defined in section 7.5.1 Table 362 in spc4r17
+	 */
+	return 0x6;
+}
+EXPORT_SYMBOL(sas_get_fabric_proto_ident);
+
+u32 sas_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char binary, *ptr;
+	int i;
+	u32 off = 4;
+	/*
+	 * Set PROTOCOL IDENTIFIER to 6h for SAS
+	 */
+	buf[0] = 0x06;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 */
+	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */
+
+	for (i = 0; i < 16; i += 2) {
+		binary = transport_asciihex_to_binaryhex(&ptr[i]);
+		buf[off++] = binary;
+	}
+	/*
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id);
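+
+/*
+ * Example encoding (illustrative): for an initiator named
+ * "naa.5001405e6f7a8b9c" the 24-byte TransportID carries PROTOCOL
+ * IDENTIFIER 6h in byte 0 and the eight binary NAA bytes
+ * 0x50 0x01 0x40 0x5e 0x6f 0x7a 0x8b 0x9c in bytes 4-11; bytes 12-23
+ * are reserved and expected to be zeroed by the caller's buffer.
+ */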
+
+u32 sas_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 *
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id_len);
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+char *sas_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+	 * for initiator ports using SCSI over SAS Serial SCSI Protocol
+	 *
+	 * The TransportID for a SAS Initiator Port is a fixed 24 bytes in
+	 * size, and SAS does not contain an I_T nexus identifier,
+	 * so we return the **port_nexus_ptr set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Fibre Channel Protocol (FCP)
+ */
+u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	return 0x0;	/* 0 = fcp-2 per SPC4 section 7.5.1 */
+}
+EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+
+u32 fc_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id_len);
+
+u32 fc_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char binary, *ptr;
+	int i;
+	u32 off = 8;
+	/*
+	 * PROTOCOL IDENTIFIER is 0h for FCP-2
+	 *
+	 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
+	 * SCSI over Fibre Channel
+	 *
+	 * We convert the ASCII formatted N Port name into a binary
+	 * encoded TransportID.
+	 */
+	ptr = &se_nacl->initiatorname[0];
+
+	for (i = 0; i < 24; ) {
+		if (!(strncmp(&ptr[i], ":", 1))) {
+			i++;
+			continue;
+		}
+		binary = transport_asciihex_to_binaryhex(&ptr[i]);
+		buf[off++] = binary;
+		i += 2;
+	}
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id);
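+
+/*
+ * Example encoding (illustrative): an N Port name of
+ * "21:00:00:24:ff:31:4c:48" has its ':' separators skipped and is packed
+ * as the eight binary bytes 0x21 0x00 0x00 0x24 0xff 0x31 0x4c 0x48
+ * starting at offset 8 of the 24-byte TransportID; byte 0 is left at 0h
+ * for FCP, relying on the caller providing a zeroed buffer.
+ */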
+
+char *fc_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * The TransportID for an FC N Port is a fixed 24 bytes in
+	 * size, and FC does not contain an I_T nexus identifier,
+	 * so we return the **port_nexus_ptr set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	return (char *)&buf[8];
+}
+EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Internet Small Computer Systems Interface (iSCSI)
+ */
+
+u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * This value is defined for "Internet SCSI (iSCSI)"
+	 * in spc4r17 section 7.5.1 Table 362
+	 */
+	return 0x5;
+}
+EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
+
+u32 iscsi_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	u32 off = 4, padding = 0;
+	u16 len = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
+	 */
+	buf[0] = 0x05;
+	/*
+	 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+	 * ports using SCSI over iSCSI.
+	 *
+	 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
+	 * shall contain the iSCSI name of an iSCSI initiator node (see
+	 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
+	 * null character terminates the ISCSI NAME field without regard for
+	 * the specified length of the iSCSI TransportID or the contents of
+	 * the ADDITIONAL LENGTH field.
+	 */
+	len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+	/*
+	 * Add Extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is an ISID present with the registration and
+	 * *format_code == 1, use the iSCSI Initiator port TransportID format.
+	 *
+	 * Otherwise use the iSCSI Initiator device TransportID format that
+	 * does not contain the ASCII encoded iSCSI Initiator ISID value
+	 * provided by the iSCSI Initiator during the iSCSI login process.
+	 */
+	if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
+		/*
+		 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
+		 * format.
+		 */
+		buf[0] |= 0x40;
+		/*
+		 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+		 * ports using SCSI over iSCSI.  Table 390
+		 *
+		 * The SEPARATOR field shall contain the five ASCII
+		 * characters ",i,0x".
+		 *
+		 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
+		 * field shall contain the iSCSI initiator session identifier
+		 * (see RFC 3720) in the form of ASCII characters that are the
+		 * hexadecimal digits converted from the binary iSCSI initiator
+		 * session identifier value. The first ISCSI INITIATOR SESSION
+		 * ID field byte containing an ASCII null character terminates
+		 * the ISCSI INITIATOR SESSION ID field.
+		 */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
+		buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
+		len += 5;
+		buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
+		buf[off+len] = '\0'; off++;
+		len += 7;
+	}
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff);
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id);
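+
+/*
+ * Example layout (illustrative): for an initiator named
+ * "iqn.1994-05.com.example:host1" registered with an ISID and
+ * *format_code == 1, byte 0 becomes 0x45 (protocol identifier 5h plus
+ * FORMAT CODE 01b), bytes 2-3 carry the four-byte padded ADDITIONAL
+ * LENGTH, and the string starting at byte 4 reads
+ * "iqn.1994-05.com.example:host1,i,0x" followed by the ASCII ISID
+ * characters kept in pr_reg->pr_reg_isid and a NULL terminator.
+ */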
+
+u32 iscsi_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	u32 len = 0, padding = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	len = strlen(se_nacl->initiatorname);
+	/*
+	 * Add extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is ISID present with the registration, use format code:
+	 * 01b: iSCSI Initiator port TransportID format
+	 *
+	 * If there is not an active iSCSI session, use format code:
+	 * 00b: iSCSI Initiator device TransportID format
+	 */
+	if (pr_reg->isid_present_at_reg) {
+		len += 5; /* For ",i,0x" ASCII separator */
+		len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+		*format_code = 1;
+	} else
+		*format_code = 0;
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
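As a quick illustration of the length math above, here is a standalone userspace sketch (not part of the patch); the initiator name is an assumed example. The "(-len) & 3" expression rounds the running length up to the next multiple of four, and the final "+ 4" accounts for the four-byte TransportID header.

#include <stdio.h>
#include <string.h>

/*
 * Userspace sketch of the iscsi_get_pr_transport_id_len() arithmetic.
 * The initiator name is an assumed example value.
 */
int main(void)
{
	const char *name = "iqn.2003-01.org.example:host1";	/* assumed */
	unsigned int len = strlen(name) + 1;	/* iSCSI name + NULL terminator */
	int isid_present = 1;			/* pretend an ISID was registered */

	if (isid_present)
		len += 5 + 7;		/* ",i,0x" separator + 6 ISID bytes + NULL */

	len += (-len) & 3;		/* pad to a multiple of four */
	len += 4;			/* four-byte TransportID header */

	printf("TransportID length: %u\n", len);
	return 0;
}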
+
+char *iscsi_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	char *p;
+	u32 tid_len, padding;
+	int i;
+	u16 add_len;
+	u8 format_code = (buf[0] & 0xc0);
+	/*
+	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+	 *
+	 *       TransportID for initiator ports using SCSI over iSCSI,
+	 *       from Table 388 -- iSCSI TransportID formats.
+	 *
+	 *    00b     Initiator port is identified using the world wide unique
+	 *            SCSI device name of the iSCSI initiator
+	 *            device containing the initiator port (see table 389).
+	 *    01b     Initiator port is identified using the world wide unique
+	 *            initiator port identifier (see table 390).10b to 11b
+	 *            Reserved
+	 */
+	if ((format_code != 0x00) && (format_code != 0x40)) {
+		printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+			" Initiator Transport ID\n", format_code);
+		return NULL;
+	}
+	/*
+	 * If the caller wants the TransportID Length, we set that value for the
+	 * entire iSCSI Transport ID now.
+	 */
+	if (out_tid_len != NULL) {
+		add_len = ((buf[2] & 0xff) << 8);
+		add_len |= (buf[3] & 0xff);
+
+		tid_len = strlen((char *)&buf[4]);
+		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+		tid_len += 1; /* Add one byte for NULL terminator */
+		padding = ((-tid_len) & 3);
+		if (padding != 0)
+			tid_len += padding;
+
+		if ((add_len + 4) != tid_len) {
+			printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+				"does not match calculated tid_len: %u,"
+				" using tid_len instead\n", add_len+4, tid_len);
+			*out_tid_len = tid_len;
+		} else
+			*out_tid_len = (add_len + 4);
+	}
+	/*
+	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+	 * format.
+	 */
+	if (format_code == 0x40) {
+		p = strstr((char *)&buf[4], ",i,0x");
+		if (!(p)) {
+			printk(KERN_ERR "Unable to locate \",i,0x\" separator"
+				" for Initiator port identifier: %s\n",
+				(char *)&buf[4]);
+			return NULL;
+		}
+		*p = '\0'; /* Terminate iSCSI Name */
+		p += 5; /* Skip over ",i,0x" separator */
+
+		*port_nexus_ptr = p;
+		/*
+		 * Go ahead and do the lower case conversion of the received
+		 * 12 ASCII characters representing the ISID in the TransportID
+		 * for comparison against the running iSCSI session's ISID from
+		 * iscsi_target.c:lio_sess_get_initiator_sid()
+		 */
+		for (i = 0; i < 12; i++) {
+			if (isdigit(*p)) {
+				p++;
+				continue;
+			}
+			*p = tolower(*p);
+			p++;
+		}
+	}
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
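For reference, a userspace-only sketch (not part of the patch) of the format 01b TransportID layout that iscsi_parse_pr_out_transport_id() expects: byte 0 carries the protocol identifier (0x5 for iSCSI) in its low nibble and the format code in its top two bits, bytes 2-3 hold the big-endian ADDITIONAL LENGTH, and byte 4 onward holds the null-terminated "<iqn>,i,0x<isid>" string, padded to a multiple of four. The nexus string below is an assumed example.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[64] = { 0 };
	const char *nexus = "iqn.2003-01.org.example:host1,i,0x0123456789ab"; /* assumed */
	unsigned short add_len;

	buf[0] = 0x05 | 0x40;			/* iSCSI protocol id, FORMAT CODE 01b */
	strcpy((char *)&buf[4], nexus);
	add_len = (strlen(nexus) + 1 + 3) & ~3;	/* padded length following byte 3 */
	buf[2] = (add_len >> 8) & 0xff;
	buf[3] = add_len & 0xff;

	printf("ADDITIONAL LENGTH: %u\n", (buf[2] << 8) | buf[3]);
	printf("I_T nexus: %s\n", (char *)&buf[4]);
	return 0;
}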
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 0000000..0aaca88
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,688 @@
+/*******************************************************************************
+ * Filename:  target_core_file.c
+ *
+ * This file contains the Storage Engine <-> FILEIO transport specific functions
+ *
+ * Copyright (c) 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_file.h"
+
+#if 1
+#define DEBUG_FD_CACHE(x...) printk(x)
+#else
+#define DEBUG_FD_CACHE(x...)
+#endif
+
+#if 1
+#define DEBUG_FD_FUA(x...) printk(x)
+#else
+#define DEBUG_FD_FUA(x...)
+#endif
+
+static struct se_subsystem_api fileio_template;
+
+/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct fd_host *fd_host;
+
+	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+	if (!(fd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
+		return -1;
+	}
+
+	fd_host->fd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) fd_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+		TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+		" Target Core with TCQ Depth: %d MaxSectors: %u\n",
+		hba->hba_id, fd_host->fd_host_id,
+		atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+	kfree(fd_host);
+	hba->hba_ptr = NULL;
+}
+
+static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct fd_dev *fd_dev;
+	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+
+	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+	if (!(fd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+		return NULL;
+	}
+
+	fd_dev->fd_host = fd_host;
+
+	printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);
+
+	return fd_dev;
+}
+
+/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static struct se_device *fd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	char *dev_p = NULL;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct queue_limits *limits;
+	struct fd_dev *fd_dev = (struct fd_dev *) p;
+	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+	mm_segment_t old_fs;
+	struct file *file;
+	struct inode *inode = NULL;
+	int dev_flags = 0, flags;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	dev_p = getname(fd_dev->fd_dev_name);
+	set_fs(old_fs);
+
+	if (IS_ERR(dev_p)) {
+		printk(KERN_ERR "getname(%s) failed: %ld\n",
+			fd_dev->fd_dev_name, PTR_ERR(dev_p));
+		goto fail;
+	}
+#if 0
+	if (di->no_create_file)
+		flags = O_RDWR | O_LARGEFILE;
+	else
+		flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#else
+	flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#endif
+/*	flags |= O_DIRECT; */
+	/*
+	 * If fd_buffered_io=1 has not been set explicitly (the default),
+	 * use O_SYNC to force FILEIO writes to disk.
+	 */
+	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
+		flags |= O_SYNC;
+
+	file = filp_open(dev_p, flags, 0600);
+
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+		goto fail;
+	}
+	fd_dev->fd_file = file;
+	/*
+	 * If using a block backend with this struct file, we extract
+	 * fd_dev->fd_[block,dev]_size from struct block_device.
+	 *
+	 * Otherwise, we use the passed fd_dev_size= from configfs
+	 */
+	inode = file->f_mapping->host;
+	if (S_ISBLK(inode->i_mode)) {
+		struct request_queue *q;
+		/*
+		 * Setup the local scope queue_limits from struct request_queue->limits
+		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+		 */
+		q = bdev_get_queue(inode->i_bdev);
+		limits = &dev_limits.limits;
+		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
+		limits->max_hw_sectors = queue_max_hw_sectors(q);
+		limits->max_sectors = queue_max_sectors(q);
+		/*
+		 * Determine the number of bytes from i_size_read() minus
+		 * one (1) logical sector from underlying struct block_device
+		 */
+		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+				       fd_dev->fd_block_size);
+
+		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+			" block_device blocks: %llu logical_block_size: %d\n",
+			fd_dev->fd_dev_size,
+			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+			fd_dev->fd_block_size);
+	} else {
+		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+				" parameter, and no backing struct"
+				" block_device\n");
+			goto fail;
+		}
+
+		limits = &dev_limits.limits;
+		limits->logical_block_size = FD_BLOCKSIZE;
+		limits->max_hw_sectors = FD_MAX_SECTORS;
+		limits->max_sectors = FD_MAX_SECTORS;
+		fd_dev->fd_block_size = FD_BLOCKSIZE;
+	}
+
+	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba, &fileio_template,
+				se_dev, dev_flags, (void *)fd_dev,
+				&dev_limits, "FILEIO", FD_VERSION);
+	if (!(dev))
+		goto fail;
+
+	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+	fd_dev->fd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
+			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+
+	putname(dev_p);
+	return dev;
+fail:
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+	putname(dev_p);
+	return NULL;
+}
+
+/*	fd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_device(void *p)
+{
+	struct fd_dev *fd_dev = (struct fd_dev *) p;
+
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+
+	kfree(fd_dev);
+}
+
+static inline struct fd_request *FILE_REQ(struct se_task *task)
+{
+	return container_of(task, struct fd_request, fd_task);
+}
+
+
+static struct se_task *
+fd_alloc_task(struct se_cmd *cmd)
+{
+	struct fd_request *fd_req;
+
+	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
+	if (!(fd_req)) {
+		printk(KERN_ERR "Unable to allocate struct fd_request\n");
+		return NULL;
+	}
+
+	fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
+
+	return &fd_req->fd_task;
+}
+
+static int fd_do_readv(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct file *fd = req->fd_dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	int ret = 0, i;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+	if (!(iov)) {
+		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
+		return -1;
+	}
+
+	for (i = 0; i < task->task_sg_num; i++) {
+		iov[i].iov_len = sg[i].length;
+		iov[i].iov_base = sg_virt(&sg[i]);
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+	set_fs(old_fs);
+
+	kfree(iov);
+	/*
+	 * Return zeros and GOOD status even if the READ did not return
+	 * the expected virt_size for struct file w/o a backing struct
+	 * block_device.
+	 */
+	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+		if (ret < 0 || ret != task->task_size) {
+			printk(KERN_ERR "vfs_readv() returned %d,"
+				" expecting %d for S_ISBLK\n", ret,
+				(int)task->task_size);
+			return -1;
+		}
+	} else {
+		if (ret < 0) {
+			printk(KERN_ERR "vfs_readv() returned %d for non"
+				" S_ISBLK\n", ret);
+			return -1;
+		}
+	}
+
+	return 1;
+}
+
+static int fd_do_writev(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct file *fd = req->fd_dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	int ret, i = 0;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+	if (!(iov)) {
+		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
+		return -1;
+	}
+
+	for (i = 0; i < task->task_sg_num; i++) {
+		iov[i].iov_len = sg[i].length;
+		iov[i].iov_base = sg_virt(&sg[i]);
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+	set_fs(old_fs);
+
+	kfree(iov);
+
+	if (ret < 0 || ret != task->task_size) {
+		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
+		return -1;
+	}
+
+	return 1;
+}
+
+static void fd_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+	loff_t start, end;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
+	/*
+	 * Determine if we will be flushing the entire device.
+	 */
+	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+		start = 0;
+		end = LLONG_MAX;
+	} else {
+		start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+		if (cmd->data_length)
+			end = start + cmd->data_length;
+		else
+			end = LLONG_MAX;
+	}
+
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+
+	if (!immed)
+		transport_complete_sync_cache(cmd, ret == 0);
+}
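A standalone userspace sketch (not part of the patch) of the byte range that fd_emulate_sync_cache() passes to vfs_fsync_range(); the LBA, transfer length and block size below are assumed example values.

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long long lba = 2048;		/* SYNCHRONIZE_CACHE starting LBA */
	unsigned int data_length = 65536;	/* bytes to sync, 0 == to end of device */
	unsigned int block_size = 512;
	long long start, end;

	if (lba == 0 && data_length == 0) {
		/* flush the entire device */
		start = 0;
		end = LLONG_MAX;
	} else {
		start = lba * block_size;
		end = data_length ? start + data_length : LLONG_MAX;
	}

	printf("fsync range: %lld - %lld\n", start, end);
	return 0;
}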
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int fd_emulated_write_cache(struct se_device *dev)
+{
+	return 1;
+}
+
+static int fd_emulated_dpo(struct se_device *dev)
+{
+	return 0;
+}
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int fd_emulated_fua_write(struct se_device *dev)
+{
+	return 1;
+}
+
+static int fd_emulated_fua_read(struct se_device *dev)
+{
+	return 0;
+}
+
+/*
+ * WRITE Force Unit Access (FUA) emulation on a per struct se_task
+ * LBA range basis..
+ */
+static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+	loff_t end = start + task->task_size;
+	int ret;
+
+	DEBUG_FD_FUA("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+			task->task_lba, task->task_size);
+
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+}
+
+static int fd_do_task(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	/*
+	 * Call vectorized fileio functions to map struct scatterlist
+	 * physical memory addresses to struct iovec virtual memory.
+	 */
+	if (task->task_data_direction == DMA_FROM_DEVICE) {
+		ret = fd_do_readv(task);
+	} else {
+		ret = fd_do_writev(task);
+
+		if (ret > 0 &&
+		    DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
+		    DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+		    T_TASK(cmd)->t_tasks_fua) {
+			/*
+			 * We might need to be a bit smarter here
+			 * and return some sense data to let the initiator
+			 * know the FUA WRITE cache sync failed..?
+			 */
+			fd_emulate_write_fua(cmd, task);
+		}
+
+	}
+
+	if (ret < 0)
+		return ret;
+	if (ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	fd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_task(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+
+	kfree(req);
+}
+
+enum {
+	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_fd_dev_name, "fd_dev_name=%s"},
+	{Opt_fd_dev_size, "fd_dev_size=%s"},
+	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t fd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page, ssize_t count)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_fd_dev_name:
+			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
+					"%s", match_strdup(&args[0]));
+			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+					fd_dev->fd_dev_name);
+			fd_dev->fbd_flags |= FBDF_HAS_PATH;
+			break;
+		case Opt_fd_dev_size:
+			arg_p = match_strdup(&args[0]);
+			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoull() failed for"
+						" fd_dev_size=\n");
+				goto out;
+			}
+			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+					" bytes\n", fd_dev->fd_dev_size);
+			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+			break;
+		case Opt_fd_buffered_io:
+			match_int(args, &arg);
+			if (arg != 1) {
+				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			printk(KERN_INFO "FILEIO: Using buffered I/O"
+				" operations for struct fd_dev\n");
+
+			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
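The control string handed to this routine is a comma-separated list such as fd_dev_name=<path>,fd_dev_size=<bytes>,fd_buffered_io=1; the path and size below are assumed examples. A minimal userspace sketch of the same strsep()-based tokenization, not part of the patch:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char page[] = "fd_dev_name=/tmp/fileio0,fd_dev_size=4194304,fd_buffered_io=1";
	char *opts = strdup(page), *orig = opts, *ptr;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		printf("token: %s\n", ptr);	/* e.g. fd_dev_name=/tmp/fileio0 */
	}
	free(orig);
	return 0;
}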
+
+static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		printk(KERN_ERR "Missing fd_dev_name=\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t fd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = 0;
+
+	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
+		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
+		"Buffered" : "Synchronous");
+	return bl;
+}
+
+/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *fd_get_cdb(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+
+	return req->fd_scsi_cdb;
+}
+
+/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t fd_get_blocks(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
+			DEV_ATTRIB(dev)->block_size);
+
+	return blocks_long;
+}
+
+static struct se_subsystem_api fileio_template = {
+	.name			= "fileio",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.attach_hba		= fd_attach_hba,
+	.detach_hba		= fd_detach_hba,
+	.allocate_virtdevice	= fd_allocate_virtdevice,
+	.create_virtdevice	= fd_create_virtdevice,
+	.free_device		= fd_free_device,
+	.dpo_emulated		= fd_emulated_dpo,
+	.fua_write_emulated	= fd_emulated_fua_write,
+	.fua_read_emulated	= fd_emulated_fua_read,
+	.write_cache_emulated	= fd_emulated_write_cache,
+	.alloc_task		= fd_alloc_task,
+	.do_task		= fd_do_task,
+	.do_sync_cache		= fd_emulate_sync_cache,
+	.free_task		= fd_free_task,
+	.check_configfs_dev_params = fd_check_configfs_dev_params,
+	.set_configfs_dev_params = fd_set_configfs_dev_params,
+	.show_configfs_dev_params = fd_show_configfs_dev_params,
+	.get_cdb		= fd_get_cdb,
+	.get_device_rev		= fd_get_device_rev,
+	.get_device_type	= fd_get_device_type,
+	.get_blocks		= fd_get_blocks,
+};
+
+static int __init fileio_module_init(void)
+{
+	return transport_subsystem_register(&fileio_template);
+}
+
+static void fileio_module_exit(void)
+{
+	transport_subsystem_release(&fileio_template);
+}
+
+MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(fileio_module_init);
+module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 0000000..ef4de2b
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,50 @@
+#ifndef TARGET_CORE_FILE_H
+#define TARGET_CORE_FILE_H
+
+#define FD_VERSION		"4.0"
+
+#define FD_MAX_DEV_NAME		256
+/* Maximum queue depth for the FILEIO HBA */
+#define FD_HBA_QUEUE_DEPTH	256
+#define FD_DEVICE_QUEUE_DEPTH	32
+#define FD_MAX_DEVICE_QUEUE_DEPTH 128
+#define FD_BLOCKSIZE		512
+#define FD_MAX_SECTORS		1024
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct fd_request {
+	struct se_task	fd_task;
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	/* FILEIO device */
+	struct fd_dev	*fd_dev;
+} ____cacheline_aligned;
+
+#define FBDF_HAS_PATH		0x01
+#define FBDF_HAS_SIZE		0x02
+#define FDBD_USE_BUFFERED_IO	0x04
+
+struct fd_dev {
+	u32		fbd_flags;
+	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
+	/* Unique FILEIO Device ID in FILEIO HBA */
+	u32		fd_dev_id;
+	/* Number of SG tables in sg_table_array */
+	u32		fd_table_count;
+	u32		fd_queue_depth;
+	u32		fd_block_size;
+	unsigned long long fd_dev_size;
+	struct file	*fd_file;
+	/* FILEIO HBA device is connected to */
+	struct fd_host *fd_host;
+} ____cacheline_aligned;
+
+struct fd_host {
+	u32		fd_host_dev_id_count;
+	/* Unique FILEIO Host ID */
+	u32		fd_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 0000000..4bbe820
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,185 @@
+/*******************************************************************************
+ * Filename:  target_core_hba.c
+ *
+ * This file contains the iSCSI HBA Transport related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_hba.h"
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(subsystem_mutex);
+
+int transport_subsystem_register(struct se_subsystem_api *sub_api)
+{
+	struct se_subsystem_api *s;
+
+	INIT_LIST_HEAD(&sub_api->sub_api_list);
+
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!(strcmp(s->name, sub_api->name))) {
+			printk(KERN_ERR "%p is already registered with"
+				" duplicate name %s, unable to process"
+				" request\n", s, s->name);
+			mutex_unlock(&subsystem_mutex);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&sub_api->sub_api_list, &subsystem_list);
+	mutex_unlock(&subsystem_mutex);
+
+	printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+			" %p\n", sub_api->name, sub_api->owner);
+	return 0;
+}
+EXPORT_SYMBOL(transport_subsystem_register);
+
+void transport_subsystem_release(struct se_subsystem_api *sub_api)
+{
+	mutex_lock(&subsystem_mutex);
+	list_del(&sub_api->sub_api_list);
+	mutex_unlock(&subsystem_mutex);
+}
+EXPORT_SYMBOL(transport_subsystem_release);
+
+static struct se_subsystem_api *core_get_backend(const char *sub_name)
+{
+	struct se_subsystem_api *s;
+
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!strcmp(s->name, sub_name))
+			goto found;
+	}
+	mutex_unlock(&subsystem_mutex);
+	return NULL;
+found:
+	if (s->owner && !try_module_get(s->owner))
+		s = NULL;
+	mutex_unlock(&subsystem_mutex);
+	return s;
+}
+
+struct se_hba *
+core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
+{
+	struct se_hba *hba;
+	int ret = 0;
+
+	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+	if (!hba) {
+		printk(KERN_ERR "Unable to allocate struct se_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_LIST_HEAD(&hba->hba_dev_list);
+	spin_lock_init(&hba->device_lock);
+	spin_lock_init(&hba->hba_queue_lock);
+	mutex_init(&hba->hba_access_mutex);
+
+	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
+	hba->hba_flags |= hba_flags;
+
+	atomic_set(&hba->max_queue_depth, 0);
+	atomic_set(&hba->left_queue_depth, 0);
+
+	hba->transport = core_get_backend(plugin_name);
+	if (!hba->transport) {
+		ret = -EINVAL;
+		goto out_free_hba;
+	}
+
+	ret = hba->transport->attach_hba(hba, plugin_dep_id);
+	if (ret < 0)
+		goto out_module_put;
+
+	spin_lock(&se_global->hba_lock);
+	hba->hba_id = se_global->g_hba_id_counter++;
+	list_add_tail(&hba->hba_list, &se_global->g_hba_list);
+	spin_unlock(&se_global->hba_lock);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+			" Core\n", hba->hba_id);
+
+	return hba;
+
+out_module_put:
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+	hba->transport = NULL;
+out_free_hba:
+	kfree(hba);
+	return ERR_PTR(ret);
+}
+
+int
+core_delete_hba(struct se_hba *hba)
+{
+	struct se_device *dev, *dev_tmp;
+
+	spin_lock(&hba->device_lock);
+	list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
+
+		se_clear_dev_ports(dev);
+		spin_unlock(&hba->device_lock);
+
+		se_release_device_for_hba(dev);
+
+		spin_lock(&hba->device_lock);
+	}
+	spin_unlock(&hba->device_lock);
+
+	hba->transport->detach_hba(hba);
+
+	spin_lock(&se_global->hba_lock);
+	list_del(&hba->hba_list);
+	spin_unlock(&se_global->hba_lock);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+			" Core\n", hba->hba_id);
+
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+
+	hba->transport = NULL;
+	kfree(hba);
+	return 0;
+}
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
new file mode 100644
index 0000000..bb0fea5
--- /dev/null
+++ b/drivers/target/target_core_hba.h
@@ -0,0 +1,7 @@
+#ifndef TARGET_CORE_HBA_H
+#define TARGET_CORE_HBA_H
+
+extern struct se_hba *core_alloc_hba(const char *, u32, u32);
+extern int core_delete_hba(struct se_hba *);
+
+#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 0000000..67f0c09
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,810 @@
+/*******************************************************************************
+ * Filename:  target_core_iblock.c
+ *
+ * This file contains the Storage Engine  <-> Linux BlockIO transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_iblock.h"
+
+#if 0
+#define DEBUG_IBLOCK(x...) printk(x)
+#else
+#define DEBUG_IBLOCK(x...)
+#endif
+
+static struct se_subsystem_api iblock_template;
+
+static void iblock_bio_done(struct bio *, int);
+
+/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct iblock_hba *ib_host;
+
+	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
+	if (!(ib_host)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct iblock_hba\n");
+		return -ENOMEM;
+	}
+
+	ib_host->iblock_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) ib_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
+		" Target Core TCQ Depth: %d\n", hba->hba_id,
+		ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+
+	return 0;
+}
+
+static void iblock_detach_hba(struct se_hba *hba)
+{
+	struct iblock_hba *ib_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
+
+	kfree(ib_host);
+	hba->hba_ptr = NULL;
+}
+
+static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct iblock_dev *ib_dev = NULL;
+	struct iblock_hba *ib_host = hba->hba_ptr;
+
+	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
+	if (!(ib_dev)) {
+		printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+		return NULL;
+	}
+	ib_dev->ibd_host = ib_host;
+
+	printk(KERN_INFO  "IBLOCK: Allocated ib_dev for %s\n", name);
+
+	return ib_dev;
+}
+
+static struct se_device *iblock_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct iblock_dev *ib_dev = p;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct block_device *bd = NULL;
+	struct request_queue *q;
+	struct queue_limits *limits;
+	u32 dev_flags = 0;
+
+	if (!(ib_dev)) {
+		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+		return NULL;
+	}
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	/*
+	 * These settings need to be made tunable..
+	 */
+	ib_dev->ibd_bio_set = bioset_create(32, 64);
+	if (!(ib_dev->ibd_bio_set)) {
+		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+		return NULL;
+	}
+	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+	/*
+	/*
+	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
+	 * has already been set before echo 1 > $HBA/$DEV/enable can run.
+	 */
+	printk(KERN_INFO  "IBLOCK: Claiming struct block_device: %s\n",
+			ib_dev->ibd_udev_path);
+
+	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+	if (IS_ERR(bd))
+		goto failed;
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
+	q = bdev_get_queue(bd);
+	limits = &dev_limits.limits;
+	limits->logical_block_size = bdev_logical_block_size(bd);
+	limits->max_hw_sectors = queue_max_hw_sectors(q);
+	limits->max_sectors = queue_max_sectors(q);
+	dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+
+	ib_dev->ibd_major = MAJOR(bd->bd_dev);
+	ib_dev->ibd_minor = MINOR(bd->bd_dev);
+	ib_dev->ibd_bd = bd;
+
+	dev = transport_add_device_to_core_hba(hba,
+			&iblock_template, se_dev, dev_flags, (void *)ib_dev,
+			&dev_limits, "IBLOCK", IBLOCK_VERSION);
+	if (!(dev))
+		goto failed;
+
+	ib_dev->ibd_depth = dev->queue_depth;
+
+	/*
+	 * Check if the underlying struct block_device request_queue supports
+	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+	 * in ATA and we need to set TPE=1
+	 */
+	if (blk_queue_discard(bdev_get_queue(bd))) {
+		struct request_queue *q = bdev_get_queue(bd);
+
+		DEV_ATTRIB(dev)->max_unmap_lba_count =
+				q->limits.max_discard_sectors;
+		/*
+		 * Currently hardcoded to 1 in Linux/SCSI code..
+		 */
+		DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
+		DEV_ATTRIB(dev)->unmap_granularity =
+				q->limits.discard_granularity;
+		DEV_ATTRIB(dev)->unmap_granularity_alignment =
+				q->limits.discard_alignment;
+
+		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+				" disabled by default\n");
+	}
+
+	return dev;
+
+failed:
+	if (ib_dev->ibd_bio_set) {
+		bioset_free(ib_dev->ibd_bio_set);
+		ib_dev->ibd_bio_set = NULL;
+	}
+	ib_dev->ibd_bd = NULL;
+	ib_dev->ibd_major = 0;
+	ib_dev->ibd_minor = 0;
+	return NULL;
+}
+
+static void iblock_free_device(void *p)
+{
+	struct iblock_dev *ib_dev = p;
+
+	if (ib_dev->ibd_bd != NULL)
+		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	if (ib_dev->ibd_bio_set != NULL)
+		bioset_free(ib_dev->ibd_bio_set);
+	kfree(ib_dev);
+}
+
+static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
+{
+	return container_of(task, struct iblock_req, ib_task);
+}
+
+static struct se_task *
+iblock_alloc_task(struct se_cmd *cmd)
+{
+	struct iblock_req *ib_req;
+
+	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!(ib_req)) {
+		printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+		return NULL;
+	}
+
+	ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
+	atomic_set(&ib_req->ib_bio_cnt, 0);
+	return &ib_req->ib_task;
+}
+
+static unsigned long long iblock_emulate_read_cap_with_block_size(
+	struct se_device *dev,
+	struct block_device *bd,
+	struct request_queue *q)
+{
+	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
+					bdev_logical_block_size(bd)) - 1);
+	u32 block_size = bdev_logical_block_size(bd);
+
+	if (block_size == DEV_ATTRIB(dev)->block_size)
+		return blocks_long;
+
+	switch (block_size) {
+	case 4096:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 2048:
+			blocks_long <<= 1;
+			break;
+		case 1024:
+			blocks_long <<= 2;
+			break;
+		case 512:
+			blocks_long <<= 3;
+		default:
+			break;
+		}
+		break;
+	case 2048:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 1;
+			break;
+		case 1024:
+			blocks_long <<= 1;
+			break;
+		case 512:
+			blocks_long <<= 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 1024:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 2;
+			break;
+		case 2048:
+			blocks_long >>= 1;
+			break;
+		case 512:
+			blocks_long <<= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 512:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 3;
+			break;
+		case 2048:
+			blocks_long >>= 2;
+			break;
+		case 1024:
+			blocks_long >>= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return blocks_long;
+}
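The shift table above simply rescales the block count between two power-of-two block sizes; for example, a device with 512-byte sectors exported with a 4096-byte SCSI block size reports one eighth as many blocks (blocks_long >>= 3). A userspace sketch with assumed numbers, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned long long dev_blocks = 2097152;	/* 512-byte sectors == 1 GiB */
	unsigned int bd_block_size = 512;		/* underlying block device */
	unsigned int se_block_size = 4096;		/* exported SCSI block size */

	/* 512 -> 4096 is a divide by eight, i.e. blocks_long >>= 3 */
	unsigned long long exported = dev_blocks * bd_block_size / se_block_size;

	printf("exported blocks: %llu\n", exported);	/* prints 262144 */
	return 0;
}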
+
+/*
+ * Emulate SYNCHRONIZE_CACHE_*
+ */
+static void iblock_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+	sector_t error_sector;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
+	/*
+	 * blkdev_issue_flush() does not support specifying a range, so
+	 * we have to flush the entire cache.
+	 */
+	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
+	if (ret != 0) {
+		printk(KERN_ERR "IBLOCK: blkdev_issue_flush() failed: %d"
+			" error_sector: %llu\n", ret,
+			(unsigned long long)error_sector);
+	}
+
+	if (!immed)
+		transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int iblock_emulated_write_cache(struct se_device *dev)
+{
+	return 1;
+}
+
+static int iblock_emulated_dpo(struct se_device *dev)
+{
+	return 0;
+}
+
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int iblock_emulated_fua_write(struct se_device *dev)
+{
+	return 1;
+}
+
+static int iblock_emulated_fua_read(struct se_device *dev)
+{
+	return 0;
+}
+
+static int iblock_do_task(struct se_task *task)
+{
+	struct se_device *dev = task->task_se_cmd->se_dev;
+	struct iblock_req *req = IBLOCK_REQ(task);
+	struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
+	struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
+	struct bio *bio = req->ib_bio, *nbio = NULL;
+	int rw;
+
+	if (task->task_data_direction == DMA_TO_DEVICE) {
+		/*
+		 * Force data to disk if we pretend to not have a volatile
+		 * write cache, or the initiator set the Force Unit Access bit.
+		 */
+		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+		     T_TASK(task->task_se_cmd)->t_tasks_fua))
+			rw = WRITE_FUA;
+		else
+			rw = WRITE;
+	} else {
+		rw = READ;
+	}
+
+	while (bio) {
+		nbio = bio->bi_next;
+		bio->bi_next = NULL;
+		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
+			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+
+		submit_bio(rw, bio);
+		bio = nbio;
+	}
+
+	if (q->unplug_fn)
+		q->unplug_fn(q);
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	int barrier = 0;
+
+	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+}
+
+static void iblock_free_task(struct se_task *task)
+{
+	struct iblock_req *req = IBLOCK_REQ(task);
+	struct bio *bio, *hbio = req->ib_bio;
+	/*
+	 * We only release the bio(s) here if iblock_bio_done() has not called
+	 * bio_put() -> iblock_bio_destructor().
+	 */
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_put(bio);
+	}
+
+	kfree(req);
+}
+
+enum {
+	Opt_udev_path, Opt_force, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_udev_path, "udev_path=%s"},
+	{Opt_force, "force=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
+					       struct se_subsystem_dev *se_dev,
+					       const char *page, ssize_t count)
+{
+	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_udev_path:
+			if (ib_dev->ibd_bd) {
+				printk(KERN_ERR "Unable to set udev_path= while"
+					" ib_dev->ibd_bd exists\n");
+				ret = -EEXIST;
+				goto out;
+			}
+
+			ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+				"%s", match_strdup(&args[0]));
+			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+					ib_dev->ibd_udev_path);
+			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+			break;
+		case Opt_force:
+			match_int(args, &arg);
+			ib_dev->ibd_force = arg;
+			printk(KERN_INFO "IBLOCK: Set force=%d\n",
+				ib_dev->ibd_force);
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t iblock_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+
+	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t iblock_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	char buf[BDEVNAME_SIZE];
+	ssize_t bl = 0;
+
+	if (bd)
+		bl += sprintf(b + bl, "iBlock device: %s",
+				bdevname(bd, buf));
+	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
+		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
+				ibd->ibd_udev_path);
+	} else
+		bl += sprintf(b + bl, "\n");
+
+	bl += sprintf(b + bl, "        ");
+	if (bd) {
+		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
+			ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+			"CLAIMED: IBLOCK" : "CLAIMED: OS");
+	} else {
+		bl += sprintf(b + bl, "Major: %d Minor: %d\n",
+			ibd->ibd_major, ibd->ibd_minor);
+	}
+
+	return bl;
+}
+
+static void iblock_bio_destructor(struct bio *bio)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+
+	bio_free(bio, ib_dev->ibd_bio_set);
+}
+
+static struct bio *iblock_get_bio(
+	struct se_task *task,
+	struct iblock_req *ib_req,
+	struct iblock_dev *ib_dev,
+	int *ret,
+	sector_t lba,
+	u32 sg_num)
+{
+	struct bio *bio;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!(bio)) {
+		printk(KERN_ERR "Unable to allocate memory for bio\n");
+		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+		return NULL;
+	}
+
+	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
+		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
+	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = (void *) task;
+	bio->bi_destructor = iblock_bio_destructor;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_sector = lba;
+	atomic_inc(&ib_req->ib_bio_cnt);
+
+	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
+	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+			atomic_read(&ib_req->ib_bio_cnt));
+	return bio;
+}
+
+static int iblock_map_task_SG(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = SE_DEV(cmd);
+	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+	struct iblock_req *ib_req = IBLOCK_REQ(task);
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct scatterlist *sg;
+	int ret = 0;
+	u32 i, sg_num = task->task_sg_num;
+	sector_t block_lba;
+	/*
+	 * Do starting conversion up from non 512-byte blocksize with
+	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+	 */
+	if (DEV_ATTRIB(dev)->block_size == 4096)
+		block_lba = (task->task_lba << 3);
+	else if (DEV_ATTRIB(dev)->block_size == 2048)
+		block_lba = (task->task_lba << 2);
+	else if (DEV_ATTRIB(dev)->block_size == 1024)
+		block_lba = (task->task_lba << 1);
+	else if (DEV_ATTRIB(dev)->block_size == 512)
+		block_lba = task->task_lba;
+	else {
+		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
+				" %u\n", DEV_ATTRIB(dev)->block_size);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+	if (!(bio))
+		return ret;
+
+	ib_req->ib_bio = bio;
+	hbio = tbio = bio;
+	/*
+	 * Use fs/bio.c:bio_add_page() to set up the bio_vec maplist
+	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+	 */
+	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
+		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
+				sg->length, sg->offset);
+again:
+		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
+		if (ret != sg->length) {
+
+			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
+					bio->bi_sector);
+			DEBUG_IBLOCK("** task->task_size: %u\n",
+					task->task_size);
+			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+					bio->bi_max_vecs);
+			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+					bio->bi_vcnt);
+
+			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
+						block_lba, sg_num);
+			if (!(bio))
+				goto fail;
+
+			tbio = tbio->bi_next = bio;
+			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+				" list, Going to again\n", bio);
+			goto again;
+		}
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sg_num--;
+		DEBUG_IBLOCK("task: %p bio_add_page() passed!, decremented"
+			" sg_num to %u\n", task, sg_num);
+		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
+				" to %llu\n", task, block_lba);
+		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+				" %u\n", task, bio->bi_vcnt);
+	}
+
+	return 0;
+fail:
+	while (hbio) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_put(bio);
+	}
+	return ret;
+}
+
+static unsigned char *iblock_get_cdb(struct se_task *task)
+{
+	return IBLOCK_REQ(task)->ib_scsi_cdb;
+}
+
+static u32 iblock_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 iblock_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t iblock_get_blocks(struct se_device *dev)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_req *ibr = IBLOCK_REQ(task);
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+	 */
+	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+		err = -EIO;
+
+	if (err != 0) {
+		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic_inc();
+		bio_put(bio);
+		/*
+		 * Wait to complete the task until the last bio has completed.
+		 */
+		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+			return;
+
+		ibr->ib_bio = NULL;
+		transport_complete_task(task, 0);
+		return;
+	}
+	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+		task, bio, task->task_lba, bio->bi_sector, err);
+	/*
+	 * bio_put() will call iblock_bio_destructor() to release the bio back
+	 * to ibr->ib_bio_set.
+	 */
+	bio_put(bio);
+	/*
+	 * Wait to complete the task until the last bio has completed.
+	 */
+	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+		return;
+	/*
+	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
+	 */
+	ibr->ib_bio = NULL;
+	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+}
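The completion path above relies on a per-task bio counter: every finished bio drops ib_bio_cnt, and only the bio that takes it to zero completes the task, with any error recorded in ib_bio_err_cnt along the way. A userspace sketch of that pattern using C11 atomics, not part of the patch; the bio count and error value are assumed examples.

#include <stdio.h>
#include <stdatomic.h>

static atomic_int bio_cnt = 3;		/* assumed: task was split into 3 bios */
static atomic_int bio_err_cnt = 0;

static void bio_done(int err)
{
	if (err)
		atomic_fetch_add(&bio_err_cnt, 1);
	/* atomic_fetch_sub() returns the old value; 1 means we were the last bio */
	if (atomic_fetch_sub(&bio_cnt, 1) != 1)
		return;
	printf("task complete, status %s\n",
	       atomic_load(&bio_err_cnt) ? "failed" : "GOOD");
}

int main(void)
{
	bio_done(0);
	bio_done(-5);	/* assumed I/O error on the second bio */
	bio_done(0);
	return 0;
}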
+
+static struct se_subsystem_api iblock_template = {
+	.name			= "iblock",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.map_task_SG		= iblock_map_task_SG,
+	.attach_hba		= iblock_attach_hba,
+	.detach_hba		= iblock_detach_hba,
+	.allocate_virtdevice	= iblock_allocate_virtdevice,
+	.create_virtdevice	= iblock_create_virtdevice,
+	.free_device		= iblock_free_device,
+	.dpo_emulated		= iblock_emulated_dpo,
+	.fua_write_emulated	= iblock_emulated_fua_write,
+	.fua_read_emulated	= iblock_emulated_fua_read,
+	.write_cache_emulated	= iblock_emulated_write_cache,
+	.alloc_task		= iblock_alloc_task,
+	.do_task		= iblock_do_task,
+	.do_discard		= iblock_do_discard,
+	.do_sync_cache		= iblock_emulate_sync_cache,
+	.free_task		= iblock_free_task,
+	.check_configfs_dev_params = iblock_check_configfs_dev_params,
+	.set_configfs_dev_params = iblock_set_configfs_dev_params,
+	.show_configfs_dev_params = iblock_show_configfs_dev_params,
+	.get_cdb		= iblock_get_cdb,
+	.get_device_rev		= iblock_get_device_rev,
+	.get_device_type	= iblock_get_device_type,
+	.get_blocks		= iblock_get_blocks,
+};
+
+static int __init iblock_module_init(void)
+{
+	return transport_subsystem_register(&iblock_template);
+}
+
+static void iblock_module_exit(void)
+{
+	transport_subsystem_release(&iblock_template);
+}
+
+MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iblock_module_init);
+module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 0000000..64c1f4d
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,40 @@
+#ifndef TARGET_CORE_IBLOCK_H
+#define TARGET_CORE_IBLOCK_H
+
+#define IBLOCK_VERSION		"4.0"
+
+#define IBLOCK_HBA_QUEUE_DEPTH	512
+#define IBLOCK_DEVICE_QUEUE_DEPTH	32
+#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH	128
+#define IBLOCK_MAX_CDBS		16
+#define IBLOCK_LBA_SHIFT	9
+
+struct iblock_req {
+	struct se_task ib_task;
+	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	atomic_t ib_bio_cnt;
+	atomic_t ib_bio_err_cnt;
+	struct bio *ib_bio;
+	struct iblock_dev *ib_dev;
+} ____cacheline_aligned;
+
+#define IBDF_HAS_UDEV_PATH		0x01
+#define IBDF_HAS_FORCE			0x02
+
+struct iblock_dev {
+	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
+	int	ibd_force;
+	int	ibd_major;
+	int	ibd_minor;
+	u32	ibd_depth;
+	u32	ibd_flags;
+	struct bio_set	*ibd_bio_set;
+	struct block_device *ibd_bd;
+	struct iblock_hba *ibd_host;
+} ____cacheline_aligned;
+
+struct iblock_hba {
+	int		iblock_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 0000000..2521f75
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4252 @@
+/*******************************************************************************
+ * Filename:  target_core_pr.c
+ *
+ * This file contains SPC-3 compliant persistent reservations and
+ * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
+ *
+ * Copyright (c) 2009, 2010 Rising Tide Systems
+ * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+/*
+ * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
+ */
+struct pr_transport_id_holder {
+	int dest_local_nexus;
+	struct t10_pr_registration *dest_pr_reg;
+	struct se_portal_group *dest_tpg;
+	struct se_node_acl *dest_node_acl;
+	struct se_dev_entry *dest_se_deve;
+	struct list_head dest_list;
+};
+
+int core_pr_dump_initiator_port(
+	struct t10_pr_registration *pr_reg,
+	char *buf,
+	u32 size)
+{
+	if (!(pr_reg->isid_present_at_reg))
+		return 0;
+
+	snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
+	return 1;
+}
+
+static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+			struct t10_pr_registration *, int);
+
+static int core_scsi2_reservation_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	switch (cdb[0]) {
+	case INQUIRY:
+	case RELEASE:
+	case RELEASE_10:
+		return 0;
+	default:
+		return 1;
+	}
+
+	return 1;
+}
+
+static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!(sess))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -1;
+	}
+	if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static int core_scsi2_reservation_release(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+
+	if (!(sess) || !(tpg))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+
+	if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	dev->dev_reserved_node_acl = NULL;
+	dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+		dev->dev_res_bin_isid = 0;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
+		" MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return 0;
+}
+
+static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+
+	if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
+	    (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
+		printk(KERN_ERR "LongIO and Obsolete Bits set, returning"
+				" ILLEGAL_REQUEST\n");
+		return PYX_TRANSPORT_ILLEGAL_REQUEST;
+	}
+	/*
+	 * This is currently the case for target_core_mod passthrough struct se_cmd
+	 * ops
+	 */
+	if (!(sess) || !(tpg))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_reserved_node_acl &&
+	   (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+		printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+			TPG_TFO(tpg)->get_fabric_name());
+		printk(KERN_ERR "Original reserver LUN: %u %s\n",
+			SE_LUN(cmd)->unpacked_lun,
+			dev->dev_reserved_node_acl->initiatorname);
+		printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
+			" from %s \n", SE_LUN(cmd)->unpacked_lun,
+			cmd->se_deve->mapped_lun,
+			sess->se_node_acl->initiatorname);
+		spin_unlock(&dev->dev_reservation_lock);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+	dev->dev_reserved_node_acl = sess->se_node_acl;
+	dev->dev_flags |= DF_SPC2_RESERVATIONS;
+	if (sess->sess_bin_isid != 0) {
+		dev->dev_res_bin_isid = sess->sess_bin_isid;
+		dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+		" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return 0;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+					struct se_node_acl *, struct se_session *);
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+/*
+ * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
+ * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
+ * thread context.
+ */
+int core_scsi2_emulate_crh(struct se_cmd *cmd)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+	struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
+	unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+	int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+	int conflict = 0;
+
+	if (!(se_sess))
+		return 0;
+
+	if (!(crh))
+		goto after_crh;
+
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+			se_sess);
+	if (pr_reg) {
+		/*
+		 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
+		 * behavior
+		 *
+		 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
+		 * status, but no reservation shall be established and the
+		 * persistent reservation shall not be changed, if the command
+		 * is received from a) and b) below.
+		 *
+		 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
+		 * status, but the persistent reservation shall not be released,
+		 * if the command is received from a) and b)
+		 *
+		 * a) An I_T nexus that is a persistent reservation holder; or
+		 * b) An I_T nexus that is registered if a registrants only or
+		 *    all registrants type persistent reservation is present.
+		 *
+		 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
+		 * RELEASE(6) command, or RELEASE(10) command shall be processed
+		 * as defined in SPC-2.
+		 */
+		if (pr_reg->pr_res_holder) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 0;
+		}
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 0;
+		}
+		core_scsi3_put_pr_reg(pr_reg);
+		conflict = 1;
+	} else {
+		/*
+		 * Following spc2r20 5.5.1 Reservations overview:
+		 *
+		 * If a logical unit has executed a PERSISTENT RESERVE OUT
+		 * command with the REGISTER or the REGISTER AND IGNORE
+		 * EXISTING KEY service action and is still registered by any
+		 * initiator, all RESERVE commands and all RELEASE commands
+		 * regardless of initiator shall conflict and shall terminate
+		 * with a RESERVATION CONFLICT status.
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+
+	if (conflict) {
+		printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+			" while active SPC-3 registrations exist,"
+			" returning RESERVATION_CONFLICT\n");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+after_crh:
+	if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
+		return core_scsi2_reservation_reserve(cmd);
+	else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
+		return core_scsi2_reservation_release(cmd);
+	else
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
+
+/*
+ * Begin SPC-3/SPC-4 Persistent Reservations emulation support
+ *
+ * This function is called for commands received from initiator ports that
+ * are *NOT* the active PR reservation holder while a reservation is present.
+ */
+static int core_scsi3_pr_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	struct se_dev_entry *se_deve;
+	struct se_session *se_sess = SE_SESS(cmd);
+	int other_cdb = 0, ignore_reg;
+	int registered_nexus = 0, ret = 1; /* Conflict by default */
+	int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
+	int we = 0; /* Write Exclusive */
+	int legacy = 0; /* Act like a legacy device and return
+			 * RESERVATION CONFLICT on some CDBs */
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_seq_non_holder(cmd,
+					cdb, pr_reg_type);
+
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Determine if the registration should be ignored due to
+	 * non-matching ISIDs in core_scsi3_pr_reservation_check().
+	 */
+	ignore_reg = (pr_reg_type & 0x80000000);
+	if (ignore_reg)
+		pr_reg_type &= ~0x80000000;
+
+	switch (pr_reg_type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		we = 1;
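+		/* fall through */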
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		/*
+		 * Some commands are only allowed for the persistent reservation
+		 * holder.
+		 */
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		we = 1;
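+		/* fall through */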
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		/*
+		 * Some commands are only allowed for registered I_T Nexuses.
+		 */
+		reg_only = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		we = 1;
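+		/* fall through */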
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		/*
+		 * Each registered I_T Nexus is a reservation holder.
+		 */
+		all_reg = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	default:
+		return -1;
+	}
+	/*
+	 * Referenced from spc4r17 table 45 for *NON* PR holder access
+	 */
+	switch (cdb[0]) {
+	case SECURITY_PROTOCOL_IN:
+		if (registered_nexus)
+			return 0;
+		ret = (we) ? 0 : 1;
+		break;
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case READ_ATTRIBUTE:
+	case READ_BUFFER:
+	case RECEIVE_DIAGNOSTIC:
+		if (legacy) {
+			ret = 1;
+			break;
+		}
+		if (registered_nexus) {
+			ret = 0;
+			break;
+		}
+		ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+		break;
+	case PERSISTENT_RESERVE_OUT:
+		/*
+		 * This follows PERSISTENT_RESERVE_OUT service actions that
+		 * are allowed in the presence of various reservations.
+		 * See spc4r17, table 46
+		 */
+		switch (cdb[1] & 0x1f) {
+		case PRO_CLEAR:
+		case PRO_PREEMPT:
+		case PRO_PREEMPT_AND_ABORT:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		case PRO_REGISTER:
+		case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+			ret = 0;
+			break;
+		case PRO_REGISTER_AND_MOVE:
+		case PRO_RESERVE:
+			ret = 1;
+			break;
+		case PRO_RELEASE:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		default:
+			printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+				" action: 0x%02x\n", cdb[1] & 0x1f);
+			return -1;
+		}
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
+		ret = 0;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
+		ret = 0;
+		break;
+	case TEST_UNIT_READY:
+		ret = (legacy) ? 1 : 0; /* Conflict for legacy */
+		break;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_MANAGEMENT_PROTOCOL_IN:
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_SUPPORTED_OPERATION_CODES:
+		case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+			if (legacy) {
+				ret = 1;
+				break;
+			}
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_ALIASES:
+		case MI_REPORT_IDENTIFYING_INFORMATION:
+		case MI_REPORT_PRIORITY:
+		case MI_REPORT_TARGET_PGS:
+		case MI_REPORT_TIMESTAMP:
+			ret = 0; /* Allowed */
+			break;
+		default:
+			printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+				(cdb[1] & 0x1f));
+			return -1;
+		}
+		break;
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case INQUIRY:
+	case LOG_SENSE:
+	case READ_MEDIA_SERIAL_NUMBER:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		ret = 0; /* Allowed CDBs */
+		break;
+	default:
+		other_cdb = 1;
+		break;
+	}
+	/*
+	 * Case where the CDB is explicitly allowed in the above switch
+	 * statement.
+	 */
+	if (!(ret) && !(other_cdb)) {
+#if 0
+		printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+			" reservation holder\n", cdb[0],
+			core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+		return ret;
+	}
+	/*
+	 * Handle the WRITE_EXCLUSIVE_* reservation types for initiator
+	 * ports that are *NOT* holding the reservation.
+	 */
+	if ((we) && !(registered_nexus)) {
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			/*
+			 * Conflict for write exclusive
+			 */
+			printk(KERN_INFO "%s Conflict for unregistered nexus"
+				" %s CDB: 0x%02x to %s reservation\n",
+				transport_dump_cmd_direction(cmd),
+				se_sess->se_node_acl->initiatorname, cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+			return 1;
+		} else {
+			/*
+			 * Allow non-WRITE CDBs for all Write Exclusive
+			 * PR TYPEs to pass for registered and
+			 * non-registered nexuses NOT holding the reservation.
+			 *
+			 * We only make noise for the unregistered nexuses,
+			 * as we expect registered non-reservation-holding
+			 * nexuses to issue CDBs.
+			 */
+#if 0
+			if (!(registered_nexus)) {
+				printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+					" for %s reservation on unregistered"
+					" nexus\n", cdb[0],
+					core_scsi3_pr_dump_type(pr_reg_type));
+			}
+#endif
+			return 0;
+		}
+	} else if ((reg_only) || (all_reg)) {
+		if (registered_nexus) {
+			/*
+			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+			 * allow commands from registered nexuses.
+			 */
+#if 0
+			printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+			return 0;
+		}
+	}
+	printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+		" for %s reservation\n", transport_dump_cmd_direction(cmd),
+		(registered_nexus) ? "" : "un",
+		se_sess->se_node_acl->initiatorname, cdb[0],
+		core_scsi3_pr_dump_type(pr_reg_type));
+
+	return 1; /* Conflict by default */
+}
+
+static u32 core_scsi3_pr_generation(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	u32 prg;
+	/*
+	 * PRGeneration field shall contain the value of a 32-bit wrapping
+	 * counter maintained by the device server.
+	 *
+	 * Note that this is done regardless of Active Persist across
+	 * Target PowerLoss (APTPL)
+	 *
+	 * See spc4r17 section 6.3.12 READ_KEYS service action
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	prg = T10_RES(su_dev)->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return prg;
+}
+
+static int core_scsi3_pr_reservation_check(
+	struct se_cmd *cmd,
+	u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!(sess))
+		return 0;
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_check(cmd, pr_reg_type);
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!(dev->dev_pr_res_holder)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	*pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+	cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+	if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -1;
+	}
+	if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
+	       sess->sess_bin_isid) ? 0 : -1;
+	/*
+	 * Use bit in *pr_reg_type to notify ISID mismatch in
+	 * core_scsi3_pr_seq_non_holder().
+	 */
+	if (ret != 0)
+		*pr_reg_type |= 0x80000000;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+		return NULL;
+	}
+
+	pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+					GFP_ATOMIC);
+	if (!(pr_reg->pr_aptpl_buf)) {
+		printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = nacl;
+	pr_reg->pr_reg_deve = deve;
+	pr_reg->pr_res_mapped_lun = deve->mapped_lun;
+	pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = aptpl;
+	pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
+	/*
+	 * If an ISID value for this SCSI Initiator Port exists,
+	 * save it to the registration now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+
+	return pr_reg;
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
+
+/*
+ * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
+ * modes.
+ */
+static struct t10_pr_registration *__core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_dev_entry *deve_tmp;
+	struct se_node_acl *nacl_tmp;
+	struct se_port *port, *port_tmp;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
+	int ret;
+	/*
+	 * Create a registration for the I_T Nexus upon which the
+	 * PROUT REGISTER was received.
+	 */
+	pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!(pr_reg))
+		return NULL;
+	/*
+	 * Return pointer to pr_reg for ALL_TG_PT=0
+	 */
+	if (!(all_tg_pt))
+		return pr_reg;
+	/*
+	 * Create list of matching SCSI Initiator Port registrations
+	 * for ALL_TG_PT=1
+	 */
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
+		atomic_inc(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(deve_tmp, &port->sep_alua_list,
+					alua_port_list) {
+			/*
+			 * This pointer will be NULL for demo mode MappedLUNs
+			 * that have not been made explicit via a ConfigFS
+			 * MappedLUN group for the SCSI Initiator Node ACL.
+			 */
+			if (!(deve_tmp->se_lun_acl))
+				continue;
+
+			nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+			/*
+			 * Skip the matching struct se_node_acl that is allocated
+			 * above..
+			 */
+			if (nacl == nacl_tmp)
+				continue;
+			/*
+			 * Only perform PR registrations for target ports on
+			 * the same fabric module as the REGISTER w/ ALL_TG_PT=1
+			 * arrived.
+			 */
+			if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
+				continue;
+			/*
+			 * Look for a matching Initiator Node ACL in ASCII format
+			 */
+			if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
+				continue;
+
+			atomic_inc(&deve_tmp->pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock_bh(&port->sep_alua_lock);
+			/*
+			 * Grab a configfs group dependency that is released
+			 * for the exception path at label out: below, or upon
+			 * completion of adding ALL_TG_PT=1 registrations in
+			 * __core_scsi3_add_registration()
+			 */
+			ret = core_scsi3_lunacl_depend_item(deve_tmp);
+			if (ret < 0) {
+				printk(KERN_ERR "core_scsi3_lunacl_depend"
+						"_item() failed\n");
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				goto out;
+			}
+			/*
+			 * Located a matching SCSI Initiator Port on a different
+			 * port, allocate the pr_reg_atp and attach it to the
+			 * pr_reg->pr_reg_atp_list that will be processed once
+			 * the original *pr_reg is processed in
+			 * __core_scsi3_add_registration()
+			 */
+			pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
+						nacl_tmp, deve_tmp, NULL,
+						sa_res_key, all_tg_pt, aptpl);
+			if (!(pr_reg_atp)) {
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_lunacl_undepend_item(deve_tmp);
+				goto out;
+			}
+
+			list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
+				      &pr_reg->pr_reg_atp_list);
+			spin_lock_bh(&port->sep_alua_lock);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&dev->se_port_lock);
+		atomic_dec(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return pr_reg;
+out:
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+	}
+	kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	return NULL;
+}
+
+int core_scsi3_alloc_aptpl_registration(
+	struct t10_reservation_template *pr_tmpl,
+	u64 sa_res_key,
+	unsigned char *i_port,
+	unsigned char *isid,
+	u32 mapped_lun,
+	unsigned char *t_port,
+	u16 tpgt,
+	u32 target_lun,
+	int res_holder,
+	int all_tg_pt,
+	u8 type)
+{
+	struct t10_pr_registration *pr_reg;
+
+	if (!(i_port) || !(t_port) || !(sa_res_key)) {
+		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+		return -1;
+	}
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+		return -1;
+	}
+	pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = NULL;
+	pr_reg->pr_reg_deve = NULL;
+	pr_reg->pr_res_mapped_lun = mapped_lun;
+	pr_reg->pr_aptpl_target_lun = target_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = 1;
+	pr_reg->pr_reg_tg_pt_lun = NULL;
+	pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
+	pr_reg->pr_res_type = type;
+	/*
+	 * If an ISID value had been saved in APTPL metadata for this
+	 * SCSI Initiator Port, restore it now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+	/*
+	 * Copy the i_port and t_port information from caller.
+	 */
+	snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
+	snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
+	pr_reg->pr_reg_tpgt = tpgt;
+	/*
+	 * Set pr_res_holder from the caller; the pr_reg that is the reservation
+	 * holder will get its pointer set in core_scsi3_aptpl_reserve() once
+	 * the Initiator Node LUN ACL from the fabric module is created for
+	 * this registration.
+	 */
+	pr_reg->pr_res_holder = res_holder;
+
+	list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
+	printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+			" metadata\n", (res_holder) ? "+reservation" : "");
+	return 0;
+}
+
+static void core_scsi3_aptpl_reserve(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_node_acl *node_acl,
+	struct t10_pr_registration *pr_reg)
+{
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	dev->dev_pr_res_holder = pr_reg;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+		" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		TPG_TFO(tpg)->get_fabric_name(),
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+		TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+}
+
+static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
+				struct t10_pr_registration *, int, int);
+
+static int __core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 target_lun,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
+	unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+	u16 tpgt;
+
+	memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+	memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+	/*
+	 * Copy Initiator Port information from struct se_node_acl
+	 */
+	snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
+	snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
+			TPG_TFO(tpg)->tpg_get_wwn(tpg));
+	tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+	/*
+	 * Look for the matching registrations+reservation from those
+	 * created from APTPL metadata.  Note that multiple registrations
+	 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
+	 * TransportIDs.
+	 */
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+		     (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+		    !(strcmp(pr_reg->pr_tport, t_port)) &&
+		     (pr_reg->pr_reg_tpgt == tpgt) &&
+		     (pr_reg->pr_aptpl_target_lun == target_lun)) {
+
+			pr_reg->pr_reg_nacl = nacl;
+			pr_reg->pr_reg_deve = deve;
+			pr_reg->pr_reg_tg_pt_lun = lun;
+
+			list_del(&pr_reg->pr_reg_aptpl_list);
+			spin_unlock(&pr_tmpl->aptpl_reg_lock);
+			/*
+			 * At this point all of the pointers in *pr_reg will
+			 * be set up, so go ahead and add the registration.
+			 */
+
+			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
+			/*
+			 * If this registration is the reservation holder,
+			 * make that happen now..
+			 */
+			if (pr_reg->pr_res_holder)
+				core_scsi3_aptpl_reserve(dev, tpg,
+						nacl, pr_reg);
+			/*
+			 * Reenable pr_aptpl_active to accept new metadata
+			 * updates once the SCSI device is active again..
+			 */
+			spin_lock(&pr_tmpl->aptpl_reg_lock);
+			pr_tmpl->pr_aptpl_active = 1;
+		}
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+
+	return 0;
+}
+
+int core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+	struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
+				lun->unpacked_lun, nacl, deve);
+}
+
+static void __core_scsi3_dump_registration(
+	struct target_core_fabric_ops *tfo,
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type)
+{
+	struct se_portal_group *se_tpg = nacl->se_tpg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
+		"_AND_MOVE" : (register_type == 1) ?
+		"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
+		(prf_isid) ? i_buf : "");
+	printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+		 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+		tfo->tpg_get_tag(se_tpg));
+	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n",  tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		TRANSPORT(dev)->name);
+	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x  APTPL: %d\n", tfo->get_fabric_name(),
+		pr_reg->pr_res_key, pr_reg->pr_res_generation,
+		pr_reg->pr_reg_aptpl);
+}
+
+/*
+ * This function may be called with struct se_device->dev_reservation_lock
+ * held when register_move = 1.
+ */
+static void __core_scsi3_add_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type,
+	int register_move)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+
+	/*
+	 * Increment PRgeneration counter for struct se_device upon a successful
+	 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
+	 *
+	 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
+	 * action, the struct se_device->dev_reservation_lock will already be held,
+	 * so we do not call core_scsi3_pr_generation() which grabs the lock
+	 * for the REGISTER.
+	 */
+	pr_reg->pr_res_generation = (register_move) ?
+			T10_RES(su_dev)->pr_generation++ :
+			core_scsi3_pr_generation(dev);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
+	pr_reg->pr_reg_deve->def_pr_registered = 1;
+
+	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
+	 */
+	if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+		return;
+	/*
+	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
+	 * allocated in __core_scsi3_alloc_registration()
+	 */
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+
+		pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		list_add_tail(&pr_reg_tmp->pr_reg_list,
+			      &pr_tmpl->registration_list);
+		pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
+
+		__core_scsi3_dump_registration(tfo, dev,
+				pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
+				register_type);
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Drop configfs group dependency reference from
+		 * __core_scsi3_alloc_registration()
+		 */
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+	}
+}
+
+static int core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl,
+	int register_type,
+	int register_move)
+{
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!(pr_reg))
+		return -1;
+
+	__core_scsi3_add_registration(dev, nacl, pr_reg,
+			register_type, register_move);
+	return 0;
+}
+
+static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	unsigned char *isid)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct se_portal_group *tpg;
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+		/*
+		 * First look for a matching struct se_node_acl
+		 */
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		/*
+		 * If this registration does NOT contain a fabric provided
+		 * ISID, then we have found a match.
+		 */
+		if (!(pr_reg->isid_present_at_reg)) {
+			/*
+			 * Determine if this SCSI device server requires that
+			 * SCSI Initiator TransportIDs w/ ISIDs are enforced
+			 * for fabric modules (iSCSI) requiring them.
+			 */
+			if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+				if (DEV_ATTRIB(dev)->enforce_pr_isids)
+					continue;
+			}
+			atomic_inc(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&pr_tmpl->registration_lock);
+			return pr_reg;
+		}
+		/*
+		 * If the *pr_reg contains a fabric defined ISID for multi-value
+		 * SCSI Initiator Port TransportIDs, then we expect a valid
+		 * matching ISID to be provided by the local SCSI Initiator Port.
+		 */
+		if (!(isid))
+			continue;
+		if (strcmp(isid, pr_reg->pr_reg_isid))
+			continue;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
+		return pr_reg;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	return NULL;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_session *sess)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+	unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+
+	if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+		memset(&buf[0], 0, PR_REG_ISID_LEN);
+		TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+					PR_REG_ISID_LEN);
+		isid_ptr = &buf[0];
+	}
+
+	return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
+}
+
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
+{
+	atomic_dec(&pr_reg->pr_res_holders);
+	smp_mb__after_atomic_dec();
+}
+
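+/*
+ * Returns 1 if an implicit RELEASE of the reservation was performed, 0 if the
+ * registration being released is not the reservation holder, and -1 if an
+ * ALL_TG_PT=1 UNREGISTER must be rejected because another I_T nexus of the
+ * same initiator still holds the reservation with a matching key.
+ */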
+static int core_scsi3_check_implict_release(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct t10_pr_registration *pr_res_holder;
+	int ret = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return ret;
+	}
+	if (pr_res_holder == pr_reg) {
+		/*
+		 * Perform an implicit RELEASE if the registration that
+		 * is being released is holding the reservation.
+		 *
+		 * From spc4r17, section 5.7.11.1:
+		 *
+		 * e) If the I_T nexus is the persistent reservation holder
+		 *    and the persistent reservation is not an all registrants
+		 *    type, then a PERSISTENT RESERVE OUT command with REGISTER
+		 *    service action or REGISTER AND IGNORE EXISTING KEY
+		 *    service action with the SERVICE ACTION RESERVATION KEY
+		 *    field set to zero (see 5.7.11.3).
+		 */
+		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+		ret = 1;
+		/*
+		 * For 'All Registrants' reservation types, all existing
+		 * registrations are still processed as reservation holders
+		 * in core_scsi3_pr_seq_non_holder() after the initial
+		 * reservation holder is implicitly released here.
+		 */
+	} else if (pr_reg->pr_reg_all_tg_pt &&
+		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
+			  pr_reg->pr_reg_nacl->initiatorname)) &&
+		  (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+			" UNREGISTER while existing reservation with matching"
+			" key 0x%016Lx is present from another SCSI Initiator"
+			" Port\n", pr_reg->pr_res_key);
+		ret = -1;
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct t10_reservation_template->registration_lock held.
+ */
+static void __core_scsi3_free_registration(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int dec_holders)
+{
+	struct target_core_fabric_ops *tfo =
+			pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	pr_reg->pr_reg_deve->def_pr_registered = 0;
+	pr_reg->pr_reg_deve->pr_res_key = 0;
+	list_del(&pr_reg->pr_reg_list);
+	/*
+	 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
+	 * so call core_scsi3_put_pr_reg() to decrement our reference.
+	 */
+	if (dec_holders)
+		core_scsi3_put_pr_reg(pr_reg);
+	/*
+	 * Wait until all references from any other I_T nexuses for this
+	 * *pr_reg have been released.  Because list_del() is called above,
+	 * the last core_scsi3_put_pr_reg(pr_reg) will release this reference
+	 * count back to zero, and we release *pr_reg.
+	 */
+	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+		spin_unlock(&pr_tmpl->registration_lock);
+		printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+				tfo->get_fabric_name());
+		cpu_relax();
+		spin_lock(&pr_tmpl->registration_lock);
+	}
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(),
+		pr_reg->pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n", tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		TRANSPORT(dev)->name);
+	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+		pr_reg->pr_res_generation);
+
+	if (!(preempt_and_abort_list)) {
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return;
+	}
+	/*
+	 * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list
+	 * are released once the ABORT_TASK_SET has completed..
+	 */
+	list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+	struct se_device *dev,
+	struct se_node_acl *nacl)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+	/*
+	 * If the passed se_node_acl matches the reservation holder,
+	 * release the reservation.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder != NULL) &&
+	    (pr_res_holder->pr_reg_nacl == nacl))
+		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Release any registration associated with the struct se_node_acl.
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+}
+
+void core_scsi3_free_all_registrations(
+	struct se_device *dev)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder != NULL) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+				pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		list_del(&pr_reg->pr_reg_aptpl_list);
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+}
+
+static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
+{
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&tpg->tpg_group.cg_item);
+}
+
+static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
+{
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&tpg->tpg_group.cg_item);
+
+	atomic_dec(&tpg->tpg_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl)
+		return 0;
+
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&nacl->acl_group.cg_item);
+}
+
+static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl) {
+		atomic_dec(&nacl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&nacl->acl_group.cg_item);
+
+	atomic_dec(&nacl->acl_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!(lun_acl))
+		return 0;
+
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+}
+
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!(lun_acl)) {
+		atomic_dec(&se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+
+	atomic_dec(&se_deve->pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_decode_spec_i_port(
+	struct se_cmd *cmd,
+	struct se_portal_group *tpg,
+	unsigned char *l_isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_port *tmp_port;
+	struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_node_acl *dest_node_acl = NULL;
+	struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+	struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct list_head tid_dest_list;
+	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+	struct target_core_fabric_ops *tmp_tf_ops;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tpdl, tid_len = 0;
+	int ret, dest_local_nexus, prf_isid;
+	u32 dest_rtpi = 0;
+
+	memset(dest_iport, 0, 64);
+	INIT_LIST_HEAD(&tid_dest_list);
+
+	local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Allocate a struct pr_transport_id_holder, set up the
+	 * local_node_acl and local_se_deve pointers, and add it to
+	 * struct list_head tid_dest_list for the add-registration
+	 * processing in the tid_dest_list loop below.
+	 */
+	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+	if (!(tidh_new)) {
+		printk(KERN_ERR "Unable to allocate tidh_new\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	INIT_LIST_HEAD(&tidh_new->dest_list);
+	tidh_new->dest_tpg = tpg;
+	tidh_new->dest_node_acl = se_sess->se_node_acl;
+	tidh_new->dest_se_deve = local_se_deve;
+
+	local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+				se_sess->se_node_acl, local_se_deve, l_isid,
+				sa_res_key, all_tg_pt, aptpl);
+	if (!(local_pr_reg)) {
+		kfree(tidh_new);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	tidh_new->dest_pr_reg = local_pr_reg;
+	/*
+	 * The local I_T nexus does not hold any configfs dependencies,
+	 * so we set tid_h->dest_local_nexus=1 to prevent the
+	 * configfs_undepend_item() calls in the tid_dest_list loops below.
+	 */
+	tidh_new->dest_local_nexus = 1;
+	list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+	/*
+	 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
+	 * first extract TransportID Parameter Data Length, and make sure
+	 * the value matches up to the SCSI expected data transfer length.
+	 */
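+	/* Open-coded big-endian read, equivalent to get_unaligned_be32(&buf[24]). */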
+	tpdl = (buf[24] & 0xff) << 24;
+	tpdl |= (buf[25] & 0xff) << 16;
+	tpdl |= (buf[26] & 0xff) << 8;
+	tpdl |= buf[27] & 0xff;
+
+	if ((tpdl + 28) != cmd->data_length) {
+		printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+			" does not equal CDB data_length: %u\n", tpdl,
+			cmd->data_length);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	/*
+	 * Start processing the received transport IDs using the
+	 * receiving I_T Nexus portal's fabric dependent methods to
+	 * obtain the SCSI Initiator Port/Device Identifiers.
+	 */
+	ptr = &buf[28];
+
+	while (tpdl > 0) {
+		proto_ident = (ptr[0] & 0x0f);
+		dest_tpg = NULL;
+
+		spin_lock(&dev->se_port_lock);
+		list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
+			tmp_tpg = tmp_port->sep_tpg;
+			if (!(tmp_tpg))
+				continue;
+			tmp_tf_ops = TPG_TFO(tmp_tpg);
+			if (!(tmp_tf_ops))
+				continue;
+			if (!(tmp_tf_ops->get_fabric_proto_ident) ||
+			    !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+				continue;
+			/*
+			 * Look for the matching proto_ident provided by
+			 * the received TransportID
+			 */
+			tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
+			if (tmp_proto_ident != proto_ident)
+				continue;
+			dest_rtpi = tmp_port->sep_rtpi;
+
+			i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
+					tmp_tpg, (const char *)ptr, &tid_len,
+					&iport_ptr);
+			if (!(i_str))
+				continue;
+
+			atomic_inc(&tmp_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&dev->se_port_lock);
+
+			ret = core_scsi3_tpg_depend_item(tmp_tpg);
+			if (ret != 0) {
+				printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+					" for tmp_tpg\n");
+				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				goto out;
+			}
+			/*
+			 * Locate the destination initiator ACL to be registered
+			 * from the decoded fabric module specific TransportID
+			 * at *i_str.
+			 */
+			spin_lock_bh(&tmp_tpg->acl_node_lock);
+			dest_node_acl = __core_tpg_get_initiator_node_acl(
+						tmp_tpg, i_str);
+			if (dest_node_acl) {
+				atomic_inc(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_inc();
+			}
+			spin_unlock_bh(&tmp_tpg->acl_node_lock);
+
+			if (!(dest_node_acl)) {
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				spin_lock(&dev->se_port_lock);
+				continue;
+			}
+
+			ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+			if (ret != 0) {
+				printk(KERN_ERR "configfs_depend_item() failed"
+					" for dest_node_acl->acl_group\n");
+				atomic_dec(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				goto out;
+			}
+
+			dest_tpg = tmp_tpg;
+			printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+				" %s Port RTPI: %hu\n",
+				TPG_TFO(dest_tpg)->get_fabric_name(),
+				dest_node_acl->initiatorname, dest_rtpi);
+
+			spin_lock(&dev->se_port_lock);
+			break;
+		}
+		spin_unlock(&dev->se_port_lock);
+
+		if (!(dest_tpg)) {
+			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+					" dest_tpg\n");
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+#if 0
+		printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+			" tid_len: %d for %s + %s\n",
+			TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+			tpdl, tid_len, i_str, iport_ptr);
+#endif
+		if (tid_len > tpdl) {
+			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+				" %u for Transport ID: %s\n", tid_len, ptr);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		/*
+		 * Locate the destination struct se_dev_entry pointer for matching
+		 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+		 * Target Port.
+		 */
+		dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+					dest_rtpi);
+		if (!(dest_se_deve)) {
+			printk(KERN_ERR "Unable to locate %s dest_se_deve"
+				" from destination RTPI: %hu\n",
+				TPG_TFO(dest_tpg)->get_fabric_name(),
+				dest_rtpi);
+
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+
+		ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+		if (ret < 0) {
+			printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+					" failed\n");
+			atomic_dec(&dest_se_deve->pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			goto out;
+		}
+#if 0
+		printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+			" dest_se_deve mapped_lun: %u\n",
+			TPG_TFO(dest_tpg)->get_fabric_name(),
+			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+#endif
+		/*
+		 * Skip any TransportIDs that already have a registration for
+		 * this target port.
+		 */
+		pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+		if (pr_reg_e) {
+			core_scsi3_put_pr_reg(pr_reg_e);
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ptr += tid_len;
+			tpdl -= tid_len;
+			tid_len = 0;
+			continue;
+		}
+		/*
+		 * Allocate a struct pr_transport_id_holder and setup
+		 * the dest_node_acl and dest_se_deve pointers for the
+		 * loop below.
+		 */
+		tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+				GFP_KERNEL);
+		if (!(tidh_new)) {
+			printk(KERN_ERR "Unable to allocate tidh_new\n");
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			goto out;
+		}
+		INIT_LIST_HEAD(&tidh_new->dest_list);
+		tidh_new->dest_tpg = dest_tpg;
+		tidh_new->dest_node_acl = dest_node_acl;
+		tidh_new->dest_se_deve = dest_se_deve;
+
+		/*
+		 * Allocate, but do NOT add the registration for the
+		 * TransportID referenced SCSI Initiator port.  This is
+		 * done because of the following from spc4r17, section
+		 * 6.14.3 wrt SPEC_I_PT:
+		 *
+		 * "If a registration fails for any initiator port (e.g., if th
+		 * logical unit does not have enough resources available to
+		 * hold the registration information), no registrations shall be
+		 * made, and the command shall be terminated with
+		 * CHECK CONDITION status."
+		 *
+		 * That means we call __core_scsi3_alloc_registration() here,
+		 * and then call __core_scsi3_add_registration() in the
+		 * 2nd loop which will never fail.
+		 */
+		dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, all_tg_pt, aptpl);
+		if (!(dest_pr_reg)) {
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			kfree(tidh_new);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		tidh_new->dest_pr_reg = dest_pr_reg;
+		list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+		ptr += tid_len;
+		tpdl -= tid_len;
+		tid_len = 0;
+
+	}
+	/*
+	 * Go ahead and create registrations from tid_dest_list for the
+	 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
+	 * and dest_se_deve.
+	 *
+	 * The SA Reservation Key from the PROUT is set for the
+	 * registration, and ALL_TG_PT is also passed.  ALL_TG_PT=1
+	 * means that the TransportID Initiator port will be
+	 * registered on all of the target ports in the SCSI target device;
+	 * ALL_TG_PT=0 means the registration will only be for the
+	 * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1
+	 * was received.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
+						PR_REG_ISID_ID_LEN);
+
+		__core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+					dest_pr_reg, 0, 0);
+
+		printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+			" registered Transport ID for Node: %s%s Mapped LUN:"
+			" %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+			dest_node_acl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", dest_se_deve->mapped_lun);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+
+	return 0;
+out:
+	/*
+	 * For the failure case, release everything from tid_dest_list
+	 * including *dest_pr_reg and the configfs dependencies.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+		/*
+		 * Release any extra ALL_TG_PT=1 registrations for
+		 * the SPEC_I_PT=1 case.
+		 */
+		list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+				&dest_pr_reg->pr_reg_atp_list,
+				pr_reg_atp_mem_list) {
+			list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+			core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+		}
+
+		kfree(dest_pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held
+ */
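+/*
+ * The buffer filled in below uses a simple line-based key=value layout, with
+ * each registration bracketed by "PR_REG_START: <n>" and "PR_REG_END: <n>"
+ * markers.  An illustrative (made-up) non-holder entry:
+ *
+ *   PR_REG_START: 0
+ *   initiator_fabric=iSCSI
+ *   initiator_node=iqn.1994-05.com.example:initiator
+ *   sa_res_key=4660
+ *   res_holder=0
+ *   res_all_tg_pt=0
+ *   mapped_lun=0
+ *   target_fabric=iSCSI
+ *   target_node=iqn.2003-01.org.example:target
+ *   tpgt=1
+ *   port_rtpi=1
+ *   target_lun=0
+ *   PR_REG_END: 0
+ *
+ * An "initiator_sid=<isid>" line follows initiator_node when the registration
+ * carried an ISID, and the reservation holder entry additionally carries
+ * res_type= and res_scope= fields with res_holder=1.
+ */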
+static int __core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char tmp[512], isid_buf[32];
+	ssize_t len = 0;
+	int reg_count = 0;
+
+	memset(buf, 0, pr_aptpl_buf_len);
+	/*
+	 * Called to clear metadata once APTPL has been deactivated.
+	 */
+	if (clear_aptpl_metadata) {
+		snprintf(buf, pr_aptpl_buf_len,
+				"No Registrations or Reservations\n");
+		return 0;
+	}
+	/*
+	 * Walk the registration list..
+	 */
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+
+		tmp[0] = '\0';
+		isid_buf[0] = '\0';
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		lun = pr_reg->pr_reg_tg_pt_lun;
+		/*
+		 * Write out any ISID value to APTPL metadata that was included
+		 * in the original registration.
+		 */
+		if (pr_reg->isid_present_at_reg)
+			snprintf(isid_buf, 32, "initiator_sid=%s\n",
+					pr_reg->pr_reg_isid);
+		/*
+		 * Include special metadata if the pr_reg matches the
+		 * reservation holder.
+		 */
+		if (dev->dev_pr_res_holder == pr_reg) {
+			snprintf(tmp, 512, "PR_REG_START: %d"
+				"\ninitiator_fabric=%s\n"
+				"initiator_node=%s\n%s"
+				"sa_res_key=%llu\n"
+				"res_holder=1\nres_type=%02x\n"
+				"res_scope=%02x\nres_all_tg_pt=%d\n"
+				"mapped_lun=%u\n", reg_count,
+				TPG_TFO(tpg)->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_res_type,
+				pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		} else {
+			snprintf(tmp, 512, "PR_REG_START: %d\n"
+				"initiator_fabric=%s\ninitiator_node=%s\n%s"
+				"sa_res_key=%llu\nres_holder=0\n"
+				"res_all_tg_pt=%d\nmapped_lun=%u\n",
+				reg_count, TPG_TFO(tpg)->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		}
+
+		if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+			printk(KERN_ERR "Unable to update renaming"
+				" APTPL metadata\n");
+			spin_unlock(&T10_RES(su_dev)->registration_lock);
+			return -1;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+
+		/*
+		 * Include information about the associated SCSI target port.
+		 */
+		snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
+			"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+			" %d\n", TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+
+		if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+			printk(KERN_ERR "Unable to update renaming"
+				" APTPL metadata\n");
+			spin_unlock(&T10_RES(su_dev)->registration_lock);
+			return -1;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+		reg_count++;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	if (!(reg_count))
+		len += sprintf(buf+len, "No Registrations or Reservations");
+
+	return 0;
+}
+
+static int core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	int ret;
+
+	spin_lock(&dev->dev_reservation_lock);
+	ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->aptpl_file_mutex held
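+ * Writes the serialized registrations to /var/target/pr/aptpl_<unit_serial>.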
+ */
+static int __core_scsi3_write_aptpl_to_file(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len)
+{
+	struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+	struct file *file;
+	struct iovec iov[1];
+	mm_segment_t old_fs;
+	int flags = O_RDWR | O_CREAT | O_TRUNC;
+	char path[512];
+	int ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+	memset(path, 0, 512);
+
+	if (strlen(&wwn->unit_serial[0]) > 512) {
+		printk(KERN_ERR "WWN value for struct se_device does not fit"
+			" into path buffer\n");
+		return -1;
+	}
+
+	snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+			" failed\n", path);
+		return -1;
+	}
+
+	iov[0].iov_base = &buf[0];
+	if (!(pr_aptpl_buf_len))
+		iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
+	else
+		iov[0].iov_len = pr_aptpl_buf_len;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		printk("Error writing APTPL metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -1;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+static int core_scsi3_update_and_write_aptpl(
+	struct se_device *dev,
+	unsigned char *in_buf,
+	u32 in_pr_aptpl_buf_len)
+{
+	unsigned char null_buf[64], *buf;
+	u32 pr_aptpl_buf_len;
+	int ret, clear_aptpl_metadata = 0;
+	/*
+	 * Can be called with a NULL pointer from PROUT service action CLEAR
+	 */
+	if (!(in_buf)) {
+		memset(null_buf, 0, 64);
+		buf = &null_buf[0];
+		/*
+		 * This will clear the APTPL metadata to:
+		 * "No Registrations or Reservations" status
+		 */
+		pr_aptpl_buf_len = 64;
+		clear_aptpl_metadata = 1;
+	} else {
+		buf = in_buf;
+		pr_aptpl_buf_len = in_pr_aptpl_buf_len;
+	}
+
+	ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	if (ret != 0)
+		return -1;
+	/*
+	 * __core_scsi3_write_aptpl_to_file() will call strlen()
+	 * on the passed buf to determine pr_aptpl_buf_len.
+	 */
+	ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
+	if (ret != 0)
+		return -1;
+
+	return ret;
+}
+
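+/*
+ * PROUT REGISTER (and REGISTER_AND_IGNORE_EXISTING_KEY when ignore_key=1):
+ * creates a new registration for an unregistered I_T nexus, and for an
+ * existing registration either unregisters it (sa_res_key=0) or changes
+ * its reservation key, updating the APTPL metadata according to the
+ * APTPL bit.
+ */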
+static int core_scsi3_emulate_pro_register(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int all_tg_pt,
+	int spec_i_pt,
+	int ignore_key)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	/* Used for APTPL metadata w/ UNREGISTER */
+	unsigned char *pr_aptpl_buf = NULL;
+	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+	int pr_holder = 0, ret = 0, type;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	se_tpg = se_sess->se_tpg;
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+
+	if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+		memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
+		TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+				PR_REG_ISID_LEN);
+		isid_ptr = &isid_buf[0];
+	}
+	/*
+	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+	 */
+	pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!(pr_reg_e)) {
+		if (res_key) {
+			printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+				" for SA REGISTER, returning CONFLICT\n");
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * Do nothing but return GOOD status.
+		 */
+		if (!(sa_res_key))
+			return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
+		if (!(spec_i_pt)) {
+			/*
+			 * Perform the Service Action REGISTER on the Initiator
+			 * Port Endpoint that the PRO was received from on the
+			 * Logical Unit of the SCSI device server.
+			 */
+			ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+					se_sess->se_node_acl, se_deve, isid_ptr,
+					sa_res_key, all_tg_pt, aptpl,
+					ignore_key, 0);
+			if (ret != 0) {
+				printk(KERN_ERR "Unable to allocate"
+					" struct t10_pr_registration\n");
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			}
+		} else {
+			/*
+			 * Register both the Initiator port that received
+			 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
+			 * TransportID from Parameter list and loop through
+			 * fabric dependent parameter list while calling
+			 * logic from core_scsi3_alloc_registration() for
+			 * each TransportID provided SCSI Initiator Port/Device
+			 */
+			ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+					isid_ptr, sa_res_key, all_tg_pt, aptpl);
+			if (ret != 0)
+				return ret;
+		}
+		/*
+		 * Nothing left to do for the APTPL=0 case.
+		 */
+		if (!(aptpl)) {
+			pr_tmpl->pr_aptpl_active = 0;
+			core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+			printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+					" REGISTER\n");
+			return 0;
+		}
+		/*
+		 * Locate the newly allocated local I_T Nexus *pr_reg, and
+		 * update the APTPL metadata information using its
+		 * preallocated *pr_reg->pr_aptpl_buf.
+		 */
+		pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+				se_sess->se_node_acl, se_sess);
+
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret)) {
+			pr_tmpl->pr_aptpl_active = 1;
+			printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg);
+		return ret;
+	} else {
+		/*
+		 * Locate the existing *pr_reg via struct se_node_acl pointers
+		 */
+		pr_reg = pr_reg_e;
+		type = pr_reg->pr_res_type;
+
+		if (!(ignore_key)) {
+			if (res_key != pr_reg->pr_res_key) {
+				printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+					" res_key: 0x%016Lx does not match"
+					" existing SA REGISTER res_key:"
+					" 0x%016Lx\n", res_key,
+					pr_reg->pr_res_key);
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			}
+		}
+		if (spec_i_pt) {
+			printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+				" set while sa_res_key=0\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+		/*
+		 * An existing ALL_TG_PT=1 registration being released
+		 * must also set ALL_TG_PT=1 in the incoming PROUT.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+			printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+				" registration exists, but ALL_TG_PT=1 bit not"
+				" present in received PROUT\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_INVALID_CDB_FIELD;
+		}
+		/*
+		 * Allocate APTPL metadata buffer used for UNREGISTER ops
+		 */
+		if (aptpl) {
+			pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+						GFP_KERNEL);
+			if (!(pr_aptpl_buf)) {
+				printk(KERN_ERR "Unable to allocate"
+					" pr_aptpl_buf\n");
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_LU_COMM_FAILURE;
+			}
+		}
+		/*
+		 * sa_res_key=0: Unregister Reservation Key for registered I_T
+		 * Nexus.  sa_res_key!=0: Change Reservation Key for registered
+		 * I_T Nexus.
+		 */
+		if (!(sa_res_key)) {
+			pr_holder = core_scsi3_check_implict_release(
+					SE_DEV(cmd), pr_reg);
+			if (pr_holder < 0) {
+				kfree(pr_aptpl_buf);
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			}
+
+			spin_lock(&pr_tmpl->registration_lock);
+			/*
+			 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+			 * and matching pr_res_key.
+			 */
+			if (pr_reg->pr_reg_all_tg_pt) {
+				list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					if (!(pr_reg_p->pr_reg_all_tg_pt))
+						continue;
+
+					if (pr_reg_p->pr_res_key != res_key)
+						continue;
+
+					if (pr_reg == pr_reg_p)
+						continue;
+
+					if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+						   pr_reg_p->pr_reg_nacl->initiatorname))
+						continue;
+
+					__core_scsi3_free_registration(dev,
+							pr_reg_p, NULL, 0);
+				}
+			}
+			/*
+			 * Release the calling I_T Nexus registration now..
+			 */
+			__core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+							NULL, 1);
+			/*
+			 * From spc4r17, section 5.7.11.3 Unregistering
+			 *
+			 * If the persistent reservation is a registrants only
+			 * type, the device server shall establish a unit
+			 * attention condition for the initiator port associated
+			 * with every registered I_T nexus except for the I_T
+			 * nexus on which the PERSISTENT RESERVE OUT command was
+			 * received, with the additional sense code set to
+			 * RESERVATIONS RELEASED.
+			 */
+			if (pr_holder &&
+			   ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+			    (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
+				list_for_each_entry(pr_reg_p,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					core_scsi3_ua_allocate(
+						pr_reg_p->pr_reg_nacl,
+						pr_reg_p->pr_res_mapped_lun,
+						0x2A,
+						ASCQ_2AH_RESERVATIONS_RELEASED);
+				}
+			}
+			spin_unlock(&pr_tmpl->registration_lock);
+
+			if (!(aptpl)) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for UNREGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret)) {
+				pr_tmpl->pr_aptpl_active = 1;
+				printk("SPC-3 PR: Set APTPL Bit Activated"
+						" for UNREGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			return ret;
+		} else {
+			/*
+			 * Increment PRgeneration counter for struct se_device
+			 * upon a successful REGISTER, see spc4r17 section 6.3.2
+			 * READ_KEYS service action.
+			 */
+			pr_reg->pr_res_generation = core_scsi3_pr_generation(
+							SE_DEV(cmd));
+			pr_reg->pr_res_key = sa_res_key;
+			printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+				" Key for %s to: 0x%016Lx PRgeneration:"
+				" 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+				(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+				pr_reg->pr_reg_nacl->initiatorname,
+				pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+			if (!(aptpl)) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				core_scsi3_put_pr_reg(pr_reg);
+				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for REGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret)) {
+				pr_tmpl->pr_aptpl_active = 1;
+				printk("SPC-3 PR: Set APTPL Bit Activated"
+						" for REGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			core_scsi3_put_pr_reg(pr_reg);
+		}
+	}
+	return 0;
+}
+
+unsigned char *core_scsi3_pr_dump_type(int type)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		return "Write Exclusive Access";
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		return "Exclusive Access";
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		return "Write Exclusive Access, Registrants Only";
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		return "Exclusive Access, Registrants Only";
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		return "Write Exclusive Access, All Registrants";
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return "Exclusive Access, All Registrants";
+	default:
+		break;
+	}
+
+	return "Unknown SPC-3 PR Type";
+}
+
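+/*
+ * PROUT RESERVE: requires an existing registration with a matching
+ * reservation key and LU_SCOPE.  Returns RESERVATION CONFLICT if another
+ * I_T nexus already holds the reservation or if the holder attempts to
+ * change TYPE/SCOPE, GOOD status if the holder re-reserves with the same
+ * TYPE/SCOPE, and otherwise makes *pr_reg the reservation holder.
+ */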
+static int core_scsi3_pro_reserve(
+	struct se_cmd *cmd,
+	struct se_device *dev,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int ret, prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	se_tpg = se_sess->se_tpg;
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RESERVE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * An application client creates a persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RESERVE service action through
+	 * a registered I_T nexus with the following parameters:
+	 *    a) RESERVATION KEY set to the value of the reservation key that is
+	 * 	 registered with the logical unit for the I_T nexus; and
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * From above:
+	 *  b) TYPE field and SCOPE field set to the persistent reservation
+	 *     being created.
+	 *
+	 * Only one persistent reservation is allowed at a time per logical unit
+	 * and that persistent reservation has a scope of LU_SCOPE.
+	 */
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * See if we have an existing PR reservation holder pointer at
+	 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
+	 * *pr_res_holder.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder)) {
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command from an I_T nexus other than a persistent reservation
+		 * holder (see 5.7.10) that attempts to create a persistent
+		 * reservation when a persistent reservation already exists for
+		 * the logical unit, then the command shall be completed with
+		 * RESERVATION CONFLICT status.
+		 */
+		if (pr_res_holder != pr_reg) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s while reservation already held by"
+				" [%s]: %s, returning RESERVATION_CONFLICT\n",
+				CMD_TFO(cmd)->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If a persistent reservation holder attempts to modify the
+		 * type or scope of an existing persistent reservation, the
+		 * command shall be completed with RESERVATION CONFLICT status.
+		 */
+		if ((pr_res_holder->pr_res_type != type) ||
+		    (pr_res_holder->pr_res_scope != scope)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s trying to change TYPE and/or SCOPE,"
+				" while reservation already held by [%s]: %s,"
+				" returning RESERVATION_CONFLICT\n",
+				CMD_TFO(cmd)->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command with RESERVE service action where the TYPE field and
+		 * the SCOPE field contain the same values as the existing type
+		 * and scope from a persistent reservation holder, it shall not
+		 * make any change to the existing persistent reservation and
+		 * shall complete the command with GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
+	/*
+	 * Otherwise, our *pr_reg becomes the PR reservation holder for said
+	 * TYPE/SCOPE.  Also set the received scope and type in *pr_reg.
+	 */
+	pr_reg->pr_res_scope = scope;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_holder = 1;
+	dev->dev_pr_res_holder = pr_reg;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+			CMD_TFO(cmd)->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			(prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+					" for RESERVE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_reserve(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
+		break;
+	default:
+		printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+			" 0x%02x\n", type);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_release(
+	struct se_device *dev,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int explict)
+{
+	struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Go ahead and release the current PR reservation holder.
+	 */
+	dev->dev_pr_res_holder = NULL;
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (explict) ? "explicit" : "implicit",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+		tfo->get_fabric_name(), se_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	/*
+	 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
+	 */
+	pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
+}
+
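+/*
+ * PROUT RELEASE: completes with GOOD status when no reservation is held
+ * or the caller is not the holder, returns RESERVATION CONFLICT on a
+ * reservation key or TYPE/SCOPE mismatch, and otherwise releases the
+ * reservation, establishing RESERVATIONS RELEASED unit attentions for
+ * the other registered I_T nexuses of registrants only and all
+ * registrants type reservations.
+ */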
+static int core_scsi3_emulate_pro_release(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	int ret, all_reg = 0;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RELEASE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * If there is no persistent reservation or in response to a persistent
+	 * reservation release request from a registered I_T nexus that is not a
+	 * persistent reservation holder (see 5.7.10), the device server shall
+	 * do the following:
+	 *
+	 *     a) Not release the persistent reservation, if any;
+	 *     b) Not remove any registrations; and
+	 *     c) Complete the command with GOOD status.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		/*
+		 * No persistent reservation, return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+		all_reg = 1;
+
+	if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+		/*
+		 * Non 'All Registrants' PR Type cases..
+		 * Release request from a registered I_T nexus that is not a
+		 * persistent reservation holder. return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * Only the persistent reservation holder (see 5.7.10) is allowed to
+	 * release a persistent reservation.
+	 *
+	 * An application client releases the persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RELEASE service action through
+	 * an I_T nexus that is a persistent reservation holder with the
+	 * following parameters:
+	 *
+	 *     a) RESERVATION KEY field set to the value of the reservation key
+	 *	  that is registered with the logical unit for the I_T nexus;
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing and above:
+	 *
+	 * b) TYPE field and SCOPE field set to match the persistent
+	 *    reservation being released.
+	 */
+	if ((pr_res_holder->pr_res_type != type) ||
+	    (pr_res_holder->pr_res_scope != scope)) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+			" reservation from [%s]: %s with different TYPE"
+			" and/or SCOPE while reservation already held by"
+			" [%s]: %s, returning RESERVATION_CONFLICT\n",
+			CMD_TFO(cmd)->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+			pr_res_holder->pr_reg_nacl->initiatorname);
+
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * In response to a persistent reservation release request from the
+	 * persistent reservation holder the device server shall perform a
+	 * release by doing the following as an uninterrupted series of actions:
+	 * a) Release the persistent reservation;
+	 * b) Not remove any registration(s);
+	 * c) If the released persistent reservation is a registrants only type
+	 * or all registrants type persistent reservation,
+	 *    the device server shall establish a unit attention condition for
+	 *    the initiator port associated with every registered I_T nexus
+	 *    other than the I_T nexus on which the PERSISTENT RESERVE OUT
+	 *    command with RELEASE service action was received,
+	 *    with the additional sense code set to RESERVATIONS RELEASED; and
+	 * d) If the persistent reservation is of any other type, the device
+	 *    server shall not establish a unit attention condition.
+	 */
+	__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+			pr_reg, 1);
+
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+	    (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		/*
+		 * If no UNIT ATTENTION conditions will be established for
+		 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS
+		 * go ahead and check for APTPL=1 update+write below
+		 */
+		goto write_aptpl;
+	}
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+			pr_reg_list) {
+		/*
+		 * Do not establish a UNIT ATTENTION condition
+		 * for the calling I_T Nexus
+		 */
+		if (pr_reg_p == pr_reg)
+			continue;
+
+		core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+				pr_reg_p->pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
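+/*
+ * PROUT CLEAR: after validating the reservation key, releases any held
+ * reservation, removes every registration, and establishes RESERVATIONS
+ * PREEMPTED unit attentions for all registered I_T nexuses other than
+ * the one on which the command was received.
+ */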
+static int core_scsi3_emulate_pro_clear(
+	struct se_cmd *cmd,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	u32 pr_res_mapped_lun = 0;
+	int calling_it_nexus = 0;
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+			se_sess->se_node_acl, se_sess);
+	if (!(pr_reg_n)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for CLEAR\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 section 5.7.11.6, Clearing:
+	 *
+	 * Any application client may release the persistent reservation and
+	 * remove all registrations from a device server by issuing a
+	 * PERSISTENT RESERVE OUT command with CLEAR service action through a
+	 * registered I_T nexus with the following parameter:
+	 *
+	 *	a) RESERVATION KEY field set to the value of the reservation key
+	 * 	   that is registered with the logical unit for the I_T nexus.
+	 */
+	if (res_key != pr_reg_n->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+			" res_key: 0x%016Lx does not match"
+			" existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * a) Release the persistent reservation, if any;
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * b) Remove all registration(s) (see spc4r17 5.7.7);
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg, NULL,
+					calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every registered I_T nexus other
+		 *    than the I_T nexus on which the PERSISTENT RESERVE OUT
+		 *    command with CLEAR service action was received, with the
+		 *    additional sense code set to RESERVATIONS PREEMPTED.
+		 */
+		if (!(calling_it_nexus))
+			core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
+		CMD_TFO(cmd)->get_fabric_name());
+
+	if (pr_tmpl->pr_aptpl_active) {
+		core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+		printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+				" for CLEAR\n");
+	}
+
+	core_scsi3_pr_generation(dev);
+	return 0;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
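+/*
+ * Performs an implicit release of any existing reservation holder and
+ * installs *pr_reg as the new holder with the requested type and scope;
+ * for PREEMPT_AND_ABORT the preempting registration is also added to the
+ * preempt_and_abort_list used to abort outstanding CDBs.
+ */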
+static void __core_scsi3_complete_pro_preempt(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int type,
+	int scope,
+	int abort)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Do an implicit RELEASE of the existing reservation.
+	 */
+	if (dev->dev_pr_res_holder)
+		__core_scsi3_complete_pro_release(dev, nacl,
+				dev->dev_pr_res_holder, 0);
+
+	dev->dev_pr_res_holder = pr_reg;
+	pr_reg->pr_res_holder = 1;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_scope = scope;
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	/*
+	 * For PREEMPT_AND_ABORT, add the preempting reservation's
+	 * struct t10_pr_registration to the list that will be compared
+	 * against received CDBs..
+	 */
+	if (preempt_and_abort_list)
+		list_add_tail(&pr_reg->pr_reg_abort_list,
+				preempt_and_abort_list);
+}
+
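+/*
+ * Releases the registrations collected on the preempt_and_abort_list
+ * during PREEMPT_AND_ABORT, skipping the new reservation holder and any
+ * entry still flagged as a reservation holder.
+ */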
+static void core_scsi3_release_preempt_and_abort(
+	struct list_head *preempt_and_abort_list,
+	struct t10_pr_registration *pr_reg_holder)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+
+		list_del(&pr_reg->pr_reg_abort_list);
+		if (pr_reg_holder == pr_reg)
+			continue;
+		if (pr_reg->pr_res_holder) {
+			printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+			continue;
+		}
+
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+}
+
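+/*
+ * Returns 0 when the command's reservation key matches an entry on the
+ * preempt_and_abort_list (i.e. the CDB belongs to a preempted
+ * registration and is subject to abort), and 1 otherwise.
+ */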
+int core_scsi3_check_cdb_abort_and_preempt(
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *cmd)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+		if (pr_reg->pr_res_key == cmd->pr_res_key)
+			return 0;
+	}
+
+	return 1;
+}
+
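+/*
+ * PROUT PREEMPT / PREEMPT_AND_ABORT: when the service action reservation
+ * key does not identify the reservation holder, the matching registrations
+ * are removed (RESERVATION CONFLICT if none match); when it does, the
+ * existing reservation is released, the identified registrations are
+ * removed, and the preempting I_T nexus becomes the new reservation
+ * holder, with unit attentions established per spc4r17 section 5.7.11.4.
+ */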
+static int core_scsi3_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct list_head preempt_and_abort_list;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	u32 pr_res_mapped_lun = 0;
+	int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+	int prh_type = 0, prh_scope = 0, ret;
+
+	if (!(se_sess))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg_n)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for PREEMPT%s\n",
+			(abort) ? "_AND_ABORT" : "");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	if (pr_reg_n->pr_res_key != res_key) {
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	INIT_LIST_HEAD(&preempt_and_abort_list);
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder &&
+	   ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
+		all_reg = 1;
+
+	if (!(all_reg) && !(sa_res_key)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
+	 *
+	 * If the SERVICE ACTION RESERVATION KEY field does not identify a
+	 * persistent reservation holder or there is no persistent reservation
+	 * holder (i.e., there is no persistent reservation), then the device
+	 * server shall perform a preempt by doing the following in an
+	 * uninterrupted series of actions. (See below..)
+	 */
+	if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+		/*
+		 * No existing or SA Reservation Key matching reservations..
+		 *
+		 * PROUT SA PREEMPT with All Registrant type reservations are
+		 * allowed to be processed without a matching SA Reservation Key
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+			/*
+			 * Removing of registrations in non all registrants
+			 * type reservations without a matching SA reservation
+			 * key.
+			 *
+			 * a) Remove the registrations for all I_T nexuses
+			 *    specified by the SERVICE ACTION RESERVATION KEY
+			 *    field;
+			 * b) Ignore the contents of the SCOPE and TYPE fields;
+			 * c) Process tasks as defined in 5.7.1; and
+			 * d) Establish a unit attention condition for the
+			 *    initiator port associated with every I_T nexus
+			 *    that lost its registration other than the I_T
+			 *    nexus on which the PERSISTENT RESERVE OUT command
+			 *    was received, with the additional sense code set
+			 *    to REGISTRATIONS PREEMPTED.
+			 */
+			if (!(all_reg)) {
+				if (pr_reg->pr_res_key != sa_res_key)
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, calling_it_nexus);
+				released_regs++;
+			} else {
+				/*
+				 * Case for any existing all registrants type
+				 * reservation, follow logic in spc4r17 section
+				 * 5.7.11.4 Preempting, Table 52 and Figure 7.
+				 *
+				 * For a ZERO SA Reservation key, release
+				 * all other registrations and do an implicit
+				 * release of active persistent reservation.
+				 *
+				 * For a non-ZERO SA Reservation key, only
+				 * release the matching reservation key from
+				 * registrations.
+				 */
+				if ((sa_res_key) &&
+				     (pr_reg->pr_res_key != sa_res_key))
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				if (calling_it_nexus)
+					continue;
+
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, 0);
+				released_regs++;
+			}
+			if (!(calling_it_nexus))
+				core_scsi3_ua_allocate(pr_reg_nacl,
+					pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_PREEMPTED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+		 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+		 * RESERVATION KEY field to a value that does not match any
+		 * registered reservation key, then the device server shall
+		 * complete the command with RESERVATION CONFLICT status.
+		 */
+		if (!(released_regs)) {
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg_n);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * For an existing all registrants type reservation
+		 * with a zero SA reservation key, preempt the existing
+		 * reservation with the new PR type and scope.
+		 */
+		if (pr_res_holder && all_reg && !(sa_res_key)) {
+			__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+				(abort) ? &preempt_and_abort_list : NULL,
+				type, scope, abort);
+
+			if (abort)
+				core_scsi3_release_preempt_and_abort(
+					&preempt_and_abort_list, pr_reg_n);
+		}
+		spin_unlock(&dev->dev_reservation_lock);
+
+		if (pr_tmpl->pr_aptpl_active) {
+			ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+					&pr_reg_n->pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret))
+				printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+					" metadata for PREEMPT%s\n", (abort) ?
+					"_AND_ABORT" : "");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg_n);
+		core_scsi3_pr_generation(SE_DEV(cmd));
+		return 0;
+	}
+	/*
+	 * The PREEMPTing SA reservation key matches that of the
+	 * existing persistent reservation, first, we check if
+	 * we are preempting our own reservation.
+	 * From spc4r17, section 5.7.11.4.3 Preempting
+	 * persistent reservations and registration handling
+	 *
+	 * If an all registrants persistent reservation is not
+	 * present, it is not an error for the persistent
+	 * reservation holder to preempt itself (i.e., a
+	 * PERSISTENT RESERVE OUT with a PREEMPT service action
+	 * or a PREEMPT AND ABORT service action with the
+	 * SERVICE ACTION RESERVATION KEY value equal to the
+	 * persistent reservation holder's reservation key that
+	 * is received from the persistent reservation holder).
+	 * In that case, the device server shall establish the
+	 * new persistent reservation and maintain the
+	 * registration.
+	 */
+	prh_type = pr_res_holder->pr_res_type;
+	prh_scope = pr_res_holder->pr_res_scope;
+	/*
+	 * If the SERVICE ACTION RESERVATION KEY field identifies a
+	 * persistent reservation holder (see 5.7.10), the device
+	 * server shall perform a preempt by doing the following as
+	 * an uninterrupted series of actions:
+	 *
+	 * a) Release the persistent reservation for the holder
+	 *    identified by the SERVICE ACTION RESERVATION KEY field;
+	 */
+	if (pr_reg_n != pr_res_holder)
+		__core_scsi3_complete_pro_release(dev,
+				pr_res_holder->pr_reg_nacl,
+				dev->dev_pr_res_holder, 0);
+	/*
+	 * b) Remove the registrations for all I_T nexuses identified
+	 *    by the SERVICE ACTION RESERVATION KEY field, except the
+	 *    I_T nexus that is being used for the PERSISTENT RESERVE
+	 *    OUT command. If an all registrants persistent reservation
+	 *    is present and the SERVICE ACTION RESERVATION KEY field
+	 *    is set to zero, then all registrations shall be removed
+	 *    except for that of the I_T nexus that is being used for
+	 *    the PERSISTENT RESERVE OUT command;
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		if (calling_it_nexus)
+			continue;
+
+		if (pr_reg->pr_res_key != sa_res_key)
+			continue;
+
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg,
+				(abort) ? &preempt_and_abort_list : NULL,
+				calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every I_T nexus that lost its
+		 *    persistent reservation and/or registration, with the
+		 *    additional sense code set to REGISTRATIONS PREEMPTED;
+		 */
+		core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+				ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * c) Establish a persistent reservation for the preempting
+	 *    I_T nexus using the contents of the SCOPE and TYPE fields;
+	 */
+	__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+			(abort) ? &preempt_and_abort_list : NULL,
+			type, scope, abort);
+	/*
+	 * d) Process tasks as defined in 5.7.1;
+	 * e) See above..
+	 * f) If the type or scope has changed, then for every I_T nexus
+	 *    whose reservation key was not removed, except for the I_T
+	 *    nexus on which the PERSISTENT RESERVE OUT command was
+	 *    received, the device server shall establish a unit
+	 *    attention condition for the initiator port associated with
+	 *    that I_T nexus, with the additional sense code set to
+	 *    RESERVATIONS RELEASED. If the type or scope have not
+	 *    changed, then no unit attention condition(s) shall be
+	 *    established for this reason.
+	 */
+	if ((prh_type != type) || (prh_scope != scope)) {
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+
+			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+			if (calling_it_nexus)
+				continue;
+
+			core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+					pr_reg->pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_RELEASED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Call LUN_RESET logic upon list of struct t10_pr_registration,
+	 * All received CDBs for the matching existing reservation and
+	 * registrations undergo ABORT_TASK logic.
+	 *
+	 * From there, core_scsi3_release_preempt_and_abort() will
+	 * release every registration in the list (which have already
+	 * been removed from the primary pr_reg list), except the
+	 * new persistent reservation holder, the calling Initiator Port.
+	 */
+	if (abort) {
+		core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+		core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+						pr_reg_n);
+	}
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg_n->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+				"%s\n", (abort) ? "_AND_ABORT" : "");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg_n);
+	core_scsi3_pr_generation(SE_DEV(cmd));
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_preempt(cmd, type, scope,
+				res_key, sa_res_key, abort);
+		break;
+	default:
+		printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+			" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return ret;
+}
+
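+/*
+ * PROUT REGISTER_AND_MOVE: validates the caller as the current reservation
+ * holder, parses the destination TransportID and Relative Target Port
+ * Identifier from the parameter list, registers the destination I_T nexus
+ * if needed, and moves the reservation to it, optionally unregistering
+ * the calling I_T nexus when the UNREG bit is set.
+ */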
+static int core_scsi3_emulate_pro_register_and_move(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int unreg)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve, *dest_se_deve = NULL;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
+	struct se_port *se_port;
+	struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
+	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *initiator_str;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tid_len, tmp_tid_len;
+	int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+	unsigned short rtpi;
+	unsigned char proto_ident;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	memset(dest_iport, 0, 64);
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	se_tpg = se_sess->se_tpg;
+	tf_ops = TPG_TFO(se_tpg);
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
+	 *	Register behaviors for a REGISTER AND MOVE service action
+	 *
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+			" *pr_reg for REGISTER_AND_MOVE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * The provided reservation key must match the existing reservation key
+	 * established during this initiator's I_T nexus registration.
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+			" res_key: 0x%016Lx does not match existing SA REGISTER"
+			" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * The service action reservation key must be non-zero.
+	 */
+	if (!(sa_res_key)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+			" sa_res_key\n");
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * Determine the Relative Target Port Identifier where the reservation
+	 * will be moved to for the TransportID containing SCSI initiator WWN
+	 * information.
+	 */
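+	/*
+	 * Parameter list layout here: bytes 18-19 carry the Relative Target
+	 * Port Identifier and bytes 20-23 the TransportID parameter data
+	 * length, both big-endian; the TransportID itself starts at byte 24.
+	 */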
+	rtpi = (buf[18] & 0xff) << 8;
+	rtpi |= buf[19] & 0xff;
+	tid_len = (buf[20] & 0xff) << 24;
+	tid_len |= (buf[21] & 0xff) << 16;
+	tid_len |= (buf[22] & 0xff) << 8;
+	tid_len |= buf[23] & 0xff;
+
+	if ((tid_len + 24) != cmd->data_length) {
+		printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+			" does not equal CDB data_length: %u\n", tid_len,
+			cmd->data_length);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
+		if (se_port->sep_rtpi != rtpi)
+			continue;
+		dest_se_tpg = se_port->sep_tpg;
+		if (!(dest_se_tpg))
+			continue;
+		dest_tf_ops = TPG_TFO(dest_se_tpg);
+		if (!(dest_tf_ops))
+			continue;
+
+		atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		ret = core_scsi3_tpg_depend_item(dest_se_tpg);
+		if (ret != 0) {
+			printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+				" for dest_se_tpg\n");
+			atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_LU_COMM_FAILURE;
+		}
+
+		spin_lock(&dev->se_port_lock);
+		break;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	if (!(dest_se_tpg) || (!dest_tf_ops)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" fabric ops from Relative Target Port Identifier:"
+			" %hu\n", rtpi);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	proto_ident = (buf[24] & 0x0f);
+#if 0
+	printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+			" 0x%02x\n", proto_ident);
+#endif
+	if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+			" proto_ident: 0x%02x does not match ident: 0x%02x"
+			" from fabric: %s\n", proto_ident,
+			dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+			dest_tf_ops->get_fabric_name());
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+			" contain a valid tpg_parse_pr_out_transport_id"
+			" function pointer\n");
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+	initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+			(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+	if (!(initiator_str)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" initiator_str from Transport ID\n");
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+		" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+		"port" : "device", initiator_str, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action specifies a TransportID that is the same as the initiator port
+	 * of the I_T nexus for the command received, then the command shall
+	 * be terminated with CHECK CONDITION status, with the sense key set to
+	 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
+	 * IN PARAMETER LIST.
+	 */
+	pr_reg_nacl = pr_reg->pr_reg_nacl;
+	matching_iname = (!strcmp(initiator_str,
+				  pr_reg_nacl->initiatorname)) ? 1 : 0;
+	if (!(matching_iname))
+		goto after_iport_check;
+
+	if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+			" matches: %s on received I_T Nexus\n", initiator_str,
+			pr_reg_nacl->initiatorname);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+			" matches: %s %s on received I_T Nexus\n",
+			initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+			pr_reg->pr_reg_isid);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+after_iport_check:
+	/*
+	 * Locate the destination struct se_node_acl from the received Transport ID
+	 */
+	spin_lock_bh(&dest_se_tpg->acl_node_lock);
+	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+				initiator_str);
+	if (dest_node_acl) {
+		atomic_inc(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_inc();
+	}
+	spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+
+	if (!(dest_node_acl)) {
+		printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+			" TransportID%s\n", dest_tf_ops->get_fabric_name(),
+			initiator_str);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+	if (ret != 0) {
+		printk(KERN_ERR "core_scsi3_nodeacl_depend_item() failed for"
+			" dest_node_acl\n");
+		atomic_dec(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_node_acl = NULL;
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+#if 0
+	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+		" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname);
+#endif
+	/*
+	 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+	 * PORT IDENTIFIER.
+	 */
+	dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+	if (!(dest_se_deve)) {
+		printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+			" %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+	if (ret < 0) {
+		printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+		atomic_dec(&dest_se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_se_deve = NULL;
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+#if 0
+	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+		" ACL for dest_se_deve->mapped_lun: %u\n",
+		dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+		dest_se_deve->mapped_lun);
+#endif
+	/*
+	 * A persistent reservation needs to already exist in order to
+	 * successfully complete the REGISTER_AND_MOVE service action.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+			" currently held\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+		goto out;
+	}
+	/*
+	 * The I_T nexus on which the command was received must be the
+	 * reservation holder.
+	 *
+	 * From spc4r17 section 5.7.8  Table 50 --
+	 * 	Register behaviors for a REGISTER AND MOVE service action
+	 */
+	if (pr_res_holder != pr_reg) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+			" Nexus is not reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		goto out;
+	}
+	/*
+	 * From spc4r17 section 5.7.8: registering and moving reservation
+	 *
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action is received and the established persistent reservation is a
+	 * Write Exclusive - All Registrants type or Exclusive Access -
+	 * All Registrants type reservation, then the command shall be completed
+	 * with RESERVATION CONFLICT status.
+	 */
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+			" reservation for type: %s\n",
+			core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		goto out;
+	}
+	pr_res_nacl = pr_res_holder->pr_reg_nacl;
+	/*
+	 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+	 */
+	type = pr_res_holder->pr_res_type;
+	scope = pr_res_holder->pr_res_scope;
+	/*
+	 * c) Associate the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field with the I_T nexus specified as the
+	 *    destination of the register and move, where:
+	 *    A) The I_T nexus is specified by the TransportID and the
+	 *	 RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+	 *    B) Regardless of the TransportID format used, the association for
+	 *       the initiator port is based on either the initiator port name
+	 *       (see 3.1.71) on SCSI transport protocols where port names are
+	 *       required or the initiator port identifier (see 3.1.70) on SCSI
+	 *       transport protocols where port names are not required;
+	 * d) Register the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field;
+	 * e) Retain the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field and associated information;
+	 *
+	 * Also, it is not an error for a REGISTER AND MOVE service action to
+	 * register an I_T nexus that is already registered with the same
+	 * reservation key or a different reservation key.
+	 */
+	dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+	if (!(dest_pr_reg)) {
+		ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, 0, aptpl, 2, 1);
+		if (ret != 0) {
+			spin_unlock(&dev->dev_reservation_lock);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+						iport_ptr);
+		new_reg = 1;
+	}
+	/*
+	 * f) Release the persistent reservation for the persistent reservation
+	 *    holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+	 *    command was received);
+	 */
+	__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			dev->dev_pr_res_holder, 0);
+	/*
+	 * g) Move the persistent reservation to the specified I_T nexus using
+	 *    the same scope and type as the persistent reservation released in
+	 *    item f); and
+	 */
+	dev->dev_pr_res_holder = dest_pr_reg;
+	dest_pr_reg->pr_res_holder = 1;
+	dest_pr_reg->pr_res_type = type;
+	dest_pr_reg->pr_res_scope = scope;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Increment PRGeneration for existing registrations..
+	 */
+	if (!(new_reg))
+		dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+		" created new reservation holder TYPE: %s on object RTPI:"
+		" %hu  PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+		core_scsi3_pr_dump_type(type), rtpi,
+		dest_pr_reg->pr_res_generation);
+	printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+		" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+		tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * It is now safe to release configfs group dependencies for destination
+	 * of Transport ID Initiator Device/Port Identifier
+	 */
+	core_scsi3_lunacl_undepend_item(dest_se_deve);
+	core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	/*
+	 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+	 * nexus on which the PERSISTENT RESERVE OUT command was received.
+	 */
+	if (unreg) {
+		spin_lock(&pr_tmpl->registration_lock);
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+		spin_unlock(&pr_tmpl->registration_lock);
+	} else
+		core_scsi3_put_pr_reg(pr_reg);
+
+	/*
+	 * Clear the APTPL metadata if APTPL has been disabled, otherwise
+	 * write out the updated metadata to struct file for this SCSI device.
+	 */
+	if (!(aptpl)) {
+		pr_tmpl->pr_aptpl_active = 0;
+		core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+		printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+				" REGISTER_AND_MOVE\n");
+	} else {
+		pr_tmpl->pr_aptpl_active = 1;
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&dest_pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Set APTPL Bit Activated for"
+					" REGISTER_AND_MOVE\n");
+	}
+
+	core_scsi3_put_pr_reg(dest_pr_reg);
+	return 0;
+out:
+	if (dest_se_deve)
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+	if (dest_node_acl)
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
+	__v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
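+
+/*
+ * Worked example (values are hypothetical, for illustration only): the
+ * reservation key is an 8-byte big-endian field, so a parameter list
+ * starting with the bytes
+ *
+ *   0x00 0x00 0x00 0x00 0x12 0x34 0x56 0x78
+ *
+ * yields core_scsi3_extract_reservation_key(buf) == 0x0000000012345678ULL.
+ */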
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
+{
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u64 res_key, sa_res_key;
+	int sa, scope, type, aptpl;
+	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+	/*
+	 * FIXME: A NULL struct se_session pointer means this is not coming from
+	 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+	 */
+	if (!(SE_SESS(cmd)))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	if (cmd->data_length < 24) {
+		printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+	 */
+	sa = (cdb[1] & 0x1f);
+	scope = (cdb[2] & 0xf0);
+	type = (cdb[2] & 0x0f);
+	/*
+	 * From PERSISTENT_RESERVE_OUT parameter list (payload)
+	 */
+	res_key = core_scsi3_extract_reservation_key(&buf[0]);
+	sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+	/*
+	 * REGISTER_AND_MOVE uses a different SA parameter list containing
+	 * SCSI TransportIDs.
+	 */
+	if (sa != PRO_REGISTER_AND_MOVE) {
+		spec_i_pt = (buf[20] & 0x08);
+		all_tg_pt = (buf[20] & 0x04);
+		aptpl = (buf[20] & 0x01);
+	} else {
+		aptpl = (buf[17] & 0x01);
+		unreg = (buf[17] & 0x02);
+	}
+	/*
+	 * SPEC_I_PT=1 is only valid for Service action: REGISTER
+	 */
+	if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	/*
+	 * From spc4r17 section 6.14:
+	 *
+	 * If the SPEC_I_PT bit is set to zero, the service action is not
+	 * REGISTER AND MOVE, and the parameter list length is not 24, then
+	 * the command shall be terminated with CHECK CONDITION status, with
+	 * the sense key set to ILLEGAL REQUEST, and the additional sense
+	 * code set to PARAMETER LIST LENGTH ERROR.
+	 */
+	if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+	    (cmd->data_length != 24)) {
+		printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+			" list length: %u\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * The core_scsi3_emulate_pro_* function parameters are defined by
+	 * spc4r17 Table 174: PERSISTENT_RESERVE_OUT service actions and
+	 * valid parameters.
+	 */
+	switch (sa) {
+	case PRO_REGISTER:
+		return core_scsi3_emulate_pro_register(cmd,
+			res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+	case PRO_RESERVE:
+		return core_scsi3_emulate_pro_reserve(cmd,
+			type, scope, res_key);
+	case PRO_RELEASE:
+		return core_scsi3_emulate_pro_release(cmd,
+			type, scope, res_key);
+	case PRO_CLEAR:
+		return core_scsi3_emulate_pro_clear(cmd, res_key);
+	case PRO_PREEMPT:
+		return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 0);
+	case PRO_PREEMPT_AND_ABORT:
+		return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 1);
+	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+		return core_scsi3_emulate_pro_register(cmd,
+			0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+	case PRO_REGISTER_AND_MOVE:
+		return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+				sa_res_key, aptpl, unreg);
+	default:
+		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
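+
+/*
+ * Decoding sketch (hypothetical values, illustration only): a
+ * PERSISTENT_RESERVE_OUT REGISTER with a 24 byte parameter list is taken
+ * apart by core_scsi3_emulate_pr_out() roughly as follows:
+ *
+ *   cdb[1] & 0x1f = 0x00                  -> sa = PRO_REGISTER
+ *   cdb[2] & 0xf0 = 0x00                  -> scope = PR_SCOPE_LU_SCOPE
+ *   cdb[2] & 0x0f = 0x03                  -> type = PR_TYPE_EXCLUSIVE_ACCESS
+ *   buf[0..7]  = 00 00 00 00 00 00 00 00  -> res_key = 0 (new registration)
+ *   buf[8..15] = 00 00 00 00 12 34 56 78  -> sa_res_key = 0x12345678
+ *   buf[20] & 0x01 = 0x01                 -> aptpl = 1
+ *
+ * which ends up calling core_scsi3_emulate_pro_register(cmd, 0,
+ * 0x12345678, 1, 0, 0, 0).
+ */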
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 add_len = 0, off = 8;
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+		/*
+		 * Check for overflow of 8byte PRI READ_KEYS payload and
+		 * next reservation key list descriptor.
+		 */
+		if ((add_len + 8) > (cmd->data_length - 8))
+			break;
+
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+
+		add_len += 8;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	return 0;
+}
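+
+/*
+ * Example READ_KEYS payload (illustrative only): with PRGeneration 0x2a and
+ * two registrations holding keys 0x1111 and 0x2222 (walked in list order),
+ * the code above produces
+ *
+ *   buf[0..3]   = 00 00 00 2a                 PRGENERATION
+ *   buf[4..7]   = 00 00 00 10                 ADDITIONAL LENGTH (2 * 8)
+ *   buf[8..15]  = 00 00 00 00 00 00 11 11     first reservation key
+ *   buf[16..23] = 00 00 00 00 00 00 22 22     second reservation key
+ */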
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u64 pr_res_key;
+	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_RESERVATION SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&se_dev->dev_reservation_lock);
+	pr_reg = se_dev->dev_pr_res_holder;
+	if ((pr_reg)) {
+		/*
+		 * Set the hardcoded Additional Length
+		 */
+		buf[4] = ((add_len >> 24) & 0xff);
+		buf[5] = ((add_len >> 16) & 0xff);
+		buf[6] = ((add_len >> 8) & 0xff);
+		buf[7] = (add_len & 0xff);
+
+		if (cmd->data_length < 22) {
+			spin_unlock(&se_dev->dev_reservation_lock);
+			return 0;
+		}
+		/*
+		 * Set the Reservation key.
+		 *
+		 * From spc4r17, section 5.7.10:
+		 * A persistent reservation holder has its reservation key
+		 * returned in the parameter data from a PERSISTENT
+		 * RESERVE IN command with READ RESERVATION service action as
+		 * follows:
+		 * a) For a persistent reservation of the type Write Exclusive
+		 *    - All Registrants or Exclusive Access - All Registrants,
+		 *      the reservation key shall be set to zero; or
+		 * b) For all other persistent reservation types, the
+		 *    reservation key shall be set to the registered
+		 *    reservation key for the I_T nexus that holds the
+		 *    persistent reservation.
+		 */
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+			pr_res_key = 0;
+		else
+			pr_res_key = pr_reg->pr_res_key;
+
+		buf[8] = ((pr_res_key >> 56) & 0xff);
+		buf[9] = ((pr_res_key >> 48) & 0xff);
+		buf[10] = ((pr_res_key >> 40) & 0xff);
+		buf[11] = ((pr_res_key >> 32) & 0xff);
+		buf[12] = ((pr_res_key >> 24) & 0xff);
+		buf[13] = ((pr_res_key >> 16) & 0xff);
+		buf[14] = ((pr_res_key >> 8) & 0xff);
+		buf[15] = (pr_res_key & 0xff);
+		/*
+		 * Set the SCOPE and TYPE
+		 */
+		buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+			  (pr_reg->pr_res_type & 0x0f);
+	}
+	spin_unlock(&se_dev->dev_reservation_lock);
+
+	return 0;
+}
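+
+/*
+ * Example READ_RESERVATION payload (illustrative only): with a Write
+ * Exclusive reservation held under key 0xabcd and PRGeneration 3, the
+ * code above produces
+ *
+ *   buf[0..3]  = 00 00 00 03                  PRGENERATION
+ *   buf[4..7]  = 00 00 00 10                  ADDITIONAL LENGTH (16)
+ *   buf[8..15] = 00 00 00 00 00 00 ab cd      reservation key
+ *   buf[21]    = 0x01                         LU_SCOPE | Write Exclusive
+ *
+ * For the two All Registrants types the key bytes are reported as zero,
+ * per the spc4r17 excerpt above.
+ */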
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u16 add_len = 8; /* Hardcoded to 8. */
+
+	if (cmd->data_length < 6) {
+		printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+			" %u too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((add_len >> 8) & 0xff);
+	buf[1] = (add_len & 0xff);
+	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+	buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+	buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+	buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+	/*
+	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+	 * set the TMV: Type Mask Valid bit.
+	 */
+	buf[3] |= 0x80;
+	/*
+	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+	 */
+	buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+	/*
+	 * PTPL_A: Persistence across Target Power Loss Active bit
+	 */
+	if (pr_tmpl->pr_aptpl_active)
+		buf[3] |= 0x01;
+	/*
+	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+	 */
+	buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+	buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+	buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+	buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+	buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+	buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+	return 0;
+}
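+
+/*
+ * For reference (derived from the assignments above, assuming a zeroed
+ * payload buffer), the REPORT_CAPABILITIES response works out to
+ *
+ *   buf[0..1] = 00 08            ADDITIONAL LENGTH
+ *   buf[2]    = 0x1d             CRH | SIP_C | ATP_C | PTPL_C
+ *   buf[3]    = 0x90             TMV | ALLOW COMMANDs 001b
+ *                                (0x91 when pr_aptpl_active is set)
+ *   buf[4..5] = 0xea 0x01        PERSISTENT RESERVATION TYPE MASK
+ */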
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_node_acl *se_nacl;
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+	u32 off = 8; /* off into first Full Status descriptor */
+	int format_code = 0;
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		se_nacl = pr_reg->pr_reg_nacl;
+		se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+		add_desc_len = 0;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Determine expected length of $FABRIC_MOD specific
+		 * TransportID full status descriptor.
+		 */
+		exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+				se_tpg, se_nacl, pr_reg, &format_code);
+
+		if ((exp_desc_len + add_len) > cmd->data_length) {
+			printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+				" out of buffer: %d\n", cmd->data_length);
+			spin_lock(&pr_tmpl->registration_lock);
+			atomic_dec(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_dec();
+			break;
+		}
+		/*
+		 * Set RESERVATION KEY
+		 */
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+		off += 4; /* Skip Over Reserved area */
+
+		/*
+		 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt)
+			buf[off] = 0x02;
+		/*
+		 * The struct se_lun pointer will be present for the
+		 * reservation holder for PR_HOLDER bit.
+		 *
+		 * Also, if this registration is the reservation
+		 * holder, fill in SCOPE and TYPE in the next byte.
+		 */
+		if (pr_reg->pr_res_holder) {
+			buf[off++] |= 0x01;
+			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+				     (pr_reg->pr_res_type & 0x0f);
+		} else
+			off += 2;
+
+		off += 4; /* Skip over reserved area */
+		/*
+		 * From spc4r17 6.3.15:
+		 *
+		 * If the ALL_TG_PT bit is set to zero, the RELATIVE TARGET PORT
+		 * IDENTIFIER field contains the relative port identifier (see
+		 * 3.1.120) of the target port that is part of the I_T nexus
+		 * described by this full status descriptor. If the ALL_TG_PT
+		 * bit is set to one, the contents of the RELATIVE TARGET PORT
+		 * IDENTIFIER field are not defined by this standard.
+		 */
+		if (!(pr_reg->pr_reg_all_tg_pt)) {
+			struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+		} else
+			off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
+
+		/*
+		 * Now, have the $FABRIC_MOD fill in the protocol identifier
+		 */
+		desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+				se_nacl, pr_reg, &format_code, &buf[off+4]);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		atomic_dec(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_dec();
+		/*
+		 * Set the ADDITIONAL DESCRIPTOR LENGTH
+		 */
+		buf[off++] = ((desc_len >> 24) & 0xff);
+		buf[off++] = ((desc_len >> 16) & 0xff);
+		buf[off++] = ((desc_len >> 8) & 0xff);
+		buf[off++] = (desc_len & 0xff);
+		/*
+		 * Size of the full status descriptor header minus the TransportID
+		 * containing the ($FABRIC_MOD specific) initiator device/port
+		 * WWN information.
+		 *
+		 *  See spc4r17 Section 6.13.5 Table 169
+		 */
+		add_desc_len = (24 + desc_len);
+
+		off += desc_len;
+		add_len += add_desc_len;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Set ADDITIONAL_LENGTH
+	 */
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	return 0;
+}
+
+static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
+{
+	switch (cdb[1] & 0x1f) {
+	case PRI_READ_KEYS:
+		return core_scsi3_pri_read_keys(cmd);
+	case PRI_READ_RESERVATION:
+		return core_scsi3_pri_read_reservation(cmd);
+	case PRI_REPORT_CAPABILITIES:
+		return core_scsi3_pri_report_capabilities(cmd);
+	case PRI_READ_FULL_STATUS:
+		return core_scsi3_pri_read_full_status(cmd);
+	default:
+		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+}
+
+int core_scsi3_emulate_pr(struct se_cmd *cmd)
+{
+	unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+	struct se_device *dev = cmd->se_dev;
+	/*
+	 * Following spc2r20 5.5.1 Reservations overview:
+	 *
+	 * If a logical unit has been reserved by any RESERVE command and is
+	 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+	 * PERSISTENT RESERVE OUT commands shall conflict regardless of
+	 * initiator or service action and shall terminate with a RESERVATION
+	 * CONFLICT status.
+	 */
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
+		printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+			" SPC-2 reservation is held, returning"
+			" RESERVATION_CONFLICT\n");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+	return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
+	       core_scsi3_emulate_pr_out(cmd, cdb) :
+	       core_scsi3_emulate_pr_in(cmd, cdb);
+}
+
+static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
+{
+	return 0;
+}
+
+static int core_pt_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	return 0;
+}
+
+int core_setup_reservations(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_reservation_template *rest = &su_dev->t10_reservation;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the reservations
+	 * of the underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but need to emulate reservations themselves.
+	 */
+	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+		rest->res_type = SPC_PASSTHROUGH;
+		rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
+			" emulation\n", TRANSPORT(dev)->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated Persistent Reservations.
+	 */
+	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+		rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
+			" emulation\n", TRANSPORT(dev)->name);
+	} else {
+		rest->res_type = SPC2_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
+		rest->pr_ops.t10_seq_non_holder =
+				&core_scsi2_reservation_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
+			TRANSPORT(dev)->name);
+	}
+
+	return 0;
+}
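+
+/*
+ * Summary of the backend selection above (a restatement, no new behaviour):
+ *
+ *   pSCSI passthrough without emulate_reservations, or force_pt
+ *                                   -> SPC_PASSTHROUGH (no emulation)
+ *   get_device_rev() >= SCSI_3      -> SPC3_PERSISTENT_RESERVATIONS
+ *   otherwise                       -> SPC2_RESERVATIONS
+ */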
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 0000000..5603bcf
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,67 @@
+#ifndef TARGET_CORE_PR_H
+#define TARGET_CORE_PR_H
+/*
+ * PERSISTENT_RESERVE_OUT service action codes
+ *
+ * spc4r17 section 6.14.2 Table 171
+ */
+#define PRO_REGISTER				0x00
+#define PRO_RESERVE				0x01
+#define PRO_RELEASE				0x02
+#define PRO_CLEAR				0x03
+#define PRO_PREEMPT				0x04
+#define PRO_PREEMPT_AND_ABORT			0x05
+#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY	0x06
+#define PRO_REGISTER_AND_MOVE			0x07
+/*
+ * PERSISTENT_RESERVE_IN service action codes
+ *
+ * spc4r17 section 6.13.1 Table 159
+ */
+#define PRI_READ_KEYS				0x00
+#define PRI_READ_RESERVATION			0x01
+#define PRI_REPORT_CAPABILITIES			0x02
+#define PRI_READ_FULL_STATUS			0x03
+/*
+ * PERSISTENT_RESERVE_ SCOPE field
+ *
+ * spc4r17 section 6.13.3.3 Table 163
+ */
+#define PR_SCOPE_LU_SCOPE			0x00
+/*
+ * PERSISTENT_RESERVE_* TYPE field
+ *
+ * spc4r17 section 6.13.3.4 Table 164
+ */
+#define PR_TYPE_WRITE_EXCLUSIVE			0x01
+#define PR_TYPE_EXCLUSIVE_ACCESS		0x03
+#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY		0x05
+#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY	0x06
+#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG		0x07
+#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG		0x08
+
+#define PR_APTPL_MAX_IPORT_LEN			256
+#define PR_APTPL_MAX_TPORT_LEN			256
+
+extern struct kmem_cache *t10_pr_reg_cache;
+
+extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
+			char *, u32);
+extern int core_scsi2_emulate_crh(struct se_cmd *);
+extern int core_scsi3_alloc_aptpl_registration(
+			struct t10_reservation_template *, u64,
+			unsigned char *, unsigned char *, u32,
+			unsigned char *, u16, u32, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+			struct se_portal_group *, struct se_lun *,
+			struct se_lun_acl *);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+					     struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
+						  struct se_cmd *);
+extern int core_scsi3_emulate_pr(struct se_cmd *);
+extern int core_setup_reservations(struct se_device *, int);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 0000000..f2a08477a
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1470 @@
+/*******************************************************************************
+ * Filename:  target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/genhd.h>
+#include <linux/cdrom.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_pscsi.h"
+
+#define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
+
+static struct se_subsystem_api pscsi_template;
+
+static void pscsi_req_done(struct request *, int);
+
+/*	pscsi_get_sh():
+ *
+ *
+ */
+static struct Scsi_Host *pscsi_get_sh(u32 host_no)
+{
+	struct Scsi_Host *sh = NULL;
+
+	sh = scsi_host_lookup(host_no);
+	if (IS_ERR(sh)) {
+		printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
+				" %u\n", host_no);
+		return NULL;
+	}
+
+	return sh;
+}
+
+/*	pscsi_attach_hba():
+ *
+ *	pscsi_get_sh() uses scsi_host_lookup() to locate the struct Scsi_Host
+ *	from the passed SCSI Host ID.
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	int hba_depth;
+	struct pscsi_hba_virt *phv;
+
+	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+	if (!(phv)) {
+		printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
+		return -1;
+	}
+	phv->phv_host_id = host_id;
+	phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+	hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+	atomic_set(&hba->left_queue_depth, hba_depth);
+	atomic_set(&hba->max_queue_depth, hba_depth);
+
+	hba->hba_ptr = (void *)phv;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
+		" Target Core with TCQ Depth: %d\n", hba->hba_id,
+		atomic_read(&hba->max_queue_depth));
+
+	return 0;
+}
+
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+	if (scsi_host) {
+		scsi_host_put(scsi_host);
+
+		printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+			" Generic Target Core\n", hba->hba_id,
+			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
+			"Unknown");
+	} else
+		printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+			" from Generic Target Core\n", hba->hba_id);
+
+	kfree(phv);
+	hba->hba_ptr = NULL;
+}
+
+static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+{
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+	/*
+	 * Release the struct Scsi_Host
+	 */
+	if (!(mode_flag)) {
+		if (!(sh))
+			return 0;
+
+		phv->phv_lld_host = NULL;
+		phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+		atomic_set(&hba->left_queue_depth, hba_depth);
+		atomic_set(&hba->max_queue_depth, hba_depth);
+
+		printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+			" %s\n", hba->hba_id, (sh->hostt->name) ?
+			(sh->hostt->name) : "Unknown");
+
+		scsi_host_put(sh);
+		return 0;
+	}
+	/*
+	 * Otherwise, locate struct Scsi_Host from the original passed
+	 * pSCSI Host ID and enable for phba mode
+	 */
+	sh = pscsi_get_sh(phv->phv_host_id);
+	if (!(sh)) {
+		printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+			" phv_host_id: %d\n", phv->phv_host_id);
+		return -1;
+	}
+	/*
+	 * Usually the SCSI LLD will use the hostt->can_queue value to define
+	 * its HBA TCQ depth.  Some other drivers (like 2.6 megaraid) don't set
+	 * this at all and set sh->can_queue at runtime.
+	 */
+	hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
+		sh->hostt->can_queue : sh->can_queue;
+
+	atomic_set(&hba->left_queue_depth, hba_depth);
+	atomic_set(&hba->max_queue_depth, hba_depth);
+
+	phv->phv_lld_host = sh;
+	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
+
+	return 1;
+}
+
+static void pscsi_tape_read_blocksize(struct se_device *dev,
+		struct scsi_device *sdev)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(12, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = MODE_SENSE;
+	cdb[4] = 0x0c; /* 12 bytes */
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
+			HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	/*
+	 * If MODE_SENSE reports a zero block length, fall back to 1024.
+	 */
+	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+	if (!sdev->sector_size)
+		sdev->sector_size = 1024;
+out_free:
+	kfree(buf);
+}
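+
+/*
+ * Example (hypothetical MODE_SENSE data): with the 4 byte mode parameter
+ * header followed by an 8 byte block descriptor, the BLOCK LENGTH sits in
+ * bytes 9-11 of the returned buffer, so
+ *
+ *   buf[9..11] = 00 02 00  ->  sdev->sector_size = 512
+ *
+ * and an all-zero block length falls back to the 1024 byte default above.
+ */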
+
+static void
+pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char *buf;
+
+	if (sdev->inquiry_len < INQUIRY_LEN)
+		return;
+
+	buf = sdev->inquiry;
+	if (!buf)
+		return;
+	/*
+	 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+	 */
+	memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
+	memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
+	memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+}
+
+static int
+pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return -1;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x80; /* Unit Serial Number */
+	cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
+
+	wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+
+	kfree(buf);
+	return 0;
+
+out_free:
+	kfree(buf);
+	return -1;
+}
+
+static void
+pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
+		struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
+	int ident_len, page_len, off = 4, ret;
+	struct t10_vpd *vpd;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x83; /* Device Identifier */
+	cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
+			      NULL, HZ, 1, NULL);
+	if (ret)
+		goto out;
+
+	page_len = (buf[2] << 8) | buf[3];
+	while (page_len > 0) {
+		/* Grab a pointer to the Identification descriptor */
+		page_83 = &buf[off];
+		ident_len = page_83[3];
+		if (!ident_len) {
+			printk(KERN_ERR "page_83[3]: identifier"
+					" length zero!\n");
+			break;
+		}
+		printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len);
+
+		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
+		if (!vpd) {
+			printk(KERN_ERR "Unable to allocate memory for"
+					" struct t10_vpd\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&vpd->vpd_list);
+
+		transport_set_vpd_proto_id(vpd, page_83);
+		transport_set_vpd_assoc(vpd, page_83);
+
+		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+		if (transport_set_vpd_ident(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+
+		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
+		off += (ident_len + 4);
+		page_len -= (ident_len + 4);
+	}
+
+out:
+	kfree(buf);
+}
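+
+/*
+ * Layout being walked above (sketched from the code, see SPC INQUIRY VPD
+ * page 0x83): bytes 2-3 of the response carry the total page length, and
+ * each identification descriptor occupies (4 + IDENTIFIER LENGTH) bytes
+ * with the IDENTIFIER LENGTH in byte 3 of the descriptor, hence the
+ * "off += ident_len + 4" / "page_len -= ident_len + 4" stepping.
+ */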
+
+/*	pscsi_add_device_to_list():
+ *
+ *
+ */
+static struct se_device *pscsi_add_device_to_list(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	struct pscsi_dev_virt *pdv,
+	struct scsi_device *sd,
+	int dev_flags)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct request_queue *q;
+	struct queue_limits *limits;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	if (!sd->queue_depth) {
+		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
+
+		printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+			" queue_depth to %d\n", sd->channel, sd->id,
+				sd->lun, sd->queue_depth);
+	}
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
+	q = sd->request_queue;
+	limits = &dev_limits.limits;
+	limits->logical_block_size = sd->sector_size;
+	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
+				  queue_max_hw_sectors(q) : sd->host->max_sectors;
+	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
+				  queue_max_sectors(q) : sd->host->max_sectors;
+	dev_limits.hw_queue_depth = sd->queue_depth;
+	dev_limits.queue_depth = sd->queue_depth;
+	/*
+	 * Setup our standard INQUIRY info into se_dev->t10_wwn
+	 */
+	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+	/*
+	 * Set the pdv->pdv_sd pointer from the passed struct scsi_device,
+	 * which has already been referenced by the Linux SCSI code with
+	 * scsi_device_get() in this file's pscsi_create_virtdevice().
+	 *
+	 * The passthrough operations called by the transport_add_device_*
+	 * function below require this pointer to be set for passthrough
+	 * ops.
+	 *
+	 * For the shutdown case in pscsi_free_device(), this struct
+	 * scsi_device reference is released with the Linux SCSI code
+	 * scsi_device_put() and pdv->pdv_sd is cleared.
+	 */
+	pdv->pdv_sd = sd;
+
+	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
+				se_dev, dev_flags, (void *)pdv,
+				&dev_limits, NULL, NULL);
+	if (!(dev)) {
+		pdv->pdv_sd = NULL;
+		return NULL;
+	}
+
+	/*
+	 * Locate VPD WWN Information used for various purposes within
+	 * the Storage Engine.
+	 */
+	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+		/*
+		 * If VPD Unit Serial returned GOOD status, try
+		 * VPD Device Identification page (0x83).
+		 */
+		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+	}
+
+	/*
+	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+	 */
+	if (sd->type == TYPE_TAPE)
+		pscsi_tape_read_blocksize(dev, sd);
+	return dev;
+}
+
+static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct pscsi_dev_virt *pdv;
+
+	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+	if (!(pdv)) {
+		printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+		return NULL;
+	}
+	pdv->pdv_se_hba = hba;
+
+	printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+	return (void *)pdv;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_disk(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	struct block_device *bd;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+	/*
+	 * Claim exclusive struct block_device access to struct scsi_device
+	 * for TYPE_DISK using supplied udev_path
+	 */
+	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+	if (IS_ERR(bd)) {
+		printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
+		scsi_device_put(sd);
+		return NULL;
+	}
+	pdv->pdv_bd = bd;
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev)) {
+		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+		scsi_device_put(sd);
+		return NULL;
+	}
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_rom(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev)) {
+		scsi_device_put(sd);
+		return NULL;
+	}
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_other(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	spin_unlock_irq(sh->host_lock);
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev))
+		return NULL;
+
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+static struct se_device *pscsi_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+	struct se_device *dev;
+	struct scsi_device *sd;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int legacy_mode_enable = 0;
+
+	if (!(pdv)) {
+		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+				" parameter\n");
+		return NULL;
+	}
+	/*
+	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
+	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
+	 */
+	if (!(sh)) {
+		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+			printk(KERN_ERR "pSCSI: Unable to locate struct"
+				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
+			return NULL;
+		}
+		/*
+		 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
+		 * reference, we enforce that udev_path has been set
+		 */
+		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+			printk(KERN_ERR "pSCSI: udev_path attribute has not"
+				" been set before ENABLE=1\n");
+			return NULL;
+		}
+		/*
+		 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
+		 * use the original TCM hba ID to reference Linux/SCSI Host No
+		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
+		 */
+		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
+			spin_lock(&hba->device_lock);
+			if (!(list_empty(&hba->hba_dev_list))) {
+				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+					" with active devices\n");
+				spin_unlock(&hba->device_lock);
+				return NULL;
+			}
+			spin_unlock(&hba->device_lock);
+
+			if (pscsi_pmode_enable_hba(hba, 1) != 1)
+				return NULL;
+
+			legacy_mode_enable = 1;
+			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+			sh = phv->phv_lld_host;
+		} else {
+			sh = pscsi_get_sh(pdv->pdv_host_id);
+			if (!(sh)) {
+				printk(KERN_ERR "pSCSI: Unable to locate"
+					" pdv_host_id: %d\n", pdv->pdv_host_id);
+				return NULL;
+			}
+		}
+	} else {
+		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
+			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+				" struct Scsi_Host exists\n");
+			return NULL;
+		}
+	}
+
+	spin_lock_irq(sh->host_lock);
+	list_for_each_entry(sd, &sh->__devices, siblings) {
+		if ((pdv->pdv_channel_id != sd->channel) ||
+		    (pdv->pdv_target_id != sd->id) ||
+		    (pdv->pdv_lun_id != sd->lun))
+			continue;
+		/*
+		 * Functions will release the held struct scsi_host->host_lock
+		 * before calling pscsi_add_device_to_list() to register
+		 * struct scsi_device with target_core_mod.
+		 */
+		switch (sd->type) {
+		case TYPE_DISK:
+			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+			break;
+		case TYPE_ROM:
+			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+			break;
+		default:
+			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+			break;
+		}
+
+		if (!(dev)) {
+			if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+				scsi_host_put(sh);
+			else if (legacy_mode_enable) {
+				pscsi_pmode_enable_hba(hba, 0);
+				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+			}
+			pdv->pdv_sd = NULL;
+			return NULL;
+		}
+		return dev;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
+
+	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+		scsi_host_put(sh);
+	else if (legacy_mode_enable) {
+		pscsi_pmode_enable_hba(hba, 0);
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+	}
+
+	return NULL;
+}
+
+/*	pscsi_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void pscsi_free_device(void *p)
+{
+	struct pscsi_dev_virt *pdv = p;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	if (sd) {
+		/*
+		 * Release exclusive pSCSI internal struct block_device claim for
+		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+		 */
+		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+			blkdev_put(pdv->pdv_bd,
+				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+			pdv->pdv_bd = NULL;
+		}
+		/*
+		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
+		 * to struct Scsi_Host now.
+		 */
+		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+		    (phv->phv_lld_host != NULL))
+			scsi_host_put(phv->phv_lld_host);
+
+		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+			scsi_device_put(sd);
+
+		pdv->pdv_sd = NULL;
+	}
+
+	kfree(pdv);
+}
+
+static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+{
+	return container_of(task, struct pscsi_plugin_task, pscsi_task);
+}
+
+
+/*	pscsi_transport_complete():
+ *
+ *
+ */
+static int pscsi_transport_complete(struct se_task *task)
+{
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	int result;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	unsigned char *cdb = &pt->pscsi_cdb[0];
+
+	result = pt->pscsi_result;
+	/*
+	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
+	 * forced.
+	 */
+	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
+	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		if (!TASK_CMD(task)->se_deve)
+			goto after_mode_sense;
+
+		if (TASK_CMD(task)->se_deve->lun_flags &
+				TRANSPORT_LUNFLAGS_READ_ONLY) {
+			unsigned char *buf = (unsigned char *)
+				T_TASK(task->task_se_cmd)->t_task_buf;
+
+			if (cdb[0] == MODE_SENSE_10) {
+				if (!(buf[3] & 0x80))
+					buf[3] |= 0x80;
+			} else {
+				if (!(buf[2] & 0x80))
+					buf[2] |= 0x80;
+			}
+		}
+	}
+after_mode_sense:
+
+	if (sd->type != TYPE_TAPE)
+		goto after_mode_select;
+
+	/*
+	 * Hack to correctly obtain the initiator requested blocksize for
+	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
+	 * struct scsi_device->sector_size will not contain the correct value
+	 * by default, so we go ahead and set it so
+	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
+	 * storage engine.
+	 */
+	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
+	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		unsigned char *buf;
+		struct scatterlist *sg = task->task_sg;
+		u16 bdl;
+		u32 blocksize;
+
+		buf = sg_virt(&sg[0]);
+		if (!(buf)) {
+			printk(KERN_ERR "Unable to get buf for scatterlist\n");
+			goto after_mode_select;
+		}
+
+		if (cdb[0] == MODE_SELECT)
+			bdl = (buf[3]);
+		else
+			bdl = (buf[6] << 8) | (buf[7]);
+
+		if (!bdl)
+			goto after_mode_select;
+
+		if (cdb[0] == MODE_SELECT)
+			blocksize = (buf[9] << 16) | (buf[10] << 8) |
+					(buf[11]);
+		else
+			blocksize = (buf[13] << 16) | (buf[14] << 8) |
+					(buf[15]);
+
+		sd->sector_size = blocksize;
+	}
+after_mode_select:
+
+	if (status_byte(result) & CHECK_CONDITION)
+		return 1;
+
+	return 0;
+}
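+
+/*
+ * Example of the MODE_SENSE fixup above (illustration only): for a LUN
+ * mapped with TRANSPORT_LUNFLAGS_READ_ONLY, a successful MODE_SENSE(6)
+ * response gets bit 7 (WP) of the DEVICE-SPECIFIC PARAMETER byte forced
+ * on, i.e. buf[2] |= 0x80; MODE_SENSE(10) carries the same byte at buf[3].
+ */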
+
+static struct se_task *
+pscsi_alloc_task(struct se_cmd *cmd)
+{
+	struct pscsi_plugin_task *pt;
+	unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
+
+	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+	if (!pt) {
+		printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+		return NULL;
+	}
+
+	/*
+	 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
+	 * allocate the extended CDB buffer for per struct se_task context
+	 * pt->pscsi_cdb now.
+	 */
+	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
+
+		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
+		if (!(pt->pscsi_cdb)) {
+			printk(KERN_ERR "pSCSI: Unable to allocate extended"
+					" pt->pscsi_cdb\n");
+			kfree(pt);
+			return NULL;
+		}
+	} else
+		pt->pscsi_cdb = &pt->__pscsi_cdb[0];
+
+	return &pt->pscsi_task;
+}
+
+static inline void pscsi_blk_init_request(
+	struct se_task *task,
+	struct pscsi_plugin_task *pt,
+	struct request *req,
+	int bidi_read)
+{
+	/*
+	 * Defined as "scsi command" in include/linux/blkdev.h.
+	 */
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	/*
+	 * For the extra BIDI-COMMAND READ struct request we do not
+	 * need to setup the remaining structure members
+	 */
+	if (bidi_read)
+		return;
+	/*
+	 * Setup the done function pointer for struct request,
+	 * also set the end_io_data pointer to struct se_task.
+	 */
+	req->end_io = pscsi_req_done;
+	req->end_io_data = (void *)task;
+	/*
+	 * Load the referenced struct se_task's SCSI CDB into
+	 * include/linux/blkdev.h:struct request->cmd
+	 */
+	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+	req->cmd = &pt->pscsi_cdb[0];
+	/*
+	 * Setup pointer for outgoing sense data.
+	 */
+	req->sense = (void *)&pt->pscsi_sense[0];
+	req->sense_len = 0;
+}
+
+/*
+ * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
+ */
+static int pscsi_blk_get_request(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+
+	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
+			(task->task_data_direction == DMA_TO_DEVICE),
+			GFP_KERNEL);
+	if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
+		printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+				PTR_ERR(pt->pscsi_req));
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+	 * and setup rq callback, CDB and sense.
+	 */
+	pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+	return 0;
+}
+
+/*      pscsi_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int pscsi_do_task(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	/*
+	 * Set the struct request->timeout value based on peripheral
+	 * device type from SCSI.
+	 */
+	if (pdv->pdv_sd->type == TYPE_DISK)
+		pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
+	else
+		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
+
+	pt->pscsi_req->retries = PS_RETRY;
+	/*
+	 * Queue the struct request into the struct scsi_device->request_queue.
+	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
+	 * descriptor
+	 */
+	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
+			(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+			pscsi_req_done);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static void pscsi_free_task(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct se_cmd *cmd = task->task_se_cmd;
+
+	/*
+	 * Release the extended CDB allocation from pscsi_alloc_task()
+	 * if one exists.
+	 */
+	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
+		kfree(pt->pscsi_cdb);
+	/*
+	 * We do not release the bio(s) here associated with this task, as
+	 * this is handled by bio_put() and pscsi_bi_endio().
+	 */
+	kfree(pt);
+}
+
+enum {
+	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
+	Opt_scsi_lun_id, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_scsi_host_id, "scsi_host_id=%d"},
+	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
+	{Opt_scsi_target_id, "scsi_target_id=%d"},
+	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_scsi_host_id:
+			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+				printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+					" scsi_host_id while phv_mode =="
+					" PHV_LLD_SCSI_HOST_NO\n",
+					phv->phv_host_id);
+				ret = -EINVAL;
+				goto out;
+			}
+			match_int(args, &arg);
+			pdv->pdv_host_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
+			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
+			break;
+		case Opt_scsi_channel_id:
+			match_int(args, &arg);
+			pdv->pdv_channel_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+				" ID: %d\n",  phv->phv_host_id,
+				pdv->pdv_channel_id);
+			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
+			break;
+		case Opt_scsi_target_id:
+			match_int(args, &arg);
+			pdv->pdv_target_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+				" ID: %d\n", phv->phv_host_id,
+				pdv->pdv_target_id);
+			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
+			break;
+		case Opt_scsi_lun_id:
+			match_int(args, &arg);
+			pdv->pdv_lun_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
+			pdv->pdv_flags |= PDF_HAS_LUN_ID;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
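+
+/*
+ * Usage sketch (the configfs path below is only an example and depends on
+ * how the HBA and device were named): the option string parsed above is
+ * written through the device's configfs control attribute, e.g.
+ *
+ *   echo "scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0" > \
+ *       /sys/kernel/config/target/core/pscsi_0/mydev/control
+ *
+ * scsi_host_id= may be included as well, unless the HBA is already running
+ * in PHV_LLD_SCSI_HOST_NO mode.
+ */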
+
+static ssize_t pscsi_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+
+	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+		printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+			" scsi_lun_id= parameters\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
+					      struct se_subsystem_dev *se_dev,
+					      char *b)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	unsigned char host_id[16];
+	ssize_t bl;
+	int i;
+
+	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
+	else
+		snprintf(host_id, 16, "PHBA Mode");
+
+	bl = sprintf(b, "SCSI Device Bus Location:"
+		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
+		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
+		host_id);
+
+	if (sd) {
+		bl += sprintf(b + bl, "        ");
+		bl += sprintf(b + bl, "Vendor: ");
+		for (i = 0; i < 8; i++) {
+			if (ISPRINT(sd->vendor[i]))   /* printable character? */
+				bl += sprintf(b + bl, "%c", sd->vendor[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Model: ");
+		for (i = 0; i < 16; i++) {
+			if (ISPRINT(sd->model[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->model[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Rev: ");
+		for (i = 0; i < 4; i++) {
+			if (ISPRINT(sd->rev[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->rev[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	return bl;
+}
+
+static void pscsi_bi_endio(struct bio *bio, int error)
+{
+	bio_put(bio);
+}
+
+static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+{
+	struct bio *bio;
+	/*
+	 * Use bio_kmalloc(), following the comment for bio -> struct request
+	 * in block/blk-core.c:blk_make_request()
+	 */
+	bio = bio_kmalloc(GFP_KERNEL, sg_num);
+	if (!(bio)) {
+		printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+		return NULL;
+	}
+	bio->bi_end_io = pscsi_bi_endio;
+
+	return bio;
+}
+
+#if 0
+#define DEBUG_PSCSI(x...) printk(x)
+#else
+#define DEBUG_PSCSI(x...)
+#endif
+
+static int __pscsi_map_task_SG(
+	struct se_task *task,
+	struct scatterlist *task_sg,
+	u32 task_sg_num,
+	int bidi_read)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct page *page;
+	struct scatterlist *sg;
+	u32 data_len = task->task_size, i, len, bytes, off;
+	int nr_pages = (task->task_size + task_sg[0].offset +
+			PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	int rw = (task->task_data_direction == DMA_TO_DEVICE);
+
+	if (!task->task_size)
+		return 0;
+	/*
+	 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
+	 * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
+	 * struct scatterlist memory.  The struct se_task->task_sg[] currently needs
+	 * to be attached to struct bios for submission to Linux/SCSI using
+	 * struct request to struct scsi_device->request_queue.
+	 *
+	 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
+	 * is ported to upstream SCSI passthrough functionality that accepts
+	 * struct scatterlist->page_link or struct page as a parameter.
+	 */
+	DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+
+	for_each_sg(task_sg, sg, task_sg_num, i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+			page, len, off);
+
+		while (len > 0 && data_len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
+
+			if (!(bio)) {
+				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+				nr_pages -= nr_vecs;
+				/*
+				 * Calls bio_kmalloc() and sets bio->bi_end_io()
+				 */
+				bio = pscsi_get_bio(pdv, nr_vecs);
+				if (!(bio))
+					goto fail;
+
+				if (rw)
+					bio->bi_rw |= REQ_WRITE;
+
+				DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+					" dir: %s nr_vecs: %d\n", bio,
+					(rw) ? "rw" : "r", nr_vecs);
+				/*
+				 * Set *hbio pointer to handle the case:
+				 * nr_pages > BIO_MAX_PAGES, where additional
+				 * bios need to be added to complete a given
+				 * struct se_task
+				 */
+				if (!hbio)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+				" bio: %p page: %p len: %d off: %d\n", i, bio,
+				page, len, off);
+
+			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
+					bio, page, bytes, off);
+			if (rc != bytes)
+				goto fail;
+
+			DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+				bio->bi_vcnt, nr_vecs);
+
+			if (bio->bi_vcnt > nr_vecs) {
+				DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+					" %d i: %d bio: %p, allocating another"
+					" bio\n", bio->bi_vcnt, i, bio);
+				/*
+				 * Clear the pointer so that another bio will
+				 * be allocated with pscsi_get_bio() above, the
+				 * current bio has already been set *tbio and
+				 * bio->bi_next.
+				 */
+				bio = NULL;
+			}
+
+			page++;
+			len -= bytes;
+			data_len -= bytes;
+			off = 0;
+		}
+	}
+	/*
+	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
+	 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
+	 */
+	if (!(bidi_read)) {
+		/*
+		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
+		 * allocate the pSCSI task a struct request.
+		 */
+		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
+					hbio, GFP_KERNEL);
+		if (!(pt->pscsi_req)) {
+			printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+			goto fail;
+		}
+		/*
+		 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+		 * and setup rq callback, CDB and sense.
+		 */
+		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+
+		return task->task_sg_num;
+	}
+	/*
+	 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
+	 * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
+	 */
+	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
+					hbio, GFP_KERNEL);
+	if (!(pt->pscsi_req->next_rq)) {
+		printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+		goto fail;
+	}
+	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
+
+	return task->task_sg_num;
+fail:
+	while (hbio) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_endio(bio, 0);
+	}
+	return ret;
+}
+
+static int pscsi_map_task_SG(struct se_task *task)
+{
+	int ret;
+
+	/*
+	 * Setup the main struct request for the task->task_sg[] payload
+	 */
+
+	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+	if (ret >= 0 && task->task_sg_bidi) {
+		/*
+		 * If present, set up the extra BIDI-COMMAND SCSI READ
+		 * struct request and payload.
+		 */
+		ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
+					task->task_sg_num, 1);
+	}
+
+	if (ret < 0)
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	return 0;
+}
+
+/*	pscsi_map_task_non_SG():
+ *
+ *
+ */
+static int pscsi_map_task_non_SG(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	int ret = 0;
+
+	if (pscsi_blk_get_request(task) < 0)
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	if (!task->task_size)
+		return 0;
+
+	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
+			pt->pscsi_req, T_TASK(cmd)->t_task_buf,
+			task->task_size, GFP_KERNEL);
+	if (ret < 0) {
+		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	return 0;
+}
+
+static int pscsi_CDB_none(struct se_task *task)
+{
+	return pscsi_blk_get_request(task);
+}
+
+/*	pscsi_get_cdb():
+ *
+ *
+ */
+static unsigned char *pscsi_get_cdb(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	return pt->pscsi_cdb;
+}
+
+/*	pscsi_get_sense_buffer():
+ *
+ *
+ */
+static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	return (unsigned char *)&pt->pscsi_sense[0];
+}
+
+/*	pscsi_get_device_rev():
+ *
+ *
+ */
+static u32 pscsi_get_device_rev(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
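+	/*
+	 * scsi_level as set by the SCSI midlayer is typically the INQUIRY
+	 * VERSION value plus one, so subtract one here and clamp the result
+	 * to a minimum of 1.
+	 */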
+	return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+}
+
+/*	pscsi_get_device_type():
+ *
+ *
+ */
+static u32 pscsi_get_device_type(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	return sd->type;
+}
+
+static sector_t pscsi_get_blocks(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+
+	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+		return pdv->pdv_bd->bd_part->nr_sects;
+
+	dump_stack();
+	return 0;
+}
+
+/*	pscsi_process_SAM_status():
+ *
+ *	Report the SAM status and host byte results and complete the task.
+ */
+static inline void pscsi_process_SAM_status(
+	struct se_task *task,
+	struct pscsi_plugin_task *pt)
+{
+	task->task_scsi_status = status_byte(pt->pscsi_result);
+	if ((task->task_scsi_status)) {
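+		/*
+		 * status_byte() returns the SAM status right-shifted by one,
+		 * so shift it back to recover the raw SAM status code.
+		 */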
+		task->task_scsi_status <<= 1;
+		printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+	}
+
+	switch (host_byte(pt->pscsi_result)) {
+	case DID_OK:
+		transport_complete_task(task, (!task->task_scsi_status));
+		break;
+	default:
+		printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		TASK_CMD(task)->transport_error_status =
+					PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		transport_complete_task(task, 0);
+		break;
+	}
+
+	return;
+}
+
+static void pscsi_req_done(struct request *req, int uptodate)
+{
+	struct se_task *task = req->end_io_data;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	pt->pscsi_result = req->errors;
+	pt->pscsi_resid = req->resid_len;
+
+	pscsi_process_SAM_status(task, pt);
+	/*
+	 * Release BIDI-READ if present
+	 */
+	if (req->next_rq != NULL)
+		__blk_put_request(req->q, req->next_rq);
+
+	__blk_put_request(req->q, req);
+	pt->pscsi_req = NULL;
+}
+
+static struct se_subsystem_api pscsi_template = {
+	.name			= "pscsi",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
+	.cdb_none		= pscsi_CDB_none,
+	.map_task_non_SG	= pscsi_map_task_non_SG,
+	.map_task_SG		= pscsi_map_task_SG,
+	.attach_hba		= pscsi_attach_hba,
+	.detach_hba		= pscsi_detach_hba,
+	.pmode_enable_hba	= pscsi_pmode_enable_hba,
+	.allocate_virtdevice	= pscsi_allocate_virtdevice,
+	.create_virtdevice	= pscsi_create_virtdevice,
+	.free_device		= pscsi_free_device,
+	.transport_complete	= pscsi_transport_complete,
+	.alloc_task		= pscsi_alloc_task,
+	.do_task		= pscsi_do_task,
+	.free_task		= pscsi_free_task,
+	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
+	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
+	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
+	.get_cdb		= pscsi_get_cdb,
+	.get_sense_buffer	= pscsi_get_sense_buffer,
+	.get_device_rev		= pscsi_get_device_rev,
+	.get_device_type	= pscsi_get_device_type,
+	.get_blocks		= pscsi_get_blocks,
+};
+
+static int __init pscsi_module_init(void)
+{
+	return transport_subsystem_register(&pscsi_template);
+}
+
+static void pscsi_module_exit(void)
+{
+	transport_subsystem_release(&pscsi_template);
+}
+
+MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(pscsi_module_init);
+module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 0000000..a4cd5d3
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_PSCSI_H
+#define TARGET_CORE_PSCSI_H
+
+#define PSCSI_VERSION		"v4.0"
+#define PSCSI_VIRTUAL_HBA_DEPTH	2048
+
+/* used in pscsi_find_alloc_len() */
+#ifndef INQUIRY_DATA_SIZE
+#define INQUIRY_DATA_SIZE	0x24
+#endif
+
+/* used in pscsi_add_device_to_list() */
+#define PSCSI_DEFAULT_QUEUEDEPTH	1
+
+#define PS_RETRY		5
+#define PS_TIMEOUT_DISK		(15*HZ)
+#define PS_TIMEOUT_OTHER	(500*HZ)
+
+#include <linux/device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_device.h>
+#include <linux/kref.h>
+#include <linux/kobject.h>
+
+struct pscsi_plugin_task {
+	struct se_task pscsi_task;
+	unsigned char *pscsi_cdb;
+	unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
+	int	pscsi_direction;
+	int	pscsi_result;
+	u32	pscsi_resid;
+	struct request *pscsi_req;
+} ____cacheline_aligned;
+
+#define PDF_HAS_CHANNEL_ID	0x01
+#define PDF_HAS_TARGET_ID	0x02
+#define PDF_HAS_LUN_ID		0x04
+#define PDF_HAS_VPD_UNIT_SERIAL 0x08
+#define PDF_HAS_VPD_DEV_IDENT	0x10
+#define PDF_HAS_VIRT_HOST_ID	0x20
+
+struct pscsi_dev_virt {
+	int	pdv_flags;
+	int	pdv_host_id;
+	int	pdv_channel_id;
+	int	pdv_target_id;
+	int	pdv_lun_id;
+	struct block_device *pdv_bd;
+	struct scsi_device *pdv_sd;
+	struct se_hba *pdv_se_hba;
+} ____cacheline_aligned;
+
+typedef enum phv_modes {
+	PHV_VIRUTAL_HOST_ID,
+	PHV_LLD_SCSI_HOST_NO
+} phv_modes_t;
+
+struct pscsi_hba_virt {
+	int			phv_host_id;
+	phv_modes_t		phv_mode;
+	struct Scsi_Host	*phv_lld_host;
+} ____cacheline_aligned;
+
+#endif   /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 0000000..979aebf
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1091 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_rd.h"
+
+static struct se_subsystem_api rd_dr_template;
+static struct se_subsystem_api rd_mcp_template;
+
+/* #define DEBUG_RAMDISK_MCP */
+/* #define DEBUG_RAMDISK_DR */
+
+/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct rd_host *rd_host;
+
+	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
+	if (!(rd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+		return -ENOMEM;
+	}
+
+	rd_host->rd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) rd_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
+		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
+		RD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void rd_detach_hba(struct se_hba *hba)
+{
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+	kfree(rd_host);
+	hba->hba_ptr = NULL;
+}
+
+/*	rd_release_device_space():
+ *
+ *
+ */
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 i, j, page_count = 0, sg_per_table;
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	sg_table = rd_dev->sg_table_array;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg = sg_table[i].sg_table;
+		sg_per_table = sg_table[i].rd_sg_count;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = sg_page(&sg[j]);
+			if ((pg)) {
+				__free_page(pg);
+				page_count++;
+			}
+		}
+
+		kfree(sg);
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	kfree(sg_table);
+	rd_dev->sg_table_array = NULL;
+	rd_dev->sg_table_count = 0;
+}
+
+
+/*	rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (rd_dev->rd_page_count <= 0) {
+		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -1;
+	}
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
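+	/*
+	 * Round up so a final, partially filled table still gets a slot;
+	 * when the page count divides evenly this leaves one extra table
+	 * entry unused.
+	 */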
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!(sg_table)) {
+		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -1;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	while (total_sg_needed) {
+		sg_per_table = (total_sg_needed > max_sg_per_table) ?
+			max_sg_per_table : total_sg_needed;
+
+		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if (!(sg)) {
+			printk(KERN_ERR "Unable to allocate scatterlist array"
+				" for struct rd_dev\n");
+			return -1;
+		}
+
+		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+
+		sg_table[i].sg_table = sg;
+		sg_table[i].rd_sg_count = sg_per_table;
+		sg_table[i].page_start_offset = page_offset;
+		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+						- 1;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = alloc_pages(GFP_KERNEL, 0);
+			if (!(pg)) {
+				printk(KERN_ERR "Unable to allocate scatterlist"
+					" pages for struct rd_dev_sg_table\n");
+				return -1;
+			}
+			sg_assign_page(&sg[j], pg);
+			sg[j].length = PAGE_SIZE;
+		}
+
+		page_offset += sg_per_table;
+		total_sg_needed -= sg_per_table;
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count);
+
+	return 0;
+}
+
+static void *rd_allocate_virtdevice(
+	struct se_hba *hba,
+	const char *name,
+	int rd_direct)
+{
+	struct rd_dev *rd_dev;
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
+	if (!(rd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+		return NULL;
+	}
+
+	rd_dev->rd_host = rd_host;
+	rd_dev->rd_direct = rd_direct;
+
+	return rd_dev;
+}
+
+static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 1);
+}
+
+static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/*	rd_create_virtdevice():
+ *
+ *
+ */
+static struct se_device *rd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p,
+	int rd_direct)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct rd_dev *rd_dev = p;
+	struct rd_host *rd_host = hba->hba_ptr;
+	int dev_flags = 0;
+	char prod[16], rev[4];
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	if (rd_build_device_space(rd_dev) < 0)
+		goto fail;
+
+	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
+						RD_MCP_VERSION);
+
+	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
+	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
+	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
+	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba,
+			(rd_dev->rd_direct) ? &rd_dr_template :
+			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+			&dev_limits, prod, rev);
+	if (!(dev))
+		goto fail;
+
+	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+	rd_dev->rd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+		" %u pages in %u tables, %lu total bytes\n",
+		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count,
+		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+	return dev;
+
+fail:
+	rd_release_device_space(rd_dev);
+	return NULL;
+}
+
+static struct se_device *rd_DIRECT_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 1);
+}
+
+static struct se_device *rd_MEMCPY_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/*	rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_device(void *p)
+{
+	struct rd_dev *rd_dev = p;
+
+	rd_release_device_space(rd_dev);
+	kfree(rd_dev);
+}
+
+static inline struct rd_request *RD_REQ(struct se_task *task)
+{
+	return container_of(task, struct rd_request, rd_task);
+}
+
+static struct se_task *
+rd_alloc_task(struct se_cmd *cmd)
+{
+	struct rd_request *rd_req;
+
+	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
+	if (!rd_req) {
+		printk(KERN_ERR "Unable to allocate struct rd_request\n");
+		return NULL;
+	}
+	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
+
+	return &rd_req->rd_task;
+}
+
+/*	rd_get_sg_table():
+ *
+ *
+ */
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+{
+	u32 i;
+	struct rd_dev_sg_table *sg_table;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg_table = &rd_dev->sg_table_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+/*	rd_MEMCPY_read():
+ *
+ *
+ */
+static int rd_MEMCPY_read(struct rd_request *req)
+{
+	struct se_task *task = &req->rd_task;
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = task->task_sg;
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	src_offset = rd_offset;
+
+	while (req->rd_size) {
+		if ((sg_d[i].length - dst_offset) <
+		    (sg_s[j].length - src_offset)) {
+			length = (sg_d[i].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
+				sg_s[j].length);
+			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src = sg_virt(&sg_s[j]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst_offset = 0;
+			src_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_s[j].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset,
+				j, sg_s[j].length);
+			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			if (sg_d[i].length == length) {
+				i++;
+				dst_offset = 0;
+			} else
+				dst_offset = length;
+
+			src = sg_virt(&sg_s[j++]) + src_offset;
+			if (!src)
+				BUG();
+
+			src_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_write():
+ *
+ *
+ */
+static int rd_MEMCPY_write(struct rd_request *req)
+{
+	struct se_task *task = &req->rd_task;
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
+	sg_s = task->task_sg;
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	dst_offset = rd_offset;
+
+	while (req->rd_size) {
+		if ((sg_s[i].length - src_offset) <
+		    (sg_d[j].length - dst_offset)) {
+			length = (sg_s[i].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i++]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst = sg_virt(&sg_d[j]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src_offset = 0;
+			dst_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_d[j].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i]) + src_offset;
+			if (!src)
+				BUG();
+
+			if (sg_s[i].length == length) {
+				i++;
+				src_offset = 0;
+			} else
+				src_offset = length;
+
+			dst = sg_virt(&sg_d[j++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			dst_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_d = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_MEMCPY_do_task(struct se_task *task)
+{
+	struct se_device *dev = task->se_dev;
+	struct rd_request *req = RD_REQ(task);
+	unsigned long long lba;
+	int ret;
+
+	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+	lba = task->task_lba;
+	req->rd_offset = (do_div(lba,
+			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
+			   DEV_ATTRIB(dev)->block_size;
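+	/*
+	 * Illustrative example: with a 512 byte block_size and 4096 byte
+	 * pages, task_lba 9 yields rd_page 1 (9 * 512 / 4096) and
+	 * rd_offset 512 ((9 % 8) * 512).
+	 */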
+	req->rd_size = task->task_size;
+
+	if (task->task_data_direction == DMA_FROM_DEVICE)
+		ret = rd_MEMCPY_read(req);
+	else
+		ret = rd_MEMCPY_write(req);
+
+	if (ret != 0)
+		return ret;
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_DIRECT_with_offset():
+ *
+ *
+ */
+static int rd_DIRECT_with_offset(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct rd_request *req = RD_REQ(task);
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct se_mem *se_mem;
+	struct scatterlist *sg_s;
+	u32 j = 0, set_offset = 1;
+	u32 get_next_table = 0, offset_length, table_sg_end;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+		(task->task_data_direction == DMA_TO_DEVICE) ?
+			"Write" : "Read",
+		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
+#endif
+	while (req->rd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		if (set_offset) {
+			offset_length = sg_s[j].length - req->rd_offset;
+			if (offset_length > req->rd_size)
+				offset_length = req->rd_size;
+
+			se_mem->se_page = sg_page(&sg_s[j++]);
+			se_mem->se_off = req->rd_offset;
+			se_mem->se_len = offset_length;
+
+			set_offset = 0;
+			get_next_table = (j > table_sg_end);
+			goto check_eot;
+		}
+
+		offset_length = (req->rd_size < req->rd_offset) ?
+			req->rd_size : req->rd_offset;
+
+		se_mem->se_page = sg_page(&sg_s[j]);
+		se_mem->se_len = offset_length;
+
+		set_offset = 1;
+
+check_eot:
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
+			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
+			req->rd_page, req->rd_size, offset_length, j, se_mem,
+			se_mem->se_page, se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= offset_length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (!set_offset && !get_next_table)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk(KERN_INFO "page: %u in same page table\n",
+					req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_without_offset():
+ *
+ *
+ */
+static int rd_DIRECT_without_offset(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct rd_request *req = RD_REQ(task);
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct se_mem *se_mem;
+	struct scatterlist *sg_s;
+	u32 length, j = 0;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
+		(task->task_data_direction == DMA_TO_DEVICE) ?
+			"Write" : "Read",
+		task->task_lba, req->rd_size, req->rd_page);
+#endif
+	while (req->rd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		length = (req->rd_size < sg_s[j].length) ?
+			req->rd_size : sg_s[j].length;
+
+		se_mem->se_page = sg_page(&sg_s[j++]);
+		se_mem->se_len = length;
+
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
+			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
+			req->rd_size, j, se_mem, se_mem->se_page,
+			se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_do_se_mem_map():
+ *
+ *
+ */
+static int rd_DIRECT_do_se_mem_map(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset_in)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct rd_request *req = RD_REQ(task);
+	u32 task_offset = *task_offset_in;
+	unsigned long long lba;
+	int ret;
+
+	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
+			PAGE_SIZE);
+	lba = task->task_lba;
+	req->rd_offset = (do_div(lba,
+			  (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
+			   DEV_ATTRIB(task->se_dev)->block_size;
+	req->rd_size = task->task_size;
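+	/*
+	 * Same page/offset math as rd_MEMCPY_do_task(): locate the starting
+	 * ramdisk page and the byte offset into it for this task's LBA.
+	 */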
+
+	if (req->rd_offset)
+		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
+				task_offset_in);
+	else
+		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
+				task_offset_in);
+
+	if (ret < 0)
+		return ret;
+
+	if (CMD_TFO(cmd)->task_sg_chaining == 0)
+		return 0;
+	/*
+	 * Currently prevent writers from multiple HW fabrics doing
+	 * pci_map_sg() to RD_DR's internal scatterlist memory.
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
+				" RAMDISK_DR with task_sg_chaining=1\n");
+		return -1;
+	}
+	/*
+	 * Special case for if task_sg_chaining is enabled, then
+	 * we setup struct se_task->task_sg[], as it will be used by
+	 * transport_do_task_sg_chain() for creating chained SGLs
+	 * across multiple struct se_task->task_sg[].
+	 */
+	if (!(transport_calc_sg_num(task,
+			list_entry(T_TASK(cmd)->t_mem_list->next,
+				   struct se_mem, se_list),
+			task_offset)))
+		return -1;
+
+	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+			list_entry(T_TASK(cmd)->t_mem_list->next,
+				   struct se_mem, se_list),
+			out_se_mem, se_mem_cnt, task_offset_in);
+}
+
+/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_DIRECT_do_task(struct se_task *task)
+{
+	/*
+	 * At this point the locally allocated RD tables have been mapped
+	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
+	 */
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_task(struct se_task *task)
+{
+	kfree(RD_REQ(task));
+}
+
+enum {
+	Opt_rd_pages, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_rd_pages, "rd_pages=%d"},
+	{Opt_err, NULL}
+};
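+
+/* Parsed from the configfs device control attribute, e.g. "rd_pages=32768". */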
+
+static ssize_t rd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_rd_pages:
+			match_int(args, &arg);
+			rd_dev->rd_page_count = arg;
+			printk(KERN_INFO "RAMDISK: Referencing Page"
+				" Count: %u\n", rd_dev->rd_page_count);
+			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		printk(KERN_INFO "Missing rd_pages= parameter\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t rd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
+			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+			"rd_direct" : "rd_mcp");
+	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
+			"  SG_table_count: %u\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count);
+	return bl;
+}
+
+/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *rd_get_cdb(struct se_task *task)
+{
+	struct rd_request *req = RD_REQ(task);
+
+	return req->rd_scsi_cdb;
+}
+
+static u32 rd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 rd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t rd_get_blocks(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = dev->dev_ptr;
+	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+			DEV_ATTRIB(dev)->block_size) - 1;
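+	/*
+	 * e.g. 4096 pages with PAGE_SIZE 4096 and a 512 byte block_size
+	 * gives 32767 here (16777216 / 512 - 1).
+	 */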
+
+	return blocks_long;
+}
+
+static struct se_subsystem_api rd_dr_template = {
+	.name			= "rd_dr",
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
+	.create_virtdevice	= rd_DIRECT_create_virtdevice,
+	.free_device		= rd_free_device,
+	.alloc_task		= rd_alloc_task,
+	.do_task		= rd_DIRECT_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_cdb		= rd_get_cdb,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_blocks		= rd_get_blocks,
+	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
+};
+
+static struct se_subsystem_api rd_mcp_template = {
+	.name			= "rd_mcp",
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
+	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
+	.free_device		= rd_free_device,
+	.alloc_task		= rd_alloc_task,
+	.do_task		= rd_MEMCPY_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_cdb		= rd_get_cdb,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_blocks		= rd_get_blocks,
+};
+
+int __init rd_module_init(void)
+{
+	int ret;
+
+	ret = transport_subsystem_register(&rd_dr_template);
+	if (ret < 0)
+		return ret;
+
+	ret = transport_subsystem_register(&rd_mcp_template);
+	if (ret < 0) {
+		transport_subsystem_release(&rd_dr_template);
+		return ret;
+	}
+
+	return 0;
+}
+
+void rd_module_exit(void)
+{
+	transport_subsystem_release(&rd_dr_template);
+	transport_subsystem_release(&rd_mcp_template);
+}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 0000000..13badfb
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,73 @@
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION		"v4.0"
+#define RD_DR_VERSION		"4.0"
+#define RD_MCP_VERSION		"4.0"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE	65536
+/* Maximum queuedepth for the Ramdisk HBA */
+#define RD_HBA_QUEUE_DEPTH	256
+#define RD_DEVICE_QUEUE_DEPTH	32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE		512
+#define RD_MAX_SECTORS		1024
+
+extern struct kmem_cache *se_mem_cache;
+
+/* Used in target_core_init_configfs() for virtual LUN 0 access */
+int __init rd_module_init(void);
+void rd_module_exit(void);
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct rd_request {
+	struct se_task	rd_task;
+
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	/* Offset from start of page */
+	u32		rd_offset;
+	/* Starting page in Ramdisk for request */
+	u32		rd_page;
+	/* Total number of pages needed for request */
+	u32		rd_page_count;
+	/* Remaining size of the request in bytes */
+	u32		rd_size;
+	/* Ramdisk device */
+	struct rd_dev	*rd_dev;
+} ____cacheline_aligned;
+
+struct rd_dev_sg_table {
+	u32		page_start_offset;
+	u32		page_end_offset;
+	u32		rd_sg_count;
+	struct scatterlist *sg_table;
+} ____cacheline_aligned;
+
+#define RDF_HAS_PAGE_COUNT	0x01
+
+struct rd_dev {
+	int		rd_direct;
+	u32		rd_flags;
+	/* Unique Ramdisk Device ID in Ramdisk HBA */
+	u32		rd_dev_id;
+	/* Total page count for ramdisk device */
+	u32		rd_page_count;
+	/* Number of SG tables in sg_table_array */
+	u32		sg_table_count;
+	u32		rd_queue_depth;
+	/* Array of struct rd_dev_sg_table containing scatterlists */
+	struct rd_dev_sg_table *sg_table_array;
+	/* Ramdisk HBA device is connected to */
+	struct rd_host *rd_host;
+} ____cacheline_aligned;
+
+struct rd_host {
+	u32		rd_host_dev_id_count;
+	u32		rd_host_id;		/* Unique Ramdisk Host ID */
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_RD_H */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
new file mode 100644
index 0000000..dc6fed0
--- /dev/null
+++ b/drivers/target/target_core_scdb.c
@@ -0,0 +1,105 @@
+/*******************************************************************************
+ * Filename:  target_core_scdb.c
+ *
+ * This file contains the generic target engine Split CDB related functions.
+ *
+ * Copyright (c) 2004-2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_scdb.h"
+
+/*	split_cdb_XX_6():
+ *
+ *      21-bit LBA w/ 8-bit SECTORS
+ */
+void split_cdb_XX_6(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	cdb[1] = (lba >> 16) & 0x1f;
+	cdb[2] = (lba >> 8) & 0xff;
+	cdb[3] = lba & 0xff;
+	cdb[4] = *sectors & 0xff;
+}
+
+/*	split_cdb_XX_10():
+ *
+ *	32-bit LBA w/ 16-bit SECTORS
+ */
+void split_cdb_XX_10(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
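+	/*
+	 * Big-endian layout, e.g. lba 0x12345678 with 16 sectors gives
+	 * cdb[2..5] = 12 34 56 78 and cdb[7..8] = 00 10.
+	 */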
+	put_unaligned_be32(lba, &cdb[2]);
+	put_unaligned_be16(*sectors, &cdb[7]);
+}
+
+/*	split_cdb_XX_12():
+ *
+ *	32-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_12(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be32(lba, &cdb[2]);
+	put_unaligned_be32(*sectors, &cdb[6]);
+}
+
+/*	split_cdb_XX_16():
+ *
+ *	64-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_16(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be64(lba, &cdb[2]);
+	put_unaligned_be32(*sectors, &cdb[10]);
+}
+
+/*
+ *	split_cdb_XX_32():
+ *
+ * 	64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
+ */
+void split_cdb_XX_32(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be64(lba, &cdb[12]);
+	put_unaligned_be32(*sectors, &cdb[28]);
+}
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
new file mode 100644
index 0000000..98cd1c0
--- /dev/null
+++ b/drivers/target/target_core_scdb.h
@@ -0,0 +1,10 @@
+#ifndef TARGET_CORE_SCDB_H
+#define TARGET_CORE_SCDB_H
+
+extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+
+#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 0000000..4a10983
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,409 @@
+/*******************************************************************************
+ * Filename:  target_core_tmr.c
+ *
+ * This file contains SPC-3 task management infrastructure
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+#define DEBUG_LUN_RESET
+#ifdef DEBUG_LUN_RESET
+#define DEBUG_LR(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_LR(x...)
+#endif
+
+struct se_tmr_req *core_tmr_alloc_req(
+	struct se_cmd *se_cmd,
+	void *fabric_tmr_ptr,
+	u8 function)
+{
+	struct se_tmr_req *tmr;
+
+	tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+	if (!(tmr)) {
+		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tmr->task_cmd = se_cmd;
+	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
+	tmr->function = function;
+	INIT_LIST_HEAD(&tmr->tmr_list);
+
+	return tmr;
+}
+EXPORT_SYMBOL(core_tmr_alloc_req);
+
+void core_tmr_release_req(
+	struct se_tmr_req *tmr)
+{
+	struct se_device *dev = tmr->tmr_dev;
+
+	spin_lock(&dev->se_tmr_lock);
+	list_del(&tmr->tmr_list);
+	kmem_cache_free(se_tmr_req_cache, tmr);
+	spin_unlock(&dev->se_tmr_lock);
+}
+
+static void core_tmr_handle_tas_abort(
+	struct se_node_acl *tmr_nacl,
+	struct se_cmd *cmd,
+	int tas,
+	int fe_count)
+{
+	if (!(fe_count)) {
+		transport_cmd_finish_abort(cmd, 1);
+		return;
+	}
+	/*
+	 * TASK ABORTED status (TAS) bit support
+	 */
+	if (((tmr_nacl != NULL) &&
+	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+		transport_send_task_abort(cmd);
+
+	transport_cmd_finish_abort(cmd, 0);
+}
+
+int core_tmr_lun_reset(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *prout_cmd)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr, *qr_tmp;
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	struct se_queue_obj *qobj = dev->dev_queue_obj;
+	struct se_tmr_req *tmr_p, *tmr_pp;
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int fe_count, state, tas;
+	/*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = DEV_ATTRIB(dev)->emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				TPG_TFO(tmr_tpg)->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		TRANSPORT(dev)->name, tas);
+	/*
+	 * Release all pending and outgoing TMRs aside from the received
+	 * LUN_RESET tmr..
+	 */
+	spin_lock(&dev->se_tmr_lock);
+	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+		/*
+		 * Allow the received TMR to return with FUNCTION_COMPLETE.
+		 */
+		if (tmr && (tmr_p == tmr))
+			continue;
+
+		cmd = tmr_p->task_cmd;
+		if (!(cmd)) {
+			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+			continue;
+		}
+		/*
+		 * If this function was called with a valid pr_res_key
+		 * parameter (e.g., for the PROUT PREEMPT_AND_ABORT service
+		 * action), skip TMRs whose registration key does not match.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		spin_unlock(&dev->se_tmr_lock);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+			spin_lock(&dev->se_tmr_lock);
+			continue;
+		}
+		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+			spin_lock(&dev->se_tmr_lock);
+			continue;
+		}
+		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+			" Response: 0x%02x, t_state: %d\n",
+			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+			tmr_p->function, tmr_p->response, cmd->t_state);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_cmd_finish_abort_tmr(cmd);
+		spin_lock(&dev->se_tmr_lock);
+	}
+	spin_unlock(&dev->se_tmr_lock);
+	/*
+	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
+	 * for TMR LUN_RESET:
+	 *
+	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
+	 * other than the one that caused the SCSI device condition is
+	 * completed with TASK ABORTED status, if the TAS bit is set to one in
+	 * the Control mode page (see SPC-4). "No" indicates that no status is
+	 * returned for aborted commands.
+	 *
+	 * d) If the logical unit reset is caused by a particular I_T nexus
+	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
+	 * (TASK_ABORTED status) applies.
+	 *
+	 * Otherwise (e.g., if triggered by a hard reset), "no"
+	 * (no TASK_ABORTED SAM status) applies.
+	 *
+	 * Note that this seems to be independent of TAS (Task Aborted Status)
+	 * in the Control Mode Page.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
+				t_state_list) {
+		if (!(TASK_CMD(task))) {
+			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+			continue;
+		}
+		cmd = TASK_CMD(task);
+
+		if (!T_TASK(cmd)) {
+			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+				" %p ITT: 0x%08x\n", task, cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+			continue;
+		}
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		list_del(&task->t_state_list);
+		atomic_set(&task->task_state_active, 0);
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
+			"def_t_state: %d/%d cdb: 0x%02x\n",
+			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+			CMD_TFO(cmd)->get_task_tag(cmd), 0,
+			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+			cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
+		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+			" t_task_cdbs: %d t_task_cdbs_left: %d"
+			" t_task_cdbs_sent: %d -- t_transport_active: %d"
+			" t_transport_stop: %d t_transport_sent: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
+			T_TASK(cmd)->t_task_cdbs,
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+			atomic_read(&T_TASK(cmd)->t_transport_active),
+			atomic_read(&T_TASK(cmd)->t_transport_stop),
+			atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+				" for dev: %p\n", task, dev);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+				" dev: %p\n", task, dev);
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		} else {
+			if (atomic_read(&task->task_execute_queue) != 0)
+				transport_remove_task_from_execute_queue(task, dev);
+		}
+		__transport_stop_task_timer(task, &flags);
+
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+			spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+				" t_task_cdbs_ex_left: %d\n", task, dev,
+				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+
+		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+				" task: %p, t_fe_count: %d dev: %p\n", task,
+				fe_count, dev);
+			atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+						flags);
+			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
+		atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	/*
+	 * Release all commands remaining in the struct se_device cmd queue.
+	 *
+	 * This follows the same logic as above for the struct se_device
+	 * struct se_task state list, where commands are returned with
+	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
+	 * reference, otherwise the struct se_cmd is released.
+	 */
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
+		cmd = (struct se_cmd *)qr->cmd;
+		if (!(cmd)) {
+			/*
+			 * Skip these for non PREEMPT_AND_ABORT usage..
+			 */
+			if (preempt_and_abort_list != NULL)
+				continue;
+
+			atomic_dec(&qobj->queue_cnt);
+			list_del(&qr->qr_list);
+			kfree(qr);
+			continue;
+		}
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+		atomic_dec(&qobj->queue_cnt);
+		list_del(&qr->qr_list);
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+		state = qr->state;
+		kfree(qr);
+
+		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
+			"Preempt" : "", cmd, state,
+			atomic_read(&T_TASK(cmd)->t_fe_count));
+		/*
+		 * Signal that the command has failed via cmd->se_cmd_flags,
+		 * and call TFO->new_cmd_failure() to wakeup any fabric
+		 * dependent code used to wait for unsolicited data out
+		 * allocation to complete.  The fabric module is expected
+		 * to dump any remaining unsolicited data out for the aborted
+		 * command at this point.
+		 */
+		transport_new_cmd_failure(cmd);
+
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
+				atomic_read(&T_TASK(cmd)->t_fe_count));
+		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+	/*
+	 * Clear any legacy SPC-2 reservation when called during
+	 * LOGICAL UNIT RESET
+	 */
+	if (!(preempt_and_abort_list) &&
+	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+		spin_lock(&dev->dev_reservation_lock);
+		dev->dev_reserved_node_acl = NULL;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+		spin_unlock(&dev->dev_reservation_lock);
+		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+	}
+
+	spin_lock(&dev->stats_lock);
+	dev->num_resets++;
+	spin_unlock(&dev->stats_lock);
+
+	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+			(preempt_and_abort_list) ? "Preempt" : "TMR",
+			TRANSPORT(dev)->name);
+	return 0;
+}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 0000000..c26f674
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,839 @@
+/*******************************************************************************
+ * Filename:  target_core_tpg.c
+ *
+ * This file contains generic Target Portal Group related functions.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_hba.h"
+
+/*	core_clear_initiator_node_from_tpg():
+ *
+ *
+ */
+static void core_clear_initiator_node_from_tpg(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	int i;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_lun_acl *acl, *acl_tmp;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			printk(KERN_ERR "%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+
+		lun = deve->se_lun;
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+		spin_lock(&lun->lun_acl_lock);
+		list_for_each_entry_safe(acl, acl_tmp,
+					&lun->lun_acl_list, lacl_list) {
+			if (!(strcmp(acl->initiatorname,
+					nacl->initiatorname)) &&
+			     (acl->mapped_lun == deve->mapped_lun))
+				break;
+		}
+
+		if (!acl) {
+			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+				" mapped_lun: %u\n", nacl->initiatorname,
+				deve->mapped_lun);
+			spin_unlock(&lun->lun_acl_lock);
+			spin_lock_irq(&nacl->device_list_lock);
+			continue;
+		}
+
+		list_del(&acl->lacl_list);
+		spin_unlock(&lun->lun_acl_lock);
+
+		spin_lock_irq(&nacl->device_list_lock);
+		kfree(acl);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+}
+
+/*	__core_tpg_get_initiator_node_acl():
+ *
+ *	Must be called with spin_lock_bh(&tpg->acl_node_lock) held.
+ */
+struct se_node_acl *__core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	const char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!(strcmp(acl->initiatorname, initiatorname)))
+			return acl;
+	}
+
+	return NULL;
+}
+
+/*	core_tpg_get_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!(strcmp(acl->initiatorname, initiatorname)) &&
+		   (!(acl->dynamic_node_acl))) {
+			spin_unlock_bh(&tpg->acl_node_lock);
+			return acl;
+		}
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return NULL;
+}
+
+/*	core_tpg_add_node_to_devs():
+ *
+ *
+ */
+void core_tpg_add_node_to_devs(
+	struct se_node_acl *acl,
+	struct se_portal_group *tpg)
+{
+	int i = 0;
+	u32 lun_access = 0;
+	struct se_lun *lun;
+	struct se_device *dev;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &tpg->tpg_lun_list[i];
+		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+
+		dev = lun->lun_se_dev;
+		/*
+		 * By default in LIO-Target $FABRIC_MOD,
+		 * demo_mode_write_protect is ON, or READ_ONLY;
+		 */
+		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+			if (dev->dev_flags & DF_READ_ONLY)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			/*
+			 * Allow only optical drives to issue R/W in default RO
+			 * demo mode.
+			 */
+			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		}
+
+		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+			" access for LUN in Demo Mode\n",
+			TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+			"READ-WRITE" : "READ-ONLY");
+
+		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
+				lun_access, acl, tpg, 1);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+
+/*      core_set_queue_depth_for_node():
+ *
+ *
+ */
+static int core_set_queue_depth_for_node(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl)
+{
+	if (!acl->queue_depth) {
+		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
+			"defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+			acl->initiatorname);
+		acl->queue_depth = 1;
+	}
+
+	return 0;
+}
+
+/*      core_create_device_list_for_node():
+ *
+ *
+ */
+static int core_create_device_list_for_node(struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+	int i;
+
+	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
+				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+	if (!(nacl->device_list)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+			" struct se_node_acl->device_list\n");
+		return -1;
+	}
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		atomic_set(&deve->ua_count, 0);
+		atomic_set(&deve->pr_ref_count, 0);
+		spin_lock_init(&deve->ua_lock);
+		INIT_LIST_HEAD(&deve->alua_port_list);
+		INIT_LIST_HEAD(&deve->ua_list);
+	}
+
+	return 0;
+}
+
+/*	core_tpg_check_initiator_node_acl()
+ *
+ *
+ */
+struct se_node_acl *core_tpg_check_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if ((acl))
+		return acl;
+
+	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+		return NULL;
+
+	acl =  TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
+	if (!(acl))
+		return NULL;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+	acl->dynamic_node_acl = 1;
+
+	TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+
+	core_tpg_add_node_to_devs(acl, tpg);
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
+
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
+{
+	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
+		cpu_relax();
+}
+
+void core_tpg_clear_object_luns(struct se_portal_group *tpg)
+{
+	int i, ret;
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &tpg->tpg_lun_list[i];
+
+		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
+		    (lun->lun_se_dev == NULL))
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+EXPORT_SYMBOL(core_tpg_clear_object_luns);
+
+/*	core_tpg_add_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_add_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *se_nacl,
+	const char *initiatorname,
+	u32 queue_depth)
+{
+	struct se_node_acl *acl = NULL;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if ((acl)) {
+		if (acl->dynamic_node_acl) {
+			acl->dynamic_node_acl = 0;
+			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
+				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+			spin_unlock_bh(&tpg->acl_node_lock);
+			/*
+			 * Release the locally allocated struct se_node_acl
+			 * because core_tpg_add_initiator_node_acl() returned
+			 * a pointer to an existing demo mode node ACL.
+			 */
+			if (se_nacl)
+				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+							se_nacl);
+			goto done;
+		}
+
+		printk(KERN_ERR "ACL entry for %s Initiator"
+			" Node %s already exists for TPG %u, ignoring"
+			" request.\n",  TPG_TFO(tpg)->get_fabric_name(),
+			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return ERR_PTR(-EEXIST);
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	if (!(se_nacl)) {
+		printk("struct se_node_acl pointer is NULL\n");
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * For v4.x logic the struct se_node_acl is hanging off a fabric
+	 * dependent structure allocated via
+	 * struct target_core_fabric_ops->fabric_make_nodeacl()
+	 */
+	acl = se_nacl;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	acl->queue_depth = queue_depth;
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+
+	TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+done:
+	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
+
+/*	core_tpg_del_initiator_node_acl():
+ *
+ *
+ */
+int core_tpg_del_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl,
+	int force)
+{
+	struct se_session *sess, *sess_tmp;
+	int dynamic_acl = 0;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	if (acl->dynamic_node_acl) {
+		acl->dynamic_node_acl = 0;
+		dynamic_acl = 1;
+	}
+	list_del(&acl->acl_list);
+	tpg->num_node_acls--;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	spin_lock_bh(&tpg->session_lock);
+	list_for_each_entry_safe(sess, sess_tmp,
+				&tpg->tpg_sess_list, sess_list) {
+		if (sess->se_node_acl != acl)
+			continue;
+		/*
+		 * Determine if the session needs to be closed by our context.
+		 */
+		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+			continue;
+
+		spin_unlock_bh(&tpg->session_lock);
+		/*
+		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+		 * forcefully shutdown the $FABRIC_MOD session/nexus.
+		 */
+		TPG_TFO(tpg)->close_session(sess);
+
+		spin_lock_bh(&tpg->session_lock);
+	}
+	spin_unlock_bh(&tpg->session_lock);
+
+	core_tpg_wait_for_nacl_pr_ref(acl);
+	core_clear_initiator_node_from_tpg(acl, tpg);
+	core_free_device_list_for_node(acl, tpg);
+
+	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
+
+/*	core_tpg_set_initiator_node_queue_depth():
+ *
+ *
+ */
+int core_tpg_set_initiator_node_queue_depth(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname,
+	u32 queue_depth,
+	int force)
+{
+	struct se_session *sess, *init_sess = NULL;
+	struct se_node_acl *acl;
+	int dynamic_acl = 0;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (!(acl)) {
+		printk(KERN_ERR "Access Control List entry for %s Initiator"
+			" Node %s does not exists for TPG %hu, ignoring"
+			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
+			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return -ENODEV;
+	}
+	if (acl->dynamic_node_acl) {
+		acl->dynamic_node_acl = 0;
+		dynamic_acl = 1;
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	spin_lock_bh(&tpg->session_lock);
+	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
+		if (sess->se_node_acl != acl)
+			continue;
+
+		if (!force) {
+			printk(KERN_ERR "Unable to change queue depth for %s"
+				" Initiator Node: %s while session is"
+				" operational.  To forcefully change the queue"
+				" depth and force session reinstatement"
+				" use the \"force=1\" parameter.\n",
+				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+			spin_unlock_bh(&tpg->session_lock);
+
+			spin_lock_bh(&tpg->acl_node_lock);
+			if (dynamic_acl)
+				acl->dynamic_node_acl = 1;
+			spin_unlock_bh(&tpg->acl_node_lock);
+			return -EEXIST;
+		}
+		/*
+		 * Determine if the session needs to be closed by our context.
+		 */
+		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+			continue;
+
+		init_sess = sess;
+		break;
+	}
+
+	/*
+	 * User has requested to change the queue depth for an Initiator Node.
+	 * Change the value in the Node's struct se_node_acl, and call
+	 * core_set_queue_depth_for_node() to add the requested queue depth.
+	 *
+	 * Finally call TPG_TFO(tpg)->close_session() to force session
+	 * reinstatement to occur if there is an active session for the
+	 * $FABRIC_MOD Initiator Node in question.
+	 */
+	acl->queue_depth = queue_depth;
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		spin_unlock_bh(&tpg->session_lock);
+		/*
+		 * Force session reinstatement if
+		 * core_set_queue_depth_for_node() failed, because we assume
+		 * the $FABRIC_MOD has already set the session reinstatement
+		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
+		 */
+		if (init_sess)
+			TPG_TFO(tpg)->close_session(init_sess);
+
+		spin_lock_bh(&tpg->acl_node_lock);
+		if (dynamic_acl)
+			acl->dynamic_node_acl = 1;
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&tpg->session_lock);
+	/*
+	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+	 * forcefully shutdown the $FABRIC_MOD session/nexus.
+	 */
+	if (init_sess)
+		TPG_TFO(tpg)->close_session(init_sess);
+
+	printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
+		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	if (dynamic_acl)
+		acl->dynamic_node_acl = 1;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+
+static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	/* Set in core_dev_setup_virtual_lun0() */
+	struct se_device *dev = se_global->g_lun0_dev;
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	int ret;
+
+	lun->unpacked_lun = 0;
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	atomic_set(&lun->lun_acl_count, 0);
+	init_completion(&lun->lun_shutdown_comp);
+	INIT_LIST_HEAD(&lun->lun_acl_list);
+	INIT_LIST_HEAD(&lun->lun_cmd_list);
+	spin_lock_init(&lun->lun_acl_lock);
+	spin_lock_init(&lun->lun_cmd_lock);
+	spin_lock_init(&lun->lun_sep_lock);
+
+	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+	if (ret < 0)
+		return -1;
+
+	return 0;
+}
+
+static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+
+	core_tpg_post_dellun(se_tpg, lun);
+}
+
+int core_tpg_register(
+	struct target_core_fabric_ops *tfo,
+	struct se_wwn *se_wwn,
+	struct se_portal_group *se_tpg,
+	void *tpg_fabric_ptr,
+	int se_tpg_type)
+{
+	struct se_lun *lun;
+	u32 i;
+
+	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
+				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+	if (!(se_tpg->tpg_lun_list)) {
+		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+				"tpg_lun_list\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &se_tpg->tpg_lun_list[i];
+		lun->unpacked_lun = i;
+		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+		atomic_set(&lun->lun_acl_count, 0);
+		init_completion(&lun->lun_shutdown_comp);
+		INIT_LIST_HEAD(&lun->lun_acl_list);
+		INIT_LIST_HEAD(&lun->lun_cmd_list);
+		spin_lock_init(&lun->lun_acl_lock);
+		spin_lock_init(&lun->lun_cmd_lock);
+		spin_lock_init(&lun->lun_sep_lock);
+	}
+
+	se_tpg->se_tpg_type = se_tpg_type;
+	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
+	se_tpg->se_tpg_tfo = tfo;
+	se_tpg->se_tpg_wwn = se_wwn;
+	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
+	INIT_LIST_HEAD(&se_tpg->acl_node_list);
+	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
+	spin_lock_init(&se_tpg->acl_node_lock);
+	spin_lock_init(&se_tpg->session_lock);
+	spin_lock_init(&se_tpg->tpg_lun_lock);
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
+		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
+			kfree(se_tpg->tpg_lun_list);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_bh(&se_global->se_tpg_lock);
+	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
+	spin_unlock_bh(&se_global->se_tpg_lock);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
+		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_register);
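
As a usage sketch (the my_fabric_ops/my_wwn/my_tpg names are hypothetical, used only for illustration), a fabric module registering an I/O-capable TPG would typically embed a struct se_portal_group in its own TPG structure and call core_tpg_register() roughly as follows:

	if (core_tpg_register(&my_fabric_ops, &my_wwn->se_wwn,
			&my_tpg->se_tpg, (void *)my_tpg,
			TRANSPORT_TPG_TYPE_NORMAL) < 0)
		return ERR_PTR(-ENOMEM);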
+
+int core_tpg_deregister(struct se_portal_group *se_tpg)
+{
+	struct se_node_acl *nacl, *nacl_tmp;
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+		" for endpoint: %s Portal Tag %u\n",
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
+		TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
+		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+	spin_lock_bh(&se_global->se_tpg_lock);
+	list_del(&se_tpg->se_tpg_list);
+	spin_unlock_bh(&se_global->se_tpg_lock);
+
+	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
+		cpu_relax();
+	/*
+	 * Release any remaining demo-mode generated se_node_acl that have
+	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+	 * in transport_deregister_session().
+	 */
+	spin_lock_bh(&se_tpg->acl_node_lock);
+	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+			acl_list) {
+		list_del(&nacl->acl_list);
+		se_tpg->num_node_acls--;
+		spin_unlock_bh(&se_tpg->acl_node_lock);
+
+		core_tpg_wait_for_nacl_pr_ref(nacl);
+		core_free_device_list_for_node(nacl, se_tpg);
+		TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+		spin_lock_bh(&se_tpg->acl_node_lock);
+	}
+	spin_unlock_bh(&se_tpg->acl_node_lock);
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
+		core_tpg_release_virtual_lun0(se_tpg);
+
+	se_tpg->se_tpg_fabric_ptr = NULL;
+	kfree(se_tpg->tpg_lun_list);
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_deregister);
+
+struct se_lun *core_tpg_pre_addlun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			TPG_TFO(tpg)->get_fabric_name(),
+			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+			" on %s Target Portal Group: %u, ignoring request.\n",
+			unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_addlun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 lun_access,
+	void *lun_ptr)
+{
+	if (core_dev_export(lun_ptr, tpg, lun) < 0)
+		return -1;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_access = lun_access;
+	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
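
For context, a caller in the core device layer would typically pair core_tpg_pre_addlun() and core_tpg_post_addlun() along the lines of the sketch below (error handling simplified; the surrounding function and its tpg, unpacked_lun, lun_access and dev parameters are assumed rather than shown here):

	struct se_lun *lun;

	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
		return ERR_PTR(-EINVAL);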
+
+static void core_tpg_shutdown_lun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_clear_lun_from_tpg(lun, tpg);
+	transport_clear_lun_from_sessions(lun);
+}
+
+struct se_lun *core_tpg_pre_dellun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun,
+	int *ret)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %u, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-ENODEV);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_dellun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_tpg_shutdown_lun(tpg, lun);
+
+	core_dev_unexport(lun->lun_se_dev, tpg, lun);
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 0000000..4bbf6c1
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,6168 @@
+/*******************************************************************************
+ * Filename:  target_core_transport.c
+ *
+ * This file contains the Generic Target Engine Core.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/cdrom.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_scdb.h"
+#include "target_core_ua.h"
+
+/* #define DEBUG_CDB_HANDLER */
+#ifdef DEBUG_CDB_HANDLER
+#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CDB_H(x...)
+#endif
+
+/* #define DEBUG_CMD_MAP */
+#ifdef DEBUG_CMD_MAP
+#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CMD_M(x...)
+#endif
+
+/* #define DEBUG_MEM_ALLOC */
+#ifdef DEBUG_MEM_ALLOC
+#define DEBUG_MEM(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM(x...)
+#endif
+
+/* #define DEBUG_MEM2_ALLOC */
+#ifdef DEBUG_MEM2_ALLOC
+#define DEBUG_MEM2(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM2(x...)
+#endif
+
+/* #define DEBUG_SG_CALC */
+#ifdef DEBUG_SG_CALC
+#define DEBUG_SC(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SC(x...)
+#endif
+
+/* #define DEBUG_SE_OBJ */
+#ifdef DEBUG_SE_OBJ
+#define DEBUG_SO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SO(x...)
+#endif
+
+/* #define DEBUG_CMD_VOL */
+#ifdef DEBUG_CMD_VOL
+#define DEBUG_VOL(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_VOL(x...)
+#endif
+
+/* #define DEBUG_CMD_STOP */
+#ifdef DEBUG_CMD_STOP
+#define DEBUG_CS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CS(x...)
+#endif
+
+/* #define DEBUG_PASSTHROUGH */
+#ifdef DEBUG_PASSTHROUGH
+#define DEBUG_PT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_PT(x...)
+#endif
+
+/* #define DEBUG_TASK_STOP */
+#ifdef DEBUG_TASK_STOP
+#define DEBUG_TS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TS(x...)
+#endif
+
+/* #define DEBUG_TRANSPORT_STOP */
+#ifdef DEBUG_TRANSPORT_STOP
+#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TRANSPORT_S(x...)
+#endif
+
+/* #define DEBUG_TASK_FAILURE */
+#ifdef DEBUG_TASK_FAILURE
+#define DEBUG_TF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TF(x...)
+#endif
+
+/* #define DEBUG_DEV_OFFLINE */
+#ifdef DEBUG_DEV_OFFLINE
+#define DEBUG_DO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_DO(x...)
+#endif
+
+/* #define DEBUG_TASK_STATE */
+#ifdef DEBUG_TASK_STATE
+#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TSTATE(x...)
+#endif
+
+/* #define DEBUG_STATUS_THR */
+#ifdef DEBUG_STATUS_THR
+#define DEBUG_ST(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_ST(x...)
+#endif
+
+/* #define DEBUG_TASK_TIMEOUT */
+#ifdef DEBUG_TASK_TIMEOUT
+#define DEBUG_TT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TT(x...)
+#endif
+
+/* #define DEBUG_GENERIC_REQUEST_FAILURE */
+#ifdef DEBUG_GENERIC_REQUEST_FAILURE
+#define DEBUG_GRF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_GRF(x...)
+#endif
+
+/* #define DEBUG_SAM_TASK_ATTRS */
+#ifdef DEBUG_SAM_TASK_ATTRS
+#define DEBUG_STA(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_STA(x...)
+#endif
+
+struct se_global *se_global;
+
+static struct kmem_cache *se_cmd_cache;
+static struct kmem_cache *se_sess_cache;
+struct kmem_cache *se_tmr_req_cache;
+struct kmem_cache *se_ua_cache;
+struct kmem_cache *se_mem_cache;
+struct kmem_cache *t10_pr_reg_cache;
+struct kmem_cache *t10_alua_lu_gp_cache;
+struct kmem_cache *t10_alua_lu_gp_mem_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+/* Used for transport_dev_get_map_*() */
+typedef int (*map_func_t)(struct se_task *, u32);
+
+static int transport_generic_write_pending(struct se_cmd *);
+static int transport_processing_thread(void *);
+static int __transport_execute_tasks(struct se_device *dev);
+static void transport_complete_task_attr(struct se_cmd *cmd);
+static void transport_direct_request_timeout(struct se_cmd *cmd);
+static void transport_free_dev_tasks(struct se_cmd *cmd);
+static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
+		unsigned long long starting_lba, u32 sectors,
+		enum dma_data_direction data_direction,
+		struct list_head *mem_list, int set_counts);
+static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
+		u32 dma_size);
+static int transport_generic_remove(struct se_cmd *cmd,
+		int release_to_pool, int session_reinstatement);
+static int transport_get_sectors(struct se_cmd *cmd);
+static struct list_head *transport_init_se_mem_list(void);
+static int transport_map_sg_to_mem(struct se_cmd *cmd,
+		struct list_head *se_mem_list, void *in_mem,
+		u32 *se_mem_cnt);
+static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
+		unsigned char *dst, struct list_head *se_mem_list);
+static void transport_release_fe_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+		struct se_queue_obj *qobj);
+static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
+static void transport_stop_all_task_timers(struct se_cmd *cmd);
+
+int transport_emulate_control_cdb(struct se_task *task);
+
+int init_se_global(void)
+{
+	struct se_global *global;
+
+	global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
+	if (!(global)) {
+		printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
+		return -1;
+	}
+
+	INIT_LIST_HEAD(&global->g_lu_gps_list);
+	INIT_LIST_HEAD(&global->g_se_tpg_list);
+	INIT_LIST_HEAD(&global->g_hba_list);
+	INIT_LIST_HEAD(&global->g_se_dev_list);
+	spin_lock_init(&global->g_device_lock);
+	spin_lock_init(&global->hba_lock);
+	spin_lock_init(&global->se_tpg_lock);
+	spin_lock_init(&global->lu_gps_lock);
+	spin_lock_init(&global->plugin_class_lock);
+
+	se_cmd_cache = kmem_cache_create("se_cmd_cache",
+			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
+	if (!(se_cmd_cache)) {
+		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+		goto out;
+	}
+	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
+			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
+			0, NULL);
+	if (!(se_tmr_req_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+				" failed\n");
+		goto out;
+	}
+	se_sess_cache = kmem_cache_create("se_sess_cache",
+			sizeof(struct se_session), __alignof__(struct se_session),
+			0, NULL);
+	if (!(se_sess_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_session"
+				" failed\n");
+		goto out;
+	}
+	se_ua_cache = kmem_cache_create("se_ua_cache",
+			sizeof(struct se_ua), __alignof__(struct se_ua),
+			0, NULL);
+	if (!(se_ua_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+		goto out;
+	}
+	se_mem_cache = kmem_cache_create("se_mem_cache",
+			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
+	if (!(se_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+		goto out;
+	}
+	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
+			sizeof(struct t10_pr_registration),
+			__alignof__(struct t10_pr_registration), 0, NULL);
+	if (!(t10_pr_reg_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+				" failed\n");
+		goto out;
+	}
+	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
+			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
+			0, NULL);
+	if (!(t10_alua_lu_gp_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+				" failed\n");
+		goto out;
+	}
+	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
+			sizeof(struct t10_alua_lu_gp_member),
+			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
+	if (!(t10_alua_lu_gp_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+				"cache failed\n");
+		goto out;
+	}
+	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
+			sizeof(struct t10_alua_tg_pt_gp),
+			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
+	if (!(t10_alua_tg_pt_gp_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"cache failed\n");
+		goto out;
+	}
+	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
+			"t10_alua_tg_pt_gp_mem_cache",
+			sizeof(struct t10_alua_tg_pt_gp_member),
+			__alignof__(struct t10_alua_tg_pt_gp_member),
+			0, NULL);
+	if (!(t10_alua_tg_pt_gp_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"mem_t failed\n");
+		goto out;
+	}
+
+	se_global = global;
+
+	return 0;
+out:
+	if (se_cmd_cache)
+		kmem_cache_destroy(se_cmd_cache);
+	if (se_tmr_req_cache)
+		kmem_cache_destroy(se_tmr_req_cache);
+	if (se_sess_cache)
+		kmem_cache_destroy(se_sess_cache);
+	if (se_ua_cache)
+		kmem_cache_destroy(se_ua_cache);
+	if (se_mem_cache)
+		kmem_cache_destroy(se_mem_cache);
+	if (t10_pr_reg_cache)
+		kmem_cache_destroy(t10_pr_reg_cache);
+	if (t10_alua_lu_gp_cache)
+		kmem_cache_destroy(t10_alua_lu_gp_cache);
+	if (t10_alua_lu_gp_mem_cache)
+		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	if (t10_alua_tg_pt_gp_cache)
+		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	if (t10_alua_tg_pt_gp_mem_cache)
+		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kfree(global);
+	return -1;
+}
+
+void release_se_global(void)
+{
+	struct se_global *global;
+
+	global = se_global;
+	if (!(global))
+		return;
+
+	kmem_cache_destroy(se_cmd_cache);
+	kmem_cache_destroy(se_tmr_req_cache);
+	kmem_cache_destroy(se_sess_cache);
+	kmem_cache_destroy(se_ua_cache);
+	kmem_cache_destroy(se_mem_cache);
+	kmem_cache_destroy(t10_pr_reg_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kfree(global);
+
+	se_global = NULL;
+}
+
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+	spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+	u32 new_index;
+
+	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+		printk(KERN_ERR "Invalid index type %d\n", type);
+		return -EINVAL;
+	}
+
+	spin_lock(&scsi_index_table.lock);
+	new_index = ++scsi_index_table.scsi_mib_index[type];
+	if (new_index == 0)
+		new_index = ++scsi_index_table.scsi_mib_index[type];
+	spin_unlock(&scsi_index_table.lock);
+
+	return new_index;
+}
+
+void transport_init_queue_obj(struct se_queue_obj *qobj)
+{
+	atomic_set(&qobj->queue_cnt, 0);
+	INIT_LIST_HEAD(&qobj->qobj_list);
+	init_waitqueue_head(&qobj->thread_wq);
+	spin_lock_init(&qobj->cmd_queue_lock);
+}
+EXPORT_SYMBOL(transport_init_queue_obj);
+
+static int transport_subsystem_reqmods(void)
+{
+	int ret;
+
+	ret = request_module("target_core_iblock");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_iblock\n");
+
+	ret = request_module("target_core_file");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_file\n");
+
+	ret = request_module("target_core_pscsi");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_pscsi\n");
+
+	ret = request_module("target_core_stgt");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_stgt\n");
+
+	return 0;
+}
+
+int transport_subsystem_check_init(void)
+{
+	if (se_global->g_sub_api_initialized)
+		return 0;
+	/*
+	 * Request the loading of known TCM subsystem plugins..
+	 */
+	if (transport_subsystem_reqmods() < 0)
+		return -1;
+
+	se_global->g_sub_api_initialized = 1;
+	return 0;
+}
+
+struct se_session *transport_init_session(void)
+{
+	struct se_session *se_sess;
+
+	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+	if (!(se_sess)) {
+		printk(KERN_ERR "Unable to allocate struct se_session from"
+				" se_sess_cache\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&se_sess->sess_list);
+	INIT_LIST_HEAD(&se_sess->sess_acl_list);
+
+	return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/*
+ * Called with struct se_portal_group->session_lock held via spin_lock_bh().
+ */
+void __transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	unsigned char buf[PR_REG_ISID_LEN];
+
+	se_sess->se_tpg = se_tpg;
+	se_sess->fabric_sess_ptr = fabric_sess_ptr;
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate the active
+	 * struct se_session.  Only set for struct se_session's that will
+	 * actually be moving I/O, e.g. *NOT* discovery sessions.
+	 */
+	if (se_nacl) {
+		/*
+		 * If the fabric module supports an ISID based TransportID,
+		 * save this value in binary from the fabric I_T Nexus now.
+		 */
+		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+			memset(&buf[0], 0, PR_REG_ISID_LEN);
+			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+					&buf[0], PR_REG_ISID_LEN);
+			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+		}
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		/*
+		 * The se_nacl->nacl_sess pointer will be set to the
+		 * last active I_T Nexus for each struct se_node_acl.
+		 */
+		se_nacl->nacl_sess = se_sess;
+
+		list_add_tail(&se_sess->sess_acl_list,
+			      &se_nacl->acl_sess_list);
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+}
+EXPORT_SYMBOL(__transport_register_session);
+
+void transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	spin_lock_bh(&se_tpg->session_lock);
+	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
+	spin_unlock_bh(&se_tpg->session_lock);
+}
+EXPORT_SYMBOL(transport_register_session);
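
A minimal sketch of how a fabric module's login path might use these exports (the initiatorname and my_fabric_conn identifiers are assumptions for illustration, not part of this API):

	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);

	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
			initiatorname);
	if (!(se_sess->se_node_acl)) {
		transport_free_session(se_sess);
		return -EACCES;
	}

	transport_register_session(se_tpg, se_sess->se_node_acl, se_sess,
			(void *)my_fabric_conn);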
+
+void transport_deregister_session_configfs(struct se_session *se_sess)
+{
+	struct se_node_acl *se_nacl;
+
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate the active struct se_session
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if ((se_nacl)) {
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		list_del(&se_sess->sess_acl_list);
+		/*
+		 * If the session list is empty, then clear the pointer.
+		 * Otherwise, set the struct se_session pointer from the tail
+		 * element of the per struct se_node_acl active session list.
+		 */
+		if (list_empty(&se_nacl->acl_sess_list))
+			se_nacl->nacl_sess = NULL;
+		else {
+			se_nacl->nacl_sess = container_of(
+					se_nacl->acl_sess_list.prev,
+					struct se_session, sess_acl_list);
+		}
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+}
+EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+void transport_free_session(struct se_session *se_sess)
+{
+	kmem_cache_free(se_sess_cache, se_sess);
+}
+EXPORT_SYMBOL(transport_free_session);
+
+void transport_deregister_session(struct se_session *se_sess)
+{
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+	struct se_node_acl *se_nacl;
+
+	if (!(se_tpg)) {
+		transport_free_session(se_sess);
+		return;
+	}
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_del(&se_sess->sess_list);
+	se_sess->se_tpg = NULL;
+	se_sess->fabric_sess_ptr = NULL;
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	/*
+	 * Determine if we need to do extra work for this initiator node's
+	 * struct se_node_acl if it had been previously dynamically generated.
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if ((se_nacl)) {
+		spin_lock_bh(&se_tpg->acl_node_lock);
+		if (se_nacl->dynamic_node_acl) {
+			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
+					se_tpg))) {
+				list_del(&se_nacl->acl_list);
+				se_tpg->num_node_acls--;
+				spin_unlock_bh(&se_tpg->acl_node_lock);
+
+				core_tpg_wait_for_nacl_pr_ref(se_nacl);
+				core_free_device_list_for_node(se_nacl, se_tpg);
+				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+						se_nacl);
+				spin_lock_bh(&se_tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_bh(&se_tpg->acl_node_lock);
+	}
+
+	transport_free_session(se_sess);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+		TPG_TFO(se_tpg)->get_fabric_name());
+}
+EXPORT_SYMBOL(transport_deregister_session);
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+{
+	struct se_device *dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	if (!T_TASK(cmd))
+		return;
+
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		dev = task->se_dev;
+		if (!(dev))
+			continue;
+
+		if (atomic_read(&task->task_active))
+			continue;
+
+		if (!(atomic_read(&task->task_state_active)))
+			continue;
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+		list_del(&task->t_state_list);
+		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), dev, task);
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		atomic_set(&task->task_state_active, 0);
+		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+	}
+}
+
+/*	transport_cmd_check_stop():
+ *
+ *	'transport_off = 1' determines if t_transport_active should be cleared.
+ *	'transport_off = 2' determines if task_dev_state should be removed.
+ *
+ *	A non-zero u8 t_state sets cmd->t_state.
+ *	Returns 1 when command is stopped, else 0.
+ */
+static int transport_cmd_check_stop(
+	struct se_cmd *cmd,
+	int transport_off,
+	u8 t_state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * Determine if IOCTL context caller is requesting the stopping of this
+	 * command for LUN shutdown purposes.
+	 */
+	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		cmd->deferred_t_state = cmd->t_state;
+		cmd->t_state = TRANSPORT_DEFERRED_CMD;
+		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&T_TASK(cmd)->transport_lun_stop_comp);
+		return 1;
+	}
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 */
+	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		cmd->deferred_t_state = cmd->t_state;
+		cmd->t_state = TRANSPORT_DEFERRED_CMD;
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+
+		/*
+		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
+		 * to FE.
+		 */
+		if (transport_off == 2)
+			cmd->se_lun = NULL;
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&T_TASK(cmd)->t_transport_stop_comp);
+		return 1;
+	}
+	if (transport_off) {
+		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+		if (transport_off == 2) {
+			transport_all_task_dev_remove_state(cmd);
+			/*
+			 * Clear struct se_cmd->se_lun before the transport_off == 2
+			 * handoff to fabric module.
+			 */
+			cmd->se_lun = NULL;
+			/*
+			 * Some fabric modules like tcm_loop can release
+			 * their internally allocated I/O reference and
+			 * struct se_cmd now.
+			 */
+			if (CMD_TFO(cmd)->check_stop_free != NULL) {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+				CMD_TFO(cmd)->check_stop_free(cmd);
+				return 1;
+			}
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		return 0;
+	} else if (t_state)
+		cmd->t_state = t_state;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return 0;
+}
+
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+	return transport_cmd_check_stop(cmd, 2, 0);
+}
+
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	unsigned long flags;
+
+	if (!lun)
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto check_lun;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_free_dev_tasks(cmd);
+
+check_lun:
+	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
+		list_del(&cmd->se_lun_list);
+		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+#if 0
+		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
+			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+#endif
+	}
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+}
+
+void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+{
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+	transport_lun_remove_cmd(cmd);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return;
+	if (remove)
+		transport_generic_remove(cmd, 0, 0);
+}
+
+void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
+{
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return;
+
+	transport_generic_remove(cmd, 0, 0);
+}
+
+static int transport_add_cmd_to_queue(
+	struct se_cmd *cmd,
+	int t_state)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_queue_obj *qobj = dev->dev_queue_obj;
+	struct se_queue_req *qr;
+	unsigned long flags;
+
+	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
+	if (!(qr)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_queue_req\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&qr->qr_list);
+
+	qr->cmd = (void *)cmd;
+	qr->state = t_state;
+
+	if (t_state) {
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		cmd->t_state = t_state;
+		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	}
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	list_add_tail(&qr->qr_list, &qobj->qobj_list);
+	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	atomic_inc(&qobj->queue_cnt);
+	wake_up_interruptible(&qobj->thread_wq);
+	return 0;
+}
+
+/*
+ * Called with struct se_queue_obj->cmd_queue_lock held.
+ */
+static struct se_queue_req *
+__transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr = NULL;
+
+	if (list_empty(&qobj->qobj_list))
+		return NULL;
+
+	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+		break;
+
+	if (qr->cmd) {
+		cmd = (struct se_cmd *)qr->cmd;
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+	}
+	list_del(&qr->qr_list);
+	atomic_dec(&qobj->queue_cnt);
+
+	return qr;
+}
+
+static struct se_queue_req *
+transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (list_empty(&qobj->qobj_list)) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return NULL;
+	}
+
+	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+		break;
+
+	if (qr->cmd) {
+		cmd = (struct se_cmd *)qr->cmd;
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+	}
+	list_del(&qr->qr_list);
+	atomic_dec(&qobj->queue_cnt);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	return qr;
+}
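
The queue object is a simple producer/consumer pair: transport_add_cmd_to_queue() enqueues a request and wakes qobj->thread_wq, and a consumer (the processing thread, whose body is not shown in this hunk) would typically drain it roughly like the sketch below:

	struct se_queue_req *qr;
	struct se_cmd *cmd;
	int t_state;

	wait_event_interruptible(qobj->thread_wq,
			atomic_read(&qobj->queue_cnt));

	while ((qr = transport_get_qr_from_queue(qobj)) != NULL) {
		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		kfree(qr);
		/* ... dispatch cmd according to t_state ... */
	}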
+
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+		struct se_queue_obj *qobj)
+{
+	struct se_cmd *q_cmd;
+	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
+		q_cmd = (struct se_cmd *)qr->cmd;
+		if (q_cmd != cmd)
+			continue;
+
+		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
+		atomic_dec(&qobj->queue_cnt);
+		list_del(&qr->qr_list);
+		kfree(qr);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
+		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+	}
+}
+
+/*
+ * Completion function used by TCM subsystem plugins (such as FILEIO)
+ * for queueing up response from struct se_subsystem_api->do_task()
+ */
+void transport_complete_sync_cache(struct se_cmd *cmd, int good)
+{
+	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+				struct se_task, t_list);
+
+	if (good) {
+		cmd->scsi_status = SAM_STAT_GOOD;
+		task->task_scsi_status = GOOD;
+	} else {
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
+		TASK_CMD(task)->transport_error_status =
+					PYX_TRANSPORT_ILLEGAL_REQUEST;
+	}
+
+	transport_complete_task(task, good);
+}
+EXPORT_SYMBOL(transport_complete_sync_cache);
+
+/*	transport_complete_task():
+ *
+ *	Called from interrupt and non interrupt context depending
+ *	on the transport plugin.
+ */
+void transport_complete_task(struct se_task *task, int success)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = task->se_dev;
+	int t_state;
+	unsigned long flags;
+#if 0
+	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+			T_TASK(cmd)->t_task_cdb[0], dev);
+#endif
+	if (dev) {
+		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+		atomic_inc(&dev->depth_left);
+		atomic_inc(&SE_HBA(dev)->left_queue_depth);
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+	}
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&task->task_active, 0);
+
+	/*
+	 * See if any sense data exists, if so set the TASK_SENSE flag.
+	 * Also check for any other post completion work that needs to be
+	 * done by the plugins.
+	 */
+	if (dev && dev->transport->transport_complete) {
+		if (dev->transport->transport_complete(task) != 0) {
+			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+			task->task_sense = 1;
+			success = 1;
+		}
+	}
+
+	/*
+	 * See if we are waiting for outstanding struct se_task
+	 * to complete for an exception condition
+	 */
+	if (atomic_read(&task->task_stop)) {
+		/*
+		 * Decrement T_TASK(cmd)->t_se_count if this task had
+		 * previously thrown its timeout exception handler.
+		 */
+		if (atomic_read(&task->task_timeout)) {
+			atomic_dec(&T_TASK(cmd)->t_se_count);
+			atomic_set(&task->task_timeout, 0);
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&task->task_stop_comp);
+		return;
+	}
+	/*
+	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
+	 * left counter to determine when the struct se_cmd is ready to be queued to
+	 * the processing thread.
+	 */
+	if (atomic_read(&task->task_timeout)) {
+		if (!(atomic_dec_and_test(
+				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+				flags);
+			return;
+		}
+		t_state = TRANSPORT_COMPLETE_TIMEOUT;
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_add_cmd_to_queue(cmd, t_state);
+		return;
+	}
+	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+
+	/*
+	 * Decrement the outstanding t_task_cdbs_left count.  The last
+	 * struct se_task from struct se_cmd will complete itself into the
+	 * device queue depending upon int success.
+	 */
+	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+		if (!success)
+			T_TASK(cmd)->t_tasks_failed = 1;
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	if (!success || T_TASK(cmd)->t_tasks_failed) {
+		t_state = TRANSPORT_COMPLETE_FAILURE;
+		if (!task->task_error_status) {
+			task->task_error_status =
+				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+			cmd->transport_error_status =
+				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+	} else {
+		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+		t_state = TRANSPORT_COMPLETE_OK;
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_add_cmd_to_queue(cmd, t_state);
+}
+EXPORT_SYMBOL(transport_complete_task);
+
+/*
+ * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
+ * struct se_task list is ready to be added to the active execution list
+ * of a struct se_device.
+ *
+ * Called with se_dev_t->execute_task_lock held.
+ */
+static inline int transport_add_task_check_sam_attr(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	/*
+	 * No SAM Task attribute emulation enabled, add to tail of
+	 * execution queue
+	 */
+	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
+		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+		return 0;
+	}
+	/*
+	 * HEAD_OF_QUEUE attribute for received CDB, which means
+	 * the first task that is associated with a struct se_cmd goes to
+	 * head of the struct se_device->execute_task_list, and task_prev
+	 * after that for each subsequent task
+	 */
+	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		list_add(&task->t_execute_list,
+				(task_prev != NULL) ?
+				&task_prev->t_execute_list :
+				&dev->execute_task_list);
+
+		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+				" in execution queue\n",
+				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+		return 1;
+	}
+	/*
+	 * ORDERED, SIMPLE or UNTAGGED attribute tasks, once transitioned from
+	 * Dormant -> Active state, are added to the end of the
+	 * struct se_device->execute_task_list
+	 */
+	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+	return 0;
+}
+
+/*	__transport_add_task_to_execute_queue():
+ *
+ *	Called with se_dev_t->execute_task_lock held.
+ */
+static void __transport_add_task_to_execute_queue(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	int head_of_queue;
+
+	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
+	atomic_inc(&dev->execute_tasks);
+
+	if (atomic_read(&task->task_state_active))
+		return;
+	/*
+	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
+	 * state list as well.  Running with SAM Task Attribute emulation
+	 * will always return head_of_queue == 0 here
+	 */
+	if (head_of_queue)
+		list_add(&task->t_state_list, (task_prev) ?
+				&task_prev->t_state_list :
+				&dev->state_task_list);
+	else
+		list_add_tail(&task->t_state_list, &dev->state_task_list);
+
+	atomic_set(&task->task_state_active, 1);
+
+	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+		task, dev);
+}
+
+static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
+{
+	struct se_device *dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		dev = task->se_dev;
+
+		if (atomic_read(&task->task_state_active))
+			continue;
+
+		spin_lock(&dev->execute_task_lock);
+		list_add_tail(&task->t_state_list, &dev->state_task_list);
+		atomic_set(&task->task_state_active, 1);
+
+		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+			CMD_TFO(task->task_se_cmd)->get_task_tag(
+			task->task_se_cmd), task, dev);
+
+		spin_unlock(&dev->execute_task_lock);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_task *task, *task_prev = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		if (atomic_read(&task->task_execute_queue))
+			continue;
+		/*
+		 * __transport_add_task_to_execute_queue() handles the
+		 * SAM Task Attribute emulation if enabled
+		 */
+		__transport_add_task_to_execute_queue(task, task_prev, dev);
+		atomic_set(&task->task_execute_queue, 1);
+		task_prev = task;
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+	return;
+}
+
+/*	transport_get_task_from_execute_queue():
+ *
+ *	Called with dev->execute_task_lock held.
+ */
+static struct se_task *
+transport_get_task_from_execute_queue(struct se_device *dev)
+{
+	struct se_task *task;
+
+	if (list_empty(&dev->execute_task_list))
+		return NULL;
+
+	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
+		break;
+
+	list_del(&task->t_execute_list);
+	atomic_dec(&dev->execute_tasks);
+
+	return task;
+}
+
+/*	transport_remove_task_from_execute_queue():
+ *
+ *
+ */
+void transport_remove_task_from_execute_queue(
+	struct se_task *task,
+	struct se_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_del(&task->t_execute_list);
+	atomic_dec(&dev->execute_tasks);
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+{
+	switch (cmd->data_direction) {
+	case DMA_NONE:
+		return "NONE";
+	case DMA_FROM_DEVICE:
+		return "READ";
+	case DMA_TO_DEVICE:
+		return "WRITE";
+	case DMA_BIDIRECTIONAL:
+		return "BIDI";
+	default:
+		break;
+	}
+
+	return "UNKNOWN";
+}
+
+void transport_dump_dev_state(
+	struct se_device *dev,
+	char *b,
+	int *bl)
+{
+	*bl += sprintf(b + *bl, "Status: ");
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		*bl += sprintf(b + *bl, "ACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "DEACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		*bl += sprintf(b + *bl, "SHUTDOWN");
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "OFFLINE");
+		break;
+	default:
+		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
+		break;
+	}
+
+	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
+		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
+		dev->queue_depth);
+	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
+		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+	*bl += sprintf(b + *bl, "        ");
+}
+
+/*	transport_release_all_cmds():
+ *
+ *
+ */
+static void transport_release_all_cmds(struct se_device *dev)
+{
+	struct se_cmd *cmd = NULL;
+	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	int bug_out = 0, t_state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
+				qr_list) {
+
+		cmd = (struct se_cmd *)qr->cmd;
+		t_state = qr->state;
+		list_del(&qr->qr_list);
+		kfree(qr);
+		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+				flags);
+
+		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+			" t_state: %u directly\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+
+		transport_release_fe_cmd(cmd);
+		bug_out = 1;
+
+		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+#if 0
+	if (bug_out)
+		BUG();
+#endif
+}
+
+void transport_dump_vpd_proto_id(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
+
+	switch (vpd->protocol_identifier) {
+	case 0x00:
+		sprintf(buf+len, "Fibre Channel\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "Parallel SCSI\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SSA\n");
+		break;
+	case 0x30:
+		sprintf(buf+len, "IEEE 1394\n");
+		break;
+	case 0x40:
+		sprintf(buf+len, "SCSI Remote Direct Memory Access"
+				" Protocol\n");
+		break;
+	case 0x50:
+		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
+		break;
+	case 0x60:
+		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
+		break;
+	case 0x70:
+		sprintf(buf+len, "Automation/Drive Interface Transport"
+				" Protocol\n");
+		break;
+	case 0x80:
+		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n",
+				vpd->protocol_identifier);
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk(KERN_INFO "%s", buf);
+}
+
+void
+transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * Check if the Protocol Identifier Valid (PIV) bit is set..
+	 *
+	 * from spc3r23.pdf section 7.5.1
+	 */
+	 if (page_83[1] & 0x80) {
+		vpd->protocol_identifier = (page_83[0] & 0xf0);
+		vpd->protocol_identifier_set = 1;
+		transport_dump_vpd_proto_id(vpd, NULL, 0);
+	}
+}
+EXPORT_SYMBOL(transport_set_vpd_proto_id);
+
+int transport_dump_vpd_assoc(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0, len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Association: ");
+
+	switch (vpd->association) {
+	case 0x00:
+		sprintf(buf+len, "addressed logical unit\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "target port\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SCSI target device\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identification association..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 297
+	 */
+	vpd->association = (page_83[1] & 0x30);
+	return transport_dump_vpd_assoc(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_assoc);
+
+int transport_dump_vpd_ident_type(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0, len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Type: ");
+
+	switch (vpd->device_identifier_type) {
+	case 0x00:
+		sprintf(buf+len, "Vendor specific\n");
+		break;
+	case 0x01:
+		sprintf(buf+len, "T10 Vendor ID based\n");
+		break;
+	case 0x02:
+		sprintf(buf+len, "EUI-64 based\n");
+		break;
+	case 0x03:
+		sprintf(buf+len, "NAA\n");
+		break;
+	case 0x04:
+		sprintf(buf+len, "Relative target port identifier\n");
+		break;
+	case 0x08:
+		sprintf(buf+len, "SCSI name string\n");
+		break;
+	default:
+		sprintf(buf+len, "Unsupported: 0x%02x\n",
+				vpd->device_identifier_type);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identifier type..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 298
+	 */
+	vpd->device_identifier_type = (page_83[1] & 0x0f);
+	return transport_dump_vpd_ident_type(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident_type);
+
+int transport_dump_vpd_ident(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x02: /* ASCII */
+		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x03: /* UTF-8 */
+		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	default:
+		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
+			" 0x%02x", vpd->device_identifier_code_set);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int
+transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	static const char hex_str[] = "0123456789abcdef";
+	int j = 0, i = 4; /* offset to start of the identifier */
+
+	/*
+	 * The VPD Code Set (encoding)
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 296
+	 */
+	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		vpd->device_identifier[j++] =
+				hex_str[vpd->device_identifier_type];
+		while (i < (4 + page_83[3])) {
+			vpd->device_identifier[j++] =
+				hex_str[(page_83[i] & 0xf0) >> 4];
+			vpd->device_identifier[j++] =
+				hex_str[page_83[i] & 0x0f];
+			i++;
+		}
+		break;
+	case 0x02: /* ASCII */
+	case 0x03: /* UTF-8 */
+		while (i < (4 + page_83[3]))
+			vpd->device_identifier[j++] = page_83[i++];
+		break;
+	default:
+		break;
+	}
+
+	return transport_dump_vpd_ident(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident);
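+/*
+ * Rough usage sketch for the transport_set_vpd_*() helpers above (the
+ * descriptor layout and variable names below are illustrative only): a
+ * caller walking an INQUIRY EVPD 0x83 response would hand each designation
+ * descriptor to these helpers in turn, e.g.:
+ *
+ *	struct t10_vpd *vpd;	 // assumed allocated/zeroed by the caller
+ *	unsigned char *page_83;	 // points at one designation descriptor
+ *
+ *	transport_set_vpd_proto_id(vpd, page_83);
+ *	transport_set_vpd_assoc(vpd, page_83);
+ *	transport_set_vpd_ident_type(vpd, page_83);
+ *	transport_set_vpd_ident(vpd, page_83);
+ *
+ * after which vpd->protocol_identifier, vpd->association,
+ * vpd->device_identifier_type and vpd->device_identifier describe the
+ * descriptor in decoded form.
+ */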
+
+static void core_setup_task_attr_emulation(struct se_device *dev)
+{
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, disable the
+	 * SAM Task Attribute emulation.
+	 *
+	 * This is currently not available in upstream Linux/SCSI Target
+	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
+	 */
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
+		return;
+	}
+
+	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
+	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+		" device\n", TRANSPORT(dev)->name,
+		TRANSPORT(dev)->get_device_rev(dev));
+}
+
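+/*
+ * Dump Linux/SCSI style INQUIRY lines for the configured backend to the
+ * kernel ring buffer, along the lines of (values here are only an
+ * illustration, not actual output from a specific backend):
+ *
+ *   Vendor: LIO-ORG   Model: FILEIO            Revision: 4.0
+ *   Type:   Direct-Access                  ANSI SCSI revision: 05
+ */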
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+	struct t10_wwn *wwn = DEV_T10_WWN(dev);
+	int i, device_type;
+	/*
+	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+	 */
+	printk("  Vendor: ");
+	for (i = 0; i < 8; i++)
+		if (wwn->vendor[i] >= 0x20)
+			printk("%c", wwn->vendor[i]);
+		else
+			printk(" ");
+
+	printk("  Model: ");
+	for (i = 0; i < 16; i++)
+		if (wwn->model[i] >= 0x20)
+			printk("%c", wwn->model[i]);
+		else
+			printk(" ");
+
+	printk("  Revision: ");
+	for (i = 0; i < 4; i++)
+		if (wwn->revision[i] >= 0x20)
+			printk("%c", wwn->revision[i]);
+		else
+			printk(" ");
+
+	printk("\n");
+
+	device_type = TRANSPORT(dev)->get_device_type(dev);
+	printk("  Type:   %s ", scsi_device_type(device_type));
+	printk("                 ANSI SCSI revision: %02x\n",
+				TRANSPORT(dev)->get_device_rev(dev));
+}
+
+struct se_device *transport_add_device_to_core_hba(
+	struct se_hba *hba,
+	struct se_subsystem_api *transport,
+	struct se_subsystem_dev *se_dev,
+	u32 device_flags,
+	void *transport_dev,
+	struct se_dev_limits *dev_limits,
+	const char *inquiry_prod,
+	const char *inquiry_rev)
+{
+	int ret = 0, force_pt;
+	struct se_device  *dev;
+
+	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
+	if (!(dev)) {
+		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+		return NULL;
+	}
+	dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
+	if (!(dev->dev_queue_obj)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" dev->dev_queue_obj\n");
+		kfree(dev);
+		return NULL;
+	}
+	transport_init_queue_obj(dev->dev_queue_obj);
+
+	dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
+					GFP_KERNEL);
+	if (!(dev->dev_status_queue_obj)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" dev->dev_status_queue_obj\n");
+		kfree(dev->dev_queue_obj);
+		kfree(dev);
+		return NULL;
+	}
+	transport_init_queue_obj(dev->dev_status_queue_obj);
+
+	dev->dev_flags		= device_flags;
+	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
+	dev->dev_ptr		= (void *) transport_dev;
+	dev->se_hba		= hba;
+	dev->se_sub_dev		= se_dev;
+	dev->transport		= transport;
+	atomic_set(&dev->active_cmds, 0);
+	INIT_LIST_HEAD(&dev->dev_list);
+	INIT_LIST_HEAD(&dev->dev_sep_list);
+	INIT_LIST_HEAD(&dev->dev_tmr_list);
+	INIT_LIST_HEAD(&dev->execute_task_list);
+	INIT_LIST_HEAD(&dev->delayed_cmd_list);
+	INIT_LIST_HEAD(&dev->ordered_cmd_list);
+	INIT_LIST_HEAD(&dev->state_task_list);
+	spin_lock_init(&dev->execute_task_lock);
+	spin_lock_init(&dev->delayed_cmd_lock);
+	spin_lock_init(&dev->ordered_cmd_lock);
+	spin_lock_init(&dev->state_task_lock);
+	spin_lock_init(&dev->dev_alua_lock);
+	spin_lock_init(&dev->dev_reservation_lock);
+	spin_lock_init(&dev->dev_status_lock);
+	spin_lock_init(&dev->dev_status_thr_lock);
+	spin_lock_init(&dev->se_port_lock);
+	spin_lock_init(&dev->se_tmr_lock);
+
+	dev->queue_depth	= dev_limits->queue_depth;
+	atomic_set(&dev->depth_left, dev->queue_depth);
+	atomic_set(&dev->dev_ordered_id, 0);
+
+	se_dev_set_default_attribs(dev, dev_limits);
+
+	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+	dev->creation_time = get_jiffies_64();
+	spin_lock_init(&dev->stats_lock);
+
+	spin_lock(&hba->device_lock);
+	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
+	hba->dev_count++;
+	spin_unlock(&hba->device_lock);
+	/*
+	 * Setup the SAM Task Attribute emulation for struct se_device
+	 */
+	core_setup_task_attr_emulation(dev);
+	/*
+	 * Force PR and ALUA passthrough emulation with internal object use.
+	 */
+	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
+	/*
+	 * Setup the Reservations infrastructure for struct se_device
+	 */
+	core_setup_reservations(dev, force_pt);
+	/*
+	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
+	 */
+	if (core_setup_alua(dev, force_pt) < 0) {
+		ret = -1;
+		goto out;
+	}
+
+	/*
+	 * Startup the struct se_device processing thread
+	 */
+	dev->process_thread = kthread_run(transport_processing_thread, dev,
+					  "LIO_%s", TRANSPORT(dev)->name);
+	if (IS_ERR(dev->process_thread)) {
+		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+			TRANSPORT(dev)->name);
+		dev->process_thread = NULL;
+		ret = -1;
+		goto out;
+	}
+
+	/*
+	 * Preload the initial INQUIRY const values if we are doing
+	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+	 * passthrough because this is being provided by the backend LLD.
+	 * This is required so that transport_get_inquiry() copies these
+	 * originals once back into DEV_T10_WWN(dev) for the virtual device
+	 * setup.
+	 */
+	if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (!(inquiry_prod) || !(inquiry_rev)) {
+			printk(KERN_ERR "All non TCM/pSCSI plugins require"
+				" INQUIRY consts\n");
+			ret = -1;
+			goto out;
+		}
+
+		strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
+		strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
+		strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+	}
+	scsi_dump_inquiry(dev);
+
+out:
+	if (!ret)
+		return dev;
+	if (dev->process_thread)
+		kthread_stop(dev->process_thread);
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev->dev_status_queue_obj);
+	kfree(dev->dev_queue_obj);
+	kfree(dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(transport_add_device_to_core_hba);
+
+/*	transport_generic_prepare_cdb():
+ *
+ *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
+ *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
+ *	Because we map iSCSI LUNs to SCSI Target IDs, a non-zero LUN in the
+ *	CDB would throw the devices and HBAs for a loop, so those bits are
+ *	cleared here except for the opcodes that actually use them.
+ */
+static inline void transport_generic_prepare_cdb(
+	unsigned char *cdb)
+{
+	switch (cdb[0]) {
+	case READ_10: /* SBC - RDProtect */
+	case READ_12: /* SBC - RDProtect */
+	case READ_16: /* SBC - RDProtect */
+	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+	case VERIFY: /* SBC - VRProtect */
+	case VERIFY_16: /* SBC - VRProtect */
+	case WRITE_VERIFY: /* SBC - VRProtect */
+	case WRITE_VERIFY_12: /* SBC - VRProtect */
+		break;
+	default:
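+		/*
+		 * e.g. a CDB with byte 1 == 0xa3 (LUN bits 7-5 set by an
+		 * older SAM-2 style initiator) is rewritten to 0x03 here,
+		 * keeping only the low 5 bits belonging to the opcode itself.
+		 */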
+		cdb[1] &= 0x1f; /* clear logical unit number */
+		break;
+	}
+}
+
+static struct se_task *
+transport_generic_get_task(struct se_cmd *cmd,
+		enum dma_data_direction data_direction)
+{
+	struct se_task *task;
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned long flags;
+
+	task = dev->transport->alloc_task(cmd);
+	if (!task) {
+		printk(KERN_ERR "Unable to allocate struct se_task\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&task->t_list);
+	INIT_LIST_HEAD(&task->t_execute_list);
+	INIT_LIST_HEAD(&task->t_state_list);
+	init_completion(&task->task_stop_comp);
+	task->task_no = T_TASK(cmd)->t_tasks_no++;
+	task->task_se_cmd = cmd;
+	task->se_dev = dev;
+	task->task_data_direction = data_direction;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return task;
+}
+
+static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+
+void transport_device_setup_cmd(struct se_cmd *cmd)
+{
+	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
+}
+EXPORT_SYMBOL(transport_device_setup_cmd);
+
+/*
+ * Used by fabric modules containing a local struct se_cmd within their
+ * fabric dependent per I/O descriptor.
+ */
+void transport_init_se_cmd(
+	struct se_cmd *cmd,
+	struct target_core_fabric_ops *tfo,
+	struct se_session *se_sess,
+	u32 data_length,
+	int data_direction,
+	int task_attr,
+	unsigned char *sense_buffer)
+{
+	INIT_LIST_HEAD(&cmd->se_lun_list);
+	INIT_LIST_HEAD(&cmd->se_delayed_list);
+	INIT_LIST_HEAD(&cmd->se_ordered_list);
+	/*
+	 * Setup t_task pointer to t_task_backstore
+	 */
+	cmd->t_task = &cmd->t_task_backstore;
+
+	INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
+	init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+	init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+	init_completion(&T_TASK(cmd)->t_transport_stop_comp);
+	spin_lock_init(&T_TASK(cmd)->t_state_lock);
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+
+	cmd->se_tfo = tfo;
+	cmd->se_sess = se_sess;
+	cmd->data_length = data_length;
+	cmd->data_direction = data_direction;
+	cmd->sam_task_attr = task_attr;
+	cmd->sense_buffer = sense_buffer;
+}
+EXPORT_SYMBOL(transport_init_se_cmd);
+
+static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+{
+	/*
+	 * Check if SAM Task Attribute emulation is enabled for this
+	 * struct se_device storage object
+	 */
+	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 0;
+
+	if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+		DEBUG_STA("SAM Task Attribute ACA"
+			" emulation is not supported\n");
+		return -1;
+	}
+	/*
+	 * Used to determine when ORDERED commands should go from
+	 * Dormant to Active status.
+	 */
+	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+	smp_mb__after_atomic_inc();
+	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+			cmd->se_ordered_id, cmd->sam_task_attr,
+			TRANSPORT(cmd->se_dev)->name);
+	return 0;
+}
+
+void transport_free_se_cmd(
+	struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_tmr_req)
+		core_tmr_release_req(se_cmd->se_tmr_req);
+	/*
+	 * Check and free any extended CDB buffer that was allocated
+	 */
+	if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
+		kfree(T_TASK(se_cmd)->t_task_cdb);
+}
+EXPORT_SYMBOL(transport_free_se_cmd);
+
+static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
+
+/*	transport_generic_allocate_tasks():
+ *
+ *	Called from fabric RX Thread.
+ */
+int transport_generic_allocate_tasks(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	int ret;
+
+	transport_generic_prepare_cdb(cdb);
+
+	/*
+	 * This is needed for early exceptions.
+	 */
+	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+
+	transport_device_setup_cmd(cmd);
+	/*
+	 * Ensure that the received CDB is less than the max (252 + 8) bytes
+	 * for VARIABLE_LENGTH_CMD
+	 */
+	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
+		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+		return -1;
+	}
+	/*
+	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
+	 * allocate the additional extended CDB buffer now.  Otherwise
+	 * point t_task_cdb at the built-in __t_task_cdb buffer.
+	 */
+	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
+		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+						GFP_KERNEL);
+		if (!(T_TASK(cmd)->t_task_cdb)) {
+			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
+				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+				scsi_command_size(cdb),
+				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
+			return -1;
+		}
+	} else
+		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+	/*
+	 * Copy the original CDB into T_TASK(cmd).
+	 */
+	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+	/*
+	 * Setup the received CDB based on SCSI defined opcodes and
+	 * perform unit attention, persistent reservations and ALUA
+	 * checks for virtual device backends.  The T_TASK(cmd)->t_task_cdb
+	 * pointer is expected to be setup before we reach this point.
+	 */
+	ret = transport_generic_cmd_sequencer(cmd, cdb);
+	if (ret < 0)
+		return ret;
+	/*
+	 * Check for SAM Task Attribute Emulation
+	 */
+	if (transport_check_alloc_task_attr(cmd) < 0) {
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -2;
+	}
+	spin_lock(&cmd->se_lun->lun_sep_lock);
+	if (cmd->se_lun->lun_sep)
+		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
+	spin_unlock(&cmd->se_lun->lun_sep_lock);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_allocate_tasks);
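+/*
+ * Simplified sketch of how a fabric module is expected to drive the above
+ * (assumes the LUN has already been resolved and attached to the se_cmd):
+ *
+ *	transport_init_se_cmd(se_cmd, tfo, se_sess, data_length,
+ *			      data_direction, task_attr, sense_buffer);
+ *	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
+ *		// queue CHECK_CONDITION / fail the command back to the fabric
+ *	else
+ *		transport_generic_handle_cdb(se_cmd);
+ */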
+
+/*
+ * Used by fabric module frontends not defining a TFO->new_cmd_map()
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
+ */
+int transport_generic_handle_cdb(
+	struct se_cmd *cmd)
+{
+	if (!SE_LUN(cmd)) {
+		dump_stack();
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb);
+
+/*
+ * Used by fabric module frontends defining a TFO->new_cmd_map() caller
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
+ * complete setup in TCM process context w/ TFO->new_cmd_map().
+ */
+int transport_generic_handle_cdb_map(
+	struct se_cmd *cmd)
+{
+	if (!SE_LUN(cmd)) {
+		dump_stack();
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb_map);
+
+/*	transport_generic_handle_data():
+ *
+ *
+ */
+int transport_generic_handle_data(
+	struct se_cmd *cmd)
+{
+	/*
+	 * For the software fabric case, we assume the nexus is being
+	 * failed/shutdown when signals are pending from the kthread context
+	 * caller, so we return a failure.  For the HW target mode case running
+	 * in interrupt code, the signal_pending() check is skipped.
+	 */
+	if (!in_interrupt() && signal_pending(current))
+		return -1;
+	/*
+	 * If the received CDB has already been ABORTED by the generic
+	 * target engine, we now call transport_check_aborted_status()
+	 * to queue any delayed TASK_ABORTED status for the received CDB to the
+	 * fabric module as we are expecting no further incoming DATA OUT
+	 * sequences at this point.
+	 */
+	if (transport_check_aborted_status(cmd, 1) != 0)
+		return 0;
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_data);
+
+/*	transport_generic_handle_tmr():
+ *
+ *
+ */
+int transport_generic_handle_tmr(
+	struct se_cmd *cmd)
+{
+	/*
+	 * This is needed for early exceptions.
+	 */
+	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+	transport_device_setup_cmd(cmd);
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int ret = 0;
+
+	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+		CMD_TFO(cmd)->get_task_tag(cmd));
+
+	/*
+	 * No tasks remain in the execution queue
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+		DEBUG_TS("task_no[%d] - Processing task %p\n",
+				task->task_no, task);
+		/*
+		 * If the struct se_task has not been sent and is not active,
+		 * remove the struct se_task from the execution queue.
+		 */
+		if (!atomic_read(&task->task_sent) &&
+		    !atomic_read(&task->task_active)) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			transport_remove_task_from_execute_queue(task,
+					task->se_dev);
+
+			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+				task->task_no);
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			continue;
+		}
+
+		/*
+		 * If the struct se_task is active, sleep until it is returned
+		 * from the plugin.
+		 */
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+
+			DEBUG_TS("task_no[%d] - Waiting to complete\n",
+				task->task_no);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_TS("task_no[%d] - Stopped successfully\n",
+				task->task_no);
+
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		} else {
+			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+			ret++;
+		}
+
+		__transport_stop_task_timer(task, &flags);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return ret;
+}
+
+static void transport_failure_reset_queue_depth(struct se_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+	atomic_inc(&dev->depth_left);
+	atomic_inc(&SE_HBA(dev)->left_queue_depth);
+	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+}
+
+/*
+ * Handle SAM-esque emulation for generic transport request failures.
+ */
+static void transport_generic_request_failure(
+	struct se_cmd *cmd,
+	struct se_device *dev,
+	int complete,
+	int sc)
+{
+	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+		" CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+		T_TASK(cmd)->t_task_cdb[0]);
+	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+		" %d/%d transport_error_status: %d\n",
+		CMD_TFO(cmd)->get_cmd_state(cmd),
+		cmd->t_state, cmd->deferred_t_state,
+		cmd->transport_error_status);
+	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
+		" t_transport_active: %d t_transport_stop: %d"
+		" t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
+		atomic_read(&T_TASK(cmd)->t_transport_active),
+		atomic_read(&T_TASK(cmd)->t_transport_stop),
+		atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+	transport_stop_all_task_timers(cmd);
+
+	if (dev)
+		transport_failure_reset_queue_depth(dev);
+	/*
+	 * For SAM Task Attribute emulation for failed struct se_cmd
+	 */
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+
+	if (complete) {
+		transport_direct_request_timeout(cmd);
+		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	switch (cmd->transport_error_status) {
+	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
+		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+		break;
+	case PYX_TRANSPORT_INVALID_CDB_FIELD:
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		break;
+	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		break;
+	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
+		if (!sc)
+			transport_new_cmd_failure(cmd);
+		/*
+		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
+		 * we force this session to fall back to session
+		 * recovery.
+		 */
+		CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
+		CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+
+		goto check_stop;
+	case PYX_TRANSPORT_LU_COMM_FAILURE:
+	case PYX_TRANSPORT_ILLEGAL_REQUEST:
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		break;
+	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
+		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+		break;
+	case PYX_TRANSPORT_WRITE_PROTECTED:
+		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+		break;
+	case PYX_TRANSPORT_RESERVATION_CONFLICT:
+		/*
+		 * No SENSE Data payload for this case, set SCSI Status
+		 * and queue the response to $FABRIC_MOD.
+		 *
+		 * Uses linux/include/scsi/scsi.h SAM status codes defs
+		 */
+		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+		/*
+		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+		 * CONFLICT STATUS.
+		 *
+		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+		 */
+		if (SE_SESS(cmd) &&
+		    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+			core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+				cmd->orig_fe_lun, 0x2C,
+				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+
+		CMD_TFO(cmd)->queue_status(cmd);
+		goto check_stop;
+	case PYX_TRANSPORT_USE_SENSE_REASON:
+		/*
+		 * struct se_cmd->scsi_sense_reason already set
+		 */
+		break;
+	default:
+		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			cmd->transport_error_status);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	}
+
+	if (!sc)
+		transport_new_cmd_failure(cmd);
+	else
+		transport_send_check_condition_and_sense(cmd,
+			cmd->scsi_sense_reason, 0);
+check_stop:
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_direct_request_timeout(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
+		   &T_TASK(cmd)->t_se_count);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_generic_request_timeout(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	/*
+	 * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
+	 * to allow last call to free memory resources.
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
+		int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+
+		atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_generic_remove(cmd, 0, 0);
+}
+
+static int
+transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
+{
+	unsigned char *buf;
+
+	buf = kzalloc(data_length, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for buffer\n");
+		return -1;
+	}
+
+	T_TASK(cmd)->t_tasks_se_num = 0;
+	T_TASK(cmd)->t_task_buf = buf;
+
+	return 0;
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
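+/*
+ * The 64-bit LBA for READ_16/WRITE_16 style CDBs lives in bytes 2..9,
+ * big-endian.  For example, cdb[2..9] = { 00 00 00 01 00 00 00 00 }
+ * decodes to LBA 0x100000000.
+ */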
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+}
+
+/*
+ * Called from interrupt context.
+ */
+static void transport_task_timeout_handler(unsigned long data)
+{
+	struct se_task *task = (struct se_task *)data;
+	struct se_cmd *cmd = TASK_CMD(task);
+	unsigned long flags;
+
+	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (task->task_flags & TF_STOP) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	task->task_flags &= ~TF_RUNNING;
+
+	/*
+	 * Determine if transport_complete_task() has already been called.
+	 */
+	if (!(atomic_read(&task->task_active))) {
+		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+				" == 0\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	atomic_inc(&T_TASK(cmd)->t_se_count);
+	atomic_inc(&T_TASK(cmd)->t_transport_timeout);
+	T_TASK(cmd)->t_tasks_failed = 1;
+
+	atomic_set(&task->task_timeout, 1);
+	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
+	task->task_scsi_status = 1;
+
+	if (atomic_read(&task->task_stop)) {
+		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+				" == 1\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		complete(&task->task_stop_comp);
+		return;
+	}
+
+	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+				" t_task_cdbs_left\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+			task, cmd);
+
+	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
+}
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_start_task_timer(struct se_task *task)
+{
+	struct se_device *dev = task->se_dev;
+	int timeout;
+
+	if (task->task_flags & TF_RUNNING)
+		return;
+	/*
+	 * If the task_timeout is disabled, exit now.
+	 */
+	timeout = DEV_ATTRIB(dev)->task_timeout;
+	if (!(timeout))
+		return;
+
+	init_timer(&task->task_timer);
+	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
+	task->task_timer.data = (unsigned long) task;
+	task->task_timer.function = transport_task_timeout_handler;
+
+	task->task_flags |= TF_RUNNING;
+	add_timer(&task->task_timer);
+#if 0
+	printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+		" %d\n", task->task_se_cmd, task, timeout);
+#endif
+}
+
+/*
+ * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ */
+void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+
+	if (!(task->task_flags & TF_RUNNING))
+		return;
+
+	task->task_flags |= TF_STOP;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+
+	del_timer_sync(&task->task_timer);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+	task->task_flags &= ~TF_RUNNING;
+	task->task_flags &= ~TF_STOP;
+}
+
+static void transport_stop_all_task_timers(struct se_cmd *cmd)
+{
+	struct se_task *task = NULL, *task_tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list)
+		__transport_stop_task_timer(task, &flags);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline int transport_tcq_window_closed(struct se_device *dev)
+{
+	if (dev->dev_tcq_window_closed++ <
+			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
+		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
+	} else {
+		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
+	}
+
+	wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+	return 0;
+}
+
+/*
+ * Called from Fabric Module context from transport_execute_tasks()
+ *
+ * The return of this function determines if the tasks from struct se_cmd
+ * get added to the execution queue in transport_execute_tasks(),
+ * or are added to the delayed or ordered lists here.
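+ *
+ * In short (a summary of the logic below): it returns 1 when the tasks may
+ * be queued for execution immediately (HEAD_OF_QUEUE, ORDERED with no
+ * outstanding SIMPLE commands, or SIMPLE/UNTAGGED with no outstanding
+ * ORDERED commands), and 0 once the command has been parked on the
+ * delayed list.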
+ */
+static inline int transport_execute_task_attr(struct se_cmd *cmd)
+{
+	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 1;
+	/*
+	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
+	 * to allow the passed struct se_cmd's list of tasks to be added to
+	 * the front of the list.
+	 */
+	 if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+		smp_mb__after_atomic_inc();
+		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+			" 0x%02x, se_ordered_id: %u\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			cmd->se_ordered_id);
+		return 1;
+	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+		spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
+		list_add_tail(&cmd->se_ordered_list,
+				&SE_DEV(cmd)->ordered_cmd_list);
+		spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+
+		atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+		smp_mb__after_atomic_inc();
+
+		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+				" list, se_ordered_id: %u\n",
+				T_TASK(cmd)->t_task_cdb[0],
+				cmd->se_ordered_id);
+		/*
+		 * Add ORDERED command to tail of execution queue if
+		 * no other older commands exist that need to be
+		 * completed first.
+		 */
+		if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+			return 1;
+	} else {
+		/*
+		 * For SIMPLE and UNTAGGED Task Attribute commands
+		 */
+		atomic_inc(&SE_DEV(cmd)->simple_cmds);
+		smp_mb__after_atomic_inc();
+	}
+	/*
+	 * If one or more outstanding ORDERED task attribute commands exist,
+	 * the dormant task(s) built for the passed struct se_cmd cannot yet
+	 * be added to the execution queue and made Active for this struct
+	 * se_device.
+	 */
+	if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+		/*
+		 * Add the cmd w/ tasks to the delayed cmd queue; it will be
+		 * drained upon completion of the HEAD_OF_QUEUE task.
+		 */
+		spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
+		list_add_tail(&cmd->se_delayed_list,
+				&SE_DEV(cmd)->delayed_cmd_list);
+		spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+
+		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+			" delayed CMD list, se_ordered_id: %u\n",
+			T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+			cmd->se_ordered_id);
+		/*
+		 * Return zero to let transport_execute_tasks() know
+		 * not to add the delayed tasks to the execution list.
+		 */
+		return 0;
+	}
+	/*
+	 * Otherwise, no ORDERED task attributes exist..
+	 */
+	return 1;
+}
+
+/*
+ * Called from fabric module context in transport_generic_new_cmd() and
+ * transport_generic_process_write()
+ */
+static int transport_execute_tasks(struct se_cmd *cmd)
+{
+	int add_tasks;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
+		if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+			cmd->transport_error_status =
+				PYX_TRANSPORT_LU_COMM_FAILURE;
+			transport_generic_request_failure(cmd, NULL, 0, 1);
+			return 0;
+		}
+	}
+	/*
+	 * Call transport_cmd_check_stop() to see if a fabric exception
+	 * has occurred that prevents execution.
+	 */
+	if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+		/*
+		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
+		 * attribute for the tasks of the received struct se_cmd CDB
+		 */
+		add_tasks = transport_execute_task_attr(cmd);
+		if (add_tasks == 0)
+			goto execute_tasks;
+		/*
+		 * This calls transport_add_tasks_from_cmd() to handle
+		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
+		 * (if enabled) in __transport_add_task_to_execute_queue() and
+		 * transport_add_task_check_sam_attr().
+		 */
+		transport_add_tasks_from_cmd(cmd);
+	}
+	/*
+	 * Kick the execution queue for the cmd associated struct se_device
+	 * storage object.
+	 */
+execute_tasks:
+	__transport_execute_tasks(SE_DEV(cmd));
+	return 0;
+}
+
+/*
+ * Called to check the struct se_device tcq depth window and, once open, pull
+ * struct se_task entries from struct se_device->execute_task_list and hand
+ * them to the backend subsystem plugin.
+ *
+ * Called from transport_processing_thread()
+ */
+static int __transport_execute_tasks(struct se_device *dev)
+{
+	int error;
+	struct se_cmd *cmd = NULL;
+	struct se_task *task;
+	unsigned long flags;
+
+	/*
+	 * Check if there is enough room in the device and HBA queue to send
+	 * struct se_transport_task's to the selected transport.
+	 */
+check_depth:
+	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+	if (!(atomic_read(&dev->depth_left)) ||
+	    !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+		return transport_tcq_window_closed(dev);
+	}
+	dev->dev_tcq_window_closed = 0;
+
+	spin_lock(&dev->execute_task_lock);
+	task = transport_get_task_from_execute_queue(dev);
+	spin_unlock(&dev->execute_task_lock);
+
+	if (!task) {
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+		return 0;
+	}
+
+	atomic_dec(&dev->depth_left);
+	atomic_dec(&SE_HBA(dev)->left_queue_depth);
+	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+
+	cmd = TASK_CMD(task);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&task->task_active, 1);
+	atomic_set(&task->task_sent, 1);
+	atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+
+	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
+	    T_TASK(cmd)->t_task_cdbs)
+		atomic_set(&cmd->transport_sent, 1);
+
+	transport_start_task_timer(task);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
+	 * to grab REPORT_LUNS CDBs before they hit the
+	 * struct se_subsystem_api->do_task() caller below.
+	 */
+	if (cmd->transport_emulate_cdb) {
+		error = cmd->transport_emulate_cdb(cmd);
+		if (error != 0) {
+			cmd->transport_error_status = error;
+			atomic_set(&task->task_active, 0);
+			atomic_set(&cmd->transport_sent, 0);
+			transport_stop_tasks_for_cmd(cmd);
+			transport_generic_request_failure(cmd, dev, 0, 1);
+			goto check_depth;
+		}
+		/*
+		 * Handle the successful completion for transport_emulate_cdb()
+		 * here for synchronous operation, i.e. when SCF_EMULATE_CDB_ASYNC
+		 * is not set.  Otherwise the caller is expected to complete the
+		 * task with proper status.
+		 */
+		if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+			cmd->scsi_status = SAM_STAT_GOOD;
+			task->task_scsi_status = GOOD;
+			transport_complete_task(task, 1);
+		}
+	} else {
+		/*
+		 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
+		 * RAMDISK we use the internal transport_emulate_control_cdb() logic
+		 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
+		 * LUN emulation code.
+		 *
+		 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
+		 * call ->do_task() directly and let the underlying TCM subsystem plugin
+		 * code handle the CDB emulation.
+		 */
+		if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+		    (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+			error = transport_emulate_control_cdb(task);
+		else
+			error = TRANSPORT(dev)->do_task(task);
+
+		if (error != 0) {
+			cmd->transport_error_status = error;
+			atomic_set(&task->task_active, 0);
+			atomic_set(&cmd->transport_sent, 0);
+			transport_stop_tasks_for_cmd(cmd);
+			transport_generic_request_failure(cmd, dev, 0, 1);
+		}
+	}
+
+	goto check_depth;
+
+	return 0;
+}
+
+void transport_new_cmd_failure(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+	/*
+	 * Any unsolicited data will get dumped for failed command inside of
+	 * the fabric plugin
+	 */
+	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
+	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+
+	CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+}
+
+static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
+
+static inline u32 transport_get_sectors_6(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 8-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 8-bit sector value.
+	 */
+type_disk:
+	return (u32)cdb[4];
+}
+
+static inline u32 transport_get_sectors_10(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 16-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_10 is not defined in SSC, throw an exception
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -1;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 16-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_12 is not defined in SSC, throw an exception
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -1;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 32-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
+
+type_disk:
+	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+		    (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+		    (cdb[30] << 8) + cdb[31];
+
+}
+
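+/*
+ * Convert a CDB transfer length into a byte count.  For example, with a
+ * 512 byte block_size a TYPE_DISK transfer of 8 sectors maps to 4096 bytes;
+ * for TYPE_TAPE the FIXED bit (cdb[1] & 1) selects between block-sized and
+ * byte-sized transfer units.
+ */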
+static inline u32 transport_get_size(
+	u32 sectors,
+	unsigned char *cdb,
+	struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		if (cdb[1] & 1) { /* sectors */
+			return DEV_ATTRIB(dev)->block_size * sectors;
+		} else /* bytes */
+			return sectors;
+	}
+#if 0
+	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+			" %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
+			DEV_ATTRIB(dev)->block_size * sectors,
+			TRANSPORT(dev)->name);
+#endif
+	return DEV_ATTRIB(dev)->block_size * sectors;
+}
+
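+/*
+ * Convert two ASCII hex characters into one byte, e.g. val = { '3', 'f' }
+ * (or { '3', 'F' }) returns 0x3f.  Only valid hex digits are expected;
+ * anything else is treated as a decimal digit.
+ */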
+unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
+{
+	unsigned char result = 0;
+	/*
+	 * MSB
+	 */
+	if ((val[0] >= 'a') && (val[0] <= 'f'))
+		result = ((val[0] - 'a' + 10) & 0xf) << 4;
+	else if ((val[0] >= 'A') && (val[0] <= 'F'))
+		result = ((val[0] - 'A' + 10) & 0xf) << 4;
+	else /* digit */
+		result = ((val[0] - '0') & 0xf) << 4;
+	/*
+	 * LSB
+	 */
+	if ((val[1] >= 'a') && (val[1] <= 'f'))
+		result |= ((val[1] - 'a' + 10) & 0xf);
+	else if ((val[1] >= 'A') && (val[1] <= 'F'))
+		result |= ((val[1] - 'A' + 10) & 0xf);
+	else /* digit */
+		result |= ((val[1] - '0') & 0xf);
+
+	return result;
+}
+EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
+
+static void transport_xor_callback(struct se_cmd *cmd)
+{
+	unsigned char *buf, *addr;
+	struct se_mem *se_mem;
+	unsigned int offset;
+	int i;
+	/*
+	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+	 *
+	 * 1) read the specified logical block(s);
+	 * 2) transfer logical blocks from the data-out buffer;
+	 * 3) XOR the logical blocks transferred from the data-out buffer with
+	 *    the logical blocks read, storing the resulting XOR data in a buffer;
+	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+	 *    blocks transferred from the data-out buffer; and
+	 * 5) transfer the resulting XOR data to the data-in buffer.
+	 */
+	buf = kmalloc(cmd->data_length, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+		return;
+	}
+	/*
+	 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+	 * into the locally allocated *buf
+	 */
+	transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+	/*
+	 * Now perform the XOR against the BIDI read memory located at
+	 * T_TASK(cmd)->t_mem_bidi_list
+	 */
+
+	offset = 0;
+	list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
+		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
+		if (!(addr))
+			goto out;
+
+		for (i = 0; i < se_mem->se_len; i++)
+			*(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+
+		offset += se_mem->se_len;
+		kunmap_atomic(addr, KM_USER0);
+	}
+out:
+	kfree(buf);
+}
+
+/*
+ * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
+ */
+static int transport_get_sense_data(struct se_cmd *cmd)
+{
+	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
+	struct se_device *dev;
+	struct se_task *task = NULL, *task_tmp;
+	unsigned long flags;
+	u32 offset = 0;
+
+	if (!SE_LUN(cmd)) {
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return 0;
+	}
+
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+
+		if (!task->task_sense)
+			continue;
+
+		dev = task->se_dev;
+		if (!(dev))
+			continue;
+
+		if (!TRANSPORT(dev)->get_sense_buffer) {
+			printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+					" is NULL\n");
+			continue;
+		}
+
+		sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
+		if (!(sense_buffer)) {
+			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+				" sense buffer for task with sense\n",
+				CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+			continue;
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+
+		memcpy((void *)&buffer[offset], (void *)sense_buffer,
+				TRANSPORT_SENSE_BUFFER);
+		cmd->scsi_status = task->task_scsi_status;
+		/* Automatically padded */
+		cmd->scsi_sense_length =
+				(TRANSPORT_SENSE_BUFFER + offset);
+
+		printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+				" and sense\n",
+			dev->se_hba->hba_id, TRANSPORT(dev)->name,
+				cmd->scsi_status);
+		return 0;
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return -1;
+}
+
+static int transport_allocate_resources(struct se_cmd *cmd)
+{
+	u32 length = cmd->data_length;
+
+	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
+		return transport_generic_get_mem(cmd, length, PAGE_SIZE);
+	else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
+		return transport_generic_allocate_buf(cmd, length);
+	else
+		return 0;
+}
+
+static int
+transport_handle_reservation_conflict(struct se_cmd *cmd)
+{
+	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+	/*
+	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+	 * CONFLICT STATUS.
+	 *
+	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+	 */
+	if (SE_SESS(cmd) &&
+	    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+		core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+			cmd->orig_fe_lun, 0x2C,
+			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+	return -2;
+}
+
+/*	transport_generic_cmd_sequencer():
+ *
+ *	Generic Command Sequencer that should work for most DAS transport
+ *	drivers.
+ *
+ *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ *	RX Thread.
+ *
+ *	FIXME: Need to support other SCSI OPCODES here as well.
+ */
+static int transport_generic_cmd_sequencer(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	int ret = 0, sector_ret = 0, passthrough;
+	u32 sectors = 0, size = 0, pr_reg_type = 0;
+	u16 service_action;
+	u8 alua_ascq = 0;
+	/*
+	 * Check for an existing UNIT ATTENTION condition
+	 */
+	if (core_scsi3_ua_check(cmd, cdb) < 0) {
+		cmd->transport_wait_for_tasks =
+				&transport_nop_wait_for_tasks;
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+		return -2;
+	}
+	/*
+	 * Check status of Asymmetric Logical Unit Assignment port
+	 */
+	ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+	if (ret != 0) {
+		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+		/*
+		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+		 * the ALUA additional sense code qualifier (ASCQ) is determined
+		 * by the ALUA primary or secondary access state.
+		 */
+		if (ret > 0) {
+#if 0
+			printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
+				CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+#endif
+			transport_set_sense_codes(cmd, 0x04, alua_ascq);
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+			return -2;
+		}
+		goto out_invalid_cdb_field;
+	}
+	/*
+	 * Check status for SPC-3 Persistent Reservations
+	 */
+	if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
+		if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+					cmd, cdb, pr_reg_type) != 0)
+			return transport_handle_reservation_conflict(cmd);
+		/*
+		 * This means the CDB is allowed for the SCSI Initiator port
+		 * when said port is *NOT* holding the legacy SPC-2 or
+		 * SPC-3 Persistent Reservation.
+		 */
+	}
+
+	switch (cdb[0]) {
+	case READ_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_6;
+		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_12;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_16;
+		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_6;
+		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_12;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_16;
+		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case XDWRITEREAD_10:
+		if ((cmd->data_direction != DMA_TO_DEVICE) ||
+		    !(T_TASK(cmd)->t_tasks_bidi))
+			goto out_invalid_cdb_field;
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Skip the remaining assignments for TCM/PSCSI passthrough
+		 */
+		if (passthrough)
+			break;
+		/*
+		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+		 */
+		cmd->transport_complete_callback = &transport_xor_callback;
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		break;
+	case VARIABLE_LENGTH_CMD:
+		service_action = get_unaligned_be16(&cdb[8]);
+		/*
+		 * Determine if this is a TCM/pSCSI device and we should disable
+		 * internal emulation for this CDB.
+		 */
+		passthrough = (TRANSPORT(dev)->transport_type ==
+					TRANSPORT_PLUGIN_PHBA_PDEV);
+
+		switch (service_action) {
+		case XDWRITEREAD_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+			size = transport_get_size(sectors, cdb, cmd);
+			/*
+			 * Use WRITE_32 and READ_32 opcodes for the emulated
+			 * XDWRITE_READ_32 logic.
+			 */
+			cmd->transport_split_cdb = &split_cdb_XX_32;
+			T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+
+			/*
+			 * Skip the remaining assignments for TCM/PSCSI passthrough
+			 */
+			if (passthrough)
+				break;
+
+			/*
+			 * Setup BIDI XOR callback to be run during
+			 * transport_generic_complete_ok()
+			 */
+			cmd->transport_complete_callback = &transport_xor_callback;
+			T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+			break;
+		case WRITE_SAME_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+			size = transport_get_size(sectors, cdb, cmd);
+			T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+			/*
+			 * Skip the remaining assignments for TCM/PSCSI passthrough
+			 */
+			if (passthrough)
+				break;
+
+			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
+				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+					" bits not supported for Block Discard"
+					" Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			/*
+			 * Currently for the emulated case we only accept
+			 * tpws with the UNMAP=1 bit set.
+			 */
+			if (!(cdb[10] & 0x08)) {
+				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+					" supported for Block Discard Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			break;
+		default:
+			printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+				" 0x%04x not supported\n", service_action);
+			goto out_unsupported_cdb;
+		}
+		break;
+	case 0xa3:
+		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_IN from SCC-2 */
+			/*
+			 * Check for emulated MI_REPORT_TARGET_PGS.
+			 */
+			if (cdb[1] == MI_REPORT_TARGET_PGS) {
+				cmd->transport_emulate_cdb =
+				(T10_ALUA(su_dev)->alua_type ==
+				 SPC3_ALUA_EMULATED) ?
+				&core_emulate_report_target_port_groups :
+				NULL;
+			}
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else {
+			/* GPCMD_SEND_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case MODE_SELECT:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SELECT_10:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case MODE_SENSE_10:
+	case GPCMD_READ_BUFFER_CAPACITY:
+	case GPCMD_SEND_OPC:
+	case LOG_SELECT:
+	case LOG_SENSE:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_BLOCK_LIMITS:
+		size = READ_BLOCK_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case GPCMD_GET_CONFIGURATION:
+	case GPCMD_READ_FORMAT_CAPACITIES:
+	case GPCMD_READ_DISC_INFO:
+	case GPCMD_READ_TRACK_RZONE_INFO:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+		cmd->transport_emulate_cdb =
+			(T10_RES(su_dev)->res_type ==
+			 SPC3_PERSISTENT_RESERVATIONS) ?
+			&core_scsi3_emulate_pr : NULL;
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case GPCMD_MECHANISM_STATUS:
+	case GPCMD_READ_DVD_STRUCTURE:
+		size = (cdb[8] << 8) + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case READ_POSITION:
+		size = READ_POSITION_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case 0xa4:
+		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_OUT from SCC-2
+			 *
+			 * Check for emulated MO_SET_TARGET_PGS.
+			 */
+			if (cdb[1] == MO_SET_TARGET_PGS) {
+				cmd->transport_emulate_cdb =
+				(T10_ALUA(su_dev)->alua_type ==
+					SPC3_ALUA_EMULATED) ?
+				&core_emulate_set_target_port_groups :
+				NULL;
+			}
+
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else  {
+			/* GPCMD_REPORT_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case INQUIRY:
+		size = (cdb[3] << 8) + cdb[4];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+		 * See spc4r17 section 5.3
+		 */
+		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = TASK_ATTR_HOQ;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_CAPACITY:
+		size = READ_CAP_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_MEDIA_SERIAL_NUMBER:
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case SERVICE_ACTION_IN:
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case EXTENDED_COPY:
+	case READ_ATTRIBUTE:
+	case RECEIVE_COPY_RESULTS:
+	case WRITE_ATTRIBUTE:
+		size = (cdb[10] << 24) | (cdb[11] << 16) |
+		       (cdb[12] << 8) | cdb[13];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+		size = (cdb[3] << 8) | cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
+#if 0
+	case GPCMD_READ_CD:
+		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		size = (2336 * sectors);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+#endif
+	case READ_TOC:
+		size = cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case REQUEST_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_ELEMENT_STATUS:
+		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case WRITE_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/*
+		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RESERVE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		/*
+		 * Setup the legacy emulated handler for SPC-2 and
+		 * >= SPC-3 compatible reservation handling (CRH=1).
+		 * Otherwise, we assume the underlying SCSI logic is
+		 * running in SPC_PASSTHROUGH, and wants reservation
+		 * emulation disabled.
+		 */
+		cmd->transport_emulate_cdb =
+				(T10_RES(su_dev)->res_type !=
+				 SPC_PASSTHROUGH) ?
+				&core_scsi2_emulate_crh : NULL;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/*
+		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RELEASE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		cmd->transport_emulate_cdb =
+				(T10_RES(su_dev)->res_type !=
+				 SPC_PASSTHROUGH) ?
+				&core_scsi2_emulate_crh : NULL;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case SYNCHRONIZE_CACHE:
+	case 0x91: /* SYNCHRONIZE_CACHE_16: */
+		/*
+		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+		 */
+		if (cdb[0] == SYNCHRONIZE_CACHE) {
+			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+			T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+			T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		}
+		if (sector_ret)
+			goto out_unsupported_cdb;
+
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+
+		/*
+		 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
+		 */
+		if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+			break;
+		/*
+		 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
+		 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
+		 */
+		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
+		/*
+		 * Check to ensure that LBA + Range does not exceed past end of
+		 * device.
+		 */
+		if (transport_get_sectors(cmd) < 0)
+			goto out_invalid_cdb_field;
+		break;
+	case UNMAP:
+		size = get_unaligned_be16(&cdb[7]);
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Determine if the received UNMAP is used for direct passthrough
+		 * into Linux/SCSI with struct request via TCM/pSCSI or we are
+		 * signaling the use of internal transport_generic_unmap() emulation
+		 * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
+		 * subsystem plugin backstores.
+		 */
+		if (!(passthrough))
+			cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
+
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case WRITE_SAME_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Determine if the received WRITE_SAME_16 is used for direct
+		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
+		 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
+		 * emulation for Linux/BLOCK discard with TCM/IBLOCK and
+		 * TCM/FILEIO subsystem plugin backstores.
+		 */
+		if (!(passthrough)) {
+			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
+				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+					" bits not supported for Block Discard"
+					" Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			/*
+			 * Currently for the emulated case we only accept
+			 * tpws with the UNMAP=1 bit set.
+			 */
+			if (!(cdb[1] & 0x08)) {
+				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+					" supported for Block Discard Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case GPCMD_CLOSE_TRACK:
+	case ERASE:
+	case INITIALIZE_ELEMENT_STATUS:
+	case GPCMD_LOAD_UNLOAD:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case GPCMD_SET_SPEED:
+	case SPACE:
+	case START_STOP:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+	case MOVE_MEDIUM:
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case REPORT_LUNS:
+		cmd->transport_emulate_cdb =
+				&transport_core_report_lun_response;
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+		 * See spc4r17 section 5.3
+		 */
+		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = TASK_ATTR_HOQ;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	default:
+		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+			" 0x%02x, sending CHECK_CONDITION.\n",
+			CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+		goto out_unsupported_cdb;
+	}
+
+	if (size != cmd->data_length) {
+		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
+			" 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+				cmd->data_length, size, cdb[0]);
+
+		cmd->cmd_spdtl = size;
+
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			printk(KERN_ERR "Rejecting underflow/overflow"
+					" WRITE data\n");
+			goto out_invalid_cdb_field;
+		}
+		/*
+		 * Reject READ_* or WRITE_* with overflow/underflow for
+		 * type SCF_SCSI_DATA_SG_IO_CDB.
+		 */
+		if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512))  {
+			printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+				" CDB on non 512-byte sector setup subsystem"
+				" plugin: %s\n", TRANSPORT(dev)->name);
+			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+			goto out_invalid_cdb_field;
+		}
+
+		if (size > cmd->data_length) {
+			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+			cmd->residual_count = (size - cmd->data_length);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - size);
+		}
+		cmd->data_length = size;
+	}
+
+	transport_set_supported_SAM_opcode(cmd);
+	return ret;
+
+out_unsupported_cdb:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+	return -2;
+out_invalid_cdb_field:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+	return -2;
+}
+
+static inline void transport_release_tasks(struct se_cmd *);
+
+/*
+ * This function will copy a contiguous *src buffer into a destination
+ * struct scatterlist array.
+ */
+static void transport_memcpy_write_contig(
+	struct se_cmd *cmd,
+	struct scatterlist *sg_d,
+	unsigned char *src)
+{
+	u32 i = 0, length = 0, total_length = cmd->data_length;
+	void *dst;
+
+	while (total_length) {
+		length = sg_d[i].length;
+
+		if (length > total_length)
+			length = total_length;
+
+		dst = sg_virt(&sg_d[i]);
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		src += length;
+		i++;
+	}
+}
+
+/*
+ * This function will copy a struct scatterlist array *sg_s into a destination
+ * contiguous *dst buffer.
+ */
+static void transport_memcpy_read_contig(
+	struct se_cmd *cmd,
+	unsigned char *dst,
+	struct scatterlist *sg_s)
+{
+	u32 i = 0, length = 0, total_length = cmd->data_length;
+	void *src;
+
+	while (total_length) {
+		length = sg_s[i].length;
+
+		if (length > total_length)
+			length = total_length;
+
+		src = sg_virt(&sg_s[i]);
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		dst += length;
+		i++;
+	}
+}
+
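+/*
+ * Copy a list of struct se_mem elements into a contiguous *dst buffer,
+ * up to cmd->data_length bytes.
+ */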
+static void transport_memcpy_se_mem_read_contig(
+	struct se_cmd *cmd,
+	unsigned char *dst,
+	struct list_head *se_mem_list)
+{
+	struct se_mem *se_mem;
+	void *src;
+	u32 length = 0, total_length = cmd->data_length;
+
+	list_for_each_entry(se_mem, se_mem_list, se_list) {
+		length = se_mem->se_len;
+
+		if (length > total_length)
+			length = total_length;
+
+		src = page_address(se_mem->se_page) + se_mem->se_off;
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		dst += length;
+	}
+}
+
+/*
+ * Called from transport_generic_complete_ok() and
+ * transport_generic_request_failure() to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue.
+ */
+static void transport_complete_task_attr(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_cmd *cmd_p, *cmd_tmp;
+	int new_active_tasks = 0;
+
+	if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+		atomic_dec(&dev->simple_cmds);
+		smp_mb__after_atomic_dec();
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		atomic_dec(&dev->dev_hoq_count);
+		smp_mb__after_atomic_dec();
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+		spin_lock(&dev->ordered_cmd_lock);
+		list_del(&cmd->se_ordered_list);
+		atomic_dec(&dev->dev_ordered_sync);
+		smp_mb__after_atomic_dec();
+		spin_unlock(&dev->ordered_cmd_lock);
+
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+	}
+	/*
+	 * Process all commands up to the last received
+	 * ORDERED task attribute which requires another blocking
+	 * boundary
+	 */
+	spin_lock(&dev->delayed_cmd_lock);
+	list_for_each_entry_safe(cmd_p, cmd_tmp,
+			&dev->delayed_cmd_list, se_delayed_list) {
+
+		list_del(&cmd_p->se_delayed_list);
+		spin_unlock(&dev->delayed_cmd_lock);
+
+		DEBUG_STA("Calling add_tasks() for"
+			" cmd_p: 0x%02x Task Attr: 0x%02x"
+			" Dormant -> Active, se_ordered_id: %u\n",
+			T_TASK(cmd_p)->t_task_cdb[0],
+			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
+
+		transport_add_tasks_from_cmd(cmd_p);
+		new_active_tasks++;
+
+		spin_lock(&dev->delayed_cmd_lock);
+		if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+			break;
+	}
+	spin_unlock(&dev->delayed_cmd_lock);
+	/*
+	 * If new tasks have become active, wake up the transport thread
+	 * to do the processing of the Active tasks.
+	 */
+	if (new_active_tasks != 0)
+		wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+}
+
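+/*
+ * Completion path for a successfully executed struct se_cmd.  Updates
+ * SAM task attribute state, returns sense data when present, invokes an
+ * optional transport_complete_callback() (e.g. for XDWRITEREAD emulation),
+ * and queues data and/or status back to the fabric module.
+ */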
+static void transport_generic_complete_ok(struct se_cmd *cmd)
+{
+	int reason = 0;
+	/*
+	 * Check if we need to move delayed/dormant tasks from cmds on the
+	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
+	 * Attribute.
+	 */
+	if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+	/*
+	 * Check if we need to retrieve a sense buffer from
+	 * the struct se_cmd in question.
+	 */
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		if (transport_get_sense_data(cmd) < 0)
+			reason = TCM_NON_EXISTENT_LUN;
+
+		/*
+		 * Only set when an struct se_task->task_scsi_status returned
+		 * a non GOOD status.
+		 */
+		if (cmd->scsi_status) {
+			transport_send_check_condition_and_sense(
+					cmd, reason, 1);
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
+	}
+	/*
+	 * Check for a callback, used amongst other things by
+	 * XDWRITE_READ_10 emulation.
+	 */
+	if (cmd->transport_complete_callback)
+		cmd->transport_complete_callback(cmd);
+
+	switch (cmd->data_direction) {
+	case DMA_FROM_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (SE_LUN(cmd)->lun_sep) {
+			SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * If enabled by TCM fabric module pre-registered SGL
+		 * memory, perform the memcpy() from the TCM internal
+		 * contiguous buffer back to the original SGL.
+		 */
+		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+			transport_memcpy_write_contig(cmd,
+				 T_TASK(cmd)->t_task_pt_sgl,
+				 T_TASK(cmd)->t_task_buf);
+
+		CMD_TFO(cmd)->queue_data_in(cmd);
+		break;
+	case DMA_TO_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (SE_LUN(cmd)->lun_sep) {
+			SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+				cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Check if we need to send READ payload for BIDI-COMMAND
+		 */
+		if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+			spin_lock(&cmd->se_lun->lun_sep_lock);
+			if (SE_LUN(cmd)->lun_sep) {
+				SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+			}
+			spin_unlock(&cmd->se_lun->lun_sep_lock);
+			CMD_TFO(cmd)->queue_data_in(cmd);
+			break;
+		}
+		/* Fall through for DMA_TO_DEVICE */
+	case DMA_NONE:
+		CMD_TFO(cmd)->queue_status(cmd);
+		break;
+	default:
+		break;
+	}
+
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
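+/*
+ * Release all inactive struct se_task descriptors attached to a struct
+ * se_cmd, freeing their scatterlists and returning each task to the
+ * backend subsystem plugin via TRANSPORT()->free_task().
+ */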
+static void transport_free_dev_tasks(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+		if (atomic_read(&task->task_active))
+			continue;
+
+		kfree(task->task_sg_bidi);
+		kfree(task->task_sg);
+
+		list_del(&task->t_list);
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		if (task->se_dev)
+			TRANSPORT(task->se_dev)->free_task(task);
+		else
+			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+				task->task_no);
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
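+/*
+ * Release the contiguous t_task_buf, or the struct se_mem lists (and their
+ * backing pages, unless the memory was pre-registered by the fabric module
+ * or mapped by the backend) associated with a struct se_cmd.
+ */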
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+	struct se_mem *se_mem, *se_mem_tmp;
+	int free_page = 1;
+
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+		free_page = 0;
+	if (cmd->se_dev->transport->do_se_mem_map)
+		free_page = 0;
+
+	if (T_TASK(cmd)->t_task_buf) {
+		kfree(T_TASK(cmd)->t_task_buf);
+		T_TASK(cmd)->t_task_buf = NULL;
+		return;
+	}
+
+	/*
+	 * Caller will handle releasing of struct se_mem.
+	 */
+	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
+		return;
+
+	if (!(T_TASK(cmd)->t_tasks_se_num))
+		return;
+
+	list_for_each_entry_safe(se_mem, se_mem_tmp,
+			T_TASK(cmd)->t_mem_list, se_list) {
+		/*
+		 * We only call __free_page(struct se_mem->se_page) when
+		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+		 */
+		if (free_page)
+			__free_page(se_mem->se_page);
+
+		list_del(&se_mem->se_list);
+		kmem_cache_free(se_mem_cache, se_mem);
+	}
+
+	if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
+		list_for_each_entry_safe(se_mem, se_mem_tmp,
+				T_TASK(cmd)->t_mem_bidi_list, se_list) {
+			/*
+			 * We only call __free_page(struct se_mem->se_page) when
+			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+			 */
+			if (free_page)
+				__free_page(se_mem->se_page);
+
+			list_del(&se_mem->se_list);
+			kmem_cache_free(se_mem_cache, se_mem);
+		}
+	}
+
+	kfree(T_TASK(cmd)->t_mem_bidi_list);
+	T_TASK(cmd)->t_mem_bidi_list = NULL;
+	kfree(T_TASK(cmd)->t_mem_list);
+	T_TASK(cmd)->t_mem_list = NULL;
+	T_TASK(cmd)->t_tasks_se_num = 0;
+}
+
+static inline void transport_release_tasks(struct se_cmd *cmd)
+{
+	transport_free_dev_tasks(cmd);
+}
+
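+/*
+ * Drop the frontend (t_fe_count) and storage engine (t_se_count) references
+ * held on T_TASK(cmd).  Returns 1 while other references remain, or 0 when
+ * the descriptor may be released by the caller.
+ */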
+static inline int transport_dec_and_check(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			return 1;
+		}
+	}
+
+	if (atomic_read(&T_TASK(cmd)->t_se_count)) {
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			return 1;
+		}
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return 0;
+}
+
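+/*
+ * Release a struct se_cmd on behalf of the frontend once all references
+ * have been dropped: remove device task state, free tasks and pages, and
+ * hand the descriptor back to the fabric module.
+ */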
+static void transport_release_fe_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	if (transport_dec_and_check(cmd))
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto free_pages;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_release_tasks(cmd);
+free_pages:
+	transport_free_pages(cmd);
+	transport_free_se_cmd(cmd);
+	CMD_TFO(cmd)->release_cmd_direct(cmd);
+}
+
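+/*
+ * Generic teardown for a struct se_cmd.  Returns 1 if outstanding references
+ * prevented the release; otherwise frees tasks and pages and either returns
+ * the descriptor to the fabric pool or releases it directly.
+ */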
+static int transport_generic_remove(
+	struct se_cmd *cmd,
+	int release_to_pool,
+	int session_reinstatement)
+{
+	unsigned long flags;
+
+	if (!(T_TASK(cmd)))
+		goto release_cmd;
+
+	if (transport_dec_and_check(cmd)) {
+		if (session_reinstatement) {
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			transport_all_task_dev_remove_state(cmd);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+		}
+		return 1;
+	}
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto free_pages;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_release_tasks(cmd);
+free_pages:
+	transport_free_pages(cmd);
+
+release_cmd:
+	if (release_to_pool) {
+		transport_release_cmd_to_pool(cmd);
+	} else {
+		transport_free_se_cmd(cmd);
+		CMD_TFO(cmd)->release_cmd_direct(cmd);
+	}
+
+	return 0;
+}
+
+/*
+ * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * @cmd:  Associated se_cmd descriptor
+ * @mem:  SGL style memory for TCM WRITE / READ
+ * @sg_mem_num: Number of SGL elements
+ * @mem_bidi_in: SGL style memory for TCM BIDI READ
+ * @sg_mem_bidi_num: Number of BIDI READ SGL elements
+ *
+ * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
+ * of parameters.
+ */
+int transport_generic_map_mem_to_cmd(
+	struct se_cmd *cmd,
+	struct scatterlist *mem,
+	u32 sg_mem_num,
+	struct scatterlist *mem_bidi_in,
+	u32 sg_mem_bidi_num)
+{
+	u32 se_mem_cnt_out = 0;
+	int ret;
+
+	if (!(mem) || !(sg_mem_num))
+		return 0;
+	/*
+	 * Passed *mem will contain a list_head containing preformatted
+	 * struct se_mem elements...
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
+		if ((mem_bidi_in) || (sg_mem_bidi_num)) {
+			printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
+				" with BIDI-COMMAND\n");
+			return -ENOSYS;
+		}
+
+		T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
+		T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
+		cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
+		return 0;
+	}
+	/*
+	 * Otherwise, assume the caller is passing a struct scatterlist
+	 * array from include/linux/scatterlist.h
+	 */
+	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+		/*
+		 * For CDB using TCM struct se_mem linked list scatterlist memory
+		 * processed into a TCM struct se_subsystem_dev, we do the mapping
+		 * from the passed physical memory to struct se_mem->se_page here.
+		 */
+		T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+		if (!(T_TASK(cmd)->t_mem_list))
+			return -ENOMEM;
+
+		ret = transport_map_sg_to_mem(cmd,
+			T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
+		if (ret < 0)
+			return -ENOMEM;
+
+		T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
+		/*
+		 * Setup BIDI READ list of struct se_mem elements
+		 */
+		if ((mem_bidi_in) && (sg_mem_bidi_num)) {
+			T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+			if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+				kfree(T_TASK(cmd)->t_mem_list);
+				return -ENOMEM;
+			}
+			se_mem_cnt_out = 0;
+
+			ret = transport_map_sg_to_mem(cmd,
+				T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
+				&se_mem_cnt_out);
+			if (ret < 0) {
+				kfree(T_TASK(cmd)->t_mem_list);
+				return -ENOMEM;
+			}
+
+			T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+		}
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+		if (mem_bidi_in || sg_mem_bidi_num) {
+			printk(KERN_ERR "BIDI-Commands not supported using "
+				"SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
+			return -ENOSYS;
+		}
+		/*
+		 * For incoming CDBs using a contiguous buffer internally with TCM,
+		 * save the passed struct scatterlist memory.  After TCM storage object
+		 * processing has completed for this struct se_cmd, TCM core will call
+		 * transport_memcpy_[write,read]_contig() as necessary from
+		 * transport_generic_complete_ok() and transport_write_pending() in order
+		 * to copy the TCM buffer to/from the original passed *mem in SGL ->
+		 * struct scatterlist format.
+		 */
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
+		T_TASK(cmd)->t_task_pt_sgl = mem;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
+
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+	return dev->transport->get_blocks(dev) + 1;
+}
+
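+/*
+ * Calculate T_TASK(cmd)->t_tasks_sectors from cmd->data_length and the
+ * device block size, and for TYPE_DISK verify that LBA + sectors does not
+ * run past the end of the device.
+ */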
+static int transport_get_sectors(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	T_TASK(cmd)->t_tasks_sectors =
+		(cmd->data_length / DEV_ATTRIB(dev)->block_size);
+	if (!(T_TASK(cmd)->t_tasks_sectors))
+		T_TASK(cmd)->t_tasks_sectors = 1;
+
+	if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
+		return 0;
+
+	if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
+	     transport_dev_end_lba(dev)) {
+		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+			" transport_dev_end_lba(): %llu\n",
+			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+			transport_dev_end_lba(dev));
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+		return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
+	}
+
+	return 0;
+}
+
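+/*
+ * For SCF_SCSI_DATA_SG_IO_CDB commands, create the struct se_task
+ * descriptors (including any BIDI READ tasks) via
+ * transport_generic_get_cdb_count(); in either case initialize the
+ * per-command task completion counters.
+ */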
+static int transport_new_cmd_obj(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	u32 task_cdbs = 0, rc;
+
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+		task_cdbs++;
+		T_TASK(cmd)->t_task_cdbs++;
+	} else {
+		int set_counts = 1;
+
+		/*
+		 * Setup any BIDI READ tasks and memory from
+		 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
+		 * are queued first for the non pSCSI passthrough case.
+		 */
+		if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+		    (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+			rc = transport_generic_get_cdb_count(cmd,
+				T_TASK(cmd)->t_task_lba,
+				T_TASK(cmd)->t_tasks_sectors,
+				DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
+				set_counts);
+			if (!(rc)) {
+				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				return PYX_TRANSPORT_LU_COMM_FAILURE;
+			}
+			set_counts = 0;
+		}
+		/*
+		 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
+		 * Note for BIDI transfers this will contain the WRITE payload
+		 */
+		task_cdbs = transport_generic_get_cdb_count(cmd,
+				T_TASK(cmd)->t_task_lba,
+				T_TASK(cmd)->t_tasks_sectors,
+				cmd->data_direction, T_TASK(cmd)->t_mem_list,
+				set_counts);
+		if (!(task_cdbs)) {
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return PYX_TRANSPORT_LU_COMM_FAILURE;
+		}
+		T_TASK(cmd)->t_task_cdbs += task_cdbs;
+
+#if 0
+		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
+			" %u, t_task_cdbs: %u\n", cmd->data_length,
+			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+			T_TASK(cmd)->t_task_cdbs);
+#endif
+	}
+
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
+	return 0;
+}
+
+static struct list_head *transport_init_se_mem_list(void)
+{
+	struct list_head *se_mem_list;
+
+	se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!(se_mem_list)) {
+		printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(se_mem_list);
+
+	return se_mem_list;
+}
+
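+/*
+ * Set up the struct se_mem list (and BIDI READ list when required) for a
+ * struct se_cmd.  Unless the backend performs its own se_mem mapping, back
+ * the requested length with zeroed, single-page allocations of at most
+ * dma_size bytes each.
+ */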
+static int
+transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
+{
+	unsigned char *buf;
+	struct se_mem *se_mem;
+
+	T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+	if (!(T_TASK(cmd)->t_mem_list))
+		return -ENOMEM;
+
+	/*
+	 * If the device uses memory mapping this is enough.
+	 */
+	if (cmd->se_dev->transport->do_se_mem_map)
+		return 0;
+
+	/*
+	 * Setup BIDI-COMMAND READ list of struct se_mem elements
+	 */
+	if (T_TASK(cmd)->t_tasks_bidi) {
+		T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+		if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+			kfree(T_TASK(cmd)->t_mem_list);
+			return -ENOMEM;
+		}
+	}
+
+	while (length) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+		se_mem->se_len = (length > dma_size) ? dma_size : length;
+
+/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
+		se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+		if (!(se_mem->se_page)) {
+			printk(KERN_ERR "alloc_pages() failed\n");
+			goto out;
+		}
+
+		buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
+		if (!(buf)) {
+			printk(KERN_ERR "kmap_atomic() failed\n");
+			goto out;
+		}
+		memset(buf, 0, se_mem->se_len);
+		kunmap_atomic(buf, KM_IRQ0);
+
+		list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
+		T_TASK(cmd)->t_tasks_se_num++;
+
+		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
+			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
+			se_mem->se_off);
+
+		length -= se_mem->se_len;
+	}
+
+	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
+			T_TASK(cmd)->t_tasks_se_num);
+
+	return 0;
+out:
+	return -1;
+}
+
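+/*
+ * Walk the struct se_mem list to determine how many scatterlist entries
+ * this struct se_task requires, then allocate and initialize task->task_sg
+ * (and task->task_sg_bidi for TCM/pSCSI BIDI passthrough), adding a padding
+ * entry when the fabric module requests SGL chaining.
+ */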
+extern u32 transport_calc_sg_num(
+	struct se_task *task,
+	struct se_mem *in_se_mem,
+	u32 task_offset)
+{
+	struct se_cmd *se_cmd = task->task_se_cmd;
+	struct se_device *se_dev = SE_DEV(se_cmd);
+	struct se_mem *se_mem = in_se_mem;
+	struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
+	u32 sg_length, task_size = task->task_size, task_sg_num_padded;
+
+	while (task_size != 0) {
+		DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
+			" se_mem->se_off(%u) task_offset(%u)\n",
+			se_mem->se_page, se_mem->se_len,
+			se_mem->se_off, task_offset);
+
+		if (task_offset == 0) {
+			if (task_size >= se_mem->se_len) {
+				sg_length = se_mem->se_len;
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list)))
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+			} else {
+				sg_length = task_size;
+				task_size -= sg_length;
+				goto next;
+			}
+
+			DEBUG_SC("sg_length(%u) task_size(%u)\n",
+					sg_length, task_size);
+		} else {
+			if ((se_mem->se_len - task_offset) > task_size) {
+				sg_length = task_size;
+				task_size -= sg_length;
+				goto next;
+			} else {
+				sg_length = (se_mem->se_len - task_offset);
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list)))
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+			}
+
+			DEBUG_SC("sg_length(%u) task_size(%u)\n",
+					sg_length, task_size);
+
+			task_offset = 0;
+		}
+		task_size -= sg_length;
+next:
+		DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
+			task->task_no, task_size);
+
+		task->task_sg_num++;
+	}
+	/*
+	 * Check if the fabric module driver is requesting that all
+	 * struct se_task->task_sg[] be chained together.  If so,
+	 * then allocate an extra padding SG entry for linking and
+	 * marking the end of the chained SGL.
+	 */
+	if (tfo->task_sg_chaining) {
+		task_sg_num_padded = (task->task_sg_num + 1);
+		task->task_padded_sg = 1;
+	} else
+		task_sg_num_padded = task->task_sg_num;
+
+	task->task_sg = kzalloc(task_sg_num_padded *
+			sizeof(struct scatterlist), GFP_KERNEL);
+	if (!(task->task_sg)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" task->task_sg\n");
+		return 0;
+	}
+	sg_init_table(&task->task_sg[0], task_sg_num_padded);
+	/*
+	 * Setup task->task_sg_bidi for SCSI READ payload for
+	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
+	 */
+	if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
+	    (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
+		task->task_sg_bidi = kzalloc(task_sg_num_padded *
+				sizeof(struct scatterlist), GFP_KERNEL);
+		if (!(task->task_sg_bidi)) {
+			printk(KERN_ERR "Unable to allocate memory for"
+				" task->task_sg_bidi\n");
+			return 0;
+		}
+		sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
+	}
+	/*
+	 * For the chaining case, setup the proper end of SGL for the
+	 * initial submission struct task into struct se_subsystem_api.
+	 * This will be cleared later by transport_do_task_sg_chain()
+	 */
+	if (task->task_padded_sg) {
+		sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
+		/*
+		 * Only mark the end of the bi-directional scatterlist if it
+		 * was created, i.e. for a BIDI (RD + WR) request.
+		 */
+		if (task->task_sg_bidi)
+			sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
+	}
+
+	DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
+		" task_sg_num_padded(%u)\n", task->task_sg_num,
+		task_sg_num_padded);
+
+	return task->task_sg_num;
+}
+
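+/*
+ * Set task->task_sectors for TYPE_DISK devices, clamping the request so it
+ * neither runs past the end of the device nor exceeds the max_sectors
+ * device attribute.  *max_sectors_set is flagged when the max_sectors
+ * limit applies.
+ */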
+static inline int transport_set_tasks_sectors_disk(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	if ((lba + sectors) > transport_dev_end_lba(dev)) {
+		task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
+
+		if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
+			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+			*max_sectors_set = 1;
+		}
+	} else {
+		if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+			*max_sectors_set = 1;
+		} else
+			task->task_sectors = sectors;
+	}
+
+	return 0;
+}
+
+static inline int transport_set_tasks_sectors_non_disk(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+		task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+		*max_sectors_set = 1;
+	} else
+		task->task_sectors = sectors;
+
+	return 0;
+}
+
+static inline int transport_set_tasks_sectors(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
+		transport_set_tasks_sectors_disk(task, dev, lba, sectors,
+				max_sectors_set) :
+		transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
+				max_sectors_set);
+}
+
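+/*
+ * Build a list of struct se_mem elements describing the passed struct
+ * scatterlist array, covering cmd->data_length bytes in total.
+ */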
+static int transport_map_sg_to_mem(
+	struct se_cmd *cmd,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	u32 *se_mem_cnt)
+{
+	struct se_mem *se_mem;
+	struct scatterlist *sg;
+	u32 sg_count = 1, cmd_size = cmd->data_length;
+
+	if (!in_mem) {
+		printk(KERN_ERR "No source scatterlist\n");
+		return -1;
+	}
+	sg = (struct scatterlist *)in_mem;
+
+	while (cmd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+		DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
+			" sg_page: %p offset: %d length: %d\n", cmd_size,
+			sg_page(sg), sg->offset, sg->length);
+
+		se_mem->se_page = sg_page(sg);
+		se_mem->se_off = sg->offset;
+
+		if (cmd_size > sg->length) {
+			se_mem->se_len = sg->length;
+			sg = sg_next(sg);
+			sg_count++;
+		} else
+			se_mem->se_len = cmd_size;
+
+		cmd_size -= se_mem->se_len;
+
+		DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
+				*se_mem_cnt, cmd_size);
+		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
+				se_mem->se_page, se_mem->se_off, se_mem->se_len);
+
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+	}
+
+	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
+		" struct se_mem\n", sg_count, *se_mem_cnt);
+
+	BUG_ON(sg_count != *se_mem_cnt);
+
+	return 0;
+}
+
+/*	transport_map_mem_to_sg():
+ *
+ *
+ */
+int transport_map_mem_to_sg(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct se_cmd *se_cmd = task->task_se_cmd;
+	struct se_mem *se_mem = in_se_mem;
+	struct scatterlist *sg = (struct scatterlist *)in_mem;
+	u32 task_size = task->task_size, sg_no = 0;
+
+	if (!sg) {
+		printk(KERN_ERR "Unable to locate valid struct"
+				" scatterlist pointer\n");
+		return -1;
+	}
+
+	while (task_size != 0) {
+		/*
+		 * Setup the contiguous array of scatterlists for
+		 * this struct se_task.
+		 */
+		sg_assign_page(sg, se_mem->se_page);
+
+		if (*task_offset == 0) {
+			sg->offset = se_mem->se_off;
+
+			if (task_size >= se_mem->se_len) {
+				sg->length = se_mem->se_len;
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list))) {
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+					(*se_mem_cnt)++;
+				}
+			} else {
+				sg->length = task_size;
+				/*
+				 * Determine if we need to calculate an offset
+				 * into the struct se_mem on the next go around.
+				 */
+				task_size -= sg->length;
+				if (!(task_size))
+					*task_offset = sg->length;
+
+				goto next;
+			}
+
+		} else {
+			sg->offset = (*task_offset + se_mem->se_off);
+
+			if ((se_mem->se_len - *task_offset) > task_size) {
+				sg->length = task_size;
+				/*
+				 * Determine if we need to calculate an offset
+				 * into the struct se_mem on the next go around.
+				 */
+				task_size -= sg->length;
+				if (!(task_size))
+					*task_offset += sg->length;
+
+				goto next;
+			} else {
+				sg->length = (se_mem->se_len - *task_offset);
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list))) {
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+					(*se_mem_cnt)++;
+				}
+			}
+
+			*task_offset = 0;
+		}
+		task_size -= sg->length;
+next:
+		DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
+			" task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
+			sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
+
+		sg_no++;
+		if (!(task_size))
+			break;
+
+		sg = sg_next(sg);
+
+		BUG_ON(task_size > se_cmd->data_length);
+	}
+	*out_se_mem = se_mem;
+
+	DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
+		" SGs\n", task->task_no, *se_mem_cnt, sg_no);
+
+	return 0;
+}
+
+/*
+ * This function can be used by HW target mode drivers to create a linked
+ * scatterlist from all contiguously allocated struct se_task->task_sg[].
+ * This is intended to be called during the completion path by TCM Core
+ * when struct target_core_fabric_ops->task_sg_chaining is enabled.
+ */
+void transport_do_task_sg_chain(struct se_cmd *cmd)
+{
+	struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
+	struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
+	struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+	struct se_task *task;
+	struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
+	u32 task_sg_num = 0, sg_count = 0;
+	int i;
+
+	if (tfo->task_sg_chaining == 0) {
+		printk(KERN_ERR "task_sg_chaining is disabled for fabric module:"
+				" %s\n", tfo->get_fabric_name());
+		dump_stack();
+		return;
+	}
+	/*
+	 * Walk the struct se_task list and setup scatterlist chains
+	 * for each contiguously allocated struct se_task->task_sg[].
+	 */
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		if (!(task->task_sg) || !(task->task_padded_sg))
+			continue;
+
+		if (sg_head && sg_link) {
+			sg_head_cur = &task->task_sg[0];
+			sg_link_cur = &task->task_sg[task->task_sg_num];
+			/*
+			 * Either add chain or mark end of scatterlist
+			 */
+			if (!(list_is_last(&task->t_list,
+					&T_TASK(cmd)->t_task_list))) {
+				/*
+				 * Clear existing SGL termination bit set in
+				 * transport_calc_sg_num(), see sg_mark_end()
+				 */
+				sg_end_cur = &task->task_sg[task->task_sg_num - 1];
+				sg_end_cur->page_link &= ~0x02;
+
+				sg_chain(sg_head, task_sg_num, sg_head_cur);
+				sg_count += (task->task_sg_num + 1);
+			} else
+				sg_count += task->task_sg_num;
+
+			sg_head = sg_head_cur;
+			sg_link = sg_link_cur;
+			task_sg_num = task->task_sg_num;
+			continue;
+		}
+		sg_head = sg_first = &task->task_sg[0];
+		sg_link = &task->task_sg[task->task_sg_num];
+		task_sg_num = task->task_sg_num;
+		/*
+		 * Check for single task.
+		 */
+		if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
+			/*
+			 * Clear existing SGL termination bit set in
+			 * transport_calc_sg_num(), see sg_mark_end()
+			 */
+			sg_end = &task->task_sg[task->task_sg_num - 1];
+			sg_end->page_link &= ~0x02;
+			sg_count += (task->task_sg_num + 1);
+		} else
+			sg_count += task->task_sg_num;
+	}
+	/*
+	 * Setup the starting pointer and total t_tasks_sg_linked_no including
+	 * padding SGs for linking and to mark the end.
+	 */
+	T_TASK(cmd)->t_tasks_sg_chained = sg_first;
+	T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+
+	DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+		T_TASK(cmd)->t_tasks_sg_chained_no);
+
+	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
+			T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+
+		DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
+			sg, sg_page(sg), sg->length, sg->offset);
+		if (sg_is_chain(sg))
+			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+		if (sg_is_last(sg))
+			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+	}
+
+}
+EXPORT_SYMBOL(transport_do_task_sg_chain);
+
+static int transport_do_se_mem_map(
+	struct se_device *dev,
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset_in)
+{
+	u32 task_offset = *task_offset_in;
+	int ret = 0;
+	/*
+	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
+	 * has been done by the transport plugin.
+	 */
+	if (TRANSPORT(dev)->do_se_mem_map) {
+		ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
+				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
+				task_offset_in);
+		if (ret == 0)
+			T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+
+		return ret;
+	}
+
+	BUG_ON(list_empty(se_mem_list));
+	/*
+	 * This is the normal path for all normal non BIDI and BIDI-COMMAND
+	 * WRITE payloads.  If we need to do BIDI READ passthrough for
+	 * TCM/pSCSI the first call to transport_do_se_mem_map ->
+	 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
+	 * allocation for task->task_sg_bidi, and the subsequent call to
+	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
+	 */
+	if (!(task->task_sg_bidi)) {
+		/*
+		 * Assume by default that the transport plugin uses preallocated
+		 * scatterlists.
+		 */
+		if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
+			return -1;
+		/*
+		 * struct se_task->task_sg now contains the struct scatterlist array.
+		 */
+		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+					in_se_mem, out_se_mem, se_mem_cnt,
+					task_offset_in);
+	}
+	/*
+	 * Handle the se_mem_list -> struct task->task_sg_bidi
+	 * memory map for the extra BIDI READ payload
+	 */
+	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
+				in_se_mem, out_se_mem, se_mem_cnt,
+				task_offset_in);
+}
+
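+/*
+ * Split the I/O described by lba and sectors into struct se_task
+ * descriptors, copy and adjust the CDB for each, and map the struct se_mem
+ * list(s) into each task's scatterlist(s).  Returns the number of tasks
+ * created, or 0 on failure.
+ */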
+static u32 transport_generic_get_cdb_count(
+	struct se_cmd *cmd,
+	unsigned long long lba,
+	u32 sectors,
+	enum dma_data_direction data_direction,
+	struct list_head *mem_list,
+	int set_counts)
+{
+	unsigned char *cdb = NULL;
+	struct se_task *task;
+	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
+	struct se_device *dev = SE_DEV(cmd);
+	int max_sectors_set = 0, ret;
+	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
+
+	if (!mem_list) {
+		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
+				"_cdb_count()\n");
+		return 0;
+	}
+	/*
+	 * Using RAMDISK_DR backstores is the only case where
+	 * mem_list will ever be empty at this point.
+	 */
+	if (!(list_empty(mem_list)))
+		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
+	/*
+	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
+	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
+	 */
+	if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+	    !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
+	    (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
+		se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
+					struct se_mem, se_list);
+
+	while (sectors) {
+		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
+			transport_dev_end_lba(dev));
+
+		task = transport_generic_get_task(cmd, data_direction);
+		if (!(task))
+			goto out;
+
+		transport_set_tasks_sectors(task, dev, lba, sectors,
+				&max_sectors_set);
+
+		task->task_lba = lba;
+		lba += task->task_sectors;
+		sectors -= task->task_sectors;
+		task->task_size = (task->task_sectors *
+				   DEV_ATTRIB(dev)->block_size);
+
+		cdb = TRANSPORT(dev)->get_cdb(task);
+		if ((cdb)) {
+			memcpy(cdb, T_TASK(cmd)->t_task_cdb,
+				scsi_command_size(T_TASK(cmd)->t_task_cdb));
+			cmd->transport_split_cdb(task->task_lba,
+					&task->task_sectors, cdb);
+		}
+
+		/*
+		 * Perform the SE OBJ plugin and/or Transport plugin specific
+		 * mapping for T_TASK(cmd)->t_mem_list. And setup the
+		 * task->task_sg and if necessary task->task_sg_bidi
+		 */
+		ret = transport_do_se_mem_map(dev, task, mem_list,
+				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
+				&task_offset_in);
+		if (ret < 0)
+			goto out;
+
+		se_mem = se_mem_lout;
+		/*
+		 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
+		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
+		 *
+		 * Note that the first call to transport_do_se_mem_map() above will
+		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
+		 * -> transport_calc_sg_num(), and the second here will do the
+		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
+		 */
+		if (task->task_sg_bidi != NULL) {
+			ret = transport_do_se_mem_map(dev, task,
+				T_TASK(cmd)->t_mem_bidi_list, NULL,
+				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
+				&task_offset_in);
+			if (ret < 0)
+				goto out;
+
+			se_mem_bidi = se_mem_bidi_lout;
+		}
+		task_cdbs++;
+
+		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
+				task_cdbs, task->task_sg_num);
+
+		if (max_sectors_set) {
+			max_sectors_set = 0;
+			continue;
+		}
+
+		if (!sectors)
+			break;
+	}
+
+	if (set_counts) {
+		atomic_inc(&T_TASK(cmd)->t_fe_count);
+		atomic_inc(&T_TASK(cmd)->t_se_count);
+	}
+
+	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
+		CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
+		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+
+	return task_cdbs;
+out:
+	return 0;
+}
+
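+/*
+ * Set up the single struct se_task used to execute a control or non-data
+ * CDB, and dispatch it to the backend plugin's map_task_SG(),
+ * map_task_non_SG() or cdb_none() method as appropriate.
+ */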
+static int
+transport_map_control_cmd_to_task(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *cdb;
+	struct se_task *task;
+	int ret;
+
+	task = transport_generic_get_task(cmd, cmd->data_direction);
+	if (!task)
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+	cdb = TRANSPORT(dev)->get_cdb(task);
+	if (cdb)
+		memcpy(cdb, cmd->t_task->t_task_cdb,
+			scsi_command_size(cmd->t_task->t_task_cdb));
+
+	task->task_size = cmd->data_length;
+	task->task_sg_num =
+		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+
+	atomic_inc(&cmd->t_task->t_fe_count);
+	atomic_inc(&cmd->t_task->t_se_count);
+
+	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
+		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+		u32 se_mem_cnt = 0, task_offset = 0;
+
+		if (!list_empty(T_TASK(cmd)->t_mem_list))
+			se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+					struct se_mem, se_list);
+
+		ret = transport_do_se_mem_map(dev, task,
+				cmd->t_task->t_mem_list, NULL, se_mem,
+				&se_mem_lout, &se_mem_cnt, &task_offset);
+		if (ret < 0)
+			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+		if (dev->transport->map_task_SG)
+			return dev->transport->map_task_SG(task);
+		return 0;
+	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+		if (dev->transport->map_task_non_SG)
+			return dev->transport->map_task_non_SG(task);
+		return 0;
+	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+		if (dev->transport->cdb_none)
+			return dev->transport->cdb_none(task);
+		return 0;
+	} else {
+		BUG();
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	}
+}
+
+/*	 transport_generic_new_cmd(): Called from transport_processing_thread()
+ *
+ *	 Allocate storage transport resources from a set of values predefined
+ *	 by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
+ *	 Any non-zero return here is treated as an "out of resource" op.
+ */
+/*
+ * Generate struct se_task(s) and/or their payloads for this CDB.
+ */
+static int transport_generic_new_cmd(struct se_cmd *cmd)
+{
+	struct se_portal_group *se_tpg;
+	struct se_task *task;
+	struct se_device *dev = SE_DEV(cmd);
+	int ret = 0;
+
+	/*
+	 * Determine if the TCM fabric module has already allocated physical
+	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
+	 * to set up beforehand the linked list of physical memory at
+	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+		ret = transport_allocate_resources(cmd);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = transport_get_sectors(cmd);
+	if (ret < 0)
+		return ret;
+
+	ret = transport_new_cmd_obj(cmd);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Determine if the calling TCM fabric module is talking to
+	 * Linux/NET via kernel sockets and needs to allocate a
+	 * struct iovec array to complete the struct se_cmd
+	 */
+	se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
+	if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
+		ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
+		if (ret < 0)
+			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	}
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+			if (atomic_read(&task->task_sent))
+				continue;
+			if (!dev->transport->map_task_SG)
+				continue;
+
+			ret = dev->transport->map_task_SG(task);
+			if (ret < 0)
+				return ret;
+		}
+	} else {
+		ret = transport_map_control_cmd_to_task(cmd);
+		if (ret < 0)
+			return ret;
+	}
+
+	/*
+	 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.
+	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
+	 * will be added to the struct se_device execution queue after its WRITE
+	 * data has arrived. (ie: It gets handled by the transport processing
+	 * thread a second time)
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		transport_add_tasks_to_state_queue(cmd);
+		return transport_generic_write_pending(cmd);
+	}
+	/*
+	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
+	 * to the execution queue.
+	 */
+	transport_execute_tasks(cmd);
+	return 0;
+}
+
+/*	transport_generic_process_write():
+ *
+ *
+ */
+void transport_generic_process_write(struct se_cmd *cmd)
+{
+#if 0
+	/*
+	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
+	 * original EDTL
+	 */
+	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+		if (!T_TASK(cmd)->t_tasks_se_num) {
+			unsigned char *dst, *buf =
+				(unsigned char *)T_TASK(cmd)->t_task_buf;
+
+			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
+			if (!(dst)) {
+				printk(KERN_ERR "Unable to allocate memory for"
+						" WRITE underflow\n");
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				return;
+			}
+			memcpy(dst, buf, cmd->cmd_spdtl);
+
+			kfree(T_TASK(cmd)->t_task_buf);
+			T_TASK(cmd)->t_task_buf = dst;
+		} else {
+			struct scatterlist *sg =
+				(struct scatterlist *)T_TASK(cmd)->t_task_buf;
+			struct scatterlist *orig_sg;
+
+			orig_sg = kzalloc(sizeof(struct scatterlist) *
+					T_TASK(cmd)->t_tasks_se_num,
+					GFP_KERNEL);
+			if (!(orig_sg)) {
+				printk(KERN_ERR "Unable to allocate memory"
+						" for WRITE underflow\n");
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				return;
+			}
+
+			memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
+					sizeof(struct scatterlist) *
+					T_TASK(cmd)->t_tasks_se_num);
+
+			cmd->data_length = cmd->cmd_spdtl;
+			/*
+			 * FIXME, clear out original struct se_task and state
+			 * information.
+			 */
+			if (transport_generic_new_cmd(cmd) < 0) {
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				kfree(orig_sg);
+				return;
+			}
+
+			transport_memcpy_write_sg(cmd, orig_sg);
+		}
+	}
+#endif
+	transport_execute_tasks(cmd);
+}
+EXPORT_SYMBOL(transport_generic_process_write);
+
+/*	transport_generic_write_pending():
+ *
+ *
+ */
+static int transport_generic_write_pending(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	cmd->t_state = TRANSPORT_WRITE_PENDING;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
+	 * from the passed Linux/SCSI struct scatterlist located at
+	 * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at
+	 * T_TASK(se_cmd)->t_task_buf.
+	 */
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+		transport_memcpy_read_contig(cmd,
+				T_TASK(cmd)->t_task_buf,
+				T_TASK(cmd)->t_task_pt_sgl);
+	/*
+	 * Clear the se_cmd for WRITE_PENDING status in order to set
+	 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+	 * can be called from HW target mode interrupt code.  This is safe
+	 * to call with transport_off=1 before CMD_TFO(cmd)->write_pending,
+	 * because the se_cmd->se_lun pointer is not being cleared.
+	 */
+	transport_cmd_check_stop(cmd, 1, 0);
+
+	/*
+	 * Call the fabric write_pending function here to let the
+	 * frontend know that WRITE buffers are ready.
+	 */
+	ret = CMD_TFO(cmd)->write_pending(cmd);
+	if (ret < 0)
+		return ret;
+
+	return PYX_TRANSPORT_WRITE_PENDING;
+}
+
+/*	transport_release_cmd_to_pool():
+ *
+ *
+ */
+void transport_release_cmd_to_pool(struct se_cmd *cmd)
+{
+	BUG_ON(!T_TASK(cmd));
+	BUG_ON(!CMD_TFO(cmd));
+
+	transport_free_se_cmd(cmd);
+	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+}
+EXPORT_SYMBOL(transport_release_cmd_to_pool);
+
+/*	transport_generic_free_cmd():
+ *
+ *	Called from processing frontend to release storage engine resources
+ */
+void transport_generic_free_cmd(
+	struct se_cmd *cmd,
+	int wait_for_tasks,
+	int release_to_pool,
+	int session_reinstatement)
+{
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
+		transport_release_cmd_to_pool(cmd);
+	else {
+		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
+
+		if (SE_LUN(cmd)) {
+#if 0
+			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
+				" SE_LUN(cmd)\n", cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+			transport_lun_remove_cmd(cmd);
+		}
+
+		if (wait_for_tasks && cmd->transport_wait_for_tasks)
+			cmd->transport_wait_for_tasks(cmd, 0, 0);
+
+		transport_generic_remove(cmd, release_to_pool,
+				session_reinstatement);
+	}
+}
+EXPORT_SYMBOL(transport_generic_free_cmd);
+
+static void transport_nop_wait_for_tasks(
+	struct se_cmd *cmd,
+	int remove_cmd,
+	int session_reinstatement)
+{
+	return;
+}
+
+/*	transport_lun_wait_for_tasks():
+ *
+ *	Called from ConfigFS context to stop the passed struct se_cmd to allow
+ *	a struct se_lun to be successfully shut down.
+ */
+static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
+{
+	unsigned long flags;
+	int ret;
+	/*
+	 * If the frontend has already requested this struct se_cmd to
+	 * be stopped, we can safely ignore this struct se_cmd.
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+			" TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		transport_cmd_check_stop(cmd, 1, 0);
+		return -1;
+	}
+	atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
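+	/* Wake the device processing thread so it notices the LUN stop request */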
+	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+	ret = transport_stop_tasks_for_cmd(cmd);
+
+	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
+			" %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+	if (!ret) {
+		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+				CMD_TFO(cmd)->get_task_tag(cmd));
+		wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+				CMD_TFO(cmd)->get_task_tag(cmd));
+	}
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+	return 0;
+}
+
+/* #define DEBUG_CLEAR_LUN */
+#ifdef DEBUG_CLEAR_LUN
+#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CLEAR_L(x...)
+#endif
+
+static void __transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct se_cmd *cmd = NULL;
+	unsigned long lun_flags, cmd_flags;
+	/*
+	 * Do exception processing and return CHECK_CONDITION status to the
+	 * Initiator Port.
+	 */
+	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	while (!list_empty_careful(&lun->lun_cmd_list)) {
+		cmd = list_entry(lun->lun_cmd_list.next,
+			struct se_cmd, se_lun_list);
+		list_del(&cmd->se_lun_list);
+
+		if (!(T_TASK(cmd))) {
+			printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
+				" [i,t]_state: %u/%u\n",
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+			BUG();
+		}
+		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+		/*
+		 * This will notify iscsi_target_transport.c:
+		 * transport_cmd_check_stop() that a LUN shutdown is in
+		 * progress for the iscsi_cmd_t.
+		 */
+		spin_lock(&T_TASK(cmd)->t_state_lock);
+		DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+			"_lun_stop for  ITT: 0x%08x\n",
+			SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
+		spin_unlock(&T_TASK(cmd)->t_state_lock);
+
+		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+
+		if (!(SE_LUN(cmd))) {
+			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+			BUG();
+		}
+		/*
+		 * If the Storage engine still owns the iscsi_cmd_t, determine
+		 * and/or stop its context.
+		 */
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+			"_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+			"_wait_for_tasks(): SUCCESS\n",
+			SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+			goto check_cond;
+		}
+		atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+		transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+
+		transport_free_dev_tasks(cmd);
+		/*
+		 * The Storage engine stopped this struct se_cmd before it was
+		 * sent to the fabric frontend for delivery back to the
+		 * Initiator Node.  Return this SCSI CDB back with a
+		 * CHECK_CONDITION status.
+		 */
+check_cond:
+		transport_send_check_condition_and_sense(cmd,
+				TCM_NON_EXISTENT_LUN, 0);
+		/*
+		 * If the fabric frontend is waiting for this iscsi_cmd_t to
+		 * be released, notify the waiting thread now that the LU has
+		 * finished accessing it.
+		 */
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
+			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+				" struct se_cmd: %p ITT: 0x%08x\n",
+				lun->unpacked_lun,
+				cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					cmd_flags);
+			transport_cmd_check_stop(cmd, 1, 0);
+			complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+			lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	}
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+}
+
+static int transport_clear_lun_thread(void *p)
+{
+	struct se_lun *lun = (struct se_lun *)p;
+
+	__transport_clear_lun_from_sessions(lun);
+	complete(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+int transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct task_struct *kt;
+
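+	/*
+	 * Run the LUN shutdown in a dedicated kthread, then block until it
+	 * signals lun_shutdown_comp once outstanding commands are quiesced.
+	 */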
+	kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+			"tcm_cl_%u", lun->unpacked_lun);
+	if (IS_ERR(kt)) {
+		printk(KERN_ERR "Unable to start clear_lun thread\n");
+		return -1;
+	}
+	wait_for_completion(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+/*	transport_generic_wait_for_tasks():
+ *
+ *	Called from frontend or passthrough context to wait for storage engine
+ *	to pause and/or release frontend generated struct se_cmd.
+ */
+static void transport_generic_wait_for_tasks(
+	struct se_cmd *cmd,
+	int remove_cmd,
+	int session_reinstatement)
+{
+	unsigned long flags;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * If we are already stopped due to an external event (i.e., LUN shutdown)
+	 * sleep until the connection can have the passed struct se_cmd back.
+	 * The T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by
+	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
+	 * has completed its operation on the struct se_cmd.
+	 */
+	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+
+		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe"
+			"_stop_comp); for ITT: 0x%08x\n",
+			CMD_TFO(cmd)->get_task_tag(cmd));
+		/*
+		 * There is a special case for WRITEs where a FE exception +
+		 * LUN shutdown means the ConfigFS context is still sleeping on
+		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
+		 * We go ahead and complete transport_lun_stop_comp just to be
+		 * sure here.
+		 */
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		complete(&T_TASK(cmd)->transport_lun_stop_comp);
+		wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_all_task_dev_remove_state(cmd);
+		/*
+		 * At this point, the frontend that originated this struct se_cmd
+		 * owns the structure again, and it can be released through
+		 * normal means below.
+		 */
+		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe_"
+			"stop_comp); for ITT: 0x%08x\n",
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+	}
+	if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
+	     atomic_read(&T_TASK(cmd)->t_transport_aborted))
+		goto remove;
+
+	atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+
+	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
+		" = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+		CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+		cmd->deferred_t_state);
+
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+	wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+	atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+
+	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
+		"&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
+		CMD_TFO(cmd)->get_task_tag(cmd));
+remove:
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	if (!remove_cmd)
+		return;
+
+	transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+}
+
+static int transport_get_sense_codes(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	*asc = cmd->scsi_asc;
+	*ascq = cmd->scsi_ascq;
+
+	return 0;
+}
+
+static int transport_set_sense_codes(
+	struct se_cmd *cmd,
+	u8 asc,
+	u8 ascq)
+{
+	cmd->scsi_asc = asc;
+	cmd->scsi_ascq = ascq;
+
+	return 0;
+}
+
+int transport_send_check_condition_and_sense(
+	struct se_cmd *cmd,
+	u8 reason,
+	int from_transport)
+{
+	unsigned char *buffer = cmd->sense_buffer;
+	unsigned long flags;
+	int offset;
+	u8 asc = 0, ascq = 0;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return 0;
+	}
+	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	if (!reason && from_transport)
+		goto after_reason;
+
+	if (!from_transport)
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+	/*
+	 * Data Segment and SenseLength of the fabric response PDU.
+	 *
+	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
+	 * from include/scsi/scsi_cmnd.h
+	 */
+	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+	/*
+	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
+	 * SENSE KEY values from include/scsi/scsi.h
+	 */
+	switch (reason) {
+	case TCM_NON_EXISTENT_LUN:
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_SECTOR_COUNT_TOO_MANY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID COMMAND OPERATION CODE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
+		break;
+	case TCM_UNKNOWN_MODE_PAGE:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* BUS DEVICE RESET FUNCTION OCCURRED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
+		break;
+	case TCM_INCORRECT_AMOUNT_OF_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* NOT ENOUGH UNSOLICITED DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
+		break;
+	case TCM_INVALID_CDB_FIELD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_INVALID_PARAMETER_LIST:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* INVALID FIELD IN PARAMETER LIST */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+		break;
+	case TCM_UNEXPECTED_UNSOLICITED_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* UNEXPECTED_UNSOLICITED_DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
+		break;
+	case TCM_SERVICE_CRC_ERROR:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* PROTOCOL SERVICE CRC ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
+		/* N/A */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
+		break;
+	case TCM_SNACK_REJECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* READ ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
+		/* FAILED RETRANSMISSION REQUEST */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
+		break;
+	case TCM_WRITE_PROTECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* DATA PROTECT */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+		/* WRITE PROTECTED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+		break;
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* UNIT ATTENTION */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_CHECK_CONDITION_NOT_READY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* Not Ready */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+		transport_get_sense_codes(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	default:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL UNIT COMMUNICATION FAILURE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
+		break;
+	}
+	/*
+	 * This code uses linux/include/scsi/scsi.h SAM status codes!
+	 */
+	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+	/*
+	 * Automatically padded, this value is encoded in the fabric's
+	 * data_length response PDU containing the SCSI defined sense data.
+	 */
+	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
+
+after_reason:
+	CMD_TFO(cmd)->queue_status(cmd);
+	return 0;
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+	int ret = 0;
+
+	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
+		if (!(send_status) ||
+		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+			return 1;
+#if 0
+		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+			" status for CDB: 0x%02x ITT: 0x%08x\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+		CMD_TFO(cmd)->queue_status(cmd);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(transport_check_aborted_status);
+
+void transport_send_task_abort(struct se_cmd *cmd)
+{
+	/*
+	 * If there are still expected incoming fabric WRITEs, we wait
+	 * until they have completed before sending a TASK_ABORTED
+	 * response.  This response with TASK_ABORTED status will be
+	 * queued back to the fabric module by transport_check_aborted_status().
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
+			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+			smp_mb__after_atomic_inc();
+			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+			transport_new_cmd_failure(cmd);
+			return;
+		}
+	}
+	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+#if 0
+	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
+		CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+	CMD_TFO(cmd)->queue_status(cmd);
+}
+
+/*	transport_generic_do_tmr():
+ *
+ *
+ */
+int transport_generic_do_tmr(struct se_cmd *cmd)
+{
+	struct se_cmd *ref_cmd;
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_tmr_req *tmr = cmd->se_tmr_req;
+	int ret;
+
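+	/* Dispatch the requested task management function and record its response */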
+	switch (tmr->function) {
+	case ABORT_TASK:
+		ref_cmd = tmr->ref_cmd;
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case ABORT_TASK_SET:
+	case CLEAR_ACA:
+	case CLEAR_TASK_SET:
+		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+		break;
+	case LUN_RESET:
+		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
+		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
+					 TMR_FUNCTION_REJECTED;
+		break;
+#if 0
+	case TARGET_WARM_RESET:
+		transport_generic_host_reset(dev->se_hba);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case TARGET_COLD_RESET:
+		transport_generic_host_reset(dev->se_hba);
+		transport_generic_cold_reset(dev->se_hba);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+#endif
+	default:
+		printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
+				tmr->function);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	}
+
+	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+	CMD_TFO(cmd)->queue_tm_rsp(cmd);
+
+	transport_cmd_check_stop(cmd, 2, 0);
+	return 0;
+}
+
+/*
+ *	Called with spin_lock_irq(&dev->execute_task_lock); held
+ *
+ */
+static struct se_task *
+transport_get_task_from_state_list(struct se_device *dev)
+{
+	struct se_task *task;
+
+	if (list_empty(&dev->state_task_list))
+		return NULL;
+
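+	/* Take the first task from the head of the device state list */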
+	list_for_each_entry(task, &dev->state_task_list, t_state_list)
+		break;
+
+	list_del(&task->t_state_list);
+	atomic_set(&task->task_state_active, 0);
+
+	return task;
+}
+
+static void transport_processing_shutdown(struct se_device *dev)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr;
+	struct se_task *task;
+	u8 state;
+	unsigned long flags;
+	/*
+	 * Empty the struct se_device's struct se_task state list.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	while ((task = transport_get_task_from_state_list(dev))) {
+		if (!(TASK_CMD(task))) {
+			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+			continue;
+		}
+		cmd = TASK_CMD(task);
+
+		if (!T_TASK(cmd)) {
+			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+				" %p ITT: 0x%08x\n", task, cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+			continue;
+		}
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
+			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
+			" %d/%d cdb: 0x%02x\n", cmd, task,
+			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
+			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+			cmd->t_state, cmd->deferred_t_state,
+			T_TASK(cmd)->t_task_cdb[0]);
+		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
+			" t_transport_stop: %d t_transport_sent: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			T_TASK(cmd)->t_task_cdbs,
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+			atomic_read(&T_TASK(cmd)->t_transport_active),
+			atomic_read(&T_TASK(cmd)->t_transport_stop),
+			atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+				" %p\n", task, dev);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+				task, dev);
+
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		} else {
+			if (atomic_read(&task->task_execute_queue) != 0)
+				transport_remove_task_from_execute_queue(task, dev);
+		}
+		__transport_stop_task_timer(task, &flags);
+
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+			spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_DO("Skipping task: %p, dev: %p for"
+				" t_task_cdbs_ex_left: %d\n", task, dev,
+				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+
+		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+					" %p\n", task, dev);
+
+			if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+				transport_send_check_condition_and_sense(
+					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
+					0);
+				transport_remove_cmd_from_queue(cmd,
+					SE_DEV(cmd)->dev_queue_obj);
+
+				transport_lun_remove_cmd(cmd);
+				transport_cmd_check_stop(cmd, 1, 0);
+			} else {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+				transport_remove_cmd_from_queue(cmd,
+					SE_DEV(cmd)->dev_queue_obj);
+
+				transport_lun_remove_cmd(cmd);
+
+				if (transport_cmd_check_stop(cmd, 1, 0))
+					transport_generic_remove(cmd, 0, 0);
+			}
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+				task, dev);
+
+		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+			transport_send_check_condition_and_sense(cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+			transport_remove_cmd_from_queue(cmd,
+				SE_DEV(cmd)->dev_queue_obj);
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop(cmd, 1, 0);
+		} else {
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			transport_remove_cmd_from_queue(cmd,
+				SE_DEV(cmd)->dev_queue_obj);
+			transport_lun_remove_cmd(cmd);
+
+			if (transport_cmd_check_stop(cmd, 1, 0))
+				transport_generic_remove(cmd, 0, 0);
+		}
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	/*
+	 * Empty the struct se_device's struct se_cmd list.
+	 */
+	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
+		spin_unlock_irqrestore(
+				&dev->dev_queue_obj->cmd_queue_lock, flags);
+		cmd = (struct se_cmd *)qr->cmd;
+		state = qr->state;
+		kfree(qr);
+
+		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+				cmd, state);
+
+		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+			transport_send_check_condition_and_sense(cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop(cmd, 1, 0);
+		} else {
+			transport_lun_remove_cmd(cmd);
+			if (transport_cmd_check_stop(cmd, 1, 0))
+				transport_generic_remove(cmd, 0, 0);
+		}
+		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+}
+
+/*	transport_processing_thread():
+ *
+ *
+ */
+static int transport_processing_thread(void *param)
+{
+	int ret, t_state;
+	struct se_cmd *cmd;
+	struct se_device *dev = (struct se_device *) param;
+	struct se_queue_req *qr;
+
+	set_user_nice(current, -20);
+
+	while (!kthread_should_stop()) {
+		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
+				atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+				kthread_should_stop());
+		if (ret < 0)
+			goto out;
+
+		spin_lock_irq(&dev->dev_status_lock);
+		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
+			spin_unlock_irq(&dev->dev_status_lock);
+			transport_processing_shutdown(dev);
+			continue;
+		}
+		spin_unlock_irq(&dev->dev_status_lock);
+
+get_cmd:
+		__transport_execute_tasks(dev);
+
+		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
+		if (!(qr))
+			continue;
+
+		cmd = (struct se_cmd *)qr->cmd;
+		t_state = qr->state;
+		kfree(qr);
+
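+		/* Dispatch based on the deferred transport state for this se_cmd */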
+		switch (t_state) {
+		case TRANSPORT_NEW_CMD_MAP:
+			if (!(CMD_TFO(cmd)->new_cmd_map)) {
+				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+					" NULL for TRANSPORT_NEW_CMD_MAP\n");
+				BUG();
+			}
+			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+			if (ret < 0) {
+				cmd->transport_error_status = ret;
+				transport_generic_request_failure(cmd, NULL,
+						0, (cmd->data_direction !=
+						    DMA_TO_DEVICE));
+				break;
+			}
+			/* Fall through */
+		case TRANSPORT_NEW_CMD:
+			ret = transport_generic_new_cmd(cmd);
+			if (ret < 0) {
+				cmd->transport_error_status = ret;
+				transport_generic_request_failure(cmd, NULL,
+					0, (cmd->data_direction !=
+					 DMA_TO_DEVICE));
+			}
+			break;
+		case TRANSPORT_PROCESS_WRITE:
+			transport_generic_process_write(cmd);
+			break;
+		case TRANSPORT_COMPLETE_OK:
+			transport_stop_all_task_timers(cmd);
+			transport_generic_complete_ok(cmd);
+			break;
+		case TRANSPORT_REMOVE:
+			transport_generic_remove(cmd, 1, 0);
+			break;
+		case TRANSPORT_PROCESS_TMR:
+			transport_generic_do_tmr(cmd);
+			break;
+		case TRANSPORT_COMPLETE_FAILURE:
+			transport_generic_request_failure(cmd, NULL, 1, 1);
+			break;
+		case TRANSPORT_COMPLETE_TIMEOUT:
+			transport_stop_all_task_timers(cmd);
+			transport_generic_request_timeout(cmd);
+			break;
+		default:
+			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
+				" %u\n", t_state, cmd->deferred_t_state,
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd),
+				SE_LUN(cmd)->unpacked_lun);
+			BUG();
+		}
+
+		goto get_cmd;
+	}
+
+out:
+	transport_release_all_cmds(dev);
+	dev->process_thread = NULL;
+	return 0;
+}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 0000000..a2ef346
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,332 @@
+/*******************************************************************************
+ * Filename: target_core_ua.c
+ *
+ * This file contains logic for SPC-3 Unit Attention emulation
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+int core_scsi3_ua_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+
+	if (!(sess))
+		return 0;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return 0;
+
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count)))
+		return 0;
+	/*
+	 * From sam4r14, section 5.14 Unit attention condition:
+	 *
+	 * a) if an INQUIRY command enters the enabled command state, the
+	 *    device server shall process the INQUIRY command and shall neither
+	 *    report nor clear any unit attention condition;
+	 * b) if a REPORT LUNS command enters the enabled command state, the
+	 *    device server shall process the REPORT LUNS command and shall not
+	 *    report any unit attention condition;
+	 * e) if a REQUEST SENSE command enters the enabled command state while
+	 *    a unit attention condition exists for the SCSI initiator port
+	 *    associated with the I_T nexus on which the REQUEST SENSE command
+	 *    was received, then the device server shall process the command
+	 *    and either:
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		return 0;
+	default:
+		return -1;
+	}
+
+	return -1;
+}
+
+int core_scsi3_ua_allocate(
+	struct se_node_acl *nacl,
+	u32 unpacked_lun,
+	u8 asc,
+	u8 ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_ua *ua, *ua_p, *ua_tmp;
+	/*
+	 * PASSTHROUGH OPS
+	 */
+	if (!(nacl))
+		return -1;
+
+	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+	if (!(ua)) {
+		printk(KERN_ERR "Unable to allocate struct se_ua\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&ua->ua_dev_list);
+	INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+	ua->ua_nacl = nacl;
+	ua->ua_asc = asc;
+	ua->ua_ascq = ascq;
+
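+	/* Serialize against other UA producers and consumers for this mapped LUN */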
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[unpacked_lun];
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * Do not report the same UNIT ATTENTION twice.
+		 */
+		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+			spin_unlock(&deve->ua_lock);
+			spin_unlock_irq(&nacl->device_list_lock);
+			kmem_cache_free(se_ua_cache, ua);
+			return 0;
+		}
+		/*
+		 * Attach the highest priority Unit Attention to
+		 * the head of the list following sam4r14,
+		 * Section 5.14 Unit Attention Condition:
+		 *
+		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+		 * POWER ON OCCURRED or
+		 * DEVICE INTERNAL RESET
+		 * SCSI BUS RESET OCCURRED or
+		 * MICROCODE HAS BEEN CHANGED or
+		 * protocol specific
+		 * BUS DEVICE RESET FUNCTION OCCURRED
+		 * I_T NEXUS LOSS OCCURRED
+		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+		 * all others                                    Lowest
+		 *
+		 * Each of the ASCQ codes listed above are defined in
+		 * the 29h ASC family, see spc4r17 Table D.1
+		 */
+		if (ua_p->ua_asc == 0x29) {
+			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+						&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else if (ua_p->ua_asc == 0x2a) {
+			/*
+			 * Incoming Family 29h ASCQ codes will override
+			 * Family 2Ah ASCQ codes for Unit Attention condition.
+			 */
+			if ((asc == 0x29) || (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+					&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else
+			list_add_tail(&ua->ua_nacl_list,
+				&deve->ua_list);
+		spin_unlock(&deve->ua_lock);
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		atomic_inc(&deve->ua_count);
+		smp_mb__after_atomic_inc();
+		return 0;
+	}
+	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+		" 0x%02x, ASCQ: 0x%02x\n",
+		TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+		asc, ascq);
+
+	atomic_inc(&deve->ua_count);
+	smp_mb__after_atomic_inc();
+	return 0;
+}
+
+void core_scsi3_ua_release_all(
+	struct se_dev_entry *deve)
+{
+	struct se_ua *ua, *ua_p;
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+}
+
+void core_scsi3_ua_for_check_condition(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!(sess))
+		return;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count))) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
+	 * sense data for the received CDB.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * For ua_intlck_ctrl code not equal to 00b, only report the
+		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
+		 * clearing it.
+		 */
+		if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			break;
+		}
+		/*
+		 * Otherwise for the default 00b, release the UNIT ATTENTION
+		 * condition.  Return the ASC/ASCQ of the highest priority UA
+		 * (head of the list) in the outgoing CHECK_CONDITION + sense.
+		 */
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
+		TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+		(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+		"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
+		cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+}
+
+int core_scsi3_ua_clear_for_request_sense(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!(sess))
+		return -1;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return -1;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count))) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -1;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list.  The First (and hence highest priority)
+	 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
+	 * matching struct se_lun.
+	 *
+	 * Once the returning ASC/ASCQ values are set, we go ahead and
+	 * release all of the Unit Attention conditions for the associated
+	 * struct se_lun.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+		" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+		cmd->orig_fe_lun, *asc, *ascq);
+
+	return (head) ? -1 : 0;
+}
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 0000000..6e6b034
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,36 @@
+#ifndef TARGET_CORE_UA_H
+#define TARGET_CORE_UA_H
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED	0x00
+#define ASCQ_29H_POWER_ON_OCCURRED				0x01
+#define ASCQ_29H_SCSI_BUS_RESET_OCCURED				0x02
+#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED		0x03
+#define ASCQ_29H_DEVICE_INTERNAL_RESET				0x04
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED	0x05
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD		0x06
+#define ASCQ_29H_NEXUS_LOSS_OCCURRED				0x07
+
+#define ASCQ_2AH_PARAMETERS_CHANGED				0x00
+#define ASCQ_2AH_MODE_PARAMETERS_CHANGED			0x01
+#define ASCQ_2AH_LOG_PARAMETERS_CHANGED				0x02
+#define ASCQ_2AH_RESERVATIONS_PREEMPTED				0x03
+#define ASCQ_2AH_RESERVATIONS_RELEASED				0x04
+#define ASCQ_2AH_REGISTRATIONS_PREEMPTED			0x05
+#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED		0x06
+#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_PRIORITY_CHANGED				0x08
+
+#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS		0x09
+
+extern struct kmem_cache *se_ua_cache;
+
+extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern void core_scsi3_ua_release_all(struct se_dev_entry *);
+extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
+						u8 *, u8 *);
+
+#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 0d236f4..b001019 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -284,12 +284,11 @@
 
 module_param(ixjdebug, int, 0);
 
-static struct pci_device_id ixj_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = {
 	{ PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ }
 };
-
 MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
 
 /************************************************************************
@@ -6581,7 +6580,8 @@
 	case IXJCTL_SET_FILTER:
 		if (copy_from_user(&jf, argp, sizeof(jf))) 
 			retval = -EFAULT;
-		retval = ixj_init_filter(j, &jf);
+		else
+			retval = ixj_init_filter(j, &jf);
 		break;
 	case IXJCTL_SET_FILTER_RAW:
 		if (copy_from_user(&jfr, argp, sizeof(jfr))) 
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 13c72c6..713b7ea 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -32,6 +32,8 @@
 #include <linux/thermal.h>
 #include <linux/spinlock.h>
 #include <linux/reboot.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
 
 MODULE_AUTHOR("Zhang Rui");
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
@@ -58,6 +60,8 @@
 static LIST_HEAD(thermal_cdev_list);
 static DEFINE_MUTEX(thermal_list_lock);
 
+static unsigned int thermal_event_seqnum;
+
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
 	int err;
@@ -823,11 +827,8 @@
  * @devdata:	device private data.
  * @ops:		standard thermal cooling devices callbacks.
  */
-struct thermal_cooling_device *thermal_cooling_device_register(char *type,
-							       void *devdata,
-							       struct
-							       thermal_cooling_device_ops
-							       *ops)
+struct thermal_cooling_device *thermal_cooling_device_register(
+     char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
 {
 	struct thermal_cooling_device *cdev;
 	struct thermal_zone_device *pos;
@@ -1048,13 +1049,9 @@
  * section 11.1.5.1 of the ACPI specification 3.0.
  */
 struct thermal_zone_device *thermal_zone_device_register(char *type,
-							 int trips,
-							 void *devdata, struct
-							 thermal_zone_device_ops
-							 *ops, int tc1, int
-							 tc2,
-							 int passive_delay,
-							 int polling_delay)
+	int trips, void *devdata,
+	const struct thermal_zone_device_ops *ops,
+	int tc1, int tc2, int passive_delay, int polling_delay)
 {
 	struct thermal_zone_device *tz;
 	struct thermal_cooling_device *pos;
@@ -1214,6 +1211,103 @@
 
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.name = THERMAL_GENL_FAMILY_NAME,
+	.version = THERMAL_GENL_VERSION,
+	.maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+	.name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
+int generate_netlink_event(u32 orig, enum events event)
+{
+	struct sk_buff *skb;
+	struct nlattr *attr;
+	struct thermal_genl_event *thermal_event;
+	void *msg_header;
+	int size;
+	int result;
+
+	/* allocate memory */
+	size = nla_total_size(sizeof(struct thermal_genl_event)) +
+				nla_total_size(0);
+
+	skb = genlmsg_new(size, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	/* add the genetlink message header */
+	msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
+				 &thermal_event_genl_family, 0,
+				 THERMAL_GENL_CMD_EVENT);
+	if (!msg_header) {
+		nlmsg_free(skb);
+		return -ENOMEM;
+	}
+
+	/* fill the data */
+	attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
+			sizeof(struct thermal_genl_event));
+
+	if (!attr) {
+		nlmsg_free(skb);
+		return -EINVAL;
+	}
+
+	thermal_event = nla_data(attr);
+	if (!thermal_event) {
+		nlmsg_free(skb);
+		return -EINVAL;
+	}
+
+	memset(thermal_event, 0, sizeof(struct thermal_genl_event));
+
+	thermal_event->orig = orig;
+	thermal_event->event = event;
+
+	/* send multicast genetlink message */
+	result = genlmsg_end(skb, msg_header);
+	if (result < 0) {
+		nlmsg_free(skb);
+		return result;
+	}
+
+	result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
+	if (result)
+		printk(KERN_INFO "failed to send netlink event: %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(generate_netlink_event);
+
+static int genetlink_init(void)
+{
+	int result;
+
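+	/*
+	 * Register the thermal genetlink family first, then its multicast
+	 * group; unregister the family again if the group add fails.
+	 */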
+	result = genl_register_family(&thermal_event_genl_family);
+	if (result)
+		return result;
+
+	result = genl_register_mc_group(&thermal_event_genl_family,
+					&thermal_event_mcgrp);
+	if (result)
+		genl_unregister_family(&thermal_event_genl_family);
+	return result;
+}
+
+static void genetlink_exit(void)
+{
+	genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
 static int __init thermal_init(void)
 {
 	int result = 0;
@@ -1225,6 +1319,7 @@
 		mutex_destroy(&thermal_idr_lock);
 		mutex_destroy(&thermal_list_lock);
 	}
+	result = genetlink_init();
 	return result;
 }
 
@@ -1235,7 +1330,8 @@
 	idr_destroy(&thermal_cdev_idr);
 	mutex_destroy(&thermal_idr_lock);
 	mutex_destroy(&thermal_list_lock);
+	genetlink_exit();
 }
 
-subsys_initcall(thermal_init);
+fs_initcall(thermal_init);
 module_exit(thermal_exit);
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c43ef48..3962772 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -9,3 +9,5 @@
 obj-$(CONFIG_R3964)		+= n_r3964.o
 
 obj-y				+= vt/
+obj-$(CONFIG_HVC_DRIVER)	+= hvc/
+obj-y				+= serial/
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
new file mode 100644
index 0000000..d79e7e9
--- /dev/null
+++ b/drivers/tty/hvc/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
+obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
+obj-$(CONFIG_HVC_TILE)		+= hvc_tile.o
+obj-$(CONFIG_HVC_DCC)		+= hvc_dcc.o
+obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o
+obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
+obj-$(CONFIG_HVC_IRQ)		+= hvc_irq.o
+obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o
+obj-$(CONFIG_HVC_IUCV)		+= hvc_iucv.o
+obj-$(CONFIG_HVC_UDBG)		+= hvc_udbg.o
+obj-$(CONFIG_HVCS)		+= hvcs.o
diff --git a/drivers/char/hvc_beat.c b/drivers/tty/hvc/hvc_beat.c
similarity index 100%
rename from drivers/char/hvc_beat.c
rename to drivers/tty/hvc/hvc_beat.c
diff --git a/drivers/char/hvc_console.c b/drivers/tty/hvc/hvc_console.c
similarity index 100%
rename from drivers/char/hvc_console.c
rename to drivers/tty/hvc/hvc_console.c
diff --git a/drivers/char/hvc_console.h b/drivers/tty/hvc/hvc_console.h
similarity index 100%
rename from drivers/char/hvc_console.h
rename to drivers/tty/hvc/hvc_console.h
diff --git a/drivers/char/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
similarity index 100%
rename from drivers/char/hvc_dcc.c
rename to drivers/tty/hvc/hvc_dcc.c
diff --git a/drivers/char/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
similarity index 100%
rename from drivers/char/hvc_irq.c
rename to drivers/tty/hvc/hvc_irq.c
diff --git a/drivers/char/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
similarity index 100%
rename from drivers/char/hvc_iseries.c
rename to drivers/tty/hvc/hvc_iseries.c
diff --git a/drivers/char/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
similarity index 100%
rename from drivers/char/hvc_iucv.c
rename to drivers/tty/hvc/hvc_iucv.c
diff --git a/drivers/char/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
similarity index 100%
rename from drivers/char/hvc_rtas.c
rename to drivers/tty/hvc/hvc_rtas.c
diff --git a/drivers/char/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
similarity index 100%
rename from drivers/char/hvc_tile.c
rename to drivers/tty/hvc/hvc_tile.c
diff --git a/drivers/char/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
similarity index 100%
rename from drivers/char/hvc_udbg.c
rename to drivers/tty/hvc/hvc_udbg.c
diff --git a/drivers/char/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
similarity index 98%
rename from drivers/char/hvc_vio.c
rename to drivers/tty/hvc/hvc_vio.c
index 27370e9..5e2f52b 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -39,7 +39,7 @@
 
 #include "hvc_console.h"
 
-char hvc_driver_name[] = "hvc_console";
+static const char hvc_driver_name[] = "hvc_console";
 
 static struct vio_device_id hvc_driver_table[] __devinitdata = {
 	{"serial", "hvterm1"},
diff --git a/drivers/char/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
similarity index 100%
rename from drivers/char/hvc_xen.c
rename to drivers/tty/hvc/hvc_xen.c
diff --git a/drivers/char/hvcs.c b/drivers/tty/hvc/hvcs.c
similarity index 100%
rename from drivers/char/hvcs.c
rename to drivers/tty/hvc/hvcs.c
diff --git a/drivers/char/hvsi.c b/drivers/tty/hvc/hvsi.c
similarity index 100%
rename from drivers/char/hvsi.c
rename to drivers/tty/hvc/hvsi.c
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 44b8412..aa2e5d3 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2414,6 +2414,7 @@
 
 	gsm->initiator = c->initiator;
 	gsm->mru = c->mru;
+	gsm->mtu = c->mtu;
 	gsm->encoding = c->encapsulation;
 	gsm->adaption = c->adaption;
 	gsm->n2 = c->n2;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 47d3228..52fc0c9 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -581,8 +581,9 @@
 			   __u8 __user *buf, size_t nr)
 {
 	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
-	int ret;
+	int ret = 0;
 	struct n_hdlc_buf *rbuf;
+	DECLARE_WAITQUEUE(wait, current);
 
 	if (debuglevel >= DEBUG_LEVEL_INFO)	
 		printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
@@ -598,57 +599,55 @@
 		return -EFAULT;
 	}
 
-	tty_lock();
+	add_wait_queue(&tty->read_wait, &wait);
 
 	for (;;) {
 		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
-			tty_unlock();
-			return -EIO;
+			ret = -EIO;
+			break;
 		}
+		if (tty_hung_up_p(file))
+			break;
 
-		n_hdlc = tty2n_hdlc (tty);
-		if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
-			 tty != n_hdlc->tty) {
-			tty_unlock();
-			return 0;
-		}
+		set_current_state(TASK_INTERRUPTIBLE);
 
 		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
-		if (rbuf)
+		if (rbuf) {
+			if (rbuf->count > nr) {
+				/* too large for caller's buffer */
+				ret = -EOVERFLOW;
+			} else {
+				if (copy_to_user(buf, rbuf->buf, rbuf->count))
+					ret = -EFAULT;
+				else
+					ret = rbuf->count;
+			}
+
+			if (n_hdlc->rx_free_buf_list.count >
+			    DEFAULT_RX_BUF_COUNT)
+				kfree(rbuf);
+			else
+				n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
 			break;
+		}
 			
 		/* no data */
 		if (file->f_flags & O_NONBLOCK) {
-			tty_unlock();
-			return -EAGAIN;
+			ret = -EAGAIN;
+			break;
 		}
-			
-		interruptible_sleep_on (&tty->read_wait);
+
+		schedule();
+
 		if (signal_pending(current)) {
-			tty_unlock();
-			return -EINTR;
+			ret = -EINTR;
+			break;
 		}
 	}
-		
-	if (rbuf->count > nr)
-		/* frame too large for caller's buffer (discard frame) */
-		ret = -EOVERFLOW;
-	else {
-		/* Copy the data to the caller's buffer */
-		if (copy_to_user(buf, rbuf->buf, rbuf->count))
-			ret = -EFAULT;
-		else
-			ret = rbuf->count;
-	}
-	
-	/* return HDLC buffer to free list unless the free list */
-	/* count has exceeded the default value, in which case the */
-	/* buffer is freed back to the OS to conserve memory */
-	if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
-		kfree(rbuf);
-	else	
-		n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
-	tty_unlock();
+
+	remove_wait_queue(&tty->read_wait, &wait);
+	__set_current_state(TASK_RUNNING);
+
 	return ret;
 	
 }	/* end of n_hdlc_tty_read() */
@@ -691,14 +690,15 @@
 		count = maxframe;
 	}
 	
-	tty_lock();
-
 	add_wait_queue(&tty->write_wait, &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
+
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
 	
-	/* Allocate transmit buffer */
-	/* sleep until transmit buffer available */		
-	while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) {
+		tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
+		if (tbuf)
+			break;
+
 		if (file->f_flags & O_NONBLOCK) {
 			error = -EAGAIN;
 			break;
@@ -719,7 +719,7 @@
 		}
 	}
 
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&tty->write_wait, &wait);
 
 	if (!error) {		
@@ -731,7 +731,7 @@
 		n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
 		n_hdlc_send_frames(n_hdlc,tty);
 	}
-	tty_unlock();
+
 	return error;
 	
 }	/* end of n_hdlc_tty_write() */
diff --git a/drivers/serial/21285.c b/drivers/tty/serial/21285.c
similarity index 100%
rename from drivers/serial/21285.c
rename to drivers/tty/serial/21285.c
diff --git a/drivers/serial/68328serial.c b/drivers/tty/serial/68328serial.c
similarity index 98%
rename from drivers/serial/68328serial.c
rename to drivers/tty/serial/68328serial.c
index be0ebce..de0160e 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,7 +262,7 @@
 
 static void receive_chars(struct m68k_serial *info, unsigned short rx)
 {
-	struct tty_struct *tty = info->port.tty;
+	struct tty_struct *tty = info->tty;
 	m68328_uart *uart = &uart_addr[info->line];
 	unsigned char ch, flag;
 
@@ -329,7 +329,7 @@
 		goto clear_and_return;
 	}
 
-	if((info->xmit_cnt <= 0) || info->port.tty->stopped) {
+	if((info->xmit_cnt <= 0) || info->tty->stopped) {
 		/* That's peculiar... TX ints off */
 		uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
 		goto clear_and_return;
@@ -383,7 +383,7 @@
 	struct m68k_serial	*info = container_of(work, struct m68k_serial, tqueue);
 	struct tty_struct	*tty;
 	
-	tty = info->port.tty;
+	tty = info->tty;
 	if (!tty)
 		return;
 #if 0
@@ -407,7 +407,7 @@
 	struct m68k_serial	*info = container_of(work, struct m68k_serial, tqueue_hangup);
 	struct tty_struct	*tty;
 	
-	tty = info->port.tty;
+	tty = info->tty;
 	if (!tty)
 		return;
 
@@ -451,8 +451,8 @@
 	uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
 #endif
 
-	if (info->port.tty)
-		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+	if (info->tty)
+		clear_bit(TTY_IO_ERROR, &info->tty->flags);
 	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
 
 	/*
@@ -486,8 +486,8 @@
 		info->xmit_buf = 0;
 	}
 
-	if (info->port.tty)
-		set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+	if (info->tty)
+		set_bit(TTY_IO_ERROR, &info->tty->flags);
 	
 	info->flags &= ~S_INITIALIZED;
 	local_irq_restore(flags);
@@ -553,9 +553,9 @@
 	unsigned cflag;
 	int	i;
 
-	if (!info->port.tty || !info->port.tty->termios)
+	if (!info->tty || !info->tty->termios)
 		return;
-	cflag = info->port.tty->termios->c_cflag;
+	cflag = info->tty->termios->c_cflag;
 	if (!(port = info->port))
 		return;
 
@@ -970,7 +970,6 @@
 static int rs_ioctl(struct tty_struct *tty, struct file * file,
 		    unsigned int cmd, unsigned long arg)
 {
-	int error;
 	struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
 	int retval;
 
@@ -1104,7 +1103,7 @@
 	tty_ldisc_flush(tty);
 	tty->closing = 0;
 	info->event = 0;
-	info->port.tty = NULL;
+	info->tty = NULL;
 #warning "This is not and has never been valid so fix it"	
 #if 0
 	if (tty->ldisc.num != ldiscs[N_TTY].num) {
@@ -1142,7 +1141,7 @@
 	info->event = 0;
 	info->count = 0;
 	info->flags &= ~S_NORMAL_ACTIVE;
-	info->port.tty = NULL;
+	info->tty = NULL;
 	wake_up_interruptible(&info->open_wait);
 }
 
@@ -1261,7 +1260,7 @@
 
 	info->count++;
 	tty->driver_data = info;
-	info->port.tty = tty;
+	info->tty = tty;
 
 	/*
 	 * Start up serial port
@@ -1338,7 +1337,7 @@
 	    info = &m68k_soft[i];
 	    info->magic = SERIAL_MAGIC;
 	    info->port = (int) &uart_addr[i];
-	    info->port.tty = NULL;
+	    info->tty = NULL;
 	    info->irq = uart_irqs[i];
 	    info->custom_divisor = 16;
 	    info->close_delay = 50;
diff --git a/drivers/serial/68328serial.h b/drivers/tty/serial/68328serial.h
similarity index 100%
rename from drivers/serial/68328serial.h
rename to drivers/tty/serial/68328serial.h
diff --git a/drivers/serial/68360serial.c b/drivers/tty/serial/68360serial.c
similarity index 99%
rename from drivers/serial/68360serial.c
rename to drivers/tty/serial/68360serial.c
index 88b1335..bc21eea 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/tty/serial/68360serial.c
@@ -2428,6 +2428,7 @@
 	/* .read_proc = rs_360_read_proc, */
 	.tiocmget = rs_360_tiocmget,
 	.tiocmset = rs_360_tiocmset,
+	.get_icount = rs_360_get_icount,
 };
 
 static int __init rs_360_init(void)
diff --git a/drivers/serial/8250.c b/drivers/tty/serial/8250.c
similarity index 99%
rename from drivers/serial/8250.c
rename to drivers/tty/serial/8250.c
index b25e6e4..3975df6 100644
--- a/drivers/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -236,7 +236,8 @@
 		.fifo_size	= 128,
 		.tx_loadsz	= 128,
 		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+		/* UART_CAP_EFR breaks the Billionton CF Bluetooth card. */
+		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP,
 	},
 	[PORT_16654] = {
 		.name		= "ST16654",
diff --git a/drivers/serial/8250.h b/drivers/tty/serial/8250.h
similarity index 100%
rename from drivers/serial/8250.h
rename to drivers/tty/serial/8250.h
diff --git a/drivers/serial/8250_accent.c b/drivers/tty/serial/8250_accent.c
similarity index 100%
rename from drivers/serial/8250_accent.c
rename to drivers/tty/serial/8250_accent.c
diff --git a/drivers/serial/8250_acorn.c b/drivers/tty/serial/8250_acorn.c
similarity index 100%
rename from drivers/serial/8250_acorn.c
rename to drivers/tty/serial/8250_acorn.c
diff --git a/drivers/serial/8250_boca.c b/drivers/tty/serial/8250_boca.c
similarity index 100%
rename from drivers/serial/8250_boca.c
rename to drivers/tty/serial/8250_boca.c
diff --git a/drivers/serial/8250_early.c b/drivers/tty/serial/8250_early.c
similarity index 100%
rename from drivers/serial/8250_early.c
rename to drivers/tty/serial/8250_early.c
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250_exar_st16c554.c
similarity index 100%
rename from drivers/serial/8250_exar_st16c554.c
rename to drivers/tty/serial/8250_exar_st16c554.c
diff --git a/drivers/serial/8250_fourport.c b/drivers/tty/serial/8250_fourport.c
similarity index 100%
rename from drivers/serial/8250_fourport.c
rename to drivers/tty/serial/8250_fourport.c
diff --git a/drivers/serial/8250_gsc.c b/drivers/tty/serial/8250_gsc.c
similarity index 100%
rename from drivers/serial/8250_gsc.c
rename to drivers/tty/serial/8250_gsc.c
diff --git a/drivers/serial/8250_hp300.c b/drivers/tty/serial/8250_hp300.c
similarity index 100%
rename from drivers/serial/8250_hp300.c
rename to drivers/tty/serial/8250_hp300.c
diff --git a/drivers/serial/8250_hub6.c b/drivers/tty/serial/8250_hub6.c
similarity index 100%
rename from drivers/serial/8250_hub6.c
rename to drivers/tty/serial/8250_hub6.c
diff --git a/drivers/serial/8250_mca.c b/drivers/tty/serial/8250_mca.c
similarity index 100%
rename from drivers/serial/8250_mca.c
rename to drivers/tty/serial/8250_mca.c
diff --git a/drivers/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
similarity index 100%
rename from drivers/serial/8250_pci.c
rename to drivers/tty/serial/8250_pci.c
diff --git a/drivers/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
similarity index 100%
rename from drivers/serial/8250_pnp.c
rename to drivers/tty/serial/8250_pnp.c
diff --git a/drivers/serial/Kconfig b/drivers/tty/serial/Kconfig
similarity index 99%
rename from drivers/serial/Kconfig
rename to drivers/tty/serial/Kconfig
index c1df767..2b83346 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -81,7 +81,7 @@
 	default SERIAL_8250
 
 config SERIAL_8250_PCI
-	tristate "8250/16550 PCI device support" if EMBEDDED
+	tristate "8250/16550 PCI device support" if EXPERT
 	depends on SERIAL_8250 && PCI
 	default SERIAL_8250
 	help
@@ -90,7 +90,7 @@
 	  Saves about 9K.
 
 config SERIAL_8250_PNP
-	tristate "8250/16550 PNP device support" if EMBEDDED
+	tristate "8250/16550 PNP device support" if EXPERT
 	depends on SERIAL_8250 && PNP
 	default SERIAL_8250
 	help
@@ -1518,6 +1518,7 @@
 config SERIAL_GRLIB_GAISLER_APBUART
 	tristate "GRLIB APBUART serial support"
 	depends on OF
+	select SERIAL_CORE
 	---help---
 	Add support for the GRLIB APBUART serial port.
 
diff --git a/drivers/serial/Makefile b/drivers/tty/serial/Makefile
similarity index 100%
rename from drivers/serial/Makefile
rename to drivers/tty/serial/Makefile
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
similarity index 100%
rename from drivers/serial/altera_jtaguart.c
rename to drivers/tty/serial/altera_jtaguart.c
diff --git a/drivers/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
similarity index 100%
rename from drivers/serial/altera_uart.c
rename to drivers/tty/serial/altera_uart.c
diff --git a/drivers/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
similarity index 100%
rename from drivers/serial/amba-pl010.c
rename to drivers/tty/serial/amba-pl010.c
diff --git a/drivers/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
similarity index 100%
rename from drivers/serial/amba-pl011.c
rename to drivers/tty/serial/amba-pl011.c
diff --git a/drivers/serial/apbuart.c b/drivers/tty/serial/apbuart.c
similarity index 100%
rename from drivers/serial/apbuart.c
rename to drivers/tty/serial/apbuart.c
diff --git a/drivers/serial/apbuart.h b/drivers/tty/serial/apbuart.h
similarity index 100%
rename from drivers/serial/apbuart.h
rename to drivers/tty/serial/apbuart.h
diff --git a/drivers/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
similarity index 99%
rename from drivers/serial/atmel_serial.c
rename to drivers/tty/serial/atmel_serial.c
index 3892666..2a1d52f 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1732,6 +1732,11 @@
 	device_init_wakeup(&pdev->dev, 1);
 	platform_set_drvdata(pdev, port);
 
+	if (port->rs485.flags & SER_RS485_ENABLED) {
+		UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
+		UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
+	}
+
 	return 0;
 
 err_add_port:
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
similarity index 100%
rename from drivers/serial/bcm63xx_uart.c
rename to drivers/tty/serial/bcm63xx_uart.c
diff --git a/drivers/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
similarity index 98%
rename from drivers/serial/bfin_5xx.c
rename to drivers/tty/serial/bfin_5xx.c
index e381b89..9b1ff2b 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -370,10 +370,8 @@
 {
 	struct bfin_serial_port *uart = dev_id;
 
-	spin_lock(&uart->port.lock);
 	while (UART_GET_LSR(uart) & DR)
 		bfin_serial_rx_chars(uart);
-	spin_unlock(&uart->port.lock);
 
 	return IRQ_HANDLED;
 }
@@ -490,9 +488,8 @@
 {
 	int x_pos, pos;
 
-	dma_disable_irq(uart->tx_dma_channel);
-	dma_disable_irq(uart->rx_dma_channel);
-	spin_lock_bh(&uart->port.lock);
+	dma_disable_irq_nosync(uart->rx_dma_channel);
+	spin_lock_bh(&uart->rx_lock);
 
 	/* 2D DMA RX buffer ring is used. Because curr_y_count and
 	 * curr_x_count can't be read as an atomic operation,
@@ -523,8 +520,7 @@
 		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
 	}
 
-	spin_unlock_bh(&uart->port.lock);
-	dma_enable_irq(uart->tx_dma_channel);
+	spin_unlock_bh(&uart->rx_lock);
 	dma_enable_irq(uart->rx_dma_channel);
 
 	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -571,7 +567,7 @@
 	unsigned short irqstat;
 	int x_pos, pos;
 
-	spin_lock(&uart->port.lock);
+	spin_lock(&uart->rx_lock);
 	irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
 	clear_dma_irqstat(uart->rx_dma_channel);
 
@@ -589,7 +585,7 @@
 		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
 	}
 
-	spin_unlock(&uart->port.lock);
+	spin_unlock(&uart->rx_lock);
 
 	return IRQ_HANDLED;
 }
@@ -1332,6 +1328,7 @@
 		}
 
 #ifdef CONFIG_SERIAL_BFIN_DMA
+		spin_lock_init(&uart->rx_lock);
 		uart->tx_done	    = 1;
 		uart->tx_count	    = 0;
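The Blackfin hunks above drop the shared port.lock in favour of a driver-private rx_lock that guards only the RX DMA ring (the retained comment notes that curr_x_count/curr_y_count cannot be read atomically), and the timer path now uses dma_disable_irq_nosync() so it never waits on the RX handler it is trying to exclude. A hedged sketch of that dedicated-lock pattern; the structure, field and function names are illustrative, and dma_disable_irq_nosync()/dma_enable_irq() are the Blackfin arch helpers seen in the hunk:

	struct my_uart {
		spinlock_t	rx_lock;	/* protects only the RX DMA ring state */
		int		rx_dma_channel;
		unsigned int	head, tail;	/* ring indices shared by IRQ and timer */
	};

	/* probe: spin_lock_init(&u->rx_lock); as in the @@ -1332 hunk above */

	static void my_rx_poll(struct my_uart *u)	/* timer callback, softirq context */
	{
		dma_disable_irq_nosync(u->rx_dma_channel);	/* keep the RX IRQ handler out */
		spin_lock_bh(&u->rx_lock);
		/* read the DMA counters, push received bytes, advance u->tail */
		spin_unlock_bh(&u->rx_lock);
		dma_enable_irq(u->rx_dma_channel);
	}

	static irqreturn_t my_rx_dma_irq(int irq, void *dev_id)	/* hard-IRQ context */
	{
		struct my_uart *u = dev_id;

		spin_lock(&u->rx_lock);		/* same ring update as the timer path */
		/* ... */
		spin_unlock(&u->rx_lock);
		return IRQ_HANDLED;
	}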
 
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
similarity index 100%
rename from drivers/serial/bfin_sport_uart.c
rename to drivers/tty/serial/bfin_sport_uart.c
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
similarity index 100%
rename from drivers/serial/bfin_sport_uart.h
rename to drivers/tty/serial/bfin_sport_uart.h
diff --git a/drivers/serial/clps711x.c b/drivers/tty/serial/clps711x.c
similarity index 100%
rename from drivers/serial/clps711x.c
rename to drivers/tty/serial/clps711x.c
diff --git a/drivers/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
similarity index 100%
rename from drivers/serial/cpm_uart/Makefile
rename to drivers/tty/serial/cpm_uart/Makefile
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart.h
rename to drivers/tty/serial/cpm_uart/cpm_uart.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_core.c
rename to drivers/tty/serial/cpm_uart/cpm_uart_core.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm1.c
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm1.h
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm2.c
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm2.h
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
diff --git a/drivers/serial/crisv10.c b/drivers/tty/serial/crisv10.c
similarity index 100%
rename from drivers/serial/crisv10.c
rename to drivers/tty/serial/crisv10.c
diff --git a/drivers/serial/crisv10.h b/drivers/tty/serial/crisv10.h
similarity index 100%
rename from drivers/serial/crisv10.h
rename to drivers/tty/serial/crisv10.h
diff --git a/drivers/serial/dz.c b/drivers/tty/serial/dz.c
similarity index 100%
rename from drivers/serial/dz.c
rename to drivers/tty/serial/dz.c
diff --git a/drivers/serial/dz.h b/drivers/tty/serial/dz.h
similarity index 100%
rename from drivers/serial/dz.h
rename to drivers/tty/serial/dz.h
diff --git a/drivers/serial/icom.c b/drivers/tty/serial/icom.c
similarity index 100%
rename from drivers/serial/icom.c
rename to drivers/tty/serial/icom.c
diff --git a/drivers/serial/icom.h b/drivers/tty/serial/icom.h
similarity index 100%
rename from drivers/serial/icom.h
rename to drivers/tty/serial/icom.h
diff --git a/drivers/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
similarity index 100%
rename from drivers/serial/ifx6x60.c
rename to drivers/tty/serial/ifx6x60.c
diff --git a/drivers/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
similarity index 100%
rename from drivers/serial/ifx6x60.h
rename to drivers/tty/serial/ifx6x60.h
diff --git a/drivers/serial/imx.c b/drivers/tty/serial/imx.c
similarity index 100%
rename from drivers/serial/imx.c
rename to drivers/tty/serial/imx.c
diff --git a/drivers/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
similarity index 100%
rename from drivers/serial/ioc3_serial.c
rename to drivers/tty/serial/ioc3_serial.c
diff --git a/drivers/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
similarity index 100%
rename from drivers/serial/ioc4_serial.c
rename to drivers/tty/serial/ioc4_serial.c
diff --git a/drivers/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
similarity index 100%
rename from drivers/serial/ip22zilog.c
rename to drivers/tty/serial/ip22zilog.c
diff --git a/drivers/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h
similarity index 100%
rename from drivers/serial/ip22zilog.h
rename to drivers/tty/serial/ip22zilog.h
diff --git a/drivers/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
similarity index 100%
rename from drivers/serial/jsm/Makefile
rename to drivers/tty/serial/jsm/Makefile
diff --git a/drivers/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
similarity index 100%
rename from drivers/serial/jsm/jsm.h
rename to drivers/tty/serial/jsm/jsm.h
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
similarity index 100%
rename from drivers/serial/jsm/jsm_driver.c
rename to drivers/tty/serial/jsm/jsm_driver.c
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
similarity index 100%
rename from drivers/serial/jsm/jsm_neo.c
rename to drivers/tty/serial/jsm/jsm_neo.c
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
similarity index 100%
rename from drivers/serial/jsm/jsm_tty.c
rename to drivers/tty/serial/jsm/jsm_tty.c
diff --git a/drivers/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
similarity index 100%
rename from drivers/serial/kgdboc.c
rename to drivers/tty/serial/kgdboc.c
diff --git a/drivers/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
similarity index 100%
rename from drivers/serial/m32r_sio.c
rename to drivers/tty/serial/m32r_sio.c
diff --git a/drivers/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
similarity index 100%
rename from drivers/serial/m32r_sio.h
rename to drivers/tty/serial/m32r_sio.h
diff --git a/drivers/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
similarity index 100%
rename from drivers/serial/m32r_sio_reg.h
rename to drivers/tty/serial/m32r_sio_reg.h
diff --git a/drivers/serial/max3100.c b/drivers/tty/serial/max3100.c
similarity index 99%
rename from drivers/serial/max3100.c
rename to drivers/tty/serial/max3100.c
index beb1afa2..7b951ad 100644
--- a/drivers/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@
 	s->rts = 0;
 
 	sprintf(b, "max3100-%d", s->minor);
-	s->workqueue = create_freezeable_workqueue(b);
+	s->workqueue = create_freezable_workqueue(b);
 	if (!s->workqueue) {
 		dev_warn(&s->spi->dev, "cannot create workqueue\n");
 		return -EBUSY;
diff --git a/drivers/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
similarity index 100%
rename from drivers/serial/max3107-aava.c
rename to drivers/tty/serial/max3107-aava.c
diff --git a/drivers/serial/max3107.c b/drivers/tty/serial/max3107.c
similarity index 99%
rename from drivers/serial/max3107.c
rename to drivers/tty/serial/max3107.c
index 910870e..750b4f6 100644
--- a/drivers/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@
 	struct max3107_port *s = container_of(port, struct max3107_port, port);
 
 	/* Initialize work queue */
-	s->workqueue = create_freezeable_workqueue("max3107");
+	s->workqueue = create_freezable_workqueue("max3107");
 	if (!s->workqueue) {
 		dev_err(&s->spi->dev, "Workqueue creation failed\n");
 		return -EBUSY;
diff --git a/drivers/serial/max3107.h b/drivers/tty/serial/max3107.h
similarity index 100%
rename from drivers/serial/max3107.h
rename to drivers/tty/serial/max3107.h
diff --git a/drivers/serial/mcf.c b/drivers/tty/serial/mcf.c
similarity index 100%
rename from drivers/serial/mcf.c
rename to drivers/tty/serial/mcf.c
diff --git a/drivers/serial/mfd.c b/drivers/tty/serial/mfd.c
similarity index 100%
rename from drivers/serial/mfd.c
rename to drivers/tty/serial/mfd.c
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
similarity index 100%
rename from drivers/serial/mpc52xx_uart.c
rename to drivers/tty/serial/mpc52xx_uart.c
diff --git a/drivers/serial/mpsc.c b/drivers/tty/serial/mpsc.c
similarity index 100%
rename from drivers/serial/mpsc.c
rename to drivers/tty/serial/mpsc.c
diff --git a/drivers/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
similarity index 100%
rename from drivers/serial/mrst_max3110.c
rename to drivers/tty/serial/mrst_max3110.c
diff --git a/drivers/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
similarity index 100%
rename from drivers/serial/mrst_max3110.h
rename to drivers/tty/serial/mrst_max3110.h
diff --git a/drivers/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
similarity index 100%
rename from drivers/serial/msm_serial.c
rename to drivers/tty/serial/msm_serial.c
diff --git a/drivers/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
similarity index 100%
rename from drivers/serial/msm_serial.h
rename to drivers/tty/serial/msm_serial.h
diff --git a/drivers/serial/mux.c b/drivers/tty/serial/mux.c
similarity index 100%
rename from drivers/serial/mux.c
rename to drivers/tty/serial/mux.c
diff --git a/drivers/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
similarity index 100%
rename from drivers/serial/netx-serial.c
rename to drivers/tty/serial/netx-serial.c
diff --git a/drivers/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
similarity index 100%
rename from drivers/serial/nwpserial.c
rename to drivers/tty/serial/nwpserial.c
diff --git a/drivers/serial/of_serial.c b/drivers/tty/serial/of_serial.c
similarity index 100%
rename from drivers/serial/of_serial.c
rename to drivers/tty/serial/of_serial.c
diff --git a/drivers/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
similarity index 100%
rename from drivers/serial/omap-serial.c
rename to drivers/tty/serial/omap-serial.c
diff --git a/drivers/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
similarity index 100%
rename from drivers/serial/pch_uart.c
rename to drivers/tty/serial/pch_uart.c
diff --git a/drivers/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
similarity index 100%
rename from drivers/serial/pmac_zilog.c
rename to drivers/tty/serial/pmac_zilog.c
diff --git a/drivers/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
similarity index 100%
rename from drivers/serial/pmac_zilog.h
rename to drivers/tty/serial/pmac_zilog.h
diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
similarity index 100%
rename from drivers/serial/pnx8xxx_uart.c
rename to drivers/tty/serial/pnx8xxx_uart.c
diff --git a/drivers/serial/pxa.c b/drivers/tty/serial/pxa.c
similarity index 100%
rename from drivers/serial/pxa.c
rename to drivers/tty/serial/pxa.c
diff --git a/drivers/serial/s3c2400.c b/drivers/tty/serial/s3c2400.c
similarity index 100%
rename from drivers/serial/s3c2400.c
rename to drivers/tty/serial/s3c2400.c
diff --git a/drivers/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
similarity index 100%
rename from drivers/serial/s3c2410.c
rename to drivers/tty/serial/s3c2410.c
diff --git a/drivers/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
similarity index 100%
rename from drivers/serial/s3c2412.c
rename to drivers/tty/serial/s3c2412.c
diff --git a/drivers/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
similarity index 100%
rename from drivers/serial/s3c2440.c
rename to drivers/tty/serial/s3c2440.c
diff --git a/drivers/serial/s3c24a0.c b/drivers/tty/serial/s3c24a0.c
similarity index 100%
rename from drivers/serial/s3c24a0.c
rename to drivers/tty/serial/s3c24a0.c
diff --git a/drivers/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
similarity index 100%
rename from drivers/serial/s3c6400.c
rename to drivers/tty/serial/s3c6400.c
diff --git a/drivers/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
similarity index 100%
rename from drivers/serial/s5pv210.c
rename to drivers/tty/serial/s5pv210.c
diff --git a/drivers/serial/sa1100.c b/drivers/tty/serial/sa1100.c
similarity index 100%
rename from drivers/serial/sa1100.c
rename to drivers/tty/serial/sa1100.c
diff --git a/drivers/serial/samsung.c b/drivers/tty/serial/samsung.c
similarity index 99%
rename from drivers/serial/samsung.c
rename to drivers/tty/serial/samsung.c
index 7ac2bf5..2335eda 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -883,10 +883,10 @@
 
 static struct uart_driver s3c24xx_uart_drv = {
 	.owner		= THIS_MODULE,
-	.dev_name	= "s3c2410_serial",
+	.driver_name	= "s3c2410_serial",
 	.nr		= CONFIG_SERIAL_SAMSUNG_UARTS,
 	.cons		= S3C24XX_SERIAL_CONSOLE,
-	.driver_name	= S3C24XX_SERIAL_NAME,
+	.dev_name	= S3C24XX_SERIAL_NAME,
 	.major		= S3C24XX_SERIAL_MAJOR,
 	.minor		= S3C24XX_SERIAL_MINOR,
 };
diff --git a/drivers/serial/samsung.h b/drivers/tty/serial/samsung.h
similarity index 100%
rename from drivers/serial/samsung.h
rename to drivers/tty/serial/samsung.h
diff --git a/drivers/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
similarity index 99%
rename from drivers/serial/sb1250-duart.c
rename to drivers/tty/serial/sb1250-duart.c
index a2f2b32..602d984 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -829,7 +829,7 @@
 #ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
 /*
  * Serial console stuff.  Very basic, polling driver for doing serial
- * console output.  The console_sem is held by the caller, so we
+ * console output.  The console_lock is held by the caller, so we
  * shouldn't be interrupted for more console activity.
  */
 static void sbd_console_putchar(struct uart_port *uport, int ch)
diff --git a/drivers/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
similarity index 100%
rename from drivers/serial/sc26xx.c
rename to drivers/tty/serial/sc26xx.c
diff --git a/drivers/serial/serial_core.c b/drivers/tty/serial/serial_core.c
similarity index 100%
rename from drivers/serial/serial_core.c
rename to drivers/tty/serial/serial_core.c
diff --git a/drivers/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
similarity index 99%
rename from drivers/serial/serial_cs.c
rename to drivers/tty/serial/serial_cs.c
index 93760b2..1ef4df9 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
 	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
 	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
 	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
 	PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
 	PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
similarity index 100%
rename from drivers/serial/serial_ks8695.c
rename to drivers/tty/serial/serial_ks8695.c
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/tty/serial/serial_lh7a40x.c
similarity index 100%
rename from drivers/serial/serial_lh7a40x.c
rename to drivers/tty/serial/serial_lh7a40x.c
diff --git a/drivers/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
similarity index 100%
rename from drivers/serial/serial_txx9.c
rename to drivers/tty/serial/serial_txx9.c
diff --git a/drivers/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
similarity index 95%
rename from drivers/serial/sh-sci.c
rename to drivers/tty/serial/sh-sci.c
index c291b3a..92c91c8 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -3,7 +3,7 @@
  *
  * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
  *
- *  Copyright (C) 2002 - 2008  Paul Mundt
+ *  Copyright (C) 2002 - 2011  Paul Mundt
  *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
  *
  * based off of the old drivers/char/sh-sci.c by:
@@ -81,14 +81,22 @@
 	struct timer_list	break_timer;
 	int			break_flag;
 
+	/* SCSCR initialization */
+	unsigned int		scscr;
+
+	/* SCBRR calculation algo */
+	unsigned int		scbrr_algo_id;
+
 	/* Interface clock */
 	struct clk		*iclk;
 	/* Function clock */
 	struct clk		*fclk;
 
 	struct list_head	node;
+
 	struct dma_chan			*chan_tx;
 	struct dma_chan			*chan_rx;
+
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct device			*dma_dev;
 	unsigned int			slave_tx;
@@ -415,9 +423,9 @@
 	if (!(status & SCxSR_TDxE(port))) {
 		ctrl = sci_in(port, SCSCR);
 		if (uart_circ_empty(xmit))
-			ctrl &= ~SCI_CTRL_FLAGS_TIE;
+			ctrl &= ~SCSCR_TIE;
 		else
-			ctrl |= SCI_CTRL_FLAGS_TIE;
+			ctrl |= SCSCR_TIE;
 		sci_out(port, SCSCR, ctrl);
 		return;
 	}
@@ -459,7 +467,7 @@
 			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
 		}
 
-		ctrl |= SCI_CTRL_FLAGS_TIE;
+		ctrl |= SCSCR_TIE;
 		sci_out(port, SCSCR, ctrl);
 	}
 }
@@ -708,7 +716,7 @@
 			disable_irq_nosync(irq);
 			scr |= 0x4000;
 		} else {
-			scr &= ~SCI_CTRL_FLAGS_RIE;
+			scr &= ~SCSCR_RIE;
 		}
 		sci_out(port, SCSCR, scr);
 		/* Clear current interrupt */
@@ -777,6 +785,18 @@
 	return IRQ_HANDLED;
 }
 
+static inline unsigned long port_rx_irq_mask(struct uart_port *port)
+{
+	/*
+	 * Not all ports (such as SCIFA) will support REIE. Rather than
+	 * special-casing the port type, we check the port initialization
+	 * IRQ enable mask to see whether the IRQ is desired at all. If
+	 * it's unset, it's logically inferred that there's no point in
+	 * testing for it.
+	 */
+	return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE);
+}
+
 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
 {
 	unsigned short ssr_status, scr_status, err_enabled;
@@ -786,22 +806,25 @@
 
 	ssr_status = sci_in(port, SCxSR);
 	scr_status = sci_in(port, SCSCR);
-	err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
+	err_enabled = scr_status & port_rx_irq_mask(port);
 
 	/* Tx Interrupt */
-	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) &&
+	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
 	    !s->chan_tx)
 		ret = sci_tx_interrupt(irq, ptr);
+
 	/*
 	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
 	 * DR flags
 	 */
 	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
-	    (scr_status & SCI_CTRL_FLAGS_RIE))
+	    (scr_status & SCSCR_RIE))
 		ret = sci_rx_interrupt(irq, ptr);
+
 	/* Error Interrupt */
 	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
 		ret = sci_er_interrupt(irq, ptr);
+
 	/* Break Interrupt */
 	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
 		ret = sci_br_interrupt(irq, ptr);
@@ -951,7 +974,7 @@
 		schedule_work(&s->work_tx);
 	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		u16 ctrl = sci_in(port, SCSCR);
-		sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
 	}
 
 	spin_unlock_irqrestore(&port->lock, flags);
@@ -1214,14 +1237,16 @@
 		if (new != scr)
 			sci_out(port, SCSCR, new);
 	}
+
 	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
 	    s->cookie_tx < 0)
 		schedule_work(&s->work_tx);
 #endif
+
 	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
 		ctrl = sci_in(port, SCSCR);
-		sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
 	}
 }
 
@@ -1231,20 +1256,24 @@
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x8000;
-	ctrl &= ~SCI_CTRL_FLAGS_TIE;
+
+	ctrl &= ~SCSCR_TIE;
+
 	sci_out(port, SCSCR, ctrl);
 }
 
 static void sci_start_rx(struct uart_port *port)
 {
-	unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE;
+	unsigned short ctrl;
 
-	/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
-	ctrl |= sci_in(port, SCSCR);
+	ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
+
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
+
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1252,11 +1281,13 @@
 {
 	unsigned short ctrl;
 
-	/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
-	ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
+
+	ctrl &= ~port_rx_irq_mask(port);
+
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1296,7 +1327,7 @@
 		scr &= ~0x4000;
 		enable_irq(s->irqs[1]);
 	}
-	sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
+	sci_out(port, SCSCR, scr | SCSCR_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
 }
@@ -1442,12 +1473,31 @@
 		s->disable(port);
 }
 
+static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
+				   unsigned long freq)
+{
+	switch (algo_id) {
+	case SCBRR_ALGO_1:
+		return ((freq + 16 * bps) / (16 * bps) - 1);
+	case SCBRR_ALGO_2:
+		return ((freq + 16 * bps) / (32 * bps) - 1);
+	case SCBRR_ALGO_3:
+		return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
+	case SCBRR_ALGO_4:
+		return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
+	case SCBRR_ALGO_5:
+		return (((freq * 1000 / 32) / bps) - 1);
+	}
+
+	/* Warn, but use a safe default */
+	WARN_ON(1);
+	return ((freq + 16 * bps) / (32 * bps) - 1);
+}
+
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct sci_port *s = to_sci_port(port);
-#endif
 	unsigned int status, baud, smr_val, max_baud;
 	int t = -1;
 	u16 scfcr = 0;
@@ -1464,7 +1514,7 @@
 
 	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
 	if (likely(baud && port->uartclk))
-		t = SCBRR_VALUE(baud, port->uartclk);
+		t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk);
 
 	do {
 		status = sci_in(port, SCxSR);
@@ -1490,7 +1540,7 @@
 	sci_out(port, SCSMR, smr_val);
 
 	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
-		SCSCR_INIT(port));
+		s->scscr);
 
 	if (t > 0) {
 		if (t >= 256) {
@@ -1506,7 +1556,7 @@
 	sci_init_pins(port, termios->c_cflag);
 	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
 
-	sci_out(port, SCSCR, SCSCR_INIT(port));
+	sci_out(port, SCSCR, s->scscr);
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	/*
@@ -1679,9 +1729,11 @@
 	port->mapbase	= p->mapbase;
 	port->membase	= p->membase;
 
-	port->irq	= p->irqs[SCIx_TXI_IRQ];
-	port->flags	= p->flags;
-	sci_port->type	= port->type = p->type;
+	port->irq		= p->irqs[SCIx_TXI_IRQ];
+	port->flags		= p->flags;
+	sci_port->type		= port->type = p->type;
+	sci_port->scscr		= p->scscr;
+	sci_port->scbrr_algo_id	= p->scbrr_algo_id;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	sci_port->dma_dev	= p->dma_dev;
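The SCBRR computation that used to hide behind per-CPU SCBRR_VALUE() macros is now the explicit sci_scbrr_calc() helper selected by scbrr_algo_id. As a worked example (the clock value is assumed for illustration, not taken from any board file): with SCBRR_ALGO_2, a 48 MHz module clock and 115200 bps give (48000000 + 16*115200) / (32*115200) - 1 = 12, which is the byte written to SCBRR. A stand-alone check of the same arithmetic:

	/* User-space check of the SCBRR_ALGO_2 formula from sci_scbrr_calc(). */
	#include <stdio.h>

	static unsigned int scbrr_algo2(unsigned int bps, unsigned long freq)
	{
		return (freq + 16 * bps) / (32 * bps) - 1;
	}

	int main(void)
	{
		/* 48 MHz is an assumed example module clock */
		printf("SCBRR = %u\n", scbrr_algo2(115200, 48000000UL));	/* prints 12 */
		return 0;
	}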
diff --git a/drivers/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
similarity index 76%
rename from drivers/serial/sh-sci.h
rename to drivers/tty/serial/sh-sci.h
index 4bc614e..b223d6c 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -15,27 +15,17 @@
     defined(CONFIG_CPU_SUBTYPE_SH7709)
 # define SCPCR  0xA4000116 /* 16 bit SCI and SCIF */
 # define SCPDR  0xA4000136 /* 8  bit SCI and SCIF */
-# define SCSCR_INIT(port)          0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7705)
 # define SCIF0		0xA4400000
 # define SCIF2		0xA4410000
-# define SCSMR_Ir	0xA44A0000
-# define IRDA_SCIF	SCIF0
 # define SCPCR 0xA4000116
 # define SCPDR 0xA4000136
-
-/* Set the clock source,
- * SCIF2 (0xA4410000) -> External clock, SCK pin used as clock input
- * SCIF0 (0xA4400000) -> Internal clock, SCK pin as serial clock output
- */
-# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0
 #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
       defined(CONFIG_CPU_SUBTYPE_SH7721) || \
       defined(CONFIG_ARCH_SH73A0) || \
       defined(CONFIG_ARCH_SH7367) || \
       defined(CONFIG_ARCH_SH7377) || \
       defined(CONFIG_ARCH_SH7372)
-# define SCSCR_INIT(port)  0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
 # define PORT_PTCR	   0xA405011EUL
 # define PORT_PVCR	   0xA4050122UL
 # define SCIF_ORER	   0x0200   /* overrun error bit */
@@ -43,7 +33,6 @@
 # define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
 # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
-# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7750)  || \
       defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
       defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
@@ -53,39 +42,31 @@
 # define SCSPTR1 0xffe0001c /* 8  bit SCI */
 # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
-# define SCSCR_INIT(port) (((port)->type == PORT_SCI) ? \
-	0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \
-	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ )
 #elif defined(CONFIG_CPU_SUBTYPE_SH7760)
 # define SCSPTR0 0xfe600024 /* 16 bit SCIF */
 # define SCSPTR1 0xfe610024 /* 16 bit SCIF */
 # define SCSPTR2 0xfe620024 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)          0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
 # define SCSPTR0 0xA4400000	  /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
 # define PACR 0xa4050100
 # define PBCR 0xa4050102
-# define SCSCR_INIT(port)          0x3B
 #elif defined(CONFIG_CPU_SUBTYPE_SH7343)
 # define SCSPTR0 0xffe00010	/* 16 bit SCIF */
 # define SCSPTR1 0xffe10010	/* 16 bit SCIF */
 # define SCSPTR2 0xffe20010	/* 16 bit SCIF */
 # define SCSPTR3 0xffe30010	/* 16 bit SCIF */
-# define SCSCR_INIT(port) 0x32	/* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7722)
 # define PADR			0xA4050120
 # define PSDR			0xA405013e
 # define PWDR			0xA4050166
 # define PSCR			0xA405011E
 # define SCIF_ORER		0x0001	/* overrun error bit */
-# define SCSCR_INIT(port)	0x0038	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7366)
 # define SCPDR0			0xA405013E      /* 16 bit SCIF0 PSDR */
 # define SCSPTR0		SCPDR0
 # define SCIF_ORER		0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x0038  /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7723)
 # define SCSPTR0                0xa4050160
 # define SCSPTR1                0xa405013e
@@ -94,62 +75,38 @@
 # define SCSPTR4                0xa4050128
 # define SCSPTR5                0xa4050128
 # define SCIF_ORER              0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)       0x0038  /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7724)
 # define SCIF_ORER              0x0001  /* overrun error bit */
-# define SCSCR_INIT(port) ((port)->type == PORT_SCIFA ? \
-	0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \
-	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ )
 #elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
 # define SCSPTR2 0xffe80020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-# define SCIF_BASE_ADDR    0x01030000
-# define SCIF_ADDR_SH5     PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
 # define SCIF_PTR2_OFFS    0x0000020
-# define SCIF_LSR2_OFFS    0x0000024
 # define SCSPTR2           ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */
-# define SCLSR2            ((port->mapbase)+SCIF_LSR2_OFFS) /* 16 bit SCIF */
-# define SCSCR_INIT(port)  0x38		/* TIE=0,RIE=0, TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
-# define SCSCR_INIT(port)          0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
 # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
 #elif defined(CONFIG_H8S2678)
-# define SCSCR_INIT(port)          0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
 # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
 #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
 # define SCSPTR0 0xfe4b0020
 # define SCSPTR1 0xfe4b0020
 # define SCSPTR2 0xfe4b0020
 # define SCIF_ORER 0x0001
-# define SCSCR_INIT(port)	0x38
 # define SCIF_ONLY
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 # define SCSPTR0 0xffe00024 /* 16 bit SCIF */
 # define SCSPTR1 0xffe08024 /* 16 bit SCIF */
 # define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x38	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7770)
 # define SCSPTR0 0xff923020 /* 16 bit SCIF */
 # define SCSPTR1 0xff924020 /* 16 bit SCIF */
 # define SCSPTR2 0xff925020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x3c /* TIE=0,RIE=0,TE=1,RE=1,REIE=1,cke=2 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 # define SCSPTR0	0xffe00024	/* 16 bit SCIF */
 # define SCSPTR1	0xffe10024	/* 16 bit SCIF */
 # define SCIF_ORER	0x0001		/* Overrun error bit */
-
-#if defined(CONFIG_SH_SH2007)
-/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=0 */
-# define SCSCR_INIT(port)	0x38
-#else
-/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=1 */
-# define SCSCR_INIT(port)	0x3a
-#endif
-
 #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
       defined(CONFIG_CPU_SUBTYPE_SH7786)
 # define SCSPTR0	0xffea0024	/* 16 bit SCIF */
@@ -159,7 +116,6 @@
 # define SCSPTR4	0xffee0024	/* 16 bit SCIF */
 # define SCSPTR5	0xffef0024	/* 16 bit SCIF */
 # define SCIF_ORER	0x0001		/* Overrun error bit */
-# define SCSCR_INIT(port)	0x3a	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
       defined(CONFIG_CPU_SUBTYPE_SH7203) || \
       defined(CONFIG_CPU_SUBTYPE_SH7206) || \
@@ -174,52 +130,21 @@
 #  define SCSPTR6 0xfffeB020 /* 16 bit SCIF */
 #  define SCSPTR7 0xfffeB820 /* 16 bit SCIF */
 # endif
-# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 # define SCSPTR0 0xf8400020 /* 16 bit SCIF */
 # define SCSPTR1 0xf8410020 /* 16 bit SCIF */
 # define SCSPTR2 0xf8420020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SHX3)
 # define SCSPTR0 0xffc30020		/* 16 bit SCIF */
 # define SCSPTR1 0xffc40020		/* 16 bit SCIF */
 # define SCSPTR2 0xffc50020		/* 16 bit SCIF */
 # define SCSPTR3 0xffc60020		/* 16 bit SCIF */
 # define SCIF_ORER 0x0001		/* Overrun error bit */
-# define SCSCR_INIT(port)	0x38	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #else
 # error CPU subtype not defined
 #endif
 
-/* SCSCR */
-#define SCI_CTRL_FLAGS_TIE  0x80 /* all */
-#define SCI_CTRL_FLAGS_RIE  0x40 /* all */
-#define SCI_CTRL_FLAGS_TE   0x20 /* all */
-#define SCI_CTRL_FLAGS_RE   0x10 /* all */
-#if defined(CONFIG_CPU_SUBTYPE_SH7750)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7091)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7722)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7751)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7763)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7780)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7785)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7786)  || \
-    defined(CONFIG_CPU_SUBTYPE_SHX3)
-#define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-#define SCI_CTRL_FLAGS_REIE ((port)->type == PORT_SCIFA ? 0 : 8)
-#else
-#define SCI_CTRL_FLAGS_REIE 0
-#endif
-/*      SCI_CTRL_FLAGS_MPIE 0x08  * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/*      SCI_CTRL_FLAGS_TEIE 0x04  * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/*      SCI_CTRL_FLAGS_CKE1 0x02  * all */
-/*      SCI_CTRL_FLAGS_CKE0 0x01  * 7707 SCI/SCIF, 7708 SCI, 7709 SCI/SCIF, 7750 SCI */
-
 /* SCxSR SCI */
 #define SCI_TDRE  0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
 #define SCI_RDRF  0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
@@ -300,23 +225,11 @@
 /* SCFCR */
 #define SCFCR_RFRST 0x0002
 #define SCFCR_TFRST 0x0004
-#define SCFCR_TCRST 0x4000
 #define SCFCR_MCE   0x0008
 
 #define SCI_MAJOR		204
 #define SCI_MINOR_START		8
 
-/* Generic serial flags */
-#define SCI_RX_THROTTLE		0x0000001
-
-#define SCI_MAGIC 0xbabeface
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define SCI_EVENT_WRITE_WAKEUP	0
-
 #define SCI_IN(size, offset)					\
   if ((size) == 8) {						\
     return ioread8(port->membase + (offset));			\
@@ -445,8 +358,6 @@
 SCIF_FNS(SCSMR,  0x00, 16)
 SCIF_FNS(SCBRR,  0x04,  8)
 SCIF_FNS(SCSCR,  0x08, 16)
-SCIF_FNS(SCTDSR, 0x0c,  8)
-SCIF_FNS(SCFER,  0x10, 16)
 SCIF_FNS(SCxSR,  0x14, 16)
 SCIF_FNS(SCFCR,  0x18, 16)
 SCIF_FNS(SCFDR,  0x1c, 16)
@@ -476,8 +387,6 @@
 SCIx_FNS(SCxSR,  0x14, 16, 0x10, 16)
 SCIx_FNS(SCxRDR, 0x24,  8, 0x14,  8)
 SCIx_FNS(SCSPTR, 0,     0,    0,  0)
-SCIF_FNS(SCTDSR, 0x0c,  8)
-SCIF_FNS(SCFER,  0x10, 16)
 SCIF_FNS(SCFCR,  0x18, 16)
 SCIF_FNS(SCFDR,  0x1c, 16)
 SCIF_FNS(SCLSR,  0x24, 16)
@@ -503,7 +412,6 @@
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 SCIF_FNS(SCFDR,				0,  0, 0x1C, 16)
 SCIF_FNS(SCSPTR2,			0,  0, 0x20, 16)
-SCIF_FNS(SCLSR2,			0,  0, 0x24, 16)
 SCIF_FNS(SCTFDR,		     0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCRFDR,		     0x0e, 16, 0x20, 16)
 SCIF_FNS(SCSPTR,			0,  0, 0x24, 16)
@@ -597,64 +505,3 @@
 	return 1;
 }
 #endif
-
-/*
- * Values for the BitRate Register (SCBRR)
- *
- * The values are actually divisors for a frequency which can
- * be internal to the SH3 (14.7456MHz) or derived from an external
- * clock source.  This driver assumes the internal clock is used;
- * to support using an external clock source, config options or
- * possibly command-line options would need to be added.
- *
- * Also, to support speeds below 2400 (why?) the lower 2 bits of
- * the SCSMR register would also need to be set to non-zero values.
- *
- * -- Greg Banks 27Feb2000
- *
- * Answer: The SCBRR register is only eight bits, and the value in
- * it gets larger with lower baud rates. At around 2400 (depending on
- * the peripherial module clock) you run out of bits. However the
- * lower two bits of SCSMR allow the module clock to be divided down,
- * scaling the value which is needed in SCBRR.
- *
- * -- Stuart Menefy - 23 May 2000
- *
- * I meant, why would anyone bother with bitrates below 2400.
- *
- * -- Greg Banks - 7Jul2000
- *
- * You "speedist"!  How will I use my 110bps ASR-33 teletype with paper
- * tape reader as a console!
- *
- * -- Mitch Davis - 15 Jul 2000
- */
-
-#if (defined(CONFIG_CPU_SUBTYPE_SH7780)  || \
-     defined(CONFIG_CPU_SUBTYPE_SH7785)  || \
-     defined(CONFIG_CPU_SUBTYPE_SH7786)) && \
-    !defined(CONFIG_SH_SH2007)
-#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7721) || \
-      defined(CONFIG_ARCH_SH73A0) || \
-      defined(CONFIG_ARCH_SH7367) || \
-      defined(CONFIG_ARCH_SH7377) || \
-      defined(CONFIG_ARCH_SH7372)
-#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
-      defined(CONFIG_CPU_SUBTYPE_SH7724)
-static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
-{
-	if (port->type == PORT_SCIF)
-		return (clk+16*bps)/(32*bps)-1;
-	else
-		return ((clk*2)+16*bps)/(16*bps)-1;
-}
-#define SCBRR_VALUE(bps, clk) scbrr_calc(port, bps, clk)
-#elif defined(__H8300H__) || defined(__H8300S__)
-#define SCBRR_VALUE(bps, clk) (((clk*1000/32)/bps)-1)
-#else /* Generic SH */
-#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(32*bps)-1)
-#endif
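With SCSCR_INIT() and SCBRR_VALUE() removed from the header, the SCSCR initialization value and the divisor algorithm now arrive through platform data, which the probe hunk above copies into the sci_port (p->scscr, p->scbrr_algo_id). A hedged sketch of what a board file would pass, assuming the SCSCR_* bit names and SCBRR_ALGO_2 from <linux/serial_sci.h>; the address and IRQ numbers are examples only, and the exact field set of struct plat_sci_port varies between kernel versions:

	#include <linux/serial_sci.h>

	static struct plat_sci_port board_scif0_data = {
		.mapbase	= 0xffe00000,		/* example address */
		.flags		= UPF_BOOT_AUTOCONF,
		.type		= PORT_SCIF,
		.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,	/* was SCSCR_INIT() == 0x38 */
		.scbrr_algo_id	= SCBRR_ALGO_2,		/* the common divide-by-32 formula */
		.irqs		= { 80, 80, 80, 80 },	/* example ERI/RXI/TXI/BRI IRQs */
	};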
diff --git a/drivers/serial/sn_console.c b/drivers/tty/serial/sn_console.c
similarity index 100%
rename from drivers/serial/sn_console.c
rename to drivers/tty/serial/sn_console.c
diff --git a/drivers/serial/suncore.c b/drivers/tty/serial/suncore.c
similarity index 100%
rename from drivers/serial/suncore.c
rename to drivers/tty/serial/suncore.c
diff --git a/drivers/serial/suncore.h b/drivers/tty/serial/suncore.h
similarity index 100%
rename from drivers/serial/suncore.h
rename to drivers/tty/serial/suncore.h
diff --git a/drivers/serial/sunhv.c b/drivers/tty/serial/sunhv.c
similarity index 100%
rename from drivers/serial/sunhv.c
rename to drivers/tty/serial/sunhv.c
diff --git a/drivers/serial/sunsab.c b/drivers/tty/serial/sunsab.c
similarity index 100%
rename from drivers/serial/sunsab.c
rename to drivers/tty/serial/sunsab.c
diff --git a/drivers/serial/sunsab.h b/drivers/tty/serial/sunsab.h
similarity index 100%
rename from drivers/serial/sunsab.h
rename to drivers/tty/serial/sunsab.h
diff --git a/drivers/serial/sunsu.c b/drivers/tty/serial/sunsu.c
similarity index 100%
rename from drivers/serial/sunsu.c
rename to drivers/tty/serial/sunsu.c
diff --git a/drivers/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
similarity index 100%
rename from drivers/serial/sunzilog.c
rename to drivers/tty/serial/sunzilog.c
diff --git a/drivers/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h
similarity index 100%
rename from drivers/serial/sunzilog.h
rename to drivers/tty/serial/sunzilog.h
diff --git a/drivers/serial/timbuart.c b/drivers/tty/serial/timbuart.c
similarity index 100%
rename from drivers/serial/timbuart.c
rename to drivers/tty/serial/timbuart.c
diff --git a/drivers/serial/timbuart.h b/drivers/tty/serial/timbuart.h
similarity index 100%
rename from drivers/serial/timbuart.h
rename to drivers/tty/serial/timbuart.h
diff --git a/drivers/serial/uartlite.c b/drivers/tty/serial/uartlite.c
similarity index 100%
rename from drivers/serial/uartlite.c
rename to drivers/tty/serial/uartlite.c
diff --git a/drivers/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
similarity index 100%
rename from drivers/serial/ucc_uart.c
rename to drivers/tty/serial/ucc_uart.c
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
similarity index 100%
rename from drivers/serial/vr41xx_siu.c
rename to drivers/tty/serial/vr41xx_siu.c
diff --git a/drivers/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
similarity index 100%
rename from drivers/serial/vt8500_serial.c
rename to drivers/tty/serial/vt8500_serial.c
diff --git a/drivers/serial/zs.c b/drivers/tty/serial/zs.c
similarity index 100%
rename from drivers/serial/zs.c
rename to drivers/tty/serial/zs.c
diff --git a/drivers/serial/zs.h b/drivers/tty/serial/zs.h
similarity index 100%
rename from drivers/serial/zs.h
rename to drivers/tty/serial/zs.h
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c556ed9..81f1395 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -46,7 +46,7 @@
 #include <asm/irq_regs.h>
 
 /* Whether we react on sysrq keys or just ignore them */
-static int __read_mostly sysrq_enabled = 1;
+static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
 
 static bool sysrq_on(void)
@@ -571,6 +571,7 @@
 	unsigned int alt_use;
 	bool active;
 	bool need_reinject;
+	bool reinjecting;
 };
 
 static void sysrq_reinject_alt_sysrq(struct work_struct *work)
@@ -581,6 +582,10 @@
 	unsigned int alt_code = sysrq->alt_use;
 
 	if (sysrq->need_reinject) {
+		/* we do not want the assignment to be reordered */
+		sysrq->reinjecting = true;
+		mb();
+
 		/* Simulate press and release of Alt + SysRq */
 		input_inject_event(handle, EV_KEY, alt_code, 1);
 		input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
@@ -589,6 +594,9 @@
 		input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
 		input_inject_event(handle, EV_KEY, alt_code, 0);
 		input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
+
+		mb();
+		sysrq->reinjecting = false;
 	}
 }
 
@@ -599,6 +607,13 @@
 	bool was_active = sysrq->active;
 	bool suppress;
 
+	/*
+	 * Do not filter anything if we are in the process of re-injecting
+	 * the Alt+SysRq combination.
+	 */
+	if (sysrq->reinjecting)
+		return false;
+
 	switch (type) {
 
 	case EV_SYN:
@@ -629,7 +644,7 @@
 				sysrq->alt_use = sysrq->alt;
 				/*
 				 * If nothing else will be pressed we'll need
-				 * to * re-inject Alt-SysRq keysroke.
+				 * to re-inject the Alt-SysRq keystroke.
 				 */
 				sysrq->need_reinject = true;
 			}
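The sysrq change pairs a reinjecting flag with explicit memory barriers so the filter can tell self-injected Alt+SysRq events from real ones: the flag goes up before the input_inject_event() calls and comes down after, and the filter bails out early while it is set. A stripped-down sketch of the flag-plus-barrier shape (names are illustrative, mb() is the usual arch barrier, and a heavier-duty rework might prefer a proper atomic or state machine):

	static bool reinjecting;

	static void reinject_events(void)
	{
		reinjecting = true;
		mb();			/* flag is visible before the injected events */

		/* input_inject_event(...) press/release sequence, as in the hunk above */

		mb();			/* injected events are done before the flag drops */
		reinjecting = false;
	}

	static bool my_filter(unsigned int type, unsigned int code, int value)
	{
		if (reinjecting)	/* ignore the events we generated ourselves */
			return false;	/* false == pass the event through, do not filter */

		/* normal Alt+SysRq tracking continues here */
		return false;
	}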
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 464d09d..0065da4 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3256,8 +3256,8 @@
 	struct console *c;
 	ssize_t count = 0;
 
-	acquire_console_sem();
-	for (c = console_drivers; c; c = c->next) {
+	console_lock();
+	for_each_console(c) {
 		if (!c->device)
 			continue;
 		if (!c->write)
@@ -3271,7 +3271,7 @@
 	while (i--)
 		count += sprintf(buf + count, "%s%d%c",
 				 cs[i]->name, cs[i]->index, i ? ' ':'\n');
-	release_console_sem();
+	console_unlock();
 
 	return count;
 }
@@ -3306,7 +3306,7 @@
 	if (IS_ERR(consdev))
 		consdev = NULL;
 	else
-		device_create_file(consdev, &dev_attr_active);
+		WARN_ON(device_create_file(consdev, &dev_attr_active) < 0);
 
 #ifdef CONFIG_VT
 	vty_init(&console_fops);
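The remainder of the series is dominated by the mechanical acquire_console_sem()/release_console_sem() to console_lock()/console_unlock() rename; the show_cons_active() hunk above also switches to the for_each_console() iterator. A minimal sketch of the resulting pattern (the printed fields are illustrative):

	#include <linux/console.h>
	#include <linux/kernel.h>

	static void dump_consoles(void)
	{
		struct console *c;

		console_lock();			/* was acquire_console_sem() */
		for_each_console(c)		/* walks the console_drivers list */
			printk(KERN_INFO "console %s%d flags %x\n",
			       c->name, c->index, c->flags);
		console_unlock();		/* was release_console_sem() */
	}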
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ebae344..c956ed6 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -316,9 +316,9 @@
 	/* always called with BTM from vt_ioctl */
 	WARN_ON(!tty_locked());
 
-	acquire_console_sem();
+	console_lock();
 	poke_blanked_console();
-	release_console_sem();
+	console_unlock();
 
 	ld = tty_ldisc_ref(tty);
 	if (!ld) {
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index eab3a1f..a672ed1 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -202,7 +202,7 @@
 	/* Select the proper current console and verify
 	 * sanity of the situation under the console lock.
 	 */
-	acquire_console_sem();
+	console_lock();
 
 	attr = (currcons & 128);
 	currcons = (currcons & 127);
@@ -336,9 +336,9 @@
 		 * the pagefault handling code may want to call printk().
 		 */
 
-		release_console_sem();
+		console_unlock();
 		ret = copy_to_user(buf, con_buf_start, orig_count);
-		acquire_console_sem();
+		console_lock();
 
 		if (ret) {
 			read += (orig_count - ret);
@@ -354,7 +354,7 @@
 	if (read)
 		ret = read;
 unlock_out:
-	release_console_sem();
+	console_unlock();
 	mutex_unlock(&con_buf_mtx);
 	return ret;
 }
@@ -379,7 +379,7 @@
 	/* Select the proper current console and verify
 	 * sanity of the situation under the console lock.
 	 */
-	acquire_console_sem();
+	console_lock();
 
 	attr = (currcons & 128);
 	currcons = (currcons & 127);
@@ -414,9 +414,9 @@
 		/* Temporarily drop the console lock so that we can read
 		 * in the write data from userspace safely.
 		 */
-		release_console_sem();
+		console_unlock();
 		ret = copy_from_user(con_buf, buf, this_round);
-		acquire_console_sem();
+		console_lock();
 
 		if (ret) {
 			this_round -= ret;
@@ -542,7 +542,7 @@
 		vcs_scr_updated(vc);
 
 unlock_out:
-	release_console_sem();
+	console_unlock();
 
 	mutex_unlock(&con_buf_mtx);
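vcs_read()/vcs_write() keep the rule spelled out in the retained comments: drop the console lock around the user-space copy, because the page-fault path may want to printk() and would otherwise deadlock on the same lock, then retake it and re-validate before touching console state again. A hedged sketch of that drop-copy-reacquire shape (buffer names are placeholders):

	console_lock();
	/* ... snapshot the screen contents into a kernel buffer ... */

	console_unlock();			/* drop: copy_to_user() may fault */
	ret = copy_to_user(ubuf, kbuf, count);	/* returns the bytes NOT copied */
	console_lock();				/* retake before touching vc state */

	if (ret) {
		copied += count - ret;		/* account for the partial copy */
		ret = -EFAULT;
	}
	/* ... re-check that the console was not resized or freed meanwhile ... */
	console_unlock();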
 
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 76407ec..147ede34 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1003,9 +1003,9 @@
 	struct vc_data *vc = tty->driver_data;
 	int ret;
 
-	acquire_console_sem();
+	console_lock();
 	ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
-	release_console_sem();
+	console_unlock();
 	return ret;
 }
 
@@ -1271,7 +1271,7 @@
 	vc->vc_color = vc->vc_def_color;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_m(struct vc_data *vc)
 {
 	int i;
@@ -1415,7 +1415,7 @@
 	return vc_cons[fg_console].d->vc_report_mouse;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void set_mode(struct vc_data *vc, int on_off)
 {
 	int i;
@@ -1485,7 +1485,7 @@
 		}
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void setterm_command(struct vc_data *vc)
 {
 	switch(vc->vc_par[0]) {
@@ -1545,7 +1545,7 @@
 	}
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_at(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_cols - vc->vc_x)
@@ -1555,7 +1555,7 @@
 	insert_char(vc, nr);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_L(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_rows - vc->vc_y)
@@ -1566,7 +1566,7 @@
 	vc->vc_need_wrap = 0;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_P(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_cols - vc->vc_x)
@@ -1576,7 +1576,7 @@
 	delete_char(vc, nr);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_M(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_rows - vc->vc_y)
@@ -1587,7 +1587,7 @@
 	vc->vc_need_wrap = 0;
 }
 
-/* console_sem is held (except via vc_init->reset_terminal */
+/* console_lock is held (except via vc_init->reset_terminal) */
 static void save_cur(struct vc_data *vc)
 {
 	vc->vc_saved_x		= vc->vc_x;
@@ -1603,7 +1603,7 @@
 	vc->vc_saved_G1		= vc->vc_G1_charset;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void restore_cur(struct vc_data *vc)
 {
 	gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y);
@@ -1625,7 +1625,7 @@
 	EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
 	ESpalette };
 
-/* console_sem is held (except via vc_init()) */
+/* console_lock is held (except via vc_init()) */
 static void reset_terminal(struct vc_data *vc, int do_clear)
 {
 	vc->vc_top		= 0;
@@ -1685,7 +1685,7 @@
 	    csi_J(vc, 2);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
 {
 	/*
@@ -2119,7 +2119,7 @@
 	return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
 }
 
-/* acquires console_sem */
+/* acquires console_lock */
 static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
 #ifdef VT_BUF_VRAM_ONLY
@@ -2147,11 +2147,11 @@
 
 	might_sleep();
 
-	acquire_console_sem();
+	console_lock();
 	vc = tty->driver_data;
 	if (vc == NULL) {
 		printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -2159,7 +2159,7 @@
 	if (!vc_cons_allocated(currcons)) {
 	    /* could this happen? */
 		printk_once("con_write: tty %d not allocated\n", currcons+1);
-	    release_console_sem();
+	    console_unlock();
 	    return 0;
 	}
 
@@ -2375,7 +2375,7 @@
 	}
 	FLUSH
 	console_conditional_schedule();
-	release_console_sem();
+	console_unlock();
 	notify_update(vc);
 	return n;
 #undef FLUSH
@@ -2388,11 +2388,11 @@
  * us to do the switches asynchronously (needed when we want
  * to switch due to a keyboard interrupt).  Synchronization
  * with other console code and prevention of re-entrancy is
- * ensured with console_sem.
+ * ensured with console_lock.
  */
 static void console_callback(struct work_struct *ignored)
 {
-	acquire_console_sem();
+	console_lock();
 
 	if (want_console >= 0) {
 		if (want_console != fg_console &&
@@ -2422,7 +2422,7 @@
 	}
 	notify_update(vc_cons[fg_console].d);
 
-	release_console_sem();
+	console_unlock();
 }
 
 int set_console(int nr)
@@ -2603,7 +2603,7 @@
  */
 
 /*
- * Generally a bit racy with respect to console_sem().
+ * Generally a bit racy with respect to console_lock().
  *
  * There are some functions which don't need it.
  *
@@ -2629,17 +2629,17 @@
 	switch (type)
 	{
 		case TIOCL_SETSEL:
-			acquire_console_sem();
+			console_lock();
 			ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
-			release_console_sem();
+			console_unlock();
 			break;
 		case TIOCL_PASTESEL:
 			ret = paste_selection(tty);
 			break;
 		case TIOCL_UNBLANKSCREEN:
-			acquire_console_sem();
+			console_lock();
 			unblank_screen();
-			release_console_sem();
+			console_unlock();
 			break;
 		case TIOCL_SELLOADLUT:
 			ret = sel_loadlut(p);
@@ -2688,10 +2688,10 @@
 			}
 			break;
 		case TIOCL_BLANKSCREEN:	/* until explicitly unblanked, not only poked */
-			acquire_console_sem();
+			console_lock();
 			ignore_poke = 1;
 			do_blank_screen(0);
-			release_console_sem();
+			console_unlock();
 			break;
 		case TIOCL_BLANKEDSCREEN:
 			ret = console_blanked;
@@ -2790,11 +2790,11 @@
 		return;
 
 	/* if we race with con_close(), vt may be null */
-	acquire_console_sem();
+	console_lock();
 	vc = tty->driver_data;
 	if (vc)
 		set_cursor(vc);
-	release_console_sem();
+	console_unlock();
 }
 
 /*
@@ -2805,7 +2805,7 @@
 	unsigned int currcons = tty->index;
 	int ret = 0;
 
-	acquire_console_sem();
+	console_lock();
 	if (tty->driver_data == NULL) {
 		ret = vc_allocate(currcons);
 		if (ret == 0) {
@@ -2813,7 +2813,7 @@
 
 			/* Still being freed */
 			if (vc->port.tty) {
-				release_console_sem();
+				console_unlock();
 				return -ERESTARTSYS;
 			}
 			tty->driver_data = vc;
@@ -2827,11 +2827,11 @@
 				tty->termios->c_iflag |= IUTF8;
 			else
 				tty->termios->c_iflag &= ~IUTF8;
-			release_console_sem();
+			console_unlock();
 			return ret;
 		}
 	}
-	release_console_sem();
+	console_unlock();
 	return ret;
 }
 
@@ -2844,9 +2844,9 @@
 {
 	struct vc_data *vc = tty->driver_data;
 	BUG_ON(vc == NULL);
-	acquire_console_sem();
+	console_lock();
 	vc->port.tty = NULL;
-	release_console_sem();
+	console_unlock();
 	tty_shutdown(tty);
 }
 
@@ -2893,13 +2893,13 @@
 	struct vc_data *vc;
 	unsigned int currcons = 0, i;
 
-	acquire_console_sem();
+	console_lock();
 
 	if (conswitchp)
 		display_desc = conswitchp->con_startup();
 	if (!display_desc) {
 		fg_console = 0;
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -2946,7 +2946,7 @@
 	printable = 1;
 	printk("\n");
 
-	release_console_sem();
+	console_unlock();
 
 #ifdef CONFIG_VT_CONSOLE
 	register_console(&vt_console_driver);
@@ -2994,7 +2994,7 @@
 	if (IS_ERR(tty0dev))
 		tty0dev = NULL;
 	else
-		device_create_file(tty0dev, &dev_attr_active);
+		WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
 
 	vcs_init();
 
@@ -3037,7 +3037,7 @@
 	if (!try_module_get(owner))
 		return -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	/* check if driver is registered */
 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3122,7 +3122,7 @@
 
 	retval = 0;
 err:
-	release_console_sem();
+	console_unlock();
 	module_put(owner);
 	return retval;
 };
@@ -3171,7 +3171,7 @@
 	if (!try_module_get(owner))
 		return -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	/* check if driver is registered and if it is unbindable */
 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3185,7 +3185,7 @@
 	}
 
 	if (retval) {
-		release_console_sem();
+		console_unlock();
 		goto err;
 	}
 
@@ -3204,12 +3204,12 @@
 	}
 
 	if (retval) {
-		release_console_sem();
+		console_unlock();
 		goto err;
 	}
 
 	if (!con_is_bound(csw)) {
-		release_console_sem();
+		console_unlock();
 		goto err;
 	}
 
@@ -3238,7 +3238,7 @@
 	if (!con_is_bound(csw))
 		con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
 
-	release_console_sem();
+	console_unlock();
 	/* ignore return value, binding should not fail */
 	bind_con_driver(defcsw, first, last, deflt);
 err:
@@ -3538,14 +3538,14 @@
 	if (!try_module_get(owner))
 		return -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
 		con_driver = &registered_con_driver[i];
 
 		/* already registered */
 		if (con_driver->con == csw)
-			retval = -EINVAL;
+			retval = -EBUSY;
 	}
 
 	if (retval)
@@ -3592,7 +3592,7 @@
 	}
 
 err:
-	release_console_sem();
+	console_unlock();
 	module_put(owner);
 	return retval;
 }
@@ -3613,7 +3613,7 @@
 {
 	int i, retval = -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	/* cannot unregister a bound driver */
 	if (con_is_bound(csw))
@@ -3639,7 +3639,7 @@
 		}
 	}
 err:
-	release_console_sem();
+	console_unlock();
 	return retval;
 }
 EXPORT_SYMBOL(unregister_con_driver);
@@ -3656,7 +3656,12 @@
 	int err;
 
 	err = register_con_driver(csw, first, last);
-
+	/* if we get a busy error we still want to bind the console driver
+	 * and return success, as we may have unbound the console driver
+	 * but not unregistered it.
+	 */
+	if (err == -EBUSY)
+		err = 0;
 	if (!err)
 		bind_con_driver(csw, first, last, deflt);
 
@@ -3934,9 +3939,9 @@
 {
 	int rc;
 
-	acquire_console_sem();
+	console_lock();
 	rc = set_get_cmap (arg,1);
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
@@ -3945,9 +3950,9 @@
 {
 	int rc;
 
-	acquire_console_sem();
+	console_lock();
 	rc = set_get_cmap (arg,0);
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
@@ -3994,12 +3999,12 @@
 	} else
 		font.data = NULL;
 
-	acquire_console_sem();
+	console_lock();
 	if (vc->vc_sw->con_font_get)
 		rc = vc->vc_sw->con_font_get(vc, &font);
 	else
 		rc = -ENOSYS;
-	release_console_sem();
+	console_unlock();
 
 	if (rc)
 		goto out;
@@ -4076,12 +4081,12 @@
 	font.data = memdup_user(op->data, size);
 	if (IS_ERR(font.data))
 		return PTR_ERR(font.data);
-	acquire_console_sem();
+	console_lock();
 	if (vc->vc_sw->con_font_set)
 		rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
 	else
 		rc = -ENOSYS;
-	release_console_sem();
+	console_unlock();
 	kfree(font.data);
 	return rc;
 }
@@ -4103,12 +4108,12 @@
 	else
 		name[MAX_FONT_NAME - 1] = 0;
 
-	acquire_console_sem();
+	console_lock();
 	if (vc->vc_sw->con_font_default)
 		rc = vc->vc_sw->con_font_default(vc, &font, s);
 	else
 		rc = -ENOSYS;
-	release_console_sem();
+	console_unlock();
 	if (!rc) {
 		op->width = font.width;
 		op->height = font.height;
@@ -4124,7 +4129,7 @@
 	if (vc->vc_mode != KD_TEXT)
 		return -EINVAL;
 
-	acquire_console_sem();
+	console_lock();
 	if (!vc->vc_sw->con_font_copy)
 		rc = -ENOSYS;
 	else if (con < 0 || !vc_cons_allocated(con))
@@ -4133,7 +4138,7 @@
 		rc = 0;
 	else
 		rc = vc->vc_sw->con_font_copy(vc, con);
-	release_console_sem();
+	console_unlock();
 	return rc;
 }
 
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 6b68a0f..1235ebd 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -649,12 +649,12 @@
 		/*
 		 * explicitly blank/unblank the screen if switching modes
 		 */
-		acquire_console_sem();
+		console_lock();
 		if (arg == KD_TEXT)
 			do_unblank_screen(1);
 		else
 			do_blank_screen(1);
-		release_console_sem();
+		console_unlock();
 		break;
 
 	case KDGETMODE:
@@ -893,7 +893,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		acquire_console_sem();
+		console_lock();
 		vc->vt_mode = tmp;
 		/* the frsig is ignored, so we set it to 0 */
 		vc->vt_mode.frsig = 0;
@@ -901,7 +901,7 @@
 		vc->vt_pid = get_pid(task_pid(current));
 		/* no switch is required -- saw@shade.msu.ru */
 		vc->vt_newvt = -1;
-		release_console_sem();
+		console_unlock();
 		break;
 	}
 
@@ -910,9 +910,9 @@
 		struct vt_mode tmp;
 		int rc;
 
-		acquire_console_sem();
+		console_lock();
 		memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode));
-		release_console_sem();
+		console_unlock();
 
 		rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
 		if (rc)
@@ -965,9 +965,9 @@
 			ret =  -ENXIO;
 		else {
 			arg--;
-			acquire_console_sem();
+			console_lock();
 			ret = vc_allocate(arg);
-			release_console_sem();
+			console_unlock();
 			if (ret)
 				break;
 			set_console(arg);
@@ -990,7 +990,7 @@
 			ret = -ENXIO;
 		else {
 			vsa.console--;
-			acquire_console_sem();
+			console_lock();
 			ret = vc_allocate(vsa.console);
 			if (ret == 0) {
 				struct vc_data *nvc;
@@ -1003,7 +1003,7 @@
 				put_pid(nvc->vt_pid);
 				nvc->vt_pid = get_pid(task_pid(current));
 			}
-			release_console_sem();
+			console_unlock();
 			if (ret)
 				break;
 			/* Commence switch and lock */
@@ -1044,7 +1044,7 @@
 		/*
 		 * Switching-from response
 		 */
-		acquire_console_sem();
+		console_lock();
 		if (vc->vt_newvt >= 0) {
 			if (arg == 0)
 				/*
@@ -1063,7 +1063,7 @@
 				vc->vt_newvt = -1;
 				ret = vc_allocate(newvt);
 				if (ret) {
-					release_console_sem();
+					console_unlock();
 					break;
 				}
 				/*
@@ -1083,7 +1083,7 @@
 			if (arg != VT_ACKACQ)
 				ret = -EINVAL;
 		}
-		release_console_sem();
+		console_unlock();
 		break;
 
 	 /*
@@ -1096,20 +1096,20 @@
 		}
 		if (arg == 0) {
 		    /* deallocate all unused consoles, but leave 0 */
-			acquire_console_sem();
+			console_lock();
 			for (i=1; i<MAX_NR_CONSOLES; i++)
 				if (! VT_BUSY(i))
 					vc_deallocate(i);
-			release_console_sem();
+			console_unlock();
 		} else {
 			/* deallocate a single console, if possible */
 			arg--;
 			if (VT_BUSY(arg))
 				ret = -EBUSY;
 			else if (arg) {			      /* leave 0 */
-				acquire_console_sem();
+				console_lock();
 				vc_deallocate(arg);
-				release_console_sem();
+				console_unlock();
 			}
 		}
 		break;
@@ -1126,7 +1126,7 @@
 		    get_user(cc, &vtsizes->v_cols))
 			ret = -EFAULT;
 		else {
-			acquire_console_sem();
+			console_lock();
 			for (i = 0; i < MAX_NR_CONSOLES; i++) {
 				vc = vc_cons[i].d;
 
@@ -1135,7 +1135,7 @@
 					vc_resize(vc_cons[i].d, cc, ll);
 				}
 			}
-			release_console_sem();
+			console_unlock();
 		}
 		break;
 	}
@@ -1187,14 +1187,14 @@
 		for (i = 0; i < MAX_NR_CONSOLES; i++) {
 			if (!vc_cons[i].d)
 				continue;
-			acquire_console_sem();
+			console_lock();
 			if (vlin)
 				vc_cons[i].d->vc_scan_lines = vlin;
 			if (clin)
 				vc_cons[i].d->vc_font.height = clin;
 			vc_cons[i].d->vc_resize_user = 1;
 			vc_resize(vc_cons[i].d, cc, ll);
-			release_console_sem();
+			console_unlock();
 		}
 		break;
 	}
@@ -1367,7 +1367,7 @@
 	struct vc_data *vc;
 	struct tty_struct *tty;
 
-	acquire_console_sem();
+	console_lock();
 	vc = vc_con->d;
 	if (vc) {
 		tty = vc->port.tty;
@@ -1379,7 +1379,7 @@
 			__do_SAK(tty);
 		reset_vc(vc);
 	}
-	release_console_sem();
+	console_unlock();
 }
 
 #ifdef CONFIG_COMPAT
@@ -1737,10 +1737,10 @@
 {
 	int prev;
 
-	acquire_console_sem();
+	console_lock();
 	/* Graphics mode - up to X */
 	if (disable_vt_switch) {
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 	prev = fg_console;
@@ -1748,7 +1748,7 @@
 	if (alloc && vc_allocate(vt)) {
 		/* we can't have a free VC for now. Too bad,
 		 * we don't want to mess the screen for now. */
-		release_console_sem();
+		console_unlock();
 		return -ENOSPC;
 	}
 
@@ -1758,10 +1758,10 @@
 		 * Let the calling function know so it can decide
 		 * what to do.
 		 */
-		release_console_sem();
+		console_unlock();
 		return -EIO;
 	}
-	release_console_sem();
+	console_unlock();
 	tty_lock();
 	if (vt_waitactive(vt + 1)) {
 		pr_debug("Suspend: Can't switch VCs.");
@@ -1781,8 +1781,8 @@
  */
 void pm_set_vt_switch(int do_switch)
 {
-	acquire_console_sem();
+	console_lock();
 	disable_vt_switch = !do_switch;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(pm_set_vt_switch);
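For readers following the console_sem -> console_lock conversion in the two vt hunks above, here is a minimal sketch of the resulting locking pattern. It assumes ordinary kernel process context; the function name example_update_console_state() and its body are illustrative only and are not part of this series.

#include <linux/console.h>

/* Sketch only: take the console lock around console/VC state updates,
 * exactly as the converted call sites above now do. */
static void example_update_console_state(void)
{
	console_lock();		/* formerly acquire_console_sem() */
	/* ... inspect or modify fg_console / vc state here ... */
	console_unlock();	/* formerly release_console_sem() */
}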
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d6ede98..4ab49d4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1607,6 +1607,7 @@
 	{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
 	{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
 	{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
 	{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
 	/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6ee4451..47085e5 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -342,7 +342,7 @@
 		goto outnp;
 	}
 
-	if (!file->f_flags && O_NONBLOCK)
+	if (!(file->f_flags & O_NONBLOCK))
 		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
 								&desc->flags));
 	else
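The cdc-wdm hunk above fixes a classic '&&' vs '&' mix-up: the old test never examined the O_NONBLOCK bit at all. A hypothetical stand-alone user-space demonstration of the difference (not driver code):

#include <stdio.h>
#include <fcntl.h>

int main(void)
{
	int f_flags = O_RDWR;	/* O_NONBLOCK deliberately not set */

	/* old test: !f_flags is 0 for any non-zero flag word, so the
	 * whole expression is 0 and the driver never waited */
	printf("old: %d\n", !f_flags && O_NONBLOCK);

	/* fixed test: isolate the O_NONBLOCK bit, then negate; this is 1,
	 * so a blocking open waits as intended */
	printf("new: %d\n", !(f_flags & O_NONBLOCK));
	return 0;
}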
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index bcc2477..18d02e3 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -123,9 +123,9 @@
 
 config USB_OTG_WHITELIST
 	bool "Rely on OTG Targeted Peripherals List"
-	depends on USB_OTG || EMBEDDED
+	depends on USB_OTG || EXPERT
 	default y if USB_OTG
-	default n if EMBEDDED
+	default n if EXPERT
 	help
 	  If you say Y here, the "otg_whitelist.h" file will be used as a
 	  product whitelist, so USB peripherals not listed there will be
@@ -141,7 +141,7 @@
 
 config USB_OTG_BLACKLIST_HUB
 	bool "Disable external hubs"
-	depends on USB_OTG || EMBEDDED
+	depends on USB_OTG || EXPERT
 	help
 	  If you say Y here, then Linux will refuse to enumerate
 	  external hubs.  OTG hosts are allowed to reduce hardware
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 9da2505..df502a9 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -192,12 +192,12 @@
 	ep_dev->dev.parent = parent;
 	ep_dev->dev.release = ep_device_release;
 	dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
-	device_enable_async_suspend(&ep_dev->dev);
 
 	retval = device_register(&ep_dev->dev);
 	if (retval)
 		goto error_register;
 
+	device_enable_async_suspend(&ep_dev->dev);
 	endpoint->ep_dev = ep_dev;
 	return retval;
 
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b55d4607..f71e8e3 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -405,7 +405,12 @@
 			return retval;
 	}
 
-	synchronize_irq(pci_dev->irq);
+	/* If MSI-X is enabled, the driver will have synchronized all vectors
+	 * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
+	 * synchronized here.
+	 */
+	if (!hcd->msix_enabled)
+		synchronize_irq(pci_dev->irq);
 
 	/* Downstream ports from this root hub should already be quiesced, so
 	 * there will be no DMA activity.  Now we can shut down the upstream
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6a95017..e935f71 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1955,7 +1955,6 @@
 
 	dev_dbg(&rhdev->dev, "usb %s%s\n",
 			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
-	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	if (!hcd->driver->bus_resume)
 		return -ENOENT;
 	if (hcd->state == HC_STATE_RUNNING)
@@ -1963,6 +1962,7 @@
 
 	hcd->state = HC_STATE_RESUMING;
 	status = hcd->driver->bus_resume(hcd);
+	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	if (status == 0) {
 		/* TRSMRCY = 10 msec */
 		msleep(10);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b98efae..0f299b7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -676,6 +676,8 @@
 static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 {
 	struct usb_device *hdev = hub->hdev;
+	struct usb_hcd *hcd;
+	int ret;
 	int port1;
 	int status;
 	bool need_debounce_delay = false;
@@ -714,6 +716,25 @@
 			usb_autopm_get_interface_no_resume(
 					to_usb_interface(hub->intfdev));
 			return;		/* Continues at init2: below */
+		} else if (type == HUB_RESET_RESUME) {
+			/* The internal host controller state for the hub device
+			 * may be gone after a host power loss on system resume.
+			 * Update the device's info so the HW knows it's a hub.
+			 */
+			hcd = bus_to_hcd(hdev->bus);
+			if (hcd->driver->update_hub_device) {
+				ret = hcd->driver->update_hub_device(hcd, hdev,
+						&hub->tt, GFP_NOIO);
+				if (ret < 0) {
+					dev_err(hub->intfdev, "Host not "
+							"accepting hub info "
+							"update.\n");
+					dev_err(hub->intfdev, "LS/FS devices "
+							"and hubs may not work "
+							"under this hub.\n");
+				}
+			}
+			hub_power_on(hub, true);
 		} else {
 			hub_power_on(hub, true);
 		}
@@ -2660,17 +2681,13 @@
 
 	mutex_lock(&usb_address0_mutex);
 
-	if (!udev->config && oldspeed == USB_SPEED_SUPER) {
-		/* Don't reset USB 3.0 devices during an initial setup */
-		usb_set_device_state(udev, USB_STATE_DEFAULT);
-	} else {
-		/* Reset the device; full speed may morph to high speed */
-		/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
-		retval = hub_port_reset(hub, port1, udev, delay);
-		if (retval < 0)		/* error or disconnect */
-			goto fail;
-		/* success, speed is known */
-	}
+	/* Reset the device; full speed may morph to high speed */
+	/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+	retval = hub_port_reset(hub, port1, udev, delay);
+	if (retval < 0)		/* error or disconnect */
+		goto fail;
+	/* success, speed is known */
+
 	retval = -ENODEV;
 
 	if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2732,6 +2749,11 @@
 		udev->ttport = hdev->ttport;
 	} else if (udev->speed != USB_SPEED_HIGH
 			&& hdev->speed == USB_SPEED_HIGH) {
+		if (!hub->tt.hub) {
+			dev_err(&udev->dev, "parent hub has no TT\n");
+			retval = -EINVAL;
+			goto fail;
+		}
 		udev->tt = &hub->tt;
 		udev->ttport = port1;
 	}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c5954..81ce6a8 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@
 	{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
+	/* Samsung Android phone modem - ID conflict with SPH-I500 */
+	{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* Roland SC-8820 */
 	{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -68,6 +72,10 @@
 	/* M-Systems Flash Disk Pioneers */
 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Keytouch QWERTY Panel keyboard */
+	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
 	{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
 
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 1dc9739..d500996 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -509,7 +509,7 @@
 	select USB_GADGET_SELECTED
 
 config USB_GADGET_EG20T
-	boolean "Intel EG20T(Topcliff) USB Device controller"
+	boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
 	depends on PCI
 	select USB_GADGET_DUALSPEED
 	help
@@ -525,6 +525,11 @@
 	  This driver does not support interrupt transfer or isochronous
 	  transfer modes.
 
+	  This driver can also be used for OKI SEMICONDUCTOR's ML7213, which is
+	  intended for IVI (In-Vehicle Infotainment) use.
+	  ML7213 is a companion chip for the Intel Atom E6xx series and is
+	  fully compatible with the Intel EG20T PCH.
+
 config USB_EG20T
 	tristate
 	depends on USB_GADGET_EG20T
@@ -541,6 +546,8 @@
 	  ci13xxx_udc core.
 	  This driver depends on OTG driver for PHY initialization,
 	  clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout, which has
+	  an external PHY.
 
 	  Say "y" to link the driver statically, or "m" to build a
 	  dynamically linked module called "ci13xxx_msm" and force all
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 31656a2..a1c67ae 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -76,10 +76,21 @@
 
 /* control endpoint description */
 static const struct usb_endpoint_descriptor
-ctrl_endpt_desc = {
+ctrl_endpt_out_desc = {
 	.bLength         = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType = USB_DT_ENDPOINT,
 
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
 	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
 	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
 };
@@ -265,10 +276,10 @@
 	hw_bank.size /= sizeof(u32);
 
 	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
-	if (reg == 0 || reg > ENDPT_MAX)
-		return -ENODEV;
+	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */
 
-	hw_ep_max = reg;   /* cache hw ENDPT_MAX */
+	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+		return -ENODEV;
 
 	/* setup lock mode ? */
 
@@ -1197,16 +1208,17 @@
 	}
 
 	spin_lock_irqsave(udc->lock, flags);
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+	for (i = 0; i < hw_ep_max/2; i++) {
+		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
 		n += scnprintf(buf + n, PAGE_SIZE - n,
 			       "EP=%02i: RX=%08X TX=%08X\n",
-			       i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
+			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
 		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
 			n += scnprintf(buf + n, PAGE_SIZE - n,
 				       " %04X:    %08X    %08X\n", j,
-				       *((u32 *)mEp->qh[RX].ptr + j),
-				       *((u32 *)mEp->qh[TX].ptr + j));
+				       *((u32 *)mEpRx->qh.ptr + j),
+				       *((u32 *)mEpTx->qh.ptr + j));
 		}
 	}
 	spin_unlock_irqrestore(udc->lock, flags);
@@ -1293,7 +1305,7 @@
 	unsigned long flags;
 	struct list_head   *ptr = NULL;
 	struct ci13xxx_req *req = NULL;
-	unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
 
 	dbg_trace("[%s] %p\n", __func__, buf);
 	if (attr == NULL || buf == NULL) {
@@ -1303,22 +1315,20 @@
 
 	spin_lock_irqsave(udc->lock, flags);
 	for (i = 0; i < hw_ep_max; i++)
-		for (k = RX; k <= TX; k++)
-			list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
-			{
-				req = list_entry(ptr,
-						 struct ci13xxx_req, queue);
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+		{
+			req = list_entry(ptr, struct ci13xxx_req, queue);
 
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+					"EP=%02i: TD=%08X %s\n",
+					i % hw_ep_max/2, (u32)req->dma,
+					((i < hw_ep_max/2) ? "RX" : "TX"));
+
+			for (j = 0; j < qSize; j++)
 				n += scnprintf(buf + n, PAGE_SIZE - n,
-					       "EP=%02i: TD=%08X %s\n",
-					       i, (u32)req->dma,
-					       ((k == RX) ? "RX" : "TX"));
-
-				for (j = 0; j < qSize; j++)
-					n += scnprintf(buf + n, PAGE_SIZE - n,
-						       " %04X:    %08X\n", j,
-						       *((u32 *)req->ptr + j));
-			}
+						" %04X:    %08X\n", j,
+						*((u32 *)req->ptr + j));
+		}
 	spin_unlock_irqrestore(udc->lock, flags);
 
 	return n;
@@ -1467,12 +1477,12 @@
 	 *  At this point it's guaranteed exclusive access to qhead
 	 *  (endpt is not primed) so it's no need to use tripwire
 	 */
-	mEp->qh[mEp->dir].ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
-	mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS;   /* clear status */
+	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
 	if (mReq->req.zero == 0)
-		mEp->qh[mEp->dir].ptr->cap |=  QH_ZLT;
+		mEp->qh.ptr->cap |=  QH_ZLT;
 	else
-		mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+		mEp->qh.ptr->cap &= ~QH_ZLT;
 
 	wmb();   /* synchronize before ep prime */
 
@@ -1542,11 +1552,11 @@
 
 	hw_ep_flush(mEp->num, mEp->dir);
 
-	while (!list_empty(&mEp->qh[mEp->dir].queue)) {
+	while (!list_empty(&mEp->qh.queue)) {
 
 		/* pop oldest request */
 		struct ci13xxx_req *mReq = \
-			list_entry(mEp->qh[mEp->dir].queue.next,
+			list_entry(mEp->qh.queue.next,
 				   struct ci13xxx_req, queue);
 		list_del_init(&mReq->queue);
 		mReq->req.status = -ESHUTDOWN;
@@ -1571,8 +1581,6 @@
 {
 	struct usb_ep *ep;
 	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
-	struct ci13xxx_ep *mEp = container_of(gadget->ep0,
-					      struct ci13xxx_ep, ep);
 
 	trace("%p", gadget);
 
@@ -1583,7 +1591,8 @@
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_fifo_flush(ep);
 	}
-	usb_ep_fifo_flush(gadget->ep0);
+	usb_ep_fifo_flush(&udc->ep0out.ep);
+	usb_ep_fifo_flush(&udc->ep0in.ep);
 
 	udc->driver->disconnect(gadget);
 
@@ -1591,11 +1600,12 @@
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_disable(ep);
 	}
-	usb_ep_disable(gadget->ep0);
+	usb_ep_disable(&udc->ep0out.ep);
+	usb_ep_disable(&udc->ep0in.ep);
 
-	if (mEp->status != NULL) {
-		usb_ep_free_request(gadget->ep0, mEp->status);
-		mEp->status = NULL;
+	if (udc->status != NULL) {
+		usb_ep_free_request(&udc->ep0in.ep, udc->status);
+		udc->status = NULL;
 	}
 
 	return 0;
@@ -1614,7 +1624,6 @@
 __releases(udc->lock)
 __acquires(udc->lock)
 {
-	struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
 	int retval;
 
 	trace("%p", udc);
@@ -1635,11 +1644,15 @@
 	if (retval)
 		goto done;
 
-	retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
+	retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
+	if (retval)
+		goto done;
+
+	retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
 	if (!retval) {
-		mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
-		if (mEp->status == NULL) {
-			usb_ep_disable(&mEp->ep);
+		udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
+		if (udc->status == NULL) {
+			usb_ep_disable(&udc->ep0out.ep);
 			retval = -ENOMEM;
 		}
 	}
@@ -1672,16 +1685,17 @@
 
 /**
  * isr_get_status_response: get_status request response
- * @ep:    endpoint
+ * @udc: udc struct
  * @setup: setup request packet
  *
  * This function returns an error code
  */
-static int isr_get_status_response(struct ci13xxx_ep *mEp,
+static int isr_get_status_response(struct ci13xxx *udc,
 				   struct usb_ctrlrequest *setup)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
+	struct ci13xxx_ep *mEp = &udc->ep0in;
 	struct usb_request *req = NULL;
 	gfp_t gfp_flags = GFP_ATOMIC;
 	int dir, num, retval;
@@ -1736,27 +1750,23 @@
 
 /**
  * isr_setup_status_phase: queues the status phase of a setup transaction
- * @mEp: endpoint
+ * @udc: udc struct
  *
  * This function returns an error code
  */
-static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
+static int isr_setup_status_phase(struct ci13xxx *udc)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
 	int retval;
+	struct ci13xxx_ep *mEp;
 
-	trace("%p", mEp);
+	trace("%p", udc);
 
-	/* mEp is always valid & configured */
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-		mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	mEp->status->no_interrupt = 1;
+	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
 
 	spin_unlock(mEp->lock);
-	retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
+	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
 	spin_lock(mEp->lock);
 
 	return retval;
@@ -1778,11 +1788,11 @@
 
 	trace("%p", mEp);
 
-	if (list_empty(&mEp->qh[mEp->dir].queue))
+	if (list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
 	/* pop oldest request */
-	mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+	mReq = list_entry(mEp->qh.queue.next,
 			  struct ci13xxx_req, queue);
 	list_del_init(&mReq->queue);
 
@@ -1794,10 +1804,10 @@
 
 	dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
 
-	if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+	if (!list_empty(&mEp->qh.queue)) {
 		struct ci13xxx_req* mReqEnq;
 
-		mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
+		mReqEnq = list_entry(mEp->qh.queue.next,
 				  struct ci13xxx_req, queue);
 		_hardware_enqueue(mEp, mReqEnq);
 	}
@@ -1836,16 +1846,14 @@
 		int type, num, err = -EINVAL;
 		struct usb_ctrlrequest req;
 
-
 		if (mEp->desc == NULL)
 			continue;   /* not configured */
 
-		if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
-		    (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
+		if (hw_test_and_clear_complete(i)) {
 			err = isr_tr_complete_low(mEp);
 			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
 				if (err > 0)   /* needs status phase */
-					err = isr_setup_status_phase(mEp);
+					err = isr_setup_status_phase(udc);
 				if (err < 0) {
 					dbg_event(_usb_addr(mEp),
 						  "ERROR", err);
@@ -1866,15 +1874,22 @@
 			continue;
 		}
 
+		/*
+		 * Flush data and handshake transactions of previous
+		 * setup packet.
+		 */
+		_ep_nuke(&udc->ep0out);
+		_ep_nuke(&udc->ep0in);
+
 		/* read_setup_packet */
 		do {
 			hw_test_and_set_setup_guard();
-			memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
+			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
 		} while (!hw_test_and_clear_setup_guard());
 
 		type = req.bRequestType;
 
-		mEp->dir = (type & USB_DIR_IN) ? TX : RX;
+		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
 
 		dbg_setup(_usb_addr(mEp), &req);
 
@@ -1895,7 +1910,7 @@
 				if (err)
 					break;
 			}
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		case USB_REQ_GET_STATUS:
 			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
@@ -1905,7 +1920,7 @@
 			if (le16_to_cpu(req.wLength) != 2 ||
 			    le16_to_cpu(req.wValue)  != 0)
 				break;
-			err = isr_get_status_response(mEp, &req);
+			err = isr_get_status_response(udc, &req);
 			break;
 		case USB_REQ_SET_ADDRESS:
 			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
@@ -1916,7 +1931,7 @@
 			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
 			if (err)
 				break;
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		case USB_REQ_SET_FEATURE:
 			if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
@@ -1932,12 +1947,12 @@
 			spin_lock(udc->lock);
 			if (err)
 				break;
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		default:
 delegate:
 			if (req.wLength == 0)   /* no data phase */
-				mEp->dir = TX;
+				udc->ep0_dir = TX;
 
 			spin_unlock(udc->lock);
 			err = udc->driver->setup(&udc->gadget, &req);
@@ -1968,7 +1983,7 @@
 		     const struct usb_endpoint_descriptor *desc)
 {
 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	int direction, retval = 0;
+	int retval = 0;
 	unsigned long flags;
 
 	trace("%p, %p", ep, desc);
@@ -1982,7 +1997,7 @@
 
 	mEp->desc = desc;
 
-	if (!list_empty(&mEp->qh[mEp->dir].queue))
+	if (!list_empty(&mEp->qh.queue))
 		warn("enabling a non-empty endpoint!");
 
 	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
@@ -1991,29 +2006,22 @@
 
 	mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
 
-	direction = mEp->dir;
-	do {
-		dbg_event(_usb_addr(mEp), "ENABLE", 0);
+	dbg_event(_usb_addr(mEp), "ENABLE", 0);
 
-		mEp->qh[mEp->dir].ptr->cap = 0;
+	mEp->qh.ptr->cap = 0;
 
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->qh[mEp->dir].ptr->cap |=  QH_IOS;
-		else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
-			mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
-		else
-			mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+		mEp->qh.ptr->cap |=  QH_IOS;
+	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+		mEp->qh.ptr->cap &= ~QH_MULT;
+	else
+		mEp->qh.ptr->cap &= ~QH_ZLT;
 
-		mEp->qh[mEp->dir].ptr->cap |=
-			(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
-		mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE;   /* needed? */
+	mEp->qh.ptr->cap |=
+		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
 
-		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
-
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	} while (mEp->dir != direction);
+	retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
 
 	spin_unlock_irqrestore(mEp->lock, flags);
 	return retval;
@@ -2146,7 +2154,7 @@
 	spin_lock_irqsave(mEp->lock, flags);
 
 	if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
-	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+	    !list_empty(&mEp->qh.queue)) {
 		_ep_nuke(mEp);
 		retval = -EOVERFLOW;
 		warn("endpoint ctrl %X nuked", _usb_addr(mEp));
@@ -2170,9 +2178,9 @@
 	/* push request */
 	mReq->req.status = -EINPROGRESS;
 	mReq->req.actual = 0;
-	list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
+	list_add_tail(&mReq->queue, &mEp->qh.queue);
 
-	if (list_is_singular(&mEp->qh[mEp->dir].queue))
+	if (list_is_singular(&mEp->qh.queue))
 		retval = _hardware_enqueue(mEp, mReq);
 
 	if (retval == -EALREADY) {
@@ -2199,7 +2207,7 @@
 	trace("%p, %p", ep, req);
 
 	if (ep == NULL || req == NULL || mEp->desc == NULL ||
-	    list_empty(&mReq->queue)  || list_empty(&mEp->qh[mEp->dir].queue))
+	    list_empty(&mReq->queue)  || list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
 	spin_lock_irqsave(mEp->lock, flags);
@@ -2244,7 +2252,7 @@
 #ifndef STALL_IN
 	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
 	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+	    !list_empty(&mEp->qh.queue)) {
 		spin_unlock_irqrestore(mEp->lock, flags);
 		return -EAGAIN;
 	}
@@ -2355,7 +2363,7 @@
 		if (is_active) {
 			pm_runtime_get_sync(&_gadget->dev);
 			hw_device_reset(udc);
-			hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+			hw_device_state(udc->ep0out.qh.dma);
 		} else {
 			hw_device_state(0);
 			if (udc->udc_driver->notify_event)
@@ -2390,7 +2398,8 @@
 		int (*bind)(struct usb_gadget *))
 {
 	struct ci13xxx *udc = _udc;
-	unsigned long i, k, flags;
+	unsigned long flags;
+	int i, j;
 	int retval = -ENOMEM;
 
 	trace("%p", driver);
@@ -2427,45 +2436,46 @@
 
 	info("hw_ep_max = %d", hw_ep_max);
 
-	udc->driver = driver;
 	udc->gadget.dev.driver = NULL;
 
 	retval = 0;
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+	for (i = 0; i < hw_ep_max/2; i++) {
+		for (j = RX; j <= TX; j++) {
+			int k = i + j * hw_ep_max/2;
+			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
 
-		scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);
+			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+					(j == TX)  ? "in" : "out");
 
-		mEp->lock         = udc->lock;
-		mEp->device       = &udc->gadget.dev;
-		mEp->td_pool      = udc->td_pool;
+			mEp->lock         = udc->lock;
+			mEp->device       = &udc->gadget.dev;
+			mEp->td_pool      = udc->td_pool;
 
-		mEp->ep.name      = mEp->name;
-		mEp->ep.ops       = &usb_ep_ops;
-		mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+			mEp->ep.name      = mEp->name;
+			mEp->ep.ops       = &usb_ep_ops;
+			mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
 
-		/* this allocation cannot be random */
-		for (k = RX; k <= TX; k++) {
-			INIT_LIST_HEAD(&mEp->qh[k].queue);
+			INIT_LIST_HEAD(&mEp->qh.queue);
 			spin_unlock_irqrestore(udc->lock, flags);
-			mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
-							GFP_KERNEL,
-							&mEp->qh[k].dma);
+			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+					&mEp->qh.dma);
 			spin_lock_irqsave(udc->lock, flags);
-			if (mEp->qh[k].ptr == NULL)
+			if (mEp->qh.ptr == NULL)
 				retval = -ENOMEM;
 			else
-				memset(mEp->qh[k].ptr, 0,
-				       sizeof(*mEp->qh[k].ptr));
-		}
-		if (i == 0)
-			udc->gadget.ep0 = &mEp->ep;
-		else
+				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+			/* skip ep0 out and in endpoints */
+			if (i == 0)
+				continue;
+
 			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+		}
 	}
 	if (retval)
 		goto done;
 
+	udc->gadget.ep0 = &udc->ep0in.ep;
 	/* bind gadget */
 	driver->driver.bus     = NULL;
 	udc->gadget.dev.driver = &driver->driver;
@@ -2479,6 +2489,7 @@
 		goto done;
 	}
 
+	udc->driver = driver;
 	pm_runtime_get_sync(&udc->gadget.dev);
 	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
 		if (udc->vbus_active) {
@@ -2490,14 +2501,12 @@
 		}
 	}
 
-	retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+	retval = hw_device_state(udc->ep0out.qh.dma);
 	if (retval)
 		pm_runtime_put_sync(&udc->gadget.dev);
 
  done:
 	spin_unlock_irqrestore(udc->lock, flags);
-	if (retval)
-		usb_gadget_unregister_driver(driver);
 	return retval;
 }
 EXPORT_SYMBOL(usb_gadget_probe_driver);
@@ -2510,7 +2519,7 @@
 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 {
 	struct ci13xxx *udc = _udc;
-	unsigned long i, k, flags;
+	unsigned long i, flags;
 
 	trace("%p", driver);
 
@@ -2546,17 +2555,14 @@
 	for (i = 0; i < hw_ep_max; i++) {
 		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
 
-		if (i == 0)
-			udc->gadget.ep0 = NULL;
-		else if (!list_empty(&mEp->ep.ep_list))
+		if (!list_empty(&mEp->ep.ep_list))
 			list_del_init(&mEp->ep.ep_list);
 
-		for (k = RX; k <= TX; k++)
-			if (mEp->qh[k].ptr != NULL)
-				dma_pool_free(udc->qh_pool,
-					      mEp->qh[k].ptr, mEp->qh[k].dma);
+		if (mEp->qh.ptr != NULL)
+			dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
 	}
 
+	udc->gadget.ep0 = NULL;
 	udc->driver = NULL;
 
 	spin_unlock_irqrestore(udc->lock, flags);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index f61fed0..a2492b6 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -20,7 +20,7 @@
  * DEFINE
  *****************************************************************************/
 #define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
-#define ENDPT_MAX          (16)
+#define ENDPT_MAX          (32)
 #define CTRL_PAYLOAD_MAX   (64)
 #define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
 #define TX        (1)  /* similar to USB_DIR_IN  but can be used as an index */
@@ -88,8 +88,7 @@
 		struct list_head   queue;
 		struct ci13xxx_qh *ptr;
 		dma_addr_t         dma;
-	}                                      qh[2];
-	struct usb_request                    *status;
+	}                                      qh;
 	int                                    wedge;
 
 	/* global resources */
@@ -119,9 +118,13 @@
 
 	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
 	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
+	struct usb_request        *status;    /* ep0 status request */
 
 	struct usb_gadget          gadget;     /* USB slave device */
 	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+	u32                        ep0_dir;    /* ep0 direction */
+#define ep0out ci13xxx_ep[0]
+#define ep0in  ci13xxx_ep[16]
 
 	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
 	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f6ff845..1ba4bef 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -928,8 +928,9 @@
 		 */
 		switch (ctrl->bRequestType & USB_RECIP_MASK) {
 		case USB_RECIP_INTERFACE:
-			if (cdev->config)
-				f = cdev->config->interface[intf];
+			if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
+				break;
+			f = cdev->config->interface[intf];
 			break;
 
 		case USB_RECIP_ENDPOINT:
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index b5dbb23..6d8e533 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -293,6 +293,7 @@
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
 
 #include "gadget_chips.h"
 
@@ -2763,7 +2764,7 @@
 			return ERR_PTR(-ENOMEM);
 		common->free_storage_on_release = 1;
 	} else {
-		memset(common, 0, sizeof common);
+		memset(common, 0, sizeof *common);
 		common->free_storage_on_release = 0;
 	}
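The f_mass_storage hunk above replaces "sizeof common" with "sizeof *common": the former is the size of the pointer, so only a handful of bytes of the structure were being zeroed. A hypothetical stand-alone illustration follows; struct fsg_common_demo is a stand-in for the real struct fsg_common, not code from this series.

#include <stdio.h>
#include <string.h>

struct fsg_common_demo {
	char	buf[128];
	int	refcount;
};

int main(void)
{
	struct fsg_common_demo storage, *common = &storage;

	printf("sizeof common  = %zu\n", sizeof common);	/* pointer size */
	printf("sizeof *common = %zu\n", sizeof *common);	/* whole struct */

	/* the corrected call zeroes the entire structure */
	memset(common, 0, sizeof *common);
	return 0;
}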
 
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 3c6e1a0..5e14950 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,14 +346,19 @@
 
 		if (unlikely(!skb))
 			break;
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
-				req->actual);
+
+		if (skb->len == 0) { /* First fragment */
+			skb->protocol = htons(ETH_P_PHONET);
+			skb_reset_mac_header(skb);
+			/* Can't use pskb_pull() on page in IRQ */
+			memcpy(skb_put(skb, 1), page_address(page), 1);
+		}
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+				skb->len == 0, req->actual);
 		page = NULL;
 
 		if (req->actual < req->length) { /* Last fragment */
-			skb->protocol = htons(ETH_P_PHONET);
-			skb_reset_mac_header(skb);
-			pskb_pull(skb, 1);
 			skb->dev = dev;
 			dev->stats.rx_packets++;
 			dev->stats.rx_bytes += skb->len;
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 1210534..5408186 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1320,7 +1320,7 @@
 };
 
 /*******************************************************************************
- * USB gadged driver functions
+ * USB gadget driver functions
  *******************************************************************************
  */
 int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index 7779724..1eca8b4 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -3086,7 +3086,7 @@
 
 	kfree(dev->ep);
 
-	/* diable IRQ handler */
+	/* disable IRQ handler */
 	if (dev->got_irq)
 		free_irq(pdev->irq, dev);
 
@@ -3406,7 +3406,7 @@
 	/* disable interrupt and set controller to stop state */
 	langwell_udc_stop(dev);
 
-	/* diable IRQ handler */
+	/* disable IRQ handler */
 	if (dev->got_irq)
 		free_irq(pdev->irq, dev);
 	dev->got_irq = 0;
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 0c8dd81..b120dbb 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -198,10 +198,10 @@
 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
 /* Value of EP Buffer Size */
-#define UDC_EP0IN_BUFF_SIZE	64
-#define UDC_EPIN_BUFF_SIZE	512
-#define UDC_EP0OUT_BUFF_SIZE	64
-#define UDC_EPOUT_BUFF_SIZE	512
+#define UDC_EP0IN_BUFF_SIZE	16
+#define UDC_EPIN_BUFF_SIZE	256
+#define UDC_EP0OUT_BUFF_SIZE	16
+#define UDC_EPOUT_BUFF_SIZE	256
 /* Value of EP maximum packet size */
 #define UDC_EP0IN_MAX_PKT_SIZE	64
 #define UDC_EP0OUT_MAX_PKT_SIZE	64
@@ -351,7 +351,7 @@
 	struct pci_pool		*data_requests;
 	struct pci_pool		*stp_requests;
 	dma_addr_t			dma_addr;
-	unsigned long			ep0out_buf[64];
+	void				*ep0out_buf;
 	struct usb_ctrlrequest		setup_data;
 	unsigned long			phys_addr;
 	void __iomem			*base_addr;
@@ -361,6 +361,8 @@
 
 #define PCH_UDC_PCI_BAR			1
 #define PCI_DEVICE_ID_INTEL_EG20T_UDC	0x8808
+#define PCI_VENDOR_ID_ROHM		0x10DB
+#define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
 
 static const char	ep0_string[] = "ep0in";
 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
@@ -1219,11 +1221,11 @@
 	dev = ep->dev;
 	if (req->dma_mapped) {
 		if (ep->in)
-			pci_unmap_single(dev->pdev, req->req.dma,
-					 req->req.length, PCI_DMA_TODEVICE);
+			dma_unmap_single(&dev->pdev->dev, req->req.dma,
+					 req->req.length, DMA_TO_DEVICE);
 		else
-			pci_unmap_single(dev->pdev, req->req.dma,
-					 req->req.length, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&dev->pdev->dev, req->req.dma,
+					 req->req.length, DMA_FROM_DEVICE);
 		req->dma_mapped = 0;
 		req->req.dma = DMA_ADDR_INVALID;
 	}
@@ -1414,7 +1416,6 @@
 
 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
 	td_data = req->td_data;
-	ep->td_data = req->td_data;
 	/* Set the status bits for all descriptors */
 	while (1) {
 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
@@ -1613,15 +1614,19 @@
 	if (usbreq->length &&
 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
 		if (ep->in)
-			usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
-					usbreq->length, PCI_DMA_TODEVICE);
+			usbreq->dma = dma_map_single(&dev->pdev->dev,
+						     usbreq->buf,
+						     usbreq->length,
+						     DMA_TO_DEVICE);
 		else
-			usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
-					usbreq->length, PCI_DMA_FROMDEVICE);
+			usbreq->dma = dma_map_single(&dev->pdev->dev,
+						     usbreq->buf,
+						     usbreq->length,
+						     DMA_FROM_DEVICE);
 		req->dma_mapped = 1;
 	}
 	if (usbreq->length > 0) {
-		retval = prepare_dma(ep, req, gfp);
+		retval = prepare_dma(ep, req, GFP_ATOMIC);
 		if (retval)
 			goto probe_end;
 	}
@@ -1646,7 +1651,6 @@
 			pch_udc_wait_ep_stall(ep);
 			pch_udc_ep_clear_nak(ep);
 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
-			pch_udc_set_dma(dev, DMA_DIR_TX);
 		}
 	}
 	/* Now add this request to the ep's pending requests */
@@ -1926,6 +1930,7 @@
 	    PCH_UDC_BS_DMA_DONE)
 		return;
 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+	pch_udc_ep_set_ddptr(ep, 0);
 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
 	    PCH_UDC_RTS_SUCC) {
 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
@@ -1963,7 +1968,7 @@
 	u32	epsts;
 	struct pch_udc_ep	*ep;
 
-	ep = &dev->ep[2*ep_num];
+	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
 	epsts = ep->epsts;
 	ep->epsts = 0;
 
@@ -2008,7 +2013,7 @@
 	struct pch_udc_ep		*ep;
 	struct pch_udc_request		*req = NULL;
 
-	ep = &dev->ep[2*ep_num + 1];
+	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
 	epsts = ep->epsts;
 	ep->epsts = 0;
 
@@ -2025,10 +2030,11 @@
 	}
 	if (epsts & UDC_EPSTS_HE)
 		return;
-	if (epsts & UDC_EPSTS_RSS)
+	if (epsts & UDC_EPSTS_RSS) {
 		pch_udc_ep_set_stall(ep);
 		pch_udc_enable_ep_interrupts(ep->dev,
 					     PCH_UDC_EPINT(ep->in, ep->num));
+	}
 	if (epsts & UDC_EPSTS_RCS) {
 		if (!dev->prot_stall) {
 			pch_udc_ep_clear_stall(ep);
@@ -2060,8 +2066,10 @@
 {
 	u32	epsts;
 	struct pch_udc_ep	*ep;
+	struct pch_udc_ep	*ep_out;
 
 	ep = &dev->ep[UDC_EP0IN_IDX];
+	ep_out = &dev->ep[UDC_EP0OUT_IDX];
 	epsts = ep->epsts;
 	ep->epsts = 0;
 
@@ -2073,8 +2081,16 @@
 		return;
 	if (epsts & UDC_EPSTS_HE)
 		return;
-	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall))
+	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
 		pch_udc_complete_transfer(ep);
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
+		ep_out->td_data->status = (ep_out->td_data->status &
+					~PCH_UDC_BUFF_STS) |
+					PCH_UDC_BS_HST_RDY;
+		pch_udc_ep_clear_nak(ep_out);
+		pch_udc_set_dma(dev, DMA_DIR_RX);
+		pch_udc_ep_set_rrdy(ep_out);
+	}
 	/* On IN interrupt, provide data if we have any */
 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
 	     !(epsts & UDC_EPSTS_TXEMPTY))
@@ -2102,11 +2118,9 @@
 		dev->stall = 0;
 		dev->ep[UDC_EP0IN_IDX].halted = 0;
 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
-		/* In data not ready */
-		pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
 		dev->setup_data = ep->td_stp->request;
 		pch_udc_init_setup_buff(ep->td_stp);
-		pch_udc_clear_dma(dev, DMA_DIR_TX);
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
 				      dev->ep[UDC_EP0IN_IDX].in);
 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
@@ -2122,14 +2136,23 @@
 		setup_supported = dev->driver->setup(&dev->gadget,
 						     &dev->setup_data);
 		spin_lock(&dev->lock);
+
+		if (dev->setup_data.bRequestType & USB_DIR_IN) {
+			ep->td_data->status = (ep->td_data->status &
+						~PCH_UDC_BUFF_STS) |
+						PCH_UDC_BS_HST_RDY;
+			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+		}
 		/* ep0 in returns data on IN phase */
 		if (setup_supported >= 0 && setup_supported <
 					    UDC_EP0IN_MAX_PKT_SIZE) {
 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
 			/* Gadget would have queued a request when
 			 * we called the setup */
-			pch_udc_set_dma(dev, DMA_DIR_RX);
-			pch_udc_ep_clear_nak(ep);
+			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
+				pch_udc_set_dma(dev, DMA_DIR_RX);
+				pch_udc_ep_clear_nak(ep);
+			}
 		} else if (setup_supported < 0) {
 			/* if unsupported request, then stall */
 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
@@ -2142,22 +2165,13 @@
 		}
 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
-		if (list_empty(&ep->queue)) {
-			dev_err(&dev->pdev->dev, "%s: No request\n", __func__);
-			ep->td_data->status = (ep->td_data->status &
-					       ~PCH_UDC_BUFF_STS) |
-					       PCH_UDC_BS_HST_RDY;
-			pch_udc_set_dma(dev, DMA_DIR_RX);
-		} else {
-			/* control write */
-			/* next function will pickuo an clear the status */
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
+		pch_udc_ep_set_ddptr(ep, 0);
+		if (!list_empty(&ep->queue)) {
 			ep->epsts = stat;
-
-			pch_udc_svc_data_out(dev, 0);
-			/* re-program desc. pointer for possible ZLPs */
-			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
-			pch_udc_set_dma(dev, DMA_DIR_RX);
+			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
 		}
+		pch_udc_set_dma(dev, DMA_DIR_RX);
 	}
 	pch_udc_ep_set_rrdy(ep);
 }
@@ -2174,7 +2188,7 @@
 	struct pch_udc_ep	*ep;
 	struct pch_udc_request *req;
 
-	ep = &dev->ep[2*ep_num];
+	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
 	if (!list_empty(&ep->queue)) {
 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
 		pch_udc_enable_ep_interrupts(ep->dev,
@@ -2196,13 +2210,13 @@
 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
 		/* IN */
 		if (ep_intr & (0x1 << i)) {
-			ep = &dev->ep[2*i];
+			ep = &dev->ep[UDC_EPIN_IDX(i)];
 			ep->epsts = pch_udc_read_ep_status(ep);
 			pch_udc_clear_ep_status(ep, ep->epsts);
 		}
 		/* OUT */
 		if (ep_intr & (0x10000 << i)) {
-			ep = &dev->ep[2*i+1];
+			ep = &dev->ep[UDC_EPOUT_IDX(i)];
 			ep->epsts = pch_udc_read_ep_status(ep);
 			pch_udc_clear_ep_status(ep, ep->epsts);
 		}
@@ -2563,9 +2577,6 @@
 	dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
 	dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
 
-	dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
-				  PCI_DMA_FROMDEVICE);
-
 	/* remove ep0 in and out from the list.  They have own pointer */
 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
@@ -2637,6 +2648,13 @@
 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
+
+	dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
+	if (!dev->ep0out_buf)
+		return -ENOMEM;
+	dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
+				       UDC_EP0OUT_BUFF_SIZE * 4,
+				       DMA_FROM_DEVICE);
 	return 0;
 }
 
@@ -2700,7 +2718,8 @@
 
 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
 
-	/* Assues that there are no pending requets with this driver */
+	/* Assures that there are no pending requests with this driver */
+	driver->disconnect(&dev->gadget);
 	driver->unbind(&dev->gadget);
 	dev->gadget.dev.driver = NULL;
 	dev->driver = NULL;
@@ -2750,6 +2769,11 @@
 		pci_pool_destroy(dev->stp_requests);
 	}
 
+	if (dev->dma_addr)
+		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
+				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
+	kfree(dev->ep0out_buf);
+
 	pch_udc_exit(dev);
 
 	if (dev->irq_registered)
@@ -2792,11 +2816,7 @@
 	int ret;
 
 	pci_set_power_state(pdev, PCI_D0);
-	ret = pci_restore_state(pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
-		return ret;
-	}
+	pci_restore_state(pdev);
 	ret = pci_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
@@ -2914,6 +2934,11 @@
 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
 		.class_mask = 0xffffffff,
 	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
+		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+		.class_mask = 0xffffffff,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2fc8636..12ff6cf 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -131,31 +131,31 @@
  * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
  */
 
-static ushort __initdata idVendor;
+static ushort idVendor;
 module_param(idVendor, ushort, S_IRUGO);
 MODULE_PARM_DESC(idVendor, "USB Vendor ID");
 
-static ushort __initdata idProduct;
+static ushort idProduct;
 module_param(idProduct, ushort, S_IRUGO);
 MODULE_PARM_DESC(idProduct, "USB Product ID");
 
-static ushort __initdata bcdDevice;
+static ushort bcdDevice;
 module_param(bcdDevice, ushort, S_IRUGO);
 MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
 
-static char *__initdata iManufacturer;
+static char *iManufacturer;
 module_param(iManufacturer, charp, S_IRUGO);
 MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
 
-static char *__initdata iProduct;
+static char *iProduct;
 module_param(iProduct, charp, S_IRUGO);
 MODULE_PARM_DESC(iProduct, "USB Product string");
 
-static char *__initdata iSerialNum;
+static char *iSerialNum;
 module_param(iSerialNum, charp, S_IRUGO);
 MODULE_PARM_DESC(iSerialNum, "1");
 
-static char *__initdata iPNPstring;
+static char *iPNPstring;
 module_param(iPNPstring, charp, S_IRUGO);
 MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
 
@@ -1596,13 +1596,12 @@
 	int status;
 
 	mutex_lock(&usb_printer_gadget.lock_printer_io);
-	class_destroy(usb_gadget_class);
-	unregister_chrdev_region(g_printer_devno, 2);
-
 	status = usb_gadget_unregister_driver(&printer_driver);
 	if (status)
 		ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
 
+	unregister_chrdev_region(g_printer_devno, 2);
+	class_destroy(usb_gadget_class);
 	mutex_unlock(&usb_printer_gadget.lock_printer_io);
 }
 module_exit(cleanup);
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 20d43da..0151185 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -258,7 +258,7 @@
 		break;
 	case R8A66597_BULK:
 		/* isochronous pipes may be used as bulk pipes */
-		if (info->pipe > R8A66597_BASE_PIPENUM_BULK)
+		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
 		else
 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 3b513ba..b015561 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -543,7 +543,7 @@
 	ro = curlun->initially_ro;
 	if (!ro) {
 		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
-		if (-EROFS == PTR_ERR(filp))
+		if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
 			ro = 1;
 	}
 	if (ro)
@@ -558,10 +558,7 @@
 
 	if (filp->f_path.dentry)
 		inode = filp->f_path.dentry->d_inode;
-	if (inode && S_ISBLK(inode->i_mode)) {
-		if (bdev_read_only(inode->i_bdev))
-			ro = 1;
-	} else if (!inode || !S_ISREG(inode->i_mode)) {
+	if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
 		LINFO(curlun, "invalid file type: %s\n", filename);
 		goto out;
 	}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 24046c0..0e6afa2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -151,6 +151,8 @@
 	  Qualcomm chipsets. Root Hub has inbuilt TT.
 	  This driver depends on OTG driver for PHY initialization,
 	  clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout, which has
+	  an external PHY.
 
 config USB_EHCI_HCD_PPC_OF
 	bool "EHCI support for PPC USB controller on OF platform bus"
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 2baf8a8..a869e3c 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -227,8 +227,8 @@
 	 * mark HW unaccessible.  The PM and USB cores make sure that
 	 * the root hub is either suspended or stopped.
 	 */
-	spin_lock_irqsave(&ehci->lock, flags);
 	ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
+	spin_lock_irqsave(&ehci->lock, flags);
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 86e4289..5c761df 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -52,7 +52,6 @@
 	struct resource *res;
 	int irq;
 	int retval;
-	unsigned int temp;
 
 	pr_debug("initializing FSL-SOC USB Controller\n");
 
@@ -126,18 +125,6 @@
 		goto err3;
 	}
 
-	/*
-	 * Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
-	 * flag for 83xx or 8536 system interface registers.
-	 */
-	if (pdata->big_endian_mmio)
-		temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
-	else
-		temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
-
-	if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
-		pdata->have_sysif_regs = 1;
-
 	/* Enable USB controller, 83xx or 8536 */
 	if (pdata->have_sysif_regs)
 		setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 2c83537..3fabed3 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -19,9 +19,6 @@
 #define _EHCI_FSL_H
 
 /* offsets for the non-ehci registers in the FSL SOC USB controller */
-#define FSL_SOC_USB_ID		0x0
-#define ID_MSK			0x3f
-#define NID_MSK			0x3f00
 #define FSL_SOC_USB_ULPIVP	0x170
 #define FSL_SOC_USB_PORTSC1	0x184
 #define PORT_PTS_MSK		(3<<30)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6fee3cd..74dcf49 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -572,6 +572,8 @@
 	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
 	ehci->iaa_watchdog.data = (unsigned long) ehci;
 
+	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
 	/*
 	 * hw default: 1K periodic list heads, one per frame.
 	 * periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -579,11 +581,20 @@
 	ehci->periodic_size = DEFAULT_I_TDPS;
 	INIT_LIST_HEAD(&ehci->cached_itd_list);
 	INIT_LIST_HEAD(&ehci->cached_sitd_list);
+
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		switch (EHCI_TUNE_FLS) {
+		case 0: ehci->periodic_size = 1024; break;
+		case 1: ehci->periodic_size = 512; break;
+		case 2: ehci->periodic_size = 256; break;
+		default:	BUG();
+		}
+	}
 	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
 		return retval;
 
 	/* controllers may cache some of the periodic schedule ... */
-	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
 	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
 		ehci->i_thresh = 2 + 8;
 	else					// N microframes cached
@@ -637,12 +648,6 @@
 		/* periodic schedule size can be smaller than default */
 		temp &= ~(3 << 2);
 		temp |= (EHCI_TUNE_FLS << 2);
-		switch (EHCI_TUNE_FLS) {
-		case 0: ehci->periodic_size = 1024; break;
-		case 1: ehci->periodic_size = 512; break;
-		case 2: ehci->periodic_size = 256; break;
-		default:	BUG();
-		}
 	}
 	if (HCC_LPM(hcc_params)) {
 		/* support link power management EHCI 1.1 addendum */
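
The hunk above reads HCCPARAMS and selects the periodic frame list size before ehci_mem_init() runs, so the periodic schedule is allocated at its final size rather than being shrunk afterwards. The mapping the moved switch encodes, as a minimal standalone sketch (hypothetical helper name, not part of the patch):

/* EHCI programmable frame list: FLS 0/1/2 select 1024/512/256 entries;
 * only meaningful when HCC_PGM_FRAMELISTLEN reports the feature.
 */
static unsigned int example_fls_to_periodic_size(unsigned int fls)
{
	switch (fls) {
	case 0:  return 1024;
	case 1:  return 512;
	case 2:  return 256;
	default: return 1024;	/* 3 is reserved; fall back to the default */
	}
}
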
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c..8a515f0 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -111,6 +111,7 @@
 {
 	int		port;
 	u32		temp;
+	unsigned long	flags;
 
 	/* If remote wakeup is enabled for the root hub but disabled
 	 * for the controller, we must adjust all the port wakeup flags
@@ -120,6 +121,8 @@
 	if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
 		return;
 
+	spin_lock_irqsave(&ehci->lock, flags);
+
 	/* clear phy low-power mode before changing wakeup flags */
 	if (ehci->has_hostpc) {
 		port = HCS_N_PORTS(ehci->hcs_params);
@@ -131,7 +134,9 @@
 			temp = ehci_readl(ehci, hostpc_reg);
 			ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
 		}
+		spin_unlock_irqrestore(&ehci->lock, flags);
 		msleep(5);
+		spin_lock_irqsave(&ehci->lock, flags);
 	}
 
 	port = HCS_N_PORTS(ehci->hcs_params);
@@ -170,6 +175,8 @@
 	/* Does the root hub have a port wakeup pending? */
 	if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
 		usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
 }
 
 static int ehci_bus_suspend (struct usb_hcd *hcd)
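
The hub changes above move ehci->lock acquisition into ehci_prepare_ports_for_controller_suspend() itself and drop the lock around msleep(), since sleeping while holding a spinlock with interrupts disabled is not allowed; the bus-glue suspend paths now take the lock only after the helper returns. The general unlock/sleep/relock shape, as a hedged sketch with placeholder names only:

#include <linux/spinlock.h>
#include <linux/delay.h>

static void example_settle_phy(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... clear PHY low-power bits while holding the lock ... */
	spin_unlock_irqrestore(lock, flags);

	msleep(5);	/* may sleep: no spinlock may be held here */

	spin_lock_irqsave(lock, flags);
	/* ... re-read hardware state, which may have changed meanwhile ... */
	spin_unlock_irqrestore(lock, flags);
}
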
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index fa59b26..c8e360d 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -21,10 +21,13 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
 #include <linux/slab.h>
 
 #include <mach/mxc_ehci.h>
 
+#include <asm/mach-types.h>
+
 #define ULPI_VIEWPORT_OFFSET	0x170
 
 struct ehci_mxc_priv {
@@ -114,6 +117,7 @@
 	struct usb_hcd *hcd;
 	struct resource *res;
 	int irq, ret;
+	unsigned int flags;
 	struct ehci_mxc_priv *priv;
 	struct device *dev = &pdev->dev;
 	struct ehci_hcd *ehci;
@@ -177,8 +181,8 @@
 		clk_enable(priv->ahbclk);
 	}
 
-	/* "dr" device has its own clock */
-	if (pdev->id == 0) {
+	/* "dr" device has its own clock on i.MX51 */
+	if (cpu_is_mx51() && (pdev->id == 0)) {
 		priv->phy1clk = clk_get(dev, "usb_phy1");
 		if (IS_ERR(priv->phy1clk)) {
 			ret = PTR_ERR(priv->phy1clk);
@@ -240,6 +244,23 @@
 	if (ret)
 		goto err_add;
 
+	if (pdata->otg) {
+		/*
+		 * efikamx and efikasb have a hardware bug that prevents
+		 * USB from working unless CHRGVBUS is set, in violation
+		 * of the USB specification.
+		 */
+		if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
+			flags = otg_io_read(pdata->otg, ULPI_OTG_CTRL);
+			flags |= ULPI_OTG_CTRL_CHRGVBUS;
+			ret = otg_io_write(pdata->otg, flags, ULPI_OTG_CTRL);
+			if (ret) {
+				dev_err(dev, "unable to set CHRGVBUS\n");
+				goto err_add;
+			}
+		}
+	}
+
 	return 0;
 
 err_add:
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 680f2ef..f784ceb 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -796,7 +796,7 @@
 	hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
 			dev_name(&pdev->dev));
 	if (!hcd) {
-		dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
 		ret = -ENOMEM;
 		goto err_create_hcd;
 	}
@@ -864,7 +864,7 @@
 
 	ret = omap_start_ehc(omap, hcd);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to start ehci\n");
+		dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
 		goto err_start;
 	}
 
@@ -879,7 +879,7 @@
 
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
 		goto err_add_hcd;
 	}
 
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 76179c3..07bb982 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -44,28 +44,35 @@
 	return 0;
 }
 
-static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
+static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
 {
 	struct pci_dev *amd_smbus_dev;
 	u8 rev = 0;
 
 	amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
-	if (!amd_smbus_dev)
-		return 0;
-
-	pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
-	if (rev < 0x40) {
-		pci_dev_put(amd_smbus_dev);
-		amd_smbus_dev = NULL;
-		return 0;
+	if (amd_smbus_dev) {
+		pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+		if (rev < 0x40) {
+			pci_dev_put(amd_smbus_dev);
+			amd_smbus_dev = NULL;
+			return 0;
+		}
+	} else {
+		amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
+		if (!amd_smbus_dev)
+			return 0;
+		pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+		if (rev < 0x11 || rev > 0x18) {
+			pci_dev_put(amd_smbus_dev);
+			amd_smbus_dev = NULL;
+			return 0;
+		}
 	}
 
 	if (!amd_nb_dev)
 		amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
-	if (!amd_nb_dev)
-		ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
 
-	ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
+	ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
 
 	pci_dev_put(amd_smbus_dev);
 	amd_smbus_dev = NULL;
@@ -131,7 +138,7 @@
 	/* cache this readonly data; minimize chip reads */
 	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
-	if (ehci_quirk_amd_SB800(ehci))
+	if (ehci_quirk_amd_hudson(ehci))
 		ehci->amd_l1_fix = 1;
 
 	retval = ehci_halt(ehci);
@@ -360,8 +367,8 @@
 	 * mark HW unaccessible.  The PM and USB cores make sure that
 	 * the root hub is either suspended or stopped.
 	 */
-	spin_lock_irqsave (&ehci->lock, flags);
 	ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+	spin_lock_irqsave (&ehci->lock, flags);
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36..a6f21b8 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
 
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
 
 /**
  * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 20092a2..12fd184 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -98,13 +98,13 @@
 	usb->intr_nesting_cnt--;
 }
 
-/* diable the usb interrupt */
+/* disable the usb interrupt */
 void fhci_usb_disable_interrupt(struct fhci_usb *usb)
 {
 	struct fhci_hcd *fhci = usb->fhci;
 
 	if (usb->intr_nesting_cnt == 0) {
-		/* diable the timer interrupt */
+		/* disable the timer interrupt */
 		disable_irq_nosync(fhci->timer->irq);
 
 		/* disable the usb interrupt */
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 7be548c..38fe058 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -271,8 +271,8 @@
 
 /*
  * Collect the submitted frames and inform the application about them
- * It is also prepearing the TDs for new frames. If the Tx interrupts
- * are diabled, the application should call that routine to get
+ * It is also preparing the TDs for new frames. If the Tx interrupts
+ * are disabled, the application should call that routine to get
  * confirmation about the submitted frames. Otherwise, the routine is
  * called frome the interrupt service routine during the Tx interrupt.
  * In that case the application is informed by calling the application
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 574b99e..79a66d6 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -262,19 +262,24 @@
 	}
 }
 
-struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
+static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
 	.big_endian_desc = 1,
 	.big_endian_mmio = 1,
 	.es = 1,
+	.have_sysif_regs = 0,
 	.le_setup_buf = 1,
 	.init = fsl_usb2_mpc5121_init,
 	.exit = fsl_usb2_mpc5121_exit,
 };
 #endif /* CONFIG_PPC_MPC512x */
 
+static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
+	.have_sysif_regs = 1,
+};
+
 static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
-	{ .compatible = "fsl-usb2-mph", },
-	{ .compatible = "fsl-usb2-dr", },
+	{ .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
+	{ .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
 #ifdef CONFIG_PPC_MPC512x
 	{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
 #endif
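
Routing default platform data through the .data member of the match table lets the probe path pick per-compatible defaults (have_sysif_regs set for the generic 8xxx entries, cleared for MPC5121). A hedged sketch of how such data is typically retrieved at probe time (illustrative names; not this file's actual probe code):

#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/fsl_devices.h>

static int example_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	const struct fsl_usb2_platform_data *defaults;

	/* fsl_usb2_mph_dr_of_match is the table shown above */
	match = of_match_device(fsl_usb2_mph_dr_of_match, &ofdev->dev);
	if (!match)
		return -ENODEV;

	defaults = match->data;	/* &fsl_usb2_mpc8xxx_pd or &fsl_usb2_mpc5121_pd */
	/* ... copy *defaults into the child device's platform data, then
	 * override individual fields from device tree properties ...
	 */
	return 0;
}
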
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index e49b75a..f90d003 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1658,7 +1658,7 @@
 
 	spin_lock_irqsave(&imx21->lock, flags);
 
-	/* Reset the Host controler modules */
+	/* Reset the Host controller modules */
 	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
 		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
 		imx21->regs + USBOTG_RST_CTRL);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 32149be..e0cb12b 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3094,7 +3094,7 @@
 
 	/* Some boards (mostly VIA?) report bogus overcurrent indications,
 	 * causing massive log spam unless we completely ignore them.  It
-	 * may be relevant that VIA VT8235 controlers, where PORT_POWER is
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
 	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
 	 * PORT_POWER; that's surprising, but maybe within-spec.
 	 */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 990f06b..2e9602a 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -861,6 +861,7 @@
 			DBG("dev %d ep%d maxpacket %d\n",
 				udev->devnum, epnum, ep->maxpacket);
 			retval = -EINVAL;
+			kfree(ep);
 			goto fail;
 		}
 
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4ab..0231814 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@
 	}
 }
 
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
 {
-	void *addr;
+	struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+	void __iomem *addr;
 	u32 temp;
 	u64 temp_64;
 
@@ -449,7 +450,7 @@
 	}
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
@@ -488,7 +489,7 @@
 		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
 		     struct xhci_container_ctx *ctx,
 		     unsigned int last_ep)
 {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f..a953439 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@
 
 /***************** Streams structures manipulation *************************/
 
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
 		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
@@ -335,7 +335,7 @@
  * The stream context array must be a power of 2, and can be as small as
  * 64 bytes or as large as 1MB.
  */
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs, dma_addr_t *dma,
 		gfp_t mem_flags)
 {
@@ -1900,11 +1900,11 @@
 	val &= DBOFF_MASK;
 	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
 			" from cap regs base addr\n", val);
-	xhci->dba = (void *) xhci->cap_regs + val;
+	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
 	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = (void *) xhci->run_regs->ir_set;
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
 	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index df558f6..3289bf4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -308,11 +308,8 @@
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
-	u32 temp;
-
 	xhci_dbg(xhci, "// Ding dong!\n");
-	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
-	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 	/* Flush PCI posted writes */
 	xhci_readl(xhci, &xhci->dba->doorbell[0]);
 }
@@ -322,26 +319,24 @@
 		unsigned int ep_index,
 		unsigned int stream_id)
 {
-	struct xhci_virt_ep *ep;
-	unsigned int ep_state;
-	u32 field;
 	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	unsigned int ep_state = ep->ep_state;
 
-	ep = &xhci->devs[slot_id]->eps[ep_index];
-	ep_state = ep->ep_state;
 	/* Don't ring the doorbell for this endpoint if there are pending
-	 * cancellations because the we don't want to interrupt processing.
+	 * cancellations because we don't want to interrupt processing.
 	 * We don't want to restart any stream rings if there's a set dequeue
 	 * pointer command pending because the device can choose to start any
 	 * stream once the endpoint is on the HW schedule.
 	 * FIXME - check all the stream rings for pending cancellations.
 	 */
-	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
-			&& !(ep_state & EP_HALTED)) {
-		field = xhci_readl(xhci, db_addr) & DB_MASK;
-		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
-		xhci_writel(xhci, field, db_addr);
-	}
+	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+	    (ep_state & EP_HALTED))
+		return;
+	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+	/* The CPU has better things to do at this point than wait for a
+	 * write-posting flush.  It'll get there soon enough.
+	 */
 }
 
 /* Ring the doorbell for any rings with pending URBs */
@@ -479,8 +474,11 @@
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
-	if (!state->new_deq_seg)
-		BUG();
+	if (!state->new_deq_seg) {
+		WARN_ON(1);
+		return;
+	}
+
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -491,8 +489,10 @@
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 			state->new_deq_ptr,
 			&state->new_cycle_state);
-	if (!state->new_deq_seg)
-		BUG();
+	if (!state->new_deq_seg) {
+		WARN_ON(1);
+		return;
+	}
 
 	trb = &state->new_deq_ptr->generic;
 	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -1188,7 +1188,7 @@
 
 	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
 	temp = xhci_readl(xhci, addr);
-	if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+	if (hcd->state == HC_STATE_SUSPENDED) {
 		xhci_dbg(xhci, "resume root hub\n");
 		usb_hcd_resume_root_hub(hcd);
 	}
@@ -1710,8 +1710,7 @@
 		/* Others already handled above */
 		break;
 	}
-	dev_dbg(&td->urb->dev->dev,
-			"ep %#x - asked for %d bytes, "
+	xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
 			"%d bytes untransferred\n",
 			td->urb->ep->desc.bEndpointAddress,
 			td->urb->transfer_buffer_length,
@@ -2369,12 +2368,13 @@
 
 		/* Scatter gather list entries may cross 64KB boundaries */
 		running_total = TRB_MAX_BUFF_SIZE -
-			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+		running_total &= TRB_MAX_BUFF_SIZE - 1;
 		if (running_total != 0)
 			num_trbs++;
 
 		/* How many more 64KB chunks to transfer, how many more TRBs? */
-		while (running_total < sg_dma_len(sg)) {
+		while (running_total < sg_dma_len(sg) && running_total < temp) {
 			num_trbs++;
 			running_total += TRB_MAX_BUFF_SIZE;
 		}
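
The running_total changes in this and the following hunks make the 64KB boundary math explicit: the distance to the next boundary is reduced modulo TRB_MAX_BUFF_SIZE, so a buffer that starts exactly on a boundary no longer counts a bogus full-size first chunk. A self-contained sketch of the same counting for a plain contiguous buffer (hypothetical names; this mirrors the non-scatter-gather path only):

#include <linux/types.h>

#define EXAMPLE_TRB_MAX_BUFF_SIZE	(1 << 16)	/* 64KB per TRB */

/* How many TRBs does a contiguous buffer of 'len' bytes at 'addr' need? */
static unsigned int example_count_trbs(u64 addr, u32 len)
{
	unsigned int num_trbs = 0;
	u32 running_total;

	/* Bytes available before the next 64KB boundary; 0 when aligned */
	running_total = EXAMPLE_TRB_MAX_BUFF_SIZE -
			(addr & (EXAMPLE_TRB_MAX_BUFF_SIZE - 1));
	running_total &= EXAMPLE_TRB_MAX_BUFF_SIZE - 1;
	if (running_total != 0)
		num_trbs++;

	/* One more TRB for each additional 64KB chunk */
	while (running_total < len) {
		num_trbs++;
		running_total += EXAMPLE_TRB_MAX_BUFF_SIZE;
	}

	if (num_trbs == 0)	/* zero-length transfers still use one TRB */
		num_trbs = 1;

	return num_trbs;
}
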
@@ -2389,7 +2389,8 @@
 	}
 	xhci_dbg(xhci, "\n");
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+		xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
+				"num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				num_trbs);
@@ -2399,11 +2400,11 @@
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
 	if (num_trbs != 0)
-		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
 				"TRBs, %d left\n", __func__,
 				urb->ep->desc.bEndpointAddress, num_trbs);
 	if (running_total != urb->transfer_buffer_length)
-		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
 				"queued %#x (%d), asked for %#x (%d)\n",
 				__func__,
 				urb->ep->desc.bEndpointAddress,
@@ -2414,14 +2415,17 @@
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id, int start_cycle,
-		struct xhci_generic_trb *start_trb, struct xhci_td *td)
+		struct xhci_generic_trb *start_trb)
 {
 	/*
 	 * Pass all the TRBs to the hardware at once and make sure this write
 	 * isn't reordered.
 	 */
 	wmb();
-	start_trb->field[3] |= start_cycle;
+	if (start_cycle)
+		start_trb->field[3] |= start_cycle;
+	else
+		start_trb->field[3] &= ~0x1;
 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
@@ -2449,7 +2453,7 @@
 	 * to set the polling interval (once the API is added).
 	 */
 	if (xhci_interval != ep_interval) {
-		if (!printk_ratelimit())
+		if (printk_ratelimit())
 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
 					" (%d microframe%s) than xHCI "
 					"(%d microframe%s)\n",
@@ -2535,8 +2539,7 @@
 	sg = urb->sg;
 	addr = (u64) sg_dma_address(sg);
 	this_sg_len = sg_dma_len(sg);
-	trb_buff_len = TRB_MAX_BUFF_SIZE -
-		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
 	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
 	if (trb_buff_len > urb->transfer_buffer_length)
 		trb_buff_len = urb->transfer_buffer_length;
@@ -2551,9 +2554,11 @@
 		u32 remainder = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb)
+		if (first_trb) {
 			first_trb = false;
-		else
+			if (start_cycle == 0)
+				field |= 0x1;
+		} else
 			field |= ep_ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
@@ -2572,7 +2577,7 @@
 				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
 				(unsigned int) addr + trb_buff_len);
 		if (TRB_MAX_BUFF_SIZE -
-				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
 			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
 			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@
 		}
 
 		trb_buff_len = TRB_MAX_BUFF_SIZE -
-			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			(addr & (TRB_MAX_BUFF_SIZE - 1));
 		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
 			trb_buff_len =
@@ -2625,7 +2630,7 @@
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2656,7 +2661,8 @@
 	num_trbs = 0;
 	/* How much data is (potentially) left before the 64KB boundary? */
 	running_total = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+	running_total &= TRB_MAX_BUFF_SIZE - 1;
 
 	/* If there's some data on this 64KB chunk, or we have to send a
 	 * zero-length transfer, we need at least one TRB
@@ -2671,7 +2677,8 @@
 	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
+				"addr = %#llx, num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				urb->transfer_buffer_length,
@@ -2699,8 +2706,8 @@
 	/* How much data is in the first TRB? */
 	addr = (u64) urb->transfer_dma;
 	trb_buff_len = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-	if (urb->transfer_buffer_length < trb_buff_len)
+		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+	if (trb_buff_len > urb->transfer_buffer_length)
 		trb_buff_len = urb->transfer_buffer_length;
 
 	first_trb = true;
@@ -2711,9 +2718,11 @@
 		field = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb)
+		if (first_trb) {
 			first_trb = false;
-		else
+			if (start_cycle == 0)
+				field |= 0x1;
+		} else
 			field |= ep_ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2766,7 @@
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2818,13 +2827,17 @@
 	/* Queue setup TRB - see section 6.4.1.2.1 */
 	/* FIXME better way to translate setup_packet into two u32 fields? */
 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+	field = 0;
+	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+	if (start_cycle == 0)
+		field |= 0x1;
 	queue_trb(xhci, ep_ring, false, true,
 			/* FIXME endianness is probably going to bite my ass here. */
 			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
 			setup->wIndex | setup->wLength << 16,
 			TRB_LEN(8) | TRB_INTR_TARGET(0),
 			/* Immediate data in pointer */
-			TRB_IDT | TRB_TYPE(TRB_SETUP));
+			field);
 
 	/* If there's data, queue data TRBs */
 	field = 0;
@@ -2859,7 +2872,7 @@
 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
 	giveback_first_trb(xhci, slot_id, ep_index, 0,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2872,8 +2885,8 @@
 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
 	td_len = urb->iso_frame_desc[i].length;
 
-	running_total = TRB_MAX_BUFF_SIZE -
-			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+	running_total &= TRB_MAX_BUFF_SIZE - 1;
 	if (running_total != 0)
 		num_trbs++;
 
@@ -2900,6 +2913,7 @@
 	int running_total, trb_buff_len, td_len, td_remain_len, ret;
 	u64 start_addr, addr;
 	int i, j;
+	bool more_trbs_coming;
 
 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
@@ -2910,7 +2924,7 @@
 	}
 
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
 				" addr = %#llx, num_tds = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
@@ -2950,7 +2964,10 @@
 				field |= TRB_TYPE(TRB_ISOC);
 				/* Assume URB_ISO_ASAP is set */
 				field |= TRB_SIA;
-				if (i > 0)
+				if (i == 0) {
+					if (start_cycle == 0)
+						field |= 0x1;
+				} else
 					field |= ep_ring->cycle_state;
 				first_trb = false;
 			} else {
@@ -2965,9 +2982,11 @@
 			 */
 			if (j < trbs_per_td - 1) {
 				field |= TRB_CHAIN;
+				more_trbs_coming = true;
 			} else {
 				td->last_trb = ep_ring->enqueue;
 				field |= TRB_IOC;
+				more_trbs_coming = false;
 			}
 
 			/* Calculate TRB length */
@@ -2980,7 +2999,7 @@
 			length_field = TRB_LEN(trb_buff_len) |
 				remainder |
 				TRB_INTR_TARGET(0);
-			queue_trb(xhci, ep_ring, false, false,
+			queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3003,10 +3022,8 @@
 		}
 	}
 
-	wmb();
-	start_trb->field[3] |= start_cycle;
-
-	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -3064,7 +3081,7 @@
 	 * to set the polling interval (once the API is added).
 	 */
 	if (xhci_interval != ep_interval) {
-		if (!printk_ratelimit())
+		if (printk_ratelimit())
 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
 					" (%d microframe%s) than xHCI "
 					"(%d microframe%s)\n",
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45e4a31..2083fc2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
 	u32 temp;
 	int ret;
@@ -226,7 +226,8 @@
 static int xhci_setup_msix(struct xhci_hcd *xhci)
 {
 	int i, ret = 0;
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	/*
 	 * calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@
 			goto disable_msix;
 	}
 
+	hcd->msix_enabled = 1;
 	return ret;
 
 disable_msix:
@@ -280,7 +282,8 @@
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	xhci_free_irq(xhci);
 
@@ -292,6 +295,7 @@
 		pci_disable_msi(pdev);
 	}
 
+	hcd->msix_enabled = 0;
 	return;
 }
 
@@ -325,7 +329,7 @@
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
@@ -469,7 +473,7 @@
 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	if (NUM_TEST_NOOPS > 0)
 		doorbell = xhci_setup_one_noop(xhci);
@@ -508,9 +512,10 @@
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
 	xhci_reset(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	/* Tell the event ring poll function not to reschedule */
 	xhci->zombie = 1;
@@ -523,7 +528,7 @@
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	xhci_dbg(xhci, "cleaning up memory\n");
 	xhci_mem_cleanup(xhci);
@@ -544,9 +549,10 @@
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
 		    xhci_readl(xhci, &xhci->op_regs->status));
 }
@@ -647,6 +653,7 @@
 	int			rc = 0;
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	u32			command;
+	int			i;
 
 	spin_lock_irq(&xhci->lock);
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@
 		spin_unlock_irq(&xhci->lock);
 		return -ETIMEDOUT;
 	}
-	/* step 5: remove core well power */
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	/* step 5: remove core well power */
+	/* synchronize irq when using MSI-X */
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			synchronize_irq(xhci->msix_entries[i].vector);
+	}
+
 	return rc;
 }
 
@@ -694,7 +706,6 @@
 {
 	u32			command, temp = 0;
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
-	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 	int	old_state, retval;
 
 	old_state = hcd->state;
@@ -729,9 +740,8 @@
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
 		xhci_reset(xhci);
-		if (hibernated)
-			xhci_cleanup_msix(xhci);
 		spin_unlock_irq(&xhci->lock);
+		xhci_cleanup_msix(xhci);
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 		/* Tell the event ring poll function not to reschedule */
@@ -745,7 +755,7 @@
 		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 				&xhci->ir_set->irq_pending);
-		xhci_print_ir_set(xhci, xhci->ir_set, 0);
+		xhci_print_ir_set(xhci, 0);
 
 		xhci_dbg(xhci, "cleaning up memory\n");
 		xhci_mem_cleanup(xhci);
@@ -765,30 +775,6 @@
 		return retval;
 	}
 
-	spin_unlock_irq(&xhci->lock);
-	/* Re-setup MSI-X */
-	if (hcd->irq)
-		free_irq(hcd->irq, hcd);
-	hcd->irq = -1;
-
-	retval = xhci_setup_msix(xhci);
-	if (retval)
-		/* fall back to msi*/
-		retval = xhci_setup_msi(xhci);
-
-	if (retval) {
-		/* fall back to legacy interrupt*/
-		retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
-					hcd->irq_descr, hcd);
-		if (retval) {
-			xhci_err(xhci, "request interrupt %d failed\n",
-					pdev->irq);
-			return retval;
-		}
-		hcd->irq = pdev->irq;
-	}
-
-	spin_lock_irq(&xhci->lock);
 	/* step 4: set Run/Stop bit */
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RUN;
@@ -871,7 +857,7 @@
 /* Returns 1 if the arguments are OK;
  * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
 		const char *func) {
 	struct xhci_hcd	*xhci;
@@ -1707,7 +1693,7 @@
 	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_dequeue_state *deq_state)
 {
@@ -2445,8 +2431,12 @@
 		xhci_err(xhci, "Error while assigning device slot ID\n");
 		return 0;
 	}
-	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
-	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+	/* xhci_alloc_virt_device() does not touch rings; no need to lock.
+	 * Use GFP_NOIO, since this function can be called from
+	 * xhci_discover_or_reset_device(), which may be called as part of
+	 * mass storage driver error handling.
+	 */
+	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
 		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
 		spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 170c367..7f127df 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -436,22 +436,18 @@
 /**
  * struct doorbell_array
  *
+ * Bits  0 -  7: Endpoint target
+ * Bits  8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
  * Section 5.6
  */
 struct xhci_doorbell_array {
 	u32	doorbell[256];
 };
 
-#define	DB_TARGET_MASK		0xFFFFFF00
-#define	DB_STREAM_ID_MASK	0x0000FFFF
-#define	DB_TARGET_HOST		0x0
-#define	DB_STREAM_ID_HOST	0x0
-#define	DB_MASK			(0xff << 8)
-
-/* Endpoint Target - bits 0:7 */
-#define EPI_TO_DB(p)		(((p) + 1) & 0xff)
-#define STREAM_ID_TO_DB(p)	(((p) & 0xffff) << 16)
-
+#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST		0x00000000
 
 /**
  * struct xhci_protocol_caps
@@ -1352,7 +1348,7 @@
 }
 
 /* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
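
With the doorbell rework the driver no longer read-modify-writes the doorbell register; it writes a complete value built by DB_VALUE(): bits 0-7 carry the endpoint target (ep_index + 1), bits 8-15 are RsvdZ, and bits 16-31 carry the stream ID. A short worked example of the encoding (values chosen purely for illustration):

/*
 * DB_VALUE(ep, stream) == (((ep) + 1) & 0xff) | ((stream) << 16)
 *
 * ep_index 3 with stream ID 2 therefore encodes as:
 *   DB_VALUE(3, 2) == 0x4 | 0x20000 == 0x00020004
 *
 * The host controller (command ring) doorbell is simply written as
 * DB_VALUE_HOST, i.e. 0.
 */
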
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 44f8b922..a6afd15 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -717,7 +717,7 @@
 		goto exit;
 	}
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL);
 	if (dev == NULL) {
 		dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c9078e4..e573e4704 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -769,7 +769,7 @@
 	int i;
 	int retval = -ENOMEM;
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
 	if (dev == NULL) {
 		dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index edffef6..eefb827 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -642,7 +642,7 @@
 	int i;
 	int retval = -ENOMEM;
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL) {
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 1732d9b..1616ad1 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -45,7 +45,7 @@
 
 static void change_color(struct usb_led *led)
 {
-	int retval;
+	int retval = 0;
 	unsigned char *buffer;
 
 	buffer = kmalloc(8, GFP_KERNEL);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 4ff2158..f7a2057 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -776,7 +776,6 @@
 	{ USB_DEVICE(0x0557, 0x2001) },
 	{ USB_DEVICE(0x0729, 0x1284) },
 	{ USB_DEVICE(0x1293, 0x0002) },
-	{ USB_DEVICE(0x1293, 0x0002) },
 	{ USB_DEVICE(0x050d, 0x0002) },
 	{ }						/* Terminating entry */
 };
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index eeba228..9d49d1c 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -404,6 +404,7 @@
 		musb->xceiv->set_power = bfin_musb_set_power;
 
 	musb->isr = blackfin_interrupt;
+	musb->double_buffer_not_ok = true;
 
 	return 0;
 }
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 07cf394..c292d5c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -128,12 +128,7 @@
 
 static inline struct musb *dev_to_musb(struct device *dev)
 {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	/* usbcore insists dev->driver_data is a "struct hcd *" */
-	return hcd_to_musb(dev_get_drvdata(dev));
-#else
 	return dev_get_drvdata(dev);
-#endif
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1869,6 +1864,7 @@
 	INIT_LIST_HEAD(&musb->out_bulk);
 
 	hcd->uses_new_polling = 1;
+	hcd->has_tt = 1;
 
 	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
 	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -1876,10 +1872,9 @@
 	musb = kzalloc(sizeof *musb, GFP_KERNEL);
 	if (!musb)
 		return NULL;
-	dev_set_drvdata(dev, musb);
 
 #endif
-
+	dev_set_drvdata(dev, musb);
 	musb->mregs = mbase;
 	musb->ctrl_base = mbase;
 	musb->nIrq = -ENODEV;
@@ -2191,7 +2186,7 @@
 	void __iomem	*base;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iomem || irq == 0)
+	if (!iomem || irq <= 0)
 		return -ENODEV;
 
 	base = ioremap(iomem->start, resource_size(iomem));
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d0c236f..e6400be 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -497,6 +497,19 @@
 	struct usb_gadget_driver *gadget_driver;	/* its driver */
 #endif
 
+	/*
+	 * FIXME: Remove this flag.
+	 *
+	 * This is only added to allow Blackfin to work
+	 * with the current driver. For some unknown reason,
+	 * Blackfin doesn't work with double buffering,
+	 * which is enabled by default.
+	 *
+	 * We added this flag to forcefully disable double
+	 * buffering until we get it working.
+	 */
+	unsigned                double_buffer_not_ok:1 __deprecated;
+
 	struct musb_hdrc_config	*config;
 
 #ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 916065b..3a97c4e 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -169,6 +169,9 @@
 							dma_addr_t dma_addr,
 							u32 length);
 	int			(*channel_abort)(struct dma_channel *);
+	int			(*is_compatible)(struct dma_channel *channel,
+							u16 maxpacket,
+							void *buf, u32 length);
 };
 
 /* called after channel_program(), may indicate a fault */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9b162df..2fe3046 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+					(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-				struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	int compatible = true;
+	struct dma_controller *dma = musb->dma_controller;
+
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
+	/* Check if DMA engine can handle this request.
+	 * DMA code must reject the USB request explicitly.
+	 * Default behaviour is to map the request.
+	 */
+	if (dma->is_compatible)
+		compatible = dma->is_compatible(musb_ep->dma,
+				musb_ep->packet_sz, request->request.buf,
+				request->request.length);
+	if (!compatible)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +127,7 @@
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +135,7 @@
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
@@ -121,11 +143,14 @@
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +158,15 @@
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +196,7 @@
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p,  %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +358,7 @@
 			csr);
 
 #ifndef	CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +459,7 @@
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
@@ -658,7 +680,7 @@
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +764,7 @@
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			 if (is_dma_capable() && musb_ep->dma) {
+			 if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -989,7 +1011,11 @@
 		/* Set TXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
 
@@ -1684,7 +1711,7 @@
 	struct musb_hw_ep	*hw_ep;
 	unsigned		count = 0;
 
-	/* intialize endpoint list just once */
+	/* initialize endpoint list just once */
 	INIT_LIST_HEAD(&(musb->g.ep_list));
 
 	for (epnum = 0, hw_ep = musb->endpoints;
@@ -1765,7 +1792,7 @@
  *
  * -EINVAL something went wrong (not driver)
  * -EBUSY another gadget is already using the controller
- * -ENOMEM no memeory to perform the operation
+ * -ENOMEM no memory to perform the operation
  *
  * @param driver the gadget driver
  * @param bind the driver's bind function
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc0..a55354f 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8 tx;			/* endpoint direction */
 	u8 epnum;
-	u8 mapped;
+	enum buffer_map_state map_state;
 };
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)
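
The new buffer_map_state replaces the old boolean 'mapped' flag with three states, so the completion path can tell a driver-mapped buffer (needs dma_unmap_single) from a gadget-premapped one (only needs a CPU sync) and from one that was never mapped at all. A hedged sketch of the resulting unmap decision (illustrative names; not the driver code verbatim):

#include <linux/dma-mapping.h>

enum example_map_state { EXAMPLE_UN_MAPPED = 0, EXAMPLE_PRE_MAPPED, EXAMPLE_MUSB_MAPPED };

static void example_unmap(struct device *dev, dma_addr_t dma, size_t len,
			  enum dma_data_direction dir,
			  enum example_map_state *state)
{
	switch (*state) {
	case EXAMPLE_MUSB_MAPPED:	/* the driver mapped it, so it unmaps it */
		dma_unmap_single(dev, dma, len, dir);
		break;
	case EXAMPLE_PRE_MAPPED:	/* caller pre-mapped it; just sync back to the CPU */
		dma_sync_single_for_cpu(dev, dma, len, dir);
		break;
	case EXAMPLE_UN_MAPPED:		/* never mapped; nothing to undo */
		break;
	}
	*state = EXAMPLE_UN_MAPPED;
}
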
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4d5bcb4..0f523d7 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -609,7 +609,7 @@
 	/* Set RXMAXP with the FIFO size of the endpoint
 	 * to disable double buffer mode.
 	 */
-	if (musb->hwvers < MUSB_HWVERS_2000)
+	if (musb->double_buffer_not_ok)
 		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
 	else
 		musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@
 		/* protocol/endpoint/interval/NAKlimit */
 		if (epnum) {
 			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
-			if (can_bulk_split(musb, qh->type))
+			if (musb->double_buffer_not_ok)
 				musb_writew(epio, MUSB_TXMAXP,
-					packet_sz
-					| ((hw_ep->max_packet_sz_tx /
-						packet_sz) - 1) << 11);
+						hw_ep->max_packet_sz_tx);
 			else
 				musb_writew(epio, MUSB_TXMAXP,
-					packet_sz);
+						qh->maxpacket |
+						((qh->hb_mult - 1) << 11));
 			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
 		} else {
 			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f763d62..21056c9 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -94,24 +94,33 @@
 {
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
-		((u16)((u32) dma_addr & 0xFFFF)));
+		dma_addr);
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
-		((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+		(dma_addr >> 16));
 }
 
 static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
 {
-	return musb_readl(mbase,
+	u32 count = musb_readw(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+	count = count << 16;
+
+	count |= musb_readw(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+	return count;
 }
 
 static inline void musb_write_hsdma_count(void __iomem *mbase,
 				u8 bchannel, u32 len)
 {
-	musb_writel(mbase,
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW), len);
+	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
-		len);
+		(len >> 16));
 }
 
 #endif /* CONFIG_BLACKFIN */
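
On Blackfin the MUSB DMA address and count registers are only 16 bits wide, so the helpers above split each 32-bit value across a LOW/HIGH register pair instead of issuing a single 32-bit access. The underlying arithmetic, as a standalone sketch with hypothetical names:

#include <linux/types.h>

/* Reassemble a 32-bit DMA count from its two 16-bit register halves. */
static inline u32 example_combine_count(u16 high, u16 low)
{
	return ((u32)high << 16) | low;
}

/* Split a 32-bit length into the halves used for the LOW/HIGH writes. */
static inline void example_split_count(u32 len, u16 *high, u16 *low)
{
	*low  = len & 0xffff;
	*high = len >> 16;
}
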
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3f1233..bc8badd 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@
 
 static int omap2430_musb_exit(struct musb *musb)
 {
+	del_timer_sync(&musb_idle_timer);
 
 	omap2430_low_level_exit(musb);
 	otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 9fb875d..9ffc823 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -103,6 +103,8 @@
 	  required after resetting the hardware and power management.
 	  This driver is required even for peripheral only or host only
 	  mode configurations.
+	  This driver is not supported on boards, such as trout, that
+	  have an external PHY.
 
 config AB8500_USB
         tristate "AB8500 USB Transceiver Driver"
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index e70014a..8acf165 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -132,6 +132,8 @@
 
 	platform_set_drvdata(pdev, nop);
 
+	BLOCKING_INIT_NOTIFIER_HEAD(&nop->otg.notifier);
+
 	return 0;
 exit:
 	kfree(nop);
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index 059d9ac..770d799 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -45,7 +45,7 @@
 /* ULPI hardcoded IDs, used for probing */
 static struct ulpi_info ulpi_ids[] = {
 	ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
-	ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB3319"),
+	ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
 };
 
 static int ulpi_set_otg_flags(struct otg_transceiver *otg)
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 63f7cc4..7b8815d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -486,12 +486,22 @@
 	if (actual_length >= 4) {
 		struct ch341_private *priv = usb_get_serial_port_data(port);
 		unsigned long flags;
+		u8 prev_line_status = priv->line_status;
 
 		spin_lock_irqsave(&priv->lock, flags);
 		priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
 		if ((data[1] & CH341_MULT_STAT))
 			priv->multi_status_change = 1;
 		spin_unlock_irqrestore(&priv->lock, flags);
+
+		if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
+			struct tty_struct *tty = tty_port_tty_get(&port->port);
+			if (tty)
+				usb_serial_handle_dcd_change(port, tty,
+					    priv->line_status & CH341_BIT_DCD);
+			tty_kref_put(tty);
+		}
+
 		wake_up_interruptible(&priv->delta_msr_wait);
 	}
 
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8d7731d..735ea03 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -49,7 +49,6 @@
 static void cp210x_break_ctl(struct tty_struct *, int);
 static int cp210x_startup(struct usb_serial *);
 static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
-static int cp210x_carrier_raised(struct usb_serial_port *p);
 
 static int debug;
 
@@ -87,7 +86,6 @@
 	{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
 	{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
 	{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
-	{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
 	{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
 	{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
 	{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
@@ -110,7 +108,9 @@
 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
 	{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+	{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
 	{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -165,8 +165,7 @@
 	.tiocmget 		= cp210x_tiocmget,
 	.tiocmset		= cp210x_tiocmset,
 	.attach			= cp210x_startup,
-	.dtr_rts		= cp210x_dtr_rts,
-	.carrier_raised		= cp210x_carrier_raised
+	.dtr_rts		= cp210x_dtr_rts
 };
 
 /* Config request types */
@@ -765,15 +764,6 @@
 	return result;
 }
 
-static int cp210x_carrier_raised(struct usb_serial_port *p)
-{
-	unsigned int control;
-	cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
-	if (control & CONTROL_DCD)
-		return 1;
-	return 0;
-}
-
 static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
 {
 	struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index b92070c..666e5a6 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -455,7 +455,6 @@
 static int digi_chars_in_buffer(struct tty_struct *tty);
 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
 static void digi_close(struct usb_serial_port *port);
-static int digi_carrier_raised(struct usb_serial_port *port);
 static void digi_dtr_rts(struct usb_serial_port *port, int on);
 static int digi_startup_device(struct usb_serial *serial);
 static int digi_startup(struct usb_serial *serial);
@@ -511,7 +510,6 @@
 	.open =				digi_open,
 	.close =			digi_close,
 	.dtr_rts =			digi_dtr_rts,
-	.carrier_raised =		digi_carrier_raised,
 	.write =			digi_write,
 	.write_room =			digi_write_room,
 	.write_bulk_callback = 		digi_write_bulk_callback,
@@ -1339,14 +1337,6 @@
 	digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
 }
 
-static int digi_carrier_raised(struct usb_serial_port *port)
-{
-	struct digi_port *priv = usb_get_serial_port_data(port);
-	if (priv->dp_modem_signals & TIOCM_CD)
-		return 1;
-	return 0;
-}
-
 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
 	int ret;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a2668d0..f349a36 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -100,6 +100,7 @@
 static int   ftdi_jtag_probe(struct usb_serial *serial);
 static int   ftdi_mtxorb_hack_setup(struct usb_serial *serial);
 static int   ftdi_NDI_device_setup(struct usb_serial *serial);
+static int   ftdi_stmclite_probe(struct usb_serial *serial);
 static void  ftdi_USB_UIRT_setup(struct ftdi_private *priv);
 static void  ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
 
@@ -123,6 +124,10 @@
 	.port_probe = ftdi_HE_TIRA1_setup,
 };
 
+static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+	.probe	= ftdi_stmclite_probe,
+};
+
 /*
  * The 8U232AM has the same API as the sio except for:
  * - it can support MUCH higher baudrates; up to:
@@ -616,6 +621,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
 	{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+	{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
 	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -676,7 +682,17 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
-	{ USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -800,6 +816,8 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
 	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
@@ -1699,6 +1717,25 @@
 }
 
 /*
+ * The first and second ports on STMCLite adaptors are reserved for the JTAG
+ * interface and the fourth port for pio
+ */
+static int ftdi_stmclite_probe(struct usb_serial *serial)
+{
+	struct usb_device *udev = serial->dev;
+	struct usb_interface *interface = serial->interface;
+
+	dbg("%s", __func__);
+
+	if (interface == udev->actconfig->interface[2])
+		return 0;
+
+	dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+
+	return -ENODEV;
+}
+
+/*
  * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
  * We have to correct it if we want to read from it.
  */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index bf08672..117e8e6 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -518,6 +518,12 @@
 #define RATOC_PRODUCT_ID_USB60F	0xb020
 
 /*
+ * Acton Research Corp.
+ */
+#define ACTON_VID		0x0647	/* Vendor ID */
+#define ACTON_SPECTRAPRO_PID	0x0100
+
+/*
  * Contec products (http://www.contec.com)
  * Submitted by Daniel Sangorrin
  */
@@ -569,11 +575,23 @@
 #define OCT_US101_PID		0x0421	/* OCT US101 USB to RS-232 */
 
 /*
- * Icom ID-1 digital transceiver
+ * Definitions for Icom Inc. devices
  */
-
-#define ICOM_ID1_VID            0x0C26
-#define ICOM_ID1_PID            0x0004
+#define ICOM_VID		0x0C26 /* Icom vendor ID */
+/* Note: ID-1 is a communications transceiver for HAM-radio operators */
+#define ICOM_ID_1_PID		0x0004 /* ID-1 USB to RS-232 */
+/* Note: OPC is an optional cable to connect an Icom transceiver */
+#define ICOM_OPC_U_UC_PID	0x0018 /* OPC-478UC, OPC-1122U cloning cable */
+/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
+#define ICOM_ID_RP2C1_PID	0x0009 /* ID-RP2C Asset 1 to RS-232 */
+#define ICOM_ID_RP2C2_PID	0x000A /* ID-RP2C Asset 2 to RS-232 */
+#define ICOM_ID_RP2D_PID	0x000B /* ID-RP2D configuration port */
+#define ICOM_ID_RP2VT_PID	0x000C /* ID-RP2V Transmit config port */
+#define ICOM_ID_RP2VR_PID	0x000D /* ID-RP2V Receive config port */
+#define ICOM_ID_RP4KVT_PID	0x0010 /* ID-RP4000V Transmit config port */
+#define ICOM_ID_RP4KVR_PID	0x0011 /* ID-RP4000V Receive config port */
+#define ICOM_ID_RP2KVT_PID	0x0012 /* ID-RP2000V Transmit config port */
+#define ICOM_ID_RP2KVR_PID	0x0013 /* ID-RP2000V Receive config port */
 
 /*
  * GN Otometrics (http://www.otometrics.com)
@@ -1022,6 +1040,12 @@
 #define WHT_PID			0x0004 /* Wireless Handheld Terminal */
 
 /*
+ * STMicroelectronics
+ */
+#define ST_VID			0x0483
+#define ST_STMCLT1030_PID	0x3747 /* ST Micro Connect Lite STMCLT1030 */
+
+/*
  * Papouch products (http://www.papouch.com/)
  * Submitted by Folkert van Heusden
  */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index e6833e2..e4db5ad 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -479,6 +479,26 @@
 }
 EXPORT_SYMBOL_GPL(usb_serial_handle_break);
 
+/**
+ *	usb_serial_handle_dcd_change - handle a change of carrier detect state
+ *	@usb_port: usb_serial_port structure for the open port
+ *	@tty: tty_struct structure for the port
+ *	@status: new carrier detect status, nonzero if active
+ */
+void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+				struct tty_struct *tty, unsigned int status)
+{
+	struct tty_port *port = &usb_port->port;
+
+	dbg("%s - port %d, status %d", __func__, usb_port->number, status);
+
+	if (status)
+		wake_up_interruptible(&port->open_wait);
+	else if (tty && !C_CLOCAL(tty))
+		tty_hangup(tty);
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
+
 int usb_serial_generic_resume(struct usb_serial *serial)
 {
 	struct usb_serial_port *port;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index cd769ef..3b246d9 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2889,8 +2889,8 @@
 
 	dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
 
-	edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
-	edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
+	edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
+	edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
 	edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
 
 	for (rec = ihex_next_binrec(rec); rec;
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 6ab2a3f..178b22e 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -199,6 +199,7 @@
 		.name		= "epic",
 	},
 	.description		= "EPiC device",
+	.usb_driver		= &io_driver,
 	.id_table		= Epic_port_id_table,
 	.num_ports		= 1,
 	.open			= edge_open,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 12ed594..99b97c0 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1275,6 +1275,7 @@
 		   .name = "iuu_phoenix",
 		   },
 	.id_table = id_table,
+	.usb_driver = &iuu_driver,
 	.num_ports = 1,
 	.bulk_in_size = 512,
 	.bulk_out_size = 512,
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 2d8baf6..ce134dc 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -546,6 +546,7 @@
 		.name		= "keyspan_no_firm",
 	},
 	.description		= "Keyspan - (without firmware)",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_pre_ids,
 	.num_ports		= 1,
 	.attach			= keyspan_fake_startup,
@@ -557,6 +558,7 @@
 		.name		= "keyspan_1",
 	},
 	.description		= "Keyspan 1 port adapter",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_1port_ids,
 	.num_ports		= 1,
 	.open			= keyspan_open,
@@ -579,6 +581,7 @@
 		.name		= "keyspan_2",
 	},
 	.description		= "Keyspan 2 port adapter",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_2port_ids,
 	.num_ports		= 2,
 	.open			= keyspan_open,
@@ -601,6 +604,7 @@
 		.name		= "keyspan_4",
 	},
 	.description		= "Keyspan 4 port adapter",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_4port_ids,
 	.num_ports		= 4,
 	.open			= keyspan_open,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index a10dd56..554a869 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -679,22 +679,6 @@
 	}
 }
 
-static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
-{
-	struct usb_serial *serial = port->serial;
-	unsigned char modembits;
-
-	/* If we can read the modem status and the DCD is low then
-	   carrier is not raised yet */
-	if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
-		if (!(modembits & (1>>6)))
-			return 0;
-	}
-	/* Carrier raised, or we failed (eg disconnected) so
-	   progress accordingly */
-	return 1;
-}
-
 
 static int keyspan_pda_open(struct tty_struct *tty,
 					struct usb_serial_port *port)
@@ -881,7 +865,6 @@
 	.id_table =		id_table_std,
 	.num_ports =		1,
 	.dtr_rts =		keyspan_pda_dtr_rts,
-	.carrier_raised	=	keyspan_pda_carrier_raised,
 	.open =			keyspan_pda_open,
 	.close =		keyspan_pda_close,
 	.write =		keyspan_pda_write,
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index cf17183..653465f 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -44,6 +44,7 @@
 		.name =		"moto-modem",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&moto_driver,
 	.num_ports =		1,
 };
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7487782..5f46838 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -382,7 +382,16 @@
 #define HAIER_VENDOR_ID				0x201e
 #define HAIER_PRODUCT_CE100			0x2009
 
-#define CINTERION_VENDOR_ID			0x0681
+/* Cinterion (formerly Siemens) products */
+#define SIEMENS_VENDOR_ID				0x0681
+#define CINTERION_VENDOR_ID				0x1e2d
+#define CINTERION_PRODUCT_HC25_MDM		0x0047
+#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
+#define CINTERION_PRODUCT_HC28_MDM		0x004C
+#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
+#define CINTERION_PRODUCT_EU3_E			0x0051
+#define CINTERION_PRODUCT_EU3_P			0x0052
+#define CINTERION_PRODUCT_PH8			0x0053
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID			0x0b3c
@@ -944,7 +953,17 @@
 	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
 	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
 	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-	{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+	/* Cinterion */
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
 	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 5be866b..7361320 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -157,6 +157,7 @@
 		.name =		"oti6858",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&oti6858_driver,
 	.num_ports =		1,
 	.open =			oti6858_open,
 	.close =		oti6858_close,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 8ae4c6c..08c9181 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,6 +50,7 @@
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -677,9 +678,11 @@
 {
 
 	struct pl2303_private *priv = usb_get_serial_port_data(port);
+	struct tty_struct *tty;
 	unsigned long flags;
 	u8 status_idx = UART_STATE;
 	u8 length = UART_STATE + 1;
+	u8 prev_line_status;
 	u16 idv, idp;
 
 	idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
@@ -701,11 +704,20 @@
 
 	/* Save off the uart status for others to look at */
 	spin_lock_irqsave(&priv->lock, flags);
+	prev_line_status = priv->line_status;
 	priv->line_status = data[status_idx];
 	spin_unlock_irqrestore(&priv->lock, flags);
 	if (priv->line_status & UART_BREAK_ERROR)
 		usb_serial_handle_break(port);
 	wake_up_interruptible(&priv->delta_msr_wait);
+
+	tty = tty_port_tty_get(&port->port);
+	if (!tty)
+		return;
+	if ((priv->line_status ^ prev_line_status) & UART_DCD)
+		usb_serial_handle_dcd_change(port, tty,
+				priv->line_status & UART_DCD);
+	tty_kref_put(tty);
 }
 
 static void pl2303_read_int_callback(struct urb *urb)
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 43eb9bd..1b025f7 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -21,6 +21,7 @@
 #define PL2303_PRODUCT_ID_MMX		0x0612
 #define PL2303_PRODUCT_ID_GPRS		0x0609
 #define PL2303_PRODUCT_ID_HCR331	0x331a
+#define PL2303_PRODUCT_ID_MOTOROLA	0x0307
 
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2		0x0547
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 214a3e5..30b73e6 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -36,6 +36,7 @@
 #define UTSTARCOM_PRODUCT_UM175_V1		0x3712
 #define UTSTARCOM_PRODUCT_UM175_V2		0x3714
 #define UTSTARCOM_PRODUCT_UM175_ALLTEL		0x3715
+#define PANTECH_PRODUCT_UML290_VZW		0x3718
 
 /* CMOTECH devices */
 #define CMOTECH_VENDOR_ID			0x16d8
@@ -66,6 +67,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -84,6 +86,7 @@
 		.name =		"qcaux",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&qcaux_driver,
 	.num_ports =		1,
 };
 
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index cb8195c..74cd4cc 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -42,6 +42,7 @@
 		.name =		"siemens_mpi",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&siemens_usb_mpi_driver,
 	.num_ports =		1,
 };
 
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 7481ff8..0457813 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@
 	{ USB_DEVICE(0x1199, 0x68A3), 	/* Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x0f3d, 0x68A3), 	/* Airprime/Sierra Wireless Direct IP modems */
+	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+	},
        { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
 
 	{ }
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 765aa98..cbfb70b 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -133,7 +133,7 @@
 
 /* how come ??? */
 #define UART_STATE			0x08
-#define UART_STATE_TRANSIENT_MASK	0x74
+#define UART_STATE_TRANSIENT_MASK	0x75
 #define UART_DCD			0x01
 #define UART_DSR			0x02
 #define UART_BREAK_ERROR		0x04
@@ -525,6 +525,10 @@
 		/* overrun is special, not associated with a char */
 		if (status & UART_OVERRUN_ERROR)
 			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+		if (status & UART_DCD)
+			usb_serial_handle_dcd_change(port, tty,
+				   priv->line_status & MSR_STATUS_LINE_DCD);
 	}
 
 	tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
@@ -645,6 +649,7 @@
 		.name =		"SPCP8x5",
 	},
 	.id_table		= id_table,
+	.usb_driver		= &spcp8x5_driver,
 	.num_ports		= 1,
 	.open 			= spcp8x5_open,
 	.dtr_rts		= spcp8x5_dtr_rts,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index b2902f3..a910004 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -369,9 +369,9 @@
 
 static void __exit ti_exit(void)
 {
+	usb_deregister(&ti_usb_driver);
 	usb_serial_deregister(&ti_1port_device);
 	usb_serial_deregister(&ti_2port_device);
-	usb_deregister(&ti_usb_driver);
 }
 
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6954de5..546a521 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1344,11 +1344,15 @@
 		return -ENODEV;
 
 	fixup_generic(driver);
-	if (driver->usb_driver)
-		driver->usb_driver->supports_autosuspend = 1;
 
 	if (!driver->description)
 		driver->description = driver->driver.name;
+	if (!driver->usb_driver) {
+		WARN(1, "Serial driver %s has no usb_driver\n",
+				driver->description);
+		return -EINVAL;
+	}
+	driver->usb_driver->supports_autosuspend = 1;
 
 	/* Add this device to our list of devices */
 	mutex_lock(&table_lock);
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index f2ed6a3..95a8214 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -75,6 +75,7 @@
 		.name =		"debug",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&debug_driver,
 	.num_ports =		1,
 	.bulk_out_size =	USB_DEBUG_MAX_PACKET_SIZE,
 	.break_ctl =		usb_debug_break_ctl,
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b004b2a..9c014e2 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -295,12 +295,15 @@
 		    __func__, status, endpoint);
 	} else {
 		tty = tty_port_tty_get(&port->port);
-		if (urb->actual_length) {
-			tty_insert_flip_string(tty, data, urb->actual_length);
-			tty_flip_buffer_push(tty);
-		} else
-			dbg("%s: empty read urb received", __func__);
-		tty_kref_put(tty);
+		if (tty) {
+			if (urb->actual_length) {
+				tty_insert_flip_string(tty, data,
+						urb->actual_length);
+				tty_flip_buffer_push(tty);
+			} else
+				dbg("%s: empty read urb received", __func__);
+			tty_kref_put(tty);
+		}
 
 		/* Resubmit urb so we continue receiving */
 		if (status != -ESHUTDOWN) {
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89..1c11959 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
 #include "visor.h"
 
 /*
@@ -479,6 +480,17 @@
 
 	dbg("%s", __func__);
 
+	/*
+	 * Some Samsung Android phones in modem mode have the same ID
+	 * as the SPH-I500, but they are ACM devices, so don't bind to them
+	 */
+	if (id->idVendor == SAMSUNG_VENDOR_ID &&
+		id->idProduct == SAMSUNG_SPH_I500_ID &&
+		serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+		serial->dev->descriptor.bDeviceSubClass ==
+			USB_CDC_SUBCLASS_ACM)
+		return -ENODEV;
+
 	if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
 		dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
 			serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index c854fde..2c85530 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,4 +31,9 @@
 		"Cypress ISD-300LP",
 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
 
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
+		"Super Top",
+		"USB 2.0  SATA BRIDGE",
+		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
 #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fcc1e32..c1602b8 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1044,6 +1044,15 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BULK32),
 
+/* Reported by <ttkspam@free.fr>
+ * The device reports a vendor-specific device class, requiring an
+ * explicit vendor/product match.
+ */
+UNUSUAL_DEV(  0x0851, 0x1542, 0x0002, 0x0002,
+		"MagicPixel",
+		"FW_Omega2",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
+
 /* Andrew Lunn <andrew@lunn.ch>
  * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
  * on LUN 4.
@@ -1388,6 +1397,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+/* Submitted by Nick Holloway */
+UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+		"VTech",
+		"Kidizoom",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /* Reported by Michael Stattmann <michael@stattmann.com> */
 UNUSUAL_DEV(  0x0fce, 0xd008, 0x0000, 0x0000,
 		"Sony Ericsson",
@@ -1872,6 +1888,22 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_READ_DISC_INFO ),
 
+/* Patch by Richard Schütz <r.schtz@t-online.de>
+ * This external hard drive enclosure uses a JMicron chip which
+ * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
+UNUSUAL_DEV(  0x1e68, 0x001b, 0x0000, 0x0000,
+		"TrekStor GmbH & Co. KG",
+		"DataStation maxi g.u",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+
+/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
+UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+		"Coby Electronics",
+		"MP3 Player",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
 		"ST",
 		"2A",
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index c7b1d81..8cb9d80 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -49,7 +49,7 @@
  *
  *  USB Stack port number    4 (1 based)
  *  WUSB code port index     3 (0 based)
- *  USB Addresss             5 (2 based -- 0 is for default, 1 for root hub)
+ *  USB Address              5 (2 based -- 0 is for default, 1 for root hub)
  *
  *  Now, because we don't use the concept as default address exactly
  *  like the (wired) USB code does, we need to kind of skip it. So we
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9b3ca10..f616cef 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -128,8 +128,7 @@
 	size_t hdr_size;
 	struct socket *sock;
 
-	/* TODO: check that we are running from vhost_worker?
-	 * Not sure it's worth it, it's straight-forward enough. */
+	/* TODO: check that we are running from vhost_worker? */
 	sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock)
 		return;
@@ -306,7 +305,8 @@
 	size_t len, total_len = 0;
 	int err;
 	size_t hdr_size;
-	struct socket *sock = rcu_dereference(vq->private_data);
+	/* TODO: check that we are running from vhost_worker? */
+	struct socket *sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
 		return;
 
@@ -415,7 +415,8 @@
 	int err, headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
-	struct socket *sock = rcu_dereference(vq->private_data);
+	/* TODO: check that we are running from vhost_worker? */
+	struct socket *sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
 		return;
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38244f5..ade0568 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -97,22 +97,26 @@
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+				unsigned seq)
+{
+	int left;
+	spin_lock_irq(&dev->work_lock);
+	left = seq - work->done_seq;
+	spin_unlock_irq(&dev->work_lock);
+	return left <= 0;
+}
+
 static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned seq;
-	int left;
 	int flushing;
 
 	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
 	spin_unlock_irq(&dev->work_lock);
-	wait_event(work->done, ({
-		   spin_lock_irq(&dev->work_lock);
-		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&dev->work_lock);
-		   left;
-	}));
+	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
 	spin_unlock_irq(&dev->work_lock);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2af44b7..b3363ae 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,9 +173,9 @@
 {
 	unsigned acked_features;
 
-	acked_features =
-		rcu_dereference_index_check(dev->acked_features,
-					    lockdep_is_held(&dev->mutex));
+	/* TODO: check that we are running from vhost_worker or dev mutex is
+	 * held? */
+	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
 	return acked_features & (1 << bit);
 }
 
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 55dc6fb..6bafb51b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -11,6 +11,13 @@
 config HAVE_FB_IMX
 	bool
 
+config SH_MIPI_DSI
+	tristate
+	depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
+
+config SH_LCD_MIPI_DSI
+	bool
+
 source "drivers/char/agp/Kconfig"
 
 source "drivers/gpu/vga/Kconfig"
@@ -414,7 +421,7 @@
 	  Y here.
 
 config FB_IMX
-	tristate "Motorola i.MX LCD support"
+	tristate "Freescale i.MX LCD support"
 	depends on FB && (HAVE_FB_IMX || ARCH_MX1 || ARCH_MX2)
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
@@ -1220,7 +1227,7 @@
 
 config FB_INTEL
 	tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EMBEDDED
+	depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EXPERT
 	select FB_MODE_HELPERS
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
@@ -1273,7 +1280,7 @@
 	  module will be called matroxfb.
 
 	  You can pass several parameters to the driver at boot time or at
-	  module load time. The parameters look like "video=matrox:XXX", and
+	  module load time. The parameters look like "video=matroxfb:XXX", and
 	  are described in <file:Documentation/fb/matroxfb.txt>.
 
 config FB_MATROX_MILLENIUM
@@ -1990,13 +1997,6 @@
 
 	  If unsure, say N.
 
-config SH_MIPI_DSI
-	tristate
-	depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
-
-config SH_LCD_MIPI_DSI
-	bool
-
 config FB_SH_MOBILE_LCDC
 	tristate "SuperH Mobile LCDC framebuffer support"
 	depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index d583bea..391ac93 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -23,7 +23,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -1091,12 +1091,12 @@
 
 	dev_info(info->device, "suspend\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -1107,7 +1107,7 @@
 	pci_set_power_state(dev, pci_choose_state(dev, state));
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1122,7 +1122,7 @@
 
 	dev_info(info->device, "resume\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if (par->ref_count == 0)
@@ -1141,7 +1141,7 @@
 
 fail:
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #else
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8dce251..bac16345 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -111,7 +111,7 @@
 	return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
 }
 
-static struct backlight_ops atmel_lcdc_bl_ops = {
+static const struct backlight_ops atmel_lcdc_bl_ops = {
 	.update_status = atmel_bl_update_status,
 	.get_brightness = atmel_bl_get_brightness,
 };
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 34a0851..4cb6a57 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1786,7 +1786,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops aty128_bl_data = {
+static const struct backlight_ops aty128_bl_data = {
 	.get_brightness	= aty128_bl_get_brightness,
 	.update_status	= aty128_bl_update_status,
 };
@@ -1860,11 +1860,11 @@
 {
         struct aty128fb_par *par = data;
 
-	if (try_acquire_console_sem())
+	if (!console_trylock())
 		return;
 	pci_restore_state(par->pdev);
 	aty128_do_resume(par->pdev);
-	release_console_sem();
+	console_unlock();
 }
 #endif /* CONFIG_PPC_PMAC */
 
@@ -2438,7 +2438,7 @@
 
 	printk(KERN_DEBUG "aty128fb: suspending...\n");
 	
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -2470,7 +2470,7 @@
 	if (state.event != PM_EVENT_ON)
 		aty128_set_suspend(par, 1);
 
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = state;
 
@@ -2527,9 +2527,9 @@
 {
 	int rc;
 
-	acquire_console_sem();
+	console_lock();
 	rc = aty128_do_resume(pdev);
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 5a3ce3a..94e293f 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2069,7 +2069,7 @@
 	if (state.event == pdev->dev.power.power_state.event)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -2097,14 +2097,14 @@
 		par->lock_blank = 0;
 		atyfb_blank(FB_BLANK_UNBLANK, info);
 		fb_set_suspend(info, 0);
-		release_console_sem();
+		console_unlock();
 		return -EIO;
 	}
 #else
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 #endif
 
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = state;
 
@@ -2133,7 +2133,7 @@
 	if (pdev->dev.power.power_state.event == PM_EVENT_ON)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	/*
 	 * PCI state will have been restored by the core, so
@@ -2161,7 +2161,7 @@
 	par->lock_blank = 0;
 	atyfb_blank(FB_BLANK_UNBLANK, info);
 
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = PMSG_ON;
 
@@ -2221,7 +2221,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops aty_bl_data = {
+static const struct backlight_ops aty_bl_data = {
 	.get_brightness = aty_bl_get_brightness,
 	.update_status	= aty_bl_update_status,
 };
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 256966e..9b811dd 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -128,7 +128,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops radeon_bl_data = {
+static const struct backlight_ops radeon_bl_data = {
 	.get_brightness = radeon_bl_get_brightness,
 	.update_status	= radeon_bl_update_status,
 };
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index c4e1764..92bda58 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2626,7 +2626,7 @@
 		goto done;
 	}
 
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -2690,7 +2690,7 @@
 	if (rinfo->pm_mode & radeon_pm_d2)
 		radeon_set_suspend(rinfo, 1);
 
-	release_console_sem();
+	console_unlock();
 
  done:
 	pdev->dev.power.power_state = mesg;
@@ -2715,10 +2715,10 @@
 		return 0;
 
 	if (rinfo->no_schedule) {
-		if (try_acquire_console_sem())
+		if (!console_trylock())
 			return 0;
 	} else
-		acquire_console_sem();
+		console_lock();
 
 	printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n",
 	       pci_name(pdev), pdev->dev.power.power_state.event);
@@ -2783,7 +2783,7 @@
 	pdev->dev.power.power_state = PMSG_ON;
 
  bail:
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 38ffc3f..b224396 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -21,7 +21,7 @@
 #define MAX_BRIGHTNESS		(0xFF)
 #define MIN_BRIGHTNESS		(0)
 
-#define CURRENT_MASK		(0x1F << 1)
+#define CURRENT_BITMASK		(0x1F << 1)
 
 struct pm860x_backlight_data {
 	struct pm860x_chip *chip;
@@ -85,7 +85,7 @@
 	if ((data->current_brightness == 0) && brightness) {
 		if (data->iset) {
 			ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
-					      CURRENT_MASK, data->iset);
+					      CURRENT_BITMASK, data->iset);
 			if (ret < 0)
 				goto out;
 		}
@@ -155,7 +155,7 @@
 	return -EINVAL;
 }
 
-static struct backlight_ops pm860x_backlight_ops = {
+static const struct backlight_ops pm860x_backlight_ops = {
 	.options	= BL_CORE_SUSPENDRESUME,
 	.update_status	= pm860x_backlight_update_status,
 	.get_brightness	= pm860x_backlight_get_brightness,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index c67801e..98ad3e5 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -25,7 +25,7 @@
 struct l4f00242t03_priv {
 	struct spi_device	*spi;
 	struct lcd_device	*ld;
-	int lcd_on:1;
+	int lcd_state;
 	struct regulator *io_reg;
 	struct regulator *core_reg;
 };
@@ -62,11 +62,36 @@
 		regulator_enable(priv->core_reg);
 	}
 
+	l4f00242t03_reset(pdata->reset_gpio);
+
 	gpio_set_value(pdata->data_enable_gpio, 1);
 	msleep(60);
 	spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
 }
 
+static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
+{
+	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+	dev_dbg(&spi->dev, "Powering down LCD\n");
+
+	gpio_set_value(pdata->data_enable_gpio, 0);
+
+	if (priv->io_reg)
+		regulator_disable(priv->io_reg);
+
+	if (priv->core_reg)
+		regulator_disable(priv->core_reg);
+}
+
+static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
+{
+	struct l4f00242t03_priv *priv = lcd_get_data(ld);
+
+	return priv->lcd_state;
+}
+
 static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
 {
 	struct l4f00242t03_priv *priv = lcd_get_data(ld);
@@ -79,35 +104,54 @@
 	const u16 disoff = 0x28;
 
 	if (power <= FB_BLANK_NORMAL) {
-		if (priv->lcd_on)
-			return 0;
+		if (priv->lcd_state <= FB_BLANK_NORMAL) {
+			/* Do nothing, the LCD is running */
+		} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+			dev_dbg(&spi->dev, "Resuming LCD\n");
 
-		dev_dbg(&spi->dev, "turning on LCD\n");
+			spi_write(spi, (const u8 *)&slpout, sizeof(u16));
+			msleep(60);
+			spi_write(spi, (const u8 *)&dison, sizeof(u16));
+		} else {
+			/* priv->lcd_state == FB_BLANK_POWERDOWN */
+			l4f00242t03_lcd_init(spi);
+			priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+			l4f00242t03_lcd_power_set(priv->ld, power);
+		}
+	} else if (power < FB_BLANK_POWERDOWN) {
+		if (priv->lcd_state <= FB_BLANK_NORMAL) {
+			/* Put the display in standby */
+			dev_dbg(&spi->dev, "Standby the LCD\n");
 
-		spi_write(spi, (const u8 *)&slpout, sizeof(u16));
-		msleep(60);
-		spi_write(spi, (const u8 *)&dison, sizeof(u16));
-
-		priv->lcd_on = 1;
+			spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+			msleep(60);
+			spi_write(spi, (const u8 *)&slpin, sizeof(u16));
+		} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+			/* Do nothing, the LCD is already in standby */
+		} else {
+			/* priv->lcd_state == FB_BLANK_POWERDOWN */
+			l4f00242t03_lcd_init(spi);
+			priv->lcd_state = FB_BLANK_UNBLANK;
+			l4f00242t03_lcd_power_set(ld, power);
+		}
 	} else {
-		if (!priv->lcd_on)
-			return 0;
-
-		dev_dbg(&spi->dev, "turning off LCD\n");
-
-		spi_write(spi, (const u8 *)&disoff, sizeof(u16));
-		msleep(60);
-		spi_write(spi, (const u8 *)&slpin, sizeof(u16));
-
-		priv->lcd_on = 0;
+		/* power == FB_BLANK_POWERDOWN */
+		if (priv->lcd_state != FB_BLANK_POWERDOWN) {
+			/* Clear the screen before shutting down */
+			spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+			msleep(60);
+			l4f00242t03_lcd_powerdown(spi);
+		}
 	}
 
+	priv->lcd_state = power;
+
 	return 0;
 }
 
 static struct lcd_ops l4f_ops = {
 	.set_power	= l4f00242t03_lcd_power_set,
-	.get_power	= NULL,
+	.get_power	= l4f00242t03_lcd_power_get,
 };
 
 static int __devinit l4f00242t03_probe(struct spi_device *spi)
@@ -185,9 +229,9 @@
 	}
 
 	/* Init the LCD */
-	l4f00242t03_reset(pdata->reset_gpio);
 	l4f00242t03_lcd_init(spi);
-	l4f00242t03_lcd_power_set(priv->ld, 1);
+	priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+	l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK);
 
 	dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
 
@@ -214,9 +258,11 @@
 	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
 	struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
 
-	l4f00242t03_lcd_power_set(priv->ld, 0);
+	l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(priv->ld);
 
+	dev_set_drvdata(&spi->dev, NULL);
+
 	gpio_free(pdata->data_enable_gpio);
 	gpio_free(pdata->reset_gpio);
 
@@ -230,6 +276,15 @@
 	return 0;
 }
 
+static void l4f00242t03_shutdown(struct spi_device *spi)
+{
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+	if (priv)
+		l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
+
+}
+
 static struct spi_driver l4f00242t03_driver = {
 	.driver = {
 		.name	= "l4f00242t03",
@@ -237,6 +292,7 @@
 	},
 	.probe		= l4f00242t03_probe,
 	.remove		= __devexit_p(l4f00242t03_remove),
+	.shutdown	= l4f00242t03_shutdown,
 };
 
 static __init int l4f00242t03_init(void)
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 8010aae..dd0e84a 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -239,11 +239,15 @@
 	lcd->spi = spi;
 	lcd->power = FB_BLANK_POWERDOWN;
 	lcd->buffer = kzalloc(8, GFP_KERNEL);
+	if (!lcd->buffer) {
+		ret = -ENOMEM;
+		goto out_free_lcd;
+	}
 
 	ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
 	if (IS_ERR(ld)) {
 		ret = PTR_ERR(ld);
-		goto out_free_lcd;
+		goto out_free_buffer;
 	}
 	lcd->ld = ld;
 
@@ -257,6 +261,8 @@
 
 out_unregister:
 	lcd_device_unregister(ld);
+out_free_buffer:
+	kfree(lcd->buffer);
 out_free_lcd:
 	kfree(lcd);
 	return ret;
@@ -268,6 +274,7 @@
 
 	ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(lcd->ld);
+	kfree(lcd->buffer);
 	kfree(lcd);
 
 	return 0;
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index b2b2c7b..209acc1 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -92,7 +92,7 @@
 	return ret;
 }
 
-static struct backlight_ops max8925_backlight_ops = {
+static const struct backlight_ops max8925_backlight_ops = {
 	.options	= BL_CORE_SUSPENDRESUME,
 	.update_status	= max8925_backlight_update_status,
 	.get_brightness	= max8925_backlight_get_brightness,
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 18c5078..47c21fb 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -696,6 +696,7 @@
 {
 	struct backlight_properties props;
 	dma_addr_t dma_handle;
+	int ret;
 
 	if (request_dma(CH_PPI, KBUILD_MODNAME)) {
 		pr_err("couldn't request PPI DMA\n");
@@ -704,17 +705,16 @@
 
 	if (request_ports()) {
 		pr_err("couldn't request gpio port\n");
-		free_dma(CH_PPI);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out_ports;
 	}
 
 	fb_buffer = dma_alloc_coherent(NULL, TOTAL_VIDEO_MEM_SIZE,
 				       &dma_handle, GFP_KERNEL);
 	if (fb_buffer == NULL) {
 		pr_err("couldn't allocate dma buffer\n");
-		free_dma(CH_PPI);
-		free_ports();
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_dma_coherent;
 	}
 
 	if (L1_DATA_A_LENGTH)
@@ -725,10 +725,8 @@
 
 	if (dma_desc_table == NULL) {
 		pr_err("couldn't allocate dma descriptor\n");
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_table;
 	}
 
 	bfin_lq035_fb.screen_base = (void *)fb_buffer;
@@ -771,31 +769,21 @@
 	bfin_lq035_fb.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
 	if (bfin_lq035_fb.pseudo_palette == NULL) {
 		pr_err("failed to allocate pseudo_palette\n");
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_palette;
 	}
 
 	if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) {
 		pr_err("failed to allocate colormap (%d entries)\n",
 			NBR_PALETTE);
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		kfree(bfin_lq035_fb.pseudo_palette);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out_cmap;
 	}
 
 	if (register_framebuffer(&bfin_lq035_fb) < 0) {
 		pr_err("unable to register framebuffer\n");
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		fb_buffer = NULL;
-		kfree(bfin_lq035_fb.pseudo_palette);
-		fb_dealloc_cmap(&bfin_lq035_fb.cmap);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_reg;
 	}
 
 	i2c_add_driver(&ad5280_driver);
@@ -807,11 +795,31 @@
 
 	lcd_dev = lcd_device_register(KBUILD_MODNAME, &pdev->dev, NULL,
 				      &bfin_lcd_ops);
+	if (IS_ERR(lcd_dev)) {
+		pr_err("unable to register lcd\n");
+		ret = PTR_ERR(lcd_dev);
+		goto out_lcd;
+	}
 	lcd_dev->props.max_contrast = 255,
 
 	pr_info("initialized");
 
 	return 0;
+out_lcd:
+	unregister_framebuffer(&bfin_lq035_fb);
+out_reg:
+	fb_dealloc_cmap(&bfin_lq035_fb.cmap);
+out_cmap:
+	kfree(bfin_lq035_fb.pseudo_palette);
+out_palette:
+out_table:
+	dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
+	fb_buffer = NULL;
+out_dma_coherent:
+	free_ports();
+out_ports:
+	free_dma(CH_PPI);
+	return ret;
 }
 
 static int __devexit bfin_lq035_remove(struct platform_device *pdev)
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index d637e1f..cff742a 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -460,10 +460,10 @@
 	if (!(state.event & PM_EVENT_SLEEP))
 		goto done;
 
-	acquire_console_sem();
+	console_lock();
 	chipsfb_blank(1, p);
 	fb_set_suspend(p, 1);
-	release_console_sem();
+	console_unlock();
  done:
 	pdev->dev.power.power_state = state;
 	return 0;
@@ -473,10 +473,10 @@
 {
         struct fb_info *p = pci_get_drvdata(pdev);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(p, 0);
 	chipsfb_blank(0, p);
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = PMSG_ON;
 	return 0;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 5a35f22..2209e35 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -5,7 +5,7 @@
 menu "Console display driver support"
 
 config VGA_CONSOLE
-	bool "VGA text console" if EMBEDDED || !X86
+	bool "VGA text console" if EXPERT || !X86
 	depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
 	default y
 	help
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 7ccc967..9c092b8 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -375,14 +375,14 @@
 	int c;
 	int mode;
 
-	acquire_console_sem();
+	console_lock();
 	if (ops && ops->currcon != -1)
 		vc = vc_cons[ops->currcon].d;
 
 	if (!vc || !CON_IS_VISIBLE(vc) ||
  	    registered_fb[con2fb_map[vc->vc_num]] != info ||
 	    vc->vc_deccm != 1) {
-		release_console_sem();
+		console_unlock();
 		return;
 	}
 
@@ -392,7 +392,7 @@
 		CM_ERASE : CM_DRAW;
 	ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
 		    get_color(vc, info, c, 0));
-	release_console_sem();
+	console_unlock();
 }
 
 static void cursor_timer_handler(unsigned long dev_addr)
@@ -836,7 +836,7 @@
 
 	found = search_fb_in_map(newidx);
 
-	acquire_console_sem();
+	console_lock();
 	con2fb_map[unit] = newidx;
 	if (!err && !found)
  		err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -863,7 +863,7 @@
 	if (!search_fb_in_map(info_idx))
 		info_idx = newidx;
 
-	release_console_sem();
+	console_unlock();
  	return err;
 }
 
@@ -3321,7 +3321,7 @@
 	if (fbcon_has_exited)
 		return count;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3331,7 +3331,7 @@
 	rotate = simple_strtoul(buf, last, 0);
 	fbcon_rotate(info, rotate);
 err:
-	release_console_sem();
+	console_unlock();
 	return count;
 }
 
@@ -3346,7 +3346,7 @@
 	if (fbcon_has_exited)
 		return count;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3356,7 +3356,7 @@
 	rotate = simple_strtoul(buf, last, 0);
 	fbcon_rotate_all(info, rotate);
 err:
-	release_console_sem();
+	console_unlock();
 	return count;
 }
 
@@ -3369,7 +3369,7 @@
 	if (fbcon_has_exited)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3378,7 +3378,7 @@
 	info = registered_fb[idx];
 	rotate = fbcon_get_rotate(info);
 err:
-	release_console_sem();
+	console_unlock();
 	return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
 }
 
@@ -3392,7 +3392,7 @@
 	if (fbcon_has_exited)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3406,7 +3406,7 @@
 
 	blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
 err:
-	release_console_sem();
+	console_unlock();
 	return snprintf(buf, PAGE_SIZE, "%d\n", blink);
 }
 
@@ -3421,7 +3421,7 @@
 	if (fbcon_has_exited)
 		return count;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3443,7 +3443,7 @@
 	}
 
 err:
-	release_console_sem();
+	console_unlock();
 	return count;
 }
 
@@ -3482,7 +3482,7 @@
 	if (num_registered_fb) {
 		int i;
 
-		acquire_console_sem();
+		console_lock();
 
 		for (i = 0; i < FB_MAX; i++) {
 			if (registered_fb[i] != NULL) {
@@ -3491,7 +3491,7 @@
 			}
 		}
 
-		release_console_sem();
+		console_unlock();
 		fbcon_takeover(0);
 	}
 }
@@ -3552,7 +3552,7 @@
 {
 	int i;
 
-	acquire_console_sem();
+	console_lock();
 	fb_register_client(&fbcon_event_notifier);
 	fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL,
 				     "fbcon");
@@ -3568,7 +3568,7 @@
 	for (i = 0; i < MAX_NR_CONSOLES; i++)
 		con2fb_map[i] = -1;
 
-	release_console_sem();
+	console_unlock();
 	fbcon_start();
 	return 0;
 }
@@ -3591,12 +3591,12 @@
 
 static void __exit fb_console_exit(void)
 {
-	acquire_console_sem();
+	console_lock();
 	fb_unregister_client(&fbcon_event_notifier);
 	fbcon_deinit_device();
 	device_destroy(fb_class, MKDEV(0, 0));
 	fbcon_exit();
-	release_console_sem();
+	console_unlock();
 	unregister_con_driver(&fb_con);
 }	
 
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 915448e..915fd74 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -202,11 +202,7 @@
 	}
 }
 
-/*
- * Called only duing init so call of alloc_bootmen is ok.
- * Marked __init_refok to silence modpost.
- */
-static void __init_refok vgacon_scrollback_startup(void)
+static void vgacon_scrollback_startup(void)
 {
 	vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
 	vgacon_scrollback_init(vga_video_num_columns * 2);
@@ -375,7 +371,8 @@
 	u16 saved1, saved2;
 	volatile u16 *p;
 
-	if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) {
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB ||
+	    screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
 	      no_vga:
 #ifdef CONFIG_DUMMY_CONSOLE
 		conswitchp = &dummy_con;
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index c265aed..8d61ef9 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -1092,9 +1092,10 @@
 
 irq_freq:
 #ifdef CONFIG_CPU_FREQ
+	lcd_da8xx_cpufreq_deregister(par);
+#endif
 err_cpu_freq:
 	unregister_framebuffer(da8xx_fb_info);
-#endif
 
 err_dealloc_cmap:
 	fb_dealloc_cmap(&da8xx_fb_info->cmap);
@@ -1130,14 +1131,14 @@
 	struct fb_info *info = platform_get_drvdata(dev);
 	struct da8xx_fb_par *par = info->par;
 
-	acquire_console_sem();
+	console_lock();
 	if (par->panel_power_ctrl)
 		par->panel_power_ctrl(0);
 
 	fb_set_suspend(info, 1);
 	lcd_disable_raster();
 	clk_disable(par->lcdc_clk);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1146,14 +1147,14 @@
 	struct fb_info *info = platform_get_drvdata(dev);
 	struct da8xx_fb_par *par = info->par;
 
-	acquire_console_sem();
+	console_lock();
 	if (par->panel_power_ctrl)
 		par->panel_power_ctrl(1);
 
 	clk_enable(par->lcdc_clk);
 	lcd_enable_raster();
 	fb_set_suspend(info, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 0c99de0..b358d04 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -483,7 +483,7 @@
 				  info->screen_base, info->fix.smem_start);
 }
 
-static int __init ep93xxfb_probe(struct platform_device *pdev)
+static int __devinit ep93xxfb_probe(struct platform_device *pdev)
 {
 	struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
 	struct fb_info *info;
@@ -598,7 +598,7 @@
 	return err;
 }
 
-static int ep93xxfb_remove(struct platform_device *pdev)
+static int __devexit ep93xxfb_remove(struct platform_device *pdev)
 {
 	struct fb_info *info = platform_get_drvdata(pdev);
 	struct ep93xx_fbi *fbi = info->par;
@@ -622,7 +622,7 @@
 
 static struct platform_driver ep93xxfb_driver = {
 	.probe		= ep93xxfb_probe,
-	.remove		= ep93xxfb_remove,
+	.remove		= __devexit_p(ep93xxfb_remove),
 	.driver = {
 		.name	= "ep93xx-fb",
 		.owner	= THIS_MODULE,
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 4ac1201..e2bf953 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1036,11 +1036,11 @@
 			return -EFAULT;
 		if (!lock_fb_info(info))
 			return -ENODEV;
-		acquire_console_sem();
+		console_lock();
 		info->flags |= FBINFO_MISC_USEREVENT;
 		ret = fb_set_var(info, &var);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
-		release_console_sem();
+		console_unlock();
 		unlock_fb_info(info);
 		if (!ret && copy_to_user(argp, &var, sizeof(var)))
 			ret = -EFAULT;
@@ -1072,9 +1072,9 @@
 			return -EFAULT;
 		if (!lock_fb_info(info))
 			return -ENODEV;
-		acquire_console_sem();
+		console_lock();
 		ret = fb_pan_display(info, &var);
-		release_console_sem();
+		console_unlock();
 		unlock_fb_info(info);
 		if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
 			return -EFAULT;
@@ -1119,11 +1119,11 @@
 	case FBIOBLANK:
 		if (!lock_fb_info(info))
 			return -ENODEV;
-		acquire_console_sem();
+		console_lock();
 		info->flags |= FBINFO_MISC_USEREVENT;
 		ret = fb_blank(info, arg);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
-		release_console_sem();
+		console_unlock();
 		unlock_fb_info(info);
 		break;
 	default:
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 0a08f13..f4a3277 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -90,11 +90,11 @@
 	int err;
 
 	var->activate |= FB_ACTIVATE_FORCE;
-	acquire_console_sem();
+	console_lock();
 	fb_info->flags |= FBINFO_MISC_USEREVENT;
 	err = fb_set_var(fb_info, var);
 	fb_info->flags &= ~FBINFO_MISC_USEREVENT;
-	release_console_sem();
+	console_unlock();
 	if (err)
 		return err;
 	return 0;
@@ -175,7 +175,7 @@
 	if (i * sizeof(struct fb_videomode) != count)
 		return -EINVAL;
 
-	acquire_console_sem();
+	console_lock();
 	list_splice(&fb_info->modelist, &old_list);
 	fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
 				 &fb_info->modelist);
@@ -185,7 +185,7 @@
 	} else
 		fb_destroy_modelist(&old_list);
 
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -301,11 +301,11 @@
 	char *last = NULL;
 	int err;
 
-	acquire_console_sem();
+	console_lock();
 	fb_info->flags |= FBINFO_MISC_USEREVENT;
 	err = fb_blank(fb_info, simple_strtoul(buf, &last, 0));
 	fb_info->flags &= ~FBINFO_MISC_USEREVENT;
-	release_console_sem();
+	console_unlock();
 	if (err < 0)
 		return err;
 	return count;
@@ -364,9 +364,9 @@
 		return -EINVAL;
 	var.yoffset = simple_strtoul(last, &last, 0);
 
-	acquire_console_sem();
+	console_lock();
 	err = fb_pan_display(fb_info, &var);
-	release_console_sem();
+	console_unlock();
 
 	if (err < 0)
 		return err;
@@ -399,9 +399,9 @@
 
 	state = simple_strtoul(buf, &last, 0);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(fb_info, (int)state);
-	release_console_sem();
+	console_unlock();
 
 	return count;
 }
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 70b1d9d..b4f19db 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -344,10 +344,10 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
-		acquire_console_sem();
+		console_lock();
 		gx_powerdown(info);
 		fb_set_suspend(info, 1);
-		release_console_sem();
+		console_unlock();
 	}
 
 	/* there's no point in setting PCI states; we emulate PCI, so
@@ -361,7 +361,7 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 	int ret;
 
-	acquire_console_sem();
+	console_lock();
 	ret = gx_powerup(info);
 	if (ret) {
 		printk(KERN_ERR "gxfb:  power up failed!\n");
@@ -369,7 +369,7 @@
 	}
 
 	fb_set_suspend(info, 0);
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #endif
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 39bdbed..416851c 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -465,10 +465,10 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
-		acquire_console_sem();
+		console_lock();
 		lx_powerdown(info);
 		fb_set_suspend(info, 1);
-		release_console_sem();
+		console_unlock();
 	}
 
 	/* there's no point in setting PCI states; we emulate PCI, so
@@ -482,7 +482,7 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 	int ret;
 
-	acquire_console_sem();
+	console_lock();
 	ret = lx_powerup(info);
 	if (ret) {
 		printk(KERN_ERR "lxfb:  power up failed!\n");
@@ -490,7 +490,7 @@
 	}
 
 	fb_set_suspend(info, 0);
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #else
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 5743ea2..318f6fb 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1574,7 +1574,7 @@
 		return 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(info, 1);
 
 	if (info->fbops->fb_sync)
@@ -1587,7 +1587,7 @@
 	pci_save_state(dev);
 	pci_disable_device(dev);
 	pci_set_power_state(dev, pci_choose_state(dev, mesg));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1605,7 +1605,7 @@
 		return 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 	pci_set_power_state(dev, PCI_D0);
 	pci_restore_state(dev);
 
@@ -1621,7 +1621,7 @@
 	fb_set_suspend (info, 0);
 	info->fbops->fb_blank(VESA_NO_BLANKING, info);
 fail:
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 /***********************************************************************
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 1ab2c25..69bd4a5 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -974,6 +974,6 @@
 module_init(imxfb_init);
 module_exit(imxfb_cleanup);
 
-MODULE_DESCRIPTION("Motorola i.MX framebuffer driver");
+MODULE_DESCRIPTION("Freescale i.MX framebuffer driver");
 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 670ecaa..de36693 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -778,9 +778,9 @@
 {
 	struct jzfb *jzfb = dev_get_drvdata(dev);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(jzfb->fb, 1);
-	release_console_sem();
+	console_unlock();
 
 	mutex_lock(&jzfb->lock);
 	if (jzfb->is_enabled)
@@ -800,9 +800,9 @@
 		jzfb_enable(jzfb);
 	mutex_unlock(&jzfb->lock);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(jzfb->fb, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 052dd9f..a082deb 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1247,46 +1247,46 @@
 };
 
 /* initialized by setup, see explanation at end of file (search for MODULE_PARM_DESC) */
-static unsigned int mem;		/* "matrox:mem:xxxxxM" */
+static unsigned int mem;		/* "matroxfb:mem:xxxxxM" */
 static int option_precise_width = 1;	/* cannot be changed, option_precise_width==0 must imply noaccel */
-static int inv24;			/* "matrox:inv24" */
-static int cross4MB = -1;		/* "matrox:cross4MB" */
-static int disabled;			/* "matrox:disabled" */
-static int noaccel;			/* "matrox:noaccel" */
-static int nopan;			/* "matrox:nopan" */
-static int no_pci_retry;		/* "matrox:nopciretry" */
-static int novga;			/* "matrox:novga" */
-static int nobios;			/* "matrox:nobios" */
-static int noinit = 1;			/* "matrox:init" */
-static int inverse;			/* "matrox:inverse" */
-static int sgram;			/* "matrox:sgram" */
+static int inv24;			/* "matroxfb:inv24" */
+static int cross4MB = -1;		/* "matroxfb:cross4MB" */
+static int disabled;			/* "matroxfb:disabled" */
+static int noaccel;			/* "matroxfb:noaccel" */
+static int nopan;			/* "matroxfb:nopan" */
+static int no_pci_retry;		/* "matroxfb:nopciretry" */
+static int novga;			/* "matroxfb:novga" */
+static int nobios;			/* "matroxfb:nobios" */
+static int noinit = 1;			/* "matroxfb:init" */
+static int inverse;			/* "matroxfb:inverse" */
+static int sgram;			/* "matroxfb:sgram" */
 #ifdef CONFIG_MTRR
-static int mtrr = 1;			/* "matrox:nomtrr" */
+static int mtrr = 1;			/* "matroxfb:nomtrr" */
 #endif
-static int grayscale;			/* "matrox:grayscale" */
-static int dev = -1;			/* "matrox:dev:xxxxx" */
-static unsigned int vesa = ~0;		/* "matrox:vesa:xxxxx" */
-static int depth = -1;			/* "matrox:depth:xxxxx" */
-static unsigned int xres;		/* "matrox:xres:xxxxx" */
-static unsigned int yres;		/* "matrox:yres:xxxxx" */
-static unsigned int upper = ~0;		/* "matrox:upper:xxxxx" */
-static unsigned int lower = ~0;		/* "matrox:lower:xxxxx" */
-static unsigned int vslen;		/* "matrox:vslen:xxxxx" */
-static unsigned int left = ~0;		/* "matrox:left:xxxxx" */
-static unsigned int right = ~0;		/* "matrox:right:xxxxx" */
-static unsigned int hslen;		/* "matrox:hslen:xxxxx" */
-static unsigned int pixclock;		/* "matrox:pixclock:xxxxx" */
-static int sync = -1;			/* "matrox:sync:xxxxx" */
-static unsigned int fv;			/* "matrox:fv:xxxxx" */
-static unsigned int fh;			/* "matrox:fh:xxxxxk" */
-static unsigned int maxclk;		/* "matrox:maxclk:xxxxM" */
-static int dfp;				/* "matrox:dfp */
-static int dfp_type = -1;		/* "matrox:dfp:xxx */
-static int memtype = -1;		/* "matrox:memtype:xxx" */
-static char outputs[8];			/* "matrox:outputs:xxx" */
+static int grayscale;			/* "matroxfb:grayscale" */
+static int dev = -1;			/* "matroxfb:dev:xxxxx" */
+static unsigned int vesa = ~0;		/* "matroxfb:vesa:xxxxx" */
+static int depth = -1;			/* "matroxfb:depth:xxxxx" */
+static unsigned int xres;		/* "matroxfb:xres:xxxxx" */
+static unsigned int yres;		/* "matroxfb:yres:xxxxx" */
+static unsigned int upper = ~0;		/* "matroxfb:upper:xxxxx" */
+static unsigned int lower = ~0;		/* "matroxfb:lower:xxxxx" */
+static unsigned int vslen;		/* "matroxfb:vslen:xxxxx" */
+static unsigned int left = ~0;		/* "matroxfb:left:xxxxx" */
+static unsigned int right = ~0;		/* "matroxfb:right:xxxxx" */
+static unsigned int hslen;		/* "matroxfb:hslen:xxxxx" */
+static unsigned int pixclock;		/* "matroxfb:pixclock:xxxxx" */
+static int sync = -1;			/* "matroxfb:sync:xxxxx" */
+static unsigned int fv;			/* "matroxfb:fv:xxxxx" */
+static unsigned int fh;			/* "matroxfb:fh:xxxxxk" */
+static unsigned int maxclk;		/* "matroxfb:maxclk:xxxxM" */
+static int dfp;				/* "matroxfb:dfp" */
+static int dfp_type = -1;		/* "matroxfb:dfp:xxx" */
+static int memtype = -1;		/* "matroxfb:memtype:xxx" */
+static char outputs[8];			/* "matroxfb:outputs:xxx" */
 
 #ifndef MODULE
-static char videomode[64];		/* "matrox:mode:xxxxx" or "matrox:xxxxx" */
+static char videomode[64];		/* "matroxfb:mode:xxxxx" or "matroxfb:xxxxx" */
 #endif
 
 static int matroxfb_getmemory(struct matrox_fb_info *minfo,
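The renamed comments above tie each module-scope variable to a "matroxfb:<option>" key as it appears on the kernel command line (video=matroxfb:...) or as a module option. The sketch below is not part of the patch: the helper name is hypothetical, only a few keys are shown, and size-suffix handling is omitted; it merely illustrates the usual strsep()-based dispatch such a setup routine performs over those keys.

	static int __init example_matroxfb_setup(char *options)
	{
		char *opt;

		if (!options || !*options)
			return 0;

		/* options is the part after "matroxfb:", e.g. "mem:16,noaccel,nopan" */
		while ((opt = strsep(&options, ",")) != NULL) {
			if (!strncmp(opt, "mem:", 4))
				mem = simple_strtoul(opt + 4, NULL, 0);
			else if (!strcmp(opt, "noaccel"))
				noaccel = 1;
			else if (!strcmp(opt, "nopan"))
				nopan = 1;
			/* ...each remaining "matroxfb:<option>" key follows the same pattern */
		}
		return 1;
	}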
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index d2bb365..48c3ea8 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -32,300 +32,320 @@
 const char *fb_mode_option;
 EXPORT_SYMBOL_GPL(fb_mode_option);
 
-    /*
-     *  Standard video mode definitions (taken from XFree86)
-     */
+/*
+ *  Standard video mode definitions (taken from XFree86)
+ */
 
 static const struct fb_videomode modedb[] = {
-    {
+
 	/* 640x400 @ 70 Hz, 31.5 kHz hsync */
-	NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 60 Hz, 31.5 kHz hsync */
-	NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 800x600 @ 56 Hz, 35.15 kHz hsync */
-	NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
-	NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
-	0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, 0,
+		FB_VMODE_INTERLACED },
+
 	/* 640x400 @ 85 Hz, 37.86 kHz hsync */
-	NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
-	FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
+		FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 72 Hz, 36.5 kHz hsync */
-	NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 75 Hz, 37.50 kHz hsync */
-	NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 800x600 @ 60 Hz, 37.8 kHz hsync */
-	NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 85 Hz, 43.27 kHz hsync */
-	NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
-	NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
-	0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, 0,
+		FB_VMODE_INTERLACED },
 	/* 800x600 @ 72 Hz, 48.0 kHz hsync */
-	NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 60 Hz, 48.4 kHz hsync */
-	NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 100 Hz, 53.01 kHz hsync */
-	NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 60 Hz, 53.5 kHz hsync */
-	NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 800x600 @ 85 Hz, 55.84 kHz hsync */
-	NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 70 Hz, 56.5 kHz hsync */
-	NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
-	NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
-	0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, 0,
+		FB_VMODE_INTERLACED },
+
 	/* 800x600 @ 100 Hz, 64.02 kHz hsync */
-	NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 76 Hz, 62.5 kHz hsync */
-	NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 70 Hz, 62.4 kHz hsync */
-	NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
-	NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1400x1050 @ 60Hz, 63.9 kHz hsync */
-	NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
-	0, FB_VMODE_NONINTERLACED   	
-    }, {
+	{ NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
-	NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
-        NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 85 Hz, 70.24 kHz hsync */
-	NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 78 Hz, 70.8 kHz hsync */
-	NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
-	NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 60Hz, 75.00 kHz hsync */
-	NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 84 Hz, 76.0 kHz hsync */
-	NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
-	NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 100Hz, 80.21 kHz hsync */
-	NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
-	NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
-	NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 100 Hz, 89.62 kHz hsync */
-	NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
-	NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
-	NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
-	NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
-	NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
-	NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1800x1440 @ 64Hz, 96.15 kHz hsync  */
-	NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1800x1440 @ 70Hz, 104.52 kHz hsync  */
-	NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 512x384 @ 78 Hz, 31.50 kHz hsync */
-	NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 512x384 @ 85 Hz, 34.38 kHz hsync */
-	NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
-	NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
-	NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 320x240 @ 72 Hz, 36.5 kHz hsync */
-	NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
-	NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 400x300 @ 60 Hz, 37.8 kHz hsync */
-	NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 400x300 @ 72 Hz, 48.0 kHz hsync */
-	NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
-	NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 60 Hz, 37.8 kHz hsync */
-	NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 63 Hz, 39.6 kHz hsync */
-	NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 72 Hz, 48.0 kHz hsync */
-	NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
-	NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
-	FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-	FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
-	NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
-	NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
-	0, FB_VMODE_NONINTERLACED
-   }, {
+	{ NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
-	NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
-       /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
-       NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
-       0, FB_VMODE_INTERLACED
-    }, {
-       /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
-       NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
-       0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
+	/* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+	{ NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, 0,
+		FB_VMODE_INTERLACED },
+
+	/* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+	{ NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, 0,
+		FB_VMODE_INTERLACED },
+
 	/* 864x480 @ 60 Hz, 35.15 kHz hsync */
-	NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0,
-	0, FB_VMODE_NONINTERLACED
-    },
+	{ NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0,
+		0, FB_VMODE_NONINTERLACED },
 };
 
 #ifdef CONFIG_FB_MODE_HELPERS
 const struct fb_videomode cea_modes[64] = {
 	/* #1: 640x480p@59.94/60Hz */
 	[1] = {
-		NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #3: 720x480p@59.94/60Hz */
 	[3] = {
-		NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #5: 1920x1080i@59.94/60Hz */
 	[5] = {
 		NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_INTERLACED, 0,
 	},
 	/* #7: 720(1440)x480iH@59.94/60Hz */
 	[7] = {
-		NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0, FB_VMODE_INTERLACED, 0,
+		NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
+		FB_VMODE_INTERLACED, 0,
 	},
 	/* #9: 720(1440)x240pH@59.94/60Hz */
 	[9] = {
-		NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #18: 720x576pH@50Hz */
 	[18] = {
-		NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #19: 1280x720p@50Hz */
 	[19] = {
 		NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #20: 1920x1080i@50Hz */
 	[20] = {
 		NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_INTERLACED, 0,
 	},
 	/* #32: 1920x1080p@23.98/24Hz */
 	[32] = {
 		NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #35: (2880)x480p4x@59.94/60Hz */
 	[35] = {
-		NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 };
 
@@ -340,10 +360,10 @@
 	{ NULL, 85, 721, 400, 28169, 108, 36, 42, 01, 72, 3,
 	  FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 3 640x480-60 VESA */
-	{ NULL, 60, 640, 480, 39682,  48, 16, 33, 10, 96, 2, 
+	{ NULL, 60, 640, 480, 39682,  48, 16, 33, 10, 96, 2,
 	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 4 640x480-72 VESA */
-	{ NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2, 
+	{ NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2,
 	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 5 640x480-75 VESA */
 	{ NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3,
@@ -426,7 +446,7 @@
 	  FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	  FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 26 1600x1200-75 VESA */
-	{ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, 
+	{ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
 	  FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	  FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 27 1600x1200-85 VESA */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index cb01391..7e3a490 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1177,9 +1177,9 @@
 	struct mx3fb_data *mx3fb = platform_get_drvdata(pdev);
 	struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(mx3fb->fbi, 1);
-	release_console_sem();
+	console_unlock();
 
 	if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
 		sdc_disable_channel(mx3_fbi);
@@ -1202,9 +1202,9 @@
 		sdc_set_brightness(mx3fb, mx3fb->backlight_level);
 	}
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(mx3fb->fbi, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 81687ed..f838d9e 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -15,6 +15,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -597,9 +598,9 @@
 	}
 
 	fbi->clk = clk_get(&pdev->dev, NULL);
-	if (!fbi->clk || IS_ERR(fbi->clk)) {
+	if (IS_ERR(fbi->clk)) {
 		printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n");
-		ret = -ENOENT;
+		ret = PTR_ERR(fbi->clk);
 		goto release_irq;
 	}
 
@@ -695,6 +696,8 @@
 	nuc900fb_stop_lcd(fbinfo);
 	msleep(1);
 
+	unregister_framebuffer(fbinfo);
+	nuc900fb_cpufreq_deregister(fbi);
 	nuc900fb_unmap_video_memory(fbinfo);
 
 	iounmap(fbi->io);
@@ -722,7 +725,7 @@
 	struct fb_info	   *fbinfo = platform_get_drvdata(dev);
 	struct nuc900fb_info *info = fbinfo->par;
 
-	nuc900fb_stop_lcd();
+	nuc900fb_stop_lcd(fbinfo);
 	msleep(1);
 	clk_disable(info->clk);
 	return 0;
@@ -739,7 +742,7 @@
 	msleep(1);
 
 	nuc900fb_init_registers(fbinfo);
-	nuc900fb_activate_var(bfinfo);
+	nuc900fb_activate_var(fbinfo);
 
 	return 0;
 }
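The nuc900fb clock fix above relies on clk_get() reporting failure through an ERR_PTR()-encoded pointer rather than NULL, so IS_ERR()/PTR_ERR() is the whole check. A minimal sketch of the same pattern in isolation (the function name is illustrative):

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_get_clock(struct device *dev, struct clk **out)
	{
		struct clk *clk = clk_get(dev, NULL);

		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* propagate the encoded errno, not a fixed -ENOENT */

		*out = clk;
		return 0;
	}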
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 2fb552a..6aac6d1 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -87,7 +87,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops nvidia_bl_ops = {
+static const struct backlight_ops nvidia_bl_ops = {
 	.get_brightness = nvidia_bl_get_brightness,
 	.update_status	= nvidia_bl_update_status,
 };
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index efe10ff..081dc47 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1057,7 +1057,7 @@
 
 	if (mesg.event == PM_EVENT_PRETHAW)
 		mesg.event = PM_EVENT_FREEZE;
-	acquire_console_sem();
+	console_lock();
 	par->pm_state = mesg.event;
 
 	if (mesg.event & PM_EVENT_SLEEP) {
@@ -1070,7 +1070,7 @@
 	}
 	dev->dev.power.power_state = mesg;
 
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 
@@ -1079,7 +1079,7 @@
 	struct fb_info *info = pci_get_drvdata(dev);
 	struct nvidia_par *par = info->par;
 
-	acquire_console_sem();
+	console_lock();
 	pci_set_power_state(dev, PCI_D0);
 
 	if (par->pm_state != PM_EVENT_FREEZE) {
@@ -1097,7 +1097,7 @@
 	nvidiafb_blank(FB_BLANK_UNBLANK, info);
 
 fail:
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #else
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 12327bb..940cab3 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -1,11 +1,13 @@
 menu "OMAP2/3 Display Device Drivers"
         depends on OMAP2_DSS
 
-config PANEL_GENERIC
-        tristate "Generic Panel"
+config PANEL_GENERIC_DPI
+        tristate "Generic DPI Panel"
         help
-	  Generic panel driver.
-	  Used for DVI output for Beagle and OMAP3 SDP.
+	  Generic DPI panel driver.
+	  Supports DVI output for Beagle and OMAP3 SDP.
+	  Supports the LCD panels used in TI's SDP3430 and EVM boards,
+	  OMAP3517 EVM boards and the CM-T35.
 
 config PANEL_SHARP_LS037V7DW01
         tristate "Sharp LS037V7DW01 LCD Panel"
@@ -14,11 +16,12 @@
         help
           LCD Panel used in TI's SDP3430 and EVM boards
 
-config PANEL_SHARP_LQ043T1DG01
-        tristate "Sharp LQ043T1DG01 LCD Panel"
-        depends on OMAP2_DSS
-        help
-          LCD Panel used in TI's OMAP3517 EVM boards
+config PANEL_NEC_NL8048HL11_01B
+	tristate "NEC NL8048HL11-01B Panel"
+	depends on OMAP2_DSS
+	help
+		This NEC NL8048HL11-01B panel is a TFT LCD
+		used in the Zoom2/3/3630 SDP boards.
 
 config PANEL_TAAL
         tristate "Taal DSI Panel"
@@ -26,12 +29,6 @@
         help
           Taal DSI command mode panel from TPO.
 
-config PANEL_TOPPOLY_TDO35S
-        tristate "Toppoly TDO35S LCD Panel support"
-        depends on OMAP2_DSS
-        help
-          LCD Panel used in CM-T35
-
 config PANEL_TPO_TD043MTEA1
         tristate "TPO TD043MTEA1 LCD Panel"
         depends on OMAP2_DSS && SPI
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index aa38609..861f025 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -1,8 +1,7 @@
-obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
+obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o
 obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o
+obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o
 
 obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
-obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
 obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
new file mode 100644
index 0000000..07eb30e
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -0,0 +1,365 @@
+/*
+ * Generic DPI Panels support
+ *
+ * Copyright (C) 2010 Canonical Ltd.
+ * Author: Bryan Wu <bryan.wu@canonical.com>
+ *
+ * LCD panel driver for Sharp LQ043T1DG01
+ *
+ * Copyright (C) 2009 Texas Instruments Inc
+ * Author: Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * LCD panel driver for Toppoly TDO35S
+ *
+ * Copyright (C) 2009 CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <plat/panel-generic-dpi.h>
+
+struct panel_config {
+	struct omap_video_timings timings;
+
+	int acbi;	/* ac-bias pin transitions per interrupt */
+	/* Unit: line clocks */
+	int acb;	/* ac-bias pin frequency */
+
+	enum omap_panel_config config;
+
+	int power_on_delay;
+	int power_off_delay;
+
+	/*
+	 * Used to match the device to a panel configuration
+	 * when using the generic panel driver.
+	 */
+	const char *name;
+};
+
+/* Panel configurations */
+static struct panel_config generic_dpi_panels[] = {
+	/* Generic Panel */
+	{
+		{
+			.x_res		= 640,
+			.y_res		= 480,
+
+			.pixel_clock	= 23500,
+
+			.hfp		= 48,
+			.hsw		= 32,
+			.hbp		= 80,
+
+			.vfp		= 3,
+			.vsw		= 4,
+			.vbp		= 7,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x0,
+		.config			= OMAP_DSS_LCD_TFT,
+		.power_on_delay		= 0,
+		.power_off_delay	= 0,
+		.name			= "generic",
+	},
+
+	/* Sharp LQ043T1DG01 */
+	{
+		{
+			.x_res		= 480,
+			.y_res		= 272,
+
+			.pixel_clock	= 9000,
+
+			.hsw		= 42,
+			.hfp		= 3,
+			.hbp		= 2,
+
+			.vsw		= 11,
+			.vfp		= 3,
+			.vbp		= 2,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x0,
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
+		.power_on_delay		= 50,
+		.power_off_delay	= 100,
+		.name			= "sharp_lq",
+	},
+
+	/* Sharp LS037V7DW01 */
+	{
+		{
+			.x_res		= 480,
+			.y_res		= 640,
+
+			.pixel_clock	= 19200,
+
+			.hsw		= 2,
+			.hfp		= 1,
+			.hbp		= 28,
+
+			.vsw		= 1,
+			.vfp		= 1,
+			.vbp		= 1,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x28,
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+						OMAP_DSS_LCD_IHS,
+		.power_on_delay		= 50,
+		.power_off_delay	= 100,
+		.name			= "sharp_ls",
+	},
+
+	/* Toppoly TDO35S */
+	{
+		{
+			.x_res		= 480,
+			.y_res		= 640,
+
+			.pixel_clock	= 26000,
+
+			.hfp		= 104,
+			.hsw		= 8,
+			.hbp		= 8,
+
+			.vfp		= 4,
+			.vsw		= 2,
+			.vbp		= 2,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x0,
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC |
+					OMAP_DSS_LCD_ONOFF,
+		.power_on_delay		= 0,
+		.power_off_delay	= 0,
+		.name			= "toppoly_tdo35s",
+	},
+};
+
+struct panel_drv_data {
+
+	struct omap_dss_device *dssdev;
+
+	struct panel_config *panel_config;
+};
+
+static inline struct panel_generic_dpi_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+	return (struct panel_generic_dpi_data *) dssdev->data;
+}
+
+static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
+{
+	int r;
+	struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+	struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+	struct panel_config *panel_config = drv_data->panel_config;
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+		return 0;
+
+	r = omapdss_dpi_display_enable(dssdev);
+	if (r)
+		goto err0;
+
+	/* wait a couple of vsyncs before enabling the LCD */
+	if (panel_config->power_on_delay)
+		msleep(panel_config->power_on_delay);
+
+	if (panel_data->platform_enable) {
+		r = panel_data->platform_enable(dssdev);
+		if (r)
+			goto err1;
+	}
+
+	return 0;
+err1:
+	omapdss_dpi_display_disable(dssdev);
+err0:
+	return r;
+}
+
+static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
+{
+	struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+	struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+	struct panel_config *panel_config = drv_data->panel_config;
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		return;
+
+	if (panel_data->platform_disable)
+		panel_data->platform_disable(dssdev);
+
+	/* wait a couple of vsyncs after disabling the LCD */
+	if (panel_config->power_off_delay)
+		msleep(panel_config->power_off_delay);
+
+	omapdss_dpi_display_disable(dssdev);
+}
+
+static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
+{
+	struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+	struct panel_config *panel_config = NULL;
+	struct panel_drv_data *drv_data = NULL;
+	int i;
+
+	dev_dbg(&dssdev->dev, "probe\n");
+
+	if (!panel_data || !panel_data->name)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(generic_dpi_panels); i++) {
+		if (strcmp(panel_data->name, generic_dpi_panels[i].name) == 0) {
+			panel_config = &generic_dpi_panels[i];
+			break;
+		}
+	}
+
+	if (!panel_config)
+		return -EINVAL;
+
+	dssdev->panel.config = panel_config->config;
+	dssdev->panel.timings = panel_config->timings;
+	dssdev->panel.acb = panel_config->acb;
+	dssdev->panel.acbi = panel_config->acbi;
+
+	drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data)
+		return -ENOMEM;
+
+	drv_data->dssdev = dssdev;
+	drv_data->panel_config = panel_config;
+
+	dev_set_drvdata(&dssdev->dev, drv_data);
+
+	return 0;
+}
+
+static void generic_dpi_panel_remove(struct omap_dss_device *dssdev)
+{
+	struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+
+	dev_dbg(&dssdev->dev, "remove\n");
+
+	kfree(drv_data);
+
+	dev_set_drvdata(&dssdev->dev, NULL);
+}
+
+static int generic_dpi_panel_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	r = generic_dpi_panel_power_on(dssdev);
+	if (r)
+		return r;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	return 0;
+}
+
+static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
+{
+	generic_dpi_panel_power_off(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev)
+{
+	generic_dpi_panel_power_off(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+	return 0;
+}
+
+static int generic_dpi_panel_resume(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	r = generic_dpi_panel_power_on(dssdev);
+	if (r)
+		return r;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	return 0;
+}
+
+static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	dpi_set_timings(dssdev, timings);
+}
+
+static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+
+static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	return dpi_check_timings(dssdev, timings);
+}
+
+static struct omap_dss_driver dpi_driver = {
+	.probe		= generic_dpi_panel_probe,
+	.remove		= generic_dpi_panel_remove,
+
+	.enable		= generic_dpi_panel_enable,
+	.disable	= generic_dpi_panel_disable,
+	.suspend	= generic_dpi_panel_suspend,
+	.resume		= generic_dpi_panel_resume,
+
+	.set_timings	= generic_dpi_panel_set_timings,
+	.get_timings	= generic_dpi_panel_get_timings,
+	.check_timings	= generic_dpi_panel_check_timings,
+
+	.driver         = {
+		.name   = "generic_dpi_panel",
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init generic_dpi_panel_drv_init(void)
+{
+	return omap_dss_register_driver(&dpi_driver);
+}
+
+static void __exit generic_dpi_panel_drv_exit(void)
+{
+	omap_dss_unregister_driver(&dpi_driver);
+}
+
+module_init(generic_dpi_panel_drv_init);
+module_exit(generic_dpi_panel_drv_exit);
+MODULE_LICENSE("GPL");
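The new generic_dpi_panel_probe() selects one of the generic_dpi_panels[] entries by the name string supplied through dssdev->data. A board-file sketch of that hookup is shown below; it assumes the usual omap_dss_device fields from <plat/display.h>, and the enable/disable callbacks are left as placeholders rather than real board GPIO code.

	#include <plat/display.h>
	#include <plat/panel-generic-dpi.h>

	static struct panel_generic_dpi_data example_dvi_panel = {
		.name			= "generic",	/* matched against generic_dpi_panels[].name */
		.platform_enable	= NULL,		/* board GPIO hooks would go here */
		.platform_disable	= NULL,
	};

	static struct omap_dss_device example_dvi_device = {
		.name			= "dvi",
		.type			= OMAP_DISPLAY_TYPE_DPI,
		.driver_name		= "generic_dpi_panel",	/* must match dpi_driver.driver.name */
		.data			= &example_dvi_panel,
		.phy.dpi.data_lines	= 24,
	};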
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
deleted file mode 100644
index 395a68d..0000000
--- a/drivers/video/omap2/displays/panel-generic.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Generic panel support
- *
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings generic_panel_timings = {
-	/* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
-	.x_res		= 640,
-	.y_res		= 480,
-	.pixel_clock	= 23500,
-	.hfp		= 48,
-	.hsw		= 32,
-	.hbp		= 80,
-	.vfp		= 3,
-	.vsw		= 4,
-	.vbp		= 7,
-};
-
-static int generic_panel_power_on(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
-		return 0;
-
-	r = omapdss_dpi_display_enable(dssdev);
-	if (r)
-		goto err0;
-
-	if (dssdev->platform_enable) {
-		r = dssdev->platform_enable(dssdev);
-		if (r)
-			goto err1;
-	}
-
-	return 0;
-err1:
-	omapdss_dpi_display_disable(dssdev);
-err0:
-	return r;
-}
-
-static void generic_panel_power_off(struct omap_dss_device *dssdev)
-{
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-		return;
-
-	if (dssdev->platform_disable)
-		dssdev->platform_disable(dssdev);
-
-	omapdss_dpi_display_disable(dssdev);
-}
-
-static int generic_panel_probe(struct omap_dss_device *dssdev)
-{
-	dssdev->panel.config = OMAP_DSS_LCD_TFT;
-	dssdev->panel.timings = generic_panel_timings;
-
-	return 0;
-}
-
-static void generic_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int generic_panel_enable(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = generic_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void generic_panel_disable(struct omap_dss_device *dssdev)
-{
-	generic_panel_power_off(dssdev);
-
-	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int generic_panel_suspend(struct omap_dss_device *dssdev)
-{
-	generic_panel_power_off(dssdev);
-	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-	return 0;
-}
-
-static int generic_panel_resume(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = generic_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void generic_panel_set_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	dpi_set_timings(dssdev, timings);
-}
-
-static void generic_panel_get_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	*timings = dssdev->panel.timings;
-}
-
-static int generic_panel_check_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	return dpi_check_timings(dssdev, timings);
-}
-
-static struct omap_dss_driver generic_driver = {
-	.probe		= generic_panel_probe,
-	.remove		= generic_panel_remove,
-
-	.enable		= generic_panel_enable,
-	.disable	= generic_panel_disable,
-	.suspend	= generic_panel_suspend,
-	.resume		= generic_panel_resume,
-
-	.set_timings	= generic_panel_set_timings,
-	.get_timings	= generic_panel_get_timings,
-	.check_timings	= generic_panel_check_timings,
-
-	.driver         = {
-		.name   = "generic_panel",
-		.owner  = THIS_MODULE,
-	},
-};
-
-static int __init generic_panel_drv_init(void)
-{
-	return omap_dss_register_driver(&generic_driver);
-}
-
-static void __exit generic_panel_drv_exit(void)
-{
-	omap_dss_unregister_driver(&generic_driver);
-}
-
-module_init(generic_panel_drv_init);
-module_exit(generic_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
new file mode 100644
index 0000000..925e0fa
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -0,0 +1,325 @@
+/*
+ * Support for NEC-nl8048hl11-01b panel driver
+ *
+ * Copyright (C) 2010 Texas Instruments Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <plat/display.h>
+
+#define LCD_XRES		800
+#define LCD_YRES		480
+/*
+ * NEC PIX Clock Ratings
+ * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
+ */
+#define LCD_PIXEL_CLOCK		23800
+
+struct nec_8048_data {
+	struct backlight_device *bl;
+};
+
+static const struct {
+	unsigned char addr;
+	unsigned char dat;
+} nec_8048_init_seq[] = {
+	{ 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
+	{ 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
+	{ 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 },	{ 24, 0x25 },
+	{ 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
+	{ 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F },	{ 38, 0x0F },
+	{ 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 },	{ 43, 0x0F },
+	{ 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F },	{ 48, 0x0F },
+	{ 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
+	{ 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 },	{ 86, 0x14 },
+	{ 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 },	{ 93, 0x0C },
+	{ 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
+	{ 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
+	{ 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
+	{ 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
+	{ 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
+	{ 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
+	{ 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
+	{ 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
+};
+
+/*
+ * NEC NL8048HL11-01B Manual
+ * defines HFP, HSW, HBP, VFP, VSW, VBP as shown below
+ */
+
+static struct omap_video_timings nec_8048_panel_timings = {
+	/* 800 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
+	.x_res		= LCD_XRES,
+	.y_res		= LCD_YRES,
+	.pixel_clock	= LCD_PIXEL_CLOCK,
+	.hfp		= 6,
+	.hsw		= 1,
+	.hbp		= 4,
+	.vfp		= 3,
+	.vsw		= 1,
+	.vbp		= 4,
+};
+
+static int nec_8048_bl_update_status(struct backlight_device *bl)
+{
+	struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
+	int level;
+
+	if (!dssdev->set_backlight)
+		return -EINVAL;
+
+	if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+			bl->props.power == FB_BLANK_UNBLANK)
+		level = bl->props.brightness;
+	else
+		level = 0;
+
+	return dssdev->set_backlight(dssdev, level);
+}
+
+static int nec_8048_bl_get_brightness(struct backlight_device *bl)
+{
+	if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+			bl->props.power == FB_BLANK_UNBLANK)
+		return bl->props.brightness;
+
+	return 0;
+}
+
+static const struct backlight_ops nec_8048_bl_ops = {
+	.get_brightness	= nec_8048_bl_get_brightness,
+	.update_status	= nec_8048_bl_update_status,
+};
+
+static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
+{
+	struct backlight_device *bl;
+	struct nec_8048_data *necd;
+	struct backlight_properties props;
+	int r;
+
+	dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+				OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF |
+				OMAP_DSS_LCD_ONOFF;
+	dssdev->panel.timings = nec_8048_panel_timings;
+
+	necd = kzalloc(sizeof(*necd), GFP_KERNEL);
+	if (!necd)
+		return -ENOMEM;
+
+	dev_set_drvdata(&dssdev->dev, necd);
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 255;
+
+	bl = backlight_device_register("nec-8048", &dssdev->dev, dssdev,
+			&nec_8048_bl_ops, &props);
+	if (IS_ERR(bl)) {
+		r = PTR_ERR(bl);
+		kfree(necd);
+		return r;
+	}
+	necd->bl = bl;
+
+	bl->props.fb_blank = FB_BLANK_UNBLANK;
+	bl->props.power = FB_BLANK_UNBLANK;
+	bl->props.max_brightness = dssdev->max_backlight_level;
+	bl->props.brightness = dssdev->max_backlight_level;
+
+	r = nec_8048_bl_update_status(bl);
+	if (r < 0)
+		dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
+	return 0;
+}
+
+static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
+{
+	struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bl = necd->bl;
+
+	bl->props.power = FB_BLANK_POWERDOWN;
+	nec_8048_bl_update_status(bl);
+	backlight_device_unregister(bl);
+
+	kfree(necd);
+}
+
+static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+	struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bl = necd->bl;
+
+	if (dssdev->platform_enable) {
+		r = dssdev->platform_enable(dssdev);
+		if (r)
+			return r;
+	}
+
+	r = nec_8048_bl_update_status(bl);
+	if (r < 0)
+		dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
+	r = omapdss_dpi_display_enable(dssdev);
+
+	return r;
+}
+
+static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
+{
+	struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bl = necd->bl;
+
+	omapdss_dpi_display_disable(dssdev);
+
+	bl->props.brightness = 0;
+	nec_8048_bl_update_status(bl);
+
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+}
+
+static int nec_8048_panel_suspend(struct omap_dss_device *dssdev)
+{
+	nec_8048_panel_disable(dssdev);
+	return 0;
+}
+
+static int nec_8048_panel_resume(struct omap_dss_device *dssdev)
+{
+	return nec_8048_panel_enable(dssdev);
+}
+
+static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
+{
+	return 16;
+}
+
+static struct omap_dss_driver nec_8048_driver = {
+	.probe			= nec_8048_panel_probe,
+	.remove			= nec_8048_panel_remove,
+	.enable			= nec_8048_panel_enable,
+	.disable		= nec_8048_panel_disable,
+	.suspend		= nec_8048_panel_suspend,
+	.resume			= nec_8048_panel_resume,
+	.get_recommended_bpp	= nec_8048_recommended_bpp,
+
+	.driver		= {
+		.name		= "NEC_8048_panel",
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
+			unsigned char reg_data)
+{
+	int ret = 0;
+	unsigned int cmd = 0, data = 0;
+
+	cmd = 0x0000 | reg_addr; /* register address write */
+	data = 0x0100 | reg_data; /* register data write */
+	data = (cmd << 16) | data;
+
+	ret = spi_write(spi, (unsigned char *)&data, 4);
+	if (ret)
+		pr_err("error in spi_write %x\n", data);
+
+	return ret;
+}
+
+static int init_nec_8048_wvga_lcd(struct spi_device *spi)
+{
+	unsigned int i;
+	/* Initialization Sequence */
+	/* nec_8048_spi_send(spi, REG, VAL) */
+	for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
+		nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+				nec_8048_init_seq[i].dat);
+	udelay(20);
+	nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+				nec_8048_init_seq[i].dat);
+	return 0;
+}
+
+static int nec_8048_spi_probe(struct spi_device *spi)
+{
+	spi->mode = SPI_MODE_0;
+	spi->bits_per_word = 32;
+	spi_setup(spi);
+
+	init_nec_8048_wvga_lcd(spi);
+
+	return omap_dss_register_driver(&nec_8048_driver);
+}
+
+static int nec_8048_spi_remove(struct spi_device *spi)
+{
+	omap_dss_unregister_driver(&nec_8048_driver);
+
+	return 0;
+}
+
+static int nec_8048_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+	nec_8048_spi_send(spi, 2, 0x01);
+	mdelay(40);
+
+	return 0;
+}
+
+static int nec_8048_spi_resume(struct spi_device *spi)
+{
+	/* reinitialize the panel */
+	spi_setup(spi);
+	nec_8048_spi_send(spi, 2, 0x00);
+	init_nec_8048_wvga_lcd(spi);
+
+	return 0;
+}
+
+static struct spi_driver nec_8048_spi_driver = {
+	.probe		= nec_8048_spi_probe,
+	.remove		= __devexit_p(nec_8048_spi_remove),
+	.suspend	= nec_8048_spi_suspend,
+	.resume		= nec_8048_spi_resume,
+	.driver		= {
+		.name	= "nec_8048_spi",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init nec_8048_lcd_init(void)
+{
+	return spi_register_driver(&nec_8048_spi_driver);
+}
+
+static void __exit nec_8048_lcd_exit(void)
+{
+	return spi_unregister_driver(&nec_8048_spi_driver);
+}
+
+module_init(nec_8048_lcd_init);
+module_exit(nec_8048_lcd_exit);
+MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
+MODULE_DESCRIPTION("NEC-nl8048hl11-01b Driver");
+MODULE_LICENSE("GPL");
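nec_8048_spi_send() folds a register-address write and a register-data write into one 32-bit value, which works because the probe sets spi->bits_per_word = 32. A worked instance of that packing for the first nec_8048_init_seq entry:

	/*
	 * Packing for { 3, 0x01 }, the first nec_8048_init_seq entry:
	 *
	 *   cmd  = 0x0000 | 3    = 0x0003      register address write
	 *   data = 0x0100 | 0x01 = 0x0101      register data write
	 *   word = (cmd << 16) | data = 0x00030101
	 *
	 * spi_write() then ships the whole value as a single 32-bit SPI word.
	 */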
diff --git a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
deleted file mode 100644
index 0c6896c..0000000
--- a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * LCD panel driver for Sharp LQ043T1DG01
- *
- * Copyright (C) 2009 Texas Instruments Inc
- * Author: Vaibhav Hiremath <hvaibhav@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/err.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings sharp_lq_timings = {
-	.x_res = 480,
-	.y_res = 272,
-
-	.pixel_clock	= 9000,
-
-	.hsw		= 42,
-	.hfp		= 3,
-	.hbp		= 2,
-
-	.vsw		= 11,
-	.vfp		= 3,
-	.vbp		= 2,
-};
-
-static int sharp_lq_panel_power_on(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
-		return 0;
-
-	r = omapdss_dpi_display_enable(dssdev);
-	if (r)
-		goto err0;
-
-	/* wait couple of vsyncs until enabling the LCD */
-	msleep(50);
-
-	if (dssdev->platform_enable) {
-		r = dssdev->platform_enable(dssdev);
-		if (r)
-			goto err1;
-	}
-
-	return 0;
-err1:
-	omapdss_dpi_display_disable(dssdev);
-err0:
-	return r;
-}
-
-static void sharp_lq_panel_power_off(struct omap_dss_device *dssdev)
-{
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-		return;
-
-	if (dssdev->platform_disable)
-		dssdev->platform_disable(dssdev);
-
-	/* wait at least 5 vsyncs after disabling the LCD */
-	msleep(100);
-
-	omapdss_dpi_display_disable(dssdev);
-}
-
-static int sharp_lq_panel_probe(struct omap_dss_device *dssdev)
-{
-
-	dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
-		OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO;
-	dssdev->panel.acb = 0x0;
-	dssdev->panel.timings = sharp_lq_timings;
-
-	return 0;
-}
-
-static void sharp_lq_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int sharp_lq_panel_enable(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = sharp_lq_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void sharp_lq_panel_disable(struct omap_dss_device *dssdev)
-{
-	sharp_lq_panel_power_off(dssdev);
-
-	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int sharp_lq_panel_suspend(struct omap_dss_device *dssdev)
-{
-	sharp_lq_panel_power_off(dssdev);
-	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-	return 0;
-}
-
-static int sharp_lq_panel_resume(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = sharp_lq_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static struct omap_dss_driver sharp_lq_driver = {
-	.probe		= sharp_lq_panel_probe,
-	.remove		= sharp_lq_panel_remove,
-
-	.enable		= sharp_lq_panel_enable,
-	.disable	= sharp_lq_panel_disable,
-	.suspend	= sharp_lq_panel_suspend,
-	.resume		= sharp_lq_panel_resume,
-
-	.driver         = {
-		.name   = "sharp_lq_panel",
-		.owner  = THIS_MODULE,
-	},
-};
-
-static int __init sharp_lq_panel_drv_init(void)
-{
-	return omap_dss_register_driver(&sharp_lq_driver);
-}
-
-static void __exit sharp_lq_panel_drv_exit(void)
-{
-	omap_dss_unregister_driver(&sharp_lq_driver);
-}
-
-module_init(sharp_lq_panel_drv_init);
-module_exit(sharp_lq_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index e1c765d..61026f9 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -465,7 +465,7 @@
 	return 0;
 }
 
-static struct backlight_ops taal_bl_ops = {
+static const struct backlight_ops taal_bl_ops = {
 	.get_brightness = taal_bl_get_intensity,
 	.update_status  = taal_bl_update_status,
 };
diff --git a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
deleted file mode 100644
index 526e906..0000000
--- a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * LCD panel driver for Toppoly TDO35S
- *
- * Copyright (C) 2009 CompuLab, Ltd.
- * Author: Mike Rapoport <mike@compulab.co.il>
- *
- * Based on generic panel support
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings toppoly_tdo_panel_timings = {
-	/* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
-	.x_res		= 480,
-	.y_res		= 640,
-
-	.pixel_clock	= 26000,
-
-	.hfp		= 104,
-	.hsw		= 8,
-	.hbp		= 8,
-
-	.vfp		= 4,
-	.vsw		= 2,
-	.vbp		= 2,
-};
-
-static int toppoly_tdo_panel_power_on(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
-		return 0;
-
-	r = omapdss_dpi_display_enable(dssdev);
-	if (r)
-		goto err0;
-
-	if (dssdev->platform_enable) {
-		r = dssdev->platform_enable(dssdev);
-		if (r)
-			goto err1;
-	}
-
-	return 0;
-err1:
-	omapdss_dpi_display_disable(dssdev);
-err0:
-	return r;
-}
-
-static void toppoly_tdo_panel_power_off(struct omap_dss_device *dssdev)
-{
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-		return;
-
-	if (dssdev->platform_disable)
-		dssdev->platform_disable(dssdev);
-
-	omapdss_dpi_display_disable(dssdev);
-}
-
-static int toppoly_tdo_panel_probe(struct omap_dss_device *dssdev)
-{
-	dssdev->panel.config = OMAP_DSS_LCD_TFT |
-			       OMAP_DSS_LCD_IVS |
-			       OMAP_DSS_LCD_IHS |
-			       OMAP_DSS_LCD_IPC |
-			       OMAP_DSS_LCD_ONOFF;
-
-	dssdev->panel.timings = toppoly_tdo_panel_timings;
-
-	return 0;
-}
-
-static void toppoly_tdo_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int toppoly_tdo_panel_enable(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = toppoly_tdo_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void toppoly_tdo_panel_disable(struct omap_dss_device *dssdev)
-{
-	toppoly_tdo_panel_power_off(dssdev);
-
-	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int toppoly_tdo_panel_suspend(struct omap_dss_device *dssdev)
-{
-	toppoly_tdo_panel_power_off(dssdev);
-	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-	return 0;
-}
-
-static int toppoly_tdo_panel_resume(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = toppoly_tdo_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static struct omap_dss_driver generic_driver = {
-	.probe		= toppoly_tdo_panel_probe,
-	.remove		= toppoly_tdo_panel_remove,
-
-	.enable		= toppoly_tdo_panel_enable,
-	.disable	= toppoly_tdo_panel_disable,
-	.suspend	= toppoly_tdo_panel_suspend,
-	.resume		= toppoly_tdo_panel_resume,
-
-	.driver         = {
-		.name   = "toppoly_tdo35s_panel",
-		.owner  = THIS_MODULE,
-	},
-};
-
-static int __init toppoly_tdo_panel_drv_init(void)
-{
-	return omap_dss_register_driver(&generic_driver);
-}
-
-static void __exit toppoly_tdo_panel_drv_exit(void)
-{
-	omap_dss_unregister_driver(&generic_driver);
-}
-
-module_init(toppoly_tdo_panel_drv_init);
-module_exit(toppoly_tdo_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index fa40fa5..9f8c69f 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -44,34 +44,40 @@
 /* DISPC */
 #define DISPC_BASE			0x48050400
 
-#define DISPC_SZ_REGS			SZ_1K
+#define DISPC_SZ_REGS			SZ_4K
 
 struct dispc_reg { u16 idx; };
 
 #define DISPC_REG(idx)			((const struct dispc_reg) { idx })
 
-/* DISPC common */
+/*
+ * DISPC common registers and DISPC channel registers:
+ * ch = 0 for LCD, ch = 1 for DIGIT, and ch = 2 for LCD2.
+ */
 #define DISPC_REVISION			DISPC_REG(0x0000)
 #define DISPC_SYSCONFIG			DISPC_REG(0x0010)
 #define DISPC_SYSSTATUS			DISPC_REG(0x0014)
 #define DISPC_IRQSTATUS			DISPC_REG(0x0018)
 #define DISPC_IRQENABLE			DISPC_REG(0x001C)
 #define DISPC_CONTROL			DISPC_REG(0x0040)
+#define DISPC_CONTROL2			DISPC_REG(0x0238)
 #define DISPC_CONFIG			DISPC_REG(0x0044)
+#define DISPC_CONFIG2			DISPC_REG(0x0620)
 #define DISPC_CAPABLE			DISPC_REG(0x0048)
-#define DISPC_DEFAULT_COLOR0		DISPC_REG(0x004C)
-#define DISPC_DEFAULT_COLOR1		DISPC_REG(0x0050)
-#define DISPC_TRANS_COLOR0		DISPC_REG(0x0054)
-#define DISPC_TRANS_COLOR1		DISPC_REG(0x0058)
+#define DISPC_DEFAULT_COLOR(ch)		DISPC_REG(ch == 0 ? 0x004C : \
+					(ch == 1 ? 0x0050 : 0x03AC))
+#define DISPC_TRANS_COLOR(ch)		DISPC_REG(ch == 0 ? 0x0054 : \
+					(ch == 1 ? 0x0058 : 0x03B0))
 #define DISPC_LINE_STATUS		DISPC_REG(0x005C)
 #define DISPC_LINE_NUMBER		DISPC_REG(0x0060)
-#define DISPC_TIMING_H			DISPC_REG(0x0064)
-#define DISPC_TIMING_V			DISPC_REG(0x0068)
-#define DISPC_POL_FREQ			DISPC_REG(0x006C)
-#define DISPC_DIVISOR			DISPC_REG(0x0070)
+#define DISPC_TIMING_H(ch)		DISPC_REG(ch != 2 ? 0x0064 : 0x0400)
+#define DISPC_TIMING_V(ch)		DISPC_REG(ch != 2 ? 0x0068 : 0x0404)
+#define DISPC_POL_FREQ(ch)		DISPC_REG(ch != 2 ? 0x006C : 0x0408)
+#define DISPC_DIVISOR(ch)		DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
 #define DISPC_GLOBAL_ALPHA		DISPC_REG(0x0074)
 #define DISPC_SIZE_DIG			DISPC_REG(0x0078)
-#define DISPC_SIZE_LCD			DISPC_REG(0x007C)
+#define DISPC_SIZE_LCD(ch)		DISPC_REG(ch != 2 ? 0x007C : 0x03CC)
 
 /* DISPC GFX plane */
 #define DISPC_GFX_BA0			DISPC_REG(0x0080)
@@ -86,13 +92,12 @@
 #define DISPC_GFX_WINDOW_SKIP		DISPC_REG(0x00B4)
 #define DISPC_GFX_TABLE_BA		DISPC_REG(0x00B8)
 
-#define DISPC_DATA_CYCLE1		DISPC_REG(0x01D4)
-#define DISPC_DATA_CYCLE2		DISPC_REG(0x01D8)
-#define DISPC_DATA_CYCLE3		DISPC_REG(0x01DC)
-
-#define DISPC_CPR_COEF_R		DISPC_REG(0x0220)
-#define DISPC_CPR_COEF_G		DISPC_REG(0x0224)
-#define DISPC_CPR_COEF_B		DISPC_REG(0x0228)
+#define DISPC_DATA_CYCLE1(ch)		DISPC_REG(ch != 2 ? 0x01D4 : 0x03C0)
+#define DISPC_DATA_CYCLE2(ch)		DISPC_REG(ch != 2 ? 0x01D8 : 0x03C4)
+#define DISPC_DATA_CYCLE3(ch)		DISPC_REG(ch != 2 ? 0x01DC : 0x03C8)
+#define DISPC_CPR_COEF_R(ch)		DISPC_REG(ch != 2 ? 0x0220 : 0x03BC)
+#define DISPC_CPR_COEF_G(ch)		DISPC_REG(ch != 2 ? 0x0224 : 0x03B8)
+#define DISPC_CPR_COEF_B(ch)		DISPC_REG(ch != 2 ? 0x0228 : 0x03B4)
 
 #define DISPC_GFX_PRELOAD		DISPC_REG(0x022C)
 
@@ -217,18 +222,29 @@
 	SR(IRQENABLE);
 	SR(CONTROL);
 	SR(CONFIG);
-	SR(DEFAULT_COLOR0);
-	SR(DEFAULT_COLOR1);
-	SR(TRANS_COLOR0);
-	SR(TRANS_COLOR1);
+	SR(DEFAULT_COLOR(0));
+	SR(DEFAULT_COLOR(1));
+	SR(TRANS_COLOR(0));
+	SR(TRANS_COLOR(1));
 	SR(LINE_NUMBER);
-	SR(TIMING_H);
-	SR(TIMING_V);
-	SR(POL_FREQ);
-	SR(DIVISOR);
+	SR(TIMING_H(0));
+	SR(TIMING_V(0));
+	SR(POL_FREQ(0));
+	SR(DIVISOR(0));
 	SR(GLOBAL_ALPHA);
 	SR(SIZE_DIG);
-	SR(SIZE_LCD);
+	SR(SIZE_LCD(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		SR(CONTROL2);
+		SR(DEFAULT_COLOR(2));
+		SR(TRANS_COLOR(2));
+		SR(SIZE_LCD(2));
+		SR(TIMING_H(2));
+		SR(TIMING_V(2));
+		SR(POL_FREQ(2));
+		SR(DIVISOR(2));
+		SR(CONFIG2);
+	}
 
 	SR(GFX_BA0);
 	SR(GFX_BA1);
@@ -241,13 +257,22 @@
 	SR(GFX_WINDOW_SKIP);
 	SR(GFX_TABLE_BA);
 
-	SR(DATA_CYCLE1);
-	SR(DATA_CYCLE2);
-	SR(DATA_CYCLE3);
+	SR(DATA_CYCLE1(0));
+	SR(DATA_CYCLE2(0));
+	SR(DATA_CYCLE3(0));
 
-	SR(CPR_COEF_R);
-	SR(CPR_COEF_G);
-	SR(CPR_COEF_B);
+	SR(CPR_COEF_R(0));
+	SR(CPR_COEF_G(0));
+	SR(CPR_COEF_B(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		SR(CPR_COEF_B(2));
+		SR(CPR_COEF_G(2));
+		SR(CPR_COEF_R(2));
+
+		SR(DATA_CYCLE1(2));
+		SR(DATA_CYCLE2(2));
+		SR(DATA_CYCLE3(2));
+	}
 
 	SR(GFX_PRELOAD);
 
@@ -356,18 +381,28 @@
 	/*RR(IRQENABLE);*/
 	/*RR(CONTROL);*/
 	RR(CONFIG);
-	RR(DEFAULT_COLOR0);
-	RR(DEFAULT_COLOR1);
-	RR(TRANS_COLOR0);
-	RR(TRANS_COLOR1);
+	RR(DEFAULT_COLOR(0));
+	RR(DEFAULT_COLOR(1));
+	RR(TRANS_COLOR(0));
+	RR(TRANS_COLOR(1));
 	RR(LINE_NUMBER);
-	RR(TIMING_H);
-	RR(TIMING_V);
-	RR(POL_FREQ);
-	RR(DIVISOR);
+	RR(TIMING_H(0));
+	RR(TIMING_V(0));
+	RR(POL_FREQ(0));
+	RR(DIVISOR(0));
 	RR(GLOBAL_ALPHA);
 	RR(SIZE_DIG);
-	RR(SIZE_LCD);
+	RR(SIZE_LCD(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		RR(DEFAULT_COLOR(2));
+		RR(TRANS_COLOR(2));
+		RR(SIZE_LCD(2));
+		RR(TIMING_H(2));
+		RR(TIMING_V(2));
+		RR(POL_FREQ(2));
+		RR(DIVISOR(2));
+		RR(CONFIG2);
+	}
 
 	RR(GFX_BA0);
 	RR(GFX_BA1);
@@ -380,13 +415,22 @@
 	RR(GFX_WINDOW_SKIP);
 	RR(GFX_TABLE_BA);
 
-	RR(DATA_CYCLE1);
-	RR(DATA_CYCLE2);
-	RR(DATA_CYCLE3);
+	RR(DATA_CYCLE1(0));
+	RR(DATA_CYCLE2(0));
+	RR(DATA_CYCLE3(0));
 
-	RR(CPR_COEF_R);
-	RR(CPR_COEF_G);
-	RR(CPR_COEF_B);
+	RR(CPR_COEF_R(0));
+	RR(CPR_COEF_G(0));
+	RR(CPR_COEF_B(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		RR(DATA_CYCLE1(2));
+		RR(DATA_CYCLE2(2));
+		RR(DATA_CYCLE3(2));
+
+		RR(CPR_COEF_B(2));
+		RR(CPR_COEF_G(2));
+		RR(CPR_COEF_R(2));
+	}
 
 	RR(GFX_PRELOAD);
 
@@ -490,7 +534,8 @@
 
 	/* enable last, because LCD & DIGIT enable are here */
 	RR(CONTROL);
-
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		RR(CONTROL2);
 	/* clear spurious SYNC_LOST_DIGIT interrupts */
 	dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
 
@@ -516,42 +561,63 @@
 {
 	int bit;
 
-	if (channel == OMAP_DSS_CHANNEL_LCD)
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
 		bit = 5; /* GOLCD */
 	else
 		bit = 6; /* GODIGIT */
 
-	return REG_GET(DISPC_CONTROL, bit, bit) == 1;
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		return REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+	else
+		return REG_GET(DISPC_CONTROL, bit, bit) == 1;
 }
 
 void dispc_go(enum omap_channel channel)
 {
 	int bit;
+	bool enable_bit, go_bit;
 
 	enable_clocks(1);
 
-	if (channel == OMAP_DSS_CHANNEL_LCD)
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
 		bit = 0; /* LCDENABLE */
 	else
 		bit = 1; /* DIGITALENABLE */
 
 	/* if the channel is not enabled, we don't need GO */
-	if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+	else
+		enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+
+	if (!enable_bit)
 		goto end;
 
-	if (channel == OMAP_DSS_CHANNEL_LCD)
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
 		bit = 5; /* GOLCD */
 	else
 		bit = 6; /* GODIGIT */
 
-	if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+	else
+		go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+
+	if (go_bit) {
 		DSSERR("GO bit not down for channel %d\n", channel);
 		goto end;
 	}
 
-	DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT");
+	DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
+		(channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT"));
 
-	REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
 end:
 	enable_clocks(0);
 }
@@ -773,13 +839,26 @@
 	dispc_write_reg(vsi_reg[plane-1], val);
 }
 
+static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
+{
+	if (!dss_has_feature(FEAT_PRE_MULT_ALPHA))
+		return;
+
+	if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+		plane == OMAP_DSS_VIDEO1)
+		return;
+
+	REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 28, 28);
+}
+
 static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
 {
 	if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
 		return;
 
-	BUG_ON(!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
-			plane == OMAP_DSS_VIDEO1);
+	if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+		plane == OMAP_DSS_VIDEO1)
+		return;
 
 	if (plane == OMAP_DSS_GFX)
 		REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
@@ -851,6 +930,7 @@
 {
 	int shift;
 	u32 val;
+	int chan = 0, chan2 = 0;
 
 	switch (plane) {
 	case OMAP_DSS_GFX:
@@ -866,7 +946,29 @@
 	}
 
 	val = dispc_read_reg(dispc_reg_att[plane]);
-	val = FLD_MOD(val, channel, shift, shift);
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		switch (channel) {
+		case OMAP_DSS_CHANNEL_LCD:
+			chan = 0;
+			chan2 = 0;
+			break;
+		case OMAP_DSS_CHANNEL_DIGIT:
+			chan = 1;
+			chan2 = 0;
+			break;
+		case OMAP_DSS_CHANNEL_LCD2:
+			chan = 0;
+			chan2 = 1;
+			break;
+		default:
+			BUG();
+		}
+
+		val = FLD_MOD(val, chan, shift, shift);
+		val = FLD_MOD(val, chan2, 31, 30);
+	} else {
+		val = FLD_MOD(val, channel, shift, shift);
+	}
 	dispc_write_reg(dispc_reg_att[plane], val);
 }
 
@@ -923,13 +1025,13 @@
 	enable_clocks(0);
 }
 
-void dispc_set_lcd_size(u16 width, u16 height)
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
 {
 	u32 val;
 	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
 	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
 	enable_clocks(1);
-	dispc_write_reg(DISPC_SIZE_LCD, val);
+	dispc_write_reg(DISPC_SIZE_LCD(channel), val);
 	enable_clocks(0);
 }
 
@@ -1426,12 +1528,13 @@
 	}
 }
 
-static unsigned long calc_fclk_five_taps(u16 width, u16 height,
-		u16 out_width, u16 out_height, enum omap_color_mode color_mode)
+static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+		u16 height, u16 out_width, u16 out_height,
+		enum omap_color_mode color_mode)
 {
 	u32 fclk = 0;
 	/* FIXME venc pclk? */
-	u64 tmp, pclk = dispc_pclk_rate();
+	u64 tmp, pclk = dispc_pclk_rate(channel);
 
 	if (height > out_height) {
 		/* FIXME get real display PPL */
@@ -1463,8 +1566,8 @@
 	return fclk;
 }
 
-static unsigned long calc_fclk(u16 width, u16 height,
-		u16 out_width, u16 out_height)
+static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+		u16 height, u16 out_width, u16 out_height)
 {
 	unsigned int hf, vf;
 
@@ -1488,7 +1591,7 @@
 		vf = 1;
 
 	/* FIXME venc pclk? */
-	return dispc_pclk_rate() * vf * hf;
+	return dispc_pclk_rate(channel) * vf * hf;
 }
 
 void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
@@ -1507,7 +1610,8 @@
 		bool ilace,
 		enum omap_dss_rotation_type rotation_type,
 		u8 rotation, int mirror,
-		u8 global_alpha)
+		u8 global_alpha, u8 pre_mult_alpha,
+		enum omap_channel channel)
 {
 	const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
 	bool five_taps = 0;
@@ -1536,29 +1640,12 @@
 				height, pos_y, out_height);
 	}
 
+	if (!dss_feat_color_mode_supported(plane, color_mode))
+		return -EINVAL;
+
 	if (plane == OMAP_DSS_GFX) {
 		if (width != out_width || height != out_height)
 			return -EINVAL;
-
-		switch (color_mode) {
-		case OMAP_DSS_COLOR_ARGB16:
-		case OMAP_DSS_COLOR_ARGB32:
-		case OMAP_DSS_COLOR_RGBA32:
-			if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
-				return -EINVAL;
-		case OMAP_DSS_COLOR_RGBX32:
-			if (cpu_is_omap24xx())
-				return -EINVAL;
-			/* fall through */
-		case OMAP_DSS_COLOR_RGB12U:
-		case OMAP_DSS_COLOR_RGB16:
-		case OMAP_DSS_COLOR_RGB24P:
-		case OMAP_DSS_COLOR_RGB24U:
-			break;
-
-		default:
-			return -EINVAL;
-		}
 	} else {
 		/* video plane */
 
@@ -1572,42 +1659,16 @@
 		   out_height > height * 8)
 			return -EINVAL;
 
-		switch (color_mode) {
-		case OMAP_DSS_COLOR_RGBX32:
-		case OMAP_DSS_COLOR_RGB12U:
-			if (cpu_is_omap24xx())
-				return -EINVAL;
-			/* fall through */
-		case OMAP_DSS_COLOR_RGB16:
-		case OMAP_DSS_COLOR_RGB24P:
-		case OMAP_DSS_COLOR_RGB24U:
-			break;
-
-		case OMAP_DSS_COLOR_ARGB16:
-		case OMAP_DSS_COLOR_ARGB32:
-		case OMAP_DSS_COLOR_RGBA32:
-			if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
-				return -EINVAL;
-			if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
-					plane == OMAP_DSS_VIDEO1)
-				return -EINVAL;
-			break;
-
-		case OMAP_DSS_COLOR_YUV2:
-		case OMAP_DSS_COLOR_UYVY:
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
 			cconv = 1;
-			break;
-
-		default:
-			return -EINVAL;
-		}
 
 		/* Must use 5-tap filter? */
 		five_taps = height > out_height * 2;
 
 		if (!five_taps) {
-			fclk = calc_fclk(width, height,
-					out_width, out_height);
+			fclk = calc_fclk(channel, width, height, out_width,
+					out_height);
 
 			/* Try 5-tap filter if 3-tap fclk is too high */
 			if (cpu_is_omap34xx() && height > out_height &&
@@ -1621,7 +1682,7 @@
 		}
 
 		if (five_taps)
-			fclk = calc_fclk_five_taps(width, height,
+			fclk = calc_fclk_five_taps(channel, width, height,
 					out_width, out_height, color_mode);
 
 		DSSDBG("required fclk rate = %lu Hz\n", fclk);
@@ -1693,8 +1754,8 @@
 
 	_dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
 
-	if (plane != OMAP_DSS_VIDEO1)
-		_dispc_setup_global_alpha(plane, global_alpha);
+	_dispc_set_pre_mult_alpha(plane, pre_mult_alpha);
+	_dispc_setup_global_alpha(plane, global_alpha);
 
 	return 0;
 }
@@ -1710,36 +1771,44 @@
 	complete(compl);
 }
 
-static void _enable_lcd_out(bool enable)
+static void _enable_lcd_out(enum omap_channel channel, bool enable)
 {
-	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
 }
 
-static void dispc_enable_lcd_out(bool enable)
+static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
 {
 	struct completion frame_done_completion;
 	bool is_on;
 	int r;
+	u32 irq;
 
 	enable_clocks(1);
 
 	/* When we disable LCD output, we need to wait until frame is done.
 	 * Otherwise the DSS is still working, and turning off the clocks
 	 * prevents DSS from going to OFF mode */
-	is_on = REG_GET(DISPC_CONTROL, 0, 0);
+	is_on = channel == OMAP_DSS_CHANNEL_LCD2 ?
+			REG_GET(DISPC_CONTROL2, 0, 0) :
+			REG_GET(DISPC_CONTROL, 0, 0);
+
+	irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 :
+			DISPC_IRQ_FRAMEDONE;
 
 	if (!enable && is_on) {
 		init_completion(&frame_done_completion);
 
 		r = omap_dispc_register_isr(dispc_disable_isr,
-				&frame_done_completion,
-				DISPC_IRQ_FRAMEDONE);
+				&frame_done_completion, irq);
 
 		if (r)
 			DSSERR("failed to register FRAMEDONE isr\n");
 	}
 
-	_enable_lcd_out(enable);
+	_enable_lcd_out(channel, enable);
 
 	if (!enable && is_on) {
 		if (!wait_for_completion_timeout(&frame_done_completion,
@@ -1747,8 +1816,7 @@
 			DSSERR("timeout waiting for FRAME DONE\n");
 
 		r = omap_dispc_unregister_isr(dispc_disable_isr,
-				&frame_done_completion,
-				DISPC_IRQ_FRAMEDONE);
+				&frame_done_completion, irq);
 
 		if (r)
 			DSSERR("failed to unregister FRAMEDONE isr\n");
@@ -1818,6 +1886,8 @@
 		unsigned long flags;
 		spin_lock_irqsave(&dispc.irq_lock, flags);
 		dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+		if (dss_has_feature(FEAT_MGR_LCD2))
+			dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
 		dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
 		_omap_dispc_set_irqs();
 		spin_unlock_irqrestore(&dispc.irq_lock, flags);
@@ -1832,14 +1902,17 @@
 		return !!REG_GET(DISPC_CONTROL, 0, 0);
 	else if (channel == OMAP_DSS_CHANNEL_DIGIT)
 		return !!REG_GET(DISPC_CONTROL, 1, 1);
+	else if (channel == OMAP_DSS_CHANNEL_LCD2)
+		return !!REG_GET(DISPC_CONTROL2, 0, 0);
 	else
 		BUG();
 }
 
 void dispc_enable_channel(enum omap_channel channel, bool enable)
 {
-	if (channel == OMAP_DSS_CHANNEL_LCD)
-		dispc_enable_lcd_out(enable);
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
+		dispc_enable_lcd_out(channel, enable);
 	else if (channel == OMAP_DSS_CHANNEL_DIGIT)
 		dispc_enable_digit_out(enable);
 	else
@@ -1848,6 +1921,9 @@
 
 void dispc_lcd_enable_signal_polarity(bool act_high)
 {
+	if (!dss_has_feature(FEAT_LCDENABLEPOL))
+		return;
+
 	enable_clocks(1);
 	REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
 	enable_clocks(0);
@@ -1855,6 +1931,9 @@
 
 void dispc_lcd_enable_signal(bool enable)
 {
+	if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
+		return;
+
 	enable_clocks(1);
 	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
 	enable_clocks(0);
@@ -1862,20 +1941,27 @@
 
 void dispc_pck_free_enable(bool enable)
 {
+	if (!dss_has_feature(FEAT_PCKFREEENABLE))
+		return;
+
 	enable_clocks(1);
 	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
 	enable_clocks(0);
 }
 
-void dispc_enable_fifohandcheck(bool enable)
+void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
 {
 	enable_clocks(1);
-	REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
+	else
+		REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
 	enable_clocks(0);
 }
 
 
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
+void dispc_set_lcd_display_type(enum omap_channel channel,
+		enum omap_lcd_display_type type)
 {
 	int mode;
 
@@ -1894,7 +1980,10 @@
 	}
 
 	enable_clocks(1);
-	REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
 	enable_clocks(0);
 }
 
@@ -1908,25 +1997,21 @@
 
 void dispc_set_default_color(enum omap_channel channel, u32 color)
 {
-	const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
-				DISPC_DEFAULT_COLOR1 };
-
 	enable_clocks(1);
-	dispc_write_reg(def_reg[channel], color);
+	dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
 	enable_clocks(0);
 }
 
 u32 dispc_get_default_color(enum omap_channel channel)
 {
-	const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
-				DISPC_DEFAULT_COLOR1 };
 	u32 l;
 
 	BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
-	       channel != OMAP_DSS_CHANNEL_LCD);
+		channel != OMAP_DSS_CHANNEL_LCD &&
+		channel != OMAP_DSS_CHANNEL_LCD2);
 
 	enable_clocks(1);
-	l = dispc_read_reg(def_reg[channel]);
+	l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
 	enable_clocks(0);
 
 	return l;
@@ -1936,16 +2021,15 @@
 		enum omap_dss_trans_key_type type,
 		u32 trans_key)
 {
-	const struct dispc_reg tr_reg[] = {
-		DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
-
 	enable_clocks(1);
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
-	else /* OMAP_DSS_CHANNEL_DIGIT */
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
+	else /* OMAP_DSS_CHANNEL_LCD2 */
+		REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
 
-	dispc_write_reg(tr_reg[ch], trans_key);
+	dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
 	enable_clocks(0);
 }
 
@@ -1953,21 +2037,20 @@
 		enum omap_dss_trans_key_type *type,
 		u32 *trans_key)
 {
-	const struct dispc_reg tr_reg[] = {
-		DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
-
 	enable_clocks(1);
 	if (type) {
 		if (ch == OMAP_DSS_CHANNEL_LCD)
 			*type = REG_GET(DISPC_CONFIG, 11, 11);
 		else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 			*type = REG_GET(DISPC_CONFIG, 13, 13);
+		else if (ch == OMAP_DSS_CHANNEL_LCD2)
+			*type = REG_GET(DISPC_CONFIG2, 11, 11);
 		else
 			BUG();
 	}
 
 	if (trans_key)
-		*trans_key = dispc_read_reg(tr_reg[ch]);
+		*trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
 	enable_clocks(0);
 }
 
@@ -1976,8 +2059,10 @@
 	enable_clocks(1);
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
-	else /* OMAP_DSS_CHANNEL_DIGIT */
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
+	else /* OMAP_DSS_CHANNEL_LCD2 */
+		REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
 	enable_clocks(0);
 }
 void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
@@ -1988,8 +2073,10 @@
 	enable_clocks(1);
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
-	else /* OMAP_DSS_CHANNEL_DIGIT */
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
+	else /* OMAP_DSS_CHANNEL_LCD2 */
+		REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
 	enable_clocks(0);
 }
 bool dispc_alpha_blending_enabled(enum omap_channel ch)
@@ -2003,13 +2090,14 @@
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		enabled = REG_GET(DISPC_CONFIG, 18, 18);
 	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
-		enabled = REG_GET(DISPC_CONFIG, 18, 18);
+		enabled = REG_GET(DISPC_CONFIG, 19, 19);
+	else if (ch == OMAP_DSS_CHANNEL_LCD2)
+		enabled = REG_GET(DISPC_CONFIG2, 18, 18);
 	else
 		BUG();
 	enable_clocks(0);
 
 	return enabled;
-
 }
 
 
@@ -2022,6 +2110,8 @@
 		enabled = REG_GET(DISPC_CONFIG, 10, 10);
 	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		enabled = REG_GET(DISPC_CONFIG, 12, 12);
+	else if (ch == OMAP_DSS_CHANNEL_LCD2)
+		enabled = REG_GET(DISPC_CONFIG2, 10, 10);
 	else
 		BUG();
 	enable_clocks(0);
@@ -2030,7 +2120,7 @@
 }
 
 
-void dispc_set_tft_data_lines(u8 data_lines)
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
 {
 	int code;
 
@@ -2053,11 +2143,15 @@
 	}
 
 	enable_clocks(1);
-	REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
 	enable_clocks(0);
 }
 
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+		enum omap_parallel_interface_mode mode)
 {
 	u32 l;
 	int stallmode;
@@ -2087,13 +2181,17 @@
 
 	enable_clocks(1);
 
-	l = dispc_read_reg(DISPC_CONTROL);
-
-	l = FLD_MOD(l, stallmode, 11, 11);
-	l = FLD_MOD(l, gpout0, 15, 15);
-	l = FLD_MOD(l, gpout1, 16, 16);
-
-	dispc_write_reg(DISPC_CONTROL, l);
+	if (channel == OMAP_DSS_CHANNEL_LCD2) {
+		l = dispc_read_reg(DISPC_CONTROL2);
+		l = FLD_MOD(l, stallmode, 11, 11);
+		dispc_write_reg(DISPC_CONTROL2, l);
+	} else {
+		l = dispc_read_reg(DISPC_CONTROL);
+		l = FLD_MOD(l, stallmode, 11, 11);
+		l = FLD_MOD(l, gpout0, 15, 15);
+		l = FLD_MOD(l, gpout1, 16, 16);
+		dispc_write_reg(DISPC_CONTROL, l);
+	}
 
 	enable_clocks(0);
 }
@@ -2129,8 +2227,8 @@
 			timings->vfp, timings->vbp);
 }
 
-static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
-				   int vsw, int vfp, int vbp)
+static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
+		int hfp, int hbp, int vsw, int vfp, int vbp)
 {
 	u32 timing_h, timing_v;
 
@@ -2149,13 +2247,14 @@
 	}
 
 	enable_clocks(1);
-	dispc_write_reg(DISPC_TIMING_H, timing_h);
-	dispc_write_reg(DISPC_TIMING_V, timing_v);
+	dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
+	dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
 	enable_clocks(0);
 }
 
 /* change name to mode? */
-void dispc_set_lcd_timings(struct omap_video_timings *timings)
+void dispc_set_lcd_timings(enum omap_channel channel,
+		struct omap_video_timings *timings)
 {
 	unsigned xtot, ytot;
 	unsigned long ht, vt;
@@ -2165,10 +2264,11 @@
 				timings->vfp, timings->vbp))
 		BUG();
 
-	_dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp,
-			timings->vsw, timings->vfp, timings->vbp);
+	_dispc_set_lcd_timings(channel, timings->hsw, timings->hfp,
+			timings->hbp, timings->vsw, timings->vfp,
+			timings->vbp);
 
-	dispc_set_lcd_size(timings->x_res, timings->y_res);
+	dispc_set_lcd_size(channel, timings->x_res, timings->y_res);
 
 	xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
 	ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
@@ -2176,7 +2276,8 @@
 	ht = (timings->pixel_clock * 1000) / xtot;
 	vt = (timings->pixel_clock * 1000) / xtot / ytot;
 
-	DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res);
+	DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
+			timings->y_res);
 	DSSDBG("pck %u\n", timings->pixel_clock);
 	DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
 			timings->hsw, timings->hfp, timings->hbp,
@@ -2185,21 +2286,23 @@
 	DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
 }
 
-static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div)
+static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
+		u16 pck_div)
 {
 	BUG_ON(lck_div < 1);
 	BUG_ON(pck_div < 2);
 
 	enable_clocks(1);
-	dispc_write_reg(DISPC_DIVISOR,
+	dispc_write_reg(DISPC_DIVISOR(channel),
 			FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
 	enable_clocks(0);
 }
 
-static void dispc_get_lcd_divisor(int *lck_div, int *pck_div)
+static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
+		int *pck_div)
 {
 	u32 l;
-	l = dispc_read_reg(DISPC_DIVISOR);
+	l = dispc_read_reg(DISPC_DIVISOR(channel));
 	*lck_div = FLD_GET(l, 23, 16);
 	*pck_div = FLD_GET(l, 7, 0);
 }
@@ -2219,13 +2322,13 @@
 	return r;
 }
 
-unsigned long dispc_lclk_rate(void)
+unsigned long dispc_lclk_rate(enum omap_channel channel)
 {
 	int lcd;
 	unsigned long r;
 	u32 l;
 
-	l = dispc_read_reg(DISPC_DIVISOR);
+	l = dispc_read_reg(DISPC_DIVISOR(channel));
 
 	lcd = FLD_GET(l, 23, 16);
 
@@ -2234,13 +2337,13 @@
 	return r / lcd;
 }
 
-unsigned long dispc_pclk_rate(void)
+unsigned long dispc_pclk_rate(enum omap_channel channel)
 {
 	int lcd, pcd;
 	unsigned long r;
 	u32 l;
 
-	l = dispc_read_reg(DISPC_DIVISOR);
+	l = dispc_read_reg(DISPC_DIVISOR(channel));
 
 	lcd = FLD_GET(l, 23, 16);
 	pcd = FLD_GET(l, 7, 0);
@@ -2256,8 +2359,6 @@
 
 	enable_clocks(1);
 
-	dispc_get_lcd_divisor(&lcd, &pcd);
-
 	seq_printf(s, "- DISPC -\n");
 
 	seq_printf(s, "dispc fclk source = %s\n",
@@ -2265,9 +2366,25 @@
 			"dss1_alwon_fclk" : "dsi1_pll_fclk");
 
 	seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
-	seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd);
-	seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd);
 
+	seq_printf(s, "- LCD1 -\n");
+
+	dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
+
+	seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+			dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
+	seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
+			dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		seq_printf(s, "- LCD2 -\n");
+
+		dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
+
+		seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+				dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd);
+		seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
+				dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
+	}
 	enable_clocks(0);
 }
 
@@ -2309,6 +2426,12 @@
 	PIS(SYNC_LOST);
 	PIS(SYNC_LOST_DIGIT);
 	PIS(WAKEUP);
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		PIS(FRAMEDONE2);
+		PIS(VSYNC2);
+		PIS(ACBIAS_COUNT_STAT2);
+		PIS(SYNC_LOST2);
+	}
 #undef PIS
 }
 #endif
@@ -2327,19 +2450,30 @@
 	DUMPREG(DISPC_CONTROL);
 	DUMPREG(DISPC_CONFIG);
 	DUMPREG(DISPC_CAPABLE);
-	DUMPREG(DISPC_DEFAULT_COLOR0);
-	DUMPREG(DISPC_DEFAULT_COLOR1);
-	DUMPREG(DISPC_TRANS_COLOR0);
-	DUMPREG(DISPC_TRANS_COLOR1);
+	DUMPREG(DISPC_DEFAULT_COLOR(0));
+	DUMPREG(DISPC_DEFAULT_COLOR(1));
+	DUMPREG(DISPC_TRANS_COLOR(0));
+	DUMPREG(DISPC_TRANS_COLOR(1));
 	DUMPREG(DISPC_LINE_STATUS);
 	DUMPREG(DISPC_LINE_NUMBER);
-	DUMPREG(DISPC_TIMING_H);
-	DUMPREG(DISPC_TIMING_V);
-	DUMPREG(DISPC_POL_FREQ);
-	DUMPREG(DISPC_DIVISOR);
+	DUMPREG(DISPC_TIMING_H(0));
+	DUMPREG(DISPC_TIMING_V(0));
+	DUMPREG(DISPC_POL_FREQ(0));
+	DUMPREG(DISPC_DIVISOR(0));
 	DUMPREG(DISPC_GLOBAL_ALPHA);
 	DUMPREG(DISPC_SIZE_DIG);
-	DUMPREG(DISPC_SIZE_LCD);
+	DUMPREG(DISPC_SIZE_LCD(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		DUMPREG(DISPC_CONTROL2);
+		DUMPREG(DISPC_CONFIG2);
+		DUMPREG(DISPC_DEFAULT_COLOR(2));
+		DUMPREG(DISPC_TRANS_COLOR(2));
+		DUMPREG(DISPC_TIMING_H(2));
+		DUMPREG(DISPC_TIMING_V(2));
+		DUMPREG(DISPC_POL_FREQ(2));
+		DUMPREG(DISPC_DIVISOR(2));
+		DUMPREG(DISPC_SIZE_LCD(2));
+	}
 
 	DUMPREG(DISPC_GFX_BA0);
 	DUMPREG(DISPC_GFX_BA1);
@@ -2353,13 +2487,22 @@
 	DUMPREG(DISPC_GFX_WINDOW_SKIP);
 	DUMPREG(DISPC_GFX_TABLE_BA);
 
-	DUMPREG(DISPC_DATA_CYCLE1);
-	DUMPREG(DISPC_DATA_CYCLE2);
-	DUMPREG(DISPC_DATA_CYCLE3);
+	DUMPREG(DISPC_DATA_CYCLE1(0));
+	DUMPREG(DISPC_DATA_CYCLE2(0));
+	DUMPREG(DISPC_DATA_CYCLE3(0));
 
-	DUMPREG(DISPC_CPR_COEF_R);
-	DUMPREG(DISPC_CPR_COEF_G);
-	DUMPREG(DISPC_CPR_COEF_B);
+	DUMPREG(DISPC_CPR_COEF_R(0));
+	DUMPREG(DISPC_CPR_COEF_G(0));
+	DUMPREG(DISPC_CPR_COEF_B(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		DUMPREG(DISPC_DATA_CYCLE1(2));
+		DUMPREG(DISPC_DATA_CYCLE2(2));
+		DUMPREG(DISPC_DATA_CYCLE3(2));
+
+		DUMPREG(DISPC_CPR_COEF_R(2));
+		DUMPREG(DISPC_CPR_COEF_G(2));
+		DUMPREG(DISPC_CPR_COEF_B(2));
+	}
 
 	DUMPREG(DISPC_GFX_PRELOAD);
 
@@ -2458,8 +2601,8 @@
 #undef DUMPREG
 }
 
-static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
-				bool ihs, bool ivs, u8 acbi, u8 acb)
+static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
+		bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, u8 acb)
 {
 	u32 l = 0;
 
@@ -2476,13 +2619,14 @@
 	l |= FLD_VAL(acb, 7, 0);
 
 	enable_clocks(1);
-	dispc_write_reg(DISPC_POL_FREQ, l);
+	dispc_write_reg(DISPC_POL_FREQ(channel), l);
 	enable_clocks(0);
 }
 
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb)
+void dispc_set_pol_freq(enum omap_channel channel,
+		enum omap_panel_config config, u8 acbi, u8 acb)
 {
-	_dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0,
+	_dispc_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0,
 			(config & OMAP_DSS_LCD_RF) != 0,
 			(config & OMAP_DSS_LCD_IEO) != 0,
 			(config & OMAP_DSS_LCD_IPC) != 0,
@@ -2551,24 +2695,26 @@
 	return 0;
 }
 
-int dispc_set_clock_div(struct dispc_clock_info *cinfo)
+int dispc_set_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo)
 {
 	DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
 	DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
 
-	dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div);
+	dispc_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
 
 	return 0;
 }
 
-int dispc_get_clock_div(struct dispc_clock_info *cinfo)
+int dispc_get_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo)
 {
 	unsigned long fck;
 
 	fck = dispc_fclk_rate();
 
-	cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16);
-	cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0);
+	cinfo->lck_div = REG_GET(DISPC_DIVISOR(channel), 23, 16);
+	cinfo->pck_div = REG_GET(DISPC_DIVISOR(channel), 7, 0);
 
 	cinfo->lck = fck / cinfo->lck_div;
 	cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -2708,6 +2854,8 @@
 	PIS(VID2_FIFO_UNDERFLOW);
 	PIS(SYNC_LOST);
 	PIS(SYNC_LOST_DIGIT);
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		PIS(SYNC_LOST2);
 #undef PIS
 
 	printk("\n");
@@ -2926,6 +3074,45 @@
 		}
 	}
 
+	if (errors & DISPC_IRQ_SYNC_LOST2) {
+		struct omap_overlay_manager *manager = NULL;
+		bool enable = false;
+
+		DSSERR("SYNC_LOST for LCD2, disabling LCD2\n");
+
+		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+			struct omap_overlay_manager *mgr;
+			mgr = omap_dss_get_overlay_manager(i);
+
+			if (mgr->id == OMAP_DSS_CHANNEL_LCD2) {
+				manager = mgr;
+				enable = mgr->device->state ==
+						OMAP_DSS_DISPLAY_ACTIVE;
+				mgr->device->driver->disable(mgr->device);
+				break;
+			}
+		}
+
+		if (manager) {
+			struct omap_dss_device *dssdev = manager->device;
+			for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+				struct omap_overlay *ovl;
+				ovl = omap_dss_get_overlay(i);
+
+				if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+					continue;
+
+				if (ovl->id != 0 && ovl->manager == manager)
+					dispc_enable_plane(ovl->id, 0);
+			}
+
+			dispc_go(manager->id);
+			mdelay(50);
+			if (enable)
+				dssdev->driver->enable(dssdev);
+		}
+	}
+
 	if (errors & DISPC_IRQ_OCP_ERR) {
 		DSSERR("OCP_ERR\n");
 		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
@@ -3033,6 +3220,8 @@
 	memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
 
 	dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
 
 	/* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
 	 * so clear it */
@@ -3065,7 +3254,8 @@
 	dispc_write_reg(DISPC_SYSCONFIG, l);
 
 	/* FUNCGATED */
-	REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+	if (dss_has_feature(FEAT_FUNCGATED))
+		REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
 
 	/* L3 firewall setting: enable access to OCM RAM */
 	/* XXX this should be somewhere in plat-omap */
@@ -3139,17 +3329,18 @@
 		       enum omap_color_mode color_mode,
 		       bool ilace,
 		       enum omap_dss_rotation_type rotation_type,
-		       u8 rotation, bool mirror, u8 global_alpha)
+		       u8 rotation, bool mirror, u8 global_alpha,
+		       u8 pre_mult_alpha, enum omap_channel channel)
 {
 	int r = 0;
 
 	DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
-	       "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n",
+	       "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
 	       plane, paddr, screen_width, pos_x, pos_y,
 	       width, height,
 	       out_width, out_height,
 	       ilace, color_mode,
-	       rotation, mirror);
+	       rotation, mirror, channel);
 
 	enable_clocks(1);
 
@@ -3161,7 +3352,8 @@
 			   color_mode, ilace,
 			   rotation_type,
 			   rotation, mirror,
-			   global_alpha);
+			   global_alpha,
+			   pre_mult_alpha, channel);
 
 	enable_clocks(0);
 
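Editor's note: the dispc.c hunks above replace the fixed per-channel register defines with channel-parameterized macros that pick the new OMAP4 LCD2 offsets through ternaries (ch = 0 LCD, 1 DIGIT, 2 LCD2). A minimal standalone sketch of that selection pattern, reusing the DISPC_DIVISOR offsets quoted in the hunk (0x0070 for LCD/DIGIT, 0x040C for LCD2); the demo program and the macro name DISPC_DIVISOR_OFFSET are illustrative only, not part of the patch.

#include <assert.h>

/* Channel convention from the patch: ch = 0 LCD, ch = 1 DIGIT, ch = 2 LCD2. */
#define DISPC_DIVISOR_OFFSET(ch)	((ch) != 2 ? 0x0070 : 0x040C)

int main(void)
{
	assert(DISPC_DIVISOR_OFFSET(0) == 0x0070);	/* LCD keeps the legacy offset */
	assert(DISPC_DIVISOR_OFFSET(1) == 0x0070);	/* DIGIT has no divisor of its own here */
	assert(DISPC_DIVISOR_OFFSET(2) == 0x040C);	/* LCD2 lives in the new OMAP4 block */
	return 0;
}
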
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 960e977..75fb0a5 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -40,8 +40,9 @@
 } dpi;
 
 #ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
-static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
-		unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
+		unsigned long pck_req, unsigned long *fck, int *lck_div,
+		int *pck_div)
 {
 	struct dsi_clock_info dsi_cinfo;
 	struct dispc_clock_info dispc_cinfo;
@@ -58,7 +59,7 @@
 
 	dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r)
 		return r;
 
@@ -69,8 +70,9 @@
 	return 0;
 }
 #else
-static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
-		unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
+		unsigned long pck_req, unsigned long *fck, int *lck_div,
+		int *pck_div)
 {
 	struct dss_clock_info dss_cinfo;
 	struct dispc_clock_info dispc_cinfo;
@@ -84,7 +86,7 @@
 	if (r)
 		return r;
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r)
 		return r;
 
@@ -107,17 +109,17 @@
 
 	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
 
-	dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
-			dssdev->panel.acb);
+	dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+			dssdev->panel.acbi, dssdev->panel.acb);
 
 	is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
 
 #ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
-	r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000,
-			&fck, &lck_div, &pck_div);
+	r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck,
+			&lck_div, &pck_div);
 #else
-	r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000,
-			&fck, &lck_div, &pck_div);
+	r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck,
+			&lck_div, &pck_div);
 #endif
 	if (r)
 		goto err0;
@@ -132,7 +134,7 @@
 		t->pixel_clock = pck;
 	}
 
-	dispc_set_lcd_timings(t);
+	dispc_set_lcd_timings(dssdev->manager->id, t);
 
 err0:
 	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -145,10 +147,12 @@
 
 	is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
 
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
-	dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT :
-			OMAP_DSS_LCD_DISPLAY_STN);
-	dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines);
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_BYPASS);
+	dispc_set_lcd_display_type(dssdev->manager->id, is_tft ?
+			OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
+	dispc_set_tft_data_lines(dssdev->manager->id,
+			dssdev->phy.dpi.data_lines);
 
 	return 0;
 }
@@ -234,7 +238,7 @@
 	dssdev->panel.timings = *timings;
 	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
 		dpi_set_mode(dssdev);
-		dispc_go(OMAP_DSS_CHANNEL_LCD);
+		dispc_go(dssdev->manager->id);
 	}
 }
 EXPORT_SYMBOL(dpi_set_timings);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index aa4f7a5..ddf3a05 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -792,7 +792,8 @@
 }
 
 /* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
+static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+		struct dsi_clock_info *cinfo)
 {
 	if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
 		return -EINVAL;
@@ -812,7 +813,7 @@
 		 * with DSS2_FCK source also */
 		cinfo->highfreq = 0;
 	} else {
-		cinfo->clkin = dispc_pclk_rate();
+		cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
 
 		if (cinfo->clkin < 32000000)
 			cinfo->highfreq = 0;
@@ -1206,8 +1207,8 @@
 
 	seq_printf(s,	"VP_CLK\t\t%lu\n"
 			"VP_PCLK\t\t%lu\n",
-			dispc_lclk_rate(),
-			dispc_pclk_rate());
+			dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD),
+			dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD));
 
 	enable_clocks(0);
 }
@@ -2888,7 +2889,7 @@
 	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
 		dss_setup_partial_planes(dssdev, x, y, w, h,
 				enlarge_update_area);
-		dispc_set_lcd_size(*w, *h);
+		dispc_set_lcd_size(dssdev->manager->id, *w, *h);
 	}
 
 	return 0;
@@ -2947,12 +2948,14 @@
 		return r;
 	}
 
-	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+	dispc_set_lcd_display_type(dssdev->manager->id,
+			OMAP_DSS_LCD_DISPLAY_TFT);
 
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
-	dispc_enable_fifohandcheck(1);
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_DSI);
+	dispc_enable_fifohandcheck(dssdev->manager->id, 1);
 
-	dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+	dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
 
 	{
 		struct omap_video_timings timings = {
@@ -2964,7 +2967,7 @@
 			.vbp		= 0,
 		};
 
-		dispc_set_lcd_timings(&timings);
+		dispc_set_lcd_timings(dssdev->manager->id, &timings);
 	}
 
 	return 0;
@@ -2987,7 +2990,7 @@
 	cinfo.regm  = dssdev->phy.dsi.div.regm;
 	cinfo.regm3 = dssdev->phy.dsi.div.regm3;
 	cinfo.regm4 = dssdev->phy.dsi.div.regm4;
-	r = dsi_calc_clock_rates(&cinfo);
+	r = dsi_calc_clock_rates(dssdev, &cinfo);
 	if (r) {
 		DSSERR("Failed to calc dsi clocks\n");
 		return r;
@@ -3019,7 +3022,7 @@
 		return r;
 	}
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r) {
 		DSSERR("Failed to set dispc clocks\n");
 		return r;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 5c7940d..b394951 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -333,9 +333,9 @@
 void dispc_lcd_enable_signal_polarity(bool act_high);
 void dispc_lcd_enable_signal(bool enable);
 void dispc_pck_free_enable(bool enable);
-void dispc_enable_fifohandcheck(bool enable);
+void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
 
-void dispc_set_lcd_size(u16 width, u16 height);
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
 void dispc_set_digit_size(u16 width, u16 height);
 u32 dispc_get_plane_fifo_size(enum omap_plane plane);
 void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
@@ -359,7 +359,8 @@
 		      bool ilace,
 		      enum omap_dss_rotation_type rotation_type,
 		      u8 rotation, bool mirror,
-		      u8 global_alpha);
+		      u8 global_alpha, u8 pre_mult_alpha,
+		      enum omap_channel channel);
 
 bool dispc_go_busy(enum omap_channel channel);
 void dispc_go(enum omap_channel channel);
@@ -368,9 +369,11 @@
 int dispc_enable_plane(enum omap_plane plane, bool enable);
 void dispc_enable_replication(enum omap_plane plane, bool enable);
 
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode);
-void dispc_set_tft_data_lines(u8 data_lines);
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type);
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+		enum omap_parallel_interface_mode mode);
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
+void dispc_set_lcd_display_type(enum omap_channel channel,
+		enum omap_lcd_display_type type);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
 void dispc_set_default_color(enum omap_channel channel, u32 color);
@@ -387,17 +390,21 @@
 bool dispc_alpha_blending_enabled(enum omap_channel ch);
 
 bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
-void dispc_set_lcd_timings(struct omap_video_timings *timings);
+void dispc_set_lcd_timings(enum omap_channel channel,
+		struct omap_video_timings *timings);
 unsigned long dispc_fclk_rate(void);
-unsigned long dispc_lclk_rate(void);
-unsigned long dispc_pclk_rate(void);
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb);
+unsigned long dispc_lclk_rate(enum omap_channel channel);
+unsigned long dispc_pclk_rate(enum omap_channel channel);
+void dispc_set_pol_freq(enum omap_channel channel,
+		enum omap_panel_config config, u8 acbi, u8 acb);
 void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
 		struct dispc_clock_info *cinfo);
 int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
 		struct dispc_clock_info *cinfo);
-int dispc_set_clock_div(struct dispc_clock_info *cinfo);
-int dispc_get_clock_div(struct dispc_clock_info *cinfo);
+int dispc_set_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo);
+int dispc_get_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo);
 
 
 /* VENC */
@@ -424,8 +431,8 @@
 
 int rfbi_configure(int rfbi_module, int bpp, int lines);
 void rfbi_enable_rfbi(bool enable);
-void rfbi_transfer_area(u16 width, u16 height,
-			     void (callback)(void *data), void *data);
+void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
+		u16 height, void (callback)(void *data), void *data);
 void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
 unsigned long rfbi_get_max_tx_rate(void);
 int rfbi_init_display(struct omap_dss_device *display);
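
Editor's note: dss.h now routes the divider helpers through a channel argument, and the dispc_get_clock_div() body in the dispc.c hunk shows the derived rates are simply lck = fck / lck_div and pck = lck / pck_div. A tiny sketch of that arithmetic; the clock figure below is only an example value, real numbers come from DISPC_DIVISOR(ch) and the DSS clock tree.

#include <stdio.h>

int main(void)
{
	/* Example figures only; real values come from DISPC_DIVISOR(ch). */
	unsigned long fck = 172800000;	/* DISPC functional clock, Hz */
	unsigned int lck_div = 1, pck_div = 4;

	unsigned long lck = fck / lck_div;	/* logic clock */
	unsigned long pck = lck / pck_div;	/* pixel clock */

	printf("lck = %lu Hz, pck = %lu Hz\n", lck, pck);
	return 0;
}
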
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 867f68d..cf3ef69 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -82,6 +82,18 @@
 	OMAP_DISPLAY_TYPE_VENC,
 };
 
+static const enum omap_display_type omap4_dss_supported_displays[] = {
+	/* OMAP_DSS_CHANNEL_LCD */
+	OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,
+
+	/* OMAP_DSS_CHANNEL_DIGIT */
+	OMAP_DISPLAY_TYPE_VENC,
+
+	/* OMAP_DSS_CHANNEL_LCD2 */
+	OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
+	OMAP_DISPLAY_TYPE_DSI,
+};
+
 static const enum omap_color_mode omap2_dss_supported_color_modes[] = {
 	/* OMAP_DSS_GFX */
 	OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
@@ -127,6 +139,10 @@
 	.reg_fields = omap2_dss_reg_fields,
 	.num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
 
+	.has_feature	=
+		FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL |
+		FEAT_PCKFREEENABLE | FEAT_FUNCGATED,
+
 	.num_mgrs = 2,
 	.num_ovls = 3,
 	.supported_displays = omap2_dss_supported_displays,
@@ -134,11 +150,14 @@
 };
 
 /* OMAP3 DSS Features */
-static struct omap_dss_features omap3_dss_features = {
+static struct omap_dss_features omap3430_dss_features = {
 	.reg_fields = omap3_dss_reg_fields,
 	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
 
-	.has_feature	= FEAT_GLOBAL_ALPHA,
+	.has_feature	=
+		FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+		FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
+		FEAT_FUNCGATED,
 
 	.num_mgrs = 2,
 	.num_ovls = 3,
@@ -146,6 +165,36 @@
 	.supported_color_modes = omap3_dss_supported_color_modes,
 };
 
+static struct omap_dss_features omap3630_dss_features = {
+	.reg_fields = omap3_dss_reg_fields,
+	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+
+	.has_feature    =
+		FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+		FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
+		FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED,
+
+	.num_mgrs = 2,
+	.num_ovls = 3,
+	.supported_displays = omap3_dss_supported_displays,
+	.supported_color_modes = omap3_dss_supported_color_modes,
+};
+
+/* OMAP4 DSS Features */
+static struct omap_dss_features omap4_dss_features = {
+	.reg_fields = omap3_dss_reg_fields,
+	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+
+	.has_feature	=
+		FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
+		FEAT_MGR_LCD2,
+
+	.num_mgrs = 3,
+	.num_ovls = 3,
+	.supported_displays = omap4_dss_supported_displays,
+	.supported_color_modes = omap3_dss_supported_color_modes,
+};
+
 /* Functions returning values related to a DSS feature */
 int dss_feat_get_num_mgrs(void)
 {
@@ -167,6 +216,13 @@
 	return omap_current_dss_features->supported_color_modes[plane];
 }
 
+bool dss_feat_color_mode_supported(enum omap_plane plane,
+		enum omap_color_mode color_mode)
+{
+	return omap_current_dss_features->supported_color_modes[plane] &
+			color_mode;
+}
+
 /* DSS has_feature check */
 bool dss_has_feature(enum dss_feat_id id)
 {
@@ -186,6 +242,10 @@
 {
 	if (cpu_is_omap24xx())
 		omap_current_dss_features = &omap2_dss_features;
+	else if (cpu_is_omap3630())
+		omap_current_dss_features = &omap3630_dss_features;
+	else if (cpu_is_omap34xx())
+		omap_current_dss_features = &omap3430_dss_features;
 	else
-		omap_current_dss_features = &omap3_dss_features;
+		omap_current_dss_features = &omap4_dss_features;
 }
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index cb231ea..b9c70be 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -20,13 +20,19 @@
 #ifndef __OMAP2_DSS_FEATURES_H
 #define __OMAP2_DSS_FEATURES_H
 
-#define MAX_DSS_MANAGERS	2
+#define MAX_DSS_MANAGERS	3
 #define MAX_DSS_OVERLAYS	3
 
 /* DSS has feature id */
 enum dss_feat_id {
 	FEAT_GLOBAL_ALPHA	= 1 << 0,
 	FEAT_GLOBAL_ALPHA_VID1	= 1 << 1,
+	FEAT_PRE_MULT_ALPHA	= 1 << 2,
+	FEAT_LCDENABLEPOL	= 1 << 3,
+	FEAT_LCDENABLESIGNAL	= 1 << 4,
+	FEAT_PCKFREEENABLE	= 1 << 5,
+	FEAT_FUNCGATED		= 1 << 6,
+	FEAT_MGR_LCD2		= 1 << 7,
 };
 
 /* DSS register field id */
@@ -43,6 +49,8 @@
 int dss_feat_get_num_ovls(void);
 enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
 enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+bool dss_feat_color_mode_supported(enum omap_plane plane,
+		enum omap_color_mode color_mode);
 
 bool dss_has_feature(enum dss_feat_id id);
 void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
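
Editor's note: the dss_feat_id values added above are single bits, so a SoC's capability set (the omap2/omap3430/omap3630/omap4 entries in dss_features.c) is just an OR of them and a has-feature test reduces to a bitwise AND against the current platform's mask. A userspace-style sketch of that pattern, with the mask borrowed from the omap4_dss_features entry in the hunk; the standalone helper below is illustrative, not the in-kernel dss_has_feature() implementation.

#include <stdbool.h>
#include <stdio.h>

enum dss_feat_id {		/* bit values as defined in dss_features.h above */
	FEAT_GLOBAL_ALPHA	= 1 << 0,
	FEAT_PRE_MULT_ALPHA	= 1 << 2,
	FEAT_MGR_LCD2		= 1 << 7,
};

/* OMAP4 capability word, per the omap4_dss_features entry above. */
static const unsigned long omap4_features =
	FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA | FEAT_MGR_LCD2;

static bool has_feature(unsigned long mask, enum dss_feat_id id)
{
	return (mask & id) != 0;
}

int main(void)
{
	printf("LCD2 manager available: %s\n",
	       has_feature(omap4_features, FEAT_MGR_LCD2) ? "yes" : "no");
	return 0;
}
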
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 545e9b9..172d4e6 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -406,6 +406,7 @@
 	u16 out_width;	/* if 0, out_width == width */
 	u16 out_height;	/* if 0, out_height == height */
 	u8 global_alpha;
+	u8 pre_mult_alpha;
 
 	enum omap_channel channel;
 	bool replication;
@@ -512,11 +513,14 @@
 	unsigned long timeout = msecs_to_jiffies(500);
 	u32 irq;
 
-	if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC)
+	if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
 		irq = DISPC_IRQ_EVSYNC_ODD;
-	else
-		irq = DISPC_IRQ_VSYNC;
-
+	} else {
+		if (mgr->id == OMAP_DSS_CHANNEL_LCD)
+			irq = DISPC_IRQ_VSYNC;
+		else
+			irq = DISPC_IRQ_VSYNC2;
+	}
 	return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
 }
 
@@ -524,7 +528,6 @@
 {
 	unsigned long timeout = msecs_to_jiffies(500);
 	struct manager_cache_data *mc;
-	enum omap_channel channel;
 	u32 irq;
 	int r;
 	int i;
@@ -535,7 +538,6 @@
 
 	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
 		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
-		channel = OMAP_DSS_CHANNEL_DIGIT;
 	} else {
 		if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
 			enum omap_dss_update_mode mode;
@@ -543,11 +545,14 @@
 			if (mode != OMAP_DSS_UPDATE_AUTO)
 				return 0;
 
-			irq = DISPC_IRQ_FRAMEDONE;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_FRAMEDONE
+				: DISPC_IRQ_FRAMEDONE2;
 		} else {
-			irq = DISPC_IRQ_VSYNC;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_VSYNC
+				: DISPC_IRQ_VSYNC2;
 		}
-		channel = OMAP_DSS_CHANNEL_LCD;
 	}
 
 	mc = &dss_cache.manager_cache[mgr->id];
@@ -594,7 +599,6 @@
 int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
 {
 	unsigned long timeout = msecs_to_jiffies(500);
-	enum omap_channel channel;
 	struct overlay_cache_data *oc;
 	struct omap_dss_device *dssdev;
 	u32 irq;
@@ -611,7 +615,6 @@
 
 	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
 		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
-		channel = OMAP_DSS_CHANNEL_DIGIT;
 	} else {
 		if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
 			enum omap_dss_update_mode mode;
@@ -619,11 +622,14 @@
 			if (mode != OMAP_DSS_UPDATE_AUTO)
 				return 0;
 
-			irq = DISPC_IRQ_FRAMEDONE;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_FRAMEDONE
+				: DISPC_IRQ_FRAMEDONE2;
 		} else {
-			irq = DISPC_IRQ_VSYNC;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_VSYNC
+				: DISPC_IRQ_VSYNC2;
 		}
-		channel = OMAP_DSS_CHANNEL_LCD;
 	}
 
 	oc = &dss_cache.overlay_cache[ovl->id];
@@ -842,7 +848,9 @@
 			c->rotation_type,
 			c->rotation,
 			c->mirror,
-			c->global_alpha);
+			c->global_alpha,
+			c->pre_mult_alpha,
+			c->channel);
 
 	if (r) {
 		/* this shouldn't happen */
@@ -894,10 +902,10 @@
 	r = 0;
 	busy = false;
 
-	mgr_busy[0] = dispc_go_busy(0);
-	mgr_busy[1] = dispc_go_busy(1);
-	mgr_go[0] = false;
-	mgr_go[1] = false;
+	for (i = 0; i < num_mgrs; i++) {
+		mgr_busy[i] = dispc_go_busy(i);
+		mgr_go[i] = false;
+	}
 
 	/* Commit overlay settings */
 	for (i = 0; i < num_ovls; ++i) {
@@ -1156,9 +1164,10 @@
 	const int num_mgrs = dss_feat_get_num_mgrs();
 	int i, r;
 	bool mgr_busy[MAX_DSS_MANAGERS];
+	u32 irq_mask;
 
-	mgr_busy[0] = dispc_go_busy(0);
-	mgr_busy[1] = dispc_go_busy(1);
+	for (i = 0; i < num_mgrs; i++)
+		mgr_busy[i] = dispc_go_busy(i);
 
 	spin_lock(&dss_cache.lock);
 
@@ -1179,8 +1188,8 @@
 		goto end;
 
 	/* re-read busy flags */
-	mgr_busy[0] = dispc_go_busy(0);
-	mgr_busy[1] = dispc_go_busy(1);
+	for (i = 0; i < num_mgrs; i++)
+		mgr_busy[i] = dispc_go_busy(i);
 
 	/* keep running as long as there are busy managers, so that
 	 * we can collect overlay-applied information */
@@ -1189,9 +1198,12 @@
 			goto end;
 	}
 
-	omap_dispc_unregister_isr(dss_apply_irq_handler, NULL,
-			DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
-			DISPC_IRQ_EVSYNC_EVEN);
+	irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
+			DISPC_IRQ_EVSYNC_EVEN;
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		irq_mask |= DISPC_IRQ_VSYNC2;
+
+	omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
 	dss_cache.irq_enabled = false;
 
 end:
@@ -1265,6 +1277,7 @@
 		oc->out_width = ovl->info.out_width;
 		oc->out_height = ovl->info.out_height;
 		oc->global_alpha = ovl->info.global_alpha;
+		oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
 
 		oc->replication =
 			dss_use_replication(dssdev, ovl->info.color_mode);
@@ -1383,9 +1396,14 @@
 	r = 0;
 	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
 	if (!dss_cache.irq_enabled) {
-		r = omap_dispc_register_isr(dss_apply_irq_handler, NULL,
-				DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
-				DISPC_IRQ_EVSYNC_EVEN);
+		u32 mask;
+
+		mask = DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
+			DISPC_IRQ_EVSYNC_EVEN;
+		if (dss_has_feature(FEAT_MGR_LCD2))
+			mask |= DISPC_IRQ_VSYNC2;
+
+		r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
 		dss_cache.irq_enabled = true;
 	}
 	configure_dispc();
@@ -1477,6 +1495,10 @@
 			mgr->name = "tv";
 			mgr->id = OMAP_DSS_CHANNEL_DIGIT;
 			break;
+		case 2:
+			mgr->name = "lcd2";
+			mgr->id = OMAP_DSS_CHANNEL_LCD2;
+			break;
 		}
 
 		mgr->set_device = &omap_dss_set_device;
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 75642c2..456efef 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -257,6 +257,43 @@
 	return size;
 }
 
+static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			ovl->info.pre_mult_alpha);
+}
+
+static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
+		const char *buf, size_t size)
+{
+	int r;
+	struct omap_overlay_info info;
+
+	ovl->get_overlay_info(ovl, &info);
+
+	/* Only the GFX and Video2 planes support pre-multiplied alpha;
+	 * set it to zero for the Video1 plane.
+	 */
+	if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+		ovl->id == OMAP_DSS_VIDEO1)
+		info.pre_mult_alpha = 0;
+	else
+		info.pre_mult_alpha = simple_strtoul(buf, NULL, 10);
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r)
+		return r;
+
+	if (ovl->manager) {
+		r = ovl->manager->apply(ovl->manager);
+		if (r)
+			return r;
+	}
+
+	return size;
+}
+
 struct overlay_attribute {
 	struct attribute attr;
 	ssize_t (*show)(struct omap_overlay *, char *);
@@ -280,6 +317,9 @@
 		overlay_enabled_show, overlay_enabled_store);
 static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
 		overlay_global_alpha_show, overlay_global_alpha_store);
+static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
+		overlay_pre_mult_alpha_show,
+		overlay_pre_mult_alpha_store);
 
 static struct attribute *overlay_sysfs_attrs[] = {
 	&overlay_attr_name.attr,
@@ -290,6 +330,7 @@
 	&overlay_attr_output_size.attr,
 	&overlay_attr_enabled.attr,
 	&overlay_attr_global_alpha.attr,
+	&overlay_attr_pre_mult_alpha.attr,
 	NULL
 };
 
@@ -623,12 +664,22 @@
 	int i;
 	struct omap_overlay_manager *lcd_mgr;
 	struct omap_overlay_manager *tv_mgr;
+	struct omap_overlay_manager *lcd2_mgr = NULL;
 	struct omap_overlay_manager *mgr = NULL;
 
 	lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
 	tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2);
 
-	if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
+	if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+		if (!lcd2_mgr->device || force) {
+			if (lcd2_mgr->device)
+				lcd2_mgr->unset_device(lcd2_mgr);
+			lcd2_mgr->set_device(lcd2_mgr, dssdev);
+			mgr = lcd2_mgr;
+		}
+	} else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
 		if (!lcd_mgr->device || force) {
 			if (lcd_mgr->device)
 				lcd_mgr->unset_device(lcd_mgr);
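
Editor's note: overlay.c now exposes the new pre_mult_alpha flag as a read/write sysfs attribute next to global_alpha. A hedged userspace sketch of toggling it; the /sys path and the overlay1 index below are assumptions based on the usual omapdss platform-device layout, so adjust them for the target board.

#include <stdio.h>

int main(void)
{
	/* Assumed path: overlays are typically published under the omapdss
	 * platform device as overlay0, overlay1, ... */
	const char *attr = "/sys/devices/platform/omapdss/overlay1/pre_mult_alpha";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	fputs("1\n", f);	/* treat this overlay's pixels as pre-multiplied */
	fclose(f);
	return 0;
}

Per the store handler above, writes are forced to 0 for the Video1 plane on chips without FEAT_GLOBAL_ALPHA_VID1.
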
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index bbe6246..10a2ffe 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -301,8 +301,8 @@
 }
 EXPORT_SYMBOL(omap_rfbi_write_pixels);
 
-void rfbi_transfer_area(u16 width, u16 height,
-			     void (callback)(void *data), void *data)
+void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
+		u16 height, void (*callback)(void *data), void *data)
 {
 	u32 l;
 
@@ -311,9 +311,9 @@
 
 	DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
 
-	dispc_set_lcd_size(width, height);
+	dispc_set_lcd_size(dssdev->manager->id, width, height);
 
-	dispc_enable_channel(OMAP_DSS_CHANNEL_LCD, true);
+	dispc_enable_channel(dssdev->manager->id, true);
 
 	rfbi.framedone_callback = callback;
 	rfbi.framedone_callback_data = data;
@@ -887,7 +887,7 @@
 
 	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
 		dss_setup_partial_planes(dssdev, x, y, w, h, true);
-		dispc_set_lcd_size(*w, *h);
+		dispc_set_lcd_size(dssdev->manager->id, *w, *h);
 	}
 
 	return 0;
@@ -899,7 +899,7 @@
 		void (*callback)(void *), void *data)
 {
 	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
-		rfbi_transfer_area(w, h, callback, data);
+		rfbi_transfer_area(dssdev, w, h, callback, data);
 	} else {
 		struct omap_overlay *ovl;
 		void __iomem *addr;
@@ -1018,11 +1018,13 @@
 		goto err1;
 	}
 
-	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+	dispc_set_lcd_display_type(dssdev->manager->id,
+			OMAP_DSS_LCD_DISPLAY_TFT);
 
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI);
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_RFBI);
 
-	dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+	dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
 
 	rfbi_configure(dssdev->phy.rfbi.channel,
 			       dssdev->ctrl.pixel_size,
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index ee07a3c..b64adf7 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -35,12 +35,16 @@
 	struct regulator *vdds_sdi_reg;
 } sdi;
 
-static void sdi_basic_init(void)
-{
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+static void sdi_basic_init(struct omap_dss_device *dssdev)
 
-	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
-	dispc_set_tft_data_lines(24);
+{
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_BYPASS);
+
+	dispc_set_lcd_display_type(dssdev->manager->id,
+			OMAP_DSS_LCD_DISPLAY_TFT);
+
+	dispc_set_tft_data_lines(dssdev->manager->id, 24);
 	dispc_lcd_enable_signal_polarity(1);
 }
 
@@ -68,20 +72,20 @@
 	if (!sdi.skip_init)
 		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
 
-	sdi_basic_init();
+	sdi_basic_init(dssdev);
 
 	/* 15.5.9.1.2 */
 	dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
 
-	dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
-			dssdev->panel.acb);
+	dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+			dssdev->panel.acbi, dssdev->panel.acb);
 
 	if (!sdi.skip_init) {
 		r = dss_calc_clock_div(1, t->pixel_clock * 1000,
 				&dss_cinfo, &dispc_cinfo);
 	} else {
 		r = dss_get_clock_div(&dss_cinfo);
-		r = dispc_get_clock_div(&dispc_cinfo);
+		r = dispc_get_clock_div(dssdev->manager->id, &dispc_cinfo);
 	}
 
 	if (r)
@@ -102,13 +106,13 @@
 	}
 
 
-	dispc_set_lcd_timings(t);
+	dispc_set_lcd_timings(dssdev->manager->id, t);
 
 	r = dss_set_clock_div(&dss_cinfo);
 	if (r)
 		goto err2;
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r)
 		goto err2;
 
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 6a704f1..4fdab8e 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2132,8 +2132,9 @@
 	char *str, *options, *this_opt;
 	int r = 0;
 
-	str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL);
-	strcpy(str, def_mode);
+	str = kstrdup(def_mode, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
 	options = str;
 
 	while (!r && (this_opt = strsep(&options, ",")) != NULL) {
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 9c0144e..65560a1 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -513,9 +513,9 @@
 	if (atomic_dec_and_test(&ps3fb.f_count)) {
 		if (atomic_read(&ps3fb.ext_flip)) {
 			atomic_set(&ps3fb.ext_flip, 0);
-			if (!try_acquire_console_sem()) {
+			if (console_trylock()) {
 				ps3fb_sync(info, 0);	/* single buffer */
-				release_console_sem();
+				console_unlock();
 			}
 		}
 	}
@@ -830,14 +830,14 @@
 			if (vmode) {
 				var = info->var;
 				fb_videomode_to_var(&var, vmode);
-				acquire_console_sem();
+				console_lock();
 				info->flags |= FBINFO_MISC_USEREVENT;
 				/* Force, in case only special bits changed */
 				var.activate |= FB_ACTIVATE_FORCE;
 				par->new_mode_id = val;
 				retval = fb_set_var(info, &var);
 				info->flags &= ~FBINFO_MISC_USEREVENT;
-				release_console_sem();
+				console_unlock();
 			}
 			break;
 		}
@@ -881,9 +881,9 @@
 			break;
 
 		dev_dbg(info->device, "PS3FB_IOCTL_FSEL:%d\n", val);
-		acquire_console_sem();
+		console_lock();
 		retval = ps3fb_sync(info, val);
-		release_console_sem();
+		console_unlock();
 		break;
 
 	default:
@@ -903,9 +903,9 @@
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (ps3fb.is_kicked) {
 			ps3fb.is_kicked = 0;
-			acquire_console_sem();
+			console_lock();
 			ps3fb_sync(info, 0);	/* single buffer */
-			release_console_sem();
+			console_unlock();
 		}
 		schedule();
 	}
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index cea6403..35f61dd 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -701,16 +701,12 @@
 	 */
 	pxa168fb_init_mode(info, mi);
 
-	ret = pxa168fb_check_var(&info->var, info);
-	if (ret)
-		goto failed_free_fbmem;
-
 	/*
 	 * Fill in sane defaults.
 	 */
 	ret = pxa168fb_check_var(&info->var, info);
 	if (ret)
-		goto failed;
+		goto failed_free_fbmem;
 
 	/*
 	 * enable controller clock
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index b81168d..cf4beb9 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -1,5 +1,5 @@
 /*
- *  pxa3xx-gc.c - Linux kernel module for PXA3xx graphics controllers
+ *  pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
  *
  *  This driver needs a DirectFB counterpart in user space, communication
  *  is handled via mmap()ed memory areas and an ioctl.
@@ -421,7 +421,7 @@
 		buffer->next = priv->free;
 		priv->free = buffer;
 		spin_unlock_irqrestore(&priv->spinlock, flags);
-		return ret;
+		return -EFAULT;
 	}
 
 	buffer->length = words;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 618f36b..da38818 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -331,7 +331,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops riva_bl_ops = {
+static const struct backlight_ops riva_bl_ops = {
 	.get_brightness = riva_bl_get_brightness,
 	.update_status	= riva_bl_update_status,
 };
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 46b4309..61c819e3 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -13,6 +13,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -918,9 +919,9 @@
 	}
 
 	info->clk = clk_get(NULL, "lcd");
-	if (!info->clk || IS_ERR(info->clk)) {
+	if (IS_ERR(info->clk)) {
 		printk(KERN_ERR "failed to get lcd clock source\n");
-		ret = -ENOENT;
+		ret = PTR_ERR(info->clk);
 		goto release_irq;
 	}
 
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index dce8c97..75738a9 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -22,7 +22,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -1113,12 +1113,12 @@
 
 	dev_info(info->device, "suspend\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -1129,7 +1129,7 @@
 	pci_set_power_state(dev, pci_choose_state(dev, state));
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1145,12 +1145,12 @@
 
 	dev_info(info->device, "resume\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if (par->ref_count == 0) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -1159,7 +1159,7 @@
 	err = pci_enable_device(dev);
 	if (err) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		dev_err(info->device, "error %d enabling device for resume\n", err);
 		return err;
 	}
@@ -1169,7 +1169,7 @@
 	fb_set_suspend(info, 0);
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 842d157..487911e 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2373,7 +2373,7 @@
 	if (mesg.event == PM_EVENT_FREEZE)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(info, 1);
 
 	if (info->fbops->fb_sync)
@@ -2385,7 +2385,7 @@
 	pci_save_state(dev);
 	pci_disable_device(dev);
 	pci_set_power_state(dev, pci_choose_state(dev, mesg));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -2409,7 +2409,7 @@
 		return 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 
 	pci_set_power_state(dev, PCI_D0);
 	pci_restore_state(dev);
@@ -2423,7 +2423,7 @@
 	savagefb_set_par(info);
 	fb_set_suspend(info, 0);
 	savagefb_blank(FB_BLANK_UNBLANK, info);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 8c59cc8..2b9e56a 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
@@ -221,6 +222,7 @@
 	struct delayed_work edid_work;
 	struct fb_var_screeninfo var;
 	struct fb_monspecs monspec;
+	struct notifier_block notifier;
 };
 
 static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
@@ -737,7 +739,7 @@
 	struct fb_modelist *modelist = NULL;
 	unsigned int f_width = 0, f_height = 0, f_refresh = 0;
 	unsigned long found_rate_error = ULONG_MAX; /* silly compiler... */
-	bool exact_match = false;
+	bool scanning = false, preferred_bad = false;
 	u8 edid[128];
 	char *forced;
 	int i;
@@ -800,6 +802,9 @@
 		if (i < 2) {
 			f_width = 0;
 			f_height = 0;
+		} else {
+			/* The user wants us to use the EDID data */
+			scanning = true;
 		}
 		dev_dbg(hdmi->dev, "Forced mode %ux%u@%uHz\n",
 			f_width, f_height, f_refresh);
@@ -807,37 +812,56 @@
 
 	/* Walk monitor modes to find the best or the exact match */
 	for (i = 0, mode = hdmi->monspec.modedb;
-	     f_width && f_height && i < hdmi->monspec.modedb_len && !exact_match;
+	     i < hdmi->monspec.modedb_len && scanning;
 	     i++, mode++) {
 		unsigned long rate_error;
 
-		/* No interest in unmatching modes */
-		if (f_width != mode->xres || f_height != mode->yres)
+		if (!f_width && !f_height) {
+			/*
+			 * A parameter string "video=sh_mobile_lcdc:0x0" means
+			 * use the preferred EDID mode. If it is rejected by
+			 * .fb_check_var(), keep looking, until an acceptable
+			 * one is found.
+			 */
+			if ((mode->flag & FB_MODE_IS_FIRST) || preferred_bad)
+				scanning = false;
+			else
+				continue;
+		} else if (f_width != mode->xres || f_height != mode->yres) {
+			/* No interest in unmatching modes */
 			continue;
+		}
 
 		rate_error = sh_hdmi_rate_error(hdmi, mode, hdmi_rate, parent_rate);
 
-		if (f_refresh == mode->refresh || (!f_refresh && !rate_error))
-			/*
-			 * Exact match if either the refresh rate matches or it
-			 * hasn't been specified and we've found a mode, for
-			 * which we can configure the clock precisely
-			 */
-			exact_match = true;
-		else if (found && found_rate_error <= rate_error)
-			/*
-			 * We otherwise search for the closest matching clock
-			 * rate - either if no refresh rate has been specified
-			 * or we cannot find an exactly matching one
-			 */
-			continue;
+		if (scanning) {
+			if (f_refresh == mode->refresh || (!f_refresh && !rate_error))
+				/*
+				 * Exact match if either the refresh rate
+				 * matches or it hasn't been specified and we've
+				 * found a mode, for which we can configure the
+				 * clock precisely
+				 */
+				scanning = false;
+			else if (found && found_rate_error <= rate_error)
+				/*
+				 * We otherwise search for the closest matching
+				 * clock rate - either if no refresh rate has
+				 * been specified or we cannot find an exactly
+				 * matching one
+				 */
+				continue;
+		}
 
 		/* Check if supported: sufficient fb memory, supported clock-rate */
 		fb_videomode_to_var(var, mode);
 
+		var->bits_per_pixel = info->var.bits_per_pixel;
+
 		if (info && info->fbops->fb_check_var &&
 		    info->fbops->fb_check_var(var, info)) {
-			exact_match = false;
+			scanning = true;
+			preferred_bad = true;
 			continue;
 		}
 
@@ -855,9 +879,9 @@
 	 * driver, and passing ->info with HDMI platform data.
 	 */
 	if (info && !found) {
-		modelist = hdmi->info->modelist.next &&
-			!list_empty(&hdmi->info->modelist) ?
-			list_entry(hdmi->info->modelist.next,
+		modelist = info->modelist.next &&
+			!list_empty(&info->modelist) ?
+			list_entry(info->modelist.next,
 				   struct fb_modelist, list) :
 			NULL;
 
@@ -1100,6 +1124,7 @@
 	mutex_lock(&hdmi->mutex);
 
 	if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) {
+		struct fb_info *info = hdmi->info;
 		unsigned long parent_rate = 0, hdmi_rate;
 
 		/* A device has been plugged in */
@@ -1121,22 +1146,21 @@
 		/* Switched to another (d) power-save mode */
 		msleep(10);
 
-		if (!hdmi->info)
+		if (!info)
 			goto out;
 
-		ch = hdmi->info->par;
+		ch = info->par;
 
-		acquire_console_sem();
+		console_lock();
 
 		/* HDMI plug in */
 		if (!sh_hdmi_must_reconfigure(hdmi) &&
-		    hdmi->info->state == FBINFO_STATE_RUNNING) {
+		    info->state == FBINFO_STATE_RUNNING) {
 			/*
 			 * First activation with the default monitor - just turn
 			 * on, if we run a resume here, the logo disappears
 			 */
-			if (lock_fb_info(hdmi->info)) {
-				struct fb_info *info = hdmi->info;
+			if (lock_fb_info(info)) {
 				info->var.width = hdmi->var.width;
 				info->var.height = hdmi->var.height;
 				sh_hdmi_display_on(hdmi, info);
@@ -1144,10 +1168,10 @@
 			}
 		} else {
 			/* New monitor or have to wake up */
-			fb_set_suspend(hdmi->info, 0);
+			fb_set_suspend(info, 0);
 		}
 
-		release_console_sem();
+		console_unlock();
 	} else {
 		ret = 0;
 		if (!hdmi->info)
@@ -1157,12 +1181,12 @@
 		fb_destroy_modedb(hdmi->monspec.modedb);
 		hdmi->monspec.modedb = NULL;
 
-		acquire_console_sem();
+		console_lock();
 
 		/* HDMI disconnect */
 		fb_set_suspend(hdmi->info, 1);
 
-		release_console_sem();
+		console_unlock();
 		pm_runtime_put(hdmi->dev);
 	}
 
@@ -1175,13 +1199,6 @@
 }
 
 static int sh_hdmi_notify(struct notifier_block *nb,
-			  unsigned long action, void *data);
-
-static struct notifier_block sh_hdmi_notifier = {
-	.notifier_call = sh_hdmi_notify,
-};
-
-static int sh_hdmi_notify(struct notifier_block *nb,
 			  unsigned long action, void *data)
 {
 	struct fb_event *event = data;
@@ -1190,7 +1207,7 @@
 	struct sh_mobile_lcdc_board_cfg	*board_cfg = &ch->cfg.board_cfg;
 	struct sh_hdmi *hdmi = board_cfg->board_data;
 
-	if (nb != &sh_hdmi_notifier || !hdmi || hdmi->info != info)
+	if (!hdmi || nb != &hdmi->notifier || hdmi->info != info)
 		return NOTIFY_DONE;
 
 	switch(action) {
@@ -1209,11 +1226,11 @@
 		 * temporarily, synchronise with the work queue and re-acquire
 		 * the info->lock.
 		 */
-		unlock_fb_info(hdmi->info);
+		unlock_fb_info(info);
 		mutex_lock(&hdmi->mutex);
 		hdmi->info = NULL;
 		mutex_unlock(&hdmi->mutex);
-		lock_fb_info(hdmi->info);
+		lock_fb_info(info);
 		return NOTIFY_OK;
 	}
 	return NOTIFY_DONE;
@@ -1311,6 +1328,9 @@
 		goto ecodec;
 	}
 
+	hdmi->notifier.notifier_call = sh_hdmi_notify;
+	fb_register_client(&hdmi->notifier);
+
 	return 0;
 
 ecodec:
@@ -1341,6 +1361,8 @@
 
 	snd_soc_unregister_codec(&pdev->dev);
 
+	fb_unregister_client(&hdmi->notifier);
+
 	board_cfg->display_on = NULL;
 	board_cfg->display_off = NULL;
 	board_cfg->board_data = NULL;
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index bd4840a..bf12e53 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -912,9 +912,9 @@
 
 	/* Nothing to reconfigure, when called from fbcon */
 	if (user) {
-		acquire_console_sem();
+		console_lock();
 		sh_mobile_fb_reconfig(info);
-		release_console_sem();
+		console_unlock();
 	}
 
 	mutex_unlock(&ch->open_lock);
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index b7dc180..bcb44a5 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -2010,9 +2010,9 @@
 
 	/* tell console/fb driver we are suspending */
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(fbi, 1);
-	release_console_sem();
+	console_unlock();
 
 	/* backup copies in case chip is powered down over suspend */
 
@@ -2069,9 +2069,9 @@
 		memcpy_toio(par->cursor.k_addr, par->store_cursor,
 			    par->cursor.size);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(fbi, 0);
-	release_console_sem();
+	console_unlock();
 
 	vfree(par->store_fb);
 	vfree(par->store_cursor);
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index dee64c3..2ab7041 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -536,7 +536,7 @@
 	fbiinit2 = sst_read(FBIINIT2);
 	fbiinit3 = sst_read(FBIINIT3);
 
-	/* everything is reset. we enable fbiinit2/3 remap : dac acces ok */
+	/* everything is reset. we enable fbiinit2/3 remap : dac access ok */
 	pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
 	                       PCI_EN_INIT_WR | PCI_REMAP_DAC );
 
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index 6913fe1..dfef88c 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -25,7 +25,7 @@
 #include <linux/fb.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-/* Why should fb driver call console functions? because acquire_console_sem() */
+/* Why should fb driver call console functions? because console_lock() */
 #include <linux/console.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -944,7 +944,7 @@
 	struct mfd_cell *cell = dev->dev.platform_data;
 	int retval = 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -965,7 +965,7 @@
 	if (cell->suspend)
 		retval = cell->suspend(dev);
 
-	release_console_sem();
+	console_unlock();
 
 	return retval;
 }
@@ -976,7 +976,7 @@
 	struct mfd_cell *cell = dev->dev.platform_data;
 	int retval = 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	if (cell->resume) {
 		retval = cell->resume(dev);
@@ -992,7 +992,7 @@
 
 	fb_set_suspend(info, 0);
 out:
-	release_console_sem();
+	console_unlock();
 	return retval;
 }
 #else
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 289edd5..4e66349e 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1674,17 +1674,17 @@
 #ifdef CONFIG_PM
 static int viafb_suspend(void *unused)
 {
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(viafbinfo, 1);
 	viafb_sync(viafbinfo);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
 
 static int viafb_resume(void *unused)
 {
-	acquire_console_sem();
+	console_lock();
 	if (viaparinfo->shared->vdev->engine_mmio)
 		viafb_reset_engine(viaparinfo);
 	viafb_set_par(viafbinfo);
@@ -1692,7 +1692,7 @@
 		viafb_set_par(viafbinfo1);
 	fb_set_suspend(viafbinfo, 0);
 
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 7617f12..0e120d6 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -215,6 +215,33 @@
 	return 0;
 }
 
+/*
+ * vt8500lcd_blank():
+ *	Blank the display by setting all palette values to zero.  Note,
+ * 	True Color modes do not really use the palette, so this will not
+ *      blank the display in all modes.
+ */
+static int vt8500lcd_blank(int blank, struct fb_info *info)
+{
+	int i;
+
+	switch (blank) {
+	case FB_BLANK_POWERDOWN:
+	case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_NORMAL:
+		if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
+		    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+			for (i = 0; i < 256; i++)
+				vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
+	case FB_BLANK_UNBLANK:
+		if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
+		    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+			fb_set_cmap(&info->cmap, info);
+	}
+	return 0;
+}
+
 static struct fb_ops vt8500lcd_ops = {
 	.owner		= THIS_MODULE,
 	.fb_set_par	= vt8500lcd_set_par,
@@ -225,6 +252,7 @@
 	.fb_sync	= wmt_ge_sync,
 	.fb_ioctl	= vt8500lcd_ioctl,
 	.fb_pan_display	= vt8500lcd_pan_display,
+	.fb_blank	= vt8500lcd_blank,
 };
 
 static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 85d76ec..a2965ab 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -23,7 +23,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -819,12 +819,12 @@
 
 	dev_info(info->device, "suspend\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -835,7 +835,7 @@
 	pci_set_power_state(dev, pci_choose_state(dev, state));
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -850,7 +850,7 @@
 
 	dev_info(info->device, "resume\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if (par->ref_count == 0)
@@ -869,7 +869,7 @@
 
 fail:
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 3e6934d..a20218c 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -491,12 +491,12 @@
 	if (console_set_on_cmdline)
 		return;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(c) {
 		if (!strcmp(c->name, "tty") && c->index == 0)
 			break;
 	}
-	release_console_sem();
+	console_unlock();
 	if (c) {
 		unregister_console(c);
 		c->flags |= CON_CONSDEV;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index ef8d9d5..4fb5b2b 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -96,11 +96,6 @@
 
 MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
 
-/* A PCI device has it's own struct device and so does a virtio device so
- * we create a place for the virtio devices to show up in sysfs.  I think it
- * would make more sense for virtio to not insist on having it's own device. */
-static struct device *virtio_pci_root;
-
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
@@ -629,7 +624,7 @@
 	if (vp_dev == NULL)
 		return -ENOMEM;
 
-	vp_dev->vdev.dev.parent = virtio_pci_root;
+	vp_dev->vdev.dev.parent = &pci_dev->dev;
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->vdev.config = &virtio_pci_config_ops;
 	vp_dev->pci_dev = pci_dev;
@@ -717,17 +712,7 @@
 
 static int __init virtio_pci_init(void)
 {
-	int err;
-
-	virtio_pci_root = root_device_register("virtio-pci");
-	if (IS_ERR(virtio_pci_root))
-		return PTR_ERR(virtio_pci_root);
-
-	err = pci_register_driver(&virtio_pci_driver);
-	if (err)
-		root_device_unregister(virtio_pci_root);
-
-	return err;
+	return pci_register_driver(&virtio_pci_driver);
 }
 
 module_init(virtio_pci_init);
@@ -735,7 +720,6 @@
 static void __exit virtio_pci_exit(void)
 {
 	pci_unregister_driver(&virtio_pci_driver);
-	root_device_unregister(virtio_pci_root);
 }
 
 module_exit(virtio_pci_exit);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3a7e9ff..38e96ab 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -593,19 +593,17 @@
 
 	/* get interface & functional clock objects */
 	hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
-	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+	if (IS_ERR(hdq_data->hdq_ick)) {
+		dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
+		ret = PTR_ERR(hdq_data->hdq_ick);
+		goto err_ick;
+	}
 
-	if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
-		dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
-		if (IS_ERR(hdq_data->hdq_ick)) {
-			ret = PTR_ERR(hdq_data->hdq_ick);
-			goto err_clk;
-		}
-		if (IS_ERR(hdq_data->hdq_fck)) {
-			ret = PTR_ERR(hdq_data->hdq_fck);
-			clk_put(hdq_data->hdq_ick);
-			goto err_clk;
-		}
+	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+	if (IS_ERR(hdq_data->hdq_fck)) {
+		dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
+		ret = PTR_ERR(hdq_data->hdq_fck);
+		goto err_fck;
 	}
 
 	hdq_data->hdq_usecount = 0;
@@ -665,10 +663,12 @@
 	clk_disable(hdq_data->hdq_ick);
 
 err_intfclk:
-	clk_put(hdq_data->hdq_ick);
 	clk_put(hdq_data->hdq_fck);
 
-err_clk:
+err_fck:
+	clk_put(hdq_data->hdq_ick);
+
+err_ick:
 	iounmap(hdq_data->hdq_base);
 
 err_ioremap:
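The omap_hdq change just above replaces the combined clock checks with the usual sequential acquire-then-unwind ladder: each clk_get() is validated on its own, and the error labels (err_fck, err_ick) release resources in the reverse order of acquisition. A generic, purely illustrative C sketch of that unwind pattern, with ordinary stdio handles standing in for the two clocks and example paths only:

/*
 * Illustrative sketch of the reverse-order error-unwind ladder used above;
 * plain files stand in for the ick/fck clocks, paths are only examples.
 */
#include <stdio.h>

static int setup(const char *path_a, const char *path_b)
{
	FILE *a, *b;
	int ret = 0;

	a = fopen(path_a, "r");
	if (!a) {
		ret = -1;
		goto err_a;		/* nothing acquired yet */
	}

	b = fopen(path_b, "r");
	if (!b) {
		ret = -1;
		goto err_b;		/* release only what is already held */
	}

	/* ... use both resources ... */
	fclose(b);
	fclose(a);
	return 0;

err_b:
	fclose(a);
err_a:
	return ret;
}

int main(void)
{
	return setup("/etc/hostname", "/etc/hosts") ? 1 : 0;
}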
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 1f51366..f0c9096 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,17 @@
 	  Say Y here if you want to connect 1-wire
 	  simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
 
+config W1_SLAVE_DS2423
+	tristate "Counter 1-wire device (DS2423)"
+	select CRC16
+	help
+	  If you enable this you can read the counter values available
+	  in the DS2423 chipset from the w1_slave file under the
+	  sysfs file system.
+
+	  Say Y here if you want to use a 1-wire
+	  counter family device (DS2423).
+
 config W1_SLAVE_DS2431
 	tristate "1kb EEPROM family support (DS2431)"
 	help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index f1f51f1..3c76350 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_W1_SLAVE_THERM)	+= w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)	+= w1_smem.o
+obj-$(CONFIG_W1_SLAVE_DS2423)	+= w1_ds2423.o
 obj-$(CONFIG_W1_SLAVE_DS2431)	+= w1_ds2431.o
 obj-$(CONFIG_W1_SLAVE_DS2433)	+= w1_ds2433.o
 obj-$(CONFIG_W1_SLAVE_DS2760)	+= w1_ds2760.o
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
new file mode 100644
index 0000000..7a7dbe50
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -0,0 +1,166 @@
+/*
+ *	w1_ds2423.c
+ *
+ * Copyright (c) 2010 Mika Laitio <lamikr@pilppa.org>
+ *
+ * This driver reads the values of the 4 counters and exposes them through the
+ * w1_slave file in sysfs.
+ * Inspired by the w1_therm and w1_ds2431 drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/crc16.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+#define CRC16_VALID	0xb001
+#define CRC16_INIT	0
+
+#define COUNTER_COUNT 4
+#define READ_BYTE_COUNT 42
+
+static ssize_t w1_counter_read(struct device *device,
+	struct device_attribute *attr, char *buf);
+
+static struct device_attribute w1_counter_attr =
+	__ATTR(w1_slave, S_IRUGO, w1_counter_read, NULL);
+
+static ssize_t w1_counter_read(struct device *device,
+	struct device_attribute *attr, char *out_buf)
+{
+	struct w1_slave *sl = dev_to_w1_slave(device);
+	struct w1_master *dev = sl->master;
+	u8 rbuf[COUNTER_COUNT * READ_BYTE_COUNT];
+	u8 wrbuf[3];
+	int rom_addr;
+	int read_byte_count;
+	int result;
+	ssize_t c;
+	int ii;
+	int p;
+	int crc;
+
+	c		= PAGE_SIZE;
+	rom_addr	= (12 << 5) + 31;
+	wrbuf[0]	= 0xA5;
+	wrbuf[1]	= rom_addr & 0xFF;
+	wrbuf[2]	= rom_addr >> 8;
+	mutex_lock(&dev->mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_block(dev, wrbuf, 3);
+		read_byte_count = 0;
+		for (p = 0; p < 4; p++) {
+			/*
+			 * 1 byte for first bytes in ram page read
+			 * 4 bytes for counter
+			 * 4 bytes for zero bits
+			 * 2 bytes for crc
+			 * 31 remaining bytes from the ram page
+			 */
+			read_byte_count += w1_read_block(dev,
+				rbuf + (p * READ_BYTE_COUNT), READ_BYTE_COUNT);
+			for (ii = 0; ii < READ_BYTE_COUNT; ++ii)
+				c -= snprintf(out_buf + PAGE_SIZE - c,
+					c, "%02x ",
+					rbuf[(p * READ_BYTE_COUNT) + ii]);
+			if (read_byte_count != (p + 1) * READ_BYTE_COUNT) {
+				dev_warn(device,
+					"w1_counter_read() returned %u bytes "
+					"instead of %d bytes wanted.\n",
+					read_byte_count,
+					READ_BYTE_COUNT);
+				c -= snprintf(out_buf + PAGE_SIZE - c,
+					c, "crc=NO\n");
+			} else {
+				if (p == 0) {
+					crc = crc16(CRC16_INIT, wrbuf, 3);
+					crc = crc16(crc, rbuf, 11);
+				} else {
+					/*
+					 * DS2423 calculates crc from all bytes
+					 * read after the previous crc bytes.
+					 */
+					crc = crc16(CRC16_INIT,
+						(rbuf + 11) +
+						((p - 1) * READ_BYTE_COUNT),
+						READ_BYTE_COUNT);
+				}
+				if (crc == CRC16_VALID) {
+					result = 0;
+					for (ii = 4; ii > 0; ii--) {
+						result <<= 8;
+						result |= rbuf[(p *
+							READ_BYTE_COUNT) + ii];
+					}
+					c -= snprintf(out_buf + PAGE_SIZE - c,
+						c, "crc=YES c=%d\n", result);
+				} else {
+					c -= snprintf(out_buf + PAGE_SIZE - c,
+						c, "crc=NO\n");
+				}
+			}
+		}
+	} else {
+		c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error");
+	}
+	mutex_unlock(&dev->mutex);
+	return PAGE_SIZE - c;
+}
+
+static int w1_f1d_add_slave(struct w1_slave *sl)
+{
+	return device_create_file(&sl->dev, &w1_counter_attr);
+}
+
+static void w1_f1d_remove_slave(struct w1_slave *sl)
+{
+	device_remove_file(&sl->dev, &w1_counter_attr);
+}
+
+static struct w1_family_ops w1_f1d_fops = {
+	.add_slave      = w1_f1d_add_slave,
+	.remove_slave   = w1_f1d_remove_slave,
+};
+
+static struct w1_family w1_family_1d = {
+	.fid = W1_COUNTER_DS2423,
+	.fops = &w1_f1d_fops,
+};
+
+static int __init w1_f1d_init(void)
+{
+	return w1_register_family(&w1_family_1d);
+}
+
+static void __exit w1_f1d_exit(void)
+{
+	w1_unregister_family(&w1_family_1d);
+}
+
+module_init(w1_f1d_init);
+module_exit(w1_f1d_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
+MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram");
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 3ca1b92..f3b636d 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -30,6 +30,7 @@
 #define W1_FAMILY_SMEM_01	0x01
 #define W1_FAMILY_SMEM_81	0x81
 #define W1_THERM_DS18S20 	0x10
+#define W1_COUNTER_DS2423	0x1D
 #define W1_THERM_DS1822  	0x22
 #define W1_EEPROM_DS2433  	0x23
 #define W1_THERM_DS18B20 	0x28
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a5ad77e..31649b7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -409,15 +409,26 @@
 	  Most people will say N.
 
 config F71808E_WDT
-	tristate "Fintek F71808E, F71882FG and F71889FG Watchdog"
+	tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
 	depends on X86 && EXPERIMENTAL
 	help
 	  This is the driver for the hardware watchdog on the Fintek
-	  F71808E, F71882FG and F71889FG Super I/O controllers.
+	  F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
 
 	  You can compile this driver directly into the kernel, or use
 	  it as a module.  The module will be called f71808e_wdt.
 
+config SP5100_TCO
+	tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
+	depends on X86 && PCI
+	---help---
+	  Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
+	  (Total Cost of Ownership) timer is a watchdog timer that will reboot
+	  the machine after its expiration. The expiration time can be
+	  configured with the "heartbeat" parameter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sp5100_tco.
 
 config GEODE_WDT
 	tristate "AMD Geode CS5535/CS5536 Watchdog"
@@ -631,6 +642,24 @@
 
 	  Most people will say N.
 
+config NV_TCO
+	tristate "nVidia TCO Timer/Watchdog"
+	depends on X86 && PCI
+	---help---
+	  Hardware driver for the TCO timer built into the nVidia Hub family
+	  (such as the MCP51).  The TCO (Total Cost of Ownership) timer is a
+	  watchdog timer that will reboot the machine after its second
+	  expiration. The expiration time can be configured with the
+	  "heartbeat" parameter.
+
+	  On some motherboards the driver may fail to reset the chipset's
+	  NO_REBOOT flag which prevents the watchdog from rebooting the
+	  machine. If this is the case you will get a kernel message like
+	  "failed to reset NO_REBOOT flag, reboot disabled by hardware".
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nv_tco.
+
 config RDC321X_WDT
 	tristate "RDC R-321x SoC watchdog"
 	depends on X86_RDC321X
@@ -722,14 +751,15 @@
 	  Most people will say N.
 
 config W83627HF_WDT
-	tristate "W83627HF Watchdog Timer"
+	tristate "W83627HF/W83627DHG Watchdog Timer"
 	depends on X86
 	---help---
 	  This is the driver for the hardware watchdog on the W83627HF chipset
 	  as used in Advantech PC-9578 and Tyan S2721-533 motherboards
-	  (and likely others).  This watchdog simply watches your kernel to
-	  make sure it doesn't freeze, and if it does, it reboots your computer
-	  after a certain amount of time.
+	  (and likely others). The driver also supports the W83627DHG chip.
+	  This watchdog simply watches your kernel to make sure it doesn't
+	  freeze, and if it does, it reboots your computer after a certain
+	  amount of time.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called w83627hf_wdt.
@@ -832,10 +862,22 @@
 
 # M68K Architecture
 
-# M68KNOMMU Architecture
+config M54xx_WATCHDOG
+	tristate "MCF54xx watchdog support"
+	depends on M548x
+	help
+	  To compile this driver as a module, choose M here: the
+	  module will be called m54xx_wdt.
 
 # MIPS Architecture
 
+config ATH79_WDT
+	tristate "Atheros AR71XX/AR724X/AR913X hardware watchdog"
+	depends on ATH79
+	help
+	  Hardware driver for the built-in watchdog timer on the Atheros
+	  AR71XX/AR724X/AR913X SoCs.
+
 config BCM47XX_WDT
 	tristate "Broadcom BCM47xx Watchdog Timer"
 	depends on BCM47XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 4b0ef38..20e44c4 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -68,6 +68,7 @@
 obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
 obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
 obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
+obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o
 obj-$(CONFIG_GEODE_WDT) += geodewdt.o
 obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
 obj-$(CONFIG_SBC_FITPC2_WATCHDOG) += sbc_fitpc2_wdt.o
@@ -86,6 +87,7 @@
 obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
 obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
 obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o
+obj-$(CONFIG_NV_TCO) += nv_tco.o
 obj-$(CONFIG_RDC321X_WDT) += rdc321x_wdt.o
 obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
 obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
@@ -104,10 +106,10 @@
 # M32R Architecture
 
 # M68K Architecture
-
-# M68KNOMMU Architecture
+obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
 
 # MIPS Architecture
+obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
 obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
 obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
 obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 1e9caea..fa4d360 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@
  *	want to register another driver on the same PCI id.
  */
 
-static struct pci_device_id ali_pci_tbl[] = {
+static struct pci_device_id ali_pci_tbl[] __used = {
 	{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
 	{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
 	{ 0, },
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index d8d4da9..4b7a2b4 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@
 module_init(alim7101_wdt_init);
 module_exit(alim7101_wdt_unload);
 
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata = {
+static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
 	{ }
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
new file mode 100644
index 0000000..725c84b
--- /dev/null
+++ b/drivers/watchdog/ath79_wdt.c
@@ -0,0 +1,305 @@
+/*
+ * Atheros AR71XX/AR724X/AR913X built-in hardware watchdog timer.
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This driver was based on: drivers/watchdog/ixp4xx_wdt.c
+ *	Author: Deepak Saxena <dsaxena@plexity.net>
+ *	Copyright 2004 (c) MontaVista, Software, Inc.
+ *
+ * which again was based on sa1100 driver,
+ *	Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+#define DRIVER_NAME	"ath79-wdt"
+
+#define WDT_TIMEOUT	15	/* seconds */
+
+#define WDOG_CTRL_LAST_RESET	BIT(31)
+#define WDOG_CTRL_ACTION_MASK	3
+#define WDOG_CTRL_ACTION_NONE	0	/* no action */
+#define WDOG_CTRL_ACTION_GPI	1	/* general purpose interrupt */
+#define WDOG_CTRL_ACTION_NMI	2	/* NMI */
+#define WDOG_CTRL_ACTION_FCR	3	/* full chip reset */
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+			   "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+			  "(default=" __MODULE_STRING(WDT_TIMEOUT) "s)");
+
+static unsigned long wdt_flags;
+
+#define WDT_FLAGS_BUSY		0
+#define WDT_FLAGS_EXPECT_CLOSE	1
+
+static struct clk *wdt_clk;
+static unsigned long wdt_freq;
+static int boot_status;
+static int max_timeout;
+
+static inline void ath79_wdt_keepalive(void)
+{
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+}
+
+static inline void ath79_wdt_enable(void)
+{
+	ath79_wdt_keepalive();
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+}
+
+static inline void ath79_wdt_disable(void)
+{
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+}
+
+static int ath79_wdt_set_timeout(int val)
+{
+	if (val < 1 || val > max_timeout)
+		return -EINVAL;
+
+	timeout = val;
+	ath79_wdt_keepalive();
+
+	return 0;
+}
+
+static int ath79_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(WDT_FLAGS_BUSY, &wdt_flags))
+		return -EBUSY;
+
+	clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+	ath79_wdt_enable();
+
+	return nonseekable_open(inode, file);
+}
+
+static int ath79_wdt_release(struct inode *inode, struct file *file)
+{
+	if (test_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags))
+		ath79_wdt_disable();
+	else {
+		pr_crit(DRIVER_NAME ": device closed unexpectedly, "
+			"watchdog timer will not stop!\n");
+		ath79_wdt_keepalive();
+	}
+
+	clear_bit(WDT_FLAGS_BUSY, &wdt_flags);
+	clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+	return 0;
+}
+
+static ssize_t ath79_wdt_write(struct file *file, const char *data,
+				size_t len, loff_t *ppos)
+{
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, data + i))
+					return -EFAULT;
+
+				if (c == 'V')
+					set_bit(WDT_FLAGS_EXPECT_CLOSE,
+						&wdt_flags);
+			}
+		}
+
+		ath79_wdt_keepalive();
+	}
+
+	return len;
+}
+
+static const struct watchdog_info ath79_wdt_info = {
+	.options		= WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+				  WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+	.firmware_version	= 0,
+	.identity		= "ATH79 watchdog",
+};
+
+static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int err;
+	int t;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		err = copy_to_user(argp, &ath79_wdt_info,
+				   sizeof(ath79_wdt_info)) ? -EFAULT : 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+		err = put_user(0, p);
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		err = put_user(boot_status, p);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		ath79_wdt_keepalive();
+		err = 0;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		err = get_user(t, p);
+		if (err)
+			break;
+
+		err = ath79_wdt_set_timeout(t);
+		if (err)
+			break;
+
+		/* fallthrough */
+	case WDIOC_GETTIMEOUT:
+		err = put_user(timeout, p);
+		break;
+
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	return err;
+}
+
+static const struct file_operations ath79_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= ath79_wdt_write,
+	.unlocked_ioctl	= ath79_wdt_ioctl,
+	.open		= ath79_wdt_open,
+	.release	= ath79_wdt_release,
+};
+
+static struct miscdevice ath79_wdt_miscdev = {
+	.minor = WATCHDOG_MINOR,
+	.name = "watchdog",
+	.fops = &ath79_wdt_fops,
+};
+
+static int __devinit ath79_wdt_probe(struct platform_device *pdev)
+{
+	u32 ctrl;
+	int err;
+
+	wdt_clk = clk_get(&pdev->dev, "wdt");
+	if (IS_ERR(wdt_clk))
+		return PTR_ERR(wdt_clk);
+
+	err = clk_enable(wdt_clk);
+	if (err)
+		goto err_clk_put;
+
+	wdt_freq = clk_get_rate(wdt_clk);
+	if (!wdt_freq) {
+		err = -EINVAL;
+		goto err_clk_disable;
+	}
+
+	max_timeout = (0xfffffffful / wdt_freq);
+	if (timeout < 1 || timeout > max_timeout) {
+		timeout = max_timeout;
+		dev_info(&pdev->dev,
+			"timeout value must be 0 < timeout < %d, using %d\n",
+			max_timeout, timeout);
+	}
+
+	ctrl = ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+	boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? WDIOF_CARDRESET : 0;
+
+	err = misc_register(&ath79_wdt_miscdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"unable to register misc device, err=%d\n", err);
+		goto err_clk_disable;
+	}
+
+	return 0;
+
+err_clk_disable:
+	clk_disable(wdt_clk);
+err_clk_put:
+	clk_put(wdt_clk);
+	return err;
+}
+
+static int __devexit ath79_wdt_remove(struct platform_device *pdev)
+{
+	misc_deregister(&ath79_wdt_miscdev);
+	clk_disable(wdt_clk);
+	clk_put(wdt_clk);
+	return 0;
+}
+
+static void ath97_wdt_shutdown(struct platform_device *pdev)
+{
+	ath79_wdt_disable();
+}
+
+static struct platform_driver ath79_wdt_driver = {
+	.remove		= __devexit_p(ath79_wdt_remove),
+	.shutdown	= ath97_wdt_shutdown,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init ath79_wdt_init(void)
+{
+	return platform_driver_probe(&ath79_wdt_driver, ath79_wdt_probe);
+}
+module_init(ath79_wdt_init);
+
+static void __exit ath79_wdt_exit(void)
+{
+	platform_driver_unregister(&ath79_wdt_driver);
+}
+module_exit(ath79_wdt_exit);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
+MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
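The ath79 driver above implements the generic watchdog character-device contract: opening /dev/watchdog arms the timer, any write pings it, a 'V' byte before close allows a clean stop (unless nowayout is set), and WDIOC_SETTIMEOUT / WDIOC_GETTIMEOUT adjust and report the period. A minimal userspace sketch of that standard interface (nothing in it is ath79-specific):

/*
 * Minimal sketch of the standard Linux watchdog ABI exercised by the new
 * ath79_wdt driver (and the other watchdog drivers in this series).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 15;	/* seconds; must stay within the driver's max_timeout */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("/dev/watchdog");
		return 1;
	}
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("timeout set to %d s\n", timeout);

	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ping the timer */

	/* magic close: honoured only when the driver runs with nowayout=0 */
	if (write(fd, "V", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}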
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index d11ffb0..7e7ec9c 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -85,6 +85,22 @@
 	return 0;
 }
 
+static void __booke_wdt_set(void *data)
+{
+	u32 val;
+
+	val = mfspr(SPRN_TCR);
+	val &= ~WDTP_MASK;
+	val |= WDTP(booke_wdt_period);
+
+	mtspr(SPRN_TCR, val);
+}
+
+static void booke_wdt_set(void)
+{
+	on_each_cpu(__booke_wdt_set, NULL, 0);
+}
+
 static void __booke_wdt_ping(void *data)
 {
 	mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
@@ -181,8 +197,7 @@
 #else
 		booke_wdt_period = tmp;
 #endif
-		mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) |
-						WDTP(booke_wdt_period));
+		booke_wdt_set();
 		return 0;
 	case WDIOC_GETTIMEOUT:
 		return put_user(booke_wdt_period, p);
@@ -193,8 +208,15 @@
 	return 0;
 }
 
+/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
+static unsigned long wdt_is_active;
+
 static int booke_wdt_open(struct inode *inode, struct file *file)
 {
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &wdt_is_active))
+		return -EBUSY;
+
 	spin_lock(&booke_wdt_lock);
 	if (booke_wdt_enabled == 0) {
 		booke_wdt_enabled = 1;
@@ -210,8 +232,17 @@
 
 static int booke_wdt_release(struct inode *inode, struct file *file)
 {
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+	/* Normally, the watchdog is disabled when /dev/watchdog is closed, but
+	 * if CONFIG_WATCHDOG_NOWAYOUT is defined, then it means that the
+	 * watchdog should remain enabled.  So we disable it only if
+	 * CONFIG_WATCHDOG_NOWAYOUT is not defined.
+	 */
 	on_each_cpu(__booke_wdt_disable, NULL, 0);
 	booke_wdt_enabled = 0;
+#endif
+
+	clear_bit(0, &wdt_is_active);
 
 	return 0;
 }
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index eca855a..3de4ba0 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -646,7 +646,7 @@
 	struct cpwd *p = dev_get_drvdata(&op->dev);
 	int i;
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < WD_NUMDEVS; i++) {
 		misc_deregister(&p->devs[i].misc);
 
 		if (!p->enabled) {
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 65e5796..d4d8d1f 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -42,18 +42,21 @@
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
 #define SIO_REG_DEVREV		0x22	/* Device revision */
 #define SIO_REG_MANID		0x23	/* Fintek ID (2 bytes) */
+#define SIO_REG_ROM_ADDR_SEL	0x27	/* ROM address select */
+#define SIO_REG_MFUNCT1		0x29	/* Multi function select 1 */
+#define SIO_REG_MFUNCT2		0x2a	/* Multi function select 2 */
+#define SIO_REG_MFUNCT3		0x2b	/* Multi function select 3 */
 #define SIO_REG_ENABLE		0x30	/* Logical device enable */
 #define SIO_REG_ADDR		0x60	/* Logical device address (2 bytes) */
 
 #define SIO_FINTEK_ID		0x1934	/* Manufacturers ID */
-#define SIO_F71808_ID		0x0901  /* Chipset ID */
-#define SIO_F71858_ID		0x0507  /* Chipset ID */
+#define SIO_F71808_ID		0x0901	/* Chipset ID */
+#define SIO_F71858_ID		0x0507	/* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
+#define SIO_F71869_ID		0x0814	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
 #define SIO_F71889_ID		0x0723	/* Chipset ID */
 
-#define	F71882FG_REG_START		0x01
-
 #define F71808FG_REG_WDO_CONF		0xf0
 #define F71808FG_REG_WDT_CONF		0xf5
 #define F71808FG_REG_WD_TIME		0xf6
@@ -70,13 +73,15 @@
 #define WATCHDOG_MAX_TIMEOUT	(60 * 255)
 #define WATCHDOG_PULSE_WIDTH	125	/* 125 ms, default pulse width for
 					   watchdog signal */
+#define WATCHDOG_F71862FG_PIN	63	/* default watchdog reset output
+					   pin number 63 */
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
 static const int max_timeout = WATCHDOG_MAX_TIMEOUT;
-static int timeout = 60;	/* default timeout in seconds */
+static int timeout = WATCHDOG_TIMEOUT;	/* default timeout in seconds */
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout,
 	"Watchdog timeout in seconds. 1<= timeout <="
@@ -89,6 +94,12 @@
 	"Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms"
 			" (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
 
+static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN;
+module_param(f71862fg_pin, uint, 0);
+MODULE_PARM_DESC(f71862fg_pin,
+	"Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63"
+			" (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")");
+
 static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0444);
 MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
@@ -98,12 +109,13 @@
 MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
 	" given initial timeout. Zero (default) disables this feature.");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg };
 
 static const char *f71808e_names[] = {
 	"f71808fg",
 	"f71858fg",
 	"f71862fg",
+	"f71869",
 	"f71882fg",
 	"f71889fg",
 };
@@ -282,6 +294,28 @@
 	return err;
 }
 
+static int f71862fg_pin_configure(unsigned short ioaddr)
+{
+	/* When ioaddr is non-zero the calling function has to take care of
+	   mutex handling and superio preparation! */
+
+	if (f71862fg_pin == 63) {
+		if (ioaddr) {
+			/* SPI must be disabled first to use this pin! */
+			superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6);
+			superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4);
+		}
+	} else if (f71862fg_pin == 56) {
+		if (ioaddr)
+			superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1);
+	} else {
+		printk(KERN_ERR DRVNAME ": Invalid argument f71862fg_pin=%d\n",
+				f71862fg_pin);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int watchdog_start(void)
 {
 	/* Make sure we don't die as soon as the watchdog is enabled below */
@@ -299,19 +333,30 @@
 	switch (watchdog.type) {
 	case f71808fg:
 		/* Set pin 21 to GPIO23/WDTRST#, then to WDTRST# */
-		superio_clear_bit(watchdog.sioaddr, 0x2a, 3);
-		superio_clear_bit(watchdog.sioaddr, 0x2b, 3);
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT2, 3);
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 3);
+		break;
+
+	case f71862fg:
+		err = f71862fg_pin_configure(watchdog.sioaddr);
+		if (err)
+			goto exit_superio;
+		break;
+
+	case f71869:
+		/* GPIO14 --> WDTRST# */
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4);
 		break;
 
 	case f71882fg:
 		/* Set pin 56 to WDTRST# */
-		superio_set_bit(watchdog.sioaddr, 0x29, 1);
+		superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1);
 		break;
 
 	case f71889fg:
 		/* set pin 40 to WDTRST# */
-		superio_outb(watchdog.sioaddr, 0x2b,
-				superio_inb(watchdog.sioaddr, 0x2b) & 0xcf);
+		superio_outb(watchdog.sioaddr, SIO_REG_MFUNCT3,
+			superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
 		break;
 
 	default:
@@ -711,16 +756,19 @@
 	case SIO_F71808_ID:
 		watchdog.type = f71808fg;
 		break;
+	case SIO_F71862_ID:
+		watchdog.type = f71862fg;
+		err = f71862fg_pin_configure(0); /* validate module parameter */
+		break;
+	case SIO_F71869_ID:
+		watchdog.type = f71869;
+		break;
 	case SIO_F71882_ID:
 		watchdog.type = f71882fg;
 		break;
 	case SIO_F71889_ID:
 		watchdog.type = f71889fg;
 		break;
-	case SIO_F71862_ID:
-		/* These have a watchdog, though it isn't implemented (yet). */
-		err = -ENOSYS;
-		goto exit;
 	case SIO_F71858_ID:
 		/* Confirmed (by datasheet) not to have a watchdog. */
 		err = -ENODEV;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index dea7b5b..204a560 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -469,7 +469,7 @@
 	unsigned long rom_pl;
 	static int die_nmi_called;
 
-	if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+	if (ulReason != DIE_NMIUNKNOWN)
 		goto out;
 
 	if (!hpwdt_nmi_decoding)
@@ -710,7 +710,7 @@
 	return 0;
 }
 
-static void __devexit hpwdt_exit_nmi_decoding(void)
+static void hpwdt_exit_nmi_decoding(void)
 {
 	unregister_die_notifier(&die_notifier);
 	if (cru_rom_addr)
@@ -726,7 +726,7 @@
 	return 0;
 }
 
-static void __devexit hpwdt_exit_nmi_decoding(void)
+static void hpwdt_exit_nmi_decoding(void)
 {
 }
 #endif /* CONFIG_HPWDT_NMI_DECODING */
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index b8838d2..2c6c2b4 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
 /*
  *	intel TCO Watchdog Driver
  *
- *	(c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
+ *	(c) Copyright 2006-2010 Wim Van Sebroeck <wim@iguana.be>.
  *
  *	This program is free software; you can redistribute it and/or
  *	modify it under the terms of the GNU General Public License
@@ -26,13 +26,15 @@
  *	document number 301473-002, 301474-026: 82801F (ICH6)
  *	document number 313082-001, 313075-006: 631xESB, 632xESB
  *	document number 307013-003, 307014-024: 82801G (ICH7)
+ *	document number 322896-001, 322897-001: NM10
  *	document number 313056-003, 313057-017: 82801H (ICH8)
  *	document number 316972-004, 316973-012: 82801I (ICH9)
  *	document number 319973-002, 319974-002: 82801J (ICH10)
  *	document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
  *	document number 320066-003, 320257-008: EP80597 (IICH)
- *	document number TBD                   : Cougar Point (CPT)
+ *	document number 324645-001, 324646-001: Cougar Point (CPT)
  *	document number TBD                   : Patsburg (PBG)
+ *	document number TBD                   : DH89xxCC
  */
 
 /*
@@ -85,6 +87,7 @@
 	TCO_ICH7DH,	/* ICH7DH */
 	TCO_ICH7M,	/* ICH7-M & ICH7-U */
 	TCO_ICH7MDH,	/* ICH7-M DH */
+	TCO_NM10,	/* NM10 */
 	TCO_ICH8,	/* ICH8 & ICH8R */
 	TCO_ICH8DH,	/* ICH8DH */
 	TCO_ICH8DO,	/* ICH8DO */
@@ -149,6 +152,7 @@
 	TCO_CPT31,	/* Cougar Point */
 	TCO_PBG1,	/* Patsburg */
 	TCO_PBG2,	/* Patsburg */
+	TCO_DH89XXCC,	/* DH89xxCC */
 };
 
 static struct {
@@ -174,6 +178,7 @@
 	{"ICH7DH", 2},
 	{"ICH7-M or ICH7-U", 2},
 	{"ICH7-M DH", 2},
+	{"NM10", 2},
 	{"ICH8 or ICH8R", 2},
 	{"ICH8DH", 2},
 	{"ICH8DO", 2},
@@ -238,6 +243,7 @@
 	{"Cougar Point", 2},
 	{"Patsburg", 2},
 	{"Patsburg", 2},
+	{"DH89xxCC", 2},
 	{NULL, 0}
 };
 
@@ -291,6 +297,7 @@
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30,		TCO_ICH7DH)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1,		TCO_ICH7M)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31,		TCO_ICH7MDH)},
+	{ ITCO_PCI_DEVICE(0x27bc,				TCO_NM10)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0,		TCO_ICH8)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2,		TCO_ICH8DH)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3,		TCO_ICH8DO)},
@@ -355,6 +362,7 @@
 	{ ITCO_PCI_DEVICE(0x1c5f,				TCO_CPT31)},
 	{ ITCO_PCI_DEVICE(0x1d40,				TCO_PBG1)},
 	{ ITCO_PCI_DEVICE(0x1d41,				TCO_PBG2)},
+	{ ITCO_PCI_DEVICE(0x2310,				TCO_DH89XXCC)},
 	{ 0, },			/* End of list */
 };
 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 2852bb2..8114719 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -21,7 +21,7 @@
 #include <linux/watchdog.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
-#include <mach/timex.h>
+#include <mach/hardware.h>
 #include <mach/regs-timer.h>
 
 #define WDT_DEFAULT_TIME	5	/* seconds */
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
new file mode 100644
index 0000000..4d43286
--- /dev/null
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -0,0 +1,227 @@
+/*
+ * drivers/watchdog/m54xx_wdt.c
+ *
+ * Watchdog driver for ColdFire MCF547x & MCF548x processors
+ * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
+ *
+ * Adapted from the IXP4xx watchdog driver, which carries these notices:
+ *
+ *  Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ *  Copyright 2004 (c) MontaVista, Software, Inc.
+ *  Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+
+#include <asm/coldfire.h>
+#include <asm/m54xxsim.h>
+#include <asm/m54xxgpt.h>
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int heartbeat = 30;	/* (secs) Default is 0.5 minute */
+static unsigned long wdt_status;
+
+#define	WDT_IN_USE		0
+#define	WDT_OK_TO_CLOSE		1
+
+static void wdt_enable(void)
+{
+	unsigned int gms0;
+
+	/* preserve GPIO usage, if any */
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	if (gms0 & MCF_GPT_GMS_TMS_GPIO)
+		gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK
+							| MCF_GPT_GMS_OD);
+	else
+		gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD;
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+	__raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) |
+			MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0);
+	gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE;
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_disable(void)
+{
+	unsigned int gms0;
+
+	/* disable watchdog */
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE);
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_keepalive(void)
+{
+	unsigned int gms0;
+
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 |= MCF_GPT_GMS_OCPW(0xA5);
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static int m54xx_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+		return -EBUSY;
+
+	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+	wdt_enable();
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t m54xx_wdt_write(struct file *file, const char *data,
+						size_t len, loff_t *ppos)
+{
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+			}
+		}
+		wdt_keepalive();
+	}
+	return len;
+}
+
+static const struct watchdog_info ident = {
+	.options	= WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
+				WDIOF_KEEPALIVEPING,
+	.identity	= "Coldfire M54xx Watchdog",
+};
+
+static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
+							 unsigned long arg)
+{
+	int ret = -ENOTTY;
+	int time;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		ret = copy_to_user((struct watchdog_info *)arg, &ident,
+				   sizeof(ident)) ? -EFAULT : 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+		ret = put_user(0, (int *)arg);
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		ret = put_user(0, (int *)arg);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		wdt_keepalive();
+		ret = 0;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		ret = get_user(time, (int *)arg);
+		if (ret)
+			break;
+
+		if (time <= 0 || time > 30) {
+			ret = -EINVAL;
+			break;
+		}
+
+		heartbeat = time;
+		wdt_enable();
+		/* Fall through */
+
+	case WDIOC_GETTIMEOUT:
+		ret = put_user(heartbeat, (int *)arg);
+		break;
+	}
+	return ret;
+}
+
+static int m54xx_wdt_release(struct inode *inode, struct file *file)
+{
+	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
+		wdt_disable();
+	else {
+		printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
+					"timer will not stop\n");
+		wdt_keepalive();
+	}
+	clear_bit(WDT_IN_USE, &wdt_status);
+	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+	return 0;
+}
+
+
+static const struct file_operations m54xx_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= m54xx_wdt_write,
+	.unlocked_ioctl	= m54xx_wdt_ioctl,
+	.open		= m54xx_wdt_open,
+	.release	= m54xx_wdt_release,
+};
+
+static struct miscdevice m54xx_wdt_miscdev = {
+	.minor		= WATCHDOG_MINOR,
+	.name		= "watchdog",
+	.fops		= &m54xx_wdt_fops,
+};
+
+static int __init m54xx_wdt_init(void)
+{
+	int ret;
+
+	if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
+						"Coldfire M54xx Watchdog")) {
+		printk(KERN_WARNING
+				"Coldfire M54xx Watchdog: I/O region busy\n");
+		return -EBUSY;
+	}
+
+	ret = misc_register(&m54xx_wdt_miscdev);
+	if (ret) {
+		release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
+		return ret;
+	}
+	printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
+
+	return 0;
+}
+
+static void __exit m54xx_wdt_exit(void)
+{
+	misc_deregister(&m54xx_wdt_miscdev);
+	release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
+}
+
+module_init(m54xx_wdt_init);
+module_exit(m54xx_wdt_exit);
+
+MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
+MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
new file mode 100644
index 0000000..1a50aa7
--- /dev/null
+++ b/drivers/watchdog/nv_tco.c
@@ -0,0 +1,512 @@
+/*
+ *	nv_tco 0.01:	TCO timer driver for NV chipsets
+ *
+ *	(c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ *	Based off i8xx_tco.c:
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	TCO timer driver for NV chipsets
+ *	based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ *	Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "nv_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "NV_TCO"
+#define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static unsigned int tcobase;
+static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *nv_tco_platform_device;
+
+/* module parameters */
+#define WATCHDOG_HEARTBEAT 30	/* 30 sec default heartbeat (2<heartbeat<39) */
+static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39, "
+			    "default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+		" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static inline unsigned char seconds_to_ticks(int seconds)
+{
+	/* the internal timer is stored as ticks which decrement
+	 * every 0.6 seconds */
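+	/* e.g. the default 30 second heartbeat becomes (30 * 10) / 6 = 50 ticks */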
+	return (seconds * 10) / 6;
+}
+
+static void tco_timer_start(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inl(TCO_CNT(tcobase));
+	val &= ~TCO_CNT_TCOHALT;
+	outl(val, TCO_CNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inl(TCO_CNT(tcobase));
+	val |= TCO_CNT_TCOHALT;
+	outl(val, TCO_CNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	outb(0x01, TCO_RLD(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+	int ret = 0;
+	unsigned char tmrval;
+	unsigned long flags;
+	u8 val;
+
+	/*
+	 * Note that seconds_to_ticks(t) > t, so if t > 0x3f then so is
+	 * tmrval = seconds_to_ticks(t).  Check that the count in seconds isn't
+	 * out of range on its own (to avoid overflow in tmrval).
+	 */
+	if (t < 0 || t > 0x3f)
+		return -EINVAL;
+	tmrval = seconds_to_ticks(t);
+
+	/* "Values of 0h-3h are ignored and should not be attempted" */
+	if (tmrval > 0x3f || tmrval < 0x04)
+		return -EINVAL;
+
+	/* Write new heartbeat to watchdog */
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inb(TCO_TMR(tcobase));
+	val &= 0xc0;
+	val |= tmrval;
+	outb(val, TCO_TMR(tcobase));
+	val = inb(TCO_TMR(tcobase));
+
+	if ((val & 0x3f) != tmrval)
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&tco_lock, flags);
+
+	if (ret)
+		return ret;
+
+	heartbeat = t;
+	return 0;
+}
+
+/*
+ *	/dev/watchdog handling
+ */
+
+static int nv_tco_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &timer_alive))
+		return -EBUSY;
+
+	/* Reload and activate timer */
+	tco_timer_keepalive();
+	tco_timer_start();
+	return nonseekable_open(inode, file);
+}
+
+static int nv_tco_release(struct inode *inode, struct file *file)
+{
+	/* Shut off the timer */
+	if (tco_expect_close == 42) {
+		tco_timer_stop();
+	} else {
+		printk(KERN_CRIT PFX "Unexpected close, not stopping "
+		       "watchdog!\n");
+		tco_timer_keepalive();
+	}
+	clear_bit(0, &timer_alive);
+	tco_expect_close = 0;
+	return 0;
+}
+
+static ssize_t nv_tco_write(struct file *file, const char __user *data,
+			    size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/*
+			 * note: just in case someone wrote the magic character
+			 * five months ago...
+			 */
+			tco_expect_close = 0;
+
+			/*
+			 * scan to see whether or not we got the magic
+			 * character
+			 */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					tco_expect_close = 42;
+			}
+		}
+
+		/* someone wrote to us, we should reload the timer */
+		tco_timer_keepalive();
+	}
+	return len;
+}
+
+static long nv_tco_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long arg)
+{
+	int new_options, retval = -EINVAL;
+	int new_heartbeat;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	static const struct watchdog_info ident = {
+		.options =		WDIOF_SETTIMEOUT |
+					WDIOF_KEEPALIVEPING |
+					WDIOF_MAGICCLOSE,
+		.firmware_version =	0,
+		.identity =		TCO_MODULE_NAME,
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(new_options, p))
+			return -EFAULT;
+		if (new_options & WDIOS_DISABLECARD) {
+			tco_timer_stop();
+			retval = 0;
+		}
+		if (new_options & WDIOS_ENABLECARD) {
+			tco_timer_keepalive();
+			tco_timer_start();
+			retval = 0;
+		}
+		return retval;
+	case WDIOC_KEEPALIVE:
+		tco_timer_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_heartbeat, p))
+			return -EFAULT;
+		if (tco_timer_set_heartbeat(new_heartbeat))
+			return -EINVAL;
+		tco_timer_keepalive();
+		/* Fall through */
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ *	Kernel Interfaces
+ */
+
+static const struct file_operations nv_tco_fops = {
+	.owner =		THIS_MODULE,
+	.llseek =		no_llseek,
+	.write =		nv_tco_write,
+	.unlocked_ioctl =	nv_tco_ioctl,
+	.open =			nv_tco_open,
+	.release =		nv_tco_release,
+};
+
+static struct miscdevice nv_tco_miscdev = {
+	.minor =	WATCHDOG_MINOR,
+	.name =		"watchdog",
+	.fops =		&nv_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id tco_pci_tbl[] = {
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
+	  PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
+	  PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0, },			/* End of list */
+};
+MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
+
+/*
+ *	Init & exit routines
+ */
+
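+/*
+ * Locate the supported SMBus device, derive the TCO register base from its
+ * config space, mask the SMI generated by the TCO, and make sure the
+ * reboot-on-timeout bit sticks so that a timeout actually resets the box.
+ */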
+static unsigned char __init nv_tco_getdevice(void)
+{
+	struct pci_dev *dev = NULL;
+	u32 val;
+
+	/* Find the PCI device */
+	for_each_pci_dev(dev) {
+		if (pci_match_id(tco_pci_tbl, dev) != NULL) {
+			tco_pci = dev;
+			break;
+		}
+	}
+
+	if (!tco_pci)
+		return 0;
+
+	/* Find the base io port */
+	pci_read_config_dword(tco_pci, 0x64, &val);
+	val &= 0xffff;
+	if (val == 0x0001 || val == 0x0000) {
+		/* Something is wrong here, the BAR isn't set up */
+		printk(KERN_ERR PFX "failed to get tcobase address\n");
+		return 0;
+	}
+	val &= 0xff00;
+	tcobase = val + 0x40;
+
+	if (!request_region(tcobase, 0x10, "NV TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+		       tcobase);
+		return 0;
+	}
+
+	/* Set a reasonable heartbeat before we stop the timer */
+	tco_timer_set_heartbeat(30);
+
+	/*
+	 * Stop the TCO before we change anything so we don't race with
+	 * a zeroed timer.
+	 */
+	tco_timer_keepalive();
+	tco_timer_stop();
+
+	/* Disable SMI caused by TCO */
+	if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+		       MCP51_SMI_EN(tcobase));
+		goto out;
+	}
+	val = inl(MCP51_SMI_EN(tcobase));
+	val &= ~MCP51_SMI_EN_TCO;
+	outl(val, MCP51_SMI_EN(tcobase));
+	val = inl(MCP51_SMI_EN(tcobase));
+	release_region(MCP51_SMI_EN(tcobase), 4);
+	if (val & MCP51_SMI_EN_TCO) {
+		printk(KERN_ERR PFX "Could not disable SMI caused by TCO\n");
+		goto out;
+	}
+
+	/* Check chipset's NO_REBOOT bit */
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) {
+		printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot "
+		       "disabled by hardware\n");
+		goto out;
+	}
+
+	return 1;
+out:
+	release_region(tcobase, 0x10);
+	return 0;
+}
+
+static int __devinit nv_tco_init(struct platform_device *dev)
+{
+	int ret;
+
+	/* Check whether or not the hardware watchdog is there */
+	if (!nv_tco_getdevice())
+		return -ENODEV;
+
+	/* Check to see if last reboot was due to watchdog timeout */
+	printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+	       inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not ");
+
+	/* Clear out the old status */
+	outl(TCO_STS_RESET, TCO_STS(tcobase));
+
+	/*
+	 * Check that the heartbeat value is within its range.
+	 * If not, reset to the default.
+	 */
+	if (tco_timer_set_heartbeat(heartbeat)) {
+		heartbeat = WATCHDOG_HEARTBEAT;
+		tco_timer_set_heartbeat(heartbeat);
+		printk(KERN_INFO PFX "heartbeat value must be 2<heartbeat<39, "
+		       "using %d\n", heartbeat);
+	}
+
+	ret = misc_register(&nv_tco_miscdev);
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register miscdev on minor=%d "
+		       "(err=%d)\n", WATCHDOG_MINOR, ret);
+		goto unreg_region;
+	}
+
+	clear_bit(0, &timer_alive);
+
+	tco_timer_stop();
+
+	printk(KERN_INFO PFX "initialized (0x%04x). heartbeat=%d sec "
+	       "(nowayout=%d)\n", tcobase, heartbeat, nowayout);
+
+	return 0;
+
+unreg_region:
+	release_region(tcobase, 0x10);
+	return ret;
+}
+
+static void __devexit nv_tco_cleanup(void)
+{
+	u32 val;
+
+	/* Stop the timer before we leave */
+	if (!nowayout)
+		tco_timer_stop();
+
+	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	if (val & MCP51_SMBUS_SETUP_B_TCO_REBOOT) {
+		printk(KERN_CRIT PFX "Couldn't unset REBOOT bit.  Machine may "
+		       "soon reset\n");
+	}
+
+	/* Deregister */
+	misc_deregister(&nv_tco_miscdev);
+	release_region(tcobase, 0x10);
+}
+
+static int __devexit nv_tco_remove(struct platform_device *dev)
+{
+	if (tcobase)
+		nv_tco_cleanup();
+
+	return 0;
+}
+
+static void nv_tco_shutdown(struct platform_device *dev)
+{
+	tco_timer_stop();
+}
+
+static struct platform_driver nv_tco_driver = {
+	.probe		= nv_tco_init,
+	.remove		= __devexit_p(nv_tco_remove),
+	.shutdown	= nv_tco_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= TCO_MODULE_NAME,
+	},
+};
+
+static int __init nv_tco_init_module(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "NV TCO WatchDog Timer Driver v%s\n",
+	       TCO_VERSION);
+
+	err = platform_driver_register(&nv_tco_driver);
+	if (err)
+		return err;
+
+	nv_tco_platform_device = platform_device_register_simple(
+					TCO_MODULE_NAME, -1, NULL, 0);
+	if (IS_ERR(nv_tco_platform_device)) {
+		err = PTR_ERR(nv_tco_platform_device);
+		goto unreg_platform_driver;
+	}
+
+	return 0;
+
+unreg_platform_driver:
+	platform_driver_unregister(&nv_tco_driver);
+	return err;
+}
+
+static void __exit nv_tco_cleanup_module(void)
+{
+	platform_device_unregister(nv_tco_platform_device);
+	platform_driver_unregister(&nv_tco_driver);
+	printk(KERN_INFO PFX "NV TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(nv_tco_init_module);
+module_exit(nv_tco_cleanup_module);
+
+MODULE_AUTHOR("Mike Waychison");
+MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/nv_tco.h b/drivers/watchdog/nv_tco.h
new file mode 100644
index 0000000..c2d1d04
--- /dev/null
+++ b/drivers/watchdog/nv_tco.h
@@ -0,0 +1,64 @@
+/*
+ *	nv_tco:	TCO timer driver for nVidia chipsets.
+ *
+ *	(c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ *	Supported Chipsets:
+ *		- MCP51/MCP55
+ *
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Neither kernel concepts nor Nils Faerber admit liability nor provide
+ *	warranty for any of this software. This material is provided
+ *	"AS-IS" and at no charge.
+ *
+ *	(c) Copyright 2000	kernel concepts <nils@kernelconcepts.de>
+ *				developed for
+ *                              Jentro AG, Haar/Munich (Germany)
+ *
+ *	TCO timer driver for NV chipsets
+ *	based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ * Some address definitions for the TCO
+ */
+
+#define TCO_RLD(base)	((base) + 0x00)	/* TCO Timer Reload and Current Value */
+#define TCO_TMR(base)	((base) + 0x01)	/* TCO Timer Initial Value	*/
+
+#define TCO_STS(base)	((base) + 0x04)	/* TCO Status Register		*/
+/*
+ * TCO Boot Status bit: set on a TCO reset, cleared by software or by
+ * standby power-good (so it survives reboots).  Unfortunately this bit
+ * is never set in practice.
+ */
+#  define TCO_STS_BOOT_STS	(1 << 9)
+/*
+ * First and second timeout status bits.  These also survive a warm boot,
+ * and they do work, so we use them.
+ */
+#  define TCO_STS_TCO_INT_STS	(1 << 1)
+#  define TCO_STS_TCO2TO_STS	(1 << 10)
+#  define TCO_STS_RESET		(TCO_STS_BOOT_STS | TCO_STS_TCO2TO_STS | \
+				 TCO_STS_TCO_INT_STS)
+
+#define TCO_CNT(base)	((base) + 0x08)	/* TCO Control Register	*/
+#  define TCO_CNT_TCOHALT	(1 << 12)
+
+#define MCP51_SMBUS_SETUP_B 0xe8
+#  define MCP51_SMBUS_SETUP_B_TCO_REBOOT (1 << 25)
+
+/*
+ * The SMI_EN register is at the base io address + 0x04,
+ * while TCOBASE is + 0x40.
+ */
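+/* e.g. with TCOBASE at io base 0x1000 + 0x40 = 0x1040, SMI_EN is at 0x1004 */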
+#define MCP51_SMI_EN(base)	((base) - 0x40 + 0x04)
+#  define MCP51_SMI_EN_TCO	((1 << 4) | (1 << 5))
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index c7d67e9..7990625 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -201,11 +201,14 @@
 static int __init fitpc2_wdt_init(void)
 {
 	int err;
+	const char *brd_name;
 
-	if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2"))
+	brd_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+	if (!brd_name || !strstr(brd_name, "SBC-FITPC2"))
 		return -ENODEV;
 
-	pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME));
+	pr_info("%s found\n", brd_name);
 
 	if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
 		pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 0461858..b61ab1c 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -508,7 +508,7 @@
 	sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
 
 	/* Check if Logical Device Register is currently active */
-	if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0)
+	if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
 		printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n");
 
 	/* Get the base address of the runtime registers */
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
new file mode 100644
index 0000000..8083728
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.c
@@ -0,0 +1,480 @@
+/*
+ *	sp5100_tco :	TCO timer driver for sp5100 chipsets
+ *
+ *	(c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ *	Based on i8xx_tco.c:
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide"
+ */
+
+/*
+ *	Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "sp5100_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "SP5100 TCO timer"
+#define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static void __iomem *tcobase;
+static unsigned int pm_iobase;
+static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *sp5100_tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *sp5100_tco_platform_device;
+
+/* module parameters */
+
+#define WATCHDOG_HEARTBEAT 60	/* 60 sec default heartbeat. */
+static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+		 __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+		" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static void tco_timer_start(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val |= SP5100_WDT_START_STOP_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_WDT_START_STOP_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val |= SP5100_WDT_TRIGGER_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+	unsigned long flags;
+
+	if (t < 0 || t > 0xffff)
+		return -EINVAL;
+
+	/* Write new heartbeat to watchdog */
+	spin_lock_irqsave(&tco_lock, flags);
+	writel(t, SP5100_WDT_COUNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+
+	heartbeat = t;
+	return 0;
+}
+
+/*
+ *	/dev/watchdog handling
+ */
+
+static int sp5100_tco_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &timer_alive))
+		return -EBUSY;
+
+	/* Reload and activate timer */
+	tco_timer_start();
+	tco_timer_keepalive();
+	return nonseekable_open(inode, file);
+}
+
+static int sp5100_tco_release(struct inode *inode, struct file *file)
+{
+	/* Shut off the timer. */
+	if (tco_expect_close == 42) {
+		tco_timer_stop();
+	} else {
+		printk(KERN_CRIT PFX
+			"Unexpected close, not stopping watchdog!\n");
+		tco_timer_keepalive();
+	}
+	clear_bit(0, &timer_alive);
+	tco_expect_close = 0;
+	return 0;
+}
+
+static ssize_t sp5100_tco_write(struct file *file, const char __user *data,
+				size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/*
+			 * note: just in case someone wrote the magic character
+			 * five months ago...
+			 */
+			tco_expect_close = 0;
+
+			/* scan to see whether or not we got the magic character */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					tco_expect_close = 42;
+			}
+		}
+
+		/* someone wrote to us, we should reload the timer */
+		tco_timer_keepalive();
+	}
+	return len;
+}
+
+static long sp5100_tco_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	int new_options, retval = -EINVAL;
+	int new_heartbeat;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	static const struct watchdog_info ident = {
+		.options =		WDIOF_SETTIMEOUT |
+					WDIOF_KEEPALIVEPING |
+					WDIOF_MAGICCLOSE,
+		.firmware_version =	0,
+		.identity =		TCO_MODULE_NAME,
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident,
+			sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(new_options, p))
+			return -EFAULT;
+		if (new_options & WDIOS_DISABLECARD) {
+			tco_timer_stop();
+			retval = 0;
+		}
+		if (new_options & WDIOS_ENABLECARD) {
+			tco_timer_start();
+			tco_timer_keepalive();
+			retval = 0;
+		}
+		return retval;
+	case WDIOC_KEEPALIVE:
+		tco_timer_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_heartbeat, p))
+			return -EFAULT;
+		if (tco_timer_set_heartbeat(new_heartbeat))
+			return -EINVAL;
+		tco_timer_keepalive();
+		/* Fall through */
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static const struct file_operations sp5100_tco_fops = {
+	.owner =		THIS_MODULE,
+	.llseek =		no_llseek,
+	.write =		sp5100_tco_write,
+	.unlocked_ioctl =	sp5100_tco_ioctl,
+	.open =			sp5100_tco_open,
+	.release =		sp5100_tco_release,
+};
+
+static struct miscdevice sp5100_tco_miscdev = {
+	.minor =	WATCHDOG_MINOR,
+	.name =		"watchdog",
+	.fops =		&sp5100_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id sp5100_tco_pci_tbl[] = {
+	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
+	  PCI_ANY_ID, },
+	{ 0, },			/* End of list */
+};
+MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
+
+/*
+ * Init & exit routines
+ */
+
+static unsigned char __devinit sp5100_tco_setupdevice(void)
+{
+	struct pci_dev *dev = NULL;
+	u32 val;
+
+	/* Match the PCI device */
+	for_each_pci_dev(dev) {
+		if (pci_match_id(sp5100_tco_pci_tbl, dev) != NULL) {
+			sp5100_tco_pci = dev;
+			break;
+		}
+	}
+
+	if (!sp5100_tco_pci)
+		return 0;
+
+	/* Request the IO ports used by this driver */
+	pm_iobase = SP5100_IO_PM_INDEX_REG;
+	if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+			pm_iobase);
+		goto exit;
+	}
+
+	/* Find the watchdog base address. */
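+	/* BASE3 is read first, so it ends up in the most significant byte. */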
+	outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG);
+	val = inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG);
+	val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG);
+	val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG);
+	/* Low three bits of BASE0 are reserved. */
+	val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
+
+	tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
+	if (!tcobase) {
+		printk(KERN_ERR PFX "failed to get tcobase address\n");
+		goto unreg_region;
+	}
+
+	/* Enable watchdog decode bit */
+	pci_read_config_dword(sp5100_tco_pci,
+			      SP5100_PCI_WATCHDOG_MISC_REG,
+			      &val);
+
+	val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+	pci_write_config_dword(sp5100_tco_pci,
+			       SP5100_PCI_WATCHDOG_MISC_REG,
+			       val);
+
+	/* Enable Watchdog timer and set the resolution to 1 sec. */
+	outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+	val = inb(SP5100_IO_PM_DATA_REG);
+	val |= SP5100_PM_WATCHDOG_SECOND_RES;
+	val &= ~SP5100_PM_WATCHDOG_DISABLE;
+	outb(val, SP5100_IO_PM_DATA_REG);
+
+	/* Check that the watchdog action is set to reset the system. */
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_PM_WATCHDOG_ACTION_RESET;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+
+	/* Set a reasonable heartbeat before we stop the timer */
+	tco_timer_set_heartbeat(heartbeat);
+
+	/*
+	 * Stop the TCO before we change anything so we don't race with
+	 * a zeroed timer.
+	 */
+	tco_timer_stop();
+
+	/* Done */
+	return 1;
+
+unreg_region:
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+exit:
+	return 0;
+}
+
+static int __devinit sp5100_tco_init(struct platform_device *dev)
+{
+	int ret;
+	u32 val;
+
+	/* Check whether or not the hardware watchdog is there. If found, then
+	 * set it up.
+	 */
+	if (!sp5100_tco_setupdevice())
+		return -ENODEV;
+
+	/* Check to see if last reboot was due to watchdog timeout */
+	printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+	       readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ?
+		      "" : "not ");
+
+	/* Clear out the old status */
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_PM_WATCHDOG_FIRED;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+
+	/*
+	 * Check that the heartbeat value is within its range.
+	 * If not, reset to the default.
+	 */
+	if (tco_timer_set_heartbeat(heartbeat)) {
+		heartbeat = WATCHDOG_HEARTBEAT;
+		tco_timer_set_heartbeat(heartbeat);
+	}
+
+	ret = misc_register(&sp5100_tco_miscdev);
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register miscdev on minor="
+		       "%d (err=%d)\n",
+		       WATCHDOG_MINOR, ret);
+		goto exit;
+	}
+
+	clear_bit(0, &timer_alive);
+
+	printk(KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec"
+		" (nowayout=%d)\n",
+		tcobase, heartbeat, nowayout);
+
+	return 0;
+
+exit:
+	iounmap(tcobase);
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+	return ret;
+}
+
+static void __devexit sp5100_tco_cleanup(void)
+{
+	/* Stop the timer before we leave */
+	if (!nowayout)
+		tco_timer_stop();
+
+	/* Deregister */
+	misc_deregister(&sp5100_tco_miscdev);
+	iounmap(tcobase);
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+}
+
+static int __devexit sp5100_tco_remove(struct platform_device *dev)
+{
+	if (tcobase)
+		sp5100_tco_cleanup();
+	return 0;
+}
+
+static void sp5100_tco_shutdown(struct platform_device *dev)
+{
+	tco_timer_stop();
+}
+
+static struct platform_driver sp5100_tco_driver = {
+	.probe		= sp5100_tco_init,
+	.remove		= __devexit_p(sp5100_tco_remove),
+	.shutdown	= sp5100_tco_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= TCO_MODULE_NAME,
+	},
+};
+
+static int __init sp5100_tco_init_module(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "SP5100 TCO WatchDog Timer Driver v%s\n",
+	       TCO_VERSION);
+
+	err = platform_driver_register(&sp5100_tco_driver);
+	if (err)
+		return err;
+
+	sp5100_tco_platform_device = platform_device_register_simple(
+					TCO_MODULE_NAME, -1, NULL, 0);
+	if (IS_ERR(sp5100_tco_platform_device)) {
+		err = PTR_ERR(sp5100_tco_platform_device);
+		goto unreg_platform_driver;
+	}
+
+	return 0;
+
+unreg_platform_driver:
+	platform_driver_unregister(&sp5100_tco_driver);
+	return err;
+}
+
+static void __exit sp5100_tco_cleanup_module(void)
+{
+	platform_device_unregister(sp5100_tco_platform_device);
+	platform_driver_unregister(&sp5100_tco_driver);
+	printk(KERN_INFO PFX "SP5100 TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(sp5100_tco_init_module);
+module_exit(sp5100_tco_cleanup_module);
+
+MODULE_AUTHOR("Priyanka Gupta");
+MODULE_DESCRIPTION("TCO timer driver for SP5100 chipset");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
new file mode 100644
index 0000000..a5a16cc
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.h
@@ -0,0 +1,41 @@
+/*
+ *	sp5100_tco:	TCO timer driver for sp5100 chipsets.
+ *
+ *	(c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ *	TCO timer driver for sp5100 chipsets
+ */
+
+/*
+ * Some address definitions for the Watchdog
+ */
+
+#define SP5100_WDT_MEM_MAP_SIZE		0x08
+#define SP5100_WDT_CONTROL(base)	((base) + 0x00) /* Watchdog Control */
+#define SP5100_WDT_COUNT(base)		((base) + 0x04) /* Watchdog Count */
+
+#define SP5100_WDT_START_STOP_BIT	1
+#define SP5100_WDT_TRIGGER_BIT		(1 << 7)
+
+#define SP5100_PCI_WATCHDOG_MISC_REG	0x41
+#define SP5100_PCI_WATCHDOG_DECODE_EN	(1 << 3)
+
+#define SP5100_PM_IOPORTS_SIZE		0x02
+
+/*
+ * These two I/O ports are hardcoded; their addresses do not appear to be
+ * discoverable from any other register.
+ */
+#define SP5100_IO_PM_INDEX_REG		0xCD6
+#define SP5100_IO_PM_DATA_REG		0xCD7
+
+#define SP5100_PM_WATCHDOG_CONTROL	0x69
+#define SP5100_PM_WATCHDOG_BASE0	0x6C
+#define SP5100_PM_WATCHDOG_BASE1	0x6D
+#define SP5100_PM_WATCHDOG_BASE2	0x6E
+#define SP5100_PM_WATCHDOG_BASE3	0x6F
+
+#define SP5100_PM_WATCHDOG_FIRED	(1 << 1)
+#define SP5100_PM_WATCHDOG_ACTION_RESET	(1 << 2)
+
+#define SP5100_PM_WATCHDOG_DISABLE	1
+#define SP5100_PM_WATCHDOG_SECOND_RES	(3 << 1)
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 0f5288d..e5c91d4 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -42,7 +42,7 @@
 
 #include <asm/system.h>
 
-#define WATCHDOG_NAME "w83627hf/thf/hg WDT"
+#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
 #define PFX WATCHDOG_NAME ": "
 #define WATCHDOG_TIMEOUT 60		/* 60 sec default timeout */
 
@@ -89,7 +89,7 @@
 		c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
 		outb_p(0x2b, WDT_EFER);
 		outb_p(c, WDT_EFDR);	/* set GPIO3 to WDT0 */
-	} else if (c == 0x88) {	/* W83627EHF */
+	} else if (c == 0x88 || c == 0xa0) {	/* W83627EHF / W83627DHG */
 		outb_p(0x2d, WDT_EFER); /* select GPIO5 */
 		c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
 		outb_p(0x2d, WDT_EFER);
@@ -129,6 +129,8 @@
 	t = inb_p(WDT_EFDR);      /* read CRF5 */
 	t &= ~0x0C;               /* set second mode & disable keyboard
 				    turning off watchdog */
+	t |= 0x02;		  /* enable the WDTO# output low pulse
+				    to the KBRST# pin (PIN60) */
 	outb_p(t, WDT_EFDR);    /* Write back to CRF5 */
 
 	outb_p(0xF7, WDT_EFER); /* Select CRF7 */
@@ -321,7 +323,7 @@
 {
 	int ret;
 
-	printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG Super I/O chip initialising.\n");
+	printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising.\n");
 
 	if (wdt_set_heartbeat(timeout)) {
 		wdt_set_heartbeat(WATCHDOG_TIMEOUT);
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index a6c12de..df2a64d 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -109,7 +109,7 @@
 	outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
 	outb_p(0x30, WDT_EFER); /* select CR30 */
 	c = inb_p(WDT_EFDR);
-	outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
+	outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
 
 	return 0;
 }
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 6e6180c..07bec09 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -29,6 +29,14 @@
 	  firing.
 	  If in doubt, say yes.
 
+config XEN_BACKEND
+	bool "Backend driver support"
+	depends on XEN_DOM0
+	default y
+	help
+	  Support for backend device drivers that provide I/O services
+	  to other virtual machines.
+
 config XENFS
 	tristate "Xen filesystem"
 	default y
@@ -62,9 +70,19 @@
 	 virtual environment, /sys/hypervisor will still be present,
 	 but will have no xen contents.
 
+config XEN_XENBUS_FRONTEND
+	tristate
+
+config XEN_GNTDEV
+	tristate "userspace grant access device driver"
+	depends on XEN
+	select MMU_NOTIFIER
+	help
+	  Allows userspace processes to use grants.
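+	  The device node is typically created as /dev/xen/gntdev.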
+
 config XEN_PLATFORM_PCI
 	tristate "xen platform pci device driver"
-	depends on XEN_PVHVM
+	depends on XEN_PVHVM && PCI
 	default m
 	help
 	  Driver for the Xen PCI Platform device: it is responsible for
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 533a199..5088cc2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -9,11 +9,14 @@
 obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
 obj-$(CONFIG_XEN_DEV_EVTCHN)	+= xen-evtchn.o
+obj-$(CONFIG_XEN_GNTDEV)	+= xen-gntdev.o
 obj-$(CONFIG_XENFS)		+= xenfs/
 obj-$(CONFIG_XEN_SYS_HYPERVISOR)	+= sys-hypervisor.o
-obj-$(CONFIG_XEN_PLATFORM_PCI)	+= platform-pci.o
+obj-$(CONFIG_XEN_PLATFORM_PCI)	+= xen-platform-pci.o
 obj-$(CONFIG_SWIOTLB_XEN)	+= swiotlb-xen.o
 obj-$(CONFIG_XEN_DOM0)		+= pci.o
 
 xen-evtchn-y			:= evtchn.o
+xen-gntdev-y				:= gntdev.o
 
+xen-platform-pci-y		:= platform-pci.o
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
new file mode 100644
index 0000000..1e31cdc
--- /dev/null
+++ b/drivers/xen/gntdev.c
@@ -0,0 +1,665 @@
+/******************************************************************************
+ * gntdev.c
+ *
+ * Device for accessing (in user-space) pages that have been granted by other
+ * domains.
+ *
+ * Copyright (c) 2006-2007, D G Murray.
+ *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mmu_notifier.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include <xen/xen.h>
+#include <xen/grant_table.h>
+#include <xen/gntdev.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
+	      "Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_DESCRIPTION("User-space granted page access driver");
+
+static int limit = 1024;
+module_param(limit, int, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
+		"once by a gntdev instance");
+
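+/*
+ * Rough usage model, as implemented below: userspace opens the gntdev node,
+ * issues IOCTL_GNTDEV_MAP_GRANT_REF to reserve an index range for a set of
+ * grant references, then mmap()s the device at the returned offset to have
+ * the granted pages mapped into its address space.
+ */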
+struct gntdev_priv {
+	struct list_head maps;
+	uint32_t used;
+	uint32_t limit;
+	/* lock protects maps from concurrent changes */
+	spinlock_t lock;
+	struct mm_struct *mm;
+	struct mmu_notifier mn;
+};
+
+struct grant_map {
+	struct list_head next;
+	struct gntdev_priv *priv;
+	struct vm_area_struct *vma;
+	int index;
+	int count;
+	int flags;
+	int is_mapped;
+	struct ioctl_gntdev_grant_ref *grants;
+	struct gnttab_map_grant_ref   *map_ops;
+	struct gnttab_unmap_grant_ref *unmap_ops;
+	struct page **pages;
+};
+
+/* ------------------------------------------------------------------ */
+
+static void gntdev_print_maps(struct gntdev_priv *priv,
+			      char *text, int text_index)
+{
+#ifdef DEBUG
+	struct grant_map *map;
+
+	pr_debug("maps list (priv %p, usage %d/%d)\n",
+	       priv, priv->used, priv->limit);
+
+	list_for_each_entry(map, &priv->maps, next)
+		pr_debug("  index %2d, count %2d %s\n",
+		       map->index, map->count,
+		       map->index == text_index && text ? text : "");
+#endif
+}
+
+static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+{
+	struct grant_map *add;
+	int i;
+
+	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
+	if (NULL == add)
+		return NULL;
+
+	add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
+	add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
+	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
+	add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
+	if (NULL == add->grants    ||
+	    NULL == add->map_ops   ||
+	    NULL == add->unmap_ops ||
+	    NULL == add->pages)
+		goto err;
+
+	for (i = 0; i < count; i++) {
+		add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+		if (add->pages[i] == NULL)
+			goto err;
+	}
+
+	add->index = 0;
+	add->count = count;
+	add->priv  = priv;
+
+	if (add->count + priv->used > priv->limit)
+		goto err;
+
+	return add;
+
+err:
+	if (add->pages)
+		for (i = 0; i < count; i++) {
+			if (add->pages[i])
+				__free_page(add->pages[i]);
+		}
+	kfree(add->pages);
+	kfree(add->grants);
+	kfree(add->map_ops);
+	kfree(add->unmap_ops);
+	kfree(add);
+	return NULL;
+}
+
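+/*
+ * Insert the new range into the per-file list, which is kept sorted by
+ * index; the first gap between existing ranges that is large enough is
+ * used, otherwise the range is appended at the end of the list.
+ */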
+static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
+{
+	struct grant_map *map;
+
+	list_for_each_entry(map, &priv->maps, next) {
+		if (add->index + add->count < map->index) {
+			list_add_tail(&add->next, &map->next);
+			goto done;
+		}
+		add->index = map->index + map->count;
+	}
+	list_add_tail(&add->next, &priv->maps);
+
+done:
+	priv->used += add->count;
+	gntdev_print_maps(priv, "[new]", add->index);
+}
+
+static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
+		int index, int count)
+{
+	struct grant_map *map;
+
+	list_for_each_entry(map, &priv->maps, next) {
+		if (map->index != index)
+			continue;
+		if (map->count != count)
+			continue;
+		return map;
+	}
+	return NULL;
+}
+
+static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
+					       unsigned long vaddr)
+{
+	struct grant_map *map;
+
+	list_for_each_entry(map, &priv->maps, next) {
+		if (!map->vma)
+			continue;
+		if (vaddr < map->vma->vm_start)
+			continue;
+		if (vaddr >= map->vma->vm_end)
+			continue;
+		return map;
+	}
+	return NULL;
+}
+
+static int gntdev_del_map(struct grant_map *map)
+{
+	int i;
+
+	if (map->vma)
+		return -EBUSY;
+	for (i = 0; i < map->count; i++)
+		if (map->unmap_ops[i].handle)
+			return -EBUSY;
+
+	map->priv->used -= map->count;
+	list_del(&map->next);
+	return 0;
+}
+
+static void gntdev_free_map(struct grant_map *map)
+{
+	int i;
+
+	if (!map)
+		return;
+
+	if (map->pages)
+		for (i = 0; i < map->count; i++) {
+			if (map->pages[i])
+				__free_page(map->pages[i]);
+		}
+	kfree(map->pages);
+	kfree(map->grants);
+	kfree(map->map_ops);
+	kfree(map->unmap_ops);
+	kfree(map);
+}
+
+/* ------------------------------------------------------------------ */
+
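+/*
+ * Called for each PTE covering the mapping: the grant map operation is set
+ * up with GNTMAP_contains_pte and the machine address of the PTE itself, so
+ * the hypervisor installs the granted frame directly into that PTE.
+ */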
+static int find_grant_ptes(pte_t *pte, pgtable_t token,
+		unsigned long addr, void *data)
+{
+	struct grant_map *map = data;
+	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+	u64 pte_maddr;
+
+	BUG_ON(pgnr >= map->count);
+	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
+
+	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
+			  GNTMAP_contains_pte | map->flags,
+			  map->grants[pgnr].ref,
+			  map->grants[pgnr].domid);
+	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
+			    GNTMAP_contains_pte | map->flags,
+			    0 /* handle */);
+	return 0;
+}
+
+static int map_grant_pages(struct grant_map *map)
+{
+	int i, err = 0;
+
+	pr_debug("map %d+%d\n", map->index, map->count);
+	err = gnttab_map_refs(map->map_ops, map->pages, map->count);
+	if (err)
+		return err;
+
+	for (i = 0; i < map->count; i++) {
+		if (map->map_ops[i].status)
+			err = -EINVAL;
+		map->unmap_ops[i].handle = map->map_ops[i].handle;
+	}
+	return err;
+}
+
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+{
+	int i, err = 0;
+
+	pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
+	err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+	if (err)
+		return err;
+
+	for (i = 0; i < pages; i++) {
+		if (map->unmap_ops[offset+i].status)
+			err = -EINVAL;
+		map->unmap_ops[offset+i].handle = 0;
+	}
+	return err;
+}
+
+/* ------------------------------------------------------------------ */
+
+static void gntdev_vma_close(struct vm_area_struct *vma)
+{
+	struct grant_map *map = vma->vm_private_data;
+
+	pr_debug("close %p\n", vma);
+	map->is_mapped = 0;
+	map->vma = NULL;
+	vma->vm_private_data = NULL;
+}
+
+static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
+			vmf->virtual_address, vmf->pgoff);
+	return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct gntdev_vmops = {
+	.close = gntdev_vma_close,
+	.fault = gntdev_vma_fault,
+};
+
+/* ------------------------------------------------------------------ */
+
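+/*
+ * mmu notifier callbacks: when the virtual address range backing a mapping
+ * is invalidated, or the mm goes away, unmap the affected grant pages so
+ * that the hypervisor mappings are torn down along with the kernel PTEs.
+ */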
+static void mn_invl_range_start(struct mmu_notifier *mn,
+				struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+	struct grant_map *map;
+	unsigned long mstart, mend;
+	int err;
+
+	spin_lock(&priv->lock);
+	list_for_each_entry(map, &priv->maps, next) {
+		if (!map->vma)
+			continue;
+		if (!map->is_mapped)
+			continue;
+		if (map->vma->vm_start >= end)
+			continue;
+		if (map->vma->vm_end <= start)
+			continue;
+		mstart = max(start, map->vma->vm_start);
+		mend   = min(end,   map->vma->vm_end);
+		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+				map->index, map->count,
+				map->vma->vm_start, map->vma->vm_end,
+				start, end, mstart, mend);
+		err = unmap_grant_pages(map,
+					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
+					(mend - mstart) >> PAGE_SHIFT);
+		WARN_ON(err);
+	}
+	spin_unlock(&priv->lock);
+}
+
+static void mn_invl_page(struct mmu_notifier *mn,
+			 struct mm_struct *mm,
+			 unsigned long address)
+{
+	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
+}
+
+static void mn_release(struct mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+	struct grant_map *map;
+	int err;
+
+	spin_lock(&priv->lock);
+	list_for_each_entry(map, &priv->maps, next) {
+		if (!map->vma)
+			continue;
+		pr_debug("map %d+%d (%lx %lx)\n",
+				map->index, map->count,
+				map->vma->vm_start, map->vma->vm_end);
+		err = unmap_grant_pages(map, /* offset */ 0, map->count);
+		WARN_ON(err);
+	}
+	spin_unlock(&priv->lock);
+}
+
+struct mmu_notifier_ops gntdev_mmu_ops = {
+	.release                = mn_release,
+	.invalidate_page        = mn_invl_page,
+	.invalidate_range_start = mn_invl_range_start,
+};
+
+/* ------------------------------------------------------------------ */
+
+static int gntdev_open(struct inode *inode, struct file *flip)
+{
+	struct gntdev_priv *priv;
+	int ret = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&priv->maps);
+	spin_lock_init(&priv->lock);
+	priv->limit = limit;
+
+	priv->mm = get_task_mm(current);
+	if (!priv->mm) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+	priv->mn.ops = &gntdev_mmu_ops;
+	ret = mmu_notifier_register(&priv->mn, priv->mm);
+	mmput(priv->mm);
+
+	if (ret) {
+		kfree(priv);
+		return ret;
+	}
+
+	flip->private_data = priv;
+	pr_debug("priv %p\n", priv);
+
+	return 0;
+}
+
+static int gntdev_release(struct inode *inode, struct file *flip)
+{
+	struct gntdev_priv *priv = flip->private_data;
+	struct grant_map *map;
+	int err;
+
+	pr_debug("priv %p\n", priv);
+
+	spin_lock(&priv->lock);
+	while (!list_empty(&priv->maps)) {
+		map = list_entry(priv->maps.next, struct grant_map, next);
+		err = gntdev_del_map(map);
+		if (WARN_ON(err))
+			gntdev_free_map(map);
+
+	}
+	spin_unlock(&priv->lock);
+
+	mmu_notifier_unregister(&priv->mn, priv->mm);
+	kfree(priv);
+	return 0;
+}
+
+static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
+				       struct ioctl_gntdev_map_grant_ref __user *u)
+{
+	struct ioctl_gntdev_map_grant_ref op;
+	struct grant_map *map;
+	int err;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, add %d\n", priv, op.count);
+	if (unlikely(op.count <= 0))
+		return -EINVAL;
+	if (unlikely(op.count > priv->limit))
+		return -EINVAL;
+
+	err = -ENOMEM;
+	map = gntdev_alloc_map(priv, op.count);
+	if (!map)
+		return err;
+	if (copy_from_user(map->grants, &u->refs,
+			   sizeof(map->grants[0]) * op.count) != 0) {
+		gntdev_free_map(map);
+		return err;
+	}
+
+	spin_lock(&priv->lock);
+	gntdev_add_map(priv, map);
+	op.index = map->index << PAGE_SHIFT;
+	spin_unlock(&priv->lock);
+
+	if (copy_to_user(u, &op, sizeof(op)) != 0) {
+		spin_lock(&priv->lock);
+		gntdev_del_map(map);
+		spin_unlock(&priv->lock);
+		gntdev_free_map(map);
+		return err;
+	}
+	return 0;
+}
+
+static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
+					 struct ioctl_gntdev_unmap_grant_ref __user *u)
+{
+	struct ioctl_gntdev_unmap_grant_ref op;
+	struct grant_map *map;
+	int err = -ENOENT;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
+
+	spin_lock(&priv->lock);
+	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
+	if (map)
+		err = gntdev_del_map(map);
+	spin_unlock(&priv->lock);
+	if (!err)
+		gntdev_free_map(map);
+	return err;
+}
+
+static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
+					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
+{
+	struct ioctl_gntdev_get_offset_for_vaddr op;
+	struct grant_map *map;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
+
+	spin_lock(&priv->lock);
+	map = gntdev_find_map_vaddr(priv, op.vaddr);
+	if (map == NULL ||
+	    map->vma->vm_start != op.vaddr) {
+		spin_unlock(&priv->lock);
+		return -EINVAL;
+	}
+	op.offset = map->index << PAGE_SHIFT;
+	op.count = map->count;
+	spin_unlock(&priv->lock);
+
+	if (copy_to_user(u, &op, sizeof(op)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
+					struct ioctl_gntdev_set_max_grants __user *u)
+{
+	struct ioctl_gntdev_set_max_grants op;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, limit %d\n", priv, op.count);
+	if (op.count > limit)
+		return -E2BIG;
+
+	spin_lock(&priv->lock);
+	priv->limit = op.count;
+	spin_unlock(&priv->lock);
+	return 0;
+}
+
+static long gntdev_ioctl(struct file *flip,
+			 unsigned int cmd, unsigned long arg)
+{
+	struct gntdev_priv *priv = flip->private_data;
+	void __user *ptr = (void __user *)arg;
+
+	switch (cmd) {
+	case IOCTL_GNTDEV_MAP_GRANT_REF:
+		return gntdev_ioctl_map_grant_ref(priv, ptr);
+
+	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
+		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
+
+	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
+		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
+
+	case IOCTL_GNTDEV_SET_MAX_GRANTS:
+		return gntdev_ioctl_set_max_grants(priv, ptr);
+
+	default:
+		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
+		return -ENOIOCTLCMD;
+	}
+
+	return 0;
+}
+
+static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+{
+	struct gntdev_priv *priv = flip->private_data;
+	int index = vma->vm_pgoff;
+	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	struct grant_map *map;
+	int err = -EINVAL;
+
+	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
+
+	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
+			index, count, vma->vm_start, vma->vm_pgoff);
+
+	spin_lock(&priv->lock);
+	map = gntdev_find_map_index(priv, index, count);
+	if (!map)
+		goto unlock_out;
+	if (map->vma)
+		goto unlock_out;
+	if (priv->mm != vma->vm_mm) {
+		printk(KERN_WARNING "Huh? Other mm?\n");
+		goto unlock_out;
+	}
+
+	vma->vm_ops = &gntdev_vmops;
+
+	vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
+
+	vma->vm_private_data = map;
+	map->vma = vma;
+
+	map->flags = GNTMAP_host_map | GNTMAP_application_map;
+	if (!(vma->vm_flags & VM_WRITE))
+		map->flags |= GNTMAP_readonly;
+
+	spin_unlock(&priv->lock);
+
+	err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+				  vma->vm_end - vma->vm_start,
+				  find_grant_ptes, map);
+	if (err) {
+		printk(KERN_WARNING "find_grant_ptes() failure.\n");
+		return err;
+	}
+
+	err = map_grant_pages(map);
+	if (err) {
+		printk(KERN_WARNING "map_grant_pages() failure.\n");
+		return err;
+	}
+
+	map->is_mapped = 1;
+
+	return 0;
+
+unlock_out:
+	spin_unlock(&priv->lock);
+	return err;
+}
+
+static const struct file_operations gntdev_fops = {
+	.owner = THIS_MODULE,
+	.open = gntdev_open,
+	.release = gntdev_release,
+	.mmap = gntdev_mmap,
+	.unlocked_ioctl = gntdev_ioctl
+};
+
+static struct miscdevice gntdev_miscdev = {
+	.minor        = MISC_DYNAMIC_MINOR,
+	.name         = "xen/gntdev",
+	.fops         = &gntdev_fops,
+};
+
+/* ------------------------------------------------------------------ */
+
+static int __init gntdev_init(void)
+{
+	int err;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	err = misc_register(&gntdev_miscdev);
+	if (err != 0) {
+		printk(KERN_ERR "Could not register gntdev device\n");
+		return err;
+	}
+	return 0;
+}
+
+static void __exit gntdev_exit(void)
+{
+	misc_deregister(&gntdev_miscdev);
+}
+
+module_init(gntdev_init);
+module_exit(gntdev_exit);
+
+/* ------------------------------------------------------------------ */
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 6c45318..9ef54eb 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -447,6 +447,52 @@
 }
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct page **pages, unsigned int count)
+{
+	int i, ret;
+	pte_t *pte;
+	unsigned long mfn;
+
+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		/* m2p override only supported for GNTMAP_contains_pte mappings */
+		if (!(map_ops[i].flags & GNTMAP_contains_pte))
+			continue;
+		pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+				(map_ops[i].host_addr & ~PAGE_MASK));
+		mfn = pte_mfn(*pte);
+		ret = m2p_add_override(mfn, pages[i]);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_map_refs);
+
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+		struct page **pages, unsigned int count)
+{
+	int i, ret;
+
+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		ret = m2p_remove_override(pages[i]);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
 	struct gnttab_setup_table setup;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4..2417727 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -37,11 +37,19 @@
 #ifdef CONFIG_PM_SLEEP
 static int xen_hvm_suspend(void *data)
 {
+	int err;
 	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
 	int *cancelled = data;
 
 	BUG_ON(!irqs_disabled());
 
+	err = sysdev_suspend(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
+		       err);
+		return err;
+	}
+
 	*cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
 
 	xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@
 		xen_timer_resume();
 	}
 
+	sysdev_resume();
+
 	return 0;
 }
 
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index c01b5dd..afbe041 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -105,7 +105,7 @@
 				       const struct pci_device_id *ent)
 {
 	int i, ret;
-	long ioaddr, iolen;
+	long ioaddr;
 	long mmio_addr, mmio_len;
 	unsigned int max_nr_gframes;
 
@@ -114,7 +114,6 @@
 		return i;
 
 	ioaddr = pci_resource_start(pdev, 0);
-	iolen = pci_resource_len(pdev, 0);
 
 	mmio_addr = pci_resource_start(pdev, 1);
 	mmio_len = pci_resource_len(pdev, 1);
@@ -125,19 +124,13 @@
 		goto pci_out;
 	}
 
-	if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) {
-		dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n",
-		       mmio_addr, mmio_len);
-		ret = -EBUSY;
+	ret = pci_request_region(pdev, 1, DRV_NAME);
+	if (ret < 0)
 		goto pci_out;
-	}
 
-	if (request_region(ioaddr, iolen, DRV_NAME) == NULL) {
-		dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n",
-		       iolen, ioaddr);
-		ret = -EBUSY;
+	ret = pci_request_region(pdev, 0, DRV_NAME);
+	if (ret < 0)
 		goto mem_out;
-	}
 
 	platform_mmio = mmio_addr;
 	platform_mmiolen = mmio_len;
@@ -169,9 +162,9 @@
 	return 0;
 
 out:
-	release_region(ioaddr, iolen);
+	pci_release_region(pdev, 0);
 mem_out:
-	release_mem_region(mmio_addr, mmio_len);
+	pci_release_region(pdev, 1);
 pci_out:
 	pci_disable_device(pdev);
 	return ret;
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 5571f5b..8dca685 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -5,3 +5,8 @@
 xenbus-objs += xenbus_comms.o
 xenbus-objs += xenbus_xs.o
 xenbus-objs += xenbus_probe.o
+
+xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
+xenbus-objs += $(xenbus-be-objs-y)
+
+obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index deb9c4b..baa65e7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -56,7 +56,6 @@
 #include <xen/events.h>
 #include <xen/page.h>
 
-#include <xen/platform_pci.h>
 #include <xen/hvm.h>
 
 #include "xenbus_comms.h"
@@ -73,15 +72,6 @@
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 
-static void wait_for_devices(struct xenbus_driver *xendrv);
-
-static int xenbus_probe_frontend(const char *type, const char *name);
-
-static void xenbus_dev_shutdown(struct device *_dev);
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
-static int xenbus_dev_resume(struct device *dev);
-
 /* If something in array of ids matches this device, return it. */
 static const struct xenbus_device_id *
 match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -102,34 +92,7 @@
 
 	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
 }
-
-static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
-{
-	struct xenbus_device *dev = to_xenbus_device(_dev);
-
-	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
-		return -ENOMEM;
-
-	return 0;
-}
-
-/* device/<type>/<id> => <type>-<id> */
-static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
-{
-	nodename = strchr(nodename, '/');
-	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
-		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-		return -EINVAL;
-	}
-
-	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
-	if (!strchr(bus_id, '/')) {
-		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-		return -EINVAL;
-	}
-	*strchr(bus_id, '/') = '-';
-	return 0;
-}
+EXPORT_SYMBOL_GPL(xenbus_match);
 
 
 static void free_otherend_details(struct xenbus_device *dev)
@@ -149,7 +112,30 @@
 }
 
 
-int read_otherend_details(struct xenbus_device *xendev,
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+	free_otherend_watch(dev);
+	free_otherend_details(dev);
+
+	return drv->read_otherend_details(dev);
+}
+
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+	struct xen_bus_type *bus =
+		container_of(dev->dev.bus, struct xen_bus_type, bus);
+
+	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
+				    bus->otherend_changed,
+				    "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_read_otherend_details(struct xenbus_device *xendev,
 				 char *id_node, char *path_node)
 {
 	int err = xenbus_gather(XBT_NIL, xendev->nodename,
@@ -174,39 +160,11 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
 
-
-static int read_backend_details(struct xenbus_device *xendev)
-{
-	return read_otherend_details(xendev, "backend-id", "backend");
-}
-
-static struct device_attribute xenbus_dev_attrs[] = {
-	__ATTR_NULL
-};
-
-/* Bus type for frontend drivers. */
-static struct xen_bus_type xenbus_frontend = {
-	.root = "device",
-	.levels = 2, 		/* device/type/<id> */
-	.get_bus_id = frontend_bus_id,
-	.probe = xenbus_probe_frontend,
-	.bus = {
-		.name      = "xen",
-		.match     = xenbus_match,
-		.uevent    = xenbus_uevent,
-		.probe     = xenbus_dev_probe,
-		.remove    = xenbus_dev_remove,
-		.shutdown  = xenbus_dev_shutdown,
-		.dev_attrs = xenbus_dev_attrs,
-
-		.suspend   = xenbus_dev_suspend,
-		.resume    = xenbus_dev_resume,
-	},
-};
-
-static void otherend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
+void xenbus_otherend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len,
+			     int ignore_on_shutdown)
 {
 	struct xenbus_device *dev =
 		container_of(watch, struct xenbus_device, otherend_watch);
@@ -234,11 +192,7 @@
 	 * work that can fail e.g., when the rootfs is gone.
 	 */
 	if (system_state > SYSTEM_RUNNING) {
-		struct xen_bus_type *bus = bus;
-		bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
-		/* If we're frontend, drive the state machine to Closed. */
-		/* This should cause the backend to release our resources. */
-		if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+		if (ignore_on_shutdown && (state == XenbusStateClosing))
 			xenbus_frontend_closed(dev);
 		return;
 	}
@@ -246,25 +200,7 @@
 	if (drv->otherend_changed)
 		drv->otherend_changed(dev, state);
 }
-
-
-static int talk_to_otherend(struct xenbus_device *dev)
-{
-	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-
-	free_otherend_watch(dev);
-	free_otherend_details(dev);
-
-	return drv->read_otherend_details(dev);
-}
-
-
-static int watch_otherend(struct xenbus_device *dev)
-{
-	return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
-				    "%s/%s", dev->otherend, "state");
-}
-
+EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
 
 int xenbus_dev_probe(struct device *_dev)
 {
@@ -308,8 +244,9 @@
 fail:
 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
 	xenbus_switch_state(dev, XenbusStateClosed);
-	return -ENODEV;
+	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_probe);
 
 int xenbus_dev_remove(struct device *_dev)
 {
@@ -327,8 +264,9 @@
 	xenbus_switch_state(dev, XenbusStateClosed);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_remove);
 
-static void xenbus_dev_shutdown(struct device *_dev)
+void xenbus_dev_shutdown(struct device *_dev)
 {
 	struct xenbus_device *dev = to_xenbus_device(_dev);
 	unsigned long timeout = 5*HZ;
@@ -349,6 +287,7 @@
  out:
 	put_device(&dev->dev);
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
 
 int xenbus_register_driver_common(struct xenbus_driver *drv,
 				  struct xen_bus_type *bus,
@@ -362,25 +301,7 @@
 
 	return driver_register(&drv->driver);
 }
-
-int __xenbus_register_frontend(struct xenbus_driver *drv,
-			       struct module *owner, const char *mod_name)
-{
-	int ret;
-
-	drv->read_otherend_details = read_backend_details;
-
-	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
-					    owner, mod_name);
-	if (ret)
-		return ret;
-
-	/* If this driver is loaded as a module wait for devices to attach. */
-	wait_for_devices(drv);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
 
 void xenbus_unregister_driver(struct xenbus_driver *drv)
 {
@@ -551,24 +472,7 @@
 	kfree(xendev);
 	return err;
 }
-
-/* device/<typename>/<name> */
-static int xenbus_probe_frontend(const char *type, const char *name)
-{
-	char *nodename;
-	int err;
-
-	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
-			     xenbus_frontend.root, type, name);
-	if (!nodename)
-		return -ENOMEM;
-
-	DPRINTK("%s", nodename);
-
-	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-	kfree(nodename);
-	return err;
-}
+EXPORT_SYMBOL_GPL(xenbus_probe_node);
 
 static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
 {
@@ -582,10 +486,11 @@
 		return PTR_ERR(dir);
 
 	for (i = 0; i < dir_n; i++) {
-		err = bus->probe(type, dir[i]);
+		err = bus->probe(bus, type, dir[i]);
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
@@ -605,9 +510,11 @@
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_probe_devices);
 
 static unsigned int char_count(const char *str, char c)
 {
@@ -670,32 +577,18 @@
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_changed);
 
-static void frontend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
-{
-	DPRINTK("");
-
-	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-}
-
-/* We watch for devices appearing and vanishing. */
-static struct xenbus_watch fe_watch = {
-	.node = "device",
-	.callback = frontend_changed,
-};
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev, pm_message_t state)
 {
 	int err = 0;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
 
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 
 	if (dev->driver == NULL)
 		return 0;
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
 	if (drv->suspend)
 		err = drv->suspend(xdev, state);
 	if (err)
@@ -703,21 +596,20 @@
 		       "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
 
-static int xenbus_dev_resume(struct device *dev)
+int xenbus_dev_resume(struct device *dev)
 {
 	int err;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
 
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 
 	if (dev->driver == NULL)
 		return 0;
-
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
-
 	err = talk_to_otherend(xdev);
 	if (err) {
 		printk(KERN_WARNING
@@ -748,6 +640,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_resume);
 
 /* A flag to determine if xenstored is 'ready' (i.e. has started) */
 int xenstored_ready = 0;
@@ -776,11 +669,6 @@
 {
 	xenstored_ready = 1;
 
-	/* Enumerate devices in xenstore and watch for changes. */
-	xenbus_probe_devices(&xenbus_frontend);
-	register_xenbus_watch(&fe_watch);
-	xenbus_backend_probe_and_watch();
-
 	/* Notify others that xenstore is up */
 	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 }
@@ -809,16 +697,7 @@
 
 	err = -ENODEV;
 	if (!xen_domain())
-		goto out_error;
-
-	/* Register ourselves with the kernel bus subsystem */
-	err = bus_register(&xenbus_frontend.bus);
-	if (err)
-		goto out_error;
-
-	err = xenbus_backend_bus_register();
-	if (err)
-		goto out_unreg_front;
+		return err;
 
 	/*
 	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
@@ -874,7 +753,7 @@
 	if (err) {
 		printk(KERN_WARNING
 		       "XENBUS: Error initializing xenstore comms: %i\n", err);
-		goto out_unreg_back;
+		goto out_error;
 	}
 
 #ifdef CONFIG_XEN_COMPAT_XENFS
@@ -887,133 +766,13 @@
 
 	return 0;
 
-  out_unreg_back:
-	xenbus_backend_bus_unregister();
-
-  out_unreg_front:
-	bus_unregister(&xenbus_frontend.bus);
-
   out_error:
 	if (page != 0)
 		free_page(page);
+
 	return err;
 }
 
 postcore_initcall(xenbus_init);
 
 MODULE_LICENSE("GPL");
-
-static int is_device_connecting(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-	struct xenbus_driver *xendrv;
-
-	/*
-	 * A device with no driver will never connect. We care only about
-	 * devices which should currently be in the process of connecting.
-	 */
-	if (!dev->driver)
-		return 0;
-
-	/* Is this search limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	xendrv = to_xenbus_driver(dev->driver);
-	return (xendev->state < XenbusStateConnected ||
-		(xendev->state == XenbusStateConnected &&
-		 xendrv->is_ready && !xendrv->is_ready(xendev)));
-}
-
-static int exists_connecting_device(struct device_driver *drv)
-{
-	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-				is_device_connecting);
-}
-
-static int print_device_status(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-
-	/* Is this operation limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	if (!dev->driver) {
-		/* Information only: is this too noisy? */
-		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
-		       xendev->nodename);
-	} else if (xendev->state < XenbusStateConnected) {
-		enum xenbus_state rstate = XenbusStateUnknown;
-		if (xendev->otherend)
-			rstate = xenbus_read_driver_state(xendev->otherend);
-		printk(KERN_WARNING "XENBUS: Timeout connecting "
-		       "to device: %s (local state %d, remote state %d)\n",
-		       xendev->nodename, xendev->state, rstate);
-	}
-
-	return 0;
-}
-
-/* We only wait for device setup after most initcalls have run. */
-static int ready_to_wait_for_devices;
-
-/*
- * On a 5-minute timeout, wait for all devices currently configured.  We need
- * to do this to guarantee that the filesystems and / or network devices
- * needed for boot are available, before we can allow the boot to proceed.
- *
- * This needs to be on a late_initcall, to happen after the frontend device
- * drivers have been initialised, but before the root fs is mounted.
- *
- * A possible improvement here would be to have the tools add a per-device
- * flag to the store entry, indicating whether it is needed at boot time.
- * This would allow people who knew what they were doing to accelerate their
- * boot slightly, but of course needs tools or manual intervention to set up
- * those flags correctly.
- */
-static void wait_for_devices(struct xenbus_driver *xendrv)
-{
-	unsigned long start = jiffies;
-	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
-	unsigned int seconds_waited = 0;
-
-	if (!ready_to_wait_for_devices || !xen_domain())
-		return;
-
-	while (exists_connecting_device(drv)) {
-		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
-			if (!seconds_waited)
-				printk(KERN_WARNING "XENBUS: Waiting for "
-				       "devices to initialise: ");
-			seconds_waited += 5;
-			printk("%us...", 300 - seconds_waited);
-			if (seconds_waited == 300)
-				break;
-		}
-
-		schedule_timeout_interruptible(HZ/10);
-	}
-
-	if (seconds_waited)
-		printk("\n");
-
-	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-			 print_device_status);
-}
-
-#ifndef MODULE
-static int __init boot_wait_for_devices(void)
-{
-	if (xen_hvm_domain() && !xen_platform_pci_unplug)
-		return -ENODEV;
-
-	ready_to_wait_for_devices = 1;
-	wait_for_devices(NULL);
-	return 0;
-}
-
-late_initcall(boot_wait_for_devices);
-#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 6c5e318..2466581 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -36,26 +36,15 @@
 
 #define XEN_BUS_ID_SIZE			20
 
-#ifdef CONFIG_XEN_BACKEND
-extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
-extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
-extern void xenbus_backend_probe_and_watch(void);
-extern int xenbus_backend_bus_register(void);
-extern void xenbus_backend_bus_unregister(void);
-#else
-static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_probe_and_watch(void) {}
-static inline int xenbus_backend_bus_register(void) { return 0; }
-static inline void xenbus_backend_bus_unregister(void) {}
-#endif
-
 struct xen_bus_type
 {
 	char *root;
 	unsigned int levels;
 	int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
-	int (*probe)(const char *type, const char *dir);
+	int (*probe)(struct xen_bus_type *bus, const char *type,
+		     const char *dir);
+	void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
+				 unsigned int len);
 	struct bus_type bus;
 };
 
@@ -73,4 +62,16 @@
 
 extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
+extern void xenbus_dev_shutdown(struct device *_dev);
+
+extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_resume(struct device *dev);
+
+extern void xenbus_otherend_changed(struct xenbus_watch *watch,
+				    const char **vec, unsigned int len,
+				    int ignore_on_shutdown);
+
+extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
+					char *id_node, char *path_node);
+
 #endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
new file mode 100644
index 0000000..6cf467b
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -0,0 +1,276 @@
+/******************************************************************************
+ * Talks to Xen Store to figure out what devices we have (backend half).
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
+ * Copyright (C) 2005, 2006 XenSource Ltd
+ * Copyright (C) 2007 Solarflare Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define DPRINTK(fmt, args...)				\
+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
+		 __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
+static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+	int domid, err;
+	const char *devid, *type, *frontend;
+	unsigned int typelen;
+
+	type = strchr(nodename, '/');
+	if (!type)
+		return -EINVAL;
+	type++;
+	typelen = strcspn(type, "/");
+	if (!typelen || type[typelen] != '/')
+		return -EINVAL;
+
+	devid = strrchr(nodename, '/') + 1;
+
+	err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
+			    "frontend", NULL, &frontend,
+			    NULL);
+	if (err)
+		return err;
+	if (strlen(frontend) == 0)
+		err = -ERANGE;
+	if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
+		err = -ENOENT;
+	kfree(frontend);
+
+	if (err)
+		return err;
+
+	if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s",
+		     typelen, type, domid, devid) >= XEN_BUS_ID_SIZE)
+		return -ENOSPC;
+	return 0;
+}
+
+static int xenbus_uevent_backend(struct device *dev,
+				 struct kobj_uevent_env *env)
+{
+	struct xenbus_device *xdev;
+	struct xenbus_driver *drv;
+	struct xen_bus_type *bus;
+
+	DPRINTK("");
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	xdev = to_xenbus_device(dev);
+	bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
+	if (xdev == NULL)
+		return -ENODEV;
+
+	/* stuff we want to pass to /sbin/hotplug */
+	if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root))
+		return -ENOMEM;
+
+	if (dev->driver) {
+		drv = to_xenbus_driver(dev->driver);
+		if (drv && drv->uevent)
+			return drv->uevent(xdev, env);
+	}
+
+	return 0;
+}
+
+/* backend/<typename>/<frontend-uuid>/<name> */
+static int xenbus_probe_backend_unit(struct xen_bus_type *bus,
+				     const char *dir,
+				     const char *type,
+				     const char *name)
+{
+	char *nodename;
+	int err;
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+	if (!nodename)
+		return -ENOMEM;
+
+	DPRINTK("%s\n", nodename);
+
+	err = xenbus_probe_node(bus, type, nodename);
+	kfree(nodename);
+	return err;
+}
+
+/* backend/<typename>/<frontend-domid> */
+static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
+				const char *domid)
+{
+	char *nodename;
+	int err = 0;
+	char **dir;
+	unsigned int i, dir_n = 0;
+
+	DPRINTK("");
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid);
+	if (!nodename)
+		return -ENOMEM;
+
+	dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
+	if (IS_ERR(dir)) {
+		kfree(nodename);
+		return PTR_ERR(dir);
+	}
+
+	for (i = 0; i < dir_n; i++) {
+		err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]);
+		if (err)
+			break;
+	}
+	kfree(dir);
+	kfree(nodename);
+	return err;
+}
+
+static void frontend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	xenbus_otherend_changed(watch, vec, len, 0);
+}
+
+static struct device_attribute xenbus_backend_dev_attrs[] = {
+	__ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_backend = {
+	.root = "backend",
+	.levels = 3,		/* backend/type/<frontend>/<id> */
+	.get_bus_id = backend_bus_id,
+	.probe = xenbus_probe_backend,
+	.otherend_changed = frontend_changed,
+	.bus = {
+		.name		= "xen-backend",
+		.match		= xenbus_match,
+		.uevent		= xenbus_uevent_backend,
+		.probe		= xenbus_dev_probe,
+		.remove		= xenbus_dev_remove,
+		.shutdown	= xenbus_dev_shutdown,
+		.dev_attrs	= xenbus_backend_dev_attrs,
+	},
+};
+
+static void backend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	DPRINTK("");
+
+	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
+}
+
+static struct xenbus_watch be_watch = {
+	.node = "backend",
+	.callback = backend_changed,
+};
+
+static int read_frontend_details(struct xenbus_device *xendev)
+{
+	return xenbus_read_otherend_details(xendev, "frontend-id", "frontend");
+}
+
+int xenbus_dev_is_online(struct xenbus_device *dev)
+{
+	int rc, val;
+
+	rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
+	if (rc != 1)
+		val = 0; /* no online node present */
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
+
+int __xenbus_register_backend(struct xenbus_driver *drv,
+			      struct module *owner, const char *mod_name)
+{
+	drv->read_otherend_details = read_frontend_details;
+
+	return xenbus_register_driver_common(drv, &xenbus_backend,
+					     owner, mod_name);
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+
+static int backend_probe_and_watch(struct notifier_block *notifier,
+				   unsigned long event,
+				   void *data)
+{
+	/* Enumerate devices in xenstore and watch for changes. */
+	xenbus_probe_devices(&xenbus_backend);
+	register_xenbus_watch(&be_watch);
+
+	return NOTIFY_DONE;
+}
+
+static int __init xenbus_probe_backend_init(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = backend_probe_and_watch
+	};
+	int err;
+
+	DPRINTK("");
+
+	/* Register ourselves with the kernel bus subsystem */
+	err = bus_register(&xenbus_backend.bus);
+	if (err)
+		return err;
+
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+subsys_initcall(xenbus_probe_backend_init);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
new file mode 100644
index 0000000..5bcc2d6
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -0,0 +1,294 @@
+#define DPRINTK(fmt, args...)				\
+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
+		 __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/platform_pci.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+
+/* device/<type>/<id> => <type>-<id> */
+static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+	nodename = strchr(nodename, '/');
+	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
+		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+		return -EINVAL;
+	}
+
+	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+	if (!strchr(bus_id, '/')) {
+		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+		return -EINVAL;
+	}
+	*strchr(bus_id, '/') = '-';
+	return 0;
+}
+
+/* device/<typename>/<name> */
+static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
+				 const char *name)
+{
+	char *nodename;
+	int err;
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
+	if (!nodename)
+		return -ENOMEM;
+
+	DPRINTK("%s", nodename);
+
+	err = xenbus_probe_node(bus, type, nodename);
+	kfree(nodename);
+	return err;
+}
+
+static int xenbus_uevent_frontend(struct device *_dev,
+				  struct kobj_uevent_env *env)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+
+	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
+		return -ENOMEM;
+
+	return 0;
+}
+
+
+static void backend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	xenbus_otherend_changed(watch, vec, len, 1);
+}
+
+static struct device_attribute xenbus_frontend_dev_attrs[] = {
+	__ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_frontend = {
+	.root = "device",
+	.levels = 2,		/* device/type/<id> */
+	.get_bus_id = frontend_bus_id,
+	.probe = xenbus_probe_frontend,
+	.otherend_changed = backend_changed,
+	.bus = {
+		.name		= "xen",
+		.match		= xenbus_match,
+		.uevent		= xenbus_uevent_frontend,
+		.probe		= xenbus_dev_probe,
+		.remove		= xenbus_dev_remove,
+		.shutdown	= xenbus_dev_shutdown,
+		.dev_attrs	= xenbus_frontend_dev_attrs,
+
+		.suspend	= xenbus_dev_suspend,
+		.resume		= xenbus_dev_resume,
+	},
+};
+
+static void frontend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	DPRINTK("");
+
+	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+}
+
+
+/* We watch for devices appearing and vanishing. */
+static struct xenbus_watch fe_watch = {
+	.node = "device",
+	.callback = frontend_changed,
+};
+
+static int read_backend_details(struct xenbus_device *xendev)
+{
+	return xenbus_read_otherend_details(xendev, "backend-id", "backend");
+}
+
+static int is_device_connecting(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+	struct xenbus_driver *xendrv;
+
+	/*
+	 * A device with no driver will never connect. We care only about
+	 * devices which should currently be in the process of connecting.
+	 */
+	if (!dev->driver)
+		return 0;
+
+	/* Is this search limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	xendrv = to_xenbus_driver(dev->driver);
+	return (xendev->state < XenbusStateConnected ||
+		(xendev->state == XenbusStateConnected &&
+		 xendrv->is_ready && !xendrv->is_ready(xendev)));
+}
+
+static int exists_connecting_device(struct device_driver *drv)
+{
+	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+				is_device_connecting);
+}
+
+static int print_device_status(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+
+	/* Is this operation limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	if (!dev->driver) {
+		/* Information only: is this too noisy? */
+		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+		       xendev->nodename);
+	} else if (xendev->state < XenbusStateConnected) {
+		enum xenbus_state rstate = XenbusStateUnknown;
+		if (xendev->otherend)
+			rstate = xenbus_read_driver_state(xendev->otherend);
+		printk(KERN_WARNING "XENBUS: Timeout connecting "
+		       "to device: %s (local state %d, remote state %d)\n",
+		       xendev->nodename, xendev->state, rstate);
+	}
+
+	return 0;
+}
+
+/* We only wait for device setup after most initcalls have run. */
+static int ready_to_wait_for_devices;
+
+/*
+ * On a 5-minute timeout, wait for all devices currently configured.  We need
+ * to do this to guarantee that the filesystems and / or network devices
+ * needed for boot are available, before we can allow the boot to proceed.
+ *
+ * This needs to be on a late_initcall, to happen after the frontend device
+ * drivers have been initialised, but before the root fs is mounted.
+ *
+ * A possible improvement here would be to have the tools add a per-device
+ * flag to the store entry, indicating whether it is needed at boot time.
+ * This would allow people who knew what they were doing to accelerate their
+ * boot slightly, but of course needs tools or manual intervention to set up
+ * those flags correctly.
+ */
+static void wait_for_devices(struct xenbus_driver *xendrv)
+{
+	unsigned long start = jiffies;
+	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+	unsigned int seconds_waited = 0;
+
+	if (!ready_to_wait_for_devices || !xen_domain())
+		return;
+
+	while (exists_connecting_device(drv)) {
+		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
+			if (!seconds_waited)
+				printk(KERN_WARNING "XENBUS: Waiting for "
+				       "devices to initialise: ");
+			seconds_waited += 5;
+			printk("%us...", 300 - seconds_waited);
+			if (seconds_waited == 300)
+				break;
+		}
+
+		schedule_timeout_interruptible(HZ/10);
+	}
+
+	if (seconds_waited)
+		printk("\n");
+
+	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+			 print_device_status);
+}
+
+int __xenbus_register_frontend(struct xenbus_driver *drv,
+			       struct module *owner, const char *mod_name)
+{
+	int ret;
+
+	drv->read_otherend_details = read_backend_details;
+
+	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+					    owner, mod_name);
+	if (ret)
+		return ret;
+
+	/* If this driver is loaded as a module wait for devices to attach. */
+	wait_for_devices(drv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+
+static int frontend_probe_and_watch(struct notifier_block *notifier,
+				   unsigned long event,
+				   void *data)
+{
+	/* Enumerate devices in xenstore and watch for changes. */
+	xenbus_probe_devices(&xenbus_frontend);
+	register_xenbus_watch(&fe_watch);
+
+	return NOTIFY_DONE;
+}
+
+
+static int __init xenbus_probe_frontend_init(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = frontend_probe_and_watch
+	};
+	int err;
+
+	DPRINTK("");
+
+	/* Register ourselves with the kernel bus subsystem */
+	err = bus_register(&xenbus_frontend.bus);
+	if (err)
+		return err;
+
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+subsys_initcall(xenbus_probe_frontend_init);
+
+#ifndef MODULE
+static int __init boot_wait_for_devices(void)
+{
+	if (xen_hvm_domain() && !xen_platform_pci_unplug)
+		return -ENODEV;
+
+	ready_to_wait_for_devices = 1;
+	wait_for_devices(NULL);
+	return 0;
+}
+
+late_initcall(boot_wait_for_devices);
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 1c12360..bbd000f 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -122,6 +122,7 @@
 	int ret;
 
 	mutex_lock(&u->reply_mutex);
+again:
 	while (list_empty(&u->read_buffers)) {
 		mutex_unlock(&u->reply_mutex);
 		if (filp->f_flags & O_NONBLOCK)
@@ -144,7 +145,7 @@
 		i += sz - ret;
 		rb->cons += sz - ret;
 
-		if (ret != sz) {
+		if (ret != 0) {
 			if (i == 0)
 				i = -EFAULT;
 			goto out;
@@ -160,6 +161,8 @@
 					struct read_buffer, list);
 		}
 	}
+	if (i == 0)
+		goto again;
 
 out:
 	mutex_unlock(&u->reply_mutex);
@@ -407,6 +410,7 @@
 
 		mutex_lock(&u->reply_mutex);
 		rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
+		wake_up(&u->read_waitq);
 		mutex_unlock(&u->reply_mutex);
 	}
 
@@ -455,7 +459,7 @@
 
 	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
 
-	if (ret == len) {
+	if (ret != 0) {
 		rc = -EFAULT;
 		goto out;
 	}
@@ -488,21 +492,6 @@
 	msg_type = u->u.msg.type;
 
 	switch (msg_type) {
-	case XS_TRANSACTION_START:
-	case XS_TRANSACTION_END:
-	case XS_DIRECTORY:
-	case XS_READ:
-	case XS_GET_PERMS:
-	case XS_RELEASE:
-	case XS_GET_DOMAIN_PATH:
-	case XS_WRITE:
-	case XS_MKDIR:
-	case XS_RM:
-	case XS_SET_PERMS:
-		/* Send out a transaction */
-		ret = xenbus_write_transaction(msg_type, u);
-		break;
-
 	case XS_WATCH:
 	case XS_UNWATCH:
 		/* (Un)Ask for some path to be watched for changes */
@@ -510,7 +499,8 @@
 		break;
 
 	default:
-		ret = -EINVAL;
+		/* Send out a transaction */
+		ret = xenbus_write_transaction(msg_type, u);
 		break;
 	}
 	if (ret != 0)
@@ -555,6 +545,7 @@
 	struct xenbus_file_priv *u = filp->private_data;
 	struct xenbus_transaction_holder *trans, *tmp;
 	struct watch_adapter *watch, *tmp_watch;
+	struct read_buffer *rb, *tmp_rb;
 
 	/*
 	 * No need for locking here because there are no other users,
@@ -573,6 +564,10 @@
 		free_watch_adapter(watch);
 	}
 
+	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
+		list_del(&rb->list);
+		kfree(rb);
+	}
 	kfree(u);
 
 	return 0;
diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c
index ba0cf0b..cf38e15 100644
--- a/firmware/ihex2fw.c
+++ b/firmware/ihex2fw.c
@@ -124,8 +124,7 @@
 	if (process_ihex(data, st.st_size))
 		return 1;
 
-	output_records(outfd);
-	return 0;
+	return output_records(outfd);
 }
 
 static int process_ihex(uint8_t *data, ssize_t size)
@@ -269,11 +268,13 @@
 
 		p->addr = htonl(p->addr);
 		p->len = htons(p->len);
-		write(outfd, &p->addr, writelen);
+		if (write(outfd, &p->addr, writelen) != writelen)
+			return 1;
 		p = p->next;
 	}
 	/* EOF record is zero length, since we don't bother to represent
 	   the type field in the binary version */
-	write(outfd, zeroes, 6);
+	if (write(outfd, zeroes, 6) != 6)
+		return 1;
 	return 0;
 }
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 7e05114..814ac4e 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -9,6 +9,8 @@
 
 	  If unsure, say N.
 
+if 9P_FS
+
 config 9P_FSCACHE
 	bool "Enable 9P client caching support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -20,7 +22,6 @@
 
 config 9P_FS_POSIX_ACL
 	bool "9P POSIX Access Control Lists"
-	depends on 9P_FS
 	select FS_POSIX_ACL
 	help
 	  POSIX Access Control Lists (ACLs) support permissions for users and
@@ -30,3 +31,5 @@
 	  Linux website <http://acl.bestbits.at/>.
 
 	  If you don't know what Access Control Lists are, say N
+
+endif
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index f8ba37e..ab8c127 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -3,6 +3,7 @@
 9p-objs := \
 	vfs_super.o \
 	vfs_inode.o \
+	vfs_inode_dotl.o \
 	vfs_addr.o \
 	vfs_file.o \
 	vfs_dir.o \
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 6e58c4c..02a2cf6 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -28,7 +28,7 @@
 {
 	ssize_t size;
 	void *value = NULL;
-	struct posix_acl *acl = NULL;;
+	struct posix_acl *acl = NULL;
 
 	size = v9fs_fid_xattr_get(fid, name, NULL, 0);
 	if (size > 0) {
@@ -365,7 +365,7 @@
 	case ACL_TYPE_DEFAULT:
 		name = POSIX_ACL_XATTR_DEFAULT;
 		if (!S_ISDIR(inode->i_mode)) {
-			retval = -EINVAL;
+			retval = acl ? -EINVAL : 0;
 			goto err_out;
 		}
 		break;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index cb63968..c4b5d88 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -113,9 +113,27 @@
 
 struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
 									char *);
-void v9fs_session_close(struct v9fs_session_info *v9ses);
-void v9fs_session_cancel(struct v9fs_session_info *v9ses);
-void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_close(struct v9fs_session_info *v9ses);
+extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+			struct nameidata *nameidata);
+extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry);
+extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
+			void *p);
+extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses,
+			struct p9_fid *fid,
+			struct super_block *sb);
+
+extern const struct inode_operations v9fs_dir_inode_operations_dotl;
+extern const struct inode_operations v9fs_file_inode_operations_dotl;
+extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses,
+			struct p9_fid *fid,
+			struct super_block *sb);
 
 /* other default globals */
 #define V9FS_PORT	564
@@ -138,3 +156,21 @@
 {
 	return v9ses->flags & V9FS_PROTO_2000L;
 }
+
+/**
+ * v9fs_inode_from_fid - Helper routine to populate an inode by
+ * issuing a attribute request
+ * @v9ses: session information
+ * @fid: fid to issue attribute request for
+ * @sb: superblock on which to create inode
+ *
+ */
+static inline struct inode *
+v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+				struct super_block *sb)
+{
+	if (v9fs_proto_dotl(v9ses))
+		return v9fs_inode_dotl(v9ses, fid, sb);
+	else
+		return v9fs_inode(v9ses, fid, sb);
+}
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index bab0eac..b789f8e 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -59,7 +59,6 @@
 int v9fs_dir_release(struct inode *inode, struct file *filp);
 int v9fs_file_open(struct inode *inode, struct file *file);
 void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
-void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
 
 ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 466d2a4..233b7d4 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -86,7 +86,7 @@
  *
  */
 
-void v9fs_dentry_release(struct dentry *dentry)
+static void v9fs_dentry_release(struct dentry *dentry)
 {
 	struct v9fs_dentry *dent;
 	struct p9_fid *temp, *current_fid;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 5978298..b76a40b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -49,15 +49,8 @@
 
 static const struct inode_operations v9fs_dir_inode_operations;
 static const struct inode_operations v9fs_dir_inode_operations_dotu;
-static const struct inode_operations v9fs_dir_inode_operations_dotl;
 static const struct inode_operations v9fs_file_inode_operations;
-static const struct inode_operations v9fs_file_inode_operations_dotl;
 static const struct inode_operations v9fs_symlink_inode_operations;
-static const struct inode_operations v9fs_symlink_inode_operations_dotl;
-
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		    dev_t rdev);
 
 /**
  * unixmode2p9mode - convert unix mode bits to plan 9
@@ -251,41 +244,6 @@
 #endif
 
 /**
- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
- * new file system object. This checks the S_ISGID to determine the owning
- * group of the new file system object.
- */
-
-static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
-{
-	BUG_ON(dir_inode == NULL);
-
-	if (dir_inode->i_mode & S_ISGID) {
-		/* set_gid bit is set.*/
-		return dir_inode->i_gid;
-	}
-	return current_fsgid();
-}
-
-/**
- * v9fs_dentry_from_dir_inode - helper function to get the dentry from
- * dir inode.
- *
- */
-
-static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
-{
-	struct dentry *dentry;
-
-	spin_lock(&inode->i_lock);
-	/* Directory should have only one entry. */
-	BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
-	dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-	spin_unlock(&inode->i_lock);
-	return dentry;
-}
-
-/**
  * v9fs_get_inode - helper function to setup an inode
  * @sb: superblock
  * @mode: mode to setup inode with
@@ -454,7 +412,7 @@
 #endif
 }
 
-static struct inode *
+struct inode *
 v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
 	struct super_block *sb)
 {
@@ -489,60 +447,6 @@
 	return ERR_PTR(err);
 }
 
-static struct inode *
-v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
-	struct super_block *sb)
-{
-	struct inode *ret = NULL;
-	int err;
-	struct p9_stat_dotl *st;
-
-	st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
-	if (IS_ERR(st))
-		return ERR_CAST(st);
-
-	ret = v9fs_get_inode(sb, st->st_mode);
-	if (IS_ERR(ret)) {
-		err = PTR_ERR(ret);
-		goto error;
-	}
-
-	v9fs_stat2inode_dotl(st, ret);
-	ret->i_ino = v9fs_qid2ino(&st->qid);
-#ifdef CONFIG_9P_FSCACHE
-	v9fs_vcookie_set_qid(ret, &st->qid);
-	v9fs_cache_inode_get_cookie(ret);
-#endif
-	err = v9fs_get_acl(ret, fid);
-	if (err) {
-		iput(ret);
-		goto error;
-	}
-	kfree(st);
-	return ret;
-error:
-	kfree(st);
-	return ERR_PTR(err);
-}
-
-/**
- * v9fs_inode_from_fid - Helper routine to populate an inode by
- * issuing a attribute request
- * @v9ses: session information
- * @fid: fid to issue attribute request for
- * @sb: superblock on which to create inode
- *
- */
-static inline struct inode *
-v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
-			struct super_block *sb)
-{
-	if (v9fs_proto_dotl(v9ses))
-		return v9fs_inode_dotl(v9ses, fid, sb);
-	else
-		return v9fs_inode(v9ses, fid, sb);
-}
-
 /**
  * v9fs_remove - helper function to remove files and directories
  * @dir: directory inode that is being deleted
@@ -633,12 +537,6 @@
 		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
 		goto error;
 	}
-
-	if (v9ses->cache)
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-	else
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-
 	d_instantiate(dentry, inode);
 	err = v9fs_fid_add(dentry, fid);
 	if (err < 0)
@@ -657,144 +555,6 @@
 }
 
 /**
- * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
- * @dir: directory inode that is being created
- * @dentry:  dentry that is being deleted
- * @mode: create permissions
- * @nd: path information
- *
- */
-
-static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		struct nameidata *nd)
-{
-	int err = 0;
-	char *name = NULL;
-	gid_t gid;
-	int flags;
-	mode_t mode;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL;
-	struct p9_fid *dfid, *ofid;
-	struct file *filp;
-	struct p9_qid qid;
-	struct inode *inode;
-	struct posix_acl *pacl = NULL, *dacl = NULL;
-
-	v9ses = v9fs_inode2v9ses(dir);
-	if (nd && nd->flags & LOOKUP_OPEN)
-		flags = nd->intent.open.flags - 1;
-	else {
-		/*
-		 * create call without LOOKUP_OPEN is due
-		 * to mknod of regular files. So use mknod
-		 * operation.
-		 */
-		return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
-	}
-
-	name = (char *) dentry->d_name.name;
-	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
-			"mode:0x%x\n", name, flags, omode);
-
-	dfid = v9fs_fid_lookup(dentry->d_parent);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		return err;
-	}
-
-	/* clone a fid to use for creation */
-	ofid = p9_client_walk(dfid, 0, NULL, 1);
-	if (IS_ERR(ofid)) {
-		err = PTR_ERR(ofid);
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
-		return err;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in creat %d\n", err);
-		goto error;
-	}
-	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-				"p9_client_open_dotl failed in creat %d\n",
-				err);
-		goto error;
-	}
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE ||
-	    (nd && nd->flags & LOOKUP_OPEN)) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		/* The fid would get clunked via a dput */
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate
-		 * inode with stat. We need to get an inode
-		 * so that we can set the acl with dentry
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-
-	/* if we are opening a file, assign the open fid to the file */
-	if (nd && nd->flags & LOOKUP_OPEN) {
-		filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
-		if (IS_ERR(filp)) {
-			p9_client_clunk(ofid);
-			return PTR_ERR(filp);
-		}
-		filp->private_data = ofid;
-	} else
-		p9_client_clunk(ofid);
-
-	return 0;
-
-error:
-	if (ofid)
-		p9_client_clunk(ofid);
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
-/**
  * v9fs_vfs_create - VFS hook to create files
  * @dir: directory inode that is being created
  * @dentry:  dentry that is being deleted
@@ -884,107 +644,6 @@
 	return err;
 }
 
-
-/**
- * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
- * @dir:  inode that is being unlinked
- * @dentry: dentry that is being unlinked
- * @mode: mode for new directory
- *
- */
-
-static int v9fs_vfs_mkdir_dotl(struct inode *dir,
-			       struct dentry *dentry, int omode)
-{
-	int err;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL, *dfid = NULL;
-	gid_t gid;
-	char *name;
-	mode_t mode;
-	struct inode *inode;
-	struct p9_qid qid;
-	struct dentry *dir_dentry;
-	struct posix_acl *dacl = NULL, *pacl = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
-	err = 0;
-	v9ses = v9fs_inode2v9ses(dir);
-
-	omode |= S_IFDIR;
-	if (dir->i_mode & S_ISGID)
-		omode |= S_ISGID;
-
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		dfid = NULL;
-		goto error;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in mkdir %d\n", err);
-		goto error;
-	}
-	name = (char *) dentry->d_name.name;
-	err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
-	if (err < 0)
-		goto error;
-
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate
-		 * inode with stat. We need to get an inode
-		 * so that we can set the acl with dentry
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-
-error:
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
 /**
  * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode
  * @dir:  inode that is being walked from
@@ -993,7 +652,7 @@
  *
  */
 
-static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 				      struct nameidata *nameidata)
 {
 	struct super_block *sb;
@@ -1040,11 +699,6 @@
 		goto error_iput;
 
 inst_out:
-	if (v9ses->cache)
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-	else
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-
 	d_add(dentry, inode);
 	return NULL;
 
@@ -1063,7 +717,7 @@
  *
  */
 
-static int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
+int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 0);
 }
@@ -1075,7 +729,7 @@
  *
  */
 
-static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
+int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 1);
 }
@@ -1089,7 +743,7 @@
  *
  */
 
-static int
+int
 v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		struct inode *new_dir, struct dentry *new_dentry)
 {
@@ -1196,42 +850,6 @@
 	return 0;
 }
 
-static int
-v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
-		 struct kstat *stat)
-{
-	int err;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid;
-	struct p9_stat_dotl *st;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
-	err = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
-		return simple_getattr(mnt, dentry, stat);
-
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	/* Ask for all the fields in stat structure. Server will return
-	 * whatever it supports
-	 */
-
-	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
-	if (IS_ERR(st))
-		return PTR_ERR(st);
-
-	v9fs_stat2inode_dotl(st, dentry->d_inode);
-	generic_fillattr(dentry->d_inode, stat);
-	/* Change block size to what the server returned */
-	stat->blksize = st->st_blksize;
-
-	kfree(st);
-	return 0;
-}
-
 /**
  * v9fs_vfs_setattr - set file metadata
  * @dentry: file whose metadata to set
@@ -1291,64 +909,6 @@
 }
 
 /**
- * v9fs_vfs_setattr_dotl - set file metadata
- * @dentry: file whose metadata to set
- * @iattr: metadata assignment structure
- *
- */
-
-int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
-{
-	int retval;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid;
-	struct p9_iattr_dotl p9attr;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "\n");
-
-	retval = inode_change_ok(dentry->d_inode, iattr);
-	if (retval)
-		return retval;
-
-	p9attr.valid = iattr->ia_valid;
-	p9attr.mode = iattr->ia_mode;
-	p9attr.uid = iattr->ia_uid;
-	p9attr.gid = iattr->ia_gid;
-	p9attr.size = iattr->ia_size;
-	p9attr.atime_sec = iattr->ia_atime.tv_sec;
-	p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
-	p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
-	p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
-
-	retval = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	retval = p9_client_setattr(fid, &p9attr);
-	if (retval < 0)
-		return retval;
-
-	if ((iattr->ia_valid & ATTR_SIZE) &&
-	    iattr->ia_size != i_size_read(dentry->d_inode)) {
-		retval = vmtruncate(dentry->d_inode, iattr->ia_size);
-		if (retval)
-			return retval;
-	}
-
-	setattr_copy(dentry->d_inode, iattr);
-	mark_inode_dirty(dentry->d_inode);
-	if (iattr->ia_valid & ATTR_MODE) {
-		/* We also want to update ACL when we update mode bits */
-		retval = v9fs_acl_chmod(dentry);
-		if (retval < 0)
-			return retval;
-	}
-	return 0;
-}
-
-/**
  * v9fs_stat2inode - populate an inode structure with mistat info
  * @stat: Plan 9 metadata (mistat) structure
  * @inode: inode to populate
@@ -1426,77 +986,6 @@
 }
 
 /**
- * v9fs_stat2inode_dotl - populate an inode structure with stat info
- * @stat: stat structure
- * @inode: inode to populate
- * @sb: superblock of filesystem
- *
- */
-
-void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
-{
-
-	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
-		inode->i_atime.tv_sec = stat->st_atime_sec;
-		inode->i_atime.tv_nsec = stat->st_atime_nsec;
-		inode->i_mtime.tv_sec = stat->st_mtime_sec;
-		inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
-		inode->i_ctime.tv_sec = stat->st_ctime_sec;
-		inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
-		inode->i_uid = stat->st_uid;
-		inode->i_gid = stat->st_gid;
-		inode->i_nlink = stat->st_nlink;
-		inode->i_mode = stat->st_mode;
-		inode->i_rdev = new_decode_dev(stat->st_rdev);
-
-		if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
-			init_special_inode(inode, inode->i_mode, inode->i_rdev);
-
-		i_size_write(inode, stat->st_size);
-		inode->i_blocks = stat->st_blocks;
-	} else {
-		if (stat->st_result_mask & P9_STATS_ATIME) {
-			inode->i_atime.tv_sec = stat->st_atime_sec;
-			inode->i_atime.tv_nsec = stat->st_atime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_MTIME) {
-			inode->i_mtime.tv_sec = stat->st_mtime_sec;
-			inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_CTIME) {
-			inode->i_ctime.tv_sec = stat->st_ctime_sec;
-			inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_UID)
-			inode->i_uid = stat->st_uid;
-		if (stat->st_result_mask & P9_STATS_GID)
-			inode->i_gid = stat->st_gid;
-		if (stat->st_result_mask & P9_STATS_NLINK)
-			inode->i_nlink = stat->st_nlink;
-		if (stat->st_result_mask & P9_STATS_MODE) {
-			inode->i_mode = stat->st_mode;
-			if ((S_ISBLK(inode->i_mode)) ||
-						(S_ISCHR(inode->i_mode)))
-				init_special_inode(inode, inode->i_mode,
-								inode->i_rdev);
-		}
-		if (stat->st_result_mask & P9_STATS_RDEV)
-			inode->i_rdev = new_decode_dev(stat->st_rdev);
-		if (stat->st_result_mask & P9_STATS_SIZE)
-			i_size_write(inode, stat->st_size);
-		if (stat->st_result_mask & P9_STATS_BLOCKS)
-			inode->i_blocks = stat->st_blocks;
-	}
-	if (stat->st_result_mask & P9_STATS_GEN)
-			inode->i_generation = stat->st_gen;
-
-	/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
-	 * because the inode structure does not have fields for them.
-	 */
-}
-
-/**
  * v9fs_qid2ino - convert qid into inode number
  * @qid: qid to hash
  *
@@ -1602,7 +1091,7 @@
  *
  */
 
-static void
+void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
 	char *s = nd_get_link(nd);
@@ -1646,94 +1135,6 @@
 }
 
 /**
- * v9fs_vfs_symlink_dotl - helper function to create symlinks
- * @dir: directory inode containing symlink
- * @dentry: dentry for symlink
- * @symname: symlink data
- *
- * See Also: 9P2000.L RFC for more information
- *
- */
-
-static int
-v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
-		const char *symname)
-{
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *dfid;
-	struct p9_fid *fid = NULL;
-	struct inode *inode;
-	struct p9_qid qid;
-	char *name;
-	int err;
-	gid_t gid;
-
-	name = (char *) dentry->d_name.name;
-	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
-			dir->i_ino, name, symname);
-	v9ses = v9fs_inode2v9ses(dir);
-
-	dfid = v9fs_fid_lookup(dentry->d_parent);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		return err;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-
-	/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
-	err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
-
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
-		goto error;
-	}
-
-	if (v9ses->cache) {
-		/* Now walk from the parent so we can get an unopened fid. */
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-					err);
-			fid = NULL;
-			goto error;
-		}
-
-		/* instantiate inode and assign the unopened fid to dentry */
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-					err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/* Not in cached mode. No need to populate inode with stat */
-		inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-
-error:
-	if (fid)
-		p9_client_clunk(fid);
-
-	return err;
-}
-
-/**
  * v9fs_vfs_symlink - helper function to create symlinks
  * @dir: directory inode containing symlink
  * @dentry: dentry for symlink
@@ -1792,77 +1193,6 @@
 }
 
 /**
- * v9fs_vfs_link_dotl - create a hardlink for dotl
- * @old_dentry: dentry for file to link to
- * @dir: inode destination for new link
- * @dentry: dentry for link
- *
- */
-
-static int
-v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
-		struct dentry *dentry)
-{
-	int err;
-	struct p9_fid *dfid, *oldfid;
-	char *name;
-	struct v9fs_session_info *v9ses;
-	struct dentry *dir_dentry;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
-			dir->i_ino, old_dentry->d_name.name,
-			dentry->d_name.name);
-
-	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid))
-		return PTR_ERR(dfid);
-
-	oldfid = v9fs_fid_lookup(old_dentry);
-	if (IS_ERR(oldfid))
-		return PTR_ERR(oldfid);
-
-	name = (char *) dentry->d_name.name;
-
-	err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
-
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
-		return err;
-	}
-
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		/* Get the latest stat info from server. */
-		struct p9_fid *fid;
-		struct p9_stat_dotl *st;
-
-		fid = v9fs_fid_lookup(old_dentry);
-		if (IS_ERR(fid))
-			return PTR_ERR(fid);
-
-		st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
-		if (IS_ERR(st))
-			return PTR_ERR(st);
-
-		v9fs_stat2inode_dotl(st, old_dentry->d_inode);
-
-		kfree(st);
-	} else {
-		/* Caching disabled. No need to get upto date stat info.
-		 * This dentry will be released immediately. So, just hold the
-		 * inode
-		 */
-		ihold(old_dentry->d_inode);
-	}
-
-	d_set_d_op(dentry, old_dentry->d_op);
-	d_instantiate(dentry, old_dentry->d_inode);
-
-	return err;
-}
-
-/**
  * v9fs_vfs_mknod - create a special file
  * @dir: inode destination for new link
  * @dentry: dentry for file
@@ -1907,160 +1237,6 @@
 	return retval;
 }
 
-/**
- * v9fs_vfs_mknod_dotl - create a special file
- * @dir: inode destination for new link
- * @dentry: dentry for file
- * @mode: mode for creation
- * @rdev: device associated with special file
- *
- */
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		dev_t rdev)
-{
-	int err;
-	char *name;
-	mode_t mode;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL, *dfid = NULL;
-	struct inode *inode;
-	gid_t gid;
-	struct p9_qid qid;
-	struct dentry *dir_dentry;
-	struct posix_acl *dacl = NULL, *pacl = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS,
-		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
-		dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
-
-	if (!new_valid_dev(rdev))
-		return -EINVAL;
-
-	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		dfid = NULL;
-		goto error;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in mknod %d\n", err);
-		goto error;
-	}
-	name = (char *) dentry->d_name.name;
-
-	err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
-	if (err < 0)
-		goto error;
-
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate inode with stat.
-		 * socket syscall returns a fd, so we need instantiate
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-error:
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
-static int
-v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen)
-{
-	int retval;
-	struct p9_fid *fid;
-	char *target = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
-	retval = -EPERM;
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	retval = p9_client_readlink(fid, &target);
-	if (retval < 0)
-		return retval;
-
-	strncpy(buffer, target, buflen);
-	P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer);
-
-	retval = strnlen(buffer, buflen);
-	return retval;
-}
-
-/**
- * v9fs_vfs_follow_link_dotl - follow a symlink path
- * @dentry: dentry for symlink
- * @nd: nameidata
- *
- */
-
-static void *
-v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
-{
-	int len = 0;
-	char *link = __getname();
-
-	P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name);
-
-	if (!link)
-		link = ERR_PTR(-ENOMEM);
-	else {
-		len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX);
-		if (len < 0) {
-			__putname(link);
-			link = ERR_PTR(len);
-		} else
-			link[min(len, PATH_MAX-1)] = 0;
-	}
-	nd_set_link(nd, link);
-
-	return NULL;
-}
-
 static const struct inode_operations v9fs_dir_inode_operations_dotu = {
 	.create = v9fs_vfs_create,
 	.lookup = v9fs_vfs_lookup,
@@ -2075,25 +1251,6 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_dir_inode_operations_dotl = {
-	.create = v9fs_vfs_create_dotl,
-	.lookup = v9fs_vfs_lookup,
-	.link = v9fs_vfs_link_dotl,
-	.symlink = v9fs_vfs_symlink_dotl,
-	.unlink = v9fs_vfs_unlink,
-	.mkdir = v9fs_vfs_mkdir_dotl,
-	.rmdir = v9fs_vfs_rmdir,
-	.mknod = v9fs_vfs_mknod_dotl,
-	.rename = v9fs_vfs_rename,
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-	.check_acl = v9fs_check_acl,
-};
-
 static const struct inode_operations v9fs_dir_inode_operations = {
 	.create = v9fs_vfs_create,
 	.lookup = v9fs_vfs_lookup,
@@ -2111,16 +1268,6 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_file_inode_operations_dotl = {
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-	.check_acl = v9fs_check_acl,
-};
-
 static const struct inode_operations v9fs_symlink_inode_operations = {
 	.readlink = generic_readlink,
 	.follow_link = v9fs_vfs_follow_link,
@@ -2129,14 +1276,3 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
-	.readlink = v9fs_vfs_readlink_dotl,
-	.follow_link = v9fs_vfs_follow_link_dotl,
-	.put_link = v9fs_vfs_put_link,
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-};
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
new file mode 100644
index 0000000..fe3ffa9
--- /dev/null
+++ b/fs/9p/vfs_inode_dotl.c
@@ -0,0 +1,824 @@
+/*
+ *  linux/fs/9p/vfs_inode_dotl.c
+ *
+ * This file contains vfs inode ops for the 9P2000.L protocol.
+ *
+ *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/inet.h>
+#include <linux/namei.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+
+#include "v9fs.h"
+#include "v9fs_vfs.h"
+#include "fid.h"
+#include "cache.h"
+#include "xattr.h"
+#include "acl.h"
+
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		    dev_t rdev);
+
+/**
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * new file system object. This checks the S_ISGID bit to determine the owning
+ * group of the new file system object.
+ */
+
+static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
+{
+	BUG_ON(dir_inode == NULL);
+
+	if (dir_inode->i_mode & S_ISGID) {
+		/* set_gid bit is set.*/
+		return dir_inode->i_gid;
+	}
+	return current_fsgid();
+}
+
+/**
+ * v9fs_dentry_from_dir_inode - helper function to get the dentry from
+ * dir inode.
+ *
+ */
+
+static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
+{
+	struct dentry *dentry;
+
+	spin_lock(&inode->i_lock);
+	/* Directory should have only one entry. */
+	BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
+	dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+	spin_unlock(&inode->i_lock);
+	return dentry;
+}
+
+struct inode *
+v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+	struct super_block *sb)
+{
+	struct inode *ret = NULL;
+	int err;
+	struct p9_stat_dotl *st;
+
+	st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+	if (IS_ERR(st))
+		return ERR_CAST(st);
+
+	ret = v9fs_get_inode(sb, st->st_mode);
+	if (IS_ERR(ret)) {
+		err = PTR_ERR(ret);
+		goto error;
+	}
+
+	v9fs_stat2inode_dotl(st, ret);
+	ret->i_ino = v9fs_qid2ino(&st->qid);
+#ifdef CONFIG_9P_FSCACHE
+	v9fs_vcookie_set_qid(ret, &st->qid);
+	v9fs_cache_inode_get_cookie(ret);
+#endif
+	err = v9fs_get_acl(ret, fid);
+	if (err) {
+		iput(ret);
+		goto error;
+	}
+	kfree(st);
+	return ret;
+error:
+	kfree(st);
+	return ERR_PTR(err);
+}
+
+/**
+ * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @dir: directory inode in which the file is being created
+ * @dentry: dentry of the file being created
+ * @mode: create permissions
+ * @nd: path information
+ *
+ */
+
+static int
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		struct nameidata *nd)
+{
+	int err = 0;
+	char *name = NULL;
+	gid_t gid;
+	int flags;
+	mode_t mode;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL;
+	struct p9_fid *dfid, *ofid;
+	struct file *filp;
+	struct p9_qid qid;
+	struct inode *inode;
+	struct posix_acl *pacl = NULL, *dacl = NULL;
+
+	v9ses = v9fs_inode2v9ses(dir);
+	if (nd && nd->flags & LOOKUP_OPEN)
+		flags = nd->intent.open.flags - 1;
+	else {
+		/*
+		 * A create call without LOOKUP_OPEN comes from
+		 * mknod of regular files, so use the mknod
+		 * operation.
+		 */
+		return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+	}
+
+	name = (char *) dentry->d_name.name;
+	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
+			"mode:0x%x\n", name, flags, omode);
+
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		return err;
+	}
+
+	/* clone a fid to use for creation */
+	ofid = p9_client_walk(dfid, 0, NULL, 1);
+	if (IS_ERR(ofid)) {
+		err = PTR_ERR(ofid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+		return err;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in creat %d\n", err);
+		goto error;
+	}
+	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+				"p9_client_open_dotl failed in creat %d\n",
+				err);
+		goto error;
+	}
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	fid = p9_client_walk(dfid, 1, &name, 1);
+	if (IS_ERR(fid)) {
+		err = PTR_ERR(fid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+		fid = NULL;
+		goto error;
+	}
+	inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
+		goto error;
+	}
+	d_instantiate(dentry, inode);
+	err = v9fs_fid_add(dentry, fid);
+	if (err < 0)
+		goto error;
+
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+
+	/* Since we are opening a file, assign the open fid to the file */
+	filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
+	if (IS_ERR(filp)) {
+		p9_client_clunk(ofid);
+		return PTR_ERR(filp);
+	}
+	filp->private_data = ofid;
+	return 0;
+
+error:
+	if (ofid)
+		p9_client_clunk(ofid);
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+/**
+ * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the directory being created
+ * @mode: mode for new directory
+ *
+ */
+
+static int v9fs_vfs_mkdir_dotl(struct inode *dir,
+			       struct dentry *dentry, int omode)
+{
+	int err;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL, *dfid = NULL;
+	gid_t gid;
+	char *name;
+	mode_t mode;
+	struct inode *inode;
+	struct p9_qid qid;
+	struct dentry *dir_dentry;
+	struct posix_acl *dacl = NULL, *pacl = NULL;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+	err = 0;
+	v9ses = v9fs_inode2v9ses(dir);
+
+	omode |= S_IFDIR;
+	if (dir->i_mode & S_ISGID)
+		omode |= S_ISGID;
+
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		dfid = NULL;
+		goto error;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in mkdir %d\n", err);
+		goto error;
+	}
+	name = (char *) dentry->d_name.name;
+	err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
+	if (err < 0)
+		goto error;
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+				err);
+			fid = NULL;
+			goto error;
+		}
+
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+				err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/*
+		 * Not in cached mode. No need to populate
+		 * inode with stat. We need to get an inode
+		 * so that we can set the ACL on the dentry.
+		 */
+		inode = v9fs_get_inode(dir->i_sb, mode);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+
+error:
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+static int
+v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
+		 struct kstat *stat)
+{
+	int err;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid;
+	struct p9_stat_dotl *st;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+	err = -EPERM;
+	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
+		return simple_getattr(mnt, dentry, stat);
+
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid))
+		return PTR_ERR(fid);
+
+	/* Ask for all the fields in the stat structure. The server will
+	 * return whatever it supports.
+	 */
+
+	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
+	if (IS_ERR(st))
+		return PTR_ERR(st);
+
+	v9fs_stat2inode_dotl(st, dentry->d_inode);
+	generic_fillattr(dentry->d_inode, stat);
+	/* Change block size to what the server returned */
+	stat->blksize = st->st_blksize;
+
+	kfree(st);
+	return 0;
+}
+
+/**
+ * v9fs_vfs_setattr_dotl - set file metadata
+ * @dentry: file whose metadata to set
+ * @iattr: metadata assignment structure
+ *
+ */
+
+int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+{
+	int retval;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid;
+	struct p9_iattr_dotl p9attr;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "\n");
+
+	retval = inode_change_ok(dentry->d_inode, iattr);
+	if (retval)
+		return retval;
+
+	p9attr.valid = iattr->ia_valid;
+	p9attr.mode = iattr->ia_mode;
+	p9attr.uid = iattr->ia_uid;
+	p9attr.gid = iattr->ia_gid;
+	p9attr.size = iattr->ia_size;
+	p9attr.atime_sec = iattr->ia_atime.tv_sec;
+	p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+	p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+	p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+
+	retval = -EPERM;
+	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid))
+		return PTR_ERR(fid);
+
+	retval = p9_client_setattr(fid, &p9attr);
+	if (retval < 0)
+		return retval;
+
+	if ((iattr->ia_valid & ATTR_SIZE) &&
+	    iattr->ia_size != i_size_read(dentry->d_inode)) {
+		retval = vmtruncate(dentry->d_inode, iattr->ia_size);
+		if (retval)
+			return retval;
+	}
+
+	setattr_copy(dentry->d_inode, iattr);
+	mark_inode_dirty(dentry->d_inode);
+	if (iattr->ia_valid & ATTR_MODE) {
+		/* We also want to update ACL when we update mode bits */
+		retval = v9fs_acl_chmod(dentry);
+		if (retval < 0)
+			return retval;
+	}
+	return 0;
+}
+
+/**
+ * v9fs_stat2inode_dotl - populate an inode structure with stat info
+ * @stat: stat structure
+ * @inode: inode to populate
+ *
+ */
+
+void
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+{
+
+	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
+		inode->i_atime.tv_sec = stat->st_atime_sec;
+		inode->i_atime.tv_nsec = stat->st_atime_nsec;
+		inode->i_mtime.tv_sec = stat->st_mtime_sec;
+		inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+		inode->i_ctime.tv_sec = stat->st_ctime_sec;
+		inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+		inode->i_uid = stat->st_uid;
+		inode->i_gid = stat->st_gid;
+		inode->i_nlink = stat->st_nlink;
+		inode->i_mode = stat->st_mode;
+		inode->i_rdev = new_decode_dev(stat->st_rdev);
+
+		if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
+			init_special_inode(inode, inode->i_mode, inode->i_rdev);
+
+		i_size_write(inode, stat->st_size);
+		inode->i_blocks = stat->st_blocks;
+	} else {
+		if (stat->st_result_mask & P9_STATS_ATIME) {
+			inode->i_atime.tv_sec = stat->st_atime_sec;
+			inode->i_atime.tv_nsec = stat->st_atime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_MTIME) {
+			inode->i_mtime.tv_sec = stat->st_mtime_sec;
+			inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_CTIME) {
+			inode->i_ctime.tv_sec = stat->st_ctime_sec;
+			inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_UID)
+			inode->i_uid = stat->st_uid;
+		if (stat->st_result_mask & P9_STATS_GID)
+			inode->i_gid = stat->st_gid;
+		if (stat->st_result_mask & P9_STATS_NLINK)
+			inode->i_nlink = stat->st_nlink;
+		if (stat->st_result_mask & P9_STATS_MODE) {
+			inode->i_mode = stat->st_mode;
+			if ((S_ISBLK(inode->i_mode)) ||
+						(S_ISCHR(inode->i_mode)))
+				init_special_inode(inode, inode->i_mode,
+								inode->i_rdev);
+		}
+		if (stat->st_result_mask & P9_STATS_RDEV)
+			inode->i_rdev = new_decode_dev(stat->st_rdev);
+		if (stat->st_result_mask & P9_STATS_SIZE)
+			i_size_write(inode, stat->st_size);
+		if (stat->st_result_mask & P9_STATS_BLOCKS)
+			inode->i_blocks = stat->st_blocks;
+	}
+	if (stat->st_result_mask & P9_STATS_GEN)
+		inode->i_generation = stat->st_gen;
+
+	/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
+	 * because the inode structure does not have fields for them.
+	 */
+}
+
+static int
+v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
+		const char *symname)
+{
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *dfid;
+	struct p9_fid *fid = NULL;
+	struct inode *inode;
+	struct p9_qid qid;
+	char *name;
+	int err;
+	gid_t gid;
+
+	name = (char *) dentry->d_name.name;
+	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
+			dir->i_ino, name, symname);
+	v9ses = v9fs_inode2v9ses(dir);
+
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		return err;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+
+	/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
+	err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
+
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
+		goto error;
+	}
+
+	if (v9ses->cache) {
+		/* Now walk from the parent so we can get an unopened fid. */
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+					err);
+			fid = NULL;
+			goto error;
+		}
+
+		/* instantiate inode and assign the unopened fid to dentry */
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+					err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/* Not in cached mode. No need to populate inode with stat */
+		inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+
+error:
+	if (fid)
+		p9_client_clunk(fid);
+
+	return err;
+}
+
+/**
+ * v9fs_vfs_link_dotl - create a hardlink for dotl
+ * @old_dentry: dentry for file to link to
+ * @dir: inode destination for new link
+ * @dentry: dentry for link
+ *
+ */
+
+static int
+v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
+		struct dentry *dentry)
+{
+	int err;
+	struct p9_fid *dfid, *oldfid;
+	char *name;
+	struct v9fs_session_info *v9ses;
+	struct dentry *dir_dentry;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
+			dir->i_ino, old_dentry->d_name.name,
+			dentry->d_name.name);
+
+	v9ses = v9fs_inode2v9ses(dir);
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid))
+		return PTR_ERR(dfid);
+
+	oldfid = v9fs_fid_lookup(old_dentry);
+	if (IS_ERR(oldfid))
+		return PTR_ERR(oldfid);
+
+	name = (char *) dentry->d_name.name;
+
+	err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
+
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
+		return err;
+	}
+
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		/* Get the latest stat info from server. */
+		struct p9_fid *fid;
+		struct p9_stat_dotl *st;
+
+		fid = v9fs_fid_lookup(old_dentry);
+		if (IS_ERR(fid))
+			return PTR_ERR(fid);
+
+		st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+		if (IS_ERR(st))
+			return PTR_ERR(st);
+
+		v9fs_stat2inode_dotl(st, old_dentry->d_inode);
+
+		kfree(st);
+	} else {
+		/* Caching disabled. No need to get up-to-date stat info.
+		 * This dentry will be released immediately, so just hold the
+		 * inode.
+		 */
+		ihold(old_dentry->d_inode);
+	}
+	d_instantiate(dentry, old_dentry->d_inode);
+
+	return err;
+}
+
+/**
+ * v9fs_vfs_mknod_dotl - create a special file
+ * @dir: inode destination for new link
+ * @dentry: dentry for file
+ * @mode: mode for creation
+ * @rdev: device associated with special file
+ *
+ */
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		dev_t rdev)
+{
+	int err;
+	char *name;
+	mode_t mode;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL, *dfid = NULL;
+	struct inode *inode;
+	gid_t gid;
+	struct p9_qid qid;
+	struct dentry *dir_dentry;
+	struct posix_acl *dacl = NULL, *pacl = NULL;
+
+	P9_DPRINTK(P9_DEBUG_VFS,
+		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
+		dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+	v9ses = v9fs_inode2v9ses(dir);
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		dfid = NULL;
+		goto error;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in mknod %d\n", err);
+		goto error;
+	}
+	name = (char *) dentry->d_name.name;
+
+	err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
+	if (err < 0)
+		goto error;
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+				err);
+			fid = NULL;
+			goto error;
+		}
+
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+				err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/*
+		 * Not in cached mode. No need to populate inode with stat.
+		 * The socket syscall returns an fd, so we still need to
+		 * instantiate the dentry.
+		 */
+		inode = v9fs_get_inode(dir->i_sb, mode);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+error:
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+/**
+ * v9fs_vfs_follow_link_dotl - follow a symlink path
+ * @dentry: dentry for symlink
+ * @nd: nameidata
+ *
+ */
+
+static void *
+v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
+{
+	int retval;
+	struct p9_fid *fid;
+	char *link = __getname();
+	char *target;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+
+	if (!link) {
+		link = ERR_PTR(-ENOMEM);
+		goto ndset;
+	}
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid)) {
+		__putname(link);
+		link = ERR_PTR(PTR_ERR(fid));
+		goto ndset;
+	}
+	retval = p9_client_readlink(fid, &target);
+	if (!retval) {
+		strcpy(link, target);
+		kfree(target);
+		goto ndset;
+	}
+	__putname(link);
+	link = ERR_PTR(retval);
+ndset:
+	nd_set_link(nd, link);
+	return NULL;
+}
+
+const struct inode_operations v9fs_dir_inode_operations_dotl = {
+	.create = v9fs_vfs_create_dotl,
+	.lookup = v9fs_vfs_lookup,
+	.link = v9fs_vfs_link_dotl,
+	.symlink = v9fs_vfs_symlink_dotl,
+	.unlink = v9fs_vfs_unlink,
+	.mkdir = v9fs_vfs_mkdir_dotl,
+	.rmdir = v9fs_vfs_rmdir,
+	.mknod = v9fs_vfs_mknod_dotl,
+	.rename = v9fs_vfs_rename,
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+	.check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_file_inode_operations_dotl = {
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+	.check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_symlink_inode_operations_dotl = {
+	.readlink = generic_readlink,
+	.follow_link = v9fs_vfs_follow_link_dotl,
+	.put_link = v9fs_vfs_put_link,
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+};
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index c55c614..dbaabe3 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -141,6 +141,11 @@
 	}
 	v9fs_fill_super(sb, v9ses, flags, data);
 
+	if (v9ses->cache)
+		sb->s_d_op = &v9fs_cached_dentry_operations;
+	else
+		sb->s_d_op = &v9fs_dentry_operations;
+
 	inode = v9fs_get_inode(sb, S_IFDIR | mode);
 	if (IS_ERR(inode)) {
 		retval = PTR_ERR(inode);
@@ -217,9 +222,6 @@
 
 	P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
 
-	if (s->s_root)
-		v9fs_dentry_release(s->s_root);	/* clunk root */
-
 	kill_anon_super(s);
 
 	v9fs_session_cancel(v9ses);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 43ec7df..d288773 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -133,7 +133,7 @@
 			"p9_client_xattrcreate failed %d\n", retval);
 		goto error;
 	}
-	msize = fid->clnt->msize;;
+	msize = fid->clnt->msize;
 	while (value_len) {
 		if (value_len > (msize - P9_IOHDRSZ))
 			write_count = msize - P9_IOHDRSZ;
diff --git a/fs/Kconfig b/fs/Kconfig
index 771f457..3db9caa 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -30,15 +30,6 @@
 source "fs/reiserfs/Kconfig"
 source "fs/jfs/Kconfig"
 
-config FS_POSIX_ACL
-# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs/nfs4)
-#
-# NOTE: you can implement Posix ACLs without these helpers (XFS does).
-# 	Never use this symbol for ifdefs.
-#
-	bool
-	default n
-
 source "fs/xfs/Kconfig"
 source "fs/gfs2/Kconfig"
 source "fs/ocfs2/Kconfig"
@@ -47,11 +38,19 @@
 
 endif # BLOCK
 
+# Posix ACL utility routines
+#
+# Note: Posix ACLs can be implemented without these helpers.  Never use
+# this symbol for ifdefs in core code.
+#
+config FS_POSIX_ACL
+	def_bool n
+
 config EXPORTFS
 	tristate
 
 config FILE_LOCKING
-	bool "Enable POSIX file locking API" if EMBEDDED
+	bool "Enable POSIX file locking API" if EXPERT
 	default y
 	help
 	  This option enables standard file locking support, required
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index bf7693c..3b4a764 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -276,7 +276,6 @@
 	struct object_info obj;
 	int error;
 
-	d_set_d_op(dentry, &adfs_dentry_operations);
 	lock_kernel();
 	error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj);
 	if (error == 0) {
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index a4041b5..2d79540 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -473,6 +473,7 @@
 		asb->s_namelen = ADFS_F_NAME_LEN;
 	}
 
+	sb->s_d_op = &adfs_dentry_operations;
 	root = adfs_iget(sb, &root_obj);
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
@@ -483,8 +484,7 @@
 		kfree(asb->s_map);
 		adfs_error(sb, "get root inode failed\n");
 		goto error;
-	} else
-		d_set_d_op(sb->s_root, &adfs_dentry_operations);
+	}
 	unlock_kernel();
 	return 0;
 
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index a8cbdeb..0e95f73 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -201,6 +201,7 @@
 extern const struct address_space_operations	 affs_aops_ofs;
 
 extern const struct dentry_operations	 affs_dentry_operations;
+extern const struct dentry_operations	 affs_intl_dentry_operations;
 
 static inline void
 affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 944a404..e3e9efc 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -32,7 +32,7 @@
 	.d_compare	= affs_compare_dentry,
 };
 
-static const struct dentry_operations affs_intl_dentry_operations = {
+const struct dentry_operations affs_intl_dentry_operations = {
 	.d_hash		= affs_intl_hash_dentry,
 	.d_compare	= affs_intl_compare_dentry,
 };
@@ -240,7 +240,6 @@
 		if (IS_ERR(inode))
 			return ERR_CAST(inode);
 	}
-	d_set_d_op(dentry, AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations);
 	d_add(dentry, inode);
 	return NULL;
 }
diff --git a/fs/affs/super.c b/fs/affs/super.c
index d39081b..b31507d 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -477,12 +477,16 @@
 		goto out_error_noinode;
 	}
 
+	if (AFFS_SB(sb)->s_flags & SF_INTL)
+		sb->s_d_op = &affs_intl_dentry_operations;
+	else
+		sb->s_d_op = &affs_dentry_operations;
+
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root) {
 		printk(KERN_ERR "AFFS: Get root inode failed\n");
 		goto out_error;
 	}
-	d_set_d_op(sb->s_root, &affs_dentry_operations);
 
 	pr_debug("AFFS: s_flags=%lX\n",sb->s_flags);
 	return 0;
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index a3bcec7..1c8c6cc 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -289,7 +289,7 @@
 	call->server = server;
 
 	INIT_WORK(&call->work, SRXAFSCB_CallBack);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -336,7 +336,7 @@
 	call->server = server;
 
 	INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -367,7 +367,7 @@
 	call->server = server;
 
 	INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -400,7 +400,7 @@
 	call->state = AFS_CALL_REPLYING;
 
 	INIT_WORK(&call->work, SRXAFSCB_Probe);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -496,7 +496,7 @@
 	call->state = AFS_CALL_REPLYING;
 
 	INIT_WORK(&call->work, SRXAFSCB_ProbeUuid);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -580,6 +580,6 @@
 	call->state = AFS_CALL_REPLYING;
 
 	INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 34a3263..20c106f 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -62,10 +62,11 @@
 	.setattr	= afs_setattr,
 };
 
-static const struct dentry_operations afs_fs_dentry_operations = {
+const struct dentry_operations afs_fs_dentry_operations = {
 	.d_revalidate	= afs_d_revalidate,
 	.d_delete	= afs_d_delete,
 	.d_release	= afs_d_release,
+	.d_automount	= afs_d_automount,
 };
 
 #define AFS_DIR_HASHTBL_SIZE	128
@@ -582,8 +583,6 @@
 	}
 
 success:
-	d_set_d_op(dentry, &afs_fs_dentry_operations);
-
 	d_add(dentry, inode);
 	_leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }",
 	       fid.vnode,
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 0747339..db66c52 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -184,7 +184,8 @@
 	inode->i_generation	= 0;
 
 	set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
-	inode->i_flags |= S_NOATIME;
+	set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+	inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
 	unlock_new_inode(inode);
 	_leave(" = %p", inode);
 	return inode;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6d4bc1c..5a9b684 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -486,6 +486,7 @@
  * dir.c
  */
 extern const struct inode_operations afs_dir_inode_operations;
+extern const struct dentry_operations afs_fs_dentry_operations;
 extern const struct file_operations afs_dir_file_operations;
 
 /*
@@ -576,6 +577,7 @@
 /*
  * main.c
  */
+extern struct workqueue_struct *afs_wq;
 extern struct afs_uuid afs_uuid;
 
 /*
@@ -590,6 +592,7 @@
 extern const struct inode_operations afs_autocell_inode_operations;
 extern const struct file_operations afs_mntpt_file_operations;
 
+extern struct vfsmount *afs_d_automount(struct path *);
 extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
 extern void afs_mntpt_kill_timer(void);
 
diff --git a/fs/afs/main.c b/fs/afs/main.c
index cfd1cbe..42dd2e4 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -30,6 +30,7 @@
 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
 
 struct afs_uuid afs_uuid;
+struct workqueue_struct *afs_wq;
 
 /*
  * get a client UUID
@@ -87,10 +88,16 @@
 	if (ret < 0)
 		return ret;
 
+	/* create workqueue */
+	ret = -ENOMEM;
+	afs_wq = alloc_workqueue("afs", 0, 0);
+	if (!afs_wq)
+		return ret;
+
 	/* register the /proc stuff */
 	ret = afs_proc_init();
 	if (ret < 0)
-		return ret;
+		goto error_proc;
 
 #ifdef CONFIG_AFS_FSCACHE
 	/* we want to be able to cache */
@@ -140,6 +147,8 @@
 error_cache:
 #endif
 	afs_proc_cleanup();
+error_proc:
+	destroy_workqueue(afs_wq);
 	rcu_barrier();
 	printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
 	return ret;
@@ -163,7 +172,7 @@
 	afs_purge_servers();
 	afs_callback_update_kill();
 	afs_vlocation_purge();
-	flush_scheduled_work();
+	destroy_workqueue(afs_wq);
 	afs_cell_purge();
 #ifdef CONFIG_AFS_FSCACHE
 	fscache_unregister_netfs(&afs_cache_netfs);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 6153417..aa59184 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -24,7 +24,6 @@
 				       struct dentry *dentry,
 				       struct nameidata *nd);
 static int afs_mntpt_open(struct inode *inode, struct file *file);
-static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
 static void afs_mntpt_expiry_timed_out(struct work_struct *work);
 
 const struct file_operations afs_mntpt_file_operations = {
@@ -34,13 +33,11 @@
 
 const struct inode_operations afs_mntpt_inode_operations = {
 	.lookup		= afs_mntpt_lookup,
-	.follow_link	= afs_mntpt_follow_link,
 	.readlink	= page_readlink,
 	.getattr	= afs_getattr,
 };
 
 const struct inode_operations afs_autocell_inode_operations = {
-	.follow_link	= afs_mntpt_follow_link,
 	.getattr	= afs_getattr,
 };
 
@@ -88,6 +85,7 @@
 		_debug("symlink is a mountpoint");
 		spin_lock(&vnode->lock);
 		set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+		vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
 		spin_unlock(&vnode->lock);
 	}
 
@@ -238,52 +236,24 @@
 }
 
 /*
- * follow a link from a mountpoint directory, thus causing it to be mounted
+ * handle an automount point
  */
-static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
+struct vfsmount *afs_d_automount(struct path *path)
 {
 	struct vfsmount *newmnt;
-	int err;
 
-	_enter("%p{%s},{%s:%p{%s},}",
-	       dentry,
-	       dentry->d_name.name,
-	       nd->path.mnt->mnt_devname,
-	       dentry,
-	       nd->path.dentry->d_name.name);
+	_enter("{%s,%s}", path->mnt->mnt_devname, path->dentry->d_name.name);
 
-	dput(nd->path.dentry);
-	nd->path.dentry = dget(dentry);
+	newmnt = afs_mntpt_do_automount(path->dentry);
+	if (IS_ERR(newmnt))
+		return newmnt;
 
-	newmnt = afs_mntpt_do_automount(nd->path.dentry);
-	if (IS_ERR(newmnt)) {
-		path_put(&nd->path);
-		return (void *)newmnt;
-	}
-
-	mntget(newmnt);
-	err = do_add_mount(newmnt, &nd->path, MNT_SHRINKABLE, &afs_vfsmounts);
-	switch (err) {
-	case 0:
-		path_put(&nd->path);
-		nd->path.mnt = newmnt;
-		nd->path.dentry = dget(newmnt->mnt_root);
-		schedule_delayed_work(&afs_mntpt_expiry_timer,
-				      afs_mntpt_expiry_timeout * HZ);
-		break;
-	case -EBUSY:
-		/* someone else made a mount here whilst we were busy */
-		while (d_mountpoint(nd->path.dentry) &&
-		       follow_down(&nd->path))
-			;
-		err = 0;
-	default:
-		mntput(newmnt);
-		break;
-	}
-
-	_leave(" = %d", err);
-	return ERR_PTR(err);
+	mntget(newmnt); /* prevent immediate expiration */
+	mnt_set_expiry(newmnt, &afs_vfsmounts);
+	queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
+			   afs_mntpt_expiry_timeout * HZ);
+	_leave(" = %p {%s}", newmnt, newmnt->mnt_devname);
+	return newmnt;
 }
 
 /*
@@ -295,8 +265,8 @@
 
 	if (!list_empty(&afs_vfsmounts)) {
 		mark_mounts_for_expiry(&afs_vfsmounts);
-		schedule_delayed_work(&afs_mntpt_expiry_timer,
-				      afs_mntpt_expiry_timeout * HZ);
+		queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
+				   afs_mntpt_expiry_timeout * HZ);
 	}
 
 	_leave("");
@@ -310,6 +280,5 @@
 	_enter("");
 
 	ASSERT(list_empty(&afs_vfsmounts));
-	cancel_delayed_work(&afs_mntpt_expiry_timer);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&afs_mntpt_expiry_timer);
 }
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 654d8fd..e45a323 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -410,7 +410,7 @@
 	if (!call) {
 		/* its an incoming call for our callback service */
 		skb_queue_tail(&afs_incoming_calls, skb);
-		schedule_work(&afs_collect_incoming_call_work);
+		queue_work(afs_wq, &afs_collect_incoming_call_work);
 	} else {
 		/* route the messages directly to the appropriate call */
 		skb_queue_tail(&call->rx_queue, skb);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 9fdc7fe..d59b751 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -238,8 +238,8 @@
 	if (atomic_read(&server->usage) == 0) {
 		list_move_tail(&server->grave, &afs_server_graveyard);
 		server->time_of_death = get_seconds();
-		schedule_delayed_work(&afs_server_reaper,
-				      afs_server_timeout * HZ);
+		queue_delayed_work(afs_wq, &afs_server_reaper,
+				   afs_server_timeout * HZ);
 	}
 	spin_unlock(&afs_server_graveyard_lock);
 	_leave(" [dead]");
@@ -285,10 +285,11 @@
 		expiry = server->time_of_death + afs_server_timeout;
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
-			if (!schedule_delayed_work(&afs_server_reaper, delay)) {
+			if (!queue_delayed_work(afs_wq, &afs_server_reaper,
+						delay)) {
 				cancel_delayed_work(&afs_server_reaper);
-				schedule_delayed_work(&afs_server_reaper,
-						      delay);
+				queue_delayed_work(afs_wq, &afs_server_reaper,
+						   delay);
 			}
 			break;
 		}
@@ -323,5 +324,5 @@
 {
 	afs_server_timeout = 0;
 	cancel_delayed_work(&afs_server_reaper);
-	schedule_delayed_work(&afs_server_reaper, 0);
+	queue_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f901a9d..fb240e8 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -336,6 +336,7 @@
 	if (!root)
 		goto error;
 
+	sb->s_d_op = &afs_fs_dentry_operations;
 	sb->s_root = root;
 
 	_leave(" = 0");
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 9ac260d..431984d 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -507,8 +507,8 @@
 		_debug("buried");
 		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
 		vl->time_of_death = get_seconds();
-		schedule_delayed_work(&afs_vlocation_reap,
-				      afs_vlocation_timeout * HZ);
+		queue_delayed_work(afs_wq, &afs_vlocation_reap,
+				   afs_vlocation_timeout * HZ);
 
 		/* suspend updates on this record */
 		if (!list_empty(&vl->update)) {
@@ -561,11 +561,11 @@
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
 			_debug("delay %lu", delay);
-			if (!schedule_delayed_work(&afs_vlocation_reap,
-						   delay)) {
+			if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
+						delay)) {
 				cancel_delayed_work(&afs_vlocation_reap);
-				schedule_delayed_work(&afs_vlocation_reap,
-						      delay);
+				queue_delayed_work(afs_wq, &afs_vlocation_reap,
+						   delay);
 			}
 			break;
 		}
@@ -620,7 +620,7 @@
 	destroy_workqueue(afs_vlocation_update_worker);
 
 	cancel_delayed_work(&afs_vlocation_reap);
-	schedule_delayed_work(&afs_vlocation_reap, 0);
+	queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
 
 /*
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 15690bb..789b3af 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -140,6 +140,7 @@
 	candidate->first = candidate->last = index;
 	candidate->offset_first = from;
 	candidate->to_last = to;
+	INIT_LIST_HEAD(&candidate->link);
 	candidate->usage = 1;
 	candidate->state = AFS_WBACK_PENDING;
 	init_waitqueue_head(&candidate->waitq);
diff --git a/fs/aio.c b/fs/aio.c
index 8c8f6c5..26869cd 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -87,7 +87,7 @@
 
 	aio_wq = create_workqueue("aio");
 	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
-	BUG_ON(!abe_pool);
+	BUG_ON(!aio_wq || !abe_pool);
 
 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
@@ -239,15 +239,23 @@
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-#define get_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	atomic_inc(&(kioctx)->users);					\
-} while (0)
-#define put_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	if (unlikely(atomic_dec_and_test(&(kioctx)->users))) 		\
-		__put_ioctx(kioctx);					\
-} while (0)
+static inline void get_ioctx(struct kioctx *kioctx)
+{
+	BUG_ON(atomic_read(&kioctx->users) <= 0);
+	atomic_inc(&kioctx->users);
+}
+
+static inline int try_get_ioctx(struct kioctx *kioctx)
+{
+	return atomic_inc_not_zero(&kioctx->users);
+}
+
+static inline void put_ioctx(struct kioctx *kioctx)
+{
+	BUG_ON(atomic_read(&kioctx->users) <= 0);
+	if (unlikely(atomic_dec_and_test(&kioctx->users)))
+		__put_ioctx(kioctx);
+}
 
 /* ioctx_alloc
  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
@@ -601,8 +609,13 @@
 	rcu_read_lock();
 
 	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
-		if (ctx->user_id == ctx_id && !ctx->dead) {
-			get_ioctx(ctx);
+		/*
+		 * RCU protects us against accessing freed memory but
+		 * we have to be careful not to get a reference when the
+		 * reference count has already dropped to 0 (ctx->dead test
+		 * is unreliable because of races).
+		 */
+		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
 			ret = ctx;
 			break;
 		}
@@ -798,29 +811,12 @@
 	queue_delayed_work(aio_wq, &ctx->wq, timeout);
 }
 
-
 /*
- * aio_run_iocbs:
- * 	Process all pending retries queued on the ioctx
- * 	run list.
- * Assumes it is operating within the aio issuer's mm
- * context.
- */
-static inline void aio_run_iocbs(struct kioctx *ctx)
-{
-	int requeue;
-
-	spin_lock_irq(&ctx->ctx_lock);
-
-	requeue = __aio_run_iocbs(ctx);
-	spin_unlock_irq(&ctx->ctx_lock);
-	if (requeue)
-		aio_queue_work(ctx);
-}
-
-/*
- * just like aio_run_iocbs, but keeps running them until
- * the list stays empty
+ * aio_run_all_iocbs:
+ *	Process all pending retries queued on the ioctx
+ *	run list, and keep running them until the list
+ *	stays empty.
+ * Assumes it is operating within the aio issuer's mm context.
  */
 static inline void aio_run_all_iocbs(struct kioctx *ctx)
 {
@@ -1646,6 +1642,23 @@
 		goto out_put_req;
 
 	spin_lock_irq(&ctx->ctx_lock);
+	/*
+	 * We could have raced with io_destroy() and are currently holding a
+	 * reference to ctx which should be destroyed. We cannot submit IO
+	 * since ctx gets freed as soon as io_submit() puts its reference.  The
+	 * check here is reliable: io_destroy() sets ctx->dead before waiting
+	 * for outstanding IO and the barrier between these two is realized by
+	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
+	 * increment ctx->reqs_active before checking for ctx->dead and the
+	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
+	 * don't see ctx->dead set here, io_destroy() waits for our IO to
+	 * finish.
+	 */
+	if (ctx->dead) {
+		spin_unlock_irq(&ctx->ctx_lock);
+		ret = -EINVAL;
+		goto out_put_req;
+	}
 	aio_run_iocb(req);
 	if (!list_empty(&ctx->run_list)) {
 		/* drain the run list */
@@ -1839,7 +1852,7 @@
 	long ret = -EINVAL;
 
 	if (likely(ioctx)) {
-		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
+		if (likely(min_nr <= nr && min_nr >= 0))
 			ret = read_events(ioctx, min_nr, nr, events, timeout);
 		put_ioctx(ioctx);
 	}
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 5fd38112a..c5567cb 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -26,12 +26,6 @@
 static struct inode *anon_inode_inode;
 static const struct file_operations anon_inode_fops;
 
-static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
-				int flags, const char *dev_name, void *data)
-{
-	return mount_pseudo(fs_type, "anon_inode:", NULL, ANON_INODE_FS_MAGIC);
-}
-
 /*
  * anon_inodefs_dname() is called from d_path().
  */
@@ -41,14 +35,22 @@
 				dentry->d_name.name);
 }
 
+static const struct dentry_operations anon_inodefs_dentry_operations = {
+	.d_dname	= anon_inodefs_dname,
+};
+
+static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
+				int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "anon_inode:", NULL,
+			&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
+}
+
 static struct file_system_type anon_inode_fs_type = {
 	.name		= "anon_inodefs",
 	.mount		= anon_inodefs_mount,
 	.kill_sb	= kill_anon_super,
 };
-static const struct dentry_operations anon_inodefs_dentry_operations = {
-	.d_dname	= anon_inodefs_dname,
-};
 
 /*
  * nop .set_page_dirty method so that people can use .page_mkwrite on
@@ -64,9 +66,9 @@
 };
 
 /**
- * anon_inode_getfd - creates a new file instance by hooking it up to an
- *                    anonymous inode, and a dentry that describe the "class"
- *                    of the file
+ * anon_inode_getfile - creates a new file instance by hooking it up to an
+ *                      anonymous inode, and a dentry that describes the "class"
+ *                      of the file
  *
  * @name:    [in]    name of the "class" of the new file
  * @fops:    [in]    file operations for the new file
@@ -113,7 +115,6 @@
 	 */
 	ihold(anon_inode_inode);
 
-	d_set_d_op(path.dentry, &anon_inodefs_dentry_operations);
 	d_instantiate(path.dentry, anon_inode_inode);
 
 	error = -ENFILE;
@@ -232,7 +233,7 @@
 	return 0;
 
 err_mntput:
-	mntput_long(anon_inode_mnt);
+	mntput(anon_inode_mnt);
 err_unregister_filesystem:
 	unregister_filesystem(&anon_inode_fs_type);
 err_exit:
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 0fffe1c..54f9237 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -88,18 +88,9 @@
 
 	uid_t uid;
 	gid_t gid;
-
-	mode_t	mode;
-	size_t	size;
-
-	void (*free)(struct autofs_info *);
-	union {
-		const char *symlink;
-	} u;
 };
 
 #define AUTOFS_INF_EXPIRING	(1<<0) /* dentry is in the process of expiring */
-#define AUTOFS_INF_MOUNTPOINT	(1<<1) /* mountpoint status for direct expire */
 #define AUTOFS_INF_PENDING	(1<<2) /* dentry pending mount */
 
 struct autofs_wait_queue {
@@ -176,14 +167,7 @@
 	return 0;
 }
 
-static inline void autofs4_copy_atime(struct file *src, struct file *dst)
-{
-	dst->f_path.dentry->d_inode->i_atime =
-		src->f_path.dentry->d_inode->i_atime;
-	return;
-}
-
-struct inode *autofs4_get_inode(struct super_block *, struct autofs_info *);
+struct inode *autofs4_get_inode(struct super_block *, mode_t);
 void autofs4_free_ino(struct autofs_info *);
 
 /* Expiration */
@@ -212,16 +196,89 @@
 
 extern const struct inode_operations autofs4_symlink_inode_operations;
 extern const struct inode_operations autofs4_dir_inode_operations;
-extern const struct inode_operations autofs4_root_inode_operations;
-extern const struct inode_operations autofs4_indirect_root_inode_operations;
-extern const struct inode_operations autofs4_direct_root_inode_operations;
 extern const struct file_operations autofs4_dir_operations;
 extern const struct file_operations autofs4_root_operations;
+extern const struct dentry_operations autofs4_dentry_operations;
+
+/* VFS automount flags management functions */
+
+static inline void __managed_dentry_set_automount(struct dentry *dentry)
+{
+	dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
+}
+
+static inline void managed_dentry_set_automount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_set_automount(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_clear_automount(struct dentry *dentry)
+{
+	dentry->d_flags &= ~DCACHE_NEED_AUTOMOUNT;
+}
+
+static inline void managed_dentry_clear_automount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_clear_automount(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_set_transit(struct dentry *dentry)
+{
+	dentry->d_flags |= DCACHE_MANAGE_TRANSIT;
+}
+
+static inline void managed_dentry_set_transit(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_set_transit(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_clear_transit(struct dentry *dentry)
+{
+	dentry->d_flags &= ~DCACHE_MANAGE_TRANSIT;
+}
+
+static inline void managed_dentry_clear_transit(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_clear_transit(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_set_managed(struct dentry *dentry)
+{
+	dentry->d_flags |= (DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT);
+}
+
+static inline void managed_dentry_set_managed(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_set_managed(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+static inline void __managed_dentry_clear_managed(struct dentry *dentry)
+{
+	dentry->d_flags &= ~(DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT);
+}
+
+static inline void managed_dentry_clear_managed(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__managed_dentry_clear_managed(dentry);
+	spin_unlock(&dentry->d_lock);
+}
 
 /* Initializing function */
 
 int autofs4_fill_super(struct super_block *, void *, int);
-struct autofs_info *autofs4_init_ino(struct autofs_info *, struct autofs_sb_info *sbi, mode_t mode);
+struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
+void autofs4_clean_ino(struct autofs_info *);
 
 /* Queue management functions */
 
@@ -229,19 +286,6 @@
 int autofs4_wait_release(struct autofs_sb_info *,autofs_wqt_t,int);
 void autofs4_catatonic_mode(struct autofs_sb_info *);
 
-static inline int autofs4_follow_mount(struct path *path)
-{
-	int res = 0;
-
-	while (d_mountpoint(path->dentry)) {
-		int followed = follow_down(path);
-		if (!followed)
-			break;
-		res = 1;
-	}
-	return res;
-}
-
 static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi)
 {
 	return new_encode_dev(sbi->sb->s_dev);
@@ -294,5 +338,4 @@
 	return;
 }
 
-void autofs4_dentry_release(struct dentry *);
 extern void autofs4_kill_sb(struct super_block *);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index eff9a41..1442da4 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -551,7 +551,7 @@
 
 		err = have_submounts(path.dentry);
 
-		if (follow_down(&path))
+		if (follow_down_one(&path))
 			magic = path.mnt->mnt_sb->s_magic;
 	}
 
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index cc1d013..f43100b 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -26,10 +26,6 @@
 	if (ino == NULL)
 		return 0;
 
-	/* No point expiring a pending mount */
-	if (ino->flags & AUTOFS_INF_PENDING)
-		return 0;
-
 	if (!do_now) {
 		/* Too young to die */
 		if (!timeout || time_after(ino->last_used + timeout, now))
@@ -56,7 +52,7 @@
 
 	path_get(&path);
 
-	if (!follow_down(&path))
+	if (!follow_down_one(&path))
 		goto done;
 
 	if (is_autofs4_dentry(path.dentry)) {
@@ -100,7 +96,7 @@
 	struct dentry *p, *ret;
 
 	if (prev == NULL)
-		return dget(prev);
+		return dget(root);
 
 	spin_lock(&autofs4_lock);
 relock:
@@ -137,7 +133,7 @@
 	spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
 	/* Negative dentry - try next */
 	if (!simple_positive(ret)) {
-		spin_unlock(&ret->d_lock);
+		spin_unlock(&p->d_lock);
 		p = ret;
 		goto again;
 	}
@@ -283,6 +279,7 @@
 	unsigned long timeout;
 	struct dentry *root = dget(sb->s_root);
 	int do_now = how & AUTOFS_EXP_IMMEDIATE;
+	struct autofs_info *ino;
 
 	if (!root)
 		return NULL;
@@ -291,19 +288,21 @@
 	timeout = sbi->exp_timeout;
 
 	spin_lock(&sbi->fs_lock);
+	ino = autofs4_dentry_ino(root);
+	/* No point expiring a pending mount */
+	if (ino->flags & AUTOFS_INF_PENDING) {
+		spin_unlock(&sbi->fs_lock);
+		return NULL;
+	}
+	managed_dentry_set_transit(root);
 	if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
 		struct autofs_info *ino = autofs4_dentry_ino(root);
-		if (d_mountpoint(root)) {
-			ino->flags |= AUTOFS_INF_MOUNTPOINT;
-			spin_lock(&root->d_lock);
-			root->d_flags &= ~DCACHE_MOUNTED;
-			spin_unlock(&root->d_lock);
-		}
 		ino->flags |= AUTOFS_INF_EXPIRING;
 		init_completion(&ino->expire_complete);
 		spin_unlock(&sbi->fs_lock);
 		return root;
 	}
+	managed_dentry_clear_transit(root);
 	spin_unlock(&sbi->fs_lock);
 	dput(root);
 
@@ -340,6 +339,10 @@
 	while ((dentry = get_next_positive_dentry(dentry, root))) {
 		spin_lock(&sbi->fs_lock);
 		ino = autofs4_dentry_ino(dentry);
+		/* No point expiring a pending mount */
+		if (ino->flags & AUTOFS_INF_PENDING)
+			goto cont;
+		managed_dentry_set_transit(dentry);
 
 		/*
 		 * Case 1: (i) indirect mount or top level pseudo direct mount
@@ -399,6 +402,8 @@
 			}
 		}
 next:
+		managed_dentry_clear_transit(dentry);
+cont:
 		spin_unlock(&sbi->fs_lock);
 	}
 	return NULL;
@@ -479,6 +484,8 @@
 	spin_lock(&sbi->fs_lock);
 	ino = autofs4_dentry_ino(dentry);
 	ino->flags &= ~AUTOFS_INF_EXPIRING;
+	if (!d_unhashed(dentry))
+		managed_dentry_clear_transit(dentry);
 	complete_all(&ino->expire_complete);
 	spin_unlock(&sbi->fs_lock);
 
@@ -504,18 +511,18 @@
 		ret = autofs4_wait(sbi, dentry, NFY_EXPIRE);
 
 		spin_lock(&sbi->fs_lock);
-		if (ino->flags & AUTOFS_INF_MOUNTPOINT) {
-			spin_lock(&sb->s_root->d_lock);
-			/*
-			 * If we haven't been expired away, then reset
-			 * mounted status.
-			 */
-			if (mnt->mnt_parent != mnt)
-				sb->s_root->d_flags |= DCACHE_MOUNTED;
-			spin_unlock(&sb->s_root->d_lock);
-			ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
-		}
 		ino->flags &= ~AUTOFS_INF_EXPIRING;
+		spin_lock(&dentry->d_lock);
+		if (ret)
+			__managed_dentry_clear_transit(dentry);
+		else {
+			if ((IS_ROOT(dentry) ||
+			    (autofs_type_indirect(sbi->type) &&
+			     IS_ROOT(dentry->d_parent))) &&
+			    !(dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+				__managed_dentry_set_automount(dentry);
+		}
+		spin_unlock(&dentry->d_lock);
 		complete_all(&ino->expire_complete);
 		spin_unlock(&sbi->fs_lock);
 		dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index a7bdb9d..180fa24 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -22,77 +22,27 @@
 #include "autofs_i.h"
 #include <linux/module.h>
 
-static void ino_lnkfree(struct autofs_info *ino)
+struct autofs_info *autofs4_new_ino(struct autofs_sb_info *sbi)
 {
-	if (ino->u.symlink) {
-		kfree(ino->u.symlink);
-		ino->u.symlink = NULL;
+	struct autofs_info *ino = kzalloc(sizeof(*ino), GFP_KERNEL);
+	if (ino) {
+		INIT_LIST_HEAD(&ino->active);
+		INIT_LIST_HEAD(&ino->expiring);
+		ino->last_used = jiffies;
+		ino->sbi = sbi;
 	}
+	return ino;
 }
 
-struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
-				     struct autofs_sb_info *sbi, mode_t mode)
+void autofs4_clean_ino(struct autofs_info *ino)
 {
-	int reinit = 1;
-
-	if (ino == NULL) {
-		reinit = 0;
-		ino = kmalloc(sizeof(*ino), GFP_KERNEL);
-	}
-
-	if (ino == NULL)
-		return NULL;
-
-	if (!reinit) {
-		ino->flags = 0;
-		ino->inode = NULL;
-		ino->dentry = NULL;
-		ino->size = 0;
-		INIT_LIST_HEAD(&ino->active);
-		ino->active_count = 0;
-		INIT_LIST_HEAD(&ino->expiring);
-		atomic_set(&ino->count, 0);
-	}
-
 	ino->uid = 0;
 	ino->gid = 0;
-	ino->mode = mode;
 	ino->last_used = jiffies;
-
-	ino->sbi = sbi;
-
-	if (reinit && ino->free)
-		(ino->free)(ino);
-
-	memset(&ino->u, 0, sizeof(ino->u));
-
-	ino->free = NULL;
-
-	if (S_ISLNK(mode))
-		ino->free = ino_lnkfree;
-
-	return ino;
 }
 
 void autofs4_free_ino(struct autofs_info *ino)
 {
-	struct autofs_info *p_ino;
-
-	if (ino->dentry) {
-		ino->dentry->d_fsdata = NULL;
-		if (ino->dentry->d_inode) {
-			struct dentry *parent = ino->dentry->d_parent;
-			if (atomic_dec_and_test(&ino->count)) {
-				p_ino = autofs4_dentry_ino(parent);
-				if (p_ino && parent != ino->dentry)
-					atomic_dec(&p_ino->count);
-			}
-			dput(ino->dentry);
-		}
-		ino->dentry = NULL;
-	}
-	if (ino->free)
-		(ino->free)(ino);
 	kfree(ino);
 }
 
@@ -148,9 +98,16 @@
 	return 0;
 }
 
+static void autofs4_evict_inode(struct inode *inode)
+{
+	end_writeback(inode);
+	kfree(inode->i_private);
+}
+
 static const struct super_operations autofs4_sops = {
 	.statfs		= simple_statfs,
 	.show_options	= autofs4_show_options,
+	.evict_inode	= autofs4_evict_inode,
 };
 
 enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto,
@@ -240,21 +197,6 @@
 	return (*pipefd < 0);
 }
 
-static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi)
-{
-	struct autofs_info *ino;
-
-	ino = autofs4_init_ino(NULL, sbi, S_IFDIR | 0755);
-	if (!ino)
-		return NULL;
-
-	return ino;
-}
-
-static const struct dentry_operations autofs4_sb_dentry_operations = {
-	.d_release      = autofs4_dentry_release,
-};
-
 int autofs4_fill_super(struct super_block *s, void *data, int silent)
 {
 	struct inode * root_inode;
@@ -292,15 +234,16 @@
 	s->s_blocksize_bits = 10;
 	s->s_magic = AUTOFS_SUPER_MAGIC;
 	s->s_op = &autofs4_sops;
+	s->s_d_op = &autofs4_dentry_operations;
 	s->s_time_gran = 1;
 
 	/*
 	 * Get the root inode and dentry, but defer checking for errors.
 	 */
-	ino = autofs4_mkroot(sbi);
+	ino = autofs4_new_ino(sbi);
 	if (!ino)
 		goto fail_free;
-	root_inode = autofs4_get_inode(s, ino);
+	root_inode = autofs4_get_inode(s, S_IFDIR | 0755);
 	if (!root_inode)
 		goto fail_ino;
 
@@ -309,7 +252,6 @@
 		goto fail_iput;
 	pipe = NULL;
 
-	d_set_d_op(root, &autofs4_sb_dentry_operations);
 	root->d_fsdata = ino;
 
 	/* Can this call block? */
@@ -320,10 +262,11 @@
 		goto fail_dput;
 	}
 
+	if (autofs_type_trigger(sbi->type))
+		__managed_dentry_set_managed(root);
+
 	root_inode->i_fop = &autofs4_root_operations;
-	root_inode->i_op = autofs_type_trigger(sbi->type) ?
-			&autofs4_direct_root_inode_operations :
-			&autofs4_indirect_root_inode_operations;
+	root_inode->i_op = &autofs4_dir_inode_operations;
 
 	/* Couldn't this be tested earlier? */
 	if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
@@ -383,16 +326,14 @@
 	return -EINVAL;
 }
 
-struct inode *autofs4_get_inode(struct super_block *sb,
-				struct autofs_info *inf)
+struct inode *autofs4_get_inode(struct super_block *sb, mode_t mode)
 {
 	struct inode *inode = new_inode(sb);
 
 	if (inode == NULL)
 		return NULL;
 
-	inf->inode = inode;
-	inode->i_mode = inf->mode;
+	inode->i_mode = mode;
 	if (sb->s_root) {
 		inode->i_uid = sb->s_root->d_inode->i_uid;
 		inode->i_gid = sb->s_root->d_inode->i_gid;
@@ -400,12 +341,11 @@
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	inode->i_ino = get_next_ino();
 
-	if (S_ISDIR(inf->mode)) {
+	if (S_ISDIR(mode)) {
 		inode->i_nlink = 2;
 		inode->i_op = &autofs4_dir_inode_operations;
 		inode->i_fop = &autofs4_dir_operations;
-	} else if (S_ISLNK(inf->mode)) {
-		inode->i_size = inf->size;
+	} else if (S_ISLNK(mode)) {
 		inode->i_op = &autofs4_symlink_inode_operations;
 	}
 
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 651e4ef..014e7ab 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -35,10 +35,9 @@
 #endif
 static int autofs4_dir_open(struct inode *inode, struct file *file);
 static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
-static void *autofs4_follow_link(struct dentry *, struct nameidata *);
-
-#define TRIGGER_FLAGS   (LOOKUP_CONTINUE | LOOKUP_DIRECTORY)
-#define TRIGGER_INTENTS (LOOKUP_OPEN | LOOKUP_CREATE)
+static struct vfsmount *autofs4_d_automount(struct path *);
+static int autofs4_d_manage(struct dentry *, bool, bool);
+static void autofs4_dentry_release(struct dentry *);
 
 const struct file_operations autofs4_root_operations = {
 	.open		= dcache_dir_open,
@@ -60,22 +59,6 @@
 	.llseek		= dcache_dir_lseek,
 };
 
-const struct inode_operations autofs4_indirect_root_inode_operations = {
-	.lookup		= autofs4_lookup,
-	.unlink		= autofs4_dir_unlink,
-	.symlink	= autofs4_dir_symlink,
-	.mkdir		= autofs4_dir_mkdir,
-	.rmdir		= autofs4_dir_rmdir,
-};
-
-const struct inode_operations autofs4_direct_root_inode_operations = {
-	.lookup		= autofs4_lookup,
-	.unlink		= autofs4_dir_unlink,
-	.mkdir		= autofs4_dir_mkdir,
-	.rmdir		= autofs4_dir_rmdir,
-	.follow_link	= autofs4_follow_link,
-};
-
 const struct inode_operations autofs4_dir_inode_operations = {
 	.lookup		= autofs4_lookup,
 	.unlink		= autofs4_dir_unlink,
@@ -84,6 +67,12 @@
 	.rmdir		= autofs4_dir_rmdir,
 };
 
+const struct dentry_operations autofs4_dentry_operations = {
+	.d_automount	= autofs4_d_automount,
+	.d_manage	= autofs4_d_manage,
+	.d_release	= autofs4_dentry_release,
+};
+
 static void autofs4_add_active(struct dentry *dentry)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
@@ -116,14 +105,6 @@
 	return;
 }
 
-static unsigned int autofs4_need_mount(unsigned int flags)
-{
-	unsigned int res = 0;
-	if (flags & (TRIGGER_FLAGS | TRIGGER_INTENTS))
-		res = 1;
-	return res;
-}
-
 static int autofs4_dir_open(struct inode *inode, struct file *file)
 {
 	struct dentry *dentry = file->f_path.dentry;
@@ -158,279 +139,28 @@
 	return dcache_dir_open(inode, file);
 }
 
-static int try_to_fill_dentry(struct dentry *dentry, int flags)
+static void autofs4_dentry_release(struct dentry *de)
 {
-	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
-	struct autofs_info *ino = autofs4_dentry_ino(dentry);
-	int status;
-
-	DPRINTK("dentry=%p %.*s ino=%p",
-		 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
-
-	/*
-	 * Wait for a pending mount, triggering one if there
-	 * isn't one already
-	 */
-	if (dentry->d_inode == NULL) {
-		DPRINTK("waiting for mount name=%.*s",
-			 dentry->d_name.len, dentry->d_name.name);
-
-		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
-
-		DPRINTK("mount done status=%d", status);
-
-		/* Turn this into a real negative dentry? */
-		if (status == -ENOENT) {
-			spin_lock(&sbi->fs_lock);
-			ino->flags &= ~AUTOFS_INF_PENDING;
-			spin_unlock(&sbi->fs_lock);
-			return status;
-		} else if (status) {
-			/* Return a negative dentry, but leave it "pending" */
-			return status;
-		}
-	/* Trigger mount for path component or follow link */
-	} else if (ino->flags & AUTOFS_INF_PENDING ||
-			autofs4_need_mount(flags)) {
-		DPRINTK("waiting for mount name=%.*s",
-			dentry->d_name.len, dentry->d_name.name);
-
-		spin_lock(&sbi->fs_lock);
-		ino->flags |= AUTOFS_INF_PENDING;
-		spin_unlock(&sbi->fs_lock);
-		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
-
-		DPRINTK("mount done status=%d", status);
-
-		if (status) {
-			spin_lock(&sbi->fs_lock);
-			ino->flags &= ~AUTOFS_INF_PENDING;
-			spin_unlock(&sbi->fs_lock);
-			return status;
-		}
-	}
-
-	/* Initialize expiry counter after successful mount */
-	ino->last_used = jiffies;
-
-	spin_lock(&sbi->fs_lock);
-	ino->flags &= ~AUTOFS_INF_PENDING;
-	spin_unlock(&sbi->fs_lock);
-
-	return 0;
-}
-
-/* For autofs direct mounts the follow link triggers the mount */
-static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
-	struct autofs_info *ino = autofs4_dentry_ino(dentry);
-	int oz_mode = autofs4_oz_mode(sbi);
-	unsigned int lookup_type;
-	int status;
-
-	DPRINTK("dentry=%p %.*s oz_mode=%d nd->flags=%d",
-		dentry, dentry->d_name.len, dentry->d_name.name, oz_mode,
-		nd->flags);
-	/*
-	 * For an expire of a covered direct or offset mount we need
-	 * to break out of follow_down() at the autofs mount trigger
-	 * (d_mounted--), so we can see the expiring flag, and manage
-	 * the blocking and following here until the expire is completed.
-	 */
-	if (oz_mode) {
-		spin_lock(&sbi->fs_lock);
-		if (ino->flags & AUTOFS_INF_EXPIRING) {
-			spin_unlock(&sbi->fs_lock);
-			/* Follow down to our covering mount. */
-			if (!follow_down(&nd->path))
-				goto done;
-			goto follow;
-		}
-		spin_unlock(&sbi->fs_lock);
-		goto done;
-	}
-
-	/* If an expire request is pending everyone must wait. */
-	autofs4_expire_wait(dentry);
-
-	/* We trigger a mount for almost all flags */
-	lookup_type = autofs4_need_mount(nd->flags);
-	spin_lock(&sbi->fs_lock);
-	spin_lock(&autofs4_lock);
-	spin_lock(&dentry->d_lock);
-	if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
-		spin_unlock(&dentry->d_lock);
-		spin_unlock(&autofs4_lock);
-		spin_unlock(&sbi->fs_lock);
-		goto follow;
-	}
-
-	/*
-	 * If the dentry contains directories then it is an autofs
-	 * multi-mount with no root mount offset. So don't try to
-	 * mount it again.
-	 */
-	if (ino->flags & AUTOFS_INF_PENDING ||
-	    (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
-		spin_unlock(&dentry->d_lock);
-		spin_unlock(&autofs4_lock);
-		spin_unlock(&sbi->fs_lock);
-
-		status = try_to_fill_dentry(dentry, nd->flags);
-		if (status)
-			goto out_error;
-
-		goto follow;
-	}
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&autofs4_lock);
-	spin_unlock(&sbi->fs_lock);
-follow:
-	/*
-	 * If there is no root mount it must be an autofs
-	 * multi-mount with no root offset so we don't need
-	 * to follow it.
-	 */
-	if (d_mountpoint(dentry)) {
-		if (!autofs4_follow_mount(&nd->path)) {
-			status = -ENOENT;
-			goto out_error;
-		}
-	}
-
-done:
-	return NULL;
-
-out_error:
-	path_put(&nd->path);
-	return ERR_PTR(status);
-}
-
-/*
- * Revalidate is called on every cache lookup.  Some of those
- * cache lookups may actually happen while the dentry is not
- * yet completely filled in, and revalidate has to delay such
- * lookups..
- */
-static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-	struct inode *dir;
-	struct autofs_sb_info *sbi;
-	int oz_mode;
-	int flags = nd ? nd->flags : 0;
-	int status = 1;
-
-	if (flags & LOOKUP_RCU)
-		return -ECHILD;
-
-	dir = dentry->d_parent->d_inode;
-	sbi = autofs4_sbi(dir->i_sb);
-	oz_mode = autofs4_oz_mode(sbi);
-
-	/* Pending dentry */
-	spin_lock(&sbi->fs_lock);
-	if (autofs4_ispending(dentry)) {
-		/* The daemon never causes a mount to trigger */
-		spin_unlock(&sbi->fs_lock);
-
-		if (oz_mode)
-			return 1;
-
-		/*
-		 * If the directory has gone away due to an expire
-		 * we have been called as ->d_revalidate() and so
-		 * we need to return false and proceed to ->lookup().
-		 */
-		if (autofs4_expire_wait(dentry) == -EAGAIN)
-			return 0;
-
-		/*
-		 * A zero status is success otherwise we have a
-		 * negative error code.
-		 */
-		status = try_to_fill_dentry(dentry, flags);
-		if (status == 0)
-			return 1;
-
-		return status;
-	}
-	spin_unlock(&sbi->fs_lock);
-
-	/* Negative dentry.. invalidate if "old" */
-	if (dentry->d_inode == NULL)
-		return 0;
-
-	/* Check for a non-mountpoint directory with no contents */
-	spin_lock(&autofs4_lock);
-	spin_lock(&dentry->d_lock);
-	if (S_ISDIR(dentry->d_inode->i_mode) &&
-	    !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
-		DPRINTK("dentry=%p %.*s, emptydir",
-			 dentry, dentry->d_name.len, dentry->d_name.name);
-		spin_unlock(&dentry->d_lock);
-		spin_unlock(&autofs4_lock);
-
-		/* The daemon never causes a mount to trigger */
-		if (oz_mode)
-			return 1;
-
-		/*
-		 * A zero status is success otherwise we have a
-		 * negative error code.
-		 */
-		status = try_to_fill_dentry(dentry, flags);
-		if (status == 0)
-			return 1;
-
-		return status;
-	}
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&autofs4_lock);
-
-	return 1;
-}
-
-void autofs4_dentry_release(struct dentry *de)
-{
-	struct autofs_info *inf;
+	struct autofs_info *ino = autofs4_dentry_ino(de);
+	struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb);
 
 	DPRINTK("releasing %p", de);
 
-	inf = autofs4_dentry_ino(de);
-	de->d_fsdata = NULL;
+	if (!ino)
+		return;
 
-	if (inf) {
-		struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb);
-
-		if (sbi) {
-			spin_lock(&sbi->lookup_lock);
-			if (!list_empty(&inf->active))
-				list_del(&inf->active);
-			if (!list_empty(&inf->expiring))
-				list_del(&inf->expiring);
-			spin_unlock(&sbi->lookup_lock);
-		}
-
-		inf->dentry = NULL;
-		inf->inode = NULL;
-
-		autofs4_free_ino(inf);
+	if (sbi) {
+		spin_lock(&sbi->lookup_lock);
+		if (!list_empty(&ino->active))
+			list_del(&ino->active);
+		if (!list_empty(&ino->expiring))
+			list_del(&ino->expiring);
+		spin_unlock(&sbi->lookup_lock);
 	}
+
+	autofs4_free_ino(ino);
 }
 
-/* For dentries of directories in the root dir */
-static const struct dentry_operations autofs4_root_dentry_operations = {
-	.d_revalidate	= autofs4_revalidate,
-	.d_release	= autofs4_dentry_release,
-};
-
-/* For other dentries */
-static const struct dentry_operations autofs4_dentry_operations = {
-	.d_revalidate	= autofs4_revalidate,
-	.d_release	= autofs4_dentry_release,
-};
-
 static struct dentry *autofs4_lookup_active(struct dentry *dentry)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
@@ -541,51 +271,246 @@
 	return NULL;
 }
 
+static int autofs4_mount_wait(struct dentry *dentry)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	int status;
+
+	if (ino->flags & AUTOFS_INF_PENDING) {
+		DPRINTK("waiting for mount name=%.*s",
+			dentry->d_name.len, dentry->d_name.name);
+		status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+		DPRINTK("mount wait done status=%d", status);
+		ino->last_used = jiffies;
+		return status;
+	}
+	return 0;
+}
+
+static int do_expire_wait(struct dentry *dentry)
+{
+	struct dentry *expiring;
+
+	expiring = autofs4_lookup_expiring(dentry);
+	if (!expiring)
+		return autofs4_expire_wait(dentry);
+	else {
+		/*
+		 * If we are racing with expire the request might not
+		 * be quite complete, but the directory has been removed
+		 * so it must have been successful; just wait for it.
+		 */
+		autofs4_expire_wait(expiring);
+		autofs4_del_expiring(expiring);
+		dput(expiring);
+	}
+	return 0;
+}
+
+static struct dentry *autofs4_mountpoint_changed(struct path *path)
+{
+	struct dentry *dentry = path->dentry;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+
+	/*
+	 * If this is an indirect mount the dentry could have gone away
+	 * as a result of an expire and a new one created.
+	 */
+	if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
+		struct dentry *parent = dentry->d_parent;
+		struct dentry *new = d_lookup(parent, &dentry->d_name);
+		if (!new)
+			return NULL;
+		dput(path->dentry);
+		path->dentry = new;
+	}
+	return path->dentry;
+}
+
+static struct vfsmount *autofs4_d_automount(struct path *path)
+{
+	struct dentry *dentry = path->dentry;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	int status;
+
+	DPRINTK("dentry=%p %.*s",
+		dentry, dentry->d_name.len, dentry->d_name.name);
+
+	/*
+	 * Someone may have manually umounted this or it was a submount
+	 * that has gone away.
+	 */
+	spin_lock(&dentry->d_lock);
+	if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
+		if (!(dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
+		     (dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+			__managed_dentry_set_transit(path->dentry);
+	}
+	spin_unlock(&dentry->d_lock);
+
+	/* The daemon never triggers a mount. */
+	if (autofs4_oz_mode(sbi))
+		return NULL;
+
+	/*
+	 * If an expire request is pending everyone must wait.
+	 * If the expire fails we're still mounted so continue
+	 * the follow and return. A return of -EAGAIN (which only
+	 * happens with indirect mounts) means the expire completed
+	 * and the directory was removed, so just go ahead and try
+	 * the mount.
+	 */
+	status = do_expire_wait(dentry);
+	if (status && status != -EAGAIN)
+		return NULL;
+
+	/* Callback to the daemon to perform the mount or wait */
+	spin_lock(&sbi->fs_lock);
+	if (ino->flags & AUTOFS_INF_PENDING) {
+		spin_unlock(&sbi->fs_lock);
+		status = autofs4_mount_wait(dentry);
+		if (status)
+			return ERR_PTR(status);
+		spin_lock(&sbi->fs_lock);
+		goto done;
+	}
+
+	/*
+	 * If the dentry is a symlink it's equivalent to a directory
+	 * having d_mountpoint() true, so there's no need to call back
+	 * to the daemon.
+	 */
+	if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
+		goto done;
+	if (!d_mountpoint(dentry)) {
+		/*
+		 * It's possible that user space hasn't removed directories
+		 * after umounting a rootless multi-mount, although it
+		 * should. For v5 have_submounts() is sufficient to handle
+		 * this because the leaves of the directory tree under the
+		 * mount never trigger mounts themselves (they have an autofs
+		 * trigger mount mounted on them). But v4 pseudo direct mounts
+		 * do need the leaves to trigger mounts. In this case we
+		 * have no choice but to use the list_empty() check and
+		 * require user space to behave.
+		 */
+		if (sbi->version > 4) {
+			if (have_submounts(dentry))
+				goto done;
+		} else {
+			spin_lock(&dentry->d_lock);
+			if (!list_empty(&dentry->d_subdirs)) {
+				spin_unlock(&dentry->d_lock);
+				goto done;
+			}
+			spin_unlock(&dentry->d_lock);
+		}
+		ino->flags |= AUTOFS_INF_PENDING;
+		spin_unlock(&sbi->fs_lock);
+		status = autofs4_mount_wait(dentry);
+		if (status)
+			return ERR_PTR(status);
+		spin_lock(&sbi->fs_lock);
+		ino->flags &= ~AUTOFS_INF_PENDING;
+	}
+done:
+	if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+		/*
+		 * Any needed mounting has been completed and the path updated,
+		 * so turn this into a normal dentry to avoid continually
+		 * calling ->d_automount() and ->d_manage().
+		 */
+		spin_lock(&dentry->d_lock);
+		__managed_dentry_clear_transit(dentry);
+		/*
+		 * Only clear DMANAGED_AUTOMOUNT for rootless multi-mounts and
+		 * symlinks as in all other cases the dentry will be covered by
+		 * an actual mount so ->d_automount() won't be called during
+		 * the follow.
+		 */
+		if ((!d_mountpoint(dentry) &&
+		    !list_empty(&dentry->d_subdirs)) ||
+		    (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+			__managed_dentry_clear_automount(dentry);
+		spin_unlock(&dentry->d_lock);
+	}
+	spin_unlock(&sbi->fs_lock);
+
+	/* Mount succeeded, check if we ended up with a new dentry */
+	dentry = autofs4_mountpoint_changed(path);
+	if (!dentry)
+		return ERR_PTR(-ENOENT);
+
+	return NULL;
+}
+
+int autofs4_d_manage(struct dentry *dentry, bool mounting_here, bool rcu_walk)
+{
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+
+	DPRINTK("dentry=%p %.*s",
+		dentry, dentry->d_name.len, dentry->d_name.name);
+
+	/* The daemon never waits. */
+	if (autofs4_oz_mode(sbi) || mounting_here) {
+		if (!d_mountpoint(dentry))
+			return -EISDIR;
+		return 0;
+	}
+
+	/* We need to sleep, so we need pathwalk to be in ref-mode */
+	if (rcu_walk)
+		return -ECHILD;
+
+	/* Wait for pending expires */
+	do_expire_wait(dentry);
+
+	/*
+	 * This dentry may be under construction so wait on mount
+	 * completion.
+	 */
+	return autofs4_mount_wait(dentry);
+}
+
 /* Lookups in the root directory */
 static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
 	struct autofs_sb_info *sbi;
 	struct autofs_info *ino;
-	struct dentry *expiring, *active;
-	int oz_mode;
+	struct dentry *active;
 
-	DPRINTK("name = %.*s",
-		dentry->d_name.len, dentry->d_name.name);
+	DPRINTK("name = %.*s", dentry->d_name.len, dentry->d_name.name);
 
 	/* File name too long to exist */
 	if (dentry->d_name.len > NAME_MAX)
 		return ERR_PTR(-ENAMETOOLONG);
 
 	sbi = autofs4_sbi(dir->i_sb);
-	oz_mode = autofs4_oz_mode(sbi);
 
 	DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
-		 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
+		current->pid, task_pgrp_nr(current), sbi->catatonic,
+		autofs4_oz_mode(sbi));
 
 	active = autofs4_lookup_active(dentry);
 	if (active) {
-		dentry = active;
-		ino = autofs4_dentry_ino(dentry);
+		return active;
 	} else {
 		/*
-		 * Mark the dentry incomplete but don't hash it. We do this
-		 * to serialize our inode creation operations (symlink and
-		 * mkdir) which prevents deadlock during the callback to
-		 * the daemon. Subsequent user space lookups for the same
-		 * dentry are placed on the wait queue while the daemon
-		 * itself is allowed passage unresticted so the create
-		 * operation itself can then hash the dentry. Finally,
-		 * we check for the hashed dentry and return the newly
-		 * hashed dentry.
+		 * A dentry that is not within the root can never trigger a
+		 * mount operation, unless the directory already exists, so we
+		 * can return failure immediately.  The daemon, however, does need
+		 * to create directories within the file system.
 		 */
-		d_set_d_op(dentry, &autofs4_root_dentry_operations);
+		if (!autofs4_oz_mode(sbi) && !IS_ROOT(dentry->d_parent))
+			return ERR_PTR(-ENOENT);
 
-		/*
-		 * And we need to ensure that the same dentry is used for
-		 * all following lookup calls until it is hashed so that
-		 * the dentry flags are persistent throughout the request.
-		 */
-		ino = autofs4_init_ino(NULL, sbi, 0555);
+		/* Mark entries in the root as mount triggers */
+		if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent))
+			__managed_dentry_set_managed(dentry);
+
+		ino = autofs4_new_ino(sbi);
 		if (!ino)
 			return ERR_PTR(-ENOMEM);
 
@@ -596,82 +521,6 @@
 
 		d_instantiate(dentry, NULL);
 	}
-
-	if (!oz_mode) {
-		mutex_unlock(&dir->i_mutex);
-		expiring = autofs4_lookup_expiring(dentry);
-		if (expiring) {
-			/*
-			 * If we are racing with expire the request might not
-			 * be quite complete but the directory has been removed
-			 * so it must have been successful, so just wait for it.
-			 */
-			autofs4_expire_wait(expiring);
-			autofs4_del_expiring(expiring);
-			dput(expiring);
-		}
-
-		spin_lock(&sbi->fs_lock);
-		ino->flags |= AUTOFS_INF_PENDING;
-		spin_unlock(&sbi->fs_lock);
-		if (dentry->d_op && dentry->d_op->d_revalidate)
-			(dentry->d_op->d_revalidate)(dentry, nd);
-		mutex_lock(&dir->i_mutex);
-	}
-
-	/*
-	 * If we are still pending, check if we had to handle
-	 * a signal. If so we can force a restart..
-	 */
-	if (ino->flags & AUTOFS_INF_PENDING) {
-		/* See if we were interrupted */
-		if (signal_pending(current)) {
-			sigset_t *sigset = &current->pending.signal;
-			if (sigismember (sigset, SIGKILL) ||
-			    sigismember (sigset, SIGQUIT) ||
-			    sigismember (sigset, SIGINT)) {
-			    if (active)
-				dput(active);
-			    return ERR_PTR(-ERESTARTNOINTR);
-			}
-		}
-		if (!oz_mode) {
-			spin_lock(&sbi->fs_lock);
-			ino->flags &= ~AUTOFS_INF_PENDING;
-			spin_unlock(&sbi->fs_lock);
-		}
-	}
-
-	/*
-	 * If this dentry is unhashed, then we shouldn't honour this
-	 * lookup.  Returning ENOENT here doesn't do the right thing
-	 * for all system calls, but it should be OK for the operations
-	 * we permit from an autofs.
-	 */
-	if (!oz_mode && d_unhashed(dentry)) {
-		/*
-		 * A user space application can (and has done in the past)
-		 * remove and re-create this directory during the callback.
-		 * This can leave us with an unhashed dentry, but a
-		 * successful mount!  So we need to perform another
-		 * cached lookup in case the dentry now exists.
-		 */
-		struct dentry *parent = dentry->d_parent;
-		struct dentry *new = d_lookup(parent, &dentry->d_name);
-		if (new != NULL)
-			dentry = new;
-		else
-			dentry = ERR_PTR(-ENOENT);
-
-		if (active)
-			dput(active);
-
-		return dentry;
-	}
-
-	if (active)
-		return active;
-
 	return NULL;
 }
 
@@ -683,6 +532,7 @@
 	struct autofs_info *ino = autofs4_dentry_ino(dentry);
 	struct autofs_info *p_ino;
 	struct inode *inode;
+	size_t size = strlen(symname);
 	char *cp;
 
 	DPRINTK("%s <- %.*s", symname,
@@ -691,45 +541,35 @@
 	if (!autofs4_oz_mode(sbi))
 		return -EACCES;
 
-	ino = autofs4_init_ino(ino, sbi, S_IFLNK | 0555);
-	if (!ino)
-		return -ENOMEM;
+	BUG_ON(!ino);
+
+	autofs4_clean_ino(ino);
 
 	autofs4_del_active(dentry);
 
-	ino->size = strlen(symname);
-	cp = kmalloc(ino->size + 1, GFP_KERNEL);
-	if (!cp) {
-		if (!dentry->d_fsdata)
-			kfree(ino);
+	cp = kmalloc(size + 1, GFP_KERNEL);
+	if (!cp)
 		return -ENOMEM;
-	}
 
 	strcpy(cp, symname);
 
-	inode = autofs4_get_inode(dir->i_sb, ino);
+	inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555);
 	if (!inode) {
 		kfree(cp);
 		if (!dentry->d_fsdata)
 			kfree(ino);
 		return -ENOMEM;
 	}
+	inode->i_private = cp;
+	inode->i_size = size;
 	d_add(dentry, inode);
 
-	if (dir == dir->i_sb->s_root->d_inode)
-		d_set_d_op(dentry, &autofs4_root_dentry_operations);
-	else
-		d_set_d_op(dentry, &autofs4_dentry_operations);
-
-	dentry->d_fsdata = ino;
-	ino->dentry = dget(dentry);
+	dget(dentry);
 	atomic_inc(&ino->count);
 	p_ino = autofs4_dentry_ino(dentry->d_parent);
 	if (p_ino && dentry->d_parent != dentry)
 		atomic_inc(&p_ino->count);
-	ino->inode = inode;
 
-	ino->u.symlink = cp;
 	dir->i_mtime = CURRENT_TIME;
 
 	return 0;
@@ -782,6 +622,58 @@
 	return 0;
 }
 
+/*
+ * Version 4 of autofs provides a pseudo direct mount implementation
+ * that relies on directories at the leaves of a directory tree under
+ * an indirect mount to trigger mounts. To allow for this we need to
+ * set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves
+ * of the directory tree. There is no need to clear the automount flag
+ * following a mount or restore it after an expire because these mounts
+ * are always covered. However, it is necessary to ensure that these
+ * flags are clear on non-empty directories to avoid unnecessary calls
+ * during path walks.
+ */
+static void autofs_set_leaf_automount_flags(struct dentry *dentry)
+{
+	struct dentry *parent;
+
+	/* root and dentries in the root are already handled */
+	if (IS_ROOT(dentry->d_parent))
+		return;
+
+	managed_dentry_set_managed(dentry);
+
+	parent = dentry->d_parent;
+	/* only consider parents below dentries in the root */
+	if (IS_ROOT(parent->d_parent))
+		return;
+	managed_dentry_clear_managed(parent);
+	return;
+}
+
+static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
+{
+	struct list_head *d_child;
+	struct dentry *parent;
+
+	/* flags for dentries in the root are handled elsewhere */
+	if (IS_ROOT(dentry->d_parent))
+		return;
+
+	managed_dentry_clear_managed(dentry);
+
+	parent = dentry->d_parent;
+	/* only consider parents below dentries in the root */
+	if (IS_ROOT(parent->d_parent))
+		return;
+	d_child = &dentry->d_u.d_child;
+	/* Set parent managed if it's becoming empty */
+	if (d_child->next == &parent->d_subdirs &&
+	    d_child->prev == &parent->d_subdirs)
+		managed_dentry_set_managed(parent);
+	return;
+}
+
 static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
@@ -809,6 +701,9 @@
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&autofs4_lock);
 
+	if (sbi->version < 5)
+		autofs_clear_leaf_automount_flags(dentry);
+
 	if (atomic_dec_and_test(&ino->count)) {
 		p_ino = autofs4_dentry_ino(dentry->d_parent);
 		if (p_ino && dentry->d_parent != dentry)
@@ -837,32 +732,25 @@
 	DPRINTK("dentry %p, creating %.*s",
 		dentry, dentry->d_name.len, dentry->d_name.name);
 
-	ino = autofs4_init_ino(ino, sbi, S_IFDIR | 0555);
-	if (!ino)
-		return -ENOMEM;
+	BUG_ON(!ino);
+
+	autofs4_clean_ino(ino);
 
 	autofs4_del_active(dentry);
 
-	inode = autofs4_get_inode(dir->i_sb, ino);
-	if (!inode) {
-		if (!dentry->d_fsdata)
-			kfree(ino);
+	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
+	if (!inode)
 		return -ENOMEM;
-	}
 	d_add(dentry, inode);
 
-	if (dir == dir->i_sb->s_root->d_inode)
-		d_set_d_op(dentry, &autofs4_root_dentry_operations);
-	else
-		d_set_d_op(dentry, &autofs4_dentry_operations);
+	if (sbi->version < 5)
+		autofs_set_leaf_automount_flags(dentry);
 
-	dentry->d_fsdata = ino;
-	ino->dentry = dget(dentry);
+	dget(dentry);
 	atomic_inc(&ino->count);
 	p_ino = autofs4_dentry_ino(dentry->d_parent);
 	if (p_ino && dentry->d_parent != dentry)
 		atomic_inc(&p_ino->count);
-	ino->inode = inode;
 	inc_nlink(dir);
 	dir->i_mtime = CURRENT_TIME;
 
@@ -944,8 +832,7 @@
 int is_autofs4_dentry(struct dentry *dentry)
 {
 	return dentry && dentry->d_inode &&
-		(dentry->d_op == &autofs4_root_dentry_operations ||
-		 dentry->d_op == &autofs4_dentry_operations) &&
+		dentry->d_op == &autofs4_dentry_operations &&
 		dentry->d_fsdata != NULL;
 }
 
diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
index b4ea829..f27c094 100644
--- a/fs/autofs4/symlink.c
+++ b/fs/autofs4/symlink.c
@@ -14,8 +14,7 @@
 
 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
-	struct autofs_info *ino = autofs4_dentry_ino(dentry);
-	nd_set_link(nd, (char *)ino->u.symlink);
+	nd_set_link(nd, dentry->d_inode->i_private);
 	return NULL;
 }
 
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index c5f8459..5601005 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -309,6 +309,9 @@
 	 * completed while we waited on the mutex ...
 	 */
 	if (notify == NFY_MOUNT) {
+		struct dentry *new = NULL;
+		int valid = 1;
+
 		/*
 		 * If the dentry was successfully mounted while we slept
 		 * on the wait queue mutex we can return success. If it
@@ -316,8 +319,20 @@
 		 * a multi-mount with no mount at it's base) we can
 		 * continue on and create a new request.
 		 */
+		if (!IS_ROOT(dentry)) {
+			if (dentry->d_inode && d_unhashed(dentry)) {
+				struct dentry *parent = dentry->d_parent;
+				new = d_lookup(parent, &dentry->d_name);
+				if (new)
+					dentry = new;
+			}
+		}
 		if (have_submounts(dentry))
-			return 0;
+			valid = 0;
+
+		if (new)
+			dput(new);
+		return valid;
 	}
 
 	return 1;
diff --git a/fs/befs/endian.h b/fs/befs/endian.h
index 6cb84d8..2722387 100644
--- a/fs/befs/endian.h
+++ b/fs/befs/endian.h
@@ -102,22 +102,22 @@
 }
 
 static inline befs_data_stream
-fsds_to_cpu(const struct super_block *sb, befs_disk_data_stream n)
+fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n)
 {
 	befs_data_stream data;
 	int i;
 
 	for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i)
-		data.direct[i] = fsrun_to_cpu(sb, n.direct[i]);
+		data.direct[i] = fsrun_to_cpu(sb, n->direct[i]);
 
-	data.max_direct_range = fs64_to_cpu(sb, n.max_direct_range);
-	data.indirect = fsrun_to_cpu(sb, n.indirect);
-	data.max_indirect_range = fs64_to_cpu(sb, n.max_indirect_range);
-	data.double_indirect = fsrun_to_cpu(sb, n.double_indirect);
+	data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range);
+	data.indirect = fsrun_to_cpu(sb, n->indirect);
+	data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range);
+	data.double_indirect = fsrun_to_cpu(sb, n->double_indirect);
 	data.max_double_indirect_range = fs64_to_cpu(sb,
-						     n.
+						     n->
 						     max_double_indirect_range);
-	data.size = fs64_to_cpu(sb, n.size);
+	data.size = fs64_to_cpu(sb, n->size);
 
 	return data;
 }
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index de93581..b1d0c79 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -390,7 +390,7 @@
 		int num_blks;
 
 		befs_ino->i_data.ds =
-		    fsds_to_cpu(sb, raw_inode->data.datastream);
+		    fsds_to_cpu(sb, &raw_inode->data.datastream);
 
 		num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds);
 		inode->i_blocks =
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6884e19..d5b640b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -66,12 +66,11 @@
 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
 
 static struct linux_binfmt elf_format = {
-		.module		= THIS_MODULE,
-		.load_binary	= load_elf_binary,
-		.load_shlib	= load_elf_library,
-		.core_dump	= elf_core_dump,
-		.min_coredump	= ELF_EXEC_PAGESIZE,
-		.hasvdso	= 1
+	.module		= THIS_MODULE,
+	.load_binary	= load_elf_binary,
+	.load_shlib	= load_elf_library,
+	.core_dump	= elf_core_dump,
+	.min_coredump	= ELF_EXEC_PAGESIZE,
 };
 
 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
@@ -316,8 +315,6 @@
 	return 0;
 }
 
-#ifndef elf_map
-
 static unsigned long elf_map(struct file *filep, unsigned long addr,
 		struct elf_phdr *eppnt, int prot, int type,
 		unsigned long total_size)
@@ -354,8 +351,6 @@
 	return(map_addr);
 }
 
-#endif /* !elf_map */
-
 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 {
 	int i, first_idx = -1, last_idx = -1;
@@ -421,7 +416,7 @@
 		goto out;
 
 	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
-			     (char *)elf_phdata,size);
+			     (char *)elf_phdata, size);
 	error = -EIO;
 	if (retval != size) {
 		if (retval < 0)
@@ -601,7 +596,7 @@
 		goto out;
 	if (!elf_check_arch(&loc->elf_ex))
 		goto out;
-	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
+	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
 		goto out;
 
 	/* Now read in all of the header information */
@@ -761,8 +756,8 @@
 			/* There was a PT_LOAD segment with p_memsz > p_filesz
 			   before this one. Map anonymous pages, if needed,
 			   and clear the area.  */
-			retval = set_brk (elf_bss + load_bias,
-					  elf_brk + load_bias);
+			retval = set_brk(elf_bss + load_bias,
+					 elf_brk + load_bias);
 			if (retval) {
 				send_sig(SIGKILL, current, 0);
 				goto out_free_dentry;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 4d0ff5e..e49cce2 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -782,7 +782,12 @@
 {
 	unsigned int i;
 
-	kintegrityd_wq = create_workqueue("kintegrityd");
+	/*
+	 * kintegrityd won't block much but may burn a lot of CPU cycles.
+	 * Make it highpri CPU intensive wq with max concurrency of 1.
+	 */
+	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
+					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
 	if (!kintegrityd_wq)
 		panic("Failed to create kintegrityd\n");
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 771f235..8892870 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -433,7 +433,7 @@
 	INIT_LIST_HEAD(&bdev->bd_inodes);
 	INIT_LIST_HEAD(&bdev->bd_list);
 #ifdef CONFIG_SYSFS
-	INIT_LIST_HEAD(&bdev->bd_holder_list);
+	INIT_LIST_HEAD(&bdev->bd_holder_disks);
 #endif
 	inode_init_once(&ei->vfs_inode);
 	/* Initialize mutex for freeze. */
@@ -473,7 +473,7 @@
 static struct dentry *bd_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
+	return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, 0x62646576);
 }
 
 static struct file_system_type bd_type = {
@@ -669,7 +669,7 @@
 	else if (bdev->bd_contains == bdev)
 		return true;  	 /* is a whole device which isn't held */
 
-	else if (whole->bd_holder == bd_claim)
+	else if (whole->bd_holder == bd_may_claim)
 		return true; 	 /* is a partition of a device that is being partitioned */
 	else if (whole->bd_holder != NULL)
 		return false;	 /* is a partition of a held device */
@@ -781,452 +781,162 @@
 	}
 }
 
-/* releases bdev_lock */
-static void __bd_abort_claiming(struct block_device *whole, void *holder)
-{
-	BUG_ON(whole->bd_claiming != holder);
-	whole->bd_claiming = NULL;
-	wake_up_bit(&whole->bd_claiming, 0);
-
-	spin_unlock(&bdev_lock);
-	bdput(whole);
-}
-
-/**
- * bd_abort_claiming - abort claiming a block device
- * @whole: whole block device returned by bd_start_claiming()
- * @holder: holder trying to claim @bdev
- *
- * Abort a claiming block started by bd_start_claiming().  Note that
- * @whole is not the block device to be claimed but the whole device
- * returned by bd_start_claiming().
- *
- * CONTEXT:
- * Grabs and releases bdev_lock.
- */
-static void bd_abort_claiming(struct block_device *whole, void *holder)
-{
-	spin_lock(&bdev_lock);
-	__bd_abort_claiming(whole, holder);		/* releases bdev_lock */
-}
-
-/* increment holders when we have a legitimate claim. requires bdev_lock */
-static void __bd_claim(struct block_device *bdev, struct block_device *whole,
-					void *holder)
-{
-	/* note that for a whole device bd_holders
-	 * will be incremented twice, and bd_holder will
-	 * be set to bd_claim before being set to holder
-	 */
-	whole->bd_holders++;
-	whole->bd_holder = bd_claim;
-	bdev->bd_holders++;
-	bdev->bd_holder = holder;
-}
-
-/**
- * bd_finish_claiming - finish claiming a block device
- * @bdev: block device of interest (passed to bd_start_claiming())
- * @whole: whole block device returned by bd_start_claiming()
- * @holder: holder trying to claim @bdev
- *
- * Finish a claiming block started by bd_start_claiming().
- *
- * CONTEXT:
- * Grabs and releases bdev_lock.
- */
-static void bd_finish_claiming(struct block_device *bdev,
-				struct block_device *whole, void *holder)
-{
-	spin_lock(&bdev_lock);
-	BUG_ON(!bd_may_claim(bdev, whole, holder));
-	__bd_claim(bdev, whole, holder);
-	__bd_abort_claiming(whole, holder); /* not actually an abort */
-}
-
-/**
- * bd_claim - claim a block device
- * @bdev: block device to claim
- * @holder: holder trying to claim @bdev
- *
- * Try to claim @bdev which must have been opened successfully.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 if successful, -EBUSY if @bdev is already claimed.
- */
-int bd_claim(struct block_device *bdev, void *holder)
-{
-	struct block_device *whole = bdev->bd_contains;
-	int res;
-
-	might_sleep();
-
-	spin_lock(&bdev_lock);
-	res = bd_prepare_to_claim(bdev, whole, holder);
-	if (res == 0)
-		__bd_claim(bdev, whole, holder);
-	spin_unlock(&bdev_lock);
-
-	return res;
-}
-EXPORT_SYMBOL(bd_claim);
-
-void bd_release(struct block_device *bdev)
-{
-	spin_lock(&bdev_lock);
-	if (!--bdev->bd_contains->bd_holders)
-		bdev->bd_contains->bd_holder = NULL;
-	if (!--bdev->bd_holders)
-		bdev->bd_holder = NULL;
-	spin_unlock(&bdev_lock);
-}
-
-EXPORT_SYMBOL(bd_release);
-
 #ifdef CONFIG_SYSFS
-/*
- * Functions for bd_claim_by_kobject / bd_release_from_kobject
- *
- *     If a kobject is passed to bd_claim_by_kobject()
- *     and the kobject has a parent directory,
- *     following symlinks are created:
- *        o from the kobject to the claimed bdev
- *        o from "holders" directory of the bdev to the parent of the kobject
- *     bd_release_from_kobject() removes these symlinks.
- *
- *     Example:
- *        If /dev/dm-0 maps to /dev/sda, kobject corresponding to
- *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
- *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
- *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
- */
+struct bd_holder_disk {
+	struct list_head	list;
+	struct gendisk		*disk;
+	int			refcnt;
+};
+
+static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
+						  struct gendisk *disk)
+{
+	struct bd_holder_disk *holder;
+
+	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
+		if (holder->disk == disk)
+			return holder;
+	return NULL;
+}
 
 static int add_symlink(struct kobject *from, struct kobject *to)
 {
-	if (!from || !to)
-		return 0;
 	return sysfs_create_link(from, to, kobject_name(to));
 }
 
 static void del_symlink(struct kobject *from, struct kobject *to)
 {
-	if (!from || !to)
-		return;
 	sysfs_remove_link(from, kobject_name(to));
 }
 
-/*
- * 'struct bd_holder' contains pointers to kobjects symlinked by
- * bd_claim_by_kobject.
- * It's connected to bd_holder_list which is protected by bdev->bd_sem.
- */
-struct bd_holder {
-	struct list_head list;	/* chain of holders of the bdev */
-	int count;		/* references from the holder */
-	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
-	struct kobject *hdev;	/* e.g. "/block/dm-0" */
-	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
-	struct kobject *sdev;	/* e.g. "/block/sda" */
-};
-
-/*
- * Get references of related kobjects at once.
- * Returns 1 on success. 0 on failure.
- *
- * Should call bd_holder_release_dirs() after successful use.
- */
-static int bd_holder_grab_dirs(struct block_device *bdev,
-			struct bd_holder *bo)
-{
-	if (!bdev || !bo)
-		return 0;
-
-	bo->sdir = kobject_get(bo->sdir);
-	if (!bo->sdir)
-		return 0;
-
-	bo->hdev = kobject_get(bo->sdir->parent);
-	if (!bo->hdev)
-		goto fail_put_sdir;
-
-	bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
-	if (!bo->sdev)
-		goto fail_put_hdev;
-
-	bo->hdir = kobject_get(bdev->bd_part->holder_dir);
-	if (!bo->hdir)
-		goto fail_put_sdev;
-
-	return 1;
-
-fail_put_sdev:
-	kobject_put(bo->sdev);
-fail_put_hdev:
-	kobject_put(bo->hdev);
-fail_put_sdir:
-	kobject_put(bo->sdir);
-
-	return 0;
-}
-
-/* Put references of related kobjects at once. */
-static void bd_holder_release_dirs(struct bd_holder *bo)
-{
-	kobject_put(bo->hdir);
-	kobject_put(bo->sdev);
-	kobject_put(bo->hdev);
-	kobject_put(bo->sdir);
-}
-
-static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
-{
-	struct bd_holder *bo;
-
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
-		return NULL;
-
-	bo->count = 1;
-	bo->sdir = kobj;
-
-	return bo;
-}
-
-static void free_bd_holder(struct bd_holder *bo)
-{
-	kfree(bo);
-}
-
 /**
- * find_bd_holder - find matching struct bd_holder from the block device
+ * bd_link_disk_holder - create symlinks between holding disk and slave bdev
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
  *
- * @bdev:	struct block device to be searched
- * @bo:		target struct bd_holder
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
  *
- * Returns matching entry with @bo in @bdev->bd_holder_list.
- * If found, increment the reference count and return the pointer.
- * If not found, returns NULL.
+ * This function creates the following sysfs symlinks.
+ *
+ * - from "slaves" directory of the holder @disk to the claimed @bdev
+ * - from "holders" directory of the @bdev to the holder @disk
+ *
+ * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
+ * passed to bd_link_disk_holder(), then:
+ *
+ *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
+ *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
+ *
+ * The caller must have claimed @bdev before calling this function and
+ * ensure that both @bdev and @disk are valid during the creation and
+ * lifetime of these symlinks.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
-static struct bd_holder *find_bd_holder(struct block_device *bdev,
-					struct bd_holder *bo)
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 {
-	struct bd_holder *tmp;
-
-	list_for_each_entry(tmp, &bdev->bd_holder_list, list)
-		if (tmp->sdir == bo->sdir) {
-			tmp->count++;
-			return tmp;
-		}
-
-	return NULL;
-}
-
-/**
- * add_bd_holder - create sysfs symlinks for bd_claim() relationship
- *
- * @bdev:	block device to be bd_claimed
- * @bo:		preallocated and initialized by alloc_bd_holder()
- *
- * Add @bo to @bdev->bd_holder_list, create symlinks.
- *
- * Returns 0 if symlinks are created.
- * Returns -ve if something fails.
- */
-static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
-{
-	int err;
-
-	if (!bo)
-		return -EINVAL;
-
-	if (!bd_holder_grab_dirs(bdev, bo))
-		return -EBUSY;
-
-	err = add_symlink(bo->sdir, bo->sdev);
-	if (err)
-		return err;
-
-	err = add_symlink(bo->hdir, bo->hdev);
-	if (err) {
-		del_symlink(bo->sdir, bo->sdev);
-		return err;
-	}
-
-	list_add_tail(&bo->list, &bdev->bd_holder_list);
-	return 0;
-}
-
-/**
- * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
- *
- * @bdev:	block device to be bd_claimed
- * @kobj:	holder's kobject
- *
- * If there is matching entry with @kobj in @bdev->bd_holder_list
- * and no other bd_claim() from the same kobject,
- * remove the struct bd_holder from the list, delete symlinks for it.
- *
- * Returns a pointer to the struct bd_holder when it's removed from the list
- * and ready to be freed.
- * Returns NULL if matching claim isn't found or there is other bd_claim()
- * by the same kobject.
- */
-static struct bd_holder *del_bd_holder(struct block_device *bdev,
-					struct kobject *kobj)
-{
-	struct bd_holder *bo;
-
-	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
-		if (bo->sdir == kobj) {
-			bo->count--;
-			BUG_ON(bo->count < 0);
-			if (!bo->count) {
-				list_del(&bo->list);
-				del_symlink(bo->sdir, bo->sdev);
-				del_symlink(bo->hdir, bo->hdev);
-				bd_holder_release_dirs(bo);
-				return bo;
-			}
-			break;
-		}
-	}
-
-	return NULL;
-}
-
-/**
- * bd_claim_by_kobject - bd_claim() with additional kobject signature
- *
- * @bdev:	block device to be claimed
- * @holder:	holder's signature
- * @kobj:	holder's kobject
- *
- * Do bd_claim() and if it succeeds, create sysfs symlinks between
- * the bdev and the holder's kobject.
- * Use bd_release_from_kobject() when relesing the claimed bdev.
- *
- * Returns 0 on success. (same as bd_claim())
- * Returns errno on failure.
- */
-static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
-				struct kobject *kobj)
-{
-	int err;
-	struct bd_holder *bo, *found;
-
-	if (!kobj)
-		return -EINVAL;
-
-	bo = alloc_bd_holder(kobj);
-	if (!bo)
-		return -ENOMEM;
+	struct bd_holder_disk *holder;
+	int ret = 0;
 
 	mutex_lock(&bdev->bd_mutex);
 
-	err = bd_claim(bdev, holder);
-	if (err)
-		goto fail;
+	WARN_ON_ONCE(!bdev->bd_holder);
 
-	found = find_bd_holder(bdev, bo);
-	if (found)
-		goto fail;
+	/* FIXME: remove the following once add_disk() handles errors */
+	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
+		goto out_unlock;
 
-	err = add_bd_holder(bdev, bo);
-	if (err)
-		bd_release(bdev);
-	else
-		bo = NULL;
-fail:
+	holder = bd_find_holder_disk(bdev, disk);
+	if (holder) {
+		holder->refcnt++;
+		goto out_unlock;
+	}
+
+	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
+	if (!holder) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	INIT_LIST_HEAD(&holder->list);
+	holder->disk = disk;
+	holder->refcnt = 1;
+
+	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+	if (ret)
+		goto out_free;
+
+	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
+	if (ret)
+		goto out_del;
+	/*
+	 * bdev could be deleted beneath us which would implicitly destroy
+	 * the holder directory.  Hold on to it.
+	 */
+	kobject_get(bdev->bd_part->holder_dir);
+
+	list_add(&holder->list, &bdev->bd_holder_disks);
+	goto out_unlock;
+
+out_del:
+	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+out_free:
+	kfree(holder);
+out_unlock:
 	mutex_unlock(&bdev->bd_mutex);
-	free_bd_holder(bo);
-	return err;
+	return ret;
 }
+EXPORT_SYMBOL_GPL(bd_link_disk_holder);
 
 /**
- * bd_release_from_kobject - bd_release() with additional kobject signature
+ * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
  *
- * @bdev:	block device to be released
- * @kobj:	holder's kobject
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
  *
- * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
+ * CONTEXT:
+ * Might sleep.
  */
-static void bd_release_from_kobject(struct block_device *bdev,
-					struct kobject *kobj)
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
 {
-	if (!kobj)
-		return;
+	struct bd_holder_disk *holder;
 
 	mutex_lock(&bdev->bd_mutex);
-	bd_release(bdev);
-	free_bd_holder(del_bd_holder(bdev, kobj));
+
+	holder = bd_find_holder_disk(bdev, disk);
+
+	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
+		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+		del_symlink(bdev->bd_part->holder_dir,
+			    &disk_to_dev(disk)->kobj);
+		kobject_put(bdev->bd_part->holder_dir);
+		list_del_init(&holder->list);
+		kfree(holder);
+	}
+
 	mutex_unlock(&bdev->bd_mutex);
 }
-
-/**
- * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
- *
- * @bdev:	block device to be claimed
- * @holder:	holder's signature
- * @disk:	holder's gendisk
- *
- * Call bd_claim_by_kobject() with getting @disk->slave_dir.
- */
-int bd_claim_by_disk(struct block_device *bdev, void *holder,
-			struct gendisk *disk)
-{
-	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
-}
-EXPORT_SYMBOL_GPL(bd_claim_by_disk);
-
-/**
- * bd_release_from_disk - wrapper function for bd_release_from_kobject()
- *
- * @bdev:	block device to be claimed
- * @disk:	holder's gendisk
- *
- * Call bd_release_from_kobject() and put @disk->slave_dir.
- */
-void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
-{
-	bd_release_from_kobject(bdev, disk->slave_dir);
-	kobject_put(disk->slave_dir);
-}
-EXPORT_SYMBOL_GPL(bd_release_from_disk);
+EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
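/*
 * Illustrative sketch only, not part of this patch: a stacking driver that
 * has already claimed @bdev exclusively would pair the two calls roughly as
 * below; "my_disk" stands in for the driver's own struct gendisk.
 */
static int example_attach_slave(struct gendisk *my_disk,
				struct block_device *bdev)
{
	/* creates the slaves/ and holders/ symlinks documented above */
	return bd_link_disk_holder(bdev, my_disk);
}

static void example_detach_slave(struct gendisk *my_disk,
				 struct block_device *bdev)
{
	/* removes those symlinks once the last reference from my_disk is dropped */
	bd_unlink_disk_holder(bdev, my_disk);
}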
 #endif
 
-/*
- * Tries to open block device by device number.  Use it ONLY if you
- * really do not have anything better - i.e. when you are behind a
- * truly sucky interface and all you are given is a device number.  _Never_
- * to be used for internal purposes.  If you ever need it - reconsider
- * your API.
- */
-struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
-{
-	struct block_device *bdev = bdget(dev);
-	int err = -ENOMEM;
-	if (bdev)
-		err = blkdev_get(bdev, mode);
-	return err ? ERR_PTR(err) : bdev;
-}
-
-EXPORT_SYMBOL(open_by_devnum);
-
 /**
  * flush_disk - invalidates all buffer-cache entries on a disk
  *
  * @bdev:      struct block device to be flushed
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Invalidates all buffer-cache entries on a disk. It should be called
  * when a disk has been changed -- either by a media change or online
  * resize.
  */
-static void flush_disk(struct block_device *bdev)
+static void flush_disk(struct block_device *bdev, bool kill_dirty)
 {
-	if (__invalidate_device(bdev)) {
+	if (__invalidate_device(bdev, kill_dirty)) {
 		char name[BDEVNAME_SIZE] = "";
 
 		if (bdev->bd_disk)
@@ -1263,7 +973,7 @@
 		       "%s: detected capacity change from %lld to %lld\n",
 		       name, bdev_size, disk_size);
 		i_size_write(bdev->bd_inode, disk_size);
-		flush_disk(bdev);
+		flush_disk(bdev, false);
 	}
 }
 EXPORT_SYMBOL(check_disk_size_change);
@@ -1309,13 +1019,14 @@
 {
 	struct gendisk *disk = bdev->bd_disk;
 	const struct block_device_operations *bdops = disk->fops;
+	unsigned int events;
 
-	if (!bdops->media_changed)
-		return 0;
-	if (!bdops->media_changed(bdev->bd_disk))
+	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
+				   DISK_EVENT_EJECT_REQUEST);
+	if (!(events & DISK_EVENT_MEDIA_CHANGE))
 		return 0;
 
-	flush_disk(bdev);
+	flush_disk(bdev, true);
 	if (bdops->revalidate_disk)
 		bdops->revalidate_disk(bdev->bd_disk);
 	return 1;
@@ -1475,17 +1186,170 @@
 	return ret;
 }
 
-int blkdev_get(struct block_device *bdev, fmode_t mode)
+/**
+ * blkdev_get - open a block device
+ * @bdev: block_device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open @bdev with @mode.  If @mode includes %FMODE_EXCL, @bdev is
+ * open with exclusive access.  Specifying %FMODE_EXCL with %NULL
+ * @holder is invalid.  Exclusive opens may nest for the same @holder.
+ *
+ * On success, the reference count of @bdev is unchanged.  On failure,
+ * @bdev is put.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 {
-	return __blkdev_get(bdev, mode, 0);
+	struct block_device *whole = NULL;
+	int res;
+
+	WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
+
+	if ((mode & FMODE_EXCL) && holder) {
+		whole = bd_start_claiming(bdev, holder);
+		if (IS_ERR(whole)) {
+			bdput(bdev);
+			return PTR_ERR(whole);
+		}
+	}
+
+	res = __blkdev_get(bdev, mode, 0);
+
+	if (whole) {
+		/* finish claiming */
+		mutex_lock(&bdev->bd_mutex);
+		spin_lock(&bdev_lock);
+
+		if (!res) {
+			BUG_ON(!bd_may_claim(bdev, whole, holder));
+			/*
+			 * Note that for a whole device bd_holders
+			 * will be incremented twice, and bd_holder
+			 * will be set to bd_may_claim before being
+			 * set to holder
+			 */
+			whole->bd_holders++;
+			whole->bd_holder = bd_may_claim;
+			bdev->bd_holders++;
+			bdev->bd_holder = holder;
+		}
+
+		/* tell others that we're done */
+		BUG_ON(whole->bd_claiming != holder);
+		whole->bd_claiming = NULL;
+		wake_up_bit(&whole->bd_claiming, 0);
+
+		spin_unlock(&bdev_lock);
+
+		/*
+		 * Block event polling for write claims.  Any write
+		 * holder makes the write_holder state stick until all
+		 * are released.  This is good enough and tracking
+		 * individual writeable references is too fragile given
+		 * the way @mode is used in blkdev_get/put().
+		 */
+		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+			bdev->bd_write_holder = true;
+			disk_block_events(bdev->bd_disk);
+		}
+
+		mutex_unlock(&bdev->bd_mutex);
+		bdput(whole);
+	}
+
+	return res;
 }
 EXPORT_SYMBOL(blkdev_get);
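/*
 * Illustrative sketch only, not part of this patch: an exclusive open of a
 * bdev the caller already holds a reference to.  The holder cookie must stay
 * valid until the matching blkdev_put(); on failure blkdev_get() drops the
 * reference to @bdev itself.
 */
static int example_open_excl(struct block_device *bdev, void *holder)
{
	return blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
}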
 
+/**
+ * blkdev_get_by_path - open a block device by name
+ * @path: path to the block device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open the blockdevice described by the device file at @path.  @mode
+ * and @holder are identical to blkdev_get().
+ *
+ * On success, the returned block_device has reference count of one.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to block_device on success, ERR_PTR(-errno) on failure.
+ */
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+					void *holder)
+{
+	struct block_device *bdev;
+	int err;
+
+	bdev = lookup_bdev(path);
+	if (IS_ERR(bdev))
+		return bdev;
+
+	err = blkdev_get(bdev, mode, holder);
+	if (err)
+		return ERR_PTR(err);
+
+	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
+		blkdev_put(bdev, mode);
+		return ERR_PTR(-EACCES);
+	}
+
+	return bdev;
+}
+EXPORT_SYMBOL(blkdev_get_by_path);
+
+/**
+ * blkdev_get_by_dev - open a block device by device number
+ * @dev: device number of block device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open the blockdevice described by device number @dev.  @mode and
+ * @holder are identical to blkdev_get().
+ *
+ * Use it ONLY if you really do not have anything better - i.e. when
+ * you are behind a truly sucky interface and all you are given is a
+ * device number.  _Never_ to be used for internal purposes.  If you
+ * ever need it - reconsider your API.
+ *
+ * On success, the returned block_device has reference count of one.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to block_device on success, ERR_PTR(-errno) on failure.
+ */
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
+{
+	struct block_device *bdev;
+	int err;
+
+	bdev = bdget(dev);
+	if (!bdev)
+		return ERR_PTR(-ENOMEM);
+
+	err = blkdev_get(bdev, mode, holder);
+	if (err)
+		return ERR_PTR(err);
+
+	return bdev;
+}
+EXPORT_SYMBOL(blkdev_get_by_dev);
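The kerneldoc above only describes the new interface; the patch itself just converts in-tree callers. As a rough, hedged sketch (the function names and FMODE combination below are illustrative, not taken from this series), a filesystem that used to pair open_bdev_exclusive()/close_bdev_exclusive() would now claim and release a device roughly like this:

```c
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Hedged sketch: claiming a device with the new API (names are made up). */
static struct block_device *example_claim(const char *path, void *holder)
{
	struct block_device *bdev;

	/* FMODE_EXCL plus a non-NULL holder takes an exclusive claim */
	bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
	if (IS_ERR(bdev))
		return bdev;	/* e.g. -EACCES for a read-only device */
	return bdev;
}

static void example_release(struct block_device *bdev)
{
	/* the mode must match the one passed at open time */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
```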
+
 static int blkdev_open(struct inode * inode, struct file * filp)
 {
-	struct block_device *whole = NULL;
 	struct block_device *bdev;
-	int res;
 
 	/*
 	 * Preserve backwards compatibility and allow large file access
@@ -1506,26 +1370,9 @@
 	if (bdev == NULL)
 		return -ENOMEM;
 
-	if (filp->f_mode & FMODE_EXCL) {
-		whole = bd_start_claiming(bdev, filp);
-		if (IS_ERR(whole)) {
-			bdput(bdev);
-			return PTR_ERR(whole);
-		}
-	}
-
 	filp->f_mapping = bdev->bd_inode->i_mapping;
 
-	res = blkdev_get(bdev, filp->f_mode);
-
-	if (whole) {
-		if (res == 0)
-			bd_finish_claiming(bdev, whole, filp);
-		else
-			bd_abort_claiming(whole, filp);
-	}
-
-	return res;
+	return blkdev_get(bdev, filp->f_mode, filp);
 }
 
 static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
@@ -1539,6 +1386,7 @@
 		bdev->bd_part_count--;
 
 	if (!--bdev->bd_openers) {
+		WARN_ON_ONCE(bdev->bd_holders);
 		sync_blockdev(bdev);
 		kill_bdev(bdev);
 	}
@@ -1569,6 +1417,44 @@
 
 int blkdev_put(struct block_device *bdev, fmode_t mode)
 {
+	if (mode & FMODE_EXCL) {
+		bool bdev_free;
+
+		/*
+		 * Release a claim on the device.  The holder fields
+		 * are protected with bdev_lock.  bd_mutex is to
+		 * synchronize disk_holder unlinking.
+		 */
+		mutex_lock(&bdev->bd_mutex);
+		spin_lock(&bdev_lock);
+
+		WARN_ON_ONCE(--bdev->bd_holders < 0);
+		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
+
+		/* bd_contains might point to self, check in a separate step */
+		if ((bdev_free = !bdev->bd_holders))
+			bdev->bd_holder = NULL;
+		if (!bdev->bd_contains->bd_holders)
+			bdev->bd_contains->bd_holder = NULL;
+
+		spin_unlock(&bdev_lock);
+
+		/*
+		 * If this was the last claim, remove holder link and
+		 * unblock event polling if it was a write holder.
+		 */
+		if (bdev_free) {
+			if (bdev->bd_write_holder) {
+				disk_unblock_events(bdev->bd_disk);
+				bdev->bd_write_holder = false;
+			} else
+				disk_check_events(bdev->bd_disk);
+		}
+
+		mutex_unlock(&bdev->bd_mutex);
+	} else
+		disk_check_events(bdev->bd_disk);
+
 	return __blkdev_put(bdev, mode, 0);
 }
 EXPORT_SYMBOL(blkdev_put);
@@ -1576,8 +1462,7 @@
 static int blkdev_close(struct inode * inode, struct file * filp)
 {
 	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
-	if (bdev->bd_holder == filp)
-		bd_release(bdev);
+
 	return blkdev_put(bdev, filp->f_mode);
 }
 
@@ -1722,68 +1607,7 @@
 }
 EXPORT_SYMBOL(lookup_bdev);
 
-/**
- * open_bdev_exclusive  -  open a block device by name and set it up for use
- *
- * @path:	special file representing the block device
- * @mode:	FMODE_... combination to pass be used
- * @holder:	owner for exclusion
- *
- * Open the blockdevice described by the special file at @path, claim it
- * for the @holder.
- */
-struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
-{
-	struct block_device *bdev, *whole;
-	int error;
-
-	bdev = lookup_bdev(path);
-	if (IS_ERR(bdev))
-		return bdev;
-
-	whole = bd_start_claiming(bdev, holder);
-	if (IS_ERR(whole)) {
-		bdput(bdev);
-		return whole;
-	}
-
-	error = blkdev_get(bdev, mode);
-	if (error)
-		goto out_abort_claiming;
-
-	error = -EACCES;
-	if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
-		goto out_blkdev_put;
-
-	bd_finish_claiming(bdev, whole, holder);
-	return bdev;
-
-out_blkdev_put:
-	blkdev_put(bdev, mode);
-out_abort_claiming:
-	bd_abort_claiming(whole, holder);
-	return ERR_PTR(error);
-}
-
-EXPORT_SYMBOL(open_bdev_exclusive);
-
-/**
- * close_bdev_exclusive  -  close a blockdevice opened by open_bdev_exclusive()
- *
- * @bdev:	blockdevice to close
- * @mode:	mode, must match that used to open.
- *
- * This is the counterpart to open_bdev_exclusive().
- */
-void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
-{
-	bd_release(bdev);
-	blkdev_put(bdev, mode);
-}
-
-EXPORT_SYMBOL(close_bdev_exclusive);
-
-int __invalidate_device(struct block_device *bdev)
+int __invalidate_device(struct block_device *bdev, bool kill_dirty)
 {
 	struct super_block *sb = get_super(bdev);
 	int res = 0;
@@ -1796,7 +1620,7 @@
 		 * hold).
 		 */
 		shrink_dcache_sb(sb);
-		res = invalidate_inodes(sb);
+		res = invalidate_inodes(sb, kill_dirty);
 		drop_super(sb);
 	}
 	invalidate_bdev(bdev);
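__invalidate_device() now lets the caller decide whether dirty inodes should be discarded as well. A hedged sketch of a hot-unplug style caller (the function itself is hypothetical):

```c
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/genhd.h>

/* Hedged sketch: a removal path that also drops dirty state. */
static void example_media_gone(struct block_device *bdev)
{
	/* kill_dirty = true: the media is gone, dirty inodes cannot be written back */
	if (__invalidate_device(bdev, true))
		printk(KERN_WARNING "busy inodes left behind on %s\n",
		       bdev->bd_disk ? bdev->bd_disk->disk_name : "unknown");
}
```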
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 7bb3c02..ecb9fd3 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -4,6 +4,8 @@
 	select LIBCRC32C
 	select ZLIB_INFLATE
 	select ZLIB_DEFLATE
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
 	help
 	  Btrfs is a new filesystem with extents, writable snapshotting,
 	  support for multiple devices and many more features.
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index a35eb36..31610ea 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -6,5 +6,5 @@
 	   transaction.o inode.o file.o tree-defrag.o \
 	   extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
-	   export.o tree-log.o acl.o free-space-cache.o zlib.o \
+	   export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 6ae2c8c..9c94934 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -37,6 +37,9 @@
 	char *value = NULL;
 	struct posix_acl *acl;
 
+	if (!IS_POSIXACL(inode))
+		return NULL;
+
 	acl = get_cached_acl(inode, type);
 	if (acl != ACL_NOT_CACHED)
 		return acl;
@@ -60,8 +63,10 @@
 		size = __btrfs_getxattr(inode, name, value, size);
 		if (size > 0) {
 			acl = posix_acl_from_xattr(value, size);
-			if (IS_ERR(acl))
+			if (IS_ERR(acl)) {
+				kfree(value);
 				return acl;
+			}
 			set_cached_acl(inode, type, acl);
 		}
 		kfree(value);
@@ -82,6 +87,9 @@
 	struct posix_acl *acl;
 	int ret = 0;
 
+	if (!IS_POSIXACL(dentry->d_inode))
+		return -EOPNOTSUPP;
+
 	acl = btrfs_get_acl(dentry->d_inode, type);
 
 	if (IS_ERR(acl))
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 6ad63f1..ccc991c 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -157,7 +157,7 @@
 	/*
 	 * always compress this one file
 	 */
-	unsigned force_compress:1;
+	unsigned force_compress:4;
 
 	struct inode vfs_inode;
 };
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b50bc4b..4d2110e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -62,6 +62,9 @@
 	/* number of bytes on disk */
 	unsigned long compressed_len;
 
+	/* the compression algorithm for this bio */
+	int compress_type;
+
 	/* number of compressed pages in the array */
 	unsigned long nr_pages;
 
@@ -173,11 +176,12 @@
 	/* ok, we're the last bio for this extent, lets start
 	 * the decompression.
 	 */
-	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
-					cb->start,
-					cb->orig_bio->bi_io_vec,
-					cb->orig_bio->bi_vcnt,
-					cb->compressed_len);
+	ret = btrfs_decompress_biovec(cb->compress_type,
+				      cb->compressed_pages,
+				      cb->start,
+				      cb->orig_bio->bi_io_vec,
+				      cb->orig_bio->bi_vcnt,
+				      cb->compressed_len);
 csum_failed:
 	if (ret)
 		cb->errors = 1;
@@ -558,7 +562,7 @@
 	u64 em_len;
 	u64 em_start;
 	struct extent_map *em;
-	int ret;
+	int ret = -ENOMEM;
 	u32 *sums;
 
 	tree = &BTRFS_I(inode)->io_tree;
@@ -573,6 +577,9 @@
 
 	compressed_len = em->block_len;
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	if (!cb)
+		goto out;
+
 	atomic_set(&cb->pending_bios, 0);
 	cb->errors = 0;
 	cb->inode = inode;
@@ -588,17 +595,23 @@
 
 	cb->len = uncompressed_len;
 	cb->compressed_len = compressed_len;
+	cb->compress_type = extent_compress_type(bio_flags);
 	cb->orig_bio = bio;
 
 	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
 				 PAGE_CACHE_SIZE;
-	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
 				       GFP_NOFS);
+	if (!cb->compressed_pages)
+		goto fail1;
+
 	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
 	for (page_index = 0; page_index < nr_pages; page_index++) {
 		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
 							      __GFP_HIGHMEM);
+		if (!cb->compressed_pages[page_index])
+			goto fail2;
 	}
 	cb->nr_pages = nr_pages;
 
@@ -609,6 +622,8 @@
 	cb->len = uncompressed_len;
 
 	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+	if (!comp_bio)
+		goto fail2;
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
 	atomic_inc(&cb->pending_bios);
@@ -676,4 +691,329 @@
 
 	bio_put(comp_bio);
 	return 0;
+
+fail2:
+	for (page_index = 0; page_index < nr_pages; page_index++)
+		if (cb->compressed_pages[page_index])
+			__free_page(cb->compressed_pages[page_index]);
+
+	kfree(cb->compressed_pages);
+fail1:
+	kfree(cb);
+out:
+	free_extent_map(em);
+	return ret;
+}
+
+static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
+static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
+static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
+static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
+static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
+
+struct btrfs_compress_op *btrfs_compress_op[] = {
+	&btrfs_zlib_compress,
+	&btrfs_lzo_compress,
+};
+
+int __init btrfs_init_compress(void)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+		INIT_LIST_HEAD(&comp_idle_workspace[i]);
+		spin_lock_init(&comp_workspace_lock[i]);
+		atomic_set(&comp_alloc_workspace[i], 0);
+		init_waitqueue_head(&comp_workspace_wait[i]);
+	}
+	return 0;
+}
+
+/*
+ * this finds an available workspace or allocates a new one.
+ * ERR_PTR is returned if things go bad.
+ */
+static struct list_head *find_workspace(int type)
+{
+	struct list_head *workspace;
+	int cpus = num_online_cpus();
+	int idx = type - 1;
+
+	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
+	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
+	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
+	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
+	int *num_workspace			= &comp_num_workspace[idx];
+again:
+	spin_lock(workspace_lock);
+	if (!list_empty(idle_workspace)) {
+		workspace = idle_workspace->next;
+		list_del(workspace);
+		(*num_workspace)--;
+		spin_unlock(workspace_lock);
+		return workspace;
+
+	}
+	if (atomic_read(alloc_workspace) > cpus) {
+		DEFINE_WAIT(wait);
+
+		spin_unlock(workspace_lock);
+		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
+			schedule();
+		finish_wait(workspace_wait, &wait);
+		goto again;
+	}
+	atomic_inc(alloc_workspace);
+	spin_unlock(workspace_lock);
+
+	workspace = btrfs_compress_op[idx]->alloc_workspace();
+	if (IS_ERR(workspace)) {
+		atomic_dec(alloc_workspace);
+		wake_up(workspace_wait);
+	}
+	return workspace;
+}
+
+/*
+ * put a workspace struct back on the list or free it if we have enough
+ * idle ones sitting around
+ */
+static void free_workspace(int type, struct list_head *workspace)
+{
+	int idx = type - 1;
+	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
+	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
+	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
+	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
+	int *num_workspace			= &comp_num_workspace[idx];
+
+	spin_lock(workspace_lock);
+	if (*num_workspace < num_online_cpus()) {
+		list_add_tail(workspace, idle_workspace);
+		(*num_workspace)++;
+		spin_unlock(workspace_lock);
+		goto wake;
+	}
+	spin_unlock(workspace_lock);
+
+	btrfs_compress_op[idx]->free_workspace(workspace);
+	atomic_dec(alloc_workspace);
+wake:
+	if (waitqueue_active(workspace_wait))
+		wake_up(workspace_wait);
+}
+
+/*
+ * cleanup function for module exit
+ */
+static void free_workspaces(void)
+{
+	struct list_head *workspace;
+	int i;
+
+	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+		while (!list_empty(&comp_idle_workspace[i])) {
+			workspace = comp_idle_workspace[i].next;
+			list_del(workspace);
+			btrfs_compress_op[i]->free_workspace(workspace);
+			atomic_dec(&comp_alloc_workspace[i]);
+		}
+	}
+}
+
+/*
+ * given an address space and start/len, compress the bytes.
+ *
+ * pages are allocated to hold the compressed result and stored
+ * in 'pages'
+ *
+ * out_pages is used to return the number of pages allocated.  There
+ * may be pages allocated even if we return an error
+ *
+ * total_in is used to return the number of bytes actually read.  It
+ * may be smaller than len if we had to exit early because we
+ * ran out of room in the pages array or because we crossed the
+ * max_out threshold.
+ *
+ * total_out is used to return the total number of compressed bytes
+ *
+ * max_out tells us the max number of bytes that we're allowed to
+ * stuff into pages
+ */
+int btrfs_compress_pages(int type, struct address_space *mapping,
+			 u64 start, unsigned long len,
+			 struct page **pages,
+			 unsigned long nr_dest_pages,
+			 unsigned long *out_pages,
+			 unsigned long *total_in,
+			 unsigned long *total_out,
+			 unsigned long max_out)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -1;
+
+	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
+						      start, len, pages,
+						      nr_dest_pages, out_pages,
+						      total_in, total_out,
+						      max_out);
+	free_workspace(type, workspace);
+	return ret;
+}
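With the dispatch table in place, a caller picks the algorithm purely through the type argument. A hedged sketch of such a call; the wrapper and its page-array handling are illustrative only:

```c
#include <linux/pagemap.h>
#include "ctree.h"
#include "compression.h"

/* Hedged sketch: compressing one extent with the LZO backend. */
static int example_compress_extent(struct address_space *mapping, u64 start,
				   unsigned long len, struct page **pages,
				   unsigned long nr_pages)
{
	unsigned long out_pages = 0, total_in = 0, total_out = 0;

	/* type comes from the btrfs_compression_type enum in ctree.h */
	return btrfs_compress_pages(BTRFS_COMPRESS_LZO, mapping, start, len,
				    pages, nr_pages, &out_pages,
				    &total_in, &total_out,
				    nr_pages << PAGE_CACHE_SHIFT);
}
```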
+
+/*
+ * pages_in is an array of pages with compressed data.
+ *
+ * disk_start is the starting logical offset of this array in the file
+ *
+ * bvec is a bio_vec of pages from the file that we want to decompress into
+ *
+ * vcnt is the count of pages in the biovec
+ *
+ * srclen is the number of bytes in pages_in
+ *
+ * The basic idea is that we have a bio that was created by readpages.
+ * The pages in the bio are for the uncompressed data, and they may not
+ * be contiguous.  They all correspond to the range of bytes covered by
+ * the compressed extent.
+ */
+int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
+			    struct bio_vec *bvec, int vcnt, size_t srclen)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -ENOMEM;
+
+	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
+							 disk_start,
+							 bvec, vcnt, srclen);
+	free_workspace(type, workspace);
+	return ret;
+}
+
+/*
+ * a less complex decompression routine.  Our compressed data fits in a
+ * single page, and we want to read a single page out of it.
+ * start_byte tells us the offset into the compressed data we're interested in
+ */
+int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
+		     unsigned long start_byte, size_t srclen, size_t destlen)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -ENOMEM;
+
+	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
+						  dest_page, start_byte,
+						  srclen, destlen);
+
+	free_workspace(type, workspace);
+	return ret;
+}
+
+void btrfs_exit_compress(void)
+{
+	free_workspaces();
+}
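btrfs_init_compress() and btrfs_exit_compress() replace the zlib-only setup and teardown; presumably they are wired into the filesystem module's init and exit paths elsewhere in the series (that hunk is not shown here). A hedged sketch of the expected pairing:

```c
#include <linux/init.h>
#include "compression.h"

/* Hedged sketch: pairing the new init/exit helpers (names are illustrative). */
static int __init example_fs_init(void)
{
	return btrfs_init_compress();	/* set up per-type workspace lists */
}

static void __exit example_fs_exit(void)
{
	btrfs_exit_compress();		/* free any idle workspaces */
}
```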
+
+/*
+ * Copy uncompressed data from working buffer to pages.
+ *
+ * buf_start is the byte offset, within the uncompressed data, of the
+ * start of our workspace buffer.
+ *
+ * total_out is the byte offset just past the last valid byte of the buffer.
+ */
+int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
+			      unsigned long total_out, u64 disk_start,
+			      struct bio_vec *bvec, int vcnt,
+			      unsigned long *page_index,
+			      unsigned long *pg_offset)
+{
+	unsigned long buf_offset;
+	unsigned long current_buf_start;
+	unsigned long start_byte;
+	unsigned long working_bytes = total_out - buf_start;
+	unsigned long bytes;
+	char *kaddr;
+	struct page *page_out = bvec[*page_index].bv_page;
+
+	/*
+	 * start_byte is the first byte of the page we're currently
+	 * copying into, relative to the start of the uncompressed data.
+	 */
+	start_byte = page_offset(page_out) - disk_start;
+
+	/* we haven't yet hit data corresponding to this page */
+	if (total_out <= start_byte)
+		return 1;
+
+	/*
+	 * the start of the data we care about is offset into
+	 * the middle of our working buffer
+	 */
+	if (total_out > start_byte && buf_start < start_byte) {
+		buf_offset = start_byte - buf_start;
+		working_bytes -= buf_offset;
+	} else {
+		buf_offset = 0;
+	}
+	current_buf_start = buf_start;
+
+	/* copy bytes from the working buffer into the pages */
+	while (working_bytes > 0) {
+		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
+			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(bytes, working_bytes);
+		kaddr = kmap_atomic(page_out, KM_USER0);
+		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+		kunmap_atomic(kaddr, KM_USER0);
+		flush_dcache_page(page_out);
+
+		*pg_offset += bytes;
+		buf_offset += bytes;
+		working_bytes -= bytes;
+		current_buf_start += bytes;
+
+		/* check if we need to pick another page */
+		if (*pg_offset == PAGE_CACHE_SIZE) {
+			(*page_index)++;
+			if (*page_index >= vcnt)
+				return 0;
+
+			page_out = bvec[*page_index].bv_page;
+			*pg_offset = 0;
+			start_byte = page_offset(page_out) - disk_start;
+
+			/*
+			 * make sure our new page is covered by this
+			 * working buffer
+			 */
+			if (total_out <= start_byte)
+				return 1;
+
+			/*
+			 * the next page in the biovec might not be adjacent
+			 * to the last page, but it might still be found
+			 * inside this working buffer. bump our offset pointer
+			 */
+			if (total_out > start_byte &&
+			    current_buf_start < start_byte) {
+				buf_offset = start_byte - buf_start;
+				working_bytes = total_out - start_byte;
+				current_buf_start = buf_start + buf_offset;
+			}
+		}
+	}
+
+	return 1;
 }
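The offset bookkeeping in btrfs_decompress_buf2page() is easier to follow outside the kernel. The toy program below models just the arithmetic under simplifying assumptions (a flat source buffer instead of a page-sized working buffer, offsets measured from the start of the uncompressed extent); it is not kernel code:

```c
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096UL

/* Copy total_out - buf_start bytes of uncompressed data, which start at
 * file offset buf_start, into fixed-size destination "pages". */
static int copy_buf_to_pages(const char *buf, unsigned long buf_start,
			     unsigned long total_out,
			     char pages[][PAGE_SZ], unsigned long vcnt,
			     unsigned long *page_index, unsigned long *pg_offset)
{
	unsigned long start_byte = *page_index * PAGE_SZ;	/* page's file offset */
	unsigned long working = total_out - buf_start;
	unsigned long buf_offset = 0;

	if (total_out <= start_byte)
		return 1;		/* buffer ends before this page begins */
	if (buf_start < start_byte) {	/* skip bytes that precede the page */
		buf_offset = start_byte - buf_start;
		working -= buf_offset;
	}

	while (working > 0) {
		unsigned long bytes = PAGE_SZ - *pg_offset;

		if (bytes > working)
			bytes = working;
		memcpy(&pages[*page_index][*pg_offset], buf + buf_offset, bytes);
		*pg_offset += bytes;
		buf_offset += bytes;
		working -= bytes;
		if (*pg_offset == PAGE_SZ) {	/* move on to the next page */
			(*page_index)++;
			*pg_offset = 0;
			if (*page_index >= vcnt)
				return 0;	/* ran out of destination pages */
		}
	}
	return 1;
}

int main(void)
{
	static char pages[3][PAGE_SZ];
	static char buf[6000];
	unsigned long page_index = 0, pg_offset = 0;

	memset(buf, 'x', sizeof(buf));
	copy_buf_to_pages(buf, 0, sizeof(buf), pages, 3, &page_index, &pg_offset);
	printf("stopped at page %lu, offset %lu\n", page_index, pg_offset);
	return 0;
}
```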
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 421f5b4..5100017 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -19,24 +19,27 @@
 #ifndef __BTRFS_COMPRESSION_
 #define __BTRFS_COMPRESSION_
 
-int btrfs_zlib_decompress(unsigned char *data_in,
-			  struct page *dest_page,
-			  unsigned long start_byte,
-			  size_t srclen, size_t destlen);
-int btrfs_zlib_compress_pages(struct address_space *mapping,
-			      u64 start, unsigned long len,
-			      struct page **pages,
-			      unsigned long nr_dest_pages,
-			      unsigned long *out_pages,
-			      unsigned long *total_in,
-			      unsigned long *total_out,
-			      unsigned long max_out);
-int btrfs_zlib_decompress_biovec(struct page **pages_in,
-			      u64 disk_start,
-			      struct bio_vec *bvec,
-			      int vcnt,
-			      size_t srclen);
-void btrfs_zlib_exit(void);
+int btrfs_init_compress(void);
+void btrfs_exit_compress(void);
+
+int btrfs_compress_pages(int type, struct address_space *mapping,
+			 u64 start, unsigned long len,
+			 struct page **pages,
+			 unsigned long nr_dest_pages,
+			 unsigned long *out_pages,
+			 unsigned long *total_in,
+			 unsigned long *total_out,
+			 unsigned long max_out);
+int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
+			    struct bio_vec *bvec, int vcnt, size_t srclen);
+int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
+		     unsigned long start_byte, size_t srclen, size_t destlen);
+int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
+			      unsigned long total_out, u64 disk_start,
+			      struct bio_vec *bvec, int vcnt,
+			      unsigned long *page_index,
+			      unsigned long *pg_offset);
+
 int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				  unsigned long len, u64 disk_start,
 				  unsigned long compressed_len,
@@ -44,4 +47,37 @@
 				  unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
+
+struct btrfs_compress_op {
+	struct list_head *(*alloc_workspace)(void);
+
+	void (*free_workspace)(struct list_head *workspace);
+
+	int (*compress_pages)(struct list_head *workspace,
+			      struct address_space *mapping,
+			      u64 start, unsigned long len,
+			      struct page **pages,
+			      unsigned long nr_dest_pages,
+			      unsigned long *out_pages,
+			      unsigned long *total_in,
+			      unsigned long *total_out,
+			      unsigned long max_out);
+
+	int (*decompress_biovec)(struct list_head *workspace,
+				 struct page **pages_in,
+				 u64 disk_start,
+				 struct bio_vec *bvec,
+				 int vcnt,
+				 size_t srclen);
+
+	int (*decompress)(struct list_head *workspace,
+			  unsigned char *data_in,
+			  struct page *dest_page,
+			  unsigned long start_byte,
+			  size_t srclen, size_t destlen);
+};
+
+extern struct btrfs_compress_op btrfs_zlib_compress;
+extern struct btrfs_compress_op btrfs_lzo_compress;
+
 #endif
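The two externs above are the concrete backends; any further algorithm would plug in through the same ops table. A hedged skeleton of such a backend, with everything here named hypothetically and only mirroring the structure that zlib.c and the new lzo.c provide:

```c
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "compression.h"

/* Hedged sketch: minimal workspace handling for a hypothetical backend. */
struct example_workspace {
	struct list_head list;
	/* algorithm-specific buffers would live here */
};

static struct list_head *example_alloc_workspace(void)
{
	struct example_workspace *ws = kzalloc(sizeof(*ws), GFP_NOFS);

	if (!ws)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
}

static void example_free_workspace(struct list_head *head)
{
	kfree(container_of(head, struct example_workspace, list));
}

struct btrfs_compress_op btrfs_example_compress = {
	.alloc_workspace	= example_alloc_workspace,
	.free_workspace		= example_free_workspace,
	/* .compress_pages, .decompress_biovec and .decompress omitted */
};
```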
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9ac1715..b5baff0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -105,6 +105,8 @@
 /* this also releases the path */
 void btrfs_free_path(struct btrfs_path *p)
 {
+	if (!p)
+		return;
 	btrfs_release_path(NULL, p);
 	kmem_cache_free(btrfs_path_cachep, p);
 }
@@ -2514,6 +2516,9 @@
 	btrfs_assert_tree_locked(path->nodes[1]);
 
 	right = read_node_slot(root, upper, slot + 1);
+	if (right == NULL)
+		return 1;
+
 	btrfs_tree_lock(right);
 	btrfs_set_lock_blocking(right);
 
@@ -2764,6 +2769,9 @@
 	btrfs_assert_tree_locked(path->nodes[1]);
 
 	left = read_node_slot(root, path->nodes[1], slot - 1);
+	if (left == NULL)
+		return 1;
+
 	btrfs_tree_lock(left);
 	btrfs_set_lock_blocking(left);
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index a142d20..7f78cc7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -27,6 +27,7 @@
 #include <linux/backing-dev.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/kobject.h>
 #include <asm/kmap_types.h>
 #include "extent_io.h"
 #include "extent_map.h"
@@ -294,6 +295,14 @@
 #define BTRFS_FSID_SIZE 16
 #define BTRFS_HEADER_FLAG_WRITTEN	(1ULL << 0)
 #define BTRFS_HEADER_FLAG_RELOC		(1ULL << 1)
+
+/*
+ * File system states
+ */
+
+/* Errors detected */
+#define BTRFS_SUPER_FLAG_ERROR		(1ULL << 2)
+
 #define BTRFS_SUPER_FLAG_SEEDING	(1ULL << 32)
 #define BTRFS_SUPER_FLAG_METADUMP	(1ULL << 33)
 
@@ -398,13 +407,15 @@
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(1ULL << 1)
 #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS	(1ULL << 2)
+#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO	(1ULL << 3)
 
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
 #define BTRFS_FEATURE_INCOMPAT_SUPP			\
 	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
 	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
-	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
+	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO)
 
 /*
  * A leaf is full of items. offset and size tell us where to find
@@ -551,9 +562,11 @@
 } __attribute__ ((__packed__));
 
 enum btrfs_compression_type {
-	BTRFS_COMPRESS_NONE = 0,
-	BTRFS_COMPRESS_ZLIB = 1,
-	BTRFS_COMPRESS_LAST = 2,
+	BTRFS_COMPRESS_NONE  = 0,
+	BTRFS_COMPRESS_ZLIB  = 1,
+	BTRFS_COMPRESS_LZO   = 2,
+	BTRFS_COMPRESS_TYPES = 2,
+	BTRFS_COMPRESS_LAST  = 3,
 };
 
 struct btrfs_inode_item {
@@ -597,6 +610,8 @@
 	u8 type;
 } __attribute__ ((__packed__));
 
+#define BTRFS_ROOT_SUBVOL_RDONLY	(1ULL << 0)
+
 struct btrfs_root_item {
 	struct btrfs_inode_item inode;
 	__le64 generation;
@@ -714,6 +729,15 @@
 	u64 disk_total;		/* total bytes on disk, takes mirrors into
 				   account */
 
+	/*
+	 * we bump reservation progress every time we decrement
+	 * bytes_reserved.  This way people waiting for reservations
+	 * know something good has happened and they can check
+	 * for progress.  The number here isn't to be trusted; it
+	 * just shows reclaim activity.
+	 */
+	unsigned long reservation_progress;
+
 	int full;		/* indicates that we cannot allocate any more
 				   chunks for this space */
 	int force_alloc;	/* set if we need to force a chunk alloc for
@@ -895,7 +919,8 @@
 	 */
 	u64 last_trans_log_full_commit;
 	u64 open_ioctl_trans;
-	unsigned long mount_opt;
+	unsigned long mount_opt:20;
+	unsigned long compress_type:4;
 	u64 max_inline;
 	u64 alloc_start;
 	struct btrfs_transaction *running_transaction;
@@ -1050,6 +1075,9 @@
 	unsigned metadata_ratio;
 
 	void *bdev_holder;
+
+	/* filesystem state */
+	u64 fs_state;
 };
 
 /*
@@ -1235,6 +1263,7 @@
 #define BTRFS_MOUNT_SPACE_CACHE		(1 << 12)
 #define BTRFS_MOUNT_CLEAR_CACHE		(1 << 13)
 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
+#define BTRFS_MOUNT_ENOSPC_DEBUG	 (1 << 15)
 
 #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
@@ -1893,6 +1922,11 @@
 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
 			 last_snapshot, 64);
 
+static inline bool btrfs_root_readonly(struct btrfs_root *root)
+{
+	return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
+}
+
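The new helper gives write paths a single place to test the read-only subvolume flag. A hedged sketch of how such a check would sit at the top of a write-side operation (the function is hypothetical; the real checks live in the write/ioctl paths elsewhere in the series):

```c
#include <linux/errno.h>
#include "ctree.h"

/* Hedged sketch: bail out early on read-only subvolumes. */
static int example_modify_subvol(struct btrfs_root *root)
{
	if (btrfs_root_readonly(root))
		return -EROFS;
	/* ... proceed with the actual modification ... */
	return 0;
}
```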
 /* struct btrfs_super_block */
 
 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
@@ -2145,6 +2179,7 @@
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 group_start);
 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
+u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
@@ -2188,6 +2223,14 @@
 int btrfs_set_block_group_rw(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
+int btrfs_error_unpin_extent_range(struct btrfs_root *root,
+				   u64 start, u64 end);
+int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
+			       u64 num_bytes);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 type);
+
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
 		     int level, int *slot);
@@ -2541,6 +2584,14 @@
 /* super.c */
 int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
+void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
+		     unsigned int line, int errno);
+
+#define btrfs_std_error(fs_info, errno)				\
+do {								\
+	if ((errno))						\
+		__btrfs_std_error((fs_info), __func__, __LINE__, (errno));\
+} while (0)
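btrfs_std_error() is a no-op for errno == 0 and otherwise forwards the failing function and line to __btrfs_std_error(). A hedged sketch of a call site; the surrounding function and the forced -EIO are purely illustrative:

```c
#include <linux/errno.h>
#include "ctree.h"

/* Hedged sketch: reporting a fatal metadata error. */
static int example_write_metadata(struct btrfs_fs_info *fs_info)
{
	int ret = -EIO;			/* pretend the write failed */

	btrfs_std_error(fs_info, ret);	/* records __func__ and __LINE__ */
	return ret;
}
```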
 
 /* acl.c */
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 51d2e4d..e1aa8d6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -44,6 +44,20 @@
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
+static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+				    int read_only);
+static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
+static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+				      struct btrfs_root *root);
+static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
+static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
+static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+					struct extent_io_tree *dirty_pages,
+					int mark);
+static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+				       struct extent_io_tree *pinned_extents);
+static int btrfs_cleanup_transaction(struct btrfs_root *root);
 
 /*
  * end_io_wq structs are used to do processing in task context when an IO is
@@ -345,14 +359,22 @@
 
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-	if (page->private == EXTENT_PAGE_PRIVATE)
+	if (page->private == EXTENT_PAGE_PRIVATE) {
+		WARN_ON(1);
 		goto out;
-	if (!page->private)
+	}
+	if (!page->private) {
+		WARN_ON(1);
 		goto out;
+	}
 	len = page->private >> 2;
 	WARN_ON(len == 0);
 
 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	if (eb == NULL) {
+		WARN_ON(1);
+		goto out;
+	}
 	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
 					     btrfs_header_generation(eb));
 	BUG_ON(ret);
@@ -427,6 +449,10 @@
 	WARN_ON(len == 0);
 
 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	if (eb == NULL) {
+		ret = -EIO;
+		goto out;
+	}
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
@@ -1145,6 +1171,7 @@
 	}
 	btrfs_free_path(path);
 	if (ret) {
+		kfree(root);
 		if (ret > 0)
 			ret = -ENOENT;
 		return ERR_PTR(ret);
@@ -1527,6 +1554,7 @@
 		spin_unlock(&root->fs_info->new_trans_lock);
 
 		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(IS_ERR(trans));
 		if (transid == trans->transid) {
 			ret = btrfs_commit_transaction(trans, root);
 			BUG_ON(ret);
@@ -1713,8 +1741,10 @@
 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
-	if (!bh)
+	if (!bh) {
+		err = -EINVAL;
 		goto fail_iput;
+	}
 
 	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
 	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
@@ -1727,6 +1757,11 @@
 	if (!btrfs_super_root(disk_super))
 		goto fail_iput;
 
+	/* check FS state, whether FS is broken. */
+	fs_info->fs_state |= btrfs_super_flags(disk_super);
+
+	btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+
 	ret = btrfs_parse_options(tree_root, options);
 	if (ret) {
 		err = ret;
@@ -1744,10 +1779,10 @@
 	}
 
 	features = btrfs_super_incompat_flags(disk_super);
-	if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
-		features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-		btrfs_set_super_incompat_flags(disk_super, features);
-	}
+	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
+	if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
+		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+	btrfs_set_super_incompat_flags(disk_super, features);
 
 	features = btrfs_super_compat_ro_flags(disk_super) &
 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
@@ -1957,7 +1992,9 @@
 		btrfs_set_opt(fs_info->mount_opt, SSD);
 	}
 
-	if (btrfs_super_log_root(disk_super) != 0) {
+	/* do not make disk changes in broken FS */
+	if (btrfs_super_log_root(disk_super) != 0 &&
+	    !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
 		u64 bytenr = btrfs_super_log_root(disk_super);
 
 		if (fs_devices->rw_devices == 0) {
@@ -2421,10 +2458,14 @@
 	up_write(&root->fs_info->cleanup_work_sem);
 
 	trans = btrfs_join_transaction(root, 1);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	ret = btrfs_commit_transaction(trans, root);
 	BUG_ON(ret);
 	/* run commit again to drop the original snapshot */
 	trans = btrfs_join_transaction(root, 1);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	btrfs_commit_transaction(trans, root);
 	ret = btrfs_write_and_wait_transaction(NULL, root);
 	BUG_ON(ret);
@@ -2442,8 +2483,28 @@
 	smp_mb();
 
 	btrfs_put_block_group_cache(fs_info);
+
+	/*
+	 * There are two situations in which a broken btrfs flips read-only:
+	 *
+	 * 1. btrfs flips read-only somewhere else before
+	 * btrfs_commit_super; sb->s_flags already carries MS_RDONLY,
+	 * so btrfs skips writing the sb here to keep the
+	 * ERROR state on disk.
+	 *
+	 * 2. btrfs flips read-only right in btrfs_commit_super; in that
+	 * case btrfs cannot write the sb via btrfs_commit_super, and
+	 * since fs_state has the BTRFS_SUPER_FLAG_ERROR flag set,
+	 * btrfs cleans up all FS resources first and writes the sb then.
+	 */
 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
-		ret =  btrfs_commit_super(root);
+		ret = btrfs_commit_super(root);
+		if (ret)
+			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
+	}
+
+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		ret = btrfs_error_commit_super(root);
 		if (ret)
 			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
 	}
@@ -2502,6 +2563,8 @@
 	kfree(fs_info->chunk_root);
 	kfree(fs_info->dev_root);
 	kfree(fs_info->csum_root);
+	kfree(fs_info);
+
 	return 0;
 }
 
@@ -2619,6 +2682,352 @@
 	return 0;
 }
 
+static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+			      int read_only)
+{
+	if (read_only)
+		return;
+
+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+		printk(KERN_WARNING "warning: mount fs with errors, "
+		       "running btrfsck is recommended\n");
+}
+
+int btrfs_error_commit_super(struct btrfs_root *root)
+{
+	int ret;
+
+	mutex_lock(&root->fs_info->cleaner_mutex);
+	btrfs_run_delayed_iputs(root);
+	mutex_unlock(&root->fs_info->cleaner_mutex);
+
+	down_write(&root->fs_info->cleanup_work_sem);
+	up_write(&root->fs_info->cleanup_work_sem);
+
+	/* cleanup FS via transaction */
+	btrfs_cleanup_transaction(root);
+
+	ret = write_ctree_super(NULL, root, 0);
+
+	return ret;
+}
+
+static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
+{
+	struct btrfs_inode *btrfs_inode;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	mutex_lock(&root->fs_info->ordered_operations_mutex);
+	spin_lock(&root->fs_info->ordered_extent_lock);
+
+	list_splice_init(&root->fs_info->ordered_operations, &splice);
+	while (!list_empty(&splice)) {
+		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
+					 ordered_operations);
+
+		list_del_init(&btrfs_inode->ordered_operations);
+
+		btrfs_invalidate_inodes(btrfs_inode->root);
+	}
+
+	spin_unlock(&root->fs_info->ordered_extent_lock);
+	mutex_unlock(&root->fs_info->ordered_operations_mutex);
+
+	return 0;
+}
+
+static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
+{
+	struct list_head splice;
+	struct btrfs_ordered_extent *ordered;
+	struct inode *inode;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&root->fs_info->ordered_extent_lock);
+
+	list_splice_init(&root->fs_info->ordered_extents, &splice);
+	while (!list_empty(&splice)) {
+		ordered = list_entry(splice.next, struct btrfs_ordered_extent,
+				     root_extent_list);
+
+		list_del_init(&ordered->root_extent_list);
+		atomic_inc(&ordered->refs);
+
+		/* the inode may be getting freed (in sys_unlink path). */
+		inode = igrab(ordered->inode);
+
+		spin_unlock(&root->fs_info->ordered_extent_lock);
+		if (inode)
+			iput(inode);
+
+		atomic_set(&ordered->refs, 1);
+		btrfs_put_ordered_extent(ordered);
+
+		spin_lock(&root->fs_info->ordered_extent_lock);
+	}
+
+	spin_unlock(&root->fs_info->ordered_extent_lock);
+
+	return 0;
+}
+
+static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+				      struct btrfs_root *root)
+{
+	struct rb_node *node;
+	struct btrfs_delayed_ref_root *delayed_refs;
+	struct btrfs_delayed_ref_node *ref;
+	int ret = 0;
+
+	delayed_refs = &trans->delayed_refs;
+
+	spin_lock(&delayed_refs->lock);
+	if (delayed_refs->num_entries == 0) {
+		spin_unlock(&delayed_refs->lock);
+		printk(KERN_INFO "delayed_refs has NO entry\n");
+		return ret;
+	}
+
+	node = rb_first(&delayed_refs->root);
+	while (node) {
+		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+		node = rb_next(node);
+
+		ref->in_tree = 0;
+		rb_erase(&ref->rb_node, &delayed_refs->root);
+		delayed_refs->num_entries--;
+
+		atomic_set(&ref->refs, 1);
+		if (btrfs_delayed_ref_is_head(ref)) {
+			struct btrfs_delayed_ref_head *head;
+
+			head = btrfs_delayed_node_to_head(ref);
+			mutex_lock(&head->mutex);
+			kfree(head->extent_op);
+			delayed_refs->num_heads--;
+			if (list_empty(&head->cluster))
+				delayed_refs->num_heads_ready--;
+			list_del_init(&head->cluster);
+			mutex_unlock(&head->mutex);
+		}
+
+		spin_unlock(&delayed_refs->lock);
+		btrfs_put_delayed_ref(ref);
+
+		cond_resched();
+		spin_lock(&delayed_refs->lock);
+	}
+
+	spin_unlock(&delayed_refs->lock);
+
+	return ret;
+}
+
+static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
+{
+	struct btrfs_pending_snapshot *snapshot;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	list_splice_init(&t->pending_snapshots, &splice);
+
+	while (!list_empty(&splice)) {
+		snapshot = list_entry(splice.next,
+				      struct btrfs_pending_snapshot,
+				      list);
+
+		list_del_init(&snapshot->list);
+
+		kfree(snapshot);
+	}
+
+	return 0;
+}
+
+static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+{
+	struct btrfs_inode *btrfs_inode;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
+
+	spin_lock(&root->fs_info->delalloc_lock);
+
+	while (!list_empty(&splice)) {
+		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
+				    delalloc_inodes);
+
+		list_del_init(&btrfs_inode->delalloc_inodes);
+
+		btrfs_invalidate_inodes(btrfs_inode->root);
+	}
+
+	spin_unlock(&root->fs_info->delalloc_lock);
+
+	return 0;
+}
+
+static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+					struct extent_io_tree *dirty_pages,
+					int mark)
+{
+	int ret;
+	struct page *page;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_buffer *eb;
+	u64 start = 0;
+	u64 end;
+	u64 offset;
+	unsigned long index;
+
+	while (1) {
+		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
+					    mark);
+		if (ret)
+			break;
+
+		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+		while (start <= end) {
+			index = start >> PAGE_CACHE_SHIFT;
+			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
+			page = find_get_page(btree_inode->i_mapping, index);
+			if (!page)
+				continue;
+			offset = page_offset(page);
+
+			spin_lock(&dirty_pages->buffer_lock);
+			eb = radix_tree_lookup(
+			     &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
+					       offset >> PAGE_CACHE_SHIFT);
+			spin_unlock(&dirty_pages->buffer_lock);
+			if (eb) {
+				ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
+							 &eb->bflags);
+				atomic_set(&eb->refs, 1);
+			}
+			if (PageWriteback(page))
+				end_page_writeback(page);
+
+			lock_page(page);
+			if (PageDirty(page)) {
+				clear_page_dirty_for_io(page);
+				spin_lock_irq(&page->mapping->tree_lock);
+				radix_tree_tag_clear(&page->mapping->page_tree,
+							page_index(page),
+							PAGECACHE_TAG_DIRTY);
+				spin_unlock_irq(&page->mapping->tree_lock);
+			}
+
+			page->mapping->a_ops->invalidatepage(page, 0);
+			unlock_page(page);
+		}
+	}
+
+	return ret;
+}
+
+static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+				       struct extent_io_tree *pinned_extents)
+{
+	struct extent_io_tree *unpin;
+	u64 start;
+	u64 end;
+	int ret;
+
+	unpin = pinned_extents;
+	while (1) {
+		ret = find_first_extent_bit(unpin, 0, &start, &end,
+					    EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		/* opt_discard */
+		ret = btrfs_error_discard_extent(root, start, end + 1 - start);
+
+		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		btrfs_error_unpin_extent_range(root, start, end);
+		cond_resched();
+	}
+
+	return 0;
+}
+
+static int btrfs_cleanup_transaction(struct btrfs_root *root)
+{
+	struct btrfs_transaction *t;
+	LIST_HEAD(list);
+
+	WARN_ON(1);
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	mutex_lock(&root->fs_info->transaction_kthread_mutex);
+
+	list_splice_init(&root->fs_info->trans_list, &list);
+	while (!list_empty(&list)) {
+		t = list_entry(list.next, struct btrfs_transaction, list);
+		if (!t)
+			break;
+
+		btrfs_destroy_ordered_operations(root);
+
+		btrfs_destroy_ordered_extents(root);
+
+		btrfs_destroy_delayed_refs(t, root);
+
+		btrfs_block_rsv_release(root,
+					&root->fs_info->trans_block_rsv,
+					t->dirty_pages.dirty_bytes);
+
+		/* FIXME: cleanup wait for commit */
+		t->in_commit = 1;
+		t->blocked = 1;
+		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
+			wake_up(&root->fs_info->transaction_blocked_wait);
+
+		t->blocked = 0;
+		if (waitqueue_active(&root->fs_info->transaction_wait))
+			wake_up(&root->fs_info->transaction_wait);
+		mutex_unlock(&root->fs_info->trans_mutex);
+
+		mutex_lock(&root->fs_info->trans_mutex);
+		t->commit_done = 1;
+		if (waitqueue_active(&t->commit_wait))
+			wake_up(&t->commit_wait);
+		mutex_unlock(&root->fs_info->trans_mutex);
+
+		mutex_lock(&root->fs_info->trans_mutex);
+
+		btrfs_destroy_pending_snapshots(t);
+
+		btrfs_destroy_delalloc_inodes(root);
+
+		spin_lock(&root->fs_info->new_trans_lock);
+		root->fs_info->running_transaction = NULL;
+		spin_unlock(&root->fs_info->new_trans_lock);
+
+		btrfs_destroy_marked_extents(root, &t->dirty_pages,
+					     EXTENT_DIRTY);
+
+		btrfs_destroy_pinned_extent(root,
+					    root->fs_info->pinned_extents);
+
+		t->use_count = 0;
+		list_del_init(&t->list);
+		memset(t, 0, sizeof(*t));
+		kmem_cache_free(btrfs_transaction_cachep, t);
+	}
+
+	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	return 0;
+}
+
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 88e825a..07b20dc 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -52,6 +52,7 @@
 		      struct btrfs_root *root, int max_mirrors);
 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
 int btrfs_commit_super(struct btrfs_root *root);
+int btrfs_error_commit_super(struct btrfs_root *root);
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize);
 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 0ccf9a8..ff27d7a 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -65,7 +65,6 @@
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info;
 	struct btrfs_root *root;
-	struct dentry *dentry;
 	struct inode *inode;
 	struct btrfs_key key;
 	int index;
@@ -108,10 +107,7 @@
 		return ERR_PTR(-ESTALE);
 	}
 
-	dentry = d_obtain_alias(inode);
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &btrfs_dentry_operations);
-	return dentry;
+	return d_obtain_alias(inode);
 fail:
 	srcu_read_unlock(&fs_info->subvol_srcu, index);
 	return ERR_PTR(err);
@@ -166,7 +162,6 @@
 static struct dentry *btrfs_get_parent(struct dentry *child)
 {
 	struct inode *dir = child->d_inode;
-	struct dentry *dentry;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
@@ -176,6 +171,8 @@
 	int ret;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return ERR_PTR(-ENOMEM);
 
 	if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
 		key.objectid = root->root_key.objectid;
@@ -223,10 +220,7 @@
 
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &btrfs_dentry_operations);
-	return dentry;
+	return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
 fail:
 	btrfs_free_path(path);
 	return ERR_PTR(ret);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 227e581..7b3089b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -320,11 +320,6 @@
 	if (!path)
 		return -ENOMEM;
 
-	exclude_super_stripes(extent_root, block_group);
-	spin_lock(&block_group->space_info->lock);
-	block_group->space_info->bytes_readonly += block_group->bytes_super;
-	spin_unlock(&block_group->space_info->lock);
-
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
 	/*
@@ -467,8 +462,10 @@
 			cache->cached = BTRFS_CACHE_NO;
 		}
 		spin_unlock(&cache->lock);
-		if (ret == 1)
+		if (ret == 1) {
+			free_excluded_extents(fs_info->extent_root, cache);
 			return 0;
+		}
 	}
 
 	if (load_cache_only)
@@ -3089,7 +3086,7 @@
 	return btrfs_reduce_alloc_profile(root, flags);
 }
 
-static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
+u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 {
 	u64 flags;
 
@@ -3161,8 +3158,12 @@
 					     bytes + 2 * 1024 * 1024,
 					     alloc_target, 0);
 			btrfs_end_transaction(trans, root);
-			if (ret < 0)
-				return ret;
+			if (ret < 0) {
+				if (ret != -ENOSPC)
+					return ret;
+				else
+					goto commit_trans;
+			}
 
 			if (!data_sinfo) {
 				btrfs_set_inode_space_info(root, inode);
@@ -3173,6 +3174,7 @@
 		spin_unlock(&data_sinfo->lock);
 
 		/* commit the current transaction and try again */
+commit_trans:
 		if (!committed && !root->fs_info->open_ioctl_trans) {
 			committed = 1;
 			trans = btrfs_join_transaction(root, 1);
@@ -3339,21 +3341,24 @@
 	u64 reserved;
 	u64 max_reclaim;
 	u64 reclaimed = 0;
-	int pause = 1;
+	long time_left;
 	int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+	int loops = 0;
+	unsigned long progress;
 
 	block_rsv = &root->fs_info->delalloc_block_rsv;
 	space_info = block_rsv->space_info;
 
 	smp_mb();
 	reserved = space_info->bytes_reserved;
+	progress = space_info->reservation_progress;
 
 	if (reserved == 0)
 		return 0;
 
 	max_reclaim = min(reserved, to_reclaim);
 
-	while (1) {
+	while (loops < 1024) {
 		/* have the flusher threads jump in and do some IO */
 		smp_mb();
 		nr_pages = min_t(unsigned long, nr_pages,
@@ -3366,17 +3371,31 @@
 		reserved = space_info->bytes_reserved;
 		spin_unlock(&space_info->lock);
 
+		loops++;
+
 		if (reserved == 0 || reclaimed >= max_reclaim)
 			break;
 
 		if (trans && trans->transaction->blocked)
 			return -EAGAIN;
 
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(pause);
-		pause <<= 1;
-		if (pause > HZ / 10)
-			pause = HZ / 10;
+		time_left = schedule_timeout_interruptible(1);
+
+		/* We were interrupted, exit */
+		if (time_left)
+			break;
+
+		/* we've kicked the IO a few times; if anything has been freed,
+		 * exit.  There is no sense in looping here for a long time
+		 * when we really need to commit the transaction, or there are
+		 * just too many writers without enough free space
+		 */
+
+		if (loops > 3) {
+			smp_mb();
+			if (progress != space_info->reservation_progress)
+				break;
+		}
 
 	}
 	return reclaimed >= to_reclaim;
@@ -3583,10 +3602,23 @@
 
 	if (num_bytes > 0) {
 		if (dest) {
-			block_rsv_add_bytes(dest, num_bytes, 0);
-		} else {
+			spin_lock(&dest->lock);
+			if (!dest->full) {
+				u64 bytes_to_add;
+
+				bytes_to_add = dest->size - dest->reserved;
+				bytes_to_add = min(num_bytes, bytes_to_add);
+				dest->reserved += bytes_to_add;
+				if (dest->reserved >= dest->size)
+					dest->full = 1;
+				num_bytes -= bytes_to_add;
+			}
+			spin_unlock(&dest->lock);
+		}
+		if (num_bytes) {
 			spin_lock(&space_info->lock);
 			space_info->bytes_reserved -= num_bytes;
+			space_info->reservation_progress++;
 			spin_unlock(&space_info->lock);
 		}
 	}
@@ -3721,11 +3753,6 @@
 		return 0;
 	}
 
-	WARN_ON(1);
-	printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
-		block_rsv->size, block_rsv->reserved,
-		block_rsv->freed[0], block_rsv->freed[1]);
-
 	return -ENOSPC;
 }
 
@@ -3824,6 +3851,7 @@
 	if (block_rsv->reserved >= block_rsv->size) {
 		num_bytes = block_rsv->reserved - block_rsv->size;
 		sinfo->bytes_reserved -= num_bytes;
+		sinfo->reservation_progress++;
 		block_rsv->reserved = block_rsv->size;
 		block_rsv->full = 1;
 	}
@@ -3985,7 +4013,6 @@
 		to_reserve = 0;
 	}
 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
-
 	to_reserve += calc_csum_metadata_size(inode, num_bytes);
 	ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
 	if (ret)
@@ -4012,6 +4039,7 @@
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
 	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
 	spin_lock(&BTRFS_I(inode)->accounting_lock);
 	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@@ -4112,6 +4140,7 @@
 			btrfs_set_block_group_used(&cache->item, old_val);
 			cache->reserved -= num_bytes;
 			cache->space_info->bytes_reserved -= num_bytes;
+			cache->space_info->reservation_progress++;
 			cache->space_info->bytes_used += num_bytes;
 			cache->space_info->disk_used += num_bytes * factor;
 			spin_unlock(&cache->lock);
@@ -4163,6 +4192,7 @@
 	if (reserved) {
 		cache->reserved -= num_bytes;
 		cache->space_info->bytes_reserved -= num_bytes;
+		cache->space_info->reservation_progress++;
 	}
 	spin_unlock(&cache->lock);
 	spin_unlock(&cache->space_info->lock);
@@ -4213,6 +4243,7 @@
 				space_info->bytes_readonly += num_bytes;
 			cache->reserved -= num_bytes;
 			space_info->bytes_reserved -= num_bytes;
+			space_info->reservation_progress++;
 		}
 		spin_unlock(&cache->lock);
 		spin_unlock(&space_info->lock);
@@ -4691,6 +4722,7 @@
 		if (ret) {
 			spin_lock(&cache->space_info->lock);
 			cache->space_info->bytes_reserved -= buf->len;
+			cache->space_info->reservation_progress++;
 			spin_unlock(&cache->space_info->lock);
 		}
 		goto out;
@@ -5355,7 +5387,7 @@
 			       num_bytes, data, 1);
 		goto again;
 	}
-	if (ret == -ENOSPC) {
+	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
 		struct btrfs_space_info *sinfo;
 
 		sinfo = __find_space_info(root->fs_info, data);
@@ -5633,6 +5665,7 @@
 	      struct btrfs_root *root, u32 blocksize)
 {
 	struct btrfs_block_rsv *block_rsv;
+	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
 	int ret;
 
 	block_rsv = get_block_rsv(trans, root);
@@ -5640,14 +5673,39 @@
 	if (block_rsv->size == 0) {
 		ret = reserve_metadata_bytes(trans, root, block_rsv,
 					     blocksize, 0);
-		if (ret)
+		/*
+		 * If we couldn't reserve metadata bytes try and use some from
+		 * the global reserve.
+		 */
+		if (ret && block_rsv != global_rsv) {
+			ret = block_rsv_use_bytes(global_rsv, blocksize);
+			if (!ret)
+				return global_rsv;
 			return ERR_PTR(ret);
+		} else if (ret) {
+			return ERR_PTR(ret);
+		}
 		return block_rsv;
 	}
 
 	ret = block_rsv_use_bytes(block_rsv, blocksize);
 	if (!ret)
 		return block_rsv;
+	if (ret) {
+		WARN_ON(1);
+		ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
+					     0);
+		if (!ret) {
+			spin_lock(&block_rsv->lock);
+			block_rsv->size += blocksize;
+			spin_unlock(&block_rsv->lock);
+			return block_rsv;
+		} else if (ret && block_rsv != global_rsv) {
+			ret = block_rsv_use_bytes(global_rsv, blocksize);
+			if (!ret)
+				return global_rsv;
+		}
+	}
 
 	return ERR_PTR(-ENOSPC);
 }
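The hunk above makes use_block_rsv() fall back to the global reserve instead of failing outright. The toy model below captures only that fallback decision, with plain counters standing in for struct btrfs_block_rsv and no locking; it is not kernel code:

```c
#include <stdio.h>

struct rsv {
	unsigned long long reserved;	/* bytes currently available */
};

/* Take "bytes" from rsv if it has them; mirrors block_rsv_use_bytes(). */
static int use_bytes(struct rsv *rsv, unsigned long long bytes)
{
	if (rsv->reserved < bytes)
		return -1;		/* stands in for -ENOSPC */
	rsv->reserved -= bytes;
	return 0;
}

/* Try the primary reserve first, then dip into the global reserve. */
static struct rsv *alloc_block(struct rsv *primary, struct rsv *global,
			       unsigned long long blocksize)
{
	if (!use_bytes(primary, blocksize))
		return primary;
	if (primary != global && !use_bytes(global, blocksize))
		return global;
	return NULL;			/* truly out of space */
}

int main(void)
{
	struct rsv primary = { .reserved = 0 };
	struct rsv global = { .reserved = 16384 };
	struct rsv *used = alloc_block(&primary, &global, 4096);

	printf("allocated from %s reserve\n",
	       used == &global ? "global" : used ? "primary" : "no");
	return 0;
}
```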
@@ -6221,6 +6279,8 @@
 	BUG_ON(!wc);
 
 	trans = btrfs_start_transaction(tree_root, 0);
+	BUG_ON(IS_ERR(trans));
+
 	if (block_rsv)
 		trans->block_rsv = block_rsv;
 
@@ -6318,6 +6378,7 @@
 
 			btrfs_end_transaction_throttle(trans, tree_root);
 			trans = btrfs_start_transaction(tree_root, 0);
+			BUG_ON(IS_ERR(trans));
 			if (block_rsv)
 				trans->block_rsv = block_rsv;
 		}
@@ -6446,6 +6507,8 @@
 	int ret = 0;
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
+	if (!ra)
+		return -ENOMEM;
 
 	mutex_lock(&inode->i_mutex);
 	first_index = start >> PAGE_CACHE_SHIFT;
@@ -6531,7 +6594,7 @@
 	u64 end = start + extent_key->offset - 1;
 
 	em = alloc_extent_map(GFP_NOFS);
-	BUG_ON(!em || IS_ERR(em));
+	BUG_ON(!em);
 
 	em->start = start;
 	em->len = extent_key->offset;
@@ -7477,7 +7540,7 @@
 		BUG_ON(reloc_root->commit_root != NULL);
 		while (1) {
 			trans = btrfs_join_transaction(root, 1);
-			BUG_ON(!trans);
+			BUG_ON(IS_ERR(trans));
 
 			mutex_lock(&root->fs_info->drop_mutex);
 			ret = btrfs_drop_snapshot(trans, reloc_root);
@@ -7535,7 +7598,7 @@
 
 	if (found) {
 		trans = btrfs_start_transaction(root, 1);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 		ret = btrfs_commit_transaction(trans, root);
 		BUG_ON(ret);
 	}
@@ -7779,7 +7842,7 @@
 
 
 	trans = btrfs_start_transaction(extent_root, 1);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	if (extent_key->objectid == 0) {
 		ret = del_extent_zero(trans, extent_root, path, extent_key);
@@ -7970,13 +8033,14 @@
 
 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
 	    sinfo->bytes_may_use + sinfo->bytes_readonly +
-	    cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
+	    cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
 		sinfo->bytes_reserved += cache->reserved_pinned;
 		cache->reserved_pinned = 0;
 		cache->ro = 1;
 		ret = 0;
 	}
+
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
 	return ret;
@@ -8012,6 +8076,69 @@
 	return ret;
 }
 
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 type)
+{
+	u64 alloc_flags = get_alloc_profile(root, type);
+	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
+/*
+ * helper to account the unused space of all the readonly block group in the
+ * list. takes mirrors into account.
+ */
+static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
+{
+	struct btrfs_block_group_cache *block_group;
+	u64 free_bytes = 0;
+	int factor;
+
+	list_for_each_entry(block_group, groups_list, list) {
+		spin_lock(&block_group->lock);
+
+		if (!block_group->ro) {
+			spin_unlock(&block_group->lock);
+			continue;
+		}
+
+		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
+					  BTRFS_BLOCK_GROUP_RAID10 |
+					  BTRFS_BLOCK_GROUP_DUP))
+			factor = 2;
+		else
+			factor = 1;
+
+		free_bytes += (block_group->key.offset -
+			       btrfs_block_group_used(&block_group->item)) *
+			       factor;
+
+		spin_unlock(&block_group->lock);
+	}
+
+	return free_bytes;
+}
+
+/*
+ * helper to account the unused space of all the readonly block group in the
+ * space_info. takes mirrors into account.
+ */
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
+{
+	int i;
+	u64 free_bytes = 0;
+
+	spin_lock(&sinfo->lock);
+
+	for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+		if (!list_empty(&sinfo->block_groups[i]))
+			free_bytes += __btrfs_get_ro_block_group_free_space(
+						&sinfo->block_groups[i]);
+
+	spin_unlock(&sinfo->lock);
+
+	return free_bytes;
+}
+
 int btrfs_set_block_group_rw(struct btrfs_root *root,
 			      struct btrfs_block_group_cache *cache)
 {
@@ -8092,7 +8219,7 @@
 	mutex_lock(&root->fs_info->chunk_mutex);
 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
 		u64 min_free = btrfs_block_group_used(&block_group->item);
-		u64 dev_offset, max_avail;
+		u64 dev_offset;
 
 		/*
 		 * check to make sure we can actually find a chunk with enough
@@ -8100,7 +8227,7 @@
 		 */
 		if (device->total_bytes > device->bytes_used + min_free) {
 			ret = find_free_dev_extent(NULL, device, min_free,
-						   &dev_offset, &max_avail);
+						   &dev_offset, NULL);
 			if (!ret)
 				break;
 			ret = -1;
@@ -8213,6 +8340,13 @@
 		if (block_group->cached == BTRFS_CACHE_STARTED)
 			wait_block_group_cache_done(block_group);
 
+		/*
+		 * We haven't cached this block group, which means we could
+		 * possibly have excluded extents on this block group.
+		 */
+		if (block_group->cached == BTRFS_CACHE_NO)
+			free_excluded_extents(info->extent_root, block_group);
+
 		btrfs_remove_free_space_cache(block_group);
 		btrfs_put_block_group(block_group);
 
@@ -8328,6 +8462,13 @@
 		cache->sectorsize = root->sectorsize;
 
 		/*
+		 * info has super bytes accounted for; otherwise we'll think
+		 * info has super bytes accounted for, otherwise we'll think
+		 * we have more space than we actually do.
+		 */
+		exclude_super_stripes(root, cache);
+
+		/*
 		 * check for two cases, either we are full, and therefore
 		 * don't need to bother with the caching work since we won't
 		 * find any space, or we are empty, and we can just add all
@@ -8335,12 +8476,10 @@
 		 * time, particularly in the full case.
 		 */
 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-			exclude_super_stripes(root, cache);
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			free_excluded_extents(root, cache);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			exclude_super_stripes(root, cache);
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			add_new_free_space(cache, root->fs_info,
@@ -8584,3 +8723,14 @@
 	btrfs_free_path(path);
 	return ret;
 }
+
+int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+{
+	return unpin_extent_range(root, start, end);
+}
+
+int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
+			       u64 num_bytes)
+{
+	return btrfs_discard_extent(root, bytenr, num_bytes);
+}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3e86b9f..714adc4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1433,12 +1433,13 @@
  */
 u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 *start, u64 search_end, u64 max_bytes,
-		     unsigned long bits)
+		     unsigned long bits, int contig)
 {
 	struct rb_node *node;
 	struct extent_state *state;
 	u64 cur_start = *start;
 	u64 total_bytes = 0;
+	u64 last = 0;
 	int found = 0;
 
 	if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@
 		state = rb_entry(node, struct extent_state, rb_node);
 		if (state->start > search_end)
 			break;
-		if (state->end >= cur_start && (state->state & bits)) {
+		if (contig && found && state->start > last + 1)
+			break;
+		if (state->end >= cur_start && (state->state & bits) == bits) {
 			total_bytes += min(search_end, state->end) + 1 -
 				       max(cur_start, state->start);
 			if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@
 				*start = state->start;
 				found = 1;
 			}
+			last = state->end;
+		} else if (contig && found) {
+			break;
 		}
 		node = rb_next(node);
 		if (!node)
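When 'contig' is set, the walk above stops at the first gap between matching extent states instead of summing every match inside the range; btrfs_get_extent_fiemap() relies on this later in the series to size a single contiguous delalloc run. A rough stand-alone illustration of the two modes, using a flat array rather than the rb-tree (types are invented for the sketch):

/* Illustrative only: contiguous vs. total counting, mirroring the logic above. */
struct span { unsigned long long start, end; unsigned long state; };

static unsigned long long count_bits(const struct span *s, int n,
				     unsigned long bits, int contig)
{
	unsigned long long total = 0, last = 0;
	int i, found = 0;

	for (i = 0; i < n; i++) {
		if (contig && found && s[i].start > last + 1)
			break;				/* hit a gap */
		if ((s[i].state & bits) == bits) {
			total += s[i].end - s[i].start + 1;
			found = 1;
			last = s[i].end;
		} else if (contig && found) {
			break;				/* run ended */
		}
	}
	return total;
}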
@@ -1865,7 +1871,7 @@
 	bio_get(bio);
 
 	if (tree->ops && tree->ops->submit_bio_hook)
-		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
 					   mirror_num, bio_flags, start);
 	else
 		submit_bio(rw, bio);
@@ -1920,6 +1926,8 @@
 		nr = bio_get_nr_vecs(bdev);
 
 	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+	if (!bio)
+		return -ENOMEM;
 
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
@@ -1944,6 +1952,7 @@
 
 static void set_page_extent_head(struct page *page, unsigned long len)
 {
+	WARN_ON(!PagePrivate(page));
 	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
 }
 
@@ -2028,8 +2037,11 @@
 		BUG_ON(extent_map_end(em) <= cur);
 		BUG_ON(end < cur);
 
-		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
 			this_bio_flag = EXTENT_BIO_COMPRESSED;
+			extent_set_compress_type(&this_bio_flag,
+						 em->compress_type);
+		}
 
 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
 		cur_end = min(extent_map_end(em) - 1, end);
@@ -2123,7 +2135,7 @@
 	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
 				      &bio_flags);
 	if (bio)
-		submit_one_bio(READ, bio, 0, bio_flags);
+		ret = submit_one_bio(READ, bio, 0, bio_flags);
 	return ret;
 }
 
@@ -2816,9 +2828,17 @@
 		 * at this point we can safely clear everything except the
 		 * locked bit and the nodatasum bit
 		 */
-		clear_extent_bit(tree, start, end,
+		ret = clear_extent_bit(tree, start, end,
 				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
 				 0, 0, NULL, mask);
+
+		/* if clear_extent_bit failed due to -ENOMEM,
+		 * we can't allow the release to continue.
+		 */
+		if (ret < 0)
+			ret = 0;
+		else
+			ret = 1;
 	}
 	return ret;
 }
@@ -2898,6 +2918,46 @@
 	return sector;
 }
 
+/*
+ * helper function for fiemap, which doesn't want to see any holes.
+ * This maps until we find something past 'last'
+ */
+static struct extent_map *get_extent_skip_holes(struct inode *inode,
+						u64 offset,
+						u64 last,
+						get_extent_t *get_extent)
+{
+	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+	struct extent_map *em;
+	u64 len;
+
+	if (offset >= last)
+		return NULL;
+
+	while (1) {
+		len = last - offset;
+		if (len == 0)
+			break;
+		len = (len + sectorsize - 1) & ~(sectorsize - 1);
+		em = get_extent(inode, NULL, 0, offset, len, 0);
+		if (!em || IS_ERR(em))
+			return em;
+
+		/* if this isn't a hole return it */
+		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
+		    em->block_start != EXTENT_MAP_HOLE) {
+			return em;
+		}
+
+		/* this is a hole, advance to the next extent */
+		offset = extent_map_end(em);
+		free_extent_map(em);
+		if (offset >= last)
+			break;
+	}
+	return NULL;
+}
+
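Callers are expected to treat a NULL return as "nothing but holes up to last" and an IS_ERR() value as a hard failure; this is exactly how extent_fiemap() drives it below. A hedged sketch of the calling pattern, not new kernel code:

	em = get_extent_skip_holes(inode, off, last_for_get_extent, get_extent);
	if (IS_ERR(em))
		return PTR_ERR(em);		/* lookup failed */
	if (!em)
		return 0;			/* only holes remain past 'off' */

	/* ... report the extent, then advance and repeat ... */
	off = extent_map_end(em);
	free_extent_map(em);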
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -2907,16 +2967,19 @@
 	u32 flags = 0;
 	u32 found_type;
 	u64 last;
+	u64 last_for_get_extent = 0;
 	u64 disko = 0;
+	u64 isize = i_size_read(inode);
 	struct btrfs_key found_key;
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
 	struct btrfs_file_extent_item *item;
 	int end = 0;
-	u64 em_start = 0, em_len = 0;
+	u64 em_start = 0;
+	u64 em_len = 0;
+	u64 em_end = 0;
 	unsigned long emflags;
-	int hole = 0;
 
 	if (len == 0)
 		return -EINVAL;
@@ -2926,6 +2989,10 @@
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
+	/*
+	 * lookup the last file extent.  We're not using i_size here
+	 * because there might be preallocation past i_size
+	 */
 	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
 				       path, inode->i_ino, -1, 0);
 	if (ret < 0) {
@@ -2939,18 +3006,38 @@
 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 	found_type = btrfs_key_type(&found_key);
 
-	/* No extents, just return */
+	/* No extents, but there might be delalloc bits */
 	if (found_key.objectid != inode->i_ino ||
 	    found_type != BTRFS_EXTENT_DATA_KEY) {
-		btrfs_free_path(path);
-		return 0;
+		/* have to trust i_size as the end */
+		last = (u64)-1;
+		last_for_get_extent = isize;
+	} else {
+		/*
+		 * remember the start of the last extent.  There are a
+		 * bunch of different factors that go into the length of the
+		 * extent, so it's much less complex to remember where it started
+		 */
+		last = found_key.offset;
+		last_for_get_extent = last + 1;
 	}
-	last = found_key.offset;
 	btrfs_free_path(path);
 
+	/*
+	 * we might have some extents allocated but more delalloc past those
+	 * extents. So we trust isize unless the start of the last extent is
+	 * beyond isize
+	 */
+	if (last < isize) {
+		last = (u64)-1;
+		last_for_get_extent = isize;
+	}
+
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
 			 &cached_state, GFP_NOFS);
-	em = get_extent(inode, NULL, 0, off, max - off, 0);
+
+	em = get_extent_skip_holes(inode, off, last_for_get_extent,
+				   get_extent);
 	if (!em)
 		goto out;
 	if (IS_ERR(em)) {
@@ -2959,22 +3046,38 @@
 	}
 
 	while (!end) {
-		hole = 0;
-		off = em->start + em->len;
-		if (off >= max)
-			end = 1;
+		u64 offset_in_extent;
 
-		if (em->block_start == EXTENT_MAP_HOLE) {
-			hole = 1;
-			goto next;
-		}
+		/* break if the extent we found is outside the range */
+		if (em->start >= max || extent_map_end(em) < off)
+			break;
 
-		em_start = em->start;
-		em_len = em->len;
+		/*
+		 * get_extent may return an extent that starts before our
+		 * requested range.  We have to make sure the ranges
+		 * we return to fiemap always move forward and don't
+		 * overlap, so adjust the offsets here
+		 */
+		em_start = max(em->start, off);
 
+		/*
+		 * record the offset from the start of the extent
+		 * for adjusting the disk offset below
+		 */
+		offset_in_extent = em_start - em->start;
+		em_end = extent_map_end(em);
+		em_len = em_end - em_start;
+		emflags = em->flags;
 		disko = 0;
 		flags = 0;
 
+		/*
+		 * bump off for our next call to get_extent
+		 */
+		off = extent_map_end(em);
+		if (off >= max)
+			end = 1;
+
 		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
 			end = 1;
 			flags |= FIEMAP_EXTENT_LAST;
@@ -2985,42 +3088,34 @@
 			flags |= (FIEMAP_EXTENT_DELALLOC |
 				  FIEMAP_EXTENT_UNKNOWN);
 		} else {
-			disko = em->block_start;
+			disko = em->block_start + offset_in_extent;
 		}
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
 			flags |= FIEMAP_EXTENT_ENCODED;
 
-next:
-		emflags = em->flags;
 		free_extent_map(em);
 		em = NULL;
-		if (!end) {
-			em = get_extent(inode, NULL, 0, off, max - off, 0);
-			if (!em)
-				goto out;
-			if (IS_ERR(em)) {
-				ret = PTR_ERR(em);
-				goto out;
-			}
-			emflags = em->flags;
-		}
-
-		if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+		if ((em_start >= last) || em_len == (u64)-1 ||
+		   (last == (u64)-1 && isize <= em_end)) {
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
 
-		if (em_start == last) {
+		/* now scan forward to see if this is really the last extent. */
+		em = get_extent_skip_holes(inode, off, last_for_get_extent,
+					   get_extent);
+		if (IS_ERR(em)) {
+			ret = PTR_ERR(em);
+			goto out;
+		}
+		if (!em) {
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
-
-		if (!hole) {
-			ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-						em_len, flags);
-			if (ret)
-				goto out_free;
-		}
+		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+					      em_len, flags);
+		if (ret)
+			goto out_free;
 	}
 out_free:
 	free_extent_map(em);
@@ -3072,6 +3167,8 @@
 #endif
 
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+	if (eb == NULL)
+		return NULL;
 	eb->start = start;
 	eb->len = len;
 	spin_lock_init(&eb->lock);
@@ -3187,7 +3284,13 @@
 		}
 		if (!PageUptodate(p))
 			uptodate = 0;
-		unlock_page(p);
+
+		/*
+		 * see below about how we avoid a nasty race with release page
+		 * and why we unlock later
+		 */
+		if (i != 0)
+			unlock_page(p);
 	}
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3211,9 +3314,26 @@
 	atomic_inc(&eb->refs);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
+
+	/*
+	 * there is a race where release page may have
+	 * tried to find this extent buffer in the radix
+	 * but failed.  It will tell the VM it is safe to
+	 * reclaim the page, and it will clear the page private bit.
+	 * We must make sure to set the page private bit properly
+	 * after the extent buffer is in the radix tree so
+	 * it doesn't get lost
+	 */
+	set_page_extent_mapped(eb->first_page);
+	set_page_extent_head(eb->first_page, eb->len);
+	if (!page0)
+		unlock_page(eb->first_page);
 	return eb;
 
 free_eb:
+	if (eb->first_page && !page0)
+		unlock_page(eb->first_page);
+
 	if (!atomic_dec_and_test(&eb->refs))
 		return exists;
 	btrfs_release_extent_buffer(eb);
@@ -3264,10 +3384,11 @@
 			continue;
 
 		lock_page(page);
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
-		else
-			set_page_private(page, EXTENT_PAGE_PRIVATE);
 
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
@@ -3457,6 +3578,13 @@
 
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
+		if (i == 0)
+			set_page_extent_head(page, eb->len);
+
 		if (inc_all_pages)
 			page_cache_get(page);
 		if (!PageUptodate(page)) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4183c81..9318dfe 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -20,8 +20,12 @@
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
 
-/* flags for bio submission */
+/*
+ * flags for bio submission. The high bits indicate the compression
+ * type for this bio
+ */
 #define EXTENT_BIO_COMPRESSED 1
+#define EXTENT_BIO_FLAG_SHIFT 16
 
 /* these are bit numbers for test/set bit */
 #define EXTENT_BUFFER_UPTODATE 0
@@ -135,6 +139,17 @@
 	wait_queue_head_t lock_wq;
 };
 
+static inline void extent_set_compress_type(unsigned long *bio_flags,
+					    int compress_type)
+{
+	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
+}
+
+static inline int extent_compress_type(unsigned long bio_flags)
+{
+	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
+}
+
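EXTENT_BIO_COMPRESSED stays in the low bits while the algorithm id is packed above EXTENT_BIO_FLAG_SHIFT, so both survive in one unsigned long. A small round-trip sketch, assuming BTRFS_COMPRESS_LZO from the compression enum introduced in this series:

	/* sketch: encode and decode the compression type in bio_flags */
	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;

	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_LZO);

	/* the low flag bits are untouched ... */
	WARN_ON(!(bio_flags & EXTENT_BIO_COMPRESSED));
	/* ... and the algorithm comes back out of the high bits */
	WARN_ON(extent_compress_type(bio_flags) != BTRFS_COMPRESS_LZO);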
 struct extent_map_tree;
 
 static inline struct extent_state *extent_state_next(struct extent_state *state)
@@ -176,7 +191,7 @@
 
 u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 *start, u64 search_end,
-		     u64 max_bytes, unsigned long bits);
+		     u64 max_bytes, unsigned long bits, int contig);
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 23cb8da..2b6c12e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -3,6 +3,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include "ctree.h"
 #include "extent_map.h"
 
 
@@ -50,10 +51,11 @@
 {
 	struct extent_map *em;
 	em = kmem_cache_alloc(extent_map_cache, mask);
-	if (!em || IS_ERR(em))
-		return em;
+	if (!em)
+		return NULL;
 	em->in_tree = 0;
 	em->flags = 0;
+	em->compress_type = BTRFS_COMPRESS_NONE;
 	atomic_set(&em->refs, 1);
 	return em;
 }
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index ab6d74b..28b44db 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -26,7 +26,8 @@
 	unsigned long flags;
 	struct block_device *bdev;
 	atomic_t refs;
-	int in_tree;
+	unsigned int in_tree:1;
+	unsigned int compress_type:4;
 };
 
 struct extent_map_tree {
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a562a25..4f19a3e 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -536,6 +536,8 @@
 	root = root->fs_info->csum_root;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
 
 	while (1) {
 		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -548,7 +550,10 @@
 			if (path->slots[0] == 0)
 				goto out;
 			path->slots[0]--;
+		} else if (ret < 0) {
+			goto out;
 		}
+
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 66836d8..f447b78 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
+#include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/statfs.h>
@@ -69,6 +70,19 @@
 
 		/* Flush processor's dcache for this page */
 		flush_dcache_page(page);
+
+		/*
+		 * if we get a partial write, we can end up with
+		 * partially up to date pages.  These add
+		 * a lot of complexity, so make sure they don't
+		 * happen by forcing this copy to be retried.
+		 *
+		 * The rest of the btrfs_file_write code will fall
+		 * back to page at a time copies after we return 0.
+		 */
+		if (!PageUptodate(page) && copied < count)
+			copied = 0;
+
 		iov_iter_advance(i, copied);
 		write_bytes -= copied;
 		total_copied += copied;
@@ -185,6 +199,7 @@
 			split = alloc_extent_map(GFP_NOFS);
 		if (!split2)
 			split2 = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!split || !split2);
 
 		write_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, len);
@@ -224,6 +239,7 @@
 
 			split->bdev = em->bdev;
 			split->flags = flags;
+			split->compress_type = em->compress_type;
 			ret = add_extent_mapping(em_tree, split);
 			BUG_ON(ret);
 			free_extent_map(split);
@@ -238,6 +254,7 @@
 			split->len = em->start + em->len - (start + len);
 			split->bdev = em->bdev;
 			split->flags = flags;
+			split->compress_type = em->compress_type;
 
 			if (compressed) {
 				split->block_len = em->block_len;
@@ -759,6 +776,27 @@
 }
 
 /*
+ * on error we return an unlocked page and the error value;
+ * on success we return a locked page and 0
+ */
+static int prepare_uptodate_page(struct page *page, u64 pos)
+{
+	int ret = 0;
+
+	if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
+		ret = btrfs_readpage(NULL, page);
+		if (ret)
+			return ret;
+		lock_page(page);
+		if (!PageUptodate(page)) {
+			unlock_page(page);
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
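Only a write that starts or ends inside a page needs the old contents read first; a page the copy fully overwrites can stay !Uptodate until the data lands. A hedged sketch of the boundary test that the two call sites below apply to the first and last page (variables as used in prepare_pages()):

	/* sketch: which boundary pages need prepare_uptodate_page() */
	bool first_is_partial = (pos & (PAGE_CACHE_SIZE - 1)) != 0;
	bool last_is_partial  = ((pos + write_bytes) & (PAGE_CACHE_SIZE - 1)) != 0;

	/*
	 * e.g. pos = 100, write_bytes = PAGE_CACHE_SIZE: the write straddles
	 * two pages, both only partially covered, so both must be read up
	 * to date before the copy.
	 */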
+/*
  * this gets pages into the page cache and locks them down, it also properly
  * waits for data=ordered extents to finish before allowing the pages to be
  * modified.
@@ -773,6 +811,7 @@
 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
 	struct inode *inode = fdentry(file)->d_inode;
 	int err = 0;
+	int faili = 0;
 	u64 start_pos;
 	u64 last_pos;
 
@@ -790,11 +829,24 @@
 	for (i = 0; i < num_pages; i++) {
 		pages[i] = grab_cache_page(inode->i_mapping, index + i);
 		if (!pages[i]) {
+			faili = i - 1;
 			err = -ENOMEM;
-			BUG_ON(1);
+			goto fail;
+		}
+
+		if (i == 0)
+			err = prepare_uptodate_page(pages[i], pos);
+		if (i == num_pages - 1)
+			err = prepare_uptodate_page(pages[i],
+						    pos + write_bytes);
+		if (err) {
+			page_cache_release(pages[i]);
+			faili = i - 1;
+			goto fail;
 		}
 		wait_on_page_writeback(pages[i]);
 	}
+	err = 0;
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
 		lock_extent_bits(&BTRFS_I(inode)->io_tree,
@@ -834,6 +886,14 @@
 		WARN_ON(!PageLocked(pages[i]));
 	}
 	return 0;
+fail:
+	while (faili >= 0) {
+		unlock_page(pages[faili]);
+		page_cache_release(pages[faili]);
+		faili--;
+	}
+	return err;
+
 }
 
 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
@@ -843,7 +903,6 @@
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = fdentry(file)->d_inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct page *pinned[2];
 	struct page **pages = NULL;
 	struct iov_iter i;
 	loff_t *ppos = &iocb->ki_pos;
@@ -864,9 +923,6 @@
 	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
 		      (file->f_flags & O_DIRECT));
 
-	pinned[0] = NULL;
-	pinned[1] = NULL;
-
 	start_pos = pos;
 
 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
@@ -890,6 +946,17 @@
 	if (err)
 		goto out;
 
+	/*
+	 * If btrfs has flipped read-only because of a fatal error
+	 * (fs_info->fs_state has BTRFS_SUPER_FLAG_ERROR set), we must
+	 * refuse this write even though the file was opened writable,
+	 * to keep the filesystem consistent.
+	 */
+	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		err = -EROFS;
+		goto out;
+	}
+
 	file_update_time(file);
 	BTRFS_I(inode)->sequence++;
 
@@ -932,6 +999,10 @@
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
 		     (sizeof(struct page *)));
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	/* generic_write_checks can change our pos */
 	start_pos = pos;
@@ -939,39 +1010,13 @@
 	first_index = pos >> PAGE_CACHE_SHIFT;
 	last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
 
-	/*
-	 * there are lots of better ways to do this, but this code
-	 * makes sure the first and last page in the file range are
-	 * up to date and ready for cow
-	 */
-	if ((pos & (PAGE_CACHE_SIZE - 1))) {
-		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
-		if (!PageUptodate(pinned[0])) {
-			ret = btrfs_readpage(NULL, pinned[0]);
-			BUG_ON(ret);
-			wait_on_page_locked(pinned[0]);
-		} else {
-			unlock_page(pinned[0]);
-		}
-	}
-	if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
-		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
-		if (!PageUptodate(pinned[1])) {
-			ret = btrfs_readpage(NULL, pinned[1]);
-			BUG_ON(ret);
-			wait_on_page_locked(pinned[1]);
-		} else {
-			unlock_page(pinned[1]);
-		}
-	}
-
 	while (iov_iter_count(&i) > 0) {
 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
 		size_t write_bytes = min(iov_iter_count(&i),
 					 nrptrs * (size_t)PAGE_CACHE_SIZE -
 					 offset);
-		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
-					PAGE_CACHE_SHIFT;
+		size_t num_pages = (write_bytes + offset +
+				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 		WARN_ON(num_pages > nrptrs);
 		memset(pages, 0, sizeof(struct page *) * nrptrs);
@@ -1001,8 +1046,20 @@
 
 		copied = btrfs_copy_from_user(pos, num_pages,
 					   write_bytes, pages, &i);
-		dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
-					PAGE_CACHE_SHIFT;
+
+		/*
+		 * if we have trouble faulting in the pages, fall
+		 * back to one page at a time
+		 */
+		if (copied < write_bytes)
+			nrptrs = 1;
+
+		if (copied == 0)
+			dirty_pages = 0;
+		else
+			dirty_pages = (copied + offset +
+				       PAGE_CACHE_SIZE - 1) >>
+				       PAGE_CACHE_SHIFT;
 
 		if (num_pages > dirty_pages) {
 			if (copied > 0)
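Adding 'offset' to the rounding matters whenever the copy does not start on a page boundary: a 4096-byte copy at in-page offset 100 touches two pages, which the old formula counted as one. A worked form of the calculation under illustrative values (page size assumed to be 4096 here):

	size_t offset = 100;				/* pos & (PAGE_CACHE_SIZE - 1) */
	size_t copied = 4096;

	size_t old_pages = (copied + 4096 - 1) >> 12;		/* == 1, too small */
	size_t new_pages = (copied + offset + 4096 - 1) >> 12;	/* == 2, correct  */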
@@ -1046,10 +1103,6 @@
 		err = ret;
 
 	kfree(pages);
-	if (pinned[0])
-		page_cache_release(pinned[0]);
-	if (pinned[1])
-		page_cache_release(pinned[1]);
 	*ppos = pos;
 
 	/*
@@ -1237,6 +1290,117 @@
 	return 0;
 }
 
+static long btrfs_fallocate(struct file *file, int mode,
+			    loff_t offset, loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct extent_state *cached_state = NULL;
+	u64 cur_offset;
+	u64 last_byte;
+	u64 alloc_start;
+	u64 alloc_end;
+	u64 alloc_hint = 0;
+	u64 locked_end;
+	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
+	struct extent_map *em;
+	int ret;
+
+	alloc_start = offset & ~mask;
+	alloc_end =  (offset + len + mask) & ~mask;
+
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode & ~FALLOC_FL_KEEP_SIZE)
+		return -EOPNOTSUPP;
+
+	/*
+	 * wait for ordered IO before we have any locks.  We'll loop again
+	 * below with the locks held.
+	 */
+	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+
+	mutex_lock(&inode->i_mutex);
+	ret = inode_newsize_ok(inode, alloc_end);
+	if (ret)
+		goto out;
+
+	if (alloc_start > inode->i_size) {
+		ret = btrfs_cont_expand(inode, alloc_start);
+		if (ret)
+			goto out;
+	}
+
+	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
+	if (ret)
+		goto out;
+
+	locked_end = alloc_end - 1;
+	while (1) {
+		struct btrfs_ordered_extent *ordered;
+
+		/* the extent lock is ordered inside the running
+		 * transaction
+		 */
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
+				 locked_end, 0, &cached_state, GFP_NOFS);
+		ordered = btrfs_lookup_first_ordered_extent(inode,
+							    alloc_end - 1);
+		if (ordered &&
+		    ordered->file_offset + ordered->len > alloc_start &&
+		    ordered->file_offset < alloc_end) {
+			btrfs_put_ordered_extent(ordered);
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+					     alloc_start, locked_end,
+					     &cached_state, GFP_NOFS);
+			/*
+			 * we can't wait on the range with the transaction
+			 * running or with the extent lock held
+			 */
+			btrfs_wait_ordered_range(inode, alloc_start,
+						 alloc_end - alloc_start);
+		} else {
+			if (ordered)
+				btrfs_put_ordered_extent(ordered);
+			break;
+		}
+	}
+
+	cur_offset = alloc_start;
+	while (1) {
+		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+				      alloc_end - cur_offset, 0);
+		BUG_ON(IS_ERR(em) || !em);
+		last_byte = min(extent_map_end(em), alloc_end);
+		last_byte = (last_byte + mask) & ~mask;
+		if (em->block_start == EXTENT_MAP_HOLE ||
+		    (cur_offset >= inode->i_size &&
+		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
+							last_byte - cur_offset,
+							1 << inode->i_blkbits,
+							offset + len,
+							&alloc_hint);
+			if (ret < 0) {
+				free_extent_map(em);
+				break;
+			}
+		}
+		free_extent_map(em);
+
+		cur_offset = last_byte;
+		if (cur_offset >= alloc_end) {
+			ret = 0;
+			break;
+		}
+	}
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+			     &cached_state, GFP_NOFS);
+
+	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
+out:
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
+
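With the .fallocate hook added to btrfs_file_operations just below, userspace reaches this code through the plain fallocate(2) syscall, and FALLOC_FL_KEEP_SIZE is the only mode flag accepted. A hedged usage sketch:

/* userspace sketch: preallocate space past EOF without growing i_size */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int prealloc_keep_size(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len);
}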
 const struct file_operations btrfs_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
@@ -1248,6 +1412,7 @@
 	.open		= generic_file_open,
 	.release	= btrfs_release_file,
 	.fsync		= btrfs_sync_file,
+	.fallocate	= btrfs_fallocate,
 	.unlocked_ioctl	= btrfs_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= btrfs_ioctl,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 60d6842..a039065 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -987,11 +987,18 @@
 	return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+		    struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
 	block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	__unlink_free_space(block_group, info);
 	block_group->free_space -= info->bytes;
 }
 
@@ -1016,14 +1023,18 @@
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
+	u64 size = block_group->key.offset;
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	max_bytes = MAX_CACHE_BYTES_PER_GIG *
-		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+	if (size < 1024 * 1024 * 1024)
+		max_bytes = MAX_CACHE_BYTES_PER_GIG;
+	else
+		max_bytes = MAX_CACHE_BYTES_PER_GIG *
+			div64_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
@@ -1171,6 +1182,16 @@
 	recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_space *bitmap_info)
+{
+	unlink_free_space(block_group, bitmap_info);
+	kfree(bitmap_info->bitmap);
+	kfree(bitmap_info);
+	block_group->total_bitmaps--;
+	recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *bitmap_info,
 			      u64 *offset, u64 *bytes)
@@ -1195,6 +1216,7 @@
 	 */
 	search_start = *offset;
 	search_bytes = *bytes;
+	search_bytes = min(search_bytes, end - search_start + 1);
 	ret = search_bitmap(block_group, bitmap_info, &search_start,
 			    &search_bytes);
 	BUG_ON(ret < 0 || search_start != *offset);
@@ -1211,13 +1233,8 @@
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
-		if (!bitmap_info->bytes) {
-			unlink_free_space(block_group, bitmap_info);
-			kfree(bitmap_info->bitmap);
-			kfree(bitmap_info);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!bitmap_info->bytes)
+			free_bitmap(block_group, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1267,8 @@
 			return -EAGAIN;
 
 		goto again;
-	} else if (!bitmap_info->bytes) {
-		unlink_free_space(block_group, bitmap_info);
-		kfree(bitmap_info->bitmap);
-		kfree(bitmap_info);
-		block_group->total_bitmaps--;
-		recalculate_thresholds(block_group);
-	}
+	} else if (!bitmap_info->bytes)
+		free_bitmap(block_group, bitmap_info);
 
 	return 0;
 }
@@ -1359,22 +1371,14 @@
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *right_info = NULL;
-	struct btrfs_free_space *left_info = NULL;
-	struct btrfs_free_space *info = NULL;
-	int ret = 0;
-
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
-	spin_lock(&block_group->tree_lock);
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info;
+	bool merged = false;
+	u64 offset = info->offset;
+	u64 bytes = info->bytes;
 
 	/*
 	 * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1392,62 @@
 	else
 		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-	/*
-	 * If there was no extent directly to the left or right of this new
-	 * extent then we know we're going to have to allocate a new extent, so
-	 * before we do that see if we need to drop this into a bitmap
-	 */
-	if ((!left_info || left_info->bitmap) &&
-	    (!right_info || right_info->bitmap)) {
-		ret = insert_into_bitmap(block_group, info);
-
-		if (ret < 0) {
-			goto out;
-		} else if (ret) {
-			ret = 0;
-			goto out;
-		}
-	}
-
 	if (right_info && !right_info->bitmap) {
-		unlink_free_space(block_group, right_info);
+		if (update_stat)
+			unlink_free_space(block_group, right_info);
+		else
+			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
+		merged = true;
 	}
 
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
-		unlink_free_space(block_group, left_info);
+		if (update_stat)
+			unlink_free_space(block_group, left_info);
+		else
+			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kfree(left_info);
+		merged = true;
 	}
 
+	return merged;
+}
+
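The merge only fires when a non-bitmap neighbour lines up exactly with the new range: the right neighbour must begin at offset + bytes and the left one must end at offset. A compact stand-alone illustration of those adjacency tests, with invented types rather than the free-space entries:

/* illustrative only: the adjacency conditions used by the merge above */
struct span { unsigned long long offset, bytes; };

static int adjacent_right(const struct span *info, const struct span *right)
{
	return right->offset == info->offset + info->bytes;
}

static int adjacent_left(const struct span *info, const struct span *left)
{
	return left->offset + left->bytes == info->offset;
}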
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
+	if (try_merge_free_space(block_group, info, true))
+		goto link;
+
+	/*
+	 * There was no extent directly to the left or right of this new
+	 * extent, so we know we're going to have to allocate a new extent;
+	 * before we do that, see if we need to drop this into a bitmap
+	 */
+	ret = insert_into_bitmap(block_group, info);
+	if (ret < 0) {
+		goto out;
+	} else if (ret) {
+		ret = 0;
+		goto out;
+	}
+link:
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
@@ -1621,6 +1650,7 @@
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
 		BUG_ON(entry->bitmap);
+		try_merge_free_space(block_group, entry, false);
 		tree_insert_offset(&block_group->free_space_offset,
 				   entry->offset, &entry->offset_index, 0);
 	}
@@ -1685,13 +1715,8 @@
 	ret = offset;
 	if (entry->bitmap) {
 		bitmap_clear_bits(block_group, entry, offset, bytes);
-		if (!entry->bytes) {
-			unlink_free_space(block_group, entry);
-			kfree(entry->bitmap);
-			kfree(entry);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!entry->bytes)
+			free_bitmap(block_group, entry);
 	} else {
 		unlink_free_space(block_group, entry);
 		entry->offset += bytes;
@@ -1789,6 +1814,8 @@
 
 	ret = search_start;
 	bitmap_clear_bits(block_group, entry, ret, bytes);
+	if (entry->bytes == 0)
+		free_bitmap(block_group, entry);
 out:
 	spin_unlock(&cluster->lock);
 	spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1869,26 @@
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 
-		if (entry->bytes == 0) {
+		if (entry->bytes == 0)
 			rb_erase(&entry->offset_index, &cluster->root);
-			kfree(entry);
-		}
 		break;
 	}
 out:
 	spin_unlock(&cluster->lock);
 
+	if (!ret)
+		return 0;
+
+	spin_lock(&block_group->tree_lock);
+
+	block_group->free_space -= bytes;
+	if (entry->bytes == 0) {
+		block_group->free_extents--;
+		kfree(entry);
+	}
+
+	spin_unlock(&block_group->tree_lock);
+
 	return ret;
 }
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a0ff46a..9007bbd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -122,10 +122,10 @@
 	size_t cur_size = size;
 	size_t datasize;
 	unsigned long offset;
-	int use_compress = 0;
+	int compress_type = BTRFS_COMPRESS_NONE;
 
 	if (compressed_size && compressed_pages) {
-		use_compress = 1;
+		compress_type = root->fs_info->compress_type;
 		cur_size = compressed_size;
 	}
 
@@ -159,7 +159,7 @@
 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
 	ptr = btrfs_file_extent_inline_start(ei);
 
-	if (use_compress) {
+	if (compress_type != BTRFS_COMPRESS_NONE) {
 		struct page *cpage;
 		int i = 0;
 		while (compressed_size > 0) {
@@ -176,7 +176,7 @@
 			compressed_size -= cur_size;
 		}
 		btrfs_set_file_extent_compression(leaf, ei,
-						  BTRFS_COMPRESS_ZLIB);
+						  compress_type);
 	} else {
 		page = find_get_page(inode->i_mapping,
 				     start >> PAGE_CACHE_SHIFT);
@@ -263,6 +263,7 @@
 	u64 compressed_size;
 	struct page **pages;
 	unsigned long nr_pages;
+	int compress_type;
 	struct list_head list;
 };
 
@@ -280,7 +281,8 @@
 				     u64 start, u64 ram_size,
 				     u64 compressed_size,
 				     struct page **pages,
-				     unsigned long nr_pages)
+				     unsigned long nr_pages,
+				     int compress_type)
 {
 	struct async_extent *async_extent;
 
@@ -290,6 +292,7 @@
 	async_extent->compressed_size = compressed_size;
 	async_extent->pages = pages;
 	async_extent->nr_pages = nr_pages;
+	async_extent->compress_type = compress_type;
 	list_add_tail(&async_extent->list, &cow->extents);
 	return 0;
 }
@@ -332,6 +335,7 @@
 	unsigned long max_uncompressed = 128 * 1024;
 	int i;
 	int will_compress;
+	int compress_type = root->fs_info->compress_type;
 
 	actual_end = min_t(u64, isize, end + 1);
 again:
@@ -381,12 +385,16 @@
 		WARN_ON(pages);
 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
 
-		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
-						total_compressed, pages,
-						nr_pages, &nr_pages_ret,
-						&total_in,
-						&total_compressed,
-						max_compressed);
+		if (BTRFS_I(inode)->force_compress)
+			compress_type = BTRFS_I(inode)->force_compress;
+
+		ret = btrfs_compress_pages(compress_type,
+					   inode->i_mapping, start,
+					   total_compressed, pages,
+					   nr_pages, &nr_pages_ret,
+					   &total_in,
+					   &total_compressed,
+					   max_compressed);
 
 		if (!ret) {
 			unsigned long offset = total_compressed &
@@ -408,7 +416,7 @@
 	}
 	if (start == 0) {
 		trans = btrfs_join_transaction(root, 1);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 		btrfs_set_trans_block_group(trans, inode);
 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -493,7 +501,8 @@
 		 * and will submit them to the elevator.
 		 */
 		add_async_extent(async_cow, start, num_bytes,
-				 total_compressed, pages, nr_pages_ret);
+				 total_compressed, pages, nr_pages_ret,
+				 compress_type);
 
 		if (start + num_bytes < end) {
 			start += num_bytes;
@@ -515,7 +524,8 @@
 			__set_page_dirty_nobuffers(locked_page);
 			/* unlocked later on in the async handlers */
 		}
-		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
+		add_async_extent(async_cow, start, end - start + 1,
+				 0, NULL, 0, BTRFS_COMPRESS_NONE);
 		*num_added += 1;
 	}
 
@@ -602,6 +612,7 @@
 			    GFP_NOFS);
 
 		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(IS_ERR(trans));
 		ret = btrfs_reserve_extent(trans, root,
 					   async_extent->compressed_size,
 					   async_extent->compressed_size,
@@ -633,6 +644,7 @@
 					async_extent->ram_size - 1, 0);
 
 		em = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!em);
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
 		em->orig_start = em->start;
@@ -640,6 +652,7 @@
 		em->block_start = ins.objectid;
 		em->block_len = ins.offset;
 		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		em->compress_type = async_extent->compress_type;
 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 
@@ -656,11 +669,13 @@
 						async_extent->ram_size - 1, 0);
 		}
 
-		ret = btrfs_add_ordered_extent(inode, async_extent->start,
-					       ins.objectid,
-					       async_extent->ram_size,
-					       ins.offset,
-					       BTRFS_ORDERED_COMPRESSED);
+		ret = btrfs_add_ordered_extent_compress(inode,
+						async_extent->start,
+						ins.objectid,
+						async_extent->ram_size,
+						ins.offset,
+						BTRFS_ORDERED_COMPRESSED,
+						async_extent->compress_type);
 		BUG_ON(ret);
 
 		/*
@@ -758,7 +773,7 @@
 
 	BUG_ON(root == root->fs_info->tree_root);
 	trans = btrfs_join_transaction(root, 1);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 	btrfs_set_trans_block_group(trans, inode);
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -806,6 +821,7 @@
 		BUG_ON(ret);
 
 		em = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!em);
 		em->start = start;
 		em->orig_start = em->start;
 		ram_size = ins.offset;
@@ -1036,7 +1052,7 @@
 	} else {
 		trans = btrfs_join_transaction(root, 1);
 	}
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	cow_start = (u64)-1;
 	cur_offset = start;
@@ -1155,6 +1171,7 @@
 			struct extent_map_tree *em_tree;
 			em_tree = &BTRFS_I(inode)->extent_tree;
 			em = alloc_extent_map(GFP_NOFS);
+			BUG_ON(!em);
 			em->start = cur_offset;
 			em->orig_start = em->start;
 			em->len = num_bytes;
@@ -1544,6 +1561,7 @@
 out_page:
 	unlock_page(page);
 	page_cache_release(page);
+	kfree(fixup);
 }
 
 /*
@@ -1670,7 +1688,7 @@
 	struct btrfs_ordered_extent *ordered_extent = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_state *cached_state = NULL;
-	int compressed = 0;
+	int compress_type = 0;
 	int ret;
 	bool nolock = false;
 
@@ -1690,7 +1708,7 @@
 				trans = btrfs_join_transaction_nolock(root, 1);
 			else
 				trans = btrfs_join_transaction(root, 1);
-			BUG_ON(!trans);
+			BUG_ON(IS_ERR(trans));
 			btrfs_set_trans_block_group(trans, inode);
 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 			ret = btrfs_update_inode(trans, root, inode);
@@ -1707,13 +1725,14 @@
 		trans = btrfs_join_transaction_nolock(root, 1);
 	else
 		trans = btrfs_join_transaction(root, 1);
+	BUG_ON(IS_ERR(trans));
 	btrfs_set_trans_block_group(trans, inode);
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
-		compressed = 1;
+		compress_type = ordered_extent->compress_type;
 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
-		BUG_ON(compressed);
+		BUG_ON(compress_type);
 		ret = btrfs_mark_extent_written(trans, inode,
 						ordered_extent->file_offset,
 						ordered_extent->file_offset +
@@ -1727,7 +1746,7 @@
 						ordered_extent->disk_len,
 						ordered_extent->len,
 						ordered_extent->len,
-						compressed, 0, 0,
+						compress_type, 0, 0,
 						BTRFS_FILE_EXTENT_REG);
 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
 				   ordered_extent->file_offset,
@@ -1829,6 +1848,8 @@
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
 			logical = em->block_start;
 			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
+			extent_set_compress_type(&failrec->bio_flags,
+						 em->compress_type);
 		}
 		failrec->logical = logical;
 		free_extent_map(em);
@@ -1892,7 +1913,7 @@
 
 	private = 0;
 	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
-			     (u64)-1, 1, EXTENT_DIRTY)) {
+			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
 		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
 					start, &private_failure);
 		if (ret == 0) {
@@ -2339,6 +2360,7 @@
 		 */
 		if (is_bad_inode(inode)) {
 			trans = btrfs_start_transaction(root, 0);
+			BUG_ON(IS_ERR(trans));
 			btrfs_orphan_del(trans, inode);
 			btrfs_end_transaction(trans, root);
 			iput(inode);
@@ -2366,6 +2388,7 @@
 
 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
 		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(IS_ERR(trans));
 		btrfs_end_transaction(trans, root);
 	}
 
@@ -2626,7 +2649,7 @@
 	path = btrfs_alloc_path();
 	if (!path) {
 		ret = -ENOMEM;
-		goto err;
+		goto out;
 	}
 
 	path->leave_spinning = 1;
@@ -2699,9 +2722,10 @@
 	struct extent_buffer *eb;
 	int level;
 	u64 refs = 1;
-	int uninitialized_var(ret);
 
 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+		int ret;
+
 		if (!path->nodes[level])
 			break;
 		eb = path->nodes[level];
@@ -2712,7 +2736,7 @@
 		if (refs > 1)
 			return 1;
 	}
-	return ret; /* XXX callers? */
+	return 0;
 }
 
 /*
@@ -3671,8 +3695,12 @@
 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int err;
 
+	if (btrfs_root_readonly(root))
+		return -EROFS;
+
 	err = inode_change_ok(inode, attr);
 	if (err)
 		return err;
@@ -4084,8 +4112,6 @@
 	int index;
 	int ret;
 
-	d_set_d_op(dentry, &btrfs_dentry_operations);
-
 	if (dentry->d_name.len > BTRFS_NAME_LEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
@@ -4117,7 +4143,7 @@
 	}
 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
 
-	if (root != sub_root) {
+	if (!IS_ERR(inode) && root != sub_root) {
 		down_read(&root->fs_info->cleanup_work_sem);
 		if (!(inode->i_sb->s_flags & MS_RDONLY))
 			btrfs_orphan_cleanup(sub_root);
@@ -4330,6 +4356,8 @@
 			trans = btrfs_join_transaction_nolock(root, 1);
 		else
 			trans = btrfs_join_transaction(root, 1);
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
 		btrfs_set_trans_block_group(trans, inode);
 		if (nolock)
 			ret = btrfs_end_transaction_nolock(trans, root);
@@ -4355,6 +4383,7 @@
 		return;
 
 	trans = btrfs_join_transaction(root, 1);
+	BUG_ON(IS_ERR(trans));
 	btrfs_set_trans_block_group(trans, inode);
 
 	ret = btrfs_update_inode(trans, root, inode);
@@ -4792,10 +4821,11 @@
 		goto fail;
 
 	/*
-	 * 1 item for inode ref
+	 * 2 items for inode and inode ref
 	 * 2 items for dir items
+	 * 1 item for parent inode
 	 */
-	trans = btrfs_start_transaction(root, 3);
+	trans = btrfs_start_transaction(root, 5);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
 		goto fail;
@@ -4930,8 +4960,10 @@
 	size_t max_size;
 	unsigned long inline_size;
 	unsigned long ptr;
+	int compress_type;
 
 	WARN_ON(pg_offset != 0);
+	compress_type = btrfs_file_extent_compression(leaf, item);
 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
 	inline_size = btrfs_file_extent_inline_item_len(leaf,
 					btrfs_item_nr(leaf, path->slots[0]));
@@ -4941,8 +4973,8 @@
 	read_extent_buffer(leaf, tmp, ptr, inline_size);
 
 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
-	ret = btrfs_zlib_decompress(tmp, page, extent_offset,
-				    inline_size, max_size);
+	ret = btrfs_decompress(compress_type, tmp, page,
+			       extent_offset, inline_size, max_size);
 	if (ret) {
 		char *kaddr = kmap_atomic(page, KM_USER0);
 		unsigned long copy_size = min_t(u64,
@@ -4984,7 +5016,7 @@
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_trans_handle *trans = NULL;
-	int compressed;
+	int compress_type;
 
 again:
 	read_lock(&em_tree->lock);
@@ -5043,7 +5075,7 @@
 
 	found_type = btrfs_file_extent_type(leaf, item);
 	extent_start = found_key.offset;
-	compressed = btrfs_file_extent_compression(leaf, item);
+	compress_type = btrfs_file_extent_compression(leaf, item);
 	if (found_type == BTRFS_FILE_EXTENT_REG ||
 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
 		extent_end = extent_start +
@@ -5089,8 +5121,9 @@
 			em->block_start = EXTENT_MAP_HOLE;
 			goto insert;
 		}
-		if (compressed) {
+		if (compress_type != BTRFS_COMPRESS_NONE) {
 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+			em->compress_type = compress_type;
 			em->block_start = bytenr;
 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
 									 item);
@@ -5124,12 +5157,14 @@
 		em->len = (copy_size + root->sectorsize - 1) &
 			~((u64)root->sectorsize - 1);
 		em->orig_start = EXTENT_MAP_INLINE;
-		if (compressed)
+		if (compress_type) {
 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+			em->compress_type = compress_type;
+		}
 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
 		if (create == 0 && !PageUptodate(page)) {
-			if (btrfs_file_extent_compression(leaf, item) ==
-			    BTRFS_COMPRESS_ZLIB) {
+			if (btrfs_file_extent_compression(leaf, item) !=
+			    BTRFS_COMPRESS_NONE) {
 				ret = uncompress_inline(path, inode, page,
 							pg_offset,
 							extent_offset, item);
@@ -5154,6 +5189,8 @@
 				em = NULL;
 				btrfs_release_path(root, path);
 				trans = btrfs_join_transaction(root, 1);
+				if (IS_ERR(trans))
+					return ERR_CAST(trans);
 				goto again;
 			}
 			map = kmap(page);
@@ -5244,6 +5281,128 @@
 	return em;
 }
 
+struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
+					   size_t pg_offset, u64 start, u64 len,
+					   int create)
+{
+	struct extent_map *em;
+	struct extent_map *hole_em = NULL;
+	u64 range_start = start;
+	u64 end;
+	u64 found;
+	u64 found_end;
+	int err = 0;
+
+	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
+	if (IS_ERR(em))
+		return em;
+	if (em) {
+		/*
+		 * if our em maps to a hole, there might
+		 * actually be delalloc bytes behind it
+		 */
+		if (em->block_start != EXTENT_MAP_HOLE)
+			return em;
+		else
+			hole_em = em;
+	}
+
+	/* check to see if we've wrapped (len == -1 or similar) */
+	end = start + len;
+	if (end < start)
+		end = (u64)-1;
+	else
+		end -= 1;
+
+	em = NULL;
+
+	/* ok, we didn't find anything, let's look for delalloc */
+	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
+				 end, len, EXTENT_DELALLOC, 1);
+	found_end = range_start + found;
+	if (found_end < range_start)
+		found_end = (u64)-1;
+
+	/*
+	 * we didn't find anything useful, return
+	 * the original results from get_extent()
+	 */
+	if (range_start > end || found_end <= start) {
+		em = hole_em;
+		hole_em = NULL;
+		goto out;
+	}
+
+	/* adjust the range_start to make sure it doesn't
+	 * go backwards from the start they passed in
+	 */
+	range_start = max(start, range_start);
+	found = found_end - range_start;
+
+	if (found > 0) {
+		u64 hole_start = start;
+		u64 hole_len = len;
+
+		em = alloc_extent_map(GFP_NOFS);
+		if (!em) {
+			err = -ENOMEM;
+			goto out;
+		}
+		/*
+		 * when btrfs_get_extent can't find anything it
+		 * returns one huge hole
+		 *
+		 * make sure what it found really fits our range, and
+		 * adjust to make sure it is based on the start from
+		 * the caller
+		 */
+		if (hole_em) {
+			u64 calc_end = extent_map_end(hole_em);
+
+			if (calc_end <= start || (hole_em->start > end)) {
+				free_extent_map(hole_em);
+				hole_em = NULL;
+			} else {
+				hole_start = max(hole_em->start, start);
+				hole_len = calc_end - hole_start;
+			}
+		}
+		em->bdev = NULL;
+		if (hole_em && range_start > hole_start) {
+			/* our hole starts before our delalloc, so we
+			 * have to return just the parts of the hole
+			 * that go until the delalloc starts
+			 */
+			em->len = min(hole_len,
+				      range_start - hole_start);
+			em->start = hole_start;
+			em->orig_start = hole_start;
+			/*
+			 * don't adjust block start at all,
+			 * it is fixed at EXTENT_MAP_HOLE
+			 */
+			em->block_start = hole_em->block_start;
+			em->block_len = hole_len;
+		} else {
+			em->start = range_start;
+			em->len = found;
+			em->orig_start = range_start;
+			em->block_start = EXTENT_MAP_DELALLOC;
+			em->block_len = found;
+		}
+	} else if (hole_em) {
+		return hole_em;
+	}
+out:
+
+	free_extent_map(hole_em);
+	if (err) {
+		free_extent_map(em);
+		return ERR_PTR(err);
+	}
+	return em;
+}
+
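Because btrfs_fiemap() now goes through this wrapper, data that only exists as dirty pages (delalloc) is reported to FIEMAP as an UNKNOWN/DELALLOC extent instead of being skipped. A hedged userspace sketch that lists a file's extents and marks the delalloc ones:

/* userspace sketch: dump a file's extents via FS_IOC_FIEMAP */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static int dump_extents(int fd)
{
	char buf[sizeof(struct fiemap) + 32 * sizeof(struct fiemap_extent)];
	struct fiemap *fm = (struct fiemap *)buf;
	unsigned int i;

	memset(buf, 0, sizeof(buf));
	fm->fm_length = ~0ULL;			/* whole file */
	fm->fm_extent_count = 32;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
		return -1;

	for (i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		printf("%llu+%llu%s\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_length,
		       (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ?
				" (delalloc)" : "");
	}
	return 0;
}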
 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 						  u64 start, u64 len)
 {
@@ -5258,8 +5417,8 @@
 	btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
 
 	trans = btrfs_join_transaction(root, 0);
-	if (!trans)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(trans))
+		return ERR_CAST(trans);
 
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -5483,7 +5642,7 @@
 		 * while we look for nocow cross refs
 		 */
 		trans = btrfs_join_transaction(root, 0);
-		if (!trans)
+		if (IS_ERR(trans))
 			goto must_cow;
 
 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
@@ -5618,7 +5777,7 @@
 	BUG_ON(!ordered);
 
 	trans = btrfs_join_transaction(root, 1);
-	if (!trans) {
+	if (IS_ERR(trans)) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -5898,6 +6057,7 @@
 	if (!skip_sum) {
 		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
 		if (!dip->csums) {
+			kfree(dip);
 			ret = -ENOMEM;
 			goto free_ordered;
 		}
@@ -6066,7 +6226,7 @@
 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len)
 {
-	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
+	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
 }
 
 int btrfs_readpage(struct file *file, struct page *page)
@@ -6479,7 +6639,7 @@
 	ei->ordered_data_close = 0;
 	ei->orphan_meta_reserved = 0;
 	ei->dummy_inode = 0;
-	ei->force_compress = 0;
+	ei->force_compress = BTRFS_COMPRESS_NONE;
 
 	inode = &ei->vfs_inode;
 	extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
@@ -7100,112 +7260,6 @@
 					   min_size, actual_len, alloc_hint, trans);
 }
 
-static long btrfs_fallocate(struct inode *inode, int mode,
-			    loff_t offset, loff_t len)
-{
-	struct extent_state *cached_state = NULL;
-	u64 cur_offset;
-	u64 last_byte;
-	u64 alloc_start;
-	u64 alloc_end;
-	u64 alloc_hint = 0;
-	u64 locked_end;
-	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
-	struct extent_map *em;
-	int ret;
-
-	alloc_start = offset & ~mask;
-	alloc_end =  (offset + len + mask) & ~mask;
-
-	/*
-	 * wait for ordered IO before we have any locks.  We'll loop again
-	 * below with the locks held.
-	 */
-	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
-
-	mutex_lock(&inode->i_mutex);
-	ret = inode_newsize_ok(inode, alloc_end);
-	if (ret)
-		goto out;
-
-	if (alloc_start > inode->i_size) {
-		ret = btrfs_cont_expand(inode, alloc_start);
-		if (ret)
-			goto out;
-	}
-
-	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
-	if (ret)
-		goto out;
-
-	locked_end = alloc_end - 1;
-	while (1) {
-		struct btrfs_ordered_extent *ordered;
-
-		/* the extent lock is ordered inside the running
-		 * transaction
-		 */
-		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
-				 locked_end, 0, &cached_state, GFP_NOFS);
-		ordered = btrfs_lookup_first_ordered_extent(inode,
-							    alloc_end - 1);
-		if (ordered &&
-		    ordered->file_offset + ordered->len > alloc_start &&
-		    ordered->file_offset < alloc_end) {
-			btrfs_put_ordered_extent(ordered);
-			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     alloc_start, locked_end,
-					     &cached_state, GFP_NOFS);
-			/*
-			 * we can't wait on the range with the transaction
-			 * running or with the extent lock held
-			 */
-			btrfs_wait_ordered_range(inode, alloc_start,
-						 alloc_end - alloc_start);
-		} else {
-			if (ordered)
-				btrfs_put_ordered_extent(ordered);
-			break;
-		}
-	}
-
-	cur_offset = alloc_start;
-	while (1) {
-		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
-				      alloc_end - cur_offset, 0);
-		BUG_ON(IS_ERR(em) || !em);
-		last_byte = min(extent_map_end(em), alloc_end);
-		last_byte = (last_byte + mask) & ~mask;
-		if (em->block_start == EXTENT_MAP_HOLE ||
-		    (cur_offset >= inode->i_size &&
-		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
-							last_byte - cur_offset,
-							1 << inode->i_blkbits,
-							offset + len,
-							&alloc_hint);
-			if (ret < 0) {
-				free_extent_map(em);
-				break;
-			}
-		}
-		free_extent_map(em);
-
-		cur_offset = last_byte;
-		if (cur_offset >= alloc_end) {
-			ret = 0;
-			break;
-		}
-	}
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-			     &cached_state, GFP_NOFS);
-
-	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
-out:
-	mutex_unlock(&inode->i_mutex);
-	return ret;
-}
-
 static int btrfs_set_page_dirty(struct page *page)
 {
 	return __set_page_dirty_nobuffers(page);
@@ -7213,6 +7267,10 @@
 
 static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+
+	if (btrfs_root_readonly(root) && (mask & MAY_WRITE))
+		return -EROFS;
 	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
 		return -EACCES;
 	return generic_permission(inode, mask, flags, btrfs_check_acl);
@@ -7308,7 +7366,6 @@
 	.listxattr      = btrfs_listxattr,
 	.removexattr	= btrfs_removexattr,
 	.permission	= btrfs_permission,
-	.fallocate	= btrfs_fallocate,
 	.fiemap		= btrfs_fiemap,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index f87552a..5fdb2ab 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -147,6 +147,9 @@
 	unsigned int flags, oldflags;
 	int ret;
 
+	if (btrfs_root_readonly(root))
+		return -EROFS;
+
 	if (copy_from_user(&flags, arg, sizeof(flags)))
 		return -EFAULT;
 
@@ -200,7 +203,7 @@
 
 
 	trans = btrfs_join_transaction(root, 1);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	ret = btrfs_update_inode(trans, root, inode);
 	BUG_ON(ret);
@@ -360,7 +363,8 @@
 }
 
 static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
-			   char *name, int namelen, u64 *async_transid)
+			   char *name, int namelen, u64 *async_transid,
+			   bool readonly)
 {
 	struct inode *inode;
 	struct dentry *parent;
@@ -378,6 +382,7 @@
 	btrfs_init_block_rsv(&pending_snapshot->block_rsv);
 	pending_snapshot->dentry = dentry;
 	pending_snapshot->root = root;
+	pending_snapshot->readonly = readonly;
 
 	trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
 	if (IS_ERR(trans)) {
@@ -509,7 +514,7 @@
 static noinline int btrfs_mksubvol(struct path *parent,
 				   char *name, int namelen,
 				   struct btrfs_root *snap_src,
-				   u64 *async_transid)
+				   u64 *async_transid, bool readonly)
 {
 	struct inode *dir  = parent->dentry->d_inode;
 	struct dentry *dentry;
@@ -541,7 +546,7 @@
 
 	if (snap_src) {
 		error = create_snapshot(snap_src, dentry,
-					name, namelen, async_transid);
+					name, namelen, async_transid, readonly);
 	} else {
 		error = create_subvol(BTRFS_I(dir)->root, dentry,
 				      name, namelen, async_transid);
@@ -638,9 +643,11 @@
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_ordered_extent *ordered;
 	struct page *page;
+	struct btrfs_super_block *disk_super;
 	unsigned long last_index;
 	unsigned long ra_pages = root->fs_info->bdi.ra_pages;
 	unsigned long total_read = 0;
+	u64 features;
 	u64 page_start;
 	u64 page_end;
 	u64 last_len = 0;
@@ -648,6 +655,14 @@
 	u64 defrag_end = 0;
 	unsigned long i;
 	int ret;
+	int compress_type = BTRFS_COMPRESS_ZLIB;
+
+	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
+		if (range->compress_type > BTRFS_COMPRESS_TYPES)
+			return -EINVAL;
+		if (range->compress_type)
+			compress_type = range->compress_type;
+	}
 
 	if (inode->i_size == 0)
 		return 0;
@@ -683,7 +698,7 @@
 		total_read++;
 		mutex_lock(&inode->i_mutex);
 		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
-			BTRFS_I(inode)->force_compress = 1;
+			BTRFS_I(inode)->force_compress = compress_type;
 
 		ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
 		if (ret)
@@ -781,10 +796,17 @@
 		atomic_dec(&root->fs_info->async_submit_draining);
 
 		mutex_lock(&inode->i_mutex);
-		BTRFS_I(inode)->force_compress = 0;
+		BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
 		mutex_unlock(&inode->i_mutex);
 	}
 
+	disk_super = &root->fs_info->super_copy;
+	features = btrfs_super_incompat_flags(disk_super);
+	if (range->compress_type == BTRFS_COMPRESS_LZO) {
+		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+		btrfs_set_super_incompat_flags(disk_super, features);
+	}
+
 	return 0;
 
 err_reservations:
@@ -885,6 +907,10 @@
 
 	if (new_size > old_size) {
 		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans)) {
+			ret = PTR_ERR(trans);
+			goto out_unlock;
+		}
 		ret = btrfs_grow_device(trans, device, new_size);
 		btrfs_commit_transaction(trans, root);
 	} else {
@@ -901,7 +927,8 @@
 						    char *name,
 						    unsigned long fd,
 						    int subvol,
-						    u64 *transid)
+						    u64 *transid,
+						    bool readonly)
 {
 	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
 	struct file *src_file;
@@ -919,7 +946,7 @@
 
 	if (subvol) {
 		ret = btrfs_mksubvol(&file->f_path, name, namelen,
-				     NULL, transid);
+				     NULL, transid, readonly);
 	} else {
 		struct inode *src_inode;
 		src_file = fget(fd);
@@ -938,7 +965,7 @@
 		}
 		ret = btrfs_mksubvol(&file->f_path, name, namelen,
 				     BTRFS_I(src_inode)->root,
-				     transid);
+				     transid, readonly);
 		fput(src_file);
 	}
 out:
@@ -946,58 +973,142 @@
 }
 
 static noinline int btrfs_ioctl_snap_create(struct file *file,
-					    void __user *arg, int subvol,
-					    int v2)
+					    void __user *arg, int subvol)
 {
-	struct btrfs_ioctl_vol_args *vol_args = NULL;
-	struct btrfs_ioctl_vol_args_v2 *vol_args_v2 = NULL;
-	char *name;
-	u64 fd;
+	struct btrfs_ioctl_vol_args *vol_args;
 	int ret;
 
-	if (v2) {
-		u64 transid = 0;
-		u64 *ptr = NULL;
+	vol_args = memdup_user(arg, sizeof(*vol_args));
+	if (IS_ERR(vol_args))
+		return PTR_ERR(vol_args);
+	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
 
-		vol_args_v2 = memdup_user(arg, sizeof(*vol_args_v2));
-		if (IS_ERR(vol_args_v2))
-			return PTR_ERR(vol_args_v2);
+	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
+					      vol_args->fd, subvol,
+					      NULL, false);
 
-		if (vol_args_v2->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) {
-			ret = -EINVAL;
-			goto out;
-		}
+	kfree(vol_args);
+	return ret;
+}
 
-		name = vol_args_v2->name;
-		fd = vol_args_v2->fd;
-		vol_args_v2->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
+static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+					       void __user *arg, int subvol)
+{
+	struct btrfs_ioctl_vol_args_v2 *vol_args;
+	int ret;
+	u64 transid = 0;
+	u64 *ptr = NULL;
+	bool readonly = false;
 
-		if (vol_args_v2->flags & BTRFS_SUBVOL_CREATE_ASYNC)
-			ptr = &transid;
+	vol_args = memdup_user(arg, sizeof(*vol_args));
+	if (IS_ERR(vol_args))
+		return PTR_ERR(vol_args);
+	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
 
-		ret = btrfs_ioctl_snap_create_transid(file, name, fd,
-						      subvol, ptr);
-
-		if (ret == 0 && ptr &&
-		    copy_to_user(arg +
-				 offsetof(struct btrfs_ioctl_vol_args_v2,
-					  transid), ptr, sizeof(*ptr)))
-			ret = -EFAULT;
-	} else {
-		vol_args = memdup_user(arg, sizeof(*vol_args));
-		if (IS_ERR(vol_args))
-			return PTR_ERR(vol_args);
-		name = vol_args->name;
-		fd = vol_args->fd;
-		vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
-
-		ret = btrfs_ioctl_snap_create_transid(file, name, fd,
-						      subvol, NULL);
+	if (vol_args->flags &
+	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY)) {
+		ret = -EOPNOTSUPP;
+		goto out;
 	}
+
+	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
+		ptr = &transid;
+	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
+		readonly = true;
+
+	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
+					      vol_args->fd, subvol,
+					      ptr, readonly);
+
+	if (ret == 0 && ptr &&
+	    copy_to_user(arg +
+			 offsetof(struct btrfs_ioctl_vol_args_v2,
+				  transid), ptr, sizeof(*ptr)))
+		ret = -EFAULT;
 out:
 	kfree(vol_args);
-	kfree(vol_args_v2);
+	return ret;
+}
 
+static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
+						void __user *arg)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret = 0;
+	u64 flags = 0;
+
+	if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+		return -EINVAL;
+
+	down_read(&root->fs_info->subvol_sem);
+	if (btrfs_root_readonly(root))
+		flags |= BTRFS_SUBVOL_RDONLY;
+	up_read(&root->fs_info->subvol_sem);
+
+	if (copy_to_user(arg, &flags, sizeof(flags)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
+					      void __user *arg)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	u64 root_flags;
+	u64 flags;
+	int ret = 0;
+
+	if (root->fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+		return -EINVAL;
+
+	if (copy_from_user(&flags, arg, sizeof(flags)))
+		return -EFAULT;
+
+	if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
+		return -EINVAL;
+
+	if (flags & ~BTRFS_SUBVOL_RDONLY)
+		return -EOPNOTSUPP;
+
+	if (!is_owner_or_cap(inode))
+		return -EACCES;
+
+	down_write(&root->fs_info->subvol_sem);
+
+	/* nothing to do */
+	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
+		goto out;
+
+	root_flags = btrfs_root_flags(&root->root_item);
+	if (flags & BTRFS_SUBVOL_RDONLY)
+		btrfs_set_root_flags(&root->root_item,
+				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
+	else
+		btrfs_set_root_flags(&root->root_item,
+				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
+
+	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out_reset;
+	}
+
+	ret = btrfs_update_root(trans, root->fs_info->tree_root,
+				&root->root_key, &root->root_item);
+
+	btrfs_commit_transaction(trans, root);
+out_reset:
+	if (ret)
+		btrfs_set_root_flags(&root->root_item, root_flags);
+out:
+	up_write(&root->fs_info->subvol_sem);
 	return ret;
 }
 
@@ -1509,6 +1620,9 @@
 	struct btrfs_ioctl_defrag_range_args *range;
 	int ret;
 
+	if (btrfs_root_readonly(root))
+		return -EROFS;
+
 	ret = mnt_want_write(file->f_path.mnt);
 	if (ret)
 		return ret;
@@ -1637,6 +1751,9 @@
 	if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
 		return -EINVAL;
 
+	if (btrfs_root_readonly(root))
+		return -EROFS;
+
 	ret = mnt_want_write(file->f_path.mnt);
 	if (ret)
 		return ret;
@@ -1788,7 +1905,10 @@
 
 			memcpy(&new_key, &key, sizeof(new_key));
 			new_key.objectid = inode->i_ino;
-			new_key.offset = key.offset + destoff - off;
+			if (off <= key.offset)
+				new_key.offset = key.offset + destoff - off;
+			else
+				new_key.offset = destoff;
 
 			trans = btrfs_start_transaction(root, 1);
 			if (IS_ERR(trans)) {
@@ -1958,6 +2078,10 @@
 	if (file->private_data)
 		goto out;
 
+	ret = -EROFS;
+	if (btrfs_root_readonly(root))
+		goto out;
+
 	ret = mnt_want_write(file->f_path.mnt);
 	if (ret)
 		goto out;
@@ -1968,7 +2092,7 @@
 
 	ret = -ENOMEM;
 	trans = btrfs_start_ioctl_transaction(root, 0);
-	if (!trans)
+	if (IS_ERR(trans))
 		goto out_drop;
 
 	file->private_data = trans;
@@ -2024,9 +2148,9 @@
 	path->leave_spinning = 1;
 
 	trans = btrfs_start_transaction(root, 1);
-	if (!trans) {
+	if (IS_ERR(trans)) {
 		btrfs_free_path(path);
-		return -ENOMEM;
+		return PTR_ERR(trans);
 	}
 
 	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
@@ -2087,7 +2211,7 @@
 	int num_types = 4;
 	int alloc_size;
 	int ret = 0;
-	int slot_count = 0;
+	u64 slot_count = 0;
 	int i, c;
 
 	if (copy_from_user(&space_args,
@@ -2126,7 +2250,7 @@
 		goto out;
 	}
 
-	slot_count = min_t(int, space_args.space_slots, slot_count);
+	slot_count = min_t(u64, space_args.space_slots, slot_count);
 
 	alloc_size = sizeof(*dest) * slot_count;
 
@@ -2146,6 +2270,9 @@
 	for (i = 0; i < num_types; i++) {
 		struct btrfs_space_info *tmp;
 
+		if (!slot_count)
+			break;
+
 		info = NULL;
 		rcu_read_lock();
 		list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
@@ -2167,7 +2294,10 @@
 				memcpy(dest, &space, sizeof(space));
 				dest++;
 				space_args.total_spaces++;
+				slot_count--;
 			}
+			if (!slot_count)
+				break;
 		}
 		up_read(&info->groups_sem);
 	}
@@ -2220,6 +2350,8 @@
 	u64 transid;
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	transid = trans->transid;
 	btrfs_commit_transaction_async(trans, root, 0);
 
@@ -2257,13 +2389,17 @@
 	case FS_IOC_GETVERSION:
 		return btrfs_ioctl_getversion(file, argp);
 	case BTRFS_IOC_SNAP_CREATE:
-		return btrfs_ioctl_snap_create(file, argp, 0, 0);
+		return btrfs_ioctl_snap_create(file, argp, 0);
 	case BTRFS_IOC_SNAP_CREATE_V2:
-		return btrfs_ioctl_snap_create(file, argp, 0, 1);
+		return btrfs_ioctl_snap_create_v2(file, argp, 0);
 	case BTRFS_IOC_SUBVOL_CREATE:
-		return btrfs_ioctl_snap_create(file, argp, 1, 0);
+		return btrfs_ioctl_snap_create(file, argp, 1);
 	case BTRFS_IOC_SNAP_DESTROY:
 		return btrfs_ioctl_snap_destroy(file, argp);
+	case BTRFS_IOC_SUBVOL_GETFLAGS:
+		return btrfs_ioctl_subvol_getflags(file, argp);
+	case BTRFS_IOC_SUBVOL_SETFLAGS:
+		return btrfs_ioctl_subvol_setflags(file, argp);
 	case BTRFS_IOC_DEFAULT_SUBVOL:
 		return btrfs_ioctl_default_subvol(file, argp);
 	case BTRFS_IOC_DEFRAG:
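
Editor's note: the read-only snapshot support added above is driven entirely from userspace through the v2 snapshot ioctl and the new subvolume flag ioctls. The following is a minimal sketch of how a caller might create a read-only snapshot and later flip it back to writable. It assumes the BTRFS_IOC_* numbers, BTRFS_SUBVOL_* flags and struct btrfs_ioctl_vol_args_v2 layout from fs/btrfs/ioctl.h (next hunk) are visible to userspace; the include path and all file system paths are hypothetical.

/* Sketch only: "ioctl.h" stands for a userspace copy of fs/btrfs/ioctl.h. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include "ioctl.h"

static int snapshot_readonly(const char *src, const char *destdir,
			     const char *name)
{
	struct btrfs_ioctl_vol_args_v2 args;
	int srcfd, destfd, ret = -1;

	srcfd = open(src, O_RDONLY);		/* source subvolume */
	if (srcfd < 0)
		return -1;
	destfd = open(destdir, O_RDONLY);	/* parent dir of the snapshot */
	if (destfd < 0)
		goto out_src;

	memset(&args, 0, sizeof(args));
	args.fd = srcfd;
	args.flags = BTRFS_SUBVOL_RDONLY;	/* new flag in this series */
	strncpy(args.name, name, BTRFS_SUBVOL_NAME_MAX);

	ret = ioctl(destfd, BTRFS_IOC_SNAP_CREATE_V2, &args);
	close(destfd);
out_src:
	close(srcfd);
	return ret;
}

static int make_writable(const char *subvol)
{
	__u64 flags;
	int fd = open(subvol, O_RDONLY);	/* must be the subvolume root */
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags);
	if (ret == 0) {
		flags &= ~BTRFS_SUBVOL_RDONLY;
		ret = ioctl(fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags);
	}
	close(fd);
	return ret;
}

Note that btrfs_ioctl_subvol_setflags() only accepts the call on the subvolume root inode and requires ownership or CAP_FOWNER, matching the checks in the handler above.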
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index c344d12..8fb3821 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -31,6 +31,7 @@
 };
 
 #define BTRFS_SUBVOL_CREATE_ASYNC	(1ULL << 0)
+#define BTRFS_SUBVOL_RDONLY		(1ULL << 1)
 
 #define BTRFS_SUBVOL_NAME_MAX 4039
 struct btrfs_ioctl_vol_args_v2 {
@@ -133,8 +134,15 @@
 	 */
 	__u32 extent_thresh;
 
+	/*
+	 * which compression method to use if turning on compression
+	 * for this defrag operation.  If unspecified, zlib will
+	 * be used
+	 */
+	__u32 compress_type;
+
 	/* spare for later */
-	__u32 unused[5];
+	__u32 unused[4];
 };
 
 struct btrfs_ioctl_space_info {
@@ -193,4 +201,6 @@
 #define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
 #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
 				   struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
 #endif
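
Editor's note: the new compress_type field fits into the existing defrag range ioctl without changing its size (one of the five spare words is consumed). As an illustration only, and assuming the pre-existing BTRFS_IOC_DEFRAG_RANGE number and BTRFS_DEFRAG_RANGE_COMPRESS flag from this header (neither appears in the hunk above) plus the kernel's compression-type values, a caller could request an LZO-compressing defrag roughly like this; the file path is hypothetical.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include "ioctl.h"			/* userspace copy of fs/btrfs/ioctl.h */

#ifndef BTRFS_COMPRESS_LZO
#define BTRFS_COMPRESS_LZO 2		/* assumed value of the kernel enum */
#endif

static int defrag_with_lzo(const char *path)
{
	struct btrfs_ioctl_defrag_range_args range;
	int fd = open(path, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;				/* whole file */
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
	range.compress_type = BTRFS_COMPRESS_LZO;	/* new field */

	ret = ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
	close(fd);
	return ret;
}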
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
new file mode 100644
index 0000000..a178f5e
--- /dev/null
+++ b/fs/btrfs/lzo.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/lzo.h>
+#include "compression.h"
+
+#define LZO_LEN	4
+
+struct workspace {
+	void *mem;
+	void *buf;	/* where decompressed data goes */
+	void *cbuf;	/* where compressed data goes */
+	struct list_head list;
+};
+
+static void lzo_free_workspace(struct list_head *ws)
+{
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
+
+	vfree(workspace->buf);
+	vfree(workspace->cbuf);
+	vfree(workspace->mem);
+	kfree(workspace);
+}
+
+static struct list_head *lzo_alloc_workspace(void)
+{
+	struct workspace *workspace;
+
+	workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
+	if (!workspace)
+		return ERR_PTR(-ENOMEM);
+
+	workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
+	workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
+	workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
+	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
+		goto fail;
+
+	INIT_LIST_HEAD(&workspace->list);
+
+	return &workspace->list;
+fail:
+	lzo_free_workspace(&workspace->list);
+	return ERR_PTR(-ENOMEM);
+}
+
+static inline void write_compress_length(char *buf, size_t len)
+{
+	__le32 dlen;
+
+	dlen = cpu_to_le32(len);
+	memcpy(buf, &dlen, LZO_LEN);
+}
+
+static inline size_t read_compress_length(char *buf)
+{
+	__le32 dlen;
+
+	memcpy(&dlen, buf, LZO_LEN);
+	return le32_to_cpu(dlen);
+}
+
+static int lzo_compress_pages(struct list_head *ws,
+			      struct address_space *mapping,
+			      u64 start, unsigned long len,
+			      struct page **pages,
+			      unsigned long nr_dest_pages,
+			      unsigned long *out_pages,
+			      unsigned long *total_in,
+			      unsigned long *total_out,
+			      unsigned long max_out)
+{
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
+	int ret = 0;
+	char *data_in;
+	char *cpage_out;
+	int nr_pages = 0;
+	struct page *in_page = NULL;
+	struct page *out_page = NULL;
+	unsigned long bytes_left;
+
+	size_t in_len;
+	size_t out_len;
+	char *buf;
+	unsigned long tot_in = 0;
+	unsigned long tot_out = 0;
+	unsigned long pg_bytes_left;
+	unsigned long out_offset;
+	unsigned long bytes;
+
+	*out_pages = 0;
+	*total_out = 0;
+	*total_in = 0;
+
+	in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+	data_in = kmap(in_page);
+
+	/*
+	 * store the size of all chunks of compressed data in
+	 * the first 4 bytes
+	 */
+	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+	if (out_page == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	cpage_out = kmap(out_page);
+	out_offset = LZO_LEN;
+	tot_out = LZO_LEN;
+	pages[0] = out_page;
+	nr_pages = 1;
+	pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+
+	/* compress at most one page of data each time */
+	in_len = min(len, PAGE_CACHE_SIZE);
+	while (tot_in < len) {
+		ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
+				       &out_len, workspace->mem);
+		if (ret != LZO_E_OK) {
+			printk(KERN_DEBUG "btrfs deflate in loop returned %d\n",
+			       ret);
+			ret = -1;
+			goto out;
+		}
+
+		/* store the size of this chunk of compressed data */
+		write_compress_length(cpage_out + out_offset, out_len);
+		tot_out += LZO_LEN;
+		out_offset += LZO_LEN;
+		pg_bytes_left -= LZO_LEN;
+
+		tot_in += in_len;
+		tot_out += out_len;
+
+		/* copy bytes from the working buffer into the pages */
+		buf = workspace->cbuf;
+		while (out_len) {
+			bytes = min_t(unsigned long, pg_bytes_left, out_len);
+
+			memcpy(cpage_out + out_offset, buf, bytes);
+
+			out_len -= bytes;
+			pg_bytes_left -= bytes;
+			buf += bytes;
+			out_offset += bytes;
+
+			/*
+			 * we need another page for writing out.
+			 *
+			 * Note if there's less than 4 bytes left, we just
+			 * skip to a new page.
+			 */
+			if ((out_len == 0 && pg_bytes_left < LZO_LEN) ||
+			    pg_bytes_left == 0) {
+				if (pg_bytes_left) {
+					memset(cpage_out + out_offset, 0,
+					       pg_bytes_left);
+					tot_out += pg_bytes_left;
+				}
+
+				/* we're done, don't allocate new page */
+				if (out_len == 0 && tot_in >= len)
+					break;
+
+				kunmap(out_page);
+				if (nr_pages == nr_dest_pages) {
+					out_page = NULL;
+					ret = -1;
+					goto out;
+				}
+
+				out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+				if (out_page == NULL) {
+					ret = -ENOMEM;
+					goto out;
+				}
+				cpage_out = kmap(out_page);
+				pages[nr_pages++] = out_page;
+
+				pg_bytes_left = PAGE_CACHE_SIZE;
+				out_offset = 0;
+			}
+		}
+
+		/* we're making it bigger, give up */
+		if (tot_in > 8192 && tot_in < tot_out)
+			goto out;
+
+		/* we're all done */
+		if (tot_in >= len)
+			break;
+
+		if (tot_out > max_out)
+			break;
+
+		bytes_left = len - tot_in;
+		kunmap(in_page);
+		page_cache_release(in_page);
+
+		start += PAGE_CACHE_SIZE;
+		in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+		data_in = kmap(in_page);
+		in_len = min(bytes_left, PAGE_CACHE_SIZE);
+	}
+
+	if (tot_out > tot_in)
+		goto out;
+
+	/* store the size of all chunks of compressed data */
+	cpage_out = kmap(pages[0]);
+	write_compress_length(cpage_out, tot_out);
+
+	kunmap(pages[0]);
+
+	ret = 0;
+	*total_out = tot_out;
+	*total_in = tot_in;
+out:
+	*out_pages = nr_pages;
+	if (out_page)
+		kunmap(out_page);
+
+	if (in_page) {
+		kunmap(in_page);
+		page_cache_release(in_page);
+	}
+
+	return ret;
+}
+
+static int lzo_decompress_biovec(struct list_head *ws,
+				 struct page **pages_in,
+				 u64 disk_start,
+				 struct bio_vec *bvec,
+				 int vcnt,
+				 size_t srclen)
+{
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
+	int ret = 0, ret2;
+	char *data_in;
+	unsigned long page_in_index = 0;
+	unsigned long page_out_index = 0;
+	unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
+					PAGE_CACHE_SIZE;
+	unsigned long buf_start;
+	unsigned long buf_offset = 0;
+	unsigned long bytes;
+	unsigned long working_bytes;
+	unsigned long pg_offset;
+
+	size_t in_len;
+	size_t out_len;
+	unsigned long in_offset;
+	unsigned long in_page_bytes_left;
+	unsigned long tot_in;
+	unsigned long tot_out;
+	unsigned long tot_len;
+	char *buf;
+	bool may_late_unmap, need_unmap;
+
+	data_in = kmap(pages_in[0]);
+	tot_len = read_compress_length(data_in);
+
+	tot_in = LZO_LEN;
+	in_offset = LZO_LEN;
+	tot_len = min_t(size_t, srclen, tot_len);
+	in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+
+	tot_out = 0;
+	pg_offset = 0;
+
+	while (tot_in < tot_len) {
+		in_len = read_compress_length(data_in + in_offset);
+		in_page_bytes_left -= LZO_LEN;
+		in_offset += LZO_LEN;
+		tot_in += LZO_LEN;
+
+		tot_in += in_len;
+		working_bytes = in_len;
+		may_late_unmap = need_unmap = false;
+
+		/* fast path: avoid using the working buffer */
+		if (in_page_bytes_left >= in_len) {
+			buf = data_in + in_offset;
+			bytes = in_len;
+			may_late_unmap = true;
+			goto cont;
+		}
+
+		/* copy bytes from the pages into the working buffer */
+		buf = workspace->cbuf;
+		buf_offset = 0;
+		while (working_bytes) {
+			bytes = min(working_bytes, in_page_bytes_left);
+
+			memcpy(buf + buf_offset, data_in + in_offset, bytes);
+			buf_offset += bytes;
+cont:
+			working_bytes -= bytes;
+			in_page_bytes_left -= bytes;
+			in_offset += bytes;
+
+			/* check if we need to pick another page */
+			if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN)
+			    || in_page_bytes_left == 0) {
+				tot_in += in_page_bytes_left;
+
+				if (working_bytes == 0 && tot_in >= tot_len)
+					break;
+
+				if (page_in_index + 1 >= total_pages_in) {
+					ret = -1;
+					goto done;
+				}
+
+				if (may_late_unmap)
+					need_unmap = true;
+				else
+					kunmap(pages_in[page_in_index]);
+
+				data_in = kmap(pages_in[++page_in_index]);
+
+				in_page_bytes_left = PAGE_CACHE_SIZE;
+				in_offset = 0;
+			}
+		}
+
+		out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
+		ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
+					    &out_len);
+		if (need_unmap)
+			kunmap(pages_in[page_in_index - 1]);
+		if (ret != LZO_E_OK) {
+			printk(KERN_WARNING "btrfs decompress failed\n");
+			ret = -1;
+			break;
+		}
+
+		buf_start = tot_out;
+		tot_out += out_len;
+
+		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
+						 tot_out, disk_start,
+						 bvec, vcnt,
+						 &page_out_index, &pg_offset);
+		if (ret2 == 0)
+			break;
+	}
+done:
+	kunmap(pages_in[page_in_index]);
+	return ret;
+}
+
+static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
+			  struct page *dest_page,
+			  unsigned long start_byte,
+			  size_t srclen, size_t destlen)
+{
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
+	size_t in_len;
+	size_t out_len;
+	size_t tot_len;
+	int ret = 0;
+	char *kaddr;
+	unsigned long bytes;
+
+	BUG_ON(srclen < LZO_LEN);
+
+	tot_len = read_compress_length(data_in);
+	data_in += LZO_LEN;
+
+	in_len = read_compress_length(data_in);
+	data_in += LZO_LEN;
+
+	out_len = PAGE_CACHE_SIZE;
+	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
+	if (ret != LZO_E_OK) {
+		printk(KERN_WARNING "btrfs decompress failed!\n");
+		ret = -1;
+		goto out;
+	}
+
+	if (out_len < start_byte) {
+		ret = -1;
+		goto out;
+	}
+
+	bytes = min_t(unsigned long, destlen, out_len - start_byte);
+
+	kaddr = kmap_atomic(dest_page, KM_USER0);
+	memcpy(kaddr, workspace->buf + start_byte, bytes);
+	kunmap_atomic(kaddr, KM_USER0);
+out:
+	return ret;
+}
+
+struct btrfs_compress_op btrfs_lzo_compress = {
+	.alloc_workspace	= lzo_alloc_workspace,
+	.free_workspace		= lzo_free_workspace,
+	.compress_pages		= lzo_compress_pages,
+	.decompress_biovec	= lzo_decompress_biovec,
+	.decompress		= lzo_decompress,
+};
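
Editor's note: write_compress_length()/read_compress_length() above define a very simple framing for the LZO stream: the first 4 bytes of the first page hold the little-endian total stream length (headers included), and every chunk is prefixed with its own 4-byte compressed length. The standalone sketch below illustrates that layout and how a reader walks it; it uses plain buffers instead of kmap'd pages and plain bytes instead of LZO output, and assumes a little-endian host, so it is an illustration of the format only, not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LZO_LEN 4

static void write_len(unsigned char *buf, uint32_t len)
{
	memcpy(buf, &len, LZO_LEN);	/* little-endian host assumed */
}

static uint32_t read_len(const unsigned char *buf)
{
	uint32_t len;

	memcpy(&len, buf, LZO_LEN);
	return len;
}

int main(void)
{
	unsigned char stream[64];
	const char *chunks[] = { "first-chunk", "second" };
	uint32_t off = LZO_LEN;		/* reserve room for the total length */
	uint32_t tot, pos, i;

	for (i = 0; i < 2; i++) {
		uint32_t clen = strlen(chunks[i]);

		write_len(stream + off, clen);	/* per-chunk header */
		off += LZO_LEN;
		memcpy(stream + off, chunks[i], clen);
		off += clen;
	}
	write_len(stream, off);			/* total, headers included */

	/* reader side: walk the chunks like lzo_decompress_biovec() */
	tot = read_len(stream);
	pos = LZO_LEN;
	while (pos < tot) {
		uint32_t clen = read_len(stream + pos);

		pos += LZO_LEN;
		printf("chunk of %u bytes: %.*s\n", clen, (int)clen,
		       (const char *)(stream + pos));
		pos += clen;
	}
	return 0;
}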
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index ae7737e..083a554 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -141,7 +141,7 @@
 					  u64 file_offset)
 {
 	struct rb_root *root = &tree->tree;
-	struct rb_node *prev;
+	struct rb_node *prev = NULL;
 	struct rb_node *ret;
 	struct btrfs_ordered_extent *entry;
 
@@ -172,7 +172,7 @@
  */
 static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 				      u64 start, u64 len, u64 disk_len,
-				      int type, int dio)
+				      int type, int dio, int compress_type)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
@@ -189,6 +189,7 @@
 	entry->disk_len = disk_len;
 	entry->bytes_left = len;
 	entry->inode = inode;
+	entry->compress_type = compress_type;
 	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 		set_bit(type, &entry->flags);
 
@@ -220,14 +221,25 @@
 			     u64 start, u64 len, u64 disk_len, int type)
 {
 	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
-					  disk_len, type, 0);
+					  disk_len, type, 0,
+					  BTRFS_COMPRESS_NONE);
 }
 
 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
 				 u64 start, u64 len, u64 disk_len, int type)
 {
 	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
-					  disk_len, type, 1);
+					  disk_len, type, 1,
+					  BTRFS_COMPRESS_NONE);
+}
+
+int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
+				      u64 start, u64 len, u64 disk_len,
+				      int type, int compress_type)
+{
+	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
+					  disk_len, type, 0,
+					  compress_type);
 }
 
 /*
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 61dca83..ff1f69a 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -68,7 +68,7 @@
 
 #define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */
 
-#define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */
+#define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */
 
 #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
 
@@ -93,6 +93,9 @@
 	/* flags (described above) */
 	unsigned long flags;
 
+	/* compression algorithm */
+	int compress_type;
+
 	/* reference count */
 	atomic_t refs;
 
@@ -148,6 +151,9 @@
 			     u64 start, u64 len, u64 disk_len, int type);
 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
 				 u64 start, u64 len, u64 disk_len, int type);
+int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
+				      u64 start, u64 len, u64 disk_len,
+				      int type, int compress_type);
 int btrfs_add_ordered_sum(struct inode *inode,
 			  struct btrfs_ordered_extent *entry,
 			  struct btrfs_ordered_sum *sum);
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 0d126be..fb2605d 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -260,6 +260,7 @@
 #else
 			BUG();
 #endif
+			break;
 		case BTRFS_BLOCK_GROUP_ITEM_KEY:
 			bi = btrfs_item_ptr(l, i,
 					    struct btrfs_block_group_item);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 045c9c2..31ade58 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1157,6 +1157,7 @@
 	new_node->bytenr = dest->node->start;
 	new_node->level = node->level;
 	new_node->lowest = node->lowest;
+	new_node->checked = 1;
 	new_node->root = dest;
 
 	if (!node->lowest) {
@@ -2028,6 +2029,7 @@
 
 	while (1) {
 		trans = btrfs_start_transaction(root, 0);
+		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = rc->block_rsv;
 
 		ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@@ -2147,6 +2149,12 @@
 	}
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	if (IS_ERR(trans)) {
+		if (!err)
+			btrfs_block_rsv_release(rc->extent_root,
+						rc->block_rsv, num_bytes);
+		return PTR_ERR(trans);
+	}
 
 	if (!err) {
 		if (num_bytes != rc->merging_rsv_size) {
@@ -3222,6 +3230,7 @@
 	trans = btrfs_join_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		btrfs_free_path(path);
+		ret = PTR_ERR(trans);
 		goto out;
 	}
 
@@ -3628,6 +3637,7 @@
 	set_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	BUG_ON(IS_ERR(trans));
 	btrfs_commit_transaction(trans, rc->extent_root);
 	return 0;
 }
@@ -3644,6 +3654,7 @@
 	u32 item_size;
 	int ret;
 	int err = 0;
+	int progress = 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -3656,8 +3667,10 @@
 	}
 
 	while (1) {
+		progress++;
 		trans = btrfs_start_transaction(rc->extent_root, 0);
-
+		BUG_ON(IS_ERR(trans));
+restart:
 		if (update_backref_cache(trans, &rc->backref_cache)) {
 			btrfs_end_transaction(trans, rc->extent_root);
 			continue;
@@ -3770,6 +3783,15 @@
 			}
 		}
 	}
+	if (trans && progress && err == -ENOSPC) {
+		ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+					      rc->block_group->flags);
+		if (ret == 0) {
+			err = 0;
+			progress = 0;
+			goto restart;
+		}
+	}
 
 	btrfs_release_path(rc->extent_root, path);
 	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
@@ -3804,7 +3826,10 @@
 
 	/* get rid of pinned extents */
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	btrfs_commit_transaction(trans, rc->extent_root);
+	if (IS_ERR(trans))
+		err = PTR_ERR(trans);
+	else
+		btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
 	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
 	btrfs_free_path(path);
@@ -4022,6 +4047,7 @@
 	int ret;
 
 	trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+	BUG_ON(IS_ERR(trans));
 
 	memset(&root->root_item.drop_progress, 0,
 		sizeof(root->root_item.drop_progress));
@@ -4125,6 +4151,11 @@
 	set_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	if (IS_ERR(trans)) {
+		unset_reloc_control(rc);
+		err = PTR_ERR(trans);
+		goto out_free;
+	}
 
 	rc->merge_reloc_tree = 1;
 
@@ -4154,9 +4185,13 @@
 	unset_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	btrfs_commit_transaction(trans, rc->extent_root);
-out:
+	if (IS_ERR(trans))
+		err = PTR_ERR(trans);
+	else
+		btrfs_commit_transaction(trans, rc->extent_root);
+out_free:
 	kfree(rc);
+out:
 	while (!list_empty(&reloc_roots)) {
 		reloc_root = list_entry(reloc_roots.next,
 					struct btrfs_root, root_list);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 883c6fa..d39a989 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -54,6 +54,90 @@
 
 static const struct super_operations btrfs_super_ops;
 
+static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
+				      char nbuf[16])
+{
+	char *errstr = NULL;
+
+	switch (errno) {
+	case -EIO:
+		errstr = "IO failure";
+		break;
+	case -ENOMEM:
+		errstr = "Out of memory";
+		break;
+	case -EROFS:
+		errstr = "Readonly filesystem";
+		break;
+	default:
+		if (nbuf) {
+			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
+				errstr = nbuf;
+		}
+		break;
+	}
+
+	return errstr;
+}
+
+static void __save_error_info(struct btrfs_fs_info *fs_info)
+{
+	/*
+	 * today we only save the error info in RAM.  Long term we'll
+	 * also send it down to the disk
+	 */
+	fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
+}
+
+/* NOTE:
+ *	We defer the write_super work until umount in order to avoid a
+ *	deadlock, because umount holds all the locks.
+ */
+static void save_error_info(struct btrfs_fs_info *fs_info)
+{
+	__save_error_info(fs_info);
+}
+
+/* btrfs handles errors by forcing the filesystem readonly */
+static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
+{
+	struct super_block *sb = fs_info->sb;
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		sb->s_flags |= MS_RDONLY;
+		printk(KERN_INFO "btrfs is forced readonly\n");
+	}
+}
+
+/*
+ * __btrfs_std_error decodes expected errors from the caller and
+ * invokes the appropriate error response.
+ */
+void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
+		     unsigned int line, int errno)
+{
+	struct super_block *sb = fs_info->sb;
+	char nbuf[16];
+	const char *errstr;
+
+	/*
+	 * Special case: if the error is EROFS, and we're already
+	 * under MS_RDONLY, then it is safe here.
+	 */
+	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
+		return;
+
+	errstr = btrfs_decode_error(fs_info, errno, nbuf);
+	printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
+		sb->s_id, function, line, errstr);
+	save_error_info(fs_info);
+
+	btrfs_handle_error(fs_info);
+}
+
 static void btrfs_put_super(struct super_block *sb)
 {
 	struct btrfs_root *root = btrfs_sb(sb);
@@ -69,9 +153,10 @@
 	Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum,
 	Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd,
 	Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
-	Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
-	Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_err,
-	Opt_user_subvol_rm_allowed,
+	Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
+	Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
+	Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
+	Opt_enospc_debug, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -86,7 +171,9 @@
 	{Opt_alloc_start, "alloc_start=%s"},
 	{Opt_thread_pool, "thread_pool=%d"},
 	{Opt_compress, "compress"},
+	{Opt_compress_type, "compress=%s"},
 	{Opt_compress_force, "compress-force"},
+	{Opt_compress_force_type, "compress-force=%s"},
 	{Opt_ssd, "ssd"},
 	{Opt_ssd_spread, "ssd_spread"},
 	{Opt_nossd, "nossd"},
@@ -98,6 +185,7 @@
 	{Opt_space_cache, "space_cache"},
 	{Opt_clear_cache, "clear_cache"},
 	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
+	{Opt_enospc_debug, "enospc_debug"},
 	{Opt_err, NULL},
 };
 
@@ -112,6 +200,8 @@
 	char *p, *num, *orig;
 	int intarg;
 	int ret = 0;
+	char *compress_type;
+	bool compress_force = false;
 
 	if (!options)
 		return 0;
@@ -154,14 +244,32 @@
 			btrfs_set_opt(info->mount_opt, NODATACOW);
 			btrfs_set_opt(info->mount_opt, NODATASUM);
 			break;
-		case Opt_compress:
-			printk(KERN_INFO "btrfs: use compression\n");
-			btrfs_set_opt(info->mount_opt, COMPRESS);
-			break;
 		case Opt_compress_force:
-			printk(KERN_INFO "btrfs: forcing compression\n");
-			btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
+		case Opt_compress_force_type:
+			compress_force = true;
+		case Opt_compress:
+		case Opt_compress_type:
+			if (token == Opt_compress ||
+			    token == Opt_compress_force ||
+			    strcmp(args[0].from, "zlib") == 0) {
+				compress_type = "zlib";
+				info->compress_type = BTRFS_COMPRESS_ZLIB;
+			} else if (strcmp(args[0].from, "lzo") == 0) {
+				compress_type = "lzo";
+				info->compress_type = BTRFS_COMPRESS_LZO;
+			} else {
+				ret = -EINVAL;
+				goto out;
+			}
+
 			btrfs_set_opt(info->mount_opt, COMPRESS);
+			if (compress_force) {
+				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
+				pr_info("btrfs: force %s compression\n",
+					compress_type);
+			} else
+				pr_info("btrfs: use %s compression\n",
+					compress_type);
 			break;
 		case Opt_ssd:
 			printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
@@ -252,6 +360,9 @@
 		case Opt_user_subvol_rm_allowed:
 			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
 			break;
+		case Opt_enospc_debug:
+			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
+			break;
 		case Opt_err:
 			printk(KERN_INFO "btrfs: unrecognized mount option "
 			       "'%s'\n", p);
@@ -277,7 +388,7 @@
 		struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
-	char *opts, *p;
+	char *opts, *orig, *p;
 	int error = 0;
 	int intarg;
 
@@ -291,6 +402,7 @@
 	opts = kstrdup(options, GFP_KERNEL);
 	if (!opts)
 		return -ENOMEM;
+	orig = opts;
 
 	while ((p = strsep(&opts, ",")) != NULL) {
 		int token;
@@ -326,7 +438,7 @@
 	}
 
  out_free_opts:
-	kfree(opts);
+	kfree(orig);
  out:
 	/*
 	 * If no subvolume name is specified we use the default one.  Allocate
@@ -460,6 +572,7 @@
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_magic = BTRFS_SUPER_MAGIC;
 	sb->s_op = &btrfs_super_ops;
+	sb->s_d_op = &btrfs_dentry_operations;
 	sb->s_export_op = &btrfs_export_ops;
 	sb->s_xattr = btrfs_xattr_handlers;
 	sb->s_time_gran = 1;
@@ -516,6 +629,8 @@
 	btrfs_wait_ordered_extents(root, 0, 0);
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	ret = btrfs_commit_transaction(trans, root);
 	return ret;
 }
@@ -654,6 +769,8 @@
 		}
 
 		btrfs_close_devices(fs_devices);
+		kfree(fs_info);
+		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];
 
@@ -752,6 +869,127 @@
 	return 0;
 }
 
+/*
+ * Helper to calculate the free space on the devices that can be used to
+ * store file data.
+ */
+static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_device_info *devices_info;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	struct btrfs_device *device;
+	u64 skip_space;
+	u64 type;
+	u64 avail_space;
+	u64 used_space;
+	u64 min_stripe_size;
+	int min_stripes = 1;
+	int i = 0, nr_devices;
+	int ret;
+
+	nr_devices = fs_info->fs_devices->rw_devices;
+	BUG_ON(!nr_devices);
+
+	devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
+			       GFP_NOFS);
+	if (!devices_info)
+		return -ENOMEM;
+
+	/* calc min stripe number for data space allocation */
+	type = btrfs_get_alloc_profile(root, 1);
+	if (type & BTRFS_BLOCK_GROUP_RAID0)
+		min_stripes = 2;
+	else if (type & BTRFS_BLOCK_GROUP_RAID1)
+		min_stripes = 2;
+	else if (type & BTRFS_BLOCK_GROUP_RAID10)
+		min_stripes = 4;
+
+	if (type & BTRFS_BLOCK_GROUP_DUP)
+		min_stripe_size = 2 * BTRFS_STRIPE_LEN;
+	else
+		min_stripe_size = BTRFS_STRIPE_LEN;
+
+	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
+		if (!device->in_fs_metadata)
+			continue;
+
+		avail_space = device->total_bytes - device->bytes_used;
+
+		/* align with stripe_len */
+		do_div(avail_space, BTRFS_STRIPE_LEN);
+		avail_space *= BTRFS_STRIPE_LEN;
+
+		/*
+		 * In order to avoid overwriting the superblock on the drive,
+		 * btrfs starts at an offset of at least 1MB when doing chunk
+		 * allocation.
+		 */
+		skip_space = 1024 * 1024;
+
+		/* user can set the offset in fs_info->alloc_start. */
+		if (fs_info->alloc_start + BTRFS_STRIPE_LEN <=
+		    device->total_bytes)
+			skip_space = max(fs_info->alloc_start, skip_space);
+
+		/*
+		 * btrfs can not use the free space in [0, skip_space - 1],
+		 * we must subtract it from the total. In order to implement
+		 * it, we account the used space in this range first.
+		 */
+		ret = btrfs_account_dev_extents_size(device, 0, skip_space - 1,
+						     &used_space);
+		if (ret) {
+			kfree(devices_info);
+			return ret;
+		}
+
+		/* calc the free space in [0, skip_space - 1] */
+		skip_space -= used_space;
+
+		/*
+		 * we can not use the free space in [0, skip_space - 1], so
+		 * subtract it from the total.
+		 */
+		if (avail_space && avail_space >= skip_space)
+			avail_space -= skip_space;
+		else
+			avail_space = 0;
+
+		if (avail_space < min_stripe_size)
+			continue;
+
+		devices_info[i].dev = device;
+		devices_info[i].max_avail = avail_space;
+
+		i++;
+	}
+
+	nr_devices = i;
+
+	btrfs_descending_sort_devices(devices_info, nr_devices);
+
+	i = nr_devices - 1;
+	avail_space = 0;
+	while (nr_devices >= min_stripes) {
+		if (devices_info[i].max_avail >= min_stripe_size) {
+			int j;
+			u64 alloc_size;
+
+			avail_space += devices_info[i].max_avail * min_stripes;
+			alloc_size = devices_info[i].max_avail;
+			for (j = i + 1 - min_stripes; j <= i; j++)
+				devices_info[j].max_avail -= alloc_size;
+		}
+		i--;
+		nr_devices--;
+	}
+
+	kfree(devices_info);
+	*free_bytes = avail_space;
+	return 0;
+}
+
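+
Editor's note: the tail loop of btrfs_calc_avail_data_space() is easiest to follow with a worked example, so here is a standalone sketch of just its arithmetic (not the kernel function): the per-device free space is sorted in descending order, and starting from the smallest device each remaining amount is paired with min_stripes devices and subtracted from the larger ones. The device sizes and the RAID0 profile below are made up for illustration; with free space {10G, 6G, 4G} and min_stripes = 2 the estimate comes out to 12 GiB.

#include <stdint.h>
#include <stdio.h>

static uint64_t estimate_data_space(uint64_t *avail, int nr, int min_stripes,
				    uint64_t min_stripe_size)
{
	uint64_t total = 0;
	int i = nr - 1;			/* start from the smallest device */

	while (nr >= min_stripes) {
		if (avail[i] >= min_stripe_size) {
			uint64_t alloc = avail[i];
			int j;

			total += alloc * min_stripes;
			/* consume that much from the min_stripes largest peers */
			for (j = i + 1 - min_stripes; j <= i; j++)
				avail[j] -= alloc;
		}
		i--;
		nr--;
	}
	return total;
}

int main(void)
{
	/* hypothetical RAID0 data profile on three devices */
	uint64_t avail[] = { 10ULL << 30, 6ULL << 30, 4ULL << 30 };

	printf("estimated allocatable: %llu GiB\n",
	       (unsigned long long)(estimate_data_space(avail, 3, 2,
							1 << 20) >> 30));
	return 0;
}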
 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct btrfs_root *root = btrfs_sb(dentry->d_sb);
@@ -759,17 +997,21 @@
 	struct list_head *head = &root->fs_info->space_info;
 	struct btrfs_space_info *found;
 	u64 total_used = 0;
-	u64 total_used_data = 0;
+	u64 total_free_data = 0;
 	int bits = dentry->d_sb->s_blocksize_bits;
 	__be32 *fsid = (__be32 *)root->fs_info->fsid;
+	int ret;
 
+	/* hold chunk_mutex to avoid allocating new chunks */
+	mutex_lock(&root->fs_info->chunk_mutex);
 	rcu_read_lock();
 	list_for_each_entry_rcu(found, head, list) {
-		if (found->flags & (BTRFS_BLOCK_GROUP_METADATA |
-				    BTRFS_BLOCK_GROUP_SYSTEM))
-			total_used_data += found->disk_total;
-		else
-			total_used_data += found->disk_used;
+		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
+			total_free_data += found->disk_total - found->disk_used;
+			total_free_data -=
+				btrfs_account_ro_block_groups_free_space(found);
+		}
+
 		total_used += found->disk_used;
 	}
 	rcu_read_unlock();
@@ -777,9 +1019,17 @@
 	buf->f_namelen = BTRFS_NAME_LEN;
 	buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
 	buf->f_bfree = buf->f_blocks - (total_used >> bits);
-	buf->f_bavail = buf->f_blocks - (total_used_data >> bits);
 	buf->f_bsize = dentry->d_sb->s_blocksize;
 	buf->f_type = BTRFS_SUPER_MAGIC;
+	buf->f_bavail = total_free_data;
+	ret = btrfs_calc_avail_data_space(root, &total_free_data);
+	if (ret) {
+		mutex_unlock(&root->fs_info->chunk_mutex);
+		return ret;
+	}
+	buf->f_bavail += total_free_data;
+	buf->f_bavail = buf->f_bavail >> bits;
+	mutex_unlock(&root->fs_info->chunk_mutex);
 
 	/* We treat it as constant endianness (it doesn't matter _which_)
 	   because we want the fsid to come out the same whether mounted
@@ -896,10 +1146,14 @@
 	if (err)
 		return err;
 
-	err = btrfs_init_cachep();
+	err = btrfs_init_compress();
 	if (err)
 		goto free_sysfs;
 
+	err = btrfs_init_cachep();
+	if (err)
+		goto free_compress;
+
 	err = extent_io_init();
 	if (err)
 		goto free_cachep;
@@ -927,6 +1181,8 @@
 	extent_io_exit();
 free_cachep:
 	btrfs_destroy_cachep();
+free_compress:
+	btrfs_exit_compress();
 free_sysfs:
 	btrfs_exit_sysfs();
 	return err;
@@ -941,7 +1197,7 @@
 	unregister_filesystem(&btrfs_fs_type);
 	btrfs_exit_sysfs();
 	btrfs_cleanup_fs_uuids();
-	btrfs_zlib_exit();
+	btrfs_exit_compress();
 }
 
 module_init(init_btrfs_fs)
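
Editor's note: with the option parsing above, the compression algorithm can now be chosen at mount time (compress=zlib, compress=lzo, compress-force=<type>), and enospc_debug becomes a recognized option. A minimal sketch of mounting with LZO compression follows; the device node and mount point are hypothetical.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/sdb1", "/mnt/btrfs", "btrfs", 0, "compress=lzo")) {
		perror("mount");
		return 1;
	}
	return 0;
}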
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f50e931..3d73c8d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -181,6 +181,9 @@
 	struct btrfs_trans_handle *h;
 	struct btrfs_transaction *cur_trans;
 	int ret;
+
+	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+		return ERR_PTR(-EROFS);
 again:
 	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
 	if (!h)
@@ -910,6 +913,7 @@
 	u64 to_reserve = 0;
 	u64 index = 0;
 	u64 objectid;
+	u64 root_flags;
 
 	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
 	if (!new_root_item) {
@@ -967,6 +971,13 @@
 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
 	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
 
+	root_flags = btrfs_root_flags(new_root_item);
+	if (pending->readonly)
+		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
+	else
+		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
+	btrfs_set_root_flags(new_root_item, root_flags);
+
 	old = btrfs_lock_root_node(root);
 	btrfs_cow_block(trans, root, old, NULL, 0, &old);
 	btrfs_set_lock_blocking(old);
@@ -1150,6 +1161,11 @@
 	INIT_DELAYED_WORK(&ac->work, do_async_commit);
 	ac->root = root;
 	ac->newtrans = btrfs_join_transaction(root, 0);
+	if (IS_ERR(ac->newtrans)) {
+		int err = PTR_ERR(ac->newtrans);
+		kfree(ac);
+		return err;
+	}
 
 	/* take transaction reference */
 	mutex_lock(&root->fs_info->trans_mutex);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index f104b57..229a594 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -62,6 +62,7 @@
 	struct btrfs_block_rsv block_rsv;
 	/* extra metadata reservation for relocation */
 	int error;
+	bool readonly;
 	struct list_head list;
 };
 
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 054744a..a4bbb85 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -338,6 +338,12 @@
 		}
 		dst_copy = kmalloc(item_size, GFP_NOFS);
 		src_copy = kmalloc(item_size, GFP_NOFS);
+		if (!dst_copy || !src_copy) {
+			btrfs_release_path(root, path);
+			kfree(dst_copy);
+			kfree(src_copy);
+			return -ENOMEM;
+		}
 
 		read_extent_buffer(eb, src_copy, src_ptr, item_size);
 
@@ -665,6 +671,9 @@
 	btrfs_dir_item_key_to_cpu(leaf, di, &location);
 	name_len = btrfs_dir_name_len(leaf, di);
 	name = kmalloc(name_len, GFP_NOFS);
+	if (!name)
+		return -ENOMEM;
+
 	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
 	btrfs_release_path(root, path);
 
@@ -744,6 +753,9 @@
 	int match = 0;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
 	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
 	if (ret != 0)
 		goto out;
@@ -967,6 +979,8 @@
 	key.offset = (u64)-1;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
 
 	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -1178,6 +1192,9 @@
 
 	name_len = btrfs_dir_name_len(eb, di);
 	name = kmalloc(name_len, GFP_NOFS);
+	if (!name)
+		return -ENOMEM;
+
 	log_type = btrfs_dir_type(eb, di);
 	read_extent_buffer(eb, name, (unsigned long)(di + 1),
 		   name_len);
@@ -1692,6 +1709,8 @@
 		root_owner = btrfs_header_owner(parent);
 
 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+		if (!next)
+			return -ENOMEM;
 
 		if (*level == 1) {
 			wc->process_func(root, next, wc, ptr_gen);
@@ -2032,6 +2051,7 @@
 		wait_log_commit(trans, log_root_tree,
 				log_root_tree->log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
+		ret = 0;
 		goto out;
 	}
 	atomic_set(&log_root_tree->log_commit[index2], 1);
@@ -2096,7 +2116,7 @@
 	smp_mb();
 	if (waitqueue_active(&root->log_commit_wait[index1]))
 		wake_up(&root->log_commit_wait[index1]);
-	return 0;
+	return ret;
 }
 
 static void free_log_tree(struct btrfs_trans_handle *trans,
@@ -2194,6 +2214,9 @@
 
 	log = root->log_root;
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
 	di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
 				   name, name_len, -1);
 	if (IS_ERR(di)) {
@@ -2594,6 +2617,9 @@
 
 	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
 			   nr * sizeof(u32), GFP_NOFS);
+	if (!ins_data)
+		return -ENOMEM;
+
 	ins_sizes = (u32 *)ins_data;
 	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
 
@@ -2725,7 +2751,13 @@
 	log = root->log_root;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
 	dst_path = btrfs_alloc_path();
+	if (!dst_path) {
+		btrfs_free_path(path);
+		return -ENOMEM;
+	}
 
 	min_key.objectid = inode->i_ino;
 	min_key.type = BTRFS_INODE_ITEM_KEY;
@@ -3080,6 +3112,7 @@
 	BUG_ON(!path);
 
 	trans = btrfs_start_transaction(fs_info->tree_root, 0);
+	BUG_ON(IS_ERR(trans));
 
 	wc.trans = trans;
 	wc.pin = 1;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6b98845..dd13eb8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -22,6 +22,7 @@
 #include <linux/blkdev.h>
 #include <linux/random.h>
 #include <linux/iocontext.h>
+#include <linux/capability.h>
 #include <asm/div64.h>
 #include "compat.h"
 #include "ctree.h"
@@ -493,7 +494,7 @@
 			continue;
 
 		if (device->bdev) {
-			close_bdev_exclusive(device->bdev, device->mode);
+			blkdev_put(device->bdev, device->mode);
 			device->bdev = NULL;
 			fs_devices->open_devices--;
 		}
@@ -527,7 +528,7 @@
 
 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
 		if (device->bdev) {
-			close_bdev_exclusive(device->bdev, device->mode);
+			blkdev_put(device->bdev, device->mode);
 			fs_devices->open_devices--;
 		}
 		if (device->writeable) {
@@ -584,13 +585,15 @@
 	int seeding = 1;
 	int ret = 0;
 
+	flags |= FMODE_EXCL;
+
 	list_for_each_entry(device, head, dev_list) {
 		if (device->bdev)
 			continue;
 		if (!device->name)
 			continue;
 
-		bdev = open_bdev_exclusive(device->name, flags, holder);
+		bdev = blkdev_get_by_path(device->name, flags, holder);
 		if (IS_ERR(bdev)) {
 			printk(KERN_INFO "open %s failed\n", device->name);
 			goto error;
@@ -598,8 +601,10 @@
 		set_blocksize(bdev, 4096);
 
 		bh = btrfs_read_dev_super(bdev);
-		if (!bh)
+		if (!bh) {
+			ret = -EINVAL;
 			goto error_close;
+		}
 
 		disk_super = (struct btrfs_super_block *)bh->b_data;
 		devid = btrfs_stack_device_id(&disk_super->dev_item);
@@ -642,7 +647,7 @@
 error_brelse:
 		brelse(bh);
 error_close:
-		close_bdev_exclusive(bdev, FMODE_READ);
+		blkdev_put(bdev, flags);
 error:
 		continue;
 	}
@@ -688,7 +693,8 @@
 
 	mutex_lock(&uuid_mutex);
 
-	bdev = open_bdev_exclusive(path, flags, holder);
+	flags |= FMODE_EXCL;
+	bdev = blkdev_get_by_path(path, flags, holder);
 
 	if (IS_ERR(bdev)) {
 		ret = PTR_ERR(bdev);
@@ -700,7 +706,7 @@
 		goto error_close;
 	bh = btrfs_read_dev_super(bdev);
 	if (!bh) {
-		ret = -EIO;
+		ret = -EINVAL;
 		goto error_close;
 	}
 	disk_super = (struct btrfs_super_block *)bh->b_data;
@@ -720,65 +726,48 @@
 
 	brelse(bh);
 error_close:
-	close_bdev_exclusive(bdev, flags);
+	blkdev_put(bdev, flags);
 error:
 	mutex_unlock(&uuid_mutex);
 	return ret;
 }
 
-/*
- * this uses a pretty simple search, the expectation is that it is
- * called very infrequently and that a given device has a small number
- * of extents
- */
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-			 struct btrfs_device *device, u64 num_bytes,
-			 u64 *start, u64 *max_avail)
+/* helper to account the used device space in the range */
+int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
+				   u64 end, u64 *length)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = device->dev_root;
-	struct btrfs_dev_extent *dev_extent = NULL;
+	struct btrfs_dev_extent *dev_extent;
 	struct btrfs_path *path;
-	u64 hole_size = 0;
-	u64 last_byte = 0;
-	u64 search_start = 0;
-	u64 search_end = device->total_bytes;
+	u64 extent_end;
 	int ret;
-	int slot = 0;
-	int start_found;
+	int slot;
 	struct extent_buffer *l;
 
+	*length = 0;
+
+	if (start >= device->total_bytes)
+		return 0;
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 	path->reada = 2;
-	start_found = 0;
-
-	/* FIXME use last free of some kind */
-
-	/* we don't want to overwrite the superblock on the drive,
-	 * so we make sure to start at an offset of at least 1MB
-	 */
-	search_start = max((u64)1024 * 1024, search_start);
-
-	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
-		search_start = max(root->fs_info->alloc_start, search_start);
 
 	key.objectid = device->devid;
-	key.offset = search_start;
+	key.offset = start;
 	key.type = BTRFS_DEV_EXTENT_KEY;
-	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
-		goto error;
+		goto out;
 	if (ret > 0) {
 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
 		if (ret < 0)
-			goto error;
-		if (ret > 0)
-			start_found = 1;
+			goto out;
 	}
-	l = path->nodes[0];
-	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
+
 	while (1) {
 		l = path->nodes[0];
 		slot = path->slots[0];
@@ -787,24 +776,9 @@
 			if (ret == 0)
 				continue;
 			if (ret < 0)
-				goto error;
-no_more_items:
-			if (!start_found) {
-				if (search_start >= search_end) {
-					ret = -ENOSPC;
-					goto error;
-				}
-				*start = search_start;
-				start_found = 1;
-				goto check_pending;
-			}
-			*start = last_byte > search_start ?
-				last_byte : search_start;
-			if (search_end <= *start) {
-				ret = -ENOSPC;
-				goto error;
-			}
-			goto check_pending;
+				goto out;
+
+			break;
 		}
 		btrfs_item_key_to_cpu(l, &key, slot);
 
@@ -812,48 +786,187 @@
 			goto next;
 
 		if (key.objectid > device->devid)
-			goto no_more_items;
+			break;
 
-		if (key.offset >= search_start && key.offset > last_byte &&
-		    start_found) {
-			if (last_byte < search_start)
-				last_byte = search_start;
-			hole_size = key.offset - last_byte;
-
-			if (hole_size > *max_avail)
-				*max_avail = hole_size;
-
-			if (key.offset > last_byte &&
-			    hole_size >= num_bytes) {
-				*start = last_byte;
-				goto check_pending;
-			}
-		}
 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
 			goto next;
 
-		start_found = 1;
 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
-		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
+		extent_end = key.offset + btrfs_dev_extent_length(l,
+								  dev_extent);
+		if (key.offset <= start && extent_end > end) {
+			*length = end - start + 1;
+			break;
+		} else if (key.offset <= start && extent_end > start)
+			*length += extent_end - start;
+		else if (key.offset > start && extent_end <= end)
+			*length += extent_end - key.offset;
+		else if (key.offset > start && key.offset <= end) {
+			*length += end - key.offset + 1;
+			break;
+		} else if (key.offset > end)
+			break;
+
+next:
+		path->slots[0]++;
+	}
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * find_free_dev_extent - find free space in the specified device
+ * @trans:	transaction handler
+ * @device:	the device which we search the free space in
+ * @num_bytes:	the size of the free space that we need
+ * @start:	store the start of the free space.
+ * @len:	the size of the free space that we find, or the size of the max
+ * 		free space if we don't find suitable free space
+ *
+ * this uses a pretty simple search, the expectation is that it is
+ * called very infrequently and that a given device has a small number
+ * of extents
+ *
+ * @start is used to store the start of the free space if we find it. But if we
+ * don't find suitable free space, it will be used to store the start position
+ * of the max free space.
+ *
+ * @len is used to store the size of the free space that we find.
+ * But if we don't find suitable free space, it is used to store the size of
+ * the max free space.
+ */
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+			 struct btrfs_device *device, u64 num_bytes,
+			 u64 *start, u64 *len)
+{
+	struct btrfs_key key;
+	struct btrfs_root *root = device->dev_root;
+	struct btrfs_dev_extent *dev_extent;
+	struct btrfs_path *path;
+	u64 hole_size;
+	u64 max_hole_start;
+	u64 max_hole_size;
+	u64 extent_end;
+	u64 search_start;
+	u64 search_end = device->total_bytes;
+	int ret;
+	int slot;
+	struct extent_buffer *l;
+
+	/* FIXME use last free of some kind */
+
+	/* we don't want to overwrite the superblock on the drive,
+	 * so we make sure to start at an offset of at least 1MB
+	 */
+	search_start = 1024 * 1024;
+
+	if (root->fs_info->alloc_start + num_bytes <= search_end)
+		search_start = max(root->fs_info->alloc_start, search_start);
+
+	max_hole_start = search_start;
+	max_hole_size = 0;
+
+	if (search_start >= search_end) {
+		ret = -ENOSPC;
+		goto error;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	path->reada = 2;
+
+	key.objectid = device->devid;
+	key.offset = search_start;
+	key.type = BTRFS_DEV_EXTENT_KEY;
+
+	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (ret > 0) {
+		ret = btrfs_previous_item(root, path, key.objectid, key.type);
+		if (ret < 0)
+			goto out;
+	}
+
+	while (1) {
+		l = path->nodes[0];
+		slot = path->slots[0];
+		if (slot >= btrfs_header_nritems(l)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret == 0)
+				continue;
+			if (ret < 0)
+				goto out;
+
+			break;
+		}
+		btrfs_item_key_to_cpu(l, &key, slot);
+
+		if (key.objectid < device->devid)
+			goto next;
+
+		if (key.objectid > device->devid)
+			break;
+
+		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+			goto next;
+
+		if (key.offset > search_start) {
+			hole_size = key.offset - search_start;
+
+			if (hole_size > max_hole_size) {
+				max_hole_start = search_start;
+				max_hole_size = hole_size;
+			}
+
+			/*
+			 * If this free space is greater than what we need,
+			 * it must be the max free space that we have found
+			 * until now, so max_hole_start must point to the start
+			 * of this free space and the length of this free space
+			 * is stored in max_hole_size. Thus, we return
+			 * max_hole_start and max_hole_size and go back to the
+			 * caller.
+			 */
+			if (hole_size >= num_bytes) {
+				ret = 0;
+				goto out;
+			}
+		}
+
+		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
+		extent_end = key.offset + btrfs_dev_extent_length(l,
+								  dev_extent);
+		if (extent_end > search_start)
+			search_start = extent_end;
 next:
 		path->slots[0]++;
 		cond_resched();
 	}
-check_pending:
-	/* we have to make sure we didn't find an extent that has already
-	 * been allocated by the map tree or the original allocation
-	 */
-	BUG_ON(*start < search_start);
 
-	if (*start + num_bytes > search_end) {
-		ret = -ENOSPC;
-		goto error;
+	hole_size = search_end - search_start;
+	if (hole_size > max_hole_size) {
+		max_hole_start = search_start;
+		max_hole_size = hole_size;
 	}
-	/* check for pending inserts here */
-	ret = 0;
 
-error:
+	/* See above. */
+	if (hole_size < num_bytes)
+		ret = -ENOSPC;
+	else
+		ret = 0;
+
+out:
 	btrfs_free_path(path);
+error:
+	*start = max_hole_start;
+	if (len)
+		*len = max_hole_size;
 	return ret;
 }
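
Editor's note: find_free_dev_extent() now remembers the largest hole it has seen, so callers always get the best available gap even when no hole is big enough. The standalone sketch below illustrates just that scan over an in-memory array of used extents (the kernel walks btree items instead); the extent layout and sizes are made up.

#include <stdint.h>
#include <stdio.h>

struct dev_extent { uint64_t offset, len; };

static int find_free(const struct dev_extent *ext, int nr,
		     uint64_t search_start, uint64_t search_end,
		     uint64_t num_bytes, uint64_t *start, uint64_t *len)
{
	uint64_t max_hole_start = search_start, max_hole_size = 0;
	uint64_t hole;
	int i;

	for (i = 0; i < nr; i++) {
		if (ext[i].offset > search_start) {
			hole = ext[i].offset - search_start;
			if (hole > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole;
			}
			if (hole >= num_bytes)
				goto out;	/* first fit is good enough */
		}
		if (ext[i].offset + ext[i].len > search_start)
			search_start = ext[i].offset + ext[i].len;
	}

	hole = search_end - search_start;	/* trailing hole */
	if (hole > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole;
	}
out:
	*start = max_hole_start;
	*len = max_hole_size;
	return max_hole_size >= num_bytes ? 0 : -1;	/* -ENOSPC analogue */
}

int main(void)
{
	const struct dev_extent used[] = {
		{ 1 << 20, 256 << 20 },		/* 256M used at 1M */
		{ 300 << 20, 100 << 20 },	/* 100M used at 300M */
	};
	uint64_t start, len;
	int ret = find_free(used, 2, 1 << 20, 1ULL << 30,
			    512ULL << 20, &start, &len);

	printf("ret=%d start=%lluM len=%lluM\n", ret,
	       (unsigned long long)(start >> 20),
	       (unsigned long long)(len >> 20));
	return 0;
}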
 
@@ -1100,6 +1213,10 @@
 		return -ENOMEM;
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		btrfs_free_path(path);
+		return PTR_ERR(trans);
+	}
 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
 	key.type = BTRFS_DEV_ITEM_KEY;
 	key.offset = device->devid;
@@ -1183,8 +1300,8 @@
 			goto out;
 		}
 	} else {
-		bdev = open_bdev_exclusive(device_path, FMODE_READ,
-				      root->fs_info->bdev_holder);
+		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
+					  root->fs_info->bdev_holder);
 		if (IS_ERR(bdev)) {
 			ret = PTR_ERR(bdev);
 			goto out;
@@ -1193,7 +1310,7 @@
 		set_blocksize(bdev, 4096);
 		bh = btrfs_read_dev_super(bdev);
 		if (!bh) {
-			ret = -EIO;
+			ret = -EINVAL;
 			goto error_close;
 		}
 		disk_super = (struct btrfs_super_block *)bh->b_data;
@@ -1221,11 +1338,11 @@
 
 	ret = btrfs_shrink_device(device, 0);
 	if (ret)
-		goto error_brelse;
+		goto error_undo;
 
 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
 	if (ret)
-		goto error_brelse;
+		goto error_undo;
 
 	device->in_fs_metadata = 0;
 
@@ -1251,7 +1368,7 @@
 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
 	if (device->bdev) {
-		close_bdev_exclusive(device->bdev, device->mode);
+		blkdev_put(device->bdev, device->mode);
 		device->bdev = NULL;
 		device->fs_devices->open_devices--;
 	}
@@ -1294,11 +1411,18 @@
 	brelse(bh);
 error_close:
 	if (bdev)
-		close_bdev_exclusive(bdev, FMODE_READ);
+		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 out:
 	mutex_unlock(&root->fs_info->volume_mutex);
 	mutex_unlock(&uuid_mutex);
 	return ret;
+error_undo:
+	if (device->writeable) {
+		list_add(&device->dev_alloc_list,
+			 &root->fs_info->fs_devices->alloc_list);
+		root->fs_info->fs_devices->rw_devices++;
+	}
+	goto error_brelse;
 }
 
 /*
@@ -1446,7 +1570,8 @@
 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
 		return -EINVAL;
 
-	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
+	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+				  root->fs_info->bdev_holder);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
@@ -1487,11 +1612,19 @@
 
 	ret = find_next_devid(root, &device->devid);
 	if (ret) {
+		kfree(device->name);
 		kfree(device);
 		goto error;
 	}
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		kfree(device->name);
+		kfree(device);
+		ret = PTR_ERR(trans);
+		goto error;
+	}
+
 	lock_chunks(root);
 
 	device->writeable = 1;
@@ -1507,7 +1640,7 @@
 	device->dev_root = root->fs_info->dev_root;
 	device->bdev = bdev;
 	device->in_fs_metadata = 1;
-	device->mode = 0;
+	device->mode = FMODE_EXCL;
 	set_blocksize(device->bdev, 4096);
 
 	if (seeding_dev) {
@@ -1572,7 +1705,7 @@
 	mutex_unlock(&root->fs_info->volume_mutex);
 	return ret;
 error:
-	close_bdev_exclusive(bdev, 0);
+	blkdev_put(bdev, FMODE_EXCL);
 	if (seeding_dev) {
 		mutex_unlock(&uuid_mutex);
 		up_write(&sb->s_umount);
@@ -1759,7 +1892,7 @@
 		return ret;
 
 	trans = btrfs_start_transaction(root, 0);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	lock_chunks(root);
 
@@ -1912,6 +2045,9 @@
 	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
 		return -EROFS;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	mutex_lock(&dev_root->fs_info->volume_mutex);
 	dev_root = dev_root->fs_info->dev_root;
 
@@ -1930,7 +2066,7 @@
 		BUG_ON(ret);
 
 		trans = btrfs_start_transaction(dev_root, 0);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 
 		ret = btrfs_grow_device(trans, device, old_size);
 		BUG_ON(ret);
@@ -2096,6 +2232,11 @@
 
 	/* Shrinking succeeded, else we would be at "done". */
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto done;
+	}
+
 	lock_chunks(root);
 
 	device->disk_total_bytes = new_size;
@@ -2150,66 +2291,67 @@
 		return calc_size * num_stripes;
 }
 
-static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *extent_root,
-			       struct map_lookup **map_ret,
-			       u64 *num_bytes, u64 *stripe_size,
-			       u64 start, u64 type)
+/* Used to sort the devices by max_avail (descending sort) */
+int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
 {
-	struct btrfs_fs_info *info = extent_root->fs_info;
-	struct btrfs_device *device = NULL;
-	struct btrfs_fs_devices *fs_devices = info->fs_devices;
-	struct list_head *cur;
-	struct map_lookup *map = NULL;
-	struct extent_map_tree *em_tree;
-	struct extent_map *em;
-	struct list_head private_devs;
-	int min_stripe_size = 1 * 1024 * 1024;
-	u64 calc_size = 1024 * 1024 * 1024;
-	u64 max_chunk_size = calc_size;
-	u64 min_free;
-	u64 avail;
-	u64 max_avail = 0;
-	u64 dev_offset;
-	int num_stripes = 1;
-	int min_stripes = 1;
-	int sub_stripes = 0;
-	int looped = 0;
-	int ret;
-	int index;
-	int stripe_len = 64 * 1024;
+	if (((struct btrfs_device_info *)dev_info1)->max_avail >
+	    ((struct btrfs_device_info *)dev_info2)->max_avail)
+		return -1;
+	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
+		 ((struct btrfs_device_info *)dev_info2)->max_avail)
+		return 1;
+	else
+		return 0;
+}
 
-	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
-	    (type & BTRFS_BLOCK_GROUP_DUP)) {
-		WARN_ON(1);
-		type &= ~BTRFS_BLOCK_GROUP_DUP;
-	}
-	if (list_empty(&fs_devices->alloc_list))
-		return -ENOSPC;
+static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type,
+				 int *num_stripes, int *min_stripes,
+				 int *sub_stripes)
+{
+	*num_stripes = 1;
+	*min_stripes = 1;
+	*sub_stripes = 0;
 
 	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
-		num_stripes = fs_devices->rw_devices;
-		min_stripes = 2;
+		*num_stripes = fs_devices->rw_devices;
+		*min_stripes = 2;
 	}
 	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
-		num_stripes = 2;
-		min_stripes = 2;
+		*num_stripes = 2;
+		*min_stripes = 2;
 	}
 	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
 		if (fs_devices->rw_devices < 2)
 			return -ENOSPC;
-		num_stripes = 2;
-		min_stripes = 2;
+		*num_stripes = 2;
+		*min_stripes = 2;
 	}
 	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
-		num_stripes = fs_devices->rw_devices;
-		if (num_stripes < 4)
+		*num_stripes = fs_devices->rw_devices;
+		if (*num_stripes < 4)
 			return -ENOSPC;
-		num_stripes &= ~(u32)1;
-		sub_stripes = 2;
-		min_stripes = 4;
+		*num_stripes &= ~(u32)1;
+		*sub_stripes = 2;
+		*min_stripes = 4;
 	}
 
+	return 0;
+}
+
+static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices,
+				    u64 proposed_size, u64 type,
+				    int num_stripes, int small_stripe)
+{
+	int min_stripe_size = 1 * 1024 * 1024;
+	u64 calc_size = proposed_size;
+	u64 max_chunk_size = calc_size;
+	int ncopies = 1;
+
+	if (type & (BTRFS_BLOCK_GROUP_RAID1 |
+		    BTRFS_BLOCK_GROUP_DUP |
+		    BTRFS_BLOCK_GROUP_RAID10))
+		ncopies = 2;
+
 	if (type & BTRFS_BLOCK_GROUP_DATA) {
 		max_chunk_size = 10 * calc_size;
 		min_stripe_size = 64 * 1024 * 1024;
@@ -2226,51 +2368,209 @@
 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
 			     max_chunk_size);
 
-again:
-	max_avail = 0;
-	if (!map || map->num_stripes != num_stripes) {
-		kfree(map);
-		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
-		if (!map)
-			return -ENOMEM;
-		map->num_stripes = num_stripes;
-	}
-
-	if (calc_size * num_stripes > max_chunk_size) {
-		calc_size = max_chunk_size;
+	if (calc_size * num_stripes > max_chunk_size * ncopies) {
+		calc_size = max_chunk_size * ncopies;
 		do_div(calc_size, num_stripes);
-		do_div(calc_size, stripe_len);
-		calc_size *= stripe_len;
+		do_div(calc_size, BTRFS_STRIPE_LEN);
+		calc_size *= BTRFS_STRIPE_LEN;
 	}
 
 	/* we don't want tiny stripes */
-	if (!looped)
+	if (!small_stripe)
 		calc_size = max_t(u64, min_stripe_size, calc_size);
 
 	/*
-	 * we're about to do_div by the stripe_len so lets make sure
+	 * we're about to do_div by BTRFS_STRIPE_LEN so let's make sure
 	 * we end up with something bigger than a stripe
 	 */
-	calc_size = max_t(u64, calc_size, stripe_len * 4);
+	calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN);
 
-	do_div(calc_size, stripe_len);
-	calc_size *= stripe_len;
+	do_div(calc_size, BTRFS_STRIPE_LEN);
+	calc_size *= BTRFS_STRIPE_LEN;
+
+	return calc_size;
+}
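
An aside on the arithmetic in __btrfs_calc_stripe_size(): it caps the proposed size so num_stripes stripes stay within max_chunk_size (times ncopies of raw space for mirrored/duplicated profiles) and then rounds down to a whole number of 64 KiB stripes. A minimal userspace sketch of that arithmetic, ignoring the min_stripe_size floor and using plain 64-bit division in place of do_div (illustrative only, not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN (64 * 1024ULL)	/* stand-in for BTRFS_STRIPE_LEN */

/* Cap the per-device stripe size so num_stripes stripes stay within
 * max_chunk_size * ncopies of raw space, then round down to a whole
 * number of 64 KiB stripes (with a one-stripe floor). */
static uint64_t calc_stripe_size(uint64_t proposed, uint64_t max_chunk_size,
				 int num_stripes, int ncopies)
{
	uint64_t calc_size = proposed;

	if (calc_size * num_stripes > max_chunk_size * ncopies)
		calc_size = max_chunk_size * ncopies / num_stripes;

	if (calc_size < STRIPE_LEN)
		calc_size = STRIPE_LEN;

	return calc_size / STRIPE_LEN * STRIPE_LEN;
}

int main(void)
{
	/* a 1 GiB proposal spread over 3 stripes with ncopies = 2 gets
	 * capped and rounded to a 64 KiB multiple */
	printf("%llu\n", (unsigned long long)
	       calc_stripe_size(1ULL << 30, 1ULL << 30, 3, 2));
	return 0;
}
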
+
+static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map,
+						      int num_stripes)
+{
+	struct map_lookup *new;
+	size_t len = map_lookup_size(num_stripes);
+
+	BUG_ON(map->num_stripes < num_stripes);
+
+	if (map->num_stripes == num_stripes)
+		return map;
+
+	new = kmalloc(len, GFP_NOFS);
+	if (!new) {
+		/* just change map->num_stripes */
+		map->num_stripes = num_stripes;
+		return map;
+	}
+
+	memcpy(new, map, len);
+	new->num_stripes = num_stripes;
+	kfree(map);
+	return new;
+}
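
__shrink_map_lookup_stripes() above trims a struct with a flexible array member down to a smaller stripe count, and if the smaller allocation fails it simply keeps the old buffer and lowers num_stripes. A userspace sketch of the same pattern, assuming (as the BUG_ON does) that the new count is never larger than the old one; the names here are illustrative, not kernel API:

#include <stdlib.h>
#include <string.h>

struct map_sketch {
	int num_stripes;
	int stripes[];			/* flexible array member */
};

static size_t map_size(int num_stripes)
{
	return sizeof(struct map_sketch) + num_stripes * sizeof(int);
}

/* Shrink to fewer stripes; if the smaller allocation fails, keep the old
 * (larger) buffer and only lower num_stripes. */
static struct map_sketch *shrink_map(struct map_sketch *map, int num_stripes)
{
	struct map_sketch *new;

	if (map->num_stripes == num_stripes)
		return map;

	new = malloc(map_size(num_stripes));
	if (!new) {
		map->num_stripes = num_stripes;
		return map;
	}

	memcpy(new, map, map_size(num_stripes));
	new->num_stripes = num_stripes;
	free(map);
	return new;
}

int main(void)
{
	struct map_sketch *m = calloc(1, map_size(4));

	if (!m)
		return 1;
	m->num_stripes = 4;
	m = shrink_map(m, 2);
	free(m);
	return 0;
}
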
+
+/*
+ * helper to allocate device space from btrfs_device_info, in which we have
+ * stored the max free space information of every device. It is used when we
+ * cannot allocate chunks of the default size.
+ *
+ * With this helper, we can allocate a new chunk as large as possible.
+ */
+static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
+				    struct btrfs_fs_devices *fs_devices,
+				    struct btrfs_device_info *devices,
+				    int nr_device, u64 type,
+				    struct map_lookup **map_lookup,
+				    int min_stripes, u64 *stripe_size)
+{
+	int i, index, sort_again = 0;
+	int min_devices = min_stripes;
+	u64 max_avail, min_free;
+	struct map_lookup *map = *map_lookup;
+	int ret;
+
+	if (nr_device < min_stripes)
+		return -ENOSPC;
+
+	btrfs_descending_sort_devices(devices, nr_device);
+
+	max_avail = devices[0].max_avail;
+	if (!max_avail)
+		return -ENOSPC;
+
+	for (i = 0; i < nr_device; i++) {
+		/*
+		 * if dev_offset is 0, the free space of this device is less
+		 * than what we need and we have not yet searched for the max
+		 * avail extent on this device, so do it now.
+		 */
+		if (!devices[i].dev_offset) {
+			ret = find_free_dev_extent(trans, devices[i].dev,
+						   max_avail,
+						   &devices[i].dev_offset,
+						   &devices[i].max_avail);
+			if (ret != 0 && ret != -ENOSPC)
+				return ret;
+			sort_again = 1;
+		}
+	}
+
+	/* we updated the max avail free extent of each device, so sort again */
+	if (sort_again)
+		btrfs_descending_sort_devices(devices, nr_device);
+
+	if (type & BTRFS_BLOCK_GROUP_DUP)
+		min_devices = 1;
+
+	if (!devices[min_devices - 1].max_avail)
+		return -ENOSPC;
+
+	max_avail = devices[min_devices - 1].max_avail;
+	if (type & BTRFS_BLOCK_GROUP_DUP)
+		do_div(max_avail, 2);
+
+	max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
+					     min_stripes, 1);
+	if (type & BTRFS_BLOCK_GROUP_DUP)
+		min_free = max_avail * 2;
+	else
+		min_free = max_avail;
+
+	if (min_free > devices[min_devices - 1].max_avail)
+		return -ENOSPC;
+
+	map = __shrink_map_lookup_stripes(map, min_stripes);
+	*stripe_size = max_avail;
+
+	index = 0;
+	for (i = 0; i < min_stripes; i++) {
+		map->stripes[i].dev = devices[index].dev;
+		map->stripes[i].physical = devices[index].dev_offset;
+		if (type & BTRFS_BLOCK_GROUP_DUP) {
+			i++;
+			map->stripes[i].dev = devices[index].dev;
+			map->stripes[i].physical = devices[index].dev_offset +
+						   max_avail;
+		}
+		index++;
+	}
+	*map_lookup = map;
+
+	return 0;
+}
+
+static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *extent_root,
+			       struct map_lookup **map_ret,
+			       u64 *num_bytes, u64 *stripe_size,
+			       u64 start, u64 type)
+{
+	struct btrfs_fs_info *info = extent_root->fs_info;
+	struct btrfs_device *device = NULL;
+	struct btrfs_fs_devices *fs_devices = info->fs_devices;
+	struct list_head *cur;
+	struct map_lookup *map;
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	struct btrfs_device_info *devices_info;
+	struct list_head private_devs;
+	u64 calc_size = 1024 * 1024 * 1024;
+	u64 min_free;
+	u64 avail;
+	u64 dev_offset;
+	int num_stripes;
+	int min_stripes;
+	int sub_stripes;
+	int min_devices;	/* the min number of devices we need */
+	int i;
+	int ret;
+	int index;
+
+	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
+	    (type & BTRFS_BLOCK_GROUP_DUP)) {
+		WARN_ON(1);
+		type &= ~BTRFS_BLOCK_GROUP_DUP;
+	}
+	if (list_empty(&fs_devices->alloc_list))
+		return -ENOSPC;
+
+	ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
+				    &min_stripes, &sub_stripes);
+	if (ret)
+		return ret;
+
+	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
+			       GFP_NOFS);
+	if (!devices_info)
+		return -ENOMEM;
+
+	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
+	if (!map) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	map->num_stripes = num_stripes;
 
 	cur = fs_devices->alloc_list.next;
 	index = 0;
+	i = 0;
 
-	if (type & BTRFS_BLOCK_GROUP_DUP)
+	calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
+					     num_stripes, 0);
+
+	if (type & BTRFS_BLOCK_GROUP_DUP) {
 		min_free = calc_size * 2;
-	else
+		min_devices = 1;
+	} else {
 		min_free = calc_size;
-
-	/*
-	 * we add 1MB because we never use the first 1MB of the device, unless
-	 * we've looped, then we are likely allocating the maximum amount of
-	 * space left already
-	 */
-	if (!looped)
-		min_free += 1024 * 1024;
+		min_devices = min_stripes;
+	}
 
 	INIT_LIST_HEAD(&private_devs);
 	while (index < num_stripes) {
@@ -2283,27 +2583,39 @@
 		cur = cur->next;
 
 		if (device->in_fs_metadata && avail >= min_free) {
-			ret = find_free_dev_extent(trans, device,
-						   min_free, &dev_offset,
-						   &max_avail);
+			ret = find_free_dev_extent(trans, device, min_free,
+						   &devices_info[i].dev_offset,
+						   &devices_info[i].max_avail);
 			if (ret == 0) {
 				list_move_tail(&device->dev_alloc_list,
 					       &private_devs);
 				map->stripes[index].dev = device;
-				map->stripes[index].physical = dev_offset;
+				map->stripes[index].physical =
+						devices_info[i].dev_offset;
 				index++;
 				if (type & BTRFS_BLOCK_GROUP_DUP) {
 					map->stripes[index].dev = device;
 					map->stripes[index].physical =
-						dev_offset + calc_size;
+						devices_info[i].dev_offset +
+						calc_size;
 					index++;
 				}
-			}
-		} else if (device->in_fs_metadata && avail > max_avail)
-			max_avail = avail;
+			} else if (ret != -ENOSPC)
+				goto error;
+
+			devices_info[i].dev = device;
+			i++;
+		} else if (device->in_fs_metadata &&
+			   avail >= BTRFS_STRIPE_LEN) {
+			devices_info[i].dev = device;
+			devices_info[i].max_avail = avail;
+			i++;
+		}
+
 		if (cur == &fs_devices->alloc_list)
 			break;
 	}
+
 	list_splice(&private_devs, &fs_devices->alloc_list);
 	if (index < num_stripes) {
 		if (index >= min_stripes) {
@@ -2312,34 +2624,36 @@
 				num_stripes /= sub_stripes;
 				num_stripes *= sub_stripes;
 			}
-			looped = 1;
-			goto again;
+
+			map = __shrink_map_lookup_stripes(map, num_stripes);
+		} else if (i >= min_devices) {
+			ret = __btrfs_alloc_tiny_space(trans, fs_devices,
+						       devices_info, i, type,
+						       &map, min_stripes,
+						       &calc_size);
+			if (ret)
+				goto error;
+		} else {
+			ret = -ENOSPC;
+			goto error;
 		}
-		if (!looped && max_avail > 0) {
-			looped = 1;
-			calc_size = max_avail;
-			goto again;
-		}
-		kfree(map);
-		return -ENOSPC;
 	}
 	map->sector_size = extent_root->sectorsize;
-	map->stripe_len = stripe_len;
-	map->io_align = stripe_len;
-	map->io_width = stripe_len;
+	map->stripe_len = BTRFS_STRIPE_LEN;
+	map->io_align = BTRFS_STRIPE_LEN;
+	map->io_width = BTRFS_STRIPE_LEN;
 	map->type = type;
-	map->num_stripes = num_stripes;
 	map->sub_stripes = sub_stripes;
 
 	*map_ret = map;
 	*stripe_size = calc_size;
 	*num_bytes = chunk_bytes_by_type(type, calc_size,
-					 num_stripes, sub_stripes);
+					 map->num_stripes, sub_stripes);
 
 	em = alloc_extent_map(GFP_NOFS);
 	if (!em) {
-		kfree(map);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto error;
 	}
 	em->bdev = (struct block_device *)map;
 	em->start = start;
@@ -2372,7 +2686,13 @@
 		index++;
 	}
 
+	kfree(devices_info);
 	return 0;
+
+error:
+	kfree(map);
+	kfree(devices_info);
+	return ret;
 }
 
 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2740db49..7fb59d4 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -20,8 +20,11 @@
 #define __BTRFS_VOLUMES_
 
 #include <linux/bio.h>
+#include <linux/sort.h>
 #include "async-thread.h"
 
+#define BTRFS_STRIPE_LEN	(64 * 1024)
+
 struct buffer_head;
 struct btrfs_pending_bios {
 	struct bio *head;
@@ -50,7 +53,7 @@
 
 	struct block_device *bdev;
 
-	/* the mode sent to open_bdev_exclusive */
+	/* the mode sent to blkdev_get */
 	fmode_t mode;
 
 	char *name;
@@ -136,6 +139,30 @@
 	struct btrfs_bio_stripe stripes[];
 };
 
+struct btrfs_device_info {
+	struct btrfs_device *dev;
+	u64 dev_offset;
+	u64 max_avail;
+};
+
+/* Used to sort the devices by max_avail (descending sort) */
+int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2);
+
+/*
+ * sort the devices by max_avail, which stores the max free extent size of
+ * each device (descending sort)
+ */
+static inline void btrfs_descending_sort_devices(
+					struct btrfs_device_info *devices,
+					size_t nr_devices)
+{
+	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
+	     btrfs_cmp_device_free_bytes, NULL);
+}
+
+int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
+				   u64 end, u64 *length);
+
 #define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \
 			    (sizeof(struct btrfs_bio_stripe) * (n)))
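
The descending order above comes entirely from the comparator's sign convention: return negative when the first element should sort earlier. The same idea in userspace with qsort(), using a hypothetical struct rather than btrfs_device_info:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_info {
	uint64_t max_avail;
};

/* descending: the device with the most free space sorts first */
static int cmp_max_avail_desc(const void *a, const void *b)
{
	const struct dev_info *d1 = a, *d2 = b;

	if (d1->max_avail > d2->max_avail)
		return -1;
	if (d1->max_avail < d2->max_avail)
		return 1;
	return 0;
}

int main(void)
{
	struct dev_info devs[] = { { 10 }, { 300 }, { 20 } };
	size_t i;

	qsort(devs, 3, sizeof(devs[0]), cmp_max_avail_desc);
	for (i = 0; i < 3; i++)
		printf("%llu\n", (unsigned long long)devs[i].max_avail);
	return 0;
}

Subtracting the two 64-bit sizes instead would risk overflow and truncation, which is why both the kernel comparator and this sketch use explicit comparisons.
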
 
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 698fdd2..a577653 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -316,6 +316,15 @@
 int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
 		   size_t size, int flags)
 {
+	struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
+
+	/*
+	 * The permission on security.* and system.* is not checked
+	 * in permission().
+	 */
+	if (btrfs_root_readonly(root))
+		return -EROFS;
+
 	/*
 	 * If this is a request for a synthetic attribute in the system.*
 	 * namespace use the generic infrastructure to resolve a handler
@@ -336,6 +345,15 @@
 
 int btrfs_removexattr(struct dentry *dentry, const char *name)
 {
+	struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
+
+	/*
+	 * The permission on security.* and system.* is not checked
+	 * in permission().
+	 */
+	if (btrfs_root_readonly(root))
+		return -EROFS;
+
 	/*
 	 * If this is a request for a synthetic attribute in the system.*
 	 * namespace use the generic infrastructure to resolve a handler
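
With these hunks, setxattr()/removexattr() on a read-only btrfs subvolume now fail with EROFS even though the generic permission() path would not catch it. A userspace sketch of what a caller would observe; the path below is a placeholder for a file inside such a subvolume:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	/* placeholder: any file inside a read-only btrfs subvolume */
	const char *path = "/mnt/btrfs/ro-subvol/file";
	const char *val = "1";

	if (setxattr(path, "user.example", val, strlen(val), 0) == -1) {
		if (errno == EROFS)
			fprintf(stderr, "subvolume is read-only\n");
		else
			perror("setxattr");
		return 1;
	}
	return 0;
}
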
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index b9cd544..f5ec2d4 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -32,15 +32,6 @@
 #include <linux/bio.h>
 #include "compression.h"
 
-/* Plan: call deflate() with avail_in == *sourcelen,
-	avail_out = *dstlen - 12 and flush == Z_FINISH.
-	If it doesn't manage to finish,	call it again with
-	avail_in == 0 and avail_out set to the remaining 12
-	bytes for it to clean up.
-   Q: Is 12 bytes sufficient?
-*/
-#define STREAM_END_SPACE 12
-
 struct workspace {
 	z_stream inf_strm;
 	z_stream def_strm;
@@ -48,152 +39,51 @@
 	struct list_head list;
 };
 
-static LIST_HEAD(idle_workspace);
-static DEFINE_SPINLOCK(workspace_lock);
-static unsigned long num_workspace;
-static atomic_t alloc_workspace = ATOMIC_INIT(0);
-static DECLARE_WAIT_QUEUE_HEAD(workspace_wait);
-
-/*
- * this finds an available zlib workspace or allocates a new one
- * NULL or an ERR_PTR is returned if things go bad.
- */
-static struct workspace *find_zlib_workspace(void)
+static void zlib_free_workspace(struct list_head *ws)
 {
-	struct workspace *workspace;
-	int ret;
-	int cpus = num_online_cpus();
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
 
-again:
-	spin_lock(&workspace_lock);
-	if (!list_empty(&idle_workspace)) {
-		workspace = list_entry(idle_workspace.next, struct workspace,
-				       list);
-		list_del(&workspace->list);
-		num_workspace--;
-		spin_unlock(&workspace_lock);
-		return workspace;
-
-	}
-	spin_unlock(&workspace_lock);
-	if (atomic_read(&alloc_workspace) > cpus) {
-		DEFINE_WAIT(wait);
-		prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (atomic_read(&alloc_workspace) > cpus)
-			schedule();
-		finish_wait(&workspace_wait, &wait);
-		goto again;
-	}
-	atomic_inc(&alloc_workspace);
-	workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
-	if (!workspace) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
-	if (!workspace->def_strm.workspace) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-	workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
-	if (!workspace->inf_strm.workspace) {
-		ret = -ENOMEM;
-		goto fail_inflate;
-	}
-	workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-	if (!workspace->buf) {
-		ret = -ENOMEM;
-		goto fail_kmalloc;
-	}
-	return workspace;
-
-fail_kmalloc:
-	vfree(workspace->inf_strm.workspace);
-fail_inflate:
-	vfree(workspace->def_strm.workspace);
-fail:
-	kfree(workspace);
-	atomic_dec(&alloc_workspace);
-	wake_up(&workspace_wait);
-	return ERR_PTR(ret);
-}
-
-/*
- * put a workspace struct back on the list or free it if we have enough
- * idle ones sitting around
- */
-static int free_workspace(struct workspace *workspace)
-{
-	spin_lock(&workspace_lock);
-	if (num_workspace < num_online_cpus()) {
-		list_add_tail(&workspace->list, &idle_workspace);
-		num_workspace++;
-		spin_unlock(&workspace_lock);
-		if (waitqueue_active(&workspace_wait))
-			wake_up(&workspace_wait);
-		return 0;
-	}
-	spin_unlock(&workspace_lock);
 	vfree(workspace->def_strm.workspace);
 	vfree(workspace->inf_strm.workspace);
 	kfree(workspace->buf);
 	kfree(workspace);
-
-	atomic_dec(&alloc_workspace);
-	if (waitqueue_active(&workspace_wait))
-		wake_up(&workspace_wait);
-	return 0;
 }
 
-/*
- * cleanup function for module exit
- */
-static void free_workspaces(void)
+static struct list_head *zlib_alloc_workspace(void)
 {
 	struct workspace *workspace;
-	while (!list_empty(&idle_workspace)) {
-		workspace = list_entry(idle_workspace.next, struct workspace,
-				       list);
-		list_del(&workspace->list);
-		vfree(workspace->def_strm.workspace);
-		vfree(workspace->inf_strm.workspace);
-		kfree(workspace->buf);
-		kfree(workspace);
-		atomic_dec(&alloc_workspace);
-	}
+
+	workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
+	if (!workspace)
+		return ERR_PTR(-ENOMEM);
+
+	workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+	workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
+	workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+	if (!workspace->def_strm.workspace ||
+	    !workspace->inf_strm.workspace || !workspace->buf)
+		goto fail;
+
+	INIT_LIST_HEAD(&workspace->list);
+
+	return &workspace->list;
+fail:
+	zlib_free_workspace(&workspace->list);
+	return ERR_PTR(-ENOMEM);
 }
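
The new zlib_alloc_workspace() leans on the fact that vfree()/kfree() accept NULL, so a single failure label can release whichever of the three allocations actually succeeded. The same shape in plain C, where free(NULL) is likewise a no-op (illustrative names and sizes):

#include <stdlib.h>

struct ws_sketch {
	void *def_buf;
	void *inf_buf;
	void *page_buf;
};

static void free_ws(struct ws_sketch *ws)
{
	if (!ws)
		return;
	free(ws->def_buf);		/* free(NULL) is a no-op */
	free(ws->inf_buf);
	free(ws->page_buf);
	free(ws);
}

static struct ws_sketch *alloc_ws(void)
{
	struct ws_sketch *ws = calloc(1, sizeof(*ws));

	if (!ws)
		return NULL;

	ws->def_buf = malloc(256 * 1024);
	ws->inf_buf = malloc(32 * 1024);
	ws->page_buf = malloc(4096);
	if (!ws->def_buf || !ws->inf_buf || !ws->page_buf)
		goto fail;

	return ws;
fail:
	free_ws(ws);			/* releases only what was allocated */
	return NULL;
}

int main(void)
{
	free_ws(alloc_ws());
	return 0;
}
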
 
-/*
- * given an address space and start/len, compress the bytes.
- *
- * pages are allocated to hold the compressed result and stored
- * in 'pages'
- *
- * out_pages is used to return the number of pages allocated.  There
- * may be pages allocated even if we return an error
- *
- * total_in is used to return the number of bytes actually read.  It
- * may be smaller then len if we had to exit early because we
- * ran out of room in the pages array or because we cross the
- * max_out threshold.
- *
- * total_out is used to return the total number of compressed bytes
- *
- * max_out tells us the max number of bytes that we're allowed to
- * stuff into pages
- */
-int btrfs_zlib_compress_pages(struct address_space *mapping,
-			      u64 start, unsigned long len,
-			      struct page **pages,
-			      unsigned long nr_dest_pages,
-			      unsigned long *out_pages,
-			      unsigned long *total_in,
-			      unsigned long *total_out,
-			      unsigned long max_out)
+static int zlib_compress_pages(struct list_head *ws,
+			       struct address_space *mapping,
+			       u64 start, unsigned long len,
+			       struct page **pages,
+			       unsigned long nr_dest_pages,
+			       unsigned long *out_pages,
+			       unsigned long *total_in,
+			       unsigned long *total_out,
+			       unsigned long max_out)
 {
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret;
-	struct workspace *workspace;
 	char *data_in;
 	char *cpage_out;
 	int nr_pages = 0;
@@ -205,10 +95,6 @@
 	*total_out = 0;
 	*total_in = 0;
 
-	workspace = find_zlib_workspace();
-	if (IS_ERR(workspace))
-		return -1;
-
 	if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
 		printk(KERN_WARNING "deflateInit failed\n");
 		ret = -1;
@@ -222,6 +108,10 @@
 	data_in = kmap(in_page);
 
 	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+	if (out_page == NULL) {
+		ret = -1;
+		goto out;
+	}
 	cpage_out = kmap(out_page);
 	pages[0] = out_page;
 	nr_pages = 1;
@@ -260,6 +150,10 @@
 				goto out;
 			}
 			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+			if (out_page == NULL) {
+				ret = -1;
+				goto out;
+			}
 			cpage_out = kmap(out_page);
 			pages[nr_pages] = out_page;
 			nr_pages++;
@@ -314,55 +208,26 @@
 		kunmap(in_page);
 		page_cache_release(in_page);
 	}
-	free_workspace(workspace);
 	return ret;
 }
 
-/*
- * pages_in is an array of pages with compressed data.
- *
- * disk_start is the starting logical offset of this array in the file
- *
- * bvec is a bio_vec of pages from the file that we want to decompress into
- *
- * vcnt is the count of pages in the biovec
- *
- * srclen is the number of bytes in pages_in
- *
- * The basic idea is that we have a bio that was created by readpages.
- * The pages in the bio are for the uncompressed data, and they may not
- * be contiguous.  They all correspond to the range of bytes covered by
- * the compressed extent.
- */
-int btrfs_zlib_decompress_biovec(struct page **pages_in,
-			      u64 disk_start,
-			      struct bio_vec *bvec,
-			      int vcnt,
-			      size_t srclen)
+static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
+				  u64 disk_start,
+				  struct bio_vec *bvec,
+				  int vcnt,
+				  size_t srclen)
 {
-	int ret = 0;
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
+	int ret = 0, ret2;
 	int wbits = MAX_WBITS;
-	struct workspace *workspace;
 	char *data_in;
 	size_t total_out = 0;
-	unsigned long page_bytes_left;
 	unsigned long page_in_index = 0;
 	unsigned long page_out_index = 0;
-	struct page *page_out;
 	unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
 					PAGE_CACHE_SIZE;
 	unsigned long buf_start;
-	unsigned long buf_offset;
-	unsigned long bytes;
-	unsigned long working_bytes;
 	unsigned long pg_offset;
-	unsigned long start_byte;
-	unsigned long current_buf_start;
-	char *kaddr;
-
-	workspace = find_zlib_workspace();
-	if (IS_ERR(workspace))
-		return -ENOMEM;
 
 	data_in = kmap(pages_in[page_in_index]);
 	workspace->inf_strm.next_in = data_in;
@@ -372,8 +237,6 @@
 	workspace->inf_strm.total_out = 0;
 	workspace->inf_strm.next_out = workspace->buf;
 	workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
-	page_out = bvec[page_out_index].bv_page;
-	page_bytes_left = PAGE_CACHE_SIZE;
 	pg_offset = 0;
 
 	/* If it's deflate, and it's got no preset dictionary, then
@@ -389,107 +252,29 @@
 
 	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
 		printk(KERN_WARNING "inflateInit failed\n");
-		ret = -1;
-		goto out;
+		return -1;
 	}
 	while (workspace->inf_strm.total_in < srclen) {
 		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
 		if (ret != Z_OK && ret != Z_STREAM_END)
 			break;
-		/*
-		 * buf start is the byte offset we're of the start of
-		 * our workspace buffer
-		 */
-		buf_start = total_out;
 
-		/* total_out is the last byte of the workspace buffer */
+		buf_start = total_out;
 		total_out = workspace->inf_strm.total_out;
 
-		working_bytes = total_out - buf_start;
-
-		/*
-		 * start byte is the first byte of the page we're currently
-		 * copying into relative to the start of the compressed data.
-		 */
-		start_byte = page_offset(page_out) - disk_start;
-
-		if (working_bytes == 0) {
-			/* we didn't make progress in this inflate
-			 * call, we're done
-			 */
-			if (ret != Z_STREAM_END)
-				ret = -1;
+		/* we didn't make progress in this inflate call, we're done */
+		if (buf_start == total_out)
 			break;
+
+		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
+						 total_out, disk_start,
+						 bvec, vcnt,
+						 &page_out_index, &pg_offset);
+		if (ret2 == 0) {
+			ret = 0;
+			goto done;
 		}
 
-		/* we haven't yet hit data corresponding to this page */
-		if (total_out <= start_byte)
-			goto next;
-
-		/*
-		 * the start of the data we care about is offset into
-		 * the middle of our working buffer
-		 */
-		if (total_out > start_byte && buf_start < start_byte) {
-			buf_offset = start_byte - buf_start;
-			working_bytes -= buf_offset;
-		} else {
-			buf_offset = 0;
-		}
-		current_buf_start = buf_start;
-
-		/* copy bytes from the working buffer into the pages */
-		while (working_bytes > 0) {
-			bytes = min(PAGE_CACHE_SIZE - pg_offset,
-				    PAGE_CACHE_SIZE - buf_offset);
-			bytes = min(bytes, working_bytes);
-			kaddr = kmap_atomic(page_out, KM_USER0);
-			memcpy(kaddr + pg_offset, workspace->buf + buf_offset,
-			       bytes);
-			kunmap_atomic(kaddr, KM_USER0);
-			flush_dcache_page(page_out);
-
-			pg_offset += bytes;
-			page_bytes_left -= bytes;
-			buf_offset += bytes;
-			working_bytes -= bytes;
-			current_buf_start += bytes;
-
-			/* check if we need to pick another page */
-			if (page_bytes_left == 0) {
-				page_out_index++;
-				if (page_out_index >= vcnt) {
-					ret = 0;
-					goto done;
-				}
-
-				page_out = bvec[page_out_index].bv_page;
-				pg_offset = 0;
-				page_bytes_left = PAGE_CACHE_SIZE;
-				start_byte = page_offset(page_out) - disk_start;
-
-				/*
-				 * make sure our new page is covered by this
-				 * working buffer
-				 */
-				if (total_out <= start_byte)
-					goto next;
-
-				/* the next page in the biovec might not
-				 * be adjacent to the last page, but it
-				 * might still be found inside this working
-				 * buffer.  bump our offset pointer
-				 */
-				if (total_out > start_byte &&
-				    current_buf_start < start_byte) {
-					buf_offset = start_byte - buf_start;
-					working_bytes = total_out - start_byte;
-					current_buf_start = buf_start +
-						buf_offset;
-				}
-			}
-		}
-next:
 		workspace->inf_strm.next_out = workspace->buf;
 		workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
 
@@ -516,35 +301,21 @@
 	zlib_inflateEnd(&workspace->inf_strm);
 	if (data_in)
 		kunmap(pages_in[page_in_index]);
-out:
-	free_workspace(workspace);
 	return ret;
 }
 
-/*
- * a less complex decompression routine.  Our compressed data fits in a
- * single page, and we want to read a single page out of it.
- * start_byte tells us the offset into the compressed data we're interested in
- */
-int btrfs_zlib_decompress(unsigned char *data_in,
-			  struct page *dest_page,
-			  unsigned long start_byte,
-			  size_t srclen, size_t destlen)
+static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
+			   struct page *dest_page,
+			   unsigned long start_byte,
+			   size_t srclen, size_t destlen)
 {
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	struct workspace *workspace;
 	unsigned long bytes_left = destlen;
 	unsigned long total_out = 0;
 	char *kaddr;
 
-	if (destlen > PAGE_CACHE_SIZE)
-		return -ENOMEM;
-
-	workspace = find_zlib_workspace();
-	if (IS_ERR(workspace))
-		return -ENOMEM;
-
 	workspace->inf_strm.next_in = data_in;
 	workspace->inf_strm.avail_in = srclen;
 	workspace->inf_strm.total_in = 0;
@@ -565,8 +336,7 @@
 
 	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
 		printk(KERN_WARNING "inflateInit failed\n");
-		ret = -1;
-		goto out;
+		return -1;
 	}
 
 	while (bytes_left > 0) {
@@ -616,12 +386,13 @@
 		ret = 0;
 
 	zlib_inflateEnd(&workspace->inf_strm);
-out:
-	free_workspace(workspace);
 	return ret;
 }
 
-void btrfs_zlib_exit(void)
-{
-    free_workspaces();
-}
+struct btrfs_compress_op btrfs_zlib_compress = {
+	.alloc_workspace	= zlib_alloc_workspace,
+	.free_workspace		= zlib_free_workspace,
+	.compress_pages		= zlib_compress_pages,
+	.decompress_biovec	= zlib_decompress_biovec,
+	.decompress		= zlib_decompress,
+};
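
The net effect of this rewrite is an ops table: callers now reach zlib through the struct btrfs_compress_op function pointers instead of zlib-specific entry points, which is what lets another compressor be plugged in behind the same interface. A minimal userspace sketch of that dispatch pattern, with invented types that only illustrate the shape:

#include <stdio.h>
#include <string.h>

struct compress_op {
	const char *name;
	/* returns number of output bytes written, or -1 on error */
	int (*compress)(const char *in, int in_len, char *out, int out_len);
};

/* trivial "copy" backend standing in for a real compressor */
static int copy_compress(const char *in, int in_len, char *out, int out_len)
{
	if (in_len > out_len)
		return -1;
	memcpy(out, in, in_len);
	return in_len;
}

static const struct compress_op copy_op = {
	.name		= "copy",
	.compress	= copy_compress,
};

/* generic caller: only knows about the ops table, not the backend */
static int do_compress(const struct compress_op *op,
		       const char *in, int in_len, char *out, int out_len)
{
	printf("using %s backend\n", op->name);
	return op->compress(in, in_len, out, out_len);
}

int main(void)
{
	char out[64];
	int n = do_compress(&copy_op, "hello", 5, out, sizeof(out));

	return n == 5 ? 0 : 1;
}

A second backend would simply be another static struct compress_op handed to the same generic caller.
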
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 9e6c4f2..bd35212 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -2,31 +2,10 @@
 # Makefile for CEPH filesystem.
 #
 
-ifneq ($(KERNELRELEASE),)
-
 obj-$(CONFIG_CEPH_FS) += ceph.o
 
-ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
+ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
 	export.o caps.o snap.o xattr.o \
 	mds_client.o mdsmap.o strings.o ceph_frag.o \
 	debugfs.o
 
-else
-#Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-
-KERNELDIR ?= /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-
-default: all
-
-all:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules
-
-modules_install:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install
-
-clean:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) clean
-
-endif
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 60d27bc..6b61ded 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1560,9 +1560,10 @@
 		/* NOTE: no side-effects allowed, until we take s_mutex */
 
 		revoking = cap->implemented & ~cap->issued;
-		if (revoking)
-			dout(" mds%d revoking %s\n", cap->mds,
-			     ceph_cap_string(revoking));
+		dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
+		     cap->mds, cap, ceph_cap_string(cap->issued),
+		     ceph_cap_string(cap->implemented),
+		     ceph_cap_string(revoking));
 
 		if (cap == ci->i_auth_cap &&
 		    (cap->issued & CEPH_CAP_FILE_WR)) {
@@ -1658,6 +1659,8 @@
 
 		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
 			flushing = __mark_caps_flushing(inode, session);
+		else
+			flushing = 0;
 
 		mds = cap->mds;  /* remember mds, so we don't repeat */
 		sent++;
@@ -1940,6 +1943,35 @@
 	}
 }
 
+static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
+				     struct ceph_mds_session *session,
+				     struct inode *inode)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_cap *cap;
+	int delayed = 0;
+
+	spin_lock(&inode->i_lock);
+	cap = ci->i_auth_cap;
+	dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
+	     ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
+	__ceph_flush_snaps(ci, &session, 1);
+	if (ci->i_flushing_caps) {
+		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+				     __ceph_caps_used(ci),
+				     __ceph_caps_wanted(ci),
+				     cap->issued | cap->implemented,
+				     ci->i_flushing_caps, NULL);
+		if (delayed) {
+			spin_lock(&inode->i_lock);
+			__cap_delay_requeue(mdsc, ci);
+			spin_unlock(&inode->i_lock);
+		}
+	} else {
+		spin_unlock(&inode->i_lock);
+	}
+}
+
 
 /*
  * Take references to capabilities we hold, so that we don't release
@@ -2687,7 +2719,7 @@
 	ceph_add_cap(inode, session, cap_id, -1,
 		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
 		     NULL /* no caps context */);
-	try_flush_caps(inode, session, NULL);
+	kick_flushing_inode_caps(mdsc, session, inode);
 	up_read(&mdsc->snap_rwsem);
 
 	/* make sure we re-request max_size, if necessary */
@@ -2785,8 +2817,7 @@
 	case CEPH_CAP_OP_IMPORT:
 		handle_cap_import(mdsc, inode, h, session,
 				  snaptrace, snaptrace_len);
-		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
-				session);
+		ceph_check_caps(ceph_inode(inode), 0, session);
 		goto done_unlocked;
 	}
 
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 7ae1b3d..08f65fa 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -60,10 +60,13 @@
 	for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
 		req = rb_entry(rp, struct ceph_mds_request, r_node);
 
-		if (req->r_request)
-			seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds);
-		else
+		if (req->r_request && req->r_session)
+			seq_printf(s, "%lld\tmds%d\t", req->r_tid,
+				   req->r_session->s_mds);
+		else if (!req->r_request)
 			seq_printf(s, "%lld\t(no request)\t", req->r_tid);
+		else
+			seq_printf(s, "%lld\t(no session)\t", req->r_tid);
 
 		seq_printf(s, "%s", ceph_mds_op_name(req->r_op));
 
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index fa7ca04..ebafa65 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -409,7 +409,7 @@
 	spin_lock(&inode->i_lock);
 	if (ci->i_release_count == fi->dir_release_count) {
 		dout(" marking %p complete\n", inode);
-		ci->i_ceph_flags |= CEPH_I_COMPLETE;
+		/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
 		ci->i_max_offset = filp->f_pos;
 	}
 	spin_unlock(&inode->i_lock);
@@ -496,6 +496,7 @@
 
 	/* .snap dir? */
 	if (err == -ENOENT &&
+	    ceph_snap(parent) == CEPH_NOSNAP &&
 	    strcmp(dentry->d_name.name,
 		   fsc->mount_options->snapdir_name) == 0) {
 		struct inode *inode = ceph_get_snapdir(parent);
@@ -992,7 +993,7 @@
 {
 	struct inode *dir;
 
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	dir = dentry->d_parent->d_inode;
@@ -1029,28 +1030,8 @@
 static void ceph_dentry_release(struct dentry *dentry)
 {
 	struct ceph_dentry_info *di = ceph_dentry(dentry);
-	struct inode *parent_inode = NULL;
-	u64 snapid = CEPH_NOSNAP;
 
-	if (!IS_ROOT(dentry)) {
-		parent_inode = dentry->d_parent->d_inode;
-		if (parent_inode)
-			snapid = ceph_snap(parent_inode);
-	}
-	dout("dentry_release %p parent %p\n", dentry, parent_inode);
-	if (parent_inode && snapid != CEPH_SNAPDIR) {
-		struct ceph_inode_info *ci = ceph_inode(parent_inode);
-
-		spin_lock(&parent_inode->i_lock);
-		if (ci->i_shared_gen == di->lease_shared_gen ||
-		    snapid <= CEPH_MAXSNAP) {
-			dout(" clearing %p complete (d_release)\n",
-			     parent_inode);
-			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
-			ci->i_release_count++;
-		}
-		spin_unlock(&parent_inode->i_lock);
-	}
+	dout("dentry_release %p\n", dentry);
 	if (di) {
 		ceph_dentry_lru_del(dentry);
 		if (di->lease_session)
@@ -1224,6 +1205,26 @@
 	}
 }
 
+/*
+ * Return name hash for a given dentry.  This is dependent on
+ * the parent directory's hash function.
+ */
+unsigned ceph_dentry_hash(struct dentry *dn)
+{
+	struct inode *dir = dn->d_parent->d_inode;
+	struct ceph_inode_info *dci = ceph_inode(dir);
+
+	switch (dci->i_dir_layout.dl_dir_hash) {
+	case 0:	/* for backward compat */
+	case CEPH_STR_HASH_LINUX:
+		return dn->d_name.hash;
+
+	default:
+		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+				     dn->d_name.name, dn->d_name.len);
+	}
+}
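
ceph_dentry_hash() chooses between the hash the VFS already computed and one derived from the directory's dl_dir_hash type, with case 0 kept for backward compatibility. A small userspace sketch of that "pick a hash function by type, with a default fallback" shape; both hash functions here are placeholders, not Ceph's:

#include <stdio.h>
#include <string.h>

enum hash_type { HASH_DEFAULT = 0, HASH_DJB2 = 1 };

static unsigned hash_sum(const char *s, size_t len)	/* stand-in default */
{
	unsigned h = 0;

	while (len--)
		h += (unsigned char)*s++;
	return h;
}

static unsigned hash_djb2(const char *s, size_t len)
{
	unsigned h = 5381;

	while (len--)
		h = h * 33 + (unsigned char)*s++;
	return h;
}

static unsigned name_hash(enum hash_type type, const char *name, size_t len)
{
	switch (type) {
	case HASH_DEFAULT:	/* backward compat: keep the default hash */
		return hash_sum(name, len);
	default:
		return hash_djb2(name, len);
	}
}

int main(void)
{
	const char *n = "snapdir";

	printf("%u %u\n", name_hash(HASH_DEFAULT, n, strlen(n)),
	       name_hash(HASH_DJB2, n, strlen(n)));
	return 0;
}
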
+
 const struct file_operations ceph_dir_fops = {
 	.read = ceph_read_dir,
 	.readdir = ceph_readdir,
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 2297d94..e410561 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -59,7 +59,7 @@
 		dout("encode_fh %p connectable\n", dentry);
 		cfh->ino = ceph_ino(dentry->d_inode);
 		cfh->parent_ino = ceph_ino(parent->d_inode);
-		cfh->parent_name_hash = parent->d_name.hash;
+		cfh->parent_name_hash = ceph_dentry_hash(parent);
 		*max_len = connected_handle_length;
 		type = 2;
 	} else if (*max_len >= handle_length) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e61de4f..193bfa5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -297,6 +297,8 @@
 	ci->i_release_count = 0;
 	ci->i_symlink = NULL;
 
+	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
+
 	ci->i_fragtree = RB_ROOT;
 	mutex_init(&ci->i_fragtree_mutex);
 
@@ -689,6 +691,8 @@
 		inode->i_op = &ceph_dir_iops;
 		inode->i_fop = &ceph_dir_fops;
 
+		ci->i_dir_layout = iinfo->dir_layout;
+
 		ci->i_files = le64_to_cpu(info->files);
 		ci->i_subdirs = le64_to_cpu(info->subdirs);
 		ci->i_rbytes = le64_to_cpu(info->rbytes);
@@ -703,13 +707,9 @@
 		    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
 		    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
 			dout(" marking %p complete (empty)\n", inode);
-			ci->i_ceph_flags |= CEPH_I_COMPLETE;
+			/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
 			ci->i_max_offset = 2;
 		}
-
-		/* it may be better to set st_size in getattr instead? */
-		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
-			inode->i_size = ci->i_rbytes;
 		break;
 	default:
 		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
@@ -1815,7 +1815,11 @@
 		else
 			stat->dev = 0;
 		if (S_ISDIR(inode->i_mode)) {
-			stat->size = ci->i_rbytes;
+			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
+						RBYTES))
+				stat->size = ci->i_rbytes;
+			else
+				stat->size = ci->i_files + ci->i_subdirs;
 			stat->blocks = 0;
 			stat->blksize = 65536;
 		}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a50fca1..a1ee8fa 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -60,7 +60,8 @@
  * parse individual inode info
  */
 static int parse_reply_info_in(void **p, void *end,
-			       struct ceph_mds_reply_info_in *info)
+			       struct ceph_mds_reply_info_in *info,
+			       int features)
 {
 	int err = -EIO;
 
@@ -74,6 +75,12 @@
 	info->symlink = *p;
 	*p += info->symlink_len;
 
+	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
+		ceph_decode_copy_safe(p, end, &info->dir_layout,
+				      sizeof(info->dir_layout), bad);
+	else
+		memset(&info->dir_layout, 0, sizeof(info->dir_layout));
+
 	ceph_decode_32_safe(p, end, info->xattr_len, bad);
 	ceph_decode_need(p, end, info->xattr_len, bad);
 	info->xattr_data = *p;
@@ -88,12 +95,13 @@
  * target inode.
  */
 static int parse_reply_info_trace(void **p, void *end,
-				  struct ceph_mds_reply_info_parsed *info)
+				  struct ceph_mds_reply_info_parsed *info,
+				  int features)
 {
 	int err;
 
 	if (info->head->is_dentry) {
-		err = parse_reply_info_in(p, end, &info->diri);
+		err = parse_reply_info_in(p, end, &info->diri, features);
 		if (err < 0)
 			goto out_bad;
 
@@ -114,7 +122,7 @@
 	}
 
 	if (info->head->is_target) {
-		err = parse_reply_info_in(p, end, &info->targeti);
+		err = parse_reply_info_in(p, end, &info->targeti, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -134,7 +142,8 @@
  * parse readdir results
  */
 static int parse_reply_info_dir(void **p, void *end,
-				struct ceph_mds_reply_info_parsed *info)
+				struct ceph_mds_reply_info_parsed *info,
+				int features)
 {
 	u32 num, i = 0;
 	int err;
@@ -182,7 +191,7 @@
 		*p += sizeof(struct ceph_mds_reply_lease);
 
 		/* inode */
-		err = parse_reply_info_in(p, end, &info->dir_in[i]);
+		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
 		if (err < 0)
 			goto out_bad;
 		i++;
@@ -205,7 +214,8 @@
  * parse fcntl F_GETLK results
  */
 static int parse_reply_info_filelock(void **p, void *end,
-                struct ceph_mds_reply_info_parsed *info)
+				     struct ceph_mds_reply_info_parsed *info,
+				     int features)
 {
 	if (*p + sizeof(*info->filelock_reply) > end)
 		goto bad;
@@ -225,19 +235,21 @@
  * parse extra results
  */
 static int parse_reply_info_extra(void **p, void *end,
-                struct ceph_mds_reply_info_parsed *info)
+				  struct ceph_mds_reply_info_parsed *info,
+				  int features)
 {
 	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
-		return parse_reply_info_filelock(p, end, info);
+		return parse_reply_info_filelock(p, end, info, features);
 	else
-		return parse_reply_info_dir(p, end, info);
+		return parse_reply_info_dir(p, end, info, features);
 }
 
 /*
  * parse entire mds reply
  */
 static int parse_reply_info(struct ceph_msg *msg,
-			    struct ceph_mds_reply_info_parsed *info)
+			    struct ceph_mds_reply_info_parsed *info,
+			    int features)
 {
 	void *p, *end;
 	u32 len;
@@ -250,7 +262,7 @@
 	/* trace */
 	ceph_decode_32_safe(&p, end, len, bad);
 	if (len > 0) {
-		err = parse_reply_info_trace(&p, p+len, info);
+		err = parse_reply_info_trace(&p, p+len, info, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -258,7 +270,7 @@
 	/* extra */
 	ceph_decode_32_safe(&p, end, len, bad);
 	if (len > 0) {
-		err = parse_reply_info_extra(&p, p+len, info);
+		err = parse_reply_info_extra(&p, p+len, info, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -654,7 +666,7 @@
 		} else {
 			/* dir + name */
 			inode = dir;
-			hash = req->r_dentry->d_name.hash;
+			hash = ceph_dentry_hash(req->r_dentry);
 			is_hash = true;
 		}
 	}
@@ -681,9 +693,11 @@
 				dout("choose_mds %p %llx.%llx "
 				     "frag %u mds%d (%d/%d)\n",
 				     inode, ceph_vinop(inode),
-				     frag.frag, frag.mds,
+				     frag.frag, mds,
 				     (int)r, frag.ndist);
-				return mds;
+				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
+				    CEPH_MDS_STATE_ACTIVE)
+					return mds;
 			}
 
 			/* since this file/dir wasn't known to be
@@ -696,7 +710,9 @@
 				dout("choose_mds %p %llx.%llx "
 				     "frag %u mds%d (auth)\n",
 				     inode, ceph_vinop(inode), frag.frag, mds);
-				return mds;
+				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
+				    CEPH_MDS_STATE_ACTIVE)
+					return mds;
 			}
 		}
 	}
@@ -1693,7 +1709,6 @@
 	struct ceph_msg *msg;
 	int flags = 0;
 
-	req->r_mds = mds;
 	req->r_attempts++;
 	if (req->r_inode) {
 		struct ceph_cap *cap =
@@ -1780,6 +1795,8 @@
 		goto finish;
 	}
 
+	put_request_session(req);
+
 	mds = __choose_mds(mdsc, req);
 	if (mds < 0 ||
 	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
@@ -1797,6 +1814,8 @@
 			goto finish;
 		}
 	}
+	req->r_session = get_session(session);
+
 	dout("do_request mds%d session %p state %s\n", mds, session,
 	     session_state_name(session->s_state));
 	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
@@ -1809,7 +1828,6 @@
 	}
 
 	/* send request */
-	req->r_session = get_session(session);
 	req->r_resend_mds = -1;   /* forget any previous mds hint */
 
 	if (req->r_request_started == 0)   /* note request start time */
@@ -1863,7 +1881,6 @@
 		if (req->r_session &&
 		    req->r_session->s_mds == mds) {
 			dout(" kicking tid %llu\n", req->r_tid);
-			put_request_session(req);
 			__do_request(mdsc, req);
 		}
 	}
@@ -2056,8 +2073,11 @@
 			goto out;
 		} else  {
 			struct ceph_inode_info *ci = ceph_inode(req->r_inode);
-			struct ceph_cap *cap =
-				ceph_get_cap_for_mds(ci, req->r_mds);;
+			struct ceph_cap *cap = NULL;
+
+			if (req->r_session)
+				cap = ceph_get_cap_for_mds(ci,
+						   req->r_session->s_mds);
 
 			dout("already using auth");
 			if ((!cap || cap != ci->i_auth_cap) ||
@@ -2101,7 +2121,7 @@
 
 	dout("handle_reply tid %lld result %d\n", tid, result);
 	rinfo = &req->r_reply_info;
-	err = parse_reply_info(msg, rinfo);
+	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
 	mutex_unlock(&mdsc->mutex);
 
 	mutex_lock(&session->s_mutex);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index aabe563..4e3a9cc 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -35,6 +35,7 @@
  */
 struct ceph_mds_reply_info_in {
 	struct ceph_mds_reply_inode *in;
+	struct ceph_dir_layout dir_layout;
 	u32 symlink_len;
 	char *symlink;
 	u32 xattr_len;
@@ -165,7 +166,6 @@
 	struct ceph_mds_client *r_mdsc;
 
 	int r_op;                    /* mds op code */
-	int r_mds;
 
 	/* operation on what? */
 	struct inode *r_inode;              /* arg1 */
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 39c243a..f40b913 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -584,10 +584,14 @@
 	if (lastinode)
 		iput(lastinode);
 
-	dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
-	list_for_each_entry(child, &realm->children, child_item)
-		queue_realm_cap_snaps(child);
+	list_for_each_entry(child, &realm->children, child_item) {
+		dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
+		     realm, realm->ino, child, child->ino);
+		list_del_init(&child->dirty_item);
+		list_add(&child->dirty_item, &realm->dirty_item);
+	}
 
+	list_del_init(&realm->dirty_item);
 	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
 }
 
@@ -683,7 +687,9 @@
 	 * queue cap snaps _after_ we've built the new snap contexts,
 	 * so that i_head_snapc can be set appropriately.
 	 */
-	list_for_each_entry(realm, &dirty_realms, dirty_item) {
+	while (!list_empty(&dirty_realms)) {
+		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
+					 dirty_item);
 		queue_realm_cap_snaps(realm);
 	}
 
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 08b460a..9c50854 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -290,6 +290,8 @@
 
         fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
         fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
+	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
         fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
         fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
         fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
@@ -428,7 +430,8 @@
 		goto fail;
 	}
 	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
-	fsc->client->supported_features |= CEPH_FEATURE_FLOCK;
+	fsc->client->supported_features |= CEPH_FEATURE_FLOCK |
+		CEPH_FEATURE_DIRLAYOUTHASH;
 	fsc->client->monc.want_mdsmap = 1;
 
 	fsc->mount_options = fsopt;
@@ -443,13 +446,17 @@
 		goto fail_client;
 
 	err = -ENOMEM;
-	fsc->wb_wq = create_workqueue("ceph-writeback");
+	/*
+	 * The number of concurrent work items can be high but they don't
+	 * need to be processed in parallel, so limit concurrency.
+	 */
+	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
 	if (fsc->wb_wq == NULL)
 		goto fail_bdi;
-	fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
+	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
 	if (fsc->pg_inv_wq == NULL)
 		goto fail_wb_wq;
-	fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc");
+	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
 	if (fsc->trunc_wq == NULL)
 		goto fail_pg_inv_wq;
 
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 4553d88..20b907d 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -239,6 +239,7 @@
 	unsigned i_ceph_flags;
 	unsigned long i_release_count;
 
+	struct ceph_dir_layout i_dir_layout;
 	struct ceph_file_layout i_layout;
 	char *i_symlink;
 
@@ -768,6 +769,7 @@
 extern void ceph_dentry_lru_touch(struct dentry *dn);
 extern void ceph_dentry_lru_del(struct dentry *dn);
 extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
+extern unsigned ceph_dentry_hash(struct dentry *dn);
 
 /*
  * our d_ops vary depending on whether the inode is live,
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 6e12a6b..8c9eba6 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -219,6 +219,7 @@
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
 	struct ceph_inode_xattr *xattr = NULL;
+	int name_len = strlen(name);
 	int c;
 
 	p = &ci->i_xattrs.index.rb_node;
@@ -226,6 +227,8 @@
 		parent = *p;
 		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
 		c = strncmp(name, xattr->name, xattr->name_len);
+		if (c == 0 && name_len > xattr->name_len)
+			c = 1;
 		if (c < 0)
 			p = &(*p)->rb_left;
 		else if (c > 0)
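
The extra length check fixes a subtle ordering bug: strncmp() limited to the stored name's length reports equality when the lookup name is merely a longer string with the stored name as a prefix. A short userspace demonstration with hypothetical attribute names:

#include <stdio.h>
#include <string.h>

/* compare a lookup name against a stored name of known length, treating a
 * strictly longer lookup name that shares the stored prefix as "greater",
 * as the comparison in the hunk above now does */
static int name_cmp(const char *name, const char *stored, size_t stored_len)
{
	int c = strncmp(name, stored, stored_len);

	if (c == 0 && strlen(name) > stored_len)
		c = 1;
	return c;
}

int main(void)
{
	/* plain strncmp() calls these equal */
	printf("strncmp: %d\n", strncmp("user.ab", "user.a", 6));
	printf("fixed:   %d\n", name_cmp("user.ab", "user.a", 6));
	return 0;
}
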
diff --git a/fs/char_dev.c b/fs/char_dev.c
index e5b9df9..dca9e5e 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -59,7 +59,7 @@
 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
 
 /* index in the above */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % CHRDEV_MAJOR_HASH_SIZE;
 }
@@ -417,18 +417,6 @@
 	return ret;
 }
 
-int cdev_index(struct inode *inode)
-{
-	int idx;
-	struct kobject *kobj;
-
-	kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
-	if (!kobj)
-		return -1;
-	kobject_put(kobj);
-	return idx;
-}
-
 void cd_forget(struct inode *inode)
 {
 	spin_lock(&cdev_lock);
@@ -582,7 +570,6 @@
 EXPORT_SYMBOL(cdev_alloc);
 EXPORT_SYMBOL(cdev_del);
 EXPORT_SYMBOL(cdev_add);
-EXPORT_SYMBOL(cdev_index);
 EXPORT_SYMBOL(__register_chrdev);
 EXPORT_SYMBOL(__unregister_chrdev);
 EXPORT_SYMBOL(directly_mappable_cdev_bdi);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index ee45648..7cb0f7f 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -3,6 +3,7 @@
 	depends on INET
 	select NLS
 	select CRYPTO
+	select CRYPTO_MD4
 	select CRYPTO_MD5
 	select CRYPTO_HMAC
 	select CRYPTO_ARC4
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 43b19dd..d875584 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -5,7 +5,7 @@
 
 cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
 	  link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \
-	  md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
+	  cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
 	  readdir.o ioctl.o sess.o export.o
 
 cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
diff --git a/fs/cifs/README b/fs/cifs/README
index 46af99a..fe16835 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -452,6 +452,11 @@
 		if oplock (caching token) is granted and held. Note that
 		direct allows write operations larger than page size
 		to be sent to the server.
+  strictcache   Switch on strict cache mode. In this mode the client reads
+		from the cache as long as it holds an Oplock Level II;
+		otherwise it reads from the server. All written data is
+		stored in the cache, but if the client does not hold an
+		Exclusive Oplock, it writes the data to the server.
   acl   	Allow setfacl and getfacl to manage posix ACLs if server
 		supports them.  (default)
   noacl 	Do not allow setfacl and getfacl calls on this mount
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index ede9830..65829d3 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -79,11 +79,11 @@
 	spin_lock(&GlobalMid_Lock);
 	list_for_each(tmp, &server->pending_mid_q) {
 		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-		cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+		cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
 			mid_entry->midState,
 			(int)mid_entry->command,
 			mid_entry->pid,
-			mid_entry->tsk,
+			mid_entry->callback_data,
 			mid_entry->mid);
 #ifdef CONFIG_CIFS_STATS2
 		cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
@@ -218,11 +218,11 @@
 				mid_entry = list_entry(tmp3, struct mid_q_entry,
 					qhead);
 				seq_printf(m, "\tState: %d com: %d pid:"
-						" %d tsk: %p mid %d\n",
+						" %d cbdata: %p mid %d\n",
 						mid_entry->midState,
 						(int)mid_entry->command,
 						mid_entry->pid,
-						mid_entry->tsk,
+						mid_entry->callback_data,
 						mid_entry->mid);
 			}
 			spin_unlock(&GlobalMid_Lock);
@@ -331,7 +331,7 @@
 				atomic_read(&totSmBufAllocCount));
 #endif /* CONFIG_CIFS_STATS2 */
 
-	seq_printf(m, "Operations (MIDs): %d\n", midCount.counter);
+	seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
 	seq_printf(m,
 		"\n%d session %d share reconnects\n",
 		tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index c68a056..0a265ad 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -255,35 +255,6 @@
 
 }
 
-static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
-				struct list_head *mntlist)
-{
-	/* stolen from afs code */
-	int err;
-
-	mntget(newmnt);
-	err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags | MNT_SHRINKABLE, mntlist);
-	switch (err) {
-	case 0:
-		path_put(&nd->path);
-		nd->path.mnt = newmnt;
-		nd->path.dentry = dget(newmnt->mnt_root);
-		schedule_delayed_work(&cifs_dfs_automount_task,
-				      cifs_dfs_mountpoint_expiry_timeout);
-		break;
-	case -EBUSY:
-		/* someone else made a mount here whilst we were busy */
-		while (d_mountpoint(nd->path.dentry) &&
-		       follow_down(&nd->path))
-			;
-		err = 0;
-	default:
-		mntput(newmnt);
-		break;
-	}
-	return err;
-}
-
 static void dump_referral(const struct dfs_info3_param *ref)
 {
 	cFYI(1, "DFS: ref path: %s", ref->path_name);
@@ -293,27 +264,23 @@
 				ref->path_consumed);
 }
 
-
-static void*
-cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+/*
+ * Create a vfsmount that we can automount
+ */
+static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
 {
 	struct dfs_info3_param *referrals = NULL;
 	unsigned int num_referrals = 0;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsSesInfo *ses;
-	char *full_path = NULL;
+	char *full_path;
 	int xid, i;
-	int rc = 0;
-	struct vfsmount *mnt = ERR_PTR(-ENOENT);
+	int rc;
+	struct vfsmount *mnt;
 	struct tcon_link *tlink;
 
 	cFYI(1, "in %s", __func__);
-	BUG_ON(IS_ROOT(dentry));
-
-	xid = GetXid();
-
-	dput(nd->path.dentry);
-	nd->path.dentry = dget(dentry);
+	BUG_ON(IS_ROOT(mntpt));
 
 	/*
 	 * The MSDFS spec states that paths in DFS referral requests and
@@ -321,66 +288,83 @@
 	 * the double backslashes usually used in the UNC. This function
 	 * gives us the latter, so we must adjust the result.
 	 */
-	full_path = build_path_from_dentry(dentry);
-	if (full_path == NULL) {
-		rc = -ENOMEM;
-		goto out_err;
-	}
+	mnt = ERR_PTR(-ENOMEM);
+	full_path = build_path_from_dentry(mntpt);
+	if (full_path == NULL)
+		goto cdda_exit;
 
-	cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
+	cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
 	tlink = cifs_sb_tlink(cifs_sb);
 	if (IS_ERR(tlink)) {
-		rc = PTR_ERR(tlink);
-		goto out_err;
+		mnt = ERR_CAST(tlink);
+		goto free_full_path;
 	}
 	ses = tlink_tcon(tlink)->ses;
 
+	xid = GetXid();
 	rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
 		&num_referrals, &referrals,
 		cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	FreeXid(xid);
 
 	cifs_put_tlink(tlink);
 
+	mnt = ERR_PTR(-ENOENT);
 	for (i = 0; i < num_referrals; i++) {
 		int len;
-		dump_referral(referrals+i);
+		dump_referral(referrals + i);
 		/* connect to a node */
 		len = strlen(referrals[i].node_name);
 		if (len < 2) {
 			cERROR(1, "%s: Net Address path too short: %s",
 					__func__, referrals[i].node_name);
-			rc = -EINVAL;
-			goto out_err;
+			mnt = ERR_PTR(-EINVAL);
+			break;
 		}
 		mnt = cifs_dfs_do_refmount(cifs_sb,
 				full_path, referrals + i);
 		cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
 					referrals[i].node_name, mnt);
-
-		/* complete mount procedure if we accured submount */
 		if (!IS_ERR(mnt))
-			break;
+			goto success;
 	}
 
-	/* we need it cause for() above could exit without valid submount */
-	rc = PTR_ERR(mnt);
-	if (IS_ERR(mnt))
-		goto out_err;
+	/* no valid submounts were found; return error from get_dfs_path() by
+	 * preference */
+	if (rc != 0)
+		mnt = ERR_PTR(rc);
 
-	rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list);
-
-out:
-	FreeXid(xid);
+success:
 	free_dfs_info_array(referrals, num_referrals);
+free_full_path:
 	kfree(full_path);
+cdda_exit:
 	cFYI(1, "leaving %s" , __func__);
-	return ERR_PTR(rc);
-out_err:
-	path_put(&nd->path);
-	goto out;
+	return mnt;
+}
+
+/*
+ * Attempt to automount the referral
+ */
+struct vfsmount *cifs_dfs_d_automount(struct path *path)
+{
+	struct vfsmount *newmnt;
+
+	cFYI(1, "in %s", __func__);
+
+	newmnt = cifs_dfs_do_automount(path->dentry);
+	if (IS_ERR(newmnt)) {
+		cFYI(1, "leaving %s [automount failed]" , __func__);
+		return newmnt;
+	}
+
+	mntget(newmnt); /* prevent immediate expiration */
+	mnt_set_expiry(newmnt, &cifs_dfs_automount_list);
+	schedule_delayed_work(&cifs_dfs_automount_task,
+			      cifs_dfs_mountpoint_expiry_timeout);
+	cFYI(1, "leaving %s [ok]" , __func__);
+	return newmnt;
 }
 
 const struct inode_operations cifs_dfs_referral_inode_operations = {
-	.follow_link = cifs_dfs_follow_mountpoint,
 };
-
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 7852cd6..ac51cd2 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -40,6 +40,7 @@
 #define CIFS_MOUNT_FSCACHE	0x8000 /* local caching enabled */
 #define CIFS_MOUNT_MF_SYMLINKS	0x10000 /* Minshall+French Symlinks enabled */
 #define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
+#define CIFS_MOUNT_STRICT_IO	0x40000 /* strict cache mode */
 
 struct cifs_sb_info {
 	struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 430f510..fc0fd4f 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,10 +44,14 @@
 	int charlen, outlen = 0;
 	int maxwords = maxbytes / 2;
 	char tmp[NLS_MAX_CHARSET_SIZE];
+	__u16 ftmp;
 
-	for (i = 0; i < maxwords && from[i]; i++) {
-		charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
-					     NLS_MAX_CHARSET_SIZE);
+	for (i = 0; i < maxwords; i++) {
+		ftmp = get_unaligned_le16(&from[i]);
+		if (ftmp == 0)
+			break;
+
+		charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
 		if (charlen > 0)
 			outlen += charlen;
 		else
@@ -58,9 +62,9 @@
 }
 
 /*
- * cifs_mapchar - convert a little-endian char to proper char in codepage
+ * cifs_mapchar - convert a host-endian char to proper char in codepage
  * @target - where converted character should be copied
- * @src_char - 2 byte little-endian source character
+ * @src_char - 2 byte host-endian source character
  * @cp - codepage to which character should be converted
  * @mapchar - should character be mapped according to mapchars mount option?
  *
@@ -69,7 +73,7 @@
  * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
  */
 static int
-cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
 	     bool mapchar)
 {
 	int len = 1;
@@ -82,7 +86,7 @@
 	 *     build_path_from_dentry are modified, as they use slash as
 	 *     separator.
 	 */
-	switch (le16_to_cpu(src_char)) {
+	switch (src_char) {
 	case UNI_COLON:
 		*target = ':';
 		break;
@@ -109,8 +113,7 @@
 	return len;
 
 cp_convert:
-	len = cp->uni2char(le16_to_cpu(src_char), target,
-			   NLS_MAX_CHARSET_SIZE);
+	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
 	if (len <= 0) {
 		*target = '?';
 		len = 1;
@@ -149,6 +152,7 @@
 	int nullsize = nls_nullsize(codepage);
 	int fromwords = fromlen / 2;
 	char tmp[NLS_MAX_CHARSET_SIZE];
+	__u16 ftmp;
 
 	/*
 	 * because the chars can be of varying widths, we need to take care
@@ -158,19 +162,23 @@
 	 */
 	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
 
-	for (i = 0; i < fromwords && from[i]; i++) {
+	for (i = 0; i < fromwords; i++) {
+		ftmp = get_unaligned_le16(&from[i]);
+		if (ftmp == 0)
+			break;
+
 		/*
 		 * check to see if converting this character might make the
 		 * conversion bleed into the null terminator
 		 */
 		if (outlen >= safelen) {
-			charlen = cifs_mapchar(tmp, from[i], codepage, mapchar);
+			charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar);
 			if ((outlen + charlen) > (tolen - nullsize))
 				break;
 		}
 
 		/* put converted char into 'to' buffer */
-		charlen = cifs_mapchar(&to[outlen], from[i], codepage, mapchar);
+		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
 		outlen += charlen;
 	}
 
@@ -193,24 +201,21 @@
 {
 	int charlen;
 	int i;
-	wchar_t *wchar_to = (wchar_t *)to; /* needed to quiet sparse */
+	wchar_t wchar_to; /* needed to quiet sparse */
 
 	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
-
-		/* works for 2.4.0 kernel or later */
-		charlen = codepage->char2uni(from, len, &wchar_to[i]);
+		charlen = codepage->char2uni(from, len, &wchar_to);
 		if (charlen < 1) {
-			cERROR(1, "strtoUCS: char2uni of %d returned %d",
-				(int)*from, charlen);
+			cERROR(1, "strtoUCS: char2uni of 0x%x returned %d",
+				*from, charlen);
 			/* A question mark */
-			to[i] = cpu_to_le16(0x003f);
+			wchar_to = 0x003f;
 			charlen = 1;
-		} else
-			to[i] = cpu_to_le16(wchar_to[i]);
-
+		}
+		put_unaligned_le16(wchar_to, &to[i]);
 	}
 
-	to[i] = 0;
+	put_unaligned_le16(0, &to[i]);
 	return i;
 }
 
@@ -252,3 +257,79 @@
 	return dst;
 }
 
+/*
+ * Convert 16 bit Unicode pathname to wire format from string in current code
+ * page. Conversion may involve remapping the six characters that are
+ * only legal in POSIX-like OS (if they are present in the string). Path
+ * names are little endian 16 bit Unicode on the wire
+ */
+int
+cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+		 const struct nls_table *cp, int mapChars)
+{
+	int i, j, charlen;
+	int len_remaining = maxlen;
+	char src_char;
+	__u16 temp;
+
+	if (!mapChars)
+		return cifs_strtoUCS(target, source, PATH_MAX, cp);
+
+	for (i = 0, j = 0; i < maxlen; j++) {
+		src_char = source[i];
+		switch (src_char) {
+		case 0:
+			put_unaligned_le16(0, &target[j]);
+			goto ctoUCS_out;
+		case ':':
+			temp = UNI_COLON;
+			break;
+		case '*':
+			temp = UNI_ASTERIK;
+			break;
+		case '?':
+			temp = UNI_QUESTION;
+			break;
+		case '<':
+			temp = UNI_LESSTHAN;
+			break;
+		case '>':
+			temp = UNI_GRTRTHAN;
+			break;
+		case '|':
+			temp = UNI_PIPE;
+			break;
+		/*
+		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
+		 * until all the calls to build_path_from_dentry are modified,
+		 * as they use backslash as separator.
+		 */
+		default:
+			charlen = cp->char2uni(source+i, len_remaining,
+						&temp);
+			/*
+			 * if no match, use question mark, which at least in
+			 * some cases serves as wild card
+			 */
+			if (charlen < 1) {
+				temp = 0x003f;
+				charlen = 1;
+			}
+			len_remaining -= charlen;
+			/*
+			 * character may take more than one byte in the source
+			 * string, but will take exactly two bytes in the
+			 * target string
+			 */
+			i += charlen;
+			continue;
+		}
+		put_unaligned_le16(temp, &target[j]);
+		i++; /* move to next char in source string */
+		len_remaining--;
+	}
+
+ctoUCS_out:
+	return i;
+}
+
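The conversion helpers above now go through get_unaligned_le16()/put_unaligned_le16() because the UTF-16 data sits inside an SMB payload that is not guaranteed to be 2-byte aligned. As a rough caller sketch for the newly added cifsConvertToUCS() (the wrapper name and the PATH_MAX sizing are assumptions for illustration, not part of the patch):

static int example_path_to_wire(__le16 *wire_buf, const char *path,
				const struct nls_table *nls, int mapchars)
{
	/*
	 * With mapchars set, the six reserved characters (':', '*', '?',
	 * '<', '>', '|') are remapped; maxlen bounds how much of the
	 * source string is consumed.
	 */
	return cifsConvertToUCS(wire_buf, path, PATH_MAX, nls, mapchars);
}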
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index a437ec3..beeebf1 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -41,9 +41,12 @@
 ;
 
 
-/* security id for everyone */
+/* security id for everyone/world system group */
 static const struct cifs_sid sid_everyone = {
 	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+/* security id for Authenticated Users system group */
+static const struct cifs_sid sid_authusers = {
+	1, 1, {0, 0, 0, 0, 0, 5}, {11} };
 /* group users */
 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
 
@@ -365,10 +368,14 @@
 	if (num_aces  > 0) {
 		umode_t user_mask = S_IRWXU;
 		umode_t group_mask = S_IRWXG;
-		umode_t other_mask = S_IRWXO;
+		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
 
 		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
 				GFP_KERNEL);
+		if (!ppace) {
+			cERROR(1, "DACL memory allocation error");
+			return;
+		}
 
 		for (i = 0; i < num_aces; ++i) {
 			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
@@ -390,6 +397,12 @@
 						     ppace[i]->type,
 						     &fattr->cf_mode,
 						     &other_mask);
+			if (compare_sids(&(ppace[i]->sid), &sid_authusers))
+				access_flags_to_mode(ppace[i]->access_req,
+						     ppace[i]->type,
+						     &fattr->cf_mode,
+						     &other_mask);
+
 
 /*			memcpy((void *)(&(cifscred->aces[i])),
 				(void *)ppace[i],
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 66f3d50..a51585f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -24,7 +24,6 @@
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifs_debug.h"
-#include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
 #include "ntlmssp.h"
@@ -37,11 +36,6 @@
 /* Note that the smb header signature field on input contains the
 	sequence number before this function is called */
 
-extern void mdfour(unsigned char *out, unsigned char *in, int n);
-extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
-		       unsigned char *p24);
-
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
 				struct TCP_Server_Info *server, char *signature)
 {
@@ -234,6 +228,7 @@
 /* first calculate 24 bytes ntlm response and then 16 byte session key */
 int setup_ntlm_response(struct cifsSesInfo *ses)
 {
+	int rc = 0;
 	unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
 	char temp_key[CIFS_SESS_KEY_SIZE];
 
@@ -247,13 +242,26 @@
 	}
 	ses->auth_key.len = temp_len;
 
-	SMBNTencrypt(ses->password, ses->server->cryptkey,
+	rc = SMBNTencrypt(ses->password, ses->server->cryptkey,
 			ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+	if (rc) {
+		cFYI(1, "%s Can't generate NTLM response, error: %d",
+			__func__, rc);
+		return rc;
+	}
 
-	E_md4hash(ses->password, temp_key);
-	mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
+	rc = E_md4hash(ses->password, temp_key);
+	if (rc) {
+		cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc);
+		return rc;
+	}
 
-	return 0;
+	rc = mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
+	if (rc)
+		cFYI(1, "%s Can't generate NTLM session key, error: %d",
+			__func__, rc);
+
+	return rc;
 }
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
@@ -649,9 +657,10 @@
 	get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
 
 	tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
-	if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
+	if (IS_ERR(tfm_arc4)) {
+		rc = PTR_ERR(tfm_arc4);
 		cERROR(1, "could not allocate crypto API arc4\n");
-		return PTR_ERR(tfm_arc4);
+		return rc;
 	}
 
 	desc.tfm = tfm_arc4;
@@ -700,14 +709,13 @@
 	unsigned int size;
 
 	server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
-	if (!server->secmech.hmacmd5 ||
-			IS_ERR(server->secmech.hmacmd5)) {
+	if (IS_ERR(server->secmech.hmacmd5)) {
 		cERROR(1, "could not allocate crypto hmacmd5\n");
 		return PTR_ERR(server->secmech.hmacmd5);
 	}
 
 	server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
-	if (!server->secmech.md5 || IS_ERR(server->secmech.md5)) {
+	if (IS_ERR(server->secmech.md5)) {
 		cERROR(1, "could not allocate crypto md5\n");
 		rc = PTR_ERR(server->secmech.md5);
 		goto crypto_allocate_md5_fail;
diff --git a/fs/cifs/cifsencrypt.h b/fs/cifs/cifsencrypt.h
deleted file mode 100644
index 15d2ec0..0000000
--- a/fs/cifs/cifsencrypt.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *   fs/cifs/cifsencrypt.h
- *
- *   Copyright (c) International Business Machines  Corp., 2005
- *   Author(s): Steve French (sfrench@us.ibm.com)
- *
- *   Externs for misc. small encryption routines
- *   so we do not have to put them in cifsproto.h
- *
- *   This library is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU Lesser General Public License as published
- *   by the Free Software Foundation; either version 2.1 of the License, or
- *   (at your option) any later version.
- *
- *   This library is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
- *   the GNU Lesser General Public License for more details.
- *
- *   You should have received a copy of the GNU Lesser General Public License
- *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* md4.c */
-extern void mdfour(unsigned char *out, unsigned char *in, int n);
-/* smbdes.c */
-extern void E_P16(unsigned char *p14, unsigned char *p16);
-extern void E_P24(unsigned char *p21, const unsigned char *c8,
-		  unsigned char *p24);
-
-
-
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 5e7075d..f297013 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -77,7 +77,11 @@
 module_param(cifs_max_pending, int, 0);
 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
 				   "Default: 50 Range: 2 to 256");
-
+unsigned short echo_retries = 5;
+module_param(echo_retries, ushort, 0644);
+MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
+			       "reconnecting server. Default: 5. 0 means "
+			       "never reconnect.");
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
@@ -174,6 +178,12 @@
 		goto out_no_root;
 	}
 
+	/* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
+	if (cifs_sb_master_tcon(cifs_sb)->nocase)
+		sb->s_d_op = &cifs_ci_dentry_ops;
+	else
+		sb->s_d_op = &cifs_dentry_ops;
+
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 		cFYI(1, "export ops supported");
@@ -590,10 +600,17 @@
 {
 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
 	ssize_t written;
+	int rc;
 
 	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
-	if (!CIFS_I(inode)->clientCanCacheAll)
-		filemap_fdatawrite(inode->i_mapping);
+
+	if (CIFS_I(inode)->clientCanCacheAll)
+		return written;
+
+	rc = filemap_fdatawrite(inode->i_mapping);
+	if (rc)
+		cFYI(1, "cifs_file_aio_write: %d rc on %p inode", rc, inode);
+
 	return written;
 }
 
@@ -723,6 +740,25 @@
 	.setlease = cifs_setlease,
 };
 
+const struct file_operations cifs_file_strict_ops = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.aio_read = cifs_strict_readv,
+	.aio_write = cifs_strict_writev,
+	.open = cifs_open,
+	.release = cifs_close,
+	.lock = cifs_lock,
+	.fsync = cifs_strict_fsync,
+	.flush = cifs_flush,
+	.mmap = cifs_file_strict_mmap,
+	.splice_read = generic_file_splice_read,
+	.llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+	.unlocked_ioctl	= cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+	.setlease = cifs_setlease,
+};
+
 const struct file_operations cifs_file_direct_ops = {
 	/* no aio, no readv -
 	   BB reevaluate whether they can be done with directio, no cache */
@@ -741,6 +777,7 @@
 	.llseek = cifs_llseek,
 	.setlease = cifs_setlease,
 };
+
 const struct file_operations cifs_file_nobrl_ops = {
 	.read = do_sync_read,
 	.write = do_sync_write,
@@ -759,6 +796,24 @@
 	.setlease = cifs_setlease,
 };
 
+const struct file_operations cifs_file_strict_nobrl_ops = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.aio_read = cifs_strict_readv,
+	.aio_write = cifs_strict_writev,
+	.open = cifs_open,
+	.release = cifs_close,
+	.fsync = cifs_strict_fsync,
+	.flush = cifs_flush,
+	.mmap = cifs_file_strict_mmap,
+	.splice_read = generic_file_splice_read,
+	.llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+	.unlocked_ioctl	= cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+	.setlease = cifs_setlease,
+};
+
 const struct file_operations cifs_file_direct_nobrl_ops = {
 	/* no mmap, no aio, no readv -
 	   BB reevaluate whether they can be done with directio, no cache */
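The strict-cache file_operations added above (cifs_file_strict_ops and cifs_file_strict_nobrl_ops) route read, write, fsync and mmap through the cifs_strict_* methods, which consult oplock state before trusting the page cache. A hypothetical selection helper, only to show how CIFS_MOUNT_STRICT_IO distinguishes the table families (the function and its brl_ok parameter are invented; the real selection logic lives elsewhere in the driver):

static const struct file_operations *
example_select_fops(struct cifs_sb_info *cifs_sb, bool brl_ok)
{
	/* strict cache mount: use the cifs_strict_* method tables */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		return brl_ok ? &cifs_file_strict_ops
			      : &cifs_file_strict_nobrl_ops;

	/* default, oplock-driven caching */
	return brl_ok ? &cifs_file_ops : &cifs_file_nobrl_ops;
}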
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 897b2b2..a9371b6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -61,6 +61,7 @@
 		       struct dentry *);
 extern int cifs_revalidate_file(struct file *filp);
 extern int cifs_revalidate_dentry(struct dentry *);
+extern void cifs_invalidate_mapping(struct inode *inode);
 extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int cifs_setattr(struct dentry *, struct iattr *);
 
@@ -72,19 +73,27 @@
 /* Functions related to files and directories */
 extern const struct file_operations cifs_file_ops;
 extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
-extern const struct file_operations cifs_file_nobrl_ops;
-extern const struct file_operations cifs_file_direct_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
+extern const struct file_operations cifs_file_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_direct_nobrl_ops;
+extern const struct file_operations cifs_file_strict_nobrl_ops;
 extern int cifs_open(struct inode *inode, struct file *file);
 extern int cifs_close(struct inode *inode, struct file *file);
 extern int cifs_closedir(struct inode *inode, struct file *file);
 extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
-			 size_t read_size, loff_t *poffset);
+			      size_t read_size, loff_t *poffset);
+extern ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+				 unsigned long nr_segs, loff_t pos);
 extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
-			 size_t write_size, loff_t *poffset);
+			       size_t write_size, loff_t *poffset);
+extern ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+				  unsigned long nr_segs, loff_t pos);
 extern int cifs_lock(struct file *, int, struct file_lock *);
 extern int cifs_fsync(struct file *, int);
+extern int cifs_strict_fsync(struct file *, int);
 extern int cifs_flush(struct file *, fl_owner_t id);
 extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
 extern const struct file_operations cifs_dir_ops;
 extern int cifs_dir_open(struct inode *inode, struct file *file);
 extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
@@ -93,6 +102,12 @@
 extern const struct dentry_operations cifs_dentry_ops;
 extern const struct dentry_operations cifs_ci_dentry_ops;
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
+extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
+#else
+#define cifs_dfs_d_automount NULL
+#endif
+
 /* Functions related to symlinks */
 extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
 extern void cifs_put_link(struct dentry *direntry,
@@ -112,5 +127,5 @@
 extern const struct export_operations cifs_export_ops;
 #endif /* EXPERIMENTAL */
 
-#define CIFS_VERSION   "1.68"
+#define CIFS_VERSION   "1.71"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 606ca8b..17afb0f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -161,46 +161,41 @@
 	int srv_count; /* reference counter */
 	/* 15 character server name + 0x20 16th byte indicating type = srv */
 	char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+	enum statusEnum tcpStatus; /* what we think the status is */
 	char *hostname; /* hostname portion of UNC string */
 	struct socket *ssocket;
 	struct sockaddr_storage dstaddr;
 	struct sockaddr_storage srcaddr; /* locally bind to this IP */
+#ifdef CONFIG_NET_NS
+	struct net *net;
+#endif
 	wait_queue_head_t response_q;
 	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
 	struct list_head pending_mid_q;
-	void *Server_NlsInfo;	/* BB - placeholder for future NLS info  */
-	unsigned short server_codepage;	/* codepage for the server    */
-	enum protocolEnum protocolType;
-	char versionMajor;
-	char versionMinor;
-	bool svlocal:1;			/* local server or remote */
 	bool noblocksnd;		/* use blocking sendmsg */
 	bool noautotune;		/* do not autotune send buf sizes */
 	bool tcp_nodelay;
 	atomic_t inFlight;  /* number of requests on the wire to server */
-#ifdef CONFIG_CIFS_STATS2
-	atomic_t inSend; /* requests trying to send */
-	atomic_t num_waiters;   /* blocked waiting to get in sendrecv */
-#endif
-	enum statusEnum tcpStatus; /* what we think the status is */
 	struct mutex srv_mutex;
 	struct task_struct *tsk;
 	char server_GUID[16];
 	char secMode;
+	bool session_estab; /* mark when very first sess is established */
+	u16 dialect; /* dialect index that server chose */
 	enum securityEnum secType;
 	unsigned int maxReq;	/* Clients should submit no more */
 	/* than maxReq distinct unanswered SMBs to the server when using  */
 	/* multiplexed reads or writes */
 	unsigned int maxBuf;	/* maxBuf specifies the maximum */
 	/* message size the server can send or receive for non-raw SMBs */
+	/* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */
+	/* when socket is setup (and during reconnect) before NegProt sent */
 	unsigned int max_rw;	/* maxRw specifies the maximum */
 	/* message size the server can send or receive for */
 	/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
 	unsigned int max_vcs;	/* maximum number of smb sessions, at least
 				   those that can be specified uniquely with
 				   vcnumbers */
-	char sessid[4];		/* unique token id for this session */
-	/* (returned on Negotiate */
 	int capabilities; /* allow selective disabling of caps by smb sess */
 	int timeAdj;  /* Adjust for difference in server time zone in sec */
 	__u16 CurrentMid;         /* multiplex id - rotating counter */
@@ -210,20 +205,53 @@
 	__u32 sequence_number; /* for signing, protected by srv_mutex */
 	struct session_key session_key;
 	unsigned long lstrp; /* when we got last response from this server */
-	u16 dialect; /* dialect index that server chose */
 	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
 	/* extended security flavors that server supports */
+	bool	sec_ntlmssp;		/* supports NTLMSSP */
+	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
 	bool	sec_kerberos;		/* supports plain Kerberos */
 	bool	sec_mskerberos;		/* supports legacy MS Kerberos */
-	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
-	bool	sec_ntlmssp;		/* supports NTLMSSP */
-	bool session_estab; /* mark when very first sess is established */
+	struct delayed_work	echo; /* echo ping workqueue job */
 #ifdef CONFIG_CIFS_FSCACHE
 	struct fscache_cookie   *fscache; /* client index cache cookie */
 #endif
+#ifdef CONFIG_CIFS_STATS2
+	atomic_t inSend; /* requests trying to send */
+	atomic_t num_waiters;   /* blocked waiting to get in sendrecv */
+#endif
 };
 
 /*
+ * Macros to allow the TCP_Server_Info->net field and related code to drop out
+ * when CONFIG_NET_NS isn't set.
+ */
+
+#ifdef CONFIG_NET_NS
+
+static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+{
+	return srv->net;
+}
+
+static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+{
+	srv->net = net;
+}
+
+#else
+
+static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+{
+	return &init_net;
+}
+
+static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+{
+}
+
+#endif
+
+/*
  * Session structure.  One of these for each uid session with a particular host
  */
 struct cifsSesInfo {
@@ -446,11 +474,11 @@
 	/* BB add in lists for dirty pages i.e. write caching info for oplock */
 	struct list_head openFileList;
 	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
-	unsigned long time;	/* jiffies of last update/check of inode */
-	bool clientCanCacheRead:1;	/* read oplock */
-	bool clientCanCacheAll:1;	/* read and writebehind oplock */
-	bool delete_pending:1;		/* DELETE_ON_CLOSE is set */
-	bool invalid_mapping:1;		/* pagecache is invalid */
+	bool clientCanCacheRead;	/* read oplock */
+	bool clientCanCacheAll;		/* read and writebehind oplock */
+	bool delete_pending;		/* DELETE_ON_CLOSE is set */
+	bool invalid_mapping;		/* pagecache is invalid */
+	unsigned long time;		/* jiffies of last update of inode */
 	u64  server_eof;		/* current file size on server */
 	u64  uniqueid;			/* server inode number */
 	u64  createtime;		/* creation time on server */
@@ -508,6 +536,18 @@
 
 #endif
 
+struct mid_q_entry;
+
+/*
+ * This is the prototype for the mid callback function. When creating one,
+ * take special care to avoid deadlocks. Things to bear in mind:
+ *
+ * - it will be called by cifsd
+ * - the GlobalMid_Lock will be held
+ * - the mid will be removed from the pending_mid_q list
+ */
+typedef void (mid_callback_t)(struct mid_q_entry *mid);
+
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
 	struct list_head qhead;	/* mids waiting on reply from this server */
@@ -519,7 +559,8 @@
 	unsigned long when_sent; /* time when smb send finished */
 	unsigned long when_received; /* when demux complete (taken off wire) */
 #endif
-	struct task_struct *tsk;	/* task waiting for response */
+	mid_callback_t *callback; /* call completion callback */
+	void *callback_data;	  /* general purpose pointer for callback */
 	struct smb_hdr *resp_buf;	/* response buffer */
 	int midState;	/* wish this were enum but can not pass to wait_event */
 	__u8 command;	/* smb command code */
@@ -613,7 +654,7 @@
 #define   MID_REQUEST_SUBMITTED 2
 #define   MID_RESPONSE_RECEIVED 4
 #define   MID_RETRY_NEEDED      8 /* session closed while this request out */
-#define   MID_NO_RESP_NEEDED 0x10
+#define   MID_RESPONSE_MALFORMED 0x10
 
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
@@ -622,12 +663,9 @@
 #define   CIFS_IOVEC            4    /* array of response buffers */
 
 /* Type of Request to SendReceive2 */
-#define   CIFS_STD_OP	        0    /* normal request timeout */
-#define   CIFS_LONG_OP          1    /* long op (up to 45 sec, oplock time) */
-#define   CIFS_VLONG_OP         2    /* sloow op - can take up to 180 seconds */
-#define   CIFS_BLOCKING_OP      4    /* operation can block */
-#define   CIFS_ASYNC_OP         8    /* do not wait for response */
-#define   CIFS_TIMEOUT_MASK 0x00F    /* only one of 5 above set in req */
+#define   CIFS_BLOCKING_OP      1    /* operation can block */
+#define   CIFS_ASYNC_OP         2    /* do not wait for response */
+#define   CIFS_TIMEOUT_MASK 0x003    /* only one of above set in req */
 #define   CIFS_LOG_ERROR    0x010    /* log NT STATUS if non-zero */
 #define   CIFS_LARGE_BUF_OP 0x020    /* large request buffer */
 #define   CIFS_NO_RESP      0x040    /* no response buffer required */
@@ -790,6 +828,9 @@
 GLOBAL_EXTERN unsigned int cifs_min_small;  /* min size of small buf pool */
 GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
 
+/* reconnect after this many failed echo attempts */
+GLOBAL_EXTERN unsigned short echo_retries;
+
 void cifs_oplock_break(struct work_struct *work);
 void cifs_oplock_break_get(struct cifsFileInfo *cfile);
 void cifs_oplock_break_put(struct cifsFileInfo *cfile);
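The mid_callback_t documentation above is the contract for the new asynchronous request path: the callback runs in cifsd context, with GlobalMid_Lock held and the mid already unlinked from pending_mid_q. A minimal sketch of that pattern, modelled on the cifs_echo_callback()/cifs_call_async() pair this series adds in cifssmb.c and cifsproto.h (the example_* names are illustrative only):

static void example_async_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;

	/* runs under GlobalMid_Lock, so it must not sleep */
	DeleteMidQEntry(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
}

static int example_send_async(struct TCP_Server_Info *server,
			      struct smb_hdr *in_buf)
{
	/* cifsd invokes the callback once the matching reply arrives */
	return cifs_call_async(server, in_buf, example_async_callback, server);
}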
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index de36b09..b5c8cc5 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -23,6 +23,7 @@
 #define _CIFSPDU_H
 
 #include <net/sock.h>
+#include <asm/unaligned.h>
 #include "smbfsctl.h"
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
@@ -50,6 +51,7 @@
 #define SMB_COM_SETATTR               0x09 /* trivial response */
 #define SMB_COM_LOCKING_ANDX          0x24 /* trivial response */
 #define SMB_COM_COPY                  0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_ECHO                  0x2B /* echo request */
 #define SMB_COM_OPEN_ANDX             0x2D /* Legacy open for old servers */
 #define SMB_COM_READ_ANDX             0x2E
 #define SMB_COM_WRITE_ANDX            0x2F
@@ -425,11 +427,49 @@
 	__u16 Mid;
 	__u8 WordCount;
 } __attribute__((packed));
-/* given a pointer to an smb_hdr retrieve the value of byte count */
-#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
-#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
+
+/* given a pointer to an smb_hdr retrieve a char pointer to the byte count */
+#define BCC(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + \
+			 (2 * (smb_var)->WordCount))
+
 /* given a pointer to an smb_hdr retrieve the pointer to the byte area */
-#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
+#define pByteArea(smb_var) (BCC(smb_var) + 2)
+
+/* get the converted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc(struct smb_hdr *hdr)
+{
+	__u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+	return get_unaligned(bc_ptr);
+}
+
+/* get the unconverted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc_le(struct smb_hdr *hdr)
+{
+	__le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+	return get_unaligned_le16(bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in host-byte order */
+static inline void
+put_bcc(__u16 count, struct smb_hdr *hdr)
+{
+	__u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+	put_unaligned(count, bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in little-endian */
+static inline void
+put_bcc_le(__u16 count, struct smb_hdr *hdr)
+{
+	__le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+	put_unaligned_le16(count, bc_ptr);
+}
 
 /*
  * Computer Name Length (since Netbios name was length 16 with last byte 0x20)
@@ -760,6 +800,20 @@
  *
  */
 
+typedef struct smb_com_echo_req {
+	struct	smb_hdr hdr;
+	__le16	EchoCount;
+	__le16	ByteCount;
+	char	Data[1];
+} __attribute__((packed)) ECHO_REQ;
+
+typedef struct smb_com_echo_rsp {
+	struct	smb_hdr hdr;
+	__le16	SequenceNumber;
+	__le16	ByteCount;
+	char	Data[1];
+} __attribute__((packed)) ECHO_RSP;
+
 typedef struct smb_com_logoff_andx_req {
 	struct smb_hdr hdr;	/* wct = 2 */
 	__u8 AndXCommand;
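With BCC() now evaluating to a pointer instead of an lvalue, byte counts are read and written through the get_bcc()/get_bcc_le()/put_bcc()/put_bcc_le() helpers above, which also cope with unaligned headers. A small usage sketch (the helper name and the "extra" adjustment are invented for illustration; coalesce_t2() in connect.c below switches to the same pattern):

static void example_grow_bcc(struct smb_hdr *hdr, __u16 extra)
{
	/* read the little-endian wire ByteCount without assuming alignment */
	__u16 bcc = get_bcc_le(hdr);

	/* write the adjusted value back in little-endian order */
	put_bcc_le(bcc + extra, hdr);
}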
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e6d1481..8096f27 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -61,6 +61,12 @@
 		const char *fullpath, const struct dfs_info3_param *ref,
 		char **devname);
 /* extern void renew_parental_timestamps(struct dentry *direntry);*/
+extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
+					struct TCP_Server_Info *server);
+extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
+extern int cifs_call_async(struct TCP_Server_Info *server,
+			   struct smb_hdr *in_buf, mid_callback_t *callback,
+			   void *cbdata);
 extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
 			struct smb_hdr * /* input */ ,
 			struct smb_hdr * /* out */ ,
@@ -79,6 +85,8 @@
 extern bool is_valid_oplock_break(struct smb_hdr *smb,
 				  struct TCP_Server_Info *);
 extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
+extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
+			    unsigned int bytes_written);
 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
 extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
 extern unsigned int smbCalcSize(struct smb_hdr *ptr);
@@ -347,12 +355,13 @@
 			const __u16 netfid, const __u64 len,
 			const __u64 offset, const __u32 numUnlock,
 			const __u32 numLock, const __u8 lockType,
-			const bool waitFlag);
+			const bool waitFlag, const __u8 oplock_level);
 extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
 			const __u16 smb_file_id, const int get_flag,
 			const __u64 len, struct file_lock *,
 			const __u16 lock_type, const bool waitFlag);
 extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBEcho(struct TCP_Server_Info *server);
 extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
 
 extern struct cifsSesInfo *sesInfoAlloc(void);
@@ -366,7 +375,7 @@
 extern int cifs_verify_signature(struct smb_hdr *,
 				 struct TCP_Server_Info *server,
 				__u32 expected_sequence_number);
-extern void SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
+extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
 extern int setup_ntlm_response(struct cifsSesInfo *);
 extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *);
 extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
@@ -416,4 +425,11 @@
 extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
 		const unsigned char *path,
 		struct cifs_sb_info *cifs_sb, int xid);
+extern int mdfour(unsigned char *, unsigned char *, int);
+extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
+extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
+			unsigned char *p24);
+extern void E_P16(unsigned char *p14, unsigned char *p16);
+extern void E_P24(unsigned char *p21, const unsigned char *c8,
+			unsigned char *p24);
 #endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2f6795e..904aa47 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -136,9 +136,6 @@
 		}
 	}
 
-	if (ses->status == CifsExiting)
-		return -EIO;
-
 	/*
 	 * Give demultiplex thread up to 10 seconds to reconnect, should be
 	 * greater than cifs socket timeout which is 7 seconds
@@ -156,7 +153,7 @@
 		 * retrying until process is killed or server comes
 		 * back on-line
 		 */
-		if (!tcon->retry || ses->status == CifsExiting) {
+		if (!tcon->retry) {
 			cFYI(1, "gave up waiting on reconnect in smb_init");
 			return -EHOSTDOWN;
 		}
@@ -331,37 +328,35 @@
 
 static int validate_t2(struct smb_t2_rsp *pSMB)
 {
-	int rc = -EINVAL;
-	int total_size;
-	char *pBCC;
+	unsigned int total_size;
 
-	/* check for plausible wct, bcc and t2 data and parm sizes */
+	/* check for plausible wct */
+	if (pSMB->hdr.WordCount < 10)
+		goto vt2_err;
+
 	/* check for parm and data offset going beyond end of smb */
-	if (pSMB->hdr.WordCount >= 10) {
-		if ((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
-		   (le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) {
-			/* check that bcc is at least as big as parms + data */
-			/* check that bcc is less than negotiated smb buffer */
-			total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount);
-			if (total_size < 512) {
-				total_size +=
-					le16_to_cpu(pSMB->t2_rsp.DataCount);
-				/* BCC le converted in SendReceive */
-				pBCC = (pSMB->hdr.WordCount * 2) +
-					sizeof(struct smb_hdr) +
-					(char *)pSMB;
-				if ((total_size <= (*(u16 *)pBCC)) &&
-				   (total_size <
-					CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) {
-					return 0;
-				}
-			}
-		}
-	}
+	if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
+	    get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
+		goto vt2_err;
+
+	/* check that bcc is at least as big as parms + data */
+	/* check that bcc is less than negotiated smb buffer */
+	total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
+	if (total_size >= 512)
+		goto vt2_err;
+
+	total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
+	if (total_size > get_bcc(&pSMB->hdr) ||
+	    total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
+		goto vt2_err;
+
+	return 0;
+vt2_err:
 	cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
 		sizeof(struct smb_t2_rsp) + 16);
-	return rc;
+	return -EINVAL;
 }
+
 int
 CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
 {
@@ -452,7 +447,6 @@
 		server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
 				(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
 		server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
-		GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
 		/* even though we do not use raw we might as well set this
 		accurately, in case we ever find a need for it */
 		if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
@@ -566,7 +560,6 @@
 			(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
 	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
 	cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
-	GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
 	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
 	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
 	server->timeAdj *= 60;
@@ -706,6 +699,53 @@
 	return rc;
 }
 
+/*
+ * This is a no-op for now. We're not really interested in the reply, but
+ * rather in the fact that the server sent one and that server->lstrp
+ * gets updated.
+ *
+ * FIXME: maybe we should consider checking that the reply matches request?
+ */
+static void
+cifs_echo_callback(struct mid_q_entry *mid)
+{
+	struct TCP_Server_Info *server = mid->callback_data;
+
+	DeleteMidQEntry(mid);
+	atomic_dec(&server->inFlight);
+	wake_up(&server->request_q);
+}
+
+int
+CIFSSMBEcho(struct TCP_Server_Info *server)
+{
+	ECHO_REQ *smb;
+	int rc = 0;
+
+	cFYI(1, "In echo request");
+
+	rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
+	if (rc)
+		return rc;
+
+	/* set up echo request */
+	smb->hdr.Tid = cpu_to_le16(0xffff);
+	smb->hdr.WordCount = 1;
+	put_unaligned_le16(1, &smb->EchoCount);
+	put_bcc_le(1, &smb->hdr);
+	smb->Data[0] = 'a';
+	smb->hdr.smb_buf_length += 3;
+
+	rc = cifs_call_async(server, (struct smb_hdr *)smb,
+				cifs_echo_callback, server);
+	if (rc)
+		cFYI(1, "Echo request failed: %d", rc);
+
+	cifs_small_buf_release(smb);
+
+	return rc;
+}
+
 int
 CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
 {
@@ -1193,7 +1233,7 @@
 	pSMB->ByteCount = cpu_to_le16(count);
 	/* long_op set to 1 to allow for oplock break timeouts */
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-			(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_opens);
 	if (rc) {
 		cFYI(1, "Error in Open = %d", rc);
@@ -1306,7 +1346,7 @@
 	pSMB->ByteCount = cpu_to_le16(count);
 	/* long_op set to 1 to allow for oplock break timeouts */
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-			(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_opens);
 	if (rc) {
 		cFYI(1, "Error in Open = %d", rc);
@@ -1388,7 +1428,7 @@
 	iov[0].iov_base = (char *)pSMB;
 	iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
 	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
-			 &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR);
+			 &resp_buf_type, CIFS_LOG_ERROR);
 	cifs_stats_inc(&tcon->num_reads);
 	pSMBr = (READ_RSP *)iov[0].iov_base;
 	if (rc) {
@@ -1663,7 +1703,8 @@
 CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
 	    const __u16 smb_file_id, const __u64 len,
 	    const __u64 offset, const __u32 numUnlock,
-	    const __u32 numLock, const __u8 lockType, const bool waitFlag)
+	    const __u32 numLock, const __u8 lockType,
+	    const bool waitFlag, const __u8 oplock_level)
 {
 	int rc = 0;
 	LOCK_REQ *pSMB = NULL;
@@ -1691,6 +1732,7 @@
 	pSMB->NumberOfLocks = cpu_to_le16(numLock);
 	pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
 	pSMB->LockType = lockType;
+	pSMB->OplockLevel = oplock_level;
 	pSMB->AndXCommand = 0xFF;	/* none */
 	pSMB->Fid = smb_file_id; /* netfid stays le */
 
@@ -3087,7 +3129,7 @@
 	iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
 
 	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
-			 CIFS_STD_OP);
+			 0);
 	cifs_stats_inc(&tcon->num_acl_get);
 	if (rc) {
 		cFYI(1, "Send error in QuerySecDesc = %d", rc);
@@ -4869,7 +4911,6 @@
 		   __u16 fid, __u32 pid_of_opener, bool SetAllocation)
 {
 	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
-	char *data_offset;
 	struct file_end_of_file_info *parm_data;
 	int rc = 0;
 	__u16 params, param_offset, offset, byte_count, count;
@@ -4893,8 +4934,6 @@
 	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
 	offset = param_offset + params;
 
-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
-
 	count = sizeof(struct file_end_of_file_info);
 	pSMB->MaxParameterCount = cpu_to_le16(2);
 	/* BB find exact max SMB PDU from sess structure BB */
@@ -5562,7 +5601,7 @@
 	}
 
 	/* make sure list_len doesn't go past end of SMB */
-	end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
+	end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
 	if ((char *)ea_response_data + list_len > end_of_smb) {
 		cFYI(1, "EA list appears to go beyond SMB");
 		rc = -EIO;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a65d311..8d6c17a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -52,8 +52,8 @@
 #define CIFS_PORT 445
 #define RFC1001_PORT 139
 
-extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
-			 unsigned char *p24);
+/* SMB echo "timeout" -- FIXME: tunable? */
+#define SMB_ECHO_INTERVAL (60 * HZ)
 
 extern mempool_t *cifs_req_poolp;
 
@@ -84,6 +84,7 @@
 	bool no_xattr:1;   /* set if xattr (EA) support should be disabled*/
 	bool server_ino:1; /* use inode numbers from server ie UniqueId */
 	bool direct_io:1;
+	bool strict_io:1; /* strict cache behavior */
 	bool remap:1;      /* set to remap seven reserved chars in filenames */
 	bool posix_paths:1; /* unset to not ask for posix pathnames. */
 	bool no_linux_ext:1;
@@ -152,6 +153,7 @@
 
 	/* before reconnecting the tcp session, mark the smb session (uid)
 		and the tid bad so they are not used until reconnected */
+	cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
 	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp, &server->smb_ses_list) {
 		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
@@ -163,7 +165,9 @@
 		}
 	}
 	spin_unlock(&cifs_tcp_ses_lock);
+
 	/* do not want to be sending data on a socket we are freeing */
+	cFYI(1, "%s: tearing down socket", __func__);
 	mutex_lock(&server->srv_mutex);
 	if (server->ssocket) {
 		cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
@@ -180,22 +184,20 @@
 	kfree(server->session_key.response);
 	server->session_key.response = NULL;
 	server->session_key.len = 0;
+	server->lstrp = jiffies;
+	mutex_unlock(&server->srv_mutex);
 
+	/* mark submitted MIDs for retry and issue callback */
+	cFYI(1, "%s: issuing mid callbacks", __func__);
 	spin_lock(&GlobalMid_Lock);
-	list_for_each(tmp, &server->pending_mid_q) {
-		mid_entry = list_entry(tmp, struct
-					mid_q_entry,
-					qhead);
-		if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
-				/* Mark other intransit requests as needing
-				   retry so we do not immediately mark the
-				   session bad again (ie after we reconnect
-				   below) as they timeout too */
+	list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+		if (mid_entry->midState == MID_REQUEST_SUBMITTED)
 			mid_entry->midState = MID_RETRY_NEEDED;
-		}
+		list_del_init(&mid_entry->qhead);
+		mid_entry->callback(mid_entry);
 	}
 	spin_unlock(&GlobalMid_Lock);
-	mutex_unlock(&server->srv_mutex);
 
 	while ((server->tcpStatus != CifsExiting) &&
 	       (server->tcpStatus != CifsGood)) {
@@ -212,10 +214,9 @@
 			if (server->tcpStatus != CifsExiting)
 				server->tcpStatus = CifsGood;
 			spin_unlock(&GlobalMid_Lock);
-	/*		atomic_set(&server->inFlight,0);*/
-			wake_up(&server->response_q);
 		}
 	}
+
 	return rc;
 }
 
@@ -229,9 +230,8 @@
 static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
 {
 	struct smb_t2_rsp *pSMBt;
-	int total_data_size;
-	int data_in_this_rsp;
 	int remaining;
+	__u16 total_data_size, data_in_this_rsp;
 
 	if (pSMB->Command != SMB_COM_TRANSACTION2)
 		return 0;
@@ -245,8 +245,8 @@
 
 	pSMBt = (struct smb_t2_rsp *)pSMB;
 
-	total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
-	data_in_this_rsp = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+	data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
 	remaining = total_data_size - data_in_this_rsp;
 
@@ -272,21 +272,18 @@
 {
 	struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
 	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)pTargetSMB;
-	int total_data_size;
-	int total_in_buf;
-	int remaining;
-	int total_in_buf2;
 	char *data_area_of_target;
 	char *data_area_of_buf2;
-	__u16 byte_count;
+	int remaining;
+	__u16 byte_count, total_data_size, total_in_buf, total_in_buf2;
 
-	total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
+	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
 
-	if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
+	if (total_data_size !=
+	    get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount))
 		cFYI(1, "total data size of primary and secondary t2 differ");
-	}
 
-	total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+	total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
 	remaining = total_data_size - total_in_buf;
 
@@ -296,28 +293,28 @@
 	if (remaining == 0) /* nothing to do, ignore */
 		return 0;
 
-	total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
+	total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount);
 	if (remaining < total_in_buf2) {
 		cFYI(1, "transact2 2nd response contains too much data");
 	}
 
 	/* find end of first SMB data area */
 	data_area_of_target = (char *)&pSMBt->hdr.Protocol +
-				le16_to_cpu(pSMBt->t2_rsp.DataOffset);
+				get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
 	/* validate target area */
 
-	data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol +
-					le16_to_cpu(pSMB2->t2_rsp.DataOffset);
+	data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol +
+				get_unaligned_le16(&pSMB2->t2_rsp.DataOffset);
 
 	data_area_of_target += total_in_buf;
 
 	/* copy second buffer into end of first buffer */
 	memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
 	total_in_buf += total_in_buf2;
-	pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf);
-	byte_count = le16_to_cpu(BCC_LE(pTargetSMB));
+	put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+	byte_count = get_bcc_le(pTargetSMB);
 	byte_count += total_in_buf2;
-	BCC_LE(pTargetSMB) = cpu_to_le16(byte_count);
+	put_bcc_le(byte_count, pTargetSMB);
 
 	byte_count = pTargetSMB->smb_buf_length;
 	byte_count += total_in_buf2;
@@ -331,7 +328,31 @@
 		return 0; /* we are done */
 	} else /* more responses to go */
 		return 1;
+}
 
+static void
+cifs_echo_request(struct work_struct *work)
+{
+	int rc;
+	struct TCP_Server_Info *server = container_of(work,
+					struct TCP_Server_Info, echo.work);
+
+	/*
+	 * We cannot send an echo until the NEGOTIATE_PROTOCOL request is
+	 * done, which is indicated by maxBuf != 0. Also, no need to ping if
+	 * we got a response recently
+	 */
+	if (server->maxBuf == 0 ||
+	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+		goto requeue_echo;
+
+	rc = CIFSSMBEcho(server);
+	if (rc)
+		cFYI(1, "Unable to send echo request to server: %s",
+			server->hostname);
+
+requeue_echo:
+	queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
 }
 
 static int
@@ -345,8 +366,7 @@
 	struct msghdr smb_msg;
 	struct kvec iov;
 	struct socket *csocket = server->ssocket;
-	struct list_head *tmp;
-	struct cifsSesInfo *ses;
+	struct list_head *tmp, *tmp2;
 	struct task_struct *task_to_wake = NULL;
 	struct mid_q_entry *mid_entry;
 	char temp;
@@ -399,7 +419,20 @@
 		smb_msg.msg_control = NULL;
 		smb_msg.msg_controllen = 0;
 		pdu_length = 4; /* enough to get RFC1001 header */
+
 incomplete_rcv:
+		if (echo_retries > 0 &&
+		    time_after(jiffies, server->lstrp +
+					(echo_retries * SMB_ECHO_INTERVAL))) {
+			cERROR(1, "Server %s has not responded in %d seconds. "
+				  "Reconnecting...", server->hostname,
+				  (echo_retries * SMB_ECHO_INTERVAL / HZ));
+			cifs_reconnect(server);
+			csocket = server->ssocket;
+			wake_up(&server->response_q);
+			continue;
+		}
+
 		length =
 		    kernel_recvmsg(csocket, &smb_msg,
 				&iov, 1, pdu_length, 0 /* BB other flags? */);
@@ -550,25 +583,36 @@
 		else if (reconnect == 1)
 			continue;
 
-		length += 4; /* account for rfc1002 hdr */
+		total_read += 4; /* account for rfc1002 hdr */
 
+		dump_smb(smb_buffer, total_read);
 
-		dump_smb(smb_buffer, length);
-		if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
-			cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
-			continue;
-		}
+		/*
+		 * We know that we received enough to get to the MID as we
+		 * checked the pdu_length earlier. Now check to see
+		 * if the rest of the header is OK. We borrow the length
+		 * var for the rest of the loop to avoid a new stack var.
+		 *
+		 * 48 bytes is enough to display the header and a little bit
+		 * into the payload for debugging purposes.
+		 */
+		length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
+		if (length != 0)
+			cifs_dump_mem("Bad SMB: ", smb_buffer,
+					min_t(unsigned int, total_read, 48));
 
+		mid_entry = NULL;
+		server->lstrp = jiffies;
 
-		task_to_wake = NULL;
 		spin_lock(&GlobalMid_Lock);
-		list_for_each(tmp, &server->pending_mid_q) {
+		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
 
 			if ((mid_entry->mid == smb_buffer->Mid) &&
 			    (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
 			    (mid_entry->command == smb_buffer->Command)) {
-				if (check2ndT2(smb_buffer,server->maxBuf) > 0) {
+				if (length == 0 &&
+				   check2ndT2(smb_buffer, server->maxBuf) > 0) {
 					/* We have a multipart transact2 resp */
 					isMultiRsp = true;
 					if (mid_entry->resp_buf) {
@@ -603,20 +647,24 @@
 				mid_entry->resp_buf = smb_buffer;
 				mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
-				task_to_wake = mid_entry->tsk;
-				mid_entry->midState = MID_RESPONSE_RECEIVED;
+				if (length == 0)
+					mid_entry->midState =
+							MID_RESPONSE_RECEIVED;
+				else
+					mid_entry->midState =
+							MID_RESPONSE_MALFORMED;
 #ifdef CONFIG_CIFS_STATS2
 				mid_entry->when_received = jiffies;
 #endif
-				/* so we do not time out requests to  server
-				which is still responding (since server could
-				be busy but not dead) */
-				server->lstrp = jiffies;
+				list_del_init(&mid_entry->qhead);
+				mid_entry->callback(mid_entry);
 				break;
 			}
+			mid_entry = NULL;
 		}
 		spin_unlock(&GlobalMid_Lock);
-		if (task_to_wake) {
+
+		if (mid_entry != NULL) {
 			/* Was previous buf put in mpx struct for multi-rsp? */
 			if (!isMultiRsp) {
 				/* smb buffer will be freed by user thread */
@@ -625,11 +673,13 @@
 				else
 					smallbuf = NULL;
 			}
-			wake_up_process(task_to_wake);
+		} else if (length != 0) {
+			/* response sanity checks failed */
+			continue;
 		} else if (!is_valid_oplock_break(smb_buffer, server) &&
 			   !isMultiRsp) {
 			cERROR(1, "No task to wake, unknown frame received! "
-				   "NumMids %d", midCount.counter);
+				   "NumMids %d", atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
 				      sizeof(struct smb_hdr));
 #ifdef CONFIG_CIFS_DEBUG2
@@ -677,44 +727,16 @@
 	if (smallbuf) /* no sense logging a debug message if NULL */
 		cifs_small_buf_release(smallbuf);
 
-	/*
-	 * BB: we shouldn't have to do any of this. It shouldn't be
-	 * possible to exit from the thread with active SMB sessions
-	 */
-	spin_lock(&cifs_tcp_ses_lock);
-	if (list_empty(&server->pending_mid_q)) {
-		/* loop through server session structures attached to this and
-		    mark them dead */
-		list_for_each(tmp, &server->smb_ses_list) {
-			ses = list_entry(tmp, struct cifsSesInfo,
-					 smb_ses_list);
-			ses->status = CifsExiting;
-			ses->server = NULL;
-		}
-		spin_unlock(&cifs_tcp_ses_lock);
-	} else {
-		/* although we can not zero the server struct pointer yet,
-		since there are active requests which may depnd on them,
-		mark the corresponding SMB sessions as exiting too */
-		list_for_each(tmp, &server->smb_ses_list) {
-			ses = list_entry(tmp, struct cifsSesInfo,
-					 smb_ses_list);
-			ses->status = CifsExiting;
-		}
-
+	if (!list_empty(&server->pending_mid_q)) {
 		spin_lock(&GlobalMid_Lock);
-		list_for_each(tmp, &server->pending_mid_q) {
-		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-			if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
-				cFYI(1, "Clearing Mid 0x%x - waking up ",
+		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+			cFYI(1, "Clearing Mid 0x%x - issuing callback",
 					 mid_entry->mid);
-				task_to_wake = mid_entry->tsk;
-				if (task_to_wake)
-					wake_up_process(task_to_wake);
-			}
+			list_del_init(&mid_entry->qhead);
+			mid_entry->callback(mid_entry);
 		}
 		spin_unlock(&GlobalMid_Lock);
-		spin_unlock(&cifs_tcp_ses_lock);
 		/* 1/8th of sec is more than enough time for them to exit */
 		msleep(125);
 	}
@@ -732,18 +754,6 @@
 		coming home not much else we can do but free the memory */
 	}
 
-	/* last chance to mark ses pointers invalid
-	if there are any pointing to this (e.g
-	if a crazy root user tried to kill cifsd
-	kernel thread explicitly this might happen) */
-	/* BB: This shouldn't be necessary, see above */
-	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each(tmp, &server->smb_ses_list) {
-		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
-		ses->server = NULL;
-	}
-	spin_unlock(&cifs_tcp_ses_lock);
-
 	kfree(server->hostname);
 	task_to_wake = xchg(&server->tsk, NULL);
 	kfree(server);
@@ -1113,6 +1123,8 @@
 		} else if (!strnicmp(data, "uid", 3) && value && *value) {
 			vol->linux_uid = simple_strtoul(value, &value, 0);
 			uid_specified = true;
+		} else if (!strnicmp(data, "cruid", 5) && value && *value) {
+			vol->cred_uid = simple_strtoul(value, &value, 0);
 		} else if (!strnicmp(data, "forceuid", 8)) {
 			override_uid = 1;
 		} else if (!strnicmp(data, "noforceuid", 10)) {
@@ -1353,6 +1365,8 @@
 			vol->direct_io = 1;
 		} else if (strnicmp(data, "forcedirectio", 13) == 0) {
 			vol->direct_io = 1;
+		} else if (strnicmp(data, "strictcache", 11) == 0) {
+			vol->strict_io = 1;
 		} else if (strnicmp(data, "noac", 4) == 0) {
 			printk(KERN_WARNING "CIFS: Mount option noac not "
 				"supported. Instead set "
@@ -1577,6 +1591,9 @@
 
 	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+		if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
+			continue;
+
 		if (!match_address(server, addr,
 				   (struct sockaddr *)&vol->srcaddr))
 			continue;
@@ -1607,9 +1624,13 @@
 		return;
 	}
 
+	put_net(cifs_net_ns(server));
+
 	list_del_init(&server->tcp_ses_list);
 	spin_unlock(&cifs_tcp_ses_lock);
 
+	cancel_delayed_work_sync(&server->echo);
+
 	spin_lock(&GlobalMid_Lock);
 	server->tcpStatus = CifsExiting;
 	spin_unlock(&GlobalMid_Lock);
@@ -1679,6 +1700,7 @@
 		goto out_err;
 	}
 
+	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
 	tcp_ses->hostname = extract_hostname(volume_info->UNC);
 	if (IS_ERR(tcp_ses->hostname)) {
 		rc = PTR_ERR(tcp_ses->hostname);
@@ -1699,8 +1721,10 @@
 		volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
 	tcp_ses->session_estab = false;
 	tcp_ses->sequence_number = 0;
+	tcp_ses->lstrp = jiffies;
 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
 
 	/*
 	 * at this point we are the only ones with the pointer
@@ -1749,11 +1773,16 @@
 
 	cifs_fscache_get_client_cookie(tcp_ses);
 
+	/* queue echo request delayed work */
+	queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+
 	return tcp_ses;
 
 out_err_crypto_release:
 	cifs_crypto_shash_release(tcp_ses);
 
+	put_net(cifs_net_ns(tcp_ses));
+
 out_err:
 	if (tcp_ses) {
 		if (!IS_ERR(tcp_ses->hostname))
@@ -2265,8 +2294,8 @@
 	}
 
 	if (socket == NULL) {
-		rc = sock_create_kern(sfamily, SOCK_STREAM,
-				      IPPROTO_TCP, &socket);
+		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
+				   IPPROTO_TCP, &socket, 1);
 		if (rc < 0) {
 			cERROR(1, "Error %d creating socket", rc);
 			server->ssocket = NULL;
@@ -2578,6 +2607,8 @@
 	if (pvolume_info->multiuser)
 		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
 					    CIFS_MOUNT_NO_PERM);
+	if (pvolume_info->strict_io)
+		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
 	if (pvolume_info->direct_io) {
 		cFYI(1, "mounting share using direct i/o");
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
@@ -2934,8 +2965,8 @@
 	TCONX_RSP *pSMBr;
 	unsigned char *bcc_ptr;
 	int rc = 0;
-	int length, bytes_left;
-	__u16 count;
+	int length;
+	__u16 bytes_left, count;
 
 	if (ses == NULL)
 		return -EIO;
@@ -2963,7 +2994,7 @@
 		bcc_ptr++;              /* skip password */
 		/* already aligned so no need to do it below */
 	} else {
-		pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+		pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 		/* BB FIXME add code to fail this if NTLMv2 or Kerberos
 		   specified as required (when that support is added to
 		   the vfs in the future) as only NTLM or the much
@@ -2979,9 +3010,10 @@
 					 bcc_ptr);
 		else
 #endif /* CIFS_WEAK_PW_HASH */
-		SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr);
+		rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
+					bcc_ptr);
 
-		bcc_ptr += CIFS_SESS_KEY_SIZE;
+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
 		if (ses->capabilities & CAP_UNICODE) {
 			/* must align unicode strings */
 			*bcc_ptr = 0; /* null byte password */
@@ -3019,7 +3051,7 @@
 	pSMB->ByteCount = cpu_to_le16(count);
 
 	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
-			 CIFS_STD_OP);
+			 0);
 
 	/* above now done in SendReceive */
 	if ((rc == 0) && (tcon != NULL)) {
@@ -3029,7 +3061,7 @@
 		tcon->need_reconnect = false;
 		tcon->tid = smb_buffer_response->Tid;
 		bcc_ptr = pByteArea(smb_buffer_response);
-		bytes_left = BCC(smb_buffer_response);
+		bytes_left = get_bcc(smb_buffer_response);
 		length = strnlen(bcc_ptr, bytes_left - 2);
 		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
 			is_unicode = true;
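A note on the timeout arithmetic used by the new echo logic above: the demultiplex thread declares the server unresponsive once jiffies passes server->lstrp + echo_retries * SMB_ECHO_INTERVAL, so with the default echo_retries of 5 and the 60 * HZ interval a server that has been silent for about 300 seconds is reconnected. Restated as a standalone helper (the name is illustrative, not part of the patch):

static bool example_server_unresponsive(struct TCP_Server_Info *server)
{
	/* echo_retries == 0 disables forced reconnects entirely */
	return echo_retries > 0 &&
	       time_after(jiffies,
			  server->lstrp + echo_retries * SMB_ECHO_INTERVAL);
}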
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 2e77382..dd5f229 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -130,17 +130,6 @@
 	return full_path;
 }
 
-static void setup_cifs_dentry(struct cifsTconInfo *tcon,
-			      struct dentry *direntry,
-			      struct inode *newinode)
-{
-	if (tcon->nocase)
-		d_set_d_op(direntry, &cifs_ci_dentry_ops);
-	else
-		d_set_d_op(direntry, &cifs_dentry_ops);
-	d_instantiate(direntry, newinode);
-}
-
 /* Inode operations in similar order to how they appear in Linux file fs.h */
 
 int
@@ -327,7 +316,7 @@
 
 cifs_create_set_dentry:
 	if (rc == 0)
-		setup_cifs_dentry(tcon, direntry, newinode);
+		d_instantiate(direntry, newinode);
 	else
 		cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
 
@@ -418,10 +407,6 @@
 
 		rc = cifs_get_inode_info_unix(&newinode, full_path,
 						inode->i_sb, xid);
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 
 		if (rc == 0)
 			d_instantiate(direntry, newinode);
@@ -601,10 +586,6 @@
 				parent_dir_inode->i_sb, xid, NULL);
 
 	if ((rc == 0) && (newInode != NULL)) {
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 		d_add(direntry, newInode);
 		if (posix_open) {
 			filp = lookup_instantiate_filp(nd, direntry,
@@ -631,10 +612,6 @@
 	} else if (rc == -ENOENT) {
 		rc = 0;
 		direntry->d_time = jiffies;
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 		d_add(direntry, NULL);
 	/*	if it was once a directory (but how can we tell?) we could do
 		shrink_dcache_parent(direntry); */
@@ -698,6 +675,7 @@
 
 const struct dentry_operations cifs_dentry_ops = {
 	.d_revalidate = cifs_d_revalidate,
+	.d_automount = cifs_dfs_d_automount,
 /* d_delete:       cifs_d_delete,      */ /* not needed except for debugging */
 };
 
@@ -734,4 +712,5 @@
 	.d_revalidate = cifs_d_revalidate,
 	.d_hash = cifs_ci_hash,
 	.d_compare = cifs_ci_compare,
+	.d_automount = cifs_dfs_d_automount,
 };
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d843631..e964b1c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -287,6 +287,7 @@
 	struct inode *inode = cifs_file->dentry->d_inode;
 	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 	struct cifsLockInfo *li, *tmp;
 
 	spin_lock(&cifs_file_list_lock);
@@ -302,6 +303,13 @@
 	if (list_empty(&cifsi->openFileList)) {
 		cFYI(1, "closing last open instance for inode %p",
 			cifs_file->dentry->d_inode);
+
+		/* in strict cache mode we need to invalidate the mapping on
+		   the last close because it may cause an error when we open
+		   this file again and get at least a level II oplock */
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+			CIFS_I(inode)->invalid_mapping = true;
+
 		cifs_set_oplock_level(cifsi, 0);
 	}
 	spin_unlock(&cifs_file_list_lock);
@@ -338,7 +346,6 @@
 	struct cifsTconInfo *tcon;
 	struct tcon_link *tlink;
 	struct cifsFileInfo *pCifsFile = NULL;
-	struct cifsInodeInfo *pCifsInode;
 	char *full_path = NULL;
 	bool posix_open_ok = false;
 	__u16 netfid;
@@ -353,8 +360,6 @@
 	}
 	tcon = tlink_tcon(tlink);
 
-	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-
 	full_path = build_path_from_dentry(file->f_path.dentry);
 	if (full_path == NULL) {
 		rc = -ENOMEM;
@@ -726,12 +731,12 @@
 
 		/* BB we could chain these into one lock request BB */
 		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
-				 0, 1, lockType, 0 /* wait flag */ );
+				 0, 1, lockType, 0 /* wait flag */, 0);
 		if (rc == 0) {
 			rc = CIFSSMBLock(xid, tcon, netfid, length,
 					 pfLock->fl_start, 1 /* numUnlock */ ,
 					 0 /* numLock */ , lockType,
-					 0 /* wait flag */ );
+					 0 /* wait flag */, 0);
 			pfLock->fl_type = F_UNLCK;
 			if (rc != 0)
 				cERROR(1, "Error unlocking previously locked "
@@ -748,13 +753,13 @@
 				rc = CIFSSMBLock(xid, tcon, netfid, length,
 					pfLock->fl_start, 0, 1,
 					lockType | LOCKING_ANDX_SHARED_LOCK,
-					0 /* wait flag */);
+					0 /* wait flag */, 0);
 				if (rc == 0) {
 					rc = CIFSSMBLock(xid, tcon, netfid,
 						length, pfLock->fl_start, 1, 0,
 						lockType |
 						LOCKING_ANDX_SHARED_LOCK,
-						0 /* wait flag */);
+						0 /* wait flag */, 0);
 					pfLock->fl_type = F_RDLCK;
 					if (rc != 0)
 						cERROR(1, "Error unlocking "
@@ -797,8 +802,8 @@
 
 		if (numLock) {
 			rc = CIFSSMBLock(xid, tcon, netfid, length,
-					pfLock->fl_start,
-					0, numLock, lockType, wait_flag);
+					 pfLock->fl_start, 0, numLock, lockType,
+					 wait_flag, 0);
 
 			if (rc == 0) {
 				/* For Windows locks we must store them. */
@@ -818,9 +823,9 @@
 						(pfLock->fl_start + length) >=
 						(li->offset + li->length)) {
 					stored_rc = CIFSSMBLock(xid, tcon,
-							netfid,
-							li->length, li->offset,
-							1, 0, li->type, false);
+							netfid, li->length,
+							li->offset, 1, 0,
+							li->type, false, 0);
 					if (stored_rc)
 						rc = stored_rc;
 					else {
@@ -839,31 +844,8 @@
 	return rc;
 }
 
-/*
- * Set the timeout on write requests past EOF. For some servers (Windows)
- * these calls can be very long.
- *
- * If we're writing >10M past the EOF we give a 180s timeout. Anything less
- * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
- * The 10M cutoff is totally arbitrary. A better scheme for this would be
- * welcome if someone wants to suggest one.
- *
- * We may be able to do a better job with this if there were some way to
- * declare that a file should be sparse.
- */
-static int
-cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
-{
-	if (offset <= cifsi->server_eof)
-		return CIFS_STD_OP;
-	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
-		return CIFS_VLONG_OP;
-	else
-		return CIFS_LONG_OP;
-}
-
 /* update the file size (if needed) after a write */
-static void
+void
 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 		      unsigned int bytes_written)
 {
@@ -882,7 +864,7 @@
 	unsigned int total_written;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	int xid, long_op;
+	int xid;
 	struct cifsFileInfo *open_file;
 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
@@ -903,7 +885,6 @@
 
 	xid = GetXid();
 
-	long_op = cifs_write_timeout(cifsi, *poffset);
 	for (total_written = 0; write_size > total_written;
 	     total_written += bytes_written) {
 		rc = -EAGAIN;
@@ -931,7 +912,7 @@
 				min_t(const int, cifs_sb->wsize,
 				      write_size - total_written),
 				*poffset, &bytes_written,
-				NULL, write_data + total_written, long_op);
+				NULL, write_data + total_written, 0);
 		}
 		if (rc || (bytes_written == 0)) {
 			if (total_written)
@@ -944,8 +925,6 @@
 			cifs_update_eof(cifsi, *poffset, bytes_written);
 			*poffset += bytes_written;
 		}
-		long_op = CIFS_STD_OP; /* subsequent writes fast -
-				    15 seconds is plenty */
 	}
 
 	cifs_stats_bytes_written(pTcon, total_written);
@@ -974,7 +953,7 @@
 	unsigned int total_written;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	int xid, long_op;
+	int xid;
 	struct dentry *dentry = open_file->dentry;
 	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
 
@@ -987,7 +966,6 @@
 
 	xid = GetXid();
 
-	long_op = cifs_write_timeout(cifsi, *poffset);
 	for (total_written = 0; write_size > total_written;
 	     total_written += bytes_written) {
 		rc = -EAGAIN;
@@ -1017,7 +995,7 @@
 				rc = CIFSSMBWrite2(xid, pTcon,
 						open_file->netfid, len,
 						*poffset, &bytes_written,
-						iov, 1, long_op);
+						iov, 1, 0);
 			} else
 				rc = CIFSSMBWrite(xid, pTcon,
 					 open_file->netfid,
@@ -1025,7 +1003,7 @@
 					       write_size - total_written),
 					 *poffset, &bytes_written,
 					 write_data + total_written,
-					 NULL, long_op);
+					 NULL, 0);
 		}
 		if (rc || (bytes_written == 0)) {
 			if (total_written)
@@ -1038,8 +1016,6 @@
 			cifs_update_eof(cifsi, *poffset, bytes_written);
 			*poffset += bytes_written;
 		}
-		long_op = CIFS_STD_OP; /* subsequent writes fast -
-				    15 seconds is plenty */
 	}
 
 	cifs_stats_bytes_written(pTcon, total_written);
@@ -1167,7 +1143,6 @@
 	char *write_data;
 	int rc = -EFAULT;
 	int bytes_written = 0;
-	struct cifs_sb_info *cifs_sb;
 	struct inode *inode;
 	struct cifsFileInfo *open_file;
 
@@ -1175,7 +1150,6 @@
 		return -EFAULT;
 
 	inode = page->mapping->host;
-	cifs_sb = CIFS_SB(inode->i_sb);
 
 	offset += (loff_t)from;
 	write_data = kmap(page);
@@ -1239,7 +1213,7 @@
 	struct pagevec pvec;
 	int rc = 0;
 	int scanned = 0;
-	int xid, long_op;
+	int xid;
 
 	cifs_sb = CIFS_SB(mapping->host->i_sb);
 
@@ -1377,43 +1351,67 @@
 				break;
 		}
 		if (n_iov) {
+retry_write:
 			open_file = find_writable_file(CIFS_I(mapping->host),
 							false);
 			if (!open_file) {
 				cERROR(1, "No writable handles for inode");
 				rc = -EBADF;
 			} else {
-				long_op = cifs_write_timeout(cifsi, offset);
 				rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
 						   bytes_to_write, offset,
 						   &bytes_written, iov, n_iov,
-						   long_op);
+						   0);
 				cifsFileInfo_put(open_file);
-				cifs_update_eof(cifsi, offset, bytes_written);
 			}
 
-			if (rc || bytes_written < bytes_to_write) {
-				cERROR(1, "Write2 ret %d, wrote %d",
-					  rc, bytes_written);
-				mapping_set_error(mapping, rc);
-			} else {
+			cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
+
+			/*
+			 * written. A zero-length write, however, indicates
+			 * written. A zero length write however indicates
+			 * ENOSPC or EFBIG. We have no way to know which
+			 * though, so call it ENOSPC for now. EFBIG would
+			 * get translated to AS_EIO anyway.
+			 *
+			 * FIXME: make it take into account the data that did
+			 *	  get written
+			 */
+			if (rc == 0) {
+				if (bytes_written == 0)
+					rc = -ENOSPC;
+				else if (bytes_written < bytes_to_write)
+					rc = -EAGAIN;
+			}
+
+			/* retry on data-integrity flush */
+			if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
+				goto retry_write;
+
+			/* fix the stats and EOF */
+			if (bytes_written > 0) {
 				cifs_stats_bytes_written(tcon, bytes_written);
+				cifs_update_eof(cifsi, offset, bytes_written);
 			}
 
 			for (i = 0; i < n_iov; i++) {
 				page = pvec.pages[first + i];
-				/* Should we also set page error on
-				success rc but too little data written? */
-				/* BB investigate retry logic on temporary
-				server crash cases and how recovery works
-				when page marked as error */
-				if (rc)
+				/* on retryable write error, redirty page */
+				if (rc == -EAGAIN)
+					redirty_page_for_writepage(wbc, page);
+				else if (rc != 0)
 					SetPageError(page);
 				kunmap(page);
 				unlock_page(page);
 				end_page_writeback(page);
 				page_cache_release(page);
 			}
+
+			if (rc != -EAGAIN)
+				mapping_set_error(mapping, rc);
+			else
+				rc = 0;
+
 			if ((wbc->nr_to_write -= n_iov) <= 0)
 				done = 1;
 			index = next;
@@ -1525,27 +1523,47 @@
 	return rc;
 }
 
-int cifs_fsync(struct file *file, int datasync)
+int cifs_strict_fsync(struct file *file, int datasync)
 {
 	int xid;
 	int rc = 0;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *smbfile = file->private_data;
 	struct inode *inode = file->f_path.dentry->d_inode;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 
 	xid = GetXid();
 
 	cFYI(1, "Sync file - name: %s datasync: 0x%x",
 		file->f_path.dentry->d_name.name, datasync);
 
-	rc = filemap_write_and_wait(inode->i_mapping);
-	if (rc == 0) {
-		struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	if (!CIFS_I(inode)->clientCanCacheRead)
+		cifs_invalidate_mapping(inode);
 
-		tcon = tlink_tcon(smbfile->tlink);
-		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
-			rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
-	}
+	tcon = tlink_tcon(smbfile->tlink);
+	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
+
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_fsync(struct file *file, int datasync)
+{
+	int xid;
+	int rc = 0;
+	struct cifsTconInfo *tcon;
+	struct cifsFileInfo *smbfile = file->private_data;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+
+	xid = GetXid();
+
+	cFYI(1, "Sync file - name: %s datasync: 0x%x",
+		file->f_path.dentry->d_name.name, datasync);
+
+	tcon = tlink_tcon(smbfile->tlink);
+	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
 
 	FreeXid(xid);
 	return rc;
@@ -1596,42 +1614,244 @@
 	return rc;
 }
 
-ssize_t cifs_user_read(struct file *file, char __user *read_data,
-	size_t read_size, loff_t *poffset)
+static int
+cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
 {
-	int rc = -EACCES;
+	int rc = 0;
+	unsigned long i;
+
+	for (i = 0; i < num_pages; i++) {
+		pages[i] = alloc_page(__GFP_HIGHMEM);
+		if (!pages[i]) {
+			/*
+			 * save the number of pages we have already allocated
+			 * and return an ENOMEM error
+			 */
+			num_pages = i;
+			rc = -ENOMEM;
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	for (i = 0; i < num_pages; i++)
+		put_page(pages[i]);
+	return rc;
+}
+
+static inline
+size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
+{
+	size_t num_pages;
+	size_t clen;
+
+	clen = min_t(const size_t, len, wsize);
+	num_pages = clen / PAGE_CACHE_SIZE;
+	if (clen % PAGE_CACHE_SIZE)
+		num_pages++;
+
+	if (cur_len)
+		*cur_len = clen;
+
+	return num_pages;
+}
+
+static ssize_t
+cifs_iovec_write(struct file *file, const struct iovec *iov,
+		 unsigned long nr_segs, loff_t *poffset)
+{
+	unsigned int written;
+	unsigned long num_pages, npages, i;
+	size_t copied, len, cur_len;
+	ssize_t total_written = 0;
+	struct kvec *to_send;
+	struct page **pages;
+	struct iov_iter it;
+	struct inode *inode;
+	struct cifsFileInfo *open_file;
+	struct cifsTconInfo *pTcon;
+	struct cifs_sb_info *cifs_sb;
+	int xid, rc;
+
+	len = iov_length(iov, nr_segs);
+	if (!len)
+		return 0;
+
+	rc = generic_write_checks(file, poffset, &len, 0);
+	if (rc)
+		return rc;
+
+	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+
+	pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
+	if (!to_send) {
+		kfree(pages);
+		return -ENOMEM;
+	}
+
+	rc = cifs_write_allocate_pages(pages, num_pages);
+	if (rc) {
+		kfree(pages);
+		kfree(to_send);
+		return rc;
+	}
+
+	xid = GetXid();
+	open_file = file->private_data;
+	pTcon = tlink_tcon(open_file->tlink);
+	inode = file->f_path.dentry->d_inode;
+
+	iov_iter_init(&it, iov, nr_segs, len, 0);
+	npages = num_pages;
+
+	do {
+		size_t save_len = cur_len;
+		for (i = 0; i < npages; i++) {
+			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
+			copied = iov_iter_copy_from_user(pages[i], &it, 0,
+							 copied);
+			cur_len -= copied;
+			iov_iter_advance(&it, copied);
+			to_send[i+1].iov_base = kmap(pages[i]);
+			to_send[i+1].iov_len = copied;
+		}
+
+		cur_len = save_len - cur_len;
+
+		do {
+			if (open_file->invalidHandle) {
+				rc = cifs_reopen_file(open_file, false);
+				if (rc != 0)
+					break;
+			}
+			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
+					   cur_len, *poffset, &written,
+					   to_send, npages, 0);
+		} while (rc == -EAGAIN);
+
+		for (i = 0; i < npages; i++)
+			kunmap(pages[i]);
+
+		if (written) {
+			len -= written;
+			total_written += written;
+			cifs_update_eof(CIFS_I(inode), *poffset, written);
+			*poffset += written;
+		} else if (rc < 0) {
+			if (!total_written)
+				total_written = rc;
+			break;
+		}
+
+		/* get length and number of kvecs of the next write */
+		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+	} while (len > 0);
+
+	if (total_written > 0) {
+		spin_lock(&inode->i_lock);
+		if (*poffset > inode->i_size)
+			i_size_write(inode, *poffset);
+		spin_unlock(&inode->i_lock);
+	}
+
+	cifs_stats_bytes_written(pTcon, total_written);
+	mark_inode_dirty_sync(inode);
+
+	for (i = 0; i < num_pages; i++)
+		put_page(pages[i]);
+	kfree(to_send);
+	kfree(pages);
+	FreeXid(xid);
+	return total_written;
+}
+
+static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t pos)
+{
+	ssize_t written;
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	/*
+	 * BB - optimize the case when signing is disabled: we can drop this
+	 * extra memory-to-memory copying and use the iovec buffers directly
+	 * when constructing the write request.
+	 */
+
+	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
+	if (written > 0) {
+		CIFS_I(inode)->invalid_mapping = true;
+		iocb->ki_pos = pos;
+	}
+
+	return written;
+}
+
+ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+			   unsigned long nr_segs, loff_t pos)
+{
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	if (CIFS_I(inode)->clientCanCacheAll)
+		return generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+	/*
+	 * In strict cache mode we need to write the data to the server exactly
+	 * from pos to pos+len-1 rather than flush all affected pages
+	 * because it may cause an error with mandatory locks on these pages
+	 * but not on the region from pos to pos+len-1.
+	 */
+
+	return cifs_user_writev(iocb, iov, nr_segs, pos);
+}
+
+static ssize_t
+cifs_iovec_read(struct file *file, const struct iovec *iov,
+		 unsigned long nr_segs, loff_t *poffset)
+{
+	int rc;
+	int xid;
+	ssize_t total_read;
 	unsigned int bytes_read = 0;
-	unsigned int total_read = 0;
-	unsigned int current_read_size;
+	size_t len, cur_len;
+	int iov_offset = 0;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	int xid;
 	struct cifsFileInfo *open_file;
-	char *smb_read_data;
-	char __user *current_offset;
 	struct smb_com_read_rsp *pSMBr;
+	char *read_data;
+
+	if (!nr_segs)
+		return 0;
+
+	len = iov_length(iov, nr_segs);
+	if (!len)
+		return 0;
 
 	xid = GetXid();
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
 	open_file = file->private_data;
 	pTcon = tlink_tcon(open_file->tlink);
 
 	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
 		cFYI(1, "attempting read on write only file instance");
 
-	for (total_read = 0, current_offset = read_data;
-	     read_size > total_read;
-	     total_read += bytes_read, current_offset += bytes_read) {
-		current_read_size = min_t(const int, read_size - total_read,
-					  cifs_sb->rsize);
+	for (total_read = 0; total_read < len; total_read += bytes_read) {
+		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
 		rc = -EAGAIN;
-		smb_read_data = NULL;
+		read_data = NULL;
+
 		while (rc == -EAGAIN) {
 			int buf_type = CIFS_NO_BUFFER;
 			if (open_file->invalidHandle) {
@@ -1639,27 +1859,25 @@
 				if (rc != 0)
 					break;
 			}
-			rc = CIFSSMBRead(xid, pTcon,
-					 open_file->netfid,
-					 current_read_size, *poffset,
-					 &bytes_read, &smb_read_data,
-					 &buf_type);
-			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
-			if (smb_read_data) {
-				if (copy_to_user(current_offset,
-						smb_read_data +
-						4 /* RFC1001 length field */ +
-						le16_to_cpu(pSMBr->DataOffset),
-						bytes_read))
+			rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
+					 cur_len, *poffset, &bytes_read,
+					 &read_data, &buf_type);
+			pSMBr = (struct smb_com_read_rsp *)read_data;
+			if (read_data) {
+				char *data_offset = read_data + 4 +
+						le16_to_cpu(pSMBr->DataOffset);
+				if (memcpy_toiovecend(iov, data_offset,
+						      iov_offset, bytes_read))
 					rc = -EFAULT;
-
 				if (buf_type == CIFS_SMALL_BUFFER)
-					cifs_small_buf_release(smb_read_data);
+					cifs_small_buf_release(read_data);
 				else if (buf_type == CIFS_LARGE_BUFFER)
-					cifs_buf_release(smb_read_data);
-				smb_read_data = NULL;
+					cifs_buf_release(read_data);
+				read_data = NULL;
+				iov_offset += bytes_read;
 			}
 		}
+
 		if (rc || (bytes_read == 0)) {
 			if (total_read) {
 				break;
@@ -1672,13 +1890,57 @@
 			*poffset += bytes_read;
 		}
 	}
+
 	FreeXid(xid);
 	return total_read;
 }
 
+ssize_t cifs_user_read(struct file *file, char __user *read_data,
+		       size_t read_size, loff_t *poffset)
+{
+	struct iovec iov;
+	iov.iov_base = read_data;
+	iov.iov_len = read_size;
+
+	return cifs_iovec_read(file, &iov, 1, poffset);
+}
+
+static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
+			       unsigned long nr_segs, loff_t pos)
+{
+	ssize_t read;
+
+	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
+	if (read > 0)
+		iocb->ki_pos = pos;
+
+	return read;
+}
+
+ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+			  unsigned long nr_segs, loff_t pos)
+{
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	if (CIFS_I(inode)->clientCanCacheRead)
+		return generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+	/*
+	 * In strict cache mode we need to read from the server all the time
+	 * if we don't have a level II oplock because the server can delay the
+	 * mtime change, so we can't decide whether the inode needs to be
+	 * invalidated. Page reads can also fail if there are mandatory locks
+	 * on pages affected by this read but not on the region from pos to
+	 * pos+len-1.
+	 */
+
+	return cifs_user_readv(iocb, iov, nr_segs, pos);
+}
 
 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
-	loff_t *poffset)
+			 loff_t *poffset)
 {
 	int rc = -EACCES;
 	unsigned int bytes_read = 0;
@@ -1746,6 +2008,21 @@
 	return total_read;
 }
 
+int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int rc, xid;
+	struct inode *inode = file->f_path.dentry->d_inode;
+
+	xid = GetXid();
+
+	if (!CIFS_I(inode)->clientCanCacheRead)
+		cifs_invalidate_mapping(inode);
+
+	rc = generic_file_mmap(file, vma);
+	FreeXid(xid);
+	return rc;
+}
+
 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	int rc, xid;
@@ -2192,7 +2469,8 @@
 	 */
 	if (!cfile->oplock_break_cancelled) {
 		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
-				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
+				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
+				 cinode->clientCanCacheRead ? 1 : 0);
 		cFYI(1, "Oplock release rc = %d", rc);
 	}
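/*
 * Note on the uncached write path added to fs/cifs/file.c above:
 * cifs_iovec_write() carves the caller's iovec into wsize-sized chunks and
 * backs each chunk with whole pages, so get_numpages() is simply a ceiling
 * division of the chunk length by PAGE_CACHE_SIZE. A minimal, stand-alone
 * sketch of that arithmetic follows; the helper name and the sample numbers
 * are illustrative only and do not come from the patch.
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL	/* typical page size on x86 */

static unsigned long sketch_get_numpages(unsigned long wsize, unsigned long len,
					 unsigned long *cur_len)
{
	unsigned long clen = len < wsize ? len : wsize;	/* min(len, wsize) */
	unsigned long pages = clen / PAGE_CACHE_SIZE;

	if (clen % PAGE_CACHE_SIZE)
		pages++;		/* partial trailing page */
	if (cur_len)
		*cur_len = clen;
	return pages;
}

int main(void)
{
	unsigned long cur_len;
	unsigned long pages = sketch_get_numpages(57344, 100000, &cur_len);

	/* a 57344-byte wsize against a 100000-byte write: the first chunk is
	   57344 bytes spread over 14 pages, the rest is sent on a later pass */
	printf("%lu pages, %lu bytes in this chunk\n", pages, cur_len);
	return 0;
}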
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 0c7e369..8852470 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -32,7 +32,7 @@
 #include "fscache.h"
 
 
-static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
+static void cifs_set_ops(struct inode *inode)
 {
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 
@@ -44,13 +44,17 @@
 				inode->i_fop = &cifs_file_direct_nobrl_ops;
 			else
 				inode->i_fop = &cifs_file_direct_ops;
+		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+				inode->i_fop = &cifs_file_strict_nobrl_ops;
+			else
+				inode->i_fop = &cifs_file_strict_ops;
 		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 			inode->i_fop = &cifs_file_nobrl_ops;
 		else { /* not direct, send byte range locks */
 			inode->i_fop = &cifs_file_ops;
 		}
 
-
 		/* check if server can support readpages */
 		if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
 				PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
@@ -60,7 +64,7 @@
 		break;
 	case S_IFDIR:
 #ifdef CONFIG_CIFS_DFS_UPCALL
-		if (is_dfs_referral) {
+		if (IS_AUTOMOUNT(inode)) {
 			inode->i_op = &cifs_dfs_referral_inode_operations;
 		} else {
 #else /* NO DFS support, treat as a directory */
@@ -167,7 +171,9 @@
 	}
 	spin_unlock(&inode->i_lock);
 
-	cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL);
+	if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
+		inode->i_flags |= S_AUTOMOUNT;
+	cifs_set_ops(inode);
 }
 
 void
@@ -1324,10 +1330,6 @@
 /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
 	to set uid/gid */
 			inc_nlink(inode);
-			if (pTcon->nocase)
-				d_set_d_op(direntry, &cifs_ci_dentry_ops);
-			else
-				d_set_d_op(direntry, &cifs_dentry_ops);
 
 			cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
 			cifs_fill_uniqueid(inode->i_sb, &fattr);
@@ -1368,10 +1370,6 @@
 			rc = cifs_get_inode_info(&newinode, full_path, NULL,
 						 inode->i_sb, xid, NULL);
 
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 		d_instantiate(direntry, newinode);
 		 /* setting nlink not necessary except in cases where we
 		  * failed to get it from the server or was set bogus */
@@ -1685,7 +1683,7 @@
 /*
  * Zap the cache. Called when invalid_mapping flag is set.
  */
-static void
+void
 cifs_invalidate_mapping(struct inode *inode)
 {
 	int rc;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index fe2f6a9..e8804d3 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -28,7 +28,6 @@
 #include "cifsproto.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
-#include "md5.h"
 
 #define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
 #define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
@@ -47,6 +46,45 @@
 	md5_hash[12], md5_hash[13], md5_hash[14], md5_hash[15]
 
 static int
+symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
+{
+	int rc;
+	unsigned int size;
+	struct crypto_shash *md5;
+	struct sdesc *sdescmd5;
+
+	md5 = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(md5)) {
+		rc = PTR_ERR(md5);
+		cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
+		return rc;
+	}
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
+	sdescmd5 = kmalloc(size, GFP_KERNEL);
+	if (!sdescmd5) {
+		rc = -ENOMEM;
+		cERROR(1, "%s: Memory allocation failure\n", __func__);
+		goto symlink_hash_err;
+	}
+	sdescmd5->shash.tfm = md5;
+	sdescmd5->shash.flags = 0x0;
+
+	rc = crypto_shash_init(&sdescmd5->shash);
+	if (rc) {
+		cERROR(1, "%s: Could not init md5 shash\n", __func__);
+		goto symlink_hash_err;
+	}
+	crypto_shash_update(&sdescmd5->shash, link_str, link_len);
+	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
+
+symlink_hash_err:
+	crypto_free_shash(md5);
+	kfree(sdescmd5);
+
+	return rc;
+}
+
+static int
 CIFSParseMFSymlink(const u8 *buf,
 		   unsigned int buf_len,
 		   unsigned int *_link_len,
@@ -56,7 +94,6 @@
 	unsigned int link_len;
 	const char *md5_str1;
 	const char *link_str;
-	struct MD5Context md5_ctx;
 	u8 md5_hash[16];
 	char md5_str2[34];
 
@@ -70,9 +107,11 @@
 	if (rc != 1)
 		return -EINVAL;
 
-	cifs_MD5_init(&md5_ctx);
-	cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
-	cifs_MD5_final(md5_hash, &md5_ctx);
+	rc = symlink_hash(link_len, link_str, md5_hash);
+	if (rc) {
+		cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+		return rc;
+	}
 
 	snprintf(md5_str2, sizeof(md5_str2),
 		 CIFS_MF_SYMLINK_MD5_FORMAT,
@@ -94,9 +133,9 @@
 static int
 CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
 {
+	int rc;
 	unsigned int link_len;
 	unsigned int ofs;
-	struct MD5Context md5_ctx;
 	u8 md5_hash[16];
 
 	if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
@@ -107,9 +146,11 @@
 	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
 		return -ENAMETOOLONG;
 
-	cifs_MD5_init(&md5_ctx);
-	cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
-	cifs_MD5_final(md5_hash, &md5_ctx);
+	rc = symlink_hash(link_len, link_str, md5_hash);
+	if (rc) {
+		cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+		return rc;
+	}
 
 	snprintf(buf, buf_len,
 		 CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
@@ -524,10 +565,6 @@
 			cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d",
 			      rc);
 		} else {
-			if (pTcon->nocase)
-				d_set_d_op(direntry, &cifs_ci_dentry_ops);
-			else
-				d_set_d_op(direntry, &cifs_dentry_ops);
 			d_instantiate(direntry, newinode);
 		}
 	}
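/*
 * The symlink_hash() helper added above (and mdfour() in the smbencrypt.c
 * hunk further down) both follow the same kernel crypto_shash pattern:
 * allocate a transform, allocate a shash_desc plus its per-request context,
 * then init/update/final. The "struct sdesc" wrapper they use is defined
 * elsewhere in the cifs headers and is not shown in this hunk; the sketch
 * below assumes it is a shash_desc followed by the context area. The names
 * sdesc_sketch and one_shot_md5 are illustrative, not part of the patch.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

struct sdesc_sketch {			/* assumed shape of cifs' struct sdesc */
	struct shash_desc shash;
	char ctx[];			/* crypto_shash_descsize(tfm) bytes */
};

static int one_shot_md5(const u8 *data, unsigned int len, u8 *md5_hash)
{
	struct crypto_shash *tfm;
	struct sdesc_sketch *desc;
	int rc;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->shash.tfm = tfm;
	desc->shash.flags = 0;

	rc = crypto_shash_init(&desc->shash);
	if (!rc)
		rc = crypto_shash_update(&desc->shash, data, len);
	if (!rc)
		rc = crypto_shash_final(&desc->shash, md5_hash);

	kfree(desc);
	crypto_free_shash(tfm);
	return rc;
}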
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c
deleted file mode 100644
index a725c26..0000000
--- a/fs/cifs/md4.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
-   Unix SMB/Netbios implementation.
-   Version 1.9.
-   a implementation of MD4 designed for use in the SMB authentication protocol
-   Copyright (C) Andrew Tridgell 1997-1998.
-   Modified by Steve French (sfrench@us.ibm.com) 2002-2003
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-#include <linux/module.h>
-#include <linux/fs.h>
-#include "cifsencrypt.h"
-
-/* NOTE: This code makes no attempt to be fast! */
-
-static __u32
-F(__u32 X, __u32 Y, __u32 Z)
-{
-	return (X & Y) | ((~X) & Z);
-}
-
-static __u32
-G(__u32 X, __u32 Y, __u32 Z)
-{
-	return (X & Y) | (X & Z) | (Y & Z);
-}
-
-static __u32
-H(__u32 X, __u32 Y, __u32 Z)
-{
-	return X ^ Y ^ Z;
-}
-
-static __u32
-lshift(__u32 x, int s)
-{
-	x &= 0xFFFFFFFF;
-	return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
-}
-
-#define ROUND1(a,b,c,d,k,s) (*a) = lshift((*a) + F(*b,*c,*d) + X[k], s)
-#define ROUND2(a,b,c,d,k,s) (*a) = lshift((*a) + G(*b,*c,*d) + X[k] + (__u32)0x5A827999,s)
-#define ROUND3(a,b,c,d,k,s) (*a) = lshift((*a) + H(*b,*c,*d) + X[k] + (__u32)0x6ED9EBA1,s)
-
-/* this applies md4 to 64 byte chunks */
-static void
-mdfour64(__u32 *M, __u32 *A, __u32 *B, __u32 *C, __u32 *D)
-{
-	int j;
-	__u32 AA, BB, CC, DD;
-	__u32 X[16];
-
-
-	for (j = 0; j < 16; j++)
-		X[j] = M[j];
-
-	AA = *A;
-	BB = *B;
-	CC = *C;
-	DD = *D;
-
-	ROUND1(A, B, C, D, 0, 3);
-	ROUND1(D, A, B, C, 1, 7);
-	ROUND1(C, D, A, B, 2, 11);
-	ROUND1(B, C, D, A, 3, 19);
-	ROUND1(A, B, C, D, 4, 3);
-	ROUND1(D, A, B, C, 5, 7);
-	ROUND1(C, D, A, B, 6, 11);
-	ROUND1(B, C, D, A, 7, 19);
-	ROUND1(A, B, C, D, 8, 3);
-	ROUND1(D, A, B, C, 9, 7);
-	ROUND1(C, D, A, B, 10, 11);
-	ROUND1(B, C, D, A, 11, 19);
-	ROUND1(A, B, C, D, 12, 3);
-	ROUND1(D, A, B, C, 13, 7);
-	ROUND1(C, D, A, B, 14, 11);
-	ROUND1(B, C, D, A, 15, 19);
-
-	ROUND2(A, B, C, D, 0, 3);
-	ROUND2(D, A, B, C, 4, 5);
-	ROUND2(C, D, A, B, 8, 9);
-	ROUND2(B, C, D, A, 12, 13);
-	ROUND2(A, B, C, D, 1, 3);
-	ROUND2(D, A, B, C, 5, 5);
-	ROUND2(C, D, A, B, 9, 9);
-	ROUND2(B, C, D, A, 13, 13);
-	ROUND2(A, B, C, D, 2, 3);
-	ROUND2(D, A, B, C, 6, 5);
-	ROUND2(C, D, A, B, 10, 9);
-	ROUND2(B, C, D, A, 14, 13);
-	ROUND2(A, B, C, D, 3, 3);
-	ROUND2(D, A, B, C, 7, 5);
-	ROUND2(C, D, A, B, 11, 9);
-	ROUND2(B, C, D, A, 15, 13);
-
-	ROUND3(A, B, C, D, 0, 3);
-	ROUND3(D, A, B, C, 8, 9);
-	ROUND3(C, D, A, B, 4, 11);
-	ROUND3(B, C, D, A, 12, 15);
-	ROUND3(A, B, C, D, 2, 3);
-	ROUND3(D, A, B, C, 10, 9);
-	ROUND3(C, D, A, B, 6, 11);
-	ROUND3(B, C, D, A, 14, 15);
-	ROUND3(A, B, C, D, 1, 3);
-	ROUND3(D, A, B, C, 9, 9);
-	ROUND3(C, D, A, B, 5, 11);
-	ROUND3(B, C, D, A, 13, 15);
-	ROUND3(A, B, C, D, 3, 3);
-	ROUND3(D, A, B, C, 11, 9);
-	ROUND3(C, D, A, B, 7, 11);
-	ROUND3(B, C, D, A, 15, 15);
-
-	*A += AA;
-	*B += BB;
-	*C += CC;
-	*D += DD;
-
-	*A &= 0xFFFFFFFF;
-	*B &= 0xFFFFFFFF;
-	*C &= 0xFFFFFFFF;
-	*D &= 0xFFFFFFFF;
-
-	for (j = 0; j < 16; j++)
-		X[j] = 0;
-}
-
-static void
-copy64(__u32 *M, unsigned char *in)
-{
-	int i;
-
-	for (i = 0; i < 16; i++)
-		M[i] = (in[i * 4 + 3] << 24) | (in[i * 4 + 2] << 16) |
-		    (in[i * 4 + 1] << 8) | (in[i * 4 + 0] << 0);
-}
-
-static void
-copy4(unsigned char *out, __u32 x)
-{
-	out[0] = x & 0xFF;
-	out[1] = (x >> 8) & 0xFF;
-	out[2] = (x >> 16) & 0xFF;
-	out[3] = (x >> 24) & 0xFF;
-}
-
-/* produce a md4 message digest from data of length n bytes */
-void
-mdfour(unsigned char *out, unsigned char *in, int n)
-{
-	unsigned char buf[128];
-	__u32 M[16];
-	__u32 b = n * 8;
-	int i;
-	__u32 A = 0x67452301;
-	__u32 B = 0xefcdab89;
-	__u32 C = 0x98badcfe;
-	__u32 D = 0x10325476;
-
-	while (n > 64) {
-		copy64(M, in);
-		mdfour64(M, &A, &B, &C, &D);
-		in += 64;
-		n -= 64;
-	}
-
-	for (i = 0; i < 128; i++)
-		buf[i] = 0;
-	memcpy(buf, in, n);
-	buf[n] = 0x80;
-
-	if (n <= 55) {
-		copy4(buf + 56, b);
-		copy64(M, buf);
-		mdfour64(M, &A, &B, &C, &D);
-	} else {
-		copy4(buf + 120, b);
-		copy64(M, buf);
-		mdfour64(M, &A, &B, &C, &D);
-		copy64(M, buf + 64);
-		mdfour64(M, &A, &B, &C, &D);
-	}
-
-	for (i = 0; i < 128; i++)
-		buf[i] = 0;
-	copy64(M, buf);
-
-	copy4(out, A);
-	copy4(out + 4, B);
-	copy4(out + 8, C);
-	copy4(out + 12, D);
-
-	A = B = C = D = 0;
-}
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
deleted file mode 100644
index 98b66a5..0000000
--- a/fs/cifs/md5.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * This code implements the MD5 message-digest algorithm.
- * The algorithm is due to Ron Rivest.  This code was
- * written by Colin Plumb in 1993, no copyright is claimed.
- * This code is in the public domain; do with it what you wish.
- *
- * Equivalent code is available from RSA Data Security, Inc.
- * This code has been tested against that, and is equivalent,
- * except that you don't need to include two pages of legalese
- * with every copy.
- *
- * To compute the message digest of a chunk of bytes, declare an
- * MD5Context structure, pass it to cifs_MD5_init, call cifs_MD5_update as
- * needed on buffers full of bytes, and then call cifs_MD5_final, which
- * will fill a supplied 16-byte array with the digest.
- */
-
-/* This code slightly modified to fit into Samba by
-   abartlet@samba.org Jun 2001
-   and to fit the cifs vfs by
-   Steve French sfrench@us.ibm.com */
-
-#include <linux/string.h>
-#include "md5.h"
-
-static void MD5Transform(__u32 buf[4], __u32 const in[16]);
-
-/*
- * Note: this code is harmless on little-endian machines.
- */
-static void
-byteReverse(unsigned char *buf, unsigned longs)
-{
-	__u32 t;
-	do {
-		t = (__u32) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
-		    ((unsigned) buf[1] << 8 | buf[0]);
-		*(__u32 *) buf = t;
-		buf += 4;
-	} while (--longs);
-}
-
-/*
- * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
- * initialization constants.
- */
-void
-cifs_MD5_init(struct MD5Context *ctx)
-{
-	ctx->buf[0] = 0x67452301;
-	ctx->buf[1] = 0xefcdab89;
-	ctx->buf[2] = 0x98badcfe;
-	ctx->buf[3] = 0x10325476;
-
-	ctx->bits[0] = 0;
-	ctx->bits[1] = 0;
-}
-
-/*
- * Update context to reflect the concatenation of another buffer full
- * of bytes.
- */
-void
-cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
-{
-	register __u32 t;
-
-	/* Update bitcount */
-
-	t = ctx->bits[0];
-	if ((ctx->bits[0] = t + ((__u32) len << 3)) < t)
-		ctx->bits[1]++;	/* Carry from low to high */
-	ctx->bits[1] += len >> 29;
-
-	t = (t >> 3) & 0x3f;	/* Bytes already in shsInfo->data */
-
-	/* Handle any leading odd-sized chunks */
-
-	if (t) {
-		unsigned char *p = (unsigned char *) ctx->in + t;
-
-		t = 64 - t;
-		if (len < t) {
-			memmove(p, buf, len);
-			return;
-		}
-		memmove(p, buf, t);
-		byteReverse(ctx->in, 16);
-		MD5Transform(ctx->buf, (__u32 *) ctx->in);
-		buf += t;
-		len -= t;
-	}
-	/* Process data in 64-byte chunks */
-
-	while (len >= 64) {
-		memmove(ctx->in, buf, 64);
-		byteReverse(ctx->in, 16);
-		MD5Transform(ctx->buf, (__u32 *) ctx->in);
-		buf += 64;
-		len -= 64;
-	}
-
-	/* Handle any remaining bytes of data. */
-
-	memmove(ctx->in, buf, len);
-}
-
-/*
- * Final wrapup - pad to 64-byte boundary with the bit pattern
- * 1 0* (64-bit count of bits processed, MSB-first)
- */
-void
-cifs_MD5_final(unsigned char digest[16], struct MD5Context *ctx)
-{
-	unsigned int count;
-	unsigned char *p;
-
-	/* Compute number of bytes mod 64 */
-	count = (ctx->bits[0] >> 3) & 0x3F;
-
-	/* Set the first char of padding to 0x80.  This is safe since there is
-	   always at least one byte free */
-	p = ctx->in + count;
-	*p++ = 0x80;
-
-	/* Bytes of padding needed to make 64 bytes */
-	count = 64 - 1 - count;
-
-	/* Pad out to 56 mod 64 */
-	if (count < 8) {
-		/* Two lots of padding:  Pad the first block to 64 bytes */
-		memset(p, 0, count);
-		byteReverse(ctx->in, 16);
-		MD5Transform(ctx->buf, (__u32 *) ctx->in);
-
-		/* Now fill the next block with 56 bytes */
-		memset(ctx->in, 0, 56);
-	} else {
-		/* Pad block to 56 bytes */
-		memset(p, 0, count - 8);
-	}
-	byteReverse(ctx->in, 14);
-
-	/* Append length in bits and transform */
-	((__u32 *) ctx->in)[14] = ctx->bits[0];
-	((__u32 *) ctx->in)[15] = ctx->bits[1];
-
-	MD5Transform(ctx->buf, (__u32 *) ctx->in);
-	byteReverse((unsigned char *) ctx->buf, 4);
-	memmove(digest, ctx->buf, 16);
-	memset(ctx, 0, sizeof(*ctx));	/* In case it's sensitive */
-}
-
-/* The four core functions - F1 is optimized somewhat */
-
-/* #define F1(x, y, z) (x & y | ~x & z) */
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-/* This is the central step in the MD5 algorithm. */
-#define MD5STEP(f, w, x, y, z, data, s) \
-	(w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x)
-
-/*
- * The core of the MD5 algorithm, this alters an existing MD5 hash to
- * reflect the addition of 16 longwords of new data.  cifs_MD5_update blocks
- * the data and converts bytes into longwords for this routine.
- */
-static void
-MD5Transform(__u32 buf[4], __u32 const in[16])
-{
-	register __u32 a, b, c, d;
-
-	a = buf[0];
-	b = buf[1];
-	c = buf[2];
-	d = buf[3];
-
-	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
-	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
-	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
-	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
-	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
-	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
-	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
-	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
-	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
-	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
-	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
-	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
-	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
-	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
-	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
-	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
-	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
-	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
-	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
-	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
-	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
-	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
-	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
-	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
-	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
-	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
-	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
-	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
-	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
-	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
-	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
-	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
-	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
-	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
-	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
-	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
-	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
-	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
-	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
-	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
-	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
-	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
-	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
-	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
-	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
-	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
-	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
-	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
-	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
-	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
-	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
-	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
-	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
-	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
-	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
-	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
-	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
-	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
-	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
-	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
-	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
-	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
-	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
-	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
-	buf[0] += a;
-	buf[1] += b;
-	buf[2] += c;
-	buf[3] += d;
-}
-
-#if 0   /* currently unused */
-/***********************************************************************
- the rfc 2104 version of hmac_md5 initialisation.
-***********************************************************************/
-static void
-hmac_md5_init_rfc2104(unsigned char *key, int key_len,
-		      struct HMACMD5Context *ctx)
-{
-	int i;
-
-	/* if key is longer than 64 bytes reset it to key=MD5(key) */
-	if (key_len > 64) {
-		unsigned char tk[16];
-		struct MD5Context tctx;
-
-		cifs_MD5_init(&tctx);
-		cifs_MD5_update(&tctx, key, key_len);
-		cifs_MD5_final(tk, &tctx);
-
-		key = tk;
-		key_len = 16;
-	}
-
-	/* start out by storing key in pads */
-	memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
-	memset(ctx->k_opad, 0, sizeof(ctx->k_opad));
-	memcpy(ctx->k_ipad, key, key_len);
-	memcpy(ctx->k_opad, key, key_len);
-
-	/* XOR key with ipad and opad values */
-	for (i = 0; i < 64; i++) {
-		ctx->k_ipad[i] ^= 0x36;
-		ctx->k_opad[i] ^= 0x5c;
-	}
-
-	cifs_MD5_init(&ctx->ctx);
-	cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
-}
-#endif
-
-/***********************************************************************
- the microsoft version of hmac_md5 initialisation.
-***********************************************************************/
-void
-hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
-			 struct HMACMD5Context *ctx)
-{
-	int i;
-
-	/* if key is longer than 64 bytes truncate it */
-	if (key_len > 64)
-		key_len = 64;
-
-	/* start out by storing key in pads */
-	memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
-	memset(ctx->k_opad, 0, sizeof(ctx->k_opad));
-	memcpy(ctx->k_ipad, key, key_len);
-	memcpy(ctx->k_opad, key, key_len);
-
-	/* XOR key with ipad and opad values */
-	for (i = 0; i < 64; i++) {
-		ctx->k_ipad[i] ^= 0x36;
-		ctx->k_opad[i] ^= 0x5c;
-	}
-
-	cifs_MD5_init(&ctx->ctx);
-	cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
-}
-
-/***********************************************************************
- update hmac_md5 "inner" buffer
-***********************************************************************/
-void
-hmac_md5_update(const unsigned char *text, int text_len,
-		struct HMACMD5Context *ctx)
-{
-	cifs_MD5_update(&ctx->ctx, text, text_len);	/* then text of datagram */
-}
-
-/***********************************************************************
- finish off hmac_md5 "inner" buffer and generate outer one.
-***********************************************************************/
-void
-hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx)
-{
-	struct MD5Context ctx_o;
-
-	cifs_MD5_final(digest, &ctx->ctx);
-
-	cifs_MD5_init(&ctx_o);
-	cifs_MD5_update(&ctx_o, ctx->k_opad, 64);
-	cifs_MD5_update(&ctx_o, digest, 16);
-	cifs_MD5_final(digest, &ctx_o);
-}
-
-/***********************************************************
- single function to calculate an HMAC MD5 digest from data.
- use the microsoft hmacmd5 init method because the key is 16 bytes.
-************************************************************/
-#if 0 /* currently unused */
-static void
-hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
-	 unsigned char *digest)
-{
-	struct HMACMD5Context ctx;
-	hmac_md5_init_limK_to_64(key, 16, &ctx);
-	if (data_len != 0)
-		hmac_md5_update(data, data_len, &ctx);
-
-	hmac_md5_final(digest, &ctx);
-}
-#endif
diff --git a/fs/cifs/md5.h b/fs/cifs/md5.h
deleted file mode 100644
index 6fba8cb..0000000
--- a/fs/cifs/md5.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef MD5_H
-#define MD5_H
-#ifndef HEADER_MD5_H
-/* Try to avoid clashes with OpenSSL */
-#define HEADER_MD5_H
-#endif
-
-struct MD5Context {
-	__u32 buf[4];
-	__u32 bits[2];
-	unsigned char in[64];
-};
-#endif				/* !MD5_H */
-
-#ifndef _HMAC_MD5_H
-struct HMACMD5Context {
-	struct MD5Context ctx;
-	unsigned char k_ipad[65];
-	unsigned char k_opad[65];
-};
-#endif				/* _HMAC_MD5_H */
-
-void cifs_MD5_init(struct MD5Context *context);
-void cifs_MD5_update(struct MD5Context *context, unsigned char const *buf,
-			unsigned len);
-void cifs_MD5_final(unsigned char digest[16], struct MD5Context *context);
-
-/* The following definitions come from lib/hmacmd5.c  */
-
-/* void hmac_md5_init_rfc2104(unsigned char *key, int key_len,
-			struct HMACMD5Context *ctx);*/
-void hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
-			struct HMACMD5Context *ctx);
-void hmac_md5_update(const unsigned char *text, int text_len,
-			struct HMACMD5Context *ctx);
-void hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx);
-/* void hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
-			unsigned char *digest);*/
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 43f1028..2a930a7 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -236,10 +236,7 @@
 {
 	__u16 mid = 0;
 	__u16 last_mid;
-	int   collision;
-
-	if (server == NULL)
-		return mid;
+	bool collision;
 
 	spin_lock(&GlobalMid_Lock);
 	last_mid = server->CurrentMid; /* we do not want to loop forever */
@@ -252,24 +249,38 @@
 	(and it would also have to have been a request that
 	 did not time out) */
 	while (server->CurrentMid != last_mid) {
-		struct list_head *tmp;
 		struct mid_q_entry *mid_entry;
+		unsigned int num_mids;
 
-		collision = 0;
+		collision = false;
 		if (server->CurrentMid == 0)
 			server->CurrentMid++;
 
-		list_for_each(tmp, &server->pending_mid_q) {
-			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-
-			if ((mid_entry->mid == server->CurrentMid) &&
-			    (mid_entry->midState == MID_REQUEST_SUBMITTED)) {
+		num_mids = 0;
+		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+			++num_mids;
+			if (mid_entry->mid == server->CurrentMid &&
+			    mid_entry->midState == MID_REQUEST_SUBMITTED) {
 				/* This mid is in use, try a different one */
-				collision = 1;
+				collision = true;
 				break;
 			}
 		}
-		if (collision == 0) {
+
+		/*
+		 * if we have more than 32k mids in the list, then something
+		 * is very wrong. Possibly a local user is trying to DoS the
+		 * box by issuing long-running calls and SIGKILL'ing them. If
+		 * we get to 2^16 mids then we're in big trouble as this
+		 * function could loop forever.
+		 *
+		 * Go ahead and assign out the mid in this situation, but force
+		 * an eventual reconnect to clean out the pending_mid_q.
+		 */
+		if (num_mids > 32768)
+			server->tcpStatus = CifsNeedReconnect;
+
+		if (!collision) {
 			mid = server->CurrentMid;
 			break;
 		}
@@ -381,29 +392,31 @@
 }
 
 static int
-checkSMBhdr(struct smb_hdr *smb, __u16 mid)
+check_smb_hdr(struct smb_hdr *smb, __u16 mid)
 {
-	/* Make sure that this really is an SMB, that it is a response,
-	   and that the message ids match */
-	if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
-		(mid == smb->Mid)) {
-		if (smb->Flags & SMBFLG_RESPONSE)
-			return 0;
-		else {
-		/* only one valid case where server sends us request */
-			if (smb->Command == SMB_COM_LOCKING_ANDX)
-				return 0;
-			else
-				cERROR(1, "Received Request not response");
-		}
-	} else { /* bad signature or mid */
-		if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
-			cERROR(1, "Bad protocol string signature header %x",
-				*(unsigned int *) smb->Protocol);
-		if (mid != smb->Mid)
-			cERROR(1, "Mids do not match");
+	/* does it have the right SMB "signature" ? */
+	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
+		cERROR(1, "Bad protocol string signature header 0x%x",
+			*(unsigned int *)smb->Protocol);
+		return 1;
 	}
-	cERROR(1, "bad smb detected. The Mid=%d", smb->Mid);
+
+	/* Make sure that message ids match */
+	if (mid != smb->Mid) {
+		cERROR(1, "Mids do not match. received=%u expected=%u",
+			smb->Mid, mid);
+		return 1;
+	}
+
+	/* if it's a response then accept */
+	if (smb->Flags & SMBFLG_RESPONSE)
+		return 0;
+
+	/* only one valid case where server sends us request */
+	if (smb->Command == SMB_COM_LOCKING_ANDX)
+		return 0;
+
+	cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
 	return 1;
 }
 
@@ -448,7 +461,7 @@
 		return 1;
 	}
 
-	if (checkSMBhdr(smb, mid))
+	if (check_smb_hdr(smb, mid))
 		return 1;
 	clc_len = smbCalcSize_LE(smb);
 
@@ -465,25 +478,26 @@
 			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
 				return 0; /* bcc wrapped */
 		}
-		cFYI(1, "Calculated size %d vs length %d mismatch for mid %d",
+		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
 				clc_len, 4 + len, smb->Mid);
-		/* Windows XP can return a few bytes too much, presumably
-		an illegal pad, at the end of byte range lock responses
-		so we allow for that three byte pad, as long as actual
-		received length is as long or longer than calculated length */
-		/* We have now had to extend this more, since there is a
-		case in which it needs to be bigger still to handle a
-		malformed response to transact2 findfirst from WinXP when
-		access denied is returned and thus bcc and wct are zero
-		but server says length is 0x21 bytes too long as if the server
-		forget to reset the smb rfc1001 length when it reset the
-		wct and bcc to minimum size and drop the t2 parms and data */
-		if ((4+len > clc_len) && (len <= clc_len + 512))
-			return 0;
-		else {
-			cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d",
+
+		if (4 + len < clc_len) {
+			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
 					len, smb->Mid);
 			return 1;
+		} else if (len > clc_len + 512) {
+			/*
+			 * Some servers (Windows XP in particular) send more
+			 * data than the lengths in the SMB packet would
+			 * indicate on certain calls (byte range locks and
+			 * trans2 find first calls in particular). While the
+			 * client can handle such a frame by ignoring the
+			 * trailing data, we choose to limit the amount of extra
+			 * data to 512 bytes.
+			 */
+			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
+				  "than SMB for mid=%u", len, smb->Mid);
+			return 1;
 		}
 	}
 	return 0;
@@ -571,7 +585,7 @@
 				pCifsInode = CIFS_I(netfile->dentry->d_inode);
 
 				cifs_set_oplock_level(pCifsInode,
-						      pSMB->OplockLevel);
+					pSMB->OplockLevel ? OPLOCK_READ : 0);
 				/*
 				 * cifs_oplock_break_put() can't be called
 				 * from here.  Get reference after queueing
@@ -637,77 +651,6 @@
 	return;
 }
 
-/* Convert 16 bit Unicode pathname to wire format from string in current code
-   page.  Conversion may involve remapping up the seven characters that are
-   only legal in POSIX-like OS (if they are present in the string). Path
-   names are little endian 16 bit Unicode on the wire */
-int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
-		 const struct nls_table *cp, int mapChars)
-{
-	int i, j, charlen;
-	int len_remaining = maxlen;
-	char src_char;
-	__u16 temp;
-
-	if (!mapChars)
-		return cifs_strtoUCS(target, source, PATH_MAX, cp);
-
-	for (i = 0, j = 0; i < maxlen; j++) {
-		src_char = source[i];
-		switch (src_char) {
-			case 0:
-				target[j] = 0;
-				goto ctoUCS_out;
-			case ':':
-				target[j] = cpu_to_le16(UNI_COLON);
-				break;
-			case '*':
-				target[j] = cpu_to_le16(UNI_ASTERIK);
-				break;
-			case '?':
-				target[j] = cpu_to_le16(UNI_QUESTION);
-				break;
-			case '<':
-				target[j] = cpu_to_le16(UNI_LESSTHAN);
-				break;
-			case '>':
-				target[j] = cpu_to_le16(UNI_GRTRTHAN);
-				break;
-			case '|':
-				target[j] = cpu_to_le16(UNI_PIPE);
-				break;
-			/* BB We can not handle remapping slash until
-			   all the calls to build_path_from_dentry
-			   are modified, as they use slash as separator BB */
-			/* case '\\':
-				target[j] = cpu_to_le16(UNI_SLASH);
-				break;*/
-			default:
-				charlen = cp->char2uni(source+i,
-					len_remaining, &temp);
-				/* if no match, use question mark, which
-				at least in some cases servers as wild card */
-				if (charlen < 1) {
-					target[j] = cpu_to_le16(0x003f);
-					charlen = 1;
-				} else
-					target[j] = cpu_to_le16(temp);
-				len_remaining -= charlen;
-				/* character may take more than one byte in the
-				   the source string, but will take exactly two
-				   bytes in the target string */
-				i += charlen;
-				continue;
-		}
-		i++; /* move to next char in source string */
-		len_remaining--;
-	}
-
-ctoUCS_out:
-	return i;
-}
-
 void
 cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
 {
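/*
 * The checkSMB rework in the misc.c hunk above makes the frame-length policy
 * explicit: the RFC1001 length may exceed the size calculated from the SMB
 * WordCount/ByteCount fields by up to 512 bytes (some servers pad their
 * replies), but a frame shorter than the calculated size is always rejected.
 * Below is a stand-alone sketch of just that decision; the function and
 * variable names are illustrative, not from the patch, and the bcc-wrap
 * special case is omitted.
 */
#include <stdbool.h>
#include <stdio.h>

/* returns true if the received frame should be rejected */
static bool reject_frame(unsigned int rfc1001_len, unsigned int calculated_len)
{
	unsigned int total = 4 + rfc1001_len;	/* 4-byte RFC1001 header */

	if (total == calculated_len)
		return false;				/* exact match */
	if (total < calculated_len)
		return true;				/* truncated frame */
	return rfc1001_len > calculated_len + 512;	/* too much padding */
}

int main(void)
{
	printf("%d %d %d\n",
	       reject_frame(100, 104),	/* exact fit: keep   -> 0 */
	       reject_frame(90, 104),	/* truncated: reject -> 1 */
	       reject_frame(700, 104));	/* >512 extra: reject -> 1 */
	return 0;
}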
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 9aad47a..79f641e 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -170,7 +170,7 @@
 {
 	int rc, alen, slen;
 	const char *pct;
-	char *endp, scope_id[13];
+	char scope_id[13];
 	struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
 	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
 
@@ -197,9 +197,9 @@
 		memcpy(scope_id, pct + 1, slen);
 		scope_id[slen] = '\0';
 
-		s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
-		if (endp != scope_id + slen)
-			return 0;
+		rc = strict_strtoul(scope_id, 0,
+					(unsigned long *)&s6->sin6_scope_id);
+		rc = (rc == 0) ? 1 : 0;
 	}
 
 	return rc;
@@ -899,8 +899,8 @@
 	}
 	/* else ERRHRD class errors or junk  - return EIO */
 
-	cFYI(1, "Mapping smb error code %d to POSIX err %d",
-		 smberrcode, rc);
+	cFYI(1, "Mapping smb error code 0x%x to POSIX err %d",
+		 le32_to_cpu(smb->Status.CifsError), rc);
 
 	/* generic corrective action e.g. reconnect SMB session on
 	 * ERRbaduid could be added */
@@ -916,14 +916,14 @@
 smbCalcSize(struct smb_hdr *ptr)
 {
 	return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
-		2 /* size of the bcc field */ + BCC(ptr));
+		2 /* size of the bcc field */ + get_bcc(ptr));
 }
 
 unsigned int
 smbCalcSize_LE(struct smb_hdr *ptr)
 {
 	return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
-		2 /* size of the bcc field */ + le16_to_cpu(BCC_LE(ptr)));
+		2 /* size of the bcc field */ + get_bcc_le(ptr));
 }
 
 /* The following are taken from fs/ntfs/util.c */
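/*
 * smbCalcSize() and smbCalcSize_LE() above (and several other hunks in this
 * series) switch from the old BCC()/BCC_LE() macros to get_bcc()/get_bcc_le()
 * and put_bcc_le(). Those helpers live in a header outside this hunk; judging
 * from the size calculation here, the ByteCount is the 16-bit value that
 * follows the WordCount parameter words, read with an unaligned access. The
 * sketch below shows that assumed shape; the bcc_ptr_of/sketch_* names are
 * illustrative, not the real cifspdu.h definitions.
 */
#include <asm/unaligned.h>
#include "cifspdu.h"		/* assumed: provides struct smb_hdr */

static inline void *bcc_ptr_of(struct smb_hdr *hdr)
{
	/* header, then WordCount 16-bit parameter words, then the ByteCount */
	return (void *)hdr + sizeof(struct smb_hdr) + 2 * hdr->WordCount;
}

static inline __u16 sketch_get_bcc_le(struct smb_hdr *hdr)
{
	return get_unaligned_le16(bcc_ptr_of(hdr));
}

static inline void sketch_put_bcc_le(__u16 count, struct smb_hdr *hdr)
{
	put_unaligned_le16(count, bcc_ptr_of(hdr));
}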
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 76b1b37..f8e4cd2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -102,11 +102,6 @@
 		return NULL;
 	}
 
-	if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase)
-		d_set_d_op(dentry, &cifs_ci_dentry_ops);
-	else
-		d_set_d_op(dentry, &cifs_dentry_ops);
-
 	alias = d_materialise_unique(dentry, inode);
 	if (alias != NULL) {
 		dput(dentry);
@@ -769,7 +764,6 @@
 {
 	int rc = 0;
 	int xid, i;
-	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	struct cifsFileInfo *cifsFile = NULL;
 	char *current_entry;
@@ -780,8 +774,6 @@
 
 	xid = GetXid();
 
-	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
 	/*
 	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
 	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index eb74648..1676570 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -277,7 +277,7 @@
 }
 
 static void
-decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
 		      const struct nls_table *nls_cp)
 {
 	int len;
@@ -323,7 +323,7 @@
 	return;
 }
 
-static int decode_ascii_ssetup(char **pbcc_area, int bleft,
+static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
 			       struct cifsSesInfo *ses,
 			       const struct nls_table *nls_cp)
 {
@@ -575,12 +575,11 @@
 	char *str_area;
 	SESSION_SETUP_ANDX *pSMB;
 	__u32 capabilities;
-	int count;
+	__u16 count;
 	int resp_buf_type;
 	struct kvec iov[3];
 	enum securityEnum type;
-	__u16 action;
-	int bytes_remaining;
+	__u16 action, bytes_remaining;
 	struct key *spnego_key = NULL;
 	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
 	u16 blob_len;
@@ -657,13 +656,13 @@
 
 	if (type == LANMAN) {
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-		char lnm_session_key[CIFS_SESS_KEY_SIZE];
+		char lnm_session_key[CIFS_AUTH_RESP_SIZE];
 
 		pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
 
 		/* no capabilities flags in old lanman negotiation */
 
-		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 
 		/* Calculate hash with password and copy into bcc_ptr.
 		 * Encryption Key (stored as in cryptkey) gets used if the
@@ -676,8 +675,8 @@
 					true : false, lnm_session_key);
 
 		ses->flags |= CIFS_SES_LANMAN;
-		memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
-		bcc_ptr += CIFS_SESS_KEY_SIZE;
+		memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
 
 		/* can not sign if LANMAN negotiated so no need
 		to calculate signing key? but what if server
@@ -876,10 +875,10 @@
 	count = iov[1].iov_len + iov[2].iov_len;
 	smb_buf->smb_buf_length += count;
 
-	BCC_LE(smb_buf) = cpu_to_le16(count);
+	put_bcc_le(count, smb_buf);
 
 	rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type,
-			  CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
+			  CIFS_LOG_ERROR);
 	/* SMB request buf freed in SendReceive2 */
 
 	pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
@@ -910,7 +909,7 @@
 	cFYI(1, "UID = %d ", ses->Suid);
 	/* response can have either 3 or 4 word count - Samba sends 3 */
 	/* and lanman response is 3 */
-	bytes_remaining = BCC(smb_buf);
+	bytes_remaining = get_bcc(smb_buf);
 	bcc_ptr = pByteArea(smb_buf);
 
 	if (smb_buf->WordCount == 4) {
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index b6b6dcb..0472148 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -45,7 +45,6 @@
    up with a different answer to the one above)
 */
 #include <linux/slab.h>
-#include "cifsencrypt.h"
 #define uchar unsigned char
 
 static uchar perm1[56] = { 57, 49, 41, 33, 25, 17, 9,
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 192ea51..b5041c8 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -32,9 +32,8 @@
 #include "cifs_unicode.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
-#include "md5.h"
 #include "cifs_debug.h"
-#include "cifsencrypt.h"
+#include "cifsproto.h"
 
 #ifndef false
 #define false 0
@@ -48,14 +47,58 @@
 #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
 #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val)))
 
-/*The following definitions come from  libsmb/smbencrypt.c  */
+/* produce a md4 message digest from data of length n bytes */
+int
+mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
+{
+	int rc;
+	unsigned int size;
+	struct crypto_shash *md4;
+	struct sdesc *sdescmd4;
 
-void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
-		unsigned char *p24);
-void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-static void SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
-		   unsigned char p24[24]);
-void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
+	md4 = crypto_alloc_shash("md4", 0, 0);
+	if (IS_ERR(md4)) {
+		rc = PTR_ERR(md4);
+		cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc);
+		return rc;
+	}
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
+	sdescmd4 = kmalloc(size, GFP_KERNEL);
+	if (!sdescmd4) {
+		rc = -ENOMEM;
+		cERROR(1, "%s: Memory allocation failure\n", __func__);
+		goto mdfour_err;
+	}
+	sdescmd4->shash.tfm = md4;
+	sdescmd4->shash.flags = 0x0;
+
+	rc = crypto_shash_init(&sdescmd4->shash);
+	if (rc) {
+		cERROR(1, "%s: Could not init md4 shash\n", __func__);
+		goto mdfour_err;
+	}
+	crypto_shash_update(&sdescmd4->shash, link_str, link_len);
+	rc = crypto_shash_final(&sdescmd4->shash, md4_hash);
+
+mdfour_err:
+	crypto_free_shash(md4);
+	kfree(sdescmd4);
+
+	return rc;
+}
+
+/* Does the des encryption from the NT or LM MD4 hash. */
+static void
+SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
+	      unsigned char p24[24])
+{
+	unsigned char p21[21];
+
+	memset(p21, '\0', 21);
+
+	memcpy(p21, passwd, 16);
+	E_P24(p21, c8, p24);
+}
 
 /*
    This implements the X/Open SMB password encryption
@@ -118,9 +161,10 @@
  * Creates the MD4 Hash of the users password in NT UNICODE.
  */
 
-void
+int
 E_md4hash(const unsigned char *passwd, unsigned char *p16)
 {
+	int rc;
 	int len;
 	__u16 wpwd[129];
 
@@ -139,8 +183,10 @@
 	/* Calculate length in bytes */
 	len = _my_wcslen(wpwd) * sizeof(__u16);
 
-	mdfour(p16, (unsigned char *) wpwd, len);
+	rc = mdfour(p16, (unsigned char *) wpwd, len);
 	memset(wpwd, 0, 129 * 2);
+
+	return rc;
 }
 
 #if 0 /* currently unused */
@@ -212,19 +258,6 @@
 }
 #endif
 
-/* Does the des encryption from the NT or LM MD4 hash. */
-static void
-SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
-	      unsigned char p24[24])
-{
-	unsigned char p21[21];
-
-	memset(p21, '\0', 21);
-
-	memcpy(p21, passwd, 16);
-	E_P24(p21, c8, p24);
-}
-
 /* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
 #if 0 /* currently unused */
 static void
@@ -242,16 +275,21 @@
 #endif
 
 /* Does the NT MD4 hash then des encryption. */
-
-void
+int
 SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
 {
+	int rc;
 	unsigned char p21[21];
 
 	memset(p21, '\0', 21);
 
-	E_md4hash(passwd, p21);
+	rc = E_md4hash(passwd, p21);
+	if (rc) {
+		cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc);
+		return rc;
+	}
 	SMBOWFencrypt(p21, c8, p24);
+	return rc;
 }
 
 
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 59ca81b..46d8756f 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -36,7 +36,13 @@
 
 extern mempool_t *cifs_mid_poolp;
 
-static struct mid_q_entry *
+static void
+wake_up_task(struct mid_q_entry *mid)
+{
+	wake_up_process(mid->callback_data);
+}
+
+struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 {
 	struct mid_q_entry *temp;
@@ -58,28 +64,28 @@
 	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 		/* when mid allocated can be before when sent */
 		temp->when_alloc = jiffies;
-		temp->tsk = current;
+
+		/*
+		 * The default is for the mid to be synchronous, so the
+		 * default callback just wakes up the current task.
+		 */
+		temp->callback = wake_up_task;
+		temp->callback_data = current;
 	}
 
-	spin_lock(&GlobalMid_Lock);
-	list_add_tail(&temp->qhead, &server->pending_mid_q);
 	atomic_inc(&midCount);
 	temp->midState = MID_REQUEST_ALLOCATED;
-	spin_unlock(&GlobalMid_Lock);
 	return temp;
 }
 
-static void
+void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
 #ifdef CONFIG_CIFS_STATS2
 	unsigned long now;
 #endif
-	spin_lock(&GlobalMid_Lock);
 	midEntry->midState = MID_FREE;
-	list_del(&midEntry->qhead);
 	atomic_dec(&midCount);
-	spin_unlock(&GlobalMid_Lock);
 	if (midEntry->largeBuf)
 		cifs_buf_release(midEntry->resp_buf);
 	else
@@ -103,6 +109,16 @@
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
+static void
+delete_mid(struct mid_q_entry *mid)
+{
+	spin_lock(&GlobalMid_Lock);
+	list_del(&mid->qhead);
+	spin_unlock(&GlobalMid_Lock);
+
+	DeleteMidQEntry(mid);
+}
+
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {
@@ -220,9 +236,9 @@
 		server->tcpStatus = CifsNeedReconnect;
 	}
 
-	if (rc < 0) {
+	if (rc < 0 && rc != -EINTR)
 		cERROR(1, "Error %d sending data on socket to server", rc);
-	} else
+	else
 		rc = 0;
 
 	/* Don't want to modify the buffer as a
@@ -244,31 +260,31 @@
 	return smb_sendv(server, &iov, 1);
 }
 
-static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
+static int wait_for_free_request(struct TCP_Server_Info *server,
+				 const int long_op)
 {
 	if (long_op == CIFS_ASYNC_OP) {
 		/* oplock breaks must not be held up */
-		atomic_inc(&ses->server->inFlight);
+		atomic_inc(&server->inFlight);
 		return 0;
 	}
 
 	spin_lock(&GlobalMid_Lock);
 	while (1) {
-		if (atomic_read(&ses->server->inFlight) >=
-				cifs_max_pending){
+		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
 			spin_unlock(&GlobalMid_Lock);
 #ifdef CONFIG_CIFS_STATS2
-			atomic_inc(&ses->server->num_waiters);
+			atomic_inc(&server->num_waiters);
 #endif
-			wait_event(ses->server->request_q,
-				   atomic_read(&ses->server->inFlight)
+			wait_event(server->request_q,
+				   atomic_read(&server->inFlight)
 				     < cifs_max_pending);
 #ifdef CONFIG_CIFS_STATS2
-			atomic_dec(&ses->server->num_waiters);
+			atomic_dec(&server->num_waiters);
 #endif
 			spin_lock(&GlobalMid_Lock);
 		} else {
-			if (ses->server->tcpStatus == CifsExiting) {
+			if (server->tcpStatus == CifsExiting) {
 				spin_unlock(&GlobalMid_Lock);
 				return -ENOENT;
 			}
@@ -278,7 +294,7 @@
 
 			/* update # of requests on the wire to server */
 			if (long_op != CIFS_BLOCKING_OP)
-				atomic_inc(&ses->server->inFlight);
+				atomic_inc(&server->inFlight);
 			spin_unlock(&GlobalMid_Lock);
 			break;
 		}
@@ -308,55 +324,87 @@
 	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 	if (*ppmidQ == NULL)
 		return -ENOMEM;
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
 	return 0;
 }
 
-static int wait_for_response(struct cifsSesInfo *ses,
-			struct mid_q_entry *midQ,
-			unsigned long timeout,
-			unsigned long time_to_wait)
+static int
+wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
-	unsigned long curr_timeout;
+	int error;
 
-	for (;;) {
-		curr_timeout = timeout + jiffies;
-		wait_event_timeout(ses->server->response_q,
-			midQ->midState != MID_REQUEST_SUBMITTED, timeout);
+	error = wait_event_killable(server->response_q,
+				    midQ->midState != MID_REQUEST_SUBMITTED);
+	if (error < 0)
+		return -ERESTARTSYS;
 
-		if (time_after(jiffies, curr_timeout) &&
-			(midQ->midState == MID_REQUEST_SUBMITTED) &&
-			((ses->server->tcpStatus == CifsGood) ||
-			 (ses->server->tcpStatus == CifsNew))) {
-
-			unsigned long lrt;
-
-			/* We timed out. Is the server still
-			   sending replies ? */
-			spin_lock(&GlobalMid_Lock);
-			lrt = ses->server->lstrp;
-			spin_unlock(&GlobalMid_Lock);
-
-			/* Calculate time_to_wait past last receive time.
-			 Although we prefer not to time out if the
-			 server is still responding - we will time
-			 out if the server takes more than 15 (or 45
-			 or 180) seconds to respond to this request
-			 and has not responded to any request from
-			 other threads on the client within 10 seconds */
-			lrt += time_to_wait;
-			if (time_after(jiffies, lrt)) {
-				/* No replies for time_to_wait. */
-				cERROR(1, "server not responding");
-				return -1;
-			}
-		} else {
-			return 0;
-		}
-	}
+	return 0;
 }
 
 
 /*
+ * Send an SMB request and set the callback function in the mid to handle
+ * the result. Caller is responsible for dealing with timeouts.
+ */
+int
+cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+		mid_callback_t *callback, void *cbdata)
+{
+	int rc;
+	struct mid_q_entry *mid;
+
+	rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+	if (rc)
+		return rc;
+
+	/* enable signing if server requires it */
+	if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	mutex_lock(&server->srv_mutex);
+	mid = AllocMidQEntry(in_buf, server);
+	if (mid == NULL) {
+		mutex_unlock(&server->srv_mutex);
+		return -ENOMEM;
+	}
+
+	/* put it on the pending_mid_q */
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&mid->qhead, &server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
+
+	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+	if (rc) {
+		mutex_unlock(&server->srv_mutex);
+		goto out_err;
+	}
+
+	mid->callback = callback;
+	mid->callback_data = cbdata;
+	mid->midState = MID_REQUEST_SUBMITTED;
+#ifdef CONFIG_CIFS_STATS2
+	atomic_inc(&server->inSend);
+#endif
+	rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+#ifdef CONFIG_CIFS_STATS2
+	atomic_dec(&server->inSend);
+	mid->when_sent = jiffies;
+#endif
+	mutex_unlock(&server->srv_mutex);
+	if (rc)
+		goto out_err;
+
+	return rc;
+out_err:
+	delete_mid(mid);
+	atomic_dec(&server->inFlight);
+	wake_up(&server->request_q);
+	return rc;
+}
+
+/*
  *
  * Send an SMB Request.  No response info (other than return code)
  * needs to be parsed.
@@ -382,6 +430,84 @@
 	return rc;
 }
 
+static int
+sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+{
+	int rc = 0;
+
+	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
+		mid->mid, mid->midState);
+
+	spin_lock(&GlobalMid_Lock);
+	/* ensure that it's no longer on the pending_mid_q */
+	list_del_init(&mid->qhead);
+
+	switch (mid->midState) {
+	case MID_RESPONSE_RECEIVED:
+		spin_unlock(&GlobalMid_Lock);
+		return rc;
+	case MID_REQUEST_SUBMITTED:
+		/* socket is going down, reject all calls */
+		if (server->tcpStatus == CifsExiting) {
+			cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
+			       __func__, mid->mid, mid->command, mid->midState);
+			rc = -EHOSTDOWN;
+			break;
+		}
+	case MID_RETRY_NEEDED:
+		rc = -EAGAIN;
+		break;
+	case MID_RESPONSE_MALFORMED:
+		rc = -EIO;
+		break;
+	default:
+		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
+			mid->mid, mid->midState);
+		rc = -EIO;
+	}
+	spin_unlock(&GlobalMid_Lock);
+
+	DeleteMidQEntry(mid);
+	return rc;
+}
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+		struct mid_q_entry *mid)
+{
+	int rc = 0;
+
+	/* -4 for RFC1001 length and +2 for BCC field */
+	in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4  + 2;
+	in_buf->Command = SMB_COM_NT_CANCEL;
+	in_buf->WordCount = 0;
+	put_bcc_le(0, in_buf);
+
+	mutex_lock(&server->srv_mutex);
+	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+	if (rc) {
+		mutex_unlock(&server->srv_mutex);
+		return rc;
+	}
+	rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+	mutex_unlock(&server->srv_mutex);
+
+	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+		in_buf->Mid, rc);
+
+	return rc;
+}
+
 int
 SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -390,7 +516,6 @@
 	int rc = 0;
 	int long_op;
 	unsigned int receive_len;
-	unsigned long timeout;
 	struct mid_q_entry *midQ;
 	struct smb_hdr *in_buf = iov[0].iov_base;
 
@@ -413,7 +538,7 @@
 	   to the same server. We may make this configurable later or
 	   use ses->maxReq */
 
-	rc = wait_for_free_request(ses, long_op);
+	rc = wait_for_free_request(ses->server, long_op);
 	if (rc) {
 		cifs_small_buf_release(in_buf);
 		return rc;
@@ -452,70 +577,41 @@
 #endif
 
 	mutex_unlock(&ses->server->srv_mutex);
-	cifs_small_buf_release(in_buf);
 
-	if (rc < 0)
-		goto out;
-
-	if (long_op == CIFS_STD_OP)
-		timeout = 15 * HZ;
-	else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
-		timeout = 180 * HZ;
-	else if (long_op == CIFS_LONG_OP)
-		timeout = 45 * HZ; /* should be greater than
-			servers oplock break timeout (about 43 seconds) */
-	else if (long_op == CIFS_ASYNC_OP)
-		goto out;
-	else if (long_op == CIFS_BLOCKING_OP)
-		timeout = 0x7FFFFFFF; /*  large, but not so large as to wrap */
-	else {
-		cERROR(1, "unknown timeout flag %d", long_op);
-		rc = -EIO;
+	if (rc < 0) {
+		cifs_small_buf_release(in_buf);
 		goto out;
 	}
 
-	/* wait for 15 seconds or until woken up due to response arriving or
-	   due to last connection to this server being unmounted */
-	if (signal_pending(current)) {
-		/* if signal pending do not hold up user for full smb timeout
-		but we still give response a chance to complete */
-		timeout = 2 * HZ;
+	if (long_op == CIFS_ASYNC_OP) {
+		cifs_small_buf_release(in_buf);
+		goto out;
 	}
 
-	/* No user interrupts in wait - wreaks havoc with performance */
-	wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-	spin_lock(&GlobalMid_Lock);
-
-	if (midQ->resp_buf == NULL) {
-		cERROR(1, "No response to cmd %d mid %d",
-			midQ->command, midQ->mid);
+	rc = wait_for_response(ses->server, midQ);
+	if (rc != 0) {
+		send_nt_cancel(ses->server, in_buf, midQ);
+		spin_lock(&GlobalMid_Lock);
 		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
-
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
+			midQ->callback = DeleteMidQEntry;
+			spin_unlock(&GlobalMid_Lock);
+			cifs_small_buf_release(in_buf);
+			atomic_dec(&ses->server->inFlight);
+			wake_up(&ses->server->request_q);
+			return rc;
 		}
 		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
-		/* Update # of requests on wire to server */
+	}
+
+	cifs_small_buf_release(in_buf);
+
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0) {
 		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
 
-	spin_unlock(&GlobalMid_Lock);
 	receive_len = midQ->resp_buf->smb_buf_length;
 
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -559,19 +655,18 @@
 		if (receive_len >= sizeof(struct smb_hdr) - 4
 		    /* do not count RFC1001 header */  +
 		    (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
-			BCC(midQ->resp_buf) =
-				le16_to_cpu(BCC_LE(midQ->resp_buf));
+			put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
 		if ((flags & CIFS_NO_RESP) == 0)
 			midQ->resp_buf = NULL;  /* mark it so buf will
 						   not be freed by
-						   DeleteMidQEntry */
+						   delete_mid */
 	} else {
 		rc = -EIO;
 		cFYI(1, "Bad MID state?");
 	}
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
@@ -585,7 +680,6 @@
 {
 	int rc = 0;
 	unsigned int receive_len;
-	unsigned long timeout;
 	struct mid_q_entry *midQ;
 
 	if (ses == NULL) {
@@ -610,7 +704,7 @@
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses, long_op);
+	rc = wait_for_free_request(ses->server, long_op);
 	if (rc)
 		return rc;
 
@@ -649,64 +743,31 @@
 	if (rc < 0)
 		goto out;
 
-	if (long_op == CIFS_STD_OP)
-		timeout = 15 * HZ;
-	/* wait for 15 seconds or until woken up due to response arriving or
-	   due to last connection to this server being unmounted */
-	else if (long_op == CIFS_ASYNC_OP)
+	if (long_op == CIFS_ASYNC_OP)
 		goto out;
-	else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
-		timeout = 180 * HZ;
-	else if (long_op == CIFS_LONG_OP)
-		timeout = 45 * HZ; /* should be greater than
-			servers oplock break timeout (about 43 seconds) */
-	else if (long_op == CIFS_BLOCKING_OP)
-		timeout = 0x7FFFFFFF; /* large but no so large as to wrap */
-	else {
-		cERROR(1, "unknown timeout flag %d", long_op);
-		rc = -EIO;
-		goto out;
-	}
 
-	if (signal_pending(current)) {
-		/* if signal pending do not hold up user for full smb timeout
-		but we still give response a chance to complete */
-		timeout = 2 * HZ;
-	}
-
-	/* No user interrupts in wait - wreaks havoc with performance */
-	wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-	spin_lock(&GlobalMid_Lock);
-	if (midQ->resp_buf == NULL) {
-		cERROR(1, "No response for cmd %d mid %d",
-			  midQ->command, midQ->mid);
+	rc = wait_for_response(ses->server, midQ);
+	if (rc != 0) {
+		send_nt_cancel(ses->server, in_buf, midQ);
+		spin_lock(&GlobalMid_Lock);
 		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
-
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
+			/* no longer considered to be "in-flight" */
+			midQ->callback = DeleteMidQEntry;
+			spin_unlock(&GlobalMid_Lock);
+			atomic_dec(&ses->server->inFlight);
+			wake_up(&ses->server->request_q);
+			return rc;
 		}
 		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
-		/* Update # of requests on wire to server */
+	}
+
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0) {
 		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
 
-	spin_unlock(&GlobalMid_Lock);
 	receive_len = midQ->resp_buf->smb_buf_length;
 
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -748,43 +809,20 @@
 		if (receive_len >= sizeof(struct smb_hdr) - 4
 		    /* do not count RFC1001 header */  +
 		    (2 * out_buf->WordCount) + 2 /* bcc */ )
-			BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+			put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
 	} else {
 		rc = -EIO;
 		cERROR(1, "Bad MID state?");
 	}
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
 	return rc;
 }
 
-/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
-
-static int
-send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
-		struct mid_q_entry *midQ)
-{
-	int rc = 0;
-	struct cifsSesInfo *ses = tcon->ses;
-	__u16 mid = in_buf->Mid;
-
-	header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
-	in_buf->Mid = mid;
-	mutex_lock(&ses->server->srv_mutex);
-	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
-	if (rc) {
-		mutex_unlock(&ses->server->srv_mutex);
-		return rc;
-	}
-	rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
-	mutex_unlock(&ses->server->srv_mutex);
-	return rc;
-}
-
 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
    blocking lock to return. */
 
@@ -807,7 +845,7 @@
 	pSMB->hdr.Mid = GetNextMid(ses->server);
 
 	return SendReceive(xid, ses, in_buf, out_buf,
-			&bytes_returned, CIFS_STD_OP);
+			&bytes_returned, 0);
 }
 
 int
@@ -845,7 +883,7 @@
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
+	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
 	if (rc)
 		return rc;
 
@@ -863,7 +901,7 @@
 
 	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 	if (rc) {
-		DeleteMidQEntry(midQ);
+		delete_mid(midQ);
 		mutex_unlock(&ses->server->srv_mutex);
 		return rc;
 	}
@@ -880,7 +918,7 @@
 	mutex_unlock(&ses->server->srv_mutex);
 
 	if (rc < 0) {
-		DeleteMidQEntry(midQ);
+		delete_mid(midQ);
 		return rc;
 	}
 
@@ -899,10 +937,9 @@
 		if (in_buf->Command == SMB_COM_TRANSACTION2) {
 			/* POSIX lock. We send a NT_CANCEL SMB to cause the
 			   blocking lock to return. */
-
-			rc = send_nt_cancel(tcon, in_buf, midQ);
+			rc = send_nt_cancel(ses->server, in_buf, midQ);
 			if (rc) {
-				DeleteMidQEntry(midQ);
+				delete_mid(midQ);
 				return rc;
 			}
 		} else {
@@ -914,47 +951,33 @@
 			/* If we get -ENOLCK back the lock may have
 			   already been removed. Don't exit in this case. */
 			if (rc && rc != -ENOLCK) {
-				DeleteMidQEntry(midQ);
+				delete_mid(midQ);
 				return rc;
 			}
 		}
 
-		/* Wait 5 seconds for the response. */
-		if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
-			/* We got the response - restart system call. */
-			rstart = 1;
+		rc = wait_for_response(ses->server, midQ);
+		if (rc) {
+			send_nt_cancel(ses->server, in_buf, midQ);
+			spin_lock(&GlobalMid_Lock);
+			if (midQ->midState == MID_REQUEST_SUBMITTED) {
+				/* no longer considered to be "in-flight" */
+				midQ->callback = DeleteMidQEntry;
+				spin_unlock(&GlobalMid_Lock);
+				return rc;
+			}
+			spin_unlock(&GlobalMid_Lock);
 		}
+
+		/* We got the response - restart system call. */
+		rstart = 1;
 	}
 
-	spin_lock(&GlobalMid_Lock);
-	if (midQ->resp_buf) {
-		spin_unlock(&GlobalMid_Lock);
-		receive_len = midQ->resp_buf->smb_buf_length;
-	} else {
-		cERROR(1, "No response for cmd %d mid %d",
-			  midQ->command, midQ->mid);
-		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
-
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
-		}
-		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0)
 		return rc;
-	}
 
+	receive_len = midQ->resp_buf->smb_buf_length;
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
 		cERROR(1, "Frame too large received.  Length: %d  Xid: %d",
 			receive_len, xid);
@@ -998,10 +1021,10 @@
 	if (receive_len >= sizeof(struct smb_hdr) - 4
 	    /* do not count RFC1001 header */  +
 	    (2 * out_buf->WordCount) + 2 /* bcc */ )
-		BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+		put_bcc(get_bcc_le(out_buf), out_buf);
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	if (rstart && rc == -EACCES)
 		return -ERESTARTSYS;
 	return rc;
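The transport.c rework above does two related things: every mid_q_entry now carries a callback and callback_data (the default callback simply wakes the submitting task, while cifs_call_async() lets a caller install its own completion handler), and the old polling loops with hard-coded 15/45/180-second timeouts are replaced by a killable wait in wait_for_response(), with send_nt_cancel() used to cancel a request the caller has given up on. A toy userspace model of the callback dispatch (all names below are illustrative, not the kernel structures):

	#include <stdio.h>

	/* Toy model of a mid (multiplex id) entry: the receive path does not care
	 * whether the submitter waits synchronously or not, it just invokes
	 * whatever callback the submitter installed. */
	struct toy_mid {
		int id;
		void (*callback)(struct toy_mid *mid);
		void *callback_data;
	};

	/* Default callback: the real code wakes the submitting task here
	 * (wake_up_process()); this model just reports it. */
	static void default_callback(struct toy_mid *mid)
	{
		printf("mid %d: waking synchronous waiter %p\n", mid->id, mid->callback_data);
	}

	/* An asynchronous caller installs its own completion handler instead. */
	static void async_callback(struct toy_mid *mid)
	{
		printf("mid %d: async completion, private data %p\n", mid->id, mid->callback_data);
	}

	/* What the demultiplex thread does once a response matches a mid. */
	static void dispatch_response(struct toy_mid *mid)
	{
		mid->callback(mid);
	}

	int main(void)
	{
		int sync_token = 1, async_token = 2;
		struct toy_mid sync_mid  = { .id = 1, .callback = default_callback,
					     .callback_data = &sync_token };
		struct toy_mid async_mid = { .id = 2, .callback = async_callback,
					     .callback_data = &async_token };

		dispatch_response(&sync_mid);
		dispatch_response(&async_mid);
		return 0;
	}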
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 5525e1c..6901578 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -20,10 +20,9 @@
 #include <linux/spinlock.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 static atomic_t permission_epoch = ATOMIC_INIT(0);
 
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index 6022405..6475877 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -7,9 +7,8 @@
 #include <linux/time.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
+#include "coda_linux.h"
 
 static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2)
 {
diff --git a/include/linux/coda_cache.h b/fs/coda/coda_cache.h
similarity index 100%
rename from include/linux/coda_cache.h
rename to fs/coda/coda_cache.h
diff --git a/include/linux/coda_fs_i.h b/fs/coda/coda_fs_i.h
similarity index 100%
rename from include/linux/coda_fs_i.h
rename to fs/coda/coda_fs_i.h
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index bf4a3fd..2bdbcc1 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -17,9 +17,8 @@
 #include <linux/string.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
+#include "coda_linux.h"
 
 /* initialize the debugging variables */
 int coda_fake_statfs;
diff --git a/include/linux/coda_linux.h b/fs/coda/coda_linux.h
similarity index 96%
rename from include/linux/coda_linux.h
rename to fs/coda/coda_linux.h
index 4ccc59c..9b0c532 100644
--- a/include/linux/coda_linux.h
+++ b/fs/coda/coda_linux.h
@@ -20,13 +20,15 @@
 #include <linux/wait.h>		
 #include <linux/types.h>
 #include <linux/fs.h>
-#include <linux/coda_fs_i.h>
+#include "coda_fs_i.h"
 
 /* operations */
 extern const struct inode_operations coda_dir_inode_operations;
 extern const struct inode_operations coda_file_inode_operations;
 extern const struct inode_operations coda_ioctl_inode_operations;
 
+extern const struct dentry_operations coda_dentry_operations;
+
 extern const struct address_space_operations coda_file_aops;
 extern const struct address_space_operations coda_symlink_aops;
 
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 29badd9..2b8dae4 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -23,10 +23,9 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
@@ -61,7 +60,7 @@
 }
 #define CODA_EIO_ERROR ((void *) (coda_return_EIO))
 
-static const struct dentry_operations coda_dentry_operations =
+const struct dentry_operations coda_dentry_operations =
 {
 	.d_revalidate	= coda_dentry_revalidate,
 	.d_delete	= coda_dentry_delete,
@@ -126,8 +125,6 @@
 		return ERR_PTR(error);
 
 exit:
-	d_set_d_op(entry, &coda_dentry_operations);
-
 	if (inode && (type & CODA_NOCACHE))
 		coda_flag_inode(inode, C_VATTR | C_PURGE);
 
diff --git a/fs/coda/file.c b/fs/coda/file.c
index c8b50ba..0433057 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -21,10 +21,9 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
 #include "coda_int.h"
 
 static ssize_t
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 50dc7d1..871b277 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -28,10 +28,9 @@
 #include <linux/vmalloc.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
@@ -45,7 +44,7 @@
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
 	struct coda_inode_info *ei;
-	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
+	ei = kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -193,6 +192,7 @@
 	sb->s_blocksize_bits = 12;
 	sb->s_magic = CODA_SUPER_MAGIC;
 	sb->s_op = &coda_super_operations;
+	sb->s_d_op = &coda_dentry_operations;
 	sb->s_bdi = &vc->bdi;
 
 	/* get root fid from Venus: this needs the root inode */
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 741f0bd..6cbb3af 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -19,10 +19,10 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
+
 /* pioctl ops */
 static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags);
 static long coda_pioctl(struct file *filp, unsigned int cmd,
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 62647a8..8f616e0 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -43,10 +43,10 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
+
 #include "coda_int.h"
 
 /* statistics */
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index af78f00..ab94ef6 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -16,9 +16,9 @@
 #include <linux/pagemap.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
+
+#include "coda_linux.h"
 
 static int coda_symlink_filler(struct file *file, struct page *page)
 {
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index c3563ca..9727e0c 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -33,10 +33,9 @@
 #include <linux/vfs.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
diff --git a/fs/compat.c b/fs/compat.c
index eb1740a..691c3fd 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -257,7 +257,7 @@
 }
 
 /*
- * The following statfs calls are copies of code from fs/open.c and
+ * The following statfs calls are copies of code from fs/statfs.c and
  * should be checked against those from time to time
  */
 asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf)
@@ -320,7 +320,9 @@
 	    __put_user(kbuf->f_namelen, &ubuf->f_namelen) ||
 	    __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) ||
 	    __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) ||
-	    __put_user(kbuf->f_frsize, &ubuf->f_frsize))
+	    __put_user(kbuf->f_frsize, &ubuf->f_frsize) ||
+	    __put_user(kbuf->f_flags, &ubuf->f_flags) ||
+	    __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare)))
 		return -EFAULT;
 	return 0;
 }
@@ -597,10 +599,8 @@
 	if (nr_segs > fast_segs) {
 		ret = -ENOMEM;
 		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
-		if (iov == NULL) {
-			*ret_pointer = fast_pointer;
+		if (iov == NULL)
 			goto out;
-		}
 	}
 	*ret_pointer = iov;
 
@@ -1228,7 +1228,9 @@
 	file = fget_light(fd, &fput_needed);
 	if (!file)
 		return -EBADF;
-	ret = compat_readv(file, vec, vlen, &pos);
+	ret = -ESPIPE;
+	if (file->f_mode & FMODE_PREAD)
+		ret = compat_readv(file, vec, vlen, &pos);
 	fput_light(file, fput_needed);
 	return ret;
 }
@@ -1285,7 +1287,9 @@
 	file = fget_light(fd, &fput_needed);
 	if (!file)
 		return -EBADF;
-	ret = compat_writev(file, vec, vlen, &pos);
+	ret = -ESPIPE;
+	if (file->f_mode & FMODE_PWRITE)
+		ret = compat_writev(file, vec, vlen, &pos);
 	fput_light(file, fput_needed);
 	return ret;
 }
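Among the compat.c fixes above, compat_sys_preadv() and compat_sys_pwritev() are brought in line with the native syscalls: if the file was not opened with FMODE_PREAD/FMODE_PWRITE (pipes and other non-seekable files), the positional variants now fail with -ESPIPE instead of being attempted at an offset. The behaviour is easy to observe from userspace; a small demonstration (a 32-bit caller on a 64-bit kernel goes through the compat path fixed here, but the result is the same either way):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		int pipefd[2];
		char buf[8];
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		ssize_t n;

		if (pipe(pipefd) != 0) {
			perror("pipe");
			return 1;
		}

		/* A pipe is not seekable, so a positional read must fail. */
		n = preadv(pipefd[0], &iov, 1, 0);
		if (n < 0)
			printf("preadv on a pipe: %s\n", strerror(errno));	/* expect ESPIPE */

		close(pipefd[0]);
		close(pipefd[1]);
		return 0;
	}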
diff --git a/fs/configfs/Kconfig b/fs/configfs/Kconfig
index 13587cc..9febcde 100644
--- a/fs/configfs/Kconfig
+++ b/fs/configfs/Kconfig
@@ -1,8 +1,8 @@
 config CONFIGFS_FS
 	tristate "Userspace-driven configuration filesystem"
-	depends on SYSFS
+	select SYSFS
 	help
-	  configfs is a ram-based filesystem that provides the converse
+	  configfs is a RAM-based filesystem that provides the converse
 	  of sysfs's functionality. Where sysfs is a filesystem-based
 	  view of kernel objects, configfs is a filesystem-based manager
 	  of kernel objects, or config_items.
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 026cf68..82bda8f 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -90,6 +90,7 @@
 extern const struct file_operations bin_fops;
 extern const struct inode_operations configfs_dir_inode_operations;
 extern const struct inode_operations configfs_symlink_inode_operations;
+extern const struct dentry_operations configfs_dentry_ops;
 
 extern int configfs_symlink(struct inode *dir, struct dentry *dentry,
 			    const char *symname);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 36637a8..90ff3cb 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@
 	return 1;
 }
 
-static const struct dentry_operations configfs_dentry_ops = {
+const struct dentry_operations configfs_dentry_ops = {
 	.d_iput		= configfs_d_iput,
 	/* simple_delete_dentry() isn't exported */
 	.d_delete	= configfs_d_delete,
@@ -442,7 +442,6 @@
 		return error;
 	}
 
-	d_set_d_op(dentry, &configfs_dentry_ops);
 	d_rehash(dentry);
 
 	return 0;
@@ -489,7 +488,6 @@
 		 */
 		if (dentry->d_name.len > NAME_MAX)
 			return ERR_PTR(-ENAMETOOLONG);
-		d_set_d_op(dentry, &configfs_dentry_ops);
 		d_add(dentry, NULL);
 		return NULL;
 	}
@@ -683,7 +681,6 @@
 	ret = -ENOMEM;
 	child = d_alloc(parent, &name);
 	if (child) {
-		d_set_d_op(child, &configfs_dentry_ops);
 		d_add(child, NULL);
 
 		ret = configfs_attach_group(&parent_group->cg_item,
@@ -1681,7 +1678,6 @@
 	err = -ENOMEM;
 	dentry = d_alloc(configfs_sb->s_root, &name);
 	if (dentry) {
-		d_set_d_op(dentry, &configfs_dentry_ops);
 		d_add(dentry, NULL);
 
 		err = configfs_attach_group(sd->s_element, &group->cg_item,
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 7d3607f..ecc6217 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -101,6 +101,7 @@
 	configfs_root_group.cg_item.ci_dentry = root;
 	root->d_fsdata = &configfs_root;
 	sb->s_root = root;
+	sb->s_d_op = &configfs_dentry_ops; /* the rest get that */
 	return 0;
 }
 
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 32fd5fe..e141939 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -34,57 +34,81 @@
 static DEFINE_MUTEX(read_mutex);
 
 
-/* These two macros may change in future, to provide better st_ino
-   semantics. */
-#define CRAMINO(x)	(((x)->offset && (x)->size)?(x)->offset<<2:1)
+/* These macros may change in future, to provide better st_ino semantics. */
 #define OFFSET(x)	((x)->i_ino)
 
-static void setup_inode(struct inode *inode, struct cramfs_inode * cramfs_inode)
+static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
 {
+	if (!cino->offset)
+		return offset + 1;
+	if (!cino->size)
+		return offset + 1;
+
+	/*
+	 * The file mode test fixes buggy mkcramfs implementations where
+	 * cramfs_inode->offset is set to a non-zero value for entries
+	 * which do not contain data, such as device nodes and fifos.
+	 */
+	switch (cino->mode & S_IFMT) {
+	case S_IFREG:
+	case S_IFDIR:
+	case S_IFLNK:
+		return cino->offset << 2;
+	default:
+		break;
+	}
+	return offset + 1;
+}
+
+static struct inode *get_cramfs_inode(struct super_block *sb,
+	struct cramfs_inode *cramfs_inode, unsigned int offset)
+{
+	struct inode *inode;
 	static struct timespec zerotime;
+
+	inode = iget_locked(sb, cramino(cramfs_inode, offset));
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	switch (cramfs_inode->mode & S_IFMT) {
+	case S_IFREG:
+		inode->i_fop = &generic_ro_fops;
+		inode->i_data.a_ops = &cramfs_aops;
+		break;
+	case S_IFDIR:
+		inode->i_op = &cramfs_dir_inode_operations;
+		inode->i_fop = &cramfs_directory_operations;
+		break;
+	case S_IFLNK:
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_data.a_ops = &cramfs_aops;
+		break;
+	default:
+		init_special_inode(inode, cramfs_inode->mode,
+				old_decode_dev(cramfs_inode->size));
+	}
+
 	inode->i_mode = cramfs_inode->mode;
 	inode->i_uid = cramfs_inode->uid;
-	inode->i_size = cramfs_inode->size;
-	inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
 	inode->i_gid = cramfs_inode->gid;
+
+	/* if the lower 2 bits are zero, the inode contains data */
+	if (!(inode->i_ino & 3)) {
+		inode->i_size = cramfs_inode->size;
+		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+	}
+
 	/* Struct copy intentional */
 	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
 	/* inode->i_nlink is left 1 - arguably wrong for directories,
 	   but it's the best we can do without reading the directory
 	   contents.  1 yields the right result in GNU find, even
 	   without -noleaf option. */
-	if (S_ISREG(inode->i_mode)) {
-		inode->i_fop = &generic_ro_fops;
-		inode->i_data.a_ops = &cramfs_aops;
-	} else if (S_ISDIR(inode->i_mode)) {
-		inode->i_op = &cramfs_dir_inode_operations;
-		inode->i_fop = &cramfs_directory_operations;
-	} else if (S_ISLNK(inode->i_mode)) {
-		inode->i_op = &page_symlink_inode_operations;
-		inode->i_data.a_ops = &cramfs_aops;
-	} else {
-		init_special_inode(inode, inode->i_mode,
-			old_decode_dev(cramfs_inode->size));
-	}
-}
 
-static struct inode *get_cramfs_inode(struct super_block *sb,
-				struct cramfs_inode * cramfs_inode)
-{
-	struct inode *inode;
-	if (CRAMINO(cramfs_inode) == 1) {
-		inode = new_inode(sb);
-		if (inode) {
-			inode->i_ino = 1;
-			setup_inode(inode, cramfs_inode);
-		}
-	} else {
-		inode = iget_locked(sb, CRAMINO(cramfs_inode));
-		if (inode && (inode->i_state & I_NEW)) {
-			setup_inode(inode, cramfs_inode);
-			unlock_new_inode(inode);
-		}
-	}
+	unlock_new_inode(inode);
+
 	return inode;
 }
 
@@ -265,6 +289,9 @@
 		printk(KERN_ERR "cramfs: root is not a directory\n");
 		goto out;
 	}
+	/* correct strange, hard-coded permissions of mkcramfs */
+	super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
+
 	root_offset = super.root.offset << 2;
 	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
 		sbi->size=super.size;
@@ -289,7 +316,7 @@
 
 	/* Set it all up.. */
 	sb->s_op = &cramfs_ops;
-	root = get_cramfs_inode(sb, &super.root);
+	root = get_cramfs_inode(sb, &super.root, 0);
 	if (!root)
 		goto out;
 	sb->s_root = d_alloc_root(root);
@@ -365,7 +392,7 @@
 		 */
 		namelen = de->namelen << 2;
 		memcpy(buf, name, namelen);
-		ino = CRAMINO(de);
+		ino = cramino(de, OFFSET(inode) + offset);
 		mode = de->mode;
 		mutex_unlock(&read_mutex);
 		nextoffset = offset + sizeof(*de) + namelen;
@@ -404,8 +431,9 @@
 		struct cramfs_inode *de;
 		char *name;
 		int namelen, retval;
+		int dir_off = OFFSET(dir) + offset;
 
-		de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
+		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
 		name = (char *)(de+1);
 
 		/* Try to take advantage of sorted directories */
@@ -436,7 +464,7 @@
 		if (!retval) {
 			struct cramfs_inode entry = *de;
 			mutex_unlock(&read_mutex);
-			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
+			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off));
 			return NULL;
 		}
 		/* else (retval < 0) */
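The cramfs change above replaces the old CRAMINO() macro with cramino(), which only trusts cramfs_inode->offset as an inode number for entries that genuinely carry data; anything else (including buggy mkcramfs images that put a non-zero offset on device nodes and fifos) gets a number derived from the directory-entry offset instead. Because real data offsets are shifted left by two while the fallback numbers come from 4-byte-aligned directory offsets plus one, get_cramfs_inode() can later use the !(inode->i_ino & 3) test to tell the two cases apart. A userspace restatement of that decision (types and names are simplified stand-ins):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/stat.h>

	/* Minimal stand-in for the on-disk cramfs inode fields used here. */
	struct toy_cramfs_inode {
		uint32_t mode;
		uint32_t offset;	/* data offset, in 4-byte units */
		uint32_t size;
	};

	/* Mirrors cramino(): only regular files, directories and symlinks with a
	 * non-zero offset and size use offset << 2 as their inode number; every
	 * other entry falls back to its directory-entry byte offset plus one. */
	static unsigned long toy_cramino(const struct toy_cramfs_inode *cino,
					 unsigned int dirent_offset)
	{
		if (!cino->offset || !cino->size)
			return dirent_offset + 1;

		switch (cino->mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			return cino->offset << 2;
		default:
			return dirent_offset + 1;
		}
	}

	int main(void)
	{
		struct toy_cramfs_inode file = { .mode = S_IFREG | 0644, .offset = 25, .size = 100 };
		struct toy_cramfs_inode fifo = { .mode = S_IFIFO | 0644, .offset = 25, .size = 0 };

		printf("regular file ino: %lu\n", toy_cramino(&file, 120));	/* 100: data-bearing */
		printf("fifo ino:         %lu\n", toy_cramino(&fifo, 120));	/* 121: per-dirent */
		return 0;
	}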
diff --git a/fs/dcache.c b/fs/dcache.c
index 5699d4c..611ffe9 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -176,6 +176,7 @@
 
 /**
  * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+ * @dentry: the target dentry
  * After this call, in-progress rcu-walk path lookup will fail. This
  * should be called after unhashing, and after changing d_inode (if
  * the dentry has not already been unhashed).
@@ -281,6 +282,7 @@
 /**
  * d_kill - kill dentry and return parent
  * @dentry: dentry to kill
+ * @parent: parent dentry
  *
  * The dentry must already be unhashed and removed from the LRU.
  *
@@ -1320,6 +1322,7 @@
 		__dget_dlock(parent);
 		dentry->d_parent = parent;
 		dentry->d_sb = parent->d_sb;
+		d_set_d_op(dentry, dentry->d_sb->s_d_op);
 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
 		spin_unlock(&parent->d_lock);
 	}
@@ -1335,6 +1338,7 @@
 	struct dentry *dentry = d_alloc(NULL, name);
 	if (dentry) {
 		dentry->d_sb = sb;
+		d_set_d_op(dentry, dentry->d_sb->s_d_op);
 		dentry->d_parent = dentry;
 		dentry->d_flags |= DCACHE_DISCONNECTED;
 	}
@@ -1355,8 +1359,8 @@
 
 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
 {
-	BUG_ON(dentry->d_op);
-	BUG_ON(dentry->d_flags & (DCACHE_OP_HASH	|
+	WARN_ON_ONCE(dentry->d_op);
+	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
 				DCACHE_OP_COMPARE	|
 				DCACHE_OP_REVALIDATE	|
 				DCACHE_OP_DELETE ));
@@ -1378,8 +1382,11 @@
 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 {
 	spin_lock(&dentry->d_lock);
-	if (inode)
+	if (inode) {
+		if (unlikely(IS_AUTOMOUNT(inode)))
+			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
 		list_add(&dentry->d_alias, &inode->i_dentry);
+	}
 	dentry->d_inode = inode;
 	dentry_rcuwalk_barrier(dentry);
 	spin_unlock(&dentry->d_lock);
@@ -1507,6 +1514,7 @@
 		res = d_alloc(NULL, &name);
 		if (res) {
 			res->d_sb = root_inode->i_sb;
+			d_set_d_op(res, res->d_sb->s_d_op);
 			res->d_parent = res;
 			d_instantiate(res, root_inode);
 		}
@@ -1515,6 +1523,28 @@
 }
 EXPORT_SYMBOL(d_alloc_root);
 
+static struct dentry * __d_find_any_alias(struct inode *inode)
+{
+	struct dentry *alias;
+
+	if (list_empty(&inode->i_dentry))
+		return NULL;
+	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
+	__dget(alias);
+	return alias;
+}
+
+static struct dentry * d_find_any_alias(struct inode *inode)
+{
+	struct dentry *de;
+
+	spin_lock(&inode->i_lock);
+	de = __d_find_any_alias(inode);
+	spin_unlock(&inode->i_lock);
+	return de;
+}
+
+
 /**
  * d_obtain_alias - find or allocate a dentry for a given inode
  * @inode: inode to allocate the dentry for
@@ -1544,7 +1574,7 @@
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 
-	res = d_find_alias(inode);
+	res = d_find_any_alias(inode);
 	if (res)
 		goto out_iput;
 
@@ -1557,7 +1587,7 @@
 
 
 	spin_lock(&inode->i_lock);
-	res = __d_find_alias(inode, 0);
+	res = __d_find_any_alias(inode);
 	if (res) {
 		spin_unlock(&inode->i_lock);
 		dput(tmp);
@@ -1567,6 +1597,7 @@
 	/* attach a disconnected dentry */
 	spin_lock(&tmp->d_lock);
 	tmp->d_sb = inode->i_sb;
+	d_set_d_op(tmp, tmp->d_sb->s_d_op);
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
@@ -2449,8 +2480,7 @@
 }
 
 /**
- * Prepend path string to a buffer
- *
+ * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
  * @root: root vfsmnt/dentry (may be modified by this function)
  * @buffer: pointer to the end of the buffer
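The dcache.c hunks above add the generic side of a change used by the cifs, coda, configfs and ecryptfs hunks in this pull: d_alloc(), d_alloc_root() and d_obtain_alias() now copy the superblock's s_d_op into every new dentry, so a filesystem sets its dentry_operations once at mount time (sb->s_d_op = &...;) and drops the per-dentry d_set_d_op() calls, and d_set_d_op() itself now warns rather than BUGs if an op is already installed. A toy model of that default-ops hand-off (all names below are illustrative):

	#include <stdio.h>

	struct toy_dentry_ops {
		const char *name;
	};

	struct toy_super_block {
		const struct toy_dentry_ops *s_d_op;	/* default ops for this sb */
	};

	struct toy_dentry {
		const struct toy_dentry_ops *d_op;
		const struct toy_super_block *d_sb;
	};

	/* The equivalent of d_alloc() calling d_set_d_op(dentry, sb->s_d_op):
	 * new dentries inherit the superblock's default operations. */
	static void toy_d_alloc(struct toy_dentry *dentry, const struct toy_super_block *sb)
	{
		dentry->d_sb = sb;
		dentry->d_op = sb->s_d_op;
	}

	int main(void)
	{
		const struct toy_dentry_ops fs_ops = { .name = "coda_dentry_operations" };
		struct toy_super_block sb = { .s_d_op = &fs_ops };
		struct toy_dentry dentry;

		toy_d_alloc(&dentry, &sb);
		printf("new dentry uses %s by default\n", dentry.d_op->name);
		return 0;
	}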
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 85882f6..b044705 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,12 +325,16 @@
 }
 EXPORT_SYMBOL_GPL(dio_end_io);
 
-static int
+static void
 dio_bio_alloc(struct dio *dio, struct block_device *bdev,
 		sector_t first_sector, int nr_vecs)
 {
 	struct bio *bio;
 
+	/*
+	 * bio_alloc() is guaranteed to return a bio when called with
+	 * __GFP_WAIT and we request a valid number of vectors.
+	 */
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
 	bio->bi_bdev = bdev;
@@ -342,7 +346,6 @@
 
 	dio->bio = bio;
 	dio->logical_offset_in_bio = dio->cur_page_fs_offset;
-	return 0;
 }
 
 /*
@@ -583,8 +586,9 @@
 		goto out;
 	sector = start_sector << (dio->blkbits - 9);
 	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
+	nr_pages = min(nr_pages, BIO_MAX_PAGES);
 	BUG_ON(nr_pages <= 0);
-	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
+	dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
 	dio->boundary = 0;
 out:
 	return ret;
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index 2dbb422..1897eb1b 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -1,8 +1,7 @@
 menuconfig DLM
 	tristate "Distributed Lock Manager (DLM)"
 	depends on EXPERIMENTAL && INET
-	depends on SYSFS && (IPV6 || IPV6=n)
-	select CONFIGFS_FS
+	depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
 	select IP_SCTP
 	help
 	A general purpose distributed lock manager for kernel or userspace
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 9c64ae9..2d8c87b 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1468,15 +1468,13 @@
 
 static int work_start(void)
 {
-	recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM |
-					 WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+	recv_workqueue = create_singlethread_workqueue("dlm_recv");
 	if (!recv_workqueue) {
 		log_print("can't start dlm_recv");
 		return -ENOMEM;
 	}
 
-	send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM |
-					 WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+	send_workqueue = create_singlethread_workqueue("dlm_send");
 	if (!send_workqueue) {
 		log_print("can't start dlm_send");
 		destroy_workqueue(recv_workqueue);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index cbadc1b..bfd8b68 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -348,7 +348,7 @@
 	BUG_ON(!crypt_stat || !crypt_stat->tfm
 	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
 	if (unlikely(ecryptfs_verbosity > 0)) {
-		ecryptfs_printk(KERN_DEBUG, "Key size [%d]; key:\n",
+		ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
 				crypt_stat->key_size);
 		ecryptfs_dump_hex(crypt_stat->key,
 				  crypt_stat->key_size);
@@ -413,10 +413,9 @@
 	rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
 				(extent_base + extent_offset));
 	if (rc) {
-		ecryptfs_printk(KERN_ERR, "Error attempting to "
-				"derive IV for extent [0x%.16x]; "
-				"rc = [%d]\n", (extent_base + extent_offset),
-				rc);
+		ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for "
+			"extent [0x%.16llx]; rc = [%d]\n",
+			(unsigned long long)(extent_base + extent_offset), rc);
 		goto out;
 	}
 	if (unlikely(ecryptfs_verbosity > 0)) {
@@ -443,9 +442,9 @@
 	}
 	rc = 0;
 	if (unlikely(ecryptfs_verbosity > 0)) {
-		ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
-				"rc = [%d]\n", (extent_base + extent_offset),
-				rc);
+		ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
+			"rc = [%d]\n",
+			(unsigned long long)(extent_base + extent_offset), rc);
 		ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
 				"encryption:\n");
 		ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
@@ -540,10 +539,9 @@
 	rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
 				(extent_base + extent_offset));
 	if (rc) {
-		ecryptfs_printk(KERN_ERR, "Error attempting to "
-				"derive IV for extent [0x%.16x]; "
-				"rc = [%d]\n", (extent_base + extent_offset),
-				rc);
+		ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for "
+			"extent [0x%.16llx]; rc = [%d]\n",
+			(unsigned long long)(extent_base + extent_offset), rc);
 		goto out;
 	}
 	if (unlikely(ecryptfs_verbosity > 0)) {
@@ -571,9 +569,9 @@
 	}
 	rc = 0;
 	if (unlikely(ecryptfs_verbosity > 0)) {
-		ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
-				"rc = [%d]\n", (extent_base + extent_offset),
-				rc);
+		ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
+			"rc = [%d]\n",
+			(unsigned long long)(extent_base + extent_offset), rc);
 		ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
 				"decryption:\n");
 		ecryptfs_dump_hex((char *)(page_address(page)
@@ -780,7 +778,7 @@
 	}
 	ecryptfs_printk(KERN_DEBUG,
 			"Initializing cipher [%s]; strlen = [%d]; "
-			"key_size_bits = [%d]\n",
+			"key_size_bits = [%zd]\n",
 			crypt_stat->cipher, (int)strlen(crypt_stat->cipher),
 			crypt_stat->key_size << 3);
 	if (crypt_stat->tfm) {
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 6fc4f31..534c1d4 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -46,24 +46,28 @@
 {
 	struct dentry *lower_dentry;
 	struct vfsmount *lower_mnt;
-	struct dentry *dentry_save;
-	struct vfsmount *vfsmount_save;
+	struct dentry *dentry_save = NULL;
+	struct vfsmount *vfsmount_save = NULL;
 	int rc = 1;
 
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	lower_dentry = ecryptfs_dentry_to_lower(dentry);
 	lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
 	if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
 		goto out;
-	dentry_save = nd->path.dentry;
-	vfsmount_save = nd->path.mnt;
-	nd->path.dentry = lower_dentry;
-	nd->path.mnt = lower_mnt;
+	if (nd) {
+		dentry_save = nd->path.dentry;
+		vfsmount_save = nd->path.mnt;
+		nd->path.dentry = lower_dentry;
+		nd->path.mnt = lower_mnt;
+	}
 	rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
-	nd->path.dentry = dentry_save;
-	nd->path.mnt = vfsmount_save;
+	if (nd) {
+		nd->path.dentry = dentry_save;
+		nd->path.mnt = vfsmount_save;
+	}
 	if (dentry->d_inode) {
 		struct inode *lower_inode =
 			ecryptfs_inode_to_lower(dentry->d_inode);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 413a3c4..e007534 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -192,7 +192,6 @@
 		(((struct user_key_payload*)key->payload.data)->data);
 }
 
-#define ECRYPTFS_SUPER_MAGIC 0xf15f
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
 #define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32
 #define ECRYPTFS_MAX_NUM_ENC_KEYS 64
@@ -584,6 +583,7 @@
 
 #define ecryptfs_printk(type, fmt, arg...) \
         __ecryptfs_printk(type "%s: " fmt, __func__, ## arg);
+__attribute__ ((format(printf, 1, 2)))
 void __ecryptfs_printk(const char *fmt, ...);
 
 extern const struct file_operations ecryptfs_main_fops;
@@ -632,8 +632,7 @@
 		       u32 flags);
 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
 					struct dentry *lower_dentry,
-					struct inode *ecryptfs_dir_inode,
-					struct nameidata *ecryptfs_nd);
+					struct inode *ecryptfs_dir_inode);
 int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
 					 size_t *decrypted_name_size,
 					 struct dentry *ecryptfs_dentry,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 91da029..7d1050e 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -47,7 +47,7 @@
 				const struct iovec *iov,
 				unsigned long nr_segs, loff_t pos)
 {
-	int rc;
+	ssize_t rc;
 	struct dentry *lower_dentry;
 	struct vfsmount *lower_vfsmount;
 	struct file *file = iocb->ki_filp;
@@ -191,18 +191,16 @@
 				      | ECRYPTFS_ENCRYPTED);
 	}
 	mutex_unlock(&crypt_stat->cs_mutex);
-	if (!ecryptfs_inode_to_private(inode)->lower_file) {
-		rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
-		if (rc) {
-			printk(KERN_ERR "%s: Error attempting to initialize "
-			       "the persistent file for the dentry with name "
-			       "[%s]; rc = [%d]\n", __func__,
-			       ecryptfs_dentry->d_name.name, rc);
-			goto out_free;
-		}
+	rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+	if (rc) {
+		printk(KERN_ERR "%s: Error attempting to initialize "
+			"the persistent file for the dentry with name "
+			"[%s]; rc = [%d]\n", __func__,
+			ecryptfs_dentry->d_name.name, rc);
+		goto out_free;
 	}
-	if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
-	    && !(file->f_flags & O_RDONLY)) {
+	if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE)
+	    == O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) {
 		rc = -EPERM;
 		printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
 		       "file must hence be opened RO\n", __func__);
@@ -243,9 +241,9 @@
 		}
 	}
 	mutex_unlock(&crypt_stat->cs_mutex);
-	ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = [0x%.16x] "
-			"size: [0x%.16x]\n", inode, inode->i_ino,
-			i_size_read(inode));
+	ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
+			"[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
+			(unsigned long long)i_size_read(inode));
 	goto out;
 out_free:
 	kmem_cache_free(ecryptfs_file_info_cache,
@@ -319,6 +317,7 @@
 
 const struct file_operations ecryptfs_dir_fops = {
 	.readdir = ecryptfs_readdir,
+	.read = generic_read_dir,
 	.unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = ecryptfs_compat_ioctl,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 337352a..b592938 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -74,16 +74,20 @@
 	unsigned int flags_save;
 	int rc;
 
-	dentry_save = nd->path.dentry;
-	vfsmount_save = nd->path.mnt;
-	flags_save = nd->flags;
-	nd->path.dentry = lower_dentry;
-	nd->path.mnt = lower_mnt;
-	nd->flags &= ~LOOKUP_OPEN;
+	if (nd) {
+		dentry_save = nd->path.dentry;
+		vfsmount_save = nd->path.mnt;
+		flags_save = nd->flags;
+		nd->path.dentry = lower_dentry;
+		nd->path.mnt = lower_mnt;
+		nd->flags &= ~LOOKUP_OPEN;
+	}
 	rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
-	nd->path.dentry = dentry_save;
-	nd->path.mnt = vfsmount_save;
-	nd->flags = flags_save;
+	if (nd) {
+		nd->path.dentry = dentry_save;
+		nd->path.mnt = vfsmount_save;
+		nd->flags = flags_save;
+	}
 	return rc;
 }
 
@@ -185,15 +189,13 @@
 				"context; rc = [%d]\n", rc);
 		goto out;
 	}
-	if (!ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->lower_file) {
-		rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
-		if (rc) {
-			printk(KERN_ERR "%s: Error attempting to initialize "
-			       "the persistent file for the dentry with name "
-			       "[%s]; rc = [%d]\n", __func__,
-			       ecryptfs_dentry->d_name.name, rc);
-			goto out;
-		}
+	rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+	if (rc) {
+		printk(KERN_ERR "%s: Error attempting to initialize "
+			"the persistent file for the dentry with name "
+			"[%s]; rc = [%d]\n", __func__,
+			ecryptfs_dentry->d_name.name, rc);
+		goto out;
 	}
 	rc = ecryptfs_write_metadata(ecryptfs_dentry);
 	if (rc) {
@@ -243,8 +245,7 @@
  */
 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
 					struct dentry *lower_dentry,
-					struct inode *ecryptfs_dir_inode,
-					struct nameidata *ecryptfs_nd)
+					struct inode *ecryptfs_dir_inode)
 {
 	struct dentry *lower_dir_dentry;
 	struct vfsmount *lower_mnt;
@@ -292,8 +293,6 @@
 		goto out;
 	if (special_file(lower_inode->i_mode))
 		goto out;
-	if (!ecryptfs_nd)
-		goto out;
 	/* Released in this function */
 	page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
 	if (!page_virt) {
@@ -302,15 +301,13 @@
 		rc = -ENOMEM;
 		goto out;
 	}
-	if (!ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->lower_file) {
-		rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
-		if (rc) {
-			printk(KERN_ERR "%s: Error attempting to initialize "
-			       "the persistent file for the dentry with name "
-			       "[%s]; rc = [%d]\n", __func__,
-			       ecryptfs_dentry->d_name.name, rc);
-			goto out_free_kmem;
-		}
+	rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+	if (rc) {
+		printk(KERN_ERR "%s: Error attempting to initialize "
+			"the persistent file for the dentry with name "
+			"[%s]; rc = [%d]\n", __func__,
+			ecryptfs_dentry->d_name.name, rc);
+		goto out_free_kmem;
 	}
 	crypt_stat = &ecryptfs_inode_to_private(
 					ecryptfs_dentry->d_inode)->crypt_stat;
@@ -353,75 +350,6 @@
 }
 
 /**
- * ecryptfs_new_lower_dentry
- * @name: The name of the new dentry.
- * @lower_dir_dentry: Parent directory of the new dentry.
- * @nd: nameidata from last lookup.
- *
- * Create a new dentry or get it from lower parent dir.
- */
-static struct dentry *
-ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
-			  struct nameidata *nd)
-{
-	struct dentry *new_dentry;
-	struct dentry *tmp;
-	struct inode *lower_dir_inode;
-
-	lower_dir_inode = lower_dir_dentry->d_inode;
-
-	tmp = d_alloc(lower_dir_dentry, name);
-	if (!tmp)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_lock(&lower_dir_inode->i_mutex);
-	new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd);
-	mutex_unlock(&lower_dir_inode->i_mutex);
-
-	if (!new_dentry)
-		new_dentry = tmp;
-	else
-		dput(tmp);
-
-	return new_dentry;
-}
-
-
-/**
- * ecryptfs_lookup_one_lower
- * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
- * @lower_dir_dentry: lower parent directory
- * @name: lower file name
- *
- * Get the lower dentry from vfs. If lower dentry does not exist yet,
- * create it.
- */
-static struct dentry *
-ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
-			  struct dentry *lower_dir_dentry, struct qstr *name)
-{
-	struct nameidata nd;
-	struct vfsmount *lower_mnt;
-	int err;
-
-	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
-				    ecryptfs_dentry->d_parent));
-	err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
-	mntput(lower_mnt);
-
-	if (!err) {
-		/* we dont need the mount */
-		mntput(nd.path.mnt);
-		return nd.path.dentry;
-	}
-	if (err != -ENOENT)
-		return ERR_PTR(err);
-
-	/* create a new lower dentry */
-	return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd);
-}
-
-/**
  * ecryptfs_lookup
  * @ecryptfs_dir_inode: The eCryptfs directory inode
  * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
@@ -438,10 +366,8 @@
 	size_t encrypted_and_encoded_name_size;
 	struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
 	struct dentry *lower_dir_dentry, *lower_dentry;
-	struct qstr lower_name;
 	int rc = 0;
 
-	d_set_d_op(ecryptfs_dentry, &ecryptfs_dops);
 	if ((ecryptfs_dentry->d_name.len == 1
 	     && !strcmp(ecryptfs_dentry->d_name.name, "."))
 	    || (ecryptfs_dentry->d_name.len == 2
@@ -449,20 +375,14 @@
 		goto out_d_drop;
 	}
 	lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
-	lower_name.name = ecryptfs_dentry->d_name.name;
-	lower_name.len = ecryptfs_dentry->d_name.len;
-	lower_name.hash = ecryptfs_dentry->d_name.hash;
-	if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
-		rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
-				lower_dir_dentry->d_inode, &lower_name);
-		if (rc < 0)
-			goto out_d_drop;
-	}
-	lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-						 lower_dir_dentry, &lower_name);
+	mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+	lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
+				      lower_dir_dentry,
+				      ecryptfs_dentry->d_name.len);
+	mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
 	if (IS_ERR(lower_dentry)) {
 		rc = PTR_ERR(lower_dentry);
-		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
+		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
 				"[%d] on lower_dentry = [%s]\n", __func__, rc,
 				encrypted_and_encoded_name);
 		goto out_d_drop;
@@ -484,28 +404,21 @@
 		       "filename; rc = [%d]\n", __func__, rc);
 		goto out_d_drop;
 	}
-	lower_name.name = encrypted_and_encoded_name;
-	lower_name.len = encrypted_and_encoded_name_size;
-	lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
-	if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
-		rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
-				lower_dir_dentry->d_inode, &lower_name);
-		if (rc < 0)
-			goto out_d_drop;
-	}
-	lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-						 lower_dir_dentry, &lower_name);
+	mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
+	lower_dentry = lookup_one_len(encrypted_and_encoded_name,
+				      lower_dir_dentry,
+				      encrypted_and_encoded_name_size);
+	mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
 	if (IS_ERR(lower_dentry)) {
 		rc = PTR_ERR(lower_dentry);
-		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
+		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
 				"[%d] on lower_dentry = [%s]\n", __func__, rc,
 				encrypted_and_encoded_name);
 		goto out_d_drop;
 	}
 lookup_and_interpose:
 	rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry,
-						 ecryptfs_dir_inode,
-						 ecryptfs_nd);
+						 ecryptfs_dir_inode);
 	goto out;
 out_d_drop:
 	d_drop(ecryptfs_dentry);
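The simplified lookup above relies on lookup_one_len(), which must be called with the parent directory inode's i_mutex held and returns either a dentry (possibly a negative one) or an ERR_PTR(). A minimal sketch of that calling convention follows; the helper name is hypothetical and not part of the patch.

/* Sketch only -- hypothetical helper, not from the patch. */
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/namei.h>

static struct dentry *example_lookup_lower(struct dentry *lower_dir_dentry,
					   const char *name, int len)
{
	struct dentry *lower_dentry;

	/* lookup_one_len() requires the parent's i_mutex to be held. */
	mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
	lower_dentry = lookup_one_len(name, lower_dir_dentry, len);
	mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);

	/* May be ERR_PTR(); a hit with no inode is a negative dentry. */
	return lower_dentry;
}
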
@@ -1097,6 +1010,8 @@
 	rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
 			 ecryptfs_dentry_to_lower(dentry), &lower_stat);
 	if (!rc) {
+		fsstack_copy_attr_all(dentry->d_inode,
+				      ecryptfs_inode_to_lower(dentry->d_inode));
 		generic_fillattr(dentry->d_inode, stat);
 		stat->blocks = lower_stat.blocks;
 	}
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index b1f6858..c1436cf 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -59,7 +59,7 @@
 		break;
 	default:
 		ecryptfs_printk(KERN_WARNING, "Unknown error code: "
-				"[0x%.16x]\n", err_code);
+				"[0x%.16lx]\n", err_code);
 		rc = -EINVAL;
 	}
 	return rc;
@@ -130,7 +130,7 @@
 	} else {
 		rc = -EINVAL;
 		ecryptfs_printk(KERN_WARNING,
-				"Unsupported packet size: [%d]\n", size);
+				"Unsupported packet size: [%zd]\n", size);
 	}
 	return rc;
 }
@@ -1672,7 +1672,7 @@
 	       auth_tok->session_key.decrypted_key_size);
 	crypt_stat->flags |= ECRYPTFS_KEY_VALID;
 	if (unlikely(ecryptfs_verbosity > 0)) {
-		ecryptfs_printk(KERN_DEBUG, "FEK of size [%d]:\n",
+		ecryptfs_printk(KERN_DEBUG, "FEK of size [%zd]:\n",
 				crypt_stat->key_size);
 		ecryptfs_dump_hex(crypt_stat->key,
 				  crypt_stat->key_size);
@@ -1754,7 +1754,7 @@
 			if (ECRYPTFS_SIG_SIZE != tag_11_contents_size) {
 				ecryptfs_printk(KERN_ERR, "Expected "
 						"signature of size [%d]; "
-						"read size [%d]\n",
+						"read size [%zd]\n",
 						ECRYPTFS_SIG_SIZE,
 						tag_11_contents_size);
 				rc = -EIO;
@@ -1787,8 +1787,8 @@
 			goto out_wipe_list;
 			break;
 		default:
-			ecryptfs_printk(KERN_DEBUG, "No packet at offset "
-					"[%d] of the file header; hex value of "
+			ecryptfs_printk(KERN_DEBUG, "No packet at offset [%zd] "
+					"of the file header; hex value of "
 					"character is [0x%.2x]\n", i, src[i]);
 			next_packet_is_auth_tok_packet = 0;
 		}
@@ -1864,8 +1864,8 @@
 				"session key for authentication token with sig "
 				"[%.*s]; rc = [%d]. Removing auth tok "
 				"candidate from the list and searching for "
-				"the next match.\n", candidate_auth_tok_sig,
-				ECRYPTFS_SIG_SIZE_HEX, rc);
+				"the next match.\n", ECRYPTFS_SIG_SIZE_HEX,
+				candidate_auth_tok_sig,	rc);
 		list_for_each_entry_safe(auth_tok_list_item,
 					 auth_tok_list_item_tmp,
 					 &auth_tok_list, list) {
@@ -2168,7 +2168,7 @@
 	if (encrypted_session_key_valid) {
 		ecryptfs_printk(KERN_DEBUG, "encrypted_session_key_valid != 0; "
 				"using auth_tok->session_key.encrypted_key, "
-				"where key_rec->enc_key_size = [%d]\n",
+				"where key_rec->enc_key_size = [%zd]\n",
 				key_rec->enc_key_size);
 		memcpy(key_rec->enc_key,
 		       auth_tok->session_key.encrypted_key,
@@ -2198,7 +2198,7 @@
 	if (rc < 1 || rc > 2) {
 		ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
 				"for crypt_stat session key; expected rc = 1; "
-				"got rc = [%d]. key_rec->enc_key_size = [%d]\n",
+				"got rc = [%d]. key_rec->enc_key_size = [%zd]\n",
 				rc, key_rec->enc_key_size);
 		rc = -ENOMEM;
 		goto out;
@@ -2209,7 +2209,7 @@
 		ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
 				"for crypt_stat encrypted session key; "
 				"expected rc = 1; got rc = [%d]. "
-				"key_rec->enc_key_size = [%d]\n", rc,
+				"key_rec->enc_key_size = [%zd]\n", rc,
 				key_rec->enc_key_size);
 		rc = -ENOMEM;
 		goto out;
@@ -2224,7 +2224,7 @@
 		goto out;
 	}
 	rc = 0;
-	ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes of the key\n",
+	ecryptfs_printk(KERN_DEBUG, "Encrypting [%zd] bytes of the key\n",
 			crypt_stat->key_size);
 	rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg,
 				      (*key_rec).enc_key_size);
@@ -2235,7 +2235,7 @@
 	}
 	ecryptfs_printk(KERN_DEBUG, "This should be the encrypted key:\n");
 	if (ecryptfs_verbosity > 0) {
-		ecryptfs_printk(KERN_DEBUG, "EFEK of size [%d]:\n",
+		ecryptfs_printk(KERN_DEBUG, "EFEK of size [%zd]:\n",
 				key_rec->enc_key_size);
 		ecryptfs_dump_hex(key_rec->enc_key,
 				  key_rec->enc_key_size);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 3510386..758323a 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -36,6 +36,7 @@
 #include <linux/parser.h>
 #include <linux/fs_stack.h>
 #include <linux/slab.h>
+#include <linux/magic.h>
 #include "ecryptfs_kernel.h"
 
 /**
@@ -141,25 +142,12 @@
 	return rc;
 }
 
-/**
- * ecryptfs_interpose
- * @lower_dentry: Existing dentry in the lower filesystem
- * @dentry: ecryptfs' dentry
- * @sb: ecryptfs's super_block
- * @flags: flags to govern behavior of interpose procedure
- *
- * Interposes upper and lower dentries.
- *
- * Returns zero on success; non-zero otherwise
- */
-int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
-		       struct super_block *sb, u32 flags)
+static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+		       struct super_block *sb)
 {
-	struct inode *lower_inode;
 	struct inode *inode;
 	int rc = 0;
 
-	lower_inode = lower_dentry->d_inode;
 	if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb)) {
 		rc = -EXDEV;
 		goto out;
@@ -189,17 +177,38 @@
 	if (special_file(lower_inode->i_mode))
 		init_special_inode(inode, lower_inode->i_mode,
 				   lower_inode->i_rdev);
-	d_set_d_op(dentry, &ecryptfs_dops);
 	fsstack_copy_attr_all(inode, lower_inode);
 	/* This size will be overwritten for real files w/ headers and
 	 * other metadata */
 	fsstack_copy_inode_size(inode, lower_inode);
+	return inode;
+out:
+	return ERR_PTR(rc);
+}
+
+/**
+ * ecryptfs_interpose
+ * @lower_dentry: Existing dentry in the lower filesystem
+ * @dentry: ecryptfs' dentry
+ * @sb: ecryptfs's super_block
+ * @flags: flags to govern behavior of interpose procedure
+ *
+ * Interposes upper and lower dentries.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
+		       struct super_block *sb, u32 flags)
+{
+	struct inode *lower_inode = lower_dentry->d_inode;
+	struct inode *inode = ecryptfs_get_inode(lower_inode, sb);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
 	if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD)
 		d_add(dentry, inode);
 	else
 		d_instantiate(dentry, inode);
-out:
-	return rc;
+	return 0;
 }
 
 enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
@@ -492,59 +501,11 @@
 static struct file_system_type ecryptfs_fs_type;
 
 /**
- * ecryptfs_read_super
- * @sb: The ecryptfs super block
- * @dev_name: The path to mount over
- *
- * Read the super block of the lower filesystem, and use
- * ecryptfs_interpose to create our initial inode and super block
- * struct.
- */
-static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
-{
-	struct path path;
-	int rc;
-
-	rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
-	if (rc) {
-		ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
-		goto out;
-	}
-	if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
-		rc = -EINVAL;
-		printk(KERN_ERR "Mount on filesystem of type "
-			"eCryptfs explicitly disallowed due to "
-			"known incompatibilities\n");
-		goto out_free;
-	}
-	ecryptfs_set_superblock_lower(sb, path.dentry->d_sb);
-	sb->s_maxbytes = path.dentry->d_sb->s_maxbytes;
-	sb->s_blocksize = path.dentry->d_sb->s_blocksize;
-	ecryptfs_set_dentry_lower(sb->s_root, path.dentry);
-	ecryptfs_set_dentry_lower_mnt(sb->s_root, path.mnt);
-	rc = ecryptfs_interpose(path.dentry, sb->s_root, sb, 0);
-	if (rc)
-		goto out_free;
-	rc = 0;
-	goto out;
-out_free:
-	path_put(&path);
-out:
-	return rc;
-}
-
-/**
  * ecryptfs_get_sb
  * @fs_type
  * @flags
  * @dev_name: The path to mount over
  * @raw_data: The options passed into the kernel
- *
- * The whole ecryptfs_get_sb process is broken into 3 functions:
- * ecryptfs_parse_options(): handle options passed to ecryptfs, if any
- * ecryptfs_read_super(): this accesses the lower filesystem and uses
- *                        ecryptfs_interpose to perform most of the linking
- * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c)
  */
 static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
 			const char *dev_name, void *raw_data)
@@ -553,6 +514,8 @@
 	struct ecryptfs_sb_info *sbi;
 	struct ecryptfs_dentry_info *root_info;
 	const char *err = "Getting sb failed";
+	struct inode *inode;
+	struct path path;
 	int rc;
 
 	sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
@@ -575,10 +538,8 @@
 
 	s->s_flags = flags;
 	rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
-	if (rc) {
-		deactivate_locked_super(s);
-		goto out;
-	}
+	if (rc)
+		goto out1;
 
 	ecryptfs_set_superblock_private(s, sbi);
 	s->s_bdi = &sbi->bdi;
@@ -586,34 +547,55 @@
 	/* ->kill_sb() will take care of sbi after that point */
 	sbi = NULL;
 	s->s_op = &ecryptfs_sops;
+	s->s_d_op = &ecryptfs_dops;
+
+	err = "Reading sb failed";
+	rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+	if (rc) {
+		ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
+		goto out1;
+	}
+	if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
+		rc = -EINVAL;
+		printk(KERN_ERR "Mount on filesystem of type "
+			"eCryptfs explicitly disallowed due to "
+			"known incompatibilities\n");
+		goto out_free;
+	}
+	ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
+	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+	s->s_blocksize = path.dentry->d_sb->s_blocksize;
+	s->s_magic = ECRYPTFS_SUPER_MAGIC;
+
+	inode = ecryptfs_get_inode(path.dentry->d_inode, s);
+	rc = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_free;
+
+	s->s_root = d_alloc_root(inode);
+	if (!s->s_root) {
+		iput(inode);
+		rc = -ENOMEM;
+		goto out_free;
+	}
 
 	rc = -ENOMEM;
-	s->s_root = d_alloc(NULL, &(const struct qstr) {
-			     .hash = 0,.name = "/",.len = 1});
-	if (!s->s_root) {
-		deactivate_locked_super(s);
-		goto out;
-	}
-	d_set_d_op(s->s_root, &ecryptfs_dops);
-	s->s_root->d_sb = s;
-	s->s_root->d_parent = s->s_root;
-
 	root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
-	if (!root_info) {
-		deactivate_locked_super(s);
-		goto out;
-	}
+	if (!root_info)
+		goto out_free;
+
 	/* ->kill_sb() will take care of root_info */
 	ecryptfs_set_dentry_private(s->s_root, root_info);
+	ecryptfs_set_dentry_lower(s->s_root, path.dentry);
+	ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
+
 	s->s_flags |= MS_ACTIVE;
-	rc = ecryptfs_read_super(s, dev_name);
-	if (rc) {
-		deactivate_locked_super(s);
-		err = "Reading sb failed";
-		goto out;
-	}
 	return dget(s->s_root);
 
+out_free:
+	path_put(&path);
+out1:
+	deactivate_locked_super(s);
 out:
 	if (sbi) {
 		ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
@@ -828,9 +810,10 @@
 		ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
 				"larger than the host's page size, and so "
 				"eCryptfs cannot run on this system. The "
-				"default eCryptfs extent size is [%d] bytes; "
-				"the page size is [%d] bytes.\n",
-				ECRYPTFS_DEFAULT_EXTENT_SIZE, PAGE_CACHE_SIZE);
+				"default eCryptfs extent size is [%u] bytes; "
+				"the page size is [%lu] bytes.\n",
+				ECRYPTFS_DEFAULT_EXTENT_SIZE,
+				(unsigned long)PAGE_CACHE_SIZE);
 		goto out;
 	}
 	rc = ecryptfs_init_kmem_caches();
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index b1d8275..cc64fca 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -65,7 +65,7 @@
 	rc = ecryptfs_encrypt_page(page);
 	if (rc) {
 		ecryptfs_printk(KERN_WARNING, "Error encrypting "
-				"page (upper index [0x%.16x])\n", page->index);
+				"page (upper index [0x%.16lx])\n", page->index);
 		ClearPageUptodate(page);
 		goto out;
 	}
@@ -237,7 +237,7 @@
 		ClearPageUptodate(page);
 	else
 		SetPageUptodate(page);
-	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
+	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
 			page->index);
 	unlock_page(page);
 	return rc;
@@ -290,6 +290,7 @@
 		return -ENOMEM;
 	*pagep = page;
 
+	prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
 	if (!PageUptodate(page)) {
 		struct ecryptfs_crypt_stat *crypt_stat =
 			&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
@@ -335,18 +336,23 @@
 				SetPageUptodate(page);
 			}
 		} else {
-			rc = ecryptfs_decrypt_page(page);
-			if (rc) {
-				printk(KERN_ERR "%s: Error decrypting page "
-				       "at index [%ld]; rc = [%d]\n",
-				       __func__, page->index, rc);
-				ClearPageUptodate(page);
-				goto out;
+			if (prev_page_end_size
+			    >= i_size_read(page->mapping->host)) {
+				zero_user(page, 0, PAGE_CACHE_SIZE);
+			} else {
+				rc = ecryptfs_decrypt_page(page);
+				if (rc) {
+					printk(KERN_ERR "%s: Error decrypting "
+					       "page at index [%ld]; "
+					       "rc = [%d]\n",
+					       __func__, page->index, rc);
+					ClearPageUptodate(page);
+					goto out;
+				}
 			}
 			SetPageUptodate(page);
 		}
 	}
-	prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
 	/* If creating a page or more of holes, zero them out via truncate.
 	 * Note, this will increase i_size. */
 	if (index != 0) {
@@ -488,7 +494,7 @@
 	} else
 		ecryptfs_printk(KERN_DEBUG, "Not a new file\n");
 	ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
-			"(page w/ index = [0x%.16x], to = [%d])\n", index, to);
+			"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
 	if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0,
 						       to);
@@ -503,19 +509,20 @@
 	rc = fill_zeros_to_end_of_page(page, to);
 	if (rc) {
 		ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
-			"zeros in page with index = [0x%.16x]\n", index);
+			"zeros in page with index = [0x%.16lx]\n", index);
 		goto out;
 	}
 	rc = ecryptfs_encrypt_page(page);
 	if (rc) {
 		ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
-				"index [0x%.16x])\n", index);
+				"index [0x%.16lx])\n", index);
 		goto out;
 	}
 	if (pos + copied > i_size_read(ecryptfs_inode)) {
 		i_size_write(ecryptfs_inode, pos + copied);
 		ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
-				"[0x%.16x]\n", i_size_read(ecryptfs_inode));
+			"[0x%.16llx]\n",
+			(unsigned long long)i_size_read(ecryptfs_inode));
 	}
 	rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
 	if (rc)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index e0194b3..d9a5917 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -99,7 +99,7 @@
  * @ctx: [in] Pointer to eventfd context.
  *
  * The eventfd context reference must have been previously acquired either
- * with eventfd_ctx_get() or eventfd_ctx_fdget()).
+ * with eventfd_ctx_get() or eventfd_ctx_fdget().
  */
 void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {
@@ -146,9 +146,9 @@
  * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
  * @ctx: [in] Pointer to eventfd context.
  * @wait: [in] Wait queue to be removed.
- * @cnt: [out] Pointer to the 64bit conter value.
+ * @cnt: [out] Pointer to the 64-bit counter value.
  *
- * Returns zero if successful, or the following error codes:
+ * Returns %0 if successful, or the following error codes:
  *
  * -EAGAIN      : The operation would have blocked.
  *
@@ -175,11 +175,11 @@
  * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
  * @ctx: [in] Pointer to eventfd context.
  * @no_wait: [in] Different from zero if the operation should not block.
- * @cnt: [out] Pointer to the 64bit conter value.
+ * @cnt: [out] Pointer to the 64-bit counter value.
  *
- * Returns zero if successful, or the following error codes:
+ * Returns %0 if successful, or the following error codes:
  *
- * -EAGAIN      : The operation would have blocked but @no_wait was nonzero.
+ * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
  * -ERESTARTSYS : A signal interrupted the wait operation.
  *
  * If @no_wait is zero, the function might sleep until the eventfd internal
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 8cf0724..4a09af9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -63,6 +63,13 @@
  * cleanup path and it is also acquired by eventpoll_release_file()
  * if a file has been pushed inside an epoll set and it is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is also acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+ * simultaneous inserts (A into B and B into A) from racing and
+ * constructing a cycle without either insert observing that it is
+ * going to.
  * It is possible to drop the "ep->mtx" and to use the global
  * mutex "epmutex" (together with "ep->lock") to have it working,
  * but having "ep->mtx" will make the interface more scalable.
@@ -217,13 +224,16 @@
  * Configuration options available inside /proc/sys/fs/epoll/
  */
 /* Maximum number of epoll watched descriptors, per user */
-static int max_user_watches __read_mostly;
+static long max_user_watches __read_mostly;
 
 /*
  * This mutex is used to serialize ep_free() and eventpoll_release_file().
  */
 static DEFINE_MUTEX(epmutex);
 
+/* Used to check for epoll file descriptor inclusion loops */
+static struct nested_calls poll_loop_ncalls;
+
 /* Used for safe wake up implementation */
 static struct nested_calls poll_safewake_ncalls;
 
@@ -240,16 +250,18 @@
 
 #include <linux/sysctl.h>
 
-static int zero;
+static long zero;
+static long long_max = LONG_MAX;
 
 ctl_table epoll_table[] = {
 	{
 		.procname	= "max_user_watches",
 		.data		= &max_user_watches,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(max_user_watches),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &zero,
+		.extra2		= &long_max,
 	},
 	{ }
 };
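The switch to proc_doulongvec_minmax() is what drives the surrounding type changes: that handler treats .data as well as the .extra1/.extra2 bounds as unsigned long values, which is why the zero bound above had to be retyped as long. A generic sketch of the resulting table entry follows, with invented knob and variable names.

/* Sketch only -- hypothetical sysctl knob, not from the patch. */
#include <linux/kernel.h>
#include <linux/sysctl.h>

static long example_min;
static long example_max = LONG_MAX;
static long example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_value,
		.maxlen		= sizeof(example_value),
		.mode		= 0644,
		/* Bounds pointed to by extra1/extra2 must be longs too. */
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ }
};
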
@@ -561,7 +573,7 @@
 	/* At this point it is safe to free the eventpoll item */
 	kmem_cache_free(epi_cache, epi);
 
-	atomic_dec(&ep->user->epoll_watches);
+	atomic_long_dec(&ep->user->epoll_watches);
 
 	return 0;
 }
@@ -898,11 +910,12 @@
 {
 	int error, revents, pwake = 0;
 	unsigned long flags;
+	long user_watches;
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
-	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
-		     max_user_watches))
+	user_watches = atomic_long_read(&ep->user->epoll_watches);
+	if (unlikely(user_watches >= max_user_watches))
 		return -ENOSPC;
 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		return -ENOMEM;
@@ -966,7 +979,7 @@
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	atomic_inc(&ep->user->epoll_watches);
+	atomic_long_inc(&ep->user->epoll_watches);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1111,6 +1124,17 @@
 	return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
 }
 
+static inline struct timespec ep_set_mstimeout(long ms)
+{
+	struct timespec now, ts = {
+		.tv_sec = ms / MSEC_PER_SEC,
+		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
+	};
+
+	ktime_get_ts(&now);
+	return timespec_add_safe(now, ts);
+}
+
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
@@ -1118,12 +1142,11 @@
 	unsigned long flags;
 	long slack;
 	wait_queue_t wait;
-	struct timespec end_time;
 	ktime_t expires, *to = NULL;
 
 	if (timeout > 0) {
-		ktime_get_ts(&end_time);
-		timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+		struct timespec end_time = ep_set_mstimeout(timeout);
+
 		slack = select_estimate_accuracy(&end_time);
 		to = &expires;
 		*to = timespec_to_ktime(end_time);
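The new ep_set_mstimeout() helper used above exists because converting a very large millisecond timeout with a single (u64)timeout * NSEC_PER_MSEC and timespec_add_ns() risks overflowing the nanosecond sum; splitting the interval into whole seconds plus a sub-second remainder and adding it with timespec_add_safe() avoids that. A standalone illustration of the split, with an invented value:

/* Illustration only -- plain userspace arithmetic, not kernel code. */
#include <stdio.h>

#define MSEC_PER_SEC  1000L
#define NSEC_PER_MSEC 1000000L

int main(void)
{
	long ms = 4500;					/* example timeout */
	long sec  = ms / MSEC_PER_SEC;			/* 4 */
	long nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);/* 500000000 */

	/* tv_nsec stays below 1e9, so the later addition cannot wrap it. */
	printf("%ld ms -> { .tv_sec = %ld, .tv_nsec = %ld }\n", ms, sec, nsec);
	return 0;
}
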
@@ -1185,6 +1208,62 @@
 	return res;
 }
 
+/**
+ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
+ *                      API, to verify that adding an epoll file inside another
+ *                      epoll structure does not violate the constraints, in
+ *                      terms of closed loops or too deep chains (which can
+ *                      result in excessive stack usage).
+ *
+ * @priv: Pointer to the epoll file to be currently checked.
+ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
+ *          data structure pointer.
+ * @call_nests: Current depth of the @ep_call_nested() call stack.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+{
+	int error = 0;
+	struct file *file = priv;
+	struct eventpoll *ep = file->private_data;
+	struct rb_node *rbp;
+	struct epitem *epi;
+
+	mutex_lock(&ep->mtx);
+	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+		epi = rb_entry(rbp, struct epitem, rbn);
+		if (unlikely(is_file_epoll(epi->ffd.file))) {
+			error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+					       ep_loop_check_proc, epi->ffd.file,
+					       epi->ffd.file->private_data, current);
+			if (error != 0)
+				break;
+		}
+	}
+	mutex_unlock(&ep->mtx);
+
+	return error;
+}
+
+/**
+ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
+ *                 inside another epoll file (represented by @ep) does not
+ *                 create closed loops or too deep chains.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @file: Pointer to the epoll file to be checked.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check(struct eventpoll *ep, struct file *file)
+{
+	return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+			      ep_loop_check_proc, file, ep, current);
+}
+
 /*
  * Open an eventpoll file descriptor.
  */
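Conceptually, the loop check walks outward from the epoll file being inserted and rejects the insert if the walk either reaches the epoll it is being added to or nests deeper than EP_MAX_NESTS. A self-contained sketch of that idea follows, using an invented node type and limit rather than the kernel's data structures.

/* Sketch only -- invented types, not the kernel implementation. */
#include <stdio.h>

#define MAX_NESTS 4

struct node {
	struct node *children[4];
	int nchildren;
};

static int loop_check(struct node *n, struct node *origin, int depth)
{
	int i;

	if (n == origin || depth > MAX_NESTS)
		return -1;			/* cycle or chain too deep */
	for (i = 0; i < n->nchildren; i++)
		if (loop_check(n->children[i], origin, depth + 1))
			return -1;
	return 0;
}

int main(void)
{
	struct node a = { .nchildren = 0 }, b = { .nchildren = 0 };

	b.children[b.nchildren++] = &a;		/* b already watches a */
	/* Adding b inside a would close the loop a -> b -> a. */
	printf("insert b into a: %s\n",
	       loop_check(&b, &a, 0) ? "rejected (-ELOOP)" : "ok");
	return 0;
}
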
@@ -1233,6 +1312,7 @@
 		struct epoll_event __user *, event)
 {
 	int error;
+	int did_lock_epmutex = 0;
 	struct file *file, *tfile;
 	struct eventpoll *ep;
 	struct epitem *epi;
@@ -1274,6 +1354,25 @@
 	 */
 	ep = file->private_data;
 
+	/*
+	 * When we insert an epoll file descriptor inside another epoll file
+	 * descriptor, there is the chance of creating closed loops, which are
+	 * better handled here than in more critical paths.
+	 *
+	 * We hold epmutex across the loop check and the insert in this case, in
+	 * order to prevent two separate inserts from racing and each doing the
+	 * insert "at the same time" such that ep_loop_check passes on both
+	 * before either one does the insert, thereby creating a cycle.
+	 */
+	if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+		mutex_lock(&epmutex);
+		did_lock_epmutex = 1;
+		error = -ELOOP;
+		if (ep_loop_check(ep, tfile) != 0)
+			goto error_tgt_fput;
+	}
+
+
 	mutex_lock(&ep->mtx);
 
 	/*
@@ -1309,6 +1408,9 @@
 	mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
+	if (unlikely(did_lock_epmutex))
+		mutex_unlock(&epmutex);
+
 	fput(tfile);
 error_fput:
 	fput(file);
@@ -1426,6 +1528,13 @@
 	 */
 	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
 		EP_ITEM_COST;
+	BUG_ON(max_user_watches < 0);
+
+	/*
+	 * Initialize the structure used to perform epoll file descriptor
+	 * inclusion loops checks.
+	 */
+	ep_nested_calls_init(&poll_loop_ncalls);
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_nested_calls_init(&poll_safewake_ncalls);
diff --git a/fs/exec.c b/fs/exec.c
index c62efcb..52a447d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -120,7 +120,7 @@
 		goto out;
 
 	file = do_filp_open(AT_FDCWD, tmp,
-				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+				O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
 				MAY_READ | MAY_EXEC | MAY_OPEN);
 	putname(tmp);
 	error = PTR_ERR(file);
@@ -723,7 +723,7 @@
 	int err;
 
 	file = do_filp_open(AT_FDCWD, name,
-				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+				O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
 				MAY_EXEC | MAY_OPEN);
 	if (IS_ERR(file))
 		goto out;
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 4268542..a755523 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1030,7 +1030,6 @@
 		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
 	}
 
-	inode->i_mapping->backing_dev_info = sb->s_bdi;
 	if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &exofs_file_inode_operations;
 		inode->i_fop = &exofs_file_operations;
@@ -1131,7 +1130,6 @@
 
 	sbi = sb->s_fs_info;
 
-	inode->i_mapping->backing_dev_info = sb->s_bdi;
 	sb->s_dirt = 1;
 	inode_init_owner(inode, dir, mode);
 	inode->i_ino = sbi->s_nextid++;
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 264e95d..4d70db1 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -272,7 +272,6 @@
 		new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
 		err = exofs_set_link(new_dir, new_de, new_page, old_inode);
 		new_inode->i_ctime = CURRENT_TIME;
 		if (dir_de)
@@ -286,12 +285,9 @@
 			if (new_dir->i_nlink >= EXOFS_LINK_MAX)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
 		err = exofs_add_link(new_dentry, old_inode);
-		if (err) {
-			inode_dec_link_count(old_inode);
+		if (err)
 			goto out_dir;
-		}
 		if (dir_de)
 			inode_inc_link_count(new_dir);
 	}
@@ -299,7 +295,7 @@
 	old_inode->i_ctime = CURRENT_TIME;
 
 	exofs_delete_entry(old_de, old_page);
-	inode_dec_link_count(old_inode);
+	mark_inode_dirty(old_inode);
 
 	if (dir_de) {
 		err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 2709b34..47cda41 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -28,21 +28,30 @@
 
 typedef struct ext2_dir_entry_2 ext2_dirent;
 
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
 static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == EXT2_MAX_REC_LEN)
 		return 1 << 16;
+#endif
 	return len;
 }
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT2_MAX_REC_LEN);
 	else
 		BUG_ON(len > (1 << 16));
+#endif
 	return cpu_to_le16(len);
 }
 
@@ -129,15 +138,15 @@
 		p = (ext2_dirent *)(kaddr + offs);
 		rec_len = ext2_rec_len_from_disk(p->rec_len);
 
-		if (rec_len < EXT2_DIR_REC_LEN(1))
+		if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
 			goto Eshort;
-		if (rec_len & 3)
+		if (unlikely(rec_len & 3))
 			goto Ealign;
-		if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
+		if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
 			goto Enamelen;
-		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+		if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
 			goto Espan;
-		if (le32_to_cpu(p->inode) > max_inumber)
+		if (unlikely(le32_to_cpu(p->inode) > max_inumber))
 			goto Einumber;
 	}
 	if (offs != limit)
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index f8aecd2..adb9185 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -67,7 +67,7 @@
 	inode = NULL;
 	if (ino) {
 		inode = ext2_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				ext2_error(dir->i_sb, __func__,
 						"deleted inode referenced: %lu",
@@ -344,7 +344,6 @@
 		new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
 		ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
@@ -356,12 +355,9 @@
 			if (new_dir->i_nlink >= EXT2_LINK_MAX)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
 		err = ext2_add_link(new_dentry, old_inode);
-		if (err) {
-			inode_dec_link_count(old_inode);
+		if (err)
 			goto out_dir;
-		}
 		if (dir_de)
 			inode_inc_link_count(new_dir);
 	}
@@ -369,12 +365,11 @@
 	/*
 	 * Like most other Unix systems, set the ctime for inodes on a
  	 * rename.
-	 * inode_dec_link_count() will mark the inode dirty.
 	 */
 	old_inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(old_inode);
 
 	ext2_delete_entry (old_de, old_page);
-	inode_dec_link_count(old_inode);
 
 	if (dir_de) {
 		if (old_dir != new_dir)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index e0c6380..7731695 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -43,9 +43,10 @@
 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
 static int ext2_sync_fs(struct super_block *sb, int wait);
 
-void ext2_error (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext2_error(struct super_block *sb, const char *function,
+		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 	struct ext2_super_block *es = sbi->s_es;
@@ -59,9 +60,13 @@
 	}
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_PANIC))
@@ -76,12 +81,16 @@
 void ext2_msg(struct super_block *sb, const char *prefix,
 		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT2-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
 	va_end(args);
 }
 
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index f84700b..c2e4dce 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -199,14 +199,6 @@
 			goto found;
 		entry = next;
 	}
-	/* Check the remaining name entries */
-	while (!IS_LAST_ENTRY(entry)) {
-		struct ext2_xattr_entry *next =
-			EXT2_XATTR_NEXT(entry);
-		if ((char *)next >= end)
-			goto bad_block;
-		entry = next;
-	}
 	if (ext2_xattr_cache_insert(bh))
 		ea_idebug(inode, "cache insert failed");
 	error = -ENODATA;
@@ -355,7 +347,7 @@
 /*
  * ext2_xattr_set()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b3db226..045995c 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -20,6 +20,7 @@
 #include <linux/ext3_jbd.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
+#include <linux/blkdev.h>
 
 /*
  * balloc.c contains the blocks allocation and deallocation routines
@@ -39,6 +40,21 @@
 
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
+/*
+ * Calculate the block group number and offset, given a block number
+ */
+static void ext3_get_group_no_and_offset(struct super_block *sb,
+	ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
+{
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+
+	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
+	if (offsetp)
+		*offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
+	if (blockgrpp)
+		*blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
+}
+
 /**
  * ext3_get_group_desc() -- load group descriptor from disk
  * @sb:			super block
@@ -1885,3 +1901,253 @@
 	return ext3_bg_num_gdb_meta(sb,group);
 
 }
+
+/**
+ * ext3_trim_all_free -- function to trim all free space in alloc. group
+ * @sb:			super block for file system
+ * @group:		allocation group to trim
+ * @start:		first group block to examine
+ * @max:		last group block to examine
+ * @minblocks:		minimum extent block count
+ *
+ * ext3_trim_all_free walks through the group's block bitmap searching for free
+ * blocks. When a free block is found, it tries to claim it and as many
+ * consecutive free blocks as possible to build the biggest free extent, until
+ * it reaches a used block. It then issues a TRIM command on this extent and
+ * frees the extent in the block bitmap. This is repeated until the whole group
+ * has been scanned.
+ */
+ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
+				ext3_grpblk_t start, ext3_grpblk_t max,
+				ext3_grpblk_t minblocks)
+{
+	handle_t *handle;
+	ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
+	ext3_fsblk_t discard_block;
+	struct ext3_sb_info *sbi;
+	struct buffer_head *gdp_bh, *bitmap_bh = NULL;
+	struct ext3_group_desc *gdp;
+	int err = 0, ret = 0;
+
+	/*
+	 * We will update one block bitmap, and one group descriptor
+	 */
+	handle = ext3_journal_start_sb(sb, 2);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	bitmap_bh = read_block_bitmap(sb, group);
+	if (!bitmap_bh) {
+		err = -EIO;
+		goto err_out;
+	}
+
+	BUFFER_TRACE(bitmap_bh, "getting undo access");
+	err = ext3_journal_get_undo_access(handle, bitmap_bh);
+	if (err)
+		goto err_out;
+
+	gdp = ext3_get_group_desc(sb, group, &gdp_bh);
+	if (!gdp) {
+		err = -EIO;
+		goto err_out;
+	}
+
+	BUFFER_TRACE(gdp_bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, gdp_bh);
+	if (err)
+		goto err_out;
+
+	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+	sbi = EXT3_SB(sb);
+
+	 /* Walk through the whole group */
+	while (start < max) {
+		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
+		if (start < 0)
+			break;
+		next = start;
+
+		/*
+		 * Allocate contiguous free extents by setting bits in the
+		 * block bitmap
+		 */
+		while (next < max
+			&& claim_block(sb_bgl_lock(sbi, group),
+					next, bitmap_bh)) {
+			next++;
+		}
+
+		 /* We did not claim any blocks */
+		if (next == start)
+			continue;
+
+		discard_block = (ext3_fsblk_t)start +
+				ext3_group_first_block_no(sb, group);
+
+		/* Update counters */
+		spin_lock(sb_bgl_lock(sbi, group));
+		le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
+		spin_unlock(sb_bgl_lock(sbi, group));
+		percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
+
+		/* Do not issue a TRIM on extents smaller than minblocks */
+		if ((next - start) < minblocks)
+			goto free_extent;
+
+		 /* Send the TRIM command down to the device */
+		err = sb_issue_discard(sb, discard_block, next - start,
+				       GFP_NOFS, 0);
+		count += (next - start);
+free_extent:
+		freed = 0;
+
+		/*
+		 * Clear bits in the bitmap
+		 */
+		for (bit = start; bit < next; bit++) {
+			BUFFER_TRACE(bitmap_bh, "clear bit");
+			if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
+						bit, bitmap_bh->b_data)) {
+				ext3_error(sb, __func__,
+					"bit already cleared for block "E3FSBLK,
+					 (unsigned long)bit);
+				BUFFER_TRACE(bitmap_bh, "bit already cleared");
+			} else {
+				freed++;
+			}
+		}
+
+		/* Update counters */
+		spin_lock(sb_bgl_lock(sbi, group));
+		le16_add_cpu(&gdp->bg_free_blocks_count, freed);
+		spin_unlock(sb_bgl_lock(sbi, group));
+		percpu_counter_add(&sbi->s_freeblocks_counter, freed);
+
+		start = next;
+		if (err < 0) {
+			if (err != -EOPNOTSUPP)
+				ext3_warning(sb, __func__, "Discard command "
+					     "returned error %d\n", err);
+			break;
+		}
+
+		if (fatal_signal_pending(current)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+
+		/* No more suitable extents */
+		if ((free_blocks - count) < minblocks)
+			break;
+	}
+
+	/* We dirtied the bitmap block */
+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+	ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
+	if (!err)
+		err = ret;
+
+	/* And the group descriptor block */
+	BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
+	ret = ext3_journal_dirty_metadata(handle, gdp_bh);
+	if (!err)
+		err = ret;
+
+	ext3_debug("trimmed %d blocks in the group %d\n",
+		count, group);
+
+err_out:
+	if (err)
+		count = err;
+	ext3_journal_stop(handle);
+	brelse(bitmap_bh);
+
+	return count;
+}
+
+/**
+ * ext3_trim_fs() -- trim ioctl handler function
+ * @sb:			superblock for filesystem
+ * @range:		fstrim_range describing the start, length and minimum
+ *			extent length of the region to trim, all in bytes
+ *
+ * ext3_trim_fs goes through all allocation groups containing bytes from
+ * range->start to range->start+range->len. For each such group the
+ * ext3_trim_all_free function is invoked to trim all free space.
+ */
+int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+	ext3_grpblk_t last_block, first_block, free_blocks;
+	unsigned long first_group, last_group;
+	unsigned long group, ngroups;
+	struct ext3_group_desc *gdp;
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+	uint64_t start, len, minlen, trimmed;
+	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
+	int ret = 0;
+
+	start = range->start >> sb->s_blocksize_bits;
+	len = range->len >> sb->s_blocksize_bits;
+	minlen = range->minlen >> sb->s_blocksize_bits;
+	trimmed = 0;
+
+	if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
+		return -EINVAL;
+	if (start >= max_blks)
+		goto out;
+	if (start < le32_to_cpu(es->s_first_data_block)) {
+		len -= le32_to_cpu(es->s_first_data_block) - start;
+		start = le32_to_cpu(es->s_first_data_block);
+	}
+	if (start + len > max_blks)
+		len = max_blks - start;
+
+	ngroups = EXT3_SB(sb)->s_groups_count;
+	smp_rmb();
+
+	/* Determine first and last group to examine based on start and len */
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
+				     &first_group, &first_block);
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
+				     &last_group, &last_block);
+	last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
+	last_block = EXT3_BLOCKS_PER_GROUP(sb);
+
+	if (first_group > last_group)
+		return -EINVAL;
+
+	for (group = first_group; group <= last_group; group++) {
+		gdp = ext3_get_group_desc(sb, group, NULL);
+		if (!gdp)
+			break;
+
+		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+		if (free_blocks < minlen)
+			continue;
+
+		if (len >= EXT3_BLOCKS_PER_GROUP(sb))
+			len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block);
+		else
+			last_block = first_block + len;
+
+		ret = ext3_trim_all_free(sb, group, first_block,
+					last_block, minlen);
+		if (ret < 0)
+			break;
+
+		trimmed += ret;
+		first_block = 0;
+	}
+
+	if (ret >= 0)
+		ret = 0;
+
+out:
+	range->len = trimmed * sb->s_blocksize;
+
+	return ret;
+}
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index e2e72c3..34f0a07 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -69,25 +69,26 @@
 	const char * error_msg = NULL;
 	const int rlen = ext3_rec_len_from_disk(de->rec_len);
 
-	if (rlen < EXT3_DIR_REC_LEN(1))
+	if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
 		error_msg = "rec_len is smaller than minimal";
-	else if (rlen % 4 != 0)
+	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (rlen < EXT3_DIR_REC_LEN(de->name_len))
+	else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
-	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+	else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
 		error_msg = "directory entry across blocks";
-	else if (le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
+	else if (unlikely(le32_to_cpu(de->inode) >
+			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
 
-	if (error_msg != NULL)
+	if (unlikely(error_msg != NULL))
 		ext3_error (dir->i_sb, function,
 			"bad entry in directory #%lu: %s - "
 			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
 			dir->i_ino, error_msg, offset,
 			(unsigned long) le32_to_cpu(de->inode),
 			rlen, de->name_len);
+
 	return error_msg == NULL ? 1 : 0;
 }
 
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a958061..ae94f6d 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2145,13 +2145,15 @@
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			ext3_journal_dirty_metadata(handle, bh);
+			if (ext3_journal_dirty_metadata(handle, bh))
+				return;
 		}
 		ext3_mark_inode_dirty(handle, inode);
 		truncate_restart_transaction(handle, inode);
 		if (bh) {
 			BUFFER_TRACE(bh, "retaking write access");
-			ext3_journal_get_write_access(handle, bh);
+			if (ext3_journal_get_write_access(handle, bh))
+				return;
 		}
 	}
 
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 8897481..fc080dd 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -276,7 +276,29 @@
 		mnt_drop_write(filp->f_path.mnt);
 		return err;
 	}
+	case FITRIM: {
 
+		struct super_block *sb = inode->i_sb;
+		struct fstrim_range range;
+		int ret = 0;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&range, (struct fstrim_range *)arg,
+				   sizeof(range)))
+			return -EFAULT;
+
+		ret = ext3_trim_fs(sb, &range);
+		if (ret < 0)
+			return ret;
+
+		if (copy_to_user((struct fstrim_range *)arg, &range,
+				 sizeof(range)))
+			return -EFAULT;
+
+		return 0;
+	}
 
 	default:
 		return -ENOTTY;
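From userspace, the new FITRIM case is exercised through the same ioctl interface that fstrim(8) uses: fill in a struct fstrim_range (values in bytes) and issue FITRIM on any open file or directory of the mounted filesystem; on return, range.len holds the number of bytes trimmed. Roughly, with a hypothetical mount point and minimal error handling:

/* Userspace sketch; "/mnt" is an assumed ext3 mount point. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	struct fstrim_range range = {
		.start  = 0,
		.len    = (__u64)-1,	/* whole filesystem */
		.minlen = 0,
	};
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
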
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index bce9dce..b27ba71 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -858,6 +858,7 @@
 	struct buffer_head * bh_use[NAMEI_RA_SIZE];
 	struct buffer_head * bh, *ret = NULL;
 	unsigned long start, block, b;
+	const u8 *name = entry->name;
 	int ra_max = 0;		/* Number of bh's in the readahead
 				   buffer, bh_use[] */
 	int ra_ptr = 0;		/* Current index into readahead
@@ -871,6 +872,16 @@
 	namelen = entry->len;
 	if (namelen > EXT3_NAME_LEN)
 		return NULL;
+	if ((namelen <= 2) && (name[0] == '.') &&
+	    (name[1] == '.' || name[1] == 0)) {
+		/*
+		 * "." or ".." will only be in the first block
+		 * NFS may look up ".."; "." should be handled by the VFS
+		 */
+		block = start = 0;
+		nblocks = 1;
+		goto restart;
+	}
 	if (is_dx(dir)) {
 		bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
 		/*
@@ -961,55 +972,35 @@
 			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
 			int *err)
 {
-	struct super_block * sb;
+	struct super_block *sb = dir->i_sb;
 	struct dx_hash_info	hinfo;
-	u32 hash;
 	struct dx_frame frames[2], *frame;
-	struct ext3_dir_entry_2 *de, *top;
 	struct buffer_head *bh;
 	unsigned long block;
 	int retval;
-	int namelen = entry->len;
-	const u8 *name = entry->name;
 
-	sb = dir->i_sb;
-	/* NFS may look up ".." - look at dx_root directory block */
-	if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) {
-		if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
-			return NULL;
-	} else {
-		frame = frames;
-		frame->bh = NULL;			/* for dx_release() */
-		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
-		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
-	}
-	hash = hinfo.hash;
+	if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
+		return NULL;
 	do {
 		block = dx_get_block(frame->at);
 		if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
 			goto errout;
-		de = (struct ext3_dir_entry_2 *) bh->b_data;
-		top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
-				       EXT3_DIR_REC_LEN(0));
-		for (; de < top; de = ext3_next_entry(de)) {
-			int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
-				  + ((char *) de - bh->b_data);
 
-			if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
-				brelse(bh);
-				*err = ERR_BAD_DX_DIR;
-				goto errout;
-			}
-
-			if (ext3_match(namelen, name, de)) {
-				*res_dir = de;
-				dx_release(frames);
-				return bh;
-			}
+		retval = search_dirblock(bh, dir, entry,
+					 block << EXT3_BLOCK_SIZE_BITS(sb),
+					 res_dir);
+		if (retval == 1) {
+			dx_release(frames);
+			return bh;
 		}
-		brelse (bh);
+		brelse(bh);
+		if (retval == -1) {
+			*err = ERR_BAD_DX_DIR;
+			goto errout;
+		}
+
 		/* Check to see if we should continue to search */
-		retval = ext3_htree_next_block(dir, hash, frame,
+		retval = ext3_htree_next_block(dir, hinfo.hash, frame,
 					       frames, NULL);
 		if (retval < 0) {
 			ext3_warning(sb, __func__,
@@ -1047,7 +1038,7 @@
 			return ERR_PTR(-EIO);
 		}
 		inode = ext3_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				ext3_error(dir->i_sb, __func__,
 						"deleted inode referenced: %lu",
@@ -1607,7 +1598,9 @@
 			if (err)
 				goto journal_error;
 		}
-		ext3_journal_dirty_metadata(handle, frames[0].bh);
+		err = ext3_journal_dirty_metadata(handle, frames[0].bh);
+		if (err)
+			goto journal_error;
 	}
 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
 	if (!de)
@@ -1644,8 +1637,13 @@
 		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
 			return -EIO;
 		if (de == de_del)  {
+			int err;
+
 			BUFFER_TRACE(bh, "get_write_access");
-			ext3_journal_get_write_access(handle, bh);
+			err = ext3_journal_get_write_access(handle, bh);
+			if (err)
+				goto journal_error;
+
 			if (pde)
 				pde->rec_len = ext3_rec_len_to_disk(
 					ext3_rec_len_from_disk(pde->rec_len) +
@@ -1654,7 +1652,12 @@
 				de->inode = 0;
 			dir->i_version++;
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			ext3_journal_dirty_metadata(handle, bh);
+			err = ext3_journal_dirty_metadata(handle, bh);
+			if (err) {
+journal_error:
+				ext3_std_error(dir->i_sb, err);
+				return err;
+			}
 			return 0;
 		}
 		i += ext3_rec_len_from_disk(de->rec_len);
@@ -1762,7 +1765,7 @@
 {
 	handle_t *handle;
 	struct inode * inode;
-	struct buffer_head * dir_block;
+	struct buffer_head * dir_block = NULL;
 	struct ext3_dir_entry_2 * de;
 	int err, retries = 0;
 
@@ -1790,15 +1793,14 @@
 	inode->i_fop = &ext3_dir_operations;
 	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
 	dir_block = ext3_bread (handle, inode, 0, 1, &err);
-	if (!dir_block) {
-		drop_nlink(inode); /* is this nlink == 0? */
-		unlock_new_inode(inode);
-		ext3_mark_inode_dirty(handle, inode);
-		iput (inode);
-		goto out_stop;
-	}
+	if (!dir_block)
+		goto out_clear_inode;
+
 	BUFFER_TRACE(dir_block, "get_write_access");
-	ext3_journal_get_write_access(handle, dir_block);
+	err = ext3_journal_get_write_access(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
+
 	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
 	de->inode = cpu_to_le32(inode->i_ino);
 	de->name_len = 1;
@@ -1814,11 +1816,16 @@
 	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
 	inode->i_nlink = 2;
 	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
-	ext3_journal_dirty_metadata(handle, dir_block);
-	brelse (dir_block);
-	ext3_mark_inode_dirty(handle, inode);
-	err = ext3_add_entry (handle, dentry, inode);
+	err = ext3_journal_dirty_metadata(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
+
+	err = ext3_mark_inode_dirty(handle, inode);
+	if (!err)
+		err = ext3_add_entry (handle, dentry, inode);
+
 	if (err) {
+out_clear_inode:
 		inode->i_nlink = 0;
 		unlock_new_inode(inode);
 		ext3_mark_inode_dirty(handle, inode);
@@ -1827,10 +1834,14 @@
 	}
 	inc_nlink(dir);
 	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
+	err = ext3_mark_inode_dirty(handle, dir);
+	if (err)
+		goto out_clear_inode;
+
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
 out_stop:
+	brelse(dir_block);
 	ext3_journal_stop(handle);
 	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
 		goto retry;
@@ -2353,7 +2364,9 @@
 			goto end_rename;
 	} else {
 		BUFFER_TRACE(new_bh, "get write access");
-		ext3_journal_get_write_access(handle, new_bh);
+		retval = ext3_journal_get_write_access(handle, new_bh);
+		if (retval)
+			goto journal_error;
 		new_de->inode = cpu_to_le32(old_inode->i_ino);
 		if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
 					      EXT3_FEATURE_INCOMPAT_FILETYPE))
@@ -2362,7 +2375,9 @@
 		new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
 		ext3_mark_inode_dirty(handle, new_dir);
 		BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
-		ext3_journal_dirty_metadata(handle, new_bh);
+		retval = ext3_journal_dirty_metadata(handle, new_bh);
+		if (retval)
+			goto journal_error;
 		brelse(new_bh);
 		new_bh = NULL;
 	}
@@ -2411,10 +2426,17 @@
 	ext3_update_dx_flag(old_dir);
 	if (dir_bh) {
 		BUFFER_TRACE(dir_bh, "get_write_access");
-		ext3_journal_get_write_access(handle, dir_bh);
+		retval = ext3_journal_get_write_access(handle, dir_bh);
+		if (retval)
+			goto journal_error;
 		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
-		ext3_journal_dirty_metadata(handle, dir_bh);
+		retval = ext3_journal_dirty_metadata(handle, dir_bh);
+		if (retval) {
+journal_error:
+			ext3_std_error(new_dir->i_sb, retval);
+			goto end_rename;
+		}
 		drop_nlink(old_dir);
 		if (new_inode) {
 			drop_nlink(new_inode);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index e746d30..108b142 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -249,7 +249,11 @@
 		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
 		set_buffer_uptodate(gdb);
 		unlock_buffer(gdb);
-		ext3_journal_dirty_metadata(handle, gdb);
+		err = ext3_journal_dirty_metadata(handle, gdb);
+		if (err) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext3_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -269,7 +273,11 @@
 			err = PTR_ERR(gdb);
 			goto exit_bh;
 		}
-		ext3_journal_dirty_metadata(handle, gdb);
+		err = ext3_journal_dirty_metadata(handle, gdb);
+		if (err) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext3_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -295,7 +303,11 @@
 			err = PTR_ERR(it);
 			goto exit_bh;
 		}
-		ext3_journal_dirty_metadata(handle, it);
+		err = ext3_journal_dirty_metadata(handle, it);
+		if (err) {
+			brelse(it);
+			goto exit_bh;
+		}
 		brelse(it);
 		ext3_set_bit(bit, bh->b_data);
 	}
@@ -306,7 +318,9 @@
 
 	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
 			bh->b_data);
-	ext3_journal_dirty_metadata(handle, bh);
+	err = ext3_journal_dirty_metadata(handle, bh);
+	if (err)
+		goto exit_bh;
 	brelse(bh);
 
 	/* Mark unused entries in inode bitmap used */
@@ -319,7 +333,7 @@
 
 	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
 			bh->b_data);
-	ext3_journal_dirty_metadata(handle, bh);
+	err = ext3_journal_dirty_metadata(handle, bh);
 exit_bh:
 	brelse(bh);
 
@@ -503,12 +517,19 @@
 	 * reserved inode, and will become GDT blocks (primary and backup).
 	 */
 	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
-	ext3_journal_dirty_metadata(handle, dind);
+	err = ext3_journal_dirty_metadata(handle, dind);
+	if (err)
+		goto exit_group_desc;
 	brelse(dind);
+	dind = NULL;
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
-	ext3_mark_iloc_dirty(handle, inode, &iloc);
+	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+	if (err)
+		goto exit_group_desc;
 	memset((*primary)->b_data, 0, sb->s_blocksize);
-	ext3_journal_dirty_metadata(handle, *primary);
+	err = ext3_journal_dirty_metadata(handle, *primary);
+	if (err)
+		goto exit_group_desc;
 
 	o_group_desc = EXT3_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
@@ -519,10 +540,14 @@
 	kfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	if (err)
+		goto exit_inode;
 
 	return 0;
 
+exit_group_desc:
+	kfree(n_group_desc);
 exit_inode:
 	//ext3_journal_release_buffer(handle, iloc.bh);
 	brelse(iloc.bh);
@@ -706,16 +731,20 @@
 		}
 		ext3_debug("update metadata backup %#04lx\n",
 			  (unsigned long)bh->b_blocknr);
-		if ((err = ext3_journal_get_write_access(handle, bh)))
+		if ((err = ext3_journal_get_write_access(handle, bh))) {
+			brelse(bh);
 			break;
+		}
 		lock_buffer(bh);
 		memcpy(bh->b_data, data, size);
 		if (rest)
 			memset(bh->b_data + size, 0, rest);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
-		ext3_journal_dirty_metadata(handle, bh);
+		err = ext3_journal_dirty_metadata(handle, bh);
 		brelse(bh);
+		if (err)
+			break;
 	}
 	if ((err2 = ext3_journal_stop(handle)) && !err)
 		err = err2;
@@ -922,7 +951,9 @@
 	/* Update the global fs size fields */
 	sbi->s_groups_count++;
 
-	ext3_journal_dirty_metadata(handle, primary);
+	err = ext3_journal_dirty_metadata(handle, primary);
+	if (err)
+		goto exit_journal;
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
@@ -934,7 +965,7 @@
 	percpu_counter_add(&sbi->s_freeinodes_counter,
 			   EXT3_INODES_PER_GROUP(sb));
 
-	ext3_journal_dirty_metadata(handle, sbi->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
 
 exit_journal:
 	mutex_unlock(&sbi->s_resize_lock);
@@ -1064,8 +1095,14 @@
 		goto exit_put;
 	}
 	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
-	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
 	mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
+	if (err) {
+		ext3_warning(sb, __func__,
+			     "error %d on journal dirty metadata", err);
+		ext3_journal_stop(handle);
+		goto exit_put;
+	}
 	ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
 		   o_blocks_count, o_blocks_count + add);
 	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
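
The resize.c hunks above all apply the same transformation: each formerly ignored ext3_journal_dirty_metadata() or ext3_mark_iloc_dirty() return value is now captured into err and routed to the function's existing cleanup labels (or breaks out of the copy loop), dropping any buffer_head reference that would otherwise leak. A condensed, hypothetical kernel-context sketch of that shape, not part of the patch:

/* Sketch only: the error-propagation shape used throughout resize.c. */
static int dirty_and_release(handle_t *handle, struct buffer_head *bh)
{
	int err;

	err = ext3_journal_dirty_metadata(handle, bh);
	if (err) {
		brelse(bh);	/* drop the reference before bailing out */
		return err;	/* callers jump to their exit_* labels */
	}
	brelse(bh);
	return 0;
}
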
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 77ce161..85c8cc8 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -143,12 +143,16 @@
 void ext3_msg(struct super_block *sb, const char *prefix,
 		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT3-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
 	va_end(args);
 }
 
@@ -195,15 +199,20 @@
 			sb->s_id);
 }
 
-void ext3_error (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext3_error(struct super_block *sb, const char *function,
+		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	ext3_handle_error(sb);
@@ -274,15 +283,20 @@
  * case we take the easy way out and panic immediately.
  */
 
-void ext3_abort (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext3_abort(struct super_block *sb, const char *function,
+		 const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_PANIC))
@@ -300,16 +314,20 @@
 		journal_abort(EXT3_SB(sb)->s_journal, -EIO);
 }
 
-void ext3_warning (struct super_block * sb, const char * function,
-		   const char * fmt, ...)
+void ext3_warning(struct super_block *sb, const char *function,
+		  const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ",
-	       sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 }
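
The ext3_msg(), ext3_error(), ext3_abort() and ext3_warning() hunks above replace the old printk()/vprintk()/printk() sequence with a single printk() using the kernel's %pV extension and struct va_format, so each message is emitted as one atomic line even when several CPUs log concurrently. A minimal kernel-context sketch of the pattern, with a hypothetical subsystem prefix:

#include <linux/kernel.h>
#include <linux/printk.h>

static void mysub_msg(const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands the caller's format and arguments inside this printk */
	printk("%smysub: %pV\n", level, &vaf);
	va_end(args);
}
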
 
@@ -346,7 +364,7 @@
 	struct block_device *bdev;
 	char b[BDEVNAME_SIZE];
 
-	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
 	if (IS_ERR(bdev))
 		goto fail;
 	return bdev;
@@ -363,8 +381,7 @@
  */
 static int ext3_blkdev_put(struct block_device *bdev)
 {
-	bd_release(bdev);
-	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 static int ext3_blkdev_remove(struct ext3_sb_info *sbi)
@@ -737,7 +754,7 @@
 static int ext3_mark_dquot_dirty(struct dquot *dquot);
 static int ext3_write_info(struct super_block *sb, int type);
 static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-				char *path);
+			 struct path *path);
 static int ext3_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
 			       size_t len, loff_t off);
@@ -1848,13 +1865,15 @@
 		goto failed_mount;
 	}
 
-	if (generic_check_addressable(sb->s_blocksize_bits,
-				      le32_to_cpu(es->s_blocks_count))) {
+	err = generic_check_addressable(sb->s_blocksize_bits,
+					le32_to_cpu(es->s_blocks_count));
+	if (err) {
 		ext3_msg(sb, KERN_ERR,
 			"error: filesystem is too large to mount safely");
 		if (sizeof(sector_t) < 8)
 			ext3_msg(sb, KERN_ERR,
 				"error: CONFIG_LBDAF not enabled");
+		ret = err;
 		goto failed_mount;
 	}
 
@@ -2142,13 +2161,6 @@
 	if (bdev == NULL)
 		return NULL;
 
-	if (bd_claim(bdev, sb)) {
-		ext3_msg(sb, KERN_ERR,
-			"error: failed to claim external journal device");
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
-		return NULL;
-	}
-
 	blocksize = sb->s_blocksize;
 	hblock = bdev_logical_block_size(bdev);
 	if (blocksize < hblock) {
@@ -2297,7 +2309,7 @@
 	EXT3_SB(sb)->s_journal = journal;
 	ext3_clear_journal_err(sb, es);
 
-	if (journal_devnum &&
+	if (!really_read_only && journal_devnum &&
 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
 		es->s_journal_dev = cpu_to_le32(journal_devnum);
 
@@ -2865,27 +2877,20 @@
  * Standard function to be called on quota_on
  */
 static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 char *name)
+			 struct path *path)
 {
 	int err;
-	struct path path;
 
 	if (!test_opt(sb, QUOTA))
 		return -EINVAL;
 
-	err = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (err)
-		return err;
-
 	/* Quotafile not on the same filesystem? */
-	if (path.mnt->mnt_sb != sb) {
-		path_put(&path);
+	if (path->mnt->mnt_sb != sb)
 		return -EXDEV;
-	}
 	/* Journaling quota? */
 	if (EXT3_SB(sb)->s_qf_names[type]) {
 		/* Quotafile not of fs root? */
-		if (path.dentry->d_parent != sb->s_root)
+		if (path->dentry->d_parent != sb->s_root)
 			ext3_msg(sb, KERN_WARNING,
 				"warning: Quota file not on filesystem root. "
 				"Journaled quota will not work.");
@@ -2895,7 +2900,7 @@
 	 * When we journal data on quota file, we have to flush journal to see
 	 * all updates to the file when we bypass pagecache...
 	 */
-	if (ext3_should_journal_data(path.dentry->d_inode)) {
+	if (ext3_should_journal_data(path->dentry->d_inode)) {
 		/*
 		 * We don't need to lock updates but journal_flush() could
 		 * otherwise be livelocked...
@@ -2903,15 +2908,11 @@
 		journal_lock_updates(EXT3_SB(sb)->s_journal);
 		err = journal_flush(EXT3_SB(sb)->s_journal);
 		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err) {
-			path_put(&path);
+		if (err)
 			return err;
-		}
 	}
 
-	err = dquot_quota_on_path(sb, type, format_id, &path);
-	path_put(&path);
-	return err;
+	return dquot_quota_on(sb, type, format_id, path);
 }
 
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index e69dc6d..32e6cc2 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -925,7 +925,7 @@
 /*
  * ext3_xattr_set_handle()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 14c3af2..adf96b8 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -592,7 +592,8 @@
 	 * Account for the allocated meta blocks.  We will never
 	 * fail EDQUOT for metdata, but we do account for it.
 	 */
-	if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
+	if (!(*errp) &&
+	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ece76fb..164c560 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -60,9 +60,13 @@
 	return (ext4_filetype_table[filetype]);
 }
 
-
+/*
+ * Return 0 if the directory entry is OK, and 1 if there is a problem
+ *
+ * Note: this is the opposite of what ext2 and ext3 historically returned...
+ */
 int __ext4_check_dir_entry(const char *function, unsigned int line,
-			   struct inode *dir,
+			   struct inode *dir, struct file *filp,
 			   struct ext4_dir_entry_2 *de,
 			   struct buffer_head *bh,
 			   unsigned int offset)
@@ -71,26 +75,37 @@
 	const int rlen = ext4_rec_len_from_disk(de->rec_len,
 						dir->i_sb->s_blocksize);
 
-	if (rlen < EXT4_DIR_REC_LEN(1))
+	if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
 		error_msg = "rec_len is smaller than minimal";
-	else if (rlen % 4 != 0)
+	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
+	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
-	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+	else if (unlikely(((char *) de - bh->b_data) + rlen >
+			  dir->i_sb->s_blocksize))
 		error_msg = "directory entry across blocks";
-	else if (le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
+	else if (unlikely(le32_to_cpu(de->inode) >
+			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
+	else
+		return 0;
 
-	if (error_msg != NULL)
-		ext4_error_inode(dir, function, line, bh->b_blocknr,
-			"bad entry in directory: %s - "
-			"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
-			error_msg, (unsigned) (offset%bh->b_size), offset,
-			le32_to_cpu(de->inode),
-			rlen, de->name_len);
-	return error_msg == NULL ? 1 : 0;
+	if (filp)
+		ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+	else
+		ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+
+	return 1;
 }
 
 static int ext4_readdir(struct file *filp,
@@ -152,8 +167,9 @@
 		 */
 		if (!bh) {
 			if (!dir_has_error) {
-				EXT4_ERROR_INODE(inode, "directory "
-					   "contains a hole at offset %Lu",
+				EXT4_ERROR_FILE(filp, 0,
+						"directory contains a "
+						"hole at offset %llu",
 					   (unsigned long long) filp->f_pos);
 				dir_has_error = 1;
 			}
@@ -194,8 +210,8 @@
 		while (!error && filp->f_pos < inode->i_size
 		       && offset < sb->s_blocksize) {
 			de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
-			if (!ext4_check_dir_entry(inode, de,
-						  bh, offset)) {
+			if (ext4_check_dir_entry(inode, filp, de,
+						 bh, offset)) {
 				/*
 				 * On error, skip the f_pos to the next block
 				 */
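
The dir.c hunks above flip the __ext4_check_dir_entry() convention: it now returns 0 for a sane entry and 1 for a corrupt one (the opposite of ext2/ext3, as the new comment notes), and it accepts an optional struct file so corruption can be reported against the open file rather than only the inode. A self-contained toy, not ext4 code, showing the flipped convention and the kind of rec_len sanity checks involved:

#include <stdio.h>

/* Toy stand-in: 0 means the entry is OK, nonzero means it is bad,
 * mirroring the new __ext4_check_dir_entry() convention. */
static int check_entry(unsigned int rec_len, unsigned int name_len,
		       unsigned int blocksize, unsigned int offset)
{
	unsigned int min_len = 8 + ((name_len + 3) & ~3u); /* like EXT4_DIR_REC_LEN */

	if (rec_len < 12)
		return 1;		/* rec_len is smaller than minimal */
	if (rec_len % 4 != 0)
		return 1;		/* rec_len % 4 != 0 */
	if (rec_len < min_len)
		return 1;		/* rec_len is too small for name_len */
	if (offset + rec_len > blocksize)
		return 1;		/* directory entry across blocks */
	return 0;
}

int main(void)
{
	/* Callers now branch on a nonzero result, as ext4_readdir() does. */
	if (check_entry(10, 3, 4096, 0))
		puts("bad entry: skip to the next block");
	if (!check_entry(16, 3, 4096, 0))
		puts("entry ok");
	return 0;
}
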
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 94ce3d7..3aa0b72 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -62,8 +62,8 @@
 #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...)			\
 	ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
 
-#define EXT4_ERROR_FILE(file, fmt, a...)	\
-	ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
+#define EXT4_ERROR_FILE(file, block, fmt, a...)				\
+	ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
 
 /* data type for block offset of block group */
 typedef int ext4_grpblk_t;
@@ -561,23 +561,7 @@
 #define EXT4_IOC32_SETVERSION_OLD	FS_IOC32_SETVERSION
 #endif
 
-
-/*
- *  Mount options
- */
-struct ext4_mount_options {
-	unsigned long s_mount_opt;
-	uid_t s_resuid;
-	gid_t s_resgid;
-	unsigned long s_commit_interval;
-	u32 s_min_batch_time, s_max_batch_time;
-#ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[MAXQUOTAS];
-#endif
-};
-
-/* Max physical block we can addres w/o extents */
+/* Max physical block we can address w/o extents */
 #define EXT4_MAX_BLOCK_FILE_PHYS	0xFFFFFFFF
 
 /*
@@ -709,6 +693,8 @@
 	if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra))     \
 		ext4_decode_extra_time(&(inode)->xtime,			       \
 				       raw_inode->xtime ## _extra);	       \
+	else								       \
+		(inode)->xtime.tv_nsec = 0;				       \
 } while (0)
 
 #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode)			       \
@@ -719,6 +705,8 @@
 	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))	       \
 		ext4_decode_extra_time(&(einode)->xtime,		       \
 				       raw_inode->xtime ## _extra);	       \
+	else								       \
+		(einode)->xtime.tv_nsec = 0;				       \
 } while (0)
 
 #define i_disk_version osd1.linux1.l_i_version
@@ -750,12 +738,13 @@
 
 /*
  * storage for cached extent
+ * If ec_len == 0, then the cache is invalid.
+ * If ec_start == 0, then the cache represents a gap (null mapping)
  */
 struct ext4_ext_cache {
 	ext4_fsblk_t	ec_start;
 	ext4_lblk_t	ec_block;
 	__u32		ec_len; /* must be 32bit to return holes */
-	__u32		ec_type;
 };
 
 /*
@@ -774,10 +763,12 @@
 	 * near to their parent directory's inode.
 	 */
 	ext4_group_t	i_block_group;
+	ext4_lblk_t	i_dir_start_lookup;
+#if (BITS_PER_LONG < 64)
 	unsigned long	i_state_flags;		/* Dynamic state flags */
+#endif
 	unsigned long	i_flags;
 
-	ext4_lblk_t		i_dir_start_lookup;
 #ifdef CONFIG_EXT4_FS_XATTR
 	/*
 	 * Extended attributes can be read independently of the main file
@@ -820,7 +811,7 @@
 	 */
 	struct rw_semaphore i_data_sem;
 	struct inode vfs_inode;
-	struct jbd2_inode jinode;
+	struct jbd2_inode *jinode;
 
 	struct ext4_ext_cache i_cached_extent;
 	/*
@@ -840,14 +831,12 @@
 	unsigned int i_reserved_data_blocks;
 	unsigned int i_reserved_meta_blocks;
 	unsigned int i_allocated_meta_blocks;
-	unsigned short i_delalloc_reserved_flag;
-	sector_t i_da_metadata_calc_last_lblock;
+	ext4_lblk_t i_da_metadata_calc_last_lblock;
 	int i_da_metadata_calc_len;
 
 	/* on-disk additional length */
 	__u16 i_extra_isize;
 
-	spinlock_t i_block_reservation_lock;
 #ifdef CONFIG_QUOTA
 	/* quota space reservation, managed internally by quota code */
 	qsize_t i_reserved_quota;
@@ -856,9 +845,12 @@
 	/* completed IOs that might need unwritten extents handling */
 	struct list_head i_completed_io_list;
 	spinlock_t i_completed_io_lock;
+	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
-	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
+	atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */
+
+	spinlock_t i_block_reservation_lock;
 
 	/*
 	 * Transactions that contain inode's metadata needed to complete
@@ -917,11 +909,20 @@
 #define EXT4_MOUNT_DISCARD		0x40000000 /* Issue DISCARD requests */
 #define EXT4_MOUNT_INIT_INODE_TABLE	0x80000000 /* Initialize uninitialized itables */
 
-#define clear_opt(o, opt)		o &= ~EXT4_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT4_MOUNT_##opt
+#define clear_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt &= \
+						~EXT4_MOUNT_##opt
+#define set_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt |= \
+						EXT4_MOUNT_##opt
 #define test_opt(sb, opt)		(EXT4_SB(sb)->s_mount_opt & \
 					 EXT4_MOUNT_##opt)
 
+#define clear_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 &= \
+						~EXT4_MOUNT2_##opt
+#define set_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 |= \
+						EXT4_MOUNT2_##opt
+#define test_opt2(sb, opt)		(EXT4_SB(sb)->s_mount_opt2 & \
+					 EXT4_MOUNT2_##opt)
+
 #define ext4_set_bit			ext2_set_bit
 #define ext4_set_bit_atomic		ext2_set_bit_atomic
 #define ext4_clear_bit			ext2_clear_bit
@@ -1087,6 +1088,7 @@
 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
 	struct buffer_head **s_group_desc;
 	unsigned int s_mount_opt;
+	unsigned int s_mount_opt2;
 	unsigned int s_mount_flags;
 	ext4_fsblk_t s_sb_block;
 	uid_t s_resuid;
@@ -1237,24 +1239,39 @@
 	EXT4_STATE_EXT_MIGRATE,		/* Inode is migrating */
 	EXT4_STATE_DIO_UNWRITTEN,	/* need convert on dio done*/
 	EXT4_STATE_NEWENTRY,		/* File just added to dir */
+	EXT4_STATE_DELALLOC_RESERVED,	/* blks already reserved for delalloc */
 };
 
-#define EXT4_INODE_BIT_FNS(name, field)					\
+#define EXT4_INODE_BIT_FNS(name, field, offset)				\
 static inline int ext4_test_inode_##name(struct inode *inode, int bit)	\
 {									\
-	return test_bit(bit, &EXT4_I(inode)->i_##field);		\
+	return test_bit(bit + (offset), &EXT4_I(inode)->i_##field);	\
 }									\
 static inline void ext4_set_inode_##name(struct inode *inode, int bit)	\
 {									\
-	set_bit(bit, &EXT4_I(inode)->i_##field);			\
+	set_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }									\
 static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
 {									\
-	clear_bit(bit, &EXT4_I(inode)->i_##field);			\
+	clear_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }
 
-EXT4_INODE_BIT_FNS(flag, flags)
-EXT4_INODE_BIT_FNS(state, state_flags)
+EXT4_INODE_BIT_FNS(flag, flags, 0)
+#if (BITS_PER_LONG < 64)
+EXT4_INODE_BIT_FNS(state, state_flags, 0)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	(ei)->i_state_flags = 0;
+}
+#else
+EXT4_INODE_BIT_FNS(state, flags, 32)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	/* We depend on the fact that callers will set i_flags */
+}
+#endif
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block.  This will allow us to call the feature-test
@@ -1642,10 +1659,12 @@
 
 /* dir.c */
 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
+				  struct file *,
 				  struct ext4_dir_entry_2 *,
 				  struct buffer_head *, unsigned int);
-#define ext4_check_dir_entry(dir, de, bh, offset) \
-	__ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
+#define ext4_check_dir_entry(dir, filp, de, bh, offset)			\
+	unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
+					(de), (bh), (offset)))
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 				    __u32 minor_hash,
 				    struct ext4_dir_entry_2 *dirent);
@@ -1653,6 +1672,7 @@
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, int);
+extern int ext4_flush_completed_IO(struct inode *);
 
 /* hash.c */
 extern int ext4fs_dirhash(const char *name, int len, struct
@@ -1752,8 +1772,8 @@
 			     ext4_fsblk_t, const char *, ...)
 	__attribute__ ((format (printf, 5, 6)));
 extern void ext4_error_file(struct file *, const char *, unsigned int,
-			    const char *, ...)
-	__attribute__ ((format (printf, 4, 5)));
+			    ext4_fsblk_t, const char *, ...)
+	__attribute__ ((format (printf, 5, 6)));
 extern void __ext4_std_error(struct super_block *, const char *,
 			     unsigned int, int);
 extern void __ext4_abort(struct super_block *, const char *, unsigned int,
@@ -2046,7 +2066,7 @@
 extern void ext4_ext_truncate(struct inode *);
 extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
-extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
+extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
 			  loff_t len);
 extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 			  ssize_t len);
@@ -2100,6 +2120,15 @@
 
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
+/* For ioend & aio unwritten conversion wait queues */
+#define EXT4_WQ_HASH_SZ		37
+#define ext4_ioend_wq(v)   (&ext4__ioend_wq[((unsigned long)(v)) %\
+					    EXT4_WQ_HASH_SZ])
+#define ext4_aio_mutex(v)  (&ext4__aio_mutex[((unsigned long)(v)) %\
+					     EXT4_WQ_HASH_SZ])
+extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _EXT4_H */
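
The new EXT4_WQ_HASH_SZ machinery at the end of this header hashes an inode pointer into a fixed array of 37 wait queues and 37 mutexes, giving per-inode-ish granularity for unwritten-extent waiters and unaligned-AIO serialization without embedding a wait queue and mutex in every inode. A self-contained userspace toy (pthreads standing in for the kernel primitives) of the same bucketing idea:

#include <pthread.h>
#include <stdio.h>

#define WQ_HASH_SZ 37	/* same bucket count the patch uses */

static pthread_mutex_t bucket_mutex[WQ_HASH_SZ];

/* Hash an object's address into one of a small, fixed set of locks. */
static pthread_mutex_t *obj_mutex(const void *obj)
{
	return &bucket_mutex[(unsigned long)obj % WQ_HASH_SZ];
}

int main(void)
{
	int i, a, b;

	for (i = 0; i < WQ_HASH_SZ; i++)
		pthread_mutex_init(&bucket_mutex[i], NULL);

	pthread_mutex_lock(obj_mutex(&a));
	puts("holding a's bucket lock");
	pthread_mutex_unlock(obj_mutex(&a));

	printf("a and b %s a bucket\n",
	       obj_mutex(&a) == obj_mutex(&b) ? "share" : "do not share");
	return 0;
}
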
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 28ce70f..2e29abb 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -119,10 +119,6 @@
  * structure for external API
  */
 
-#define EXT4_EXT_CACHE_NO	0
-#define EXT4_EXT_CACHE_GAP	1
-#define EXT4_EXT_CACHE_EXTENT	2
-
 /*
  * to be called by ext4_ext_walk_space()
  * negative retcode - error
@@ -197,7 +193,7 @@
 static inline void
 ext4_ext_invalidate_cache(struct inode *inode)
 {
-	EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
+	EXT4_I(inode)->i_cached_extent.ec_len = 0;
 }
 
 static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
@@ -278,7 +274,7 @@
 }
 
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
-					 sector_t lblocks);
+					 ext4_lblk_t lblocks);
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
 						   int num,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b0bd792..d8b992e 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -253,7 +253,7 @@
 static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
 {
 	if (ext4_handle_valid(handle))
-		return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
+		return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
 	return 0;
 }
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0554c48..ccce8a7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -117,11 +117,33 @@
 		struct ext4_extent *ex;
 		depth = path->p_depth;
 
-		/* try to predict block placement */
+		/*
+		 * Try to predict block placement assuming that we are
+		 * filling in a file which will eventually be
+		 * non-sparse --- i.e., in the case of libbfd writing
+		 * an ELF object sections out-of-order but in a way
+		 * the eventually results in a contiguous object or
+		 * executable file, or some database extending a table
+		 * space file.  However, this is actually somewhat
+		 * non-ideal if we are writing a sparse file such as
+		 * qemu or KVM writing a raw image file that is going
+		 * to stay fairly sparse, since it will end up
+		 * fragmenting the file system's free space.  Maybe we
+		 * should have some heuristics or some way to allow
+		 * userspace to pass a hint to the file system,
+		 * especially if the latter case turns out to be
+		 * common.
+		 */
 		ex = path[depth].p_ext;
-		if (ex)
-			return (ext4_ext_pblock(ex) +
-				(block - le32_to_cpu(ex->ee_block)));
+		if (ex) {
+			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
+			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
+
+			if (block > ext_block)
+				return ext_pblk + (block - ext_block);
+			else
+				return ext_pblk - (ext_block - block);
+		}
 
 		/* it looks like index is empty;
 		 * try to find starting block from index itself */
@@ -244,7 +266,7 @@
  * to allocate @blocks
  * Worse case is one block per extent
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
+int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	int idxs, num = 0;
@@ -1872,12 +1894,10 @@
 			cbex.ec_block = start;
 			cbex.ec_len = end - start;
 			cbex.ec_start = 0;
-			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
 			cbex.ec_len = ext4_ext_get_actual_len(ex);
 			cbex.ec_start = ext4_ext_pblock(ex);
-			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
 
 		if (unlikely(cbex.ec_len == 0)) {
@@ -1917,13 +1937,12 @@
 
 static void
 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-			__u32 len, ext4_fsblk_t start, int type)
+			__u32 len, ext4_fsblk_t start)
 {
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	cex = &EXT4_I(inode)->i_cached_extent;
-	cex->ec_type = type;
 	cex->ec_block = block;
 	cex->ec_len = len;
 	cex->ec_start = start;
@@ -1976,15 +1995,18 @@
 	}
 
 	ext_debug(" -> %u:%lu\n", lblock, len);
-	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+	ext4_ext_put_in_cache(inode, lblock, len, 0);
 }
 
+/*
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
 static int
 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 			struct ext4_extent *ex)
 {
 	struct ext4_ext_cache *cex;
-	int ret = EXT4_EXT_CACHE_NO;
+	int ret = 0;
 
 	/*
 	 * We borrow i_block_reservation_lock to protect i_cached_extent
@@ -1993,11 +2015,9 @@
 	cex = &EXT4_I(inode)->i_cached_extent;
 
 	/* has cache valid data? */
-	if (cex->ec_type == EXT4_EXT_CACHE_NO)
+	if (cex->ec_len == 0)
 		goto errout;
 
-	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
-			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (in_range(block, cex->ec_block, cex->ec_len)) {
 		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
@@ -2005,7 +2025,7 @@
 		ext_debug("%u cached by %u:%u:%llu\n",
 				block,
 				cex->ec_block, cex->ec_len, cex->ec_start);
-		ret = cex->ec_type;
+		ret = 1;
 	}
 errout:
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
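
With ec_type gone, the extent cache encodes its state in the remaining fields: ec_len == 0 means the cache is invalid, and a valid entry with ec_start == 0 describes a gap (hole) rather than a mapped extent, which is why ext4_ext_in_cache() above can simply return 0 or 1. A self-contained toy of that encoding:

#include <stdio.h>

/* Toy model of the reworked extent cache: no separate type field;
 * len == 0 => cache invalid, start == 0 => cached gap (hole). */
struct ext_cache {
	unsigned long long start;	/* physical start, 0 => gap */
	unsigned int block;		/* logical start */
	unsigned int len;		/* 0 => cache invalid */
};

static const char *classify(const struct ext_cache *c)
{
	if (c->len == 0)
		return "invalid";
	return c->start ? "extent" : "gap";
}

int main(void)
{
	struct ext_cache c = { 0, 0, 0 };

	printf("%s\n", classify(&c));		/* invalid */
	c.block = 100; c.len = 8;		/* cache a hole at 100..107 */
	printf("%s\n", classify(&c));		/* gap */
	c.start = 5000;				/* now a mapped extent */
	printf("%s\n", classify(&c));		/* extent */
	return 0;
}
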
@@ -2825,14 +2845,14 @@
  * to an uninitialized extent.
  *
  * Writing to an uninitized extent may result in splitting the uninitialized
- * extent into multiple /intialized unintialized extents (up to three)
+ * extent into multiple /initialized uninitialized extents (up to three)
  * There are three possibilities:
  *   a> There is no split required: Entire extent should be uninitialized
  *   b> Splits in two extents: Write is happening at either end of the extent
  *   c> Splits in three extents: Somone is writing in middle of the extent
  *
  * One of more index blocks maybe needed if the extent tree grow after
- * the unintialized extent split. To prevent ENOSPC occur at the IO
+ * the uninitialized extent split. To prevent ENOSPC occur at the IO
  * complete, we need to split the uninitialized extent before DIO submit
  * the IO. The uninitialized extent called at this time will be split
  * into three uninitialized extent(at most). After IO complete, the part
@@ -3082,7 +3102,7 @@
  * Handle EOFBLOCKS_FL flag, clearing it if necessary
  */
 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
-			      struct ext4_map_blocks *map,
+			      ext4_lblk_t lblk,
 			      struct ext4_ext_path *path,
 			      unsigned int len)
 {
@@ -3112,7 +3132,7 @@
 	 * this turns out to be false, we can bail out from this
 	 * function immediately.
 	 */
-	if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
 	    ext4_ext_get_actual_len(last_ex))
 		return 0;
 	/*
@@ -3154,9 +3174,10 @@
 		 * that this IO needs to convertion to written when IO is
 		 * completed
 		 */
-		if (io)
+		if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
 			io->flag = EXT4_IO_END_UNWRITTEN;
-		else
+			atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+		} else
 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 		if (ext4_should_dioread_nolock(inode))
 			map->m_flags |= EXT4_MAP_UNINIT;
@@ -3168,8 +3189,8 @@
 							path);
 		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
-			err = check_eofblocks_fl(handle, inode, map, path,
-						 map->m_len);
+			err = check_eofblocks_fl(handle, inode, map->m_lblk,
+						 path, map->m_len);
 		} else
 			err = ret;
 		goto out2;
@@ -3199,7 +3220,8 @@
 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
 	if (ret >= 0) {
 		ext4_update_inode_fsync_trans(handle, inode, 1);
-		err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
+					 map->m_len);
 		if (err < 0)
 			goto out2;
 	}
@@ -3276,7 +3298,7 @@
 	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
 	ext4_fsblk_t newblock;
-	int err = 0, depth, ret, cache_type;
+	int err = 0, depth, ret;
 	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3285,9 +3307,8 @@
 		  map->m_lblk, map->m_len, inode->i_ino);
 
 	/* check in cache */
-	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
-	if (cache_type) {
-		if (cache_type == EXT4_EXT_CACHE_GAP) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*
 				 * block isn't allocated yet and
@@ -3296,7 +3317,7 @@
 				goto out2;
 			}
 			/* we should allocate requested block */
-		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
+		} else {
 			/* block is already allocated */
 			newblock = map->m_lblk
 				   - le32_to_cpu(newex.ee_block)
@@ -3305,8 +3326,6 @@
 			allocated = ext4_ext_get_actual_len(&newex) -
 				(map->m_lblk - le32_to_cpu(newex.ee_block));
 			goto out;
-		} else {
-			BUG();
 		}
 	}
 
@@ -3357,8 +3376,7 @@
 			/* Do not put uninitialized extent in the cache */
 			if (!ext4_ext_is_uninitialized(ex)) {
 				ext4_ext_put_in_cache(inode, ee_block,
-							ee_len, ee_start,
-							EXT4_EXT_CACHE_EXTENT);
+							ee_len, ee_start);
 				goto out;
 			}
 			ret = ext4_ext_handle_uninitialized_extents(handle,
@@ -3446,9 +3464,10 @@
 		 * that we need to perform convertion when IO is done.
 		 */
 		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
-			if (io)
+			if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
 				io->flag = EXT4_IO_END_UNWRITTEN;
-			else
+				atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+			} else
 				ext4_set_inode_state(inode,
 						     EXT4_STATE_DIO_UNWRITTEN);
 		}
@@ -3456,7 +3475,7 @@
 			map->m_flags |= EXT4_MAP_UNINIT;
 	}
 
-	err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
 	if (err)
 		goto out2;
 
@@ -3490,8 +3509,7 @@
 	 * when it is _not_ an uninitialized extent.
 	 */
 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
-						EXT4_EXT_CACHE_EXTENT);
+		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	} else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -3519,6 +3537,12 @@
 	int err = 0;
 
 	/*
+	 * finish any pending end_io work so we won't run the risk of
+	 * converting any truncated blocks to initialized later
+	 */
+	ext4_flush_completed_IO(inode);
+
+	/*
 	 * probably first extent we're gonna free will be last in block
 	 */
 	err = ext4_writepage_trans_blocks(inode);
@@ -3605,14 +3629,15 @@
 }
 
 /*
- * preallocate space for a file. This implements ext4's fallocate inode
+ * preallocate space for a file. This implements ext4's fallocate file
  * operation, which gets called from sys_fallocate system call.
  * For block-mapped files, posix_fallocate should fall back to the method
  * of writing zeroes to the required new blocks (the same behavior which is
  * expected for file systems which do not support fallocate() system call).
  */
-long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
+long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	handle_t *handle;
 	loff_t new_size;
 	unsigned int max_blocks;
@@ -3622,6 +3647,10 @@
 	struct ext4_map_blocks map;
 	unsigned int credits, blkbits = inode->i_blkbits;
 
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode & ~FALLOC_FL_KEEP_SIZE)
+		return -EOPNOTSUPP;
+
 	/*
 	 * currently supporting (pre)allocate mode for extent-based
 	 * files _only_
@@ -3629,10 +3658,6 @@
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 		return -EOPNOTSUPP;
 
-	/* preallocation to directories is currently not supported */
-	if (S_ISDIR(inode->i_mode))
-		return -ENODEV;
-
 	map.m_lblk = offset >> blkbits;
 	/*
 	 * We can't just convert len to max_blocks because
@@ -3767,7 +3792,7 @@
 
 	logical =  (__u64)newex->ec_block << blksize_bits;
 
-	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
+	if (newex->ec_start == 0) {
 		pgoff_t offset;
 		struct page *page;
 		struct buffer_head *bh = NULL;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5a5c55d..7b80d54 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -55,11 +55,47 @@
 	return 0;
 }
 
+static void ext4_aiodio_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = ext4_ioend_wq(inode);
+
+	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
+}
+
+/*
+ * This tests whether the IO in question is block-aligned or not.
+ * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
+ * are converted to written only after the IO is complete.  Until they are
+ * mapped, these blocks appear as holes, so dio_zero_block() will assume that
+ * it needs to zero out portions of the start and/or end block.  If 2 AIO
+ * threads are at work on the same unwritten block, they must be synchronized
+ * or one thread will zero the other's data, causing corruption.
+ */
+static int
+ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
+		   unsigned long nr_segs, loff_t pos)
+{
+	struct super_block *sb = inode->i_sb;
+	int blockmask = sb->s_blocksize - 1;
+	size_t count = iov_length(iov, nr_segs);
+	loff_t final_size = pos + count;
+
+	if (pos >= inode->i_size)
+		return 0;
+
+	if ((pos & blockmask) || (final_size & blockmask))
+		return 1;
+
+	return 0;
+}
+
 static ssize_t
 ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+	int unaligned_aio = 0;
+	int ret;
 
 	/*
 	 * If we have encountered a bitmap-format file, the size limit
@@ -78,9 +114,31 @@
 			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
 					      sbi->s_bitmap_maxbytes - pos);
 		}
+	} else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
+		   !is_sync_kiocb(iocb))) {
+		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
 	}
 
-	return generic_file_aio_write(iocb, iov, nr_segs, pos);
+	/* Unaligned direct AIO must be serialized; see comment above */
+	if (unaligned_aio) {
+		static unsigned long unaligned_warn_time;
+
+		/* Warn about this once per day */
+		if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
+			ext4_msg(inode->i_sb, KERN_WARNING,
+				 "Unaligned AIO/DIO on inode %ld by %s; "
+				 "performance will be poor.",
+				 inode->i_ino, current->comm);
+		mutex_lock(ext4_aio_mutex(inode));
+		ext4_aiodio_wait(inode);
+	}
+
+	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+	if (unaligned_aio)
+		mutex_unlock(ext4_aio_mutex(inode));
+
+	return ret;
 }
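
ext4_unaligned_aio() above flags a direct AIO write as unaligned when its start or end falls inside a filesystem block (writes that begin at or beyond i_size are exempt); such writes are then serialized through the hashed per-inode mutex and made to wait for in-flight unwritten-extent conversions. A self-contained toy of just the alignment test:

#include <stdio.h>

/* Toy version of the alignment check: a write is "unaligned" if either
 * its starting or its ending offset lands inside a block. */
static int is_unaligned(long long pos, long long count, unsigned int blocksize)
{
	long long blockmask = blocksize - 1;
	long long final_size = pos + count;

	return ((pos & blockmask) || (final_size & blockmask)) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", is_unaligned(4096, 8192, 4096));	/* 0: block aligned */
	printf("%d\n", is_unaligned(4096, 1000, 4096));	/* 1: ends mid-block */
	printf("%d\n", is_unaligned(100, 4096, 4096));	/* 1: starts mid-block */
	return 0;
}
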
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
@@ -104,6 +162,7 @@
 {
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct vfsmount *mnt = filp->f_path.mnt;
 	struct path path;
 	char buf[64], *cp;
@@ -127,6 +186,27 @@
 			ext4_mark_super_dirty(sb);
 		}
 	}
+	/*
+	 * Set up the jbd2_inode if we are opening the inode for
+	 * writing and the journal is present
+	 */
+	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
+		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
+
+		spin_lock(&inode->i_lock);
+		if (!ei->jinode) {
+			if (!jinode) {
+				spin_unlock(&inode->i_lock);
+				return -ENOMEM;
+			}
+			ei->jinode = jinode;
+			jbd2_journal_init_jbd_inode(ei->jinode, inode);
+			jinode = NULL;
+		}
+		spin_unlock(&inode->i_lock);
+		if (unlikely(jinode != NULL))
+			jbd2_free_inode(jinode);
+	}
 	return dquot_file_open(inode, filp);
 }
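
The jinode setup added to ext4_file_open() above uses the classic allocate-outside-the-lock pattern: jbd2_alloc_inode() may sleep, so it runs before i_lock is taken, and the result is installed only if no other opener got there first, with the loser's allocation freed afterwards. A self-contained userspace toy, with malloc and a pthread mutex standing in for the kernel allocation and spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the lazy jinode setup: allocate outside the lock, install
 * under the lock only if nobody beat us to it, free our copy if we lost. */
struct inode_like {
	pthread_mutex_t lock;	/* stands in for inode->i_lock */
	void *jinode;
};

static int attach_jinode(struct inode_like *inode)
{
	void *jinode = NULL;

	if (!inode->jinode)
		jinode = malloc(64);	/* stands in for jbd2_alloc_inode() */

	pthread_mutex_lock(&inode->lock);
	if (!inode->jinode) {
		if (!jinode) {
			pthread_mutex_unlock(&inode->lock);
			return -1;	/* allocation failed (-ENOMEM upstream) */
		}
		inode->jinode = jinode;	/* we won the race */
		jinode = NULL;
	}
	pthread_mutex_unlock(&inode->lock);
	free(jinode);			/* NULL, or we lost the race */
	return 0;
}

int main(void)
{
	struct inode_like inode = { PTHREAD_MUTEX_INITIALIZER, NULL };

	attach_jinode(&inode);
	attach_jinode(&inode);		/* second open reuses the first jinode */
	printf("jinode attached at %p\n", inode.jinode);
	free(inode.jinode);
	return 0;
}
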
 
@@ -188,6 +268,7 @@
 	.fsync		= ext4_sync_file,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
+	.fallocate	= ext4_fallocate,
 };
 
 const struct inode_operations ext4_file_inode_operations = {
@@ -201,7 +282,6 @@
 	.removexattr	= generic_removexattr,
 #endif
 	.check_acl	= ext4_check_acl,
-	.fallocate	= ext4_fallocate,
 	.fiemap		= ext4_fiemap,
 };
 
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index c1a7bc9..7829b28 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -75,7 +75,7 @@
  * to written.
  * The function return the number of pending IOs on success.
  */
-static int flush_completed_IO(struct inode *inode)
+extern int ext4_flush_completed_IO(struct inode *inode)
 {
 	ext4_io_end_t *io;
 	struct ext4_inode_info *ei = EXT4_I(inode);
@@ -169,7 +169,7 @@
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return 0;
 
-	ret = flush_completed_IO(inode);
+	ret = ext4_flush_completed_IO(inode);
 	if (ret < 0)
 		return ret;
 
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1ce240a..eb9097a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1027,7 +1027,7 @@
 	inode->i_generation = sbi->s_next_generation++;
 	spin_unlock(&sbi->s_next_gen_lock);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e659597..9f7f9e4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -39,7 +39,9 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -54,10 +56,17 @@
 					      loff_t new_size)
 {
 	trace_ext4_begin_ordered_truncate(inode, new_size);
-	return jbd2_journal_begin_ordered_truncate(
-					EXT4_SB(inode->i_sb)->s_journal,
-					&EXT4_I(inode)->jinode,
-					new_size);
+	/*
+	 * If jinode is zero, then we never opened the file for
+	 * writing, so there's no need to call
+	 * jbd2_journal_begin_ordered_truncate() since there's no
+	 * outstanding writes we need to flush.
+	 */
+	if (!EXT4_I(inode)->jinode)
+		return 0;
+	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
+						   EXT4_I(inode)->jinode,
+						   new_size);
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset);
@@ -552,7 +561,7 @@
 }
 
 /**
- *	ext4_blks_to_allocate: Look up the block map and count the number
+ *	ext4_blks_to_allocate - Look up the block map and count the number
  *	of direct blocks need to be allocated for the given branch.
  *
  *	@branch: chain of indirect blocks
@@ -591,13 +600,19 @@
 
 /**
  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
+ *	@handle: handle for this transaction
+ *	@inode: inode which needs allocated blocks
+ *	@iblock: the logical block to start allocated at
+ *	@goal: preferred physical block of allocation
  *	@indirect_blks: the number of blocks need to allocate for indirect
  *			blocks
- *
+ *	@blks: number of desired blocks
  *	@new_blocks: on return it will store the new block numbers for
  *	the indirect blocks(if needed) and the first direct block,
- *	@blks:	on return it will store the total number of allocated
- *		direct blocks
+ *	@err: on return it will store the error code
+ *
+ *	This function will return the number of blocks allocated as
+ *	requested by the passed-in parameters.
  */
 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 			     ext4_lblk_t iblock, ext4_fsblk_t goal,
@@ -711,9 +726,11 @@
 
 /**
  *	ext4_alloc_branch - allocate and set up a chain of blocks.
+ *	@handle: handle for this transaction
  *	@inode: owner
  *	@indirect_blks: number of allocated indirect blocks
  *	@blks: number of allocated direct blocks
+ *	@goal: preferred place for allocation
  *	@offsets: offsets (in the blocks) to store the pointers to next.
  *	@branch: place to store the chain in.
  *
@@ -826,6 +843,7 @@
 
 /**
  * ext4_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
  * @inode: owner
  * @block: (logical) number of block we are adding
  * @chain: chain of indirect blocks (with a missing link - see
@@ -1081,7 +1099,7 @@
  * Calculate the number of metadata blocks need to reserve
  * to allocate a block located at @lblock
  */
-static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
+static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		return ext4_ext_calc_metadata_amount(inode, lblock);
@@ -1320,7 +1338,7 @@
 	 * avoid double accounting
 	 */
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
+		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 	/*
 	 * We need to check for EXT4 here because migrate
 	 * could have changed the inode type in between
@@ -1350,7 +1368,7 @@
 			ext4_da_update_reserve_space(inode, retval, 1);
 	}
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
 	up_write((&EXT4_I(inode)->i_data_sem));
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
@@ -1878,7 +1896,7 @@
 /*
  * Reserve a single block located at lblock
  */
-static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
+static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 {
 	int retries = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2239,7 +2257,7 @@
 	 * affects functions in many different parts of the allocation
 	 * call path.  This flag exists primarily because we don't
 	 * want to change *many* call functions, so ext4_map_blocks()
-	 * will set the magic i_delalloc_reserved_flag once the
+	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
 	 * inode's allocation semaphore is taken.
 	 *
 	 * If the blocks in questions were delalloc blocks, set
@@ -3362,7 +3380,7 @@
 	 * doing I/O at all.
 	 *
 	 * We could call write_cache_pages(), and then redirty all of
-	 * the pages by calling redirty_page_for_writeback() but that
+	 * the pages by calling redirty_page_for_writepage() but that
 	 * would be ugly in the extreme.  So instead we would need to
 	 * replicate parts of the code in the above functions,
 	 * simplifying them becuase we wouldn't actually intend to
@@ -3720,8 +3738,7 @@
 retry:
 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
 	if (!io_end) {
-		if (printk_ratelimit())
-			printk(KERN_WARNING "%s: allocation fail\n", __func__);
+		pr_warn_ratelimited("%s: allocation fail\n", __func__);
 		schedule();
 		goto retry;
 	}
@@ -3745,9 +3762,9 @@
  * preallocated extents, and those write extend the file, no need to
  * fall back to buffered IO.
  *
- * For holes, we fallocate those blocks, mark them as unintialized
+ * For holes, we fallocate those blocks, mark them as uninitialized
  * If those blocks were preallocated, we mark sure they are splited, but
- * still keep the range to write as unintialized.
+ * still keep the range to write as uninitialized.
  *
  * The unwrritten extents will be converted to written when DIO is completed.
  * For async direct IO, since the IO may still pending when return, we
@@ -4045,7 +4062,7 @@
 	if (ext4_should_journal_data(inode)) {
 		err = ext4_handle_dirty_metadata(handle, inode, bh);
 	} else {
-		if (ext4_should_order_data(inode))
+		if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
 			err = ext4_jbd2_file_inode(handle, inode);
 		mark_buffer_dirty(bh);
 	}
@@ -4169,6 +4186,7 @@
 {
 	__le32 *p;
 	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+	int	err;
 
 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
 		flags |= EXT4_FREE_BLOCKS_METADATA;
@@ -4184,11 +4202,23 @@
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			ext4_handle_dirty_metadata(handle, inode, bh);
+			err = ext4_handle_dirty_metadata(handle, inode, bh);
+			if (unlikely(err)) {
+				ext4_std_error(inode->i_sb, err);
+				return 1;
+			}
 		}
-		ext4_mark_inode_dirty(handle, inode);
-		ext4_truncate_restart_trans(handle, inode,
-					    blocks_for_truncate(inode));
+		err = ext4_mark_inode_dirty(handle, inode);
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
+		}
+		err = ext4_truncate_restart_trans(handle, inode,
+						  blocks_for_truncate(inode));
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
+		}
 		if (bh) {
 			BUFFER_TRACE(bh, "retaking write access");
 			ext4_journal_get_write_access(handle, bh);
@@ -4349,6 +4379,7 @@
 					(__le32 *) bh->b_data,
 					(__le32 *) bh->b_data + addr_per_block,
 					depth);
+			brelse(bh);
 
 			/*
 			 * Everything below this this pointer has been
@@ -4859,7 +4890,7 @@
 	}
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
 	ei->i_dir_start_lookup = 0;
 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
 	/* We now have enough fields to check if the inode was active or not.
@@ -5118,7 +5149,7 @@
 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
 		goto out_brelse;
 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 	    cpu_to_le32(EXT4_OS_HURD))
 		raw_inode->i_file_acl_high =
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5b4d4e3..d1fe09a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -342,10 +342,15 @@
 /* We create slab caches for groupinfo data structures based on the
  * superblock block size.  There will be one per mounted filesystem for
  * each unique s_blocksize_bits */
-#define NR_GRPINFO_CACHES	\
-	(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
+#define NR_GRPINFO_CACHES 8
 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 
+static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
+	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
+	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
+	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
+};
+
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 					ext4_group_t group);
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
@@ -2414,6 +2419,55 @@
 	return -ENOMEM;
 }
 
+static void ext4_groupinfo_destroy_slabs(void)
+{
+	int i;
+
+	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
+		if (ext4_groupinfo_caches[i])
+			kmem_cache_destroy(ext4_groupinfo_caches[i]);
+		ext4_groupinfo_caches[i] = NULL;
+	}
+}
+
+static int ext4_groupinfo_create_slab(size_t size)
+{
+	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
+	int slab_size;
+	int blocksize_bits = order_base_2(size);
+	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+	struct kmem_cache *cachep;
+
+	if (cache_index >= NR_GRPINFO_CACHES)
+		return -EINVAL;
+
+	if (unlikely(cache_index < 0))
+		cache_index = 0;
+
+	mutex_lock(&ext4_grpinfo_slab_create_mutex);
+	if (ext4_groupinfo_caches[cache_index]) {
+		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+		return 0;	/* Already created */
+	}
+
+	slab_size = offsetof(struct ext4_group_info,
+				bb_counters[blocksize_bits + 2]);
+
+	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
+					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
+					NULL);
+
+	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+	if (!cachep) {
+		printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
+		return -ENOMEM;
+	}
+
+	ext4_groupinfo_caches[cache_index] = cachep;
+
+	return 0;
+}
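
ext4_groupinfo_create_slab() above replaces the per-mount kstrdup()/kmem_cache_name() dance with a fixed table of eight slab names, one per supported block size, selected by log2(blocksize) - EXT4_MIN_BLOCK_LOG_SIZE and guarded by a local mutex so concurrent mounts create each cache only once. A self-contained toy of just the index/name selection:

#include <stdio.h>

#define MIN_BLOCK_LOG_SIZE 10	/* 1k blocks, as in EXT4_MIN_BLOCK_LOG_SIZE */
#define NR_GRPINFO_CACHES 8

static const char *slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k",  "ext4_groupinfo_2k",  "ext4_groupinfo_4k",
	"ext4_groupinfo_8k",  "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

/* Smallest n with 2^n >= size, like the kernel's order_base_2(). */
static int order_base_2(unsigned long size)
{
	int bits = 0;

	while ((1UL << bits) < size)
		bits++;
	return bits;
}

static const char *slab_name_for(unsigned long blocksize)
{
	int index = order_base_2(blocksize) - MIN_BLOCK_LOG_SIZE;

	if (index < 0 || index >= NR_GRPINFO_CACHES)
		return NULL;	/* unsupported block size in this toy */
	return slab_names[index];
}

int main(void)
{
	printf("%s\n", slab_name_for(4096));	/* ext4_groupinfo_4k */
	printf("%s\n", slab_name_for(65536));	/* ext4_groupinfo_64k */
	return 0;
}
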
+
 int ext4_mb_init(struct super_block *sb, int needs_recovery)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2421,9 +2475,6 @@
 	unsigned offset;
 	unsigned max;
 	int ret;
-	int cache_index;
-	struct kmem_cache *cachep;
-	char *namep = NULL;
 
 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
 
@@ -2440,30 +2491,9 @@
 		goto out;
 	}
 
-	cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
-	cachep = ext4_groupinfo_caches[cache_index];
-	if (!cachep) {
-		char name[32];
-		int len = offsetof(struct ext4_group_info,
-					bb_counters[sb->s_blocksize_bits + 2]);
-
-		sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
-		namep = kstrdup(name, GFP_KERNEL);
-		if (!namep) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		/* Need to free the kmem_cache_name() when we
-		 * destroy the slab */
-		cachep = kmem_cache_create(namep, len, 0,
-					     SLAB_RECLAIM_ACCOUNT, NULL);
-		if (!cachep) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		ext4_groupinfo_caches[cache_index] = cachep;
-	}
+	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
+	if (ret < 0)
+		goto out;
 
 	/* order 0 is regular bitmap */
 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
@@ -2520,7 +2550,6 @@
 	if (ret) {
 		kfree(sbi->s_mb_offsets);
 		kfree(sbi->s_mb_maxs);
-		kfree(namep);
 	}
 	return ret;
 }
@@ -2608,18 +2637,12 @@
 static inline int ext4_issue_discard(struct super_block *sb,
 		ext4_group_t block_group, ext4_grpblk_t block, int count)
 {
-	int ret;
 	ext4_fsblk_t discard_block;
 
 	discard_block = block + ext4_group_first_block_no(sb, block_group);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
-	if (ret == -EOPNOTSUPP) {
-		ext4_warning(sb, "discard not supported, disabling");
-		clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
-	}
-	return ret;
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 }
 
 /*
@@ -2631,7 +2654,7 @@
 	struct super_block *sb = journal->j_private;
 	struct ext4_buddy e4b;
 	struct ext4_group_info *db;
-	int err, count = 0, count2 = 0;
+	int err, ret, count = 0, count2 = 0;
 	struct ext4_free_data *entry;
 	struct list_head *l, *ltmp;
 
@@ -2641,9 +2664,15 @@
 		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
 			 entry->count, entry->group, entry);
 
-		if (test_opt(sb, DISCARD))
-			ext4_issue_discard(sb, entry->group,
+		if (test_opt(sb, DISCARD)) {
+			ret = ext4_issue_discard(sb, entry->group,
 					entry->start_blk, entry->count);
+			if (unlikely(ret == -EOPNOTSUPP)) {
+				ext4_warning(sb, "discard not supported, "
+						 "disabling");
+				clear_opt(sb, DISCARD);
+			}
+		}
 
 		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
 		/* we expect to find existing buddy because it's pinned */
@@ -2734,7 +2763,6 @@
 
 void ext4_exit_mballoc(void)
 {
-	int i;
 	/*
 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
 	 * before destroying the slab cache.
@@ -2743,15 +2771,7 @@
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
 	kmem_cache_destroy(ext4_free_ext_cachep);
-
-	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
-		struct kmem_cache *cachep = ext4_groupinfo_caches[i];
-		if (cachep) {
-			char *name = (char *)kmem_cache_name(cachep);
-			kmem_cache_destroy(cachep);
-			kfree(name);
-		}
-	}
+	ext4_groupinfo_destroy_slabs();
 	ext4_remove_debugfs_entry();
 }
 
@@ -3881,19 +3901,6 @@
 	}
 }
 
-/*
- * finds all preallocated spaces and return blocks being freed to them
- * if preallocated space becomes full (no block is used from the space)
- * then the function frees space in buddy
- * XXX: at the moment, truncate (which is the only way to free blocks)
- * discards all preallocations
- */
-static void ext4_mb_return_to_preallocation(struct inode *inode,
-					struct ext4_buddy *e4b,
-					sector_t block, int count)
-{
-	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
-}
 #ifdef CONFIG_EXT4_DEBUG
 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 {
@@ -4283,7 +4290,7 @@
 	 * EDQUOT check, as blocks and quotas have been already
 	 * reserved when data being copied into pagecache.
 	 */
-	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
 	else {
 		/* Without delayed allocation we need to verify
@@ -4380,7 +4387,8 @@
 	if (inquota && ar->len < inquota)
 		dquot_free_block(ar->inode, inquota - ar->len);
 	if (!ar->len) {
-		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+		if (!ext4_test_inode_state(ar->inode,
+					   EXT4_STATE_DELALLOC_RESERVED))
 			/* release all the reserved blocks if non delalloc */
 			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
 						reserv_blks);
@@ -4626,7 +4634,11 @@
 		 * blocks being freed are metadata. these blocks shouldn't
 		 * be used until this transaction is committed
 		 */
-		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+		new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+		if (!new_entry) {
+			err = -ENOMEM;
+			goto error_return;
+		}
 		new_entry->start_blk = bit;
 		new_entry->group  = block_group;
 		new_entry->count = count;
@@ -4643,7 +4655,6 @@
 		ext4_lock_group(sb, block_group);
 		mb_clear_bits(bitmap_bh->b_data, bit, count);
 		mb_free_blocks(inode, &e4b, bit, count);
-		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
 	}
 
 	ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4718,8 +4729,6 @@
 	ext4_unlock_group(sb, group);
 
 	ret = ext4_issue_discard(sb, group, start, count);
-	if (ret)
-		ext4_std_error(sb, ret);
 
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
@@ -4819,6 +4828,8 @@
 	ext4_group_t group, ngroups = ext4_get_groups_count(sb);
 	ext4_grpblk_t cnt = 0, first_block, last_block;
 	uint64_t start, len, minlen, trimmed;
+	ext4_fsblk_t first_data_blk =
+			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 	int ret = 0;
 
 	start = range->start >> sb->s_blocksize_bits;
@@ -4828,6 +4839,10 @@
 
 	if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
 		return -EINVAL;
+	if (start < first_data_blk) {
+		len -= first_data_blk - start;
+		start = first_data_blk;
+	}
 
 	/* Determine first and last group to examine based on start and len */
 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
@@ -4851,7 +4866,7 @@
 		if (len >= EXT4_BLOCKS_PER_GROUP(sb))
 			len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
 		else
-			last_block = len;
+			last_block = first_block + len;
 
 		if (e4b.bd_info->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, &e4b, first_block,
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 25f3a97..b0a126f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -496,7 +496,7 @@
 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
 	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
-				   S_IFREG, 0, goal);
+				   S_IFREG, NULL, goal);
 	if (IS_ERR(tmp_inode)) {
 		retval = -ENOMEM;
 		ext4_journal_stop(handle);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index dc40e75..5485390 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -581,9 +581,9 @@
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
-		if (!ext4_check_dir_entry(dir, de, bh,
-					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
-						+((char *)de - bh->b_data))) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+					 + ((char *)de - bh->b_data))) {
 			/* On error, skip the f_pos to the next block. */
 			dir_file->f_pos = (dir_file->f_pos |
 					(dir->i_sb->s_blocksize - 1)) + 1;
@@ -820,7 +820,7 @@
 		if ((char *) de + namelen <= dlimit &&
 		    ext4_match (namelen, name, de)) {
 			/* found a match - just to be sure, do a full check */
-			if (!ext4_check_dir_entry(dir, de, bh, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -1;
 			*res_dir = de;
 			return 1;
@@ -1036,7 +1036,7 @@
 			return ERR_PTR(-EIO);
 		}
 		inode = ext4_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				EXT4_ERROR_INODE(dir,
 						 "deleted inode referenced: %u",
@@ -1269,7 +1269,7 @@
 		de = (struct ext4_dir_entry_2 *)bh->b_data;
 		top = bh->b_data + blocksize - reclen;
 		while ((char *) de <= top) {
-			if (!ext4_check_dir_entry(dir, de, bh, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -EIO;
 			if (ext4_match(namelen, name, de))
 				return -EEXIST;
@@ -1602,7 +1602,11 @@
 			if (err)
 				goto journal_error;
 		}
-		ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+		err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+		if (err) {
+			ext4_std_error(inode->i_sb, err);
+			goto cleanup;
+		}
 	}
 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
 	if (!de)
@@ -1630,17 +1634,21 @@
 {
 	struct ext4_dir_entry_2 *de, *pde;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
-	int i;
+	int i, err;
 
 	i = 0;
 	pde = NULL;
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
 	while (i < bh->b_size) {
-		if (!ext4_check_dir_entry(dir, de, bh, i))
+		if (ext4_check_dir_entry(dir, NULL, de, bh, i))
 			return -EIO;
 		if (de == de_del)  {
 			BUFFER_TRACE(bh, "get_write_access");
-			ext4_journal_get_write_access(handle, bh);
+			err = ext4_journal_get_write_access(handle, bh);
+			if (unlikely(err)) {
+				ext4_std_error(dir->i_sb, err);
+				return err;
+			}
 			if (pde)
 				pde->rec_len = ext4_rec_len_to_disk(
 					ext4_rec_len_from_disk(pde->rec_len,
@@ -1652,7 +1660,11 @@
 				de->inode = 0;
 			dir->i_version++;
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			ext4_handle_dirty_metadata(handle, dir, bh);
+			err = ext4_handle_dirty_metadata(handle, dir, bh);
+			if (unlikely(err)) {
+				ext4_std_error(dir->i_sb, err);
+				return err;
+			}
 			return 0;
 		}
 		i += ext4_rec_len_from_disk(de->rec_len, blocksize);
@@ -1789,7 +1801,7 @@
 {
 	handle_t *handle;
 	struct inode *inode;
-	struct buffer_head *dir_block;
+	struct buffer_head *dir_block = NULL;
 	struct ext4_dir_entry_2 *de;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
 	int err, retries = 0;
@@ -1822,7 +1834,9 @@
 	if (!dir_block)
 		goto out_clear_inode;
 	BUFFER_TRACE(dir_block, "get_write_access");
-	ext4_journal_get_write_access(handle, dir_block);
+	err = ext4_journal_get_write_access(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
 	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
 	de->inode = cpu_to_le32(inode->i_ino);
 	de->name_len = 1;
@@ -1839,10 +1853,12 @@
 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
 	inode->i_nlink = 2;
 	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-	ext4_handle_dirty_metadata(handle, dir, dir_block);
-	brelse(dir_block);
-	ext4_mark_inode_dirty(handle, inode);
-	err = ext4_add_entry(handle, dentry, inode);
+	err = ext4_handle_dirty_metadata(handle, dir, dir_block);
+	if (err)
+		goto out_clear_inode;
+	err = ext4_mark_inode_dirty(handle, inode);
+	if (!err)
+		err = ext4_add_entry(handle, dentry, inode);
 	if (err) {
 out_clear_inode:
 		clear_nlink(inode);
@@ -1853,10 +1869,13 @@
 	}
 	ext4_inc_count(handle, dir);
 	ext4_update_dx_flag(dir);
-	ext4_mark_inode_dirty(handle, dir);
+	err = ext4_mark_inode_dirty(handle, dir);
+	if (err)
+		goto out_clear_inode;
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
 out_stop:
+	brelse(dir_block);
 	ext4_journal_stop(handle);
 	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
 		goto retry;
@@ -1919,7 +1938,7 @@
 			}
 			de = (struct ext4_dir_entry_2 *) bh->b_data;
 		}
-		if (!ext4_check_dir_entry(inode, de, bh, offset)) {
+		if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
 			de = (struct ext4_dir_entry_2 *)(bh->b_data +
 							 sb->s_blocksize);
 			offset = (offset | (sb->s_blocksize - 1)) + 1;
@@ -2407,7 +2426,11 @@
 					ext4_current_time(new_dir);
 		ext4_mark_inode_dirty(handle, new_dir);
 		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-		ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+		retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+		if (unlikely(retval)) {
+			ext4_std_error(new_dir->i_sb, retval);
+			goto end_rename;
+		}
 		brelse(new_bh);
 		new_bh = NULL;
 	}
@@ -2459,7 +2482,11 @@
 		PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
 						cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-		ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+		retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+		if (retval) {
+			ext4_std_error(old_dir->i_sb, retval);
+			goto end_rename;
+		}
 		ext4_dec_count(handle, old_dir);
 		if (new_inode) {
 			/* checked empty_dir above, can't have another parent,
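
A recurring theme in the namei.c hunks above: ext4_check_dir_entry() now returns nonzero on error (so callers test for truth rather than falsity) and takes an extra argument (NULL here), and the return values of ext4_journal_get_write_access() and ext4_handle_dirty_metadata() are checked and propagated instead of being dropped. A minimal userspace sketch of the propagation pattern, with stub helpers standing in for the jbd2/ext4 calls:

/* Minimal sketch of the pattern these hunks apply: journalling helpers
 * that used to be called fire-and-forget now have their return codes
 * checked and propagated.  The helpers below are stand-ins, not the
 * real jbd2/ext4 API. */
#include <stdio.h>
#include <errno.h>

static int get_write_access(const char *what)
{
	/* Pretend the journal refuses one particular buffer. */
	return (what[0] == 'x') ? -EIO : 0;
}

static int mark_dirty(const char *what)
{
	(void)what;
	return 0;
}

static int update_entry(const char *buf)
{
	int err;

	err = get_write_access(buf);	/* previously ignored */
	if (err)
		return err;		/* propagate instead of continuing */

	/* ... modify the buffer ... */

	err = mark_dirty(buf);		/* previously ignored */
	if (err)
		return err;
	return 0;
}

int main(void)
{
	printf("ok buffer:  %d\n", update_entry("ok"));
	printf("bad buffer: %d\n", update_entry("xfail"));
	return 0;
}
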
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index beacce1..955cc30 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -32,25 +32,16 @@
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
-#define WQ_HASH_SZ		37
-#define to_ioend_wq(v)	(&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
-static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
-
 int __init ext4_init_pageio(void)
 {
-	int i;
-
 	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
 	if (io_page_cachep == NULL)
 		return -ENOMEM;
 	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
-	if (io_page_cachep == NULL) {
+	if (io_end_cachep == NULL) {
 		kmem_cache_destroy(io_page_cachep);
 		return -ENOMEM;
 	}
-	for (i = 0; i < WQ_HASH_SZ; i++)
-		init_waitqueue_head(&ioend_wq[i]);
-
 	return 0;
 }
 
@@ -62,7 +53,7 @@
 
 void ext4_ioend_wait(struct inode *inode)
 {
-	wait_queue_head_t *wq = to_ioend_wq(inode);
+	wait_queue_head_t *wq = ext4_ioend_wq(inode);
 
 	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
 }
@@ -87,7 +78,7 @@
 	for (i = 0; i < io->num_io_pages; i++)
 		put_io_page(io->pages[i]);
 	io->num_io_pages = 0;
-	wq = to_ioend_wq(io->inode);
+	wq = ext4_ioend_wq(io->inode);
 	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
 	    waitqueue_active(wq))
 		wake_up_all(wq);
@@ -102,6 +93,7 @@
 	struct inode *inode = io->inode;
 	loff_t offset = io->offset;
 	ssize_t size = io->size;
+	wait_queue_head_t *wq;
 	int ret = 0;
 
 	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
@@ -126,7 +118,16 @@
 	if (io->iocb)
 		aio_complete(io->iocb, io->result, 0);
 	/* clear the DIO AIO unwritten flag */
-	io->flag &= ~EXT4_IO_END_UNWRITTEN;
+	if (io->flag & EXT4_IO_END_UNWRITTEN) {
+		io->flag &= ~EXT4_IO_END_UNWRITTEN;
+		/* Wake up anyone waiting on unwritten extent conversion */
+		wq = ext4_ioend_wq(io->inode);
+		if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) &&
+		    waitqueue_active(wq)) {
+			wake_up_all(wq);
+		}
+	}
+
 	return ret;
 }
 
@@ -158,11 +159,8 @@
 
 ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
 {
-	ext4_io_end_t *io = NULL;
-
-	io = kmem_cache_alloc(io_end_cachep, flags);
+	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
 	if (io) {
-		memset(io, 0, sizeof(*io));
 		atomic_inc(&EXT4_I(inode)->i_ioend_count);
 		io->inode = inode;
 		INIT_WORK(&io->work, ext4_end_io_work);
@@ -193,6 +191,7 @@
 	struct inode *inode;
 	unsigned long flags;
 	int i;
+	sector_t bi_sector = bio->bi_sector;
 
 	BUG_ON(!io_end);
 	bio->bi_private = NULL;
@@ -210,9 +209,7 @@
 		if (error)
 			SetPageError(page);
 		BUG_ON(!head);
-		if (head->b_size == PAGE_CACHE_SIZE)
-			clear_buffer_dirty(head);
-		else {
+		if (head->b_size != PAGE_CACHE_SIZE) {
 			loff_t offset;
 			loff_t io_end_offset = io_end->offset + io_end->size;
 
@@ -224,7 +221,6 @@
 					if (error)
 						buffer_io_error(bh);
 
-					clear_buffer_dirty(bh);
 				}
 				if (buffer_delay(bh))
 					partial_write = 1;
@@ -260,7 +256,7 @@
 			     (unsigned long long) io_end->offset,
 			     (long) io_end->size,
 			     (unsigned long long)
-			     bio->bi_sector >> (inode->i_blkbits - 9));
+			     bi_sector >> (inode->i_blkbits - 9));
 	}
 
 	/* Add the io_end to per-inode completed io list*/
@@ -383,6 +379,7 @@
 
 	blocksize = 1 << inode->i_blkbits;
 
+	BUG_ON(!PageLocked(page));
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 	ClearPageError(page);
@@ -400,12 +397,14 @@
 	for (bh = head = page_buffers(page), block_start = 0;
 	     bh != head || !block_start;
 	     block_start = block_end, bh = bh->b_this_page) {
+
 		block_end = block_start + blocksize;
 		if (block_start >= len) {
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			continue;
 		}
+		clear_buffer_dirty(bh);
 		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
 		if (ret) {
 			/*
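
The page-io.c hunks above drop the file-local wait-queue hash in favour of a shared ext4_ioend_wq() lookup (set up elsewhere in this pull), switch to kmem_cache_zalloc(), and wake waiters when unwritten-extent conversion finishes. The core idea of the hashed wait queue is to index a small fixed table by the inode pointer; a toy version of that mapping, with illustrative sizes:

/* Sketch of the pointer-hashed wait-queue idea behind ext4_ioend_wq():
 * rather than one wait queue per inode, a small shared table is indexed
 * by hashing the inode pointer.  The table size and struct are
 * illustrative; the kernel version lives in the ext4 superblock code. */
#include <stdio.h>

#define WQ_HASH_SZ 37

struct fake_inode { unsigned long ino; };

static int waitqueue_index(const void *inode)
{
	return (int)(((unsigned long)inode) % WQ_HASH_SZ);
}

int main(void)
{
	struct fake_inode a, b;

	/* Different inodes may share a slot; that only costs a spurious
	 * wakeup, never a missed one, because waiters recheck their
	 * condition after waking. */
	printf("inode a -> slot %d\n", waitqueue_index(&a));
	printf("inode b -> slot %d\n", waitqueue_index(&b));
	return 0;
}
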
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 981c847..3ecc6e4 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -220,7 +220,11 @@
 		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
 		set_buffer_uptodate(gdb);
 		unlock_buffer(gdb);
-		ext4_handle_dirty_metadata(handle, NULL, gdb);
+		err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+		if (unlikely(err)) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext4_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -258,7 +262,11 @@
 
 	ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
 			     bh->b_data);
-	ext4_handle_dirty_metadata(handle, NULL, bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bh);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_bh;
+	}
 	brelse(bh);
 	/* Mark unused entries in inode bitmap used */
 	ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
@@ -270,7 +278,9 @@
 
 	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
 			     bh->b_data);
-	ext4_handle_dirty_metadata(handle, NULL, bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bh);
+	if (unlikely(err))
+		ext4_std_error(sb, err);
 exit_bh:
 	brelse(bh);
 
@@ -422,17 +432,21 @@
 		goto exit_dind;
 	}
 
-	if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
+	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+	if (unlikely(err))
 		goto exit_dind;
 
-	if ((err = ext4_journal_get_write_access(handle, *primary)))
+	err = ext4_journal_get_write_access(handle, *primary);
+	if (unlikely(err))
 		goto exit_sbh;
 
-	if ((err = ext4_journal_get_write_access(handle, dind)))
-		goto exit_primary;
+	err = ext4_journal_get_write_access(handle, dind);
+	if (unlikely(err))
+		ext4_std_error(sb, err);
 
 	/* ext4_reserve_inode_write() gets a reference on the iloc */
-	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
+	err = ext4_reserve_inode_write(handle, inode, &iloc);
+	if (unlikely(err))
 		goto exit_dindj;
 
 	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
@@ -454,12 +468,20 @@
 	 * reserved inode, and will become GDT blocks (primary and backup).
 	 */
 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
-	ext4_handle_dirty_metadata(handle, NULL, dind);
-	brelse(dind);
+	err = ext4_handle_dirty_metadata(handle, NULL, dind);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_inode;
+	}
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
 	ext4_mark_iloc_dirty(handle, inode, &iloc);
 	memset((*primary)->b_data, 0, sb->s_blocksize);
-	ext4_handle_dirty_metadata(handle, NULL, *primary);
+	err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_inode;
+	}
+	brelse(dind);
 
 	o_group_desc = EXT4_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
@@ -470,19 +492,19 @@
 	kfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	if (err)
+		ext4_std_error(sb, err);
 
-	return 0;
+	return err;
 
 exit_inode:
 	/* ext4_journal_release_buffer(handle, iloc.bh); */
 	brelse(iloc.bh);
 exit_dindj:
 	/* ext4_journal_release_buffer(handle, dind); */
-exit_primary:
-	/* ext4_journal_release_buffer(handle, *primary); */
 exit_sbh:
-	/* ext4_journal_release_buffer(handle, *primary); */
+	/* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
 exit_dind:
 	brelse(dind);
 exit_bh:
@@ -665,7 +687,9 @@
 			memset(bh->b_data + size, 0, rest);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
-		ext4_handle_dirty_metadata(handle, NULL, bh);
+		err = ext4_handle_dirty_metadata(handle, NULL, bh);
+		if (unlikely(err))
+			ext4_std_error(sb, err);
 		brelse(bh);
 	}
 	if ((err2 = ext4_journal_stop(handle)) && !err)
@@ -883,7 +907,11 @@
 	/* Update the global fs size fields */
 	sbi->s_groups_count++;
 
-	ext4_handle_dirty_metadata(handle, NULL, primary);
+	err = ext4_handle_dirty_metadata(handle, NULL, primary);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_journal;
+	}
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cd37f9d..f6a318f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -77,6 +77,7 @@
 		       const char *dev_name, void *data);
 static void ext4_destroy_lazyinit_thread(void);
 static void ext4_unregister_li_request(struct super_block *sb);
+static void ext4_clear_request_list(void);
 
 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
@@ -388,13 +389,14 @@
 void __ext4_error(struct super_block *sb, const char *function,
 		  unsigned int line, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
-	       sb->s_id, function, line, current->comm);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+	       sb->s_id, function, line, current->comm, &vaf);
 	va_end(args);
 
 	ext4_handle_error(sb);
@@ -405,28 +407,31 @@
 		      const char *fmt, ...)
 {
 	va_list args;
+	struct va_format vaf;
 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
 
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	es->s_last_error_block = cpu_to_le64(block);
 	save_error_info(inode->i_sb, function, line);
 	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
 	       inode->i_sb->s_id, function, line, inode->i_ino);
 	if (block)
-		printk("block %llu: ", block);
-	printk("comm %s: ", current->comm);
-	vprintk(fmt, args);
-	printk("\n");
+		printk(KERN_CONT "block %llu: ", block);
+	printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
 	va_end(args);
 
 	ext4_handle_error(inode->i_sb);
 }
 
 void ext4_error_file(struct file *file, const char *function,
-		     unsigned int line, const char *fmt, ...)
+		     unsigned int line, ext4_fsblk_t block,
+		     const char *fmt, ...)
 {
 	va_list args;
+	struct va_format vaf;
 	struct ext4_super_block *es;
 	struct inode *inode = file->f_dentry->d_inode;
 	char pathname[80], *path;
@@ -434,17 +439,18 @@
 	es = EXT4_SB(inode->i_sb)->s_es;
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	save_error_info(inode->i_sb, function, line);
-	va_start(args, fmt);
 	path = d_path(&(file->f_path), pathname, sizeof(pathname));
-	if (!path)
+	if (IS_ERR(path))
 		path = "(unknown)";
 	printk(KERN_CRIT
-	       "EXT4-fs error (device %s): %s:%d: inode #%lu "
-	       "(comm %s path %s): ",
-	       inode->i_sb->s_id, function, line, inode->i_ino,
-	       current->comm, path);
-	vprintk(fmt, args);
-	printk("\n");
+	       "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
+	       inode->i_sb->s_id, function, line, inode->i_ino);
+	if (block)
+		printk(KERN_CONT "block %llu: ", block);
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
 	va_end(args);
 
 	ext4_handle_error(inode->i_sb);
@@ -543,28 +549,29 @@
 		panic("EXT4-fs panic from previous error\n");
 }
 
-void ext4_msg (struct super_block * sb, const char *prefix,
-		   const char *fmt, ...)
+void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
 	va_end(args);
 }
 
 void __ext4_warning(struct super_block *sb, const char *function,
 		    unsigned int line, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
-	       sb->s_id, function, line);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
+	       sb->s_id, function, line, &vaf);
 	va_end(args);
 }
 
@@ -575,21 +582,25 @@
 __releases(bitlock)
 __acquires(bitlock)
 {
+	struct va_format vaf;
 	va_list args;
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
 	es->s_last_error_ino = cpu_to_le32(ino);
 	es->s_last_error_block = cpu_to_le64(block);
 	__save_error_info(sb, function, line);
+
 	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
 	       sb->s_id, function, line, grp);
 	if (ino)
-		printk("inode %lu: ", ino);
+		printk(KERN_CONT "inode %lu: ", ino);
 	if (block)
-		printk("block %llu:", (unsigned long long) block);
-	vprintk(fmt, args);
-	printk("\n");
+		printk(KERN_CONT "block %llu:", (unsigned long long) block);
+	printk(KERN_CONT "%pV\n", &vaf);
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_CONT)) {
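
The super.c hunks above convert the error/warning helpers to %pV and struct va_format, so each message is emitted by a single printk (with KERN_CONT only for the optional fragments) instead of a prefix printk, a vprintk and a trailing newline that concurrent CPUs could interleave. A userspace analogue of the same idea, with vsnprintf standing in for the kernel's %pV handling:

/* Userspace analogue of the %pV conversion in these hunks: the whole
 * line is produced in one call so concurrent messages cannot
 * interleave.  vsnprintf stands in for the kernel's %pV handling. */
#include <stdarg.h>
#include <stdio.h>

static void fs_error(const char *dev, const char *func, int line,
		     const char *fmt, ...)
{
	char msg[256];
	va_list args;

	va_start(args, fmt);
	vsnprintf(msg, sizeof(msg), fmt, args);
	va_end(args);

	/* One line instead of three separate prints. */
	fprintf(stderr, "EXT4-fs error (device %s): %s:%d: %s\n",
		dev, func, line, msg);
}

int main(void)
{
	fs_error("sda1", "ext4_lookup", 1036,
		 "deleted inode referenced: %u", 12345u);
	return 0;
}
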
@@ -647,7 +658,7 @@
 	struct block_device *bdev;
 	char b[BDEVNAME_SIZE];
 
-	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
 	if (IS_ERR(bdev))
 		goto fail;
 	return bdev;
@@ -663,8 +674,7 @@
  */
 static int ext4_blkdev_put(struct block_device *bdev)
 {
-	bd_release(bdev);
-	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
@@ -808,27 +818,22 @@
 	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
 	INIT_LIST_HEAD(&ei->i_prealloc_list);
 	spin_lock_init(&ei->i_prealloc_lock);
-	/*
-	 * Note:  We can be called before EXT4_SB(sb)->s_journal is set,
-	 * therefore it can be null here.  Don't check it, just initialize
-	 * jinode.
-	 */
-	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
 	ei->i_reserved_data_blocks = 0;
 	ei->i_reserved_meta_blocks = 0;
 	ei->i_allocated_meta_blocks = 0;
 	ei->i_da_metadata_calc_len = 0;
-	ei->i_delalloc_reserved_flag = 0;
 	spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA
 	ei->i_reserved_quota = 0;
 #endif
+	ei->jinode = NULL;
 	INIT_LIST_HEAD(&ei->i_completed_io_list);
 	spin_lock_init(&ei->i_completed_io_lock);
 	ei->cur_aio_dio = NULL;
 	ei->i_sync_tid = 0;
 	ei->i_datasync_tid = 0;
 	atomic_set(&ei->i_ioend_count, 0);
+	atomic_set(&ei->i_aiodio_unwritten, 0);
 
 	return &ei->vfs_inode;
 }
@@ -898,9 +903,12 @@
 	end_writeback(inode);
 	dquot_drop(inode);
 	ext4_discard_preallocations(inode);
-	if (EXT4_JOURNAL(inode))
-		jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
-				       &EXT4_I(inode)->jinode);
+	if (EXT4_I(inode)->jinode) {
+		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
+					       EXT4_I(inode)->jinode);
+		jbd2_free_inode(EXT4_I(inode)->jinode);
+		EXT4_I(inode)->jinode = NULL;
+	}
 }
 
 static inline void ext4_show_quota_options(struct seq_file *seq,
@@ -1155,7 +1163,7 @@
 static int ext4_mark_dquot_dirty(struct dquot *dquot);
 static int ext4_write_info(struct super_block *sb, int type);
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
-				char *path);
+			 struct path *path);
 static int ext4_quota_off(struct super_block *sb, int type);
 static int ext4_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
@@ -1393,7 +1401,7 @@
 		sbi->s_qf_names[qtype] = NULL;
 		return 0;
 	}
-	set_opt(sbi->s_mount_opt, QUOTA);
+	set_opt(sb, QUOTA);
 	return 1;
 }
 
@@ -1448,21 +1456,21 @@
 		switch (token) {
 		case Opt_bsd_df:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			clear_opt(sbi->s_mount_opt, MINIX_DF);
+			clear_opt(sb, MINIX_DF);
 			break;
 		case Opt_minix_df:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			set_opt(sbi->s_mount_opt, MINIX_DF);
+			set_opt(sb, MINIX_DF);
 
 			break;
 		case Opt_grpid:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			set_opt(sbi->s_mount_opt, GRPID);
+			set_opt(sb, GRPID);
 
 			break;
 		case Opt_nogrpid:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			clear_opt(sbi->s_mount_opt, GRPID);
+			clear_opt(sb, GRPID);
 
 			break;
 		case Opt_resuid:
@@ -1480,38 +1488,38 @@
 			/* *sb_block = match_int(&args[0]); */
 			break;
 		case Opt_err_panic:
-			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt(sbi->s_mount_opt, ERRORS_RO);
-			set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+			clear_opt(sb, ERRORS_CONT);
+			clear_opt(sb, ERRORS_RO);
+			set_opt(sb, ERRORS_PANIC);
 			break;
 		case Opt_err_ro:
-			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt(sbi->s_mount_opt, ERRORS_RO);
+			clear_opt(sb, ERRORS_CONT);
+			clear_opt(sb, ERRORS_PANIC);
+			set_opt(sb, ERRORS_RO);
 			break;
 		case Opt_err_cont:
-			clear_opt(sbi->s_mount_opt, ERRORS_RO);
-			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt(sbi->s_mount_opt, ERRORS_CONT);
+			clear_opt(sb, ERRORS_RO);
+			clear_opt(sb, ERRORS_PANIC);
+			set_opt(sb, ERRORS_CONT);
 			break;
 		case Opt_nouid32:
-			set_opt(sbi->s_mount_opt, NO_UID32);
+			set_opt(sb, NO_UID32);
 			break;
 		case Opt_debug:
-			set_opt(sbi->s_mount_opt, DEBUG);
+			set_opt(sb, DEBUG);
 			break;
 		case Opt_oldalloc:
-			set_opt(sbi->s_mount_opt, OLDALLOC);
+			set_opt(sb, OLDALLOC);
 			break;
 		case Opt_orlov:
-			clear_opt(sbi->s_mount_opt, OLDALLOC);
+			clear_opt(sb, OLDALLOC);
 			break;
 #ifdef CONFIG_EXT4_FS_XATTR
 		case Opt_user_xattr:
-			set_opt(sbi->s_mount_opt, XATTR_USER);
+			set_opt(sb, XATTR_USER);
 			break;
 		case Opt_nouser_xattr:
-			clear_opt(sbi->s_mount_opt, XATTR_USER);
+			clear_opt(sb, XATTR_USER);
 			break;
 #else
 		case Opt_user_xattr:
@@ -1521,10 +1529,10 @@
 #endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 		case Opt_acl:
-			set_opt(sbi->s_mount_opt, POSIX_ACL);
+			set_opt(sb, POSIX_ACL);
 			break;
 		case Opt_noacl:
-			clear_opt(sbi->s_mount_opt, POSIX_ACL);
+			clear_opt(sb, POSIX_ACL);
 			break;
 #else
 		case Opt_acl:
@@ -1543,7 +1551,7 @@
 					 "Cannot specify journal on remount");
 				return 0;
 			}
-			set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
+			set_opt(sb, UPDATE_JOURNAL);
 			break;
 		case Opt_journal_dev:
 			if (is_remount) {
@@ -1556,14 +1564,14 @@
 			*journal_devnum = option;
 			break;
 		case Opt_journal_checksum:
-			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+			set_opt(sb, JOURNAL_CHECKSUM);
 			break;
 		case Opt_journal_async_commit:
-			set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
-			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+			set_opt(sb, JOURNAL_ASYNC_COMMIT);
+			set_opt(sb, JOURNAL_CHECKSUM);
 			break;
 		case Opt_noload:
-			set_opt(sbi->s_mount_opt, NOLOAD);
+			set_opt(sb, NOLOAD);
 			break;
 		case Opt_commit:
 			if (match_int(&args[0], &option))
@@ -1606,15 +1614,15 @@
 					return 0;
 				}
 			} else {
-				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
+				clear_opt(sb, DATA_FLAGS);
 				sbi->s_mount_opt |= data_opt;
 			}
 			break;
 		case Opt_data_err_abort:
-			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			set_opt(sb, DATA_ERR_ABORT);
 			break;
 		case Opt_data_err_ignore:
-			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			clear_opt(sb, DATA_ERR_ABORT);
 			break;
 #ifdef CONFIG_QUOTA
 		case Opt_usrjquota:
@@ -1654,12 +1662,12 @@
 			break;
 		case Opt_quota:
 		case Opt_usrquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, USRQUOTA);
+			set_opt(sb, QUOTA);
+			set_opt(sb, USRQUOTA);
 			break;
 		case Opt_grpquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, GRPQUOTA);
+			set_opt(sb, QUOTA);
+			set_opt(sb, GRPQUOTA);
 			break;
 		case Opt_noquota:
 			if (sb_any_quota_loaded(sb)) {
@@ -1667,9 +1675,9 @@
 					"options when quota turned on");
 				return 0;
 			}
-			clear_opt(sbi->s_mount_opt, QUOTA);
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
+			clear_opt(sb, QUOTA);
+			clear_opt(sb, USRQUOTA);
+			clear_opt(sb, GRPQUOTA);
 			break;
 #else
 		case Opt_quota:
@@ -1695,7 +1703,7 @@
 			sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
 			break;
 		case Opt_nobarrier:
-			clear_opt(sbi->s_mount_opt, BARRIER);
+			clear_opt(sb, BARRIER);
 			break;
 		case Opt_barrier:
 			if (args[0].from) {
@@ -1704,9 +1712,9 @@
 			} else
 				option = 1;	/* No argument, default to 1 */
 			if (option)
-				set_opt(sbi->s_mount_opt, BARRIER);
+				set_opt(sb, BARRIER);
 			else
-				clear_opt(sbi->s_mount_opt, BARRIER);
+				clear_opt(sb, BARRIER);
 			break;
 		case Opt_ignore:
 			break;
@@ -1730,17 +1738,17 @@
 				 "Ignoring deprecated bh option");
 			break;
 		case Opt_i_version:
-			set_opt(sbi->s_mount_opt, I_VERSION);
+			set_opt(sb, I_VERSION);
 			sb->s_flags |= MS_I_VERSION;
 			break;
 		case Opt_nodelalloc:
-			clear_opt(sbi->s_mount_opt, DELALLOC);
+			clear_opt(sb, DELALLOC);
 			break;
 		case Opt_mblk_io_submit:
-			set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+			set_opt(sb, MBLK_IO_SUBMIT);
 			break;
 		case Opt_nomblk_io_submit:
-			clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+			clear_opt(sb, MBLK_IO_SUBMIT);
 			break;
 		case Opt_stripe:
 			if (match_int(&args[0], &option))
@@ -1750,13 +1758,13 @@
 			sbi->s_stripe = option;
 			break;
 		case Opt_delalloc:
-			set_opt(sbi->s_mount_opt, DELALLOC);
+			set_opt(sb, DELALLOC);
 			break;
 		case Opt_block_validity:
-			set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+			set_opt(sb, BLOCK_VALIDITY);
 			break;
 		case Opt_noblock_validity:
-			clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+			clear_opt(sb, BLOCK_VALIDITY);
 			break;
 		case Opt_inode_readahead_blks:
 			if (match_int(&args[0], &option))
@@ -1780,7 +1788,7 @@
 							    option);
 			break;
 		case Opt_noauto_da_alloc:
-			set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+			set_opt(sb, NO_AUTO_DA_ALLOC);
 			break;
 		case Opt_auto_da_alloc:
 			if (args[0].from) {
@@ -1789,24 +1797,24 @@
 			} else
 				option = 1;	/* No argument, default to 1 */
 			if (option)
-				clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
+				clear_opt(sb, NO_AUTO_DA_ALLOC);
 			else
-				set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+				set_opt(sb,NO_AUTO_DA_ALLOC);
 			break;
 		case Opt_discard:
-			set_opt(sbi->s_mount_opt, DISCARD);
+			set_opt(sb, DISCARD);
 			break;
 		case Opt_nodiscard:
-			clear_opt(sbi->s_mount_opt, DISCARD);
+			clear_opt(sb, DISCARD);
 			break;
 		case Opt_dioread_nolock:
-			set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			set_opt(sb, DIOREAD_NOLOCK);
 			break;
 		case Opt_dioread_lock:
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 			break;
 		case Opt_init_inode_table:
-			set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+			set_opt(sb, INIT_INODE_TABLE);
 			if (args[0].from) {
 				if (match_int(&args[0], &option))
 					return 0;
@@ -1817,7 +1825,7 @@
 			sbi->s_li_wait_mult = option;
 			break;
 		case Opt_noinit_inode_table:
-			clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+			clear_opt(sb, INIT_INODE_TABLE);
 			break;
 		default:
 			ext4_msg(sb, KERN_ERR,
@@ -1829,10 +1837,10 @@
 #ifdef CONFIG_QUOTA
 	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
+			clear_opt(sb, USRQUOTA);
 
 		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
+			clear_opt(sb, GRPQUOTA);
 
 		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
 			ext4_msg(sb, KERN_ERR, "old and new quota "
@@ -1902,12 +1910,12 @@
 	ext4_commit_super(sb, 1);
 	if (test_opt(sb, DEBUG))
 		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
-				"bpg=%lu, ipg=%lu, mo=%04x]\n",
+				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
 			sb->s_blocksize,
 			sbi->s_groups_count,
 			EXT4_BLOCKS_PER_GROUP(sb),
 			EXT4_INODES_PER_GROUP(sb),
-			sbi->s_mount_opt);
+			sbi->s_mount_opt, sbi->s_mount_opt2);
 
 	return res;
 }
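
The long run of hunks above is mostly mechanical: set_opt()/clear_opt() now take the superblock rather than sbi->s_mount_opt, which hides the flag word behind the macro (and makes room for the second flag word, s_mount_opt2, that the debug printout and remount code now carry). A compact sketch of that macro shape, with invented flag names and structures:

/* Sketch of the set_opt()/clear_opt() conversion: callers pass the
 * superblock and never touch the flag word directly.  The structures
 * and flag values are illustrative, not the ext4 definitions. */
#include <stdio.h>

struct sb_info { unsigned int s_mount_opt; };
struct super_block { struct sb_info *s_fs_info; };

#define SB_INFO(sb)		((sb)->s_fs_info)
#define MOUNT_BARRIER		0x0100
#define MOUNT_DISCARD		0x0200

#define set_opt(sb, opt)	(SB_INFO(sb)->s_mount_opt |= MOUNT_##opt)
#define clear_opt(sb, opt)	(SB_INFO(sb)->s_mount_opt &= ~MOUNT_##opt)
#define test_opt(sb, opt)	(SB_INFO(sb)->s_mount_opt & MOUNT_##opt)

int main(void)
{
	struct sb_info sbi = { 0 };
	struct super_block sb = { &sbi };

	set_opt(&sb, BARRIER);
	set_opt(&sb, DISCARD);
	clear_opt(&sb, DISCARD);
	printf("barrier=%d discard=%d\n",
	       !!test_opt(&sb, BARRIER), !!test_opt(&sb, DISCARD));
	return 0;
}
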
@@ -1937,14 +1945,13 @@
 	size = flex_group_count * sizeof(struct flex_groups);
 	sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
 	if (sbi->s_flex_groups == NULL) {
-		sbi->s_flex_groups = vmalloc(size);
-		if (sbi->s_flex_groups)
-			memset(sbi->s_flex_groups, 0, size);
-	}
-	if (sbi->s_flex_groups == NULL) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for "
-				"%u flex groups", flex_group_count);
-		goto failed;
+		sbi->s_flex_groups = vzalloc(size);
+		if (sbi->s_flex_groups == NULL) {
+			ext4_msg(sb, KERN_ERR,
+				 "not enough memory for %u flex groups",
+				 flex_group_count);
+			goto failed;
+		}
 	}
 
 	for (i = 0; i < sbi->s_groups_count; i++) {
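
The flex-group hunk above collapses the kzalloc-then-vmalloc-then-memset dance into kzalloc with a vzalloc fallback, so both paths hand back zeroed memory and there is a single failure check. A trivial userspace sketch of the shape, using calloc as a stand-in for both allocators:

/* Sketch of the allocation fallback this hunk tidies up: try the
 * regular zeroed allocator, fall back to a (hypothetical) slower zeroed
 * allocator, one failure check, no separate memset.  calloc stands in
 * for both kzalloc and vzalloc; in the kernel they are distinct. */
#include <stdio.h>
#include <stdlib.h>

static void *small_zalloc(size_t size)
{
	return size <= 4096 ? calloc(1, size) : NULL;	/* pretend large fails */
}

static void *big_zalloc(size_t size)
{
	return calloc(1, size);
}

int main(void)
{
	size_t size = 1 << 20;
	void *p = small_zalloc(size);

	if (p == NULL) {
		p = big_zalloc(size);
		if (p == NULL) {
			fprintf(stderr, "not enough memory for %zu bytes\n", size);
			return 1;
		}
	}
	printf("allocated %zu zeroed bytes\n", size);
	free(p);
	return 0;
}
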
@@ -2711,6 +2718,8 @@
 	mutex_unlock(&ext4_li_info->li_list_mtx);
 }
 
+static struct task_struct *ext4_lazyinit_task;
+
 /*
  * This is the function where ext4lazyinit thread lives. It walks
  * through the request list searching for next scheduled filesystem.
@@ -2779,6 +2788,10 @@
 		if (time_before(jiffies, next_wakeup))
 			schedule();
 		finish_wait(&eli->li_wait_daemon, &wait);
+		if (kthread_should_stop()) {
+			ext4_clear_request_list();
+			goto exit_thread;
+		}
 	}
 
 exit_thread:
@@ -2803,6 +2816,7 @@
 	wake_up(&eli->li_wait_task);
 
 	kfree(ext4_li_info);
+	ext4_lazyinit_task = NULL;
 	ext4_li_info = NULL;
 	mutex_unlock(&ext4_li_mtx);
 
@@ -2825,11 +2839,10 @@
 
 static int ext4_run_lazyinit_thread(void)
 {
-	struct task_struct *t;
-
-	t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit");
-	if (IS_ERR(t)) {
-		int err = PTR_ERR(t);
+	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
+					 ext4_li_info, "ext4lazyinit");
+	if (IS_ERR(ext4_lazyinit_task)) {
+		int err = PTR_ERR(ext4_lazyinit_task);
 		ext4_clear_request_list();
 		del_timer_sync(&ext4_li_info->li_timer);
 		kfree(ext4_li_info);
@@ -2923,7 +2936,7 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_li_request *elr;
 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
-	int ret;
+	int ret = 0;
 
 	if (sbi->s_li_request != NULL)
 		return 0;
@@ -2980,16 +2993,10 @@
 	 * If thread exited earlier
 	 * there's nothing to be done.
 	 */
-	if (!ext4_li_info)
+	if (!ext4_li_info || !ext4_lazyinit_task)
 		return;
 
-	ext4_clear_request_list();
-
-	while (ext4_li_info->li_task) {
-		wake_up(&ext4_li_info->li_wait_daemon);
-		wait_event(ext4_li_info->li_wait_task,
-			   ext4_li_info->li_task == NULL);
-	}
+	kthread_stop(ext4_lazyinit_task);
 }
 
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
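
The lazyinit hunks above keep a pointer to the worker task and retire the hand-rolled wait-queue handshake: the thread checks kthread_should_stop() in its loop and teardown becomes a plain kthread_stop(). A pthread-based analogue of that stop-flag-plus-join shape (the kernel primitives themselves cannot be reproduced in userspace):

/* Pthread sketch of the teardown change: the worker polls a stop flag
 * in its loop, the unregister path sets it and joins.  Build with
 * "cc -pthread".  Everything here is an analogue, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool should_stop;

static void *lazyinit_worker(void *arg)
{
	(void)arg;
	for (;;) {
		if (atomic_load(&should_stop))
			break;		/* clean up request list, then exit */
		/* ... initialise one inode table, then sleep ... */
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t task;

	if (pthread_create(&task, NULL, lazyinit_worker, NULL) != 0)
		return 1;
	usleep(10000);
	atomic_store(&should_stop, true);	/* kthread_stop() analogue */
	pthread_join(task, NULL);		/* wait for the worker to exit */
	printf("worker stopped\n");
	return 0;
}
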
@@ -3078,41 +3085,41 @@
 
 	/* Set defaults before we parse the mount options */
 	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-	set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+	set_opt(sb, INIT_INODE_TABLE);
 	if (def_mount_opts & EXT4_DEFM_DEBUG)
-		set_opt(sbi->s_mount_opt, DEBUG);
+		set_opt(sb, DEBUG);
 	if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
 		ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
 			"2.6.38");
-		set_opt(sbi->s_mount_opt, GRPID);
+		set_opt(sb, GRPID);
 	}
 	if (def_mount_opts & EXT4_DEFM_UID16)
-		set_opt(sbi->s_mount_opt, NO_UID32);
+		set_opt(sb, NO_UID32);
 #ifdef CONFIG_EXT4_FS_XATTR
 	if (def_mount_opts & EXT4_DEFM_XATTR_USER)
-		set_opt(sbi->s_mount_opt, XATTR_USER);
+		set_opt(sb, XATTR_USER);
 #endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 	if (def_mount_opts & EXT4_DEFM_ACL)
-		set_opt(sbi->s_mount_opt, POSIX_ACL);
+		set_opt(sb, POSIX_ACL);
 #endif
 	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
-		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+		set_opt(sb, JOURNAL_DATA);
 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
-		set_opt(sbi->s_mount_opt, ORDERED_DATA);
+		set_opt(sb, ORDERED_DATA);
 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+		set_opt(sb, WRITEBACK_DATA);
 
 	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
-		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+		set_opt(sb, ERRORS_PANIC);
 	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
-		set_opt(sbi->s_mount_opt, ERRORS_CONT);
+		set_opt(sb, ERRORS_CONT);
 	else
-		set_opt(sbi->s_mount_opt, ERRORS_RO);
+		set_opt(sb, ERRORS_RO);
 	if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
-		set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+		set_opt(sb, BLOCK_VALIDITY);
 	if (def_mount_opts & EXT4_DEFM_DISCARD)
-		set_opt(sbi->s_mount_opt, DISCARD);
+		set_opt(sb, DISCARD);
 
 	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
 	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -3121,7 +3128,7 @@
 	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
 
 	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
-		set_opt(sbi->s_mount_opt, BARRIER);
+		set_opt(sb, BARRIER);
 
 	/*
 	 * enable delayed allocation by default
@@ -3129,7 +3136,7 @@
 	 */
 	if (!IS_EXT3_SB(sb) &&
 	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
-		set_opt(sbi->s_mount_opt, DELALLOC);
+		set_opt(sb, DELALLOC);
 
 	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
 			   &journal_devnum, &journal_ioprio, NULL, 0)) {
@@ -3432,8 +3439,8 @@
 		       "suppressed and not mounted read-only");
 		goto failed_mount_wq;
 	} else {
-		clear_opt(sbi->s_mount_opt, DATA_FLAGS);
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+		clear_opt(sb, DATA_FLAGS);
+		set_opt(sb, WRITEBACK_DATA);
 		sbi->s_journal = NULL;
 		needs_recovery = 0;
 		goto no_journal;
@@ -3471,9 +3478,9 @@
 		 */
 		if (jbd2_journal_check_available_features
 		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
-			set_opt(sbi->s_mount_opt, ORDERED_DATA);
+			set_opt(sb, ORDERED_DATA);
 		else
-			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+			set_opt(sb, JOURNAL_DATA);
 		break;
 
 	case EXT4_MOUNT_ORDERED_DATA:
@@ -3563,18 +3570,18 @@
 	    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
 		ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
 			 "requested data journaling mode");
-		clear_opt(sbi->s_mount_opt, DELALLOC);
+		clear_opt(sb, DELALLOC);
 	}
 	if (test_opt(sb, DIOREAD_NOLOCK)) {
 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
 			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 				"option - requested data journaling mode");
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 		}
 		if (sb->s_blocksize < PAGE_SIZE) {
 			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 				"option - block size is too small");
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 		}
 	}
 
@@ -3772,13 +3779,6 @@
 	if (bdev == NULL)
 		return NULL;
 
-	if (bd_claim(bdev, sb)) {
-		ext4_msg(sb, KERN_ERR,
-			"failed to claim external journal device");
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
-		return NULL;
-	}
-
 	blocksize = sb->s_blocksize;
 	hblock = bdev_logical_block_size(bdev);
 	if (blocksize < hblock) {
@@ -4173,6 +4173,22 @@
 	return 0;
 }
 
+/*
+ * Structure to save mount options for ext4_remount's benefit
+ */
+struct ext4_mount_options {
+	unsigned long s_mount_opt;
+	unsigned long s_mount_opt2;
+	uid_t s_resuid;
+	gid_t s_resgid;
+	unsigned long s_commit_interval;
+	u32 s_min_batch_time, s_max_batch_time;
+#ifdef CONFIG_QUOTA
+	int s_jquota_fmt;
+	char *s_qf_names[MAXQUOTAS];
+#endif
+};
+
 static int ext4_remount(struct super_block *sb, int *flags, char *data)
 {
 	struct ext4_super_block *es;
@@ -4193,6 +4209,7 @@
 	lock_super(sb);
 	old_sb_flags = sb->s_flags;
 	old_opts.s_mount_opt = sbi->s_mount_opt;
+	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
 	old_opts.s_resuid = sbi->s_resuid;
 	old_opts.s_resgid = sbi->s_resgid;
 	old_opts.s_commit_interval = sbi->s_commit_interval;
@@ -4346,6 +4363,7 @@
 restore_opts:
 	sb->s_flags = old_sb_flags;
 	sbi->s_mount_opt = old_opts.s_mount_opt;
+	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
 	sbi->s_resuid = old_opts.s_resuid;
 	sbi->s_resgid = old_opts.s_resgid;
 	sbi->s_commit_interval = old_opts.s_commit_interval;
@@ -4542,27 +4560,20 @@
  * Standard function to be called on quota_on
  */
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
-			 char *name)
+			 struct path *path)
 {
 	int err;
-	struct path path;
 
 	if (!test_opt(sb, QUOTA))
 		return -EINVAL;
 
-	err = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (err)
-		return err;
-
 	/* Quotafile not on the same filesystem? */
-	if (path.mnt->mnt_sb != sb) {
-		path_put(&path);
+	if (path->mnt->mnt_sb != sb)
 		return -EXDEV;
-	}
 	/* Journaling quota? */
 	if (EXT4_SB(sb)->s_qf_names[type]) {
 		/* Quotafile not in fs root? */
-		if (path.dentry->d_parent != sb->s_root)
+		if (path->dentry->d_parent != sb->s_root)
 			ext4_msg(sb, KERN_WARNING,
 				"Quota file not on filesystem root. "
 				"Journaled quota will not work");
@@ -4573,7 +4584,7 @@
 	 * all updates to the file when we bypass pagecache...
 	 */
 	if (EXT4_SB(sb)->s_journal &&
-	    ext4_should_journal_data(path.dentry->d_inode)) {
+	    ext4_should_journal_data(path->dentry->d_inode)) {
 		/*
 		 * We don't need to lock updates but journal_flush() could
 		 * otherwise be livelocked...
@@ -4581,15 +4592,11 @@
 		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
 		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
 		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
-		if (err) {
-			path_put(&path);
+		if (err)
 			return err;
-		}
 	}
 
-	err = dquot_quota_on_path(sb, type, format_id, &path);
-	path_put(&path);
-	return err;
+	return dquot_quota_on(sb, type, format_id, path);
 }
 
 static int ext4_quota_off(struct super_block *sb, int type)
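
The quota hunks above follow a VFS interface change: ->quota_on() now receives an already-resolved struct path, so ext4 only validates it (same superblock, sensible location) and no longer does its own kern_path()/path_put(). A rough userspace illustration of resolve-once-then-validate, with a stat() result standing in for struct path:

/* Sketch of the ->quota_on() interface change: the caller resolves the
 * path once and the filesystem only validates the result.  A stat
 * buffer stands in for struct path; names are illustrative. */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

struct resolved_path { struct stat st; };

/* Filesystem side: validate only, no lookup and no put. */
static int fs_quota_on(dev_t fs_dev, const struct resolved_path *path)
{
	if (path->st.st_dev != fs_dev)
		return -EXDEV;	/* quota file not on this filesystem */
	return 0;
}

/* "VFS" side: resolve once, pass the result down. */
static int quota_on(dev_t fs_dev, const char *name)
{
	struct resolved_path path;

	if (stat(name, &path.st) != 0)
		return -errno;
	return fs_quota_on(fs_dev, &path);
}

int main(void)
{
	struct stat root;

	if (stat("/", &root) != 0)
		return 1;
	printf("root fs:  %d\n", quota_on(root.st_dev, "/"));
	printf("other fs: %d\n", quota_on(root.st_dev, "/proc/self"));
	return 0;
}
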
@@ -4763,7 +4770,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-int __init ext4_init_feat_adverts(void)
+static int __init ext4_init_feat_adverts(void)
 {
 	struct ext4_features *ef;
 	int ret = -ENOMEM;
@@ -4787,23 +4794,44 @@
 	return ret;
 }
 
+static void ext4_exit_feat_adverts(void)
+{
+	kobject_put(&ext4_feat->f_kobj);
+	wait_for_completion(&ext4_feat->f_kobj_unregister);
+	kfree(ext4_feat);
+}
+
+/* Shared across all ext4 file systems */
+wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 static int __init ext4_init_fs(void)
 {
-	int err;
+	int i, err;
 
 	ext4_check_flag_values();
+
+	for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
+		mutex_init(&ext4__aio_mutex[i]);
+		init_waitqueue_head(&ext4__ioend_wq[i]);
+	}
+
 	err = ext4_init_pageio();
 	if (err)
 		return err;
 	err = ext4_init_system_zone();
 	if (err)
-		goto out5;
+		goto out7;
 	ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
 	if (!ext4_kset)
-		goto out4;
+		goto out6;
 	ext4_proc_root = proc_mkdir("fs/ext4", NULL);
+	if (!ext4_proc_root)
+		goto out5;
 
 	err = ext4_init_feat_adverts();
+	if (err)
+		goto out4;
 
 	err = ext4_init_mballoc();
 	if (err)
@@ -4833,12 +4861,14 @@
 out2:
 	ext4_exit_mballoc();
 out3:
-	kfree(ext4_feat);
-	remove_proc_entry("fs/ext4", NULL);
-	kset_unregister(ext4_kset);
+	ext4_exit_feat_adverts();
 out4:
-	ext4_exit_system_zone();
+	remove_proc_entry("fs/ext4", NULL);
 out5:
+	kset_unregister(ext4_kset);
+out6:
+	ext4_exit_system_zone();
+out7:
 	ext4_exit_pageio();
 	return err;
 }
@@ -4852,6 +4882,7 @@
 	destroy_inodecache();
 	ext4_exit_xattr();
 	ext4_exit_mballoc();
+	ext4_exit_feat_adverts();
 	remove_proc_entry("fs/ext4", NULL);
 	kset_unregister(ext4_kset);
 	ext4_exit_system_zone();
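
The tail of the super.c diff reworks module init/exit: the feature adverts get a proper ext4_exit_feat_adverts(), the proc entry and kset creation are checked, and the failure labels (out7 … out2) unwind the reordered steps in reverse. The goto-ladder that the renumbered labels implement, in miniature and with placeholder resources:

/* Sketch of the error-unwind ladder: resources are acquired in order
 * and the failure labels tear them down in exact reverse order, so
 * adding a step only adds one label.  The init steps are placeholders. */
#include <stdio.h>
#include <stdlib.h>

static void *init_a(void) { return malloc(1); }
static void *init_b(void) { return malloc(1); }
static void *init_c(void) { return NULL; }	/* pretend this step fails */

int main(void)
{
	void *a, *b, *c;

	a = init_a();
	if (!a)
		goto out;
	b = init_b();
	if (!b)
		goto out_a;
	c = init_c();
	if (!c)
		goto out_b;

	printf("init ok\n");
	return 0;	/* resources stay live, like a loaded module */

out_b:
	free(b);
out_a:
	free(a);
out:
	fprintf(stderr, "init failed, unwound in reverse order\n");
	return 1;
}
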
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fa4b899..fc32176 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -427,23 +427,23 @@
 static int
 ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
-	int i_error, b_error;
+	int ret, ret2;
 
 	down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
-	i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
-	if (i_error < 0) {
-		b_error = 0;
-	} else {
-		if (buffer) {
-			buffer += i_error;
-			buffer_size -= i_error;
-		}
-		b_error = ext4_xattr_block_list(dentry, buffer, buffer_size);
-		if (b_error < 0)
-			i_error = 0;
+	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
+	if (ret < 0)
+		goto errout;
+	if (buffer) {
+		buffer += ret;
+		buffer_size -= ret;
 	}
+	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
+	if (ret < 0)
+		goto errout;
+	ret += ret2;
+errout:
 	up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
-	return i_error + b_error;
+	return ret;
 }
 
 /*
@@ -947,7 +947,7 @@
 /*
  * ext4_xattr_set_handle()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index d75a77f..f504089 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -319,7 +319,8 @@
 			struct msdos_dir_entry *de, loff_t i_pos);
 extern int fat_sync_inode(struct inode *inode);
 extern int fat_fill_super(struct super_block *sb, void *data, int silent,
-			const struct inode_operations *fs_dir_inode_ops, int isvfat);
+			const struct inode_operations *fs_dir_inode_ops,
+			int isvfat, void (*setup)(struct super_block *));
 
 extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
 		            struct inode *i2);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 206351a..86753fe 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -703,7 +703,6 @@
 		struct fid *fid, int fh_len, int fh_type)
 {
 	struct inode *inode = NULL;
-	struct dentry *result;
 	u32 *fh = fid->raw;
 
 	if (fh_len < 5 || fh_type != 3)
@@ -748,10 +747,7 @@
 	 * the fat_iget lookup again.  If that fails, then we are totally out
 	 * of luck.  But all that is for another day
 	 */
-	result = d_obtain_alias(inode);
-	if (!IS_ERR(result))
-		d_set_d_op(result, sb->s_root->d_op);
-	return result;
+	return d_obtain_alias(inode);
 }
 
 static int
@@ -799,8 +795,6 @@
 	brelse(bh);
 
 	parent = d_obtain_alias(inode);
-	if (!IS_ERR(parent))
-		d_set_d_op(parent, sb->s_root->d_op);
 out:
 	unlock_super(sb);
 
@@ -1244,7 +1238,8 @@
  * Read the super block of an MS-DOS FS.
  */
 int fat_fill_super(struct super_block *sb, void *data, int silent,
-		   const struct inode_operations *fs_dir_inode_ops, int isvfat)
+		   const struct inode_operations *fs_dir_inode_ops, int isvfat,
+		   void (*setup)(struct super_block *))
 {
 	struct inode *root_inode = NULL, *fat_inode = NULL;
 	struct buffer_head *bh;
@@ -1280,6 +1275,8 @@
 	if (error)
 		goto out_fail;
 
+	setup(sb); /* flavour-specific stuff that needs options */
+
 	error = -EIO;
 	sb_min_blocksize(sb, 512);
 	bh = sb_bread(sb, 0);
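
The FAT hunks above thread a flavour-specific setup callback into fat_fill_super() and rely on sb->s_d_op, so msdos/vfat install their dentry operations once on the superblock instead of calling d_set_d_op() on every dentry and on the root afterwards. A small sketch of the callback shape, with made-up types:

/* Sketch of the fat_fill_super() change: the generic fill routine takes
 * a flavour-specific setup callback and runs it after option parsing
 * but before the root dentry exists.  Types are illustrative. */
#include <stdio.h>

struct super_block { const char *d_op_name; int noatime; };

static int generic_fill_super(struct super_block *sb,
			      void (*setup)(struct super_block *))
{
	/* ... parse options ... */
	setup(sb);		/* flavour-specific stuff that needs options */
	/* ... read the boot sector, create the root dentry ... */
	return 0;
}

static void msdos_setup(struct super_block *sb)
{
	sb->d_op_name = "msdos_dentry_operations";
	sb->noatime = 1;
}

static void vfat_setup(struct super_block *sb)
{
	sb->d_op_name = "vfat_ci_dentry_ops";
}

int main(void)
{
	struct super_block a = { 0 }, b = { 0 };

	generic_fill_super(&a, msdos_setup);
	generic_fill_super(&b, vfat_setup);
	printf("msdos: %s noatime=%d\n", a.d_op_name, a.noatime);
	printf("vfat:  %s noatime=%d\n", b.d_op_name, b.noatime);
	return 0;
}
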
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 35ffe43..7114990 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -227,11 +227,7 @@
 	}
 out:
 	unlock_super(sb);
-	d_set_d_op(dentry, &msdos_dentry_operations);
-	dentry = d_splice_alias(inode, dentry);
-	if (dentry)
-		d_set_d_op(dentry, &msdos_dentry_operations);
-	return dentry;
+	return d_splice_alias(inode, dentry);
 
 error:
 	unlock_super(sb);
@@ -661,21 +657,16 @@
 	.getattr	= fat_getattr,
 };
 
+static void setup(struct super_block *sb)
+{
+	sb->s_d_op = &msdos_dentry_operations;
+	sb->s_flags |= MS_NOATIME;
+}
+
 static int msdos_fill_super(struct super_block *sb, void *data, int silent)
 {
-	int res;
-
-	lock_super(sb);
-	res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0);
-	if (res) {
-		unlock_super(sb);
-		return res;
-	}
-
-	sb->s_flags |= MS_NOATIME;
-	d_set_d_op(sb->s_root, &msdos_dentry_operations);
-	unlock_super(sb);
-	return 0;
+	return fat_fill_super(sb, data, silent, &msdos_dir_inode_operations,
+			     0, setup);
 }
 
 static struct dentry *msdos_mount(struct file_system_type *fs_type,
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index e3ffc5e..adae3fb 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,7 +43,7 @@
 
 static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	/* This is not negative dentry. Always valid. */
@@ -54,7 +54,7 @@
 
 static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
 {
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	/*
@@ -772,13 +772,10 @@
 
 out:
 	unlock_super(sb);
-	d_set_d_op(dentry, sb->s_root->d_op);
 	dentry->d_time = dentry->d_parent->d_inode->i_version;
 	dentry = d_splice_alias(inode, dentry);
-	if (dentry) {
-		d_set_d_op(dentry, sb->s_root->d_op);
+	if (dentry)
 		dentry->d_time = dentry->d_parent->d_inode->i_version;
-	}
 	return dentry;
 
 error:
@@ -1066,24 +1063,18 @@
 	.getattr	= fat_getattr,
 };
 
+static void setup(struct super_block *sb)
+{
+	if (MSDOS_SB(sb)->options.name_check != 's')
+		sb->s_d_op = &vfat_ci_dentry_ops;
+	else
+		sb->s_d_op = &vfat_dentry_ops;
+}
+
 static int vfat_fill_super(struct super_block *sb, void *data, int silent)
 {
-	int res;
-
-	lock_super(sb);
-	res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1);
-	if (res) {
-		unlock_super(sb);
-		return res;
-	}
-
-	if (MSDOS_SB(sb)->options.name_check != 's')
-		d_set_d_op(sb->s_root, &vfat_ci_dentry_ops);
-	else
-		d_set_d_op(sb->s_root, &vfat_dentry_ops);
-
-	unlock_super(sb);
-	return 0;
+	return fat_fill_super(sb, data, silent, &vfat_dir_inode_operations,
+			     1, setup);
 }
 
 static struct dentry *vfat_mount(struct file_system_type *fs_type,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ecc8b39..cb10261 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -815,7 +815,7 @@
 		__O_SYNC	| O_DSYNC	| FASYNC	|
 		O_DIRECT	| O_LARGEFILE	| O_DIRECTORY	|
 		O_NOFOLLOW	| O_NOATIME	| O_CLOEXEC	|
-		FMODE_EXEC
+		__FMODE_EXEC
 		));
 
 	fasync_cache = kmem_cache_create("fasync_cache",
diff --git a/fs/file_table.c b/fs/file_table.c
index c3dee38..eb36b6b 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -125,13 +125,13 @@
 		goto fail;
 
 	percpu_counter_inc(&nr_files);
+	f->f_cred = get_cred(cred);
 	if (security_file_alloc(f))
 		goto fail_sec;
 
 	INIT_LIST_HEAD(&f->f_u.fu_list);
 	atomic_long_set(&f->f_count, 1);
 	rwlock_init(&f->f_owner.lock);
-	f->f_cred = get_cred(cred);
 	spin_lock_init(&f->f_lock);
 	eventpoll_init_file(f);
 	/* f->f_version: 0 */
@@ -311,7 +311,7 @@
 	struct files_struct *files = current->files;
 
 	*fput_needed = 0;
-	if (likely((atomic_read(&files->count) == 1))) {
+	if (atomic_read(&files->count) == 1) {
 		file = fcheck_files(files, fd);
 	} else {
 		rcu_read_lock();
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 3d06ccc..59c6e49 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -84,13 +84,9 @@
 	return list_entry(head, struct inode, i_wb_list);
 }
 
-static void bdi_queue_work(struct backing_dev_info *bdi,
-		struct wb_writeback_work *work)
+/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
+static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
 {
-	trace_writeback_queue(bdi, work);
-
-	spin_lock_bh(&bdi->wb_lock);
-	list_add_tail(&work->list, &bdi->work_list);
 	if (bdi->wb.task) {
 		wake_up_process(bdi->wb.task);
 	} else {
@@ -98,15 +94,26 @@
 		 * The bdi thread isn't there, wake up the forker thread which
 		 * will create and run it.
 		 */
-		trace_writeback_nothread(bdi, work);
 		wake_up_process(default_backing_dev_info.wb.task);
 	}
+}
+
+static void bdi_queue_work(struct backing_dev_info *bdi,
+			   struct wb_writeback_work *work)
+{
+	trace_writeback_queue(bdi, work);
+
+	spin_lock_bh(&bdi->wb_lock);
+	list_add_tail(&work->list, &bdi->work_list);
+	if (!bdi->wb.task)
+		trace_writeback_nothread(bdi, work);
+	bdi_wakeup_flusher(bdi);
 	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		bool range_cyclic, bool for_background)
+		      bool range_cyclic)
 {
 	struct wb_writeback_work *work;
 
@@ -126,7 +133,6 @@
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
-	work->for_background = for_background;
 
 	bdi_queue_work(bdi, work);
 }
@@ -144,7 +150,7 @@
  */
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 {
-	__bdi_start_writeback(bdi, nr_pages, true, false);
+	__bdi_start_writeback(bdi, nr_pages, true);
 }
 
 /**
@@ -152,13 +158,21 @@
  * @bdi: the backing device to write from
  *
  * Description:
- *   This does WB_SYNC_NONE background writeback. The IO is only
- *   started when this function returns, we make no guarentees on
- *   completion. Caller need not hold sb s_umount semaphore.
+ *   This makes sure WB_SYNC_NONE background writeback happens. When
+ *   this function returns, it is only guaranteed that for given BDI
+ *   some IO is happening if we are over background dirty threshold.
+ *   Caller need not hold sb s_umount semaphore.
  */
 void bdi_start_background_writeback(struct backing_dev_info *bdi)
 {
-	__bdi_start_writeback(bdi, LONG_MAX, true, true);
+	/*
+	 * We just wake up the flusher thread. It will perform background
+	 * writeback as soon as there is no other work to do.
+	 */
+	trace_writeback_wake_background(bdi);
+	spin_lock_bh(&bdi->wb_lock);
+	bdi_wakeup_flusher(bdi);
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 /*
@@ -616,6 +630,7 @@
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
+	long write_chunk;
 	struct inode *inode;
 
 	if (wbc.for_kupdate) {
@@ -628,6 +643,24 @@
 		wbc.range_end = LLONG_MAX;
 	}
 
+	/*
+	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
+	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
+	 * here avoids calling into writeback_inodes_wb() more than once.
+	 *
+	 * The intended call sequence for WB_SYNC_ALL writeback is:
+	 *
+	 *      wb_writeback()
+	 *          __writeback_inodes_sb()     <== called only once
+	 *              write_cache_pages()     <== called once for each inode
+	 *                   (quickly) tag currently dirty pages
+	 *                   (maybe slowly) sync all tagged pages
+	 */
+	if (wbc.sync_mode == WB_SYNC_NONE)
+		write_chunk = MAX_WRITEBACK_PAGES;
+	else
+		write_chunk = LONG_MAX;
+
 	wbc.wb_start = jiffies; /* livelock avoidance */
 	for (;;) {
 		/*
@@ -637,6 +670,16 @@
 			break;
 
 		/*
+		 * Background writeout and kupdate-style writeback may
+		 * run forever. Stop them if there is other work to do
+		 * so that e.g. sync can proceed. They'll be restarted
+		 * after the other works are all done.
+		 */
+		if ((work->for_background || work->for_kupdate) &&
+		    !list_empty(&wb->bdi->work_list))
+			break;
+
+		/*
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
@@ -644,7 +687,7 @@
 			break;
 
 		wbc.more_io = 0;
-		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
+		wbc.nr_to_write = write_chunk;
 		wbc.pages_skipped = 0;
 
 		trace_wbc_writeback_start(&wbc, wb->bdi);
@@ -654,8 +697,8 @@
 			writeback_inodes_wb(wb, &wbc);
 		trace_wbc_writeback_written(&wbc, wb->bdi);
 
-		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
-		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+		work->nr_pages -= write_chunk - wbc.nr_to_write;
+		wrote += write_chunk - wbc.nr_to_write;
 
 		/*
 		 * If we consumed everything, see if we have more
@@ -670,7 +713,7 @@
 		/*
 		 * Did we write something? Try for more
 		 */
-		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
+		if (wbc.nr_to_write < write_chunk)
 			continue;
 		/*
 		 * Nothing written. Wait for some inode to
@@ -718,6 +761,23 @@
 		get_nr_dirty_inodes();
 }
 
+static long wb_check_background_flush(struct bdi_writeback *wb)
+{
+	if (over_bground_thresh()) {
+
+		struct wb_writeback_work work = {
+			.nr_pages	= LONG_MAX,
+			.sync_mode	= WB_SYNC_NONE,
+			.for_background	= 1,
+			.range_cyclic	= 1,
+		};
+
+		return wb_writeback(wb, &work);
+	}
+
+	return 0;
+}
+
 static long wb_check_old_data_flush(struct bdi_writeback *wb)
 {
 	unsigned long expired;
@@ -787,6 +847,7 @@
 	 * Check for periodic writeback, kupdated() style
 	 */
 	wrote += wb_check_old_data_flush(wb);
+	wrote += wb_check_background_flush(wb);
 	clear_bit(BDI_writeback_running, &wb->bdi->state);
 
 	return wrote;
@@ -873,7 +934,7 @@
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false, false);
+		__bdi_start_writeback(bdi, nr_pages, false);
 	}
 	rcu_read_unlock();
 }
@@ -1164,7 +1225,7 @@
  * @sb: the superblock
  *
  * This function writes and waits on any dirty inode belonging to this
- * super_block. The number of pages synced is returned.
+ * super_block.
  */
 void sync_inodes_sb(struct super_block *sb)
 {
@@ -1242,11 +1303,11 @@
 EXPORT_SYMBOL(sync_inode);
 
 /**
- * sync_inode - write an inode to disk
+ * sync_inode_metadata - write an inode to disk
  * @inode: the inode to sync
  * @wait: wait for I/O to complete.
  *
- * Write an inode to disk and adjust it's dirty state after completion.
+ * Write an inode to disk and adjust its dirty state after completion.
  *
  * Note: only writes the actual inode, no associated data or other metadata.
  */
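
The fs-writeback.c changes move background writeback into the flusher thread's own loop: explicit work items run first (and background/kupdate work bails out when other work is queued), wb_check_background_flush() only runs while the dirty threshold is exceeded, and WB_SYNC_ALL work now uses a LONG_MAX chunk to avoid re-entering the loop. A single-threaded toy of that ordering, with invented numbers:

/* Single-threaded sketch of the writeback scheduling change: explicit
 * work items are drained first, and background flushing is done by the
 * same loop only when nothing else is queued and the system is still
 * over its dirty threshold.  Numbers and names are made up. */
#include <stdio.h>

#define BACKGROUND_THRESH 100

static int dirty_pages = 350;
static int queued_work[] = { 120, 80 };	/* explicit writeback requests */

static int write_pages(int nr)
{
	if (nr > dirty_pages)
		nr = dirty_pages;
	dirty_pages -= nr;
	return nr;
}

int main(void)
{
	unsigned int i;
	int wrote = 0;

	/* 1. Queued work always runs first (sync must not be starved). */
	for (i = 0; i < sizeof(queued_work) / sizeof(queued_work[0]); i++)
		wrote += write_pages(queued_work[i]);

	/* 2. Background flush: only now, and only while over the threshold. */
	while (dirty_pages > BACKGROUND_THRESH)
		wrote += write_pages(64);

	printf("wrote %d pages, %d still dirty\n", wrote, dirty_pages);
	return 0;
}
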
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index 68ca487..78b519c 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -4,6 +4,19 @@
 #include <linux/path.h>
 #include <linux/slab.h>
 #include <linux/fs_struct.h>
+#include "internal.h"
+
+static inline void path_get_longterm(struct path *path)
+{
+	path_get(path);
+	mnt_make_longterm(path->mnt);
+}
+
+static inline void path_put_longterm(struct path *path)
+{
+	mnt_make_shortterm(path->mnt);
+	path_put(path);
+}
 
 /*
  * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
@@ -17,11 +30,11 @@
 	write_seqcount_begin(&fs->seq);
 	old_root = fs->root;
 	fs->root = *path;
-	path_get_long(path);
+	path_get_longterm(path);
 	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 	if (old_root.dentry)
-		path_put_long(&old_root);
+		path_put_longterm(&old_root);
 }
 
 /*
@@ -36,12 +49,12 @@
 	write_seqcount_begin(&fs->seq);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
-	path_get_long(path);
+	path_get_longterm(path);
 	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 
 	if (old_pwd.dentry)
-		path_put_long(&old_pwd);
+		path_put_longterm(&old_pwd);
 }
 
 void chroot_fs_refs(struct path *old_root, struct path *new_root)
@@ -59,13 +72,13 @@
 			write_seqcount_begin(&fs->seq);
 			if (fs->root.dentry == old_root->dentry
 			    && fs->root.mnt == old_root->mnt) {
-				path_get_long(new_root);
+				path_get_longterm(new_root);
 				fs->root = *new_root;
 				count++;
 			}
 			if (fs->pwd.dentry == old_root->dentry
 			    && fs->pwd.mnt == old_root->mnt) {
-				path_get_long(new_root);
+				path_get_longterm(new_root);
 				fs->pwd = *new_root;
 				count++;
 			}
@@ -76,13 +89,13 @@
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 	while (count--)
-		path_put_long(old_root);
+		path_put_longterm(old_root);
 }
 
 void free_fs_struct(struct fs_struct *fs)
 {
-	path_put_long(&fs->root);
-	path_put_long(&fs->pwd);
+	path_put_longterm(&fs->root);
+	path_put_longterm(&fs->pwd);
 	kmem_cache_free(fs_cachep, fs);
 }
 
@@ -118,9 +131,9 @@
 
 		spin_lock(&old->lock);
 		fs->root = old->root;
-		path_get_long(&fs->root);
+		path_get_longterm(&fs->root);
 		fs->pwd = old->pwd;
-		path_get_long(&fs->pwd);
+		path_get_longterm(&fs->pwd);
 		spin_unlock(&old->lock);
 	}
 	return fs;
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index b9f34ea..48a18f1 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -101,7 +101,7 @@
 		object->n_ops++;
 		object->n_exclusive++;	/* reads and writes must wait */
 
-		if (object->n_ops > 0) {
+		if (object->n_ops > 1) {
 			atomic_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
 			fscache_stat(&fscache_n_op_pend);
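
The fscache hunk is a one-character off-by-one: n_ops has already been incremented for the operation being submitted, so the "is anything else in flight?" test must compare against 1, otherwise a lone exclusive op parks itself on the pending list behind nothing. In miniature:

/* Sketch of the off-by-one the fscache hunk fixes: the count includes
 * the operation being submitted, so the pending test is "> 1".  Plain
 * ints stand in for the object state. */
#include <stdio.h>

static int submit_exclusive(int *n_ops)
{
	(*n_ops)++;			/* this operation is counted first */

	if (*n_ops > 1) {		/* "> 0" here would always be true */
		printf("pending behind %d other op(s)\n", *n_ops - 1);
		return 0;
	}
	printf("running immediately\n");
	return 1;
}

int main(void)
{
	int n_ops = 0;

	submit_exclusive(&n_ops);	/* first op: runs immediately */
	submit_exclusive(&n_ops);	/* second op: must wait */
	return 0;
}
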
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 042af73..8bd0ef9 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -158,7 +158,7 @@
 {
 	struct inode *inode;
 
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	inode = entry->d_inode;
@@ -350,7 +350,6 @@
 	}
 
 	entry = newent ? newent : entry;
-	d_set_d_op(entry, &fuse_dentry_operations);
 	if (outarg_valid)
 		fuse_change_entry_timeout(entry, &outarg);
 	else
@@ -1284,8 +1283,11 @@
 	if (err)
 		return err;
 
-	if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
-		return 0;
+	if (attr->ia_valid & ATTR_OPEN) {
+		if (fc->atomic_o_trunc)
+			return 0;
+		file = NULL;
+	}
 
 	if (attr->ia_valid & ATTR_SIZE)
 		is_truncate = true;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 95da1bc..9e0832d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -86,18 +86,52 @@
 	return ff;
 }
 
-static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_release_async(struct work_struct *work)
 {
-	path_put(&req->misc.release.path);
+	struct fuse_req *req;
+	struct fuse_conn *fc;
+	struct path path;
+
+	req = container_of(work, struct fuse_req, misc.release.work);
+	path = req->misc.release.path;
+	fc = get_fuse_conn(path.dentry->d_inode);
+
+	fuse_put_request(fc, req);
+	path_put(&path);
 }
 
-static void fuse_file_put(struct fuse_file *ff)
+static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+{
+	if (fc->destroy_req) {
+		/*
+		 * If this is a fuseblk mount, then it's possible that
+		 * releasing the path will result in releasing the
+		 * super block and sending the DESTROY request.  If
+		 * the server is single threaded, this would hang.
+		 * For this reason do the path_put() in a separate
+		 * thread.
+		 */
+		atomic_inc(&req->count);
+		INIT_WORK(&req->misc.release.work, fuse_release_async);
+		schedule_work(&req->misc.release.work);
+	} else {
+		path_put(&req->misc.release.path);
+	}
+}
+
+static void fuse_file_put(struct fuse_file *ff, bool sync)
 {
 	if (atomic_dec_and_test(&ff->count)) {
 		struct fuse_req *req = ff->reserved_req;
 
-		req->end = fuse_release_end;
-		fuse_request_send_background(ff->fc, req);
+		if (sync) {
+			fuse_request_send(ff->fc, req);
+			path_put(&req->misc.release.path);
+			fuse_put_request(ff->fc, req);
+		} else {
+			req->end = fuse_release_end;
+			fuse_request_send_background(ff->fc, req);
+		}
 		kfree(ff);
 	}
 }
@@ -219,8 +253,12 @@
 	 * Normally this will send the RELEASE request, however if
 	 * some asynchronous READ or WRITE requests are outstanding,
 	 * the sending will be delayed.
+	 *
+	 * Make the release synchronous if this is a fuseblk mount,
+	 * synchronous RELEASE is allowed (and desirable) in this case
+	 * because the server can be trusted not to screw up.
 	 */
-	fuse_file_put(ff);
+	fuse_file_put(ff, ff->fc->destroy_req != NULL);
 }
 
 static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@
 		page_cache_release(page);
 	}
 	if (req->ff)
-		fuse_file_put(req->ff);
+		fuse_file_put(req->ff, false);
 }
 
 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
 	__free_page(req->pages[0]);
-	fuse_file_put(req->ff);
+	fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
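
The fuse/file.c hunks above defer the final path_put() to a workqueue on fuseblk mounts, because dropping the path can drop the last superblock reference and send DESTROY to a server that may be single threaded. The shape of that pattern, stash a work item inside the request and recover the request with container_of() in the worker, is sketched below with hypothetical names; it illustrates the technique only and is not the fuse code itself.

	#include <linux/workqueue.h>
	#include <linux/path.h>
	#include <linux/slab.h>

	/* Hypothetical request carrying a path whose final put must not run
	 * in the caller's context. */
	struct demo_req {
		struct work_struct work;
		struct path path;
	};

	static void demo_release_async(struct work_struct *work)
	{
		/* Recover the containing request from its embedded work item. */
		struct demo_req *req = container_of(work, struct demo_req, work);
		struct path path = req->path;	/* copy before the request is freed */

		kfree(req);
		path_put(&path);	/* safe to drop the last sb reference here */
	}

	static void demo_release(struct demo_req *req, bool defer)
	{
		if (defer) {
			INIT_WORK(&req->work, demo_release_async);
			schedule_work(&req->work);	/* put runs in workqueue context */
		} else {
			path_put(&req->path);
			kfree(req);
		}
	}
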
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ae5744a..d428694 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,7 @@
 #include <linux/rwsem.h>
 #include <linux/rbtree.h>
 #include <linux/poll.h>
+#include <linux/workqueue.h>
 
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,7 +263,10 @@
 	/** Data for asynchronous requests */
 	union {
 		struct {
-			struct fuse_release_in in;
+			union {
+				struct fuse_release_in in;
+				struct work_struct work;
+			};
 			struct path path;
 		} release;
 		struct fuse_init_in init_in;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index f62b32c..9e3f68c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -617,10 +617,8 @@
 		goto out_iput;
 
 	entry = d_obtain_alias(inode);
-	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) {
-		d_set_d_op(entry, &fuse_dentry_operations);
+	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
 		fuse_invalidate_entry_cache(entry);
-	}
 
 	return entry;
 
@@ -719,10 +717,8 @@
 	}
 
 	parent = d_obtain_alias(inode);
-	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) {
-		d_set_d_op(parent, &fuse_dentry_operations);
+	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
 		fuse_invalidate_entry_cache(parent);
-	}
 
 	return parent;
 }
@@ -989,6 +985,8 @@
 		iput(root);
 		goto err_put_conn;
 	}
+	/* only now - we want root dentry with NULL ->d_op */
+	sb->s_d_op = &fuse_dentry_operations;
 
 	init_req = fuse_request_alloc();
 	if (!init_req)
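
A recurring theme in this pull (fuse, gfs2, hfs, hfsplus, hostfs, hpfs, isofs, jfs below) is replacing per-dentry d_set_d_op() calls in ->lookup() with a single sb->s_d_op assignment at mount time, which d_alloc() then copies into every new dentry. The fuse comment above ("only now - we want root dentry with NULL ->d_op") shows why the assignment is ordered after the root dentry is created when the root must stay exempt. A condensed sketch of the two styles, with hypothetical names and a trivial ->d_revalidate:

	#include <linux/fs.h>
	#include <linux/dcache.h>
	#include <linux/namei.h>

	static int demo_d_revalidate(struct dentry *dentry, struct nameidata *nd)
	{
		if (nd && nd->flags & LOOKUP_RCU)	/* same nd check added above */
			return -ECHILD;
		return 1;
	}

	static const struct dentry_operations demo_dentry_ops = {
		.d_revalidate	= demo_d_revalidate,
	};

	/* Old style: each ->lookup() tags its dentry by hand. */
	static void demo_tag_dentry(struct dentry *dentry)
	{
		d_set_d_op(dentry, &demo_dentry_ops);
	}

	/* New style: set the per-sb default once; d_alloc() installs it on
	 * every dentry created under this superblock.  The root dentry only
	 * escapes if s_d_op is assigned after the root has been allocated. */
	static void demo_set_default(struct super_block *sb)
	{
		sb->s_d_op = &demo_dentry_ops;
	}
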
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 4a45633..0da8da2 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -44,7 +44,7 @@
 	int error;
 	int had_lock = 0;
 
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	parent = dget_parent(dentry);
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 97012ec..9023db8 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -126,12 +126,7 @@
 
 static struct dentry *gfs2_get_parent(struct dentry *child)
 {
-	struct dentry *dentry;
-
-	dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &gfs2_dops);
-	return dentry;
+	return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
 }
 
 static struct dentry *gfs2_get_dentry(struct super_block *sb,
@@ -139,7 +134,6 @@
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct inode *inode;
-	struct dentry *dentry;
 
 	inode = gfs2_ilookup(sb, inum->no_addr);
 	if (inode) {
@@ -156,10 +150,7 @@
 		return ERR_CAST(inode);
 
 out_inode:
-	dentry = d_obtain_alias(inode);
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &gfs2_dops);
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index fca6689..7cfdcb9 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -19,6 +19,8 @@
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/ext2_fs.h>
+#include <linux/falloc.h>
+#include <linux/swap.h>
 #include <linux/crc32.h>
 #include <linux/writeback.h>
 #include <asm/uaccess.h>
@@ -610,6 +612,260 @@
 	return generic_file_aio_write(iocb, iov, nr_segs, pos);
 }
 
+static void empty_write_end(struct page *page, unsigned from,
+			   unsigned to)
+{
+	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+
+	page_zero_new_buffers(page, from, to);
+	flush_dcache_page(page);
+	mark_page_accessed(page);
+
+	if (!gfs2_is_writeback(ip))
+		gfs2_page_add_databufs(ip, page, from, to);
+
+	block_commit_write(page, from, to);
+}
+
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+{
+	unsigned start, end, next;
+	struct buffer_head *bh, *head;
+	int error;
+
+	if (!page_has_buffers(page)) {
+		error = __block_write_begin(page, from, to - from, gfs2_block_map);
+		if (unlikely(error))
+			return error;
+
+		empty_write_end(page, from, to);
+		return 0;
+	}
+
+	bh = head = page_buffers(page);
+	next = end = 0;
+	while (next < from) {
+		next += bh->b_size;
+		bh = bh->b_this_page;
+	}
+	start = next;
+	do {
+		next += bh->b_size;
+		if (buffer_mapped(bh)) {
+			if (end) {
+				error = __block_write_begin(page, start, end - start,
+							    gfs2_block_map);
+				if (unlikely(error))
+					return error;
+				empty_write_end(page, start, end);
+				end = 0;
+			}
+			start = next;
+		}
+		else
+			end = next;
+		bh = bh->b_this_page;
+	} while (next < to);
+
+	if (end) {
+		error = __block_write_begin(page, start, end - start, gfs2_block_map);
+		if (unlikely(error))
+			return error;
+		empty_write_end(page, start, end);
+	}
+
+	return 0;
+}
+
+static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
+			   int mode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct buffer_head *dibh;
+	int error;
+	u64 start = offset >> PAGE_CACHE_SHIFT;
+	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
+	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+	pgoff_t curr;
+	struct page *page;
+	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
+	unsigned int from, to;
+
+	if (!end_offset)
+		end_offset = PAGE_CACHE_SIZE;
+
+	error = gfs2_meta_inode_buffer(ip, &dibh);
+	if (unlikely(error))
+		goto out;
+
+	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+
+	if (gfs2_is_stuffed(ip)) {
+		error = gfs2_unstuff_dinode(ip, NULL);
+		if (unlikely(error))
+			goto out;
+	}
+
+	curr = start;
+	offset = start << PAGE_CACHE_SHIFT;
+	from = start_offset;
+	to = PAGE_CACHE_SIZE;
+	while (curr <= end) {
+		page = grab_cache_page_write_begin(inode->i_mapping, curr,
+						   AOP_FLAG_NOFS);
+		if (unlikely(!page)) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		if (curr == end)
+			to = end_offset;
+		error = write_empty_blocks(page, from, to);
+		if (!error && offset + to > inode->i_size &&
+		    !(mode & FALLOC_FL_KEEP_SIZE)) {
+			i_size_write(inode, offset + to);
+		}
+		unlock_page(page);
+		page_cache_release(page);
+		if (error)
+			goto out;
+		curr++;
+		offset += PAGE_CACHE_SIZE;
+		from = 0;
+	}
+
+	gfs2_dinode_out(ip, dibh->b_data);
+	mark_inode_dirty(inode);
+
+	brelse(dibh);
+
+out:
+	return error;
+}
+
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
+			    unsigned int *data_blocks, unsigned int *ind_blocks)
+{
+	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
+	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
+
+	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
+		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
+		max_data -= tmp;
+	}
+	/* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
+	   so it might end up with fewer data blocks */
+	if (max_data <= *data_blocks)
+		return;
+	*data_blocks = max_data;
+	*ind_blocks = max_blocks - max_data;
+	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
+	if (*len > max) {
+		*len = max;
+		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
+	}
+}
+
+static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
+			   loff_t len)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_inode *ip = GFS2_I(inode);
+	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+	loff_t bytes, max_bytes;
+	struct gfs2_alloc *al;
+	int error;
+	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
+	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
+
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode & ~FALLOC_FL_KEEP_SIZE)
+		return -EOPNOTSUPP;
+
+	offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
+		 sdp->sd_sb.sb_bsize_shift;
+
+	len = next - offset;
+	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
+	if (!bytes)
+		bytes = UINT_MAX;
+
+	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+	error = gfs2_glock_nq(&ip->i_gh);
+	if (unlikely(error))
+		goto out_uninit;
+
+	if (!gfs2_write_alloc_required(ip, offset, len))
+		goto out_unlock;
+
+	while (len > 0) {
+		if (len < bytes)
+			bytes = len;
+		al = gfs2_alloc_get(ip);
+		if (!al) {
+			error = -ENOMEM;
+			goto out_unlock;
+		}
+
+		error = gfs2_quota_lock_check(ip);
+		if (error)
+			goto out_alloc_put;
+
+retry:
+		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+
+		al->al_requested = data_blocks + ind_blocks;
+		error = gfs2_inplace_reserve(ip);
+		if (error) {
+			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
+				bytes >>= 1;
+				goto retry;
+			}
+			goto out_qunlock;
+		}
+		max_bytes = bytes;
+		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
+		al->al_requested = data_blocks + ind_blocks;
+
+		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
+			  RES_RG_HDR + gfs2_rg_blocks(al);
+		if (gfs2_is_jdata(ip))
+			rblocks += data_blocks ? data_blocks : 1;
+
+		error = gfs2_trans_begin(sdp, rblocks,
+					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+		if (error)
+			goto out_trans_fail;
+
+		error = fallocate_chunk(inode, offset, max_bytes, mode);
+		gfs2_trans_end(sdp);
+
+		if (error)
+			goto out_trans_fail;
+
+		len -= max_bytes;
+		offset += max_bytes;
+		gfs2_inplace_release(ip);
+		gfs2_quota_unlock(ip);
+		gfs2_alloc_put(ip);
+	}
+	goto out_unlock;
+
+out_trans_fail:
+	gfs2_inplace_release(ip);
+out_qunlock:
+	gfs2_quota_unlock(ip);
+out_alloc_put:
+	gfs2_alloc_put(ip);
+out_unlock:
+	gfs2_glock_dq(&ip->i_gh);
+out_uninit:
+	gfs2_holder_uninit(&ip->i_gh);
+	return error;
+}
+
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
 
 /**
@@ -765,6 +1021,7 @@
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
 	.setlease	= gfs2_setlease,
+	.fallocate	= gfs2_fallocate,
 };
 
 const struct file_operations gfs2_dir_fops = {
@@ -794,6 +1051,7 @@
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
 	.setlease	= generic_setlease,
+	.fallocate	= gfs2_fallocate,
 };
 
 const struct file_operations gfs2_dir_fops_nolock = {
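
gfs2_fallocate() above reserves space one chunk at a time and, on -ENOSPC, halves the chunk and retries until it is down to a single filesystem block. That control flow (try a big chunk, shrink on failure, advance on success) is easy to model outside the kernel; the sketch below is a user-space caricature with an invented reserve() helper, showing only the halving loop, not the gfs2 quota and transaction machinery.

	#include <stdio.h>
	#include <errno.h>

	#define BLOCK_SIZE 4096LL

	/* Toy reservation: the "resource group" grants at most three blocks
	 * per request, so oversized requests fail with -ENOSPC. */
	static long long largest_grant = 3 * BLOCK_SIZE;

	static int reserve(long long bytes)
	{
		return bytes > largest_grant ? -ENOSPC : 0;
	}

	static int demo_fallocate(long long offset, long long len)
	{
		long long bytes = 8 * BLOCK_SIZE;	/* optimistic first chunk */

		while (len > 0) {
			int err;

			if (len < bytes)
				bytes = len;
	retry:
			err = reserve(bytes);
			if (err) {
				if (err == -ENOSPC && bytes > BLOCK_SIZE) {
					bytes >>= 1;	/* halve the chunk and retry */
					goto retry;
				}
				return err;
			}
			printf("wrote %lld bytes at offset %lld\n", bytes, offset);
			offset += bytes;
			len -= bytes;
		}
		return 0;
	}

	int main(void)
	{
		return demo_fallocate(0, 12 * BLOCK_SIZE);
	}
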
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 08a8beb..7cd9a5a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1779,11 +1779,11 @@
 #endif
 
 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+						WQ_MEM_RECLAIM | WQ_FREEZABLE,
 						0);
 	if (IS_ERR(gfs2_delete_workqueue)) {
 		destroy_workqueue(glock_workqueue);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2232b3c..7aa7d4f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -74,16 +74,14 @@
 }
 
 /**
- * GFS2 lookup code fills in vfs inode contents based on info obtained
- * from directory entry inside gfs2_inode_lookup(). This has caused issues
- * with NFS code path since its get_dentry routine doesn't have the relevant
- * directory entry when gfs2_inode_lookup() is invoked. Part of the code
- * segment inside gfs2_inode_lookup code needs to get moved around.
+ * gfs2_set_iop - Sets inode operations
+ * @inode: The inode with correct i_mode filled in
  *
- * Clears I_NEW as well.
- **/
+ * GFS2 lookup code fills in vfs inode contents based on info obtained
+ * from directory entry inside gfs2_inode_lookup().
+ */
 
-void gfs2_set_iop(struct inode *inode)
+static void gfs2_set_iop(struct inode *inode)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	umode_t mode = inode->i_mode;
@@ -106,8 +104,6 @@
 		inode->i_op = &gfs2_file_iops;
 		init_special_inode(inode, inode->i_mode, inode->i_rdev);
 	}
-
-	unlock_new_inode(inode);
 }
 
 /**
@@ -119,10 +115,8 @@
  * Returns: A VFS inode, or an error
  */
 
-struct inode *gfs2_inode_lookup(struct super_block *sb,
-				unsigned int type,
-				u64 no_addr,
-				u64 no_formal_ino)
+struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+				u64 no_addr, u64 no_formal_ino)
 {
 	struct inode *inode;
 	struct gfs2_inode *ip;
@@ -152,51 +146,37 @@
 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		if (unlikely(error))
 			goto fail_iopen;
-		ip->i_iopen_gh.gh_gl->gl_object = ip;
 
+		ip->i_iopen_gh.gh_gl->gl_object = ip;
 		gfs2_glock_put(io_gl);
 		io_gl = NULL;
 
-		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
-			goto gfs2_nfsbypass;
-
-		inode->i_mode = DT2IF(type);
-
-		/*
-		 * We must read the inode in order to work out its type in
-		 * this case. Note that this doesn't happen often as we normally
-		 * know the type beforehand. This code path only occurs during
-		 * unlinked inode recovery (where it is safe to do this glock,
-		 * which is not true in the general case).
-		 */
 		if (type == DT_UNKNOWN) {
-			struct gfs2_holder gh;
-			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-			if (unlikely(error))
-				goto fail_glock;
-			/* Inode is now uptodate */
-			gfs2_glock_dq_uninit(&gh);
+			/* Inode glock must be locked already */
+			error = gfs2_inode_refresh(GFS2_I(inode));
+			if (error)
+				goto fail_refresh;
+		} else {
+			inode->i_mode = DT2IF(type);
 		}
 
 		gfs2_set_iop(inode);
+		unlock_new_inode(inode);
 	}
 
-gfs2_nfsbypass:
 	return inode;
-fail_glock:
-	gfs2_glock_dq(&ip->i_iopen_gh);
+
+fail_refresh:
+	ip->i_iopen_gh.gh_gl->gl_object = NULL;
+	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_iopen:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
 fail_put:
-	if (inode->i_state & I_NEW)
-		ip->i_gl->gl_object = NULL;
+	ip->i_gl->gl_object = NULL;
 	gfs2_glock_put(ip->i_gl);
 fail:
-	if (inode->i_state & I_NEW)
-		iget_failed(inode);
-	else
-		iput(inode);
+	iget_failed(inode);
 	return ERR_PTR(error);
 }
 
@@ -221,14 +201,6 @@
 	if (IS_ERR(inode))
 		goto fail;
 
-	error = gfs2_inode_refresh(GFS2_I(inode));
-	if (error)
-		goto fail_iput;
-
-	/* Pick up the works we bypass in gfs2_inode_lookup */
-	if (inode->i_state & I_NEW) 
-		gfs2_set_iop(inode);
-
 	/* Two extra checks for NFS only */
 	if (no_formal_ino) {
 		error = -ESTALE;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 732a183..3e00a66 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -96,7 +96,6 @@
 	return -EIO;
 }
 
-extern void gfs2_set_iop(struct inode *inode);
 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 
 				       u64 no_addr, u64 no_formal_ino);
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index ebef7ab..72c31a3 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -59,14 +59,7 @@
 	struct address_space *mapping = (struct address_space *)(gl + 1);
 
 	gfs2_init_glock_once(gl);
-	memset(mapping, 0, sizeof(*mapping));
-	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-	spin_lock_init(&mapping->tree_lock);
-	spin_lock_init(&mapping->i_mmap_lock);
-	INIT_LIST_HEAD(&mapping->private_list);
-	spin_lock_init(&mapping->private_lock);
-	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+	address_space_init_once(mapping);
 }
 
 /**
@@ -144,7 +137,7 @@
 
 	error = -ENOMEM;
 	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-					  WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
 	if (!gfs_recovery_wq)
 		goto fail_wq;
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 2aeabd4..777927c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -440,7 +440,6 @@
 		iput(inode);
 		return -ENOMEM;
 	}
-	d_set_d_op(dentry, &gfs2_dops);
 	*dptr = dentry;
 	return 0;
 }
@@ -1106,6 +1105,7 @@
 
 	sb->s_magic = GFS2_MAGIC;
 	sb->s_op = &gfs2_super_ops;
+	sb->s_d_op = &gfs2_dops;
 	sb->s_export_op = &gfs2_export_ops;
 	sb->s_xattr = gfs2_xattr_handlers;
 	sb->s_qcop = &gfs2_quotactl_ops;
@@ -1268,7 +1268,7 @@
 {
 	struct block_device *bdev;
 	struct super_block *s;
-	fmode_t mode = FMODE_READ;
+	fmode_t mode = FMODE_READ | FMODE_EXCL;
 	int error;
 	struct gfs2_args args;
 	struct gfs2_sbd *sdp;
@@ -1276,7 +1276,7 @@
 	if (!(flags & MS_RDONLY))
 		mode |= FMODE_WRITE;
 
-	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
+	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
 	if (IS_ERR(bdev))
 		return ERR_CAST(bdev);
 
@@ -1298,7 +1298,7 @@
 		goto error_bdev;
 
 	if (s->s_root)
-		close_bdev_exclusive(bdev, mode);
+		blkdev_put(bdev, mode);
 
 	memset(&args, 0, sizeof(args));
 	args.ar_quota = GFS2_QUOTA_DEFAULT;
@@ -1342,7 +1342,7 @@
 	deactivate_locked_super(s);
 	return ERR_PTR(error);
 error_bdev:
-	close_bdev_exclusive(bdev, mode);
+	blkdev_put(bdev, mode);
 	return ERR_PTR(error);
 }
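
The open_bdev_exclusive()/close_bdev_exclusive() pair (and, in the jfs hunk further down, open_by_devnum() plus bd_claim()) is replaced in this series by blkdev_get_by_path()/blkdev_get_by_dev() with FMODE_EXCL and a holder argument; blkdev_put() must then be passed the same mode so the exclusive claim is released. A minimal sketch of the new calling convention, with a made-up caller and holder:

	#include <linux/fs.h>
	#include <linux/blkdev.h>
	#include <linux/err.h>

	static int demo_open_log_device(const char *dev_name, void *holder)
	{
		/* FMODE_EXCL makes the get fail if someone else holds the device. */
		fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
		struct block_device *bdev;

		bdev = blkdev_get_by_path(dev_name, mode, holder);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		/* ... use the device ... */

		blkdev_put(bdev, mode);	/* same mode, so the exclusive claim drops */
		return 0;
	}
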
 
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 1501db4..d8b26ac 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -18,8 +18,6 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/fiemap.h>
-#include <linux/swap.h>
-#include <linux/falloc.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -106,8 +104,6 @@
 {
 	struct inode *inode = NULL;
 
-	d_set_d_op(dentry, &gfs2_dops);
-
 	inode = gfs2_lookupi(dir, &dentry->d_name, 0);
 	if (inode && IS_ERR(inode))
 		return ERR_CAST(inode);
@@ -1259,257 +1255,6 @@
 	return ret;
 }
 
-static void empty_write_end(struct page *page, unsigned from,
-			   unsigned to)
-{
-	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-
-	page_zero_new_buffers(page, from, to);
-	flush_dcache_page(page);
-	mark_page_accessed(page);
-
-	if (!gfs2_is_writeback(ip))
-		gfs2_page_add_databufs(ip, page, from, to);
-
-	block_commit_write(page, from, to);
-}
-
-
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
-{
-	unsigned start, end, next;
-	struct buffer_head *bh, *head;
-	int error;
-
-	if (!page_has_buffers(page)) {
-		error = __block_write_begin(page, from, to - from, gfs2_block_map);
-		if (unlikely(error))
-			return error;
-
-		empty_write_end(page, from, to);
-		return 0;
-	}
-
-	bh = head = page_buffers(page);
-	next = end = 0;
-	while (next < from) {
-		next += bh->b_size;
-		bh = bh->b_this_page;
-	}
-	start = next;
-	do {
-		next += bh->b_size;
-		if (buffer_mapped(bh)) {
-			if (end) {
-				error = __block_write_begin(page, start, end - start,
-							    gfs2_block_map);
-				if (unlikely(error))
-					return error;
-				empty_write_end(page, start, end);
-				end = 0;
-			}
-			start = next;
-		}
-		else
-			end = next;
-		bh = bh->b_this_page;
-	} while (next < to);
-
-	if (end) {
-		error = __block_write_begin(page, start, end - start, gfs2_block_map);
-		if (unlikely(error))
-			return error;
-		empty_write_end(page, start, end);
-	}
-
-	return 0;
-}
-
-static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
-			   int mode)
-{
-	struct gfs2_inode *ip = GFS2_I(inode);
-	struct buffer_head *dibh;
-	int error;
-	u64 start = offset >> PAGE_CACHE_SHIFT;
-	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
-	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
-	pgoff_t curr;
-	struct page *page;
-	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
-	unsigned int from, to;
-
-	if (!end_offset)
-		end_offset = PAGE_CACHE_SIZE;
-
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-	if (unlikely(error))
-		goto out;
-
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-
-	if (gfs2_is_stuffed(ip)) {
-		error = gfs2_unstuff_dinode(ip, NULL);
-		if (unlikely(error))
-			goto out;
-	}
-
-	curr = start;
-	offset = start << PAGE_CACHE_SHIFT;
-	from = start_offset;
-	to = PAGE_CACHE_SIZE;
-	while (curr <= end) {
-		page = grab_cache_page_write_begin(inode->i_mapping, curr,
-						   AOP_FLAG_NOFS);
-		if (unlikely(!page)) {
-			error = -ENOMEM;
-			goto out;
-		}
-
-		if (curr == end)
-			to = end_offset;
-		error = write_empty_blocks(page, from, to);
-		if (!error && offset + to > inode->i_size &&
-		    !(mode & FALLOC_FL_KEEP_SIZE)) {
-			i_size_write(inode, offset + to);
-		}
-		unlock_page(page);
-		page_cache_release(page);
-		if (error)
-			goto out;
-		curr++;
-		offset += PAGE_CACHE_SIZE;
-		from = 0;
-	}
-
-	gfs2_dinode_out(ip, dibh->b_data);
-	mark_inode_dirty(inode);
-
-	brelse(dibh);
-
-out:
-	return error;
-}
-
-static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
-			    unsigned int *data_blocks, unsigned int *ind_blocks)
-{
-	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
-	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
-
-	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
-		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
-		max_data -= tmp;
-	}
-	/* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
-	   so it might end up with fewer data blocks */
-	if (max_data <= *data_blocks)
-		return;
-	*data_blocks = max_data;
-	*ind_blocks = max_blocks - max_data;
-	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
-	if (*len > max) {
-		*len = max;
-		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
-	}
-}
-
-static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset,
-			   loff_t len)
-{
-	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct gfs2_inode *ip = GFS2_I(inode);
-	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
-	loff_t bytes, max_bytes;
-	struct gfs2_alloc *al;
-	int error;
-	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
-	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
-
-	offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
-		 sdp->sd_sb.sb_bsize_shift;
-
-	len = next - offset;
-	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
-	if (!bytes)
-		bytes = UINT_MAX;
-
-	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
-	error = gfs2_glock_nq(&ip->i_gh);
-	if (unlikely(error))
-		goto out_uninit;
-
-	if (!gfs2_write_alloc_required(ip, offset, len))
-		goto out_unlock;
-
-	while (len > 0) {
-		if (len < bytes)
-			bytes = len;
-		al = gfs2_alloc_get(ip);
-		if (!al) {
-			error = -ENOMEM;
-			goto out_unlock;
-		}
-
-		error = gfs2_quota_lock_check(ip);
-		if (error)
-			goto out_alloc_put;
-
-retry:
-		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
-
-		al->al_requested = data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip);
-		if (error) {
-			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
-				bytes >>= 1;
-				goto retry;
-			}
-			goto out_qunlock;
-		}
-		max_bytes = bytes;
-		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
-		al->al_requested = data_blocks + ind_blocks;
-
-		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
-			  RES_RG_HDR + gfs2_rg_blocks(al);
-		if (gfs2_is_jdata(ip))
-			rblocks += data_blocks ? data_blocks : 1;
-
-		error = gfs2_trans_begin(sdp, rblocks,
-					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
-		if (error)
-			goto out_trans_fail;
-
-		error = fallocate_chunk(inode, offset, max_bytes, mode);
-		gfs2_trans_end(sdp);
-
-		if (error)
-			goto out_trans_fail;
-
-		len -= max_bytes;
-		offset += max_bytes;
-		gfs2_inplace_release(ip);
-		gfs2_quota_unlock(ip);
-		gfs2_alloc_put(ip);
-	}
-	goto out_unlock;
-
-out_trans_fail:
-	gfs2_inplace_release(ip);
-out_qunlock:
-	gfs2_quota_unlock(ip);
-out_alloc_put:
-	gfs2_alloc_put(ip);
-out_unlock:
-	gfs2_glock_dq(&ip->i_gh);
-out_uninit:
-	gfs2_holder_uninit(&ip->i_gh);
-	return error;
-}
-
-
 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len)
 {
@@ -1560,7 +1305,6 @@
 	.getxattr = gfs2_getxattr,
 	.listxattr = gfs2_listxattr,
 	.removexattr = gfs2_removexattr,
-	.fallocate = gfs2_fallocate,
 	.fiemap = gfs2_fiemap,
 };
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 16c2eca..ec73ed7 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1336,6 +1336,7 @@
 	if (error)
 		goto out_truncate;
 
+	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 	gfs2_glock_dq_wait(&ip->i_iopen_gh);
 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
 	error = gfs2_glock_nq(&ip->i_iopen_gh);
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index ea4aefe..b4d70b1 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -25,8 +25,6 @@
 	struct inode *inode = NULL;
 	int res;
 
-	d_set_d_op(dentry, &hfs_dentry_operations);
-
 	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
 	hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
 	res = hfs_brec_read(&fd, &rec, sizeof(rec));
@@ -240,46 +238,22 @@
 }
 
 /*
- * hfs_unlink()
+ * hfs_remove()
  *
- * This is the unlink() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * file, given the inode for the parent directory and the name
- * (and its length) of the existing file.
+ * This serves as both unlink() and rmdir() in the inode_operations
+ * structure for regular HFS directories.  The purpose is to delete
+ * an existing child, given the inode for the parent directory and
+ * the name (and its length) of the existing directory.
+ *
+ * HFS does not have hardlinks, so both rmdir and unlink set the
+ * link count to 0.  The only difference is the emptiness check.
  */
-static int hfs_unlink(struct inode *dir, struct dentry *dentry)
+static int hfs_remove(struct inode *dir, struct dentry *dentry)
 {
-	struct inode *inode;
+	struct inode *inode = dentry->d_inode;
 	int res;
 
-	inode = dentry->d_inode;
-	res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
-	if (res)
-		return res;
-
-	drop_nlink(inode);
-	hfs_delete_inode(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
-	mark_inode_dirty(inode);
-
-	return res;
-}
-
-/*
- * hfs_rmdir()
- *
- * This is the rmdir() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * directory, given the inode for the parent directory and the name
- * (and its length) of the existing directory.
- */
-static int hfs_rmdir(struct inode *dir, struct dentry *dentry)
-{
-	struct inode *inode;
-	int res;
-
-	inode = dentry->d_inode;
-	if (inode->i_size != 2)
+	if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
 		return -ENOTEMPTY;
 	res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
 	if (res)
@@ -309,7 +283,7 @@
 
 	/* Unlink destination if it already exists */
 	if (new_dentry->d_inode) {
-		res = hfs_unlink(new_dir, new_dentry);
+		res = hfs_remove(new_dir, new_dentry);
 		if (res)
 			return res;
 	}
@@ -334,9 +308,9 @@
 const struct inode_operations hfs_dir_inode_operations = {
 	.create		= hfs_create,
 	.lookup		= hfs_lookup,
-	.unlink		= hfs_unlink,
+	.unlink		= hfs_remove,
 	.mkdir		= hfs_mkdir,
-	.rmdir		= hfs_rmdir,
+	.rmdir		= hfs_remove,
 	.rename		= hfs_rename,
 	.setattr	= hfs_inode_setattr,
 };
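
Because HFS has no hard links, unlink and rmdir above collapse into one hfs_remove() whose only directory-specific step is the emptiness test (i_size == 2 for an empty directory, per the check above). The shape of such a shared handler, with a stand-in for the catalog deletion and without the HFS-internal teardown:

	#include <linux/fs.h>
	#include <linux/dcache.h>

	/* Stand-in for hfs_cat_delete(): pretend the catalog record is gone. */
	static int demo_cat_delete(struct inode *dir, struct dentry *dentry)
	{
		return 0;
	}

	/* One body wired into both .unlink and .rmdir. */
	static int demo_remove(struct inode *dir, struct dentry *dentry)
	{
		struct inode *inode = dentry->d_inode;
		int res;

		if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
			return -ENOTEMPTY;
		res = demo_cat_delete(dir, dentry);
		if (res)
			return res;
		drop_nlink(inode);
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
		return 0;
	}
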
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 0bef62a..1b55f70 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -429,13 +429,12 @@
 	if (!root_inode)
 		goto bail_no_root;
 
+	sb->s_d_op = &hfs_dentry_operations;
 	res = -ENOMEM;
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root)
 		goto bail_iput;
 
-	d_set_d_op(sb->s_root, &hfs_dentry_operations);
-
 	/* everything's okay */
 	return 0;
 
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index f896dc84..4df5059 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -37,7 +37,6 @@
 
 	sb = dir->i_sb;
 
-	d_set_d_op(dentry, &hfsplus_dentry_operations);
 	dentry->d_fsdata = NULL;
 	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 	hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 52a0bca..b1991a2 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -397,8 +397,8 @@
 	u32 start, len, goal;
 	int res;
 
-	if (sbi->total_blocks - sbi->free_blocks + 8 >
-			sbi->alloc_file->i_size * 8) {
+	if (sbi->alloc_file->i_size * 8 <
+	    sbi->total_blocks - sbi->free_blocks + 8) {
 		/* extend alloc file */
 		printk(KERN_ERR "hfs: extend alloc file! "
 				"(%llu,%u,%u)\n",
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index d66ad11..40ad88c 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -134,7 +134,7 @@
 	res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
 				 data, READ);
 	if (res)
-		return res;
+		goto out;
 
 	switch (be16_to_cpu(*((__be16 *)data))) {
 	case HFS_OLD_PMAP_MAGIC:
@@ -147,7 +147,7 @@
 		res = -ENOENT;
 		break;
 	}
-
+out:
 	kfree(data);
 	return res;
 }
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 6ee6ad2..b49b555 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -338,20 +338,22 @@
 	struct inode *root, *inode;
 	struct qstr str;
 	struct nls_table *nls = NULL;
-	int err = -EINVAL;
+	int err;
 
+	err = -EINVAL;
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi)
-		return -ENOMEM;
+		goto out;
 
 	sb->s_fs_info = sbi;
 	mutex_init(&sbi->alloc_mutex);
 	mutex_init(&sbi->vh_mutex);
 	hfsplus_fill_defaults(sbi);
+
+	err = -EINVAL;
 	if (!hfsplus_parse_options(data, sbi)) {
 		printk(KERN_ERR "hfs: unable to parse mount options\n");
-		err = -EINVAL;
-		goto cleanup;
+		goto out_unload_nls;
 	}
 
 	/* temporarily use utf8 to correctly find the hidden dir below */
@@ -359,16 +361,14 @@
 	sbi->nls = load_nls("utf8");
 	if (!sbi->nls) {
 		printk(KERN_ERR "hfs: unable to load nls for utf8\n");
-		err = -EINVAL;
-		goto cleanup;
+		goto out_unload_nls;
 	}
 
 	/* Grab the volume header */
 	if (hfsplus_read_wrapper(sb)) {
 		if (!silent)
 			printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n");
-		err = -EINVAL;
-		goto cleanup;
+		goto out_unload_nls;
 	}
 	vhdr = sbi->s_vhdr;
 
@@ -377,7 +377,7 @@
 	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
 	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
 		printk(KERN_ERR "hfs: wrong filesystem version\n");
-		goto cleanup;
+		goto out_free_vhdr;
 	}
 	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
 	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
@@ -421,19 +421,19 @@
 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 	if (!sbi->ext_tree) {
 		printk(KERN_ERR "hfs: failed to load extents file\n");
-		goto cleanup;
+		goto out_free_vhdr;
 	}
 	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
 	if (!sbi->cat_tree) {
 		printk(KERN_ERR "hfs: failed to load catalog file\n");
-		goto cleanup;
+		goto out_close_ext_tree;
 	}
 
 	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
 	if (IS_ERR(inode)) {
 		printk(KERN_ERR "hfs: failed to load allocation file\n");
 		err = PTR_ERR(inode);
-		goto cleanup;
+		goto out_close_cat_tree;
 	}
 	sbi->alloc_file = inode;
 
@@ -442,15 +442,8 @@
 	if (IS_ERR(root)) {
 		printk(KERN_ERR "hfs: failed to load root directory\n");
 		err = PTR_ERR(root);
-		goto cleanup;
+		goto out_put_alloc_file;
 	}
-	sb->s_root = d_alloc_root(root);
-	if (!sb->s_root) {
-		iput(root);
-		err = -ENOMEM;
-		goto cleanup;
-	}
-	d_set_d_op(sb->s_root, &hfsplus_dentry_operations);
 
 	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
 	str.name = HFSP_HIDDENDIR_NAME;
@@ -459,46 +452,69 @@
 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
 		hfs_find_exit(&fd);
 		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
-			goto cleanup;
+			goto out_put_root;
 		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
 		if (IS_ERR(inode)) {
 			err = PTR_ERR(inode);
-			goto cleanup;
+			goto out_put_root;
 		}
 		sbi->hidden_dir = inode;
 	} else
 		hfs_find_exit(&fd);
 
-	if (sb->s_flags & MS_RDONLY)
-		goto out;
+	if (!(sb->s_flags & MS_RDONLY)) {
+		/*
+		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
+		 * all three are registered with Apple for our use
+		 */
+		vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
+		vhdr->modify_date = hfsp_now2mt();
+		be32_add_cpu(&vhdr->write_count, 1);
+		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
+		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+		hfsplus_sync_fs(sb, 1);
 
-	/* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
-	 * all three are registered with Apple for our use
-	 */
-	vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
-	vhdr->modify_date = hfsp_now2mt();
-	be32_add_cpu(&vhdr->write_count, 1);
-	vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
-	vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
-	hfsplus_sync_fs(sb, 1);
+		if (!sbi->hidden_dir) {
+			mutex_lock(&sbi->vh_mutex);
+			sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+			hfsplus_create_cat(sbi->hidden_dir->i_ino, root, &str,
+					   sbi->hidden_dir);
+			mutex_unlock(&sbi->vh_mutex);
 
-	if (!sbi->hidden_dir) {
-		mutex_lock(&sbi->vh_mutex);
-		sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
-		hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode,
-				   &str, sbi->hidden_dir);
-		mutex_unlock(&sbi->vh_mutex);
-
-		hfsplus_mark_inode_dirty(sbi->hidden_dir, HFSPLUS_I_CAT_DIRTY);
+			hfsplus_mark_inode_dirty(sbi->hidden_dir,
+						 HFSPLUS_I_CAT_DIRTY);
+		}
 	}
-out:
+
+	sb->s_d_op = &hfsplus_dentry_operations;
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		err = -ENOMEM;
+		goto out_put_hidden_dir;
+	}
+
 	unload_nls(sbi->nls);
 	sbi->nls = nls;
 	return 0;
 
-cleanup:
-	hfsplus_put_super(sb);
+out_put_hidden_dir:
+	iput(sbi->hidden_dir);
+out_put_root:
+	iput(root);
+out_put_alloc_file:
+	iput(sbi->alloc_file);
+out_close_cat_tree:
+	hfs_btree_close(sbi->cat_tree);
+out_close_ext_tree:
+	hfs_btree_close(sbi->ext_tree);
+out_free_vhdr:
+	kfree(sbi->s_vhdr);
+	kfree(sbi->s_backup_vhdr);
+out_unload_nls:
+	unload_nls(sbi->nls);
 	unload_nls(nls);
+	kfree(sbi);
+out:
 	return err;
 }
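
The hfsplus mount path above trades the single catch-all "cleanup: hfsplus_put_super()" for an ordered ladder of labels, each undoing exactly one successful step, entered from the point of failure onward. The idiom reduced to three resources, with hypothetical acquire/release pairs:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct demo_ctx { void *a, *b, *c; };

	static int demo_setup(struct demo_ctx *ctx)
	{
		int err = -ENOMEM;

		ctx->a = kzalloc(64, GFP_KERNEL);
		if (!ctx->a)
			goto out;
		ctx->b = kzalloc(64, GFP_KERNEL);
		if (!ctx->b)
			goto out_free_a;
		ctx->c = kzalloc(64, GFP_KERNEL);
		if (!ctx->c)
			goto out_free_b;
		return 0;

		/* Unwind in reverse order of acquisition; each label assumes
		 * everything above it succeeded and everything below did not. */
	out_free_b:
		kfree(ctx->b);
	out_free_a:
		kfree(ctx->a);
	out:
		return err;
	}
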
 
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 1962317..3031d81 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -167,7 +167,7 @@
 		break;
 	case cpu_to_be16(HFSP_WRAP_MAGIC):
 		if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
-			goto out;
+			goto out_free_backup_vhdr;
 		wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
 		part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
 		part_size = wd.embed_count * wd.ablk_size;
@@ -179,7 +179,7 @@
 		 * (should do this only for cdrom/loop though)
 		 */
 		if (hfs_part_find(sb, &part_start, &part_size))
-			goto out;
+			goto out_free_backup_vhdr;
 		goto reread;
 	}
 
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d3244d9..2638c834e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -612,7 +612,6 @@
 		goto out_put;
 
 	d_add(dentry, inode);
-	d_set_d_op(dentry, &hostfs_dentry_ops);
 	return NULL;
 
  out_put:
@@ -922,6 +921,7 @@
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = HOSTFS_SUPER_MAGIC;
 	sb->s_op = &hostfs_sbops;
+	sb->s_d_op = &hostfs_dentry_ops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 
 	/* NULL is printed as <NULL> by sprintf: avoid that. */
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 32c13a9..05d4816 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -58,12 +58,7 @@
 	return 0;
 }
 
-static const struct dentry_operations hpfs_dentry_operations = {
+const struct dentry_operations hpfs_dentry_operations = {
 	.d_hash		= hpfs_hash_dentry,
 	.d_compare	= hpfs_compare_dentry,
 };
-
-void hpfs_set_dentry_operations(struct dentry *dentry)
-{
-	d_set_d_op(dentry, &hpfs_dentry_operations);
-}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2338130..d32f63a 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -298,7 +298,6 @@
 
 	end:
 	end_add:
-	hpfs_set_dentry_operations(dentry);
 	unlock_kernel();
 	d_add(dentry, result);
 	return NULL;
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 2fee17d..1c43dbe 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -233,7 +233,7 @@
 
 /* dentry.c */
 
-void hpfs_set_dentry_operations(struct dentry *);
+extern const struct dentry_operations hpfs_dentry_operations;
 
 /* dir.c */
 
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 56f0da1..1ae35ba 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -281,7 +281,7 @@
 	    attr->ia_size != i_size_read(inode)) {
 		error = vmtruncate(inode, attr->ia_size);
 		if (error)
-			return error;
+			goto out_unlock;
 	}
 
 	setattr_copy(inode, attr);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 49935ba..b30426b 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -550,6 +550,7 @@
 	/* Fill superblock stuff */
 	s->s_magic = HPFS_SUPER_MAGIC;
 	s->s_op = &hpfs_sops;
+	s->s_d_op = &hpfs_dentry_operations;
 
 	sbi->sb_root = superblock->root;
 	sbi->sb_fs_size = superblock->n_sectors;
@@ -651,7 +652,6 @@
 		iput(root);
 		goto bail0;
 	}
-	hpfs_set_dentry_operations(s->s_root);
 
 	/*
 	 * find the root directory's . pointer & finish filling in the inode
diff --git a/fs/inode.c b/fs/inode.c
index da85e56..0647d80 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -295,6 +295,20 @@
 		call_rcu(&inode->i_rcu, i_callback);
 }
 
+void address_space_init_once(struct address_space *mapping)
+{
+	memset(mapping, 0, sizeof(*mapping));
+	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+	spin_lock_init(&mapping->tree_lock);
+	spin_lock_init(&mapping->i_mmap_lock);
+	INIT_LIST_HEAD(&mapping->private_list);
+	spin_lock_init(&mapping->private_lock);
+	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+	mutex_init(&mapping->unmap_mutex);
+}
+EXPORT_SYMBOL(address_space_init_once);
+
 /*
  * These are initializations that only need to be done
  * once, because the fields are idempotent across use
@@ -308,13 +322,7 @@
 	INIT_LIST_HEAD(&inode->i_devices);
 	INIT_LIST_HEAD(&inode->i_wb_list);
 	INIT_LIST_HEAD(&inode->i_lru);
-	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	spin_lock_init(&inode->i_data.tree_lock);
-	spin_lock_init(&inode->i_data.i_mmap_lock);
-	INIT_LIST_HEAD(&inode->i_data.private_list);
-	spin_lock_init(&inode->i_data.private_lock);
-	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
-	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
+	address_space_init_once(&inode->i_data);
 	i_size_ordered_init(inode);
 #ifdef CONFIG_FSNOTIFY
 	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
@@ -540,11 +548,14 @@
 /**
  * invalidate_inodes	- attempt to free all inodes on a superblock
  * @sb:		superblock to operate on
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Attempts to free all inodes for a given superblock.  If there were any
  * busy inodes return a non-zero value, else zero.
+ * If @kill_dirty is set, discard dirty inodes too, otherwise treat
+ * them as busy.
  */
-int invalidate_inodes(struct super_block *sb)
+int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 {
 	int busy = 0;
 	struct inode *inode, *next;
@@ -556,6 +567,10 @@
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
 			continue;
+		if (inode->i_state & I_DIRTY && !kill_dirty) {
+			busy = 1;
+			continue;
+		}
 		if (atomic_read(&inode->i_count)) {
 			busy = 1;
 			continue;
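
fs/inode.c now exports address_space_init_once() so that code which embeds an address_space outside a struct inode (the gfs2 glock slab earlier in this pull) can share the one-time initialisation instead of open-coding the field list. Typical use is from a slab constructor; a minimal sketch with a hypothetical object, assuming the declaration added by this series is visible via linux/fs.h:

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/init.h>

	/* Hypothetical object that carries its own address_space. */
	struct demo_obj {
		int state;
		struct address_space mapping;
	};

	/* Slab constructor: runs once as each object is set up, so only the
	 * fields that stay valid across object reuse belong here. */
	static void demo_init_once(void *foo)
	{
		struct demo_obj *obj = foo;

		obj->state = 0;
		address_space_init_once(&obj->mapping);
	}

	static struct kmem_cache *demo_cachep;

	static int __init demo_cache_create(void)
	{
		demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
						0, SLAB_HWCACHE_ALIGN, demo_init_once);
		return demo_cachep ? 0 : -ENOMEM;
	}
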
diff --git a/fs/internal.h b/fs/internal.h
index 9687c2e..9b976b5 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -70,6 +70,10 @@
 extern void release_mounts(struct list_head *);
 extern void umount_tree(struct vfsmount *, int, struct list_head *);
 extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
+extern int finish_automount(struct vfsmount *, struct path *);
+
+extern void mnt_make_longterm(struct vfsmount *);
+extern void mnt_make_shortterm(struct vfsmount *);
 
 extern void __init mnt_init(void);
 
@@ -108,4 +112,4 @@
  */
 extern int get_nr_dirty_inodes(void);
 extern void evict_inodes(struct super_block *);
-extern int invalidate_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index d6cc164..1eebeb7 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -86,7 +86,7 @@
 			    u64 phys, u64 len, u32 flags)
 {
 	struct fiemap_extent extent;
-	struct fiemap_extent *dest = fieinfo->fi_extents_start;
+	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
 
 	/* only count the extents */
 	if (fieinfo->fi_extents_max == 0) {
@@ -173,6 +173,7 @@
 static int ioctl_fiemap(struct file *filp, unsigned long arg)
 {
 	struct fiemap fiemap;
+	struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
 	struct fiemap_extent_info fieinfo = { 0, };
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	struct super_block *sb = inode->i_sb;
@@ -182,8 +183,7 @@
 	if (!inode->i_op->fiemap)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&fiemap, (struct fiemap __user *)arg,
-			   sizeof(struct fiemap)))
+	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
 		return -EFAULT;
 
 	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
@@ -196,7 +196,7 @@
 
 	fieinfo.fi_flags = fiemap.fm_flags;
 	fieinfo.fi_extents_max = fiemap.fm_extent_count;
-	fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
+	fieinfo.fi_extents_start = ufiemap->fm_extents;
 
 	if (fiemap.fm_extent_count != 0 &&
 	    !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
@@ -209,7 +209,7 @@
 	error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
 	fiemap.fm_flags = fieinfo.fi_flags;
 	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
-	if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
+	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
 		error = -EFAULT;
 
 	return error;
@@ -273,6 +273,13 @@
 		len = isize;
 	}
 
+	/*
+	 * Some filesystems can't deal with being asked to map less than
+	 * blocksize, so make sure our len is at least block length.
+	 */
+	if (logical_to_blk(inode, len) == 0)
+		len = blk_to_logical(inode, 1);
+
 	start_blk = logical_to_blk(inode, start);
 	last_blk = logical_to_blk(inode, start + len - 1);
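
The last ioctl.c hunk clamps the fiemap length to at least one filesystem block, because logical_to_blk() rounds a sub-block length down to zero blocks and some filesystems mishandle a zero-length mapping request. The arithmetic is just a shift by the block-size bits; a tiny stand-alone version of the clamp (helpers re-spelled here, not the fs/ioctl.c ones):

	#include <stdio.h>
	#include <stdint.h>

	static unsigned int blkbits = 12;	/* 4096-byte blocks */

	static uint64_t logical_to_blk(uint64_t offset) { return offset >> blkbits; }
	static uint64_t blk_to_logical(uint64_t blk)    { return blk << blkbits; }

	int main(void)
	{
		uint64_t len = 512;	/* caller asked for less than one block */

		/* Mirror of the clamp above: never ask the filesystem to map
		 * fewer logical bytes than one block. */
		if (logical_to_blk(len) == 0)
			len = blk_to_logical(1);

		printf("clamped len = %llu\n", (unsigned long long)len);	/* 4096 */
		return 0;
	}
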
 
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 844a790..a0f3833 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -939,17 +939,18 @@
 		goto out_iput;
 	}
 
-	/* get the root dentry */
-	s->s_root = d_alloc_root(inode);
-	if (!(s->s_root))
-		goto out_no_root;
-
 	table = 0;
 	if (joliet_level)
 		table += 2;
 	if (opt.check == 'r')
 		table++;
-	d_set_d_op(s->s_root, &isofs_dentry_ops[table]);
+
+	s->s_d_op = &isofs_dentry_ops[table];
+
+	/* get the root dentry */
+	s->s_root = d_alloc_root(inode);
+	if (!(s->s_root))
+		goto out_no_root;
 
 	kfree(opt.iocharset);
 
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 679a849..4fb3e80 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -172,8 +172,6 @@
 	struct inode *inode;
 	struct page *page;
 
-	d_set_d_op(dentry, dir->i_sb->s_root->d_op);
-
 	page = alloc_page(GFP_USER);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 846a3f3..5b2e4c3 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -207,7 +207,7 @@
 	 * the committing transaction.  Really, we only need to give it
 	 * committing_transaction->t_outstanding_credits plus "enough" for
 	 * the log control blocks.
-	 * Also, this test is inconsitent with the matching one in
+	 * Also, this test is inconsistent with the matching one in
 	 * journal_extend().
 	 */
 	if (__log_space_left(journal) < jbd_space_needed(journal)) {
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f837ba9..97e7346 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -43,6 +43,7 @@
 #include <linux/vmalloc.h>
 #include <linux/backing-dev.h>
 #include <linux/bitops.h>
+#include <linux/ratelimit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/jbd2.h>
@@ -93,6 +94,7 @@
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
+EXPORT_SYMBOL(jbd2_inode_cache);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
@@ -471,7 +473,8 @@
 }
 
 /*
- * Called under j_state_lock.  Returns true if a transaction commit was started.
+ * Called with j_state_lock locked for writing.
+ * Returns true if a transaction commit was started.
  */
 int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 {
@@ -518,11 +521,13 @@
 {
 	transaction_t *transaction = NULL;
 	tid_t tid;
+	int need_to_start = 0;
 
 	read_lock(&journal->j_state_lock);
 	if (journal->j_running_transaction && !current->journal_info) {
 		transaction = journal->j_running_transaction;
-		__jbd2_log_start_commit(journal, transaction->t_tid);
+		if (!tid_geq(journal->j_commit_request, transaction->t_tid))
+			need_to_start = 1;
 	} else if (journal->j_committing_transaction)
 		transaction = journal->j_committing_transaction;
 
@@ -533,6 +538,8 @@
 
 	tid = transaction->t_tid;
 	read_unlock(&journal->j_state_lock);
+	if (need_to_start)
+		jbd2_log_start_commit(journal, tid);
 	jbd2_log_wait_commit(journal, tid);
 	return 1;
 }
@@ -827,7 +834,7 @@
 
 	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
 	if (!journal)
-		goto fail;
+		return NULL;
 
 	init_waitqueue_head(&journal->j_wait_transaction_locked);
 	init_waitqueue_head(&journal->j_wait_logspace);
@@ -852,14 +859,12 @@
 	err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
 	if (err) {
 		kfree(journal);
-		goto fail;
+		return NULL;
 	}
 
 	spin_lock_init(&journal->j_history_lock);
 
 	return journal;
-fail:
-	return NULL;
 }
 
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
@@ -1982,7 +1987,6 @@
 static struct journal_head *journal_alloc_journal_head(void)
 {
 	struct journal_head *ret;
-	static unsigned long last_warning;
 
 #ifdef CONFIG_JBD2_DEBUG
 	atomic_inc(&nr_journal_heads);
@@ -1990,11 +1994,7 @@
 	ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
 	if (!ret) {
 		jbd_debug(1, "out of memory for journal_head\n");
-		if (time_after(jiffies, last_warning + 5*HZ)) {
-			printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-			       __func__);
-			last_warning = jiffies;
-		}
+		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
 		while (!ret) {
 			yield();
 			ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
@@ -2292,17 +2292,19 @@
 
 #endif
 
-struct kmem_cache *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
 
 static int __init journal_init_handle_cache(void)
 {
-	jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
-				sizeof(handle_t),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
+	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
 	if (jbd2_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD: failed to create handle cache\n");
+		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
+		return -ENOMEM;
+	}
+	jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
+	if (jbd2_inode_cache == NULL) {
+		printk(KERN_EMERG "JBD2: failed to create inode cache\n");
+		kmem_cache_destroy(jbd2_handle_cache);
 		return -ENOMEM;
 	}
 	return 0;
@@ -2312,6 +2314,9 @@
 {
 	if (jbd2_handle_cache)
 		kmem_cache_destroy(jbd2_handle_cache);
+	if (jbd2_inode_cache)
+		kmem_cache_destroy(jbd2_inode_cache);
+
 }
 
 /*
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 2bc4d5f..1cad869 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -299,10 +299,10 @@
 #ifdef CONFIG_JBD2_DEBUG
 		int dropped = info.end_transaction - 
 			be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
 		jbd_debug(1,
 			  "JBD: ignoring %d transaction%s from the journal.\n",
 			  dropped, (dropped == 1) ? "" : "s");
+#endif
 		journal->j_transaction_sequence = ++info.end_transaction;
 	}
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6bf0a24..1d11910 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -117,10 +117,10 @@
 static int start_this_handle(journal_t *journal, handle_t *handle,
 			     int gfp_mask)
 {
-	transaction_t *transaction;
-	int needed;
-	int nblocks = handle->h_buffer_credits;
-	transaction_t *new_transaction = NULL;
+	transaction_t	*transaction, *new_transaction = NULL;
+	tid_t		tid;
+	int		needed, need_to_start;
+	int		nblocks = handle->h_buffer_credits;
 
 	if (nblocks > journal->j_max_transaction_buffers) {
 		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
@@ -222,8 +222,11 @@
 		atomic_sub(nblocks, &transaction->t_outstanding_credits);
 		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
 				TASK_UNINTERRUPTIBLE);
-		__jbd2_log_start_commit(journal, transaction->t_tid);
+		tid = transaction->t_tid;
+		need_to_start = !tid_geq(journal->j_commit_request, tid);
 		read_unlock(&journal->j_state_lock);
+		if (need_to_start)
+			jbd2_log_start_commit(journal, tid);
 		schedule();
 		finish_wait(&journal->j_wait_transaction_locked, &wait);
 		goto repeat;
@@ -251,7 +254,7 @@
 	 * the committing transaction.  Really, we only need to give it
 	 * committing_transaction->t_outstanding_credits plus "enough" for
 	 * the log control blocks.
-	 * Also, this test is inconsitent with the matching one in
+	 * Also, this test is inconsistent with the matching one in
 	 * jbd2_journal_extend().
 	 */
 	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
@@ -340,9 +343,7 @@
 		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
-		goto out;
 	}
-out:
 	return handle;
 }
 EXPORT_SYMBOL(jbd2__journal_start);
@@ -444,7 +445,8 @@
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal = transaction->t_journal;
-	int ret;
+	tid_t		tid;
+	int		need_to_start, ret;
 
 	/* If we've had an abort of any type, don't even think about
 	 * actually doing the restart! */
@@ -467,8 +469,11 @@
 	spin_unlock(&transaction->t_handle_lock);
 
 	jbd_debug(2, "restarting handle %p\n", handle);
-	__jbd2_log_start_commit(journal, transaction->t_tid);
+	tid = transaction->t_tid;
+	need_to_start = !tid_geq(journal->j_commit_request, tid);
 	read_unlock(&journal->j_state_lock);
+	if (need_to_start)
+		jbd2_log_start_commit(journal, tid);
 
 	lock_map_release(&handle->h_lockdep_map);
 	handle->h_buffer_credits = nblocks;
@@ -589,7 +594,7 @@
 	transaction = handle->h_transaction;
 	journal = transaction->t_journal;
 
-	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
 
 	JBUFFER_TRACE(jh, "entry");
 repeat:
@@ -774,7 +779,7 @@
 		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
 			    "Possible IO failure.\n");
 		page = jh2bh(jh)->b_page;
-		offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+		offset = offset_in_page(jh2bh(jh)->b_data);
 		source = kmap_atomic(page, KM_USER0);
 		/* Fire data frozen trigger just before we copy the data */
 		jbd2_buffer_frozen_trigger(jh, source + offset,
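
Three jbd2 call sites above (jbd2_journal_force_commit_nested, start_this_handle, jbd2__journal_restart) stop calling __jbd2_log_start_commit() while holding j_state_lock for reading; they now record the target tid and whether a commit is needed (the tid_geq test) under the read lock, drop it, and only then call jbd2_log_start_commit(), which takes the lock for writing itself. The resulting "decide under the shared lock, act after dropping it" shape, with the journal internals abstracted into hypothetical names:

	#include <linux/spinlock.h>

	/* Wraparound-safe tid comparison, in the style of jbd2's tid_geq(). */
	static inline int demo_tid_geq(unsigned int x, unsigned int y)
	{
		return (int)(x - y) >= 0;
	}

	struct demo_journal {
		rwlock_t	state_lock;	/* initialised elsewhere */
		unsigned int	commit_request;	/* highest tid asked to commit */
		unsigned int	running_tid;
	};

	/* Takes state_lock for writing; must never run under a read lock. */
	static void demo_log_start_commit(struct demo_journal *j, unsigned int tid)
	{
		write_lock(&j->state_lock);
		if (!demo_tid_geq(j->commit_request, tid))
			j->commit_request = tid;	/* wake the commit thread here */
		write_unlock(&j->state_lock);
	}

	static void demo_force_commit(struct demo_journal *j)
	{
		unsigned int tid;
		int need_to_start;

		read_lock(&j->state_lock);
		tid = j->running_tid;
		/* Decide while the state is stable ... */
		need_to_start = !demo_tid_geq(j->commit_request, tid);
		read_unlock(&j->state_lock);

		/* ... but act only after the read lock is gone, because the
		 * starter needs the same lock for writing. */
		if (need_to_start)
			demo_log_start_commit(j, tid);
	}
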
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 85c6be2..3005ec4 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -336,14 +336,13 @@
 	size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
 #ifndef __ECOS
 	if (jffs2_blocks_use_vmalloc(c))
-		c->blocks = vmalloc(size);
+		c->blocks = vzalloc(size);
 	else
 #endif
-		c->blocks = kmalloc(size, GFP_KERNEL);
+		c->blocks = kzalloc(size, GFP_KERNEL);
 	if (!c->blocks)
 		return -ENOMEM;
 
-	memset(c->blocks, 0, size);
 	for (i=0; i<c->nr_blocks; i++) {
 		INIT_LIST_HEAD(&c->blocks[i].list);
 		c->blocks[i].offset = i * c->sector_size;
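
The jffs2 hunk above is a small cleanup: vmalloc() plus memset() and kmalloc() plus memset() become vzalloc()/kzalloc(), which return already-zeroed memory. The same pattern in isolation:

	#include <linux/vmalloc.h>
	#include <linux/slab.h>

	static void *demo_alloc_blocks(size_t size, bool use_vmalloc)
	{
		/* vzalloc()/kzalloc() zero the buffer, so no separate memset(). */
		if (use_vmalloc)
			return vzalloc(size);
		return kzalloc(size, GFP_KERNEL);
	}
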
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index f864005..0bc6a6c 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -144,4 +144,4 @@
 	void *os_priv;
 };
 
-#endif /* _JFFS2_FB_SB */
+#endif /* _JFFS2_FS_SB */
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 9b572ca..4f9cc04 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -151,7 +151,7 @@
 		JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
 			    offset, je32_to_cpu(rx.hdr_crc), crc);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return EIO;
+		return -EIO;
 	}
 	totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
 	if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
@@ -167,7 +167,7 @@
 			    je32_to_cpu(rx.xid), xd->xid,
 			    je32_to_cpu(rx.version), xd->version);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return EIO;
+		return -EIO;
 	}
 	xd->xprefix = rx.xprefix;
 	xd->name_len = rx.name_len;
@@ -230,7 +230,7 @@
 			      ref_offset(xd->node), xd->data_crc, crc);
 		kfree(data);
 		xd->flags |= JFFS2_XFLAGS_INVALID;
-		return EIO;
+		return -EIO;
 	}
 
 	xd->flags |= JFFS2_XFLAGS_HOT;
@@ -268,7 +268,7 @@
 	if (xd->xname)
 		return 0;
 	if (xd->flags & JFFS2_XFLAGS_INVALID)
-		return EIO;
+		return -EIO;
 	if (unlikely(is_xattr_datum_unchecked(c, xd)))
 		rc = do_verify_xattr_datum(c, xd);
 	if (!rc)
@@ -460,7 +460,7 @@
 	if (crc != je32_to_cpu(rr.node_crc)) {
 		JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
 			    offset, je32_to_cpu(rr.node_crc), crc);
-		return EIO;
+		return -EIO;
 	}
 	if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
 	    || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
@@ -470,7 +470,7 @@
 			    offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
 			    je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
 			    je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
-		return EIO;
+		return -EIO;
 	}
 	ref->ino = je32_to_cpu(rr.ino);
 	ref->xid = je32_to_cpu(rr.xid);
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index e1b8493..278e3fb 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1120,16 +1120,13 @@
 	 * file systems to log may have n-to-1 relationship;
 	 */
 
-	bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				 log);
 	if (IS_ERR(bdev)) {
 		rc = -PTR_ERR(bdev);
 		goto free;
 	}
 
-	if ((rc = bd_claim(bdev, log))) {
-		goto close;
-	}
-
 	log->bdev = bdev;
 	memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));
 
@@ -1137,7 +1134,7 @@
 	 * initialize log:
 	 */
 	if ((rc = lmLogInit(log)))
-		goto unclaim;
+		goto close;
 
 	list_add(&log->journal_list, &jfs_external_logs);
 
@@ -1163,11 +1160,8 @@
 	list_del(&log->journal_list);
 	lbmLogShutdown(log);
 
-      unclaim:
-	bd_release(bdev);
-
       close:		/* close external log device */
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 
       free:		/* free log descriptor */
 	mutex_unlock(&jfs_log_mutex);
@@ -1512,8 +1506,7 @@
 	bdev = log->bdev;
 	rc = lmLogShutdown(log);
 
-	bd_release(bdev);
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 
 	kfree(log);
 
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 4414e3a..5a2b269 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1465,9 +1465,6 @@
 
 	jfs_info("jfs_lookup: name = %s", name);
 
-	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
-		d_set_d_op(dentry, &jfs_ci_dentry_operations);
-
 	if ((name[0] == '.') && (len == 1))
 		inum = dip->i_ino;
 	else if (strcmp(name, "..") == 0)
@@ -1492,12 +1489,7 @@
 		return ERR_CAST(ip);
 	}
 
-	dentry = d_splice_alias(ip, dentry);
-
-	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
-		d_set_d_op(dentry, &jfs_ci_dentry_operations);
-
-	return dentry;
+	return d_splice_alias(ip, dentry);
 }
 
 static struct inode *jfs_nfs_get_inode(struct super_block *sb,
@@ -1608,7 +1600,7 @@
 
 static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 	/*
 	 * This is not negative dentry. Always valid.
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 3150d76..eeca48a 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -515,6 +515,9 @@
 
 	sb->s_magic = JFS_SUPER_MAGIC;
 
+	if (sbi->mntflag & JFS_OS2)
+		sb->s_d_op = &jfs_ci_dentry_operations;
+
 	inode = jfs_iget(sb, ROOT_I);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
@@ -524,9 +527,6 @@
 	if (!sb->s_root)
 		goto out_no_root;
 
-	if (sbi->mntflag & JFS_OS2)
-		d_set_d_op(sb->s_root, &jfs_ci_dentry_operations);
-
 	/* logical blocks are represented by 40 bits in pxd_t, etc. */
 	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
 #if BITS_PER_LONG == 32
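Setting sb->s_d_op in jfs_fill_super() before the root inode is looked up means d_alloc() applies jfs_ci_dentry_operations to every new dentry on that superblock, which is why the per-dentry d_set_d_op() calls could be dropped from jfs_lookup() and from the root dentry. A short sketch of the same pattern for a hypothetical filesystem (myfs_* names are placeholders):

/* Sketch: install default dentry ops once per superblock. */
#include <linux/fs.h>
#include <linux/dcache.h>

static const struct dentry_operations myfs_dentry_ops = {
	/* .d_revalidate, .d_hash, .d_compare, ... as needed */
};

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_d_op = &myfs_dentry_ops;	/* picked up by d_alloc() */
	/* ... read the root inode, d_alloc_root(), etc. ... */
	return 0;
}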
diff --git a/fs/libfs.c b/fs/libfs.c
index 889311e..c88eab5 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -217,7 +217,8 @@
  * will never be mountable)
  */
 struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
-	const struct super_operations *ops, unsigned long magic)
+	const struct super_operations *ops,
+	const struct dentry_operations *dops, unsigned long magic)
 {
 	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
 	struct dentry *dentry;
@@ -254,6 +255,7 @@
 	dentry->d_parent = dentry;
 	d_instantiate(dentry, root);
 	s->s_root = dentry;
+	s->s_d_op = dops;
 	s->s_flags |= MS_ACTIVE;
 	return dget(s->s_root);
 
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index 97f6073..ca58d64 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_LOCKD) += lockd.o
 
-lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
-	        svcproc.o svcsubs.o mon.o xdr.o grace.o
-lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
+lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
+	        svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o
+lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
 lockd-objs		      := $(lockd-objs-y)
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
new file mode 100644
index 0000000..f848b52
--- /dev/null
+++ b/fs/lockd/clnt4xdr.c
@@ -0,0 +1,605 @@
+/*
+ * linux/fs/lockd/clnt4xdr.c
+ *
+ * XDR functions to encode/decode NLM version 4 RPC arguments and results.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle.  All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
+#  error "NLM host name cannot be larger than NLM's maximum string length!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * number of 32bit-words
+ */
+#define NLM4_void_sz		(0)
+#define NLM4_cookie_sz		(1+(NLM_MAXCOOKIELEN>>2))
+#define NLM4_caller_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_owner_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_fhandle_sz		(1+(NFS3_FHSIZE>>2))
+#define NLM4_lock_sz		(5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz)
+#define NLM4_holder_sz		(6+NLM4_owner_sz)
+
+#define NLM4_testargs_sz	(NLM4_cookie_sz+1+NLM4_lock_sz)
+#define NLM4_lockargs_sz	(NLM4_cookie_sz+4+NLM4_lock_sz)
+#define NLM4_cancargs_sz	(NLM4_cookie_sz+2+NLM4_lock_sz)
+#define NLM4_unlockargs_sz	(NLM4_cookie_sz+NLM4_lock_sz)
+
+#define NLM4_testres_sz		(NLM4_cookie_sz+1+NLM4_holder_sz)
+#define NLM4_res_sz		(NLM4_cookie_sz+1)
+#define NLM4_norep_sz		(0)
+
+
+static s64 loff_t_to_s64(loff_t offset)
+{
+	s64 res;
+
+	if (offset >= NLM4_OFFSET_MAX)
+		res = NLM4_OFFSET_MAX;
+	else if (offset <= -NLM4_OFFSET_MAX)
+		res = -NLM4_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+static void nlm4_compute_offsets(const struct nlm_lock *lock,
+				 u64 *l_offset, u64 *l_len)
+{
+	const struct file_lock *fl = &lock->fl;
+
+	BUG_ON(fl->fl_start > NLM4_OFFSET_MAX);
+	BUG_ON(fl->fl_end > NLM4_OFFSET_MAX &&
+				fl->fl_end != OFFSET_MAX);
+
+	*l_offset = loff_t_to_s64(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		*l_len = 0;
+	else
+		*l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+	dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv4 basic data types
+ *
+ * Basic NLMv4 data types are defined in Appendix II, section 6.1.4
+ * of RFC 1813: "NFS Version 3 Protocol Specification" and in Chapter
+ * 10 of X/Open's "Protocols for Interworking: XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
+}
+
+/*
+ *	typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+			  const u8 *data, const unsigned int length)
+{
+	__be32 *p;
+
+	BUG_ON(length > XDR_MAX_NETOBJ);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, data, length);
+}
+
+static int decode_netobj(struct xdr_stream *xdr,
+			 struct xdr_netobj *obj)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > XDR_MAX_NETOBJ))
+		goto out_size;
+	obj->len = length;
+	obj->data = (u8 *)p;
+	return 0;
+out_size:
+	dprintk("NFS: returned netobj was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+			  const struct nlm_cookie *cookie)
+{
+	BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+	encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+			     struct nlm_cookie *cookie)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	/* apparently HPUX can return empty cookies */
+	if (length == 0)
+		goto out_hpux;
+	if (length > NLM_MAXCOOKIELEN)
+		goto out_size;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	cookie->len = length;
+	memcpy(cookie->data, p, length);
+	return 0;
+out_hpux:
+	cookie->len = 4;
+	memset(cookie->data, 0, 4);
+	return 0;
+out_size:
+	dprintk("NFS: returned cookie was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	BUG_ON(fh->size > NFS3_FHSIZE);
+	encode_netobj(xdr, (u8 *)&fh->data, fh->size);
+}
+
+/*
+ *	enum nlm4_stats {
+ *		NLM4_GRANTED = 0,
+ *		NLM4_DENIED = 1,
+ *		NLM4_DENIED_NOLOCKS = 2,
+ *		NLM4_BLOCKED = 3,
+ *		NLM4_DENIED_GRACE_PERIOD = 4,
+ *		NLM4_DEADLCK = 5,
+ *		NLM4_ROFS = 6,
+ *		NLM4_STALE_FH = 7,
+ *		NLM4_FBIG = 8,
+ *		NLM4_FAILED = 9
+ *	};
+ *
+ *	struct nlm4_stat {
+ *		nlm4_stats stat;
+ *	};
+ *
+ * NB: we don't swap bytes for the NLM status values.  The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+static void encode_nlm4_stat(struct xdr_stream *xdr,
+			     const __be32 stat)
+{
+	__be32 *p;
+
+	BUG_ON(be32_to_cpu(stat) > NLM_FAILED);
+	p = xdr_reserve_space(xdr, 4);
+	*p = stat;
+}
+
+static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (unlikely(*p > nlm4_failed))
+		goto out_bad_xdr;
+	*stat = *p;
+	return 0;
+out_bad_xdr:
+	dprintk("%s: server returned invalid nlm4_stats value: %u\n",
+			__func__, be32_to_cpup(p));
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	struct nlm4_holder {
+ *		bool	exclusive;
+ *		int32	svid;
+ *		netobj	oh;
+ *		uint64	l_offset;
+ *		uint64	l_len;
+ *	};
+ */
+static void encode_nlm4_holder(struct xdr_stream *xdr,
+			       const struct nlm_res *result)
+{
+	const struct nlm_lock *lock = &result->lock;
+	u64 l_offset, l_len;
+	__be32 *p;
+
+	encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+	encode_int32(xdr, lock->svid);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	nlm4_compute_offsets(lock, &l_offset, &l_len);
+	p = xdr_encode_hyper(p, l_offset);
+	xdr_encode_hyper(p, l_len);
+}
+
+static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+	struct nlm_lock *lock = &result->lock;
+	struct file_lock *fl = &lock->fl;
+	u64 l_offset, l_len;
+	u32 exclusive;
+	int error;
+	__be32 *p;
+	s32 end;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(fl);
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	exclusive = be32_to_cpup(p++);
+	lock->svid = be32_to_cpup(p);
+	fl->fl_pid = (pid_t)lock->svid;
+
+	error = decode_netobj(xdr, &lock->oh);
+	if (unlikely(error))
+		goto out;
+
+	p = xdr_inline_decode(xdr, 8 + 8);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+	p = xdr_decode_hyper(p, &l_offset);
+	xdr_decode_hyper(p, &l_len);
+	end = l_offset + l_len - 1;
+
+	fl->fl_start = (loff_t)l_offset;
+	if (l_len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = (loff_t)end;
+	error = 0;
+out:
+	return error;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+	/* NB: client-side does not set lock->len */
+	u32 length = strlen(name);
+	__be32 *p;
+
+	BUG_ON(length > NLM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+/*
+ *	struct nlm4_lock {
+ *		string	caller_name<LM_MAXSTRLEN>;
+ *		netobj	fh;
+ *		netobj	oh;
+ *		int32	svid;
+ *		uint64	l_offset;
+ *		uint64	l_len;
+ *	};
+ */
+static void encode_nlm4_lock(struct xdr_stream *xdr,
+			     const struct nlm_lock *lock)
+{
+	u64 l_offset, l_len;
+	__be32 *p;
+
+	encode_caller_name(xdr, lock->caller);
+	encode_fh(xdr, &lock->fh);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 8 + 8);
+	*p++ = cpu_to_be32(lock->svid);
+
+	nlm4_compute_offsets(lock, &l_offset, &l_len);
+	p = xdr_encode_hyper(p, l_offset);
+	xdr_encode_hyper(p, l_len);
+}
+
+
+/*
+ * NLMv4 XDR encode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	struct nlm4_testargs {
+ *		netobj cookie;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_lockargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *		bool reclaim;
+ *		int state;
+ *	};
+ */
+static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+	encode_bool(xdr, args->reclaim);
+	encode_int32(xdr, args->state);
+}
+
+/*
+ *	struct nlm4_cancargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_unlockargs {
+ *		netobj cookie;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_res {
+ *		netobj cookie;
+ *		nlm4_stat stat;
+ *	};
+ */
+static void nlm4_xdr_enc_res(struct rpc_rqst *req,
+			     struct xdr_stream *xdr,
+			     const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm4_stat(xdr, result->status);
+}
+
+/*
+ *	union nlm4_testrply switch (nlm4_stats stat) {
+ *	case NLM4_DENIED:
+ *		struct nlm4_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm4_testres {
+ *		netobj cookie;
+ *		nlm4_testrply test_stat;
+ *	};
+ */
+static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm4_stat(xdr, result->status);
+	if (result->status == nlm_lck_denied)
+		encode_nlm4_holder(xdr, result);
+}
+
+
+/*
+ * NLMv4 XDR decode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	union nlm4_testrply switch (nlm4_stats stat) {
+ *	case NLM4_DENIED:
+ *		struct nlm4_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm4_testres {
+ *		netobj cookie;
+ *		nlm4_testrply test_stat;
+ *	};
+ */
+static int decode_nlm4_testrply(struct xdr_stream *xdr,
+				struct nlm_res *result)
+{
+	int error;
+
+	error = decode_nlm4_stat(xdr, &result->status);
+	if (unlikely(error))
+		goto out;
+	if (result->status == nlm_lck_denied)
+		error = decode_nlm4_holder(xdr, result);
+out:
+	return error;
+}
+
+static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm4_testrply(xdr, result);
+out:
+	return error;
+}
+
+/*
+ *	struct nlm4_res {
+ *		netobj cookie;
+ *		nlm4_stat stat;
+ *	};
+ */
+static int nlm4_xdr_dec_res(struct rpc_rqst *req,
+			    struct xdr_stream *xdr,
+			    struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm4_stat(xdr, &result->status);
+out:
+	return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm4_xdr_dec_norep	NULL
+
+#define PROC(proc, argtype, restype)					\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdreproc_t)nlm4_xdr_enc_##argtype,		\
+	.p_decode    = (kxdrdproc_t)nlm4_xdr_dec_##restype,		\
+	.p_arglen    = NLM4_##argtype##_sz,				\
+	.p_replen    = NLM4_##restype##_sz,				\
+	.p_statidx   = NLMPROC_##proc,					\
+	.p_name      = #proc,						\
+	}
+
+static struct rpc_procinfo	nlm4_procedures[] = {
+	PROC(TEST,		testargs,	testres),
+	PROC(LOCK,		lockargs,	res),
+	PROC(CANCEL,		cancargs,	res),
+	PROC(UNLOCK,		unlockargs,	res),
+	PROC(GRANTED,		testargs,	res),
+	PROC(TEST_MSG,		testargs,	norep),
+	PROC(LOCK_MSG,		lockargs,	norep),
+	PROC(CANCEL_MSG,	cancargs,	norep),
+	PROC(UNLOCK_MSG,	unlockargs,	norep),
+	PROC(GRANTED_MSG,	testargs,	norep),
+	PROC(TEST_RES,		testres,	norep),
+	PROC(LOCK_RES,		res,		norep),
+	PROC(CANCEL_RES,	res,		norep),
+	PROC(UNLOCK_RES,	res,		norep),
+	PROC(GRANTED_RES,	res,		norep),
+};
+
+struct rpc_version	nlm_version4 = {
+	.number		= 4,
+	.nrprocs	= ARRAY_SIZE(nlm4_procedures),
+	.procs		= nlm4_procedures,
+};
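For reference, the PROC() macro above is purely mechanical: an entry such as PROC(TEST, testargs, testres) expands roughly to the initializer below, wiring an xdr_stream-based encoder (kxdreproc_t) and decoder (kxdrdproc_t) to the precomputed argument and reply sizes.

/* Approximate expansion of PROC(TEST, testargs, testres) */
[NLMPROC_TEST] = {
	.p_proc      = NLMPROC_TEST,
	.p_encode    = (kxdreproc_t)nlm4_xdr_enc_testargs,
	.p_decode    = (kxdrdproc_t)nlm4_xdr_dec_testres,
	.p_arglen    = NLM4_testargs_sz,
	.p_replen    = NLM4_testres_sz,
	.p_statidx   = NLMPROC_TEST,
	.p_name      = "TEST",
},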
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 25509eb..8d4ea83 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -79,7 +79,7 @@
  */
 void nlmclnt_done(struct nlm_host *host)
 {
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 }
 EXPORT_SYMBOL_GPL(nlmclnt_done);
@@ -273,7 +273,7 @@
 	spin_unlock(&nlm_blocked_lock);
 
 	/* Release host handle after use */
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 	return 0;
 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 332c54c..adb45ec 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -58,7 +58,7 @@
 		return;
 	list_del(&lockowner->list);
 	spin_unlock(&lockowner->host->h_lock);
-	nlm_release_host(lockowner->host);
+	nlmclnt_release_host(lockowner->host);
 	kfree(lockowner);
 }
 
@@ -207,22 +207,22 @@
 		printk("nlm_alloc_call: failed, waiting for memory\n");
 		schedule_timeout_interruptible(5*HZ);
 	}
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	return NULL;
 }
 
-void nlm_release_call(struct nlm_rqst *call)
+void nlmclnt_release_call(struct nlm_rqst *call)
 {
 	if (!atomic_dec_and_test(&call->a_count))
 		return;
-	nlm_release_host(call->a_host);
+	nlmclnt_release_host(call->a_host);
 	nlmclnt_release_lockargs(call);
 	kfree(call);
 }
 
 static void nlmclnt_rpc_release(void *data)
 {
-	nlm_release_call(data);
+	nlmclnt_release_call(data);
 }
 
 static int nlm_wait_on_grace(wait_queue_head_t *queue)
@@ -436,7 +436,7 @@
 			status = nlm_stat_to_errno(req->a_res.status);
 	}
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -593,7 +593,7 @@
 out_unblock:
 	nlmclnt_finish_block(block);
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 out_unlock:
 	/* Fatal error: ensure that we remove the lock altogether */
@@ -694,7 +694,7 @@
 	/* What to do now? I'm out of my depth... */
 	status = -ENOLCK;
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -755,7 +755,7 @@
 			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
 	if (status == 0 && req->a_res.status == nlm_lck_denied)
 		status = -ENOLCK;
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
new file mode 100644
index 0000000..180ac34
--- /dev/null
+++ b/fs/lockd/clntxdr.c
@@ -0,0 +1,627 @@
+/*
+ * linux/fs/lockd/clntxdr.c
+ *
+ * XDR functions to encode/decode NLM version 3 RPC arguments and results.
+ * NLM version 3 is backwards compatible with NLM versions 1 and 2.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle.  All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * number of 32bit-words
+ */
+#define NLM_cookie_sz		(1+(NLM_MAXCOOKIELEN>>2))
+#define NLM_caller_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM_owner_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM_fhandle_sz		(1+(NFS2_FHSIZE>>2))
+#define NLM_lock_sz		(3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz)
+#define NLM_holder_sz		(4+NLM_owner_sz)
+
+#define NLM_testargs_sz		(NLM_cookie_sz+1+NLM_lock_sz)
+#define NLM_lockargs_sz		(NLM_cookie_sz+4+NLM_lock_sz)
+#define NLM_cancargs_sz		(NLM_cookie_sz+2+NLM_lock_sz)
+#define NLM_unlockargs_sz	(NLM_cookie_sz+NLM_lock_sz)
+
+#define NLM_testres_sz		(NLM_cookie_sz+1+NLM_holder_sz)
+#define NLM_res_sz		(NLM_cookie_sz+1)
+#define NLM_norep_sz		(0)
+
+
+static s32 loff_t_to_s32(loff_t offset)
+{
+	s32 res;
+
+	if (offset >= NLM_OFFSET_MAX)
+		res = NLM_OFFSET_MAX;
+	else if (offset <= -NLM_OFFSET_MAX)
+		res = -NLM_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+static void nlm_compute_offsets(const struct nlm_lock *lock,
+				u32 *l_offset, u32 *l_len)
+{
+	const struct file_lock *fl = &lock->fl;
+
+	BUG_ON(fl->fl_start > NLM_OFFSET_MAX);
+	BUG_ON(fl->fl_end > NLM_OFFSET_MAX &&
+				fl->fl_end != OFFSET_MAX);
+
+	*l_offset = loff_t_to_s32(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		*l_len = 0;
+	else
+		*l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+	dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv3 basic data types
+ *
+ * Basic NLMv3 data types are not defined in an IETF standards
+ * document.  X/Open has a description of these data types that
+ * is useful.  See Chapter 10 of "Protocols for Interworking:
+ * XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
+}
+
+/*
+ *	typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+			  const u8 *data, const unsigned int length)
+{
+	__be32 *p;
+
+	BUG_ON(length > XDR_MAX_NETOBJ);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, data, length);
+}
+
+static int decode_netobj(struct xdr_stream *xdr,
+			 struct xdr_netobj *obj)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > XDR_MAX_NETOBJ))
+		goto out_size;
+	obj->len = length;
+	obj->data = (u8 *)p;
+	return 0;
+out_size:
+	dprintk("NFS: returned netobj was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+			  const struct nlm_cookie *cookie)
+{
+	BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+	encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+			 struct nlm_cookie *cookie)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	/* apparently HPUX can return empty cookies */
+	if (length == 0)
+		goto out_hpux;
+	if (length > NLM_MAXCOOKIELEN)
+		goto out_size;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	cookie->len = length;
+	memcpy(cookie->data, p, length);
+	return 0;
+out_hpux:
+	cookie->len = 4;
+	memset(cookie->data, 0, 4);
+	return 0;
+out_size:
+	dprintk("NFS: returned cookie was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	BUG_ON(fh->size != NFS2_FHSIZE);
+	encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE);
+}
+
+/*
+ *	enum nlm_stats {
+ *		LCK_GRANTED = 0,
+ *		LCK_DENIED = 1,
+ *		LCK_DENIED_NOLOCKS = 2,
+ *		LCK_BLOCKED = 3,
+ *		LCK_DENIED_GRACE_PERIOD = 4
+ *	};
+ *
+ *
+ *	struct nlm_stat {
+ *		nlm_stats stat;
+ *	};
+ *
+ * NB: we don't swap bytes for the NLM status values.  The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+
+static void encode_nlm_stat(struct xdr_stream *xdr,
+			    const __be32 stat)
+{
+	__be32 *p;
+
+	BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+	p = xdr_reserve_space(xdr, 4);
+	*p = stat;
+}
+
+static int decode_nlm_stat(struct xdr_stream *xdr,
+			   __be32 *stat)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (unlikely(*p > nlm_lck_denied_grace_period))
+		goto out_enum;
+	*stat = *p;
+	return 0;
+out_enum:
+	dprintk("%s: server returned invalid nlm_stats value: %u\n",
+		__func__, be32_to_cpup(p));
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	struct nlm_holder {
+ *		bool exclusive;
+ *		int uppid;
+ *		netobj oh;
+ *		unsigned l_offset;
+ *		unsigned l_len;
+ *	};
+ */
+static void encode_nlm_holder(struct xdr_stream *xdr,
+			      const struct nlm_res *result)
+{
+	const struct nlm_lock *lock = &result->lock;
+	u32 l_offset, l_len;
+	__be32 *p;
+
+	encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+	encode_int32(xdr, lock->svid);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	nlm_compute_offsets(lock, &l_offset, &l_len);
+	*p++ = cpu_to_be32(l_offset);
+	*p   = cpu_to_be32(l_len);
+}
+
+static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+	struct nlm_lock *lock = &result->lock;
+	struct file_lock *fl = &lock->fl;
+	u32 exclusive, l_offset, l_len;
+	int error;
+	__be32 *p;
+	s32 end;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(fl);
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	exclusive = be32_to_cpup(p++);
+	lock->svid = be32_to_cpup(p);
+	fl->fl_pid = (pid_t)lock->svid;
+
+	error = decode_netobj(xdr, &lock->oh);
+	if (unlikely(error))
+		goto out;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+	l_offset = be32_to_cpup(p++);
+	l_len = be32_to_cpup(p);
+	end = l_offset + l_len - 1;
+
+	fl->fl_start = (loff_t)l_offset;
+	if (l_len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = (loff_t)end;
+	error = 0;
+out:
+	return error;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+	/* NB: client-side does not set lock->len */
+	u32 length = strlen(name);
+	__be32 *p;
+
+	BUG_ON(length > NLM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+/*
+ *	struct nlm_lock {
+ *		string caller_name<LM_MAXSTRLEN>;
+ *		netobj fh;
+ *		netobj oh;
+ *		int uppid;
+ *		unsigned l_offset;
+ *		unsigned l_len;
+ *	};
+ */
+static void encode_nlm_lock(struct xdr_stream *xdr,
+			    const struct nlm_lock *lock)
+{
+	u32 l_offset, l_len;
+	__be32 *p;
+
+	encode_caller_name(xdr, lock->caller);
+	encode_fh(xdr, &lock->fh);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(lock->svid);
+
+	nlm_compute_offsets(lock, &l_offset, &l_len);
+	*p++ = cpu_to_be32(l_offset);
+	*p   = cpu_to_be32(l_len);
+}
+
+
+/*
+ * NLMv3 XDR encode functions
+ *
+ * NLMv3 argument types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	struct nlm_testargs {
+ *		netobj cookie;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_lockargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *		bool reclaim;
+ *		int state;
+ *	};
+ */
+static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+	encode_bool(xdr, args->reclaim);
+	encode_int32(xdr, args->state);
+}
+
+/*
+ *	struct nlm_cancargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_unlockargs {
+ *		netobj cookie;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_res {
+ *		netobj cookie;
+ *		nlm_stat stat;
+ *	};
+ */
+static void nlm_xdr_enc_res(struct rpc_rqst *req,
+			    struct xdr_stream *xdr,
+			    const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm_stat(xdr, result->status);
+}
+
+/*
+ *	union nlm_testrply switch (nlm_stats stat) {
+ *	case LCK_DENIED:
+ *		struct nlm_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm_testres {
+ *		netobj cookie;
+ *		nlm_testrply test_stat;
+ *	};
+ */
+static void encode_nlm_testrply(struct xdr_stream *xdr,
+				const struct nlm_res *result)
+{
+	if (result->status == nlm_lck_denied)
+		encode_nlm_holder(xdr, result);
+}
+
+static void nlm_xdr_enc_testres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm_stat(xdr, result->status);
+	encode_nlm_testrply(xdr, result);
+}
+
+
+/*
+ * NLMv3 XDR decode functions
+ *
+ * NLMv3 result types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	union nlm_testrply switch (nlm_stats stat) {
+ *	case LCK_DENIED:
+ *		struct nlm_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm_testres {
+ *		netobj cookie;
+ *		nlm_testrply test_stat;
+ *	};
+ */
+static int decode_nlm_testrply(struct xdr_stream *xdr,
+			       struct nlm_res *result)
+{
+	int error;
+
+	error = decode_nlm_stat(xdr, &result->status);
+	if (unlikely(error))
+		goto out;
+	if (result->status == nlm_lck_denied)
+		error = decode_nlm_holder(xdr, result);
+out:
+	return error;
+}
+
+static int nlm_xdr_dec_testres(struct rpc_rqst *req,
+			       struct xdr_stream *xdr,
+			       struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm_testrply(xdr, result);
+out:
+	return error;
+}
+
+/*
+ *	struct nlm_res {
+ *		netobj cookie;
+ *		nlm_stat stat;
+ *	};
+ */
+static int nlm_xdr_dec_res(struct rpc_rqst *req,
+			   struct xdr_stream *xdr,
+			   struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm_stat(xdr, &result->status);
+out:
+	return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm_xdr_dec_norep	NULL
+
+#define PROC(proc, argtype, restype)	\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdreproc_t)nlm_xdr_enc_##argtype,		\
+	.p_decode    = (kxdrdproc_t)nlm_xdr_dec_##restype,		\
+	.p_arglen    = NLM_##argtype##_sz,				\
+	.p_replen    = NLM_##restype##_sz,				\
+	.p_statidx   = NLMPROC_##proc,					\
+	.p_name      = #proc,						\
+	}
+
+static struct rpc_procinfo	nlm_procedures[] = {
+	PROC(TEST,		testargs,	testres),
+	PROC(LOCK,		lockargs,	res),
+	PROC(CANCEL,		cancargs,	res),
+	PROC(UNLOCK,		unlockargs,	res),
+	PROC(GRANTED,		testargs,	res),
+	PROC(TEST_MSG,		testargs,	norep),
+	PROC(LOCK_MSG,		lockargs,	norep),
+	PROC(CANCEL_MSG,	cancargs,	norep),
+	PROC(UNLOCK_MSG,	unlockargs,	norep),
+	PROC(GRANTED_MSG,	testargs,	norep),
+	PROC(TEST_RES,		testres,	norep),
+	PROC(LOCK_RES,		res,		norep),
+	PROC(CANCEL_RES,	res,		norep),
+	PROC(UNLOCK_RES,	res,		norep),
+	PROC(GRANTED_RES,	res,		norep),
+};
+
+static struct rpc_version	nlm_version1 = {
+		.number		= 1,
+		.nrprocs	= ARRAY_SIZE(nlm_procedures),
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	nlm_version3 = {
+		.number		= 3,
+		.nrprocs	= ARRAY_SIZE(nlm_procedures),
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	*nlm_versions[] = {
+	[1] = &nlm_version1,
+	[3] = &nlm_version3,
+#ifdef CONFIG_LOCKD_V4
+	[4] = &nlm_version4,
+#endif
+};
+
+static struct rpc_stat		nlm_rpc_stats;
+
+struct rpc_program		nlm_program = {
+		.name		= "lockd",
+		.number		= NLM_PROGRAM,
+		.nrvers		= ARRAY_SIZE(nlm_versions),
+		.version	= nlm_versions,
+		.stats		= &nlm_rpc_stats,
+};
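Because NLM v1/v3 carry 32-bit offsets and lengths on the wire, loff_t_to_s32() and nlm_compute_offsets() above clamp the 64-bit file_lock range before encoding it with cpu_to_be32(), whereas the NLMv4 code encodes full 64-bit hypers. A standalone userspace sketch of the same clamping, with OFFSET_MAX_32 standing in for the kernel's NLM_OFFSET_MAX:

/* Standalone sketch of the 32-bit offset clamping used by NLM v1/v3. */
#include <stdio.h>
#include <stdint.h>

#define OFFSET_MAX_32 ((int64_t)0x7fffffff)

static int32_t clamp_offset(int64_t offset)
{
	if (offset >= OFFSET_MAX_32)
		return (int32_t)OFFSET_MAX_32;
	if (offset <= -OFFSET_MAX_32)
		return (int32_t)-OFFSET_MAX_32;
	return (int32_t)offset;
}

int main(void)
{
	printf("%ld\n", (long)clamp_offset(INT64_C(1) << 40));	/* clamped to 2147483647 */
	printf("%ld\n", (long)clamp_offset(4096));		/* passes through */
	return 0;
}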
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ed0c59f..b7c99bf 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -25,9 +25,22 @@
 #define NLM_HOST_EXPIRE		(300 * HZ)
 #define NLM_HOST_COLLECT	(120 * HZ)
 
-static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];
+
+#define for_each_host(host, pos, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry((host), (pos), (chain), h_hash)
+
+#define for_each_host_safe(host, pos, next, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry_safe((host), (pos), (next), \
+						(chain), h_hash)
+
 static unsigned long		next_gc;
-static int			nrhosts;
+static unsigned long		nrhosts;
 static DEFINE_MUTEX(nlm_host_mutex);
 
 static void			nlm_gc_hosts(void);
@@ -40,8 +53,6 @@
 	const u32		version;	/* NLM version to search for */
 	const char		*hostname;	/* remote's hostname */
 	const size_t		hostname_len;	/* it's length */
-	const struct sockaddr	*src_sap;	/* our address (optional) */
-	const size_t		src_len;	/* it's length */
 	const int		noresvport;	/* use non-priv port */
 };
 
@@ -88,127 +99,83 @@
 }
 
 /*
- * Common host lookup routine for server & client
+ * Allocate and initialize an nlm_host.  Common to both client and server.
  */
-static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
+static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
+				       struct nsm_handle *nsm)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
-	struct nlm_host	*host;
-	struct nsm_handle *nsm = NULL;
+	struct nlm_host *host = NULL;
+	unsigned long now = jiffies;
 
-	mutex_lock(&nlm_host_mutex);
-
-	if (time_after_eq(jiffies, next_gc))
-		nlm_gc_hosts();
-
-	/* We may keep several nlm_host objects for a peer, because each
-	 * nlm_host is identified by
-	 * (address, protocol, version, server/client)
-	 * We could probably simplify this a little by putting all those
-	 * different NLM rpc_clients into one single nlm_host object.
-	 * This would allow us to have one nlm_host per address.
-	 */
-	chain = &nlm_hosts[nlm_hash_address(ni->sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
-		if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
-			continue;
-
-		/* See if we have an NSM handle for this client */
-		if (!nsm)
-			nsm = host->h_nsmhandle;
-
-		if (host->h_proto != ni->protocol)
-			continue;
-		if (host->h_version != ni->version)
-			continue;
-		if (host->h_server != ni->server)
-			continue;
-		if (ni->server && ni->src_len != 0 &&
-		    !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
-			continue;
-
-		/* Move to head of hash chain. */
-		hlist_del(&host->h_hash);
-		hlist_add_head(&host->h_hash, chain);
-
-		nlm_get_host(host);
-		dprintk("lockd: nlm_lookup_host found host %s (%s)\n",
-				host->h_name, host->h_addrbuf);
-		goto out;
-	}
-
-	/*
-	 * The host wasn't in our hash table.  If we don't
-	 * have an NSM handle for it yet, create one.
-	 */
-	if (nsm)
+	if (nsm != NULL)
 		atomic_inc(&nsm->sm_count);
 	else {
 		host = NULL;
 		nsm = nsm_get_handle(ni->sap, ni->salen,
 					ni->hostname, ni->hostname_len);
-		if (!nsm) {
-			dprintk("lockd: nlm_lookup_host failed; "
-				"no nsm handle\n");
+		if (unlikely(nsm == NULL)) {
+			dprintk("lockd: %s failed; no nsm handle\n",
+				__func__);
 			goto out;
 		}
 	}
 
-	host = kzalloc(sizeof(*host), GFP_KERNEL);
-	if (!host) {
+	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	if (unlikely(host == NULL)) {
+		dprintk("lockd: %s failed; no memory\n", __func__);
 		nsm_release(nsm);
-		dprintk("lockd: nlm_lookup_host failed; no memory\n");
 		goto out;
 	}
-	host->h_name	   = nsm->sm_name;
-	host->h_addrbuf    = nsm->sm_addrbuf;
+
 	memcpy(nlm_addr(host), ni->sap, ni->salen);
-	host->h_addrlen = ni->salen;
+	host->h_addrlen    = ni->salen;
 	rpc_set_port(nlm_addr(host), 0);
-	memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
-	host->h_srcaddrlen = ni->src_len;
+	host->h_srcaddrlen = 0;
+
+	host->h_rpcclnt    = NULL;
+	host->h_name	   = nsm->sm_name;
 	host->h_version    = ni->version;
 	host->h_proto      = ni->protocol;
-	host->h_rpcclnt    = NULL;
-	mutex_init(&host->h_mutex);
-	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
-	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
-	atomic_set(&host->h_count, 1);
+	host->h_reclaiming = 0;
+	host->h_server     = ni->server;
+	host->h_noresvport = ni->noresvport;
+	host->h_inuse      = 0;
 	init_waitqueue_head(&host->h_gracewait);
 	init_rwsem(&host->h_rwsem);
-	host->h_state      = 0;			/* pseudo NSM state */
-	host->h_nsmstate   = 0;			/* real NSM state */
-	host->h_nsmhandle  = nsm;
-	host->h_server	   = ni->server;
-	host->h_noresvport = ni->noresvport;
-	hlist_add_head(&host->h_hash, chain);
+	host->h_state      = 0;
+	host->h_nsmstate   = 0;
+	host->h_pidcount   = 0;
+	atomic_set(&host->h_count, 1);
+	mutex_init(&host->h_mutex);
+	host->h_nextrebind = now + NLM_HOST_REBIND;
+	host->h_expires    = now + NLM_HOST_EXPIRE;
 	INIT_LIST_HEAD(&host->h_lockowners);
 	spin_lock_init(&host->h_lock);
 	INIT_LIST_HEAD(&host->h_granted);
 	INIT_LIST_HEAD(&host->h_reclaim);
-
-	nrhosts++;
-
-	dprintk("lockd: nlm_lookup_host created host %s\n",
-			host->h_name);
+	host->h_nsmhandle  = nsm;
+	host->h_addrbuf    = nsm->sm_addrbuf;
 
 out:
-	mutex_unlock(&nlm_host_mutex);
 	return host;
 }
 
 /*
- * Destroy a host
+ * Destroy an nlm_host and free associated resources
+ *
+ * Caller must hold nlm_host_mutex.
  */
-static void
-nlm_destroy_host(struct nlm_host *host)
+static void nlm_destroy_host_locked(struct nlm_host *host)
 {
 	struct rpc_clnt	*clnt;
 
+	dprintk("lockd: destroy host %s\n", host->h_name);
+
 	BUG_ON(!list_empty(&host->h_lockowners));
 	BUG_ON(atomic_read(&host->h_count));
 
+	hlist_del_init(&host->h_hash);
+
 	nsm_unmonitor(host);
 	nsm_release(host->h_nsmhandle);
 
@@ -216,6 +183,8 @@
 	if (clnt != NULL)
 		rpc_shutdown_client(clnt);
 	kfree(host);
+
+	nrhosts--;
 }
 
 /**
@@ -249,12 +218,76 @@
 		.hostname_len	= strlen(hostname),
 		.noresvport	= noresvport,
 	};
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host;
+	struct nsm_handle *nsm = NULL;
 
 	dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
 			(hostname ? hostname : "<none>"), version,
 			(protocol == IPPROTO_UDP ? "udp" : "tcp"));
 
-	return nlm_lookup_host(&ni);
+	mutex_lock(&nlm_host_mutex);
+
+	chain = &nlm_client_hosts[nlm_hash_address(sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != protocol)
+			continue;
+		if (host->h_version != version)
+			continue;
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n", __func__,
+			host->h_name, host->h_addrbuf);
+		goto out;
+	}
+
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n", __func__,
+		host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmclnt_release_host - release client nlm_host
+ * @host: nlm_host to release
+ *
+ */
+void nlmclnt_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release client host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(host->h_server);
+
+	if (atomic_dec_and_test(&host->h_count)) {
+		BUG_ON(!list_empty(&host->h_lockowners));
+		BUG_ON(!list_empty(&host->h_granted));
+		BUG_ON(!list_empty(&host->h_reclaim));
+
+		mutex_lock(&nlm_host_mutex);
+		nlm_destroy_host_locked(host);
+		mutex_unlock(&nlm_host_mutex);
+	}
 }
 
 /**
@@ -279,12 +312,18 @@
 				    const char *hostname,
 				    const size_t hostname_len)
 {
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host = NULL;
+	struct nsm_handle *nsm = NULL;
 	struct sockaddr_in sin = {
 		.sin_family	= AF_INET,
 	};
 	struct sockaddr_in6 sin6 = {
 		.sin6_family	= AF_INET6,
 	};
+	struct sockaddr *src_sap;
+	size_t src_len = rqstp->rq_addrlen;
 	struct nlm_lookup_host_info ni = {
 		.server		= 1,
 		.sap		= svc_addr(rqstp),
@@ -293,27 +332,91 @@
 		.version	= rqstp->rq_vers,
 		.hostname	= hostname,
 		.hostname_len	= hostname_len,
-		.src_len	= rqstp->rq_addrlen,
 	};
 
 	dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
 			(int)hostname_len, hostname, rqstp->rq_vers,
 			(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
 
+	mutex_lock(&nlm_host_mutex);
+
 	switch (ni.sap->sa_family) {
 	case AF_INET:
 		sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
-		ni.src_sap = (struct sockaddr *)&sin;
+		src_sap = (struct sockaddr *)&sin;
 		break;
 	case AF_INET6:
 		ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
-		ni.src_sap = (struct sockaddr *)&sin6;
+		src_sap = (struct sockaddr *)&sin6;
 		break;
 	default:
-		return NULL;
+		dprintk("lockd: %s failed; unrecognized address family\n",
+			__func__);
+		goto out;
 	}
 
-	return nlm_lookup_host(&ni);
+	if (time_after_eq(jiffies, next_gc))
+		nlm_gc_hosts();
+
+	chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != ni.protocol)
+			continue;
+		if (host->h_version != ni.version)
+			continue;
+		if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap))
+			continue;
+
+		/* Move to head of hash chain. */
+		hlist_del(&host->h_hash);
+		hlist_add_head(&host->h_hash, chain);
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n",
+			__func__, host->h_name, host->h_addrbuf);
+		goto out;
+	}
+
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	memcpy(nlm_srcaddr(host), src_sap, src_len);
+	host->h_srcaddrlen = src_len;
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n",
+		__func__, host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmsvc_release_host - release server nlm_host
+ * @host: nlm_host to release
+ *
+ * Host is destroyed later in nlm_gc_hosts().
+ */
+void nlmsvc_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release server host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(!host->h_server);
+	atomic_dec(&host->h_count);
 }
 
 /*
@@ -413,20 +516,29 @@
 	return host;
 }
 
-/*
- * Release NLM host after use
- */
-void nlm_release_host(struct nlm_host *host)
+static struct nlm_host *next_host_state(struct hlist_head *cache,
+					struct nsm_handle *nsm,
+					const struct nlm_reboot *info)
 {
-	if (host != NULL) {
-		dprintk("lockd: release host %s\n", host->h_name);
-		BUG_ON(atomic_read(&host->h_count) < 0);
-		if (atomic_dec_and_test(&host->h_count)) {
-			BUG_ON(!list_empty(&host->h_lockowners));
-			BUG_ON(!list_empty(&host->h_granted));
-			BUG_ON(!list_empty(&host->h_reclaim));
+	struct nlm_host *host;
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+
+	mutex_lock(&nlm_host_mutex);
+	for_each_host(host, pos, chain, cache) {
+		if (host->h_nsmhandle == nsm
+		    && host->h_nsmstate != info->state) {
+			host->h_nsmstate = info->state;
+			host->h_state++;
+
+			nlm_get_host(host);
+			mutex_unlock(&nlm_host_mutex);
+			return host;
 		}
 	}
+
+	mutex_unlock(&nlm_host_mutex);
+	return NULL;
 }
 
 /**
@@ -438,8 +550,6 @@
  */
 void nlm_host_rebooted(const struct nlm_reboot *info)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nsm_handle *nsm;
 	struct nlm_host	*host;
 
@@ -452,32 +562,15 @@
 	 * lock for this.
 	 * To avoid processing a host several times, we match the nsmstate.
 	 */
-again:	mutex_lock(&nlm_host_mutex);
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			if (host->h_nsmhandle == nsm
-			 && host->h_nsmstate != info->state) {
-				host->h_nsmstate = info->state;
-				host->h_state++;
-
-				nlm_get_host(host);
-				mutex_unlock(&nlm_host_mutex);
-
-				if (host->h_server) {
-					/* We're server for this guy, just ditch
-					 * all the locks he held. */
-					nlmsvc_free_host_resources(host);
-				} else {
-					/* He's the server, initiate lock recovery. */
-					nlmclnt_recovery(host);
-				}
-
-				nlm_release_host(host);
-				goto again;
-			}
-		}
+	while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
+		nlmsvc_free_host_resources(host);
+		nlmsvc_release_host(host);
 	}
-	mutex_unlock(&nlm_host_mutex);
+	while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
+		nlmclnt_recovery(host);
+		nlmclnt_release_host(host);
+	}
+
 	nsm_release(nsm);
 }
 
@@ -497,13 +590,11 @@
 
 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts...\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			host->h_expires = jiffies - 1;
-			if (host->h_rpcclnt) {
-				rpc_shutdown_client(host->h_rpcclnt);
-				host->h_rpcclnt = NULL;
-			}
+	for_each_host(host, pos, chain, nlm_server_hosts) {
+		host->h_expires = jiffies - 1;
+		if (host->h_rpcclnt) {
+			rpc_shutdown_client(host->h_rpcclnt);
+			host->h_rpcclnt = NULL;
 		}
 	}
 
@@ -512,15 +603,13 @@
 	mutex_unlock(&nlm_host_mutex);
 
 	/* complain if any hosts are left */
-	if (nrhosts) {
+	if (nrhosts != 0) {
 		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
-		dprintk("lockd: %d hosts left:\n", nrhosts);
-		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-			hlist_for_each_entry(host, pos, chain, h_hash) {
-				dprintk("       %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-			}
+		dprintk("lockd: %lu hosts left:\n", nrhosts);
+		for_each_host(host, pos, chain, nlm_server_hosts) {
+			dprintk("       %s (cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
 		}
 	}
 }
@@ -538,29 +627,22 @@
 	struct nlm_host	*host;
 
 	dprintk("lockd: host garbage collection\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash)
-			host->h_inuse = 0;
-	}
+	for_each_host(host, pos, chain, nlm_server_hosts)
+		host->h_inuse = 0;
 
 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources();
 
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
-			if (atomic_read(&host->h_count) || host->h_inuse
-			 || time_before(jiffies, host->h_expires)) {
-				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-				continue;
-			}
-			dprintk("lockd: delete host %s\n", host->h_name);
-			hlist_del_init(&host->h_hash);
-
-			nlm_destroy_host(host);
-			nrhosts--;
+	for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+		if (atomic_read(&host->h_count) || host->h_inuse
+		 || time_before(jiffies, host->h_expires)) {
+			dprintk("nlm_gc_hosts skipping %s "
+				"(cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
+			continue;
 		}
+		nlm_destroy_host_locked(host);
 	}
 
 	next_gc = jiffies + NLM_HOST_COLLECT;
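With the split into nlm_client_hosts and nlm_server_hosts, nlm_host_rebooted() drains each table via next_host_state(), which hands back one matching host at a time (its h_nsmstate already updated) until no stale entries remain; the recovery work then happens outside nlm_host_mutex. A toy userspace sketch of that drain pattern, using a plain array in place of the hash table:

/* Userspace sketch of the "process each rebooted host exactly once" loop. */
#include <stdio.h>

struct fake_host { const char *name; int nsmstate; };

static struct fake_host table[] = {
	{ "alpha", 3 }, { "beta", 5 }, { "gamma", 3 },
};

static struct fake_host *next_host_state(int new_state)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].nsmstate != new_state) {
			table[i].nsmstate = new_state;	/* mark as processed */
			return &table[i];
		}
	return NULL;
}

int main(void)
{
	struct fake_host *h;

	while ((h = next_host_state(5)) != NULL)
		printf("recovering locks for %s\n", h->name);	/* alpha, gamma */
	return 0;
}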
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e0c9189..23d7451 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -401,26 +401,22 @@
  * Status Monitor wire protocol.
  */
 
-static int encode_nsm_string(struct xdr_stream *xdr, const char *string)
+static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
 {
 	const u32 len = strlen(string);
 	__be32 *p;
 
-	if (unlikely(len > SM_MAXSTRLEN))
-		return -EIO;
-	p = xdr_reserve_space(xdr, sizeof(u32) + len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(len > SM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + len);
 	xdr_encode_opaque(p, string, len);
-	return 0;
 }
 
 /*
  * "mon_name" specifies the host to be monitored.
  */
-static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	return encode_nsm_string(xdr, argp->mon_name);
+	encode_nsm_string(xdr, argp->mon_name);
 }
 
 /*
@@ -429,35 +425,25 @@
  * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
  * has changed.
  */
-static int encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	int status;
 	__be32 *p;
 
-	status = encode_nsm_string(xdr, utsname()->nodename);
-	if (unlikely(status != 0))
-		return status;
-	p = xdr_reserve_space(xdr, 3 * sizeof(u32));
-	if (unlikely(p == NULL))
-		return -EIO;
-	*p++ = htonl(argp->prog);
-	*p++ = htonl(argp->vers);
-	*p++ = htonl(argp->proc);
-	return 0;
+	encode_nsm_string(xdr, utsname()->nodename);
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(argp->prog);
+	*p++ = cpu_to_be32(argp->vers);
+	*p = cpu_to_be32(argp->proc);
 }
 
 /*
  * The "mon_id" argument specifies the non-private arguments
  * of an NSMPROC_MON or NSMPROC_UNMON call.
  */
-static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	int status;
-
-	status = encode_mon_name(xdr, argp);
-	if (unlikely(status != 0))
-		return status;
-	return encode_my_id(xdr, argp);
+	encode_mon_name(xdr, argp);
+	encode_my_id(xdr, argp);
 }
 
 /*
@@ -465,68 +451,56 @@
  * by the NSMPROC_MON call. This information will be supplied in the
  * NLMPROC_SM_NOTIFY call.
  */
-static int encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
 	__be32 *p;
 
 	p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
+	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
+}
+
+static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
+			    const struct nsm_args *argp)
+{
+	encode_mon_id(xdr, argp);
+	encode_priv(xdr, argp);
+}
+
+static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      const struct nsm_args *argp)
+{
+	encode_mon_id(xdr, argp);
+}
+
+static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
+				struct xdr_stream *xdr,
+				struct nsm_res *resp)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
+	resp->status = be32_to_cpup(p++);
+	resp->state = be32_to_cpup(p);
+
+	dprintk("lockd: %s status %d state %d\n",
+		__func__, resp->status, resp->state);
 	return 0;
 }
 
-static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p,
-		       const struct nsm_args *argp)
-{
-	struct xdr_stream xdr;
-	int status;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	status = encode_mon_id(&xdr, argp);
-	if (unlikely(status))
-		return status;
-	return encode_priv(&xdr, argp);
-}
-
-static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p,
-			 const struct nsm_args *argp)
-{
-	struct xdr_stream xdr;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	return encode_mon_id(&xdr, argp);
-}
-
-static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p,
+static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
+			    struct xdr_stream *xdr,
 			    struct nsm_res *resp)
 {
-	struct xdr_stream xdr;
+	__be32 *p;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	p = xdr_inline_decode(&xdr, 2 * sizeof(u32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	resp->status = ntohl(*p++);
-	resp->state = ntohl(*p);
+	resp->state = be32_to_cpup(p);
 
-	dprintk("lockd: xdr_dec_stat_res status %d state %d\n",
-			resp->status, resp->state);
-	return 0;
-}
-
-static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p,
-			struct nsm_res *resp)
-{
-	struct xdr_stream xdr;
-
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	p = xdr_inline_decode(&xdr, sizeof(u32));
-	if (unlikely(p == NULL))
-		return -EIO;
-	resp->state = ntohl(*p);
-
-	dprintk("lockd: xdr_dec_stat state %d\n", resp->state);
+	dprintk("lockd: %s state %d\n", __func__, resp->state);
 	return 0;
 }
 
@@ -542,8 +516,8 @@
 static struct rpc_procinfo	nsm_procedures[] = {
 [NSMPROC_MON] = {
 		.p_proc		= NSMPROC_MON,
-		.p_encode	= (kxdrproc_t)xdr_enc_mon,
-		.p_decode	= (kxdrproc_t)xdr_dec_stat_res,
+		.p_encode	= (kxdreproc_t)nsm_xdr_enc_mon,
+		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat_res,
 		.p_arglen	= SM_mon_sz,
 		.p_replen	= SM_monres_sz,
 		.p_statidx	= NSMPROC_MON,
@@ -551,8 +525,8 @@
 	},
 [NSMPROC_UNMON] = {
 		.p_proc		= NSMPROC_UNMON,
-		.p_encode	= (kxdrproc_t)xdr_enc_unmon,
-		.p_decode	= (kxdrproc_t)xdr_dec_stat,
+		.p_encode	= (kxdreproc_t)nsm_xdr_enc_unmon,
+		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat,
 		.p_arglen	= SM_mon_id_sz,
 		.p_replen	= SM_unmonres_sz,
 		.p_statidx	= NSMPROC_UNMON,
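The rewritten mon.c decoders pull fixed-size big-endian words straight off an xdr_stream: nsm_xdr_dec_stat_res() reads status then state with be32_to_cpup(), and nsm_xdr_dec_stat() reads state alone. A standalone sketch of that two-word decode using plain ntohl(), just to illustrate the byte-order handling (the wire[] buffer is fabricated test data):

/* Userspace sketch: decode the two 32-bit words of an NSMPROC_MON reply. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl(), ntohl() */

int main(void)
{
	/* 8 bytes off the wire: status = 0, state = 7 */
	const uint32_t wire[2] = { htonl(0), htonl(7) };

	uint32_t status = ntohl(wire[0]);
	uint32_t state  = ntohl(wire[1]);

	printf("status %u state %u\n", status, state);
	return 0;
}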
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 38d2611..9a41fdc 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -51,7 +51,7 @@
 	return 0;
 
 no_locks:
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
  	if (error)
 		return error;	
 	return nlm_lck_denied_nolocks;
@@ -92,7 +92,7 @@
 	else
 		dprintk("lockd: TEST4        status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -134,7 +134,7 @@
 	else
 		dprintk("lockd: LOCK         status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -164,7 +164,7 @@
 	resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
 
 	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -197,7 +197,7 @@
 	resp->status = nlmsvc_unlock(file, &argp->lock);
 
 	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -229,7 +229,7 @@
 
 static void nlm4svc_callback_release(void *data)
 {
-	nlm_release_call(data);
+	nlmsvc_release_call(data);
 }
 
 static const struct rpc_call_ops nlm4svc_callback_ops = {
@@ -261,7 +261,7 @@
 
 	stat = func(rqstp, argp, &call->a_res);
 	if (stat != 0) {
-		nlm_release_call(call);
+		nlmsvc_release_call(call);
 		return stat;
 	}
 
@@ -334,7 +334,7 @@
 	resp->status = nlmsvc_share_file(host, file, argp);
 
 	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -367,7 +367,7 @@
 	resp->status = nlmsvc_unshare_file(host, file, argp);
 
 	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -399,7 +399,7 @@
 		return rpc_success;
 
 	nlmsvc_free_host_resources(host);
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	return rpc_success;
 }
 
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index ef5659b..6e31695 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -46,6 +46,7 @@
 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
 static void nlmsvc_freegrantargs(struct nlm_rqst *call);
 static const struct rpc_call_ops nlmsvc_grant_ops;
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie);
 
 /*
  * The list of blocked locks to retry
@@ -233,7 +234,7 @@
 failed_free:
 	kfree(block);
 failed:
-	nlm_release_call(call);
+	nlmsvc_release_call(call);
 	return NULL;
 }
 
@@ -266,7 +267,7 @@
 	mutex_unlock(&file->f_mutex);
 
 	nlmsvc_freegrantargs(block->b_call);
-	nlm_release_call(block->b_call);
+	nlmsvc_release_call(block->b_call);
 	nlm_release_file(block->b_file);
 	kfree(block->b_fl);
 	kfree(block);
@@ -934,3 +935,32 @@
 
 	return timeout;
 }
+
+#ifdef RPC_DEBUG
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
+{
+	/*
+	 * We can get away with a static buffer because we're only
+	 * called with BKL held.
+	 */
+	static char buf[2*NLM_MAXCOOKIELEN+1];
+	unsigned int i, len = sizeof(buf);
+	char *p = buf;
+
+	len--;	/* allow for trailing \0 */
+	if (len < 3)
+		return "???";
+	for (i = 0 ; i < cookie->len ; i++) {
+		if (len < 2) {
+			strcpy(p-3, "...");
+			break;
+		}
+		sprintf(p, "%02x", cookie->data[i]);
+		p += 2;
+		len -= 2;
+	}
+	*p = '\0';
+
+	return buf;
+}
+#endif
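
nlmdbg_cookie2a(), duplicated into svclock.c above so the server side keeps its debug output after the client-side XDR split, formats an opaque NLM cookie as hex for dprintk(). Below is a self-contained userspace sketch of the same formatting idea; the buffer size and names are invented for the demo, not taken from the NLM headers:

#include <stdio.h>
#include <string.h>

#define COOKIE_MAX 8	/* assumed limit for this demo only */

static const char *cookie2a(const unsigned char *data, unsigned int len)
{
	static char buf[2 * COOKIE_MAX + 1];
	unsigned int i, room = sizeof(buf) - 1;	/* leave space for '\0' */
	char *p = buf;

	for (i = 0; i < len; i++) {
		if (room < 2) {
			/* out of space: mark the output as truncated */
			strcpy(p - 3, "...");
			break;
		}
		sprintf(p, "%02x", data[i]);
		p += 2;
		room -= 2;
	}
	*p = '\0';
	return buf;
}

int main(void)
{
	unsigned char cookie[] = { 0xde, 0xad, 0xbe, 0xef, 0x00,
				   0x11, 0x22, 0x33, 0x44, 0x55 };

	/* prints "deadbeef00112..." - the tail is elided */
	printf("cookie %s\n", cookie2a(cookie, sizeof(cookie)));
	return 0;
}
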
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0caea53..d27aab1 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -80,7 +80,7 @@
 	return 0;
 
 no_locks:
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	if (error)
 		return error;
 	return nlm_lck_denied_nolocks;
@@ -122,7 +122,7 @@
 		dprintk("lockd: TEST          status %d vers %d\n",
 			ntohl(resp->status), rqstp->rq_vers);
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -164,7 +164,7 @@
 	else
 		dprintk("lockd: LOCK         status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -194,7 +194,7 @@
 	resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
 
 	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -227,7 +227,7 @@
 	resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
 
 	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -257,9 +257,17 @@
 			-task->tk_status);
 }
 
+void nlmsvc_release_call(struct nlm_rqst *call)
+{
+	if (!atomic_dec_and_test(&call->a_count))
+		return;
+	nlmsvc_release_host(call->a_host);
+	kfree(call);
+}
+
 static void nlmsvc_callback_release(void *data)
 {
-	nlm_release_call(data);
+	nlmsvc_release_call(data);
 }
 
 static const struct rpc_call_ops nlmsvc_callback_ops = {
@@ -291,7 +299,7 @@
 
 	stat = func(rqstp, argp, &call->a_res);
 	if (stat != 0) {
-		nlm_release_call(call);
+		nlmsvc_release_call(call);
 		return stat;
 	}
 
@@ -366,7 +374,7 @@
 	resp->status = cast_status(nlmsvc_share_file(host, file, argp));
 
 	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -399,7 +407,7 @@
 	resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
 
 	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -431,7 +439,7 @@
 		return rpc_success;
 
 	nlmsvc_free_host_resources(host);
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	return rpc_success;
 }
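
nlmsvc_release_call(), introduced above, is the usual drop-a-reference pattern: decrement the call's a_count and free it (after releasing the host) only when the last reference goes away. A minimal single-file sketch of that pattern with C11 atomics — the names and structure here are illustrative, not lockd's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct call {
	atomic_int refcount;
	/* per-call state would live here */
};

static struct call *call_get(struct call *c)
{
	atomic_fetch_add(&c->refcount, 1);
	return c;
}

static void call_put(struct call *c)
{
	/* fetch_sub returns the old value; 1 means we held the last ref */
	if (atomic_fetch_sub(&c->refcount, 1) != 1)
		return;
	free(c);
}

int main(void)
{
	struct call *c = calloc(1, sizeof(*c));

	atomic_init(&c->refcount, 1);
	call_get(c);	/* second reference, e.g. handed to an async callback */
	call_put(c);	/* callback done */
	call_put(c);	/* original reference: this one frees */
	puts("released");
	return 0;
}
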
 
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index b583ab0..964666c 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -149,37 +149,6 @@
 }
 
 /*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
-	struct file_lock	*fl = &lock->fl;
-	__s32			start, len;
-
-	if (!(p = xdr_encode_string(p, lock->caller))
-	 || !(p = nlm_encode_fh(p, &lock->fh))
-	 || !(p = nlm_encode_oh(p, &lock->oh)))
-		return NULL;
-
-	if (fl->fl_start > NLM_OFFSET_MAX
-	 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
-		return NULL;
-
-	start = loff_t_to_s32(fl->fl_start);
-	if (fl->fl_end == OFFSET_MAX)
-		len = 0;
-	else
-		len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
-
-	*p++ = htonl(lock->svid);
-	*p++ = htonl(start);
-	*p++ = htonl(len);
-
-	return p;
-}
-
-/*
  * Encode result of a TEST/TEST_MSG call
  */
 static __be32 *
@@ -372,259 +341,3 @@
 {
 	return xdr_ressize_check(rqstp, p);
 }
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
-{
-	return 0;
-}
-#endif
-
-static int
-nlmclt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	if (resp->status == nlm_lck_denied) {
-		struct file_lock	*fl = &resp->lock.fl;
-		u32			excl;
-		s32			start, len, end;
-
-		memset(&resp->lock, 0, sizeof(resp->lock));
-		locks_init_lock(fl);
-		excl = ntohl(*p++);
-		resp->lock.svid = ntohl(*p++);
-		fl->fl_pid = (pid_t)resp->lock.svid;
-		if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
-			return -EIO;
-
-		fl->fl_flags = FL_POSIX;
-		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
-		start = ntohl(*p++);
-		len = ntohl(*p++);
-		end = start + len - 1;
-
-		fl->fl_start = s32_to_loff_t(start);
-		if (len == 0 || end < 0)
-			fl->fl_end = OFFSET_MAX;
-		else
-			fl->fl_end = s32_to_loff_t(end);
-	}
-	return 0;
-}
-
-
-static int
-nlmclt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	*p++ = argp->reclaim? xdr_one : xdr_zero;
-	*p++ = htonl(argp->state);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
-		return -EIO;
-	*p++ = resp->status;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_encode_testres(p, resp)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM_void_sz		0
-#define NLM_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM_caller_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_owner_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_fhandle_sz		1+XDR_QUADLEN(NFS2_FHSIZE)
-#define NLM_lock_sz		3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz
-#define NLM_holder_sz		4+NLM_owner_sz
-
-#define NLM_testargs_sz		NLM_cookie_sz+1+NLM_lock_sz
-#define NLM_lockargs_sz		NLM_cookie_sz+4+NLM_lock_sz
-#define NLM_cancargs_sz		NLM_cookie_sz+2+NLM_lock_sz
-#define NLM_unlockargs_sz	NLM_cookie_sz+NLM_lock_sz
-
-#define NLM_testres_sz		NLM_cookie_sz+1+NLM_holder_sz
-#define NLM_res_sz		NLM_cookie_sz+1
-#define NLM_norep_sz		0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlmclt_decode_norep	NULL
-
-#define PROC(proc, argtype, restype)	\
-[NLMPROC_##proc] = {							\
-	.p_proc      = NLMPROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nlmclt_encode_##argtype,		\
-	.p_decode    = (kxdrproc_t) nlmclt_decode_##restype,		\
-	.p_arglen    = NLM_##argtype##_sz,				\
-	.p_replen    = NLM_##restype##_sz,				\
-	.p_statidx   = NLMPROC_##proc,					\
-	.p_name      = #proc,						\
-	}
-
-static struct rpc_procinfo	nlm_procedures[] = {
-    PROC(TEST,		testargs,	testres),
-    PROC(LOCK,		lockargs,	res),
-    PROC(CANCEL,	cancargs,	res),
-    PROC(UNLOCK,	unlockargs,	res),
-    PROC(GRANTED,	testargs,	res),
-    PROC(TEST_MSG,	testargs,	norep),
-    PROC(LOCK_MSG,	lockargs,	norep),
-    PROC(CANCEL_MSG,	cancargs,	norep),
-    PROC(UNLOCK_MSG,	unlockargs,	norep),
-    PROC(GRANTED_MSG,	testargs,	norep),
-    PROC(TEST_RES,	testres,	norep),
-    PROC(LOCK_RES,	res,		norep),
-    PROC(CANCEL_RES,	res,		norep),
-    PROC(UNLOCK_RES,	res,		norep),
-    PROC(GRANTED_RES,	res,		norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
-    PROC(SHARE,		shareargs,	shareres),
-    PROC(UNSHARE,	shareargs,	shareres),
-    PROC(NM_LOCK,	lockargs,	res),
-    PROC(FREE_ALL,	notify,		void),
-#endif
-};
-
-static struct rpc_version	nlm_version1 = {
-		.number		= 1,
-		.nrprocs	= 16,
-		.procs		= nlm_procedures,
-};
-
-static struct rpc_version	nlm_version3 = {
-		.number		= 3,
-		.nrprocs	= 24,
-		.procs		= nlm_procedures,
-};
-
-static struct rpc_version *	nlm_versions[] = {
-	[1] = &nlm_version1,
-	[3] = &nlm_version3,
-#ifdef 	CONFIG_LOCKD_V4
-	[4] = &nlm_version4,
-#endif
-};
-
-static struct rpc_stat		nlm_stats;
-
-struct rpc_program		nlm_program = {
-		.name		= "lockd",
-		.number		= NLM_PROGRAM,
-		.nrvers		= ARRAY_SIZE(nlm_versions),
-		.version	= nlm_versions,
-		.stats		= &nlm_stats,
-};
-
-#ifdef RPC_DEBUG
-const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
-{
-	/*
-	 * We can get away with a static buffer because we're only
-	 * called with BKL held.
-	 */
-	static char buf[2*NLM_MAXCOOKIELEN+1];
-	unsigned int i, len = sizeof(buf);
-	char *p = buf;
-
-	len--;	/* allow for trailing \0 */
-	if (len < 3)
-		return "???";
-	for (i = 0 ; i < cookie->len ; i++) {
-		if (len < 2) {
-			strcpy(p-3, "...");
-			break;
-		}
-		sprintf(p, "%02x", cookie->data[i]);
-		p += 2;
-		len -= 2;
-	}
-	*p = '\0';
-
-	return buf;
-}
-#endif
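
The NLM_*_sz macros removed above together with the client-side XDR routines count 32-bit XDR words: a one-word length prefix plus the payload rounded up to 4-byte quads. A tiny sketch of that sizing, assuming the usual (len + 3) / 4 rounding — the kernel's XDR_QUADLEN macro may be spelled differently:

#include <stdio.h>

/* round a byte length up to 32-bit XDR quads */
#define QUADLEN(len)	(((len) + 3) >> 2)

int main(void)
{
	/* e.g. a 32-byte NLM cookie: 1 length word + 8 data quads */
	unsigned int cookie_bytes = 32;

	printf("cookie_sz = %u 32-bit words\n", 1 + QUADLEN(cookie_bytes));
	return 0;
}
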
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index ad9dbbc..dfa4789 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -93,15 +93,6 @@
 	return p + XDR_QUADLEN(f->size);
 }
 
-static __be32 *
-nlm4_encode_fh(__be32 *p, struct nfs_fh *f)
-{
-	*p++ = htonl(f->size);
-	if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
-	memcpy(p, f->data, f->size);
-	return p + XDR_QUADLEN(f->size);
-}
-
 /*
  * Encode and decode owner handle
  */
@@ -112,12 +103,6 @@
 }
 
 static __be32 *
-nlm4_encode_oh(__be32 *p, struct xdr_netobj *oh)
-{
-	return xdr_encode_netobj(p, oh);
-}
-
-static __be32 *
 nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
 {
 	struct file_lock	*fl = &lock->fl;
@@ -150,38 +135,6 @@
 }
 
 /*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm4_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
-	struct file_lock	*fl = &lock->fl;
-	__s64			start, len;
-
-	if (!(p = xdr_encode_string(p, lock->caller))
-	 || !(p = nlm4_encode_fh(p, &lock->fh))
-	 || !(p = nlm4_encode_oh(p, &lock->oh)))
-		return NULL;
-
-	if (fl->fl_start > NLM4_OFFSET_MAX
-	 || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
-		return NULL;
-
-	*p++ = htonl(lock->svid);
-
-	start = loff_t_to_s64(fl->fl_start);
-	if (fl->fl_end == OFFSET_MAX)
-		len = 0;
-	else
-		len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
-
-	p = xdr_encode_hyper(p, start);
-	p = xdr_encode_hyper(p, len);
-
-	return p;
-}
-
-/*
  * Encode result of a TEST/TEST_MSG call
  */
 static __be32 *
@@ -379,211 +332,3 @@
 {
 	return xdr_ressize_check(rqstp, p);
 }
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlm4clt_decode_void(struct rpc_rqst *req, __be32 *p, void *ptr)
-{
-	return 0;
-}
-#endif
-
-static int
-nlm4clt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	if (resp->status == nlm_lck_denied) {
-		struct file_lock	*fl = &resp->lock.fl;
-		u32			excl;
-		__u64			start, len;
-		__s64			end;
-
-		memset(&resp->lock, 0, sizeof(resp->lock));
-		locks_init_lock(fl);
-		excl = ntohl(*p++);
-		resp->lock.svid = ntohl(*p++);
-		fl->fl_pid = (pid_t)resp->lock.svid;
-		if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
-			return -EIO;
-
-		fl->fl_flags = FL_POSIX;
-		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
-		p = xdr_decode_hyper(p, &start);
-		p = xdr_decode_hyper(p, &len);
-		end = start + len - 1;
-
-		fl->fl_start = s64_to_loff_t(start);
-		if (len == 0 || end < 0)
-			fl->fl_end = OFFSET_MAX;
-		else
-			fl->fl_end = s64_to_loff_t(end);
-	}
-	return 0;
-}
-
-
-static int
-nlm4clt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	*p++ = argp->reclaim? xdr_one : xdr_zero;
-	*p++ = htonl(argp->state);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
-		return -EIO;
-	*p++ = resp->status;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_encode_testres(p, resp)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
-#  error "NLM host name cannot be larger than NLM's maximum string length!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM4_void_sz		0
-#define NLM4_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM4_caller_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_owner_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_fhandle_sz		1+XDR_QUADLEN(NFS3_FHSIZE)
-#define NLM4_lock_sz		5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz
-#define NLM4_holder_sz		6+NLM4_owner_sz
-
-#define NLM4_testargs_sz	NLM4_cookie_sz+1+NLM4_lock_sz
-#define NLM4_lockargs_sz	NLM4_cookie_sz+4+NLM4_lock_sz
-#define NLM4_cancargs_sz	NLM4_cookie_sz+2+NLM4_lock_sz
-#define NLM4_unlockargs_sz	NLM4_cookie_sz+NLM4_lock_sz
-
-#define NLM4_testres_sz		NLM4_cookie_sz+1+NLM4_holder_sz
-#define NLM4_res_sz		NLM4_cookie_sz+1
-#define NLM4_norep_sz		0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlm4clt_decode_norep	NULL
-
-#define PROC(proc, argtype, restype)					\
-[NLMPROC_##proc] = {							\
-	.p_proc      = NLMPROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nlm4clt_encode_##argtype,		\
-	.p_decode    = (kxdrproc_t) nlm4clt_decode_##restype,		\
-	.p_arglen    = NLM4_##argtype##_sz,				\
-	.p_replen    = NLM4_##restype##_sz,				\
-	.p_statidx   = NLMPROC_##proc,					\
-	.p_name      = #proc,						\
-	}
-
-static struct rpc_procinfo	nlm4_procedures[] = {
-    PROC(TEST,		testargs,	testres),
-    PROC(LOCK,		lockargs,	res),
-    PROC(CANCEL,	cancargs,	res),
-    PROC(UNLOCK,	unlockargs,	res),
-    PROC(GRANTED,	testargs,	res),
-    PROC(TEST_MSG,	testargs,	norep),
-    PROC(LOCK_MSG,	lockargs,	norep),
-    PROC(CANCEL_MSG,	cancargs,	norep),
-    PROC(UNLOCK_MSG,	unlockargs,	norep),
-    PROC(GRANTED_MSG,	testargs,	norep),
-    PROC(TEST_RES,	testres,	norep),
-    PROC(LOCK_RES,	res,		norep),
-    PROC(CANCEL_RES,	res,		norep),
-    PROC(UNLOCK_RES,	res,		norep),
-    PROC(GRANTED_RES,	res,		norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
-    PROC(SHARE,		shareargs,	shareres),
-    PROC(UNSHARE,	shareargs,	shareres),
-    PROC(NM_LOCK,	lockargs,	res),
-    PROC(FREE_ALL,	notify,		void),
-#endif
-};
-
-struct rpc_version	nlm_version4 = {
-	.number		= 4,
-	.nrprocs	= 24,
-	.procs		= nlm4_procedures,
-};
diff --git a/fs/locks.c b/fs/locks.c
index 08415b2..0f39982 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -444,15 +444,9 @@
 	fl->fl_file->f_owner.signum = 0;
 }
 
-static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
-{
-	return fl->fl_file == try->fl_file;
-}
-
 static const struct lock_manager_operations lease_manager_ops = {
 	.fl_break = lease_break_callback,
 	.fl_release_private = lease_release_private_callback,
-	.fl_mylease = lease_mylease_callback,
 	.fl_change = lease_modify,
 };
 
@@ -1405,7 +1399,7 @@
 	for (before = &inode->i_flock;
 			((fl = *before) != NULL) && IS_LEASE(fl);
 			before = &fl->fl_next) {
-		if (lease->fl_lmops->fl_mylease(fl, lease))
+		if (fl->fl_file == filp)
 			my_before = before;
 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
 			/*
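
The fs/locks.c hunk retires the per-lock-manager fl_mylease() hook: when installing a lease, the VFS now recognises an existing lease on the same open file simply by comparing fl_file with the file pointer at hand. A toy userspace sketch of that simplified match follows; the structs are cut-down stand-ins and the IS_LEASE() filtering from the real loop is omitted:

#include <stdio.h>

struct file { int dummy; };

struct file_lock {
	struct file *fl_file;
	struct file_lock *fl_next;
};

static struct file_lock **find_my_lease(struct file_lock **before,
					struct file *filp)
{
	struct file_lock *fl;

	for (; (fl = *before) != NULL; before = &fl->fl_next)
		if (fl->fl_file == filp)	/* was fl_mylease(fl, lease) */
			return before;
	return NULL;
}

int main(void)
{
	struct file f1, f2;
	struct file_lock b = { &f2, NULL };
	struct file_lock a = { &f1, &b };
	struct file_lock *head = &a;

	printf("lease for f2 %s\n",
	       find_my_lease(&head, &f2) ? "found" : "not found");
	return 0;
}
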
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 92ca6fb..723bc5b 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -300,7 +300,7 @@
 
 static void bdev_put_device(struct logfs_super *s)
 {
-	close_bdev_exclusive(s->s_bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
@@ -325,13 +325,14 @@
 {
 	struct block_device *bdev;
 
-	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
+	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				  type);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
 	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
 		int mtdnr = MINOR(bdev->bd_dev);
-		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 		return logfs_get_sb_mtd(p, mtdnr);
 	}
 
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 9344474..a25444ab 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -76,18 +76,6 @@
 EXPORT_SYMBOL(mb_cache_entry_find_next);
 #endif
 
-struct mb_cache {
-	struct list_head		c_cache_list;
-	const char			*c_name;
-	atomic_t			c_entry_count;
-	int				c_max_entries;
-	int				c_bucket_bits;
-	struct kmem_cache		*c_entry_cache;
-	struct list_head		*c_block_hash;
-	struct list_head		*c_index_hash;
-};
-
-
 /*
  * Global data: list of all mbcache's, lru list, and a spinlock for
  * accessing cache data structures on SMP machines. The lru list is
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 1b9e077..6e6777f 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -23,8 +23,6 @@
 	struct inode * inode = NULL;
 	ino_t ino;
 
-	d_set_d_op(dentry, dir->i_sb->s_root->d_op);
-
 	if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen)
 		return ERR_PTR(-ENAMETOOLONG);
 
@@ -215,7 +213,6 @@
 		new_de = minix_find_entry(new_dentry, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
 		minix_set_link(new_de, new_page, old_inode);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
@@ -227,18 +224,15 @@
 			if (new_dir->i_nlink >= info->s_link_max)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
 		err = minix_add_link(new_dentry, old_inode);
-		if (err) {
-			inode_dec_link_count(old_inode);
+		if (err)
 			goto out_dir;
-		}
 		if (dir_de)
 			inode_inc_link_count(new_dir);
 	}
 
 	minix_delete_entry(old_de, old_page);
-	inode_dec_link_count(old_inode);
+	mark_inode_dirty(old_inode);
 
 	if (dir_de) {
 		minix_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/mpage.c b/fs/mpage.c
index fd56ca2..d78455a 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -40,7 +40,7 @@
  * status of that page is hard.  See end_buffer_async_read() for the details.
  * There is no point in duplicating all that complexity.
  */
-static void mpage_end_io_read(struct bio *bio, int err)
+static void mpage_end_io(struct bio *bio, int err)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -50,44 +50,29 @@
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			SetPageUptodate(page);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
+		if (bio_data_dir(bio) == READ) {
+			if (uptodate) {
+				SetPageUptodate(page);
+			} else {
+				ClearPageUptodate(page);
+				SetPageError(page);
+			}
+			unlock_page(page);
+		} else { /* bio_data_dir(bio) == WRITE */
+			if (!uptodate) {
+				SetPageError(page);
+				if (page->mapping)
+					set_bit(AS_EIO, &page->mapping->flags);
+			}
+			end_page_writeback(page);
 		}
-		unlock_page(page);
-	} while (bvec >= bio->bi_io_vec);
-	bio_put(bio);
-}
-
-static void mpage_end_io_write(struct bio *bio, int err)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
-	do {
-		struct page *page = bvec->bv_page;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (!uptodate){
-			SetPageError(page);
-			if (page->mapping)
-				set_bit(AS_EIO, &page->mapping->flags);
-		}
-		end_page_writeback(page);
 	} while (bvec >= bio->bi_io_vec);
 	bio_put(bio);
 }
 
 static struct bio *mpage_bio_submit(int rw, struct bio *bio)
 {
-	bio->bi_end_io = mpage_end_io_read;
-	if (rw == WRITE)
-		bio->bi_end_io = mpage_end_io_write;
+	bio->bi_end_io = mpage_end_io;
 	submit_bio(rw, bio);
 	return NULL;
 }
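
In mpage.c the separate read and write completion handlers collapse into a single mpage_end_io() that branches on bio_data_dir(), as the hunk shows. The standalone sketch below imitates that control-flow shape with invented types; it is not the kernel's bio completion path:

#include <stdbool.h>
#include <stdio.h>

enum io_dir { IO_READ, IO_WRITE };

struct fake_page { bool uptodate; bool error; bool writeback; };

struct request {
	enum io_dir dir;
	bool success;
	struct fake_page *page;
};

static void end_io(struct request *rq)
{
	struct fake_page *page = rq->page;

	if (rq->dir == IO_READ) {
		page->uptodate = rq->success;
		page->error = !rq->success;
		/* the read path would unlock the page here */
	} else {			/* IO_WRITE */
		if (!rq->success)
			page->error = true;
		page->writeback = false;	/* end writeback */
	}
}

int main(void)
{
	struct fake_page p = { .writeback = true };
	struct request rq = { IO_WRITE, true, &p };

	end_io(&rq);
	printf("writeback done: %s, error: %s\n",
	       p.writeback ? "no" : "yes", p.error ? "yes" : "no");
	return 0;
}
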
diff --git a/fs/namei.c b/fs/namei.c
index 24ece10..a4689eb 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -368,18 +368,6 @@
 EXPORT_SYMBOL(path_get);
 
 /**
- * path_get_long - get a long reference to a path
- * @path: path to get the reference to
- *
- * Given a path increment the reference count to the dentry and the vfsmount.
- */
-void path_get_long(struct path *path)
-{
-	mntget_long(path->mnt);
-	dget(path->dentry);
-}
-
-/**
  * path_put - put a reference to a path
  * @path: path to put the reference to
  *
@@ -393,18 +381,6 @@
 EXPORT_SYMBOL(path_put);
 
 /**
- * path_put_long - put a long reference to a path
- * @path: path to put the reference to
- *
- * Given a path decrement the reference count to the dentry and the vfsmount.
- */
-void path_put_long(struct path *path)
-{
-	dput(path->dentry);
-	mntput_long(path->mnt);
-}
-
-/**
  * nameidata_drop_rcu - drop this nameidata out of rcu-walk
  * @nd: nameidata pathwalk data to drop
  * Returns: 0 on success, -ECHILD on failure
@@ -577,32 +553,25 @@
  */
 void release_open_intent(struct nameidata *nd)
 {
-	if (nd->intent.open.file->f_path.dentry == NULL)
-		put_filp(nd->intent.open.file);
-	else
-		fput(nd->intent.open.file);
-}
+	struct file *file = nd->intent.open.file;
 
-static int d_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-	int status;
-
-	status = dentry->d_op->d_revalidate(dentry, nd);
-	if (status == -ECHILD) {
-		if (nameidata_dentry_drop_rcu(nd, dentry))
-			return status;
-		status = dentry->d_op->d_revalidate(dentry, nd);
+	if (file && !IS_ERR(file)) {
+		if (file->f_path.dentry == NULL)
+			put_filp(file);
+		else
+			fput(file);
 	}
-
-	return status;
 }
 
-static inline struct dentry *
+static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	return dentry->d_op->d_revalidate(dentry, nd);
+}
+
+static struct dentry *
 do_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	int status;
-
-	status = d_revalidate(dentry, nd);
+	int status = d_revalidate(dentry, nd);
 	if (unlikely(status <= 0)) {
 		/*
 		 * The dentry failed validation.
@@ -611,24 +580,39 @@
 		 * to return a fail status.
 		 */
 		if (status < 0) {
-			/* If we're in rcu-walk, we don't have a ref */
-			if (!(nd->flags & LOOKUP_RCU))
-				dput(dentry);
+			dput(dentry);
 			dentry = ERR_PTR(status);
-
-		} else {
-			/* Don't d_invalidate in rcu-walk mode */
-			if (nameidata_dentry_drop_rcu_maybe(nd, dentry))
-				return ERR_PTR(-ECHILD);
-			if (!d_invalidate(dentry)) {
-				dput(dentry);
-				dentry = NULL;
-			}
+		} else if (!d_invalidate(dentry)) {
+			dput(dentry);
+			dentry = NULL;
 		}
 	}
 	return dentry;
 }
 
+static inline struct dentry *
+do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd)
+{
+	int status = d_revalidate(dentry, nd);
+	if (likely(status > 0))
+		return dentry;
+	if (status == -ECHILD) {
+		if (nameidata_dentry_drop_rcu(nd, dentry))
+			return ERR_PTR(-ECHILD);
+		return do_revalidate(dentry, nd);
+	}
+	if (status < 0)
+		return ERR_PTR(status);
+	/* Don't d_invalidate in rcu-walk mode */
+	if (nameidata_dentry_drop_rcu(nd, dentry))
+		return ERR_PTR(-ECHILD);
+	if (!d_invalidate(dentry)) {
+		dput(dentry);
+		dentry = NULL;
+	}
+	return dentry;
+}
+
 static inline int need_reval_dot(struct dentry *dentry)
 {
 	if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
@@ -761,7 +745,8 @@
 		mntput(path->mnt);
 }
 
-static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
+static inline void path_to_nameidata(const struct path *path,
+					struct nameidata *nd)
 {
 	if (!(nd->flags & LOOKUP_RCU)) {
 		dput(nd->path.dentry);
@@ -773,20 +758,18 @@
 }
 
 static __always_inline int
-__do_follow_link(struct path *path, struct nameidata *nd, void **p)
+__do_follow_link(const struct path *link, struct nameidata *nd, void **p)
 {
 	int error;
-	struct dentry *dentry = path->dentry;
+	struct dentry *dentry = link->dentry;
 
-	touch_atime(path->mnt, dentry);
+	BUG_ON(nd->flags & LOOKUP_RCU);
+
+	touch_atime(link->mnt, dentry);
 	nd_set_link(nd, NULL);
 
-	if (path->mnt != nd->path.mnt) {
-		path_to_nameidata(path, nd);
-		nd->inode = nd->path.dentry->d_inode;
-		dget(dentry);
-	}
-	mntget(path->mnt);
+	if (link->mnt == nd->path.mnt)
+		mntget(link->mnt);
 
 	nd->last_type = LAST_BIND;
 	*p = dentry->d_inode->i_op->follow_link(dentry, nd);
@@ -812,10 +795,16 @@
  * Without that kind of total limit, nasty chains of consecutive
  * symlinks can cause almost arbitrarily long lookups. 
  */
-static inline int do_follow_link(struct path *path, struct nameidata *nd)
+static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd)
 {
 	void *cookie;
 	int err = -ELOOP;
+
+	/* We drop rcu-walk here */
+	if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
+		return -ECHILD;
+	BUG_ON(inode != path->dentry->d_inode);
+
 	if (current->link_count >= MAX_NESTED_LINKS)
 		goto loop;
 	if (current->total_link_count >= 40)
@@ -877,54 +866,148 @@
 }
 
 /*
- * serialization is taken care of in namespace.c
+ * Perform an automount
+ * - return -EISDIR to tell follow_managed() to stop and return the path we
+ *   were called with.
  */
-static void __follow_mount_rcu(struct nameidata *nd, struct path *path,
-				struct inode **inode)
+static int follow_automount(struct path *path, unsigned flags,
+			    bool *need_mntput)
 {
-	while (d_mountpoint(path->dentry)) {
-		struct vfsmount *mounted;
-		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
-		if (!mounted)
-			return;
-		path->mnt = mounted;
-		path->dentry = mounted->mnt_root;
-		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
-		*inode = path->dentry->d_inode;
-	}
-}
+	struct vfsmount *mnt;
+	int err;
 
-static int __follow_mount(struct path *path)
-{
-	int res = 0;
-	while (d_mountpoint(path->dentry)) {
-		struct vfsmount *mounted = lookup_mnt(path);
-		if (!mounted)
-			break;
+	if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
+		return -EREMOTE;
+
+	/* We don't want to mount if someone supplied AT_NO_AUTOMOUNT
+	 * and this is the terminal part of the path.
+	 */
+	if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE))
+		return -EISDIR; /* we actually want to stop here */
+
+	/* We want to mount if someone is trying to open/create a file of any
+	 * type under the mountpoint, wants to traverse through the mountpoint
+	 * or wants to open the mounted directory.
+	 *
+	 * We don't want to mount if someone's just doing a stat and they've
+	 * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
+	 * appended a '/' to the name.
+	 */
+	if (!(flags & LOOKUP_FOLLOW) &&
+	    !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY |
+		       LOOKUP_OPEN | LOOKUP_CREATE)))
+		return -EISDIR;
+
+	current->total_link_count++;
+	if (current->total_link_count >= 40)
+		return -ELOOP;
+
+	mnt = path->dentry->d_op->d_automount(path);
+	if (IS_ERR(mnt)) {
+		/*
+		 * The filesystem is allowed to return -EISDIR here to indicate
+		 * it doesn't want to automount.  For instance, autofs would do
+		 * this so that its userspace daemon can mount on this dentry.
+		 *
+		 * However, we can only permit this if it's a terminal point in
+		 * the path being looked up; if it wasn't then the remainder of
+		 * the path is inaccessible and we should say so.
+		 */
+		if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_CONTINUE))
+			return -EREMOTE;
+		return PTR_ERR(mnt);
+	}
+
+	if (!mnt) /* mount collision */
+		return 0;
+
+	err = finish_automount(mnt, path);
+
+	switch (err) {
+	case -EBUSY:
+		/* Someone else made a mount here whilst we were busy */
+		return 0;
+	case 0:
 		dput(path->dentry);
-		if (res)
+		if (*need_mntput)
 			mntput(path->mnt);
-		path->mnt = mounted;
-		path->dentry = dget(mounted->mnt_root);
-		res = 1;
+		path->mnt = mnt;
+		path->dentry = dget(mnt->mnt_root);
+		*need_mntput = true;
+		return 0;
+	default:
+		return err;
 	}
-	return res;
+
 }
 
-static void follow_mount(struct path *path)
+/*
+ * Handle a dentry that is managed in some way.
+ * - Flagged for transit management (autofs)
+ * - Flagged as mountpoint
+ * - Flagged as automount point
+ *
+ * This may only be called in refwalk mode.
+ *
+ * Serialization is taken care of in namespace.c
+ */
+static int follow_managed(struct path *path, unsigned flags)
 {
-	while (d_mountpoint(path->dentry)) {
-		struct vfsmount *mounted = lookup_mnt(path);
-		if (!mounted)
-			break;
-		dput(path->dentry);
-		mntput(path->mnt);
-		path->mnt = mounted;
-		path->dentry = dget(mounted->mnt_root);
+	unsigned managed;
+	bool need_mntput = false;
+	int ret;
+
+	/* Given that we're not holding a lock here, we retain the value in a
+	 * local variable for each dentry as we look at it so that we don't see
+	 * the components of that value change under us */
+	while (managed = ACCESS_ONCE(path->dentry->d_flags),
+	       managed &= DCACHE_MANAGED_DENTRY,
+	       unlikely(managed != 0)) {
+		/* Allow the filesystem to manage the transit without i_mutex
+		 * being held. */
+		if (managed & DCACHE_MANAGE_TRANSIT) {
+			BUG_ON(!path->dentry->d_op);
+			BUG_ON(!path->dentry->d_op->d_manage);
+			ret = path->dentry->d_op->d_manage(path->dentry,
+							   false, false);
+			if (ret < 0)
+				return ret == -EISDIR ? 0 : ret;
+		}
+
+		/* Transit to a mounted filesystem. */
+		if (managed & DCACHE_MOUNTED) {
+			struct vfsmount *mounted = lookup_mnt(path);
+			if (mounted) {
+				dput(path->dentry);
+				if (need_mntput)
+					mntput(path->mnt);
+				path->mnt = mounted;
+				path->dentry = dget(mounted->mnt_root);
+				need_mntput = true;
+				continue;
+			}
+
+			/* Something is mounted on this dentry in another
+			 * namespace and/or whatever was mounted there in this
+			 * namespace got unmounted before we managed to get the
+			 * vfsmount_lock */
+		}
+
+		/* Handle an automount point */
+		if (managed & DCACHE_NEED_AUTOMOUNT) {
+			ret = follow_automount(path, flags, &need_mntput);
+			if (ret < 0)
+				return ret == -EISDIR ? 0 : ret;
+			continue;
+		}
+
+		/* We didn't change the current path point */
+		break;
 	}
+	return 0;
 }
 
-int follow_down(struct path *path)
+int follow_down_one(struct path *path)
 {
 	struct vfsmount *mounted;
 
@@ -939,13 +1022,41 @@
 	return 0;
 }
 
+/*
+ * Skip to top of mountpoint pile in rcuwalk mode.  We abort the rcu-walk if we
+ * meet a managed dentry and we're not walking to "..".  True is returned to
+ * continue, false to abort.
+ */
+static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+			       struct inode **inode, bool reverse_transit)
+{
+	while (d_mountpoint(path->dentry)) {
+		struct vfsmount *mounted;
+		if (unlikely(path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
+		    !reverse_transit &&
+		    path->dentry->d_op->d_manage(path->dentry, false, true) < 0)
+			return false;
+		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
+		if (!mounted)
+			break;
+		path->mnt = mounted;
+		path->dentry = mounted->mnt_root;
+		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+		*inode = path->dentry->d_inode;
+	}
+
+	if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+		return reverse_transit;
+	return true;
+}
+
 static int follow_dotdot_rcu(struct nameidata *nd)
 {
 	struct inode *inode = nd->inode;
 
 	set_root_rcu(nd);
 
-	while(1) {
+	while (1) {
 		if (nd->path.dentry == nd->root.dentry &&
 		    nd->path.mnt == nd->root.mnt) {
 			break;
@@ -968,12 +1079,80 @@
 		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
 		inode = nd->path.dentry->d_inode;
 	}
-	__follow_mount_rcu(nd, &nd->path, &inode);
+	__follow_mount_rcu(nd, &nd->path, &inode, true);
 	nd->inode = inode;
 
 	return 0;
 }
 
+/*
+ * Follow down to the covering mount currently visible to userspace.  At each
+ * point, the filesystem owning that dentry may be queried as to whether the
+ * caller is permitted to proceed or not.
+ *
+ * Care must be taken as namespace_sem may be held (indicated by mounting_here
+ * being true).
+ */
+int follow_down(struct path *path, bool mounting_here)
+{
+	unsigned managed;
+	int ret;
+
+	while (managed = ACCESS_ONCE(path->dentry->d_flags),
+	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
+		/* Allow the filesystem to manage the transit without i_mutex
+		 * being held.
+		 *
+		 * We indicate to the filesystem if someone is trying to mount
+		 * something here.  This gives autofs the chance to deny anyone
+		 * other than its daemon the right to mount on its
+		 * superstructure.
+		 *
+		 * The filesystem may sleep at this point.
+		 */
+		if (managed & DCACHE_MANAGE_TRANSIT) {
+			BUG_ON(!path->dentry->d_op);
+			BUG_ON(!path->dentry->d_op->d_manage);
+			ret = path->dentry->d_op->d_manage(
+				path->dentry, mounting_here, false);
+			if (ret < 0)
+				return ret == -EISDIR ? 0 : ret;
+		}
+
+		/* Transit to a mounted filesystem. */
+		if (managed & DCACHE_MOUNTED) {
+			struct vfsmount *mounted = lookup_mnt(path);
+			if (!mounted)
+				break;
+			dput(path->dentry);
+			mntput(path->mnt);
+			path->mnt = mounted;
+			path->dentry = dget(mounted->mnt_root);
+			continue;
+		}
+
+		/* Don't handle automount points here */
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
+ */
+static void follow_mount(struct path *path)
+{
+	while (d_mountpoint(path->dentry)) {
+		struct vfsmount *mounted = lookup_mnt(path);
+		if (!mounted)
+			break;
+		dput(path->dentry);
+		mntput(path->mnt);
+		path->mnt = mounted;
+		path->dentry = dget(mounted->mnt_root);
+	}
+}
+
 static void follow_dotdot(struct nameidata *nd)
 {
 	set_root(nd);
@@ -1038,12 +1217,14 @@
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry, *parent = nd->path.dentry;
 	struct inode *dir;
+	int err;
+
 	/*
 	 * See if the low-level filesystem might want
 	 * to use its own hash..
 	 */
 	if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
-		int err = parent->d_op->d_hash(parent, nd->inode, name);
+		err = parent->d_op->d_hash(parent, nd->inode, name);
 		if (err < 0)
 			return err;
 	}
@@ -1068,24 +1249,43 @@
 			return -ECHILD;
 
 		nd->seq = seq;
-		if (dentry->d_flags & DCACHE_OP_REVALIDATE)
-			goto need_revalidate;
+		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+			dentry = do_revalidate_rcu(dentry, nd);
+			if (!dentry)
+				goto need_lookup;
+			if (IS_ERR(dentry))
+				goto fail;
+			if (!(nd->flags & LOOKUP_RCU))
+				goto done;
+		}
 		path->mnt = mnt;
 		path->dentry = dentry;
-		__follow_mount_rcu(nd, path, inode);
-	} else {
-		dentry = __d_lookup(parent, name);
+		if (likely(__follow_mount_rcu(nd, path, inode, false)))
+			return 0;
+		if (nameidata_drop_rcu(nd))
+			return -ECHILD;
+		/* fallthru */
+	}
+	dentry = __d_lookup(parent, name);
+	if (!dentry)
+		goto need_lookup;
+found:
+	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+		dentry = do_revalidate(dentry, nd);
 		if (!dentry)
 			goto need_lookup;
-found:
-		if (dentry->d_flags & DCACHE_OP_REVALIDATE)
-			goto need_revalidate;
-done:
-		path->mnt = mnt;
-		path->dentry = dentry;
-		__follow_mount(path);
-		*inode = path->dentry->d_inode;
+		if (IS_ERR(dentry))
+			goto fail;
 	}
+done:
+	path->mnt = mnt;
+	path->dentry = dentry;
+	err = follow_managed(path, nd->flags);
+	if (unlikely(err < 0)) {
+		path_put_conditional(path, nd);
+		return err;
+	}
+	*inode = path->dentry->d_inode;
 	return 0;
 
 need_lookup:
@@ -1118,30 +1318,11 @@
 	mutex_unlock(&dir->i_mutex);
 	goto found;
 
-need_revalidate:
-	dentry = do_revalidate(dentry, nd);
-	if (!dentry)
-		goto need_lookup;
-	if (IS_ERR(dentry))
-		goto fail;
-	goto done;
-
 fail:
 	return PTR_ERR(dentry);
 }
 
 /*
- * This is a temporary kludge to deal with "automount" symlinks; proper
- * solution is to trigger them on follow_mount(), so that do_lookup()
- * would DTRT.  To be killed before 2.6.34-final.
- */
-static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
-{
-	return inode && unlikely(inode->i_op->follow_link) &&
-		((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
-}
-
-/*
  * Name resolution.
  * This is the basic name resolution function, turning a pathname into
  * the final dentry. We expect 'base' to be positive and a directory.
@@ -1233,11 +1414,7 @@
 			goto out_dput;
 
 		if (inode->i_op->follow_link) {
-			/* We commonly drop rcu-walk here */
-			if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
-				return -ECHILD;
-			BUG_ON(inode != next.dentry->d_inode);
-			err = do_follow_link(&next, nd);
+			err = do_follow_link(inode, &next, nd);
 			if (err)
 				goto return_err;
 			nd->inode = nd->path.dentry->d_inode;
@@ -1279,11 +1456,9 @@
 		err = do_lookup(nd, &this, &next, &inode);
 		if (err)
 			break;
-		if (follow_on_final(inode, lookup_flags)) {
-			if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
-				return -ECHILD;
-			BUG_ON(inode != next.dentry->d_inode);
-			err = do_follow_link(&next, nd);
+		if (inode && unlikely(inode->i_op->follow_link) &&
+		    (lookup_flags & LOOKUP_FOLLOW)) {
+			err = do_follow_link(inode, &next, nd);
 			if (err)
 				goto return_err;
 			nd->inode = nd->path.dentry->d_inode;
@@ -1317,12 +1492,15 @@
 		 * We may need to check the cached dentry for staleness.
 		 */
 		if (need_reval_dot(nd->path.dentry)) {
+			if (nameidata_drop_rcu_last_maybe(nd))
+				return -ECHILD;
 			/* Note: we do not d_invalidate() */
 			err = d_revalidate(nd->path.dentry, nd);
 			if (!err)
 				err = -ESTALE;
 			if (err < 0)
 				break;
+			return 0;
 		}
 return_base:
 		if (nameidata_drop_rcu_last_maybe(nd))
@@ -1368,6 +1546,7 @@
 		/* nd->path had been dropped */
 		current->total_link_count = 0;
 		nd->path = save;
+		nd->inode = save.dentry->d_inode;
 		path_get(&nd->path);
 		nd->flags |= LOOKUP_REVAL;
 		result = link_path_walk(name, nd);
@@ -1950,8 +2129,9 @@
 	return break_lease(inode, flag);
 }
 
-static int handle_truncate(struct path *path)
+static int handle_truncate(struct file *filp)
 {
+	struct path *path = &filp->f_path;
 	struct inode *inode = path->dentry->d_inode;
 	int error = get_write_access(inode);
 	if (error)
@@ -1965,7 +2145,7 @@
 	if (!error) {
 		error = do_truncate(path->dentry, 0,
 				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
-				    NULL);
+				    filp);
 	}
 	put_write_access(inode);
 	return error;
@@ -2063,7 +2243,7 @@
 	}
 	if (!IS_ERR(filp)) {
 		if (will_truncate) {
-			error = handle_truncate(&nd->path);
+			error = handle_truncate(filp);
 			if (error) {
 				fput(filp);
 				filp = ERR_PTR(error);
@@ -2081,8 +2261,6 @@
 	return filp;
 
 exit:
-	if (!IS_ERR(nd->intent.open.file))
-		release_open_intent(nd);
 	path_put(&nd->path);
 	return ERR_PTR(error);
 }
@@ -2104,11 +2282,13 @@
 		dir = nd->path.dentry;
 	case LAST_DOT:
 		if (need_reval_dot(dir)) {
-			error = d_revalidate(nd->path.dentry, nd);
-			if (!error)
-				error = -ESTALE;
-			if (error < 0)
+			int status = d_revalidate(nd->path.dentry, nd);
+			if (!status)
+				status = -ESTALE;
+			if (status < 0) {
+				error = status;
 				goto exit;
+			}
 		}
 		/* fallthrough */
 	case LAST_ROOT:
@@ -2178,11 +2358,9 @@
 	if (open_flag & O_EXCL)
 		goto exit_dput;
 
-	if (__follow_mount(path)) {
-		error = -ELOOP;
-		if (open_flag & O_NOFOLLOW)
-			goto exit_dput;
-	}
+	error = follow_managed(path, nd->flags);
+	if (error < 0)
+		goto exit_dput;
 
 	error = -ENOENT;
 	if (!path->dentry->d_inode)
@@ -2205,8 +2383,6 @@
 exit_dput:
 	path_put_conditional(path, nd);
 exit:
-	if (!IS_ERR(nd->intent.open.file))
-		release_open_intent(nd);
 	path_put(&nd->path);
 	return ERR_PTR(error);
 }
@@ -2280,21 +2456,29 @@
 	/* !O_CREAT, simple open */
 	error = do_path_lookup(dfd, pathname, flags, &nd);
 	if (unlikely(error))
-		goto out_filp;
+		goto out_filp2;
 	error = -ELOOP;
 	if (!(nd.flags & LOOKUP_FOLLOW)) {
 		if (nd.inode->i_op->follow_link)
-			goto out_path;
+			goto out_path2;
 	}
 	error = -ENOTDIR;
 	if (nd.flags & LOOKUP_DIRECTORY) {
 		if (!nd.inode->i_op->lookup)
-			goto out_path;
+			goto out_path2;
 	}
 	audit_inode(pathname, nd.path.dentry);
 	filp = finish_open(&nd, open_flag, acc_mode);
+out2:
+	release_open_intent(&nd);
 	return filp;
 
+out_path2:
+	path_put(&nd.path);
+out_filp2:
+	filp = ERR_PTR(error);
+	goto out2;
+
 creat:
 	/* OK, have to create the file. Find the parent. */
 	error = path_init_rcu(dfd, pathname,
@@ -2327,11 +2511,11 @@
 	nd.flags = flags;
 	filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
 	while (unlikely(!filp)) { /* trailing symlink */
-		struct path holder;
+		struct path link = path;
+		struct inode *linki = link.dentry->d_inode;
 		void *cookie;
 		error = -ELOOP;
-		/* S_ISDIR part is a temporary automount kludge */
-		if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(nd.inode->i_mode))
+		if (!(nd.flags & LOOKUP_FOLLOW))
 			goto exit_dput;
 		if (count++ == 32)
 			goto exit_dput;
@@ -2347,29 +2531,29 @@
 		 * just set LAST_BIND.
 		 */
 		nd.flags |= LOOKUP_PARENT;
-		error = security_inode_follow_link(path.dentry, &nd);
+		error = security_inode_follow_link(link.dentry, &nd);
 		if (error)
 			goto exit_dput;
-		error = __do_follow_link(&path, &nd, &cookie);
+		error = __do_follow_link(&link, &nd, &cookie);
 		if (unlikely(error)) {
-			if (!IS_ERR(cookie) && nd.inode->i_op->put_link)
-				nd.inode->i_op->put_link(path.dentry, &nd, cookie);
+			if (!IS_ERR(cookie) && linki->i_op->put_link)
+				linki->i_op->put_link(link.dentry, &nd, cookie);
 			/* nd.path had been dropped */
-			nd.path = path;
+			nd.path = link;
 			goto out_path;
 		}
-		holder = path;
 		nd.flags &= ~LOOKUP_PARENT;
 		filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
-		if (nd.inode->i_op->put_link)
-			nd.inode->i_op->put_link(holder.dentry, &nd, cookie);
-		path_put(&holder);
+		if (linki->i_op->put_link)
+			linki->i_op->put_link(link.dentry, &nd, cookie);
+		path_put(&link);
 	}
 out:
 	if (nd.root.mnt)
 		path_put(&nd.root);
 	if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL))
 		goto reval;
+	release_open_intent(&nd);
 	return filp;
 
 exit_dput:
@@ -2377,8 +2561,6 @@
 out_path:
 	path_put(&nd.path);
 out_filp:
-	if (!IS_ERR(nd.intent.open.file))
-		release_open_intent(&nd);
 	filp = ERR_PTR(error);
 	goto out;
 }
@@ -3391,6 +3573,7 @@
 };
 
 EXPORT_SYMBOL(user_path_at);
+EXPORT_SYMBOL(follow_down_one);
 EXPORT_SYMBOL(follow_down);
 EXPORT_SYMBOL(follow_up);
 EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
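
The namei.c rework above centres on follow_managed(): snapshot d_flags once per pass, let the filesystem manage the transit, step across anything mounted, trigger an automount if one is wanted, and loop until the path point stops changing. The userspace sketch below imitates that dispatch structure; every flag name and handler in it is invented for the demo:

#include <stdio.h>

#define DEMO_MANAGE_TRANSIT	0x1
#define DEMO_MOUNTED		0x2
#define DEMO_NEED_AUTOMOUNT	0x4
#define DEMO_MANAGED_MASK	(DEMO_MANAGE_TRANSIT | DEMO_MOUNTED | \
				 DEMO_NEED_AUTOMOUNT)

struct demo_point { unsigned flags; };

/* crossing a mount clears MOUNTED; automounting clears NEED_AUTOMOUNT */
static void cross_mount(struct demo_point *p)  { p->flags &= ~DEMO_MOUNTED; }
static void do_automount(struct demo_point *p) { p->flags &= ~DEMO_NEED_AUTOMOUNT; }

static int follow_managed_demo(struct demo_point *p)
{
	unsigned managed;

	while (managed = p->flags & DEMO_MANAGED_MASK, managed != 0) {
		/* let the filesystem veto or delay the transit first */
		if (managed & DEMO_MANAGE_TRANSIT)
			puts("d_manage: filesystem allows the transit");

		/* then step across anything mounted here */
		if (managed & DEMO_MOUNTED) {
			puts("crossing onto the mounted filesystem");
			cross_mount(p);
			continue;
		}

		/* finally, trigger an automount if one is wanted */
		if (managed & DEMO_NEED_AUTOMOUNT) {
			puts("triggering the automount");
			do_automount(p);
			continue;
		}

		break;	/* nothing changed the current point */
	}
	return 0;
}

int main(void)
{
	struct demo_point p = { DEMO_MANAGE_TRANSIT | DEMO_MOUNTED };

	return follow_managed_demo(&p);
}
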
diff --git a/fs/namespace.c b/fs/namespace.c
index 3ddfd90..d1edf26 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -183,7 +183,7 @@
 unsigned int mnt_get_count(struct vfsmount *mnt)
 {
 #ifdef CONFIG_SMP
-	unsigned int count = atomic_read(&mnt->mnt_longrefs);
+	unsigned int count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
@@ -217,7 +217,7 @@
 		if (!mnt->mnt_pcp)
 			goto out_free_devname;
 
-		atomic_set(&mnt->mnt_longrefs, 1);
+		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
 #else
 		mnt->mnt_count = 1;
 		mnt->mnt_writers = 0;
@@ -611,6 +611,21 @@
 	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
 }
 
+static inline void __mnt_make_longterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	atomic_inc(&mnt->mnt_longterm);
+#endif
+}
+
+/* needs vfsmount lock for write */
+static inline void __mnt_make_shortterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	atomic_dec(&mnt->mnt_longterm);
+#endif
+}
+
 /*
  * vfsmount lock must be held for write
  */
@@ -624,8 +639,11 @@
 	BUG_ON(parent == mnt);
 
 	list_add_tail(&head, &mnt->mnt_list);
-	list_for_each_entry(m, &head, mnt_list)
+	list_for_each_entry(m, &head, mnt_list) {
 		m->mnt_ns = n;
+		__mnt_make_longterm(m);
+	}
+
 	list_splice(&head, n->list.prev);
 
 	list_add_tail(&mnt->mnt_hash, mount_hashtable +
@@ -734,51 +752,30 @@
 	deactivate_super(sb);
 }
 
-#ifdef CONFIG_SMP
-static inline void __mntput(struct vfsmount *mnt, int longrefs)
+static void mntput_no_expire(struct vfsmount *mnt)
 {
-	if (!longrefs) {
 put_again:
-		br_read_lock(vfsmount_lock);
-		if (likely(atomic_read(&mnt->mnt_longrefs))) {
-			mnt_dec_count(mnt);
-			br_read_unlock(vfsmount_lock);
-			return;
-		}
+#ifdef CONFIG_SMP
+	br_read_lock(vfsmount_lock);
+	if (likely(atomic_read(&mnt->mnt_longterm))) {
+		mnt_dec_count(mnt);
 		br_read_unlock(vfsmount_lock);
-	} else {
-		BUG_ON(!atomic_read(&mnt->mnt_longrefs));
-		if (atomic_add_unless(&mnt->mnt_longrefs, -1, 1))
-			return;
+		return;
 	}
+	br_read_unlock(vfsmount_lock);
 
 	br_write_lock(vfsmount_lock);
-	if (!longrefs)
-		mnt_dec_count(mnt);
-	else
-		atomic_dec(&mnt->mnt_longrefs);
+	mnt_dec_count(mnt);
 	if (mnt_get_count(mnt)) {
 		br_write_unlock(vfsmount_lock);
 		return;
 	}
-	if (unlikely(mnt->mnt_pinned)) {
-		mnt_add_count(mnt, mnt->mnt_pinned + 1);
-		mnt->mnt_pinned = 0;
-		br_write_unlock(vfsmount_lock);
-		acct_auto_close_mnt(mnt);
-		goto put_again;
-	}
-	br_write_unlock(vfsmount_lock);
-	mntfree(mnt);
-}
 #else
-static inline void __mntput(struct vfsmount *mnt, int longrefs)
-{
-put_again:
 	mnt_dec_count(mnt);
 	if (likely(mnt_get_count(mnt)))
 		return;
 	br_write_lock(vfsmount_lock);
+#endif
 	if (unlikely(mnt->mnt_pinned)) {
 		mnt_add_count(mnt, mnt->mnt_pinned + 1);
 		mnt->mnt_pinned = 0;
@@ -789,12 +786,6 @@
 	br_write_unlock(vfsmount_lock);
 	mntfree(mnt);
 }
-#endif
-
-static void mntput_no_expire(struct vfsmount *mnt)
-{
-	__mntput(mnt, 0);
-}
 
 void mntput(struct vfsmount *mnt)
 {
@@ -802,7 +793,7 @@
 		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
 		if (unlikely(mnt->mnt_expiry_mark))
 			mnt->mnt_expiry_mark = 0;
-		__mntput(mnt, 0);
+		mntput_no_expire(mnt);
 	}
 }
 EXPORT_SYMBOL(mntput);
@@ -815,33 +806,6 @@
 }
 EXPORT_SYMBOL(mntget);
 
-void mntput_long(struct vfsmount *mnt)
-{
-#ifdef CONFIG_SMP
-	if (mnt) {
-		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
-		if (unlikely(mnt->mnt_expiry_mark))
-			mnt->mnt_expiry_mark = 0;
-		__mntput(mnt, 1);
-	}
-#else
-	mntput(mnt);
-#endif
-}
-EXPORT_SYMBOL(mntput_long);
-
-struct vfsmount *mntget_long(struct vfsmount *mnt)
-{
-#ifdef CONFIG_SMP
-	if (mnt)
-		atomic_inc(&mnt->mnt_longrefs);
-	return mnt;
-#else
-	return mntget(mnt);
-#endif
-}
-EXPORT_SYMBOL(mntget_long);
-
 void mnt_pin(struct vfsmount *mnt)
 {
 	br_write_lock(vfsmount_lock);
@@ -1216,7 +1180,7 @@
 			dput(dentry);
 			mntput(m);
 		}
-		mntput_long(mnt);
+		mntput(mnt);
 	}
 }
 
@@ -1226,19 +1190,21 @@
  */
 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
 {
+	LIST_HEAD(tmp_list);
 	struct vfsmount *p;
 
 	for (p = mnt; p; p = next_mnt(p, mnt))
-		list_move(&p->mnt_hash, kill);
+		list_move(&p->mnt_hash, &tmp_list);
 
 	if (propagate)
-		propagate_umount(kill);
+		propagate_umount(&tmp_list);
 
-	list_for_each_entry(p, kill, mnt_hash) {
+	list_for_each_entry(p, &tmp_list, mnt_hash) {
 		list_del_init(&p->mnt_expire);
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
 		p->mnt_ns = NULL;
+		__mnt_make_shortterm(p);
 		list_del_init(&p->mnt_child);
 		if (p->mnt_parent != p) {
 			p->mnt_parent->mnt_ghosts++;
@@ -1246,6 +1212,7 @@
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
 	}
+	list_splice(&tmp_list, kill);
 }
 
 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
@@ -1277,7 +1244,7 @@
 		 */
 		br_write_lock(vfsmount_lock);
 		if (mnt_get_count(mnt) != 2) {
-			br_write_lock(vfsmount_lock);
+			br_write_unlock(vfsmount_lock);
 			return -EBUSY;
 		}
 		br_write_unlock(vfsmount_lock);
@@ -1844,9 +1811,10 @@
 		return err;
 
 	down_write(&namespace_sem);
-	while (d_mountpoint(path->dentry) &&
-	       follow_down(path))
-		;
+	err = follow_down(path, true);
+	if (err < 0)
+		goto out;
+
 	err = -EINVAL;
 	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
 		goto out;
@@ -1904,6 +1872,8 @@
 	return err;
 }
 
+static int do_add_mount(struct vfsmount *, struct path *, int);
+
 /*
  * create a new mount for userspace and request it to be added into the
  * namespace's tree
@@ -1912,6 +1882,7 @@
 			int mnt_flags, char *name, void *data)
 {
 	struct vfsmount *mnt;
+	int err;
 
 	if (!type)
 		return -EINVAL;
@@ -1924,15 +1895,47 @@
 	if (IS_ERR(mnt))
 		return PTR_ERR(mnt);
 
-	return do_add_mount(mnt, path, mnt_flags, NULL);
+	err = do_add_mount(mnt, path, mnt_flags);
+	if (err)
+		mntput(mnt);
+	return err;
+}
+
+int finish_automount(struct vfsmount *m, struct path *path)
+{
+	int err;
+	/* The new mount record should have at least 2 refs to prevent it being
+	 * expired before we get a chance to add it
+	 */
+	BUG_ON(mnt_get_count(m) < 2);
+
+	if (m->mnt_sb == path->mnt->mnt_sb &&
+	    m->mnt_root == path->dentry) {
+		err = -ELOOP;
+		goto fail;
+	}
+
+	err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
+	if (!err)
+		return 0;
+fail:
+	/* remove m from any expiration list it may be on */
+	if (!list_empty(&m->mnt_expire)) {
+		down_write(&namespace_sem);
+		br_write_lock(vfsmount_lock);
+		list_del_init(&m->mnt_expire);
+		br_write_unlock(vfsmount_lock);
+		up_write(&namespace_sem);
+	}
+	mntput(m);
+	mntput(m);
+	return err;
 }
 
 /*
  * add a mount into a namespace's mount tree
- * - provide the option of adding the new mount to an expiration list
  */
-int do_add_mount(struct vfsmount *newmnt, struct path *path,
-		 int mnt_flags, struct list_head *fslist)
+static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
 {
 	int err;
 
@@ -1940,9 +1943,10 @@
 
 	down_write(&namespace_sem);
 	/* Something was mounted here while we slept */
-	while (d_mountpoint(path->dentry) &&
-	       follow_down(path))
-		;
+	err = follow_down(path, true);
+	if (err < 0)
+		goto unlock;
+
 	err = -EINVAL;
 	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
 		goto unlock;
@@ -1958,22 +1962,29 @@
 		goto unlock;
 
 	newmnt->mnt_flags = mnt_flags;
-	if ((err = graft_tree(newmnt, path)))
-		goto unlock;
-
-	if (fslist) /* add to the specified expiration list */
-		list_add_tail(&newmnt->mnt_expire, fslist);
-
-	up_write(&namespace_sem);
-	return 0;
+	err = graft_tree(newmnt, path);
 
 unlock:
 	up_write(&namespace_sem);
-	mntput_long(newmnt);
 	return err;
 }
 
-EXPORT_SYMBOL_GPL(do_add_mount);
+/**
+ * mnt_set_expiry - Put a mount on an expiration list
+ * @mnt: The mount to list.
+ * @expiry_list: The list to add the mount to.
+ */
+void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
+{
+	down_write(&namespace_sem);
+	br_write_lock(vfsmount_lock);
+
+	list_add_tail(&mnt->mnt_expire, expiry_list);
+
+	br_write_unlock(vfsmount_lock);
+	up_write(&namespace_sem);
+}
+EXPORT_SYMBOL(mnt_set_expiry);
 
 /*
  * process a list of expirable mountpoints with the intent of discarding any
@@ -2262,6 +2273,22 @@
 	return new_ns;
 }
 
+void mnt_make_longterm(struct vfsmount *mnt)
+{
+	__mnt_make_longterm(mnt);
+}
+
+void mnt_make_shortterm(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
+		return;
+	br_write_lock(vfsmount_lock);
+	atomic_dec(&mnt->mnt_longterm);
+	br_write_unlock(vfsmount_lock);
+#endif
+}
+
 /*
  * Allocate a new namespace structure and populate it with contents
  * copied from the namespace of the passed in task structure.
@@ -2299,14 +2326,19 @@
 	q = new_ns->root;
 	while (p) {
 		q->mnt_ns = new_ns;
+		__mnt_make_longterm(q);
 		if (fs) {
 			if (p == fs->root.mnt) {
+				fs->root.mnt = mntget(q);
+				__mnt_make_longterm(q);
+				mnt_make_shortterm(p);
 				rootmnt = p;
-				fs->root.mnt = mntget_long(q);
 			}
 			if (p == fs->pwd.mnt) {
+				fs->pwd.mnt = mntget(q);
+				__mnt_make_longterm(q);
+				mnt_make_shortterm(p);
 				pwdmnt = p;
-				fs->pwd.mnt = mntget_long(q);
 			}
 		}
 		p = next_mnt(p, mnt_ns->root);
@@ -2315,9 +2347,9 @@
 	up_write(&namespace_sem);
 
 	if (rootmnt)
-		mntput_long(rootmnt);
+		mntput(rootmnt);
 	if (pwdmnt)
-		mntput_long(pwdmnt);
+		mntput(pwdmnt);
 
 	return new_ns;
 }
@@ -2350,6 +2382,7 @@
 	new_ns = alloc_mnt_ns();
 	if (!IS_ERR(new_ns)) {
 		mnt->mnt_ns = new_ns;
+		__mnt_make_longterm(mnt);
 		new_ns->root = mnt;
 		list_add(&new_ns->list, &new_ns->root->mnt_list);
 	}
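
The namespace.c changes drop mntget_long()/mntput_long() in favour of an atomic mnt_longterm marker: as long as a namespace or an fs_struct is registered as a long-term user, mntput() stays on the cheap per-CPU path and never has to take the write lock to look for the final reference. The sketch below is a deliberately single-threaded, lock-free caricature of that split; the fields stand in for the per-CPU count and the atomic marker:

#include <stdio.h>
#include <stdlib.h>

struct mount {
	long count;	/* stand-in for the per-CPU short-term count */
	long longterm;	/* stand-in for atomic mnt_longterm */
};

static void mnt_put(struct mount *m)
{
	m->count--;
	if (m->longterm > 0)
		return;			/* fast path: a long-term user keeps it alive */
	if (m->count > 0)
		return;
	printf("freeing mount\n");	/* slow path: last reference gone */
	free(m);
}

int main(void)
{
	struct mount *m = calloc(1, sizeof(*m));

	m->count = 2;
	m->longterm = 1;	/* e.g. attached to a namespace */

	mnt_put(m);		/* fast path, nothing freed */
	m->longterm = 0;	/* namespace detached: __mnt_make_shortterm */
	mnt_put(m);		/* last short-term reference: freed */
	return 0;
}
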
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 28f136d..f6946bb 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -21,9 +21,7 @@
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
 
-#include <linux/ncp_fs.h>
-
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static void ncp_read_volume_list(struct file *, void *, filldir_t,
 				struct ncp_cache_control *);
@@ -82,7 +80,7 @@
 		unsigned int, const char *, const struct qstr *);
 static int ncp_delete_dentry(const struct dentry *);
 
-static const struct dentry_operations ncp_dentry_operations =
+const struct dentry_operations ncp_dentry_operations =
 {
 	.d_revalidate	= ncp_lookup_validate,
 	.d_hash		= ncp_hash_dentry,
@@ -90,14 +88,6 @@
 	.d_delete	= ncp_delete_dentry,
 };
 
-const struct dentry_operations ncp_root_dentry_operations =
-{
-	.d_hash		= ncp_hash_dentry,
-	.d_compare	= ncp_compare_dentry,
-	.d_delete	= ncp_delete_dentry,
-};
-
-
 #define ncp_namespace(i)	(NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber])
 
 static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator)
@@ -309,6 +299,9 @@
 	int res, val = 0, len;
 	__u8 __name[NCP_MAXPATHLEN + 1];
 
+	if (dentry == dentry->d_sb->s_root)
+		return 1;
+
 	if (nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
@@ -637,7 +630,6 @@
 		entry->ino = iunique(dir->i_sb, 2);
 		inode = ncp_iget(dir->i_sb, entry);
 		if (inode) {
-			d_set_d_op(newdent, &ncp_dentry_operations);
 			d_instantiate(newdent, inode);
 			if (!hashed)
 				d_rehash(newdent);
@@ -893,7 +885,6 @@
 	if (inode) {
 		ncp_new_dentry(dentry);
 add_entry:
-		d_set_d_op(dentry, &ncp_dentry_operations);
 		d_add(dentry, inode);
 		error = 0;
 	}
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index cb50aaf..0ed65e0 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -18,8 +18,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 
-#include <linux/ncp_fs.h>
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static int ncp_fsync(struct file *file, int datasync)
 {
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 9b39a5d..00a1d1c 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -31,11 +31,9 @@
 #include <linux/seq_file.h>
 #include <linux/namei.h>
 
-#include <linux/ncp_fs.h>
-
 #include <net/sock.h>
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 #include "getopt.h"
 
 #define NCP_DEFAULT_FILE_MODE 0600
@@ -544,6 +542,7 @@
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = NCP_SUPER_MAGIC;
 	sb->s_op = &ncp_sops;
+	sb->s_d_op = &ncp_dentry_operations;
 	sb->s_bdi = &server->bdi;
 
 	server = NCP_SBP(sb);
@@ -723,7 +722,6 @@
 	sb->s_root = d_alloc_root(root_inode);
         if (!sb->s_root)
 		goto out_no_root;
-	d_set_d_op(sb->s_root, &ncp_root_dentry_operations);
 	return 0;
 
 out_no_root:
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index d40a547..790e92a 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -20,11 +20,9 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 
-#include <linux/ncp_fs.h>
-
 #include <asm/uaccess.h>
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 /* maximum limit for ncp_objectname_ioctl */
 #define NCP_OBJECT_NAME_MAX_LEN	4096
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 56f5b3a..a7c07b4 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -16,12 +16,12 @@
 #include <linux/mman.h>
 #include <linux/string.h>
 #include <linux/fcntl.h>
-#include <linux/ncp_fs.h>
 
-#include "ncplib_kernel.h"
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
+#include "ncp_fs.h"
+
 /*
  * Fill in the supplied page for mmap
  * XXX: how are we excluding truncate/invalidate here? Maybe need to lock
diff --git a/fs/ncpfs/ncp_fs.h b/fs/ncpfs/ncp_fs.h
new file mode 100644
index 0000000..31831af
--- /dev/null
+++ b/fs/ncpfs/ncp_fs.h
@@ -0,0 +1,98 @@
+#include <linux/ncp_fs.h>
+#include "ncp_fs_i.h"
+#include "ncp_fs_sb.h"
+
+/* define because it is easy to change PRINTK to {*}PRINTK */
+#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args)
+
+#undef NCPFS_PARANOIA
+#ifdef NCPFS_PARANOIA
+#define PPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define PPRINTK(format, args...)
+#endif
+
+#ifndef DEBUG_NCP
+#define DEBUG_NCP 0
+#endif
+#if DEBUG_NCP > 0
+#define DPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define DPRINTK(format, args...)
+#endif
+#if DEBUG_NCP > 1
+#define DDPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define DDPRINTK(format, args...)
+#endif
+
+#define NCP_MAX_RPC_TIMEOUT (6*HZ)
+
+
+struct ncp_entry_info {
+	struct nw_info_struct	i;
+	ino_t			ino;
+	int			opened;
+	int			access;
+	unsigned int		volume;
+	__u8			file_handle[6];
+};
+
+static inline struct ncp_server *NCP_SBP(const struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+#define NCP_SERVER(inode)	NCP_SBP((inode)->i_sb)
+static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode)
+{
+	return container_of(inode, struct ncp_inode_info, vfs_inode);
+}
+
+/* linux/fs/ncpfs/inode.c */
+int ncp_notify_change(struct dentry *, struct iattr *);
+struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *);
+void ncp_update_inode(struct inode *, struct ncp_entry_info *);
+void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
+
+/* linux/fs/ncpfs/dir.c */
+extern const struct inode_operations ncp_dir_inode_operations;
+extern const struct file_operations ncp_dir_operations;
+extern const struct dentry_operations ncp_dentry_operations;
+int ncp_conn_logged_in(struct super_block *);
+int ncp_date_dos2unix(__le16 time, __le16 date);
+void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
+
+/* linux/fs/ncpfs/ioctl.c */
+long ncp_ioctl(struct file *, unsigned int, unsigned long);
+long ncp_compat_ioctl(struct file *, unsigned int, unsigned long);
+
+/* linux/fs/ncpfs/sock.c */
+int ncp_request2(struct ncp_server *server, int function,
+	void* reply, int max_reply_size);
+static inline int ncp_request(struct ncp_server *server, int function) {
+	return ncp_request2(server, function, server->packet, server->packet_size);
+}
+int ncp_connect(struct ncp_server *server);
+int ncp_disconnect(struct ncp_server *server);
+void ncp_lock_server(struct ncp_server *server);
+void ncp_unlock_server(struct ncp_server *server);
+
+/* linux/fs/ncpfs/symlink.c */
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern const struct address_space_operations ncp_symlink_aops;
+int ncp_symlink(struct inode*, struct dentry*, const char*);
+#endif
+
+/* linux/fs/ncpfs/file.c */
+extern const struct inode_operations ncp_file_inode_operations;
+extern const struct file_operations ncp_file_operations;
+int ncp_make_open(struct inode *, int);
+
+/* linux/fs/ncpfs/mmap.c */
+int ncp_mmap(struct file *, struct vm_area_struct *);
+
+/* linux/fs/ncpfs/ncplib_kernel.c */
+int ncp_make_closed(struct inode *);
+
+#include "ncplib_kernel.h"
diff --git a/include/linux/ncp_fs_i.h b/fs/ncpfs/ncp_fs_i.h
similarity index 100%
rename from include/linux/ncp_fs_i.h
rename to fs/ncpfs/ncp_fs_i.h
diff --git a/include/linux/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
similarity index 86%
rename from include/linux/ncp_fs_sb.h
rename to fs/ncpfs/ncp_fs_sb.h
index d64b0e8..4af803f 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -13,15 +13,30 @@
 #include <linux/net.h>
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
-
-#ifdef __KERNEL__
-
 #include <linux/workqueue.h>
 
 #define NCP_DEFAULT_OPTIONS 0		/* 2 for packet signatures */
 
 struct sock;
 
+struct ncp_mount_data_kernel {
+	unsigned long    flags;		/* NCP_MOUNT_* flags */
+	unsigned int	 int_flags;	/* internal flags */
+#define NCP_IMOUNT_LOGGEDIN_POSSIBLE	0x0001
+	__kernel_uid32_t mounted_uid;	/* Who may umount() this filesystem? */
+	struct pid      *wdog_pid;	/* Who cares for our watchdog packets? */
+	unsigned int     ncp_fd;	/* The socket to the ncp port */
+	unsigned int     time_out;	/* How long should I wait after
+					   sending an NCP request? */
+	unsigned int     retry_count;	/* And how often should I retry? */
+	unsigned char	 mounted_vol[NCP_VOLNAME_LEN + 1];
+	__kernel_uid32_t uid;
+	__kernel_gid32_t gid;
+	__kernel_mode_t  file_mode;
+	__kernel_mode_t  dir_mode;
+	int		 info_fd;
+};
+
 struct ncp_server {
 
 	struct ncp_mount_data_kernel m;	/* Nearly all of the mount data is of
@@ -158,7 +173,4 @@
 	server->conn_status |= 0x01;
 }
 
-#endif				/* __KERNEL__ */
-
 #endif
- 
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index a95615a..981a956 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -11,7 +11,7 @@
 
 
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static inline void assert_server_locked(struct ncp_server *server)
 {
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 1220df7..09881e6 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -32,8 +32,6 @@
 #include <linux/ctype.h>
 #endif /* CONFIG_NCPFS_NLS */
 
-#include <linux/ncp_fs.h>
-
 #define NCP_MIN_SYMLINK_SIZE	8
 #define NCP_MAX_SYMLINK_SIZE	512
 
diff --git a/fs/ncpfs/ncpsign_kernel.c b/fs/ncpfs/ncpsign_kernel.c
index d8b2d7e..0890759 100644
--- a/fs/ncpfs/ncpsign_kernel.c
+++ b/fs/ncpfs/ncpsign_kernel.c
@@ -11,6 +11,7 @@
 #include <linux/string.h>
 #include <linux/ncp.h>
 #include <linux/bitops.h>
+#include "ncp_fs.h"
 #include "ncpsign_kernel.h"
 
 /* i386: 32-bit, little endian, handles mis-alignment */
diff --git a/fs/ncpfs/ncpsign_kernel.h b/fs/ncpfs/ncpsign_kernel.h
index 6451a68..d9a1438 100644
--- a/fs/ncpfs/ncpsign_kernel.h
+++ b/fs/ncpfs/ncpsign_kernel.h
@@ -8,8 +8,6 @@
 #ifndef _NCPSIGN_KERNEL_H
 #define _NCPSIGN_KERNEL_H
 
-#include <linux/ncp_fs.h>
-
 #ifdef CONFIG_NCPFS_PACKET_SIGNING
 void __sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff);
 int sign_verify_reply(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, const void *sign_buff);
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 668bd26..3a15872 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -28,7 +28,7 @@
 #include <linux/poll.h>
 #include <linux/file.h>
 
-#include <linux/ncp_fs.h>
+#include "ncp_fs.h"
 
 #include "ncpsign_kernel.h"
 
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index c634fd1..661f861 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -25,13 +25,11 @@
 
 #include <linux/errno.h>
 #include <linux/fs.h>
-#include <linux/ncp_fs.h>
 #include <linux/time.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
-#include "ncplib_kernel.h"
-
+#include "ncp_fs.h"
 
 /* these magic numbers must appear in the symlink file -- this makes it a bit
    more resilient against the magic attributes being set on random files. */
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 93a8b3b..e3d2942 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -16,9 +16,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/sunrpc/svcauth_gss.h>
-#if defined(CONFIG_NFS_V4_1)
 #include <linux/sunrpc/bc_xprt.h>
-#endif
 
 #include <net/inet_sock.h>
 
@@ -177,30 +175,38 @@
 struct svc_rqst *
 nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
 {
-	struct svc_xprt *bc_xprt;
-	struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
+	struct svc_rqst *rqstp;
+	int ret;
 
-	dprintk("--> %s\n", __func__);
-	/* Create a svc_sock for the service */
-	bc_xprt = svc_sock_create(serv, xprt->prot);
-	if (!bc_xprt)
+	/*
+	 * Create an svc_sock for the back channel service that shares the
+	 * fore channel connection.
+	 * Returns the input port (0) and sets the svc_serv bc_xprt on success
+	 */
+	ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
+			      SVC_SOCK_ANONYMOUS);
+	if (ret < 0) {
+		rqstp = ERR_PTR(ret);
 		goto out;
+	}
 
 	/*
 	 * Save the svc_serv in the transport so that it can
 	 * be referenced when the session backchannel is initialized
 	 */
-	serv->bc_xprt = bc_xprt;
 	xprt->bc_serv = serv;
 
 	INIT_LIST_HEAD(&serv->sv_cb_list);
 	spin_lock_init(&serv->sv_cb_lock);
 	init_waitqueue_head(&serv->sv_cb_waitq);
 	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
-	if (IS_ERR(rqstp))
-		svc_sock_destroy(bc_xprt);
+	if (IS_ERR(rqstp)) {
+		svc_xprt_put(serv->sv_bc_xprt);
+		serv->sv_bc_xprt = NULL;
+	}
 out:
-	dprintk("--> %s return %p\n", __func__, rqstp);
+	dprintk("--> %s return %ld\n", __func__,
+		IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
 	return rqstp;
 }
 
@@ -322,58 +328,58 @@
 	mutex_unlock(&nfs_callback_mutex);
 }
 
-static int check_gss_callback_principal(struct nfs_client *clp,
-					struct svc_rqst *rqstp)
+/* Boolean check of RPC_AUTH_GSS principal */
+int
+check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
 {
 	struct rpc_clnt *r = clp->cl_rpcclient;
 	char *p = svc_gss_principal(rqstp);
 
+	if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
+		return 1;
+
+	/* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
+	if (clp->cl_minorversion != 0)
+		return 0;
 	/*
 	 * It might just be a normal user principal, in which case
 	 * userspace won't bother to tell us the name at all.
 	 */
 	if (p == NULL)
-		return SVC_DENIED;
+		return 0;
 
 	/* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */
 
 	if (memcmp(p, "nfs@", 4) != 0)
-		return SVC_DENIED;
+		return 0;
 	p += 4;
 	if (strcmp(p, r->cl_server) != 0)
-		return SVC_DENIED;
-	return SVC_OK;
+		return 0;
+	return 1;
 }
 
+/*
+ * pg_authenticate method for nfsv4 callback threads.
+ *
+ * The authflavor has been negotiated, so an incorrect flavor is a server
+ * bug. Drop packets with incorrect authflavor.
+ *
+ * All other checking is done after NFS decoding, where the nfs_client can
+ * be found in nfs4_callback_compound().
+ */
 static int nfs_callback_authenticate(struct svc_rqst *rqstp)
 {
-	struct nfs_client *clp;
-	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-	int ret = SVC_OK;
-
-	/* Don't talk to strangers */
-	clp = nfs_find_client(svc_addr(rqstp), 4);
-	if (clp == NULL)
-		return SVC_DROP;
-
-	dprintk("%s: %s NFSv4 callback!\n", __func__,
-			svc_print_addr(rqstp, buf, sizeof(buf)));
-
 	switch (rqstp->rq_authop->flavour) {
-		case RPC_AUTH_NULL:
-			if (rqstp->rq_proc != CB_NULL)
-				ret = SVC_DENIED;
-			break;
-		case RPC_AUTH_UNIX:
-			break;
-		case RPC_AUTH_GSS:
-			ret = check_gss_callback_principal(clp, rqstp);
-			break;
-		default:
-			ret = SVC_DENIED;
+	case RPC_AUTH_NULL:
+		if (rqstp->rq_proc != CB_NULL)
+			return SVC_DROP;
+		break;
+	case RPC_AUTH_GSS:
+		/* No RPC_AUTH_GSS support yet in NFSv4.1 */
+		if (svc_is_backchannel(rqstp))
+			return SVC_DROP;
 	}
-	nfs_put_client(clp);
-	return ret;
+	return SVC_OK;
 }
 
 /*
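
check_gss_callback_principal() in the hunk above accepts only a GSS_C_NT_HOSTBASED_NAME of the form "nfs@<serverhostname>" whose host part matches the client's cl_server. A plain-C sketch of that string check, outside the RPC types and purely illustrative:

#include <string.h>
#include <stdio.h>

/* Accept "nfs@<server>" only when <server> matches the name we mounted from. */
static int principal_matches(const char *principal, const char *cl_server)
{
	if (principal == NULL)
		return 0;			/* no name supplied */
	if (strncmp(principal, "nfs@", 4) != 0)
		return 0;			/* wrong service prefix */
	return strcmp(principal + 4, cl_server) == 0;
}

int main(void)
{
	printf("%d\n", principal_matches("nfs@filer.example.com",
					 "filer.example.com"));	/* 1 */
	printf("%d\n", principal_matches("host@filer.example.com",
					 "filer.example.com"));	/* 0 */
	return 0;
}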
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 85a7cfd..46d93ce 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -7,6 +7,7 @@
  */
 #ifndef __LINUX_FS_NFS_CALLBACK_H
 #define __LINUX_FS_NFS_CALLBACK_H
+#include <linux/sunrpc/svc.h>
 
 #define NFS4_CALLBACK 0x40000000
 #define NFS4_CALLBACK_XDRSIZE 2048
@@ -34,10 +35,16 @@
 	OP_CB_ILLEGAL = 10044,
 };
 
+struct cb_process_state {
+	__be32			drc_status;
+	struct nfs_client	*clp;
+};
+
 struct cb_compound_hdr_arg {
 	unsigned int taglen;
 	const char *tag;
 	unsigned int minorversion;
+	unsigned int cb_ident; /* v4.0 callback identifier */
 	unsigned nops;
 };
 
@@ -103,14 +110,23 @@
 	uint32_t			csr_target_highestslotid;
 };
 
-extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
-				       struct cb_sequenceres *res);
+extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+				       struct cb_sequenceres *res,
+				       struct cb_process_state *cps);
 
 extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
 					     const nfs4_stateid *stateid);
 
 #define RCA4_TYPE_MASK_RDATA_DLG	0
 #define RCA4_TYPE_MASK_WDATA_DLG	1
+#define RCA4_TYPE_MASK_DIR_DLG         2
+#define RCA4_TYPE_MASK_FILE_LAYOUT     3
+#define RCA4_TYPE_MASK_BLK_LAYOUT      4
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN  8
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX  9
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
+#define RCA4_TYPE_MASK_ALL 0xf31f
 
 struct cb_recallanyargs {
 	struct sockaddr	*craa_addr;
@@ -118,25 +134,52 @@
 	uint32_t	craa_type_mask;
 };
 
-extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
+extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
+					void *dummy,
+					struct cb_process_state *cps);
 
 struct cb_recallslotargs {
 	struct sockaddr	*crsa_addr;
 	uint32_t	crsa_target_max_slots;
 };
-extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
-					  void *dummy);
+extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
+					 void *dummy,
+					 struct cb_process_state *cps);
 
+struct cb_layoutrecallargs {
+	struct sockaddr		*cbl_addr;
+	uint32_t		cbl_recall_type;
+	uint32_t		cbl_layout_type;
+	uint32_t		cbl_layoutchanged;
+	union {
+		struct {
+			struct nfs_fh		cbl_fh;
+			struct pnfs_layout_range cbl_range;
+			nfs4_stateid		cbl_stateid;
+		};
+		struct nfs_fsid		cbl_fsid;
+	};
+};
+
+extern unsigned nfs4_callback_layoutrecall(
+	struct cb_layoutrecallargs *args,
+	void *dummy, struct cb_process_state *cps);
+
+extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
+extern void nfs4_cb_take_slot(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4_1 */
-
-extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
-extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
-
+extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
+extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+				    struct cb_getattrres *res,
+				    struct cb_process_state *cps);
+extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+				   struct cb_process_state *cps);
 #ifdef CONFIG_NFS_V4
 extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
 extern void nfs_callback_down(int minorversion);
 extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
 					    const nfs4_stateid *stateid);
+extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4 */
 /*
  * nfs41: Callbacks are expected to not cause substantial latency,
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2950fca..8958757 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -12,30 +12,33 @@
 #include "callback.h"
 #include "delegation.h"
 #include "internal.h"
+#include "pnfs.h"
 
 #ifdef NFS_DEBUG
 #define NFSDBG_FACILITY NFSDBG_CALLBACK
 #endif
- 
-__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
+
+__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+			     struct cb_getattrres *res,
+			     struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct nfs_delegation *delegation;
 	struct nfs_inode *nfsi;
 	struct inode *inode;
 
-	res->bitmap[0] = res->bitmap[1] = 0;
-	res->status = htonl(NFS4ERR_BADHANDLE);
-	clp = nfs_find_client(args->addr, 4);
-	if (clp == NULL)
+	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 		goto out;
 
-	dprintk("NFS: GETATTR callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+	res->bitmap[0] = res->bitmap[1] = 0;
+	res->status = htonl(NFS4ERR_BADHANDLE);
 
-	inode = nfs_delegation_find_inode(clp, &args->fh);
+	dprintk("NFS: GETATTR callback request from %s\n",
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
 	if (inode == NULL)
-		goto out_putclient;
+		goto out;
 	nfsi = NFS_I(inode);
 	rcu_read_lock();
 	delegation = rcu_dereference(nfsi->delegation);
@@ -55,49 +58,41 @@
 out_iput:
 	rcu_read_unlock();
 	iput(inode);
-out_putclient:
-	nfs_put_client(clp);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
 	return res->status;
 }
 
-__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
+__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+			    struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct inode *inode;
 	__be32 res;
 	
-	res = htonl(NFS4ERR_BADHANDLE);
-	clp = nfs_find_client(args->addr, 4);
-	if (clp == NULL)
+	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 		goto out;
 
 	dprintk("NFS: RECALL callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 
-	do {
-		struct nfs_client *prev = clp;
-
-		inode = nfs_delegation_find_inode(clp, &args->fh);
-		if (inode != NULL) {
-			/* Set up a helper thread to actually return the delegation */
-			switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
-				case 0:
-					res = 0;
-					break;
-				case -ENOENT:
-					if (res != 0)
-						res = htonl(NFS4ERR_BAD_STATEID);
-					break;
-				default:
-					res = htonl(NFS4ERR_RESOURCE);
-			}
-			iput(inode);
-		}
-		clp = nfs_find_client_next(prev);
-		nfs_put_client(prev);
-	} while (clp != NULL);
+	res = htonl(NFS4ERR_BADHANDLE);
+	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
+	if (inode == NULL)
+		goto out;
+	/* Set up a helper thread to actually return the delegation */
+	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
+	case 0:
+		res = 0;
+		break;
+	case -ENOENT:
+		if (res != 0)
+			res = htonl(NFS4ERR_BAD_STATEID);
+		break;
+	default:
+		res = htonl(NFS4ERR_RESOURCE);
+	}
+	iput(inode);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
 	return res;
@@ -113,6 +108,139 @@
 
 #if defined(CONFIG_NFS_V4_1)
 
+static u32 initiate_file_draining(struct nfs_client *clp,
+				  struct cb_layoutrecallargs *args)
+{
+	struct pnfs_layout_hdr *lo;
+	struct inode *ino;
+	bool found = false;
+	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+	LIST_HEAD(free_me_list);
+
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+		if (nfs_compare_fh(&args->cbl_fh,
+				   &NFS_I(lo->plh_inode)->fh))
+			continue;
+		ino = igrab(lo->plh_inode);
+		if (!ino)
+			continue;
+		found = true;
+		/* Without this, layout can be freed as soon
+		 * as we release cl_lock.
+		 */
+		get_layout_hdr(lo);
+		break;
+	}
+	spin_unlock(&clp->cl_lock);
+	if (!found)
+		return NFS4ERR_NOMATCHING_LAYOUT;
+
+	spin_lock(&ino->i_lock);
+	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+	    mark_matching_lsegs_invalid(lo, &free_me_list,
+					args->cbl_range.iomode))
+		rv = NFS4ERR_DELAY;
+	else
+		rv = NFS4ERR_NOMATCHING_LAYOUT;
+	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
+	spin_unlock(&ino->i_lock);
+	pnfs_free_lseg_list(&free_me_list);
+	put_layout_hdr(lo);
+	iput(ino);
+	return rv;
+}
+
+static u32 initiate_bulk_draining(struct nfs_client *clp,
+				  struct cb_layoutrecallargs *args)
+{
+	struct pnfs_layout_hdr *lo;
+	struct inode *ino;
+	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+	struct pnfs_layout_hdr *tmp;
+	LIST_HEAD(recall_list);
+	LIST_HEAD(free_me_list);
+	struct pnfs_layout_range range = {
+		.iomode = IOMODE_ANY,
+		.offset = 0,
+		.length = NFS4_MAX_UINT64,
+	};
+
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+		if ((args->cbl_recall_type == RETURN_FSID) &&
+		    memcmp(&NFS_SERVER(lo->plh_inode)->fsid,
+			   &args->cbl_fsid, sizeof(struct nfs_fsid)))
+			continue;
+		if (!igrab(lo->plh_inode))
+			continue;
+		get_layout_hdr(lo);
+		BUG_ON(!list_empty(&lo->plh_bulk_recall));
+		list_add(&lo->plh_bulk_recall, &recall_list);
+	}
+	spin_unlock(&clp->cl_lock);
+	list_for_each_entry_safe(lo, tmp,
+				 &recall_list, plh_bulk_recall) {
+		ino = lo->plh_inode;
+		spin_lock(&ino->i_lock);
+		set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+		if (mark_matching_lsegs_invalid(lo, &free_me_list, range.iomode))
+			rv = NFS4ERR_DELAY;
+		list_del_init(&lo->plh_bulk_recall);
+		spin_unlock(&ino->i_lock);
+		put_layout_hdr(lo);
+		iput(ino);
+	}
+	pnfs_free_lseg_list(&free_me_list);
+	return rv;
+}
+
+static u32 do_callback_layoutrecall(struct nfs_client *clp,
+				    struct cb_layoutrecallargs *args)
+{
+	u32 res = NFS4ERR_DELAY;
+
+	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
+	if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state))
+		goto out;
+	if (args->cbl_recall_type == RETURN_FILE)
+		res = initiate_file_draining(clp, args);
+	else
+		res = initiate_bulk_draining(clp, args);
+	clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state);
+out:
+	dprintk("%s returning %i\n", __func__, res);
+	return res;
+
+}
+
+__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
+				  void *dummy, struct cb_process_state *cps)
+{
+	u32 res;
+
+	dprintk("%s: -->\n", __func__);
+
+	if (cps->clp)
+		res = do_callback_layoutrecall(cps->clp, args);
+	else
+		res = NFS4ERR_OP_NOT_IN_SESSION;
+
+	dprintk("%s: exit with status = %d\n", __func__, res);
+	return cpu_to_be32(res);
+}
+
+static void pnfs_recall_all_layouts(struct nfs_client *clp)
+{
+	struct cb_layoutrecallargs args;
+
+	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
+	memset(&args, 0, sizeof(args));
+	args.cbl_recall_type = RETURN_ALL;
+	/* FIXME we ignore errors, what should we do? */
+	do_callback_layoutrecall(clp, &args);
+}
+
 int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
 {
 	if (delegation == NULL)
@@ -185,42 +313,6 @@
 }
 
 /*
- * Returns a pointer to a held 'struct nfs_client' that matches the server's
- * address, major version number, and session ID.  It is the caller's
- * responsibility to release the returned reference.
- *
- * Returns NULL if there are no connections with sessions, or if no session
- * matches the one of interest.
- */
- static struct nfs_client *find_client_with_session(
-	const struct sockaddr *addr, u32 nfsversion,
-	struct nfs4_sessionid *sessionid)
-{
-	struct nfs_client *clp;
-
-	clp = nfs_find_client(addr, 4);
-	if (clp == NULL)
-		return NULL;
-
-	do {
-		struct nfs_client *prev = clp;
-
-		if (clp->cl_session != NULL) {
-			if (memcmp(clp->cl_session->sess_id.data,
-					sessionid->data,
-					NFS4_MAX_SESSIONID_LEN) == 0) {
-				/* Returns a held reference to clp */
-				return clp;
-			}
-		}
-		clp = nfs_find_client_next(prev);
-		nfs_put_client(prev);
-	} while (clp != NULL);
-
-	return NULL;
-}
-
-/*
  * For each referring call triple, check the session's slot table for
  * a match.  If the slot is in use and the sequence numbers match, the
  * client is still waiting for a response to the original request.
@@ -276,20 +368,28 @@
 }
 
 __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
-				struct cb_sequenceres *res)
+			      struct cb_sequenceres *res,
+			      struct cb_process_state *cps)
 {
 	struct nfs_client *clp;
 	int i;
-	__be32 status;
+	__be32 status = htonl(NFS4ERR_BADSESSION);
 
-	status = htonl(NFS4ERR_BADSESSION);
-	clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
+	cps->clp = NULL;
+
+	clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid);
 	if (clp == NULL)
 		goto out;
 
+	/* state manager is resetting the session */
+	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+		status = htonl(NFS4ERR_DELAY);
+		goto out;
+	}
+
 	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
 	if (status)
-		goto out_putclient;
+		goto out;
 
 	/*
 	 * Check for pending referring calls.  If a match is found, a
@@ -298,7 +398,7 @@
 	 */
 	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
 		status = htonl(NFS4ERR_DELAY);
-		goto out_putclient;
+		goto out;
 	}
 
 	memcpy(&res->csr_sessionid, &args->csa_sessionid,
@@ -307,83 +407,93 @@
 	res->csr_slotid = args->csa_slotid;
 	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
 	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+	nfs4_cb_take_slot(clp);
 
-out_putclient:
-	nfs_put_client(clp);
 out:
+	cps->clp = clp; /* put in nfs4_callback_compound */
 	for (i = 0; i < args->csa_nrclists; i++)
 		kfree(args->csa_rclists[i].rcl_refcalls);
 	kfree(args->csa_rclists);
 
-	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP))
-		res->csr_status = 0;
-	else
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+		cps->drc_status = status;
+		status = 0;
+	} else
 		res->csr_status = status;
+
 	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
 		ntohl(status), ntohl(res->csr_status));
 	return status;
 }
 
-__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+static bool
+validate_bitmap_values(unsigned long mask)
 {
-	struct nfs_client *clp;
+	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
+}
+
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
+			       struct cb_process_state *cps)
+{
 	__be32 status;
 	fmode_t flags = 0;
 
-	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
-	clp = nfs_find_client(args->craa_addr, 4);
-	if (clp == NULL)
+	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* set in cb_sequence */
 		goto out;
 
 	dprintk("NFS: RECALL_ANY callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 
+	status = cpu_to_be32(NFS4ERR_INVAL);
+	if (!validate_bitmap_values(args->craa_type_mask))
+		goto out;
+
+	status = cpu_to_be32(NFS4_OK);
 	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
 		     &args->craa_type_mask))
 		flags = FMODE_READ;
 	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
 		     &args->craa_type_mask))
 		flags |= FMODE_WRITE;
-
+	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
+		     &args->craa_type_mask))
+		pnfs_recall_all_layouts(cps->clp);
 	if (flags)
-		nfs_expire_all_delegation_types(clp, flags);
-	status = htonl(NFS4_OK);
+		nfs_expire_all_delegation_types(cps->clp, flags);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
 }
 
 /* Reduce the fore channel's max_slots to the target value */
-__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy)
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
+				struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct nfs4_slot_table *fc_tbl;
 	__be32 status;
 
 	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
-	clp = nfs_find_client(args->crsa_addr, 4);
-	if (clp == NULL)
+	if (!cps->clp) /* set in cb_sequence */
 		goto out;
 
 	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
 		args->crsa_target_max_slots);
 
-	fc_tbl = &clp->cl_session->fc_slot_table;
+	fc_tbl = &cps->clp->cl_session->fc_slot_table;
 
 	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
 	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
 	    args->crsa_target_max_slots < 1)
-		goto out_putclient;
+		goto out;
 
 	status = htonl(NFS4_OK);
 	if (args->crsa_target_max_slots == fc_tbl->max_slots)
-		goto out_putclient;
+		goto out;
 
 	fc_tbl->target_max_slots = args->crsa_target_max_slots;
-	nfs41_handle_recall_slot(clp);
-out_putclient:
-	nfs_put_client(clp);	/* balance nfs_find_client */
+	nfs41_handle_recall_slot(cps->clp);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
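
RCA4_TYPE_MASK_ALL (0xf31f) in the callback.h hunk is simply the OR of the defined bit positions (0-4, 8-9 and 12-15), and validate_bitmap_values() above rejects any mask with other bits set. A self-contained sketch of the same bit arithmetic, reusing the constants from the patch:

#include <stdio.h>

#define RCA4_TYPE_MASK_RDATA_DLG	0
#define RCA4_TYPE_MASK_WDATA_DLG	1
#define RCA4_TYPE_MASK_DIR_DLG		2
#define RCA4_TYPE_MASK_FILE_LAYOUT	3
#define RCA4_TYPE_MASK_BLK_LAYOUT	4
#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN	8
#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX	9
#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN	12
#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX	15
#define RCA4_TYPE_MASK_ALL 0xf31f

/* Same test as validate_bitmap_values(): only defined bits may be set. */
static int mask_is_valid(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

int main(void)
{
	unsigned long ok  = (1UL << RCA4_TYPE_MASK_RDATA_DLG) |
			    (1UL << RCA4_TYPE_MASK_FILE_LAYOUT);
	unsigned long bad = ok | (1UL << 5);	/* bit 5 is undefined */

	printf("0x%lx -> %d\n", ok,  mask_is_valid(ok));	/* 1 */
	printf("0x%lx -> %d\n", bad, mask_is_valid(bad));	/* 0 */
	return 0;
}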
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 05af212..14e0f93 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -10,8 +10,10 @@
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
 #include <linux/slab.h>
+#include <linux/sunrpc/bc_xprt.h>
 #include "nfs4_fs.h"
 #include "callback.h"
+#include "internal.h"
 
 #define CB_OP_TAGLEN_MAXSZ	(512)
 #define CB_OP_HDR_RES_MAXSZ	(2 + CB_OP_TAGLEN_MAXSZ)
@@ -22,6 +24,7 @@
 #define CB_OP_RECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 
 #if defined(CONFIG_NFS_V4_1)
+#define CB_OP_LAYOUTRECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 #define CB_OP_SEQUENCE_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
 					4 + 1 + 3)
 #define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
@@ -33,7 +36,8 @@
 /* Internal error code */
 #define NFS4ERR_RESOURCE_HDR	11050
 
-typedef __be32 (*callback_process_op_t)(void *, void *);
+typedef __be32 (*callback_process_op_t)(void *, void *,
+					struct cb_process_state *);
 typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
 typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
 
@@ -160,7 +164,7 @@
 	hdr->minorversion = ntohl(*p++);
 	/* Check minor version is zero or one. */
 	if (hdr->minorversion <= 1) {
-		p++;	/* skip callback_ident */
+		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
 	} else {
 		printk(KERN_WARNING "%s: NFSv4 server callback with "
 			"illegal minor version %u!\n",
@@ -220,6 +224,66 @@
 
 #if defined(CONFIG_NFS_V4_1)
 
+static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
+				       struct xdr_stream *xdr,
+				       struct cb_layoutrecallargs *args)
+{
+	__be32 *p;
+	__be32 status = 0;
+	uint32_t iomode;
+
+	args->cbl_addr = svc_addr(rqstp);
+	p = read_buf(xdr, 4 * sizeof(uint32_t));
+	if (unlikely(p == NULL)) {
+		status = htonl(NFS4ERR_BADXDR);
+		goto out;
+	}
+
+	args->cbl_layout_type = ntohl(*p++);
+	/* Depite the spec's xdr, iomode really belongs in the FILE switch,
+	 * as it is unuseable and ignored with the other types.
+	 */
+	iomode = ntohl(*p++);
+	args->cbl_layoutchanged = ntohl(*p++);
+	args->cbl_recall_type = ntohl(*p++);
+
+	if (args->cbl_recall_type == RETURN_FILE) {
+		args->cbl_range.iomode = iomode;
+		status = decode_fh(xdr, &args->cbl_fh);
+		if (unlikely(status != 0))
+			goto out;
+
+		p = read_buf(xdr, 2 * sizeof(uint64_t));
+		if (unlikely(p == NULL)) {
+			status = htonl(NFS4ERR_BADXDR);
+			goto out;
+		}
+		p = xdr_decode_hyper(p, &args->cbl_range.offset);
+		p = xdr_decode_hyper(p, &args->cbl_range.length);
+		status = decode_stateid(xdr, &args->cbl_stateid);
+		if (unlikely(status != 0))
+			goto out;
+	} else if (args->cbl_recall_type == RETURN_FSID) {
+		p = read_buf(xdr, 2 * sizeof(uint64_t));
+		if (unlikely(p == NULL)) {
+			status = htonl(NFS4ERR_BADXDR);
+			goto out;
+		}
+		p = xdr_decode_hyper(p, &args->cbl_fsid.major);
+		p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
+	} else if (args->cbl_recall_type != RETURN_ALL) {
+		status = htonl(NFS4ERR_BADXDR);
+		goto out;
+	}
+	dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n",
+		__func__,
+		args->cbl_layout_type, iomode,
+		args->cbl_layoutchanged, args->cbl_recall_type);
+out:
+	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+	return status;
+}
+
 static __be32 decode_sessionid(struct xdr_stream *xdr,
 				 struct nfs4_sessionid *sid)
 {
@@ -574,10 +638,10 @@
 	case OP_CB_SEQUENCE:
 	case OP_CB_RECALL_ANY:
 	case OP_CB_RECALL_SLOT:
+	case OP_CB_LAYOUTRECALL:
 		*op = &callback_ops[op_nr];
 		break;
 
-	case OP_CB_LAYOUTRECALL:
 	case OP_CB_NOTIFY_DEVICEID:
 	case OP_CB_NOTIFY:
 	case OP_CB_PUSH_DELEG:
@@ -593,6 +657,37 @@
 	return htonl(NFS_OK);
 }
 
+static void nfs4_callback_free_slot(struct nfs4_session *session)
+{
+	struct nfs4_slot_table *tbl = &session->bc_slot_table;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	/*
+	 * Let the state manager know callback processing done.
+	 * A single slot, so highest used slotid is either 0 or -1
+	 */
+	tbl->highest_used_slotid--;
+	nfs4_check_drain_bc_complete(session);
+	spin_unlock(&tbl->slot_tbl_lock);
+}
+
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+	if (clp && clp->cl_session)
+		nfs4_callback_free_slot(clp->cl_session);
+}
+
+/* A single slot, so highest used slotid is either 0 or -1 */
+void nfs4_cb_take_slot(struct nfs_client *clp)
+{
+	struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	tbl->highest_used_slotid++;
+	BUG_ON(tbl->highest_used_slotid != 0);
+	spin_unlock(&tbl->slot_tbl_lock);
+}
+
 #else /* CONFIG_NFS_V4_1 */
 
 static __be32
@@ -601,6 +696,9 @@
 	return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
 }
 
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 static __be32
@@ -621,7 +719,8 @@
 static __be32 process_op(uint32_t minorversion, int nop,
 		struct svc_rqst *rqstp,
 		struct xdr_stream *xdr_in, void *argp,
-		struct xdr_stream *xdr_out, void *resp, int* drc_status)
+		struct xdr_stream *xdr_out, void *resp,
+		struct cb_process_state *cps)
 {
 	struct callback_op *op = &callback_ops[0];
 	unsigned int op_nr;
@@ -644,8 +743,8 @@
 	if (status)
 		goto encode_hdr;
 
-	if (*drc_status) {
-		status = *drc_status;
+	if (cps->drc_status) {
+		status = cps->drc_status;
 		goto encode_hdr;
 	}
 
@@ -653,16 +752,10 @@
 	if (maxlen > 0 && maxlen < PAGE_SIZE) {
 		status = op->decode_args(rqstp, xdr_in, argp);
 		if (likely(status == 0))
-			status = op->process_op(argp, resp);
+			status = op->process_op(argp, resp, cps);
 	} else
 		status = htonl(NFS4ERR_RESOURCE);
 
-	/* Only set by OP_CB_SEQUENCE processing */
-	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
-		*drc_status = status;
-		status = 0;
-	}
-
 encode_hdr:
 	res = encode_op_hdr(xdr_out, op_nr, status);
 	if (unlikely(res))
@@ -681,8 +774,11 @@
 	struct cb_compound_hdr_arg hdr_arg = { 0 };
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
-	__be32 *p;
-	__be32 status, drc_status = 0;
+	__be32 *p, status;
+	struct cb_process_state cps = {
+		.drc_status = 0,
+		.clp = NULL,
+	};
 	unsigned int nops = 0;
 
 	dprintk("%s: start\n", __func__);
@@ -696,6 +792,12 @@
 	if (status == __constant_htonl(NFS4ERR_RESOURCE))
 		return rpc_garbage_args;
 
+	if (hdr_arg.minorversion == 0) {
+		cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
+		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+			return rpc_drop_reply;
+	}
+
 	hdr_res.taglen = hdr_arg.taglen;
 	hdr_res.tag = hdr_arg.tag;
 	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
@@ -703,7 +805,7 @@
 
 	while (status == 0 && nops != hdr_arg.nops) {
 		status = process_op(hdr_arg.minorversion, nops, rqstp,
-				    &xdr_in, argp, &xdr_out, resp, &drc_status);
+				    &xdr_in, argp, &xdr_out, resp, &cps);
 		nops++;
 	}
 
@@ -716,6 +818,8 @@
 
 	*hdr_res.status = status;
 	*hdr_res.nops = htonl(nops);
+	nfs4_cb_free_slot(cps.clp);
+	nfs_put_client(cps.clp);
 	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
 	return rpc_success;
 }
@@ -739,6 +843,12 @@
 		.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
 	},
 #if defined(CONFIG_NFS_V4_1)
+	[OP_CB_LAYOUTRECALL] = {
+		.process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
+		.decode_args =
+			(callback_decode_arg_t)decode_layoutrecall_args,
+		.res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
+	},
 	[OP_CB_SEQUENCE] = {
 		.process_op = (callback_process_op_t)nfs4_callback_sequence,
 		.decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0870d0d..bd3ca32 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -56,6 +56,30 @@
 static LIST_HEAD(nfs_client_list);
 static LIST_HEAD(nfs_volume_list);
 static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
+#ifdef CONFIG_NFS_V4
+static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */
+
+/*
+ * Get a unique NFSv4.0 callback identifier which will be used
+ * by the V4.0 callback service to lookup the nfs_client struct
+ */
+static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+{
+	int ret = 0;
+
+	if (clp->rpc_ops->version != 4 || minorversion != 0)
+		return ret;
+retry:
+	if (!idr_pre_get(&cb_ident_idr, GFP_KERNEL))
+		return -ENOMEM;
+	spin_lock(&nfs_client_lock);
+	ret = idr_get_new(&cb_ident_idr, clp, &clp->cl_cb_ident);
+	spin_unlock(&nfs_client_lock);
+	if (ret == -EAGAIN)
+		goto retry;
+	return ret;
+}
+#endif /* CONFIG_NFS_V4 */
 
 /*
  * RPC cruft for NFS
@@ -144,7 +168,10 @@
 	clp->cl_proto = cl_init->proto;
 
 #ifdef CONFIG_NFS_V4
-	INIT_LIST_HEAD(&clp->cl_delegations);
+	err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
+	if (err)
+		goto error_cleanup;
+
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -170,21 +197,17 @@
 }
 
 #ifdef CONFIG_NFS_V4
-/*
- * Clears/puts all minor version specific parts from an nfs_client struct
- * reverting it to minorversion 0.
- */
-static void nfs4_clear_client_minor_version(struct nfs_client *clp)
-{
 #ifdef CONFIG_NFS_V4_1
-	if (nfs4_has_session(clp)) {
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+	if (nfs4_has_session(clp))
 		nfs4_destroy_session(clp->cl_session);
-		clp->cl_session = NULL;
-	}
-
-	clp->cl_mvops = nfs_v4_minor_ops[0];
-#endif /* CONFIG_NFS_V4_1 */
 }
+#else /* CONFIG_NFS_V4_1 */
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
 
 /*
  * Destroy the NFS4 callback service
@@ -199,17 +222,49 @@
 {
 	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
 		nfs4_kill_renewd(clp);
-	nfs4_clear_client_minor_version(clp);
+	nfs4_shutdown_session(clp);
 	nfs4_destroy_callback(clp);
 	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
 		nfs_idmap_delete(clp);
 
 	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
 }
+
+/* idr_remove_all is not needed as all ids are removed by nfs_put_client */
+void nfs_cleanup_cb_ident_idr(void)
+{
+	idr_destroy(&cb_ident_idr);
+}
+
+/* nfs_client_lock held */
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+	if (clp->cl_cb_ident)
+		idr_remove(&cb_ident_idr, clp->cl_cb_ident);
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+	rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
+}
+
 #else
 static void nfs4_shutdown_client(struct nfs_client *clp)
 {
 }
+
+void nfs_cleanup_cb_ident_idr(void)
+{
+}
+
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+}
+
 #endif /* CONFIG_NFS_V4 */
 
 /*
@@ -248,6 +303,7 @@
 
 	if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) {
 		list_del(&clp->cl_share_link);
+		nfs_cb_idr_remove_locked(clp);
 		spin_unlock(&nfs_client_lock);
 
 		BUG_ON(!list_empty(&clp->cl_superblocks));
@@ -363,70 +419,28 @@
 	return 0;
 }
 
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
+/* Common match routine for v4.0 and v4.1 callback services */
+bool
+nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp,
+		     u32 minorversion)
 {
-	struct nfs_client *clp;
+	struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
 
-	spin_lock(&nfs_client_lock);
-	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
-		struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
+	/* Don't match clients that failed to initialise */
+	if (!(clp->cl_cons_state == NFS_CS_READY ||
+	    clp->cl_cons_state == NFS_CS_SESSION_INITING))
+		return false;
 
-		/* Don't match clients that failed to initialise properly */
-		if (!(clp->cl_cons_state == NFS_CS_READY ||
-		      clp->cl_cons_state == NFS_CS_SESSION_INITING))
-			continue;
+	/* Match the version and minorversion */
+	if (clp->rpc_ops->version != 4 ||
+	    clp->cl_minorversion != minorversion)
+		return false;
 
-		/* Different NFS versions cannot share the same nfs_client */
-		if (clp->rpc_ops->version != nfsversion)
-			continue;
+	/* Match only the IP address, not the port number */
+	if (!nfs_sockaddr_match_ipaddr(addr, clap))
+		return false;
 
-		/* Match only the IP address, not the port number */
-		if (!nfs_sockaddr_match_ipaddr(addr, clap))
-			continue;
-
-		atomic_inc(&clp->cl_count);
-		spin_unlock(&nfs_client_lock);
-		return clp;
-	}
-	spin_unlock(&nfs_client_lock);
-	return NULL;
-}
-
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client_next(struct nfs_client *clp)
-{
-	struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr;
-	u32 nfsvers = clp->rpc_ops->version;
-
-	spin_lock(&nfs_client_lock);
-	list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) {
-		struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
-
-		/* Don't match clients that failed to initialise properly */
-		if (clp->cl_cons_state != NFS_CS_READY)
-			continue;
-
-		/* Different NFS versions cannot share the same nfs_client */
-		if (clp->rpc_ops->version != nfsvers)
-			continue;
-
-		/* Match only the IP address, not the port number */
-		if (!nfs_sockaddr_match_ipaddr(sap, clap))
-			continue;
-
-		atomic_inc(&clp->cl_count);
-		spin_unlock(&nfs_client_lock);
-		return clp;
-	}
-	spin_unlock(&nfs_client_lock);
-	return NULL;
+	return true;
 }
 
 /*
@@ -988,6 +1002,27 @@
 	target->options = source->options;
 }
 
+static void nfs_server_insert_lists(struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+
+	spin_lock(&nfs_client_lock);
+	list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
+	list_add_tail(&server->master_link, &nfs_volume_list);
+	spin_unlock(&nfs_client_lock);
+
+}
+
+static void nfs_server_remove_lists(struct nfs_server *server)
+{
+	spin_lock(&nfs_client_lock);
+	list_del_rcu(&server->client_link);
+	list_del(&server->master_link);
+	spin_unlock(&nfs_client_lock);
+
+	synchronize_rcu();
+}
+
 /*
  * Allocate and initialise a server record
  */
@@ -1004,6 +1039,7 @@
 	/* Zero out the NFS state stuff */
 	INIT_LIST_HEAD(&server->client_link);
 	INIT_LIST_HEAD(&server->master_link);
+	INIT_LIST_HEAD(&server->delegations);
 
 	atomic_set(&server->active, 0);
 
@@ -1019,6 +1055,8 @@
 		return NULL;
 	}
 
+	pnfs_init_server(server);
+
 	return server;
 }
 
@@ -1029,11 +1067,8 @@
 {
 	dprintk("--> nfs_free_server()\n");
 
+	nfs_server_remove_lists(server);
 	unset_pnfs_layoutdriver(server);
-	spin_lock(&nfs_client_lock);
-	list_del(&server->client_link);
-	list_del(&server->master_link);
-	spin_unlock(&nfs_client_lock);
 
 	if (server->destroy != NULL)
 		server->destroy(server);
@@ -1108,11 +1143,7 @@
 		(unsigned long long) server->fsid.major,
 		(unsigned long long) server->fsid.minor);
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 	nfs_free_fattr(fattr);
 	return server;
@@ -1125,6 +1156,96 @@
 
 #ifdef CONFIG_NFS_V4
 /*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by IP address, protocol version, and minorversion
+ *
+ * Called from the pg_authenticate method. The callback identifier
+ * is not used as it has not been decoded.
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_no_ident(const struct sockaddr *addr)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+		if (nfs4_cb_match_client(addr, clp, 0) == false)
+			continue;
+		atomic_inc(&clp->cl_count);
+		spin_unlock(&nfs_client_lock);
+		return clp;
+	}
+	spin_unlock(&nfs_client_lock);
+	return NULL;
+}
+
+/*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by callback identifier
+ */
+struct nfs_client *
+nfs4_find_client_ident(int cb_ident)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	clp = idr_find(&cb_ident_idr, cb_ident);
+	if (clp)
+		atomic_inc(&clp->cl_count);
+	spin_unlock(&nfs_client_lock);
+	return clp;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * NFSv4.1 callback thread helper
+ * For CB_COMPOUND calls, find a client by IP address, protocol version,
+ * minorversion, and sessionID
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+			   struct nfs4_sessionid *sid)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+		if (nfs4_cb_match_client(addr, clp, 1) == false)
+			continue;
+
+		if (!nfs4_has_session(clp))
+			continue;
+
+		/* Match sessionid */
+		if (memcmp(clp->cl_session->sess_id.data,
+		    sid->data, NFS4_MAX_SESSIONID_LEN) != 0)
+			continue;
+
+		atomic_inc(&clp->cl_count);
+		spin_unlock(&nfs_client_lock);
+		return clp;
+	}
+	spin_unlock(&nfs_client_lock);
+	return NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+			   struct nfs4_sessionid *sid)
+{
+	return NULL;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
  * Initialize the NFS4 callback service
  */
 static int nfs4_init_callback(struct nfs_client *clp)
@@ -1342,11 +1463,7 @@
 	if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
 		server->namelen = NFS4_MAXNAMLEN;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 out:
 	nfs_free_fattr(fattr);
@@ -1551,11 +1668,7 @@
 	if (error < 0)
 		goto out_free_server;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 
 	nfs_free_fattr(fattr_fsinfo);
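
The client.c changes above give each NFSv4.0 client a small integer callback identifier: nfs_get_cb_ident_idr() allocates it from an IDR under nfs_client_lock, nfs4_find_client_ident() looks it up from the callback path, and nfs_cb_idr_remove_locked() drops it when the client goes away. A minimal userspace analogue of that lifecycle, using a fixed array instead of the kernel IDR and no locking, purely for illustration:

#include <stddef.h>
#include <stdio.h>

#define MAX_CLIENTS 8

struct client { const char *name; };

static struct client *ident_table[MAX_CLIENTS];

/* Allocate the smallest free id (0 is reserved to mean "no id"). */
static int ident_alloc(struct client *clp)
{
	for (int id = 1; id < MAX_CLIENTS; id++) {
		if (ident_table[id] == NULL) {
			ident_table[id] = clp;
			return id;
		}
	}
	return -1;				/* table full */
}

/* Callback-path lookup by identifier. */
static struct client *ident_find(int id)
{
	return (id > 0 && id < MAX_CLIENTS) ? ident_table[id] : NULL;
}

/* Drop the id when the client is freed. */
static void ident_remove(int id)
{
	if (id > 0 && id < MAX_CLIENTS)
		ident_table[id] = NULL;
}

int main(void)
{
	struct client c = { "clientA" };
	int id = ident_alloc(&c);

	printf("allocated id %d -> %s\n", id, ident_find(id)->name);
	ident_remove(id);
	printf("after remove: %p\n", (void *)ident_find(id));
	return 0;
}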
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1fd62fc..bbbc6bf 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -23,8 +23,6 @@
 
 static void nfs_do_free_delegation(struct nfs_delegation *delegation)
 {
-	if (delegation->cred)
-		put_rpccred(delegation->cred);
 	kfree(delegation);
 }
 
@@ -37,14 +35,30 @@
 
 static void nfs_free_delegation(struct nfs_delegation *delegation)
 {
+	if (delegation->cred) {
+		put_rpccred(delegation->cred);
+		delegation->cred = NULL;
+	}
 	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
 }
 
+/**
+ * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
+ * @delegation: delegation to process
+ *
+ */
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
 {
 	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
 }
 
+/**
+ * nfs_have_delegation - check if inode has a delegation
+ * @inode: inode to check
+ * @flags: delegation types to check for
+ *
+ * Returns one if inode has the indicated delegation, otherwise zero.
+ */
 int nfs_have_delegation(struct inode *inode, fmode_t flags)
 {
 	struct nfs_delegation *delegation;
@@ -119,10 +133,15 @@
 	return 0;
 }
 
-/*
- * Set up a delegation on an inode
+/**
+ * nfs_inode_reclaim_delegation - process a delegation reclaim request
+ * @inode: inode to process
+ * @cred: credential to use for request
+ * @res: new delegation state from server
+ *
  */
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+				  struct nfs_openres *res)
 {
 	struct nfs_delegation *delegation;
 	struct rpc_cred *oldcred = NULL;
@@ -175,38 +194,52 @@
 	return inode;
 }
 
-static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
-							   const nfs4_stateid *stateid,
-							   struct nfs_client *clp)
+static struct nfs_delegation *
+nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+			     struct nfs_server *server)
 {
 	struct nfs_delegation *delegation =
 		rcu_dereference_protected(nfsi->delegation,
-					  lockdep_is_held(&clp->cl_lock));
+				lockdep_is_held(&server->nfs_client->cl_lock));
 
 	if (delegation == NULL)
 		goto nomatch;
+
 	spin_lock(&delegation->lock);
-	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
-				sizeof(delegation->stateid.data)) != 0)
-		goto nomatch_unlock;
 	list_del_rcu(&delegation->super_list);
 	delegation->inode = NULL;
 	nfsi->delegation_state = 0;
 	rcu_assign_pointer(nfsi->delegation, NULL);
 	spin_unlock(&delegation->lock);
 	return delegation;
-nomatch_unlock:
-	spin_unlock(&delegation->lock);
 nomatch:
 	return NULL;
 }
 
-/*
- * Set up a delegation on an inode
+static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
+						    struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+	struct nfs_delegation *delegation;
+
+	spin_lock(&clp->cl_lock);
+	delegation = nfs_detach_delegation_locked(nfsi, server);
+	spin_unlock(&clp->cl_lock);
+	return delegation;
+}
+
+/**
+ * nfs_inode_set_delegation - set up a delegation on an inode
+ * @inode: inode to which delegation applies
+ * @cred: cred to use for subsequent delegation processing
+ * @res: new delegation state from server
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation, *old_delegation;
 	struct nfs_delegation *freeme = NULL;
@@ -227,7 +260,7 @@
 
 	spin_lock(&clp->cl_lock);
 	old_delegation = rcu_dereference_protected(nfsi->delegation,
-						   lockdep_is_held(&clp->cl_lock));
+					lockdep_is_held(&clp->cl_lock));
 	if (old_delegation != NULL) {
 		if (memcmp(&delegation->stateid, &old_delegation->stateid,
 					sizeof(old_delegation->stateid)) == 0 &&
@@ -246,9 +279,9 @@
 			delegation = NULL;
 			goto out;
 		}
-		freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
+		freeme = nfs_detach_delegation_locked(nfsi, server);
 	}
-	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
+	list_add_rcu(&delegation->super_list, &server->delegations);
 	nfsi->delegation_state = delegation->type;
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
@@ -290,73 +323,85 @@
 	return err;
 }
 
-/*
- * Return all delegations that have been marked for return
+/**
+ * nfs_client_return_marked_delegations - return previously marked delegations
+ * @clp: nfs_client to process
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_client_return_marked_delegations(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
 	int err = 0;
 
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
-		rcu_read_unlock();
-		if (delegation != NULL) {
-			filemap_flush(inode->i_mapping);
-			err = __nfs_inode_return_delegation(inode, delegation, 0);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
+							&delegation->flags))
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL) {
+				filemap_flush(inode->i_mapping);
+				err = __nfs_inode_return_delegation(inode,
+								delegation, 0);
+			}
+			iput(inode);
+			if (!err)
+				goto restart;
+			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+			return err;
 		}
-		iput(inode);
-		if (!err)
-			goto restart;
-		set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
-		return err;
 	}
 	rcu_read_unlock();
 	return 0;
 }
 
-/*
- * This function returns the delegation without reclaiming opens
- * or protecting against delegation reclaims.
- * It is therefore really only safe to be called from
- * nfs4_clear_inode()
+/**
+ * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
+ * @inode: inode to process
+ *
+ * Does not protect against delegation reclaims, therefore really only safe
+ * to be called from nfs4_clear_inode().
  */
 void nfs_inode_return_delegation_noreclaim(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL)
 			nfs_do_return_delegation(inode, delegation, 0);
 	}
 }
 
+/**
+ * nfs_inode_return_delegation - synchronously return a delegation
+ * @inode: inode to process
+ *
+ * Returns zero on success, or a negative errno value.
+ */
 int nfs_inode_return_delegation(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 	int err = 0;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL) {
 			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -365,46 +410,61 @@
 	return err;
 }
 
-static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
+static void nfs_mark_return_delegation(struct nfs_delegation *delegation)
 {
+	struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client;
+
 	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
 }
 
-/*
- * Return all delegations associated to a super block
+/**
+ * nfs_super_return_all_delegations - return delegations for one superblock
+ * @sb: sb to process
+ *
  */
 void nfs_super_return_all_delegations(struct super_block *sb)
 {
-	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
+	struct nfs_server *server = NFS_SB(sb);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_delegation *delegation;
 
 	if (clp == NULL)
 		return;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
-		if (delegation->inode != NULL && delegation->inode->i_sb == sb)
-			set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+		set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 		spin_unlock(&delegation->lock);
 	}
 	rcu_read_unlock();
+
 	if (nfs_client_return_marked_delegations(clp) != 0)
 		nfs4_schedule_state_manager(clp);
 }
 
-static
-void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
+static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
+						 fmode_t flags)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
 			continue;
 		if (delegation->type & flags)
-			nfs_mark_return_delegation(clp, delegation);
+			nfs_mark_return_delegation(delegation);
 	}
+}
+
+static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
+							fmode_t flags)
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_all_delegation_types(server, flags);
 	rcu_read_unlock();
 }
 
@@ -419,19 +479,32 @@
 		nfs4_schedule_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+ * @flags: delegation types to expire
+ *
+ */
 void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
 {
 	nfs_client_mark_return_all_delegation_types(clp, flags);
 	nfs_delegation_run_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegations
+ * @clp: client to process
+ *
+ */
 void nfs_expire_all_delegations(struct nfs_client *clp)
 {
 	nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
 }
 
-/*
- * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
+/**
+ * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
+ * @clp: client to process
+ *
  */
 void nfs_handle_cb_pathdown(struct nfs_client *clp)
 {
@@ -440,29 +513,43 @@
 	nfs_client_mark_return_all_delegations(clp);
 }
 
-static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
+static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
 			continue;
-		nfs_mark_return_delegation(clp, delegation);
+		nfs_mark_return_delegation(delegation);
 	}
-	rcu_read_unlock();
 }
 
+/**
+ * nfs_expire_unreferenced_delegations - Eliminate unused delegations
+ * @clp: nfs_client to process
+ *
+ */
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
 {
-	nfs_client_mark_return_unreferenced_delegations(clp);
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_unreferenced_delegations(server);
+	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 }
 
-/*
- * Asynchronous delegation recall!
+/**
+ * nfs_async_inode_return_delegation - asynchronously return a delegation
+ * @inode: inode to process
+ * @stateid: state ID information from CB_RECALL arguments
+ *
+ * Returns zero on success, or a negative errno value.
  */
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+int nfs_async_inode_return_delegation(struct inode *inode,
+				      const nfs4_stateid *stateid)
 {
 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 	struct nfs_delegation *delegation;
@@ -474,22 +561,21 @@
 		rcu_read_unlock();
 		return -ENOENT;
 	}
-
-	nfs_mark_return_delegation(clp, delegation);
+	nfs_mark_return_delegation(delegation);
 	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 	return 0;
 }
 
-/*
- * Retrieve the inode associated with a delegation
- */
-struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
+static struct inode *
+nfs_delegation_find_inode_server(struct nfs_server *server,
+				 const struct nfs_fh *fhandle)
 {
 	struct nfs_delegation *delegation;
 	struct inode *res = NULL;
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
 		if (delegation->inode != NULL &&
 		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
@@ -499,49 +585,121 @@
 		if (res != NULL)
 			break;
 	}
+	return res;
+}
+
+/**
+ * nfs_delegation_find_inode - retrieve the inode associated with a delegation
+ * @clp: client state handle
+ * @fhandle: filehandle from a delegation recall
+ *
+ * Returns pointer to inode matching "fhandle," or NULL if a matching inode
+ * cannot be found.
+ */
+struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
+					const struct nfs_fh *fhandle)
+{
+	struct nfs_server *server;
+	struct inode *res = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		res = nfs_delegation_find_inode_server(server, fhandle);
+		if (res != NULL)
+			break;
+	}
 	rcu_read_unlock();
 	return res;
 }
 
-/*
- * Mark all delegations as needing to be reclaimed
+static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
+{
+	struct nfs_delegation *delegation;
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+}
+
+/**
+ * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_mark_reclaim(struct nfs_client *clp)
 {
-	struct nfs_delegation *delegation;
+	struct nfs_server *server;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
-		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_delegation_mark_reclaim_server(server);
 	rcu_read_unlock();
 }
 
-/*
- * Reap all unclaimed delegations after reboot recovery is done
+/**
+ * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
+
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
-		rcu_read_unlock();
-		if (delegation != NULL)
-			nfs_free_delegation(delegation);
-		iput(inode);
-		goto restart;
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+						&delegation->flags) == 0)
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL)
+				nfs_free_delegation(delegation);
+			iput(inode);
+			goto restart;
+		}
 	}
 	rcu_read_unlock();
 }
 
+/**
+ * nfs_delegations_present - check for existence of delegations
+ * @clp: client state handle
+ *
+ * Returns one if there are any nfs_delegation structures attached
+ * to this nfs_client.
+ */
+int nfs_delegations_present(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+	int ret = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		if (!list_empty(&server->delegations)) {
+			ret = 1;
+			break;
+		}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nfs4_copy_delegation_stateid - Copy inode's state ID information
+ * @dst: stateid data structure to fill in
+ * @inode: inode to check
+ *
+ * Returns one and fills in "dst->data" if inode had a delegation,
+ * otherwise zero is returned.
+ */
 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
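
The delegation.c changes above move the per-client delegation list (clp->cl_delegations) onto each nfs_server (server->delegations), so every walk now iterates clp->cl_superblocks under RCU and then each server's list. A minimal sketch of that pattern, using a hypothetical counting helper that is not part of this patch:

	static int nfs_count_delegations(struct nfs_client *clp)
	{
		struct nfs_server *server;
		struct nfs_delegation *delegation;
		int count = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
			list_for_each_entry_rcu(delegation, &server->delegations,
						super_list)
				count++;	/* both lists are RCU-protected */
		rcu_read_unlock();
		return count;
	}
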
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2026304..d9322e4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -44,6 +44,7 @@
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
 void nfs_handle_cb_pathdown(struct nfs_client *clp);
 int nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_delegations_present(struct nfs_client *clp);
 
 void nfs_delegation_mark_reclaim(struct nfs_client *clp);
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d33da53..2c3eb33 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -33,8 +33,8 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/sched.h>
-#include <linux/vmalloc.h>
 #include <linux/kmemleak.h>
+#include <linux/xattr.h>
 
 #include "delegation.h"
 #include "iostat.h"
@@ -125,9 +125,10 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr       = nfs4_getxattr,
-	.setxattr       = nfs4_setxattr,
-	.listxattr      = nfs4_listxattr,
+	.getxattr	= generic_getxattr,
+	.setxattr	= generic_setxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
 };
 
 #endif /* CONFIG_NFS_V4 */
@@ -172,7 +173,7 @@
 	struct nfs_cache_array_entry array[0];
 };
 
-typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
 typedef struct {
 	struct file	*file;
 	struct page	*page;
@@ -378,14 +379,14 @@
 	return error;
 }
 
-/* Fill in an entry based on the xdr code stored in desc->page */
-static
-int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct xdr_stream *stream)
+static int xdr_decode(nfs_readdir_descriptor_t *desc,
+		      struct nfs_entry *entry, struct xdr_stream *xdr)
 {
-	__be32 *p = desc->decode(stream, entry, NFS_SERVER(desc->file->f_path.dentry->d_inode), desc->plus);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
+	int error;
 
+	error = desc->decode(xdr, entry, desc->plus);
+	if (error)
+		return error;
 	entry->fattr->time_start = desc->timestamp;
 	entry->fattr->gencount = desc->gencount;
 	return 0;
@@ -438,7 +439,6 @@
 	if (dentry == NULL)
 		return;
 
-	d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
 	inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
 	if (IS_ERR(inode))
 		goto out;
@@ -459,25 +459,26 @@
 /* Perform conversion from xdr to cache array */
 static
 int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
-				void *xdr_page, struct page *page, unsigned int buflen)
+				struct page **xdr_pages, struct page *page, unsigned int buflen)
 {
 	struct xdr_stream stream;
-	struct xdr_buf buf;
-	__be32 *ptr = xdr_page;
+	struct xdr_buf buf = {
+		.pages = xdr_pages,
+		.page_len = buflen,
+		.buflen = buflen,
+		.len = buflen,
+	};
+	struct page *scratch;
 	struct nfs_cache_array *array;
 	unsigned int count = 0;
 	int status;
 
-	buf.head->iov_base = xdr_page;
-	buf.head->iov_len = buflen;
-	buf.tail->iov_len = 0;
-	buf.page_base = 0;
-	buf.page_len = 0;
-	buf.buflen = buf.head->iov_len;
-	buf.len = buf.head->iov_len;
+	scratch = alloc_page(GFP_KERNEL);
+	if (scratch == NULL)
+		return -ENOMEM;
 
-	xdr_init_decode(&stream, &buf, ptr);
-
+	xdr_init_decode(&stream, &buf, NULL);
+	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
 
 	do {
 		status = xdr_decode(desc, entry, &stream);
@@ -506,6 +507,8 @@
 		} else
 			status = PTR_ERR(array);
 	}
+
+	put_page(scratch);
 	return status;
 }
 
@@ -521,7 +524,6 @@
 void nfs_readdir_free_large_page(void *ptr, struct page **pages,
 		unsigned int npages)
 {
-	vm_unmap_ram(ptr, npages);
 	nfs_readdir_free_pagearray(pages, npages);
 }
 
@@ -530,9 +532,8 @@
  * to nfs_readdir_free_large_page
  */
 static
-void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
+int nfs_readdir_large_page(struct page **pages, unsigned int npages)
 {
-	void *ptr;
 	unsigned int i;
 
 	for (i = 0; i < npages; i++) {
@@ -541,13 +542,11 @@
 			goto out_freepages;
 		pages[i] = page;
 	}
+	return 0;
 
-	ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL);
-	if (!IS_ERR_OR_NULL(ptr))
-		return ptr;
 out_freepages:
 	nfs_readdir_free_pagearray(pages, i);
-	return NULL;
+	return -ENOMEM;
 }
 
 static
@@ -566,6 +565,7 @@
 	entry.eof = 0;
 	entry.fh = nfs_alloc_fhandle();
 	entry.fattr = nfs_alloc_fattr();
+	entry.server = NFS_SERVER(inode);
 	if (entry.fh == NULL || entry.fattr == NULL)
 		goto out;
 
@@ -577,8 +577,8 @@
 	memset(array, 0, sizeof(struct nfs_cache_array));
 	array->eof_index = -1;
 
-	pages_ptr = nfs_readdir_large_page(pages, array_size);
-	if (!pages_ptr)
+	status = nfs_readdir_large_page(pages, array_size);
+	if (status < 0)
 		goto out_release_array;
 	do {
 		unsigned int pglen;
@@ -587,7 +587,7 @@
 		if (status < 0)
 			break;
 		pglen = status;
-		status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen);
+		status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
 		if (status < 0) {
 			if (status == -ENOSPC)
 				status = 0;
@@ -970,7 +970,7 @@
 {
 	struct nfs_server *server = NFS_SERVER(inode);
 
-	if (test_bit(NFS_INO_MOUNTPOINT, &NFS_I(inode)->flags))
+	if (IS_AUTOMOUNT(inode))
 		return 0;
 	if (nd != NULL) {
 		/* VFS wants an on-the-wire revalidation */
@@ -1173,6 +1173,7 @@
 	.d_revalidate	= nfs_lookup_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
+	.d_automount	= nfs_d_automount,
 };
 
 static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
@@ -1192,8 +1193,6 @@
 	if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
 		goto out;
 
-	d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
-
 	/*
 	 * If we're doing an exclusive create, optimize away the lookup
 	 * but don't hash the dentry.
@@ -1221,7 +1220,7 @@
 		goto out_unblock_sillyrename;
 	}
 	inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
-	res = (struct dentry *)inode;
+	res = ERR_CAST(inode);
 	if (IS_ERR(res))
 		goto out_unblock_sillyrename;
 
@@ -1248,6 +1247,7 @@
 	.d_revalidate	= nfs_open_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
+	.d_automount	= nfs_d_automount,
 };
 
 /*
@@ -1337,7 +1337,6 @@
 		res = ERR_PTR(-ENAMETOOLONG);
 		goto out;
 	}
-	d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
 
 	/* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash
 	 * the dentry. */
@@ -1355,8 +1354,7 @@
 	if (nd->flags & LOOKUP_CREATE) {
 		attr.ia_mode = nd->intent.open.create_mode;
 		attr.ia_valid = ATTR_MODE;
-		if (!IS_POSIXACL(dir))
-			attr.ia_mode &= ~current_umask();
+		attr.ia_mode &= ~current_umask();
 	} else {
 		open_flags &= ~(O_EXCL | O_CREAT);
 		attr.ia_valid = 0;
@@ -1410,11 +1408,15 @@
 static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
 	struct dentry *parent = NULL;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode;
 	struct inode *dir;
 	struct nfs_open_context *ctx;
 	int openflags, ret = 0;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = dentry->d_inode;
 	if (!is_atomic_open(nd) || d_mountpoint(dentry))
 		goto no_open;
 
@@ -1583,6 +1585,7 @@
 {
 	struct iattr attr;
 	int error;
+	int open_flags = 0;
 
 	dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
 			dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
@@ -1590,7 +1593,10 @@
 	attr.ia_mode = mode;
 	attr.ia_valid = ATTR_MODE;
 
-	error = NFS_PROTO(dir)->create(dir, dentry, &attr, 0, NULL);
+	if ((nd->flags & LOOKUP_CREATE) != 0)
+		open_flags = nd->intent.open.flags;
+
+	error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL);
 	if (error != 0)
 		goto out_err;
 	return 0;
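
The readdir changes above stop mapping the reply pages with vm_map_ram() and instead let the XDR stream decode directly out of the page array, copying only entries that straddle a page boundary through a single scratch page. Condensed from nfs_readdir_page_filler() above (a sketch of the set-up, not a separate helper in the patch):

	struct xdr_stream stream;
	struct xdr_buf buf = {
		.pages		= pages,	/* READDIR reply pages */
		.page_len	= buflen,
		.buflen		= buflen,
		.len		= buflen,
	};
	struct page *scratch = alloc_page(GFP_KERNEL);

	if (scratch == NULL)
		return -ENOMEM;
	xdr_init_decode(&stream, &buf, NULL);
	/* boundary-crossing entries are copied into the scratch page */
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
	/* ... decode entries from the stream ... */
	put_page(scratch);
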
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e6ace0d..9943a75 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -407,15 +407,18 @@
 		pos += vec->iov_len;
 	}
 
+	/*
+	 * If no bytes were started, return the error, and let the
+	 * generic layer handle the completion.
+	 */
+	if (requested_bytes == 0) {
+		nfs_direct_req_release(dreq);
+		return result < 0 ? result : -EIO;
+	}
+
 	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
-
-	if (requested_bytes != 0)
-		return 0;
-
-	if (result < 0)
-		return result;
-	return -EIO;
+	return 0;
 }
 
 static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
@@ -841,15 +844,18 @@
 		pos += vec->iov_len;
 	}
 
+	/*
+	 * If no bytes were started, return the error, and let the
+	 * generic layer handle the completion.
+	 */
+	if (requested_bytes == 0) {
+		nfs_direct_req_release(dreq);
+		return result < 0 ? result : -EIO;
+	}
+
 	if (put_dreq(dreq))
 		nfs_direct_write_complete(dreq, dreq->inode);
-
-	if (requested_bytes != 0)
-		return 0;
-
-	if (result < 0)
-		return result;
-	return -EIO;
+	return 0;
 }
 
 static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 5596c6a..b5ffe8fa 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -119,9 +119,6 @@
 	}
 
 	security_d_instantiate(ret, inode);
-
-	if (ret->d_op == NULL)
-		d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops);
 out:
 	nfs_free_fattr(fsinfo.fattr);
 	return ret;
@@ -227,9 +224,6 @@
 
 	security_d_instantiate(ret, inode);
 
-	if (ret->d_op == NULL)
-		d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops);
-
 out:
 	nfs_free_fattr(fattr);
 	dprintk("<-- nfs4_get_root()\n");
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 4e2d9b6..1869688 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -238,7 +238,7 @@
 	return nfs_idmap_lookup_name(gid, "group", buf, buflen);
 }
 
-#else  /* CONFIG_NFS_USE_IDMAPPER not defined */
+#else  /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */
 
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 017daa3..2f8e618 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -37,6 +37,7 @@
 #include <linux/inet.h>
 #include <linux/nfs_xdr.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -89,7 +90,11 @@
  */
 u64 nfs_compat_user_ino64(u64 fileid)
 {
-	int ino;
+#ifdef CONFIG_COMPAT
+	compat_ulong_t ino;
+#else
+	unsigned long ino;
+#endif
 
 	if (enable_ino64)
 		return fileid;
@@ -300,7 +305,7 @@
 				else
 					inode->i_op = &nfs_mountpoint_inode_operations;
 				inode->i_fop = NULL;
-				set_bit(NFS_INO_MOUNTPOINT, &nfsi->flags);
+				inode->i_flags |= S_AUTOMOUNT;
 			}
 		} else if (S_ISLNK(inode->i_mode))
 			inode->i_op = &nfs_symlink_inode_operations;
@@ -881,9 +886,10 @@
 	return ret;
 }
 
-static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	unsigned long ret = 0;
 
 	if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
 			&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
@@ -891,25 +897,32 @@
 		nfsi->change_attr = fattr->change_attr;
 		if (S_ISDIR(inode->i_mode))
 			nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+		ret |= NFS_INO_INVALID_ATTR;
 	}
 	/* If we have atomic WCC data, we may update some attributes */
 	if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
 			&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
-			&& timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
-			memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+			&& timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
+		memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+		ret |= NFS_INO_INVALID_ATTR;
+	}
 
 	if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
 			&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
 			&& timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
-			memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
-			if (S_ISDIR(inode->i_mode))
-				nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+		memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+		if (S_ISDIR(inode->i_mode))
+			nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+		ret |= NFS_INO_INVALID_ATTR;
 	}
 	if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
 			&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
 			&& i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
-			&& nfsi->npages == 0)
-			i_size_write(inode, nfs_size_to_loff_t(fattr->size));
+			&& nfsi->npages == 0) {
+		i_size_write(inode, nfs_size_to_loff_t(fattr->size));
+		ret |= NFS_INO_INVALID_ATTR;
+	}
+	return ret;
 }
 
 /**
@@ -1208,7 +1221,7 @@
 	/* Update the fsid? */
 	if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) &&
 			!nfs_fsid_equal(&server->fsid, &fattr->fsid) &&
-			!test_bit(NFS_INO_MOUNTPOINT, &nfsi->flags))
+			!IS_AUTOMOUNT(inode))
 		server->fsid = fattr->fsid;
 
 	/*
@@ -1223,7 +1236,7 @@
 			| NFS_INO_REVAL_PAGECACHE);
 
 	/* Do atomic weak cache consistency updates */
-	nfs_wcc_update_inode(inode, fattr);
+	invalid |= nfs_wcc_update_inode(inode, fattr);
 
 	/* More cache consistency checks */
 	if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
@@ -1410,9 +1423,9 @@
  */
 void nfs4_evict_inode(struct inode *inode)
 {
+	pnfs_destroy_layout(NFS_I(inode));
 	truncate_inode_pages(&inode->i_data, 0);
 	end_writeback(inode);
-	pnfs_destroy_layout(NFS_I(inode));
 	/* If we are holding a delegation, return it! */
 	nfs_inode_return_delegation_noreclaim(inode);
 	/* First call standard NFS clear_inode() code */
@@ -1619,6 +1632,7 @@
 #ifdef CONFIG_PROC_FS
 	rpc_proc_unregister("nfs");
 #endif
+	nfs_cleanup_cb_ident_idr();
 	unregister_nfs_fs();
 	nfs_fs_proc_exit();
 	nfsiod_stop();
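
The nfs_compat_user_ino64() hunk above only switches the local variable to compat_ulong_t under CONFIG_COMPAT; the intent is that, when 64-bit inode numbers are disabled, the fileid is folded into an inode number that fits the width a compat (32-bit) task uses for ino_t. A rough illustration of that kind of fold (hypothetical helper, not the body of the function shown here):

	/* Fold a 64-bit fileid into a 32-bit inode number. */
	static u32 fold_fileid32(u64 fileid)
	{
		return (u32)fileid ^ (u32)(fileid >> 32);
	}
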
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e6356b7..cf9fdbd 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -128,9 +128,12 @@
 /* client.c */
 extern struct rpc_program nfs_program;
 
+extern void nfs_cleanup_cb_ident_idr(void);
 extern void nfs_put_client(struct nfs_client *);
-extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32);
-extern struct nfs_client *nfs_find_client_next(struct nfs_client *);
+extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *);
+extern struct nfs_client *nfs4_find_client_ident(int);
+extern struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *);
 extern struct nfs_server *nfs_create_server(
 					const struct nfs_parsed_mount_data *,
 					struct nfs_fh *);
@@ -185,17 +188,20 @@
 extern void nfs_destroy_directcache(void);
 
 /* nfs2xdr.c */
-extern int nfs_stat_to_errno(int);
+extern int nfs_stat_to_errno(enum nfs_stat);
 extern struct rpc_procinfo nfs_procedures[];
-extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs2_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 
 /* nfs3xdr.c */
 extern struct rpc_procinfo nfs3_procedures[];
-extern __be32 *nfs3_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs3_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 
 /* nfs4xdr.c */
 #ifdef CONFIG_NFS_V4
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs4_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 #endif
 #ifdef CONFIG_NFS_V4_1
 extern const u32 nfs41_maxread_overhead;
@@ -245,6 +251,7 @@
 		      const struct dentry *droot,
 		      const struct dentry *dentry,
 		      char *buffer, ssize_t buflen);
+extern struct vfsmount *nfs_d_automount(struct path *path);
 
 /* getroot.c */
 extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *);
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 4f981f1..d4c2d6b 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -236,10 +236,8 @@
 		.authflavor	= RPC_AUTH_UNIX,
 		.flags		= RPC_CLNT_CREATE_NOPING,
 	};
-	struct mountres	result;
 	struct rpc_message msg	= {
 		.rpc_argp	= info->dirpath,
-		.rpc_resp	= &result,
 	};
 	struct rpc_clnt *clnt;
 	int status;
@@ -248,7 +246,7 @@
 		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
 
 	clnt = rpc_create(&args);
-	if (unlikely(IS_ERR(clnt)))
+	if (IS_ERR(clnt))
 		goto out_clnt_err;
 
 	dprintk("NFS: sending UMNT request for %s:%s\n",
@@ -280,29 +278,20 @@
  * XDR encode/decode functions for MOUNT
  */
 
-static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
 {
 	const u32 pathname_len = strlen(pathname);
 	__be32 *p;
 
-	if (unlikely(pathname_len > MNTPATHLEN))
-		return -EIO;
-
-	p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(pathname_len > MNTPATHLEN);
+	p = xdr_reserve_space(xdr, 4 + pathname_len);
 	xdr_encode_opaque(p, pathname, pathname_len);
-
-	return 0;
 }
 
-static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p,
-			   const char *dirpath)
+static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const char *dirpath)
 {
-	struct xdr_stream xdr;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	return encode_mntdirpath(&xdr, dirpath);
+	encode_mntdirpath(xdr, dirpath);
 }
 
 /*
@@ -320,10 +309,10 @@
 	u32 status;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(status));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	status = ntohl(*p);
+	status = be32_to_cpup(p);
 
 	for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
 		if (mnt_errtbl[i].status == status) {
@@ -351,18 +340,16 @@
 	return 0;
 }
 
-static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p,
-			    struct mountres *res)
+static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				struct mountres *res)
 {
-	struct xdr_stream xdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	status = decode_status(&xdr, res);
+	status = decode_status(xdr, res);
 	if (unlikely(status != 0 || res->errno != 0))
 		return status;
-	return decode_fhandle(&xdr, res);
+	return decode_fhandle(xdr, res);
 }
 
 static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
@@ -371,10 +358,10 @@
 	u32 status;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(status));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	status = ntohl(*p);
+	status = be32_to_cpup(p);
 
 	for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
 		if (mnt3_errtbl[i].status == status) {
@@ -394,11 +381,11 @@
 	u32 size;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(size));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
-	size = ntohl(*p++);
+	size = be32_to_cpup(p);
 	if (size > NFS3_FHSIZE || size == 0)
 		return -EIO;
 
@@ -421,15 +408,15 @@
 	if (*count == 0)
 		return 0;
 
-	p = xdr_inline_decode(xdr, sizeof(entries));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	entries = ntohl(*p);
+	entries = be32_to_cpup(p);
 	dprintk("NFS: received %u auth flavors\n", entries);
 	if (entries > NFS_MAX_SECFLAVORS)
 		entries = NFS_MAX_SECFLAVORS;
 
-	p = xdr_inline_decode(xdr, sizeof(u32) * entries);
+	p = xdr_inline_decode(xdr, 4 * entries);
 	if (unlikely(p == NULL))
 		return -EIO;
 
@@ -437,7 +424,7 @@
 		entries = *count;
 
 	for (i = 0; i < entries; i++) {
-		flavors[i] = ntohl(*p++);
+		flavors[i] = be32_to_cpup(p++);
 		dprintk("NFS:   auth flavor[%u]: %d\n", i, flavors[i]);
 	}
 	*count = i;
@@ -445,30 +432,28 @@
 	return 0;
 }
 
-static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p,
-			     struct mountres *res)
+static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 struct mountres *res)
 {
-	struct xdr_stream xdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	status = decode_fhs_status(&xdr, res);
+	status = decode_fhs_status(xdr, res);
 	if (unlikely(status != 0 || res->errno != 0))
 		return status;
-	status = decode_fhandle3(&xdr, res);
+	status = decode_fhandle3(xdr, res);
 	if (unlikely(status != 0)) {
 		res->errno = -EBADHANDLE;
 		return 0;
 	}
-	return decode_auth_flavors(&xdr, res);
+	return decode_auth_flavors(xdr, res);
 }
 
 static struct rpc_procinfo mnt_procedures[] = {
 	[MOUNTPROC_MNT] = {
 		.p_proc		= MOUNTPROC_MNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
-		.p_decode	= (kxdrproc_t)mnt_dec_mountres,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
+		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_replen	= MNT_dec_mountres_sz,
 		.p_statidx	= MOUNTPROC_MNT,
@@ -476,7 +461,7 @@
 	},
 	[MOUNTPROC_UMNT] = {
 		.p_proc		= MOUNTPROC_UMNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_statidx	= MOUNTPROC_UMNT,
 		.p_name		= "UMOUNT",
@@ -486,8 +471,8 @@
 static struct rpc_procinfo mnt3_procedures[] = {
 	[MOUNTPROC3_MNT] = {
 		.p_proc		= MOUNTPROC3_MNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
-		.p_decode	= (kxdrproc_t)mnt_dec_mountres3,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
+		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres3,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_replen	= MNT_dec_mountres3_sz,
 		.p_statidx	= MOUNTPROC3_MNT,
@@ -495,7 +480,7 @@
 	},
 	[MOUNTPROC3_UMNT] = {
 		.p_proc		= MOUNTPROC3_UMNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_statidx	= MOUNTPROC3_UMNT,
 		.p_name		= "UMOUNT",
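
The mount_clnt.c changes above convert the MOUNT XDR routines to the newer RPC client interface: the RPC layer now hands each ->p_encode/->p_decode method a ready-made xdr_stream (kxdreproc_t/kxdrdproc_t) instead of a raw __be32 pointer. The same shape applies to the nfs2xdr.c rewrite below. A schematic encode/decode pair for a hypothetical procedure (foo_args and foo_res are made-up types; the XDR helpers are the real ones used above):

	static void foo_xdr_enc_args(struct rpc_rqst *req, struct xdr_stream *xdr,
				     const struct foo_args *args)
	{
		__be32 *p = xdr_reserve_space(xdr, 4);

		*p = cpu_to_be32(args->value);
	}

	static int foo_xdr_dec_res(struct rpc_rqst *req, struct xdr_stream *xdr,
				   struct foo_res *res)
	{
		__be32 *p = xdr_inline_decode(xdr, 4);

		if (unlikely(p == NULL))
			return -EIO;	/* decode buffer overflow */
		res->value = be32_to_cpup(p);
		return 0;
	}

These would be wired into struct rpc_procinfo via .p_encode = (kxdreproc_t)foo_xdr_enc_args and .p_decode = (kxdrdproc_t)foo_xdr_dec_res, as the tables above do for the MOUNT procedures.
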
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 74aaf39..f32b860 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -97,9 +97,8 @@
 }
 
 /*
- * nfs_follow_mountpoint - handle crossing a mountpoint on the server
- * @dentry - dentry of mountpoint
- * @nd - nameidata info
+ * nfs_d_automount - Handle crossing a mountpoint on the server
+ * @path - The mountpoint
  *
  * When we encounter a mountpoint on the server, we want to set up
  * a mountpoint on the client too, to prevent inode numbers from
@@ -109,87 +108,65 @@
  * situation, and that different filesystems may want to use
  * different security flavours.
  */
-static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+struct vfsmount *nfs_d_automount(struct path *path)
 {
 	struct vfsmount *mnt;
-	struct nfs_server *server = NFS_SERVER(dentry->d_inode);
+	struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
 	struct dentry *parent;
 	struct nfs_fh *fh = NULL;
 	struct nfs_fattr *fattr = NULL;
 	int err;
 
-	dprintk("--> nfs_follow_mountpoint()\n");
+	dprintk("--> nfs_d_automount()\n");
 
-	err = -ESTALE;
-	if (IS_ROOT(dentry))
-		goto out_err;
+	mnt = ERR_PTR(-ESTALE);
+	if (IS_ROOT(path->dentry))
+		goto out_nofree;
 
-	err = -ENOMEM;
+	mnt = ERR_PTR(-ENOMEM);
 	fh = nfs_alloc_fhandle();
 	fattr = nfs_alloc_fattr();
 	if (fh == NULL || fattr == NULL)
-		goto out_err;
+		goto out;
 
 	dprintk("%s: enter\n", __func__);
-	dput(nd->path.dentry);
-	nd->path.dentry = dget(dentry);
 
-	/* Look it up again */
-	parent = dget_parent(nd->path.dentry);
+	/* Look it up again to get its attributes */
+	parent = dget_parent(path->dentry);
 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
-						  &nd->path.dentry->d_name,
+						  &path->dentry->d_name,
 						  fh, fattr);
 	dput(parent);
-	if (err != 0)
-		goto out_err;
+	if (err != 0) {
+		mnt = ERR_PTR(err);
+		goto out;
+	}
 
 	if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
-		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
+		mnt = nfs_do_refmount(path->mnt, path->dentry);
 	else
-		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, fh,
-				      fattr);
-	err = PTR_ERR(mnt);
+		mnt = nfs_do_submount(path->mnt, path->dentry, fh, fattr);
 	if (IS_ERR(mnt))
-		goto out_err;
+		goto out;
 
-	mntget(mnt);
-	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
-			   &nfs_automount_list);
-	if (err < 0) {
-		mntput(mnt);
-		if (err == -EBUSY)
-			goto out_follow;
-		goto out_err;
-	}
-	path_put(&nd->path);
-	nd->path.mnt = mnt;
-	nd->path.dentry = dget(mnt->mnt_root);
+	dprintk("%s: done, success\n", __func__);
+	mntget(mnt); /* prevent immediate expiration */
+	mnt_set_expiry(mnt, &nfs_automount_list);
 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+
 out:
 	nfs_free_fattr(fattr);
 	nfs_free_fhandle(fh);
-	dprintk("%s: done, returned %d\n", __func__, err);
-
-	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
-	return ERR_PTR(err);
-out_err:
-	path_put(&nd->path);
-	goto out;
-out_follow:
-	while (d_mountpoint(nd->path.dentry) &&
-	       follow_down(&nd->path))
-		;
-	err = 0;
-	goto out;
+out_nofree:
+	dprintk("<-- nfs_follow_mountpoint() = %p\n", mnt);
+	return mnt;
 }
 
 const struct inode_operations nfs_mountpoint_inode_operations = {
-	.follow_link	= nfs_follow_mountpoint,
 	.getattr	= nfs_getattr,
 };
 
 const struct inode_operations nfs_referral_inode_operations = {
-	.follow_link	= nfs_follow_mountpoint,
 };
 
 static void nfs_expire_automounts(struct work_struct *work)
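
With the namespace.c conversion above, crossing a server-side mountpoint no longer abuses ->follow_link; the VFS calls the dentry's ->d_automount method (wired up via the dentry_operations changes in dir.c above) and performs the mount itself from the returned vfsmount. A minimal, hypothetical d_automount handler, just to show the contract (example_fs_type and example_expiry_list are made up):

	static struct vfsmount *example_d_automount(struct path *path)
	{
		struct vfsmount *mnt;

		mnt = vfs_kern_mount(&example_fs_type, 0, "example", NULL);
		if (IS_ERR(mnt))
			return mnt;
		mntget(mnt);	/* keep a reference for the expiry list */
		mnt_set_expiry(mnt, &example_expiry_list);
		return mnt;	/* the VFS finishes the mount on @path */
	}
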
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5914a19..792cb13 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,584 +61,1008 @@
 #define NFS_readdirres_sz	(1)
 #define NFS_statfsres_sz	(1+NFS_info_sz)
 
-/*
- * Common NFS XDR functions as inlines
- */
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle)
-{
-	memcpy(p, fhandle->data, NFS2_FHSIZE);
-	return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle)
-{
-	/* NFSv2 handles have a fixed length */
-	fhandle->size = NFS2_FHSIZE;
-	memcpy(fhandle->data, p, NFS2_FHSIZE);
-	return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
-static inline __be32*
-xdr_encode_time(__be32 *p, struct timespec *timep)
-{
-	*p++ = htonl(timep->tv_sec);
-	/* Convert nanoseconds into microseconds */
-	*p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0);
-	return p;
-}
-
-static inline __be32*
-xdr_encode_current_server_time(__be32 *p, struct timespec *timep)
-{
-	/*
-	 * Passing the invalid value useconds=1000000 is a
-	 * Sun convention for "set to current server time".
-	 * It's needed to make permissions checks for the
-	 * "touch" program across v2 mounts to Solaris and
-	 * Irix boxes work correctly. See description of
-	 * sattr in section 6.1 of "NFS Illustrated" by
-	 * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
-	 */
-	*p++ = htonl(timep->tv_sec);
-	*p++ = htonl(1000000);
-	return p;
-}
-
-static inline __be32*
-xdr_decode_time(__be32 *p, struct timespec *timep)
-{
-	timep->tv_sec = ntohl(*p++);
-	/* Convert microseconds into nanoseconds */
-	timep->tv_nsec = ntohl(*p++) * 1000;
-	return p;
-}
-
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
-{
-	u32 rdev, type;
-	type = ntohl(*p++);
-	fattr->mode = ntohl(*p++);
-	fattr->nlink = ntohl(*p++);
-	fattr->uid = ntohl(*p++);
-	fattr->gid = ntohl(*p++);
-	fattr->size = ntohl(*p++);
-	fattr->du.nfs2.blocksize = ntohl(*p++);
-	rdev = ntohl(*p++);
-	fattr->du.nfs2.blocks = ntohl(*p++);
-	fattr->fsid.major = ntohl(*p++);
-	fattr->fsid.minor = 0;
-	fattr->fileid = ntohl(*p++);
-	p = xdr_decode_time(p, &fattr->atime);
-	p = xdr_decode_time(p, &fattr->mtime);
-	p = xdr_decode_time(p, &fattr->ctime);
-	fattr->valid |= NFS_ATTR_FATTR_V2;
-	fattr->rdev = new_decode_dev(rdev);
-	if (type == NFCHR && rdev == NFS2_FIFO_DEV) {
-		fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
-		fattr->rdev = 0;
-	}
-	return p;
-}
-
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
-{
-	const __be32 not_set = __constant_htonl(0xFFFFFFFF);
-
-	*p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set;
-	*p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
-	*p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
-	*p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
-
-	if (attr->ia_valid & ATTR_ATIME_SET) {
-		p = xdr_encode_time(p, &attr->ia_atime);
-	} else if (attr->ia_valid & ATTR_ATIME) {
-		p = xdr_encode_current_server_time(p, &attr->ia_atime);
-	} else {
-		*p++ = not_set;
-		*p++ = not_set;
-	}
-
-	if (attr->ia_valid & ATTR_MTIME_SET) {
-		p = xdr_encode_time(p, &attr->ia_mtime);
-	} else if (attr->ia_valid & ATTR_MTIME) {
-		p = xdr_encode_current_server_time(p, &attr->ia_mtime);
-	} else {
-		*p++ = not_set;	
-		*p++ = not_set;
-	}
-  	return p;
-}
 
 /*
- * NFS encode functions
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
  */
-/*
- * Encode file handle argument
- * GETATTR, READLINK, STATFS
- */
-static int
-nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
-{
-	p = xdr_encode_fhandle(p, fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SETATTR arguments
- */
-static int
-nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode directory ops argument
- * LOOKUP, RMDIR
- */
-static int
-nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode REMOVE argument
- */
-static int
-nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name.name, args->name.len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
- */
-static int
-nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+				 unsigned int base, unsigned int len,
+				 unsigned int bufsize)
 {
 	struct rpc_auth	*auth = req->rq_cred->cr_auth;
 	unsigned int replen;
-	u32 offset = (u32)args->offset;
-	u32 count = args->count;
 
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(offset);
-	*p++ = htonl(count);
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen,
-			 args->pages, args->pgbase, count);
-	req->rq_rcv_buf.flags |= XDRBUF_READ;
-	return 0;
+	replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
 }
 
 /*
- * Decode READ reply
+ * Handle decode buffer overflows out-of-line.
  */
-static int
-nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
-{
-	struct kvec *iov = req->rq_rcv_buf.head;
-	size_t hdrlen;
-	u32 count, recvd;
-	int status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_fattr(p, res->fattr);
-
-	count = ntohl(*p++);
-	res->eof = 0;
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READ reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READ header is short. iovec will be shifted.\n");
-		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
-	}
-
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (count > recvd) {
-		dprintk("NFS: server cheating in read reply: "
-			"count %u > recvd %u\n", count, recvd);
-		count = recvd;
-	}
-
-	dprintk("RPC:      readres OK count %u\n", count);
-	if (count < res->count)
-		res->count = count;
-
-	return count;
-}
-
-
-/*
- * Write arguments. Splice the buffer to be written into the iovec.
- */
-static int
-nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	u32 offset = (u32)args->offset;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(offset);
-	*p++ = htonl(offset);
-	*p++ = htonl(count);
-	*p++ = htonl(count);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	/* Copy the page array */
-	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
-	sndbuf->flags |= XDRBUF_WRITE;
-	return 0;
-}
-
-/*
- * Encode create arguments
- * CREATE, MKDIR
- */
-static int
-nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode RENAME arguments
- */
-static int
-nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
-{
-	p = xdr_encode_fhandle(p, args->old_dir);
-	p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
-	p = xdr_encode_fhandle(p, args->new_dir);
-	p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode LINK arguments
- */
-static int
-nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_fhandle(p, args->tofh);
-	p = xdr_encode_array(p, args->toname, args->tolen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SYMLINK arguments
- */
-static int
-nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	size_t pad;
-
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_array(p, args->fromname, args->fromlen);
-	*p++ = htonl(args->pathlen);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	xdr_encode_pages(sndbuf, args->pages, 0, args->pathlen);
-
-	/*
-	 * xdr_encode_pages may have added a few bytes to ensure the
-	 * pathname ends on a 4-byte boundary.  Start encoding the
-	 * attributes after the pad bytes.
-	 */
-	pad = sndbuf->tail->iov_len;
-	if (pad > 0)
-		p++;
-	p = xdr_encode_sattr(p, args->sattr);
-	sndbuf->len += xdr_adjust_iovec(sndbuf->tail, p) - pad;
-	return 0;
-}
-
-/*
- * Encode arguments to readdir call
- */
-static int
-nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->cookie);
-	*p++ = htonl(count); /* see above */
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
-	return 0;
-}
-
-/*
- * Decode the result of a readdir call.
- * We're not really decoding anymore, we just leave the buffer untouched
- * and only check that it is syntactically correct.
- * The real decoding happens in nfs_decode_entry below, called directly
- * from nfs_readdir for each entry.
- */
-static int
-nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
-	struct page **page;
-	size_t hdrlen;
-	unsigned int pglen, recvd;
-	int status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READDIR reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-
-	pglen = rcvbuf->page_len;
-	recvd = rcvbuf->len - hdrlen;
-	if (pglen > recvd)
-		pglen = recvd;
-	page = rcvbuf->pages;
-	return pglen;
-}
-
 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	dprintk("nfs: %s: prematurely hit end of receive buffer. "
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
 		"Remaining buffer length is %tu words.\n",
 		func, xdr->end - xdr->p);
 }
 
-__be32 *
-nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+
+/*
+ * Encode/decode NFSv2 basic data types
+ *
+ * Basic NFSv2 data types are defined in section 2.3 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+/*
+ *	typedef opaque	nfsdata<>;
+ */
+static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result)
+{
+	u32 recvd, count;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, count);
+	result->eof = 0;	/* NFSv2 does not pass EOF flag on the wire. */
+	result->count = count;
+	return count;
+out_cheating:
+	dprintk("NFS: server cheating in read result: "
+		"count %u > recvd %u\n", count, recvd);
+	count = recvd;
+	goto out;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	enum stat {
+ *		NFS_OK = 0,
+ *		NFSERR_PERM = 1,
+ *		NFSERR_NOENT = 2,
+ *		NFSERR_IO = 5,
+ *		NFSERR_NXIO = 6,
+ *		NFSERR_ACCES = 13,
+ *		NFSERR_EXIST = 17,
+ *		NFSERR_NODEV = 19,
+ *		NFSERR_NOTDIR = 20,
+ *		NFSERR_ISDIR = 21,
+ *		NFSERR_FBIG = 27,
+ *		NFSERR_NOSPC = 28,
+ *		NFSERR_ROFS = 30,
+ *		NFSERR_NAMETOOLONG = 63,
+ *		NFSERR_NOTEMPTY = 66,
+ *		NFSERR_DQUOT = 69,
+ *		NFSERR_STALE = 70,
+ *		NFSERR_WFLUSH = 99
+ *	};
+ */
+static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
 {
 	__be32 *p;
+
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (!ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
-		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.2.  ftype
+ *
+ *	enum ftype {
+ *		NFNON = 0,
+ *		NFREG = 1,
+ *		NFDIR = 2,
+ *		NFBLK = 3,
+ *		NFCHR = 4,
+ *		NFLNK = 5
+ *	};
+ *
+ */
+static __be32 *xdr_decode_ftype(__be32 *p, u32 *type)
+{
+	*type = be32_to_cpup(p++);
+	if (unlikely(*type > NF2FIFO))
+		*type = NFBAD;
+	return p;
+}
+
+/*
+ * 2.3.3.  fhandle
+ *
+ *	typedef opaque fhandle[FHSIZE];
+ */
+static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	BUG_ON(fh->size != NFS2_FHSIZE);
+	p = xdr_reserve_space(xdr, NFS2_FHSIZE);
+	memcpy(p, fh->data, NFS2_FHSIZE);
+}
+
+static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	fh->size = NFS2_FHSIZE;
+	memcpy(fh->data, p, NFS2_FHSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.4.  timeval
+ *
+ *	struct timeval {
+ *		unsigned int seconds;
+ *		unsigned int useconds;
+ *	};
+ */
+static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	if (timep->tv_nsec != 0)
+		*p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC);
+	else
+		*p++ = cpu_to_be32(0);
+	return p;
+}
+
+/*
+ * Passing the invalid value useconds=1000000 is a Sun convention for
+ * "set to current server time".  It's needed to make permissions checks
+ * for the "touch" program across v2 mounts to Solaris and Irix servers
+ * work correctly.  See description of sattr in section 6.1 of "NFS
+ * Illustrated" by Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5.
+ */
+static __be32 *xdr_encode_current_server_time(__be32 *p,
+					      const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	*p++ = cpu_to_be32(1000000);
+	return p;
+}
+
+static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep)
+{
+	timep->tv_sec = be32_to_cpup(p++);
+	timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC;
+	return p;
+}
+
+/*
+ * 2.3.5.  fattr
+ *
+ *	struct fattr {
+ *		ftype		type;
+ *		unsigned int	mode;
+ *		unsigned int	nlink;
+ *		unsigned int	uid;
+ *		unsigned int	gid;
+ *		unsigned int	size;
+ *		unsigned int	blocksize;
+ *		unsigned int	rdev;
+ *		unsigned int	blocks;
+ *		unsigned int	fsid;
+ *		unsigned int	fileid;
+ *		timeval		atime;
+ *		timeval		mtime;
+ *		timeval		ctime;
+ *	};
+ *
+ */
+static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+	u32 rdev, type;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fattr->valid |= NFS_ATTR_FATTR_V2;
+
+	p = xdr_decode_ftype(p, &type);
+
+	fattr->mode = be32_to_cpup(p++);
+	fattr->nlink = be32_to_cpup(p++);
+	fattr->uid = be32_to_cpup(p++);
+	fattr->gid = be32_to_cpup(p++);
+	fattr->size = be32_to_cpup(p++);
+	fattr->du.nfs2.blocksize = be32_to_cpup(p++);
+
+	rdev = be32_to_cpup(p++);
+	fattr->rdev = new_decode_dev(rdev);
+	if (type == (u32)NFCHR && rdev == (u32)NFS2_FIFO_DEV) {
+		fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
+		fattr->rdev = 0;
 	}
 
-	p = xdr_inline_decode(xdr, 8);
-	if (unlikely(!p))
-		goto out_overflow;
+	fattr->du.nfs2.blocks = be32_to_cpup(p++);
+	fattr->fsid.major = be32_to_cpup(p++);
+	fattr->fsid.minor = 0;
+	fattr->fileid = be32_to_cpup(p++);
 
-	entry->ino	  = ntohl(*p++);
-	entry->len	  = ntohl(*p++);
+	p = xdr_decode_time(p, &fattr->atime);
+	p = xdr_decode_time(p, &fattr->mtime);
+	xdr_decode_time(p, &fattr->ctime);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
 
-	p = xdr_inline_decode(xdr, entry->len + 4);
-	if (unlikely(!p))
+/*
+ * 2.3.6.  sattr
+ *
+ *	struct sattr {
+ *		unsigned int	mode;
+ *		unsigned int	uid;
+ *		unsigned int	gid;
+ *		unsigned int	size;
+ *		timeval		atime;
+ *		timeval		mtime;
+ *	};
+ */
+
+#define NFS2_SATTR_NOT_SET	(0xffffffff)
+
+static __be32 *xdr_time_not_set(__be32 *p)
+{
+	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	return p;
+}
+
+static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS_sattr_sz << 2);
+
+	if (attr->ia_valid & ATTR_MODE)
+		*p++ = cpu_to_be32(attr->ia_mode);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_UID)
+		*p++ = cpu_to_be32(attr->ia_uid);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_GID)
+		*p++ = cpu_to_be32(attr->ia_gid);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_SIZE)
+		*p++ = cpu_to_be32((u32)attr->ia_size);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+
+	if (attr->ia_valid & ATTR_ATIME_SET)
+		p = xdr_encode_time(p, &attr->ia_atime);
+	else if (attr->ia_valid & ATTR_ATIME)
+		p = xdr_encode_current_server_time(p, &attr->ia_atime);
+	else
+		p = xdr_time_not_set(p);
+	if (attr->ia_valid & ATTR_MTIME_SET)
+		xdr_encode_time(p, &attr->ia_mtime);
+	else if (attr->ia_valid & ATTR_MTIME)
+		xdr_encode_current_server_time(p, &attr->ia_mtime);
+	else
+		xdr_time_not_set(p);
+}
+
+/*
+ * 2.3.7.  filename
+ *
+ *	typedef string filename<MAXNAMLEN>;
+ */
+static void encode_filename(struct xdr_stream *xdr,
+			    const char *name, u32 length)
+{
+	__be32 *p;
+
+	BUG_ON(length > NFS2_MAXNAMLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+static int decode_filename_inline(struct xdr_stream *xdr,
+				  const char **name, u32 *length)
+{
+	__be32 *p;
+	u32 count;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	entry->name	  = (const char *) p;
-	p		 += XDR_QUADLEN(entry->len);
-	entry->prev_cookie	  = entry->cookie;
-	entry->cookie	  = ntohl(*p++);
+	count = be32_to_cpup(p);
+	if (count > NFS3_MAXNAMLEN)
+		goto out_nametoolong;
+	p = xdr_inline_decode(xdr, count);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*name = (const char *)p;
+	*length = count;
+	return 0;
+out_nametoolong:
+	dprintk("NFS: returned filename too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.8.  path
+ *
+ *	typedef string path<MAXPATHLEN>;
+ */
+static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
+{
+	__be32 *p;
+
+	BUG_ON(length > NFS2_MAXPATHLEN);
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(length);
+	xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_path(struct xdr_stream *xdr)
+{
+	u32 length, recvd;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p);
+	if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
+		goto out_size;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(length > recvd))
+		goto out_cheating;
+
+	xdr_read_pages(xdr, length);
+	xdr_terminate_string(xdr->buf, length);
+	return 0;
+out_size:
+	dprintk("NFS: returned pathname too long: %u\n", length);
+	return -ENAMETOOLONG;
+out_cheating:
+	dprintk("NFS: server cheating in pathname result: "
+		"length %u > received %u\n", length, recvd);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.9.  attrstat
+ *
+ *	union attrstat switch (stat status) {
+ *	case NFS_OK:
+ *		fattr attributes;
+ *	default:
+ *		void;
+ *	};
+ */
+static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_fattr(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.3.10.  diropargs
+ *
+ *	struct diropargs {
+ *		fhandle  dir;
+ *		filename name;
+ *	};
+ */
+static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh,
+			     const char *name, u32 length)
+{
+	encode_fhandle(xdr, fh);
+	encode_filename(xdr, name, length);
+}
+
+/*
+ * 2.3.11.  diropres
+ *
+ *	union diropres switch (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			fhandle file;
+ *			fattr   attributes;
+ *		} diropok;
+ *	default:
+ *		void;
+ *	};
+ */
+static int decode_diropok(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+	int error;
+
+	error = decode_fhandle(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_fattr(xdr, result->fattr);
+out:
+	return error;
+}
+
+static int decode_diropres(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_diropok(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+
+/*
+ * NFSv2 XDR encode functions
+ *
+ * NFSv2 argument types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nfs_fh *fh)
+{
+	encode_fhandle(xdr, fh);
+}
+
+/*
+ * 2.2.3.  sattrargs
+ *
+ *	struct sattrargs {
+ *		fhandle file;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_sattrargs *args)
+{
+	encode_fhandle(xdr, args->fh);
+	encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_diropargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name, args->len);
+}
+
+static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs_readlinkargs *args)
+{
+	encode_fhandle(xdr, args->fh);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->pglen, NFS_readlinkres_sz);
+}
+
+/*
+ * 2.2.7.  readargs
+ *
+ *	struct readargs {
+ *		fhandle file;
+ *		unsigned offset;
+ *		unsigned count;
+ *		unsigned totalcount;
+ *	};
+ */
+static void encode_readargs(struct xdr_stream *xdr,
+			    const struct nfs_readargs *args)
+{
+	u32 offset = args->offset;
+	u32 count = args->count;
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(count);
+	*p = cpu_to_be32(count);
+}
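+/*
+ * RFC 1094 defines readargs with an extra "totalcount" field that the
+ * protocol never uses, which is why count is written twice above.  As
+ * a rough example, a 4096-byte read at offset 8192 is encoded as the
+ * three words 8192, 4096, 4096 following the file handle (the actual
+ * count in a real request depends on the mount's rsize).
+ */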
+
+static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nfs_readargs *args)
+{
+	encode_readargs(xdr, args);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->count, NFS_readres_sz);
+	req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 2.2.9.  writeargs
+ *
+ *	struct writeargs {
+ *		fhandle file;
+ *		unsigned beginoffset;
+ *		unsigned offset;
+ *		unsigned totalcount;
+ *		nfsdata data;
+ *	};
+ */
+static void encode_writeargs(struct xdr_stream *xdr,
+			     const struct nfs_writeargs *args)
+{
+	u32 offset = args->offset;
+	u32 count = args->count;
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(count);
+
+	/* nfsdata */
+	*p = cpu_to_be32(count);
+	xdr_write_pages(xdr, args->pages, args->pgbase, count);
+}
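+/*
+ * Likewise, RFC 1094's writeargs carries unused "beginoffset" and
+ * "totalcount" fields, so the encoder simply repeats offset and count
+ * for those slots; the final count is the length of the nfsdata
+ * opaque that follows.
+ */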
+
+static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_writeargs *args)
+{
+	encode_writeargs(xdr, args);
+	xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 2.2.10.  createargs
+ *
+ *	struct createargs {
+ *		diropargs where;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_createargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name, args->len);
+	encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_removeargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 2.2.12.  renameargs
+ *
+ *	struct renameargs {
+ *		diropargs from;
+ *		diropargs to;
+ *	};
+ */
+static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_renameargs *args)
+{
+	const struct qstr *old = args->old_name;
+	const struct qstr *new = args->new_name;
+
+	encode_diropargs(xdr, args->old_dir, old->name, old->len);
+	encode_diropargs(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 2.2.13.  linkargs
+ *
+ *	struct linkargs {
+ *		fhandle from;
+ *		diropargs to;
+ *	};
+ */
+static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nfs_linkargs *args)
+{
+	encode_fhandle(xdr, args->fromfh);
+	encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 2.2.14.  symlinkargs
+ *
+ *	struct symlinkargs {
+ *		diropargs from;
+ *		path to;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_symlinkargs *args)
+{
+	encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
+	encode_path(xdr, args->pages, args->pathlen);
+	encode_sattr(xdr, args->sattr);
+}
+
+/*
+ * 2.2.17.  readdirargs
+ *
+ *	struct readdirargs {
+ *		fhandle dir;
+ *		nfscookie cookie;
+ *		unsigned count;
+ *	};
+ */
+static void encode_readdirargs(struct xdr_stream *xdr,
+			       const struct nfs_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	*p++ = cpu_to_be32(args->cookie);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_readdirargs *args)
+{
+	encode_readdirargs(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+					args->count, NFS_readdirres_sz);
+}
+
+/*
+ * NFSv2 XDR decode functions
+ *
+ * NFSv2 result types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_fattr *result)
+{
+	return decode_attrstat(xdr, result);
+}
+
+static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_diropok *result)
+{
+	return decode_diropres(xdr, result);
+}
+
+/*
+ * 2.2.6.  readlinkres
+ *
+ *	union readlinkres switch (stat status) {
+ *	case NFS_OK:
+ *		path data;
+ *	default:
+ *		void;
+ *	};
+ */
+static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
+				    struct xdr_stream *xdr, void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_path(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.2.7.  readres
+ *
+ *	union readres switch (stat status) {
+ *	case NFS_OK:
+ *		fattr attributes;
+ *		nfsdata data;
+ *	default:
+ *		void;
+ *	};
+ */
+static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_readres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_fattr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_nfsdata(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_writeres *result)
+{
+	/* All NFSv2 writes are "file sync" writes */
+	result->verf->committed = NFS_FILE_SYNC;
+	return decode_attrstat(xdr, result->fattr);
+}
+
+/**
+ * nfs2_decode_dirent - Decode a single NFSv2 directory entry stored in
+ *                      the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero on success, or a negative errno value on failure.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 2.2.17.  entry
+ *
+ *	struct entry {
+ *		unsigned	fileid;
+ *		filename	name;
+ *		nfscookie	cookie;
+ *		entry		*nextentry;
+ *	};
+ */
+int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
+{
+	__be32 *p;
+	int error;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p++ == xdr_zero) {
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p++ == xdr_zero)
+			return -EAGAIN;
+		entry->eof = 1;
+		return -EBADCOOKIE;
+	}
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	entry->ino = be32_to_cpup(p);
+
+	error = decode_filename_inline(xdr, &entry->name, &entry->len);
+	if (unlikely(error))
+		return error;
+
+	/*
+	 * The type (size and byte order) of nfscookie isn't defined in
+	 * RFC 1094.  This implementation assumes that it's an XDR uint32.
+	 */
+	entry->prev_cookie = entry->cookie;
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	entry->cookie = be32_to_cpup(p);
 
 	entry->d_type = DT_UNKNOWN;
 
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
+	return 0;
 
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EAGAIN);
+	return -EAGAIN;
 }
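+/*
+ * On the wire, each entry is preceded by a "value follows" boolean;
+ * when that boolean is FALSE, the next word is the eof flag.  That is
+ * why the decoder above distinguishes -EAGAIN (this page of entries
+ * is exhausted, but the directory is not) from -EBADCOOKIE with
+ * entry->eof set (the server reported end of directory).
+ */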
 
 /*
- * NFS XDR decode functions
+ * 2.2.17.  readdirres
+ *
+ *	union readdirres switch (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			entry *entries;
+ *			bool eof;
+ *		} readdirok;
+ *	default:
+ *		void;
+ *	};
+ *
+ * Read the directory contents into the page cache, but don't
+ * touch them.  The actual decoding is done by nfs2_decode_dirent()
+ * during subsequent nfs_readdir() calls.
  */
-/*
- * Decode simple status reply
- */
-static int
-nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
+static int decode_readdirok(struct xdr_stream *xdr)
 {
-	int	status;
-
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	return status;
-}
-
-/*
- * Decode attrstat reply
- * GETATTR, SETATTR, WRITE
- */
-static int
-nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	xdr_decode_fattr(p, fattr);
-	return 0;
-}
-
-/*
- * Decode diropres reply
- * LOOKUP, CREATE, MKDIR
- */
-static int
-nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_fhandle(p, res->fh);
-	xdr_decode_fattr(p, res->fattr);
-	return 0;
-}
-
-/*
- * Encode READLINK args
- */
-static int
-nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
-	return 0;
-}
-
-/*
- * Decode READLINK reply
- */
-static int
-nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
+	u32 recvd, pglen;
 	size_t hdrlen;
-	u32 len, recvd;
-	int	status;
 
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	/* Convert length of symlink */
-	len = ntohl(*p++);
-	if (len >= rcvbuf->page_len) {
-		dprintk("nfs: server returned giant symlink!\n");
-		return -ENAMETOOLONG;
-	}
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READLINK reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (recvd < len) {
-		dprintk("NFS: server cheating in readlink reply: "
-				"count %u > recvd %u\n", len, recvd);
-		return -EIO;
-	}
+	pglen = xdr->buf->page_len;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(pglen > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, pglen);
+	return pglen;
+out_cheating:
+	dprintk("NFS: server cheating in readdir result: "
+		"pglen %u > recvd %u\n", pglen, recvd);
+	pglen = recvd;
+	goto out;
+}
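+/*
+ * Note that, unlike the other decoders here, decode_readdirok()
+ * clamps rather than fails: if the server claims more page data than
+ * was actually received, pglen is truncated to recvd so that later
+ * entry decoding never reads beyond the end of the reply.
+ */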
 
-	xdr_terminate_string(rcvbuf, len);
-	return 0;
+static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
+				   struct xdr_stream *xdr, void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_readdirok(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode WRITE reply
+ * 2.2.18.  statfsres
+ *
+ *	union statfsres (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			unsigned tsize;
+ *			unsigned bsize;
+ *			unsigned blocks;
+ *			unsigned bfree;
+ *			unsigned bavail;
+ *		} info;
+ *	default:
+ *		void;
+ *	};
  */
-static int
-nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
 {
-	res->verf->committed = NFS_FILE_SYNC;
-	return nfs_xdr_attrstat(req, p, res->fattr);
-}
+	__be32 *p;
 
-/*
- * Decode STATFS reply
- */
-static int
-nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-
-	res->tsize  = ntohl(*p++);
-	res->bsize  = ntohl(*p++);
-	res->blocks = ntohl(*p++);
-	res->bfree  = ntohl(*p++);
-	res->bavail = ntohl(*p++);
+	p = xdr_inline_decode(xdr, NFS_info_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->tsize  = be32_to_cpup(p++);
+	result->bsize  = be32_to_cpup(p++);
+	result->blocks = be32_to_cpup(p++);
+	result->bfree  = be32_to_cpup(p++);
+	result->bavail = be32_to_cpup(p);
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
+static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs2_fsstat *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_info(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+
 /*
  * We need to translate between nfs status return values and
  * the local errno values which may not be the same.
  */
-static struct {
+static const struct {
 	int stat;
 	int errno;
 } nfs_errtbl[] = {
@@ -678,28 +1102,30 @@
 	{ -1,			-EIO		}
 };
 
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized.  This function is used jointly by NFSv2 and NFSv3.
  */
-int
-nfs_stat_to_errno(int stat)
+int nfs_stat_to_errno(enum nfs_stat status)
 {
 	int i;
 
 	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
-		if (nfs_errtbl[i].stat == stat)
+		if (nfs_errtbl[i].stat == (int)status)
 			return nfs_errtbl[i].errno;
 	}
-	dprintk("nfs_stat_to_errno: bad nfs status return value: %d\n", stat);
+	dprintk("NFS: Unrecognized nfs status value: %u\n", status);
 	return nfs_errtbl[i].errno;
 }
 
 #define PROC(proc, argtype, restype, timer)				\
 [NFSPROC_##proc] = {							\
 	.p_proc	    =  NFSPROC_##proc,					\
-	.p_encode   =  (kxdrproc_t) nfs_xdr_##argtype,			\
-	.p_decode   =  (kxdrproc_t) nfs_xdr_##restype,			\
+	.p_encode   =  (kxdreproc_t)nfs2_xdr_enc_##argtype,		\
+	.p_decode   =  (kxdrdproc_t)nfs2_xdr_dec_##restype,		\
 	.p_arglen   =  NFS_##argtype##_sz,				\
 	.p_replen   =  NFS_##restype##_sz,				\
 	.p_timer    =  timer,						\
@@ -707,21 +1133,21 @@
 	.p_name     =  #proc,						\
 	}
 struct rpc_procinfo	nfs_procedures[] = {
-    PROC(GETATTR,	fhandle,	attrstat, 1),
-    PROC(SETATTR,	sattrargs,	attrstat, 0),
-    PROC(LOOKUP,	diropargs,	diropres, 2),
-    PROC(READLINK,	readlinkargs,	readlinkres, 3),
-    PROC(READ,		readargs,	readres, 3),
-    PROC(WRITE,		writeargs,	writeres, 4),
-    PROC(CREATE,	createargs,	diropres, 0),
-    PROC(REMOVE,	removeargs,	stat, 0),
-    PROC(RENAME,	renameargs,	stat, 0),
-    PROC(LINK,		linkargs,	stat, 0),
-    PROC(SYMLINK,	symlinkargs,	stat, 0),
-    PROC(MKDIR,		createargs,	diropres, 0),
-    PROC(RMDIR,		diropargs,	stat, 0),
-    PROC(READDIR,	readdirargs,	readdirres, 3),
-    PROC(STATFS,	fhandle,	statfsres, 0),
+	PROC(GETATTR,	fhandle,	attrstat,	1),
+	PROC(SETATTR,	sattrargs,	attrstat,	0),
+	PROC(LOOKUP,	diropargs,	diropres,	2),
+	PROC(READLINK,	readlinkargs,	readlinkres,	3),
+	PROC(READ,	readargs,	readres,	3),
+	PROC(WRITE,	writeargs,	writeres,	4),
+	PROC(CREATE,	createargs,	diropres,	0),
+	PROC(REMOVE,	removeargs,	stat,		0),
+	PROC(RENAME,	renameargs,	stat,		0),
+	PROC(LINK,	linkargs,	stat,		0),
+	PROC(SYMLINK,	symlinkargs,	stat,		0),
+	PROC(MKDIR,	createargs,	diropres,	0),
+	PROC(RMDIR,	diropargs,	stat,		0),
+	PROC(READDIR,	readdirargs,	readdirres,	3),
+	PROC(STATFS,	fhandle,	statfsres,	0),
 };
 
 struct rpc_version		nfs_version2 = {
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9f88c5f..2743427 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -311,8 +311,8 @@
 	if (!nfs_server_capable(inode, NFS_CAP_ACLS))
 		goto out;
 
-	/* We are doing this here, because XDR marshalling can only
-	   return -ENOMEM. */
+	/* We are doing this here because XDR marshalling does not
+	 * return any results; it calls BUG() instead. */
 	status = -ENOSPC;
 	if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES)
 		goto out;
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index f6cc60f..183c6b1 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -37,18 +37,16 @@
 #define NFS3_filename_sz	(1+(NFS3_MAXNAMLEN>>2))
 #define NFS3_path_sz		(1+(NFS3_MAXPATHLEN>>2))
 #define NFS3_fattr_sz		(21)
-#define NFS3_wcc_attr_sz		(6)
+#define NFS3_cookieverf_sz	(NFS3_COOKIEVERFSIZE>>2)
+#define NFS3_wcc_attr_sz	(6)
 #define NFS3_pre_op_attr_sz	(1+NFS3_wcc_attr_sz)
 #define NFS3_post_op_attr_sz	(1+NFS3_fattr_sz)
-#define NFS3_wcc_data_sz		(NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
-#define NFS3_fsstat_sz		
-#define NFS3_fsinfo_sz		
-#define NFS3_pathconf_sz		
-#define NFS3_entry_sz		(NFS3_filename_sz+3)
-
-#define NFS3_sattrargs_sz	(NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_wcc_data_sz	(NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
 #define NFS3_diropargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
-#define NFS3_removeargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
+
+#define NFS3_getattrargs_sz	(NFS3_fh_sz)
+#define NFS3_setattrargs_sz	(NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_lookupargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
 #define NFS3_accessargs_sz	(NFS3_fh_sz+1)
 #define NFS3_readlinkargs_sz	(NFS3_fh_sz)
 #define NFS3_readargs_sz	(NFS3_fh_sz+3)
@@ -57,14 +55,16 @@
 #define NFS3_mkdirargs_sz	(NFS3_diropargs_sz+NFS3_sattr_sz)
 #define NFS3_symlinkargs_sz	(NFS3_diropargs_sz+1+NFS3_sattr_sz)
 #define NFS3_mknodargs_sz	(NFS3_diropargs_sz+2+NFS3_sattr_sz)
+#define NFS3_removeargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
 #define NFS3_renameargs_sz	(NFS3_diropargs_sz+NFS3_diropargs_sz)
 #define NFS3_linkargs_sz		(NFS3_fh_sz+NFS3_diropargs_sz)
-#define NFS3_readdirargs_sz	(NFS3_fh_sz+2)
+#define NFS3_readdirargs_sz	(NFS3_fh_sz+NFS3_cookieverf_sz+3)
+#define NFS3_readdirplusargs_sz	(NFS3_fh_sz+NFS3_cookieverf_sz+4)
 #define NFS3_commitargs_sz	(NFS3_fh_sz+3)
 
-#define NFS3_attrstat_sz	(1+NFS3_fattr_sz)
-#define NFS3_wccstat_sz		(1+NFS3_wcc_data_sz)
-#define NFS3_removeres_sz	(NFS3_wccstat_sz)
+#define NFS3_getattrres_sz	(1+NFS3_fattr_sz)
+#define NFS3_setattrres_sz	(1+NFS3_wcc_data_sz)
+#define NFS3_removeres_sz	(NFS3_setattrres_sz)
 #define NFS3_lookupres_sz	(1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
 #define NFS3_accessres_sz	(1+NFS3_post_op_attr_sz+1)
 #define NFS3_readlinkres_sz	(1+NFS3_post_op_attr_sz+1)
@@ -100,1079 +100,2365 @@
 	[NF3FIFO] = S_IFIFO,
 };
 
+/*
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
+ */
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+				 unsigned int base, unsigned int len,
+				 unsigned int bufsize)
+{
+	struct rpc_auth	*auth = req->rq_cred->cr_auth;
+	unsigned int replen;
+
+	replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
+}
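+/*
+ * replen is computed in 32-bit XDR words and converted to bytes
+ * (replen << 2) when handed to xdr_inline_pages().  As an illustrative
+ * example only, since au_rslack varies with the auth flavor: if the
+ * RPC reply header plus slack comes to 8 words and bufsize is
+ * NFS3_readlinkres_sz, the page data is spliced in starting
+ * 4 * (8 + NFS3_readlinkres_sz) bytes into the receive buffer.
+ */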
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	dprintk("nfs: %s: prematurely hit end of receive buffer. "
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
 		"Remaining buffer length is %tu words.\n",
 		func, xdr->end - xdr->p);
 }
 
+
 /*
- * Common NFS XDR functions as inlines
+ * Encode/decode NFSv3 basic data types
+ *
+ * Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
  */
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fh)
+
+static void encode_uint32(struct xdr_stream *xdr, u32 value)
 {
-	return xdr_encode_array(p, fh->data, fh->size);
+	__be32 *p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
 }
 
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh)
-{
-	if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) {
-		memcpy(fh->data, p, fh->size);
-		return p + XDR_QUADLEN(fh->size);
-	}
-	return NULL;
-}
-
-static inline __be32 *
-xdr_decode_fhandle_stream(struct xdr_stream *xdr, struct nfs_fh *fh)
+static int decode_uint32(struct xdr_stream *xdr, u32 *value)
 {
 	__be32 *p;
+
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	fh->size = ntohl(*p++);
-
-	if (fh->size <= NFS3_FHSIZE) {
-		p = xdr_inline_decode(xdr, fh->size);
-		if (unlikely(!p))
-			goto out_overflow;
-		memcpy(fh->data, p, fh->size);
-		return p + XDR_QUADLEN(fh->size);
-	}
-	return NULL;
-
+	*value = be32_to_cpup(p);
+	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EIO);
+	return -EIO;
+}
+
+static int decode_uint64(struct xdr_stream *xdr, u64 *value)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 8);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	xdr_decode_hyper(p, value);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
 /*
- * Encode/decode time.
+ * fileid3
+ *
+ *	typedef uint64 fileid3;
  */
-static inline __be32 *
-xdr_encode_time3(__be32 *p, struct timespec *timep)
+static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
 {
-	*p++ = htonl(timep->tv_sec);
-	*p++ = htonl(timep->tv_nsec);
-	return p;
+	return xdr_decode_hyper(p, fileid);
 }
 
-static inline __be32 *
-xdr_decode_time3(__be32 *p, struct timespec *timep)
+static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
 {
-	timep->tv_sec = ntohl(*p++);
-	timep->tv_nsec = ntohl(*p++);
-	return p;
+	return decode_uint64(xdr, fileid);
 }
 
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * filename3
+ *
+ *	typedef string filename3<>;
+ */
+static void encode_filename3(struct xdr_stream *xdr,
+			     const char *name, u32 length)
 {
-	unsigned int	type, major, minor;
-	umode_t		fmode;
+	__be32 *p;
 
-	type = ntohl(*p++);
+	BUG_ON(length > NFS3_MAXNAMLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+static int decode_inline_filename3(struct xdr_stream *xdr,
+				   const char **name, u32 *length)
+{
+	__be32 *p;
+	u32 count;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	if (count > NFS3_MAXNAMLEN)
+		goto out_nametoolong;
+	p = xdr_inline_decode(xdr, count);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*name = (const char *)p;
+	*length = count;
+	return 0;
+
+out_nametoolong:
+	dprintk("NFS: returned filename too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * nfspath3
+ *
+ *	typedef string nfspath3<>;
+ */
+static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
+			    const u32 length)
+{
+	BUG_ON(length > NFS3_MAXPATHLEN);
+	encode_uint32(xdr, length);
+	xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_nfspath3(struct xdr_stream *xdr)
+{
+	u32 recvd, count;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
+		goto out_nametoolong;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
+
+	xdr_read_pages(xdr, count);
+	xdr_terminate_string(xdr->buf, count);
+	return 0;
+
+out_nametoolong:
+	dprintk("NFS: returned pathname too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_cheating:
+	dprintk("NFS: server cheating in pathname result: "
+		"count %u > recvd %u\n", count, recvd);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * cookie3
+ *
+ *	typedef uint64 cookie3
+ */
+static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
+{
+	return xdr_encode_hyper(p, cookie);
+}
+
+static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
+{
+	return decode_uint64(xdr, cookie);
+}
+
+/*
+ * cookieverf3
+ *
+ *	typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
+ */
+static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
+{
+	memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
+	return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
+}
+
+static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * createverf3
+ *
+ *	typedef opaque createverf3[NFS3_CREATEVERFSIZE];
+ */
+static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
+	memcpy(p, verifier, NFS3_CREATEVERFSIZE);
+}
+
+static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	memcpy(verifier, p, NFS3_WRITEVERFSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * size3
+ *
+ *	typedef uint64 size3;
+ */
+static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
+{
+	return xdr_decode_hyper(p, size);
+}
+
+/*
+ * nfsstat3
+ *
+ *	enum nfsstat3 {
+ *		NFS3_OK = 0,
+ *		...
+ *	}
+ */
+#define NFS3_OK		NFS_OK
+
+static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * ftype3
+ *
+ *	enum ftype3 {
+ *		NF3REG	= 1,
+ *		NF3DIR	= 2,
+ *		NF3BLK	= 3,
+ *		NF3CHR	= 4,
+ *		NF3LNK	= 5,
+ *		NF3SOCK	= 6,
+ *		NF3FIFO	= 7
+ *	};
+ */
+static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
+{
+	BUG_ON(type > NF3FIFO);
+	encode_uint32(xdr, type);
+}
+
+static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
+{
+	u32 type;
+
+	type = be32_to_cpup(p++);
 	if (type > NF3FIFO)
 		type = NF3NON;
-	fmode = nfs_type2fmt[type];
-	fattr->mode = (ntohl(*p++) & ~S_IFMT) | fmode;
-	fattr->nlink = ntohl(*p++);
-	fattr->uid = ntohl(*p++);
-	fattr->gid = ntohl(*p++);
-	p = xdr_decode_hyper(p, &fattr->size);
-	p = xdr_decode_hyper(p, &fattr->du.nfs3.used);
+	*mode = nfs_type2fmt[type];
+	return p;
+}
 
-	/* Turn remote device info into Linux-specific dev_t */
-	major = ntohl(*p++);
-	minor = ntohl(*p++);
-	fattr->rdev = MKDEV(major, minor);
-	if (MAJOR(fattr->rdev) != major || MINOR(fattr->rdev) != minor)
-		fattr->rdev = 0;
+/*
+ * specdata3
+ *
+ *     struct specdata3 {
+ *             uint32  specdata1;
+ *             uint32  specdata2;
+ *     };
+ */
+static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 8);
+	*p++ = cpu_to_be32(MAJOR(rdev));
+	*p = cpu_to_be32(MINOR(rdev));
+}
+
+static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
+{
+	unsigned int major, minor;
+
+	major = be32_to_cpup(p++);
+	minor = be32_to_cpup(p++);
+	*rdev = MKDEV(major, minor);
+	if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
+		*rdev = 0;
+	return p;
+}
+
+/*
+ * nfs_fh3
+ *
+ *	struct nfs_fh3 {
+ *		opaque       data<NFS3_FHSIZE>;
+ *	};
+ */
+static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	BUG_ON(fh->size > NFS3_FHSIZE);
+	p = xdr_reserve_space(xdr, 4 + fh->size);
+	xdr_encode_opaque(p, fh->data, fh->size);
+}
+
+static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > NFS3_FHSIZE))
+		goto out_toobig;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	fh->size = length;
+	memcpy(fh->data, p, length);
+	return 0;
+out_toobig:
+	dprintk("NFS: file handle size (%u) too big\n", length);
+	return -E2BIG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
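+/*
+ * NFSv3 file handles are variable-length opaques of at most
+ * NFS3_FHSIZE (64) bytes.  A server advertising a larger handle is
+ * treated as broken (-E2BIG) rather than risking a copy past the end
+ * of fh->data.
+ */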
+
+static void zero_nfs_fh3(struct nfs_fh *fh)
+{
+	memset(fh, 0, sizeof(*fh));
+}
+
+/*
+ * nfstime3
+ *
+ *	struct nfstime3 {
+ *		uint32	seconds;
+ *		uint32	nseconds;
+ *	};
+ */
+static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	*p++ = cpu_to_be32(timep->tv_nsec);
+	return p;
+}
+
+static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
+{
+	timep->tv_sec = be32_to_cpup(p++);
+	timep->tv_nsec = be32_to_cpup(p++);
+	return p;
+}
+
+/*
+ * sattr3
+ *
+ *	enum time_how {
+ *		DONT_CHANGE		= 0,
+ *		SET_TO_SERVER_TIME	= 1,
+ *		SET_TO_CLIENT_TIME	= 2
+ *	};
+ *
+ *	union set_mode3 switch (bool set_it) {
+ *	case TRUE:
+ *		mode3	mode;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_uid3 switch (bool set_it) {
+ *	case TRUE:
+ *		uid3	uid;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_gid3 switch (bool set_it) {
+ *	case TRUE:
+ *		gid3	gid;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_size3 switch (bool set_it) {
+ *	case TRUE:
+ *		size3	size;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_atime switch (time_how set_it) {
+ *	case SET_TO_CLIENT_TIME:
+ *		nfstime3	atime;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_mtime switch (time_how set_it) {
+ *	case SET_TO_CLIENT_TIME:
+ *		nfstime3  mtime;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct sattr3 {
+ *		set_mode3	mode;
+ *		set_uid3	uid;
+ *		set_gid3	gid;
+ *		set_size3	size;
+ *		set_atime	atime;
+ *		set_mtime	mtime;
+ *	};
+ */
+static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
+{
+	u32 nbytes;
+	__be32 *p;
+
+	/*
+	 * In order to make only a single xdr_reserve_space() call,
+	 * pre-compute the total number of bytes to be reserved.
+	 * Six boolean values, one for each set_foo field, are always
+	 * present in the encoded result, so start there.
+	 */
+	nbytes = 6 * 4;
+	if (attr->ia_valid & ATTR_MODE)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_UID)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_GID)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_SIZE)
+		nbytes += 8;
+	if (attr->ia_valid & ATTR_ATIME_SET)
+		nbytes += 8;
+	if (attr->ia_valid & ATTR_MTIME_SET)
+		nbytes += 8;
+	p = xdr_reserve_space(xdr, nbytes);
+
+	if (attr->ia_valid & ATTR_MODE) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_UID) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_uid);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_GID) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_gid);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_SIZE) {
+		*p++ = xdr_one;
+		p = xdr_encode_hyper(p, (u64)attr->ia_size);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_ATIME_SET) {
+		*p++ = xdr_two;
+		p = xdr_encode_nfstime3(p, &attr->ia_atime);
+	} else if (attr->ia_valid & ATTR_ATIME) {
+		*p++ = xdr_one;
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_MTIME_SET) {
+		*p++ = xdr_two;
+		xdr_encode_nfstime3(p, &attr->ia_mtime);
+	} else if (attr->ia_valid & ATTR_MTIME) {
+		*p = xdr_one;
+	} else
+		*p = xdr_zero;
+}
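+/*
+ * A worked example of the reservation above, for a hypothetical
+ * chmod-style request that sets only ATTR_MODE: nbytes = 6 * 4 + 4 =
+ * 28 bytes, i.e. six discriminant words plus one mode word, with the
+ * remaining five unions taking just their zero discriminant.
+ */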
+
+/*
+ * fattr3
+ *
+ *	struct fattr3 {
+ *		ftype3		type;
+ *		mode3		mode;
+ *		uint32		nlink;
+ *		uid3		uid;
+ *		gid3		gid;
+ *		size3		size;
+ *		size3		used;
+ *		specdata3	rdev;
+ *		uint64		fsid;
+ *		fileid3		fileid;
+ *		nfstime3	atime;
+ *		nfstime3	mtime;
+ *		nfstime3	ctime;
+ *	};
+ */
+static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+	umode_t fmode;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	p = xdr_decode_ftype3(p, &fmode);
+
+	fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
+	fattr->nlink = be32_to_cpup(p++);
+	fattr->uid = be32_to_cpup(p++);
+	fattr->gid = be32_to_cpup(p++);
+
+	p = xdr_decode_size3(p, &fattr->size);
+	p = xdr_decode_size3(p, &fattr->du.nfs3.used);
+	p = xdr_decode_specdata3(p, &fattr->rdev);
 
 	p = xdr_decode_hyper(p, &fattr->fsid.major);
 	fattr->fsid.minor = 0;
-	p = xdr_decode_hyper(p, &fattr->fileid);
-	p = xdr_decode_time3(p, &fattr->atime);
-	p = xdr_decode_time3(p, &fattr->mtime);
-	p = xdr_decode_time3(p, &fattr->ctime);
 
-	/* Update the mode bits */
+	p = xdr_decode_fileid3(p, &fattr->fileid);
+	p = xdr_decode_nfstime3(p, &fattr->atime);
+	p = xdr_decode_nfstime3(p, &fattr->mtime);
+	xdr_decode_nfstime3(p, &fattr->ctime);
+
 	fattr->valid |= NFS_ATTR_FATTR_V3;
-	return p;
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
+/*
+ * post_op_attr
+ *
+ *	union post_op_attr switch (bool attributes_follow) {
+ *	case TRUE:
+ *		fattr3	attributes;
+ *	case FALSE:
+ *		void;
+ *	};
+ */
+static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	if (attr->ia_valid & ATTR_MODE) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_mode & S_IALLUGO);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_UID) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_uid);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_GID) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_gid);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_SIZE) {
-		*p++ = xdr_one;
-		p = xdr_encode_hyper(p, (__u64) attr->ia_size);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_ATIME_SET) {
-		*p++ = xdr_two;
-		p = xdr_encode_time3(p, &attr->ia_atime);
-	} else if (attr->ia_valid & ATTR_ATIME) {
-		*p++ = xdr_one;
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_MTIME_SET) {
-		*p++ = xdr_two;
-		p = xdr_encode_time3(p, &attr->ia_mtime);
-	} else if (attr->ia_valid & ATTR_MTIME) {
-		*p++ = xdr_one;
-	} else {
-		*p++ = xdr_zero;
-	}
-	return p;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p != xdr_zero)
+		return decode_fattr3(xdr, fattr);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * wcc_attr
+ *	struct wcc_attr {
+ *		size3		size;
+ *		nfstime3	mtime;
+ *		nfstime3	ctime;
+ *	};
+ */
+static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	p = xdr_decode_hyper(p, &fattr->pre_size);
-	p = xdr_decode_time3(p, &fattr->pre_mtime);
-	p = xdr_decode_time3(p, &fattr->pre_ctime);
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
 	fattr->valid |= NFS_ATTR_FATTR_PRESIZE
 		| NFS_ATTR_FATTR_PREMTIME
 		| NFS_ATTR_FATTR_PRECTIME;
-	return p;
+
+	p = xdr_decode_size3(p, &fattr->pre_size);
+	p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
+	xdr_decode_nfstime3(p, &fattr->pre_ctime);
+
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_post_op_attr(__be32 *p, struct nfs_fattr *fattr)
-{
-	if (*p++)
-		p = xdr_decode_fattr(p, fattr);
-	return p;
-}
-
-static inline __be32 *
-xdr_decode_post_op_attr_stream(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+/*
+ * pre_op_attr
+ *	union pre_op_attr switch (bool attributes_follow) {
+ *	case TRUE:
+ *		wcc_attr	attributes;
+ *	case FALSE:
+ *		void;
+ *	};
+ *
+ * wcc_data
+ *
+ *	struct wcc_data {
+ *		pre_op_attr	before;
+ *		post_op_attr	after;
+ *	};
+ */
+static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
 	__be32 *p;
 
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 84);
-		if (unlikely(!p))
-			goto out_overflow;
-		p = xdr_decode_fattr(p, fattr);
-	}
-	return p;
+	if (*p != xdr_zero)
+		return decode_wcc_attr(xdr, fattr);
+	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EIO);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_pre_op_attr(__be32 *p, struct nfs_fattr *fattr)
+static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	if (*p++)
-		return xdr_decode_wcc_attr(p, fattr);
-	return p;
-}
+	int error;
 
-
-static inline __be32 *
-xdr_decode_wcc_data(__be32 *p, struct nfs_fattr *fattr)
-{
-	p = xdr_decode_pre_op_attr(p, fattr);
-	return xdr_decode_post_op_attr(p, fattr);
+	error = decode_pre_op_attr(xdr, fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, fattr);
+out:
+	return error;
 }
 
 /*
- * NFS encode functions
+ * post_op_fh3
+ *
+ *	union post_op_fh3 switch (bool handle_follows) {
+ *	case TRUE:
+ *		nfs_fh3  handle;
+ *	case FALSE:
+ *		void;
+ *	};
  */
-
-/*
- * Encode file handle argument
- */
-static int
-nfs3_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
+static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
 {
-	p = xdr_encode_fhandle(p, fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	__be32 *p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p != xdr_zero)
+		return decode_nfs_fh3(xdr, fh);
+	zero_nfs_fh3(fh);
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
 /*
- * Encode SETATTR arguments
+ * diropargs3
+ *
+ *	struct diropargs3 {
+ *		nfs_fh3		dir;
+ *		filename3	name;
+ *	};
  */
-static int
-nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args)
+static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
+			      const char *name, u32 length)
 {
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_sattr(p, args->sattr);
-	*p++ = htonl(args->guard);
-	if (args->guard)
-		p = xdr_encode_time3(p, &args->guardtime);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
+	encode_nfs_fh3(xdr, fh);
+	encode_filename3(xdr, name, length);
+}
+
+
+/*
+ * NFSv3 XDR encode functions
+ *
+ * NFSv3 argument types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ */
+
+/*
+ * 3.3.1  GETATTR3args
+ *
+ *	struct GETATTR3args {
+ *		nfs_fh3  object;
+ *	};
+ */
+static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs_fh *fh)
+{
+	encode_nfs_fh3(xdr, fh);
 }
 
 /*
- * Encode directory ops argument
+ * 3.3.2  SETATTR3args
+ *
+ *	union sattrguard3 switch (bool check) {
+ *	case TRUE:
+ *		nfstime3  obj_ctime;
+ *	case FALSE:
+ *		void;
+ *	};
+ *
+ *	struct SETATTR3args {
+ *		nfs_fh3		object;
+ *		sattr3		new_attributes;
+ *		sattrguard3	guard;
+ *	};
  */
-static int
-nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args)
+static void encode_sattrguard3(struct xdr_stream *xdr,
+			       const struct nfs3_sattrargs *args)
 {
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
+	__be32 *p;
 
-/*
- * Encode REMOVE argument
- */
-static int
-nfs3_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name.name, args->name.len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode access() argument
- */
-static int
-nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->access);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
- */
-static int
-nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen,
-			 args->pages, args->pgbase, count);
-	req->rq_rcv_buf.flags |= XDRBUF_READ;
-	return 0;
-}
-
-/*
- * Write arguments. Splice the buffer to be written into the iovec.
- */
-static int
-nfs3_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(count);
-	*p++ = htonl(args->stable);
-	*p++ = htonl(count);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	/* Copy the page array */
-	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
-	sndbuf->flags |= XDRBUF_WRITE;
-	return 0;
-}
-
-/*
- * Encode CREATE arguments
- */
-static int
-nfs3_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs3_createargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-
-	*p++ = htonl(args->createmode);
-	if (args->createmode == NFS3_CREATE_EXCLUSIVE) {
-		*p++ = args->verifier[0];
-		*p++ = args->verifier[1];
-	} else
-		p = xdr_encode_sattr(p, args->sattr);
-
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode MKDIR arguments
- */
-static int
-nfs3_xdr_mkdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mkdirargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SYMLINK arguments
- */
-static int
-nfs3_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_symlinkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_array(p, args->fromname, args->fromlen);
-	p = xdr_encode_sattr(p, args->sattr);
-	*p++ = htonl(args->pathlen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Copy the page */
-	xdr_encode_pages(&req->rq_snd_buf, args->pages, 0, args->pathlen);
-	return 0;
-}
-
-/*
- * Encode MKNOD arguments
- */
-static int
-nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	*p++ = htonl(args->type);
-	p = xdr_encode_sattr(p, args->sattr);
-	if (args->type == NF3CHR || args->type == NF3BLK) {
-		*p++ = htonl(MAJOR(args->rdev));
-		*p++ = htonl(MINOR(args->rdev));
-	}
-
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode RENAME arguments
- */
-static int
-nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
-{
-	p = xdr_encode_fhandle(p, args->old_dir);
-	p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
-	p = xdr_encode_fhandle(p, args->new_dir);
-	p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode LINK arguments
- */
-static int
-nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_fhandle(p, args->tofh);
-	p = xdr_encode_array(p, args->toname, args->tolen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode arguments to readdir call
- */
-static int
-nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->cookie);
-	*p++ = args->verf[0];
-	*p++ = args->verf[1];
-	if (args->plus) {
-		/* readdirplus: need dircount + buffer size.
-		 * We just make sure we make dircount big enough */
-		*p++ = htonl(count >> 3);
-	}
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
-	return 0;
-}
-
-/*
- * Decode the result of a readdir call.
- * We just check for syntactical correctness.
- */
-static int
-nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
-	struct page **page;
-	size_t hdrlen;
-	u32 recvd, pglen;
-	int status;
-
-	status = ntohl(*p++);
-	/* Decode post_op_attrs */
-	p = xdr_decode_post_op_attr(p, res->dir_attr);
-	if (status)
-		return nfs_stat_to_errno(status);
-	/* Decode verifier cookie */
-	if (res->verf) {
-		res->verf[0] = *p++;
-		res->verf[1] = *p++;
+	if (args->guard) {
+		p = xdr_reserve_space(xdr, 4 + 8);
+		*p++ = xdr_one;
+		xdr_encode_nfstime3(p, &args->guardtime);
 	} else {
-		p += 2;
+		p = xdr_reserve_space(xdr, 4);
+		*p = xdr_zero;
 	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READDIR reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-
-	pglen = rcvbuf->page_len;
-	recvd = rcvbuf->len - hdrlen;
-	if (pglen > recvd)
-		pglen = recvd;
-	page = rcvbuf->pages;
-
-	return pglen;
 }
 
-__be32 *
-nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_sattrargs *args)
 {
-	__be32 *p;
-	struct nfs_entry old = *entry;
-
-	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
-		goto out_overflow;
-	if (!ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
-		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
-	}
-
-	p = xdr_inline_decode(xdr, 12);
-	if (unlikely(!p))
-		goto out_overflow;
-	p = xdr_decode_hyper(p, &entry->ino);
-	entry->len  = ntohl(*p++);
-
-	p = xdr_inline_decode(xdr, entry->len + 8);
-	if (unlikely(!p))
-		goto out_overflow;
-	entry->name = (const char *) p;
-	p += XDR_QUADLEN(entry->len);
-	entry->prev_cookie = entry->cookie;
-	p = xdr_decode_hyper(p, &entry->cookie);
-
-	entry->d_type = DT_UNKNOWN;
-	if (plus) {
-		entry->fattr->valid = 0;
-		p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
-		if (IS_ERR(p))
-			goto out_overflow_exit;
-		entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
-		/* In fact, a post_op_fh3: */
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (*p++) {
-			p = xdr_decode_fhandle_stream(xdr, entry->fh);
-			if (IS_ERR(p))
-				goto out_overflow_exit;
-			/* Ugh -- server reply was truncated */
-			if (p == NULL) {
-				dprintk("NFS: FH truncated\n");
-				*entry = old;
-				return ERR_PTR(-EAGAIN);
-			}
-		} else
-			memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
-	}
-
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
-
-out_overflow:
-	print_overflow_msg(__func__, xdr);
-out_overflow_exit:
-	return ERR_PTR(-EAGAIN);
+	encode_nfs_fh3(xdr, args->fh);
+	encode_sattr3(xdr, args->sattr);
+	encode_sattrguard3(xdr, args);
 }
 
 /*
- * Encode COMMIT arguments
+ * 3.3.3  LOOKUP3args
+ *
+ *	struct LOOKUP3args {
+ *		diropargs3  what;
+ *	};
  */
-static int
-nfs3_xdr_commitargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_diropargs *args)
 {
-	p = xdr_encode_fhandle(p, args->fh);
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+}
+
+/*
+ * 3.3.4  ACCESS3args
+ *
+ *	struct ACCESS3args {
+ *		nfs_fh3		object;
+ *		uint32		access;
+ *	};
+ */
+static void encode_access3args(struct xdr_stream *xdr,
+			       const struct nfs3_accessargs *args)
+{
+	encode_nfs_fh3(xdr, args->fh);
+	encode_uint32(xdr, args->access);
+}
+
+static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_accessargs *args)
+{
+	encode_access3args(xdr, args);
+}
+
+/*
+ * 3.3.5  READLINK3args
+ *
+ *	struct READLINK3args {
+ *		nfs_fh3	symlink;
+ *	};
+ */
+static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
+				       struct xdr_stream *xdr,
+				       const struct nfs3_readlinkargs *args)
+{
+	encode_nfs_fh3(xdr, args->fh);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->pglen, NFS3_readlinkres_sz);
+}
+
+/*
+ * 3.3.6  READ3args
+ *
+ *	struct READ3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *	};
+ */
+static void encode_read3args(struct xdr_stream *xdr,
+			     const struct nfs_readargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4);
 	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(args->count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_readargs *args)
+{
+	encode_read3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->count, NFS3_readres_sz);
+	req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 3.3.7  WRITE3args
+ *
+ *	enum stable_how {
+ *		UNSTABLE  = 0,
+ *		DATA_SYNC = 1,
+ *		FILE_SYNC = 2
+ *	};
+ *
+ *	struct WRITE3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *		stable_how	stable;
+ *		opaque		data<>;
+ *	};
+ */
+static void encode_write3args(struct xdr_stream *xdr,
+			      const struct nfs_writeargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
+	p = xdr_encode_hyper(p, args->offset);
+	*p++ = cpu_to_be32(args->count);
+	*p++ = cpu_to_be32(args->stable);
+	*p = cpu_to_be32(args->count);
+	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+}
+
+static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_writeargs *args)
+{
+	encode_write3args(xdr, args);
+	xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 3.3.8  CREATE3args
+ *
+ *	enum createmode3 {
+ *		UNCHECKED = 0,
+ *		GUARDED   = 1,
+ *		EXCLUSIVE = 2
+ *	};
+ *
+ *	union createhow3 switch (createmode3 mode) {
+ *	case UNCHECKED:
+ *	case GUARDED:
+ *		sattr3       obj_attributes;
+ *	case EXCLUSIVE:
+ *		createverf3  verf;
+ *	};
+ *
+ *	struct CREATE3args {
+ *		diropargs3	where;
+ *		createhow3	how;
+ *	};
+ */
+static void encode_createhow3(struct xdr_stream *xdr,
+			      const struct nfs3_createargs *args)
+{
+	encode_uint32(xdr, args->createmode);
+	switch (args->createmode) {
+	case NFS3_CREATE_UNCHECKED:
+	case NFS3_CREATE_GUARDED:
+		encode_sattr3(xdr, args->sattr);
+		break;
+	case NFS3_CREATE_EXCLUSIVE:
+		encode_createverf3(xdr, args->verifier);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_createargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_createhow3(xdr, args);
+}
+
+/*
+ * 3.3.9  MKDIR3args
+ *
+ *	struct MKDIR3args {
+ *		diropargs3	where;
+ *		sattr3		attributes;
+ *	};
+ */
+static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs3_mkdirargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_sattr3(xdr, args->sattr);
+}
+
+/*
+ * 3.3.10  SYMLINK3args
+ *
+ *	struct symlinkdata3 {
+ *		sattr3		symlink_attributes;
+ *		nfspath3	symlink_data;
+ *	};
+ *
+ *	struct SYMLINK3args {
+ *		diropargs3	where;
+ *		symlinkdata3	symlink;
+ *	};
+ */
+static void encode_symlinkdata3(struct xdr_stream *xdr,
+				const struct nfs3_symlinkargs *args)
+{
+	encode_sattr3(xdr, args->sattr);
+	encode_nfspath3(xdr, args->pages, args->pathlen);
+}
+
+static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_symlinkargs *args)
+{
+	encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
+	encode_symlinkdata3(xdr, args);
+}
+
+/*
+ * 3.3.11  MKNOD3args
+ *
+ *	struct devicedata3 {
+ *		sattr3		dev_attributes;
+ *		specdata3	spec;
+ *	};
+ *
+ *	union mknoddata3 switch (ftype3 type) {
+ *	case NF3CHR:
+ *	case NF3BLK:
+ *		devicedata3	device;
+ *	case NF3SOCK:
+ *	case NF3FIFO:
+ *		sattr3		pipe_attributes;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct MKNOD3args {
+ *		diropargs3	where;
+ *		mknoddata3	what;
+ *	};
+ */
+static void encode_devicedata3(struct xdr_stream *xdr,
+			       const struct nfs3_mknodargs *args)
+{
+	encode_sattr3(xdr, args->sattr);
+	encode_specdata3(xdr, args->rdev);
+}
+
+static void encode_mknoddata3(struct xdr_stream *xdr,
+			      const struct nfs3_mknodargs *args)
+{
+	encode_ftype3(xdr, args->type);
+	switch (args->type) {
+	case NF3CHR:
+	case NF3BLK:
+		encode_devicedata3(xdr, args);
+		break;
+	case NF3SOCK:
+	case NF3FIFO:
+		encode_sattr3(xdr, args->sattr);
+		break;
+	case NF3REG:
+	case NF3DIR:
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs3_mknodargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_mknoddata3(xdr, args);
+}
+
+/*
+ * 3.3.12  REMOVE3args
+ *
+ *	struct REMOVE3args {
+ *		diropargs3  object;
+ *	};
+ */
+static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_removeargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 3.3.14  RENAME3args
+ *
+ *	struct RENAME3args {
+ *		diropargs3	from;
+ *		diropargs3	to;
+ *	};
+ */
+static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_renameargs *args)
+{
+	const struct qstr *old = args->old_name;
+	const struct qstr *new = args->new_name;
+
+	encode_diropargs3(xdr, args->old_dir, old->name, old->len);
+	encode_diropargs3(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 3.3.15  LINK3args
+ *
+ *	struct LINK3args {
+ *		nfs_fh3		file;
+ *		diropargs3	link;
+ *	};
+ */
+static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs3_linkargs *args)
+{
+	encode_nfs_fh3(xdr, args->fromfh);
+	encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 3.3.16  READDIR3args
+ *
+ *	struct READDIR3args {
+ *		nfs_fh3		dir;
+ *		cookie3		cookie;
+ *		cookieverf3	cookieverf;
+ *		count3		count;
+ *	};
+ */
+static void encode_readdir3args(struct xdr_stream *xdr,
+				const struct nfs3_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
+	p = xdr_encode_cookie3(p, args->cookie);
+	p = xdr_encode_cookieverf3(p, args->verf);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_readdirargs *args)
+{
+	encode_readdir3args(xdr, args);
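+	/* Entry data in the reply is received directly into the
+	 * caller-provided pages. */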
+	prepare_reply_buffer(req, args->pages, 0,
+				args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.17  READDIRPLUS3args
+ *
+ *	struct READDIRPLUS3args {
+ *		nfs_fh3		dir;
+ *		cookie3		cookie;
+ *		cookieverf3	cookieverf;
+ *		count3		dircount;
+ *		count3		maxcount;
+ *	};
+ */
+static void encode_readdirplus3args(struct xdr_stream *xdr,
+				    const struct nfs3_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
+	p = xdr_encode_cookie3(p, args->cookie);
+	p = xdr_encode_cookieverf3(p, args->verf);
+
+	/*
+	 * readdirplus: need dircount + buffer size.
+	 * We just make sure dircount is big enough.
+	 */
+	*p++ = cpu_to_be32(args->count >> 3);
+
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
+					  struct xdr_stream *xdr,
+					  const struct nfs3_readdirargs *args)
+{
+	encode_readdirplus3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+				args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.21  COMMIT3args
+ *
+ *	struct COMMIT3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *	};
+ */
+static void encode_commit3args(struct xdr_stream *xdr,
+			       const struct nfs_writeargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4);
+	p = xdr_encode_hyper(p, args->offset);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_writeargs *args)
+{
+	encode_commit3args(xdr, args);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
-/*
- * Encode GETACL arguments
- */
-static int
-nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
-		    struct nfs3_getaclargs *args)
+
+static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_getaclargs *args)
 {
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->mask);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	if (args->mask & (NFS_ACL | NFS_DFACL)) {
-		/* Inline the page array */
-		replen = (RPC_REPHDRSIZE + auth->au_rslack +
-			  ACL3_getaclres_sz) << 2;
-		xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0,
-				 NFSACL_MAXPAGES << PAGE_SHIFT);
-	}
-	return 0;
+	encode_nfs_fh3(xdr, args->fh);
+	encode_uint32(xdr, args->mask);
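+	/* Reply pages are needed only when actual ACL entries, rather
+	 * than just entry counts, have been requested. */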
+	if (args->mask & (NFS_ACL | NFS_DFACL))
+		prepare_reply_buffer(req, args->pages, 0,
+					NFSACL_MAXPAGES << PAGE_SHIFT,
+					ACL3_getaclres_sz);
 }
 
-/*
- * Encode SETACL arguments
- */
-static int
-nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p,
-                   struct nfs3_setaclargs *args)
+static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_setaclargs *args)
 {
-	struct xdr_buf *buf = &req->rq_snd_buf;
 	unsigned int base;
-	int err;
+	int error;
 
-	p = xdr_encode_fhandle(p, NFS_FH(args->inode));
-	*p++ = htonl(args->mask);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	encode_nfs_fh3(xdr, NFS_FH(args->inode));
+	encode_uint32(xdr, args->mask);
+
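+	/* Make room for the ACL payload: pages supplied with the
+	 * arguments, or an inline area in the head when there are none. */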
 	base = req->rq_slen;
-
 	if (args->npages != 0)
-		xdr_encode_pages(buf, args->pages, 0, args->len);
+		xdr_write_pages(xdr, args->pages, 0, args->len);
 	else
-		req->rq_slen = xdr_adjust_iovec(req->rq_svec,
-				p + XDR_QUADLEN(args->len));
+		xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
 
-	err = nfsacl_encode(buf, base, args->inode,
+	error = nfsacl_encode(xdr->buf, base, args->inode,
 			    (args->mask & NFS_ACL) ?
 			    args->acl_access : NULL, 1, 0);
-	if (err > 0)
-		err = nfsacl_encode(buf, base + err, args->inode,
-				    (args->mask & NFS_DFACL) ?
-				    args->acl_default : NULL, 1,
-				    NFS_ACL_DEFAULT);
-	return (err > 0) ? 0 : err;
+	BUG_ON(error < 0);
+	error = nfsacl_encode(xdr->buf, base + error, args->inode,
+			    (args->mask & NFS_DFACL) ?
+			    args->acl_default : NULL, 1,
+			    NFS_ACL_DEFAULT);
+	BUG_ON(error < 0);
 }
+
 #endif  /* CONFIG_NFS_V3_ACL */
 
 /*
- * NFS XDR decode functions
+ * NFSv3 XDR decode functions
+ *
+ * NFSv3 result types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
  */
 
 /*
- * Decode attrstat reply.
+ * 3.3.1  GETATTR3res
+ *
+ *	struct GETATTR3resok {
+ *		fattr3		obj_attributes;
+ *	};
+ *
+ *	union GETATTR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		GETATTR3resok  resok;
+ *	default:
+ *		void;
+ *	};
  */
-static int
-nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs_fattr *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	xdr_decode_fattr(p, fattr);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_fattr3(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode status+wcc_data reply
- * SATTR, REMOVE, RMDIR
+ * 3.3.2  SETATTR3res
+ *
+ *	struct SETATTR3resok {
+ *		wcc_data  obj_wcc;
+ *	};
+ *
+ *	struct SETATTR3resfail {
+ *		wcc_data  obj_wcc;
+ *	};
+ *
+ *	union SETATTR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		SETATTR3resok   resok;
+ *	default:
+ *		SETATTR3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs_fattr *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++)))
-		status = nfs_stat_to_errno(status);
-	xdr_decode_wcc_data(p, fattr);
-	return status;
-}
-
-static int
-nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
-{
-	return nfs3_xdr_wccstat(req, p, res->dir_attr);
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode LOOKUP reply
+ * 3.3.3  LOOKUP3res
+ *
+ *	struct LOOKUP3resok {
+ *		nfs_fh3		object;
+ *		post_op_attr	obj_attributes;
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	struct LOOKUP3resfail {
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	union LOOKUP3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		LOOKUP3resok	resok;
+ *	default:
+ *		LOOKUP3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
+static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_diropres *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++))) {
-		status = nfs_stat_to_errno(status);
-	} else {
-		if (!(p = xdr_decode_fhandle(p, res->fh)))
-			return -errno_NFSERR_IO;
-		p = xdr_decode_post_op_attr(p, res->fattr);
-	}
-	xdr_decode_post_op_attr(p, res->dir_attr);
-	return status;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_nfs_fh3(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->dir_attr);
+out:
+	return error;
+out_default:
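+	/* LOOKUP3resfail still carries the directory's post-op attributes. */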
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode ACCESS reply
+ * 3.3.4  ACCESS3res
+ *
+ *	struct ACCESS3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		access;
+ *	};
+ *
+ *	struct ACCESS3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union ACCESS3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		ACCESS3resok	resok;
+ *	default:
+ *		ACCESS3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
+static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_accessres *result)
 {
-	int	status = ntohl(*p++);
+	enum nfs_stat status;
+	int error;
 
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status)
-		return nfs_stat_to_errno(status);
-	res->access = ntohl(*p++);
-	return 0;
-}
-
-static int
-nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_uint32(xdr, &result->access);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode READLINK reply
+ * 3.3.5  READLINK3res
+ *
+ *	struct READLINK3resok {
+ *		post_op_attr	symlink_attributes;
+ *		nfspath3	data;
+ *	};
+ *
+ *	struct READLINK3resfail {
+ *		post_op_attr	symlink_attributes;
+ *	};
+ *
+ *	union READLINK3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READLINK3resok	resok;
+ *	default:
+ *		READLINK3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_fattr *result)
 {
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_nfspath3(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.6  READ3res
+ *
+ *	struct READ3resok {
+ *		post_op_attr	file_attributes;
+ *		count3		count;
+ *		bool		eof;
+ *		opaque		data<>;
+ *	};
+ *
+ *	struct READ3resfail {
+ *		post_op_attr	file_attributes;
+ *	};
+ *
+ *	union READ3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READ3resok	resok;
+ *	default:
+ *		READ3resfail	resfail;
+ *	};
+ */
+static int decode_read3resok(struct xdr_stream *xdr,
+			     struct nfs_readres *result)
+{
+	u32 eof, count, ocount, recvd;
 	size_t hdrlen;
-	u32 len, recvd;
-	int	status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-	p = xdr_decode_post_op_attr(p, fattr);
+	p = xdr_inline_decode(xdr, 4 + 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p++);
+	eof = be32_to_cpup(p++);
+	ocount = be32_to_cpup(p++);
+	if (unlikely(ocount != count))
+		goto out_mismatch;
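+	/* Never trust a count that exceeds what was actually received;
+	 * out_cheating clamps it and clears the EOF flag. */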
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	/* Convert length of symlink */
-	len = ntohl(*p++);
-	if (len >= rcvbuf->page_len) {
-		dprintk("nfs: server returned giant symlink!\n");
-		return -ENAMETOOLONG;
-	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READLINK reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READLINK header is short. "
-			"iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (recvd < len) {
-		dprintk("NFS: server cheating in readlink reply: "
-				"count %u > recvd %u\n", len, recvd);
-		return -EIO;
-	}
-
-	xdr_terminate_string(rcvbuf, len);
-	return 0;
-}
-
-/*
- * Decode READ reply
- */
-static int
-nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
-{
-	struct kvec *iov = req->rq_rcv_buf.head;
-	size_t hdrlen;
-	u32 count, ocount, recvd;
-	int status;
-
-	status = ntohl(*p++);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
-	 * in that it puts the count both in the res struct and in the
-	 * opaque data count. */
-	count    = ntohl(*p++);
-	res->eof = ntohl(*p++);
-	ocount   = ntohl(*p++);
-
-	if (ocount != count) {
-		dprintk("NFS: READ count doesn't match RPC opaque count.\n");
-		return -errno_NFSERR_IO;
-	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READ reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-       		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READ header is short. iovec will be shifted.\n");
-		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
-	}
-
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (count > recvd) {
-		dprintk("NFS: server cheating in read reply: "
-			"count %u > recvd %u\n", count, recvd);
-		count = recvd;
-		res->eof = 0;
-	}
-
-	if (count < res->count)
-		res->count = count;
-
+out:
+	xdr_read_pages(xdr, count);
+	result->eof = eof;
+	result->count = count;
 	return count;
+out_mismatch:
+	dprintk("NFS: READ count doesn't match length of opaque: "
+		"count %u != ocount %u\n", count, ocount);
+	return -EIO;
+out_cheating:
+	dprintk("NFS: server cheating in read result: "
+		"count %u > recvd %u\n", count, recvd);
+	count = recvd;
+	eof = 0;
+	goto out;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_readres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_read3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode WRITE response
+ * 3.3.7  WRITE3res
+ *
+ *	enum stable_how {
+ *		UNSTABLE  = 0,
+ *		DATA_SYNC = 1,
+ *		FILE_SYNC = 2
+ *	};
+ *
+ *	struct WRITE3resok {
+ *		wcc_data	file_wcc;
+ *		count3		count;
+ *		stable_how	committed;
+ *		writeverf3	verf;
+ *	};
+ *
+ *	struct WRITE3resfail {
+ *		wcc_data	file_wcc;
+ *	};
+ *
+ *	union WRITE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		WRITE3resok	resok;
+ *	default:
+ *		WRITE3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int decode_write3resok(struct xdr_stream *xdr,
+			      struct nfs_writeres *result)
 {
-	int	status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-	p = xdr_decode_wcc_data(p, res->fattr);
+	p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->count = be32_to_cpup(p++);
+	result->verf->committed = be32_to_cpup(p++);
+	if (unlikely(result->verf->committed > NFS_FILE_SYNC))
+		goto out_badvalue;
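+	/* Save the write verifier so a later COMMIT can detect a server
+	 * reboot that may have discarded unstable writes. */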
+	memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
+	return result->count;
+out_badvalue:
+	dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
+static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs_writeres *result)
+{
+	enum nfs_stat status;
+	int error;
 
-	res->count = ntohl(*p++);
-	res->verf->committed = (enum nfs3_stable_how)ntohl(*p++);
-	res->verf->verifier[0] = *p++;
-	res->verf->verifier[1] = *p++;
-
-	return res->count;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_write3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode a CREATE response
+ * 3.3.8  CREATE3res
+ *
+ *	struct CREATE3resok {
+ *		post_op_fh3	obj;
+ *		post_op_attr	obj_attributes;
+ *		wcc_data	dir_wcc;
+ *	};
+ *
+ *	struct CREATE3resfail {
+ *		wcc_data	dir_wcc;
+ *	};
+ *
+ *	union CREATE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		CREATE3resok	resok;
+ *	default:
+ *		CREATE3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
+static int decode_create3resok(struct xdr_stream *xdr,
+			       struct nfs3_diropres *result)
 {
-	int	status;
+	int error;
 
-	status = ntohl(*p++);
-	if (status == 0) {
-		if (*p++) {
-			if (!(p = xdr_decode_fhandle(p, res->fh)))
-				return -errno_NFSERR_IO;
-			p = xdr_decode_post_op_attr(p, res->fattr);
-		} else {
-			memset(res->fh, 0, sizeof(*res->fh));
-			/* Do decode post_op_attr but set it to NULL */
-			p = xdr_decode_post_op_attr(p, res->fattr);
-			res->fattr->valid = 0;
-		}
-	} else {
-		status = nfs_stat_to_errno(status);
+	error = decode_post_op_fh3(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	/* The server isn't required to return a file handle.
+	 * If it didn't, force the client to perform a LOOKUP
+	 * to determine the correct file handle and attribute
+	 * values for the new object. */
+	if (result->fh->size == 0)
+		result->fattr->valid = 0;
+	error = decode_wcc_data(xdr, result->dir_attr);
+out:
+	return error;
+}
+
+static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_diropres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_create3resok(xdr, result);
+out:
+	return error;
+out_default:
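+	/* CREATE3resfail still carries the directory's wcc_data. */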
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.12  REMOVE3res
+ *
+ *	struct REMOVE3resok {
+ *		wcc_data    dir_wcc;
+ *	};
+ *
+ *	struct REMOVE3resfail {
+ *		wcc_data    dir_wcc;
+ *	};
+ *
+ *	union REMOVE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		REMOVE3resok   resok;
+ *	default:
+ *		REMOVE3resfail resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_removeres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.14  RENAME3res
+ *
+ *	struct RENAME3resok {
+ *		wcc_data	fromdir_wcc;
+ *		wcc_data	todir_wcc;
+ *	};
+ *
+ *	struct RENAME3resfail {
+ *		wcc_data	fromdir_wcc;
+ *		wcc_data	todir_wcc;
+ *	};
+ *
+ *	union RENAME3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		RENAME3resok   resok;
+ *	default:
+ *		RENAME3resfail resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_renameres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->old_fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->new_fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.15  LINK3res
+ *
+ *	struct LINK3resok {
+ *		post_op_attr	file_attributes;
+ *		wcc_data	linkdir_wcc;
+ *	};
+ *
+ *	struct LINK3resfail {
+ *		post_op_attr	file_attributes;
+ *		wcc_data	linkdir_wcc;
+ *	};
+ *
+ *	union LINK3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		LINK3resok	resok;
+ *	default:
+ *		LINK3resfail	resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs3_linkres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/**
+ * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
+ *			the local page cache
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 3.3.16  entry3
+ *
+ *	struct entry3 {
+ *		fileid3		fileid;
+ *		filename3	name;
+ *		cookie3		cookie;
+ *		fhandle3	filehandle;
+ *		post_op_attr3	attributes;
+ *		entry3		*nextentry;
+ *	};
+ *
+ * 3.3.17  entryplus3
+ *	struct entryplus3 {
+ *		fileid3		fileid;
+ *		filename3	name;
+ *		cookie3		cookie;
+ *		post_op_attr	name_attributes;
+ *		post_op_fh3	name_handle;
+ *		entryplus3	*nextentry;
+ *	};
+ */
+int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
+{
+	struct nfs_entry old = *entry;
+	__be32 *p;
+	int error;
+
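+	/* A zero "value follows" word ends the entry list; the next
+	 * word is the dirlist3 eof flag. */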
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p == xdr_zero) {
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p == xdr_zero)
+			return -EAGAIN;
+		entry->eof = 1;
+		return -EBADCOOKIE;
 	}
-	p = xdr_decode_wcc_data(p, res->dir_attr);
-	return status;
+
+	error = decode_fileid3(xdr, &entry->ino);
+	if (unlikely(error))
+		return error;
+
+	error = decode_inline_filename3(xdr, &entry->name, &entry->len);
+	if (unlikely(error))
+		return error;
+
+	entry->prev_cookie = entry->cookie;
+	error = decode_cookie3(xdr, &entry->cookie);
+	if (unlikely(error))
+		return error;
+
+	entry->d_type = DT_UNKNOWN;
+
+	if (plus) {
+		entry->fattr->valid = 0;
+		error = decode_post_op_attr(xdr, entry->fattr);
+		if (unlikely(error))
+			return error;
+		if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
+			entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
+
+		/* In fact, a post_op_fh3: */
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p != xdr_zero) {
+			error = decode_nfs_fh3(xdr, entry->fh);
+			if (unlikely(error)) {
+				if (error == -E2BIG)
+					goto out_truncated;
+				return error;
+			}
+		} else
+			zero_nfs_fh3(entry->fh);
+	}
+
+	return 0;
+
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EAGAIN;
+out_truncated:
+	dprintk("NFS: directory entry contains invalid file handle\n");
+	*entry = old;
+	return -EAGAIN;
 }
 
 /*
- * Decode RENAME reply
+ * 3.3.16  READDIR3res
+ *
+ *	struct dirlist3 {
+ *		entry3		*entries;
+ *		bool		eof;
+ *	};
+ *
+ *	struct READDIR3resok {
+ *		post_op_attr	dir_attributes;
+ *		cookieverf3	cookieverf;
+ *		dirlist3	reply;
+ *	};
+ *
+ *	struct READDIR3resfail {
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	union READDIR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READDIR3resok	resok;
+ *	default:
+ *		READDIR3resfail	resfail;
+ *	};
+ *
+ * Read the directory contents into the page cache, but otherwise
+ * don't touch them.  The actual decoding is done by nfs3_decode_dirent()
+ * during subsequent nfs_readdir() calls.
  */
-static int
-nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res)
+static int decode_dirlist3(struct xdr_stream *xdr)
 {
-	int	status;
+	u32 recvd, pglen;
+	size_t hdrlen;
 
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	p = xdr_decode_wcc_data(p, res->old_fattr);
-	p = xdr_decode_wcc_data(p, res->new_fattr);
-	return status;
+	pglen = xdr->buf->page_len;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(pglen > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, pglen);
+	return pglen;
+out_cheating:
+	dprintk("NFS: server cheating in readdir result: "
+		"pglen %u > recvd %u\n", pglen, recvd);
+	pglen = recvd;
+	goto out;
+}
+
+static int decode_readdir3resok(struct xdr_stream *xdr,
+				struct nfs3_readdirres *result)
+{
+	int error;
+
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	/* XXX: do we need to check if result->verf != NULL ? */
+	error = decode_cookieverf3(xdr, result->verf);
+	if (unlikely(error))
+		goto out;
+	error = decode_dirlist3(xdr);
+out:
+	return error;
+}
+
+static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs3_readdirres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_readdir3resok(xdr, result);
+out:
+	return error;
+out_default:
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode LINK reply
+ * 3.3.18  FSSTAT3res
+ *
+ *	struct FSSTAT3resok {
+ *		post_op_attr	obj_attributes;
+ *		size3		tbytes;
+ *		size3		fbytes;
+ *		size3		abytes;
+ *		size3		tfiles;
+ *		size3		ffiles;
+ *		size3		afiles;
+ *		uint32		invarsec;
+ *	};
+ *
+ *	struct FSSTAT3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union FSSTAT3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		FSSTAT3resok	resok;
+ *	default:
+ *		FSSTAT3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res)
+static int decode_fsstat3resok(struct xdr_stream *xdr,
+			       struct nfs_fsstat *result)
 {
-	int	status;
+	__be32 *p;
 
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	p = xdr_decode_wcc_data(p, res->dir_attr);
-	return status;
-}
-
-/*
- * Decode FSSTAT reply
- */
-static int
-nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res)
-{
-	int		status;
-
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	p = xdr_decode_hyper(p, &res->tbytes);
-	p = xdr_decode_hyper(p, &res->fbytes);
-	p = xdr_decode_hyper(p, &res->abytes);
-	p = xdr_decode_hyper(p, &res->tfiles);
-	p = xdr_decode_hyper(p, &res->ffiles);
-	p = xdr_decode_hyper(p, &res->afiles);
-
+	p = xdr_inline_decode(xdr, 8 * 6 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	p = xdr_decode_size3(p, &result->tbytes);
+	p = xdr_decode_size3(p, &result->fbytes);
+	p = xdr_decode_size3(p, &result->abytes);
+	p = xdr_decode_size3(p, &result->tfiles);
+	p = xdr_decode_size3(p, &result->ffiles);
+	xdr_decode_size3(p, &result->afiles);
 	/* ignore invarsec */
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fsstat *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_fsstat3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode FSINFO reply
+ * 3.3.19  FSINFO3res
+ *
+ *	struct FSINFO3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		rtmax;
+ *		uint32		rtpref;
+ *		uint32		rtmult;
+ *		uint32		wtmax;
+ *		uint32		wtpref;
+ *		uint32		wtmult;
+ *		uint32		dtpref;
+ *		size3		maxfilesize;
+ *		nfstime3	time_delta;
+ *		uint32		properties;
+ *	};
+ *
+ *	struct FSINFO3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union FSINFO3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		FSINFO3resok	resok;
+ *	default:
+ *		FSINFO3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res)
+static int decode_fsinfo3resok(struct xdr_stream *xdr,
+			       struct nfs_fsinfo *result)
 {
-	int		status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	res->rtmax  = ntohl(*p++);
-	res->rtpref = ntohl(*p++);
-	res->rtmult = ntohl(*p++);
-	res->wtmax  = ntohl(*p++);
-	res->wtpref = ntohl(*p++);
-	res->wtmult = ntohl(*p++);
-	res->dtpref = ntohl(*p++);
-	p = xdr_decode_hyper(p, &res->maxfilesize);
-	p = xdr_decode_time3(p, &res->time_delta);
+	p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->rtmax  = be32_to_cpup(p++);
+	result->rtpref = be32_to_cpup(p++);
+	result->rtmult = be32_to_cpup(p++);
+	result->wtmax  = be32_to_cpup(p++);
+	result->wtpref = be32_to_cpup(p++);
+	result->wtmult = be32_to_cpup(p++);
+	result->dtpref = be32_to_cpup(p++);
+	p = xdr_decode_size3(p, &result->maxfilesize);
+	xdr_decode_nfstime3(p, &result->time_delta);
 
 	/* ignore properties */
-	res->lease_time = 0;
+	result->lease_time = 0;
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fsinfo *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_fsinfo3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode PATHCONF reply
+ * 3.3.20  PATHCONF3res
+ *
+ *	struct PATHCONF3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		linkmax;
+ *		uint32		name_max;
+ *		bool		no_trunc;
+ *		bool		chown_restricted;
+ *		bool		case_insensitive;
+ *		bool		case_preserving;
+ *	};
+ *
+ *	struct PATHCONF3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union PATHCONF3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		PATHCONF3resok	resok;
+ *	default:
+ *		PATHCONF3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res)
+static int decode_pathconf3resok(struct xdr_stream *xdr,
+				 struct nfs_pathconf *result)
 {
-	int		status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-	res->max_link = ntohl(*p++);
-	res->max_namelen = ntohl(*p++);
-
+	p = xdr_inline_decode(xdr, 4 * 6);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->max_link = be32_to_cpup(p++);
+	result->max_namelen = be32_to_cpup(p);
 	/* ignore remaining fields */
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_pathconf *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_pathconf3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode COMMIT reply
+ * 3.3.21  COMMIT3res
+ *
+ *	struct COMMIT3resok {
+ *		wcc_data	file_wcc;
+ *		writeverf3	verf;
+ *	};
+ *
+ *	struct COMMIT3resfail {
+ *		wcc_data	file_wcc;
+ *	};
+ *
+ *	union COMMIT3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		COMMIT3resok	resok;
+ *	default:
+ *		COMMIT3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_writeres *result)
 {
-	int		status;
+	enum nfs_stat status;
+	int error;
 
-	status = ntohl(*p++);
-	p = xdr_decode_wcc_data(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	res->verf->verifier[0] = *p++;
-	res->verf->verifier[1] = *p++;
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_writeverf3(xdr, result->verf->verifier);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
-/*
- * Decode GETACL reply
- */
-static int
-nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p,
-		   struct nfs3_getaclres *res)
+
+static inline int decode_getacl3resok(struct xdr_stream *xdr,
+				      struct nfs3_getaclres *result)
 {
-	struct xdr_buf *buf = &req->rq_rcv_buf;
-	int status = ntohl(*p++);
 	struct posix_acl **acl;
 	unsigned int *aclcnt;
-	int err, base;
+	size_t hdrlen;
+	int error;
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	res->mask = ntohl(*p++);
-	if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
-		return -EINVAL;
-	base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_uint32(xdr, &result->mask);
+	if (unlikely(error))
+		goto out;
+	error = -EINVAL;
+	if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+		goto out;
 
-	acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL;
-	aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL;
-	err = nfsacl_decode(buf, base, aclcnt, acl);
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
 
-	acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL;
-	aclcnt = (res->mask & NFS_DFACLCNT) ? &res->acl_default_count : NULL;
-	if (err > 0)
-		err = nfsacl_decode(buf, base + err, aclcnt, acl);
-	return (err > 0) ? 0 : err;
+	acl = NULL;
+	if (result->mask & NFS_ACL)
+		acl = &result->acl_access;
+	aclcnt = NULL;
+	if (result->mask & NFS_ACLCNT)
+		aclcnt = &result->acl_access_count;
+	error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
+	if (unlikely(error <= 0))
+		goto out;
+
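+	/* nfsacl_decode() returned the length of the access ACL; a
+	 * default ACL, if present, follows it immediately. */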
+	acl = NULL;
+	if (result->mask & NFS_DFACL)
+		acl = &result->acl_default;
+	aclcnt = NULL;
+	if (result->mask & NFS_DFACLCNT)
+		aclcnt = &result->acl_default_count;
+	error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
+	if (unlikely(error <= 0))
+		return error;
+	error = 0;
+out:
+	return error;
 }
 
-/*
- * Decode setacl reply.
- */
-static int
-nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_getaclres *result)
 {
-	int status = ntohl(*p++);
+	enum nfs_stat status;
+	int error;
 
-	if (status)
-		return nfs_stat_to_errno(status);
-	xdr_decode_post_op_attr(p, fattr);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_getacl3resok(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
+
+static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fattr *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_post_op_attr(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
 #endif  /* CONFIG_NFS_V3_ACL */
 
 #define PROC(proc, argtype, restype, timer)				\
 [NFS3PROC_##proc] = {							\
 	.p_proc      = NFS3PROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nfs3_xdr_##argtype,			\
-	.p_decode    = (kxdrproc_t) nfs3_xdr_##restype,			\
-	.p_arglen    = NFS3_##argtype##_sz,				\
-	.p_replen    = NFS3_##restype##_sz,				\
+	.p_encode    = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args,	\
+	.p_decode    = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res,	\
+	.p_arglen    = NFS3_##argtype##args_sz,				\
+	.p_replen    = NFS3_##restype##res_sz,				\
 	.p_timer     = timer,						\
 	.p_statidx   = NFS3PROC_##proc,					\
 	.p_name      = #proc,						\
 	}
 
 struct rpc_procinfo	nfs3_procedures[] = {
-  PROC(GETATTR,		fhandle,	attrstat, 1),
-  PROC(SETATTR, 	sattrargs,	wccstat, 0),
-  PROC(LOOKUP,		diropargs,	lookupres, 2),
-  PROC(ACCESS,		accessargs,	accessres, 1),
-  PROC(READLINK,	readlinkargs,	readlinkres, 3),
-  PROC(READ,		readargs,	readres, 3),
-  PROC(WRITE,		writeargs,	writeres, 4),
-  PROC(CREATE,		createargs,	createres, 0),
-  PROC(MKDIR,		mkdirargs,	createres, 0),
-  PROC(SYMLINK,		symlinkargs,	createres, 0),
-  PROC(MKNOD,		mknodargs,	createres, 0),
-  PROC(REMOVE,		removeargs,	removeres, 0),
-  PROC(RMDIR,		diropargs,	wccstat, 0),
-  PROC(RENAME,		renameargs,	renameres, 0),
-  PROC(LINK,		linkargs,	linkres, 0),
-  PROC(READDIR,		readdirargs,	readdirres, 3),
-  PROC(READDIRPLUS,	readdirargs,	readdirres, 3),
-  PROC(FSSTAT,		fhandle,	fsstatres, 0),
-  PROC(FSINFO,  	fhandle,	fsinfores, 0),
-  PROC(PATHCONF,	fhandle,	pathconfres, 0),
-  PROC(COMMIT,		commitargs,	commitres, 5),
+	PROC(GETATTR,		getattr,	getattr,	1),
+	PROC(SETATTR,		setattr,	setattr,	0),
+	PROC(LOOKUP,		lookup,		lookup,		2),
+	PROC(ACCESS,		access,		access,		1),
+	PROC(READLINK,		readlink,	readlink,	3),
+	PROC(READ,		read,		read,		3),
+	PROC(WRITE,		write,		write,		4),
+	PROC(CREATE,		create,		create,		0),
+	PROC(MKDIR,		mkdir,		create,		0),
+	PROC(SYMLINK,		symlink,	create,		0),
+	PROC(MKNOD,		mknod,		create,		0),
+	PROC(REMOVE,		remove,		remove,		0),
+	PROC(RMDIR,		lookup,		setattr,	0),
+	PROC(RENAME,		rename,		rename,		0),
+	PROC(LINK,		link,		link,		0),
+	PROC(READDIR,		readdir,	readdir,	3),
+	PROC(READDIRPLUS,	readdirplus,	readdir,	3),
+	PROC(FSSTAT,		getattr,	fsstat,		0),
+	PROC(FSINFO,		getattr,	fsinfo,		0),
+	PROC(PATHCONF,		getattr,	pathconf,	0),
+	PROC(COMMIT,		commit,		commit,		5),
 };
 
 struct rpc_version		nfs_version3 = {
@@ -1185,8 +2471,8 @@
 static struct rpc_procinfo	nfs3_acl_procedures[] = {
 	[ACLPROC3_GETACL] = {
 		.p_proc = ACLPROC3_GETACL,
-		.p_encode = (kxdrproc_t) nfs3_xdr_getaclargs,
-		.p_decode = (kxdrproc_t) nfs3_xdr_getaclres,
+		.p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
+		.p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
 		.p_arglen = ACL3_getaclargs_sz,
 		.p_replen = ACL3_getaclres_sz,
 		.p_timer = 1,
@@ -1194,8 +2480,8 @@
 	},
 	[ACLPROC3_SETACL] = {
 		.p_proc = ACLPROC3_SETACL,
-		.p_encode = (kxdrproc_t) nfs3_xdr_setaclargs,
-		.p_decode = (kxdrproc_t) nfs3_xdr_setaclres,
+		.p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
+		.p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
 		.p_arglen = ACL3_setaclargs_sz,
 		.p_replen = ACL3_setaclres_sz,
 		.p_timer = 0,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9fa4963..1be36cf 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@
 	NFS4CLNT_RECLAIM_REBOOT,
 	NFS4CLNT_RECLAIM_NOGRACE,
 	NFS4CLNT_DELEGRETURN,
+	NFS4CLNT_LAYOUTRECALL,
 	NFS4CLNT_SESSION_RESET,
 	NFS4CLNT_RECALL_SLOT,
 };
@@ -109,7 +110,7 @@
 struct nfs4_state_owner {
 	struct nfs_unique_id so_owner_id;
 	struct nfs_server    *so_server;
-	struct rb_node	     so_client_node;
+	struct rb_node	     so_server_node;
 
 	struct rpc_cred	     *so_cred;	 /* Associated cred */
 
@@ -227,12 +228,6 @@
 extern const struct dentry_operations nfs4_dentry_operations;
 extern const struct inode_operations nfs4_dir_inode_operations;
 
-/* inode.c */
-extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
-extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
-extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
-
-
 /* nfs4proc.c */
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
@@ -241,11 +236,12 @@
 extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
+extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
 extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
 		struct nfs4_fs_locations *fs_locations, struct page *page);
 extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
+extern const struct xattr_handler *nfs4_xattr_handlers[];
 
 #if defined(CONFIG_NFS_V4_1)
 static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
@@ -302,6 +298,11 @@
 #if defined(CONFIG_NFS_V4_1)
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+#else
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
@@ -311,10 +312,9 @@
 extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
-extern void nfs4_schedule_state_recovery(struct nfs_client *);
+extern void nfs4_schedule_lease_recovery(struct nfs_client *);
 extern void nfs4_schedule_state_manager(struct nfs_client *);
-extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
-extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
+extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
@@ -331,7 +331,6 @@
 extern const nfs4_stateid zero_stateid;
 
 /* nfs4xdr.c */
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
 extern struct rpc_procinfo nfs4_procedures[];
 
 struct nfs4_mount_data;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 2e92f0d..23f930c 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,7 +82,7 @@
 {
 	struct nfs4_file_layout_dsaddr *dsaddr;
 	int status = -EINVAL;
-	struct nfs_server *nfss = NFS_SERVER(lo->inode);
+	struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
 
 	dprintk("--> %s\n", __func__);
 
@@ -101,7 +101,7 @@
 	/* find and reference the deviceid */
 	dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
 	if (dsaddr == NULL) {
-		dsaddr = get_device_info(lo->inode, id);
+		dsaddr = get_device_info(lo->plh_inode, id);
 		if (dsaddr == NULL)
 			goto out;
 	}
@@ -243,7 +243,7 @@
 static void
 filelayout_free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode);
+	struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->plh_inode);
 	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 
 	dprintk("--> %s\n", __func__);
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 51fe64a..b73c343 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -214,17 +214,26 @@
 
 	/* ipv6 length plus port is legal */
 	if (rlen > INET6_ADDRSTRLEN + 8) {
-		dprintk("%s Invalid address, length %d\n", __func__,
+		dprintk("%s: Invalid address, length %d\n", __func__,
 			rlen);
 		goto out_err;
 	}
 	buf = kmalloc(rlen + 1, GFP_KERNEL);
+	if (!buf) {
+		dprintk("%s: Not enough memory\n", __func__);
+		goto out_err;
+	}
 	buf[rlen] = '\0';
 	memcpy(buf, r_addr, rlen);
 
 	/* replace the port dots with dashes for the in4_pton() delimiter*/
 	for (i = 0; i < 2; i++) {
 		char *res = strrchr(buf, '.');
+		if (!res) {
+			dprintk("%s: Failed finding expected dots in port\n",
+				__func__);
+			goto out_free;
+		}
 		*res = '-';
 	}
 
@@ -240,7 +249,7 @@
 	port = htons((tmp[0] << 8) | (tmp[1]));
 
 	ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
-	dprintk("%s Decoded address and port %s\n", __func__, buf);
+	dprintk("%s: Decoded address and port %s\n", __func__, buf);
 out_free:
 	kfree(buf);
 out_err:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4435e5e..0a07e35 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -49,6 +49,8 @@
 #include <linux/mount.h>
 #include <linux/module.h>
 #include <linux/sunrpc/bc_xprt.h>
+#include <linux/xattr.h>
+#include <linux/utsname.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -254,12 +256,13 @@
 		case -NFS4ERR_OPENMODE:
 			if (state == NULL)
 				break;
-			nfs4_state_mark_reclaim_nograce(clp, state);
-			goto do_state_recovery;
+			nfs4_schedule_stateid_recovery(server, state);
+			goto wait_on_recovery;
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_EXPIRED:
-			goto do_state_recovery;
+			nfs4_schedule_lease_recovery(clp);
+			goto wait_on_recovery;
 #if defined(CONFIG_NFS_V4_1)
 		case -NFS4ERR_BADSESSION:
 		case -NFS4ERR_BADSLOT:
@@ -270,7 +273,7 @@
 		case -NFS4ERR_SEQ_MISORDERED:
 			dprintk("%s ERROR: %d Reset session\n", __func__,
 				errorcode);
-			nfs4_schedule_state_recovery(clp);
+			nfs4_schedule_session_recovery(clp->cl_session);
 			exception->retry = 1;
 			break;
 #endif /* defined(CONFIG_NFS_V4_1) */
@@ -293,8 +296,7 @@
 	}
 	/* We failed to handle the error */
 	return nfs4_map_errors(ret);
-do_state_recovery:
-	nfs4_schedule_state_recovery(clp);
+wait_on_recovery:
 	ret = nfs4_wait_clnt_recover(clp);
 	if (ret == 0)
 		exception->retry = 1;
@@ -355,9 +357,9 @@
 }
 
 /*
- * Signal state manager thread if session is drained
+ * Signal state manager thread if session fore channel is drained
  */
-static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
+static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
 {
 	struct rpc_task *task;
 
@@ -371,8 +373,20 @@
 	if (ses->fc_slot_table.highest_used_slotid != -1)
 		return;
 
-	dprintk("%s COMPLETE: Session Drained\n", __func__);
-	complete(&ses->complete);
+	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
+	complete(&ses->fc_slot_table.complete);
+}
+
+/*
+ * Signal state manager thread if session back channel is drained
+ */
+void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
+{
+	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
+	    ses->bc_slot_table.highest_used_slotid != -1)
+		return;
+	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
+	complete(&ses->bc_slot_table.complete);
 }
 
 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
@@ -389,7 +403,7 @@
 
 	spin_lock(&tbl->slot_tbl_lock);
 	nfs4_free_slot(tbl, res->sr_slot);
-	nfs41_check_drain_session_complete(res->sr_session);
+	nfs4_check_drain_fc_complete(res->sr_session);
 	spin_unlock(&tbl->slot_tbl_lock);
 	res->sr_slot = NULL;
 }
@@ -421,8 +435,8 @@
 		clp = res->sr_session->clp;
 		do_renew_lease(clp, timestamp);
 		/* Check sequence flags */
-		if (atomic_read(&clp->cl_count) > 1)
-			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+		if (res->sr_status_flags != 0)
+			nfs4_schedule_lease_recovery(clp);
 		break;
 	case -NFS4ERR_DELAY:
 		/* The server detected a resend of the RPC call and
@@ -1241,14 +1255,13 @@
 			case -NFS4ERR_BAD_HIGH_SLOT:
 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 			case -NFS4ERR_DEADSESSION:
-				nfs4_schedule_state_recovery(
-					server->nfs_client);
+				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
 				goto out;
 			case -NFS4ERR_STALE_CLIENTID:
 			case -NFS4ERR_STALE_STATEID:
 			case -NFS4ERR_EXPIRED:
 				/* Don't recall a delegation if it was lost */
-				nfs4_schedule_state_recovery(server->nfs_client);
+				nfs4_schedule_lease_recovery(server->nfs_client);
 				goto out;
 			case -ERESTARTSYS:
 				/*
@@ -1257,7 +1270,7 @@
 				 */
 			case -NFS4ERR_ADMIN_REVOKED:
 			case -NFS4ERR_BAD_STATEID:
-				nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+				nfs4_schedule_stateid_recovery(server, state);
 			case -EKEYEXPIRED:
 				/*
 				 * User RPCSEC_GSS context has expired.
@@ -1573,7 +1586,7 @@
 		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
 		    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
 			break;
-		nfs4_schedule_state_recovery(clp);
+		nfs4_schedule_state_manager(clp);
 		ret = -EIO;
 	}
 	return ret;
@@ -1826,6 +1839,8 @@
 	struct nfs_closeres res;
 	struct nfs_fattr fattr;
 	unsigned long timestamp;
+	bool roc;
+	u32 roc_barrier;
 };
 
 static void nfs4_free_closedata(void *data)
@@ -1833,6 +1848,8 @@
 	struct nfs4_closedata *calldata = data;
 	struct nfs4_state_owner *sp = calldata->state->owner;
 
+	if (calldata->roc)
+		pnfs_roc_release(calldata->state->inode);
 	nfs4_put_open_state(calldata->state);
 	nfs_free_seqid(calldata->arg.seqid);
 	nfs4_put_state_owner(sp);
@@ -1865,6 +1882,9 @@
 	 */
 	switch (task->tk_status) {
 		case 0:
+			if (calldata->roc)
+				pnfs_roc_set_barrier(state->inode,
+						     calldata->roc_barrier);
 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
 			renew_lease(server, calldata->timestamp);
 			nfs4_close_clear_stateid_flags(state,
@@ -1917,8 +1937,15 @@
 		return;
 	}
 
-	if (calldata->arg.fmode == 0)
+	if (calldata->arg.fmode == 0) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+		if (calldata->roc &&
+		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
+			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
+				     task, NULL);
+			return;
+		}
+	}
 
 	nfs_fattr_init(calldata->res.fattr);
 	calldata->timestamp = jiffies;
@@ -1946,7 +1973,7 @@
  *
  * NOTE: Caller must be holding the sp->so_owner semaphore!
  */
-int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
+int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
 	struct nfs4_closedata *calldata;
@@ -1981,11 +2008,12 @@
 	calldata->res.fattr = &calldata->fattr;
 	calldata->res.seqid = calldata->arg.seqid;
 	calldata->res.server = server;
+	calldata->roc = roc;
 	path_get(path);
 	calldata->path = *path;
 
-	msg.rpc_argp = &calldata->arg,
-	msg.rpc_resp = &calldata->res,
+	msg.rpc_argp = &calldata->arg;
+	msg.rpc_resp = &calldata->res;
 	task_setup_data.callback_data = calldata;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
@@ -1998,6 +2026,8 @@
 out_free_calldata:
 	kfree(calldata);
 out:
+	if (roc)
+		pnfs_roc_release(state->inode);
 	nfs4_put_open_state(state);
 	nfs4_put_state_owner(sp);
 	return status;
@@ -2486,6 +2516,7 @@
 		path = &ctx->path;
 		fmode = ctx->mode;
 	}
+	sattr->ia_mode &= ~current_umask();
 	state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
 	d_drop(dentry);
 	if (IS_ERR(state)) {
@@ -2816,6 +2847,8 @@
 {
 	struct nfs4_exception exception = { };
 	int err;
+
+	sattr->ia_mode &= ~current_umask();
 	do {
 		err = nfs4_handle_exception(NFS_SERVER(dir),
 				_nfs4_proc_mkdir(dir, dentry, sattr),
@@ -2916,6 +2949,8 @@
 {
 	struct nfs4_exception exception = { };
 	int err;
+
+	sattr->ia_mode &= ~current_umask();
 	do {
 		err = nfs4_handle_exception(NFS_SERVER(dir),
 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
@@ -3142,7 +3177,7 @@
 	if (task->tk_status < 0) {
 		/* Unless we're shutting down, schedule state recovery! */
 		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
-			nfs4_schedule_state_recovery(clp);
+			nfs4_schedule_lease_recovery(clp);
 		return;
 	}
 	do_renew_lease(clp, timestamp);
@@ -3216,6 +3251,35 @@
 	}
 }
 
+static int buf_to_pages_noslab(const void *buf, size_t buflen,
+		struct page **pages, unsigned int *pgbase)
+{
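+	/*
+	 * Copy the flat buffer into freshly allocated pages so they can
+	 * be handed to the RPC layer and released individually once the
+	 * request has been transmitted.  Returns the number of pages
+	 * filled, or -ENOMEM on allocation failure.
+	 */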
+	struct page *newpage, **spages;
+	int rc = 0;
+	size_t len;
+	spages = pages;
+
+	do {
+		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
+		newpage = alloc_page(GFP_KERNEL);
+
+		if (newpage == NULL)
+			goto unwind;
+		memcpy(page_address(newpage), buf, len);
+		buf += len;
+		buflen -= len;
+		*pages++ = newpage;
+		rc++;
+	} while (buflen != 0);
+
+	return rc;
+
+unwind:
+	for (; rc > 0; rc--)
+		__free_page(spages[rc-1]);
+	return -ENOMEM;
+}
+
 struct nfs4_cached_acl {
 	int cached;
 	size_t len;
@@ -3384,13 +3448,23 @@
 		.rpc_argp	= &arg,
 		.rpc_resp	= &res,
 	};
-	int ret;
+	int ret, i;
 
 	if (!nfs4_server_supports_acls(server))
 		return -EOPNOTSUPP;
+	i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
+	if (i < 0)
+		return i;
 	nfs_inode_return_delegation(inode);
-	buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
 	ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
+
+	/*
+	 * Free each page after tx, so the only ref left is
+	 * held by the network stack
+	 */
+	for (; i > 0; i--)
+		put_page(pages[i-1]);
+
 	/*
 	 * Acl update can result in inode attribute update.
 	 * so mark the attribute cache invalid.
@@ -3428,12 +3502,13 @@
 		case -NFS4ERR_OPENMODE:
 			if (state == NULL)
 				break;
-			nfs4_state_mark_reclaim_nograce(clp, state);
-			goto do_state_recovery;
+			nfs4_schedule_stateid_recovery(server, state);
+			goto wait_on_recovery;
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_EXPIRED:
-			goto do_state_recovery;
+			nfs4_schedule_lease_recovery(clp);
+			goto wait_on_recovery;
 #if defined(CONFIG_NFS_V4_1)
 		case -NFS4ERR_BADSESSION:
 		case -NFS4ERR_BADSLOT:
@@ -3444,7 +3519,7 @@
 		case -NFS4ERR_SEQ_MISORDERED:
 			dprintk("%s ERROR %d, Reset session\n", __func__,
 				task->tk_status);
-			nfs4_schedule_state_recovery(clp);
+			nfs4_schedule_session_recovery(clp->cl_session);
 			task->tk_status = 0;
 			return -EAGAIN;
 #endif /* CONFIG_NFS_V4_1 */
@@ -3461,9 +3536,8 @@
 	}
 	task->tk_status = nfs4_map_errors(task->tk_status);
 	return 0;
-do_state_recovery:
+wait_on_recovery:
 	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
-	nfs4_schedule_state_recovery(clp);
 	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
 		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
 	task->tk_status = 0;
@@ -3478,6 +3552,7 @@
 	struct nfs4_setclientid setclientid = {
 		.sc_verifier = &sc_verifier,
 		.sc_prog = program,
+		.sc_cb_ident = clp->cl_cb_ident,
 	};
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
@@ -3517,7 +3592,7 @@
 		if (signalled())
 			break;
 		if (loop++ & 1)
-			ssleep(clp->cl_lease_time + 1);
+			ssleep(clp->cl_lease_time / HZ + 1);
 		else
 			if (++clp->cl_id_uniquifier == 0)
 				break;
@@ -3663,8 +3738,8 @@
 	data->rpc_status = 0;
 
 	task_setup_data.callback_data = data;
-	msg.rpc_argp = &data->args,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->args;
+	msg.rpc_resp = &data->res;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
@@ -3743,6 +3818,7 @@
 		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
 	arg.lock_owner.id = lsp->ls_id.id;
+	arg.lock_owner.s_dev = server->s_dev;
 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
 	switch (status) {
 		case 0:
@@ -3908,8 +3984,8 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	msg.rpc_argp = &data->arg,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->arg;
+	msg.rpc_resp = &data->res;
 	task_setup_data.callback_data = data;
 	return rpc_run_task(&task_setup_data);
 }
@@ -3988,6 +4064,7 @@
 	p->arg.lock_stateid = &lsp->ls_stateid;
 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
 	p->arg.lock_owner.id = lsp->ls_id.id;
+	p->arg.lock_owner.s_dev = server->s_dev;
 	p->res.lock_seqid = p->arg.lock_seqid;
 	p->lsp = lsp;
 	p->server = server;
@@ -4071,7 +4148,7 @@
 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
 				data->arg.lock_seqid);
 		if (!IS_ERR(task))
-			rpc_put_task(task);
+			rpc_put_task_async(task);
 		dprintk("%s: cancelling lock!\n", __func__);
 	} else
 		nfs_free_seqid(data->arg.lock_seqid);
@@ -4095,23 +4172,18 @@
 
 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
 {
-	struct nfs_client *clp = server->nfs_client;
-	struct nfs4_state *state = lsp->ls_state;
-
 	switch (error) {
 	case -NFS4ERR_ADMIN_REVOKED:
 	case -NFS4ERR_BAD_STATEID:
-	case -NFS4ERR_EXPIRED:
+		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
 		if (new_lock_owner != 0 ||
 		   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
-			nfs4_state_mark_reclaim_nograce(clp, state);
-		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
 		break;
 	case -NFS4ERR_STALE_STATEID:
-		if (new_lock_owner != 0 ||
-		    (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
-			nfs4_state_mark_reclaim_reboot(clp, state);
 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+	case -NFS4ERR_EXPIRED:
+		nfs4_schedule_lease_recovery(server->nfs_client);
 	};
 }
 
@@ -4145,8 +4217,8 @@
 			data->arg.reclaim = NFS_LOCK_RECLAIM;
 		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
 	}
-	msg.rpc_argp = &data->arg,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->arg;
+	msg.rpc_resp = &data->res;
 	task_setup_data.callback_data = data;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
@@ -4327,12 +4399,14 @@
 			case -NFS4ERR_EXPIRED:
 			case -NFS4ERR_STALE_CLIENTID:
 			case -NFS4ERR_STALE_STATEID:
+				nfs4_schedule_lease_recovery(server->nfs_client);
+				goto out;
 			case -NFS4ERR_BADSESSION:
 			case -NFS4ERR_BADSLOT:
 			case -NFS4ERR_BAD_HIGH_SLOT:
 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 			case -NFS4ERR_DEADSESSION:
-				nfs4_schedule_state_recovery(server->nfs_client);
+				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
 				goto out;
 			case -ERESTARTSYS:
 				/*
@@ -4342,7 +4416,7 @@
 			case -NFS4ERR_ADMIN_REVOKED:
 			case -NFS4ERR_BAD_STATEID:
 			case -NFS4ERR_OPENMODE:
-				nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+				nfs4_schedule_stateid_recovery(server, state);
 				err = 0;
 				goto out;
 			case -EKEYEXPIRED:
@@ -4392,48 +4466,43 @@
 		return;
 	args->lock_owner.clientid = server->nfs_client->cl_clientid;
 	args->lock_owner.id = lsp->ls_id.id;
+	args->lock_owner.s_dev = server->s_dev;
 	msg.rpc_argp = args;
 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
 }
 
 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
 
-int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
-		size_t buflen, int flags)
+static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
+				   const void *buf, size_t buflen,
+				   int flags, int type)
 {
-	struct inode *inode = dentry->d_inode;
+	if (strcmp(key, "") != 0)
+		return -EINVAL;
 
-	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
-		return -EOPNOTSUPP;
-
-	return nfs4_proc_set_acl(inode, buf, buflen);
+	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
 }
 
-/* The getxattr man page suggests returning -ENODATA for unknown attributes,
- * and that's what we'll do for e.g. user attributes that haven't been set.
- * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
- * attributes in kernel-managed attribute namespaces. */
-ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
-		size_t buflen)
+static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
+				   void *buf, size_t buflen, int type)
 {
-	struct inode *inode = dentry->d_inode;
+	if (strcmp(key, "") != 0)
+		return -EINVAL;
 
-	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
-		return -EOPNOTSUPP;
-
-	return nfs4_proc_get_acl(inode, buf, buflen);
+	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
 }
 
-ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
+static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
+				       size_t list_len, const char *name,
+				       size_t name_len, int type)
 {
-	size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
+	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
 
 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
 		return 0;
-	if (buf && buflen < len)
-		return -ERANGE;
-	if (buf)
-		memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
+
+	if (list && len <= list_len)
+		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
 	return len;
 }
 
@@ -4486,6 +4555,25 @@
 
 #ifdef CONFIG_NFS_V4_1
 /*
+ * Check the exchange flags returned by the server: fail if any invalid
+ * flags are set, if both the PNFS and NON_PNFS flags are set, or if none
+ * of the NON_PNFS, PNFS, or DS flags is set.
+ */
+static int nfs4_check_cl_exchange_flags(u32 flags)
+{
+	if (flags & ~EXCHGID4_FLAG_MASK_R)
+		goto out_inval;
+	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
+	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
+		goto out_inval;
+	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
+		goto out_inval;
+	return NFS_OK;
+out_inval:
+	return -NFS4ERR_INVAL;
+}
+
+/*
  * nfs4_proc_exchange_id()
  *
  * Since the clientid has expired, all compounds using sessions
@@ -4498,7 +4586,7 @@
 	nfs4_verifier verifier;
 	struct nfs41_exchange_id_args args = {
 		.client = clp,
-		.flags = clp->cl_exchange_flags,
+		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
 	};
 	struct nfs41_exchange_id_res res = {
 		.client = clp,
@@ -4515,34 +4603,21 @@
 	dprintk("--> %s\n", __func__);
 	BUG_ON(clp == NULL);
 
-	/* Remove server-only flags */
-	args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
-
 	p = (u32 *)verifier.data;
 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
 	args.verifier = &verifier;
 
-	while (1) {
-		args.id_len = scnprintf(args.id, sizeof(args.id),
-					"%s/%s %u",
-					clp->cl_ipaddr,
-					rpc_peeraddr2str(clp->cl_rpcclient,
-							 RPC_DISPLAY_ADDR),
-					clp->cl_id_uniquifier);
+	args.id_len = scnprintf(args.id, sizeof(args.id),
+				"%s/%s.%s/%u",
+				clp->cl_ipaddr,
+				init_utsname()->nodename,
+				init_utsname()->domainname,
+				clp->cl_rpcclient->cl_auth->au_flavor);
 
-		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
-
-		if (status != -NFS4ERR_CLID_INUSE)
-			break;
-
-		if (signalled())
-			break;
-
-		if (++clp->cl_id_uniquifier == 0)
-			break;
-	}
-
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+	if (!status)
+		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
 }
@@ -4776,17 +4851,17 @@
 	if (!session)
 		return NULL;
 
-	init_completion(&session->complete);
-
 	tbl = &session->fc_slot_table;
 	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+	init_completion(&tbl->complete);
 
 	tbl = &session->bc_slot_table;
 	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+	init_completion(&tbl->complete);
 
 	session->session_state = 1<<NFS4_SESSION_INITING;
 
@@ -4948,10 +5023,20 @@
 	int status;
 	unsigned *ptr;
 	struct nfs4_session *session = clp->cl_session;
+	long timeout = 0;
+	int err;
 
 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
 
-	status = _nfs4_proc_create_session(clp);
+	do {
+		status = _nfs4_proc_create_session(clp);
+		if (status == -NFS4ERR_DELAY) {
+			err = nfs4_delay(clp->cl_rpcclient, &timeout);
+			if (err)
+				status = err;
+		}
+	} while (status == -NFS4ERR_DELAY);
+
 	if (status)
 		goto out;
 
@@ -5060,7 +5145,7 @@
 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
 		return -EAGAIN;
 	default:
-		nfs4_schedule_state_recovery(clp);
+		nfs4_schedule_lease_recovery(clp);
 	}
 	return 0;
 }
@@ -5147,7 +5232,7 @@
 	if (IS_ERR(task))
 		ret = PTR_ERR(task);
 	else
-		rpc_put_task(task);
+		rpc_put_task_async(task);
 	dprintk("<-- %s status=%d\n", __func__, ret);
 	return ret;
 }
@@ -5163,8 +5248,13 @@
 		goto out;
 	}
 	ret = rpc_wait_for_completion_task(task);
-	if (!ret)
+	if (!ret) {
+		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
+
+		if (task->tk_status == 0)
+			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
 		ret = task->tk_status;
+	}
 	rpc_put_task(task);
 out:
 	dprintk("<-- %s status=%d\n", __func__, ret);
@@ -5201,7 +5291,7 @@
 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
 		return -EAGAIN;
 	default:
-		nfs4_schedule_state_recovery(clp);
+		nfs4_schedule_lease_recovery(clp);
 	}
 	return 0;
 }
@@ -5269,6 +5359,9 @@
 		status = PTR_ERR(task);
 		goto out;
 	}
+	status = nfs4_wait_for_completion_rpc_task(task);
+	if (status == 0)
+		status = task->tk_status;
 	rpc_put_task(task);
 	return 0;
 out:
@@ -5280,13 +5373,23 @@
 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_layoutget *lgp = calldata;
-	struct inode *ino = lgp->args.inode;
-	struct nfs_server *server = NFS_SERVER(ino);
+	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
 
 	dprintk("--> %s\n", __func__);
+	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
+	 * right now covering the LAYOUTGET we are about to send.
+	 * However, that is not so catastrophic, and there seems
+	 * to be no way to prevent it completely.
+	 */
 	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
 				&lgp->res.seq_res, 0, task))
 		return;
+	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
+					  NFS_I(lgp->args.inode)->layout,
+					  lgp->args.ctx->state)) {
+		rpc_exit(task, NFS4_OK);
+		return;
+	}
 	rpc_call_start(task);
 }
 
@@ -5313,7 +5416,6 @@
 			return;
 		}
 	}
-	lgp->status = task->tk_status;
 	dprintk("<-- %s\n", __func__);
 }
 
@@ -5322,7 +5424,6 @@
 	struct nfs4_layoutget *lgp = calldata;
 
 	dprintk("--> %s\n", __func__);
-	put_layout_hdr(lgp->args.inode);
 	if (lgp->res.layout.buf != NULL)
 		free_page((unsigned long) lgp->res.layout.buf);
 	put_nfs_open_context(lgp->args.ctx);
@@ -5367,13 +5468,10 @@
 	if (IS_ERR(task))
 		return PTR_ERR(task);
 	status = nfs4_wait_for_completion_rpc_task(task);
-	if (status != 0)
-		goto out;
-	status = lgp->status;
-	if (status != 0)
-		goto out;
-	status = pnfs_layout_process(lgp);
-out:
+	if (status == 0)
+		status = task->tk_status;
+	if (status == 0)
+		status = pnfs_layout_process(lgp);
 	rpc_put_task(task);
 	dprintk("<-- %s status=%d\n", __func__, status);
 	return status;
@@ -5504,9 +5602,10 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr	= nfs4_getxattr,
-	.setxattr	= nfs4_setxattr,
-	.listxattr	= nfs4_listxattr,
+	.getxattr	= generic_getxattr,
+	.setxattr	= generic_setxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
 };
 
 const struct nfs_rpc_ops nfs_v4_clientops = {
@@ -5551,6 +5650,18 @@
 	.open_context	= nfs4_atomic_open,
 };
 
+static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
+	.prefix	= XATTR_NAME_NFSV4_ACL,
+	.list	= nfs4_xattr_list_nfs4_acl,
+	.get	= nfs4_xattr_get_nfs4_acl,
+	.set	= nfs4_xattr_set_nfs4_acl,
+};
+
+const struct xattr_handler *nfs4_xattr_handlers[] = {
+	&nfs4_xattr_nfs4_acl_handler,
+	NULL
+};
+
 /*
  * Local variables:
  *  c-basic-offset: 8
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 72b6c58..402143d 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -63,9 +63,14 @@
 
 	ops = clp->cl_mvops->state_renewal_ops;
 	dprintk("%s: start\n", __func__);
-	/* Are there any active superblocks? */
-	if (list_empty(&clp->cl_superblocks))
+
+	rcu_read_lock();
+	if (list_empty(&clp->cl_superblocks)) {
+		rcu_read_unlock();
 		goto out;
+	}
+	rcu_read_unlock();
+
 	spin_lock(&clp->cl_lock);
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;
@@ -75,7 +80,7 @@
 		cred = ops->get_state_renewal_cred_locked(clp);
 		spin_unlock(&clp->cl_lock);
 		if (cred == NULL) {
-			if (list_empty(&clp->cl_delegations)) {
+			if (!nfs_delegations_present(clp)) {
 				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
 				goto out;
 			}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f575a31..0592288 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -105,14 +105,17 @@
 		put_rpccred(cred);
 }
 
-struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_renew_cred_server_locked(struct nfs_server *server)
 {
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
-	struct rpc_cred *cred = NULL;
 
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		if (list_empty(&sp->so_states))
 			continue;
 		cred = get_rpccred(sp->so_cred);
@@ -121,6 +124,28 @@
 	return cred;
 }
 
+/**
+ * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ * Caller must hold clp->cl_lock.
+ */
+struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+{
+	struct rpc_cred *cred = NULL;
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_renew_cred_server_locked(server);
+		if (cred != NULL)
+			break;
+	}
+	rcu_read_unlock();
+	return cred;
+}
+
 #if defined(CONFIG_NFS_V4_1)
 
 static int nfs41_setup_state_renewal(struct nfs_client *clp)
@@ -142,6 +167,11 @@
 	return status;
 }
 
+/*
+ * The back channel returns NFS4ERR_DELAY for new requests while
+ * NFS4_SESSION_DRAINING is set, so there is no work to be done when
+ * draining ends.
+ */
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
 	struct nfs4_session *ses = clp->cl_session;
@@ -165,22 +195,32 @@
 	}
 }
 
-static int nfs4_begin_drain_session(struct nfs_client *clp)
+static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
 {
-	struct nfs4_session *ses = clp->cl_session;
-	struct nfs4_slot_table *tbl = &ses->fc_slot_table;
-
 	spin_lock(&tbl->slot_tbl_lock);
-	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
 	if (tbl->highest_used_slotid != -1) {
-		INIT_COMPLETION(ses->complete);
+		INIT_COMPLETION(tbl->complete);
 		spin_unlock(&tbl->slot_tbl_lock);
-		return wait_for_completion_interruptible(&ses->complete);
+		return wait_for_completion_interruptible(&tbl->complete);
 	}
 	spin_unlock(&tbl->slot_tbl_lock);
 	return 0;
 }
 
+static int nfs4_begin_drain_session(struct nfs_client *clp)
+{
+	struct nfs4_session *ses = clp->cl_session;
+	int ret = 0;
+
+	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
+	/* back channel */
+	ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+	if (ret)
+		return ret;
+	/* fore channel */
+	return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+}
+
 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 {
 	int status;
@@ -210,28 +250,56 @@
 
 #endif /* CONFIG_NFS_V4_1 */
 
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_setclientid_cred_server(struct nfs_server *server)
 {
+	struct nfs_client *clp = server->nfs_client;
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
-	struct rpc_cred *cred;
 
 	spin_lock(&clp->cl_lock);
-	cred = nfs4_get_machine_cred_locked(clp);
-	if (cred != NULL)
-		goto out;
-	pos = rb_first(&clp->cl_state_owners);
+	pos = rb_first(&server->state_owners);
 	if (pos != NULL) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		cred = get_rpccred(sp->so_cred);
 	}
-out:
 	spin_unlock(&clp->cl_lock);
 	return cred;
 }
 
-static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
-		__u64 minval, int maxbits)
+/**
+ * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+	struct rpc_cred *cred;
+
+	spin_lock(&clp->cl_lock);
+	cred = nfs4_get_machine_cred_locked(clp);
+	spin_unlock(&clp->cl_lock);
+	if (cred != NULL)
+		goto out;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_setclientid_cred_server(server);
+		if (cred != NULL)
+			break;
+	}
+	rcu_read_unlock();
+
+out:
+	return cred;
+}
+
+static void nfs_alloc_unique_id_locked(struct rb_root *root,
+				       struct nfs_unique_id *new,
+				       __u64 minval, int maxbits)
 {
 	struct rb_node **p, *parent;
 	struct nfs_unique_id *pos;
@@ -286,16 +354,15 @@
 }
 
 static struct nfs4_state_owner *
-nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
 {
-	struct nfs_client *clp = server->nfs_client;
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp, *res = NULL;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
 		if (server < sp->so_server) {
 			p = &parent->rb_left;
@@ -319,24 +386,17 @@
 }
 
 static struct nfs4_state_owner *
-nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
+nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
 {
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct nfs_server *server = new->so_server;
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
-		if (new->so_server < sp->so_server) {
-			p = &parent->rb_left;
-			continue;
-		}
-		if (new->so_server > sp->so_server) {
-			p = &parent->rb_right;
-			continue;
-		}
 		if (new->so_cred < sp->so_cred)
 			p = &parent->rb_left;
 		else if (new->so_cred > sp->so_cred)
@@ -346,18 +406,21 @@
 			return sp;
 		}
 	}
-	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
-	rb_link_node(&new->so_client_node, parent, p);
-	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
+	nfs_alloc_unique_id_locked(&server->openowner_id,
+					&new->so_owner_id, 1, 64);
+	rb_link_node(&new->so_server_node, parent, p);
+	rb_insert_color(&new->so_server_node, &server->state_owners);
 	return new;
 }
 
 static void
-nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
+nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node))
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
+	struct nfs_server *server = sp->so_server;
+
+	if (!RB_EMPTY_NODE(&sp->so_server_node))
+		rb_erase(&sp->so_server_node, &server->state_owners);
+	nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
 }
 
 /*
@@ -386,23 +449,32 @@
 static void
 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
-		struct nfs_client *clp = sp->so_server->nfs_client;
+	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+		struct nfs_server *server = sp->so_server;
+		struct nfs_client *clp = server->nfs_client;
 
 		spin_lock(&clp->cl_lock);
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-		RB_CLEAR_NODE(&sp->so_client_node);
+		rb_erase(&sp->so_server_node, &server->state_owners);
+		RB_CLEAR_NODE(&sp->so_server_node);
 		spin_unlock(&clp->cl_lock);
 	}
 }
 
-struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+/**
+ * nfs4_get_state_owner - Look up a state owner given a credential
+ * @server: nfs_server to search
+ * @cred: RPC credential to match
+ *
+ * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
+ */
+struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
+					      struct rpc_cred *cred)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *new;
 
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_find_state_owner(server, cred);
+	sp = nfs4_find_state_owner_locked(server, cred);
 	spin_unlock(&clp->cl_lock);
 	if (sp != NULL)
 		return sp;
@@ -412,7 +484,7 @@
 	new->so_server = server;
 	new->so_cred = cred;
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_insert_state_owner(clp, new);
+	sp = nfs4_insert_state_owner_locked(new);
 	spin_unlock(&clp->cl_lock);
 	if (sp == new)
 		get_rpccred(cred);
@@ -423,6 +495,11 @@
 	return sp;
 }
 
+/**
+ * nfs4_put_state_owner - Release a nfs4_state_owner
+ * @sp: state owner data to release
+ *
+ */
 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 {
 	struct nfs_client *clp = sp->so_server->nfs_client;
@@ -430,7 +507,7 @@
 
 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
 		return;
-	nfs4_remove_state_owner(clp, sp);
+	nfs4_remove_state_owner_locked(sp);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&sp->so_sequence.wait);
 	put_rpccred(cred);
@@ -585,8 +662,11 @@
 	if (!call_close) {
 		nfs4_put_open_state(state);
 		nfs4_put_state_owner(owner);
-	} else
-		nfs4_do_close(path, state, gfp_mask, wait);
+	} else {
+		bool roc = pnfs_roc(state->inode);
+
+		nfs4_do_close(path, state, gfp_mask, wait, roc);
+	}
 }
 
 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
@@ -633,7 +713,8 @@
 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
 {
 	struct nfs4_lock_state *lsp;
-	struct nfs_client *clp = state->owner->so_server->nfs_client;
+	struct nfs_server *server = state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
@@ -657,7 +738,7 @@
 		return NULL;
 	}
 	spin_lock(&clp->cl_lock);
-	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
+	nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64);
 	spin_unlock(&clp->cl_lock);
 	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
@@ -665,10 +746,11 @@
 
 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
 {
-	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
+	struct nfs_server *server = lsp->ls_state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	spin_lock(&clp->cl_lock);
-	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
+	nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
 	kfree(lsp);
@@ -925,9 +1007,9 @@
 }
 
 /*
- * Schedule a state recovery attempt
+ * Schedule a lease recovery attempt
  */
-void nfs4_schedule_state_recovery(struct nfs_client *clp)
+void nfs4_schedule_lease_recovery(struct nfs_client *clp)
 {
 	if (!clp)
 		return;
@@ -936,7 +1018,7 @@
 	nfs4_schedule_state_manager(clp);
 }
 
-int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
+static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
 {
 
 	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -950,7 +1032,7 @@
 	return 1;
 }
 
-int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
+static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
 {
 	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
 	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -959,6 +1041,14 @@
 	return 1;
 }
 
+void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
+{
+	struct nfs_client *clp = server->nfs_client;
+
+	nfs4_state_mark_reclaim_nograce(clp, state);
+	nfs4_schedule_state_manager(clp);
+}
+
 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
 {
 	struct inode *inode = state->inode;
@@ -1114,15 +1204,19 @@
 	}
 }
 
-static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+static void nfs4_reset_seqids(struct nfs_server *server,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
 {
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	/* Reset all sequence ids to zero */
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	spin_lock(&clp->cl_lock);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		sp->so_seqid.flags = 0;
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
@@ -1131,6 +1225,18 @@
 		}
 		spin_unlock(&sp->so_lock);
 	}
+	spin_unlock(&clp->cl_lock);
+}
+
+static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_reset_seqids(server, mark_reclaim);
+	rcu_read_unlock();
 }
 
 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
@@ -1148,25 +1254,41 @@
 		(void)ops->reclaim_complete(clp);
 }
 
-static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+static void nfs4_clear_reclaim_server(struct nfs_server *server)
 {
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
-		return 0;
-
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	spin_lock(&clp->cl_lock);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
-			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
+			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
+						&state->flags))
 				continue;
 			nfs4_state_mark_reclaim_nograce(clp, state);
 		}
 		spin_unlock(&sp->so_lock);
 	}
+	spin_unlock(&clp->cl_lock);
+}
+
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+
+	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_clear_reclaim_server(server);
+	rcu_read_unlock();
 
 	nfs_delegation_reap_unclaimed(clp);
 	return 1;
@@ -1238,27 +1360,40 @@
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
 {
+	struct nfs4_state_owner *sp;
+	struct nfs_server *server;
 	struct rb_node *pos;
 	int status = 0;
 
 restart:
-	spin_lock(&clp->cl_lock);
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
-		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
-			continue;
-		atomic_inc(&sp->so_count);
-		spin_unlock(&clp->cl_lock);
-		status = nfs4_reclaim_open_state(sp, ops);
-		if (status < 0) {
-			set_bit(ops->owner_flag_bit, &sp->so_flags);
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		spin_lock(&clp->cl_lock);
+		for (pos = rb_first(&server->state_owners);
+		     pos != NULL;
+		     pos = rb_next(pos)) {
+			sp = rb_entry(pos,
+				struct nfs4_state_owner, so_server_node);
+			if (!test_and_clear_bit(ops->owner_flag_bit,
+							&sp->so_flags))
+				continue;
+			atomic_inc(&sp->so_count);
+			spin_unlock(&clp->cl_lock);
+			rcu_read_unlock();
+
+			status = nfs4_reclaim_open_state(sp, ops);
+			if (status < 0) {
+				set_bit(ops->owner_flag_bit, &sp->so_flags);
+				nfs4_put_state_owner(sp);
+				return nfs4_recovery_handle_error(clp, status);
+			}
+
 			nfs4_put_state_owner(sp);
-			return nfs4_recovery_handle_error(clp, status);
+			goto restart;
 		}
-		nfs4_put_state_owner(sp);
-		goto restart;
+		spin_unlock(&clp->cl_lock);
 	}
-	spin_unlock(&clp->cl_lock);
+	rcu_read_unlock();
 	return status;
 }
 
@@ -1309,10 +1444,15 @@
 }
 
 #ifdef CONFIG_NFS_V4_1
+void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+	nfs4_schedule_lease_recovery(session->clp);
+}
+
 void nfs41_handle_recall_slot(struct nfs_client *clp)
 {
 	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
-	nfs4_schedule_state_recovery(clp);
+	nfs4_schedule_state_manager(clp);
 }
 
 static void nfs4_reset_all_state(struct nfs_client *clp)
@@ -1320,7 +1460,7 @@
 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
 		clp->cl_boot_time = CURRENT_TIME;
 		nfs4_state_start_reclaim_nograce(clp);
-		nfs4_schedule_state_recovery(clp);
+		nfs4_schedule_state_manager(clp);
 	}
 }
 
@@ -1328,7 +1468,7 @@
 {
 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
 		nfs4_state_start_reclaim_reboot(clp);
-		nfs4_schedule_state_recovery(clp);
+		nfs4_schedule_state_manager(clp);
 	}
 }
 
@@ -1348,7 +1488,7 @@
 {
 	nfs_expire_all_delegations(clp);
 	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
-		nfs4_schedule_state_recovery(clp);
+		nfs4_schedule_state_manager(clp);
 }
 
 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 9f1826b..94d50e8 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -71,8 +71,8 @@
 /* lock,open owner id:
  * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT  >> 2)
  */
-#define open_owner_id_maxsz	(1 + 4)
-#define lock_owner_id_maxsz	(1 + 4)
+#define open_owner_id_maxsz	(1 + 1 + 4)
+#define lock_owner_id_maxsz	(1 + 1 + 4)
 #define decode_lockowner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
 #define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
 #define compound_decode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
@@ -1088,10 +1088,11 @@
 {
 	__be32 *p;
 
-	p = reserve_space(xdr, 28);
+	p = reserve_space(xdr, 32);
 	p = xdr_encode_hyper(p, lowner->clientid);
-	*p++ = cpu_to_be32(16);
+	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+	*p++ = cpu_to_be32(lowner->s_dev);
 	xdr_encode_hyper(p, lowner->id);
 }
 
@@ -1210,10 +1211,11 @@
 	*p++ = cpu_to_be32(OP_OPEN);
 	*p = cpu_to_be32(arg->seqid->sequence->counter);
 	encode_share_access(xdr, arg->fmode);
-	p = reserve_space(xdr, 28);
+	p = reserve_space(xdr, 32);
 	p = xdr_encode_hyper(p, arg->clientid);
-	*p++ = cpu_to_be32(16);
+	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "open id:", 8);
+	*p++ = cpu_to_be32(arg->server->s_dev);
 	xdr_encode_hyper(p, arg->id);
 }
 
@@ -1510,7 +1512,7 @@
 	hdr->replen += decode_restorefh_maxsz;
 }
 
-static int
+static void
 encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
 {
 	__be32 *p;
@@ -1521,14 +1523,12 @@
 	p = reserve_space(xdr, 2*4);
 	*p++ = cpu_to_be32(1);
 	*p = cpu_to_be32(FATTR4_WORD0_ACL);
-	if (arg->acl_len % 4)
-		return -EINVAL;
+	BUG_ON(arg->acl_len % 4);
 	p = reserve_space(xdr, 4);
 	*p = cpu_to_be32(arg->acl_len);
 	xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
 	hdr->nops++;
 	hdr->replen += decode_setacl_maxsz;
-	return 0;
 }
 
 static void
@@ -1660,7 +1660,7 @@
 
 	p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
 	*p++ = cpu_to_be32(OP_CREATE_SESSION);
-	p = xdr_encode_hyper(p, clp->cl_ex_clid);
+	p = xdr_encode_hyper(p, clp->cl_clientid);
 	*p++ = cpu_to_be32(clp->cl_seqid);			/*Sequence id */
 	*p++ = cpu_to_be32(args->flags);			/*flags */
 
@@ -1789,7 +1789,6 @@
 		      const struct nfs4_layoutget_args *args,
 		      struct compound_hdr *hdr)
 {
-	nfs4_stateid stateid;
 	__be32 *p;
 
 	p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
@@ -1800,9 +1799,7 @@
 	p = xdr_encode_hyper(p, args->range.offset);
 	p = xdr_encode_hyper(p, args->range.length);
 	p = xdr_encode_hyper(p, args->minlength);
-	pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout,
-				args->ctx->state);
-	p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE);
+	p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
 	*p = cpu_to_be32(args->maxcount);
 
 	dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
@@ -1833,393 +1830,362 @@
 /*
  * Encode an ACCESS request
  */
-static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs4_accessargs *args)
+static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_accessargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_access(&xdr, args->access, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_access(xdr, args->access, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LOOKUP request
  */
-static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_arg *args)
+static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_lookup_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_lookup(&xdr, args->name, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_lookup(xdr, args->name, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LOOKUP_ROOT request
  */
-static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_root_arg *args)
+static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs4_lookup_root_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode REMOVE request
  */
-static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs_removeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_remove(&xdr, &args->name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_remove(xdr, &args->name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode RENAME request
  */
-static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs_renameargs *args)
+static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs_renameargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->old_dir, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_putfh(&xdr, args->new_dir, &hdr);
-	encode_rename(&xdr, args->old_name, args->new_name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->old_dir, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_putfh(xdr, args->new_dir, &hdr);
+	encode_rename(xdr, args->old_name, args->new_name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LINK request
  */
-static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_link_arg *args)
+static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct nfs4_link_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_link(&xdr, args->name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_link(xdr, args->name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode CREATE request
  */
-static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_create_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_create(&xdr, args, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_create(xdr, args, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode SYMLINK request
  */
-static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_create_arg *args)
 {
-	return nfs4_xdr_enc_create(req, p, args);
+	nfs4_xdr_enc_create(req, xdr, args);
 }
 
 /*
  * Encode GETATTR request
  */
-static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nfs4_getattr_arg *args)
+static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_getattr_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a CLOSE request
  */
-static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_closeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_close(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_close(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN request
  */
-static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_openargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_open(&xdr, args, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_open(xdr, args, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN_CONFIRM request
  */
-static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_open_confirmargs *args)
+static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      struct nfs_open_confirmargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops   = 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open_confirm(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open_confirm(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN request with no attributes.
  */
-static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_openargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN_DOWNGRADE request
  */
-static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs_closeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open_downgrade(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open_downgrade(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCK request
  */
-static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_args *args)
+static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_lock_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_lock(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_lock(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCKT request
  */
-static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_args *args)
+static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_lockt_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_lockt(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_lockt(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCKU request
  */
-static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_args *args)
+static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_locku_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_locku(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_locku(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
-static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args)
+static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
+					   struct xdr_stream *xdr,
+					struct nfs_release_lockowner_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_release_lockowner(xdr, &args->lock_owner, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READLINK request
  */
-static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_readlink *args)
+static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  const struct nfs4_readlink *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_readlink(&xdr, args, req, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_readlink(xdr, args, req, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
 			args->pgbase, args->pglen);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READDIR request
  */
-static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nfs4_readdir_arg *args)
+static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_readdir_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_readdir(&xdr, args, req, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_readdir(xdr, args, req, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
 			 args->pgbase, args->count);
@@ -2227,428 +2193,387 @@
 			__func__, hdr.replen << 2, args->pages,
 			args->pgbase, args->count);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READ request
  */
-static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_readargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_read(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_read(xdr, args, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
 			 args->pages, args->pgbase, args->count);
 	req->rq_rcv_buf.flags |= XDRBUF_READ;
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an SETATTR request
  */
-static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_setattrargs *args)
+static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_setattrargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_setattr(&xdr, args, args->server, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_setattr(xdr, args, args->server, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a GETACL request
  */
-static int
-nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
-		struct nfs_getaclargs *args)
+static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_getaclargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 	uint32_t replen;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
 	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
-	encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
+	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 		args->acl_pages, args->acl_pgbase, args->acl_len);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a WRITE request
  */
-static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_writeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_write(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_write(xdr, args, &hdr);
 	req->rq_snd_buf.flags |= XDRBUF_WRITE;
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  *  a COMMIT request
  */
-static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_writeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_commit(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_commit(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * FSINFO request
  */
-static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsinfo_arg *args)
+static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs4_fsinfo_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_fsinfo(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_fsinfo(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a PATHCONF request
  */
-static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct nfs4_pathconf_arg *args)
+static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  const struct nfs4_pathconf_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
 			   &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a STATFS request
  */
-static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs4_statfs_arg *args)
+static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_statfs_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
 			   args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * GETATTR_BITMAP request
  */
-static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p,
-				    struct nfs4_server_caps_arg *args)
+static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs4_server_caps_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fhandle, &hdr);
-	encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fhandle, &hdr);
+	encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
 			   FATTR4_WORD0_LINK_SUPPORT|
 			   FATTR4_WORD0_SYMLINK_SUPPORT|
 			   FATTR4_WORD0_ACLSUPPORT, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a RENEW request
  */
-static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
+static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_client *clp)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_renew(&xdr, clp, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_renew(xdr, clp, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SETCLIENTID request
  */
-static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid *sc)
+static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs4_setclientid *sc)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_setclientid(&xdr, sc, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_setclientid(xdr, sc, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SETCLIENTID_CONFIRM request
  */
-static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg)
+static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
+					     struct xdr_stream *xdr,
+					     struct nfs4_setclientid_res *arg)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_setclientid_confirm(&xdr, arg, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_fsinfo(&xdr, lease_bitmap, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_setclientid_confirm(xdr, arg, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_fsinfo(xdr, lease_bitmap, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * DELEGRETURN request
  */
-static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struct nfs4_delegreturnargs *args)
+static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs4_delegreturnargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fhandle, &hdr);
-	encode_delegreturn(&xdr, args->stateid, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fhandle, &hdr);
+	encode_delegreturn(xdr, args->stateid, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode FS_LOCATIONS request
  */
-static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations_arg *args)
+static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      struct nfs4_fs_locations_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 	uint32_t replen;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_lookup(&xdr, args->name, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_lookup(xdr, args->name, &hdr);
 	replen = hdr.replen;	/* get the attribute into args->page */
-	encode_fs_locations(&xdr, args->bitmask, &hdr);
+	encode_fs_locations(xdr, args->bitmask, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
 			0, PAGE_SIZE);
 	encode_nops(&hdr);
-	return 0;
 }
 
 #if defined(CONFIG_NFS_V4_1)
 /*
  * EXCHANGE_ID request
  */
-static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
-				    struct nfs41_exchange_id_args *args)
+static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs41_exchange_id_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_exchange_id(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_exchange_id(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a CREATE_SESSION request
  */
-static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
-				       struct nfs41_create_session_args *args)
+static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs41_create_session_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_create_session(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_create_session(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a DESTROY_SESSION request
  */
-static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
-					struct nfs4_session *session)
+static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
+					 struct xdr_stream *xdr,
+					 struct nfs4_session *session)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = session->clp->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_destroy_session(&xdr, session, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_destroy_session(xdr, session, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SEQUENCE request
  */
-static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p,
-				 struct nfs4_sequence_args *args)
+static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs4_sequence_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a GET_LEASE_TIME request
  */
-static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
-				       struct nfs4_get_lease_time_args *args)
+static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs4_get_lease_time_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
 	};
 	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->la_seq_args, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_fsinfo(&xdr, lease_bitmap, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->la_seq_args, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_fsinfo(xdr, lease_bitmap, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a RECLAIM_COMPLETE request
  */
-static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
-				     struct nfs41_reclaim_complete_args *args)
+static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
+					  struct xdr_stream *xdr,
+				struct nfs41_reclaim_complete_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args)
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_reclaim_complete(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_reclaim_complete(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode GETDEVICEINFO request
  */
-static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
-				      struct nfs4_getdeviceinfo_args *args)
+static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
+				       struct xdr_stream *xdr,
+				       struct nfs4_getdeviceinfo_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_getdeviceinfo(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_getdeviceinfo(xdr, args, &hdr);
 
 	/* set up reply kvec. Subtract notification bitmap max size (2)
 	 * so that notification bitmap is put in xdr_buf tail */
@@ -2657,27 +2582,24 @@
 			 args->pdev->pglen);
 
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  *  Encode LAYOUTGET request
  */
-static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p,
-				  struct nfs4_layoutget_args *args)
+static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs4_layoutget_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, NFS_FH(args->inode), &hdr);
-	encode_layoutget(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+	encode_layoutget(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
@@ -4475,7 +4397,7 @@
 		goto out_overflow;
 	eof = be32_to_cpup(p++);
 	count = be32_to_cpup(p);
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
 	recvd = req->rq_rcv_buf.len - hdrlen;
 	if (count > recvd) {
 		dprintk("NFS: server cheating in read reply: "
@@ -4772,7 +4694,7 @@
 	p = xdr_inline_decode(xdr, 8);
 	if (unlikely(!p))
 		goto out_overflow;
-	xdr_decode_hyper(p, &clp->cl_ex_clid);
+	xdr_decode_hyper(p, &clp->cl_clientid);
 	p = xdr_inline_decode(xdr, 12);
 	if (unlikely(!p))
 		goto out_overflow;
@@ -5000,7 +4922,7 @@
 		goto out_overflow;
 	len = be32_to_cpup(p);
 	if (len) {
-		int i;
+		uint32_t i;
 
 		p = xdr_inline_decode(xdr, 4 * len);
 		if (unlikely(!p))
@@ -5090,26 +5012,26 @@
 /*
  * Decode OPEN_DOWNGRADE response
  */
-static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
+				       struct nfs_closeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open_downgrade(&xdr, res);
+	status = decode_open_downgrade(xdr, res);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5118,26 +5040,25 @@
 /*
  * Decode ACCESS response
  */
-static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_accessres *res)
+static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_accessres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_access(&xdr, res);
+	status = decode_access(xdr, res);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5146,26 +5067,28 @@
 /*
  * Decode LOOKUP response
  */
-static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_lookup_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_lookup(&xdr)) != 0)
+	status = decode_lookup(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+	status = decode_getfh(xdr, res->fh);
+	if (status)
 		goto out;
-	status = decode_getfattr(&xdr, res->fattr, res->server
+	status = decode_getfattr(xdr, res->fattr, res->server
 			,!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5174,23 +5097,25 @@
 /*
  * Decode LOOKUP_ROOT response
  */
-static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs4_lookup_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putrootfh(&xdr)) != 0)
+	status = decode_putrootfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) == 0)
-		status = decode_getfattr(&xdr, res->fattr, res->server,
+	status = decode_getfh(xdr, res->fh);
+	if (status == 0)
+		status = decode_getfattr(xdr, res->fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5199,24 +5124,25 @@
 /*
  * Decode REMOVE response
  */
-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res)
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_removeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
+	status = decode_remove(xdr, &res->cinfo);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->dir_attr, res->server,
+	decode_getfattr(xdr, res->dir_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5225,34 +5151,38 @@
 /*
  * Decode RENAME response
  */
-static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs_renameres *res)
+static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_renameres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
+	status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
+	if (status)
 		goto out;
 	/* Current FH is target directory */
-	if (decode_getfattr(&xdr, res->new_fattr, res->server,
+	if (decode_getfattr(xdr, res->new_fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->old_fattr, res->server,
+	decode_getfattr(xdr, res->old_fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5261,37 +5191,41 @@
 /*
  * Decode LINK response
  */
-static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link_res *res)
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs4_link_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_link(&xdr, &res->cinfo)) != 0)
+	status = decode_link(xdr, &res->cinfo);
+	if (status)
 		goto out;
 	/*
 	 * Note order: OP_LINK leaves the directory as the current
 	 *             filehandle.
 	 */
-	if (decode_getfattr(&xdr, res->dir_attr, res->server,
+	if (decode_getfattr(xdr, res->dir_attr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5300,33 +5234,37 @@
 /*
  * Decode CREATE response
  */
-static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_create_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0)
+	status = decode_create(xdr, &res->dir_cinfo);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+	status = decode_getfh(xdr, res->fh);
+	if (status)
 		goto out;
-	if (decode_getfattr(&xdr, res->fattr, res->server,
+	if (decode_getfattr(xdr, res->fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->dir_fattr, res->server,
+	decode_getfattr(xdr, res->dir_fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5335,31 +5273,31 @@
 /*
  * Decode SYMLINK response
  */
-static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_create_res *res)
 {
-	return nfs4_xdr_dec_create(rqstp, p, res);
+	return nfs4_xdr_dec_create(rqstp, xdr, res);
 }
 
 /*
  * Decode GETATTR response
  */
-static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_getattr_res *res)
+static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_getattr_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_getfattr(&xdr, res->fattr, res->server,
+	status = decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5368,46 +5306,40 @@
 /*
  * Encode an SETACL request
  */
-static int
-nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args)
+static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_setaclargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
-	int status;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	status = encode_setacl(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_setacl(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return status;
 }
 
 /*
  * Decode SETACL response
  */
 static int
-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 		    struct nfs_setaclres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_setattr(&xdr);
+	status = decode_setattr(xdr);
 out:
 	return status;
 }
@@ -5416,24 +5348,22 @@
  * Decode GETACL response
  */
 static int
-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 		    struct nfs_getaclres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_getacl(&xdr, rqstp, &res->acl_len);
+	status = decode_getacl(xdr, rqstp, &res->acl_len);
 
 out:
 	return status;
@@ -5442,23 +5372,22 @@
 /*
  * Decode CLOSE response
  */
-static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_closeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_close(&xdr, res);
+	status = decode_close(xdr, res);
 	if (status != 0)
 		goto out;
 	/*
@@ -5467,7 +5396,7 @@
 	 * 	an ESTALE error. Shouldn't be a problem,
 	 * 	though, since fattr->valid will remain unset.
 	 */
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5476,36 +5405,35 @@
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_savefh(&xdr);
+	status = decode_savefh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	if (decode_getfh(&xdr, &res->fh) != 0)
+	if (decode_getfh(xdr, &res->fh) != 0)
 		goto out;
-	if (decode_getfattr(&xdr, res->f_attr, res->server,
+	if (decode_getfattr(xdr, res->f_attr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if (decode_restorefh(&xdr) != 0)
+	if (decode_restorefh(xdr) != 0)
 		goto out;
-	decode_getfattr(&xdr, res->dir_attr, res->server,
+	decode_getfattr(xdr, res->dir_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5514,20 +5442,20 @@
 /*
  * Decode OPEN_CONFIRM response
  */
-static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, __be32 *p, struct nfs_open_confirmres *res)
+static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
+				     struct xdr_stream *xdr,
+				     struct nfs_open_confirmres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open_confirm(&xdr, res);
+	status = decode_open_confirm(xdr, res);
 out:
 	return status;
 }
@@ -5535,26 +5463,26 @@
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->f_attr, res->server,
+	decode_getfattr(xdr, res->f_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5563,26 +5491,26 @@
 /*
  * Decode SETATTR response
  */
-static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_setattrres *res)
+static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
+				struct xdr_stream *xdr,
+				struct nfs_setattrres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_setattr(&xdr);
+	status = decode_setattr(xdr);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5591,23 +5519,22 @@
 /*
  * Decode LOCK response
  */
-static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock_res *res)
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_lock_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lock(&xdr, res);
+	status = decode_lock(xdr, res);
 out:
 	return status;
 }
@@ -5615,23 +5542,22 @@
 /*
  * Decode LOCKT response
  */
-static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lockt_res *res)
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_lockt_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lockt(&xdr, res);
+	status = decode_lockt(xdr, res);
 out:
 	return status;
 }
@@ -5639,61 +5565,58 @@
 /*
  * Decode LOCKU response
  */
-static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, __be32 *p, struct nfs_locku_res *res)
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_locku_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_locku(&xdr, res);
+	status = decode_locku(xdr, res);
 out:
 	return status;
 }
 
-static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
+					  struct xdr_stream *xdr, void *dummy)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_release_lockowner(&xdr);
+		status = decode_release_lockowner(xdr);
 	return status;
 }
 
 /*
  * Decode READLINK response
  */
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_readlink_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readlink(&xdr, rqstp);
+	status = decode_readlink(xdr, rqstp);
 out:
 	return status;
 }
@@ -5701,23 +5624,22 @@
 /*
  * Decode READDIR response
  */
-static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_readdir_res *res)
+static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_readdir_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readdir(&xdr, rqstp, res);
+	status = decode_readdir(xdr, rqstp, res);
 out:
 	return status;
 }
@@ -5725,23 +5647,22 @@
 /*
  * Decode Read response
  */
-static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, __be32 *p, struct nfs_readres *res)
+static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_readres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_read(&xdr, rqstp, res);
+	status = decode_read(xdr, rqstp, res);
 	if (!status)
 		status = res->count;
 out:
@@ -5751,26 +5672,25 @@
 /*
  * Decode WRITE response
  */
-static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_write(&xdr, res);
+	status = decode_write(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 	if (!status)
 		status = res->count;
@@ -5781,26 +5701,25 @@
 /*
  * Decode COMMIT response
  */
-static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_commit(&xdr, res);
+	status = decode_commit(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5809,85 +5728,80 @@
 /*
  * Decode FSINFO response
  */
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_fsinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->fsinfo);
+		status = decode_fsinfo(xdr, res->fsinfo);
 	return status;
 }
 
 /*
  * Decode PATHCONF response
  */
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
 				 struct nfs4_pathconf_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_pathconf(&xdr, res->pathconf);
+		status = decode_pathconf(xdr, res->pathconf);
 	return status;
 }
 
 /*
  * Decode STATFS response
  */
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_statfs_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_statfs(&xdr, res->fsstat);
+		status = decode_statfs(xdr, res->fsstat);
 	return status;
 }
 
 /*
  * Decode GETATTR_BITMAP response
  */
-static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
+static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_server_caps_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	status = decode_server_caps(&xdr, res);
+	status = decode_server_caps(xdr, res);
 out:
 	return status;
 }
@@ -5895,79 +5809,77 @@
 /*
  * Decode RENEW response
  */
-static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      void *__unused)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_renew(&xdr);
+		status = decode_renew(xdr);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID response
  */
-static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
-		struct nfs4_setclientid_res *res)
+static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_setclientid_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid(&xdr, res);
+		status = decode_setclientid(xdr, res);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID_CONFIRM response
  */
-static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+					    struct xdr_stream *xdr,
+					    struct nfs_fsinfo *fsinfo)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid_confirm(&xdr);
+		status = decode_setclientid_confirm(xdr);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, fsinfo);
+		status = decode_fsinfo(xdr, fsinfo);
 	return status;
 }
 
 /*
  * Decode DELEGRETURN response
  */
-static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs4_delegreturnres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_delegreturn(&xdr);
+	status = decode_delegreturn(xdr);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5976,26 +5888,27 @@
 /*
  * Decode FS_LOCATIONS response
  */
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
 				     struct nfs4_fs_locations_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_lookup(&xdr)) != 0)
+	status = decode_lookup(xdr);
+	if (status)
 		goto out;
-	xdr_enter_page(&xdr, PAGE_SIZE);
-	status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+	xdr_enter_page(xdr, PAGE_SIZE);
+	status = decode_getfattr(xdr, &res->fs_locations->fattr,
 				 res->fs_locations->server,
 				 !RPC_IS_ASYNC(req->rq_task));
 out:
@@ -6006,129 +5919,122 @@
 /*
  * Decode EXCHANGE_ID response
  */
-static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
 				    void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_exchange_id(&xdr, res);
+		status = decode_exchange_id(xdr, res);
 	return status;
 }
 
 /*
  * Decode CREATE_SESSION response
  */
-static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs41_create_session_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_create_session(&xdr, res);
+		status = decode_create_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode DESTROY_SESSION response
  */
-static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
-					void *dummy)
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_destroy_session(&xdr, dummy);
+		status = decode_destroy_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode SEQUENCE response
  */
-static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_sequence_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, res, rqstp);
+		status = decode_sequence(xdr, res, rqstp);
 	return status;
 }
 
 /*
  * Decode GET_LEASE_TIME response
  */
-static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs4_get_lease_time_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+		status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->lr_fsinfo);
+		status = decode_fsinfo(xdr, res->lr_fsinfo);
 	return status;
 }
 
 /*
  * Decode RECLAIM_COMPLETE response
  */
-static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
+					 struct xdr_stream *xdr,
 					 struct nfs41_reclaim_complete_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, rqstp);
+		status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (!status)
-		status = decode_reclaim_complete(&xdr, (void *)NULL);
+		status = decode_reclaim_complete(xdr, (void *)NULL);
 	return status;
 }
 
 /*
  * Decode GETDEVINFO response
  */
-static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
+				      struct xdr_stream *xdr,
 				      struct nfs4_getdeviceinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status != 0)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status != 0)
 		goto out;
-	status = decode_getdeviceinfo(&xdr, res->pdev);
+	status = decode_getdeviceinfo(xdr, res->pdev);
 out:
 	return status;
 }
@@ -6136,45 +6042,58 @@
 /*
  * Decode LAYOUTGET response
  */
-static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
+				  struct xdr_stream *xdr,
 				  struct nfs4_layoutget_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_layoutget(&xdr, rqstp, res);
+	status = decode_layoutget(xdr, rqstp, res);
 out:
 	return status;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
-__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
-			   struct nfs_server *server, int plus)
+/**
+ * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
+ *                      the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ */
+int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
 {
 	uint32_t bitmap[2] = {0};
 	uint32_t len;
 	__be32 *p = xdr_inline_decode(xdr, 4);
 	if (unlikely(!p))
 		goto out_overflow;
-	if (!ntohl(*p++)) {
+	if (*p == xdr_zero) {
 		p = xdr_inline_decode(xdr, 4);
 		if (unlikely(!p))
 			goto out_overflow;
-		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
+		if (*p == xdr_zero)
+			return -EAGAIN;
 		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
+		return -EBADCOOKIE;
 	}
 
 	p = xdr_inline_decode(xdr, 12);
@@ -6182,7 +6101,7 @@
 		goto out_overflow;
 	entry->prev_cookie = entry->cookie;
 	p = xdr_decode_hyper(p, &entry->cookie);
-	entry->len = ntohl(*p++);
+	entry->len = be32_to_cpup(p);
 
 	p = xdr_inline_decode(xdr, entry->len);
 	if (unlikely(!p))
@@ -6203,7 +6122,8 @@
 	if (decode_attr_length(xdr, &len, &p) < 0)
 		goto out_overflow;
 
-	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, server, 1) < 0)
+	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
+					entry->server, 1) < 0)
 		goto out_overflow;
 	if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
 		entry->ino = entry->fattr->fileid;
@@ -6212,20 +6132,11 @@
 	if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
 		entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
 
-	if (verify_attr_len(xdr, p, len) < 0)
-		goto out_overflow;
-
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
+	return 0;
 
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EAGAIN);
+	return -EAGAIN;
 }
 
 /*
@@ -6301,8 +6212,8 @@
 #define PROC(proc, argtype, restype)				\
 [NFSPROC4_CLNT_##proc] = {					\
 	.p_proc   = NFSPROC4_COMPOUND,				\
-	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,		\
-	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,		\
+	.p_encode = (kxdreproc_t)nfs4_xdr_##argtype,		\
+	.p_decode = (kxdrdproc_t)nfs4_xdr_##restype,		\
 	.p_arglen = NFS4_##argtype##_sz,			\
 	.p_replen = NFS4_##restype##_sz,			\
 	.p_statidx = NFSPROC4_CLNT_##proc,			\
@@ -6310,50 +6221,50 @@
 }
 
 struct rpc_procinfo	nfs4_procedures[] = {
-  PROC(READ,		enc_read,	dec_read),
-  PROC(WRITE,		enc_write,	dec_write),
-  PROC(COMMIT,		enc_commit,	dec_commit),
-  PROC(OPEN,		enc_open,	dec_open),
-  PROC(OPEN_CONFIRM,	enc_open_confirm,	dec_open_confirm),
-  PROC(OPEN_NOATTR,	enc_open_noattr,	dec_open_noattr),
-  PROC(OPEN_DOWNGRADE,	enc_open_downgrade,	dec_open_downgrade),
-  PROC(CLOSE,		enc_close,	dec_close),
-  PROC(SETATTR,		enc_setattr,	dec_setattr),
-  PROC(FSINFO,		enc_fsinfo,	dec_fsinfo),
-  PROC(RENEW,		enc_renew,	dec_renew),
-  PROC(SETCLIENTID,	enc_setclientid,	dec_setclientid),
-  PROC(SETCLIENTID_CONFIRM,	enc_setclientid_confirm,	dec_setclientid_confirm),
-  PROC(LOCK,            enc_lock,       dec_lock),
-  PROC(LOCKT,           enc_lockt,      dec_lockt),
-  PROC(LOCKU,           enc_locku,      dec_locku),
-  PROC(ACCESS,		enc_access,	dec_access),
-  PROC(GETATTR,		enc_getattr,	dec_getattr),
-  PROC(LOOKUP,		enc_lookup,	dec_lookup),
-  PROC(LOOKUP_ROOT,	enc_lookup_root,	dec_lookup_root),
-  PROC(REMOVE,		enc_remove,	dec_remove),
-  PROC(RENAME,		enc_rename,	dec_rename),
-  PROC(LINK,		enc_link,	dec_link),
-  PROC(SYMLINK,		enc_symlink,	dec_symlink),
-  PROC(CREATE,		enc_create,	dec_create),
-  PROC(PATHCONF,	enc_pathconf,	dec_pathconf),
-  PROC(STATFS,		enc_statfs,	dec_statfs),
-  PROC(READLINK,	enc_readlink,	dec_readlink),
-  PROC(READDIR,		enc_readdir,	dec_readdir),
-  PROC(SERVER_CAPS,	enc_server_caps, dec_server_caps),
-  PROC(DELEGRETURN,	enc_delegreturn, dec_delegreturn),
-  PROC(GETACL,		enc_getacl,	dec_getacl),
-  PROC(SETACL,		enc_setacl,	dec_setacl),
-  PROC(FS_LOCATIONS,	enc_fs_locations, dec_fs_locations),
-  PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
+	PROC(READ,		enc_read,		dec_read),
+	PROC(WRITE,		enc_write,		dec_write),
+	PROC(COMMIT,		enc_commit,		dec_commit),
+	PROC(OPEN,		enc_open,		dec_open),
+	PROC(OPEN_CONFIRM,	enc_open_confirm,	dec_open_confirm),
+	PROC(OPEN_NOATTR,	enc_open_noattr,	dec_open_noattr),
+	PROC(OPEN_DOWNGRADE,	enc_open_downgrade,	dec_open_downgrade),
+	PROC(CLOSE,		enc_close,		dec_close),
+	PROC(SETATTR,		enc_setattr,		dec_setattr),
+	PROC(FSINFO,		enc_fsinfo,		dec_fsinfo),
+	PROC(RENEW,		enc_renew,		dec_renew),
+	PROC(SETCLIENTID,	enc_setclientid,	dec_setclientid),
+	PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
+	PROC(LOCK,		enc_lock,		dec_lock),
+	PROC(LOCKT,		enc_lockt,		dec_lockt),
+	PROC(LOCKU,		enc_locku,		dec_locku),
+	PROC(ACCESS,		enc_access,		dec_access),
+	PROC(GETATTR,		enc_getattr,		dec_getattr),
+	PROC(LOOKUP,		enc_lookup,		dec_lookup),
+	PROC(LOOKUP_ROOT,	enc_lookup_root,	dec_lookup_root),
+	PROC(REMOVE,		enc_remove,		dec_remove),
+	PROC(RENAME,		enc_rename,		dec_rename),
+	PROC(LINK,		enc_link,		dec_link),
+	PROC(SYMLINK,		enc_symlink,		dec_symlink),
+	PROC(CREATE,		enc_create,		dec_create),
+	PROC(PATHCONF,		enc_pathconf,		dec_pathconf),
+	PROC(STATFS,		enc_statfs,		dec_statfs),
+	PROC(READLINK,		enc_readlink,		dec_readlink),
+	PROC(READDIR,		enc_readdir,		dec_readdir),
+	PROC(SERVER_CAPS,	enc_server_caps,	dec_server_caps),
+	PROC(DELEGRETURN,	enc_delegreturn,	dec_delegreturn),
+	PROC(GETACL,		enc_getacl,		dec_getacl),
+	PROC(SETACL,		enc_setacl,		dec_setacl),
+	PROC(FS_LOCATIONS,	enc_fs_locations,	dec_fs_locations),
+	PROC(RELEASE_LOCKOWNER,	enc_release_lockowner,	dec_release_lockowner),
 #if defined(CONFIG_NFS_V4_1)
-  PROC(EXCHANGE_ID,	enc_exchange_id,	dec_exchange_id),
-  PROC(CREATE_SESSION,	enc_create_session,	dec_create_session),
-  PROC(DESTROY_SESSION,	enc_destroy_session,	dec_destroy_session),
-  PROC(SEQUENCE,	enc_sequence,	dec_sequence),
-  PROC(GET_LEASE_TIME,	enc_get_lease_time,	dec_get_lease_time),
-  PROC(RECLAIM_COMPLETE, enc_reclaim_complete,  dec_reclaim_complete),
-  PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
-  PROC(LAYOUTGET,  enc_layoutget,     dec_layoutget),
+	PROC(EXCHANGE_ID,	enc_exchange_id,	dec_exchange_id),
+	PROC(CREATE_SESSION,	enc_create_session,	dec_create_session),
+	PROC(DESTROY_SESSION,	enc_destroy_session,	dec_destroy_session),
+	PROC(SEQUENCE,		enc_sequence,		dec_sequence),
+	PROC(GET_LEASE_TIME,	enc_get_lease_time,	dec_get_lease_time),
+	PROC(RECLAIM_COMPLETE,	enc_reclaim_complete,	dec_reclaim_complete),
+	PROC(GETDEVICEINFO,	enc_getdeviceinfo,	dec_getdeviceinfo),
+	PROC(LAYOUTGET,		enc_layoutget,		dec_layoutget),
 #endif /* CONFIG_NFS_V4_1 */
 };
 
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 903908a..c541093 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -86,11 +86,14 @@
 /* Default path we try to mount. "%s" gets replaced by our IP address */
 #define NFS_ROOT		"/tftpboot/%s"
 
+/* Default NFSROOT mount options. */
+#define NFS_DEF_OPTIONS		"udp"
+
 /* Parameters passed from the kernel command line */
 static char nfs_root_parms[256] __initdata = "";
 
 /* Text-based mount options passed to super.c */
-static char nfs_root_options[256] __initdata = "";
+static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS;
 
 /* Address of NFS server */
 static __be32 servaddr __initdata = htonl(INADDR_NONE);
@@ -160,8 +163,14 @@
 }
 
 static int __init root_nfs_cat(char *dest, const char *src,
-				  const size_t destlen)
+			       const size_t destlen)
 {
+	size_t len = strlen(dest);
+
+	if (len && dest[len - 1] != ',')
+		if (strlcat(dest, ",", destlen) > destlen)
+			return -1;
+
 	if (strlcat(dest, src, destlen) > destlen)
 		return -1;
 	return 0;
@@ -194,16 +203,6 @@
 		if (root_nfs_cat(nfs_root_options, incoming,
 						sizeof(nfs_root_options)))
 			return -1;
-
-	/*
-	 * Possibly prepare for more options to be appended
-	 */
-	if (nfs_root_options[0] != '\0' &&
-	    nfs_root_options[strlen(nfs_root_options)] != ',')
-		if (root_nfs_cat(nfs_root_options, ",",
-						sizeof(nfs_root_options)))
-			return -1;
-
 	return 0;
 }
 
@@ -217,7 +216,7 @@
  */
 static int __init root_nfs_data(char *cmdline)
 {
-	char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
+	char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
 	int len, retval = -1;
 	char *tmp = NULL;
 	const size_t tmplen = sizeof(nfs_export_path);
@@ -244,9 +243,9 @@
 	 * Append mandatory options for nfsroot so they override
 	 * what has come before
 	 */
-	snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4",
+	snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4",
 			&servaddr);
-	if (root_nfs_cat(nfs_root_options, addr_option,
+	if (root_nfs_cat(nfs_root_options, mand_options,
 						sizeof(nfs_root_options)))
 		goto out_optionstoolong;
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b68536c..e1164e3 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,12 +26,9 @@
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
-	struct nfs_page	*p;
-	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
-	if (p) {
-		memset(p, 0, sizeof(*p));
+	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+	if (p)
 		INIT_LIST_HEAD(&p->wb_list);
-	}
 	return p;
 }
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index db77342..1b1bc1a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -177,105 +177,149 @@
  * pNFS client layout cache
  */
 
-static void
-get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+/* Need to hold i_lock if caller does not already hold reference */
+void
+get_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	lo->refcount++;
+	atomic_inc(&lo->plh_refcount);
+}
+
+static void
+destroy_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	dprintk("%s: freeing layout cache %p\n", __func__, lo);
+	BUG_ON(!list_empty(&lo->plh_layouts));
+	NFS_I(lo->plh_inode)->layout = NULL;
+	kfree(lo);
 }
 
 static void
 put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	BUG_ON(lo->refcount == 0);
-
-	lo->refcount--;
-	if (!lo->refcount) {
-		dprintk("%s: freeing layout cache %p\n", __func__, lo);
-		BUG_ON(!list_empty(&lo->layouts));
-		NFS_I(lo->inode)->layout = NULL;
-		kfree(lo);
-	}
+	if (atomic_dec_and_test(&lo->plh_refcount))
+		destroy_layout_hdr(lo);
 }
 
 void
-put_layout_hdr(struct inode *inode)
+put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	spin_lock(&inode->i_lock);
-	put_layout_hdr_locked(NFS_I(inode)->layout);
-	spin_unlock(&inode->i_lock);
+	struct inode *inode = lo->plh_inode;
+
+	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+		destroy_layout_hdr(lo);
+		spin_unlock(&inode->i_lock);
+	}
 }
 
 static void
 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
 {
-	INIT_LIST_HEAD(&lseg->fi_list);
-	kref_init(&lseg->kref);
-	lseg->layout = lo;
+	INIT_LIST_HEAD(&lseg->pls_list);
+	atomic_set(&lseg->pls_refcount, 1);
+	smp_mb();
+	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
+	lseg->pls_layout = lo;
 }
 
-/* Called without i_lock held, as the free_lseg call may sleep */
-static void
-destroy_lseg(struct kref *kref)
+static void free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct pnfs_layout_segment *lseg =
-		container_of(kref, struct pnfs_layout_segment, kref);
-	struct inode *ino = lseg->layout->inode;
+	struct inode *ino = lseg->pls_layout->plh_inode;
 
-	dprintk("--> %s\n", __func__);
 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
-	/* Matched by get_layout_hdr_locked in pnfs_insert_layout */
-	put_layout_hdr(ino);
+	/* Matched by get_layout_hdr in pnfs_insert_layout */
+	put_layout_hdr(NFS_I(ino)->layout);
 }
 
-static void
-put_lseg(struct pnfs_layout_segment *lseg)
+/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
+ * could sleep, so must be called outside of the lock.
+ * Returns 1 if the object was removed, otherwise 0.
+ */
+static int
+put_lseg_locked(struct pnfs_layout_segment *lseg,
+		struct list_head *tmp_list)
 {
-	if (!lseg)
-		return;
+	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
+		atomic_read(&lseg->pls_refcount),
+		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+	if (atomic_dec_and_test(&lseg->pls_refcount)) {
+		struct inode *ino = lseg->pls_layout->plh_inode;
 
-	dprintk("%s: lseg %p ref %d\n", __func__, lseg,
-		atomic_read(&lseg->kref.refcount));
-	kref_put(&lseg->kref, destroy_lseg);
+		BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+		list_del(&lseg->pls_list);
+		if (list_empty(&lseg->pls_layout->plh_segs)) {
+			struct nfs_client *clp;
+
+			clp = NFS_SERVER(ino)->nfs_client;
+			spin_lock(&clp->cl_lock);
+			/* List does not take a reference, so no need for put here */
+			list_del_init(&lseg->pls_layout->plh_layouts);
+			spin_unlock(&clp->cl_lock);
+			clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags);
+		}
+		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
+		list_add(&lseg->pls_list, tmp_list);
+		return 1;
+	}
+	return 0;
 }
 
-static void
-pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+static bool
+should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
+{
+	return (recall_iomode == IOMODE_ANY ||
+		lseg_iomode == recall_iomode);
+}
+
+/* Returns 1 if lseg is removed from list, 0 otherwise */
+static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
+			     struct list_head *tmp_list)
+{
+	int rv = 0;
+
+	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+		/* Remove the reference keeping the lseg in the
+		 * list.  It will now be removed when all
+		 * outstanding io is finished.
+		 */
+		rv = put_lseg_locked(lseg, tmp_list);
+	}
+	return rv;
+}
+
+/* Returns the number of matching invalid lsegs remaining in the list
+ * after the call.
+ */
+int
+mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+			    struct list_head *tmp_list,
+			    u32 iomode)
 {
 	struct pnfs_layout_segment *lseg, *next;
-	struct nfs_client *clp;
+	int invalid = 0, removed = 0;
 
 	dprintk("%s:Begin lo %p\n", __func__, lo);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
-		dprintk("%s: freeing lseg %p\n", __func__, lseg);
-		list_move(&lseg->fi_list, tmp_list);
-	}
-	clp = NFS_SERVER(lo->inode)->nfs_client;
-	spin_lock(&clp->cl_lock);
-	/* List does not take a reference, so no need for put here */
-	list_del_init(&lo->layouts);
-	spin_unlock(&clp->cl_lock);
-	write_seqlock(&lo->seqlock);
-	clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-
-	dprintk("%s:Return\n", __func__);
+	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
+			dprintk("%s: freeing lseg %p iomode %d "
+				"offset %llu length %llu\n", __func__,
+				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
+				lseg->pls_range.length);
+			invalid++;
+			removed += mark_lseg_invalid(lseg, tmp_list);
+		}
+	dprintk("%s:Return %i\n", __func__, invalid - removed);
+	return invalid - removed;
 }
 
-static void
-pnfs_free_lseg_list(struct list_head *tmp_list)
+void
+pnfs_free_lseg_list(struct list_head *free_me)
 {
-	struct pnfs_layout_segment *lseg;
+	struct pnfs_layout_segment *lseg, *tmp;
 
-	while (!list_empty(tmp_list)) {
-		lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
-				fi_list);
-		dprintk("%s calling put_lseg on %p\n", __func__, lseg);
-		list_del(&lseg->fi_list);
-		put_lseg(lseg);
+	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
+		list_del(&lseg->pls_list);
+		free_lseg(lseg);
 	}
 }
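
The functions above split teardown in two on purpose: segments are detached onto a caller-supplied tmp_list while i_lock is held, and free_lseg() runs only after the lock is dropped, because the layout driver's free routine may sleep. A minimal standalone sketch of the same detach-then-free pattern (illustrative only, not part of this patch; a pthread mutex stands in for the spinlock and free() for the sleeping cleanup):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_list;

/* Detach everything while holding the lock, then do the part that may
 * block (here just free()) after the lock has been dropped. */
static void destroy_all(void)
{
	struct node *tmp_list, *n, *next;

	pthread_mutex_lock(&list_lock);
	tmp_list = shared_list;
	shared_list = NULL;
	pthread_mutex_unlock(&list_lock);

	for (n = tmp_list; n != NULL; n = next) {
		next = n->next;
		printf("freeing node %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = shared_list;
		shared_list = n;
	}
	destroy_all();
	return 0;
}
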
 
@@ -288,7 +332,8 @@
 	spin_lock(&nfsi->vfs_inode.i_lock);
 	lo = nfsi->layout;
 	if (lo) {
-		pnfs_clear_lseg_list(lo, &tmp_list);
+		set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
+		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
 		/* Matched by refcount set to 1 in alloc_init_layout_hdr */
 		put_layout_hdr_locked(lo);
 	}
@@ -312,76 +357,80 @@
 
 	while (!list_empty(&tmp_list)) {
 		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
-				layouts);
+				plh_layouts);
 		dprintk("%s freeing layout for inode %lu\n", __func__,
-			lo->inode->i_ino);
-		pnfs_destroy_layout(NFS_I(lo->inode));
+			lo->plh_inode->i_ino);
+		pnfs_destroy_layout(NFS_I(lo->plh_inode));
 	}
 }
 
-/* update lo->stateid with new if is more recent
- *
- * lo->stateid could be the open stateid, in which case we just use what given.
- */
-static void
-pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
-			const nfs4_stateid *new)
-{
-	nfs4_stateid *old = &lo->stateid;
-	bool overwrite = false;
-
-	write_seqlock(&lo->seqlock);
-	if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
-	    memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
-		overwrite = true;
-	else {
-		u32 oldseq, newseq;
-
-		oldseq = be32_to_cpu(old->stateid.seqid);
-		newseq = be32_to_cpu(new->stateid.seqid);
-		if ((int)(newseq - oldseq) > 0)
-			overwrite = true;
-	}
-	if (overwrite)
-		memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
-	write_sequnlock(&lo->seqlock);
-}
-
-static void
-pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
-			      struct nfs4_state *state)
-{
-	int seq;
-
-	dprintk("--> %s\n", __func__);
-	write_seqlock(&lo->seqlock);
-	do {
-		seq = read_seqbegin(&state->seqlock);
-		memcpy(lo->stateid.data, state->stateid.data,
-		       sizeof(state->stateid.data));
-	} while (read_seqretry(&state->seqlock, seq));
-	set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-	dprintk("<-- %s\n", __func__);
-}
-
+/* update lo->plh_stateid with new if is more recent */
 void
-pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
-			struct nfs4_state *open_state)
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+			bool update_barrier)
 {
-	int seq;
+	u32 oldseq, newseq;
+
+	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+	newseq = be32_to_cpu(new->stateid.seqid);
+	if ((int)(newseq - oldseq) > 0) {
+		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
+		if (update_barrier) {
+			u32 new_barrier = be32_to_cpu(new->stateid.seqid);
+
+			if ((int)(new_barrier - lo->plh_barrier))
+				lo->plh_barrier = new_barrier;
+		} else {
+			/* Because of wraparound, we want to keep the barrier
+			 * "close" to the current seqids.  It needs to be
+			 * within 2**31 to count as "behind", so if it
+			 * gets too near that limit, give us a little leeway
+			 * and bring it to within 2**30.
+			 * NOTE - and yes, this is all unsigned arithmetic.
+			 */
+			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
+				lo->plh_barrier = newseq - (1 << 30);
+		}
+	}
+}
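
The seqid handling above relies on wraparound arithmetic: the test (int)(newseq - oldseq) > 0 treats any forward distance under 2**31 as newer, even across a wrap, and the barrier is pulled back to within 2**30 when it drifts too far behind. A minimal standalone sketch of that comparison (illustrative only, not part of this patch; seqid_newer() is a name made up for the example and the signed cast assumes two's complement, as the kernel does):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "is newseq newer than oldseq?", mirroring the
 * (int)(newseq - oldseq) > 0 test used above. */
static int seqid_newer(uint32_t newseq, uint32_t oldseq)
{
	return (int32_t)(newseq - oldseq) > 0;
}

int main(void)
{
	printf("%d\n", seqid_newer(7, 5));          /* 1: plainly newer */
	printf("%d\n", seqid_newer(2, 0xfffffffe)); /* 1: newer across the wrap */
	printf("%d\n", seqid_newer(5, 7));          /* 0: stale update */
	return 0;
}
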
+
+/* lget is set to 1 if called from inside send_layoutget call chain */
+static bool
+pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
+			int lget)
+{
+	if ((stateid) &&
+	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
+		return true;
+	return lo->plh_block_lgets ||
+		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+		(list_empty(&lo->plh_segs) &&
+		 (atomic_read(&lo->plh_outstanding) > lget));
+}
+
+int
+pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+			      struct nfs4_state *open_state)
+{
+	int status = 0;
 
 	dprintk("--> %s\n", __func__);
-	do {
-		seq = read_seqbegin(&lo->seqlock);
-		if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
-			/* This will trigger retry of the read */
-			pnfs_layout_from_open_stateid(lo, open_state);
-		} else
-			memcpy(dst->data, lo->stateid.data,
-			       sizeof(lo->stateid.data));
-	} while (read_seqretry(&lo->seqlock, seq));
+	spin_lock(&lo->plh_inode->i_lock);
+	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
+		status = -EAGAIN;
+	} else if (list_empty(&lo->plh_segs)) {
+		int seq;
+
+		do {
+			seq = read_seqbegin(&open_state->seqlock);
+			memcpy(dst->data, open_state->stateid.data,
+			       sizeof(open_state->stateid.data));
+		} while (read_seqretry(&open_state->seqlock, seq));
+	} else
+		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
+	spin_unlock(&lo->plh_inode->i_lock);
 	dprintk("<-- %s\n", __func__);
+	return status;
 }
 
 /*
@@ -395,7 +444,7 @@
 	   struct nfs_open_context *ctx,
 	   u32 iomode)
 {
-	struct inode *ino = lo->inode;
+	struct inode *ino = lo->plh_inode;
 	struct nfs_server *server = NFS_SERVER(ino);
 	struct nfs4_layoutget *lgp;
 	struct pnfs_layout_segment *lseg = NULL;
@@ -404,10 +453,8 @@
 
 	BUG_ON(ctx == NULL);
 	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
-	if (lgp == NULL) {
-		put_layout_hdr(lo->inode);
+	if (lgp == NULL)
 		return NULL;
-	}
 	lgp->args.minlength = NFS4_MAX_UINT64;
 	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
 	lgp->args.range.iomode = iomode;
@@ -424,11 +471,88 @@
 	nfs4_proc_layoutget(lgp);
 	if (!lseg) {
 		/* remember that LAYOUTGET failed and suspend trying */
-		set_bit(lo_fail_bit(iomode), &lo->state);
+		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
 	}
 	return lseg;
 }
 
+bool pnfs_roc(struct inode *ino)
+{
+	struct pnfs_layout_hdr *lo;
+	struct pnfs_layout_segment *lseg, *tmp;
+	LIST_HEAD(tmp_list);
+	bool found = false;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+		goto out_nolayout;
+	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
+		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+			mark_lseg_invalid(lseg, &tmp_list);
+			found = true;
+		}
+	if (!found)
+		goto out_nolayout;
+	lo->plh_block_lgets++;
+	get_layout_hdr(lo); /* matched in pnfs_roc_release */
+	spin_unlock(&ino->i_lock);
+	pnfs_free_lseg_list(&tmp_list);
+	return true;
+
+out_nolayout:
+	spin_unlock(&ino->i_lock);
+	return false;
+}
+
+void pnfs_roc_release(struct inode *ino)
+{
+	struct pnfs_layout_hdr *lo;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	lo->plh_block_lgets--;
+	put_layout_hdr_locked(lo);
+	spin_unlock(&ino->i_lock);
+}
+
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+	struct pnfs_layout_hdr *lo;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if ((int)(barrier - lo->plh_barrier) > 0)
+		lo->plh_barrier = barrier;
+	spin_unlock(&ino->i_lock);
+}
+
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+	struct nfs_inode *nfsi = NFS_I(ino);
+	struct pnfs_layout_segment *lseg;
+	bool found = false;
+
+	spin_lock(&ino->i_lock);
+	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
+		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+			found = true;
+			break;
+		}
+	if (!found) {
+		struct pnfs_layout_hdr *lo = nfsi->layout;
+		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+
+		/* Since close does not return a layout stateid for use as
+		 * a barrier, we choose the worst-case barrier.
+		 */
+		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
+	}
+	spin_unlock(&ino->i_lock);
+	return found;
+}
+
 /*
  * Compare two layout segments for sorting into layout cache.
  * We want to preferentially return RW over RO layouts, so ensure those
@@ -450,37 +574,29 @@
 
 	dprintk("%s:Begin\n", __func__);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	if (list_empty(&lo->segs)) {
-		struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;
-
-		spin_lock(&clp->cl_lock);
-		BUG_ON(!list_empty(&lo->layouts));
-		list_add_tail(&lo->layouts, &clp->cl_layouts);
-		spin_unlock(&clp->cl_lock);
-	}
-	list_for_each_entry(lp, &lo->segs, fi_list) {
-		if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
+	assert_spin_locked(&lo->plh_inode->i_lock);
+	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
+		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
 			continue;
-		list_add_tail(&lseg->fi_list, &lp->fi_list);
+		list_add_tail(&lseg->pls_list, &lp->pls_list);
 		dprintk("%s: inserted lseg %p "
 			"iomode %d offset %llu length %llu before "
 			"lp %p iomode %d offset %llu length %llu\n",
-			__func__, lseg, lseg->range.iomode,
-			lseg->range.offset, lseg->range.length,
-			lp, lp->range.iomode, lp->range.offset,
-			lp->range.length);
+			__func__, lseg, lseg->pls_range.iomode,
+			lseg->pls_range.offset, lseg->pls_range.length,
+			lp, lp->pls_range.iomode, lp->pls_range.offset,
+			lp->pls_range.length);
 		found = 1;
 		break;
 	}
 	if (!found) {
-		list_add_tail(&lseg->fi_list, &lo->segs);
+		list_add_tail(&lseg->pls_list, &lo->plh_segs);
 		dprintk("%s: inserted lseg %p "
 			"iomode %d offset %llu length %llu at tail\n",
-			__func__, lseg, lseg->range.iomode,
-			lseg->range.offset, lseg->range.length);
+			__func__, lseg, lseg->pls_range.iomode,
+			lseg->pls_range.offset, lseg->pls_range.length);
 	}
-	get_layout_hdr_locked(lo);
+	get_layout_hdr(lo);
 
 	dprintk("%s:Return\n", __func__);
 }
@@ -493,11 +609,11 @@
 	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
 	if (!lo)
 		return NULL;
-	lo->refcount = 1;
-	INIT_LIST_HEAD(&lo->layouts);
-	INIT_LIST_HEAD(&lo->segs);
-	seqlock_init(&lo->seqlock);
-	lo->inode = ino;
+	atomic_set(&lo->plh_refcount, 1);
+	INIT_LIST_HEAD(&lo->plh_layouts);
+	INIT_LIST_HEAD(&lo->plh_segs);
+	INIT_LIST_HEAD(&lo->plh_bulk_recall);
+	lo->plh_inode = ino;
 	return lo;
 }
 
@@ -510,9 +626,12 @@
 	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
 
 	assert_spin_locked(&ino->i_lock);
-	if (nfsi->layout)
-		return nfsi->layout;
-
+	if (nfsi->layout) {
+		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
+			return NULL;
+		else
+			return nfsi->layout;
+	}
 	spin_unlock(&ino->i_lock);
 	new = alloc_init_layout_hdr(ino);
 	spin_lock(&ino->i_lock);
@@ -538,31 +657,32 @@
 static int
 is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
 {
-	return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
+	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
 }
 
 /*
  * lookup range in layout
  */
 static struct pnfs_layout_segment *
-pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
+pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
 {
 	struct pnfs_layout_segment *lseg, *ret = NULL;
 
 	dprintk("%s:Begin\n", __func__);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	list_for_each_entry(lseg, &lo->segs, fi_list) {
-		if (is_matching_lseg(lseg, iomode)) {
+	assert_spin_locked(&lo->plh_inode->i_lock);
+	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+		    is_matching_lseg(lseg, iomode)) {
 			ret = lseg;
 			break;
 		}
-		if (cmp_layout(iomode, lseg->range.iomode) > 0)
+		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
 			break;
 	}
 
 	dprintk("%s:Return lseg %p ref %d\n",
-		__func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
+		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
 	return ret;
 }
 
@@ -576,6 +696,7 @@
 		   enum pnfs_iomode iomode)
 {
 	struct nfs_inode *nfsi = NFS_I(ino);
+	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
 	struct pnfs_layout_hdr *lo;
 	struct pnfs_layout_segment *lseg = NULL;
 
@@ -588,25 +709,53 @@
 		goto out_unlock;
 	}
 
-	/* Check to see if the layout for the given range already exists */
-	lseg = pnfs_has_layout(lo, iomode);
-	if (lseg) {
-		dprintk("%s: Using cached lseg %p for iomode %d)\n",
-			__func__, lseg, iomode);
+	/* Do we even need to bother with this? */
+	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+		dprintk("%s matches recall, use MDS\n", __func__);
 		goto out_unlock;
 	}
-
-	/* if LAYOUTGET already failed once we don't try again */
-	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
+	/* Check to see if the layout for the given range already exists */
+	lseg = pnfs_find_lseg(lo, iomode);
+	if (lseg)
 		goto out_unlock;
 
-	get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
+	/* if LAYOUTGET already failed once we don't try again */
+	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
+		goto out_unlock;
+
+	if (pnfs_layoutgets_blocked(lo, NULL, 0))
+		goto out_unlock;
+	atomic_inc(&lo->plh_outstanding);
+
+	get_layout_hdr(lo);
+	if (list_empty(&lo->plh_segs)) {
+		/* The lo must be on the clp list if there is any
+		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
+		 */
+		spin_lock(&clp->cl_lock);
+		BUG_ON(!list_empty(&lo->plh_layouts));
+		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
+		spin_unlock(&clp->cl_lock);
+	}
 	spin_unlock(&ino->i_lock);
 
 	lseg = send_layoutget(lo, ctx, iomode);
+	if (!lseg) {
+		spin_lock(&ino->i_lock);
+		if (list_empty(&lo->plh_segs)) {
+			spin_lock(&clp->cl_lock);
+			list_del_init(&lo->plh_layouts);
+			spin_unlock(&clp->cl_lock);
+			clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+		}
+		spin_unlock(&ino->i_lock);
+	}
+	atomic_dec(&lo->plh_outstanding);
+	put_layout_hdr(lo);
 out:
 	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
-		nfsi->layout->state, lseg);
+		nfsi->layout->plh_flags, lseg);
 	return lseg;
 out_unlock:
 	spin_unlock(&ino->i_lock);
@@ -619,9 +768,21 @@
 	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
 	struct nfs4_layoutget_res *res = &lgp->res;
 	struct pnfs_layout_segment *lseg;
-	struct inode *ino = lo->inode;
+	struct inode *ino = lo->plh_inode;
+	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
 	int status = 0;
 
+	/* Verify we got what we asked for.
+	 * Note that because the xdr parsing only accepts a single
+	 * element array, this can fail even if the server is behaving
+	 * correctly.
+	 */
+	if (lgp->args.range.iomode > res->range.iomode ||
+	    res->range.offset != 0 ||
+	    res->range.length != NFS4_MAX_UINT64) {
+		status = -EINVAL;
+		goto out;
+	}
 	/* Inject layout blob into I/O device driver */
 	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
 	if (!lseg || IS_ERR(lseg)) {
@@ -635,16 +796,37 @@
 	}
 
 	spin_lock(&ino->i_lock);
+	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+		dprintk("%s forget reply due to recall\n", __func__);
+		goto out_forget_reply;
+	}
+
+	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
+		dprintk("%s forget reply due to state\n", __func__);
+		goto out_forget_reply;
+	}
 	init_lseg(lo, lseg);
-	lseg->range = res->range;
+	lseg->pls_range = res->range;
 	*lgp->lsegpp = lseg;
 	pnfs_insert_layout(lo, lseg);
 
+	if (res->return_on_close) {
+		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
+		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
+	}
+
 	/* Done processing layoutget. Set the layout stateid */
-	pnfs_set_layout_stateid(lo, &res->stateid);
+	pnfs_set_layout_stateid(lo, &res->stateid, false);
 	spin_unlock(&ino->i_lock);
 out:
 	return status;
+
+out_forget_reply:
+	spin_unlock(&ino->i_lock);
+	lseg->pls_layout = lo;
+	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+	goto out;
 }
 
 /*
@@ -769,7 +951,7 @@
 {
 	struct pnfs_deviceid_cache *local = clp->cl_devid_cache;
 
-	dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
+	dprintk("--> %s ({%d})\n", __func__, atomic_read(&local->dc_ref));
 	if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
 		int i;
 		/* Verify cache is empty */
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index e12367d..e2612ea 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,11 +30,17 @@
 #ifndef FS_NFS_PNFS_H
 #define FS_NFS_PNFS_H
 
+enum {
+	NFS_LSEG_VALID = 0,	/* cleared when lseg is recalled/returned */
+	NFS_LSEG_ROC,		/* roc bit received from server */
+};
+
 struct pnfs_layout_segment {
-	struct list_head fi_list;
-	struct pnfs_layout_range range;
-	struct kref kref;
-	struct pnfs_layout_hdr *layout;
+	struct list_head pls_list;
+	struct pnfs_layout_range pls_range;
+	atomic_t pls_refcount;
+	unsigned long pls_flags;
+	struct pnfs_layout_hdr *pls_layout;
 };
 
 #ifdef CONFIG_NFS_V4_1
@@ -44,7 +50,9 @@
 enum {
 	NFS_LAYOUT_RO_FAILED = 0,	/* get ro layout failed stop trying */
 	NFS_LAYOUT_RW_FAILED,		/* get rw layout failed stop trying */
-	NFS_LAYOUT_STATEID_SET,		/* have a valid layout stateid */
+	NFS_LAYOUT_BULK_RECALL,		/* bulk recall affecting layout */
+	NFS_LAYOUT_ROC,			/* some lseg had roc bit set */
+	NFS_LAYOUT_DESTROYED,		/* no new use of layout allowed */
 };
 
 /* Per-layout driver specific registration structure */
@@ -60,13 +68,16 @@
 };
 
 struct pnfs_layout_hdr {
-	unsigned long		refcount;
-	struct list_head	layouts;   /* other client layouts */
-	struct list_head	segs;      /* layout segments list */
-	seqlock_t		seqlock;   /* Protects the stateid */
-	nfs4_stateid		stateid;
-	unsigned long		state;
-	struct inode		*inode;
+	atomic_t		plh_refcount;
+	struct list_head	plh_layouts;   /* other client layouts */
+	struct list_head	plh_bulk_recall; /* clnt list of bulk recalls */
+	struct list_head	plh_segs;      /* layout segments list */
+	nfs4_stateid		plh_stateid;
+	atomic_t		plh_outstanding; /* number of RPCs out */
+	unsigned long		plh_block_lgets; /* block LAYOUTGET if >0 */
+	u32			plh_barrier; /* ignore lower seqids */
+	unsigned long		plh_flags;
+	struct inode		*plh_inode;
 };
 
 struct pnfs_device {
@@ -134,17 +145,30 @@
 extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
 
 /* pnfs.c */
+void get_layout_hdr(struct pnfs_layout_hdr *lo);
 struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
 		   enum pnfs_iomode access_type);
 void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
 void unset_pnfs_layoutdriver(struct nfs_server *);
 int pnfs_layout_process(struct nfs4_layoutget *lgp);
+void pnfs_free_lseg_list(struct list_head *tmp_list);
 void pnfs_destroy_layout(struct nfs_inode *);
 void pnfs_destroy_all_layouts(struct nfs_client *);
-void put_layout_hdr(struct inode *inode);
-void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
-			     struct nfs4_state *open_state);
+void put_layout_hdr(struct pnfs_layout_hdr *lo);
+void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+			     const nfs4_stateid *new,
+			     bool update_barrier);
+int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
+				  struct pnfs_layout_hdr *lo,
+				  struct nfs4_state *open_state);
+int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+				struct list_head *tmp_list,
+				u32 iomode);
+bool pnfs_roc(struct inode *ino);
+void pnfs_roc_release(struct inode *ino);
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
 
 
 static inline int lo_fail_bit(u32 iomode)
@@ -176,6 +200,28 @@
 	return NULL;
 }
 
+static inline bool
+pnfs_roc(struct inode *ino)
+{
+	return false;
+}
+
+static inline void
+pnfs_roc_release(struct inode *ino)
+{
+}
+
+static inline void
+pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+}
+
+static inline bool
+pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+	return false;
+}
+
 static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
 {
 }
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 58e7f84..77d5e21 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -458,7 +458,7 @@
 	fattr = nfs_alloc_fattr();
 	status = -ENOMEM;
 	if (fh == NULL || fattr == NULL)
-		goto out;
+		goto out_free;
 
 	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 	nfs_mark_for_revalidate(dir);
@@ -471,6 +471,7 @@
 	if (status == 0)
 		status = nfs_instantiate(dentry, fh, fattr);
 
+out_free:
 	nfs_free_fattr(fattr);
 	nfs_free_fhandle(fh);
 out:
@@ -731,7 +732,7 @@
 	.statfs		= nfs_proc_statfs,
 	.fsinfo		= nfs_proc_fsinfo,
 	.pathconf	= nfs_proc_pathconf,
-	.decode_dirent	= nfs_decode_dirent,
+	.decode_dirent	= nfs2_decode_dirent,
 	.read_setup	= nfs_proc_read_setup,
 	.read_done	= nfs_read_done,
 	.write_setup	= nfs_proc_write_setup,
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4100630..b68c860 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -598,7 +598,9 @@
 
 	if (nfss->mountd_version || showdefaults)
 		seq_printf(m, ",mountvers=%u", nfss->mountd_version);
-	if (nfss->mountd_port || showdefaults)
+	if ((nfss->mountd_port &&
+		nfss->mountd_port != (unsigned short)NFS_UNSPEC_PORT) ||
+		showdefaults)
 		seq_printf(m, ",mountport=%u", nfss->mountd_port);
 
 	nfs_show_mountd_netid(m, nfss, showdefaults);
@@ -2200,6 +2202,7 @@
 
 	s->s_flags = sb_mntdata->mntflags;
 	s->s_fs_info = server;
+	s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
 	ret = set_anon_super(s, server);
 	if (ret == 0)
 		server->s_dev = s->s_dev;
@@ -2494,7 +2497,13 @@
 	sb->s_maxbytes = old_sb->s_maxbytes;
 	sb->s_time_gran = 1;
 	sb->s_op = old_sb->s_op;
- 	nfs_initialise_sb(sb);
+	/*
+	 * The VFS shouldn't apply the umask to mode bits. We will do
+	 * so ourselves when necessary.
+	 */
+	sb->s_flags  |= MS_POSIXACL;
+	sb->s_xattr  = old_sb->s_xattr;
+	nfs_initialise_sb(sb);
 }
 
 /*
@@ -2504,6 +2513,12 @@
 {
 	sb->s_time_gran = 1;
 	sb->s_op = &nfs4_sops;
+	/*
+	 * The VFS shouldn't apply the umask to mode bits. We will do
+	 * so ourselves when necessary.
+	 */
+	sb->s_flags  |= MS_POSIXACL;
+	sb->s_xattr = nfs4_xattr_handlers;
 	nfs_initialise_sb(sb);
 }
 
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 8fe9eb4..6481d53 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -180,7 +180,7 @@
 	task_setup_data.rpc_client = NFS_CLIENT(dir);
 	task = rpc_run_task(&task_setup_data);
 	if (!IS_ERR(task))
-		rpc_put_task(task);
+		rpc_put_task_async(task);
 	return 1;
 }
 
@@ -429,7 +429,7 @@
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (data == NULL)
 		return ERR_PTR(-ENOMEM);
-	task_setup_data.callback_data = data,
+	task_setup_data.callback_data = data;
 
 	data->cred = rpc_lookup_cred();
 	if (IS_ERR(data->cred)) {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 10d648ea..42b92d7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -932,7 +932,7 @@
 	while (!list_empty(&list)) {
 		data = list_entry(list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
-		nfs_writedata_release(data);
+		nfs_writedata_free(data);
 	}
 	nfs_redirty_request(req);
 	return -ENOMEM;
@@ -1292,6 +1292,8 @@
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
+	if (how & FLUSH_SYNC)
+		rpc_wait_for_completion_task(task);
 	rpc_put_task(task);
 	return 0;
 }
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index fc1c525..84c27d6 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -42,6 +42,11 @@
 	gid_t gid;
 };
 
+struct nfsacl_simple_acl {
+	struct posix_acl acl;
+	struct posix_acl_entry ace[4];
+};
+
 static int
 xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem)
 {
@@ -72,9 +77,20 @@
 	return 0;
 }
 
-unsigned int
-nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
-	      struct posix_acl *acl, int encode_entries, int typeflag)
+/**
+ * nfsacl_encode - Encode an NFSv3 ACL
+ *
+ * @buf: destination xdr_buf to contain XDR encoded ACL
+ * @base: byte offset in xdr_buf where XDR'd ACL begins
+ * @inode: inode of file whose ACL this is
+ * @acl: posix_acl to encode
+ * @encode_entries: whether to encode ACEs as well
+ * @typeflag: ACL type: NFS_ACL_DEFAULT or zero
+ *
+ * Returns size of encoded ACL in bytes or a negative errno value.
+ */
+int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
+		  struct posix_acl *acl, int encode_entries, int typeflag)
 {
 	int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;
 	struct nfsacl_encode_desc nfsacl_desc = {
@@ -88,17 +104,22 @@
 		.uid = inode->i_uid,
 		.gid = inode->i_gid,
 	};
+	struct nfsacl_simple_acl aclbuf;
 	int err;
-	struct posix_acl *acl2 = NULL;
 
 	if (entries > NFS_ACL_MAX_ENTRIES ||
 	    xdr_encode_word(buf, base, entries))
 		return -EINVAL;
 	if (encode_entries && acl && acl->a_count == 3) {
-		/* Fake up an ACL_MASK entry. */
-		acl2 = posix_acl_alloc(4, GFP_KERNEL);
-		if (!acl2)
-			return -ENOMEM;
+		struct posix_acl *acl2 = &aclbuf.acl;
+
+		/* Avoid the use of posix_acl_alloc().  nfsacl_encode() is
+		 * invoked in contexts where a memory allocation failure is
+		 * fatal.  Fortunately this fake ACL is small enough to
+		 * construct on the stack. */
+		memset(acl2, 0, sizeof(aclbuf));
+		posix_acl_init(acl2, 4);
+
 		/* Insert entries in canonical order: other orders seem
 		 to confuse Solaris VxFS. */
 		acl2->a_entries[0] = acl->a_entries[0];  /* ACL_USER_OBJ */
@@ -109,8 +130,6 @@
 		nfsacl_desc.acl = acl2;
 	}
 	err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc);
-	if (acl2)
-		posix_acl_release(acl2);
 	if (!err)
 		err = 8 + nfsacl_desc.desc.elem_size *
 			  nfsacl_desc.desc.array_len;
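
nfsacl_encode() above builds the four-entry fake ACL in a struct nfsacl_simple_acl on the stack because this path is not allowed to fail on allocation. A standalone sketch of the same reserve-the-worst-case idea (illustrative only, not part of this patch; the types, the fake_up_mask() helper and the way the mask entry is derived are invented for the example):

#include <stdio.h>
#include <string.h>

#define FAKE_ACL_ENTRIES 4

struct ace {
	int tag;
	int perm;
};

/* The worst case is known (four entries), so reserve it up front instead
 * of calling an allocator that can fail. */
struct simple_acl {
	int count;
	struct ace entries[FAKE_ACL_ENTRIES];
};

/* Build a four-entry ACL from a three-entry one by synthesizing a mask
 * entry; nothing in here can fail. */
static void fake_up_mask(struct simple_acl *out, const struct ace in3[3])
{
	memset(out, 0, sizeof(*out));
	out->count = 4;
	out->entries[0] = in3[0];		/* owner */
	out->entries[1] = in3[1];		/* group */
	out->entries[2].tag = 99;		/* mask; tag value made up */
	out->entries[2].perm = in3[1].perm;
	out->entries[3] = in3[2];		/* other */
}

int main(void)
{
	const struct ace src[3] = { { 1, 6 }, { 2, 4 }, { 3, 4 } };
	struct simple_acl acl;

	fake_up_mask(&acl, src);
	printf("count=%d mask perm=%d\n", acl.count, acl.entries[2].perm);
	return 0;
}
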
@@ -224,9 +243,18 @@
 	return 0;
 }
 
-unsigned int
-nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
-	      struct posix_acl **pacl)
+/**
+ * nfsacl_decode - Decode an NFSv3 ACL
+ *
+ * @buf: xdr_buf containing XDR'd ACL data to decode
+ * @base: byte offset in xdr_buf where XDR'd ACL begins
+ * @aclcnt: count of ACEs in decoded posix_acl
+ * @pacl: buffer in which to place decoded posix_acl
+ *
+ * Returns the length of the decoded ACL in bytes, or a negative errno value.
+ */
+int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
+		  struct posix_acl **pacl)
 {
 	struct nfsacl_decode_desc nfsacl_desc = {
 		.desc = {
diff --git a/include/linux/nfs4_acl.h b/fs/nfsd/acl.h
similarity index 97%
rename from include/linux/nfs4_acl.h
rename to fs/nfsd/acl.h
index c9c05a7..34e5c40 100644
--- a/include/linux/nfs4_acl.h
+++ b/fs/nfsd/acl.h
@@ -1,6 +1,4 @@
 /*
- *  include/linux/nfs4_acl.c
- *
  *  Common NFSv4 ACL handling definitions.
  *
  *  Copyright (c) 2002 The Regents of the University of Michigan.
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c0fcb7a..8b31e5f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1,4 +1,3 @@
-#define MSNFS	/* HACK HACK */
 /*
  * NFS exporting and validation.
  *
@@ -1444,9 +1443,6 @@
 	{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
 	{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
 	{ NFSEXP_V4ROOT, {"v4root", ""}},
-#ifdef MSNFS
-	{ NFSEXP_MSNFS, {"msnfs", ""}},
-#endif
 	{ 0, {"", ""}}
 };
 
diff --git a/include/linux/nfsd_idmap.h b/fs/nfsd/idmap.h
similarity index 91%
rename from include/linux/nfsd_idmap.h
rename to fs/nfsd/idmap.h
index d4a2ac1..2f3be13 100644
--- a/include/linux/nfsd_idmap.h
+++ b/fs/nfsd/idmap.h
@@ -1,6 +1,4 @@
 /*
- *  include/linux/nfsd_idmap.h
- *
  *  Mapping of UID to name and vice versa.
  *
  *  Copyright (c) 2002, 2003 The Regents of the University of
@@ -56,8 +54,8 @@
 }
 #endif
 
-int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
-int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
+__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
+__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
 int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *);
 int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *);
 
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 5b7e302..2247fc9 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -151,10 +151,10 @@
 	__be32	nfserr;
 	u32	max_blocksize = svc_max_payload(rqstp);
 
-	dprintk("nfsd: READ(3) %s %lu bytes at %lu\n",
+	dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
 				SVCFH_fmt(&argp->fh),
 				(unsigned long) argp->count,
-				(unsigned long) argp->offset);
+				(unsigned long long) argp->offset);
 
 	/* Obtain buffer pointer for payload.
 	 * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
@@ -191,10 +191,10 @@
 	__be32	nfserr;
 	unsigned long cnt = argp->len;
 
-	dprintk("nfsd: WRITE(3)    %s %d bytes at %ld%s\n",
+	dprintk("nfsd: WRITE(3)    %s %d bytes at %Lu%s\n",
 				SVCFH_fmt(&argp->fh),
 				argp->len,
-				(unsigned long) argp->offset,
+				(unsigned long long) argp->offset,
 				argp->stable? " stable" : "");
 
 	fh_copy(&resp->fh, &argp->fh);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index e480526..ad88f1c 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -36,7 +36,7 @@
 
 #include <linux/slab.h>
 #include <linux/nfs_fs.h>
-#include <linux/nfs4_acl.h>
+#include "acl.h"
 
 
 /* mode bit translations: */
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 143da2e..02eb4ed 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -50,11 +50,6 @@
 	NFSPROC4_CLNT_CB_SEQUENCE,
 };
 
-enum nfs_cb_opnum4 {
-	OP_CB_RECALL            = 4,
-	OP_CB_SEQUENCE          = 11,
-};
-
 #define NFS4_MAXTAGLEN		20
 
 #define NFS4_enc_cb_null_sz		0
@@ -79,61 +74,6 @@
 					cb_sequence_dec_sz +            \
 					op_dec_sz)
 
-/*
-* Generic encode routines from fs/nfs/nfs4xdr.c
-*/
-static inline __be32 *
-xdr_writemem(__be32 *p, const void *ptr, int nbytes)
-{
-	int tmp = XDR_QUADLEN(nbytes);
-	if (!tmp)
-		return p;
-	p[tmp-1] = 0;
-	memcpy(p, ptr, nbytes);
-	return p + tmp;
-}
-
-#define WRITE32(n)               *p++ = htonl(n)
-#define WRITEMEM(ptr,nbytes)     do {                           \
-	p = xdr_writemem(p, ptr, nbytes);                       \
-} while (0)
-#define RESERVE_SPACE(nbytes)   do {                            \
-	p = xdr_reserve_space(xdr, nbytes);                     \
-	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
-	BUG_ON(!p);                                             \
-} while (0)
-
-/*
- * Generic decode routines from fs/nfs/nfs4xdr.c
- */
-#define DECODE_TAIL                             \
-	status = 0;                             \
-out:                                            \
-	return status;                          \
-xdr_error:                                      \
-	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
-	status = -EIO;                          \
-	goto out
-
-#define READ32(x)         (x) = ntohl(*p++)
-#define READ64(x)         do {                  \
-	(x) = (u64)ntohl(*p++) << 32;           \
-	(x) |= ntohl(*p++);                     \
-} while (0)
-#define READTIME(x)       do {                  \
-	p++;                                    \
-	(x.tv_sec) = ntohl(*p++);               \
-	(x.tv_nsec) = ntohl(*p++);              \
-} while (0)
-#define READ_BUF(nbytes)  do { \
-	p = xdr_inline_decode(xdr, nbytes); \
-	if (!p) { \
-		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
-			__func__, __LINE__); \
-		return -EIO; \
-	} \
-} while (0)
-
 struct nfs4_cb_compound_hdr {
 	/* args */
 	u32		ident;	/* minorversion 0 only */
@@ -144,271 +84,487 @@
 	int		status;
 };
 
-static struct {
-int stat;
-int errno;
-} nfs_cb_errtbl[] = {
-	{ NFS4_OK,		0               },
-	{ NFS4ERR_PERM,		EPERM           },
-	{ NFS4ERR_NOENT,	ENOENT          },
-	{ NFS4ERR_IO,		EIO             },
-	{ NFS4ERR_NXIO,		ENXIO           },
-	{ NFS4ERR_ACCESS,	EACCES          },
-	{ NFS4ERR_EXIST,	EEXIST          },
-	{ NFS4ERR_XDEV,		EXDEV           },
-	{ NFS4ERR_NOTDIR,	ENOTDIR         },
-	{ NFS4ERR_ISDIR,	EISDIR          },
-	{ NFS4ERR_INVAL,	EINVAL          },
-	{ NFS4ERR_FBIG,		EFBIG           },
-	{ NFS4ERR_NOSPC,	ENOSPC          },
-	{ NFS4ERR_ROFS,		EROFS           },
-	{ NFS4ERR_MLINK,	EMLINK          },
-	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG    },
-	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY       },
-	{ NFS4ERR_DQUOT,	EDQUOT          },
-	{ NFS4ERR_STALE,	ESTALE          },
-	{ NFS4ERR_BADHANDLE,	EBADHANDLE      },
-	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE      },
-	{ NFS4ERR_NOTSUPP,	ENOTSUPP        },
-	{ NFS4ERR_TOOSMALL,	ETOOSMALL       },
-	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT    },
-	{ NFS4ERR_BADTYPE,	EBADTYPE        },
-	{ NFS4ERR_LOCKED,	EAGAIN          },
-	{ NFS4ERR_RESOURCE,	EREMOTEIO       },
-	{ NFS4ERR_SYMLINK,	ELOOP           },
-	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP      },
-	{ NFS4ERR_DEADLOCK,	EDEADLK         },
-	{ -1,                   EIO             }
-};
-
-static int
-nfs_cb_stat_to_errno(int stat)
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	int i;
-	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
-		if (nfs_cb_errtbl[i].stat == stat)
-			return nfs_cb_errtbl[i].errno;
-	}
-	/* If we cannot translate the error, the recovery routines should
-	* handle it.
-	* Note: remaining NFSv4 error codes have values > 10000, so should
-	* not conflict with native Linux error codes.
-	*/
-	return stat;
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+static __be32 *xdr_encode_empty_array(__be32 *p)
+{
+	*p++ = xdr_zero;
+	return p;
 }
 
 /*
- * XDR encode
+ * Encode/decode NFSv4 CB basic data types
+ *
+ * Basic NFSv4 callback data types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section
+ * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
+ * 1 Protocol"
  */
 
-static void
-encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+/*
+ *	nfs_cb_opnum4
+ *
+ *	enum nfs_cb_opnum4 {
+ *		OP_CB_GETATTR		= 3,
+ *		  ...
+ *	};
+ */
+enum nfs_cb_opnum4 {
+	OP_CB_GETATTR			= 3,
+	OP_CB_RECALL			= 4,
+	OP_CB_LAYOUTRECALL		= 5,
+	OP_CB_NOTIFY			= 6,
+	OP_CB_PUSH_DELEG		= 7,
+	OP_CB_RECALL_ANY		= 8,
+	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
+	OP_CB_RECALL_SLOT		= 10,
+	OP_CB_SEQUENCE			= 11,
+	OP_CB_WANTS_CANCELLED		= 12,
+	OP_CB_NOTIFY_LOCK		= 13,
+	OP_CB_NOTIFY_DEVICEID		= 14,
+	OP_CB_ILLEGAL			= 10044
+};
+
+static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
 {
 	__be32 *p;
 
-	RESERVE_SPACE(sizeof(stateid_t));
-	WRITE32(sid->si_generation);
-	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(op);
 }
 
-static void
-encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
+/*
+ * nfs_fh4
+ *
+ *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
+ */
+static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
+{
+	u32 length = fh->fh_size;
+	__be32 *p;
+
+	BUG_ON(length > NFS4_FHSIZE);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, &fh->fh_base, length);
+}
+
+/*
+ * stateid4
+ *
+ *	struct stateid4 {
+ *		uint32_t	seqid;
+ *		opaque		other[12];
+ *	};
+ */
+static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
+	*p++ = cpu_to_be32(sid->si_generation);
+	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
+}
+
+/*
+ * sessionid4
+ *
+ *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
+ */
+static void encode_sessionid4(struct xdr_stream *xdr,
+			      const struct nfsd4_session *session)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
+	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
+					NFS4_MAX_SESSIONID_LEN);
+}
+
+/*
+ * nfsstat4
+ */
+static const struct {
+	int stat;
+	int errno;
+} nfs_cb_errtbl[] = {
+	{ NFS4_OK,		0		},
+	{ NFS4ERR_PERM,		-EPERM		},
+	{ NFS4ERR_NOENT,	-ENOENT		},
+	{ NFS4ERR_IO,		-EIO		},
+	{ NFS4ERR_NXIO,		-ENXIO		},
+	{ NFS4ERR_ACCESS,	-EACCES		},
+	{ NFS4ERR_EXIST,	-EEXIST		},
+	{ NFS4ERR_XDEV,		-EXDEV		},
+	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
+	{ NFS4ERR_ISDIR,	-EISDIR		},
+	{ NFS4ERR_INVAL,	-EINVAL		},
+	{ NFS4ERR_FBIG,		-EFBIG		},
+	{ NFS4ERR_NOSPC,	-ENOSPC		},
+	{ NFS4ERR_ROFS,		-EROFS		},
+	{ NFS4ERR_MLINK,	-EMLINK		},
+	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
+	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
+	{ NFS4ERR_DQUOT,	-EDQUOT		},
+	{ NFS4ERR_STALE,	-ESTALE		},
+	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
+	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
+	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
+	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
+	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
+	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
+	{ NFS4ERR_LOCKED,	-EAGAIN		},
+	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
+	{ NFS4ERR_SYMLINK,	-ELOOP		},
+	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
+	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
+	{ -1,			-EIO		}
+};
+
+/*
+ * If we cannot translate the error, the recovery routines should
+ * handle it.
+ *
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+static int nfs_cb_stat_to_errno(int status)
+{
+	int i;
+
+	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
+		if (nfs_cb_errtbl[i].stat == status)
+			return nfs_cb_errtbl[i].errno;
+	}
+
+	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
+	return -status;
+}
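
nfs_cb_stat_to_errno() above returns the negated NFSv4 status whenever the table has no match; since the untranslated NFSv4 codes are all above 10000, that value cannot be mistaken for a native errno. A standalone model of the lookup (illustrative only, not part of this patch; the table is truncated to three rows and the field is named errno_val because userspace <errno.h> defines errno as a macro):

#include <errno.h>
#include <stdio.h>

static const struct {
	int stat;
	int errno_val;
} errtbl[] = {
	{ 0,	 0	},	/* NFS4_OK */
	{ 1,	-EPERM	},	/* NFS4ERR_PERM */
	{ 2,	-ENOENT	},	/* NFS4ERR_NOENT */
	{ -1,	-EIO	},	/* sentinel */
};

static int stat_to_errno(int status)
{
	int i;

	for (i = 0; errtbl[i].stat != -1; i++)
		if (errtbl[i].stat == status)
			return errtbl[i].errno_val;
	/* Untranslated NFSv4 codes are all > 10000, so the negated value
	 * cannot collide with a native errno. */
	return -status;
}

int main(void)
{
	printf("%d %d %d\n",
	       stat_to_errno(1),	/* -EPERM */
	       stat_to_errno(0),	/* 0 */
	       stat_to_errno(10052));	/* an NFSv4.1-only status: -10052 */
	return 0;
}
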
+
+static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
+			       enum nfsstat4 *status)
+{
+	__be32 *p;
+	u32 op;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	op = be32_to_cpup(p++);
+	if (unlikely(op != expected))
+		goto out_unexpected;
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+out_unexpected:
+	dprintk("NFSD: Callback server returned operation %d but "
+		"we issued a request for %d\n", op, expected);
+	return -EIO;
+}
+
+/*
+ * CB_COMPOUND4args
+ *
+ *	struct CB_COMPOUND4args {
+ *		utf8str_cs	tag;
+ *		uint32_t	minorversion;
+ *		uint32_t	callback_ident;
+ *		nfs_cb_argop4	argarray<>;
+ *	};
+*/
+static void encode_cb_compound4args(struct xdr_stream *xdr,
+				    struct nfs4_cb_compound_hdr *hdr)
 {
 	__be32 * p;
 
-	RESERVE_SPACE(16);
-	WRITE32(0);            /* tag length is always 0 */
-	WRITE32(hdr->minorversion);
-	WRITE32(hdr->ident);
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+	p = xdr_encode_empty_array(p);		/* empty tag */
+	*p++ = cpu_to_be32(hdr->minorversion);
+	*p++ = cpu_to_be32(hdr->ident);
+
 	hdr->nops_p = p;
-	WRITE32(hdr->nops);
+	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
 }
 
+/*
+ * Update argarray element count
+ */
 static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
 {
-	*hdr->nops_p = htonl(hdr->nops);
+	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
+	*hdr->nops_p = cpu_to_be32(hdr->nops);
 }
 
-static void
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
-		struct nfs4_cb_compound_hdr *hdr)
+/*
+ * CB_COMPOUND4res
+ *
+ *	struct CB_COMPOUND4res {
+ *		nfsstat4	status;
+ *		utf8str_cs	tag;
+ *		nfs_cb_resop4	resarray<>;
+ *	};
+ */
+static int decode_cb_compound4res(struct xdr_stream *xdr,
+				  struct nfs4_cb_compound_hdr *hdr)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	hdr->status = be32_to_cpup(p++);
+	/* Ignore the tag */
+	length = be32_to_cpup(p++);
+	p = xdr_inline_decode(xdr, length + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	hdr->nops = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
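
decode_cb_compound4res() above walks the fixed part of a CB_COMPOUND4res: a 4-byte status, a counted and XDR-padded tag that is ignored, then the resarray element count. A standalone walk over a hand-built header (illustrative only, not part of this patch; the sample words are invented):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* status = 0 (NFS4_OK), tag = the 4 bytes "cb01", nops = 2 */
	const uint32_t reply[] = {
		htonl(0),		/* nfsstat4 status */
		htonl(4),		/* tag length */
		htonl(0x63623031),	/* tag bytes "cb01", already padded */
		htonl(2),		/* resarray element count */
	};
	const uint32_t *p = reply;
	uint32_t status, taglen, nops;

	status = ntohl(*p++);
	taglen = ntohl(*p++);
	p += (taglen + 3) / 4;		/* skip the XDR-padded tag */
	nops = ntohl(*p);

	printf("status=%u taglen=%u nops=%u\n", status, taglen, nops);
	return 0;
}
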
+
+/*
+ * CB_RECALL4args
+ *
+ *	struct CB_RECALL4args {
+ *		stateid4	stateid;
+ *		bool		truncate;
+ *		nfs_fh4		fh;
+ *	};
+ */
+static void encode_cb_recall4args(struct xdr_stream *xdr,
+				  const struct nfs4_delegation *dp,
+				  struct nfs4_cb_compound_hdr *hdr)
 {
 	__be32 *p;
-	int len = dp->dl_fh.fh_size;
 
-	RESERVE_SPACE(4);
-	WRITE32(OP_CB_RECALL);
-	encode_stateid(xdr, &dp->dl_stateid);
-	RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
-	WRITE32(0); /* truncate optimization not implemented */
-	WRITE32(len);
-	WRITEMEM(&dp->dl_fh.fh_base, len);
+	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
+	encode_stateid4(xdr, &dp->dl_stateid);
+
+	p = xdr_reserve_space(xdr, 4);
+	*p++ = xdr_zero;			/* truncate */
+
+	encode_nfs_fh4(xdr, &dp->dl_fh);
+
 	hdr->nops++;
 }
 
-static void
-encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
-		   struct nfs4_cb_compound_hdr *hdr)
+/*
+ * CB_SEQUENCE4args
+ *
+ *	struct CB_SEQUENCE4args {
+ *		sessionid4		csa_sessionid;
+ *		sequenceid4		csa_sequenceid;
+ *		slotid4			csa_slotid;
+ *		slotid4			csa_highest_slotid;
+ *		bool			csa_cachethis;
+ *		referring_call_list4	csa_referring_call_lists<>;
+ *	};
+ */
+static void encode_cb_sequence4args(struct xdr_stream *xdr,
+				    const struct nfsd4_callback *cb,
+				    struct nfs4_cb_compound_hdr *hdr)
 {
+	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
 	__be32 *p;
-	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
 
 	if (hdr->minorversion == 0)
 		return;
 
-	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
+	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
+	encode_sessionid4(xdr, session);
 
-	WRITE32(OP_CB_SEQUENCE);
-	WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
-	WRITE32(ses->se_cb_seq_nr);
-	WRITE32(0);		/* slotid, always 0 */
-	WRITE32(0);		/* highest slotid always 0 */
-	WRITE32(0);		/* cachethis always 0 */
-	WRITE32(0); /* FIXME: support referring_call_lists */
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
+	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
+	*p++ = xdr_zero;			/* csa_slotid */
+	*p++ = xdr_zero;			/* csa_highest_slotid */
+	*p++ = xdr_zero;			/* csa_cachethis */
+	xdr_encode_empty_array(p);		/* csa_referring_call_lists */
+
 	hdr->nops++;
 }
 
-static int
-nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
-{
-	struct xdr_stream xdrs, *xdr = &xdrs;
-
-	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
-        RESERVE_SPACE(0);
-	return 0;
-}
-
-static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
-		struct nfsd4_callback *cb)
-{
-	struct xdr_stream xdr;
-	struct nfs4_delegation *args = cb->cb_op;
-	struct nfs4_cb_compound_hdr hdr = {
-		.ident = cb->cb_clp->cl_cb_ident,
-		.minorversion = cb->cb_minorversion,
-	};
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_cb_compound_hdr(&xdr, &hdr);
-	encode_cb_sequence(&xdr, cb, &hdr);
-	encode_cb_recall(&xdr, args, &hdr);
-	encode_cb_nops(&hdr);
-	return 0;
-}
-
-
-static int
-decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){
-        __be32 *p;
-	u32 taglen;
-
-        READ_BUF(8);
-        READ32(hdr->status);
-	/* We've got no use for the tag; ignore it: */
-        READ32(taglen);
-        READ_BUF(taglen + 4);
-        p += XDR_QUADLEN(taglen);
-        READ32(hdr->nops);
-        return 0;
-}
-
-static int
-decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
-{
-	__be32 *p;
-	u32 op;
-	int32_t nfserr;
-
-	READ_BUF(8);
-	READ32(op);
-	if (op != expected) {
-		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
-		         " operation %d but we issued a request for %d\n",
-		         op, expected);
-		return -EIO;
-	}
-	READ32(nfserr);
-	if (nfserr != NFS_OK)
-		return -nfs_cb_stat_to_errno(nfserr);
-	return 0;
-}
-
 /*
+ * CB_SEQUENCE4resok
+ *
+ *	struct CB_SEQUENCE4resok {
+ *		sessionid4	csr_sessionid;
+ *		sequenceid4	csr_sequenceid;
+ *		slotid4		csr_slotid;
+ *		slotid4		csr_highest_slotid;
+ *		slotid4		csr_target_highest_slotid;
+ *	};
+ *
+ *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
+ *	case NFS4_OK:
+ *		CB_SEQUENCE4resok	csr_resok4;
+ *	default:
+ *		void;
+ *	};
+ *
  * Our current back channel implementation supports a single backchannel
  * with a single slot.
  */
-static int
-decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
-		   struct rpc_rqst *rqstp)
+static int decode_cb_sequence4resok(struct xdr_stream *xdr,
+				    struct nfsd4_callback *cb)
 {
-	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
+	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
 	struct nfs4_sessionid id;
 	int status;
-	u32 dummy;
 	__be32 *p;
+	u32 dummy;
 
-	if (cb->cb_minorversion == 0)
-		return 0;
-
-	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
-	if (status)
-		return status;
+	status = -ESERVERFAULT;
 
 	/*
 	 * If the server returns different values for sessionID, slotID or
 	 * sequence number, the server is looney tunes.
 	 */
-	status = -ESERVERFAULT;
-
-	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
+	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
 	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
+	if (memcmp(id.data, session->se_sessionid.data,
+					NFS4_MAX_SESSIONID_LEN) != 0) {
+		dprintk("NFS: %s Invalid session id\n", __func__);
+		goto out;
+	}
 	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
-	if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
-		dprintk("%s Invalid session id\n", __func__);
+
+	dummy = be32_to_cpup(p++);
+	if (dummy != session->se_cb_seq_nr) {
+		dprintk("NFS: %s Invalid sequence number\n", __func__);
 		goto out;
 	}
-	READ32(dummy);
-	if (dummy != ses->se_cb_seq_nr) {
-		dprintk("%s Invalid sequence number\n", __func__);
-		goto out;
-	}
-	READ32(dummy); 	/* slotid must be 0 */
+
+	dummy = be32_to_cpup(p++);
 	if (dummy != 0) {
-		dprintk("%s Invalid slotid\n", __func__);
+		dprintk("NFS: %s Invalid slotid\n", __func__);
 		goto out;
 	}
-	/* FIXME: process highest slotid and target highest slotid */
+
+	/*
+	 * FIXME: process highest slotid and target highest slotid
+	 */
 	status = 0;
 out:
 	return status;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int decode_cb_sequence4res(struct xdr_stream *xdr,
+				  struct nfsd4_callback *cb)
+{
+	enum nfsstat4 nfserr;
+	int status;
+
+	if (cb->cb_minorversion == 0)
+		return 0;
+
+	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
+	if (unlikely(status))
+		goto out;
+	if (unlikely(nfserr != NFS4_OK))
+		goto out_default;
+	status = decode_cb_sequence4resok(xdr, cb);
+out:
+	return status;
+out_default:
+	return nfs_cb_stat_to_errno(nfserr);
+}
+
+/*
+ * NFSv4.0 and NFSv4.1 XDR encode functions
+ *
+ * NFSv4.0 callback argument types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section 20
+ * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
+ * Protocol".
+ */
+
+/*
+ * NB: Without this zero space reservation, callbacks over krb5p fail
+ */
+static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 void *__unused)
+{
+	xdr_reserve_space(xdr, 0);
+}
+
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
+				   const struct nfsd4_callback *cb)
+{
+	const struct nfs4_delegation *args = cb->cb_op;
+	struct nfs4_cb_compound_hdr hdr = {
+		.ident = cb->cb_clp->cl_cb_ident,
+		.minorversion = cb->cb_minorversion,
+	};
+
+	encode_cb_compound4args(xdr, &hdr);
+	encode_cb_sequence4args(xdr, cb, &hdr);
+	encode_cb_recall4args(xdr, args, &hdr);
+	encode_cb_nops(&hdr);
 }
 
 
-static int
-nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
+/*
+ * NFSv4.0 and NFSv4.1 XDR decode functions
+ *
+ * NFSv4.0 callback result types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section 20
+ * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
+ * Protocol".
+ */
+
+static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+				void *__unused)
 {
 	return 0;
 }
 
-static int
-nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
-		struct nfsd4_callback *cb)
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
+				  struct xdr_stream *xdr,
+				  struct nfsd4_callback *cb)
 {
-	struct xdr_stream xdr;
 	struct nfs4_cb_compound_hdr hdr;
+	enum nfsstat4 nfserr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_cb_compound_hdr(&xdr, &hdr);
-	if (status)
+	status = decode_cb_compound4res(xdr, &hdr);
+	if (unlikely(status))
 		goto out;
-	if (cb) {
-		status = decode_cb_sequence(&xdr, cb, rqstp);
-		if (status)
+
+	if (cb != NULL) {
+		status = decode_cb_sequence4res(xdr, cb);
+		if (unlikely(status))
 			goto out;
 	}
-	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
+
+	status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
+	if (unlikely(status))
+		goto out;
+	if (unlikely(nfserr != NFS4_OK))
+		status = nfs_cb_stat_to_errno(nfserr);
 out:
 	return status;
 }
@@ -416,23 +572,23 @@
 /*
  * RPC procedure tables
  */
-#define PROC(proc, call, argtype, restype)                              \
-[NFSPROC4_CLNT_##proc] = {                                      	\
-        .p_proc   = NFSPROC4_CB_##call,					\
-        .p_encode = (kxdrproc_t) nfs4_xdr_##argtype,                    \
-        .p_decode = (kxdrproc_t) nfs4_xdr_##restype,                    \
-        .p_arglen = NFS4_##argtype##_sz,                                \
-        .p_replen = NFS4_##restype##_sz,                                \
-        .p_statidx = NFSPROC4_CB_##call,				\
-	.p_name   = #proc,                                              \
+#define PROC(proc, call, argtype, restype)				\
+[NFSPROC4_CLNT_##proc] = {						\
+	.p_proc    = NFSPROC4_CB_##call,				\
+	.p_encode  = (kxdreproc_t)nfs4_xdr_enc_##argtype,		\
+	.p_decode  = (kxdrdproc_t)nfs4_xdr_dec_##restype,		\
+	.p_arglen  = NFS4_enc_##argtype##_sz,				\
+	.p_replen  = NFS4_dec_##restype##_sz,				\
+	.p_statidx = NFSPROC4_CB_##call,				\
+	.p_name    = #proc,						\
 }
 
-static struct rpc_procinfo     nfs4_cb_procedures[] = {
-    PROC(CB_NULL,      NULL,     enc_cb_null,     dec_cb_null),
-    PROC(CB_RECALL,    COMPOUND,   enc_cb_recall,      dec_cb_recall),
+static struct rpc_procinfo nfs4_cb_procedures[] = {
+	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
+	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
 };
 
-static struct rpc_version       nfs_cb_version4 = {
+static struct rpc_version nfs_cb_version4 = {
 /*
  * Note on the callback rpc program version number: despite language in rfc
  * 5661 section 18.36.3 requiring servers to use 4 in this field, the
@@ -440,29 +596,29 @@
  * in practice that appears to be what implementations use.  The section
  * 18.36.3 language is expected to be fixed in an erratum.
  */
-        .number                 = 1,
-        .nrprocs                = ARRAY_SIZE(nfs4_cb_procedures),
-        .procs                  = nfs4_cb_procedures
+	.number			= 1,
+	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
+	.procs			= nfs4_cb_procedures
 };
 
-static struct rpc_version *	nfs_cb_version[] = {
+static struct rpc_version *nfs_cb_version[] = {
 	&nfs_cb_version4,
 };
 
 static struct rpc_program cb_program;
 
 static struct rpc_stat cb_stats = {
-		.program	= &cb_program
+	.program		= &cb_program
 };
 
 #define NFS4_CALLBACK 0x40000000
 static struct rpc_program cb_program = {
-		.name 		= "nfs4_cb",
-		.number		= NFS4_CALLBACK,
-		.nrvers		= ARRAY_SIZE(nfs_cb_version),
-		.version	= nfs_cb_version,
-		.stats		= &cb_stats,
-		.pipe_dir_name  = "/nfsd4_cb",
+	.name			= "nfs4_cb",
+	.number			= NFS4_CALLBACK,
+	.nrvers			= ARRAY_SIZE(nfs_cb_version),
+	.version		= nfs_cb_version,
+	.stats			= &cb_stats,
+	.pipe_dir_name		= "/nfsd4_cb",
 };
 
 static int max_cb_time(void)
@@ -470,10 +626,8 @@
 	return max(nfsd4_lease/10, (time_t)1) * HZ;
 }
 
-/* Reference counting, callback cleanup, etc., all look racy as heck.
- * And why is cl_cb_set an atomic? */
 
-int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
+static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
 	struct rpc_timeout	timeparms = {
 		.to_initval	= max_cb_time(),
@@ -483,6 +637,7 @@
 		.net		= &init_net,
 		.address	= (struct sockaddr *) &conn->cb_addr,
 		.addrsize	= conn->cb_addrlen,
+		.saddress	= (struct sockaddr *) &conn->cb_saddr,
 		.timeout	= &timeparms,
 		.program	= &cb_program,
 		.version	= 0,
@@ -499,6 +654,10 @@
 		args.protocol = XPRT_TRANSPORT_TCP;
 		clp->cl_cb_ident = conn->cb_ident;
 	} else {
+		if (!conn->cb_xprt)
+			return -EINVAL;
+		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+		clp->cl_cb_session = ses;
 		args.bc_xprt = conn->cb_xprt;
 		args.prognumber = clp->cl_cb_session->se_cb_prog;
 		args.protocol = XPRT_TRANSPORT_BC_TCP;
@@ -521,14 +680,20 @@
 		(int)clp->cl_name.len, clp->cl_name.data, reason);
 }
 
+static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
+{
+	clp->cl_cb_state = NFSD4_CB_DOWN;
+	warn_no_callback_path(clp, reason);
+}
+
 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
 
 	if (task->tk_status)
-		warn_no_callback_path(clp, task->tk_status);
+		nfsd4_mark_cb_down(clp, task->tk_status);
 	else
-		atomic_set(&clp->cl_cb_set, 1);
+		clp->cl_cb_state = NFSD4_CB_UP;
 }
 
 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
@@ -551,6 +716,11 @@
 
 static struct workqueue_struct *callback_wq;
 
+static void run_nfsd4_cb(struct nfsd4_callback *cb)
+{
+	queue_work(callback_wq, &cb->cb_work);
+}
+
 static void do_probe_callback(struct nfs4_client *clp)
 {
 	struct nfsd4_callback *cb = &clp->cl_cb_null;
@@ -565,7 +735,7 @@
 
 	cb->cb_ops = &nfsd4_cb_probe_ops;
 
-	queue_work(callback_wq, &cb->cb_work);
+	run_nfsd4_cb(cb);
 }
 
 /*
@@ -574,14 +744,21 @@
  */
 void nfsd4_probe_callback(struct nfs4_client *clp)
 {
+	/* XXX: atomicity?  Also, should we be using cl_cb_flags? */
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
 	do_probe_callback(clp);
 }
 
+void nfsd4_probe_callback_sync(struct nfs4_client *clp)
+{
+	nfsd4_probe_callback(clp);
+	flush_workqueue(callback_wq);
+}
+
 void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
 {
-	BUG_ON(atomic_read(&clp->cl_cb_set));
-
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	spin_lock(&clp->cl_lock);
 	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
 	spin_unlock(&clp->cl_lock);
@@ -592,24 +769,14 @@
  * If the slot is available, then mark it busy.  Otherwise, set the
  * thread for sleeping on the callback RPC wait queue.
  */
-static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
-		struct rpc_task *task)
+static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
 {
-	u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
-	int status = 0;
-
-	dprintk("%s: %u:%u:%u:%u\n", __func__,
-		ptr[0], ptr[1], ptr[2], ptr[3]);
-
 	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
 		dprintk("%s slot is busy\n", __func__);
-		status = -EAGAIN;
-		goto out;
+		return false;
 	}
-out:
-	dprintk("%s status=%d\n", __func__, status);
-	return status;
+	return true;
 }
 
 /*
@@ -622,20 +789,19 @@
 	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
 	struct nfs4_client *clp = dp->dl_client;
 	u32 minorversion = clp->cl_minorversion;
-	int status = 0;
 
 	cb->cb_minorversion = minorversion;
 	if (minorversion) {
-		status = nfsd41_cb_setup_sequence(clp, task);
-		if (status) {
-			if (status != -EAGAIN) {
-				/* terminate rpc task */
-				task->tk_status = status;
-				task->tk_action = NULL;
-			}
+		if (!nfsd41_cb_get_slot(clp, task))
 			return;
-		}
 	}
+	spin_lock(&clp->cl_lock);
+	if (list_empty(&cb->cb_per_client)) {
+		/* This is the first call, not a restart */
+		cb->cb_done = false;
+		list_add(&cb->cb_per_client, &clp->cl_callbacks);
+	}
+	spin_unlock(&clp->cl_lock);
 	rpc_call_start(task);
 }
 
@@ -671,15 +837,18 @@
 
 	nfsd4_cb_done(task, calldata);
 
-	if (current_rpc_client == NULL) {
-		/* We're shutting down; give up. */
-		/* XXX: err, or is it ok just to fall through
-		 * and rpc_restart_call? */
+	if (current_rpc_client != task->tk_client) {
+		/* We're shutting down or changing cl_cb_client; leave
+		 * it to nfsd4_process_cb_update to restart the call if
+		 * necessary. */
 		return;
 	}
 
+	if (cb->cb_done)
+		return;
 	switch (task->tk_status) {
 	case 0:
+		cb->cb_done = true;
 		return;
 	case -EBADHANDLE:
 	case -NFS4ERR_BAD_STATEID:
@@ -688,32 +857,30 @@
 		break;
 	default:
 		/* Network partition? */
-		atomic_set(&clp->cl_cb_set, 0);
-		warn_no_callback_path(clp, task->tk_status);
-		if (current_rpc_client != task->tk_client) {
-			/* queue a callback on the new connection: */
-			atomic_inc(&dp->dl_count);
-			nfsd4_cb_recall(dp);
-			return;
-		}
+		nfsd4_mark_cb_down(clp, task->tk_status);
 	}
 	if (dp->dl_retries--) {
 		rpc_delay(task, 2*HZ);
 		task->tk_status = 0;
 		rpc_restart_call_prepare(task);
 		return;
-	} else {
-		atomic_set(&clp->cl_cb_set, 0);
-		warn_no_callback_path(clp, task->tk_status);
 	}
+	nfsd4_mark_cb_down(clp, task->tk_status);
+	cb->cb_done = true;
 }
 
 static void nfsd4_cb_recall_release(void *calldata)
 {
 	struct nfsd4_callback *cb = calldata;
+	struct nfs4_client *clp = cb->cb_clp;
 	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
 
-	nfs4_put_delegation(dp);
+	if (cb->cb_done) {
+		spin_lock(&clp->cl_lock);
+		list_del(&cb->cb_per_client);
+		spin_unlock(&clp->cl_lock);
+		nfs4_put_delegation(dp);
+	}
 }
 
 static const struct rpc_call_ops nfsd4_cb_recall_ops = {
@@ -748,16 +915,33 @@
 	flush_workqueue(callback_wq);
 }
 
-void nfsd4_release_cb(struct nfsd4_callback *cb)
+static void nfsd4_release_cb(struct nfsd4_callback *cb)
 {
 	if (cb->cb_ops->rpc_release)
 		cb->cb_ops->rpc_release(cb);
 }
 
-void nfsd4_process_cb_update(struct nfsd4_callback *cb)
+/* requires cl_lock: */
+static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
+{
+	struct nfsd4_session *s;
+	struct nfsd4_conn *c;
+
+	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
+		list_for_each_entry(c, &s->se_conns, cn_persession) {
+			if (c->cn_flags & NFS4_CDFC4_BACK)
+				return c;
+		}
+	}
+	return NULL;
+}
+
+static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
 {
 	struct nfs4_cb_conn conn;
 	struct nfs4_client *clp = cb->cb_clp;
+	struct nfsd4_session *ses = NULL;
+	struct nfsd4_conn *c;
 	int err;
 
 	/*
@@ -768,6 +952,10 @@
 		rpc_shutdown_client(clp->cl_cb_client);
 		clp->cl_cb_client = NULL;
 	}
+	if (clp->cl_cb_conn.cb_xprt) {
+		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+		clp->cl_cb_conn.cb_xprt = NULL;
+	}
 	if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
 		return;
 	spin_lock(&clp->cl_lock);
@@ -778,11 +966,22 @@
 	BUG_ON(!clp->cl_cb_flags);
 	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
 	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
+	c = __nfsd4_find_backchannel(clp);
+	if (c) {
+		svc_xprt_get(c->cn_xprt);
+		conn.cb_xprt = c->cn_xprt;
+		ses = c->cn_session;
+	}
 	spin_unlock(&clp->cl_lock);
 
-	err = setup_callback_client(clp, &conn);
-	if (err)
+	err = setup_callback_client(clp, &conn, ses);
+	if (err) {
 		warn_no_callback_path(clp, err);
+		return;
+	}
+	/* Yay, the callback channel's back! Restart any callbacks: */
+	list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
+		run_nfsd4_cb(cb);
 }
 
 void nfsd4_do_callback_rpc(struct work_struct *w)
@@ -807,10 +1006,11 @@
 void nfsd4_cb_recall(struct nfs4_delegation *dp)
 {
 	struct nfsd4_callback *cb = &dp->dl_recall;
+	struct nfs4_client *clp = dp->dl_client;
 
 	dp->dl_retries = 1;
 	cb->cb_op = dp;
-	cb->cb_clp = dp->dl_client;
+	cb->cb_clp = clp;
 	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
 	cb->cb_msg.rpc_argp = cb;
 	cb->cb_msg.rpc_resp = cb;
@@ -819,5 +1019,8 @@
 	cb->cb_ops = &nfsd4_cb_recall_ops;
 	dp->dl_retries = 1;
 
-	queue_work(callback_wq, &dp->dl_recall.cb_work);
+	INIT_LIST_HEAD(&cb->cb_per_client);
+	cb->cb_done = true;
+
+	run_nfsd4_cb(&dp->dl_recall);
 }
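
The rewritten CB_SEQUENCE encoder above reserves space once and emits the arguments as big-endian 32-bit words following the opaque session ID. For readers unfamiliar with the wire layout, here is a minimal userspace sketch of the same CB_SEQUENCE4args framing, using a plain byte buffer and htonl() instead of the kernel's struct xdr_stream; the buffer handling and constants are illustrative assumptions, not kernel API:

/* Sketch: XDR layout of CB_SEQUENCE4args, userspace approximation. */
#include <arpa/inet.h>   /* htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SESSIONID_LEN 16  /* stands in for NFS4_MAX_SESSIONID_LEN */

static unsigned char *encode_u32(unsigned char *p, uint32_t v)
{
	uint32_t be = htonl(v);
	memcpy(p, &be, 4);
	return p + 4;
}

int main(void)
{
	unsigned char buf[SESSIONID_LEN + 5 * 4];
	unsigned char sessionid[SESSIONID_LEN] = { 0xab };  /* dummy id */
	unsigned char *p = buf;

	memcpy(p, sessionid, SESSIONID_LEN);   /* csa_sessionid */
	p += SESSIONID_LEN;
	p = encode_u32(p, 42);                 /* csa_sequenceid */
	p = encode_u32(p, 0);                  /* csa_slotid */
	p = encode_u32(p, 0);                  /* csa_highest_slotid */
	p = encode_u32(p, 0);                  /* csa_cachethis (false) */
	p = encode_u32(p, 0);                  /* empty csa_referring_call_lists<> */

	printf("encoded %zu bytes\n", (size_t)(p - buf));
	return 0;
}

Slot ID, highest slot ID and cachethis are always zero here because nfsd's backchannel advertises a single slot, as the comment above notes.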
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index f0695e8..6d2c397 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -33,10 +33,11 @@
  */
 
 #include <linux/module.h>
-#include <linux/nfsd_idmap.h>
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include "idmap.h"
+#include "nfsd.h"
 
 /*
  * Cache entry
@@ -514,7 +515,7 @@
 	return clp->name;
 }
 
-static int
+static __be32
 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
 		uid_t *id)
 {
@@ -524,15 +525,15 @@
 	int ret;
 
 	if (namelen + 1 > sizeof(key.name))
-		return -EINVAL;
+		return nfserr_badowner;
 	memcpy(key.name, name, namelen);
 	key.name[namelen] = '\0';
 	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
 	ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
 	if (ret == -ENOENT)
-		ret = -ESRCH; /* nfserr_badname */
+		return nfserr_badowner;
 	if (ret)
-		return ret;
+		return nfserrno(ret);
 	*id = item->id;
 	cache_put(&item->h, &nametoid_cache);
 	return 0;
@@ -560,14 +561,14 @@
 	return ret;
 }
 
-int
+__be32
 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
 		__u32 *id)
 {
 	return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
 }
 
-int
+__be32
 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
 		__u32 *id)
 {
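
The idmap changes above switch the name-to-id helpers from returning negative errnos to returning __be32 NFS statuses, and an owner string that cannot fit the cache key is now reported as nfserr_badowner up front. A hedged userspace sketch of that bounded-copy check (the key size is a stand-in chosen for the example; 10039 is NFS4ERR_BADOWNER per RFC 3530, but nothing else here is kernel code):

/* Sketch: reject over-long owner names before copying into a fixed key. */
#include <stdio.h>
#include <string.h>

#define KEY_NAME_MAX 128              /* stand-in for sizeof(key.name) */
#define STATUS_OK          0
#define STATUS_BADOWNER    10039      /* NFS4ERR_BADOWNER */

struct key { char name[KEY_NAME_MAX]; };

static int name_to_key(struct key *key, const char *name, size_t namelen)
{
	if (namelen + 1 > sizeof(key->name))
		return STATUS_BADOWNER;   /* name cannot be a valid owner */
	memcpy(key->name, name, namelen);
	key->name[namelen] = '\0';
	return STATUS_OK;
}

int main(void)
{
	struct key k;
	char big[256];

	memset(big, 'x', sizeof(big));
	printf("short name -> %d\n", name_to_key(&k, "alice@example.org", 17));
	printf("long name  -> %d\n", name_to_key(&k, big, sizeof(big)));
	return 0;
}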
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0cdfd02..db52546 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -604,9 +604,7 @@
 	return status;
 }
 
-static __be32
-nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
-	      void *arg)
+static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
 {
 	struct svc_fh tmp_fh;
 	__be32 ret;
@@ -615,13 +613,19 @@
 	ret = exp_pseudoroot(rqstp, &tmp_fh);
 	if (ret)
 		return ret;
-	if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
+	if (tmp_fh.fh_dentry == fh->fh_dentry) {
 		fh_put(&tmp_fh);
 		return nfserr_noent;
 	}
 	fh_put(&tmp_fh);
-	return nfsd_lookup(rqstp, &cstate->current_fh,
-			   "..", 2, &cstate->current_fh);
+	return nfsd_lookup(rqstp, fh, "..", 2, fh);
+}
+
+static __be32
+nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+	      void *arg)
+{
+	return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
 }
 
 static __be32
@@ -769,10 +773,36 @@
 	} else
 		secinfo->si_exp = exp;
 	dput(dentry);
+	if (cstate->minorversion)
+		/* See rfc 5661 section 2.6.3.1.1.8 */
+		fh_put(&cstate->current_fh);
 	return err;
 }
 
 static __be32
+nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+	      struct nfsd4_secinfo_no_name *sin)
+{
+	__be32 err;
+
+	switch (sin->sin_style) {
+	case NFS4_SECINFO_STYLE4_CURRENT_FH:
+		break;
+	case NFS4_SECINFO_STYLE4_PARENT:
+		err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
+		if (err)
+			return err;
+		break;
+	default:
+		return nfserr_inval;
+	}
+	exp_get(cstate->current_fh.fh_export);
+	sin->sin_exp = cstate->current_fh.fh_export;
+	fh_put(&cstate->current_fh);
+	return nfs_ok;
+}
+
+static __be32
 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	      struct nfsd4_setattr *setattr)
 {
@@ -974,8 +1004,8 @@
  * Also note, enforced elsewhere:
  *	- SEQUENCE other than as first op results in
  *	  NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
- *	- BIND_CONN_TO_SESSION must be the only op in its compound
- *	  (Will be enforced in nfsd4_bind_conn_to_session().)
+ *	- BIND_CONN_TO_SESSION must be the only op in its compound.
+ *	  (Enforced in nfsd4_bind_conn_to_session().)
  *	- DESTROY_SESSION must be the final operation in a compound, if
  *	  sessionid's in SEQUENCE and DESTROY_SESSION are the same.
  *	  (Enforced in nfsd4_destroy_session().)
@@ -1126,10 +1156,6 @@
 
 		nfsd4_increment_op_stats(op->opnum);
 	}
-	if (!rqstp->rq_usedeferral && status == nfserr_dropit) {
-		dprintk("%s Dropit - send NFS4ERR_DELAY\n", __func__);
-		status = nfserr_jukebox;
-	}
 
 	resp->cstate.status = status;
 	fh_put(&resp->cstate.current_fh);
@@ -1300,6 +1326,11 @@
 		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
 		.op_name = "OP_EXCHANGE_ID",
 	},
+	[OP_BIND_CONN_TO_SESSION] = {
+		.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
+		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+		.op_name = "OP_BIND_CONN_TO_SESSION",
+	},
 	[OP_CREATE_SESSION] = {
 		.op_func = (nfsd4op_func)nfsd4_create_session,
 		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
@@ -1320,6 +1351,10 @@
 		.op_flags = ALLOWED_WITHOUT_FH,
 		.op_name = "OP_RECLAIM_COMPLETE",
 	},
+	[OP_SECINFO_NO_NAME] = {
+		.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
+		.op_name = "OP_SECINFO_NO_NAME",
+	},
 };
 
 static const char *nfsd4_op_name(unsigned opnum)
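
nfsd4_ops[] is an opcode-indexed table of handlers plus per-op flags, which is why wiring up OP_BIND_CONN_TO_SESSION and OP_SECINFO_NO_NAME above is just two new designated initializers. A self-contained sketch of the same dispatch-table pattern (all names and flag values below are invented for illustration, not taken from nfsd):

/* Sketch: opcode-indexed dispatch table with per-op flags. */
#include <stdio.h>

enum { OP_FOO = 1, OP_BAR = 2, OP_MAX };

#define ALLOWED_WITHOUT_FH   0x01
#define ALLOWED_AS_FIRST_OP  0x02

struct op_desc {
	int (*op_func)(void *arg);
	unsigned op_flags;
	const char *op_name;
};

static int do_foo(void *arg) { (void)arg; return 0; }
static int do_bar(void *arg) { (void)arg; return 0; }

static const struct op_desc ops[OP_MAX] = {
	[OP_FOO] = { .op_func = do_foo, .op_flags = ALLOWED_WITHOUT_FH,
		     .op_name = "OP_FOO" },
	[OP_BAR] = { .op_func = do_bar,
		     .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
		     .op_name = "OP_BAR" },
};

static int dispatch(unsigned opnum, void *arg)
{
	if (opnum >= OP_MAX || !ops[opnum].op_func)
		return -1;   /* unknown or unsupported op */
	printf("dispatching %s\n", ops[opnum].op_name);
	return ops[opnum].op_func(arg);
}

int main(void)
{
	dispatch(OP_BAR, NULL);
	dispatch(OP_MAX + 5, NULL);
	return 0;
}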
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 7e26caa..ffb59ef 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -302,7 +302,6 @@
 {
 	int status;
 
-	/* note: we currently use this path only for minorversion 0 */
 	if (nfs4_has_reclaimed_state(child->d_name.name, false))
 		return 0;
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index fbd18c3..7b566ec 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -230,8 +230,6 @@
 	dp->dl_client = clp;
 	get_nfs4_file(fp);
 	dp->dl_file = fp;
-	nfs4_file_get_access(fp, O_RDONLY);
-	dp->dl_flock = NULL;
 	dp->dl_type = type;
 	dp->dl_stateid.si_boot = boot_time;
 	dp->dl_stateid.si_stateownerid = current_delegid++;
@@ -240,8 +238,6 @@
 	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
 	dp->dl_time = 0;
 	atomic_set(&dp->dl_count, 1);
-	list_add(&dp->dl_perfile, &fp->fi_delegations);
-	list_add(&dp->dl_perclnt, &clp->cl_delegations);
 	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
 	return dp;
 }
@@ -257,32 +253,25 @@
 	}
 }
 
-/* Remove the associated file_lock first, then remove the delegation.
- * lease_modify() is called to remove the FS_LEASE file_lock from
- * the i_flock list, eventually calling nfsd's lock_manager
- * fl_release_callback.
- */
-static void
-nfs4_close_delegation(struct nfs4_delegation *dp)
+static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 {
-	struct file *filp = find_readable_file(dp->dl_file);
-
-	dprintk("NFSD: close_delegation dp %p\n",dp);
-	if (dp->dl_flock)
-		vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
-	nfs4_file_put_access(dp->dl_file, O_RDONLY);
+	if (atomic_dec_and_test(&fp->fi_delegees)) {
+		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
+		fp->fi_lease = NULL;
+		fp->fi_deleg_file = NULL;
+	}
 }
 
 /* Called under the state lock. */
 static void
 unhash_delegation(struct nfs4_delegation *dp)
 {
-	list_del_init(&dp->dl_perfile);
 	list_del_init(&dp->dl_perclnt);
 	spin_lock(&recall_lock);
+	list_del_init(&dp->dl_perfile);
 	list_del_init(&dp->dl_recall_lru);
 	spin_unlock(&recall_lock);
-	nfs4_close_delegation(dp);
+	nfs4_put_deleg_lease(dp->dl_file);
 	nfs4_put_delegation(dp);
 }
 
@@ -642,6 +631,7 @@
 		free_conn(c);
 	}
 	spin_unlock(&clp->cl_lock);
+	nfsd4_probe_callback(clp);
 }
 
 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
@@ -679,15 +669,12 @@
 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
 }
 
-static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
+static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
 {
 	struct nfsd4_conn *conn;
-	u32 flags = NFS4_CDFC4_FORE;
 	int ret;
 
-	if (ses->se_flags & SESSION4_BACK_CHAN)
-		flags |= NFS4_CDFC4_BACK;
-	conn = alloc_conn(rqstp, flags);
+	conn = alloc_conn(rqstp, dir);
 	if (!conn)
 		return nfserr_jukebox;
 	nfsd4_hash_conn(conn, ses);
@@ -698,6 +685,17 @@
 	return nfs_ok;
 }
 
+static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
+{
+	u32 dir = NFS4_CDFC4_FORE;
+
+	if (ses->se_flags & SESSION4_BACK_CHAN)
+		dir |= NFS4_CDFC4_BACK;
+
+	return nfsd4_new_conn(rqstp, ses, dir);
+}
+
+/* must be called under client_lock */
 static void nfsd4_del_conns(struct nfsd4_session *s)
 {
 	struct nfs4_client *clp = s->se_client;
@@ -749,6 +747,8 @@
 	 */
 	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
 	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
+	if (numslots < 1)
+		return NULL;
 
 	new = alloc_session(slotsize, numslots);
 	if (!new) {
@@ -769,25 +769,30 @@
 	idx = hash_sessionid(&new->se_sessionid);
 	spin_lock(&client_lock);
 	list_add(&new->se_hash, &sessionid_hashtbl[idx]);
+	spin_lock(&clp->cl_lock);
 	list_add(&new->se_perclnt, &clp->cl_sessions);
+	spin_unlock(&clp->cl_lock);
 	spin_unlock(&client_lock);
 
-	status = nfsd4_new_conn(rqstp, new);
+	status = nfsd4_new_conn_from_crses(rqstp, new);
 	/* whoops: benny points out, status is ignored! (err, or bogus) */
 	if (status) {
 		free_session(&new->se_ref);
 		return NULL;
 	}
-	if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) {
+	if (cses->flags & SESSION4_BACK_CHAN) {
 		struct sockaddr *sa = svc_addr(rqstp);
-
-		clp->cl_cb_session = new;
-		clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
-		svc_xprt_get(rqstp->rq_xprt);
+		/*
+		 * This is a little silly; with sessions there's no real
+		 * use for the callback address.  Use the peer address
+		 * as a reasonable default for now, but consider fixing
+		 * the rpc client not to require an address in the
+		 * future:
+		 */
 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
-		nfsd4_probe_callback(clp);
 	}
+	nfsd4_probe_callback(clp);
 	return new;
 }
 
@@ -817,7 +822,9 @@
 unhash_session(struct nfsd4_session *ses)
 {
 	list_del(&ses->se_hash);
+	spin_lock(&ses->se_client->cl_lock);
 	list_del(&ses->se_perclnt);
+	spin_unlock(&ses->se_client->cl_lock);
 }
 
 /* must be called under the client_lock */
@@ -923,8 +930,10 @@
 
 	mark_client_expired(clp);
 	list_del(&clp->cl_lru);
+	spin_lock(&clp->cl_lock);
 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
 		list_del_init(&ses->se_hash);
+	spin_unlock(&clp->cl_lock);
 }
 
 static void
@@ -938,8 +947,6 @@
 	spin_lock(&recall_lock);
 	while (!list_empty(&clp->cl_delegations)) {
 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-		dprintk("NFSD: expire client. dp %p, fp %p\n", dp,
-				dp->dl_flock);
 		list_del_init(&dp->dl_perclnt);
 		list_move(&dp->dl_recall_lru, &reaplist);
 	}
@@ -1051,12 +1058,13 @@
 
 	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
 	atomic_set(&clp->cl_refcount, 0);
-	atomic_set(&clp->cl_cb_set, 0);
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	INIT_LIST_HEAD(&clp->cl_idhash);
 	INIT_LIST_HEAD(&clp->cl_strhash);
 	INIT_LIST_HEAD(&clp->cl_openowners);
 	INIT_LIST_HEAD(&clp->cl_delegations);
 	INIT_LIST_HEAD(&clp->cl_lru);
+	INIT_LIST_HEAD(&clp->cl_callbacks);
 	spin_lock_init(&clp->cl_lock);
 	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
 	clp->cl_time = get_seconds();
@@ -1132,54 +1140,55 @@
 	return NULL;
 }
 
-/*
- * Return 1 iff clp's clientid establishment method matches the use_exchange_id
- * parameter. Matching is based on the fact the at least one of the
- * EXCHGID4_FLAG_USE_{NON_PNFS,PNFS_MDS,PNFS_DS} flags must be set for v4.1
- *
- * FIXME: we need to unify the clientid namespaces for nfsv4.x
- * and correctly deal with client upgrade/downgrade in EXCHANGE_ID
- * and SET_CLIENTID{,_CONFIRM}
- */
-static inline int
-match_clientid_establishment(struct nfs4_client *clp, bool use_exchange_id)
+static bool clp_used_exchangeid(struct nfs4_client *clp)
 {
-	bool has_exchange_flags = (clp->cl_exchange_flags != 0);
-	return use_exchange_id == has_exchange_flags;
-}
+	return clp->cl_exchange_flags != 0;
+}
 
 static struct nfs4_client *
-find_confirmed_client_by_str(const char *dname, unsigned int hashval,
-			     bool use_exchange_id)
+find_confirmed_client_by_str(const char *dname, unsigned int hashval)
 {
 	struct nfs4_client *clp;
 
 	list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
-		if (same_name(clp->cl_recdir, dname) &&
-		    match_clientid_establishment(clp, use_exchange_id))
+		if (same_name(clp->cl_recdir, dname))
 			return clp;
 	}
 	return NULL;
 }
 
 static struct nfs4_client *
-find_unconfirmed_client_by_str(const char *dname, unsigned int hashval,
-			       bool use_exchange_id)
+find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
 {
 	struct nfs4_client *clp;
 
 	list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
-		if (same_name(clp->cl_recdir, dname) &&
-		    match_clientid_establishment(clp, use_exchange_id))
+		if (same_name(clp->cl_recdir, dname))
 			return clp;
 	}
 	return NULL;
 }
 
+static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr)
+{
+	switch (family) {
+	case AF_INET:
+		((struct sockaddr_in *)sa)->sin_family = AF_INET;
+		((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr;
+		return;
+	case AF_INET6:
+		((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6;
+		((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6;
+		return;
+	}
+}
+
 static void
-gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
+gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
 {
 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
+	struct sockaddr	*sa = svc_addr(rqstp);
+	u32 scopeid = rpc_get_scope_id(sa);
 	unsigned short expected_family;
 
 	/* Currently, we only support tcp and tcp6 for the callback channel */
@@ -1205,6 +1214,7 @@
 
 	conn->cb_prog = se->se_callback_prog;
 	conn->cb_ident = se->se_callback_ident;
+	rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr);
 	return;
 out_err:
 	conn->cb_addr.ss_family = AF_UNSPEC;
@@ -1344,7 +1354,7 @@
 	case SP4_NONE:
 		break;
 	case SP4_SSV:
-		return nfserr_encr_alg_unsupp;
+		return nfserr_serverfault;
 	default:
 		BUG();				/* checked by xdr code */
 	case SP4_MACH_CRED:
@@ -1361,8 +1371,12 @@
 	nfs4_lock_state();
 	status = nfs_ok;
 
-	conf = find_confirmed_client_by_str(dname, strhashval, true);
+	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
+		if (!clp_used_exchangeid(conf)) {
+			status = nfserr_clid_inuse; /* XXX: ? */
+			goto out;
+		}
 		if (!same_verf(&verf, &conf->cl_verifier)) {
 			/* 18.35.4 case 8 */
 			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
@@ -1403,7 +1417,7 @@
 		goto out;
 	}
 
-	unconf  = find_unconfirmed_client_by_str(dname, strhashval, true);
+	unconf  = find_unconfirmed_client_by_str(dname, strhashval);
 	if (unconf) {
 		/*
 		 * Possible retry or client restart.  Per 18.35.4 case 4,
@@ -1560,6 +1574,8 @@
 	status = nfs_ok;
 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
 	       NFS4_MAX_SESSIONID_LEN);
+	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
+		sizeof(struct nfsd4_channel_attrs));
 	cs_slot->sl_seqid++;
 	cr_ses->seqid = cs_slot->sl_seqid;
 
@@ -1581,6 +1597,45 @@
 	return argp->opcnt == resp->opcnt;
 }
 
+static __be32 nfsd4_map_bcts_dir(u32 *dir)
+{
+	switch (*dir) {
+	case NFS4_CDFC4_FORE:
+	case NFS4_CDFC4_BACK:
+		return nfs_ok;
+	case NFS4_CDFC4_FORE_OR_BOTH:
+	case NFS4_CDFC4_BACK_OR_BOTH:
+		*dir = NFS4_CDFC4_BOTH;
+		return nfs_ok;
+	};
+	return nfserr_inval;
+}
+
+__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
+		     struct nfsd4_compound_state *cstate,
+		     struct nfsd4_bind_conn_to_session *bcts)
+{
+	__be32 status;
+
+	if (!nfsd4_last_compound_op(rqstp))
+		return nfserr_not_only_op;
+	spin_lock(&client_lock);
+	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
+	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
+	 * client_lock itself: */
+	if (cstate->session) {
+		nfsd4_get_session(cstate->session);
+		atomic_inc(&cstate->session->se_client->cl_refcount);
+	}
+	spin_unlock(&client_lock);
+	if (!cstate->session)
+		return nfserr_badsession;
+
+	status = nfsd4_map_bcts_dir(&bcts->dir);
+	if (status)
+		return status;
+	return nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
+}
+
 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
 {
 	if (!session)
@@ -1619,8 +1674,7 @@
 	spin_unlock(&client_lock);
 
 	nfs4_lock_state();
-	/* wait for callbacks */
-	nfsd4_shutdown_callback(ses->se_client);
+	nfsd4_probe_callback_sync(ses->se_client);
 	nfs4_unlock_state();
 
 	nfsd4_del_conns(ses);
@@ -1733,8 +1787,12 @@
 out:
 	/* Hold a session reference until done processing the compound. */
 	if (cstate->session) {
+		struct nfs4_client *clp = session->se_client;
+
 		nfsd4_get_session(cstate->session);
-		atomic_inc(&session->se_client->cl_refcount);
+		atomic_inc(&clp->cl_refcount);
+		if (clp->cl_cb_state == NFSD4_CB_DOWN)
+			seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
 	}
 	kfree(conn);
 	spin_unlock(&client_lock);
@@ -1775,7 +1833,6 @@
 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		  struct nfsd4_setclientid *setclid)
 {
-	struct sockaddr		*sa = svc_addr(rqstp);
 	struct xdr_netobj 	clname = { 
 		.len = setclid->se_namelen,
 		.data = setclid->se_name,
@@ -1801,10 +1858,12 @@
 	strhashval = clientstr_hashval(dname);
 
 	nfs4_lock_state();
-	conf = find_confirmed_client_by_str(dname, strhashval, false);
+	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
 		/* RFC 3530 14.2.33 CASE 0: */
 		status = nfserr_clid_inuse;
+		if (clp_used_exchangeid(conf))
+			goto out;
 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
 			char addr_str[INET6_ADDRSTRLEN];
 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
@@ -1819,7 +1878,7 @@
 	 * has a description of SETCLIENTID request processing consisting
 	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
 	 */
-	unconf = find_unconfirmed_client_by_str(dname, strhashval, false);
+	unconf = find_unconfirmed_client_by_str(dname, strhashval);
 	status = nfserr_resource;
 	if (!conf) {
 		/*
@@ -1876,7 +1935,7 @@
 	 * for consistent minorversion use throughout:
 	 */
 	new->cl_minorversion = 0;
-	gen_callback(new, setclid, rpc_get_scope_id(sa));
+	gen_callback(new, setclid, rqstp);
 	add_to_unconfirmed(new, strhashval);
 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
@@ -1935,7 +1994,6 @@
 		if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
 			status = nfserr_clid_inuse;
 		else {
-			atomic_set(&conf->cl_cb_set, 0);
 			nfsd4_change_callback(conf, &unconf->cl_cb_conn);
 			nfsd4_probe_callback(conf);
 			expire_client(unconf);
@@ -1964,7 +2022,7 @@
 			unsigned int hash =
 				clientstr_hashval(unconf->cl_recdir);
 			conf = find_confirmed_client_by_str(unconf->cl_recdir,
-							    hash, false);
+							    hash);
 			if (conf) {
 				nfsd4_remove_clid_dir(conf);
 				expire_client(conf);
@@ -2007,6 +2065,7 @@
 		fp->fi_inode = igrab(ino);
 		fp->fi_id = current_fileid++;
 		fp->fi_had_conflict = false;
+		fp->fi_lease = NULL;
 		memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
 		memset(fp->fi_access, 0, sizeof(fp->fi_access));
 		spin_lock(&recall_lock);
@@ -2258,23 +2317,8 @@
 		nfs4_file_put_access(fp, O_RDONLY);
 }
 
-/*
- * Spawn a thread to perform a recall on the delegation represented
- * by the lease (file_lock)
- *
- * Called from break_lease() with lock_flocks() held.
- * Note: we assume break_lease will only call this *once* for any given
- * lease.
- */
-static
-void nfsd_break_deleg_cb(struct file_lock *fl)
+static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
 {
-	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
-
-	dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
-	if (!dp)
-		return;
-
 	/* We're assuming the state code never drops its reference
 	 * without first removing the lease.  Since we're in this lease
 	 * callback (and since the lease code is serialized by the kernel
@@ -2282,59 +2326,37 @@
 	 * it's safe to take a reference: */
 	atomic_inc(&dp->dl_count);
 
-	spin_lock(&recall_lock);
 	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
-	spin_unlock(&recall_lock);
 
 	/* only place dl_time is set. protected by lock_flocks*/
 	dp->dl_time = get_seconds();
 
-	/*
-	 * We don't want the locks code to timeout the lease for us;
-	 * we'll remove it ourself if the delegation isn't returned
-	 * in time.
-	 */
-	fl->fl_break_time = 0;
-
-	dp->dl_file->fi_had_conflict = true;
 	nfsd4_cb_recall(dp);
 }
 
-/*
- * The file_lock is being reapd.
- *
- * Called by locks_free_lock() with lock_flocks() held.
- */
-static
-void nfsd_release_deleg_cb(struct file_lock *fl)
+/* Called from break_lease() with lock_flocks() held. */
+static void nfsd_break_deleg_cb(struct file_lock *fl)
 {
-	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
+	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
+	struct nfs4_delegation *dp;
 
-	dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count));
+	BUG_ON(!fp);
+	/* We assume break_lease is only called once per lease: */
+	BUG_ON(fp->fi_had_conflict);
+	/*
+	 * We don't want the locks code to timeout the lease for us;
+	 * we'll remove it ourself if a delegation isn't returned
+	 * in time:
+	 */
+	fl->fl_break_time = 0;
 
-	if (!(fl->fl_flags & FL_LEASE) || !dp)
-		return;
-	dp->dl_flock = NULL;
+	spin_lock(&recall_lock);
+	fp->fi_had_conflict = true;
+	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
+		nfsd_break_one_deleg(dp);
+	spin_unlock(&recall_lock);
 }
 
-/*
- * Called from setlease() with lock_flocks() held
- */
-static
-int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
-{
-	struct nfs4_delegation *onlistd =
-		(struct nfs4_delegation *)onlist->fl_owner;
-	struct nfs4_delegation *tryd =
-		(struct nfs4_delegation *)try->fl_owner;
-
-	if (onlist->fl_lmops != try->fl_lmops)
-		return 0;
-
-	return onlistd->dl_client == tryd->dl_client;
-}
-
-
 static
 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
 {
@@ -2346,8 +2368,6 @@
 
 static const struct lock_manager_operations nfsd_lease_mng_ops = {
 	.fl_break = nfsd_break_deleg_cb,
-	.fl_release_private = nfsd_release_deleg_cb,
-	.fl_mylease = nfsd_same_client_deleg_cb,
 	.fl_change = nfsd_change_deleg_cb,
 };
 
@@ -2427,10 +2447,13 @@
 {
 	struct nfs4_delegation *dp;
 
-	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) {
-		if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid)
+	spin_lock(&recall_lock);
+	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
+		if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) {
+			spin_unlock(&recall_lock);
 			return dp;
-	}
+		}
+	spin_unlock(&recall_lock);
 	return NULL;
 }
 
@@ -2514,8 +2537,6 @@
 	if (!fp->fi_fds[oflag]) {
 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
 			&fp->fi_fds[oflag]);
-		if (status == nfserr_dropit)
-			status = nfserr_jukebox;
 		if (status)
 			return status;
 	}
@@ -2596,6 +2617,79 @@
 	open->op_stateowner->so_client->cl_firststate = 1;
 }
 
+/* Should we give out recallable state?: */
+static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
+{
+	if (clp->cl_cb_state == NFSD4_CB_UP)
+		return true;
+	/*
+	 * In the sessions case, since we don't have to establish a
+	 * separate connection for callbacks, we assume it's OK
+	 * until we hear otherwise:
+	 */
+	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
+}
+
+static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
+{
+	struct file_lock *fl;
+
+	fl = locks_alloc_lock();
+	if (!fl)
+		return NULL;
+	locks_init_lock(fl);
+	fl->fl_lmops = &nfsd_lease_mng_ops;
+	fl->fl_flags = FL_LEASE;
+	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+	fl->fl_end = OFFSET_MAX;
+	fl->fl_owner = (fl_owner_t)(dp->dl_file);
+	fl->fl_pid = current->tgid;
+	return fl;
+}
+
+static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
+{
+	struct nfs4_file *fp = dp->dl_file;
+	struct file_lock *fl;
+	int status;
+
+	fl = nfs4_alloc_init_lease(dp, flag);
+	if (!fl)
+		return -ENOMEM;
+	fl->fl_file = find_readable_file(fp);
+	list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
+	if (status) {
+		list_del_init(&dp->dl_perclnt);
+		locks_free_lock(fl);
+		return -ENOMEM;
+	}
+	fp->fi_lease = fl;
+	fp->fi_deleg_file = fl->fl_file;
+	get_file(fp->fi_deleg_file);
+	atomic_set(&fp->fi_delegees, 1);
+	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	return 0;
+}
+
+static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
+{
+	struct nfs4_file *fp = dp->dl_file;
+
+	if (!fp->fi_lease)
+		return nfs4_setlease(dp, flag);
+	spin_lock(&recall_lock);
+	if (fp->fi_had_conflict) {
+		spin_unlock(&recall_lock);
+		return -EAGAIN;
+	}
+	atomic_inc(&fp->fi_delegees);
+	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	spin_unlock(&recall_lock);
+	list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+	return 0;
+}
+
 /*
  * Attempt to hand out a delegation.
  */
@@ -2604,10 +2698,10 @@
 {
 	struct nfs4_delegation *dp;
 	struct nfs4_stateowner *sop = stp->st_stateowner;
-	int cb_up = atomic_read(&sop->so_client->cl_cb_set);
-	struct file_lock *fl;
+	int cb_up;
 	int status, flag = 0;
 
+	cb_up = nfsd4_cb_channel_good(sop->so_client);
 	flag = NFS4_OPEN_DELEGATE_NONE;
 	open->op_recall = 0;
 	switch (open->op_claim_type) {
@@ -2635,36 +2729,11 @@
 	}
 
 	dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
-	if (dp == NULL) {
-		flag = NFS4_OPEN_DELEGATE_NONE;
-		goto out;
-	}
-	status = -ENOMEM;
-	fl = locks_alloc_lock();
-	if (!fl)
-		goto out;
-	locks_init_lock(fl);
-	fl->fl_lmops = &nfsd_lease_mng_ops;
-	fl->fl_flags = FL_LEASE;
-	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
-	fl->fl_end = OFFSET_MAX;
-	fl->fl_owner =  (fl_owner_t)dp;
-	fl->fl_file = find_readable_file(stp->st_file);
-	BUG_ON(!fl->fl_file);
-	fl->fl_pid = current->tgid;
-	dp->dl_flock = fl;
-
-	/* vfs_setlease checks to see if delegation should be handed out.
-	 * the lock_manager callbacks fl_mylease and fl_change are used
-	 */
-	if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
-		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
-		dp->dl_flock = NULL;
-		locks_free_lock(fl);
-		unhash_delegation(dp);
-		flag = NFS4_OPEN_DELEGATE_NONE;
-		goto out;
-	}
+	if (dp == NULL)
+		goto out_no_deleg;
+	status = nfs4_set_delegation(dp, flag);
+	if (status)
+		goto out_free;
 
 	memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
 
@@ -2676,6 +2745,12 @@
 			&& open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
 	open->op_delegate_type = flag;
+	return;
+out_free:
+	nfs4_put_delegation(dp);
+out_no_deleg:
+	flag = NFS4_OPEN_DELEGATE_NONE;
+	goto out;
 }
 
 /*
@@ -2794,7 +2869,7 @@
 	renew_client(clp);
 	status = nfserr_cb_path_down;
 	if (!list_empty(&clp->cl_delegations)
-			&& !atomic_read(&clp->cl_cb_set))
+			&& clp->cl_cb_state != NFSD4_CB_UP)
 		goto out;
 	status = nfs_ok;
 out:
@@ -2870,8 +2945,6 @@
 				test_val = u;
 			break;
 		}
-		dprintk("NFSD: purging unused delegation dp %p, fp %p\n",
-			            dp, dp->dl_flock);
 		list_move(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&recall_lock);
@@ -3081,9 +3154,10 @@
 		if (status)
 			goto out;
 		renew_client(dp->dl_client);
-		if (filpp)
-			*filpp = find_readable_file(dp->dl_file);
-		BUG_ON(!*filpp);
+		if (filpp) {
+			*filpp = dp->dl_file->fi_deleg_file;
+			BUG_ON(!*filpp);
+		}
 	} else { /* open or lock stateid */
 		stp = find_stateid(stateid, flags);
 		if (!stp)
@@ -4107,7 +4181,7 @@
 	unsigned int strhashval = clientstr_hashval(name);
 	struct nfs4_client *clp;
 
-	clp = find_confirmed_client_by_str(name, strhashval, use_exchange_id);
+	clp = find_confirmed_client_by_str(name, strhashval);
 	return clp ? 1 : 0;
 }
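
The delegation rework above replaces the per-delegation file_lock with one lease per nfs4_file: the first delegation installs the lease and sets fi_delegees to 1, later delegations only bump the count, and the last nfs4_put_deleg_lease() unlocks it. A single-threaded userspace sketch of that get/put pattern (a plain int stands in for atomic_t, and the vfs_setlease() calls are stubbed out):

/* Sketch: one shared lease per file, refcounted by number of delegations. */
#include <stdbool.h>
#include <stdio.h>

struct file_state {
	int  delegees;      /* stands in for atomic_t fi_delegees */
	bool lease_held;    /* stands in for fi_lease != NULL */
};

static int install_lease(struct file_state *f)
{
	printf("vfs_setlease(F_RDLCK)\n");   /* stub for the real call */
	f->lease_held = true;
	f->delegees = 1;
	return 0;
}

static int add_delegation(struct file_state *f)
{
	if (!f->lease_held)
		return install_lease(f);     /* first delegation takes the lease */
	f->delegees++;                       /* later ones just take a reference */
	return 0;
}

static void put_deleg_lease(struct file_state *f)
{
	if (--f->delegees == 0) {            /* atomic_dec_and_test in the kernel */
		printf("vfs_setlease(F_UNLCK)\n");
		f->lease_held = false;
	}
}

int main(void)
{
	struct file_state f = { 0, false };

	add_delegation(&f);
	add_delegation(&f);
	put_deleg_lease(&f);
	put_deleg_lease(&f);                 /* last put releases the lease */
	return 0;
}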
 
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index f35a94a..615f0a9 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -44,13 +44,14 @@
 #include <linux/namei.h>
 #include <linux/statfs.h>
 #include <linux/utsname.h>
-#include <linux/nfsd_idmap.h>
-#include <linux/nfs4_acl.h>
 #include <linux/sunrpc/svcauth_gss.h>
 
+#include "idmap.h"
+#include "acl.h"
 #include "xdr4.h"
 #include "vfs.h"
 
+
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
 /*
@@ -288,17 +289,17 @@
 			len += XDR_QUADLEN(dummy32) << 2;
 			READMEM(buf, dummy32);
 			ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
-			host_err = 0;
+			status = nfs_ok;
 			if (ace->whotype != NFS4_ACL_WHO_NAMED)
 				ace->who = 0;
 			else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
-				host_err = nfsd_map_name_to_gid(argp->rqstp,
+				status = nfsd_map_name_to_gid(argp->rqstp,
 						buf, dummy32, &ace->who);
 			else
-				host_err = nfsd_map_name_to_uid(argp->rqstp,
+				status = nfsd_map_name_to_uid(argp->rqstp,
 						buf, dummy32, &ace->who);
-			if (host_err)
-				goto out_nfserr;
+			if (status)
+				return status;
 		}
 	} else
 		*acl = NULL;
@@ -316,8 +317,8 @@
 		READ_BUF(dummy32);
 		len += (XDR_QUADLEN(dummy32) << 2);
 		READMEM(buf, dummy32);
-		if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
-			goto out_nfserr;
+		if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
+			return status;
 		iattr->ia_valid |= ATTR_UID;
 	}
 	if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
@@ -327,8 +328,8 @@
 		READ_BUF(dummy32);
 		len += (XDR_QUADLEN(dummy32) << 2);
 		READMEM(buf, dummy32);
-		if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
-			goto out_nfserr;
+		if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
+			return status;
 		iattr->ia_valid |= ATTR_GID;
 	}
 	if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
@@ -420,6 +421,21 @@
 	DECODE_TAIL;
 }
 
+static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
+{
+	DECODE_HEAD;
+	u32 dummy;
+
+	READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
+	COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
+	READ32(bcts->dir);
+	/* XXX: Perhaps Tom Tucker could help us figure out how we
+	 * should be using ctsa_use_conn_in_rdma_mode: */
+	READ32(dummy);
+
+	DECODE_TAIL;
+}
+
 static __be32
 nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
 {
@@ -847,6 +863,17 @@
 }
 
 static __be32
+nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
+		     struct nfsd4_secinfo_no_name *sin)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(sin->sin_style);
+	DECODE_TAIL;
+}
+
+static __be32
 nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
 {
 	__be32 status;
@@ -1005,7 +1032,7 @@
 nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
 			 struct nfsd4_exchange_id *exid)
 {
-	int dummy;
+	int dummy, tmp;
 	DECODE_HEAD;
 
 	READ_BUF(NFS4_VERIFIER_SIZE);
@@ -1053,15 +1080,23 @@
 
 		/* ssp_hash_algs<> */
 		READ_BUF(4);
-		READ32(dummy);
-		READ_BUF(dummy);
-		p += XDR_QUADLEN(dummy);
+		READ32(tmp);
+		while (tmp--) {
+			READ_BUF(4);
+			READ32(dummy);
+			READ_BUF(dummy);
+			p += XDR_QUADLEN(dummy);
+		}
 
 		/* ssp_encr_algs<> */
 		READ_BUF(4);
-		READ32(dummy);
-		READ_BUF(dummy);
-		p += XDR_QUADLEN(dummy);
+		READ32(tmp);
+		while (tmp--) {
+			READ_BUF(4);
+			READ32(dummy);
+			READ_BUF(dummy);
+			p += XDR_QUADLEN(dummy);
+		}
 
 		/* ssp_window and ssp_num_gss_handles */
 		READ_BUF(8);
@@ -1107,7 +1142,7 @@
 
 	u32 dummy;
 	char *machine_name;
-	int i;
+	int i, j;
 	int nr_secflavs;
 
 	READ_BUF(16);
@@ -1180,7 +1215,7 @@
 			READ_BUF(4);
 			READ32(dummy);
 			READ_BUF(dummy * 4);
-			for (i = 0; i < dummy; ++i)
+			for (j = 0; j < dummy; ++j)
 				READ32(dummy);
 			break;
 		case RPC_AUTH_GSS:
@@ -1339,7 +1374,7 @@
 
 	/* new operations for NFSv4.1 */
 	[OP_BACKCHANNEL_CTL]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
 	[OP_EXCHANGE_ID]	= (nfsd4_dec)nfsd4_decode_exchange_id,
 	[OP_CREATE_SESSION]	= (nfsd4_dec)nfsd4_decode_create_session,
 	[OP_DESTROY_SESSION]	= (nfsd4_dec)nfsd4_decode_destroy_session,
@@ -1350,7 +1385,7 @@
 	[OP_LAYOUTCOMMIT]	= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_LAYOUTGET]		= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_LAYOUTRETURN]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_secinfo_no_name,
 	[OP_SEQUENCE]		= (nfsd4_dec)nfsd4_decode_sequence,
 	[OP_SET_SSV]		= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_TEST_STATEID]	= (nfsd4_dec)nfsd4_decode_notsupp,
@@ -2309,8 +2344,6 @@
 	case nfserr_resource:
 		nfserr = nfserr_toosmall;
 		goto fail;
-	case nfserr_dropit:
-		goto fail;
 	case nfserr_noent:
 		goto skip_entry;
 	default:
@@ -2365,6 +2398,21 @@
 	return nfserr;
 }
 
+static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
+{
+	__be32 *p;
+
+	if (!nfserr) {
+		RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8);
+		WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
+		WRITE32(bcts->dir);
+		/* XXX: ? */
+		WRITE32(0);
+		ADJUST_ARGS();
+	}
+	return nfserr;
+}
+
 static __be32
 nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
 {
@@ -2826,11 +2874,10 @@
 }
 
 static __be32
-nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
-		     struct nfsd4_secinfo *secinfo)
+nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
+			 __be32 nfserr, struct svc_export *exp)
 {
 	int i = 0;
-	struct svc_export *exp = secinfo->si_exp;
 	u32 nflavs;
 	struct exp_flavor_info *flavs;
 	struct exp_flavor_info def_flavs[2];
@@ -2892,6 +2939,20 @@
 	return nfserr;
 }
 
+static __be32
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+		     struct nfsd4_secinfo *secinfo)
+{
+	return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->si_exp);
+}
+
+static __be32
+nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
+		     struct nfsd4_secinfo_no_name *secinfo)
+{
+	return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->sin_exp);
+}
+
 /*
  * The SETATTR encode routine is special -- it always encodes a bitmap,
  * regardless of the error status.
@@ -3076,13 +3137,9 @@
 	WRITE32(seq->seqid);
 	WRITE32(seq->slotid);
 	WRITE32(seq->maxslots);
-	/*
-	 * FIXME: for now:
-	 *   target_maxslots = maxslots
-	 *   status_flags = 0
-	 */
+	/* For now: target_maxslots = maxslots */
 	WRITE32(seq->maxslots);
-	WRITE32(0);
+	WRITE32(seq->status_flags);
 
 	ADJUST_ARGS();
 	resp->cstate.datap = p; /* DRC cache data pointer */
@@ -3143,7 +3200,7 @@
 
 	/* NFSv4.1 operations */
 	[OP_BACKCHANNEL_CTL]	= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
+	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
 	[OP_EXCHANGE_ID]	= (nfsd4_enc)nfsd4_encode_exchange_id,
 	[OP_CREATE_SESSION]	= (nfsd4_enc)nfsd4_encode_create_session,
 	[OP_DESTROY_SESSION]	= (nfsd4_enc)nfsd4_encode_destroy_session,
@@ -3154,7 +3211,7 @@
 	[OP_LAYOUTCOMMIT]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_LAYOUTGET]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_LAYOUTRETURN]	= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_SECINFO_NO_NAME]	= (nfsd4_enc)nfsd4_encode_noop,
+	[OP_SECINFO_NO_NAME]	= (nfsd4_enc)nfsd4_encode_secinfo_no_name,
 	[OP_SEQUENCE]		= (nfsd4_enc)nfsd4_encode_sequence,
 	[OP_SET_SSV]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_TEST_STATEID]	= (nfsd4_enc)nfsd4_encode_noop,
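
The EXCHANGE_ID fix above walks ssp_hash_algs<> and ssp_encr_algs<> as proper XDR counted arrays: a 32-bit element count, then for each element a 32-bit length followed by the opaque body rounded up to a 4-byte boundary. A userspace sketch of that skip loop over a flat buffer (bounds handling and helper names are simplified assumptions, not the kernel's READ_BUF machinery):

/* Sketch: skipping an XDR array of opaques: count, then (len + padded data). */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XDR_ALIGN(n) (((n) + 3u) & ~3u)

static const unsigned char *read_u32(const unsigned char *p,
				     const unsigned char *end, uint32_t *v)
{
	uint32_t be;

	if (!p || end - p < 4)
		return NULL;
	memcpy(&be, p, 4);
	*v = ntohl(be);
	return p + 4;
}

static const unsigned char *skip_opaque_array(const unsigned char *p,
					      const unsigned char *end)
{
	uint32_t count, len;

	p = read_u32(p, end, &count);
	while (p && count--) {
		p = read_u32(p, end, &len);
		if (!p || (uint32_t)(end - p) < XDR_ALIGN(len))
			return NULL;              /* truncated buffer */
		p += XDR_ALIGN(len);              /* skip padded opaque body */
	}
	return p;
}

int main(void)
{
	/* two opaques: "ab" (padded to 4 bytes) and "xyz" (padded to 4 bytes) */
	unsigned char buf[] = { 0,0,0,2,  0,0,0,2,'a','b',0,0,  0,0,0,3,'x','y','z',0 };
	const unsigned char *p = skip_opaque_array(buf, buf + sizeof(buf));

	printf("consumed %td bytes\n", p ? p - buf : -1);
	return 0;
}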
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 4514ebb..33b3e2b 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -8,12 +8,12 @@
 #include <linux/namei.h>
 #include <linux/ctype.h>
 
-#include <linux/nfsd_idmap.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/nfsd/syscall.h>
 #include <linux/lockd/lockd.h>
 #include <linux/sunrpc/clnt.h>
 
+#include "idmap.h"
 #include "nfsd.h"
 #include "cache.h"
 
@@ -127,6 +127,7 @@
 
 static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
 {
+#ifdef CONFIG_NFSD_DEPRECATED
 	static int warned;
 	if (file->f_dentry->d_name.name[0] == '.' && !warned) {
 		printk(KERN_INFO
@@ -135,6 +136,7 @@
 		       current->comm, file->f_dentry->d_name.name);
 		warned = 1;
 	}
+#endif
 	if (! file->private_data) {
 		/* An attempt to read a transaction file without writing
 		 * causes a 0-byte write so that the file can return
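
The nfsctl.c hunk gates the legacy-interface warning behind CONFIG_NFSD_DEPRECATED so the nag disappears entirely once the old files are compiled out. A minimal sketch of that once-only, config-gated warning pattern (the macro is force-defined here purely so the example builds; the message text is invented):

/* Sketch: a once-only warning, compiled out unless the deprecated option is on. */
#include <stdio.h>

#define CONFIG_NFSD_DEPRECATED 1   /* pretend Kconfig switch for the example */

static void warn_deprecated(const char *comm, const char *file)
{
#ifdef CONFIG_NFSD_DEPRECATED
	static int warned;

	if (!warned) {
		fprintf(stderr, "process '%s' is using deprecated file '%s'\n",
			comm, file);
		warned = 1;          /* only nag once per run */
	}
#else
	(void)comm; (void)file;
#endif
}

int main(void)
{
	warn_deprecated("exportfs", ".getfd");
	warn_deprecated("exportfs", ".getfd");   /* second call is silent */
	return 0;
}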
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 6b641cf..7ecfa24 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -158,6 +158,7 @@
 #define	nfserr_attrnotsupp	cpu_to_be32(NFSERR_ATTRNOTSUPP)
 #define	nfserr_bad_xdr		cpu_to_be32(NFSERR_BAD_XDR)
 #define	nfserr_openmode		cpu_to_be32(NFSERR_OPENMODE)
+#define	nfserr_badowner		cpu_to_be32(NFSERR_BADOWNER)
 #define	nfserr_locks_held	cpu_to_be32(NFSERR_LOCKS_HELD)
 #define	nfserr_op_illegal	cpu_to_be32(NFSERR_OP_ILLEGAL)
 #define	nfserr_grace		cpu_to_be32(NFSERR_GRACE)
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 08e1726..e15dc45 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -735,9 +735,9 @@
 		{ nfserr_stale, -ESTALE },
 		{ nfserr_jukebox, -ETIMEDOUT },
 		{ nfserr_jukebox, -ERESTARTSYS },
-		{ nfserr_dropit, -EAGAIN },
-		{ nfserr_dropit, -ENOMEM },
-		{ nfserr_badname, -ESRCH },
+		{ nfserr_jukebox, -EAGAIN },
+		{ nfserr_jukebox, -EWOULDBLOCK },
+		{ nfserr_jukebox, -ENOMEM },
 		{ nfserr_io, -ETXTBSY },
 		{ nfserr_notsupp, -EOPNOTSUPP },
 		{ nfserr_toosmall, -ETOOSMALL },
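
nfserrno() in the hunk above is a table-driven translation from host errnos to wire statuses; retargeting -EAGAIN, -EWOULDBLOCK and -ENOMEM at nfserr_jukebox tells clients to retry rather than treating a transient shortage as a hard error. A small userspace sketch of that lookup with a conservative catch-all (the status numbers follow the RFCs, but the table itself is trimmed for illustration):

/* Sketch: table-driven errno -> NFS status translation with a fallback. */
#include <errno.h>
#include <stdio.h>

#define NFSERR_IO       5
#define NFSERR_STALE   70
#define NFSERR_JUKEBOX 10008   /* "try again later" */

static const struct { int nfserr; int syserr; } nfs_errtbl[] = {
	{ NFSERR_STALE,   -ESTALE },
	{ NFSERR_JUKEBOX, -EAGAIN },
	{ NFSERR_JUKEBOX, -EWOULDBLOCK },
	{ NFSERR_JUKEBOX, -ENOMEM },
};

static int nfs_status_from_errno(int errno_val)
{
	size_t i;

	for (i = 0; i < sizeof(nfs_errtbl) / sizeof(nfs_errtbl[0]); i++)
		if (nfs_errtbl[i].syserr == errno_val)
			return nfs_errtbl[i].nfserr;
	return NFSERR_IO;          /* conservative catch-all */
}

int main(void)
{
	printf("-ENOMEM -> %d\n", nfs_status_from_errno(-ENOMEM));
	printf("-EPERM  -> %d\n", nfs_status_from_errno(-EPERM));
	return 0;
}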
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 2bae1d8..18743c4 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -608,7 +608,7 @@
 	/* Now call the procedure handler, and encode NFS status. */
 	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 	nfserr = map_new_errors(rqstp->rq_vers, nfserr);
-	if (nfserr == nfserr_dropit) {
+	if (nfserr == nfserr_dropit || rqstp->rq_dropme) {
 		dprintk("nfsd: Dropping request; may be revisited later\n");
 		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
 		return 0;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 39adc27..2d31224 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -68,10 +68,12 @@
 struct nfsd4_callback {
 	void *cb_op;
 	struct nfs4_client *cb_clp;
+	struct list_head cb_per_client;
 	u32 cb_minorversion;
 	struct rpc_message cb_msg;
 	const struct rpc_call_ops *cb_ops;
 	struct work_struct cb_work;
+	bool cb_done;
 };
 
 struct nfs4_delegation {
@@ -81,7 +83,6 @@
 	atomic_t		dl_count;       /* ref count */
 	struct nfs4_client	*dl_client;
 	struct nfs4_file	*dl_file;
-	struct file_lock	*dl_flock;
 	u32			dl_type;
 	time_t			dl_time;
 /* For recall: */
@@ -95,6 +96,7 @@
 struct nfs4_cb_conn {
 	/* SETCLIENTID info */
 	struct sockaddr_storage	cb_addr;
+	struct sockaddr_storage	cb_saddr;
 	size_t			cb_addrlen;
 	u32                     cb_prog; /* used only in 4.0 case;
 					    per-session otherwise */
@@ -146,6 +148,11 @@
 	u32				gid;
 };
 
+struct nfsd4_bind_conn_to_session {
+	struct nfs4_sessionid		sessionid;
+	u32				dir;
+};
+
 /* The single slot clientid cache structure */
 struct nfsd4_clid_slot {
 	u32				sl_seqid;
@@ -235,9 +242,13 @@
 	unsigned long		cl_cb_flags;
 	struct rpc_clnt		*cl_cb_client;
 	u32			cl_cb_ident;
-	atomic_t		cl_cb_set;
+#define NFSD4_CB_UP		0
+#define NFSD4_CB_UNKNOWN	1
+#define NFSD4_CB_DOWN		2
+	int			cl_cb_state;
 	struct nfsd4_callback	cl_cb_null;
 	struct nfsd4_session	*cl_cb_session;
+	struct list_head	cl_callbacks; /* list of in-progress callbacks */
 
 	/* for all client information that callback code might need: */
 	spinlock_t		cl_lock;
@@ -366,6 +377,9 @@
 	 */
 	atomic_t		fi_readers;
 	atomic_t		fi_writers;
+	struct file		*fi_deleg_file;
+	struct file_lock	*fi_lease;
+	atomic_t		fi_delegees;
 	struct inode		*fi_inode;
 	u32                     fi_id;      /* used with stateowner->so_id 
 					     * for stateid_hashtbl hash */
@@ -454,6 +468,7 @@
 extern void nfs4_free_stateowner(struct kref *kref);
 extern int set_callback_cred(void);
 extern void nfsd4_probe_callback(struct nfs4_client *clp);
+extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
 extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
 extern void nfsd4_do_callback_rpc(struct work_struct *);
 extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
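
state.h now tracks the callback channel with a three-way cl_cb_state (UNKNOWN/UP/DOWN) instead of the old cl_cb_set atomic: probes move it to UP or DOWN, and nfsd4_cb_channel_good() treats UNKNOWN as acceptable for 4.1 clients because the backchannel rides the client's own connection. A hedged sketch of those transitions (the probe result is simulated; none of this is kernel API):

/* Sketch: tri-state callback-channel tracking for a client. */
#include <stdbool.h>
#include <stdio.h>

enum cb_state { CB_UP, CB_UNKNOWN, CB_DOWN };

struct client {
	enum cb_state cb_state;
	int minorversion;          /* 0 => NFSv4.0, 1 => NFSv4.1 */
};

static void probe_start(struct client *clp)
{
	clp->cb_state = CB_UNKNOWN;               /* CB_NULL probe in flight */
}

static void probe_done(struct client *clp, int probe_status)
{
	if (probe_status == 0)
		clp->cb_state = CB_UP;            /* NULL ping succeeded */
	else
		clp->cb_state = CB_DOWN;          /* mark the channel down */
}

/* Should we hand out recallable state (delegations)? */
static bool cb_channel_good(const struct client *clp)
{
	if (clp->cb_state == CB_UP)
		return true;
	/* 4.1 shares the client's connection, so assume OK until proven otherwise. */
	return clp->minorversion && clp->cb_state == CB_UNKNOWN;
}

int main(void)
{
	struct client clp = { CB_UNKNOWN, 1 };

	probe_start(&clp);
	printf("before probe result: good=%d\n", cb_channel_good(&clp));
	probe_done(&clp, -107);                   /* simulated probe failure */
	printf("after failed probe:  good=%d\n", cb_channel_good(&clp));
	return 0;
}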
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 3a35902..da1d970 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1,4 +1,3 @@
-#define MSNFS	/* HACK HACK */
 /*
  * File operations used by nfsd. Some of these have been ripped from
  * other parts of the kernel because they weren't exported, others
@@ -35,8 +34,8 @@
 #endif /* CONFIG_NFSD_V3 */
 
 #ifdef CONFIG_NFSD_V4
-#include <linux/nfs4_acl.h>
-#include <linux/nfsd_idmap.h>
+#include "acl.h"
+#include "idmap.h"
 #endif /* CONFIG_NFSD_V4 */
 
 #include "nfsd.h"
@@ -88,8 +87,9 @@
 			    .dentry = dget(dentry)};
 	int err = 0;
 
-	while (d_mountpoint(path.dentry) && follow_down(&path))
-		;
+	err = follow_down(&path, false);
+	if (err < 0)
+		goto out;
 
 	exp2 = rqst_exp_get_by_name(rqstp, &path);
 	if (IS_ERR(exp2)) {
@@ -273,6 +273,13 @@
 	return err;
 }
 
+static int nfsd_break_lease(struct inode *inode)
+{
+	if (!S_ISREG(inode->i_mode))
+		return 0;
+	return break_lease(inode, O_WRONLY | O_NONBLOCK);
+}
+
 /*
  * Commit metadata changes to stable storage.
  */
@@ -375,16 +382,6 @@
 				goto out;
 		}
 
-		/*
-		 * If we are changing the size of the file, then
-		 * we need to break all leases.
-		 */
-		host_err = break_lease(inode, O_WRONLY | O_NONBLOCK);
-		if (host_err == -EWOULDBLOCK)
-			host_err = -ETIMEDOUT;
-		if (host_err) /* ENOMEM or EWOULDBLOCK */
-			goto out_nfserr;
-
 		host_err = get_write_access(inode);
 		if (host_err)
 			goto out_nfserr;
@@ -425,7 +422,11 @@
 
 	err = nfserr_notsync;
 	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+		host_err = nfsd_break_lease(inode);
+		if (host_err)
+			goto out_nfserr;
 		fh_lock(fhp);
+
 		host_err = notify_change(dentry, iap);
 		err = nfserrno(host_err);
 		fh_unlock(fhp);
@@ -752,8 +753,6 @@
 	 */
 	if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
 		host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
-	if (host_err == -EWOULDBLOCK)
-		host_err = -ETIMEDOUT;
 	if (host_err) /* NOMEM or WOULDBLOCK */
 		goto out_nfserr;
 
@@ -809,7 +808,7 @@
 		if (ra->p_count == 0)
 			frap = rap;
 	}
-	depth = nfsdstats.ra_size*11/10;
+	depth = nfsdstats.ra_size;
 	if (!frap) {	
 		spin_unlock(&rab->pb_lock);
 		return NULL;
@@ -845,11 +844,6 @@
 	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
 	struct page *page = buf->page;
 	size_t size;
-	int ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
 
 	size = sd->len;
 
@@ -879,15 +873,6 @@
 	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
 }
 
-static inline int svc_msnfs(struct svc_fh *ffhp)
-{
-#ifdef MSNFS
-	return (ffhp->fh_export->ex_flags & NFSEXP_MSNFS);
-#else
-	return 0;
-#endif
-}
-
 static __be32
 nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
               loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
@@ -900,9 +885,6 @@
 	err = nfserr_perm;
 	inode = file->f_path.dentry->d_inode;
 
-	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
-		goto out;
-
 	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
 		struct splice_desc sd = {
 			.len		= 0,
@@ -927,7 +909,6 @@
 		fsnotify_access(file);
 	} else 
 		err = nfserrno(host_err);
-out:
 	return err;
 }
 
@@ -992,14 +973,6 @@
 	int			stable = *stablep;
 	int			use_wgather;
 
-#ifdef MSNFS
-	err = nfserr_perm;
-
-	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-		(!lock_may_write(file->f_path.dentry->d_inode, offset, *cnt)))
-		goto out;
-#endif
-
 	dentry = file->f_path.dentry;
 	inode = dentry->d_inode;
 	exp   = fhp->fh_export;
@@ -1050,7 +1023,6 @@
 		err = 0;
 	else
 		err = nfserrno(host_err);
-out:
 	return err;
 }
 
@@ -1670,6 +1642,12 @@
 		err = nfserrno(host_err);
 		goto out_dput;
 	}
+	err = nfserr_noent;
+	if (!dold->d_inode)
+		goto out_drop_write;
+	host_err = nfsd_break_lease(dold->d_inode);
+	if (host_err)
+		goto out_drop_write;
 	host_err = vfs_link(dold, dirp, dnew);
 	if (!host_err) {
 		err = nfserrno(commit_metadata(ffhp));
@@ -1681,6 +1659,7 @@
 		else
 			err = nfserrno(host_err);
 	}
+out_drop_write:
 	mnt_drop_write(tfhp->fh_export->ex_path.mnt);
 out_dput:
 	dput(dnew);
@@ -1755,12 +1734,6 @@
 	if (ndentry == trap)
 		goto out_dput_new;
 
-	if (svc_msnfs(ffhp) &&
-		((odentry->d_count > 1) || (ndentry->d_count > 1))) {
-			host_err = -EPERM;
-			goto out_dput_new;
-	}
-
 	host_err = -EXDEV;
 	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
 		goto out_dput_new;
@@ -1768,15 +1741,24 @@
 	if (host_err)
 		goto out_dput_new;
 
+	host_err = nfsd_break_lease(odentry->d_inode);
+	if (host_err)
+		goto out_drop_write;
+	if (ndentry->d_inode) {
+		host_err = nfsd_break_lease(ndentry->d_inode);
+		if (host_err)
+			goto out_drop_write;
+	}
+	if (host_err)
+		goto out_drop_write;
 	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
 	if (!host_err) {
 		host_err = commit_metadata(tfhp);
 		if (!host_err)
 			host_err = commit_metadata(ffhp);
 	}
-
+out_drop_write:
 	mnt_drop_write(ffhp->fh_export->ex_path.mnt);
-
  out_dput_new:
 	dput(ndentry);
  out_dput_old:
@@ -1837,26 +1819,22 @@
 
 	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
 	if (host_err)
-		goto out_nfserr;
+		goto out_put;
 
-	if (type != S_IFDIR) { /* It's UNLINK */
-#ifdef MSNFS
-		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-			(rdentry->d_count > 1)) {
-			host_err = -EPERM;
-		} else
-#endif
+	host_err = nfsd_break_lease(rdentry->d_inode);
+	if (host_err)
+		goto out_drop_write;
+	if (type != S_IFDIR)
 		host_err = vfs_unlink(dirp, rdentry);
-	} else { /* It's RMDIR */
+	else
 		host_err = vfs_rmdir(dirp, rdentry);
-	}
-
-	dput(rdentry);
-
 	if (!host_err)
 		host_err = commit_metadata(fhp);
-
+out_drop_write:
 	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+out_put:
+	dput(rdentry);
+
 out_nfserr:
 	err = nfserrno(host_err);
 out:
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 60fce3d..366401e 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -311,6 +311,11 @@
 	struct svc_export *si_exp;			/* response */
 };
 
+struct nfsd4_secinfo_no_name {
+	u32 sin_style;					/* request */
+	struct svc_export *sin_exp;			/* response */
+};
+
 struct nfsd4_setattr {
 	stateid_t	sa_stateid;         /* request */
 	u32		sa_bmval[3];        /* request */
@@ -373,8 +378,8 @@
 	u32			cachethis;		/* request */
 #if 0
 	u32			target_maxslots;	/* response */
-	u32			status_flags;		/* response */
 #endif /* not yet */
+	u32			status_flags;		/* response */
 };
 
 struct nfsd4_destroy_session {
@@ -422,6 +427,7 @@
 
 		/* NFSv4.1 */
 		struct nfsd4_exchange_id	exchange_id;
+		struct nfsd4_bind_conn_to_session bind_conn_to_session;
 		struct nfsd4_create_session	create_session;
 		struct nfsd4_destroy_session	destroy_session;
 		struct nfsd4_sequence		sequence;
@@ -518,6 +524,7 @@
 		struct nfsd4_sequence *seq);
 extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
 		struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
+extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *);
 extern __be32 nfsd4_create_session(struct svc_rqst *,
 		struct nfsd4_compound_state *,
 		struct nfsd4_create_session *);
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 388e9e8..85f7baa 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -35,11 +35,6 @@
 #include "btnode.h"
 
 
-void nilfs_btnode_cache_init_once(struct address_space *btnc)
-{
-	nilfs_mapping_init_once(btnc);
-}
-
 static const struct address_space_operations def_btnode_aops = {
 	.sync_page		= block_sync_page,
 };
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 7903749..1b8ebd8 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -37,7 +37,6 @@
 	struct buffer_head *newbh;
 };
 
-void nilfs_btnode_cache_init_once(struct address_space *);
 void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 6a0e2a1..a0babd2 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -454,9 +454,9 @@
 	struct backing_dev_info *bdi = inode->i_sb->s_bdi;
 
 	INIT_LIST_HEAD(&shadow->frozen_buffers);
-	nilfs_mapping_init_once(&shadow->frozen_data);
+	address_space_init_once(&shadow->frozen_data);
 	nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
-	nilfs_mapping_init_once(&shadow->frozen_btnodes);
+	address_space_init_once(&shadow->frozen_btnodes);
 	nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
 	mi->mi_shadow = shadow;
 	return 0;
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 9803427..161791d 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -397,7 +397,6 @@
 		new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inc_nlink(old_inode);
 		nilfs_set_link(new_dir, new_de, new_page, old_inode);
 		nilfs_mark_inode_dirty(new_dir);
 		new_inode->i_ctime = CURRENT_TIME;
@@ -411,13 +410,9 @@
 			if (new_dir->i_nlink >= NILFS_LINK_MAX)
 				goto out_dir;
 		}
-		inc_nlink(old_inode);
 		err = nilfs_add_link(new_dentry, old_inode);
-		if (err) {
-			drop_nlink(old_inode);
-			nilfs_mark_inode_dirty(old_inode);
+		if (err)
 			goto out_dir;
-		}
 		if (dir_de) {
 			inc_nlink(new_dir);
 			nilfs_mark_inode_dirty(new_dir);
@@ -431,7 +426,6 @@
 	old_inode->i_ctime = CURRENT_TIME;
 
 	nilfs_delete_entry(old_de, old_page);
-	drop_nlink(old_inode);
 
 	if (dir_de) {
 		nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0c43241..a585b35 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -492,19 +492,6 @@
 	return nc;
 }
 
-void nilfs_mapping_init_once(struct address_space *mapping)
-{
-	memset(mapping, 0, sizeof(*mapping));
-	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
-	spin_lock_init(&mapping->tree_lock);
-	INIT_LIST_HEAD(&mapping->private_list);
-	spin_lock_init(&mapping->private_lock);
-
-	spin_lock_init(&mapping->i_mmap_lock);
-	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
-	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-}
-
 void nilfs_mapping_init(struct address_space *mapping,
 			struct backing_dev_info *bdi,
 			const struct address_space_operations *aops)
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 622df27..2a00953 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -61,7 +61,6 @@
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init_once(struct address_space *mapping);
 void nilfs_mapping_init(struct address_space *mapping,
 			struct backing_dev_info *bdi,
 			const struct address_space_operations *aops);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 55ebae5..2de9f63 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -430,7 +430,8 @@
 	nilfs_segctor_map_segsum_entry(
 		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
 
-	if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
+	if (NILFS_I(inode)->i_root &&
+	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
 		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
 	/* skip finfo */
 }
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 70dfdd5..1673b3d 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -704,7 +704,8 @@
 	sbp[0]->s_state =
 		cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
 	/* synchronize sbp[1] with sbp[0] */
-	memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
+	if (sbp[1])
+		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
 	return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
 }
 
@@ -1163,14 +1164,14 @@
 {
 	struct nilfs_super_data sd;
 	struct super_block *s;
-	fmode_t mode = FMODE_READ;
+	fmode_t mode = FMODE_READ | FMODE_EXCL;
 	struct dentry *root_dentry;
 	int err, s_new = false;
 
 	if (!(flags & MS_RDONLY))
 		mode |= FMODE_WRITE;
 
-	sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
+	sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
 	if (IS_ERR(sd.bdev))
 		return ERR_CAST(sd.bdev);
 
@@ -1249,7 +1250,7 @@
 	}
 
 	if (!s_new)
-		close_bdev_exclusive(sd.bdev, mode);
+		blkdev_put(sd.bdev, mode);
 
 	return root_dentry;
 
@@ -1258,7 +1259,7 @@
 
  failed:
 	if (!s_new)
-		close_bdev_exclusive(sd.bdev, mode);
+		blkdev_put(sd.bdev, mode);
 	return ERR_PTR(err);
 }
 
@@ -1278,7 +1279,7 @@
 #ifdef CONFIG_NILFS_XATTR
 	init_rwsem(&ii->xattr_sem);
 #endif
-	nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+	address_space_init_once(&ii->i_btnode_cache);
 	ii->i_bmap = &ii->i_bmap_data;
 	inode_init_once(&ii->vfs_inode);
 }
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
index 3ac36b7b..7dceff0 100644
--- a/fs/notify/fanotify/Kconfig
+++ b/fs/notify/fanotify/Kconfig
@@ -6,7 +6,7 @@
 	---help---
 	   Say Y here to enable fanotify support.  fanotify is a file access
 	   notification system which differs from inotify in that it sends
-	   and open file descriptor to the userspace listener along with
+	   an open file descriptor to the userspace listener along with
 	   the event.
 
 	   If unsure, say Y.
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 58b6be9..4ff028f 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -6,7 +6,7 @@
 	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
 	     unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 113ebd9..f4b1057 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -1380,15 +1380,14 @@
  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
  * single-segment behaviour.
  *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
- * when atomic and when not atomic.  This is ok because
- * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
- * and it is ok to call this when non-atomic.
- * Infact, the only difference between __copy_from_user_inatomic() and
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
+ * atomic and when not atomic.  This is ok because it calls
+ * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
+ * fact, the only difference between __copy_from_user_inatomic() and
  * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error.  And on many
- * architectures __copy_from_user_inatomic() is just defined to
- * __copy_from_user() so it makes no difference at all on those architectures.
+ * should not zero the tail of the buffer on error.  And on many architectures
+ * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
+ * makes no difference at all on those architectures.
  */
 static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
@@ -1409,28 +1408,28 @@
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
-			copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
-					*iov, *iov_ofs, len);
-			/*
-			 * Zero the rest of the target like __copy_from_user().
-			 */
-			memset(addr + ofs + copied, 0, len - copied);
-			kunmap(*pages);
+			copied = __ntfs_copy_from_user_iovec_inatomic(addr +
+					ofs, *iov, *iov_ofs, len);
 			if (unlikely(copied != len))
 				goto err_out;
+			kunmap(*pages);
 		}
 		total += len;
+		ntfs_set_next_iovec(iov, iov_ofs, len);
 		bytes -= len;
 		if (!bytes)
 			break;
-		ntfs_set_next_iovec(iov, iov_ofs, len);
 		ofs = 0;
 	} while (++pages < last_page);
 out:
 	return total;
 err_out:
-	total += copied;
+	BUG_ON(copied > len);
 	/* Zero the rest of the target like __copy_from_user(). */
+	memset(addr + ofs + copied, 0, len - copied);
+	kunmap(*pages);
+	total += copied;
+	ntfs_set_next_iovec(iov, iov_ofs, copied);
 	while (++pages < last_page) {
 		bytes -= len;
 		if (!bytes)
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index b572b67..326e747 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -1,7 +1,7 @@
 /**
  * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -2576,6 +2576,8 @@
 	flush_dcache_page(page);
 	SetPageUptodate(page);
 	if (base_ni) {
+		MFT_RECORD *m_tmp;
+
 		/*
 		 * Setup the base mft record in the extent mft record.  This
 		 * completes initialization of the allocated extent mft record
@@ -2588,11 +2590,11 @@
 		 * attach it to the base inode @base_ni and map, pin, and lock
 		 * its, i.e. the allocated, mft record.
 		 */
-		m = map_extent_mft_record(base_ni, bit, &ni);
-		if (IS_ERR(m)) {
+		m_tmp = map_extent_mft_record(base_ni, bit, &ni);
+		if (IS_ERR(m_tmp)) {
 			ntfs_error(vol->sb, "Failed to map allocated extent "
 					"mft record 0x%llx.", (long long)bit);
-			err = PTR_ERR(m);
+			err = PTR_ERR(m_tmp);
 			/* Set the mft record itself not in use. */
 			m->flags &= cpu_to_le16(
 					~le16_to_cpu(MFT_RECORD_IN_USE));
@@ -2603,6 +2605,7 @@
 			ntfs_unmap_page(page);
 			goto undo_mftbmp_alloc;
 		}
+		BUG_ON(m != m_tmp);
 		/*
 		 * Make sure the allocated mft record is written out to disk.
 		 * No need to set the inode dirty because the caller is going
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index a30ecacc..29099a0 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1,7 +1,7 @@
 /*
  * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  * Copyright (c) 2001,2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -3193,8 +3193,8 @@
 	ntfs_sysctl(0);
 }
 
-MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
-MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov");
+MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.");
 MODULE_VERSION(NTFS_VERSION);
 MODULE_LICENSE("GPL");
 #ifdef DEBUG
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 0d84066..77a8de5 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -1,7 +1,6 @@
 config OCFS2_FS
 	tristate "OCFS2 file system support"
-	depends on NET && SYSFS
-	select CONFIGFS_FS
+	depends on NET && SYSFS && CONFIGFS_FS
 	select JBD2
 	select CRC32
 	select QUOTA
@@ -51,7 +50,7 @@
 
 config OCFS2_FS_STATS
 	bool "OCFS2 statistics"
-	depends on OCFS2_FS
+	depends on OCFS2_FS && DEBUG_FS
 	default y
 	help
 	  This option allows some fs statistics to be captured. Enabling
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 592fae5..e4984e2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -565,7 +565,6 @@
 	return ret;
 }
 
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
 static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
 					 struct ocfs2_extent_block *eb);
 static void ocfs2_adjust_rightmost_records(handle_t *handle,
@@ -5858,6 +5857,7 @@
 
 	ocfs2_journal_dirty(handle, tl_bh);
 
+	osb->truncated_clusters += num_clusters;
 bail:
 	mlog_exit(status);
 	return status;
@@ -5929,6 +5929,8 @@
 		i--;
 	}
 
+	osb->truncated_clusters = 0;
+
 bail:
 	mlog_exit(status);
 	return status;
@@ -7139,64 +7141,6 @@
 }
 
 /*
- * Expects the inode to already be locked.
- */
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
-			   struct inode *inode,
-			   struct buffer_head *fe_bh,
-			   struct ocfs2_truncate_context **tc)
-{
-	int status;
-	unsigned int new_i_clusters;
-	struct ocfs2_dinode *fe;
-	struct ocfs2_extent_block *eb;
-	struct buffer_head *last_eb_bh = NULL;
-
-	mlog_entry_void();
-
-	*tc = NULL;
-
-	new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
-						  i_size_read(inode));
-	fe = (struct ocfs2_dinode *) fe_bh->b_data;
-
-	mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
-	     "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
-	     (unsigned long long)le64_to_cpu(fe->i_size));
-
-	*tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
-	if (!(*tc)) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-	ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
-
-	if (fe->id2.i_list.l_tree_depth) {
-		status = ocfs2_read_extent_block(INODE_CACHE(inode),
-						 le64_to_cpu(fe->i_last_eb_blk),
-						 &last_eb_bh);
-		if (status < 0) {
-			mlog_errno(status);
-			goto bail;
-		}
-		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
-	}
-
-	(*tc)->tc_last_eb_bh = last_eb_bh;
-
-	status = 0;
-bail:
-	if (status < 0) {
-		if (*tc)
-			ocfs2_free_truncate_context(*tc);
-		*tc = NULL;
-	}
-	mlog_exit_void();
-	return status;
-}
-
-/*
  * 'start' is inclusive, 'end' is not.
  */
 int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
@@ -7270,18 +7214,3 @@
 out:
 	return ret;
 }
-
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
-{
-	/*
-	 * The caller is responsible for completing deallocation
-	 * before freeing the context.
-	 */
-	if (tc->tc_dealloc.c_first_suballocator != NULL)
-		mlog(ML_NOTICE,
-		     "Truncate completion has non-empty dealloc context\n");
-
-	brelse(tc->tc_last_eb_bh);
-
-	kfree(tc);
-}
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 55762b5..3bd08a0 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -228,10 +228,6 @@
 
 int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
 				  u64 range_start, u64 range_end);
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
-			   struct inode *inode,
-			   struct buffer_head *fe_bh,
-			   struct ocfs2_truncate_context **tc);
 int ocfs2_commit_truncate(struct ocfs2_super *osb,
 			  struct inode *inode,
 			  struct buffer_head *di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d7c554..1fbb0e2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1630,6 +1630,43 @@
 	return ret;
 }
 
+/*
+ * Try to flush truncate logs if we can free enough clusters from it.
+ * As for return value, "< 0" means error, "0" no space and "1" means
+ * we have freed enough spaces and let the caller try to allocate again.
+ */
+static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+					  unsigned int needed)
+{
+	tid_t target;
+	int ret = 0;
+	unsigned int truncated_clusters;
+
+	mutex_lock(&osb->osb_tl_inode->i_mutex);
+	truncated_clusters = osb->truncated_clusters;
+	mutex_unlock(&osb->osb_tl_inode->i_mutex);
+
+	/*
+	 * Check whether we can succeed in allocating if we free
+	 * the truncate log.
+	 */
+	if (truncated_clusters < needed)
+		goto out;
+
+	ret = ocfs2_flush_truncate_log(osb);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
+		jbd2_log_wait_commit(osb->journal->j_journal, target);
+		ret = 1;
+	}
+out:
+	return ret;
+}
+
 int ocfs2_write_begin_nolock(struct file *filp,
 			     struct address_space *mapping,
 			     loff_t pos, unsigned len, unsigned flags,
@@ -1637,7 +1674,7 @@
 			     struct buffer_head *di_bh, struct page *mmap_page)
 {
 	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
-	unsigned int clusters_to_alloc, extents_to_split;
+	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
 	struct ocfs2_write_ctxt *wc;
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -1646,7 +1683,9 @@
 	struct ocfs2_alloc_context *meta_ac = NULL;
 	handle_t *handle;
 	struct ocfs2_extent_tree et;
+	int try_free = 1, ret1;
 
+try_again:
 	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
 	if (ret) {
 		mlog_errno(ret);
@@ -1681,6 +1720,7 @@
 		mlog_errno(ret);
 		goto out;
 	} else if (ret == 1) {
+		clusters_need = wc->w_clen;
 		ret = ocfs2_refcount_cow(inode, filp, di_bh,
 					 wc->w_cpos, wc->w_clen, UINT_MAX);
 		if (ret) {
@@ -1695,6 +1735,7 @@
 		mlog_errno(ret);
 		goto out;
 	}
+	clusters_need += clusters_to_alloc;
 
 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
 
@@ -1817,6 +1858,22 @@
 		ocfs2_free_alloc_context(data_ac);
 	if (meta_ac)
 		ocfs2_free_alloc_context(meta_ac);
+
+	if (ret == -ENOSPC && try_free) {
+		/*
+		 * Try to free some truncate log so that we can have enough
+		 * clusters to allocate.
+		 */
+		try_free = 0;
+
+		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
+		if (ret1 == 1)
+			goto try_again;
+
+		if (ret1 < 0)
+			mlog_errno(ret1);
+	}
+
 	return ret;
 }
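
The write_begin changes above retry a failed allocation once after flushing the truncate log: clusters_need records how many clusters the write wants, and on -ENOSPC the code flushes the truncate log, waits for the journal commit, and jumps back to try_again. A hedged, self-contained sketch of that reclaim-once-then-retry control flow (the allocation and reclaim helpers are stand-ins, not the ocfs2 API):

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the free-space pool and the space pending in the
 * truncate log; both are illustrative, not ocfs2 state. */
static unsigned int free_clusters = 2;
static unsigned int pending_clusters = 8;

static int sketch_alloc(unsigned int need)
{
	if (free_clusters < need)
		return -ENOSPC;
	free_clusters -= need;
	return 0;
}

static int sketch_reclaim(unsigned int need)
{
	if (pending_clusters < need)
		return 0;			/* flushing would not help */
	free_clusters += pending_clusters;	/* flush log, wait for commit */
	pending_clusters = 0;
	return 1;				/* caller may retry */
}

static int sketch_write_begin(unsigned int need)
{
	int try_free = 1, ret;

try_again:
	ret = sketch_alloc(need);
	if (ret == -ENOSPC && try_free) {
		try_free = 0;			/* only ever retry once */
		if (sketch_reclaim(need) == 1)
			goto try_again;
	}
	return ret;
}

int main(void)
{
	printf("ret=%d\n", sketch_write_begin(6));	/* retry succeeds */
	return 0;
}
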
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9e3d45b..b108e86 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -82,6 +82,7 @@
 #define O2HB_DB_TYPE_REGION_LIVENODES	4
 #define O2HB_DB_TYPE_REGION_NUMBER	5
 #define O2HB_DB_TYPE_REGION_ELAPSED_TIME	6
+#define O2HB_DB_TYPE_REGION_PINNED	7
 struct o2hb_debug_buf {
 	int db_type;
 	int db_size;
@@ -101,6 +102,7 @@
 #define O2HB_DEBUG_FAILEDREGIONS	"failed_regions"
 #define O2HB_DEBUG_REGION_NUMBER	"num"
 #define O2HB_DEBUG_REGION_ELAPSED_TIME	"elapsed_time_in_ms"
+#define O2HB_DEBUG_REGION_PINNED	"pinned"
 
 static struct dentry *o2hb_debug_dir;
 static struct dentry *o2hb_debug_livenodes;
@@ -132,6 +134,33 @@
 unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
 unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
 
+/*
+ * o2hb_dependent_users tracks the number of registered callbacks that depend
+ * on heartbeat. o2net and o2dlm are two entities that register this callback.
+ * However only o2dlm depends on the heartbeat. It does not want the heartbeat
+ * to stop while a dlm domain is still active.
+ */
+unsigned int o2hb_dependent_users;
+
+/*
+ * In global heartbeat mode, all regions are pinned if there are one or more
+ * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
+ * regions are unpinned if the region count exceeds the cut off or the number
+ * of dependent users falls to zero.
+ */
+#define O2HB_PIN_CUT_OFF		3
+
+/*
+ * In local heartbeat mode, we assume the dlm domain name to be the same as
+ * region uuid. This is true for domains created for the file system but not
+ * necessarily true for userdlm domains. This is a known limitation.
+ *
+ * In global heartbeat mode, we pin/unpin all o2hb regions. This solution
+ * works for both file system and userdlm domains.
+ */
+static int o2hb_region_pin(const char *region_uuid);
+static void o2hb_region_unpin(const char *region_uuid);
+
 /* Only sets a new threshold if there are no active regions.
  *
  * No locking or otherwise interesting code is required for reading
@@ -186,7 +215,9 @@
 	struct config_item	hr_item;
 
 	struct list_head	hr_all_item;
-	unsigned		hr_unclean_stop:1;
+	unsigned		hr_unclean_stop:1,
+				hr_item_pinned:1,
+				hr_item_dropped:1;
 
 	/* protected by the hr_callback_sem */
 	struct task_struct 	*hr_task;
@@ -212,9 +243,11 @@
 	struct dentry		*hr_debug_livenodes;
 	struct dentry		*hr_debug_regnum;
 	struct dentry		*hr_debug_elapsed_time;
+	struct dentry		*hr_debug_pinned;
 	struct o2hb_debug_buf	*hr_db_livenodes;
 	struct o2hb_debug_buf	*hr_db_regnum;
 	struct o2hb_debug_buf	*hr_db_elapsed_time;
+	struct o2hb_debug_buf	*hr_db_pinned;
 
 	/* let the person setting up hb wait for it to return until it
 	 * has reached a 'steady' state.  This will be fixed when we have
@@ -701,6 +734,14 @@
 	       config_item_name(&reg->hr_item));
 
 	set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+
+	/*
+	 * If global heartbeat active, unpin all regions if the
+	 * region count > CUT_OFF
+	 */
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
+		o2hb_region_unpin(NULL);
 }
 
 static int o2hb_check_slot(struct o2hb_region *reg,
@@ -1041,6 +1082,9 @@
 
 	set_user_nice(current, -20);
 
+	/* Pin node */
+	o2nm_depend_this_node();
+
 	while (!kthread_should_stop() && !reg->hr_unclean_stop) {
 		/* We track the time spent inside
 		 * o2hb_do_disk_heartbeat so that we avoid more than
@@ -1090,6 +1134,9 @@
 		mlog_errno(ret);
 	}
 
+	/* Unpin node */
+	o2nm_undepend_this_node();
+
 	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
 
 	return 0;
@@ -1142,6 +1189,12 @@
 						 reg->hr_last_timeout_start));
 		goto done;
 
+	case O2HB_DB_TYPE_REGION_PINNED:
+		reg = (struct o2hb_region *)db->db_data;
+		out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
+				!!reg->hr_item_pinned);
+		goto done;
+
 	default:
 		goto done;
 	}
@@ -1315,6 +1368,8 @@
 	memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap));
 	memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap));
 
+	o2hb_dependent_users = 0;
+
 	return o2hb_debug_init();
 }
 
@@ -1384,6 +1439,7 @@
 	debugfs_remove(reg->hr_debug_livenodes);
 	debugfs_remove(reg->hr_debug_regnum);
 	debugfs_remove(reg->hr_debug_elapsed_time);
+	debugfs_remove(reg->hr_debug_pinned);
 	debugfs_remove(reg->hr_debug_dir);
 
 	spin_lock(&o2hb_live_lock);
@@ -1673,7 +1729,7 @@
 		goto out;
 
 	reg->hr_bdev = I_BDEV(filp->f_mapping->host);
-	ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ);
+	ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL);
 	if (ret) {
 		reg->hr_bdev = NULL;
 		goto out;
@@ -1948,6 +2004,18 @@
 		goto bail;
 	}
 
+	reg->hr_debug_pinned =
+			o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
+					  reg->hr_debug_dir,
+					  &(reg->hr_db_pinned),
+					  sizeof(*(reg->hr_db_pinned)),
+					  O2HB_DB_TYPE_REGION_PINNED,
+					  0, 0, reg);
+	if (!reg->hr_debug_pinned) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
 	ret = 0;
 bail:
 	return ret;
@@ -2002,15 +2070,20 @@
 {
 	struct task_struct *hb_task;
 	struct o2hb_region *reg = to_o2hb_region(item);
+	int quorum_region = 0;
 
 	/* stop the thread when the user removes the region dir */
 	spin_lock(&o2hb_live_lock);
 	if (o2hb_global_heartbeat_active()) {
 		clear_bit(reg->hr_region_num, o2hb_region_bitmap);
 		clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+		if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+			quorum_region = 1;
+		clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
 	}
 	hb_task = reg->hr_task;
 	reg->hr_task = NULL;
+	reg->hr_item_dropped = 1;
 	spin_unlock(&o2hb_live_lock);
 
 	if (hb_task)
@@ -2028,7 +2101,27 @@
 	if (o2hb_global_heartbeat_active())
 		printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
 		       config_item_name(&reg->hr_item));
+
 	config_item_put(item);
+
+	if (!o2hb_global_heartbeat_active() || !quorum_region)
+		return;
+
+	/*
+	 * If global heartbeat active and there are dependent users,
+	 * pin all regions if quorum region count <= CUT_OFF
+	 */
+	spin_lock(&o2hb_live_lock);
+
+	if (!o2hb_dependent_users)
+		goto unlock;
+
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+		o2hb_region_pin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
 }
 
 struct o2hb_heartbeat_group_attribute {
@@ -2214,63 +2307,138 @@
 }
 EXPORT_SYMBOL_GPL(o2hb_setup_callback);
 
-static struct o2hb_region *o2hb_find_region(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only pin the matching region. In global we pin all the active
+ * regions.
+ */
+static int o2hb_region_pin(const char *region_uuid)
 {
-	struct o2hb_region *p, *reg = NULL;
+	int ret = 0, found = 0;
+	struct o2hb_region *reg;
+	char *uuid;
 
 	assert_spin_locked(&o2hb_live_lock);
 
-	list_for_each_entry(p, &o2hb_all_regions, hr_all_item) {
-		if (!strcmp(region_uuid, config_item_name(&p->hr_item))) {
-			reg = p;
-			break;
+	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+		uuid = config_item_name(&reg->hr_item);
+
+		/* local heartbeat */
+		if (region_uuid) {
+			if (strcmp(region_uuid, uuid))
+				continue;
+			found = 1;
 		}
+
+		if (reg->hr_item_pinned || reg->hr_item_dropped)
+			goto skip_pin;
+
+		/* Ignore ENOENT only for local hb (userdlm domain) */
+		ret = o2nm_depend_item(&reg->hr_item);
+		if (!ret) {
+			mlog(ML_CLUSTER, "Pin region %s\n", uuid);
+			reg->hr_item_pinned = 1;
+		} else {
+			if (ret == -ENOENT && found)
+				ret = 0;
+			else {
+				mlog(ML_ERROR, "Pin region %s fails with %d\n",
+				     uuid, ret);
+				break;
+			}
+		}
+skip_pin:
+		if (found)
+			break;
 	}
 
-	return reg;
-}
-
-static int o2hb_region_get(const char *region_uuid)
-{
-	int ret = 0;
-	struct o2hb_region *reg;
-
-	spin_lock(&o2hb_live_lock);
-
-	reg = o2hb_find_region(region_uuid);
-	if (!reg)
-		ret = -ENOENT;
-	spin_unlock(&o2hb_live_lock);
-
-	if (ret)
-		goto out;
-
-	ret = o2nm_depend_this_node();
-	if (ret)
-		goto out;
-
-	ret = o2nm_depend_item(&reg->hr_item);
-	if (ret)
-		o2nm_undepend_this_node();
-
-out:
 	return ret;
 }
 
-static void o2hb_region_put(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only unpin the matching region. In global we unpin all the
+ * active regions.
+ */
+static void o2hb_region_unpin(const char *region_uuid)
 {
 	struct o2hb_region *reg;
+	char *uuid;
+	int found = 0;
+
+	assert_spin_locked(&o2hb_live_lock);
+
+	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+		uuid = config_item_name(&reg->hr_item);
+		if (region_uuid) {
+			if (strcmp(region_uuid, uuid))
+				continue;
+			found = 1;
+		}
+
+		if (reg->hr_item_pinned) {
+			mlog(ML_CLUSTER, "Unpin region %s\n", uuid);
+			o2nm_undepend_item(&reg->hr_item);
+			reg->hr_item_pinned = 0;
+		}
+		if (found)
+			break;
+	}
+}
+
+static int o2hb_region_inc_user(const char *region_uuid)
+{
+	int ret = 0;
 
 	spin_lock(&o2hb_live_lock);
 
-	reg = o2hb_find_region(region_uuid);
-
-	spin_unlock(&o2hb_live_lock);
-
-	if (reg) {
-		o2nm_undepend_item(&reg->hr_item);
-		o2nm_undepend_this_node();
+	/* local heartbeat */
+	if (!o2hb_global_heartbeat_active()) {
+	    ret = o2hb_region_pin(region_uuid);
+	    goto unlock;
 	}
+
+	/*
+	 * if global heartbeat active and this is the first dependent user,
+	 * pin all regions if quorum region count <= CUT_OFF
+	 */
+	o2hb_dependent_users++;
+	if (o2hb_dependent_users > 1)
+		goto unlock;
+
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+		ret = o2hb_region_pin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
+	return ret;
+}
+
+void o2hb_region_dec_user(const char *region_uuid)
+{
+	spin_lock(&o2hb_live_lock);
+
+	/* local heartbeat */
+	if (!o2hb_global_heartbeat_active()) {
+	    o2hb_region_unpin(region_uuid);
+	    goto unlock;
+	}
+
+	/*
+	 * if global heartbeat active and there are no dependent users,
+	 * unpin all quorum regions
+	 */
+	o2hb_dependent_users--;
+	if (!o2hb_dependent_users)
+		o2hb_region_unpin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
 }
 
 int o2hb_register_callback(const char *region_uuid,
@@ -2291,9 +2459,11 @@
 	}
 
 	if (region_uuid) {
-		ret = o2hb_region_get(region_uuid);
-		if (ret)
+		ret = o2hb_region_inc_user(region_uuid);
+		if (ret) {
+			mlog_errno(ret);
 			goto out;
+		}
 	}
 
 	down_write(&o2hb_callback_sem);
@@ -2311,7 +2481,7 @@
 	up_write(&o2hb_callback_sem);
 	ret = 0;
 out:
-	mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
+	mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
 	     ret, __builtin_return_address(0), hc);
 	return ret;
 }
@@ -2322,7 +2492,7 @@
 {
 	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
 
-	mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
+	mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
 	     __builtin_return_address(0), hc);
 
 	/* XXX Can this happen _with_ a region reference? */
@@ -2330,7 +2500,7 @@
 		return;
 
 	if (region_uuid)
-		o2hb_region_put(region_uuid);
+		o2hb_region_dec_user(region_uuid);
 
 	down_write(&o2hb_callback_sem);
 
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index a3f150e..3a583590 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -46,10 +46,15 @@
 #define O2NET_DEBUG_DIR		"o2net"
 #define SC_DEBUG_NAME		"sock_containers"
 #define NST_DEBUG_NAME		"send_tracking"
+#define STATS_DEBUG_NAME	"stats"
+
+#define SHOW_SOCK_CONTAINERS	0
+#define SHOW_SOCK_STATS		1
 
 static struct dentry *o2net_dentry;
 static struct dentry *sc_dentry;
 static struct dentry *nst_dentry;
+static struct dentry *stats_dentry;
 
 static DEFINE_SPINLOCK(o2net_debug_lock);
 
@@ -123,37 +128,42 @@
 static int nst_seq_show(struct seq_file *seq, void *v)
 {
 	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
+	ktime_t now;
+	s64 sock, send, status;
 
 	spin_lock(&o2net_debug_lock);
 	nst = next_nst(dummy_nst);
+	if (!nst)
+		goto out;
 
-	if (nst != NULL) {
-		/* get_task_comm isn't exported.  oh well. */
-		seq_printf(seq, "%p:\n"
-			   "  pid:          %lu\n"
-			   "  tgid:         %lu\n"
-			   "  process name: %s\n"
-			   "  node:         %u\n"
-			   "  sc:           %p\n"
-			   "  message id:   %d\n"
-			   "  message type: %u\n"
-			   "  message key:  0x%08x\n"
-			   "  sock acquiry: %lu.%ld\n"
-			   "  send start:   %lu.%ld\n"
-			   "  wait start:   %lu.%ld\n",
-			   nst, (unsigned long)nst->st_task->pid,
-			   (unsigned long)nst->st_task->tgid,
-			   nst->st_task->comm, nst->st_node,
-			   nst->st_sc, nst->st_id, nst->st_msg_type,
-			   nst->st_msg_key,
-			   nst->st_sock_time.tv_sec,
-			   (long)nst->st_sock_time.tv_usec,
-			   nst->st_send_time.tv_sec,
-			   (long)nst->st_send_time.tv_usec,
-			   nst->st_status_time.tv_sec,
-			   (long)nst->st_status_time.tv_usec);
-	}
+	now = ktime_get();
+	sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
+	send = ktime_to_us(ktime_sub(now, nst->st_send_time));
+	status = ktime_to_us(ktime_sub(now, nst->st_status_time));
 
+	/* get_task_comm isn't exported.  oh well. */
+	seq_printf(seq, "%p:\n"
+		   "  pid:          %lu\n"
+		   "  tgid:         %lu\n"
+		   "  process name: %s\n"
+		   "  node:         %u\n"
+		   "  sc:           %p\n"
+		   "  message id:   %d\n"
+		   "  message type: %u\n"
+		   "  message key:  0x%08x\n"
+		   "  sock acquiry: %lld usecs ago\n"
+		   "  send start:   %lld usecs ago\n"
+		   "  wait start:   %lld usecs ago\n",
+		   nst, (unsigned long)task_pid_nr(nst->st_task),
+		   (unsigned long)nst->st_task->tgid,
+		   nst->st_task->comm, nst->st_node,
+		   nst->st_sc, nst->st_id, nst->st_msg_type,
+		   nst->st_msg_key,
+		   (long long)sock,
+		   (long long)send,
+		   (long long)status);
+
+out:
 	spin_unlock(&o2net_debug_lock);
 
 	return 0;
@@ -228,6 +238,11 @@
 	spin_unlock(&o2net_debug_lock);
 }
 
+struct o2net_sock_debug {
+	int dbg_ctxt;
+	struct o2net_sock_container *dbg_sock;
+};
+
 static struct o2net_sock_container
 			*next_sc(struct o2net_sock_container *sc_start)
 {
@@ -253,7 +268,8 @@
 
 static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
@@ -264,7 +280,8 @@
 
 static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
@@ -276,65 +293,107 @@
 	return sc; /* unused, just needs to be null when done */
 }
 
-#define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec
+#ifdef CONFIG_OCFS2_FS_STATS
+# define sc_send_count(_s)		((_s)->sc_send_count)
+# define sc_recv_count(_s)		((_s)->sc_recv_count)
+# define sc_tv_acquiry_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_acquiry_total))
+# define sc_tv_send_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_send_total))
+# define sc_tv_status_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_status_total))
+# define sc_tv_process_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_process_total))
+#else
+# define sc_send_count(_s)		(0U)
+# define sc_recv_count(_s)		(0U)
+# define sc_tv_acquiry_total_ns(_s)	(0LL)
+# define sc_tv_send_total_ns(_s)	(0LL)
+# define sc_tv_status_total_ns(_s)	(0LL)
+# define sc_tv_process_total_ns(_s)	(0LL)
+#endif
+
+/* So that debugfs.ocfs2 can determine which format is being used */
+#define O2NET_STATS_STR_VERSION		1
+static void sc_show_sock_stats(struct seq_file *seq,
+			       struct o2net_sock_container *sc)
+{
+	if (!sc)
+		return;
+
+	seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
+		   sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
+		   (long long)sc_tv_acquiry_total_ns(sc),
+		   (long long)sc_tv_send_total_ns(sc),
+		   (long long)sc_tv_status_total_ns(sc),
+		   (unsigned long)sc_recv_count(sc),
+		   (long long)sc_tv_process_total_ns(sc));
+}
+
+static void sc_show_sock_container(struct seq_file *seq,
+				   struct o2net_sock_container *sc)
+{
+	struct inet_sock *inet = NULL;
+	__be32 saddr = 0, daddr = 0;
+	__be16 sport = 0, dport = 0;
+
+	if (!sc)
+		return;
+
+	if (sc->sc_sock) {
+		inet = inet_sk(sc->sc_sock->sk);
+		/* the stack's structs aren't sparse endian clean */
+		saddr = (__force __be32)inet->inet_saddr;
+		daddr = (__force __be32)inet->inet_daddr;
+		sport = (__force __be16)inet->inet_sport;
+		dport = (__force __be16)inet->inet_dport;
+	}
+
+	/* XXX sigh, inet-> doesn't have sparse annotation so any
+	 * use of it here generates a warning with -Wbitwise */
+	seq_printf(seq, "%p:\n"
+		   "  krefs:           %d\n"
+		   "  sock:            %pI4:%u -> "
+				      "%pI4:%u\n"
+		   "  remote node:     %s\n"
+		   "  page off:        %zu\n"
+		   "  handshake ok:    %u\n"
+		   "  timer:           %lld usecs\n"
+		   "  data ready:      %lld usecs\n"
+		   "  advance start:   %lld usecs\n"
+		   "  advance stop:    %lld usecs\n"
+		   "  func start:      %lld usecs\n"
+		   "  func stop:       %lld usecs\n"
+		   "  func key:        0x%08x\n"
+		   "  func type:       %u\n",
+		   sc,
+		   atomic_read(&sc->sc_kref.refcount),
+		   &saddr, inet ? ntohs(sport) : 0,
+		   &daddr, inet ? ntohs(dport) : 0,
+		   sc->sc_node->nd_name,
+		   sc->sc_page_off,
+		   sc->sc_handshake_ok,
+		   (long long)ktime_to_us(sc->sc_tv_timer),
+		   (long long)ktime_to_us(sc->sc_tv_data_ready),
+		   (long long)ktime_to_us(sc->sc_tv_advance_start),
+		   (long long)ktime_to_us(sc->sc_tv_advance_stop),
+		   (long long)ktime_to_us(sc->sc_tv_func_start),
+		   (long long)ktime_to_us(sc->sc_tv_func_stop),
+		   sc->sc_msg_key,
+		   sc->sc_msg_type);
+}
 
 static int sc_seq_show(struct seq_file *seq, void *v)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
 
-	if (sc != NULL) {
-		struct inet_sock *inet = NULL;
-
-		__be32 saddr = 0, daddr = 0;
-		__be16 sport = 0, dport = 0;
-
-		if (sc->sc_sock) {
-			inet = inet_sk(sc->sc_sock->sk);
-			/* the stack's structs aren't sparse endian clean */
-			saddr = (__force __be32)inet->inet_saddr;
-			daddr = (__force __be32)inet->inet_daddr;
-			sport = (__force __be16)inet->inet_sport;
-			dport = (__force __be16)inet->inet_dport;
-		}
-
-		/* XXX sigh, inet-> doesn't have sparse annotation so any
-		 * use of it here generates a warning with -Wbitwise */
-		seq_printf(seq, "%p:\n"
-			   "  krefs:           %d\n"
-			   "  sock:            %pI4:%u -> "
-					      "%pI4:%u\n"
-			   "  remote node:     %s\n"
-			   "  page off:        %zu\n"
-			   "  handshake ok:    %u\n"
-			   "  timer:           %lu.%ld\n"
-			   "  data ready:      %lu.%ld\n"
-			   "  advance start:   %lu.%ld\n"
-			   "  advance stop:    %lu.%ld\n"
-			   "  func start:      %lu.%ld\n"
-			   "  func stop:       %lu.%ld\n"
-			   "  func key:        %u\n"
-			   "  func type:       %u\n",
-			   sc,
-			   atomic_read(&sc->sc_kref.refcount),
-			   &saddr, inet ? ntohs(sport) : 0,
-			   &daddr, inet ? ntohs(dport) : 0,
-			   sc->sc_node->nd_name,
-			   sc->sc_page_off,
-			   sc->sc_handshake_ok,
-			   TV_SEC_USEC(sc->sc_tv_timer),
-			   TV_SEC_USEC(sc->sc_tv_data_ready),
-			   TV_SEC_USEC(sc->sc_tv_advance_start),
-			   TV_SEC_USEC(sc->sc_tv_advance_stop),
-			   TV_SEC_USEC(sc->sc_tv_func_start),
-			   TV_SEC_USEC(sc->sc_tv_func_stop),
-			   sc->sc_msg_key,
-			   sc->sc_msg_type);
+	if (sc) {
+		if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
+			sc_show_sock_container(seq, sc);
+		else
+			sc_show_sock_stats(seq, sc);
 	}
 
-
 	spin_unlock(&o2net_debug_lock);
 
 	return 0;
@@ -351,7 +410,7 @@
 	.show = sc_seq_show,
 };
 
-static int sc_fop_open(struct inode *inode, struct file *file)
+static int sc_common_open(struct file *file, struct o2net_sock_debug *sd)
 {
 	struct o2net_sock_container *dummy_sc;
 	struct seq_file *seq;
@@ -369,7 +428,8 @@
 		goto out;
 
 	seq = file->private_data;
-	seq->private = dummy_sc;
+	seq->private = sd;
+	sd->dbg_sock = dummy_sc;
 	o2net_debug_add_sc(dummy_sc);
 
 	dummy_sc = NULL;
@@ -382,12 +442,48 @@
 static int sc_fop_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq = file->private_data;
-	struct o2net_sock_container *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *dummy_sc = sd->dbg_sock;
 
 	o2net_debug_del_sc(dummy_sc);
 	return seq_release_private(inode, file);
 }
 
+static int stats_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_sock_debug *sd;
+
+	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+	if (sd == NULL)
+		return -ENOMEM;
+
+	sd->dbg_ctxt = SHOW_SOCK_STATS;
+	sd->dbg_sock = NULL;
+
+	return sc_common_open(file, sd);
+}
+
+static const struct file_operations stats_seq_fops = {
+	.open = stats_fop_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = sc_fop_release,
+};
+
+static int sc_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_sock_debug *sd;
+
+	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+	if (sd == NULL)
+		return -ENOMEM;
+
+	sd->dbg_ctxt = SHOW_SOCK_CONTAINERS;
+	sd->dbg_sock = NULL;
+
+	return sc_common_open(file, sd);
+}
+
 static const struct file_operations sc_seq_fops = {
 	.open = sc_fop_open,
 	.read = seq_read,
@@ -419,25 +515,29 @@
 		goto bail;
 	}
 
+	stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
+					   o2net_dentry, NULL,
+					   &stats_seq_fops);
+	if (!stats_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
 	return 0;
 bail:
-	if (sc_dentry)
-		debugfs_remove(sc_dentry);
-	if (nst_dentry)
-		debugfs_remove(nst_dentry);
-	if (o2net_dentry)
-		debugfs_remove(o2net_dentry);
+	debugfs_remove(stats_dentry);
+	debugfs_remove(sc_dentry);
+	debugfs_remove(nst_dentry);
+	debugfs_remove(o2net_dentry);
 	return -ENOMEM;
 }
 
 void o2net_debugfs_exit(void)
 {
-	if (sc_dentry)
-		debugfs_remove(sc_dentry);
-	if (nst_dentry)
-		debugfs_remove(nst_dentry);
-	if (o2net_dentry)
-		debugfs_remove(o2net_dentry);
+	debugfs_remove(stats_dentry);
+	debugfs_remove(sc_dentry);
+	debugfs_remove(nst_dentry);
+	debugfs_remove(o2net_dentry);
 }
 
 #endif	/* CONFIG_DEBUG_FS */
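
netdebug.c above keeps a single iterator over the socket containers but hands each open file a small o2net_sock_debug context whose dbg_ctxt selects between the verbose per-container dump and the new one-line stats format. A hedged sketch of that one-iterator, two-renderers shape, with made-up record fields:

#include <stdio.h>

struct sketch_sock {
	int node;
	unsigned long sends, recvs;
};

/* Per-open context: which of the two formats the reader asked for. */
struct sketch_debug_ctx {
	int show_stats;
	const struct sketch_sock *cur;
};

static void sketch_show(const struct sketch_debug_ctx *ctx)
{
	const struct sketch_sock *s = ctx->cur;

	if (ctx->show_stats)
		printf("%d,%lu,%lu\n", s->node, s->sends, s->recvs);
	else
		printf("node %d:\n  sends: %lu\n  recvs: %lu\n",
		       s->node, s->sends, s->recvs);
}

int main(void)
{
	struct sketch_sock socks[] = { { 1, 10, 12 }, { 2, 3, 5 } };
	struct sketch_debug_ctx ctx = { 1, NULL };	/* the "stats" file */
	size_t i;

	for (i = 0; i < sizeof(socks) / sizeof(socks[0]); i++) {
		ctx.cur = &socks[i];
		sketch_show(&ctx);
	}
	return 0;
}
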
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 9aa426e..3b11cb1 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -153,63 +153,114 @@
 	nst->st_node = node;
 }
 
-static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_sock_time);
-}
-
-static void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_send_time);
-}
-
-static void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_status_time);
-}
-
-static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
-					 struct o2net_sock_container *sc)
-{
-	nst->st_sc = sc;
-}
-
-static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
-{
-	nst->st_id = msg_id;
-}
-
-#else  /* CONFIG_DEBUG_FS */
-
-static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
-				  u32 msgkey, struct task_struct *task, u8 node)
-{
-}
-
 static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
 {
+	nst->st_sock_time = ktime_get();
 }
 
 static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
 {
+	nst->st_send_time = ktime_get();
 }
 
 static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
 {
+	nst->st_status_time = ktime_get();
 }
 
 static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
 						struct o2net_sock_container *sc)
 {
+	nst->st_sc = sc;
 }
 
 static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
 					u32 msg_id)
 {
+	nst->st_id = msg_id;
 }
 
+static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_timer = ktime_get();
+}
+
+static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_data_ready = ktime_get();
+}
+
+static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_advance_start = ktime_get();
+}
+
+static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_advance_stop = ktime_get();
+}
+
+static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_func_start = ktime_get();
+}
+
+static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_func_stop = ktime_get();
+}
+
+static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
+{
+	return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+#else  /* CONFIG_DEBUG_FS */
+# define o2net_init_nst(a, b, c, d, e)
+# define o2net_set_nst_sock_time(a)
+# define o2net_set_nst_send_time(a)
+# define o2net_set_nst_status_time(a)
+# define o2net_set_nst_sock_container(a, b)
+# define o2net_set_nst_msg_id(a, b)
+# define o2net_set_sock_timer(a)
+# define o2net_set_data_ready_time(a)
+# define o2net_set_advance_start_time(a)
+# define o2net_set_advance_stop_time(a)
+# define o2net_set_func_start_time(a)
+# define o2net_set_func_stop_time(a)
+# define o2net_get_func_run_time(a)		(ktime_t)0
 #endif /* CONFIG_DEBUG_FS */
 
+#ifdef CONFIG_OCFS2_FS_STATS
+static void o2net_update_send_stats(struct o2net_send_tracking *nst,
+				    struct o2net_sock_container *sc)
+{
+	sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
+					   ktime_sub(ktime_get(),
+						     nst->st_status_time));
+	sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
+					 ktime_sub(nst->st_status_time,
+						   nst->st_send_time));
+	sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
+					    ktime_sub(nst->st_send_time,
+						      nst->st_sock_time));
+	sc->sc_send_count++;
+}
+
+static void o2net_update_recv_stats(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
+					    o2net_get_func_run_time(sc));
+	sc->sc_recv_count++;
+}
+
+#else
+
+# define o2net_update_send_stats(a, b)
+
+# define o2net_update_recv_stats(sc)
+
+#endif /* CONFIG_OCFS2_FS_STATS */
+
 static inline int o2net_reconnect_delay(void)
 {
 	return o2nm_single_cluster->cl_reconnect_delay_ms;
@@ -355,6 +406,7 @@
 		sc->sc_sock = NULL;
 	}
 
+	o2nm_undepend_item(&sc->sc_node->nd_item);
 	o2nm_node_put(sc->sc_node);
 	sc->sc_node = NULL;
 
@@ -376,6 +428,7 @@
 {
 	struct o2net_sock_container *sc, *ret = NULL;
 	struct page *page = NULL;
+	int status = 0;
 
 	page = alloc_page(GFP_NOFS);
 	sc = kzalloc(sizeof(*sc), GFP_NOFS);
@@ -386,6 +439,13 @@
 	o2nm_node_get(node);
 	sc->sc_node = node;
 
+	/* pin the node item of the remote node */
+	status = o2nm_depend_item(&node->nd_item);
+	if (status) {
+		mlog_errno(status);
+		o2nm_node_put(node);
+		goto out;
+	}
 	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
 	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
 	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
@@ -546,7 +606,7 @@
 	if (sk->sk_user_data) {
 		struct o2net_sock_container *sc = sk->sk_user_data;
 		sclog(sc, "data_ready hit\n");
-		do_gettimeofday(&sc->sc_tv_data_ready);
+		o2net_set_data_ready_time(sc);
 		o2net_sc_queue_work(sc, &sc->sc_rx_work);
 		ready = sc->sc_data_ready;
 	} else {
@@ -1070,6 +1130,8 @@
 	o2net_set_nst_status_time(&nst);
 	wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
 
+	o2net_update_send_stats(&nst, sc);
+
 	/* Note that we avoid overwriting the callers status return
 	 * variable if a system error was reported on the other
 	 * side. Callers beware. */
@@ -1183,13 +1245,15 @@
 	if (syserr != O2NET_ERR_NONE)
 		goto out_respond;
 
-	do_gettimeofday(&sc->sc_tv_func_start);
+	o2net_set_func_start_time(sc);
 	sc->sc_msg_key = be32_to_cpu(hdr->key);
 	sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
 	handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
 					     be16_to_cpu(hdr->data_len),
 					nmh->nh_func_data, &ret_data);
-	do_gettimeofday(&sc->sc_tv_func_stop);
+	o2net_set_func_stop_time(sc);
+
+	o2net_update_recv_stats(sc);
 
 out_respond:
 	/* this destroys the hdr, so don't use it after this */
@@ -1300,7 +1364,7 @@
 	size_t datalen;
 
 	sclog(sc, "receiving\n");
-	do_gettimeofday(&sc->sc_tv_advance_start);
+	o2net_set_advance_start_time(sc);
 
 	if (unlikely(sc->sc_handshake_ok == 0)) {
 		if(sc->sc_page_off < sizeof(struct o2net_handshake)) {
@@ -1375,7 +1439,7 @@
 
 out:
 	sclog(sc, "ret = %d\n", ret);
-	do_gettimeofday(&sc->sc_tv_advance_stop);
+	o2net_set_advance_stop_time(sc);
 	return ret;
 }
 
@@ -1475,27 +1539,28 @@
 {
 	struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-	struct timeval now;
 
-	do_gettimeofday(&now);
+#ifdef CONFIG_DEBUG_FS
+	ktime_t now = ktime_get();
+#endif
 
 	printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
 	     "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
 		     o2net_idle_timeout() / 1000,
 		     o2net_idle_timeout() % 1000);
-	mlog(ML_NOTICE, "here are some times that might help debug the "
-	     "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
-	     "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
-	     sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
-	     now.tv_sec, (long) now.tv_usec,
-	     sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
-	     sc->sc_tv_advance_start.tv_sec,
-	     (long) sc->sc_tv_advance_start.tv_usec,
-	     sc->sc_tv_advance_stop.tv_sec,
-	     (long) sc->sc_tv_advance_stop.tv_usec,
+
+#ifdef CONFIG_DEBUG_FS
+	mlog(ML_NOTICE, "Here are some times that might help debug the "
+	     "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
+	     "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
+	     (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
+	     (long long)ktime_to_us(sc->sc_tv_data_ready),
+	     (long long)ktime_to_us(sc->sc_tv_advance_start),
+	     (long long)ktime_to_us(sc->sc_tv_advance_stop),
 	     sc->sc_msg_key, sc->sc_msg_type,
-	     sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec,
-	     sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec);
+	     (long long)ktime_to_us(sc->sc_tv_func_start),
+	     (long long)ktime_to_us(sc->sc_tv_func_stop));
+#endif
 
 	/*
 	 * Initialize the nn_timeout so that the next connection attempt
@@ -1511,7 +1576,7 @@
 	o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
 	o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
 		      msecs_to_jiffies(o2net_keepalive_delay()));
-	do_gettimeofday(&sc->sc_tv_timer);
+	o2net_set_sock_timer(sc);
 	mod_timer(&sc->sc_idle_timeout,
 	       jiffies + msecs_to_jiffies(o2net_idle_timeout()));
 }
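
tcp.c above drops do_gettimeofday()/struct timeval in favour of ktime_get(), ktime_sub() and ktime_to_us(), that is, a monotonic clock plus explicit interval arithmetic, and accumulates the send and receive intervals when CONFIG_OCFS2_FS_STATS is set. A hedged userspace analogue of the same measure-an-interval-in-microseconds pattern, using clock_gettime(CLOCK_MONOTONIC):

#include <stdio.h>
#include <time.h>

/* Microseconds elapsed between two monotonic timestamps, analogous to
 * ktime_to_us(ktime_sub(stop, start)). */
static long long elapsed_us(struct timespec start, struct timespec stop)
{
	return (long long)(stop.tv_sec - start.tv_sec) * 1000000LL +
	       (stop.tv_nsec - start.tv_nsec) / 1000;
}

int main(void)
{
	struct timespec start, stop;
	long long total_us = 0;		/* analogue of sc_tv_send_total */

	clock_gettime(CLOCK_MONOTONIC, &start);	/* analogue of ktime_get() */
	/* ... the work being timed would run here ... */
	clock_gettime(CLOCK_MONOTONIC, &stop);

	total_us += elapsed_us(start, stop);	/* accumulate the interval */
	printf("total: %lld usecs\n", total_us);
	return 0;
}
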
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 15fdbdf..4cbcb65 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -166,18 +166,27 @@
 	/* original handlers for the sockets */
 	void			(*sc_state_change)(struct sock *sk);
 	void			(*sc_data_ready)(struct sock *sk, int bytes);
-#ifdef CONFIG_DEBUG_FS
-	struct list_head        sc_net_debug_item;
-#endif
-	struct timeval 		sc_tv_timer;
-	struct timeval 		sc_tv_data_ready;
-	struct timeval 		sc_tv_advance_start;
-	struct timeval 		sc_tv_advance_stop;
-	struct timeval 		sc_tv_func_start;
-	struct timeval 		sc_tv_func_stop;
+
 	u32			sc_msg_key;
 	u16			sc_msg_type;
 
+#ifdef CONFIG_DEBUG_FS
+	struct list_head        sc_net_debug_item;
+	ktime_t			sc_tv_timer;
+	ktime_t			sc_tv_data_ready;
+	ktime_t			sc_tv_advance_start;
+	ktime_t			sc_tv_advance_stop;
+	ktime_t			sc_tv_func_start;
+	ktime_t			sc_tv_func_stop;
+#endif
+#ifdef CONFIG_OCFS2_FS_STATS
+	ktime_t			sc_tv_acquiry_total;
+	ktime_t			sc_tv_send_total;
+	ktime_t			sc_tv_status_total;
+	u32			sc_send_count;
+	u32			sc_recv_count;
+	ktime_t			sc_tv_process_total;
+#endif
 	struct mutex		sc_send_lock;
 };
 
@@ -220,9 +229,9 @@
 	u32				st_msg_type;
 	u32				st_msg_key;
 	u8				st_node;
-	struct timeval			st_sock_time;
-	struct timeval			st_send_time;
-	struct timeval			st_status_time;
+	ktime_t				st_sock_time;
+	ktime_t				st_send_time;
+	ktime_t				st_status_time;
 };
 #else
 struct o2net_send_tracking {
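
The header change keeps the ktime_t timestamps inside #ifdef CONFIG_DEBUG_FS and adds the per-socket counters under #ifdef CONFIG_OCFS2_FS_STATS, so the struct carries no instrumentation weight when those options are compiled out. A common companion to this layout (a generic sketch, not taken from this patch; MY_STATS is a made-up stand-in for CONFIG_OCFS2_FS_STATS) is to pair the conditional fields with accessors that collapse to no-ops when the option is disabled:

/* Conditional stats fields with no-op accessors when the option is off. */
#include <stdint.h>
#include <stdio.h>

struct conn {
	int fd;
#ifdef MY_STATS
	uint32_t send_count;		/* only present in stats builds */
	int64_t  send_total_us;
#endif
};

#ifdef MY_STATS
static void conn_count_send(struct conn *c, int64_t us)
{
	c->send_count++;
	c->send_total_us += us;
}
#else
static void conn_count_send(struct conn *c, int64_t us) { (void)c; (void)us; }
#endif

int main(void)
{
	struct conn c = { .fd = 3 };

	conn_count_send(&c, 120);	/* compiles either way; no-op without MY_STATS */
#ifdef MY_STATS
	printf("sends=%u total=%lld us\n",
	       (unsigned)c.send_count, (long long)c.send_total_us);
#endif
	return 0;
}
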
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 6d80ecc..7eb9040 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -56,7 +56,7 @@
 	int ret = 0;    /* if all else fails, just return false */
 	struct ocfs2_super *osb;
 
-	if (nd->flags & LOOKUP_RCU)
+	if (nd && nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
 	inode = dentry->d_inode;
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index f449991..3a3ed4b 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -90,19 +90,29 @@
 
 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
+	struct dlm_lock_resource *res;
 
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
+	res = lock->lockres;
+
 	assert_spin_locked(&dlm->ast_lock);
+
 	if (!list_empty(&lock->ast_list)) {
-		mlog(ML_ERROR, "ast list not empty!!  pending=%d, newlevel=%d\n",
+		mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
+		     "AST list not empty, pending %d, newlevel %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
 		     lock->ast_pending, lock->ml.type);
 		BUG();
 	}
 	if (lock->ast_pending)
-		mlog(0, "lock has an ast getting flushed right now\n");
+		mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	/* putting lock on list, add a ref */
 	dlm_lock_get(lock);
@@ -110,9 +120,10 @@
 
 	/* check to see if this ast obsoletes the bast */
 	if (dlm_should_cancel_bast(dlm, lock)) {
-		struct dlm_lock_resource *res = lock->lockres;
-		mlog(0, "%s: cancelling bast for %.*s\n",
-		     dlm->name, res->lockname.len, res->lockname.name);
+		mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 		lock->bast_pending = 0;
 		list_del_init(&lock->bast_list);
 		lock->ml.highest_blocked = LKM_IVMODE;
@@ -134,8 +145,6 @@
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
-
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
@@ -147,15 +156,21 @@
 
 void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
+	struct dlm_lock_resource *res;
 
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
+
 	assert_spin_locked(&dlm->ast_lock);
 
+	res = lock->lockres;
+
 	BUG_ON(!list_empty(&lock->bast_list));
 	if (lock->bast_pending)
-		mlog(0, "lock has a bast getting flushed right now\n");
+		mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	/* putting lock on list, add a ref */
 	dlm_lock_get(lock);
@@ -167,8 +182,6 @@
 
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
-
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
@@ -213,7 +226,10 @@
 	dlm_astlockfunc_t *fn;
 	struct dlm_lockstatus *lksb;
 
-	mlog_entry_void();
+	mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
+	     res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	lksb = lock->lksb;
 	fn = lock->ast;
@@ -231,7 +247,10 @@
 	struct dlm_lockstatus *lksb;
 	int lksbflags;
 
-	mlog_entry_void();
+	mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
+	     res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	lksb = lock->lksb;
 	BUG_ON(lock->ml.node == dlm->node_num);
@@ -250,9 +269,14 @@
 {
 	dlm_bastlockfunc_t *fn = lock->bast;
 
-	mlog_entry_void();
 	BUG_ON(lock->ml.node != dlm->node_num);
 
+	mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
+	     dlm->name, res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+	     blocked_type);
+
 	(*fn)(lock->astdata, blocked_type);
 }
 
@@ -332,7 +356,8 @@
 	/* cannot get a proxy ast message if this node owns it */
 	BUG_ON(res->owner == dlm->node_num);
 
-	mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);
+	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+	     res->lockname.name);
 
 	spin_lock(&res->spinlock);
 	if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -382,8 +407,12 @@
 	if (past->type == DLM_AST) {
 		/* do not alter lock refcount.  switching lists. */
 		list_move_tail(&lock->list, &res->granted);
-		mlog(0, "ast: Adding to granted list... type=%d, "
-		     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
+		mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+		     lock->ml.type, lock->ml.convert_type);
+
 		if (lock->ml.convert_type != LKM_IVMODE) {
 			lock->ml.type = lock->ml.convert_type;
 			lock->ml.convert_type = LKM_IVMODE;
@@ -426,9 +455,9 @@
 	size_t veclen = 1;
 	int status;
 
-	mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
-		   res->lockname.len, res->lockname.name, lock->ml.node,
-		   msg_type, blocked_type);
+	mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
+	     blocked_type);
 
 	memset(&past, 0, sizeof(struct dlm_proxy_ast));
 	past.node_idx = dlm->node_num;
@@ -441,7 +470,6 @@
 	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
 	vec[0].iov_base = &past;
 	if (flags & DLM_LKSB_GET_LVB) {
-		mlog(0, "returning requested LVB data\n");
 		be32_add_cpu(&past.flags, LKM_GET_LVB);
 		vec[1].iov_len = DLM_LVB_LEN;
 		vec[1].iov_base = lock->lksb->lvb;
@@ -451,8 +479,8 @@
 	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
 				     lock->ml.node, &status);
 	if (ret < 0)
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+		mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
+		     dlm->name, res->lockname.len, res->lockname.name, ret,
 		     lock->ml.node);
 	else {
 		if (status == DLM_RECOVERING) {
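
Most of the reworked mlog() calls above print the lock resource name with "%.*s" because the name is carried with an explicit length (res->lockname.len) rather than relying on a terminating NUL. A minimal userspace illustration of that printf precision specifier (the values here are invented):

/* "%.*s" prints at most 'len' bytes, so a length-counted, possibly
 * unterminated name can be logged safely. */
#include <stdio.h>

int main(void)
{
	const char raw[] = "M000000000000000046e0430000000a-bytes-past-the-name";
	int len = 31;			/* pretend this came from lockname.len */

	printf("%s: res %.*s, lock %u:%llu\n",
	       "mydomain", len, raw, 2u, 17ULL);
	return 0;
}
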
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index b36d0bf..4bdf7ba 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -50,10 +50,10 @@
 #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
 
 enum dlm_mle_type {
-	DLM_MLE_BLOCK,
-	DLM_MLE_MASTER,
-	DLM_MLE_MIGRATION,
-	DLM_MLE_NUM_TYPES
+	DLM_MLE_BLOCK = 0,
+	DLM_MLE_MASTER = 1,
+	DLM_MLE_MIGRATION = 2,
+	DLM_MLE_NUM_TYPES = 3,
 };
 
 struct dlm_master_list_entry {
@@ -82,8 +82,8 @@
 
 enum dlm_ast_type {
 	DLM_AST = 0,
-	DLM_BAST,
-	DLM_ASTUNLOCK
+	DLM_BAST = 1,
+	DLM_ASTUNLOCK = 2,
 };
 
 
@@ -119,9 +119,9 @@
 
 enum dlm_ctxt_state {
 	DLM_CTXT_NEW = 0,
-	DLM_CTXT_JOINED,
-	DLM_CTXT_IN_SHUTDOWN,
-	DLM_CTXT_LEAVING,
+	DLM_CTXT_JOINED = 1,
+	DLM_CTXT_IN_SHUTDOWN = 2,
+	DLM_CTXT_LEAVING = 3,
 };
 
 struct dlm_ctxt
@@ -388,8 +388,8 @@
 
 enum dlm_lockres_list {
 	DLM_GRANTED_LIST = 0,
-	DLM_CONVERTING_LIST,
-	DLM_BLOCKED_LIST
+	DLM_CONVERTING_LIST = 1,
+	DLM_BLOCKED_LIST = 2,
 };
 
 static inline int dlm_lvb_is_empty(char *lvb)
@@ -427,27 +427,27 @@
 
 
 enum {
-	DLM_MASTER_REQUEST_MSG    = 500,
-	DLM_UNUSED_MSG1,         /* 501 */
-	DLM_ASSERT_MASTER_MSG,	 /* 502 */
-	DLM_CREATE_LOCK_MSG,	 /* 503 */
-	DLM_CONVERT_LOCK_MSG,	 /* 504 */
-	DLM_PROXY_AST_MSG,	 /* 505 */
-	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
-	DLM_DEREF_LOCKRES_MSG,	 /* 507 */
-	DLM_MIGRATE_REQUEST_MSG, /* 508 */
-	DLM_MIG_LOCKRES_MSG, 	 /* 509 */
-	DLM_QUERY_JOIN_MSG,	 /* 510 */
-	DLM_ASSERT_JOINED_MSG,	 /* 511 */
-	DLM_CANCEL_JOIN_MSG,	 /* 512 */
-	DLM_EXIT_DOMAIN_MSG,	 /* 513 */
-	DLM_MASTER_REQUERY_MSG,	 /* 514 */
-	DLM_LOCK_REQUEST_MSG,	 /* 515 */
-	DLM_RECO_DATA_DONE_MSG,	 /* 516 */
-	DLM_BEGIN_RECO_MSG,	 /* 517 */
-	DLM_FINALIZE_RECO_MSG,	 /* 518 */
-	DLM_QUERY_REGION,	 /* 519 */
-	DLM_QUERY_NODEINFO,	 /* 520 */
+	DLM_MASTER_REQUEST_MSG		= 500,
+	DLM_UNUSED_MSG1			= 501,
+	DLM_ASSERT_MASTER_MSG		= 502,
+	DLM_CREATE_LOCK_MSG		= 503,
+	DLM_CONVERT_LOCK_MSG		= 504,
+	DLM_PROXY_AST_MSG		= 505,
+	DLM_UNLOCK_LOCK_MSG		= 506,
+	DLM_DEREF_LOCKRES_MSG		= 507,
+	DLM_MIGRATE_REQUEST_MSG		= 508,
+	DLM_MIG_LOCKRES_MSG		= 509,
+	DLM_QUERY_JOIN_MSG		= 510,
+	DLM_ASSERT_JOINED_MSG		= 511,
+	DLM_CANCEL_JOIN_MSG		= 512,
+	DLM_EXIT_DOMAIN_MSG		= 513,
+	DLM_MASTER_REQUERY_MSG		= 514,
+	DLM_LOCK_REQUEST_MSG		= 515,
+	DLM_RECO_DATA_DONE_MSG		= 516,
+	DLM_BEGIN_RECO_MSG		= 517,
+	DLM_FINALIZE_RECO_MSG		= 518,
+	DLM_QUERY_REGION		= 519,
+	DLM_QUERY_NODEINFO		= 520,
 };
 
 struct dlm_reco_node_data
@@ -460,19 +460,19 @@
 enum {
 	DLM_RECO_NODE_DATA_DEAD = -1,
 	DLM_RECO_NODE_DATA_INIT = 0,
-	DLM_RECO_NODE_DATA_REQUESTING,
-	DLM_RECO_NODE_DATA_REQUESTED,
-	DLM_RECO_NODE_DATA_RECEIVING,
-	DLM_RECO_NODE_DATA_DONE,
-	DLM_RECO_NODE_DATA_FINALIZE_SENT,
+	DLM_RECO_NODE_DATA_REQUESTING = 1,
+	DLM_RECO_NODE_DATA_REQUESTED = 2,
+	DLM_RECO_NODE_DATA_RECEIVING = 3,
+	DLM_RECO_NODE_DATA_DONE = 4,
+	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
 };
 
 
 enum {
 	DLM_MASTER_RESP_NO = 0,
-	DLM_MASTER_RESP_YES,
-	DLM_MASTER_RESP_MAYBE,
-	DLM_MASTER_RESP_ERROR
+	DLM_MASTER_RESP_YES = 1,
+	DLM_MASTER_RESP_MAYBE = 2,
+	DLM_MASTER_RESP_ERROR = 3,
 };
 
 
@@ -649,9 +649,9 @@
 #define DLM_MOD_KEY (0x666c6172)
 enum dlm_query_join_response_code {
 	JOIN_DISALLOW = 0,
-	JOIN_OK,
-	JOIN_OK_NO_MAP,
-	JOIN_PROTOCOL_MISMATCH,
+	JOIN_OK = 1,
+	JOIN_OK_NO_MAP = 2,
+	JOIN_PROTOCOL_MISMATCH = 3,
 };
 
 struct dlm_query_join_packet {
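
Spelling out every enumerator value pins the on-wire message numbers and array indices, so an insertion in the middle of a list can no longer silently renumber everything after it. A short sketch of the same idea, with a compile-time check added for illustration (the check itself is not part of this patch):

/* Explicit enum values pin a wire protocol; _Static_assert (C11) catches
 * accidental renumbering at build time. */
#include <stdio.h>

enum msg_type {
	MSG_MASTER_REQUEST	= 500,
	MSG_UNUSED1		= 501,
	MSG_ASSERT_MASTER	= 502,
	/* ... */
	MSG_QUERY_NODEINFO	= 520,
};

_Static_assert(MSG_ASSERT_MASTER == 502, "wire protocol renumbered");

int main(void)
{
	printf("MSG_ASSERT_MASTER = %d\n", MSG_ASSERT_MASTER);
	return 0;
}
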
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 272ec86..04a32be 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -370,92 +370,46 @@
 	kref_get(&dc->debug_refcnt);
 }
 
-static struct debug_buffer *debug_buffer_allocate(void)
+static int debug_release(struct inode *inode, struct file *file)
 {
-	struct debug_buffer *db = NULL;
-
-	db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
-	if (!db)
-		goto bail;
-
-	db->len = PAGE_SIZE;
-	db->buf = kmalloc(db->len, GFP_KERNEL);
-	if (!db->buf)
-		goto bail;
-
-	return db;
-bail:
-	kfree(db);
-	return NULL;
-}
-
-static ssize_t debug_buffer_read(struct file *file, char __user *buf,
-				 size_t nbytes, loff_t *ppos)
-{
-	struct debug_buffer *db = file->private_data;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len);
-}
-
-static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence)
-{
-	struct debug_buffer *db = file->private_data;
-	loff_t new = -1;
-
-	switch (whence) {
-	case 0:
-		new = off;
-		break;
-	case 1:
-		new = file->f_pos + off;
-		break;
-	}
-
-	if (new < 0 || new > db->len)
-		return -EINVAL;
-
-	return (file->f_pos = new);
-}
-
-static int debug_buffer_release(struct inode *inode, struct file *file)
-{
-	struct debug_buffer *db = file->private_data;
-
-	if (db)
-		kfree(db->buf);
-	kfree(db);
-
+	free_page((unsigned long)file->private_data);
 	return 0;
 }
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t nbytes, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+				       i_size_read(file->f_mapping->host));
+}
 /* end - util funcs */
 
 /* begin - purge list funcs */
-static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_lock_resource *res;
 	int out = 0;
 	unsigned long total = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping Purgelist for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->spinlock);
 	list_for_each_entry(res, &dlm->purge_list, purge) {
 		++total;
-		if (db->len - out < 100)
+		if (len - out < 100)
 			continue;
 		spin_lock(&res->spinlock);
 		out += stringify_lockname(res->lockname.name,
 					  res->lockname.len,
-					  db->buf + out, db->len - out);
-		out += snprintf(db->buf + out, db->len - out, "\t%ld\n",
+					  buf + out, len - out);
+		out += snprintf(buf + out, len - out, "\t%ld\n",
 				(jiffies - res->last_used)/HZ);
 		spin_unlock(&res->spinlock);
 	}
 	spin_unlock(&dlm->spinlock);
 
-	out += snprintf(db->buf + out, db->len - out,
-			"Total on list: %ld\n", total);
+	out += snprintf(buf + out, len - out, "Total on list: %ld\n", total);
 
 	return out;
 }
@@ -463,15 +417,15 @@
 static int debug_purgelist_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_purgelist_print(dlm, db);
+	i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -480,14 +434,14 @@
 
 static const struct file_operations debug_purgelist_fops = {
 	.open =		debug_purgelist_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 /* end - purge list funcs */
 
 /* begin - debug mle funcs */
-static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_master_list_entry *mle;
 	struct hlist_head *bucket;
@@ -495,7 +449,7 @@
 	int i, out = 0;
 	unsigned long total = 0, longest = 0, bucket_count = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping MLEs for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->master_lock);
@@ -506,16 +460,16 @@
 					  master_hash_node);
 			++total;
 			++bucket_count;
-			if (db->len - out < 200)
+			if (len - out < 200)
 				continue;
-			out += dump_mle(mle, db->buf + out, db->len - out);
+			out += dump_mle(mle, buf + out, len - out);
 		}
 		longest = max(longest, bucket_count);
 		bucket_count = 0;
 	}
 	spin_unlock(&dlm->master_lock);
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Total: %ld, Longest: %ld\n", total, longest);
 	return out;
 }
@@ -523,15 +477,15 @@
 static int debug_mle_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_mle_print(dlm, db);
+	i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -540,9 +494,9 @@
 
 static const struct file_operations debug_mle_fops = {
 	.open =		debug_mle_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 
 /* end - debug mle funcs */
@@ -757,7 +711,7 @@
 /* end - debug lockres funcs */
 
 /* begin - debug state funcs */
-static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	int out = 0;
 	struct dlm_reco_node_data *node;
@@ -781,35 +735,35 @@
 	}
 
 	/* Domain: xxxxxxxxxx  Key: 0xdfbac769 */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Domain: %s  Key: 0x%08x  Protocol: %d.%d\n",
 			dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
 			dlm->dlm_locking_proto.pv_minor);
 
 	/* Thread Pid: xxx  Node: xxx  State: xxxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Thread Pid: %d  Node: %d  State: %s\n",
-			dlm->dlm_thread_task->pid, dlm->node_num, state);
+			task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
 
 	/* Number of Joins: xxx  Joining Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Number of Joins: %d  Joining Node: %d\n",
 			dlm->num_joins, dlm->joining_node);
 
 	/* Domain Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Domain Map: ");
+	out += snprintf(buf + out, len - out, "Domain Map: ");
 	out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Live Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Live Map: ");
+	out += snprintf(buf + out, len - out, "Live Map: ");
 	out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Lock Resources: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lock Resources: %d (%d)\n",
 			atomic_read(&dlm->res_cur_count),
 			atomic_read(&dlm->res_tot_count));
@@ -821,29 +775,29 @@
 		cur_mles += atomic_read(&dlm->mle_cur_count[i]);
 
 	/* MLEs: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"MLEs: %d (%d)\n", cur_mles, tot_mles);
 
 	/*  Blocking: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Blocking: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
 
 	/*  Mastery: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Mastery: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
 
 	/*  Migration: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Migration: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
 
 	/* Lists: Dirty=Empty  Purge=InUse  PendingASTs=Empty  ... */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lists: Dirty=%s  Purge=%s  PendingASTs=%s  "
 			"PendingBASTs=%s\n",
 			(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
@@ -852,12 +806,12 @@
 			(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
 
 	/* Purge Count: xxx  Refs: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Purge Count: %d  Refs: %d\n", dlm->purge_count,
 			atomic_read(&dlm->dlm_refs.refcount));
 
 	/* Dead Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dead Node: %d\n", dlm->reco.dead_node);
 
 	/* What about DLM_RECO_STATE_FINALIZE? */
@@ -867,19 +821,19 @@
 		state = "INACTIVE";
 
 	/* Recovery Pid: xxxx  Master: xxx  State: xxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Recovery Pid: %d  Master: %d  State: %s\n",
-			dlm->dlm_reco_thread_task->pid,
+			task_pid_nr(dlm->dlm_reco_thread_task),
 			dlm->reco.new_master, state);
 
 	/* Recovery Map: xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Map: ");
+	out += snprintf(buf + out, len - out, "Recovery Map: ");
 	out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Recovery Node State: */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n");
+	out += snprintf(buf + out, len - out, "Recovery Node State:\n");
 	list_for_each_entry(node, &dlm->reco.node_data, list) {
 		switch (node->state) {
 		case DLM_RECO_NODE_DATA_INIT:
@@ -907,7 +861,7 @@
 			state = "BAD";
 			break;
 		}
-		out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n",
+		out += snprintf(buf + out, len - out, "\t%u - %s\n",
 				node->node_num, state);
 	}
 
@@ -919,15 +873,15 @@
 static int debug_state_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db = NULL;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_state_print(dlm, db);
+	i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -936,9 +890,9 @@
 
 static const struct file_operations debug_state_fops = {
 	.open =		debug_state_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 /* end  - debug state funcs */
 
@@ -1002,14 +956,10 @@
 	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
 
 	if (dc) {
-		if (dc->debug_purgelist_dentry)
-			debugfs_remove(dc->debug_purgelist_dentry);
-		if (dc->debug_mle_dentry)
-			debugfs_remove(dc->debug_mle_dentry);
-		if (dc->debug_lockres_dentry)
-			debugfs_remove(dc->debug_lockres_dentry);
-		if (dc->debug_state_dentry)
-			debugfs_remove(dc->debug_state_dentry);
+		debugfs_remove(dc->debug_purgelist_dentry);
+		debugfs_remove(dc->debug_mle_dentry);
+		debugfs_remove(dc->debug_lockres_dentry);
+		debugfs_remove(dc->debug_state_dentry);
 		dlm_debug_put(dc);
 	}
 }
@@ -1040,8 +990,7 @@
 
 void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
 {
-	if (dlm->dlm_debugfs_subroot)
-		debugfs_remove(dlm->dlm_debugfs_subroot);
+	debugfs_remove(dlm->dlm_debugfs_subroot);
 }
 
 /* debugfs root */
@@ -1057,7 +1006,6 @@
 
 void dlm_destroy_debugfs_root(void)
 {
-	if (dlm_debugfs_root)
-		debugfs_remove(dlm_debugfs_root);
+	debugfs_remove(dlm_debugfs_root);
 }
 #endif	/* CONFIG_DEBUG_FS */
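
The dlmdebug.c rework drops struct debug_buffer entirely: open() formats into a single zeroed page, records the logical length with i_size_write(), and read()/llseek() fall back to simple_read_from_buffer() and generic_file_llseek(). The print helpers keep the usual `out += snprintf(buf + out, len - out, ...)` accumulation idiom; the standalone sketch below (names and values invented) shows that idiom and why the shrinking `len - out` bound matters. One subtlety: snprintf() returns the length that *would* have been written, so a truncated call can push `out` past `len`, which is why the list printers above bail out once `len - out < 100`.

/* Accumulate formatted lines into one fixed buffer, tracking the offset,
 * and stop appending once the buffer is nearly full. */
#include <stdio.h>

#define BUF_LEN 4096	/* stand-in for one page */

int main(void)
{
	char buf[BUF_LEN];
	int out = 0, i;

	out += snprintf(buf + out, sizeof(buf) - out,
			"Dumping Purgelist for Domain: %s\n", "mydomain");

	for (i = 0; i < 3 && out < (int)sizeof(buf) - 100; i++)
		out += snprintf(buf + out, sizeof(buf) - out,
				"res%d\t%ld\n", i, 42L - i);

	/* 'out' is the logical length a reader would see via i_size_read() */
	fwrite(buf, 1, out, stdout);
	return 0;
}
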
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 8c686d2..1f27c48 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -37,11 +37,6 @@
 	struct dentry *debug_purgelist_dentry;
 };
 
-struct debug_buffer {
-	int len;
-	char *buf;
-};
-
 struct debug_lockres {
 	int dl_len;
 	char *dl_buf;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index cc2aaa9..7e38a07 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -460,8 +460,6 @@
 		}
 		cond_resched_lock(&dlm->spinlock);
 		num += n;
-		mlog(0, "%s: touched %d lockreses in bucket %d "
-		     "(tot=%d)\n", dlm->name, n, i, num);
 	}
 	spin_unlock(&dlm->spinlock);
 	wake_up(&dlm->dlm_thread_wq);
@@ -1661,8 +1659,8 @@
 
 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
 {
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_up);
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_down);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
 	o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
 }
 
@@ -1674,13 +1672,13 @@
 
 	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
 			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_down);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
 	if (status)
 		goto bail;
 
 	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
 			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_up);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
 	if (status)
 		goto bail;
 
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 69cf369..7009292 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -106,6 +106,9 @@
 
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
+		if (!dlm_lock_compatible(tmplock->ml.convert_type,
+					 lock->ml.type))
+			return 0;
 	}
 
 	return 1;
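
The dlmlock.c hunk tightens the grant check: a new lock must be compatible not only with the mode each existing lock holds, but also with the mode a queued conversion is asking for, so a fresh request cannot be granted over a pending upgrade. The toy model below uses a plain shared/exclusive pair instead of the real DLM mode lattice, so it is only an illustration of the shape of the check, not the ocfs2 compatibility table:

/* Toy compatibility check: a new request must not conflict with either the
 * granted mode or a pending convert-to mode of any existing lock. */
#include <stdbool.h>
#include <stdio.h>

enum mode { MODE_NONE, MODE_SHARED, MODE_EXCLUSIVE };

struct lock {
	enum mode granted;
	enum mode convert_to;	/* MODE_NONE if no conversion is queued */
};

static bool compatible(enum mode a, enum mode b)
{
	if (a == MODE_NONE || b == MODE_NONE)
		return true;
	return a == MODE_SHARED && b == MODE_SHARED;
}

static bool can_grant(const struct lock *locks, int n, enum mode want)
{
	for (int i = 0; i < n; i++) {
		if (!compatible(locks[i].granted, want))
			return false;
		if (!compatible(locks[i].convert_to, want))
			return false;	/* the extra check this patch adds */
	}
	return true;
}

int main(void)
{
	struct lock held[] = { { MODE_SHARED, MODE_EXCLUSIVE } };

	/* A shared holder is upgrading: a new shared request must now wait. */
	printf("grant shared? %s\n",
	       can_grant(held, 1, MODE_SHARED) ? "yes" : "no");
	return 0;
}
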
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 2211acf..1d6d1d2 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -122,15 +122,13 @@
 void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			      struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
 	if (__dlm_lockres_unused(res)){
 		if (list_empty(&res->purge)) {
-			mlog(0, "putting lockres %.*s:%p onto purge list\n",
-			     res->lockname.len, res->lockname.name, res);
+			mlog(0, "%s: Adding res %.*s to purge list\n",
+			     dlm->name, res->lockname.len, res->lockname.name);
 
 			res->last_used = jiffies;
 			dlm_lockres_get(res);
@@ -138,8 +136,8 @@
 			dlm->purge_count++;
 		}
 	} else if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
-		     res->lockname.len, res->lockname.name, res, res->owner);
+		mlog(0, "%s: Removing res %.*s from purge list\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
@@ -150,7 +148,6 @@
 void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			    struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 	spin_lock(&dlm->spinlock);
 	spin_lock(&res->spinlock);
 
@@ -171,9 +168,8 @@
 
 	master = (res->owner == dlm->node_num);
 
-
-	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
-	     res->lockname.name, master);
+	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, master);
 
 	if (!master) {
 		res->state |= DLM_LOCK_RES_DROPPING_REF;
@@ -189,27 +185,25 @@
 		/* clear our bit from the master's refmap, ignore errors */
 		ret = dlm_drop_lockres_ref(dlm, res);
 		if (ret < 0) {
-			mlog_errno(ret);
+			mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
+			     res->lockname.len, res->lockname.name, ret);
 			if (!dlm_is_host_down(ret))
 				BUG();
 		}
-		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
-		     dlm->name, res->lockname.len, res->lockname.name, ret);
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
 	}
 
 	if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purgelist, "
-		     "master = %d\n", res->lockname.len, res->lockname.name,
-		     res, master);
+		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name, master);
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
 		dlm->purge_count--;
 	}
 
 	if (!__dlm_lockres_unused(res)) {
-		mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
 		     dlm->name, res->lockname.len, res->lockname.name);
 		__dlm_print_one_lock_resource(res);
 		BUG();
@@ -266,10 +260,10 @@
 		unused = __dlm_lockres_unused(lockres);
 		if (!unused ||
 		    (lockres->state & DLM_LOCK_RES_MIGRATING)) {
-			mlog(0, "lockres %s:%.*s: is in use or "
-			     "being remastered, used %d, state %d\n",
-			     dlm->name, lockres->lockname.len,
-			     lockres->lockname.name, !unused, lockres->state);
+			mlog(0, "%s: res %.*s is in use or being remastered, "
+			     "used %d, state %d\n", dlm->name,
+			     lockres->lockname.len, lockres->lockname.name,
+			     !unused, lockres->state);
 			list_move_tail(&dlm->purge_list, &lockres->purge);
 			spin_unlock(&lockres->spinlock);
 			continue;
@@ -296,15 +290,12 @@
 	struct list_head *head;
 	int can_grant = 1;
 
-	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
-	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
-	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
-	//	  res->lockname.name);
-
-	/* because this function is called with the lockres
+	/*
+	 * Because this function is called with the lockres
 	 * spinlock, and because we know that it is not migrating/
 	 * recovering/in-progress, it is fine to reserve asts and
-	 * basts right before queueing them all throughout */
+	 * basts right before queueing them all throughout
+	 */
 	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&res->spinlock);
 	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
@@ -314,13 +305,13 @@
 converting:
 	if (list_empty(&res->converting))
 		goto blocked;
-	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
-	     res->lockname.name);
+	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
+	     res->lockname.len, res->lockname.name);
 
 	target = list_entry(res->converting.next, struct dlm_lock, list);
 	if (target->ml.convert_type == LKM_IVMODE) {
-		mlog(ML_ERROR, "%.*s: converting a lock with no "
-		     "convert_type!\n", res->lockname.len, res->lockname.name);
+		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 		BUG();
 	}
 	head = &res->granted;
@@ -365,9 +356,12 @@
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
-		     "granting: %d, node: %u\n", res->lockname.len,
-		     res->lockname.name, target->ml.type,
+		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
+		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+		     target->ml.type,
 		     target->ml.convert_type, target->ml.node);
 
 		target->ml.type = target->ml.convert_type;
@@ -428,11 +422,14 @@
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
-		     "node: %u\n", res->lockname.len, res->lockname.name,
+		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
 		     target->ml.type, target->ml.node);
 
-		// target->ml.type is already correct
+		/* target->ml.type is already correct */
 		list_move_tail(&target->list, &res->granted);
 
 		BUG_ON(!target->lksb);
@@ -453,7 +450,6 @@
 /* must have NO locks when calling this with res !=NULL * */
 void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
 	if (res) {
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
@@ -466,8 +462,6 @@
 
 void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
@@ -484,13 +478,16 @@
 			res->state |= DLM_LOCK_RES_DIRTY;
 		}
 	}
+
+	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+	     res->lockname.name);
 }
 
 
 /* Launch the NM thread for the mounted volume */
 int dlm_launch_thread(struct dlm_ctxt *dlm)
 {
-	mlog(0, "starting dlm thread...\n");
+	mlog(0, "Starting dlm_thread...\n");
 
 	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
 	if (IS_ERR(dlm->dlm_thread_task)) {
@@ -505,7 +502,7 @@
 void dlm_complete_thread(struct dlm_ctxt *dlm)
 {
 	if (dlm->dlm_thread_task) {
-		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
+		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
 		kthread_stop(dlm->dlm_thread_task);
 		dlm->dlm_thread_task = NULL;
 	}
@@ -536,7 +533,12 @@
 		/* get an extra ref on lock */
 		dlm_lock_get(lock);
 		res = lock->lockres;
-		mlog(0, "delivering an ast for this lockres\n");
+		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     lock->ml.type, lock->ml.node);
 
 		BUG_ON(!lock->ast_pending);
 
@@ -557,9 +559,9 @@
 		/* possible that another ast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->ast_list)) {
-			mlog(0, "aha another ast got queued while "
-			     "we were finishing the last one.  will "
-			     "keep the ast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, AST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->ast_pending = 0;
 
@@ -590,8 +592,12 @@
 		dlm_lock_put(lock);
 		spin_unlock(&dlm->ast_lock);
 
-		mlog(0, "delivering a bast for this lockres "
-		     "(blocked = %d\n", hi);
+		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
+		     "blocked %d, node %u\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     hi, lock->ml.node);
 
 		if (lock->ml.node != dlm->node_num) {
 			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
@@ -605,9 +611,9 @@
 		/* possible that another bast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->bast_list)) {
-			mlog(0, "aha another bast got queued while "
-			     "we were finishing the last one.  will "
-			     "keep the bast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, BAST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->bast_pending = 0;
 
@@ -675,11 +681,12 @@
 			spin_lock(&res->spinlock);
 			if (res->owner != dlm->node_num) {
 				__dlm_print_one_lock_resource(res);
-				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
-				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
+				     " dirty %d\n", dlm->name,
+				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
+				     !!(res->state & DLM_LOCK_RES_MIGRATING),
+				     !!(res->state & DLM_LOCK_RES_RECOVERING),
+				     !!(res->state & DLM_LOCK_RES_DIRTY));
 			}
 			BUG_ON(res->owner != dlm->node_num);
 
@@ -693,8 +700,8 @@
 				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
 				spin_unlock(&dlm->ast_lock);
-				mlog(0, "delaying list shuffling for in-"
-				     "progress lockres %.*s, state=%d\n",
+				mlog(0, "%s: res %.*s, inprogress, delay list "
+				     "shuffle, state %d\n", dlm->name,
 				     res->lockname.len, res->lockname.name,
 				     res->state);
 				delay = 1;
@@ -706,10 +713,6 @@
 			 * spinlock and do NOT have the dlm lock.
 			 * safe to reserve/queue asts and run the lists. */
 
-			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
-			     "res=%.*s\n", dlm->name,
-			     res->lockname.len, res->lockname.name);
-
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
@@ -733,7 +736,8 @@
 			/* unlikely, but we may need to give time to
 			 * other tasks */
 			if (!--n) {
-				mlog(0, "throttling dlm_thread\n");
+				mlog(0, "%s: Throttling dlm thread\n",
+				     dlm->name);
 				break;
 			}
 		}
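
One of the dlmthread.c hunks replaces the "yes"/"no" strings with `!!(res->state & FLAG)`, the usual C idiom for collapsing a bit test to 0 or 1 so it can be printed with %d. A tiny illustration (flag values invented):

/* '!!' maps any non-zero value to 1 and zero to 0. */
#include <stdio.h>

#define RES_MIGRATING	0x10
#define RES_DIRTY	0x80

int main(void)
{
	unsigned state = RES_DIRTY;

	printf("mig %d, dirty %d\n",
	       !!(state & RES_MIGRATING), !!(state & RES_DIRTY));
	return 0;
}
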
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 6adafa5..5dbc306 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -137,9 +137,7 @@
 	}
 
 	result = d_obtain_alias(inode);
-	if (!IS_ERR(result))
-		d_set_d_op(result, &ocfs2_dentry_ops);
-	else
+	if (IS_ERR(result))
 		mlog_errno(PTR_ERR(result));
 
 bail:
@@ -175,8 +173,6 @@
 	}
 
 	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
-	if (!IS_ERR(parent))
-		d_set_d_op(parent, &ocfs2_dentry_ops);
 
 bail_unlock:
 	ocfs2_inode_unlock(dir, 0);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index bdadbae0..a665195 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1989,28 +1989,32 @@
 	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
 }
 
-static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
+static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
 			    loff_t len)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_space_resv sr;
 	int change_size = 1;
+	int cmd = OCFS2_IOC_RESVSP64;
 
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
 	if (!ocfs2_writes_unwritten_extents(osb))
 		return -EOPNOTSUPP;
 
-	if (S_ISDIR(inode->i_mode))
-		return -ENODEV;
-
 	if (mode & FALLOC_FL_KEEP_SIZE)
 		change_size = 0;
 
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = OCFS2_IOC_UNRESVSP64;
+
 	sr.l_whence = 0;
 	sr.l_start = (s64)offset;
 	sr.l_len = (s64)len;
 
-	return __ocfs2_change_file_space(NULL, inode, offset,
-					 OCFS2_IOC_RESVSP64, &sr, change_size);
+	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
+					 change_size);
 }
 
 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
@@ -2606,7 +2610,6 @@
 	.getxattr	= generic_getxattr,
 	.listxattr	= ocfs2_listxattr,
 	.removexattr	= generic_removexattr,
-	.fallocate	= ocfs2_fallocate,
 	.fiemap		= ocfs2_fiemap,
 };
 
@@ -2638,6 +2641,7 @@
 	.flock		= ocfs2_flock,
 	.splice_read	= ocfs2_file_splice_read,
 	.splice_write	= ocfs2_file_splice_write,
+	.fallocate	= ocfs2_fallocate,
 };
 
 const struct file_operations ocfs2_dops = {
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index f935fd6..4068c6c 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -434,7 +434,7 @@
 	 * #1 and #2 can be simply solved by never taking the lock
 	 * here for system files (which are the only type we read
 	 * during mount). It's a heavier approach, but our main
-	 * concern is user-accesible files anyway.
+	 * concern is user-accessible files anyway.
 	 *
 	 * #3 works itself out because we'll eventually take the
 	 * cluster lock before trusting anything anyway.
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 43e56b9..6180da1 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -405,9 +405,9 @@
 	       ocfs2_quota_trans_credits(sb);
 }
 
-/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
- * bitmap block for the new bit) dx_root update for free list */
-#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1)
+/* data block for new dir/symlink, allocation of directory block, dx_root
+ * update for free list */
+#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
 
 static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
 {
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d14cad6..849fb4a 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -147,7 +147,6 @@
 	spin_unlock(&oi->ip_lock);
 
 bail_add:
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 	ret = d_splice_alias(inode, dentry);
 
 	if (inode) {
@@ -415,7 +414,6 @@
 		mlog_errno(status);
 		goto leave;
 	}
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 
 	status = ocfs2_add_entry(handle, dentry, inode,
 				 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
@@ -743,7 +741,6 @@
 	}
 
 	ihold(inode);
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 	d_instantiate(dentry, inode);
 
 out_commit:
@@ -1017,8 +1014,11 @@
 		 * An error return must mean that no cluster locks
 		 * were held on function exit.
 		 */
-		if (oi1->ip_blkno != oi2->ip_blkno)
+		if (oi1->ip_blkno != oi2->ip_blkno) {
 			ocfs2_inode_unlock(inode2, 1);
+			brelse(*bh2);
+			*bh2 = NULL;
+		}
 
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -1794,7 +1794,6 @@
 		mlog_errno(status);
 		goto bail;
 	}
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 
 	status = ocfs2_add_entry(handle, dentry, inode,
 				 le64_to_cpu(fe->i_blkno), parent_fe_bh,
@@ -2459,7 +2458,6 @@
 		goto out_commit;
 	}
 
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 	d_instantiate(dentry, inode);
 	status = 0;
 out_commit:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 70dd3b1..51cd689 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -420,6 +420,11 @@
 	struct inode			*osb_tl_inode;
 	struct buffer_head		*osb_tl_bh;
 	struct delayed_work		osb_truncate_log_wq;
+	/*
+	 * How many clusters in our truncate log.
+	 * It must be protected by osb_tl_inode->i_mutex.
+	 */
+	unsigned int truncated_clusters;
 
 	struct ocfs2_node_map		osb_recovering_orphan_dirs;
 	unsigned int			*osb_orphan_wipes;
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b5f9160..19ebc5a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3228,7 +3228,7 @@
 					u32 num_clusters, unsigned int e_flags)
 {
 	int ret, delete, index, credits =  0;
-	u32 new_bit, new_len;
+	u32 new_bit, new_len, orig_num_clusters;
 	unsigned int set_len;
 	struct ocfs2_super *osb = OCFS2_SB(sb);
 	handle_t *handle;
@@ -3261,6 +3261,8 @@
 		goto out;
 	}
 
+	orig_num_clusters = num_clusters;
+
 	while (num_clusters) {
 		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
 					     p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@
 	 * in write-back mode.
 	 */
 	if (context->get_clusters == ocfs2_di_get_clusters) {
-		ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+		ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+					       orig_num_clusters);
 		if (ret)
 			mlog_errno(ret);
 	}
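
The refcounttree.c change saves num_clusters into orig_num_clusters before the while loop consumes it, because the writeback sync afterwards needs the full original extent length rather than whatever is left when the loop exits. The pattern is worth a tiny standalone example (names invented):

/* Preserve a value a loop will consume when a later step still needs the
 * original. */
#include <stdio.h>

int main(void)
{
	unsigned remaining = 8;			/* clusters still to process */
	unsigned total = remaining;		/* saved before the loop eats it */

	while (remaining) {
		unsigned chunk = remaining > 3 ? 3 : remaining;
		remaining -= chunk;		/* process 'chunk' clusters */
	}

	/* Post-processing must cover the whole range, not the leftover 0. */
	printf("sync %u clusters (remaining=%u)\n", total, remaining);
	return 0;
}
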
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 5fed60d..71998d4 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1916,7 +1916,7 @@
 	if (res->sr_bg_blkno) {
 		/* Attempt to short-circuit the usual search mechanism
 		 * by jumping straight to the most recently used
-		 * allocation group. This helps us mantain some
+		 * allocation group. This helps us maintain some
 		 * contiguousness across allocations. */
 		status = ocfs2_search_one_group(ac, handle, bits_wanted,
 						min_bits, res, &bits_left);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 17ff46f..36c423f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -993,8 +993,7 @@
 }
 
 /* Handle quota on quotactl */
-static int ocfs2_quota_on(struct super_block *sb, int type, int format_id,
-			  char *path)
+static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
 {
 	unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
 					     OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
@@ -1013,7 +1012,7 @@
 }
 
 static const struct quotactl_ops ocfs2_quotactl_ops = {
-	.quota_on	= ocfs2_quota_on,
+	.quota_on_meta	= ocfs2_quota_on,
 	.quota_off	= ocfs2_quota_off,
 	.quota_sync	= dquot_quota_sync,
 	.get_info	= dquot_get_dqinfo,
@@ -1317,7 +1316,7 @@
 			       struct mount_options *mopt,
 			       int is_remount)
 {
-	int status;
+	int status, user_stack = 0;
 	char *p;
 	u32 tmp;
 
@@ -1460,6 +1459,15 @@
 			memcpy(mopt->cluster_stack, args[0].from,
 			       OCFS2_STACK_LABEL_LEN);
 			mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+			/*
+			 * Open code the memcmp here as we don't have
+			 * an osb to pass to
+			 * ocfs2_userspace_stack().
+			 */
+			if (memcmp(mopt->cluster_stack,
+				   OCFS2_CLASSIC_CLUSTER_STACK,
+				   OCFS2_STACK_LABEL_LEN))
+				user_stack = 1;
 			break;
 		case Opt_inode64:
 			mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1515,13 +1523,16 @@
 		}
 	}
 
-	/* Ensure only one heartbeat mode */
-	tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
-				 OCFS2_MOUNT_HB_NONE);
-	if (hweight32(tmp) != 1) {
-		mlog(ML_ERROR, "Invalid heartbeat mount options\n");
-		status = 0;
-		goto bail;
+	if (user_stack == 0) {
+		/* Ensure only one heartbeat mode */
+		tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
+					 OCFS2_MOUNT_HB_GLOBAL |
+					 OCFS2_MOUNT_HB_NONE);
+		if (hweight32(tmp) != 1) {
+			mlog(ML_ERROR, "Invalid heartbeat mount options\n");
+			status = 0;
+			goto bail;
+		}
 	}
 
 	status = 1;
@@ -2097,6 +2108,7 @@
 
 	sb->s_fs_info = osb;
 	sb->s_op = &ocfs2_sops;
+	sb->s_d_op = &ocfs2_dentry_ops;
 	sb->s_export_op = &ocfs2_export_ops;
 	sb->s_qcop = &ocfs2_quotactl_ops;
 	sb->dq_op = &ocfs2_quota_operations;
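
Together with the export.c and namei.c hunks that drop the per-dentry d_set_d_op() calls, super.c now sets sb->s_d_op once at mount time, so dentries created under the superblock pick up ocfs2_dentry_ops automatically. The generic shape of that change, attaching a default ops table to the container instead of to each object, looks like the sketch below (plain C with invented names, not VFS code):

/* Default ops on the container: objects created under it inherit the table,
 * so no creation site has to install it by hand. */
#include <stdio.h>
#include <stdlib.h>

struct item_ops { void (*validate)(const char *name); };

struct container { const struct item_ops *default_ops; };
struct item      { const char *name; const struct item_ops *ops; };

static void my_validate(const char *name) { printf("validate %s\n", name); }

static const struct item_ops my_ops = { .validate = my_validate };

static struct item *item_create(struct container *c, const char *name)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		return NULL;
	it->name = name;
	it->ops = c->default_ops;	/* inherited; no per-site assignment */
	return it;
}

int main(void)
{
	struct container c = { .default_ops = &my_ops };	/* "mount time" */
	struct item *it = item_create(&c, "foo");

	if (it) {
		it->ops->validate(it->name);
		free(it);
	}
	return 0;
}
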
diff --git a/fs/open.c b/fs/open.c
index 4197b9e..b47aab3 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -223,11 +223,24 @@
 		return -EINVAL;
 
 	/* Return error if mode is not supported */
-	if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
+
+	/* Punch hole must have keep size set */
+	if ((mode & FALLOC_FL_PUNCH_HOLE) &&
+	    !(mode & FALLOC_FL_KEEP_SIZE))
 		return -EOPNOTSUPP;
 
 	if (!(file->f_mode & FMODE_WRITE))
 		return -EBADF;
+
+	/* It's not possible to punch a hole in an append-only file */
+	if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
+		return -EPERM;
+
+	if (IS_IMMUTABLE(inode))
+		return -EPERM;
+
 	/*
 	 * Revalidate the write permissions, in case security policy has
 	 * changed since the files were opened.
@@ -250,10 +263,10 @@
 	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
 		return -EFBIG;
 
-	if (!inode->i_op->fallocate)
+	if (!file->f_op->fallocate)
 		return -EOPNOTSUPP;
 
-	return inode->i_op->fallocate(inode, mode, offset, len);
+	return file->f_op->fallocate(file, mode, offset, len);
 }
 
 SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
@@ -785,6 +798,8 @@
 
 	/* Pick up the filp from the open intent */
 	filp = nd->intent.open.file;
+	nd->intent.open.file = NULL;
+
 	/* Has the filesystem initialised the file for us? */
 	if (filp->f_path.dentry == NULL) {
 		path_get(&nd->path);
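
With the VFS validating the mode bits and dispatching through file->f_op->fallocate, a filesystem that implements hole punching can be driven from userspace. A minimal caller is shown below; it assumes a Linux system and a filesystem that supports FALLOC_FL_PUNCH_HOLE, and otherwise fallocate() simply fails with EOPNOTSUPP. Per the checks added above, FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE.

/* Punch a 4 KiB hole at offset 4096 in a 16 KiB test file. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 16384) < 0) {
		perror("setup");
		return 1;
	}

	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 4096) < 0)
		perror("fallocate");	/* EOPNOTSUPP if the fs lacks support */

	close(fd);
	return 0;
}
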
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 0a8b0ad..9c21119 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -237,6 +237,13 @@
 	return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
 }
 
+ssize_t part_ro_show(struct device *dev,
+		       struct device_attribute *attr, char *buf)
+{
+	struct hd_struct *p = dev_to_part(dev);
+	return sprintf(buf, "%d\n", p->policy ? 1 : 0);
+}
+
 ssize_t part_alignment_offset_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
@@ -312,6 +319,7 @@
 static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
 static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
 static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show,
 		   NULL);
@@ -326,6 +334,7 @@
 	&dev_attr_partition.attr,
 	&dev_attr_start.attr,
 	&dev_attr_size.attr,
+	&dev_attr_ro.attr,
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_discard_alignment.attr,
 	&dev_attr_stat.attr,
@@ -372,6 +381,11 @@
 	put_device(part_to_dev(part));
 }
 
+void __delete_partition(struct hd_struct *part)
+{
+	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+}
+
 void delete_partition(struct gendisk *disk, int partno)
 {
 	struct disk_part_tbl *ptbl = disk->part_tbl;
@@ -390,7 +404,7 @@
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
 
-	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+	hd_struct_put(part);
 }
 
 static ssize_t whole_disk_show(struct device *dev,
@@ -489,6 +503,7 @@
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
 
+	hd_ref_init(p);
 	return p;
 
 out_free_info:
@@ -507,65 +522,6 @@
 	return ERR_PTR(err);
 }
 
-/* Not exported, helper to add_disk(). */
-void register_disk(struct gendisk *disk)
-{
-	struct device *ddev = disk_to_dev(disk);
-	struct block_device *bdev;
-	struct disk_part_iter piter;
-	struct hd_struct *part;
-	int err;
-
-	ddev->parent = disk->driverfs_dev;
-
-	dev_set_name(ddev, disk->disk_name);
-
-	/* delay uevents, until we scanned partition table */
-	dev_set_uevent_suppress(ddev, 1);
-
-	if (device_add(ddev))
-		return;
-	if (!sysfs_deprecated) {
-		err = sysfs_create_link(block_depr, &ddev->kobj,
-					kobject_name(&ddev->kobj));
-		if (err) {
-			device_del(ddev);
-			return;
-		}
-	}
-	disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
-	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
-
-	/* No minors to use for partitions */
-	if (!disk_partitionable(disk))
-		goto exit;
-
-	/* No such device (e.g., media were just removed) */
-	if (!get_capacity(disk))
-		goto exit;
-
-	bdev = bdget_disk(disk, 0);
-	if (!bdev)
-		goto exit;
-
-	bdev->bd_invalidated = 1;
-	err = blkdev_get(bdev, FMODE_READ);
-	if (err < 0)
-		goto exit;
-	blkdev_put(bdev, FMODE_READ);
-
-exit:
-	/* announce disk after possible partitions are created */
-	dev_set_uevent_suppress(ddev, 0);
-	kobject_uevent(&ddev->kobj, KOBJ_ADD);
-
-	/* announce possible partitions */
-	disk_part_iter_init(&piter, disk, 0);
-	while ((part = disk_part_iter_next(&piter)))
-		kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
-	disk_part_iter_exit(&piter);
-}
-
 static bool disk_unlock_native_capacity(struct gendisk *disk)
 {
 	const struct block_device_operations *bdops = disk->fops;
@@ -728,33 +684,3 @@
 }
 
 EXPORT_SYMBOL(read_dev_sector);
-
-void del_gendisk(struct gendisk *disk)
-{
-	struct disk_part_iter piter;
-	struct hd_struct *part;
-
-	/* invalidate stuff */
-	disk_part_iter_init(&piter, disk,
-			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
-	while ((part = disk_part_iter_next(&piter))) {
-		invalidate_partition(disk, part->partno);
-		delete_partition(disk, part->partno);
-	}
-	disk_part_iter_exit(&piter);
-
-	invalidate_partition(disk, 0);
-	blk_free_devt(disk_to_dev(disk)->devt);
-	set_capacity(disk, 0);
-	disk->flags &= ~GENHD_FL_UP;
-	unlink_gendisk(disk);
-	part_stat_set_all(&disk->part0, 0);
-	disk->part0.stamp = 0;
-
-	kobject_put(disk->part0.holder_dir);
-	kobject_put(disk->slave_dir);
-	disk->driverfs_dev = NULL;
-	if (!sysfs_deprecated)
-		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
-	device_del(disk_to_dev(disk));
-}
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 789c625..b10e354 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -251,6 +251,11 @@
 	}
 
 	vm->vblk_size     = get_unaligned_be32(data + 0x08);
+	if (vm->vblk_size == 0) {
+		ldm_error ("Illegal VBLK size");
+		return false;
+	}
+
 	vm->vblk_offset   = get_unaligned_be32(data + 0x0C);
 	vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
 
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
index 68d6a21..11f688b 100644
--- a/fs/partitions/mac.c
+++ b/fs/partitions/mac.c
@@ -29,10 +29,9 @@
 
 int mac_partition(struct parsed_partitions *state)
 {
-	int slot = 1;
 	Sector sect;
 	unsigned char *data;
-	int blk, blocks_in_map;
+	int slot, blocks_in_map;
 	unsigned secsize;
 #ifdef CONFIG_PPC_PMAC
 	int found_root = 0;
@@ -59,10 +58,14 @@
 		put_dev_sector(sect);
 		return 0;		/* not a MacOS disk */
 	}
-	strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
 	blocks_in_map = be32_to_cpu(part->map_count);
-	for (blk = 1; blk <= blocks_in_map; ++blk) {
-		int pos = blk * secsize;
+	if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
+		put_dev_sector(sect);
+		return 0;
+	}
+	strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
+	for (slot = 1; slot <= blocks_in_map; ++slot) {
+		int pos = slot * secsize;
 		put_dev_sector(sect);
 		data = read_part_sector(state, pos/512, &sect);
 		if (!data)
@@ -113,13 +116,11 @@
 			}
 
 			if (goodness > found_root_goodness) {
-				found_root = blk;
+				found_root = slot;
 				found_root_goodness = goodness;
 			}
 		}
 #endif /* CONFIG_PPC_PMAC */
-
-		++slot;
 	}
 #ifdef CONFIG_PPC_PMAC
 	if (found_root_goodness)
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c
index 48cec7c..be03a0b 100644
--- a/fs/partitions/osf.c
+++ b/fs/partitions/osf.c
@@ -10,10 +10,13 @@
 #include "check.h"
 #include "osf.h"
 
+#define MAX_OSF_PARTITIONS 8
+
 int osf_partition(struct parsed_partitions *state)
 {
 	int i;
 	int slot = 1;
+	unsigned int npartitions;
 	Sector sect;
 	unsigned char *data;
 	struct disklabel {
@@ -45,7 +48,7 @@
 			u8  p_fstype;
 			u8  p_frag;
 			__le16 p_cpg;
-		} d_partitions[8];
+		} d_partitions[MAX_OSF_PARTITIONS];
 	} * label;
 	struct d_partition * partition;
 
@@ -63,7 +66,12 @@
 		put_dev_sector(sect);
 		return 0;
 	}
-	for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) {
+	npartitions = le16_to_cpu(label->d_npartitions);
+	if (npartitions > MAX_OSF_PARTITIONS) {
+		put_dev_sector(sect);
+		return 0;
+	}
+	for (i = 0 ; i < npartitions; i++, partition++) {
 		if (slot == state->limit)
 		        break;
 		if (le32_to_cpu(partition->p_size))
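
The ldm.c, mac.c and osf.c fixes all apply the same rule: a size or count read from an on-disk partition table is untrusted and must be sanity-checked before it sizes a loop or feeds later arithmetic (vblk_size must be non-zero, blocks_in_map and d_npartitions must fit the parser's limits). A compact illustration of that validation step, using a fictional header layout:

/* Validate fields read from untrusted on-disk metadata before using them. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PARTS 64

struct disk_hdr {
	uint32_t block_size;	/* must be non-zero before it is used in math */
	uint32_t nr_parts;	/* must fit the parser's table */
};

static int parse_hdr(const struct disk_hdr *h)
{
	if (h->block_size == 0)
		return -1;			/* reject nonsense sizes */
	if (h->nr_parts > MAX_PARTS)
		return -1;			/* reject runaway counts */

	for (uint32_t i = 0; i < h->nr_parts; i++)
		printf("partition %u, %u-byte blocks\n", i, h->block_size);
	return 0;
}

int main(void)
{
	struct disk_hdr bad = { .block_size = 0, .nr_parts = 1u << 30 };

	printf("parse: %d\n", parse_hdr(&bad));	/* rejected, not crashed */
	return 0;
}
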
diff --git a/fs/pipe.c b/fs/pipe.c
index 68f1f8e..da42f7d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -441,7 +441,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync(&pipe->wait);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
  			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 		}
 		pipe_wait(pipe);
@@ -450,7 +450,7 @@
 
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
@@ -612,7 +612,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync(&pipe->wait);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
 			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 			do_wakeup = 0;
 		}
@@ -623,7 +623,7 @@
 out:
 	mutex_unlock(&inode->i_mutex);
 	if (do_wakeup) {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
 	if (ret > 0)
@@ -715,7 +715,7 @@
 	if (!pipe->readers && !pipe->writers) {
 		free_pipe_info(inode);
 	} else {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
@@ -1004,7 +1004,6 @@
 		goto err_inode;
 	path.mnt = mntget(pipe_mnt);
 
-	d_set_d_op(path.dentry, &pipefs_dentry_operations);
 	d_instantiate(path.dentry, inode);
 
 	err = -ENFILE;
@@ -1266,7 +1265,8 @@
 static struct dentry *pipefs_mount(struct file_system_type *fs_type,
 			 int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "pipe:", &pipefs_ops, PIPEFS_MAGIC);
+	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
+			&pipefs_dentry_operations, PIPEFS_MAGIC);
 }
 
 static struct file_system_type pipe_fs_type = {
@@ -1292,7 +1292,7 @@
 static void __exit exit_pipe_fs(void)
 {
 	unregister_filesystem(&pipe_fs_type);
-	mntput_long(pipe_mnt);
+	mntput(pipe_mnt);
 }
 
 fs_initcall(init_pipe_fs);
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 39df95a..b1cf6bf 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -22,6 +22,7 @@
 
 #include <linux/errno.h>
 
+EXPORT_SYMBOL(posix_acl_init);
 EXPORT_SYMBOL(posix_acl_alloc);
 EXPORT_SYMBOL(posix_acl_clone);
 EXPORT_SYMBOL(posix_acl_valid);
@@ -32,6 +33,16 @@
 EXPORT_SYMBOL(posix_acl_permission);
 
 /*
+ * Init a fresh posix_acl
+ */
+void
+posix_acl_init(struct posix_acl *acl, int count)
+{
+	atomic_set(&acl->a_refcount, 1);
+	acl->a_count = count;
+}
+
+/*
  * Allocate a new ACL with the specified number of entries.
  */
 struct posix_acl *
@@ -40,10 +51,8 @@
 	const size_t size = sizeof(struct posix_acl) +
 	                    count * sizeof(struct posix_acl_entry);
 	struct posix_acl *acl = kmalloc(size, flags);
-	if (acl) {
-		atomic_set(&acl->a_refcount, 1);
-		acl->a_count = count;
-	}
+	if (acl)
+		posix_acl_init(acl, count);
 	return acl;
 }
 
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 6a00688..15af622 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -1,5 +1,5 @@
 config PROC_FS
-	bool "/proc file system support" if EMBEDDED
+	bool "/proc file system support" if EXPERT
 	default y
 	help
 	  This is a virtual file system providing information about the status
@@ -40,7 +40,7 @@
         Exports the dump image of crashed kernel in ELF format.
 
 config PROC_SYSCTL
-	bool "Sysctl support (/proc/sys)" if EMBEDDED
+	bool "Sysctl support (/proc/sys)" if EXPERT
 	depends on PROC_FS
 	select SYSCTL
 	default y
@@ -61,7 +61,7 @@
 config PROC_PAGE_MONITOR
  	default y
 	depends on PROC_FS && MMU
-	bool "Enable /proc page monitoring" if EMBEDDED
+	bool "Enable /proc page monitoring" if EXPERT
  	help
 	  Various /proc files exist to monitor process memory utilization:
 	  /proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 288a49e0..df434c5 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -10,12 +10,12 @@
 proc-y       += inode.o root.o base.o generic.o array.o \
 		proc_tty.o
 proc-y	+= cmdline.o
+proc-y	+= consoles.o
 proc-y	+= cpuinfo.o
 proc-y	+= devices.o
 proc-y	+= interrupts.o
 proc-y	+= loadavg.o
 proc-y	+= meminfo.o
-proc-y	+= proc_console.o
 proc-y	+= stat.o
 proc-y	+= uptime.o
 proc-y	+= version.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index fff6572..7c99c1c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -95,7 +95,7 @@
 
 	get_task_comm(tcomm, p);
 
-	seq_printf(m, "Name:\t");
+	seq_puts(m, "Name:\t");
 	end = m->buf + m->size;
 	buf = m->buf + m->count;
 	name = tcomm;
@@ -122,7 +122,7 @@
 		buf++;
 	}
 	m->count = buf - m->buf;
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 /*
@@ -208,7 +208,7 @@
 		seq_printf(m, "%d ", GROUP_AT(group_info, g));
 	put_cred(cred);
 
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static void render_sigset_t(struct seq_file *m, const char *header,
@@ -216,7 +216,7 @@
 {
 	int i;
 
-	seq_printf(m, "%s", header);
+	seq_puts(m, header);
 
 	i = _NSIG;
 	do {
@@ -230,7 +230,7 @@
 		seq_printf(m, "%x", x);
 	} while (i >= 4);
 
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
@@ -291,12 +291,12 @@
 {
 	unsigned __capi;
 
-	seq_printf(m, "%s", header);
+	seq_puts(m, header);
 	CAP_FOR_EACH_U32(__capi) {
 		seq_printf(m, "%08x",
 			   a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
@@ -329,12 +329,12 @@
 
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
-	seq_printf(m, "Cpus_allowed:\t");
+	seq_puts(m, "Cpus_allowed:\t");
 	seq_cpumask(m, &task->cpus_allowed);
-	seq_printf(m, "\n");
-	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_putc(m, '\n');
+	seq_puts(m, "Cpus_allowed_list:\t");
 	seq_cpumask_list(m, &task->cpus_allowed);
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
@@ -353,9 +353,6 @@
 	task_cap(m, task);
 	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
-#if defined(CONFIG_S390)
-	task_show_regs(m, task);
-#endif
 	task_context_switch_counts(m, task);
 	return 0;
 }
@@ -535,15 +532,15 @@
 int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
 {
-	int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
+	unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
 	struct mm_struct *mm = get_task_mm(task);
 
 	if (mm) {
 		size = task_statm(mm, &shared, &text, &data, &resident);
 		mmput(mm);
 	}
-	seq_printf(m, "%d %d %d %d %d %d %d\n",
-			size, resident, shared, text, lib, data, 0);
+	seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
+			size, resident, shared, text, data);
 
 	return 0;
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b20962c..d49c4b5 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -373,26 +373,20 @@
 		return -ESRCH;
 	seq_puts(m, "Latency Top version : v0.1\n");
 	for (i = 0; i < 32; i++) {
-		if (task->latency_record[i].backtrace[0]) {
+		struct latency_record *lr = &task->latency_record[i];
+		if (lr->backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %li %li ",
-				task->latency_record[i].count,
-				task->latency_record[i].time,
-				task->latency_record[i].max);
+			seq_printf(m, "%i %li %li",
+				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_SYMBOL_LEN];
-				char *c;
-				if (!task->latency_record[i].backtrace[q])
+				unsigned long bt = lr->backtrace[q];
+				if (!bt)
 					break;
-				if (task->latency_record[i].backtrace[q] == ULONG_MAX)
+				if (bt == ULONG_MAX)
 					break;
-				sprint_symbol(sym, task->latency_record[i].backtrace[q]);
-				c = strchr(sym, '+');
-				if (c)
-					*c = 0;
-				seq_printf(m, "%s ", sym);
+				seq_printf(m, " %ps", (void *)bt);
 			}
-			seq_printf(m, "\n");
+			seq_putc(m, '\n');
 		}
 
 	}
@@ -751,14 +745,7 @@
 
 static int proc_single_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-	ret = single_open(filp, proc_single_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, proc_single_show, inode);
 }
 
 static const struct file_operations proc_single_file_operations = {
@@ -1164,7 +1151,7 @@
 		goto err_task_lock;
 	}
 
-	if (oom_score_adj < task->signal->oom_score_adj &&
+	if (oom_score_adj < task->signal->oom_score_adj_min &&
 			!capable(CAP_SYS_RESOURCE)) {
 		err = -EACCES;
 		goto err_sighand;
@@ -1177,6 +1164,8 @@
 			atomic_dec(&task->mm->oom_disable_count);
 	}
 	task->signal->oom_score_adj = oom_score_adj;
+	if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
+		task->signal->oom_score_adj_min = oom_score_adj;
 	/*
 	 * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
 	 * always attainable.
@@ -1386,15 +1375,7 @@
 
 static int sched_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-
-	ret = single_open(filp, sched_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, sched_show, inode);
 }
 
 static const struct file_operations proc_pid_sched_operations = {
@@ -1530,15 +1511,7 @@
 
 static int comm_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-
-	ret = single_open(filp, comm_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, comm_show, inode);
 }
 
 static const struct file_operations proc_pid_set_comm_operations = {
@@ -2647,35 +2620,6 @@
 		&proc_self_inode_operations, NULL, {}),
 };
 
-/*
- *	Exceptional case: normally we are not allowed to unhash a busy
- * directory. In this case, however, we can do it - no aliasing problems
- * due to the way we treat inodes.
- */
-static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-	struct inode *inode;
-	struct task_struct *task;
-
-	if (nd->flags & LOOKUP_RCU)
-		return -ECHILD;
-
-	inode = dentry->d_inode;
-	task = get_proc_task(inode);
-	if (task) {
-		put_task_struct(task);
-		return 1;
-	}
-	d_drop(dentry);
-	return 0;
-}
-
-static const struct dentry_operations proc_base_dentry_operations =
-{
-	.d_revalidate	= proc_base_revalidate,
-	.d_delete	= pid_delete_dentry,
-};
-
 static struct dentry *proc_base_instantiate(struct inode *dir,
 	struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
@@ -2712,7 +2656,6 @@
 	if (p->fop)
 		inode->i_fop = p->fop;
 	ei->op = p->op;
-	d_set_d_op(dentry, &proc_base_dentry_operations);
 	d_add(dentry, inode);
 	error = NULL;
 out:
diff --git a/fs/proc/proc_console.c b/fs/proc/consoles.c
similarity index 94%
rename from fs/proc/proc_console.c
rename to fs/proc/consoles.c
index 8a70760..b701eaa 100644
--- a/fs/proc/proc_console.c
+++ b/fs/proc/consoles.c
@@ -67,7 +67,7 @@
 	struct console *con;
 	loff_t off = 0;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(con)
 		if (off++ == *pos)
 			break;
@@ -84,7 +84,7 @@
 
 static void c_stop(struct seq_file *m, void *v)
 {
-	release_console_sem();
+	console_unlock();
 }
 
 static const struct seq_operations consoles_op = {
@@ -106,9 +106,9 @@
 	.release	= seq_release,
 };
 
-static int register_proc_consoles(void)
+static int __init proc_consoles_init(void)
 {
 	proc_create("consoles", 0, NULL, &proc_consoles_operations);
 	return 0;
 }
-module_init(register_proc_consoles);
+module_init(proc_consoles_init);
diff --git a/fs/proc/devices.c b/fs/proc/devices.c
index 59ee7da..b143471 100644
--- a/fs/proc/devices.c
+++ b/fs/proc/devices.c
@@ -9,14 +9,14 @@
 
 	if (i < CHRDEV_MAJOR_HASH_SIZE) {
 		if (i == 0)
-			seq_printf(f, "Character devices:\n");
+			seq_puts(f, "Character devices:\n");
 		chrdev_show(f, i);
 	}
 #ifdef CONFIG_BLOCK
 	else {
 		i -= CHRDEV_MAJOR_HASH_SIZE;
 		if (i == 0)
-			seq_printf(f, "\nBlock devices:\n");
+			seq_puts(f, "\nBlock devices:\n");
 		blkdev_show(f, i);
 	}
 #endif
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index f766be2..01e07f2 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -425,13 +425,10 @@
 		if (de->namelen != dentry->d_name.len)
 			continue;
 		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
-			unsigned int ino;
-
-			ino = de->low_ino;
 			pde_get(de);
 			spin_unlock(&proc_subdir_lock);
 			error = -EINVAL;
-			inode = proc_get_inode(dir->i_sb, ino, de);
+			inode = proc_get_inode(dir->i_sb, de);
 			goto out_unlock;
 		}
 	}
@@ -768,12 +765,7 @@
 
 static void free_proc_entry(struct proc_dir_entry *de)
 {
-	unsigned int ino = de->low_ino;
-
-	if (ino < PROC_DYNAMIC_FIRST)
-		return;
-
-	release_inode_number(ino);
+	release_inode_number(de->low_ino);
 
 	if (S_ISLNK(de->mode))
 		kfree(de->data);
@@ -834,12 +826,9 @@
 
 		wait_for_completion(de->pde_unload_completion);
 
-		goto continue_removing;
+		spin_lock(&de->pde_unload_lock);
 	}
-	spin_unlock(&de->pde_unload_lock);
 
-continue_removing:
-	spin_lock(&de->pde_unload_lock);
 	while (!list_empty(&de->pde_openers)) {
 		struct pde_opener *pdeo;
 
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 6bcb926..d6a7ca1 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -27,6 +27,7 @@
 static void proc_evict_inode(struct inode *inode)
 {
 	struct proc_dir_entry *de;
+	struct ctl_table_header *head;
 
 	truncate_inode_pages(&inode->i_data, 0);
 	end_writeback(inode);
@@ -38,8 +39,11 @@
 	de = PROC_I(inode)->pde;
 	if (de)
 		pde_put(de);
-	if (PROC_I(inode)->sysctl)
-		sysctl_head_put(PROC_I(inode)->sysctl);
+	head = PROC_I(inode)->sysctl;
+	if (head) {
+		rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
+		sysctl_head_put(head);
+	}
 }
 
 struct vfsmount *proc_mnt;
@@ -416,12 +420,11 @@
 };
 #endif
 
-struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
-				struct proc_dir_entry *de)
+struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
 	struct inode * inode;
 
-	inode = iget_locked(sb, ino);
+	inode = iget_locked(sb, de->low_ino);
 	if (!inode)
 		return NULL;
 	if (inode->i_state & I_NEW) {
@@ -471,7 +474,7 @@
 	s->s_time_gran = 1;
 	
 	pde_get(&proc_root);
-	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
+	root_inode = proc_get_inode(s, &proc_root);
 	if (!root_inode)
 		goto out_no_root;
 	root_inode->i_uid = 0;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 1f24a3e..9ad561d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -96,7 +96,8 @@
 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
 unsigned long task_vsize(struct mm_struct *);
-int task_statm(struct mm_struct *, int *, int *, int *, int *);
+unsigned long task_statm(struct mm_struct *,
+	unsigned long *, unsigned long *, unsigned long *, unsigned long *);
 void task_mem(struct seq_file *, struct mm_struct *);
 
 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
@@ -108,7 +109,7 @@
 
 extern struct vfsmount *proc_mnt;
 int proc_fill_super(struct super_block *);
-struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *);
+struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
 
 /*
  * These are generic /proc routines that use the internal
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 6f37c39..d245cb2 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -558,7 +558,7 @@
 static const struct file_operations proc_kcore_operations = {
 	.read		= read_kcore,
 	.open		= open_kcore,
-	.llseek		= generic_file_llseek,
+	.llseek		= default_llseek,
 };
 
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index a65239c..ed257d1 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -101,6 +101,9 @@
 #ifdef CONFIG_MEMORY_FAILURE
 		"HardwareCorrupted: %5lu kB\n"
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		"AnonHugePages:  %8lu kB\n"
+#endif
 		,
 		K(i.totalram),
 		K(i.freeram),
@@ -128,7 +131,12 @@
 		K(i.freeswap),
 		K(global_page_state(NR_FILE_DIRTY)),
 		K(global_page_state(NR_WRITEBACK)),
-		K(global_page_state(NR_ANON_PAGES)),
+		K(global_page_state(NR_ANON_PAGES)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+		  HPAGE_PMD_NR
+#endif
+		  ),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(global_page_state(NR_SHMEM)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE) +
@@ -151,6 +159,10 @@
 #ifdef CONFIG_MEMORY_FAILURE
 		,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+		   HPAGE_PMD_NR)
+#endif
 		);
 
 	hugetlb_report_meminfo(m);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 3b8b4566..6d8e6a9 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -40,7 +40,7 @@
 			ppage = pfn_to_page(pfn);
 		else
 			ppage = NULL;
-		if (!ppage)
+		if (!ppage || PageSlab(ppage))
 			pcount = 0;
 		else
 			pcount = page_mapcount(ppage);
@@ -116,15 +116,17 @@
 	if (PageHuge(page))
 		u |= 1 << KPF_HUGE;
 
+	/*
+	 * Caveats on high order pages: page->_count will only be set
+	 * to -1 on the head page; SLUB/SLQB do the same for PG_slab;
+	 * SLOB won't set PG_slab at all on compound pages.
+	 */
+	if (PageBuddy(page))
+		u |= 1 << KPF_BUDDY;
+
 	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
 
-	/*
-	 * Caveats on high order pages:
-	 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
-	 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
-	 */
 	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
-	u |= kpf_copy_bit(k, KPF_BUDDY,		PG_buddy);
 
 	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
 	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index d9396a4..927cbd1 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -233,7 +233,7 @@
 		return;
 	root = of_find_node_by_path("/");
 	if (root == NULL) {
-		printk(KERN_ERR "/proc/device-tree: can't find root\n");
+		pr_debug("/proc/device-tree: can't find root\n");
 		return;
 	}
 	proc_device_tree_add_node(root, proc_device_tree);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 09a1f92..8eb2522 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -408,15 +408,18 @@
 		const struct dentry *dentry, const struct inode *inode,
 		unsigned int len, const char *str, const struct qstr *name)
 {
+	struct ctl_table_header *head;
 	/* Although proc doesn't have negative dentries, rcu-walk means
 	 * that inode here can be NULL */
+	/* AV: can it, indeed? */
 	if (!inode)
-		return 0;
+		return 1;
 	if (name->len != len)
 		return 1;
 	if (memcmp(name->name, str, len))
 		return 1;
-	return !sysctl_is_seen(PROC_I(inode)->sysctl);
+	head = rcu_dereference(PROC_I(inode)->sysctl);
+	return !head || !sysctl_is_seen(head);
 }
 
 static const struct dentry_operations proc_sys_dentry_operations = {
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 83adcc8..cb761f0 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -36,27 +36,27 @@
 	}
 	switch (p->type) {
 	case TTY_DRIVER_TYPE_SYSTEM:
-		seq_printf(m, "system");
+		seq_puts(m, "system");
 		if (p->subtype == SYSTEM_TYPE_TTY)
-			seq_printf(m, ":/dev/tty");
+			seq_puts(m, ":/dev/tty");
 		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
-			seq_printf(m, ":console");
+			seq_puts(m, ":console");
 		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
-			seq_printf(m, ":vtmaster");
+			seq_puts(m, ":vtmaster");
 		break;
 	case TTY_DRIVER_TYPE_CONSOLE:
-		seq_printf(m, "console");
+		seq_puts(m, "console");
 		break;
 	case TTY_DRIVER_TYPE_SERIAL:
-		seq_printf(m, "serial");
+		seq_puts(m, "serial");
 		break;
 	case TTY_DRIVER_TYPE_PTY:
 		if (p->subtype == PTY_TYPE_MASTER)
-			seq_printf(m, "pty:master");
+			seq_puts(m, "pty:master");
 		else if (p->subtype == PTY_TYPE_SLAVE)
-			seq_printf(m, "pty:slave");
+			seq_puts(m, "pty:slave");
 		else
-			seq_printf(m, "pty");
+			seq_puts(m, "pty");
 		break;
 	default:
 		seq_printf(m, "type:%d.%d", p->type, p->subtype);
@@ -74,19 +74,19 @@
 		/* pseudo-drivers first */
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
-		seq_printf(m, "system:/dev/tty\n");
+		seq_puts(m, "system:/dev/tty\n");
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
-		seq_printf(m, "system:console\n");
+		seq_puts(m, "system:console\n");
 #ifdef CONFIG_UNIX98_PTYS
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
-		seq_printf(m, "system\n");
+		seq_puts(m, "system\n");
 #endif
 #ifdef CONFIG_VT
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
 		seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
-		seq_printf(m, "system:vtmaster\n");
+		seq_puts(m, "system:vtmaster\n");
 #endif
 	}
 
diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index 3799473..62604be 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -10,16 +10,16 @@
 {
 	int i, j;
 
-	seq_printf(p, "                    ");
+	seq_puts(p, "                    ");
 	for_each_possible_cpu(i)
 		seq_printf(p, "CPU%-8d", i);
-	seq_printf(p, "\n");
+	seq_putc(p, '\n');
 
 	for (i = 0; i < NR_SOFTIRQS; i++) {
 		seq_printf(p, "%12s:", softirq_to_name[i]);
 		for_each_possible_cpu(j)
 			seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
-		seq_printf(p, "\n");
+		seq_putc(p, '\n');
 	}
 	return 0;
 }
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index e15a19c..1cffa2b 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -126,7 +126,7 @@
 
 	for (i = 0; i < NR_SOFTIRQS; i++)
 		seq_printf(p, " %u", per_softirq_sums[i]);
-	seq_printf(p, "\n");
+	seq_putc(p, '\n');
 
 	return 0;
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c126c83..60b9148 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -66,8 +66,9 @@
 	return PAGE_SIZE * mm->total_vm;
 }
 
-int task_statm(struct mm_struct *mm, int *shared, int *text,
-	       int *data, int *resident)
+unsigned long task_statm(struct mm_struct *mm,
+			 unsigned long *shared, unsigned long *text,
+			 unsigned long *data, unsigned long *resident)
 {
 	*shared = get_mm_counter(mm, MM_FILEPAGES);
 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
@@ -417,7 +418,8 @@
 		   "Anonymous:      %8lu kB\n"
 		   "Swap:           %8lu kB\n"
 		   "KernelPageSize: %8lu kB\n"
-		   "MMUPageSize:    %8lu kB\n",
+		   "MMUPageSize:    %8lu kB\n"
+		   "Locked:         %8lu kB\n",
 		   (vma->vm_end - vma->vm_start) >> 10,
 		   mss.resident >> 10,
 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
@@ -429,7 +431,9 @@
 		   mss.anonymous >> 10,
 		   mss.swap >> 10,
 		   vma_kernel_pagesize(vma) >> 10,
-		   vma_mmu_pagesize(vma) >> 10);
+		   vma_mmu_pagesize(vma) >> 10,
+		   (vma->vm_flags & VM_LOCKED) ?
+			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
 	if (m->count < m->size)  /* vma is copied successfully */
 		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index cb6306e..b535d3e 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -92,13 +92,14 @@
 	return vsize;
 }
 
-int task_statm(struct mm_struct *mm, int *shared, int *text,
-	       int *data, int *resident)
+unsigned long task_statm(struct mm_struct *mm,
+			 unsigned long *shared, unsigned long *text,
+			 unsigned long *data, unsigned long *resident)
 {
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *p;
-	int size = kobjsize(mm);
+	unsigned long size = kobjsize(mm);
 
 	down_read(&mm->mmap_sem);
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 0fed41e..a2a622e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -133,16 +133,20 @@
 EXPORT_SYMBOL(dq_data_lock);
 
 void __quota_error(struct super_block *sb, const char *func,
-		  const char *fmt, ...)
+		   const char *fmt, ...)
 {
-	va_list args;
-
 	if (printk_ratelimit()) {
+		va_list args;
+		struct va_format vaf;
+
 		va_start(args, fmt);
-		printk(KERN_ERR "Quota error (device %s): %s: ",
-		       sb->s_id, func);
-		vprintk(fmt, args);
-		printk("\n");
+
+		vaf.fmt = fmt;
+		vaf.va = &args;
+
+		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
+		       sb->s_id, func, &vaf);
+
 		va_end(args);
 	}
 }
@@ -2185,8 +2189,8 @@
 }
 EXPORT_SYMBOL(dquot_resume);
 
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
-		      struct path *path)
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
+		   struct path *path)
 {
 	int error = security_quota_on(path->dentry);
 	if (error)
@@ -2200,20 +2204,6 @@
 					     DQUOT_LIMITS_ENABLED);
 	return error;
 }
-EXPORT_SYMBOL(dquot_quota_on_path);
-
-int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name)
-{
-	struct path path;
-	int error;
-
-	error = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (!error) {
-		error = dquot_quota_on_path(sb, type, format_id, &path);
-		path_put(&path);
-	}
-	return error;
-}
 EXPORT_SYMBOL(dquot_quota_on);
 
 /*
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b299961..b34bdb2 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -64,18 +64,15 @@
 }
 
 static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
-		         void __user *addr)
+		         struct path *path)
 {
-	char *pathname;
-	int ret = -ENOSYS;
-
-	pathname = getname(addr);
-	if (IS_ERR(pathname))
-		return PTR_ERR(pathname);
-	if (sb->s_qcop->quota_on)
-		ret = sb->s_qcop->quota_on(sb, type, id, pathname);
-	putname(pathname);
-	return ret;
+	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+		return -ENOSYS;
+	if (sb->s_qcop->quota_on_meta)
+		return sb->s_qcop->quota_on_meta(sb, type, id);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+	return sb->s_qcop->quota_on(sb, type, id, path);
 }
 
 static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
@@ -241,7 +238,7 @@
 
 /* Copy parameters and call proper function */
 static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
-		       void __user *addr)
+		       void __user *addr, struct path *path)
 {
 	int ret;
 
@@ -256,7 +253,7 @@
 
 	switch (cmd) {
 	case Q_QUOTAON:
-		return quota_quotaon(sb, type, cmd, id, addr);
+		return quota_quotaon(sb, type, cmd, id, path);
 	case Q_QUOTAOFF:
 		if (!sb->s_qcop->quota_off)
 			return -ENOSYS;
@@ -335,6 +332,7 @@
 {
 	uint cmds, type;
 	struct super_block *sb = NULL;
+	struct path path, *pathp = NULL;
 	int ret;
 
 	cmds = cmd >> SUBCMDSHIFT;
@@ -351,12 +349,27 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * Path for quotaon has to be resolved before grabbing superblock
+	 * because that gets s_umount sem which is also possibly needed by path
+	 * resolution (think about autofs) and thus deadlocks could arise.
+	 */
+	if (cmds == Q_QUOTAON) {
+		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+		if (ret)
+			pathp = ERR_PTR(ret);
+		else
+			pathp = &path;
+	}
+
 	sb = quotactl_block(special);
 	if (IS_ERR(sb))
 		return PTR_ERR(sb);
 
-	ret = do_quotactl(sb, type, cmds, id, addr);
+	ret = do_quotactl(sb, type, cmds, id, addr, pathp);
 
 	drop_super(sb);
+	if (pathp && !IS_ERR(pathp))
+		path_put(pathp);
 	return ret;
 }
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 9e48874..e41c1becf 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -468,8 +468,8 @@
 		return -ENOMEM;
 	ret = read_blk(info, *blk, buf);
 	if (ret < 0) {
-		quota_error(dquot->dq_sb, "Can't read quota data "
-			    "block %u", blk);
+		quota_error(dquot->dq_sb, "Can't read quota data block %u",
+			    *blk);
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -493,8 +493,9 @@
 		} else {
 			ret = write_blk(info, *blk, buf);
 			if (ret < 0)
-				quota_error(dquot->dq_sb, "Can't write quota "
-					    "tree block %u", blk);
+				quota_error(dquot->dq_sb,
+					    "Can't write quota tree block %u",
+					    *blk);
 		}
 	}
 out_buf:
diff --git a/fs/read_write.c b/fs/read_write.c
index 5d431ba..5520f8a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -30,18 +30,9 @@
 
 EXPORT_SYMBOL(generic_ro_fops);
 
-static int
-__negative_fpos_check(struct file *file, loff_t pos, size_t count)
+static inline int unsigned_offsets(struct file *file)
 {
-	/*
-	 * pos or pos+count is negative here, check overflow.
-	 * too big "count" will be caught in rw_verify_area().
-	 */
-	if ((pos < 0) && (pos + count < pos))
-		return -EOVERFLOW;
-	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
-		return 0;
-	return -EINVAL;
+	return file->f_mode & FMODE_UNSIGNED_OFFSET;
 }
 
 /**
@@ -75,7 +66,7 @@
 		break;
 	}
 
-	if (offset < 0 && __negative_fpos_check(file, offset, 0))
+	if (offset < 0 && !unsigned_offsets(file))
 		return -EINVAL;
 	if (offset > inode->i_sb->s_maxbytes)
 		return -EINVAL;
@@ -152,7 +143,7 @@
 			offset += file->f_pos;
 	}
 	retval = -EINVAL;
-	if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) {
+	if (offset >= 0 || unsigned_offsets(file)) {
 		if (offset != file->f_pos) {
 			file->f_pos = offset;
 			file->f_version = 0;
@@ -252,9 +243,13 @@
 	if (unlikely((ssize_t) count < 0))
 		return retval;
 	pos = *ppos;
-	if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) {
-		retval = __negative_fpos_check(file, pos, count);
-		if (retval)
+	if (unlikely(pos < 0)) {
+		if (!unsigned_offsets(file))
+			return retval;
+		if (count >= -pos) /* both values are in 0..LLONG_MAX */
+			return -EOVERFLOW;
+	} else if (unlikely((loff_t) (pos + count) < 0)) {
+		if (!unsigned_offsets(file))
 			return retval;
 	}
 
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index d31bce1..3eea859 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2551,8 +2551,6 @@
 	result = 0;
 
 	if (journal->j_dev_bd != NULL) {
-		if (journal->j_dev_bd->bd_dev != super->s_dev)
-			bd_release(journal->j_dev_bd);
 		result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
 		journal->j_dev_bd = NULL;
 	}
@@ -2570,7 +2568,7 @@
 {
 	int result;
 	dev_t jdev;
-	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
+	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
 	char b[BDEVNAME_SIZE];
 
 	result = 0;
@@ -2584,7 +2582,10 @@
 
 	/* there is no "jdev" option and journal is on separate device */
 	if ((!jdev_name || !jdev_name[0])) {
-		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
+		if (jdev == super->s_dev)
+			blkdev_mode &= ~FMODE_EXCL;
+		journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
+						      journal);
 		journal->j_dev_mode = blkdev_mode;
 		if (IS_ERR(journal->j_dev_bd)) {
 			result = PTR_ERR(journal->j_dev_bd);
@@ -2593,22 +2594,14 @@
 					 "cannot init journal device '%s': %i",
 					 __bdevname(jdev, b), result);
 			return result;
-		} else if (jdev != super->s_dev) {
-			result = bd_claim(journal->j_dev_bd, journal);
-			if (result) {
-				blkdev_put(journal->j_dev_bd, blkdev_mode);
-				return result;
-			}
-
+		} else if (jdev != super->s_dev)
 			set_blocksize(journal->j_dev_bd, super->s_blocksize);
-		}
 
 		return 0;
 	}
 
 	journal->j_dev_mode = blkdev_mode;
-	journal->j_dev_bd = open_bdev_exclusive(jdev_name,
-						blkdev_mode, journal);
+	journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
 	if (IS_ERR(journal->j_dev_bd)) {
 		result = PTR_ERR(journal->j_dev_bd);
 		journal->j_dev_bd = NULL;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index ba5f51e..68fdf45 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -771,7 +771,7 @@
 					EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
 					dentry, inode, &security);
 	if (retval) {
-		dir->i_nlink--;
+		DEC_DIR_INODE_NLINK(dir)
 		goto out_failed;
 	}
 
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index adbc6f5..45de98b 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -586,13 +586,13 @@
 	va_list args;
 	int mode, first, last;
 
-	va_start(args, bh);
-
 	if (!bh) {
 		printk("print_block: buffer is NULL\n");
 		return;
 	}
 
+	va_start(args, bh);
+
 	mode = va_arg(args, int);
 	first = va_arg(args, int);
 	last = va_arg(args, int);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 2575682..0aab04f 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -632,7 +632,7 @@
 static int reiserfs_release_dquot(struct dquot *);
 static int reiserfs_mark_dquot_dirty(struct dquot *);
 static int reiserfs_write_info(struct super_block *, int);
-static int reiserfs_quota_on(struct super_block *, int, int, char *);
+static int reiserfs_quota_on(struct super_block *, int, int, struct path *);
 
 static const struct dquot_operations reiserfs_quota_operations = {
 	.write_dquot = reiserfs_write_dquot,
@@ -2048,25 +2048,21 @@
  * Standard function to be called on quota_on
  */
 static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
-			     char *name)
+			     struct path *path)
 {
 	int err;
-	struct path path;
 	struct inode *inode;
 	struct reiserfs_transaction_handle th;
 
 	if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
 		return -EINVAL;
 
-	err = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (err)
-		return err;
 	/* Quotafile not on the same filesystem? */
-	if (path.mnt->mnt_sb != sb) {
+	if (path->mnt->mnt_sb != sb) {
 		err = -EXDEV;
 		goto out;
 	}
-	inode = path.dentry->d_inode;
+	inode = path->dentry->d_inode;
 	/* We must not pack tails for quota files on reiserfs for quota IO to work */
 	if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
 		err = reiserfs_unpack(inode, NULL);
@@ -2082,7 +2078,7 @@
 	/* Journaling quota? */
 	if (REISERFS_SB(sb)->s_qf_names[type]) {
 		/* Quotafile not of fs root? */
-		if (path.dentry->d_parent != sb->s_root)
+		if (path->dentry->d_parent != sb->s_root)
 			reiserfs_warning(sb, "super-6521",
 				 "Quota file not on filesystem root. "
 				 "Journalled quota will not work.");
@@ -2101,9 +2097,8 @@
 		if (err)
 			goto out;
 	}
-	err = dquot_quota_on_path(sb, type, format_id, &path);
+	err = dquot_quota_on(sb, type, format_id, path);
 out:
-	path_put(&path);
 	return err;
 }
 
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 3cfb2e9..5c11ca8 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -978,8 +978,6 @@
 
 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	if (nd->flags & LOOKUP_RCU)
-		return -ECHILD;
 	return -EPERM;
 }
 
diff --git a/fs/select.c b/fs/select.c
index b7b10aa..e56560d 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -306,6 +306,8 @@
 		rts.tv_sec = rts.tv_nsec = 0;
 
 	if (timeval) {
+		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
+			memset(&rtv, 0, sizeof(rtv));
 		rtv.tv_sec = rts.tv_sec;
 		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
 
diff --git a/fs/splice.c b/fs/splice.c
index ce2f025..50a5d978 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -682,19 +682,14 @@
 {
 	struct file *file = sd->u.file;
 	loff_t pos = sd->pos;
-	int ret, more;
+	int more;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (!ret) {
-		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
-		if (file->f_op && file->f_op->sendpage)
-			ret = file->f_op->sendpage(file, buf->page, buf->offset,
-						   sd->len, &pos, more);
-		else
-			ret = -EINVAL;
-	}
+	if (!likely(file->f_op && file->f_op->sendpage))
+		return -EINVAL;
 
-	return ret;
+	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
+	return file->f_op->sendpage(file, buf->page, buf->offset,
+				    sd->len, &pos, more);
 }
 
 /*
@@ -727,13 +722,6 @@
 	void *fsdata;
 	int ret;
 
-	/*
-	 * make sure the data in this buffer is uptodate
-	 */
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
-
 	offset = sd->pos & ~PAGE_CACHE_MASK;
 
 	this_len = sd->len;
@@ -805,12 +793,17 @@
 		if (sd->len > sd->total_len)
 			sd->len = sd->total_len;
 
-		ret = actor(pipe, buf, sd);
-		if (ret <= 0) {
+		ret = buf->ops->confirm(pipe, buf);
+		if (unlikely(ret)) {
 			if (ret == -ENODATA)
 				ret = 0;
 			return ret;
 		}
+
+		ret = actor(pipe, buf, sd);
+		if (ret <= 0)
+			return ret;
+
 		buf->offset += ret;
 		buf->len -= ret;
 
@@ -1044,10 +1037,6 @@
 	int ret;
 	void *data;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (ret)
-		return ret;
-
 	data = buf->ops->map(pipe, buf, 0);
 	ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
 	buf->ops->unmap(pipe, buf, data);
@@ -1495,10 +1484,6 @@
 	char *src;
 	int ret;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
-
 	/*
 	 * See if we can use the atomic maps, by prefaulting in the
 	 * pages and doing an atomic copy
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index e5f63da..aa68a8a 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -29,7 +29,6 @@
 config SQUASHFS_XATTR
 	bool "Squashfs XATTR support"
 	depends on SQUASHFS
-	default n
 	help
 	  Saying Y here includes support for extended attributes (xattrs).
 	  Xattrs are name:value pairs associated with inodes by
@@ -40,7 +39,6 @@
 config SQUASHFS_LZO
 	bool "Include support for LZO compressed file systems"
 	depends on SQUASHFS
-	default n
 	select LZO_DECOMPRESS
 	help
 	  Saying Y here includes support for reading Squashfs file systems
@@ -53,10 +51,24 @@
 
 	  If unsure, say N.
 
+config SQUASHFS_XZ
+	bool "Include support for XZ compressed file systems"
+	depends on SQUASHFS
+	select XZ_DEC
+	help
+	  Saying Y here includes support for reading Squashfs file systems
+	  compressed with XZ compression.  XZ gives better compression than
+	  the default zlib compression, at the expense of greater CPU and
+	  memory overhead.
+
+	  XZ is not the standard compression used in Squashfs and so most
+	  file systems will be readable without selecting this option.
+
+	  If unsure, say N.
+
 config SQUASHFS_EMBEDDED
 	bool "Additional option for memory-constrained systems"
 	depends on SQUASHFS
-	default n
 	help
 	  Saying Y here allows you to specify cache size.
 
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 7672bac..cecf2be 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -7,3 +7,4 @@
 squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o
 squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
 squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 653c030..8ab48bc 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -34,7 +34,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "decompressor.h"
 
@@ -64,6 +63,14 @@
 		*length = (unsigned char) bh->b_data[*offset] |
 			(unsigned char) bh->b_data[*offset + 1] << 8;
 		*offset += 2;
+
+		if (*offset == msblk->devblksize) {
+			put_bh(bh);
+			bh = sb_bread(sb, ++(*cur_index));
+			if (bh == NULL)
+				return NULL;
+			*offset = 0;
+		}
 	}
 
 	return bh;
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 57314be..26b15ae 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -55,7 +55,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 
 /*
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index 24af9ce..a5940e5 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -27,7 +27,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "decompressor.h"
 #include "squashfs.h"
 
@@ -41,23 +40,26 @@
 };
 
 #ifndef CONFIG_SQUASHFS_LZO
-static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = {
+static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
 	NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
 };
 #endif
 
+#ifndef CONFIG_SQUASHFS_XZ
+static const struct squashfs_decompressor squashfs_xz_comp_ops = {
+	NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
+};
+#endif
+
 static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
 	NULL, NULL, NULL, 0, "unknown", 0
 };
 
 static const struct squashfs_decompressor *decompressor[] = {
 	&squashfs_zlib_comp_ops,
-	&squashfs_lzma_unsupported_comp_ops,
-#ifdef CONFIG_SQUASHFS_LZO
 	&squashfs_lzo_comp_ops,
-#else
-	&squashfs_lzo_unsupported_comp_ops,
-#endif
+	&squashfs_xz_comp_ops,
+	&squashfs_lzma_unsupported_comp_ops,
 	&squashfs_unknown_comp_ops
 };
 
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 7425f80..3b305a7 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -52,4 +52,13 @@
 	return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
 		length, srclength, pages);
 }
+
+#ifdef CONFIG_SQUASHFS_XZ
+extern const struct squashfs_decompressor squashfs_xz_comp_ops;
+#endif
+
+#ifdef CONFIG_SQUASHFS_LZO
+extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
+#endif
+
 #endif
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 7c90bbd..7eef571 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -39,7 +39,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 
 /*
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index b7f64bc..d8f3245 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -37,7 +37,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 
 /*
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 5d87789..7da759e 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -29,7 +29,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "decompressor.h"
 
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 5d45569..ba729d8 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -27,11 +27,6 @@
 
 #define WARNING(s, args...)	pr_warning("SQUASHFS: "s, ## args)
 
-static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
-{
-	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
-}
-
 /* block.c */
 extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
 				int, int);
@@ -104,6 +99,3 @@
 
 /* zlib_wrapper.c */
 extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
-
-/* lzo_wrapper.c */
-extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index c5137fc..39533fe 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -238,6 +238,7 @@
 #define ZLIB_COMPRESSION	1
 #define LZMA_COMPRESSION	2
 #define LZO_COMPRESSION		3
+#define XZ_COMPRESSION		4
 
 struct squashfs_super_block {
 	__le32			s_magic;
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index d3e3a37..359baef 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -45,4 +45,10 @@
 	};
 	struct inode	vfs_inode;
 };
+
+
+static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
+{
+	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
+}
 #endif
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
index d33be5d..05385db 100644
--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -32,7 +32,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "xattr.h"
 
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
new file mode 100644
index 0000000..c4eb400
--- /dev/null
+++ b/fs/squashfs/xz_wrapper.c
@@ -0,0 +1,147 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * xz_wrapper.c
+ */
+
+
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/xz.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "decompressor.h"
+
+struct squashfs_xz {
+	struct xz_dec *state;
+	struct xz_buf buf;
+};
+
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk)
+{
+	int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+
+	struct squashfs_xz *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+	if (stream == NULL)
+		goto failed;
+
+	stream->state = xz_dec_init(XZ_PREALLOC, block_size);
+	if (stream->state == NULL)
+		goto failed;
+
+	return stream;
+
+failed:
+	ERROR("Failed to allocate xz workspace\n");
+	kfree(stream);
+	return NULL;
+}
+
+
+static void squashfs_xz_free(void *strm)
+{
+	struct squashfs_xz *stream = strm;
+
+	if (stream) {
+		xz_dec_end(stream->state);
+		kfree(stream);
+	}
+}
+
+
+static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
+	struct buffer_head **bh, int b, int offset, int length, int srclength,
+	int pages)
+{
+	enum xz_ret xz_err;
+	int avail, total = 0, k = 0, page = 0;
+	struct squashfs_xz *stream = msblk->stream;
+
+	mutex_lock(&msblk->read_data_mutex);
+
+	xz_dec_reset(stream->state);
+	stream->buf.in_pos = 0;
+	stream->buf.in_size = 0;
+	stream->buf.out_pos = 0;
+	stream->buf.out_size = PAGE_CACHE_SIZE;
+	stream->buf.out = buffer[page++];
+
+	do {
+		if (stream->buf.in_pos == stream->buf.in_size && k < b) {
+			avail = min(length, msblk->devblksize - offset);
+			length -= avail;
+			wait_on_buffer(bh[k]);
+			if (!buffer_uptodate(bh[k]))
+				goto release_mutex;
+
+			stream->buf.in = bh[k]->b_data + offset;
+			stream->buf.in_size = avail;
+			stream->buf.in_pos = 0;
+			offset = 0;
+		}
+
+		if (stream->buf.out_pos == stream->buf.out_size
+							&& page < pages) {
+			stream->buf.out = buffer[page++];
+			stream->buf.out_pos = 0;
+			total += PAGE_CACHE_SIZE;
+		}
+
+		xz_err = xz_dec_run(stream->state, &stream->buf);
+
+		if (stream->buf.in_pos == stream->buf.in_size && k < b)
+			put_bh(bh[k++]);
+	} while (xz_err == XZ_OK);
+
+	if (xz_err != XZ_STREAM_END) {
+		ERROR("xz_dec_run error, data probably corrupt\n");
+		goto release_mutex;
+	}
+
+	if (k < b) {
+		ERROR("xz_uncompress error, input remaining\n");
+		goto release_mutex;
+	}
+
+	total += stream->buf.out_pos;
+	mutex_unlock(&msblk->read_data_mutex);
+	return total;
+
+release_mutex:
+	mutex_unlock(&msblk->read_data_mutex);
+
+	for (; k < b; k++)
+		put_bh(bh[k]);
+
+	return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_xz_comp_ops = {
+	.init = squashfs_xz_init,
+	.free = squashfs_xz_free,
+	.decompress = squashfs_xz_uncompress,
+	.id = XZ_COMPRESSION,
+	.name = "xz",
+	.supported = 1
+};
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 7a60387..4661ae2 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -29,7 +29,6 @@
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
 #include "squashfs.h"
 #include "decompressor.h"
 
@@ -66,8 +65,8 @@
 	struct buffer_head **bh, int b, int offset, int length, int srclength,
 	int pages)
 {
-	int zlib_err = 0, zlib_init = 0;
-	int avail, bytes, k = 0, page = 0;
+	int zlib_err, zlib_init = 0;
+	int k = 0, page = 0;
 	z_stream *stream = msblk->stream;
 
 	mutex_lock(&msblk->read_data_mutex);
@@ -75,21 +74,14 @@
 	stream->avail_out = 0;
 	stream->avail_in = 0;
 
-	bytes = length;
 	do {
 		if (stream->avail_in == 0 && k < b) {
-			avail = min(bytes, msblk->devblksize - offset);
-			bytes -= avail;
+			int avail = min(length, msblk->devblksize - offset);
+			length -= avail;
 			wait_on_buffer(bh[k]);
 			if (!buffer_uptodate(bh[k]))
 				goto release_mutex;
 
-			if (avail == 0) {
-				offset = 0;
-				put_bh(bh[k++]);
-				continue;
-			}
-
 			stream->next_in = bh[k]->b_data + offset;
 			stream->avail_in = avail;
 			offset = 0;
@@ -128,6 +120,11 @@
 		goto release_mutex;
 	}
 
+	if (k < b) {
+		ERROR("zlib_uncompress error, data remaining\n");
+		goto release_mutex;
+	}
+
 	length = stream->total_out;
 	mutex_unlock(&msblk->read_data_mutex);
 	return length;
diff --git a/fs/stat.c b/fs/stat.c
index 12e90e2..d5c61cf 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -75,11 +75,13 @@
 	int error = -EINVAL;
 	int lookup_flags = 0;
 
-	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+	if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT)) != 0)
 		goto out;
 
 	if (!(flag & AT_SYMLINK_NOFOLLOW))
 		lookup_flags |= LOOKUP_FOLLOW;
+	if (flag & AT_NO_AUTOMOUNT)
+		lookup_flags |= LOOKUP_NO_AUTOMOUNT;
 
 	error = user_path_at(dfd, filename, lookup_flags, &path);
 	if (error)
diff --git a/fs/super.c b/fs/super.c
index 823e061..7e9dd4c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -177,6 +177,11 @@
 	struct file_system_type *fs = s->s_type;
 	if (atomic_dec_and_test(&s->s_active)) {
 		fs->kill_sb(s);
+		/*
+		 * We need to call rcu_barrier so all the delayed rcu free
+		 * inodes are flushed before we release the fs module.
+		 */
+		rcu_barrier();
 		put_filesystem(fs);
 		put_super(s);
 	} else {
@@ -767,13 +772,13 @@
 {
 	struct block_device *bdev;
 	struct super_block *s;
-	fmode_t mode = FMODE_READ;
+	fmode_t mode = FMODE_READ | FMODE_EXCL;
 	int error = 0;
 
 	if (!(flags & MS_RDONLY))
 		mode |= FMODE_WRITE;
 
-	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
+	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
 	if (IS_ERR(bdev))
 		return ERR_CAST(bdev);
 
@@ -802,13 +807,13 @@
 
 		/*
 		 * s_umount nests inside bd_mutex during
-		 * __invalidate_device().  close_bdev_exclusive()
-		 * acquires bd_mutex and can't be called under
-		 * s_umount.  Drop s_umount temporarily.  This is safe
-		 * as we're holding an active reference.
+		 * __invalidate_device().  blkdev_put() acquires
+		 * bd_mutex and can't be called under s_umount.  Drop
+		 * s_umount temporarily.  This is safe as we're
+		 * holding an active reference.
 		 */
 		up_write(&s->s_umount);
-		close_bdev_exclusive(bdev, mode);
+		blkdev_put(bdev, mode);
 		down_write(&s->s_umount);
 	} else {
 		char b[BDEVNAME_SIZE];
@@ -832,7 +837,7 @@
 error_s:
 	error = PTR_ERR(s);
 error_bdev:
-	close_bdev_exclusive(bdev, mode);
+	blkdev_put(bdev, mode);
 error:
 	return ERR_PTR(error);
 }
@@ -863,7 +868,8 @@
 	bdev->bd_super = NULL;
 	generic_shutdown_super(sb);
 	sync_blockdev(bdev);
-	close_bdev_exclusive(bdev, mode);
+	WARN_ON_ONCE(!(mode & FMODE_EXCL));
+	blkdev_put(bdev, mode | FMODE_EXCL);
 }
 
 EXPORT_SYMBOL(kill_block_super);
@@ -1140,7 +1146,7 @@
 	return mnt;
 
  err:
-	mntput_long(mnt);
+	mntput(mnt);
 	return ERR_PTR(err);
 }
 
diff --git a/fs/sysfs/Kconfig b/fs/sysfs/Kconfig
index f4b6758..8c41fea 100644
--- a/fs/sysfs/Kconfig
+++ b/fs/sysfs/Kconfig
@@ -1,5 +1,5 @@
 config SYSFS
-	bool "sysfs file system support" if EMBEDDED
+	bool "sysfs file system support" if EXPERT
 	default y
 	help
 	The sysfs filesystem is a virtual filesystem that the kernel uses to
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index b5e68da..e474fbc 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -48,7 +48,6 @@
 	struct inode * inode = NULL;
 	ino_t ino;
 
-	d_set_d_op(dentry, dir->i_sb->s_root->d_op);
 	if (dentry->d_name.len > SYSV_NAMELEN)
 		return ERR_PTR(-ENAMETOOLONG);
 	ino = sysv_inode_by_name(dentry);
@@ -246,7 +245,6 @@
 		new_de = sysv_find_entry(new_dentry, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
 		sysv_set_link(new_de, new_page, old_inode);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
@@ -258,18 +256,15 @@
 			if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
 		err = sysv_add_link(new_dentry, old_inode);
-		if (err) {
-			inode_dec_link_count(old_inode);
+		if (err)
 			goto out_dir;
-		}
 		if (dir_de)
 			inode_inc_link_count(new_dir);
 	}
 
 	sysv_delete_entry(old_de, old_page);
-	inode_dec_link_count(old_inode);
+	mark_inode_dirty(old_inode);
 
 	if (dir_de) {
 		sysv_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 76712ae..f60c196 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -332,6 +332,10 @@
 	sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type;
 	/* set up enough so that it can read an inode */
 	sb->s_op = &sysv_sops;
+	if (sbi->s_forced_ro)
+		sb->s_flags |= MS_RDONLY;
+	if (sbi->s_truncate)
+		sb->s_d_op = &sysv_dentry_operations;
 	root_inode = sysv_iget(sb, SYSV_ROOT_INO);
 	if (IS_ERR(root_inode)) {
 		printk("SysV FS: get root inode failed\n");
@@ -343,10 +347,6 @@
 		printk("SysV FS: get root dentry failed\n");
 		return 0;
 	}
-	if (sbi->s_forced_ro)
-		sb->s_flags |= MS_RDONLY;
-	if (sbi->s_truncate)
-		d_set_d_op(sb->s_root, &sysv_dentry_operations);
 	return 1;
 }
 
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index f8def3c..0e0e99b 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -1,6 +1,5 @@
 config UDF_FS
 	tristate "UDF file system support"
-	depends on BKL # needs serious work to remove
 	select CRC_ITU_T
 	help
 	  This is the new file system used on some CD-ROMs and DVDs. Say Y if
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index b608efa..306ee39 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -157,10 +157,9 @@
 				udf_debug("bit %ld already set\n", bit + i);
 				udf_debug("byte=%2x\n",
 					((char *)bh->b_data)[(bit + i) >> 3]);
-			} else {
-				udf_add_free_space(sb, sbi->s_partition, 1);
 			}
 		}
+		udf_add_free_space(sb, sbi->s_partition, count);
 		mark_buffer_dirty(bh);
 		if (overflow) {
 			block += count;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 51552bf..eb8bfe2 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -30,7 +30,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 
 #include "udf_i.h"
@@ -190,18 +189,14 @@
 	struct inode *dir = filp->f_path.dentry->d_inode;
 	int result;
 
-	lock_kernel();
-
 	if (filp->f_pos == 0) {
 		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
-			unlock_kernel();
 			return 0;
 		}
 		filp->f_pos++;
 	}
 
 	result = do_udf_readdir(dir, filp, filldir, dirent);
-	unlock_kernel();
  	return result;
 }
 
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 66b9e7e..89c7848 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -32,7 +32,6 @@
 #include <linux/string.h> /* memset */
 #include <linux/capability.h>
 #include <linux/errno.h>
-#include <linux/smp_lock.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/aio.h>
@@ -114,6 +113,7 @@
 	size_t count = iocb->ki_left;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 
+	down_write(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		if (file->f_flags & O_APPEND)
 			pos = inode->i_size;
@@ -126,6 +126,7 @@
 			udf_expand_file_adinicb(inode, pos + count, &err);
 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 				udf_debug("udf_expand_adinicb: err=%d\n", err);
+				up_write(&iinfo->i_data_sem);
 				return err;
 			}
 		} else {
@@ -135,6 +136,7 @@
 				iinfo->i_lenAlloc = inode->i_size;
 		}
 	}
+	up_write(&iinfo->i_data_sem);
 
 	retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
 	if (retval > 0)
@@ -149,8 +151,6 @@
 	long old_block, new_block;
 	int result = -EINVAL;
 
-	lock_kernel();
-
 	if (file_permission(filp, MAY_READ) != 0) {
 		udf_debug("no permission to access inode %lu\n", inode->i_ino);
 		result = -EPERM;
@@ -196,7 +196,6 @@
 	}
 
 out:
-	unlock_kernel();
 	return result;
 }
 
@@ -204,10 +203,10 @@
 {
 	if (filp->f_mode & FMODE_WRITE) {
 		mutex_lock(&inode->i_mutex);
-		lock_kernel();
+		down_write(&UDF_I(inode)->i_data_sem);
 		udf_discard_prealloc(inode);
 		udf_truncate_tail_extent(inode);
-		unlock_kernel();
+		up_write(&UDF_I(inode)->i_data_sem);
 		mutex_unlock(&inode->i_mutex);
 	}
 	return 0;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 75d9304..6fb7e0a 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -92,28 +92,19 @@
 		return NULL;
 	}
 
-	mutex_lock(&sbi->s_alloc_mutex);
 	if (sbi->s_lvid_bh) {
-		struct logicalVolIntegrityDesc *lvid =
-			(struct logicalVolIntegrityDesc *)
-			sbi->s_lvid_bh->b_data;
-		struct logicalVolIntegrityDescImpUse *lvidiu =
-							udf_sb_lvidiu(sbi);
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				(lvid->logicalVolContentsUse);
+		struct logicalVolIntegrityDescImpUse *lvidiu;
+
+		iinfo->i_unique = lvid_get_unique_id(sb);
+		mutex_lock(&sbi->s_alloc_mutex);
+		lvidiu = udf_sb_lvidiu(sbi);
 		if (S_ISDIR(mode))
 			le32_add_cpu(&lvidiu->numDirs, 1);
 		else
 			le32_add_cpu(&lvidiu->numFiles, 1);
-		iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
 		udf_updated_lvid(sb);
+		mutex_unlock(&sbi->s_alloc_mutex);
 	}
-	mutex_unlock(&sbi->s_alloc_mutex);
 
 	inode_init_owner(inode, dir, mode);
 
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fc48f37..c6a2e78 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -31,7 +31,6 @@
 
 #include "udfdecl.h"
 #include <linux/mm.h>
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -51,6 +50,7 @@
 static mode_t udf_convert_permissions(struct fileEntry *);
 static int udf_update_inode(struct inode *, int);
 static void udf_fill_inode(struct inode *, struct buffer_head *);
+static int udf_sync_inode(struct inode *inode);
 static int udf_alloc_i_data(struct inode *inode, size_t size);
 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
 					sector_t *, int *);
@@ -79,9 +79,7 @@
 		want_delete = 1;
 		inode->i_size = 0;
 		udf_truncate(inode);
-		lock_kernel();
 		udf_update_inode(inode, IS_SYNC(inode));
-		unlock_kernel();
 	}
 	invalidate_inode_buffers(inode);
 	end_writeback(inode);
@@ -97,9 +95,7 @@
 	kfree(iinfo->i_ext.i_data);
 	iinfo->i_ext.i_data = NULL;
 	if (want_delete) {
-		lock_kernel();
 		udf_free_inode(inode);
-		unlock_kernel();
 	}
 }
 
@@ -302,10 +298,9 @@
 	err = -EIO;
 	new = 0;
 	bh = NULL;
-
-	lock_kernel();
-
 	iinfo = UDF_I(inode);
+
+	down_write(&iinfo->i_data_sem);
 	if (block == iinfo->i_next_alloc_block + 1) {
 		iinfo->i_next_alloc_block++;
 		iinfo->i_next_alloc_goal++;
@@ -324,7 +319,7 @@
 	map_bh(bh_result, inode->i_sb, phys);
 
 abort:
-	unlock_kernel();
+	up_write(&iinfo->i_data_sem);
 	return err;
 }
 
@@ -1022,16 +1017,16 @@
 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 		return;
 
-	lock_kernel();
 	iinfo = UDF_I(inode);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+		down_write(&iinfo->i_data_sem);
 		if (inode->i_sb->s_blocksize <
 				(udf_file_entry_alloc_offset(inode) +
 				 inode->i_size)) {
 			udf_expand_file_adinicb(inode, inode->i_size, &err);
 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 				inode->i_size = iinfo->i_lenAlloc;
-				unlock_kernel();
+				up_write(&iinfo->i_data_sem);
 				return;
 			} else
 				udf_truncate_extents(inode);
@@ -1042,10 +1037,13 @@
 				offset - udf_file_entry_alloc_offset(inode));
 			iinfo->i_lenAlloc = inode->i_size;
 		}
+		up_write(&iinfo->i_data_sem);
 	} else {
 		block_truncate_page(inode->i_mapping, inode->i_size,
 				    udf_get_block);
+		down_write(&iinfo->i_data_sem);
 		udf_truncate_extents(inode);
+		up_write(&iinfo->i_data_sem);
 	}
 
 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
@@ -1053,7 +1051,6 @@
 		udf_sync_inode(inode);
 	else
 		mark_inode_dirty(inode);
-	unlock_kernel();
 }
 
 static void __udf_read_inode(struct inode *inode)
@@ -1202,6 +1199,7 @@
 		return;
 	}
 
+	read_lock(&sbi->s_cred_lock);
 	inode->i_uid = le32_to_cpu(fe->uid);
 	if (inode->i_uid == -1 ||
 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
@@ -1214,13 +1212,6 @@
 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
 
-	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
-	if (!inode->i_nlink)
-		inode->i_nlink = 1;
-
-	inode->i_size = le64_to_cpu(fe->informationLength);
-	iinfo->i_lenExtents = inode->i_size;
-
 	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
 			sbi->s_fmode != UDF_INVALID_MODE)
 		inode->i_mode = sbi->s_fmode;
@@ -1230,6 +1221,14 @@
 	else
 		inode->i_mode = udf_convert_permissions(fe);
 	inode->i_mode &= ~sbi->s_umask;
+	read_unlock(&sbi->s_cred_lock);
+
+	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
+	if (!inode->i_nlink)
+		inode->i_nlink = 1;
+
+	inode->i_size = le64_to_cpu(fe->informationLength);
+	iinfo->i_lenExtents = inode->i_size;
 
 	if (iinfo->i_efe == 0) {
 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
@@ -1373,16 +1372,10 @@
 
 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	int ret;
-
-	lock_kernel();
-	ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
-	unlock_kernel();
-
-	return ret;
+	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 }
 
-int udf_sync_inode(struct inode *inode)
+static int udf_sync_inode(struct inode *inode)
 {
 	return udf_update_inode(inode, 1);
 }
@@ -2048,7 +2041,7 @@
 	struct extent_position epos = {};
 	int ret;
 
-	lock_kernel();
+	down_read(&UDF_I(inode)->i_data_sem);
 
 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
 						(EXT_RECORDED_ALLOCATED >> 30))
@@ -2056,7 +2049,7 @@
 	else
 		ret = 0;
 
-	unlock_kernel();
+	up_read(&UDF_I(inode)->i_data_sem);
 	brelse(epos.bh);
 
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 6d8dc02..b7c338d 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -27,12 +27,13 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/sched.h>
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
 
+enum { UDF_MAX_LINKS = 0xffff };
+
 static inline int udf_match(int len1, const unsigned char *name1, int len2,
 			    const unsigned char *name2)
 {
@@ -228,10 +229,8 @@
 		}
 
 		if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) &&
-		    isdotdot) {
-			brelse(epos.bh);
-			return fi;
-		}
+		    isdotdot)
+			goto out_ok;
 
 		if (!lfi)
 			continue;
@@ -263,7 +262,6 @@
 	if (dentry->d_name.len > UDF_NAME_LEN - 2)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	lock_kernel();
 #ifdef UDF_RECOVERY
 	/* temporary shorthand for specifying files by inode number */
 	if (!strncmp(dentry->d_name.name, ".B=", 3)) {
@@ -275,7 +273,6 @@
 		};
 		inode = udf_iget(dir->i_sb, lb);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	} else
@@ -291,11 +288,9 @@
 		loc = lelb_to_cpu(cfi.icb.extLocation);
 		inode = udf_iget(dir->i_sb, &loc);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	}
-	unlock_kernel();
 
 	return d_splice_alias(inode, dentry);
 }
@@ -476,15 +471,19 @@
 				f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
 		if (!fibh->ebh)
 			goto out_err;
+		/* Extents could have been merged, invalidate our position */
+		brelse(epos.bh);
+		epos.bh = NULL;
+		epos.block = dinfo->i_location;
+		epos.offset = udf_file_entry_alloc_offset(dir);
 
 		if (!fibh->soffset) {
-			if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
-			    (EXT_RECORDED_ALLOCATED >> 30)) {
-				block = eloc.logicalBlockNum + ((elen - 1) >>
+			/* Find the freshly allocated block */
+			while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
+				(EXT_RECORDED_ALLOCATED >> 30))
+				;
+			block = eloc.logicalBlockNum + ((elen - 1) >>
 					dir->i_sb->s_blocksize_bits);
-			} else
-				block++;
-
 			brelse(fibh->sbh);
 			fibh->sbh = fibh->ebh;
 			fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
@@ -562,10 +561,8 @@
 	int err;
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode) {
-		unlock_kernel();
 		return err;
 	}
 
@@ -583,7 +580,6 @@
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -596,7 +592,6 @@
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-	unlock_kernel();
 	d_instantiate(dentry, inode);
 
 	return 0;
@@ -614,7 +609,6 @@
 	if (!old_valid_dev(rdev))
 		return -EINVAL;
 
-	lock_kernel();
 	err = -EIO;
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode)
@@ -627,7 +621,6 @@
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -646,7 +639,6 @@
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -659,9 +651,8 @@
 	struct udf_inode_info *dinfo = UDF_I(dir);
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	err = -EMLINK;
-	if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
+	if (dir->i_nlink >= UDF_MAX_LINKS)
 		goto out;
 
 	err = -EIO;
@@ -712,7 +703,6 @@
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -794,7 +784,6 @@
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -826,7 +815,6 @@
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -840,7 +828,6 @@
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -870,7 +857,6 @@
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -890,21 +876,21 @@
 	int block;
 	unsigned char *name = NULL;
 	int namelen;
-	struct buffer_head *bh;
 	struct udf_inode_info *iinfo;
+	struct super_block *sb = dir->i_sb;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
 	if (!inode)
 		goto out;
 
+	iinfo = UDF_I(inode);
+	down_write(&iinfo->i_data_sem);
 	name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
 	if (!name) {
 		err = -ENOMEM;
 		goto out_no_entry;
 	}
 
-	iinfo = UDF_I(inode);
 	inode->i_data.a_ops = &udf_symlink_aops;
 	inode->i_op = &udf_symlink_inode_operations;
 
@@ -912,7 +898,7 @@
 		struct kernel_lb_addr eloc;
 		uint32_t bsize;
 
-		block = udf_new_block(inode->i_sb, inode,
+		block = udf_new_block(sb, inode,
 				iinfo->i_location.partitionReferenceNum,
 				iinfo->i_location.logicalBlockNum, &err);
 		if (!block)
@@ -923,17 +909,17 @@
 		eloc.logicalBlockNum = block;
 		eloc.partitionReferenceNum =
 				iinfo->i_location.partitionReferenceNum;
-		bsize = inode->i_sb->s_blocksize;
+		bsize = sb->s_blocksize;
 		iinfo->i_lenExtents = bsize;
 		udf_add_aext(inode, &epos, &eloc, bsize, 0);
 		brelse(epos.bh);
 
-		block = udf_get_pblock(inode->i_sb, block,
+		block = udf_get_pblock(sb, block,
 				iinfo->i_location.partitionReferenceNum,
 				0);
-		epos.bh = udf_tgetblk(inode->i_sb, block);
+		epos.bh = udf_tgetblk(sb, block);
 		lock_buffer(epos.bh);
-		memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
+		memset(epos.bh->b_data, 0x00, bsize);
 		set_buffer_uptodate(epos.bh);
 		unlock_buffer(epos.bh);
 		mark_buffer_dirty_inode(epos.bh, inode);
@@ -941,7 +927,7 @@
 	} else
 		ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 
-	eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
+	eoffset = sb->s_blocksize - udf_ext0_offset(inode);
 	pc = (struct pathComponent *)ea;
 
 	if (*symname == '/') {
@@ -981,7 +967,7 @@
 		}
 
 		if (pc->componentType == 5) {
-			namelen = udf_put_filename(inode->i_sb, compstart, name,
+			namelen = udf_put_filename(sb, compstart, name,
 						   symname - compstart);
 			if (!namelen)
 				goto out_no_entry;
@@ -1015,27 +1001,16 @@
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi)
 		goto out_no_entry;
-	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-				(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				lvid->logicalVolContentsUse;
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
 		mark_inode_dirty(dir);
+	up_write(&iinfo->i_data_sem);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
@@ -1044,10 +1019,10 @@
 
 out:
 	kfree(name);
-	unlock_kernel();
 	return err;
 
 out_no_entry:
+	up_write(&iinfo->i_data_sem);
 	inode_dec_link_count(inode);
 	iput(inode);
 	goto out;
@@ -1060,36 +1035,19 @@
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc cfi, *fi;
 	int err;
-	struct buffer_head *bh;
 
-	lock_kernel();
-	if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
-		unlock_kernel();
+	if (inode->i_nlink >= UDF_MAX_LINKS)
 		return -EMLINK;
-	}
 
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi) {
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-				(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				(lvid->logicalVolContentsUse);
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(inode->i_sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
@@ -1103,7 +1061,6 @@
 	mark_inode_dirty(inode);
 	ihold(inode);
 	d_instantiate(dentry, inode);
-	unlock_kernel();
 
 	return 0;
 }
@@ -1124,7 +1081,6 @@
 	struct kernel_lb_addr tloc;
 	struct udf_inode_info *old_iinfo = UDF_I(old_inode);
 
-	lock_kernel();
 	ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
 	if (ofi) {
 		if (ofibh.sbh != ofibh.ebh)
@@ -1176,9 +1132,7 @@
 			goto end_rename;
 
 		retval = -EMLINK;
-		if (!new_inode &&
-			new_dir->i_nlink >=
-				(256 << sizeof(new_dir->i_nlink)) - 1)
+		if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS)
 			goto end_rename;
 	}
 	if (!nfi) {
@@ -1248,7 +1202,6 @@
 			brelse(nfibh.ebh);
 		brelse(nfibh.sbh);
 	}
-	unlock_kernel();
 
 	return retval;
 }
@@ -1261,7 +1214,6 @@
 	struct fileIdentDesc cfi;
 	struct udf_fileident_bh fibh;
 
-	lock_kernel();
 	if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
 		goto out_unlock;
 
@@ -1273,11 +1225,9 @@
 	inode = udf_iget(child->d_inode->i_sb, &tloc);
 	if (!inode)
 		goto out_unlock;
-	unlock_kernel();
 
 	return d_obtain_alias(inode);
 out_unlock:
-	unlock_kernel();
 	return ERR_PTR(-EACCES);
 }
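
The UDF_MAX_LINKS constant introduced at the top of this file replaces the old (256 << sizeof(i_nlink)) - 1 guard with the on-disk limit: fileLinkCount is read with le16_to_cpu(), so 0xffff is the real ceiling, while the old expression depended on the in-memory size of i_nlink. A throwaway userspace check of the two values, illustrative only and assuming the usual 4-byte unsigned int for i_nlink:

#include <stdio.h>

int main(void)
{
	/* Old guard: depends on sizeof(inode->i_nlink), typically 4 bytes,
	 * giving 4095, unrelated to the on-disk format. */
	printf("old expression: %lu\n",
	       (unsigned long)(256 << sizeof(unsigned int)) - 1);

	/* New guard: fileLinkCount is a 16-bit field, so 0xffff (65535). */
	printf("UDF_MAX_LINKS:  %u\n", 0xffff);
	return 0;
}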
 
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 745eb20..a71090e 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/buffer_head.h>
+#include <linux/mutex.h>
 
 uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
 			uint16_t partition, uint32_t offset)
@@ -159,7 +160,9 @@
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	u16 reallocationTableLen;
 	struct buffer_head *bh;
+	int ret = 0;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	for (i = 0; i < sbi->s_partitions; i++) {
 		struct udf_part_map *map = &sbi->s_partmaps[i];
 		if (old_block > map->s_partition_root &&
@@ -175,8 +178,10 @@
 					break;
 				}
 
-			if (!st)
-				return 1;
+			if (!st) {
+				ret = 1;
+				goto out;
+			}
 
 			reallocationTableLen =
 					le16_to_cpu(st->reallocationTableLen);
@@ -207,14 +212,16 @@
 						     ((old_block -
 							map->s_partition_root) &
 						     (sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc == packet) {
 					*new_block = le32_to_cpu(
 							entry->mappedLocation) +
 						     ((old_block -
 							map->s_partition_root) &
 						     (sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc > packet)
 					break;
 			}
@@ -251,20 +258,24 @@
 					      st->mapEntry[k].mappedLocation) +
 					((old_block - map->s_partition_root) &
 					 (sdata->s_packet_len - 1));
-				return 0;
+				ret = 0;
+				goto out;
 			}
 
-			return 1;
+			ret = 1;
+			goto out;
 		} /* if old_block */
 	}
 
 	if (i == sbi->s_partitions) {
 		/* outside of partitions */
 		/* for now, fail =) */
-		return 1;
+		ret = 1;
 	}
 
-	return 0;
+out:
+	mutex_unlock(&sbi->s_alloc_mutex);
+	return ret;
 }
 
 static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
diff --git a/fs/udf/super.c b/fs/udf/super.c
index b539d53..7b27b06 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -48,7 +48,6 @@
 #include <linux/stat.h>
 #include <linux/cdrom.h>
 #include <linux/nls.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
 #include <linux/vmalloc.h>
@@ -135,6 +134,7 @@
 	ei->i_next_alloc_block = 0;
 	ei->i_next_alloc_goal = 0;
 	ei->i_strat4096 = 0;
+	init_rwsem(&ei->i_data_sem);
 
 	return &ei->vfs_inode;
 }
@@ -574,13 +574,14 @@
 	if (!udf_parse_options(options, &uopt, true))
 		return -EINVAL;
 
-	lock_kernel();
+	write_lock(&sbi->s_cred_lock);
 	sbi->s_flags = uopt.flags;
 	sbi->s_uid   = uopt.uid;
 	sbi->s_gid   = uopt.gid;
 	sbi->s_umask = uopt.umask;
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
+	write_unlock(&sbi->s_cred_lock);
 
 	if (sbi->s_lvid_bh) {
 		int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
@@ -597,7 +598,6 @@
 		udf_open_lvid(sb);
 
 out_unlock:
-	unlock_kernel();
 	return error;
 }
 
@@ -966,9 +966,9 @@
 		(sizeof(struct buffer_head *) * nr_groups);
 
 	if (size <= PAGE_SIZE)
-		bitmap = kmalloc(size, GFP_KERNEL);
+		bitmap = kzalloc(size, GFP_KERNEL);
 	else
-		bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
+		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
 
 	if (bitmap == NULL) {
 		udf_error(sb, __func__,
@@ -977,7 +977,6 @@
 		return NULL;
 	}
 
-	memset(bitmap, 0x00, size);
 	bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
 	bitmap->s_nr_groups = nr_groups;
 	return bitmap;
@@ -1781,6 +1780,8 @@
 
 	if (!bh)
 		return;
+
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 
@@ -1797,6 +1798,7 @@
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
 }
 
 static void udf_close_lvid(struct super_block *sb)
@@ -1809,6 +1811,7 @@
 	if (!bh)
 		return;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1829,6 +1832,34 @@
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
+}
+
+u64 lvid_get_unique_id(struct super_block *sb)
+{
+	struct buffer_head *bh;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct logicalVolIntegrityDesc *lvid;
+	struct logicalVolHeaderDesc *lvhd;
+	u64 uniqueID;
+	u64 ret;
+
+	bh = sbi->s_lvid_bh;
+	if (!bh)
+		return 0;
+
+	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
+
+	mutex_lock(&sbi->s_alloc_mutex);
+	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (!(++uniqueID & 0xFFFFFFFF))
+		uniqueID += 16;
+	lvhd->uniqueID = cpu_to_le64(uniqueID);
+	mutex_unlock(&sbi->s_alloc_mutex);
+	mark_buffer_dirty(bh);
+
+	return ret;
 }
 
 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
@@ -1886,8 +1917,6 @@
 	struct kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
 
-	lock_kernel();
-
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
 	uopt.uid = -1;
 	uopt.gid = -1;
@@ -1896,10 +1925,8 @@
 	uopt.dmode = UDF_INVALID_MODE;
 
 	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
-	if (!sbi) {
-		unlock_kernel();
+	if (!sbi)
 		return -ENOMEM;
-	}
 
 	sb->s_fs_info = sbi;
 
@@ -1936,6 +1963,7 @@
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
 	sbi->s_nls_map = uopt.nls_map;
+	rwlock_init(&sbi->s_cred_lock);
 
 	if (uopt.session == 0xFFFFFFFF)
 		sbi->s_session = udf_get_last_session(sb);
@@ -2045,7 +2073,6 @@
 		goto error_out;
 	}
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	unlock_kernel();
 	return 0;
 
 error_out:
@@ -2066,7 +2093,6 @@
 	kfree(sbi);
 	sb->s_fs_info = NULL;
 
-	unlock_kernel();
 	return -EINVAL;
 }
 
@@ -2105,8 +2131,6 @@
 
 	sbi = UDF_SB(sb);
 
-	lock_kernel();
-
 	if (sbi->s_vat_inode)
 		iput(sbi->s_vat_inode);
 	if (sbi->s_partitions)
@@ -2122,8 +2146,6 @@
 	kfree(sbi->s_partmaps);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
-
-	unlock_kernel();
 }
 
 static int udf_sync_fs(struct super_block *sb, int wait)
@@ -2186,8 +2208,6 @@
 	uint16_t ident;
 	struct spaceBitmapDesc *bm;
 
-	lock_kernel();
-
 	loc.logicalBlockNum = bitmap->s_extPosition;
 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
 	bh = udf_read_ptagged(sb, &loc, 0, &ident);
@@ -2224,10 +2244,7 @@
 		}
 	}
 	brelse(bh);
-
 out:
-	unlock_kernel();
-
 	return accum;
 }
 
@@ -2240,8 +2257,7 @@
 	int8_t etype;
 	struct extent_position epos;
 
-	lock_kernel();
-
+	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
 	epos.block = UDF_I(table)->i_location;
 	epos.offset = sizeof(struct unallocSpaceEntry);
 	epos.bh = NULL;
@@ -2250,8 +2266,7 @@
 		accum += (elen >> table->i_sb->s_blocksize_bits);
 
 	brelse(epos.bh);
-
-	unlock_kernel();
+	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
 
 	return accum;
 }
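
lvid_get_unique_id() above centralises the uniqueID bookkeeping that ialloc.c and namei.c used to open-code: hand out the current value, then advance the stored one, skipping any successor whose low 32 bits would be zero. A minimal userspace sketch of just that increment rule (toy_next_unique_id is a made-up name, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same skip-zero rule as lvid_get_unique_id(): return the current value,
 * then bump the stored one, never leaving its low 32 bits at zero. */
static uint64_t toy_next_unique_id(uint64_t *stored)
{
	uint64_t ret = *stored;

	if (!(++(*stored) & 0xFFFFFFFFULL))
		*stored += 16;
	return ret;
}

int main(void)
{
	uint64_t id = 0xFFFFFFFFULL;	/* next bump would wrap the low 32 bits */

	printf("handed out %#llx, stored is now %#llx\n",
	       (unsigned long long)toy_next_unique_id(&id),
	       (unsigned long long)id);
	return 0;
}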
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 1606478..b1d4488 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -27,7 +27,6 @@
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
@@ -78,13 +77,16 @@
 	int err = -EIO;
 	unsigned char *p = kmap(page);
 	struct udf_inode_info *iinfo;
+	uint32_t pos;
 
-	lock_kernel();
 	iinfo = UDF_I(inode);
+	pos = udf_block_map(inode, 0);
+
+	down_read(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 	} else {
-		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
+		bh = sb_bread(inode->i_sb, pos);
 
 		if (!bh)
 			goto out;
@@ -95,14 +97,14 @@
 	udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
 	brelse(bh);
 
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageUptodate(page);
 	kunmap(page);
 	unlock_page(page);
 	return 0;
 
 out:
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageError(page);
 	kunmap(page);
 	unlock_page(page);
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index e58d1de..d1bd31e 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,6 +1,18 @@
 #ifndef _UDF_I_H
 #define _UDF_I_H
 
+/*
+ * The i_data_sem and i_mutex together protect the allocation information
+ * of regular files and symlinks. This covers all extents belonging to the
+ * file/symlink, whether the data lives in-inode or in external data
+ * blocks, preallocation, goal block information, and so on. When extents
+ * are read, either i_mutex or i_data_sem must be held (holding i_data_sem
+ * for reading is enough). When extents are changed, i_data_sem must be
+ * held for writing and i_mutex must be held as well.
+ *
+ * For directories i_mutex is used for all the necessary protection.
+ */
+
 struct udf_inode_info {
 	struct timespec		i_crtime;
 	/* Physical address of inode */
@@ -21,6 +33,7 @@
 		struct long_ad		*i_lad;
 		__u8		*i_data;
 	} i_ext;
+	struct rw_semaphore	i_data_sem;
 	struct inode vfs_inode;
 };
 
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index d113b72..4858c19 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -2,6 +2,7 @@
 #define __LINUX_UDF_SB_H
 
 #include <linux/mutex.h>
+#include <linux/bitops.h>
 
 /* Since UDF 2.01 is ISO 13346 based... */
 #define UDF_SUPER_MAGIC			0x15013346
@@ -128,6 +129,8 @@
 	uid_t			s_uid;
 	mode_t			s_fmode;
 	mode_t			s_dmode;
+	/* Lock protecting consistency of above permission settings */
+	rwlock_t		s_cred_lock;
 
 	/* Root Info */
 	struct timespec		s_record_time;
@@ -139,7 +142,7 @@
 	__u16			s_udfrev;
 
 	/* Miscellaneous flags */
-	__u32			s_flags;
+	unsigned long		s_flags;
 
 	/* Encoding info */
 	struct nls_table	*s_nls_map;
@@ -161,8 +164,19 @@
 
 int udf_compute_nr_groups(struct super_block *sb, u32 partition);
 
-#define UDF_QUERY_FLAG(X,Y)			( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
-#define UDF_SET_FLAG(X,Y)			( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
-#define UDF_CLEAR_FLAG(X,Y)			( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
+static inline int UDF_QUERY_FLAG(struct super_block *sb, int flag)
+{
+	return test_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_SET_FLAG(struct super_block *sb, int flag)
+{
+	set_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_CLEAR_FLAG(struct super_block *sb, int flag)
+{
+	clear_bit(flag, &UDF_SB(sb)->s_flags);
+}
 
 #endif /* __LINUX_UDF_SB_H */
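
UDF_QUERY_FLAG/UDF_SET_FLAG/UDF_CLEAR_FLAG are now inline wrappers around test_bit/set_bit/clear_bit on an unsigned long s_flags, which makes the set and clear paths atomic read-modify-write operations rather than plain |=/&= on a __u32. A userspace approximation of the same idea using C11 atomics (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong toy_flags;

static inline bool toy_query_flag(int flag)
{
	return atomic_load(&toy_flags) & (1UL << flag);
}

static inline void toy_set_flag(int flag)
{
	atomic_fetch_or(&toy_flags, 1UL << flag);	/* atomic OR */
}

static inline void toy_clear_flag(int flag)
{
	atomic_fetch_and(&toy_flags, ~(1UL << flag));	/* atomic AND-NOT */
}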
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 6995ab1..eba4820 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -111,6 +111,8 @@
 };
 
 /* super.c */
+
+__attribute__((format(printf, 3, 4)))
 extern void udf_warning(struct super_block *, const char *, const char *, ...);
 static inline void udf_updated_lvid(struct super_block *sb)
 {
@@ -123,6 +125,7 @@
 	sb->s_dirt = 1;
 	UDF_SB(sb)->s_lvid_dirty = 1;
 }
+extern u64 lvid_get_unique_id(struct super_block *sb);
 
 /* namei.c */
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
@@ -133,7 +136,6 @@
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
 /* inode.c */
 extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
-extern int udf_sync_inode(struct inode *);
 extern void udf_expand_file_adinicb(struct inode *, int, int *);
 extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
 extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 12f39b9..d6f6815 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -306,7 +306,6 @@
 		new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
 		ufs_set_link(new_dir, new_de, new_page, old_inode);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
@@ -318,12 +317,9 @@
 			if (new_dir->i_nlink >= UFS_LINK_MAX)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
 		err = ufs_add_link(new_dentry, old_inode);
-		if (err) {
-			inode_dec_link_count(old_inode);
+		if (err)
 			goto out_dir;
-		}
 		if (dir_de)
 			inode_inc_link_count(new_dir);
 	}
@@ -331,12 +327,11 @@
 	/*
 	 * Like most other Unix systems, set the ctime for inodes on a
  	 * rename.
-	 * inode_dec_link_count() will mark the inode dirty.
 	 */
 	old_inode->i_ctime = CURRENT_TIME_SEC;
 
 	ufs_delete_entry(old_dir, old_de, old_page);
-	inode_dec_link_count(old_inode);
+	mark_inode_dirty(old_inode);
 
 	if (dir_de) {
 		ufs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0dce969..faca449 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -98,6 +98,7 @@
 				   kmem.o \
 				   xfs_aops.o \
 				   xfs_buf.o \
+				   xfs_discard.o \
 				   xfs_export.o \
 				   xfs_file.o \
 				   xfs_fs_subr.o \
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
deleted file mode 100644
index 4dfc7c3..0000000
--- a/fs/xfs/linux-2.6/sv.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_SUPPORT_SV_H__
-#define __XFS_SUPPORT_SV_H__
-
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-
-/*
- * Synchronisation variables.
- *
- * (Parameters "pri", "svf" and "rts" are not implemented)
- */
-
-typedef struct sv_s {
-	wait_queue_head_t waiters;
-} sv_t;
-
-static inline void _sv_wait(sv_t *sv, spinlock_t *lock)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	add_wait_queue_exclusive(&sv->waiters, &wait);
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	spin_unlock(lock);
-
-	schedule();
-
-	remove_wait_queue(&sv->waiters, &wait);
-}
-
-#define sv_init(sv,flag,name) \
-	init_waitqueue_head(&(sv)->waiters)
-#define sv_destroy(sv) \
-	/*NOTHING*/
-#define sv_wait(sv, pri, lock, s) \
-	_sv_wait(sv, lock)
-#define sv_signal(sv) \
-	wake_up(&(sv)->waiters)
-#define sv_broadcast(sv) \
-	wake_up_all(&(sv)->waiters)
-
-#endif /* __XFS_SUPPORT_SV_H__ */
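
The deleted sv.h was a thin wrapper over a wait queue: _sv_wait() drops the caller's spinlock and sleeps, and sv_signal()/sv_broadcast() wake one or all sleepers. In userspace terms it is roughly a condition variable without a predicate, as in this illustrative sketch; note that pthread_cond_wait() re-acquires the lock before returning, which _sv_wait() deliberately did not do.

#include <pthread.h>

typedef struct {
	pthread_cond_t waiters;		/* analogue of sv_t.waiters */
} toy_sv_t;

static void toy_sv_init(toy_sv_t *sv)
{
	pthread_cond_init(&sv->waiters, NULL);
}

/* Like _sv_wait(): release @lock and sleep until woken. */
static void toy_sv_wait(toy_sv_t *sv, pthread_mutex_t *lock)
{
	pthread_cond_wait(&sv->waiters, lock);
}

static void toy_sv_signal(toy_sv_t *sv)
{
	pthread_cond_signal(&sv->waiters);	/* wake one sleeper */
}

static void toy_sv_broadcast(toy_sv_t *sv)
{
	pthread_cond_broadcast(&sv->waiters);	/* wake them all */
}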
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 691f612..ec7bbb5 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,15 +38,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-/*
- * Types of I/O for bmap clustering and I/O completion tracking.
- */
-enum {
-	IO_READ,	/* mapping for a read */
-	IO_DELAY,	/* mapping covers delalloc region */
-	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
-	IO_NEW		/* just allocated */
-};
 
 /*
  * Prime number of hash buckets since address is used as the key.
@@ -182,9 +173,6 @@
 	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
 	xfs_fsize_t		isize;
 
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-	ASSERT(ioend->io_type != IO_READ);
-
 	if (unlikely(ioend->io_error))
 		return 0;
 
@@ -244,10 +232,8 @@
 	 * We might have to update the on-disk file size after extending
 	 * writes.
 	 */
-	if (ioend->io_type != IO_READ) {
-		error = xfs_setfilesize(ioend);
-		ASSERT(!error || error == EAGAIN);
-	}
+	error = xfs_setfilesize(ioend);
+	ASSERT(!error || error == EAGAIN);
 
 	/*
 	 * If we didn't complete processing of the ioend, requeue it to the
@@ -318,14 +304,63 @@
 xfs_map_blocks(
 	struct inode		*inode,
 	loff_t			offset,
-	ssize_t			count,
 	struct xfs_bmbt_irec	*imap,
-	int			flags)
+	int			type,
+	int			nonblocking)
 {
-	int			nmaps = 1;
-	int			new = 0;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			count = 1 << inode->i_blkbits;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			bmapi_flags = XFS_BMAPI_ENTIRE;
+	int			nimaps = 1;
 
-	return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	if (type == IO_UNWRITTEN)
+		bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+		if (nonblocking)
+			return -XFS_ERROR(EAGAIN);
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+	}
+
+	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+	       (ip->i_df.if_flags & XFS_IFEXTENTS));
+	ASSERT(offset <= mp->m_maxioffset);
+
+	if (offset + count > mp->m_maxioffset)
+		count = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  bmapi_flags,  NULL, 0, imap, &nimaps, NULL);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	if (error)
+		return -XFS_ERROR(error);
+
+	if (type == IO_DELALLOC &&
+	    (!nimaps || isnullstartblock(imap->br_startblock))) {
+		error = xfs_iomap_write_allocate(ip, offset, count, imap);
+		if (!error)
+			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+		return -XFS_ERROR(error);
+	}
+
+#ifdef DEBUG
+	if (type == IO_UNWRITTEN) {
+		ASSERT(nimaps);
+		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+	}
+#endif
+	if (nimaps)
+		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+	return 0;
 }
 
 STATIC int
@@ -380,26 +415,18 @@
 
 	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
 		   WRITE_SYNC_PLUG : WRITE, bio);
-	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
-	bio_put(bio);
 }
 
 STATIC struct bio *
 xfs_alloc_ioend_bio(
 	struct buffer_head	*bh)
 {
-	struct bio		*bio;
 	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
-
-	do {
-		bio = bio_alloc(GFP_NOIO, nvecs);
-		nvecs >>= 1;
-	} while (!bio);
+	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
 
 	ASSERT(bio->bi_private == NULL);
 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
-	bio_get(bio);
 	return bio;
 }
 
@@ -470,9 +497,8 @@
 	/* Pass 1 - start writeback */
 	do {
 		next = ioend->io_list;
-		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
 			xfs_start_buffer_writeback(bh);
-		}
 	} while ((ioend = next) != NULL);
 
 	/* Pass 2 - submit I/O */
@@ -600,117 +626,13 @@
 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 
-	lock_buffer(bh);
 	xfs_map_buffer(inode, bh, imap, offset);
-	bh->b_bdev = xfs_find_bdev_for_inode(inode);
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
 	clear_buffer_unwritten(bh);
 }
 
 /*
- * Look for a page at index that is suitable for clustering.
- */
-STATIC unsigned int
-xfs_probe_page(
-	struct page		*page,
-	unsigned int		pg_offset)
-{
-	struct buffer_head	*bh, *head;
-	int			ret = 0;
-
-	if (PageWriteback(page))
-		return 0;
-	if (!PageDirty(page))
-		return 0;
-	if (!page->mapping)
-		return 0;
-	if (!page_has_buffers(page))
-		return 0;
-
-	bh = head = page_buffers(page);
-	do {
-		if (!buffer_uptodate(bh))
-			break;
-		if (!buffer_mapped(bh))
-			break;
-		ret += bh->b_size;
-		if (ret >= pg_offset)
-			break;
-	} while ((bh = bh->b_this_page) != head);
-
-	return ret;
-}
-
-STATIC size_t
-xfs_probe_cluster(
-	struct inode		*inode,
-	struct page		*startpage,
-	struct buffer_head	*bh,
-	struct buffer_head	*head)
-{
-	struct pagevec		pvec;
-	pgoff_t			tindex, tlast, tloff;
-	size_t			total = 0;
-	int			done = 0, i;
-
-	/* First sum forwards in this page */
-	do {
-		if (!buffer_uptodate(bh) || !buffer_mapped(bh))
-			return total;
-		total += bh->b_size;
-	} while ((bh = bh->b_this_page) != head);
-
-	/* if we reached the end of the page, sum forwards in following pages */
-	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	tindex = startpage->index + 1;
-
-	/* Prune this back to avoid pathological behavior */
-	tloff = min(tlast, startpage->index + 64);
-
-	pagevec_init(&pvec, 0);
-	while (!done && tindex <= tloff) {
-		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
-		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
-			break;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			size_t pg_offset, pg_len = 0;
-
-			if (tindex == tlast) {
-				pg_offset =
-				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
-				if (!pg_offset) {
-					done = 1;
-					break;
-				}
-			} else
-				pg_offset = PAGE_CACHE_SIZE;
-
-			if (page->index == tindex && trylock_page(page)) {
-				pg_len = xfs_probe_page(page, pg_offset);
-				unlock_page(page);
-			}
-
-			if (!pg_len) {
-				done = 1;
-				break;
-			}
-
-			total += pg_len;
-			tindex++;
-		}
-
-		pagevec_release(&pvec);
-		cond_resched();
-	}
-
-	return total;
-}
-
-/*
  * Test if a given page is suitable for writing as part of an unwritten
  * or delayed allocate extent.
  */
@@ -731,9 +653,9 @@
 			if (buffer_unwritten(bh))
 				acceptable = (type == IO_UNWRITTEN);
 			else if (buffer_delay(bh))
-				acceptable = (type == IO_DELAY);
+				acceptable = (type == IO_DELALLOC);
 			else if (buffer_dirty(bh) && buffer_mapped(bh))
-				acceptable = (type == IO_NEW);
+				acceptable = (type == IO_OVERWRITE);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
@@ -758,8 +680,7 @@
 	loff_t			tindex,
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
-	struct writeback_control *wbc,
-	int			all_bh)
+	struct writeback_control *wbc)
 {
 	struct buffer_head	*bh, *head;
 	xfs_off_t		end_offset;
@@ -814,37 +735,30 @@
 			continue;
 		}
 
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
+		if (buffer_unwritten(bh) || buffer_delay(bh) ||
+		    buffer_mapped(bh)) {
 			if (buffer_unwritten(bh))
 				type = IO_UNWRITTEN;
+			else if (buffer_delay(bh))
+				type = IO_DELALLOC;
 			else
-				type = IO_DELAY;
+				type = IO_OVERWRITE;
 
 			if (!xfs_imap_valid(inode, imap, offset)) {
 				done = 1;
 				continue;
 			}
 
-			ASSERT(imap->br_startblock != HOLESTARTBLOCK);
-			ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
-			xfs_map_at_offset(inode, bh, imap, offset);
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, imap, offset);
 			xfs_add_to_ioend(inode, bh, offset, type,
 					 ioendp, done);
 
 			page_dirty--;
 			count++;
 		} else {
-			type = IO_NEW;
-			if (buffer_mapped(bh) && all_bh) {
-				lock_buffer(bh);
-				xfs_add_to_ioend(inode, bh, offset,
-						type, ioendp, done);
-				count++;
-				page_dirty--;
-			} else {
-				done = 1;
-			}
+			done = 1;
 		}
 	} while (offset += len, (bh = bh->b_this_page) != head);
 
@@ -876,7 +790,6 @@
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
-	int			all_bh,
 	pgoff_t			tlast)
 {
 	struct pagevec		pvec;
@@ -891,7 +804,7 @@
 
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
-					imap, ioendp, wbc, all_bh);
+					imap, ioendp, wbc);
 			if (done)
 				break;
 		}
@@ -935,7 +848,7 @@
 	struct buffer_head	*bh, *head;
 	loff_t			offset = page_offset(page);
 
-	if (!xfs_is_delayed_page(page, IO_DELAY))
+	if (!xfs_is_delayed_page(page, IO_DELALLOC))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1002,10 +915,10 @@
 	unsigned int		type;
 	__uint64_t              end_offset;
 	pgoff_t                 end_index, last_index;
-	ssize_t			size, len;
-	int			flags, err, imap_valid = 0, uptodate = 1;
+	ssize_t			len;
+	int			err, imap_valid = 0, uptodate = 1;
 	int			count = 0;
-	int			all_bh = 0;
+	int			nonblocking = 0;
 
 	trace_xfs_writepage(inode, page, 0);
 
@@ -1056,10 +969,14 @@
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
-	flags = BMAPI_READ;
-	type = IO_NEW;
+	type = IO_OVERWRITE;
+
+	if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+		nonblocking = 1;
 
 	do {
+		int new_ioend = 0;
+
 		if (offset >= end_offset)
 			break;
 		if (!buffer_uptodate(bh))
@@ -1076,90 +993,54 @@
 			continue;
 		}
 
-		if (imap_valid)
-			imap_valid = xfs_imap_valid(inode, &imap, offset);
-
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
-			int new_ioend = 0;
-
-			/*
-			 * Make sure we don't use a read-only iomap
-			 */
-			if (flags == BMAPI_READ)
-				imap_valid = 0;
-
-			if (buffer_unwritten(bh)) {
+		if (buffer_unwritten(bh)) {
+			if (type != IO_UNWRITTEN) {
 				type = IO_UNWRITTEN;
-				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
-			} else if (buffer_delay(bh)) {
-				type = IO_DELAY;
-				flags = BMAPI_ALLOCATE;
-
-				if (wbc->sync_mode == WB_SYNC_NONE)
-					flags |= BMAPI_TRYLOCK;
+				imap_valid = 0;
 			}
-
-			if (!imap_valid) {
-				/*
-				 * If we didn't have a valid mapping then we
-				 * need to ensure that we put the new mapping
-				 * in a new ioend structure. This needs to be
-				 * done to ensure that the ioends correctly
-				 * reflect the block mappings at io completion
-				 * for unwritten extent conversion.
-				 */
-				new_ioend = 1;
-				err = xfs_map_blocks(inode, offset, len,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							    offset);
-			}
-			if (imap_valid) {
-				xfs_map_at_offset(inode, bh, &imap, offset);
-				xfs_add_to_ioend(inode, bh, offset, type,
-						 &ioend, new_ioend);
-				count++;
+		} else if (buffer_delay(bh)) {
+			if (type != IO_DELALLOC) {
+				type = IO_DELALLOC;
+				imap_valid = 0;
 			}
 		} else if (buffer_uptodate(bh)) {
-			/*
-			 * we got here because the buffer is already mapped.
-			 * That means it must already have extents allocated
-			 * underneath it. Map the extent by reading it.
-			 */
-			if (!imap_valid || flags != BMAPI_READ) {
-				flags = BMAPI_READ;
-				size = xfs_probe_cluster(inode, page, bh, head);
-				err = xfs_map_blocks(inode, offset, size,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							    offset);
-			}
-
-			/*
-			 * We set the type to IO_NEW in case we are doing a
-			 * small write at EOF that is extending the file but
-			 * without needing an allocation. We need to update the
-			 * file size on I/O completion in this case so it is
-			 * the same case as having just allocated a new extent
-			 * that we are writing into for the first time.
-			 */
-			type = IO_NEW;
-			if (trylock_buffer(bh)) {
-				if (imap_valid)
-					all_bh = 1;
-				xfs_add_to_ioend(inode, bh, offset, type,
-						&ioend, !imap_valid);
-				count++;
-			} else {
+			if (type != IO_OVERWRITE) {
+				type = IO_OVERWRITE;
 				imap_valid = 0;
 			}
-		} else if (PageUptodate(page)) {
-			ASSERT(buffer_mapped(bh));
-			imap_valid = 0;
+		} else {
+			if (PageUptodate(page)) {
+				ASSERT(buffer_mapped(bh));
+				imap_valid = 0;
+			}
+			continue;
+		}
+
+		if (imap_valid)
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		if (!imap_valid) {
+			/*
+			 * If we didn't have a valid mapping then we need to
+			 * put the new mapping into a separate ioend structure.
+			 * This ensures non-contiguous extents always have
+			 * separate ioends, which is particularly important
+			 * for unwritten extent conversion at I/O completion
+			 * time.
+			 */
+			new_ioend = 1;
+			err = xfs_map_blocks(inode, offset, &imap, type,
+					     nonblocking);
+			if (err)
+				goto error;
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		}
+		if (imap_valid) {
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, &imap, offset);
+			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
+					 new_ioend);
+			count++;
 		}
 
 		if (!iohead)
@@ -1188,7 +1069,7 @@
 			end_index = last_index;
 
 		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
-					wbc, all_bh, end_index);
+				  wbc, end_index);
 	}
 
 	if (iohead)
@@ -1257,13 +1138,19 @@
 	int			create,
 	int			direct)
 {
-	int			flags = create ? BMAPI_WRITE : BMAPI_READ;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			lockmode = 0;
 	struct xfs_bmbt_irec	imap;
+	int			nimaps = 1;
 	xfs_off_t		offset;
 	ssize_t			size;
-	int			nimap = 1;
 	int			new = 0;
-	int			error;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
@@ -1272,15 +1159,45 @@
 	if (!create && direct && offset >= i_size_read(inode))
 		return 0;
 
-	if (direct && create)
-		flags |= BMAPI_DIRECT;
+	if (create) {
+		lockmode = XFS_ILOCK_EXCL;
+		xfs_ilock(ip, lockmode);
+	} else {
+		lockmode = xfs_ilock_map_shared(ip);
+	}
 
-	error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
-			  &new);
+	ASSERT(offset <= mp->m_maxioffset);
+	if (offset + size > mp->m_maxioffset)
+		size = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  XFS_BMAPI_ENTIRE,  NULL, 0, &imap, &nimaps, NULL);
 	if (error)
-		return -error;
-	if (nimap == 0)
-		return 0;
+		goto out_unlock;
+
+	if (create &&
+	    (!nimaps ||
+	     (imap.br_startblock == HOLESTARTBLOCK ||
+	      imap.br_startblock == DELAYSTARTBLOCK))) {
+		if (direct) {
+			error = xfs_iomap_write_direct(ip, offset, size,
+						       &imap, nimaps);
+		} else {
+			error = xfs_iomap_write_delay(ip, offset, size, &imap);
+		}
+		if (error)
+			goto out_unlock;
+
+		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+	} else if (nimaps) {
+		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+	} else {
+		trace_xfs_get_blocks_notfound(ip, offset, size);
+		goto out_unlock;
+	}
+	xfs_iunlock(ip, lockmode);
 
 	if (imap.br_startblock != HOLESTARTBLOCK &&
 	    imap.br_startblock != DELAYSTARTBLOCK) {
@@ -1347,6 +1264,10 @@
 	}
 
 	return 0;
+
+out_unlock:
+	xfs_iunlock(ip, lockmode);
+	return -error;
 }
 
 int
@@ -1434,7 +1355,7 @@
 	ssize_t			ret;
 
 	if (rw & WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IO_NEW);
+		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
 
 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
 					    offset, nr_segs,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index c5057fb..71f721e 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,6 +23,22 @@
 extern mempool_t *xfs_ioend_pool;
 
 /*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+	IO_DIRECT = 0,	/* special case for direct I/O ioends */
+	IO_DELALLOC,	/* mapping covers delalloc region */
+	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
+	IO_OVERWRITE,	/* mapping covers already allocated extent */
+};
+
+#define XFS_IO_TYPES \
+	{ 0,			"" }, \
+	{ IO_DELALLOC,		"delalloc" }, \
+	{ IO_UNWRITTEN,		"unwritten" }, \
+	{ IO_OVERWRITE,		"overwrite" }
+
+/*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
  */
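
Each buffer handled by the writeback path now falls into exactly one of the IO_* states above. A stripped-down sketch of the per-buffer classification used in the writepage code earlier in this diff (the TOY_* bits stand in for buffer_unwritten()/buffer_delay(); none of these names are kernel APIs):

enum toy_io_type {
	TOY_IO_DIRECT = 0,	/* direct I/O ioends */
	TOY_IO_DELALLOC,	/* delayed-allocation extent */
	TOY_IO_UNWRITTEN,	/* allocated but uninitialized extent */
	TOY_IO_OVERWRITE,	/* already allocated extent */
};

#define TOY_BH_UNWRITTEN	(1u << 0)	/* stand-in for buffer_unwritten() */
#define TOY_BH_DELAY		(1u << 1)	/* stand-in for buffer_delay() */

/* Mirrors the per-buffer type selection in the writepage path above. */
static enum toy_io_type toy_classify(unsigned int bh_state)
{
	if (bh_state & TOY_BH_UNWRITTEN)
		return TOY_IO_UNWRITTEN;
	if (bh_state & TOY_BH_DELAY)
		return TOY_IO_DELALLOC;
	return TOY_IO_OVERWRITE;
}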
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4c5deb6..ac1c7e8 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -44,12 +44,7 @@
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
-static struct shrinker xfs_buf_shake = {
-	.shrink = xfsbufd_wakeup,
-	.seeks = DEFAULT_SEEKS,
-};
 
 static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
@@ -168,8 +163,79 @@
 }
 
 /*
- *	Internal xfs_buf_t object manipulation
+ * xfs_buf_lru_add - add a buffer to the LRU.
+ *
+ * The LRU takes a new reference to the buffer so that it will only be freed
+ * once the shrinker takes the buffer off the LRU.
  */
+STATIC void
+xfs_buf_lru_add(
+	struct xfs_buf	*bp)
+{
+	struct xfs_buftarg *btp = bp->b_target;
+
+	spin_lock(&btp->bt_lru_lock);
+	if (list_empty(&bp->b_lru)) {
+		atomic_inc(&bp->b_hold);
+		list_add_tail(&bp->b_lru, &btp->bt_lru);
+		btp->bt_lru_nr++;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * xfs_buf_lru_del - remove a buffer from the LRU
+ *
+ * The unlocked check is safe here because it only occurs when there are no
+ * b_lru_ref counts left on the inode under the pag->pag_buf_lock. It is there
+ * to optimise the shrinker removing the buffer from the LRU and calling
+ * xfs_buf_free(), i.e. it removes an unnecessary round trip on the
+ * bt_lru_lock.
+ */
+STATIC void
+xfs_buf_lru_del(
+	struct xfs_buf	*bp)
+{
+	struct xfs_buftarg *btp = bp->b_target;
+
+	if (list_empty(&bp->b_lru))
+		return;
+
+	spin_lock(&btp->bt_lru_lock);
+	if (!list_empty(&bp->b_lru)) {
+		list_del_init(&bp->b_lru);
+		btp->bt_lru_nr--;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * When we mark a buffer stale, we remove the buffer from the LRU and clear the
+ * b_lru_ref count so that the buffer is freed immediately when the buffer
+ * reference count falls to zero. If the buffer is already on the LRU, we need
+ * to remove the reference that LRU holds on the buffer.
+ *
+ * This prevents build-up of stale buffers on the LRU.
+ */
+void
+xfs_buf_stale(
+	struct xfs_buf	*bp)
+{
+	bp->b_flags |= XBF_STALE;
+	atomic_set(&(bp)->b_lru_ref, 0);
+	if (!list_empty(&bp->b_lru)) {
+		struct xfs_buftarg *btp = bp->b_target;
+
+		spin_lock(&btp->bt_lru_lock);
+		if (!list_empty(&bp->b_lru)) {
+			list_del_init(&bp->b_lru);
+			btp->bt_lru_nr--;
+			atomic_dec(&bp->b_hold);
+		}
+		spin_unlock(&btp->bt_lru_lock);
+	}
+	ASSERT(atomic_read(&bp->b_hold) >= 1);
+}
 
 STATIC void
 _xfs_buf_initialize(
@@ -186,7 +252,9 @@
 
 	memset(bp, 0, sizeof(xfs_buf_t));
 	atomic_set(&bp->b_hold, 1);
+	atomic_set(&bp->b_lru_ref, 1);
 	init_completion(&bp->b_iowait);
+	INIT_LIST_HEAD(&bp->b_lru);
 	INIT_LIST_HEAD(&bp->b_list);
 	RB_CLEAR_NODE(&bp->b_rbnode);
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
@@ -262,6 +330,8 @@
 {
 	trace_xfs_buf_free(bp, _RET_IP_);
 
+	ASSERT(list_empty(&bp->b_lru));
+
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
@@ -337,7 +407,6 @@
 					__func__, gfp_mask);
 
 			XFS_STATS_INC(xb_page_retries);
-			xfsbufd_wakeup(NULL, 0, gfp_mask);
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto retry;
 		}
@@ -827,7 +896,7 @@
 	trace_xfs_buf_rele(bp, _RET_IP_);
 
 	if (!pag) {
-		ASSERT(!bp->b_relse);
+		ASSERT(list_empty(&bp->b_lru));
 		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 		if (atomic_dec_and_test(&bp->b_hold))
 			xfs_buf_free(bp);
@@ -835,13 +904,15 @@
 	}
 
 	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
+
 	ASSERT(atomic_read(&bp->b_hold) > 0);
 	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
-		if (bp->b_relse) {
-			atomic_inc(&bp->b_hold);
+		if (!(bp->b_flags & XBF_STALE) &&
+			   atomic_read(&bp->b_lru_ref)) {
+			xfs_buf_lru_add(bp);
 			spin_unlock(&pag->pag_buf_lock);
-			bp->b_relse(bp);
 		} else {
+			xfs_buf_lru_del(bp);
 			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
 			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
 			spin_unlock(&pag->pag_buf_lock);
@@ -1438,51 +1509,84 @@
  */
 
 /*
- *	Wait for any bufs with callbacks that have been submitted but
- *	have not yet returned... walk the hash list for the target.
+ * Wait for any bufs with callbacks that have been submitted but have not yet
+ * returned. These buffers will have an elevated hold count, so wait on those
+ * while freeing all the buffers only held by the LRU.
  */
 void
 xfs_wait_buftarg(
 	struct xfs_buftarg	*btp)
 {
-	struct xfs_perag	*pag;
-	uint			i;
+	struct xfs_buf		*bp;
 
-	for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
-		pag = xfs_perag_get(btp->bt_mount, i);
-		spin_lock(&pag->pag_buf_lock);
-		while (rb_first(&pag->pag_buf_tree)) {
-			spin_unlock(&pag->pag_buf_lock);
+restart:
+	spin_lock(&btp->bt_lru_lock);
+	while (!list_empty(&btp->bt_lru)) {
+		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+		if (atomic_read(&bp->b_hold) > 1) {
+			spin_unlock(&btp->bt_lru_lock);
 			delay(100);
-			spin_lock(&pag->pag_buf_lock);
+			goto restart;
 		}
-		spin_unlock(&pag->pag_buf_lock);
-		xfs_perag_put(pag);
+		/*
+		 * clear the LRU reference count so the buffer doesn't get
+		 * ignored in xfs_buf_rele().
+		 */
+		atomic_set(&bp->b_lru_ref, 0);
+		spin_unlock(&btp->bt_lru_lock);
+		xfs_buf_rele(bp);
+		spin_lock(&btp->bt_lru_lock);
 	}
+	spin_unlock(&btp->bt_lru_lock);
 }
 
-/*
- *	buftarg list for delwrite queue processing
- */
-static LIST_HEAD(xfs_buftarg_list);
-static DEFINE_SPINLOCK(xfs_buftarg_lock);
-
-STATIC void
-xfs_register_buftarg(
-	xfs_buftarg_t           *btp)
+int
+xfs_buftarg_shrink(
+	struct shrinker		*shrink,
+	int			nr_to_scan,
+	gfp_t			mask)
 {
-	spin_lock(&xfs_buftarg_lock);
-	list_add(&btp->bt_list, &xfs_buftarg_list);
-	spin_unlock(&xfs_buftarg_lock);
-}
+	struct xfs_buftarg	*btp = container_of(shrink,
+					struct xfs_buftarg, bt_shrinker);
+	struct xfs_buf		*bp;
+	LIST_HEAD(dispose);
 
-STATIC void
-xfs_unregister_buftarg(
-	xfs_buftarg_t           *btp)
-{
-	spin_lock(&xfs_buftarg_lock);
-	list_del(&btp->bt_list);
-	spin_unlock(&xfs_buftarg_lock);
+	if (!nr_to_scan)
+		return btp->bt_lru_nr;
+
+	spin_lock(&btp->bt_lru_lock);
+	while (!list_empty(&btp->bt_lru)) {
+		if (nr_to_scan-- <= 0)
+			break;
+
+		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+
+		/*
+		 * Decrement the b_lru_ref count unless the value is already
+		 * zero. If the value is already zero, we need to reclaim the
+		 * buffer, otherwise it gets another trip through the LRU.
+		 */
+		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+			list_move_tail(&bp->b_lru, &btp->bt_lru);
+			continue;
+		}
+
+		/*
+		 * remove the buffer from the LRU now to avoid needing another
+		 * lock round trip inside xfs_buf_rele().
+		 */
+		list_move(&bp->b_lru, &dispose);
+		btp->bt_lru_nr--;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+
+	while (!list_empty(&dispose)) {
+		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+		list_del_init(&bp->b_lru);
+		xfs_buf_rele(bp);
+	}
+
+	return btp->bt_lru_nr;
 }
 
 void
@@ -1490,17 +1594,14 @@
 	struct xfs_mount	*mp,
 	struct xfs_buftarg	*btp)
 {
+	unregister_shrinker(&btp->bt_shrinker);
+
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
 	iput(btp->bt_mapping->host);
 
-	/* Unregister the buftarg first so that we don't get a
-	 * wakeup finding a non-existent task
-	 */
-	xfs_unregister_buftarg(btp);
 	kthread_stop(btp->bt_task);
-
 	kmem_free(btp);
 }
 
@@ -1597,20 +1698,13 @@
 	xfs_buftarg_t		*btp,
 	const char		*fsname)
 {
-	int	error = 0;
-
-	INIT_LIST_HEAD(&btp->bt_list);
 	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
 	spin_lock_init(&btp->bt_delwrite_lock);
 	btp->bt_flags = 0;
 	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
-	if (IS_ERR(btp->bt_task)) {
-		error = PTR_ERR(btp->bt_task);
-		goto out_error;
-	}
-	xfs_register_buftarg(btp);
-out_error:
-	return error;
+	if (IS_ERR(btp->bt_task))
+		return PTR_ERR(btp->bt_task);
+	return 0;
 }
 
 xfs_buftarg_t *
@@ -1627,12 +1721,17 @@
 	btp->bt_mount = mp;
 	btp->bt_dev =  bdev->bd_dev;
 	btp->bt_bdev = bdev;
+	INIT_LIST_HEAD(&btp->bt_lru);
+	spin_lock_init(&btp->bt_lru_lock);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
 	if (xfs_mapping_buftarg(btp, bdev))
 		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
+	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
+	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&btp->bt_shrinker);
 	return btp;
 
 error:
@@ -1737,27 +1836,6 @@
 	flush_workqueue(queue);
 }
 
-STATIC int
-xfsbufd_wakeup(
-	struct shrinker		*shrink,
-	int			priority,
-	gfp_t			mask)
-{
-	xfs_buftarg_t		*btp;
-
-	spin_lock(&xfs_buftarg_lock);
-	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
-		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
-			continue;
-		if (list_empty(&btp->bt_delwrite_queue))
-			continue;
-		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
-		wake_up_process(btp->bt_task);
-	}
-	spin_unlock(&xfs_buftarg_lock);
-	return 0;
-}
-
 /*
  * Move as many buffers as specified to the supplied list
  * indicating if we skipped any buffers to prevent deadlocks.
@@ -1952,7 +2030,6 @@
 	if (!xfsconvertd_workqueue)
 		goto out_destroy_xfsdatad_workqueue;
 
-	register_shrinker(&xfs_buf_shake);
 	return 0;
 
  out_destroy_xfsdatad_workqueue:
@@ -1968,7 +2045,6 @@
 void
 xfs_buf_terminate(void)
 {
-	unregister_shrinker(&xfs_buf_shake);
 	destroy_workqueue(xfsconvertd_workqueue);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
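
The per-buftarg LRU above replaces the global xfsbufd wakeup shrinker. Reclaim follows the aging policy described in the xfs_buftarg_shrink() comment: each scan decrements b_lru_ref, and only buffers whose count has already reached zero are moved to a dispose list and released. A self-contained sketch of that policy with simplified stand-ins (a flat array instead of the locked bt_lru list, no hold counts):

#include <stdio.h>

#define NBUF 4

struct toy_buf {
	int lru_ref;	/* analogue of b_lru_ref */
	int disposed;
};

static int toy_shrink(struct toy_buf *lru, int n, int nr_to_scan)
{
	int freed = 0;

	for (int i = 0; i < n && nr_to_scan-- > 0; i++) {
		struct toy_buf *bp = &lru[i];

		if (bp->disposed)
			continue;
		if (bp->lru_ref > 0) {	/* still referenced: just age it */
			bp->lru_ref--;
			continue;
		}
		bp->disposed = 1;	/* count already zero: reclaim */
		freed++;
	}
	return freed;
}

int main(void)
{
	struct toy_buf lru[NBUF] = { {0}, {1}, {2}, {0} };

	printf("freed %d on first pass\n", toy_shrink(lru, NBUF, NBUF));
	printf("freed %d on second pass\n", toy_shrink(lru, NBUF, NBUF));
	return 0;
}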
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 383a3f3..cbe6595 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -128,10 +128,15 @@
 
 	/* per device delwri queue */
 	struct task_struct	*bt_task;
-	struct list_head	bt_list;
 	struct list_head	bt_delwrite_queue;
 	spinlock_t		bt_delwrite_lock;
 	unsigned long		bt_flags;
+
+	/* LRU control structures */
+	struct shrinker		bt_shrinker;
+	struct list_head	bt_lru;
+	spinlock_t		bt_lru_lock;
+	unsigned int		bt_lru_nr;
 } xfs_buftarg_t;
 
 /*
@@ -147,8 +152,6 @@
 
 struct xfs_buf;
 typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
-typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
-typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
 
 #define XB_PAGES	2
 
@@ -164,9 +167,11 @@
 	xfs_off_t		b_file_offset;	/* offset in file */
 	size_t			b_buffer_length;/* size of buffer in bytes */
 	atomic_t		b_hold;		/* reference count */
+	atomic_t		b_lru_ref;	/* lru reclaim ref count */
 	xfs_buf_flags_t		b_flags;	/* status flags */
 	struct semaphore	b_sema;		/* semaphore for lockables */
 
+	struct list_head	b_lru;		/* lru list */
 	wait_queue_head_t	b_waiters;	/* unpin waiters */
 	struct list_head	b_list;
 	struct xfs_perag	*b_pag;		/* contains rbtree root */
@@ -176,7 +181,6 @@
 	void			*b_addr;	/* virtual address of buffer */
 	struct work_struct	b_iodone_work;
 	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
-	xfs_buf_relse_t		b_relse;	/* releasing function */
 	struct completion	b_iowait;	/* queue for I/O waiters */
 	void			*b_fspriv;
 	void			*b_fspriv2;
@@ -264,7 +268,8 @@
 #define XFS_BUF_ZEROFLAGS(bp)	((bp)->b_flags &= \
 		~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
 
-#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XBF_STALE)
+void xfs_buf_stale(struct xfs_buf *bp);
+#define XFS_BUF_STALE(bp)	xfs_buf_stale(bp);
 #define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
 #define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)
 #define XFS_BUF_SUPER_STALE(bp)	do {				\
@@ -315,7 +320,6 @@
 #define XFS_BUF_FSPRIVATE2(bp, type)		((type)(bp)->b_fspriv2)
 #define XFS_BUF_SET_FSPRIVATE2(bp, val)		((bp)->b_fspriv2 = (void*)(val))
 #define XFS_BUF_SET_START(bp)			do { } while (0)
-#define XFS_BUF_SET_BRELSE_FUNC(bp, func)	((bp)->b_relse = (func))
 
 #define XFS_BUF_PTR(bp)			(xfs_caddr_t)((bp)->b_addr)
 #define XFS_BUF_SET_PTR(bp, val, cnt)	xfs_buf_associate_memory(bp, val, cnt)
@@ -328,9 +332,15 @@
 #define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)
 #define XFS_BUF_SET_SIZE(bp, cnt)	((bp)->b_buffer_length = (cnt))
 
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	do { } while (0)
+static inline void
+xfs_buf_set_ref(
+	struct xfs_buf	*bp,
+	int		lru_ref)
+{
+	atomic_set(&bp->b_lru_ref, lru_ref);
+}
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	xfs_buf_set_ref(bp, ref)
 #define XFS_BUF_SET_VTYPE(bp, type)		do { } while (0)
-#define XFS_BUF_SET_REF(bp, ref)		do { } while (0)
 
 #define XFS_BUF_ISPINNED(bp)	atomic_read(&((bp)->b_pin_count))
 
@@ -346,8 +356,7 @@
 
 static inline void xfs_buf_relse(xfs_buf_t *bp)
 {
-	if (!bp->b_relse)
-		xfs_buf_unlock(bp);
+	xfs_buf_unlock(bp);
 	xfs_buf_rele(bp);
 }
 
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
new file mode 100644
index 0000000..d61611c
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_discard.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_discard.h"
+#include "xfs_trace.h"
+
+STATIC int
+xfs_trim_extents(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_fsblock_t		start,
+	xfs_fsblock_t		len,
+	xfs_fsblock_t		minlen,
+	__uint64_t		*blocks_trimmed)
+{
+	struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
+	struct xfs_btree_cur	*cur;
+	struct xfs_buf		*agbp;
+	struct xfs_perag	*pag;
+	int			error;
+	int			i;
+
+	pag = xfs_perag_get(mp, agno);
+
+	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	if (error || !agbp)
+		goto out_put_perag;
+
+	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
+
+	/*
+	 * Force out the log.  This means any transactions that might have freed
+	 * space before we took the AGF buffer lock are now on disk, and the
+	 * volatile disk cache is flushed.
+	 */
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
+	/*
+	 * Look up the longest free extent recorded in the AGF and start with it.
+	 */
+	error = xfs_alloc_lookup_le(cur, 0,
+				    XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
+	if (error)
+		goto out_del_cursor;
+
+	/*
+	 * Loop until we are done with all extents that are large
+	 * enough to be worth discarding.
+	 */
+	while (i) {
+		xfs_agblock_t fbno;
+		xfs_extlen_t flen;
+
+		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
+		if (error)
+			goto out_del_cursor;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
+		ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
+
+		/*
+		 * Too small?  Give up.
+		 */
+		if (flen < minlen) {
+			trace_xfs_discard_toosmall(mp, agno, fbno, flen);
+			goto out_del_cursor;
+		}
+
+		/*
+		 * If the extent is entirely outside of the range we are
+		 * supposed to discard, skip it.  Do not bother to trim
+		 * down partially overlapping ranges for now.
+		 */
+		if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start ||
+		    XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) {
+			trace_xfs_discard_exclude(mp, agno, fbno, flen);
+			goto next_extent;
+		}
+
+		/*
+		 * If any blocks in the range are still busy, skip the
+		 * discard and try again the next time.
+		 */
+		if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
+			trace_xfs_discard_busy(mp, agno, fbno, flen);
+			goto next_extent;
+		}
+
+		trace_xfs_discard_extent(mp, agno, fbno, flen);
+		error = -blkdev_issue_discard(bdev,
+				XFS_AGB_TO_DADDR(mp, agno, fbno),
+				XFS_FSB_TO_BB(mp, flen),
+				GFP_NOFS, 0);
+		if (error)
+			goto out_del_cursor;
+		*blocks_trimmed += flen;
+
+next_extent:
+		error = xfs_btree_decrement(cur, 0, &i);
+		if (error)
+			goto out_del_cursor;
+	}
+
+out_del_cursor:
+	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	xfs_buf_relse(agbp);
+out_put_perag:
+	xfs_perag_put(pag);
+	return error;
+}
+
+int
+xfs_ioc_trim(
+	struct xfs_mount		*mp,
+	struct fstrim_range __user	*urange)
+{
+	struct request_queue	*q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
+	unsigned int		granularity = q->limits.discard_granularity;
+	struct fstrim_range	range;
+	xfs_fsblock_t		start, len, minlen;
+	xfs_agnumber_t		start_agno, end_agno, agno;
+	__uint64_t		blocks_trimmed = 0;
+	int			error, last_error = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -XFS_ERROR(EPERM);
+	if (!blk_queue_discard(q))
+		return -XFS_ERROR(EOPNOTSUPP);
+	if (copy_from_user(&range, urange, sizeof(range)))
+		return -XFS_ERROR(EFAULT);
+
+	/*
+	 * Truncating down the len isn't actually quite correct, but using
+	 * XFS_B_TO_FSB would mean we trivially get overflows for values
+	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
+	 * used by the fstrim application.  In the end it really doesn't
+	 * matter as trimming blocks is an advisory interface.
+	 */
+	start = XFS_B_TO_FSBT(mp, range.start);
+	len = XFS_B_TO_FSBT(mp, range.len);
+	minlen = XFS_B_TO_FSB(mp, max_t(u64, granularity, range.minlen));
+
+	start_agno = XFS_FSB_TO_AGNO(mp, start);
+	if (start_agno >= mp->m_sb.sb_agcount)
+		return -XFS_ERROR(EINVAL);
+
+	end_agno = XFS_FSB_TO_AGNO(mp, start + len);
+	if (end_agno >= mp->m_sb.sb_agcount)
+		end_agno = mp->m_sb.sb_agcount - 1;
+
+	for (agno = start_agno; agno <= end_agno; agno++) {
+		error = -xfs_trim_extents(mp, agno, start, len, minlen,
+					  &blocks_trimmed);
+		if (error)
+			last_error = error;
+	}
+
+	if (last_error)
+		return last_error;
+
+	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
+	if (copy_to_user(urange, &range, sizeof(range)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h
new file mode 100644
index 0000000..e82b6dd
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_discard.h
@@ -0,0 +1,8 @@
+#ifndef XFS_DISCARD_H
+#define XFS_DISCARD_H 1
+
+struct fstrim_range;
+
+extern int	xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *);
+
+#endif /* XFS_DISCARD_H */
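
For reference, userspace reaches the new xfs_ioc_trim() handler through the generic FITRIM ioctl and struct fstrim_range from <linux/fs.h>. The standalone sketch below is illustrative only: the mount point path and the 64 MiB minimum extent length are arbitrary assumptions, and the program simply mirrors what an fstrim(8)-style tool would do.

/* Hypothetical fstrim-style caller for the FITRIM ioctl handled above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	const char *mnt = argc > 1 ? argv[1] : "/mnt/xfs";	/* assumed mount point */
	struct fstrim_range range;
	int fd = open(mnt, O_RDONLY | O_DIRECTORY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.start = 0;
	range.len = UINT64_MAX;		/* whole filesystem, as fstrim(8) does */
	range.minlen = 64ULL << 20;	/* skip free extents smaller than 64 MiB */

	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}

	/* On return the kernel has rewritten range.len with the bytes trimmed. */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
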
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 3764d74..fc0114d 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -70,8 +70,16 @@
 	else
 		fileid_type = FILEID_INO32_GEN_PARENT;
 
-	/* filesystem may contain 64bit inode numbers */
-	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS))
+	/*
+	 * If the filesystem may contain 64bit inode numbers, we need
+	 * to use larger file handles that can represent them.
+	 *
+	 * While we only allocate inodes that do not fit into 32 bits on
+	 * large enough filesystems, any such filesystem may contain them,
+	 * thus the slightly confusing-looking conditional below.
+	 */
+	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
+	    (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
 		fileid_type |= XFS_FILEID_TYPE_64FLAG;
 
 	/*
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index ba8ad42..a55c1b4 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -37,10 +37,45 @@
 #include "xfs_trace.h"
 
 #include <linux/dcache.h>
+#include <linux/falloc.h>
 
 static const struct vm_operations_struct xfs_file_vm_ops;
 
 /*
+ * Locking primitives for read and write IO paths to ensure we consistently use
+ * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
+ */
+static inline void
+xfs_rw_ilock(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_lock(&VFS_I(ip)->i_mutex);
+	xfs_ilock(ip, type);
+}
+
+static inline void
+xfs_rw_iunlock(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	xfs_iunlock(ip, type);
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_unlock(&VFS_I(ip)->i_mutex);
+}
+
+static inline void
+xfs_rw_ilock_demote(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	xfs_ilock_demote(ip, type);
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_unlock(&VFS_I(ip)->i_mutex);
+}
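
The three helpers above exist so that every read and write path takes inode->i_mutex and the XFS iolock in one fixed order. As a generic illustration of that discipline (plain pthreads, not XFS code; the mutex names are made up for the example):

/* Sketch: always take two locks in the same order to avoid ABBA deadlocks. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* cf. i_mutex */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* cf. i_iolock */

static void rw_lock_both(void)
{
	pthread_mutex_lock(&outer);	/* always first */
	pthread_mutex_lock(&inner);	/* always second */
}

static void rw_unlock_both(void)
{
	pthread_mutex_unlock(&inner);	/* release in reverse order */
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	rw_lock_both();
	puts("critical section");
	rw_unlock_both();
	return 0;
}
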
+
+/*
  *	xfs_iozero
  *
  *	xfs_iozero clears the specified range of buffer supplied,
@@ -262,22 +297,21 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if (unlikely(ioflags & IO_ISDIRECT))
-		mutex_lock(&inode->i_mutex);
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
 	if (unlikely(ioflags & IO_ISDIRECT)) {
+		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+
 		if (inode->i_mapping->nrpages) {
 			ret = -xfs_flushinval_pages(ip,
 					(iocb->ki_pos & PAGE_CACHE_MASK),
 					-1, FI_REMAPF_LOCKED);
+			if (ret) {
+				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
+				return ret;
+			}
 		}
-		mutex_unlock(&inode->i_mutex);
-		if (ret) {
-			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-			return ret;
-		}
-	}
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+	} else
+		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 
 	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
 
@@ -285,7 +319,7 @@
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
@@ -309,7 +343,7 @@
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 
 	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
 
@@ -317,10 +351,61 @@
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
+STATIC void
+xfs_aio_write_isize_update(
+	struct inode	*inode,
+	loff_t		*ppos,
+	ssize_t		bytes_written)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	xfs_fsize_t		isize = i_size_read(inode);
+
+	if (bytes_written > 0)
+		XFS_STATS_ADD(xs_write_bytes, bytes_written);
+
+	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
+					*ppos > isize))
+		*ppos = isize;
+
+	if (*ppos > ip->i_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		if (*ppos > ip->i_size)
+			ip->i_size = *ppos;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
+ * part of the I/O may have been written to disk before the error occurred.  In
+ * this case the on-disk file size may have been adjusted beyond the in-memory
+ * file size and now needs to be truncated back.
+ */
+STATIC void
+xfs_aio_write_newsize_update(
+	struct xfs_inode	*ip)
+{
+	if (ip->i_new_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		ip->i_new_size = 0;
+		if (ip->i_d.di_size > ip->i_size)
+			ip->i_d.di_size = ip->i_size;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * xfs_file_splice_write() does not use xfs_rw_ilock() because
+ * generic_file_splice_write() takes the i_mutex itself. This, in theory,
+ * could cause lock inversions between the aio_write path and the splice path
+ * if someone is doing concurrent splice(2) based writes and write(2) based
+ * writes to the same inode. The only real way to fix this is to re-implement
+ * the generic code here with correct locking orders.
+ */
 STATIC ssize_t
 xfs_file_splice_write(
 	struct pipe_inode_info	*pipe,
@@ -331,7 +416,7 @@
 {
 	struct inode		*inode = outfilp->f_mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	xfs_fsize_t		isize, new_size;
+	xfs_fsize_t		new_size;
 	int			ioflags = 0;
 	ssize_t			ret;
 
@@ -355,27 +440,9 @@
 	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
 
 	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
-	if (ret > 0)
-		XFS_STATS_ADD(xs_write_bytes, ret);
 
-	isize = i_size_read(inode);
-	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
-		*ppos = isize;
-
-	if (*ppos > ip->i_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		if (*ppos > ip->i_size)
-			ip->i_size = *ppos;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-
-	if (ip->i_new_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
-		if (ip->i_d.di_size > ip->i_size)
-			ip->i_d.di_size = ip->i_size;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
+	xfs_aio_write_isize_update(inode, ppos, ret);
+	xfs_aio_write_newsize_update(ip);
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 	return ret;
 }
@@ -562,6 +629,194 @@
 	return error;
 }
 
+/*
+ * Common pre-write limit and setup checks.
+ *
+ * Returns with iolock held according to @iolock.
+ */
+STATIC ssize_t
+xfs_file_aio_write_checks(
+	struct file		*file,
+	loff_t			*pos,
+	size_t			*count,
+	int			*iolock)
+{
+	struct inode		*inode = file->f_mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	xfs_fsize_t		new_size;
+	int			error = 0;
+
+	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
+	if (error) {
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
+		*iolock = 0;
+		return error;
+	}
+
+	new_size = *pos + *count;
+	if (new_size > ip->i_size)
+		ip->i_new_size = new_size;
+
+	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
+		file_update_time(file);
+
+	/*
+	 * If the offset is beyond the size of the file, we need to zero any
+	 * blocks that fall between the existing EOF and the start of this
+	 * write.
+	 */
+	if (*pos > ip->i_size)
+		error = -xfs_zero_eof(ip, *pos, ip->i_size);
+
+	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	if (error)
+		return error;
+
+	/*
+	 * If we're writing the file then make sure to clear the setuid and
+	 * setgid bits if the process is not being run by root.  This keeps
+	 * people from modifying setuid and setgid binaries.
+	 */
+	return file_remove_suid(file);
+
+}
+
+/*
+ * xfs_file_dio_aio_write - handle direct IO writes
+ *
+ * Lock the inode appropriately to prepare for and issue a direct IO write.
+ * By separating it from the buffered write path we remove all the
+ * tricky-to-follow locking changes and looping.
+ *
+ * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
+ * until we're sure the bytes at the new EOF have been zeroed and/or the cached
+ * pages are flushed out.
+ *
+ * In most cases the direct IO writes will be done holding IOLOCK_SHARED
+ * allowing them to be done in parallel with reads and other direct IO writes.
+ * However, if the IO is not aligned to filesystem blocks, the direct IO layer
+ * needs to do sub-block zeroing and that requires serialisation against other
+ * direct IOs to the same block. In this case we need to serialise the
+ * submission of the unaligned IOs so that we don't get racing block zeroing in
+ * the dio layer.  To avoid the problem with aio, we also need to wait for
+ * outstanding IOs to complete so that unwritten extent conversion is completed
+ * before we try to map the overlapping block. This is currently implemented by
+ * hitting it with a big hammer (i.e. xfs_ioend_wait()).
+ *
+ * Returns with locks held indicated by @iolock and errors indicated by
+ * negative return values.
+ */
+STATIC ssize_t
+xfs_file_dio_aio_write(
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned long		nr_segs,
+	loff_t			pos,
+	size_t			ocount,
+	int			*iolock)
+{
+	struct file		*file = iocb->ki_filp;
+	struct address_space	*mapping = file->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			ret = 0;
+	size_t			count = ocount;
+	int			unaligned_io = 0;
+	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
+					mp->m_rtdev_targp : mp->m_ddev_targp;
+
+	*iolock = 0;
+	if ((pos & target->bt_smask) || (count & target->bt_smask))
+		return -XFS_ERROR(EINVAL);
+
+	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
+		unaligned_io = 1;
+
+	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
+		*iolock = XFS_IOLOCK_EXCL;
+	else
+		*iolock = XFS_IOLOCK_SHARED;
+	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+
+	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	if (ret)
+		return ret;
+
+	if (mapping->nrpages) {
+		WARN_ON(*iolock != XFS_IOLOCK_EXCL);
+		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
+							FI_REMAPF_LOCKED);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * If we are doing unaligned IO, wait for all other IO to drain,
+	 * otherwise demote the lock if we had to flush cached pages
+	 */
+	if (unaligned_io)
+		xfs_ioend_wait(ip);
+	else if (*iolock == XFS_IOLOCK_EXCL) {
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		*iolock = XFS_IOLOCK_SHARED;
+	}
+
+	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
+	ret = generic_file_direct_write(iocb, iovp,
+			&nr_segs, pos, &iocb->ki_pos, count, ocount);
+
+	/* No fallback to buffered IO on errors for XFS. */
+	ASSERT(ret < 0 || ret == count);
+	return ret;
+}
+
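
To make the alignment discussion above concrete, here is a small standalone userspace sketch of a direct IO write that satisfies the sector-mask check; the path and the 4096-byte alignment are assumptions chosen to cover common sector sizes, not values taken from this patch.

/* Sketch: issue a direct IO write aligned to both sector and block size. */
#define _GNU_SOURCE			/* O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t align = 4096;	/* assumed >= the device's logical sector size */
	void *buf;
	int fd = open("/mnt/xfs/dio-test", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (posix_memalign(&buf, align, align) != 0) {
		fprintf(stderr, "posix_memalign failed\n");
		return 1;
	}
	memset(buf, 0xab, align);

	/* Buffer, file offset and length are all multiples of the alignment,
	 * so this IO avoids the sub-block zeroing case described above. */
	if (pwrite(fd, buf, align, 0) != (ssize_t)align)
		perror("pwrite");

	free(buf);
	close(fd);
	return 0;
}
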
+STATIC ssize_t
+xfs_file_buffered_aio_write(
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned long		nr_segs,
+	loff_t			pos,
+	size_t			ocount,
+	int			*iolock)
+{
+	struct file		*file = iocb->ki_filp;
+	struct address_space	*mapping = file->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	ssize_t			ret;
+	int			enospc = 0;
+	size_t			count = ocount;
+
+	*iolock = XFS_IOLOCK_EXCL;
+	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+
+	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	if (ret)
+		return ret;
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+
+write_retry:
+	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
+	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
+			pos, &iocb->ki_pos, count, ret);
+	/*
+	 * if we just got an ENOSPC, flush the inode now we aren't holding any
+	 * page locks and retry *once*
+	 */
+	if (ret == -ENOSPC && !enospc) {
+		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
+		if (ret)
+			return ret;
+		enospc = 1;
+		goto write_retry;
+	}
+	current->backing_dev_info = NULL;
+	return ret;
+}
+
 STATIC ssize_t
 xfs_file_aio_write(
 	struct kiocb		*iocb,
@@ -573,236 +828,115 @@
 	struct address_space	*mapping = file->f_mapping;
 	struct inode		*inode = mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			ret = 0, error = 0;
-	int			ioflags = 0;
-	xfs_fsize_t		isize, new_size;
+	ssize_t			ret;
 	int			iolock;
-	size_t			ocount = 0, count;
-	int			need_i_mutex;
+	size_t			ocount = 0;
 
 	XFS_STATS_INC(xs_write_calls);
 
 	BUG_ON(iocb->ki_pos != pos);
 
-	if (unlikely(file->f_flags & O_DIRECT))
-		ioflags |= IO_ISDIRECT;
-	if (file->f_mode & FMODE_NOCMTIME)
-		ioflags |= IO_INVIS;
+	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
+	if (ret)
+		return ret;
 
-	error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-	if (error)
-		return error;
-
-	count = ocount;
-	if (count == 0)
+	if (ocount == 0)
 		return 0;
 
-	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);
+	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);
 
-	if (XFS_FORCED_SHUTDOWN(mp))
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-relock:
-	if (ioflags & IO_ISDIRECT) {
-		iolock = XFS_IOLOCK_SHARED;
-		need_i_mutex = 0;
-	} else {
-		iolock = XFS_IOLOCK_EXCL;
-		need_i_mutex = 1;
-		mutex_lock(&inode->i_mutex);
-	}
+	if (unlikely(file->f_flags & O_DIRECT))
+		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
+						ocount, &iolock);
+	else
+		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
+						ocount, &iolock);
 
-	xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
+	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);
 
-start:
-	error = -generic_write_checks(file, &pos, &count,
-					S_ISBLK(inode->i_mode));
-	if (error) {
-		xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-		goto out_unlock_mutex;
-	}
-
-	if (ioflags & IO_ISDIRECT) {
-		xfs_buftarg_t	*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-				mp->m_rtdev_targp : mp->m_ddev_targp;
-
-		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-			return XFS_ERROR(-EINVAL);
-		}
-
-		if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-			iolock = XFS_IOLOCK_EXCL;
-			need_i_mutex = 1;
-			mutex_lock(&inode->i_mutex);
-			xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
-			goto start;
-		}
-	}
-
-	new_size = pos + count;
-	if (new_size > ip->i_size)
-		ip->i_new_size = new_size;
-
-	if (likely(!(ioflags & IO_INVIS)))
-		file_update_time(file);
-
-	/*
-	 * If the offset is beyond the size of the file, we have a couple
-	 * of things to do. First, if there is already space allocated
-	 * we need to either create holes or zero the disk or ...
-	 *
-	 * If there is a page where the previous size lands, we need
-	 * to zero it out up to the new size.
-	 */
-
-	if (pos > ip->i_size) {
-		error = xfs_zero_eof(ip, pos, ip->i_size);
-		if (error) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL);
-			goto out_unlock_internal;
-		}
-	}
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-	/*
-	 * If we're writing the file then make sure to clear the
-	 * setuid and setgid bits if the process is not being run
-	 * by root.  This keeps people from modifying setuid and
-	 * setgid binaries.
-	 */
-	error = -file_remove_suid(file);
-	if (unlikely(error))
-		goto out_unlock_internal;
-
-	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
-
-	if ((ioflags & IO_ISDIRECT)) {
-		if (mapping->nrpages) {
-			WARN_ON(need_i_mutex == 0);
-			error = xfs_flushinval_pages(ip,
-					(pos & PAGE_CACHE_MASK),
-					-1, FI_REMAPF_LOCKED);
-			if (error)
-				goto out_unlock_internal;
-		}
-
-		if (need_i_mutex) {
-			/* demote the lock now the cached pages are gone */
-			xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
-			mutex_unlock(&inode->i_mutex);
-
-			iolock = XFS_IOLOCK_SHARED;
-			need_i_mutex = 0;
-		}
-
-		trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags);
-		ret = generic_file_direct_write(iocb, iovp,
-				&nr_segs, pos, &iocb->ki_pos, count, ocount);
-
-		/*
-		 * direct-io write to a hole: fall through to buffered I/O
-		 * for completing the rest of the request.
-		 */
-		if (ret >= 0 && ret != count) {
-			XFS_STATS_ADD(xs_write_bytes, ret);
-
-			pos += ret;
-			count -= ret;
-
-			ioflags &= ~IO_ISDIRECT;
-			xfs_iunlock(ip, iolock);
-			goto relock;
-		}
-	} else {
-		int enospc = 0;
-		ssize_t ret2 = 0;
-
-write_retry:
-		trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags);
-		ret2 = generic_file_buffered_write(iocb, iovp, nr_segs,
-				pos, &iocb->ki_pos, count, ret);
-		/*
-		 * if we just got an ENOSPC, flush the inode now we
-		 * aren't holding any page locks and retry *once*
-		 */
-		if (ret2 == -ENOSPC && !enospc) {
-			error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
-			if (error)
-				goto out_unlock_internal;
-			enospc = 1;
-			goto write_retry;
-		}
-		ret = ret2;
-	}
-
-	current->backing_dev_info = NULL;
-
-	isize = i_size_read(inode);
-	if (unlikely(ret < 0 && ret != -EFAULT && iocb->ki_pos > isize))
-		iocb->ki_pos = isize;
-
-	if (iocb->ki_pos > ip->i_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		if (iocb->ki_pos > ip->i_size)
-			ip->i_size = iocb->ki_pos;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-
-	error = -ret;
 	if (ret <= 0)
-		goto out_unlock_internal;
-
-	XFS_STATS_ADD(xs_write_bytes, ret);
+		goto out_unlock;
 
 	/* Handle various SYNC-type writes */
 	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
 		loff_t end = pos + ret - 1;
-		int error2;
+		int error, error2;
 
-		xfs_iunlock(ip, iolock);
-		if (need_i_mutex)
-			mutex_unlock(&inode->i_mutex);
-
-		error2 = filemap_write_and_wait_range(mapping, pos, end);
-		if (!error)
-			error = error2;
-		if (need_i_mutex)
-			mutex_lock(&inode->i_mutex);
-		xfs_ilock(ip, iolock);
+		xfs_rw_iunlock(ip, iolock);
+		error = filemap_write_and_wait_range(mapping, pos, end);
+		xfs_rw_ilock(ip, iolock);
 
 		error2 = -xfs_file_fsync(file,
 					 (file->f_flags & __O_SYNC) ? 0 : 1);
-		if (!error)
-			error = error2;
+		if (error)
+			ret = error;
+		else if (error2)
+			ret = error2;
 	}
 
- out_unlock_internal:
-	if (ip->i_new_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
-		/*
-		 * If this was a direct or synchronous I/O that failed (such
-		 * as ENOSPC) then part of the I/O may have been written to
-		 * disk before the error occured.  In this case the on-disk
-		 * file size may have been adjusted beyond the in-memory file
-		 * size and now needs to be truncated back.
-		 */
-		if (ip->i_d.di_size > ip->i_size)
-			ip->i_d.di_size = ip->i_size;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-	xfs_iunlock(ip, iolock);
- out_unlock_mutex:
-	if (need_i_mutex)
-		mutex_unlock(&inode->i_mutex);
-	return -error;
+out_unlock:
+	xfs_aio_write_newsize_update(ip);
+	xfs_rw_iunlock(ip, iolock);
+	return ret;
 }
 
+STATIC long
+xfs_file_fallocate(
+	struct file	*file,
+	int		mode,
+	loff_t		offset,
+	loff_t		len)
+{
+	struct inode	*inode = file->f_path.dentry->d_inode;
+	long		error;
+	loff_t		new_size = 0;
+	xfs_flock64_t	bf;
+	xfs_inode_t	*ip = XFS_I(inode);
+	int		cmd = XFS_IOC_RESVSP;
+
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
+
+	bf.l_whence = 0;
+	bf.l_start = offset;
+	bf.l_len = len;
+
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = XFS_IOC_UNRESVSP;
+
+	/* check the new inode size is valid before allocating */
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	    offset + len > i_size_read(inode)) {
+		new_size = offset + len;
+		error = inode_newsize_ok(inode, new_size);
+		if (error)
+			goto out_unlock;
+	}
+
+	error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
+	if (error)
+		goto out_unlock;
+
+	/* Change file size if needed */
+	if (new_size) {
+		struct iattr iattr;
+
+		iattr.ia_valid = ATTR_SIZE;
+		iattr.ia_size = new_size;
+		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+	}
+
+out_unlock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+}
+
+
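
Once the handler above is wired up as .fallocate later in this patch, userspace can exercise both of its branches through fallocate(2). A rough sketch with a placeholder path and arbitrary sizes follows; the hole punch is shown paired with FALLOC_FL_KEEP_SIZE, which is the conventional usage even though this handler only rejects unknown mode bits.

/* Sketch: preallocation and hole punching via fallocate(2). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>	/* FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/prealloc-test", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Reserve 16 MiB without changing i_size (the XFS_IOC_RESVSP path). */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0)
		perror("fallocate keep-size");

	/* Punch a 1 MiB hole at offset 4 MiB (the XFS_IOC_UNRESVSP path). */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20) < 0)
		perror("fallocate punch-hole");

	close(fd);
	return 0;
}
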
 STATIC int
 xfs_file_open(
 	struct inode	*inode,
@@ -921,6 +1055,7 @@
 	.open		= xfs_file_open,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
+	.fallocate	= xfs_file_fallocate,
 };
 
 const struct file_operations xfs_dir_file_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index ad442d9..0ca0e3c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -39,6 +39,7 @@
 #include "xfs_dfrag.h"
 #include "xfs_fsops.h"
 #include "xfs_vnodeops.h"
+#include "xfs_discard.h"
 #include "xfs_quota.h"
 #include "xfs_inode_item.h"
 #include "xfs_export.h"
@@ -694,14 +695,19 @@
 	xfs_mount_t		*mp,
 	void			__user *arg)
 {
-	xfs_fsop_geom_v1_t	fsgeo;
+	xfs_fsop_geom_t         fsgeo;
 	int			error;
 
-	error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
+	error = xfs_fs_geometry(mp, &fsgeo, 3);
 	if (error)
 		return -error;
 
-	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
+	/*
+	 * Caller should have passed an argument of type
+	 * xfs_fsop_geom_v1_t.  This is a proper subset of the
+	 * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
+	 */
+	if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
 		return -XFS_ERROR(EFAULT);
 	return 0;
 }
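
The comment above relies on xfs_fsop_geom_v1_t being a strict prefix of xfs_fsop_geom_t, so copying only the v1-sized head of the larger structure gives old callers exactly the layout they expect. A tiny standalone sketch of that versioning pattern, using made-up structure names rather than the real XFS definitions:

/* Prefix-compatible structure versioning: return only the v1-sized head. */
#include <stdio.h>
#include <string.h>

struct geom_v1 {			/* old ABI: what v1 callers expect back */
	unsigned int		blocksize;
	unsigned long long	datablocks;
};

struct geom_v2 {			/* new ABI: v1 fields first, new fields appended */
	unsigned int		blocksize;
	unsigned long long	datablocks;
	unsigned int		logsectsize;	/* new in v2 */
};

int main(void)
{
	struct geom_v2 full = { 4096, 1ULL << 20, 512 };
	struct geom_v1 legacy;

	/* Safe only because v1 is a strict prefix of v2, mirroring the
	 * copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)) above. */
	memcpy(&legacy, &full, sizeof(legacy));
	printf("v1 view: bsize=%u blocks=%llu\n",
	       legacy.blocksize, legacy.datablocks);
	return 0;
}
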
@@ -984,10 +990,22 @@
 
 		/*
 		 * Extent size must be a multiple of the appropriate block
-		 * size, if set at all.
+		 * size, if set at all. It must also be no larger than the
+		 * maximum extent size supported by the filesystem.
+		 *
+		 * Also, for non-realtime files, limit the extent size hint to
+		 * half the size of the AGs in the filesystem so alignment
+		 * doesn't result in extents larger than an AG.
 		 */
 		if (fa->fsx_extsize != 0) {
-			xfs_extlen_t	size;
+			xfs_extlen_t    size;
+			xfs_fsblock_t   extsize_fsb;
+
+			extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
+			if (extsize_fsb > MAXEXTLEN) {
+				code = XFS_ERROR(EINVAL);
+				goto error_return;
+			}
 
 			if (XFS_IS_REALTIME_INODE(ip) ||
 			    ((mask & FSX_XFLAGS) &&
@@ -996,6 +1014,10 @@
 				       mp->m_sb.sb_blocklog;
 			} else {
 				size = mp->m_sb.sb_blocksize;
+				if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
+					code = XFS_ERROR(EINVAL);
+					goto error_return;
+				}
 			}
 
 			if (fa->fsx_extsize % size) {
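
The checks above apply to the extent size hint that userspace sets through the XFS FSSETXATTR ioctl. A hedged sketch of setting such a hint follows; it assumes the xfsprogs <xfs/xfs_fs.h> header for struct fsxattr and the ioctl numbers, and the 16 MiB value is arbitrary — it merely has to be a multiple of the block size and now also within the new MAXEXTLEN and half-AG limits.

/* Sketch: set an extent size hint that the validation above will check. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* struct fsxattr, XFS_IOC_FS[GS]ETXATTR */

int main(void)
{
	struct fsxattr fsx;
	int fd = open("/mnt/xfs/streamfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, XFS_IOC_FSGETXATTR, &fsx) < 0) {
		perror("FSGETXATTR");
		return 1;
	}

	fsx.fsx_extsize = 16 << 20;		/* 16 MiB hint, assumed value */
	fsx.fsx_xflags |= XFS_XFLAG_EXTSIZE;

	if (ioctl(fd, XFS_IOC_FSSETXATTR, &fsx) < 0)
		perror("FSSETXATTR");

	close(fd);
	return 0;
}
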
@@ -1294,6 +1316,8 @@
 	trace_xfs_file_ioctl(ip);
 
 	switch (cmd) {
+	case FITRIM:
+		return xfs_ioc_trim(mp, arg);
 	case XFS_IOC_ALLOCSP:
 	case XFS_IOC_FREESP:
 	case XFS_IOC_RESVSP:
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 94d5fd6..bd57278 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -46,7 +46,6 @@
 #include <linux/namei.h>
 #include <linux/posix_acl.h>
 #include <linux/security.h>
-#include <linux/falloc.h>
 #include <linux/fiemap.h>
 #include <linux/slab.h>
 
@@ -505,58 +504,6 @@
 	return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
 }
 
-STATIC long
-xfs_vn_fallocate(
-	struct inode	*inode,
-	int		mode,
-	loff_t		offset,
-	loff_t		len)
-{
-	long		error;
-	loff_t		new_size = 0;
-	xfs_flock64_t	bf;
-	xfs_inode_t	*ip = XFS_I(inode);
-
-	/* preallocation on directories not yet supported */
-	error = -ENODEV;
-	if (S_ISDIR(inode->i_mode))
-		goto out_error;
-
-	bf.l_whence = 0;
-	bf.l_start = offset;
-	bf.l_len = len;
-
-	xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
-	/* check the new inode size is valid before allocating */
-	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	    offset + len > i_size_read(inode)) {
-		new_size = offset + len;
-		error = inode_newsize_ok(inode, new_size);
-		if (error)
-			goto out_unlock;
-	}
-
-	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
-				       0, XFS_ATTR_NOLOCK);
-	if (error)
-		goto out_unlock;
-
-	/* Change file size if needed */
-	if (new_size) {
-		struct iattr iattr;
-
-		iattr.ia_valid = ATTR_SIZE;
-		iattr.ia_size = new_size;
-		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
-	}
-
-out_unlock:
-	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-out_error:
-	return error;
-}
-
 #define XFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
 
 /*
@@ -650,7 +597,6 @@
 	.getxattr		= generic_getxattr,
 	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
-	.fallocate		= xfs_vn_fallocate,
 	.fiemap			= xfs_vn_fiemap,
 };
 
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 214ddd7..0964949 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -37,7 +37,6 @@
 
 #include <kmem.h>
 #include <mrlock.h>
-#include <sv.h>
 #include <time.h>
 
 #include <support/debug.h>
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 064f964..9731898 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -606,7 +606,8 @@
 {
 	int			error = 0;
 
-	*bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp);
+	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				    mp);
 	if (IS_ERR(*bdevp)) {
 		error = PTR_ERR(*bdevp);
 		printk("XFS: Invalid device [%s], error=%d\n", name, error);
@@ -620,7 +621,7 @@
 	struct block_device	*bdev)
 {
 	if (bdev)
-		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 /*
@@ -834,8 +835,11 @@
 	struct xfs_ail		*ailp,
 	xfs_lsn_t		threshold_lsn)
 {
-	ailp->xa_target = threshold_lsn;
-	wake_up_process(ailp->xa_task);
+	/* only ever move the target forwards */
+	if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
+		ailp->xa_target = threshold_lsn;
+		wake_up_process(ailp->xa_task);
+	}
 }
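
The hunk above makes the AIL push target monotonic: it only ever moves forward. As a standalone illustration of that general pattern — not the kernel's implementation, which compares LSNs with XFS_LSN_CMP under its own locking context — an "advance only forward" helper in userspace C11 could look like this:

/* Sketch: advance a shared target, never letting it move backwards. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t push_target;

static void advance_target(uint64_t new_target)
{
	uint64_t old = atomic_load(&push_target);

	/* Retry until we install the larger value or observe that someone
	 * else has already pushed the target past new_target. */
	while (new_target > old &&
	       !atomic_compare_exchange_weak(&push_target, &old, new_target))
		;
}

int main(void)
{
	advance_target(100);
	advance_target(50);	/* ignored: would move the target backwards */
	printf("target = %llu\n",
	       (unsigned long long)atomic_load(&push_target));
	return 0;
}
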
 
 STATIC int
@@ -847,8 +851,17 @@
 	long		tout = 0; /* milliseconds */
 
 	while (!kthread_should_stop()) {
-		schedule_timeout_interruptible(tout ?
-				msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+		/*
+		 * for short sleeps indicating congestion, don't allow us to
+		 * get woken early. Otherwise all we do is bang on the AIL lock
+		 * without making progress.
+		 */
+		if (tout && tout <= 20)
+			__set_current_state(TASK_KILLABLE);
+		else
+			__set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(tout ?
+				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
 
 		/* swsusp */
 		try_to_freeze();
@@ -935,7 +948,7 @@
  * Slab object creation initialisation for the XFS inode.
  * This covers only the idempotent fields in the XFS inode;
  * all other fields need to be initialised on allocation
- * from the slab. This avoids the need to repeatedly intialise
+ * from the slab. This avoids the need to repeatedly initialise
  * fields in the xfs inode that are left in the initialised state
  * when freeing the inode.
  */
@@ -1118,6 +1131,8 @@
 	 */
 	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
 
 	xfs_inactive(ip);
 }
@@ -1399,7 +1414,7 @@
 
 	xfs_save_resvblks(mp);
 	xfs_quiesce_attr(mp);
-	return -xfs_fs_log_dummy(mp, SYNC_WAIT);
+	return -xfs_fs_log_dummy(mp);
 }
 
 STATIC int
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index afb0d7c..e22f005 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -53,14 +53,30 @@
 {
 	struct inode		*inode = VFS_I(ip);
 
+	ASSERT(rcu_read_lock_held());
+
+	/*
+	 * check for stale RCU freed inode
+	 *
+	 * If the inode has been reallocated, it doesn't matter if it's not in
+	 * the AG we are walking - we are walking for writeback, so if it
+	 * passes all the "valid inode" checks and is dirty, then we'll write
+	 * it back anyway.  If it has been reallocated and is still being
+	 * initialised, the XFS_INEW check below will catch it.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	if (!ip->i_ino)
+		goto out_unlock_noent;
+
+	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+		goto out_unlock_noent;
+	spin_unlock(&ip->i_flags_lock);
+
 	/* nothing to sync during shutdown */
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return EFSCORRUPTED;
 
-	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
-	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
-		return ENOENT;
-
 	/* If we can't grab the inode, it must on it's way to reclaim. */
 	if (!igrab(inode))
 		return ENOENT;
@@ -72,6 +88,10 @@
 
 	/* inode is valid */
 	return 0;
+
+out_unlock_noent:
+	spin_unlock(&ip->i_flags_lock);
+	return ENOENT;
 }
 
 STATIC int
@@ -98,12 +118,12 @@
 		int		error = 0;
 		int		i;
 
-		read_lock(&pag->pag_ici_lock);
+		rcu_read_lock();
 		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
 					(void **)batch, first_index,
 					XFS_LOOKUP_BATCH);
 		if (!nr_found) {
-			read_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 			break;
 		}
 
@@ -118,18 +138,26 @@
 				batch[i] = NULL;
 
 			/*
-			 * Update the index for the next lookup. Catch overflows
-			 * into the next AG range which can occur if we have inodes
-			 * in the last block of the AG and we are currently
-			 * pointing to the last inode.
+			 * Update the index for the next lookup. Catch
+			 * overflows into the next AG range which can occur if
+			 * we have inodes in the last block of the AG and we
+			 * are currently pointing to the last inode.
+			 *
+			 * Because we may see inodes that are from the wrong AG
+			 * due to RCU freeing and reallocation, only update the
+			 * index if it lies in this AG. It was a race that led
+			 * us to see this inode, so another lookup from the
+			 * same index will not find it again.
 			 */
+			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+				continue;
 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 				done = 1;
 		}
 
 		/* unlock now we've grabbed the inodes. */
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 
 		for (i = 0; i < nr_found; i++) {
 			if (!batch[i])
@@ -334,7 +362,7 @@
 
 	/* mark the log as covered if needed */
 	if (xfs_log_need_covered(mp))
-		error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);
+		error2 = xfs_fs_log_dummy(mp);
 
 	/* flush data-only devices */
 	if (mp->m_rtdev_targp)
@@ -475,13 +503,14 @@
 	int		error;
 
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-		xfs_log_force(mp, 0);
-		xfs_reclaim_inodes(mp, 0);
 		/* dgc: errors ignored here */
-		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 		if (mp->m_super->s_frozen == SB_UNFROZEN &&
 		    xfs_log_need_covered(mp))
-			error = xfs_fs_log_dummy(mp, 0);
+			error = xfs_fs_log_dummy(mp);
+		else
+			xfs_log_force(mp, 0);
+		xfs_reclaim_inodes(mp, 0);
+		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 	}
 	mp->m_sync_seq++;
 	wake_up(&mp->m_wait_single_sync_task);
@@ -592,12 +621,12 @@
 	struct xfs_perag *pag;
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 	spin_lock(&ip->i_flags_lock);
 	__xfs_inode_set_reclaim_tag(pag, ip);
 	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
 	spin_unlock(&ip->i_flags_lock);
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	xfs_perag_put(pag);
 }
 
@@ -639,9 +668,14 @@
 	struct xfs_inode	*ip,
 	int			flags)
 {
+	ASSERT(rcu_read_lock_held());
+
+	/* quick check for stale RCU freed inode */
+	if (!ip->i_ino)
+		return 1;
 
 	/*
-	 * do some unlocked checks first to avoid unnecceary lock traffic.
+	 * do some unlocked checks first to avoid unnecessary lock traffic.
	 * The first is a flush lock check, the second is an already-in-reclaim
 	 * check. Only do these checks if we are not going to block on locks.
 	 */
@@ -654,11 +688,16 @@
 	 * The radix tree lock here protects a thread in xfs_iget from racing
 	 * with us starting reclaim on the inode.  Once we have the
 	 * XFS_IRECLAIM flag set it will not touch us.
+	 *
+	 * Due to RCU lookup, we may find inodes that have been freed and only
+	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
+	 * aren't candidates for reclaim at all, so we must check that
+	 * XFS_IRECLAIMABLE is set before proceeding to reclaim.
 	 */
 	spin_lock(&ip->i_flags_lock);
-	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
-		/* ignore as it is already under reclaim */
+	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
+	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
+		/* not a reclaim candidate. */
 		spin_unlock(&ip->i_flags_lock);
 		return 1;
 	}
@@ -795,12 +834,12 @@
 	 * added to the tree assert that it's been there before to catch
 	 * problems with the inode life time early on.
 	 */
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 	if (!radix_tree_delete(&pag->pag_ici_root,
 				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
 		ASSERT(0);
 	__xfs_inode_clear_reclaim(pag, ip);
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 
 	/*
 	 * Here we do an (almost) spurious inode lock in order to coordinate
@@ -864,14 +903,14 @@
 			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
 			int	i;
 
-			write_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			nr_found = radix_tree_gang_lookup_tag(
 					&pag->pag_ici_root,
 					(void **)batch, first_index,
 					XFS_LOOKUP_BATCH,
 					XFS_ICI_RECLAIM_TAG);
 			if (!nr_found) {
-				write_unlock(&pag->pag_ici_lock);
+				rcu_read_unlock();
 				break;
 			}
 
@@ -891,14 +930,24 @@
 				 * occur if we have inodes in the last block of
 				 * the AG and we are currently pointing to the
 				 * last inode.
+				 *
+				 * Because we may see inodes that are from the
+				 * wrong AG due to RCU freeing and
+				 * reallocation, only update the index if it
+				 * lies in this AG. It was a race that led us
+				 * to see this inode, so another lookup from
+				 * the same index will not find it again.
 				 */
+				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
+								pag->pag_agno)
+					continue;
 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 					done = 1;
 			}
 
 			/* unlock now we've grabbed the inodes. */
-			write_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 
 			for (i = 0; i < nr_found; i++) {
 				if (!batch[i])
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index 7bb5092..ee3cee0 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -18,6 +18,7 @@
 #include "xfs.h"
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
+#include "xfs_error.h"
 
 static struct ctl_table_header *xfs_table_header;
 
@@ -51,6 +52,26 @@
 
 	return ret;
 }
+
+STATIC int
+xfs_panic_mask_proc_handler(
+	ctl_table	*ctl,
+	int		write,
+	void		__user *buffer,
+	size_t		*lenp,
+	loff_t		*ppos)
+{
+	int		ret, *valp = ctl->data;
+
+	ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
+	if (!ret && write) {
+		xfs_panic_mask = *valp;
+#ifdef DEBUG
+		xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
+#endif
+	}
+	return ret;
+}
 #endif /* CONFIG_PROC_FS */
 
 static ctl_table xfs_table[] = {
@@ -77,7 +98,7 @@
 		.data		= &xfs_params.panic_mask.val,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= xfs_panic_mask_proc_handler,
 		.extra1		= &xfs_params.panic_mask.min,
 		.extra2		= &xfs_params.panic_mask.max
 	},
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index acef2e9..2d0bcb4 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -766,8 +766,8 @@
 		__field(int, curr_res)
 		__field(int, unit_res)
 		__field(unsigned int, flags)
-		__field(void *, reserve_headq)
-		__field(void *, write_headq)
+		__field(int, reserveq)
+		__field(int, writeq)
 		__field(int, grant_reserve_cycle)
 		__field(int, grant_reserve_bytes)
 		__field(int, grant_write_cycle)
@@ -784,19 +784,21 @@
 		__entry->curr_res = tic->t_curr_res;
 		__entry->unit_res = tic->t_unit_res;
 		__entry->flags = tic->t_flags;
-		__entry->reserve_headq = log->l_reserve_headq;
-		__entry->write_headq = log->l_write_headq;
-		__entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
-		__entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
-		__entry->grant_write_cycle = log->l_grant_write_cycle;
-		__entry->grant_write_bytes = log->l_grant_write_bytes;
+		__entry->reserveq = list_empty(&log->l_reserveq);
+		__entry->writeq = list_empty(&log->l_writeq);
+		xlog_crack_grant_head(&log->l_grant_reserve_head,
+				&__entry->grant_reserve_cycle,
+				&__entry->grant_reserve_bytes);
+		xlog_crack_grant_head(&log->l_grant_write_head,
+				&__entry->grant_write_cycle,
+				&__entry->grant_write_bytes);
 		__entry->curr_cycle = log->l_curr_cycle;
 		__entry->curr_block = log->l_curr_block;
-		__entry->tail_lsn = log->l_tail_lsn;
+		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
 	),
 	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
-		  "t_unit_res %u t_flags %s reserve_headq 0x%p "
-		  "write_headq 0x%p grant_reserve_cycle %d "
+		  "t_unit_res %u t_flags %s reserveq %s "
+		  "writeq %s grant_reserve_cycle %d "
 		  "grant_reserve_bytes %d grant_write_cycle %d "
 		  "grant_write_bytes %d curr_cycle %d curr_block %d "
 		  "tail_cycle %d tail_block %d",
@@ -807,8 +809,8 @@
 		  __entry->curr_res,
 		  __entry->unit_res,
 		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
-		  __entry->reserve_headq,
-		  __entry->write_headq,
+		  __entry->reserveq ? "empty" : "active",
+		  __entry->writeq ? "empty" : "active",
 		  __entry->grant_reserve_cycle,
 		  __entry->grant_reserve_bytes,
 		  __entry->grant_write_cycle,
@@ -835,6 +837,7 @@
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
@@ -842,6 +845,7 @@
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
@@ -935,10 +939,10 @@
 DEFINE_PAGE_EVENT(xfs_releasepage);
 DEFINE_PAGE_EVENT(xfs_invalidatepage);
 
-DECLARE_EVENT_CLASS(xfs_iomap_class,
+DECLARE_EVENT_CLASS(xfs_imap_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
-		 int flags, struct xfs_bmbt_irec *irec),
-	TP_ARGS(ip, offset, count, flags, irec),
+		 int type, struct xfs_bmbt_irec *irec),
+	TP_ARGS(ip, offset, count, type, irec),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_ino_t, ino)
@@ -946,7 +950,7 @@
 		__field(loff_t, new_size)
 		__field(loff_t, offset)
 		__field(size_t, count)
-		__field(int, flags)
+		__field(int, type)
 		__field(xfs_fileoff_t, startoff)
 		__field(xfs_fsblock_t, startblock)
 		__field(xfs_filblks_t, blockcount)
@@ -958,13 +962,13 @@
 		__entry->new_size = ip->i_new_size;
 		__entry->offset = offset;
 		__entry->count = count;
-		__entry->flags = flags;
+		__entry->type = type;
 		__entry->startoff = irec ? irec->br_startoff : 0;
 		__entry->startblock = irec ? irec->br_startblock : 0;
 		__entry->blockcount = irec ? irec->br_blockcount : 0;
 	),
 	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
-		  "offset 0x%llx count %zd flags %s "
+		  "offset 0x%llx count %zd type %s "
 		  "startoff 0x%llx startblock %lld blockcount 0x%llx",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->ino,
@@ -972,20 +976,21 @@
 		  __entry->new_size,
 		  __entry->offset,
 		  __entry->count,
-		  __print_flags(__entry->flags, "|", BMAPI_FLAGS),
+		  __print_symbolic(__entry->type, XFS_IO_TYPES),
 		  __entry->startoff,
 		  (__int64_t)__entry->startblock,
 		  __entry->blockcount)
 )
 
 #define DEFINE_IOMAP_EVENT(name)	\
-DEFINE_EVENT(xfs_iomap_class, name,	\
+DEFINE_EVENT(xfs_imap_class, name,	\
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,	\
-		 int flags, struct xfs_bmbt_irec *irec),		\
-	TP_ARGS(ip, offset, count, flags, irec))
-DEFINE_IOMAP_EVENT(xfs_iomap_enter);
-DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+		 int type, struct xfs_bmbt_irec *irec),		\
+	TP_ARGS(ip, offset, count, type, irec))
+DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1022,6 +1027,7 @@
 	TP_ARGS(ip, offset, count))
 DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
+DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 
 
 TRACE_EVENT(xfs_itruncate_start,
@@ -1420,6 +1426,7 @@
 	TP_PROTO(struct xfs_alloc_arg *args), \
 	TP_ARGS(args))
 DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
 DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
 DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
 DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
@@ -1752,6 +1759,39 @@
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
 
+DECLARE_EVENT_CLASS(xfs_discard_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agblock_t agbno, xfs_extlen_t len),
+	TP_ARGS(mp, agno, agbno, len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(xfs_extlen_t, len)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->len = len;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->len)
+)
+
+#define DEFINE_DISCARD_EVENT(name) \
+DEFINE_EVENT(xfs_discard_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 xfs_agblock_t agbno, xfs_extlen_t len), \
+	TP_ARGS(mp, agno, agbno, len))
+DEFINE_DISCARD_EVENT(xfs_discard_extent);
+DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
+DEFINE_DISCARD_EVENT(xfs_discard_exclude);
+DEFINE_DISCARD_EVENT(xfs_discard_busy);
+
 #endif /* _TRACE_XFS_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index faf8e1a..d22aa31 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -149,7 +149,6 @@
 	ASSERT(list_empty(&dqp->q_freelist));
 
 	mutex_destroy(&dqp->q_qlock);
-	sv_destroy(&dqp->q_pinwait);
 	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
 
 	atomic_dec(&xfs_Gqm->qm_totaldquots);
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index f8e854b..206a281 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1863,12 +1863,14 @@
 	xfs_dquot_t	*dqpout;
 	xfs_dquot_t	*dqp;
 	int		restarts;
+	int		startagain;
 
 	restarts = 0;
 	dqpout = NULL;
 
 	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
-startagain:
+again:
+	startagain = 0;
 	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 
 	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
@@ -1885,13 +1887,10 @@
 			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
 
 			trace_xfs_dqreclaim_want(dqp);
-
-			xfs_dqunlock(dqp);
-			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return NULL;
 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-			goto startagain;
+			restarts++;
+			startagain = 1;
+			goto dqunlock;
 		}
 
 		/*
@@ -1906,23 +1905,20 @@
 			ASSERT(list_empty(&dqp->q_mplist));
 			list_del_init(&dqp->q_freelist);
 			xfs_Gqm->qm_dqfrlist_cnt--;
-			xfs_dqunlock(dqp);
 			dqpout = dqp;
 			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-			break;
+			goto dqunlock;
 		}
 
 		ASSERT(dqp->q_hash);
 		ASSERT(!list_empty(&dqp->q_mplist));
 
 		/*
-		 * Try to grab the flush lock. If this dquot is in the process of
-		 * getting flushed to disk, we don't want to reclaim it.
+		 * Try to grab the flush lock. If this dquot is in the process
+		 * of getting flushed to disk, we don't want to reclaim it.
 		 */
-		if (!xfs_dqflock_nowait(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
+		if (!xfs_dqflock_nowait(dqp))
+			goto dqunlock;
 
 		/*
 		 * We have the flush lock so we know that this is not in the
@@ -1944,8 +1940,7 @@
 				xfs_fs_cmn_err(CE_WARN, mp,
 			"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
 			}
-			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-			continue;
+			goto dqunlock;
 		}
 
 		/*
@@ -1967,13 +1962,8 @@
 		 */
 		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
 			restarts++;
-			mutex_unlock(&dqp->q_hash->qh_lock);
-			xfs_dqfunlock(dqp);
-			xfs_dqunlock(dqp);
-			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-			if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return NULL;
-			goto startagain;
+			startagain = 1;
+			goto qhunlock;
 		}
 
 		ASSERT(dqp->q_nrefs == 0);
@@ -1986,14 +1976,20 @@
 		xfs_Gqm->qm_dqfrlist_cnt--;
 		dqpout = dqp;
 		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+qhunlock:
 		mutex_unlock(&dqp->q_hash->qh_lock);
 dqfunlock:
 		xfs_dqfunlock(dqp);
+dqunlock:
 		xfs_dqunlock(dqp);
 		if (dqpout)
 			break;
 		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-			return NULL;
+			break;
+		if (startagain) {
+			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+			goto again;
+		}
 	}
 	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
 	return dqpout;
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 975aa10..0df8889 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -25,86 +25,78 @@
 #include "xfs_mount.h"
 #include "xfs_error.h"
 
-static char		message[1024];	/* keep it off the stack */
-static DEFINE_SPINLOCK(xfs_err_lock);
-
-/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
-#define XFS_MAX_ERR_LEVEL	7
-#define XFS_ERR_MASK		((1 << 3) - 1)
-static const char * const	err_level[XFS_MAX_ERR_LEVEL+1] =
-					{KERN_EMERG, KERN_ALERT, KERN_CRIT,
-					 KERN_ERR, KERN_WARNING, KERN_NOTICE,
-					 KERN_INFO, KERN_DEBUG};
-
 void
-cmn_err(register int level, char *fmt, ...)
+cmn_err(
+	const char	*lvl,
+	const char	*fmt,
+	...)
 {
-	char	*fp = fmt;
-	int	len;
-	ulong	flags;
-	va_list	ap;
+	struct va_format vaf;
+	va_list		args;
 
-	level &= XFS_ERR_MASK;
-	if (level > XFS_MAX_ERR_LEVEL)
-		level = XFS_MAX_ERR_LEVEL;
-	spin_lock_irqsave(&xfs_err_lock,flags);
-	va_start(ap, fmt);
-	if (*fmt == '!') fp++;
-	len = vsnprintf(message, sizeof(message), fp, ap);
-	if (len >= sizeof(message))
-		len = sizeof(message) - 1;
-	if (message[len-1] == '\n')
-		message[len-1] = 0;
-	printk("%s%s\n", err_level[level], message);
-	va_end(ap);
-	spin_unlock_irqrestore(&xfs_err_lock,flags);
-	BUG_ON(level == CE_PANIC);
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%s%pV", lvl, &vaf);
+	va_end(args);
+
+	BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
 }
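
The rewritten cmn_err() above hands its va_list straight to printk through the kernel-only %pV specifier, which is what lets the old static message buffer and its spinlock go away. A rough userspace analogue of that forwarding pattern, using vfprintf since %pV does not exist outside printk:

/* Userspace analogue: forward varargs to one formatted print call,
 * with no shared intermediate buffer and therefore no lock around it. */
#include <stdarg.h>
#include <stdio.h>

static void log_with_level(const char *lvl, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "%s: ", lvl);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	log_with_level("WARNING", "device %s, error=%d\n", "sdb1", -5);
	return 0;
}
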
 
 void
-xfs_fs_vcmn_err(
-	int			level,
+xfs_fs_cmn_err(
+	const char		*lvl,
 	struct xfs_mount	*mp,
-	char			*fmt,
-	va_list			ap)
+	const char		*fmt,
+	...)
 {
-	unsigned long		flags;
-	int			len = 0;
+	struct va_format	vaf;
+	va_list			args;
 
-	level &= XFS_ERR_MASK;
-	if (level > XFS_MAX_ERR_LEVEL)
-		level = XFS_MAX_ERR_LEVEL;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
-	spin_lock_irqsave(&xfs_err_lock,flags);
+	printk("%sFilesystem %s: %pV", lvl, mp->m_fsname, &vaf);
+	va_end(args);
 
-	if (mp) {
-		len = sprintf(message, "Filesystem \"%s\": ", mp->m_fsname);
+	BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
+}
 
-		/*
-		 * Skip the printk if we can't print anything useful
-		 * due to an over-long device name.
-		 */
-		if (len >= sizeof(message))
-			goto out;
+/* All callers to xfs_cmn_err use CE_ALERT, so don't bother testing lvl */
+void
+xfs_cmn_err(
+	int			panic_tag,
+	const char		*lvl,
+	struct xfs_mount	*mp,
+	const char		*fmt,
+	...)
+{
+	struct va_format	vaf;
+	va_list			args;
+	int			do_panic = 0;
+
+	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
+		printk(KERN_ALERT "XFS: Transforming an alert into a BUG.");
+		do_panic = 1;
 	}
 
-	len = vsnprintf(message + len, sizeof(message) - len, fmt, ap);
-	if (len >= sizeof(message))
-		len = sizeof(message) - 1;
-	if (message[len-1] == '\n')
-		message[len-1] = 0;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
-	printk("%s%s\n", err_level[level], message);
- out:
-	spin_unlock_irqrestore(&xfs_err_lock,flags);
+	printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf);
+	va_end(args);
 
-	BUG_ON(level == CE_PANIC);
+	BUG_ON(do_panic);
 }
 
 void
 assfail(char *expr, char *file, int line)
 {
-	printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
+	printk(KERN_CRIT "Assertion failed: %s, file: %s, line: %d\n", expr,
+	       file, line);
 	BUG();
 }
 
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index d2d2046..05699f6 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -20,15 +20,22 @@
 
 #include <stdarg.h>
 
-#define CE_DEBUG        7               /* debug        */
-#define CE_CONT         6               /* continuation */
-#define CE_NOTE         5               /* notice       */
-#define CE_WARN         4               /* warning      */
-#define CE_ALERT        1               /* alert        */
-#define CE_PANIC        0               /* panic        */
+struct xfs_mount;
 
-extern void cmn_err(int, char *, ...)
-	__attribute__ ((format (printf, 2, 3)));
+#define CE_DEBUG        KERN_DEBUG
+#define CE_CONT         KERN_INFO
+#define CE_NOTE         KERN_NOTICE
+#define CE_WARN         KERN_WARNING
+#define CE_ALERT        KERN_ALERT
+#define CE_PANIC        KERN_EMERG
+
+void cmn_err(const char *lvl, const char *fmt, ...)
+		__attribute__ ((format (printf, 2, 3)));
+void xfs_fs_cmn_err( const char *lvl, struct xfs_mount *mp,
+		const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
+void xfs_cmn_err( int panic_tag, const char *lvl, struct xfs_mount *mp,
+		const char *fmt, ...) __attribute__ ((format (printf, 4, 5)));
+
 extern void assfail(char *expr, char *f, int l);
 
 #define ASSERT_ALWAYS(expr)	\
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 63c7a1a..58632cc 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -227,7 +227,7 @@
 
 	atomic_t        pagf_fstrms;    /* # of filestreams active in this AG */
 
-	rwlock_t	pag_ici_lock;	/* incore inode lock */
+	spinlock_t	pag_ici_lock;	/* incore inode cache lock */
 	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
 	int		pag_ici_reclaimable;	/* reclaimable inodes */
 	struct mutex	pag_ici_reclaim_lock;	/* serialisation point */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 112abc4..f322798 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -41,10 +41,6 @@
 #define	XFSA_FIXUP_BNO_OK	1
 #define	XFSA_FIXUP_CNT_OK	2
 
-static int
-xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
-		    xfs_agblock_t bno, xfs_extlen_t len);
-
 /*
  * Prototypes for per-ag allocation routines
  */
@@ -94,7 +90,7 @@
  * Lookup the first record less than or equal to [bno, len]
  * in the btree given by cur.
  */
-STATIC int				/* error */
+int					/* error */
 xfs_alloc_lookup_le(
 	struct xfs_btree_cur	*cur,	/* btree cursor */
 	xfs_agblock_t		bno,	/* starting block of extent */
@@ -127,7 +123,7 @@
 /*
  * Get the data from the pointed-to record.
  */
-STATIC int				/* error */
+int					/* error */
 xfs_alloc_get_rec(
 	struct xfs_btree_cur	*cur,	/* btree cursor */
 	xfs_agblock_t		*bno,	/* output: starting block of extent */
@@ -577,61 +573,58 @@
 	xfs_extlen_t	rlen;	/* length of returned extent */
 
 	ASSERT(args->alignment == 1);
+
 	/*
 	 * Allocate/initialize a cursor for the by-number freespace btree.
 	 */
 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO);
+					  args->agno, XFS_BTNUM_BNO);
+
 	/*
 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
 	 * Look for the closest free block <= bno, it must contain bno
 	 * if any free block does.
 	 */
-	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
+	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
+	if (error)
 		goto error0;
-	if (!i) {
-		/*
-		 * Didn't find it, return null.
-		 */
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		args->agbno = NULLAGBLOCK;
-		return 0;
-	}
+	if (!i)
+		goto not_found;
+
 	/*
 	 * Grab the freespace record.
 	 */
-	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
+	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
+	if (error)
 		goto error0;
 	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 	ASSERT(fbno <= args->agbno);
 	minend = args->agbno + args->minlen;
 	maxend = args->agbno + args->maxlen;
 	fend = fbno + flen;
+
 	/*
 	 * Give up if the freespace isn't long enough for the minimum request.
 	 */
-	if (fend < minend) {
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		args->agbno = NULLAGBLOCK;
-		return 0;
-	}
+	if (fend < minend)
+		goto not_found;
+
 	/*
 	 * End of extent will be smaller of the freespace end and the
 	 * maximal requested end.
-	 */
-	end = XFS_AGBLOCK_MIN(fend, maxend);
-	/*
+	 *
 	 * Fix the length according to mod and prod if given.
 	 */
+	end = XFS_AGBLOCK_MIN(fend, maxend);
 	args->len = end - args->agbno;
 	xfs_alloc_fix_len(args);
-	if (!xfs_alloc_fix_minleft(args)) {
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		return 0;
-	}
+	if (!xfs_alloc_fix_minleft(args))
+		goto not_found;
+
 	rlen = args->len;
 	ASSERT(args->agbno + rlen <= fend);
 	end = args->agbno + rlen;
+
 	/*
 	 * We are allocating agbno for rlen [agbno .. end]
 	 * Allocate/initialize a cursor for the by-size btree.
@@ -640,16 +633,25 @@
 		args->agno, XFS_BTNUM_CNT);
 	ASSERT(args->agbno + args->len <=
 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
-	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
-			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
+	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
+				      args->len, XFSA_FIXUP_BNO_OK);
+	if (error) {
 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
 		goto error0;
 	}
+
 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 
-	trace_xfs_alloc_exact_done(args);
 	args->wasfromfl = 0;
+	trace_xfs_alloc_exact_done(args);
+	return 0;
+
+not_found:
+	/* Didn't find it, return null. */
+	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+	args->agbno = NULLAGBLOCK;
+	trace_xfs_alloc_exact_notfound(args);
 	return 0;
 
 error0:
@@ -659,6 +661,95 @@
 }
 
 /*
+ * Search the btree in a given direction via the search cursor and compare
+ * the records found against the good extent we've already found.
+ */
+STATIC int
+xfs_alloc_find_best_extent(
+	struct xfs_alloc_arg	*args,	/* allocation argument structure */
+	struct xfs_btree_cur	**gcur,	/* good cursor */
+	struct xfs_btree_cur	**scur,	/* searching cursor */
+	xfs_agblock_t		gdiff,	/* difference for search comparison */
+	xfs_agblock_t		*sbno,	/* extent found by search */
+	xfs_extlen_t		*slen,
+	xfs_extlen_t		*slena,	/* aligned length */
+	int			dir)	/* 0 = search right, 1 = search left */
+{
+	xfs_agblock_t		bno;
+	xfs_agblock_t		new;
+	xfs_agblock_t		sdiff;
+	int			error;
+	int			i;
+
+	/* The good extent is perfect, no need to search. */
+	if (!gdiff)
+		goto out_use_good;
+
+	/*
+	 * Look until we find a better one, run out of space or run off the end.
+	 */
+	do {
+		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
+		if (error)
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		xfs_alloc_compute_aligned(*sbno, *slen, args->alignment,
+					  args->minlen, &bno, slena);
+
+		/*
+		 * The good extent is closer than this one.
+		 */
+		if (!dir) {
+			if (bno >= args->agbno + gdiff)
+				goto out_use_good;
+		} else {
+			if (bno <= args->agbno - gdiff)
+				goto out_use_good;
+		}
+
+		/*
+		 * Same distance, compare length and pick the best.
+		 */
+		if (*slena >= args->minlen) {
+			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
+			xfs_alloc_fix_len(args);
+
+			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+						       args->alignment, *sbno,
+						       *slen, &new);
+
+			/*
+			 * Choose closer size and invalidate other cursor.
+			 */
+			if (sdiff < gdiff)
+				goto out_use_search;
+			goto out_use_good;
+		}
+
+		if (!dir)
+			error = xfs_btree_increment(*scur, 0, &i);
+		else
+			error = xfs_btree_decrement(*scur, 0, &i);
+		if (error)
+			goto error0;
+	} while (i);
+
+out_use_good:
+	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
+	*scur = NULL;
+	return 0;
+
+out_use_search:
+	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
+	*gcur = NULL;
+	return 0;
+
+error0:
+	/* caller invalidates cursors */
+	return error;
+}
+
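
To make the early-termination rule in xfs_alloc_find_best_extent() concrete, here is a toy analogue on a plain sorted array: walk away from the target and stop as soon as the next candidate's block number is already at least the current best distance (gdiff) away, since later entries can only be worse. Everything below is illustrative C, not XFS API, and it only models the search-right direction.

#include <stdio.h>
#include <stdlib.h>

/* return the index of a candidate closer to 'target' than 'gdiff', or -1 */
static int find_better(const int *blocks, int n, int start, int target,
		       int gdiff)
{
	int best = -1;

	for (int i = start; i < n; i++) {
		int diff = abs(blocks[i] - target);

		if (blocks[i] >= target + gdiff)
			break;			/* can only get worse now */
		if (diff < gdiff) {
			best = i;
			gdiff = diff;
		}
	}
	return best;
}

int main(void)
{
	int blocks[] = { 10, 40, 44, 47, 90 };

	/* best known extent is 20 blocks away; 44 (index 2) beats it */
	printf("better candidate index: %d\n",
	       find_better(blocks, 5, 1, 45, 20));
	return 0;
}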
+/*
  * Allocate a variable extent near bno in the allocation group agno.
  * Extent's length (returned in len) will be between minlen and maxlen,
  * and of the form k * prod + mod unless there's nothing that large.
@@ -925,203 +1016,45 @@
 			}
 		}
 	} while (bno_cur_lt || bno_cur_gt);
+
 	/*
 	 * Got both cursors still active, need to find better entry.
 	 */
 	if (bno_cur_lt && bno_cur_gt) {
-		/*
-		 * Left side is long enough, look for a right side entry.
-		 */
 		if (ltlena >= args->minlen) {
 			/*
-			 * Fix up the length.
+			 * Left side is good, look for a right side entry.
 			 */
 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
 			xfs_alloc_fix_len(args);
-			rlen = args->len;
-			ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
 				args->alignment, ltbno, ltlen, &ltnew);
+
+			error = xfs_alloc_find_best_extent(args,
+						&bno_cur_lt, &bno_cur_gt,
+						ltdiff, &gtbno, &gtlen, &gtlena,
+						0 /* search right */);
+		} else {
+			ASSERT(gtlena >= args->minlen);
+
 			/*
-			 * Not perfect.
-			 */
-			if (ltdiff) {
-				/*
-				 * Look until we find a better one, run out of
-				 * space, or run off the end.
-				 */
-				while (bno_cur_lt && bno_cur_gt) {
-					if ((error = xfs_alloc_get_rec(
-							bno_cur_gt, &gtbno,
-							&gtlen, &i)))
-						goto error0;
-					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-					xfs_alloc_compute_aligned(gtbno, gtlen,
-						args->alignment, args->minlen,
-						&gtbnoa, &gtlena);
-					/*
-					 * The left one is clearly better.
-					 */
-					if (gtbnoa >= args->agbno + ltdiff) {
-						xfs_btree_del_cursor(
-							bno_cur_gt,
-							XFS_BTREE_NOERROR);
-						bno_cur_gt = NULL;
-						break;
-					}
-					/*
-					 * If we reach a big enough entry,
-					 * compare the two and pick the best.
-					 */
-					if (gtlena >= args->minlen) {
-						args->len =
-							XFS_EXTLEN_MIN(gtlena,
-								args->maxlen);
-						xfs_alloc_fix_len(args);
-						rlen = args->len;
-						gtdiff = xfs_alloc_compute_diff(
-							args->agbno, rlen,
-							args->alignment,
-							gtbno, gtlen, &gtnew);
-						/*
-						 * Right side is better.
-						 */
-						if (gtdiff < ltdiff) {
-							xfs_btree_del_cursor(
-								bno_cur_lt,
-								XFS_BTREE_NOERROR);
-							bno_cur_lt = NULL;
-						}
-						/*
-						 * Left side is better.
-						 */
-						else {
-							xfs_btree_del_cursor(
-								bno_cur_gt,
-								XFS_BTREE_NOERROR);
-							bno_cur_gt = NULL;
-						}
-						break;
-					}
-					/*
-					 * Fell off the right end.
-					 */
-					if ((error = xfs_btree_increment(
-							bno_cur_gt, 0, &i)))
-						goto error0;
-					if (!i) {
-						xfs_btree_del_cursor(
-							bno_cur_gt,
-							XFS_BTREE_NOERROR);
-						bno_cur_gt = NULL;
-						break;
-					}
-				}
-			}
-			/*
-			 * The left side is perfect, trash the right side.
-			 */
-			else {
-				xfs_btree_del_cursor(bno_cur_gt,
-						     XFS_BTREE_NOERROR);
-				bno_cur_gt = NULL;
-			}
-		}
-		/*
-		 * It's the right side that was found first, look left.
-		 */
-		else {
-			/*
-			 * Fix up the length.
+			 * Right side is good, look for a left side entry.
 			 */
 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
 			xfs_alloc_fix_len(args);
-			rlen = args->len;
-			gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
 				args->alignment, gtbno, gtlen, &gtnew);
-			/*
-			 * Right side entry isn't perfect.
-			 */
-			if (gtdiff) {
-				/*
-				 * Look until we find a better one, run out of
-				 * space, or run off the end.
-				 */
-				while (bno_cur_lt && bno_cur_gt) {
-					if ((error = xfs_alloc_get_rec(
-							bno_cur_lt, &ltbno,
-							&ltlen, &i)))
-						goto error0;
-					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-					xfs_alloc_compute_aligned(ltbno, ltlen,
-						args->alignment, args->minlen,
-						&ltbnoa, &ltlena);
-					/*
-					 * The right one is clearly better.
-					 */
-					if (ltbnoa <= args->agbno - gtdiff) {
-						xfs_btree_del_cursor(
-							bno_cur_lt,
-							XFS_BTREE_NOERROR);
-						bno_cur_lt = NULL;
-						break;
-					}
-					/*
-					 * If we reach a big enough entry,
-					 * compare the two and pick the best.
-					 */
-					if (ltlena >= args->minlen) {
-						args->len = XFS_EXTLEN_MIN(
-							ltlena, args->maxlen);
-						xfs_alloc_fix_len(args);
-						rlen = args->len;
-						ltdiff = xfs_alloc_compute_diff(
-							args->agbno, rlen,
-							args->alignment,
-							ltbno, ltlen, &ltnew);
-						/*
-						 * Left side is better.
-						 */
-						if (ltdiff < gtdiff) {
-							xfs_btree_del_cursor(
-								bno_cur_gt,
-								XFS_BTREE_NOERROR);
-							bno_cur_gt = NULL;
-						}
-						/*
-						 * Right side is better.
-						 */
-						else {
-							xfs_btree_del_cursor(
-								bno_cur_lt,
-								XFS_BTREE_NOERROR);
-							bno_cur_lt = NULL;
-						}
-						break;
-					}
-					/*
-					 * Fell off the left end.
-					 */
-					if ((error = xfs_btree_decrement(
-							bno_cur_lt, 0, &i)))
-						goto error0;
-					if (!i) {
-						xfs_btree_del_cursor(bno_cur_lt,
-							XFS_BTREE_NOERROR);
-						bno_cur_lt = NULL;
-						break;
-					}
-				}
-			}
-			/*
-			 * The right side is perfect, trash the left side.
-			 */
-			else {
-				xfs_btree_del_cursor(bno_cur_lt,
-					XFS_BTREE_NOERROR);
-				bno_cur_lt = NULL;
-			}
+
+			error = xfs_alloc_find_best_extent(args,
+						&bno_cur_gt, &bno_cur_lt,
+						gtdiff, &ltbno, &ltlen, &ltlena,
+						1 /* search left */);
 		}
+
+		if (error)
+			goto error0;
 	}
+
 	/*
 	 * If we couldn't get anything, give up.
 	 */
@@ -1130,6 +1063,7 @@
 		args->agbno = NULLAGBLOCK;
 		return 0;
 	}
+
 	/*
 	 * At this point we have selected a freespace entry, either to the
 	 * left or to the right.  If it's on the right, copy all the
@@ -1146,6 +1080,7 @@
 		j = 1;
 	} else
 		j = 0;
+
 	/*
 	 * Fix up the length and compute the useful address.
 	 */
@@ -2676,7 +2611,7 @@
  * will require a synchronous transaction, but it can still be
  * used to distinguish between a partial or exact match.
  */
-static int
+int
 xfs_alloc_busy_search(
 	struct xfs_mount	*mp,
 	xfs_agnumber_t		agno,
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 895009a..d0b3bc7 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -19,6 +19,7 @@
 #define	__XFS_ALLOC_H__
 
 struct xfs_buf;
+struct xfs_btree_cur;
 struct xfs_mount;
 struct xfs_perag;
 struct xfs_trans;
@@ -74,6 +75,22 @@
 #define XFS_ALLOC_SET_ASIDE(mp)  (4 + ((mp)->m_sb.sb_agcount * 4))
 
 /*
+ * When deciding how much space to allocate out of an AG, we limit the
+ * allocation maximum size to the size of the AG. However, we cannot use all
+ * the blocks in the AG - some are permanently used by metadata. These
+ * blocks are generally:
+ *	- the AG superblock, AGF, AGI and AGFL
+ *	- the AGF (bno and cnt) and AGI btree root blocks
+ *	- 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits
+ *
+ * The AG headers are sector sized, so the amount of space they take up is
+ * dependent on filesystem geometry. The others are all single blocks.
+ */
+#define XFS_ALLOC_AG_MAX_USABLE(mp)	\
+	((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7)
+
+
+/*
  * Argument structure for xfs_alloc routines.
  * This is turned into a structure to avoid having 20 arguments passed
  * down several levels of the stack.
@@ -118,16 +135,16 @@
 		struct xfs_perag *pag);
 
 #ifdef __KERNEL__
-
 void
-xfs_alloc_busy_insert(xfs_trans_t *tp,
-		xfs_agnumber_t agno,
-		xfs_agblock_t bno,
-		xfs_extlen_t len);
+xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
+	xfs_agblock_t bno, xfs_extlen_t len);
 
 void
 xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp);
 
+int
+xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+	xfs_agblock_t bno, xfs_extlen_t len);
 #endif	/* __KERNEL__ */
 
 /*
@@ -205,4 +222,18 @@
 	xfs_fsblock_t	bno,	/* starting block number of extent */
 	xfs_extlen_t	len);	/* length of extent */
 
+int					/* error */
+xfs_alloc_lookup_le(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len,	/* length of extent */
+	int			*stat);	/* success/failure */
+
+int					/* error */
+xfs_alloc_get_rec(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		*bno,	/* output: starting block of extent */
+	xfs_extlen_t		*len,	/* output: length of extent */
+	int			*stat);	/* output: success/failure */
+
 #endif	/* __XFS_ALLOC_H__ */
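
As a worked example of the arithmetic behind XFS_ALLOC_AG_MAX_USABLE() under one assumed geometry (512-byte sectors, 4 KiB blocks): the four sector-sized AG headers round up to a single filesystem block, and the three btree root blocks plus the four XFS_ALLOC_SET_ASIDE() AGFL blocks account for the constant 7, so the usable space works out to sb_agblocks - 8. The standalone sketch below just repeats that calculation; the geometry and AG size are assumptions, not values from the patch.

#include <stdio.h>

int main(void)
{
	/* assumed geometry: 512-byte sectors, 4096-byte blocks, 1M-block AG */
	unsigned int sectsize = 512, blocksize = 4096, agblocks = 1048576;

	/* four sector-sized AG headers, rounded up to whole fs blocks */
	unsigned int hdr_blocks = (4 * sectsize + blocksize - 1) / blocksize;

	/* 3 btree root blocks + 4 set-aside AGFL blocks = the constant 7 */
	unsigned int max_usable = agblocks - hdr_blocks - 7;

	printf("usable blocks per AG: %u of %u\n", max_usable, agblocks);
	return 0;
}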
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index a6cff8e..71e90dc2 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -637,7 +637,7 @@
 	 * It didn't all fit, so we have to sort everything on hashval.
 	 */
 	sbsize = sf->hdr.count * sizeof(*sbuf);
-	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
+	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
 
 	/*
 	 * Scan the attribute list for the rest of the entries, storing
@@ -2386,7 +2386,7 @@
 				args.dp = context->dp;
 				args.whichfork = XFS_ATTR_FORK;
 				args.valuelen = valuelen;
-				args.value = kmem_alloc(valuelen, KM_SLEEP);
+				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
 				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
 				args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
 				retval = xfs_attr_rmtval_get(&args);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 4111cd3..dc3afd77 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1038,17 +1038,34 @@
 		 * Filling in the middle part of a previous delayed allocation.
 		 * Contiguity is impossible here.
 		 * This case is avoided almost all the time.
+		 *
+		 * We start with a delayed allocation:
+		 *
+		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
+		 *  PREV @ idx
+		 *
+		 * and we are allocating:
+		 *                     +rrrrrrrrrrrrrrrrr+
+		 *			      new
+		 *
+		 * and we set it up for insertion as:
+		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
+		 *                            new
+		 *  PREV @ idx          LEFT              RIGHT
+		 *                      inserted at idx + 1
 		 */
 		temp = new->br_startoff - PREV.br_startoff;
-		trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
-		xfs_bmbt_set_blockcount(ep, temp);
-		r[0] = *new;
-		r[1].br_state = PREV.br_state;
-		r[1].br_startblock = 0;
-		r[1].br_startoff = new_endoff;
 		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
-		r[1].br_blockcount = temp2;
-		xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
+		trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
+		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
+		LEFT = *new;
+		RIGHT.br_state = PREV.br_state;
+		RIGHT.br_startblock = nullstartblock(
+				(int)xfs_bmap_worst_indlen(ip, temp2));
+		RIGHT.br_startoff = new_endoff;
+		RIGHT.br_blockcount = temp2;
+		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
+		xfs_iext_insert(ip, idx + 1, 2, &LEFT, state);
 		ip->i_df.if_lastex = idx + 1;
 		ip->i_d.di_nextents++;
 		if (cur == NULL)
@@ -2430,7 +2447,7 @@
 		startag = ag = 0;
 
 	pag = xfs_perag_get(mp, ag);
-	while (*blen < ap->alen) {
+	while (*blen < args->maxlen) {
 		if (!pag->pagf_init) {
 			error = xfs_alloc_pagf_init(mp, args->tp, ag,
 						    XFS_ALLOC_FLAG_TRYLOCK);
@@ -2452,7 +2469,7 @@
 			notinit = 1;
 
 		if (xfs_inode_is_filestream(ap->ip)) {
-			if (*blen >= ap->alen)
+			if (*blen >= args->maxlen)
 				break;
 
 			if (ap->userdata) {
@@ -2498,14 +2515,14 @@
 	 * If the best seen length is less than the request
 	 * length, use the best as the minimum.
 	 */
-	else if (*blen < ap->alen)
+	else if (*blen < args->maxlen)
 		args->minlen = *blen;
 	/*
-	 * Otherwise we've seen an extent as big as alen,
+	 * Otherwise we've seen an extent as big as maxlen,
 	 * use that as the minimum.
 	 */
 	else
-		args->minlen = ap->alen;
+		args->minlen = args->maxlen;
 
 	/*
 	 * set the failure fallback case to look in the selected
@@ -2573,7 +2590,9 @@
 	args.tp = ap->tp;
 	args.mp = mp;
 	args.fsbno = ap->rval;
-	args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
+
+	/* Trim the allocation back to the maximum an AG can fit. */
+	args.maxlen = MIN(ap->alen, XFS_ALLOC_AG_MAX_USABLE(mp));
 	args.firstblock = ap->firstblock;
 	blen = 0;
 	if (nullfb) {
@@ -2621,7 +2640,7 @@
 			/*
 			 * Adjust for alignment
 			 */
-			if (blen > args.alignment && blen <= ap->alen)
+			if (blen > args.alignment && blen <= args.maxlen)
 				args.minlen = blen - args.alignment;
 			args.minalignslop = 0;
 		} else {
@@ -2640,7 +2659,7 @@
 			 * of minlen+alignment+slop doesn't go up
 			 * between the calls.
 			 */
-			if (blen > mp->m_dalign && blen <= ap->alen)
+			if (blen > mp->m_dalign && blen <= args.maxlen)
 				nextminlen = blen - mp->m_dalign;
 			else
 				nextminlen = args.minlen;
@@ -4485,6 +4504,16 @@
 				/* Figure out the extent size, adjust alen */
 				extsz = xfs_get_extsz_hint(ip);
 				if (extsz) {
+					/*
+					 * Make sure we don't exceed a single
+					 * extent length when we align the
+					 * extent by reducing the length we are
+					 * going to allocate by the maximum
+					 * amount the extent size alignment
+					 * may require.
+					 */
+					alen = XFS_FILBLKS_MIN(len,
+						   MAXEXTLEN - (2 * extsz - 1));
 					error = xfs_bmap_extsize_align(mp,
 							&got, &prev, extsz,
 							rt, eof,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 04f9cca..2f9e97c 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -634,9 +634,8 @@
 		return error;
 	}
 	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
-	if (bp != NULL) {
+	if (bp)
 		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
-	}
 	*bpp = bp;
 	return 0;
 }
@@ -944,13 +943,13 @@
 	switch (cur->bc_btnum) {
 	case XFS_BTNUM_BNO:
 	case XFS_BTNUM_CNT:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
 		break;
 	case XFS_BTNUM_INO:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_INOMAP, XFS_INO_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF);
 		break;
 	case XFS_BTNUM_BMAP:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_BMAP_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
 		break;
 	default:
 		ASSERT(0);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2686d0d..6f8c21c 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -141,8 +141,7 @@
 #define		xfs_buf_item_log_check(x)
 #endif
 
-STATIC void	xfs_buf_error_relse(xfs_buf_t *bp);
-STATIC void	xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
+STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
 
 /*
  * This returns the number of log iovecs needed to log the
@@ -428,13 +427,15 @@
 
 		if (remove) {
 			/*
-			 * We have to remove the log item from the transaction
-			 * as we are about to release our reference to the
-			 * buffer.  If we don't, the unlock that occurs later
-			 * in xfs_trans_uncommit() will ry to reference the
+			 * If we are in a transaction context, we have to
+			 * remove the log item from the transaction as we are
+			 * about to release our reference to the buffer.  If we
+			 * don't, the unlock that occurs later in
+			 * xfs_trans_uncommit() will try to reference the
 			 * buffer which we no longer have a hold on.
 			 */
-			xfs_trans_del_item(lip);
+			if (lip->li_desc)
+				xfs_trans_del_item(lip);
 
 			/*
 			 * Since the transaction no longer refers to the buffer,
@@ -450,7 +451,7 @@
 		 * xfs_trans_ail_delete() drops the AIL lock.
 		 */
 		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
-			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
+			xfs_buf_do_callbacks(bp);
 			XFS_BUF_SET_FSPRIVATE(bp, NULL);
 			XFS_BUF_CLR_IODONE_FUNC(bp);
 		} else {
@@ -918,15 +919,26 @@
 	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
 }
 
+/*
+ * We can have many callbacks on a buffer. Running the callbacks individually
+ * can cause a lot of contention on the AIL lock, so we allow a single
+ * callback to scan the remaining lip->li_bio_list for other items of the
+ * same type that need the same callback and process them in the first call.
+ *
+ * As a result, the loop walking the callback list below will also modify the
+ * list. It removes the first item from the list and then runs the callback.
+ * The loop then restarts from the new head of the list. This allows the
+ * callback to scan and modify the list attached to the buffer and we don't
+ * have to care about maintaining a next item pointer.
+ */
 STATIC void
 xfs_buf_do_callbacks(
-	xfs_buf_t	*bp,
-	xfs_log_item_t	*lip)
+	struct xfs_buf		*bp)
 {
-	xfs_log_item_t	*nlip;
+	struct xfs_log_item	*lip;
 
-	while (lip != NULL) {
-		nlip = lip->li_bio_list;
+	while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
+		XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
 		ASSERT(lip->li_cb != NULL);
 		/*
 		 * Clear the next pointer so we don't have any
@@ -936,7 +948,6 @@
 		 */
 		lip->li_bio_list = NULL;
 		lip->li_cb(bp, lip);
-		lip = nlip;
 	}
 }
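
The comment above is the key constraint: a callback may splice further items off the buffer's list itself, so the loop never caches a next pointer across a callback and instead restarts from whatever the head is afterwards. A minimal self-contained sketch of that pattern follows; the item type and callbacks are invented for illustration and nothing here is XFS API.

#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item	*next;
	void		(*cb)(struct item *);
	int		id;
};

static struct item *head;

static void print_cb(struct item *it)
{
	printf("callback for item %d\n", it->id);
	free(it);
}

/* detach the head before running its callback, then re-read the head */
static void run_callbacks(void)
{
	struct item *it;

	while ((it = head) != NULL) {
		head = it->next;
		it->next = NULL;	/* callback must not see stale links */
		it->cb(it);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			return 1;
		it->id = i;
		it->cb = print_cb;
		it->next = head;
		head = it;
	}
	run_callbacks();
	return 0;
}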
 
@@ -949,129 +960,77 @@
  */
 void
 xfs_buf_iodone_callbacks(
-	xfs_buf_t	*bp)
+	struct xfs_buf		*bp)
 {
-	xfs_log_item_t	*lip;
-	static ulong	lasttime;
-	static xfs_buftarg_t *lasttarg;
-	xfs_mount_t	*mp;
+	struct xfs_log_item	*lip = bp->b_fspriv;
+	struct xfs_mount	*mp = lip->li_mountp;
+	static ulong		lasttime;
+	static xfs_buftarg_t	*lasttarg;
 
-	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	if (likely(!XFS_BUF_GETERROR(bp)))
+		goto do_callbacks;
 
-	if (XFS_BUF_GETERROR(bp) != 0) {
-		/*
-		 * If we've already decided to shutdown the filesystem
-		 * because of IO errors, there's no point in giving this
-		 * a retry.
-		 */
-		mp = lip->li_mountp;
-		if (XFS_FORCED_SHUTDOWN(mp)) {
-			ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
-			XFS_BUF_SUPER_STALE(bp);
-			trace_xfs_buf_item_iodone(bp, _RET_IP_);
-			xfs_buf_do_callbacks(bp, lip);
-			XFS_BUF_SET_FSPRIVATE(bp, NULL);
-			XFS_BUF_CLR_IODONE_FUNC(bp);
-			xfs_buf_ioend(bp, 0);
-			return;
-		}
+	/*
+	 * If we've already decided to shutdown the filesystem because of
+	 * I/O errors, there's no point in giving this a retry.
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		XFS_BUF_SUPER_STALE(bp);
+		trace_xfs_buf_item_iodone(bp, _RET_IP_);
+		goto do_callbacks;
+	}
 
-		if ((XFS_BUF_TARGET(bp) != lasttarg) ||
-		    (time_after(jiffies, (lasttime + 5*HZ)))) {
-			lasttime = jiffies;
-			cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
-					" block 0x%llx in %s",
-				XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
-			      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
-		}
-		lasttarg = XFS_BUF_TARGET(bp);
+	if (XFS_BUF_TARGET(bp) != lasttarg ||
+	    time_after(jiffies, (lasttime + 5*HZ))) {
+		lasttime = jiffies;
+		cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
+				" block 0x%llx in %s",
+			XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
+		      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
+	}
+	lasttarg = XFS_BUF_TARGET(bp);
 
-		if (XFS_BUF_ISASYNC(bp)) {
-			/*
-			 * If the write was asynchronous then noone will be
-			 * looking for the error.  Clear the error state
-			 * and write the buffer out again delayed write.
-			 *
-			 * XXXsup This is OK, so long as we catch these
-			 * before we start the umount; we don't want these
-			 * DELWRI metadata bufs to be hanging around.
-			 */
-			XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */
+	/*
+	 * If the write was asynchronous then no one will be looking for the
+	 * error.  Clear the error state and write the buffer out again.
+	 *
+	 * During sync or umount we'll write all pending buffers again
+	 * synchronously, which will catch these errors if they keep hanging
+	 * around.
+	 */
+	if (XFS_BUF_ISASYNC(bp)) {
+		XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */
 
-			if (!(XFS_BUF_ISSTALE(bp))) {
-				XFS_BUF_DELAYWRITE(bp);
-				XFS_BUF_DONE(bp);
-				XFS_BUF_SET_START(bp);
-			}
-			ASSERT(XFS_BUF_IODONE_FUNC(bp));
-			trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
-			xfs_buf_relse(bp);
-		} else {
-			/*
-			 * If the write of the buffer was not asynchronous,
-			 * then we want to make sure to return the error
-			 * to the caller of bwrite().  Because of this we
-			 * cannot clear the B_ERROR state at this point.
-			 * Instead we install a callback function that
-			 * will be called when the buffer is released, and
-			 * that routine will clear the error state and
-			 * set the buffer to be written out again after
-			 * some delay.
-			 */
-			/* We actually overwrite the existing b-relse
-			   function at times, but we're gonna be shutting down
-			   anyway. */
-			XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
+		if (!XFS_BUF_ISSTALE(bp)) {
+			XFS_BUF_DELAYWRITE(bp);
 			XFS_BUF_DONE(bp);
-			XFS_BUF_FINISH_IOWAIT(bp);
+			XFS_BUF_SET_START(bp);
 		}
+		ASSERT(XFS_BUF_IODONE_FUNC(bp));
+		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
+		xfs_buf_relse(bp);
 		return;
 	}
 
-	xfs_buf_do_callbacks(bp, lip);
+	/*
+	 * If the write of the buffer was synchronous, we want to make
+	 * sure to return the error to the caller of xfs_bwrite().
+	 */
+	XFS_BUF_STALE(bp);
+	XFS_BUF_DONE(bp);
+	XFS_BUF_UNDELAYWRITE(bp);
+
+	trace_xfs_buf_error_relse(bp, _RET_IP_);
+	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+
+do_callbacks:
+	xfs_buf_do_callbacks(bp);
 	XFS_BUF_SET_FSPRIVATE(bp, NULL);
 	XFS_BUF_CLR_IODONE_FUNC(bp);
 	xfs_buf_ioend(bp, 0);
 }
 
 /*
- * This is a callback routine attached to a buffer which gets an error
- * when being written out synchronously.
- */
-STATIC void
-xfs_buf_error_relse(
-	xfs_buf_t	*bp)
-{
-	xfs_log_item_t	*lip;
-	xfs_mount_t	*mp;
-
-	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
-	mp = (xfs_mount_t *)lip->li_mountp;
-	ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
-
-	XFS_BUF_STALE(bp);
-	XFS_BUF_DONE(bp);
-	XFS_BUF_UNDELAYWRITE(bp);
-	XFS_BUF_ERROR(bp,0);
-
-	trace_xfs_buf_error_relse(bp, _RET_IP_);
-
-	if (! XFS_FORCED_SHUTDOWN(mp))
-		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-	/*
-	 * We have to unpin the pinned buffers so do the
-	 * callbacks.
-	 */
-	xfs_buf_do_callbacks(bp, lip);
-	XFS_BUF_SET_FSPRIVATE(bp, NULL);
-	XFS_BUF_CLR_IODONE_FUNC(bp);
-	XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
-	xfs_buf_relse(bp);
-}
-
-
-/*
  * This is the iodone() function for buffers which have been
  * logged.  It is called when they are eventually flushed out.
  * It should remove the buf item from the AIL, and free the buf item.
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 0e2ed43..b6ecd20 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -105,17 +105,6 @@
 	xfs_buf_log_format_t	bli_format;	/* in-log header */
 } xfs_buf_log_item_t;
 
-/*
- * This structure is used during recovery to record the buf log
- * items which have been canceled and should not be replayed.
- */
-typedef struct xfs_buf_cancel {
-	xfs_daddr_t		bc_blkno;
-	uint			bc_len;
-	int			bc_refcount;
-	struct xfs_buf_cancel	*bc_next;
-} xfs_buf_cancel_t;
-
 void	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
 void	xfs_buf_item_relse(struct xfs_buf *);
 void	xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index c78cc6a..4c7db74 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -152,37 +152,6 @@
 }
 #endif /* DEBUG */
 
-
-void
-xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	xfs_fs_vcmn_err(level, mp, fmt, ap);
-	va_end(ap);
-}
-
-void
-xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
-{
-	va_list ap;
-
-#ifdef DEBUG
-	xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
-#endif
-
-	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)
-	    && (level & CE_ALERT)) {
-		level &= ~CE_ALERT;
-		level |= CE_PANIC;
-		cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG.");
-	}
-	va_start(ap, fmt);
-	xfs_fs_vcmn_err(level, mp, fmt, ap);
-	va_end(ap);
-}
-
 void
 xfs_error_report(
 	const char		*tag,
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index f338847..10dce54 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -136,8 +136,8 @@
 	 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
 			(rf))))
 
-extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
-extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
+extern int xfs_errortag_add(int error_tag, struct xfs_mount *mp);
+extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud);
 #else
 #define XFS_TEST_ERROR(expr, mp, tag, rf)	(expr)
 #define xfs_errortag_add(tag, mp)		(ENOSYS)
@@ -162,21 +162,15 @@
 
 struct xfs_mount;
 
-extern void xfs_fs_vcmn_err(int level, struct xfs_mount *mp,
-		char *fmt, va_list ap)
-	__attribute__ ((format (printf, 3, 0)));
-extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp,
-			char *fmt, ...)
-	__attribute__ ((format (printf, 4, 5)));
-extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...)
-	__attribute__ ((format (printf, 3, 4)));
-
 extern void xfs_hex_dump(void *p, int length);
 
 #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
 	xfs_fs_cmn_err(level, mp, fmt "  Unmount and run xfs_repair.", ## args)
 
 #define xfs_fs_mount_cmn_err(f, fmt, args...) \
-	((f & XFS_MFSI_QUIET)? (void)0 : cmn_err(CE_WARN, "XFS: " fmt, ## args))
+	do { \
+		if (!(f & XFS_MFSI_QUIET))	\
+			cmn_err(CE_WARN, "XFS: " fmt, ## args); \
+	} while (0)
 
 #endif	/* __XFS_ERROR_H__ */
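
One note on the macro rewrite above: the old ternary form was already a single expression and therefore if/else-safe, so the do { ... } while (0) wrapper is mainly the standard idiom for a macro that has grown into a multi-statement body. The short illustration below, with made-up macro names, shows why that idiom matters in general.

#include <stdio.h>

/* unsafe: expands to two statements, so an un-braced if/else breaks */
#define LOG_TWICE_BAD(msg)	puts(msg); puts(msg)

/* safe: behaves as one statement and takes a trailing semicolon anywhere */
#define LOG_TWICE(msg)		do { puts(msg); puts(msg); } while (0)

int main(void)
{
	int quiet = 1;

	if (!quiet)
		LOG_TWICE("noisy");	/* with LOG_TWICE_BAD this would not
					 * even compile: the second puts()
					 * terminates the if, orphaning the
					 * else below */
	else
		puts("quiet");

	return 0;
}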
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index a55e687..d22e626 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -48,6 +48,28 @@
 }
 
 /*
+ * Freeing the efi requires that we remove it from the AIL if it has already
+ * been placed there. However, the EFI may not yet have been placed in the AIL
+ * when called by xfs_efi_release() from EFD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the
+ * test_and_clear_bit(XFS_EFI_COMMITTED) to ensure only the last caller frees
+ * the EFI.
+ */
+STATIC void
+__xfs_efi_release(
+	struct xfs_efi_log_item	*efip)
+{
+	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
+
+	if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) {
+		spin_lock(&ailp->xa_lock);
+		/* xfs_trans_ail_delete() drops the AIL lock. */
+		xfs_trans_ail_delete(ailp, &efip->efi_item);
+		xfs_efi_item_free(efip);
+	}
+}
+
+/*
  * This returns the number of iovecs needed to log the given efi item.
  * We only need 1 iovec for an efi item.  It just logs the efi_log_format
  * structure.
@@ -74,7 +96,8 @@
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
 	uint			size;
 
-	ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
+	ASSERT(atomic_read(&efip->efi_next_extent) ==
+				efip->efi_format.efi_nextents);
 
 	efip->efi_format.efi_type = XFS_LI_EFI;
 
@@ -99,10 +122,12 @@
 }
 
 /*
- * While EFIs cannot really be pinned, the unpin operation is the
- * last place at which the EFI is manipulated during a transaction.
- * Here we coordinate with xfs_efi_cancel() to determine who gets to
- * free the EFI.
+ * While EFIs cannot really be pinned, the unpin operation is the last place at
+ * which the EFI is manipulated during a transaction.  If we are being asked to
+ * remove the EFI it's because the transaction has been cancelled and by
+ * definition that means the EFI cannot be in the AIL so remove it from the
+ * transaction and free it.  Otherwise coordinate with xfs_efi_release() (via
+ * XFS_EFI_COMMITTED) to determine who gets to free the EFI.
  */
 STATIC void
 xfs_efi_item_unpin(
@@ -110,20 +135,15 @@
 	int			remove)
 {
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
-	struct xfs_ail		*ailp = lip->li_ailp;
 
-	spin_lock(&ailp->xa_lock);
-	if (efip->efi_flags & XFS_EFI_CANCELED) {
-		if (remove)
+	if (remove) {
+		ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
+		if (lip->li_desc)
 			xfs_trans_del_item(lip);
-
-		/* xfs_trans_ail_delete() drops the AIL lock. */
-		xfs_trans_ail_delete(ailp, lip);
 		xfs_efi_item_free(efip);
-	} else {
-		efip->efi_flags |= XFS_EFI_COMMITTED;
-		spin_unlock(&ailp->xa_lock);
+		return;
 	}
+	__xfs_efi_release(efip);
 }
 
 /*
@@ -152,16 +172,20 @@
 }
 
 /*
- * The EFI is logged only once and cannot be moved in the log, so
- * simply return the lsn at which it's been logged.  The canceled
- * flag is not paid any attention here.  Checking for that is delayed
- * until the EFI is unpinned.
+ * The EFI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged.  For bulk transaction committed
+ * processing, the EFI may be processed but not yet unpinned prior to the EFD
+ * being processed. Set the XFS_EFI_COMMITTED flag so this case can be detected
+ * when processing the EFD.
  */
 STATIC xfs_lsn_t
 xfs_efi_item_committed(
 	struct xfs_log_item	*lip,
 	xfs_lsn_t		lsn)
 {
+	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
+
+	set_bit(XFS_EFI_COMMITTED, &efip->efi_flags);
 	return lsn;
 }
 
@@ -230,6 +254,7 @@
 	xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
 	efip->efi_format.efi_nextents = nextents;
 	efip->efi_format.efi_id = (__psint_t)(void*)efip;
+	atomic_set(&efip->efi_next_extent, 0);
 
 	return efip;
 }
@@ -289,37 +314,18 @@
 }
 
 /*
- * This is called by the efd item code below to release references to
- * the given efi item.  Each efd calls this with the number of
- * extents that it has logged, and when the sum of these reaches
- * the total number of extents logged by this efi item we can free
- * the efi item.
- *
- * Freeing the efi item requires that we remove it from the AIL.
- * We'll use the AIL lock to protect our counters as well as
- * the removal from the AIL.
+ * This is called by the efd item code below to release references to the given
+ * efi item.  Each efd calls this with the number of extents that it has
+ * logged, and when the sum of these reaches the total number of extents logged
+ * by this efi item we can free the efi item.
  */
 void
 xfs_efi_release(xfs_efi_log_item_t	*efip,
 		uint			nextents)
 {
-	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
-	int			extents_left;
-
-	ASSERT(efip->efi_next_extent > 0);
-	ASSERT(efip->efi_flags & XFS_EFI_COMMITTED);
-
-	spin_lock(&ailp->xa_lock);
-	ASSERT(efip->efi_next_extent >= nextents);
-	efip->efi_next_extent -= nextents;
-	extents_left = efip->efi_next_extent;
-	if (extents_left == 0) {
-		/* xfs_trans_ail_delete() drops the AIL lock. */
-		xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
-		xfs_efi_item_free(efip);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
+	ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
+	if (atomic_sub_and_test(nextents, &efip->efi_next_extent))
+		__xfs_efi_release(efip);
 }
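
The new scheme replaces the AIL-lock-protected extent counter with an atomic one: each EFD subtracts the number of extents it logged, and whichever caller drives the count to zero (together with the XFS_EFI_COMMITTED bit) gets to free the EFI. Below is a small C11 sketch of just the counter half, with atomic_fetch_sub standing in for the kernel's atomic_sub_and_test(); the names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int next_extent;

/* returns non-zero when this caller drove the counter to zero */
static int release_extents(int nextents)
{
	/* atomic_fetch_sub() returns the value before the subtraction */
	return atomic_fetch_sub(&next_extent, nextents) - nextents == 0;
}

int main(void)
{
	atomic_store(&next_extent, 5);

	printf("first release frees:  %d\n", release_extents(2));	/* 0 */
	printf("second release frees: %d\n", release_extents(3));	/* 1 */
	return 0;
}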
 
 static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 0d22c56..375f68e 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -111,11 +111,10 @@
 #define	XFS_EFI_MAX_FAST_EXTENTS	16
 
 /*
- * Define EFI flags.
+ * Define EFI flag bits. Manipulated by set/clear/test_bit operators.
  */
-#define	XFS_EFI_RECOVERED	0x1
-#define	XFS_EFI_COMMITTED	0x2
-#define	XFS_EFI_CANCELED	0x4
+#define	XFS_EFI_RECOVERED	1
+#define	XFS_EFI_COMMITTED	2
 
 /*
  * This is the "extent free intention" log item.  It is used
@@ -125,8 +124,8 @@
  */
 typedef struct xfs_efi_log_item {
 	xfs_log_item_t		efi_item;
-	uint			efi_flags;	/* misc flags */
-	uint			efi_next_extent;
+	atomic_t		efi_next_extent;
+	unsigned long		efi_flags;	/* misc flags */
 	xfs_efi_log_format_t	efi_format;
 } xfs_efi_log_item_t;
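
A subtle part of this hunk: the flag defines change meaning from bit masks (0x1, 0x2, 0x4) to bit numbers (1, 2), because set_bit() and test_and_clear_bit() take an index into an unsigned long rather than a mask. The small standalone illustration below mimics that difference with plain shifts; nothing in it is kernel API.

#include <stdio.h>

#define OLD_COMMITTED_MASK	0x2	/* used as: flags |= MASK */
#define NEW_COMMITTED_BIT	2	/* used as: set_bit(BIT, &flags) */

int main(void)
{
	unsigned long flags = 0;

	flags |= OLD_COMMITTED_MASK;		/* sets bit 1, value 0x2 */
	flags |= 1UL << NEW_COMMITTED_BIT;	/* sets bit 2, value 0x4 */

	/* 0x6: the same "2" names two different bits under the two schemes */
	printf("flags = %#lx\n", flags);
	return 0;
}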
 
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a7c116e..85668ef 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -53,6 +53,9 @@
 	xfs_fsop_geom_t		*geo,
 	int			new_version)
 {
+
+	memset(geo, 0, sizeof(*geo));
+
 	geo->blocksize = mp->m_sb.sb_blocksize;
 	geo->rtextsize = mp->m_sb.sb_rextsize;
 	geo->agblocks = mp->m_sb.sb_agblocks;
@@ -374,6 +377,7 @@
 		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
 	} else
 		mp->m_maxicount = 0;
+	xfs_set_low_space_thresholds(mp);
 
 	/* update secondary superblocks. */
 	for (agno = 1; agno < nagcount; agno++) {
@@ -611,12 +615,13 @@
  *
  * We cannot use an inode here for this - that will push dirty state back up
  * into the VFS and then periodic inode flushing will prevent log covering from
- * making progress. Hence we log a field in the superblock instead.
+ * making progress. Hence we log a field in the superblock instead and use a
+ * synchronous transaction to ensure the superblock is immediately unpinned
+ * and can be written back.
  */
 int
 xfs_fs_log_dummy(
-	xfs_mount_t	*mp,
-	int		flags)
+	xfs_mount_t	*mp)
 {
 	xfs_trans_t	*tp;
 	int		error;
@@ -631,8 +636,7 @@
 
 	/* log the UUID because it is an unchanging field */
 	xfs_mod_sb(tp, XFS_SB_UUID);
-	if (flags & SYNC_WAIT)
-		xfs_trans_set_sync(tp);
+	xfs_trans_set_sync(tp);
 	return xfs_trans_commit(tp, 0);
 }
 
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index a786c52..1b6a98b 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,6 +25,6 @@
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
 				xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags);
+extern int xfs_fs_log_dummy(struct xfs_mount *mp);
 
 #endif	/* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index d7de5a3..cb9b6d1 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,6 +43,17 @@
 
 
 /*
+ * Define xfs inode iolock lockdep classes. We need to ensure that all active
+ * inodes are considered the same for lockdep purposes, including inodes that
+ * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
+ * guarantee the locks are considered the same when there are multiple lock
+ * initialisation sites. Also, define a reclaimable inode class so it is
+ * obvious in lockdep reports which class the report is against.
+ */
+static struct lock_class_key xfs_iolock_active;
+struct lock_class_key xfs_iolock_reclaimable;
+
+/*
  * Allocate and initialise an xfs_inode.
  */
 STATIC struct xfs_inode *
@@ -69,8 +80,11 @@
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
+	ASSERT(ip->i_ino == 0);
 
 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+			&xfs_iolock_active, "xfs_iolock_active");
 
 	/* initialise the xfs inode */
 	ip->i_ino = ino;
@@ -85,9 +99,6 @@
 	ip->i_size = 0;
 	ip->i_new_size = 0;
 
-	/* prevent anyone from using this yet */
-	VFS_I(ip)->i_state = I_NEW;
-
 	return ip;
 }
 
@@ -145,7 +156,18 @@
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
 
-	call_rcu(&ip->i_vnode.i_rcu, xfs_inode_free_callback);
+	/*
+	 * Because we use RCU freeing we need to ensure the inode always
+	 * appears to be reclaimed with an invalid inode number when in the
+	 * free state. The ip->i_flags_lock provides the barrier against lookup
+	 * races.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	ip->i_flags = XFS_IRECLAIM;
+	ip->i_ino = 0;
+	spin_unlock(&ip->i_flags_lock);
+
+	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
 
 /*
@@ -155,14 +177,29 @@
 xfs_iget_cache_hit(
 	struct xfs_perag	*pag,
 	struct xfs_inode	*ip,
+	xfs_ino_t		ino,
 	int			flags,
-	int			lock_flags) __releases(pag->pag_ici_lock)
+	int			lock_flags) __releases(RCU)
 {
 	struct inode		*inode = VFS_I(ip);
 	struct xfs_mount	*mp = ip->i_mount;
 	int			error;
 
+	/*
+	 * check for re-use of an inode within an RCU grace period due to the
+	 * radix tree nodes not being updated yet. We monitor for this by
+	 * setting the inode number to zero before freeing the inode structure.
+	 * If the inode has been reallocated and set up, then the inode number
+	 * will not match, so check for that, too.
+	 */
 	spin_lock(&ip->i_flags_lock);
+	if (ip->i_ino != ino) {
+		trace_xfs_iget_skip(ip);
+		XFS_STATS_INC(xs_ig_frecycle);
+		error = EAGAIN;
+		goto out_error;
+	}
+
 
 	/*
 	 * If we are racing with another cache hit that is currently
@@ -205,7 +242,7 @@
 		ip->i_flags |= XFS_IRECLAIM;
 
 		spin_unlock(&ip->i_flags_lock);
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 
 		error = -inode_init_always(mp->m_super, inode);
 		if (error) {
@@ -213,7 +250,7 @@
 			 * Re-initializing the inode failed, and we are in deep
 			 * trouble.  Try to re-add it to the reclaim list.
 			 */
-			read_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			spin_lock(&ip->i_flags_lock);
 
 			ip->i_flags &= ~XFS_INEW;
@@ -223,14 +260,20 @@
 			goto out_error;
 		}
 
-		write_lock(&pag->pag_ici_lock);
+		spin_lock(&pag->pag_ici_lock);
 		spin_lock(&ip->i_flags_lock);
 		ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
 		ip->i_flags |= XFS_INEW;
 		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
 		inode->i_state = I_NEW;
+
+		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+				&xfs_iolock_active, "xfs_iolock_active");
+
 		spin_unlock(&ip->i_flags_lock);
-		write_unlock(&pag->pag_ici_lock);
+		spin_unlock(&pag->pag_ici_lock);
 	} else {
 		/* If the VFS inode is being torn down, pause and try again. */
 		if (!igrab(inode)) {
@@ -241,7 +284,7 @@
 
 		/* We've got a live one. */
 		spin_unlock(&ip->i_flags_lock);
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 		trace_xfs_iget_hit(ip);
 	}
 
@@ -255,7 +298,7 @@
 
 out_error:
 	spin_unlock(&ip->i_flags_lock);
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	return error;
 }
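
The pattern implemented by the checks above: a lockless RCU radix tree lookup can hand back an inode that has since been freed or even recycled, so the caller rechecks the identity (i_ino) under i_flags_lock and retries the whole lookup with EAGAIN if it no longer matches. A minimal single-threaded sketch of just that revalidation step follows; the struct and function names are invented.

#include <errno.h>
#include <stdio.h>

struct inode_stub {
	unsigned long	ino;	/* zeroed when the object is freed/recycled */
};

/* after a lockless lookup, recheck identity before trusting the object */
static int revalidate(struct inode_stub *ip, unsigned long wanted)
{
	/* in the kernel this check runs under ip->i_flags_lock */
	if (ip->ino != wanted)
		return EAGAIN;	/* recycled inode: retry the lookup */
	return 0;
}

int main(void)
{
	struct inode_stub recycled = { .ino = 0 };
	struct inode_stub live = { .ino = 42 };

	printf("recycled: %d (EAGAIN=%d), live: %d\n",
	       revalidate(&recycled, 42), EAGAIN, revalidate(&live, 42));
	return 0;
}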
 
@@ -308,7 +351,7 @@
 			BUG();
 	}
 
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 
 	/* insert the new inode */
 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
@@ -323,14 +366,14 @@
 	ip->i_udquot = ip->i_gdquot = NULL;
 	xfs_iflags_set(ip, XFS_INEW);
 
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
 
 	*ipp = ip;
 	return 0;
 
 out_preload_end:
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
 	if (lock_flags)
 		xfs_iunlock(ip, lock_flags);
@@ -377,7 +420,7 @@
 	xfs_agino_t	agino;
 
 	/* reject inode numbers outside existing AGs */
-	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
+	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 		return EINVAL;
 
 	/* get the perag structure and ensure that it's inode capable */
@@ -386,15 +429,15 @@
 
 again:
 	error = 0;
-	read_lock(&pag->pag_ici_lock);
+	rcu_read_lock();
 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 
 	if (ip) {
-		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
+		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
 		if (error)
 			goto out_error_or_again;
 	} else {
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 		XFS_STATS_INC(xs_ig_missed);
 
 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 108c7a0..be7cf62 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -887,7 +887,7 @@
 	 * around for a while.  This helps to keep recently accessed
 	 * meta-data in-core longer.
 	 */
-	XFS_BUF_SET_REF(bp, XFS_INO_REF);
+	xfs_buf_set_ref(bp, XFS_INO_REF);
 
 	/*
 	 * Use xfs_trans_brelse() to release the buffer containing the
@@ -2000,17 +2000,33 @@
 		 */
 		for (i = 0; i < ninodes; i++) {
 retry:
-			read_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			ip = radix_tree_lookup(&pag->pag_ici_root,
 					XFS_INO_TO_AGINO(mp, (inum + i)));
 
-			/* Inode not in memory or stale, nothing to do */
-			if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
-				read_unlock(&pag->pag_ici_lock);
+			/* Inode not in memory, nothing to do */
+			if (!ip) {
+				rcu_read_unlock();
 				continue;
 			}
 
 			/*
+			 * because this is an RCU protected lookup, we could
+			 * find a recently freed or even reallocated inode
+			 * during the lookup. We need to check under the
+			 * i_flags_lock for a valid inode here. Skip it if it
+			 * is not valid, the wrong inode or stale.
+			 */
+			spin_lock(&ip->i_flags_lock);
+			if (ip->i_ino != inum + i ||
+			    __xfs_iflags_test(ip, XFS_ISTALE)) {
+				spin_unlock(&ip->i_flags_lock);
+				rcu_read_unlock();
+				continue;
+			}
+			spin_unlock(&ip->i_flags_lock);
+
+			/*
 			 * Don't try to lock/unlock the current inode, but we
 			 * _cannot_ skip the other inodes that we did not find
 			 * in the list attached to the buffer and are not
@@ -2019,11 +2035,11 @@
 			 */
 			if (ip != free_ip &&
 			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
-				read_unlock(&pag->pag_ici_lock);
+				rcu_read_unlock();
 				delay(1);
 				goto retry;
 			}
-			read_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 
 			xfs_iflock(ip);
 			xfs_iflags_set(ip, XFS_ISTALE);
@@ -2629,7 +2645,7 @@
 
 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
-	read_lock(&pag->pag_ici_lock);
+	rcu_read_lock();
 	/* really need a gang lookup range call here */
 	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
 					first_index, inodes_per_cluster);
@@ -2640,9 +2656,21 @@
 		iq = ilist[i];
 		if (iq == ip)
 			continue;
-		/* if the inode lies outside this cluster, we're done. */
-		if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
-			break;
+
+		/*
+		 * because this is an RCU protected lookup, we could find a
+		 * recently freed or even reallocated inode during the lookup.
+		 * We need to check under the i_flags_lock for a valid inode
+		 * here. Skip it if it is not valid or the wrong inode.
+		 */
+		spin_lock(&iq->i_flags_lock);
+		if (!iq->i_ino ||
+		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
+			spin_unlock(&iq->i_flags_lock);
+			continue;
+		}
+		spin_unlock(&iq->i_flags_lock);
+
 		/*
 		 * Do an un-protected check to see if the inode is dirty and
 		 * is a candidate for flushing.  These checks will be repeated
@@ -2692,7 +2720,7 @@
 	}
 
 out_free:
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	kmem_free(ilist);
 out_put:
 	xfs_perag_put(pag);
@@ -2704,7 +2732,7 @@
 	 * Corruption detected in the clustering loop.  Invalidate the
 	 * inode buffer and shut down the filesystem.
 	 */
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	/*
 	 * Clean up the buffer.  If it was B_DELWRI, just release it --
 	 * brelse can handle it with no problems.  If not, shut down the
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index fb2ca2e..5c95fa8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -376,12 +376,13 @@
 /*
  * In-core inode flags.
  */
-#define XFS_IRECLAIM    0x0001  /* we have started reclaiming this inode    */
-#define XFS_ISTALE	0x0002	/* inode has been staled */
-#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */
-#define XFS_INEW	0x0008	/* inode has just been allocated */
-#define XFS_IFILESTREAM	0x0010	/* inode is in a filestream directory */
-#define XFS_ITRUNCATED	0x0020	/* truncated down so flush-on-close */
+#define XFS_IRECLAIM		0x0001  /* started reclaiming this inode */
+#define XFS_ISTALE		0x0002	/* inode has been staled */
+#define XFS_IRECLAIMABLE	0x0004	/* inode can be reclaimed */
+#define XFS_INEW		0x0008	/* inode has just been allocated */
+#define XFS_IFILESTREAM		0x0010	/* inode is in a filestream directory */
+#define XFS_ITRUNCATED		0x0020	/* truncated down so flush-on-close */
+#define XFS_IDIRTY_RELEASE	0x0040	/* dirty release already seen */
 
 /*
  * Flags for inode locking.
@@ -438,6 +439,8 @@
 #define XFS_IOLOCK_DEP(flags)	(((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
 #define XFS_ILOCK_DEP(flags)	(((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
 
+extern struct lock_class_key xfs_iolock_reclaimable;
+
 /*
  * Flags for xfs_itruncate_start().
  */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7c8d30c..fd4f398 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -842,15 +842,64 @@
  * flushed to disk.  It is responsible for removing the inode item
  * from the AIL if it has not been re-logged, and unlocking the inode's
  * flush lock.
+ *
+ * To reduce AIL lock traffic as much as possible, we scan the buffer log item
+ * list for other inodes that will run this function. We remove them from the
+ * buffer list so we can process all the inode IO completions in one AIL lock
+ * traversal.
  */
 void
 xfs_iflush_done(
 	struct xfs_buf		*bp,
 	struct xfs_log_item	*lip)
 {
-	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
-	xfs_inode_t		*ip = iip->ili_inode;
+	struct xfs_inode_log_item *iip;
+	struct xfs_log_item	*blip;
+	struct xfs_log_item	*next;
+	struct xfs_log_item	*prev;
 	struct xfs_ail		*ailp = lip->li_ailp;
+	int			need_ail = 0;
+
+	/*
+	 * Scan the buffer IO completions for other inodes being completed and
+	 * attach them to the current inode log item.
+	 */
+	blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	prev = NULL;
+	while (blip != NULL) {
+		if (blip->li_cb != xfs_iflush_done) {
+			prev = blip;
+			blip = blip->li_bio_list;
+			continue;
+		}
+
+		/* remove from list */
+		next = blip->li_bio_list;
+		if (!prev) {
+			XFS_BUF_SET_FSPRIVATE(bp, next);
+		} else {
+			prev->li_bio_list = next;
+		}
+
+		/* add to current list */
+		blip->li_bio_list = lip->li_bio_list;
+		lip->li_bio_list = blip;
+
+		/*
+		 * while we have the item, do the unlocked check for needing
+		 * the AIL lock.
+		 */
+		iip = INODE_ITEM(blip);
+		if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
+			need_ail++;
+
+		blip = next;
+	}
+
+	/* make sure we capture the state of the initial inode. */
+	iip = INODE_ITEM(lip);
+	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
+		need_ail++;
 
 	/*
 	 * We only want to pull the item from the AIL if it is
@@ -861,28 +910,37 @@
 	 * the lock since it's cheaper, and then we recheck while
 	 * holding the lock before removing the inode from the AIL.
 	 */
-	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
+	if (need_ail) {
+		struct xfs_log_item *log_items[need_ail];
+		int i = 0;
 		spin_lock(&ailp->xa_lock);
-		if (lip->li_lsn == iip->ili_flush_lsn) {
-			/* xfs_trans_ail_delete() drops the AIL lock. */
-			xfs_trans_ail_delete(ailp, lip);
-		} else {
-			spin_unlock(&ailp->xa_lock);
+		for (blip = lip; blip; blip = blip->li_bio_list) {
+			iip = INODE_ITEM(blip);
+			if (iip->ili_logged &&
+			    blip->li_lsn == iip->ili_flush_lsn) {
+				log_items[i++] = blip;
+			}
+			ASSERT(i <= need_ail);
 		}
+		/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
+		xfs_trans_ail_delete_bulk(ailp, log_items, i);
 	}
 
-	iip->ili_logged = 0;
 
 	/*
-	 * Clear the ili_last_fields bits now that we know that the
-	 * data corresponding to them is safely on disk.
+	 * Clean up and unlock the flush lock now that we are done. We can
+	 * clear the ili_last_fields bits now that we know that the data
+	 * corresponding to them is safely on disk.
 	 */
-	iip->ili_last_fields = 0;
+	for (blip = lip; blip; blip = next) {
+		next = blip->li_bio_list;
+		blip->li_bio_list = NULL;
 
-	/*
-	 * Release the inode's flush lock since we're done with it.
-	 */
-	xfs_ifunlock(ip);
+		iip = INODE_ITEM(blip);
+		iip->ili_logged = 0;
+		iip->ili_last_fields = 0;
+		xfs_ifunlock(iip->ili_inode);
+	}
 }
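
The shape of the rewrite above: one unlocked pass collects every attached inode item that still needs AIL removal, the (expensive) AIL lock is then taken once for a bulk delete, and the per-item flush-lock cleanup runs with no lock held at all. A compact sketch of that batching structure, with made-up types and a printf standing in for the locked bulk delete:

#include <stdio.h>

#define MAX_ITEMS	8

struct log_item {
	int	lsn;
	int	flush_lsn;
	int	logged;
};

/* collect unlocked, delete in one locked batch, clean up unlocked */
static void complete_flush(struct log_item *items, int n)
{
	struct log_item *batch[MAX_ITEMS];
	int need_ail = 0;

	for (int i = 0; i < n; i++)
		if (items[i].logged && items[i].lsn == items[i].flush_lsn)
			batch[need_ail++] = &items[i];

	if (need_ail) {
		/* lock(); delete_bulk(batch, need_ail); unlock(); */
		printf("bulk-removing %d items under one lock, first lsn %d\n",
		       need_ail, batch[0]->lsn);
	}

	for (int i = 0; i < n; i++)
		items[i].logged = 0;	/* per-item cleanup, no lock needed */
}

int main(void)
{
	struct log_item items[] = {
		{ .lsn = 1, .flush_lsn = 1, .logged = 1 },
		{ .lsn = 2, .flush_lsn = 3, .logged = 1 },
		{ .lsn = 4, .flush_lsn = 4, .logged = 1 },
	};

	complete_flush(items, 3);
	return 0;
}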
 
 /*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2057614..8a0f044 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,127 +47,8 @@
 
 #define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
 						<< mp->m_writeio_log)
-#define XFS_STRAT_WRITE_IMAPS	2
 #define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
 
-STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
-				  int, struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
-				 struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
-				struct xfs_bmbt_irec *, int *);
-
-int
-xfs_iomap(
-	struct xfs_inode	*ip,
-	xfs_off_t		offset,
-	ssize_t			count,
-	int			flags,
-	struct xfs_bmbt_irec	*imap,
-	int			*nimaps,
-	int			*new)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		offset_fsb, end_fsb;
-	int			error = 0;
-	int			lockmode = 0;
-	int			bmapi_flags = 0;
-
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-
-	*new = 0;
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return XFS_ERROR(EIO);
-
-	trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
-
-	switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
-	case BMAPI_READ:
-		lockmode = xfs_ilock_map_shared(ip);
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-		break;
-	case BMAPI_WRITE:
-		lockmode = XFS_ILOCK_EXCL;
-		if (flags & BMAPI_IGNSTATE)
-			bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
-		xfs_ilock(ip, lockmode);
-		break;
-	case BMAPI_ALLOCATE:
-		lockmode = XFS_ILOCK_SHARED;
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-
-		/* Attempt non-blocking lock */
-		if (flags & BMAPI_TRYLOCK) {
-			if (!xfs_ilock_nowait(ip, lockmode))
-				return XFS_ERROR(EAGAIN);
-		} else {
-			xfs_ilock(ip, lockmode);
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	ASSERT(offset <= mp->m_maxioffset);
-	if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
-		count = mp->m_maxioffset - offset;
-	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
-	error = xfs_bmapi(NULL, ip, offset_fsb,
-			(xfs_filblks_t)(end_fsb - offset_fsb),
-			bmapi_flags,  NULL, 0, imap,
-			nimaps, NULL);
-
-	if (error)
-		goto out;
-
-	switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
-	case BMAPI_WRITE:
-		/* If we found an extent, return it */
-		if (*nimaps &&
-		    (imap->br_startblock != HOLESTARTBLOCK) &&
-		    (imap->br_startblock != DELAYSTARTBLOCK)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		if (flags & BMAPI_DIRECT) {
-			error = xfs_iomap_write_direct(ip, offset, count, flags,
-						       imap, nimaps);
-		} else {
-			error = xfs_iomap_write_delay(ip, offset, count, flags,
-						      imap, nimaps);
-		}
-		if (!error) {
-			trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
-		}
-		*new = 1;
-		break;
-	case BMAPI_ALLOCATE:
-		/* If we found an extent, return it */
-		xfs_iunlock(ip, lockmode);
-		lockmode = 0;
-
-		if (*nimaps && !isnullstartblock(imap->br_startblock)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		error = xfs_iomap_write_allocate(ip, offset, count,
-						 imap, nimaps);
-		break;
-	}
-
-	ASSERT(*nimaps <= 1);
-
-out:
-	if (lockmode)
-		xfs_iunlock(ip, lockmode);
-	return XFS_ERROR(error);
-}
-
 STATIC int
 xfs_iomap_eof_align_last_fsb(
 	xfs_mount_t	*mp,
@@ -236,14 +117,13 @@
 	return EFSCORRUPTED;
 }
 
-STATIC int
+int
 xfs_iomap_write_direct(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		flags,
 	xfs_bmbt_irec_t *imap,
-	int		*nmaps)
+	int		nmaps)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
@@ -279,7 +159,7 @@
 		if (error)
 			goto error_out;
 	} else {
-		if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK))
+		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
 			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
 					imap->br_blockcount +
 					imap->br_startoff);
@@ -331,7 +211,7 @@
 	xfs_trans_ijoin(tp, ip);
 
 	bmapi_flag = XFS_BMAPI_WRITE;
-	if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
+	if (offset < ip->i_size || extsz)
 		bmapi_flag |= XFS_BMAPI_PREALLOC;
 
 	/*
@@ -370,7 +250,6 @@
 		goto error_out;
 	}
 
-	*nmaps = 1;
 	return 0;
 
 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
@@ -379,7 +258,6 @@
 
 error1:	/* Just cancel transaction */
 	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-	*nmaps = 0;	/* nothing set-up here */
 
 error_out:
 	return XFS_ERROR(error);
@@ -389,6 +267,9 @@
  * If the caller is doing a write at the end of the file, then extend the
  * allocation out to the file system's write iosize.  We clean up any extra
  * space left over when the file is closed in xfs_inactive().
+ *
+ * If we find we already have delalloc preallocation beyond EOF, don't do more
+ * preallocation as it is not needed.
  */
 STATIC int
 xfs_iomap_eof_want_preallocate(
@@ -396,7 +277,6 @@
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		ioflag,
 	xfs_bmbt_irec_t *imap,
 	int		nimaps,
 	int		*prealloc)
@@ -405,6 +285,7 @@
 	xfs_filblks_t   count_fsb;
 	xfs_fsblock_t	firstblock;
 	int		n, error, imaps;
+	int		found_delalloc = 0;
 
 	*prealloc = 0;
 	if ((offset + count) <= ip->i_size)
@@ -429,20 +310,71 @@
 				return 0;
 			start_fsb += imap[n].br_blockcount;
 			count_fsb -= imap[n].br_blockcount;
+
+			if (imap[n].br_startblock == DELAYSTARTBLOCK)
+				found_delalloc = 1;
 		}
 	}
-	*prealloc = 1;
+	if (!found_delalloc)
+		*prealloc = 1;
 	return 0;
 }
 
-STATIC int
+/*
+ * If we don't have a user specified preallocation size, dynamically increase
+ * the preallocation size as the size of the file grows. Cap the maximum size
+ * at a single extent or less if the filesystem is near full. The closer the
+ * filesystem is to being full, the smaller the maximum preallocation.
+ */
+STATIC xfs_fsblock_t
+xfs_iomap_prealloc_size(
+	struct xfs_mount	*mp,
+	struct xfs_inode	*ip)
+{
+	xfs_fsblock_t		alloc_blocks = 0;
+
+	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
+		int shift = 0;
+		int64_t freesp;
+
+		/*
+		 * rounddown_pow_of_two() returns an undefined result
+		 * if we pass in alloc_blocks = 0. Hence the "+ 1" to
+		 * ensure we always pass in a non-zero value.
+		 */
+		alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1;
+		alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
+					rounddown_pow_of_two(alloc_blocks));
+
+		xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+		freesp = mp->m_sb.sb_fdblocks;
+		if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
+			shift = 2;
+			if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
+				shift++;
+		}
+		if (shift)
+			alloc_blocks >>= shift;
+	}
+
+	if (alloc_blocks < mp->m_writeio_blocks)
+		alloc_blocks = mp->m_writeio_blocks;
+
+	return alloc_blocks;
+}
+
+int
 xfs_iomap_write_delay(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		ioflag,
-	xfs_bmbt_irec_t *ret_imap,
-	int		*nmaps)
+	xfs_bmbt_irec_t *ret_imap)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
@@ -469,16 +401,19 @@
 	extsz = xfs_get_extsz_hint(ip);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
+
 	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
-				ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
+				imap, XFS_WRITE_IMAPS, &prealloc);
 	if (error)
 		return error;
 
 retry:
 	if (prealloc) {
+		xfs_fsblock_t	alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
+
 		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
 		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
-		last_fsb = ioalign + mp->m_writeio_blocks;
+		last_fsb = ioalign + alloc_blocks;
 	} else {
 		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
 	}
@@ -496,22 +431,31 @@
 			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
 			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
 			  &nimaps, NULL);
-	if (error && (error != ENOSPC))
+	switch (error) {
+	case 0:
+	case ENOSPC:
+	case EDQUOT:
+		break;
+	default:
 		return XFS_ERROR(error);
+	}
 
 	/*
-	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
-	 * then we must have run out of space - flush all other inodes with
-	 * delalloc blocks and retry without EOF preallocation.
+	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT.  For
+	 * ENOSPC, flush all other inodes with delalloc blocks to free up
+	 * some of the excess reserved metadata space. For both cases, retry
+	 * without EOF preallocation.
 	 */
 	if (nimaps == 0) {
 		trace_xfs_delalloc_enospc(ip, offset, count);
 		if (flushed)
-			return XFS_ERROR(ENOSPC);
+			return XFS_ERROR(error ? error : ENOSPC);
 
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		xfs_flush_inodes(ip);
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		if (error == ENOSPC) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			xfs_flush_inodes(ip);
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+		}
 
 		flushed = 1;
 		error = 0;
@@ -523,8 +467,6 @@
 		return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
 
 	*ret_imap = imap[0];
-	*nmaps = 1;
-
 	return 0;
 }
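As a rough illustration of the sizing rule in xfs_iomap_prealloc_size() above, here is a stand-alone sketch with made-up inputs. It assumes 4 KiB filesystem blocks, reads the XFS_LOWSP_*_PCNT thresholds as 5%..1% of free blocks, and leaves out the MAXEXTLEN cap and the m_writeio_blocks floor, so only the power-of-two rounding and the low-space shift are modelled.

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t rounddown_pow_of_two(uint64_t n)
	{
		while (n & (n - 1))
			n &= n - 1;	/* clear low bits until a power of two is left */
		return n;
	}

	static uint64_t prealloc_blocks(uint64_t isize_bytes, double free_pct)
	{
		/* "+ 1" mirrors the guard against passing zero to the rounding */
		uint64_t alloc_blocks = rounddown_pow_of_two(isize_bytes / 4096 + 1);
		int shift = 0;

		if (free_pct < 5.0) {
			shift = 2;
			for (double t = 4.0; t >= 1.0; t -= 1.0)
				if (free_pct < t)
					shift++;
		}
		return shift ? alloc_blocks >> shift : alloc_blocks;
	}

	int main(void)
	{
		/* 1 GiB file, 2% free: 262145 -> 262144 blocks, >> 4 = 16384 blocks (64 MiB) */
		printf("%llu\n", (unsigned long long)prealloc_blocks(1ULL << 30, 2.0));
		return 0;
	}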
 
@@ -538,13 +480,12 @@
  * We no longer bother to look at the incoming map - all we have to
  * guarantee is that whatever we allocate fills the required range.
  */
-STATIC int
+int
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	xfs_bmbt_irec_t *imap,
-	int		*retmap)
+	xfs_bmbt_irec_t *imap)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb, last_block;
@@ -557,8 +498,6 @@
 	int		error = 0;
 	int		nres;
 
-	*retmap = 0;
-
 	/*
 	 * Make sure that the dquots are there.
 	 */
@@ -680,7 +619,6 @@
 		if ((offset_fsb >= imap->br_startoff) &&
 		    (offset_fsb < (imap->br_startoff +
 				   imap->br_blockcount))) {
-			*retmap = 1;
 			XFS_STATS_INC(xs_xstrat_quick);
 			return 0;
 		}
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7748a43..8061576 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,30 +18,15 @@
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
-/* base extent manipulation calls */
-#define BMAPI_READ	(1 << 0)	/* read extents */
-#define BMAPI_WRITE	(1 << 1)	/* create extents */
-#define BMAPI_ALLOCATE	(1 << 2)	/* delayed allocate to real extents */
-
-/* modifiers */
-#define BMAPI_IGNSTATE	(1 << 4)	/* ignore unwritten state on read */
-#define BMAPI_DIRECT	(1 << 5)	/* direct instead of buffered write */
-#define BMAPI_MMA	(1 << 6)	/* allocate for mmap write */
-#define BMAPI_TRYLOCK	(1 << 7)	/* non-blocking request */
-
-#define BMAPI_FLAGS \
-	{ BMAPI_READ,		"READ" }, \
-	{ BMAPI_WRITE,		"WRITE" }, \
-	{ BMAPI_ALLOCATE,	"ALLOCATE" }, \
-	{ BMAPI_IGNSTATE,	"IGNSTATE" }, \
-	{ BMAPI_DIRECT,		"DIRECT" }, \
-	{ BMAPI_TRYLOCK,	"TRYLOCK" }
-
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
-		     struct xfs_bmbt_irec *, int *, int *);
+extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *, int);
+extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
+extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/
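With xfs_iomap() removed, callers (in xfs_aops.c and friends, which are outside this hunk) are expected to pick the allocation variant themselves. The sketch below only shows that selection; the example_map_blocks name, the is_direct_io flag and the surrounding locking are assumptions, while the three prototypes are the ones exported just above.

	static int
	example_map_blocks(
		struct xfs_inode	*ip,
		xfs_off_t		offset,
		size_t			count,
		int			is_direct_io,
		struct xfs_bmbt_irec	*imap,
		int			nimaps)
	{
		if (is_direct_io) {
			/* allocate real blocks now, in a transaction */
			return xfs_iomap_write_direct(ip, offset, count, imap, nimaps);
		}
		/*
		 * Buffered write: reserve delalloc space only; writeback later
		 * converts it via xfs_iomap_write_allocate().
		 */
		return xfs_iomap_write_delay(ip, offset, count, imap);
	}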
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index cee4ab9..ae6fef1f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -47,7 +47,7 @@
 				xfs_buftarg_t	*log_target,
 				xfs_daddr_t	blk_offset,
 				int		num_bblks);
-STATIC int	 xlog_space_left(xlog_t *log, int cycle, int bytes);
+STATIC int	 xlog_space_left(struct log *log, atomic64_t *head);
 STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void	 xlog_dealloc_log(xlog_t *log);
 
@@ -70,7 +70,7 @@
 /* local functions to manipulate grant head */
 STATIC int  xlog_grant_log_space(xlog_t		*log,
 				 xlog_ticket_t	*xtic);
-STATIC void xlog_grant_push_ail(xfs_mount_t	*mp,
+STATIC void xlog_grant_push_ail(struct log	*log,
 				int		need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
 					   xlog_ticket_t *ticket);
@@ -81,98 +81,73 @@
 
 #if defined(DEBUG)
 STATIC void	xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void	xlog_verify_grant_head(xlog_t *log, int equals);
+STATIC void	xlog_verify_grant_tail(struct log *log);
 STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
 				  int count, boolean_t syncing);
 STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
 				     xfs_lsn_t tail_lsn);
 #else
 #define xlog_verify_dest_ptr(a,b)
-#define xlog_verify_grant_head(a,b)
+#define xlog_verify_grant_tail(a)
 #define xlog_verify_iclog(a,b,c,d)
 #define xlog_verify_tail_lsn(a,b,c)
 #endif
 
 STATIC int	xlog_iclogs_empty(xlog_t *log);
 
-
 static void
-xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_sub_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	if (*qp) {
-		tic->t_next	    = (*qp);
-		tic->t_prev	    = (*qp)->t_prev;
-		(*qp)->t_prev->t_next = tic;
-		(*qp)->t_prev	    = tic;
-	} else {
-		tic->t_prev = tic->t_next = tic;
-		*qp = tic;
-	}
+	int64_t	head_val = atomic64_read(head);
+	int64_t new, old;
 
-	tic->t_flags |= XLOG_TIC_IN_Q;
+	do {
+		int	cycle, space;
+
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
+
+		space -= bytes;
+		if (space < 0) {
+			space += log->l_logsize;
+			cycle--;
+		}
+
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
 
 static void
-xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_add_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	if (tic == tic->t_next) {
-		*qp = NULL;
-	} else {
-		*qp = tic->t_next;
-		tic->t_next->t_prev = tic->t_prev;
-		tic->t_prev->t_next = tic->t_next;
-	}
+	int64_t	head_val = atomic64_read(head);
+	int64_t new, old;
 
-	tic->t_next = tic->t_prev = NULL;
-	tic->t_flags &= ~XLOG_TIC_IN_Q;
-}
+	do {
+		int		tmp;
+		int		cycle, space;
 
-static void
-xlog_grant_sub_space(struct log *log, int bytes)
-{
-	log->l_grant_write_bytes -= bytes;
-	if (log->l_grant_write_bytes < 0) {
-		log->l_grant_write_bytes += log->l_logsize;
-		log->l_grant_write_cycle--;
-	}
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
 
-	log->l_grant_reserve_bytes -= bytes;
-	if ((log)->l_grant_reserve_bytes < 0) {
-		log->l_grant_reserve_bytes += log->l_logsize;
-		log->l_grant_reserve_cycle--;
-	}
+		tmp = log->l_logsize - space;
+		if (tmp > bytes)
+			space += bytes;
+		else {
+			space = bytes - tmp;
+			cycle++;
+		}
 
-}
-
-static void
-xlog_grant_add_space_write(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_write_bytes;
-	if (tmp > bytes)
-		log->l_grant_write_bytes += bytes;
-	else {
-		log->l_grant_write_cycle++;
-		log->l_grant_write_bytes = bytes - tmp;
-	}
-}
-
-static void
-xlog_grant_add_space_reserve(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_reserve_bytes;
-	if (tmp > bytes)
-		log->l_grant_reserve_bytes += bytes;
-	else {
-		log->l_grant_reserve_cycle++;
-		log->l_grant_reserve_bytes = bytes - tmp;
-	}
-}
-
-static inline void
-xlog_grant_add_space(struct log *log, int bytes)
-{
-	xlog_grant_add_space_write(log, bytes);
-	xlog_grant_add_space_reserve(log, bytes);
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
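The two helpers above are the same compare-and-swap retry loop applied to a 64-bit word that packs (cycle, space); the packing comes from the xlog_assign_grant_head_val()/xlog_crack_grant_head_val() helpers added to xfs_log_priv.h later in this diff. A user-space rendering of the add side with C11 atomics, purely as a sketch of the pattern:

	#include <stdatomic.h>
	#include <stdint.h>

	static int64_t pack(int cycle, int space)
	{
		return ((int64_t)cycle << 32) | space;
	}

	static void unpack(int64_t v, int *cycle, int *space)
	{
		*cycle = v >> 32;
		*space = v & 0xffffffff;
	}

	/* add "bytes" to the head, rolling into the next cycle past "logsize" */
	static void grant_add_space(_Atomic int64_t *head, int logsize, int bytes)
	{
		int64_t old = atomic_load(head);
		int64_t new;

		do {
			int cycle, space;

			unpack(old, &cycle, &space);
			if (logsize - space > bytes) {
				space += bytes;
			} else {
				space = bytes - (logsize - space);
				cycle++;
			}
			new = pack(cycle, space);
			/* on failure "old" is refreshed with the current value */
		} while (!atomic_compare_exchange_weak(head, &old, new));
	}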
 
 static void
@@ -355,7 +330,7 @@
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+		xlog_grant_push_ail(log, internal_ticket->t_unit_res);
 		retval = xlog_regrant_write_log_space(log, internal_ticket);
 	} else {
 		/* may sleep if need to allocate more tickets */
@@ -369,7 +344,7 @@
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp,
+		xlog_grant_push_ail(log,
 				    (internal_ticket->t_unit_res *
 				     internal_ticket->t_cnt));
 		retval = xlog_grant_log_space(log, internal_ticket);
@@ -402,7 +377,7 @@
 		cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
 	else {
 		cmn_err(CE_NOTE,
-			"!Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
+			"Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
 			mp->m_fsname);
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 	}
@@ -584,8 +559,8 @@
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 			if (!XLOG_FORCED_SHUTDOWN(log)) {
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+							&log->l_icloglock);
 			} else {
 				spin_unlock(&log->l_icloglock);
 			}
@@ -625,8 +600,8 @@
 			|| iclog->ic_state == XLOG_STATE_DIRTY
 			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
 
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+							&log->l_icloglock);
 		} else {
 			spin_unlock(&log->l_icloglock);
 		}
@@ -703,55 +678,46 @@
 {
 	xlog_ticket_t	*tic;
 	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes, cycle, bytes;
+	int		need_bytes, free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0) {
-		/* needed since sync_lsn is 64 bits */
-		spin_lock(&log->l_icloglock);
-		tail_lsn = log->l_last_sync_lsn;
-		spin_unlock(&log->l_icloglock);
-	}
+	if (tail_lsn == 0)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
-	spin_lock(&log->l_grant_lock);
+	/* tail_lsn == 1 implies that we weren't passed a valid value.  */
+	if (tail_lsn != 1)
+		atomic64_set(&log->l_tail_lsn, tail_lsn);
 
-	/* Also an invalid lsn.  1 implies that we aren't passing in a valid
-	 * tail_lsn.
-	 */
-	if (tail_lsn != 1) {
-		log->l_tail_lsn = tail_lsn;
-	}
-
-	if ((tic = log->l_write_headq)) {
+	if (!list_empty_careful(&log->l_writeq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_write_cycle;
-		bytes = log->l_grant_write_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
 				break;
 			tail_lsn = 0;
 			free_bytes -= tic->t_unit_res;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
+			trace_xfs_log_regrant_write_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_write_lock);
 	}
-	if ((tic = log->l_reserve_headq)) {
+
+	if (!list_empty_careful(&log->l_reserveq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_reserve_cycle;
-		bytes = log->l_grant_reserve_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_reserve_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
 			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 				need_bytes = tic->t_unit_res*tic->t_cnt;
 			else
@@ -760,12 +726,12 @@
 				break;
 			tail_lsn = 0;
 			free_bytes -= need_bytes;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
+			trace_xfs_log_grant_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_reserve_lock);
 	}
-	spin_unlock(&log->l_grant_lock);
-}	/* xfs_log_move_tail */
+}
 
 /*
  * Determine if we have a transaction that has gone to disk
@@ -831,23 +797,19 @@
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
 {
-	xfs_lsn_t tail_lsn;
-	xlog_t	  *log = mp->m_log;
+	xfs_lsn_t		tail_lsn;
+	struct log		*log = mp->m_log;
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
-	spin_lock(&log->l_grant_lock);
-	if (tail_lsn != 0) {
-		log->l_tail_lsn = tail_lsn;
-	} else {
-		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-	}
-	spin_unlock(&log->l_grant_lock);
+	if (!tail_lsn)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
+	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
-}	/* xlog_assign_tail_lsn */
-
+}
 
 /*
  * Return the space in the log between the tail and the head.  The head
@@ -864,21 +826,26 @@
  * result is that we return the size of the log as the amount of space left.
  */
 STATIC int
-xlog_space_left(xlog_t *log, int cycle, int bytes)
+xlog_space_left(
+	struct log	*log,
+	atomic64_t	*head)
 {
-	int free_bytes;
-	int tail_bytes;
-	int tail_cycle;
+	int		free_bytes;
+	int		tail_bytes;
+	int		tail_cycle;
+	int		head_cycle;
+	int		head_bytes;
 
-	tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
-	tail_cycle = CYCLE_LSN(log->l_tail_lsn);
-	if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
-		free_bytes = log->l_logsize - (bytes - tail_bytes);
-	} else if ((tail_cycle + 1) < cycle) {
+	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+	tail_bytes = BBTOB(tail_bytes);
+	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
+		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
+	else if (tail_cycle + 1 < head_cycle)
 		return 0;
-	} else if (tail_cycle < cycle) {
-		ASSERT(tail_cycle == (cycle - 1));
-		free_bytes = tail_bytes - bytes;
+	else if (tail_cycle < head_cycle) {
+		ASSERT(tail_cycle == (head_cycle - 1));
+		free_bytes = tail_bytes - head_bytes;
 	} else {
 		/*
 		 * The reservation head is behind the tail.
@@ -889,12 +856,12 @@
 			"xlog_space_left: head behind tail\n"
 			"  tail_cycle = %d, tail_bytes = %d\n"
 			"  GH   cycle = %d, GH   bytes = %d",
-			tail_cycle, tail_bytes, cycle, bytes);
+			tail_cycle, tail_bytes, head_cycle, head_bytes);
 		ASSERT(0);
 		free_bytes = log->l_logsize;
 	}
 	return free_bytes;
-}	/* xlog_space_left */
+}
 
 
 /*
@@ -1047,12 +1014,16 @@
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
 
 	log->l_prev_block  = -1;
-	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_last_sync_lsn = log->l_tail_lsn;
+	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
-	log->l_grant_reserve_cycle = 1;
-	log->l_grant_write_cycle = 1;
+	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
+	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
+	INIT_LIST_HEAD(&log->l_reserveq);
+	INIT_LIST_HEAD(&log->l_writeq);
+	spin_lock_init(&log->l_grant_reserve_lock);
+	spin_lock_init(&log->l_grant_write_lock);
 
 	error = EFSCORRUPTED;
 	if (xfs_sb_version_hassector(&mp->m_sb)) {
@@ -1094,8 +1065,7 @@
 	log->l_xbuf = bp;
 
 	spin_lock_init(&log->l_icloglock);
-	spin_lock_init(&log->l_grant_lock);
-	sv_init(&log->l_flush_wait, 0, "flush_wait");
+	init_waitqueue_head(&log->l_flush_wait);
 
 	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
 	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1151,8 +1121,8 @@
 
 		ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
 		ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
-		sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
-		sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
+		init_waitqueue_head(&iclog->ic_force_wait);
+		init_waitqueue_head(&iclog->ic_write_wait);
 
 		iclogp = &iclog->ic_next;
 	}
@@ -1167,15 +1137,11 @@
 out_free_iclog:
 	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
 		prev_iclog = iclog->ic_next;
-		if (iclog->ic_bp) {
-			sv_destroy(&iclog->ic_force_wait);
-			sv_destroy(&iclog->ic_write_wait);
+		if (iclog->ic_bp)
 			xfs_buf_free(iclog->ic_bp);
-		}
 		kmem_free(iclog);
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 	xfs_buf_free(log->l_xbuf);
 out_free_log:
 	kmem_free(log);
@@ -1223,61 +1189,60 @@
  * water mark.  In this manner, we would be creating a low water mark.
  */
 STATIC void
-xlog_grant_push_ail(xfs_mount_t	*mp,
-		    int		need_bytes)
+xlog_grant_push_ail(
+	struct log	*log,
+	int		need_bytes)
 {
-    xlog_t	*log = mp->m_log;	/* pointer to the log */
-    xfs_lsn_t	tail_lsn;		/* lsn of the log tail */
-    xfs_lsn_t	threshold_lsn = 0;	/* lsn we'd like to be at */
-    int		free_blocks;		/* free blocks left to write to */
-    int		free_bytes;		/* free bytes left to write to */
-    int		threshold_block;	/* block in lsn we'd like to be at */
-    int		threshold_cycle;	/* lsn cycle we'd like to be at */
-    int		free_threshold;
+	xfs_lsn_t	threshold_lsn = 0;
+	xfs_lsn_t	last_sync_lsn;
+	int		free_blocks;
+	int		free_bytes;
+	int		threshold_block;
+	int		threshold_cycle;
+	int		free_threshold;
 
-    ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
+	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
 
-    spin_lock(&log->l_grant_lock);
-    free_bytes = xlog_space_left(log,
-				 log->l_grant_reserve_cycle,
-				 log->l_grant_reserve_bytes);
-    tail_lsn = log->l_tail_lsn;
-    free_blocks = BTOBBT(free_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+	free_blocks = BTOBBT(free_bytes);
 
-    /*
-     * Set the threshold for the minimum number of free blocks in the
-     * log to the maximum of what the caller needs, one quarter of the
-     * log, and 256 blocks.
-     */
-    free_threshold = BTOBB(need_bytes);
-    free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
-    free_threshold = MAX(free_threshold, 256);
-    if (free_blocks < free_threshold) {
-	threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
-	threshold_cycle = CYCLE_LSN(tail_lsn);
-	if (threshold_block >= log->l_logBBsize) {
-	    threshold_block -= log->l_logBBsize;
-	    threshold_cycle += 1;
-	}
-	threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
-
-	/* Don't pass in an lsn greater than the lsn of the last
-	 * log record known to be on disk.
+	/*
+	 * Set the threshold for the minimum number of free blocks in the
+	 * log to the maximum of what the caller needs, one quarter of the
+	 * log, and 256 blocks.
 	 */
-	if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-	    threshold_lsn = log->l_last_sync_lsn;
-    }
-    spin_unlock(&log->l_grant_lock);
+	free_threshold = BTOBB(need_bytes);
+	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
+	free_threshold = MAX(free_threshold, 256);
+	if (free_blocks >= free_threshold)
+		return;
 
-    /*
-     * Get the transaction layer to kick the dirty buffers out to
-     * disk asynchronously. No point in trying to do this if
-     * the filesystem is shutting down.
-     */
-    if (threshold_lsn &&
-	!XLOG_FORCED_SHUTDOWN(log))
-	    xfs_trans_ail_push(log->l_ailp, threshold_lsn);
-}	/* xlog_grant_push_ail */
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+						&threshold_block);
+	threshold_block += free_threshold;
+	if (threshold_block >= log->l_logBBsize) {
+		threshold_block -= log->l_logBBsize;
+		threshold_cycle += 1;
+	}
+	threshold_lsn = xlog_assign_lsn(threshold_cycle,
+					threshold_block);
+	/*
+	 * Don't pass in an lsn greater than the lsn of the last
+	 * log record known to be on disk. Use a snapshot of the last sync lsn
+	 * so that it doesn't change between the compare and the set.
+	 */
+	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+		threshold_lsn = last_sync_lsn;
+
+	/*
+	 * Get the transaction layer to kick the dirty buffers out to
+	 * disk asynchronously. No point in trying to do this if
+	 * the filesystem is shutting down.
+	 */
+	if (!XLOG_FORCED_SHUTDOWN(log))
+		xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+}
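A worked example of the threshold arithmetic in xlog_grant_push_ail() above, with invented numbers (an 8192-BB log, tail at cycle 7 block 8000, a caller needing 512 BB); the wrap into the next cycle is the interesting part.

	#include <stdio.h>

	int main(void)
	{
		const int logBBsize = 8192;	/* log size in 512-byte basic blocks */
		int need = 512, tail_cycle = 7, tail_block = 8000;

		int free_threshold = need;
		if (free_threshold < logBBsize >> 2)
			free_threshold = logBBsize >> 2;	/* one quarter of the log */
		if (free_threshold < 256)
			free_threshold = 256;			/* 2048 wins here */

		int threshold_cycle = tail_cycle;
		int threshold_block = tail_block + free_threshold;
		if (threshold_block >= logBBsize) {
			threshold_block -= logBBsize;
			threshold_cycle++;
		}
		/* prints "push AIL to (8, 1856)" -- then capped at l_last_sync_lsn */
		printf("push AIL to (%d, %d)\n", threshold_cycle, threshold_block);
		return 0;
	}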
 
 /*
  * The bdstrat callback function for log bufs. This gives us a central
@@ -1372,9 +1337,8 @@
 		 roundoff < BBTOB(1)));
 
 	/* move grant heads by roundoff in sync */
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_add_space(log, roundoff);
-	spin_unlock(&log->l_grant_lock);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
+	xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
 
 	/* put cycle number in every block */
 	xlog_pack_data(log, iclog, roundoff); 
@@ -1489,15 +1453,12 @@
 
 	iclog = log->l_iclog;
 	for (i=0; i<log->l_iclog_bufs; i++) {
-		sv_destroy(&iclog->ic_force_wait);
-		sv_destroy(&iclog->ic_write_wait);
 		xfs_buf_free(iclog->ic_bp);
 		next_iclog = iclog->ic_next;
 		kmem_free(iclog);
 		iclog = next_iclog;
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 
 	xfs_buf_free(log->l_xbuf);
 	log->l_mp->m_log = NULL;
@@ -2232,7 +2193,7 @@
 				lowest_lsn = xlog_get_lowest_lsn(log);
 				if (lowest_lsn &&
 				    XFS_LSN_CMP(lowest_lsn,
-				    		be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
 					iclog = iclog->ic_next;
 					continue; /* Leave this iclog for
 						   * another thread */
@@ -2240,23 +2201,21 @@
 
 				iclog->ic_state = XLOG_STATE_CALLBACK;
 
-				spin_unlock(&log->l_icloglock);
 
-				/* l_last_sync_lsn field protected by
-				 * l_grant_lock. Don't worry about iclog's lsn.
-				 * No one else can be here except us.
+				/*
+				 * update the last_sync_lsn before we drop the
+				 * icloglock to ensure we are the only one that
+				 * can update it.
 				 */
-				spin_lock(&log->l_grant_lock);
-				ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
-				       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-				log->l_last_sync_lsn =
-					be64_to_cpu(iclog->ic_header.h_lsn);
-				spin_unlock(&log->l_grant_lock);
+				ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+					be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+				atomic64_set(&log->l_last_sync_lsn,
+					be64_to_cpu(iclog->ic_header.h_lsn));
 
-			} else {
-				spin_unlock(&log->l_icloglock);
+			} else
 				ioerrors++;
-			}
+
+			spin_unlock(&log->l_icloglock);
 
 			/*
 			 * Keep processing entries in the callback list until
@@ -2297,7 +2256,7 @@
 			xlog_state_clean_log(log);
 
 			/* wake up threads waiting in xfs_log_force() */
-			sv_broadcast(&iclog->ic_force_wait);
+			wake_up_all(&iclog->ic_force_wait);
 
 			iclog = iclog->ic_next;
 		} while (first_iclog != iclog);
@@ -2344,7 +2303,7 @@
 	spin_unlock(&log->l_icloglock);
 
 	if (wake)
-		sv_broadcast(&log->l_flush_wait);
+		wake_up_all(&log->l_flush_wait);
 }
 
 
@@ -2395,7 +2354,7 @@
 	 * iclog buffer, we wake them all, one will get to do the
 	 * I/O, the others get to wait for the result.
 	 */
-	sv_broadcast(&iclog->ic_write_wait);
+	wake_up_all(&iclog->ic_write_wait);
 	spin_unlock(&log->l_icloglock);
 	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
 }	/* xlog_state_done_syncing */
@@ -2444,7 +2403,7 @@
 		XFS_STATS_INC(xs_log_noiclogs);
 
 		/* Wait for log writes to have flushed */
-		sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
+		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 		goto restart;
 	}
 
@@ -2527,6 +2486,18 @@
  *
  * Once a ticket gets put onto the reserveq, it will only return after
  * the needed reservation is satisfied.
+ *
+ * This function is structured so that it has a lock-free fast path. This is
+ * necessary because every new transaction reservation will come through this
+ * path. Hence any lock will be globally hot if we take it unconditionally on
+ * every pass.
+ *
+ * As tickets are only ever moved on and off the reserveq under the
+ * l_grant_reserve_lock, we only need to take that lock if we are going
+ * to add the ticket to the queue and sleep. We can avoid taking the lock if the
+ * ticket was never added to the reserveq because the t_queue list head will be
+ * empty and we hold the only reference to it so it can safely be checked
+ * unlocked.
  */
 STATIC int
 xlog_grant_log_space(xlog_t	   *log,
@@ -2534,24 +2505,27 @@
 {
 	int		 free_bytes;
 	int		 need_bytes;
-#ifdef DEBUG
-	xfs_lsn_t	 tail_lsn;
-#endif
-
 
 #ifdef DEBUG
 	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 		panic("grant Recovery problem");
 #endif
 
-	/* Is there space or do we need to sleep? */
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_grant_enter(log, tic);
 
+	need_bytes = tic->t_unit_res;
+	if (tic->t_flags & XFS_LOG_PERM_RESERV)
+		need_bytes *= tic->t_ocnt;
+
 	/* something is already sleeping; insert new transaction at end */
-	if (log->l_reserve_headq) {
-		xlog_ins_ticketq(&log->l_reserve_headq, tic);
+	if (!list_empty_careful(&log->l_reserveq)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		/* recheck the queue now we are locked */
+		if (list_empty(&log->l_reserveq)) {
+			spin_unlock(&log->l_grant_reserve_lock);
+			goto redo;
+		}
+		list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep1(log, tic);
 
@@ -2563,72 +2537,57 @@
 			goto error_return;
 
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
 		/*
 		 * If we got an error, and the filesystem is shutting down,
 		 * we'll catch it down below. So just continue...
 		 */
 		trace_xfs_log_grant_wake1(log, tic);
-		spin_lock(&log->l_grant_lock);
 	}
-	if (tic->t_flags & XFS_LOG_PERM_RESERV)
-		need_bytes = tic->t_unit_res*tic->t_ocnt;
-	else
-		need_bytes = tic->t_unit_res;
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
-				     log->l_grant_reserve_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_reserve_headq, tic);
+		spin_lock(&log->l_grant_reserve_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep2(log, tic);
 
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
-
-		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
-		trace_xfs_log_grant_wake2(log, tic);
+		xlog_grant_push_ail(log, need_bytes);
 
+		XFS_STATS_INC(xs_sleep_logspace);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
+		trace_xfs_log_grant_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
+	}
+
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_reserve_lock);
+	}
 
 	/* we've got enough space */
-	xlog_grant_add_space(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	/*
-	 * Check to make sure the grant write head didn't just over lap the
-	 * tail.  If the cycles are the same, we can't be overlapping.
-	 * Otherwise, make sure that the cycles differ by exactly one and
-	 * check the byte count.
-	 */
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
-	}
-#endif
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_grant_exit(log, tic);
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
- error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+error_return_unlocked:
+	spin_lock(&log->l_grant_reserve_lock);
+error_return:
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_reserve_lock);
 	trace_xfs_log_grant_error(log, tic);
 
 	/*
@@ -2638,7 +2597,6 @@
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 }	/* xlog_grant_log_space */
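The lock-free fast path described in the comment before xlog_grant_log_space() boils down to a double-checked queue: peek with list_empty_careful() outside the lock, and take l_grant_reserve_lock only when the ticket really has to be queued and slept on. A stripped-down sketch of just that shape, with the space accounting and error handling left out:

	/* illustrative shape only, not the literal function above */
	static void example_queue_and_wait(struct log *log, struct xlog_ticket *tic)
	{
		/* fast path: nobody queued ahead of us, no lock taken at all */
		if (list_empty_careful(&log->l_reserveq))
			return;

		spin_lock(&log->l_grant_reserve_lock);
		if (list_empty(&log->l_reserveq)) {
			/* raced with the last waiter leaving; carry on unlocked */
			spin_unlock(&log->l_grant_reserve_lock);
			return;
		}
		list_add_tail(&tic->t_queue, &log->l_reserveq);
		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
		/* woken with the lock already dropped by xlog_wait() */
	}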
 
@@ -2646,17 +2604,14 @@
 /*
  * Replenish the byte reservation required by moving the grant write head.
  *
- *
+ * Similar to xlog_grant_log_space, the function is structured to have a
+ * lock-free fast path.
  */
 STATIC int
 xlog_regrant_write_log_space(xlog_t	   *log,
 			     xlog_ticket_t *tic)
 {
 	int		free_bytes, need_bytes;
-	xlog_ticket_t	*ntic;
-#ifdef DEBUG
-	xfs_lsn_t	tail_lsn;
-#endif
 
 	tic->t_curr_res = tic->t_unit_res;
 	xlog_tic_reset_res(tic);
@@ -2669,12 +2624,9 @@
 		panic("regrant Recovery problem");
 #endif
 
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_regrant_write_enter(log, tic);
-
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
 	/* If there are other waiters on the queue then give them a
 	 * chance at logspace before us. Wake up the first waiters,
@@ -2683,92 +2635,76 @@
 	 * this transaction.
 	 */
 	need_bytes = tic->t_unit_res;
-	if ((ntic = log->l_write_headq)) {
-		free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-					     log->l_grant_write_bytes);
-		do {
+	if (!list_empty_careful(&log->l_writeq)) {
+		struct xlog_ticket *ntic;
+
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(ntic, &log->l_writeq, t_queue) {
 			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < ntic->t_unit_res)
 				break;
 			free_bytes -= ntic->t_unit_res;
-			sv_signal(&ntic->t_wait);
-			ntic = ntic->t_next;
-		} while (ntic != log->l_write_headq);
+			wake_up(&ntic->t_wait);
+		}
 
-		if (ntic != log->l_write_headq) {
-			if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-				xlog_ins_ticketq(&log->l_write_headq, tic);
-
+		if (ntic != list_first_entry(&log->l_writeq,
+						struct xlog_ticket, t_queue)) {
+			if (list_empty(&tic->t_queue))
+				list_add_tail(&tic->t_queue, &log->l_writeq);
 			trace_xfs_log_regrant_write_sleep1(log, tic);
 
-			spin_unlock(&log->l_grant_lock);
-			xlog_grant_push_ail(log->l_mp, need_bytes);
-			spin_lock(&log->l_grant_lock);
+			xlog_grant_push_ail(log, need_bytes);
 
 			XFS_STATS_INC(xs_sleep_logspace);
-			sv_wait(&tic->t_wait, PINOD|PLTWAIT,
-				&log->l_grant_lock, s);
-
-			/* If we're shutting down, this tic is already
-			 * off the queue */
-			spin_lock(&log->l_grant_lock);
-			if (XLOG_FORCED_SHUTDOWN(log))
-				goto error_return;
-
+			xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
 			trace_xfs_log_regrant_write_wake1(log, tic);
-		}
+		} else
+			spin_unlock(&log->l_grant_write_lock);
 	}
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-				     log->l_grant_write_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_write_headq, tic);
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
+		spin_lock(&log->l_grant_write_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_writeq);
 
-		XFS_STATS_INC(xs_sleep_logspace);
-		trace_xfs_log_regrant_write_sleep2(log, tic);
-
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
-		/* If we're shutting down, this tic is already off the queue */
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
+		xlog_grant_push_ail(log, need_bytes);
+
+		XFS_STATS_INC(xs_sleep_logspace);
+		trace_xfs_log_regrant_write_sleep2(log, tic);
+		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+
 		trace_xfs_log_regrant_write_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_write_headq, tic);
+	}
+
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_write_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_write_lock);
+	}
 
 	/* we've got enough space */
-	xlog_grant_add_space_write(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
-	}
-#endif
-
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_regrant_write_exit(log, tic);
-
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
 
+ error_return_unlocked:
+	spin_lock(&log->l_grant_write_lock);
  error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_write_lock);
 	trace_xfs_log_regrant_write_error(log, tic);
 
 	/*
@@ -2778,7 +2714,6 @@
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 }	/* xlog_regrant_write_log_space */
 
@@ -2799,27 +2734,24 @@
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_sub_space(log, ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+					ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_write_head,
+					ticket->t_curr_res);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 
 	trace_xfs_log_regrant_reserve_sub(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-
 	/* just return if we still have some of the pre-reserved space */
-	if (ticket->t_cnt > 0) {
-		spin_unlock(&log->l_grant_lock);
+	if (ticket->t_cnt > 0)
 		return;
-	}
 
-	xlog_grant_add_space_reserve(log, ticket->t_unit_res);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head,
+					ticket->t_unit_res);
 
 	trace_xfs_log_regrant_reserve_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 0);
-	spin_unlock(&log->l_grant_lock);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 }	/* xlog_regrant_reserve_log_space */
@@ -2843,28 +2775,29 @@
 xlog_ungrant_log_space(xlog_t	     *log,
 		       xlog_ticket_t *ticket)
 {
+	int	bytes;
+
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
 	trace_xfs_log_ungrant_enter(log, ticket);
-
-	xlog_grant_sub_space(log, ticket->t_curr_res);
-
 	trace_xfs_log_ungrant_sub(log, ticket);
 
-	/* If this is a permanent reservation ticket, we may be able to free
+	/*
+	 * If this is a permanent reservation ticket, we may be able to free
 	 * up more space based on the remaining count.
 	 */
+	bytes = ticket->t_curr_res;
 	if (ticket->t_cnt > 0) {
 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
-		xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
+		bytes += ticket->t_unit_res*ticket->t_cnt;
 	}
 
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
+	xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
 	xfs_log_move_tail(log->l_mp, 1);
 }	/* xlog_ungrant_log_space */
 
@@ -2901,11 +2834,11 @@
 
 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
 		/* update tail before writing to iclog */
-		xlog_assign_tail_lsn(log->l_mp);
+		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
-		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
-		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+		xlog_verify_tail_lsn(log, iclog, tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
 	spin_unlock(&log->l_icloglock);
@@ -3088,7 +3021,7 @@
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3206,8 +3139,8 @@
 
 				XFS_STATS_INC(xs_log_force_sleep);
 
-				sv_wait(&iclog->ic_prev->ic_write_wait,
-					PSWP, &log->l_icloglock, s);
+				xlog_wait(&iclog->ic_prev->ic_write_wait,
+							&log->l_icloglock);
 				if (log_flushed)
 					*log_flushed = 1;
 				already_slept = 1;
@@ -3235,7 +3168,7 @@
 				return XFS_ERROR(EIO);
 			}
 			XFS_STATS_INC(xs_log_force_sleep);
-			sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 			/*
 			 * No need to grab the log lock here since we're
 			 * only deciding whether or not to return EIO
@@ -3310,10 +3243,8 @@
 	xlog_ticket_t	*ticket)
 {
 	ASSERT(atomic_read(&ticket->t_ref) > 0);
-	if (atomic_dec_and_test(&ticket->t_ref)) {
-		sv_destroy(&ticket->t_wait);
+	if (atomic_dec_and_test(&ticket->t_ref))
 		kmem_zone_free(xfs_log_ticket_zone, ticket);
-	}
 }
 
 xlog_ticket_t *
@@ -3435,6 +3366,7 @@
         }
 
 	atomic_set(&tic->t_ref, 1);
+	INIT_LIST_HEAD(&tic->t_queue);
 	tic->t_unit_res		= unit_bytes;
 	tic->t_curr_res		= unit_bytes;
 	tic->t_cnt		= cnt;
@@ -3445,7 +3377,7 @@
 	tic->t_trans_type	= 0;
 	if (xflags & XFS_LOG_PERM_RESERV)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
-	sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
+	init_waitqueue_head(&tic->t_wait);
 
 	xlog_tic_reset_res(tic);
 
@@ -3484,18 +3416,25 @@
 }
 
 STATIC void
-xlog_verify_grant_head(xlog_t *log, int equals)
+xlog_verify_grant_tail(
+	struct log	*log)
 {
-    if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
-	if (equals)
-	    ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
-	else
-	    ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
-    } else {
-	ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
-	ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
-    }
-}	/* xlog_verify_grant_head */
+	int		tail_cycle, tail_blocks;
+	int		cycle, space;
+
+	/*
+	 * Check to make sure the grant write head didn't just overlap the
+	 * tail.  If the cycles are the same, we can't be overlapping.
+	 * Otherwise, make sure that the cycles differ by exactly one and
+	 * check the byte count.
+	 */
+	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+	if (tail_cycle != cycle) {
+		ASSERT(cycle - 1 == tail_cycle);
+		ASSERT(space <= BBTOB(tail_blocks));
+	}
+}
 
 /* check if it will fit */
 STATIC void
@@ -3716,12 +3655,10 @@
 		xlog_cil_force(log);
 
 	/*
-	 * We must hold both the GRANT lock and the LOG lock,
-	 * before we mark the filesystem SHUTDOWN and wake
-	 * everybody up to tell the bad news.
+	 * mark the filesystem and the log as being in a shutdown state and wake
+	 * everybody up to tell them the bad news.
 	 */
 	spin_lock(&log->l_icloglock);
-	spin_lock(&log->l_grant_lock);
 	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
 	if (mp->m_sb_bp)
 		XFS_BUF_DONE(mp->m_sb_bp);
@@ -3742,27 +3679,21 @@
 	spin_unlock(&log->l_icloglock);
 
 	/*
-	 * We don't want anybody waiting for log reservations
-	 * after this. That means we have to wake up everybody
-	 * queued up on reserve_headq as well as write_headq.
-	 * In addition, we make sure in xlog_{re}grant_log_space
-	 * that we don't enqueue anything once the SHUTDOWN flag
-	 * is set, and this action is protected by the GRANTLOCK.
+	 * We don't want anybody waiting for log reservations after this. That
+	 * means we have to wake up everybody queued up on reserveq as well as
+	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
+	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
+	 * action is protected by the grant locks.
 	 */
-	if ((tic = log->l_reserve_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
-	}
+	spin_lock(&log->l_grant_reserve_lock);
+	list_for_each_entry(tic, &log->l_reserveq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_reserve_lock);
 
-	if ((tic = log->l_write_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
-	}
-	spin_unlock(&log->l_grant_lock);
+	spin_lock(&log->l_grant_write_lock);
+	list_for_each_entry(tic, &log->l_writeq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_write_lock);
 
 	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
 		ASSERT(!logerror);
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 916eb7d..3bd3291 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -191,7 +191,7 @@
 
 xlog_tid_t xfs_log_get_trans_ident(struct xfs_trans *tp);
 
-int	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
+void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
 				struct xfs_log_vec *log_vector,
 				xfs_lsn_t *commit_lsn, int flags);
 bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 23d6ceb..9ca59be 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -61,7 +61,7 @@
 	INIT_LIST_HEAD(&cil->xc_committing);
 	spin_lock_init(&cil->xc_cil_lock);
 	init_rwsem(&cil->xc_ctx_lock);
-	sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait");
+	init_waitqueue_head(&cil->xc_commit_wait);
 
 	INIT_LIST_HEAD(&ctx->committing);
 	INIT_LIST_HEAD(&ctx->busy_extents);
@@ -361,15 +361,10 @@
 	int	abort)
 {
 	struct xfs_cil_ctx	*ctx = args;
-	struct xfs_log_vec	*lv;
-	int			abortflag = abort ? XFS_LI_ABORTED : 0;
 	struct xfs_busy_extent	*busyp, *n;
 
-	/* unpin all the log items */
-	for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
-		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
-							abortflag);
-	}
+	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+					ctx->start_lsn, abort);
 
 	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
 		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
@@ -548,7 +543,7 @@
 
 	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
 	if (error)
-		goto out_abort;
+		goto out_abort_free_ticket;
 
 	/*
 	 * now that we've written the checkpoint into the log, strictly
@@ -568,14 +563,15 @@
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 	}
 	spin_unlock(&cil->xc_cil_lock);
 
+	/* xfs_log_done always frees the ticket on error. */
 	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
-	if (error || commit_lsn == -1)
+	if (commit_lsn == -1)
 		goto out_abort;
 
 	/* attach all the transactions w/ busy extents to iclog */
@@ -592,7 +588,7 @@
 	 */
 	spin_lock(&cil->xc_cil_lock);
 	ctx->commit_lsn = commit_lsn;
-	sv_broadcast(&cil->xc_commit_wait);
+	wake_up_all(&cil->xc_commit_wait);
 	spin_unlock(&cil->xc_cil_lock);
 
 	/* release the hounds! */
@@ -605,6 +601,8 @@
 	kmem_free(new_ctx);
 	return 0;
 
+out_abort_free_ticket:
+	xfs_log_ticket_put(tic);
 out_abort:
 	xlog_cil_committed(ctx, XFS_LI_ABORTED);
 	return XFS_ERROR(EIO);
@@ -627,7 +625,7 @@
  * background commit, returns without it held once background commits are
  * allowed again.
  */
-int
+void
 xfs_log_commit_cil(
 	struct xfs_mount	*mp,
 	struct xfs_trans	*tp,
@@ -642,11 +640,6 @@
 	if (flags & XFS_TRANS_RELEASE_LOG_RES)
 		log_flags = XFS_LOG_REL_PERM_RESERV;
 
-	if (XLOG_FORCED_SHUTDOWN(log)) {
-		xlog_cil_free_logvec(log_vector);
-		return XFS_ERROR(EIO);
-	}
-
 	/*
 	 * do all the hard work of formatting items (including memory
 	 * allocation) outside the CIL context lock. This prevents stalling CIL
@@ -706,7 +699,6 @@
 	 */
 	if (push)
 		xlog_cil_push(log, 0);
-	return 0;
 }
 
 /*
@@ -757,7 +749,7 @@
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 		if (ctx->sequence != sequence)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index edcdfe0..d5f8be8 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -21,7 +21,6 @@
 struct xfs_buf;
 struct log;
 struct xlog_ticket;
-struct xfs_buf_cancel;
 struct xfs_mount;
 
 /*
@@ -54,7 +53,6 @@
 	BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
 	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
 
-
 static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
 {
 	return ((xfs_lsn_t)cycle << 32) | block;
@@ -133,12 +131,10 @@
  */
 #define XLOG_TIC_INITED		0x1	/* has been initialized */
 #define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */
-#define XLOG_TIC_IN_Q		0x4
 
 #define XLOG_TIC_FLAGS \
 	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
-	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }, \
-	{ XLOG_TIC_IN_Q,	"XLOG_TIC_IN_Q" }
+	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
 
 #endif	/* __KERNEL__ */
 
@@ -244,9 +240,8 @@
 } xlog_res_t;
 
 typedef struct xlog_ticket {
-	sv_t		   t_wait;	 /* ticket wait queue            : 20 */
-	struct xlog_ticket *t_next;	 /*			         :4|8 */
-	struct xlog_ticket *t_prev;	 /*				 :4|8 */
+	wait_queue_head_t  t_wait;	 /* ticket wait queue */
+	struct list_head   t_queue;	 /* reserve/write queue */
 	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
 	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
 	int		   t_curr_res;	 /* current reservation in bytes : 4  */
@@ -353,8 +348,8 @@
  * and move everything else out to subsequent cachelines.
  */
 typedef struct xlog_in_core {
-	sv_t			ic_force_wait;
-	sv_t			ic_write_wait;
+	wait_queue_head_t	ic_force_wait;
+	wait_queue_head_t	ic_write_wait;
 	struct xlog_in_core	*ic_next;
 	struct xlog_in_core	*ic_prev;
 	struct xfs_buf		*ic_bp;
@@ -421,7 +416,7 @@
 	struct xfs_cil_ctx	*xc_ctx;
 	struct rw_semaphore	xc_ctx_lock;
 	struct list_head	xc_committing;
-	sv_t			xc_commit_wait;
+	wait_queue_head_t	xc_commit_wait;
 	xfs_lsn_t		xc_current_sequence;
 };
 
@@ -491,7 +486,7 @@
 	struct xfs_buftarg	*l_targ;        /* buftarg of log */
 	uint			l_flags;
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
-	struct xfs_buf_cancel	**l_buf_cancel_table;
+	struct list_head	*l_buf_cancel_table;
 	int			l_iclog_hsize;  /* size of iclog header */
 	int			l_iclog_heads;  /* # of iclog header sectors */
 	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
@@ -503,29 +498,40 @@
 	int			l_logBBsize;    /* size of log in BB chunks */
 
 	/* The following block of fields are changed while holding icloglock */
-	sv_t			l_flush_wait ____cacheline_aligned_in_smp;
+	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
 						/* waiting for iclog flush */
 	int			l_covered_state;/* state of "covering disk
 						 * log entries" */
 	xlog_in_core_t		*l_iclog;       /* head log queue	*/
 	spinlock_t		l_icloglock;    /* grab to change iclog state */
-	xfs_lsn_t		l_tail_lsn;     /* lsn of 1st LR with unflushed
-						 * buffers */
-	xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
 	int			l_curr_cycle;   /* Cycle number of log writes */
 	int			l_prev_cycle;   /* Cycle number before last
 						 * block increment */
 	int			l_curr_block;   /* current logical log block */
 	int			l_prev_block;   /* previous logical log block */
 
-	/* The following block of fields are changed while holding grant_lock */
-	spinlock_t		l_grant_lock ____cacheline_aligned_in_smp;
-	xlog_ticket_t		*l_reserve_headq;
-	xlog_ticket_t		*l_write_headq;
-	int			l_grant_reserve_cycle;
-	int			l_grant_reserve_bytes;
-	int			l_grant_write_cycle;
-	int			l_grant_write_bytes;
+	/*
+	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
+	 * read without needing to hold specific locks. To avoid operations
+	 * contending with other hot objects, place each of them on a separate
+	 * cacheline.
+	 */
+	/* lsn of last LR on disk */
+	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
+	/* lsn of 1st LR with unflushed buffers */
+	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
+
+	/*
+	 * ticket grant locks, queues and accounting have their own cachelines
+	 * as these are quite hot and can be operated on concurrently.
+	 */
+	spinlock_t		l_grant_reserve_lock ____cacheline_aligned_in_smp;
+	struct list_head	l_reserveq;
+	atomic64_t		l_grant_reserve_head;
+
+	spinlock_t		l_grant_write_lock ____cacheline_aligned_in_smp;
+	struct list_head	l_writeq;
+	atomic64_t		l_grant_write_head;
 
 	/* The following field are used for debugging; need to hold icloglock */
 #ifdef DEBUG
@@ -534,6 +540,9 @@
 
 } xlog_t;
 
+#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
+	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
 
 /* common routines */
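Given the bucket macro above, a pass-2 lookup becomes a plain hash-and-walk. The sketch assumes struct xfs_buf_cancel as introduced in xfs_log_recover.c further down in this diff; the function name is invented.

	static struct xfs_buf_cancel *
	example_find_buf_cancel(struct log *log, xfs_daddr_t blkno, uint len)
	{
		struct list_head	*bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
		struct xfs_buf_cancel	*bcp;

		list_for_each_entry(bcp, bucket, bc_list) {
			if (bcp->bc_blkno == blkno && bcp->bc_len == len)
				return bcp;	/* cancelled; do not replay */
		}
		return NULL;
	}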
@@ -562,6 +571,61 @@
 				xlog_in_core_t **commit_iclog, uint flags);
 
 /*
+ * When we crack an atomic LSN, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from. This should always
+ * be used to sample and crack LSNs that are stored and updated in atomic
+ * variables.
+ */
+static inline void
+xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
+{
+	xfs_lsn_t val = atomic64_read(lsn);
+
+	*cycle = CYCLE_LSN(val);
+	*block = BLOCK_LSN(val);
+}
+
+/*
+ * Calculate and assign a value to an atomic LSN variable from component pieces.
+ */
+static inline void
+xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
+{
+	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
+}
+
+/*
+ * When we crack the grant head, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from.
+ */
+static inline void
+xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
+{
+	*cycle = val >> 32;
+	*space = val & 0xffffffff;
+}
+
+static inline void
+xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
+{
+	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
+}
+
+static inline int64_t
+xlog_assign_grant_head_val(int cycle, int space)
+{
+	return ((int64_t)cycle << 32) | space;
+}
+
+static inline void
+xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
+{
+	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
+}
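A tiny round-trip of the LSN/grant-head helpers above, with made-up values; the point is that a single atomic64_t read always yields a matching (cycle, block) pair.

	/* illustrative only */
	static void example_lsn_roundtrip(void)
	{
		atomic64_t	lsn;
		uint		cycle, block;

		xlog_assign_atomic_lsn(&lsn, 2, 1000);	/* stores (2ULL << 32) | 1000 */
		xlog_crack_atomic_lsn(&lsn, &cycle, &block);
		ASSERT(cycle == 2 && block == 1000);
	}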
+
+/*
  * Committed Item List interfaces
  */
 int	xlog_cil_init(struct log *log);
@@ -585,6 +649,21 @@
  */
 #define XLOG_UNMOUNT_REC_TYPE	(-1U)
 
+/*
+ * Wrapper function for waiting on a wait queue serialised against wakeups
+ * by a spinlock. This matches the semantics of all the wait queues used in the
+ * log code.
+ */
+static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	add_wait_queue_exclusive(wq, &wait);
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	spin_unlock(lock);
+	schedule();
+	remove_wait_queue(wq, &wait);
+}
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_LOG_PRIV_H__ */
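The xlog_wait() helper just above encodes a contract rather than a generic sleep: the caller already holds the spinlock that serialises wakeups, has queued itself on whatever list the waker walks, and gets back with the lock dropped. A minimal sleeper/waker pair under those assumptions (names are invented; wake_up() pairs with the exclusive wait used inside xlog_wait()):

	/* sleep side: take the lock, queue up, then hand the lock to xlog_wait() */
	static void example_sleep(wait_queue_head_t *wq, spinlock_t *lock)
	{
		spin_lock(lock);
		/* ... add ourselves to the list the waker will scan ... */
		xlog_wait(wq, lock);	/* drops "lock", sleeps, returns unlocked */
		/* do not unlock here -- xlog_wait() already released it */
	}

	/* wake side: hold the same lock so a sleeper cannot miss the wakeup */
	static void example_wake(wait_queue_head_t *wq, spinlock_t *lock)
	{
		spin_lock(lock);
		wake_up(wq);
		spin_unlock(lock);
	}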
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 966d3f9..aa0ebb7 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -53,6 +53,17 @@
 #endif
 
 /*
+ * This structure is used during recovery to record the buf log items which
+ * have been canceled and should not be replayed.
+ */
+struct xfs_buf_cancel {
+	xfs_daddr_t		bc_blkno;
+	uint			bc_len;
+	int			bc_refcount;
+	struct list_head	bc_list;
+};
+
+/*
  * Sector aligned buffer routines for buffer create/read/write/access
  */
 
@@ -925,12 +936,12 @@
 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
 	if (found == 2)
 		log->l_curr_cycle++;
-	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
-	log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
-	log->l_grant_reserve_cycle = log->l_curr_cycle;
-	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
-	log->l_grant_write_cycle = log->l_curr_cycle;
-	log->l_grant_write_bytes = BBTOB(log->l_curr_block);
+	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
+	xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
 
 	/*
 	 * Look for unmount record.  If we find it, then we know there
@@ -960,7 +971,7 @@
 	}
 	after_umount_blk = (i + hblks + (int)
 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
-	tail_lsn = log->l_tail_lsn;
+	tail_lsn = atomic64_read(&log->l_tail_lsn);
 	if (*head_blk == after_umount_blk &&
 	    be32_to_cpu(rhead->h_num_logops) == 1) {
 		umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -975,12 +986,10 @@
 			 * log records will point recovery to after the
 			 * current unmount record.
 			 */
-			log->l_tail_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
-			log->l_last_sync_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_tail_lsn,
+					log->l_curr_cycle, after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+					log->l_curr_cycle, after_umount_blk);
 			*tail_blk = after_umount_blk;
 
 			/*
@@ -1605,82 +1614,45 @@
  * record in the table to tell us how many times we expect to see this
  * record during the second pass.
  */
-STATIC void
-xlog_recover_do_buffer_pass1(
-	xlog_t			*log,
-	xfs_buf_log_format_t	*buf_f)
+STATIC int
+xlog_recover_buffer_pass1(
+	struct log		*log,
+	xlog_recover_item_t	*item)
 {
-	xfs_buf_cancel_t	*bcp;
-	xfs_buf_cancel_t	*nextp;
-	xfs_buf_cancel_t	*prevp;
-	xfs_buf_cancel_t	**bucket;
-	xfs_daddr_t		blkno = 0;
-	uint			len = 0;
-	ushort			flags = 0;
-
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		len = buf_f->blf_len;
-		flags = buf_f->blf_flags;
-		break;
-	}
+	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
+	struct list_head	*bucket;
+	struct xfs_buf_cancel	*bcp;
 
 	/*
 	 * If this isn't a cancel buffer item, then just return.
 	 */
-	if (!(flags & XFS_BLF_CANCEL)) {
+	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
-		return;
+		return 0;
 	}
 
 	/*
-	 * Insert an xfs_buf_cancel record into the hash table of
-	 * them.  If there is already an identical record, bump
-	 * its reference count.
+	 * Insert an xfs_buf_cancel record into the hash table of them.
+	 * If there is already an identical record, bump its reference count.
 	 */
-	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
-					  XLOG_BC_TABLE_SIZE];
-	/*
-	 * If the hash bucket is empty then just insert a new record into
-	 * the bucket.
-	 */
-	if (*bucket == NULL) {
-		bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
-						     KM_SLEEP);
-		bcp->bc_blkno = blkno;
-		bcp->bc_len = len;
-		bcp->bc_refcount = 1;
-		bcp->bc_next = NULL;
-		*bucket = bcp;
-		return;
-	}
-
-	/*
-	 * The hash bucket is not empty, so search for duplicates of our
-	 * record.  If we find one them just bump its refcount.  If not
-	 * then add us at the end of the list.
-	 */
-	prevp = NULL;
-	nextp = *bucket;
-	while (nextp != NULL) {
-		if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
-			nextp->bc_refcount++;
+	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
+	list_for_each_entry(bcp, bucket, bc_list) {
+		if (bcp->bc_blkno == buf_f->blf_blkno &&
+		    bcp->bc_len == buf_f->blf_len) {
+			bcp->bc_refcount++;
 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
-			return;
+			return 0;
 		}
-		prevp = nextp;
-		nextp = nextp->bc_next;
 	}
-	ASSERT(prevp != NULL);
-	bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
-					     KM_SLEEP);
-	bcp->bc_blkno = blkno;
-	bcp->bc_len = len;
+
+	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
+	bcp->bc_blkno = buf_f->blf_blkno;
+	bcp->bc_len = buf_f->blf_len;
 	bcp->bc_refcount = 1;
-	bcp->bc_next = NULL;
-	prevp->bc_next = bcp;
+	list_add_tail(&bcp->bc_list, bucket);
+
 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
+	return 0;
 }
 
 /*
@@ -1698,14 +1670,13 @@
  */
 STATIC int
 xlog_check_buffer_cancelled(
-	xlog_t			*log,
+	struct log		*log,
 	xfs_daddr_t		blkno,
 	uint			len,
 	ushort			flags)
 {
-	xfs_buf_cancel_t	*bcp;
-	xfs_buf_cancel_t	*prevp;
-	xfs_buf_cancel_t	**bucket;
+	struct list_head	*bucket;
+	struct xfs_buf_cancel	*bcp;
 
 	if (log->l_buf_cancel_table == NULL) {
 		/*
@@ -1716,128 +1687,70 @@
 		return 0;
 	}
 
-	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
-					  XLOG_BC_TABLE_SIZE];
-	bcp = *bucket;
-	if (bcp == NULL) {
-		/*
-		 * There is no corresponding entry in the table built
-		 * in pass one, so this buffer has not been cancelled.
-		 */
-		ASSERT(!(flags & XFS_BLF_CANCEL));
-		return 0;
+	/*
+	 * Search for an entry in the cancel table that matches our buffer.
+	 */
+	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
+	list_for_each_entry(bcp, bucket, bc_list) {
+		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
+			goto found;
 	}
 
 	/*
-	 * Search for an entry in the buffer cancel table that
-	 * matches our buffer.
-	 */
-	prevp = NULL;
-	while (bcp != NULL) {
-		if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
-			/*
-			 * We've go a match, so return 1 so that the
-			 * recovery of this buffer is cancelled.
-			 * If this buffer is actually a buffer cancel
-			 * log item, then decrement the refcount on the
-			 * one in the table and remove it if this is the
-			 * last reference.
-			 */
-			if (flags & XFS_BLF_CANCEL) {
-				bcp->bc_refcount--;
-				if (bcp->bc_refcount == 0) {
-					if (prevp == NULL) {
-						*bucket = bcp->bc_next;
-					} else {
-						prevp->bc_next = bcp->bc_next;
-					}
-					kmem_free(bcp);
-				}
-			}
-			return 1;
-		}
-		prevp = bcp;
-		bcp = bcp->bc_next;
-	}
-	/*
-	 * We didn't find a corresponding entry in the table, so
-	 * return 0 so that the buffer is NOT cancelled.
+	 * We didn't find a corresponding entry in the table, so return 0 so
+	 * that the buffer is NOT cancelled.
 	 */
 	ASSERT(!(flags & XFS_BLF_CANCEL));
 	return 0;
-}
 
-STATIC int
-xlog_recover_do_buffer_pass2(
-	xlog_t			*log,
-	xfs_buf_log_format_t	*buf_f)
-{
-	xfs_daddr_t		blkno = 0;
-	ushort			flags = 0;
-	uint			len = 0;
-
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		flags = buf_f->blf_flags;
-		len = buf_f->blf_len;
-		break;
+found:
+	/*
+	 * We've got a match, so return 1 so that the recovery of this buffer
+	 * is cancelled.  If this buffer is actually a buffer cancel log
+	 * item, then decrement the refcount on the one in the table and
+	 * remove it if this is the last reference.
+	 */
+	if (flags & XFS_BLF_CANCEL) {
+		if (--bcp->bc_refcount == 0) {
+			list_del(&bcp->bc_list);
+			kmem_free(bcp);
+		}
 	}
-
-	return xlog_check_buffer_cancelled(log, blkno, len, flags);
+	return 1;
 }
 
 /*
- * Perform recovery for a buffer full of inodes.  In these buffers,
- * the only data which should be recovered is that which corresponds
- * to the di_next_unlinked pointers in the on disk inode structures.
- * The rest of the data for the inodes is always logged through the
- * inodes themselves rather than the inode buffer and is recovered
- * in xlog_recover_do_inode_trans().
+ * Perform recovery for a buffer full of inodes.  In these buffers, the only
+ * data which should be recovered is that which corresponds to the
+ * di_next_unlinked pointers in the on disk inode structures.  The rest of the
+ * data for the inodes is always logged through the inodes themselves rather
+ * than the inode buffer and is recovered in xlog_recover_inode_pass2().
  *
- * The only time when buffers full of inodes are fully recovered is
- * when the buffer is full of newly allocated inodes.  In this case
- * the buffer will not be marked as an inode buffer and so will be
- * sent to xlog_recover_do_reg_buffer() below during recovery.
+ * The only time when buffers full of inodes are fully recovered is when the
+ * buffer is full of newly allocated inodes.  In this case the buffer will
+ * not be marked as an inode buffer and so will be sent to
+ * xlog_recover_do_reg_buffer() below during recovery.
  */
 STATIC int
 xlog_recover_do_inode_buffer(
-	xfs_mount_t		*mp,
+	struct xfs_mount	*mp,
 	xlog_recover_item_t	*item,
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	xfs_buf_log_format_t	*buf_f)
 {
 	int			i;
-	int			item_index;
-	int			bit;
-	int			nbits;
-	int			reg_buf_offset;
-	int			reg_buf_bytes;
+	int			item_index = 0;
+	int			bit = 0;
+	int			nbits = 0;
+	int			reg_buf_offset = 0;
+	int			reg_buf_bytes = 0;
 	int			next_unlinked_offset;
 	int			inodes_per_buf;
 	xfs_agino_t		*logged_nextp;
 	xfs_agino_t		*buffer_nextp;
-	unsigned int		*data_map = NULL;
-	unsigned int		map_size = 0;
 
 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
 
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		data_map = buf_f->blf_data_map;
-		map_size = buf_f->blf_map_size;
-		break;
-	}
-	/*
-	 * Set the variables corresponding to the current region to
-	 * 0 so that we'll initialize them on the first pass through
-	 * the loop.
-	 */
-	reg_buf_offset = 0;
-	reg_buf_bytes = 0;
-	bit = 0;
-	nbits = 0;
-	item_index = 0;
 	inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
 	for (i = 0; i < inodes_per_buf; i++) {
 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
@@ -1852,18 +1765,18 @@
 			 * the current di_next_unlinked field.
 			 */
 			bit += nbits;
-			bit = xfs_next_bit(data_map, map_size, bit);
+			bit = xfs_next_bit(buf_f->blf_data_map,
+					   buf_f->blf_map_size, bit);
 
 			/*
 			 * If there are no more logged regions in the
 			 * buffer, then we're done.
 			 */
-			if (bit == -1) {
+			if (bit == -1)
 				return 0;
-			}
 
-			nbits = xfs_contig_bits(data_map, map_size,
-							 bit);
+			nbits = xfs_contig_bits(buf_f->blf_data_map,
+						buf_f->blf_map_size, bit);
 			ASSERT(nbits > 0);
 			reg_buf_offset = bit << XFS_BLF_SHIFT;
 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
@@ -1875,9 +1788,8 @@
 		 * di_next_unlinked field, then move on to the next
 		 * di_next_unlinked field.
 		 */
-		if (next_unlinked_offset < reg_buf_offset) {
+		if (next_unlinked_offset < reg_buf_offset)
 			continue;
-		}
 
 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
@@ -1913,36 +1825,29 @@
  * given buffer.  The bitmap in the buf log format structure indicates
  * where to place the logged data.
  */
-/*ARGSUSED*/
 STATIC void
 xlog_recover_do_reg_buffer(
 	struct xfs_mount	*mp,
 	xlog_recover_item_t	*item,
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	xfs_buf_log_format_t	*buf_f)
 {
 	int			i;
 	int			bit;
 	int			nbits;
-	unsigned int		*data_map = NULL;
-	unsigned int		map_size = 0;
 	int                     error;
 
 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
 
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		data_map = buf_f->blf_data_map;
-		map_size = buf_f->blf_map_size;
-		break;
-	}
 	bit = 0;
 	i = 1;  /* 0 is the buf format structure */
 	while (1) {
-		bit = xfs_next_bit(data_map, map_size, bit);
+		bit = xfs_next_bit(buf_f->blf_data_map,
+				   buf_f->blf_map_size, bit);
 		if (bit == -1)
 			break;
-		nbits = xfs_contig_bits(data_map, map_size, bit);
+		nbits = xfs_contig_bits(buf_f->blf_data_map,
+					buf_f->blf_map_size, bit);
 		ASSERT(nbits > 0);
 		ASSERT(item->ri_buf[i].i_addr != NULL);
 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
@@ -2176,77 +2081,46 @@
  * for more details on the implementation of the table of cancel records.
  */
 STATIC int
-xlog_recover_do_buffer_trans(
+xlog_recover_buffer_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	int			error;
-	int			cancel;
-	xfs_daddr_t		blkno;
-	int			len;
-	ushort			flags;
 	uint			buf_flags;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		/*
-		 * In this pass we're only looking for buf items
-		 * with the XFS_BLF_CANCEL bit set.
-		 */
-		xlog_recover_do_buffer_pass1(log, buf_f);
+	/*
+	 * In this pass we only want to recover all the buffers which have
+	 * not been cancelled and are not cancellation buffers themselves.
+	 */
+	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
+			buf_f->blf_len, buf_f->blf_flags)) {
+		trace_xfs_log_recover_buf_cancel(log, buf_f);
 		return 0;
-	} else {
-		/*
-		 * In this pass we want to recover all the buffers
-		 * which have not been cancelled and are not
-		 * cancellation buffers themselves.  The routine
-		 * we call here will tell us whether or not to
-		 * continue with the replay of this buffer.
-		 */
-		cancel = xlog_recover_do_buffer_pass2(log, buf_f);
-		if (cancel) {
-			trace_xfs_log_recover_buf_cancel(log, buf_f);
-			return 0;
-		}
-	}
-	trace_xfs_log_recover_buf_recover(log, buf_f);
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		len = buf_f->blf_len;
-		flags = buf_f->blf_flags;
-		break;
-	default:
-		xfs_fs_cmn_err(CE_ALERT, log->l_mp,
-			"xfs_log_recover: unknown buffer type 0x%x, logdev %s",
-			buf_f->blf_type, log->l_mp->m_logname ?
-			log->l_mp->m_logname : "internal");
-		XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
-				 XFS_ERRLEVEL_LOW, log->l_mp);
-		return XFS_ERROR(EFSCORRUPTED);
 	}
 
-	mp = log->l_mp;
+	trace_xfs_log_recover_buf_recover(log, buf_f);
+
 	buf_flags = XBF_LOCK;
-	if (!(flags & XFS_BLF_INODE_BUF))
+	if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
 		buf_flags |= XBF_MAPPED;
 
-	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
+	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+			  buf_flags);
 	if (XFS_BUF_ISERROR(bp)) {
-		xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
-				  bp, blkno);
+		xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
+				  bp, buf_f->blf_blkno);
 		error = XFS_BUF_GETERROR(bp);
 		xfs_buf_relse(bp);
 		return error;
 	}
 
 	error = 0;
-	if (flags & XFS_BLF_INODE_BUF) {
+	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
-	} else if (flags &
+	} else if (buf_f->blf_flags &
 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
 	} else {
@@ -2286,16 +2160,14 @@
 }
 
 STATIC int
-xlog_recover_do_inode_trans(
+xlog_recover_inode_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_inode_log_format_t	*in_f;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	xfs_dinode_t		*dip;
-	xfs_ino_t		ino;
 	int			len;
 	xfs_caddr_t		src;
 	xfs_caddr_t		dest;
@@ -2305,10 +2177,6 @@
 	xfs_icdinode_t		*dicp;
 	int			need_free = 0;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-
 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
 		in_f = item->ri_buf[0].i_addr;
 	} else {
@@ -2318,8 +2186,6 @@
 		if (error)
 			goto error;
 	}
-	ino = in_f->ilf_ino;
-	mp = log->l_mp;
 
 	/*
 	 * Inode buffers can be freed, look out for it,
@@ -2354,8 +2220,8 @@
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
-			dip, bp, ino);
-		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
+			dip, bp, in_f->ilf_ino);
+		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
 				 XFS_ERRLEVEL_LOW, mp);
 		error = EFSCORRUPTED;
 		goto error;
@@ -2365,8 +2231,8 @@
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
-			item, ino);
-		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
+			item, in_f->ilf_ino);
+		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
 				 XFS_ERRLEVEL_LOW, mp);
 		error = EFSCORRUPTED;
 		goto error;
@@ -2394,12 +2260,12 @@
 	if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
-			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
+			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
 					 XFS_ERRLEVEL_LOW, mp, dicp);
 			xfs_buf_relse(bp);
 			xfs_fs_cmn_err(CE_ALERT, mp,
 				"xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
-				item, dip, bp, ino);
+				item, dip, bp, in_f->ilf_ino);
 			error = EFSCORRUPTED;
 			goto error;
 		}
@@ -2407,40 +2273,40 @@
 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
-			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
+			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
 					     XFS_ERRLEVEL_LOW, mp, dicp);
 			xfs_buf_relse(bp);
 			xfs_fs_cmn_err(CE_ALERT, mp,
 				"xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
-				item, dip, bp, ino);
+				item, dip, bp, in_f->ilf_ino);
 			error = EFSCORRUPTED;
 			goto error;
 		}
 	}
 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
-			item, dip, bp, ino,
+			item, dip, bp, in_f->ilf_ino,
 			dicp->di_nextents + dicp->di_anextents,
 			dicp->di_nblocks);
 		error = EFSCORRUPTED;
 		goto error;
 	}
 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
-			item, dip, bp, ino, dicp->di_forkoff);
+			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
 		error = EFSCORRUPTED;
 		goto error;
 	}
 	if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
@@ -2532,7 +2398,7 @@
 			break;
 
 		default:
-			xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
+			xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag");
 			ASSERT(0);
 			xfs_buf_relse(bp);
 			error = EIO;
@@ -2556,18 +2422,11 @@
  * of that type.
  */
 STATIC int
-xlog_recover_do_quotaoff_trans(
+xlog_recover_quotaoff_pass1(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
-	xfs_qoff_logformat_t	*qoff_f;
-
-	if (pass == XLOG_RECOVER_PASS2) {
-		return (0);
-	}
-
-	qoff_f = item->ri_buf[0].i_addr;
+	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
 	ASSERT(qoff_f);
 
 	/*
@@ -2588,22 +2447,17 @@
  * Recover a dquot record
  */
 STATIC int
-xlog_recover_do_dquot_trans(
+xlog_recover_dquot_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	struct xfs_disk_dquot	*ddq, *recddq;
 	int			error;
 	xfs_dq_logformat_t	*dq_f;
 	uint			type;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-	mp = log->l_mp;
 
 	/*
 	 * Filesystems are required to send in quota flags at mount time.
@@ -2647,7 +2501,7 @@
 	if ((error = xfs_qm_dqcheck(recddq,
 			   dq_f->qlf_id,
 			   0, XFS_QMOPT_DOWARN,
-			   "xlog_recover_do_dquot_trans (log copy)"))) {
+			   "xlog_recover_dquot_pass2 (log copy)"))) {
 		return XFS_ERROR(EIO);
 	}
 	ASSERT(dq_f->qlf_len == 1);
@@ -2670,7 +2524,7 @@
 	 * minimal initialization then.
 	 */
 	if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
-			   "xlog_recover_do_dquot_trans")) {
+			   "xlog_recover_dquot_pass2")) {
 		xfs_buf_relse(bp);
 		return XFS_ERROR(EIO);
 	}
@@ -2693,38 +2547,31 @@
  * LSN.
  */
 STATIC int
-xlog_recover_do_efi_trans(
+xlog_recover_efi_pass2(
 	xlog_t			*log,
 	xlog_recover_item_t	*item,
-	xfs_lsn_t		lsn,
-	int			pass)
+	xfs_lsn_t		lsn)
 {
 	int			error;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_efi_log_item_t	*efip;
 	xfs_efi_log_format_t	*efi_formatp;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-
 	efi_formatp = item->ri_buf[0].i_addr;
 
-	mp = log->l_mp;
 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
 					 &(efip->efi_format)))) {
 		xfs_efi_item_free(efip);
 		return error;
 	}
-	efip->efi_next_extent = efi_formatp->efi_nextents;
-	efip->efi_flags |= XFS_EFI_COMMITTED;
+	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
 
 	spin_lock(&log->l_ailp->xa_lock);
 	/*
 	 * xfs_trans_ail_update() drops the AIL lock.
 	 */
-	xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
+	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
 	return 0;
 }
 
@@ -2737,11 +2584,10 @@
  * efd format structure.  If we find it, we remove the efi from the
  * AIL and free it.
  */
-STATIC void
-xlog_recover_do_efd_trans(
+STATIC int
+xlog_recover_efd_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_efd_log_format_t	*efd_formatp;
 	xfs_efi_log_item_t	*efip = NULL;
@@ -2750,10 +2596,6 @@
 	struct xfs_ail_cursor	cur;
 	struct xfs_ail		*ailp = log->l_ailp;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return;
-	}
-
 	efd_formatp = item->ri_buf[0].i_addr;
 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
@@ -2785,62 +2627,6 @@
 	}
 	xfs_trans_ail_cursor_done(ailp, &cur);
 	spin_unlock(&ailp->xa_lock);
-}
-
-/*
- * Perform the transaction
- *
- * If the transaction modifies a buffer or inode, do it now.  Otherwise,
- * EFIs and EFDs get queued up by adding entries into the AIL for them.
- */
-STATIC int
-xlog_recover_do_trans(
-	xlog_t			*log,
-	xlog_recover_t		*trans,
-	int			pass)
-{
-	int			error = 0;
-	xlog_recover_item_t	*item;
-
-	error = xlog_recover_reorder_trans(log, trans, pass);
-	if (error)
-		return error;
-
-	list_for_each_entry(item, &trans->r_itemq, ri_list) {
-		trace_xfs_log_recover_item_recover(log, trans, item, pass);
-		switch (ITEM_TYPE(item)) {
-		case XFS_LI_BUF:
-			error = xlog_recover_do_buffer_trans(log, item, pass);
-			break;
-		case XFS_LI_INODE:
-			error = xlog_recover_do_inode_trans(log, item, pass);
-			break;
-		case XFS_LI_EFI:
-			error = xlog_recover_do_efi_trans(log, item,
-							  trans->r_lsn, pass);
-			break;
-		case XFS_LI_EFD:
-			xlog_recover_do_efd_trans(log, item, pass);
-			error = 0;
-			break;
-		case XFS_LI_DQUOT:
-			error = xlog_recover_do_dquot_trans(log, item, pass);
-			break;
-		case XFS_LI_QUOTAOFF:
-			error = xlog_recover_do_quotaoff_trans(log, item,
-							       pass);
-			break;
-		default:
-			xlog_warn(
-	"XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
-			ASSERT(0);
-			error = XFS_ERROR(EIO);
-			break;
-		}
-
-		if (error)
-			return error;
-	}
 
 	return 0;
 }
@@ -2852,7 +2638,7 @@
  */
 STATIC void
 xlog_recover_free_trans(
-	xlog_recover_t		*trans)
+	struct xlog_recover	*trans)
 {
 	xlog_recover_item_t	*item, *n;
 	int			i;
@@ -2871,17 +2657,95 @@
 }
 
 STATIC int
+xlog_recover_commit_pass1(
+	struct log		*log,
+	struct xlog_recover	*trans,
+	xlog_recover_item_t	*item)
+{
+	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
+
+	switch (ITEM_TYPE(item)) {
+	case XFS_LI_BUF:
+		return xlog_recover_buffer_pass1(log, item);
+	case XFS_LI_QUOTAOFF:
+		return xlog_recover_quotaoff_pass1(log, item);
+	case XFS_LI_INODE:
+	case XFS_LI_EFI:
+	case XFS_LI_EFD:
+	case XFS_LI_DQUOT:
+		/* nothing to do in pass 1 */
+		return 0;
+	default:
+		xlog_warn(
+	"XFS: invalid item type (%d) xlog_recover_commit_pass1",
+			ITEM_TYPE(item));
+		ASSERT(0);
+		return XFS_ERROR(EIO);
+	}
+}
+
+STATIC int
+xlog_recover_commit_pass2(
+	struct log		*log,
+	struct xlog_recover	*trans,
+	xlog_recover_item_t	*item)
+{
+	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
+
+	switch (ITEM_TYPE(item)) {
+	case XFS_LI_BUF:
+		return xlog_recover_buffer_pass2(log, item);
+	case XFS_LI_INODE:
+		return xlog_recover_inode_pass2(log, item);
+	case XFS_LI_EFI:
+		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
+	case XFS_LI_EFD:
+		return xlog_recover_efd_pass2(log, item);
+	case XFS_LI_DQUOT:
+		return xlog_recover_dquot_pass2(log, item);
+	case XFS_LI_QUOTAOFF:
+		/* nothing to do in pass2 */
+		return 0;
+	default:
+		xlog_warn(
+	"XFS: invalid item type (%d) xlog_recover_commit_pass2",
+			ITEM_TYPE(item));
+		ASSERT(0);
+		return XFS_ERROR(EIO);
+	}
+}
+
+/*
+ * Perform the transaction.
+ *
+ * If the transaction modifies a buffer or inode, do it now.  Otherwise,
+ * EFIs and EFDs get queued up by adding entries into the AIL for them.
+ */
+STATIC int
 xlog_recover_commit_trans(
-	xlog_t			*log,
-	xlog_recover_t		*trans,
+	struct log		*log,
+	struct xlog_recover	*trans,
 	int			pass)
 {
-	int			error;
+	int			error = 0;
+	xlog_recover_item_t	*item;
 
 	hlist_del(&trans->r_list);
-	if ((error = xlog_recover_do_trans(log, trans, pass)))
+
+	error = xlog_recover_reorder_trans(log, trans, pass);
+	if (error)
 		return error;
-	xlog_recover_free_trans(trans);			/* no error */
+
+	list_for_each_entry(item, &trans->r_itemq, ri_list) {
+		if (pass == XLOG_RECOVER_PASS1)
+			error = xlog_recover_commit_pass1(log, trans, item);
+		else
+			error = xlog_recover_commit_pass2(log, trans, item);
+		if (error)
+			return error;
+	}
+
+	xlog_recover_free_trans(trans);
 	return 0;
 }
 
@@ -3011,7 +2875,7 @@
 	xfs_extent_t		*extp;
 	xfs_fsblock_t		startblock_fsb;
 
-	ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
+	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
 
 	/*
 	 * First check the validity of the extents described by the
@@ -3050,7 +2914,7 @@
 					 extp->ext_len);
 	}
 
-	efip->efi_flags |= XFS_EFI_RECOVERED;
+	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
 	error = xfs_trans_commit(tp, 0);
 	return error;
 
@@ -3107,7 +2971,7 @@
 		 * Skip EFIs that we've already processed.
 		 */
 		efip = (xfs_efi_log_item_t *)lip;
-		if (efip->efi_flags & XFS_EFI_RECOVERED) {
+		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
 			continue;
 		}
@@ -3724,7 +3588,7 @@
 	xfs_daddr_t	head_blk,
 	xfs_daddr_t	tail_blk)
 {
-	int		error;
+	int		error, i;
 
 	ASSERT(head_blk != tail_blk);
 
@@ -3732,10 +3596,12 @@
 	 * First do a pass to find all of the cancelled buf log items.
 	 * Store them in the buf_cancel_table for use in the second pass.
 	 */
-	log->l_buf_cancel_table =
-		(xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
-						 sizeof(xfs_buf_cancel_t*),
+	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
+						 sizeof(struct list_head),
 						 KM_SLEEP);
+	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+
 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
 				      XLOG_RECOVER_PASS1);
 	if (error != 0) {
@@ -3754,7 +3620,7 @@
 		int	i;
 
 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
-			ASSERT(log->l_buf_cancel_table[i] == NULL);
+			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
 	}
 #endif	/* DEBUG */
 
@@ -3934,7 +3800,7 @@
 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
 	} else {
 		cmn_err(CE_DEBUG,
-			"!Ending clean XFS mount for filesystem: %s\n",
+			"Ending clean XFS mount for filesystem: %s\n",
 			log->l_mp->m_fsname);
 	}
 	return 0;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 19e9dfa..d447aef 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -472,7 +472,7 @@
 			goto out_unwind;
 		pag->pag_agno = index;
 		pag->pag_mount = mp;
-		rwlock_init(&pag->pag_ici_lock);
+		spin_lock_init(&pag->pag_ici_lock);
 		mutex_init(&pag->pag_ici_reclaim_lock);
 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
 		spin_lock_init(&pag->pag_buf_lock);
@@ -975,6 +975,24 @@
 }
 
 /*
+ * Precalculate the low space thresholds for dynamic speculative preallocation.
+ */
+void
+xfs_set_low_space_thresholds(
+	struct xfs_mount	*mp)
+{
+	int i;
+
+	for (i = 0; i < XFS_LOWSP_MAX; i++) {
+		__uint64_t space = mp->m_sb.sb_dblocks;
+
+		do_div(space, 100);
+		mp->m_low_space[i] = space * (i + 1);
+	}
+}
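
For concreteness, on a hypothetical filesystem with sb_dblocks = 1,000,000 the loop above produces thresholds at 1% through 5% of the data blocks:

	/* sb_dblocks = 1000000 */
	m_low_space[XFS_LOWSP_1_PCNT] = 10000;	/* 1% */
	m_low_space[XFS_LOWSP_2_PCNT] = 20000;	/* 2% */
	m_low_space[XFS_LOWSP_3_PCNT] = 30000;	/* 3% */
	m_low_space[XFS_LOWSP_4_PCNT] = 40000;	/* 4% */
	m_low_space[XFS_LOWSP_5_PCNT] = 50000;	/* 5% */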
+
+
+/*
  * Set whether we're using inode alignment.
  */
 STATIC void
@@ -1196,6 +1214,9 @@
 	 */
 	xfs_set_rw_sizes(mp);
 
+	/* set the low space thresholds for dynamic preallocation */
+	xfs_set_low_space_thresholds(mp);
+
 	/*
 	 * Set the inode cluster size.
 	 * This may still be overridden by the file system
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5861b49..a62e897 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -103,6 +103,16 @@
 	xfs_mod_incore_sb(mp, field, delta, rsvd)
 #endif
 
+/* dynamic preallocation free space thresholds, 5% down to 1% */
+enum {
+	XFS_LOWSP_1_PCNT = 0,
+	XFS_LOWSP_2_PCNT,
+	XFS_LOWSP_3_PCNT,
+	XFS_LOWSP_4_PCNT,
+	XFS_LOWSP_5_PCNT,
+	XFS_LOWSP_MAX,
+};
+
 typedef struct xfs_mount {
 	struct super_block	*m_super;
 	xfs_tid_t		m_tid;		/* next unused tid for fs */
@@ -202,6 +212,8 @@
 	__int64_t		m_update_flags;	/* sb flags we need to update
 						   on the next remount,rw */
 	struct shrinker		m_inode_shrink;	/* inode reclaim shrinker */
+	int64_t			m_low_space[XFS_LOWSP_MAX];
+						/* low free space thresholds */
 } xfs_mount_t;
 
 /*
@@ -379,6 +391,8 @@
 
 extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);
 
+extern void	xfs_set_low_space_thresholds(struct xfs_mount *);
+
 #endif	/* __KERNEL__ */
 
 extern void	xfs_mod_sb(struct xfs_trans *, __int64_t);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f6d956b..7692279 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1137,7 +1137,7 @@
 	if (blkdelta)
 		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
 out:
-	ASSERT(error = 0);
+	ASSERT(error == 0);
 	return;
 }
 
@@ -1350,7 +1350,7 @@
  * they could be immediately flushed and we'd have to race with the flusher
  * trying to pull the item from the AIL as we add it.
  */
-void
+static void
 xfs_trans_item_committed(
 	struct xfs_log_item	*lip,
 	xfs_lsn_t		commit_lsn,
@@ -1425,21 +1425,120 @@
 	xfs_trans_free(tp);
 }
 
+static inline void
+xfs_log_item_batch_insert(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		commit_lsn)
+{
+	int	i;
+
+	spin_lock(&ailp->xa_lock);
+	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+	xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+
+	for (i = 0; i < nr_items; i++)
+		IOP_UNPIN(log_items[i], 0);
+}
+
 /*
- * Called from the trans_commit code when we notice that
- * the filesystem is in the middle of a forced shutdown.
+ * Bulk operation version of xfs_trans_committed that takes a log vector of
+ * items to insert into the AIL. This uses bulk AIL insertion techniques to
+ * minimise lock traffic.
+ *
+ * If we are called with the aborted flag set, it is because a log write during
+ * a CIL checkpoint commit has failed. In this case, all the items in the
+ * checkpoint have already gone through IOP_COMMITTED and IOP_UNLOCK, which
+ * means that checkpoint commit abort handling is treated exactly the same
+ * as an iclog write error even though we haven't started any IO yet. Hence in
+ * this case all we need to do is IOP_COMMITTED processing, followed by an
+ * IOP_UNPIN(aborted) call.
+ */
+void
+xfs_trans_committed_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_vec	*log_vector,
+	xfs_lsn_t		commit_lsn,
+	int			aborted)
+{
+#define LOG_ITEM_BATCH_SIZE	32
+	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
+	struct xfs_log_vec	*lv;
+	int			i = 0;
+
+	/* unpin all the log items */
+	for (lv = log_vector; lv; lv = lv->lv_next) {
+		struct xfs_log_item	*lip = lv->lv_item;
+		xfs_lsn_t		item_lsn;
+
+		if (aborted)
+			lip->li_flags |= XFS_LI_ABORTED;
+		item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+		/* item_lsn of -1 means the item was freed */
+		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+			continue;
+
+		/*
+		 * if we are aborting the operation, no point in inserting the
+		 * object into the AIL as we are in a shutdown situation.
+		 */
+		if (aborted) {
+			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
+			IOP_UNPIN(lip, 1);
+			continue;
+		}
+
+		if (item_lsn != commit_lsn) {
+
+			/*
+			 * Not a bulk update option due to unusual item_lsn.
+			 * Push into AIL immediately, rechecking the lsn once
+			 * we have the ail lock. Then unpin the item.
+			 */
+			spin_lock(&ailp->xa_lock);
+			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+				xfs_trans_ail_update(ailp, lip, item_lsn);
+			else
+				spin_unlock(&ailp->xa_lock);
+			IOP_UNPIN(lip, 0);
+			continue;
+		}
+
+		/* Item is a candidate for bulk AIL insert.  */
+		log_items[i++] = lv->lv_item;
+		if (i >= LOG_ITEM_BATCH_SIZE) {
+			xfs_log_item_batch_insert(ailp, log_items,
+					LOG_ITEM_BATCH_SIZE, commit_lsn);
+			i = 0;
+		}
+	}
+
+	/* make sure we insert the remainder! */
+	if (i)
+		xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+}
+
+/*
+ * Called from the trans_commit code when we notice that the filesystem is in
+ * the middle of a forced shutdown.
+ *
+ * When we are called here, we have already pinned all the items in the
+ * transaction. However, neither IOP_COMMITTING or IOP_UNLOCK has been called
+ * so we can simply walk the items in the transaction, unpin them with an abort
+ * flag and then free the items. Note that unpinning the items can result in
+ * them being freed immediately, so we need to use a safe list traversal method
+ * here.
  */
 STATIC void
 xfs_trans_uncommit(
 	struct xfs_trans	*tp,
 	uint			flags)
 {
-	struct xfs_log_item_desc *lidp;
+	struct xfs_log_item_desc *lidp, *n;
 
-	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
-		/*
-		 * Unpin all but those that aren't dirty.
-		 */
+	list_for_each_entry_safe(lidp, n, &tp->t_items, lid_trans) {
 		if (lidp->lid_flags & XFS_LID_DIRTY)
 			IOP_UNPIN(lidp->lid_item, 1);
 	}
@@ -1656,7 +1755,6 @@
 	int			flags)
 {
 	struct xfs_log_vec	*log_vector;
-	int			error;
 
 	/*
 	 * Get each log item to allocate a vector structure for
@@ -1667,9 +1765,7 @@
 	if (!log_vector)
 		return ENOMEM;
 
-	error = xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
-	if (error)
-		return error;
+	xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
 
 	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 	xfs_trans_free(tp);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 246286b..c2042b7 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -294,8 +294,8 @@
 #define	XFS_ALLOC_BTREE_REF	2
 #define	XFS_BMAP_BTREE_REF	2
 #define	XFS_DIR_BTREE_REF	2
+#define	XFS_INO_REF		2
 #define	XFS_ATTR_BTREE_REF	1
-#define	XFS_INO_REF		1
 #define	XFS_DQUOT_REF		1
 
 #ifdef __KERNEL__
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index dc90695..c5bbbc4 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,8 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
+STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
+STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
 STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
 STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
 
@@ -449,129 +449,152 @@
 		xfs_log_move_tail(ailp->xa_mount, 1);
 }	/* xfs_trans_unlocked_item */
 
-
 /*
- * Update the position of the item in the AIL with the new
- * lsn.  If it is not yet in the AIL, add it.  Otherwise, move
- * it to its new position by removing it and re-adding it.
+ * xfs_trans_ail_update - bulk AIL insertion operation.
  *
- * Wakeup anyone with an lsn less than the item's lsn.  If the item
- * we move in the AIL is the minimum one, update the tail lsn in the
- * log manager.
+ * @xfs_trans_ail_update takes an array of log items that all need to be
+ * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
+ * be added.  Otherwise, it will be repositioned by removing it and re-adding
+ * it to the AIL. If we move the first item in the AIL, update the log tail to
+ * match the new minimum LSN in the AIL.
  *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
+ * This function executes the update operations on all the items in the array
+ * under a single hold of the AIL lock rather than taking the lock once per
+ * item. As a result, once we have the AIL lock, we need to check each log
+ * item LSN to confirm it needs to be moved forward in the AIL.
+ *
+ * To optimise the insert operation, we delete all the items from the AIL in
+ * the first pass, moving them into a temporary list, then splice the temporary
+ * list into the correct position in the AIL. This avoids needing to do an
+ * insert operation on every item.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
  */
 void
-xfs_trans_ail_update(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip,
-	xfs_lsn_t	lsn) __releases(ailp->xa_lock)
+xfs_trans_ail_update_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 {
-	xfs_log_item_t		*dlip = NULL;
-	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
+	xfs_log_item_t		*mlip;
 	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
+	LIST_HEAD(tmp);
 
 	mlip = xfs_ail_min(ailp);
 
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-	} else {
-		lip->li_flags |= XFS_LI_IN_AIL;
-	}
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (lip->li_flags & XFS_LI_IN_AIL) {
+			/* check if we really need to move the item */
+			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
+				continue;
 
-	lip->li_lsn = lsn;
-	xfs_ail_insert(ailp, lip);
-
-	if (mlip == dlip) {
-		mlip = xfs_ail_min(ailp);
-		/*
-		 * It is not safe to access mlip after the AIL lock is
-		 * dropped, so we must get a copy of li_lsn before we do
-		 * so.  This is especially important on 32-bit platforms
-		 * where accessing and updating 64-bit values like li_lsn
-		 * is not atomic.
-		 */
-		tail_lsn = mlip->li_lsn;
-		spin_unlock(&ailp->xa_lock);
-		xfs_log_move_tail(ailp->xa_mount, tail_lsn);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
-
-
-}	/* xfs_trans_update_ail */
-
-/*
- * Delete the given item from the AIL.  It must already be in
- * the AIL.
- *
- * Wakeup anyone with an lsn less than item's lsn.    If the item
- * we delete in the AIL is the minimum one, update the tail lsn in the
- * log manager.
- *
- * Clear the IN_AIL flag from the item, reset its lsn to 0, and
- * bump the AIL's generation count to indicate that the tree
- * has changed.
- *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
- */
-void
-xfs_trans_ail_delete(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
-{
-	xfs_log_item_t		*dlip;
-	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
-
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		mlip = xfs_ail_min(ailp);
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-
-
-		lip->li_flags &= ~XFS_LI_IN_AIL;
-		lip->li_lsn = 0;
-
-		if (mlip == dlip) {
-			mlip = xfs_ail_min(ailp);
-			/*
-			 * It is not safe to access mlip after the AIL lock
-			 * is dropped, so we must get a copy of li_lsn
-			 * before we do so.  This is especially important
-			 * on 32-bit platforms where accessing and updating
-			 * 64-bit values like li_lsn is not atomic.
-			 */
-			tail_lsn = mlip ? mlip->li_lsn : 0;
-			spin_unlock(&ailp->xa_lock);
-			xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+			xfs_ail_delete(ailp, lip);
+			if (mlip == lip)
+				mlip_changed = 1;
 		} else {
-			spin_unlock(&ailp->xa_lock);
+			lip->li_flags |= XFS_LI_IN_AIL;
 		}
+		lip->li_lsn = lsn;
+		list_add(&lip->li_ail, &tmp);
 	}
-	else {
-		/*
-		 * If the file system is not being shutdown, we are in
-		 * serious trouble if we get to this stage.
-		 */
-		struct xfs_mount	*mp = ailp->xa_mount;
 
+	xfs_ail_splice(ailp, &tmp, lsn);
+
+	if (!mlip_changed) {
 		spin_unlock(&ailp->xa_lock);
-		if (!XFS_FORCED_SHUTDOWN(mp)) {
-			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
-		"%s: attempting to delete a log item that is not in the AIL",
-					__func__);
-			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		}
+		return;
 	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so.  This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
+/*
+ * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
+ *
+ * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
+ * removed from the AIL. The caller is already holding the AIL lock, and has
+ * done all the checks necessary to ensure the items passed in via @log_items
+ * are ready for deletion. This includes checking that the items are in the
+ * AIL.
+ *
+ * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
+ * flag from the item and reset the item's lsn to 0. If we remove the first
+ * item in the AIL, update the log tail to match the new minimum LSN in the
+ * AIL.
+ *
+ * This function will not drop the AIL lock until all items are removed from
+ * the AIL to minimise the amount of lock traffic on the AIL. This does not
+ * greatly increase the AIL hold time, but does significantly reduce the amount
+ * of traffic on the lock, especially during IO completion.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
+ */
+void
+xfs_trans_ail_delete_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items) __releases(ailp->xa_lock)
+{
+	xfs_log_item_t		*mlip;
+	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
 
+	mlip = xfs_ail_min(ailp);
+
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
+			struct xfs_mount	*mp = ailp->xa_mount;
+
+			spin_unlock(&ailp->xa_lock);
+			if (!XFS_FORCED_SHUTDOWN(mp)) {
+				xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
+		"%s: attempting to delete a log item that is not in the AIL",
+						__func__);
+				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			}
+			return;
+		}
+
+		xfs_ail_delete(ailp, lip);
+		lip->li_flags &= ~XFS_LI_IN_AIL;
+		lip->li_lsn = 0;
+		if (mlip == lip)
+			mlip_changed = 1;
+	}
+
+	if (!mlip_changed) {
+		spin_unlock(&ailp->xa_lock);
+		return;
+	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so.  This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic. It is possible we've emptied the
+	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip ? mlip->li_lsn : 0;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
 
 /*
  * The active item list (AIL) is a doubly linked list of log
@@ -623,16 +646,13 @@
 }
 
 /*
- * Insert the given log item into the AIL.
- * We almost always insert at the end of the list, so on inserts
- * we search from the end of the list to find where the
- * new item belongs.
+ * Splice the log item list into the AIL at the given LSN.
  */
 STATIC void
-xfs_ail_insert(
+xfs_ail_splice(
 	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-/* ARGSUSED */
+	struct list_head *list,
+	xfs_lsn_t	lsn)
 {
 	xfs_log_item_t	*next_lip;
 
@@ -640,39 +660,33 @@
 	 * If the list is empty, just insert the item.
 	 */
 	if (list_empty(&ailp->xa_ail)) {
-		list_add(&lip->li_ail, &ailp->xa_ail);
+		list_splice(list, &ailp->xa_ail);
 		return;
 	}
 
 	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
+		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
 			break;
 	}
 
 	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
+	       (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
 
-	list_add(&lip->li_ail, &next_lip->li_ail);
-
-	xfs_ail_check(ailp, lip);
+	list_splice_init(list, &next_lip->li_ail);
 	return;
 }
 
 /*
  * Delete the given item from the AIL.  Return a pointer to the item.
  */
-/*ARGSUSED*/
-STATIC xfs_log_item_t *
+STATIC void
 xfs_ail_delete(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	xfs_ail_check(ailp, lip);
-
 	list_del(&lip->li_ail);
-
-	return lip;
+	xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
 /*
@@ -682,7 +696,6 @@
 STATIC xfs_log_item_t *
 xfs_ail_min(
 	struct xfs_ail	*ailp)
-/* ARGSUSED */
 {
 	if (list_empty(&ailp->xa_ail))
 		return NULL;
@@ -699,7 +712,6 @@
 xfs_ail_next(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	if (lip->li_ail.next == &ailp->xa_ail)
 		return NULL;
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index f783d5e..f7590f5 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -69,12 +69,16 @@
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
 
-	next_extent = efip->efi_next_extent;
+	/*
+	 * atomic_inc_return gives us the value after the increment;
+	 * we want to use it as an array index so we need to subtract 1 from
+	 * it.
+	 */
+	next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
 	ASSERT(next_extent < efip->efi_format.efi_nextents);
 	extp = &(efip->efi_format.efi_extents[next_extent]);
 	extp->ext_start = start_block;
 	extp->ext_len = ext_len;
-	efip->efi_next_extent++;
 }
 
 
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 62da86c..35162c2 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -22,15 +22,17 @@
 struct xfs_log_item_desc;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_ail;
+struct xfs_log_vec;
 
 void	xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void	xfs_trans_del_item(struct xfs_log_item *);
 void	xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
 				int flags);
-void	xfs_trans_item_committed(struct xfs_log_item *lip,
-				xfs_lsn_t commit_lsn, int aborted);
 void	xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
 
+void	xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
+				xfs_lsn_t commit_lsn, int aborted);
 /*
  * AIL traversal cursor.
  *
@@ -73,12 +75,29 @@
 /*
  * From xfs_trans_ail.c
  */
-void			xfs_trans_ail_update(struct xfs_ail *ailp,
-					struct xfs_log_item *lip, xfs_lsn_t lsn)
-					__releases(ailp->xa_lock);
-void			xfs_trans_ail_delete(struct xfs_ail *ailp,
-					struct xfs_log_item *lip)
-					__releases(ailp->xa_lock);
+void	xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+				struct xfs_log_item **log_items, int nr_items,
+				xfs_lsn_t lsn) __releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_update(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
+{
+	xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
+}
+
+void	xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
+				struct xfs_log_item **log_items, int nr_items)
+				__releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_delete(
+	struct xfs_ail	*ailp,
+	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
+{
+	xfs_trans_ail_delete_bulk(ailp, &lip, 1);
+}
+
 void			xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
 void			xfs_trans_unlocked_item(struct xfs_ail *,
 					xfs_log_item_t *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 8e4a63c..d8e6f8c 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -964,29 +964,48 @@
 			xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
 	}
 
-	if (ip->i_d.di_nlink != 0) {
-		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-		     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-		       ip->i_delayed_blks > 0)) &&
-		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
-		    (!(ip->i_d.di_flags &
-				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+	if (ip->i_d.di_nlink == 0)
+		return 0;
 
-			/*
-			 * If we can't get the iolock just skip truncating
-			 * the blocks past EOF because we could deadlock
-			 * with the mmap_sem otherwise.  We'll get another
-			 * chance to drop them once the last reference to
-			 * the inode is dropped, so we'll never leak blocks
-			 * permanently.
-			 */
-			error = xfs_free_eofblocks(mp, ip,
-						   XFS_FREE_EOF_TRYLOCK);
-			if (error)
-				return error;
-		}
+	if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
+	     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
+	       ip->i_delayed_blks > 0)) &&
+	     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
+	    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+
+		/*
+		 * If we can't get the iolock just skip truncating the blocks
+		 * past EOF because we could deadlock with the mmap_sem
+		 * otherwise.  We'll get another chance to drop them once the
+		 * last reference to the inode is dropped, so we'll never leak
+		 * blocks permanently.
+		 *
+		 * Further, if the inode is being opened, written and closed
+		 * frequently and we have delayed allocation blocks outstanding
+		 * (e.g. streaming writes from the NFS server), truncating the
+		 * blocks past EOF will cause fragmentation to occur.
+		 *
+		 * In this case don't do the truncation, either, but we have to
+		 * be careful how we detect this case. Blocks beyond EOF show
+		 * up as i_delayed_blks even when the inode is clean, so we
+		 * need to truncate them away first before checking for a dirty
+		 * release. Hence on the first dirty close we will still remove
+		 * the speculative allocation, but after that we will leave it
+		 * in place.
+		 */
+		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
+			return 0;
+
+		error = xfs_free_eofblocks(mp, ip,
+					   XFS_FREE_EOF_TRYLOCK);
+		if (error)
+			return error;
+
+		/* delalloc blocks after truncation means it really is dirty */
+		if (ip->i_delayed_blks)
+			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
 	}
-
 	return 0;
 }
 
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17714be..5b6c391 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 9cf736e..fc1575f 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index bc4a6de..ef1cef7 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index a091cab..de39915 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 359ef11..78ca429 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -148,9 +148,7 @@
 	u32 suprise_removal_ok:1;
 	u32 power_manageable:1;
 	u32 performance_manageable:1;
-	u32 wake_capable:1;	/* Wakeup(_PRW) supported? */
-	u32 force_power_state:1;
-	u32 reserved:22;
+	u32 reserved:24;
 };
 
 /* File System */
@@ -242,20 +240,14 @@
 struct acpi_device_wakeup_flags {
 	u8 valid:1;		/* Can successfully enable wakeup? */
 	u8 run_wake:1;		/* Run-Wake GPE devices */
-	u8 always_enabled:1;	/* Run-wake devices that are always enabled */
 	u8 notifier_present:1;  /* Wake-up notify handler has been installed */
 };
 
-struct acpi_device_wakeup_state {
-	u8 enabled:1;
-};
-
 struct acpi_device_wakeup {
 	acpi_handle gpe_device;
 	u64 gpe_number;
 	u64 sleep_state;
 	struct acpi_handle_list resources;
-	struct acpi_device_wakeup_state state;
 	struct acpi_device_wakeup_flags flags;
 	int prepare_count;
 	int run_wake_count;
@@ -328,8 +320,8 @@
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
 				       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
-int acpi_bus_get_power(acpi_handle handle, int *state);
 int acpi_bus_set_power(acpi_handle handle, int state);
+int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
 bool acpi_bus_can_wakeup(acpi_handle handle);
 #ifdef CONFIG_ACPI_PROC_EVENT
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 65b3f58..a3252a5 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -8,7 +8,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 53b7cfd..e46ec95 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20101013
+#define ACPI_CA_VERSION                 0x20110112
 
 #include "actypes.h"
 #include "actbl.h"
@@ -229,6 +229,10 @@
 acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
 
 acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler,
+				 void *context);
+
+acpi_status
 acpi_install_fixed_event_handler(u32 acpi_event,
 				 acpi_event_handler handler, void *context);
 
@@ -258,11 +262,11 @@
 acpi_status
 acpi_install_gpe_handler(acpi_handle gpe_device,
 			 u32 gpe_number,
-			 u32 type, acpi_event_handler address, void *context);
+			 u32 type, acpi_gpe_handler address, void *context);
 
 acpi_status
 acpi_remove_gpe_handler(acpi_handle gpe_device,
-			u32 gpe_number, acpi_event_handler address);
+			u32 gpe_number, acpi_gpe_handler address);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
@@ -292,11 +296,13 @@
 
 acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
 
-acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number);
-
 acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
 
-acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action);
+acpi_status
+acpi_setup_gpe_for_wake(acpi_handle parent_device,
+			acpi_handle gpe_device, u32 gpe_number);
+
+acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action);
 
 acpi_status
 acpi_get_gpe_status(acpi_handle gpe_device,
@@ -315,7 +321,7 @@
 
 acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
 
-acpi_status acpi_update_gpes(void);
+acpi_status acpi_update_all_gpes(void);
 
 /*
  * Resource interfaces
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index e552635..0a66cc4 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index ad20016..7e42bfe 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index c637b75..7504bc9 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -119,7 +119,7 @@
 struct acpi_table_bert {
 	struct acpi_table_header header;	/* Common ACPI table header */
 	u32 region_length;	/* Length of the boot error region */
-	u64 address;		/* Physical addresss of the error region */
+	u64 address;		/* Physical address of the error region */
 };
 
 /* Boot Error Region (not a subtable, pointed to by Address field above) */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index d4136b2..0fc15df 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 2b134b6..64f838b 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -656,33 +656,34 @@
 #define ACPI_GPE_MAX                    0xFF
 #define ACPI_NUM_GPE                    256
 
-/* Actions for acpi_gpe_wakeup, acpi_hw_low_set_gpe */
+/* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */
 
 #define ACPI_GPE_ENABLE                 0
 #define ACPI_GPE_DISABLE                1
-#define ACPI_GPE_COND_ENABLE            2
+#define ACPI_GPE_CONDITIONAL_ENABLE     2
 
 /*
  * GPE info flags - Per GPE
- * +-------+---+-+-+
- * |  7:4  |3:2|1|0|
- * +-------+---+-+-+
- *     |     |  | |
- *     |     |  | +--- Interrupt type: edge or level triggered
- *     |     |  +----- GPE can wake the system
- *     |     +-------- Type of dispatch:to method, handler, or none
- *     +-------------- <Reserved>
+ * +-------+-+-+---+
+ * |  7:4  |3|2|1:0|
+ * +-------+-+-+---+
+ *     |    | |  |
+ *     |    | |  +-- Type of dispatch:to method, handler, notify, or none
+ *     |    | +----- Interrupt type: edge or level triggered
+ *     |    +------- Is a Wake GPE
+ *     +------------ <Reserved>
  */
-#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x01
-#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x01
+#define ACPI_GPE_DISPATCH_NONE          (u8) 0x00
+#define ACPI_GPE_DISPATCH_METHOD        (u8) 0x01
+#define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x02
+#define ACPI_GPE_DISPATCH_NOTIFY        (u8) 0x03
+#define ACPI_GPE_DISPATCH_MASK          (u8) 0x03
+
+#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x04
 #define ACPI_GPE_EDGE_TRIGGERED         (u8) 0x00
+#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x04
 
-#define ACPI_GPE_CAN_WAKE		(u8) 0x02
-
-#define ACPI_GPE_DISPATCH_MASK          (u8) 0x0C
-#define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x04
-#define ACPI_GPE_DISPATCH_METHOD        (u8) 0x08
-#define ACPI_GPE_DISPATCH_NOT_USED      (u8) 0x00
+#define ACPI_GPE_CAN_WAKE               (u8) 0x08
 
 /*
  * Flags for GPE and Lock interfaces
@@ -894,9 +895,20 @@
 /*
  * Various handlers and callback procedures
  */
+typedef
+void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type,
+			       acpi_handle device,
+			       u32 event_number, void *context);
+
+#define ACPI_EVENT_TYPE_GPE         0
+#define ACPI_EVENT_TYPE_FIXED       1
+
 typedef u32(*acpi_event_handler) (void *context);
 
 typedef
+u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context);
+
+typedef
 void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
 
 typedef
@@ -951,6 +963,10 @@
 #define ACPI_INTERRUPT_NOT_HANDLED      0x00
 #define ACPI_INTERRUPT_HANDLED          0x01
 
+/* GPE handler return values */
+
+#define ACPI_REENABLE_GPE               0x80
+
 /* Length of 32-bit EISAID values when converted back to a string */
 
 #define ACPI_EISAID_STRING_SIZE         8	/* Includes null terminator */
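
Usage note: GPE dispatch information now lives in bits 1:0 and GPE handlers use the dedicated acpi_gpe_handler type, which receives the GPE device and number and may OR ACPI_REENABLE_GPE into its return value to have ACPICA clear and re-enable the event. A sketch under these conventions, not taken from the patch; struct my_device and its work item are hypothetical:

    /* GPE handler using the new acpi_gpe_handler signature (sketch). */
    static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *context)
    {
        struct my_device *mydev = context;      /* hypothetical driver state */

        schedule_work(&mydev->event_work);      /* defer real work out of GPE context */

        /* Handled; let ACPICA clear the status and re-enable this GPE. */
        return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
    }

    /* Registration is unchanged apart from the handler type: */
    static acpi_status my_install_gpe(acpi_handle gpe_device, u32 gpe_number,
                                      struct my_device *mydev)
    {
        return acpi_install_gpe_handler(gpe_device, gpe_number,
                                        ACPI_GPE_EDGE_TRIGGERED,
                                        my_gpe_handler, mydev);
    }
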
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index b336502..c4dbb13 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -19,6 +19,12 @@
 extern int hest_disable;
 extern int erst_disable;
 
+#ifdef CONFIG_ACPI_APEI
+void __init acpi_hest_init(void);
+#else
+static inline void acpi_hest_init(void) { return; }
+#endif
+
 typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
 int apei_hest_parse(apei_hest_func_t func, void *data);
 
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index a3e334a..5af3ed5 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 5dcb953..e228893 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 572189e..5d2a5e9 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 1b62102..55192ac 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -324,6 +324,12 @@
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
 extern int acpi_processor_set_throttling(struct acpi_processor *pr,
 					 int state, bool force);
+/*
+ * Reevaluate whether the T-state is invalid after one cpu is
+ * onlined/offlined. In such case the flags.throttling will be updated.
+ */
+extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+			unsigned long action);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 3da9e27..787abbb 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -45,6 +45,9 @@
 #define MADV_MERGEABLE   12		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
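
Usage note: MADV_HUGEPAGE and MADV_NOHUGEPAGE let user space opt an anonymous mapping in or out of transparent hugepage backing. A minimal user-space sketch; it assumes a kernel with CONFIG_TRANSPARENT_HUGEPAGE and falls back to the numeric value above if the libc headers predate the flag:

    #include <sys/mman.h>

    #ifndef MADV_HUGEPAGE
    #define MADV_HUGEPAGE 14        /* value from asm-generic/mman-common.h */
    #endif

    int main(void)
    {
        size_t len = 64 << 20;      /* 64 MiB: room for several 2 MiB huge pages */
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
            return 1;

        /* Hint that this range is worth backing with transparent hugepages. */
        madvise(buf, len, MADV_HUGEPAGE);

        /* ... touch and use buf ... */
        munmap(buf, len);
        return 0;
    }
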
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6f3c6ae..b4bfe33 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,68 +4,103 @@
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_MMU
 
+#include <linux/mm_types.h>
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this. We return whether the PTE actually changed, which
- * in turn instructs the caller to do things like update__mmu_cache.
- * This used to be done in the caller, but sparc needs minor faults to
- * force that call on sun4c so we changed this macro slightly
- */
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									  \
-	int __changed = !pte_same(*(__ptep), __entry);			  \
-	if (__changed) {						  \
-		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-		flush_tlb_page(__vma, __address);			  \
-	}								  \
-	__changed;							  \
-})
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
-({									\
-	pte_t __pte = *(__ptep);					\
-	int r = 1;							\
-	if (!pte_young(__pte))						\
-		r = 0;							\
-	else								\
-		set_pte_at((__vma)->vm_mm, (__address),			\
-			   (__ptep), pte_mkold(__pte));			\
-	r;								\
-})
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	int r = 1;
+	if (!pte_young(pte))
+		r = 0;
+	else
+		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+	return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	int r = 1;
+	if (!pmd_young(pmd))
+		r = 0;
+	else
+		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+	return r;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep)		\
-({									\
-	int __young;							\
-	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
-	if (__young)							\
-		flush_tlb_page(__vma, __address);			\
-	__young;							\
-})
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(__mm, __address, __ptep)			\
-({									\
-	pte_t __pte = *(__ptep);					\
-	pte_clear((__mm), (__address), (__ptep));			\
-	__pte;								\
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	pte_clear(mm, address, ptep);
+	return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	pmd_clear(mm, address, pmdp);
+	return pmd;
-})
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
-({									\
-	pte_t __pte;							\
-	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
-	__pte;								\
-})
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address, pte_t *ptep,
+					    int full)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear(mm, address, ptep);
+	return pte;
+}
 #endif
 
 /*
@@ -74,20 +109,25 @@
  * not present, or in the process of an address space destruction.
  */
 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
-do {									\
-	pte_clear((__mm), (__address), (__ptep));			\
-} while (0)
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+					      unsigned long address,
+					      pte_t *ptep,
+					      int full)
+{
+	pte_clear(mm, address, ptep);
+}
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep)			\
-({									\
-	pte_t __pte;							\
-	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
-	flush_tlb_page(__vma, __address);				\
-	__pte;								\
-})
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -99,8 +139,49 @@
 }
 #endif
 
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
+				  unsigned long address,
+				  pmd_t *pmdp);
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)	(pte_val(A) == pte_val(B))
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
@@ -348,6 +429,24 @@
 				unsigned long size);
 #endif
 
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return 0;
+}
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return 0;
+}
+#ifndef __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	BUG();
+	return 0;
+}
+#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
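
Usage note: with the generic helpers turned into real functions, an architecture overrides one by defining the matching __HAVE_ARCH_* guard and supplying its own version before including <asm-generic/pgtable.h>. A sketch of the pattern as it would sit in an arch header; the extra hardware step in the comment is illustrative:

    /* arch/<arch>/include/asm/pgtable.h (sketch) */
    #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
    static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                                unsigned long address, pte_t *ptep)
    {
        int young = pte_young(*ptep);

        if (young)
            set_pte_at(vma->vm_mm, address, ptep, pte_mkold(*ptep));
        /* an arch could also push the cleared bit out to its TLB/MMU here */
        return young;
    }

    #include <asm-generic/pgtable.h>    /* supplies the helpers not overridden above */
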
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 05cbad0..fe77e33 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -124,7 +124,8 @@
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
+#define FTRACE_EVENTS()	. = ALIGN(8);					\
+			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
 			*(_ftrace_events)				\
 			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
 #else
@@ -140,7 +141,8 @@
 #endif
 
 #ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
+#define TRACE_SYSCALLS() . = ALIGN(8);					\
+			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
 			 *(__syscalls_metadata)				\
 			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
@@ -165,10 +167,8 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
-	. = ALIGN(32);							\
-	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
+	STRUCT_ALIGN();							\
 	*(__tracepoints)						\
-	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
 	/* implement dynamic printk debug */				\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___verbose) = .;                          \
@@ -176,13 +176,7 @@
 	VMLINUX_SYMBOL(__stop___verbose) = .;				\
 	LIKELY_PROFILE()		       				\
 	BRANCH_PROFILE()						\
-	TRACE_PRINTKS()							\
-									\
-	STRUCT_ALIGN();							\
-	FTRACE_EVENTS()							\
-									\
-	STRUCT_ALIGN();							\
-	TRACE_SYSCALLS()
+	TRACE_PRINTKS()
 
 /*
  * Data section helpers
@@ -200,7 +194,8 @@
 
 #define READ_MOSTLY_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data..read_mostly)
+	*(.data..read_mostly)						\
+	. = ALIGN(align);
 
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
@@ -219,6 +214,10 @@
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
 		*(__vermagic)		/* Kernel version magic */	\
+		. = ALIGN(8);						\
+		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
+		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
+		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
 		*(__markers_strings)	/* Markers: strings */		\
 		*(__tracepoints_strings)/* Tracepoints: strings */	\
 	}								\
@@ -363,6 +362,13 @@
 		VMLINUX_SYMBOL(__start___param) = .;			\
 		*(__param)						\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
+	}								\
+									\
+	/* Built-in module versions. */					\
+	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start___modver) = .;			\
+		*(__modver)						\
+		VMLINUX_SYMBOL(__stop___modver) = .;			\
 		. = ALIGN((align));					\
 		VMLINUX_SYMBOL(__end_rodata) = .;			\
 	}								\
@@ -474,6 +480,8 @@
 	KERNEL_CTORS()							\
 	*(.init.rodata)							\
 	MCOUNT_REC()							\
+	FTRACE_EVENTS()							\
+	TRACE_SYSCALLS()						\
 	DEV_DISCARD(init.rodata)					\
 	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.rodata)					\
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
new file mode 100644
index 0000000..c5813c8
--- /dev/null
+++ b/include/crypto/if_alg.h
@@ -0,0 +1,92 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_IF_ALG_H
+#define _CRYPTO_IF_ALG_H
+
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/if_alg.h>
+#include <linux/types.h>
+#include <net/sock.h>
+
+#define ALG_MAX_PAGES			16
+
+struct crypto_async_request;
+
+struct alg_sock {
+	/* struct sock must be the first member of struct alg_sock */
+	struct sock sk;
+
+	struct sock *parent;
+
+	const struct af_alg_type *type;
+	void *private;
+};
+
+struct af_alg_completion {
+	struct completion completion;
+	int err;
+};
+
+struct af_alg_control {
+	struct af_alg_iv *iv;
+	int op;
+};
+
+struct af_alg_type {
+	void *(*bind)(const char *name, u32 type, u32 mask);
+	void (*release)(void *private);
+	int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+	int (*accept)(void *private, struct sock *sk);
+
+	struct proto_ops *ops;
+	struct module *owner;
+	char name[14];
+};
+
+struct af_alg_sgl {
+	struct scatterlist sg[ALG_MAX_PAGES];
+	struct page *pages[ALG_MAX_PAGES];
+};
+
+int af_alg_register_type(const struct af_alg_type *type);
+int af_alg_unregister_type(const struct af_alg_type *type);
+
+int af_alg_release(struct socket *sock);
+int af_alg_accept(struct sock *sk, struct socket *newsock);
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+		   int write);
+void af_alg_free_sg(struct af_alg_sgl *sgl);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
+void af_alg_complete(struct crypto_async_request *req, int err);
+
+static inline struct alg_sock *alg_sk(struct sock *sk)
+{
+	return (struct alg_sock *)sk;
+}
+
+static inline void af_alg_release_parent(struct sock *sk)
+{
+	sock_put(alg_sk(sk)->parent);
+}
+
+static inline void af_alg_init_completion(struct af_alg_completion *completion)
+{
+	init_completion(&completion->completion);
+}
+
+#endif	/* _CRYPTO_IF_ALG_H */
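
Usage note: if_alg.h is the kernel side of the new AF_ALG user-space crypto API; together with the exported linux/if_alg.h (added to Kbuild later in this patch) it lets programs drive kernel transforms over a socket. A user-space sketch computing a SHA-1 digest, assuming algif_hash (CONFIG_CRYPTO_USER_API_HASH) is available; error handling is omitted:

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",      /* handled by algif_hash */
            .salg_name   = "sha1",
        };
        unsigned char digest[20];
        int tfm, req;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));

        /* accept() returns a request socket bound to the selected transform. */
        req = accept(tfm, NULL, 0);
        write(req, "hello", 5);
        read(req, digest, sizeof(digest));

        close(req);
        close(tfm);
        return 0;
    }
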
diff --git a/drivers/crypto/padlock.h b/include/crypto/padlock.h
similarity index 82%
rename from drivers/crypto/padlock.h
rename to include/crypto/padlock.h
index b728e45..d2cfa2e 100644
--- a/drivers/crypto/padlock.h
+++ b/include/crypto/padlock.h
@@ -15,9 +15,15 @@
 
 #define PADLOCK_ALIGNMENT 16
 
-#define PFX	"padlock: "
+#define PFX	KBUILD_MODNAME ": "
 
 #define PADLOCK_CRA_PRIORITY	300
 #define PADLOCK_COMPOSITE_PRIORITY 400
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 #endif	/* _CRYPTO_PADLOCK_H */
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 833d208..4fd95a3 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -68,6 +68,21 @@
 	return (++sg)->length ? sg : (void *)sg_page(sg);
 }
 
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+					    struct scatterlist *sg,
+					    int chain, int num)
+{
+	if (chain) {
+		head->length += sg->length;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	if (sg)
+		scatterwalk_sg_chain(head, num, sg);
+	else
+		sg_mark_end(head);
+}
+
 static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
 						struct scatter_walk *walk_out)
 {
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index a4694c6..348843b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1101,7 +1101,7 @@
 	struct platform_device *platformdev; /**< Platform device struture */
 
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
-	int num_crtcs;                  /**< Number of CRTCs on this device */
+	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
 	void *dev_private;		/**< device private data */
 	void *mm_private;
 	struct address_space *dev_mapping;
@@ -1367,7 +1367,7 @@
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 				     struct timeval *vblanktime);
-extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index acd7fad..801be59 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -275,6 +275,7 @@
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
  * @dpms: control display power levels
  * @save: save CRTC state
  * @resore: restore CRTC state
@@ -302,6 +303,8 @@
 	void (*save)(struct drm_crtc *crtc); /* suspend? */
 	/* Restore CRTC state */
 	void (*restore)(struct drm_crtc *crtc); /* resume? */
+	/* Reset CRTC state */
+	void (*reset)(struct drm_crtc *crtc);
 
 	/* cursor controls */
 	int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
@@ -379,6 +382,7 @@
  * @dpms: set power state (see drm_crtc_funcs above)
  * @save: save connector state
  * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
  * @mode_valid: is this mode valid on the given connector?
  * @mode_fixup: try to fixup proposed mode for this connector
  * @mode_set: set this mode
@@ -396,6 +400,7 @@
 	void (*dpms)(struct drm_connector *connector, int mode);
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
+	void (*reset)(struct drm_connector *connector);
 
 	/* Check to see if anything is attached to the connector.
 	 * @force is set to false whilst polling, true when checking the
@@ -413,6 +418,7 @@
 };
 
 struct drm_encoder_funcs {
+	void (*reset)(struct drm_encoder *encoder);
 	void (*destroy)(struct drm_encoder *encoder);
 };
 
@@ -656,6 +662,7 @@
 						   struct drm_display_mode *mode);
 extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
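
Usage note: the new ->reset() hooks give drivers a place to invalidate software state that no longer matches the hardware (typically after resume); drm_mode_config_reset() walks all CRTCs, encoders and connectors and calls them. A sketch of how a driver might use this; the my_* names and the helper calls are illustrative:

    static void my_crtc_reset(struct drm_crtc *crtc)
    {
        /* forget cached enable/mode state that went stale while powered down */
        crtc->enabled = false;
    }

    static const struct drm_crtc_funcs my_crtc_funcs = {
        .reset      = my_crtc_reset,
        .set_config = drm_crtc_helper_set_config,
        .destroy    = drm_crtc_cleanup,
        /* cursor, gamma, page-flip hooks as before */
    };

    static int my_resume(struct drm_device *dev)
    {
        drm_mode_config_reset(dev);             /* runs every registered ->reset() */
        drm_helper_resume_force_mode(dev);      /* then restore the modes */
        return 0;
    }
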
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index aac27bd..f22e7fe 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -121,6 +121,9 @@
 void drm_fb_helper_restore(void);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+			    uint32_t depth);
+
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
 bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fe29ae3..5ff1194 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -28,7 +28,6 @@
 	{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
 	{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
 	{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
-	{0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index e95a86b..e5c607a 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -907,6 +907,7 @@
 #define RADEON_INFO_TILING_CONFIG	0x06
 #define RADEON_INFO_WANT_HYPERZ		0x07
 #define RADEON_INFO_WANT_CMASK		0x08 /* get access to CMASK on r300 */
+#define RADEON_INFO_CLOCK_CRYSTAL_FREQ	0x09 /* clock crystal frequency */
 
 struct drm_radeon_info {
 	uint32_t		request;
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 5cb86c3..fc48754 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -99,7 +99,6 @@
  * structure of raw payloads passed to add_key() or instantiate key
  */
 struct rxrpc_key_data_v1 {
-	u32		kif_version;		/* 1 */
 	u16		security_index;
 	u16		ticket_length;
 	u32		expiry;			/* time_t */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index d1580c1..b0ada6f 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -1,5 +1,6 @@
 header-y += byteorder/
 header-y += can/
+header-y += caif/
 header-y += dvb/
 header-y += hdlc/
 header-y += isdn/
@@ -158,6 +159,7 @@
 header-y += if.h
 header-y += if_addr.h
 header-y += if_addrlabel.h
+header-y += if_alg.h
 header-y += if_arcnet.h
 header-y += if_arp.h
 header-y += if_bonding.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 67c91b4..a2e910e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -306,9 +306,6 @@
 					     u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
-int acpi_os_map_generic_address(struct acpi_generic_address *addr);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
-
 #else	/* !CONFIG_ACPI */
 
 #define acpi_disabled 1
@@ -352,4 +349,14 @@
 	return -1;
 }
 #endif	/* !CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_SLEEP
+int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+	return 0;
+}
+#endif
+
 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/acpi_io.h b/include/linux/acpi_io.h
new file mode 100644
index 0000000..7180013
--- /dev/null
+++ b/include/linux/acpi_io.h
@@ -0,0 +1,16 @@
+#ifndef _ACPI_IO_H_
+#define _ACPI_IO_H_
+
+#include <linux/io.h>
+#include <acpi/acpi.h>
+
+static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
+					    acpi_size size)
+{
+       return ioremap_cache(phys, size);
+}
+
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
+#endif
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 521a0f8..3111385 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -12,7 +12,6 @@
  *
  * Please credit ARM.com
  * Documentation: ARM DDI 0196D
- *
  */
 
 #ifndef AMBA_PL08X_H
@@ -22,6 +21,15 @@
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 
+struct pl08x_lli;
+struct pl08x_driver_data;
+
+/* Bitmasks for selecting AHB ports for DMA transfers */
+enum {
+	PL08X_AHB1 = (1 << 0),
+	PL08X_AHB2 = (1 << 1)
+};
+
 /**
  * struct pl08x_channel_data - data structure to pass info between
  * platform and PL08x driver regarding channel configuration
@@ -46,8 +54,10 @@
  * @circular_buffer: whether the buffer passed in is circular and
  * shall simply be looped round round (like a record baby round
  * round round round)
- * @single: the device connected to this channel will request single
- * DMA transfers, not bursts. (Bursts are default.)
+ * @single: the device connected to this channel will request single DMA
+ * transfers, not bursts. (Bursts are default.)
+ * @periph_buses: the device connected to this channel is accessible via
+ * these buses (use PL08X_AHB1 | PL08X_AHB2).
  */
 struct pl08x_channel_data {
 	char *bus_id;
@@ -55,10 +65,10 @@
 	int max_signal;
 	u32 muxval;
 	u32 cctl;
-	u32 ccfg;
 	dma_addr_t addr;
 	bool circular_buffer;
 	bool single;
+	u8 periph_buses;
 };
 
 /**
@@ -67,24 +77,23 @@
  * @addr: current address
  * @maxwidth: the maximum width of a transfer on this bus
  * @buswidth: the width of this bus in bytes: 1, 2 or 4
- * @fill_bytes: bytes required to fill to the next bus memory
- * boundary
+ * @fill_bytes: bytes required to fill to the next bus memory boundary
  */
 struct pl08x_bus_data {
 	dma_addr_t addr;
 	u8 maxwidth;
 	u8 buswidth;
-	u32 fill_bytes;
+	size_t fill_bytes;
 };
 
 /**
  * struct pl08x_phy_chan - holder for the physical channels
  * @id: physical index to this channel
  * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this
- * physical channel right now
- * @serving: the virtual channel currently being served by this
- * physical channel
+ * @signal: the physical signal (aka channel) serving this physical channel
+ * right now
+ * @serving: the virtual channel currently being served by this physical
+ * channel
  */
 struct pl08x_phy_chan {
 	unsigned int id;
@@ -92,11 +101,6 @@
 	spinlock_t lock;
 	int signal;
 	struct pl08x_dma_chan *serving;
-	u32 csrc;
-	u32 cdst;
-	u32 clli;
-	u32 cctl;
-	u32 ccfg;
 };
 
 /**
@@ -108,26 +112,23 @@
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
 	enum dma_data_direction	direction;
-	struct pl08x_bus_data srcbus;
-	struct pl08x_bus_data dstbus;
-	int len;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
 	dma_addr_t llis_bus;
-	void *llis_va;
-	struct pl08x_channel_data *cd;
-	bool active;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
 	/*
 	 * Settings to be put into the physical channel when we
-	 * trigger this txd
+	 * trigger this txd.  Other registers are in llis_va[0].
 	 */
-	u32 csrc;
-	u32 cdst;
-	u32 clli;
-	u32 cctl;
+	u32 ccfg;
 };
 
 /**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual
- * channel states
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
  * @PL08X_CHAN_IDLE: the channel is idle
  * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
  * channel and is running a transfer on it
@@ -147,6 +148,8 @@
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @chan: wrappped abstract channel
  * @phychan: the physical channel utilized by this channel, if there is one
+ * @phychan_hold: if non-zero, hold on to the physical channel even if we
+ * have no pending entries
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
@@ -154,53 +157,49 @@
  * @runtime_direction: current direction of this channel according to
  * runtime config
  * @lc: last completed transaction on this channel
- * @desc_list: queued transactions pending on this channel
+ * @pend_list: queued transactions pending on this channel
  * @at: active transaction on this channel
- * @lockflags: sometimes we let a lock last between two function calls,
- * especially prep/submit, and then we need to store the IRQ flags
- * in the channel state, here
  * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
  * @state: whether the channel is idle, paused, running etc
  * @slave: whether this channel is a device (slave) or for memcpy
- * @waiting: a TX descriptor on this channel which is waiting for
- * a physical channel to become available
+ * @waiting: a TX descriptor on this channel which is waiting for a physical
+ * channel to become available
  */
 struct pl08x_dma_chan {
 	struct dma_chan chan;
 	struct pl08x_phy_chan *phychan;
+	int phychan_hold;
 	struct tasklet_struct tasklet;
 	char *name;
 	struct pl08x_channel_data *cd;
 	dma_addr_t runtime_addr;
 	enum dma_data_direction	runtime_direction;
-	atomic_t last_issued;
 	dma_cookie_t lc;
-	struct list_head desc_list;
+	struct list_head pend_list;
 	struct pl08x_txd *at;
-	unsigned long lockflags;
 	spinlock_t lock;
-	void *host;
+	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
 	struct pl08x_txd *waiting;
 };
 
 /**
- * struct pl08x_platform_data - the platform configuration for the
- * PL08x PrimeCells.
+ * struct pl08x_platform_data - the platform configuration for the PL08x
+ * PrimeCells.
  * @slave_channels: the channels defined for the different devices on the
  * platform, all inclusive, including multiplexed channels. The available
- * physical channels will be multiplexed around these signals as they
- * are requested, just enumerate all possible channels.
- * @get_signal: request a physical signal to be used for a DMA
- * transfer immediately: if there is some multiplexing or similar blocking
- * the use of the channel the transfer can be denied by returning
- * less than zero, else it returns the allocated signal number
+ * physical channels will be multiplexed around these signals as they are
+ * requested, just enumerate all possible channels.
+ * @get_signal: request a physical signal to be used for a DMA transfer
+ * immediately: if there is some multiplexing or similar blocking the use
+ * of the channel the transfer can be denied by returning less than zero,
+ * else it returns the allocated signal number
  * @put_signal: indicate to the platform that this physical signal is not
  * running any DMA transfer and multiplexing can be recycled
- * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the
- * LLI addresses are on 0/1 Master 1/2.
+ * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
+ * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
  */
 struct pl08x_platform_data {
 	struct pl08x_channel_data *slave_channels;
@@ -208,6 +207,8 @@
 	struct pl08x_channel_data memcpy_channel;
 	int (*get_signal)(struct pl08x_dma_chan *);
 	void (*put_signal)(struct pl08x_dma_chan *);
+	u8 lli_buses;
+	u8 mem_buses;
 };
 
 #ifdef CONFIG_AMBA_PL08X
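
Usage note: platform code now tells the PL08x driver which AHB masters reach each peripheral, the LLIs and memory, instead of encoding the bus in the LLI address. A board-file sketch; the signal numbers, register address and mux hooks are illustrative, and num_slave_channels/min_signal come from parts of the header outside this hunk:

    static struct pl08x_channel_data board_slave_channels[] = {
        {
            .bus_id       = "uart0_tx",          /* matched by the client driver */
            .min_signal   = 0,
            .max_signal   = 0,
            .muxval       = 0,
            .cctl         = 0,
            .addr         = 0x80124000,          /* hypothetical UART data register */
            .single       = false,               /* bursts (the default) */
            .periph_buses = PL08X_AHB2,          /* the UART sits on AHB2 */
        },
    };

    static struct pl08x_platform_data board_pl08x_pdata = {
        .slave_channels     = board_slave_channels,
        .num_slave_channels = ARRAY_SIZE(board_slave_channels),
        .get_signal         = board_get_dma_signal,    /* hypothetical mux hooks */
        .put_signal         = board_put_dma_signal,
        .lli_buses          = PL08X_AHB1,              /* fetch LLIs over AHB1 */
        .mem_buses          = PL08X_AHB1 | PL08X_AHB2, /* memory visible on both */
    };
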
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
index 8b49ac4..e02982f 100644
--- a/include/linux/auto_fs4.h
+++ b/include/linux/auto_fs4.h
@@ -24,7 +24,7 @@
 #define AUTOFS_MIN_PROTO_VERSION	3
 #define AUTOFS_MAX_PROTO_VERSION	5
 
-#define AUTOFS_PROTO_SUBVERSION		1
+#define AUTOFS_PROTO_SUBVERSION		2
 
 /* Mask for expire behaviour */
 #define AUTOFS_EXP_IMMEDIATE		1
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
index 904dec7..a69554e 100644
--- a/include/linux/bfin_mac.h
+++ b/include/linux/bfin_mac.h
@@ -24,6 +24,7 @@
 	const unsigned short *mac_peripherals;
 	int phy_mode;
 	unsigned int phy_mask;
+	unsigned short vlan1_mask, vlan2_mask;
 };
 
 #endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 64a7114..c3d6512 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -25,7 +25,7 @@
 /*
  * This structure is used to hold the arguments that are used when loading binaries.
  */
-struct linux_binprm{
+struct linux_binprm {
 	char buf[BINPRM_BUF_SIZE];
 #ifdef CONFIG_MMU
 	struct vm_area_struct *vma;
@@ -93,7 +93,6 @@
 	int (*load_shlib)(struct file *);
 	int (*core_dump)(struct coredump_params *cprm);
 	unsigned long min_coredump;	/* minimal dump size */
-	int hasvdso;
 };
 
 extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
@@ -113,7 +112,7 @@
 
 extern int prepare_binprm(struct linux_binprm *);
 extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
+extern int search_binary_handler(struct linux_binprm *, struct pt_regs *);
 extern int flush_old_exec(struct linux_binprm * bprm);
 extern void setup_new_exec(struct linux_binprm * bprm);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 36ab42c..d5063e1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -115,6 +115,7 @@
 	void *elevator_private3;
 
 	struct gendisk *rq_disk;
+	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
 	unsigned long long start_time_ns;
@@ -646,7 +647,6 @@
 
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
@@ -699,7 +699,7 @@
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
@@ -1088,7 +1088,6 @@
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1136,7 +1135,6 @@
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
-extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
 extern void throtl_shutdown_timer_wq(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
@@ -1146,7 +1144,6 @@
 
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
-static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
@@ -1256,6 +1253,9 @@
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*direct_access) (struct block_device *, sector_t,
 						void **, unsigned long *);
+	unsigned int (*check_events) (struct gendisk *disk,
+				      unsigned int clearing);
+	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
 	int (*media_changed) (struct gendisk *);
 	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
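
Usage note: ->check_events() is the event-based replacement for polling ->media_changed(): it returns the pending events from the DISK_EVENT_* set (declared in linux/genhd.h), and @clearing names the ones the caller is about to consume. A sketch of a removable-media driver's callback; the my_* names are illustrative:

    static unsigned int my_check_events(struct gendisk *disk, unsigned int clearing)
    {
        struct my_disk *md = disk->private_data;        /* hypothetical driver data */

        /* Report a media change the hardware latched since the last check. */
        if (my_hw_test_and_clear_media_change(md))      /* hypothetical hw query */
            return DISK_EVENT_MEDIA_CHANGE;

        return 0;
    }

    static const struct block_device_operations my_fops = {
        .owner        = THIS_MODULE,
        .check_events = my_check_events,    /* ->media_changed() can be dropped */
        /* .open, .release, .ioctl, ... */
    };
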
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3395cf7..b22fb0d 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -245,7 +245,6 @@
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
 extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
-extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
 
 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
 
diff --git a/include/linux/caif/Kbuild b/include/linux/caif/Kbuild
new file mode 100644
index 0000000..a9cf250
--- /dev/null
+++ b/include/linux/caif/Kbuild
@@ -0,0 +1,2 @@
+header-y += caif_socket.h
+header-y += if_caif.h
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index f389e31..fb45919 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -28,8 +28,6 @@
 
 void cdev_del(struct cdev *);
 
-int cdev_index(struct inode *inode);
-
 void cd_forget(struct inode *);
 
 extern struct backing_dev_info directly_mappable_cdev_bdi;
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 78e9047..35eae4b 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -946,6 +946,8 @@
 /* device-related storage */
 	unsigned int options	: 30;	/* options flags */
 	unsigned mc_flags	: 2;	/* media change buffer flags */
+	unsigned int vfs_events;	/* cached events for vfs path */
+	unsigned int ioctl_events;	/* cached events for ioctl path */
     	int use_count;                  /* number of times device opened */
     	char name[20];                  /* name of the device type */
 /* per-device flags */
@@ -965,6 +967,8 @@
 	int (*open) (struct cdrom_device_info *, int);
 	void (*release) (struct cdrom_device_info *);
 	int (*drive_status) (struct cdrom_device_info *, int);
+	unsigned int (*check_events) (struct cdrom_device_info *cdi,
+				      unsigned int clearing, int slot);
 	int (*media_changed) (struct cdrom_device_info *, int);
 	int (*tray_move) (struct cdrom_device_info *, int);
 	int (*lock_door) (struct cdrom_device_info *, int);
@@ -993,6 +997,8 @@
 extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
 extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
 		       fmode_t mode, unsigned int cmd, unsigned long arg);
+extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
+				       unsigned int clearing);
 extern int cdrom_media_changed(struct cdrom_device_info *);
 
 extern int register_cdrom(struct cdrom_device_info *cdi);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index c3c74ae..09dcc0c 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -43,6 +43,10 @@
 #define CEPH_FEATURE_NOSRCADDR      (1<<1)
 #define CEPH_FEATURE_MONCLOCKCHECK  (1<<2)
 #define CEPH_FEATURE_FLOCK          (1<<3)
+#define CEPH_FEATURE_SUBSCRIBE2     (1<<4)
+#define CEPH_FEATURE_MONNAMES       (1<<5)
+#define CEPH_FEATURE_RECONNECT_SEQ  (1<<6)
+#define CEPH_FEATURE_DIRLAYOUTHASH  (1<<7)
 
 
 /*
@@ -55,10 +59,10 @@
 	__le32 fl_stripe_count;    /* over this many objects */
 	__le32 fl_object_size;     /* until objects are this big, then move to
 				      new objects */
-	__le32 fl_cas_hash;        /* 0 = none; 1 = sha256 */
+	__le32 fl_cas_hash;        /* UNUSED.  0 = none; 1 = sha256 */
 
 	/* pg -> disk layout */
-	__le32 fl_object_stripe_unit;  /* for per-object parity, if any */
+	__le32 fl_object_stripe_unit;  /* UNUSED.  for per-object parity, if any */
 
 	/* object -> pg layout */
 	__le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
@@ -69,6 +73,12 @@
 
 int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
 
+struct ceph_dir_layout {
+	__u8   dl_dir_hash;   /* see ceph_hash.h for ids */
+	__u8   dl_unused1;
+	__u16  dl_unused2;
+	__u32  dl_unused3;
+} __attribute__ ((packed));
 
 /* crypto algorithms */
 #define CEPH_CRYPTO_NONE 0x0
@@ -457,7 +467,7 @@
 	struct ceph_timespec rctime;
 	struct ceph_frag_tree_head fragtree;  /* (must be at end of struct) */
 } __attribute__ ((packed));
-/* followed by frag array, then symlink string, then xattr blob */
+/* followed by frag array, symlink string, dir layout, xattr blob */
 
 /* reply_lease follows dname, and reply_inode */
 struct ceph_mds_reply_lease {
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index a108b42..31d91a6 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -110,17 +110,12 @@
 
 /*
  * ceph_connection state bit flags
- *
- * QUEUED and BUSY are used together to ensure that only a single
- * thread is currently opening, reading or writing data to the socket.
  */
 #define LOSSYTX         0  /* we can close channel or drop messages on errors */
 #define CONNECTING	1
 #define NEGOTIATING	2
 #define KEEPALIVE_PENDING      3
 #define WRITE_PENDING	4  /* we have data ready to send */
-#define QUEUED          5  /* there is work queued on this connection */
-#define BUSY            6  /* work is being done */
 #define STANDBY		8  /* no outgoing messages, socket closed.  we keep
 			    * the ceph_connection around to maintain shared
 			    * state with the peer. */
@@ -128,6 +123,7 @@
 #define SOCK_CLOSED	11 /* socket state changed to closed */
 #define OPENING         13 /* open connection w/ (possibly new) peer */
 #define DEAD            14 /* dead, about to kfree */
+#define BACKOFF         15
 
 /*
  * A single connection with another host.
@@ -165,7 +161,6 @@
 	struct list_head out_queue;
 	struct list_head out_sent;   /* sending or sent but unacked */
 	u64 out_seq;		     /* last message queued for send */
-	bool out_keepalive_pending;
 
 	u64 in_seq, in_seq_acked;  /* last message received, acked */
 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed4ba11..ce104e3 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -564,7 +564,7 @@
 /*
  * To iterate across the tasks in a cgroup:
  *
- * 1) call cgroup_iter_start to intialize an iterator
+ * 1) call cgroup_iter_start to initialize an iterator
  *
  * 2) call cgroup_iter_next() to retrieve member tasks until it
  *    returns NULL or until you want to end the iteration
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 5ac5155..dfa2ed4 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -11,6 +11,9 @@
 /* The full zone was compacted */
 #define COMPACT_COMPLETE	3
 
+#define COMPACT_MODE_DIRECT_RECLAIM	0
+#define COMPACT_MODE_KSWAPD		1
+
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -21,7 +24,12 @@
 
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *mask);
+			int order, gfp_t gfp_mask, nodemask_t *mask,
+			bool sync);
+extern unsigned long compaction_suitable(struct zone *zone, int order);
+extern unsigned long compact_zone_order(struct zone *zone, int order,
+					gfp_t gfp_mask, bool sync,
+					int compact_mode);
 
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
@@ -54,7 +62,20 @@
 
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
+{
+	return COMPACT_CONTINUE;
+}
+
+static inline unsigned long compaction_suitable(struct zone *zone, int order)
+{
+	return COMPACT_SKIPPED;
+}
+
+static inline unsigned long compact_zone_order(struct zone *zone, int order,
+					       gfp_t gfp_mask, bool sync,
+					       int compact_mode)
 {
 	return COMPACT_CONTINUE;
 }
diff --git a/include/linux/console.h b/include/linux/console.h
index 9774fe6..7453cfd 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -139,9 +139,9 @@
 extern void register_console(struct console *);
 extern int unregister_console(struct console *);
 extern struct console *console_drivers;
-extern void acquire_console_sem(void);
-extern int try_acquire_console_sem(void);
-extern void release_console_sem(void);
+extern void console_lock(void);
+extern int console_trylock(void);
+extern void console_unlock(void);
 extern void console_conditional_schedule(void);
 extern void console_unblank(void);
 extern struct tty_driver *console_device(int *);
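
Usage note: acquire_console_sem()/try_acquire_console_sem()/release_console_sem() become console_lock()/console_trylock()/console_unlock(); callers convert mechanically. A sketch of a typical framebuffer-style critical section with the new names; the wrapper is illustrative:

    static void my_fb_suspend(struct fb_info *info, int state)
    {
        console_lock();                 /* was: acquire_console_sem() */
        fb_set_suspend(info, state);
        console_unlock();               /* was: release_console_sem() */
    }
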
diff --git a/include/linux/cper.h b/include/linux/cper.h
index bf972f8..3104aaf 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -39,10 +39,12 @@
  * Severity difinition for error_severity in struct cper_record_header
  * and section_severity in struct cper_section_descriptor
  */
-#define CPER_SEV_RECOVERABLE			0x0
-#define CPER_SEV_FATAL				0x1
-#define CPER_SEV_CORRECTED			0x2
-#define CPER_SEV_INFORMATIONAL			0x3
+enum {
+	CPER_SEV_RECOVERABLE,
+	CPER_SEV_FATAL,
+	CPER_SEV_CORRECTED,
+	CPER_SEV_INFORMATIONAL,
+};
 
 /*
  * Validation bits difinition for validation_bits in struct
@@ -201,6 +203,47 @@
 	UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F,	\
 		0xDF, 0xAA, 0x84, 0xEC)
 
+#define CPER_PROC_VALID_TYPE			0x0001
+#define CPER_PROC_VALID_ISA			0x0002
+#define CPER_PROC_VALID_ERROR_TYPE		0x0004
+#define CPER_PROC_VALID_OPERATION		0x0008
+#define CPER_PROC_VALID_FLAGS			0x0010
+#define CPER_PROC_VALID_LEVEL			0x0020
+#define CPER_PROC_VALID_VERSION			0x0040
+#define CPER_PROC_VALID_BRAND_INFO		0x0080
+#define CPER_PROC_VALID_ID			0x0100
+#define CPER_PROC_VALID_TARGET_ADDRESS		0x0200
+#define CPER_PROC_VALID_REQUESTOR_ID		0x0400
+#define CPER_PROC_VALID_RESPONDER_ID		0x0800
+#define CPER_PROC_VALID_IP			0x1000
+
+#define CPER_MEM_VALID_ERROR_STATUS		0x0001
+#define CPER_MEM_VALID_PHYSICAL_ADDRESS		0x0002
+#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK	0x0004
+#define CPER_MEM_VALID_NODE			0x0008
+#define CPER_MEM_VALID_CARD			0x0010
+#define CPER_MEM_VALID_MODULE			0x0020
+#define CPER_MEM_VALID_BANK			0x0040
+#define CPER_MEM_VALID_DEVICE			0x0080
+#define CPER_MEM_VALID_ROW			0x0100
+#define CPER_MEM_VALID_COLUMN			0x0200
+#define CPER_MEM_VALID_BIT_POSITION		0x0400
+#define CPER_MEM_VALID_REQUESTOR_ID		0x0800
+#define CPER_MEM_VALID_RESPONDER_ID		0x1000
+#define CPER_MEM_VALID_TARGET_ID		0x2000
+#define CPER_MEM_VALID_ERROR_TYPE		0x4000
+
+#define CPER_PCIE_VALID_PORT_TYPE		0x0001
+#define CPER_PCIE_VALID_VERSION			0x0002
+#define CPER_PCIE_VALID_COMMAND_STATUS		0x0004
+#define CPER_PCIE_VALID_DEVICE_ID		0x0008
+#define CPER_PCIE_VALID_SERIAL_NUMBER		0x0010
+#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS	0x0020
+#define CPER_PCIE_VALID_CAPABILITY		0x0040
+#define CPER_PCIE_VALID_AER_INFO		0x0080
+
+#define CPER_PCIE_SLOT_SHIFT			3
+
 /*
  * All tables and structs must be byte-packed to match CPER
  * specification, since the tables are provided by the system BIOS
@@ -306,6 +349,41 @@
 	__u8	error_type;
 };
 
+struct cper_sec_pcie {
+	__u64		validation_bits;
+	__u32		port_type;
+	struct {
+		__u8	minor;
+		__u8	major;
+		__u8	reserved[2];
+	}		version;
+	__u16		command;
+	__u16		status;
+	__u32		reserved;
+	struct {
+		__u16	vendor_id;
+		__u16	device_id;
+		__u8	class_code[3];
+		__u8	function;
+		__u8	device;
+		__u16	segment;
+		__u8	bus;
+		__u8	secondary_bus;
+		__u16	slot;
+		__u8	reserved;
+	}		device_id;
+	struct {
+		__u32	lower;
+		__u32	upper;
+	}		serial_number;
+	struct {
+		__u16	secondary_status;
+		__u16	control;
+	}		bridge;
+	__u8	capability[60];
+	__u8	aer_info[96];
+};
+
 /* Reset to default packing */
 #pragma pack()
 
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 1be416b..36719ea 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -47,13 +47,7 @@
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
-#define CPUIDLE_FLAG_CHECK_BM	(0x02) /* BM activity will exit state */
-#define CPUIDLE_FLAG_POLL	(0x10) /* no latency, no savings */
-#define CPUIDLE_FLAG_SHALLOW	(0x20) /* low latency, minimal savings */
-#define CPUIDLE_FLAG_BALANCED	(0x40) /* medium latency, moderate savings */
-#define CPUIDLE_FLAG_DEEP	(0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
-#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h
index 6fc2bed..0e7bf27 100644
--- a/include/linux/cramfs_fs.h
+++ b/include/linux/cramfs_fs.h
@@ -84,9 +84,11 @@
 				| CRAMFS_FLAG_WRONG_SIGNATURE \
 				| CRAMFS_FLAG_SHIFTED_ROOT_OFFSET )
 
+#ifdef __KERNEL__
 /* Uncompression interfaces to the underlying zlib */
 int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen);
 int cramfs_uncompress_init(void);
 void cramfs_uncompress_exit(void);
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
index d5a1d48..6fe2114 100644
--- a/include/linux/cs5535.h
+++ b/include/linux/cs5535.h
@@ -103,14 +103,20 @@
 #define GPIO_POSITIVE_EDGE_STS	0x48
 #define GPIO_NEGATIVE_EDGE_STS	0x4C
 
+#define GPIO_FLTR7_AMOUNT	0xD8
+
 #define GPIO_MAP_X		0xE0
 #define GPIO_MAP_Y		0xE4
 #define GPIO_MAP_Z		0xE8
 #define GPIO_MAP_W		0xEC
 
+#define GPIO_FE7_SEL		0xF7
+
 void cs5535_gpio_set(unsigned offset, unsigned int reg);
 void cs5535_gpio_clear(unsigned offset, unsigned int reg);
 int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+int cs5535_gpio_set_irq(unsigned group, unsigned irq);
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme);
 
 /* MFGPTs */
 
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 59fcd24..f958c19 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -167,6 +167,8 @@
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *, char *, int);
+	struct vfsmount *(*d_automount)(struct path *);
+	int (*d_manage)(struct dentry *, bool, bool);
 } ____cacheline_aligned;
 
 /*
@@ -205,13 +207,18 @@
 
 #define DCACHE_CANT_MOUNT	0x0100
 #define DCACHE_GENOCIDE		0x0200
-#define DCACHE_MOUNTED		0x0400	/* is a mountpoint */
 
 #define DCACHE_OP_HASH		0x1000
 #define DCACHE_OP_COMPARE	0x2000
 #define DCACHE_OP_REVALIDATE	0x4000
 #define DCACHE_OP_DELETE	0x8000
 
+#define DCACHE_MOUNTED		0x10000	/* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT	0x20000	/* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT	0x40000	/* manage transit from this dirent */
+#define DCACHE_MANAGED_DENTRY \
+	(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
+
 extern seqlock_t rename_lock;
 
 static inline int dname_external(struct dentry *dentry)
@@ -399,7 +406,12 @@
 
 extern void dput(struct dentry *);
 
-static inline int d_mountpoint(struct dentry *dentry)
+static inline bool d_managed(struct dentry *dentry)
+{
+	return dentry->d_flags & DCACHE_MANAGED_DENTRY;
+}
+
+static inline bool d_mountpoint(struct dentry *dentry)
 {
 	return dentry->d_flags & DCACHE_MOUNTED;
 }
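
A hedged sketch (not part of this patch) of how the two new hooks are meant to be wired up: a filesystem supplies ->d_automount() via its dentry_operations, and the VFS calls it when a path walk crosses a dentry flagged DCACHE_NEED_AUTOMOUNT. The myfs_* names and the use of vfs_kern_mount() with a dummy mount source are illustrative assumptions.

static struct file_system_type myfs_fs_type;    /* hypothetical fs type */

/* Called by the VFS at an automount point; the returned vfsmount is
 * spliced in at path->dentry. */
static struct vfsmount *myfs_d_automount(struct path *path)
{
        return vfs_kern_mount(&myfs_fs_type, 0, "myfs-submount", NULL);
}

static const struct dentry_operations myfs_dentry_ops = {
        .d_automount = myfs_d_automount,
};
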
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 68cd248..66900e3 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -101,8 +101,8 @@
  */
 struct dcb_app {
 	__u8	selector;
-	__u32	protocol;
 	__u8	priority;
+	__u16	protocol;
 };
 
 struct dcbmsg {
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
index f9b06cc..8c0aef1 100644
--- a/include/linux/decompress/inflate.h
+++ b/include/linux/decompress/inflate.h
@@ -1,9 +1,6 @@
 #ifndef INFLATE_H
 #define INFLATE_H
 
-/* Other housekeeping constants */
-#define INBUFSIZ 4096
-
 int gunzip(unsigned char *inbuf, int len,
 	   int(*fill)(void*, unsigned int),
 	   int(*flush)(void*, unsigned int),
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index ad5ec1d..4cb72b9 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -61,8 +61,6 @@
 #define large_malloc(a) malloc(a)
 #define large_free(a) free(a)
 
-#define set_error_fn(x)
-
 #define INIT
 
 #else /* STATIC */
@@ -72,6 +70,7 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 /* Use defines rather than static inline in order to avoid spurious
@@ -84,9 +83,6 @@
 #define large_malloc(a) vmalloc(a)
 #define large_free(a) vfree(a)
 
-static void(*error)(char *m);
-#define set_error_fn(x) error = x;
-
 #define INIT __init
 #define STATIC
 
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
new file mode 100644
index 0000000..41728fc
--- /dev/null
+++ b/include/linux/decompress/unxz.h
@@ -0,0 +1,19 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef DECOMPRESS_UNXZ_H
+#define DECOMPRESS_UNXZ_H
+
+int unxz(unsigned char *in, int in_size,
+	 int (*fill)(void *dest, unsigned int size),
+	 int (*flush)(void *src, unsigned int size),
+	 unsigned char *out, int *in_used,
+	 void (*error)(char *x));
+
+#endif
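
A minimal sketch of calling the new decompressor, assuming the usual decompress_fn convention where NULL fill/flush callbacks select buffer-to-buffer operation, a zero return means success, and in_used reports how much input was consumed; my_unpack() and my_xz_error() are made-up names.

static void my_xz_error(char *msg)
{
        pr_err("unxz: %s\n", msg);
}

static int my_unpack(unsigned char *in, int in_len, unsigned char *out)
{
        int in_used = 0;

        if (unxz(in, in_len, NULL, NULL, out, &in_used, my_xz_error))
                return -EINVAL;
        return in_used;         /* bytes of compressed input consumed */
}
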
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 2970022..272496d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -193,6 +193,13 @@
 	char *error;
 };
 
+/* Each target can link one of these into the table */
+struct dm_target_callbacks {
+	struct list_head list;
+	int (*congested_fn) (struct dm_target_callbacks *, int);
+	void (*unplug_fn)(struct dm_target_callbacks *);
+};
+
 int dm_register_target(struct target_type *t);
 void dm_unregister_target(struct target_type *t);
 
@@ -269,6 +276,11 @@
 			sector_t start, sector_t len, char *params);
 
 /*
+ * Target_ctr should call this if it needs to add any callbacks.
+ */
+void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
+
+/*
  * Finally call this to make the table ready for use.
  */
 int dm_table_complete(struct dm_table *t);
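
A hedged sketch of the new callback hook in use: a hypothetical target's constructor registers a congested_fn so the dm core can query it. The my_* names are illustrative, and ti->table being usable inside the constructor is an assumption rather than something this hunk shows.

static int my_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
        return 0;       /* this sketch never reports congestion */
}

static struct dm_target_callbacks my_callbacks = {
        .congested_fn = my_congested,
};

static int my_target_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        dm_table_add_target_callbacks(ti->table, &my_callbacks);
        return 0;
}
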
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 49eab36..78bbf47 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -44,7 +44,7 @@
  * Remove a device, destroy any tables.
  *
  * DM_DEV_RENAME:
- * Rename a device.
+ * Rename a device or set its uuid if none was previously supplied.
  *
  * DM_SUSPEND:
  * This performs both suspend and resume, depending which flag is
@@ -267,9 +267,9 @@
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	18
-#define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2010-06-29)"
+#define DM_VERSION_MINOR	19
+#define DM_VERSION_PATCHLEVEL	1
+#define DM_VERSION_EXTRA	"-ioctl (2011-01-07)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
@@ -322,4 +322,10 @@
  */
 #define DM_UEVENT_GENERATED_FLAG	(1 << 13) /* Out */
 
+/*
+ * If set, rename changes the uuid, not the name.  Only permitted
+ * if no uuid was previously supplied: an existing uuid cannot be changed.
+ */
+#define DM_UUID_FLAG			(1 << 14) /* In */
+
 #endif				/* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h
index 0c3c3a2..eeace7d 100644
--- a/include/linux/dm-log-userspace.h
+++ b/include/linux/dm-log-userspace.h
@@ -370,6 +370,16 @@
 #define DM_ULOG_REQUEST_TYPE(request_type) \
 	(DM_ULOG_REQUEST_MASK & (request_type))
 
+/*
+ * DM_ULOG_REQUEST_VERSION is incremented when there is a
+ * change to the way information is passed between kernel
+ * and userspace.  This could be a structure change of
+ * dm_ulog_request or a change in the way requests are
+ * issued/handled.  Changes are outlined here:
+ *	version 1:  Initial implementation
+ */
+#define DM_ULOG_REQUEST_VERSION 1
+
 struct dm_ulog_request {
 	/*
 	 * The local unique identifier (luid) and the universally unique
@@ -383,8 +393,9 @@
 	 */
 	uint64_t luid;
 	char uuid[DM_UUID_LEN];
-	char padding[7];        /* Padding because DM_UUID_LEN = 129 */
+	char padding[3];        /* Padding because DM_UUID_LEN = 129 */
 
+	uint32_t version;       /* See DM_ULOG_REQUEST_VERSION */
 	int32_t error;          /* Used to report back processing errors */
 
 	uint32_t seq;           /* Sequence number for request */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8cd00ad..9bebd7f 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -532,7 +532,7 @@
 	return dmaengine_device_control(chan, DMA_RESUME, 0);
 }
 
-static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
+static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
 {
 	return desc->tx_submit(desc);
 }
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a90b389..1c70028 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -44,34 +44,24 @@
 extern int ddebug_remove_module(const char *mod_name);
 
 #define dynamic_pr_debug(fmt, ...) do {					\
-	__label__ do_printk;						\
-	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
 		_DPRINTK_FLAGS_DEFAULT };				\
-	JUMP_LABEL(&descriptor.enabled, do_printk);			\
-	goto out;							\
-do_printk:								\
-	printk(KERN_DEBUG pr_fmt(fmt),	##__VA_ARGS__);			\
-out:	;								\
+	if (unlikely(descriptor.enabled))				\
+		printk(KERN_DEBUG pr_fmt(fmt),	##__VA_ARGS__);		\
 	} while (0)
 
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {				\
-	__label__ do_printk;						\
-	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
 		_DPRINTK_FLAGS_DEFAULT };				\
-	JUMP_LABEL(&descriptor.enabled, do_printk);			\
-	goto out;							\
-do_printk:								\
-	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);		\
-out:	;								\
+	if (unlikely(descriptor.enabled))				\
+		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 	} while (0)
 
 #else
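
Call sites are untouched by this rework; a small sketch of what they still look like (dev and irq are placeholders): pr_debug()/dev_dbg() keep expanding to these macros under CONFIG_DYNAMIC_DEBUG, they just compile to a plain unlikely() test of descriptor.enabled instead of a jump label.

static void my_reset(struct device *dev, int irq)
{
        /* emitted only when this call site is enabled via
         * <debugfs>/dynamic_debug/control */
        pr_debug("resetting %s (irq %d)\n", dev_name(dev), irq);
}
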
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f16a010..ab68f78 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -48,8 +48,10 @@
 
 
 
-extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
+extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+					    unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
+#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
 /**
  * is_zero_ether_addr - Determine if the given Ethernet address is all zeros.
@@ -97,6 +99,17 @@
 }
 
 /**
+ * is_unicast_ether_addr - Determine if the Ethernet address is unicast
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is a unicast address.
+ */
+static inline int is_unicast_ether_addr(const u8 *addr)
+{
+	return !is_multicast_ether_addr(addr);
+}
+
+/**
  * is_valid_ether_addr - Determine if the given Ethernet address is valid
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
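
A small sketch of the renamed allocator: a driver that wants asymmetric queue counts passes them separately, while the old alloc_etherdev_mq(priv, n) keeps working as alloc_etherdev_mqs(priv, n, n). struct my_priv is a placeholder private-data type.

struct my_priv {
        int dummy;
};

static struct net_device *my_alloc_netdev(void)
{
        /* four TX queues, a single RX queue */
        return alloc_etherdev_mqs(sizeof(struct my_priv), 4, 1);
}
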
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 6ce1bca..65990ef 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -724,21 +724,30 @@
 					 ~EXT3_DIR_ROUND)
 #define EXT3_MAX_REC_LEN		((1<<16)-1)
 
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
 static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == EXT3_MAX_REC_LEN)
 		return 1 << 16;
+#endif
 	return len;
 }
 
 static inline __le16 ext3_rec_len_to_disk(unsigned len)
 {
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT3_MAX_REC_LEN);
 	else if (len > (1 << 16))
 		BUG();
+#endif
 	return cpu_to_le16(len);
 }
 
@@ -856,6 +865,7 @@
 extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
 extern void ext3_init_block_alloc_info(struct inode *);
 extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
+extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
 
 /* dir.c */
 extern int ext3_check_dir_entry(const char *, struct inode *,
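
A worked example of the encoding the rec_len helpers above implement (illustrative, not part of the patch): a 64 KiB directory record does not fit in 16 bits, so it is stored on disk as EXT3_MAX_REC_LEN and expanded again on read; with pages smaller than 64 KiB that special case can never occur and is now compiled out.

static void my_rec_len_example(void)
{
        __le16 on_disk = ext3_rec_len_to_disk(1 << 16);      /* EXT3_MAX_REC_LEN */
        unsigned int len = ext3_rec_len_from_disk(on_disk);  /* 65536 again */

        (void)len;
}
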
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 3c15510..73e0b62 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -2,6 +2,7 @@
 #define _FALLOC_H_
 
 #define FALLOC_FL_KEEP_SIZE	0x01 /* default is extend size */
+#define FALLOC_FL_PUNCH_HOLE	0x02 /* de-allocates range */
 
 #ifdef __KERNEL__
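
A hedged user-space sketch of the new flag: punch a 1 MiB hole at offset 4096 without changing the file size. That FALLOC_FL_KEEP_SIZE must accompany FALLOC_FL_PUNCH_HOLE, and that the underlying filesystem supports hole punching at all, are assumptions not guaranteed by this hunk.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>

static int punch_hole(int fd)
{
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      4096, 1 << 20) < 0) {
                perror("fallocate");
                return -1;
        }
        return 0;
}
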
 
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index afc00af..a562fa5 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -45,6 +45,7 @@
 #define AT_REMOVEDIR		0x200   /* Remove directory instead of
                                            unlinking file.  */
 #define AT_SYMLINK_FOLLOW	0x400   /* Follow symbolic links.  */
+#define AT_NO_AUTOMOUNT		0x800	/* Suppress terminal automount traversal */
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/fec.h b/include/linux/fec.h
index 5d3523d..bcff455 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -3,6 +3,8 @@
  * Copyright (c) 2009 Orex Computed Radiography
  *   Baruch Siach <baruch@tkos.co.il>
  *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
  * Header file for the FEC platform data
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,6 +18,7 @@
 
 struct fec_platform_data {
 	phy_interface_t phy;
+	unsigned char mac[ETH_ALEN];
 };
 
 #endif
diff --git a/include/linux/file.h b/include/linux/file.h
index b1e1297..e85baeb 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -23,7 +23,7 @@
 
 static inline void fput_light(struct file *file, int fput_needed)
 {
-	if (unlikely(fput_needed))
+	if (fput_needed)
 		fput(file);
 }
 
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 68c642d..59ea406 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -273,7 +273,7 @@
  * @closure:	See &fw_cdev_event_common;
  *		set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
  * @type:	%FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
- * @completed:	Offset into the receive buffer; data before this offest is valid
+ * @completed:	Offset into the receive buffer; data before this offset is valid
  *
  * This event is sent in multichannel contexts (context type
  * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index da7e52b..1effc8b 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -109,7 +109,7 @@
 }
 
 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f84d992..e38b50a4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -242,6 +242,7 @@
 #define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
 #define S_PRIVATE	512	/* Inode is fs-internal */
 #define S_IMA		1024	/* Inode has an associated IMA struct */
+#define S_AUTOMOUNT	2048	/* Automount/referral quasi-directory */
 
 /*
  * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -277,6 +278,7 @@
 #define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
 #define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
 #define IS_IMA(inode)		((inode)->i_flags & S_IMA)
+#define IS_AUTOMOUNT(inode)	((inode)->i_flags & S_AUTOMOUNT)
 
 /* the read-only stuff doesn't really belong here, but any other place is
    probably as bad and I don't want to create yet another include file. */
@@ -647,6 +649,7 @@
 	spinlock_t		private_lock;	/* for use by the address_space */
 	struct list_head	private_list;	/* ditto */
 	struct address_space	*assoc_mapping;	/* ditto */
+	struct mutex		unmap_mutex;    /* to protect unmapping */
 } __attribute__((aligned(sizeof(long))));
 	/*
 	 * On most architectures that alignment is already the case; but
@@ -664,8 +667,9 @@
 	void *			bd_claiming;
 	void *			bd_holder;
 	int			bd_holders;
+	bool			bd_write_holder;
 #ifdef CONFIG_SYSFS
-	struct list_head	bd_holder_list;
+	struct list_head	bd_holder_disks;
 #endif
 	struct block_device *	bd_contains;
 	unsigned		bd_block_size;
@@ -1065,7 +1069,6 @@
 	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *);
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
 	int (*fl_change)(struct file_lock **, int);
 };
 
@@ -1423,6 +1426,7 @@
 	 * generic_show_options()
 	 */
 	char __rcu *s_options;
+	const struct dentry_operations *s_d_op; /* default d_op for dentries */
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
@@ -1480,8 +1484,8 @@
 	unsigned int fi_flags;		/* Flags as passed from user */
 	unsigned int fi_extents_mapped;	/* Number of mapped extents */
 	unsigned int fi_extents_max;	/* Size of fiemap_extent array */
-	struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
-						 * array */
+	struct fiemap_extent __user *fi_extents_start; /* Start of
+							fiemap_extent array */
 };
 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
 			    u64 phys, u64 len, u32 flags);
@@ -1549,6 +1553,8 @@
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
 	int (*setlease)(struct file *, long, struct file_lock **);
+	long (*fallocate)(struct file *file, int mode, loff_t offset,
+			  loff_t len);
 };
 
 #define IPERM_FLAG_RCU	0x0001
@@ -1579,8 +1585,6 @@
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
-	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
-			  loff_t len);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
 } ____cacheline_aligned;
@@ -1834,7 +1838,9 @@
 			int (*set)(struct super_block *,void *),
 			void *data);
 extern struct dentry *mount_pseudo(struct file_system_type *, char *,
-	const struct super_operations *ops, unsigned long);
+	const struct super_operations *ops,
+	const struct dentry_operations *dops,
+	unsigned long);
 extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
 
 static inline void sb_mark_dirty(struct super_block *sb)
@@ -2016,7 +2022,6 @@
 extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
-extern struct block_device *open_by_devnum(dev_t, fmode_t);
 extern void invalidate_bdev(struct block_device *);
 extern int sync_blockdev(struct block_device *bdev);
 extern struct super_block *freeze_bdev(struct block_device *);
@@ -2047,16 +2052,26 @@
 extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
 extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *, fmode_t);
-extern int blkdev_put(struct block_device *, fmode_t);
-extern int bd_claim(struct block_device *, void *);
-extern void bd_release(struct block_device *);
+extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
+extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+					       void *holder);
+extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
+					      void *holder);
+extern int blkdev_put(struct block_device *bdev, fmode_t mode);
 #ifdef CONFIG_SYSFS
-extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
-extern void bd_release_from_disk(struct block_device *, struct gendisk *);
+extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+extern void bd_unlink_disk_holder(struct block_device *bdev,
+				  struct gendisk *disk);
 #else
-#define bd_claim_by_disk(bdev, holder, disk)	bd_claim(bdev, holder)
-#define bd_release_from_disk(bdev, disk)	bd_release(bdev)
+static inline int bd_link_disk_holder(struct block_device *bdev,
+				      struct gendisk *disk)
+{
+	return 0;
+}
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+					 struct gendisk *disk)
+{
+}
 #endif
 #endif
 
@@ -2092,8 +2107,6 @@
 extern const char *__bdevname(dev_t, char *buffer);
 extern const char *bdevname(struct block_device *bdev, char *buffer);
 extern struct block_device *lookup_bdev(const char *);
-extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
-extern void close_bdev_exclusive(struct block_device *, fmode_t);
 extern void blkdev_show(struct seq_file *,off_t);
 
 #else
@@ -2127,7 +2140,7 @@
 				   struct block_device *bdev);
 extern int revalidate_disk(struct gendisk *);
 extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *);
+extern int __invalidate_device(struct block_device *, bool);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
@@ -2213,6 +2226,7 @@
 
 extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
+extern void address_space_init_once(struct address_space *mapping);
 extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
@@ -2543,9 +2557,12 @@
 		   void __user *buffer, size_t *lenp, loff_t *ppos);
 int __init get_filesystem_list(char *buf);
 
+#define __FMODE_EXEC		((__force int) FMODE_EXEC)
+#define __FMODE_NONOTIFY	((__force int) FMODE_NONOTIFY)
+
 #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
-					    (flag & FMODE_NONOTIFY)))
+					    (flag & __FMODE_NONOTIFY)))
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_FS_H */
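
A hedged sketch of the replacement for the removed open_bdev_exclusive()/bd_claim() pair: the holder token goes straight into blkdev_get_by_path(), and the same mode is handed back to blkdev_put(). Using FMODE_EXCL to request the exclusive claim is an assumption based on the new holder-aware API rather than something spelled out in this hunk; callers are expected to check IS_ERR() on the result.

static struct block_device *my_claim_bdev(const char *path, void *holder)
{
        return blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                  holder);
}

static void my_release_bdev(struct block_device *bdev)
{
        blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
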
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 7a7b9c1..c0d5f69 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -115,6 +115,7 @@
 #else
 	struct disk_stats dkstats;
 #endif
+	atomic_t ref;
 	struct rcu_head rcu_head;
 };
 
@@ -127,6 +128,11 @@
 #define GENHD_FL_EXT_DEVT			64 /* allow extended devt */
 #define GENHD_FL_NATIVE_CAPACITY		128
 
+enum {
+	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
+	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
+};
+
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 
@@ -143,6 +149,8 @@
 	struct hd_struct __rcu *part[];
 };
 
+struct disk_events;
+
 struct gendisk {
 	/* major, first_minor and minors are input parameters only,
 	 * don't use directly.  Use disk_devt() and disk_max_parts().
@@ -154,6 +162,10 @@
 
 	char disk_name[DISK_NAME_LEN];	/* name of major driver */
 	char *(*devnode)(struct gendisk *gd, mode_t *mode);
+
+	unsigned int events;		/* supported events */
+	unsigned int async_events;	/* async events, subset of all */
+
 	/* Array of pointers to partitions indexed by partno.
 	 * Protected with matching bdev lock but stat and other
 	 * non-critical accesses use RCU.  Always access through
@@ -171,9 +183,8 @@
 	struct kobject *slave_dir;
 
 	struct timer_rand_state *random;
-
 	atomic_t sync_io;		/* RAID */
-	struct work_struct async_notify;
+	struct disk_events *ev;
 #ifdef  CONFIG_BLK_DEV_INTEGRITY
 	struct blk_integrity *integrity;
 #endif
@@ -395,7 +406,6 @@
 /* block/genhd.c */
 extern void add_disk(struct gendisk *disk);
 extern void del_gendisk(struct gendisk *gp);
-extern void unlink_gendisk(struct gendisk *gp);
 extern struct gendisk *get_gendisk(dev_t dev, int *partno);
 extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
 
@@ -407,6 +417,11 @@
 	return disk->part0.policy;
 }
 
+extern void disk_block_events(struct gendisk *disk);
+extern void disk_unblock_events(struct gendisk *disk);
+extern void disk_check_events(struct gendisk *disk);
+extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
+
 /* drivers/char/random.c */
 extern void add_disk_randomness(struct gendisk *disk);
 extern void rand_initialize_disk(struct gendisk *disk);
@@ -583,6 +598,7 @@
 						     sector_t len, int flags,
 						     struct partition_meta_info
 						       *info);
+extern void __delete_partition(struct hd_struct *);
 extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);
 
@@ -611,6 +627,29 @@
 			       const char *buf, size_t count);
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+static inline void hd_ref_init(struct hd_struct *part)
+{
+	atomic_set(&part->ref, 1);
+	smp_mb();
+}
+
+static inline void hd_struct_get(struct hd_struct *part)
+{
+	atomic_inc(&part->ref);
+	smp_mb__after_atomic_inc();
+}
+
+static inline int hd_struct_try_get(struct hd_struct *part)
+{
+	return atomic_inc_not_zero(&part->ref);
+}
+
+static inline void hd_struct_put(struct hd_struct *part)
+{
+	if (atomic_dec_and_test(&part->ref))
+		__delete_partition(part);
+}
+
 #else /* CONFIG_BLOCK */
 
 static inline void printk_all_partitions(void) { }
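
A small sketch of the new disk-events plumbing (illustrative): a removable-media driver advertises the events it can generate before add_disk(), and kicks an immediate check from its media-change interrupt instead of the old async_notify work.

static void my_register_disk(struct gendisk *disk)
{
        disk->events = DISK_EVENT_MEDIA_CHANGE;
        add_disk(disk);
}

static void my_media_change_irq(struct gendisk *disk)
{
        disk_check_events(disk);
}
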
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f54adfc..dca3176 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -34,6 +34,7 @@
 #else
 #define ___GFP_NOTRACK		0
 #endif
+#define ___GFP_NO_KSWAPD	0x400000u
 
 /*
  * GFP bitmasks..
@@ -81,13 +82,15 @@
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
+#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
+
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
  * allocations that simply cannot be supported (e.g. page tables).
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23	/* Room for 23 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -106,6 +109,9 @@
 				 __GFP_HARDWALL | __GFP_HIGHMEM | \
 				 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
+#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
+			 __GFP_NO_KSWAPD)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
@@ -243,7 +249,7 @@
 					 ((1 << ZONES_SHIFT) - 1);
 
 	if (__builtin_constant_p(bit))
-		MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+		BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
 	else {
 #ifdef CONFIG_DEBUG_VM
 		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
@@ -325,14 +331,20 @@
 {
 	return alloc_pages_current(gfp_mask, order);
 }
-extern struct page *alloc_page_vma(gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr);
+extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+			struct vm_area_struct *vma, unsigned long addr,
+			int node);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
+	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+#define alloc_page_vma(gfp_mask, vma, addr)			\
+	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
+	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index e41f7dd..32720ba 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 
 struct device;
+struct gpio;
 struct gpio_chip;
 
 /*
@@ -34,6 +35,17 @@
 	return -ENOSYS;
 }
 
+static inline int gpio_request_one(unsigned gpio,
+					unsigned long flags, const char *label)
+{
+	return -ENOSYS;
+}
+
+static inline int gpio_request_array(struct gpio *array, size_t num)
+{
+	return -ENOSYS;
+}
+
 static inline void gpio_free(unsigned gpio)
 {
 	might_sleep();
@@ -42,6 +54,14 @@
 	WARN_ON(1);
 }
 
+static inline void gpio_free_array(struct gpio *array, size_t num)
+{
+	might_sleep();
+
+	/* GPIO can never have been requested */
+	WARN_ON(1);
+}
+
 static inline int gpio_direction_input(unsigned gpio)
 {
 	return -ENOSYS;
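
The hunk above only shows the !GPIOLIB stubs; a hedged sketch of the real helpers in use, assuming gpiolib provides the GPIOF_* initial-state flags and the struct gpio layout (gpio, flags, label). The GPIO numbers and labels are made up.

static struct gpio my_leds[] = {
        { 42, GPIOF_OUT_INIT_LOW,  "led:green" },
        { 43, GPIOF_OUT_INIT_HIGH, "led:red" },
};

static int my_leds_init(void)
{
        return gpio_request_array(my_leds, ARRAY_SIZE(my_leds));
}

static void my_leds_exit(void)
{
        gpio_free_array(my_leds, ARRAY_SIZE(my_leds));
}
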
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 20b9801..d91c25e 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -402,7 +402,7 @@
 	__u16 dpad;			/* dpad input code */
 };
 
-#define HID_MAX_FIELDS 64
+#define HID_MAX_FIELDS 128
 
 struct hid_report {
 	struct list_head list;
@@ -593,6 +593,7 @@
  * @report_fixup: called before report descriptor parsing (NULL means nop)
  * @input_mapping: invoked on input registering before mapping a usage
  * @input_mapped: invoked on input registering after mapping a usage
+ * @feature_mapping: invoked on feature registering
  * @suspend: invoked on suspend (NULL means nop)
  * @resume: invoked on resume if device was not reset (NULL means nop)
  * @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -636,6 +637,9 @@
 	int (*input_mapped)(struct hid_device *hdev,
 			struct hid_input *hidinput, struct hid_field *field,
 			struct hid_usage *usage, unsigned long **bit, int *max);
+	void (*feature_mapping)(struct hid_device *hdev,
+			struct hid_input *hidinput, struct hid_field *field,
+			struct hid_usage *usage);
 #ifdef CONFIG_PM
 	int (*suspend)(struct hid_device *hdev, pm_message_t message);
 	int (*resume)(struct hid_device *hdev);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
new file mode 100644
index 0000000..df29c8f
--- /dev/null
+++ b/include/linux/huge_mm.h
@@ -0,0 +1,180 @@
+#ifndef _LINUX_HUGE_MM_H
+#define _LINUX_HUGE_MM_H
+
+extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
+				      struct vm_area_struct *vma,
+				      unsigned long address, pmd_t *pmd,
+				      unsigned int flags);
+extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+			 struct vm_area_struct *vma);
+extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			       unsigned long address, pmd_t *pmd,
+			       pmd_t orig_pmd);
+extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
+extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
+					  unsigned long addr,
+					  pmd_t *pmd,
+					  unsigned int flags);
+extern int zap_huge_pmd(struct mmu_gather *tlb,
+			struct vm_area_struct *vma,
+			pmd_t *pmd);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec);
+extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, pgprot_t newprot);
+
+enum transparent_hugepage_flag {
+	TRANSPARENT_HUGEPAGE_FLAG,
+	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
+#ifdef CONFIG_DEBUG_VM
+	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
+#endif
+};
+
+enum page_check_address_pmd_flag {
+	PAGE_CHECK_ADDRESS_PMD_FLAG,
+	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
+	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
+};
+extern pmd_t *page_check_address_pmd(struct page *page,
+				     struct mm_struct *mm,
+				     unsigned long address,
+				     enum page_check_address_pmd_flag flag);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define HPAGE_PMD_SHIFT HPAGE_SHIFT
+#define HPAGE_PMD_MASK HPAGE_MASK
+#define HPAGE_PMD_SIZE HPAGE_SIZE
+
+#define transparent_hugepage_enabled(__vma)				\
+	((transparent_hugepage_flags &					\
+	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
+	  (transparent_hugepage_flags &					\
+	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
+	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
+	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
+	 !is_vma_temporary_stack(__vma))
+#define transparent_hugepage_defrag(__vma)				\
+	((transparent_hugepage_flags &					\
+	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
+	 (transparent_hugepage_flags &					\
+	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
+	  (__vma)->vm_flags & VM_HUGEPAGE))
+#ifdef CONFIG_DEBUG_VM
+#define transparent_hugepage_debug_cow()				\
+	(transparent_hugepage_flags &					\
+	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
+#else /* CONFIG_DEBUG_VM */
+#define transparent_hugepage_debug_cow() 0
+#endif /* CONFIG_DEBUG_VM */
+
+extern unsigned long transparent_hugepage_flags;
+extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+			  pmd_t *dst_pmd, pmd_t *src_pmd,
+			  struct vm_area_struct *vma,
+			  unsigned long addr, unsigned long end);
+extern int handle_pte_fault(struct mm_struct *mm,
+			    struct vm_area_struct *vma, unsigned long address,
+			    pte_t *pte, pmd_t *pmd, unsigned int flags);
+extern int split_huge_page(struct page *page);
+extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
+#define split_huge_page_pmd(__mm, __pmd)				\
+	do {								\
+		pmd_t *____pmd = (__pmd);				\
+		if (unlikely(pmd_trans_huge(*____pmd)))			\
+			__split_huge_page_pmd(__mm, ____pmd);		\
+	}  while (0)
+#define wait_split_huge_page(__anon_vma, __pmd)				\
+	do {								\
+		pmd_t *____pmd = (__pmd);				\
+		spin_unlock_wait(&(__anon_vma)->root->lock);		\
+		/*							\
+		 * spin_unlock_wait() is just a loop in C and so the	\
+		 * CPU can reorder anything around it.			\
+		 */							\
+		smp_mb();						\
+		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
+		       pmd_trans_huge(*____pmd));			\
+	} while (0)
+#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+#if HPAGE_PMD_ORDER > MAX_ORDER
+#error "hugepages can't be allocated by the buddy allocator"
+#endif
+extern int hugepage_madvise(struct vm_area_struct *vma,
+			    unsigned long *vm_flags, int advice);
+extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+				    unsigned long start,
+				    unsigned long end,
+				    long adjust_next);
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+					 unsigned long start,
+					 unsigned long end,
+					 long adjust_next)
+{
+	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+		return;
+	__vma_adjust_trans_huge(vma, start, end, adjust_next);
+}
+static inline int hpage_nr_pages(struct page *page)
+{
+	if (unlikely(PageTransHuge(page)))
+		return HPAGE_PMD_NR;
+	return 1;
+}
+static inline struct page *compound_trans_head(struct page *page)
+{
+	if (PageTail(page)) {
+		struct page *head;
+		head = page->first_page;
+		smp_rmb();
+		/*
+		 * head may be a dangling pointer.
+		 * __split_huge_page_refcount clears PageTail before
+		 * overwriting first_page, so if PageTail is still
+		 * there it means the head pointer isn't dangling.
+		 */
+		if (PageTail(page))
+			return head;
+	}
+	return page;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
+#define HPAGE_PMD_MASK ({ BUG(); 0; })
+#define HPAGE_PMD_SIZE ({ BUG(); 0; })
+
+#define hpage_nr_pages(x) 1
+
+#define transparent_hugepage_enabled(__vma) 0
+
+#define transparent_hugepage_flags 0UL
+static inline int split_huge_page(struct page *page)
+{
+	return 0;
+}
+#define split_huge_page_pmd(__mm, __pmd)	\
+	do { } while (0)
+#define wait_split_huge_page(__anon_vma, __pmd)	\
+	do { } while (0)
+#define compound_trans_head(page) compound_head(page)
+static inline int hugepage_madvise(struct vm_area_struct *vma,
+				   unsigned long *vm_flags, int advice)
+{
+	BUG();
+	return 0;
+}
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+					 unsigned long start,
+					 unsigned long end,
+					 long adjust_next)
+{
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* _LINUX_HUGE_MM_H */
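
A hedged sketch of the pattern split_huge_page_pmd() exists for: a page-table walker that does not understand huge pmds splits them back to regular pte mappings before descending (the macro is a no-op when THP is disabled). The walker shape here is illustrative.

static void my_walk_pmd(struct mm_struct *mm, pud_t *pud, unsigned long addr)
{
        pmd_t *pmd = pmd_offset(pud, addr);

        split_huge_page_pmd(mm, pmd);
        if (pmd_none_or_clear_bad(pmd))
                return;
        /* pmd now refers to an ordinary pte page table */
}
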
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 6042228..294169e 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -959,7 +959,7 @@
 /* block-ack parameters */
 #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
 #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
 
diff --git a/include/linux/if_alg.h b/include/linux/if_alg.h
new file mode 100644
index 0000000..0f9acce
--- /dev/null
+++ b/include/linux/if_alg.h
@@ -0,0 +1,40 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _LINUX_IF_ALG_H
+#define _LINUX_IF_ALG_H
+
+#include <linux/types.h>
+
+struct sockaddr_alg {
+	__u16	salg_family;
+	__u8	salg_type[14];
+	__u32	salg_feat;
+	__u32	salg_mask;
+	__u8	salg_name[64];
+};
+
+struct af_alg_iv {
+	__u32	ivlen;
+	__u8	iv[0];
+};
+
+/* Socket options */
+#define ALG_SET_KEY			1
+#define ALG_SET_IV			2
+#define ALG_SET_OP			3
+
+/* Operations */
+#define ALG_OP_DECRYPT			0
+#define ALG_OP_ENCRYPT			1
+
+#endif	/* _LINUX_IF_ALG_H */
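
A hedged user-space sketch of the new socket interface: hash "abc" with the kernel's sha1 implementation. AF_ALG (38) may not be in the C library's headers yet, hence the fallback define; error handling is omitted for brevity.

#include <sys/socket.h>
#include <linux/if_alg.h>
#include <unistd.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif

static int sha1_abc(unsigned char digest[20])
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha1",
        };
        int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        int op;

        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        op = accept(tfm, NULL, NULL);
        write(op, "abc", 3);
        read(op, digest, 20);
        close(op);
        close(tfm);
        return 0;
}
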
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index f7e73c3..dd3f201 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -103,7 +103,7 @@
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
diff --git a/include/linux/input.h b/include/linux/input.h
index c4e9d91..e428382 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -802,6 +802,7 @@
 #define SW_CAMERA_LENS_COVER	0x09  /* set = lens covered */
 #define SW_KEYPAD_SLIDE		0x0a  /* set = keypad slide out */
 #define SW_FRONT_PROXIMITY	0x0b  /* set = front proximity sensor active */
+#define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_MAX			0x0f
 #define SW_CNT			(SW_MAX+1)
 
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
new file mode 100644
index 0000000..1affd0d
--- /dev/null
+++ b/include/linux/input/as5011.h
@@ -0,0 +1,20 @@
+#ifndef _AS5011_H
+#define _AS5011_H
+
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct as5011_platform_data {
+	unsigned int button_gpio;
+	unsigned int axis_irq; /* irq number */
+	unsigned long axis_irqflags;
+	char xp, xn; /* threshold for x axis */
+	char yp, yn; /* threshold for y axis */
+};
+
+#endif /* _AS5011_H */
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
index e470d38..05e0328 100644
--- a/include/linux/input/bu21013.h
+++ b/include/linux/input/bu21013.h
@@ -12,8 +12,6 @@
  * @cs_en:	pointer to the cs enable function
  * @cs_dis:	pointer to the cs disable function
  * @irq_read_val:    pointer to read the pen irq value function
- * @x_max_res: xmax resolution
- * @y_max_res: ymax resolution
  * @touch_x_max: touch x max
  * @touch_y_max: touch y max
  * @cs_pin: chip select pin
@@ -29,8 +27,6 @@
 	int (*cs_en)(int reset_pin);
 	int (*cs_dis)(int reset_pin);
 	int (*irq_read_val)(void);
-	int x_max_res;
-	int y_max_res;
 	int touch_x_max;
 	int touch_y_max;
 	unsigned int cs_pin;
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 6974746..fe7c4b9 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -4,8 +4,8 @@
 #include <linux/types.h>
 #include <linux/input.h>
 
-#define MATRIX_MAX_ROWS		16
-#define MATRIX_MAX_COLS		16
+#define MATRIX_MAX_ROWS		32
+#define MATRIX_MAX_COLS		32
 
 #define KEY(row, col, val)	((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
 				 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 65aae34..045f2f2 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -454,6 +454,44 @@
 /* Validate that the given IPMI address is valid. */
 int ipmi_validate_addr(struct ipmi_addr *addr, int len);
 
+/*
+ * How did the IPMI driver find out about the device?
+ */
+enum ipmi_addr_src {
+	SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+	SI_PCI,	SI_DEVICETREE, SI_DEFAULT
+};
+
+union ipmi_smi_info_union {
+	/*
+	 * the acpi_info element is defined for the SI_ACPI
+	 * address type
+	 */
+	struct {
+		void *acpi_handle;
+	} acpi_info;
+};
+
+struct ipmi_smi_info {
+	enum ipmi_addr_src addr_src;
+
+	/*
+	 * Base device for the interface.  Don't forget to put this when
+	 * you are done.
+	 */
+	struct device *dev;
+
+	/*
+	 * The addr_info provides more detailed info for some IPMI
+	 * devices, depending on the addr_src.  Currently only SI_ACPI
+	 * info is provided.
+	 */
+	union ipmi_smi_info_union addr_info;
+};
+
+/* This is to get the private info of ipmi_smi_t */
+extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+
 #endif /* __KERNEL__ */
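
A small sketch of the new query (illustrative): ask how interface 0 was discovered, and drop the device reference that the comment above says the caller must put.

static void my_report_ipmi_source(void)
{
        struct ipmi_smi_info info;

        if (ipmi_get_smi_info(0, &info))
                return;
        if (info.addr_src == SI_ACPI)
                pr_info("IPMI interface 0 was discovered via ACPI\n");
        put_device(info.dev);
}
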
 
 
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 4b48318..906590a 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/ipmi.h>
 
 /* This file describes the interface for IPMI system management interface
    drivers to bind into the IPMI message handler. */
@@ -86,6 +87,13 @@
 	int (*start_processing)(void       *send_info,
 				ipmi_smi_t new_intf);
 
+	/*
+	 * Get the detailed private info of the low-level interface and store
+	 * it in the ipmi_smi_info structure.  For example, the ACPI device
+	 * handle will be returned for the pnp_acpi IPMI device.
+	 */
+	int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
+
 	/* Called to enqueue an SMI message to be sent.  This
 	   operation is not allowed to fail.  If an error occurs, it
 	   should report back the error in a received message.  It may
diff --git a/include/linux/irq.h b/include/linux/irq.h
index abde252..80fcb53 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -74,7 +74,8 @@
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
-	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+	 IRQ_PER_CPU)
 
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 979c68c..c1a95b7 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -57,7 +57,7 @@
 #endif
 
 	struct timer_rand_state *timer_rand_state;
-	unsigned int		*kstat_irqs;
+	unsigned int __percpu	*kstat_irqs;
 	irq_flow_handler_t	handle_irq;
 	struct irqaction	*action;	/* IRQ action list */
 	unsigned int		status;		/* IRQ status */
@@ -101,13 +101,6 @@
 #define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)
 
 /*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt. If the descriptor is attached to an
  * irqchip-style controller then we call the ->handle_irq() handler,
@@ -115,14 +108,7 @@
  */
 static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
 	desc->handle_irq(irq, desc);
-#else
-	if (likely(desc->handle_irq))
-		desc->handle_irq(irq, desc);
-	else
-		__do_IRQ(irq);
-#endif
 }
 
 static inline void generic_handle_irq(unsigned int irq)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 2ae86aa..27e79c2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -94,7 +94,7 @@
  *
  * This is an opaque datatype.
  **/
-typedef struct handle_s		handle_t;	/* Atomic operation type */
+typedef struct jbd2_journal_handle handle_t;	/* Atomic operation type */
 
 
 /**
@@ -416,7 +416,7 @@
  * in so it can be fixed later.
  */
 
-struct handle_s
+struct jbd2_journal_handle
 {
 	/* Which compound transaction is this update a part of? */
 	transaction_t		*h_transaction;
@@ -1158,6 +1158,22 @@
 	kmem_cache_free(jbd2_handle_cache, handle);
 }
 
+/*
+ * jbd2_inode management (optional, for those file systems that want to use
+ * dynamically allocated jbd2_inode structures)
+ */
+extern struct kmem_cache *jbd2_inode_cache;
+
+static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
+{
+	return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
+}
+
+static inline void jbd2_free_inode(struct jbd2_inode *jinode)
+{
+	kmem_cache_free(jbd2_inode_cache, jinode);
+}
+
 /* Primary revoke support */
 #define JOURNAL_REVOKE_DEFAULT_HASH 256
 extern int	   jbd2_journal_init_revoke(journal_t *, int);
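
A hedged sketch of the new cache in use: a filesystem that no longer embeds struct jbd2_inode in its in-core inode allocates one on demand. jbd2_journal_init_jbd_inode() is the pre-existing initializer and is assumed here; it is not part of this hunk.

static struct jbd2_inode *my_attach_jinode(struct inode *inode)
{
        struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_NOFS);

        if (jinode)
                jbd2_journal_init_jbd_inode(jinode, inode);
        return jinode;
}
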
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d0fbc04..2fe6e84 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -143,9 +143,22 @@
 
 #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
 
-#define abs(x) ({				\
-		long __x = (x);			\
-		(__x < 0) ? -__x : __x;		\
+/*
+ * abs() handles unsigned and signed longs, ints, shorts and chars.  For all
+ * input types abs() returns a signed long.
+ * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
+ * for those.
+ */
+#define abs(x) ({						\
+		long ret;					\
+		if (sizeof(x) == sizeof(long)) {		\
+			long __x = (x);				\
+			ret = (__x < 0) ? -__x : __x;		\
+		} else {					\
+			int __x = (x);				\
+			ret = (__x < 0) ? -__x : __x;		\
+		}						\
+		ret;						\
 	})
 
 #define abs64(x) ({				\
@@ -230,6 +243,8 @@
 extern unsigned long get_taint(void);
 extern int root_mountflags;
 
+extern bool early_boot_irqs_disabled;
+
 /* Values used for system_state */
 extern enum system_states {
 	SYSTEM_BOOTING,
@@ -560,12 +575,6 @@
 	char _f[20-2*sizeof(long)-sizeof(int)];	/* Padding: libc5 uses this.. */
 };
 
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-
-/* Force a compilation error if condition is constant and true */
-#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
-
 /* Force a compilation error if a constant expression is not a power of 2 */
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n)			\
 	BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
@@ -577,6 +586,32 @@
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
 
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ *
+ * The implementation uses gcc's reluctance to create a negative array, but
+ * gcc (as of 4.4) only emits that error for obvious cases (e.g. not arguments
+ * to inline functions).  So as a fallback we use the optimizer; if it can't
+ * prove the condition is false, it will cause a link error on the undefined
+ * "__build_bug_on_failed".  This error message can be harder to track down
+ * though, hence the two different methods.
+ */
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int __build_bug_on_failed;
+#define BUILD_BUG_ON(condition)					\
+	do {							\
+		((void)sizeof(char[1 - 2*!!(condition)]));	\
+		if (condition) __build_bug_on_failed = 1;	\
+	} while(0)
+#endif
+
 /* Trap pasters of __FUNCTION__ at compile-time */
 #define __FUNCTION__ (__func__)
 
@@ -587,6 +622,13 @@
 #define NUMA_BUILD 0
 #endif
 
+/* This helps us avoid #ifdef CONFIG_COMPACTION */
+#ifdef CONFIG_COMPACTION
+#define COMPACTION_BUILD 1
+#else
+#define COMPACTION_BUILD 0
+#endif
+
 /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
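
A one-line sketch of the strengthened macro: a compile-time layout check which, with optimization enabled, now also trips a link error against __build_bug_on_failed when gcc cannot prove the condition false.

static inline void my_check_layout(void)
{
        BUILD_BUG_ON(sizeof(struct list_head) != 2 * sizeof(void *));
}
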
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 44e83ba..0cce2db 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -46,16 +46,14 @@
 extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
-#define kstat_irqs_this_cpu(irq) \
-	(this_cpu_read(kstat.irqs[irq])
 
 struct irq_desc;
 
 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 					    struct irq_desc *desc)
 {
-	kstat_this_cpu.irqs[irq]++;
-	kstat_this_cpu.irqs_sum++;
+	__this_cpu_inc(kstat.irqs[irq]);
+	__this_cpu_inc(kstat.irqs_sum);
 }
 
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
@@ -65,17 +63,18 @@
 #else
 #include <linux/irq.h>
 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
-#define kstat_irqs_this_cpu(DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
-	((DESC)->kstat_irqs[smp_processor_id()]++);\
-	kstat_this_cpu.irqs_sum++; } while (0)
+
+#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
+do {							\
+	__this_cpu_inc(*(DESC)->kstat_irqs);		\
+	__this_cpu_inc(kstat.irqs_sum);			\
+} while (0)
 
 #endif
 
 static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
 {
-	kstat_this_cpu.softirqs[irq]++;
+	__this_cpu_inc(kstat.softirqs[irq]);
 }
 
 static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
new file mode 100644
index 0000000..6b394f0
--- /dev/null
+++ b/include/linux/khugepaged.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_KHUGEPAGED_H
+#define _LINUX_KHUGEPAGED_H
+
+#include <linux/sched.h> /* MMF_VM_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_exit(struct mm_struct *mm);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+
+#define khugepaged_enabled()					       \
+	(transparent_hugepage_flags &				       \
+	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
+	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define khugepaged_always()				\
+	(transparent_hugepage_flags &			\
+	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
+#define khugepaged_req_madv()					\
+	(transparent_hugepage_flags &				\
+	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
+#define khugepaged_defrag()					\
+	(transparent_hugepage_flags &				\
+	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
+
+static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+		return __khugepaged_enter(mm);
+	return 0;
+}
+
+static inline void khugepaged_exit(struct mm_struct *mm)
+{
+	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+		__khugepaged_exit(mm);
+}
+
+static inline int khugepaged_enter(struct vm_area_struct *vma)
+{
+	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
+		if ((khugepaged_always() ||
+		     (khugepaged_req_madv() &&
+		      vma->vm_flags & VM_HUGEPAGE)) &&
+		    !(vma->vm_flags & VM_NOHUGEPAGE))
+			if (__khugepaged_enter(vma->vm_mm))
+				return -ENOMEM;
+	return 0;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	return 0;
+}
+static inline void khugepaged_exit(struct mm_struct *mm)
+{
+}
+static inline int khugepaged_enter(struct vm_area_struct *vma)
+{
+	return 0;
+}
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+{
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* _LINUX_KHUGEPAGED_H */
diff --git a/include/linux/klist.h b/include/linux/klist.h
index e91a4e5..a370ce5 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -22,7 +22,7 @@
 	struct list_head	k_list;
 	void			(*get)(struct klist_node *);
 	void			(*put)(struct klist_node *);
-} __attribute__ ((aligned (4)));
+} __attribute__ ((aligned (sizeof(void *))));
 
 #define KLIST_INIT(_name, _get, _put)					\
 	{ .k_lock	= __SPIN_LOCK_UNLOCKED(_name.k_lock),		\
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 08d7dc4..39f8453 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -76,7 +76,7 @@
 									\
 		_n = (long) &((ptr)->name##_end)			\
 			- (long) &((ptr)->name##_begin);		\
-		MAYBE_BUILD_BUG_ON(_n < 0);				\
+		BUILD_BUG_ON(_n < 0);					\
 									\
 		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
 	} while (0)
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 24b4414..2a0d7d6 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -18,6 +18,10 @@
 	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
 	KMSG_DUMP_KEXEC,
+	KMSG_DUMP_RESTART,
+	KMSG_DUMP_HALT,
+	KMSG_DUMP_POWEROFF,
+	KMSG_DUMP_EMERG,
 };
 
 /**
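
A hedged sketch of a dumper coping with the new reasons: RESTART/HALT/POWEROFF/EMERG fire on ordinary shutdown paths, so a crash logger now has to filter on the reason. The kmsg_dumper callback signature and the kmsg_dump_register(&my_dumper) call assumed at init time are the ones this kernel series provides, not something shown in this hunk.

static enum kmsg_dump_reason my_last_reason;

static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
                    const char *s1, unsigned long l1,
                    const char *s2, unsigned long l2)
{
        if (reason != KMSG_DUMP_PANIC && reason != KMSG_DUMP_OOPS)
                return;         /* ignore plain reboot/halt/poweroff */
        my_last_reason = reason;
}

static struct kmsg_dumper my_dumper = {
        .dump = my_dump,
};
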
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 919ae53..ea2dc1a 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -540,6 +540,7 @@
 #endif
 #define KVM_CAP_PPC_GET_PVINFO 57
 #define KVM_CAP_PPC_IRQ_LEVEL 58
+#define KVM_CAP_ASYNC_PF 59
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a055742..b5021db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/preempt.h>
 #include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -40,6 +42,7 @@
 #define KVM_REQ_KICK               9
 #define KVM_REQ_DEACTIVATE_FPU    10
 #define KVM_REQ_EVENT             11
+#define KVM_REQ_APF_HALT          12
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
 
@@ -74,6 +77,27 @@
 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			      struct kvm_io_device *dev);
 
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+	struct work_struct work;
+	struct list_head link;
+	struct list_head queue;
+	struct kvm_vcpu *vcpu;
+	struct mm_struct *mm;
+	gva_t gva;
+	unsigned long addr;
+	struct kvm_arch_async_pf arch;
+	struct page *page;
+	bool done;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch);
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
+#endif
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -104,6 +128,15 @@
 	gpa_t mmio_phys_addr;
 #endif
 
+#ifdef CONFIG_KVM_ASYNC_PF
+	struct {
+		u32 queued;
+		struct list_head queue;
+		struct list_head done;
+		spinlock_t lock;
+	} async_pf;
+#endif
+
 	struct kvm_vcpu_arch arch;
 };
 
@@ -113,16 +146,19 @@
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
-	struct {
-		unsigned long rmap_pde;
-		int write_count;
-	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	unsigned long *dirty_bitmap_head;
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
 	int id;
@@ -169,6 +205,7 @@
 
 struct kvm_memslots {
 	int nmemslots;
+	u64 generation;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 };
@@ -206,6 +243,10 @@
 
 	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
+	/*
+	 * Update side is protected by irq_lock and,
+	 * if configured, irqfds.lock.
+	 */
 	struct kvm_irq_routing_table __rcu *irq_routing;
 	struct hlist_head mask_notifier_list;
 	struct hlist_head irq_ack_notifier_list;
@@ -216,6 +257,7 @@
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	long tlbs_dirty;
 };
 
 /* The guest did something we don't support. */
@@ -302,7 +344,11 @@
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn);
 int memslot_id(struct kvm *kvm, gfn_t gfn);
@@ -321,18 +367,25 @@
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
@@ -398,7 +451,19 @@
 
 void kvm_free_physmem(struct kvm *kvm);
 
-struct  kvm *kvm_arch_create_vm(void);
+#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+	kfree(kvm);
+}
+#endif
+
+int kvm_arch_init_vm(struct kvm *kvm);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
@@ -414,16 +479,8 @@
 	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
 };
 
-#define KVM_ASSIGNED_MSIX_PENDING		0x1
-struct kvm_guest_msix_entry {
-	u32 vector;
-	u16 entry;
-	u16 flags;
-};
-
 struct kvm_assigned_dev_kernel {
 	struct kvm_irq_ack_notifier ack_notifier;
-	struct work_struct interrupt_work;
 	struct list_head list;
 	int assigned_dev_id;
 	int host_segnr;
@@ -434,13 +491,14 @@
 	bool host_irq_disabled;
 	struct msix_entry *host_msix_entries;
 	int guest_irq;
-	struct kvm_guest_msix_entry *guest_msix_entries;
+	struct msix_entry *guest_msix_entries;
 	unsigned long irq_requested_type;
 	int irq_source_id;
 	int flags;
 	struct pci_dev *dev;
 	struct kvm *kvm;
-	spinlock_t assigned_dev_lock;
+	spinlock_t intx_lock;
+	char irq_name[32];
 };
 
 struct kvm_irq_mask_notifier {
@@ -462,6 +520,8 @@
 				   unsigned long *deliver_bitmask);
 #endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
+		int irq_source_id, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
@@ -603,17 +663,28 @@
 void kvm_eventfd_init(struct kvm *kvm);
 int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
 void kvm_irqfd_release(struct kvm *kvm);
+void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 #else
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
+
 static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
 {
 	return -EINVAL;
 }
 
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+static inline void kvm_irq_routing_update(struct kvm *kvm,
+					  struct kvm_irq_routing_table *irq_rt)
+{
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+}
+#endif
+
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
 	return -ENOSYS;
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 7ac0d4ee..fa7cc72 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -67,4 +67,11 @@
 	u32 dest_id;
 };
 
+struct gfn_to_hva_cache {
+	u64 generation;
+	gpa_t gpa;
+	unsigned long hva;
+	struct kvm_memory_slot *memslot;
+};
+
 #endif /* __KVM_TYPES_H__ */
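The gfn_to_hva_cache above pairs with kvm_gfn_to_hva_cache_init() and kvm_write_guest_cached() declared in kvm_host.h: the gpa-to-hva translation is resolved once and revalidated against the memslot generation on later writes. A minimal usage sketch; the function and variable names are illustrative:

static int publish_u64(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
		       gpa_t gpa, u64 value)
{
	int ret;

	/* one-time lookup; caches gpa, hva, memslot and generation */
	ret = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa);
	if (ret)
		return ret;

	/* fast path: reuses the cached hva while the generation matches */
	return kvm_write_guest_cached(kvm, ghc, &value, sizeof(value));
}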
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
index 38368d7..fd548d2 100644
--- a/include/linux/leds-lp5521.h
+++ b/include/linux/leds-lp5521.h
@@ -42,6 +42,7 @@
 	int	(*setup_resources)(void);
 	void	(*release_resources)(void);
 	void	(*enable)(bool state);
+	const char *label;
 };
 
 #endif /* __LINUX_LP5521_H */
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
index 7967476..2694289 100644
--- a/include/linux/leds-lp5523.h
+++ b/include/linux/leds-lp5523.h
@@ -42,6 +42,7 @@
 	int	(*setup_resources)(void);
 	void	(*release_resources)(void);
 	void	(*enable)(bool state);
+	const	char *label;
 };
 
 #endif /* __LINUX_LP5523_H */
diff --git a/include/linux/list.h b/include/linux/list.h
index 9a5f8a7..3a54266 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -96,6 +96,11 @@
  * in an undefined state.
  */
 #ifndef CONFIG_DEBUG_LIST
+static inline void __list_del_entry(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+}
+
 static inline void list_del(struct list_head *entry)
 {
 	__list_del(entry->prev, entry->next);
@@ -103,6 +108,7 @@
 	entry->prev = LIST_POISON2;
 }
 #else
+extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
@@ -135,7 +141,7 @@
  */
 static inline void list_del_init(struct list_head *entry)
 {
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	INIT_LIST_HEAD(entry);
 }
 
@@ -146,7 +152,7 @@
  */
 static inline void list_move(struct list_head *list, struct list_head *head)
 {
-	__list_del(list->prev, list->next);
+	__list_del_entry(list);
 	list_add(list, head);
 }
 
@@ -158,7 +164,7 @@
 static inline void list_move_tail(struct list_head *list,
 				  struct list_head *head)
 {
-	__list_del(list->prev, list->next);
+	__list_del_entry(list);
 	list_add_tail(list, head);
 }
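The new __list_del_entry() unlinks without poisoning, so list_del_init() and the list_move*() helpers above can re-add the entry immediately, while CONFIG_DEBUG_LIST builds validate the unlink in one place. A small usage sketch with an illustrative item type:

struct item {
	struct list_head node;
	int payload;
};

static void requeue(struct item *it, struct list_head *busy_list)
{
	/* unlink from the current list and append to busy_list;
	 * internally this is now __list_del_entry() + list_add_tail() */
	list_move_tail(&it->node, busy_list);
}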
 
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 9ee97e7..5bad17d1 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -62,7 +62,8 @@
 					struct hlist_bl_node *n)
 {
 	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
-	LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
+	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
+							LIST_BL_LOCKMASK);
 	h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
 }
 
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 34b2b7f..257d377 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -44,14 +44,4 @@
 #define NLMDBG_XDR		0x0100
 #define NLMDBG_ALL		0x7fff
 
-
-/*
- * Support for printing NLM cookies in dprintk()
- */
-#ifdef RPC_DEBUG
-struct nlm_cookie;
-/* Call this function with the BKL held (it uses a static buffer) */
-extern const char *nlmdbg_cookie2a(const struct nlm_cookie *);
-#endif
-
 #endif /* LINUX_LOCKD_DEBUG_H */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 2dee05e..ff9abff 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -202,9 +202,9 @@
  * Lockd client functions
  */
 struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
-void		  nlm_release_call(struct nlm_rqst *);
 int		  nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
 int		  nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
+void		  nlmclnt_release_call(struct nlm_rqst *);
 struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
 void		  nlmclnt_finish_block(struct nlm_wait *block);
 int		  nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
@@ -223,13 +223,14 @@
 					const u32 version,
 					const char *hostname,
 					int noresvport);
+void		  nlmclnt_release_host(struct nlm_host *);
 struct nlm_host  *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 					const char *hostname,
 					const size_t hostname_len);
+void		  nlmsvc_release_host(struct nlm_host *);
 struct rpc_clnt * nlm_bind_host(struct nlm_host *);
 void		  nlm_rebind_host(struct nlm_host *);
 struct nlm_host * nlm_get_host(struct nlm_host *);
-void		  nlm_release_host(struct nlm_host *);
 void		  nlm_shutdown_hosts(void);
 void		  nlm_host_rebooted(const struct nlm_reboot *);
 
@@ -267,6 +268,7 @@
 void		  nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
 					nlm_host_match_fn_t match);
 void		  nlmsvc_grant_reply(struct nlm_cookie *, __be32);
+void		  nlmsvc_release_call(struct nlm_rqst *);
 
 /*
  * File handling for the server personality
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 71c09b26..4aef1dd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -436,16 +436,8 @@
 #endif /* CONFIG_LOCKDEP */
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-extern void early_boot_irqs_off(void);
-extern void early_boot_irqs_on(void);
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
-static inline void early_boot_irqs_off(void)
-{
-}
-static inline void early_boot_irqs_on(void)
-{
-}
 static inline void print_irqtrace_events(struct task_struct *curr)
 {
 }
@@ -522,12 +514,15 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
 # else
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
 # endif
 # define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
 #else
 # define lock_map_acquire(l)			do { } while (0)
+# define lock_map_acquire_read(l)		do { } while (0)
 # define lock_map_release(l)			do { } while (0)
 #endif
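lock_map_acquire_read() gives lockdep_map-based annotations a read-recursive acquisition, so a wait annotated on the same map from within that map's context does not trigger a false recursion report. A hedged annotation sketch; the wrapper name is illustrative:

static void annotate_wait_on(struct lockdep_map *map)
{
	/* read-recursive acquire/release: records the dependency
	 * without forbidding recursion on the same map */
	lock_map_acquire_read(map);
	lock_map_release(map);
}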
 
diff --git a/include/linux/magic.h b/include/linux/magic.h
index ff690d0..62730ea 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -16,6 +16,7 @@
 #define TMPFS_MAGIC		0x01021994
 #define HUGETLBFS_MAGIC 	0x958458f6	/* some random number */
 #define SQUASHFS_MAGIC		0x73717368
+#define ECRYPTFS_SUPER_MAGIC	0xf15f
 #define EFS_SUPER_MAGIC		0x414A53
 #define EXT2_SUPER_MAGIC	0xEF53
 #define EXT3_SUPER_MAGIC	0xEF53
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 54cbbac..5525d37 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -18,6 +18,17 @@
 	} e_index;
 };
 
+struct mb_cache {
+	struct list_head		c_cache_list;
+	const char			*c_name;
+	atomic_t			c_entry_count;
+	int				c_max_entries;
+	int				c_bucket_bits;
+	struct kmem_cache		*c_entry_cache;
+	struct list_head		*c_block_hash;
+	struct list_head		*c_index_hash;
+};
+
 /* Functions on caches */
 
 struct mb_cache *mb_cache_create(const char *, int);
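With struct mb_cache now visible to callers, the cache handle can be embedded or inspected directly; creation still goes through mb_cache_create(). A minimal sketch, assuming the int argument is the hash-bucket order as used by the existing extended-attribute caches:

static struct mb_cache *my_ea_cache;

static int __init my_ea_cache_init(void)
{
	my_ea_cache = mb_cache_create("my_ea_cache", 6);	/* 2^6 buckets, assumed */
	return my_ea_cache ? 0 : -ENOMEM;
}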
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 159a0762..f512e18 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,11 @@
 struct page;
 struct mm_struct;
 
+/* Stats that can be updated by kernel. */
+enum mem_cgroup_page_stat_item {
+	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+};
+
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -93,7 +98,7 @@
 mem_cgroup_prepare_migration(struct page *page,
 	struct page *newpage, struct mem_cgroup **ptr);
 extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
-	struct page *oldpage, struct page *newpage);
+	struct page *oldpage, struct page *newpage, bool migration_ok);
 
 /*
  * For memory reclaim.
@@ -121,11 +126,30 @@
 	return false;
 }
 
-void mem_cgroup_update_file_mapped(struct page *page, int val);
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx,
+				 int val);
+
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, 1);
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, -1);
+}
+
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
+#endif
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct mem_cgroup;
 
@@ -231,8 +255,7 @@
 }
 
 static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
-					struct page *oldpage,
-					struct page *newpage)
+		struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
 
@@ -293,8 +316,13 @@
 {
 }
 
-static inline void mem_cgroup_update_file_mapped(struct page *page,
-							int val)
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
 {
 }
 
@@ -311,6 +339,11 @@
 	return 0;
 }
 
+static inline void mem_cgroup_split_huge_fixup(struct page *head,
+						struct page *tail)
+{
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
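mem_cgroup_update_file_mapped() is replaced by the generic page-stat wrappers, so callers now name the counter explicitly. A sketch of the resulting call pattern; the wrapper function here is illustrative, not a call site from this patch:

static void account_file_mapped(struct page *page, bool mapped)
{
	if (mapped)
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	else
		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
}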
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 31c237a..8122018d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -13,12 +13,16 @@
 #ifdef CONFIG_MEMORY_HOTPLUG
 
 /*
- * Types for free bootmem.
- * The normal smallest mapcount is -1. Here is smaller value than it.
+ * Types for free bootmem stored in page->lru.next. These have to be in
+ * some random range in unsigned long space for debugging purposes.
  */
-#define SECTION_INFO		(-1 - 1)
-#define MIX_SECTION_INFO	(-1 - 2)
-#define NODE_INFO		(-1 - 3)
+enum {
+	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+	MIX_SECTION_INFO,
+	NODE_INFO,
+	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
 
 /*
  * pgdat resizing functions
@@ -161,6 +165,12 @@
 extern void put_page_bootmem(struct page *page);
 #endif
 
+/*
+ * The memory hotplug lock guarantees that 1) all memory hotplug notifier
+ * callbacks are invoked with it held, and 2) offline/online/add/remove
+ * memory operations cannot run concurrently.
+ */
+
 void lock_memory_hotplug(void);
 void unlock_memory_hotplug(void);
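A usage sketch implied by the comment above: code that needs a stable view of online memory brackets itself with the hotplug lock. The walker below is illustrative and its body is elided:

static void walk_stable_memory(void (*visit)(unsigned long pfn))
{
	lock_memory_hotplug();
	/* sections cannot be offlined or onlined while we walk here;
	 * ... call visit(pfn) for the pfns of interest ... */
	unlock_memory_hotplug();
}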
 
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
index d63b605..37f56b7 100644
--- a/include/linux/mfd/ab8500.h
+++ b/include/linux/mfd/ab8500.h
@@ -74,32 +74,37 @@
 #define AB8500_INT_ACC_DETECT_21DB_F	37
 #define AB8500_INT_ACC_DETECT_21DB_R	38
 #define AB8500_INT_GP_SW_ADC_CONV_END	39
-#define AB8500_INT_BTEMP_LOW		72
-#define AB8500_INT_BTEMP_LOW_MEDIUM	73
-#define AB8500_INT_BTEMP_MEDIUM_HIGH	74
-#define AB8500_INT_BTEMP_HIGH		75
-#define AB8500_INT_USB_CHARGER_NOT_OK	81
-#define AB8500_INT_ID_WAKEUP_R		82
-#define AB8500_INT_ID_DET_R1R		84
-#define AB8500_INT_ID_DET_R2R		85
-#define AB8500_INT_ID_DET_R3R		86
-#define AB8500_INT_ID_DET_R4R		87
-#define AB8500_INT_ID_WAKEUP_F		88
-#define AB8500_INT_ID_DET_R1F		90
-#define AB8500_INT_ID_DET_R2F		91
-#define AB8500_INT_ID_DET_R3F		92
-#define AB8500_INT_ID_DET_R4F		93
-#define AB8500_INT_USB_CHG_DET_DONE	94
-#define AB8500_INT_USB_CH_TH_PROT_F	96
-#define AB8500_INT_USB_CH_TH_PROP_R	97
-#define AB8500_INT_MAIN_CH_TH_PROP_F	98
-#define AB8500_INT_MAIN_CH_TH_PROT_R	99
-#define AB8500_INT_USB_CHARGER_NOT_OKF	103
+#define AB8500_INT_ADP_SOURCE_ERROR	72
+#define AB8500_INT_ADP_SINK_ERROR	73
+#define AB8500_INT_ADP_PROBE_PLUG	74
+#define AB8500_INT_ADP_PROBE_UNPLUG	75
+#define AB8500_INT_ADP_SENSE_OFF	76
+#define AB8500_INT_USB_PHY_POWER_ERR	78
+#define AB8500_INT_USB_LINK_STATUS	79
+#define AB8500_INT_BTEMP_LOW		80
+#define AB8500_INT_BTEMP_LOW_MEDIUM	81
+#define AB8500_INT_BTEMP_MEDIUM_HIGH	82
+#define AB8500_INT_BTEMP_HIGH		83
+#define AB8500_INT_USB_CHARGER_NOT_OK	89
+#define AB8500_INT_ID_WAKEUP_R		90
+#define AB8500_INT_ID_DET_R1R		92
+#define AB8500_INT_ID_DET_R2R		93
+#define AB8500_INT_ID_DET_R3R		94
+#define AB8500_INT_ID_DET_R4R		95
+#define AB8500_INT_ID_WAKEUP_F		96
+#define AB8500_INT_ID_DET_R1F		98
+#define AB8500_INT_ID_DET_R2F		99
+#define AB8500_INT_ID_DET_R3F		100
+#define AB8500_INT_ID_DET_R4F		101
+#define AB8500_INT_USB_CHG_DET_DONE	102
+#define AB8500_INT_USB_CH_TH_PROT_F	104
+#define AB8500_INT_USB_CH_TH_PROT_R	105
+#define AB8500_INT_MAIN_CH_TH_PROT_F	106
+#define AB8500_INT_MAIN_CH_TH_PROT_R	107
+#define AB8500_INT_USB_CHARGER_NOT_OKF	111
 
-#define AB8500_NR_IRQS			104
-#define AB8500_NUM_IRQ_REGS		13
-
-#define AB8500_NUM_REGULATORS   15
+#define AB8500_NR_IRQS			112
+#define AB8500_NUM_IRQ_REGS		14
 
 /**
  * struct ab8500 - ab8500 internal structure
@@ -145,7 +150,8 @@
 struct ab8500_platform_data {
 	int irq_base;
 	void (*init) (struct ab8500 *);
-	struct regulator_init_data *regulator[AB8500_NUM_REGULATORS];
+	int num_regulator;
+	struct regulator_init_data *regulator;
 };
 
 extern int __devinit ab8500_init(struct ab8500 *ab8500);
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index cb93d80..835996e 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -39,7 +39,7 @@
 	size_t			data_size;
 
 	/*
-	 * This resources can be specified relatievly to the parent device.
+	 * These resources can be specified relative to the parent device.
 	 * For accessing device you should use resources from device
 	 */
 	int			num_resources;
@@ -47,6 +47,12 @@
 
 	/* don't check for resource conflicts */
 	bool			ignore_resource_conflicts;
+
+	/*
+	 * Disable runtime PM callbacks for this subdevice - see
+	 * pm_runtime_no_callbacks().
+	 */
+	bool			pm_runtime_no_callbacks;
 };
 
 extern int mfd_add_devices(struct device *parent, int id,
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 7363dea..effa5d3 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -159,10 +159,12 @@
 	u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
 	u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
 	int type;
+	bool wakeup;
 };
 
 int max8998_irq_init(struct max8998_dev *max8998);
 void max8998_irq_exit(struct max8998_dev *max8998);
+int max8998_irq_resume(struct max8998_dev *max8998);
 
 extern int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
 extern int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count,
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index f8c9f88..61daa16 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -70,24 +70,43 @@
  * @num_regulators: number of regulators used
  * @irq_base: base IRQ number for max8998, required for IRQs
  * @ono: power onoff IRQ number for max8998
- * @buck1_max_voltage1: BUCK1 maximum alowed voltage register 1
- * @buck1_max_voltage2: BUCK1 maximum alowed voltage register 2
- * @buck2_max_voltage: BUCK2 maximum alowed voltage
+ * @buck_voltage_lock: Do NOT change the values of the following six
+ *   registers set by buck?_voltage?. The voltage of BUCK1/2 cannot be
+ *   set to anything other than the preset values.
+ * @buck1_voltage1: BUCK1 DVS mode 1 voltage register
+ * @buck1_voltage2: BUCK1 DVS mode 2 voltage register
+ * @buck1_voltage3: BUCK1 DVS mode 3 voltage register
+ * @buck1_voltage4: BUCK1 DVS mode 4 voltage register
+ * @buck2_voltage1: BUCK2 DVS mode 1 voltage register
+ * @buck2_voltage2: BUCK2 DVS mode 2 voltage register
  * @buck1_set1: BUCK1 gpio pin 1 to set output voltage
  * @buck1_set2: BUCK1 gpio pin 2 to set output voltage
+ * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2
  * @buck2_set3: BUCK2 gpio pin to set output voltage
+ * @buck2_default_idx: Default for BUCK2 gpio pin.
+ * @wakeup: Allow to wake up from suspend
+ * @rtc_delay: the LP3974 RTC chip bug requires a delay after a register
+ * write before reading it back.
  */
 struct max8998_platform_data {
 	struct max8998_regulator_data	*regulators;
 	int				num_regulators;
 	int				irq_base;
 	int				ono;
-	int                             buck1_max_voltage1;
-	int                             buck1_max_voltage2;
-	int                             buck2_max_voltage;
+	bool				buck_voltage_lock;
+	int				buck1_voltage1;
+	int				buck1_voltage2;
+	int				buck1_voltage3;
+	int				buck1_voltage4;
+	int				buck2_voltage1;
+	int				buck2_voltage2;
 	int				buck1_set1;
 	int				buck1_set2;
+	int				buck1_default_idx;
 	int				buck2_set3;
+	int				buck2_default_idx;
+	bool				wakeup;
+	bool				rtc_delay;
 };
 
 #endif /*  __LINUX_MFD_MAX8998_H */
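A board-code sketch of the reworked platform data; the voltage values, the assumption that they are in microvolts, and the structure name are illustrative rather than taken from this patch:

static struct max8998_platform_data board_max8998_pdata = {
	.buck_voltage_lock	= true,		/* keep the preset DVS voltages */
	.buck1_voltage1		= 1100000,	/* assumed uV */
	.buck1_voltage2		= 1000000,
	.buck1_voltage3		= 1100000,
	.buck1_voltage4		= 1000000,
	.buck2_voltage1		= 1200000,
	.buck2_voltage2		= 1100000,
	.buck1_default_idx	= 0,
	.buck2_default_idx	= 0,
	.wakeup			= true,		/* PMIC may wake the system */
	.rtc_delay		= true,		/* LP3974 read-after-write erratum */
};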
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b4c741e..7d0f3d6 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
  * Copyright 2009-2010 Pengutronix
  * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
  *
@@ -122,39 +123,39 @@
 		unsigned int channel, unsigned int *sample);
 
 
-#define	MC13783_SW_SW1A		0
-#define	MC13783_SW_SW1B		1
-#define	MC13783_SW_SW2A		2
-#define	MC13783_SW_SW2B		3
-#define	MC13783_SW_SW3		4
-#define	MC13783_SW_PLL		5
-#define	MC13783_REGU_VAUDIO	6
-#define	MC13783_REGU_VIOHI	7
-#define	MC13783_REGU_VIOLO	8
-#define	MC13783_REGU_VDIG	9
-#define	MC13783_REGU_VGEN	10
-#define	MC13783_REGU_VRFDIG	11
-#define	MC13783_REGU_VRFREF	12
-#define	MC13783_REGU_VRFCP	13
-#define	MC13783_REGU_VSIM	14
-#define	MC13783_REGU_VESIM	15
-#define	MC13783_REGU_VCAM	16
-#define	MC13783_REGU_VRFBG	17
-#define	MC13783_REGU_VVIB	18
-#define	MC13783_REGU_VRF1	19
-#define	MC13783_REGU_VRF2	20
-#define	MC13783_REGU_VMMC1	21
-#define	MC13783_REGU_VMMC2	22
-#define	MC13783_REGU_GPO1	23
-#define	MC13783_REGU_GPO2	24
-#define	MC13783_REGU_GPO3	25
-#define	MC13783_REGU_GPO4	26
-#define	MC13783_REGU_V1		27
-#define	MC13783_REGU_V2		28
-#define	MC13783_REGU_V3		29
-#define	MC13783_REGU_V4		30
-#define	MC13783_REGU_PWGT1SPI	31
-#define	MC13783_REGU_PWGT2SPI	32
+#define	MC13783_REG_SW1A		0
+#define	MC13783_REG_SW1B		1
+#define	MC13783_REG_SW2A		2
+#define	MC13783_REG_SW2B		3
+#define	MC13783_REG_SW3		4
+#define	MC13783_REG_PLL		5
+#define	MC13783_REG_VAUDIO	6
+#define	MC13783_REG_VIOHI	7
+#define	MC13783_REG_VIOLO	8
+#define	MC13783_REG_VDIG	9
+#define	MC13783_REG_VGEN	10
+#define	MC13783_REG_VRFDIG	11
+#define	MC13783_REG_VRFREF	12
+#define	MC13783_REG_VRFCP	13
+#define	MC13783_REG_VSIM	14
+#define	MC13783_REG_VESIM	15
+#define	MC13783_REG_VCAM	16
+#define	MC13783_REG_VRFBG	17
+#define	MC13783_REG_VVIB	18
+#define	MC13783_REG_VRF1	19
+#define	MC13783_REG_VRF2	20
+#define	MC13783_REG_VMMC1	21
+#define	MC13783_REG_VMMC2	22
+#define	MC13783_REG_GPO1	23
+#define	MC13783_REG_GPO2	24
+#define	MC13783_REG_GPO3	25
+#define	MC13783_REG_GPO4	26
+#define	MC13783_REG_V1		27
+#define	MC13783_REG_V2		28
+#define	MC13783_REG_V3		29
+#define	MC13783_REG_V4		30
+#define	MC13783_REG_PWGT1SPI	31
+#define	MC13783_REG_PWGT2SPI	32
 
 #define MC13783_IRQ_ADCDONE	MC13XXX_IRQ_ADCDONE
 #define MC13783_IRQ_ADCBISDONE	MC13XXX_IRQ_ADCBISDONE
diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h
new file mode 100644
index 0000000..a00f2be
--- /dev/null
+++ b/include/linux/mfd/mc13892.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MC13892_H
+#define __LINUX_MFD_MC13892_H
+
+#include <linux/mfd/mc13xxx.h>
+
+#define MC13892_SW1		0
+#define MC13892_SW2		1
+#define MC13892_SW3		2
+#define MC13892_SW4		3
+#define MC13892_SWBST	4
+#define MC13892_VIOHI	5
+#define MC13892_VPLL	6
+#define MC13892_VDIG	7
+#define MC13892_VSD	8
+#define MC13892_VUSB2	9
+#define MC13892_VVIDEO	10
+#define MC13892_VAUDIO	11
+#define MC13892_VCAM	12
+#define MC13892_VGEN1	13
+#define MC13892_VGEN2	14
+#define MC13892_VGEN3	15
+#define MC13892_VUSB	16
+#define MC13892_GPO1	17
+#define MC13892_GPO2	18
+#define MC13892_GPO3	19
+#define MC13892_GPO4	20
+#define MC13892_PWGT1SPI	21
+#define MC13892_PWGT2SPI	22
+#define MC13892_VCOINCELL	23
+
+#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 085f041..8e70310 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -57,6 +57,10 @@
  * is configured in 4-bit mode.
  */
 #define TMIO_MMC_BLKSZ_2BYTES		(1 << 1)
+/*
+ * Some controllers can support SDIO IRQ signalling.
+ */
+#define TMIO_MMC_SDIO_IRQ		(1 << 2)
 
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -66,6 +70,7 @@
 struct tmio_mmc_dma {
 	void *chan_priv_tx;
 	void *chan_priv_rx;
+	int alignment_shift;
 };
 
 /*
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index a1239c4..903280d 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -245,6 +245,7 @@
 	WM8320 = 0x8320,
 	WM8321 = 0x8321,
 	WM8325 = 0x8325,
+	WM8326 = 0x8326,
 };
 
 struct wm831x {
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index de79bae..ef4f0b6 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -17,6 +17,11 @@
 
 #include <linux/interrupt.h>
 
+enum wm8994_type {
+	WM8994 = 0,
+	WM8958 = 1,
+};
+
 struct regulator_dev;
 struct regulator_bulk_data;
 
@@ -48,6 +53,8 @@
 	struct mutex io_lock;
 	struct mutex irq_lock;
 
+	enum wm8994_type type;
+
 	struct device *dev;
 	int (*read_dev)(struct wm8994 *wm8994, unsigned short reg,
 			int bytes, void *dest);
@@ -64,10 +71,12 @@
 	u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
 
 	/* Used over suspend/resume */
+	bool suspended;
 	u16 ldo_regs[WM8994_NUM_LDO_REGS];
 	u16 gpio_regs[WM8994_NUM_GPIO_REGS];
 
 	struct regulator_dev *dbvdd;
+	int num_supplies;
 	struct regulator_bulk_data *supplies;
 };
 
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index add8a1b..9eab263 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -30,6 +30,8 @@
 
 #define WM8994_DRC_REGS 5
 #define WM8994_EQ_REGS  20
+#define WM8958_MBC_CUTOFF_REGS 20
+#define WM8958_MBC_COEFF_REGS  48
 
 /**
  * DRC configurations are specified with a label and a set of register
@@ -59,6 +61,18 @@
         u16 regs[WM8994_EQ_REGS];
 };
 
+/**
+ * Multiband compressor configurations are specified with a label and
+ * two sets of values to write.  Configurations are expected to be
+ * generated using the multiband compressor configuration panel in
+ * WISCE - see http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8958_mbc_cfg {
+	const char *name;
+	u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS];
+	u16 coeff_regs[WM8958_MBC_COEFF_REGS];
+};
+
 struct wm8994_pdata {
 	int gpio_base;
 
@@ -78,6 +92,9 @@
         int num_retune_mobile_cfgs;
         struct wm8994_retune_mobile_cfg *retune_mobile_cfgs;
 
+	int num_mbc_cfgs;
+	struct wm8958_mbc_cfg *mbc_cfgs;
+
         /* LINEOUT can be differential or single ended */
         unsigned int lineout1_diff:1;
         unsigned int lineout2_diff:1;
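A hedged board-code sketch of handing one MBC configuration to the driver through the new fields; the register values are placeholders, not real WISCE output:

static struct wm8958_mbc_cfg board_mbc_cfgs[] = {
	{
		.name		= "Speaker protection",
		.cutoff_regs	= { 0 },	/* WM8958_MBC_CUTOFF_REGS values from WISCE */
		.coeff_regs	= { 0 },	/* WM8958_MBC_COEFF_REGS values from WISCE */
	},
};

static struct wm8994_pdata board_wm8994_pdata = {
	.num_mbc_cfgs	= ARRAY_SIZE(board_mbc_cfgs),
	.mbc_cfgs	= board_mbc_cfgs,
};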
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 967f62f..be072fa 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -64,12 +64,16 @@
 #define WM8994_LDO_1                            0x3B
 #define WM8994_LDO_2                            0x3C
 #define WM8994_CHARGE_PUMP_1                    0x4C
+#define WM8958_CHARGE_PUMP_2                    0x4D
 #define WM8994_CLASS_W_1                        0x51
 #define WM8994_DC_SERVO_1                       0x54
 #define WM8994_DC_SERVO_2                       0x55
 #define WM8994_DC_SERVO_4                       0x57
 #define WM8994_DC_SERVO_READBACK                0x58
 #define WM8994_ANALOGUE_HP_1                    0x60
+#define WM8958_MIC_DETECT_1                     0xD0
+#define WM8958_MIC_DETECT_2                     0xD1
+#define WM8958_MIC_DETECT_3                     0xD2
 #define WM8994_CHIP_REVISION                    0x100
 #define WM8994_CONTROL_INTERFACE                0x101
 #define WM8994_WRITE_SEQUENCER_CTRL_1           0x110
@@ -109,6 +113,10 @@
 #define WM8994_AIF2DAC_LRCLK                    0x315
 #define WM8994_AIF2DAC_DATA                     0x316
 #define WM8994_AIF2ADC_DATA                     0x317
+#define WM8958_AIF3_CONTROL_1                   0x320
+#define WM8958_AIF3_CONTROL_2                   0x321
+#define WM8958_AIF3DAC_DATA                     0x322
+#define WM8958_AIF3ADC_DATA                     0x323
 #define WM8994_AIF1_ADC1_LEFT_VOLUME            0x400
 #define WM8994_AIF1_ADC1_RIGHT_VOLUME           0x401
 #define WM8994_AIF1_DAC1_LEFT_VOLUME            0x402
@@ -242,6 +250,83 @@
 #define WM8994_INTERRUPT_STATUS_2_MASK          0x739
 #define WM8994_INTERRUPT_CONTROL                0x740
 #define WM8994_IRQ_DEBOUNCE                     0x748
+#define WM8958_DSP2_PROGRAM                     0x900
+#define WM8958_DSP2_CONFIG                      0x901
+#define WM8958_DSP2_MAGICNUM                    0xA00
+#define WM8958_DSP2_RELEASEYEAR                 0xA01
+#define WM8958_DSP2_RELEASEMONTHDAY             0xA02
+#define WM8958_DSP2_RELEASETIME                 0xA03
+#define WM8958_DSP2_VERMAJMIN                   0xA04
+#define WM8958_DSP2_VERBUILD                    0xA05
+#define WM8958_DSP2_EXECCONTROL                 0xA0D
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1     0x2200
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_2     0x2201
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_1     0x2202
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_2     0x2203
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_1     0x2204
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_2     0x2205
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_1     0x2206
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_2     0x2207
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_1     0x2208
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_2     0x2209
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_1     0x220A
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_2     0x220B
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_1     0x220C
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_2     0x220D
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_1     0x220E
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_2     0x220F
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_1     0x2210
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_2     0x2211
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_1        0x2212
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_2        0x2213
+#define WM8958_MBC_BAND_1_K_1                   0x2400
+#define WM8958_MBC_BAND_1_K_2                   0x2401
+#define WM8958_MBC_BAND_1_N1_1                  0x2402
+#define WM8958_MBC_BAND_1_N1_2                  0x2403
+#define WM8958_MBC_BAND_1_N2_1                  0x2404
+#define WM8958_MBC_BAND_1_N2_2                  0x2405
+#define WM8958_MBC_BAND_1_N3_1                  0x2406
+#define WM8958_MBC_BAND_1_N3_2                  0x2407
+#define WM8958_MBC_BAND_1_N4_1                  0x2408
+#define WM8958_MBC_BAND_1_N4_2                  0x2409
+#define WM8958_MBC_BAND_1_N5_1                  0x240A
+#define WM8958_MBC_BAND_1_N5_2                  0x240B
+#define WM8958_MBC_BAND_1_X1_1                  0x240C
+#define WM8958_MBC_BAND_1_X1_2                  0x240D
+#define WM8958_MBC_BAND_1_X2_1                  0x240E
+#define WM8958_MBC_BAND_1_X2_2                  0x240F
+#define WM8958_MBC_BAND_1_X3_1                  0x2410
+#define WM8958_MBC_BAND_1_X3_2                  0x2411
+#define WM8958_MBC_BAND_1_ATTACK_1              0x2412
+#define WM8958_MBC_BAND_1_ATTACK_2              0x2413
+#define WM8958_MBC_BAND_1_DECAY_1               0x2414
+#define WM8958_MBC_BAND_1_DECAY_2               0x2415
+#define WM8958_MBC_BAND_2_K_1                   0x2416
+#define WM8958_MBC_BAND_2_K_2                   0x2417
+#define WM8958_MBC_BAND_2_N1_1                  0x2418
+#define WM8958_MBC_BAND_2_N1_2                  0x2419
+#define WM8958_MBC_BAND_2_N2_1                  0x241A
+#define WM8958_MBC_BAND_2_N2_2                  0x241B
+#define WM8958_MBC_BAND_2_N3_1                  0x241C
+#define WM8958_MBC_BAND_2_N3_2                  0x241D
+#define WM8958_MBC_BAND_2_N4_1                  0x241E
+#define WM8958_MBC_BAND_2_N4_2                  0x241F
+#define WM8958_MBC_BAND_2_N5_1                  0x2420
+#define WM8958_MBC_BAND_2_N5_2                  0x2421
+#define WM8958_MBC_BAND_2_X1_1                  0x2422
+#define WM8958_MBC_BAND_2_X1_2                  0x2423
+#define WM8958_MBC_BAND_2_X2_1                  0x2424
+#define WM8958_MBC_BAND_2_X2_2                  0x2425
+#define WM8958_MBC_BAND_2_X3_1                  0x2426
+#define WM8958_MBC_BAND_2_X3_2                  0x2427
+#define WM8958_MBC_BAND_2_ATTACK_1              0x2428
+#define WM8958_MBC_BAND_2_ATTACK_2              0x2429
+#define WM8958_MBC_BAND_2_DECAY_1               0x242A
+#define WM8958_MBC_BAND_2_DECAY_2               0x242B
+#define WM8958_MBC_B2_PG2_1                     0x242C
+#define WM8958_MBC_B2_PG2_2                     0x242D
+#define WM8958_MBC_B1_PG2_1                     0x242E
+#define WM8958_MBC_B1_PG2_2                     0x242F
 #define WM8994_WRITE_SEQUENCER_0                0x3000
 #define WM8994_WRITE_SEQUENCER_1                0x3001
 #define WM8994_WRITE_SEQUENCER_2                0x3002
@@ -992,6 +1077,12 @@
 /*
  * R6 (0x06) - Power Management (6)
  */
+#define WM8958_AIF3ADC_SRC_MASK                 0x0600  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_SHIFT                     9  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_WIDTH                     2  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF2DAC_SRC_MASK                 0x0180  /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_SHIFT                     7  /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_WIDTH                     2  /* AIF2DAC_SRC - [8:7] */
 #define WM8994_AIF3_TRI                         0x0020  /* AIF3_TRI */
 #define WM8994_AIF3_TRI_MASK                    0x0020  /* AIF3_TRI */
 #define WM8994_AIF3_TRI_SHIFT                        5  /* AIF3_TRI */
@@ -1836,6 +1927,14 @@
 #define WM8994_CP_ENA_WIDTH                          1  /* CP_ENA */
 
 /*
+ * R77 (0x4D) - Charge Pump (2)
+ */
+#define WM8958_CP_DISCH                         0x8000  /* CP_DISCH */
+#define WM8958_CP_DISCH_MASK                    0x8000  /* CP_DISCH */
+#define WM8958_CP_DISCH_SHIFT                       15  /* CP_DISCH */
+#define WM8958_CP_DISCH_WIDTH                        1  /* CP_DISCH */
+
+/*
  * R81 (0x51) - Class W (1)
  */
 #define WM8994_CP_DYN_SRC_SEL_MASK              0x0300  /* CP_DYN_SRC_SEL - [9:8] */
@@ -1952,6 +2051,46 @@
 #define WM8994_HPOUT1R_DLY_WIDTH                     1  /* HPOUT1R_DLY */
 
 /*
+ * R208 (0xD0) - Mic Detect 1
+ */
+#define WM8958_MICD_BIAS_STARTTIME_MASK         0xF000  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_SHIFT            12  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_WIDTH             4  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_RATE_MASK                   0x0F00  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_SHIFT                       8  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_WIDTH                       4  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_DBTIME                      0x0002  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_MASK                 0x0002  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_SHIFT                     1  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_WIDTH                     1  /* MICD_DBTIME */
+#define WM8958_MICD_ENA                         0x0001  /* MICD_ENA */
+#define WM8958_MICD_ENA_MASK                    0x0001  /* MICD_ENA */
+#define WM8958_MICD_ENA_SHIFT                        0  /* MICD_ENA */
+#define WM8958_MICD_ENA_WIDTH                        1  /* MICD_ENA */
+
+/*
+ * R209 (0xD1) - Mic Detect 2
+ */
+#define WM8958_MICD_LVL_SEL_MASK                0x00FF  /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_SHIFT                    0  /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_WIDTH                    8  /* MICD_LVL_SEL - [7:0] */
+
+/*
+ * R210 (0xD2) - Mic Detect 3
+ */
+#define WM8958_MICD_LVL_MASK                    0x07FC  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_SHIFT                        2  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_WIDTH                        9  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_VALID                       0x0002  /* MICD_VALID */
+#define WM8958_MICD_VALID_MASK                  0x0002  /* MICD_VALID */
+#define WM8958_MICD_VALID_SHIFT                      1  /* MICD_VALID */
+#define WM8958_MICD_VALID_WIDTH                      1  /* MICD_VALID */
+#define WM8958_MICD_STS                         0x0001  /* MICD_STS */
+#define WM8958_MICD_STS_MASK                    0x0001  /* MICD_STS */
+#define WM8958_MICD_STS_SHIFT                        0  /* MICD_STS */
+#define WM8958_MICD_STS_WIDTH                        1  /* MICD_STS */
+
+/*
  * R256 (0x100) - Chip Revision
  */
 #define WM8994_CHIP_REV_MASK                    0x000F  /* CHIP_REV - [3:0] */
@@ -2069,6 +2208,14 @@
 /*
  * R520 (0x208) - Clocking (1)
  */
+#define WM8958_DSP2CLK_ENA                      0x4000  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_MASK                 0x4000  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_SHIFT                    14  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_WIDTH                     1  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_SRC                      0x1000  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_MASK                 0x1000  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_SHIFT                    12  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_WIDTH                     1  /* DSP2CLK_SRC */
 #define WM8994_TOCLK_ENA                        0x0010  /* TOCLK_ENA */
 #define WM8994_TOCLK_ENA_MASK                   0x0010  /* TOCLK_ENA */
 #define WM8994_TOCLK_ENA_SHIFT                       4  /* TOCLK_ENA */
@@ -2553,6 +2700,63 @@
 #define WM8994_AIF2ADCR_DAT_INV_WIDTH                1  /* AIF2ADCR_DAT_INV */
 
 /*
+ * R800 (0x320) - AIF3 Control (1)
+ */
+#define WM8958_AIF3_LRCLK_INV                   0x0080  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_MASK              0x0080  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_SHIFT                  7  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_WIDTH                  1  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_WL_MASK                     0x0060  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_SHIFT                         5  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_WIDTH                         2  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_FMT_MASK                    0x0018  /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_SHIFT                        3  /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_WIDTH                        2  /* AIF3_FMT - [4:3] */
+
+/*
+ * R801 (0x321) - AIF3 Control (2)
+ */
+#define WM8958_AIF3DAC_BOOST_MASK               0x0C00  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_SHIFT                  10  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_WIDTH                   2  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_COMP                     0x0010  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_MASK                0x0010  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_SHIFT                    4  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_WIDTH                    1  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMPMODE                 0x0008  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_MASK            0x0008  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_SHIFT                3  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_WIDTH                1  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3ADC_COMP                     0x0004  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_MASK                0x0004  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_SHIFT                    2  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_WIDTH                    1  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMPMODE                 0x0002  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_MASK            0x0002  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_SHIFT                1  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_WIDTH                1  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3_LOOPBACK                    0x0001  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_MASK               0x0001  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_SHIFT                   0  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_WIDTH                   1  /* AIF3_LOOPBACK */
+
+/*
+ * R802 (0x322) - AIF3DAC Data
+ */
+#define WM8958_AIF3DAC_DAT_INV                  0x0001  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_MASK             0x0001  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_SHIFT                 0  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_WIDTH                 1  /* AIF3DAC_DAT_INV */
+
+/*
+ * R803 (0x323) - AIF3ADC Data
+ */
+#define WM8958_AIF3ADC_DAT_INV                  0x0001  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_MASK             0x0001  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_SHIFT                 0  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_WIDTH                 1  /* AIF3ADC_DAT_INV */
+
+/*
  * R1024 (0x400) - AIF1 ADC1 Left Volume
  */
 #define WM8994_AIF1ADC1_VU                      0x0100  /* AIF1ADC1_VU */
@@ -4289,4 +4493,102 @@
 #define WM8994_TEMP_SHUT_DB_SHIFT                    0  /* TEMP_SHUT_DB */
 #define WM8994_TEMP_SHUT_DB_WIDTH                    1  /* TEMP_SHUT_DB */
 
+/*
+ * R2304 (0x900) - DSP2_Program
+ */
+#define WM8958_DSP2_ENA                         0x0001  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_MASK                    0x0001  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_SHIFT                        0  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_WIDTH                        1  /* DSP2_ENA */
+
+/*
+ * R2305 (0x901) - DSP2_Config
+ */
+#define WM8958_MBC_SEL_MASK                     0x0030  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_SHIFT                         4  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_WIDTH                         2  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_ENA                          0x0001  /* MBC_ENA */
+#define WM8958_MBC_ENA_MASK                     0x0001  /* MBC_ENA */
+#define WM8958_MBC_ENA_SHIFT                         0  /* MBC_ENA */
+#define WM8958_MBC_ENA_WIDTH                         1  /* MBC_ENA */
+
+/*
+ * R2560 (0xA00) - DSP2_MagicNum
+ */
+#define WM8958_DSP2_MAGIC_NUM_MASK              0xFFFF  /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_SHIFT                  0  /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_WIDTH                 16  /* DSP2_MAGIC_NUM - [15:0] */
+
+/*
+ * R2561 (0xA01) - DSP2_ReleaseYear
+ */
+#define WM8958_DSP2_RELEASE_YEAR_MASK           0xFFFF  /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_SHIFT               0  /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_WIDTH              16  /* DSP2_RELEASE_YEAR - [15:0] */
+
+/*
+ * R2562 (0xA02) - DSP2_ReleaseMonthDay
+ */
+#define WM8958_DSP2_RELEASE_MONTH_MASK          0xFF00  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_SHIFT              8  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_WIDTH              8  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_DAY_MASK            0x00FF  /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_SHIFT                0  /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_WIDTH                8  /* DSP2_RELEASE_DAY - [7:0] */
+
+/*
+ * R2563 (0xA03) - DSP2_ReleaseTime
+ */
+#define WM8958_DSP2_RELEASE_HOURS_MASK          0xFF00  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_SHIFT              8  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_WIDTH              8  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_MINS_MASK           0x00FF  /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_SHIFT               0  /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_WIDTH               8  /* DSP2_RELEASE_MINS - [7:0] */
+
+/*
+ * R2564 (0xA04) - DSP2_VerMajMin
+ */
+#define WM8958_DSP2_MAJOR_VER_MASK              0xFF00  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_SHIFT                  8  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_WIDTH                  8  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MINOR_VER_MASK              0x00FF  /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_SHIFT                  0  /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_WIDTH                  8  /* DSP2_MINOR_VER - [7:0] */
+
+/*
+ * R2565 (0xA05) - DSP2_VerBuild
+ */
+#define WM8958_DSP2_BUILD_VER_MASK              0xFFFF  /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_SHIFT                  0  /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_WIDTH                 16  /* DSP2_BUILD_VER - [15:0] */
+
+/*
+ * R2573 (0xA0D) - DSP2_ExecControl
+ */
+#define WM8958_DSP2_STOPC                       0x0020  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_MASK                  0x0020  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_SHIFT                      5  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_WIDTH                      1  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPS                       0x0010  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_MASK                  0x0010  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_SHIFT                      4  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_WIDTH                      1  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPI                       0x0008  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_MASK                  0x0008  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_SHIFT                      3  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_WIDTH                      1  /* DSP2_STOPI */
+#define WM8958_DSP2_STOP                        0x0004  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_MASK                   0x0004  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_SHIFT                       2  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_WIDTH                       1  /* DSP2_STOP */
+#define WM8958_DSP2_RUNR                        0x0002  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_MASK                   0x0002  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_SHIFT                       1  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_WIDTH                       1  /* DSP2_RUNR */
+#define WM8958_DSP2_RUN                         0x0001  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_MASK                    0x0001  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_SHIFT                        0  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_WIDTH                        1  /* DSP2_RUN */
+
 #endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 085527f..e39aeec 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,9 +13,11 @@
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, bool offlining,
+			bool sync);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, bool offlining,
+			bool sync);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -33,9 +35,11 @@
 
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, bool offlining,
+		bool sync) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, bool offlining,
+		bool sync) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
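migrate_pages() and migrate_huge_pages() now take two booleans instead of an int: whether the pages are being taken offline, and whether migration must be synchronous. A sketch of an adapted call site; the wrapper name is illustrative:

static int migrate_my_list(struct list_head *pages, new_page_t get_new_page,
			   unsigned long private)
{
	/* not a hotplug-offline path, and asynchronous migration is fine */
	return migrate_pages(pages, get_new_page, private,
			     false /* offlining */, false /* sync */);
}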
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a7b15bc..0492146 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -144,6 +144,11 @@
 	MLX4_STAT_RATE_OFFSET	= 5
 };
 
+enum mlx4_protocol {
+	MLX4_PROTOCOL_IB,
+	MLX4_PROTOCOL_EN,
+};
+
 enum {
 	MLX4_MTT_FLAG_PRESENT		= 1
 };
@@ -500,8 +505,9 @@
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+			  int block_mcast_loopback, enum mlx4_protocol protocol);
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol protocol);
 
 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index f407cd4..e1eebf7 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -34,6 +34,7 @@
 #define MLX4_DRIVER_H
 
 #include <linux/device.h>
+#include <linux/mlx4/device.h>
 
 struct mlx4_dev;
 
@@ -44,11 +45,6 @@
 	MLX4_DEV_EVENT_PORT_REINIT,
 };
 
-enum mlx4_protocol {
-	MLX4_PROTOCOL_IB,
-	MLX4_PROTOCOL_EN,
-};
-
 struct mlx4_interface {
 	void *			(*add)	 (struct mlx4_dev *dev);
 	void			(*remove)(struct mlx4_dev *dev, void *context);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 721f451..f6385fc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,7 @@
 #include <linux/mm_types.h>
 #include <linux/range.h>
 #include <linux/pfn.h>
+#include <linux/bit_spinlock.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -82,6 +83,7 @@
 #define VM_GROWSUP	0x00000200
 #else
 #define VM_GROWSUP	0x00000000
+#define VM_NOHUGEPAGE	0x00000200	/* MADV_NOHUGEPAGE marked this vma */
 #endif
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
@@ -101,7 +103,11 @@
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#else
+#define VM_HUGEPAGE	0x01000000	/* MADV_HUGEPAGE marked this vma */
+#endif
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
@@ -242,6 +248,7 @@
  * files which need it (119 of them)
  */
 #include <linux/page-flags.h>
+#include <linux/huge_mm.h>
 
 /*
  * Methods to modify the page usage count.
@@ -305,6 +312,39 @@
 }
 #endif
 
+static inline void compound_lock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	bit_spin_lock(PG_compound_lock, &page->flags);
+#endif
+}
+
+static inline void compound_unlock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	bit_spin_unlock(PG_compound_lock, &page->flags);
+#endif
+}
+
+static inline unsigned long compound_lock_irqsave(struct page *page)
+{
+	unsigned long uninitialized_var(flags);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	local_irq_save(flags);
+	compound_lock(page);
+#endif
+	return flags;
+}
+
+static inline void compound_unlock_irqrestore(struct page *page,
+					      unsigned long flags)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	compound_unlock(page);
+	local_irq_restore(flags);
+#endif
+}
+
 static inline struct page *compound_head(struct page *page)
 {
 	if (unlikely(PageTail(page)))
@@ -319,9 +359,29 @@
 
 static inline void get_page(struct page *page)
 {
-	page = compound_head(page);
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	/*
+	 * Getting a normal page or the head of a compound page
+	 * requires an already elevated page->_count. Only when getting
+	 * a tail page is the elevated page->_count required solely on
+	 * the head page, so for tail pages the bugcheck only verifies
+	 * that the page->_count isn't negative.
+	 */
+	VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
 	atomic_inc(&page->_count);
+	/*
+	 * Getting a tail page will elevate both the head and tail
+	 * page->_count(s).
+	 */
+	if (unlikely(PageTail(page))) {
+		/*
+		 * This is safe only because
+		 * __split_huge_page_refcount can't run under
+		 * get_page().
+		 */
+		VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+		atomic_inc(&page->first_page->_count);
+	}
 }
 
 static inline struct page *virt_to_head_page(const void *x)
@@ -339,6 +399,27 @@
 	atomic_set(&page->_count, 1);
 }
 
+/*
+ * PageBuddy() indicates that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ */
+static inline int PageBuddy(struct page *page)
+{
+	return atomic_read(&page->_mapcount) == -2;
+}
+
+static inline void __SetPageBuddy(struct page *page)
+{
+	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	atomic_set(&page->_mapcount, -2);
+}
+
+static inline void __ClearPageBuddy(struct page *page)
+{
+	VM_BUG_ON(!PageBuddy(page));
+	atomic_set(&page->_mapcount, -1);
+}
+
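These helpers encode "free and in the buddy allocator" as _mapcount == -2. A sketch of the intended pairing in the allocator's mark/unmark paths; the surrounding free/alloc code is elided and the function names are illustrative:

static void mark_page_free(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);	/* _mapcount == -2: page sits on a buddy free list */
}

static void mark_page_allocated(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}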
 void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
@@ -370,11 +451,40 @@
 	return (unsigned long)page[1].lru.prev;
 }
 
+static inline int compound_trans_order(struct page *page)
+{
+	int order;
+	unsigned long flags;
+
+	if (!PageHead(page))
+		return 0;
+
+	flags = compound_lock_irqsave(page);
+	order = compound_order(page);
+	compound_unlock_irqrestore(page, flags);
+	return order;
+}
+
 static inline void set_compound_order(struct page *page, unsigned long order)
 {
 	page[1].lru.prev = (void *)order;
 }
 
+#ifdef CONFIG_MMU
+/*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
+ * servicing faults for write access.  In the normal case, do always want
+ * pte_mkwrite.  But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pte = pte_mkwrite(pte);
+	return pte;
+}
+#endif
+
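A typical fault-path use of maybe_mkwrite(), mirroring how anonymous-fault code builds a PTE; the wrapper function is illustrative:

static pte_t build_fault_pte(struct page *page, struct vm_area_struct *vma)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	/* dirty it, and make it writable only if the VMA permits writes */
	return maybe_mkwrite(pte_mkdirty(entry), vma);
}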
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
@@ -657,7 +767,7 @@
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 		mapping = NULL;
 	return mapping;
 }
@@ -1064,7 +1174,8 @@
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 #endif
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		pmd_t *pmd, unsigned long address);
 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
 
 /*
@@ -1133,16 +1244,18 @@
 	pte_unmap(pte);					\
 } while (0)
 
-#define pte_alloc_map(mm, pmd, address)			\
-	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
-		NULL: pte_offset_map(pmd, address))
+#define pte_alloc_map(mm, vma, pmd, address)				\
+	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
+							pmd, address))?	\
+	 NULL: pte_offset_map(pmd, address))
 
 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
-	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
+							pmd, address))?	\
 		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
 
 #define pte_alloc_kernel(pmd, address)			\
-	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
 		NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
@@ -1415,6 +1528,8 @@
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
+#define FOLL_MLOCK	0x40	/* mark page as mlocked */
+#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
@@ -1518,5 +1633,14 @@
 
 extern void dump_page(struct page *page);
 
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+extern void clear_huge_page(struct page *page,
+			    unsigned long addr,
+			    unsigned int pages_per_huge_page);
+extern void copy_user_huge_page(struct page *dst, struct page *src,
+				unsigned long addr, struct vm_area_struct *vma,
+				unsigned int pages_per_huge_page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8835b87..8f7d247 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,6 +1,8 @@
 #ifndef LINUX_MM_INLINE_H
 #define LINUX_MM_INLINE_H
 
+#include <linux/huge_mm.h>
+
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -20,18 +22,25 @@
 }
 
 static inline void
+__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
+		       struct list_head *head)
+{
+	list_add(&page->lru, head);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
+	mem_cgroup_add_lru_list(page, l);
+}
+
+static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	list_add(&page->lru, &zone->lru[l].list);
-	__inc_zone_state(zone, NR_LRU_BASE + l);
-	mem_cgroup_add_lru_list(page, l);
+	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
 }
 
 static inline void
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_del(&page->lru);
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
@@ -66,7 +75,7 @@
 			l += LRU_ACTIVE;
 		}
 	}
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
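The LRU helpers in this header now adjust the zone counters by hpage_nr_pages() rather than by one, so a transparent huge page is accounted with its full tail count. A minimal illustration of why the accounting stays balanced, using only the helpers declared above (the function below is a sketch, not part of the patch):

/* Sketch only: both helpers scale by hpage_nr_pages(page), so moving a
 * page (huge or not) between LRU lists leaves NR_LRU_BASE consistent.
 */
static void example_move_between_lru_lists(struct zone *zone, struct page *page,
					   enum lru_list from, enum lru_list to)
{
	del_page_from_lru_list(zone, page, from);	/* subtracts hpage_nr_pages(page) */
	add_page_to_lru_list(zone, page, to);		/* adds it back */
}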
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bb7288a..26bc4e2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -310,6 +310,9 @@
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
+#endif
 	/* How many tasks sharing this mm are OOM_DISABLE */
 	atomic_t oom_disable_count;
 };
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
new file mode 100644
index 0000000..16b0261
--- /dev/null
+++ b/include/linux/mmc/dw_mmc.h
@@ -0,0 +1,217 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MMC_DW_MMC_H_
+#define _LINUX_MMC_DW_MMC_H_
+
+#define MAX_MCI_SLOTS	2
+
+enum dw_mci_state {
+	STATE_IDLE = 0,
+	STATE_SENDING_CMD,
+	STATE_SENDING_DATA,
+	STATE_DATA_BUSY,
+	STATE_SENDING_STOP,
+	STATE_DATA_ERROR,
+};
+
+enum {
+	EVENT_CMD_COMPLETE = 0,
+	EVENT_XFER_COMPLETE,
+	EVENT_DATA_COMPLETE,
+	EVENT_DATA_ERROR,
+	EVENT_XFER_ERROR
+};
+
+struct mmc_data;
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @pio_offset: Offset into the current scatterlist entry.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ *	or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ *	transfer is in progress.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ *	command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @data_status: Snapshot of SR taken upon completion of the current
+ *	data transfer. Only valid when EVENT_DATA_COMPLETE or
+ *	EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ *	to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @card_tasklet: Tasklet handling card detect.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ *	to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ *	processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ *	rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @pdev: Platform device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @slot: Slots sharing this MMC controller.
+ * @data_shift: log2 of FIFO item size.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct dw_mci {
+	spinlock_t		lock;
+	void __iomem		*regs;
+
+	struct scatterlist	*sg;
+	unsigned int		pio_offset;
+
+	struct dw_mci_slot	*cur_slot;
+	struct mmc_request	*mrq;
+	struct mmc_command	*cmd;
+	struct mmc_data		*data;
+
+	/* DMA interface members */
+	int			use_dma;
+
+	dma_addr_t		sg_dma;
+	void			*sg_cpu;
+	struct dw_mci_dma_ops	*dma_ops;
+#ifdef CONFIG_MMC_DW_IDMAC
+	unsigned int		ring_size;
+#else
+	struct dw_mci_dma_data	*dma_data;
+#endif
+	u32			cmd_status;
+	u32			data_status;
+	u32			stop_cmdr;
+	u32			dir_status;
+	struct tasklet_struct	tasklet;
+	struct tasklet_struct	card_tasklet;
+	unsigned long		pending_events;
+	unsigned long		completed_events;
+	enum dw_mci_state	state;
+	struct list_head	queue;
+
+	u32			bus_hz;
+	u32			current_speed;
+	u32			num_slots;
+	struct platform_device	*pdev;
+	struct dw_mci_board	*pdata;
+	struct dw_mci_slot	*slot[MAX_MCI_SLOTS];
+
+	/* FIFO push and pull */
+	int			data_shift;
+	void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+	void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+	/* Workaround flags */
+	u32			quirks;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+	/* DMA Ops */
+	int (*init)(struct dw_mci *host);
+	void (*start)(struct dw_mci *host, unsigned int sg_len);
+	void (*complete)(struct dw_mci *host);
+	void (*stop)(struct dw_mci *host);
+	void (*cleanup)(struct dw_mci *host);
+	void (*exit)(struct dw_mci *host);
+};
+
+/* IP Quirks/flags. */
+/* No special quirks or flags to cater for */
+#define DW_MCI_QUIRK_NONE		0
+/* DTO fix for command transmission with IDMAC configured */
+#define DW_MCI_QUIRK_IDMAC_DTO		1
+/* delay needed between retries on some 2.11a implementations */
+#define DW_MCI_QUIRK_RETRY_DELAY	2
+/* High Speed Capable - Supports HS cards (up to 50MHz) */
+#define DW_MCI_QUIRK_HIGHSPEED		4
+
+
+struct dma_pdata;
+
+struct block_settings {
+	unsigned short	max_segs;	/* see blk_queue_max_segments */
+	unsigned int	max_blk_size;	/* maximum size of one mmc block */
+	unsigned int	max_blk_count;	/* maximum number of blocks in one req */
+	unsigned int	max_req_size;	/* maximum number of bytes in one req */
+	unsigned int	max_seg_size;	/* see blk_queue_max_segment_size */
+};
+
+/* Board platform data */
+struct dw_mci_board {
+	u32 num_slots;
+
+	u32 quirks; /* Workaround / Quirk flags */
+	unsigned int bus_hz; /* Bus speed */
+
+	/* delay in ms before detecting cards after interrupt */
+	u32 detect_delay_ms;
+
+	int (*init)(u32 slot_id, irq_handler_t , void *);
+	int (*get_ro)(u32 slot_id);
+	int (*get_cd)(u32 slot_id);
+	int (*get_ocr)(u32 slot_id);
+	int (*get_bus_wd)(u32 slot_id);
+	/*
+	 * Enable power to selected slot and set voltage to desired level.
+	 * Voltage levels are specified using MMC_VDD_xxx defines defined
+	 * in linux/mmc/host.h file.
+	 */
+	void (*setpower)(u32 slot_id, u32 volt);
+	void (*exit)(u32 slot_id);
+	void (*select_slot)(u32 slot_id);
+
+	struct dw_mci_dma_ops *dma_ops;
+	struct dma_pdata *data;
+	struct block_settings *blk_settings;
+};
+
+#endif /* _LINUX_MMC_DW_MMC_H_ */
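For context, a board would normally hand this controller its slot count, quirks and clock rate through struct dw_mci_board. A minimal sketch under assumed values; the platform device name, clock rate and delay are placeholders and not taken from this patch (assumes <linux/platform_device.h> is included):

static struct dw_mci_board example_dw_mci_pdata = {
	.num_slots	 = 1,
	.quirks		 = DW_MCI_QUIRK_HIGHSPEED,
	.bus_hz		 = 50 * 1000 * 1000,	/* controller clock in Hz */
	.detect_delay_ms = 200,			/* debounce after card-detect IRQ */
};

static struct platform_device example_dw_mci_device = {
	.name	= "dw_mmc",			/* assumed platform driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &example_dw_mci_pdata,
	},
};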
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 30f6fad..bcb793e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -131,6 +131,9 @@
 	unsigned int		f_max;
 	unsigned int		f_init;
 	u32			ocr_avail;
+	u32			ocr_avail_sdio;	/* SDIO-specific OCR */
+	u32			ocr_avail_sd;	/* SD-specific OCR */
+	u32			ocr_avail_mmc;	/* MMC-specific OCR */
 	struct notifier_block	pm_notify;
 
 #define MMC_VDD_165_195		0x00000080	/* VDD voltage 1.65 - 1.95 */
@@ -169,9 +172,20 @@
 #define MMC_CAP_1_2V_DDR	(1 << 12)	/* can support */
 						/* DDR mode at 1.2V */
 #define MMC_CAP_POWER_OFF_CARD	(1 << 13)	/* Can power off after boot */
+#define MMC_CAP_BUS_WIDTH_TEST	(1 << 14)	/* CMD14/CMD19 bus width ok */
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+	int			clk_requests;	/* internal reference counter */
+	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
+	bool			clk_gated;	/* clock gated */
+	struct work_struct	clk_gate_work; /* delayed clock gate */
+	unsigned int		clk_old;	/* old clock value cache */
+	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+#endif
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -307,5 +321,10 @@
 	return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
 }
 
+static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
+{
+	return host->pm_flags & MMC_PM_KEEP_POWER;
+}
+
 #endif
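The new ocr_avail_sdio/ocr_avail_sd/ocr_avail_mmc fields added above let a host advertise different voltage ranges per bus type, with ocr_avail remaining the generic fallback. A hedged sketch of how a host driver might fill them; the particular voltage split is hypothetical, not mandated by the patch:

static void example_set_ocr_masks(struct mmc_host *host)
{
	/* generic fallback mask */
	host->ocr_avail     = MMC_VDD_32_33 | MMC_VDD_33_34;
	/* SD slot powered at 3.2-3.4V */
	host->ocr_avail_sd  = MMC_VDD_32_33 | MMC_VDD_33_34;
	/* eMMC rail limited to 1.65-1.95V */
	host->ocr_avail_mmc = MMC_VDD_165_195;
}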
 
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 956fbd87..612301f 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -40,7 +40,9 @@
 #define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
 #define MMC_STOP_TRANSMISSION    12   /* ac                      R1b */
 #define MMC_SEND_STATUS          13   /* ac   [31:16] RCA        R1  */
+#define MMC_BUS_TEST_R           14   /* adtc                    R1  */
 #define MMC_GO_INACTIVE_STATE    15   /* ac   [31:16] RCA            */
+#define MMC_BUS_TEST_W           19   /* adtc                    R1  */
 #define MMC_SPI_READ_OCR         58   /* spi                  spi_R3 */
 #define MMC_SPI_CRC_ON_OFF       59   /* spi  [0:0] flag      spi_R1 */
 
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 1fdc673..83bd9f7 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -83,6 +83,8 @@
 #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12		(1<<28)
 /* Controller doesn't have HISPD bit field in HI-SPEED SD card */
 #define SDHCI_QUIRK_NO_HISPD_BIT			(1<<29)
+/* Controller treats ADMA descriptors with length 0000h incorrectly */
+#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC		(1<<30)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -139,6 +141,10 @@
 
 	unsigned int caps;	/* Alternative capabilities */
 
+	unsigned int            ocr_avail_sdio;	/* OCR bit masks */
+	unsigned int            ocr_avail_sd;
+	unsigned int            ocr_avail_mmc;
+
 	unsigned long private[0] ____cacheline_aligned;
 };
 #endif /* __SDHCI_H */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index 44fc534..38d3930 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -94,16 +94,19 @@
 
 static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
 {
-	return readl(addr + reg);
+	return __raw_readl(addr + reg);
 }
 
 static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
 {
-	writel(val, addr + reg);
+	__raw_writel(val, addr + reg);
 }
 
 #define SH_MMCIF_BBS 512 /* boot block size */
 
+enum { MMCIF_PROGRESS_ENTER, MMCIF_PROGRESS_INIT,
+       MMCIF_PROGRESS_LOAD, MMCIF_PROGRESS_DONE };
+
 static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
 					  unsigned long cmd, unsigned long arg)
 {
@@ -166,6 +169,17 @@
 	unsigned long k;
 	int ret = 0;
 
+	/* In data transfer mode: Set clock to Bus clock/4 (about 20MHz) */
+	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
+			CLK_ENABLE | CLKDIV_4 | SRSPTO_256 |
+			SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+
+	/* CMD9 - Get CSD */
+	sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000);
+
+	/* CMD7 - Select the card */
+	sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000);
+
 	/* CMD16 - Set the block size */
 	sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS);
 
@@ -209,27 +223,4 @@
 	sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
 }
 
-static inline void sh_mmcif_boot_slurp(void __iomem *base,
-				       unsigned char *buf,
-				       unsigned long no_bytes)
-{
-	unsigned long tmp;
-
-	/* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */
-	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
-			CLK_ENABLE | CLKDIV_4 | SRSPTO_256 |
-			SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
-
-	/* CMD9 - Get CSD */
-	sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000);
-
-	/* CMD7 - Select the card */
-	sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000);
-
-	tmp = no_bytes / SH_MMCIF_BBS;
-	tmp += (no_bytes % SH_MMCIF_BBS) ? 1 : 0;
-
-	sh_mmcif_boot_do_read(base, 512, tmp, buf);
-}
-
 #endif /* __SH_MMCIF_H__ */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 43dcfbd..cc2e7df 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -62,6 +62,16 @@
 				 unsigned long address);
 
 	/*
+	 * test_young is called to check the young/accessed bitflag in
+	 * the secondary pte. This is used to know if the page is
+	 * frequently used without actually clearing the flag or tearing
+	 * down the secondary mapping on the page.
+	 */
+	int (*test_young)(struct mmu_notifier *mn,
+			  struct mm_struct *mm,
+			  unsigned long address);
+
+	/*
 	 * change_pte is called in cases that pte mapping to page is changed:
 	 * for example, when ksm remaps pte to point to a new shared page.
 	 */
@@ -163,6 +173,8 @@
 extern void __mmu_notifier_release(struct mm_struct *mm);
 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 					  unsigned long address);
+extern int __mmu_notifier_test_young(struct mm_struct *mm,
+				     unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
 				      unsigned long address, pte_t pte);
 extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
@@ -186,6 +198,14 @@
 	return 0;
 }
 
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	if (mm_has_notifiers(mm))
+		return __mmu_notifier_test_young(mm, address);
+	return 0;
+}
+
 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 					   unsigned long address, pte_t pte)
 {
@@ -243,6 +263,32 @@
 	__pte;								\
 })
 
+#define pmdp_clear_flush_notify(__vma, __address, __pmdp)		\
+({									\
+	pmd_t __pmd;							\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	VM_BUG_ON(__address & ~HPAGE_PMD_MASK);				\
+	mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address,	\
+					    (__address)+HPAGE_PMD_SIZE);\
+	__pmd = pmdp_clear_flush(___vma, ___address, __pmdp);		\
+	mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address,	\
+					  (__address)+HPAGE_PMD_SIZE);	\
+	__pmd;								\
+})
+
+#define pmdp_splitting_flush_notify(__vma, __address, __pmdp)		\
+({									\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	VM_BUG_ON(__address & ~HPAGE_PMD_MASK);				\
+	mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address,	\
+					    (__address)+HPAGE_PMD_SIZE);\
+	pmdp_splitting_flush(___vma, ___address, __pmdp);		\
+	mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address,	\
+					  (__address)+HPAGE_PMD_SIZE);	\
+})
+
 #define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
 ({									\
 	int __young;							\
@@ -254,6 +300,17 @@
 	__young;							\
 })
 
+#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
+({									\
+	int __young;							\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
+	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
+						  ___address);		\
+	__young;							\
+})
+
 #define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
 ({									\
 	struct mm_struct *___mm = __mm;					\
@@ -276,6 +333,12 @@
 	return 0;
 }
 
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	return 0;
+}
+
 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 					   unsigned long address, pte_t pte)
 {
@@ -305,7 +368,10 @@
 }
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
 #define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_splitting_flush_notify pmdp_splitting_flush
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
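The new test_young hook lets a secondary MMU report whether a page was recently referenced without clearing its accessed bit or tearing down the mapping, unlike clear_flush_young. A minimal sketch of a notifier wiring it up; example_secondary_pte_young() is a hypothetical helper standing in for driver-specific state:

static int example_test_young(struct mmu_notifier *mn, struct mm_struct *mm,
			      unsigned long address)
{
	/* non-zero if the secondary PTE for @address was recently accessed */
	return example_secondary_pte_young(mn, address);
}

static const struct mmu_notifier_ops example_notifier_ops = {
	.test_young	= example_test_young,
};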
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 39c24eb..02ecb01 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,6 +114,7 @@
 	NUMA_LOCAL,		/* allocation from local node */
 	NUMA_OTHER,		/* allocation from other node */
 #endif
+	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
@@ -458,12 +459,6 @@
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
-#ifdef CONFIG_SMP
-unsigned long zone_nr_free_pages(struct zone *zone);
-#else
-#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
-#endif /* CONFIG_SMP */
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -645,6 +640,7 @@
 	wait_queue_head_t kswapd_wait;
 	struct task_struct *kswapd;
 	int kswapd_max_order;
+	enum zone_type classzone_idx;
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
@@ -660,8 +656,10 @@
 
 extern struct mutex zonelists_mutex;
 void build_all_zonelists(void *data);
-void wakeup_kswapd(struct zone *zone, int order);
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		int classzone_idx, int alloc_flags);
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
 enum memmap_context {
 	MEMMAP_EARLY,
diff --git a/include/linux/module.h b/include/linux/module.h
index 8b17fd8..5de4204 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -58,6 +58,12 @@
 	void (*free)(struct module *);
 };
 
+struct module_version_attribute {
+	struct module_attribute mattr;
+	const char *module_name;
+	const char *version;
+} __attribute__ ((__aligned__(sizeof(void *))));
+
 struct module_kobject
 {
 	struct kobject kobj;
@@ -161,7 +167,28 @@
   Using this automatically adds a checksum of the .c files and the
   local headers in "srcversion".
 */
+
+#if defined(MODULE) || !defined(CONFIG_SYSFS)
 #define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#else
+#define MODULE_VERSION(_version)					\
+	extern ssize_t __modver_version_show(struct module_attribute *,	\
+					     struct module *, char *);	\
+	static struct module_version_attribute __modver_version_attr	\
+	__used								\
+    __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \
+	= {								\
+		.mattr	= {						\
+			.attr	= {					\
+				.name	= "version",			\
+				.mode	= S_IRUGO,			\
+			},						\
+			.show	= __modver_version_show,		\
+		},							\
+		.module_name	= KBUILD_MODNAME,			\
+		.version	= _version,				\
+	}
+#endif
 
 /* Optional firmware file (or files) needed by the module
  * format is simply firmware file name.  Multiple firmware
@@ -350,7 +377,7 @@
 	   keeping pointers to this stuff */
 	char *args;
 #ifdef CONFIG_TRACEPOINTS
-	struct tracepoint *tracepoints;
+	struct tracepoint * const *tracepoints_ptrs;
 	unsigned int num_tracepoints;
 #endif
 #ifdef HAVE_JUMP_LABEL
@@ -362,7 +389,7 @@
 	unsigned int num_trace_bprintk_fmt;
 #endif
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call *trace_events;
+	struct ftrace_event_call **trace_events;
 	unsigned int num_trace_events;
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
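With the MODULE_VERSION() change earlier in this file, built-in code compiled with CONFIG_SYSFS=y emits a struct module_version_attribute into the __modver section so the version can still appear under /sys/module/<name>/version, while a modular build keeps the old modinfo path. Driver-side usage is unchanged; a trivial sketch with a placeholder version string:

/* unchanged driver-side usage; "1.2.3" is a placeholder version string */
MODULE_VERSION("1.2.3");
MODULE_LICENSE("GPL");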
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 112adf8..07b4195 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,15 +16,17 @@
 /* Chosen so that structs with an unsigned long line up. */
 #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
 
-#ifdef MODULE
 #define ___module_cat(a,b) __mod_ ## a ## b
 #define __module_cat(a,b) ___module_cat(a,b)
+#ifdef MODULE
 #define __MODULE_INFO(tag, name, info)					  \
 static const char __module_cat(name,__LINE__)[]				  \
   __used __attribute__((section(".modinfo"), unused, aligned(1)))	  \
   = __stringify(tag) "=" info
 #else  /* !MODULE */
-#define __MODULE_INFO(tag, name, info)
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO(tag, name, info)					  \
+  struct __module_cat(name,__LINE__) {}
 #endif
 #define __MODULE_PARM_TYPE(name, _type)					  \
   __MODULE_INFO(parmtype, name##type, #name ":" _type)
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 1869ea2..604f122 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -60,7 +60,7 @@
 	struct super_block *mnt_sb;	/* pointer to superblock */
 #ifdef CONFIG_SMP
 	struct mnt_pcp __percpu *mnt_pcp;
-	atomic_t mnt_longrefs;
+	atomic_t mnt_longterm;		/* how many of the refs are longterm */
 #else
 	int mnt_count;
 	int mnt_writers;
@@ -96,8 +96,6 @@
 extern void mnt_drop_write(struct vfsmount *mnt);
 extern void mntput(struct vfsmount *mnt);
 extern struct vfsmount *mntget(struct vfsmount *mnt);
-extern void mntput_long(struct vfsmount *mnt);
-extern struct vfsmount *mntget_long(struct vfsmount *mnt);
 extern void mnt_pin(struct vfsmount *mnt);
 extern void mnt_unpin(struct vfsmount *mnt);
 extern int __mnt_is_readonly(struct vfsmount *mnt);
@@ -110,12 +108,7 @@
 				      int flags, const char *name,
 				      void *data);
 
-struct nameidata;
-
-struct path;
-extern int do_add_mount(struct vfsmount *newmnt, struct path *path,
-			int mnt_flags, struct list_head *fslist);
-
+extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
 extern void mark_mounts_for_expiry(struct list_head *mounts);
 
 extern dev_t name_to_dev_t(char *name);
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 0fa7a3a..b21d567 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -150,6 +150,7 @@
 extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
 extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
 extern int ip_mr_init(void);
 #else
 static inline
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 6091ab7..9d2deb2 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -136,6 +136,7 @@
 extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ip6_mr_input(struct sk_buff *skb);
 extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
 extern int ip6_mr_init(void);
 extern void ip6_mr_cleanup(void);
 #else
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 4dd0c2c..a9baee6 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -527,8 +527,7 @@
 struct cfi_fixup {
 	uint16_t mfr;
 	uint16_t id;
-	void (*fixup)(struct mtd_info *mtd, void* param);
-	void* param;
+	void (*fixup)(struct mtd_info *mtd);
 };
 
 #define CFI_MFR_ANY		0xFFFF
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index 5d25567..6987995 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -16,6 +16,7 @@
 #ifndef __MTD_FSMC_H
 #define __MTD_FSMC_H
 
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
 #include <linux/types.h>
@@ -27,7 +28,7 @@
 
 /*
  * The placement of the Command Latch Enable (CLE) and
- * Address Latch Enable (ALE) is twised around in the
+ * Address Latch Enable (ALE) is twisted around in the
  * SPEAR310 implementation.
  */
 #if defined(CONFIG_MACH_SPEAR310)
@@ -62,7 +63,7 @@
 
 /* ctrl_tim register definitions */
 
-struct fsms_nand_bank_regs {
+struct fsmc_nand_bank_regs {
 	uint32_t pc;
 	uint32_t sts;
 	uint32_t comm;
@@ -78,7 +79,7 @@
 struct fsmc_regs {
 	struct fsmc_nor_bank_regs nor_bank_regs[FSMC_MAX_NOR_BANKS];
 	uint8_t reserved_1[0x40 - 0x20];
-	struct fsms_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
+	struct fsmc_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
 	uint8_t reserved_2[0xfe0 - 0xc0];
 	uint32_t peripid0;			/* 0xfe0 */
 	uint32_t peripid1;			/* 0xfe4 */
@@ -114,25 +115,6 @@
 #define FSMC_THOLD_4		(4 << 16)
 #define FSMC_THIZ_1		(1 << 24)
 
-/* peripid2 register definitions */
-#define FSMC_REVISION_MSK	(0xf)
-#define FSMC_REVISION_SHFT	(0x4)
-
-#define FSMC_VER1		1
-#define FSMC_VER2		2
-#define FSMC_VER3		3
-#define FSMC_VER4		4
-#define FSMC_VER5		5
-#define FSMC_VER6		6
-#define FSMC_VER7		7
-#define FSMC_VER8		8
-
-static inline uint32_t get_fsmc_version(struct fsmc_regs *regs)
-{
-	return (readl(&regs->peripid2) >> FSMC_REVISION_SHFT) &
-				FSMC_REVISION_MSK;
-}
-
 /*
  * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
  * and it has to be read consecutively and immediately after the 512
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index fe8d77e..9d5306b 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -144,6 +144,17 @@
 	 */
 	uint32_t writesize;
 
+	/*
+	 * Size of the write buffer used by the MTD. MTD devices having a write
+	 * buffer can write multiple writesize chunks at a time. E.g. when
+	 * writing 4 * writesize bytes to a device with a 2 * writesize write
+	 * buffer, the MTD driver may (but doesn't have to) issue two program
+	 * operations of 2 * writesize bytes each instead of four. Currently,
+	 * all NANDs have writebufsize equal to writesize (the NAND page size);
+	 * some NOR flashes have writebufsize greater than writesize.
+	 */
+	uint32_t writebufsize;
+
 	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
 	uint32_t oobavail;  // Available OOB bytes per block
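Code that sizes program operations can now consult writebufsize instead of assuming it equals writesize. A minimal sketch using only the fields declared above; the helper name is illustrative:

static size_t example_program_chunk(const struct mtd_info *mtd)
{
	/* NOR may have writebufsize > writesize; NAND keeps them equal */
	return max_t(size_t, mtd->writebufsize, mtd->writesize);
}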
 
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 63e17d0..1f489b2 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -448,6 +448,8 @@
  *			See the defines for further explanation.
  * @badblockpos:	[INTERN] position of the bad block marker in the oob
  *			area.
+ * @badblockbits:	[INTERN] number of bits to left-shift the bad block
+ *			number
  * @cellinfo:		[INTERN] MLC/multichip data from chip ident
  * @numchips:		[INTERN] number of physical chips
  * @chipsize:		[INTERN] the size of one chip for multichip arrays
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 0c8815b..ae418e4 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -118,6 +118,8 @@
 	int (*chip_probe)(struct mtd_info *mtd);
 	int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
 	int (*scan_bbt)(struct mtd_info *mtd);
+	int (*enable)(struct mtd_info *mtd);
+	int (*disable)(struct mtd_info *mtd);
 
 	struct completion	complete;
 	int			irq;
@@ -137,6 +139,14 @@
 	void			*bbm;
 
 	void			*priv;
+
+	/*
+	 * Indicates that the current operation is composed of a sequence
+	 * of commands, for example cache program. For such operations the
+	 * command status OnGo bit is checked at the end of the sequence.
+	 */
+	unsigned int		ongoing;
 };
 
 /*
@@ -171,6 +181,9 @@
 #define ONENAND_IS_2PLANE(this)			(0)
 #endif
 
+#define ONENAND_IS_CACHE_PROGRAM(this)					\
+	(this->options & ONENAND_HAS_CACHE_PROGRAM)
+
 /* Check byte access in OneNAND */
 #define ONENAND_CHECK_BYTE_ACCESS(addr)		(addr & 0x1)
 
@@ -181,6 +194,7 @@
 #define ONENAND_HAS_UNLOCK_ALL		(0x0002)
 #define ONENAND_HAS_2PLANE		(0x0004)
 #define ONENAND_HAS_4KB_PAGE		(0x0008)
+#define ONENAND_HAS_CACHE_PROGRAM	(0x0010)
 #define ONENAND_SKIP_UNLOCK_CHECK	(0x0100)
 #define ONENAND_PAGEBUF_ALLOC		(0x1000)
 #define ONENAND_OOBBUF_ALLOC		(0x2000)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 2b54316..4a0a8ba 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -89,7 +89,7 @@
 static inline int mtd_has_cmdlinepart(void) { return 0; }
 #endif
 
-int mtd_is_master(struct mtd_info *mtd);
+int mtd_is_partition(struct mtd_info *mtd);
 int mtd_add_partition(struct mtd_info *master, char *name,
 		      long long offset, long long length);
 int mtd_del_partition(struct mtd_info *master, int partno);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 18d06ad..f276d4f 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -45,6 +45,7 @@
  *  - ending slashes ok even for nonexistent files
  *  - internal "there are more path components" flag
  *  - dentry cache is untrusted; force a real lookup
+ *  - suppress terminal automount
  */
 #define LOOKUP_FOLLOW		0x0001
 #define LOOKUP_DIRECTORY	0x0002
@@ -53,6 +54,7 @@
 #define LOOKUP_PARENT		0x0010
 #define LOOKUP_REVAL		0x0020
 #define LOOKUP_RCU		0x0040
+#define LOOKUP_NO_AUTOMOUNT	0x0080
 /*
  * Intent data
  */
@@ -79,7 +81,8 @@
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 
-extern int follow_down(struct path *);
+extern int follow_down_one(struct path *);
+extern int follow_down(struct path *, bool);
 extern int follow_up(struct path *);
 
 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index 1c27f20..e13eefe 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -143,104 +143,4 @@
 #define NCP_MAXPATHLEN 255
 #define NCP_MAXNAMELEN 14
 
-#ifdef __KERNEL__
-
-#include <linux/ncp_fs_i.h>
-#include <linux/ncp_fs_sb.h>
-
-/* define because it is easy to change PRINTK to {*}PRINTK */
-#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args)
-
-#undef NCPFS_PARANOIA
-#ifdef NCPFS_PARANOIA
-#define PPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define PPRINTK(format, args...)
-#endif
-
-#ifndef DEBUG_NCP
-#define DEBUG_NCP 0
-#endif
-#if DEBUG_NCP > 0
-#define DPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define DPRINTK(format, args...)
-#endif
-#if DEBUG_NCP > 1
-#define DDPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define DDPRINTK(format, args...)
-#endif
-
-#define NCP_MAX_RPC_TIMEOUT (6*HZ)
-
-
-struct ncp_entry_info {
-	struct nw_info_struct	i;
-	ino_t			ino;
-	int			opened;
-	int			access;
-	unsigned int		volume;
-	__u8			file_handle[6];
-};
-
-static inline struct ncp_server *NCP_SBP(const struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-
-#define NCP_SERVER(inode)	NCP_SBP((inode)->i_sb)
-static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode)
-{
-	return container_of(inode, struct ncp_inode_info, vfs_inode);
-}
-
-/* linux/fs/ncpfs/inode.c */
-int ncp_notify_change(struct dentry *, struct iattr *);
-struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *);
-void ncp_update_inode(struct inode *, struct ncp_entry_info *);
-void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
-
-/* linux/fs/ncpfs/dir.c */
-extern const struct inode_operations ncp_dir_inode_operations;
-extern const struct file_operations ncp_dir_operations;
-extern const struct dentry_operations ncp_root_dentry_operations;
-int ncp_conn_logged_in(struct super_block *);
-int ncp_date_dos2unix(__le16 time, __le16 date);
-void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
-
-/* linux/fs/ncpfs/ioctl.c */
-long ncp_ioctl(struct file *, unsigned int, unsigned long);
-long ncp_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* linux/fs/ncpfs/sock.c */
-int ncp_request2(struct ncp_server *server, int function,
-	void* reply, int max_reply_size);
-static inline int ncp_request(struct ncp_server *server, int function) {
-	return ncp_request2(server, function, server->packet, server->packet_size);
-}
-int ncp_connect(struct ncp_server *server);
-int ncp_disconnect(struct ncp_server *server);
-void ncp_lock_server(struct ncp_server *server);
-void ncp_unlock_server(struct ncp_server *server);
-
-/* linux/fs/ncpfs/symlink.c */
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern const struct address_space_operations ncp_symlink_aops;
-int ncp_symlink(struct inode*, struct dentry*, const char*);
-#endif
-
-/* linux/fs/ncpfs/file.c */
-extern const struct inode_operations ncp_file_inode_operations;
-extern const struct file_operations ncp_file_operations;
-int ncp_make_open(struct inode *, int);
-
-/* linux/fs/ncpfs/mmap.c */
-int ncp_mmap(struct file *, struct vm_area_struct *);
-
-/* linux/fs/ncpfs/ncplib_kernel.c */
-int ncp_make_closed(struct inode *);
-
-#endif				/* __KERNEL__ */
-
 #endif				/* _LINUX_NCP_FS_H */
diff --git a/include/linux/ncp_mount.h b/include/linux/ncp_mount.h
index a2b549e..dfcbea2 100644
--- a/include/linux/ncp_mount.h
+++ b/include/linux/ncp_mount.h
@@ -68,26 +68,4 @@
 
 #define NCP_MOUNT_VERSION_V5	(5)	/* Text only */
 
-#ifdef __KERNEL__
-
-struct ncp_mount_data_kernel {
-	unsigned long    flags;		/* NCP_MOUNT_* flags */
-	unsigned int	 int_flags;	/* internal flags */
-#define NCP_IMOUNT_LOGGEDIN_POSSIBLE	0x0001
-	__kernel_uid32_t mounted_uid;	/* Who may umount() this filesystem? */
-	struct pid      *wdog_pid;	/* Who cares for our watchdog packets? */
-	unsigned int     ncp_fd;	/* The socket to the ncp port */
-	unsigned int     time_out;	/* How long should I wait after
-					   sending a NCP request? */
-	unsigned int     retry_count;	/* And how often should I retry? */
-	unsigned char	 mounted_vol[NCP_VOLNAME_LEN + 1];
-	__kernel_uid32_t uid;
-	__kernel_gid32_t gid;
-	__kernel_mode_t  file_mode;
-	__kernel_mode_t  dir_mode;
-	int		 info_fd;
-};
-
-#endif /* __KERNEL__ */
-
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0f6b1c9..71caf7a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -520,9 +520,6 @@
 	 * please use this field instead of dev->trans_start
 	 */
 	unsigned long		trans_start;
-	u64			tx_bytes;
-	u64			tx_packets;
-	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -2191,11 +2188,15 @@
 extern void		ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				       void (*setup)(struct net_device *),
-				       unsigned int queue_count);
+				       unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
-	alloc_netdev_mq(sizeof_priv, name, setup, 1)
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
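Existing callers keep working through the alloc_netdev_mq() compatibility macro, which passes the same count for TX and RX queues; drivers with asymmetric needs can call alloc_netdev_mqs() directly. A hedged sketch, where struct example_priv and the "ex%d" name format are placeholders:

static struct net_device *example_alloc_netdev(void)
{
	/* 4 TX and 4 RX queues, equivalent to the old single-count call */
	return alloc_netdev_mq(sizeof(struct example_priv), "ex%d",
			       ether_setup, 4);
}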
 
@@ -2261,8 +2262,6 @@
 extern void		dev_mcast_init(void);
 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 					       struct rtnl_link_stats64 *storage);
-extern void		dev_txq_stats_fold(const struct net_device *dev,
-					   struct rtnl_link_stats64 *stats);
 
 extern int		netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
@@ -2303,7 +2302,7 @@
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
+int netif_skb_features(struct sk_buff *skb);
 
 static inline int net_gso_ok(int features, int gso_type)
 {
@@ -2317,16 +2316,10 @@
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
 {
-	if (skb_is_gso(skb)) {
-		int features = netif_get_vlan_features(skb, dev);
-
-		return (!skb_gso_ok(skb, features) ||
-			unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-	}
-
-	return 0;
+	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
 static inline void netif_set_gso_max_size(struct net_device *dev,
@@ -2399,6 +2392,9 @@
 extern int netdev_info(const struct net_device *dev, const char *format, ...)
 	__attribute__ ((format (printf, 2, 3)));
 
+#define MODULE_ALIAS_NETDEV(device) \
+	MODULE_ALIAS("netdev-" device)
+
 #if defined(DEBUG)
 #define netdev_dbg(__dev, format, args...)			\
 	netdev_printk(KERN_DEBUG, __dev, format, ##args)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 742bec0..6712e71 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@
  *  necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
@@ -516,12 +516,12 @@
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
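Switching xt_info_lock to a seqlock_t lets counter readers retry a lockless read instead of serializing against the per-CPU writers. A minimal sketch of the reader side, assuming a 64-bit counter whose location is already known (illustrative only, not the actual iptables code):

static u64 example_read_counter(unsigned int cpu, const u64 *counter)
{
	seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(lock);
		val = *counter;
	} while (read_seqretry(lock, seq));

	return val;
}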
diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h
new file mode 100644
index 0000000..7ab8521
--- /dev/null
+++ b/include/linux/nfc/pn544.h
@@ -0,0 +1,97 @@
+/*
+ * Driver include for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PN544_H_
+#define _PN544_H_
+
+#include <linux/i2c.h>
+
+#define PN544_DRIVER_NAME	"pn544"
+#define PN544_MAXWINDOW_SIZE	7
+#define PN544_WINDOW_SIZE	4
+#define PN544_RETRIES		10
+#define PN544_MAX_I2C_TRANSFER	0x0400
+#define PN544_MSG_MAX_SIZE	0x21 /* at normal HCI mode */
+
+/* ioctl */
+#define PN544_CHAR_BASE		'P'
+#define PN544_IOR(num, dtype)	_IOR(PN544_CHAR_BASE, num, dtype)
+#define PN544_IOW(num, dtype)	_IOW(PN544_CHAR_BASE, num, dtype)
+#define PN544_GET_FW_MODE	PN544_IOW(1, unsigned int)
+#define PN544_SET_FW_MODE	PN544_IOW(2, unsigned int)
+#define PN544_GET_DEBUG		PN544_IOW(3, unsigned int)
+#define PN544_SET_DEBUG		PN544_IOW(4, unsigned int)
+
+/* Timing restrictions (ms) */
+#define PN544_RESETVEN_TIME	30 /* 7 */
+#define PN544_PVDDVEN_TIME	0
+#define PN544_VBATVEN_TIME	0
+#define PN544_GPIO4VEN_TIME	0
+#define PN544_WAKEUP_ACK	5
+#define PN544_WAKEUP_GUARD	(PN544_WAKEUP_ACK + 1)
+#define PN544_INACTIVITY_TIME	1000
+#define PN544_INTERFRAME_DELAY	200 /* us */
+#define PN544_BAUDRATE_CHANGE	150 /* us */
+
+/* Debug bits */
+#define PN544_DEBUG_BUF		0x01
+#define PN544_DEBUG_READ	0x02
+#define PN544_DEBUG_WRITE	0x04
+#define PN544_DEBUG_IRQ		0x08
+#define PN544_DEBUG_CALLS	0x10
+#define PN544_DEBUG_MODE	0x20
+
+/* Normal (HCI) mode */
+#define PN544_LLC_HCI_OVERHEAD	3 /* header + crc (to length) */
+#define PN544_LLC_MIN_SIZE	(1 + PN544_LLC_HCI_OVERHEAD) /* length + */
+#define PN544_LLC_MAX_DATA	(PN544_MSG_MAX_SIZE - 2)
+#define PN544_LLC_MAX_HCI_SIZE	(PN544_LLC_MAX_DATA - 2)
+
+struct pn544_llc_packet {
+	unsigned char length; /* of rest of packet */
+	unsigned char header;
+	unsigned char data[PN544_LLC_MAX_DATA]; /* includes crc-ccitt */
+};
+
+/* Firmware upgrade mode */
+#define PN544_FW_HEADER_SIZE	3
+/* max fw transfer is 1024 bytes, but I2C limits it to 0xC0 */
+#define PN544_MAX_FW_DATA	(PN544_MAX_I2C_TRANSFER - PN544_FW_HEADER_SIZE)
+
+struct pn544_fw_packet {
+	unsigned char command; /* status in answer */
+	unsigned char length[2]; /* big-endian order (msf) */
+	unsigned char data[PN544_MAX_FW_DATA];
+};
+
+#ifdef __KERNEL__
+/* board config */
+struct pn544_nfc_platform_data {
+	int (*request_resources) (struct i2c_client *client);
+	void (*free_resources) (void);
+	void (*enable) (int fw);
+	int (*test) (void);
+	void (*disable) (void);
+};
+#endif /* __KERNEL__ */
+
+#endif /* _PN544_H_ */
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index ac33806..6ccfe3b 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -11,6 +11,9 @@
 #define NFS3_MAXGROUPS		16
 #define NFS3_FHSIZE		64
 #define NFS3_COOKIESIZE		4
+#define NFS3_CREATEVERFSIZE	8
+#define NFS3_COOKIEVERFSIZE	8
+#define NFS3_WRITEVERFSIZE	8
 #define NFS3_FIFO_DEV		(-1)
 #define NFS3MODE_FMT		0170000
 #define NFS3MODE_DIR		0040000
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 4925b22..134716e 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -65,6 +65,9 @@
 
 #define NFS4_CDFC4_FORE	0x1
 #define NFS4_CDFC4_BACK 0x2
+#define NFS4_CDFC4_BOTH 0x3
+#define NFS4_CDFC4_FORE_OR_BOTH 0x3
+#define NFS4_CDFC4_BACK_OR_BOTH 0x7
 
 #define NFS4_SET_TO_SERVER_TIME	0
 #define NFS4_SET_TO_CLIENT_TIME	1
@@ -111,9 +114,13 @@
 
 #define EXCHGID4_FLAG_SUPP_MOVED_REFER		0x00000001
 #define EXCHGID4_FLAG_SUPP_MOVED_MIGR		0x00000002
+#define EXCHGID4_FLAG_BIND_PRINC_STATEID	0x00000100
+
 #define EXCHGID4_FLAG_USE_NON_PNFS		0x00010000
 #define EXCHGID4_FLAG_USE_PNFS_MDS		0x00020000
 #define EXCHGID4_FLAG_USE_PNFS_DS		0x00040000
+#define EXCHGID4_FLAG_MASK_PNFS			0x00070000
+
 #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A	0x40000000
 #define EXCHGID4_FLAG_CONFIRMED_R		0x80000000
 /*
@@ -121,8 +128,8 @@
  * they're set in the argument or response, have separate
  * invalid flag masks for arg (_A) and resp (_R).
  */
-#define EXCHGID4_FLAG_MASK_A			0x40070003
-#define EXCHGID4_FLAG_MASK_R			0x80070003
+#define EXCHGID4_FLAG_MASK_A			0x40070103
+#define EXCHGID4_FLAG_MASK_R			0x80070103
 
 #define SEQ4_STATUS_CB_PATH_DOWN		0x00000001
 #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING	0x00000002
@@ -136,6 +143,9 @@
 #define SEQ4_STATUS_CB_PATH_DOWN_SESSION	0x00000200
 #define SEQ4_STATUS_BACKCHANNEL_FAULT		0x00000400
 
+#define NFS4_SECINFO_STYLE4_CURRENT_FH	0
+#define NFS4_SECINFO_STYLE4_PARENT	1
+
 #define NFS4_MAX_UINT64	(~(u64)0)
 
 /* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 0779bb8..6023efa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -215,7 +215,6 @@
 #define NFS_INO_ADVISE_RDPLUS	(0)		/* advise readdirplus */
 #define NFS_INO_STALE		(1)		/* possible stale inode */
 #define NFS_INO_ACL_LRU_SET	(2)		/* Inode is on the LRU list */
-#define NFS_INO_MOUNTPOINT	(3)		/* inode is remote mountpoint */
 #define NFS_INO_FLUSHING	(4)		/* inode is flushing out data */
 #define NFS_INO_FSCACHE		(5)		/* inode can be cached by FS-Cache */
 #define NFS_INO_FSCACHE_LOCK	(6)		/* FS-Cache cookie management lock */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 452d964..3e112de 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -47,11 +47,6 @@
 	u64			cl_clientid;	/* constant */
 	unsigned long		cl_state;
 
-	struct rb_root		cl_openowner_id;
-	struct rb_root		cl_lockowner_id;
-
-	struct list_head	cl_delegations;
-	struct rb_root		cl_state_owners;
 	spinlock_t		cl_lock;
 
 	unsigned long		cl_lease_time;
@@ -71,12 +66,9 @@
 	 */
 	char			cl_ipaddr[48];
 	unsigned char		cl_id_uniquifier;
+	u32			cl_cb_ident;	/* v4.0 callback identifier */
 	const struct nfs4_minor_version_ops *cl_mvops;
-#endif /* CONFIG_NFS_V4 */
 
-#ifdef CONFIG_NFS_V4_1
-	/* clientid returned from EXCHANGE_ID, used by session operations */
-	u64			cl_ex_clid;
 	/* The sequence id to use for the next CREATE_SESSION */
 	u32			cl_seqid;
 	/* The flags used for obtaining the clientid during EXCHANGE_ID */
@@ -84,7 +76,7 @@
 	struct nfs4_session	*cl_session; 	/* sharred session */
 	struct list_head	cl_layouts;
 	struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_FSCACHE
 	struct fscache_cookie	*fscache;	/* client index cache cookie */
@@ -148,7 +140,14 @@
 						   that are supported on this
 						   filesystem */
 	struct pnfs_layoutdriver_type  *pnfs_curr_ld; /* Active layout driver */
+	struct rpc_wait_queue	roc_rpcwaitq;
+
+	/* the following fields are protected by nfs_client->cl_lock */
+	struct rb_root		state_owners;
+	struct rb_root		openowner_id;
+	struct rb_root		lockowner_id;
 #endif
+	struct list_head	delegations;
 	void (*destroy)(struct nfs_server *);
 
 	atomic_t active; /* Keep trace of any activity to this server */
@@ -182,7 +181,7 @@
 /* maximum number of slots to use */
 #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
 
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_NFS_V4)
 
 /* Sessions */
 #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
@@ -196,6 +195,7 @@
 						 * op for dynamic resizing */
 	int		target_max_slots;	/* Set by CB_RECALL_SLOT as
 						 * the new max_slots */
+	struct completion complete;
 };
 
 static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
@@ -212,7 +212,6 @@
 	unsigned long			session_state;
 	u32				hash_alg;
 	u32				ssv_len;
-	struct completion		complete;
 
 	/* The fore and back channel */
 	struct nfs4_channel_attrs	fc_attrs;
@@ -222,5 +221,5 @@
 	struct nfs_client		*clp;
 };
 
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
 #endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 80f0719..b006857 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -208,6 +208,7 @@
 	struct inode *inode;
 	struct nfs_open_context *ctx;
 	struct nfs4_sequence_args seq_args;
+	nfs4_stateid stateid;
 };
 
 struct nfs4_layoutget_res {
@@ -223,7 +224,6 @@
 	struct nfs4_layoutget_args args;
 	struct nfs4_layoutget_res res;
 	struct pnfs_layout_segment **lsegpp;
-	int status;
 };
 
 struct nfs4_getdeviceinfo_args {
@@ -317,6 +317,7 @@
 struct nfs_lowner {
 	__u64			clientid;
 	__u64			id;
+	dev_t			s_dev;
 };
 
 struct nfs_lock_args {
@@ -484,6 +485,7 @@
 	struct nfs_fh *		fh;
 	struct nfs_fattr *	fattr;
 	unsigned char		d_type;
+	struct nfs_server *	server;
 };
 
 /*
@@ -1089,7 +1091,7 @@
 	int	(*pathconf) (struct nfs_server *, struct nfs_fh *,
 			     struct nfs_pathconf *);
 	int	(*set_capabilities)(struct nfs_server *, struct nfs_fh *);
-	__be32 *(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int plus);
+	int	(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
 	void	(*read_setup)   (struct nfs_read_data *, struct rpc_message *);
 	int	(*read_done)  (struct rpc_task *, struct nfs_read_data *);
 	void	(*write_setup)  (struct nfs_write_data *, struct rpc_message *);
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index f321b57..fabcb1e 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -51,10 +51,10 @@
 	return w;
 }
 
-extern unsigned int
+extern int
 nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
 	      struct posix_acl *acl, int encode_entries, int typeflag);
-extern unsigned int
+extern int
 nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
 	      struct posix_acl **pacl);
 
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 8ae78a6..bd31615 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -35,7 +35,7 @@
 #define NFSEXP_NOHIDE		0x0200
 #define NFSEXP_NOSUBTREECHECK	0x0400
 #define	NFSEXP_NOAUTHNLM	0x0800		/* Don't authenticate NLM requests - just trust */
-#define NFSEXP_MSNFS		0x1000	/* do silly things that MS clients expect */
+#define NFSEXP_MSNFS		0x1000	/* do silly things that MS clients expect; no longer supported */
 #define NFSEXP_FSID		0x2000
 #define	NFSEXP_CROSSMOUNT	0x4000
 #define	NFSEXP_NOACL		0x8000	/* reserved for possible ACL related use */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2b89b71..821ffb9 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -148,6 +148,10 @@
  * @NL80211_CMD_SET_MPATH:  Set mesh path attributes for mesh path to
  * 	destination %NL80211_ATTR_MAC on the interface identified by
  * 	%NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_NEW_MPATH: Create a new mesh path for the destination given by
+ *	%NL80211_ATTR_MAC via %NL80211_ATTR_MPATH_NEXT_HOP.
+ * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by
+ *	%NL80211_ATTR_MAC.
  * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the
  *	the interface identified by %NL80211_ATTR_IFINDEX.
  * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
@@ -612,7 +616,7 @@
  *	consisting of a nested array.
  *
  * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes).
- * @NL80211_ATTR_PLINK_ACTION: action to perform on the mesh peer link.
+ * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link.
  * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path.
  * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path
  * 	info given for %NL80211_CMD_GET_MPATH, nested attribute described at
@@ -879,7 +883,9 @@
  *	See &enum nl80211_key_default_types.
  *
  * @NL80211_ATTR_MESH_SETUP: Optional mesh setup parameters.  These cannot be
- * changed once the mesh is active.
+ *	changed once the mesh is active.
+ * @NL80211_ATTR_MESH_CONFIG: Mesh configuration parameters, a nested attribute
+ *	containing attributes from &enum nl80211_meshconf_params.
  *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -1225,8 +1231,6 @@
  * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs)
  * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station)
  * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
- * @__NL80211_STA_INFO_AFTER_LAST: internal
- * @NL80211_STA_INFO_MAX: highest possible station info attribute
  * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
  * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
  * 	containing info as possible, see &enum nl80211_sta_info_txrate.
@@ -1236,6 +1240,11 @@
  * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station)
  * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station)
  * @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm)
+ * @NL80211_STA_INFO_LLID: the station's mesh LLID
+ * @NL80211_STA_INFO_PLID: the station's mesh PLID
+ * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
+ * @__NL80211_STA_INFO_AFTER_LAST: internal
+ * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
 enum nl80211_sta_info {
 	__NL80211_STA_INFO_INVALID,
@@ -1626,7 +1635,7 @@
  * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
  * that it takes for an HWMP information element to propagate across the mesh
  *
- * @NL80211_MESHCONF_ROOTMODE: whether root mode is enabled or not
+ * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not
  *
  * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a
  * source mesh point for path selection elements.
@@ -1678,6 +1687,7 @@
  * element that vendors will use to identify the path selection methods and
  * metrics in use.
  *
+ * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
  * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
  */
 enum nl80211_mesh_setup_params {
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0ef22a1..c84d900 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -97,7 +97,7 @@
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern void early_init_dt_add_memory_arch(u64 base, u64 size);
-extern u64 early_init_dt_alloc_memory_arch(u64 size, u64 align);
+extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
 extern u64 dt_mem_next_cell(int s, __be32 **cellp);
 
 /*
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 32fb812..1ca6411 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -16,6 +16,8 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
 #include <asm/atomic.h>
  
 /* Each escaped entry is prefixed by ESCAPE_CODE
@@ -186,10 +188,17 @@
 int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_HW_PERF_EVENTS
 int __init oprofile_perf_init(struct oprofile_operations *ops);
 void oprofile_perf_exit(void);
 char *op_name_from_perf_id(void);
-#endif /* CONFIG_PERF_EVENTS */
+#else
+static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+	pr_info("oprofile: hardware counters not available\n");
+	return -ENODEV;
+}
+static inline void oprofile_perf_exit(void) { }
+#endif /* CONFIG_HW_PERF_EVENTS */
 
 #endif /* OPROFILE_H */
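With the stub above, an architecture can call oprofile_perf_init() unconditionally and simply inherit -ENODEV when hardware perf events are not configured. A minimal sketch of the arch hooks (mirroring, but not copied from, the typical arch glue):

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	return oprofile_perf_init(ops);	/* -ENODEV without CONFIG_HW_PERF_EVENTS */
}

void oprofile_arch_exit(void)
{
	oprofile_perf_exit();
}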
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5f38c46..0db8037 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -48,9 +48,6 @@
  * struct page (these bits with information) are always mapped into kernel
  * address space...
  *
- * PG_buddy is set to indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
  * PG_hwpoison indicates that a page got corrupted in hardware and contains
  * data with incorrect ECC bits that triggered a machine check. Accessing is
  * not safe since it may cause another machine check. Don't touch!
@@ -96,7 +93,6 @@
 	PG_swapcache,		/* Swap page: swp_entry_t in private */
 	PG_mappedtodisk,	/* Has blocks allocated on-disk */
 	PG_reclaim,		/* To be reclaimed asap */
-	PG_buddy,		/* Page is free, on buddy lists */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 	PG_unevictable,		/* Page is "unevictable"  */
 #ifdef CONFIG_MMU
@@ -108,6 +104,9 @@
 #ifdef CONFIG_MEMORY_FAILURE
 	PG_hwpoison,		/* hardware poisoned page. Don't touch */
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	PG_compound_lock,
+#endif
 	__NR_PAGEFLAGS,
 
 	/* Filesystems */
@@ -198,7 +197,7 @@
 struct page;	/* forward declaration */
 
 TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
-PAGEFLAG(Error, error)
+PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
 PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
@@ -230,7 +229,6 @@
  * risky: they bypass page accounting.
  */
 TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
-__PAGEFLAG(Buddy, buddy)
 PAGEFLAG(MappedToDisk, mappedtodisk)
 
 /* PG_readahead is only used for file reads; PG_reclaim is only for writes */
@@ -344,7 +342,7 @@
  * tests can be used in performance sensitive paths. PageCompound is
  * generally not used in hot code paths.
  */
-__PAGEFLAG(Head, head)
+__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
 __PAGEFLAG(Tail, tail)
 
 static inline int PageCompound(struct page *page)
@@ -352,6 +350,13 @@
 	return page->flags & ((1L << PG_head) | (1L << PG_tail));
 
 }
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+	BUG_ON(!PageHead(page));
+	ClearPageHead(page);
+}
+#endif
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
@@ -389,14 +394,61 @@
 	page->flags &= ~PG_head_tail_mask;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+	BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
+	clear_bit(PG_compound, &page->flags);
+}
+#endif
+
 #endif /* !PAGEFLAGS_EXTENDED */
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * PageHuge() returns true only for hugetlbfs pages, not for normal or
+ * transparent huge pages.
+ *
+ * PageTransHuge() returns true for both transparent huge pages and
+ * hugetlbfs pages, but not for normal pages. PageTransHuge() can only be
+ * called in the core VM paths where hugetlbfs pages can't exist.
+ */
+static inline int PageTransHuge(struct page *page)
+{
+	VM_BUG_ON(PageTail(page));
+	return PageHead(page);
+}
+
+static inline int PageTransCompound(struct page *page)
+{
+	return PageCompound(page);
+}
+
+#else
+
+static inline int PageTransHuge(struct page *page)
+{
+	return 0;
+}
+
+static inline int PageTransCompound(struct page *page)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_MMU
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
 #define __PG_MLOCKED		0
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __PG_COMPOUND_LOCK		(1 << PG_compound_lock)
+#else
+#define __PG_COMPOUND_LOCK		0
+#endif
+
 /*
  * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
@@ -404,9 +456,10 @@
 #define PAGE_FLAGS_CHECK_AT_FREE \
 	(1 << PG_lru	 | 1 << PG_locked    | \
 	 1 << PG_private | 1 << PG_private_2 | \
-	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
+	 1 << PG_writeback | 1 << PG_reserved | \
 	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
-	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON)
+	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+	 __PG_COMPOUND_LOCK)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
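
As a usage sketch (my_nr_subpages() is a hypothetical helper, not part of this patch): PageTransHuge() lets core-VM code that already knows it cannot be looking at a hugetlbfs page account a whole compound page at once.

#include <linux/mm.h>

static unsigned long my_nr_subpages(struct page *page)
{
	/* Only valid where hugetlbfs pages cannot appear, e.g. on
	 * anonymous pages in core VM paths. */
	if (PageTransHuge(page))
		return 1UL << compound_order(page);
	return 1;
}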
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index b02195d..6d6cb7a 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -35,12 +35,15 @@
 
 enum {
 	/* flags for mem_cgroup */
-	PCG_LOCK,  /* page cgroup is locked */
+	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
 	PCG_CACHE, /* charged as cache */
 	PCG_USED, /* this object is in use. */
-	PCG_ACCT_LRU, /* page has been accounted for */
-	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
 	PCG_MIGRATION, /* under page migration */
+	/* flags for mem_cgroup and file and I/O status */
+	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
+	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
+	/* No lock in page_cgroup */
+	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
 };
 
 #define TESTPCGFLAG(uname, lname)			\
@@ -94,6 +97,10 @@
 
 static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
+	/*
+	 * Don't take this lock in IRQ context.
+	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
+	 */
 	bit_spin_lock(PCG_LOCK, &pc->flags);
 }
 
@@ -107,6 +114,24 @@
 	return bit_spin_is_locked(PCG_LOCK, &pc->flags);
 }
 
+static inline void move_lock_page_cgroup(struct page_cgroup *pc,
+	unsigned long *flags)
+{
+	/*
+	 * Updates to pc->flags for page cache statistics can come from both
+	 * normal context and IRQ context. Disable IRQs to avoid deadlock.
+	 */
+	local_irq_save(*flags);
+	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
+}
+
+static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
+	unsigned long *flags)
+{
+	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
+	local_irq_restore(*flags);
+}
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
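
The intended locking pattern for statistics that can race with move_account() looks roughly like this; my_update_file_mapped() is illustrative, the real users live in mm/memcontrol.c.

#include <linux/page_cgroup.h>

static void my_update_file_mapped(struct page_cgroup *pc)
{
	unsigned long flags;

	/* Disables IRQs and takes PCG_MOVE_LOCK; safe against a concurrent
	 * move_account() and against IRQ-context updates. */
	move_lock_page_cgroup(pc, &flags);
	if (PageCgroupUsed(pc))
		; /* update per-memcg file statistics here */
	move_unlock_page_cgroup(pc, &flags);
}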
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2d1ffe3..9c66e99 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -48,7 +48,7 @@
 
 static inline int mapping_unevictable(struct address_space *mapping)
 {
-	if (likely(mapping))
+	if (mapping)
 		return test_bit(AS_UNEVICTABLE, &mapping->flags);
 	return !!mapping;
 }
diff --git a/include/linux/path.h b/include/linux/path.h
index a581e8c..edc98de 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -10,9 +10,7 @@
 };
 
 extern void path_get(struct path *);
-extern void path_get_long(struct path *);
 extern void path_put(struct path *);
-extern void path_put_long(struct path *);
 
 static inline int path_equal(const struct path *path1, const struct path *path2)
 {
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index c8b6473..4462350 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -35,9 +35,12 @@
 	return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
 					      pbus->number);
 }
+#endif
+
+#ifdef CONFIG_ACPI_APEI
+extern bool aer_acpi_firmware_first(void);
 #else
-static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
-{ return NULL; }
+static inline bool aer_acpi_firmware_first(void) { return false; }
 #endif
 
 #endif	/* _PCI_ACPI_H_ */
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index 91ba0b3..ce681051 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -27,6 +27,7 @@
 extern void pcie_aspm_exit_link_state(struct pci_dev *pdev);
 extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
 extern void pci_disable_link_state(struct pci_dev *pdev, int state);
+extern void pcie_clear_aspm(void);
 extern void pcie_no_aspm(void);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
@@ -41,7 +42,9 @@
 static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
 {
 }
-
+static inline void pcie_clear_aspm(void)
+{
+}
 static inline void pcie_no_aspm(void)
 {
 }
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 7454408..559d028 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -806,7 +806,7 @@
 
 /* Power management related routines */
 int pci_save_state(struct pci_dev *dev);
-int pci_restore_state(struct pci_dev *dev);
+void pci_restore_state(struct pci_dev *dev);
 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
@@ -820,7 +820,6 @@
 int pci_back_from_sleep(struct pci_dev *dev);
 bool pci_dev_run_wake(struct pci_dev *dev);
 bool pci_check_pme_status(struct pci_dev *dev);
-void pci_wakeup_event(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
 
 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
@@ -994,6 +993,14 @@
 extern int pci_msi_enabled(void);
 #endif
 
+#ifdef CONFIG_PCIEPORTBUS
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+#else
+#define pcie_ports_disabled	true
+#define pcie_ports_auto		false
+#endif
+
 #ifndef CONFIG_PCIEASPM
 static inline int pcie_aspm_enabled(void)
 {
@@ -1003,6 +1010,14 @@
 extern int pcie_aspm_enabled(void);
 #endif
 
+#ifdef CONFIG_PCIEAER
+void pci_no_aer(void);
+bool pci_aer_available(void);
+#else
+static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
+#endif
+
 #ifndef CONFIG_PCIE_ECRC
 static inline void pcie_set_ecrc_checking(struct pci_dev *dev)
 {
@@ -1168,10 +1183,8 @@
 	return 0;
 }
 
-static inline int pci_restore_state(struct pci_dev *dev)
-{
-	return 0;
-}
+static inline void pci_restore_state(struct pci_dev *dev)
+{ }
 
 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cb845c1..3adb06e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,7 @@
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
 #define PCI_DEVICE_ID_AMD_15H_NB_MISC	0x1603
+#define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
 #define PCI_DEVICE_ID_AMD_SCSI		0x2020
@@ -1650,6 +1651,11 @@
 #define PCI_DEVICE_ID_O2_6836		0x6836
 #define PCI_DEVICE_ID_O2_6812		0x6872
 #define PCI_DEVICE_ID_O2_6933		0x6933
+#define PCI_DEVICE_ID_O2_8120		0x8120
+#define PCI_DEVICE_ID_O2_8220		0x8220
+#define PCI_DEVICE_ID_O2_8221		0x8221
+#define PCI_DEVICE_ID_O2_8320		0x8320
+#define PCI_DEVICE_ID_O2_8321		0x8321
 
 #define PCI_VENDOR_ID_3DFX		0x121a
 #define PCI_DEVICE_ID_3DFX_VOODOO	0x0001
@@ -2363,6 +2369,10 @@
 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD	0x2381
 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS	0x2383
+#define PCI_DEVICE_ID_JMICRON_JMB385_MS	0x2388
+#define PCI_DEVICE_ID_JMICRON_JMB388_SD	0x2391
+#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392
+#define PCI_DEVICE_ID_JMICRON_JMB390_MS	0x2393
 
 #define PCI_VENDOR_ID_KORENIX		0x1982
 #define PCI_DEVICE_ID_KORENIX_JETCARDF0	0x1600
@@ -2468,7 +2478,8 @@
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN	0x1c41
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX	0x1c5f
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS	0x1d22
-#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC	0x1d40
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0	0x1d40
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1	0x1d41
 #define PCI_DEVICE_ID_INTEL_82801AA_0	0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1	0x2411
 #define PCI_DEVICE_ID_INTEL_82801AA_3	0x2413
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index af83076..5b7e6b1 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -309,6 +309,14 @@
 #define PCI_MSIX_PBA		8
 #define  PCI_MSIX_FLAGS_BIRMASK	(7 << 0)
 
+/* MSI-X entry's format */
+#define PCI_MSIX_ENTRY_SIZE		16
+#define  PCI_MSIX_ENTRY_LOWER_ADDR	0
+#define  PCI_MSIX_ENTRY_UPPER_ADDR	4
+#define  PCI_MSIX_ENTRY_DATA		8
+#define  PCI_MSIX_ENTRY_VECTOR_CTRL	12
+#define   PCI_MSIX_ENTRY_CTRL_MASKBIT	1
+
 /* CompactPCI Hotswap Register */
 
 #define PCI_CHSWP_CSR		2	/* Control and Status Register */
@@ -496,6 +504,8 @@
 #define  PCI_EXP_RTCTL_CRSSVE	0x10	/* CRS Software Visibility Enable */
 #define PCI_EXP_RTCAP		30	/* Root Capabilities */
 #define PCI_EXP_RTSTA		32	/* Root Status */
+#define PCI_EXP_RTSTA_PME	0x10000 /* PME status */
+#define PCI_EXP_RTSTA_PENDING	0x20000 /* PME pending */
 #define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
 #define  PCI_EXP_DEVCAP2_ARI	0x20	/* Alternative Routing-ID */
 #define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index dd9c7ab..21415cc 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -431,6 +431,8 @@
 	struct list_head	entry;
 	struct completion	completion;
 	struct wakeup_source	*wakeup;
+#else
+	unsigned int		should_wakeup:1;
 #endif
 #ifdef CONFIG_PM_RUNTIME
 	struct timer_list	suspend_timer;
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 9cff00d..03a67db 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -109,11 +109,6 @@
 	return dev->power.can_wakeup;
 }
 
-static inline bool device_may_wakeup(struct device *dev)
-{
-	return false;
-}
-
 static inline struct wakeup_source *wakeup_source_create(const char *name)
 {
 	return NULL;
@@ -134,24 +129,32 @@
 
 static inline int device_wakeup_enable(struct device *dev)
 {
-	return -EINVAL;
+	dev->power.should_wakeup = true;
+	return 0;
 }
 
 static inline int device_wakeup_disable(struct device *dev)
 {
+	dev->power.should_wakeup = false;
+	return 0;
+}
+
+static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+	dev->power.should_wakeup = enable;
 	return 0;
 }
 
 static inline int device_init_wakeup(struct device *dev, bool val)
 {
-	dev->power.can_wakeup = val;
-	return val ? -EINVAL : 0;
+	device_set_wakeup_capable(dev, val);
+	device_set_wakeup_enable(dev, val);
+	return 0;
 }
 
-
-static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+static inline bool device_may_wakeup(struct device *dev)
 {
-	return -EINVAL;
+	return dev->power.can_wakeup && dev->power.should_wakeup;
 }
 
 static inline void __pm_stay_awake(struct wakeup_source *ws) {}
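
With the reworked stubs, driver code can use the wakeup helpers without #ifdefs and gets consistent behaviour whether or not wakeup support is built in; a sketch (my_probe()/my_suspend() are illustrative):

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static int my_probe(struct device *dev)
{
	/* Marks the device wakeup-capable and enables wakeup. */
	return device_init_wakeup(dev, true);
}

static int my_suspend(struct device *dev)
{
	if (device_may_wakeup(dev))
		; /* arm the device's wakeup interrupt here */
	return 0;
}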
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 56e76af..1a2ccd6 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -57,7 +57,7 @@
 };
 
 /*
- * Structures and helpers for sys_poll/sys_poll
+ * Structures and helpers for select/poll syscall
  */
 struct poll_wqueues {
 	poll_table pt;
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index d68283a..54211c1 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -71,6 +71,7 @@
 
 /* posix_acl.c */
 
+extern void posix_acl_init(struct posix_acl *, int);
 extern struct posix_acl *posix_acl_alloc(int, gfp_t);
 extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t);
 extern int posix_acl_valid(const struct posix_acl *);
diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h
new file mode 100644
index 0000000..de1dfe0
--- /dev/null
+++ b/include/linux/power/gpio-charger.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_POWER_GPIO_CHARGER_H__
+#define __LINUX_POWER_GPIO_CHARGER_H__
+
+#include <linux/power_supply.h>
+#include <linux/types.h>
+
+/**
+ * struct gpio_charger_platform_data - platform_data for gpio_charger devices
+ * @name:		Name for the charger's power_supply device
+ * @type:		Type of the charger
+ * @gpio:		GPIO which is used to indicate the charger's status
+ * @gpio_active_low:	Should be set to 1 if the GPIO is active low, otherwise 0
+ * @supplied_to:	Array of battery names to which this charger supplies power
+ * @num_supplicants:	Number of entries in the supplied_to array
+ */
+struct gpio_charger_platform_data {
+	const char *name;
+	enum power_supply_type type;
+
+	int gpio;
+	int gpio_active_low;
+
+	char **supplied_to;
+	size_t num_supplicants;
+};
+
+#endif
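
Board code would describe such a charger roughly as follows; the GPIO number, the supply names and the "gpio-charger" platform-device name are assumptions for illustration.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/power/gpio-charger.h>

static char *my_batteries[] = { "main-battery" };

static struct gpio_charger_platform_data my_charger_pdata = {
	.name		 = "usb-charger",
	.type		 = POWER_SUPPLY_TYPE_USB,
	.gpio		 = 28,		/* charger status line */
	.gpio_active_low = 0,
	.supplied_to	 = my_batteries,
	.num_supplicants = ARRAY_SIZE(my_batteries),
};

static struct platform_device my_charger_device = {
	.name = "gpio-charger",
	.dev  = { .platform_data = &my_charger_pdata },
};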
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
new file mode 100644
index 0000000..7995deb
--- /dev/null
+++ b/include/linux/power/max17042_battery.h
@@ -0,0 +1,30 @@
+/*
+ * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ *  Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __MAX17042_BATTERY_H_
+#define __MAX17042_BATTERY_H_
+
+struct max17042_platform_data {
+	bool enable_current_sense;
+};
+
+#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/pps.h b/include/linux/pps.h
index 0194ab0..a9bb1d9 100644
--- a/include/linux/pps.h
+++ b/include/linux/pps.h
@@ -114,11 +114,18 @@
 	struct pps_ktime timeout;
 };
 
+struct pps_bind_args {
+	int tsformat;	/* format of time stamps */
+	int edge;	/* selected event type */
+	int consumer;	/* selected kernel consumer */
+};
+
 #include <linux/ioctl.h>
 
 #define PPS_GETPARAMS		_IOR('p', 0xa1, struct pps_kparams *)
 #define PPS_SETPARAMS		_IOW('p', 0xa2, struct pps_kparams *)
 #define PPS_GETCAP		_IOR('p', 0xa3, int *)
 #define PPS_FETCH		_IOWR('p', 0xa4, struct pps_fdata *)
+#define PPS_KC_BIND		_IOW('p', 0xa5, struct pps_bind_args *)
 
 #endif /* _PPS_H_ */
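
From userspace, the new ioctl is used roughly like this; a sketch that assumes the PPS_KC_HARDPPS consumer constant from the same series, with an illustrative device path.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int bind_pps_to_kernel_consumer(void)
{
	struct pps_bind_args bind_args;
	int fd, ret;

	fd = open("/dev/pps0", O_RDWR);
	if (fd < 0)
		return -1;

	bind_args.tsformat = PPS_TSFMT_TSPEC;	/* timespec timestamps */
	bind_args.edge	   = PPS_CAPTUREASSERT;	/* bind to assert events */
	bind_args.consumer = PPS_KC_HARDPPS;	/* kernel NTP consumer */

	ret = ioctl(fd, PPS_KC_BIND, &bind_args);
	close(fd);
	return ret;
}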
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index e0a193f..9404854 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -18,6 +18,9 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#ifndef LINUX_PPS_KERNEL_H
+#define LINUX_PPS_KERNEL_H
+
 #include <linux/pps.h>
 
 #include <linux/cdev.h>
@@ -28,18 +31,28 @@
  * Global defines
  */
 
+struct pps_device;
+
 /* The specific PPS source info */
 struct pps_source_info {
 	char name[PPS_MAX_NAME_LEN];		/* symbolic name */
 	char path[PPS_MAX_NAME_LEN];		/* path of connected device */
 	int mode;				/* PPS's allowed mode */
 
-	void (*echo)(int source, int event, void *data); /* PPS echo function */
+	void (*echo)(struct pps_device *pps,
+			int event, void *data);	/* PPS echo function */
 
 	struct module *owner;
 	struct device *dev;
 };
 
+struct pps_event_time {
+#ifdef CONFIG_NTP_PPS
+	struct timespec ts_raw;
+#endif /* CONFIG_NTP_PPS */
+	struct timespec ts_real;
+};
+
 /* The main struct */
 struct pps_device {
 	struct pps_source_info info;		/* PPS source info */
@@ -52,38 +65,56 @@
 	struct pps_ktime clear_tu;
 	int current_mode;			/* PPS mode at event time */
 
-	int go;					/* PPS event is arrived? */
+	unsigned int last_ev;			/* last PPS event id */
 	wait_queue_head_t queue;		/* PPS event queue */
 
 	unsigned int id;			/* PPS source unique ID */
 	struct cdev cdev;
 	struct device *dev;
-	int devno;
 	struct fasync_struct *async_queue;	/* fasync method */
 	spinlock_t lock;
-
-	atomic_t usage;				/* usage count */
 };
 
 /*
  * Global variables
  */
 
-extern spinlock_t pps_idr_lock;
-extern struct idr pps_idr;
-extern struct timespec pps_irq_ts[];
-
 extern struct device_attribute pps_attrs[];
 
 /*
  * Exported functions
  */
 
-struct pps_device *pps_get_source(int source);
-extern void pps_put_source(struct pps_device *pps);
-extern int pps_register_source(struct pps_source_info *info,
-				int default_params);
-extern void pps_unregister_source(int source);
+extern struct pps_device *pps_register_source(
+		struct pps_source_info *info, int default_params);
+extern void pps_unregister_source(struct pps_device *pps);
 extern int pps_register_cdev(struct pps_device *pps);
 extern void pps_unregister_cdev(struct pps_device *pps);
-extern void pps_event(int source, struct pps_ktime *ts, int event, void *data);
+extern void pps_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event, void *data);
+
+static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
+		struct timespec ts)
+{
+	kt->sec = ts.tv_sec;
+	kt->nsec = ts.tv_nsec;
+}
+
+#ifdef CONFIG_NTP_PPS
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+	getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+}
+
+#else /* CONFIG_NTP_PPS */
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+	getnstimeofday(&ts->ts_real);
+}
+
+#endif /* CONFIG_NTP_PPS */
+
+#endif /* LINUX_PPS_KERNEL_H */
+
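
For in-kernel users, the reworked API looks roughly as below: pps_register_source() hands back the pps_device, and events are reported with a pps_event_time filled in by pps_get_ts(). The driver names are illustrative.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pps_kernel.h>

static struct pps_device *my_pps;

static struct pps_source_info my_pps_info = {
	.name	= "my-pps",
	.path	= "",
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
		  PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

static int my_pps_register(void)
{
	my_pps = pps_register_source(&my_pps_info,
				     PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	return my_pps ? 0 : -ENOMEM;
}

/* Called from the interrupt handler on the assert edge. */
static void my_pps_report_event(void)
{
	struct pps_event_time ts;

	pps_get_ts(&ts);
	pps_event(my_pps, &ts, PPS_CAPTUREASSERT, NULL);
}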
diff --git a/include/linux/printk.h b/include/linux/printk.h
index b772ca5..ee048e7 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -4,14 +4,14 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-#define	KERN_EMERG	"<0>"	/* system is unusable			*/
-#define	KERN_ALERT	"<1>"	/* action must be taken immediately	*/
-#define	KERN_CRIT	"<2>"	/* critical conditions			*/
-#define	KERN_ERR	"<3>"	/* error conditions			*/
-#define	KERN_WARNING	"<4>"	/* warning conditions			*/
-#define	KERN_NOTICE	"<5>"	/* normal but significant condition	*/
-#define	KERN_INFO	"<6>"	/* informational			*/
-#define	KERN_DEBUG	"<7>"	/* debug-level messages			*/
+#define KERN_EMERG	"<0>"	/* system is unusable			*/
+#define KERN_ALERT	"<1>"	/* action must be taken immediately	*/
+#define KERN_CRIT	"<2>"	/* critical conditions			*/
+#define KERN_ERR	"<3>"	/* error conditions			*/
+#define KERN_WARNING	"<4>"	/* warning conditions			*/
+#define KERN_NOTICE	"<5>"	/* normal but significant condition	*/
+#define KERN_INFO	"<6>"	/* informational			*/
+#define KERN_DEBUG	"<7>"	/* debug-level messages			*/
 
 /* Use the default kernel loglevel */
 #define KERN_DEFAULT	"<d>"
@@ -20,7 +20,7 @@
  * line that had no enclosing \n). Only to be used by core/arch code
  * during early bootup (a continued line is not SMP-safe otherwise).
  */
-#define	KERN_CONT	"<c>"
+#define KERN_CONT	"<c>"
 
 extern int console_printk[];
 
@@ -29,6 +29,17 @@
 #define minimum_console_loglevel (console_printk[2])
 #define default_console_loglevel (console_printk[3])
 
+static inline void console_silent(void)
+{
+	console_loglevel = 0;
+}
+
+static inline void console_verbose(void)
+{
+	if (console_loglevel)
+		console_loglevel = 15;
+}
+
 struct va_format {
 	const char *fmt;
 	va_list *va;
@@ -65,11 +76,27 @@
  */
 #define HW_ERR		"[Hardware Error]: "
 
+/*
+ * Dummy printk for disabled debugging statements to use whilst maintaining
+ * gcc's format and side-effect checking.
+ */
+static inline __attribute__ ((format (printf, 1, 2)))
+int no_printk(const char *fmt, ...)
+{
+	return 0;
+}
+
+extern asmlinkage __attribute__ ((format (printf, 1, 2)))
+void early_printk(const char *fmt, ...);
+
+extern int printk_needs_cpu(int cpu);
+extern void printk_tick(void);
+
 #ifdef CONFIG_PRINTK
-asmlinkage int vprintk(const char *fmt, va_list args)
-	__attribute__ ((format (printf, 1, 0)));
-asmlinkage int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2))) __cold;
+asmlinkage __attribute__ ((format (printf, 1, 0)))
+int vprintk(const char *fmt, va_list args);
+asmlinkage __attribute__ ((format (printf, 1, 2))) __cold
+int printk(const char *fmt, ...);
 
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -83,99 +110,56 @@
 
 extern int printk_delay_msec;
 extern int dmesg_restrict;
-
-/*
- * Print a one-time message (analogous to WARN_ONCE() et al):
- */
-#define printk_once(x...) ({			\
-	static bool __print_once;		\
-						\
-	if (!__print_once) {			\
-		__print_once = true;		\
-		printk(x);			\
-	}					\
-})
+extern int kptr_restrict;
 
 void log_buf_kexec_setup(void);
 #else
-static inline int vprintk(const char *s, va_list args)
-	__attribute__ ((format (printf, 1, 0)));
-static inline int vprintk(const char *s, va_list args) { return 0; }
-static inline int printk(const char *s, ...)
-	__attribute__ ((format (printf, 1, 2)));
-static inline int __cold printk(const char *s, ...) { return 0; }
-static inline int printk_ratelimit(void) { return 0; }
-static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
-					  unsigned int interval_msec)	\
-		{ return false; }
-
-/* No effect, but we still get type checking even in the !PRINTK case: */
-#define printk_once(x...) printk(x)
+static inline __attribute__ ((format (printf, 1, 0)))
+int vprintk(const char *s, va_list args)
+{
+	return 0;
+}
+static inline __attribute__ ((format (printf, 1, 2))) __cold
+int printk(const char *s, ...)
+{
+	return 0;
+}
+static inline int printk_ratelimit(void)
+{
+	return 0;
+}
+static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+					  unsigned int interval_msec)
+{
+	return false;
+}
 
 static inline void log_buf_kexec_setup(void)
 {
 }
 #endif
 
-/*
- * Dummy printk for disabled debugging statements to use whilst maintaining
- * gcc's format and side-effect checking.
- */
-static inline __attribute__ ((format (printf, 1, 2)))
-int no_printk(const char *s, ...) { return 0; }
-
-extern int printk_needs_cpu(int cpu);
-extern void printk_tick(void);
-
-extern void asmlinkage __attribute__((format(printf, 1, 2)))
-	early_printk(const char *fmt, ...);
-
-static inline void console_silent(void)
-{
-	console_loglevel = 0;
-}
-
-static inline void console_verbose(void)
-{
-	if (console_loglevel)
-		console_loglevel = 15;
-}
-
 extern void dump_stack(void) __cold;
 
-enum {
-	DUMP_PREFIX_NONE,
-	DUMP_PREFIX_ADDRESS,
-	DUMP_PREFIX_OFFSET
-};
-extern void hex_dump_to_buffer(const void *buf, size_t len,
-				int rowsize, int groupsize,
-				char *linebuf, size_t linebuflen, bool ascii);
-extern void print_hex_dump(const char *level, const char *prefix_str,
-				int prefix_type, int rowsize, int groupsize,
-				const void *buf, size_t len, bool ascii);
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
-			const void *buf, size_t len);
-
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
 
 #define pr_emerg(fmt, ...) \
-        printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_alert(fmt, ...) \
-        printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_crit(fmt, ...) \
-        printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_err(fmt, ...) \
-        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
-        printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warn pr_warning
 #define pr_notice(fmt, ...) \
-        printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
-        printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_cont(fmt, ...) \
 	printk(KERN_CONT fmt, ##__VA_ARGS__)
 
@@ -185,7 +169,7 @@
 	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
 #define pr_devel(fmt, ...) \
-	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #endif
 
 /* If you are writing a driver, please use dev_dbg instead */
@@ -198,7 +182,51 @@
 	dynamic_pr_debug(fmt, ##__VA_ARGS__)
 #else
 #define pr_debug(fmt, ...) \
-	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+
+#ifdef CONFIG_PRINTK
+#define printk_once(fmt, ...)			\
+({						\
+	static bool __print_once;		\
+						\
+	if (!__print_once) {			\
+		__print_once = true;		\
+		printk(fmt, ##__VA_ARGS__);	\
+	}					\
+})
+#else
+#define printk_once(fmt, ...)			\
+	no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+#define pr_emerg_once(fmt, ...)					\
+	printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_once(fmt, ...)					\
+	printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_once(fmt, ...)					\
+	printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_once(fmt, ...)					\
+	printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_once(fmt, ...)					\
+	printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_once(fmt, ...)				\
+	printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_once(fmt, ...)					\
+	printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont_once(fmt, ...)					\
+	printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__)
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_once(fmt, ...)					\
+	printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_once(fmt, ...)					\
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #endif
 
 /*
@@ -206,7 +234,8 @@
  * no local ratelimit_state used in the !PRINTK case
  */
 #ifdef CONFIG_PRINTK
-#define printk_ratelimited(fmt, ...)  ({				\
+#define printk_ratelimited(fmt, ...)					\
+({									\
 	static DEFINE_RATELIMIT_STATE(_rs,				\
 				      DEFAULT_RATELIMIT_INTERVAL,	\
 				      DEFAULT_RATELIMIT_BURST);		\
@@ -215,34 +244,59 @@
 		printk(fmt, ##__VA_ARGS__);				\
 })
 #else
-/* No effect, but we still get type checking even in the !PRINTK case: */
-#define printk_ratelimited printk
+#define printk_ratelimited(fmt, ...)					\
+	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-#define pr_emerg_ratelimited(fmt, ...) \
+#define pr_emerg_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert_ratelimited(fmt, ...) \
+#define pr_alert_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit_ratelimited(fmt, ...) \
+#define pr_crit_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err_ratelimited(fmt, ...) \
+#define pr_err_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning_ratelimited(fmt, ...) \
+#define pr_warn_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn_ratelimited pr_warning_ratelimited
-#define pr_notice_ratelimited(fmt, ...) \
+#define pr_notice_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info_ratelimited(fmt, ...) \
+#define pr_info_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 /* no pr_cont_ratelimited, don't do that... */
 /* If you are writing a driver, please use dev_dbg instead */
 #if defined(DEBUG)
-#define pr_debug_ratelimited(fmt, ...) \
+#define pr_debug_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
 #define pr_debug_ratelimited(fmt, ...) \
-	({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
-				     ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+enum {
+	DUMP_PREFIX_NONE,
+	DUMP_PREFIX_ADDRESS,
+	DUMP_PREFIX_OFFSET
+};
+extern void hex_dump_to_buffer(const void *buf, size_t len,
+			       int rowsize, int groupsize,
+			       char *linebuf, size_t linebuflen, bool ascii);
+#ifdef CONFIG_PRINTK
+extern void print_hex_dump(const char *level, const char *prefix_str,
+			   int prefix_type, int rowsize, int groupsize,
+			   const void *buf, size_t len, bool ascii);
+extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+				 const void *buf, size_t len);
+#else
+static inline void print_hex_dump(const char *level, const char *prefix_str,
+				  int prefix_type, int rowsize, int groupsize,
+				  const void *buf, size_t len, bool ascii)
+{
+}
+static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+					const void *buf, size_t len)
+{
+}
+
 #endif
 
 #endif
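
Callers keep full printf-style format checking in every configuration because the disabled variants now expand to no_printk(); for example (the messages are illustrative):

#include <linux/kernel.h>

static void my_report(int chip_rev)
{
	pr_info_once("controller revision %d detected\n", chip_rev);

	if (chip_rev < 2)
		pr_warn_ratelimited("old revision %d, expect slow I/O\n",
				    chip_rev);

	pr_debug("probe finished\n");	/* no_printk() unless DEBUG/dyndbg */
}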
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 092a04f..a1147e5 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -102,11 +102,8 @@
 
 extern long arch_ptrace(struct task_struct *child, long request,
 			unsigned long addr, unsigned long data);
-extern int ptrace_traceme(void);
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
-extern int ptrace_attach(struct task_struct *tsk);
-extern int ptrace_detach(struct task_struct *, unsigned int);
 extern void ptrace_disable(struct task_struct *);
 extern int ptrace_check_attach(struct task_struct *task, int kill);
 extern int ptrace_request(struct task_struct *child, long request,
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 94c1f03..9a85412 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -322,9 +322,12 @@
 	qsize_t *(*get_reserved_space) (struct inode *);
 };
 
+struct path;
+
 /* Operations handling requests from userspace */
 struct quotactl_ops {
-	int (*quota_on)(struct super_block *, int, int, char *);
+	int (*quota_on)(struct super_block *, int, int, struct path *);
+	int (*quota_on_meta)(struct super_block *, int, int);
 	int (*quota_off)(struct super_block *, int);
 	int (*quota_sync)(struct super_block *, int, int);
 	int (*get_info)(struct super_block *, int, struct if_dqinfo *);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d1a9193..eb354f6 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -31,8 +31,9 @@
 #define quota_error(sb, fmt, args...) \
 	__quota_error((sb), __func__, fmt , ## args)
 
-extern void __quota_error(struct super_block *sb, const char *func,
-			 const char *fmt, ...);
+extern __attribute__((format (printf, 3, 4)))
+void __quota_error(struct super_block *sb, const char *func,
+		   const char *fmt, ...);
 
 /*
  * declaration of quota_function calls in kernel.
@@ -75,11 +76,9 @@
 
 int dquot_file_open(struct inode *inode, struct file *file);
 
-int dquot_quota_on(struct super_block *sb, int type, int format_id,
-	char *path);
 int dquot_enable(struct inode *inode, int type, int format_id,
 	unsigned int flags);
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
  	struct path *path);
 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
  	int format_id, int type);
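
A filesystem that relies on the generic dquot code now just forwards the already-resolved path; roughly (the myfs_* names are illustrative):

#include <linux/quota.h>
#include <linux/quotaops.h>

static int myfs_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path)
{
	/* The VFS resolved the quota file path before calling in. */
	return dquot_quota_on(sb, type, format_id, path);
}

static const struct quotactl_ops myfs_quotactl_ops = {
	.quota_on	= myfs_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
};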
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ab2baa5..23241c2 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -146,6 +146,22 @@
 }
 
 /**
+ * radix_tree_deref_slot_protected	- dereference a slot without RCU lock but with tree lock held
+ * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
+ * Returns:	item that was stored in that slot with any direct pointer flag
+ *		removed.
+ *
+ * Similar to radix_tree_deref_slot but only used during migration when a
+ * page's mapping is being moved. The caller does not hold the RCU read lock but it
+ * must hold the tree lock to prevent parallel updates.
+ */
+static inline void *radix_tree_deref_slot_protected(void **pslot,
+							spinlock_t *treelock)
+{
+	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
+}
+
+/**
  * radix_tree_deref_retry	- check radix_tree_deref_slot
  * @arg:	pointer returned by radix_tree_deref_slot
  * Returns:	0 if retry is not required, otherwise retry is required
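
A usage sketch in the spirit of the migration code; my_lookup_locked() is illustrative and the caller must hold mapping->tree_lock.

#include <linux/fs.h>
#include <linux/radix-tree.h>

static struct page *my_lookup_locked(struct address_space *mapping,
				     pgoff_t index)
{
	void **pslot;

	/* No rcu_read_lock() here: mapping->tree_lock is held instead. */
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (!pslot)
		return NULL;
	return radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
}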
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
index b872b49..cf1244f 100644
--- a/include/linux/rculist_bl.h
+++ b/include/linux/rculist_bl.h
@@ -11,7 +11,8 @@
 					struct hlist_bl_node *n)
 {
 	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
-	LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
+	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
+							LIST_BL_LOCKMASK);
 	rcu_assign_pointer(h->first,
 		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
 }
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index f509877..6a210f1 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -11,15 +11,17 @@
 #define __LINUX_MFD_AB8500_REGULATOR_H
 
 /* AB8500 regulators */
-#define AB8500_LDO_AUX1         0
-#define AB8500_LDO_AUX2         1
-#define AB8500_LDO_AUX3         2
-#define AB8500_LDO_INTCORE      3
-#define AB8500_LDO_TVOUT        4
-#define AB8500_LDO_AUDIO	5
-#define AB8500_LDO_ANAMIC1      6
-#define AB8500_LDO_ANAMIC2      7
-#define AB8500_LDO_DMIC         8
-#define AB8500_LDO_ANA          9
-
+enum ab8500_regulator_id {
+	AB8500_LDO_AUX1,
+	AB8500_LDO_AUX2,
+	AB8500_LDO_AUX3,
+	AB8500_LDO_INTCORE,
+	AB8500_LDO_TVOUT,
+	AB8500_LDO_AUDIO,
+	AB8500_LDO_ANAMIC1,
+	AB8500_LDO_ANAMIC2,
+	AB8500_LDO_DMIC,
+	AB8500_LDO_ANA,
+	AB8500_NUM_REGULATORS,
+};
 #endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index ebd7472..7954f6b 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -154,6 +154,7 @@
 				   int min_uV, int max_uV);
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
 int regulator_get_voltage(struct regulator *regulator);
+int regulator_sync_voltage(struct regulator *regulator);
 int regulator_set_current_limit(struct regulator *regulator,
 			       int min_uA, int max_uA);
 int regulator_get_current_limit(struct regulator *regulator);
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 592cd7c..b8ed16a 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -42,7 +42,11 @@
  *
  * @set_voltage: Set the voltage for the regulator within the range specified.
  *               The driver should select the voltage closest to min_uV.
+ * @set_voltage_sel: Set the voltage for the regulator using the specified
+ *                   selector.
  * @get_voltage: Return the currently configured voltage for the regulator.
+ * @get_voltage_sel: Return the currently configured voltage selector for the
+ *                   regulator.
  * @list_voltage: Return one of the supported voltages, in microvolts; zero
  *	if the selector indicates a voltage that is unusable on this system;
  *	or negative errno.  Selectors range from zero to one less than
@@ -79,8 +83,11 @@
 	int (*list_voltage) (struct regulator_dev *, unsigned selector);
 
 	/* get/set regulator voltage */
-	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
+	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
+			    unsigned *selector);
+	int (*set_voltage_sel) (struct regulator_dev *, unsigned selector);
 	int (*get_voltage) (struct regulator_dev *);
+	int (*get_voltage_sel) (struct regulator_dev *);
 
 	/* get/set regulator current  */
 	int (*set_current_limit) (struct regulator_dev *,
@@ -168,9 +175,9 @@
  */
 struct regulator_dev {
 	struct regulator_desc *desc;
-	int use_count;
-	int open_count;
 	int exclusive;
+	u32 use_count;
+	u32 open_count;
 
 	/* lists we belong to */
 	struct list_head list; /* list of all regulators */
@@ -188,10 +195,14 @@
 	struct regulator_dev *supply;	/* for tree */
 
 	void *reg_data;		/* regulator_dev data */
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs;
+#endif
 };
 
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, struct regulator_init_data *init_data,
+	struct device *dev, const struct regulator_init_data *init_data,
 	void *driver_data);
 void regulator_unregister(struct regulator_dev *rdev);
 
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fcb9884..a5930cb 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -182,6 +182,26 @@
 	return ret;
 }
 
+/**
+ * res_counter_check_margin - check if the counter allows charging
+ * @cnt: the resource counter to check
+ * @bytes: the number of bytes to check the remaining space against
+ *
+ * Returns true if @bytes can be charged to the counter without exceeding
+ * its limit, false otherwise.
+ */
+static inline bool res_counter_check_margin(struct res_counter *cnt,
+					    unsigned long bytes)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	ret = cnt->limit - cnt->usage >= bytes;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return ret;
+}
+
 static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
 {
 	bool ret;
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 0bed941..ff681eb 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -66,14 +66,62 @@
 
 #define RIO_PW_MSG_SIZE		64
 
+/*
+ * A component tag value (stored in the component tag CSR) is used as a
+ * device's unique identifier assigned during enumeration. Besides being
+ * used for identifying switches (which do not have a device ID register),
+ * it is also used by error management notifications and therefore has to
+ * be assigned to endpoints as well.
+ */
+#define RIO_CTAG_RESRVD	0xfffe0000 /* Reserved */
+#define RIO_CTAG_UDEVID	0x0001ffff /* Unique device identifier */
+
 extern struct bus_type rio_bus_type;
 extern struct device rio_bus;
 extern struct list_head rio_devices;	/* list of all devices */
 
 struct rio_mport;
+struct rio_dev;
 union rio_pw_msg;
 
 /**
+ * struct rio_switch - RIO switch info
+ * @node: Node in global list of switches
+ * @switchid: Switch ID that is unique across a network
+ * @route_table: Copy of switch routing table
+ * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
+ * @add_entry: Callback for switch-specific route add function
+ * @get_entry: Callback for switch-specific route get function
+ * @clr_table: Callback for switch-specific clear route table function
+ * @set_domain: Callback for switch-specific domain setting function
+ * @get_domain: Callback for switch-specific domain get function
+ * @em_init: Callback for switch-specific error management init function
+ * @em_handle: Callback for switch-specific error management handler function
+ * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
+ * @nextdev: Array of per-port pointers to the next attached device
+ */
+struct rio_switch {
+	struct list_head node;
+	u16 switchid;
+	u8 *route_table;
+	u32 port_ok;
+	int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table, u16 route_destid, u8 route_port);
+	int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table, u16 route_destid, u8 *route_port);
+	int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table);
+	int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 sw_domain);
+	int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 *sw_domain);
+	int (*em_init) (struct rio_dev *dev);
+	int (*em_handle) (struct rio_dev *dev, u8 swport);
+	int (*sw_sysfs) (struct rio_dev *dev, int create);
+	struct rio_dev *nextdev[0];
+};
+
+/**
  * struct rio_dev - RIO device info
  * @global_list: Node in list of all RIO devices
  * @net_list: Node in list of RIO devices in a network
@@ -93,13 +141,14 @@
  * @phys_efptr: RIO device extended features pointer
  * @em_efptr: RIO Error Management features pointer
  * @dma_mask: Mask of bits of RIO address this device implements
- * @rswitch: Pointer to &struct rio_switch if valid for this device
  * @driver: Driver claiming this device
  * @dev: Device model device
  * @riores: RIO resources this device owns
  * @pwcback: port-write callback function for this device
- * @destid: Network destination ID
+ * @destid: Network destination ID (or associated destid for switch)
+ * @hopcount: Hopcount to this device
  * @prev: Previous RIO device connected to the current one
+ * @rswitch: struct rio_switch (if valid for this device)
  */
 struct rio_dev {
 	struct list_head global_list;	/* node in list of all RIO devices */
@@ -120,18 +169,20 @@
 	u32 phys_efptr;
 	u32 em_efptr;
 	u64 dma_mask;
-	struct rio_switch *rswitch;	/* RIO switch info */
 	struct rio_driver *driver;	/* RIO driver claiming this device */
 	struct device dev;	/* LDM device structure */
 	struct resource riores[RIO_MAX_DEV_RESOURCES];
 	int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
 	u16 destid;
+	u8 hopcount;
 	struct rio_dev *prev;
+	struct rio_switch rswitch[0];	/* RIO switch info */
 };
 
 #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
 #define rio_dev_f(n) list_entry(n, struct rio_dev, net_list)
 #define	to_rio_dev(n) container_of(n, struct rio_dev, dev)
+#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
 
 /**
  * struct rio_msg - RIO message event
@@ -224,49 +275,6 @@
 #define RIO_SW_SYSFS_CREATE	1	/* Create switch attributes */
 #define RIO_SW_SYSFS_REMOVE	0	/* Remove switch attributes */
 
-/**
- * struct rio_switch - RIO switch info
- * @node: Node in global list of switches
- * @rdev: Associated RIO device structure
- * @switchid: Switch ID that is unique across a network
- * @hopcount: Hopcount to this switch
- * @destid: Associated destid in the path
- * @route_table: Copy of switch routing table
- * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
- * @add_entry: Callback for switch-specific route add function
- * @get_entry: Callback for switch-specific route get function
- * @clr_table: Callback for switch-specific clear route table function
- * @set_domain: Callback for switch-specific domain setting function
- * @get_domain: Callback for switch-specific domain get function
- * @em_init: Callback for switch-specific error management initialization function
- * @em_handle: Callback for switch-specific error management handler function
- * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
- * @nextdev: Array of per-port pointers to the next attached device
- */
-struct rio_switch {
-	struct list_head node;
-	struct rio_dev *rdev;
-	u16 switchid;
-	u16 hopcount;
-	u16 destid;
-	u8 *route_table;
-	u32 port_ok;
-	int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			  u16 table, u16 route_destid, u8 route_port);
-	int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			  u16 table, u16 route_destid, u8 * route_port);
-	int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			  u16 table);
-	int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			   u8 sw_domain);
-	int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			   u8 *sw_domain);
-	int (*em_init) (struct rio_dev *dev);
-	int (*em_handle) (struct rio_dev *dev, u8 swport);
-	int (*sw_sysfs) (struct rio_dev *dev, int create);
-	struct rio_dev *nextdev[0];
-};
-
 /* Low-level architecture-dependent routines */
 
 /**
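
With struct rio_switch now embedded at the tail of struct rio_dev, switch code can move between the two without the old rdev back-pointer; a sketch (my_em_handle() is illustrative):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/rio.h>

static int my_em_handle(struct rio_dev *rdev, u8 swport)
{
	struct rio_switch *rswitch = rdev->rswitch;

	/* Round-trip back to the rio_dev that embeds this switch. */
	WARN_ON(sw_to_rio_dev(rswitch) != rdev);

	pr_debug("%s: error event on switch port %d\n",
		 dev_name(&rdev->dev), swport);
	return 0;
}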
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index edc55da..e09e565 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -150,16 +150,8 @@
 static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset,
 				     u32 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_read_config_32(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 /**
@@ -174,16 +166,8 @@
 static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset,
 				      u32 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
-					 offset, data);
+	return rio_mport_write_config_32(rdev->net->hport, rdev->destid,
+					 rdev->hopcount, offset, data);
 };
 
 /**
@@ -198,16 +182,8 @@
 static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset,
 				     u16 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_16(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_read_config_16(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 /**
@@ -222,16 +198,8 @@
 static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset,
 				      u16 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_16(rdev->net->hport, destid, hopcount,
-					 offset, data);
+	return rio_mport_write_config_16(rdev->net->hport, rdev->destid,
+					 rdev->hopcount, offset, data);
 };
 
 /**
@@ -245,16 +213,8 @@
  */
 static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_8(rdev->net->hport, destid, hopcount,
-				       offset, data);
+	return rio_mport_read_config_8(rdev->net->hport, rdev->destid,
+				       rdev->hopcount, offset, data);
 };
 
 /**
@@ -268,16 +228,8 @@
  */
 static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_8(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_write_config_8(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid,
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index ee7b6ad..7410d33 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -36,5 +36,7 @@
 #define RIO_DID_IDTCPS10Q		0x035e
 #define RIO_DID_IDTCPS1848		0x0374
 #define RIO_DID_IDTCPS1616		0x0379
+#define RIO_DID_IDTVPS1616		0x0377
+#define RIO_DID_IDTSPS1616		0x0378
 
 #endif				/* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index d63dcba..9026b30 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -14,10 +14,12 @@
 #define LINUX_RIO_REGS_H
 
 /*
- * In RapidIO, each device has a 2MB configuration space that is
+ * In RapidIO, each device has a 16MB configuration space that is
  * accessed via maintenance transactions.  Portions of configuration
  * space are standardized and/or reserved.
  */
+#define RIO_MAINT_SPACE_SZ	0x1000000 /* 16MB of RapidIO maintenance space */
+
 #define RIO_DEV_ID_CAR		0x00	/* [I] Device Identity CAR */
 #define RIO_DEV_INFO_CAR	0x04	/* [I] Device Information CAR */
 #define RIO_ASM_ID_CAR		0x08	/* [I] Assembly Identity CAR */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bb83c0d..e9fd04c 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -198,6 +198,8 @@
 };
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
+bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
 int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
 			unsigned long address, enum ttu_flags flags);
diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h
index c490fbc..5f57f93 100644
--- a/include/linux/romfs_fs.h
+++ b/include/linux/romfs_fs.h
@@ -1,6 +1,9 @@
 #ifndef __LINUX_ROMFS_FS_H
 #define __LINUX_ROMFS_FS_H
 
+#include <linux/types.h>
+#include <linux/fs.h>
+
 /* The basic structures of the romfs filesystem */
 
 #define ROMBSIZE BLOCK_SIZE
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 14dbc83..89c3e51 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -107,12 +107,17 @@
 extern int rtc_valid_tm(struct rtc_time *tm);
 extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
 extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
+ktime_t rtc_tm_to_ktime(struct rtc_time tm);
+struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+
 
 #include <linux/device.h>
 #include <linux/seq_file.h>
 #include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
+#include <linux/timerqueue.h>
+#include <linux/workqueue.h>
 
 extern struct class *rtc_class;
 
@@ -151,7 +156,19 @@
 };
 
 #define RTC_DEVICE_NAME_SIZE 20
-struct rtc_task;
+typedef struct rtc_task {
+	void (*func)(void *private_data);
+	void *private_data;
+} rtc_task_t;
+
+
+struct rtc_timer {
+	struct rtc_task	task;
+	struct timerqueue_node node;
+	ktime_t period;
+	int enabled;
+};
+
 
 /* flags */
 #define RTC_DEV_BUSY 0
@@ -179,6 +196,15 @@
 	spinlock_t irq_task_lock;
 	int irq_freq;
 	int max_user_freq;
+
+	struct timerqueue_head timerqueue;
+	struct rtc_timer aie_timer;
+	struct rtc_timer uie_rtctimer;
+	struct hrtimer pie_timer; /* sub-second expiry, so needs an hrtimer */
+	int pie_enabled;
+	struct work_struct irqwork;
+
+
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
 	struct work_struct uie_task;
 	struct timer_list uie_timer;
@@ -224,15 +250,21 @@
 extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
 						unsigned int enabled);
 
-typedef struct rtc_task {
-	void (*func)(void *private_data);
-	void *private_data;
-} rtc_task_t;
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
+void rtc_aie_update_irq(void *private);
+void rtc_uie_update_irq(void *private);
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
 
 int rtc_register(rtc_task_t *task);
 int rtc_unregister(rtc_task_t *task);
 int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
 
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+			ktime_t expires, ktime_t period);
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer);
+void rtc_timer_do_work(struct work_struct *work);
+
 static inline bool is_leap_year(unsigned int year)
 {
 	return (!(year % 4) && (year % 100)) || !(year % 400);
diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h
index dbce22f..fbe58b7 100644
--- a/include/linux/s3c_adc_battery.h
+++ b/include/linux/s3c_adc_battery.h
@@ -14,6 +14,7 @@
 	void (*disable_charger)(void);
 
 	int gpio_charge_finished;
+	int gpio_inverted;
 
 	const struct s3c_adc_bat_thresh *lut_noac;
 	unsigned int lut_noac_cnt;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abc527a..777d8a5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,7 +21,8 @@
 #define CLONE_DETACHED		0x00400000	/* Unused, ignored */
 #define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
 #define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
-#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
+/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
+   and is now available for re-use. */
 #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
 #define CLONE_NEWIPC		0x08000000	/* New ipcs */
 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
@@ -433,6 +434,7 @@
 #endif
 					/* leave room for more dump flags */
 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
+#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
@@ -633,6 +635,8 @@
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
 	int oom_score_adj;	/* OOM kill score adjustment */
+	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
+				 * Only settable by CAP_SYS_RESOURCE. */
 
 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 					 * credential calculations
@@ -683,7 +687,7 @@
 	atomic_t fanotify_listeners;
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
@@ -1740,7 +1744,7 @@
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
diff --git a/include/linux/security.h b/include/linux/security.h
index c642bb8..b2b7f97 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1662,7 +1662,7 @@
 		    const kernel_cap_t *effective,
 		    const kernel_cap_t *inheritable,
 		    const kernel_cap_t *permitted);
-int security_capable(int cap);
+int security_capable(const struct cred *cred, int cap);
 int security_real_capable(struct task_struct *tsk, int cap);
 int security_real_capable_noaudit(struct task_struct *tsk, int cap);
 int security_sysctl(struct ctl_table *table, int op);
@@ -1856,9 +1856,9 @@
 	return cap_capset(new, old, effective, inheritable, permitted);
 }
 
-static inline int security_capable(int cap)
+static inline int security_capable(const struct cred *cred, int cap)
 {
-	return cap_capable(current, current_cred(), cap, SECURITY_CAP_AUDIT);
+	return cap_capable(current, cred, cap, SECURITY_CAP_AUDIT);
 }
 
 static inline int security_real_capable(struct task_struct *tsk, int cap)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a23fa29..758c5b0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -212,6 +212,7 @@
 #include <linux/tty.h>
 #include <linux/mutex.h>
 #include <linux/sysrq.h>
+#include <linux/pps_kernel.h>
 
 struct uart_port;
 struct serial_struct;
@@ -528,10 +529,10 @@
 	struct uart_state *state = uport->state;
 	struct tty_port *port = &state->port;
 	struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
-	struct timespec ts;
+	struct pps_event_time ts;
 
 	if (ld && ld->ops->dcd_change)
-		getnstimeofday(&ts);
+		pps_get_ts(&ts);
 
 	uport->icount.dcd++;
 #ifdef CONFIG_HARD_PPS
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index baed212..1630d9c 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -8,6 +8,23 @@
  * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
  */
 
+enum {
+	SCBRR_ALGO_1,		/* ((clk + 16 * bps) / (16 * bps) - 1) */
+	SCBRR_ALGO_2,		/* ((clk + 16 * bps) / (32 * bps) - 1) */
+	SCBRR_ALGO_3,		/* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */
+	SCBRR_ALGO_4,		/* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */
+	SCBRR_ALGO_5,		/* (((clk * 1000 / 32) / bps) - 1) */
+};
+
+#define SCSCR_TIE	(1 << 7)
+#define SCSCR_RIE	(1 << 6)
+#define SCSCR_TE	(1 << 5)
+#define SCSCR_RE	(1 << 4)
+#define SCSCR_REIE	(1 << 3)	/* not supported by all parts */
+#define SCSCR_TOIE	(1 << 2)	/* not supported by all parts */
+#define SCSCR_CKE1	(1 << 1)
+#define SCSCR_CKE0	(1 << 0)
+
 /* Offsets into the sci_port->irqs array */
 enum {
 	SCIx_ERI_IRQ,
@@ -29,7 +46,12 @@
 	unsigned int	type;			/* SCI / SCIF / IRDA */
 	upf_t		flags;			/* UPF_* flags */
 	char		*clk;			/* clock string */
+
+	unsigned int	scbrr_algo_id;		/* SCBRR calculation algo */
+	unsigned int	scscr;			/* SCSCR initialization */
+
 	struct device	*dma_dev;
+
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	unsigned int dma_slave_tx;
 	unsigned int dma_slave_rx;
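
The SCBRR_ALGO_* comments above spell out the bit-rate-register formulas directly; a small helper showing the calculation for SCBRR_ALGO_2 (the function name is illustrative):

/* SCBRR value for SCBRR_ALGO_2: ((clk + 16 * bps) / (32 * bps) - 1),
 * where clk is the peripheral clock in Hz and bps the desired baud rate. */
static inline unsigned int example_scbrr_algo2(unsigned long clk,
					       unsigned int bps)
{
	return (clk + 16 * bps) / (32 * bps) - 1;
}
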
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 20ec0a64..bf221d6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,6 +255,11 @@
 typedef unsigned char *sk_buff_data_t;
 #endif
 
+#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
+    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
+#endif
+
 /** 
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
@@ -362,6 +367,8 @@
 	void			(*destructor)(struct sk_buff *skb);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	struct sk_buff		*nfct_reasm;
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -2057,6 +2064,8 @@
 	if (nfct)
 		atomic_inc(&nfct->use);
 }
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
 {
 	if (skb)
@@ -2085,6 +2094,8 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(skb->nfct_reasm);
 	skb->nfct_reasm = NULL;
 #endif
@@ -2101,6 +2112,8 @@
 	dst->nfct = src->nfct;
 	nf_conntrack_get(src->nfct);
 	dst->nfctinfo = src->nfctinfo;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	dst->nfct_reasm = src->nfct_reasm;
 	nf_conntrack_get_reasm(src->nfct_reasm);
 #endif
@@ -2114,6 +2127,8 @@
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(dst->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(dst->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5f65f14..edbb1d0 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -191,7 +191,8 @@
 #define AF_PHONET	35	/* Phonet sockets		*/
 #define AF_IEEE802154	36	/* IEEE802154 sockets		*/
 #define AF_CAIF		37	/* CAIF sockets			*/
-#define AF_MAX		38	/* For now.. */
+#define AF_ALG		38	/* Algorithm sockets		*/
+#define AF_MAX		39	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -232,6 +233,7 @@
 #define PF_PHONET	AF_PHONET
 #define PF_IEEE802154	AF_IEEE802154
 #define PF_CAIF		AF_CAIF
+#define PF_ALG		AF_ALG
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
@@ -305,6 +307,7 @@
 #define SOL_RDS		276
 #define SOL_IUCV	277
 #define SOL_CAIF	278
+#define SOL_ALG		279
 
 /* IPX options */
 #define IPX_TYPE	1
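
The AF_ALG/SOL_ALG values added above expose the kernel crypto API through sockets. A minimal userspace sketch computing a SHA-256 digest with them; error handling is omitted, the fallback define is only for older libc headers, and <linux/if_alg.h> (which provides struct sockaddr_alg) is assumed to be installed:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38		/* value introduced by this change */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfm, op, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, 0);		/* one fd per hash operation */

	write(op, "hello", 5);			/* feed data to the hash */
	read(op, digest, sizeof(digest));	/* read back the digest */

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(op);
	close(tfm);
	return 0;
}
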
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index b202475..8521067 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -110,9 +110,9 @@
 	__be32 *		(*crmarshal)(struct rpc_task *, __be32 *);
 	int			(*crrefresh)(struct rpc_task *);
 	__be32 *		(*crvalidate)(struct rpc_task *, __be32 *);
-	int			(*crwrap_req)(struct rpc_task *, kxdrproc_t,
+	int			(*crwrap_req)(struct rpc_task *, kxdreproc_t,
 						void *, __be32 *, void *);
-	int			(*crunwrap_resp)(struct rpc_task *, kxdrproc_t,
+	int			(*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
 						void *, __be32 *, void *);
 };
 
@@ -139,8 +139,8 @@
 void			put_rpccred(struct rpc_cred *);
 __be32 *		rpcauth_marshcred(struct rpc_task *, __be32 *);
 __be32 *		rpcauth_checkverf(struct rpc_task *, __be32 *);
-int			rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj);
-int			rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, __be32 *data, void *obj);
+int			rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
+int			rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
 int			rpcauth_refreshcred(struct rpc_task *);
 void			rpcauth_invalcred(struct rpc_task *);
 int			rpcauth_uptodatecred(struct rpc_task *);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 7c91260..0828842 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -43,7 +43,7 @@
  */
 static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
 {
-	if (rqstp->rq_server->bc_xprt)
+	if (rqstp->rq_server->sv_bc_xprt)
 		return 1;
 	return 0;
 }
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 78aa104..7898ea1 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -256,10 +256,13 @@
 	return rv - boot.tv_sec;
 }
 
+#ifdef CONFIG_NFSD_DEPRECATED
 static inline void sunrpc_invalidate(struct cache_head *h,
 				     struct cache_detail *detail)
 {
 	h->expiry_time = seconds_since_boot() - 1;
 	detail->nextcheck = seconds_since_boot();
 }
+#endif /* CONFIG_NFSD_DEPRECATED */
+
 #endif /*  _LINUX_SUNRPC_CACHE_H_ */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index a5a55f2..ef9476a 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -89,8 +89,8 @@
  */
 struct rpc_procinfo {
 	u32			p_proc;		/* RPC procedure number */
-	kxdrproc_t		p_encode;	/* XDR encode function */
-	kxdrproc_t		p_decode;	/* XDR decode function */
+	kxdreproc_t		p_encode;	/* XDR encode function */
+	kxdrdproc_t		p_decode;	/* XDR decode function */
 	unsigned int		p_arglen;	/* argument hdr length (u32) */
 	unsigned int		p_replen;	/* reply hdr length (u32) */
 	unsigned int		p_count;	/* call count */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 88513fd..d81db80 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -212,6 +212,7 @@
 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
 				const struct rpc_call_ops *ops);
 void		rpc_put_task(struct rpc_task *);
+void		rpc_put_task_async(struct rpc_task *);
 void		rpc_exit_task(struct rpc_task *);
 void		rpc_exit(struct rpc_task *, int);
 void		rpc_release_calldata(const struct rpc_call_ops *, void *);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 5a3085b..ea29330 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -99,7 +99,7 @@
 	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
 	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
 						 * entries in the svc_cb_list */
-	struct svc_xprt		*bc_xprt;
+	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
 #endif /* CONFIG_NFS_V4_1 */
 };
 
@@ -269,6 +269,7 @@
 	struct cache_req	rq_chandle;	/* handle passed to caches for 
 						 * request delaying 
 						 */
+	bool			rq_dropme;
 	/* Catering to nfsd */
 	struct auth_domain *	rq_client;	/* RPC peer info */
 	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index aea0d438..7ad9751 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -63,7 +63,6 @@
 #define XPT_LISTENER	11		/* listening endpoint */
 #define XPT_CACHE_AUTH	12		/* cache auth info */
 
-	struct svc_pool		*xpt_pool;	/* current pool iff queued */
 	struct svc_serv		*xpt_server;	/* service for transport */
 	atomic_t    	    	xpt_reserved;	/* space on outq that is rsvd */
 	struct mutex		xpt_mutex;	/* to serialize sending data */
@@ -80,6 +79,7 @@
 	struct list_head	xpt_users;	/* callbacks on free */
 
 	struct net		*xpt_net;
+	struct rpc_xprt		*xpt_bc_xprt;	/* NFSv4.1 backchannel */
 };
 
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 1b353a7..04dba23 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,7 +28,6 @@
 	/* private TCP part */
 	u32			sk_reclen;	/* length of record */
 	u32			sk_tcplen;	/* current read length */
-	struct rpc_xprt		*sk_bc_xprt;	/* NFSv4.1 backchannel xprt */
 };
 
 /*
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 498ab93..fc84b7a 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -33,8 +33,8 @@
 };
 
 /*
- * This is the generic XDR function. rqstp is either a rpc_rqst (client
- * side) or svc_rqst pointer (server side).
+ * This is the legacy generic XDR function. rqstp is either a rpc_rqst
+ * (client side) or svc_rqst pointer (server side).
  * Encode functions always assume there's enough room in the buffer.
  */
 typedef int	(*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
@@ -201,14 +201,22 @@
 
 	__be32 *end;		/* end of available buffer space */
 	struct kvec *iov;	/* pointer to the current kvec */
+	struct kvec scratch;	/* Scratch buffer */
+	struct page **page_ptr;	/* pointer to the current page */
 };
 
+/*
+ * These are the xdr_stream style generic XDR encode and decode functions.
+ */
+typedef void	(*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+typedef int	(*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+
 extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
 extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
 		unsigned int base, unsigned int len);
 extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
-extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes);
+extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
 extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
 extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 89d10d2..bef0f53 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -321,6 +321,7 @@
 #define XPRT_CLOSING		(6)
 #define XPRT_CONNECTION_ABORT	(7)
 #define XPRT_CONNECTION_CLOSE	(8)
+#define XPRT_INITIALIZED	(9)
 
 static inline void xprt_set_connected(struct rpc_xprt *xprt)
 {
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 144b34b..5a89e36 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -122,7 +122,7 @@
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
  */
-extern void suspend_set_ops(struct platform_suspend_ops *ops);
+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
 extern int suspend_valid_only_mem(suspend_state_t state);
 
 /**
@@ -147,7 +147,7 @@
 #else /* !CONFIG_SUSPEND */
 #define suspend_valid_only_mem	NULL
 
-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
 #endif /* !CONFIG_SUSPEND */
 
@@ -245,7 +245,7 @@
 extern void swsusp_unset_page_free(struct page *);
 extern unsigned long get_safe_page(gfp_t gfp_mask);
 
-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
 #else /* CONFIG_HIBERNATION */
@@ -253,28 +253,11 @@
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
 
-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
 
-#ifdef CONFIG_SUSPEND_NVS
-extern int suspend_nvs_register(unsigned long start, unsigned long size);
-extern int suspend_nvs_alloc(void);
-extern void suspend_nvs_free(void);
-extern void suspend_nvs_save(void);
-extern void suspend_nvs_restore(void);
-#else /* CONFIG_SUSPEND_NVS */
-static inline int suspend_nvs_register(unsigned long a, unsigned long b)
-{
-	return 0;
-}
-static inline int suspend_nvs_alloc(void) { return 0; }
-static inline void suspend_nvs_free(void) {}
-static inline void suspend_nvs_save(void) {}
-static inline void suspend_nvs_restore(void) {}
-#endif /* CONFIG_SUSPEND_NVS */
-
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index eba53e7..4d55932 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -208,6 +208,8 @@
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_add_page_tail(struct zone* zone,
+			      struct page *page, struct page *page_tail);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 18cd068..98664db 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -125,39 +125,37 @@
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
-	static struct syscall_metadata					\
-	__attribute__((__aligned__(4))) __syscall_meta_##sname;		\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call __used				\
-	  __attribute__((__aligned__(4)))				\
-	  __attribute__((section("_ftrace_events")))			\
 	  event_enter_##sname = {					\
 		.name                   = "sys_enter"#sname,		\
 		.class			= &event_class_syscall_enter,	\
 		.event.funcs            = &enter_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
 	};								\
+	static struct ftrace_event_call __used				\
+	  __attribute__((section("_ftrace_events")))			\
+	 *__event_enter_##sname = &event_enter_##sname;			\
 	__TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
-	static struct syscall_metadata					\
-	__attribute__((__aligned__(4))) __syscall_meta_##sname;		\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call __used				\
-	  __attribute__((__aligned__(4)))				\
-	  __attribute__((section("_ftrace_events")))			\
 	  event_exit_##sname = {					\
 		.name                   = "sys_exit"#sname,		\
 		.class			= &event_class_syscall_exit,	\
 		.event.funcs		= &exit_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
 	};								\
+	static struct ftrace_event_call __used				\
+	  __attribute__((section("_ftrace_events")))			\
+	*__event_exit_##sname = &event_exit_##sname;			\
 	__TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
 
 #define SYSCALL_METADATA(sname, nb)				\
 	SYSCALL_TRACE_ENTER_EVENT(sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(sname);			\
 	static struct syscall_metadata __used			\
-	  __attribute__((__aligned__(4)))			\
-	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta_##sname = {				\
 		.name 		= "sys"#sname,			\
 		.nb_args 	= nb,				\
@@ -166,14 +164,15 @@
 		.enter_event	= &event_enter_##sname,		\
 		.exit_event	= &event_exit_##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
-	};
+	};							\
+	static struct syscall_metadata __used			\
+	  __attribute__((section("__syscalls_metadata")))	\
+	 *__p_syscall_meta_##sname = &__syscall_meta_##sname;
 
 #define SYSCALL_DEFINE0(sname)					\
 	SYSCALL_TRACE_ENTER_EVENT(_##sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(_##sname);			\
 	static struct syscall_metadata __used			\
-	  __attribute__((__aligned__(4)))			\
-	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta__##sname = {				\
 		.name 		= "sys_"#sname,			\
 		.nb_args 	= 0,				\
@@ -181,6 +180,9 @@
 		.exit_event	= &event_exit__##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
 	};							\
+	static struct syscall_metadata __used			\
+	  __attribute__((section("__syscalls_metadata")))	\
+	 *__p_syscall_meta_##sname = &__syscall_meta__##sname;	\
 	asmlinkage long sys_##sname(void)
 #else
 #define SYSCALL_DEFINE0(name)	   asmlinkage long sys_##name(void)
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 7bb5cb6..11684d9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -930,6 +930,7 @@
 
 #ifdef __KERNEL__
 #include <linux/list.h>
+#include <linux/rcupdate.h>
 
 /* For the /proc/sys support */
 struct ctl_table;
@@ -1037,10 +1038,15 @@
    struct ctl_table trees. */
 struct ctl_table_header
 {
-	struct ctl_table *ctl_table;
-	struct list_head ctl_entry;
-	int used;
-	int count;
+	union {
+		struct {
+			struct ctl_table *ctl_table;
+			struct list_head ctl_entry;
+			int used;
+			int count;
+		};
+		struct rcu_head rcu;
+	};
 	struct completion *unregistering;
 	struct ctl_table *ctl_table_arg;
 	struct ctl_table_root *root;
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 387fa7d..7faf933 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -17,6 +17,9 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 
+/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
+#define SYSRQ_DEFAULT_ENABLE	1
+
 /* Possible values of bitmask for enabling sysrq functions */
 /* 0x0001 is reserved for enable everything */
 #define SYSRQ_ENABLE_LOG	0x0002
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 1de8b9eb..d3ec89f 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -77,7 +77,7 @@
 	char type[THERMAL_NAME_LENGTH];
 	struct device device;
 	void *devdata;
-	struct thermal_cooling_device_ops *ops;
+	const struct thermal_cooling_device_ops *ops;
 	struct list_head node;
 };
 
@@ -114,7 +114,7 @@
 	int last_temperature;
 	bool passive;
 	unsigned int forced_passive;
-	struct thermal_zone_device_ops *ops;
+	const struct thermal_zone_device_ops *ops;
 	struct list_head cooling_devices;
 	struct idr idr;
 	struct mutex lock;	/* protect cooling devices list */
@@ -127,13 +127,41 @@
 	struct thermal_hwmon_attr temp_crit;	/* hwmon sys attr */
 #endif
 };
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME                "thermal_event"
+#define THERMAL_GENL_VERSION                    0x01
+#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_group"
+
+enum events {
+	THERMAL_AUX0,
+	THERMAL_AUX1,
+	THERMAL_CRITICAL,
+	THERMAL_DEV_FAULT,
+};
+
+struct thermal_genl_event {
+	u32 orig;
+	enum events event;
+};
+/* attributes of thermal_genl_family */
+enum {
+	THERMAL_GENL_ATTR_UNSPEC,
+	THERMAL_GENL_ATTR_EVENT,
+	__THERMAL_GENL_ATTR_MAX,
+};
+#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
+
+/* commands supported by the thermal_genl_family */
+enum {
+	THERMAL_GENL_CMD_UNSPEC,
+	THERMAL_GENL_CMD_EVENT,
+	__THERMAL_GENL_CMD_MAX,
+};
+#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
 
 struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
-							 struct
-							 thermal_zone_device_ops
-							 *, int tc1, int tc2,
-							 int passive_freq,
-							 int polling_freq);
+		const struct thermal_zone_device_ops *, int tc1, int tc2,
+		int passive_freq, int polling_freq);
 void thermal_zone_device_unregister(struct thermal_zone_device *);
 
 int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
@@ -142,9 +170,16 @@
 				       struct thermal_cooling_device *);
 void thermal_zone_device_update(struct thermal_zone_device *);
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
-							       struct
-							       thermal_cooling_device_ops
-							       *);
+		const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
 
+#ifdef CONFIG_NET
+extern int generate_netlink_event(u32 orig, enum events event);
+#else
+static inline int generate_netlink_event(u32 orig, enum events event)
+{
+	return 0;
+}
+#endif
+
 #endif /* __THERMAL_H__ */
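
The netlink pieces added above let thermal drivers push events to userspace over the "thermal_event" generic netlink family (multicast group "thermal_mc_group"). A minimal kernel-side sketch; the function and sensor-id names are illustrative assumptions:

#include <linux/thermal.h>

/* Report that the sensor identified by 'sensor_id' crossed its critical
 * trip point.  generate_netlink_event() falls back to the inline stub
 * above when CONFIG_NET is disabled, so no extra guards are needed. */
static void example_report_critical(u32 sensor_id)
{
	generate_netlink_event(sensor_id, THERMAL_CRITICAL);
}
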
diff --git a/include/linux/time.h b/include/linux/time.h
index 9f15ac7..1e6d3b5 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -158,6 +158,8 @@
 extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
 extern void getrawmonotonic(struct timespec *ts);
+extern void getnstime_raw_and_real(struct timespec *ts_raw,
+		struct timespec *ts_real);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
 
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 32d852f..d23999f 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -268,6 +268,7 @@
 extern void second_overflow(void);
 extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
+extern void hardpps(const struct timespec *, const struct timespec *);
 
 int read_current_timer(unsigned long *timer_val);
 
diff --git a/include/linux/toshiba.h b/include/linux/toshiba.h
index 6a7c4ed..772dedb 100644
--- a/include/linux/toshiba.h
+++ b/include/linux/toshiba.h
@@ -33,6 +33,8 @@
 	unsigned int edi __attribute__ ((packed));
 } SMMRegisters;
 
+#ifdef __KERNEL__
 int tosh_smm(SMMRegisters *regs);
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d3e4f87..97c84a5 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -32,13 +32,8 @@
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	struct tracepoint_func *funcs;
-} __attribute__((aligned(32)));		/*
-					 * Aligned on 32 bytes because it is
-					 * globally visible and gcc happily
-					 * align these on the structure size.
-					 * Keep in sync with vmlinux.lds.h.
-					 */
+	struct tracepoint_func __rcu *funcs;
+};
 
 /*
  * Connect a probe to a tracepoint.
@@ -61,15 +56,15 @@
 
 struct tracepoint_iter {
 	struct module *module;
-	struct tracepoint *tracepoint;
+	struct tracepoint * const *tracepoint;
 };
 
 extern void tracepoint_iter_start(struct tracepoint_iter *iter);
 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end);
+extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+	struct tracepoint * const *begin, struct tracepoint * const *end);
 
 /*
  * tracepoint_synchronize_unregister must be called between the last tracepoint
@@ -84,11 +79,13 @@
 #define PARAMS(args...) args
 
 #ifdef CONFIG_TRACEPOINTS
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end);
+extern
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+	struct tracepoint * const *end);
 #else
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end)
+static inline
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+	struct tracepoint * const *end)
 { }
 #endif /* CONFIG_TRACEPOINTS */
 
@@ -174,12 +171,20 @@
 	{								\
 	}
 
+/*
+ * We have no guarantee that gcc and the linker won't up-align the tracepoint
+ * structures, so we create an array of pointers that will be used for iteration
+ * on the tracepoints.
+ */
 #define DEFINE_TRACE_FN(name, reg, unreg)				\
 	static const char __tpstrtab_##name[]				\
 	__attribute__((section("__tracepoints_strings"))) = #name;	\
 	struct tracepoint __tracepoint_##name				\
-	__attribute__((section("__tracepoints"), aligned(32))) =	\
-		{ __tpstrtab_##name, 0, reg, unreg, NULL }
+	__attribute__((section("__tracepoints"))) =			\
+		{ __tpstrtab_##name, 0, reg, unreg, NULL };		\
+	static struct tracepoint * const __tracepoint_ptr_##name __used	\
+	__attribute__((section("__tracepoints_ptrs"))) =		\
+		&__tracepoint_##name;
 
 #define DEFINE_TRACE(name)						\
 	DEFINE_TRACE_FN(name, NULL, NULL);
@@ -326,7 +331,7 @@
  *		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
  *		__entry->next_pid	= next->pid;
  *		__entry->next_prio	= next->prio;
- *	)
+ *	),
  *
  *	*
  *	* Formatted output of a trace record via TP_printk().
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 526d66f..ff7dc08 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -101,14 +101,15 @@
  *	any pending driver I/O is completed.
  *
  * void (*dcd_change)(struct tty_struct *tty, unsigned int status,
- * 			struct timespec *ts)
+ *			struct pps_event_time *ts)
  *
  *	Tells the discipline that the DCD pin has changed its status and
- *	the relative timestamp. Pointer ts can be NULL.
+ *	the relative timestamp. Pointer ts cannot be NULL.
  */
 
 #include <linux/fs.h>
 #include <linux/wait.h>
+#include <linux/pps_kernel.h>
 
 struct tty_ldisc_ops {
 	int	magic;
@@ -143,7 +144,7 @@
 			       char *fp, int count);
 	void	(*write_wakeup)(struct tty_struct *);
 	void	(*dcd_change)(struct tty_struct *, unsigned int,
-				struct timespec *);
+				struct pps_event_time *);
 
 	struct  module *owner;
 	
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index fa261a0..8da8c4e 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -67,21 +67,21 @@
 #endif
 };
 
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
 
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
 #endif
 }
 
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -93,7 +93,7 @@
 #endif
 }
 
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
@@ -112,7 +112,7 @@
  * - UP 32bit must disable BH.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -124,7 +124,7 @@
 #endif
 }
 
-static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
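
The helpers above implement a seqcount-style protocol: writers bracket updates with u64_stats_update_begin()/_end(), and readers retry with the fetch_begin()/fetch_retry() pair until they see a consistent snapshot. A minimal reader sketch against a hypothetical stats structure:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

/* Take a consistent snapshot of both counters, even on 32-bit SMP where
 * a u64 cannot be read atomically. */
static void example_read_stats(const struct example_stats *stats,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->rx_packets;
		*bytes   = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}
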
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index c9a6abd..c0d817d 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -3,9 +3,9 @@
 
 #include <linux/kernel.h>
 
-struct __una_u16 { u16 x; } __attribute__((packed));
-struct __una_u32 { u32 x; } __attribute__((packed));
-struct __una_u64 { u64 x; } __attribute__((packed));
+struct __una_u16 { u16 x; } __packed;
+struct __una_u32 { u32 x; } __packed;
+struct __una_u64 { u64 x; } __packed;
 
 static inline u16 __get_unaligned_cpu16(const void *p)
 {
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index 5e86dc7..81a9279 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -89,7 +89,7 @@
 
 #define USB_CDC_COMM_FEATURE	0x01
 #define USB_CDC_CAP_LINE	0x02
-#define USB_CDC_CAP_BRK	0x04
+#define USB_CDC_CAP_BRK		0x04
 #define USB_CDC_CAP_NOTIFY	0x08
 
 /* "Union Functional Descriptor" from CDC spec 5.2.3.8 */
@@ -271,6 +271,11 @@
 	__le16	wLength;
 } __attribute__ ((packed));
 
+struct usb_cdc_speed_change {
+	__le32	DLBitRRate;	/* contains the downlink bit rate (IN pipe) */
+	__le32	ULBitRate;	/* contains the uplink bit rate (OUT pipe) */
+} __attribute__ ((packed));
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -292,7 +297,7 @@
 	__le16	wNdpOutDivisor;
 	__le16	wNdpOutPayloadRemainder;
 	__le16	wNdpOutAlignment;
-	__le16	wPadding2;
+	__le16	wNtbOutMaxDatagrams;
 } __attribute__ ((packed));
 
 /*
@@ -307,7 +312,7 @@
 	__le16	wHeaderLength;
 	__le16	wSequence;
 	__le16	wBlockLength;
-	__le16	wFpIndex;
+	__le16	wNdpIndex;
 } __attribute__ ((packed));
 
 struct usb_cdc_ncm_nth32 {
@@ -315,7 +320,7 @@
 	__le16	wHeaderLength;
 	__le16	wSequence;
 	__le32	dwBlockLength;
-	__le32	dwFpIndex;
+	__le32	dwNdpIndex;
 } __attribute__ ((packed));
 
 /*
@@ -337,7 +342,7 @@
 struct usb_cdc_ncm_ndp16 {
 	__le32	dwSignature;
 	__le16	wLength;
-	__le16	wNextFpIndex;
+	__le16	wNextNdpIndex;
 	struct	usb_cdc_ncm_dpe16 dpe16[0];
 } __attribute__ ((packed));
 
@@ -375,6 +380,7 @@
 #define USB_CDC_NCM_NCAP_ENCAP_COMMAND			(1 << 2)
 #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE		(1 << 3)
 #define USB_CDC_NCM_NCAP_CRC_MODE			(1 << 4)
+#define	USB_CDC_NCM_NCAP_NTB_INPUT_SIZE			(1 << 5)
 
 /* CDC NCM subclass Table 6-3: NTB Parameter Structure */
 #define USB_CDC_NCM_NTB16_SUPPORTED			(1 << 0)
@@ -392,6 +398,13 @@
 #define USB_CDC_NCM_NTB_MIN_IN_SIZE			2048
 #define USB_CDC_NCM_NTB_MIN_OUT_SIZE			2048
 
+/* NTB Input Size Structure */
+struct usb_cdc_ncm_ndp_input_size {
+	__le32	dwNtbInMaxSize;
+	__le16	wNtbInMaxDatagrams;
+	__le16	wReserved;
+} __attribute__ ((packed));
+
 /* CDC NCM subclass 6.2.11 SetCrcMode */
 #define USB_CDC_NCM_CRC_NOT_APPENDED			0x00
 #define USB_CDC_NCM_CRC_APPENDED			0x01
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index dd6ee49..a854fe8 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -112,6 +112,7 @@
 	/* Flags that get set only during HCD registration or removal. */
 	unsigned		rh_registered:1;/* is root hub registered? */
 	unsigned		rh_pollable:1;	/* may we poll the root hub? */
+	unsigned		msix_enabled:1;	/* driver has MSI-X enabled? */
 
 	/* The next flag is a stopgap, to be removed when all the HCDs
 	 * support the new root-hub polling mechanism. */
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index b92e173..7d1babb 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -16,12 +16,8 @@
 #ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
 #define __LINUX_USB_GADGET_MSM72K_UDC_H__
 
-#ifdef CONFIG_ARCH_MSM7X00A
-#define USB_SBUSCFG          (MSM_USB_BASE + 0x0090)
-#else
 #define USB_AHBBURST         (MSM_USB_BASE + 0x0090)
 #define USB_AHBMODE          (MSM_USB_BASE + 0x0098)
-#endif
 #define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
 
 #define USB_USBCMD           (MSM_USB_BASE + 0x0140)
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 16d682f..c904913 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -347,6 +347,9 @@
 extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
 					unsigned int ch);
 extern int usb_serial_handle_break(struct usb_serial_port *port);
+extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+					 struct tty_struct *tty,
+					 unsigned int status);
 
 
 extern int usb_serial_bus_register(struct usb_serial_driver *device);
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 8178156..faf4679 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -6,7 +6,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 
-#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 8)
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
 #define UIDHASH_SZ	(1 << UIDHASH_BITS)
 
 struct user_namespace {
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 0093dd7..800617b 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -109,7 +109,10 @@
 				      unsigned int fbit)
 {
 	/* Did you forget to fix assumptions on max features? */
-	MAYBE_BUILD_BUG_ON(fbit >= 32);
+	if (__builtin_constant_p(fbit))
+		BUILD_BUG_ON(fbit >= 32);
+	else
+		BUG_ON(fbit >= 32);
 
 	if (fbit < VIRTIO_TRANSPORT_F_START)
 		virtio_check_driver_offered_feature(vdev, fbit);
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index a85064d..e4d3335 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -7,7 +7,8 @@
  * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
  * anyone can use the definitions to implement compatible drivers/servers.
  *
- * Copyright (C) Red Hat, Inc., 2009, 2010
+ * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
+ * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
  */
 
 /* Feature bits */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 44b54f6..4ed6fcd 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -59,8 +59,9 @@
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
-				pgprot_t prot);
+extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller);
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -90,9 +91,6 @@
 					unsigned long flags,
 					unsigned long start, unsigned long end,
 					void *caller);
-extern struct vm_struct *get_vm_area_node(unsigned long size,
-					  unsigned long flags, int node,
-					  gfp_t gfp_mask);
 extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
@@ -120,7 +118,7 @@
 #ifdef CONFIG_SMP
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask);
+				     size_t align);
 
 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
 #endif
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index eaaea37..833e676 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -254,6 +254,11 @@
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
+
+int calculate_pressure_threshold(struct zone *zone);
+int calculate_normal_threshold(struct zone *zone);
+void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+				int (*calculate_pressure)(struct zone *));
 #else /* CONFIG_SMP */
 
 /*
@@ -298,6 +303,8 @@
 #define dec_zone_page_state __dec_zone_page_state
 #define mod_zone_page_state __mod_zone_page_state
 
+#define set_pgdat_percpu_threshold(pgdat, callback) { }
+
 static inline void refresh_cpu_vm_stats(int cpu) { }
 #endif
 
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1ac1158..f7998a3 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -250,7 +250,7 @@
 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
-	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
+	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
@@ -318,7 +318,7 @@
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@
 
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)			\
+	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
 	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
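
Given the WQ_FREEZEABLE → WQ_FREEZABLE rename above, a minimal sketch of an ordered, freezable queue using the documented helpers (the queue and work-item names are illustrative):

#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	pr_info("ordered work item ran\n");
}

static int __init example_wq_init(void)
{
	/* Ordered: at most one item executes at a time, in queueing order;
	 * WQ_FREEZABLE lets the freezer quiesce it across suspend. */
	example_wq = alloc_ordered_workqueue("example", WQ_FREEZABLE);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);
	return 0;
}
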
 
diff --git a/include/linux/xz.h b/include/linux/xz.h
new file mode 100644
index 0000000..64cffa6
--- /dev/null
+++ b/include/linux/xz.h
@@ -0,0 +1,264 @@
+/*
+ * XZ decompressor
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_H
+#define XZ_H
+
+#ifdef __KERNEL__
+#	include <linux/stddef.h>
+#	include <linux/types.h>
+#else
+#	include <stddef.h>
+#	include <stdint.h>
+#endif
+
+/* In Linux, this is used to make extern functions static when needed. */
+#ifndef XZ_EXTERN
+#	define XZ_EXTERN extern
+#endif
+
+/**
+ * enum xz_mode - Operation mode
+ *
+ * @XZ_SINGLE:              Single-call mode. This uses less RAM than
+ *                          the multi-call modes, because the LZMA2
+ *                          dictionary doesn't need to be allocated as
+ *                          part of the decoder state. All required data
+ *                          structures are allocated at initialization,
+ *                          so xz_dec_run() cannot return XZ_MEM_ERROR.
+ * @XZ_PREALLOC:            Multi-call mode with preallocated LZMA2
+ *                          dictionary buffer. All data structures are
+ *                          allocated at initialization, so xz_dec_run()
+ *                          cannot return XZ_MEM_ERROR.
+ * @XZ_DYNALLOC:            Multi-call mode. The LZMA2 dictionary is
+ *                          allocated once the required size has been
+ *                          parsed from the stream headers. If the
+ *                          allocation fails, xz_dec_run() will return
+ *                          XZ_MEM_ERROR.
+ *
+ * It is possible to enable support only for a subset of the above
+ * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
+ * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
+ * with support for all operation modes, but the preboot code may
+ * be built with fewer features to minimize code size.
+ */
+enum xz_mode {
+	XZ_SINGLE,
+	XZ_PREALLOC,
+	XZ_DYNALLOC
+};
+
+/**
+ * enum xz_ret - Return codes
+ * @XZ_OK:                  Everything is OK so far. More input or more
+ *                          output space is required to continue. This
+ *                          return code is possible only in multi-call mode
+ *                          (XZ_PREALLOC or XZ_DYNALLOC).
+ * @XZ_STREAM_END:          Operation finished successfully.
+ * @XZ_UNSUPPORTED_CHECK:   Integrity check type is not supported. Decoding
+ *                          is still possible in multi-call mode by simply
+ *                          calling xz_dec_run() again.
+ *                          Note that this return value is used only if
+ *                          XZ_DEC_ANY_CHECK was defined at build time,
+ *                          which is not used in the kernel. Unsupported
+ *                          check types return XZ_OPTIONS_ERROR if
+ *                          XZ_DEC_ANY_CHECK was not defined at build time.
+ * @XZ_MEM_ERROR:           Allocating memory failed. This return code is
+ *                          possible only if the decoder was initialized
+ *                          with XZ_DYNALLOC. The amount of memory that the
+ *                          decoder tried to allocate was no more than the
+ *                          dict_max argument given to xz_dec_init().
+ * @XZ_MEMLIMIT_ERROR:      A bigger LZMA2 dictionary would be needed than
+ *                          allowed by the dict_max argument given to
+ *                          xz_dec_init(). This return value is possible
+ *                          only in multi-call mode (XZ_PREALLOC or
+ *                          XZ_DYNALLOC); the single-call mode (XZ_SINGLE)
+ *                          ignores the dict_max argument.
+ * @XZ_FORMAT_ERROR:        File format was not recognized (wrong magic
+ *                          bytes).
+ * @XZ_OPTIONS_ERROR:       This implementation doesn't support the requested
+ *                          compression options. In the decoder this means
+ *                          that the header CRC32 matches, but the header
+ *                          itself specifies something that we don't support.
+ * @XZ_DATA_ERROR:          Compressed data is corrupt.
+ * @XZ_BUF_ERROR:           Cannot make any progress. Details are slightly
+ *                          different between multi-call and single-call
+ *                          mode; more information below.
+ *
+ * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls
+ * to XZ code cannot consume any input and cannot produce any new output.
+ * This happens when there is no new input available, or the output buffer
+ * is full while at least one output byte is still pending. Assuming your
+ * code is not buggy, you can get this error only when decoding a compressed
+ * stream that is truncated or otherwise corrupt.
+ *
+ * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer
+ * is too small or the compressed input is corrupt in a way that makes the
+ * decoder produce more output than the caller expected. When it is
+ * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR
+ * is used instead of XZ_BUF_ERROR.
+ */
+enum xz_ret {
+	XZ_OK,
+	XZ_STREAM_END,
+	XZ_UNSUPPORTED_CHECK,
+	XZ_MEM_ERROR,
+	XZ_MEMLIMIT_ERROR,
+	XZ_FORMAT_ERROR,
+	XZ_OPTIONS_ERROR,
+	XZ_DATA_ERROR,
+	XZ_BUF_ERROR
+};
+
+/**
+ * struct xz_buf - Passing input and output buffers to XZ code
+ * @in:         Beginning of the input buffer. This may be NULL if and only
+ *              if in_pos is equal to in_size.
+ * @in_pos:     Current position in the input buffer. This must not exceed
+ *              in_size.
+ * @in_size:    Size of the input buffer
+ * @out:        Beginning of the output buffer. This may be NULL if and only
+ *              if out_pos is equal to out_size.
+ * @out_pos:    Current position in the output buffer. This must not exceed
+ *              out_size.
+ * @out_size:   Size of the output buffer
+ *
+ * Only the contents of the output buffer from out[out_pos] onward, and
+ * the variables in_pos and out_pos are modified by the XZ code.
+ */
+struct xz_buf {
+	const uint8_t *in;
+	size_t in_pos;
+	size_t in_size;
+
+	uint8_t *out;
+	size_t out_pos;
+	size_t out_size;
+};
+
+/**
+ * struct xz_dec - Opaque type to hold the XZ decoder state
+ */
+struct xz_dec;
+
+/**
+ * xz_dec_init() - Allocate and initialize a XZ decoder state
+ * @mode:       Operation mode
+ * @dict_max:   Maximum size of the LZMA2 dictionary (history buffer) for
+ *              multi-call decoding. This is ignored in single-call mode
+ *              (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes
+ *              or 2^n + 2^(n-1) bytes (the latter sizes are less common
+ *              in practice), so other values for dict_max don't make sense.
+ *              In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB,
+ *              512 KiB, and 1 MiB are probably the only reasonable values,
+ *              except for kernel and initramfs images where a bigger
+ *              dictionary can be fine and useful.
+ *
+ * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at
+ * once. The caller must provide enough output space or the decoding will
+ * fail. The output space is used as the dictionary buffer, which is why
+ * there is no need to allocate the dictionary as part of the decoder's
+ * internal state.
+ *
+ * Because the output buffer is used as the workspace, streams encoded using
+ * a big dictionary are not a problem in single-call mode. It is enough that
+ * the output buffer is big enough to hold the actual uncompressed data; it
+ * can be smaller than the dictionary size stored in the stream headers.
+ *
+ * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes
+ * of memory is preallocated for the LZMA2 dictionary. This way there is no
+ * risk that xz_dec_run() could run out of memory, since xz_dec_run() will
+ * never allocate any memory. Instead, if the preallocated dictionary is too
+ * small for decoding the given input stream, xz_dec_run() will return
+ * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be
+ * decoded to avoid allocating an excessive amount of memory for the dictionary.
+ *
+ * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC):
+ * dict_max specifies the maximum allowed dictionary size that xz_dec_run()
+ * may allocate once it has parsed the dictionary size from the stream
+ * headers. This way excessive allocations can be avoided while still
+ * limiting the maximum memory usage to a sane value to prevent running the
+ * system out of memory when decompressing streams from untrusted sources.
+ *
+ * On success, xz_dec_init() returns a pointer to struct xz_dec, which is
+ * ready to be used with xz_dec_run(). If memory allocation fails,
+ * xz_dec_init() returns NULL.
+ */
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+
+/**
+ * xz_dec_run() - Run the XZ decoder
+ * @s:          Decoder state allocated using xz_dec_init()
+ * @b:          Input and output buffers
+ *
+ * The possible return values depend on build options and operation mode.
+ * See enum xz_ret for details.
+ *
+ * Note that if an error occurs in single-call mode (return value is not
+ * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the
+ * contents of the output buffer from b->out[b->out_pos] onward are
+ * undefined. This is true even after XZ_BUF_ERROR, because with some filter
+ * chains, there may be a second pass over the output buffer, and this pass
+ * cannot be properly done if the output buffer is truncated. Thus, you
+ * cannot give the single-call decoder too small a buffer and then expect to
+ * get that amount of valid data from the beginning of the stream. You must use
+ * the multi-call decoder if you don't want to uncompress the whole stream.
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+
+/**
+ * xz_dec_reset() - Reset an already allocated decoder state
+ * @s:          Decoder state allocated using xz_dec_init()
+ *
+ * This function can be used to reset the multi-call decoder state without
+ * freeing and reallocating memory with xz_dec_end() and xz_dec_init().
+ *
+ * In single-call mode, xz_dec_reset() is always called in the beginning of
+ * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
+ * multi-call mode.
+ */
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+
+/**
+ * xz_dec_end() - Free the memory allocated for the decoder state
+ * @s:          Decoder state allocated using xz_dec_init(). If s is NULL,
+ *              this function does nothing.
+ */
+XZ_EXTERN void xz_dec_end(struct xz_dec *s);
+
+/*
+ * Standalone build (userspace build or in-kernel build for boot time use)
+ * needs a CRC32 implementation. For normal in-kernel use, kernel's own
+ * CRC32 module is used instead, and users of this module don't need to
+ * care about the functions below.
+ */
+#ifndef XZ_INTERNAL_CRC32
+#	ifdef __KERNEL__
+#		define XZ_INTERNAL_CRC32 0
+#	else
+#		define XZ_INTERNAL_CRC32 1
+#	endif
+#endif
+
+#if XZ_INTERNAL_CRC32
+/*
+ * This must be called before any other xz_* function to initialize
+ * the CRC32 lookup table.
+ */
+XZ_EXTERN void xz_crc32_init(void);
+
+/*
+ * Update CRC32 value using the polynomial from IEEE-802.3. To start a new
+ * calculation, the third argument must be zero. To continue the calculation,
+ * the previously returned value is passed as the third argument.
+ */
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+#endif
+#endif
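
A minimal multi-call decoding sketch built on the xz_dec API above; the dict_max choice, buffer handling, and error mapping are illustrative assumptions, and in-kernel users additionally need the xz_dec code enabled in Kconfig:

#include <linux/xz.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Decode a complete XZ stream held in 'in' into 'out'.  Returns 0 on
 * success, -ENOMEM if the decoder state could not be allocated, and
 * -EINVAL for any decoding failure (sketch-level error mapping). */
static int example_xz_decode(const u8 *in, size_t in_size,
			     u8 *out, size_t out_size)
{
	struct xz_buf b = {
		.in = in,   .in_pos = 0,  .in_size = in_size,
		.out = out, .out_pos = 0, .out_size = out_size,
	};
	struct xz_dec *s;
	enum xz_ret ret;

	/* XZ_DYNALLOC with a 1 MiB dictionary cap, following the guidance
	 * in the xz_dec_init() kernel-doc above. */
	s = xz_dec_init(XZ_DYNALLOC, 1 << 20);
	if (!s)
		return -ENOMEM;

	do {
		ret = xz_dec_run(s, &b);
	} while (ret == XZ_OK);

	xz_dec_end(s);
	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}
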
diff --git a/include/media/davinci/vpss.h b/include/media/davinci/vpss.h
index c59cc02..b586495 100644
--- a/include/media/davinci/vpss.h
+++ b/include/media/davinci/vpss.h
@@ -44,7 +44,7 @@
 	short pplen;
 };
 
-/* Used for enable/diable VPSS Clock */
+/* Used for enable/disable VPSS Clock */
 enum vpss_clock_sel {
 	/* DM355/DM365 */
 	VPSS_CCDC_CLOCK,
diff --git a/include/media/mt9v011.h b/include/media/mt9v011.h
new file mode 100644
index 0000000..ea29fc7
--- /dev/null
+++ b/include/media/mt9v011.h
@@ -0,0 +1,17 @@
+/* mt9v011 sensor
+ *
+ * Copyright (C) 2011 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MT9V011_H__
+#define __MT9V011_H__
+
+struct mt9v011_platform_data {
+	unsigned xtal;	/* Hz */
+};
+
+#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index a23c1fc..2963263 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -183,6 +183,9 @@
 }
 
 #define IR_MAX_DURATION         0xFFFFFFFF      /* a bit more than 4 seconds */
+#define US_TO_NS(usec)		((usec) * 1000)
+#define MS_TO_US(msec)		((msec) * 1000)
+#define MS_TO_NS(msec)		((msec) * 1000 * 1000)
 
 void ir_raw_event_handle(struct rc_dev *dev);
 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index ac7ce00..7982714 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -115,7 +115,7 @@
 
 	/* different device locks */
 	spinlock_t			slock;
-	struct mutex			lock;
+	struct mutex			v4l2_lock;
 
 	unsigned char			__iomem *mem;		/* pointer to mapped IO memory */
 	u32				revision;	/* chip revision; needed for bug-workarounds*/
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 2d65b35..a659319 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -138,21 +138,10 @@
 
 /* Load an i2c module and return an initialized v4l2_subdev struct.
    The client_type argument is the name of the chip that's on the adapter. */
-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
 		struct i2c_adapter *adapter, const char *client_type,
-		int irq, void *platform_data,
 		u8 addr, const unsigned short *probe_addrs);
 
-/* Load an i2c module and return an initialized v4l2_subdev struct.
-   The client_type argument is the name of the chip that's on the adapter. */
-static inline struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
-		struct i2c_adapter *adapter, const char *client_type,
-		u8 addr, const unsigned short *probe_addrs)
-{
-	return v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, client_type, 0, NULL,
-				       addr, probe_addrs);
-}
-
 struct i2c_board_info;
 
 struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index d69ab4a..97d0638 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -23,6 +23,7 @@
 
 #include <linux/list.h>
 #include <linux/device.h>
+#include <linux/videodev2.h>
 
 /* forward references */
 struct v4l2_ctrl_handler;
@@ -53,8 +54,10 @@
   * @handler:	The handler that owns the control.
   * @cluster:	Point to start of cluster array.
   * @ncontrols:	Number of controls in cluster array.
-  * @has_new:	Internal flag: set when there is a valid new value.
   * @done:	Internal flag: set for each processed control.
+  * @is_new:	Set when the user specified a new value for this control. It
+  *		is also set when called from v4l2_ctrl_handler_setup. Drivers
+  *		should never set this flag.
   * @is_private: If set, then this control is private to its handler and it
   *		will not be added to any other handlers. Drivers can set
   *		this flag.
@@ -97,9 +100,9 @@
 	struct v4l2_ctrl_handler *handler;
 	struct v4l2_ctrl **cluster;
 	unsigned ncontrols;
-	unsigned int has_new:1;
 	unsigned int done:1;
 
+	unsigned int is_new:1;
 	unsigned int is_private:1;
 	unsigned int is_volatile:1;
 
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index b0316a7..daf1e57 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -106,10 +106,7 @@
 	u8 strength;	/* Pin drive strength */
 };
 
-/* s_config: if set, then it is always called by the v4l2_i2c_new_subdev*
-	functions after the v4l2_subdev was registered. It is used to pass
-	platform data to the subdev which can be used during initialization.
-
+/*
    s_io_pin_config: configure one or more chip I/O pins for chips that
 	multiplex different internal signal pads out to IO pins.  This function
 	takes a pointer to an array of 'n' pin configuration entries, one for
@@ -141,7 +138,6 @@
 struct v4l2_subdev_core_ops {
 	int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip);
 	int (*log_status)(struct v4l2_subdev *sd);
-	int (*s_config)(struct v4l2_subdev *sd, int irq, void *platform_data);
 	int (*s_io_pin_config)(struct v4l2_subdev *sd, size_t n,
 				      struct v4l2_subdev_io_pin_config *pincfg);
 	int (*init)(struct v4l2_subdev *sd, u32 val);
@@ -415,6 +411,21 @@
 	const struct v4l2_subdev_sensor_ops	*sensor;
 };
 
+/*
+ * Internal ops. Never call this from drivers, only the v4l2 framework can call
+ * these ops.
+ *
+ * registered: called when this subdev is registered. When called the v4l2_dev
+ *	field is set to the correct v4l2_device.
+ *
+ * unregistered: called when this subdev is unregistered. When called the
+ *	v4l2_dev field is still set to the correct v4l2_device.
+ */
+struct v4l2_subdev_internal_ops {
+	int (*registered)(struct v4l2_subdev *sd);
+	void (*unregistered)(struct v4l2_subdev *sd);
+};
+
 #define V4L2_SUBDEV_NAME_SIZE 32
 
 /* Set this flag if this subdev is a i2c device. */
@@ -431,6 +442,8 @@
 	u32 flags;
 	struct v4l2_device *v4l2_dev;
 	const struct v4l2_subdev_ops *ops;
+	/* Never call these internal ops from within a driver! */
+	const struct v4l2_subdev_internal_ops *internal_ops;
 	/* The control handler of this subdev. May be NULL. */
 	struct v4l2_ctrl_handler *ctrl_handler;
 	/* name must be unique */
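A minimal sketch of how a subdev driver might hook the new internal_ops (the driver and function names are invented; only the struct and its two callbacks come from the header above):

#include <media/v4l2-subdev.h>

static int example_registered(struct v4l2_subdev *sd)
{
	/* sd->v4l2_dev is valid here; allocate per-device resources. */
	return 0;
}

static void example_unregistered(struct v4l2_subdev *sd)
{
	/* v4l2_dev is still valid at this point; undo registered(). */
}

static const struct v4l2_subdev_internal_ops example_internal_ops = {
	.registered   = example_registered,
	.unregistered = example_unregistered,
};

/* In probe(), after initializing the subdev:
 *	sd->internal_ops = &example_internal_ops;
 */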
diff --git a/include/net/ah.h b/include/net/ah.h
index f0129f7..ca95b98 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -4,7 +4,7 @@
 #include <linux/skbuff.h>
 
 /* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN	12
+#define MAX_AH_AUTH_LEN	64
 
 struct crypto_ahash;
 
diff --git a/include/net/arp.h b/include/net/arp.h
index f4cf6ce..91f0568 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -25,5 +25,6 @@
 				  const unsigned char *src_hw,
 				  const unsigned char *target_hw);
 extern void arp_xmit(struct sk_buff *skb);
+int arp_invalidate(struct net_device *dev, __be32 ip);
 
 #endif	/* _ARP_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a29feb0..d2cf884 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -184,6 +184,7 @@
 	__u32		 link_mode;
 	__u8             auth_type;
 	__u8             sec_level;
+	__u8		 pending_sec_level;
 	__u8             power_save;
 	__u16            disc_timeout;
 	unsigned long	 pend;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bcc9f44..1322695 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1103,6 +1103,8 @@
  * @change_mpath: change a given mesh path
  * @get_mpath: get a mesh path for the given parameters
  * @dump_mpath: dump mesh path callback -- resume dump at index @idx
+ * @join_mesh: join the mesh network with the specified parameters
+ * @leave_mesh: leave the current mesh network
  *
  * @get_mesh_config: Get the current mesh configuration
  *
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8a64b81..b4c7c1c 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -195,7 +195,8 @@
  */
 static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
 {
-	nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+	if (hdr)
+		nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
 }
 
 /**
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4a3cd2c..96e50e0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -89,6 +89,18 @@
 #define IPV6_ADDR_SCOPE_GLOBAL		0x0e
 
 /*
+ *	Addr flags
+ */
+#ifdef __KERNEL__
+#define IPV6_ADDR_MC_FLAG_TRANSIENT(a)	\
+	((a)->s6_addr[1] & 0x10)
+#define IPV6_ADDR_MC_FLAG_PREFIX(a)	\
+	((a)->s6_addr[1] & 0x20)
+#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a)	\
+	((a)->s6_addr[1] & 0x40)
+#endif
+
+/*
  *	fragmentation header
  */
 
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5b3fd5a..62c0ce2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -337,6 +337,10 @@
  * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
  * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
  *	frame and selects the maximum number of streams that it can use.
+ * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on
+ *	the off-channel channel when a remain-on-channel offload is done
+ *	in hardware -- normal packets still flow and are expected to be
+ *	handled properly by the device.
  *
  * Note: If you have to add new flags to the enumeration, then don't
  *	 forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -1753,6 +1757,16 @@
  *	(also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
  *
  * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @remain_on_channel: Starts an off-channel period on the given channel, must
+ *	call back to ieee80211_ready_on_channel() when on that channel. Note
+ *	that normal channel traffic is not stopped as this is intended for hw
+ *	offload. Frames to transmit on the off-channel channel are transmitted
+ *	normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the
+ *	duration (which will always be non-zero) expires, the driver must call
+ *	ieee80211_remain_on_channel_expired(). This callback may sleep.
+ * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
+ *	aborted before it expires. This callback may sleep.
  */
 struct ieee80211_ops {
 	int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
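A hedged sketch of how a driver's transmit path could honor the new flag when it implements hardware remain-on-channel; the helper name and queue handling are hypothetical, only IEEE80211_TX_CTL_TX_OFFCHAN and IEEE80211_SKB_CB() come from mac80211:

#include <net/mac80211.h>

/* Hypothetical tx-path helper of a driver with hw remain-on-channel. */
static void example_tx_route(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		/* queue on the channel of the pending remain-on-channel
		 * request instead of the operating channel */
	} else {
		/* normal operating-channel transmission path */
	}
}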
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 1ee717e..a4c9936 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -7,16 +7,6 @@
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-			       struct net_device *in,
-			       struct net_device *out,
-			       int (*okfn)(struct sk_buff *));
-
-struct inet_frags_ctl;
-
 #include <linux/sysctl.h>
 extern struct ctl_table nf_ct_ipv6_sysctl_table[];
 
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 94dd54d..fd79c9a 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -3,4 +3,14 @@
 
 extern void nf_defrag_ipv6_enable(void);
 
+extern int nf_ct_frag6_init(void);
+extern void nf_ct_frag6_cleanup(void);
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+			       struct net_device *in,
+			       struct net_device *out,
+			       int (*okfn)(struct sk_buff *));
+
+struct inet_frags_ctl;
+
 #endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 96ba5f7..349cefe 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -77,9 +77,6 @@
 	if (e == NULL)
 		return;
 
-	if (!(e->ctmask & (1 << event)))
-		return;
-
 	set_bit(event, &e->cache);
 }
 
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
index cd85b3b..e505358 100644
--- a/include/net/netfilter/nf_tproxy_core.h
+++ b/include/net/netfilter/nf_tproxy_core.h
@@ -201,18 +201,8 @@
 }
 #endif
 
-static inline void
-nf_tproxy_put_sock(struct sock *sk)
-{
-	/* TIME_WAIT inet sockets have to be handled differently */
-	if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
-		inet_twsk_put(inet_twsk(sk));
-	else
-		sock_put(sk);
-}
-
 /* assign a socket to the skb -- consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
 
 #endif
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index d5df797..5395e09 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -107,8 +107,8 @@
 	int			sock_type;
 };
 
-int phonet_proto_register(int protocol, struct phonet_protocol *pp);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
+int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
 
 int phonet_sysctl_init(void);
 void phonet_sysctl_exit(void);
diff --git a/include/net/red.h b/include/net/red.h
index 995108e..3319f16 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -97,7 +97,6 @@
 	u32		forced_mark;	/* Forced marks, qavg > max_thresh */
 	u32		pdrop;          /* Drops due to queue limits */
 	u32		other;          /* Drops due to drop() calls */
-	u32		backlog;
 };
 
 struct red_parms {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0af57eb..04f8556 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -199,7 +199,7 @@
 
 struct qdisc_skb_cb {
 	unsigned int		pkt_len;
-	char			data[];
+	long			data[];
 };
 
 static inline int qdisc_qlen(struct Qdisc *q)
@@ -207,7 +207,7 @@
 	return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
 }
@@ -394,7 +394,7 @@
 	return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
 	return qdisc_skb_cb(skb)->pkt_len;
 }
@@ -426,10 +426,18 @@
 	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+				 const struct sk_buff *skb)
 {
-	sch->bstats.bytes += len;
-	sch->bstats.packets++;
+	bstats->bytes += qdisc_pkt_len(skb);
+	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+				       const struct sk_buff *skb)
+{
+	bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,6 @@
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));
 
 	return NET_XMIT_SUCCESS;
 }
@@ -452,8 +459,10 @@
 {
 	struct sk_buff *skb = __skb_dequeue(list);
 
-	if (likely(skb != NULL))
+	if (likely(skb != NULL)) {
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
+	}
 
 	return skb;
 }
@@ -466,10 +475,11 @@
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
 					      struct sk_buff_head *list)
 {
-	struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
+		sch->qstats.backlog -= len;
 		kfree_skb(skb);
 		return len;
 	}
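A minimal sketch of the accounting change for a qdisc that manages its own queue: since __qdisc_enqueue_tail() no longer bumps bstats, the dequeue path calls qdisc_bstats_update() itself. The qdisc and its private data below are hypothetical.

#include <net/sch_generic.h>

struct example_sched_data {
	struct sk_buff_head queue;	/* private queue of this qdisc */
};

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = __skb_dequeue(&q->queue);

	if (skb) {
		sch->q.qlen--;
		/* bytes += qdisc_pkt_len(skb), packets += gso_segs (or 1) */
		qdisc_bstats_update(sch, skb);
	}
	return skb;
}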
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 2b2769c..e73ebda 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -78,6 +78,7 @@
 #define SCTP_GET_PEER_ADDR_INFO	15
 #define SCTP_DELAYED_ACK_TIME	16
 #define SCTP_DELAYED_ACK SCTP_DELAYED_ACK_TIME
+#define SCTP_DELAYED_SACK SCTP_DELAYED_ACK_TIME
 #define SCTP_CONTEXT	17
 #define SCTP_FRAGMENT_INTERLEAVE	18
 #define SCTP_PARTIAL_DELIVERY_POINT	19 /* Set/Get partial delivery point */
@@ -99,8 +100,8 @@
 #define SCTP_SOCKOPT_PEELOFF	102	/* peel off association. */
 /* Options 104-106 are deprecated and removed. Do not use this space */
 #define SCTP_SOCKOPT_CONNECTX_OLD	107	/* CONNECTX old requests. */
-#define SCTP_GET_PEER_ADDRS	108		/* Get all peer addresss. */
-#define SCTP_GET_LOCAL_ADDRS	109		/* Get all local addresss. */
+#define SCTP_GET_PEER_ADDRS	108		/* Get all peer addresses. */
+#define SCTP_GET_LOCAL_ADDRS	109		/* Get all local addresses. */
 #define SCTP_SOCKOPT_CONNECTX	110		/* CONNECTX requests. */
 #define SCTP_SOCKOPT_CONNECTX3	111	/* CONNECTX requests (updated) */
 
diff --git a/include/net/sock.h b/include/net/sock.h
index 21a02f7..bc1cf7d8 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -152,14 +152,18 @@
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
 	 */
+	/* private: */
 	int			skc_dontcopy_begin[0];
+	/* public: */
 	union {
 		struct hlist_node	skc_node;
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	int			skc_tx_queue_mapping;
 	atomic_t		skc_refcnt;
+	/* private: */
 	int                     skc_dontcopy_end[0];
+	/* public: */
 };
 
 /**
@@ -749,6 +753,8 @@
 					int level,
 					int optname, char __user *optval,
 					int __user *option);
+	int			(*compat_ioctl)(struct sock *sk,
+					unsigned int cmd, unsigned long arg);
 #endif
 	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
 					   struct msghdr *msg, size_t len);
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 8479b66..3fd5064 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -261,6 +261,7 @@
 #define CONF_ENABLE_ESR         0x0008
 #define CONF_ENABLE_IOCARD	0x0010 /* auto-enabled if IO resources or IRQ
 					* (CONF_ENABLE_IRQ) in use */
+#define CONF_ENABLE_ZVCARD	0x0020
 
 /* flags used by pcmcia_loop_config() autoconfiguration */
 #define CONF_AUTO_CHECK_VCC	0x0100 /* check for matching Vcc? */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e04c488..55cd0a0 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -47,10 +47,13 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
+extern struct workqueue_struct *ib_wq;
+
 union ib_gid {
 	u8	raw[16];
 	struct {
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
index 8e9b222..8a143ca 100644
--- a/include/scsi/fc/fc_fcp.h
+++ b/include/scsi/fc/fc_fcp.h
@@ -46,7 +46,7 @@
  */
 struct fcp_cmnd {
 	__u8		fc_lun[8];	/* logical unit number */
-	__u8		fc_cmdref;	/* commmand reference number */
+	__u8		fc_cmdref;	/* command reference number */
 	__u8		fc_pri_ta;	/* priority and task attribute */
 	__u8		fc_tm_flags;	/* task management flags */
 	__u8		fc_flags;	/* additional len & flags */
@@ -58,7 +58,7 @@
 
 struct fcp_cmnd32 {
 	__u8		fc_lun[8];	/* logical unit number */
-	__u8		fc_cmdref;	/* commmand reference number */
+	__u8		fc_cmdref;	/* command reference number */
 	__u8		fc_pri_ta;	/* priority and task attribute */
 	__u8		fc_tm_flags;	/* task management flags */
 	__u8		fc_flags;	/* additional len & flags */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 1651fef1..b76d400 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -9,6 +9,7 @@
 #define _SCSI_SCSI_H
 
 #include <linux/types.h>
+#include <linux/scatterlist.h>
 
 struct scsi_cmnd;
 
@@ -104,6 +105,7 @@
 #define UNMAP		      0x42
 #define READ_TOC              0x43
 #define READ_HEADER           0x44
+#define GET_EVENT_STATUS_NOTIFICATION 0x4a
 #define LOG_SELECT            0x4c
 #define LOG_SENSE             0x4d
 #define XDWRITEREAD_10        0x53
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 4940045..b602f47 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -477,7 +477,7 @@
 
 struct snd_ac97 {
 	/* -- lowlevel (hardware) driver specific -- */
-	struct snd_ac97_build_ops * build_ops;
+	const struct snd_ac97_build_ops *build_ops;
 	void *private_data;
 	void (*private_free) (struct snd_ac97 *ac97);
 	/* --- */
diff --git a/include/sound/alc5623.h b/include/sound/alc5623.h
new file mode 100644
index 0000000..422c97d
--- /dev/null
+++ b/include/sound/alc5623.h
@@ -0,0 +1,15 @@
+#ifndef _INCLUDE_SOUND_ALC5623_H
+#define _INCLUDE_SOUND_ALC5623_H
+struct alc5623_platform_data {
+	/* configure :                              */
+	/* Lineout/Speaker Amps Vmid ratio control  */
+	/* enable/disable adc/dac high pass filters */
+	unsigned int add_ctrl;
+	/* configure :                              */
+	/* output to enable when jack is low        */
+	/* output to enable when jack is high       */
+	/* jack detect (gpio/nc/jack detect) [12]   */
+	unsigned int jack_det_ctrl;
+};
+#endif
+
diff --git a/include/sound/asound.h b/include/sound/asound.h
index a1803ec..5d6074f 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -259,6 +259,7 @@
 #define SNDRV_PCM_INFO_HALF_DUPLEX	0x00100000	/* only half duplex */
 #define SNDRV_PCM_INFO_JOINT_DUPLEX	0x00200000	/* playback and capture stream are somewhat correlated */
 #define SNDRV_PCM_INFO_SYNC_START	0x00400000	/* pcm support some kind of sync go */
+#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP	0x00800000	/* period wakeup can be disabled */
 #define SNDRV_PCM_INFO_FIFO_IN_FRAMES	0x80000000	/* internal kernel flag - FIFO size is in frames */
 
 typedef int __bitwise snd_pcm_state_t;
@@ -334,6 +335,8 @@
 #define	SNDRV_PCM_HW_PARAM_LAST_INTERVAL	SNDRV_PCM_HW_PARAM_TICK_TIME
 
 #define SNDRV_PCM_HW_PARAMS_NORESAMPLE	(1<<0)	/* avoid rate resampling */
+#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER	(1<<1)	/* export buffer */
+#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP	(1<<2)	/* disable period wakeups */
 
 struct snd_interval {
 	unsigned int min, max;
diff --git a/include/sound/control.h b/include/sound/control.h
index 112374d..7715e6f 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -160,12 +160,14 @@
 }
 
 /*
- * Frequently used control callbacks
+ * Frequently used control callbacks/helpers
  */
 int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_info *uinfo);
 int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo);
+int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
+		      unsigned int items, const char *const names[]);
 
 /*
  * virtual master control
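A minimal sketch of the new snd_ctl_enum_info() helper in an enum control's info callback (the control items and function name are invented for illustration):

#include <linux/kernel.h>
#include <sound/control.h>

static const char *const example_mode_items[] = { "Off", "Low", "High" };

static int example_mode_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	/* one channel, three enumerated items */
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(example_mode_items),
				 example_mode_items);
}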
diff --git a/include/sound/hdsp.h b/include/sound/hdsp.h
index d98a78d..0909a38 100644
--- a/include/sound/hdsp.h
+++ b/include/sound/hdsp.h
@@ -28,6 +28,7 @@
 	Multiface,
 	H9652,
 	H9632,
+	RPM,
 	Undefined,
 };
 
diff --git a/include/sound/minors.h b/include/sound/minors.h
index a81798a..8f76420 100644
--- a/include/sound/minors.h
+++ b/include/sound/minors.h
@@ -31,8 +31,8 @@
 /* these minors can still be used for autoloading devices (/dev/aload*) */
 #define SNDRV_MINOR_CONTROL		0	/* 0 */
 #define SNDRV_MINOR_GLOBAL		1	/* 1 */
-#define SNDRV_MINOR_SEQUENCER		(SNDRV_MINOR_GLOBAL + 0 * 32)
-#define SNDRV_MINOR_TIMER		(SNDRV_MINOR_GLOBAL + 1 * 32)
+#define SNDRV_MINOR_SEQUENCER		1	/* SNDRV_MINOR_GLOBAL + 0 * 32 */
+#define SNDRV_MINOR_TIMER		33	/* SNDRV_MINOR_GLOBAL + 1 * 32 */
 
 #ifndef CONFIG_SND_DYNAMIC_MINORS
 						/* 2 - 3 (reserved) */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index dfd9b76..e731f8d7 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -297,6 +297,7 @@
 	unsigned int info;
 	unsigned int rate_num;
 	unsigned int rate_den;
+	unsigned int no_period_wakeup: 1;
 
 	/* -- SW params -- */
 	int tstamp_mode;		/* mmap timestamp is updated */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e7b6802..1bafe95 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -16,8 +16,6 @@
 
 #include <linux/list.h>
 
-#include <sound/soc.h>
-
 struct snd_pcm_substream;
 
 /*
@@ -205,7 +203,7 @@
 	int (*resume)(struct snd_soc_dai *dai);
 
 	/* ops */
-	struct snd_soc_dai_ops *ops;
+	const struct snd_soc_dai_ops *ops;
 
 	/* DAI capabilities */
 	struct snd_soc_pcm_stream capture;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 8fd3b41..8031769 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,7 +16,6 @@
 #include <linux/device.h>
 #include <linux/types.h>
 #include <sound/control.h>
-#include <sound/soc.h>
 
 /* widget has no PM register bit */
 #define SND_SOC_NOPM	-1
@@ -72,6 +71,10 @@
 	 wcontrols, wncontrols) \
 {	.id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols}
+#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
+	 wcontrols, wncontrols) \
+{	.id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols}
 #define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
 	 wcontrols, wncontrols)\
 {	.id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
@@ -90,6 +93,9 @@
 #define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
 {	.id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1}
+#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
+{	.id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1}
 #define SND_SOC_DAPM_VALUE_MUX(wname, wreg, wshift, winvert, wcontrols) \
 {	.id = snd_soc_dapm_value_mux, .name = wname, .reg = wreg, \
 	.shift = wshift, .invert = winvert, .kcontrols = wcontrols, \
@@ -116,6 +122,11 @@
 {	.id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \
 	.event = wevent, .event_flags = wflags}
+#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
+	wncontrols, wevent, wflags) \
+{	.id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \
+	.event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
 	wncontrols, wevent, wflags) \
 {	.id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
@@ -140,6 +151,11 @@
 {	.id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \
 	.event = wevent, .event_flags = wflags}
+#define SND_SOC_DAPM_VIRT_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
+	wevent, wflags) \
+{	.id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \
+	.event = wevent, .event_flags = wflags}
 
 /* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
 #define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
@@ -219,13 +235,6 @@
 	.info = snd_soc_info_volsw, \
 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
-#define SOC_DAPM_DOUBLE(xname, reg, shift_left, shift_right, max, invert, \
-	power) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
-	.info = snd_soc_info_volsw, \
- 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- 	.private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\
-		((max) << 16) | ((invert) << 24) }
 #define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
 	.info = snd_soc_info_volsw, \
@@ -233,15 +242,6 @@
 	.tlv.p = (tlv_array), \
 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
-#define SOC_DAPM_DOUBLE_TLV(xname, reg, shift_left, shift_right, max, invert, \
-	power, tlv_array) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
-	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
-	.tlv.p = (tlv_array), \
-	.info = snd_soc_info_volsw, \
-	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
-	.private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\
-		((max) << 16) | ((invert) << 24) }
 #define SOC_DAPM_ENUM(xname, xenum) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
 	.info = snd_soc_info_enum_double, \
@@ -297,6 +297,7 @@
 struct snd_soc_dapm_path;
 struct snd_soc_dapm_pin;
 struct snd_soc_dapm_route;
+struct snd_soc_dapm_context;
 
 int dapm_reg_event(struct snd_soc_dapm_widget *w,
 		   struct snd_kcontrol *kcontrol, int event);
@@ -324,16 +325,16 @@
 	struct snd_ctl_elem_value *uncontrol);
 int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *uncontrol);
-int snd_soc_dapm_new_control(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget);
-int snd_soc_dapm_new_controls(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget,
 	int num);
 
 /* dapm path setup */
-int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec);
-void snd_soc_dapm_free(struct snd_soc_codec *codec);
-int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
 			    const struct snd_soc_dapm_route *route, int num);
 
 /* dapm events */
@@ -343,27 +344,33 @@
 
 /* dapm sys fs - used by the core */
 int snd_soc_dapm_sys_add(struct device *dev);
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec);
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm);
 
 /* dapm audio pin control and status */
-int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_sync(struct snd_soc_codec *codec);
-int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec,
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm,
+			    const char *pin);
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
+			     const char *pin);
+int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
+				const char *pin);
+int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
 				  const char *pin);
-int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin);
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
+				const char *pin);
 
 /* dapm widget types */
 enum snd_soc_dapm_type {
 	snd_soc_dapm_input = 0,		/* input pin */
 	snd_soc_dapm_output,		/* output pin */
 	snd_soc_dapm_mux,			/* selects 1 analog signal from many inputs */
+	snd_soc_dapm_virt_mux,			/* virtual version of snd_soc_dapm_mux */
 	snd_soc_dapm_value_mux,			/* selects 1 analog signal from many inputs */
 	snd_soc_dapm_mixer,			/* mixes several analog signals together */
 	snd_soc_dapm_mixer_named_ctl,		/* mixer with named controls */
 	snd_soc_dapm_pga,			/* programmable gain/attenuation (volume) */
+	snd_soc_dapm_out_drv,			/* output driver */
 	snd_soc_dapm_adc,			/* analog to digital converter */
 	snd_soc_dapm_dac,			/* digital to analog converter */
 	snd_soc_dapm_micbias,		/* microphone bias (power) */
@@ -425,6 +432,7 @@
 	char *sname;	/* stream name */
 	struct snd_soc_codec *codec;
 	struct list_head list;
+	struct snd_soc_dapm_context *dapm;
 
 	/* dapm control */
 	short reg;						/* negative reg = no direct dapm */
@@ -461,4 +469,35 @@
 	struct list_head power_list;
 };
 
+struct snd_soc_dapm_update {
+	struct snd_soc_dapm_widget *widget;
+	struct snd_kcontrol *kcontrol;
+	int reg;
+	int mask;
+	int val;
+};
+
+/* DAPM context */
+struct snd_soc_dapm_context {
+	int n_widgets; /* number of widgets in this context */
+	enum snd_soc_bias_level bias_level;
+	enum snd_soc_bias_level suspend_bias_level;
+	struct delayed_work delayed_work;
+	unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
+
+	struct snd_soc_dapm_update *update;
+
+	struct device *dev; /* from parent - for debug */
+	struct snd_soc_codec *codec; /* parent codec */
+	struct snd_soc_card *card; /* parent card */
+
+	/* used during DAPM updates */
+	int dev_power;
+	struct list_head list;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_dapm;
+#endif
+};
+
 #endif
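A minimal sketch of the converted pin API from a machine driver's point of view, assuming the codec's embedded DAPM context (&codec->dapm, added to struct snd_soc_codec in this same series); the helper and widget names are placeholders:

#include <sound/soc.h>

/* Hypothetical machine-level init helper. */
static int example_board_dapm_init(struct snd_soc_codec *codec)
{
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_nc_pin(dapm, "LINE2");
	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");

	return snd_soc_dapm_sync(dapm);
}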
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 5c3bce8..74921f2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -222,10 +222,8 @@
 
 struct snd_jack;
 struct snd_soc_card;
-struct snd_soc_device;
 struct snd_soc_pcm_stream;
 struct snd_soc_ops;
-struct snd_soc_dai_mode;
 struct snd_soc_pcm_runtime;
 struct snd_soc_dai;
 struct snd_soc_dai_driver;
@@ -235,9 +233,10 @@
 struct snd_soc_codec;
 struct snd_soc_codec_driver;
 struct soc_enum;
-struct snd_soc_ac97_ops;
 struct snd_soc_jack;
 struct snd_soc_jack_pin;
+struct snd_soc_cache_ops;
+#include <sound/soc-dapm.h>
 
 #ifdef CONFIG_GPIOLIB
 struct snd_soc_jack_gpio;
@@ -253,17 +252,30 @@
 	SND_SOC_SPI,
 };
 
+enum snd_soc_compress_type {
+	SND_SOC_FLAT_COMPRESSION = 1,
+	SND_SOC_LZO_COMPRESSION,
+	SND_SOC_RBTREE_COMPRESSION
+};
+
 int snd_soc_register_platform(struct device *dev,
 		struct snd_soc_platform_driver *platform_drv);
 void snd_soc_unregister_platform(struct device *dev);
 int snd_soc_register_codec(struct device *dev,
-		struct snd_soc_codec_driver *codec_drv,
+		const struct snd_soc_codec_driver *codec_drv,
 		struct snd_soc_dai_driver *dai_drv, int num_dai);
 void snd_soc_unregister_codec(struct device *dev);
 int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg);
 int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
 			       int addr_bits, int data_bits,
 			       enum snd_soc_control_type control);
+int snd_soc_cache_sync(struct snd_soc_codec *codec);
+int snd_soc_cache_init(struct snd_soc_codec *codec);
+int snd_soc_cache_exit(struct snd_soc_codec *codec);
+int snd_soc_cache_write(struct snd_soc_codec *codec,
+			unsigned int reg, unsigned int value);
+int snd_soc_cache_read(struct snd_soc_codec *codec,
+		       unsigned int reg, unsigned int *value);
 
 /* Utility functions to get clock rates from various things */
 int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -420,23 +432,37 @@
 	int (*trigger)(struct snd_pcm_substream *, int);
 };
 
+/* SoC cache ops */
+struct snd_soc_cache_ops {
+	const char *name;
+	enum snd_soc_compress_type id;
+	int (*init)(struct snd_soc_codec *codec);
+	int (*exit)(struct snd_soc_codec *codec);
+	int (*read)(struct snd_soc_codec *codec, unsigned int reg,
+		unsigned int *value);
+	int (*write)(struct snd_soc_codec *codec, unsigned int reg,
+		unsigned int value);
+	int (*sync)(struct snd_soc_codec *codec);
+};
+
 /* SoC Audio Codec device */
 struct snd_soc_codec {
 	const char *name;
+	const char *name_prefix;
 	int id;
 	struct device *dev;
-	struct snd_soc_codec_driver *driver;
+	const struct snd_soc_codec_driver *driver;
 
 	struct mutex mutex;
 	struct snd_soc_card *card;
 	struct list_head list;
 	struct list_head card_list;
 	int num_dai;
+	enum snd_soc_compress_type compress_type;
 
 	/* runtime */
 	struct snd_ac97 *ac97;  /* for ad-hoc ac97 devices */
 	unsigned int active;
-	unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
 	unsigned int cache_only:1;  /* Suppress writes to hardware */
 	unsigned int cache_sync:1; /* Cache needs to be synced to hardware */
 	unsigned int suspended:1; /* Codec is in suspend PM state */
@@ -444,25 +470,25 @@
 	unsigned int ac97_registered:1; /* Codec has been AC97 registered */
 	unsigned int ac97_created:1; /* Codec has been created by SoC */
 	unsigned int sysfs_registered:1; /* codec has been sysfs registered */
+	unsigned int cache_init:1; /* codec cache has been initialized */
 
 	/* codec IO */
 	void *control_data; /* codec control (i2c/3wire) data */
 	hw_write_t hw_write;
 	unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
+	unsigned int (*read)(struct snd_soc_codec *, unsigned int);
+	int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
 	void *reg_cache;
+	const void *reg_def_copy;
+	const struct snd_soc_cache_ops *cache_ops;
+	struct mutex cache_rw_mutex;
 
 	/* dapm */
-	u32 pop_time;
-	struct list_head dapm_widgets;
-	struct list_head dapm_paths;
-	enum snd_soc_bias_level bias_level;
-	enum snd_soc_bias_level suspend_bias_level;
-	struct delayed_work delayed_work;
+	struct snd_soc_dapm_context dapm;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_codec_root;
 	struct dentry *debugfs_reg;
-	struct dentry *debugfs_pop_time;
 	struct dentry *debugfs_dapm;
 #endif
 };
@@ -488,6 +514,7 @@
 	short reg_cache_step;
 	short reg_word_size;
 	const void *reg_cache_default;
+	enum snd_soc_compress_type compress_type;
 
 	/* codec bias level */
 	int (*set_bias_level)(struct snd_soc_codec *,
@@ -554,6 +581,30 @@
 	struct snd_soc_ops *ops;
 };
 
+struct snd_soc_codec_conf {
+	const char *dev_name;
+
+	/*
+	 * optional map of kcontrol, widget and path name prefixes that are
+	 * associated per device
+	 */
+	const char *name_prefix;
+
+	/*
+	 * set this to the desired compression type if you want to
+	 * override the one supplied in codec->driver->compress_type
+	 */
+	enum snd_soc_compress_type compress_type;
+};
+
+struct snd_soc_aux_dev {
+	const char *name;		/* Codec name */
+	const char *codec_name;		/* for multi-codec */
+
+	/* codec/machine specific init - e.g. add machine controls */
+	int (*init)(struct snd_soc_dapm_context *dapm);
+};
+
 /* SoC card */
 struct snd_soc_card {
 	const char *name;
@@ -579,6 +630,8 @@
 	/* callbacks */
 	int (*set_bias_level)(struct snd_soc_card *,
 			      enum snd_soc_bias_level level);
+	int (*set_bias_level_post)(struct snd_soc_card *,
+				   enum snd_soc_bias_level level);
 
 	long pmdown_time;
 
@@ -588,12 +641,35 @@
 	struct snd_soc_pcm_runtime *rtd;
 	int num_rtd;
 
+	/* optional codec specific configuration */
+	struct snd_soc_codec_conf *codec_conf;
+	int num_configs;
+
+	/*
+	 * optional auxiliary devices such as amplifiers or codecs whose
+	 * DAI link is unused
+	 */
+	struct snd_soc_aux_dev *aux_dev;
+	int num_aux_devs;
+	struct snd_soc_pcm_runtime *rtd_aux;
+	int num_aux_rtd;
+
 	struct work_struct deferred_resume_work;
 
 	/* lists of probed devices belonging to this card */
 	struct list_head codec_dev_list;
 	struct list_head platform_dev_list;
 	struct list_head dai_dev_list;
+
+	struct list_head widgets;
+	struct list_head paths;
+	struct list_head dapm_list;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_card_root;
+	struct dentry *debugfs_pop_time;
+#endif
+	u32 pop_time;
 };
 
 /* SoC machine DAI configuration, glues a codec and cpu DAI together */
@@ -639,17 +715,9 @@
 };
 
 /* codec IO */
-static inline unsigned int snd_soc_read(struct snd_soc_codec *codec,
-					unsigned int reg)
-{
-	return codec->driver->read(codec, reg);
-}
-
-static inline unsigned int snd_soc_write(struct snd_soc_codec *codec,
-					 unsigned int reg, unsigned int val)
-{
-	return codec->driver->write(codec, reg, val);
-}
+unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
+unsigned int snd_soc_write(struct snd_soc_codec *codec,
+			   unsigned int reg, unsigned int val);
 
 /* device driver data */
 
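A hedged sketch of a codec driver selecting one of the new cache compression types via the compress_type field of the now-const snd_soc_codec_driver; the register defaults and the reg_cache_size field name are assumptions based on this kernel series, not taken from the hunk above:

#include <linux/kernel.h>
#include <sound/soc.h>

static const u16 example_reg_defaults[] = {
	0x0000, 0x001f, 0x00ff,		/* placeholder register defaults */
};

static const struct snd_soc_codec_driver example_codec_driver = {
	.reg_cache_size    = ARRAY_SIZE(example_reg_defaults),
	.reg_word_size     = sizeof(u16),
	.reg_cache_default = example_reg_defaults,
	/* one of SND_SOC_FLAT/LZO/RBTREE_COMPRESSION */
	.compress_type     = SND_SOC_RBTREE_COMPRESSION,
};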
diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h
index b4a0db23..1eeebd5 100644
--- a/include/sound/wm8903.h
+++ b/include/sound/wm8903.h
@@ -17,13 +17,9 @@
 /*
  * R6 (0x06) - Mic Bias Control 0
  */
-#define WM8903_MICDET_HYST_ENA                  0x0080  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_MASK             0x0080  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_SHIFT                 7  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_WIDTH                 1  /* MICDET_HYST_ENA */
-#define WM8903_MICDET_THR_MASK                  0x0070  /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_SHIFT                      4  /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_WIDTH                      3  /* MICDET_THR - [6:4] */
+#define WM8903_MICDET_THR_MASK                  0x0030  /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_SHIFT                      4  /* MICDET_THR - [5:4] */
+#define WM8903_MICDET_THR_WIDTH                      2  /* MICDET_THR - [5:4] */
 #define WM8903_MICSHORT_THR_MASK                0x000C  /* MICSHORT_THR - [3:2] */
 #define WM8903_MICSHORT_THR_SHIFT                    2  /* MICSHORT_THR - [3:2] */
 #define WM8903_MICSHORT_THR_WIDTH                    2  /* MICSHORT_THR - [3:2] */
diff --git a/include/target/configfs_macros.h b/include/target/configfs_macros.h
new file mode 100644
index 0000000..7fe7460
--- /dev/null
+++ b/include/target/configfs_macros.h
@@ -0,0 +1,147 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs_macros.h - extends macros for configfs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on sysfs:
+ * 	sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * Based on kobject.h:
+ *      Copyright (c) 2002-2003	Patrick Mochel
+ *      Copyright (c) 2002-2003	Open Source Development Labs
+ *
+ * configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * Added CONFIGFS_EATTR() macros from original configfs.h macros
+ * Copyright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * Please read Documentation/filesystems/configfs.txt before using the
+ * configfs interface, ESPECIALLY the parts about reference counts and
+ * item destructors.
+ */
+
+#ifndef _CONFIGFS_MACROS_H_
+#define _CONFIGFS_MACROS_H_
+
+#include <linux/configfs.h>
+
+/*
+ * Users often need to create attribute structures for their configurable
+ * attributes, containing a configfs_attribute member and function pointers
+ * for the show() and store() operations on that attribute. If they don't
+ * need anything else on the extended attribute structure, they can use
+ * this macro to define it.  The argument _name ends up as
+ * 'struct _name_attribute', as well as in the names passed to CONFIGFS_EATTR_OPS() below.
+ * The argument _item is the name of the structure containing the
+ * struct config_item or struct config_group structure members
+ */
+#define CONFIGFS_EATTR_STRUCT(_name, _item)				\
+struct _name##_attribute {						\
+	struct configfs_attribute attr;					\
+	ssize_t (*show)(struct _item *, char *);			\
+	ssize_t (*store)(struct _item *, const char *, size_t);		\
+}
+
+/*
+ * With the extended attribute structure, users can use this macro
+ * (similar to sysfs' __ATTR) to make defining attributes easier.
+ * An example:
+ * #define MYITEM_EATTR(_name, _mode, _show, _store)	\
+ * struct myitem_attribute childless_attr_##_name =	\
+ *         __CONFIGFS_EATTR(_name, _mode, _show, _store)
+ */
+#define __CONFIGFS_EATTR(_name, _mode, _show, _store)			\
+{									\
+	.attr	= {							\
+			.ca_name = __stringify(_name),			\
+			.ca_mode = _mode,				\
+			.ca_owner = THIS_MODULE,			\
+	},								\
+	.show	= _show,						\
+	.store	= _store,						\
+}
+/* Here is a readonly version, only requiring a show() operation */
+#define __CONFIGFS_EATTR_RO(_name, _show)				\
+{									\
+	.attr	= {							\
+			.ca_name = __stringify(_name),			\
+			.ca_mode = 0444,				\
+			.ca_owner = THIS_MODULE,			\
+	},								\
+	.show	= _show,						\
+}
+
+/*
+ * With these extended attributes, the simple show_attribute() and
+ * store_attribute() operations need to call the show() and store() of the
+ * attributes.  This is a common pattern, so we provide a macro to define
+ * them.  The argument _name is the name of the attribute defined by
+ * CONFIGFS_EATTR_STRUCT(). The argument _item is the name of the structure
+ * containing the struct config_item or struct config_group structure member.
+ * The argument _item_member is the actual name of the struct config_* struct
+ * in your _item structure.  Meaning  my_structure->some_config_group.
+ *		                      ^^_item^^^^^  ^^_item_member^^^
+ * This macro expects the attributes to be named "struct <name>_attribute".
+ */
+#define CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member)		\
+static struct _item *to_##_name(struct config_item *ci)			\
+{									\
+	return (ci) ? container_of(to_config_group(ci), struct _item,	\
+		_item_member) : NULL;					\
+}
+
+#define CONFIGFS_EATTR_OPS_SHOW(_name, _item)				\
+static ssize_t _name##_attr_show(struct config_item *item,		\
+				 struct configfs_attribute *attr,	\
+				 char *page)				\
+{									\
+	struct _item *_item = to_##_name(item);				\
+	struct _name##_attribute * _name##_attr =			\
+		container_of(attr, struct _name##_attribute, attr);	\
+	ssize_t ret = 0;						\
+									\
+	if (_name##_attr->show)						\
+		ret = _name##_attr->show(_item, page);			\
+	return ret;							\
+}
+
+#define CONFIGFS_EATTR_OPS_STORE(_name, _item)				\
+static ssize_t _name##_attr_store(struct config_item *item,		\
+				  struct configfs_attribute *attr,	\
+				  const char *page, size_t count)	\
+{									\
+	struct _item *_item = to_##_name(item);				\
+	struct _name##_attribute * _name##_attr =			\
+		container_of(attr, struct _name##_attribute, attr);	\
+	ssize_t ret = -EINVAL;						\
+									\
+	if (_name##_attr->store)					\
+		ret = _name##_attr->store(_item, page, count);		\
+	return ret;							\
+}
+
+#define CONFIGFS_EATTR_OPS(_name, _item, _item_member)			\
+	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
+	CONFIGFS_EATTR_OPS_SHOW(_name, _item);				\
+	CONFIGFS_EATTR_OPS_STORE(_name, _item);
+
+#define CONFIGFS_EATTR_OPS_RO(_name, _item, _item_member)		\
+	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
+	CONFIGFS_EATTR_OPS_SHOW(_name, _item);
+
+#endif /* _CONFIGFS_MACROS_H_ */
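A minimal usage sketch of the CONFIGFS_EATTR() helpers defined above; the example_item type, its members and the read-only attribute are invented for illustration:

#include <linux/module.h>
#include <target/configfs_macros.h>

struct example_item {
	struct config_group group;
	int value;
};

/* defines struct example_attribute plus to_example()/show/store wrappers */
CONFIGFS_EATTR_STRUCT(example, example_item);
CONFIGFS_EATTR_OPS(example, example_item, group);

static ssize_t example_show_value(struct example_item *item, char *page)
{
	return sprintf(page, "%d\n", item->value);
}

static struct example_attribute example_attr_value =
	__CONFIGFS_EATTR_RO(value, example_show_value);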
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
new file mode 100644
index 0000000..0828b6c
--- /dev/null
+++ b/include/target/target_core_base.h
@@ -0,0 +1,953 @@
+#ifndef TARGET_CORE_BASE_H
+#define TARGET_CORE_BASE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_cmnd.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#define TARGET_CORE_MOD_VERSION		"v4.0.0-rc6"
+#define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
+
+/* Used by transport_generic_allocate_iovecs() */
+#define TRANSPORT_IOV_DATA_BUFFER		5
+/* Maximum Number of LUNs per Target Portal Group */
+#define TRANSPORT_MAX_LUNS_PER_TPG		256
+/*
+ * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
+ *
+ * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
+ * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
+ * 16-byte CDBs by default and require an extra allocation for
+ * 32-byte CDBs because of legacy issues.
+ *
+ * Within TCM Core there are no such legacy limitations, so we go ahead and
+ * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
+ * within all TCM Core and subsystem plugin code.
+ */
+#define TCM_MAX_COMMAND_SIZE			32
+/*
+ * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
+ * defined 96, but the real limit is 252 (or 260 including the header)
+ */
+#define TRANSPORT_SENSE_BUFFER			SCSI_SENSE_BUFFERSIZE
+/* Used by transport_send_check_condition_and_sense() */
+#define SPC_SENSE_KEY_OFFSET			2
+#define SPC_ASC_KEY_OFFSET			12
+#define SPC_ASCQ_KEY_OFFSET			13
+#define TRANSPORT_IQN_LEN			224
+/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
+#define LU_GROUP_NAME_BUF			256
+/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
+#define TG_PT_GROUP_NAME_BUF			256
+/* Used to parse VPD into struct t10_vpd */
+#define VPD_TMP_BUF_SIZE			128
+/* Used by transport_generic_cmd_sequencer() */
+#define READ_BLOCK_LEN          		6
+#define READ_CAP_LEN            		8
+#define READ_POSITION_LEN       		20
+#define INQUIRY_LEN				36
+/* Used by transport_get_inquiry_vpd_serial() */
+#define INQUIRY_VPD_SERIAL_LEN			254
+/* Used by transport_get_inquiry_vpd_device_ident() */
+#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254
+
+/* struct se_hba->hba_flags */
+enum hba_flags_table {
+	HBA_FLAGS_INTERNAL_USE	= 0x01,
+	HBA_FLAGS_PSCSI_MODE	= 0x02,
+};
+
+/* struct se_lun->lun_status */
+enum transport_lun_status_table {
+	TRANSPORT_LUN_STATUS_FREE = 0,
+	TRANSPORT_LUN_STATUS_ACTIVE = 1,
+};
+
+/* struct se_portal_group->se_tpg_type */
+enum transport_tpg_type_table {
+	TRANSPORT_TPG_TYPE_NORMAL = 0,
+	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
+};
+
+/* Used for generate timer flags */
+enum timer_flags_table {
+	TF_RUNNING	= 0x01,
+	TF_STOP		= 0x02,
+};
+
+/* Special transport agnostic struct se_cmd->t_states */
+enum transport_state_table {
+	TRANSPORT_NO_STATE	= 0,
+	TRANSPORT_NEW_CMD	= 1,
+	TRANSPORT_DEFERRED_CMD	= 2,
+	TRANSPORT_WRITE_PENDING	= 3,
+	TRANSPORT_PROCESS_WRITE	= 4,
+	TRANSPORT_PROCESSING	= 5,
+	TRANSPORT_COMPLETE_OK	= 6,
+	TRANSPORT_COMPLETE_FAILURE = 7,
+	TRANSPORT_COMPLETE_TIMEOUT = 8,
+	TRANSPORT_PROCESS_TMR	= 9,
+	TRANSPORT_TMR_COMPLETE	= 10,
+	TRANSPORT_ISTATE_PROCESSING = 11,
+	TRANSPORT_ISTATE_PROCESSED = 12,
+	TRANSPORT_KILL		= 13,
+	TRANSPORT_REMOVE	= 14,
+	TRANSPORT_FREE		= 15,
+	TRANSPORT_NEW_CMD_MAP	= 16,
+};
+
+/* Used for struct se_cmd->se_cmd_flags */
+enum se_cmd_flags_table {
+	SCF_SUPPORTED_SAM_OPCODE	= 0x00000001,
+	SCF_TRANSPORT_TASK_SENSE	= 0x00000002,
+	SCF_EMULATED_TASK_SENSE		= 0x00000004,
+	SCF_SCSI_DATA_SG_IO_CDB		= 0x00000008,
+	SCF_SCSI_CONTROL_SG_IO_CDB	= 0x00000010,
+	SCF_SCSI_CONTROL_NONSG_IO_CDB	= 0x00000020,
+	SCF_SCSI_NON_DATA_CDB		= 0x00000040,
+	SCF_SCSI_CDB_EXCEPTION		= 0x00000080,
+	SCF_SCSI_RESERVATION_CONFLICT	= 0x00000100,
+	SCF_CMD_PASSTHROUGH_NOALLOC	= 0x00000200,
+	SCF_SE_CMD_FAILED		= 0x00000400,
+	SCF_SE_LUN_CMD			= 0x00000800,
+	SCF_SE_ALLOW_EOO		= 0x00001000,
+	SCF_SE_DISABLE_ONLINE_CHECK	= 0x00002000,
+	SCF_SENT_CHECK_CONDITION	= 0x00004000,
+	SCF_OVERFLOW_BIT		= 0x00008000,
+	SCF_UNDERFLOW_BIT		= 0x00010000,
+	SCF_SENT_DELAYED_TAS		= 0x00020000,
+	SCF_ALUA_NON_OPTIMIZED		= 0x00040000,
+	SCF_DELAYED_CMD_FROM_SAM_ATTR	= 0x00080000,
+	SCF_PASSTHROUGH_SG_TO_MEM	= 0x00100000,
+	SCF_PASSTHROUGH_CONTIG_TO_SG	= 0x00200000,
+	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
+	SCF_EMULATE_SYNC_CACHE		= 0x00800000,
+	SCF_EMULATE_CDB_ASYNC		= 0x01000000,
+	SCF_EMULATE_SYNC_UNMAP		= 0x02000000
+};
+
+/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
+enum transport_lunflags_table {
+	TRANSPORT_LUNFLAGS_NO_ACCESS		= 0x00,
+	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS	= 0x01,
+	TRANSPORT_LUNFLAGS_READ_ONLY		= 0x02,
+	TRANSPORT_LUNFLAGS_READ_WRITE		= 0x04,
+};
+
+/* struct se_device->dev_status */
+enum transport_device_status_table {
+	TRANSPORT_DEVICE_ACTIVATED		= 0x01,
+	TRANSPORT_DEVICE_DEACTIVATED		= 0x02,
+	TRANSPORT_DEVICE_QUEUE_FULL		= 0x04,
+	TRANSPORT_DEVICE_SHUTDOWN		= 0x08,
+	TRANSPORT_DEVICE_OFFLINE_ACTIVATED	= 0x10,
+	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED	= 0x20,
+};
+
+/*
+ * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
+ * to signal which ASC/ASCQ sense payload should be built.
+ */
+enum tcm_sense_reason_table {
+	TCM_NON_EXISTENT_LUN			= 0x01,
+	TCM_UNSUPPORTED_SCSI_OPCODE		= 0x02,
+	TCM_INCORRECT_AMOUNT_OF_DATA		= 0x03,
+	TCM_UNEXPECTED_UNSOLICITED_DATA		= 0x04,
+	TCM_SERVICE_CRC_ERROR			= 0x05,
+	TCM_SNACK_REJECTED			= 0x06,
+	TCM_SECTOR_COUNT_TOO_MANY		= 0x07,
+	TCM_INVALID_CDB_FIELD			= 0x08,
+	TCM_INVALID_PARAMETER_LIST		= 0x09,
+	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= 0x0a,
+	TCM_UNKNOWN_MODE_PAGE			= 0x0b,
+	TCM_WRITE_PROTECTED			= 0x0c,
+	TCM_CHECK_CONDITION_ABORT_CMD		= 0x0d,
+	TCM_CHECK_CONDITION_UNIT_ATTENTION	= 0x0e,
+	TCM_CHECK_CONDITION_NOT_READY		= 0x0f,
+};
+
+struct se_obj {
+	atomic_t obj_access_count;
+} ____cacheline_aligned;
+
+/*
+ * Used by TCM Core internally to signal if ALUA emulation is enabled or
+ * disabled, or running in TCM/pSCSI passthrough mode
+ */
+typedef enum {
+	SPC_ALUA_PASSTHROUGH,
+	SPC2_ALUA_DISABLED,
+	SPC3_ALUA_EMULATED
+} t10_alua_index_t;
+
+/*
+ * Used by TCM Core internally to signal if SAM Task Attribute emulation
+ * is enabled or disabled, or running in TCM/pSCSI passthrough mode
+ */
+typedef enum {
+	SAM_TASK_ATTR_PASSTHROUGH,
+	SAM_TASK_ATTR_UNTAGGED,
+	SAM_TASK_ATTR_EMULATED
+} t10_task_attr_index_t;
+
+/*
+ * Used for target SCSI statistics
+ */
+typedef enum {
+	SCSI_INST_INDEX,
+	SCSI_DEVICE_INDEX,
+	SCSI_AUTH_INTR_INDEX,
+	SCSI_INDEX_TYPE_MAX
+} scsi_index_t;
+
+struct scsi_index_table {
+	spinlock_t	lock;
+	u32		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+} ____cacheline_aligned;
+
+struct se_cmd;
+
+struct t10_alua {
+	t10_alua_index_t alua_type;
+	/* ALUA Target Port Group ID */
+	u16	alua_tg_pt_gps_counter;
+	u32	alua_tg_pt_gps_count;
+	spinlock_t tg_pt_gps_lock;
+	struct se_subsystem_dev *t10_sub_dev;
+	/* Used for default ALUA Target Port Group */
+	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
+	/* Used for default ALUA Target Port Group ConfigFS group */
+	struct config_group alua_tg_pt_gps_group;
+	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
+	struct list_head tg_pt_gps_list;
+} ____cacheline_aligned;
+
+struct t10_alua_lu_gp {
+	u16	lu_gp_id;
+	int	lu_gp_valid_id;
+	u32	lu_gp_members;
+	atomic_t lu_gp_shutdown;
+	atomic_t lu_gp_ref_cnt;
+	spinlock_t lu_gp_lock;
+	struct config_group lu_gp_group;
+	struct list_head lu_gp_list;
+	struct list_head lu_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_lu_gp_member {
+	int lu_gp_assoc:1;
+	atomic_t lu_gp_mem_ref_cnt;
+	spinlock_t lu_gp_mem_lock;
+	struct t10_alua_lu_gp *lu_gp;
+	struct se_device *lu_gp_mem_dev;
+	struct list_head lu_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_tg_pt_gp {
+	u16	tg_pt_gp_id;
+	int	tg_pt_gp_valid_id;
+	int	tg_pt_gp_alua_access_status;
+	int	tg_pt_gp_alua_access_type;
+	int	tg_pt_gp_nonop_delay_msecs;
+	int	tg_pt_gp_trans_delay_msecs;
+	int	tg_pt_gp_pref;
+	int	tg_pt_gp_write_metadata;
+	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
+#define ALUA_MD_BUF_LEN				1024
+	u32	tg_pt_gp_md_buf_len;
+	u32	tg_pt_gp_members;
+	atomic_t tg_pt_gp_alua_access_state;
+	atomic_t tg_pt_gp_ref_cnt;
+	spinlock_t tg_pt_gp_lock;
+	struct mutex tg_pt_gp_md_mutex;
+	struct se_subsystem_dev *tg_pt_gp_su_dev;
+	struct config_group tg_pt_gp_group;
+	struct list_head tg_pt_gp_list;
+	struct list_head tg_pt_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_tg_pt_gp_member {
+	int tg_pt_gp_assoc:1;
+	atomic_t tg_pt_gp_mem_ref_cnt;
+	spinlock_t tg_pt_gp_mem_lock;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_port *tg_pt;
+	struct list_head tg_pt_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_vpd {
+	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
+	int protocol_identifier_set;
+	u32 protocol_identifier;
+	u32 device_identifier_code_set;
+	u32 association;
+	u32 device_identifier_type;
+	struct list_head vpd_list;
+} ____cacheline_aligned;
+
+struct t10_wwn {
+	unsigned char vendor[8];
+	unsigned char model[16];
+	unsigned char revision[4];
+	unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+	spinlock_t t10_vpd_lock;
+	struct se_subsystem_dev *t10_sub_dev;
+	struct config_group t10_wwn_group;
+	struct list_head t10_vpd_list;
+} ____cacheline_aligned;
+
+
+/*
+ * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
+ * emulation is enabled or disabled, or running in TCM/pSCSI passthrough
+ * mode
+ */
+typedef enum {
+	SPC_PASSTHROUGH,
+	SPC2_RESERVATIONS,
+	SPC3_PERSISTENT_RESERVATIONS
+} t10_reservations_index_t;
+
+struct t10_pr_registration {
+	/* Used for fabrics that contain WWN+ISID */
+#define PR_REG_ISID_LEN				16
+	/* PR_REG_ISID_LEN + ',i,0x' */
+#define PR_REG_ISID_ID_LEN			(PR_REG_ISID_LEN + 5)
+	char pr_reg_isid[PR_REG_ISID_LEN];
+	/* Used during APTPL metadata reading */
+#define PR_APTPL_MAX_IPORT_LEN			256
+	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
+	/* Used during APTPL metadata reading */
+#define PR_APTPL_MAX_TPORT_LEN			256
+	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
+	/* For writing out live meta data */
+	unsigned char *pr_aptpl_buf;
+	u16 pr_aptpl_rpti;
+	u16 pr_reg_tpgt;
+	/* Reservation affects all target ports */
+	int pr_reg_all_tg_pt;
+	/* Activate Persistence across Target Power Loss */
+	int pr_reg_aptpl;
+	int pr_res_holder;
+	int pr_res_type;
+	int pr_res_scope;
+	/* Used for fabric initiator WWPNs using a ISID */
+	int isid_present_at_reg:1;
+	u32 pr_res_mapped_lun;
+	u32 pr_aptpl_target_lun;
+	u32 pr_res_generation;
+	u64 pr_reg_bin_isid;
+	u64 pr_res_key;
+	atomic_t pr_res_holders;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_dev_entry *pr_reg_deve;
+	struct se_lun *pr_reg_tg_pt_lun;
+	struct list_head pr_reg_list;
+	struct list_head pr_reg_abort_list;
+	struct list_head pr_reg_aptpl_list;
+	struct list_head pr_reg_atp_list;
+	struct list_head pr_reg_atp_mem_list;
+} ____cacheline_aligned;
+
+/*
+ * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
+ * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
+ * core_setup_reservations()
+ */
+struct t10_reservation_ops {
+	int (*t10_reservation_check)(struct se_cmd *, u32 *);
+	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
+	int (*t10_pr_register)(struct se_cmd *);
+	int (*t10_pr_clear)(struct se_cmd *);
+};
+
+struct t10_reservation_template {
+	/* Reservation affects all target ports */
+	int pr_all_tg_pt;
+	/* Activate Persistence across Target Power Loss enabled
+	 * for SCSI device */
+	int pr_aptpl_active;
+	/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
+#define PR_APTPL_BUF_LEN			8192
+	u32 pr_aptpl_buf_len;
+	u32 pr_generation;
+	t10_reservations_index_t res_type;
+	spinlock_t registration_lock;
+	spinlock_t aptpl_reg_lock;
+	/*
+	 * This will always be set by one individual I_T Nexus.
+	 * However with all_tg_pt=1, other I_T Nexus from the
+	 * same initiator can access PR reg/res info on a different
+	 * target port.
+	 *
+	 * There is also the 'All Registrants' case, where there is
+	 * a single *pr_res_holder of the reservation, but all
+	 * registrations are considered reservation holders.
+	 */
+	struct se_node_acl *pr_res_holder;
+	struct list_head registration_list;
+	struct list_head aptpl_reg_list;
+	struct t10_reservation_ops pr_ops;
+} ____cacheline_aligned;
+
+struct se_queue_req {
+	int			state;
+	void			*cmd;
+	struct list_head	qr_list;
+} ____cacheline_aligned;
+
+struct se_queue_obj {
+	atomic_t		queue_cnt;
+	spinlock_t		cmd_queue_lock;
+	struct list_head	qobj_list;
+	wait_queue_head_t	thread_wq;
+} ____cacheline_aligned;
+
+/*
+ * Used one per struct se_cmd to hold all extra struct se_task
+ * metadata.  This structure is set up and allocated in
+ * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
+ */
+struct se_transport_task {
+	unsigned char		*t_task_cdb;
+	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned long long	t_task_lba;
+	int			t_tasks_failed;
+	int			t_tasks_fua;
+	int			t_tasks_bidi:1;
+	u32			t_task_cdbs;
+	u32			t_tasks_check;
+	u32			t_tasks_no;
+	u32			t_tasks_sectors;
+	u32			t_tasks_se_num;
+	u32			t_tasks_se_bidi_num;
+	u32			t_tasks_sg_chained_no;
+	atomic_t		t_fe_count;
+	atomic_t		t_se_count;
+	atomic_t		t_task_cdbs_left;
+	atomic_t		t_task_cdbs_ex_left;
+	atomic_t		t_task_cdbs_timeout_left;
+	atomic_t		t_task_cdbs_sent;
+	atomic_t		t_transport_aborted;
+	atomic_t		t_transport_active;
+	atomic_t		t_transport_complete;
+	atomic_t		t_transport_queue_active;
+	atomic_t		t_transport_sent;
+	atomic_t		t_transport_stop;
+	atomic_t		t_transport_timeout;
+	atomic_t		transport_dev_active;
+	atomic_t		transport_lun_active;
+	atomic_t		transport_lun_fe_stop;
+	atomic_t		transport_lun_stop;
+	spinlock_t		t_state_lock;
+	struct completion	t_transport_stop_comp;
+	struct completion	transport_lun_fe_stop_comp;
+	struct completion	transport_lun_stop_comp;
+	struct scatterlist	*t_tasks_sg_chained;
+	struct scatterlist	t_tasks_sg_bounce;
+	void			*t_task_buf;
+	/*
+	 * Used for pre-registered fabric SGL passthrough WRITE and READ
+	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
+	 * and other HW target mode fabric modules.
+	 */
+	struct scatterlist	*t_task_pt_sgl;
+	struct list_head	*t_mem_list;
+	/* Used for BIDI READ */
+	struct list_head	*t_mem_bidi_list;
+	struct list_head	t_task_list;
+} ____cacheline_aligned;
+
+struct se_task {
+	unsigned char	task_sense;
+	struct scatterlist *task_sg;
+	struct scatterlist *task_sg_bidi;
+	u8		task_scsi_status;
+	u8		task_flags;
+	int		task_error_status;
+	int		task_state_flags;
+	int		task_padded_sg:1;
+	unsigned long long	task_lba;
+	u32		task_no;
+	u32		task_sectors;
+	u32		task_size;
+	u32		task_sg_num;
+	u32		task_sg_offset;
+	enum dma_data_direction	task_data_direction;
+	struct se_cmd *task_se_cmd;
+	struct se_device	*se_dev;
+	struct completion	task_stop_comp;
+	atomic_t	task_active;
+	atomic_t	task_execute_queue;
+	atomic_t	task_timeout;
+	atomic_t	task_sent;
+	atomic_t	task_stop;
+	atomic_t	task_state_active;
+	struct timer_list	task_timer;
+	struct se_device *se_obj_ptr;
+	struct list_head t_list;
+	struct list_head t_execute_list;
+	struct list_head t_state_list;
+} ____cacheline_aligned;
+
+#define TASK_CMD(task)	((struct se_cmd *)task->task_se_cmd)
+#define TASK_DEV(task)	((struct se_device *)task->se_dev)
+
+struct se_cmd {
+	/* SAM response code being sent to initiator */
+	u8			scsi_status;
+	u8			scsi_asc;
+	u8			scsi_ascq;
+	u8			scsi_sense_reason;
+	u16			scsi_sense_length;
+	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
+	int			alua_nonop_delay;
+	/* See include/linux/dma-mapping.h */
+	enum dma_data_direction	data_direction;
+	/* For SAM Task Attribute */
+	int			sam_task_attr;
+	/* Transport protocol dependent state, see transport_state_table */
+	enum transport_state_table t_state;
+	/* Transport protocol dependent state for out of order CmdSNs */
+	int			deferred_t_state;
+	/* Transport specific error status */
+	int			transport_error_status;
+	/* See se_cmd_flags_table */
+	u32			se_cmd_flags;
+	u32			se_ordered_id;
+	/* Total size in bytes associated with command */
+	u32			data_length;
+	/* SCSI Presented Data Transfer Length */
+	u32			cmd_spdtl;
+	u32			residual_count;
+	u32			orig_fe_lun;
+	/* Persistent Reservation key */
+	u64			pr_res_key;
+	atomic_t                transport_sent;
+	/* Used for sense data */
+	void			*sense_buffer;
+	struct list_head	se_delayed_list;
+	struct list_head	se_ordered_list;
+	struct list_head	se_lun_list;
+	struct se_device      *se_dev;
+	struct se_dev_entry   *se_deve;
+	struct se_device	*se_obj_ptr;
+	struct se_device	*se_orig_obj_ptr;
+	struct se_lun		*se_lun;
+	/* Only used for internal passthrough and legacy TCM fabric modules */
+	struct se_session	*se_sess;
+	struct se_tmr_req	*se_tmr_req;
+	/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
+	struct se_transport_task *t_task;
+	struct se_transport_task t_task_backstore;
+	struct target_core_fabric_ops *se_tfo;
+	int (*transport_emulate_cdb)(struct se_cmd *);
+	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
+	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
+	void (*transport_complete_callback)(struct se_cmd *);
+} ____cacheline_aligned;
+
+#define T_TASK(cmd)     ((struct se_transport_task *)(cmd->t_task))
+#define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo)
+
+struct se_tmr_req {
+	/* Task Management function to be performed */
+	u8			function;
+	/* Task Management response to send */
+	u8			response;
+	int			call_transport;
+	/* Reference to the ITT that the Task Mgmt function should be performed upon */
+	u32			ref_task_tag;
+	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
+	u64			ref_task_lun;
+	void 			*fabric_tmr_ptr;
+	struct se_cmd		*task_cmd;
+	struct se_cmd		*ref_cmd;
+	struct se_device	*tmr_dev;
+	struct se_lun		*tmr_lun;
+	struct list_head	tmr_list;
+} ____cacheline_aligned;
+
+struct se_ua {
+	u8			ua_asc;
+	u8			ua_ascq;
+	struct se_node_acl	*ua_nacl;
+	struct list_head	ua_dev_list;
+	struct list_head	ua_nacl_list;
+} ____cacheline_aligned;
+
+struct se_node_acl {
+	char			initiatorname[TRANSPORT_IQN_LEN];
+	/* Used to signal demo mode created ACL, disabled by default */
+	int			dynamic_node_acl:1;
+	u32			queue_depth;
+	u32			acl_index;
+	u64			num_cmds;
+	u64			read_bytes;
+	u64			write_bytes;
+	spinlock_t		stats_lock;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		acl_pr_ref_count;
+	struct se_dev_entry	*device_list;
+	struct se_session	*nacl_sess;
+	struct se_portal_group *se_tpg;
+	spinlock_t		device_list_lock;
+	spinlock_t		nacl_sess_lock;
+	struct config_group	acl_group;
+	struct config_group	acl_attrib_group;
+	struct config_group	acl_auth_group;
+	struct config_group	acl_param_group;
+	struct config_group	*acl_default_groups[4];
+	struct list_head	acl_list;
+	struct list_head	acl_sess_list;
+} ____cacheline_aligned;
+
+struct se_session {
+	u64			sess_bin_isid;
+	struct se_node_acl	*se_node_acl;
+	struct se_portal_group *se_tpg;
+	void			*fabric_sess_ptr;
+	struct list_head	sess_list;
+	struct list_head	sess_acl_list;
+} ____cacheline_aligned;
+
+#define SE_SESS(cmd)		((struct se_session *)(cmd)->se_sess)
+#define SE_NODE_ACL(sess)	((struct se_node_acl *)(sess)->se_node_acl)
+
+struct se_device;
+struct se_transform_info;
+struct scatterlist;
+
+struct se_lun_acl {
+	char			initiatorname[TRANSPORT_IQN_LEN];
+	u32			mapped_lun;
+	struct se_node_acl	*se_lun_nacl;
+	struct se_lun		*se_lun;
+	struct list_head	lacl_list;
+	struct config_group	se_lun_group;
+}  ____cacheline_aligned;
+
+struct se_dev_entry {
+	int			def_pr_registered:1;
+	/* See transport_lunflags_table */
+	u32			lun_flags;
+	u32			deve_cmds;
+	u32			mapped_lun;
+	u32			average_bytes;
+	u32			last_byte_count;
+	u32			total_cmds;
+	u32			total_bytes;
+	u64			pr_res_key;
+	u64			creation_time;
+	u32			attach_count;
+	u64			read_bytes;
+	u64			write_bytes;
+	atomic_t		ua_count;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		pr_ref_count;
+	struct se_lun_acl	*se_lun_acl;
+	spinlock_t		ua_lock;
+	struct se_lun		*se_lun;
+	struct list_head	alua_port_list;
+	struct list_head	ua_list;
+}  ____cacheline_aligned;
+
+struct se_dev_limits {
+	/* Max supported HW queue depth */
+	u32		hw_queue_depth;
+	/* Max supported virtual queue depth */
+	u32		queue_depth;
+	/* From include/linux/blkdev.h for the other HW/SW limits. */
+	struct queue_limits limits;
+} ____cacheline_aligned;
+
+struct se_dev_attrib {
+	int		emulate_dpo;
+	int		emulate_fua_write;
+	int		emulate_fua_read;
+	int		emulate_write_cache;
+	int		emulate_ua_intlck_ctrl;
+	int		emulate_tas;
+	int		emulate_tpu;
+	int		emulate_tpws;
+	int		emulate_reservations;
+	int		emulate_alua;
+	int		enforce_pr_isids;
+	u32		hw_block_size;
+	u32		block_size;
+	u32		hw_max_sectors;
+	u32		max_sectors;
+	u32		optimal_sectors;
+	u32		hw_queue_depth;
+	u32		queue_depth;
+	u32		task_timeout;
+	u32		max_unmap_lba_count;
+	u32		max_unmap_block_desc_count;
+	u32		unmap_granularity;
+	u32		unmap_granularity_alignment;
+	struct se_subsystem_dev *da_sub_dev;
+	struct config_group da_group;
+} ____cacheline_aligned;
+
+struct se_subsystem_dev {
+/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
+#define SE_DEV_ALIAS_LEN		512
+	unsigned char	se_dev_alias[SE_DEV_ALIAS_LEN];
+/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
+#define SE_UDEV_PATH_LEN		512
+	unsigned char	se_dev_udev_path[SE_UDEV_PATH_LEN];
+	u32		su_dev_flags;
+	struct se_hba *se_dev_hba;
+	struct se_device *se_dev_ptr;
+	struct se_dev_attrib se_dev_attrib;
+	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
+	struct t10_alua	t10_alua;
+	/* T10 Inquiry and VPD WWN Information */
+	struct t10_wwn	t10_wwn;
+	/* T10 SPC-2 + SPC-3 Reservations */
+	struct t10_reservation_template t10_reservation;
+	spinlock_t      se_dev_lock;
+	void            *se_dev_su_ptr;
+	struct list_head g_se_dev_list;
+	struct config_group se_dev_group;
+	/* For T10 Reservations */
+	struct config_group se_dev_pr_group;
+} ____cacheline_aligned;
+
+#define T10_ALUA(su_dev)	(&(su_dev)->t10_alua)
+#define T10_RES(su_dev)		(&(su_dev)->t10_reservation)
+#define T10_PR_OPS(su_dev)	(&(su_dev)->t10_reservation.pr_ops)
+
+struct se_device {
+	/* Set to 1 if thread is NOT sleeping on thread_sem */
+	u8			thread_active;
+	u8			dev_status_timer_flags;
+	/* RELATIVE TARGET PORT IDENTIFIER Counter */
+	u16			dev_rpti_counter;
+	/* Used for SAM Task Attribute ordering */
+	u32			dev_cur_ordered_id;
+	u32			dev_flags;
+	u32			dev_port_count;
+	/* See transport_device_status_table */
+	u32			dev_status;
+	u32			dev_tcq_window_closed;
+	/* Physical device queue depth */
+	u32			queue_depth;
+	/* Used for SPC-2 reservations enforce of ISIDs */
+	u64			dev_res_bin_isid;
+	t10_task_attr_index_t	dev_task_attr_type;
+	/* Pointer to transport specific device structure */
+	void 			*dev_ptr;
+	u32			dev_index;
+	u64			creation_time;
+	u32			num_resets;
+	u64			num_cmds;
+	u64			read_bytes;
+	u64			write_bytes;
+	spinlock_t		stats_lock;
+	/* Active commands on this virtual SE device */
+	atomic_t		active_cmds;
+	atomic_t		simple_cmds;
+	atomic_t		depth_left;
+	atomic_t		dev_ordered_id;
+	atomic_t		dev_tur_active;
+	atomic_t		execute_tasks;
+	atomic_t		dev_status_thr_count;
+	atomic_t		dev_hoq_count;
+	atomic_t		dev_ordered_sync;
+	struct se_obj		dev_obj;
+	struct se_obj		dev_access_obj;
+	struct se_obj		dev_export_obj;
+	struct se_queue_obj	*dev_queue_obj;
+	struct se_queue_obj	*dev_status_queue_obj;
+	spinlock_t		delayed_cmd_lock;
+	spinlock_t		ordered_cmd_lock;
+	spinlock_t		execute_task_lock;
+	spinlock_t		state_task_lock;
+	spinlock_t		dev_alua_lock;
+	spinlock_t		dev_reservation_lock;
+	spinlock_t		dev_state_lock;
+	spinlock_t		dev_status_lock;
+	spinlock_t		dev_status_thr_lock;
+	spinlock_t		se_port_lock;
+	spinlock_t		se_tmr_lock;
+	/* Used for legacy SPC-2 reservations */
+	struct se_node_acl	*dev_reserved_node_acl;
+	/* Used for ALUA Logical Unit Group membership */
+	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
+	/* Used for SPC-3 Persistent Reservations */
+	struct t10_pr_registration *dev_pr_res_holder;
+	struct list_head	dev_sep_list;
+	struct list_head	dev_tmr_list;
+	struct timer_list	dev_status_timer;
+	/* Pointer to descriptor for processing thread */
+	struct task_struct	*process_thread;
+	pid_t			process_thread_pid;
+	struct task_struct		*dev_mgmt_thread;
+	struct list_head	delayed_cmd_list;
+	struct list_head	ordered_cmd_list;
+	struct list_head	execute_task_list;
+	struct list_head	state_task_list;
+	/* Pointer to associated SE HBA */
+	struct se_hba		*se_hba;
+	struct se_subsystem_dev *se_sub_dev;
+	/* Pointer to template of function pointers for transport */
+	struct se_subsystem_api *transport;
+	/* Linked list for struct se_hba struct se_device list */
+	struct list_head	dev_list;
+	/* Linked list for struct se_global->g_se_dev_list */
+	struct list_head	g_se_dev_list;
+}  ____cacheline_aligned;
+
+#define SE_DEV(cmd)		((struct se_device *)(cmd)->se_lun->lun_se_dev)
+#define SU_DEV(dev)		((struct se_subsystem_dev *)(dev)->se_sub_dev)
+#define DEV_ATTRIB(dev)		(&(dev)->se_sub_dev->se_dev_attrib)
+#define DEV_T10_WWN(dev)	(&(dev)->se_sub_dev->t10_wwn)
+
+struct se_hba {
+	u16			hba_tpgt;
+	u32			hba_id;
+	/* See hba_flags_table */
+	u32			hba_flags;
+	/* Virtual iSCSI devices attached. */
+	u32			dev_count;
+	u32			hba_index;
+	atomic_t		load_balance_queue;
+	atomic_t		left_queue_depth;
+	/* Maximum queue depth the HBA can handle. */
+	atomic_t		max_queue_depth;
+	/* Pointer to transport specific host structure. */
+	void			*hba_ptr;
+	/* Linked list for struct se_device */
+	struct list_head	hba_dev_list;
+	struct list_head	hba_list;
+	spinlock_t		device_lock;
+	spinlock_t		hba_queue_lock;
+	struct config_group	hba_group;
+	struct mutex		hba_access_mutex;
+	struct se_subsystem_api *transport;
+}  ____cacheline_aligned;
+
+#define SE_HBA(d)		((struct se_hba *)(d)->se_hba)
+
+struct se_lun {
+	/* See transport_lun_status_table */
+	enum transport_lun_status_table lun_status;
+	u32			lun_access;
+	u32			lun_flags;
+	u32			unpacked_lun;
+	atomic_t		lun_acl_count;
+	spinlock_t		lun_acl_lock;
+	spinlock_t		lun_cmd_lock;
+	spinlock_t		lun_sep_lock;
+	struct completion	lun_shutdown_comp;
+	struct list_head	lun_cmd_list;
+	struct list_head	lun_acl_list;
+	struct se_device	*lun_se_dev;
+	struct config_group	lun_group;
+	struct se_port	*lun_sep;
+} ____cacheline_aligned;
+
+#define SE_LUN(c)		((struct se_lun *)(c)->se_lun)
+
+struct scsi_port_stats {
+       u64     cmd_pdus;
+       u64     tx_data_octets;
+       u64     rx_data_octets;
+} ____cacheline_aligned;
+
+struct se_port {
+	/* RELATIVE TARGET PORT IDENTIFIER */
+	u16		sep_rtpi;
+	int		sep_tg_pt_secondary_stat;
+	int		sep_tg_pt_secondary_write_md;
+	u32		sep_index;
+	struct scsi_port_stats sep_stats;
+	/* Used for ALUA Target Port Groups membership */
+	atomic_t	sep_tg_pt_gp_active;
+	atomic_t	sep_tg_pt_secondary_offline;
+	/* Used for PR ALL_TG_PT=1 */
+	atomic_t	sep_tg_pt_ref_cnt;
+	spinlock_t	sep_alua_lock;
+	struct mutex	sep_tg_pt_md_mutex;
+	struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
+	struct se_lun *sep_lun;
+	struct se_portal_group *sep_tpg;
+	struct list_head sep_alua_list;
+	struct list_head sep_list;
+} ____cacheline_aligned;
+
+struct se_tpg_np {
+	struct se_portal_group *tpg_np_parent;
+	struct config_group	tpg_np_group;
+} ____cacheline_aligned;
+
+struct se_portal_group {
+	/* Type of target portal group, see transport_tpg_type_table */
+	enum transport_tpg_type_table se_tpg_type;
+	/* Number of ACLed Initiator Nodes for this TPG */
+	u32			num_node_acls;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		tpg_pr_ref_count;
+	/* Spinlock for adding/removing ACLed Nodes */
+	spinlock_t		acl_node_lock;
+	/* Spinlock for adding/removing sessions */
+	spinlock_t		session_lock;
+	spinlock_t		tpg_lun_lock;
+	/* Pointer to $FABRIC_MOD portal group */
+	void			*se_tpg_fabric_ptr;
+	struct list_head	se_tpg_list;
+	/* linked list for initiator ACL list */
+	struct list_head	acl_node_list;
+	struct se_lun		*tpg_lun_list;
+	struct se_lun		tpg_virt_lun0;
+	/* List of TCM sessions associated with this TPG */
+	struct list_head	tpg_sess_list;
+	/* Pointer to $FABRIC_MOD dependent code */
+	struct target_core_fabric_ops *se_tpg_tfo;
+	struct se_wwn		*se_tpg_wwn;
+	struct config_group	tpg_group;
+	struct config_group	*tpg_default_groups[6];
+	struct config_group	tpg_lun_group;
+	struct config_group	tpg_np_group;
+	struct config_group	tpg_acl_group;
+	struct config_group	tpg_attrib_group;
+	struct config_group	tpg_param_group;
+} ____cacheline_aligned;
+
+#define TPG_TFO(se_tpg)	((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo)
+
+struct se_wwn {
+	struct target_fabric_configfs *wwn_tf;
+	struct config_group	wwn_group;
+} ____cacheline_aligned;
+
+struct se_global {
+	u16			alua_lu_gps_counter;
+	int			g_sub_api_initialized;
+	u32			in_shutdown;
+	u32			alua_lu_gps_count;
+	u32			g_hba_id_counter;
+	struct config_group	target_core_hbagroup;
+	struct config_group	alua_group;
+	struct config_group	alua_lu_gps_group;
+	struct list_head	g_lu_gps_list;
+	struct list_head	g_se_tpg_list;
+	struct list_head	g_hba_list;
+	struct list_head	g_se_dev_list;
+	struct se_hba		*g_lun0_hba;
+	struct se_subsystem_dev *g_lun0_su_dev;
+	struct se_device	*g_lun0_dev;
+	struct t10_alua_lu_gp	*default_lu_gp;
+	spinlock_t		g_device_lock;
+	spinlock_t		hba_lock;
+	spinlock_t		se_tpg_lock;
+	spinlock_t		lu_gps_lock;
+	spinlock_t		plugin_class_lock;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_BASE_H */
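The accessor macros defined above (T_TASK(), SE_DEV(), SE_LUN(), DEV_ATTRIB(), CMD_TFO() and friends) are how core and fabric code is expected to reach the backing structures hanging off a struct se_cmd. A minimal sketch of their use follows; the helper function and its printk are purely illustrative and not part of this patch:

#include <linux/kernel.h>
#include <target/target_core_base.h>

/* Illustrative only: dump a few attributes of the device backing a command. */
static void example_dump_cmd_device(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);	/* cmd->se_lun->lun_se_dev */

	printk(KERN_INFO "%s: LUN %u depth_left=%d block_size=%u\n",
		CMD_TFO(cmd)->get_fabric_name(),
		SE_LUN(cmd)->unpacked_lun,
		atomic_read(&dev->depth_left),
		DEV_ATTRIB(dev)->block_size);
}
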
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
new file mode 100644
index 0000000..40e6e74
--- /dev/null
+++ b/include/target/target_core_configfs.h
@@ -0,0 +1,52 @@
+#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION
+
+#define TARGET_CORE_CONFIG_ROOT	"/sys/kernel/config"
+
+#define TARGET_CORE_NAME_MAX_LEN	64
+#define TARGET_FABRIC_NAME_SIZE		32
+
+extern struct target_fabric_configfs *target_fabric_configfs_init(
+				struct module *, const char *);
+extern void target_fabric_configfs_free(struct target_fabric_configfs *);
+extern int target_fabric_configfs_register(struct target_fabric_configfs *);
+extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
+
+struct target_fabric_configfs_template {
+	struct config_item_type tfc_discovery_cit;
+	struct config_item_type	tfc_wwn_cit;
+	struct config_item_type tfc_tpg_cit;
+	struct config_item_type tfc_tpg_base_cit;
+	struct config_item_type tfc_tpg_lun_cit;
+	struct config_item_type tfc_tpg_port_cit;
+	struct config_item_type tfc_tpg_np_cit;
+	struct config_item_type tfc_tpg_np_base_cit;
+	struct config_item_type tfc_tpg_attrib_cit;
+	struct config_item_type tfc_tpg_param_cit;
+	struct config_item_type tfc_tpg_nacl_cit;
+	struct config_item_type tfc_tpg_nacl_base_cit;
+	struct config_item_type tfc_tpg_nacl_attrib_cit;
+	struct config_item_type tfc_tpg_nacl_auth_cit;
+	struct config_item_type tfc_tpg_nacl_param_cit;
+	struct config_item_type tfc_tpg_mappedlun_cit;
+};
+
+struct target_fabric_configfs {
+	char			tf_name[TARGET_FABRIC_NAME_SIZE];
+	atomic_t		tf_access_cnt;
+	struct list_head	tf_list;
+	struct config_group	tf_group;
+	struct config_group	tf_disc_group;
+	struct config_group	*tf_default_groups[2];
+	/* Pointer to fabric's config_item */
+	struct config_item	*tf_fabric;
+	/* Passed from fabric modules */
+	struct config_item_type	*tf_fabric_cit;
+	/* Pointer to target core subsystem */
+	struct configfs_subsystem *tf_subsys;
+	/* Pointer to fabric's struct module */
+	struct module *tf_module;
+	struct target_core_fabric_ops tf_ops;
+	struct target_fabric_configfs_template tf_cit_tmpl;
+};
+
+#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)
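A fabric module consumes this API at init time roughly as follows. This is a hedged sketch: the fabric name "example", example_ops and example_wwn_attrs are hypothetical placeholders, and error handling is reduced to the essentials.

#include <linux/module.h>
#include <linux/err.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

/* Assumed to be provided elsewhere by the fabric module: */
extern struct target_core_fabric_ops example_ops;
extern struct configfs_attribute *example_wwn_attrs[];

static struct target_fabric_configfs *example_fabric;

static int __init example_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	fabric = target_fabric_configfs_init(THIS_MODULE, "example");
	if (IS_ERR(fabric))
		return PTR_ERR(fabric);

	/* Hand the fabric's template of callbacks to the target core. */
	fabric->tf_ops = example_ops;
	/* Attach configfs attribute lists to the generated config_item_types. */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = example_wwn_attrs;

	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		target_fabric_configfs_free(fabric);
		return ret;
	}
	example_fabric = fabric;
	return 0;
}
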
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
new file mode 100644
index 0000000..52b18a5
--- /dev/null
+++ b/include/target/target_core_device.h
@@ -0,0 +1,61 @@
+#ifndef TARGET_CORE_DEVICE_H
+#define TARGET_CORE_DEVICE_H
+
+extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
+extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern struct se_dev_entry *core_get_se_deve_from_rtpi(
+					struct se_node_acl *, u16);
+extern int core_free_device_list_for_node(struct se_node_acl *,
+					struct se_portal_group *);
+extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
+extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
+					u32, struct se_node_acl *,
+					struct se_portal_group *, int);
+extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+extern int core_dev_export(struct se_device *, struct se_portal_group *,
+					struct se_lun *);
+extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
+					struct se_lun *);
+extern int transport_core_report_lun_response(struct se_cmd *);
+extern void se_release_device_for_hba(struct se_device *);
+extern void se_release_vpd_for_dev(struct se_device *);
+extern void se_clear_dev_ports(struct se_device *);
+extern int se_free_virtual_device(struct se_device *, struct se_hba *);
+extern int se_dev_check_online(struct se_device *);
+extern int se_dev_check_shutdown(struct se_device *);
+extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+extern int se_dev_set_task_timeout(struct se_device *, u32);
+extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+extern int se_dev_set_unmap_granularity(struct se_device *, u32);
+extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+extern int se_dev_set_emulate_dpo(struct se_device *, int);
+extern int se_dev_set_emulate_fua_write(struct se_device *, int);
+extern int se_dev_set_emulate_fua_read(struct se_device *, int);
+extern int se_dev_set_emulate_write_cache(struct se_device *, int);
+extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+extern int se_dev_set_emulate_tas(struct se_device *, int);
+extern int se_dev_set_emulate_tpu(struct se_device *, int);
+extern int se_dev_set_emulate_tpws(struct se_device *, int);
+extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
+extern int se_dev_set_queue_depth(struct se_device *, u32);
+extern int se_dev_set_max_sectors(struct se_device *, u32);
+extern int se_dev_set_optimal_sectors(struct se_device *, u32);
+extern int se_dev_set_block_size(struct se_device *, u32);
+extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+					struct se_device *, u32);
+extern int core_dev_del_lun(struct se_portal_group *, u32);
+extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+							u32, char *, int *);
+extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun_acl *, u32, u32);
+extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun *, struct se_lun_acl *);
+extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun_acl *lacl);
+extern int core_dev_setup_virtual_lun0(void);
+extern void core_dev_release_virtual_lun0(void);
+
+#endif /* TARGET_CORE_DEVICE_H */
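These are the core-internal helpers the configfs layer uses to build and tear down LUN mappings. Purely as an illustration of the calling convention (not how configfs actually drives it), here is a sketch that exports an existing struct se_device as LUN 0 of a TPG and removes it again; the function is hypothetical and error handling is simplified:

#include <linux/err.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>

static int example_export_lun0(struct se_portal_group *tpg, struct se_hba *hba,
			       struct se_device *dev)
{
	struct se_lun *lun;

	/* Make dev visible as unpacked LUN 0 of this portal group. */
	lun = core_dev_add_lun(tpg, hba, dev, 0);
	if (IS_ERR_OR_NULL(lun))
		return -EINVAL;

	/* ...the LUN is now addressable by initiators mapped to this TPG... */

	return core_dev_del_lun(tpg, lun->unpacked_lun);
}
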
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h
new file mode 100644
index 0000000..a26fb75
--- /dev/null
+++ b/include/target/target_core_fabric_configfs.h
@@ -0,0 +1,106 @@
+/*
+ * Used for tfc_wwn_cit attributes
+ */
+
+#include <target/configfs_macros.h>
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_attrib, se_node_acl);
+#define TF_NACL_ATTRIB_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_attrib_attribute _fabric##_nacl_attrib_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_attrib_show_##_name,				\
+	_fabric##_nacl_attrib_store_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_auth, se_node_acl);
+#define TF_NACL_AUTH_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_auth_show_##_name,				\
+	_fabric##_nacl_auth_store_##_name);
+
+#define TF_NACL_AUTH_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_auth_show_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_param, se_node_acl);
+#define TF_NACL_PARAM_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_param_show_##_name,				\
+	_fabric##_nacl_param_store_##_name);
+
+#define TF_NACL_PARAM_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_param_show_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_base, se_node_acl);
+#define TF_NACL_BASE_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_show_##_name,					\
+	_fabric##_nacl_store_##_name);
+
+#define TF_NACL_BASE_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_show_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_np_base, se_tpg_np);
+#define TF_NP_BASE_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_np_base_attribute _fabric##_np_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_np_show_##_name,					\
+	_fabric##_np_store_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group);
+#define TF_TPG_ATTRIB_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_attrib_show_##_name,				\
+	_fabric##_tpg_attrib_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group);
+#define TF_TPG_PARAM_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_tpg_param_attribute _fabric##_tpg_param_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_param_show_##_name,				\
+	_fabric##_tpg_param_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg, se_portal_group);
+#define TF_TPG_BASE_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_tpg_attribute _fabric##_tpg_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_show_##_name,					\
+	_fabric##_tpg_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
+#define TF_WWN_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_wwn_show_attr_##_name,				\
+	_fabric##_wwn_store_attr_##_name);
+
+#define TF_WWN_ATTR_RO(_fabric, _name)					\
+static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_wwn_show_attr_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_discovery, target_fabric_configfs);
+#define TF_DISC_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_disc_show_##_name,					\
+	_fabric##_disc_store_##_name);
+
+#define TF_DISC_ATTR_RO(_fabric, _name)					\
+static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_disc_show_##_name);
+
+extern int target_fabric_setup_cits(struct target_fabric_configfs *);
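Each macro above stamps out a configfs attribute whose show/store callbacks must follow the <fabric>_<group>_show_<name> / <fabric>_<group>_store_<name> naming convention the macro concatenates. A hedged sketch for a hypothetical fabric called "example" defining a trivial TPG-level "enable" attribute:

#include <linux/kernel.h>
#include <linux/stat.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric_configfs.h>

static ssize_t example_tpg_show_enable(struct se_portal_group *se_tpg,
				       char *page)
{
	/* Illustrative only: pretend the TPG is always enabled. */
	return sprintf(page, "1\n");
}

static ssize_t example_tpg_store_enable(struct se_portal_group *se_tpg,
					const char *page, size_t count)
{
	/* A real fabric would parse @page and act on it here. */
	return count;
}

TF_TPG_BASE_ATTR(example, enable, S_IRUGO | S_IWUSR);

/* Typically wired into TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs. */
static struct configfs_attribute *example_tpg_attrs[] = {
	&example_tpg_enable.attr,
	NULL,
};
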
diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h
new file mode 100644
index 0000000..c2f8d0e
--- /dev/null
+++ b/include/target/target_core_fabric_lib.h
@@ -0,0 +1,28 @@
+#ifndef TARGET_CORE_FABRIC_LIB_H
+#define TARGET_CORE_FABRIC_LIB_H
+
+extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+#endif /* TARGET_CORE_FABRIC_LIB_H */
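These shared helpers exist so fabric modules do not each have to re-implement TransportID encoding; a fabric points the corresponding ops at the variant matching its protocol. A sketch for a hypothetical iSCSI-style fabric, showing only the TransportID-related callbacks:

#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric_lib.h>

static struct target_core_fabric_ops example_transport_id_ops = {
	.get_fabric_proto_ident		= iscsi_get_fabric_proto_ident,
	.tpg_get_pr_transport_id	= iscsi_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= iscsi_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= iscsi_parse_pr_out_transport_id,
	/* ...all remaining callbacks omitted from this sketch... */
};
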
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
new file mode 100644
index 0000000..f3ac12b
--- /dev/null
+++ b/include/target/target_core_fabric_ops.h
@@ -0,0 +1,100 @@
+/* Defined in target_core_configfs.h */
+struct target_fabric_configfs;
+
+struct target_core_fabric_ops {
+	struct configfs_subsystem *tf_subsys;
+	/*
+	 * Optional to signal struct se_task->task_sg[] padding entries
+	 * for scatterlist chaining using transport_do_task_sg_chain(),
+	 * disabled by default
+	 */
+	int task_sg_chaining:1;
+	char *(*get_fabric_name)(void);
+	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
+	char *(*tpg_get_wwn)(struct se_portal_group *);
+	u16 (*tpg_get_tag)(struct se_portal_group *);
+	u32 (*tpg_get_default_depth)(struct se_portal_group *);
+	u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *,
+				unsigned char *);
+	u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *);
+	char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
+				const char *, u32 *, char **);
+	int (*tpg_check_demo_mode)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
+	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
+	struct se_node_acl *(*tpg_alloc_fabric_acl)(
+					struct se_portal_group *);
+	void (*tpg_release_fabric_acl)(struct se_portal_group *,
+					struct se_node_acl *);
+	u32 (*tpg_get_inst_index)(struct se_portal_group *);
+	/*
+	 * Optional function pointer for TCM to perform command map
+	 * from TCM processing thread context, for those struct se_cmd
+	 * initially allocated in interrupt context.
+	 */
+	int (*new_cmd_map)(struct se_cmd *);
+	/*
+	 * Optional function pointer for TCM fabric modules that use
+	 * Linux/NET sockets to allocate struct iovec array to struct se_cmd
+	 */
+	int (*alloc_cmd_iovecs)(struct se_cmd *);
+	/*
+	 * Optional to release struct se_cmd and fabric dependent allocated
+	 * I/O descriptor in transport_cmd_check_stop()
+	 */
+	void (*check_stop_free)(struct se_cmd *);
+	void (*release_cmd_to_pool)(struct se_cmd *);
+	void (*release_cmd_direct)(struct se_cmd *);
+	/*
+	 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+	 */
+	int (*shutdown_session)(struct se_session *);
+	void (*close_session)(struct se_session *);
+	void (*stop_session)(struct se_session *, int, int);
+	void (*fall_back_to_erl0)(struct se_session *);
+	int (*sess_logged_in)(struct se_session *);
+	u32 (*sess_get_index)(struct se_session *);
+	/*
+	 * Used only for SCSI fabrics that contain multi-value TransportIDs
+	 * (like iSCSI).  All other SCSI fabrics should set this to NULL.
+	 */
+	u32 (*sess_get_initiator_sid)(struct se_session *,
+				      unsigned char *, u32);
+	int (*write_pending)(struct se_cmd *);
+	int (*write_pending_status)(struct se_cmd *);
+	void (*set_default_node_attributes)(struct se_node_acl *);
+	u32 (*get_task_tag)(struct se_cmd *);
+	int (*get_cmd_state)(struct se_cmd *);
+	void (*new_cmd_failure)(struct se_cmd *);
+	int (*queue_data_in)(struct se_cmd *);
+	int (*queue_status)(struct se_cmd *);
+	int (*queue_tm_rsp)(struct se_cmd *);
+	u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
+	u16 (*get_fabric_sense_len)(void);
+	int (*is_state_remove)(struct se_cmd *);
+	u64 (*pack_lun)(unsigned int);
+	/*
+	 * fabric module calls for target_core_fabric_configfs.c
+	 */
+	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
+				struct config_group *, const char *);
+	void (*fabric_drop_wwn)(struct se_wwn *);
+	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
+				struct config_group *, const char *);
+	void (*fabric_drop_tpg)(struct se_portal_group *);
+	int (*fabric_post_link)(struct se_portal_group *,
+				struct se_lun *);
+	void (*fabric_pre_unlink)(struct se_portal_group *,
+				struct se_lun *);
+	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_np)(struct se_tpg_np *);
+	struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_nodeacl)(struct se_node_acl *);
+};
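Most of these callbacks are mandatory glue that a fabric module must supply; a couple of the simpler ones are shown below to illustrate the expected shapes. The "example" names are hypothetical and only a fragment of a real ops table is filled in.

#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>

static char *example_get_fabric_name(void)
{
	return "example";
}

static u32 example_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	/* A real fabric returns a stable per-TPG index here. */
	return 1;
}

static struct target_core_fabric_ops example_ops = {
	.get_fabric_name	= example_get_fabric_name,
	.tpg_get_inst_index	= example_tpg_get_inst_index,
	/* ...every remaining callback the core will invoke must be filled in... */
};
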
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
new file mode 100644
index 0000000..6c8248b
--- /dev/null
+++ b/include/target/target_core_tmr.h
@@ -0,0 +1,43 @@
+#ifndef TARGET_CORE_TMR_H
+#define TARGET_CORE_TMR_H
+
+/* task management function values */
+#ifdef ABORT_TASK
+#undef ABORT_TASK
+#endif /* ABORT_TASK */
+#define ABORT_TASK				1
+#ifdef ABORT_TASK_SET
+#undef ABORT_TASK_SET
+#endif /* ABORT_TASK_SET */
+#define ABORT_TASK_SET				2
+#ifdef CLEAR_ACA
+#undef CLEAR_ACA
+#endif /* CLEAR_ACA */
+#define CLEAR_ACA				3
+#ifdef CLEAR_TASK_SET
+#undef CLEAR_TASK_SET
+#endif /* CLEAR_TASK_SET */
+#define CLEAR_TASK_SET				4
+#define LUN_RESET				5
+#define TARGET_WARM_RESET			6
+#define TARGET_COLD_RESET			7
+#define TASK_REASSIGN				8
+
+/* task management response values */
+#define TMR_FUNCTION_COMPLETE			0
+#define TMR_TASK_DOES_NOT_EXIST			1
+#define TMR_LUN_DOES_NOT_EXIST			2
+#define TMR_TASK_STILL_ALLEGIANT		3
+#define TMR_TASK_FAILOVER_NOT_SUPPORTED		4
+#define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	5
+#define TMR_FUNCTION_AUTHORIZATION_FAILED	6
+#define TMR_FUNCTION_REJECTED			255
+
+extern struct kmem_cache *se_tmr_req_cache;
+
+extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
+extern void core_tmr_release_req(struct se_tmr_req *);
+extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+				struct list_head *, struct se_cmd *);
+
+#endif /* TARGET_CORE_TMR_H */
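Putting the TMR function values and the allocator together, here is a hedged sketch of how a fabric might issue a LUN RESET. The function, its arguments and the error handling are illustrative; since this header does not fix the allocator's failure convention, both NULL and ERR_PTR are treated as failure.

#include <linux/err.h>
#include <target/target_core_base.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>

static int example_send_lun_reset(struct se_cmd *se_cmd, void *fabric_tmr)
{
	struct se_tmr_req *tmr;

	tmr = core_tmr_alloc_req(se_cmd, fabric_tmr, LUN_RESET);
	if (IS_ERR_OR_NULL(tmr))
		return -ENOMEM;
	se_cmd->se_tmr_req = tmr;

	/* Queue the TMR to the device's processing thread. */
	return transport_generic_handle_tmr(se_cmd);
}
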
diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h
new file mode 100644
index 0000000..77e1872
--- /dev/null
+++ b/include/target/target_core_tpg.h
@@ -0,0 +1,35 @@
+#ifndef TARGET_CORE_TPG_H
+#define TARGET_CORE_TPG_H
+
+extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+						const char *);
+extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+						unsigned char *);
+extern void core_tpg_add_node_to_devs(struct se_node_acl *,
+						struct se_portal_group *);
+extern struct se_node_acl *core_tpg_check_initiator_node_acl(
+						struct se_portal_group *,
+						unsigned char *);
+extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
+extern void core_tpg_clear_object_luns(struct se_portal_group *);
+extern struct se_node_acl *core_tpg_add_initiator_node_acl(
+					struct se_portal_group *,
+					struct se_node_acl *,
+					const char *, u32);
+extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
+						struct se_node_acl *, int);
+extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
+						unsigned char *, u32, int);
+extern int core_tpg_register(struct target_core_fabric_ops *,
+					struct se_wwn *,
+					struct se_portal_group *, void *,
+					int);
+extern int core_tpg_deregister(struct se_portal_group *);
+extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
+				void *);
+extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
+extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+#endif /* TARGET_CORE_TPG_H */
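core_tpg_register()/core_tpg_deregister() are the entry points a fabric's make_tpg/drop_tpg configfs callbacks are expected to use. A sketch, assuming example_fabric_ops is the fabric's ops table and TRANSPORT_TPG_TYPE_NORMAL (from target_core_base.h) is the wanted TPG type:

#include <target/target_core_base.h>
#include <target/target_core_tpg.h>

extern struct target_core_fabric_ops example_fabric_ops;

static int example_setup_tpg(struct se_wwn *wwn, struct se_portal_group *se_tpg,
			     void *fabric_tpg_ptr)
{
	/* Registers se_tpg with the target core under this fabric. */
	return core_tpg_register(&example_fabric_ops, wwn, se_tpg,
				 fabric_tpg_ptr, TRANSPORT_TPG_TYPE_NORMAL);
}
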
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
new file mode 100644
index 0000000..2e8ec51
--- /dev/null
+++ b/include/target/target_core_transport.h
@@ -0,0 +1,355 @@
+#ifndef TARGET_CORE_TRANSPORT_H
+#define TARGET_CORE_TRANSPORT_H
+
+#define TARGET_CORE_VERSION			TARGET_CORE_MOD_VERSION
+
+/* Attempts before moving from SHORT to LONG */
+#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3  /* In milliseconds */
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10 /* In milliseconds */
+
+#define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */
+
+#define PYX_TRANSPORT_SENT_TO_TRANSPORT		0
+#define PYX_TRANSPORT_WRITE_PENDING		1
+
+#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE	-1
+#define PYX_TRANSPORT_HBA_QUEUE_FULL		-2
+#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS	-3
+#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES	-4
+#define PYX_TRANSPORT_INVALID_CDB_FIELD		-5
+#define PYX_TRANSPORT_INVALID_PARAMETER_LIST	-6
+#define PYX_TRANSPORT_LU_COMM_FAILURE		-7
+#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE		-8
+#define PYX_TRANSPORT_WRITE_PROTECTED		-9
+#define PYX_TRANSPORT_TASK_TIMEOUT		-10
+#define PYX_TRANSPORT_RESERVATION_CONFLICT	-11
+#define PYX_TRANSPORT_ILLEGAL_REQUEST		-12
+#define PYX_TRANSPORT_USE_SENSE_REASON		-13
+
+#ifndef SAM_STAT_RESERVATION_CONFLICT
+#define SAM_STAT_RESERVATION_CONFLICT		0x18
+#endif
+
+#define TRANSPORT_PLUGIN_FREE			0
+#define TRANSPORT_PLUGIN_REGISTERED		1
+
+#define TRANSPORT_PLUGIN_PHBA_PDEV		1
+#define TRANSPORT_PLUGIN_VHBA_PDEV		2
+#define TRANSPORT_PLUGIN_VHBA_VDEV		3
+
+/* For SE OBJ Plugins, in seconds */
+#define TRANSPORT_TIMEOUT_TUR			10
+#define TRANSPORT_TIMEOUT_TYPE_DISK		60
+#define TRANSPORT_TIMEOUT_TYPE_ROM		120
+#define TRANSPORT_TIMEOUT_TYPE_TAPE		600
+#define TRANSPORT_TIMEOUT_TYPE_OTHER		300
+
+/* For se_task->task_state_flags */
+#define TSF_EXCEPTION_CLEARED			0x01
+
+/*
+ * struct se_subsystem_dev->su_dev_flags
+ */
+#define SDF_FIRMWARE_VPD_UNIT_SERIAL		0x00000001
+#define SDF_EMULATED_VPD_UNIT_SERIAL		0x00000002
+#define SDF_USING_UDEV_PATH			0x00000004
+#define SDF_USING_ALIAS				0x00000008
+
+/*
+ * struct se_device->dev_flags
+ */
+#define DF_READ_ONLY				0x00000001
+#define DF_SPC2_RESERVATIONS			0x00000002
+#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
+
+/* struct se_dev_attrib sanity values */
+/* 10 Minutes */
+#define DA_TASK_TIMEOUT_MAX			600
+/* Default max_unmap_lba_count */
+#define DA_MAX_UNMAP_LBA_COUNT			0
+/* Default max_unmap_block_desc_count */
+#define DA_MAX_UNMAP_BLOCK_DESC_COUNT		0
+/* Default unmap_granularity */
+#define DA_UNMAP_GRANULARITY_DEFAULT		0
+/* Default unmap_granularity_alignment */
+#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
+/* Emulation for Direct Page Out */
+#define DA_EMULATE_DPO				0
+/* Emulation for Forced Unit Access WRITEs */
+#define DA_EMULATE_FUA_WRITE			1
+/* Emulation for Forced Unit Access READs */
+#define DA_EMULATE_FUA_READ			0
+/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
+#define DA_EMULATE_WRITE_CACHE			0
+/* Emulation for UNIT ATTENTION Interlock Control */
+#define DA_EMULATE_UA_INTLLCK_CTRL		0
+/* Emulation for TASK_ABORTED status (TAS) by default */
+#define DA_EMULATE_TAS				1
+/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
+#define DA_EMULATE_TPU				0
+/*
+ * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
+ * block/blk-lib.c:blkdev_issue_discard()
+ */
+#define DA_EMULATE_TPWS				0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_RESERVATIONS			0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_ALUA				0
+/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
+#define DA_ENFORCE_PR_ISIDS			1
+#define DA_STATUS_MAX_SECTORS_MIN		16
+#define DA_STATUS_MAX_SECTORS_MAX		8192
+
+#define SE_MODE_PAGE_BUF			512
+
+#define MOD_MAX_SECTORS(ms, bs)			(ms % (PAGE_SIZE / bs))
+
+struct se_mem;
+struct se_subsystem_api;
+
+extern int init_se_global(void);
+extern void release_se_global(void);
+extern void init_scsi_index_table(void);
+extern u32 scsi_get_new_index(scsi_index_t);
+extern void transport_init_queue_obj(struct se_queue_obj *);
+extern int transport_subsystem_check_init(void);
+extern int transport_subsystem_register(struct se_subsystem_api *);
+extern void transport_subsystem_release(struct se_subsystem_api *);
+extern void transport_load_plugins(void);
+extern struct se_session *transport_init_session(void);
+extern void __transport_register_session(struct se_portal_group *,
+					struct se_node_acl *,
+					struct se_session *, void *);
+extern void transport_register_session(struct se_portal_group *,
+					struct se_node_acl *,
+					struct se_session *, void *);
+extern void transport_free_session(struct se_session *);
+extern void transport_deregister_session_configfs(struct se_session *);
+extern void transport_deregister_session(struct se_session *);
+extern void transport_cmd_finish_abort(struct se_cmd *, int);
+extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
+extern void transport_complete_sync_cache(struct se_cmd *, int);
+extern void transport_complete_task(struct se_task *, int);
+extern void transport_add_task_to_execute_queue(struct se_task *,
+						struct se_task *,
+						struct se_device *);
+extern void transport_remove_task_from_execute_queue(struct se_task *,
+						struct se_device *);
+extern unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+extern void transport_dump_dev_state(struct se_device *, char *, int *);
+extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
+					unsigned long long, char *, int *);
+extern void transport_dump_vpd_proto_id(struct t10_vpd *,
+					unsigned char *, int);
+extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_assoc(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_ident_type(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_ident(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
+extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
+					struct se_subsystem_api *,
+					struct se_subsystem_dev *, u32,
+					void *, struct se_dev_limits *,
+					const char *, const char *);
+extern void transport_device_setup_cmd(struct se_cmd *);
+extern void transport_init_se_cmd(struct se_cmd *,
+					struct target_core_fabric_ops *,
+					struct se_session *, u32, int, int,
+					unsigned char *);
+extern void transport_free_se_cmd(struct se_cmd *);
+extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+extern int transport_generic_handle_cdb(struct se_cmd *);
+extern int transport_generic_handle_cdb_map(struct se_cmd *);
+extern int transport_generic_handle_data(struct se_cmd *);
+extern void transport_new_cmd_failure(struct se_cmd *);
+extern int transport_generic_handle_tmr(struct se_cmd *);
+extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
+extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
+extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
+				struct scatterlist *, u32);
+extern int transport_clear_lun_from_sessions(struct se_lun *);
+extern int transport_check_aborted_status(struct se_cmd *, int);
+extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+extern void transport_send_task_abort(struct se_cmd *);
+extern void transport_release_cmd_to_pool(struct se_cmd *);
+extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
+extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
+extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
+extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
+					void *, struct se_mem *,
+					struct se_mem **, u32 *, u32 *);
+extern void transport_do_task_sg_chain(struct se_cmd *);
+extern void transport_generic_process_write(struct se_cmd *);
+extern int transport_generic_do_tmr(struct se_cmd *);
+/* From target_core_alua.c */
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+
+/*
+ * Each struct se_transport_task can have any number of struct se_task's
+ * for the storage transport(s) to execute.
+ * Used primarily for splitting up CDBs that exceed the physical storage
+ * HBA's maximum sector count per task.
+ */
+struct se_mem {
+	struct page	*se_page;
+	u32		se_len;
+	u32		se_off;
+	struct list_head se_list;
+} ____cacheline_aligned;
+
+/*
+ * Each type of disk transport supported MUST have a template defined
+ * within its .h file.
+ */
+struct se_subsystem_api {
+	/*
+	 * The Name. :-)
+	 */
+	char name[16];
+	/*
+	 * Transport Type.
+	 */
+	u8 transport_type;
+	/*
+	 * struct module for struct se_hba references
+	 */
+	struct module *owner;
+	/*
+	 * Used for global se_subsystem_api list_head
+	 */
+	struct list_head sub_api_list;
+	/*
+	 * For SCF_SCSI_NON_DATA_CDB
+	 */
+	int (*cdb_none)(struct se_task *);
+	/*
+	 * For SCF_SCSI_CONTROL_NONSG_IO_CDB
+	 */
+	int (*map_task_non_SG)(struct se_task *);
+	/*
+	 * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
+	 */
+	int (*map_task_SG)(struct se_task *);
+	/*
+	 * attach_hba():
+	 */
+	int (*attach_hba)(struct se_hba *, u32);
+	/*
+	 * detach_hba():
+	 */
+	void (*detach_hba)(struct se_hba *);
+	/*
+	 * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
+	 *		Linux/SCSI struct Scsi_Host passthrough
+	 */
+	int (*pmode_enable_hba)(struct se_hba *, unsigned long);
+	/*
+	 * allocate_virtdevice():
+	 */
+	void *(*allocate_virtdevice)(struct se_hba *, const char *);
+	/*
+	 * create_virtdevice(): Only for Virtual HBAs
+	 */
+	struct se_device *(*create_virtdevice)(struct se_hba *,
+				struct se_subsystem_dev *, void *);
+	/*
+	 * free_device():
+	 */
+	void (*free_device)(void *);
+
+	/*
+	 * dpo_emulated():
+	 */
+	int (*dpo_emulated)(struct se_device *);
+	/*
+	 * fua_write_emulated():
+	 */
+	int (*fua_write_emulated)(struct se_device *);
+	/*
+	 * fua_read_emulated():
+	 */
+	int (*fua_read_emulated)(struct se_device *);
+	/*
+	 * write_cache_emulated():
+	 */
+	int (*write_cache_emulated)(struct se_device *);
+	/*
+	 * transport_complete():
+	 *
+	 * Use transport_generic_complete() for the majority of DAS transport
+	 * drivers.  Provided as a convenience.
+	 */
+	int (*transport_complete)(struct se_task *task);
+	struct se_task *(*alloc_task)(struct se_cmd *);
+	/*
+	 * do_task():
+	 */
+	int (*do_task)(struct se_task *);
+	/*
+	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
+	 * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
+	 */
+	int (*do_discard)(struct se_device *, sector_t, u32);
+	/*
+	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
+	 * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
+	 */
+	void (*do_sync_cache)(struct se_task *);
+	/*
+	 * free_task():
+	 */
+	void (*free_task)(struct se_task *);
+	/*
+	 * check_configfs_dev_params():
+	 */
+	ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
+	/*
+	 * set_configfs_dev_params():
+	 */
+	ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
+						const char *, ssize_t);
+	/*
+	 * show_configfs_dev_params():
+	 */
+	ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
+						char *);
+	/*
+	 * get_cdb():
+	 */
+	unsigned char *(*get_cdb)(struct se_task *);
+	/*
+	 * get_device_rev():
+	 */
+	u32 (*get_device_rev)(struct se_device *);
+	/*
+	 * get_device_type():
+	 */
+	u32 (*get_device_type)(struct se_device *);
+	/*
+	 * Get the sector_t from a subsystem backstore.
+	 */
+	sector_t (*get_blocks)(struct se_device *);
+	/*
+	 * do_se_mem_map():
+	 */
+	int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
+				struct se_mem *, struct se_mem **, u32 *, u32 *);
+	/*
+	 * get_sense_buffer():
+	 */
+	unsigned char *(*get_sense_buffer)(struct se_task *);
+} ____cacheline_aligned;
+
+#define TRANSPORT(dev)		((dev)->transport)
+#define HBA_TRANSPORT(hba)	((hba)->transport)
+
+extern struct se_global *se_global;
+
+#endif /* TARGET_CORE_TRANSPORT_H */
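The session API above is typically driven from a fabric's login path: allocate with transport_init_session(), bind it to the node ACL and the fabric's own session object with transport_register_session(), and tear it down later with transport_deregister_session(). A minimal sketch; the NULL/ERR_PTR check is deliberately defensive since this header does not fix the failure convention:

#include <linux/err.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>

static struct se_session *example_make_session(struct se_portal_group *se_tpg,
					       struct se_node_acl *se_nacl,
					       void *fabric_sess_ptr)
{
	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR_OR_NULL(se_sess))
		return NULL;

	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	return se_sess;
}
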
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index b0b4eb2..da39b22 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -21,6 +21,16 @@
 #undef CREATE_TRACE_POINTS
 
 #include <linux/stringify.h>
+/*
+ * module.h includes tracepoints, and because ftrace.h
+ * pulls in module.h:
+ *  trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
+ *  linux/ftrace.h -> linux/module.h
+ * we must include module.h here before we play with any of
+ * the TRACE_EVENT() macros, otherwise the tracepoints included
+ * by module.h may break the build.
+ */
+#include <linux/module.h>
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
new file mode 100644
index 0000000..186e84d
--- /dev/null
+++ b/include/trace/events/asoc.h
@@ -0,0 +1,235 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM asoc
+
+#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ASOC_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct snd_soc_jack;
+struct snd_soc_codec;
+struct snd_soc_card;
+struct snd_soc_dapm_widget;
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(snd_soc_reg,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		codec->name	)
+		__field(	int,		id		)
+		__field(	unsigned int,	reg		)
+		__field(	unsigned int,	val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, codec->name);
+		__entry->id = codec->id;
+		__entry->reg = reg;
+		__entry->val = val;
+	),
+
+	TP_printk("codec=%s.%d reg=%x val=%x", __get_str(name),
+		  (int)__entry->id, (unsigned int)__entry->reg,
+		  (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_write,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val)
+
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_card,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		card->name	)
+		__field(	int,		val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, card->name);
+		__entry->val = val;
+	),
+
+	TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val)
+
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card),
+
+	TP_STRUCT__entry(
+		__string(	name,	card->name	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, card->name);
+	),
+
+	TP_printk("card=%s", __get_str(name))
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val),
+
+	TP_STRUCT__entry(
+		__string(	name,	w->name		)
+		__field(	int,	val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, w->name);
+		__entry->val = val;
+	),
+
+	TP_printk("widget=%s val=%d", __get_str(name),
+		  (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+TRACE_EVENT(snd_soc_jack_irq,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(	name,	name		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+TRACE_EVENT(snd_soc_jack_report,
+
+	TP_PROTO(struct snd_soc_jack *jack, int mask, int val),
+
+	TP_ARGS(jack, mask, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		jack->jack->name	)
+		__field(	int,		mask			)
+		__field(	int,		val			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, jack->jack->name);
+		__entry->mask = mask;
+		__entry->val = val;
+	),
+
+	TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val,
+		  (int)__entry->mask)
+);
+
+TRACE_EVENT(snd_soc_jack_notify,
+
+	TP_PROTO(struct snd_soc_jack *jack, int val),
+
+	TP_ARGS(jack, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		jack->jack->name	)
+		__field(	int,		val			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, jack->jack->name);
+		__entry->val = val;
+	),
+
+	TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
+);
+
+#endif /* _TRACE_ASOC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
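Each TRACE_EVENT()/DEFINE_EVENT() above generates a trace_<event>() inline that the ASoC core calls at the instrumented spot. A sketch of emitting the register-write event; the surrounding function is illustrative, not the actual soc-core code, and CREATE_TRACE_POINTS must be defined in exactly one translation unit:

#include <sound/soc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/asoc.h>

static int example_codec_write(struct snd_soc_codec *codec,
			       unsigned int reg, unsigned int val)
{
	trace_snd_soc_reg_write(codec, reg, val);
	/* ...the actual bus write would follow here... */
	return 0;
}
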
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index d8ce278..78f18ad 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -31,7 +31,7 @@
 					0 : blk_rq_sectors(rq);
 		__entry->errors    = rq->errors;
 
-		blk_fill_rwbs_rq(__entry->rwbs, rq);
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 		blk_dump_cmd(__get_str(cmd), rq);
 	),
 
@@ -118,7 +118,7 @@
 		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
 					blk_rq_bytes(rq) : 0;
 
-		blk_fill_rwbs_rq(__entry->rwbs, rq);
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 		blk_dump_cmd(__get_str(cmd), rq);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -206,15 +206,16 @@
  * block_bio_complete - completed all work on the block operation
  * @q: queue holding the block operation
  * @bio: block operation completed
+ * @error: io error value
  *
  * This tracepoint indicates there is no further work to do on this
  * block IO operation @bio.
  */
 TRACE_EVENT(block_bio_complete,
 
-	TP_PROTO(struct request_queue *q, struct bio *bio),
+	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
 
-	TP_ARGS(q, bio),
+	TP_ARGS(q, bio, error),
 
 	TP_STRUCT__entry(
 		__field( dev_t,		dev		)
@@ -228,6 +229,7 @@
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
 		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->error		= error;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 	),
 
@@ -486,16 +488,16 @@
 );
 
 /**
- * block_remap - map request for a partition to the raw device
+ * block_bio_remap - map request for a logical device to the raw device
  * @q: queue holding the operation
  * @bio: revised operation
  * @dev: device for the operation
  * @from: original sector for the operation
  *
- * An operation for a partition on a block device has been mapped to the
+ * An operation for a logical device has been mapped to the
  * raw block device.
  */
-TRACE_EVENT(block_remap,
+TRACE_EVENT(block_bio_remap,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
 		 sector_t from),
@@ -561,7 +563,7 @@
 		__entry->nr_sector	= blk_rq_sectors(rq);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
-		blk_fill_rwbs_rq(__entry->rwbs, rq);
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
new file mode 100644
index 0000000..388bcdd
--- /dev/null
+++ b/include/trace/events/compaction.h
@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compaction
+
+#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COMPACTION_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include "gfpflags.h"
+
+DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
+
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr_scanned)
+		__field(unsigned long, nr_taken)
+	),
+
+	TP_fast_assign(
+		__entry->nr_scanned = nr_scanned;
+		__entry->nr_taken = nr_taken;
+	),
+
+	TP_printk("nr_scanned=%lu nr_taken=%lu",
+		__entry->nr_scanned,
+		__entry->nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
+
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken)
+);
+
+TRACE_EVENT(mm_compaction_migratepages,
+
+	TP_PROTO(unsigned long nr_migrated,
+		unsigned long nr_failed),
+
+	TP_ARGS(nr_migrated, nr_failed),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr_migrated)
+		__field(unsigned long, nr_failed)
+	),
+
+	TP_fast_assign(
+		__entry->nr_migrated = nr_migrated;
+		__entry->nr_failed = nr_failed;
+	),
+
+	TP_printk("nr_migrated=%lu nr_failed=%lu",
+		__entry->nr_migrated,
+		__entry->nr_failed)
+);
+
+
+#endif /* _TRACE_COMPACTION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
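The new events are meant to be emitted from the compaction path; a hedged sketch of call sites matching the prototypes above (the wrapper and its counters are illustrative):

#include <trace/events/compaction.h>

/* Hypothetical reporting helper inside a compaction cycle. */
static void example_report_compaction(unsigned long nr_scanned,
				      unsigned long nr_taken,
				      unsigned long nr_migrated,
				      unsigned long nr_failed)
{
	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_taken);
	trace_mm_compaction_migratepages(nr_migrated, nr_failed);
}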
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51..46e3cd8 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -6,6 +6,36 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
 
+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason						\
+	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
+	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
+	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
+	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+	    TP_PROTO(__u32 reason, int errno),
+	    TP_ARGS(reason, errno),
+
+	TP_STRUCT__entry(
+		__field(	__u32,		reason		)
+		__field(	int,		errno		)
+	),
+
+	TP_fast_assign(
+		__entry->reason		= reason;
+		__entry->errno		= errno;
+	),
+
+	TP_printk("reason %s (%d)",
+		  __entry->errno < 0 ?
+		  (__entry->errno == -EINTR ? "restart" : "error") :
+		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
 #if defined(__KVM_HAVE_IOAPIC)
 TRACE_EVENT(kvm_set_irq,
 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
@@ -185,6 +215,97 @@
 		  __entry->referenced ? "YOUNG" : "OLD")
 );
 
+#ifdef CONFIG_KVM_ASYNC_PF
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		__field(u64, gfn)
+	),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		__entry->gfn = gfn;
+	),
+
+	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, token)
+		__field(__u64, gva)
+	),
+
+	TP_fast_assign(
+		__entry->token = token;
+		__entry->gva = gva;
+	),
+
+	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_completed,
+	TP_PROTO(unsigned long address, struct page *page, u64 gva),
+	TP_ARGS(address, page, gva),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, address)
+		__field(pfn_t, pfn)
+		__field(u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->pfn = page ? page_to_pfn(page) : 0;
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx address %#lx pfn %#llx",  __entry->gva,
+		  __entry->address, __entry->pfn)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index c7bb2f0..c6bae36 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -1,5 +1,15 @@
+/*
+ * Because linux/module.h has tracepoints in the header, and ftrace.h
+ * eventually includes this file, define_trace.h includes linux/module.h.
+ * But we do not want module.h to override the TRACE_SYSTEM macro
+ * variable that define_trace.h is processing, so we only set it
+ * when module events are being processed, which would happen when
+ * CREATE_TRACE_POINTS is defined.
+ */
+#ifdef CREATE_TRACE_POINTS
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM module
+#endif
 
 #if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_MODULE_H
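The comment above relies on the standard tracepoint build pattern: exactly one translation unit defines CREATE_TRACE_POINTS before pulling in the event header, which makes define_trace.h expand the definitions; every other user includes the header plainly. A hedged sketch (the file name is hypothetical):

/* foo.c - the one translation unit that instantiates these tracepoints */
#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

Any other file that emits trace_module_*() events simply includes the header without the define.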
diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h
new file mode 100644
index 0000000..37502a7
--- /dev/null
+++ b/include/trace/events/regulator.h
@@ -0,0 +1,141 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regulator
+
+#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGULATOR_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Events which just log themselves and the regulator name for enable/disable
+ * type tracking.
+ */
+DECLARE_EVENT_CLASS(regulator_basic,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(	name,	name	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("name=%s", __get_str(name))
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_delay,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_complete,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable_complete,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+/*
+ * Events that take a range of numerical values, mostly for voltages
+ * and so on.
+ */
+DECLARE_EVENT_CLASS(regulator_range,
+
+	TP_PROTO(const char *name, int min, int max),
+
+	TP_ARGS(name, min, max),
+
+	TP_STRUCT__entry(
+		__string(	name,		name		)
+		__field(        int,            min             )
+		__field(        int,            max             )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->min  = min;
+		__entry->max  = max;
+	),
+
+	TP_printk("name=%s (%d-%d)", __get_str(name),
+		  (int)__entry->min, (int)__entry->max)
+);
+
+DEFINE_EVENT(regulator_range, regulator_set_voltage,
+
+	TP_PROTO(const char *name, int min, int max),
+
+	TP_ARGS(name, min, max)
+
+);
+
+
+/*
+ * Events that take a single value, mostly for readback and refcounts.
+ */
+DECLARE_EVENT_CLASS(regulator_value,
+
+	TP_PROTO(const char *name, unsigned int val),
+
+	TP_ARGS(name, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		name		)
+		__field(        unsigned int,   val             )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->val  = val;
+	),
+
+	TP_printk("name=%s, val=%u", __get_str(name),
+		  (int)__entry->val)
+);
+
+DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
+
+	TP_PROTO(const char *name, unsigned int value),
+
+	TP_ARGS(name, value)
+
+);
+
+#endif /* _TRACE_REGULATOR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
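A hedged sketch of how a regulator enable path might bracket the operation with these events (the helper, its parameters, and the fixed ramp delay are assumptions, not code from this patch):

#include <linux/delay.h>
#include <trace/events/regulator.h>

/* Hypothetical enable path instrumented with the new events. */
static int example_regulator_enable(const char *name, int (*hw_enable)(void),
				    unsigned int ramp_us)
{
	int ret;

	trace_regulator_enable(name);
	ret = hw_enable();
	if (ret)
		return ret;

	trace_regulator_enable_delay(name);
	udelay(ramp_us);		/* wait for the rail to ramp */
	trace_regulator_enable_complete(name);
	return 0;
}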
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 75ce9d5..f10293c 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -25,9 +25,7 @@
 
 	TP_fast_assign(
 		__entry->skbaddr = skb;
-		if (skb) {
-			__entry->protocol = ntohs(skb->protocol);
-		}
+		__entry->protocol = ntohs(skb->protocol);
 		__entry->location = location;
 	),
 
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index c255fcc..ea422aa 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -25,13 +25,13 @@
 
 #define trace_reclaim_flags(page, sync) ( \
 	(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
+	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
 	)
 
 #define trace_shrink_flags(file, sync) ( \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
+	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
 			(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) |  \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
 	)
 
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 89a2b2d..4e249b9 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -81,6 +81,7 @@
 	TP_ARGS(bdi))
 
 DEFINE_WRITEBACK_EVENT(writeback_nowork);
+DEFINE_WRITEBACK_EVENT(writeback_wake_background);
 DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
 DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index e16610c..3e68366 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -446,14 +446,16 @@
  *	.reg			= ftrace_event_reg,
  * };
  *
- * static struct ftrace_event_call __used
- * __attribute__((__aligned__(4)))
- * __attribute__((section("_ftrace_events"))) event_<call> = {
+ * static struct ftrace_event_call event_<call> = {
  *	.name			= "<call>",
  *	.class			= event_class_<template>,
  *	.event			= &ftrace_event_type_<call>,
  *	.print_fmt		= print_fmt_<call>,
  * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct ftrace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
  *
  */
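The updated comment describes a general linker idiom: emit pointers into a named section so the section's contents form a packed array that can be walked between the linker-provided start/stop symbols, regardless of how the pointed-to objects are padded or aligned. A hedged, stand-alone userspace illustration of the idiom (section and identifiers are made up, not the kernel's):

#include <stdio.h>

struct thing { const char *name; };

static struct thing a = { "a" };
static struct thing b = { "b" };

/* Pointers, not the objects themselves, land in the section. */
static struct thing *pa __attribute__((used, section("thing_ptrs"))) = &a;
static struct thing *pb __attribute__((used, section("thing_ptrs"))) = &b;

/* GNU ld provides __start_<sec>/__stop_<sec> for C-identifier sections. */
extern struct thing *__start_thing_ptrs[];
extern struct thing *__stop_thing_ptrs[];

int main(void)
{
	struct thing **p;

	for (p = __start_thing_ptrs; p < __stop_thing_ptrs; p++)
		printf("%s\n", (*p)->name);
	return 0;
}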
 
@@ -579,28 +581,28 @@
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
 									\
-static struct ftrace_event_call __used					\
-__attribute__((__aligned__(4)))						\
-__attribute__((section("_ftrace_events"))) event_##call = {		\
+static struct ftrace_event_call __used event_##call = {			\
 	.name			= #call,				\
 	.class			= &event_class_##template,		\
 	.event.funcs		= &ftrace_event_type_funcs_##template,	\
 	.print_fmt		= print_fmt_##template,			\
-};
+};									\
+static struct ftrace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
 									\
 static const char print_fmt_##call[] = print;				\
 									\
-static struct ftrace_event_call __used					\
-__attribute__((__aligned__(4)))						\
-__attribute__((section("_ftrace_events"))) event_##call = {		\
+static struct ftrace_event_call __used event_##call = {			\
 	.name			= #call,				\
 	.class			= &event_class_##template,		\
 	.event.funcs		= &ftrace_event_type_funcs_##call,	\
 	.print_fmt		= print_fmt_##call,			\
-}
+};									\
+static struct ftrace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h
new file mode 100644
index 0000000..eb23f41
--- /dev/null
+++ b/include/xen/gntdev.h
@@ -0,0 +1,119 @@
+/******************************************************************************
+ * gntdev.h
+ * 
+ * Interface to /dev/xen/gntdev.
+ * 
+ * Copyright (c) 2007, D G Murray
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_PUBLIC_GNTDEV_H__
+#define __LINUX_PUBLIC_GNTDEV_H__
+
+struct ioctl_gntdev_grant_ref {
+	/* The domain ID of the grant to be mapped. */
+	uint32_t domid;
+	/* The grant reference of the grant to be mapped. */
+	uint32_t ref;
+};
+
+/*
+ * Inserts the grant references into the mapping table of an instance
+ * of gntdev. N.B. This does not perform the mapping, which is deferred
+ * until mmap() is called with @index as the offset.
+ */
+#define IOCTL_GNTDEV_MAP_GRANT_REF \
+_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
+struct ioctl_gntdev_map_grant_ref {
+	/* IN parameters */
+	/* The number of grants to be mapped. */
+	uint32_t count;
+	uint32_t pad;
+	/* OUT parameters */
+	/* The offset to be used on a subsequent call to mmap(). */
+	uint64_t index;
+	/* Variable IN parameter. */
+	/* Array of grant references, of size @count. */
+	struct ioctl_gntdev_grant_ref refs[1];
+};
+
+/*
+ * Removes the grant references from the mapping table of an instance
+ * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
+ * before this ioctl is called, or an error will result.
+ */
+#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
+_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
+struct ioctl_gntdev_unmap_grant_ref {
+	/* IN parameters */
+	/* The offset was returned by the corresponding map operation. */
+	uint64_t index;
+	/* The number of pages to be unmapped. */
+	uint32_t count;
+	uint32_t pad;
+};
+
+/*
+ * Returns the offset in the driver's address space that corresponds
+ * to @vaddr. This can be used to perform a munmap(), followed by an
+ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
+ * the caller. The number of pages that were allocated at the same time as
+ * @vaddr is returned in @count.
+ *
+ * N.B. Where more than one page has been mapped into a contiguous range, the
+ *      supplied @vaddr must correspond to the start of the range; otherwise
+ *      an error will result. It is only possible to munmap() the entire
+ *      contiguously-allocated range at once, and not any subrange thereof.
+ */
+#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
+_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
+struct ioctl_gntdev_get_offset_for_vaddr {
+	/* IN parameters */
+	/* The virtual address of the first mapped page in a range. */
+	uint64_t vaddr;
+	/* OUT parameters */
+	/* The offset that was used in the initial mmap() operation. */
+	uint64_t offset;
+	/* The number of pages mapped in the VM area that begins at @vaddr. */
+	uint32_t count;
+	uint32_t pad;
+};
+
+/*
+ * Sets the maximum number of grants that may be mapped at once by this gntdev
+ * instance.
+ *
+ * N.B. This must be called before any other ioctl is performed on the device.
+ */
+#define IOCTL_GNTDEV_SET_MAX_GRANTS \
+_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
+struct ioctl_gntdev_set_max_grants {
+	/* IN parameter */
+	/* The maximum number of grants that may be mapped at once. */
+	uint32_t count;
+};
+
+#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
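A hedged userspace sketch of the intended calling sequence for this interface (the device path, the domid/ref values, and the header's install location are placeholders):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>		/* assumed install path of this header */

int main(void)
{
	struct ioctl_gntdev_map_grant_ref map;
	struct ioctl_gntdev_unmap_grant_ref unmap;
	void *addr;
	int fd = open("/dev/xen/gntdev", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&map, 0, sizeof(map));
	map.count = 1;
	map.refs[0].domid = 0;		/* placeholder: granting domain */
	map.refs[0].ref = 123;		/* placeholder: grant reference */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) < 0)
		return 1;

	/* The actual mapping happens here, at the offset returned above. */
	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.index);
	if (addr == MAP_FAILED)
		return 1;

	/* ... use the shared page ... */

	munmap(addr, 4096);		/* must precede the unmap ioctl */
	memset(&unmap, 0, sizeof(unmap));
	unmap.index = map.index;
	unmap.count = 1;
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
	close(fd);
	return 0;
}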
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 9a73170..b1fab6b 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -37,10 +37,16 @@
 #ifndef __ASM_GNTTAB_H__
 #define __ASM_GNTTAB_H__
 
-#include <asm/xen/hypervisor.h>
+#include <asm/page.h>
+
+#include <xen/interface/xen.h>
 #include <xen/interface/grant_table.h>
+
+#include <asm/xen/hypervisor.h>
 #include <asm/xen/grant_table.h>
 
+#include <xen/features.h>
+
 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
 #define NR_GRANT_FRAMES 4
 
@@ -107,6 +113,37 @@
 void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
 				       unsigned long pfn);
 
+static inline void
+gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
+		  uint32_t flags, grant_ref_t ref, domid_t domid)
+{
+	if (flags & GNTMAP_contains_pte)
+		map->host_addr = addr;
+	else if (xen_feature(XENFEAT_auto_translated_physmap))
+		map->host_addr = __pa(addr);
+	else
+		map->host_addr = addr;
+
+	map->flags = flags;
+	map->ref = ref;
+	map->dom = domid;
+}
+
+static inline void
+gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
+		    uint32_t flags, grant_handle_t handle)
+{
+	if (flags & GNTMAP_contains_pte)
+		unmap->host_addr = addr;
+	else if (xen_feature(XENFEAT_auto_translated_physmap))
+		unmap->host_addr = __pa(addr);
+	else
+		unmap->host_addr = addr;
+
+	unmap->handle = handle;
+	unmap->dev_bus_addr = 0;
+}
+
 int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   struct grant_entry **__shared);
@@ -118,4 +155,9 @@
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct page **pages, unsigned int count);
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+		      struct page **pages, unsigned int count);
+
 #endif /* __ASM_GNTTAB_H__ */
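A hedged kernel-side sketch pairing the new inline helpers with the batched map/unmap calls declared above (the caller, its address handling, and its error policy are assumptions):

#include <linux/errno.h>
#include <linux/mm.h>
#include <xen/grant_table.h>

/* Hypothetical: map a single foreign grant at a kernel virtual address. */
static int example_map_one(void *vaddr, struct page *page,
			   grant_ref_t ref, domid_t domid,
			   grant_handle_t *handle)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (phys_addr_t)(unsigned long)vaddr,
			  GNTMAP_host_map, ref, domid);
	if (gnttab_map_refs(&op, &page, 1) || op.status != GNTST_okay)
		return -EFAULT;

	*handle = op.handle;
	return 0;
}

static void example_unmap_one(void *vaddr, struct page *page,
			      grant_handle_t handle)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (phys_addr_t)(unsigned long)vaddr,
			    GNTMAP_host_map, handle);
	gnttab_unmap_refs(&op, &page, 1);
}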
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 43e2d7d..7a1d15f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -94,7 +94,7 @@
 	int (*remove)(struct xenbus_device *dev);
 	int (*suspend)(struct xenbus_device *dev, pm_message_t state);
 	int (*resume)(struct xenbus_device *dev);
-	int (*uevent)(struct xenbus_device *, char **, int, char *, int);
+	int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
 	struct device_driver driver;
 	int (*read_otherend_details)(struct xenbus_device *dev);
 	int (*is_ready)(struct xenbus_device *dev);
diff --git a/init/Kconfig b/init/Kconfig
index 8dfd094..be788c0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -130,13 +130,16 @@
 config HAVE_KERNEL_LZMA
 	bool
 
+config HAVE_KERNEL_XZ
+	bool
+
 config HAVE_KERNEL_LZO
 	bool
 
 choice
 	prompt "Kernel compression mode"
 	default KERNEL_GZIP
-	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
+	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
 	help
 	  The linux kernel is a kind of self-extracting executable.
 	  Several compression algorithms are available, which differ
@@ -181,6 +184,21 @@
 	  two. Compression is slowest.	The kernel size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
+config KERNEL_XZ
+	bool "XZ"
+	depends on HAVE_KERNEL_XZ
+	help
+	  XZ uses the LZMA2 algorithm and instruction set specific
+	  BCJ filters which can improve the compression ratio of executable
+	  code. The size of the kernel is about 30% smaller with XZ in
+	  comparison to gzip. On architectures for which there is a BCJ
+	  filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
+	  will create a few percent smaller kernel than plain LZMA.
+
+	  The speed is about the same as with LZMA: The decompression
+	  speed of XZ is better than that of bzip2 but worse than gzip
+	  and LZO. Compression is slow.
+
 config KERNEL_LZO
 	bool "LZO"
 	depends on HAVE_KERNEL_LZO
@@ -497,21 +515,6 @@
 
 	  Accept the default if unsure.
 
-config SRCU_SYNCHRONIZE_DELAY
-	int "Microseconds to delay before waiting for readers"
-	range 0 20
-	default 10
-	help
-	  This option controls how long SRCU delays before entering its
-	  loop waiting on SRCU readers.  The purpose of this loop is
-	  to avoid the unconditional context-switch penalty that would
-	  otherwise be incurred if there was an active SRCU reader,
-	  in a manner similar to adaptive locking schemes.  This should
-	  be set to be a bit longer than the common-case SRCU read-side
-	  critical-section overhead.
-
-	  Accept the default if unsure.
-
 endmenu # "RCU Subsystem"
 
 config IKCONFIG
@@ -673,7 +676,7 @@
 	help
 	  Memory Resource Controller Swap Extension comes with its price in
 	  a bigger memory consumption. General purpose distribution kernels
-	  which want to enable the feautre but keep it disabled by default
+	  which want to enable the feature but keep it disabled by default
 	  and let the user enable it by swapaccount boot command line
 	  parameter should have this option unselected.
 	  For those who want to have the feature enabled by default should
@@ -742,8 +745,8 @@
 endif # CGROUPS
 
 menuconfig NAMESPACES
-	bool "Namespaces support" if EMBEDDED
-	default !EMBEDDED
+	bool "Namespaces support" if EXPERT
+	default !EXPERT
 	help
 	  Provides the way to make tasks work with different objects using
 	  the same id. For example same IPC id may refer to different objects
@@ -896,23 +899,31 @@
 config ANON_INODES
 	bool
 
-menuconfig EMBEDDED
-	bool "Configure standard kernel features (for small systems)"
+menuconfig EXPERT
+	bool "Configure standard kernel features (expert users)"
 	help
 	  This option allows certain base kernel options and settings
           to be disabled or tweaked. This is for specialized
           environments which can tolerate a "non-standard" kernel.
           Only use this if you really know what you are doing.
 
+config EMBEDDED
+	bool "Embedded system"
+	select EXPERT
+	help
+	  This option should be enabled if compiling the kernel for
+	  an embedded system so certain expert options are available
+	  for configuration.
+
 config UID16
-	bool "Enable 16-bit UID system calls" if EMBEDDED
+	bool "Enable 16-bit UID system calls" if EXPERT
 	depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION)
 	default y
 	help
 	  This enables the legacy 16-bit UID syscall wrappers.
 
 config SYSCTL_SYSCALL
-	bool "Sysctl syscall support" if EMBEDDED
+	bool "Sysctl syscall support" if EXPERT
 	depends on PROC_SYSCTL
 	default y
 	select SYSCTL
@@ -929,7 +940,7 @@
 	  If unsure say Y here.
 
 config KALLSYMS
-	 bool "Load all symbols for debugging/ksymoops" if EMBEDDED
+	 bool "Load all symbols for debugging/ksymoops" if EXPERT
 	 default y
 	 help
 	   Say Y here to let the kernel print out symbolic crash information and
@@ -960,7 +971,7 @@
 
 
 config HOTPLUG
-	bool "Support for hot-pluggable devices" if EMBEDDED
+	bool "Support for hot-pluggable devices" if EXPERT
 	default y
 	help
 	  This option is provided for the case where no hotplug or uevent
@@ -970,7 +981,7 @@
 
 config PRINTK
 	default y
-	bool "Enable support for printk" if EMBEDDED
+	bool "Enable support for printk" if EXPERT
 	help
 	  This option enables normal printk support. Removing it
 	  eliminates most of the message strings from the kernel image
@@ -979,7 +990,7 @@
 	  strongly discouraged.
 
 config BUG
-	bool "BUG() support" if EMBEDDED
+	bool "BUG() support" if EXPERT
 	default y
 	help
           Disabling this option eliminates support for BUG and WARN, reducing
@@ -990,12 +1001,12 @@
 
 config ELF_CORE
 	default y
-	bool "Enable ELF core dumps" if EMBEDDED
+	bool "Enable ELF core dumps" if EXPERT
 	help
 	  Enable support for generating core dumps. Disabling saves about 4k.
 
 config PCSPKR_PLATFORM
-	bool "Enable PC-Speaker support" if EMBEDDED
+	bool "Enable PC-Speaker support" if EXPERT
 	depends on ALPHA || X86 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES
 	default y
 	help
@@ -1004,14 +1015,14 @@
 
 config BASE_FULL
 	default y
-	bool "Enable full-sized data structures for core" if EMBEDDED
+	bool "Enable full-sized data structures for core" if EXPERT
 	help
 	  Disabling this option reduces the size of miscellaneous core
 	  kernel data structures. This saves memory on small machines,
 	  but may reduce performance.
 
 config FUTEX
-	bool "Enable futex support" if EMBEDDED
+	bool "Enable futex support" if EXPERT
 	default y
 	select RT_MUTEXES
 	help
@@ -1020,7 +1031,7 @@
 	  run glibc-based applications correctly.
 
 config EPOLL
-	bool "Enable eventpoll support" if EMBEDDED
+	bool "Enable eventpoll support" if EXPERT
 	default y
 	select ANON_INODES
 	help
@@ -1028,7 +1039,7 @@
 	  support for epoll family of system calls.
 
 config SIGNALFD
-	bool "Enable signalfd() system call" if EMBEDDED
+	bool "Enable signalfd() system call" if EXPERT
 	select ANON_INODES
 	default y
 	help
@@ -1038,7 +1049,7 @@
 	  If unsure, say Y.
 
 config TIMERFD
-	bool "Enable timerfd() system call" if EMBEDDED
+	bool "Enable timerfd() system call" if EXPERT
 	select ANON_INODES
 	default y
 	help
@@ -1048,7 +1059,7 @@
 	  If unsure, say Y.
 
 config EVENTFD
-	bool "Enable eventfd() system call" if EMBEDDED
+	bool "Enable eventfd() system call" if EXPERT
 	select ANON_INODES
 	default y
 	help
@@ -1058,7 +1069,7 @@
 	  If unsure, say Y.
 
 config SHMEM
-	bool "Use full shmem filesystem" if EMBEDDED
+	bool "Use full shmem filesystem" if EXPERT
 	default y
 	depends on MMU
 	help
@@ -1069,7 +1080,7 @@
 	  which may be appropriate on small systems without swap.
 
 config AIO
-	bool "Enable AIO support" if EMBEDDED
+	bool "Enable AIO support" if EXPERT
 	default y
 	help
 	  This option enables POSIX asynchronous I/O which may by used
@@ -1146,16 +1157,16 @@
 
 config VM_EVENT_COUNTERS
 	default y
-	bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
+	bool "Enable VM event counters for /proc/vmstat" if EXPERT
 	help
 	  VM event counters are needed for event counts to be shown.
 	  This option allows the disabling of the VM event counters
-	  on EMBEDDED systems.  /proc/vmstat will only show page counts
+	  on EXPERT systems.  /proc/vmstat will only show page counts
 	  if VM event counters are disabled.
 
 config PCI_QUIRKS
 	default y
-	bool "Enable PCI quirk workarounds" if EMBEDDED
+	bool "Enable PCI quirk workarounds" if EXPERT
 	depends on PCI
 	help
 	  This enables workarounds for various PCI chipset
@@ -1164,7 +1175,7 @@
 
 config SLUB_DEBUG
 	default y
-	bool "Enable SLUB debugging support" if EMBEDDED
+	bool "Enable SLUB debugging support" if EXPERT
 	depends on SLUB && SYSFS
 	help
 	  SLUB has extensive debug support features. Disabling these can
@@ -1208,7 +1219,7 @@
 	   a slab allocator.
 
 config SLOB
-	depends on EMBEDDED
+	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the stock allocator with a drastically simpler
@@ -1219,7 +1230,7 @@
 
 config MMAP_ALLOW_UNINITIALIZED
 	bool "Allow mmapped anonymous memory to be uninitialized"
-	depends on EMBEDDED && !MMU
+	depends on EXPERT && !MMU
 	default n
 	help
 	  Normally, and according to the Linux spec, anonymous memory obtained
diff --git a/init/calibrate.c b/init/calibrate.c
index 6eb48e5..24fe022 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -66,7 +66,7 @@
 		pre_start = 0;
 		read_current_timer(&start);
 		start_jiffies = jiffies;
-		while (jiffies <= (start_jiffies + 1)) {
+		while (time_before_eq(jiffies, start_jiffies + 1)) {
 			pre_start = start;
 			read_current_timer(&start);
 		}
@@ -74,8 +74,8 @@
 
 		pre_end = 0;
 		end = post_start;
-		while (jiffies <=
-		       (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
+		while (time_before_eq(jiffies, start_jiffies + 1 +
+					       DELAY_CALIBRATION_TICKS)) {
 			pre_end = end;
 			read_current_timer(&end);
 		}
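time_before_eq() compares jiffies via signed subtraction, so the loop stays correct when the counter wraps, whereas a plain "jiffies <= deadline" does not. A hedged illustration of the pattern (the two-tick bound is arbitrary):

#include <linux/jiffies.h>
#include <asm/processor.h>

/* Busy-wait roughly two ticks, safely across a jiffies wrap. */
static void example_wait_two_ticks(void)
{
	unsigned long start = jiffies;

	while (time_before_eq(jiffies, start + 2))
		cpu_relax();
}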
diff --git a/init/main.c b/init/main.c
index 00799c1..33c37c3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -96,6 +96,15 @@
 extern void tc_init(void);
 #endif
 
+/*
+ * Debug helper: via this flag we know that we are in 'early bootup code'
+ * where only the boot processor is running with IRQ disabled.  This means
+ * two things - IRQ must not be enabled before the flag is cleared and some
+ * operations which are not allowed with IRQ disabled are allowed while the
+ * flag is set.
+ */
+bool early_boot_irqs_disabled __read_mostly;
+
 enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
@@ -554,7 +563,7 @@
 	cgroup_init_early();
 
 	local_irq_disable();
-	early_boot_irqs_off();
+	early_boot_irqs_disabled = true;
 
 /*
  * Interrupts are still disabled. Do necessary setups, then
@@ -621,7 +630,7 @@
 	if (!irqs_disabled())
 		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
 				 "enabled early\n");
-	early_boot_irqs_on();
+	early_boot_irqs_disabled = false;
 	local_irq_enable();
 
 	/* Interrupts are enabled now so all GFP allocations are safe. */
diff --git a/kernel/Makefile b/kernel/Makefile
index 33e0a39..353d3fe 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -43,7 +43,7 @@
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+obj-$(CONFIG_SMP) += smp.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
@@ -100,6 +100,7 @@
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
+obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
diff --git a/kernel/audit.c b/kernel/audit.c
index 77770a0..e495624 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -400,7 +400,7 @@
 	if (err < 0) {
 		BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
 		printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
-		audit_log_lost("auditd dissapeared\n");
+		audit_log_lost("auditd disappeared\n");
 		audit_pid = 0;
 		/* we might get lucky and get this in the next auditd */
 		audit_hold_skb(skb);
diff --git a/kernel/capability.c b/kernel/capability.c
index 2f05303..9e9385f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -306,7 +306,7 @@
 		BUG();
 	}
 
-	if (security_capable(cap) == 0) {
+	if (security_capable(current_cred(), cap) == 0) {
 		current->flags |= PF_SUPERPRIV;
 		return 1;
 	}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 51cddc1..b24d702 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -763,9 +763,8 @@
  * -> cgroup_mkdir.
  */
 
-static struct dentry *cgroup_lookup(struct inode *dir,
-			struct dentry *dentry, struct nameidata *nd);
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
 static const struct inode_operations cgroup_dir_inode_operations;
@@ -862,6 +861,11 @@
 	iput(inode);
 }
 
+static int cgroup_delete(const struct dentry *d)
+{
+	return 1;
+}
+
 static void remove_dir(struct dentry *d)
 {
 	struct dentry *parent = dget(d->d_parent);
@@ -912,7 +916,7 @@
 
 	parent = dentry->d_parent;
 	spin_lock(&parent->d_lock);
-	spin_lock(&dentry->d_lock);
+	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	list_del_init(&dentry->d_u.d_child);
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&parent->d_lock);
@@ -1451,6 +1455,11 @@
 
 static int cgroup_get_rootdir(struct super_block *sb)
 {
+	static const struct dentry_operations cgroup_dops = {
+		.d_iput = cgroup_diput,
+		.d_delete = cgroup_delete,
+	};
+
 	struct inode *inode =
 		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
 	struct dentry *dentry;
@@ -1468,6 +1477,8 @@
 		return -ENOMEM;
 	}
 	sb->s_root = dentry;
+	/* for everything else we want ->d_op set */
+	sb->s_d_op = &cgroup_dops;
 	return 0;
 }
 
@@ -2197,6 +2208,14 @@
 	.rename = cgroup_rename,
 };
 
+static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	if (dentry->d_name.len > NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);
+	d_add(dentry, NULL);
+	return NULL;
+}
+
 /*
  * Check if a file is a control file
  */
@@ -2207,26 +2226,6 @@
 	return __d_cft(file->f_dentry);
 }
 
-static int cgroup_delete_dentry(const struct dentry *dentry)
-{
-	return 1;
-}
-
-static struct dentry *cgroup_lookup(struct inode *dir,
-			struct dentry *dentry, struct nameidata *nd)
-{
-	static const struct dentry_operations cgroup_dentry_operations = {
-		.d_delete = cgroup_delete_dentry,
-		.d_iput = cgroup_diput,
-	};
-
-	if (dentry->d_name.len > NAME_MAX)
-		return ERR_PTR(-ENAMETOOLONG);
-	d_set_d_op(dentry, &cgroup_dentry_operations);
-	d_add(dentry, NULL);
-	return NULL;
-}
-
 static int cgroup_create_file(struct dentry *dentry, mode_t mode,
 				struct super_block *sb)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4349935..e92e981 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1575,8 +1575,10 @@
 		return -ENODEV;
 
 	trialcs = alloc_trial_cpuset(cs);
-	if (!trialcs)
-		return -ENOMEM;
+	if (!trialcs) {
+		retval = -ENOMEM;
+		goto out;
+	}
 
 	switch (cft->private) {
 	case FILE_CPULIST:
@@ -1591,6 +1593,7 @@
 	}
 
 	free_trial_cpuset(trialcs);
+out:
 	cgroup_unlock();
 	return retval;
 }
diff --git a/kernel/cred.c b/kernel/cred.c
index 6a1aa00..3a9d6dd 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -252,13 +252,13 @@
 #endif
 
 	atomic_set(&new->usage, 1);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	new->magic = CRED_MAGIC;
+#endif
 
 	if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
 		goto error;
 
-#ifdef CONFIG_DEBUG_CREDENTIALS
-	new->magic = CRED_MAGIC;
-#endif
 	return new;
 
 error:
@@ -657,6 +657,8 @@
 	validate_creds(old);
 
 	*new = *old;
+	atomic_set(&new->usage, 1);
+	set_cred_subscribers(new, 0);
 	get_uid(new->user);
 	get_group_info(new->group_info);
 
@@ -674,8 +676,6 @@
 	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
 		goto error;
 
-	atomic_set(&new->usage, 1);
-	set_cred_subscribers(new, 0);
 	put_cred(old);
 	validate_creds(new);
 	return new;
@@ -748,7 +748,11 @@
 	if (cred->magic != CRED_MAGIC)
 		return true;
 #ifdef CONFIG_SECURITY_SELINUX
-	if (selinux_is_enabled()) {
+	/*
+	 * cred->security == NULL if security_cred_alloc_blank() or
+	 * security_prepare_creds() returned an error.
+	 */
+	if (selinux_is_enabled() && cred->security) {
 		if ((unsigned long) cred->security < PAGE_SIZE)
 			return true;
 		if ((*(u32 *)cred->security & 0xffffff00) ==
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index a6e7297..bd3e8e2 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2914,7 +2914,7 @@
 	}
 }
 
-/* Intialize kdb_printf, breakpoint tables and kdb state */
+/* Initialize kdb_printf, breakpoint tables and kdb state */
 void __init kdb_init(int lvl)
 {
 	static int kdb_init_lvl = KDB_NOT_INITIALIZED;
diff --git a/kernel/exit.c b/kernel/exit.c
index 89c7486..f9a45eb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -994,6 +994,15 @@
 	exit_fs(tsk);
 	check_stack_usage();
 	exit_thread();
+
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 *
+	 * Because of cgroup mode, this must be called before cgroup_exit().
+	 */
+	perf_event_exit_task(tsk);
+
 	cgroup_exit(tsk, 1);
 
 	if (group_dead)
@@ -1007,11 +1016,6 @@
 	 * FIXME: do that only when needed, using sched_exit tracepoint
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
-	/*
-	 * Flush inherited counters to the parent - before the parent
-	 * gets woken up by child-exit notifications.
-	 */
-	perf_event_exit_task(tsk);
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index d9b44f2..25e4291 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -66,6 +66,7 @@
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
+#include <linux/khugepaged.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -330,6 +331,9 @@
 	retval = ksm_fork(mm, oldmm);
 	if (retval)
 		goto out;
+	retval = khugepaged_fork(mm, oldmm);
+	if (retval)
+		goto out;
 
 	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -529,6 +533,9 @@
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(mm->pmd_huge_pte);
+#endif
 	free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -543,6 +550,7 @@
 	if (atomic_dec_and_test(&mm->mm_users)) {
 		exit_aio(mm);
 		ksm_exit(mm);
+		khugepaged_exit(mm); /* must run before exit_mmap */
 		exit_mmap(mm);
 		set_mm_exe_file(mm, NULL);
 		if (!list_empty(&mm->mmlist)) {
@@ -669,6 +677,10 @@
 	mm->token_priority = 0;
 	mm->last_interval = 0;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	mm->pmd_huge_pte = NULL;
+#endif
+
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
@@ -910,6 +922,7 @@
 
 	sig->oom_adj = current->signal->oom_adj;
 	sig->oom_score_adj = current->signal->oom_score_adj;
+	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
 
 	mutex_init(&sig->cred_guard_mutex);
 
@@ -1410,23 +1423,6 @@
 	}
 
 	/*
-	 * We hope to recycle these flags after 2.6.26
-	 */
-	if (unlikely(clone_flags & CLONE_STOPPED)) {
-		static int __read_mostly count = 100;
-
-		if (count > 0 && printk_ratelimit()) {
-			char comm[TASK_COMM_LEN];
-
-			count--;
-			printk(KERN_INFO "fork(): process `%s' used deprecated "
-					"clone flags 0x%lx\n",
-				get_task_comm(comm, current),
-				clone_flags & CLONE_STOPPED);
-		}
-	}
-
-	/*
 	 * When called from kernel_thread, don't do user tracing stuff.
 	 */
 	if (likely(user_mode(regs)))
@@ -1464,16 +1460,7 @@
 		 */
 		p->flags &= ~PF_STARTING;
 
-		if (unlikely(clone_flags & CLONE_STOPPED)) {
-			/*
-			 * We'll start up with an immediate SIGSTOP.
-			 */
-			sigaddset(&p->pending.signal, SIGSTOP);
-			set_tsk_thread_flag(p, TIF_SIGPENDING);
-			__set_task_state(p, TASK_STOPPED);
-		} else {
-			wake_up_new_task(p, clone_flags);
-		}
+		wake_up_new_task(p, clone_flags);
 
 		tracehook_report_clone_complete(trace, regs,
 						clone_flags, nr, p);
diff --git a/kernel/futex.c b/kernel/futex.c
index 3019b92..b766d28 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -233,7 +233,7 @@
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct page *page;
+	struct page *page, *page_head;
 	int err;
 
 	/*
@@ -265,11 +265,46 @@
 	if (err < 0)
 		return err;
 
-	page = compound_head(page);
-	lock_page(page);
-	if (!page->mapping) {
-		unlock_page(page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	page_head = page;
+	if (unlikely(PageTail(page))) {
 		put_page(page);
+		/* serialize against __split_huge_page_splitting() */
+		local_irq_disable();
+		if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+			page_head = compound_head(page);
+			/*
+			 * page_head is valid pointer but we must pin
+			 * it before taking the PG_lock and/or
+			 * PG_compound_lock. The moment we re-enable
+			 * irqs __split_huge_page_splitting() can
+			 * return and the head page can be freed from
+			 * under us. We can't take the PG_lock and/or
+			 * PG_compound_lock on a page that could be
+			 * freed from under us.
+			 */
+			if (page != page_head) {
+				get_page(page_head);
+				put_page(page);
+			}
+			local_irq_enable();
+		} else {
+			local_irq_enable();
+			goto again;
+		}
+	}
+#else
+	page_head = compound_head(page);
+	if (page != page_head) {
+		get_page(page_head);
+		put_page(page);
+	}
+#endif
+
+	lock_page(page_head);
+	if (!page_head->mapping) {
+		unlock_page(page_head);
+		put_page(page_head);
 		goto again;
 	}
 
@@ -280,20 +315,20 @@
 	 * it's a read-only handle, it's expected that futexes attach to
 	 * the object not the particular process.
 	 */
-	if (PageAnon(page)) {
+	if (PageAnon(page_head)) {
 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 		key->private.mm = mm;
 		key->private.address = address;
 	} else {
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-		key->shared.inode = page->mapping->host;
-		key->shared.pgoff = page->index;
+		key->shared.inode = page_head->mapping->host;
+		key->shared.pgoff = page_head->index;
 	}
 
 	get_futex_key_refs(key);
 
-	unlock_page(page);
-	put_page(page);
+	unlock_page(page_head);
+	put_page(page_head);
 	return 0;
 }
 
@@ -791,10 +826,9 @@
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
 	/*
-	 * This happens when we have stolen the lock and the original
-	 * pending owner did not enqueue itself back on the rt_mutex.
-	 * Thats not a tragedy. We know that way, that a lock waiter
-	 * is on the fly. We make the futex_q waiter the pending owner.
+	 * It is possible that the next waiter (the one that brought
+	 * this owner to the kernel) timed out and is no longer
+	 * waiting on the lock.
 	 */
 	if (!new_owner)
 		new_owner = this->task;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 45da2b6..0c8d7c0 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1745,7 +1745,7 @@
 	}
 
 	/*
-	 * A NULL parameter means "inifinte"
+	 * A NULL parameter means "infinite"
 	 */
 	if (!expires) {
 		schedule();
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 31d766b..8e42fec 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -9,9 +9,6 @@
 config GENERIC_HARDIRQS
        def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-       def_bool y
-
 # Select this to disable the deprecated stuff
 config GENERIC_HARDIRQS_NO_DEPRECATED
        def_bool n
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e2347eb..3540a71 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -118,114 +118,3 @@
 
 	return retval;
 }
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-
-#ifdef CONFIG_ENABLE_WARN_DEPRECATED
-# warning __do_IRQ is deprecated. Please convert to proper flow handlers
-#endif
-
-/**
- * __do_IRQ - original all in one highlevel IRQ handler
- * @irq:	the interrupt number
- *
- * __do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * This is the original x86 implementation which is used for every
- * interrupt type.
- */
-unsigned int __do_IRQ(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction *action;
-	unsigned int status;
-
-	kstat_incr_irqs_this_cpu(irq, desc);
-
-	if (CHECK_IRQ_PER_CPU(desc->status)) {
-		irqreturn_t action_ret;
-
-		/*
-		 * No locking required for CPU-local interrupts:
-		 */
-		if (desc->irq_data.chip->ack)
-			desc->irq_data.chip->ack(irq);
-		if (likely(!(desc->status & IRQ_DISABLED))) {
-			action_ret = handle_IRQ_event(irq, desc->action);
-			if (!noirqdebug)
-				note_interrupt(irq, desc, action_ret);
-		}
-		desc->irq_data.chip->end(irq);
-		return 1;
-	}
-
-	raw_spin_lock(&desc->lock);
-	if (desc->irq_data.chip->ack)
-		desc->irq_data.chip->ack(irq);
-	/*
-	 * REPLAY is when Linux resends an IRQ that was dropped earlier
-	 * WAITING is used by probe to mark irqs that are being tested
-	 */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	 * Since we set PENDING, if another processor is handling
-	 * a different instance of this same irq, the other processor
-	 * will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		irqreturn_t action_ret;
-
-		raw_spin_unlock(&desc->lock);
-
-		action_ret = handle_IRQ_event(irq, action);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
-
-		raw_spin_lock(&desc->lock);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-	desc->status &= ~IRQ_INPROGRESS;
-
-out:
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	desc->irq_data.chip->end(irq);
-	raw_spin_unlock(&desc->lock);
-
-	return 1;
-}
-#endif
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4571ae7..99c3bc8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -3,6 +3,12 @@
  */
 #include <linux/irqdesc.h>
 
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
+#else
+# define IRQ_BITMAP_BITS	NR_IRQS
+#endif
+
 extern int noirqdebug;
 
 #define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9988d03..2039bea 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -72,6 +72,8 @@
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
 {
+	int cpu;
+
 	desc->irq_data.irq = irq;
 	desc->irq_data.chip = &no_irq_chip;
 	desc->irq_data.chip_data = NULL;
@@ -83,7 +85,8 @@
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 	desc->name = NULL;
-	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
 	desc_smp_init(desc, node);
 }
 
@@ -91,7 +94,7 @@
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
@@ -133,8 +136,7 @@
 	if (!desc)
 		return NULL;
 	/* allocate based on nr_cpu_ids */
-	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
-					 gfp, node);
+	desc->kstat_irqs = alloc_percpu(unsigned int);
 	if (!desc->kstat_irqs)
 		goto err_desc;
 
@@ -149,7 +151,7 @@
 	return desc;
 
 err_kstat:
-	kfree(desc->kstat_irqs);
+	free_percpu(desc->kstat_irqs);
 err_desc:
 	kfree(desc);
 	return NULL;
@@ -166,7 +168,7 @@
 	mutex_unlock(&sparse_irq_lock);
 
 	free_masks(desc);
-	kfree(desc->kstat_irqs);
+	free_percpu(desc->kstat_irqs);
 	kfree(desc);
 }
 
@@ -215,6 +217,15 @@
 	initcnt = arch_probe_nr_irqs();
 	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
 
+	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+		nr_irqs = IRQ_BITMAP_BITS;
+
+	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+		initcnt = IRQ_BITMAP_BITS;
+
+	if (initcnt > nr_irqs)
+		nr_irqs = initcnt;
+
 	for (i = 0; i < initcnt; i++) {
 		desc = alloc_desc(i, node);
 		set_bit(i, allocated_irqs);
@@ -234,7 +245,6 @@
 	}
 };
 
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
 	int count, i, node = first_online_node;
@@ -250,7 +260,8 @@
 	for (i = 0; i < count; i++) {
 		desc[i].irq_data.irq = i;
 		desc[i].irq_data.chip = &no_irq_chip;
-		desc[i].kstat_irqs = kstat_irqs_all[i];
+		/* TODO : do this allocation on-demand ... */
+		desc[i].kstat_irqs = alloc_percpu(unsigned int);
 		alloc_masks(desc + i, GFP_KERNEL, node);
 		desc_smp_init(desc + i, node);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
@@ -275,6 +286,22 @@
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
 {
+#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
+	struct irq_desc *desc;
+	unsigned int i;
+
+	for (i = 0; i < cnt; i++) {
+		desc = irq_to_desc(start + i);
+		if (desc && !desc->kstat_irqs) {
+			unsigned int __percpu *stats = alloc_percpu(unsigned int);
+
+			if (!stats)
+				return -1;
+			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
+				free_percpu(stats);
+		}
+	}
+#endif
 	return start;
 }
 #endif /* !CONFIG_SPARSE_IRQ */
@@ -391,7 +418,9 @@
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	return desc ? desc->kstat_irqs[cpu] : 0;
+
+	return desc && desc->kstat_irqs ?
+			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
 
 #ifdef CONFIG_GENERIC_HARDIRQS
@@ -401,10 +430,10 @@
 	int cpu;
 	int sum = 0;
 
-	if (!desc)
+	if (!desc || !desc->kstat_irqs)
 		return 0;
 	for_each_possible_cpu(cpu)
-		sum += desc->kstat_irqs[cpu];
+		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
 	return sum;
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
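The per-IRQ counters move from a static [NR_IRQS][NR_CPUS] array to the generic percpu allocator; a hedged, generic sketch of the allocate/bump/sum/free pattern used here (names are illustrative):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static unsigned int __percpu *example_counter;

static int example_init(void)
{
	example_counter = alloc_percpu(unsigned int);
	return example_counter ? 0 : -ENOMEM;
}

static void example_hit(void)
{
	/* bump this CPU's slot; no cross-CPU cacheline bouncing */
	this_cpu_inc(*example_counter);
}

static unsigned int example_total(void)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(example_counter, cpu);
	return sum;
}

static void example_exit(void)
{
	free_percpu(example_counter);
}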
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f..9033c1c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1100,7 +1100,7 @@
 	if (retval)
 		kfree(action);
 
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
 	if (!retval && (irqflags & IRQF_SHARED)) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 1d25419..441fd62 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -56,6 +56,7 @@
 void move_native_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	bool masked;
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
@@ -63,8 +64,15 @@
 	if (unlikely(desc->status & IRQ_DISABLED))
 		return;
 
-	desc->irq_data.chip->irq_mask(&desc->irq_data);
+	/*
+	 * Be careful vs. already masked interrupts. If this is a
+	 * threaded interrupt with ONESHOT set, we can end up with an
+	 * interrupt storm.
+	 */
+	masked = desc->status & IRQ_MASKED;
+	if (!masked)
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
 	move_masked_irq(irq);
-	desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	if (!masked)
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
-
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 891115a..dc49358 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
 /* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 
 /*
  * Run software resends of IRQ's
diff --git a/kernel/kexec.c b/kernel/kexec.c
index b55045b..ec19b92 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -163,7 +163,7 @@
 	 * just verifies it is an address we can use.
 	 *
 	 * Since the kernel does everything in page size chunks ensure
-	 * the destination addreses are page aligned.  Too many
+	 * the destination addresses are page aligned.  Too many
 	 * special cases crop of when we don't do this.  The most
 	 * insidious is getting overlapping destination addresses
 	 * simply because addresses are changed to page size
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 17110a4..ee74b35 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -241,24 +241,19 @@
 	seq_puts(m, "Latency Top version : v0.1\n");
 
 	for (i = 0; i < MAXLR; i++) {
-		if (latency_record[i].backtrace[0]) {
+		struct latency_record *lr = &latency_record[i];
+
+		if (lr->backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %lu %lu ",
-				latency_record[i].count,
-				latency_record[i].time,
-				latency_record[i].max);
+			seq_printf(m, "%i %lu %lu",
+				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_SYMBOL_LEN];
-				char *c;
-				if (!latency_record[i].backtrace[q])
+				unsigned long bt = lr->backtrace[q];
+				if (!bt)
 					break;
-				if (latency_record[i].backtrace[q] == ULONG_MAX)
+				if (bt == ULONG_MAX)
 					break;
-				sprint_symbol(sym, latency_record[i].backtrace[q]);
-				c = strchr(sym, '+');
-				if (c)
-					*c = 0;
-				seq_printf(m, "%s ", sym);
+				seq_printf(m, " %ps", (void *)bt);
 			}
 			seq_printf(m, "\n");
 		}
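The rewrite leans on the kernel's symbolic pointer formats instead of sprint_symbol() plus manual truncation at '+': %ps prints the bare symbol name and %pS includes the offset. A hedged one-liner showing the same idiom (the message text is made up):

#include <linux/kernel.h>

/* Hypothetical: report a blocked callsite by symbol. */
static void example_report(unsigned long addr)
{
	printk(KERN_INFO "blocked in %ps (full: %pS)\n",
	       (void *)addr, (void *)addr);
}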
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 42ba65d..0d2058d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2292,22 +2292,6 @@
 }
 
 /*
- * Debugging helper: via this flag we know that we are in
- * 'early bootup code', and will warn about any invalid irqs-on event:
- */
-static int early_boot_irqs_enabled;
-
-void early_boot_irqs_off(void)
-{
-	early_boot_irqs_enabled = 0;
-}
-
-void early_boot_irqs_on(void)
-{
-	early_boot_irqs_enabled = 1;
-}
-
-/*
  * Hardirqs will be enabled:
  */
 void trace_hardirqs_on_caller(unsigned long ip)
@@ -2319,7 +2303,7 @@
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
-	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
diff --git a/kernel/module.c b/kernel/module.c
index 34e00b7..efa290e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2460,9 +2460,9 @@
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
-	mod->tracepoints = section_objs(info, "__tracepoints",
-					sizeof(*mod->tracepoints),
-					&mod->num_tracepoints);
+	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
+					     sizeof(*mod->tracepoints_ptrs),
+					     &mod->num_tracepoints);
 #endif
 #ifdef HAVE_JUMP_LABEL
 	mod->jump_entries = section_objs(info, "__jump_table",
@@ -3393,7 +3393,7 @@
 		   struct modversion_info *ver,
 		   struct kernel_param *kp,
 		   struct kernel_symbol *ks,
-		   struct tracepoint *tp)
+		   struct tracepoint * const *tp)
 {
 }
 EXPORT_SYMBOL(module_layout);
@@ -3407,8 +3407,8 @@
 	mutex_lock(&module_mutex);
 	list_for_each_entry(mod, &modules, list)
 		if (!mod->taints)
-			tracepoint_update_probe_range(mod->tracepoints,
-				mod->tracepoints + mod->num_tracepoints);
+			tracepoint_update_probe_range(mod->tracepoints_ptrs,
+				mod->tracepoints_ptrs + mod->num_tracepoints);
 	mutex_unlock(&module_mutex);
 }
 
@@ -3432,8 +3432,8 @@
 			else if (iter_mod > iter->module)
 				iter->tracepoint = NULL;
 			found = tracepoint_get_iter_range(&iter->tracepoint,
-				iter_mod->tracepoints,
-				iter_mod->tracepoints
+				iter_mod->tracepoints_ptrs,
+				iter_mod->tracepoints_ptrs
 					+ iter_mod->num_tracepoints);
 			if (found) {
 				iter->module = iter_mod;
diff --git a/kernel/panic.c b/kernel/panic.c
index 4c13b1a..991bb87 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -34,6 +34,7 @@
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
 int panic_timeout;
+EXPORT_SYMBOL_GPL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
diff --git a/kernel/params.c b/kernel/params.c
index 08107d1..0da1411 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -719,9 +719,7 @@
 			params[i].ops->free(params[i].arg);
 }
 
-static void __init kernel_add_sysfs_param(const char *name,
-					  struct kernel_param *kparam,
-					  unsigned int name_skip)
+static struct module_kobject * __init locate_module_kobject(const char *name)
 {
 	struct module_kobject *mk;
 	struct kobject *kobj;
@@ -729,10 +727,7 @@
 
 	kobj = kset_find_obj(module_kset, name);
 	if (kobj) {
-		/* We already have one.  Remove params so we can add more. */
 		mk = to_module_kobject(kobj);
-		/* We need to remove it before adding parameters. */
-		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
 	} else {
 		mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
 		BUG_ON(!mk);
@@ -743,15 +738,36 @@
 					   "%s", name);
 		if (err) {
 			kobject_put(&mk->kobj);
-			printk(KERN_ERR "Module '%s' failed add to sysfs, "
-			       "error number %d\n", name, err);
-			printk(KERN_ERR	"The system will be unstable now.\n");
-			return;
+			printk(KERN_ERR
+				"Module '%s' failed add to sysfs, error number %d\n",
+				name, err);
+			printk(KERN_ERR
+				"The system will be unstable now.\n");
+			return NULL;
 		}
-		/* So that exit path is even. */
+
+		/* So that we hold reference in both cases. */
 		kobject_get(&mk->kobj);
 	}
 
+	return mk;
+}
+
+static void __init kernel_add_sysfs_param(const char *name,
+					  struct kernel_param *kparam,
+					  unsigned int name_skip)
+{
+	struct module_kobject *mk;
+	int err;
+
+	mk = locate_module_kobject(name);
+	if (!mk)
+		return;
+
+	/* We need to remove old parameters before adding more. */
+	if (mk->mp)
+		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+
 	/* These should not fail at boot. */
 	err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
 	BUG_ON(err);
@@ -796,6 +812,32 @@
 	}
 }
 
+ssize_t __modver_version_show(struct module_attribute *mattr,
+			      struct module *mod, char *buf)
+{
+	struct module_version_attribute *vattr =
+		container_of(mattr, struct module_version_attribute, mattr);
+
+	return sprintf(buf, "%s\n", vattr->version);
+}
+
+extern struct module_version_attribute __start___modver[], __stop___modver[];
+
+static void __init version_sysfs_builtin(void)
+{
+	const struct module_version_attribute *vattr;
+	struct module_kobject *mk;
+	int err;
+
+	for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+		mk = locate_module_kobject(vattr->module_name);
+		if (mk) {
+			err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+			kobject_uevent(&mk->kobj, KOBJ_ADD);
+			kobject_put(&mk->kobj);
+		}
+	}
+}
 
 /* module-related sysfs stuff */
 
@@ -875,6 +917,7 @@
 	}
 	module_sysfs_initialized = 1;
 
+	version_sysfs_builtin();
 	param_sysfs_builtin();
 
 	return 0;
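The new version_sysfs_builtin() walks the __modver table that MODULE_VERSION() emits for built-in code, so the version string becomes visible in sysfs even when the driver is not built as a module. A minimal sketch of the producer side (hypothetical driver name and version string, and assuming the matching MODULE_VERSION() rework from this series):

	#include <linux/module.h>

	MODULE_VERSION("1.2");	/* hypothetical version string */

	/*
	 * When built in, this is expected to show up as
	 * /sys/module/<driver>/version containing "1.2".
	 */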
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf..656222f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)	{ }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -256,6 +268,12 @@
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for an event.
  */
@@ -269,7 +287,7 @@
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -278,7 +296,7 @@
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -534,6 +552,7 @@
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -545,7 +564,7 @@
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -556,7 +575,7 @@
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -763,16 +782,33 @@
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we just scheduled we might have missed
+	 * several ticks already; also, for a heavily scheduling task there is
+	 * little guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -784,9 +820,9 @@
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -898,11 +934,13 @@
 static void add_event_to_ctx(struct perf_event *event,
 			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -937,7 +975,7 @@
 
 	add_event_to_ctx(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1042,14 +1080,13 @@
 					struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
 
@@ -1082,7 +1119,7 @@
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1193,12 +1230,6 @@
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
 			  struct perf_cpu_context *cpuctx,
 			  enum event_type_t event_type)
@@ -1435,7 +1466,7 @@
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1467,7 +1498,7 @@
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1580,10 +1611,6 @@
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
@@ -1694,7 +1721,7 @@
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		hwc = &event->hw;
@@ -1885,11 +1912,12 @@
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	update_context_time(ctx);
+	if (ctx->is_active)
+		update_context_time(ctx);
 	update_event_times(event);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
 	raw_spin_unlock(&ctx->lock);
-
-	event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -1983,8 +2011,7 @@
 	 * accessed from NMI. Use a temporary manual per cpu allocation
 	 * until that gets sorted out.
 	 */
-	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-		num_possible_cpus();
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
 	entries = kzalloc(size, GFP_KERNEL);
 	if (!entries)
@@ -2185,13 +2212,6 @@
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/*
-	 * Can't attach events to a dying task.
-	 */
-	err = -ESRCH;
-	if (task->flags & PF_EXITING)
-		goto errout;
-
 	/* Reuse ptrace permission checks for now. */
 	err = -EACCES;
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2212,14 +2232,11 @@
 	unsigned long flags;
 	int ctxn, err;
 
-	if (!task && cpu != -1) {
+	if (!task) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu >= nr_cpumask_bits)
-			return ERR_PTR(-EINVAL);
-
 		/*
 		 * We could be clever and allow to attach a event to an
 		 * offline CPU and activate it when the CPU comes up, but
@@ -2255,14 +2272,27 @@
 
 		get_ctx(ctx);
 
-		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-			/*
-			 * We raced with some other task; use
-			 * the context they set.
-			 */
+		err = 0;
+		mutex_lock(&task->perf_event_mutex);
+		/*
+		 * If the task has already passed perf_event_exit_task(),
+		 * we must see PF_EXITING; it takes this mutex too.
+		 */
+		if (task->flags & PF_EXITING)
+			err = -ESRCH;
+		else if (task->perf_event_ctxp[ctxn])
+			err = -EAGAIN;
+		else
+			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+		mutex_unlock(&task->perf_event_mutex);
+
+		if (unlikely(err)) {
 			put_task_struct(task);
 			kfree(ctx);
-			goto retry;
+
+			if (err == -EAGAIN)
+				goto retry;
+			goto errout;
 		}
 	}
 
@@ -3893,7 +3923,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm || event->attr.mmap ||
@@ -4030,7 +4060,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm)
@@ -4178,7 +4208,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if ((!executable && event->attr.mmap_data) ||
@@ -4648,7 +4678,7 @@
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void inline perf_swevent_put_recursion_context(int rctx)
+inline void perf_swevent_put_recursion_context(int rctx)
 {
 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
@@ -5361,6 +5391,8 @@
 	goto out;
 }
 
+static struct lock_class_key cpuctx_mutex;
+
 int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
 	int cpu, ret;
@@ -5409,6 +5441,7 @@
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		__perf_event_init_context(&cpuctx->ctx);
+		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
@@ -5525,6 +5558,11 @@
 	struct hw_perf_event *hwc;
 	long err;
 
+	if ((unsigned)cpu >= nr_cpu_ids) {
+		if (!task || cpu != -1)
+			return ERR_PTR(-EINVAL);
+	}
+
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (!event)
 		return ERR_PTR(-ENOMEM);
@@ -5573,7 +5611,7 @@
 
 	if (!overflow_handler && parent_event)
 		overflow_handler = parent_event->overflow_handler;
-	
+
 	event->overflow_handler	= overflow_handler;
 
 	if (attr->disabled)
@@ -6109,7 +6147,7 @@
 	 * scheduled, so we are now safe from rescheduling changing
 	 * our context.
 	 */
-	child_ctx = child->perf_event_ctxp[ctxn];
+	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
 	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
@@ -6422,11 +6460,6 @@
 	unsigned long flags;
 	int ret = 0;
 
-	child->perf_event_ctxp[ctxn] = NULL;
-
-	mutex_init(&child->perf_event_mutex);
-	INIT_LIST_HEAD(&child->perf_event_list);
-
 	if (likely(!parent->perf_event_ctxp[ctxn]))
 		return 0;
 
@@ -6478,7 +6511,6 @@
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
 	parent_ctx->rotate_disable = 0;
-	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
 	child_ctx = child->perf_event_ctxp[ctxn];
 
@@ -6486,12 +6518,11 @@
 		/*
 		 * Mark the child context as a clone of the parent
 		 * context, or of whatever the parent is a clone of.
-		 * Note that if the parent is a clone, it could get
-		 * uncloned at any point, but that doesn't matter
-		 * because the list of events and the generation
-		 * count can't have changed since we took the mutex.
+		 *
+		 * Note that if the parent is a clone, the holding of
+		 * parent_ctx->lock prevents it from being uncloned.
 		 */
-		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+		cloned_ctx = parent_ctx->parent_ctx;
 		if (cloned_ctx) {
 			child_ctx->parent_ctx = cloned_ctx;
 			child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6502,6 +6533,7 @@
 		get_ctx(child_ctx->parent_ctx);
 	}
 
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
@@ -6516,6 +6548,10 @@
 {
 	int ctxn, ret;
 
+	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+	mutex_init(&child->perf_event_mutex);
+	INIT_LIST_HEAD(&child->perf_event_list);
+
 	for_each_task_context_nr(ctxn) {
 		ret = perf_event_init_context(child, ctxn);
 		if (ret)
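Several hunks above replace the open-coded cpu test with event_filter_match(). For reference, at this point in the series the helper is roughly equivalent to the sketch below (the real definition lives elsewhere in kernel/perf_event.c and is not part of this diff):

	static inline int event_filter_match(struct perf_event *event)
	{
		/* a cpu-bound event only counts while running on its cpu */
		return event->cpu == -1 || event->cpu == smp_processor_id();
	}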
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index a5aff3e..2657299 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -100,13 +100,9 @@
 	depends on PM_ADVANCED_DEBUG
 	default n
 
-config SUSPEND_NVS
-       bool
-
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM && ARCH_SUSPEND_POSSIBLE
-	select SUSPEND_NVS if HAS_IOMEM
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
@@ -140,7 +136,6 @@
 	depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
-	select SUSPEND_NVS if HAS_IOMEM
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces.  STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index b755972..c350e18 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -7,6 +7,5 @@
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
-obj-$(CONFIG_SUSPEND_NVS)	+= nvs.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 870f72b..1832bd2 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -51,14 +51,14 @@
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static struct platform_hibernation_ops *hibernation_ops;
+static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
  * hibernation_set_ops - set the global hibernate operations
  * @ops: the hibernation operations to use in subsequent hibernation transitions
  */
 
-void hibernation_set_ops(struct platform_hibernation_ops *ops)
+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 {
 	if (ops && !(ops->begin && ops->end &&  ops->pre_snapshot
 	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
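Since hibernation_set_ops() now takes a const pointer, platform code can keep its operations table in read-only data. A minimal sketch with hypothetical callback names, limited to the callbacks visible in the check above (the full check may require more):

	static const struct platform_hibernation_ops my_hib_ops = {
		.begin		= my_begin,
		.end		= my_end,
		.pre_snapshot	= my_pre_snapshot,
		.prepare	= my_prepare,
		.finish		= my_finish,
		.enter		= my_enter,
		.pre_restore	= my_pre_restore,
		/* remaining callbacks omitted in this sketch */
	};

	hibernation_set_ops(&my_hib_ops);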
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 7b5db6a..7018530 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -326,7 +326,7 @@
 
 static int __init pm_start_workqueue(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
 	return pm_wq ? 0 : -ENOMEM;
 }
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d6d2a10..0cf3a27 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
 	if ((p == current) ||
 	    (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezeable(p))
+			if (frozen(p) || !freezable(p))
 				continue;
 
 			if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		if (!freezeable(p))
+		if (!freezable(p))
 			continue;
 
 		if (nosig_only && should_send_signal(p))
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0dac75e..64db648 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1519,11 +1519,8 @@
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 		unsigned int nr_pages, unsigned int nr_highmem)
 {
-	int error = 0;
-
 	if (nr_highmem > 0) {
-		error = get_highmem_buffer(PG_ANY);
-		if (error)
+		if (get_highmem_buffer(PG_ANY))
 			goto err_out;
 		if (nr_highmem > alloc_highmem) {
 			nr_highmem -= alloc_highmem;
@@ -1546,7 +1543,7 @@
 
  err_out:
 	swsusp_free();
-	return error;
+	return -ENOMEM;
 }
 
 asmlinkage int swsusp_save(void)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 8850df6..de6f86b 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -31,13 +31,13 @@
 	[PM_SUSPEND_MEM]	= "mem",
 };
 
-static struct platform_suspend_ops *suspend_ops;
+static const struct platform_suspend_ops *suspend_ops;
 
 /**
  *	suspend_set_ops - Set the global suspend method table.
  *	@ops:	Pointer to ops structure.
  */
-void suspend_set_ops(struct platform_suspend_ops *ops)
+void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
 	mutex_lock(&pm_mutex);
 	suspend_ops = ops;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8c7e483..7c97c3a 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -224,7 +224,7 @@
 		return res;
 
 	root_swap = res;
-	res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
+	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 	if (res)
 		return res;
 
@@ -888,7 +888,7 @@
 /**
  *	swsusp_read - read the hibernation image.
  *	@flags_p: flags passed by the "frozen" kernel in the image header should
- *		  be written into this memeory location
+ *		  be written into this memory location
  */
 
 int swsusp_read(unsigned int *flags_p)
@@ -930,7 +930,8 @@
 {
 	int error;
 
-	hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
+	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
+					    FMODE_READ, NULL);
 	if (!IS_ERR(hib_resume_bdev)) {
 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
 		clear_page(swsusp_header);
diff --git a/kernel/printk.c b/kernel/printk.c
index f64b899..3623152 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -39,6 +39,7 @@
 #include <linux/syslog.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/rculist.h>
 
 #include <asm/uaccess.h>
 
@@ -96,7 +97,7 @@
 /*
  * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars
  * It is also used in interesting ways to provide interlocking in
- * release_console_sem().
+ * console_unlock().
  */
 static DEFINE_SPINLOCK(logbuf_lock);
 
@@ -261,25 +262,47 @@
 int dmesg_restrict;
 #endif
 
+static int syslog_action_restricted(int type)
+{
+	if (dmesg_restrict)
+		return 1;
+	/* Unless restricted, we allow "read all" and "get buffer size" for everybody */
+	return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
+}
+
+static int check_syslog_permissions(int type, bool from_file)
+{
+	/*
+	 * If this is from /proc/kmsg and we've already opened it, then we've
+	 * already done the capabilities checks at open time.
+	 */
+	if (from_file && type != SYSLOG_ACTION_OPEN)
+		return 0;
+
+	if (syslog_action_restricted(type)) {
+		if (capable(CAP_SYSLOG))
+			return 0;
+		/* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+		if (capable(CAP_SYS_ADMIN)) {
+			WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+				 "but no CAP_SYSLOG (deprecated).\n");
+			return 0;
+		}
+		return -EPERM;
+	}
+	return 0;
+}
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	unsigned i, j, limit, count;
 	int do_clear = 0;
 	char c;
-	int error = 0;
+	int error;
 
-	/*
-	 * If this is from /proc/kmsg we only do the capabilities checks
-	 * at open time.
-	 */
-	if (type == SYSLOG_ACTION_OPEN || !from_file) {
-		if (dmesg_restrict && !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-		if ((type != SYSLOG_ACTION_READ_ALL &&
-		     type != SYSLOG_ACTION_SIZE_BUFFER) &&
-		    !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-	}
+	error = check_syslog_permissions(type, from_file);
+	if (error)
+		goto out;
 
 	error = security_syslog(type);
 	if (error)
@@ -422,12 +445,6 @@
 	}
 out:
 	return error;
-warn:
-	/* remove after 2.6.39 */
-	if (capable(CAP_SYS_ADMIN))
-		WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
-		  "but no CAP_SYSLOG (deprecated and denied).\n");
-	return -EPERM;
 }
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
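Concretely, with the refactored check an unprivileged caller may still issue SYSLOG_ACTION_READ_ALL or SYSLOG_ACTION_SIZE_BUFFER unless dmesg_restrict is set; every other action needs CAP_SYSLOG, with CAP_SYS_ADMIN still accepted for now but flagged by a one-time deprecation warning.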
@@ -500,7 +517,7 @@
 /*
  * Call the console drivers, asking them to write out
  * log_buf[start] to log_buf[end - 1].
- * The console_sem must be held.
+ * The console_lock must be held.
  */
 static void call_console_drivers(unsigned start, unsigned end)
 {
@@ -603,11 +620,11 @@
  *
  * This is printk().  It can be called from any context.  We want it to work.
  *
- * We try to grab the console_sem.  If we succeed, it's easy - we log the output and
+ * We try to grab the console_lock.  If we succeed, it's easy - we log the output and
  * call the console drivers.  If we fail to get the semaphore we place the output
  * into the log buffer and return.  The current holder of the console_sem will
- * notice the new output in release_console_sem() and will send it to the
- * consoles before releasing the semaphore.
+ * notice the new output in console_unlock() and will send it to the
+ * consoles before releasing the lock.
  *
  * One effect of this deferred printing is that code which calls printk() and
  * then changes console_loglevel may break. This is because console_loglevel
@@ -658,19 +675,19 @@
 /*
  * Try to get console ownership to actually show the kernel
  * messages from a 'printk'. Return true (and with the
- * console_semaphore held, and 'console_locked' set) if it
+ * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  *
  * This gets called with the 'logbuf_lock' spinlock held and
  * interrupts disabled. It should return with 'logbuf_lock'
  * released but interrupts still disabled.
  */
-static int acquire_console_semaphore_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu)
 	__releases(&logbuf_lock)
 {
 	int retval = 0;
 
-	if (!try_acquire_console_sem()) {
+	if (console_trylock()) {
 		retval = 1;
 
 		/*
@@ -826,12 +843,12 @@
 	 * actual magic (print out buffers, wake up klogd,
 	 * etc). 
 	 *
-	 * The acquire_console_semaphore_for_printk() function
+	 * The console_trylock_for_printk() function
 	 * will release 'logbuf_lock' regardless of whether it
 	 * actually gets the semaphore or not.
 	 */
-	if (acquire_console_semaphore_for_printk(this_cpu))
-		release_console_sem();
+	if (console_trylock_for_printk(this_cpu))
+		console_unlock();
 
 	lockdep_on();
 out_restore_irqs:
@@ -992,7 +1009,7 @@
 	if (!console_suspend_enabled)
 		return;
 	printk("Suspending console(s) (use no_console_suspend to debug)\n");
-	acquire_console_sem();
+	console_lock();
 	console_suspended = 1;
 	up(&console_sem);
 }
@@ -1003,7 +1020,7 @@
 		return;
 	down(&console_sem);
 	console_suspended = 0;
-	release_console_sem();
+	console_unlock();
 }
 
 /**
@@ -1026,21 +1043,21 @@
 	case CPU_DYING:
 	case CPU_DOWN_FAILED:
 	case CPU_UP_CANCELED:
-		acquire_console_sem();
-		release_console_sem();
+		console_lock();
+		console_unlock();
 	}
 	return NOTIFY_OK;
 }
 
 /**
- * acquire_console_sem - lock the console system for exclusive use.
+ * console_lock - lock the console system for exclusive use.
  *
- * Acquires a semaphore which guarantees that the caller has
+ * Acquires a lock which guarantees that the caller has
  * exclusive access to the console system and the console_drivers list.
  *
  * Can sleep, returns nothing.
  */
-void acquire_console_sem(void)
+void console_lock(void)
 {
 	BUG_ON(in_interrupt());
 	down(&console_sem);
@@ -1049,21 +1066,29 @@
 	console_locked = 1;
 	console_may_schedule = 1;
 }
-EXPORT_SYMBOL(acquire_console_sem);
+EXPORT_SYMBOL(console_lock);
 
-int try_acquire_console_sem(void)
+/**
+ * console_trylock - try to lock the console system for exclusive use.
+ *
+ * Tries to acquire a lock which guarantees that the caller has
+ * exclusive access to the console system and the console_drivers list.
+ *
+ * Returns 1 on success, and 0 on failure to acquire the lock.
+ */
+int console_trylock(void)
 {
 	if (down_trylock(&console_sem))
-		return -1;
+		return 0;
 	if (console_suspended) {
 		up(&console_sem);
-		return -1;
+		return 0;
 	}
 	console_locked = 1;
 	console_may_schedule = 0;
-	return 0;
+	return 1;
 }
-EXPORT_SYMBOL(try_acquire_console_sem);
+EXPORT_SYMBOL(console_trylock);
 
 int is_console_locked(void)
 {
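A minimal usage sketch of the renamed API, mirroring the pattern the printk path above follows: console_trylock() for contexts that must not sleep, console_lock() otherwise, both paired with console_unlock():

	if (console_trylock()) {
		/* exclusive console access; must not schedule here */
		console_unlock();
	}

	console_lock();		/* may sleep */
	/* walk console_drivers, etc. */
	console_unlock();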
@@ -1094,20 +1119,20 @@
 }
 
 /**
- * release_console_sem - unlock the console system
+ * console_unlock - unlock the console system
  *
- * Releases the semaphore which the caller holds on the console system
+ * Releases the console_lock which the caller holds on the console system
  * and the console driver list.
  *
- * While the semaphore was held, console output may have been buffered
- * by printk().  If this is the case, release_console_sem() emits
- * the output prior to releasing the semaphore.
+ * While the console_lock was held, console output may have been buffered
+ * by printk().  If this is the case, console_unlock() emits
+ * the output prior to releasing the lock.
  *
  * If there is output waiting for klogd, we wake it up.
  *
- * release_console_sem() may be called from any context.
+ * console_unlock() may be called from any context.
  */
-void release_console_sem(void)
+void console_unlock(void)
 {
 	unsigned long flags;
 	unsigned _con_start, _log_end;
@@ -1140,7 +1165,7 @@
 	if (wake_klogd)
 		wake_up_klogd();
 }
-EXPORT_SYMBOL(release_console_sem);
+EXPORT_SYMBOL(console_unlock);
 
 /**
  * console_conditional_schedule - yield the CPU if required
@@ -1149,7 +1174,7 @@
  * if this CPU should yield the CPU to another task, do
  * so here.
  *
- * Must be called within acquire_console_sem().
+ * Must be called within console_lock().
  */
 void __sched console_conditional_schedule(void)
 {
@@ -1170,14 +1195,14 @@
 		if (down_trylock(&console_sem) != 0)
 			return;
 	} else
-		acquire_console_sem();
+		console_lock();
 
 	console_locked = 1;
 	console_may_schedule = 0;
 	for_each_console(c)
 		if ((c->flags & CON_ENABLED) && c->unblank)
 			c->unblank();
-	release_console_sem();
+	console_unlock();
 }
 
 /*
@@ -1188,7 +1213,7 @@
 	struct console *c;
 	struct tty_driver *driver = NULL;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(c) {
 		if (!c->device)
 			continue;
@@ -1196,7 +1221,7 @@
 		if (driver)
 			break;
 	}
-	release_console_sem();
+	console_unlock();
 	return driver;
 }
 
@@ -1207,17 +1232,17 @@
  */
 void console_stop(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags &= ~CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_stop);
 
 void console_start(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags |= CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_start);
 
@@ -1339,7 +1364,7 @@
 	 *	Put this console in the list - keep the
 	 *	preferred driver at the head of the list.
 	 */
-	acquire_console_sem();
+	console_lock();
 	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
 		newcon->next = console_drivers;
 		console_drivers = newcon;
@@ -1351,14 +1376,14 @@
 	}
 	if (newcon->flags & CON_PRINTBUFFER) {
 		/*
-		 * release_console_sem() will print out the buffered messages
+		 * console_unlock() will print out the buffered messages
 		 * for us.
 		 */
 		spin_lock_irqsave(&logbuf_lock, flags);
 		con_start = log_start;
 		spin_unlock_irqrestore(&logbuf_lock, flags);
 	}
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 
 	/*
@@ -1395,7 +1420,7 @@
 		return braille_unregister_console(console);
 #endif
 
-	acquire_console_sem();
+	console_lock();
 	if (console_drivers == console) {
 		console_drivers=console->next;
 		res = 0;
@@ -1417,7 +1442,7 @@
 	if (console_drivers != NULL && console->flags & CON_CONSDEV)
 		console_drivers->flags |= CON_CONSDEV;
 
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 	return res;
 }
@@ -1502,7 +1527,7 @@
 	/* Don't allow registering multiple times */
 	if (!dumper->registered) {
 		dumper->registered = 1;
-		list_add_tail(&dumper->list, &dump_list);
+		list_add_tail_rcu(&dumper->list, &dump_list);
 		err = 0;
 	}
 	spin_unlock_irqrestore(&dump_list_lock, flags);
@@ -1526,29 +1551,16 @@
 	spin_lock_irqsave(&dump_list_lock, flags);
 	if (dumper->registered) {
 		dumper->registered = 0;
-		list_del(&dumper->list);
+		list_del_rcu(&dumper->list);
 		err = 0;
 	}
 	spin_unlock_irqrestore(&dump_list_lock, flags);
+	synchronize_rcu();
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
 
-static const char * const kmsg_reasons[] = {
-	[KMSG_DUMP_OOPS]	= "oops",
-	[KMSG_DUMP_PANIC]	= "panic",
-	[KMSG_DUMP_KEXEC]	= "kexec",
-};
-
-static const char *kmsg_to_str(enum kmsg_dump_reason reason)
-{
-	if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
-		return "unknown";
-
-	return kmsg_reasons[reason];
-}
-
 /**
  * kmsg_dump - dump kernel log to kernel message dumpers.
  * @reason: the reason (oops, panic etc) for dumping
@@ -1587,13 +1599,9 @@
 		l2 = chars;
 	}
 
-	if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
-		printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n",
-				kmsg_to_str(reason));
-		return;
-	}
-	list_for_each_entry(dumper, &dump_list, list)
+	rcu_read_lock();
+	list_for_each_entry_rcu(dumper, &dump_list, list)
 		dumper->dump(dumper, reason, s1, l1, s2, l2);
-	spin_unlock_irqrestore(&dump_list_lock, flags);
+	rcu_read_unlock();
 }
 #endif
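With the dumper list converted to RCU, dump callbacks now run under rcu_read_lock() instead of a spinlock. A minimal registration sketch using the callback signature visible in kmsg_dump() above (all names hypothetical):

	static void my_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason,
			    const char *s1, unsigned long l1,
			    const char *s2, unsigned long l2)
	{
		/* s1/l1 and s2/l2 are the two chunks of the log buffer */
	}

	static struct kmsg_dumper my_dumper = { .dump = my_dump };

	/* during initialization: */
	kmsg_dump_register(&my_dumper);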
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 99bbaa3..e2302e4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -163,7 +163,7 @@
 	return !err;
 }
 
-int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task)
 {
 	int retval;
 
@@ -219,7 +219,7 @@
  * Performs checks and sets PT_PTRACED.
  * Should be used by all ptrace implementations for PTRACE_TRACEME.
  */
-int ptrace_traceme(void)
+static int ptrace_traceme(void)
 {
 	int ret = -EPERM;
 
@@ -293,7 +293,7 @@
 	return false;
 }
 
-int ptrace_detach(struct task_struct *child, unsigned int data)
+static int ptrace_detach(struct task_struct *child, unsigned int data)
 {
 	bool dead = false;
 
@@ -313,7 +313,7 @@
 		child->exit_code = data;
 		dead = __ptrace_detach(current, child);
 		if (!child->exit_state)
-			wake_up_process(child);
+			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
 	}
 	write_unlock_irq(&tasklist_lock);
 
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 0344937..0c343b9 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -189,7 +189,8 @@
 	unsigned long flags;
 
 	for (;;) {
-		wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);
+		wait_event_interruptible(rcu_kthread_wq,
+					 have_rcu_kthread_work != 0);
 		morework = rcu_boost();
 		local_irq_save(flags);
 		work = have_rcu_kthread_work;
diff --git a/kernel/sched.c b/kernel/sched.c
index a0eb094..42eab5a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,9 +553,6 @@
 	/* try_to_wake_up() stats */
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
-
-	/* BKL stats */
-	unsigned int bkl_count;
 #endif
 };
 
@@ -609,6 +606,9 @@
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
+	if (p->flags & PF_EXITING)
+		return &root_task_group;
+
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
@@ -2505,7 +2505,7 @@
  * try_to_wake_up_local - try to wake up a local task with rq lock held
  * @p: the thread to be awakened
  *
- * Put @p on the run-queue if it's not alredy there.  The caller must
+ * Put @p on the run-queue if it's not already there.  The caller must
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.  this_rq() stays locked over invocation.
  */
@@ -3887,7 +3887,7 @@
 	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
 		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
@@ -4213,6 +4213,7 @@
 {
 	__wake_up_common(q, mode, 1, 0, key);
 }
+EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
@@ -4871,7 +4872,8 @@
 		 * assigned.
 		 */
 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
-				task_group(p)->rt_bandwidth.rt_runtime == 0) {
+				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+				!task_group_is_autogroup(task_group(p))) {
 			__task_rq_unlock(rq);
 			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 			return -EPERM;
@@ -8882,6 +8884,20 @@
 	}
 }
 
+static void
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	/*
+	 * cgroup_exit() is called in the copy_process() failure path.
+	 * Ignore this case since the task hasn't run yet; this avoids
+	 * trying to poke a half freed task state from generic code.
+	 */
+	if (!(task->flags & PF_EXITING))
+		return;
+
+	sched_move_task(task);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 				u64 shareval)
@@ -8954,6 +8970,7 @@
 	.destroy	= cpu_cgroup_destroy,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.exit		= cpu_cgroup_exit,
 	.populate	= cpu_cgroup_populate,
 	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 32a723b..9fb6562 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -27,6 +27,11 @@
 {
 	struct autogroup *ag = container_of(kref, struct autogroup, kref);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/* We've redirected RT tasks to the root task group... */
+	ag->tg->rt_se = NULL;
+	ag->tg->rt_rq = NULL;
+#endif
 	sched_destroy_group(ag->tg);
 }
 
@@ -55,6 +60,10 @@
 	return ag;
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg);
+#endif
+
 static inline struct autogroup *autogroup_create(void)
 {
 	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -72,6 +81,19 @@
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
 	ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Autogroup RT tasks are redirected to the root task group
+	 * so we don't have to move tasks around upon policy change,
+	 * or flail around trying to allocate bandwidth on the fly.
+	 * A bandwidth exception in __sched_setscheduler() allows
+	 * the policy change to proceed.  Thereafter, task_group()
+	 * returns &root_task_group, so zero bandwidth is required.
+	 */
+	free_rt_sched_group(tg);
+	tg->rt_se = root_task_group.rt_se;
+	tg->rt_rq = root_task_group.rt_rq;
+#endif
 	tg->autogroup = ag;
 
 	return ag;
@@ -106,6 +128,11 @@
 	return true;
 }
 
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return tg != &root_task_group && tg->autogroup;
+}
+
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
@@ -231,6 +258,11 @@
 #ifdef CONFIG_SCHED_DEBUG
 static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
+	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+	if (!enabled || !tg->autogroup)
+		return 0;
+
 	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
 }
 #endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
index 5358e24..7b859ff 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched_autogroup.h
@@ -15,6 +15,10 @@
 
 static inline void autogroup_init(struct task_struct *init_task) {  }
 static inline void autogroup_free(struct task_group *tg) { }
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return 0;
+}
 
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 1dfae3d..eb6cb8e 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -16,6 +16,8 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 
+static DEFINE_SPINLOCK(sched_debug_lock);
+
 /*
  * This allows printing both to /proc/sched_debug and
  * to the console
@@ -86,6 +88,26 @@
 }
 #endif
 
+#ifdef CONFIG_CGROUP_SCHED
+static char group_path[PATH_MAX];
+
+static char *task_group_path(struct task_group *tg)
+{
+	if (autogroup_path(tg, group_path, PATH_MAX))
+		return group_path;
+
+	/*
+	 * May be NULL if the underlying cgroup isn't fully-created yet
+	 */
+	if (!tg->css.cgroup) {
+		group_path[0] = '\0';
+		return group_path;
+	}
+	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+	return group_path;
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -108,6 +130,9 @@
 	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
 		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
 #endif
+#ifdef CONFIG_CGROUP_SCHED
+	SEQ_printf(m, " %s", task_group_path(task_group(p)));
+#endif
 
 	SEQ_printf(m, "\n");
 }
@@ -144,7 +169,11 @@
 	struct sched_entity *last;
 	unsigned long flags;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
+#else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
+#endif
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 			SPLIT_NS(cfs_rq->exec_clock));
 
@@ -191,7 +220,11 @@
 
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
+#ifdef CONFIG_RT_GROUP_SCHED
+	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
+#else
 	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
+#endif
 
 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
@@ -212,6 +245,7 @@
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
 #ifdef CONFIG_X86
 	{
@@ -262,14 +296,20 @@
 	P(ttwu_count);
 	P(ttwu_local);
 
-	P(bkl_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
+				rq->rq_sched_info.bkl_count);
 
 #undef P
+#undef P64
 #endif
+	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
+	rcu_read_lock();
 	print_rq(m, rq, cpu);
+	rcu_read_unlock();
+	spin_unlock_irqrestore(&sched_debug_lock, flags);
 }
 
 static const char *sched_tunable_scaling_names[] = {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae..0c26e2d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@
 	cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
@@ -721,10 +722,10 @@
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
-	if (!cfs_rq)
+	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	now = rq_of(cfs_rq)->clock;
+	now = rq_of(cfs_rq)->clock_task;
 	delta = now - cfs_rq->load_stamp;
 
 	/* truncate load history at 4 idle periods */
@@ -762,6 +763,51 @@
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				long weight_delta)
+{
+	long load_weight, load, shares;
+
+	load = cfs_rq->load.weight + weight_delta;
+
+	load_weight = atomic_read(&tg->load_weight);
+	load_weight -= cfs_rq->load_contribution;
+	load_weight += load;
+
+	shares = (tg->shares * load);
+	if (load_weight)
+		shares /= load_weight;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+	if (shares > tg->shares)
+		shares = tg->shares;
+
+	return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq, 0);
+		update_cfs_shares(cfs_rq, 0);
+	}
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				long weight_delta)
+{
+	return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
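Worked example for calc_cfs_shares() with hypothetical numbers: for tg->shares = 1024, a local queue load (cfs_rq->load.weight + weight_delta) of 512 and an adjusted group-wide load_weight of 2048, the result is 1024 * 512 / 2048 = 256, which already lies inside the [MIN_SHARES, tg->shares] clamp.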
@@ -782,41 +828,20 @@
 {
 	struct task_group *tg;
 	struct sched_entity *se;
-	long load_weight, load, shares;
-
-	if (!cfs_rq)
-		return;
+	long shares;
 
 	tg = cfs_rq->tg;
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
 		return;
-
-	load = cfs_rq->load.weight + weight_delta;
-
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
-	load_weight += load;
-
-	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
-
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	if (shares > tg->shares)
-		shares = tg->shares;
+#ifndef CONFIG_SMP
+	if (likely(se->load.weight == tg->shares))
+		return;
+#endif
+	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
-	}
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -1062,6 +1087,9 @@
 		struct sched_entity *se = __pick_next_entity(cfs_rq);
 		s64 delta = curr->vruntime - se->vruntime;
 
+		if (delta < 0)
+			return;
+
 		if (delta > ideal_runtime)
 			resched_task(rq_of(cfs_rq)->curr);
 	}
@@ -1362,27 +1390,27 @@
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
@@ -1401,7 +1429,7 @@
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	unsigned long this_load, load;
+	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
 	struct task_group *tg;
@@ -1440,8 +1468,8 @@
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load) {
-		unsigned long this_eff_load, prev_eff_load;
+	if (this_load > 0) {
+		s64 this_eff_load, prev_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec7..01f75a5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -210,11 +210,12 @@
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 	struct sched_rt_entity *rt_se;
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct sched_rt_entity *rt_se;
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
+		} else if (rt_rq->rt_nr_running) {
 			idle = 0;
+			if (!rt_rq_throttled(rt_rq))
+				enqueue = 1;
+		}
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
@@ -625,7 +629,7 @@
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	u64 delta_exec;
 
-	if (!task_has_rt_policy(curr))
+	if (curr->sched_class != &rt_sched_class)
 		return;
 
 	delta_exec = rq->clock_task - curr->se.exec_start;
diff --git a/kernel/smp.c b/kernel/smp.c
index 12ed8b0..9910744 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 static struct {
 	struct list_head	queue;
 	raw_spinlock_t		lock;
@@ -193,23 +194,52 @@
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
+		void (*func) (void *info);
 
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+		/*
+		 * Since we walk the list without any locks, we might
+		 * see an entry that was completed, removed from the
+		 * list and is in the process of being reused.
+		 *
+		 * We must check that the cpu is in the cpumask before
+		 * checking the refs, and both must be set before
+		 * executing the callback on this cpu.
+		 */
+
+		if (!cpumask_test_cpu(cpu, data->cpumask))
 			continue;
 
+		smp_rmb();
+
+		if (atomic_read(&data->refs) == 0)
+			continue;
+
+		func = data->csd.func;			/* for later warn */
 		data->csd.func(data->csd.info);
 
+		/*
+		 * If the cpu mask is not still set then it enabled interrupts,
+		 * we took another smp interrupt, and executed the function
+		 * twice on this cpu.  In theory that copy decremented refs.
+		 */
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+			WARN(1, "%pS enabled interrupts and double executed\n",
+			     func);
+			continue;
+		}
+
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
-		if (!refs) {
-			raw_spin_lock(&call_function.lock);
-			list_del_rcu(&data->csd.list);
-			raw_spin_unlock(&call_function.lock);
-		}
 
 		if (refs)
 			continue;
 
+		WARN_ON(!cpumask_empty(data->cpumask));
+
+		raw_spin_lock(&call_function.lock);
+		list_del_rcu(&data->csd.list);
+		raw_spin_unlock(&call_function.lock);
+
 		csd_unlock(&data->csd);
 	}
 
@@ -429,7 +459,7 @@
 	 * can't happen.
 	 */
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
-		     && !oops_in_progress);
+		     && !oops_in_progress && !early_boot_irqs_disabled);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -453,11 +483,21 @@
 
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
+	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+
+	/*
+	 * To ensure the interrupt handler gets a complete view
+	 * we order the cpumask and refs writes and order the read
+	 * of them in the interrupt handler.  In addition we may
+	 * only clear our own cpu bit from the mask.
+	 */
+	smp_wmb();
+
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
@@ -529,3 +569,24 @@
 {
 	raw_spin_unlock_irq(&call_function.lock);
 }
+#endif /* USE_GENERIC_SMP_HELPERS */
+
+/*
+ * Call a function on all processors.  May be used during early boot while
+ * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
+ * of local_irq_disable/enable().
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int wait)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, wait);
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
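A minimal usage sketch of on_each_cpu() as moved here (callback name hypothetical); wait = 1 makes the call return only after the function has run on every CPU:

	static void flush_local_state(void *info)
	{
		/* runs with interrupts disabled on each cpu, including this one */
	}

	/* from process context: */
	on_each_cpu(flush_local_state, NULL, 1);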
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0823778..68eb5ef 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -885,25 +885,6 @@
 }
 early_initcall(spawn_ksoftirqd);
 
-#ifdef CONFIG_SMP
-/*
- * Call a function on all processors
- */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
-{
-	int ret = 0;
-
-	preempt_disable();
-	ret = smp_call_function(func, info, wait);
-	local_irq_disable();
-	func(info);
-	local_irq_enable();
-	preempt_enable();
-	return ret;
-}
-EXPORT_SYMBOL(on_each_cpu);
-#endif
-
 /*
  * [ These __weak aliases are kept in a separate compilation unit, so that
  *   GCC does not inline them incorrectly. ]
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 98d8c1e..73ce23f 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -156,6 +156,16 @@
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below) to allow SRCU readers to exit their read-side critical
+ * sections.  If there are still some readers after 10 microseconds,
+ * we repeatedly block for 1/HZ-second time periods.  This approach
+ * has done well in testing, so there is no need for a config parameter.
+ */
+#define SYNCHRONIZE_SRCU_READER_DELAY 10
+
+/*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
 static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
@@ -207,11 +217,12 @@
 	 * will have finished executing.  We initially give readers
 	 * an arbitrarily chosen 10 microseconds to get out of their
 	 * SRCU read-side critical sections, then loop waiting 1/HZ
-	 * seconds per iteration.
+	 * seconds per iteration.  The 10-microsecond value has done
+	 * very well in testing.
 	 */
 
 	if (srcu_readers_active_idx(sp, idx))
-		udelay(CONFIG_SRCU_SYNCHRONIZE_DELAY);
+		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 2745dcd..18da702 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -43,6 +43,8 @@
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
 
+#include <linux/kmsg_dump.h>
+
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/unistd.h>
@@ -285,6 +287,7 @@
  */
 void emergency_restart(void)
 {
+	kmsg_dump(KMSG_DUMP_EMERG);
 	machine_emergency_restart();
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
@@ -312,6 +315,7 @@
 		printk(KERN_EMERG "Restarting system.\n");
 	else
 		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
+	kmsg_dump(KMSG_DUMP_RESTART);
 	machine_restart(cmd);
 }
 EXPORT_SYMBOL_GPL(kernel_restart);
@@ -333,6 +337,7 @@
 	kernel_shutdown_prepare(SYSTEM_HALT);
 	sysdev_shutdown();
 	printk(KERN_EMERG "System halted.\n");
+	kmsg_dump(KMSG_DUMP_HALT);
 	machine_halt();
 }
 
@@ -351,6 +356,7 @@
 	disable_nonboot_cpus();
 	sysdev_shutdown();
 	printk(KERN_EMERG "Power down.\n");
+	kmsg_dump(KMSG_DUMP_POWEROFF);
 	machine_power_off();
 }
 EXPORT_SYMBOL_GPL(kernel_power_off);
@@ -1379,7 +1385,8 @@
 	const struct cred *cred = current_cred(), *tcred;
 
 	tcred = __task_cred(task);
-	if ((cred->uid != tcred->euid ||
+	if (current != task &&
+	    (cred->uid != tcred->euid ||
 	     cred->uid != tcred->suid ||
 	     cred->uid != tcred->uid  ||
 	     cred->gid != tcred->egid ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ae5cbb1..4eed0af 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/signal.h>
+#include <linux/printk.h>
 #include <linux/proc_fs.h>
 #include <linux/security.h>
 #include <linux/ctype.h>
@@ -169,7 +170,8 @@
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static int __sysrq_enabled; /* Note: sysrq code ises it's own private copy */
+/* Note: sysrq code uses its own private copy */
+static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 
 static int sysrq_sysctl_handler(ctl_table *table, int write,
 				void __user *buffer, size_t *lenp,
@@ -192,9 +194,9 @@
 static struct ctl_table root_table[];
 static struct ctl_table_root sysctl_table_root;
 static struct ctl_table_header root_table_header = {
-	.count = 1,
+	{{.count = 1,
 	.ctl_table = root_table,
-	.ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
+	.ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}},
 	.root = &sysctl_table_root,
 	.set = &sysctl_table_root.default_set,
 };
@@ -245,10 +247,6 @@
 		.mode		= 0555,
 		.child		= dev_table,
 	},
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -710,6 +708,15 @@
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+	{
+		.procname	= "kptr_restrict",
+		.data		= &kptr_restrict,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &two,
+	},
 #endif
 	{
 		.procname	= "ngroups_max",
@@ -962,10 +969,6 @@
 		.proc_handler	= proc_dointvec,
 	},
 #endif
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -1326,11 +1329,6 @@
 		.extra2		= &one,
 	},
 #endif
-
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -1486,10 +1484,6 @@
 		.proc_handler	= &pipe_proc_fn,
 		.extra1		= &pipe_min_size,
 	},
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -1573,11 +1567,16 @@
 	spin_unlock(&sysctl_lock);
 }
 
+static void free_head(struct rcu_head *rcu)
+{
+	kfree(container_of(rcu, struct ctl_table_header, rcu));
+}
+
 void sysctl_head_put(struct ctl_table_header *head)
 {
 	spin_lock(&sysctl_lock);
 	if (!--head->count)
-		kfree(head);
+		call_rcu(&head->rcu, free_head);
 	spin_unlock(&sysctl_lock);
 }
 
@@ -1954,10 +1953,10 @@
 	start_unregistering(header);
 	if (!--header->parent->count) {
 		WARN_ON(1);
-		kfree(header->parent);
+		call_rcu(&header->parent->rcu, free_head);
 	}
 	if (!--header->count)
-		kfree(header);
+		call_rcu(&header->rcu, free_head);
 	spin_unlock(&sysctl_lock);
 }
 
@@ -2899,7 +2898,7 @@
 	}
 }
 
-#else /* CONFIG_PROC_FS */
+#else /* CONFIG_PROC_SYSCTL */
 
 int proc_dostring(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2951,7 +2950,7 @@
 }
 
 
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_PROC_SYSCTL */
 
 /*
  * No sense putting this after each symbol definition, twice,
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 4b2545a..b875bed 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1192,7 +1192,7 @@
 
 		buf[result] = '\0';
 
-		/* Convert the decnet addresss to binary */
+		/* Convert the decnet address to binary */
 		result = -EIO;
 		nodep = strchr(buf, '.') + 1;
 		if (!nodep)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 69691eb..3971c6b 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -348,7 +348,7 @@
 	return ret;
 }
 
-#ifdef CONFIG_IA64
+#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define TASKSTATS_NEEDS_PADDING 1
 #endif
 
diff --git a/kernel/time.c b/kernel/time.c
index ba9b338..3217435 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -238,7 +238,7 @@
  * Avoid unnecessary multiplications/divisions in the
  * two most common HZ cases:
  */
-unsigned int inline jiffies_to_msecs(const unsigned long j)
+inline unsigned int jiffies_to_msecs(const unsigned long j)
 {
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 	return (MSEC_PER_SEC / HZ) * j;
@@ -254,7 +254,7 @@
 }
 EXPORT_SYMBOL(jiffies_to_msecs);
 
-unsigned int inline jiffies_to_usecs(const unsigned long j)
+inline unsigned int jiffies_to_usecs(const unsigned long j)
 {
 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index df140cd..6519cf62 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -113,7 +113,7 @@
  * @shift:	pointer to shift variable
  * @from:	frequency to convert from
  * @to:		frequency to convert to
- * @minsec:	guaranteed runtime conversion range in seconds
+ * @maxsec:	guaranteed runtime conversion range in seconds
  *
  * The function evaluates the shift/mult pair for the scaled math
  * operations of clocksources and clockevents.
@@ -122,7 +122,7 @@
  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
  * event @to is the counter frequency and @from is NSEC_PER_SEC.
  *
- * The @minsec conversion range argument controls the time frame in
+ * The @maxsec conversion range argument controls the time frame in
  * seconds which must be covered by the runtime conversion with the
  * calculated mult and shift factors. This guarantees that no 64bit
  * overflow happens when the input value of the conversion is
@@ -131,7 +131,7 @@
  * factors.
  */
 void
-clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
 {
 	u64 tmp;
 	u32 sft, sftacc= 32;
@@ -140,7 +140,7 @@
 	 * Calculate the shift factor which is limiting the conversion
 	 * range:
 	 */
-	tmp = ((u64)minsec * from) >> 32;
+	tmp = ((u64)maxsec * from) >> 32;
 	while (tmp) {
 		tmp >>=1;
 		sftacc--;
@@ -679,7 +679,7 @@
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 
-	/* Intialize mult/shift and max_idle_ns */
+	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_updatefreq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clocksource list */
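
The clocks_calc_mult_shift() kerneldoc fixed above describes preparing a mult/shift pair so that conversions of the form ns = (cycles * mult) >> shift stay within 64 bits for @maxsec seconds. The sketch below illustrates only that scaled-math idea in userspace; the mult computation is a simplified stand-in rather than the kernel routine, and the 24 MHz counter rate and shift value are assumptions:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000u

int main(void)
{
	uint32_t from = 24000000;	/* counter ticks per second (assumed) */
	uint32_t to = NSEC_PER_SEC;	/* converting to nanoseconds */
	uint32_t shift = 24;		/* assumed; the kernel derives it */
	/* round-to-nearest scaled factor: mult ~= (to << shift) / from */
	uint32_t mult = (uint32_t)((((uint64_t)to << shift) + from / 2) / from);

	uint64_t cycles = 24000000;	/* one second worth of ticks */
	uint64_t ns = (cycles * mult) >> shift;

	printf("mult=%u shift=%u -> %llu ns for %llu cycles\n",
	       mult, shift, (unsigned long long)ns, (unsigned long long)cycles);
	return 0;
}
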
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index d232189..5c00242 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -14,6 +14,7 @@
 #include <linux/timex.h>
 #include <linux/time.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 
 /*
  * NTP timekeeping variables:
@@ -74,6 +75,162 @@
 /* constant (boot-param configurable) NTP tick adjustment (upscaled)	*/
 static s64			ntp_tick_adj;
 
+#ifdef CONFIG_NTP_PPS
+
+/*
+ * The following variables are used when a pulse-per-second (PPS) signal
+ * is available. They establish the engineering parameters of the clock
+ * discipline loop when controlled by the PPS signal.
+ */
+#define PPS_VALID	10	/* PPS signal watchdog max (s) */
+#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
+#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
+#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
+#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
+				   increase pps_shift or consecutive bad
+				   intervals to decrease it */
+#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */
+
+static int pps_valid;		/* signal watchdog counter */
+static long pps_tf[3];		/* phase median filter */
+static long pps_jitter;		/* current jitter (ns) */
+static struct timespec pps_fbase; /* beginning of the last freq interval */
+static int pps_shift;		/* current interval duration (s) (shift) */
+static int pps_intcnt;		/* interval counter */
+static s64 pps_freq;		/* frequency offset (scaled ns/s) */
+static long pps_stabil;		/* current stability (scaled ns/s) */
+
+/*
+ * PPS signal quality monitors
+ */
+static long pps_calcnt;		/* calibration intervals */
+static long pps_jitcnt;		/* jitter limit exceeded */
+static long pps_stbcnt;		/* stability limit exceeded */
+static long pps_errcnt;		/* calibration errors */
+
+
+/* PPS kernel consumer compensates the whole phase error immediately.
+ * Otherwise, reduce the offset by a fixed factor times the time constant.
+ */
+static inline s64 ntp_offset_chunk(s64 offset)
+{
+	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
+		return offset;
+	else
+		return shift_right(offset, SHIFT_PLL + time_constant);
+}
+
+static inline void pps_reset_freq_interval(void)
+{
+	/* the PPS calibration interval may end
+	   surprisingly early */
+	pps_shift = PPS_INTMIN;
+	pps_intcnt = 0;
+}
+
+/**
+ * pps_clear - Clears the PPS state variables
+ *
+ * Must be called while holding a write on the xtime_lock
+ */
+static inline void pps_clear(void)
+{
+	pps_reset_freq_interval();
+	pps_tf[0] = 0;
+	pps_tf[1] = 0;
+	pps_tf[2] = 0;
+	pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
+	pps_freq = 0;
+}
+
+/* Decrease pps_valid to indicate that another second has passed since
+ * the last PPS signal. When it reaches 0, indicate that PPS signal is
+ * missing.
+ *
+ * Must be called while holding a write on the xtime_lock
+ */
+static inline void pps_dec_valid(void)
+{
+	if (pps_valid > 0)
+		pps_valid--;
+	else {
+		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+				 STA_PPSWANDER | STA_PPSERROR);
+		pps_clear();
+	}
+}
+
+static inline void pps_set_freq(s64 freq)
+{
+	pps_freq = freq;
+}
+
+static inline int is_error_status(int status)
+{
+	return (time_status & (STA_UNSYNC|STA_CLOCKERR))
+		/* PPS signal lost when either PPS time or
+		 * PPS frequency synchronization requested
+		 */
+		|| ((time_status & (STA_PPSFREQ|STA_PPSTIME))
+			&& !(time_status & STA_PPSSIGNAL))
+		/* PPS jitter exceeded when
+		 * PPS time synchronization requested */
+		|| ((time_status & (STA_PPSTIME|STA_PPSJITTER))
+			== (STA_PPSTIME|STA_PPSJITTER))
+		/* PPS wander exceeded or calibration error when
+		 * PPS frequency synchronization requested
+		 */
+		|| ((time_status & STA_PPSFREQ)
+			&& (time_status & (STA_PPSWANDER|STA_PPSERROR)));
+}
+
+static inline void pps_fill_timex(struct timex *txc)
+{
+	txc->ppsfreq	   = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
+					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
+	txc->jitter	   = pps_jitter;
+	if (!(time_status & STA_NANO))
+		txc->jitter /= NSEC_PER_USEC;
+	txc->shift	   = pps_shift;
+	txc->stabil	   = pps_stabil;
+	txc->jitcnt	   = pps_jitcnt;
+	txc->calcnt	   = pps_calcnt;
+	txc->errcnt	   = pps_errcnt;
+	txc->stbcnt	   = pps_stbcnt;
+}
+
+#else /* !CONFIG_NTP_PPS */
+
+static inline s64 ntp_offset_chunk(s64 offset)
+{
+	return shift_right(offset, SHIFT_PLL + time_constant);
+}
+
+static inline void pps_reset_freq_interval(void) {}
+static inline void pps_clear(void) {}
+static inline void pps_dec_valid(void) {}
+static inline void pps_set_freq(s64 freq) {}
+
+static inline int is_error_status(int status)
+{
+	return status & (STA_UNSYNC|STA_CLOCKERR);
+}
+
+static inline void pps_fill_timex(struct timex *txc)
+{
+	/* PPS is not implemented, so these are zero */
+	txc->ppsfreq	   = 0;
+	txc->jitter	   = 0;
+	txc->shift	   = 0;
+	txc->stabil	   = 0;
+	txc->jitcnt	   = 0;
+	txc->calcnt	   = 0;
+	txc->errcnt	   = 0;
+	txc->stbcnt	   = 0;
+}
+
+#endif /* CONFIG_NTP_PPS */
+
 /*
  * NTP methods:
  */
@@ -185,6 +342,9 @@
 
 	tick_length	= tick_length_base;
 	time_offset	= 0;
+
+	/* Clear PPS state variables */
+	pps_clear();
 }
 
 /*
@@ -250,16 +410,16 @@
 		time_status |= STA_UNSYNC;
 	}
 
-	/*
-	 * Compute the phase adjustment for the next second. The offset is
-	 * reduced by a fixed factor times the time constant.
-	 */
+	/* Compute the phase adjustment for the next second */
 	tick_length	 = tick_length_base;
 
-	delta		 = shift_right(time_offset, SHIFT_PLL + time_constant);
+	delta		 = ntp_offset_chunk(time_offset);
 	time_offset	-= delta;
 	tick_length	+= delta;
 
+	/* Check PPS signal */
+	pps_dec_valid();
+
 	if (!time_adjust)
 		return;
 
@@ -369,6 +529,8 @@
 	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
 		time_state = TIME_OK;
 		time_status = STA_UNSYNC;
+		/* restart PPS frequency calibration */
+		pps_reset_freq_interval();
 	}
 
 	/*
@@ -418,6 +580,8 @@
 		time_freq = txc->freq * PPM_SCALE;
 		time_freq = min(time_freq, MAXFREQ_SCALED);
 		time_freq = max(time_freq, -MAXFREQ_SCALED);
+		/* update pps_freq */
+		pps_set_freq(time_freq);
 	}
 
 	if (txc->modes & ADJ_MAXERROR)
@@ -508,7 +672,8 @@
 	}
 
 	result = time_state;	/* mostly `TIME_OK' */
-	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
+	/* check for errors */
+	if (is_error_status(time_status))
 		result = TIME_ERROR;
 
 	txc->freq	   = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
@@ -522,15 +687,8 @@
 	txc->tick	   = tick_usec;
 	txc->tai	   = time_tai;
 
-	/* PPS is not implemented, so these are zero */
-	txc->ppsfreq	   = 0;
-	txc->jitter	   = 0;
-	txc->shift	   = 0;
-	txc->stabil	   = 0;
-	txc->jitcnt	   = 0;
-	txc->calcnt	   = 0;
-	txc->errcnt	   = 0;
-	txc->stbcnt	   = 0;
+	/* fill PPS status fields */
+	pps_fill_timex(txc);
 
 	write_sequnlock_irq(&xtime_lock);
 
@@ -544,6 +702,243 @@
 	return result;
 }
 
+#ifdef	CONFIG_NTP_PPS
+
+/* actually struct pps_normtime is good old struct timespec, but it is
+ * semantically different (and it is the reason why it was invented):
+ * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
+ * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
+struct pps_normtime {
+	__kernel_time_t	sec;	/* seconds */
+	long		nsec;	/* nanoseconds */
+};
+
+/* normalize the timestamp so that nsec is in the
+   ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
+static inline struct pps_normtime pps_normalize_ts(struct timespec ts)
+{
+	struct pps_normtime norm = {
+		.sec = ts.tv_sec,
+		.nsec = ts.tv_nsec
+	};
+
+	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
+		norm.nsec -= NSEC_PER_SEC;
+		norm.sec++;
+	}
+
+	return norm;
+}
+
+/* get current phase correction and jitter */
+static inline long pps_phase_filter_get(long *jitter)
+{
+	*jitter = pps_tf[0] - pps_tf[1];
+	if (*jitter < 0)
+		*jitter = -*jitter;
+
+	/* TODO: test various filters */
+	return pps_tf[0];
+}
+
+/* add the sample to the phase filter */
+static inline void pps_phase_filter_add(long err)
+{
+	pps_tf[2] = pps_tf[1];
+	pps_tf[1] = pps_tf[0];
+	pps_tf[0] = err;
+}
+
+/* decrease frequency calibration interval length.
+ * It is halved after four consecutive unstable intervals.
+ */
+static inline void pps_dec_freq_interval(void)
+{
+	if (--pps_intcnt <= -PPS_INTCOUNT) {
+		pps_intcnt = -PPS_INTCOUNT;
+		if (pps_shift > PPS_INTMIN) {
+			pps_shift--;
+			pps_intcnt = 0;
+		}
+	}
+}
+
+/* increase frequency calibration interval length.
+ * It is doubled after four consecutive stable intervals.
+ */
+static inline void pps_inc_freq_interval(void)
+{
+	if (++pps_intcnt >= PPS_INTCOUNT) {
+		pps_intcnt = PPS_INTCOUNT;
+		if (pps_shift < PPS_INTMAX) {
+			pps_shift++;
+			pps_intcnt = 0;
+		}
+	}
+}
+
+/* update clock frequency based on MONOTONIC_RAW clock PPS signal
+ * timestamps
+ *
+ * At the end of the calibration interval the difference between the
+ * first and last MONOTONIC_RAW clock timestamps divided by the length
+ * of the interval becomes the frequency update. If the interval was
+ * too long, the data are discarded.
+ * Returns the difference between old and new frequency values.
+ */
+static long hardpps_update_freq(struct pps_normtime freq_norm)
+{
+	long delta, delta_mod;
+	s64 ftemp;
+
+	/* check if the frequency interval was too long */
+	if (freq_norm.sec > (2 << pps_shift)) {
+		time_status |= STA_PPSERROR;
+		pps_errcnt++;
+		pps_dec_freq_interval();
+		pr_err("hardpps: PPSERROR: interval too long - %ld s\n",
+				freq_norm.sec);
+		return 0;
+	}
+
+	/* here the raw frequency offset and wander (stability) is
+	 * calculated. If the wander is less than the wander threshold
+	 * the interval is increased; otherwise it is decreased.
+	 */
+	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
+			freq_norm.sec);
+	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
+	pps_freq = ftemp;
+	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
+		pr_warning("hardpps: PPSWANDER: change=%ld\n", delta);
+		time_status |= STA_PPSWANDER;
+		pps_stbcnt++;
+		pps_dec_freq_interval();
+	} else {	/* good sample */
+		pps_inc_freq_interval();
+	}
+
+	/* the stability metric is calculated as the average of recent
+	 * frequency changes, but is used only for performance
+	 * monitoring
+	 */
+	delta_mod = delta;
+	if (delta_mod < 0)
+		delta_mod = -delta_mod;
+	pps_stabil += (div_s64(((s64)delta_mod) <<
+				(NTP_SCALE_SHIFT - SHIFT_USEC),
+				NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;
+
+	/* if enabled, the system clock frequency is updated */
+	if ((time_status & STA_PPSFREQ) != 0 &&
+	    (time_status & STA_FREQHOLD) == 0) {
+		time_freq = pps_freq;
+		ntp_update_frequency();
+	}
+
+	return delta;
+}
+
+/* correct REALTIME clock phase error against PPS signal */
+static void hardpps_update_phase(long error)
+{
+	long correction = -error;
+	long jitter;
+
+	/* add the sample to the median filter */
+	pps_phase_filter_add(correction);
+	correction = pps_phase_filter_get(&jitter);
+
+	/* Nominal jitter is due to PPS signal noise. If it exceeds the
+	 * threshold, the sample is discarded; otherwise, if so enabled,
+	 * the time offset is updated.
+	 */
+	if (jitter > (pps_jitter << PPS_POPCORN)) {
+		pr_warning("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
+		       jitter, (pps_jitter << PPS_POPCORN));
+		time_status |= STA_PPSJITTER;
+		pps_jitcnt++;
+	} else if (time_status & STA_PPSTIME) {
+		/* correct the time using the phase offset */
+		time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
+				NTP_INTERVAL_FREQ);
+		/* cancel running adjtime() */
+		time_adjust = 0;
+	}
+	/* update jitter */
+	pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
+}
+
+/*
+ * hardpps() - discipline CPU clock oscillator to external PPS signal
+ *
+ * This routine is called at each PPS signal arrival in order to
+ * discipline the CPU clock oscillator to the PPS signal. It takes two
+ * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
+ * is used to correct clock phase error and the latter is used to
+ * correct the frequency.
+ *
+ * This code is based on David Mills's reference nanokernel
+ * implementation. It was mostly rewritten but keeps the same idea.
+ */
+void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+{
+	struct pps_normtime pts_norm, freq_norm;
+	unsigned long flags;
+
+	pts_norm = pps_normalize_ts(*phase_ts);
+
+	write_seqlock_irqsave(&xtime_lock, flags);
+
+	/* clear the error bits, they will be set again if needed */
+	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
+
+	/* indicate signal presence */
+	time_status |= STA_PPSSIGNAL;
+	pps_valid = PPS_VALID;
+
+	/* when called for the first time,
+	 * just start the frequency interval */
+	if (unlikely(pps_fbase.tv_sec == 0)) {
+		pps_fbase = *raw_ts;
+		write_sequnlock_irqrestore(&xtime_lock, flags);
+		return;
+	}
+
+	/* ok, now we have a base for frequency calculation */
+	freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase));
+
+	/* check that the signal is in the range
+	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
+	if ((freq_norm.sec == 0) ||
+			(freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
+			(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
+		time_status |= STA_PPSJITTER;
+		/* restart the frequency calibration interval */
+		pps_fbase = *raw_ts;
+		write_sequnlock_irqrestore(&xtime_lock, flags);
+		pr_err("hardpps: PPSJITTER: bad pulse\n");
+		return;
+	}
+
+	/* signal is ok */
+
+	/* check if the current frequency interval is finished */
+	if (freq_norm.sec >= (1 << pps_shift)) {
+		pps_calcnt++;
+		/* restart the frequency calibration interval */
+		pps_fbase = *raw_ts;
+		hardpps_update_freq(freq_norm);
+	}
+
+	hardpps_update_phase(pts_norm.nsec);
+
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+EXPORT_SYMBOL(hardpps);
+
+#endif	/* CONFIG_NTP_PPS */
+
 static int __init ntp_tick_adj_setup(char *str)
 {
 	ntp_tick_adj = simple_strtol(str, NULL, 0);
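
The hardpps() path added above feeds each pulse through pps_normalize_ts() so that the REALTIME timestamp is expressed relative to the nearest second: a pulse just before a second boundary becomes a small negative phase error instead of nearly one second. A minimal userspace sketch of just that normalization, faithful to the code shown but standalone:

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000L

struct pps_normtime {
	time_t	sec;
	long	nsec;
};

static struct pps_normtime pps_normalize_ts(struct timespec ts)
{
	struct pps_normtime norm = { .sec = ts.tv_sec, .nsec = ts.tv_nsec };

	/* fold [0, NSEC_PER_SEC) into ( -NSEC_PER_SEC/2, NSEC_PER_SEC/2 ] */
	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
		norm.nsec -= NSEC_PER_SEC;
		norm.sec++;
	}
	return norm;
}

int main(void)
{
	/* pulse timestamped 400 us before the full second */
	struct timespec ts = { .tv_sec = 99, .tv_nsec = NSEC_PER_SEC - 400000 };
	struct pps_normtime n = pps_normalize_ts(ts);

	printf("%ld.%09ld -> sec=%ld phase=%ld ns\n",
	       (long)ts.tv_sec, ts.tv_nsec, (long)n.sec, n.nsec);
	/* expected output: sec=100 phase=-400000 ns */
	return 0;
}
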
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761..a3b5aff 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -600,4 +600,14 @@
 	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
 }
 
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
 #endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 051bc80..ed228ef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -51,7 +51,11 @@
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return 0;
+	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+		return 1;
+	return tick_broadcast_oneshot_available();
 }
 
 /*
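
The tick-common.c change above makes the oneshot check three-way: the per-CPU device must support oneshot, and it must either keep ticking in deep C-states (no C3STOP) or be backed by a oneshot-capable broadcast device. A hedged userspace sketch of that decision; the function and flag names below are illustrative stand-ins, not the kernel's identifiers or values:

#include <stdbool.h>
#include <stdio.h>

#define FEAT_ONESHOT	0x01	/* illustrative flag values */
#define FEAT_C3STOP	0x02	/* device stops in deep C-states */

static bool broadcast_oneshot_available;	/* assumed system property */

static bool tick_oneshot_possible(unsigned int features)
{
	if (!(features & FEAT_ONESHOT))
		return false;
	if (!(features & FEAT_C3STOP))
		return true;	/* device keeps running, no help needed */
	return broadcast_oneshot_available;
}

int main(void)
{
	broadcast_oneshot_available = false;
	printf("C3-stopping device, no broadcast: %d\n",
	       tick_oneshot_possible(FEAT_ONESHOT | FEAT_C3STOP));	/* 0 */
	broadcast_oneshot_available = true;
	printf("C3-stopping device, oneshot broadcast: %d\n",
	       tick_oneshot_possible(FEAT_ONESHOT | FEAT_C3STOP));	/* 1 */
	return 0;
}
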
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 290eefb..f65d3a7 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
 extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -46,6 +47,7 @@
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -76,6 +78,7 @@
 	return 0;
 }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */
 
 /*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3e216e0..c55ea24 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,8 +642,7 @@
 	}
 	local_irq_enable();
 
-	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
-	       smp_processor_id());
+	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
 }
 
 /*
@@ -795,8 +794,10 @@
 	}
 
 #ifdef CONFIG_NO_HZ
-	if (tick_nohz_enabled)
+	if (tick_nohz_enabled) {
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
+		printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
+	}
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5bb86da..d27c756 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -49,7 +49,7 @@
 	u32	mult;
 };
 
-struct timekeeper timekeeper;
+static struct timekeeper timekeeper;
 
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
@@ -164,7 +164,7 @@
 /*
  * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
  */
-struct timespec raw_time;
+static struct timespec raw_time;
 
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
@@ -288,6 +288,49 @@
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
 
+#ifdef CONFIG_NTP_PPS
+
+/**
+ * getnstime_raw_and_real - get the time of day and raw monotonic time in timespec format
+ * @ts_raw:	pointer to the timespec to be set to raw monotonic time
+ * @ts_real:	pointer to the timespec to be set to the time of day
+ *
+ * This function reads both the time of day and raw monotonic time at the
+ * same time atomically and stores the resulting timestamps in timespec
+ * format.
+ */
+void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
+{
+	unsigned long seq;
+	s64 nsecs_raw, nsecs_real;
+
+	WARN_ON_ONCE(timekeeping_suspended);
+
+	do {
+		u32 arch_offset;
+
+		seq = read_seqbegin(&xtime_lock);
+
+		*ts_raw = raw_time;
+		*ts_real = xtime;
+
+		nsecs_raw = timekeeping_get_ns_raw();
+		nsecs_real = timekeeping_get_ns();
+
+		/* If arch requires, add in gettimeoffset() */
+		arch_offset = arch_gettimeoffset();
+		nsecs_raw += arch_offset;
+		nsecs_real += arch_offset;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts_raw, nsecs_raw);
+	timespec_add_ns(ts_real, nsecs_real);
+}
+EXPORT_SYMBOL(getnstime_raw_and_real);
+
+#endif /* CONFIG_NTP_PPS */
+
 /**
  * do_gettimeofday - Returns the time of day in a timeval
  * @tv:		pointer to the timeval to be set
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 32a19f9..3258455 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -41,7 +41,7 @@
 	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name((unsigned long)sym, symname) < 0)
-		SEQ_printf(m, "<%p>", sym);
+		SEQ_printf(m, "<%pK>", sym);
 	else
 		SEQ_printf(m, "%s", symname);
 }
@@ -112,7 +112,7 @@
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
-	SEQ_printf(m, "  .base:       %p\n", base);
+	SEQ_printf(m, "  .base:       %pK\n", base);
 	SEQ_printf(m, "  .index:      %d\n",
 			base->index);
 	SEQ_printf(m, "  .resolution: %Lu nsecs\n",
diff --git a/kernel/timer.c b/kernel/timer.c
index 43ca993..d645992 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -959,7 +959,7 @@
  *
  * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
- * hardirq contexts. The caller must not hold locks which would prevent
+ * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
  * add_timer_on(). Upon exit the timer is not queued and the handler is
  * not running on any CPU.
@@ -969,10 +969,12 @@
 int del_timer_sync(struct timer_list *timer)
 {
 #ifdef CONFIG_LOCKDEP
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	lock_map_acquire(&timer->lockdep_map);
 	lock_map_release(&timer->lockdep_map);
-	local_bh_enable();
+	local_irq_restore(flags);
 #endif
 	/*
 	 * don't use it in hardirq context, because it
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 53f3381..761c510 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
-obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7b8ec02..cbafed7 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -138,6 +138,13 @@
 		     !blk_tracer_enabled))
 		return;
 
+	/*
+	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
+	 * message to the trace.
+	 */
+	if (!(bt->act_mask & BLK_TC_NOTIFY))
+		return;
+
 	local_irq_save(flags);
 	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
 	va_start(args, fmt);
@@ -758,53 +765,58 @@
  * @q:		queue the io is for
  * @bio:	the source bio
  * @what:	the action
+ * @error:	error, if any
  *
  * Description:
  *     Records an action against a bio. Will log the bio offset + size.
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-				     u32 what)
+			      u32 what, int error)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (likely(!bt))
 		return;
 
+	if (!error && !bio_flagged(bio, BIO_UPTODATE))
+		error = EIO;
+
 	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+			error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
 				     struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
-				       struct request_queue *q, struct bio *bio)
+				       struct request_queue *q, struct bio *bio,
+				       int error)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
 					struct request_queue *q,
 					struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
 					 struct request_queue *q,
 					 struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
 				    struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -812,7 +824,7 @@
 				struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
@@ -827,7 +839,7 @@
 				  struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
@@ -887,7 +899,7 @@
 }
 
 /**
- * blk_add_trace_remap - Add a trace for a remap operation
+ * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
  * @ignore:	trace callback data parameter (not used)
  * @q:		queue the io is for
  * @bio:	the source bio
@@ -899,9 +911,9 @@
  *     it spans a stripe (or similar). Add a trace for that action.
  *
  **/
-static void blk_add_trace_remap(void *ignore,
-				struct request_queue *q, struct bio *bio,
-				dev_t dev, sector_t from)
+static void blk_add_trace_bio_remap(void *ignore,
+				    struct request_queue *q, struct bio *bio,
+				    dev_t dev, sector_t from)
 {
 	struct blk_trace *bt = q->blk_trace;
 	struct blk_io_trace_remap r;
@@ -1016,7 +1028,7 @@
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_remap(blk_add_trace_remap, NULL);
+	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	WARN_ON(ret);
@@ -1025,7 +1037,7 @@
 static void blk_unregister_tracepoints(void)
 {
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
-	unregister_trace_block_remap(blk_add_trace_remap, NULL);
+	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
 	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
 	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
@@ -1815,21 +1827,5 @@
 	rwbs[i] = '\0';
 }
 
-void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
-{
-	int rw = rq->cmd_flags & 0x03;
-	int bytes;
-
-	if (rq->cmd_flags & REQ_DISCARD)
-		rw |= REQ_DISCARD;
-
-	if (rq->cmd_flags & REQ_SECURE)
-		rw |= REQ_SECURE;
-
-	bytes = blk_rq_bytes(rq);
-
-	blk_fill_rwbs(rwbs, rw, bytes);
-}
-
 #endif /* CONFIG_EVENT_TRACING */
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8cf959..dc53ecb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1313,12 +1313,10 @@
 
 	__this_cpu_inc(user_stack_count);
 
-
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
-		return;
+		goto out_drop_count;
 	entry	= ring_buffer_event_data(event);
 
 	entry->tgid		= current->tgid;
@@ -1333,8 +1331,8 @@
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
+ out_drop_count:
 	__this_cpu_dec(user_stack_count);
-
  out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e3dfeca..6cf2237 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -53,7 +53,7 @@
  */
 
 /*
- * Function trace entry - function address and parent function addres:
+ * Function trace entry - function address and parent function address:
  */
 FTRACE_ENTRY(function, ftrace_entry,
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 35fde09..5f499e04 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1284,7 +1284,7 @@
 static void trace_module_add_events(struct module *mod)
 {
 	struct ftrace_module_file_ops *file_ops = NULL;
-	struct ftrace_event_call *call, *start, *end;
+	struct ftrace_event_call **call, **start, **end;
 
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
@@ -1297,7 +1297,7 @@
 		return;
 
 	for_each_event(call, start, end) {
-		__trace_add_event_call(call, mod,
+		__trace_add_event_call(*call, mod,
 				       &file_ops->id, &file_ops->enable,
 				       &file_ops->filter, &file_ops->format);
 	}
@@ -1367,8 +1367,8 @@
 	.priority = 0,
 };
 
-extern struct ftrace_event_call __start_ftrace_events[];
-extern struct ftrace_event_call __stop_ftrace_events[];
+extern struct ftrace_event_call *__start_ftrace_events[];
+extern struct ftrace_event_call *__stop_ftrace_events[];
 
 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
 
@@ -1384,7 +1384,7 @@
 
 static __init int event_trace_init(void)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_call **call;
 	struct dentry *d_tracer;
 	struct dentry *entry;
 	struct dentry *d_events;
@@ -1430,7 +1430,7 @@
 		pr_warning("tracing: Failed to allocate common fields");
 
 	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
-		__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
+		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
 				       &ftrace_enable_fops,
 				       &ftrace_event_filter_fops,
 				       &ftrace_event_format_fops);
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4b74d71..bbeec31 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -161,13 +161,13 @@
 	.fields			= LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
 };									\
 									\
-struct ftrace_event_call __used						\
-__attribute__((__aligned__(4)))						\
-__attribute__((section("_ftrace_events"))) event_##call = {		\
+struct ftrace_event_call __used event_##call = {			\
 	.name			= #call,				\
 	.event.type		= etype,				\
 	.class			= &event_class_ftrace_##call,		\
 	.print_fmt		= print,				\
 };									\
+struct ftrace_event_call __used						\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
 #include "trace_entries.h"
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5cf8c60..92b6e1e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -453,14 +453,6 @@
  * Stubs:
  */
 
-void early_boot_irqs_off(void)
-{
-}
-
-void early_boot_irqs_on(void)
-{
-}
-
 void trace_softirqs_on(unsigned long ip)
 {
 }
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index bac752f..5c9fe08 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -23,9 +23,6 @@
 static int syscall_enter_define_fields(struct ftrace_event_call *call);
 static int syscall_exit_define_fields(struct ftrace_event_call *call);
 
-/* All syscall exit events have the same fields */
-static LIST_HEAD(syscall_exit_fields);
-
 static struct list_head *
 syscall_get_enter_fields(struct ftrace_event_call *call)
 {
@@ -34,50 +31,45 @@
 	return &entry->enter_fields;
 }
 
-static struct list_head *
-syscall_get_exit_fields(struct ftrace_event_call *call)
-{
-	return &syscall_exit_fields;
-}
-
 struct trace_event_functions enter_syscall_print_funcs = {
-	.trace                  = print_syscall_enter,
+	.trace		= print_syscall_enter,
 };
 
 struct trace_event_functions exit_syscall_print_funcs = {
-	.trace                  = print_syscall_exit,
+	.trace		= print_syscall_exit,
 };
 
 struct ftrace_event_class event_class_syscall_enter = {
-	.system			= "syscalls",
-	.reg			= syscall_enter_register,
-	.define_fields		= syscall_enter_define_fields,
-	.get_fields		= syscall_get_enter_fields,
-	.raw_init		= init_syscall_trace,
+	.system		= "syscalls",
+	.reg		= syscall_enter_register,
+	.define_fields	= syscall_enter_define_fields,
+	.get_fields	= syscall_get_enter_fields,
+	.raw_init	= init_syscall_trace,
 };
 
 struct ftrace_event_class event_class_syscall_exit = {
-	.system			= "syscalls",
-	.reg			= syscall_exit_register,
-	.define_fields		= syscall_exit_define_fields,
-	.get_fields		= syscall_get_exit_fields,
-	.raw_init		= init_syscall_trace,
+	.system		= "syscalls",
+	.reg		= syscall_exit_register,
+	.define_fields	= syscall_exit_define_fields,
+	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
+	.raw_init	= init_syscall_trace,
 };
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
+extern struct syscall_metadata *__start_syscalls_metadata[];
+extern struct syscall_metadata *__stop_syscalls_metadata[];
 
 static struct syscall_metadata **syscalls_metadata;
 
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+static __init struct syscall_metadata *
+find_syscall_meta(unsigned long syscall)
 {
-	struct syscall_metadata *start;
-	struct syscall_metadata *stop;
+	struct syscall_metadata **start;
+	struct syscall_metadata **stop;
 	char str[KSYM_SYMBOL_LEN];
 
 
-	start = (struct syscall_metadata *)__start_syscalls_metadata;
-	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	start = __start_syscalls_metadata;
+	stop = __stop_syscalls_metadata;
 	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 
 	for ( ; start < stop; start++) {
@@ -87,8 +79,8 @@
 		 * with "SyS" instead of "sys", leading to an unwanted
 		 * mismatch.
 		 */
-		if (start->name && !strcmp(start->name + 3, str + 3))
-			return start;
+		if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
+			return *start;
 	}
 	return NULL;
 }
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index e95ee7f..68187af 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -27,8 +27,8 @@
 #include <linux/sched.h>
 #include <linux/jump_label.h>
 
-extern struct tracepoint __start___tracepoints[];
-extern struct tracepoint __stop___tracepoints[];
+extern struct tracepoint * const __start___tracepoints_ptrs[];
+extern struct tracepoint * const __stop___tracepoints_ptrs[];
 
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
@@ -298,10 +298,10 @@
  *
  * Updates the probe callback corresponding to a range of tracepoints.
  */
-void
-tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+				   struct tracepoint * const *end)
 {
-	struct tracepoint *iter;
+	struct tracepoint * const *iter;
 	struct tracepoint_entry *mark_entry;
 
 	if (!begin)
@@ -309,12 +309,12 @@
 
 	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
-		mark_entry = get_tracepoint(iter->name);
+		mark_entry = get_tracepoint((*iter)->name);
 		if (mark_entry) {
-			set_tracepoint(&mark_entry, iter,
+			set_tracepoint(&mark_entry, *iter,
 					!!mark_entry->refcount);
 		} else {
-			disable_tracepoint(iter);
+			disable_tracepoint(*iter);
 		}
 	}
 	mutex_unlock(&tracepoints_mutex);
@@ -326,8 +326,8 @@
 static void tracepoint_update_probes(void)
 {
 	/* Core kernel tracepoints */
-	tracepoint_update_probe_range(__start___tracepoints,
-		__stop___tracepoints);
+	tracepoint_update_probe_range(__start___tracepoints_ptrs,
+		__stop___tracepoints_ptrs);
 	/* tracepoints in modules. */
 	module_update_tracepoints();
 }
@@ -514,8 +514,8 @@
  * Will return the first tracepoint in the range if the input tracepoint is
  * NULL.
  */
-int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end)
+int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+	struct tracepoint * const *begin, struct tracepoint * const *end)
 {
 	if (!*tracepoint && begin != end) {
 		*tracepoint = begin;
@@ -534,7 +534,8 @@
 	/* Core kernel tracepoints */
 	if (!iter->module) {
 		found = tracepoint_get_iter_range(&iter->tracepoint,
-				__start___tracepoints, __stop___tracepoints);
+				__start___tracepoints_ptrs,
+				__stop___tracepoints_ptrs);
 		if (found)
 			goto end;
 	}
@@ -585,8 +586,8 @@
 	switch (val) {
 	case MODULE_STATE_COMING:
 	case MODULE_STATE_GOING:
-		tracepoint_update_probe_range(mod->tracepoints,
-			mod->tracepoints + mod->num_tracepoints);
+		tracepoint_update_probe_range(mod->tracepoints_ptrs,
+			mod->tracepoints_ptrs + mod->num_tracepoints);
 		break;
 	}
 	return 0;
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 2591583..9da289c 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -12,6 +12,8 @@
 #include <linux/highuid.h>
 #include <linux/cred.h>
 
+static struct kmem_cache *user_ns_cachep __read_mostly;
+
 /*
  * Create a new user namespace, deriving the creator from the user in the
  * passed credentials, and replacing that user with the new root user for the
@@ -26,7 +28,7 @@
 	struct user_struct *root_user;
 	int n;
 
-	ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL);
+	ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL);
 	if (!ns)
 		return -ENOMEM;
 
@@ -38,7 +40,7 @@
 	/* Alloc new root user.  */
 	root_user = alloc_uid(ns, 0);
 	if (!root_user) {
-		kfree(ns);
+		kmem_cache_free(user_ns_cachep, ns);
 		return -ENOMEM;
 	}
 
@@ -71,7 +73,7 @@
 	struct user_namespace *ns =
 		container_of(work, struct user_namespace, destroyer);
 	free_uid(ns->creator);
-	kfree(ns);
+	kmem_cache_free(user_ns_cachep, ns);
 }
 
 void free_user_ns(struct kref *kref)
@@ -126,3 +128,10 @@
 	/* No useful relationship so no mapping */
 	return overflowgid;
 }
+
+static __init int user_namespaces_init(void)
+{
+	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
+	return 0;
+}
+module_init(user_namespaces_init);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d7ebdf4..18bb157 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,7 +27,7 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
+int watchdog_enabled = 1;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@
 	if (!strncmp(str, "panic", 5))
 		hardlockup_panic = 1;
 	else if (!strncmp(str, "0", 1))
-		no_watchdog = 1;
+		watchdog_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@
 
 static int __init nowatchdog_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -366,8 +363,14 @@
 		goto out_save;
 	}
 
-	printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n",
-	       cpu, PTR_ERR(event));
+
+	/* vary the KERN level based on the returned errno */
+	if (PTR_ERR(event) == -EOPNOTSUPP)
+		printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+	else if (PTR_ERR(event) == -ENOENT)
+		printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
+	else
+		printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event));
 	return PTR_ERR(event);
 
 	/* success path */
@@ -432,9 +435,6 @@
 		wake_up_process(p);
 	}
 
-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
-
 	return 0;
 }
 
@@ -462,12 +462,16 @@
 static void watchdog_enable_all_cpus(void)
 {
 	int cpu;
-	int result = 0;
+
+	watchdog_enabled = 0;
 
 	for_each_online_cpu(cpu)
-		result += watchdog_enable(cpu);
+		if (!watchdog_enable(cpu))
+			/* if any cpu succeeds, watchdog is considered
+			   enabled for the system */
+			watchdog_enabled = 1;
 
-	if (result)
+	if (!watchdog_enabled)
 		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
@@ -476,9 +480,6 @@
 {
 	int cpu;
 
-	if (no_watchdog)
-		return;
-
 	for_each_online_cpu(cpu)
 		watchdog_disable(cpu);
 
@@ -498,10 +499,12 @@
 {
 	proc_dointvec(table, write, buffer, length, ppos);
 
-	if (watchdog_enabled)
-		watchdog_enable_all_cpus();
-	else
-		watchdog_disable_all_cpus();
+	if (write) {
+		if (watchdog_enabled)
+			watchdog_enable_all_cpus();
+		else
+			watchdog_disable_all_cpus();
+	}
 	return 0;
 }
 
@@ -530,7 +533,8 @@
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		err = watchdog_enable(hotcpu);
+		if (watchdog_enabled)
+			err = watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -555,9 +559,6 @@
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	if (no_watchdog)
-		return;
-
 	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
 	WARN_ON(notifier_to_errno(err));
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec8..ee6578b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
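
The MAYDAY_INITIAL_TIMEOUT change in the hunk above clamps the rescuer's first timeout to at least two ticks, since HZ/100 can come out to a single tick and a one-tick timer may expire almost immediately. A small sketch of the arithmetic for a few illustrative HZ values:

#include <stdio.h>

static long mayday_initial_timeout(long hz)
{
	/* aim at ~10 ms, but never less than two ticks */
	return hz / 100 >= 2 ? hz / 100 : 2;
}

int main(void)
{
	long hz[] = { 100, 250, 1000 };
	for (int i = 0; i < 3; i++)
		printf("HZ=%4ld -> %ld ticks (%ld ms)\n", hz[i],
		       mayday_initial_timeout(hz[i]),
		       mayday_initial_timeout(hz[i]) * 1000 / hz[i]);
	return 0;
}
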
@@ -768,7 +770,11 @@
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -1840,7 +1846,7 @@
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2043,6 +2049,15 @@
 				move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
 
@@ -2384,8 +2399,18 @@
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
@@ -2942,7 +2967,7 @@
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
@@ -3054,7 +3079,7 @@
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3304,7 +3329,7 @@
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3562,9 +3587,9 @@
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3590,7 +3615,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3601,7 +3626,7 @@
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3610,8 +3635,8 @@
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3631,7 +3656,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3676,7 +3701,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */
diff --git a/lib/Kconfig b/lib/Kconfig
index 3116aa6..0ee67e0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -106,6 +106,8 @@
 config LZO_DECOMPRESS
 	tristate
 
+source "lib/xz/Kconfig"
+
 #
 # These all provide a common interface (hence the apparent duplication with
 # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
@@ -120,6 +122,10 @@
 config DECOMPRESS_LZMA
 	tristate
 
+config DECOMPRESS_XZ
+	select XZ_DEC
+	tristate
+
 config DECOMPRESS_LZO
 	select LZO_DECOMPRESS
 	tristate
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2d05adb..2b97418 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -657,7 +657,7 @@
 	  Disable for production systems.
 
 config DEBUG_BUGVERBOSE
-	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
+	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
 	depends on BUG
 	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
 		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
@@ -729,8 +729,8 @@
 	  If unsure, say N.
 
 config DEBUG_MEMORY_INIT
-	bool "Debug memory initialisation" if EMBEDDED
-	default !EMBEDDED
+	bool "Debug memory initialisation" if EXPERT
+	default !EXPERT
 	help
 	  Enable this for additional checks during memory initialisation.
 	  The sanity checks verify aspects of the VM such as the memory model
@@ -805,7 +805,7 @@
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || M68KNOMMU || FRV || UML || \
+		(CRIS || M68K || FRV || UML || \
 		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
diff --git a/lib/Makefile b/lib/Makefile
index d7b6e30a..cbb774f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o flex_array.o
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -21,7 +21,7 @@
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o
+	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -69,11 +69,13 @@
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
 
 lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
 lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
 lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
 lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
 
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
diff --git a/lib/decompress.c b/lib/decompress.c
index a760681..3d766b7 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -8,6 +8,7 @@
 
 #include <linux/decompress/bunzip2.h>
 #include <linux/decompress/unlzma.h>
+#include <linux/decompress/unxz.h>
 #include <linux/decompress/inflate.h>
 #include <linux/decompress/unlzo.h>
 
@@ -23,6 +24,9 @@
 #ifndef CONFIG_DECOMPRESS_LZMA
 # define unlzma NULL
 #endif
+#ifndef CONFIG_DECOMPRESS_XZ
+# define unxz NULL
+#endif
 #ifndef CONFIG_DECOMPRESS_LZO
 # define unlzo NULL
 #endif
@@ -36,6 +40,7 @@
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
 	{ {0x5d, 0x00}, "lzma", unlzma },
+	{ {0xfd, 0x37}, "xz", unxz },
 	{ {0x89, 0x4c}, "lzo", unlzo },
 	{ {0, 0}, NULL, NULL }
 };
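
The lib/decompress.c hunk above adds xz to the magic-byte dispatch table: the first two bytes of the compressed image select the decompressor, and the new entry matches 0xfd 0x37, the start of the xz header. A standalone userspace sketch of that lookup, mirroring the table shown above (the identify() helper is illustrative, not a kernel function):

#include <stdio.h>

struct compress_format {
	unsigned char magic[2];
	const char *name;
};

static const struct compress_format formats[] = {
	{ { 037, 0236 }, "gzip"  },
	{ { 0x42, 0x5a }, "bzip2" },
	{ { 0x5d, 0x00 }, "lzma"  },
	{ { 0xfd, 0x37 }, "xz"    },
	{ { 0x89, 0x4c }, "lzo"   },
	{ { 0, 0 }, NULL },
};

static const char *identify(const unsigned char *buf)
{
	const struct compress_format *cf;

	for (cf = formats; cf->name; cf++)
		if (buf[0] == cf->magic[0] && buf[1] == cf->magic[1])
			return cf->name;
	return "unknown";
}

int main(void)
{
	const unsigned char xz_hdr[] = { 0xfd, '7', 'z', 'X', 'Z', 0x00 };

	printf("detected: %s\n", identify(xz_hdr));	/* prints "xz" */
	return 0;
}
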
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 81c8bb1..a7b80c1 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -49,7 +49,6 @@
 #define PREBOOT
 #else
 #include <linux/decompress/bunzip2.h>
-#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
@@ -682,13 +681,12 @@
 			int(*flush)(void*, unsigned int),
 			unsigned char *outbuf,
 			int *pos,
-			void(*error_fn)(char *x))
+			void(*error)(char *x))
 {
 	struct bunzip_data *bd;
 	int i = -1;
 	unsigned char *inbuf;
 
-	set_error_fn(error_fn);
 	if (flush)
 		outbuf = malloc(BZIP2_IOBUF_SIZE);
 
@@ -751,8 +749,8 @@
 			int(*flush)(void*, unsigned int),
 			unsigned char *outbuf,
 			int *pos,
-			void(*error_fn)(char *x))
+			void(*error)(char *x))
 {
-	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
+	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
 }
 #endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index fc686c7..19ff89e 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,7 +19,6 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
-#include <linux/slab.h>
 
 #endif /* STATIC */
 
@@ -27,7 +26,7 @@
 
 #define GZIP_IOBUF_SIZE (16*1024)
 
-static int nofill(void *buffer, unsigned int len)
+static int INIT nofill(void *buffer, unsigned int len)
 {
 	return -1;
 }
@@ -38,13 +37,12 @@
 		       int(*flush)(void*, unsigned int),
 		       unsigned char *out_buf,
 		       int *pos,
-		       void(*error_fn)(char *x)) {
+		       void(*error)(char *x)) {
 	u8 *zbuf;
 	struct z_stream_s *strm;
 	int rc;
 	size_t out_len;
 
-	set_error_fn(error_fn);
 	rc = -1;
 	if (flush) {
 		out_len = 0x8000; /* 32 K */
@@ -100,13 +98,22 @@
 	 * possible asciz filename)
 	 */
 	strm->next_in = zbuf + 10;
+	strm->avail_in = len - 10;
 	/* skip over asciz filename */
 	if (zbuf[3] & 0x8) {
-		while (strm->next_in[0])
-			strm->next_in++;
-		strm->next_in++;
+		do {
+			/*
+			 * If the filename doesn't fit into the buffer,
+			 * the file is very probably corrupt. Don't try
+			 * to read more data.
+			 */
+			if (strm->avail_in == 0) {
+				error("header error");
+				goto gunzip_5;
+			}
+			--strm->avail_in;
+		} while (*strm->next_in++);
 	}
-	strm->avail_in = len - (strm->next_in - zbuf);
 
 	strm->next_out = out_buf;
 	strm->avail_out = out_len;
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index ca82fde..476c65a 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -33,7 +33,6 @@
 #define PREBOOT
 #else
 #include <linux/decompress/unlzma.h>
-#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
@@ -74,6 +73,7 @@
 	uint32_t code;
 	uint32_t range;
 	uint32_t bound;
+	void (*error)(char *);
 };
 
 
@@ -82,7 +82,7 @@
 #define RC_MODEL_TOTAL_BITS 11
 
 
-static int nofill(void *buffer, unsigned int len)
+static int INIT nofill(void *buffer, unsigned int len)
 {
 	return -1;
 }
@@ -92,7 +92,7 @@
 {
 	rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
 	if (rc->buffer_size <= 0)
-		error("unexpected EOF");
+		rc->error("unexpected EOF");
 	rc->ptr = rc->buffer;
 	rc->buffer_end = rc->buffer + rc->buffer_size;
 }
@@ -127,12 +127,6 @@
 }
 
 
-/* Called once. TODO: bb_maybe_free() */
-static inline void INIT rc_free(struct rc *rc)
-{
-	free(rc->buffer);
-}
-
 /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
 static void INIT rc_do_normalize(struct rc *rc)
 {
@@ -169,7 +163,7 @@
 	rc->range = rc->bound;
 	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
 }
-static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
+static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
 {
 	rc->range -= rc->bound;
 	rc->code -= rc->bound;
@@ -319,32 +313,38 @@
 
 }
 
-static inline void INIT write_byte(struct writer *wr, uint8_t byte)
+static inline int INIT write_byte(struct writer *wr, uint8_t byte)
 {
 	wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
 	if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
 		wr->buffer_pos = 0;
 		wr->global_pos += wr->header->dict_size;
-		wr->flush((char *)wr->buffer, wr->header->dict_size);
+		if (wr->flush((char *)wr->buffer, wr->header->dict_size)
+				!= wr->header->dict_size)
+			return -1;
 	}
+	return 0;
 }
 
 
-static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
+static inline int INIT copy_byte(struct writer *wr, uint32_t offs)
 {
-	write_byte(wr, peek_old_byte(wr, offs));
+	return write_byte(wr, peek_old_byte(wr, offs));
 }
 
-static inline void INIT copy_bytes(struct writer *wr,
+static inline int INIT copy_bytes(struct writer *wr,
 					 uint32_t rep0, int len)
 {
 	do {
-		copy_byte(wr, rep0);
+		if (copy_byte(wr, rep0))
+			return -1;
 		len--;
 	} while (len != 0 && wr->buffer_pos < wr->header->dst_size);
+
+	return len;
 }
 
-static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
+static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
 				     struct cstate *cst, uint16_t *p,
 				     int pos_state, uint16_t *prob,
 				     int lc, uint32_t literal_pos_mask) {
@@ -378,16 +378,17 @@
 		uint16_t *prob_lit = prob + mi;
 		rc_get_bit(rc, prob_lit, &mi);
 	}
-	write_byte(wr, mi);
 	if (cst->state < 4)
 		cst->state = 0;
 	else if (cst->state < 10)
 		cst->state -= 3;
 	else
 		cst->state -= 6;
+
+	return write_byte(wr, mi);
 }
 
-static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
+static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
 					    struct cstate *cst, uint16_t *p,
 					    int pos_state, uint16_t *prob) {
   int offset;
@@ -418,8 +419,7 @@
 
 				cst->state = cst->state < LZMA_NUM_LIT_STATES ?
 					9 : 11;
-				copy_byte(wr, cst->rep0);
-				return;
+				return copy_byte(wr, cst->rep0);
 			} else {
 				rc_update_bit_1(rc, prob);
 			}
@@ -521,12 +521,15 @@
 		} else
 			cst->rep0 = pos_slot;
 		if (++(cst->rep0) == 0)
-			return;
+			return 0;
+		if (cst->rep0 > wr->header->dict_size
+				|| cst->rep0 > get_pos(wr))
+			return -1;
 	}
 
 	len += LZMA_MATCH_MIN_LEN;
 
-	copy_bytes(wr, cst->rep0, len);
+	return copy_bytes(wr, cst->rep0, len);
 }
 
 
@@ -536,7 +539,7 @@
 			      int(*flush)(void*, unsigned int),
 			      unsigned char *output,
 			      int *posp,
-			      void(*error_fn)(char *x)
+			      void(*error)(char *x)
 	)
 {
 	struct lzma_header header;
@@ -552,7 +555,7 @@
 	unsigned char *inbuf;
 	int ret = -1;
 
-	set_error_fn(error_fn);
+	rc.error = error;
 
 	if (buf)
 		inbuf = buf;
@@ -580,8 +583,10 @@
 		((unsigned char *)&header)[i] = *rc.ptr++;
 	}
 
-	if (header.pos >= (9 * 5 * 5))
+	if (header.pos >= (9 * 5 * 5)) {
 		error("bad header");
+		goto exit_1;
+	}
 
 	mi = 0;
 	lc = header.pos;
@@ -627,21 +632,29 @@
 		int pos_state =	get_pos(&wr) & pos_state_mask;
 		uint16_t *prob = p + LZMA_IS_MATCH +
 			(cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
-		if (rc_is_bit_0(&rc, prob))
-			process_bit0(&wr, &rc, &cst, p, pos_state, prob,
-				     lc, literal_pos_mask);
-		else {
-			process_bit1(&wr, &rc, &cst, p, pos_state, prob);
+		if (rc_is_bit_0(&rc, prob)) {
+			if (process_bit0(&wr, &rc, &cst, p, pos_state, prob,
+					lc, literal_pos_mask)) {
+				error("LZMA data is corrupt");
+				goto exit_3;
+			}
+		} else {
+			if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) {
+				error("LZMA data is corrupt");
+				goto exit_3;
+			}
 			if (cst.rep0 == 0)
 				break;
 		}
+		if (rc.buffer_size <= 0)
+			goto exit_3;
 	}
 
 	if (posp)
 		*posp = rc.ptr-rc.buffer;
-	if (wr.flush)
-		wr.flush(wr.buffer, wr.buffer_pos);
-	ret = 0;
+	if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos)
+		ret = 0;
+exit_3:
 	large_free(p);
 exit_2:
 	if (!output)
@@ -659,9 +672,9 @@
 			      int(*flush)(void*, unsigned int),
 			      unsigned char *output,
 			      int *posp,
-			      void(*error_fn)(char *x)
+			      void(*error)(char *x)
 	)
 {
-	return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
+	return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
 }
 #endif
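
The unlzma() rework above relies on flush() returning the number of bytes it actually wrote, so write_byte() can turn a short write into a decompression failure. A minimal sketch of a callback honouring that contract, assuming a userspace test harness with a plain file descriptor rather than the kernel's real flush implementations:

#include <unistd.h>

/* Hypothetical output descriptor for this sketch (stdout). */
static int out_fd = 1;

/*
 * Return exactly the requested length on success; a short write makes
 * unlzma() report the stream as corrupt.
 */
static int flush_cb(void *buf, unsigned int len)
{
	unsigned int done = 0;

	while (done < len) {
		ssize_t n = write(out_fd, (char *)buf + done, len - done);
		if (n <= 0)
			return -1;
		done += n;
	}
	return (int)done;
}
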
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index bcb3a4b..5a7a2ad 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -33,7 +33,6 @@
 #ifdef STATIC
 #include "lzo/lzo1x_decompress.c"
 #else
-#include <linux/slab.h>
 #include <linux/decompress/unlzo.h>
 #endif
 
@@ -49,14 +48,25 @@
 
 #define LZO_BLOCK_SIZE        (256*1024l)
 #define HEADER_HAS_FILTER      0x00000800L
+#define HEADER_SIZE_MIN       (9 + 7     + 4 + 8     + 1       + 4)
+#define HEADER_SIZE_MAX       (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
 
-STATIC inline int INIT parse_header(u8 *input, u8 *skip)
+STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
 {
 	int l;
 	u8 *parse = input;
+	u8 *end = input + in_len;
 	u8 level = 0;
 	u16 version;
 
+	/*
+	 * Check that there's enough input to possibly have a valid header.
+	 * After that, several fields can be parsed safely before the
+	 * minimum amount of input may have been consumed.
+	 */
+	if (in_len < HEADER_SIZE_MIN)
+		return 0;
+
 	/* read magic: 9 first bits */
 	for (l = 0; l < 9; l++) {
 		if (*parse++ != lzop_magic[l])
@@ -74,6 +84,15 @@
 	else
 		parse += 4; /* flags */
 
+	/*
+	 * At least mode, mtime_low, filename length, and checksum must
+	 * be left to be parsed. If mtime_high is also present, that's OK
+	 * because the next input buffer check is after reading the
+	 * filename length.
+	 */
+	if (end - parse < 8 + 1 + 4)
+		return 0;
+
 	/* skip mode and mtime_low */
 	parse += 8;
 	if (version >= 0x0940)
@@ -81,6 +100,8 @@
 
 	l = *parse++;
 	/* don't care about the file name, and skip checksum */
+	if (end - parse < l + 4)
+		return 0;
 	parse += l + 4;
 
 	*skip = parse - input;
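
For reference, the two bounds introduced above evaluate to 33 and 297 bytes: the minimum covers only the mandatory lzop header fields with an empty file name, the maximum adds every optional field plus a 255-byte file name. A standalone check of that arithmetic (the interpretation of the fields is an assumption, only the sums come from the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Same sums as HEADER_SIZE_MIN / HEADER_SIZE_MAX above. */
	int min = 9 + 7     + 4 + 8     + 1       + 4;
	int max = 9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4;

	assert(min == 33);
	assert(max == 297);
	printf("lzop header size: %d..%d bytes\n", min, max);
	return 0;
}
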
@@ -91,16 +112,15 @@
 				int (*fill) (void *, unsigned int),
 				int (*flush) (void *, unsigned int),
 				u8 *output, int *posp,
-				void (*error_fn) (char *x))
+				void (*error) (char *x))
 {
-	u8 skip = 0, r = 0;
+	u8 r = 0;
+	int skip = 0;
 	u32 src_len, dst_len;
 	size_t tmp;
 	u8 *in_buf, *in_buf_save, *out_buf;
 	int ret = -1;
 
-	set_error_fn(error_fn);
-
 	if (output) {
 		out_buf = output;
 	} else if (!flush) {
@@ -119,8 +139,8 @@
 		goto exit_1;
 	} else if (input) {
 		in_buf = input;
-	} else if (!fill || !posp) {
-		error("NULL input pointer and missing position pointer or fill function");
+	} else if (!fill) {
+		error("NULL input pointer and missing fill function");
 		goto exit_1;
 	} else {
 		in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
@@ -134,22 +154,47 @@
 	if (posp)
 		*posp = 0;
 
-	if (fill)
-		fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
+	if (fill) {
+		/*
+		 * Start from in_buf + HEADER_SIZE_MAX to make it possible
+		 * to use memcpy() to copy the unused data to the beginning
+		 * of the buffer. This way memmove() isn't needed which
+		 * is missing from pre-boot environments of most archs.
+		 */
+		in_buf += HEADER_SIZE_MAX;
+		in_len = fill(in_buf, HEADER_SIZE_MAX);
+	}
 
-	if (!parse_header(input, &skip)) {
+	if (!parse_header(in_buf, &skip, in_len)) {
 		error("invalid header");
 		goto exit_2;
 	}
 	in_buf += skip;
+	in_len -= skip;
+
+	if (fill) {
+		/* Move the unused data to the beginning of the buffer. */
+		memcpy(in_buf_save, in_buf, in_len);
+		in_buf = in_buf_save;
+	}
 
 	if (posp)
 		*posp = skip;
 
 	for (;;) {
 		/* read uncompressed block size */
+		if (fill && in_len < 4) {
+			skip = fill(in_buf + in_len, 4 - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < 4) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		dst_len = get_unaligned_be32(in_buf);
 		in_buf += 4;
+		in_len -= 4;
 
 		/* exit if last block */
 		if (dst_len == 0) {
@@ -164,8 +209,18 @@
 		}
 
 		/* read compressed block size, and skip block checksum info */
+		if (fill && in_len < 8) {
+			skip = fill(in_buf + in_len, 8 - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < 8) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		src_len = get_unaligned_be32(in_buf);
 		in_buf += 8;
+		in_len -= 8;
 
 		if (src_len <= 0 || src_len > dst_len) {
 			error("file corrupted");
@@ -173,6 +228,15 @@
 		}
 
 		/* decompress */
+		if (fill && in_len < src_len) {
+			skip = fill(in_buf + in_len, src_len - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < src_len) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		tmp = dst_len;
 
 		/* When the input data is not compressed at all,
@@ -190,17 +254,26 @@
 			}
 		}
 
-		if (flush)
-			flush(out_buf, dst_len);
+		if (flush && flush(out_buf, dst_len) != dst_len)
+			goto exit_2;
 		if (output)
 			out_buf += dst_len;
 		if (posp)
 			*posp += src_len + 12;
+
+		in_buf += src_len;
+		in_len -= src_len;
 		if (fill) {
+			/*
+			 * If there happens to still be unused data left in
+			 * in_buf, move it to the beginning of the buffer.
+			 * Use a loop to avoid memmove() dependency.
+			 */
+			if (in_len > 0)
+				for (skip = 0; skip < in_len; ++skip)
+					in_buf_save[skip] = in_buf[skip];
 			in_buf = in_buf_save;
-			fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
-		} else
-			in_buf += src_len;
+		}
 	}
 
 	ret = 0;
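
The multi-call path above repeats one pattern three times: top the buffer up with fill() until at least n bytes are available, otherwise treat the file as corrupted. A sketch of that pattern as a single helper (purely illustrative; the patch deliberately open-codes it at each call site):

static int ensure_bytes(u8 *in_buf, int *in_len, int needed,
			int (*fill)(void *, unsigned int))
{
	int got;

	if (fill && *in_len < needed) {
		got = fill(in_buf + *in_len, needed - *in_len);
		if (got > 0)
			*in_len += got;
	}
	/* Caller reports "file corrupted" on -1. */
	return *in_len >= needed ? 0 : -1;
}
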
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
new file mode 100644
index 0000000..cecd23d
--- /dev/null
+++ b/lib/decompress_unxz.c
@@ -0,0 +1,397 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * Important notes about in-place decompression
+ *
+ * At least on x86, the kernel is decompressed in place: the compressed data
+ * is placed to the end of the output buffer, and the decompressor overwrites
+ * most of the compressed data. There must be enough safety margin to
+ * guarantee that the write position is always behind the read position.
+ *
+ * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
+ * Note that the margin with XZ is bigger than with Deflate (gzip)!
+ *
+ * The worst case for in-place decompression is that the beginning of
+ * the file is compressed extremely well, and the rest of the file is
+ * uncompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding uncompressible data.
+ *
+ * The structure of the .xz file in case of a compressed kernel is as follows.
+ * Sizes (as bytes) of the fields are in parentheses.
+ *
+ *    Stream Header (12)
+ *    Block Header:
+ *      Block Header (8-12)
+ *      Compressed Data (N)
+ *      Block Padding (0-3)
+ *      CRC32 (4)
+ *    Index (8-20)
+ *    Stream Footer (12)
+ *
+ * Normally there is exactly one Block, but let's assume that there are
+ * 2-4 Blocks just in case. Because Stream Header and also Block Header
+ * of the first Block don't make the decompressor produce any uncompressed
+ * data, we can leave them out of our calculations. Block Headers of possible
+ * additional Blocks still have to be taken into account. With these
+ * assumptions, it is safe to assume that the total header overhead is
+ * less than 128 bytes.
+ *
+ * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
+ * doesn't change the size of the data, it is enough to calculate the
+ * safety margin for LZMA2.
+ *
+ * LZMA2 stores the data in chunks. Each chunk has a header whose size is
+ * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
+ * the maximum chunk header size is 8 bytes. After the chunk header, there
+ * may be up to 64 KiB of actual payload in the chunk. Often the payload is
+ * quite a bit smaller though; to be safe, let's assume that an average
+ * chunk has only 32 KiB of payload.
+ *
+ * The maximum uncompressed size of the payload is 2 MiB. The minimum
+ * uncompressed size of the payload is in practice never less than the
+ * payload size itself. The LZMA2 format would allow uncompressed size
+ * to be less than the payload size, but no sane compressor creates such
+ * files. LZMA2 supports storing uncompressible data in uncompressed form,
+ * so there's never a need to create payloads whose uncompressed size is
+ * smaller than the compressed size.
+ *
+ * The assumption, that the uncompressed size of the payload is never
+ * smaller than the payload itself, is valid only when talking about
+ * the payload as a whole. It is possible that the payload has parts where
+ * the decompressor consumes more input than it produces output. Calculating
+ * the worst case for this would be tricky. Instead of trying to do that,
+ * let's simply make sure that the decompressor never overwrites any bytes
+ * of the payload which it is currently reading.
+ *
+ * Now we have enough information to calculate the safety margin. We need
+ *   - 128 bytes for the .xz file format headers;
+ *   - 8 bytes for every 32 KiB of uncompressed size (one LZMA2 chunk header
+ *     per chunk, each chunk having average payload size of 32 KiB); and
+ *   - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
+ *     the decompressor never overwrites anything from the LZMA2 chunk
+ *     payload it is currently reading.
+ *
+ * We get the following formula:
+ *
+ *    safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
+ *                  = 128 + (uncompressed_size >> 12) + 65536
+ *
+ * For comparison, according to arch/x86/boot/compressed/misc.c, the
+ * equivalent formula for Deflate is this:
+ *
+ *    safety_margin = 18 + (uncompressed_size >> 12) + 32768
+ *
+ * Thus, when updating Deflate-only in-place kernel decompressor to
+ * support XZ, the fixed overhead has to be increased from 18+32768 bytes
+ * to 128+65536 bytes.
+ */
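
Plugging a concrete size into the two formulas makes the difference tangible; for an assumed 20 MiB uncompressed image the XZ margin is 70784 bytes versus 37906 bytes for Deflate. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned long uncompressed_size = 20UL << 20;	/* 20 MiB example */
	unsigned long xz_margin = 128 + (uncompressed_size >> 12) + 65536;
	unsigned long gz_margin = 18 + (uncompressed_size >> 12) + 32768;

	printf("xz safety margin:   %lu bytes\n", xz_margin);	/* 70784 */
	printf("gzip safety margin: %lu bytes\n", gz_margin);	/* 37906 */
	return 0;
}
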
+
+/*
+ * STATIC is defined to "static" if we are being built for kernel
+ * decompression (pre-boot code). <linux/decompress/mm.h> will define
+ * STATIC to empty if it wasn't already defined. Since we will need to
+ * know later if we are being used for kernel decompression, we define
+ * XZ_PREBOOT here.
+ */
+#ifdef STATIC
+#	define XZ_PREBOOT
+#endif
+#ifdef __KERNEL__
+#	include <linux/decompress/mm.h>
+#endif
+#define XZ_EXTERN STATIC
+
+#ifndef XZ_PREBOOT
+#	include <linux/slab.h>
+#	include <linux/xz.h>
+#else
+/*
+ * Use the internal CRC32 code instead of kernel's CRC32 module, which
+ * is not available in early phase of booting.
+ */
+#define XZ_INTERNAL_CRC32 1
+
+/*
+ * For boot time use, we enable only the BCJ filter of the current
+ * architecture or none if no BCJ filter is available for the architecture.
+ */
+#ifdef CONFIG_X86
+#	define XZ_DEC_X86
+#endif
+#ifdef CONFIG_PPC
+#	define XZ_DEC_POWERPC
+#endif
+#ifdef CONFIG_ARM
+#	define XZ_DEC_ARM
+#endif
+#ifdef CONFIG_IA64
+#	define XZ_DEC_IA64
+#endif
+#ifdef CONFIG_SPARC
+#	define XZ_DEC_SPARC
+#endif
+
+/*
+ * This will get the basic headers so that memeq() and others
+ * can be defined.
+ */
+#include "xz/xz_private.h"
+
+/*
+ * Replace the normal allocation functions with the versions from
+ * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
+ * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
+ * Work around it here because the other decompressors don't need it.
+ */
+#undef kmalloc
+#undef kfree
+#undef vmalloc
+#undef vfree
+#define kmalloc(size, flags) malloc(size)
+#define kfree(ptr) free(ptr)
+#define vmalloc(size) malloc(size)
+#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)
+
+/*
+ * FIXME: Not all basic memory functions are provided in architecture-specific
+ * files (yet). We define our own versions here for now, but this should be
+ * only a temporary solution.
+ *
+ * memeq and memzero are not used much and any remotely sane implementation
+ * is fast enough. memcpy/memmove speed matters in multi-call mode, but
+ * the kernel image is decompressed in single-call mode, in which only
+ * memcpy speed can matter and only if there is a lot of uncompressible data
+ * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
+ * functions below should just be kept small; it's probably not worth
+ * optimizing for speed.
+ */
+
+#ifndef memeq
+static bool memeq(const void *a, const void *b, size_t size)
+{
+	const uint8_t *x = a;
+	const uint8_t *y = b;
+	size_t i;
+
+	for (i = 0; i < size; ++i)
+		if (x[i] != y[i])
+			return false;
+
+	return true;
+}
+#endif
+
+#ifndef memzero
+static void memzero(void *buf, size_t size)
+{
+	uint8_t *b = buf;
+	uint8_t *e = b + size;
+
+	while (b != e)
+		*b++ = '\0';
+}
+#endif
+
+#ifndef memmove
+/* Not static to avoid a conflict with the prototype in the Linux headers. */
+void *memmove(void *dest, const void *src, size_t size)
+{
+	uint8_t *d = dest;
+	const uint8_t *s = src;
+	size_t i;
+
+	if (d < s) {
+		for (i = 0; i < size; ++i)
+			d[i] = s[i];
+	} else if (d > s) {
+		i = size;
+		while (i-- > 0)
+			d[i] = s[i];
+	}
+
+	return dest;
+}
+#endif
+
+/*
+ * Since we need memmove anyway, we could use it as memcpy too.
+ * Commented out for now to avoid breaking things.
+ */
+/*
+#ifndef memcpy
+#	define memcpy memmove
+#endif
+*/
+
+#include "xz/xz_crc32.c"
+#include "xz/xz_dec_stream.c"
+#include "xz/xz_dec_lzma2.c"
+#include "xz/xz_dec_bcj.c"
+
+#endif /* XZ_PREBOOT */
+
+/* Size of the input and output buffers in multi-call mode */
+#define XZ_IOBUF_SIZE 4096
+
+/*
+ * This function implements the API defined in <linux/decompress/generic.h>.
+ *
+ * This wrapper will automatically choose single-call or multi-call mode
+ * of the native XZ decoder API. The single-call mode can be used only when
+ * both input and output buffers are available as a single chunk, i.e. when
+ * fill() and flush() won't be used.
+ */
+STATIC int INIT unxz(unsigned char *in, int in_size,
+		     int (*fill)(void *dest, unsigned int size),
+		     int (*flush)(void *src, unsigned int size),
+		     unsigned char *out, int *in_used,
+		     void (*error)(char *x))
+{
+	struct xz_buf b;
+	struct xz_dec *s;
+	enum xz_ret ret;
+	bool must_free_in = false;
+
+#if XZ_INTERNAL_CRC32
+	xz_crc32_init();
+#endif
+
+	if (in_used != NULL)
+		*in_used = 0;
+
+	if (fill == NULL && flush == NULL)
+		s = xz_dec_init(XZ_SINGLE, 0);
+	else
+		s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);
+
+	if (s == NULL)
+		goto error_alloc_state;
+
+	if (flush == NULL) {
+		b.out = out;
+		b.out_size = (size_t)-1;
+	} else {
+		b.out_size = XZ_IOBUF_SIZE;
+		b.out = malloc(XZ_IOBUF_SIZE);
+		if (b.out == NULL)
+			goto error_alloc_out;
+	}
+
+	if (in == NULL) {
+		must_free_in = true;
+		in = malloc(XZ_IOBUF_SIZE);
+		if (in == NULL)
+			goto error_alloc_in;
+	}
+
+	b.in = in;
+	b.in_pos = 0;
+	b.in_size = in_size;
+	b.out_pos = 0;
+
+	if (fill == NULL && flush == NULL) {
+		ret = xz_dec_run(s, &b);
+	} else {
+		do {
+			if (b.in_pos == b.in_size && fill != NULL) {
+				if (in_used != NULL)
+					*in_used += b.in_pos;
+
+				b.in_pos = 0;
+
+				in_size = fill(in, XZ_IOBUF_SIZE);
+				if (in_size < 0) {
+					/*
+					 * This isn't an optimal error code
+					 * but it probably isn't worth making
+					 * a new one either.
+					 */
+					ret = XZ_BUF_ERROR;
+					break;
+				}
+
+				b.in_size = in_size;
+			}
+
+			ret = xz_dec_run(s, &b);
+
+			if (flush != NULL && (b.out_pos == b.out_size
+					|| (ret != XZ_OK && b.out_pos > 0))) {
+				/*
+				 * Setting ret here may hide an error
+				 * returned by xz_dec_run(), but probably
+				 * it's not too bad.
+				 */
+				if (flush(b.out, b.out_pos) != (int)b.out_pos)
+					ret = XZ_BUF_ERROR;
+
+				b.out_pos = 0;
+			}
+		} while (ret == XZ_OK);
+
+		if (must_free_in)
+			free(in);
+
+		if (flush != NULL)
+			free(b.out);
+	}
+
+	if (in_used != NULL)
+		*in_used += b.in_pos;
+
+	xz_dec_end(s);
+
+	switch (ret) {
+	case XZ_STREAM_END:
+		return 0;
+
+	case XZ_MEM_ERROR:
+		/* This can occur only in multi-call mode. */
+		error("XZ decompressor ran out of memory");
+		break;
+
+	case XZ_FORMAT_ERROR:
+		error("Input is not in the XZ format (wrong magic bytes)");
+		break;
+
+	case XZ_OPTIONS_ERROR:
+		error("Input was encoded with settings that are not "
+				"supported by this XZ decoder");
+		break;
+
+	case XZ_DATA_ERROR:
+	case XZ_BUF_ERROR:
+		error("XZ-compressed data is corrupt");
+		break;
+
+	default:
+		error("Bug in the XZ decompressor");
+		break;
+	}
+
+	return -1;
+
+error_alloc_in:
+	if (flush != NULL)
+		free(b.out);
+
+error_alloc_out:
+	xz_dec_end(s);
+
+error_alloc_state:
+	error("XZ decompressor ran out of memory");
+	return -1;
+}
+
+/*
+ * This macro is used by architecture-specific files to decompress
+ * the kernel image.
+ */
+#define decompress unxz
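
For the single-call case the wrapper is used with both callbacks NULL, which makes it pick XZ_SINGLE mode and write straight into the caller's buffer; the output buffer must be large enough for the whole image since out_size is effectively unlimited. A hedged sketch (the buffer names and the pr_err() reporting are assumptions, not taken from the patch):

static void unxz_error(char *msg)
{
	pr_err("unxz: %s\n", msg);
}

/* Decompress a complete .xz blob into a sufficiently large buffer. */
static int decompress_blob(unsigned char *xz_data, int xz_size,
			   unsigned char *out_buf)
{
	int in_used;

	return unxz(xz_data, xz_size, NULL, NULL, out_buf, &in_used,
		    unxz_error);
}
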
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 3094318..b335acb 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -141,11 +141,10 @@
 			else if (!dp->flags)
 				dt->num_enabled++;
 			dp->flags = newflags;
-			if (newflags) {
-				jump_label_enable(&dp->enabled);
-			} else {
-				jump_label_disable(&dp->enabled);
-			}
+			if (newflags)
+				dp->enabled = 1;
+			else
+				dp->enabled = 0;
 			if (verbose)
 				printk(KERN_INFO
 					"ddebug: changed %s:%d [%s]%s %s\n",
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 77a6fea..c0ea40b 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -23,6 +23,7 @@
 #include <linux/flex_array.h>
 #include <linux/slab.h>
 #include <linux/stddef.h>
+#include <linux/module.h>
 
 struct flex_array_part {
 	char elements[FLEX_ARRAY_PART_SIZE];
@@ -103,6 +104,7 @@
 						FLEX_ARRAY_BASE_BYTES_LEFT);
 	return ret;
 }
+EXPORT_SYMBOL(flex_array_alloc);
 
 static int fa_element_to_part_nr(struct flex_array *fa,
 					unsigned int element_nr)
@@ -126,12 +128,14 @@
 	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
 		kfree(fa->parts[part_nr]);
 }
+EXPORT_SYMBOL(flex_array_free_parts);
 
 void flex_array_free(struct flex_array *fa)
 {
 	flex_array_free_parts(fa);
 	kfree(fa);
 }
+EXPORT_SYMBOL(flex_array_free);
 
 static unsigned int index_inside_part(struct flex_array *fa,
 					unsigned int element_nr)
@@ -196,6 +200,7 @@
 	memcpy(dst, src, fa->element_size);
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_put);
 
 /**
  * flex_array_clear - clear element in array at @element_nr
@@ -223,6 +228,7 @@
 	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_clear);
 
 /**
  * flex_array_prealloc - guarantee that array space exists
@@ -259,6 +265,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_prealloc);
 
 /**
  * flex_array_get - pull data back out of the array
@@ -288,6 +295,7 @@
 	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
+EXPORT_SYMBOL(flex_array_get);
 
 /**
  * flex_array_get_ptr - pull a ptr back out of the array
@@ -308,6 +316,7 @@
 
 	return *tmp;
 }
+EXPORT_SYMBOL(flex_array_get_ptr);
 
 static int part_is_free(struct flex_array_part *part)
 {
@@ -348,3 +357,4 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL(flex_array_shrink);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index b66b2bd..f5fe6ba 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -154,6 +154,7 @@
 }
 EXPORT_SYMBOL(hex_dump_to_buffer);
 
+#ifdef CONFIG_PRINTK
 /**
  * print_hex_dump - print a text hex dump to syslog for a binary blob of data
  * @level: kernel log level (e.g. KERN_DEBUG)
@@ -238,3 +239,4 @@
 		       buf, len, true);
 }
 EXPORT_SYMBOL(print_hex_dump_bytes);
+#endif
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 5730ecd..da4e2ad 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
@@ -90,3 +91,4 @@
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 344c710..b8029a5 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -35,6 +35,31 @@
 }
 EXPORT_SYMBOL(__list_add);
 
+void __list_del_entry(struct list_head *entry)
+{
+	struct list_head *prev, *next;
+
+	prev = entry->prev;
+	next = entry->next;
+
+	if (WARN(next == LIST_POISON1,
+		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+		entry, LIST_POISON1) ||
+	    WARN(prev == LIST_POISON2,
+		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+		entry, LIST_POISON2) ||
+	    WARN(prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
+		"but was %p\n", entry, prev->next) ||
+	    WARN(next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, next->prev))
+		return;
+
+	__list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@
  */
 void list_del(struct list_head *entry)
 {
-	WARN(entry->next == LIST_POISON1,
-		"list_del corruption, next is LIST_POISON1 (%p)\n",
-		LIST_POISON1);
-	WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
-		"list_del corruption, prev is LIST_POISON2 (%p)\n",
-		LIST_POISON2);
-	WARN(entry->prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, entry->prev->next);
-	WARN(entry->next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, entry->next->prev);
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
 }
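
With the checks consolidated in __list_del_entry() and turned into an early return, a corrupted deletion no longer dereferences the poison pointers; the classic double list_del() now only triggers a WARN. A minimal illustration (kernel context with CONFIG_DEBUG_LIST assumed):

static void double_del_example(void)
{
	LIST_HEAD(head);
	struct list_head item;

	list_add(&item, &head);
	list_del(&item);	/* fine: item->next/prev are now poisoned */
	list_del(&item);	/* WARNs "next is LIST_POISON1" and bails out */
}
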
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 00e8a02..ac09f22 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@
 {
 	int i, len = 0;
 
-	for (i = 0; i < n; i++) {
+	for (i = 0; i < n; i++, p++) {
 		if (p->len)
 			len += nla_total_size(p->len);
 		else if (nla_attr_minlen[p->type])
@@ -167,7 +167,7 @@
  * @policy: validation policy
  *
  * Parses a stream of attributes and stores a pointer to each attribute in
- * the tb array accessable via the attribute type. Attributes with a type
+ * the tb array accessible via the attribute type. Attributes with a type
  * exceeding maxtype will be silently ignored for backwards compatibility
  * reasons. policy may be set to NULL if no validation is required.
  *
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5086bb9..7ea2e03 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -736,10 +736,11 @@
 		}
 	}
 	/*
-	 * The iftag must have been set somewhere because otherwise
-	 * we would return immediated at the beginning of the function
+	 * There is no need to set the root tag if no entry in the range
+	 * from *first_indexp to last_index was tagged with settag.
 	 */
-	root_tag_set(root, settag);
+	if (tagged > 0)
+		root_tag_set(root, settag);
 	*first_indexp = index;
 
 	return tagged;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4693f79..a16be19 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -315,6 +315,7 @@
 
 	rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_insert);
 
 /*
  * before removing the node, find the deepest node on the rebalance path
@@ -340,6 +341,7 @@
 
 	return deepest;
 }
+EXPORT_SYMBOL(rb_augment_erase_begin);
 
 /*
  * after removal, update the tree to account for the removed entry
@@ -350,6 +352,7 @@
 	if (node)
 		rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_erase_end);
 
 /*
  * This function returns the first node (in sort order) of the tree.
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 7c06ee5..93ca08b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,7 +60,7 @@
 static char *io_tlb_start, *io_tlb_end;
 
 /*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
  * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
  */
 static unsigned long io_tlb_nslabs;
@@ -686,8 +686,10 @@
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size))
-		panic("map_single: bounce buffer is not DMA'ble");
+	if (!dma_capable(dev, dev_addr, size)) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+	}
 
 	return dev_addr;
 }
diff --git a/lib/textsearch.c b/lib/textsearch.c
index d608331..e0cc014 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -13,7 +13,7 @@
  *
  * INTRODUCTION
  *
- *   The textsearch infrastructure provides text searching facitilies for
+ *   The textsearch infrastructure provides text searching facilities for
  *   both linear and non-linear data. Individual search algorithms are
  *   implemented in modules and chosen by the user.
  *
@@ -43,7 +43,7 @@
  *       to the algorithm to store persistent variables.
  *   (4) Core eventually resets the search offset and forwards the find()
  *       request to the algorithm.
- *   (5) Algorithm calls get_next_block() provided by the user continously
+ *   (5) Algorithm calls get_next_block() provided by the user continuously
  *       to fetch the data to be searched in block by block.
  *   (6) Algorithm invokes finish() after the last call to get_next_block
  *       to clean up any leftovers from get_next_block. (Optional)
@@ -58,15 +58,15 @@
  *   the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
  *   to perform case insensitive matching. But it might slow down
  *   performance of algorithm, so you should use it at own your risk.
- *   The returned configuration may then be used for an arbitary
+ *   The returned configuration may then be used for an arbitrary
  *   amount of times and even in parallel as long as a separate struct
  *   ts_state variable is provided to every instance.
  *
  *   The actual search is performed by either calling textsearch_find_-
  *   continuous() for linear data or by providing an own get_next_block()
  *   implementation and calling textsearch_find(). Both functions return
- *   the position of the first occurrence of the patern or UINT_MAX if
- *   no match was found. Subsequent occurences can be found by calling
+ *   the position of the first occurrence of the pattern or UINT_MAX if
+ *   no match was found. Subsequent occurrences can be found by calling
  *   textsearch_next() regardless of the linearity of the data.
  *
  *   Once you're done using a configuration it must be given back via
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c150d3d..d3023df 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -936,6 +936,8 @@
 	return string(buf, end, uuid, spec);
 }
 
+int kptr_restrict = 1;
+
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
  * by an extra set of alphanumeric characters that are extended format
@@ -979,6 +981,7 @@
  *       Implements a "recursive vsnprintf".
  *       Do not use this feature without some mechanism to verify the
  *       correctness of the format string and va_list arguments.
+ * - 'K' For a kernel pointer that should be hidden from unprivileged users
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1035,6 +1038,25 @@
 		return buf + vsnprintf(buf, end - buf,
 				       ((struct va_format *)ptr)->fmt,
 				       *(((struct va_format *)ptr)->va));
+	case 'K':
+		/*
+		 * %pK cannot be used in IRQ context because its test
+		 * for CAP_SYSLOG would be meaningless.
+		 */
+		if (in_irq() || in_serving_softirq() || in_nmi()) {
+			if (spec.field_width == -1)
+				spec.field_width = 2 * sizeof(void *);
+			return string(buf, end, "pK-error", spec);
+		} else if ((kptr_restrict == 0) ||
+			 (kptr_restrict == 1 &&
+			  has_capability_noaudit(current, CAP_SYSLOG)))
+			break;
+
+		if (spec.field_width == -1) {
+			spec.field_width = 2 * sizeof(void *);
+			spec.flags |= ZEROPAD;
+		}
+		return number(buf, end, 0, spec);
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
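
The practical effect of the new 'K' case, with kptr_restrict at its default of 1 as initialized above (the printed values are illustrative):

static void show_object(void *obj)
{
	pr_info("object at %pK\n", obj);
	/* task with CAP_SYSLOG:       "object at ffff880012345678" (real) */
	/* unprivileged task:          "object at 0000000000000000" */
	/* formatted in IRQ/NMI context: "object at pK-error" */
}
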
@@ -1451,7 +1473,7 @@
  * @args: Arguments for the format string
  *
  * The return value is the number of characters which have been written into
- * the @buf not including the trailing '\0'. If @size is <= 0 the function
+ * the @buf not including the trailing '\0'. If @size is == 0 the function
  * returns 0.
  *
  * Call this function if you are already dealing with a va_list.
@@ -1465,7 +1487,11 @@
 
 	i = vsnprintf(buf, size, fmt, args);
 
-	return (i >= size) ? (size - 1) : i;
+	if (likely(i < size))
+		return i;
+	if (size != 0)
+		return size - 1;
+	return 0;
 }
 EXPORT_SYMBOL(vscnprintf);
 
@@ -1513,14 +1539,10 @@
 	int i;
 
 	va_start(args, fmt);
-	i = vsnprintf(buf, size, fmt, args);
+	i = vscnprintf(buf, size, fmt, args);
 	va_end(args);
 
-	if (likely(i < size))
-		return i;
-	if (size != 0)
-		return size - 1;
-	return 0;
+	return i;
 }
 EXPORT_SYMBOL(scnprintf);
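
The return-value behaviour that scnprintf() now inherits from vscnprintf() (illustrative, not part of the patch):

static void scnprintf_example(void)
{
	char buf[8];
	int n;

	n = scnprintf(buf, sizeof(buf), "abc");
	/* n == 3, buf == "abc" */
	n = scnprintf(buf, sizeof(buf), "0123456789");
	/* n == 7, buf == "0123456" (truncated but NUL terminated) */
	n = scnprintf(buf, 0, "abc");
	/* n == 0, buf untouched */
}
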
 
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
new file mode 100644
index 0000000..60a6088
--- /dev/null
+++ b/lib/xz/Kconfig
@@ -0,0 +1,59 @@
+config XZ_DEC
+	tristate "XZ decompression support"
+	select CRC32
+	help
+	  LZMA2 compression algorithm and BCJ filters are supported using
+	  the .xz file format as the container. For integrity checking,
+	  CRC32 is supported. See Documentation/xz.txt for more information.
+
+config XZ_DEC_X86
+	bool "x86 BCJ filter decoder" if EXPERT
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_POWERPC
+	bool "PowerPC BCJ filter decoder" if EXPERT
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_IA64
+	bool "IA-64 BCJ filter decoder" if EXPERT
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_ARM
+	bool "ARM BCJ filter decoder" if EXPERT
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_ARMTHUMB
+	bool "ARM-Thumb BCJ filter decoder" if EXPERT
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_SPARC
+	bool "SPARC BCJ filter decoder" if EXPERT
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_BCJ
+	bool
+	default n
+
+config XZ_DEC_TEST
+	tristate "XZ decompressor tester"
+	default n
+	depends on XZ_DEC
+	help
+	  This allows passing .xz files to the in-kernel XZ decoder via
+	  a character special file. It calculates CRC32 of the decompressed
+	  data and writes diagnostics to the system log.
+
+	  Unless you are developing the XZ decoder, you don't need this
+	  and should say N.
diff --git a/lib/xz/Makefile b/lib/xz/Makefile
new file mode 100644
index 0000000..a7fa769
--- /dev/null
+++ b/lib/xz/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XZ_DEC) += xz_dec.o
+xz_dec-y := xz_dec_syms.o xz_dec_stream.o xz_dec_lzma2.o
+xz_dec-$(CONFIG_XZ_DEC_BCJ) += xz_dec_bcj.o
+
+obj-$(CONFIG_XZ_DEC_TEST) += xz_dec_test.o
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
new file mode 100644
index 0000000..34532d1
--- /dev/null
+++ b/lib/xz/xz_crc32.c
@@ -0,0 +1,59 @@
+/*
+ * CRC32 using the polynomial from IEEE-802.3
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * This is not the fastest implementation, but it is pretty compact.
+ * The fastest versions of xz_crc32() on modern CPUs without hardware
+ * accelerated CRC instruction are 3-5 times as fast as this version,
+ * but they are bigger and use more memory for the lookup table.
+ */
+
+#include "xz_private.h"
+
+/*
+ * STATIC_RW_DATA is used in the pre-boot environment on some architectures.
+ * See <linux/decompress/mm.h> for details.
+ */
+#ifndef STATIC_RW_DATA
+#	define STATIC_RW_DATA static
+#endif
+
+STATIC_RW_DATA uint32_t xz_crc32_table[256];
+
+XZ_EXTERN void xz_crc32_init(void)
+{
+	const uint32_t poly = 0xEDB88320;
+
+	uint32_t i;
+	uint32_t j;
+	uint32_t r;
+
+	for (i = 0; i < 256; ++i) {
+		r = i;
+		for (j = 0; j < 8; ++j)
+			r = (r >> 1) ^ (poly & ~((r & 1) - 1));
+
+		xz_crc32_table[i] = r;
+	}
+
+	return;
+}
+
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+{
+	crc = ~crc;
+
+	while (size != 0) {
+		crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
+		--size;
+	}
+
+	return ~crc;
+}
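
Typical use of the two helpers above in the pre-boot case, where this table-driven version stands in for the kernel's crc32 module (a sketch; the real callers are in xz_dec_stream.c):

static uint32_t checksum_buf(const uint8_t *buf, size_t len)
{
	xz_crc32_init();		/* fill xz_crc32_table once */
	return xz_crc32(buf, len, 0);	/* 0 is the initial CRC value */
}
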
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
new file mode 100644
index 0000000..e51e255
--- /dev/null
+++ b/lib/xz/xz_dec_bcj.c
@@ -0,0 +1,561 @@
+/*
+ * Branch/Call/Jump (BCJ) filter decoders
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+
+/*
+ * The rest of the file is inside this ifdef. It makes things a little more
+ * convenient when building without support for any BCJ filters.
+ */
+#ifdef XZ_DEC_BCJ
+
+struct xz_dec_bcj {
+	/* Type of the BCJ filter being used */
+	enum {
+		BCJ_X86 = 4,        /* x86 or x86-64 */
+		BCJ_POWERPC = 5,    /* Big endian only */
+		BCJ_IA64 = 6,       /* Big or little endian */
+		BCJ_ARM = 7,        /* Little endian only */
+		BCJ_ARMTHUMB = 8,   /* Little endian only */
+		BCJ_SPARC = 9       /* Big or little endian */
+	} type;
+
+	/*
+	 * Return value of the next filter in the chain. We need to preserve
+	 * this information across calls, because we must not call the next
+	 * filter anymore once it has returned XZ_STREAM_END.
+	 */
+	enum xz_ret ret;
+
+	/* True if we are operating in single-call mode. */
+	bool single_call;
+
+	/*
+	 * Absolute position relative to the beginning of the uncompressed
+	 * data (in a single .xz Block). We care only about the lowest 32
+	 * bits so this doesn't need to be uint64_t even with big files.
+	 */
+	uint32_t pos;
+
+	/* x86 filter state */
+	uint32_t x86_prev_mask;
+
+	/* Temporary space to hold the variables from struct xz_buf */
+	uint8_t *out;
+	size_t out_pos;
+	size_t out_size;
+
+	struct {
+		/* Amount of already filtered data in the beginning of buf */
+		size_t filtered;
+
+		/* Total amount of data currently stored in buf  */
+		size_t size;
+
+		/*
+		 * Buffer to hold a mix of filtered and unfiltered data. This
+		 * needs to be big enough to hold Alignment + 2 * Look-ahead:
+		 *
+		 * Type         Alignment   Look-ahead
+		 * x86              1           4
+		 * PowerPC          4           0
+		 * IA-64           16           0
+		 * ARM              4           0
+		 * ARM-Thumb        2           2
+		 * SPARC            4           0
+		 */
+		uint8_t buf[16];
+	} temp;
+};
+
+#ifdef XZ_DEC_X86
+/*
+ * This is used to test the most significant byte of a memory address
+ * in an x86 instruction.
+ */
+static inline int bcj_x86_test_msbyte(uint8_t b)
+{
+	return b == 0x00 || b == 0xFF;
+}
+
+static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	static const bool mask_to_allowed_status[8]
+		= { true, true, true, false, true, false, false, false };
+
+	static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };
+
+	size_t i;
+	size_t prev_pos = (size_t)-1;
+	uint32_t prev_mask = s->x86_prev_mask;
+	uint32_t src;
+	uint32_t dest;
+	uint32_t j;
+	uint8_t b;
+
+	if (size <= 4)
+		return 0;
+
+	size -= 4;
+	for (i = 0; i < size; ++i) {
+		if ((buf[i] & 0xFE) != 0xE8)
+			continue;
+
+		prev_pos = i - prev_pos;
+		if (prev_pos > 3) {
+			prev_mask = 0;
+		} else {
+			prev_mask = (prev_mask << (prev_pos - 1)) & 7;
+			if (prev_mask != 0) {
+				b = buf[i + 4 - mask_to_bit_num[prev_mask]];
+				if (!mask_to_allowed_status[prev_mask]
+						|| bcj_x86_test_msbyte(b)) {
+					prev_pos = i;
+					prev_mask = (prev_mask << 1) | 1;
+					continue;
+				}
+			}
+		}
+
+		prev_pos = i;
+
+		if (bcj_x86_test_msbyte(buf[i + 4])) {
+			src = get_unaligned_le32(buf + i + 1);
+			while (true) {
+				dest = src - (s->pos + (uint32_t)i + 5);
+				if (prev_mask == 0)
+					break;
+
+				j = mask_to_bit_num[prev_mask] * 8;
+				b = (uint8_t)(dest >> (24 - j));
+				if (!bcj_x86_test_msbyte(b))
+					break;
+
+				src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
+			}
+
+			dest &= 0x01FFFFFF;
+			dest |= (uint32_t)0 - (dest & 0x01000000);
+			put_unaligned_le32(dest, buf + i + 1);
+			i += 4;
+		} else {
+			prev_mask = (prev_mask << 1) | 1;
+		}
+	}
+
+	prev_pos = i - prev_pos;
+	s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_POWERPC
+static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t instr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		instr = get_unaligned_be32(buf + i);
+		if ((instr & 0xFC000003) == 0x48000001) {
+			instr &= 0x03FFFFFC;
+			instr -= s->pos + (uint32_t)i;
+			instr &= 0x03FFFFFC;
+			instr |= 0x48000001;
+			put_unaligned_be32(instr, buf + i);
+		}
+	}
+
+	return i;
+}
+#endif
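
The PowerPC case is the easiest one to follow end to end: the encoder turned the 24-bit displacement of a "bl" instruction into an absolute offset, and the code above subtracts the instruction's position to recover the relative form. A standalone round-trip of a single instruction word (the example values are made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pos = 0x100;		/* uncompressed offset of the word */
	uint32_t original = 0x48000041;	/* "bl" with a +0x40 displacement */

	/* Encoder side: relative -> absolute. */
	uint32_t stored = 0x48000001 |
			  (((original & 0x03FFFFFC) + pos) & 0x03FFFFFC);

	/* Decoder side (bcj_powerpc above): absolute -> relative. */
	uint32_t restored = 0x48000001 |
			    (((stored & 0x03FFFFFC) - pos) & 0x03FFFFFC);

	assert(stored == 0x48000141);
	assert(restored == original);
	return 0;
}
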
+
+#ifdef XZ_DEC_IA64
+static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	static const uint8_t branch_table[32] = {
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		4, 4, 6, 6, 0, 0, 7, 7,
+		4, 4, 0, 0, 4, 4, 0, 0
+	};
+
+	/*
+	 * The local variables take a little bit of stack space, but it's less
+	 * than what the LZMA2 decoder takes, so it doesn't make sense to reduce
+	 * stack usage here without doing that for the LZMA2 decoder too.
+	 */
+
+	/* Loop counters */
+	size_t i;
+	size_t j;
+
+	/* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
+	uint32_t slot;
+
+	/* Bitwise offset of the instruction indicated by slot */
+	uint32_t bit_pos;
+
+	/* bit_pos split into byte and bit parts */
+	uint32_t byte_pos;
+	uint32_t bit_res;
+
+	/* Address part of an instruction */
+	uint32_t addr;
+
+	/* Mask used to detect which instructions to convert */
+	uint32_t mask;
+
+	/* 41-bit instruction stored somewhere in the lowest 48 bits */
+	uint64_t instr;
+
+	/* Instruction normalized with bit_res for easier manipulation */
+	uint64_t norm;
+
+	for (i = 0; i + 16 <= size; i += 16) {
+		mask = branch_table[buf[i] & 0x1F];
+		for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
+			if (((mask >> slot) & 1) == 0)
+				continue;
+
+			byte_pos = bit_pos >> 3;
+			bit_res = bit_pos & 7;
+			instr = 0;
+			for (j = 0; j < 6; ++j)
+				instr |= (uint64_t)(buf[i + j + byte_pos])
+						<< (8 * j);
+
+			norm = instr >> bit_res;
+
+			if (((norm >> 37) & 0x0F) == 0x05
+					&& ((norm >> 9) & 0x07) == 0) {
+				addr = (norm >> 13) & 0x0FFFFF;
+				addr |= ((uint32_t)(norm >> 36) & 1) << 20;
+				addr <<= 4;
+				addr -= s->pos + (uint32_t)i;
+				addr >>= 4;
+
+				norm &= ~((uint64_t)0x8FFFFF << 13);
+				norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
+				norm |= (uint64_t)(addr & 0x100000)
+						<< (36 - 20);
+
+				instr &= (1 << bit_res) - 1;
+				instr |= norm << bit_res;
+
+				for (j = 0; j < 6; j++)
+					buf[i + j + byte_pos]
+						= (uint8_t)(instr >> (8 * j));
+			}
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARM
+static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t addr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		if (buf[i + 3] == 0xEB) {
+			addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
+					| ((uint32_t)buf[i + 2] << 16);
+			addr <<= 2;
+			addr -= s->pos + (uint32_t)i + 8;
+			addr >>= 2;
+			buf[i] = (uint8_t)addr;
+			buf[i + 1] = (uint8_t)(addr >> 8);
+			buf[i + 2] = (uint8_t)(addr >> 16);
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARMTHUMB
+static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t addr;
+
+	for (i = 0; i + 4 <= size; i += 2) {
+		if ((buf[i + 1] & 0xF8) == 0xF0
+				&& (buf[i + 3] & 0xF8) == 0xF8) {
+			addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
+					| ((uint32_t)buf[i] << 11)
+					| (((uint32_t)buf[i + 3] & 0x07) << 8)
+					| (uint32_t)buf[i + 2];
+			addr <<= 1;
+			addr -= s->pos + (uint32_t)i + 4;
+			addr >>= 1;
+			buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
+			buf[i] = (uint8_t)(addr >> 11);
+			buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
+			buf[i + 2] = (uint8_t)addr;
+			i += 2;
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_SPARC
+static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t instr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		instr = get_unaligned_be32(buf + i);
+		if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
+			instr <<= 2;
+			instr -= s->pos + (uint32_t)i;
+			instr >>= 2;
+			instr = ((uint32_t)0x40000000 - (instr & 0x400000))
+					| 0x40000000 | (instr & 0x3FFFFF);
+			put_unaligned_be32(instr, buf + i);
+		}
+	}
+
+	return i;
+}
+#endif
+
+/*
+ * Apply the selected BCJ filter. Update *pos and s->pos to match the amount
+ * of data that got filtered.
+ *
+ * NOTE: This is implemented as a switch statement to avoid using function
+ * pointers, which could be problematic in the kernel boot code, which must
+ * avoid pointers to static data (at least on x86).
+ */
+static void bcj_apply(struct xz_dec_bcj *s,
+		      uint8_t *buf, size_t *pos, size_t size)
+{
+	size_t filtered;
+
+	buf += *pos;
+	size -= *pos;
+
+	switch (s->type) {
+#ifdef XZ_DEC_X86
+	case BCJ_X86:
+		filtered = bcj_x86(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_POWERPC
+	case BCJ_POWERPC:
+		filtered = bcj_powerpc(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_IA64
+	case BCJ_IA64:
+		filtered = bcj_ia64(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_ARM
+	case BCJ_ARM:
+		filtered = bcj_arm(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+	case BCJ_ARMTHUMB:
+		filtered = bcj_armthumb(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_SPARC
+	case BCJ_SPARC:
+		filtered = bcj_sparc(s, buf, size);
+		break;
+#endif
+	default:
+		/* Never reached but silence compiler warnings. */
+		filtered = 0;
+		break;
+	}
+
+	*pos += filtered;
+	s->pos += filtered;
+}
+
+/*
+ * Flush pending filtered data from temp to the output buffer.
+ * Move the remaining mixture of possibly filtered and unfiltered
+ * data to the beginning of temp.
+ */
+static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
+{
+	size_t copy_size;
+
+	copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos);
+	memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
+	b->out_pos += copy_size;
+
+	s->temp.filtered -= copy_size;
+	s->temp.size -= copy_size;
+	memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
+}
+
+/*
+ * The BCJ filter functions are primitive in the sense that they process the
+ * data in chunks of 1-16 bytes. To hide this issue, this function does
+ * some buffering.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+				     struct xz_dec_lzma2 *lzma2,
+				     struct xz_buf *b)
+{
+	size_t out_start;
+
+	/*
+	 * Flush pending already filtered data to the output buffer. Return
+	 * immediately if we couldn't flush everything, or if the next
+	 * filter in the chain had already returned XZ_STREAM_END.
+	 */
+	if (s->temp.filtered > 0) {
+		bcj_flush(s, b);
+		if (s->temp.filtered > 0)
+			return XZ_OK;
+
+		if (s->ret == XZ_STREAM_END)
+			return XZ_STREAM_END;
+	}
+
+	/*
+	 * If we have more output space than what is currently pending in
+	 * temp, copy the unfiltered data from temp to the output buffer
+	 * and try to fill the output buffer by decoding more data from the
+	 * next filter in the chain. Apply the BCJ filter on the new data
+	 * in the output buffer. If everything cannot be filtered, copy it
+	 * to temp and rewind the output buffer position accordingly.
+	 */
+	if (s->temp.size < b->out_size - b->out_pos) {
+		out_start = b->out_pos;
+		memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
+		b->out_pos += s->temp.size;
+
+		s->ret = xz_dec_lzma2_run(lzma2, b);
+		if (s->ret != XZ_STREAM_END
+				&& (s->ret != XZ_OK || s->single_call))
+			return s->ret;
+
+		bcj_apply(s, b->out, &out_start, b->out_pos);
+
+		/*
+		 * As an exception, if the next filter returned XZ_STREAM_END,
+		 * we can do that too, since the last few bytes that remain
+		 * unfiltered are meant to remain unfiltered.
+		 */
+		if (s->ret == XZ_STREAM_END)
+			return XZ_STREAM_END;
+
+		s->temp.size = b->out_pos - out_start;
+		b->out_pos -= s->temp.size;
+		memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+	}
+
+	/*
+	 * If we have unfiltered data in temp, try to fill by decoding more
+	 * data from the next filter. Apply the BCJ filter on temp. Then we
+	 * hopefully can fill the actual output buffer by copying filtered
+	 * data from temp. A mix of filtered and unfiltered data may be left
+	 * in temp; it will be taken care of on the next call to this function.
+	 */
+	if (s->temp.size > 0) {
+		/* Make b->out{,_pos,_size} temporarily point to s->temp. */
+		s->out = b->out;
+		s->out_pos = b->out_pos;
+		s->out_size = b->out_size;
+		b->out = s->temp.buf;
+		b->out_pos = s->temp.size;
+		b->out_size = sizeof(s->temp.buf);
+
+		s->ret = xz_dec_lzma2_run(lzma2, b);
+
+		s->temp.size = b->out_pos;
+		b->out = s->out;
+		b->out_pos = s->out_pos;
+		b->out_size = s->out_size;
+
+		if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
+			return s->ret;
+
+		bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);
+
+		/*
+		 * If the next filter returned XZ_STREAM_END, we mark that
+		 * everything is filtered, since the last unfiltered bytes
+		 * of the stream are meant to be left as is.
+		 */
+		if (s->ret == XZ_STREAM_END)
+			s->temp.filtered = s->temp.size;
+
+		bcj_flush(s, b);
+		if (s->temp.filtered > 0)
+			return XZ_OK;
+	}
+
+	return s->ret;
+}
+
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
+{
+	struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s != NULL)
+		s->single_call = single_call;
+
+	return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
+{
+	switch (id) {
+#ifdef XZ_DEC_X86
+	case BCJ_X86:
+#endif
+#ifdef XZ_DEC_POWERPC
+	case BCJ_POWERPC:
+#endif
+#ifdef XZ_DEC_IA64
+	case BCJ_IA64:
+#endif
+#ifdef XZ_DEC_ARM
+	case BCJ_ARM:
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+	case BCJ_ARMTHUMB:
+#endif
+#ifdef XZ_DEC_SPARC
+	case BCJ_SPARC:
+#endif
+		break;
+
+	default:
+		/* Unsupported Filter ID */
+		return XZ_OPTIONS_ERROR;
+	}
+
+	s->type = id;
+	s->ret = XZ_OK;
+	s->pos = 0;
+	s->x86_prev_mask = 0;
+	s->temp.filtered = 0;
+	s->temp.size = 0;
+
+	return XZ_OK;
+}
+
+#endif
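
How a caller is expected to drive this decoder: allocate it with xz_dec_bcj_create(), select the filter with xz_dec_bcj_reset(), then let xz_dec_bcj_run() pull data through the LZMA2 decoder. A rough sketch only; the real driver is xz_dec_stream.c, which also handles chaining and the error paths:

static enum xz_ret run_bcj_block(struct xz_dec_bcj *bcj,
				 struct xz_dec_lzma2 *lzma2,
				 struct xz_buf *b, uint8_t filter_id)
{
	/* filter_id is e.g. 0x04 for BCJ_X86; unsupported IDs fail here. */
	enum xz_ret ret = xz_dec_bcj_reset(bcj, filter_id);

	if (ret != XZ_OK)
		return ret;

	return xz_dec_bcj_run(bcj, lzma2, b);
}
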
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
new file mode 100644
index 0000000..ea5fa4f
--- /dev/null
+++ b/lib/xz/xz_dec_lzma2.c
@@ -0,0 +1,1171 @@
+/*
+ * LZMA2 decoder
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_lzma2.h"
+
+/*
+ * Range decoder initialization eats the first five bytes of each LZMA chunk.
+ */
+#define RC_INIT_BYTES 5
+
+/*
+ * Minimum number of usable input bytes needed to safely decode one LZMA
+ * symbol. The worst case is that we decode 22 bits using probabilities and
+ * 26 direct bits. This may consume a maximum of 20 bytes of input. However,
+ * lzma_main() does an extra normalization before returning, thus we
+ * need to put 21 here.
+ */
+#define LZMA_IN_REQUIRED 21
+
+/*
+ * Dictionary (history buffer)
+ *
+ * These are always true:
+ *    start <= pos <= full <= end
+ *    pos <= limit <= end
+ *
+ * In multi-call mode, also these are true:
+ *    end == size
+ *    size <= size_max
+ *    allocated <= size
+ *
+ * Most of these variables are size_t to support single-call mode,
+ * in which the dictionary variables address the actual output
+ * buffer directly.
+ */
+struct dictionary {
+	/* Beginning of the history buffer */
+	uint8_t *buf;
+
+	/* Old position in buf (before decoding more data) */
+	size_t start;
+
+	/* Position in buf */
+	size_t pos;
+
+	/*
+	 * How full dictionary is. This is used to detect corrupt input that
+	 * would read beyond the beginning of the uncompressed stream.
+	 */
+	size_t full;
+
+	/* Write limit; we don't write to buf[limit] or later bytes. */
+	size_t limit;
+
+	/*
+	 * End of the dictionary buffer. In multi-call mode, this is
+	 * the same as the dictionary size. In single-call mode, this
+	 * indicates the size of the output buffer.
+	 */
+	size_t end;
+
+	/*
+	 * Size of the dictionary as specified in Block Header. This is used
+	 * together with "full" to detect corrupt input that would make us
+	 * read beyond the beginning of the uncompressed stream.
+	 */
+	uint32_t size;
+
+	/*
+	 * Maximum allowed dictionary size in multi-call mode.
+	 * This is ignored in single-call mode.
+	 */
+	uint32_t size_max;
+
+	/*
+	 * Amount of memory currently allocated for the dictionary.
+	 * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
+	 * size_max is always the same as the allocated size.)
+	 */
+	uint32_t allocated;
+
+	/* Operation mode */
+	enum xz_mode mode;
+};
+
+/* Range decoder */
+struct rc_dec {
+	uint32_t range;
+	uint32_t code;
+
+	/*
+	 * Number of initializing bytes remaining to be read
+	 * by rc_read_init().
+	 */
+	uint32_t init_bytes_left;
+
+	/*
+	 * Buffer from which we read our input. It can be either
+	 * temp.buf or the caller-provided input buffer.
+	 */
+	const uint8_t *in;
+	size_t in_pos;
+	size_t in_limit;
+};
+
+/* Probabilities for a length decoder. */
+struct lzma_len_dec {
+	/* Probability of match length being at least 10 */
+	uint16_t choice;
+
+	/* Probability of match length being at least 18 */
+	uint16_t choice2;
+
+	/* Probabilities for match lengths 2-9 */
+	uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
+
+	/* Probabilities for match lengths 10-17 */
+	uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
+
+	/* Probabilities for match lengths 18-273 */
+	uint16_t high[LEN_HIGH_SYMBOLS];
+};
+
+struct lzma_dec {
+	/* Distances of latest four matches */
+	uint32_t rep0;
+	uint32_t rep1;
+	uint32_t rep2;
+	uint32_t rep3;
+
+	/* Types of the most recently seen LZMA symbols */
+	enum lzma_state state;
+
+	/*
+	 * Length of a match. This is updated so that dict_repeat can
+	 * be called again to finish repeating the whole match.
+	 */
+	uint32_t len;
+
+	/*
+	 * LZMA properties or related bit masks (number of literal
+	 * context bits, a mask derived from the number of literal
+	 * position bits, and a mask derived from the number of
+	 * position bits)
+	 */
+	uint32_t lc;
+	uint32_t literal_pos_mask; /* (1 << lp) - 1 */
+	uint32_t pos_mask;         /* (1 << pb) - 1 */
+
+	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
+	uint16_t is_match[STATES][POS_STATES_MAX];
+
+	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
+	uint16_t is_rep[STATES];
+
+	/*
+	 * If 0, distance of a repeated match is rep0.
+	 * Otherwise check is_rep1.
+	 */
+	uint16_t is_rep0[STATES];
+
+	/*
+	 * If 0, distance of a repeated match is rep1.
+	 * Otherwise check is_rep2.
+	 */
+	uint16_t is_rep1[STATES];
+
+	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
+	uint16_t is_rep2[STATES];
+
+	/*
+	 * If 1, the repeated match has length of one byte. Otherwise
+	 * the length is decoded from rep_len_decoder.
+	 */
+	uint16_t is_rep0_long[STATES][POS_STATES_MAX];
+
+	/*
+	 * Probability tree for the highest two bits of the match
+	 * distance. There is a separate probability tree for match
+	 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
+	 */
+	uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
+
+	/*
+	 * Probability trees for additional bits for match distance
+	 * when the distance is in the range [4, 127].
+	 */
+	uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
+
+	/*
+	 * Probability tree for the lowest four bits of a match
+	 * distance that is equal to or greater than 128.
+	 */
+	uint16_t dist_align[ALIGN_SIZE];
+
+	/* Length of a normal match */
+	struct lzma_len_dec match_len_dec;
+
+	/* Length of a repeated match */
+	struct lzma_len_dec rep_len_dec;
+
+	/* Probabilities of literals */
+	uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
+};
+
+struct lzma2_dec {
+	/* Position in xz_dec_lzma2_run(). */
+	enum lzma2_seq {
+		SEQ_CONTROL,
+		SEQ_UNCOMPRESSED_1,
+		SEQ_UNCOMPRESSED_2,
+		SEQ_COMPRESSED_0,
+		SEQ_COMPRESSED_1,
+		SEQ_PROPERTIES,
+		SEQ_LZMA_PREPARE,
+		SEQ_LZMA_RUN,
+		SEQ_COPY
+	} sequence;
+
+	/* Next position after decoding the compressed size of the chunk. */
+	enum lzma2_seq next_sequence;
+
+	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
+	uint32_t uncompressed;
+
+	/*
+	 * Compressed size of LZMA chunk or compressed/uncompressed
+	 * size of uncompressed chunk (64 KiB at maximum)
+	 */
+	uint32_t compressed;
+
+	/*
+	 * True if dictionary reset is needed. This is false before
+	 * the first chunk (LZMA or uncompressed).
+	 */
+	bool need_dict_reset;
+
+	/*
+	 * True if new LZMA properties are needed. This is false
+	 * before the first LZMA chunk.
+	 */
+	bool need_props;
+};
+
+struct xz_dec_lzma2 {
+	/*
+	 * The order below is important on x86 to reduce code size and
+	 * it shouldn't hurt on other platforms. Everything up to and
+	 * including lzma.pos_mask are in the first 128 bytes on x86-32,
+	 * which allows using smaller instructions to access those
+	 * variables. On x86-64, fewer variables fit into the first 128
+	 * bytes, but this is still the best order without sacrificing
+	 * the readability by splitting the structures.
+	 */
+	struct rc_dec rc;
+	struct dictionary dict;
+	struct lzma2_dec lzma2;
+	struct lzma_dec lzma;
+
+	/*
+	 * Temporary buffer which holds small number of input bytes between
+	 * decoder calls. See lzma2_lzma() for details.
+	 */
+	struct {
+		uint32_t size;
+		uint8_t buf[3 * LZMA_IN_REQUIRED];
+	} temp;
+};
+
+/**************
+ * Dictionary *
+ **************/
+
+/*
+ * Reset the dictionary state. When in single-call mode, set up the beginning
+ * of the dictionary to point to the actual output buffer.
+ */
+static void dict_reset(struct dictionary *dict, struct xz_buf *b)
+{
+	if (DEC_IS_SINGLE(dict->mode)) {
+		dict->buf = b->out + b->out_pos;
+		dict->end = b->out_size - b->out_pos;
+	}
+
+	dict->start = 0;
+	dict->pos = 0;
+	dict->limit = 0;
+	dict->full = 0;
+}
+
+/* Set dictionary write limit */
+static void dict_limit(struct dictionary *dict, size_t out_max)
+{
+	if (dict->end - dict->pos <= out_max)
+		dict->limit = dict->end;
+	else
+		dict->limit = dict->pos + out_max;
+}
+
+/* Return true if at least one byte can be written into the dictionary. */
+static inline bool dict_has_space(const struct dictionary *dict)
+{
+	return dict->pos < dict->limit;
+}
+
+/*
+ * Get a byte from the dictionary at the given distance. The distance is
+ * assumed to be valid, or as a special case, zero when the dictionary is
+ * still empty. This special case is needed for single-call decoding to
+ * avoid writing a '\0' to the end of the destination buffer.
+ */
+static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
+{
+	size_t offset = dict->pos - dist - 1;
+
+	if (dist >= dict->pos)
+		offset += dict->end;
+
+	return dict->full > 0 ? dict->buf[offset] : 0;
+}
+
+/*
+ * Put one byte into the dictionary. It is assumed that there is space for it.
+ */
+static inline void dict_put(struct dictionary *dict, uint8_t byte)
+{
+	dict->buf[dict->pos++] = byte;
+
+	if (dict->full < dict->pos)
+		dict->full = dict->pos;
+}
+
+/*
+ * Repeat given number of bytes from the given distance. If the distance is
+ * invalid, false is returned. On success, true is returned and *len is
+ * updated to indicate how many bytes were left to be repeated.
+ */
+static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
+{
+	size_t back;
+	uint32_t left;
+
+	if (dist >= dict->full || dist >= dict->size)
+		return false;
+
+	left = min_t(size_t, dict->limit - dict->pos, *len);
+	*len -= left;
+
+	back = dict->pos - dist - 1;
+	if (dist >= dict->pos)
+		back += dict->end;
+
+	do {
+		dict->buf[dict->pos++] = dict->buf[back++];
+		if (back == dict->end)
+			back = 0;
+	} while (--left > 0);
+
+	if (dict->full < dict->pos)
+		dict->full = dict->pos;
+
+	return true;
+}
+
+/* Copy uncompressed data as is from input to dictionary and output buffers. */
+static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
+			      uint32_t *left)
+{
+	size_t copy_size;
+
+	while (*left > 0 && b->in_pos < b->in_size
+			&& b->out_pos < b->out_size) {
+		copy_size = min(b->in_size - b->in_pos,
+				b->out_size - b->out_pos);
+		if (copy_size > dict->end - dict->pos)
+			copy_size = dict->end - dict->pos;
+		if (copy_size > *left)
+			copy_size = *left;
+
+		*left -= copy_size;
+
+		memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
+		dict->pos += copy_size;
+
+		if (dict->full < dict->pos)
+			dict->full = dict->pos;
+
+		if (DEC_IS_MULTI(dict->mode)) {
+			if (dict->pos == dict->end)
+				dict->pos = 0;
+
+			memcpy(b->out + b->out_pos, b->in + b->in_pos,
+					copy_size);
+		}
+
+		dict->start = dict->pos;
+
+		b->out_pos += copy_size;
+		b->in_pos += copy_size;
+	}
+}
+
+/*
+ * Flush pending data from dictionary to b->out. It is assumed that there is
+ * enough space in b->out. This is guaranteed because caller uses dict_limit()
+ * before decoding data into the dictionary.
+ */
+static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
+{
+	size_t copy_size = dict->pos - dict->start;
+
+	if (DEC_IS_MULTI(dict->mode)) {
+		if (dict->pos == dict->end)
+			dict->pos = 0;
+
+		memcpy(b->out + b->out_pos, dict->buf + dict->start,
+				copy_size);
+	}
+
+	dict->start = dict->pos;
+	b->out_pos += copy_size;
+	return copy_size;
+}
+
+/*****************
+ * Range decoder *
+ *****************/
+
+/* Reset the range decoder. */
+static void rc_reset(struct rc_dec *rc)
+{
+	rc->range = (uint32_t)-1;
+	rc->code = 0;
+	rc->init_bytes_left = RC_INIT_BYTES;
+}
+
+/*
+ * Read the first five initial bytes into rc->code if they haven't been
+ * read already. (Yes, the first byte gets completely ignored.)
+ */
+static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
+{
+	while (rc->init_bytes_left > 0) {
+		if (b->in_pos == b->in_size)
+			return false;
+
+		rc->code = (rc->code << 8) + b->in[b->in_pos++];
+		--rc->init_bytes_left;
+	}
+
+	return true;
+}
+
+/* Return true if there may not be enough input for the next decoding loop. */
+static inline bool rc_limit_exceeded(const struct rc_dec *rc)
+{
+	return rc->in_pos > rc->in_limit;
+}
+
+/*
+ * Return true if it is possible (from the point of view of the range decoder)
+ * that we have reached the end of the LZMA chunk.
+ */
+static inline bool rc_is_finished(const struct rc_dec *rc)
+{
+	return rc->code == 0;
+}
+
+/* Read the next input byte if needed. */
+static __always_inline void rc_normalize(struct rc_dec *rc)
+{
+	if (rc->range < RC_TOP_VALUE) {
+		rc->range <<= RC_SHIFT_BITS;
+		rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
+	}
+}
+
+/*
+ * Decode one bit. In some versions, this function has been split into three
+ * functions so that the compiler is supposed to be able to more easily avoid
+ * an extra branch. In this particular version of the LZMA decoder, this
+ * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
+ * on x86). Using a non-split version results in nicer looking code too.
+ *
+ * NOTE: This must return an int. Do not make it return a bool or the speed
+ * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
+ * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
+ */
+static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
+{
+	uint32_t bound;
+	int bit;
+
+	rc_normalize(rc);
+	bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
+	if (rc->code < bound) {
+		rc->range = bound;
+		*prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
+		bit = 0;
+	} else {
+		rc->range -= bound;
+		rc->code -= bound;
+		*prob -= *prob >> RC_MOVE_BITS;
+		bit = 1;
+	}
+
+	return bit;
+}
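
The adaptive step in rc_bit() is easy to see in isolation: after each decoded bit the probability is nudged toward that bit by 1/32 (RC_MOVE_BITS = 5) of its remaining distance to certainty, with RC_BIT_MODEL_TOTAL = 2048 standing for probability 1.0. A stand-alone sketch of just that bookkeeping follows; the constants mirror xz_lzma2.h, everything else is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define BIT_MODEL_TOTAL 2048	/* matches RC_BIT_MODEL_TOTAL (11 bits) */
#define MOVE_BITS 5		/* matches RC_MOVE_BITS */

/* Adapt a probability toward the bit that was just decoded. */
static void adapt(uint16_t *prob, int bit)
{
	if (bit == 0)
		*prob += (BIT_MODEL_TOTAL - *prob) >> MOVE_BITS;
	else
		*prob -= *prob >> MOVE_BITS;
}

int main(void)
{
	uint16_t prob = BIT_MODEL_TOTAL / 2;	/* 1024: the reset value */
	int i;

	for (i = 0; i < 4; ++i) {
		adapt(&prob, 0);
		printf("after zero bit %d: %u\n", i + 1, prob);
	}
	/* Prints 1056, 1087, 1117, 1146: drifting toward "0 is likely",
	 * so rc_bit() gives the zero branch a larger share of the range. */
	return 0;
}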
+
+/* Decode a bittree starting from the most significant bit. */
+static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
+					   uint16_t *probs, uint32_t limit)
+{
+	uint32_t symbol = 1;
+
+	do {
+		if (rc_bit(rc, &probs[symbol]))
+			symbol = (symbol << 1) + 1;
+		else
+			symbol <<= 1;
+	} while (symbol < limit);
+
+	return symbol;
+}
+
+/* Decode a bittree starting from the least significant bit. */
+static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
+					       uint16_t *probs,
+					       uint32_t *dest, uint32_t limit)
+{
+	uint32_t symbol = 1;
+	uint32_t i = 0;
+
+	do {
+		if (rc_bit(rc, &probs[symbol])) {
+			symbol = (symbol << 1) + 1;
+			*dest += 1 << i;
+		} else {
+			symbol <<= 1;
+		}
+	} while (++i < limit);
+}
+
+/* Decode direct bits (fixed fifty-fifty probability) */
+static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
+{
+	uint32_t mask;
+
+	do {
+		rc_normalize(rc);
+		rc->range >>= 1;
+		rc->code -= rc->range;
+		mask = (uint32_t)0 - (rc->code >> 31);
+		rc->code += rc->range & mask;
+		*dest = (*dest << 1) + (mask + 1);
+	} while (--limit > 0);
+}
+
+/********
+ * LZMA *
+ ********/
+
+/* Get pointer to literal coder probability array. */
+static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
+{
+	uint32_t prev_byte = dict_get(&s->dict, 0);
+	uint32_t low = prev_byte >> (8 - s->lzma.lc);
+	uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
+	return s->lzma.literal[low + high];
+}
+
+/* Decode a literal (one 8-bit byte) */
+static void lzma_literal(struct xz_dec_lzma2 *s)
+{
+	uint16_t *probs;
+	uint32_t symbol;
+	uint32_t match_byte;
+	uint32_t match_bit;
+	uint32_t offset;
+	uint32_t i;
+
+	probs = lzma_literal_probs(s);
+
+	if (lzma_state_is_literal(s->lzma.state)) {
+		symbol = rc_bittree(&s->rc, probs, 0x100);
+	} else {
+		symbol = 1;
+		match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
+		offset = 0x100;
+
+		do {
+			match_bit = match_byte & offset;
+			match_byte <<= 1;
+			i = offset + match_bit + symbol;
+
+			if (rc_bit(&s->rc, &probs[i])) {
+				symbol = (symbol << 1) + 1;
+				offset &= match_bit;
+			} else {
+				symbol <<= 1;
+				offset &= ~match_bit;
+			}
+		} while (symbol < 0x100);
+	}
+
+	dict_put(&s->dict, (uint8_t)symbol);
+	lzma_state_literal(&s->lzma.state);
+}
+
+/* Decode the length of the match into s->lzma.len. */
+static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
+		     uint32_t pos_state)
+{
+	uint16_t *probs;
+	uint32_t limit;
+
+	if (!rc_bit(&s->rc, &l->choice)) {
+		probs = l->low[pos_state];
+		limit = LEN_LOW_SYMBOLS;
+		s->lzma.len = MATCH_LEN_MIN;
+	} else {
+		if (!rc_bit(&s->rc, &l->choice2)) {
+			probs = l->mid[pos_state];
+			limit = LEN_MID_SYMBOLS;
+			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
+		} else {
+			probs = l->high;
+			limit = LEN_HIGH_SYMBOLS;
+			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
+					+ LEN_MID_SYMBOLS;
+		}
+	}
+
+	s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
+}
+
+/* Decode a match. The distance will be stored in s->lzma.rep0. */
+static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+	uint16_t *probs;
+	uint32_t dist_slot;
+	uint32_t limit;
+
+	lzma_state_match(&s->lzma.state);
+
+	s->lzma.rep3 = s->lzma.rep2;
+	s->lzma.rep2 = s->lzma.rep1;
+	s->lzma.rep1 = s->lzma.rep0;
+
+	lzma_len(s, &s->lzma.match_len_dec, pos_state);
+
+	probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
+	dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
+
+	if (dist_slot < DIST_MODEL_START) {
+		s->lzma.rep0 = dist_slot;
+	} else {
+		limit = (dist_slot >> 1) - 1;
+		s->lzma.rep0 = 2 + (dist_slot & 1);
+
+		if (dist_slot < DIST_MODEL_END) {
+			s->lzma.rep0 <<= limit;
+			probs = s->lzma.dist_special + s->lzma.rep0
+					- dist_slot - 1;
+			rc_bittree_reverse(&s->rc, probs,
+					&s->lzma.rep0, limit);
+		} else {
+			rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
+			s->lzma.rep0 <<= ALIGN_BITS;
+			rc_bittree_reverse(&s->rc, s->lzma.dist_align,
+					&s->lzma.rep0, ALIGN_BITS);
+		}
+	}
+}
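
A worked example may help with the distance-slot arithmetic above. Slots below DIST_MODEL_START are the distance itself; otherwise the base distance is (2 + (slot & 1)) << ((slot >> 1) - 1) and the remaining (slot >> 1) - 1 bits, however they are coded (reverse bittree below DIST_MODEL_END, direct bits plus the four align bits above it), pick a value within that range. The small stand-alone program below is hypothetical and only prints the ranges covered by the first few slots.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t slot;

	for (slot = 0; slot < 16; ++slot) {
		if (slot < 4) {
			printf("slot %2u -> distance %u\n", slot, slot);
		} else {
			uint32_t limit = (slot >> 1) - 1;
			uint32_t base = (2 + (slot & 1)) << limit;

			printf("slot %2u -> distances %u-%u\n",
			       slot, base, base + (1u << limit) - 1);
		}
	}
	/* Slot 13 ends at 127 and slot 14 covers 128-191, matching the
	 * DIST_MODEL_END comment in xz_lzma2.h. */
	return 0;
}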
+
+/*
+ * Decode a repeated match. The distance is one of the four most recently
+ * seen matches. The distance will be stored in s->lzma.rep0.
+ */
+static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+	uint32_t tmp;
+
+	if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
+		if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
+				s->lzma.state][pos_state])) {
+			lzma_state_short_rep(&s->lzma.state);
+			s->lzma.len = 1;
+			return;
+		}
+	} else {
+		if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
+			tmp = s->lzma.rep1;
+		} else {
+			if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
+				tmp = s->lzma.rep2;
+			} else {
+				tmp = s->lzma.rep3;
+				s->lzma.rep3 = s->lzma.rep2;
+			}
+
+			s->lzma.rep2 = s->lzma.rep1;
+		}
+
+		s->lzma.rep1 = s->lzma.rep0;
+		s->lzma.rep0 = tmp;
+	}
+
+	lzma_state_long_rep(&s->lzma.state);
+	lzma_len(s, &s->lzma.rep_len_dec, pos_state);
+}
+
+/* LZMA decoder core */
+static bool lzma_main(struct xz_dec_lzma2 *s)
+{
+	uint32_t pos_state;
+
+	/*
+	 * If the dictionary limit was reached during the previous call, try
+	 * to finish the possibly pending repeat in the dictionary.
+	 */
+	if (dict_has_space(&s->dict) && s->lzma.len > 0)
+		dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);
+
+	/*
+	 * Decode more LZMA symbols. One iteration may consume up to
+	 * LZMA_IN_REQUIRED - 1 bytes.
+	 */
+	while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
+		pos_state = s->dict.pos & s->lzma.pos_mask;
+
+		if (!rc_bit(&s->rc, &s->lzma.is_match[
+				s->lzma.state][pos_state])) {
+			lzma_literal(s);
+		} else {
+			if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
+				lzma_rep_match(s, pos_state);
+			else
+				lzma_match(s, pos_state);
+
+			if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
+				return false;
+		}
+	}
+
+	/*
+	 * Having the range decoder always normalized when we are outside
+	 * this function makes it easier to correctly handle end of the chunk.
+	 */
+	rc_normalize(&s->rc);
+
+	return true;
+}
+
+/*
+ * Reset the LZMA decoder and range decoder state. The dictionary is not reset
+ * here, because the LZMA state may be reset without resetting the dictionary.
+ */
+static void lzma_reset(struct xz_dec_lzma2 *s)
+{
+	uint16_t *probs;
+	size_t i;
+
+	s->lzma.state = STATE_LIT_LIT;
+	s->lzma.rep0 = 0;
+	s->lzma.rep1 = 0;
+	s->lzma.rep2 = 0;
+	s->lzma.rep3 = 0;
+
+	/*
+	 * All probabilities are initialized to the same value. This hack
+	 * makes the code smaller by avoiding a separate loop for each
+	 * probability array.
+	 *
+	 * This could be optimized so that only the part of the literal
+	 * probabilities that is actually required gets initialized. In
+	 * the common case we would write 12 KiB less.
+	 */
+	probs = s->lzma.is_match[0];
+	for (i = 0; i < PROBS_TOTAL; ++i)
+		probs[i] = RC_BIT_MODEL_TOTAL / 2;
+
+	rc_reset(&s->rc);
+}
+
+/*
+ * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
+ * from the decoded lp and pb values. On success, the LZMA decoder state is
+ * reset and true is returned.
+ */
+static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
+{
+	if (props > (4 * 5 + 4) * 9 + 8)
+		return false;
+
+	s->lzma.pos_mask = 0;
+	while (props >= 9 * 5) {
+		props -= 9 * 5;
+		++s->lzma.pos_mask;
+	}
+
+	s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
+
+	s->lzma.literal_pos_mask = 0;
+	while (props >= 9) {
+		props -= 9;
+		++s->lzma.literal_pos_mask;
+	}
+
+	s->lzma.lc = props;
+
+	if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
+		return false;
+
+	s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
+
+	lzma_reset(s);
+
+	return true;
+}
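
lzma_props() unpacks the single LZMA properties byte, which encodes the classic lc/lp/pb triple as (pb * 5 + lp) * 9 + lc, and turns lp and pb into the bit masks used by the decoder. The sketch below does the same decomposition with division instead of the repeated subtraction above; decode_props is a made-up name used only for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/*
 * Stand-alone version of the lc/lp/pb decoding done by lzma_props().
 * For the common default lc=3, lp=0, pb=2 the byte is 93 (0x5D), which
 * yields pos_mask = 3 and literal_pos_mask = 0.
 */
static bool decode_props(uint8_t props, uint32_t *lc, uint32_t *lp, uint32_t *pb)
{
	if (props > (4 * 5 + 4) * 9 + 8)
		return false;

	*pb = props / (9 * 5);
	props %= 9 * 5;
	*lp = props / 9;
	*lc = props % 9;

	/* The kernel decoder additionally requires lc + lp <= 4. */
	return *lc + *lp <= 4;
}

int main(void)
{
	uint32_t lc, lp, pb;

	if (decode_props(0x5D, &lc, &lp, &pb))
		printf("lc=%u lp=%u pb=%u pos_mask=%u literal_pos_mask=%u\n",
		       lc, lp, pb, (1u << pb) - 1, (1u << lp) - 1);
	return 0;
}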
+
+/*********
+ * LZMA2 *
+ *********/
+
+/*
+ * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
+ * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
+ * wrapper function takes care of making the LZMA decoder's assumption safe.
+ *
+ * As long as there is plenty of input left to be decoded in the current LZMA
+ * chunk, we decode directly from the caller-supplied input buffer until
+ * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
+ * s->temp.buf, which (hopefully) gets filled on the next call to this
+ * function. We decode a few bytes from the temporary buffer so that we can
+ * continue decoding from the caller-supplied input buffer again.
+ */
+static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
+{
+	size_t in_avail;
+	uint32_t tmp;
+
+	in_avail = b->in_size - b->in_pos;
+	if (s->temp.size > 0 || s->lzma2.compressed == 0) {
+		tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
+		if (tmp > s->lzma2.compressed - s->temp.size)
+			tmp = s->lzma2.compressed - s->temp.size;
+		if (tmp > in_avail)
+			tmp = in_avail;
+
+		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
+
+		if (s->temp.size + tmp == s->lzma2.compressed) {
+			memzero(s->temp.buf + s->temp.size + tmp,
+					sizeof(s->temp.buf)
+						- s->temp.size - tmp);
+			s->rc.in_limit = s->temp.size + tmp;
+		} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
+			s->temp.size += tmp;
+			b->in_pos += tmp;
+			return true;
+		} else {
+			s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
+		}
+
+		s->rc.in = s->temp.buf;
+		s->rc.in_pos = 0;
+
+		if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
+			return false;
+
+		s->lzma2.compressed -= s->rc.in_pos;
+
+		if (s->rc.in_pos < s->temp.size) {
+			s->temp.size -= s->rc.in_pos;
+			memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
+					s->temp.size);
+			return true;
+		}
+
+		b->in_pos += s->rc.in_pos - s->temp.size;
+		s->temp.size = 0;
+	}
+
+	in_avail = b->in_size - b->in_pos;
+	if (in_avail >= LZMA_IN_REQUIRED) {
+		s->rc.in = b->in;
+		s->rc.in_pos = b->in_pos;
+
+		if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
+			s->rc.in_limit = b->in_pos + s->lzma2.compressed;
+		else
+			s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
+
+		if (!lzma_main(s))
+			return false;
+
+		in_avail = s->rc.in_pos - b->in_pos;
+		if (in_avail > s->lzma2.compressed)
+			return false;
+
+		s->lzma2.compressed -= in_avail;
+		b->in_pos = s->rc.in_pos;
+	}
+
+	in_avail = b->in_size - b->in_pos;
+	if (in_avail < LZMA_IN_REQUIRED) {
+		if (in_avail > s->lzma2.compressed)
+			in_avail = s->lzma2.compressed;
+
+		memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
+		s->temp.size = in_avail;
+		b->in_pos += in_avail;
+	}
+
+	return true;
+}
+
+/*
+ * Take care of the LZMA2 control layer, and forward the job of actual LZMA
+ * decoding or copying of uncompressed chunks to other functions.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+				       struct xz_buf *b)
+{
+	uint32_t tmp;
+
+	while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
+		switch (s->lzma2.sequence) {
+		case SEQ_CONTROL:
+			/*
+			 * LZMA2 control byte
+			 *
+			 * Exact values:
+			 *   0x00   End marker
+			 *   0x01   Dictionary reset followed by
+			 *          an uncompressed chunk
+			 *   0x02   Uncompressed chunk (no dictionary reset)
+			 *
+			 * Highest three bits (s->control & 0xE0):
+			 *   0xE0   Dictionary reset, new properties and state
+			 *          reset, followed by LZMA compressed chunk
+			 *   0xC0   New properties and state reset, followed
+			 *          by LZMA compressed chunk (no dictionary
+			 *          reset)
+			 *   0xA0   State reset using old properties,
+			 *          followed by LZMA compressed chunk (no
+			 *          dictionary reset)
+			 *   0x80   LZMA chunk (no dictionary or state reset)
+			 *
+			 * For LZMA compressed chunks, the lowest five bits
+			 * (s->control & 0x1F) are the highest bits of the
+			 * uncompressed size (bits 16-20).
+			 *
+			 * A new LZMA2 stream must begin with a dictionary
+			 * reset. The first LZMA chunk must set new
+			 * properties and reset the LZMA state.
+			 *
+			 * Values that don't match anything described above
+			 * are invalid and we return XZ_DATA_ERROR.
+			 */
+			tmp = b->in[b->in_pos++];
+
+			if (tmp >= 0xE0 || tmp == 0x01) {
+				s->lzma2.need_props = true;
+				s->lzma2.need_dict_reset = false;
+				dict_reset(&s->dict, b);
+			} else if (s->lzma2.need_dict_reset) {
+				return XZ_DATA_ERROR;
+			}
+
+			if (tmp >= 0x80) {
+				s->lzma2.uncompressed = (tmp & 0x1F) << 16;
+				s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
+
+				if (tmp >= 0xC0) {
+					/*
+					 * When there are new properties,
+					 * state reset is done at
+					 * SEQ_PROPERTIES.
+					 */
+					s->lzma2.need_props = false;
+					s->lzma2.next_sequence
+							= SEQ_PROPERTIES;
+
+				} else if (s->lzma2.need_props) {
+					return XZ_DATA_ERROR;
+
+				} else {
+					s->lzma2.next_sequence
+							= SEQ_LZMA_PREPARE;
+					if (tmp >= 0xA0)
+						lzma_reset(s);
+				}
+			} else {
+				if (tmp == 0x00)
+					return XZ_STREAM_END;
+
+				if (tmp > 0x02)
+					return XZ_DATA_ERROR;
+
+				s->lzma2.sequence = SEQ_COMPRESSED_0;
+				s->lzma2.next_sequence = SEQ_COPY;
+			}
+
+			break;
+
+		case SEQ_UNCOMPRESSED_1:
+			s->lzma2.uncompressed
+					+= (uint32_t)b->in[b->in_pos++] << 8;
+			s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
+			break;
+
+		case SEQ_UNCOMPRESSED_2:
+			s->lzma2.uncompressed
+					+= (uint32_t)b->in[b->in_pos++] + 1;
+			s->lzma2.sequence = SEQ_COMPRESSED_0;
+			break;
+
+		case SEQ_COMPRESSED_0:
+			s->lzma2.compressed
+					= (uint32_t)b->in[b->in_pos++] << 8;
+			s->lzma2.sequence = SEQ_COMPRESSED_1;
+			break;
+
+		case SEQ_COMPRESSED_1:
+			s->lzma2.compressed
+					+= (uint32_t)b->in[b->in_pos++] + 1;
+			s->lzma2.sequence = s->lzma2.next_sequence;
+			break;
+
+		case SEQ_PROPERTIES:
+			if (!lzma_props(s, b->in[b->in_pos++]))
+				return XZ_DATA_ERROR;
+
+			s->lzma2.sequence = SEQ_LZMA_PREPARE;
+
+		case SEQ_LZMA_PREPARE:
+			if (s->lzma2.compressed < RC_INIT_BYTES)
+				return XZ_DATA_ERROR;
+
+			if (!rc_read_init(&s->rc, b))
+				return XZ_OK;
+
+			s->lzma2.compressed -= RC_INIT_BYTES;
+			s->lzma2.sequence = SEQ_LZMA_RUN;
+
+		case SEQ_LZMA_RUN:
+			/*
+			 * Set dictionary limit to indicate how much we want
+			 * to be encoded at maximum. Decode new data into the
+			 * dictionary. Flush the new data from dictionary to
+			 * b->out. Check if we finished decoding this chunk.
+			 * In case the dictionary got full but we didn't fill
+			 * the output buffer yet, we may run this loop
+			 * multiple times without changing s->lzma2.sequence.
+			 */
+			dict_limit(&s->dict, min_t(size_t,
+					b->out_size - b->out_pos,
+					s->lzma2.uncompressed));
+			if (!lzma2_lzma(s, b))
+				return XZ_DATA_ERROR;
+
+			s->lzma2.uncompressed -= dict_flush(&s->dict, b);
+
+			if (s->lzma2.uncompressed == 0) {
+				if (s->lzma2.compressed > 0 || s->lzma.len > 0
+						|| !rc_is_finished(&s->rc))
+					return XZ_DATA_ERROR;
+
+				rc_reset(&s->rc);
+				s->lzma2.sequence = SEQ_CONTROL;
+
+			} else if (b->out_pos == b->out_size
+					|| (b->in_pos == b->in_size
+						&& s->temp.size
+						< s->lzma2.compressed)) {
+				return XZ_OK;
+			}
+
+			break;
+
+		case SEQ_COPY:
+			dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
+			if (s->lzma2.compressed > 0)
+				return XZ_OK;
+
+			s->lzma2.sequence = SEQ_CONTROL;
+			break;
+		}
+	}
+
+	return XZ_OK;
+}
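
To make the SEQ_CONTROL table above concrete, the sketch below classifies a control byte the way this function does; the real code additionally tracks need_dict_reset and need_props so that chunks arriving in an invalid order are rejected. classify() and the example bytes are illustrative only.

#include <stdint.h>
#include <stdio.h>

static const char *classify(uint8_t c)
{
	if (c == 0x00)
		return "end marker";
	if (c == 0x01)
		return "dictionary reset + uncompressed chunk";
	if (c == 0x02)
		return "uncompressed chunk";
	if (c < 0x80)
		return "invalid";
	if (c >= 0xE0)
		return "LZMA chunk, dict + props + state reset";
	if (c >= 0xC0)
		return "LZMA chunk, props + state reset";
	if (c >= 0xA0)
		return "LZMA chunk, state reset";
	return "LZMA chunk, no reset";
}

int main(void)
{
	/* For LZMA chunks the low five bits are bits 16-20 of
	 * (uncompressed size - 1); e.g. control byte 0xE0 followed by
	 * size bytes 0x00 0xFF starts a 256-byte chunk. */
	printf("0xE0: %s\n", classify(0xE0));
	printf("0x80: %s\n", classify(0x80));
	printf("0x03: %s\n", classify(0x03));
	return 0;
}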
+
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+						   uint32_t dict_max)
+{
+	struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->dict.mode = mode;
+	s->dict.size_max = dict_max;
+
+	if (DEC_IS_PREALLOC(mode)) {
+		s->dict.buf = vmalloc(dict_max);
+		if (s->dict.buf == NULL) {
+			kfree(s);
+			return NULL;
+		}
+	} else if (DEC_IS_DYNALLOC(mode)) {
+		s->dict.buf = NULL;
+		s->dict.allocated = 0;
+	}
+
+	return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
+{
+	/* This limits dictionary size to 3 GiB to keep parsing simpler. */
+	if (props > 39)
+		return XZ_OPTIONS_ERROR;
+
+	s->dict.size = 2 + (props & 1);
+	s->dict.size <<= (props >> 1) + 11;
+
+	if (DEC_IS_MULTI(s->dict.mode)) {
+		if (s->dict.size > s->dict.size_max)
+			return XZ_MEMLIMIT_ERROR;
+
+		s->dict.end = s->dict.size;
+
+		if (DEC_IS_DYNALLOC(s->dict.mode)) {
+			if (s->dict.allocated < s->dict.size) {
+				vfree(s->dict.buf);
+				s->dict.buf = vmalloc(s->dict.size);
+				if (s->dict.buf == NULL) {
+					s->dict.allocated = 0;
+					return XZ_MEM_ERROR;
+				}
+			}
+		}
+	}
+
+	s->lzma.len = 0;
+
+	s->lzma2.sequence = SEQ_CONTROL;
+	s->lzma2.need_dict_reset = true;
+
+	s->temp.size = 0;
+
+	return XZ_OK;
+}
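
The two lines that derive s->dict.size above implement the usual LZMA2 dictionary-size encoding: sizes alternate between 2^n and 3 * 2^(n-1), starting at 4 KiB for props 0 and ending at the 3 GiB cap for props 39. A small stand-alone check of that formula, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t props;

	for (props = 0; props <= 39; props += 13) {
		uint32_t size = (uint32_t)(2 + (props & 1))
				<< ((props >> 1) + 11);

		printf("props %2u -> dictionary %u bytes\n", props, size);
	}
	/* props 0 -> 4096, 13 -> 393216, 26 -> 33554432, 39 -> 3221225472 */
	return 0;
}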
+
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
+{
+	if (DEC_IS_MULTI(s->dict.mode))
+		vfree(s->dict.buf);
+
+	kfree(s);
+}
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
new file mode 100644
index 0000000..ac809b1
--- /dev/null
+++ b/lib/xz/xz_dec_stream.c
@@ -0,0 +1,821 @@
+/*
+ * .xz Stream decoder
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_stream.h"
+
+/* Hash used to validate the Index field */
+struct xz_dec_hash {
+	vli_type unpadded;
+	vli_type uncompressed;
+	uint32_t crc32;
+};
+
+struct xz_dec {
+	/* Position in dec_main() */
+	enum {
+		SEQ_STREAM_HEADER,
+		SEQ_BLOCK_START,
+		SEQ_BLOCK_HEADER,
+		SEQ_BLOCK_UNCOMPRESS,
+		SEQ_BLOCK_PADDING,
+		SEQ_BLOCK_CHECK,
+		SEQ_INDEX,
+		SEQ_INDEX_PADDING,
+		SEQ_INDEX_CRC32,
+		SEQ_STREAM_FOOTER
+	} sequence;
+
+	/* Position in variable-length integers and Check fields */
+	uint32_t pos;
+
+	/* Variable-length integer decoded by dec_vli() */
+	vli_type vli;
+
+	/* Saved in_pos and out_pos */
+	size_t in_start;
+	size_t out_start;
+
+	/* CRC32 value in Block or Index */
+	uint32_t crc32;
+
+	/* Type of the integrity check calculated from uncompressed data */
+	enum xz_check check_type;
+
+	/* Operation mode */
+	enum xz_mode mode;
+
+	/*
+	 * True if the next call to xz_dec_run() is allowed to return
+	 * XZ_BUF_ERROR.
+	 */
+	bool allow_buf_error;
+
+	/* Information stored in Block Header */
+	struct {
+		/*
+		 * Value stored in the Compressed Size field, or
+		 * VLI_UNKNOWN if Compressed Size is not present.
+		 */
+		vli_type compressed;
+
+		/*
+		 * Value stored in the Uncompressed Size field, or
+		 * VLI_UNKNOWN if Uncompressed Size is not present.
+		 */
+		vli_type uncompressed;
+
+		/* Size of the Block Header field */
+		uint32_t size;
+	} block_header;
+
+	/* Information collected when decoding Blocks */
+	struct {
+		/* Observed compressed size of the current Block */
+		vli_type compressed;
+
+		/* Observed uncompressed size of the current Block */
+		vli_type uncompressed;
+
+		/* Number of Blocks decoded so far */
+		vli_type count;
+
+		/*
+		 * Hash calculated from the Block sizes. This is used to
+		 * validate the Index field.
+		 */
+		struct xz_dec_hash hash;
+	} block;
+
+	/* Variables needed when verifying the Index field */
+	struct {
+		/* Position in dec_index() */
+		enum {
+			SEQ_INDEX_COUNT,
+			SEQ_INDEX_UNPADDED,
+			SEQ_INDEX_UNCOMPRESSED
+		} sequence;
+
+		/* Size of the Index in bytes */
+		vli_type size;
+
+		/* Number of Records (matches block.count in valid files) */
+		vli_type count;
+
+		/*
+		 * Hash calculated from the Records (matches block.hash in
+		 * valid files).
+		 */
+		struct xz_dec_hash hash;
+	} index;
+
+	/*
+	 * Temporary buffer needed to hold Stream Header, Block Header,
+	 * and Stream Footer. The Block Header is the biggest (1 KiB)
+	 * so we reserve space according to that. buf[] has to be aligned
+	 * to a multiple of four bytes; the size_t variables before it
+	 * should guarantee this.
+	 */
+	struct {
+		size_t pos;
+		size_t size;
+		uint8_t buf[1024];
+	} temp;
+
+	struct xz_dec_lzma2 *lzma2;
+
+#ifdef XZ_DEC_BCJ
+	struct xz_dec_bcj *bcj;
+	bool bcj_active;
+#endif
+};
+
+#ifdef XZ_DEC_ANY_CHECK
+/* Sizes of the Check field with different Check IDs */
+static const uint8_t check_sizes[16] = {
+	0,
+	4, 4, 4,
+	8, 8, 8,
+	16, 16, 16,
+	32, 32, 32,
+	64, 64, 64
+};
+#endif
+
+/*
+ * Fill s->temp by copying data starting from b->in[b->in_pos]. The caller
+ * must have set s->temp.size to indicate how much data we are supposed
+ * to copy into s->temp.buf. Return true once s->temp.pos has reached
+ * s->temp.size.
+ */
+static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
+{
+	size_t copy_size = min_t(size_t,
+			b->in_size - b->in_pos, s->temp.size - s->temp.pos);
+
+	memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
+	b->in_pos += copy_size;
+	s->temp.pos += copy_size;
+
+	if (s->temp.pos == s->temp.size) {
+		s->temp.pos = 0;
+		return true;
+	}
+
+	return false;
+}
+
+/* Decode a variable-length integer (little-endian base-128 encoding) */
+static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
+			   size_t *in_pos, size_t in_size)
+{
+	uint8_t byte;
+
+	if (s->pos == 0)
+		s->vli = 0;
+
+	while (*in_pos < in_size) {
+		byte = in[*in_pos];
+		++*in_pos;
+
+		s->vli |= (vli_type)(byte & 0x7F) << s->pos;
+
+		if ((byte & 0x80) == 0) {
+			/* Don't allow non-minimal encodings. */
+			if (byte == 0 && s->pos != 0)
+				return XZ_DATA_ERROR;
+
+			s->pos = 0;
+			return XZ_STREAM_END;
+		}
+
+		s->pos += 7;
+		if (s->pos == 7 * VLI_BYTES_MAX)
+			return XZ_DATA_ERROR;
+	}
+
+	return XZ_OK;
+}
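
dec_vli() reads the little-endian base-128 integers used throughout the .xz container: seven payload bits per byte, bit 0x80 set on every byte except the last, and non-minimal encodings (a trailing 0x00 continuation byte) rejected. The encoder side, shown here purely as an illustration since the kernel never needs it, makes the format obvious:

#include <stdint.h>
#include <stdio.h>

/*
 * Encoder counterpart of dec_vli(). 300 (0x12C) encodes as AC 02, and any
 * value below 128 is a single byte. (Sketch only; not part of the patch.)
 */
static size_t enc_vli(uint64_t value, uint8_t *out)
{
	size_t n = 0;

	while (value >= 0x80) {
		out[n++] = (uint8_t)(value & 0x7F) | 0x80;
		value >>= 7;
	}
	out[n++] = (uint8_t)value;
	return n;
}

int main(void)
{
	uint8_t buf[10];
	size_t i, n = enc_vli(300, buf);

	for (i = 0; i < n; ++i)
		printf("%02X ", buf[i]);
	printf("\n");	/* prints "AC 02" */
	return 0;
}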
+
+/*
+ * Decode the Compressed Data field from a Block. Update and validate
+ * the observed compressed and uncompressed sizes of the Block so that
+ * they don't exceed the values possibly stored in the Block Header
+ * (validation assumes that no integer overflow occurs, since vli_type
+ * is normally uint64_t). Update the CRC32 if presence of the CRC32
+ * field was indicated in Stream Header.
+ *
+ * Once the decoding is finished, validate that the observed sizes match
+ * the sizes possibly stored in the Block Header. Update the hash and
+ * Block count, which are later used to validate the Index field.
+ */
+static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	s->in_start = b->in_pos;
+	s->out_start = b->out_pos;
+
+#ifdef XZ_DEC_BCJ
+	if (s->bcj_active)
+		ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
+	else
+#endif
+		ret = xz_dec_lzma2_run(s->lzma2, b);
+
+	s->block.compressed += b->in_pos - s->in_start;
+	s->block.uncompressed += b->out_pos - s->out_start;
+
+	/*
+	 * There is no need to separately check for VLI_UNKNOWN, since
+	 * the observed sizes are always smaller than VLI_UNKNOWN.
+	 */
+	if (s->block.compressed > s->block_header.compressed
+			|| s->block.uncompressed
+				> s->block_header.uncompressed)
+		return XZ_DATA_ERROR;
+
+	if (s->check_type == XZ_CHECK_CRC32)
+		s->crc32 = xz_crc32(b->out + s->out_start,
+				b->out_pos - s->out_start, s->crc32);
+
+	if (ret == XZ_STREAM_END) {
+		if (s->block_header.compressed != VLI_UNKNOWN
+				&& s->block_header.compressed
+					!= s->block.compressed)
+			return XZ_DATA_ERROR;
+
+		if (s->block_header.uncompressed != VLI_UNKNOWN
+				&& s->block_header.uncompressed
+					!= s->block.uncompressed)
+			return XZ_DATA_ERROR;
+
+		s->block.hash.unpadded += s->block_header.size
+				+ s->block.compressed;
+
+#ifdef XZ_DEC_ANY_CHECK
+		s->block.hash.unpadded += check_sizes[s->check_type];
+#else
+		if (s->check_type == XZ_CHECK_CRC32)
+			s->block.hash.unpadded += 4;
+#endif
+
+		s->block.hash.uncompressed += s->block.uncompressed;
+		s->block.hash.crc32 = xz_crc32(
+				(const uint8_t *)&s->block.hash,
+				sizeof(s->block.hash), s->block.hash.crc32);
+
+		++s->block.count;
+	}
+
+	return ret;
+}
+
+/* Update the Index size and the CRC32 value. */
+static void index_update(struct xz_dec *s, const struct xz_buf *b)
+{
+	size_t in_used = b->in_pos - s->in_start;
+	s->index.size += in_used;
+	s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32);
+}
+
+/*
+ * Decode the Number of Records, Unpadded Size, and Uncompressed Size
+ * fields from the Index field. That is, Index Padding and CRC32 are not
+ * decoded by this function.
+ *
+ * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
+ * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
+ */
+static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	do {
+		ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
+		if (ret != XZ_STREAM_END) {
+			index_update(s, b);
+			return ret;
+		}
+
+		switch (s->index.sequence) {
+		case SEQ_INDEX_COUNT:
+			s->index.count = s->vli;
+
+			/*
+			 * Validate that the Number of Records field
+			 * indicates the same number of Records as
+			 * there were Blocks in the Stream.
+			 */
+			if (s->index.count != s->block.count)
+				return XZ_DATA_ERROR;
+
+			s->index.sequence = SEQ_INDEX_UNPADDED;
+			break;
+
+		case SEQ_INDEX_UNPADDED:
+			s->index.hash.unpadded += s->vli;
+			s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
+			break;
+
+		case SEQ_INDEX_UNCOMPRESSED:
+			s->index.hash.uncompressed += s->vli;
+			s->index.hash.crc32 = xz_crc32(
+					(const uint8_t *)&s->index.hash,
+					sizeof(s->index.hash),
+					s->index.hash.crc32);
+			--s->index.count;
+			s->index.sequence = SEQ_INDEX_UNPADDED;
+			break;
+		}
+	} while (s->index.count > 0);
+
+	return XZ_STREAM_END;
+}
+
+/*
+ * Validate that the next four input bytes match the value of s->crc32.
+ * s->pos must be zero when starting to validate the first byte.
+ */
+static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b)
+{
+	do {
+		if (b->in_pos == b->in_size)
+			return XZ_OK;
+
+		if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++])
+			return XZ_DATA_ERROR;
+
+		s->pos += 8;
+
+	} while (s->pos < 32);
+
+	s->crc32 = 0;
+	s->pos = 0;
+
+	return XZ_STREAM_END;
+}
+
+#ifdef XZ_DEC_ANY_CHECK
+/*
+ * Skip over the Check field when the Check ID is not supported.
+ * Returns true once the whole Check field has been skipped over.
+ */
+static bool check_skip(struct xz_dec *s, struct xz_buf *b)
+{
+	while (s->pos < check_sizes[s->check_type]) {
+		if (b->in_pos == b->in_size)
+			return false;
+
+		++b->in_pos;
+		++s->pos;
+	}
+
+	s->pos = 0;
+
+	return true;
+}
+#endif
+
+/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
+static enum xz_ret dec_stream_header(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
+		return XZ_FORMAT_ERROR;
+
+	if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
+			!= get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
+		return XZ_OPTIONS_ERROR;
+
+	/*
+	 * Of integrity checks, we support only none (Check ID = 0) and
+	 * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined,
+	 * we will accept other check types too, but then the check won't
+	 * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
+	 */
+	s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
+
+#ifdef XZ_DEC_ANY_CHECK
+	if (s->check_type > XZ_CHECK_MAX)
+		return XZ_OPTIONS_ERROR;
+
+	if (s->check_type > XZ_CHECK_CRC32)
+		return XZ_UNSUPPORTED_CHECK;
+#else
+	if (s->check_type > XZ_CHECK_CRC32)
+		return XZ_OPTIONS_ERROR;
+#endif
+
+	return XZ_OK;
+}
+
+/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
+static enum xz_ret dec_stream_footer(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
+		return XZ_DATA_ERROR;
+
+	if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Validate Backward Size. Note that we never added the size of the
+	 * Index CRC32 field to s->index.size, thus we use s->index.size / 4
+	 * instead of s->index.size / 4 - 1.
+	 */
+	if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Use XZ_STREAM_END instead of XZ_OK to be more convenient
+	 * for the caller.
+	 */
+	return XZ_STREAM_END;
+}
+
+/* Decode the Block Header and initialize the filter chain. */
+static enum xz_ret dec_block_header(struct xz_dec *s)
+{
+	enum xz_ret ret;
+
+	/*
+	 * Validate the CRC32. We know that the temp buffer is at least
+	 * eight bytes so this is safe.
+	 */
+	s->temp.size -= 4;
+	if (xz_crc32(s->temp.buf, s->temp.size, 0)
+			!= get_le32(s->temp.buf + s->temp.size))
+		return XZ_DATA_ERROR;
+
+	s->temp.pos = 2;
+
+	/*
+	 * Catch unsupported Block Flags. We support only one or two filters
+	 * in the chain, so we catch that with the same test.
+	 */
+#ifdef XZ_DEC_BCJ
+	if (s->temp.buf[1] & 0x3E)
+#else
+	if (s->temp.buf[1] & 0x3F)
+#endif
+		return XZ_OPTIONS_ERROR;
+
+	/* Compressed Size */
+	if (s->temp.buf[1] & 0x40) {
+		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+					!= XZ_STREAM_END)
+			return XZ_DATA_ERROR;
+
+		s->block_header.compressed = s->vli;
+	} else {
+		s->block_header.compressed = VLI_UNKNOWN;
+	}
+
+	/* Uncompressed Size */
+	if (s->temp.buf[1] & 0x80) {
+		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+				!= XZ_STREAM_END)
+			return XZ_DATA_ERROR;
+
+		s->block_header.uncompressed = s->vli;
+	} else {
+		s->block_header.uncompressed = VLI_UNKNOWN;
+	}
+
+#ifdef XZ_DEC_BCJ
+	/* If there are two filters, the first one must be a BCJ filter. */
+	s->bcj_active = s->temp.buf[1] & 0x01;
+	if (s->bcj_active) {
+		if (s->temp.size - s->temp.pos < 2)
+			return XZ_OPTIONS_ERROR;
+
+		ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
+		if (ret != XZ_OK)
+			return ret;
+
+		/*
+		 * We don't support custom start offset,
+		 * so Size of Properties must be zero.
+		 */
+		if (s->temp.buf[s->temp.pos++] != 0x00)
+			return XZ_OPTIONS_ERROR;
+	}
+#endif
+
+	/* Valid Filter Flags always take at least two bytes. */
+	if (s->temp.size - s->temp.pos < 2)
+		return XZ_DATA_ERROR;
+
+	/* Filter ID = LZMA2 */
+	if (s->temp.buf[s->temp.pos++] != 0x21)
+		return XZ_OPTIONS_ERROR;
+
+	/* Size of Properties = 1-byte Filter Properties */
+	if (s->temp.buf[s->temp.pos++] != 0x01)
+		return XZ_OPTIONS_ERROR;
+
+	/* Filter Properties contains LZMA2 dictionary size. */
+	if (s->temp.size - s->temp.pos < 1)
+		return XZ_DATA_ERROR;
+
+	ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
+	if (ret != XZ_OK)
+		return ret;
+
+	/* The rest must be Header Padding. */
+	while (s->temp.pos < s->temp.size)
+		if (s->temp.buf[s->temp.pos++] != 0x00)
+			return XZ_OPTIONS_ERROR;
+
+	s->temp.pos = 0;
+	s->block.compressed = 0;
+	s->block.uncompressed = 0;
+
+	return XZ_OK;
+}
+
+static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	/*
+	 * Store the start position for the case when we are in the middle
+	 * of the Index field.
+	 */
+	s->in_start = b->in_pos;
+
+	while (true) {
+		switch (s->sequence) {
+		case SEQ_STREAM_HEADER:
+			/*
+			 * Stream Header is copied to s->temp, and then
+			 * decoded from there. This way if the caller
+			 * gives us only little input at a time, we can
+			 * still keep the Stream Header decoding code
+			 * simple. A similar approach is used in many places
+			 * in this file.
+			 */
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			/*
+			 * If dec_stream_header() returns
+			 * XZ_UNSUPPORTED_CHECK, it is still possible
+			 * to continue decoding if working in multi-call
+			 * mode. Thus, update s->sequence before calling
+			 * dec_stream_header().
+			 */
+			s->sequence = SEQ_BLOCK_START;
+
+			ret = dec_stream_header(s);
+			if (ret != XZ_OK)
+				return ret;
+
+		case SEQ_BLOCK_START:
+			/* We need one byte of input to continue. */
+			if (b->in_pos == b->in_size)
+				return XZ_OK;
+
+			/* See if this is the beginning of the Index field. */
+			if (b->in[b->in_pos] == 0) {
+				s->in_start = b->in_pos++;
+				s->sequence = SEQ_INDEX;
+				break;
+			}
+
+			/*
+			 * Calculate the size of the Block Header and
+			 * prepare to decode it.
+			 */
+			s->block_header.size
+				= ((uint32_t)b->in[b->in_pos] + 1) * 4;
+
+			s->temp.size = s->block_header.size;
+			s->temp.pos = 0;
+			s->sequence = SEQ_BLOCK_HEADER;
+
+		case SEQ_BLOCK_HEADER:
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			ret = dec_block_header(s);
+			if (ret != XZ_OK)
+				return ret;
+
+			s->sequence = SEQ_BLOCK_UNCOMPRESS;
+
+		case SEQ_BLOCK_UNCOMPRESS:
+			ret = dec_block(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->sequence = SEQ_BLOCK_PADDING;
+
+		case SEQ_BLOCK_PADDING:
+			/*
+			 * Size of Compressed Data + Block Padding
+			 * must be a multiple of four. We don't need
+			 * s->block.compressed for anything else
+			 * anymore, so we use it here to test the size
+			 * of the Block Padding field.
+			 */
+			while (s->block.compressed & 3) {
+				if (b->in_pos == b->in_size)
+					return XZ_OK;
+
+				if (b->in[b->in_pos++] != 0)
+					return XZ_DATA_ERROR;
+
+				++s->block.compressed;
+			}
+
+			s->sequence = SEQ_BLOCK_CHECK;
+
+		case SEQ_BLOCK_CHECK:
+			if (s->check_type == XZ_CHECK_CRC32) {
+				ret = crc32_validate(s, b);
+				if (ret != XZ_STREAM_END)
+					return ret;
+			}
+#ifdef XZ_DEC_ANY_CHECK
+			else if (!check_skip(s, b)) {
+				return XZ_OK;
+			}
+#endif
+
+			s->sequence = SEQ_BLOCK_START;
+			break;
+
+		case SEQ_INDEX:
+			ret = dec_index(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->sequence = SEQ_INDEX_PADDING;
+
+		case SEQ_INDEX_PADDING:
+			while ((s->index.size + (b->in_pos - s->in_start))
+					& 3) {
+				if (b->in_pos == b->in_size) {
+					index_update(s, b);
+					return XZ_OK;
+				}
+
+				if (b->in[b->in_pos++] != 0)
+					return XZ_DATA_ERROR;
+			}
+
+			/* Finish the CRC32 value and Index size. */
+			index_update(s, b);
+
+			/* Compare the hashes to validate the Index field. */
+			if (!memeq(&s->block.hash, &s->index.hash,
+					sizeof(s->block.hash)))
+				return XZ_DATA_ERROR;
+
+			s->sequence = SEQ_INDEX_CRC32;
+
+		case SEQ_INDEX_CRC32:
+			ret = crc32_validate(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->temp.size = STREAM_HEADER_SIZE;
+			s->sequence = SEQ_STREAM_FOOTER;
+
+		case SEQ_STREAM_FOOTER:
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			return dec_stream_footer(s);
+		}
+	}
+
+	/* Never reached */
+}
+
+/*
+ * xz_dec_run() is a wrapper for dec_main() to handle some special cases in
+ * multi-call and single-call decoding.
+ *
+ * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
+ * are not going to make any progress anymore. This is to prevent the caller
+ * from calling us infinitely when the input file is truncated or otherwise
+ * corrupt. Since the zlib-style API allows the caller to fill the input buffer
+ * only when the decoder doesn't produce any new output, we have to be careful
+ * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
+ * after the second consecutive call to xz_dec_run() that makes no progress.
+ *
+ * In single-call mode, if we couldn't decode everything and no error
+ * occurred, either the input is truncated or the output buffer is too small.
+ * Since we know that the last input byte never produces any output, we know
+ * that if all the input was consumed and decoding wasn't finished, the file
+ * must be corrupt. Otherwise the output buffer has to be too small or the
+ * file is corrupt in a way that decoding it produces output that is too big.
+ *
+ * If single-call decoding fails, we reset b->in_pos and b->out_pos back to
+ * their original values. This is because with some filter chains there won't
+ * be any valid uncompressed data in the output buffer unless the decoding
+ * actually succeeds (that's the price to pay for using the output buffer as
+ * the workspace).
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+{
+	size_t in_start;
+	size_t out_start;
+	enum xz_ret ret;
+
+	if (DEC_IS_SINGLE(s->mode))
+		xz_dec_reset(s);
+
+	in_start = b->in_pos;
+	out_start = b->out_pos;
+	ret = dec_main(s, b);
+
+	if (DEC_IS_SINGLE(s->mode)) {
+		if (ret == XZ_OK)
+			ret = b->in_pos == b->in_size
+					? XZ_DATA_ERROR : XZ_BUF_ERROR;
+
+		if (ret != XZ_STREAM_END) {
+			b->in_pos = in_start;
+			b->out_pos = out_start;
+		}
+
+	} else if (ret == XZ_OK && in_start == b->in_pos
+			&& out_start == b->out_pos) {
+		if (s->allow_buf_error)
+			ret = XZ_BUF_ERROR;
+
+		s->allow_buf_error = true;
+	} else {
+		s->allow_buf_error = false;
+	}
+
+	return ret;
+}
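
For reference, a minimal multi-call caller of this API could look like the sketch below, modelled on the xz_dec_test module later in this patch. fill_input() and handle_output() are hypothetical stand-ins for whatever supplies compressed data and consumes the decompressed bytes, and the 64 MiB dictionary limit is likewise just an example value.

#include <linux/xz.h>
#include <linux/errno.h>

/* Hypothetical callbacks; not part of the patch. */
extern size_t fill_input(uint8_t *buf, size_t size);
extern void handle_output(const uint8_t *buf, size_t size);

static int decompress_stream(void)
{
	static uint8_t in[1024], out[1024];
	struct xz_buf b = {
		.in = in, .in_pos = 0, .in_size = 0,
		.out = out, .out_pos = 0, .out_size = sizeof(out)
	};
	struct xz_dec *dec;
	enum xz_ret ret;

	dec = xz_dec_init(XZ_DYNALLOC, 1 << 26);
	if (dec == NULL)
		return -ENOMEM;

	do {
		/* Refill the input buffer only once it has been consumed. */
		if (b.in_pos == b.in_size) {
			b.in_size = fill_input(in, sizeof(in));
			b.in_pos = 0;
		}

		ret = xz_dec_run(dec, &b);

		/* Hand off whatever was produced and reuse the buffer. */
		handle_output(out, b.out_pos);
		b.out_pos = 0;
	} while (ret == XZ_OK);

	xz_dec_end(dec);
	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}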
+
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+{
+	struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->mode = mode;
+
+#ifdef XZ_DEC_BCJ
+	s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
+	if (s->bcj == NULL)
+		goto error_bcj;
+#endif
+
+	s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
+	if (s->lzma2 == NULL)
+		goto error_lzma2;
+
+	xz_dec_reset(s);
+	return s;
+
+error_lzma2:
+#ifdef XZ_DEC_BCJ
+	xz_dec_bcj_end(s->bcj);
+error_bcj:
+#endif
+	kfree(s);
+	return NULL;
+}
+
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+{
+	s->sequence = SEQ_STREAM_HEADER;
+	s->allow_buf_error = false;
+	s->pos = 0;
+	s->crc32 = 0;
+	memzero(&s->block, sizeof(s->block));
+	memzero(&s->index, sizeof(s->index));
+	s->temp.pos = 0;
+	s->temp.size = STREAM_HEADER_SIZE;
+}
+
+XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+{
+	if (s != NULL) {
+		xz_dec_lzma2_end(s->lzma2);
+#ifdef XZ_DEC_BCJ
+		xz_dec_bcj_end(s->bcj);
+#endif
+		kfree(s);
+	}
+}
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
new file mode 100644
index 0000000..32eb3c0
--- /dev/null
+++ b/lib/xz/xz_dec_syms.c
@@ -0,0 +1,26 @@
+/*
+ * XZ decoder module information
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/module.h>
+#include <linux/xz.h>
+
+EXPORT_SYMBOL(xz_dec_init);
+EXPORT_SYMBOL(xz_dec_reset);
+EXPORT_SYMBOL(xz_dec_run);
+EXPORT_SYMBOL(xz_dec_end);
+
+MODULE_DESCRIPTION("XZ decompressor");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c
new file mode 100644
index 0000000..da28a19
--- /dev/null
+++ b/lib/xz/xz_dec_test.c
@@ -0,0 +1,220 @@
+/*
+ * XZ decoder tester
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/crc32.h>
+#include <linux/xz.h>
+
+/* Maximum supported dictionary size */
+#define DICT_MAX (1 << 20)
+
+/* Device name to pass to register_chrdev(). */
+#define DEVICE_NAME "xz_dec_test"
+
+/* Dynamically allocated device major number */
+static int device_major;
+
+/*
+ * We reuse the same decoder state, and thus can decode only one
+ * file at a time.
+ */
+static bool device_is_open;
+
+/* XZ decoder state */
+static struct xz_dec *state;
+
+/*
+ * Return value of xz_dec_run(). We need to avoid calling xz_dec_run() after
+ * it has returned XZ_STREAM_END, so we make this static.
+ */
+static enum xz_ret ret;
+
+/*
+ * Input and output buffers. The input buffer is used as a temporary safe
+ * place for the data coming from userspace.
+ */
+static uint8_t buffer_in[1024];
+static uint8_t buffer_out[1024];
+
+/*
+ * Structure to pass the input and output buffers to the XZ decoder.
+ * A few of the fields are never modified so we initialize them here.
+ */
+static struct xz_buf buffers = {
+	.in = buffer_in,
+	.out = buffer_out,
+	.out_size = sizeof(buffer_out)
+};
+
+/*
+ * CRC32 of uncompressed data. This is used to give the user a simple way
+ * to check that the decoder produces correct output.
+ */
+static uint32_t crc;
+
+static int xz_dec_test_open(struct inode *i, struct file *f)
+{
+	if (device_is_open)
+		return -EBUSY;
+
+	device_is_open = true;
+
+	xz_dec_reset(state);
+	ret = XZ_OK;
+	crc = 0xFFFFFFFF;
+
+	buffers.in_pos = 0;
+	buffers.in_size = 0;
+	buffers.out_pos = 0;
+
+	printk(KERN_INFO DEVICE_NAME ": opened\n");
+	return 0;
+}
+
+static int xz_dec_test_release(struct inode *i, struct file *f)
+{
+	device_is_open = false;
+
+	if (ret == XZ_OK)
+		printk(KERN_INFO DEVICE_NAME ": input was truncated\n");
+
+	printk(KERN_INFO DEVICE_NAME ": closed\n");
+	return 0;
+}
+
+/*
+ * Decode the data given to us from userspace. The CRC32 of the uncompressed
+ * data is calculated and printed at the end of successful decoding. The
+ * uncompressed data isn't stored anywhere for further use.
+ *
+ * The .xz file must have exactly one Stream and no Stream Padding. The data
+ * after the first Stream is considered to be garbage.
+ */
+static ssize_t xz_dec_test_write(struct file *file, const char __user *buf,
+				 size_t size, loff_t *pos)
+{
+	size_t remaining;
+
+	if (ret != XZ_OK) {
+		if (size > 0)
+			printk(KERN_INFO DEVICE_NAME ": %zu bytes of "
+					"garbage at the end of the file\n",
+					size);
+
+		return -ENOSPC;
+	}
+
+	printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n",
+			size);
+
+	remaining = size;
+	while ((remaining > 0 || buffers.out_pos == buffers.out_size)
+			&& ret == XZ_OK) {
+		if (buffers.in_pos == buffers.in_size) {
+			buffers.in_pos = 0;
+			buffers.in_size = min(remaining, sizeof(buffer_in));
+			if (copy_from_user(buffer_in, buf, buffers.in_size))
+				return -EFAULT;
+
+			buf += buffers.in_size;
+			remaining -= buffers.in_size;
+		}
+
+		buffers.out_pos = 0;
+		ret = xz_dec_run(state, &buffers);
+		crc = crc32(crc, buffer_out, buffers.out_pos);
+	}
+
+	switch (ret) {
+	case XZ_OK:
+		printk(KERN_INFO DEVICE_NAME ": XZ_OK\n");
+		return size;
+
+	case XZ_STREAM_END:
+		printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, "
+				"CRC32 = 0x%08X\n", ~crc);
+		return size - remaining - (buffers.in_size - buffers.in_pos);
+
+	case XZ_MEMLIMIT_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n");
+		break;
+
+	case XZ_FORMAT_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n");
+		break;
+
+	case XZ_OPTIONS_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n");
+		break;
+
+	case XZ_DATA_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n");
+		break;
+
+	case XZ_BUF_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n");
+		break;
+
+	default:
+		printk(KERN_INFO DEVICE_NAME ": Bug detected!\n");
+		break;
+	}
+
+	return -EIO;
+}
+
+/* Allocate the XZ decoder state and register the character device. */
+static int __init xz_dec_test_init(void)
+{
+	static const struct file_operations fileops = {
+		.owner = THIS_MODULE,
+		.open = &xz_dec_test_open,
+		.release = &xz_dec_test_release,
+		.write = &xz_dec_test_write
+	};
+
+	state = xz_dec_init(XZ_PREALLOC, DICT_MAX);
+	if (state == NULL)
+		return -ENOMEM;
+
+	device_major = register_chrdev(0, DEVICE_NAME, &fileops);
+	if (device_major < 0) {
+		xz_dec_end(state);
+		return device_major;
+	}
+
+	printk(KERN_INFO DEVICE_NAME ": module loaded\n");
+	printk(KERN_INFO DEVICE_NAME ": Create a device node with "
+			"'mknod " DEVICE_NAME " c %d 0' and write .xz files "
+			"to it.\n", device_major);
+	return 0;
+}
+
+static void __exit xz_dec_test_exit(void)
+{
+	unregister_chrdev(device_major, DEVICE_NAME);
+	xz_dec_end(state);
+	printk(KERN_INFO DEVICE_NAME ": module unloaded\n");
+}
+
+module_init(xz_dec_test_init);
+module_exit(xz_dec_test_exit);
+
+MODULE_DESCRIPTION("XZ decompressor tester");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
new file mode 100644
index 0000000..071d67b
--- /dev/null
+++ b/lib/xz/xz_lzma2.h
@@ -0,0 +1,204 @@
+/*
+ * LZMA2 definitions
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_LZMA2_H
+#define XZ_LZMA2_H
+
+/* Range coder constants */
+#define RC_SHIFT_BITS 8
+#define RC_TOP_BITS 24
+#define RC_TOP_VALUE (1 << RC_TOP_BITS)
+#define RC_BIT_MODEL_TOTAL_BITS 11
+#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
+#define RC_MOVE_BITS 5
+
+/*
+ * Maximum number of position states. A position state is the value of the
+ * lowest pb bits of the current uncompressed offset. In some places there
+ * are different sets of probabilities for different position states.
+ */
+#define POS_STATES_MAX (1 << 4)
+
+/*
+ * This enum is used to track which LZMA symbols have occurred most recently
+ * and in which order. This information is used to predict the next symbol.
+ *
+ * Symbols:
+ *  - Literal: One 8-bit byte
+ *  - Match: Repeat a chunk of data at some distance
+ *  - Long repeat: Multi-byte match at a recently seen distance
+ *  - Short repeat: One-byte repeat at a recently seen distance
+ *
+ * The symbol names are of the form STATE_oldest_older_previous. REP means
+ * either short or long repeated match, and NONLIT means any non-literal.
+ */
+enum lzma_state {
+	STATE_LIT_LIT,
+	STATE_MATCH_LIT_LIT,
+	STATE_REP_LIT_LIT,
+	STATE_SHORTREP_LIT_LIT,
+	STATE_MATCH_LIT,
+	STATE_REP_LIT,
+	STATE_SHORTREP_LIT,
+	STATE_LIT_MATCH,
+	STATE_LIT_LONGREP,
+	STATE_LIT_SHORTREP,
+	STATE_NONLIT_MATCH,
+	STATE_NONLIT_REP
+};
+
+/* Total number of states */
+#define STATES 12
+
+/* The lowest 7 states indicate that the previous state was a literal. */
+#define LIT_STATES 7
+
+/* Indicate that the latest symbol was a literal. */
+static inline void lzma_state_literal(enum lzma_state *state)
+{
+	if (*state <= STATE_SHORTREP_LIT_LIT)
+		*state = STATE_LIT_LIT;
+	else if (*state <= STATE_LIT_SHORTREP)
+		*state -= 3;
+	else
+		*state -= 6;
+}
+
+/* Indicate that the latest symbol was a match. */
+static inline void lzma_state_match(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
+}
+
/* Indicate that the latest symbol was a long repeated match. */
+static inline void lzma_state_long_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
+}
+
+/* Indicate that the latest symbol was a short repeated match. */
+static inline void lzma_state_short_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
+}
+
+/* Test if the previous symbol was a literal. */
+static inline bool lzma_state_is_literal(enum lzma_state state)
+{
+	return state < LIT_STATES;
+}
+
+/* Each literal coder is divided into three sections:
+ *   - 0x001-0x0FF: Without match byte
+ *   - 0x101-0x1FF: With match byte; match bit is 0
+ *   - 0x201-0x2FF: With match byte; match bit is 1
+ *
+ * The match byte is used when the previous LZMA symbol was something other
+ * than a literal (that is, it was some kind of match).
+ */
+#define LITERAL_CODER_SIZE 0x300
+
+/* Maximum number of literal coders */
+#define LITERAL_CODERS_MAX (1 << 4)
+
+/* Minimum length of a match is two bytes. */
+#define MATCH_LEN_MIN 2
+
+/* Match length is encoded with 4, 5, or 10 bits.
+ *
+ * Length   Bits
+ *  2-9      4 = Choice=0 + 3 bits
+ * 10-17     5 = Choice=1 + Choice2=0 + 3 bits
+ * 18-273   10 = Choice=1 + Choice2=1 + 8 bits
+ */
+#define LEN_LOW_BITS 3
+#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
+#define LEN_MID_BITS 3
+#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
+#define LEN_HIGH_BITS 8
+#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
+#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
+
+/*
+ * Maximum length of a match is 273, which is a result of the encoding
+ * described above.
+ */
+#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
+
+/*
+ * Different sets of probabilities are used for match distances that have
+ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
+ * set of probabilities for each length. The matches with longer length
+ * use a shared set of probabilities.
+ */
+#define DIST_STATES 4
+
+/*
+ * Get the index of the appropriate probability array for decoding
+ * the distance slot.
+ */
+static inline uint32_t lzma_get_dist_state(uint32_t len)
+{
+	return len < DIST_STATES + MATCH_LEN_MIN
+			? len - MATCH_LEN_MIN : DIST_STATES - 1;
+}
+
+/*
+ * The highest two bits of a 32-bit match distance are encoded using six bits.
+ * This six-bit value is called a distance slot. This way encoding a 32-bit
+ * value takes 6-36 bits, larger values taking more bits.
+ */
+#define DIST_SLOT_BITS 6
+#define DIST_SLOTS (1 << DIST_SLOT_BITS)
+
+/* Match distances up to 127 are fully encoded using probabilities. Since
+ * the highest two bits (distance slot) are always encoded using six bits,
+ * the distances 0-3 don't need any additional bits to encode, since the
+ * distance slot itself is the same as the actual distance. DIST_MODEL_START
+ * indicates the first distance slot where at least one additional bit is
+ * needed.
+ */
+#define DIST_MODEL_START 4
+
+/*
+ * Match distances greater than 127 are encoded in three pieces:
+ *   - distance slot: the highest two bits
+ *   - direct bits: 2-26 bits below the highest two bits
+ *   - alignment bits: four lowest bits
+ *
+ * Direct bits don't use any probabilities.
+ *
+ * The distance slot value of 14 is for distances 128-191.
+ */
+#define DIST_MODEL_END 14
+
+/* Distance slots that indicate a distance <= 127. */
+#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
+#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
+
+/*
+ * For match distances greater than 127, only the highest two bits and the
+ * lowest four bits (alignment) are encoded using probabilities.
+ */
+#define ALIGN_BITS 4
+#define ALIGN_SIZE (1 << ALIGN_BITS)
+#define ALIGN_MASK (ALIGN_SIZE - 1)
+
+/* Total number of all probability variables */
+#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
+
+/*
+ * LZMA remembers the four most recent match distances. Reusing these
+ * distances tends to take less space than re-encoding the actual
+ * distance value.
+ */
+#define REPS 4
+
+#endif
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
new file mode 100644
index 0000000..a65633e
--- /dev/null
+++ b/lib/xz/xz_private.h
@@ -0,0 +1,156 @@
+/*
+ * Private includes and definitions
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_PRIVATE_H
+#define XZ_PRIVATE_H
+
+#ifdef __KERNEL__
+#	include <linux/xz.h>
+#	include <asm/byteorder.h>
+#	include <asm/unaligned.h>
+	/* XZ_PREBOOT may be defined only via decompress_unxz.c. */
+#	ifndef XZ_PREBOOT
+#		include <linux/slab.h>
+#		include <linux/vmalloc.h>
+#		include <linux/string.h>
+#		ifdef CONFIG_XZ_DEC_X86
+#			define XZ_DEC_X86
+#		endif
+#		ifdef CONFIG_XZ_DEC_POWERPC
+#			define XZ_DEC_POWERPC
+#		endif
+#		ifdef CONFIG_XZ_DEC_IA64
+#			define XZ_DEC_IA64
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARM
+#			define XZ_DEC_ARM
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARMTHUMB
+#			define XZ_DEC_ARMTHUMB
+#		endif
+#		ifdef CONFIG_XZ_DEC_SPARC
+#			define XZ_DEC_SPARC
+#		endif
+#		define memeq(a, b, size) (memcmp(a, b, size) == 0)
+#		define memzero(buf, size) memset(buf, 0, size)
+#	endif
+#	define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#else
+	/*
+	 * For userspace builds, use a separate header to define the required
+	 * macros and functions. This makes it easier to adapt the code into
+	 * different environments and avoids clutter in the Linux kernel tree.
+	 */
+#	include "xz_config.h"
+#endif
+
+/* If no specific decoding mode is requested, enable support for all modes. */
+#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
+		&& !defined(XZ_DEC_DYNALLOC)
+#	define XZ_DEC_SINGLE
+#	define XZ_DEC_PREALLOC
+#	define XZ_DEC_DYNALLOC
+#endif
+
+/*
+ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
+ * of the supported modes are enabled, these macros will evaluate to true or
+ * false at compile time and thus allow the compiler to omit unneeded code.
+ */
+#ifdef XZ_DEC_SINGLE
+#	define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
+#else
+#	define DEC_IS_SINGLE(mode) (false)
+#endif
+
+#ifdef XZ_DEC_PREALLOC
+#	define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
+#else
+#	define DEC_IS_PREALLOC(mode) (false)
+#endif
+
+#ifdef XZ_DEC_DYNALLOC
+#	define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
+#else
+#	define DEC_IS_DYNALLOC(mode) (false)
+#endif
+
+#if !defined(XZ_DEC_SINGLE)
+#	define DEC_IS_MULTI(mode) (true)
+#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
+#	define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
+#else
+#	define DEC_IS_MULTI(mode) (false)
+#endif
+
+/*
+ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
+ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
+ */
+#ifndef XZ_DEC_BCJ
+#	if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
+			|| defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+			|| defined(XZ_DEC_ARMTHUMB) \
+			|| defined(XZ_DEC_SPARC)
+#		define XZ_DEC_BCJ
+#	endif
+#endif
+
+/*
+ * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
+ * before calling xz_dec_lzma2_run().
+ */
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+						   uint32_t dict_max);
+
+/*
+ * Decode the LZMA2 properties (one byte) and reset the decoder. Return
+ * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not
+ * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
+ * decoder doesn't support.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
+					 uint8_t props);
+
+/* Decode raw LZMA2 stream from b->in to b->out. */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+				       struct xz_buf *b);
+
+/* Free the memory allocated for the LZMA2 decoder. */
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+
+#ifdef XZ_DEC_BCJ
+/*
+ * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
+ * calling xz_dec_bcj_run().
+ */
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+
+/*
+ * Decode the Filter ID of a BCJ filter. This implementation doesn't
+ * support custom start offsets, so no decoding of Filter Properties
+ * is needed. Returns XZ_OK if the given Filter ID is supported.
+ * Otherwise XZ_OPTIONS_ERROR is returned.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+
+/*
+ * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
+ * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
+ * must be called directly.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+				     struct xz_dec_lzma2 *lzma2,
+				     struct xz_buf *b);
+
+/* Free the memory allocated for the BCJ filters. */
+#define xz_dec_bcj_end(s) kfree(s)
+#endif
+
+#endif
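
The declarations above imply a fixed calling order: create the decoder, reset it with the LZMA2 properties byte, run it over complete input and output buffers, then free it. Below is a hedged sketch of that sequence; decode_raw_lzma2 is a hypothetical wrapper, and the 1 MiB dictionary limit passed to xz_dec_lzma2_create() is an arbitrary example value.

static enum xz_ret decode_raw_lzma2(const uint8_t *in, size_t in_size,
				    uint8_t *out, size_t out_size,
				    uint8_t props)
{
	struct xz_buf b = {
		.in = in, .in_pos = 0, .in_size = in_size,
		.out = out, .out_pos = 0, .out_size = out_size,
	};
	struct xz_dec_lzma2 *s;
	enum xz_ret ret;

	s = xz_dec_lzma2_create(XZ_DYNALLOC, 1024 * 1024);
	if (s == NULL)
		return XZ_MEM_ERROR;

	ret = xz_dec_lzma2_reset(s, props);	/* decode the properties byte */
	if (ret == XZ_OK)
		ret = xz_dec_lzma2_run(s, &b);	/* decode from b.in to b.out */

	xz_dec_lzma2_end(s);
	return ret;
}
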
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
new file mode 100644
index 0000000..66cb5a7
--- /dev/null
+++ b/lib/xz/xz_stream.h
@@ -0,0 +1,62 @@
+/*
+ * Definitions for handling the .xz file format
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_STREAM_H
+#define XZ_STREAM_H
+
+#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32
+#	include <linux/crc32.h>
+#	undef crc32
+#	define xz_crc32(buf, size, crc) \
+		(~crc32_le(~(uint32_t)(crc), buf, size))
+#endif
+
+/*
+ * See the .xz file format specification at
+ * http://tukaani.org/xz/xz-file-format.txt
+ * to understand the container format.
+ */
+
+#define STREAM_HEADER_SIZE 12
+
+#define HEADER_MAGIC "\3757zXZ"
+#define HEADER_MAGIC_SIZE 6
+
+#define FOOTER_MAGIC "YZ"
+#define FOOTER_MAGIC_SIZE 2
+
+/*
+ * A variable-length integer can hold a 63-bit unsigned integer or a special
+ * value indicating that the value is unknown.
+ *
+ * Experimental: vli_type can be defined to uint32_t to save a few bytes
+ * in code size (no effect on speed). Doing so limits the uncompressed and
+ * compressed size of the file to less than 256 MiB and may also weaken
+ * error detection slightly.
+ */
+typedef uint64_t vli_type;
+
+#define VLI_MAX ((vli_type)-1 / 2)
+#define VLI_UNKNOWN ((vli_type)-1)
+
+/* Maximum encoded size of a VLI */
+#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)
+
+/* Integrity Check types */
+enum xz_check {
+	XZ_CHECK_NONE = 0,
+	XZ_CHECK_CRC32 = 1,
+	XZ_CHECK_CRC64 = 4,
+	XZ_CHECK_SHA256 = 10
+};
+
+/* Maximum possible Check ID */
+#define XZ_CHECK_MAX 15
+
+#endif
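
The variable-length integers described above store seven payload bits per byte, least-significant group first, with the high bit of each byte marking continuation, which is why VLI_BYTES_MAX works out to 9 for a 64-bit vli_type. A minimal sketch of that decoding follows; vli_decode is a hypothetical helper, not part of the patch, and it assumes the bool and uint8_t types are available.

static bool vli_decode(const uint8_t *buf, size_t size, vli_type *out)
{
	vli_type value;
	size_t i = 0;

	if (size == 0)
		return false;

	value = buf[0] & 0x7F;
	while (buf[i++] & 0x80) {	/* high bit set: more bytes follow */
		if (i >= size || i >= VLI_BYTES_MAX)
			return false;
		value |= (vli_type)(buf[i] & 0x7F) << (i * 7);
	}

	*out = value;
	return true;
}
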
diff --git a/mm/Kconfig b/mm/Kconfig
index c2c8a4a..e9c0c61 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -179,7 +179,7 @@
 config COMPACTION
 	bool "Allow for memory compaction"
 	select MIGRATION
-	depends on EXPERIMENTAL && HUGETLB_PAGE && MMU
+	depends on MMU
 	help
 	  Allows the compaction of memory for the allocation of huge pages.
 
@@ -302,6 +302,44 @@
 
 	  See Documentation/nommu-mmap.txt for more information.
 
+config TRANSPARENT_HUGEPAGE
+	bool "Transparent Hugepage Support"
+	depends on X86 && MMU
+	select COMPACTION
+	help
+	  Transparent Hugepage support allows the kernel to use huge pages
+	  and huge tlb entries transparently for applications whenever
+	  possible. This feature can improve computing performance for
+	  certain applications by speeding up page faults during memory
+	  allocation, by reducing the number of tlb misses and by speeding
+	  up pagetable walking.
+
+	  If memory is constrained on an embedded system, you may want to say N.
+
+choice
+	prompt "Transparent Hugepage Support sysfs defaults"
+	depends on TRANSPARENT_HUGEPAGE
+	default TRANSPARENT_HUGEPAGE_ALWAYS
+	help
+	  Selects the sysfs defaults for Transparent Hugepage Support.
+
+	config TRANSPARENT_HUGEPAGE_ALWAYS
+		bool "always"
+	help
+	  Enabling Transparent Hugepage always can increase the
+	  memory footprint of applications without a guaranteed
+	  benefit, but it will work automatically for all applications.
+
+	config TRANSPARENT_HUGEPAGE_MADVISE
+		bool "madvise"
+	help
+	  Enabling Transparent Hugepage madvise will only provide a
+	  performance benefit to applications that use
+	  madvise(MADV_HUGEPAGE), but it won't risk increasing the
+	  memory footprint of applications without a guaranteed
+	  benefit.
+endchoice
+
 #
 # UP and nommu archs use km based percpu allocator
 #
diff --git a/mm/Makefile b/mm/Makefile
index f73f75a..2b1b575 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,7 +5,7 @@
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-			   vmalloc.o pagewalk.o
+			   vmalloc.o pagewalk.o pgtable-generic.o
 
 obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
@@ -37,6 +37,7 @@
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 1a8894e..8be430b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,9 @@
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/compaction.h>
+
 /*
  * compact_control is used to track pages being migrated and the free pages
  * they are being migrated to during memory compaction. The free_pfn starts
@@ -30,6 +33,7 @@
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
 
 	/* Account for isolated anon and file pages */
 	unsigned long nr_anon;
@@ -38,6 +42,8 @@
 	unsigned int order;		/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
+
+	int compact_mode;
 };
 
 static unsigned long release_freepages(struct list_head *freelist)
@@ -60,7 +66,7 @@
 				struct list_head *freelist)
 {
 	unsigned long zone_end_pfn, end_pfn;
-	int total_isolated = 0;
+	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 
 	/* Get the last PFN we should scan for free pages at */
@@ -81,6 +87,7 @@
 
 		if (!pfn_valid_within(blockpfn))
 			continue;
+		nr_scanned++;
 
 		if (!PageBuddy(page))
 			continue;
@@ -100,6 +107,7 @@
 		}
 	}
 
+	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
 	return total_isolated;
 }
 
@@ -234,6 +242,8 @@
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
+	unsigned long last_pageblock_nr = 0, pageblock_nr;
+	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 
 	/* Do not scan outside zone boundaries */
@@ -266,20 +276,51 @@
 		struct page *page;
 		if (!pfn_valid_within(low_pfn))
 			continue;
+		nr_scanned++;
 
 		/* Get the page and skip if free */
 		page = pfn_to_page(low_pfn);
 		if (PageBuddy(page))
 			continue;
 
+		/*
+		 * For async migration, also only scan in MOVABLE blocks. Async
+		 * migration is optimistic, checking whether the minimum
+		 * amount of work satisfies the allocation.
+		 */
+		pageblock_nr = low_pfn >> pageblock_order;
+		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+			low_pfn += pageblock_nr_pages;
+			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
+			last_pageblock_nr = pageblock_nr;
+			continue;
+		}
+
+		if (!PageLRU(page))
+			continue;
+
+		/*
+		 * PageLRU is set, and lru_lock excludes isolation,
+		 * splitting and collapsing (collapsing has already
+		 * happened if PageLRU is set).
+		 */
+		if (PageTransHuge(page)) {
+			low_pfn += (1 << compound_order(page)) - 1;
+			continue;
+		}
+
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
 			continue;
 
+		VM_BUG_ON(PageTransCompound(page));
+
 		/* Successfully isolated */
 		del_page_from_lru_list(zone, page, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
+		nr_isolated++;
 
 		/* Avoid isolating too much */
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
@@ -291,6 +332,8 @@
 	spin_unlock_irq(&zone->lru_lock);
 	cc->migrate_pfn = low_pfn;
 
+	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+
 	return cc->nr_migratepages;
 }
 
@@ -341,10 +384,10 @@
 }
 
 static int compact_finished(struct zone *zone,
-						struct compact_control *cc)
+			    struct compact_control *cc)
 {
 	unsigned int order;
-	unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);
+	unsigned long watermark;
 
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
@@ -354,12 +397,31 @@
 		return COMPACT_COMPLETE;
 
 	/* Compaction run is not finished if the watermark is not met */
+	if (cc->compact_mode != COMPACT_MODE_KSWAPD)
+		watermark = low_wmark_pages(zone);
+	else
+		watermark = high_wmark_pages(zone);
+	watermark += (1 << cc->order);
+
 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
 		return COMPACT_CONTINUE;
 
+	/*
+	 * order == -1 is expected when compacting via
+	 * /proc/sys/vm/compact_memory
+	 */
 	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
+	/*
+	 * Generating only one page of the right order is not enough
+	 * for kswapd, we must continue until we're above the high
+	 * watermark as a pool for high order GFP_ATOMIC allocations
+	 * too.
+	 */
+	if (cc->compact_mode == COMPACT_MODE_KSWAPD)
+		return COMPACT_CONTINUE;
+
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		/* Job done if page is free of the right migratetype */
@@ -374,10 +436,69 @@
 	return COMPACT_CONTINUE;
 }
 
+/*
+ * compaction_suitable: Is this suitable to run compaction on this zone now?
+ * Returns
+ *   COMPACT_SKIPPED  - If there are too few free pages for compaction
+ *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
+ *   COMPACT_CONTINUE - If compaction should run now
+ */
+unsigned long compaction_suitable(struct zone *zone, int order)
+{
+	int fragindex;
+	unsigned long watermark;
+
+	/*
+	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
+	 * This is because during migration, copies of pages need to be
+	 * allocated and for a short time, the footprint is higher.
+	 */
+	watermark = low_wmark_pages(zone) + (2UL << order);
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+		return COMPACT_SKIPPED;
+
+	/*
+	 * order == -1 is expected when compacting via
+	 * /proc/sys/vm/compact_memory
+	 */
+	if (order == -1)
+		return COMPACT_CONTINUE;
+
+	/*
+	 * fragmentation index determines if allocation failures are due to
+	 * low memory or external fragmentation
+	 *
+	 * index of -1 implies allocations might succeed depending on watermarks
+	 * index towards 0 implies failure is due to lack of memory
+	 * index towards 1000 implies failure is due to fragmentation
+	 *
+	 * Only compact if a failure would be due to fragmentation.
+	 */
+	fragindex = fragmentation_index(zone, order);
+	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
+		return COMPACT_SKIPPED;
+
+	if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
+		return COMPACT_PARTIAL;
+
+	return COMPACT_CONTINUE;
+}
+
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
 
+	ret = compaction_suitable(zone, cc->order);
+	switch (ret) {
+	case COMPACT_PARTIAL:
+	case COMPACT_SKIPPED:
+		/* Compaction is likely to fail */
+		return ret;
+	case COMPACT_CONTINUE:
+		/* Fall through to compaction */
+		;
+	}
+
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
 	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
@@ -393,7 +514,8 @@
 
 		nr_migrate = cc->nr_migratepages;
 		migrate_pages(&cc->migratepages, compaction_alloc,
-						(unsigned long)cc, 0);
+				(unsigned long)cc, false,
+				cc->sync);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
 
@@ -401,6 +523,8 @@
 		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
 		if (nr_remaining)
 			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
+		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
+						nr_remaining);
 
 		/* Release LRU pages not migrated */
 		if (!list_empty(&cc->migratepages)) {
@@ -417,8 +541,10 @@
 	return ret;
 }
 
-static unsigned long compact_zone_order(struct zone *zone,
-						int order, gfp_t gfp_mask)
+unsigned long compact_zone_order(struct zone *zone,
+				 int order, gfp_t gfp_mask,
+				 bool sync,
+				 int compact_mode)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -426,6 +552,8 @@
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
+		.sync = sync,
+		.compact_mode = compact_mode,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -441,16 +569,17 @@
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
+ * @sync: Whether migration is synchronous or not
  *
  * This is the main entry point for direct page compaction.
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
 	int may_perform_io = gfp_mask & __GFP_IO;
-	unsigned long watermark;
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
@@ -460,7 +589,7 @@
 	 * made because an assumption is made that the page allocator can satisfy
 	 * the "cheaper" orders without taking special steps
 	 */
-	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
+	if (!order || !may_enter_fs || !may_perform_io)
 		return rc;
 
 	count_vm_event(COMPACTSTALL);
@@ -468,43 +597,14 @@
 	/* Compact each zone in the list */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 								nodemask) {
-		int fragindex;
 		int status;
 
-		/*
-		 * Watermarks for order-0 must be met for compaction. Note
-		 * the 2UL. This is because during migration, copies of
-		 * pages need to be allocated and for a short time, the
-		 * footprint is higher
-		 */
-		watermark = low_wmark_pages(zone) + (2UL << order);
-		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
-			continue;
-
-		/*
-		 * fragmentation index determines if allocation failures are
-		 * due to low memory or external fragmentation
-		 *
-		 * index of -1 implies allocations might succeed depending
-		 * 	on watermarks
-		 * index towards 0 implies failure is due to lack of memory
-		 * index towards 1000 implies failure is due to fragmentation
-		 *
-		 * Only compact if a failure would be due to fragmentation.
-		 */
-		fragindex = fragmentation_index(zone, order);
-		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
-			continue;
-
-		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
-			rc = COMPACT_PARTIAL;
-			break;
-		}
-
-		status = compact_zone_order(zone, order, gfp_mask);
+		status = compact_zone_order(zone, order, gfp_mask, sync,
+					    COMPACT_MODE_DIRECT_RECLAIM);
 		rc = max(status, rc);
 
-		if (zone_watermark_ok(zone, order, watermark, 0, 0))
+		/* If a normal allocation would succeed, stop compacting */
+		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
 			break;
 	}
 
@@ -531,6 +631,7 @@
 			.nr_freepages = 0,
 			.nr_migratepages = 0,
 			.order = -1,
+			.compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
 		};
 
 		zone = &pgdat->node_zones[zoneid];
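
As the compaction_suitable() comments above describe, a fragmentation index of -1 means the allocation may still succeed on watermarks alone, values near 0 point at a genuine lack of memory, and values near 1000 point at external fragmentation; compaction is only worthwhile in the last case, with /proc/sys/vm/extfrag_threshold (500 by default) as the cut-off. A minimal standalone sketch of that decision rule, using a hypothetical helper name:

#include <stdbool.h>

/* Interpret a fragmentation index the way compaction_suitable() does. */
static bool failure_is_due_to_fragmentation(int fragindex, int extfrag_threshold)
{
	if (fragindex == -1)	/* allocation may succeed on watermarks alone */
		return false;

	return fragindex > extfrag_threshold;	/* near 1000 => fragmentation */
}
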
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 4df2de7..03bf3bb 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -324,7 +324,7 @@
 		if (mem_flags & __GFP_WAIT) {
 			DECLARE_WAITQUEUE(wait, current);
 
-			__set_current_state(TASK_INTERRUPTIBLE);
+			__set_current_state(TASK_UNINTERRUPTIBLE);
 			__add_wait_queue(&pool->waitq, &wait);
 			spin_unlock_irqrestore(&pool->lock, flags);
 
@@ -355,20 +355,15 @@
 
 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
-	unsigned long flags;
 	struct dma_page *page;
 
-	spin_lock_irqsave(&pool->lock, flags);
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (dma < page->dma)
 			continue;
 		if (dma < (page->dma + pool->allocation))
-			goto done;
+			return page;
 	}
-	page = NULL;
- done:
-	spin_unlock_irqrestore(&pool->lock, flags);
-	return page;
+	return NULL;
 }
 
 /**
@@ -386,8 +381,10 @@
 	unsigned long flags;
 	unsigned int offset;
 
+	spin_lock_irqsave(&pool->lock, flags);
 	page = pool_find_page(pool, dma);
 	if (!page) {
+		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
 			dev_err(pool->dev,
 				"dma_pool_free %s, %p/%lx (bad dma)\n",
@@ -401,6 +398,7 @@
 	offset = vaddr - page->vaddr;
 #ifdef	DMAPOOL_DEBUG
 	if ((dma - page->dma) != offset) {
+		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
 			dev_err(pool->dev,
 				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
@@ -418,6 +416,7 @@
 				chain = *(int *)(page->vaddr + chain);
 				continue;
 			}
+			spin_unlock_irqrestore(&pool->lock, flags);
 			if (pool->dev)
 				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
 					"already free\n", pool->name,
@@ -432,7 +431,6 @@
 	memset(vaddr, POOL_POISON_FREED, pool->size);
 #endif
 
-	spin_lock_irqsave(&pool->lock, flags);
 	page->in_use--;
 	*(int *)vaddr = page->offset;
 	page->offset = offset;
diff --git a/mm/filemap.c b/mm/filemap.c
index ca38939..83a45d3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -298,7 +298,7 @@
 				continue;
 
 			wait_on_page_writeback(page);
-			if (PageError(page))
+			if (TestClearPageError(page))
 				ret = -EIO;
 		}
 		pagevec_release(&pvec);
@@ -837,9 +837,6 @@
 		if (radix_tree_deref_retry(page))
 			goto restart;
 
-		if (page->mapping == NULL || page->index != index)
-			break;
-
 		if (!page_cache_get_speculative(page))
 			goto repeat;
 
@@ -849,6 +846,16 @@
 			goto repeat;
 		}
 
+		/*
+		 * Must check mapping and index after taking the ref;
+		 * otherwise we can get both false positives and false
+		 * negatives, which is just confusing to the caller.
+		 */
+		if (page->mapping == NULL || page->index != index) {
+			page_cache_release(page);
+			break;
+		}
+
 		pages[ret] = page;
 		ret++;
 		index++;
@@ -2220,7 +2227,7 @@
 		gfp_notmask = __GFP_FS;
 repeat:
 	page = find_lock_page(mapping, index);
-	if (likely(page))
+	if (page)
 		return page;
 
 	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
new file mode 100644
index 0000000..113e35c
--- /dev/null
+++ b/mm/huge_memory.c
@@ -0,0 +1,2365 @@
+/*
+ *  Copyright (C) 2009  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/hugetlb.h>
+#include <linux/mmu_notifier.h>
+#include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/mm_inline.h>
+#include <linux/kthread.h>
+#include <linux/khugepaged.h>
+#include <linux/freezer.h>
+#include <linux/mman.h>
+#include <asm/tlb.h>
+#include <asm/pgalloc.h>
+#include "internal.h"
+
+/*
+ * By default transparent hugepage support is enabled for all mappings
+ * and khugepaged scans all mappings. Defrag is only invoked by
+ * khugepaged hugepage allocations and by page faults inside
+ * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
+ * allocations.
+ */
+unsigned long transparent_hugepage_flags __read_mostly =
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
+	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
+	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
+#endif
+	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
+	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+
+/* default scan 8*512 ptes (or vmas) every 30 seconds */
+static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
+static unsigned int khugepaged_pages_collapsed;
+static unsigned int khugepaged_full_scans;
+static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
+/* during fragmentation poll the hugepage allocator once every minute */
+static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
+static struct task_struct *khugepaged_thread __read_mostly;
+static DEFINE_MUTEX(khugepaged_mutex);
+static DEFINE_SPINLOCK(khugepaged_mm_lock);
+static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
+/*
+ * By default, collapse into a hugepage if at least one pte is mapped,
+ * just as would have happened if the vma had been large enough during
+ * the page fault.
+ */
+static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
+
+static int khugepaged(void *none);
+static int mm_slots_hash_init(void);
+static int khugepaged_slab_init(void);
+static void khugepaged_slab_free(void);
+
+#define MM_SLOTS_HASH_HEADS 1024
+static struct hlist_head *mm_slots_hash __read_mostly;
+static struct kmem_cache *mm_slot_cache __read_mostly;
+
+/**
+ * struct mm_slot - hash lookup from mm to mm_slot
+ * @hash: hash collision list
+ * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
+ * @mm: the mm that this information is valid for
+ */
+struct mm_slot {
+	struct hlist_node hash;
+	struct list_head mm_node;
+	struct mm_struct *mm;
+};
+
+/**
+ * struct khugepaged_scan - cursor for scanning
+ * @mm_head: the head of the mm list to scan
+ * @mm_slot: the current mm_slot we are scanning
+ * @address: the next address inside that to be scanned
+ *
+ * There is only the one khugepaged_scan instance of this cursor structure.
+ */
+struct khugepaged_scan {
+	struct list_head mm_head;
+	struct mm_slot *mm_slot;
+	unsigned long address;
+} khugepaged_scan = {
+	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
+};
+
+
+static int set_recommended_min_free_kbytes(void)
+{
+	struct zone *zone;
+	int nr_zones = 0;
+	unsigned long recommended_min;
+	extern int min_free_kbytes;
+
+	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) &&
+	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags))
+		return 0;
+
+	for_each_populated_zone(zone)
+		nr_zones++;
+
+	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+	recommended_min = pageblock_nr_pages * nr_zones * 2;
+
+	/*
+	 * Make sure that on average at least two pageblocks are almost free
+	 * of another type, one for a migratetype to fall back to and a
+	 * second to avoid subsequent fallbacks of other types. There are 3
+	 * MIGRATE_TYPES we care about.
+	 */
+	recommended_min += pageblock_nr_pages * nr_zones *
+			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
+
+	/* don't ever allow reserving more than 5% of the lowmem */
+	recommended_min = min(recommended_min,
+			      (unsigned long) nr_free_buffer_pages() / 20);
+	recommended_min <<= (PAGE_SHIFT-10);
+
+	if (recommended_min > min_free_kbytes)
+		min_free_kbytes = recommended_min;
+	setup_per_zone_wmarks();
+	return 0;
+}
+late_initcall(set_recommended_min_free_kbytes);
+
+static int start_khugepaged(void)
+{
+	int err = 0;
+	if (khugepaged_enabled()) {
+		int wakeup;
+		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
+			err = -ENOMEM;
+			goto out;
+		}
+		mutex_lock(&khugepaged_mutex);
+		if (!khugepaged_thread)
+			khugepaged_thread = kthread_run(khugepaged, NULL,
+							"khugepaged");
+		if (unlikely(IS_ERR(khugepaged_thread))) {
+			printk(KERN_ERR
+			       "khugepaged: kthread_run(khugepaged) failed\n");
+			err = PTR_ERR(khugepaged_thread);
+			khugepaged_thread = NULL;
+		}
+		wakeup = !list_empty(&khugepaged_scan.mm_head);
+		mutex_unlock(&khugepaged_mutex);
+		if (wakeup)
+			wake_up_interruptible(&khugepaged_wait);
+
+		set_recommended_min_free_kbytes();
+	} else
+		/* wakeup to exit */
+		wake_up_interruptible(&khugepaged_wait);
+out:
+	return err;
+}
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t double_flag_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf,
+				enum transparent_hugepage_flag enabled,
+				enum transparent_hugepage_flag req_madv)
+{
+	if (test_bit(enabled, &transparent_hugepage_flags)) {
+		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
+		return sprintf(buf, "[always] madvise never\n");
+	} else if (test_bit(req_madv, &transparent_hugepage_flags))
+		return sprintf(buf, "always [madvise] never\n");
+	else
+		return sprintf(buf, "always madvise [never]\n");
+}
+static ssize_t double_flag_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf, size_t count,
+				 enum transparent_hugepage_flag enabled,
+				 enum transparent_hugepage_flag req_madv)
+{
+	if (!memcmp("always", buf,
+		    min(sizeof("always")-1, count))) {
+		set_bit(enabled, &transparent_hugepage_flags);
+		clear_bit(req_madv, &transparent_hugepage_flags);
+	} else if (!memcmp("madvise", buf,
+			   min(sizeof("madvise")-1, count))) {
+		clear_bit(enabled, &transparent_hugepage_flags);
+		set_bit(req_madv, &transparent_hugepage_flags);
+	} else if (!memcmp("never", buf,
+			   min(sizeof("never")-1, count))) {
+		clear_bit(enabled, &transparent_hugepage_flags);
+		clear_bit(req_madv, &transparent_hugepage_flags);
+	} else
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t enabled_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	return double_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_FLAG,
+				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
+}
+static ssize_t enabled_store(struct kobject *kobj,
+			     struct kobj_attribute *attr,
+			     const char *buf, size_t count)
+{
+	ssize_t ret;
+
+	ret = double_flag_store(kobj, attr, buf, count,
+				TRANSPARENT_HUGEPAGE_FLAG,
+				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
+
+	if (ret > 0) {
+		int err = start_khugepaged();
+		if (err)
+			ret = err;
+	}
+
+	if (ret > 0 &&
+	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) ||
+	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags)))
+		set_recommended_min_free_kbytes();
+
+	return ret;
+}
+static struct kobj_attribute enabled_attr =
+	__ATTR(enabled, 0644, enabled_show, enabled_store);
+
+static ssize_t single_flag_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf,
+				enum transparent_hugepage_flag flag)
+{
+	if (test_bit(flag, &transparent_hugepage_flags))
+		return sprintf(buf, "[yes] no\n");
+	else
+		return sprintf(buf, "yes [no]\n");
+}
+static ssize_t single_flag_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf, size_t count,
+				 enum transparent_hugepage_flag flag)
+{
+	if (!memcmp("yes", buf,
+		    min(sizeof("yes")-1, count))) {
+		set_bit(flag, &transparent_hugepage_flags);
+	} else if (!memcmp("no", buf,
+			   min(sizeof("no")-1, count))) {
+		clear_bit(flag, &transparent_hugepage_flags);
+	} else
+		return -EINVAL;
+
+	return count;
+}
+
+/*
+ * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
+ * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
+ * memory just to allocate one more hugepage.
+ */
+static ssize_t defrag_show(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	return double_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
+}
+static ssize_t defrag_store(struct kobject *kobj,
+			    struct kobj_attribute *attr,
+			    const char *buf, size_t count)
+{
+	return double_flag_store(kobj, attr, buf, count,
+				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
+}
+static struct kobj_attribute defrag_attr =
+	__ATTR(defrag, 0644, defrag_show, defrag_store);
+
+#ifdef CONFIG_DEBUG_VM
+static ssize_t debug_cow_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	return single_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
+}
+static ssize_t debug_cow_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buf, size_t count)
+{
+	return single_flag_store(kobj, attr, buf, count,
+				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
+}
+static struct kobj_attribute debug_cow_attr =
+	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
+#endif /* CONFIG_DEBUG_VM */
+
+static struct attribute *hugepage_attr[] = {
+	&enabled_attr.attr,
+	&defrag_attr.attr,
+#ifdef CONFIG_DEBUG_VM
+	&debug_cow_attr.attr,
+#endif
+	NULL,
+};
+
+static struct attribute_group hugepage_attr_group = {
+	.attrs = hugepage_attr,
+};
+
+static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
+}
+
+static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  const char *buf, size_t count)
+{
+	unsigned long msecs;
+	int err;
+
+	err = strict_strtoul(buf, 10, &msecs);
+	if (err || msecs > UINT_MAX)
+		return -EINVAL;
+
+	khugepaged_scan_sleep_millisecs = msecs;
+	wake_up_interruptible(&khugepaged_wait);
+
+	return count;
+}
+static struct kobj_attribute scan_sleep_millisecs_attr =
+	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
+	       scan_sleep_millisecs_store);
+
+static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
+}
+
+static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
+					   struct kobj_attribute *attr,
+					   const char *buf, size_t count)
+{
+	unsigned long msecs;
+	int err;
+
+	err = strict_strtoul(buf, 10, &msecs);
+	if (err || msecs > UINT_MAX)
+		return -EINVAL;
+
+	khugepaged_alloc_sleep_millisecs = msecs;
+	wake_up_interruptible(&khugepaged_wait);
+
+	return count;
+}
+static struct kobj_attribute alloc_sleep_millisecs_attr =
+	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
+	       alloc_sleep_millisecs_store);
+
+static ssize_t pages_to_scan_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
+}
+static ssize_t pages_to_scan_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count)
+{
+	int err;
+	unsigned long pages;
+
+	err = strict_strtoul(buf, 10, &pages);
+	if (err || !pages || pages > UINT_MAX)
+		return -EINVAL;
+
+	khugepaged_pages_to_scan = pages;
+
+	return count;
+}
+static struct kobj_attribute pages_to_scan_attr =
+	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
+	       pages_to_scan_store);
+
+static ssize_t pages_collapsed_show(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
+}
+static struct kobj_attribute pages_collapsed_attr =
+	__ATTR_RO(pages_collapsed);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_full_scans);
+}
+static struct kobj_attribute full_scans_attr =
+	__ATTR_RO(full_scans);
+
+static ssize_t khugepaged_defrag_show(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	return single_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+}
+static ssize_t khugepaged_defrag_store(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buf, size_t count)
+{
+	return single_flag_store(kobj, attr, buf, count,
+				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+}
+static struct kobj_attribute khugepaged_defrag_attr =
+	__ATTR(defrag, 0644, khugepaged_defrag_show,
+	       khugepaged_defrag_store);
+
+/*
+ * max_ptes_none controls whether khugepaged should collapse hugepages
+ * over any unmapped ptes, in turn potentially increasing the memory
+ * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
+ * reduce the available free memory in the system as it
+ * runs. Increasing max_ptes_none will instead potentially reduce the
+ * free memory in the system during the khugepaged scan.
+ */
+static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
+					     struct kobj_attribute *attr,
+					     char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
+}
+static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
+					      struct kobj_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int err;
+	unsigned long max_ptes_none;
+
+	err = strict_strtoul(buf, 10, &max_ptes_none);
+	if (err || max_ptes_none > HPAGE_PMD_NR-1)
+		return -EINVAL;
+
+	khugepaged_max_ptes_none = max_ptes_none;
+
+	return count;
+}
+static struct kobj_attribute khugepaged_max_ptes_none_attr =
+	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
+	       khugepaged_max_ptes_none_store);
+
+static struct attribute *khugepaged_attr[] = {
+	&khugepaged_defrag_attr.attr,
+	&khugepaged_max_ptes_none_attr.attr,
+	&pages_to_scan_attr.attr,
+	&pages_collapsed_attr.attr,
+	&full_scans_attr.attr,
+	&scan_sleep_millisecs_attr.attr,
+	&alloc_sleep_millisecs_attr.attr,
+	NULL,
+};
+
+static struct attribute_group khugepaged_attr_group = {
+	.attrs = khugepaged_attr,
+	.name = "khugepaged",
+};
+#endif /* CONFIG_SYSFS */
+
+static int __init hugepage_init(void)
+{
+	int err;
+#ifdef CONFIG_SYSFS
+	static struct kobject *hugepage_kobj;
+#endif
+
+	err = -EINVAL;
+	if (!has_transparent_hugepage()) {
+		transparent_hugepage_flags = 0;
+		goto out;
+	}
+
+#ifdef CONFIG_SYSFS
+	err = -ENOMEM;
+	hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
+	if (unlikely(!hugepage_kobj)) {
+		printk(KERN_ERR "hugepage: failed kobject create\n");
+		goto out;
+	}
+
+	err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
+	if (err) {
+		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
+		goto out;
+	}
+
+	err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
+	if (err) {
+		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
+		goto out;
+	}
+#endif
+
+	err = khugepaged_slab_init();
+	if (err)
+		goto out;
+
+	err = mm_slots_hash_init();
+	if (err) {
+		khugepaged_slab_free();
+		goto out;
+	}
+
+	/*
+	 * By default disable transparent hugepages on smaller systems,
+	 * where the extra memory used could hurt more than the reduced
+	 * TLB overhead is likely to help.  The admin can still enable it
+	 * through /sys.
+	 */
+	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
+		transparent_hugepage_flags = 0;
+
+	start_khugepaged();
+
+	set_recommended_min_free_kbytes();
+
+out:
+	return err;
+}
+module_init(hugepage_init)
+
+static int __init setup_transparent_hugepage(char *str)
+{
+	int ret = 0;
+	if (!str)
+		goto out;
+	if (!strcmp(str, "always")) {
+		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
+			&transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+			  &transparent_hugepage_flags);
+		ret = 1;
+	} else if (!strcmp(str, "madvise")) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
+			  &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+			&transparent_hugepage_flags);
+		ret = 1;
+	} else if (!strcmp(str, "never")) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
+			  &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+			  &transparent_hugepage_flags);
+		ret = 1;
+	}
+out:
+	if (!ret)
+		printk(KERN_WARNING
+		       "transparent_hugepage= cannot parse, ignored\n");
+	return ret;
+}
+__setup("transparent_hugepage=", setup_transparent_hugepage);
+
+static void prepare_pmd_huge_pte(pgtable_t pgtable,
+				 struct mm_struct *mm)
+{
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(&pgtable->lru);
+	else
+		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
+	mm->pmd_huge_pte = pgtable;
+}
+
+static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pmd = pmd_mkwrite(pmd);
+	return pmd;
+}
+
+static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
+					struct vm_area_struct *vma,
+					unsigned long haddr, pmd_t *pmd,
+					struct page *page)
+{
+	int ret = 0;
+	pgtable_t pgtable;
+
+	VM_BUG_ON(!PageCompound(page));
+	pgtable = pte_alloc_one(mm, haddr);
+	if (unlikely(!pgtable)) {
+		mem_cgroup_uncharge_page(page);
+		put_page(page);
+		return VM_FAULT_OOM;
+	}
+
+	clear_huge_page(page, haddr, HPAGE_PMD_NR);
+	__SetPageUptodate(page);
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_none(*pmd))) {
+		spin_unlock(&mm->page_table_lock);
+		mem_cgroup_uncharge_page(page);
+		put_page(page);
+		pte_free(mm, pgtable);
+	} else {
+		pmd_t entry;
+		entry = mk_pmd(page, vma->vm_page_prot);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = pmd_mkhuge(entry);
+		/*
+		 * The spinlocking to take the lru_lock inside
+		 * page_add_new_anon_rmap() acts as a full memory
+		 * barrier to be sure clear_huge_page writes become
+		 * visible after the set_pmd_at() write.
+		 */
+		page_add_new_anon_rmap(page, vma, haddr);
+		set_pmd_at(mm, haddr, pmd, entry);
+		prepare_pmd_huge_pte(pgtable, mm);
+		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		spin_unlock(&mm->page_table_lock);
+	}
+
+	return ret;
+}
+
+static inline gfp_t alloc_hugepage_gfpmask(int defrag)
+{
+	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
+}
+
+static inline struct page *alloc_hugepage_vma(int defrag,
+					      struct vm_area_struct *vma,
+					      unsigned long haddr, int nd)
+{
+	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
+			       HPAGE_PMD_ORDER, vma, haddr, nd);
+}
+
+#ifndef CONFIG_NUMA
+static inline struct page *alloc_hugepage(int defrag)
+{
+	return alloc_pages(alloc_hugepage_gfpmask(defrag),
+			   HPAGE_PMD_ORDER);
+}
+#endif
+
+int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			       unsigned long address, pmd_t *pmd,
+			       unsigned int flags)
+{
+	struct page *page;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
+	pte_t *pte;
+
+	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
+		if (unlikely(anon_vma_prepare(vma)))
+			return VM_FAULT_OOM;
+		if (unlikely(khugepaged_enter(vma)))
+			return VM_FAULT_OOM;
+		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+					  vma, haddr, numa_node_id());
+		if (unlikely(!page))
+			goto out;
+		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+			put_page(page);
+			goto out;
+		}
+
+		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+	}
+out:
+	/*
+	 * Use __pte_alloc instead of pte_alloc_map, because we can't
+	 * run pte_offset_map on the pmd, if a huge pmd could
+	 * materialize from under us from a different thread.
+	 */
+	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+		return VM_FAULT_OOM;
+	/* if a huge pmd materialized from under us just retry later */
+	if (unlikely(pmd_trans_huge(*pmd)))
+		return 0;
+	/*
+	 * A regular pmd is established and it can't morph into a huge pmd
+	 * from under us anymore at this point because we hold the mmap_sem
+	 * read mode and khugepaged takes it in write mode. So now it's
+	 * safe to run pte_offset_map().
+	 */
+	pte = pte_offset_map(pmd, address);
+	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
+}
+
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+		  struct vm_area_struct *vma)
+{
+	struct page *src_page;
+	pmd_t pmd;
+	pgtable_t pgtable;
+	int ret;
+
+	ret = -ENOMEM;
+	pgtable = pte_alloc_one(dst_mm, addr);
+	if (unlikely(!pgtable))
+		goto out;
+
+	spin_lock(&dst_mm->page_table_lock);
+	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
+
+	ret = -EAGAIN;
+	pmd = *src_pmd;
+	if (unlikely(!pmd_trans_huge(pmd))) {
+		pte_free(dst_mm, pgtable);
+		goto out_unlock;
+	}
+	if (unlikely(pmd_trans_splitting(pmd))) {
+		/* split huge page running from under us */
+		spin_unlock(&src_mm->page_table_lock);
+		spin_unlock(&dst_mm->page_table_lock);
+		pte_free(dst_mm, pgtable);
+
+		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
+		goto out;
+	}
+	src_page = pmd_page(pmd);
+	VM_BUG_ON(!PageHead(src_page));
+	get_page(src_page);
+	page_dup_rmap(src_page);
+	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+
+	pmdp_set_wrprotect(src_mm, addr, src_pmd);
+	pmd = pmd_mkold(pmd_wrprotect(pmd));
+	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+	prepare_pmd_huge_pte(pgtable, dst_mm);
+
+	ret = 0;
+out_unlock:
+	spin_unlock(&src_mm->page_table_lock);
+	spin_unlock(&dst_mm->page_table_lock);
+out:
+	return ret;
+}
+
+/* no "address" argument so destroys page coloring of some arch */
+pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
+{
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	if (list_empty(&pgtable->lru))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+					      struct page, lru);
+		list_del(&pgtable->lru);
+	}
+	return pgtable;
+}
+
+static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
+					struct vm_area_struct *vma,
+					unsigned long address,
+					pmd_t *pmd, pmd_t orig_pmd,
+					struct page *page,
+					unsigned long haddr)
+{
+	pgtable_t pgtable;
+	pmd_t _pmd;
+	int ret = 0, i;
+	struct page **pages;
+
+	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
+			GFP_KERNEL);
+	if (unlikely(!pages)) {
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+					       vma, address, page_to_nid(page));
+		if (unlikely(!pages[i] ||
+			     mem_cgroup_newpage_charge(pages[i], mm,
+						       GFP_KERNEL))) {
+			if (pages[i])
+				put_page(pages[i]);
+			mem_cgroup_uncharge_start();
+			while (--i >= 0) {
+				mem_cgroup_uncharge_page(pages[i]);
+				put_page(pages[i]);
+			}
+			mem_cgroup_uncharge_end();
+			kfree(pages);
+			ret |= VM_FAULT_OOM;
+			goto out;
+		}
+	}
+
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		copy_user_highpage(pages[i], page + i,
+				   haddr + PAGE_SHIFT*i, vma);
+		__SetPageUptodate(pages[i]);
+		cond_resched();
+	}
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(*pmd, orig_pmd)))
+		goto out_free_pages;
+	VM_BUG_ON(!PageHead(page));
+
+	pmdp_clear_flush_notify(vma, haddr, pmd);
+	/* leave pmd empty until pte is filled */
+
+	pgtable = get_pmd_huge_pte(mm);
+	pmd_populate(mm, &_pmd, pgtable);
+
+	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+		pte_t *pte, entry;
+		entry = mk_pte(pages[i], vma->vm_page_prot);
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		page_add_new_anon_rmap(pages[i], vma, haddr);
+		pte = pte_offset_map(&_pmd, haddr);
+		VM_BUG_ON(!pte_none(*pte));
+		set_pte_at(mm, haddr, pte, entry);
+		pte_unmap(pte);
+	}
+	kfree(pages);
+
+	mm->nr_ptes++;
+	smp_wmb(); /* make pte visible before pmd */
+	pmd_populate(mm, pmd, pgtable);
+	page_remove_rmap(page);
+	spin_unlock(&mm->page_table_lock);
+
+	ret |= VM_FAULT_WRITE;
+	put_page(page);
+
+out:
+	return ret;
+
+out_free_pages:
+	spin_unlock(&mm->page_table_lock);
+	mem_cgroup_uncharge_start();
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		mem_cgroup_uncharge_page(pages[i]);
+		put_page(pages[i]);
+	}
+	mem_cgroup_uncharge_end();
+	kfree(pages);
+	goto out;
+}
+
+int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
+{
+	int ret = 0;
+	struct page *page, *new_page;
+	unsigned long haddr;
+
+	VM_BUG_ON(!vma->anon_vma);
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(*pmd, orig_pmd)))
+		goto out_unlock;
+
+	page = pmd_page(orig_pmd);
+	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+	haddr = address & HPAGE_PMD_MASK;
+	if (page_mapcount(page) == 1) {
+		pmd_t entry;
+		entry = pmd_mkyoung(orig_pmd);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
+			update_mmu_cache(vma, address, entry);
+		ret |= VM_FAULT_WRITE;
+		goto out_unlock;
+	}
+	get_page(page);
+	spin_unlock(&mm->page_table_lock);
+
+	if (transparent_hugepage_enabled(vma) &&
+	    !transparent_hugepage_debug_cow())
+		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+					      vma, haddr, numa_node_id());
+	else
+		new_page = NULL;
+
+	if (unlikely(!new_page)) {
+		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+						   pmd, orig_pmd, page, haddr);
+		put_page(page);
+		goto out;
+	}
+
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+		put_page(new_page);
+		put_page(page);
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
+	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+	__SetPageUptodate(new_page);
+
+	spin_lock(&mm->page_table_lock);
+	put_page(page);
+	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+		mem_cgroup_uncharge_page(new_page);
+		put_page(new_page);
+	} else {
+		pmd_t entry;
+		VM_BUG_ON(!PageHead(page));
+		entry = mk_pmd(new_page, vma->vm_page_prot);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = pmd_mkhuge(entry);
+		pmdp_clear_flush_notify(vma, haddr, pmd);
+		page_add_new_anon_rmap(new_page, vma, haddr);
+		set_pmd_at(mm, haddr, pmd, entry);
+		update_mmu_cache(vma, address, entry);
+		page_remove_rmap(page);
+		put_page(page);
+		ret |= VM_FAULT_WRITE;
+	}
+out_unlock:
+	spin_unlock(&mm->page_table_lock);
+out:
+	return ret;
+}
+
+struct page *follow_trans_huge_pmd(struct mm_struct *mm,
+				   unsigned long addr,
+				   pmd_t *pmd,
+				   unsigned int flags)
+{
+	struct page *page = NULL;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+		goto out;
+
+	page = pmd_page(*pmd);
+	VM_BUG_ON(!PageHead(page));
+	if (flags & FOLL_TOUCH) {
+		pmd_t _pmd;
+		/*
+		 * We should set the dirty bit only for FOLL_WRITE but
+		 * for now the dirty bit in the pmd is meaningless.
+		 * If the dirty bit ever becomes meaningful and we
+		 * only set it with FOLL_WRITE, an atomic
+		 * set_bit will be required on the pmd to set the
+		 * young bit, instead of the current set_pmd_at.
+		 */
+		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
+	}
+	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+	VM_BUG_ON(!PageCompound(page));
+	if (flags & FOLL_GET)
+		get_page(page);
+
+out:
+	return page;
+}
+
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		 pmd_t *pmd)
+{
+	int ret = 0;
+
+	spin_lock(&tlb->mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&tlb->mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma,
+					     pmd);
+		} else {
+			struct page *page;
+			pgtable_t pgtable;
+			pgtable = get_pmd_huge_pte(tlb->mm);
+			page = pmd_page(*pmd);
+			pmd_clear(pmd);
+			page_remove_rmap(page);
+			VM_BUG_ON(page_mapcount(page) < 0);
+			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+			VM_BUG_ON(!PageHead(page));
+			spin_unlock(&tlb->mm->page_table_lock);
+			tlb_remove_page(tlb, page);
+			pte_free(tlb->mm, pgtable);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&tlb->mm->page_table_lock);
+
+	return ret;
+}
+
+int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, unsigned long end,
+		unsigned char *vec)
+{
+	int ret = 0;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		ret = !pmd_trans_splitting(*pmd);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		if (unlikely(!ret))
+			wait_split_huge_page(vma->anon_vma, pmd);
+		else {
+			/*
+			 * All logical pages in the range are present
+			 * if backed by a huge page.
+			 */
+			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
+pmd_t *page_check_address_pmd(struct page *page,
+			      struct mm_struct *mm,
+			      unsigned long address,
+			      enum page_check_address_pmd_flag flag)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, *ret = NULL;
+
+	if (address & ~HPAGE_PMD_MASK)
+		goto out;
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd))
+		goto out;
+	if (pmd_page(*pmd) != page)
+		goto out;
+	/*
+	 * split_vma() may create temporary aliased mappings. There is
+	 * no risk as long as all huge pmd are found and have their
+	 * splitting bit set before __split_huge_page_refcount
+	 * runs. Finding the same huge pmd more than once during the
+	 * same rmap walk is not a problem.
+	 */
+	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
+	    pmd_trans_splitting(*pmd))
+		goto out;
+	if (pmd_trans_huge(*pmd)) {
+		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
+			  !pmd_trans_splitting(*pmd));
+		ret = pmd;
+	}
+out:
+	return ret;
+}
+
+static int __split_huge_page_splitting(struct page *page,
+				       struct vm_area_struct *vma,
+				       unsigned long address)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t *pmd;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	pmd = page_check_address_pmd(page, mm, address,
+				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
+	if (pmd) {
+		/*
+		 * We can't temporarily set the pmd to null in order
+		 * to split it, the pmd must remain marked huge at all
+		 * times or the VM won't take the pmd_trans_huge paths
+		 * and it won't wait on the anon_vma->root->lock to
+		 * serialize against split_huge_page*.
+		 */
+		pmdp_splitting_flush_notify(vma, address, pmd);
+		ret = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return ret;
+}
+
+static void __split_huge_page_refcount(struct page *page)
+{
+	int i;
+	unsigned long head_index = page->index;
+	struct zone *zone = page_zone(page);
+	int zonestat;
+
+	/* prevent PageLRU to go away from under us, and freeze lru stats */
+	spin_lock_irq(&zone->lru_lock);
+	compound_lock(page);
+
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
+		struct page *page_tail = page + i;
+
+		/* tail_page->_count cannot change */
+		atomic_sub(atomic_read(&page_tail->_count), &page->_count);
+		BUG_ON(page_count(page) <= 0);
+		atomic_add(page_mapcount(page) + 1, &page_tail->_count);
+		BUG_ON(atomic_read(&page_tail->_count) <= 0);
+
+		/* after clearing PageTail the gup refcount can be released */
+		smp_mb();
+
+		/*
+		 * retain hwpoison flag of the poisoned tail page:
+		 *   fix for the wrong process being killed on a guest
+		 *   machine (KVM) by memory-failure handling.
+		 */
+		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
+		page_tail->flags |= (page->flags &
+				     ((1L << PG_referenced) |
+				      (1L << PG_swapbacked) |
+				      (1L << PG_mlocked) |
+				      (1L << PG_uptodate)));
+		page_tail->flags |= (1L << PG_dirty);
+
+		/*
+		 * 1) clear PageTail before overwriting first_page
+		 * 2) clear PageTail before clearing PageHead for VM_BUG_ON
+		 */
+		smp_wmb();
+
+		/*
+		 * __split_huge_page_splitting() already set the
+		 * splitting bit in all pmd that could map this
+		 * hugepage, that will ensure no CPU can alter the
+		 * mapcount on the head page. The mapcount is only
+		 * accounted in the head page and it has to be
+		 * transferred to all tail pages in the below code. So
+		 * for this code to be safe, the mapcount can't change
+		 * during the split. But that doesn't mean userland can't
+		 * keep changing and reading the page contents while
+		 * we transfer the mapcount, so the pmd splitting
+		 * status is achieved by setting a reserved bit in the
+		 * pmd, not by clearing the present bit.
+		 */
+		BUG_ON(page_mapcount(page_tail));
+		page_tail->_mapcount = page->_mapcount;
+
+		BUG_ON(page_tail->mapping);
+		page_tail->mapping = page->mapping;
+
+		page_tail->index = ++head_index;
+
+		BUG_ON(!PageAnon(page_tail));
+		BUG_ON(!PageUptodate(page_tail));
+		BUG_ON(!PageDirty(page_tail));
+		BUG_ON(!PageSwapBacked(page_tail));
+
+		mem_cgroup_split_huge_fixup(page, page_tail);
+
+		lru_add_page_tail(zone, page, page_tail);
+	}
+
+	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
+
+	/*
+	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
+	 * so adjust those appropriately if this page is on the LRU.
+	 */
+	if (PageLRU(page)) {
+		zonestat = NR_LRU_BASE + page_lru(page);
+		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
+	}
+
+	ClearPageCompound(page);
+	compound_unlock(page);
+	spin_unlock_irq(&zone->lru_lock);
+
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
+		struct page *page_tail = page + i;
+		BUG_ON(page_count(page_tail) <= 0);
+		/*
+		 * Tail pages may be freed if there wasn't any mapping,
+		 * for example if add_to_swap() is running on an lru page
+		 * that had its mapping zapped. And freeing these pages
+		 * requires taking the lru_lock so we do the put_page
+		 * of the tail pages after the split is complete.
+		 */
+		put_page(page_tail);
+	}
+
+	/*
+	 * Only the head page (now become a regular page) is required
+	 * to be pinned by the caller.
+	 */
+	BUG_ON(page_count(page) <= 0);
+}
+
+static int __split_huge_page_map(struct page *page,
+				 struct vm_area_struct *vma,
+				 unsigned long address)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t *pmd, _pmd;
+	int ret = 0, i;
+	pgtable_t pgtable;
+	unsigned long haddr;
+
+	spin_lock(&mm->page_table_lock);
+	pmd = page_check_address_pmd(page, mm, address,
+				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
+	if (pmd) {
+		pgtable = get_pmd_huge_pte(mm);
+		pmd_populate(mm, &_pmd, pgtable);
+
+		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
+		     i++, haddr += PAGE_SIZE) {
+			pte_t *pte, entry;
+			BUG_ON(PageCompound(page+i));
+			entry = mk_pte(page + i, vma->vm_page_prot);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			if (!pmd_write(*pmd))
+				entry = pte_wrprotect(entry);
+			else
+				BUG_ON(page_mapcount(page) != 1);
+			if (!pmd_young(*pmd))
+				entry = pte_mkold(entry);
+			pte = pte_offset_map(&_pmd, haddr);
+			BUG_ON(!pte_none(*pte));
+			set_pte_at(mm, haddr, pte, entry);
+			pte_unmap(pte);
+		}
+
+		mm->nr_ptes++;
+		smp_wmb(); /* make pte visible before pmd */
+		/*
+		 * Up to this point the pmd is present and huge and
+		 * userland has full access to the hugepage
+		 * during the split (which happens in place). If we
+		 * overwrite the pmd with the not-huge version
+		 * pointing to the pte here (which of course we could
+		 * if all CPUs were bug free), userland could trigger
+		 * a small page size TLB miss on the small sized TLB
+		 * while the hugepage TLB entry is still established
+		 * in the huge TLB. Some CPUs don't like that. See
+		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
+		 * Erratum 383 on page 93. Intel should be safe but
+		 * also warns that it's only safe if the permission
+		 * and cache attributes of the two entries loaded in
+		 * the two TLBs are identical (which should be the case
+		 * here). But it is generally safer to never allow
+		 * small and huge TLB entries for the same virtual
+		 * address to be loaded simultaneously. So instead of
+		 * doing "pmd_populate(); flush_tlb_range();" we first
+		 * mark the current pmd notpresent (atomically because
+		 * here the pmd_trans_huge and pmd_trans_splitting
+		 * must remain set at all times on the pmd until the
+		 * split is complete for this pmd), then we flush the
+		 * SMP TLB and finally we write the non-huge version
+		 * of the pmd entry with pmd_populate.
+		 */
+		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		pmd_populate(mm, pmd, pgtable);
+		ret = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return ret;
+}
+
+/* must be called with anon_vma->root->lock held */
+static void __split_huge_page(struct page *page,
+			      struct anon_vma *anon_vma)
+{
+	int mapcount, mapcount2;
+	struct anon_vma_chain *avc;
+
+	BUG_ON(!PageHead(page));
+	BUG_ON(PageTail(page));
+
+	mapcount = 0;
+	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+		struct vm_area_struct *vma = avc->vma;
+		unsigned long addr = vma_address(page, vma);
+		BUG_ON(is_vma_temporary_stack(vma));
+		if (addr == -EFAULT)
+			continue;
+		mapcount += __split_huge_page_splitting(page, vma, addr);
+	}
+	/*
+	 * It is critical that new vmas are added to the tail of the
+	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
+	 * and establishes a child pmd before
+	 * __split_huge_page_splitting() freezes the parent pmd (so if
+	 * we fail to prevent copy_huge_pmd() from running until the
+	 * whole __split_huge_page() is complete), we will still see
+	 * the newly established pmd of the child later during the
+	 * walk, and will be able to set it as pmd_trans_splitting too.
+	 */
+	if (mapcount != page_mapcount(page))
+		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
+		       mapcount, page_mapcount(page));
+	BUG_ON(mapcount != page_mapcount(page));
+
+	__split_huge_page_refcount(page);
+
+	mapcount2 = 0;
+	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+		struct vm_area_struct *vma = avc->vma;
+		unsigned long addr = vma_address(page, vma);
+		BUG_ON(is_vma_temporary_stack(vma));
+		if (addr == -EFAULT)
+			continue;
+		mapcount2 += __split_huge_page_map(page, vma, addr);
+	}
+	if (mapcount != mapcount2)
+		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
+		       mapcount, mapcount2, page_mapcount(page));
+	BUG_ON(mapcount != mapcount2);
+}
+
+int split_huge_page(struct page *page)
+{
+	struct anon_vma *anon_vma;
+	int ret = 1;
+
+	BUG_ON(!PageAnon(page));
+	anon_vma = page_lock_anon_vma(page);
+	if (!anon_vma)
+		goto out;
+	ret = 0;
+	if (!PageCompound(page))
+		goto out_unlock;
+
+	BUG_ON(!PageSwapBacked(page));
+	__split_huge_page(page, anon_vma);
+
+	BUG_ON(PageCompound(page));
+out_unlock:
+	page_unlock_anon_vma(anon_vma);
+out:
+	return ret;
+}
+
+int hugepage_madvise(struct vm_area_struct *vma,
+		     unsigned long *vm_flags, int advice)
+{
+	switch (advice) {
+	case MADV_HUGEPAGE:
+		/*
+		 * Be somewhat over-protective like KSM for now!
+		 */
+		if (*vm_flags & (VM_HUGEPAGE |
+				 VM_SHARED   | VM_MAYSHARE   |
+				 VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
+				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+				 VM_MIXEDMAP | VM_SAO))
+			return -EINVAL;
+		*vm_flags &= ~VM_NOHUGEPAGE;
+		*vm_flags |= VM_HUGEPAGE;
+		/*
+		 * If the vma becomes good for khugepaged to scan,
+		 * register it here without waiting for a page fault that
+		 * may not happen any time soon.
+		 */
+		if (unlikely(khugepaged_enter_vma_merge(vma)))
+			return -ENOMEM;
+		break;
+	case MADV_NOHUGEPAGE:
+		/*
+		 * Be somewhat over-protective like KSM for now!
+		 */
+		if (*vm_flags & (VM_NOHUGEPAGE |
+				 VM_SHARED   | VM_MAYSHARE   |
+				 VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
+				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+				 VM_MIXEDMAP | VM_SAO))
+			return -EINVAL;
+		*vm_flags &= ~VM_HUGEPAGE;
+		*vm_flags |= VM_NOHUGEPAGE;
+		/*
+		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
+		 * this vma even if we leave the mm registered in khugepaged
+		 * (it may have been registered before VM_NOHUGEPAGE was set).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+static int __init khugepaged_slab_init(void)
+{
+	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
+					  sizeof(struct mm_slot),
+					  __alignof__(struct mm_slot), 0, NULL);
+	if (!mm_slot_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __init khugepaged_slab_free(void)
+{
+	kmem_cache_destroy(mm_slot_cache);
+	mm_slot_cache = NULL;
+}
+
+static inline struct mm_slot *alloc_mm_slot(void)
+{
+	if (!mm_slot_cache)	/* initialization failed */
+		return NULL;
+	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
+}
+
+static inline void free_mm_slot(struct mm_slot *mm_slot)
+{
+	kmem_cache_free(mm_slot_cache, mm_slot);
+}
+
+static int __init mm_slots_hash_init(void)
+{
+	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
+				GFP_KERNEL);
+	if (!mm_slots_hash)
+		return -ENOMEM;
+	return 0;
+}
+
+#if 0
+static void __init mm_slots_hash_free(void)
+{
+	kfree(mm_slots_hash);
+	mm_slots_hash = NULL;
+}
+#endif
+
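+/* Return the mm_slot tracking @mm in the hash, or NULL if it isn't registered. */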
+static struct mm_slot *get_mm_slot(struct mm_struct *mm)
+{
+	struct mm_slot *mm_slot;
+	struct hlist_head *bucket;
+	struct hlist_node *node;
+
+	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+				% MM_SLOTS_HASH_HEADS];
+	hlist_for_each_entry(mm_slot, node, bucket, hash) {
+		if (mm == mm_slot->mm)
+			return mm_slot;
+	}
+	return NULL;
+}
+
+static void insert_to_mm_slots_hash(struct mm_struct *mm,
+				    struct mm_slot *mm_slot)
+{
+	struct hlist_head *bucket;
+
+	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+				% MM_SLOTS_HASH_HEADS];
+	mm_slot->mm = mm;
+	hlist_add_head(&mm_slot->hash, bucket);
+}
+
+static inline int khugepaged_test_exit(struct mm_struct *mm)
+{
+	return atomic_read(&mm->mm_users) == 0;
+}
+
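+/*
+ * Register @mm with khugepaged: set MMF_VM_HUGEPAGE, add the mm to the
+ * scan list, take a reference on the mm and wake up khugepaged if the
+ * list was empty.
+ */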
+int __khugepaged_enter(struct mm_struct *mm)
+{
+	struct mm_slot *mm_slot;
+	int wakeup;
+
+	mm_slot = alloc_mm_slot();
+	if (!mm_slot)
+		return -ENOMEM;
+
+	/* __khugepaged_exit() must not run from under us */
+	VM_BUG_ON(khugepaged_test_exit(mm));
+	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
+		free_mm_slot(mm_slot);
+		return 0;
+	}
+
+	spin_lock(&khugepaged_mm_lock);
+	insert_to_mm_slots_hash(mm, mm_slot);
+	/*
+	 * Insert just behind the scanning cursor, to let the area settle
+	 * down a little.
+	 */
+	wakeup = list_empty(&khugepaged_scan.mm_head);
+	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
+	spin_unlock(&khugepaged_mm_lock);
+
+	atomic_inc(&mm->mm_count);
+	if (wakeup)
+		wake_up_interruptible(&khugepaged_wait);
+
+	return 0;
+}
+
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+{
+	unsigned long hstart, hend;
+	if (!vma->anon_vma)
+		/*
+		 * Not yet faulted in so we will register later in the
+		 * page fault if needed.
+		 */
+		return 0;
+	if (vma->vm_file || vma->vm_ops)
+		/* khugepaged not yet working on file or special mappings */
+		return 0;
+	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+	hend = vma->vm_end & HPAGE_PMD_MASK;
+	if (hstart < hend)
+		return khugepaged_enter(vma);
+	return 0;
+}
+
+void __khugepaged_exit(struct mm_struct *mm)
+{
+	struct mm_slot *mm_slot;
+	int free = 0;
+
+	spin_lock(&khugepaged_mm_lock);
+	mm_slot = get_mm_slot(mm);
+	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
+		hlist_del(&mm_slot->hash);
+		list_del(&mm_slot->mm_node);
+		free = 1;
+	}
+
+	if (free) {
+		spin_unlock(&khugepaged_mm_lock);
+		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
+		free_mm_slot(mm_slot);
+		mmdrop(mm);
+	} else if (mm_slot) {
+		spin_unlock(&khugepaged_mm_lock);
+		/*
+		 * This is required to serialize against
+		 * khugepaged_test_exit() (which is guaranteed to run
+		 * under mmap_sem read mode). Stop here (after we
+		 * return, all pagetables will be destroyed) until
+		 * khugepaged has finished working on the pagetables
+		 * under the mmap_sem.
+		 */
+		down_write(&mm->mmap_sem);
+		up_write(&mm->mmap_sem);
+	} else
+		spin_unlock(&khugepaged_mm_lock);
+}
+
+static void release_pte_page(struct page *page)
+{
+	/* 0 stands for page_is_file_cache(page) == false */
+	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
+	unlock_page(page);
+	putback_lru_page(page);
+}
+
+static void release_pte_pages(pte_t *pte, pte_t *_pte)
+{
+	while (--_pte >= pte) {
+		pte_t pteval = *_pte;
+		if (!pte_none(pteval))
+			release_pte_page(pte_page(pteval));
+	}
+}
+
+static void release_all_pte_pages(pte_t *pte)
+{
+	release_pte_pages(pte, pte + HPAGE_PMD_NR);
+}
+
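+/*
+ * Lock and isolate from the LRU every anonymous page mapped by the pte
+ * range, refusing pages with extra references (gup pins). Returns 1 with
+ * all pages locked and isolated if at least one pte or page was recently
+ * referenced, otherwise releases everything and returns 0.
+ */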
+static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+					unsigned long address,
+					pte_t *pte)
+{
+	struct page *page;
+	pte_t *_pte;
+	int referenced = 0, isolated = 0, none = 0;
+	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
+	     _pte++, address += PAGE_SIZE) {
+		pte_t pteval = *_pte;
+		if (pte_none(pteval)) {
+			if (++none <= khugepaged_max_ptes_none)
+				continue;
+			else {
+				release_pte_pages(pte, _pte);
+				goto out;
+			}
+		}
+		if (!pte_present(pteval) || !pte_write(pteval)) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		page = vm_normal_page(vma, address, pteval);
+		if (unlikely(!page)) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		VM_BUG_ON(PageCompound(page));
+		BUG_ON(!PageAnon(page));
+		VM_BUG_ON(!PageSwapBacked(page));
+
+		/* cannot use mapcount: can't collapse if there's a gup pin */
+		if (page_count(page) != 1) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		/*
+		 * We can do it before isolate_lru_page because the
+		 * page can't be freed from under us. NOTE: PG_lock
+		 * is needed to serialize against split_huge_page
+		 * when invoked from the VM.
+		 */
+		if (!trylock_page(page)) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		/*
+		 * Isolate the page to avoid collapsing a hugepage
+		 * currently in use by the VM.
+		 */
+		if (isolate_lru_page(page)) {
+			unlock_page(page);
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		/* 0 stands for page_is_file_cache(page) == false */
+		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
+		VM_BUG_ON(!PageLocked(page));
+		VM_BUG_ON(PageLRU(page));
+
+		/* If no mapped pte is young, don't collapse the page */
+		if (pte_young(pteval) || PageReferenced(page) ||
+		    mmu_notifier_test_young(vma->vm_mm, address))
+			referenced = 1;
+	}
+	if (unlikely(!referenced))
+		release_all_pte_pages(pte);
+	else
+		isolated = 1;
+out:
+	return isolated;
+}
+
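+/*
+ * Copy the contents of the isolated small pages (zero-filling the holes)
+ * into the new hugepage, then tear down the old ptes, rmap and swap
+ * cache references of the source pages.
+ */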
+static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
+				      struct vm_area_struct *vma,
+				      unsigned long address,
+				      spinlock_t *ptl)
+{
+	pte_t *_pte;
+	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
+		pte_t pteval = *_pte;
+		struct page *src_page;
+
+		if (pte_none(pteval)) {
+			clear_user_highpage(page, address);
+			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
+		} else {
+			src_page = pte_page(pteval);
+			copy_user_highpage(page, src_page, address, vma);
+			VM_BUG_ON(page_mapcount(src_page) != 1);
+			VM_BUG_ON(page_count(src_page) != 2);
+			release_pte_page(src_page);
+			/*
+			 * ptl mostly unnecessary, but preempt has to
+			 * be disabled to update the per-cpu stats
+			 * inside page_remove_rmap().
+			 */
+			spin_lock(ptl);
+			/*
+			 * paravirt calls inside pte_clear here are
+			 * superfluous.
+			 */
+			pte_clear(vma->vm_mm, address, _pte);
+			page_remove_rmap(src_page);
+			spin_unlock(ptl);
+			free_page_and_swap_cache(src_page);
+		}
+
+		address += PAGE_SIZE;
+		page++;
+	}
+}
+
+static void collapse_huge_page(struct mm_struct *mm,
+			       unsigned long address,
+			       struct page **hpage,
+			       struct vm_area_struct *vma,
+			       int node)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	pgtable_t pgtable;
+	struct page *new_page;
+	spinlock_t *ptl;
+	int isolated;
+	unsigned long hstart, hend;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+#ifndef CONFIG_NUMA
+	VM_BUG_ON(!*hpage);
+	new_page = *hpage;
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+		up_read(&mm->mmap_sem);
+		return;
+	}
+#else
+	VM_BUG_ON(*hpage);
+	/*
+	 * Allocate the page while the vma is still valid and under
+	 * the mmap_sem read mode so there is no memory allocation
+	 * later when we take the mmap_sem in write mode. This is
+	 * friendlier behavior (OTOH it may actually hide bugs) towards
+	 * filesystems in userland whose daemons allocate memory in
+	 * the userland I/O paths.  Allocating memory with the
+	 * mmap_sem in read mode is also a good idea to allow greater
+	 * scalability.
+	 */
+	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+				      node);
+	if (unlikely(!new_page)) {
+		up_read(&mm->mmap_sem);
+		*hpage = ERR_PTR(-ENOMEM);
+		return;
+	}
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+		up_read(&mm->mmap_sem);
+		put_page(new_page);
+		return;
+	}
+#endif
+
+	/* after allocating the hugepage upgrade to mmap_sem write mode */
+	up_read(&mm->mmap_sem);
+
+	/*
+	 * Prevent all access to the pagetables, with the exception of
+	 * gup_fast (handled later by the ptep_clear_flush) and the VM
+	 * (handled by the anon_vma lock + PG_lock).
+	 */
+	down_write(&mm->mmap_sem);
+	if (unlikely(khugepaged_test_exit(mm)))
+		goto out;
+
+	vma = find_vma(mm, address);
+	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+	hend = vma->vm_end & HPAGE_PMD_MASK;
+	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+		goto out;
+
+	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+	    (vma->vm_flags & VM_NOHUGEPAGE))
+		goto out;
+
+	/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
+	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+		goto out;
+	if (is_vma_temporary_stack(vma))
+		goto out;
+	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	/* pmd can't go away or become huge under us */
+	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+		goto out;
+
+	anon_vma_lock(vma->anon_vma);
+
+	pte = pte_offset_map(pmd, address);
+	ptl = pte_lockptr(mm, pmd);
+
+	spin_lock(&mm->page_table_lock); /* probably unnecessary */
+	/*
+	 * After this gup_fast can't run anymore. This also removes
+	 * any huge TLB entry from the CPU, so we won't allow
+	 * huge and small TLB entries for the same virtual address,
+	 * to avoid the risk of CPU bugs in that area.
+	 */
+	_pmd = pmdp_clear_flush_notify(vma, address, pmd);
+	spin_unlock(&mm->page_table_lock);
+
+	spin_lock(ptl);
+	isolated = __collapse_huge_page_isolate(vma, address, pte);
+	spin_unlock(ptl);
+
+	if (unlikely(!isolated)) {
+		pte_unmap(pte);
+		spin_lock(&mm->page_table_lock);
+		BUG_ON(!pmd_none(*pmd));
+		set_pmd_at(mm, address, pmd, _pmd);
+		spin_unlock(&mm->page_table_lock);
+		anon_vma_unlock(vma->anon_vma);
+		goto out;
+	}
+
+	/*
+	 * All pages are isolated and locked so anon_vma rmap
+	 * can't run anymore.
+	 */
+	anon_vma_unlock(vma->anon_vma);
+
+	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
+	pte_unmap(pte);
+	__SetPageUptodate(new_page);
+	pgtable = pmd_pgtable(_pmd);
+	VM_BUG_ON(page_count(pgtable) != 1);
+	VM_BUG_ON(page_mapcount(pgtable) != 0);
+
+	_pmd = mk_pmd(new_page, vma->vm_page_prot);
+	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
+	_pmd = pmd_mkhuge(_pmd);
+
+	/*
+	 * spin_lock() below is not the equivalent of smp_wmb(), so
+	 * this is needed to prevent the copy_huge_page writes from
+	 * becoming visible after the set_pmd_at() write.
+	 */
+	smp_wmb();
+
+	spin_lock(&mm->page_table_lock);
+	BUG_ON(!pmd_none(*pmd));
+	page_add_new_anon_rmap(new_page, vma, address);
+	set_pmd_at(mm, address, pmd, _pmd);
+	update_mmu_cache(vma, address, entry);
+	prepare_pmd_huge_pte(pgtable, mm);
+	mm->nr_ptes--;
+	spin_unlock(&mm->page_table_lock);
+
+#ifndef CONFIG_NUMA
+	*hpage = NULL;
+#endif
+	khugepaged_pages_collapsed++;
+out_up_write:
+	up_write(&mm->mmap_sem);
+	return;
+
+out:
+	mem_cgroup_uncharge_page(new_page);
+#ifdef CONFIG_NUMA
+	put_page(new_page);
+#endif
+	goto out_up_write;
+}
+
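+/*
+ * Scan the ptes covered by one pmd and check whether the range is
+ * suitable for collapsing (present, writable, unpinned anonymous pages,
+ * at least one of them recently referenced); if so, try to collapse it
+ * into a hugepage. Returns 1 if a collapse was attempted, in which case
+ * the mmap_sem has been released.
+ */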
+static int khugepaged_scan_pmd(struct mm_struct *mm,
+			       struct vm_area_struct *vma,
+			       unsigned long address,
+			       struct page **hpage)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte, *_pte;
+	int ret = 0, referenced = 0, none = 0;
+	struct page *page;
+	unsigned long _address;
+	spinlock_t *ptl;
+	int node = -1;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+		goto out;
+
+	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
+	     _pte++, _address += PAGE_SIZE) {
+		pte_t pteval = *_pte;
+		if (pte_none(pteval)) {
+			if (++none <= khugepaged_max_ptes_none)
+				continue;
+			else
+				goto out_unmap;
+		}
+		if (!pte_present(pteval) || !pte_write(pteval))
+			goto out_unmap;
+		page = vm_normal_page(vma, _address, pteval);
+		if (unlikely(!page))
+			goto out_unmap;
+		/*
+		 * Choose the node of the first page. This could
+		 * be more sophisticated and look at more pages,
+		 * but isn't for now.
+		 */
+		if (node == -1)
+			node = page_to_nid(page);
+		VM_BUG_ON(PageCompound(page));
+		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
+			goto out_unmap;
+		/* cannot use mapcount: can't collapse if there's a gup pin */
+		if (page_count(page) != 1)
+			goto out_unmap;
+		if (pte_young(pteval) || PageReferenced(page) ||
+		    mmu_notifier_test_young(vma->vm_mm, address))
+			referenced = 1;
+	}
+	if (referenced)
+		ret = 1;
+out_unmap:
+	pte_unmap_unlock(pte, ptl);
+	if (ret)
+		/* collapse_huge_page will return with the mmap_sem released */
+		collapse_huge_page(mm, address, hpage, vma, node);
+out:
+	return ret;
+}
+
+static void collect_mm_slot(struct mm_slot *mm_slot)
+{
+	struct mm_struct *mm = mm_slot->mm;
+
+	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+
+	if (khugepaged_test_exit(mm)) {
+		/* free mm_slot */
+		hlist_del(&mm_slot->hash);
+		list_del(&mm_slot->mm_node);
+
+		/*
+		 * Not strictly needed because the mm exited already.
+		 *
+		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
+		 */
+
+		/* khugepaged_mm_lock actually not necessary for the below */
+		free_mm_slot(mm_slot);
+		mmdrop(mm);
+	}
+}
+
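+/*
+ * Resume scanning from the current cursor and walk vmas until up to
+ * @pages pages have been scanned or a collapse released the mmap_sem.
+ * Called with khugepaged_mm_lock held; the lock is dropped during the
+ * scan and re-taken before returning the progress made.
+ */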
+static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
+					    struct page **hpage)
+{
+	struct mm_slot *mm_slot;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	int progress = 0;
+
+	VM_BUG_ON(!pages);
+	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+
+	if (khugepaged_scan.mm_slot)
+		mm_slot = khugepaged_scan.mm_slot;
+	else {
+		mm_slot = list_entry(khugepaged_scan.mm_head.next,
+				     struct mm_slot, mm_node);
+		khugepaged_scan.address = 0;
+		khugepaged_scan.mm_slot = mm_slot;
+	}
+	spin_unlock(&khugepaged_mm_lock);
+
+	mm = mm_slot->mm;
+	down_read(&mm->mmap_sem);
+	if (unlikely(khugepaged_test_exit(mm)))
+		vma = NULL;
+	else
+		vma = find_vma(mm, khugepaged_scan.address);
+
+	progress++;
+	for (; vma; vma = vma->vm_next) {
+		unsigned long hstart, hend;
+
+		cond_resched();
+		if (unlikely(khugepaged_test_exit(mm))) {
+			progress++;
+			break;
+		}
+
+		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
+		     !khugepaged_always()) ||
+		    (vma->vm_flags & VM_NOHUGEPAGE)) {
+		skip:
+			progress++;
+			continue;
+		}
+		/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
+		if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+			goto skip;
+		if (is_vma_temporary_stack(vma))
+			goto skip;
+
+		VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+
+		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+		hend = vma->vm_end & HPAGE_PMD_MASK;
+		if (hstart >= hend)
+			goto skip;
+		if (khugepaged_scan.address > hend)
+			goto skip;
+		if (khugepaged_scan.address < hstart)
+			khugepaged_scan.address = hstart;
+		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+
+		while (khugepaged_scan.address < hend) {
+			int ret;
+			cond_resched();
+			if (unlikely(khugepaged_test_exit(mm)))
+				goto breakouterloop;
+
+			VM_BUG_ON(khugepaged_scan.address < hstart ||
+				  khugepaged_scan.address + HPAGE_PMD_SIZE >
+				  hend);
+			ret = khugepaged_scan_pmd(mm, vma,
+						  khugepaged_scan.address,
+						  hpage);
+			/* move to next address */
+			khugepaged_scan.address += HPAGE_PMD_SIZE;
+			progress += HPAGE_PMD_NR;
+			if (ret)
+				/* we released mmap_sem so break loop */
+				goto breakouterloop_mmap_sem;
+			if (progress >= pages)
+				goto breakouterloop;
+		}
+	}
+breakouterloop:
+	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
+breakouterloop_mmap_sem:
+
+	spin_lock(&khugepaged_mm_lock);
+	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+	/*
+	 * Release the current mm_slot if this mm is about to die, or
+	 * if we scanned all vmas of this mm.
+	 */
+	if (khugepaged_test_exit(mm) || !vma) {
+		/*
+		 * Make sure that if mm_users is reaching zero while
+		 * khugepaged runs here, khugepaged_exit will find
+		 * mm_slot not pointing to the exiting mm.
+		 */
+		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
+			khugepaged_scan.mm_slot = list_entry(
+				mm_slot->mm_node.next,
+				struct mm_slot, mm_node);
+			khugepaged_scan.address = 0;
+		} else {
+			khugepaged_scan.mm_slot = NULL;
+			khugepaged_full_scans++;
+		}
+
+		collect_mm_slot(mm_slot);
+	}
+
+	return progress;
+}
+
+static int khugepaged_has_work(void)
+{
+	return !list_empty(&khugepaged_scan.mm_head) &&
+		khugepaged_enabled();
+}
+
+static int khugepaged_wait_event(void)
+{
+	return !list_empty(&khugepaged_scan.mm_head) ||
+		!khugepaged_enabled();
+}
+
+static void khugepaged_do_scan(struct page **hpage)
+{
+	unsigned int progress = 0, pass_through_head = 0;
+	unsigned int pages = khugepaged_pages_to_scan;
+
+	barrier(); /* write khugepaged_pages_to_scan to local stack */
+
+	while (progress < pages) {
+		cond_resched();
+
+#ifndef CONFIG_NUMA
+		if (!*hpage) {
+			*hpage = alloc_hugepage(khugepaged_defrag());
+			if (unlikely(!*hpage))
+				break;
+		}
+#else
+		if (IS_ERR(*hpage))
+			break;
+#endif
+
+		if (unlikely(kthread_should_stop() || freezing(current)))
+			break;
+
+		spin_lock(&khugepaged_mm_lock);
+		if (!khugepaged_scan.mm_slot)
+			pass_through_head++;
+		if (khugepaged_has_work() &&
+		    pass_through_head < 2)
+			progress += khugepaged_scan_mm_slot(pages - progress,
+							    hpage);
+		else
+			progress = pages;
+		spin_unlock(&khugepaged_mm_lock);
+	}
+}
+
+static void khugepaged_alloc_sleep(void)
+{
+	DEFINE_WAIT(wait);
+	add_wait_queue(&khugepaged_wait, &wait);
+	schedule_timeout_interruptible(
+		msecs_to_jiffies(
+			khugepaged_alloc_sleep_millisecs));
+	remove_wait_queue(&khugepaged_wait, &wait);
+}
+
+#ifndef CONFIG_NUMA
+static struct page *khugepaged_alloc_hugepage(void)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage)
+			khugepaged_alloc_sleep();
+	} while (unlikely(!hpage) &&
+		 likely(khugepaged_enabled()));
+	return hpage;
+}
+#endif
+
+static void khugepaged_loop(void)
+{
+	struct page *hpage;
+
+#ifdef CONFIG_NUMA
+	hpage = NULL;
+#endif
+	while (likely(khugepaged_enabled())) {
+#ifndef CONFIG_NUMA
+		hpage = khugepaged_alloc_hugepage();
+		if (unlikely(!hpage))
+			break;
+#else
+		if (IS_ERR(hpage)) {
+			khugepaged_alloc_sleep();
+			hpage = NULL;
+		}
+#endif
+
+		khugepaged_do_scan(&hpage);
+#ifndef CONFIG_NUMA
+		if (hpage)
+			put_page(hpage);
+#endif
+		try_to_freeze();
+		if (unlikely(kthread_should_stop()))
+			break;
+		if (khugepaged_has_work()) {
+			DEFINE_WAIT(wait);
+			if (!khugepaged_scan_sleep_millisecs)
+				continue;
+			add_wait_queue(&khugepaged_wait, &wait);
+			schedule_timeout_interruptible(
+				msecs_to_jiffies(
+					khugepaged_scan_sleep_millisecs));
+			remove_wait_queue(&khugepaged_wait, &wait);
+		} else if (khugepaged_enabled())
+			wait_event_freezable(khugepaged_wait,
+					     khugepaged_wait_event());
+	}
+}
+
+static int khugepaged(void *none)
+{
+	struct mm_slot *mm_slot;
+
+	set_freezable();
+	set_user_nice(current, 19);
+
+	/* serialize with start_khugepaged() */
+	mutex_lock(&khugepaged_mutex);
+
+	for (;;) {
+		mutex_unlock(&khugepaged_mutex);
+		VM_BUG_ON(khugepaged_thread != current);
+		khugepaged_loop();
+		VM_BUG_ON(khugepaged_thread != current);
+
+		mutex_lock(&khugepaged_mutex);
+		if (!khugepaged_enabled())
+			break;
+		if (unlikely(kthread_should_stop()))
+			break;
+	}
+
+	spin_lock(&khugepaged_mm_lock);
+	mm_slot = khugepaged_scan.mm_slot;
+	khugepaged_scan.mm_slot = NULL;
+	if (mm_slot)
+		collect_mm_slot(mm_slot);
+	spin_unlock(&khugepaged_mm_lock);
+
+	khugepaged_thread = NULL;
+	mutex_unlock(&khugepaged_mutex);
+
+	return 0;
+}
+
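+/*
+ * Split the hugepage mapped by @pmd, taking a temporary reference on the
+ * page so it cannot be freed while split_huge_page() runs.
+ */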
+void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
+{
+	struct page *page;
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_trans_huge(*pmd))) {
+		spin_unlock(&mm->page_table_lock);
+		return;
+	}
+	page = pmd_page(*pmd);
+	VM_BUG_ON(!page_count(page));
+	get_page(page);
+	spin_unlock(&mm->page_table_lock);
+
+	split_huge_page(page);
+
+	put_page(page);
+	BUG_ON(pmd_trans_huge(*pmd));
+}
+
+static void split_huge_page_address(struct mm_struct *mm,
+				    unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		return;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		return;
+	/*
+	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
+	 * materialize from under us.
+	 */
+	split_huge_page_pmd(mm, pmd);
+}
+
+void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+			     unsigned long start,
+			     unsigned long end,
+			     long adjust_next)
+{
+	/*
+	 * If the new start address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
+	if (start & ~HPAGE_PMD_MASK &&
+	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, start);
+
+	/*
+	 * If the new end address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
+	if (end & ~HPAGE_PMD_MASK &&
+	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, end);
+
+	/*
+	 * If we're also updating vma->vm_next->vm_start, and the new
+	 * vm_next->vm_start isn't hpage aligned and it could previously
+	 * contain a hugepage: check if we need to split a huge pmd.
+	 */
+	if (adjust_next > 0) {
+		struct vm_area_struct *next = vma->vm_next;
+		unsigned long nstart = next->vm_start;
+		nstart += adjust_next << PAGE_SHIFT;
+		if (nstart & ~HPAGE_PMD_MASK &&
+		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
+		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
+			split_huge_page_address(next->vm_mm, nstart);
+	}
+}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8585524..bb0b7c1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,71 +394,6 @@
 	return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-	struct page *p = page;
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
-		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
-	}
-}
-static void clear_huge_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-
-	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, sz);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++) {
-		cond_resched();
-		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
-	}
-}
-
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < pages_per_huge_page(h); ) {
-		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-static void copy_user_huge_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_user_gigantic_page(dst, src, addr, vma);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		cond_resched();
-		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
-	}
-}
-
 static void copy_gigantic_page(struct page *dst, struct page *src)
 {
 	int i;
@@ -1428,6 +1363,7 @@
 
 	return sprintf(buf, "%lu\n", nr_huge_pages);
 }
+
 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 			struct kobject *kobj, struct kobj_attribute *attr,
 			const char *buf, size_t len)
@@ -1440,9 +1376,14 @@
 
 	err = strict_strtoul(buf, 10, &count);
 	if (err)
-		return 0;
+		goto out;
 
 	h = kobj_to_hstate(kobj, &nid);
+	if (h->order >= MAX_ORDER) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	if (nid == NUMA_NO_NODE) {
 		/*
 		 * global hstate attribute
@@ -1468,6 +1409,9 @@
 		NODEMASK_FREE(nodes_allowed);
 
 	return len;
+out:
+	NODEMASK_FREE(nodes_allowed);
+	return err;
 }
 
 static ssize_t nr_hugepages_show(struct kobject *kobj,
@@ -1510,6 +1454,7 @@
 	struct hstate *h = kobj_to_hstate(kobj, NULL);
 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
 }
+
 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
 		struct kobj_attribute *attr, const char *buf, size_t count)
 {
@@ -1517,9 +1462,12 @@
 	unsigned long input;
 	struct hstate *h = kobj_to_hstate(kobj, NULL);
 
+	if (h->order >= MAX_ORDER)
+		return -EINVAL;
+
 	err = strict_strtoul(buf, 10, &input);
 	if (err)
-		return 0;
+		return err;
 
 	spin_lock(&hugetlb_lock);
 	h->nr_overcommit_huge_pages = input;
@@ -1922,13 +1870,19 @@
 {
 	struct hstate *h = &default_hstate;
 	unsigned long tmp;
+	int ret;
 
 	if (!write)
 		tmp = h->max_huge_pages;
 
+	if (write && h->order >= MAX_ORDER)
+		return -EINVAL;
+
 	table->data = &tmp;
 	table->maxlen = sizeof(unsigned long);
-	proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (ret)
+		goto out;
 
 	if (write) {
 		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
@@ -1943,8 +1897,8 @@
 		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
 			NODEMASK_FREE(nodes_allowed);
 	}
-
-	return 0;
+out:
+	return ret;
 }
 
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
@@ -1982,21 +1936,27 @@
 {
 	struct hstate *h = &default_hstate;
 	unsigned long tmp;
+	int ret;
 
 	if (!write)
 		tmp = h->nr_overcommit_huge_pages;
 
+	if (write && h->order >= MAX_ORDER)
+		return -EINVAL;
+
 	table->data = &tmp;
 	table->maxlen = sizeof(unsigned long);
-	proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (ret)
+		goto out;
 
 	if (write) {
 		spin_lock(&hugetlb_lock);
 		h->nr_overcommit_huge_pages = tmp;
 		spin_unlock(&hugetlb_lock);
 	}
-
-	return 0;
+out:
+	return ret;
 }
 
 #endif /* CONFIG_SYSCTL */
@@ -2454,7 +2414,8 @@
 		return VM_FAULT_OOM;
 	}
 
-	copy_user_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma,
+			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 
 	/*
@@ -2558,7 +2519,7 @@
 			ret = -PTR_ERR(page);
 			goto out;
 		}
-		clear_huge_page(page, address, huge_page_size(h));
+		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
 
 		if (vma->vm_flags & VM_MAYSHARE) {
diff --git a/mm/internal.h b/mm/internal.h
index dedb0af..6948820 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -134,6 +134,10 @@
 	}
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern unsigned long vma_address(struct page *page,
+				 struct vm_area_struct *vma);
+#endif
 #else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
@@ -243,7 +247,8 @@
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas);
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *nonblocking);
 
 #define ZONE_RECLAIM_NOSCAN	-2
 #define ZONE_RECLAIM_FULL	-1
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index 177a516..ff0d977 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -75,13 +75,11 @@
 	 * after the module is removed.
 	 */
 	for (i = 0; i < 10; i++) {
-		elem = kmalloc(sizeof(*elem), GFP_KERNEL);
-		pr_info("kmemleak: kmalloc(sizeof(*elem)) = %p\n", elem);
+		elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+		pr_info("kmemleak: kzalloc(sizeof(*elem)) = %p\n", elem);
 		if (!elem)
 			return -ENOMEM;
-		memset(elem, 0, sizeof(*elem));
 		INIT_LIST_HEAD(&elem->list);
-
 		list_add_tail(&elem->list, &test_list);
 	}
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index bd9bc21..84225f3 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -113,7 +113,9 @@
 #define BYTES_PER_POINTER	sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)
+#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
+				 __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
@@ -511,9 +513,10 @@
 	struct kmemleak_object *object;
 	struct prio_tree_node *node;
 
-	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
+	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 	if (!object) {
-		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
+		pr_warning("Cannot allocate a kmemleak_object structure\n");
+		kmemleak_disable();
 		return NULL;
 	}
 
@@ -734,9 +737,9 @@
 		return;
 	}
 
-	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
+	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 	if (!area) {
-		kmemleak_warn("Cannot allocate a scan area\n");
+		pr_warning("Cannot allocate a scan area\n");
 		goto out;
 	}
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 43bc893..c2b2a94 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -34,6 +34,7 @@
 #include <linux/swap.h>
 #include <linux/ksm.h>
 #include <linux/hash.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -411,6 +412,20 @@
 	up_read(&mm->mmap_sem);
 }
 
+static struct page *page_trans_compound_anon(struct page *page)
+{
+	if (PageTransCompound(page)) {
+		struct page *head = compound_trans_head(page);
+		/*
+		 * head may actually be split and freed from under
+		 * us, but that's ok here.
+		 */
+		if (PageAnon(head))
+			return head;
+	}
+	return NULL;
+}
+
 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
 {
 	struct mm_struct *mm = rmap_item->mm;
@@ -430,7 +445,7 @@
 	page = follow_page(vma, addr, FOLL_GET);
 	if (IS_ERR_OR_NULL(page))
 		goto out;
-	if (PageAnon(page)) {
+	if (PageAnon(page) || page_trans_compound_anon(page)) {
 		flush_anon_page(vma, page, addr);
 		flush_dcache_page(page);
 	} else {
@@ -708,6 +723,7 @@
 	if (addr == -EFAULT)
 		goto out;
 
+	BUG_ON(PageTransCompound(page));
 	ptep = page_check_address(page, mm, addr, &ptl, 0);
 	if (!ptep)
 		goto out;
@@ -783,6 +799,7 @@
 		goto out;
 
 	pmd = pmd_offset(pud, addr);
+	BUG_ON(pmd_trans_huge(*pmd));
 	if (!pmd_present(*pmd))
 		goto out;
 
@@ -800,6 +817,8 @@
 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
 	page_remove_rmap(page);
+	if (!page_mapped(page))
+		try_to_free_swap(page);
 	put_page(page);
 
 	pte_unmap_unlock(ptep, ptl);
@@ -808,6 +827,33 @@
 	return err;
 }
 
+static int page_trans_compound_anon_split(struct page *page)
+{
+	int ret = 0;
+	struct page *transhuge_head = page_trans_compound_anon(page);
+	if (transhuge_head) {
+		/* Get the reference on the head to split it. */
+		if (get_page_unless_zero(transhuge_head)) {
+			/*
+			 * Recheck we got the reference while the head
+			 * was still anonymous.
+			 */
+			if (PageAnon(transhuge_head))
+				ret = split_huge_page(transhuge_head);
+			else
+				/*
+				 * Retry later if split_huge_page run
+				 * Retry later if split_huge_page ran
+				 * from under us.
+				ret = 1;
+			put_page(transhuge_head);
+		} else
+			/* Retry later if split_huge_page run from under us. */
+			/* Retry later if split_huge_page ran from under us. */
+	}
+	return ret;
+}
+
 /*
  * try_to_merge_one_page - take two pages and merge them into one
  * @vma: the vma that holds the pte pointing to page
@@ -828,6 +874,9 @@
 
 	if (!(vma->vm_flags & VM_MERGEABLE))
 		goto out;
+	if (PageTransCompound(page) && page_trans_compound_anon_split(page))
+		goto out;
+	BUG_ON(PageTransCompound(page));
 	if (!PageAnon(page))
 		goto out;
 
@@ -1247,6 +1296,18 @@
 
 	slot = ksm_scan.mm_slot;
 	if (slot == &ksm_mm_head) {
+		/*
+		 * A number of pages can hang around indefinitely on per-cpu
+		 * pagevecs, raised page count preventing write_protect_page
+		 * from merging them.  Though it doesn't really matter much,
+		 * it is puzzling to see some stuck in pages_volatile until
+		 * other activity jostles them out, and they also prevented
+		 * LTP's KSM test from succeeding deterministically; so drain
+		 * them here (here rather than on entry to ksm_do_scan(),
+		 * so we don't IPI too often when pages_to_scan is set low).
+		 */
+		lru_add_drain_all();
+
 		root_unstable_tree = RB_ROOT;
 
 		spin_lock(&ksm_mmlist_lock);
@@ -1277,7 +1338,13 @@
 			if (ksm_test_exit(mm))
 				break;
 			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
-			if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
+			if (IS_ERR_OR_NULL(*page)) {
+				ksm_scan.address += PAGE_SIZE;
+				cond_resched();
+				continue;
+			}
+			if (PageAnon(*page) ||
+			    page_trans_compound_anon(*page)) {
 				flush_anon_page(vma, *page, ksm_scan.address);
 				flush_dcache_page(*page);
 				rmap_item = get_next_rmap_item(slot,
@@ -1291,8 +1358,7 @@
 				up_read(&mm->mmap_sem);
 				return rmap_item;
 			}
-			if (!IS_ERR_OR_NULL(*page))
-				put_page(*page);
+			put_page(*page);
 			ksm_scan.address += PAGE_SIZE;
 			cond_resched();
 		}
@@ -1352,7 +1418,7 @@
 	struct rmap_item *rmap_item;
 	struct page *uninitialized_var(page);
 
-	while (scan_npages--) {
+	while (scan_npages-- && likely(!freezing(current))) {
 		cond_resched();
 		rmap_item = scan_get_next_rmap_item(&page);
 		if (!rmap_item)
@@ -1370,6 +1436,7 @@
 
 static int ksm_scan_thread(void *nothing)
 {
+	set_freezable();
 	set_user_nice(current, 5);
 
 	while (!kthread_should_stop()) {
@@ -1378,11 +1445,13 @@
 			ksm_do_scan(ksm_thread_pages_to_scan);
 		mutex_unlock(&ksm_thread_mutex);
 
+		try_to_freeze();
+
 		if (ksmd_should_run()) {
 			schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
-			wait_event_interruptible(ksm_thread_wait,
+			wait_event_freezable(ksm_thread_wait,
 				ksmd_should_run() || kthread_should_stop());
 		}
 	}
diff --git a/mm/madvise.c b/mm/madvise.c
index 319528b..2221491 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -71,6 +71,12 @@
 		if (error)
 			goto out;
 		break;
+	case MADV_HUGEPAGE:
+	case MADV_NOHUGEPAGE:
+		error = hugepage_madvise(vma, &new_flags, behavior);
+		if (error)
+			goto out;
+		break;
 	}
 
 	if (new_flags == vma->vm_flags) {
@@ -283,6 +289,10 @@
 	case MADV_MERGEABLE:
 	case MADV_UNMERGEABLE:
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	case MADV_HUGEPAGE:
+	case MADV_NOHUGEPAGE:
+#endif
 		return 1;
 
 	default:
diff --git a/mm/memblock.c b/mm/memblock.c
index 400dc62..4618fda 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -137,8 +137,6 @@
 
 	BUG_ON(0 == size);
 
-	size = memblock_align_up(size, align);
-
 	/* Pump up max_addr */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
@@ -683,13 +681,13 @@
 
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
-	int idx = memblock_search(&memblock.reserved, base);
+	int idx = memblock_search(&memblock.memory, base);
 
 	if (idx == -1)
 		return 0;
-	return memblock.reserved.regions[idx].base <= base &&
-		(memblock.reserved.regions[idx].base +
-		 memblock.reserved.regions[idx].size) >= (base + size);
+	return memblock.memory.regions[idx].base <= base &&
+		(memblock.memory.regions[idx].base +
+		 memblock.memory.regions[idx].size) >= (base + size);
 }
 
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00bb8a6..da53a25 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -292,7 +292,6 @@
 	unsigned long moved_charge;
 	unsigned long moved_swap;
 	struct task_struct *moving_task;	/* a task moving charges */
-	struct mm_struct *mm;
 	wait_queue_head_t waitq;		/* a waitq for other context */
 } mc = {
 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
@@ -601,23 +600,24 @@
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
-					 struct page_cgroup *pc,
-					 bool charge)
+					 bool file, int nr_pages)
 {
-	int val = (charge) ? 1 : -1;
-
 	preempt_disable();
 
-	if (PageCgroupCache(pc))
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
+	if (file)
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
 	else
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
 
-	if (charge)
+	/* a pagein of a big page counts as one event, so ignore the page size */
+	if (nr_pages > 0)
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
-	else
+	else {
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
-	__this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
+		nr_pages = -nr_pages; /* for event */
+	}
+
+	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
 
 	preempt_enable();
 }
@@ -816,12 +816,12 @@
 	 * removed from global LRU.
 	 */
 	mz = page_cgroup_zoneinfo(pc);
-	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+	/* huge page split is done under lru_lock, so we have no races. */
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
 	VM_BUG_ON(list_empty(&pc->lru));
 	list_del_init(&pc->lru);
-	return;
 }
 
 void mem_cgroup_del_lru(struct page *page)
@@ -838,13 +838,12 @@
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	/* unused or root page is not rotated. */
-	if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
+	if (!PageCgroupUsed(pc))
+		return;
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
+	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
 	mz = page_cgroup_zoneinfo(pc);
 	list_move(&pc->lru, &mz->lists[lru]);
@@ -859,16 +858,13 @@
 		return;
 	pc = lookup_page_cgroup(page);
 	VM_BUG_ON(PageCgroupAcctLRU(pc));
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return;
-
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
-	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	/* huge page split is done under lru_lock, so we have no races. */
+	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
@@ -1032,14 +1028,10 @@
 		return NULL;
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return NULL;
-
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
 	if (!mz)
 		return NULL;
@@ -1087,7 +1079,7 @@
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 		case -EBUSY:
 			/* we don't affect global LRU but rotate in our LRU */
@@ -1121,6 +1113,23 @@
 	return false;
 }
 
+/**
+ * mem_cgroup_check_margin - check if the memory cgroup allows charging
+ * @mem: memory cgroup to check
+ * @bytes: the number of bytes the caller intends to charge
+ *
+ * Returns true if @mem can be charged @bytes without exceeding the
+ * limit, false otherwise.
+ */
+static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes)
+{
+	if (!res_counter_check_margin(&mem->res, bytes))
+		return false;
+	if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes))
+		return false;
+	return true;
+}
+
 static unsigned int get_swappiness(struct mem_cgroup *memcg)
 {
 	struct cgroup *cgrp = memcg->css.cgroup;
@@ -1312,8 +1321,9 @@
 	u64 limit;
 	u64 memsw;
 
-	limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
-			total_swap_pages;
+	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+	limit += total_swap_pages << PAGE_SHIFT;
+
 	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
 	/*
 	 * If memsw is finite and limits the amount of swap space available
@@ -1600,11 +1610,13 @@
  * possibility of race condition. If there is, we take a lock.
  */
 
-static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx, int val)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	bool need_unlock = false;
+	unsigned long uninitialized_var(flags);
 
 	if (unlikely(!pc))
 		return;
@@ -1614,39 +1626,36 @@
 	if (unlikely(!mem || !PageCgroupUsed(pc)))
 		goto out;
 	/* pc->mem_cgroup is unstable ? */
-	if (unlikely(mem_cgroup_stealed(mem))) {
+	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
 		/* take a lock against to access pc->mem_cgroup */
-		lock_page_cgroup(pc);
+		move_lock_page_cgroup(pc, &flags);
 		need_unlock = true;
 		mem = pc->mem_cgroup;
 		if (!mem || !PageCgroupUsed(pc))
 			goto out;
 	}
 
-	this_cpu_add(mem->stat->count[idx], val);
-
 	switch (idx) {
-	case MEM_CGROUP_STAT_FILE_MAPPED:
+	case MEMCG_NR_FILE_MAPPED:
 		if (val > 0)
 			SetPageCgroupFileMapped(pc);
 		else if (!page_mapped(page))
 			ClearPageCgroupFileMapped(pc);
+		idx = MEM_CGROUP_STAT_FILE_MAPPED;
 		break;
 	default:
 		BUG();
 	}
 
+	this_cpu_add(mem->stat->count[idx], val);
+
 out:
 	if (unlikely(need_unlock))
-		unlock_page_cgroup(pc);
+		move_unlock_page_cgroup(pc, &flags);
 	rcu_read_unlock();
 	return;
 }
-
-void mem_cgroup_update_file_mapped(struct page *page, int val)
-{
-	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
-}
+EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
@@ -1842,27 +1851,39 @@
 		if (likely(!ret))
 			return CHARGE_OK;
 
+		res_counter_uncharge(&mem->res, csize);
 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
 		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
 	} else
 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
-
-	if (csize > PAGE_SIZE) /* change csize and retry */
+	/*
+	 * csize can be either a huge page (HPAGE_SIZE), a batch of
+	 * regular pages (CHARGE_SIZE), or a single regular page
+	 * (PAGE_SIZE).
+	 *
+	 * Never reclaim on behalf of optional batching, retry with a
+	 * single page instead.
+	 */
+	if (csize == CHARGE_SIZE)
 		return CHARGE_RETRY;
 
 	if (!(gfp_mask & __GFP_WAIT))
 		return CHARGE_WOULDBLOCK;
 
 	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
-					gfp_mask, flags);
+					      gfp_mask, flags);
+	if (mem_cgroup_check_margin(mem_over_limit, csize))
+		return CHARGE_RETRY;
 	/*
-	 * try_to_free_mem_cgroup_pages() might not give us a full
-	 * picture of reclaim. Some pages are reclaimed and might be
-	 * moved to swap cache or just unmapped from the cgroup.
-	 * Check the limit again to see if the reclaim reduced the
-	 * current usage of the cgroup before giving up
+	 * Even though the limit is exceeded at this point, reclaim
+	 * may have been able to free some pages.  Retry the charge
+	 * before killing the task.
+	 *
+	 * Only for regular pages, though: huge pages are rather
+	 * unlikely to succeed so close to the limit, and we fall back
+	 * to regular pages anyway in case of failure.
 	 */
-	if (ret || mem_cgroup_check_under_limit(mem_over_limit))
+	if (csize == PAGE_SIZE && ret)
 		return CHARGE_RETRY;
 
 	/*
@@ -1887,12 +1908,14 @@
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-		gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
+				   gfp_t gfp_mask,
+				   struct mem_cgroup **memcg, bool oom,
+				   int page_size)
 {
 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup *mem = NULL;
 	int ret;
-	int csize = CHARGE_SIZE;
+	int csize = max(CHARGE_SIZE, (unsigned long) page_size);
 
 	/*
 	 * Unlike gloval-vm's OOM-kill, we're not in memory shortage
@@ -1917,7 +1940,7 @@
 		VM_BUG_ON(css_is_removed(&mem->css));
 		if (mem_cgroup_is_root(mem))
 			goto done;
-		if (consume_stock(mem))
+		if (page_size == PAGE_SIZE && consume_stock(mem))
 			goto done;
 		css_get(&mem->css);
 	} else {
@@ -1940,7 +1963,7 @@
 			rcu_read_unlock();
 			goto done;
 		}
-		if (consume_stock(mem)) {
+		if (page_size == PAGE_SIZE && consume_stock(mem)) {
 			/*
 			 * It seems dagerous to access memcg without css_get().
 			 * But considering how consume_stok works, it's not
@@ -1981,7 +2004,7 @@
 		case CHARGE_OK:
 			break;
 		case CHARGE_RETRY: /* not in OOM situation but retry */
-			csize = PAGE_SIZE;
+			csize = page_size;
 			css_put(&mem->css);
 			mem = NULL;
 			goto again;
@@ -2002,8 +2025,8 @@
 		}
 	} while (ret != CHARGE_OK);
 
-	if (csize > PAGE_SIZE)
-		refill_stock(mem, csize - PAGE_SIZE);
+	if (csize > page_size)
+		refill_stock(mem, csize - page_size);
 	css_put(&mem->css);
 done:
 	*memcg = mem;
@@ -2031,9 +2054,10 @@
 	}
 }
 
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+				     int page_size)
 {
-	__mem_cgroup_cancel_charge(mem, 1);
+	__mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
 }
 
 /*
@@ -2083,15 +2107,13 @@
 	return mem;
 }
 
-/*
- * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
- * USED state. If already USED, uncharge and return.
- */
-
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
-				     struct page_cgroup *pc,
-				     enum charge_type ctype)
+				       struct page_cgroup *pc,
+				       enum charge_type ctype,
+				       int page_size)
 {
+	int nr_pages = page_size >> PAGE_SHIFT;
+
 	/* try_charge() can return NULL to *memcg, taking care of it. */
 	if (!mem)
 		return;
@@ -2099,10 +2121,13 @@
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
-		mem_cgroup_cancel_charge(mem);
+		mem_cgroup_cancel_charge(mem, page_size);
 		return;
 	}
-
+	/*
+	 * We don't need page_cgroup_lock for the tail pages, because they are
+	 * not accessed by any other context at this point.
+	 */
 	pc->mem_cgroup = mem;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2126,8 +2151,7 @@
 		break;
 	}
 
-	mem_cgroup_charge_statistics(mem, pc, true);
-
+	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
@@ -2137,6 +2161,48 @@
 	memcg_check_events(mem, pc->page);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
+			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
+/*
+ * Because tail pages are not marked as "used", set the flag here. We're
+ * under zone->lru_lock, 'splitting on pmd' and compound_lock.
+ */
+void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
+{
+	struct page_cgroup *head_pc = lookup_page_cgroup(head);
+	struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
+	unsigned long flags;
+
+	if (mem_cgroup_disabled())
+		return;
+	/*
+	 * We have no races with charge/uncharge but will have races with
+	 * page state accounting.
+	 */
+	move_lock_page_cgroup(head_pc, &flags);
+
+	tail_pc->mem_cgroup = head_pc->mem_cgroup;
+	smp_wmb(); /* see __commit_charge() */
+	if (PageCgroupAcctLRU(head_pc)) {
+		enum lru_list lru;
+		struct mem_cgroup_per_zone *mz;
+
+		/*
+		 * LRU flags cannot be copied because we need to add the tail
+		 * page to the LRU by generic call and our hook will be called.
+		 * We hold lru_lock, so reduce the counter directly.
+		 */
+		lru = page_lru(head);
+		mz = page_cgroup_zoneinfo(head_pc);
+		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+	}
+	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
+	move_unlock_page_cgroup(head_pc, &flags);
+}
+#endif
+
 /**
  * __mem_cgroup_move_account - move account of the page
  * @pc:	page_cgroup of the page.
@@ -2155,8 +2221,11 @@
  */
 
 static void __mem_cgroup_move_account(struct page_cgroup *pc,
-	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
+	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge,
+	int charge_size)
 {
+	int nr_pages = charge_size >> PAGE_SHIFT;
+
 	VM_BUG_ON(from == to);
 	VM_BUG_ON(PageLRU(pc->page));
 	VM_BUG_ON(!page_is_cgroup_locked(pc));
@@ -2170,14 +2239,14 @@
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, pc, false);
+	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
-		mem_cgroup_cancel_charge(from);
+		mem_cgroup_cancel_charge(from, charge_size);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, pc, true);
+	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
 	/*
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
@@ -2192,12 +2261,25 @@
  * __mem_cgroup_move_account()
  */
 static int mem_cgroup_move_account(struct page_cgroup *pc,
-		struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
+		struct mem_cgroup *from, struct mem_cgroup *to,
+		bool uncharge, int charge_size)
 {
 	int ret = -EINVAL;
+	unsigned long flags;
+	/*
+	 * The page is isolated from the LRU, so the collapse function
+	 * will not handle this page. But page splitting can happen.
+	 * Do this check under compound_page_lock(); the caller should
+	 * hold it.
+	 */
+	if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
+		return -EBUSY;
+
 	lock_page_cgroup(pc);
 	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
-		__mem_cgroup_move_account(pc, from, to, uncharge);
+		move_lock_page_cgroup(pc, &flags);
+		__mem_cgroup_move_account(pc, from, to, uncharge, charge_size);
+		move_unlock_page_cgroup(pc, &flags);
 		ret = 0;
 	}
 	unlock_page_cgroup(pc);
@@ -2221,6 +2303,8 @@
 	struct cgroup *cg = child->css.cgroup;
 	struct cgroup *pcg = cg->parent;
 	struct mem_cgroup *parent;
+	int page_size = PAGE_SIZE;
+	unsigned long flags;
 	int ret;
 
 	/* Is ROOT ? */
@@ -2233,14 +2317,24 @@
 	if (isolate_lru_page(page))
 		goto put;
 
+	if (PageTransHuge(page))
+		page_size = HPAGE_SIZE;
+
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask,
+				&parent, false, page_size);
 	if (ret || !parent)
 		goto put_back;
 
-	ret = mem_cgroup_move_account(pc, child, parent, true);
+	if (page_size > PAGE_SIZE)
+		flags = compound_lock_irqsave(page);
+
+	ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
 	if (ret)
-		mem_cgroup_cancel_charge(parent);
+		mem_cgroup_cancel_charge(parent, page_size);
+
+	if (page_size > PAGE_SIZE)
+		compound_unlock_irqrestore(page, flags);
 put_back:
 	putback_lru_page(page);
 put:
@@ -2259,20 +2353,32 @@
 				gfp_t gfp_mask, enum charge_type ctype)
 {
 	struct mem_cgroup *mem = NULL;
+	int page_size = PAGE_SIZE;
 	struct page_cgroup *pc;
+	bool oom = true;
 	int ret;
 
+	if (PageTransHuge(page)) {
+		page_size <<= compound_order(page);
+		VM_BUG_ON(!PageTransHuge(page));
+		/*
+		 * Never OOM-kill a process for a huge page.  The
+		 * fault handler will fall back to regular pages.
+		 */
+		oom = false;
+	}
+
 	pc = lookup_page_cgroup(page);
 	/* can happen at boot */
 	if (unlikely(!pc))
 		return 0;
 	prefetchw(pc);
 
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
 	if (ret || !mem)
 		return ret;
 
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, page_size);
 	return 0;
 }
 
@@ -2281,8 +2387,6 @@
 {
 	if (mem_cgroup_disabled())
 		return 0;
-	if (PageCompound(page))
-		return 0;
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -2388,13 +2492,13 @@
 	if (!mem)
 		goto charge_cur_mm;
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
 	css_put(&mem->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, ptr, true);
+	return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
 }
 
 static void
@@ -2410,7 +2514,7 @@
 	cgroup_exclude_rmdir(&ptr->css);
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, ctype);
+	__mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -2459,11 +2563,12 @@
 		return;
 	if (!mem)
 		return;
-	mem_cgroup_cancel_charge(mem);
+	mem_cgroup_cancel_charge(mem, PAGE_SIZE);
 }
 
 static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
+	      int page_size)
 {
 	struct memcg_batch_info *batch = NULL;
 	bool uncharge_memsw = true;
@@ -2490,6 +2595,9 @@
 	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
 		goto direct_uncharge;
 
+	if (page_size != PAGE_SIZE)
+		goto direct_uncharge;
+
 	/*
 	 * In typical case, batch->memcg == mem. This means we can
 	 * merge a series of uncharges to an uncharge of res_counter.
@@ -2503,9 +2611,9 @@
 		batch->memsw_bytes += PAGE_SIZE;
 	return;
 direct_uncharge:
-	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	res_counter_uncharge(&mem->res, page_size);
 	if (uncharge_memsw)
-		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+		res_counter_uncharge(&mem->memsw, page_size);
 	if (unlikely(batch->memcg != mem))
 		memcg_oom_recover(mem);
 	return;
@@ -2517,8 +2625,10 @@
 static struct mem_cgroup *
 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
+	int count;
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
+	int page_size = PAGE_SIZE;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2526,6 +2636,12 @@
 	if (PageSwapCache(page))
 		return NULL;
 
+	if (PageTransHuge(page)) {
+		page_size <<= compound_order(page);
+		VM_BUG_ON(!PageTransHuge(page));
+	}
+
+	count = page_size >> PAGE_SHIFT;
 	/*
 	 * Check if our page_cgroup is valid
 	 */
@@ -2558,7 +2674,7 @@
 		break;
 	}
 
-	mem_cgroup_charge_statistics(mem, pc, false);
+	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count);
 
 	ClearPageCgroupUsed(pc);
 	/*
@@ -2579,7 +2695,7 @@
 		mem_cgroup_get(mem);
 	}
 	if (!mem_cgroup_is_root(mem))
-		__do_uncharge(mem, ctype);
+		__do_uncharge(mem, ctype, page_size);
 
 	return mem;
 
@@ -2774,6 +2890,7 @@
 	enum charge_type ctype;
 	int ret = 0;
 
+	VM_BUG_ON(PageTransHuge(page));
 	if (mem_cgroup_disabled())
 		return 0;
 
@@ -2823,7 +2940,7 @@
 		return 0;
 
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
+	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
 	css_put(&mem->css);/* drop extra refcnt */
 	if (ret || *ptr == NULL) {
 		if (PageAnon(page)) {
@@ -2850,13 +2967,13 @@
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
 	return ret;
 }
 
 /* remove redundant charge if migration failed*/
 void mem_cgroup_end_migration(struct mem_cgroup *mem,
-	struct page *oldpage, struct page *newpage)
+	struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 	struct page *used, *unused;
 	struct page_cgroup *pc;
@@ -2865,8 +2982,7 @@
 		return;
 	/* blocks rmdir() */
 	cgroup_exclude_rmdir(&mem->css);
-	/* at migration success, oldpage->mapping is NULL. */
-	if (oldpage->mapping) {
+	if (!migration_ok) {
 		used = oldpage;
 		unused = newpage;
 	} else {
@@ -4176,13 +4292,11 @@
 	 */
 	if (!node_state(node, N_NORMAL_MEMORY))
 		tmp = -1;
-	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
+	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
 	if (!pn)
 		return 1;
 
 	mem->info.nodeinfo[node] = pn;
-	memset(pn, 0, sizeof(*pn));
-
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
 		for_each_lru(l)
@@ -4206,14 +4320,13 @@
 
 	/* Can be very big if MAX_NUMNODES is very big */
 	if (size < PAGE_SIZE)
-		mem = kmalloc(size, GFP_KERNEL);
+		mem = kzalloc(size, GFP_KERNEL);
 	else
-		mem = vmalloc(size);
+		mem = vzalloc(size);
 
 	if (!mem)
 		return NULL;
 
-	memset(mem, 0, size);
 	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
 	if (!mem->stat)
 		goto out_free;
@@ -4461,7 +4574,8 @@
 			batch_count = PRECHARGE_COUNT_AT_ONCE;
 			cond_resched();
 		}
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+					      PAGE_SIZE);
 		if (ret || !mem)
 			/* mem_cgroup_clear_mc() will do uncharge later */
 			return -ENOMEM;
@@ -4623,6 +4737,7 @@
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4638,7 +4753,7 @@
 	unsigned long precharge;
 	struct vm_area_struct *vma;
 
-	/* We've already held the mmap_sem */
+	down_read(&mm->mmap_sem);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		struct mm_walk mem_cgroup_count_precharge_walk = {
 			.pmd_entry = mem_cgroup_count_precharge_pte_range,
@@ -4650,6 +4765,7 @@
 		walk_page_range(vma->vm_start, vma->vm_end,
 					&mem_cgroup_count_precharge_walk);
 	}
+	up_read(&mm->mmap_sem);
 
 	precharge = mc.precharge;
 	mc.precharge = 0;
@@ -4659,10 +4775,15 @@
 
 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
 {
-	return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
+	unsigned long precharge = mem_cgroup_count_precharge(mm);
+
+	VM_BUG_ON(mc.moving_task);
+	mc.moving_task = current;
+	return mem_cgroup_do_precharge(precharge);
 }
 
-static void mem_cgroup_clear_mc(void)
+/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
+static void __mem_cgroup_clear_mc(void)
 {
 	struct mem_cgroup *from = mc.from;
 	struct mem_cgroup *to = mc.to;
@@ -4697,23 +4818,28 @@
 						PAGE_SIZE * mc.moved_swap);
 		}
 		/* we've already done mem_cgroup_get(mc.to) */
-
 		mc.moved_swap = 0;
 	}
-	if (mc.mm) {
-		up_read(&mc.mm->mmap_sem);
-		mmput(mc.mm);
-	}
+	memcg_oom_recover(from);
+	memcg_oom_recover(to);
+	wake_up_all(&mc.waitq);
+}
+
+static void mem_cgroup_clear_mc(void)
+{
+	struct mem_cgroup *from = mc.from;
+
+	/*
+	 * we must clear moving_task before waking up waiters at the end of
+	 * task migration.
+	 */
+	mc.moving_task = NULL;
+	__mem_cgroup_clear_mc();
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
 	spin_unlock(&mc.lock);
-	mc.moving_task = NULL;
-	mc.mm = NULL;
 	mem_cgroup_end_move(from);
-	memcg_oom_recover(from);
-	memcg_oom_recover(to);
-	wake_up_all(&mc.waitq);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
@@ -4735,38 +4861,23 @@
 			return 0;
 		/* We move charges only when we move a owner of the mm */
 		if (mm->owner == p) {
-			/*
-			 * We do all the move charge works under one mmap_sem to
-			 * avoid deadlock with down_write(&mmap_sem)
-			 * -> try_charge() -> if (mc.moving_task) -> sleep.
-			 */
-			down_read(&mm->mmap_sem);
-
 			VM_BUG_ON(mc.from);
 			VM_BUG_ON(mc.to);
 			VM_BUG_ON(mc.precharge);
 			VM_BUG_ON(mc.moved_charge);
 			VM_BUG_ON(mc.moved_swap);
-			VM_BUG_ON(mc.moving_task);
-			VM_BUG_ON(mc.mm);
-
 			mem_cgroup_start_move(from);
 			spin_lock(&mc.lock);
 			mc.from = from;
 			mc.to = mem;
-			mc.precharge = 0;
-			mc.moved_charge = 0;
-			mc.moved_swap = 0;
 			spin_unlock(&mc.lock);
-			mc.moving_task = current;
-			mc.mm = mm;
+			/* We set mc.moving_task later */
 
 			ret = mem_cgroup_precharge_mc(mm);
 			if (ret)
 				mem_cgroup_clear_mc();
-			/* We call up_read() and mmput() in clear_mc(). */
-		} else
-			mmput(mm);
+		}
+		mmput(mm);
 	}
 	return ret;
 }
@@ -4789,6 +4900,7 @@
 	spinlock_t *ptl;
 
 retry:
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
@@ -4809,7 +4921,7 @@
 				goto put;
 			pc = lookup_page_cgroup(page);
 			if (!mem_cgroup_move_account(pc,
-						mc.from, mc.to, false)) {
+					mc.from, mc.to, false, PAGE_SIZE)) {
 				mc.precharge--;
 				/* we uncharge from mc.from later. */
 				mc.moved_charge++;
@@ -4854,7 +4966,19 @@
 	struct vm_area_struct *vma;
 
 	lru_add_drain_all();
-	/* We've already held the mmap_sem */
+retry:
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+		/*
+		 * Someone who is holding the mmap_sem might be waiting in
+		 * the waitq. So we cancel all extra charges, wake up all waiters,
+		 * and retry. Because we cancel precharges, we might not be able
+		 * to move enough charges, but moving charge is a best-effort
+		 * feature anyway, so it wouldn't be a big problem.
+		 */
+		__mem_cgroup_clear_mc();
+		cond_resched();
+		goto retry;
+	}
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		int ret;
 		struct mm_walk mem_cgroup_move_charge_walk = {
@@ -4873,6 +4997,7 @@
 			 */
 			break;
 	}
+	up_read(&mm->mmap_sem);
 }
 
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -4881,11 +5006,17 @@
 				struct task_struct *p,
 				bool threadgroup)
 {
-	if (!mc.mm)
+	struct mm_struct *mm;
+
+	if (!mc.to)
 		/* no need to move charge */
 		return;
 
-	mem_cgroup_move_charge(mc.mm);
+	mm = get_task_mm(p);
+	if (mm) {
+		mem_cgroup_move_charge(mm);
+		mmput(mm);
+	}
 	mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
@@ -4929,9 +5060,9 @@
 static int __init enable_swap_account(char *s)
 {
 	/* consider enabled if no parameter or 1 is given */
-	if (!s || !strcmp(s, "1"))
+	if (!(*s) || !strcmp(s, "=1"))
 		really_do_swap_account = 1;
-	else if (!strcmp(s, "0"))
+	else if (!strcmp(s, "=0"))
 		really_do_swap_account = 0;
 	return 1;
 }
@@ -4939,7 +5070,8 @@
 
 static int __init disable_swap_account(char *s)
 {
-	enable_swap_account("0");
+	printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
+	enable_swap_account("=0");
 	return 1;
 }
 __setup("noswapaccount", disable_swap_account);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 46ab2c0..0207c2f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -203,7 +203,7 @@
 #ifdef __ARCH_SI_TRAPNO
 	si.si_trapno = trapno;
 #endif
-	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
+	si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
 	/*
 	 * Don't use force here, it's convenient if the signal
 	 * can be temporarily blocked.
@@ -233,8 +233,8 @@
 	}
 
 	/*
-	 * Only all shrink_slab here (which would also
-	 * shrink other caches) if access is not potentially fatal.
+	 * Only call shrink_slab here (which would also shrink other caches) if
+	 * access is not potentially fatal.
 	 */
 	if (access) {
 		int nr;
@@ -854,6 +854,7 @@
 	int ret;
 	int kill = 1;
 	struct page *hpage = compound_head(p);
+	struct page *ppage;
 
 	if (PageReserved(p) || PageSlab(p))
 		return SWAP_SUCCESS;
@@ -895,6 +896,44 @@
 	}
 
 	/*
+	 * ppage: the poisoned page to operate on
+	 *   if p is a regular (4k) page,
+	 *        ppage == the real poisoned page;
+	 *   else p is hugetlb or THP, and ppage == the head page.
+	 */
+	ppage = hpage;
+
+	if (PageTransHuge(hpage)) {
+		/*
+		 * Verify that this isn't a hugetlbfs head page. The check for
+		 * PageAnon is just to avoid tripping a split_huge_page
+		 * internal debug check, as split_huge_page refuses to deal with
+		 * anything that isn't an anon page. PageAnon can't go away from
+		 * under us because we hold a refcount on the hpage. Without a
+		 * refcount on the hpage, split_huge_page can't be safely called
+		 * in the first place; having a refcount on the tail isn't
+		 * enough to be safe.
+		 */
+		if (!PageHuge(hpage) && PageAnon(hpage)) {
+			if (unlikely(split_huge_page(hpage))) {
+				/*
+				 * FIXME: if splitting the THP fails, it would be
+				 * better to stop the following operation rather
+				 * than cause a panic by unmapping. The system
+				 * might survive if the page is freed later.
+				 */
+				printk(KERN_INFO
+					"MCE %#lx: failed to split THP\n", pfn);
+
+				BUG_ON(!PageHWPoison(p));
+				return SWAP_FAIL;
+			}
+			/* THP is split, so ppage should be the real poisoned page. */
+			ppage = p;
+		}
+	}
+
+	/*
 	 * First collect all the processes that have the page
 	 * mapped in dirty form.  This has to be done before try_to_unmap,
 	 * because ttu takes the rmap data structures down.
@@ -903,12 +942,18 @@
 	 * there's nothing that can be done.
 	 */
 	if (kill)
-		collect_procs(hpage, &tokill);
+		collect_procs(ppage, &tokill);
 
-	ret = try_to_unmap(hpage, ttu);
+	if (hpage != ppage)
+		lock_page_nosync(ppage);
+
+	ret = try_to_unmap(ppage, ttu);
 	if (ret != SWAP_SUCCESS)
 		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
-				pfn, page_mapcount(hpage));
+				pfn, page_mapcount(ppage));
+
+	if (hpage != ppage)
+		unlock_page(ppage);
 
 	/*
 	 * Now that the dirty bit has been propagated to the
@@ -919,7 +964,7 @@
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
+	kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
 		      ret != SWAP_SUCCESS, p, pfn);
 
 	return ret;
@@ -928,7 +973,7 @@
 static void set_page_hwpoison_huge_page(struct page *hpage)
 {
 	int i;
-	int nr_pages = 1 << compound_order(hpage);
+	int nr_pages = 1 << compound_trans_order(hpage);
 	for (i = 0; i < nr_pages; i++)
 		SetPageHWPoison(hpage + i);
 }
@@ -936,7 +981,7 @@
 static void clear_page_hwpoison_huge_page(struct page *hpage)
 {
 	int i;
-	int nr_pages = 1 << compound_order(hpage);
+	int nr_pages = 1 << compound_trans_order(hpage);
 	for (i = 0; i < nr_pages; i++)
 		ClearPageHWPoison(hpage + i);
 }
@@ -966,7 +1011,7 @@
 		return 0;
 	}
 
-	nr_pages = 1 << compound_order(hpage);
+	nr_pages = 1 << compound_trans_order(hpage);
 	atomic_long_add(nr_pages, &mce_bad_pages);
 
 	/*
@@ -1020,19 +1065,22 @@
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	if (!PageLRU(p) && !PageHuge(p))
-		shake_page(p, 0);
-	if (!PageLRU(p) && !PageHuge(p)) {
-		/*
-		 * shake_page could have turned it free.
-		 */
-		if (is_free_buddy_page(p)) {
-			action_result(pfn, "free buddy, 2nd try", DELAYED);
-			return 0;
+	if (!PageHuge(p) && !PageTransCompound(p)) {
+		if (!PageLRU(p))
+			shake_page(p, 0);
+		if (!PageLRU(p)) {
+			/*
+			 * shake_page could have turned it free.
+			 */
+			if (is_free_buddy_page(p)) {
+				action_result(pfn, "free buddy, 2nd try",
+						DELAYED);
+				return 0;
+			}
+			action_result(pfn, "non LRU", IGNORED);
+			put_page(p);
+			return -EBUSY;
 		}
-		action_result(pfn, "non LRU", IGNORED);
-		put_page(p);
-		return -EBUSY;
 	}
 
 	/*
@@ -1062,7 +1110,7 @@
 	 * For error on the tail page, we should set PG_hwpoison
 	 * on the head page to show that the hugepage is hwpoisoned
 	 */
-	if (PageTail(p) && TestSetPageHWPoison(hpage)) {
+	if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
 		action_result(pfn, "hugepage already hardware poisoned",
 				IGNORED);
 		unlock_page(hpage);
@@ -1164,7 +1212,7 @@
 		return 0;
 	}
 
-	nr_pages = 1 << compound_order(page);
+	nr_pages = 1 << compound_trans_order(page);
 
 	if (!get_page_unless_zero(page)) {
 		/*
@@ -1290,9 +1338,13 @@
 	/* Keep page count to indicate a given hugepage is isolated. */
 
 	list_add(&hpage->lru, &pagelist);
-	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
+				true);
 	if (ret) {
-			putback_lru_pages(&pagelist);
+		struct page *page1, *page2;
+		list_for_each_entry_safe(page1, page2, &pagelist, lru)
+			put_page(page1);
+
 		pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
 			 pfn, ret, page->flags);
 		if (ret > 0)
@@ -1301,7 +1353,7 @@
 	}
 done:
 	if (!PageHWPoison(hpage))
-		atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
+		atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
 	set_page_hwpoison_huge_page(hpage);
 	dequeue_hwpoisoned_huge_page(hpage);
 	/* keep elevated page count for bad page */
@@ -1413,8 +1465,10 @@
 		LIST_HEAD(pagelist);
 
 		list_add(&page->lru, &pagelist);
-		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+								0, true);
 		if (ret) {
+			putback_lru_pages(&pagelist);
 			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 				pfn, ret, page->flags);
 			if (ret > 0)
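A condensed sketch of the target-selection logic the memory-failure.c hunks add; the helper name is invented for illustration and does not exist in the tree:

/* Illustrative only: how hwpoison picks the page to unmap once THP is
 * taken into account.  Hugetlb and ordinary 4k pages keep using the
 * head page; an anonymous THP is split first so the exact 4k page that
 * took the error can be targeted.
 */
static struct page *pick_poison_target(struct page *p)
{
	struct page *hpage = compound_head(p);

	if (PageTransHuge(hpage) && !PageHuge(hpage) && PageAnon(hpage)) {
		if (split_huge_page(hpage))
			return NULL;	/* split failed, caller gives up */
		return p;		/* p is now an ordinary 4k page */
	}
	return hpage;
}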
diff --git a/mm/memory.c b/mm/memory.c
index 02e48aa..5823698 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -394,9 +394,11 @@
 	}
 }
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		pmd_t *pmd, unsigned long address)
 {
 	pgtable_t new = pte_alloc_one(mm, address);
+	int wait_split_huge_page;
 	if (!new)
 		return -ENOMEM;
 
@@ -416,14 +418,18 @@
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
 	spin_lock(&mm->page_table_lock);
-	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
+	wait_split_huge_page = 0;
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		mm->nr_ptes++;
 		pmd_populate(mm, pmd, new);
 		new = NULL;
-	}
+	} else if (unlikely(pmd_trans_splitting(*pmd)))
+		wait_split_huge_page = 1;
 	spin_unlock(&mm->page_table_lock);
 	if (new)
 		pte_free(mm, new);
+	if (wait_split_huge_page)
+		wait_split_huge_page(vma->anon_vma, pmd);
 	return 0;
 }
 
@@ -436,10 +442,11 @@
 	smp_wmb(); /* See comment in __pte_alloc */
 
 	spin_lock(&init_mm.page_table_lock);
-	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
 		new = NULL;
-	}
+	} else
+		VM_BUG_ON(pmd_trans_splitting(*pmd));
 	spin_unlock(&init_mm.page_table_lock);
 	if (new)
 		pte_free_kernel(&init_mm, new);
@@ -719,9 +726,9 @@
 	return 0;
 }
 
-static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
-		unsigned long addr, unsigned long end)
+int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
+		   unsigned long addr, unsigned long end)
 {
 	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
@@ -795,6 +802,17 @@
 	src_pmd = pmd_offset(src_pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*src_pmd)) {
+			int err;
+			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
+			err = copy_huge_pmd(dst_mm, src_mm,
+					    dst_pmd, src_pmd, addr, vma);
+			if (err == -ENOMEM)
+				return -ENOMEM;
+			if (!err)
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(src_pmd))
 			continue;
 		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
@@ -997,6 +1015,16 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*pmd)) {
+			if (next-addr != HPAGE_PMD_SIZE) {
+				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			} else if (zap_huge_pmd(tlb, vma, pmd)) {
+				(*zap_work)--;
+				continue;
+			}
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd)) {
 			(*zap_work)--;
 			continue;
@@ -1262,7 +1290,7 @@
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud))
 		goto no_page_table;
-	if (pud_huge(*pud)) {
+	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
 		BUG_ON(flags & FOLL_GET);
 		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
 		goto out;
@@ -1273,11 +1301,32 @@
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd))
 		goto no_page_table;
-	if (pmd_huge(*pmd)) {
+	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
 		BUG_ON(flags & FOLL_GET);
 		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
 		goto out;
 	}
+	if (pmd_trans_huge(*pmd)) {
+		if (flags & FOLL_SPLIT) {
+			split_huge_page_pmd(mm, pmd);
+			goto split_fallthrough;
+		}
+		spin_lock(&mm->page_table_lock);
+		if (likely(pmd_trans_huge(*pmd))) {
+			if (unlikely(pmd_trans_splitting(*pmd))) {
+				spin_unlock(&mm->page_table_lock);
+				wait_split_huge_page(vma->anon_vma, pmd);
+			} else {
+				page = follow_trans_huge_pmd(mm, address,
+							     pmd, flags);
+				spin_unlock(&mm->page_table_lock);
+				goto out;
+			}
+		} else
+			spin_unlock(&mm->page_table_lock);
+		/* fall through */
+	}
+split_fallthrough:
 	if (unlikely(pmd_bad(*pmd)))
 		goto no_page_table;
 
@@ -1310,6 +1359,28 @@
 		 */
 		mark_page_accessed(page);
 	}
+	if (flags & FOLL_MLOCK) {
+		/*
+		 * The preliminary mapping check is mainly to avoid the
+		 * pointless overhead of lock_page on the ZERO_PAGE
+		 * which might bounce very badly if there is contention.
+		 *
+		 * If the page is already locked, we don't need to
+		 * handle it now - vmscan will handle it later if and
+		 * when it attempts to reclaim the page.
+		 */
+		if (page->mapping && trylock_page(page)) {
+			lru_add_drain();  /* push cached pages to LRU */
+			/*
+			 * Because we lock page here and migration is
+			 * blocked by the pte's page reference, we need
+			 * only check for file-cache page truncation.
+			 */
+			if (page->mapping)
+				mlock_vma_page(page);
+			unlock_page(page);
+		}
+	}
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
@@ -1341,7 +1412,8 @@
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int nr_pages, unsigned int gup_flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *nonblocking)
 {
 	int i;
 	unsigned long vm_flags;
@@ -1386,6 +1458,7 @@
 			pmd = pmd_offset(pud, pg);
 			if (pmd_none(*pmd))
 				return i ? : -EFAULT;
+			VM_BUG_ON(pmd_trans_huge(*pmd));
 			pte = pte_offset_map(pmd, pg);
 			if (pte_none(*pte)) {
 				pte_unmap(pte);
@@ -1441,10 +1514,15 @@
 			cond_resched();
 			while (!(page = follow_page(vma, start, foll_flags))) {
 				int ret;
+				unsigned int fault_flags = 0;
+
+				if (foll_flags & FOLL_WRITE)
+					fault_flags |= FAULT_FLAG_WRITE;
+				if (nonblocking)
+					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 
 				ret = handle_mm_fault(mm, vma, start,
-					(foll_flags & FOLL_WRITE) ?
-					FAULT_FLAG_WRITE : 0);
+							fault_flags);
 
 				if (ret & VM_FAULT_ERROR) {
 					if (ret & VM_FAULT_OOM)
@@ -1460,6 +1538,11 @@
 				else
 					tsk->min_flt++;
 
+				if (ret & VM_FAULT_RETRY) {
+					*nonblocking = 0;
+					return i;
+				}
+
 				/*
 				 * The VM_FAULT_WRITE bit tells us that
 				 * do_wp_page has broken COW when necessary,
@@ -1559,7 +1642,8 @@
 	if (force)
 		flags |= FOLL_FORCE;
 
-	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+				NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -1584,7 +1668,8 @@
 	struct page *page;
 
 	if (__get_user_pages(current, current->mm, addr, 1,
-			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
+			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
+			     NULL) < 1)
 		return NULL;
 	flush_cache_page(vma, addr, page_to_pfn(page));
 	return page;
@@ -1598,8 +1683,10 @@
 	pud_t * pud = pud_alloc(mm, pgd, addr);
 	if (pud) {
 		pmd_t * pmd = pmd_alloc(mm, pud, addr);
-		if (pmd)
+		if (pmd) {
+			VM_BUG_ON(pmd_trans_huge(*pmd));
 			return pte_alloc_map_lock(mm, pmd, addr, ptl);
+		}
 	}
 	return NULL;
 }
@@ -1818,6 +1905,7 @@
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
 		return -ENOMEM;
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
 		if (remap_pte_range(mm, pmd, addr, next,
@@ -2048,19 +2136,6 @@
 	return same;
 }
 
-/*
- * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
- * servicing faults for write access.  In the normal case, do always want
- * pte_mkwrite.  But get_user_pages can cause write faults for mappings
- * that do not have writing enabled, when used by access_process_vm.
- */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
-{
-	if (likely(vma->vm_flags & VM_WRITE))
-		pte = pte_mkwrite(pte);
-	return pte;
-}
-
 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
 	/*
@@ -2112,7 +2187,7 @@
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse = 0, ret = 0;
+	int ret = 0;
 	int page_mkwrite = 0;
 	struct page *dirty_page = NULL;
 
@@ -2144,19 +2219,20 @@
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				page_cache_release(old_page);
 				goto unlock;
 			}
 			page_cache_release(old_page);
 		}
-		reuse = reuse_swap_page(old_page);
-		if (reuse)
+		if (reuse_swap_page(old_page)) {
 			/*
 			 * The page is all ours.  Move it to our anon_vma so
 			 * the rmap code will not search our parent or siblings.
 			 * Protected against the rmap code by the page lock.
 			 */
 			page_move_anon_rmap(old_page, vma, address);
+			unlock_page(old_page);
+			goto reuse;
+		}
 		unlock_page(old_page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
@@ -2212,7 +2288,6 @@
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				page_cache_release(old_page);
 				goto unlock;
 			}
 
@@ -2220,18 +2295,52 @@
 		}
 		dirty_page = old_page;
 		get_page(dirty_page);
-		reuse = 1;
-	}
 
-	if (reuse) {
 reuse:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
 			update_mmu_cache(vma, address, page_table);
+		pte_unmap_unlock(page_table, ptl);
 		ret |= VM_FAULT_WRITE;
-		goto unlock;
+
+		if (!dirty_page)
+			return ret;
+
+		/*
+		 * Yes, Virginia, this is actually required to prevent a race
+		 * with clear_page_dirty_for_io() from clearing the page dirty
+		 * bit after it clear all dirty ptes, but before a racing
+		 * do_wp_page installs a dirty pte.
+		 *
+		 * do_no_page is protected similarly.
+		 */
+		if (!page_mkwrite) {
+			wait_on_page_locked(dirty_page);
+			set_page_dirty_balance(dirty_page, page_mkwrite);
+		}
+		put_page(dirty_page);
+		if (page_mkwrite) {
+			struct address_space *mapping = dirty_page->mapping;
+
+			set_page_dirty(dirty_page);
+			unlock_page(dirty_page);
+			page_cache_release(dirty_page);
+			if (mapping)	{
+				/*
+				 * Some device drivers do not set page.mapping
+				 * but still dirty their pages
+				 */
+				balance_dirty_pages_ratelimited(mapping);
+			}
+		}
+
+		/* file_update_time outside page_lock */
+		if (vma->vm_file)
+			file_update_time(vma->vm_file);
+
+		return ret;
 	}
 
 	/*
@@ -2256,16 +2365,6 @@
 	}
 	__SetPageUptodate(new_page);
 
-	/*
-	 * Don't let another task, with possibly unlocked vma,
-	 * keep the mlocked page.
-	 */
-	if ((vma->vm_flags & VM_LOCKED) && old_page) {
-		lock_page(old_page);	/* for LRU manipulation */
-		clear_page_mlock(old_page);
-		unlock_page(old_page);
-	}
-
 	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
@@ -2333,42 +2432,19 @@
 
 	if (new_page)
 		page_cache_release(new_page);
-	if (old_page)
-		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
-	if (dirty_page) {
+	if (old_page) {
 		/*
-		 * Yes, Virginia, this is actually required to prevent a race
-		 * with clear_page_dirty_for_io() from clearing the page dirty
-		 * bit after it clear all dirty ptes, but before a racing
-		 * do_wp_page installs a dirty pte.
-		 *
-		 * do_no_page is protected similarly.
+		 * Don't let another task, with possibly unlocked vma,
+		 * keep the mlocked page.
 		 */
-		if (!page_mkwrite) {
-			wait_on_page_locked(dirty_page);
-			set_page_dirty_balance(dirty_page, page_mkwrite);
+		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+			lock_page(old_page);	/* LRU manipulation */
+			munlock_vma_page(old_page);
+			unlock_page(old_page);
 		}
-		put_page(dirty_page);
-		if (page_mkwrite) {
-			struct address_space *mapping = dirty_page->mapping;
-
-			set_page_dirty(dirty_page);
-			unlock_page(dirty_page);
-			page_cache_release(dirty_page);
-			if (mapping)	{
-				/*
-				 * Some device drivers do not set page.mapping
-				 * but still dirty their pages
-				 */
-				balance_dirty_pages_ratelimited(mapping);
-			}
-		}
-
-		/* file_update_time outside page_lock */
-		if (vma->vm_file)
-			file_update_time(vma->vm_file);
+		page_cache_release(old_page);
 	}
 	return ret;
 oom_free_new:
@@ -2572,6 +2648,7 @@
 		details.last_index = ULONG_MAX;
 	details.i_mmap_lock = &mapping->i_mmap_lock;
 
+	mutex_lock(&mapping->unmap_mutex);
 	spin_lock(&mapping->i_mmap_lock);
 
 	/* Protect against endless unmapping loops */
@@ -2588,6 +2665,7 @@
 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
@@ -2975,12 +3053,6 @@
 				goto out;
 			}
 			charged = 1;
-			/*
-			 * Don't let another task, with possibly unlocked vma,
-			 * keep the mlocked page.
-			 */
-			if (vma->vm_flags & VM_LOCKED)
-				clear_page_mlock(vmf.page);
 			copy_user_highpage(page, vmf.page, address, vma);
 			__SetPageUptodate(page);
 		} else {
@@ -3147,9 +3219,9 @@
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static inline int handle_pte_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, pmd_t *pmd, unsigned int flags)
+int handle_pte_fault(struct mm_struct *mm,
+		     struct vm_area_struct *vma, unsigned long address,
+		     pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
 	pte_t entry;
 	spinlock_t *ptl;
@@ -3228,9 +3300,40 @@
 	pmd = pmd_alloc(mm, pud, address);
 	if (!pmd)
 		return VM_FAULT_OOM;
-	pte = pte_alloc_map(mm, pmd, address);
-	if (!pte)
+	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
+		if (!vma->vm_ops)
+			return do_huge_pmd_anonymous_page(mm, vma, address,
+							  pmd, flags);
+	} else {
+		pmd_t orig_pmd = *pmd;
+		barrier();
+		if (pmd_trans_huge(orig_pmd)) {
+			if (flags & FAULT_FLAG_WRITE &&
+			    !pmd_write(orig_pmd) &&
+			    !pmd_trans_splitting(orig_pmd))
+				return do_huge_pmd_wp_page(mm, vma, address,
+							   pmd, orig_pmd);
+			return 0;
+		}
+	}
+
+	/*
+	 * Use __pte_alloc instead of pte_alloc_map, because we can't
+	 * run pte_offset_map on the pmd if a huge pmd could
+	 * materialize from under us from a different thread.
+	 */
+	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
 		return VM_FAULT_OOM;
+	/* if a huge pmd materialized from under us, just retry later */
+	if (unlikely(pmd_trans_huge(*pmd)))
+		return 0;
+	/*
+	 * A regular pmd is established and it can't morph into a huge pmd
+	 * from under us anymore at this point, because we hold the mmap_sem in
+	 * read mode and khugepaged takes it in write mode. So now it's
+	 * safe to run pte_offset_map().
+	 */
+	pte = pte_offset_map(pmd, address);
 
 	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
@@ -3296,7 +3399,12 @@
 	vma = find_vma(current->mm, addr);
 	if (!vma)
 		return -ENOMEM;
-	write = (vma->vm_flags & VM_WRITE) != 0;
+	/*
+	 * We want to touch writable mappings with a write fault in order
+	 * to break COW, except for shared mappings because these don't COW
+	 * and we would not want to dirty them for nothing.
+	 */
+	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
 	BUG_ON(addr >= end);
 	BUG_ON(end > vma->vm_end);
 	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
@@ -3368,6 +3476,7 @@
 		goto out;
 
 	pmd = pmd_offset(pud, address);
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
 		goto out;
 
@@ -3608,3 +3717,74 @@
 }
 EXPORT_SYMBOL(might_fault);
 #endif
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+static void clear_gigantic_page(struct page *page,
+				unsigned long addr,
+				unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *p = page;
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page;
+	     i++, p = mem_map_next(p, page, i)) {
+		cond_resched();
+		clear_user_highpage(p, addr + i * PAGE_SIZE);
+	}
+}
+void clear_huge_page(struct page *page,
+		     unsigned long addr, unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		clear_gigantic_page(page, addr, pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+	}
+}
+
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
+				    unsigned long addr,
+				    struct vm_area_struct *vma,
+				    unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < pages_per_huge_page; ) {
+		cond_resched();
+		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+void copy_user_huge_page(struct page *dst, struct page *src,
+			 unsigned long addr, struct vm_area_struct *vma,
+			 unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		copy_user_gigantic_page(dst, src, addr, vma,
+					pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
+	}
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
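The new nonblocking argument to __get_user_pages() is cleared whenever the fault path returned VM_FAULT_RETRY, i.e. whenever mmap_sem was already dropped on the caller's behalf. A caller-side sketch of the resulting locking pattern (the function name is hypothetical; do_mlock_pages() in the mm/mlock.c hunks further down follows the same shape):

/* Sketch only, name invented.  gup clears *locked when the fault
 * handler has already released mmap_sem via VM_FAULT_RETRY.
 */
static int fault_in_user_range(struct mm_struct *mm, unsigned long start,
			       int nr_pages, unsigned int gup_flags)
{
	int locked = 1;
	int ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(current, mm, start, nr_pages, gup_flags,
			       NULL, NULL, &locked);
	if (locked)		/* not cleared: we still own the lock */
		up_read(&mm->mmap_sem);
	return ret;
}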
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2c6523a..321fc74 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -82,9 +82,10 @@
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void get_page_bootmem(unsigned long info,  struct page *page, int type)
+static void get_page_bootmem(unsigned long info,  struct page *page,
+			     unsigned long type)
 {
-	atomic_set(&page->_mapcount, type);
+	page->lru.next = (struct list_head *) type;
 	SetPagePrivate(page);
 	set_page_private(page, info);
 	atomic_inc(&page->_count);
@@ -94,15 +95,16 @@
  * so use __ref to tell modpost not to generate a warning */
 void __ref put_page_bootmem(struct page *page)
 {
-	int type;
+	unsigned long type;
 
-	type = atomic_read(&page->_mapcount);
-	BUG_ON(type >= -1);
+	type = (unsigned long) page->lru.next;
+	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
+	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 
 	if (atomic_dec_return(&page->_count) == 1) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		reset_page_mapcount(page);
+		INIT_LIST_HEAD(&page->lru);
 		__free_pages_bootmem(page, 0);
 	}
 
@@ -407,6 +409,7 @@
 	int ret;
 	struct memory_notify arg;
 
+	lock_memory_hotplug();
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
 	arg.status_change_nid = -1;
@@ -419,6 +422,7 @@
 	ret = notifier_to_errno(ret);
 	if (ret) {
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 	/*
@@ -443,6 +447,7 @@
 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 			nr_pages, pfn);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 
@@ -467,6 +472,7 @@
 
 	if (onlined_pages)
 		memory_notify(MEM_ONLINE, &arg);
+	unlock_memory_hotplug();
 
 	return 0;
 }
@@ -733,7 +739,8 @@
 			goto out;
 		}
 		/* this function returns # of failed pages */
-		ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
+		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
+								true, true);
 		if (ret)
 			putback_lru_pages(&source);
 	}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 11ff260..b53ec99 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -514,6 +514,7 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		split_huge_page_pmd(vma->vm_mm, pmd);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		if (check_pte_range(vma, pmd, addr, next, nodes,
@@ -935,7 +936,8 @@
 		return PTR_ERR(vma);
 
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, new_node_page, dest, 0);
+		err = migrate_pages(&pagelist, new_node_page, dest,
+								false, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
@@ -1155,7 +1157,8 @@
 
 		if (!list_empty(&pagelist)) {
 			nr_failed = migrate_pages(&pagelist, new_vma_page,
-						(unsigned long)vma, 0);
+						(unsigned long)vma,
+						false, true);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}
@@ -1308,16 +1311,13 @@
 
 	/* Find the mm_struct */
 	rcu_read_lock();
-	read_lock(&tasklist_lock);
 	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
-		read_unlock(&tasklist_lock);
 		rcu_read_unlock();
 		err = -ESRCH;
 		goto out;
 	}
 	mm = get_task_mm(task);
-	read_unlock(&tasklist_lock);
 	rcu_read_unlock();
 
 	err = -EINVAL;
@@ -1524,10 +1524,9 @@
 }
 
 /* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
+	int nd)
 {
-	int nd = numa_node_id();
-
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
 		if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@
 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
 				huge_page_shift(hstate_vma(vma))), gfp_flags);
 	} else {
-		zl = policy_zonelist(gfp_flags, *mpol);
+		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
 		if ((*mpol)->mode == MPOL_BIND)
 			*nodemask = &(*mpol)->v.nodes;
 	}
@@ -1796,7 +1795,7 @@
 }
 
 /**
- * 	alloc_page_vma	- Allocate a page for a VMA.
+ * 	alloc_pages_vma	- Allocate a page for a VMA.
  *
  * 	@gfp:
  *      %GFP_USER    user allocation.
@@ -1805,6 +1804,7 @@
  *      %GFP_FS      allocation should not call back into a file system.
  *      %GFP_ATOMIC  don't sleep.
  *
+ *	@order: Order of the GFP allocation.
  * 	@vma:  Pointer to VMA or NULL if not available.
  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
  *
@@ -1818,7 +1818,8 @@
  *	Should be called with the mm_sem of the vma hold.
  */
 struct page *
-alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+		unsigned long addr, int node)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
@@ -1828,18 +1829,18 @@
 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
 		unsigned nid;
 
-		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
 		mpol_cond_put(pol);
-		page = alloc_page_interleave(gfp, 0, nid);
+		page = alloc_page_interleave(gfp, order, nid);
 		put_mems_allowed();
 		return page;
 	}
-	zl = policy_zonelist(gfp, pol);
+	zl = policy_zonelist(gfp, pol, node);
 	if (unlikely(mpol_needs_cond_ref(pol))) {
 		/*
 		 * slow path: ref counted shared policy
 		 */
-		struct page *page =  __alloc_pages_nodemask(gfp, 0,
+		struct page *page =  __alloc_pages_nodemask(gfp, order,
 						zl, policy_nodemask(gfp, pol));
 		__mpol_put(pol);
 		put_mems_allowed();
@@ -1848,7 +1849,8 @@
 	/*
 	 * fast path:  default or task policy
 	 */
-	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	page = __alloc_pages_nodemask(gfp, order, zl,
+				      policy_nodemask(gfp, pol));
 	put_mems_allowed();
 	return page;
 }
@@ -1889,7 +1891,8 @@
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
 		page = __alloc_pages_nodemask(gfp, order,
-			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
+				policy_zonelist(gfp, pol, numa_node_id()),
+				policy_nodemask(gfp, pol));
 	put_mems_allowed();
 	return page;
 }
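Existing alloc_page_vma() callers elsewhere in the tree are untouched by this hunk, so presumably the old entry point becomes the order-0, local-node case of the new interface through a wrapper along these lines (an assumption; the wrapper itself is not shown here):

/* Assumed wrapper, not part of this hunk: keeps existing callers of
 * alloc_page_vma() working while THP can request a higher order on a
 * caller-chosen node through alloc_pages_vma().
 */
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())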
diff --git a/mm/migrate.c b/mm/migrate.c
index 6ae8a66..352de555 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -113,6 +113,8 @@
 			goto out;
 
 		pmd = pmd_offset(pud, addr);
+		if (pmd_trans_huge(*pmd))
+			goto out;
 		if (!pmd_present(*pmd))
 			goto out;
 
@@ -246,7 +248,7 @@
 
 	expected_count = 2 + page_has_private(page);
 	if (page_count(page) != expected_count ||
-			(struct page *)radix_tree_deref_slot(pslot) != page) {
+		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -318,7 +320,7 @@
 
 	expected_count = 2 + page_has_private(page);
 	if (page_count(page) != expected_count ||
-	    (struct page *)radix_tree_deref_slot(pslot) != page) {
+		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -614,13 +616,12 @@
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, int offlining)
+			struct page *page, int force, bool offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
 	int remap_swapcache = 1;
-	int rcu_locked = 0;
 	int charge = 0;
 	struct mem_cgroup *mem = NULL;
 	struct anon_vma *anon_vma = NULL;
@@ -632,6 +633,9 @@
 		/* page was freed from under us. So we are done. */
 		goto move_newpage;
 	}
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page(page)))
+			goto move_newpage;
 
 	/* prepare cgroup just returns 0 or -ENOMEM */
 	rc = -EAGAIN;
@@ -639,6 +643,23 @@
 	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
+
+		/*
+		 * It's not safe for direct compaction to call lock_page.
+		 * For example, during page readahead pages are added locked
+		 * to the LRU. Later, when the IO completes the pages are
+		 * marked uptodate and unlocked. However, the queueing
+		 * could be merging multiple pages for one bio (e.g.
+		 * mpage_readpages). If an allocation happens for the
+		 * second or third page, the process can end up locking
+		 * the same page twice and deadlocking. Rather than
+		 * trying to be clever about what pages can be locked,
+		 * avoid the use of lock_page for direct compaction
+		 * altogether.
+		 */
+		if (current->flags & PF_MEMALLOC)
+			goto move_newpage;
+
 		lock_page(page);
 	}
 
@@ -665,27 +686,33 @@
 	BUG_ON(charge);
 
 	if (PageWriteback(page)) {
-		if (!force)
+		if (!force || !sync)
 			goto uncharge;
 		wait_on_page_writeback(page);
 	}
 	/*
 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
 	 * we cannot notice that anon_vma is freed while we migrates a page.
-	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
+	 * This get_anon_vma() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
 	 * File Caches may use write_page() or lock_page() in migration, then,
 	 * just care Anon page here.
 	 */
 	if (PageAnon(page)) {
-		rcu_read_lock();
-		rcu_locked = 1;
-
-		/* Determine how to safely use anon_vma */
-		if (!page_mapped(page)) {
-			if (!PageSwapCache(page))
-				goto rcu_unlock;
-
+		/*
+		 * Only page_lock_anon_vma() understands the subtleties of
+		 * getting a hold on an anon_vma from outside one of its mms.
+		 */
+		anon_vma = page_lock_anon_vma(page);
+		if (anon_vma) {
+			/*
+			 * Take a reference count on the anon_vma if the
+			 * page is mapped so that it is guaranteed to
+			 * exist when the page is remapped later
+			 */
+			get_anon_vma(anon_vma);
+			page_unlock_anon_vma(anon_vma);
+		} else if (PageSwapCache(page)) {
 			/*
 			 * We cannot be sure that the anon_vma of an unmapped
 			 * swapcache page is safe to use because we don't
@@ -700,13 +727,7 @@
 			 */
 			remap_swapcache = 0;
 		} else {
-			/*
-			 * Take a reference count on the anon_vma if the
-			 * page is mapped so that it is guaranteed to
-			 * exist when the page is remapped later
-			 */
-			anon_vma = page_anon_vma(page);
-			get_anon_vma(anon_vma);
+			goto uncharge;
 		}
 	}
 
@@ -723,16 +744,10 @@
 	 * free the metadata, so the page can be freed.
 	 */
 	if (!page->mapping) {
-		if (!PageAnon(page) && page_has_private(page)) {
-			/*
-			 * Go direct to try_to_free_buffers() here because
-			 * a) that's what try_to_release_page() would do anyway
-			 * b) we may be under rcu_read_lock() here, so we can't
-			 *    use GFP_KERNEL which is what try_to_release_page()
-			 *    needs to be effective.
-			 */
+		VM_BUG_ON(PageAnon(page));
+		if (page_has_private(page)) {
 			try_to_free_buffers(page);
-			goto rcu_unlock;
+			goto uncharge;
 		}
 		goto skip_unmap;
 	}
@@ -746,20 +761,18 @@
 
 	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
-rcu_unlock:
 
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		drop_anon_vma(anon_vma);
 
-	if (rcu_locked)
-		rcu_read_unlock();
 uncharge:
 	if (!charge)
-		mem_cgroup_end_migration(mem, page, newpage);
+		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 unlock:
 	unlock_page(page);
 
+move_newpage:
 	if (rc != -EAGAIN) {
  		/*
  		 * A page that has been migrated has all references
@@ -773,8 +786,6 @@
 		putback_lru_page(page);
 	}
 
-move_newpage:
-
 	/*
 	 * Move the new page to the LRU. If migration was not successful
 	 * then this will free the page.
@@ -810,12 +821,11 @@
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				unsigned long private, struct page *hpage,
-				int force, int offlining)
+				int force, bool offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
 	struct page *new_hpage = get_new_page(hpage, private, &result);
-	int rcu_locked = 0;
 	struct anon_vma *anon_vma = NULL;
 
 	if (!new_hpage)
@@ -824,18 +834,16 @@
 	rc = -EAGAIN;
 
 	if (!trylock_page(hpage)) {
-		if (!force)
+		if (!force || !sync)
 			goto out;
 		lock_page(hpage);
 	}
 
 	if (PageAnon(hpage)) {
-		rcu_read_lock();
-		rcu_locked = 1;
-
-		if (page_mapped(hpage)) {
-			anon_vma = page_anon_vma(hpage);
-			atomic_inc(&anon_vma->external_refcount);
+		anon_vma = page_lock_anon_vma(hpage);
+		if (anon_vma) {
+			get_anon_vma(anon_vma);
+			page_unlock_anon_vma(anon_vma);
 		}
 	}
 
@@ -847,16 +855,8 @@
 	if (rc)
 		remove_migration_ptes(hpage, hpage);
 
-	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
-					    &anon_vma->lock)) {
-		int empty = list_empty(&anon_vma->head);
-		spin_unlock(&anon_vma->lock);
-		if (empty)
-			anon_vma_free(anon_vma);
-	}
-
-	if (rcu_locked)
-		rcu_read_unlock();
+	if (anon_vma)
+		drop_anon_vma(anon_vma);
 out:
 	unlock_page(hpage);
 
@@ -887,12 +887,13 @@
  * are movable anymore because to has become empty
  * or no retryable pages exist anymore.
  * Caller should call putback_lru_pages to return pages to the LRU
- * or free list.
+ * or free list only if ret != 0.
  *
  * Return: Number of pages not migrated or error code.
  */
 int migrate_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, bool offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -912,7 +913,8 @@
 			cond_resched();
 
 			rc = unmap_and_move(get_new_page, private,
-						page, pass > 2, offlining);
+						page, pass > 2, offlining,
+						sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -941,7 +943,8 @@
 }
 
 int migrate_huge_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, bool offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -957,7 +960,8 @@
 			cond_resched();
 
 			rc = unmap_and_move_huge_page(get_new_page,
-					private, page, pass > 2, offlining);
+					private, page, pass > 2, offlining,
+					sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -976,10 +980,6 @@
 	}
 	rc = 0;
 out:
-
-	list_for_each_entry_safe(page, page2, from, lru)
-		put_page(page);
-
 	if (rc)
 		return rc;
 
@@ -1042,7 +1042,7 @@
 		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
 			goto set_status;
 
-		page = follow_page(vma, pp->addr, FOLL_GET);
+		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
@@ -1090,7 +1090,7 @@
 	err = 0;
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm, 0);
+				(unsigned long)pm, 0, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
@@ -1287,14 +1287,14 @@
 		return -EPERM;
 
 	/* Find the mm_struct */
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		return -ESRCH;
 	}
 	mm = get_task_mm(task);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!mm)
 		return -EINVAL;
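The comment change above ("only if ret != 0") moves responsibility for the failure case to the caller. A caller-side sketch of the new convention, mirroring the updated call sites in mm/memory_hotplug.c and mm/mempolicy.c (the wrapper name is invented):

/* Illustrative wrapper: with the new convention, pages left on the list
 * are handed back to the LRU by the caller only when migrate_pages()
 * reports that it could not migrate everything.
 */
static int migrate_list_sync(struct list_head *pagelist,
			     new_page_t get_new_page, unsigned long private)
{
	int ret = migrate_pages(pagelist, get_new_page, private,
				false /* offlining */, true /* sync */);
	if (ret)
		putback_lru_pages(pagelist);
	return ret;
}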
diff --git a/mm/mincore.c b/mm/mincore.c
index 9ac42dc..a4e6b9d 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -154,6 +154,13 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*pmd)) {
+			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
+				vec += (next - addr) >> PAGE_SHIFT;
+				continue;
+			}
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			mincore_unmapped_range(vma, addr, next, vec);
 		else
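mincore_huge_pmd() itself lives in mm/huge_memory.c and is not part of this hunk. Roughly, a stable huge pmd lets it fill the whole vector in one step, along the lines of the sketch below (locking and the splitting case are omitted, so treat the details as an assumption):

/* Assumed shape of the helper (the real one also takes the vma and
 * synchronises with splitting): a present huge pmd means every 4k
 * subpage in [addr, end) is resident, so report them all at once.
 */
static int mincore_huge_pmd_sketch(pmd_t *pmd, unsigned long addr,
				   unsigned long end, unsigned char *vec)
{
	if (!pmd_trans_huge(*pmd) || pmd_trans_splitting(*pmd))
		return 0;		/* fall back to the pte walk */
	memset(vec, 1, (end - addr) >> PAGE_SHIFT);
	return 1;
}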
diff --git a/mm/mlock.c b/mm/mlock.c
index b70919c..c3924c7f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -155,13 +155,12 @@
  * vma->vm_mm->mmap_sem must be held for at least read.
  */
 static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-				    unsigned long start, unsigned long end)
+				    unsigned long start, unsigned long end,
+				    int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = start;
-	struct page *pages[16]; /* 16 gives a reasonable batch */
 	int nr_pages = (end - start) / PAGE_SIZE;
-	int ret = 0;
 	int gup_flags;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
@@ -170,73 +169,33 @@
 	VM_BUG_ON(end   > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH | FOLL_GET;
-	if (vma->vm_flags & VM_WRITE)
+	gup_flags = FOLL_TOUCH;
+	/*
+	 * We want to touch writable mappings with a write fault in order
+	 * to break COW, except for shared mappings because these don't COW
+	 * and we would not want to dirty them for nothing.
+	 */
+	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 		gup_flags |= FOLL_WRITE;
 
+	/*
+	 * We want mlock to succeed for regions that have any permissions
+	 * other than PROT_NONE.
+	 */
+	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+		gup_flags |= FOLL_FORCE;
+
+	if (vma->vm_flags & VM_LOCKED)
+		gup_flags |= FOLL_MLOCK;
+
 	/* We don't try to access the guard page of a stack vma */
 	if (stack_guard_page(vma, start)) {
 		addr += PAGE_SIZE;
 		nr_pages--;
 	}
 
-	while (nr_pages > 0) {
-		int i;
-
-		cond_resched();
-
-		/*
-		 * get_user_pages makes pages present if we are
-		 * setting mlock. and this extra reference count will
-		 * disable migration of this page.  However, page may
-		 * still be truncated out from under us.
-		 */
-		ret = __get_user_pages(current, mm, addr,
-				min_t(int, nr_pages, ARRAY_SIZE(pages)),
-				gup_flags, pages, NULL);
-		/*
-		 * This can happen for, e.g., VM_NONLINEAR regions before
-		 * a page has been allocated and mapped at a given offset,
-		 * or for addresses that map beyond end of a file.
-		 * We'll mlock the pages if/when they get faulted in.
-		 */
-		if (ret < 0)
-			break;
-
-		lru_add_drain();	/* push cached pages to LRU */
-
-		for (i = 0; i < ret; i++) {
-			struct page *page = pages[i];
-
-			if (page->mapping) {
-				/*
-				 * That preliminary check is mainly to avoid
-				 * the pointless overhead of lock_page on the
-				 * ZERO_PAGE: which might bounce very badly if
-				 * there is contention.  However, we're still
-				 * dirtying its cacheline with get/put_page:
-				 * we'll add another __get_user_pages flag to
-				 * avoid it if that case turns out to matter.
-				 */
-				lock_page(page);
-				/*
-				 * Because we lock page here and migration is
-				 * blocked by the elevated reference, we need
-				 * only check for file-cache page truncation.
-				 */
-				if (page->mapping)
-					mlock_vma_page(page);
-				unlock_page(page);
-			}
-			put_page(page);	/* ref from get_user_pages() */
-		}
-
-		addr += ret * PAGE_SIZE;
-		nr_pages -= ret;
-		ret = 0;
-	}
-
-	return ret;	/* 0 or negative error code */
+	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+				NULL, NULL, nonblocking);
 }
 
 /*
@@ -280,7 +239,7 @@
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current))) {
 
-		__mlock_vma_pages_range(vma, start, end);
+		__mlock_vma_pages_range(vma, start, end, NULL);
 
 		/* Hide errors from mmap() and other callers */
 		return 0;
@@ -372,18 +331,10 @@
 	int ret = 0;
 	int lock = newflags & VM_LOCKED;
 
-	if (newflags == vma->vm_flags ||
-			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
+	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))
 		goto out;	/* don't set VM_LOCKED,  don't count */
 
-	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
-			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current)) {
-		if (lock)
-			make_pages_present(start, end);
-		goto out;	/* don't set VM_LOCKED,  don't count */
-	}
-
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma));
@@ -419,14 +370,10 @@
 	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
 	 */
 
-	if (lock) {
+	if (lock)
 		vma->vm_flags = newflags;
-		ret = __mlock_vma_pages_range(vma, start, end);
-		if (ret < 0)
-			ret = __mlock_posix_error_return(ret);
-	} else {
+	else
 		munlock_vma_pages_range(vma, start, end);
-	}
 
 out:
 	*prev = vma;
@@ -439,7 +386,8 @@
 	struct vm_area_struct * vma, * prev;
 	int error;
 
-	len = PAGE_ALIGN(len);
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(len != PAGE_ALIGN(len));
 	end = start + len;
 	if (end < start)
 		return -EINVAL;
@@ -482,6 +430,62 @@
 	return error;
 }
 
+static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long end, nstart, nend;
+	struct vm_area_struct *vma = NULL;
+	int locked = 0;
+	int ret = 0;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(len != PAGE_ALIGN(len));
+	end = start + len;
+
+	for (nstart = start; nstart < end; nstart = nend) {
+		/*
+		 * We want to fault in pages for [nstart; end) address range.
+		 * Find first corresponding VMA.
+		 */
+		if (!locked) {
+			locked = 1;
+			down_read(&mm->mmap_sem);
+			vma = find_vma(mm, nstart);
+		} else if (nstart >= vma->vm_end)
+			vma = vma->vm_next;
+		if (!vma || vma->vm_start >= end)
+			break;
+		/*
+		 * Set [nstart; nend) to intersection of desired address
+		 * range with the first VMA. Also, skip undesirable VMA types.
+		 */
+		nend = min(end, vma->vm_end);
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+			continue;
+		if (nstart < vma->vm_start)
+			nstart = vma->vm_start;
+		/*
+		 * Now fault in a range of pages. __mlock_vma_pages_range()
+		 * double checks the vma flags, so that it won't mlock pages
+		 * if the vma was already munlocked.
+		 */
+		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
+		if (ret < 0) {
+			if (ignore_errors) {
+				ret = 0;
+				continue;	/* continue at next VMA */
+			}
+			ret = __mlock_posix_error_return(ret);
+			break;
+		}
+		nend = nstart + ret * PAGE_SIZE;
+		ret = 0;
+	}
+	if (locked)
+		up_read(&mm->mmap_sem);
+	return ret;	/* 0 or negative error code */
+}
+
 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
 	unsigned long locked;
@@ -507,6 +511,8 @@
 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
 		error = do_mlock(start, len, 1);
 	up_write(&current->mm->mmap_sem);
+	if (!error)
+		error = do_mlock_pages(start, len, 0);
 	return error;
 }
 
@@ -571,6 +577,10 @@
 	    capable(CAP_IPC_LOCK))
 		ret = do_mlockall(flags);
 	up_write(&current->mm->mmap_sem);
+	if (!ret && (flags & MCL_CURRENT)) {
+		/* Ignore errors */
+		do_mlock_pages(0, TASK_SIZE, 1);
+	}
 out:
 	return ret;
 }
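
The new do_mlock_pages() above is essentially an interval walk: clamp the requested [start, end) range against each VMA in turn, skip VM_IO/VM_PFNMAP mappings, and fault in whatever intersection remains. A stand-alone user-space sketch of that clamping loop, using a hypothetical fake_vma array in place of the real VMA list (names and values are made up for illustration), might look like this:

#include <stdio.h>

/* Hypothetical stand-in for a VMA: [start, end) plus a "skip" flag playing
 * the role of VM_IO | VM_PFNMAP in the kernel loop. */
struct fake_vma {
	unsigned long start;
	unsigned long end;
	int skip;
};

/* Clamp [start, end) against each region, the way do_mlock_pages() derives
 * [nstart, nend) before calling __mlock_vma_pages_range(). */
static void walk_range(unsigned long start, unsigned long end,
		       const struct fake_vma *vmas, int nr)
{
	unsigned long nstart, nend;
	int i = 0;

	for (nstart = start; nstart < end; nstart = nend) {
		while (i < nr && nstart >= vmas[i].end)
			i++;
		if (i == nr || vmas[i].start >= end)
			break;
		nend = end < vmas[i].end ? end : vmas[i].end;
		if (vmas[i].skip)
			continue;	/* like VM_IO | VM_PFNMAP: nothing to fault */
		if (nstart < vmas[i].start)
			nstart = vmas[i].start;
		printf("fault in [%#lx, %#lx)\n", nstart, nend);
	}
}

int main(void)
{
	const struct fake_vma vmas[] = {
		{ 0x1000, 0x3000, 0 },
		{ 0x3000, 0x4000, 1 },	/* skipped, like a PFN mapping */
		{ 0x6000, 0x9000, 0 },
	};

	walk_range(0x2000, 0x8000, vmas, 3);	/* prints two sub-ranges */
	return 0;
}

In the kernel version, the &locked argument additionally lets the fault path drop mmap_sem; the !locked branch at the top of the loop then retakes it and re-looks up the VMA.
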
diff --git a/mm/mmap.c b/mm/mmap.c
index 50a4aa0..2ec8eb5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -29,6 +29,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/perf_event.h>
 #include <linux/audit.h>
+#include <linux/khugepaged.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -253,7 +254,15 @@
 	down_write(&mm->mmap_sem);
 
 #ifdef CONFIG_COMPAT_BRK
-	min_brk = mm->end_code;
+	/*
+	 * CONFIG_COMPAT_BRK can still be overridden by setting
+	 * randomize_va_space to 2, which will still cause mm->start_brk
+	 * to be arbitrarily shifted
+	 */
+	if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+		min_brk = mm->start_brk;
+	else
+		min_brk = mm->end_data;
 #else
 	min_brk = mm->start_brk;
 #endif
@@ -588,6 +597,8 @@
 		}
 	}
 
+	vma_adjust_trans_huge(vma, start, end, adjust_next);
+
 	/*
 	 * When changing only vma->vm_end, we don't really need anon_vma
 	 * lock. This is a fairly rare case by itself, but the anon_vma
@@ -815,6 +826,7 @@
 				end, prev->vm_pgoff, NULL);
 		if (err)
 			return NULL;
+		khugepaged_enter_vma_merge(prev);
 		return prev;
 	}
 
@@ -833,6 +845,7 @@
 				next->vm_pgoff - pglen, NULL);
 		if (err)
 			return NULL;
+		khugepaged_enter_vma_merge(area);
 		return area;
 	}
 
@@ -1761,6 +1774,7 @@
 		}
 	}
 	vma_unlock_anon_vma(vma);
+	khugepaged_enter_vma_merge(vma);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1808,6 +1822,7 @@
 		}
 	}
 	vma_unlock_anon_vma(vma);
+	khugepaged_enter_vma_merge(vma);
 	return error;
 }
 
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 438951d..8d032de 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -100,6 +100,26 @@
 	return young;
 }
 
+int __mmu_notifier_test_young(struct mm_struct *mm,
+			      unsigned long address)
+{
+	struct mmu_notifier *mn;
+	struct hlist_node *n;
+	int young = 0;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->test_young) {
+			young = mn->ops->test_young(mn, mm, address);
+			if (young)
+				break;
+		}
+	}
+	rcu_read_unlock();
+
+	return young;
+}
+
 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
 			       pte_t pte)
 {
diff --git a/mm/mmzone.c b/mm/mmzone.c
index e35bfb8..f5b7d17 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,24 +87,3 @@
 	return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
-#ifdef CONFIG_SMP
-/* Called when a more accurate view of NR_FREE_PAGES is needed */
-unsigned long zone_nr_free_pages(struct zone *zone)
-{
-	unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
-
-	/*
-	 * While kswapd is awake, it is considered the zone is under some
-	 * memory pressure. Under pressure, there is a risk that
-	 * per-cpu-counter-drift will allow the min watermark to be breached
-	 * potentially causing a live-lock. While kswapd is awake and
-	 * free pages are low, get a better estimate for free pages
-	 */
-	if (nr_free_pages < zone->percpu_drift_mark &&
-			!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
-		return zone_page_state_snapshot(zone, NR_FREE_PAGES);
-
-	return nr_free_pages;
-}
-#endif /* CONFIG_SMP */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4c51338..5a688a2 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -78,7 +78,7 @@
 	pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -88,13 +88,21 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*pmd)) {
+			if (next - addr != HPAGE_PMD_SIZE)
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			else if (change_huge_pmd(vma, pmd, addr, newprot))
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pmd++, addr = next, addr != end);
 }
 
-static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -106,7 +114,8 @@
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+		change_pmd_range(vma, pud, addr, next, newprot,
+				 dirty_accountable);
 	} while (pud++, addr = next, addr != end);
 }
 
@@ -126,7 +135,8 @@
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+		change_pud_range(vma, pgd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
 }
diff --git a/mm/mremap.c b/mm/mremap.c
index 563fbdd..1de98d4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -41,13 +41,15 @@
 		return NULL;
 
 	pmd = pmd_offset(pud, addr);
+	split_huge_page_pmd(mm, pmd);
 	if (pmd_none_or_clear_bad(pmd))
 		return NULL;
 
 	return pmd;
 }
 
-static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			    unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -62,7 +64,8 @@
 	if (!pmd)
 		return NULL;
 
-	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
+	VM_BUG_ON(pmd_trans_huge(*pmd));
+	if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
 		return NULL;
 
 	return pmd;
@@ -91,9 +94,7 @@
 		 */
 		mapping = vma->vm_file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
-		if (new_vma->vm_truncate_count &&
-		    new_vma->vm_truncate_count != vma->vm_truncate_count)
-			new_vma->vm_truncate_count = 0;
+		new_vma->vm_truncate_count = 0;
 	}
 
 	/*
@@ -147,7 +148,7 @@
 		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
 		if (!old_pmd)
 			continue;
-		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
+		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
 		if (!new_pmd)
 			break;
 		next = (new_addr + PMD_SIZE) & PMD_MASK;
diff --git a/mm/nommu.c b/mm/nommu.c
index ef4045d..f59e142 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -127,7 +127,8 @@
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int nr_pages, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *retry)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -185,7 +186,8 @@
 	if (force)
 		flags |= FOLL_FORCE;
 
-	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+				NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b4edfe7..2cb01f6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -404,15 +404,18 @@
  * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
  * - vm.dirty_ratio             or  vm.dirty_bytes
  * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * runtime tasks.
+ * real-time tasks.
  */
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
 	unsigned long background;
 	unsigned long dirty;
-	unsigned long available_memory = determine_dirtyable_memory();
+	unsigned long uninitialized_var(available_memory);
 	struct task_struct *tsk;
 
+	if (!vm_dirty_bytes || !dirty_background_bytes)
+		available_memory = determine_dirtyable_memory();
+
 	if (vm_dirty_bytes)
 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 	else
@@ -1103,7 +1106,7 @@
 int __set_page_dirty_no_writeback(struct page *page)
 {
 	if (!PageDirty(page))
-		SetPageDirty(page);
+		return !TestSetPageDirty(page);
 	return 0;
 }
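
The one-line change to __set_page_dirty_no_writeback() makes it report whether this call actually dirtied the page, instead of always returning 0. A stand-alone sketch of that test-and-set idiom with a plain int in place of the page flag (names are made up for illustration):

#include <stdio.h>

/* Returns the old value and sets the flag, mimicking TestSetPageDirty(). */
static int test_and_set(int *flag)
{
	int old = *flag;
	*flag = 1;
	return old;
}

/* Mimics the new __set_page_dirty_no_writeback(): returns 1 only when this
 * call is the one that transitioned the "page" from clean to dirty. */
static int set_dirty_no_writeback(int *dirty)
{
	if (!*dirty)
		return !test_and_set(dirty);
	return 0;
}

int main(void)
{
	int dirty = 0;

	printf("first call:  %d\n", set_dirty_no_writeback(&dirty));	/* 1 */
	printf("second call: %d\n", set_dirty_no_writeback(&dirty));	/* 0 */
	return 0;
}
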
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff7e158..cdef1d4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -357,6 +357,7 @@
 	}
 }
 
+/* update __split_huge_page_refcount if you change this function */
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
@@ -426,18 +427,10 @@
  *
  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  */
-static inline struct page *
-__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
-{
-	unsigned long buddy_idx = page_idx ^ (1 << order);
-
-	return page + (buddy_idx - page_idx);
-}
-
 static inline unsigned long
-__find_combined_index(unsigned long page_idx, unsigned int order)
+__find_buddy_index(unsigned long page_idx, unsigned int order)
 {
-	return (page_idx & ~(1 << order));
+	return page_idx ^ (1 << order);
 }
 
 /*
@@ -448,8 +441,8 @@
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we use PG_buddy.
- * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
+ * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
@@ -482,7 +475,7 @@
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with PG_buddy. Page's
+ * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other.  That is, if we allocate a small block, and both were   
@@ -499,6 +492,7 @@
 {
 	unsigned long page_idx;
 	unsigned long combined_idx;
+	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
 
 	if (unlikely(PageCompound(page)))
@@ -513,7 +507,8 @@
 	VM_BUG_ON(bad_range(zone, page));
 
 	while (order < MAX_ORDER-1) {
-		buddy = __page_find_buddy(page, page_idx, order);
+		buddy_idx = __find_buddy_index(page_idx, order);
+		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
 			break;
 
@@ -521,7 +516,7 @@
 		list_del(&buddy->lru);
 		zone->free_area[order].nr_free--;
 		rmv_page_order(buddy);
-		combined_idx = __find_combined_index(page_idx, order);
+		combined_idx = buddy_idx & page_idx;
 		page = page + (combined_idx - page_idx);
 		page_idx = combined_idx;
 		order++;
@@ -538,9 +533,10 @@
 	 */
 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 		struct page *higher_page, *higher_buddy;
-		combined_idx = __find_combined_index(page_idx, order);
-		higher_page = page + combined_idx - page_idx;
-		higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+		combined_idx = buddy_idx & page_idx;
+		higher_page = page + (combined_idx - page_idx);
+		buddy_idx = __find_buddy_index(combined_idx, order + 1);
+		higher_buddy = page + (buddy_idx - combined_idx);
 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
 			list_add_tail(&page->lru,
 				&zone->free_area[order].free_list[migratetype]);
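
The buddy bookkeeping above is pure index arithmetic: a block's buddy at a given order lives at page_idx ^ (1 << order), and the merged block starts at buddy_idx & page_idx. A stand-alone program that just prints those indices for a few worked cases (no struct page, zone, or free lists involved):

#include <stdio.h>

/* Same arithmetic as __find_buddy_index(): flip the bit for this order. */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned long examples[][2] = {	/* { page_idx, order } */
		{ 8, 2 },	/* buddy 12, combined 8  */
		{ 12, 2 },	/* buddy 8,  combined 8  */
		{ 16, 3 },	/* buddy 24, combined 16 */
	};
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned long page_idx = examples[i][0];
		unsigned int order = (unsigned int)examples[i][1];
		unsigned long buddy_idx = find_buddy_index(page_idx, order);
		/* For order-aligned indices, the merged block starts at the
		 * lower of the two, which is exactly buddy_idx & page_idx. */
		unsigned long combined_idx = buddy_idx & page_idx;

		printf("idx %2lu order %u -> buddy %2lu, combined %2lu\n",
		       page_idx, order, buddy_idx, combined_idx);
	}
	return 0;
}

For example, the order-2 blocks at indices 8 and 12 are buddies, and merging them yields the order-3 block starting at index 8.
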
@@ -651,13 +647,10 @@
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
-	for (i = 0; i < (1 << order); i++) {
-		struct page *pg = page + i;
-
-		if (PageAnon(pg))
-			pg->mapping = NULL;
-		bad += free_pages_check(pg);
-	}
+	if (PageAnon(page))
+		page->mapping = NULL;
+	for (i = 0; i < (1 << order); i++)
+		bad += free_pages_check(page + i);
 	if (bad)
 		return false;
 
@@ -1095,8 +1088,10 @@
 		pset = per_cpu_ptr(zone->pageset, cpu);
 
 		pcp = &pset->pcp;
-		free_pcppages_bulk(zone, pcp->count, pcp);
-		pcp->count = 0;
+		if (pcp->count) {
+			free_pcppages_bulk(zone, pcp->count, pcp);
+			pcp->count = 0;
+		}
 		local_irq_restore(flags);
 	}
 }
@@ -1460,24 +1455,24 @@
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
 /*
- * Return 1 if free pages are above 'mark'. This takes into account the order
+ * Return true if free pages are above 'mark'. This takes into account the order
  * of the allocation.
  */
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		      int classzone_idx, int alloc_flags)
+static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags, long free_pages)
 {
 	/* free_pages may go negative - that's OK */
 	long min = mark;
-	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
 	int o;
 
+	free_pages -= (1 << order) + 1;
 	if (alloc_flags & ALLOC_HIGH)
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
 
 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
-		return 0;
+		return false;
 	for (o = 0; o < order; o++) {
 		/* At the next order, this order's pages become unavailable */
 		free_pages -= z->free_area[o].nr_free << o;
@@ -1486,9 +1481,28 @@
 		min >>= 1;
 
 		if (free_pages <= min)
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
+}
+
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+					zone_page_state(z, NR_FREE_PAGES));
+}
+
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	long free_pages = zone_page_state(z, NR_FREE_PAGES);
+
+	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
+		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
+
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+								free_pages);
 }
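
__zone_watermark_ok() above is an arithmetic check: discount the pages the request itself would take, then walk the lower orders, removing their free pages from the pool and halving the watermark at each step. A simplified stand-alone rendition with a made-up free_area table (lowmem_reserve and the ALLOC_HIGH/ALLOC_HARDER adjustments are left out):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11

/* Simplified user-space rendition of the __zone_watermark_ok() loop:
 * nr_free[o] is the number of free blocks at each order, free_pages the
 * total number of free order-0 pages. */
static bool watermark_ok(unsigned int order, long mark, long free_pages,
			 const long nr_free[MAX_ORDER])
{
	long min = mark;
	unsigned int o;

	/* Discount the allocation being checked (mirrors the kernel expression). */
	free_pages -= (1L << order) + 1;
	if (free_pages <= min)
		return false;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable. */
		free_pages -= nr_free[o] << o;
		/* Require less free memory at the higher orders. */
		min >>= 1;
		if (free_pages <= min)
			return false;
	}
	return true;
}

int main(void)
{
	const long nr_free[MAX_ORDER] = { 200, 50, 10, 2 };	/* rest zero */
	long free_pages = 200 * 1 + 50 * 2 + 10 * 4 + 2 * 8;	/* 356 pages */

	printf("order 0: %s\n", watermark_ok(0, 128, free_pages, nr_free) ? "ok" : "below mark");
	printf("order 3: %s\n", watermark_ok(3, 128, free_pages, nr_free) ? "ok" : "below mark");
	return 0;
}

With this toy data an order-0 request passes while an order-3 request fails, because most of the free memory sits in the lowest orders.
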
 
 #ifdef CONFIG_NUMA
@@ -1793,15 +1807,18 @@
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	struct page *page;
 
 	if (!order || compaction_deferred(preferred_zone))
 		return NULL;
 
+	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-								nodemask);
+						nodemask, sync_migration);
+	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
 		/* Page migration frees to the PCP lists but we want merging */
@@ -1837,7 +1854,8 @@
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	return NULL;
 }
@@ -1852,23 +1870,22 @@
 {
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	struct task_struct *p = current;
 	bool drained = false;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	p->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
+	current->reclaim_state = &reclaim_state;
 
 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
-	p->reclaim_state = NULL;
+	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
-	p->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
 
@@ -1920,19 +1937,19 @@
 
 static inline
 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
-						enum zone_type high_zoneidx)
+						enum zone_type high_zoneidx,
+						enum zone_type classzone_idx)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-		wakeup_kswapd(zone, order);
+		wakeup_kswapd(zone, order, classzone_idx);
 }
 
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	struct task_struct *p = current;
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
 
@@ -1948,18 +1965,23 @@
 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
 	if (!wait) {
-		alloc_flags |= ALLOC_HARDER;
+		/*
+		 * Not worth trying to allocate harder for
+		 * __GFP_NOMEMALLOC even if it can't schedule.
+		 */
+		if (!(gfp_mask & __GFP_NOMEMALLOC))
+			alloc_flags |= ALLOC_HARDER;
 		/*
 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)) && !in_interrupt())
+	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 		if (!in_interrupt() &&
-		    ((p->flags & PF_MEMALLOC) ||
+		    ((current->flags & PF_MEMALLOC) ||
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
@@ -1978,7 +2000,7 @@
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	struct task_struct *p = current;
+	bool sync_migration = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2003,7 +2025,9 @@
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx);
+	if (!(gfp_mask & __GFP_NO_KSWAPD))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+						zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2012,6 +2036,14 @@
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Find the true preferred zone if the allocation is unconstrained by
+	 * cpusets.
+	 */
+	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
+		first_zones_zonelist(zonelist, high_zoneidx, NULL,
+					&preferred_zone);
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2034,21 +2066,26 @@
 		goto nopage;
 
 	/* Avoid recursion of direct reclaim */
-	if (p->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 		goto nopage;
 
-	/* Try direct compaction */
+	/*
+	 * Try direct compaction. The first pass is asynchronous. Subsequent
+	 * attempts after direct reclaim are synchronous
+	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
+	sync_migration = true;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2102,13 +2139,27 @@
 		/* Wait for some write requests to complete then retry */
 		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 		goto rebalance;
+	} else {
+		/*
+		 * High-order allocations do not necessarily loop after
+		 * direct reclaim and reclaim/compaction depends on compaction
+		 * being called after reclaim so call directly if necessary
+		 */
+		page = __alloc_pages_direct_compact(gfp_mask, order,
+					zonelist, high_zoneidx,
+					nodemask,
+					alloc_flags, preferred_zone,
+					migratetype, &did_some_progress,
+					sync_migration);
+		if (page)
+			goto got_pg;
 	}
 
 nopage:
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
 		printk(KERN_WARNING "%s: page allocation failure."
 			" order:%d, mode:0x%x\n",
-			p->comm, order, gfp_mask);
+			current->comm, order, gfp_mask);
 		dump_stack();
 		show_mem();
 	}
@@ -2151,7 +2202,9 @@
 
 	get_mems_allowed();
 	/* The preferred zone is used for statistics later */
-	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+	first_zones_zonelist(zonelist, high_zoneidx,
+				nodemask ? : &cpuset_current_mems_allowed,
+				&preferred_zone);
 	if (!preferred_zone) {
 		put_mems_allowed();
 		return NULL;
@@ -2442,7 +2495,7 @@
 			" all_unreclaimable? %s"
 			"\n",
 			zone->name,
-			K(zone_nr_free_pages(zone)),
+			K(zone_page_state(zone, NR_FREE_PAGES)),
 			K(min_wmark_pages(zone)),
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),
@@ -2585,9 +2638,16 @@
 
 static __init int setup_numa_zonelist_order(char *s)
 {
-	if (s)
-		return __parse_numa_zonelist_order(s);
-	return 0;
+	int ret;
+
+	if (!s)
+		return 0;
+
+	ret = __parse_numa_zonelist_order(s);
+	if (ret == 0)
+		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
+
+	return ret;
 }
 early_param("numa_zonelist_order", setup_numa_zonelist_order);
 
@@ -4014,7 +4074,7 @@
 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
@@ -5316,10 +5376,9 @@
 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
 		unsigned long check = pfn + iter;
 
-		if (!pfn_valid_within(check)) {
-			iter++;
+		if (!pfn_valid_within(check))
 			continue;
-		}
+
 		page = pfn_to_page(check);
 		if (!page_count(page)) {
 			if (PageBuddy(page))
@@ -5517,7 +5576,6 @@
 	{1UL << PG_swapcache,		"swapcache"	},
 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
 	{1UL << PG_reclaim,		"reclaim"	},
-	{1UL << PG_buddy,		"buddy"		},
 	{1UL << PG_swapbacked,		"swapbacked"	},
 	{1UL << PG_unevictable,		"unevictable"	},
 #ifdef CONFIG_MMU
@@ -5565,7 +5623,7 @@
 {
 	printk(KERN_ALERT
 	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
-		page, page_count(page), page_mapcount(page),
+		page, atomic_read(&page->_count), page_mapcount(page),
 		page->mapping, page->index);
 	dump_page_flags(page->flags);
 }
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 38cc58b..7cfa6ae 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -34,6 +34,7 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		split_huge_page_pmd(walk->mm, pmd);
 		if (pmd_none_or_clear_bad(pmd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 7d9c1d0..ea53496 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -421,7 +421,7 @@
 		return NULL;
 
 	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
-				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
+				pcpu_nr_groups, pcpu_atom_size);
 	if (!vms) {
 		pcpu_free_chunk(chunk);
 		return NULL;
diff --git a/mm/percpu.c b/mm/percpu.c
index 3dd4984..3f93001 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -258,7 +258,7 @@
 
 /*
  * (Un)populated page region iterators.  Iterate over (un)populated
- * page regions betwen @start and @end in @chunk.  @rs and @re should
+ * page regions between @start and @end in @chunk.  @rs and @re should
  * be integer variables and will be set to start and end page index of
  * the current region.
  */
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
new file mode 100644
index 0000000..eb663fb
--- /dev/null
+++ b/mm/pgtable-generic.c
@@ -0,0 +1,121 @@
+/*
+ *  mm/pgtable-generic.c
+ *
+ *  Generic pgtable methods declared in asm-generic/pgtable.h
+ *
+ *  Copyright (C) 2010  Linus Torvalds
+ */
+
+#include <linux/pagemap.h>
+#include <asm/tlb.h>
+#include <asm-generic/pgtable.h>
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Only sets the access flags (dirty, accessed, and
+ * writable). Furthermore, we know it always gets set to a "more
+ * permissive" setting, which allows most architectures to optimize
+ * this. We return whether the PTE actually changed, which in turn
+ * instructs the caller to do things like update_mmu_cache().  This
+ * used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c so we changed this macro slightly
+ */
+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	int changed = !pte_same(*ptep, entry);
+	if (changed) {
+		set_pte_at(vma->vm_mm, address, ptep, entry);
+		flush_tlb_page(vma, address);
+	}
+	return changed;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	int changed = !pmd_same(*pmdp, entry);
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	if (changed) {
+		set_pmd_at(vma->vm_mm, address, pmdp, entry);
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	}
+	return changed;
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+	BUG();
+	return 0;
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep)
+{
+	int young;
+	young = ptep_test_and_clear_young(vma, address, ptep);
+	if (young)
+		flush_tlb_page(vma, address);
+	return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp)
+{
+	int young;
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+	BUG();
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	young = pmdp_test_and_clear_young(vma, address, pmdp);
+	if (young)
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
+		       pte_t *ptep)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
+	flush_tlb_page(vma, address);
+	return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
+		       pmd_t *pmdp)
+{
+	pmd_t pmd;
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			   pmd_t *pmdp)
+{
+	pmd_t pmd = pmd_mksplitting(*pmdp);
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	/* tlb flush only to serialize against gup-fast */
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
diff --git a/mm/rmap.c b/mm/rmap.c
index 1a8bf76b..941bf82 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -94,7 +94,7 @@
  * anonymous pages mapped into it with that anon_vma.
  *
  * The common case will be that we already have one, but if
- * if not we either need to find an adjacent mapping that we
+ * not we either need to find an adjacent mapping that we
  * can re-use the anon_vma from (very common when the only
  * reason for splitting a vma has been mprotect()), or we
  * allocate a new one.
@@ -177,6 +177,10 @@
 	list_add(&avc->same_vma, &vma->anon_vma_chain);
 
 	anon_vma_lock(anon_vma);
+	/*
+	 * It's critical to add new vmas to the tail of the anon_vma,
+	 * see comment in huge_memory.c:__split_huge_page().
+	 */
 	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 	anon_vma_unlock(anon_vma);
 }
@@ -360,7 +364,7 @@
  * Returns virtual address or -EFAULT if page's index/offset is not
  * within the range mapped the @vma.
  */
-static inline unsigned long
+inline unsigned long
 vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -435,6 +439,8 @@
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return NULL;
+	if (pmd_trans_huge(*pmd))
+		return NULL;
 
 	pte = pte_offset_map(pmd, address);
 	/* Make a quick check before getting the lock */
@@ -489,35 +495,65 @@
 			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
-	spinlock_t *ptl;
 	int referenced = 0;
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
-	if (!pte)
-		goto out;
+	if (unlikely(PageTransHuge(page))) {
+		pmd_t *pmd;
 
-	/*
-	 * Don't want to elevate referenced for mlocked page that gets this far,
-	 * in order that it progresses to try_to_unmap and is moved to the
-	 * unevictable list.
-	 */
-	if (vma->vm_flags & VM_LOCKED) {
-		*mapcount = 1;	/* break early from loop */
-		*vm_flags |= VM_LOCKED;
-		goto out_unmap;
-	}
-
-	if (ptep_clear_flush_young_notify(vma, address, pte)) {
+		spin_lock(&mm->page_table_lock);
 		/*
-		 * Don't treat a reference through a sequentially read
-		 * mapping as such.  If the page has been used in
-		 * another mapping, we will catch it; if this other
-		 * mapping is already gone, the unmap path will have
-		 * set PG_referenced or activated the page.
+		 * rmap might return false positives; we must filter
+		 * these out using page_check_address_pmd().
 		 */
-		if (likely(!VM_SequentialReadHint(vma)))
+		pmd = page_check_address_pmd(page, mm, address,
+					     PAGE_CHECK_ADDRESS_PMD_FLAG);
+		if (!pmd) {
+			spin_unlock(&mm->page_table_lock);
+			goto out;
+		}
+
+		if (vma->vm_flags & VM_LOCKED) {
+			spin_unlock(&mm->page_table_lock);
+			*mapcount = 0;	/* break early from loop */
+			*vm_flags |= VM_LOCKED;
+			goto out;
+		}
+
+		/* go ahead even if the pmd is pmd_trans_splitting() */
+		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
+		spin_unlock(&mm->page_table_lock);
+	} else {
+		pte_t *pte;
+		spinlock_t *ptl;
+
+		/*
+		 * rmap might return false positives; we must filter
+		 * these out using page_check_address().
+		 */
+		pte = page_check_address(page, mm, address, &ptl, 0);
+		if (!pte)
+			goto out;
+
+		if (vma->vm_flags & VM_LOCKED) {
+			pte_unmap_unlock(pte, ptl);
+			*mapcount = 0;	/* break early from loop */
+			*vm_flags |= VM_LOCKED;
+			goto out;
+		}
+
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			/*
+			 * Don't treat a reference through a sequentially read
+			 * mapping as such.  If the page has been used in
+			 * another mapping, we will catch it; if this other
+			 * mapping is already gone, the unmap path will have
+			 * set PG_referenced or activated the page.
+			 */
+			if (likely(!VM_SequentialReadHint(vma)))
+				referenced++;
+		}
+		pte_unmap_unlock(pte, ptl);
 	}
 
 	/* Pretend the page is referenced if the task has the
@@ -526,9 +562,7 @@
 			rwsem_is_locked(&mm->mmap_sem))
 		referenced++;
 
-out_unmap:
 	(*mapcount)--;
-	pte_unmap_unlock(pte, ptl);
 
 	if (referenced)
 		*vm_flags |= vma->vm_flags;
@@ -864,8 +898,13 @@
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	int first = atomic_inc_and_test(&page->_mapcount);
-	if (first)
-		__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (first) {
+		if (!PageTransHuge(page))
+			__inc_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__inc_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
+	}
 	if (unlikely(PageKsm(page)))
 		return;
 
@@ -893,7 +932,10 @@
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
-	__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (!PageTransHuge(page))
+		__inc_zone_page_state(page, NR_ANON_PAGES);
+	else
+		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -911,7 +953,7 @@
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, 1);
+		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 }
 
@@ -946,10 +988,14 @@
 		return;
 	if (PageAnon(page)) {
 		mem_cgroup_uncharge_page(page);
-		__dec_zone_page_state(page, NR_ANON_PAGES);
+		if (!PageTransHuge(page))
+			__dec_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__dec_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, -1);
+		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
@@ -1202,7 +1248,7 @@
 	return ret;
 }
 
-static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+bool is_vma_temporary_stack(struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
 
@@ -1400,6 +1446,7 @@
 	int ret;
 
 	BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
 	if (unlikely(PageKsm(page)))
 		ret = try_to_unmap_ksm(page, flags);
diff --git a/mm/slab.c b/mm/slab.c
index 2640374..37961d1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -284,7 +284,7 @@
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define	CACHE_CACHE 0
 #define	SIZE_AC MAX_NUMNODES
 #define	SIZE_L3 (2 * MAX_NUMNODES)
@@ -4053,7 +4053,7 @@
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
  */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
 {
 	int tofree;
@@ -4317,7 +4317,7 @@
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
diff --git a/mm/slub.c b/mm/slub.c
index 008cd74..e15aa7f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3636,7 +3636,7 @@
 		len += sprintf(buf + len, "%7ld ", l->count);
 
 		if (l->addr)
-			len += sprint_symbol(buf + len, (unsigned long)l->addr);
+			len += sprintf(buf + len, "%pS", (void *)l->addr);
 		else
 			len += sprintf(buf + len, "<not-available>");
 
@@ -3797,7 +3797,7 @@
 		}
 	}
 
-	down_read(&slub_lock);
+	lock_memory_hotplug();
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
@@ -3838,7 +3838,7 @@
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
 #endif
-	up_read(&slub_lock);
+	unlock_memory_hotplug();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
@@ -3946,12 +3946,9 @@
 
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
-	if (s->ctor) {
-		int n = sprint_symbol(buf, (unsigned long)s->ctor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
+	if (!s->ctor)
+		return 0;
+	return sprintf(buf, "%pS\n", s->ctor);
 }
 SLAB_ATTR_RO(ctor);
 
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 29d6cbf..64b9840 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -9,7 +9,7 @@
  *
  * However, virtual mappings need a page table and TLBs. Many Linux
  * architectures already map their physical space using 1-1 mappings
- * via TLBs. For those arches the virtual memmory map is essentially
+ * via TLBs. For those arches the virtual memory map is essentially
  * for free if we use the same page size as the 1-1 mappings. In that
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
diff --git a/mm/sparse.c b/mm/sparse.c
index 95ac219..9325020 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -671,10 +671,10 @@
 static void free_map_bootmem(struct page *page, unsigned long nr_pages)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
-	int magic;
+	unsigned long magic;
 
 	for (i = 0; i < nr_pages; i++, page++) {
-		magic = atomic_read(&page->_mapcount);
+		magic = (unsigned long) page->lru.next;
 
 		BUG_ON(magic == NODE_INFO);
 
diff --git a/mm/swap.c b/mm/swap.c
index 3f48542..c02f936 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -56,17 +56,97 @@
 		del_page_from_lru(zone, page);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
+}
+
+static void __put_single_page(struct page *page)
+{
+	__page_cache_release(page);
 	free_hot_cold_page(page, 0);
 }
 
+static void __put_compound_page(struct page *page)
+{
+	compound_page_dtor *dtor;
+
+	__page_cache_release(page);
+	dtor = get_compound_page_dtor(page);
+	(*dtor)(page);
+}
+
 static void put_compound_page(struct page *page)
 {
-	page = compound_head(page);
-	if (put_page_testzero(page)) {
-		compound_page_dtor *dtor;
-
-		dtor = get_compound_page_dtor(page);
-		(*dtor)(page);
+	if (unlikely(PageTail(page))) {
+		/* __split_huge_page_refcount can run under us */
+		struct page *page_head = page->first_page;
+		smp_rmb();
+		/*
+		 * If PageTail is still set after smp_rmb() we can be sure
+		 * that the page->first_page we read wasn't a dangling pointer.
+		 * See __split_huge_page_refcount() smp_wmb().
+		 */
+		if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
+			unsigned long flags;
+			/*
+			 * Verify that our page_head wasn't converted
+			 * to a a regular page before we got a
+			 * to a regular page before we got a
+			 */
+			if (unlikely(!PageHead(page_head))) {
+				/* PageHead is cleared after PageTail */
+				smp_rmb();
+				VM_BUG_ON(PageTail(page));
+				goto out_put_head;
+			}
+			/*
+			 * Only run compound_lock on a valid PageHead,
+			 * after having it pinned with
+			 * get_page_unless_zero() above.
+			 */
+			smp_mb();
+			/* page_head wasn't a dangling pointer */
+			flags = compound_lock_irqsave(page_head);
+			if (unlikely(!PageTail(page))) {
+				/* __split_huge_page_refcount run before us */
+				compound_unlock_irqrestore(page_head, flags);
+				VM_BUG_ON(PageHead(page_head));
+			out_put_head:
+				if (put_page_testzero(page_head))
+					__put_single_page(page_head);
+			out_put_single:
+				if (put_page_testzero(page))
+					__put_single_page(page);
+				return;
+			}
+			VM_BUG_ON(page_head != page->first_page);
+			/*
+			 * We can release the refcount taken by
+			 * get_page_unless_zero now that
+			 * split_huge_page_refcount is blocked on the
+			 * compound_lock.
+			 */
+			if (put_page_testzero(page_head))
+				VM_BUG_ON(1);
+			/* __split_huge_page_refcount will wait now */
+			VM_BUG_ON(atomic_read(&page->_count) <= 0);
+			atomic_dec(&page->_count);
+			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
+			compound_unlock_irqrestore(page_head, flags);
+			if (put_page_testzero(page_head)) {
+				if (PageHead(page_head))
+					__put_compound_page(page_head);
+				else
+					__put_single_page(page_head);
+			}
+		} else {
+			/* page_head is a dangling pointer */
+			VM_BUG_ON(PageTail(page));
+			goto out_put_single;
+		}
+	} else if (put_page_testzero(page)) {
+		if (PageHead(page))
+			__put_compound_page(page);
+		else
+			__put_single_page(page);
 	}
 }
 
@@ -75,7 +155,7 @@
 	if (unlikely(PageCompound(page)))
 		put_compound_page(page);
 	else if (put_page_testzero(page))
-		__page_cache_release(page);
+		__put_single_page(page);
 }
 EXPORT_SYMBOL(put_page);
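
put_page() now funnels the final reference drop through __put_single_page() or __put_compound_page(), the latter calling the per-page destructor returned by get_compound_page_dtor(). The dispatch itself is just a flag test plus an indirect call; a toy stand-alone version (hypothetical struct toy_page, no real refcounting, locking, or tail-page handling):

#include <stdio.h>

struct toy_page;
typedef void (*compound_page_dtor)(struct toy_page *);

struct toy_page {
	int refcount;
	int is_compound;		/* stands in for PageHead() */
	compound_page_dtor dtor;	/* stands in for get_compound_page_dtor() */
};

static void put_single_page(struct toy_page *page)
{
	(void)page;
	printf("freeing single page\n");
}

static void free_compound_page(struct toy_page *page)
{
	(void)page;
	printf("freeing compound page via its destructor\n");
}

/* Mirrors the shape of put_page(): drop a reference, and only on the last
 * drop pick the single-page or compound-page teardown path. */
static void toy_put_page(struct toy_page *page)
{
	if (--page->refcount > 0)
		return;
	if (page->is_compound)
		page->dtor(page);
	else
		put_single_page(page);
}

int main(void)
{
	struct toy_page single = { 2, 0, NULL };
	struct toy_page huge = { 1, 1, free_compound_page };

	toy_put_page(&single);	/* refcount 2 -> 1, nothing freed */
	toy_put_page(&single);	/* last reference: single-page path */
	toy_put_page(&huge);	/* last reference: destructor path */
	return 0;
}

The hard part of the real code, of course, is the tail-page dance against __split_huge_page_refcount(), which this sketch deliberately leaves out.
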
 
@@ -399,6 +479,43 @@
 
 EXPORT_SYMBOL(__pagevec_release);
 
+/* used by __split_huge_page_refcount() */
+void lru_add_page_tail(struct zone* zone,
+		       struct page *page, struct page *page_tail)
+{
+	int active;
+	enum lru_list lru;
+	const int file = 0;
+	struct list_head *head;
+
+	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON(PageCompound(page_tail));
+	VM_BUG_ON(PageLRU(page_tail));
+	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+
+	SetPageLRU(page_tail);
+
+	if (page_evictable(page_tail, NULL)) {
+		if (PageActive(page)) {
+			SetPageActive(page_tail);
+			active = 1;
+			lru = LRU_ACTIVE_ANON;
+		} else {
+			active = 0;
+			lru = LRU_INACTIVE_ANON;
+		}
+		update_page_reclaim_stat(zone, page_tail, file, active);
+		if (likely(PageLRU(page)))
+			head = page->lru.prev;
+		else
+			head = &zone->lru[lru].list;
+		__add_page_to_lru_list(zone, page_tail, lru, head);
+	} else {
+		SetPageUnevictable(page_tail);
+		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
+	}
+}
+
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e10f583..5c8cfab 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -157,6 +157,12 @@
 	if (!entry.val)
 		return 0;
 
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page(page))) {
+			swapcache_free(entry, NULL);
+			return 0;
+		}
+
 	/*
 	 * Radix-tree node allocations from PF_MEMALLOC contexts could
 	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 67ddaaf..0341c57 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -964,6 +964,8 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (unlikely(pmd_trans_huge(*pmd)))
+			continue;
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
@@ -1677,7 +1679,7 @@
 	if (S_ISBLK(inode->i_mode)) {
 		struct block_device *bdev = I_BDEV(inode);
 		set_blocksize(bdev, p->old_block_size);
-		bd_release(bdev);
+		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 	} else {
 		mutex_lock(&inode->i_mutex);
 		inode->i_flags &= ~S_SWAPFILE;
@@ -1938,8 +1940,9 @@
 
 	error = -EINVAL;
 	if (S_ISBLK(inode->i_mode)) {
-		bdev = I_BDEV(inode);
-		error = bd_claim(bdev, sys_swapon);
+		bdev = bdgrab(I_BDEV(inode));
+		error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
+				   sys_swapon);
 		if (error < 0) {
 			bdev = NULL;
 			error = -EINVAL;
@@ -2136,7 +2139,7 @@
 bad_swap:
 	if (bdev) {
 		set_blocksize(bdev, p->old_block_size);
-		bd_release(bdev);
+		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 	}
 	destroy_swap_extents(p);
 	swap_cgroup_swapoff(type);
diff --git a/mm/truncate.c b/mm/truncate.c
index 3c2d5dd..d64296b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -225,6 +225,7 @@
 	next = start;
 	while (next <= end &&
 	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
+		mem_cgroup_uncharge_end();
 		cond_resched();
 	}
 
@@ -549,13 +551,12 @@
  * @inode: inode
  * @newsize: new file size
  *
- * truncate_setsize updastes i_size update and performs pagecache
- * truncation (if necessary) for a file size updates. It will be
- * typically be called from the filesystem's setattr function when
- * ATTR_SIZE is passed in.
+ * truncate_setsize updates i_size and performs pagecache truncation (if
+ * necessary) to @newsize. It will typically be called from the filesystem's
+ * setattr function when ATTR_SIZE is passed in.
  *
- * Must be called with inode_mutex held and after all filesystem
- * specific block truncation has been performed.
+ * Must be called with inode_mutex held and before all filesystem specific
+ * block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
 {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index eb5cc7d..f9b1667 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -748,7 +748,7 @@
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask);
-	if (unlikely(IS_ERR(va))) {
+	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
 	}
@@ -1175,6 +1175,7 @@
 {
 	vunmap_page_range(addr, addr + size);
 }
+EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
 
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
@@ -1315,13 +1316,6 @@
 						-1, GFP_KERNEL, caller);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
-				   int node, gfp_t gfp_mask)
-{
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, __builtin_return_address(0));
-}
-
 static struct vm_struct *find_vm_area(const void *addr)
 {
 	struct vmap_area *va;
@@ -1537,17 +1531,47 @@
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+/**
+ *	__vmalloc_node_range  -  allocate virtually contiguous memory
+ *	@size:		allocation size
+ *	@align:		desired alignment
+ *	@start:		vm area range start
+ *	@end:		vm area range end
+ *	@gfp_mask:	flags for the page level allocator
+ *	@prot:		protection mask for the allocated pages
+ *	@node:		node to use for allocation or -1
+ *	@caller:	caller's return address
+ *
+ *	Allocate enough pages to cover @size from the page level
+ *	allocator with @gfp_mask flags.  Map them into contiguous
+ *	kernel virtual space, using a pagetable protection of @prot.
+ */
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					 __builtin_return_address(0));
+	struct vm_struct *area;
+	void *addr;
+	unsigned long real_size = size;
+
+	size = PAGE_ALIGN(size);
+	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+		return NULL;
+
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
+
+	if (!area)
+		return NULL;
+
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 
 	/*
 	 * A ref_count = 3 is needed because the vm_struct and vmap_area
 	 * structures allocated in the __get_vm_area_node() function contain
 	 * references to the virtual address of the vmalloc'ed block.
 	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
+	kmemleak_alloc(addr, real_size, 3, gfp_mask);
 
 	return addr;
 }
@@ -1569,30 +1593,8 @@
 			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller)
 {
-	struct vm_struct *area;
-	void *addr;
-	unsigned long real_size = size;
-
-	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
-		return NULL;
-
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
-
-	if (!area)
-		return NULL;
-
-	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
-
-	return addr;
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				gfp_mask, prot, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
@@ -2203,17 +2205,16 @@
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  *	    vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas.  This function allocates
- * congruent vmalloc areas for it.  These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes.  To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes.  To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
  *
  * Despite its complicated look, this allocator is rather simple.  It
  * does everything top-down and scans areas from the end looking for
@@ -2224,7 +2225,7 @@
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask)
+				     size_t align)
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2234,8 +2235,6 @@
 	unsigned long base, start, end, last_end;
 	bool purged = false;
 
-	gfp_mask &= GFP_RECLAIM_MASK;
-
 	/* verify parameters and allocate data structures */
 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
 	for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2268,14 +2267,14 @@
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
 	}
@@ -2456,13 +2455,8 @@
 	seq_printf(m, "0x%p-0x%p %7ld",
 		v->addr, v->addr + v->size, v->size);
 
-	if (v->caller) {
-		char buff[KSYM_SYMBOL_LEN];
-
-		seq_putc(m, ' ');
-		sprint_symbol(buff, (unsigned long)v->caller);
-		seq_puts(m, buff);
-	}
+	if (v->caller)
+		seq_printf(m, " %pS", v->caller);
 
 	if (v->nr_pages)
 		seq_printf(m, " pages=%d", v->nr_pages);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9ca587c..6771ea7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -32,6 +32,7 @@
 #include <linux/topology.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/compaction.h>
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 #include <linux/delay.h>
@@ -51,11 +52,23 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-enum lumpy_mode {
-	LUMPY_MODE_NONE,
-	LUMPY_MODE_ASYNC,
-	LUMPY_MODE_SYNC,
-};
+/*
+ * reclaim_mode determines how the inactive list is shrunk
+ * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
+ * RECLAIM_MODE_ASYNC:  Do not block
+ * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
+ * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
+ *			page from the LRU and reclaim all pages within a
+ *			naturally aligned range
+ * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
+ *			order-0 pages and then compact the zone
+ */
+typedef unsigned __bitwise__ reclaim_mode_t;
+#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
+#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
+#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
+#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
+#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)
 
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
@@ -88,7 +101,7 @@
 	 * Intend to reclaim enough continuous memory rather than reclaim
 	 * enough amount of memory. i.e, mode for high order allocation.
 	 */
-	enum lumpy_mode lumpy_reclaim_mode;
+	reclaim_mode_t reclaim_mode;
 
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
@@ -271,34 +284,37 @@
 	return ret;
 }
 
-static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
+static void set_reclaim_mode(int priority, struct scan_control *sc,
 				   bool sync)
 {
-	enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
 
 	/*
-	 * Some reclaim have alredy been failed. No worth to try synchronous
-	 * lumpy reclaim.
+	 * Initially assume we are entering either lumpy reclaim or
+	 * reclaim/compaction.Depending on the order, we will either set the
+	 * reclaim/compaction. Depending on the order, we will either set the
 	 */
-	if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
-		return;
+	if (COMPACTION_BUILD)
+		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
+	else
+		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
 
 	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
+	 * Avoid using lumpy reclaim or reclaim/compaction if possible by
+	 * restricting it to costly allocations or to reclaim under memory
+	 * pressure.
 	 */
 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		sc->lumpy_reclaim_mode = mode;
+		sc->reclaim_mode |= syncmode;
 	else if (sc->order && priority < DEF_PRIORITY - 2)
-		sc->lumpy_reclaim_mode = mode;
+		sc->reclaim_mode |= syncmode;
 	else
-		sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 }
 
-static void disable_lumpy_reclaim_mode(struct scan_control *sc)
+static void reset_reclaim_mode(struct scan_control *sc)
 {
-	sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 }
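
set_reclaim_mode() now composes the reclaim mode from independent bits instead of picking a single enum value, so sync/async and lumpy/compaction can be tested separately with &. A minimal stand-alone demonstration of the same decision structure (plain unsigned instead of the __bitwise__ typedef; compaction_build stands in for COMPACTION_BUILD):

#include <stdio.h>

#define RECLAIM_MODE_SINGLE		0x01u
#define RECLAIM_MODE_ASYNC		0x02u
#define RECLAIM_MODE_SYNC		0x04u
#define RECLAIM_MODE_LUMPYRECLAIM	0x08u
#define RECLAIM_MODE_COMPACTION		0x10u

#define PAGE_ALLOC_COSTLY_ORDER		3
#define DEF_PRIORITY			12

/* Same decision structure as set_reclaim_mode(): pick compaction or lumpy
 * reclaim as the base, then either add the sync/async bit or fall back to
 * plain order-0 reclaim. */
static unsigned set_mode(int compaction_build, int order, int priority, int sync)
{
	unsigned syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
	unsigned mode;

	mode = compaction_build ? RECLAIM_MODE_COMPACTION : RECLAIM_MODE_LUMPYRECLAIM;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		mode |= syncmode;
	else if (order && priority < DEF_PRIORITY - 2)
		mode |= syncmode;
	else
		mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;

	return mode;
}

int main(void)
{
	unsigned mode = set_mode(1, 4, DEF_PRIORITY, 1);	/* costly order, sync */

	printf("compaction? %d  sync? %d  single? %d\n",
	       !!(mode & RECLAIM_MODE_COMPACTION),
	       !!(mode & RECLAIM_MODE_SYNC),
	       !!(mode & RECLAIM_MODE_SINGLE));
	return 0;
}
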
 
 static inline int is_page_cache_freeable(struct page *page)
@@ -429,7 +445,7 @@
 		 * first attempt to free a range of pages fails.
 		 */
 		if (PageWriteback(page) &&
-		    sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
+		    (sc->reclaim_mode & RECLAIM_MODE_SYNC))
 			wait_on_page_writeback(page);
 
 		if (!PageWriteback(page)) {
@@ -437,7 +453,7 @@
 			ClearPageReclaim(page);
 		}
 		trace_mm_vmscan_writepage(page,
-			trace_reclaim_flags(page, sc->lumpy_reclaim_mode));
+			trace_reclaim_flags(page, sc->reclaim_mode));
 		inc_zone_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
@@ -622,7 +638,7 @@
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
+	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -739,7 +755,7 @@
 			 * for any page for which writeback has already
 			 * started.
 			 */
-			if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
 			    may_enter_fs)
 				wait_on_page_writeback(page);
 			else {
@@ -895,7 +911,7 @@
 			try_to_free_swap(page);
 		unlock_page(page);
 		putback_lru_page(page);
-		disable_lumpy_reclaim_mode(sc);
+		reset_reclaim_mode(sc);
 		continue;
 
 activate_locked:
@@ -908,7 +924,7 @@
 keep_locked:
 		unlock_page(page);
 keep:
-		disable_lumpy_reclaim_mode(sc);
+		reset_reclaim_mode(sc);
 keep_lumpy:
 		list_add(&page->lru, &ret_pages);
 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
@@ -1028,7 +1044,7 @@
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 
 		case -EBUSY:
@@ -1086,7 +1102,7 @@
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);
-				nr_taken++;
+				nr_taken += hpage_nr_pages(cursor_page);
 				nr_lumpy_taken++;
 				if (PageDirty(cursor_page))
 					nr_lumpy_dirty++;
@@ -1141,14 +1157,15 @@
 	struct page *page;
 
 	list_for_each_entry(page, page_list, lru) {
+		int numpages = hpage_nr_pages(page);
 		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			lru += LRU_ACTIVE;
 			ClearPageActive(page);
-			nr_active++;
+			nr_active += numpages;
 		}
 		if (count)
-			count[lru]++;
+			count[lru] += numpages;
 	}
 
 	return nr_active;
@@ -1258,7 +1275,8 @@
 		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
+			int numpages = hpage_nr_pages(page);
+			reclaim_stat->recent_rotated[file] += numpages;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1324,7 +1342,7 @@
 		return false;
 
 	/* Only stall on lumpy reclaim */
-	if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+	if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
 		return false;
 
 	/* If we have relaimed everything on the isolated list, no stall */
@@ -1368,15 +1386,15 @@
 			return SWAP_CLUSTER_MAX;
 	}
 
-	set_lumpy_reclaim_mode(priority, sc, false);
+	set_reclaim_mode(priority, sc, false);
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
 
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
-					ISOLATE_INACTIVE : ISOLATE_BOTH,
+			sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+					ISOLATE_BOTH : ISOLATE_INACTIVE,
 			zone, 0, file);
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
@@ -1388,8 +1406,8 @@
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
-					ISOLATE_INACTIVE : ISOLATE_BOTH,
+			sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+					ISOLATE_BOTH : ISOLATE_INACTIVE,
 			zone, sc->mem_cgroup,
 			0, file);
 		/*
@@ -1411,7 +1429,7 @@
 
 	/* Check if we should syncronously wait for writeback */
 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
-		set_lumpy_reclaim_mode(priority, sc, true);
+		set_reclaim_mode(priority, sc, true);
 		nr_reclaimed += shrink_page_list(&page_list, zone, sc);
 	}
 
@@ -1426,7 +1444,7 @@
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
 		priority,
-		trace_shrink_flags(file, sc->lumpy_reclaim_mode));
+		trace_shrink_flags(file, sc->reclaim_mode));
 	return nr_reclaimed;
 }
 
@@ -1466,7 +1484,7 @@
 
 		list_move(&page->lru, &zone->lru[lru].list);
 		mem_cgroup_add_lru_list(page, lru);
-		pgmoved++;
+		pgmoved += hpage_nr_pages(page);
 
 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1534,7 +1552,7 @@
 		}
 
 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			nr_rotated++;
+			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
@@ -1805,6 +1823,69 @@
 }
 
 /*
+ * Reclaim/compaction depends on a number of pages being freed. To avoid
+ * disruption to the system, a small number of order-0 pages continue to be
+ * rotated and reclaimed in the normal fashion. However, by the time we get
+ * back to the allocator and call try_to_compact_zone(), we ensure that
+ * there are enough free pages for it to be likely to succeed.
+ */
+static inline bool should_continue_reclaim(struct zone *zone,
+					unsigned long nr_reclaimed,
+					unsigned long nr_scanned,
+					struct scan_control *sc)
+{
+	unsigned long pages_for_compaction;
+	unsigned long inactive_lru_pages;
+
+	/* If not in reclaim/compaction mode, stop */
+	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+		return false;
+
+	/* Consider stopping depending on scan and reclaim activity */
+	if (sc->gfp_mask & __GFP_REPEAT) {
+		/*
+		 * For __GFP_REPEAT allocations, stop reclaiming if the
+		 * full LRU list has been scanned and we are still failing
+		 * to reclaim pages. This full LRU scan is potentially
+		 * expensive but a __GFP_REPEAT caller really wants to succeed
+		 */
+		if (!nr_reclaimed && !nr_scanned)
+			return false;
+	} else {
+		/*
+		 * For non-__GFP_REPEAT allocations which can presumably
+		 * fail without consequence, stop if we failed to reclaim
+		 * any pages from the last SWAP_CLUSTER_MAX number of
+		 * pages that were scanned. This will return to the
+		 * caller faster at the risk that reclaim/compaction and
+		 * the resulting allocation attempt fail
+		 */
+		if (!nr_reclaimed)
+			return false;
+	}
+
+	/*
+	 * If we have not reclaimed enough pages for compaction and the
+	 * inactive lists are large enough, continue reclaiming
+	 */
+	pages_for_compaction = (2UL << sc->order);
+	inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
+				zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+	if (sc->nr_reclaimed < pages_for_compaction &&
+			inactive_lru_pages > pages_for_compaction)
+		return true;
+
+	/* If compaction would go ahead or the allocation would succeed, stop */
+	switch (compaction_suitable(zone, sc->order)) {
+	case COMPACT_PARTIAL:
+	case COMPACT_CONTINUE:
+		return false;
+	default:
+		return true;
+	}
+}
+
+/*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static void shrink_zone(int priority, struct zone *zone,
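
Editor's note: to make the stopping threshold in should_continue_reclaim() concrete, the small program below (illustration only, not part of the patch) prints pages_for_compaction = 2UL << order and evaluates the continue/stop comparison for hypothetical reclaim progress. The real function additionally consults compaction_suitable() before deciding to stop.

#include <stdbool.h>
#include <stdio.h>

/* Simplified: keep reclaiming while too little has been freed and the
 * inactive lists still hold more than the compaction target. */
static bool keep_reclaiming(int order, unsigned long nr_reclaimed,
			    unsigned long inactive_lru_pages)
{
	unsigned long pages_for_compaction = 2UL << order;

	return nr_reclaimed < pages_for_compaction &&
	       inactive_lru_pages > pages_for_compaction;
}

int main(void)
{
	/* Hypothetical order-5 request: target is 2 << 5 = 64 pages */
	int order = 5;

	printf("pages_for_compaction = %lu\n", 2UL << order);
	printf("40 freed, 10000 inactive -> continue? %d\n",
	       keep_reclaiming(order, 40, 10000));	/* 1 */
	printf("70 freed, 10000 inactive -> continue? %d\n",
	       keep_reclaiming(order, 70, 10000));	/* 0 */
	return 0;
}
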
@@ -1813,9 +1894,12 @@
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
 	enum lru_list l;
-	unsigned long nr_reclaimed = sc->nr_reclaimed;
+	unsigned long nr_reclaimed, nr_scanned;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
 
+restart:
+	nr_reclaimed = 0;
+	nr_scanned = sc->nr_scanned;
 	get_scan_count(zone, sc, nr, priority);
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1841,8 +1925,7 @@
 		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
 			break;
 	}
-
-	sc->nr_reclaimed = nr_reclaimed;
+	sc->nr_reclaimed += nr_reclaimed;
 
 	/*
 	 * Even if we did not try to evict anon pages at all, we want to
@@ -1851,6 +1934,11 @@
 	if (inactive_anon_is_low(zone, sc))
 		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
+	/* reclaim/compaction might need reclaim to continue */
+	if (should_continue_reclaim(zone, nr_reclaimed,
+					sc->nr_scanned - nr_scanned, sc))
+		goto restart;
+
 	throttle_vm_writeout(sc->gfp_mask);
 }
 
@@ -2007,7 +2095,8 @@
 			struct zone *preferred_zone;
 
 			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
-							NULL, &preferred_zone);
+						&cpuset_current_mems_allowed,
+						&preferred_zone);
 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
 		}
 	}
@@ -2124,38 +2213,87 @@
 }
 #endif
 
+/*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are within the range
+ * allowed by the caller's classzone_idx are added to balanced_pages. The total of
+ * balanced pages must be at least 25% of the zones allowed by classzone_idx
+ * for the node to be considered balanced. Forcing all zones to be balanced
+ * for high orders can cause excessive reclaim when there are imbalanced zones.
+ * The choice of 25% is due to
+ *   o a 16M DMA zone that is balanced will not balance a zone on any
+ *     reasonably sized machine
+ *   o On all other machines, the top zone must be at least a reasonable
+ *     percentage of the middle zones. For example, on 32-bit x86, highmem
+ *     would need to be at least 256M for it to balance a whole node.
+ *     Similarly, on x86-64 the Normal zone would need to be at least 1G
+ *     to balance a node on its own. These seemed like reasonable ratios.
+ */
+static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
+						int classzone_idx)
+{
+	unsigned long present_pages = 0;
+	int i;
+
+	for (i = 0; i <= classzone_idx; i++)
+		present_pages += pgdat->node_zones[i].present_pages;
+
+	return balanced_pages > (present_pages >> 2);
+}
+
 /* is kswapd sleeping prematurely? */
-static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+					int classzone_idx)
 {
 	int i;
+	unsigned long balanced = 0;
+	bool all_zones_ok = true;
 
 	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
 	if (remaining)
-		return 1;
+		return true;
 
-	/* If after HZ/10, a zone is below the high mark, it's premature */
+	/* Check the watermark levels */
 	for (i = 0; i < pgdat->nr_zones; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone->all_unreclaimable)
+		/*
+		 * balance_pgdat() skips over all_unreclaimable after
+		 * DEF_PRIORITY. Effectively, it considers them balanced so
+		 * they must be considered balanced here as well if kswapd
+		 * is to sleep
+		 */
+		if (zone->all_unreclaimable) {
+			balanced += zone->present_pages;
 			continue;
+		}
 
-		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
-								0, 0))
-			return 1;
+		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+							classzone_idx, 0))
+			all_zones_ok = false;
+		else
+			balanced += zone->present_pages;
 	}
 
-	return 0;
+	/*
+	 * For high-order requests, the balanced zones must contain at least
+	 * 25% of the node's pages for kswapd to sleep. For order-0, all zones
+	 * must be balanced
+	 */
+	if (order)
+		return pgdat_balanced(pgdat, balanced, classzone_idx);
+	else
+		return !all_zones_ok;
 }
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at high_wmark_pages(zone).
  *
- * Returns the number of pages which were actually freed.
+ * Returns the final order kswapd was reclaiming at
  *
  * There is special handling here for zones which are full of pinned pages.
  * This can happen if the pages are all mlocked, or if they are all used by
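
Editor's note: the 25% rule used by pgdat_balanced() and sleeping_prematurely() above can be seen with made-up zone sizes. The sketch below is illustration only, not part of the patch; the present_pages figures are invented.

#include <stdbool.h>
#include <stdio.h>

static bool pgdat_balanced(const unsigned long *present, int classzone_idx,
			   unsigned long balanced_pages)
{
	unsigned long present_pages = 0;
	int i;

	for (i = 0; i <= classzone_idx; i++)
		present_pages += present[i];

	/* balanced pages must exceed 25% of the pages kswapd cares about */
	return balanced_pages > (present_pages >> 2);
}

int main(void)
{
	/* Hypothetical 32-bit layout: DMA, Normal, HighMem (in pages) */
	unsigned long present[] = { 4096, 225280, 786432 };

	/* Only Normal balanced: 225280 is below 25% of 1015808 -> 0 */
	printf("%d\n", pgdat_balanced(present, 2, 225280));
	/* Normal + HighMem balanced: well above 25% -> 1 */
	printf("%d\n", pgdat_balanced(present, 2, 225280 + 786432));
	return 0;
}
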
@@ -2172,11 +2310,14 @@
  * interoperates with the page allocator fallback scheme to ensure that aging
  * of pages is balanced across the zones.
  */
-static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
+							int *classzone_idx)
 {
 	int all_zones_ok;
+	unsigned long balanced;
 	int priority;
 	int i;
+	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 	unsigned long total_scanned;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc = {
@@ -2199,7 +2340,6 @@
 	count_vm_event(PAGEOUTRUN);
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 		unsigned long lru_pages = 0;
 		int has_under_min_watermark_zone = 0;
 
@@ -2208,6 +2348,7 @@
 			disable_swap_token();
 
 		all_zones_ok = 1;
+		balanced = 0;
 
 		/*
 		 * Scan in the highmem->dma direction for the highest
@@ -2230,9 +2371,10 @@
 				shrink_active_list(SWAP_CLUSTER_MAX, zone,
 							&sc, priority, 0);
 
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
+				*classzone_idx = i;
 				break;
 			}
 		}
@@ -2255,6 +2397,7 @@
 		 * cause too much scanning of the lower zones.
 		 */
 		for (i = 0; i <= end_zone; i++) {
+			int compaction;
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
 
@@ -2276,7 +2419,7 @@
 			 * We put equal pressure on every zone, unless one
 			 * zone has way too many pages free already.
 			 */
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					8*high_wmark_pages(zone), end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
@@ -2284,9 +2427,26 @@
 						lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
+
+			compaction = 0;
+			if (order &&
+			    zone_watermark_ok(zone, 0,
+					       high_wmark_pages(zone),
+					      end_zone, 0) &&
+			    !zone_watermark_ok(zone, order,
+					       high_wmark_pages(zone),
+					       end_zone, 0)) {
+				compact_zone_order(zone,
+						   order,
+						   sc.gfp_mask, false,
+						   COMPACT_MODE_KSWAPD);
+				compaction = 1;
+			}
+
 			if (zone->all_unreclaimable)
 				continue;
-			if (nr_slab == 0 && !zone_reclaimable(zone))
+			if (!compaction && nr_slab == 0 &&
+			    !zone_reclaimable(zone))
 				zone->all_unreclaimable = 1;
 			/*
 			 * If we've done a decent amount of scanning and
@@ -2297,7 +2457,7 @@
 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
 				sc.may_writepage = 1;
 
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), end_zone, 0)) {
 				all_zones_ok = 0;
 				/*
@@ -2305,7 +2465,7 @@
 				 * means that we have a GFP_ATOMIC allocation
 				 * failure risk. Hurry up!
 				 */
-				if (!zone_watermark_ok(zone, order,
+				if (!zone_watermark_ok_safe(zone, order,
 					    min_wmark_pages(zone), end_zone, 0))
 					has_under_min_watermark_zone = 1;
 			} else {
@@ -2317,10 +2477,12 @@
 				 * spectulatively avoid congestion waits
 				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
+				if (i <= *classzone_idx)
+					balanced += zone->present_pages;
 			}
 
 		}
-		if (all_zones_ok)
+		if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
 			break;		/* kswapd: all done */
 		/*
 		 * OK, kswapd is getting into trouble.  Take a nap, then take
@@ -2343,7 +2505,13 @@
 			break;
 	}
 out:
-	if (!all_zones_ok) {
+
+	/*
+	 * order-0: All zones must meet high watermark for a balanced node
+	 * high-order: Balanced zones must make up at least 25% of the node
+	 *             for the node to be balanced
+	 */
+	if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
 		cond_resched();
 
 		try_to_freeze();
@@ -2368,7 +2536,88 @@
 		goto loop_again;
 	}
 
-	return sc.nr_reclaimed;
+	/*
+	 * If kswapd was reclaiming at a higher order, it has the option of
+	 * sleeping without all zones being balanced. Before it does, it must
+	 * ensure that the watermarks for order-0 on *all* zones are met and
+	 * that the congestion flags are cleared. The congestion flag must
+	 * be cleared as kswapd is the only mechanism that clears the flag
+	 * and it is potentially going to sleep here.
+	 */
+	if (order) {
+		for (i = 0; i <= end_zone; i++) {
+			struct zone *zone = pgdat->node_zones + i;
+
+			if (!populated_zone(zone))
+				continue;
+
+			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+				continue;
+
+			/* Confirm the zone is balanced for order-0 */
+			if (!zone_watermark_ok(zone, 0,
+					high_wmark_pages(zone), 0, 0)) {
+				order = sc.order = 0;
+				goto loop_again;
+			}
+
+			/* If balanced, clear the congested flag */
+			zone_clear_flag(zone, ZONE_CONGESTED);
+		}
+	}
+
+	/*
+	 * Return the order we were reclaiming at so sleeping_prematurely()
+	 * makes a decision on the order we were last reclaiming at. However,
+	 * if another caller entered the allocator slow path while kswapd
+	 * was awake, order will remain at the higher level
+	 */
+	*classzone_idx = end_zone;
+	return order;
+}
+
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+{
+	long remaining = 0;
+	DEFINE_WAIT(wait);
+
+	if (freezing(current) || kthread_should_stop())
+		return;
+
+	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/* Try to sleep for a short interval */
+	if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+		remaining = schedule_timeout(HZ/10);
+		finish_wait(&pgdat->kswapd_wait, &wait);
+		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+	}
+
+	/*
+	 * After a short sleep, check if it was a premature sleep. If not, then
+	 * go fully to sleep until explicitly woken up.
+	 */
+	if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
+
+		/*
+		 * vmstat counters are not perfectly accurate and the estimated
+		 * value for counters such as NR_FREE_PAGES can deviate from the
+		 * true value by nr_online_cpus * threshold. To avoid the zone
+		 * watermarks being breached while under pressure, we reduce the
+		 * per-cpu vmstat threshold while kswapd is awake and restore
+		 * them before going back to sleep.
+		 */
+		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
+		schedule();
+		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
+	} else {
+		if (remaining)
+			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+		else
+			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+	}
+	finish_wait(&pgdat->kswapd_wait, &wait);
 }
 
 /*
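
Editor's note: the vmstat-drift comment in kswapd_try_to_sleep() above is easier to follow with back-of-the-envelope numbers. The program below is illustration only; the watermarks, CPU count and thresholds are invented, not taken from the patch. It shows how the worst-case error of num_online_cpus * stat_threshold can exceed the low-to-min gap unless the threshold is reduced while kswapd is awake.

#include <stdio.h>

int main(void)
{
	/* Hypothetical zone: min = 8192, low = 10240 pages; 32 online CPUs */
	unsigned long low = 10240, min = 8192;
	int cpus = 32;
	int normal_threshold = 125;	/* typical cap for a large zone */
	int pressure_threshold;

	/* Worst case: every CPU holds a full batch of unflushed updates */
	printf("normal drift:   %d pages (low-min gap is %lu)\n",
	       cpus * normal_threshold, low - min);	/* 4000 vs 2048 */

	/* Reduced threshold spreads the gap across the CPUs instead */
	pressure_threshold = (int)((low - min) / cpus);	/* 64 */
	printf("pressure drift: %d pages\n", cpus * pressure_threshold);
	return 0;
}
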
@@ -2387,9 +2636,10 @@
 static int kswapd(void *p)
 {
 	unsigned long order;
+	int classzone_idx;
 	pg_data_t *pgdat = (pg_data_t*)p;
 	struct task_struct *tsk = current;
-	DEFINE_WAIT(wait);
+
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
@@ -2417,49 +2667,30 @@
 	set_freezable();
 
 	order = 0;
+	classzone_idx = MAX_NR_ZONES - 1;
 	for ( ; ; ) {
 		unsigned long new_order;
+		int new_classzone_idx;
 		int ret;
 
-		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 		new_order = pgdat->kswapd_max_order;
+		new_classzone_idx = pgdat->classzone_idx;
 		pgdat->kswapd_max_order = 0;
-		if (order < new_order) {
+		pgdat->classzone_idx = MAX_NR_ZONES - 1;
+		if (order < new_order || classzone_idx > new_classzone_idx) {
 			/*
 			 * Don't sleep if someone wants a larger 'order'
-			 * allocation
+			 * allocation or has tighter zone constraints
 			 */
 			order = new_order;
+			classzone_idx = new_classzone_idx;
 		} else {
-			if (!freezing(current) && !kthread_should_stop()) {
-				long remaining = 0;
-
-				/* Try to sleep for a short interval */
-				if (!sleeping_prematurely(pgdat, order, remaining)) {
-					remaining = schedule_timeout(HZ/10);
-					finish_wait(&pgdat->kswapd_wait, &wait);
-					prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
-				}
-
-				/*
-				 * After a short sleep, check if it was a
-				 * premature sleep. If not, then go fully
-				 * to sleep until explicitly woken up
-				 */
-				if (!sleeping_prematurely(pgdat, order, remaining)) {
-					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
-					schedule();
-				} else {
-					if (remaining)
-						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
-					else
-						count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
-				}
-			}
-
+			kswapd_try_to_sleep(pgdat, order, classzone_idx);
 			order = pgdat->kswapd_max_order;
+			classzone_idx = pgdat->classzone_idx;
+			pgdat->kswapd_max_order = 0;
+			pgdat->classzone_idx = MAX_NR_ZONES - 1;
 		}
-		finish_wait(&pgdat->kswapd_wait, &wait);
 
 		ret = try_to_freeze();
 		if (kthread_should_stop())
@@ -2471,7 +2702,7 @@
 		 */
 		if (!ret) {
 			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
-			balance_pgdat(pgdat, order);
+			order = balance_pgdat(pgdat, order, &classzone_idx);
 		}
 	}
 	return 0;
@@ -2480,23 +2711,26 @@
 /*
  * A zone is low on free memory, so wake its kswapd task to service it.
  */
-void wakeup_kswapd(struct zone *zone, int order)
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 {
 	pg_data_t *pgdat;
 
 	if (!populated_zone(zone))
 		return;
 
-	pgdat = zone->zone_pgdat;
-	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
-		return;
-	if (pgdat->kswapd_max_order < order)
-		pgdat->kswapd_max_order = order;
-	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
+	pgdat = zone->zone_pgdat;
+	if (pgdat->kswapd_max_order < order) {
+		pgdat->kswapd_max_order = order;
+		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
+	}
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
+	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+		return;
+
+	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 312d728..0c3b504 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -83,7 +83,31 @@
 
 #ifdef CONFIG_SMP
 
-static int calculate_threshold(struct zone *zone)
+int calculate_pressure_threshold(struct zone *zone)
+{
+	int threshold;
+	int watermark_distance;
+
+	/*
+	 * As vmstats are not up to date, there is drift between the estimated
+	 * and real values. For high thresholds and a high number of CPUs, it
+	 * is possible for the min watermark to be breached while the estimated
+	 * value looks fine. The pressure threshold is a reduced value such
+	 * that even the maximum amount of drift will not accidentally breach
+	 * the min watermark
+	 */
+	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
+	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
+
+	/*
+	 * Maximum threshold is 125
+	 */
+	threshold = min(125, threshold);
+
+	return threshold;
+}
+
+int calculate_normal_threshold(struct zone *zone)
 {
 	int threshold;
 	int mem;	/* memory in 128 MB units */
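
Editor's note: a standalone rendering of calculate_pressure_threshold() above (illustration only; the watermark numbers are invented) makes the two clamps visible: at least 1 so counters still batch, and at most 125 to match the ceiling used by the normal threshold.

#include <stdio.h>

static int pressure_threshold(unsigned long low_wmark,
			      unsigned long min_wmark, int cpus)
{
	int watermark_distance = (int)(low_wmark - min_wmark);
	int threshold = watermark_distance / cpus;

	if (threshold < 1)
		threshold = 1;		/* max(1, ...) */
	if (threshold > 125)
		threshold = 125;	/* min(125, ...) */
	return threshold;
}

int main(void)
{
	/* Tiny DMA-like zone: 32-page gap across 64 CPUs -> clamps to 1 */
	printf("%d\n", pressure_threshold(128, 96, 64));
	/* Large zone: 20000-page gap across 4 CPUs -> capped at 125 */
	printf("%d\n", pressure_threshold(40000, 20000, 4));
	return 0;
}
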
@@ -142,7 +166,7 @@
 	for_each_populated_zone(zone) {
 		unsigned long max_drift, tolerate_drift;
 
-		threshold = calculate_threshold(zone);
+		threshold = calculate_normal_threshold(zone);
 
 		for_each_online_cpu(cpu)
 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
@@ -161,6 +185,26 @@
 	}
 }
 
+void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+				int (*calculate_pressure)(struct zone *))
+{
+	struct zone *zone;
+	int cpu;
+	int threshold;
+	int i;
+
+	for (i = 0; i < pgdat->nr_zones; i++) {
+		zone = &pgdat->node_zones[i];
+		if (!zone->percpu_drift_mark)
+			continue;
+
+		threshold = (*calculate_pressure)(zone);
+		for_each_possible_cpu(cpu)
+			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+							= threshold;
+	}
+}
+
 /*
  * For use when we know that interrupts are disabled.
  */
@@ -836,6 +880,7 @@
 	"numa_local",
 	"numa_other",
 #endif
+	"nr_anon_transparent_hugepages",
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
 
@@ -911,7 +956,7 @@
 		   "\n        scanned  %lu"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu",
-		   zone_nr_free_pages(zone),
+		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac..1e308f2 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@
 			break;
 		case 's':{
 				char **sptr = va_arg(ap, char **);
-				int16_t len;
-				int size;
+				uint16_t len;
 
 				errcode = p9pdu_readf(pdu, proto_version,
 								"w", &len);
 				if (errcode)
 					break;
 
-				size = max_t(int16_t, len, 0);
-
-				*sptr = kmalloc(size + 1, GFP_KERNEL);
+				*sptr = kmalloc(len + 1, GFP_KERNEL);
 				if (*sptr == NULL) {
 					errcode = -EFAULT;
 					break;
 				}
-				if (pdu_read(pdu, *sptr, size)) {
+				if (pdu_read(pdu, *sptr, len)) {
 					errcode = -EFAULT;
 					kfree(*sptr);
 					*sptr = NULL;
 				} else
-					(*sptr)[size] = 0;
+					(*sptr)[len] = 0;
 			}
 			break;
 		case 'Q':{
@@ -234,14 +231,14 @@
 			}
 			break;
 		case 'D':{
-				int32_t *count = va_arg(ap, int32_t *);
+				uint32_t *count = va_arg(ap, uint32_t *);
 				void **data = va_arg(ap, void **);
 
 				errcode =
 				    p9pdu_readf(pdu, proto_version, "d", count);
 				if (!errcode) {
 					*count =
-					    min_t(int32_t, *count,
+					    min_t(uint32_t, *count,
 						  pdu->size - pdu->offset);
 					*data = &pdu->sdata[pdu->offset];
 				}
@@ -404,9 +401,10 @@
 			break;
 		case 's':{
 				const char *sptr = va_arg(ap, const char *);
-				int16_t len = 0;
+				uint16_t len = 0;
 				if (sptr)
-					len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+					len = min_t(uint16_t, strlen(sptr),
+								USHRT_MAX);
 
 				errcode = p9pdu_writef(pdu, proto_version,
 								"w", len);
@@ -438,7 +436,7 @@
 						 stbuf->n_gid, stbuf->n_muid);
 			} break;
 		case 'D':{
-				int32_t count = va_arg(ap, int32_t);
+				uint32_t count = va_arg(ap, uint32_t);
 				const void *data = va_arg(ap, const void *);
 
 				errcode = p9pdu_writef(pdu, proto_version, "d",
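
Editor's note: the int16_t/int32_t to unsigned conversions in the 9P string and data paths above close a sign hazard: a wire length above 32767 lands negative in an int16_t, and the old max_t(int16_t, len, 0) clamp then silently truncated it to zero. The snippet below reproduces the effect in isolation; it is illustration only, not the 9P code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire_len = 40000;	/* hypothetical length from the wire */

	int16_t  as_signed   = (int16_t)wire_len;	/* -25536 on two's complement */
	uint16_t as_unsigned = wire_len;		/* 40000 */

	/* Old behaviour: negative lengths were clamped to 0, losing the data */
	int old_size = as_signed > 0 ? as_signed : 0;

	printf("signed view:   %d -> allocated size %d\n", as_signed, old_size);
	printf("unsigned view: %u\n", (unsigned)as_unsigned);
	return 0;
}
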
diff --git a/net/Kconfig b/net/Kconfig
index ad0aafe..7284062 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -253,7 +253,9 @@
 	what was just said, you don't need it: say N.
 
 	Documentation on how to use TCP connection probing can be found
-	at http://linux-net.osdl.org/index.php/TcpProbe
+	at:
+
+	  http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
 
 	To compile this code as a module, choose M here: the
 	module will be called tcp_probe.
diff --git a/net/Makefile b/net/Makefile
index a3330eb..a51d946 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -19,9 +19,7 @@
 obj-$(CONFIG_INET)		+= ipv4/
 obj-$(CONFIG_XFRM)		+= xfrm/
 obj-$(CONFIG_UNIX)		+= unix/
-ifneq ($(CONFIG_IPV6),)
-obj-y				+= ipv6/
-endif
+obj-$(CONFIG_NET)		+= ipv6/
 obj-$(CONFIG_PACKET)		+= packet/
 obj-$(CONFIG_NET_KEY)		+= key/
 obj-$(CONFIG_BRIDGE)		+= bridge/
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d29..6da5dae 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,7 +1392,7 @@
 	ax25_cb *ax25;
 	int err = 0;
 
-	memset(fsa, 0, sizeof(fsa));
+	memset(fsa, 0, sizeof(*fsa));
 	lock_sock(sk);
 	ax25 = ax25_sk(sk);
 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d4d9926..65106fb 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -151,9 +151,9 @@
 	}							\
 	while (0)
 #else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __attribute__((unused)),
-			   struct bat_priv *bat_priv __attribute__((unused)),
-			   char *fmt __attribute__((unused)), ...)
+static inline void bat_dbg(char type __always_unused,
+			   struct bat_priv *bat_priv __always_unused,
+			   char *fmt __always_unused, ...)
 {
 }
 #endif
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b49fdf7..2284e81 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -63,7 +63,7 @@
 	uint8_t  num_hna;
 	uint8_t  gw_flags;  /* flags related to gateway class */
 	uint8_t  align;
-} __attribute__((packed));
+} __packed;
 
 #define BAT_PACKET_LEN sizeof(struct batman_packet)
 
@@ -76,7 +76,7 @@
 	uint8_t  orig[6];
 	uint16_t seqno;
 	uint8_t  uid;
-} __attribute__((packed));
+} __packed;
 
 #define BAT_RR_LEN 16
 
@@ -93,14 +93,14 @@
 	uint8_t  uid;
 	uint8_t  rr_cur;
 	uint8_t  rr[BAT_RR_LEN][ETH_ALEN];
-} __attribute__((packed));
+} __packed;
 
 struct unicast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
 	uint8_t  dest[6];
 	uint8_t  ttl;
-} __attribute__((packed));
+} __packed;
 
 struct unicast_frag_packet {
 	uint8_t  packet_type;
@@ -110,7 +110,7 @@
 	uint8_t  flags;
 	uint8_t  orig[6];
 	uint16_t seqno;
-} __attribute__((packed));
+} __packed;
 
 struct bcast_packet {
 	uint8_t  packet_type;
@@ -118,7 +118,7 @@
 	uint8_t  orig[6];
 	uint8_t  ttl;
 	uint32_t seqno;
-} __attribute__((packed));
+} __packed;
 
 struct vis_packet {
 	uint8_t  packet_type;
@@ -131,6 +131,6 @@
 				  * neighbors */
 	uint8_t  target_orig[6]; /* who should receive this packet */
 	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
-} __attribute__((packed));
+} __packed;
 
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 97cb23d..bf3f6f5 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -246,13 +246,13 @@
 	/* this packet might be part of the vis send queue. */
 	struct sk_buff *skb_packet;
 	/* vis_info may follow here*/
-} __attribute__((packed));
+} __packed;
 
 struct vis_info_entry {
 	uint8_t  src[ETH_ALEN];
 	uint8_t  dest[ETH_ALEN];
 	uint8_t  quality;	/* quality = 0 means HNA */
-} __attribute__((packed));
+} __packed;
 
 struct recvlist_node {
 	struct list_head list;
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc2e28b..d1a6113 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -50,12 +50,12 @@
 		skb = tfp->skb;
 	}
 
+	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
+		goto err;
+
 	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
-	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
-		/* free buffered skb, skb will be freed later */
-		kfree_skb(tfp->skb);
-		return NULL;
-	}
+	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
+		goto err;
 
 	/* move free entry to end */
 	tfp->skb = NULL;
@@ -70,6 +70,11 @@
 	unicast_packet->packet_type = BAT_UNICAST;
 
 	return skb;
+
+err:
+	/* free buffered skb, skb will be freed later */
+	kfree_skb(tfp->skb);
+	return NULL;
 }
 
 static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
@@ -229,10 +234,12 @@
 	if (!bat_priv->primary_if)
 		goto dropped;
 
-	unicast_packet = (struct unicast_packet *) skb->data;
-
-	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
 	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
+	if (!frag_skb)
+		goto dropped;
+
+	unicast_packet = (struct unicast_packet *) skb->data;
+	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
 	skb_split(skb, frag_skb, data_len / 2);
 
 	if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cd4c423..de1022c 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -64,6 +64,7 @@
 
 	spin_unlock_bh(&bat_priv->vis_list_lock);
 	kfree_skb(info->skb_packet);
+	kfree(info);
 }
 
 /* Compare two vis packets, used by the hashing algorithm */
@@ -268,10 +269,10 @@
 				buff_pos += sprintf(buff + buff_pos, "%pM,",
 						entry->addr);
 
-				for (i = 0; i < packet->entries; i++)
+				for (j = 0; j < packet->entries; j++)
 					buff_pos += vis_data_read_entry(
 							buff + buff_pos,
-							&entries[i],
+							&entries[j],
 							entry->addr,
 							entry->primary);
 
@@ -444,7 +445,7 @@
 			      info);
 	if (hash_added < 0) {
 		/* did not work (for some reason) */
-		kref_put(&old_info->refcount, free_info);
+		kref_put(&info->refcount, free_info);
 		info = NULL;
 	}
 
@@ -815,7 +816,7 @@
 		container_of(work, struct delayed_work, work);
 	struct bat_priv *bat_priv =
 		container_of(delayed_work, struct bat_priv, vis_work);
-	struct vis_info *info, *temp;
+	struct vis_info *info;
 
 	spin_lock_bh(&bat_priv->vis_hash_lock);
 	purge_vis_packets(bat_priv);
@@ -825,8 +826,9 @@
 		send_list_add(bat_priv, bat_priv->my_vis_info);
 	}
 
-	list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
-				 send_list) {
+	while (!list_empty(&bat_priv->vis_send_list)) {
+		info = list_first_entry(&bat_priv->vis_send_list,
+					typeof(*info), send_list);
 
 		kref_get(&info->refcount);
 		spin_unlock_bh(&bat_priv->vis_hash_lock);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6b90a41..99cd8d9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -379,14 +379,10 @@
 	hci_conn_hold(acl);
 
 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
-		acl->sec_level = sec_level;
+		acl->sec_level = BT_SECURITY_LOW;
+		acl->pending_sec_level = sec_level;
 		acl->auth_type = auth_type;
 		hci_acl_connect(acl);
-	} else {
-		if (acl->sec_level < sec_level)
-			acl->sec_level = sec_level;
-		if (acl->auth_type < auth_type)
-			acl->auth_type = auth_type;
 	}
 
 	if (type == ACL_LINK)
@@ -442,11 +438,17 @@
 {
 	BT_DBG("conn %p", conn);
 
+	if (conn->pending_sec_level > sec_level)
+		sec_level = conn->pending_sec_level;
+
 	if (sec_level > conn->sec_level)
-		conn->sec_level = sec_level;
+		conn->pending_sec_level = sec_level;
 	else if (conn->link_mode & HCI_LM_AUTH)
 		return 1;
 
+	/* Make sure we preserve an existing MITM requirement */
+	auth_type |= (conn->auth_type & 0x01);
+
 	conn->auth_type = auth_type;
 
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8b602d8..9c4541b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1011,6 +1011,10 @@
 
 	destroy_workqueue(hdev->workqueue);
 
+	hci_dev_lock_bh(hdev);
+	hci_blacklist_clear(hdev);
+	hci_dev_unlock_bh(hdev);
+
 	__hci_dev_put(hdev);
 
 	return 0;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 3810017..a290854 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -692,13 +692,13 @@
 	if (conn->state != BT_CONFIG || !conn->out)
 		return 0;
 
-	if (conn->sec_level == BT_SECURITY_SDP)
+	if (conn->pending_sec_level == BT_SECURITY_SDP)
 		return 0;
 
 	/* Only request authentication for SSP connections or non-SSP
 	 * devices with sec_level HIGH */
 	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
-					conn->sec_level != BT_SECURITY_HIGH)
+				conn->pending_sec_level != BT_SECURITY_HIGH)
 		return 0;
 
 	return 1;
@@ -1095,9 +1095,10 @@
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn) {
-		if (!ev->status)
+		if (!ev->status) {
 			conn->link_mode |= HCI_LM_AUTH;
-		else
+			conn->sec_level = conn->pending_sec_level;
+		} else
 			conn->sec_level = BT_SECURITY_LOW;
 
 		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c791fcd..675614e 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -305,33 +305,44 @@
 	}
 }
 
+static inline u8 l2cap_get_auth_type(struct sock *sk)
+{
+	if (sk->sk_type == SOCK_RAW) {
+		switch (l2cap_pi(sk)->sec_level) {
+		case BT_SECURITY_HIGH:
+			return HCI_AT_DEDICATED_BONDING_MITM;
+		case BT_SECURITY_MEDIUM:
+			return HCI_AT_DEDICATED_BONDING;
+		default:
+			return HCI_AT_NO_BONDING;
+		}
+	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+
+		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+			return HCI_AT_NO_BONDING_MITM;
+		else
+			return HCI_AT_NO_BONDING;
+	} else {
+		switch (l2cap_pi(sk)->sec_level) {
+		case BT_SECURITY_HIGH:
+			return HCI_AT_GENERAL_BONDING_MITM;
+		case BT_SECURITY_MEDIUM:
+			return HCI_AT_GENERAL_BONDING;
+		default:
+			return HCI_AT_NO_BONDING;
+		}
+	}
+}
+
 /* Service level security */
 static inline int l2cap_check_security(struct sock *sk)
 {
 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
 	__u8 auth_type;
 
-	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-			auth_type = HCI_AT_NO_BONDING_MITM;
-		else
-			auth_type = HCI_AT_NO_BONDING;
-
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-	} else {
-		switch (l2cap_pi(sk)->sec_level) {
-		case BT_SECURITY_HIGH:
-			auth_type = HCI_AT_GENERAL_BONDING_MITM;
-			break;
-		case BT_SECURITY_MEDIUM:
-			auth_type = HCI_AT_GENERAL_BONDING;
-			break;
-		default:
-			auth_type = HCI_AT_NO_BONDING;
-			break;
-		}
-	}
+	auth_type = l2cap_get_auth_type(sk);
 
 	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
 								auth_type);
@@ -848,6 +859,7 @@
 				result = L2CAP_CR_SEC_BLOCK;
 			else
 				result = L2CAP_CR_BAD_PSM;
+			sk->sk_state = BT_DISCONN;
 
 			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
 			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
@@ -1068,39 +1080,7 @@
 
 	err = -ENOMEM;
 
-	if (sk->sk_type == SOCK_RAW) {
-		switch (l2cap_pi(sk)->sec_level) {
-		case BT_SECURITY_HIGH:
-			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
-			break;
-		case BT_SECURITY_MEDIUM:
-			auth_type = HCI_AT_DEDICATED_BONDING;
-			break;
-		default:
-			auth_type = HCI_AT_NO_BONDING;
-			break;
-		}
-	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-			auth_type = HCI_AT_NO_BONDING_MITM;
-		else
-			auth_type = HCI_AT_NO_BONDING;
-
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-	} else {
-		switch (l2cap_pi(sk)->sec_level) {
-		case BT_SECURITY_HIGH:
-			auth_type = HCI_AT_GENERAL_BONDING_MITM;
-			break;
-		case BT_SECURITY_MEDIUM:
-			auth_type = HCI_AT_GENERAL_BONDING;
-			break;
-		default:
-			auth_type = HCI_AT_NO_BONDING;
-			break;
-		}
-	}
+	auth_type = l2cap_get_auth_type(sk);
 
 	hcon = hci_connect(hdev, ACL_LINK, dst,
 					l2cap_pi(sk)->sec_level, auth_type);
@@ -1127,7 +1107,8 @@
 		if (sk->sk_type != SOCK_SEQPACKET &&
 				sk->sk_type != SOCK_STREAM) {
 			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
+			if (l2cap_check_security(sk))
+				sk->sk_state = BT_CONNECTED;
 		} else
 			l2cap_do_start(sk);
 	}
@@ -1893,8 +1874,8 @@
 		if (pi->mode == L2CAP_MODE_STREAMING) {
 			l2cap_streaming_send(sk);
 		} else {
-			if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
-					pi->conn_state && L2CAP_CONN_WAIT_F) {
+			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
 				err = len;
 				break;
 			}
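
Editor's note: the one-character fix in the send path above deserves a closer look: `pi->conn_state && L2CAP_CONN_WAIT_F` is a logical AND against a non-zero constant, so it never tested the WAIT_F bit at all. The standalone snippet below uses hypothetical flag values (not the real L2CAP constants) to show the difference.

#include <stdio.h>

#define CONN_REMOTE_BUSY	0x01	/* hypothetical bit values */
#define CONN_WAIT_F		0x08

int main(void)
{
	unsigned int conn_state = CONN_REMOTE_BUSY;	/* WAIT_F not set */

	/* Buggy form: "&& CONN_WAIT_F" is just "&& true" */
	int buggy = (conn_state & CONN_REMOTE_BUSY) && CONN_WAIT_F;

	/* Fixed form: both bits must actually be present */
	int fixed = (conn_state & CONN_REMOTE_BUSY) && (conn_state & CONN_WAIT_F);

	printf("buggy test: %d, fixed test: %d\n", buggy, fixed);	/* 1, 0 */
	return 0;
}
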
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ff8aaa7..6b83776 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1164,7 +1164,8 @@
 			 * initiator rfcomm_process_rx already calls
 			 * rfcomm_session_put() */
 			if (s->sock->sk->sk_state != BT_CLOSED)
-				rfcomm_session_put(s);
+				if (list_empty(&s->dlcs))
+					rfcomm_session_put(s);
 			break;
 		}
 	}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2d..d7b9af4 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@
 			break;
 		}
 
+		tty_unlock();
 		schedule();
+		tty_lock();
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 9190ae4..6dee7bf 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,6 +6,7 @@
 	tristate "802.1d Ethernet Bridging"
 	select LLC
 	select STP
+	depends on IPV6 || IPV6=n
 	---help---
 	  If you say Y here, then your Linux box will be able to act as an
 	  Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2872393..88485cc 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -328,12 +328,12 @@
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
-		hlist_add_head_rcu(&fdb->hlist, head);
-
 		fdb->dst = source;
 		fdb->is_local = is_local;
 		fdb->is_static = is_local;
 		fdb->ageing_timer = jiffies;
+
+		hlist_add_head_rcu(&fdb->hlist, head);
 	}
 	return fdb;
 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f6d8e1..88e4aa9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@
 	if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-			if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
 			br_multicast_forward(mdst, skb, skb2);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f701a21..030a002 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,10 +37,9 @@
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
-	if (ipv6_addr_is_multicast(addr) &&
-	    IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+	if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
 		return 1;
 	return 0;
 }
@@ -232,8 +231,7 @@
 	if (!netif_running(br->dev) || timer_pending(&mp->timer))
 		goto out;
 
-	if (!hlist_unhashed(&mp->mglist))
-		hlist_del_init(&mp->mglist);
+	mp->mglist = false;
 
 	if (mp->ports)
 		goto out;
@@ -276,7 +274,7 @@
 		del_timer(&p->query_timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-		if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+		if (!mp->ports && !mp->mglist &&
 		    netif_running(br->dev))
 			mod_timer(&mp->timer, jiffies);
 
@@ -436,7 +434,6 @@
 	eth = eth_hdr(skb);
 
 	memcpy(eth->h_source, br->dev->dev_addr, 6);
-	ipv6_eth_mc_map(group, eth->h_dest);
 	eth->h_proto = htons(ETH_P_IPV6);
 	skb_put(skb, sizeof(*eth));
 
@@ -448,8 +445,10 @@
 	ip6h->payload_len = htons(8 + sizeof(*mldq));
 	ip6h->nexthdr = IPPROTO_HOPOPTS;
 	ip6h->hop_limit = 1;
-	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+	ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+			   &ip6h->saddr);
 	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
 	hopopt = (u8 *)(ip6h + 1);
 	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
@@ -528,7 +527,7 @@
 	struct net_bridge *br = mp->br;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+	if (!netif_running(br->dev) || !mp->mglist ||
 	    mp->queries_sent >= br->multicast_last_member_count)
 		goto out;
 
@@ -719,7 +718,7 @@
 		goto err;
 
 	if (!port) {
-		hlist_add_head(&mp->mglist, &br->mglist);
+		mp->mglist = true;
 		mod_timer(&mp->timer, now + br->multicast_membership_interval);
 		goto out;
 	}
@@ -781,11 +780,11 @@
 {
 	struct br_ip br_group;
 
-	if (ipv6_is_local_multicast(group))
+	if (!ipv6_is_transient_multicast(group))
 		return 0;
 
 	ipv6_addr_copy(&br_group.u.ip6, group);
-	br_group.proto = htons(ETH_P_IP);
+	br_group.proto = htons(ETH_P_IPV6);
 
 	return br_multicast_add_group(br, port, &br_group);
 }
@@ -1014,18 +1013,19 @@
 
 		nsrcs = skb_header_pointer(skb,
 					   len + offsetof(struct mld2_grec,
-							  grec_mca),
+							  grec_nsrcs),
 					   sizeof(_nsrcs), &_nsrcs);
 		if (!nsrcs)
 			return -EINVAL;
 
 		if (!pskb_may_pull(skb,
 				   len + sizeof(*grec) +
-				   sizeof(struct in6_addr) * (*nsrcs)))
+				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
 			return -EINVAL;
 
 		grec = (struct mld2_grec *)(skb->data + len);
-		len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+		len += sizeof(*grec) +
+		       sizeof(struct in6_addr) * ntohs(*nsrcs);
 
 		/* We treat these as MLDv1 reports for now. */
 		switch (grec->grec_type) {
@@ -1165,7 +1165,7 @@
 
 	max_delay *= br->multicast_last_member_count;
 
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1177,7 @@
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1236,7 +1236,7 @@
 		goto out;
 
 	max_delay *= br->multicast_last_member_count;
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1248,7 @@
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1283,7 +1283,7 @@
 		     br->multicast_last_member_interval;
 
 	if (!port) {
-		if (!hlist_unhashed(&mp->mglist) &&
+		if (mp->mglist &&
 		    (timer_pending(&mp->timer) ?
 		     time_after(mp->timer.expires, time) :
 		     try_to_del_timer_sync(&mp->timer) >= 0)) {
@@ -1341,7 +1341,7 @@
 {
 	struct br_ip br_group;
 
-	if (ipv6_is_local_multicast(group))
+	if (!ipv6_is_transient_multicast(group))
 		return;
 
 	ipv6_addr_copy(&br_group.u.ip6, group);
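
Editor's note: the MLDv2 fixes above hinge on the group record's source count being a 16-bit big-endian field, read at the right offset and converted with ntohs() before multiplying by sizeof(struct in6_addr). A small sketch of that length calculation, illustration only and not the bridge code:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical group record advertising 3 sources, as on the wire */
	uint16_t nsrcs_be = htons(3);
	size_t in6_addr_len = 16;

	/* Without ntohs(), a little-endian host reads 0x0300 = 768 sources */
	size_t wrong = (size_t)nsrcs_be * in6_addr_len;
	size_t right = (size_t)ntohs(nsrcs_be) * in6_addr_len;

	printf("raw field: %zu bytes, converted: %zu bytes\n", wrong, right);
	return 0;
}
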
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac77..4e1b620 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -84,13 +84,13 @@
 struct net_bridge_mdb_entry
 {
 	struct hlist_node		hlist[2];
-	struct hlist_node		mglist;
 	struct net_bridge		*br;
 	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
 	struct timer_list		query_timer;
 	struct br_ip			addr;
+	bool				mglist;
 	u32				queries_sent;
 };
 
@@ -238,7 +238,6 @@
 	spinlock_t			multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head		router_list;
-	struct hlist_head		mglist;
 
 	struct timer_list		multicast_router_timer;
 	struct timer_list		multicast_querier_timer;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf5..8184c03 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@
 		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
 			return -ENOPROTOOPT;
 		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.param.size = ol;
 		if (ol > sizeof(cf_sk->conn_req.param.data) ||
 			copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
 			release_sock(&cf_sk->sk);
 			return -EINVAL;
 		}
+		cf_sk->conn_req.param.size = ol;
 		release_sock(&cf_sk->sk);
 		return 0;
 
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 21ede14..c665de7 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -191,6 +191,7 @@
 	struct cflayer *servl = NULL;
 	struct cfcnfg_phyinfo *phyinfo = NULL;
 	u8 phyid = 0;
+
 	caif_assert(adap_layer != NULL);
 	channel_id = adap_layer->id;
 	if (adap_layer->dn == NULL || channel_id == 0) {
@@ -199,16 +200,16 @@
 		goto end;
 	}
 	servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
-	if (servl == NULL)
-		goto end;
-	layer_set_up(servl, NULL);
-	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
 	if (servl == NULL) {
 		pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
 		       channel_id);
 		ret = -EINVAL;
 		goto end;
 	}
+	layer_set_up(servl, NULL);
+	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+	if (ret)
+		goto end;
 	caif_assert(channel_id == servl->id);
 	if (adap_layer->dn != NULL) {
 		phyid = cfsrvl_getphyid(adap_layer->dn);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c..6008d6d 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@
 	struct chnl_net *priv  = container_of(layr, struct chnl_net, chnl);
 	int pktlen;
 	int err = 0;
+	const u8 *ip_version;
+	u8 buf;
 
 	priv = container_of(layr, struct chnl_net, chnl);
 
@@ -90,7 +92,21 @@
 	 * send the packet to the net stack.
 	 */
 	skb->dev = priv->netdev;
-	skb->protocol = htons(ETH_P_IP);
+
+	/* check the version of IP */
+	ip_version = skb_header_pointer(skb, 0, 1, &buf);
+	if (!ip_version)
+		return -EINVAL;
+	switch (*ip_version >> 4) {
+	case 4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	/* If we change the header in loop mode, the checksum is corrupted. */
 	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
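
Editor's note: the CAIF change above infers the protocol from the first payload byte, since the version field occupies the top nibble of an IP header (4 or 6). A userspace sketch of the same test, illustration only:

#include <stdint.h>
#include <stdio.h>

static const char *classify(uint8_t first_byte)
{
	switch (first_byte >> 4) {	/* IP version is the top nibble */
	case 4:
		return "ETH_P_IP";
	case 6:
		return "ETH_P_IPV6";
	default:
		return "drop (-EINVAL)";
	}
}

int main(void)
{
	printf("0x45 -> %s\n", classify(0x45));	/* IPv4, IHL 5 */
	printf("0x60 -> %s\n", classify(0x60));	/* IPv6 */
	printf("0x20 -> %s\n", classify(0x20));	/* neither */
	return 0;
}
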
@@ -378,9 +394,7 @@
 	priv->conn_req.sockaddr.u.dgm.connection_id = -1;
 	priv->flowenabled = false;
 
-	ASSERT_RTNL();
 	init_waitqueue_head(&priv->netmgmt_wq);
-	list_add(&priv->list_field, &chnl_net_list);
 }
 
 
@@ -437,6 +451,8 @@
 	ret = register_netdevice(dev);
 	if (ret)
 		pr_warn("device rtml registration failed\n");
+	else
+		list_add(&caifdev->list_field, &chnl_net_list);
 	return ret;
 }
 
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9d5e8ac..092dc88 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1256,6 +1256,9 @@
 		struct sockaddr_can *addr =
 			(struct sockaddr_can *)msg->msg_name;
 
+		if (msg->msg_namelen < sizeof(*addr))
+			return -EINVAL;
+
 		if (addr->can_family != AF_CAN)
 			return -EINVAL;
 
diff --git a/net/can/raw.c b/net/can/raw.c
index e88f610..883e9d7 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -649,6 +649,9 @@
 		struct sockaddr_can *addr =
 			(struct sockaddr_can *)msg->msg_name;
 
+		if (msg->msg_namelen < sizeof(*addr))
+			return -EINVAL;
+
 		if (addr->can_family != AF_CAN)
 			return -EINVAL;
 
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef88..0a1b53b 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
 
 #include <linux/ceph/types.h>
+#include <linux/module.h>
 
 /*
  * Robert Jenkin's hash function.
@@ -104,6 +105,7 @@
 		return -1;
 	}
 }
+EXPORT_SYMBOL(ceph_str_hash);
 
 const char *ceph_str_hash_name(int type)
 {
@@ -116,3 +118,4 @@
 		return "unknown";
 	}
 }
+EXPORT_SYMBOL(ceph_str_hash_name);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b6ff4a1..05f3578 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,7 +96,7 @@
 
 int ceph_msgr_init(void)
 {
-	ceph_msgr_wq = create_workqueue("ceph-msgr");
+	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
 	if (!ceph_msgr_wq) {
 		pr_err("msgr_init failed to create workqueue\n");
 		return -ENOMEM;
@@ -252,8 +252,12 @@
 {
 	struct kvec iov = {buf, len};
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
-	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 /*
@@ -264,13 +268,17 @@
 		     size_t kvlen, size_t len, int more)
 {
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
 	if (more)
 		msg.msg_flags |= MSG_MORE;
 	else
 		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */
 
-	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
+	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 
@@ -328,7 +336,6 @@
 		ceph_msg_put(con->out_msg);
 		con->out_msg = NULL;
 	}
-	con->out_keepalive_pending = false;
 	con->in_seq = 0;
 	con->in_seq_acked = 0;
 }
@@ -847,6 +854,8 @@
 		    (msg->pages || msg->pagelist || msg->bio || in_trail))
 			kunmap(page);
 
+		if (ret == -EAGAIN)
+			ret = 0;
 		if (ret <= 0)
 			goto out;
 
@@ -1238,8 +1247,6 @@
 		     con->auth_retry);
 		if (con->auth_retry == 2) {
 			con->error_msg = "connect authorization failure";
-			reset_connection(con);
-			set_bit(CLOSED, &con->state);
 			return -1;
 		}
 		con->auth_retry = 1;
@@ -1705,14 +1712,6 @@
 
 	/* open the socket first? */
 	if (con->sock == NULL) {
-		/*
-		 * if we were STANDBY and are reconnecting _this_
-		 * connection, bump connect_seq now.  Always bump
-		 * global_seq.
-		 */
-		if (test_and_clear_bit(STANDBY, &con->state))
-			con->connect_seq++;
-
 		prepare_write_banner(msgr, con);
 		prepare_write_connect(msgr, con, 1);
 		prepare_read_banner(con);
@@ -1737,16 +1736,12 @@
 	if (con->out_skip) {
 		ret = write_partial_skip(con);
 		if (ret <= 0)
-			goto done;
-		if (ret < 0) {
-			dout("try_write write_partial_skip err %d\n", ret);
-			goto done;
-		}
+			goto out;
 	}
 	if (con->out_kvec_left) {
 		ret = write_partial_kvec(con);
 		if (ret <= 0)
-			goto done;
+			goto out;
 	}
 
 	/* msg pages? */
@@ -1761,11 +1756,11 @@
 		if (ret == 1)
 			goto more_kvec;  /* we need to send the footer, too! */
 		if (ret == 0)
-			goto done;
+			goto out;
 		if (ret < 0) {
 			dout("try_write write_partial_msg_pages err %d\n",
 			     ret);
-			goto done;
+			goto out;
 		}
 	}
 
@@ -1789,10 +1784,9 @@
 	/* Nothing to do! */
 	clear_bit(WRITE_PENDING, &con->state);
 	dout("try_write nothing else to write.\n");
-done:
 	ret = 0;
 out:
-	dout("try_write done on %p\n", con);
+	dout("try_write done on %p ret %d\n", con, ret);
 	return ret;
 }
 
@@ -1821,19 +1815,17 @@
 			dout("try_read connecting\n");
 			ret = read_partial_banner(con);
 			if (ret <= 0)
-				goto done;
-			if (process_banner(con) < 0) {
-				ret = -1;
 				goto out;
-			}
+			ret = process_banner(con);
+			if (ret < 0)
+				goto out;
 		}
 		ret = read_partial_connect(con);
 		if (ret <= 0)
-			goto done;
-		if (process_connect(con) < 0) {
-			ret = -1;
 			goto out;
-		}
+		ret = process_connect(con);
+		if (ret < 0)
+			goto out;
 		goto more;
 	}
 
@@ -1848,7 +1840,7 @@
 		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
 		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		con->in_base_pos += ret;
 		if (con->in_base_pos)
 			goto more;
@@ -1859,7 +1851,7 @@
 		 */
 		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		dout("try_read got tag %d\n", (int)con->in_tag);
 		switch (con->in_tag) {
 		case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1862,7 @@
 			break;
 		case CEPH_MSGR_TAG_CLOSE:
 			set_bit(CLOSED, &con->state);   /* fixme */
-			goto done;
+			goto out;
 		default:
 			goto bad_tag;
 		}
@@ -1882,13 +1874,12 @@
 			case -EBADMSG:
 				con->error_msg = "bad crc";
 				ret = -EIO;
-				goto out;
+				break;
 			case -EIO:
 				con->error_msg = "io error";
-				goto out;
-			default:
-				goto done;
+				break;
 			}
+			goto out;
 		}
 		if (con->in_tag == CEPH_MSGR_TAG_READY)
 			goto more;
@@ -1898,15 +1889,13 @@
 	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
 		ret = read_partial_ack(con);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		process_ack(con);
 		goto more;
 	}
 
-done:
-	ret = 0;
 out:
-	dout("try_read done on %p\n", con);
+	dout("try_read done on %p ret %d\n", con, ret);
 	return ret;
 
 bad_tag:
@@ -1920,20 +1909,6 @@
 /*
  * Atomically queue work on a connection.  Bump @con reference to
  * avoid races with connection teardown.
- *
- * There is some trickery going on with QUEUED and BUSY because we
- * only want a _single_ thread operating on each connection at any
- * point in time, but we want to use all available CPUs.
- *
- * The worker thread only proceeds if it can atomically set BUSY.  It
- * clears QUEUED and does it's thing.  When it thinks it's done, it
- * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
- * (tries again to set BUSY).
- *
- * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
- * try to queue work.  If that fails (work is already queued, or BUSY)
- * we give up (work also already being done or is queued) but leave QUEUED
- * set so that the worker thread will loop if necessary.
  */
 static void queue_con(struct ceph_connection *con)
 {
@@ -1948,11 +1923,7 @@
 		return;
 	}
 
-	set_bit(QUEUED, &con->state);
-	if (test_bit(BUSY, &con->state)) {
-		dout("queue_con %p - already BUSY\n", con);
-		con->ops->put(con);
-	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
 		dout("queue_con %p - already queued\n", con);
 		con->ops->put(con);
 	} else {
@@ -1967,18 +1938,26 @@
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
-	int backoff = 0;
-
-more:
-	if (test_and_set_bit(BUSY, &con->state) != 0) {
-		dout("con_work %p BUSY already set\n", con);
-		goto out;
-	}
-	dout("con_work %p start, clearing QUEUED\n", con);
-	clear_bit(QUEUED, &con->state);
 
 	mutex_lock(&con->mutex);
+	if (test_and_clear_bit(BACKOFF, &con->state)) {
+		dout("con_work %p backing off\n", con);
+		if (queue_delayed_work(ceph_msgr_wq, &con->work,
+				       round_jiffies_relative(con->delay))) {
+			dout("con_work %p backoff %lu\n", con, con->delay);
+			mutex_unlock(&con->mutex);
+			return;
+		} else {
+			con->ops->put(con);
+			dout("con_work %p FAILED to back off %lu\n", con,
+			     con->delay);
+		}
+	}
 
+	if (test_bit(STANDBY, &con->state)) {
+		dout("con_work %p STANDBY\n", con);
+		goto done;
+	}
 	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
 		dout("con_work CLOSED\n");
 		con_close_socket(con);
@@ -1994,28 +1973,13 @@
 	    try_read(con) < 0 ||
 	    try_write(con) < 0) {
 		mutex_unlock(&con->mutex);
-		backoff = 1;
 		ceph_fault(con);     /* error/fault path */
 		goto done_unlocked;
 	}
 
 done:
 	mutex_unlock(&con->mutex);
-
 done_unlocked:
-	clear_bit(BUSY, &con->state);
-	dout("con->state=%lu\n", con->state);
-	if (test_bit(QUEUED, &con->state)) {
-		if (!backoff || test_bit(OPENING, &con->state)) {
-			dout("con_work %p QUEUED reset, looping\n", con);
-			goto more;
-		}
-		dout("con_work %p QUEUED reset, but just faulted\n", con);
-		clear_bit(QUEUED, &con->state);
-	}
-	dout("con_work %p done\n", con);
-
-out:
 	con->ops->put(con);
 }
 
@@ -2050,10 +2014,12 @@
 	/* Requeue anything that hasn't been acked */
 	list_splice_init(&con->out_sent, &con->out_queue);
 
-	/* If there are no messages in the queue, place the connection
-	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
-	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
-		dout("fault setting STANDBY\n");
+	/* If there are no messages queued or keepalive pending, place
+	 * the connection in a STANDBY state */
+	if (list_empty(&con->out_queue) &&
+	    !test_bit(KEEPALIVE_PENDING, &con->state)) {
+		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
+		clear_bit(WRITE_PENDING, &con->state);
 		set_bit(STANDBY, &con->state);
 	} else {
 		/* retry after a delay. */
@@ -2061,11 +2027,24 @@
 			con->delay = BASE_DELAY_INTERVAL;
 		else if (con->delay < MAX_DELAY_INTERVAL)
 			con->delay *= 2;
-		dout("fault queueing %p delay %lu\n", con, con->delay);
 		con->ops->get(con);
 		if (queue_delayed_work(ceph_msgr_wq, &con->work,
-				       round_jiffies_relative(con->delay)) == 0)
+				       round_jiffies_relative(con->delay))) {
+			dout("fault queued %p delay %lu\n", con, con->delay);
+		} else {
 			con->ops->put(con);
+			dout("fault failed to queue %p delay %lu, backoff\n",
+			     con, con->delay);
+			/*
+			 * In many cases we see a socket state change
+			 * while con_work is running and end up
+			 * queuing (non-delayed) work, such that we
+			 * can't backoff with a delay.  Set a flag so
+			 * that when con_work restarts we schedule the
+			 * delay then.
+			 */
+			set_bit(BACKOFF, &con->state);
+		}
 	}
 
 out_unlock:
@@ -2136,6 +2115,19 @@
 }
 EXPORT_SYMBOL(ceph_messenger_destroy);
 
+static void clear_standby(struct ceph_connection *con)
+{
+	/* come back from STANDBY? */
+	if (test_and_clear_bit(STANDBY, &con->state)) {
+		mutex_lock(&con->mutex);
+		dout("clear_standby %p and ++connect_seq\n", con);
+		con->connect_seq++;
+		WARN_ON(test_bit(WRITE_PENDING, &con->state));
+		WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
+		mutex_unlock(&con->mutex);
+	}
+}
+
 /*
  * Queue up an outgoing message on the given connection.
  */
@@ -2168,6 +2160,7 @@
 
 	/* if there wasn't anything waiting to send before, queue
 	 * new work */
+	clear_standby(con);
 	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
 }
@@ -2233,6 +2226,8 @@
  */
 void ceph_con_keepalive(struct ceph_connection *con)
 {
+	dout("con_keepalive %p\n", con);
+	clear_standby(con);
 	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
 	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6..71603ac 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@
 			goto bad;
 		}
 		err = __decode_pool(p, end, pi);
-		if (err < 0)
+		if (err < 0) {
+			kfree(pi);
 			goto bad;
+		}
 		__insert_pg_pool(&map->pg_pools, pi);
 	}
 
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 1a040e6..cd9c21d 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -16,22 +16,30 @@
 					  int num_pages, bool write_page)
 {
 	struct page **pages;
-	int rc;
+	int got = 0;
+	int rc = 0;
 
 	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
 	down_read(&current->mm->mmap_sem);
-	rc = get_user_pages(current, current->mm, (unsigned long)data,
-			    num_pages, write_page, 0, pages, NULL);
+	while (got < num_pages) {
+		rc = get_user_pages(current, current->mm,
+		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
+		    num_pages - got, write_page, 0, pages + got, NULL);
+		if (rc < 0)
+			break;
+		BUG_ON(rc == 0);
+		got += rc;
+	}
 	up_read(&current->mm->mmap_sem);
-	if (rc < num_pages)
+	if (rc < 0)
 		goto fail;
 	return pages;
 
 fail:
-	ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
+	ceph_put_page_vector(pages, got, false);
 	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
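
The ceph_get_direct_page_vector() hunk above replaces a single get_user_pages() call with a loop that accumulates partial results in "got" and only fails on a negative return. The same retry-on-partial-progress shape applies to any interface that may return fewer items than requested; below is a minimal userspace sketch using read(2) as a stand-in. read_full() is an illustrative helper, not part of the patch.

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

/*
 * Keep asking for the remainder, accumulating partial progress, and
 * only fail on a real error: the same shape as the new
 * get_user_pages() loop in ceph_get_direct_page_vector().
 */
ssize_t read_full(int fd, void *buf, size_t len)
{
	size_t got = 0;

	while (got < len) {
		ssize_t rc = read(fd, (char *)buf + got, len - got);

		if (rc < 0) {
			if (errno == EINTR)
				continue;	/* transient, retry */
			return -1;		/* real error */
		}
		if (rc == 0)
			break;			/* EOF: return what we have */
		got += rc;			/* partial progress */
	}
	return got;
}
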
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269..6561021 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -749,7 +749,8 @@
  *	@ha: hardware address
  *
  *	Search for an interface by MAC address. Returns NULL if the device
- *	is not found or a pointer to the device. The caller must hold RCU
+ *	is not found or a pointer to the device.
+ *	The caller must hold RCU or RTNL.
  *	The returned device has not had its ref count increased
  *	and the caller must therefore be careful about locking
  *
@@ -1113,13 +1114,21 @@
 void dev_load(struct net *net, const char *name)
 {
 	struct net_device *dev;
+	int no_module;
 
 	rcu_read_lock();
 	dev = dev_get_by_name_rcu(net, name);
 	rcu_read_unlock();
 
-	if (!dev && capable(CAP_NET_ADMIN))
-		request_module("%s", name);
+	no_module = !dev;
+	if (no_module && capable(CAP_NET_ADMIN))
+		no_module = request_module("netdev-%s", name);
+	if (no_module && capable(CAP_SYS_MODULE)) {
+		if (!request_module("%s", name))
+			pr_err("Loading kernel module for a network device "
+"with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s "
+"instead\n", name);
+	}
 }
 EXPORT_SYMBOL(dev_load);
 
@@ -1279,10 +1288,13 @@
 
 static int __dev_close(struct net_device *dev)
 {
+	int retval;
 	LIST_HEAD(single);
 
 	list_add(&dev->unreg_list, &single);
-	return __dev_close_many(&single);
+	retval = __dev_close_many(&single);
+	list_del(&single);
+	return retval;
 }
 
 int dev_close_many(struct list_head *head)
@@ -1324,7 +1336,7 @@
 
 	list_add(&dev->unreg_list, &single);
 	dev_close_many(&single);
-
+	list_del(&single);
 	return 0;
 }
 EXPORT_SYMBOL(dev_close);
@@ -1732,33 +1744,6 @@
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-	return ((features & NETIF_F_NO_CSUM) ||
-		((features & NETIF_F_V4_CSUM) &&
-		 protocol == htons(ETH_P_IP)) ||
-		((features & NETIF_F_V6_CSUM) &&
-		 protocol == htons(ETH_P_IPV6)) ||
-		((features & NETIF_F_FCOE_CRC) &&
-		 protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-	__be16 protocol = skb->protocol;
-	int features = dev->features;
-
-	if (vlan_tx_tag_present(skb)) {
-		features &= dev->vlan_features;
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-		protocol = veh->h_vlan_encapsulated_proto;
-		features &= dev->vlan_features;
-	}
-
-	return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1956,14 @@
 /**
  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
  *	@skb: buffer to segment
+ *	@features: device features as applicable to this skb
  *
  *	This function segments the given skb and stores the list of segments
  *	in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-	struct net_device *dev = skb->dev;
 	struct sk_buff *segs;
-	int features = dev->features & ~(illegal_highdma(dev, skb) ?
-					 NETIF_F_SG : 0);
 
 	segs = skb_gso_segment(skb, features);
 
@@ -2017,22 +2000,52 @@
 	}
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+	return ((features & NETIF_F_GEN_CSUM) ||
+		((features & NETIF_F_V4_CSUM) &&
+		 protocol == htons(ETH_P_IP)) ||
+		((features & NETIF_F_V6_CSUM) &&
+		 protocol == htons(ETH_P_IPV6)) ||
+		((features & NETIF_F_FCOE_CRC) &&
+		 protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+	if (!can_checksum_protocol(features, protocol)) {
+		features &= ~NETIF_F_ALL_CSUM;
+		features &= ~NETIF_F_SG;
+	} else if (illegal_highdma(skb->dev, skb)) {
+		features &= ~NETIF_F_SG;
+	}
+
+	return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
+	int features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
-	} else if (!skb->vlan_tci)
-		return dev->features;
+	} else if (!vlan_tx_tag_present(skb)) {
+		return harmonize_features(skb, protocol, features);
+	}
 
-	if (protocol != htons(ETH_P_8021Q))
-		return dev->features & dev->vlan_features;
-	else
-		return 0;
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
+
+	if (protocol != htons(ETH_P_8021Q)) {
+		return harmonize_features(skb, protocol, features);
+	} else {
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
+		return harmonize_features(skb, protocol, features);
+	}
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 /*
  * Returns true if either:
@@ -2042,22 +2055,13 @@
  *	   support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
+				      int features)
 {
-	if (skb_is_nonlinear(skb)) {
-		int features = dev->features;
-
-		if (vlan_tx_tag_present(skb))
-			features &= dev->vlan_features;
-
-		return (skb_has_frag_list(skb) &&
-			!(features & NETIF_F_FRAGLIST)) ||
+	return skb_is_nonlinear(skb) &&
+			((skb_has_frag_list(skb) &&
+				!(features & NETIF_F_FRAGLIST)) ||
 			(skb_shinfo(skb)->nr_frags &&
-			(!(features & NETIF_F_SG) ||
-			illegal_highdma(dev, skb)));
-	}
-
-	return 0;
+				!(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2071,8 @@
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
+		int features;
+
 		/*
 		 * If device doesnt need skb->dst, release it right now while
 		 * its hot in this cpu cache
@@ -2079,8 +2085,10 @@
 
 		skb_orphan_try(skb);
 
+		features = netif_skb_features(skb);
+
 		if (vlan_tx_tag_present(skb) &&
-		    !(dev->features & NETIF_F_HW_VLAN_TX)) {
+		    !(features & NETIF_F_HW_VLAN_TX)) {
 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
 			if (unlikely(!skb))
 				goto out;
@@ -2088,13 +2096,13 @@
 			skb->vlan_tci = 0;
 		}
 
-		if (netif_needs_gso(dev, skb)) {
-			if (unlikely(dev_gso_segment(skb)))
+		if (netif_needs_gso(skb, features)) {
+			if (unlikely(dev_gso_segment(skb, features)))
 				goto out_kfree_skb;
 			if (skb->next)
 				goto gso;
 		} else {
-			if (skb_needs_linearize(skb, dev) &&
+			if (skb_needs_linearize(skb, features) &&
 			    __skb_linearize(skb))
 				goto out_kfree_skb;
 
@@ -2105,7 +2113,7 @@
 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
 				skb_set_transport_header(skb,
 					skb_checksum_start_offset(skb));
-				if (!dev_can_checksum(dev, skb) &&
+				if (!(features & NETIF_F_ALL_CSUM) &&
 				     skb_checksum_help(skb))
 					goto out_kfree_skb;
 			}
@@ -2301,7 +2309,10 @@
 		 */
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
-		__qdisc_update_bstats(q, skb->len);
+
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
+		qdisc_bstats_update(q, skb);
+
 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
@@ -2563,7 +2574,8 @@
 
 	map = rcu_dereference(rxqueue->rps_map);
 	if (map) {
-		if (map->len == 1) {
+		if (map->len == 1 &&
+		    !rcu_dereference_raw(rxqueue->rps_flow_table)) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
@@ -3424,6 +3436,8 @@
 	__skb_pull(skb, skb_headlen(skb));
 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
 	skb->vlan_tci = 0;
+	skb->dev = napi->dev;
+	skb->skb_iif = 0;
 
 	napi->skb = skb;
 }
@@ -5060,6 +5074,7 @@
 
 	list_add(&dev->unreg_list, &single);
 	rollback_registered_many(&single);
+	list_del(&single);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -5524,34 +5539,6 @@
 	}
 }
 
-/**
- *	dev_txq_stats_fold - fold tx_queues stats
- *	@dev: device to get statistics from
- *	@stats: struct rtnl_link_stats64 to hold results
- */
-void dev_txq_stats_fold(const struct net_device *dev,
-			struct rtnl_link_stats64 *stats)
-{
-	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
-	unsigned int i;
-	struct netdev_queue *txq;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		spin_lock_bh(&txq->_xmit_lock);
-		tx_bytes   += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
-		tx_dropped += txq->tx_dropped;
-		spin_unlock_bh(&txq->_xmit_lock);
-	}
-	if (tx_bytes || tx_packets || tx_dropped) {
-		stats->tx_bytes   = tx_bytes;
-		stats->tx_packets = tx_packets;
-		stats->tx_dropped = tx_dropped;
-	}
-}
-EXPORT_SYMBOL(dev_txq_stats_fold);
-
 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
  * fields in the same order, with only the type differing.
  */
@@ -5595,7 +5582,6 @@
 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
-		dev_txq_stats_fold(dev, storage);
 	}
 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
 	return storage;
@@ -5621,18 +5607,20 @@
 }
 
 /**
- *	alloc_netdev_mq - allocate network device
+ *	alloc_netdev_mqs - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
  *	@name:		device name format string
  *	@setup:		callback to initialize device
- *	@queue_count:	the number of subqueues to allocate
+ *	@txqs:		the number of TX subqueues to allocate
+ *	@rxqs:		the number of RX subqueues to allocate
  *
  *	Allocates a struct net_device with private data area for driver use
  *	and performs basic initialization.  Also allocates subquue structs
- *	for each queue on the device at the end of the netdevice.
+ *	for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-		void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+		void (*setup)(struct net_device *),
+		unsigned int txqs, unsigned int rxqs)
 {
 	struct net_device *dev;
 	size_t alloc_size;
@@ -5640,12 +5628,20 @@
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	if (queue_count < 1) {
+	if (txqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device "
 		       "with zero queues.\n");
 		return NULL;
 	}
 
+#ifdef CONFIG_RPS
+	if (rxqs < 1) {
+		pr_err("alloc_netdev: Unable to allocate device "
+		       "with zero RX queues.\n");
+		return NULL;
+	}
+#endif
+
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
@@ -5676,18 +5672,6 @@
 
 	dev_net_set(dev, &init_net);
 
-	dev->num_tx_queues = queue_count;
-	dev->real_num_tx_queues = queue_count;
-	if (netif_alloc_netdev_queues(dev))
-		goto free_pcpu;
-
-#ifdef CONFIG_RPS
-	dev->num_rx_queues = queue_count;
-	dev->real_num_rx_queues = queue_count;
-	if (netif_alloc_rx_queues(dev))
-		goto free_pcpu;
-#endif
-
 	dev->gso_max_size = GSO_MAX_SIZE;
 
 	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
@@ -5697,9 +5681,26 @@
 	INIT_LIST_HEAD(&dev->link_watch_list);
 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
+
+	dev->num_tx_queues = txqs;
+	dev->real_num_tx_queues = txqs;
+	if (netif_alloc_netdev_queues(dev))
+		goto free_all;
+
+#ifdef CONFIG_RPS
+	dev->num_rx_queues = rxqs;
+	dev->real_num_rx_queues = rxqs;
+	if (netif_alloc_rx_queues(dev))
+		goto free_all;
+#endif
+
 	strcpy(dev->name, name);
 	return dev;
 
+free_all:
+	free_netdev(dev);
+	return NULL;
+
 free_pcpu:
 	free_percpu(dev->pcpu_refcnt);
 	kfree(dev->_tx);
@@ -5711,7 +5712,7 @@
 	kfree(p);
 	return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  *	free_netdev - free network device
@@ -6209,7 +6210,7 @@
 static void __net_exit default_device_exit_batch(struct list_head *net_list)
 {
 	/* At exit all network devices most be removed from a network
-	 * namespace.  Do this in the reverse order of registeration.
+	 * namespace.  Do this in the reverse order of registration.
 	 * Do this across as many network namespaces as possible to
 	 * improve batching efficiency.
 	 */
@@ -6227,6 +6228,7 @@
 		}
 	}
 	unregister_netdevice_many(&dev_kill_list);
+	list_del(&dev_kill_list);
 	rtnl_unlock();
 }
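
The netif_skb_features()/harmonize_features() hunks earlier in this file fold what used to be dev_can_checksum() and the per-device logic in skb_needs_linearize() into a single feature mask that dev_hard_start_xmit() computes once per skb. A self-contained sketch of that masking logic follows; the flag values and the helper are illustrative only (the real NETIF_F_* bits, the VLAN handling and the FCoE case live in the kernel headers and are not reproduced here).

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values; the real NETIF_F_* bits differ. */
#define F_SG        0x01
#define F_V4_CSUM   0x02
#define F_V6_CSUM   0x04
#define F_GEN_CSUM  0x08
#define F_ALL_CSUM  (F_V4_CSUM | F_V6_CSUM | F_GEN_CSUM)

enum proto { P_IP, P_IPV6, P_OTHER };

/* Mirrors the shape of harmonize_features(): if the device cannot
 * checksum this protocol it must not be asked for checksum offload
 * or scatter/gather either; a high-DMA problem only costs SG. */
static int harmonize(enum proto proto, int features, bool illegal_highdma)
{
	bool can_csum = (features & F_GEN_CSUM) ||
			((features & F_V4_CSUM) && proto == P_IP) ||
			((features & F_V6_CSUM) && proto == P_IPV6);

	if (!can_csum)
		features &= ~(F_ALL_CSUM | F_SG);
	else if (illegal_highdma)
		features &= ~F_SG;
	return features;
}

int main(void)
{
	int dev_features = F_SG | F_V4_CSUM;

	printf("IPv4 skb: %#x\n", harmonize(P_IP, dev_features, false));
	printf("IPv6 skb: %#x\n", harmonize(P_IPV6, dev_features, false));
	return 0;
}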
 
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c1..133fd22 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@
 
 	list_for_each_entry(ha, &from_list->list, list) {
 		type = addr_type ? addr_type : ha->type;
-		__hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+		__hw_addr_del(to_list, ha->addr, addr_len, type);
 	}
 }
 EXPORT_SYMBOL(__hw_addr_del_multiple);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1774178..ff23029 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -817,7 +817,7 @@
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vmalloc(reglen);
+	regbuf = vzalloc(reglen);
 	if (!regbuf)
 		return -ENOMEM;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4e..afc5837 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@
 /**
  *	sk_run_filter - run a filter on a socket
  *	@skb: buffer to run the filter on
- *	@filter: filter to apply
+ *	@fentry: filter to apply
  *
  * Decode and apply filter instructions to the skb->data.
  * Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4..b5bada9 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3321,7 +3321,7 @@
 				    pkt_dev->started_at);
 	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
 
-	p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
+	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
 		     (unsigned long long)ktime_to_us(elapsed),
 		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
 		     (unsigned long long)ktime_to_us(idle),
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57..2d65c6b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1121,8 +1121,7 @@
 				return -EOPNOTSUPP;
 
 			if (af_ops->validate_link_af) {
-				err = af_ops->validate_link_af(dev,
-							tb[IFLA_AF_SPEC]);
+				err = af_ops->validate_link_af(dev, af);
 				if (err < 0)
 					return err;
 			}
@@ -1672,6 +1671,9 @@
 			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
 
 		dest_net = rtnl_link_get_net(net, tb);
+		if (IS_ERR(dest_net))
+			return PTR_ERR(dest_net);
+
 		dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
 
 		if (IS_ERR(dev))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 19d6c21..d883dcc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,6 +210,7 @@
 	shinfo = skb_shinfo(skb);
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 	atomic_set(&shinfo->dataref, 1);
+	kmemcheck_annotate_variable(shinfo->destructor_arg);
 
 	if (fclone) {
 		struct sk_buff *child = skb + 1;
@@ -380,6 +381,8 @@
 	}
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(skb->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -2742,8 +2745,12 @@
 
 merge:
 	if (offset > headlen) {
-		skbinfo->frags[0].page_offset += offset - headlen;
-		skbinfo->frags[0].size -= offset - headlen;
+		unsigned int eat = offset - headlen;
+
+		skbinfo->frags[0].page_offset += eat;
+		skbinfo->frags[0].size -= eat;
+		skb->data_len -= eat;
+		skb->len -= eat;
 		offset = headlen;
 	}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index a658aeb..7dfed79 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@
   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
-  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
+  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
   "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -173,7 +173,7 @@
   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
-  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
+  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
   "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -189,7 +189,7 @@
   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
-  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
+  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
   "clock-AF_MAX"
 };
 
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d900ab9..c44348a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -583,7 +583,7 @@
 	u8 up, idtype;
 	int ret = -EINVAL;
 
-	if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+	if (!tb[DCB_ATTR_APP])
 		goto out;
 
 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -604,7 +604,16 @@
 		goto out;
 
 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
-	up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+	if (netdev->dcbnl_ops->getapp) {
+		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+	} else {
+		struct dcb_app app = {
+					.selector = idtype,
+					.protocol = id,
+				     };
+		up = dcb_getapp(netdev, &app);
+	}
 
 	/* send this back */
 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -617,6 +626,9 @@
 	dcb->cmd = DCB_CMD_GAPP;
 
 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+	if (!app_nest)
+		goto out_cancel;
+
 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
 	if (ret)
 		goto out_cancel;
@@ -1181,7 +1193,7 @@
 			goto err;
 	}
 
-	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
 		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
 		err = ops->ieee_setpfc(netdev, pfc);
 		if (err)
@@ -1604,6 +1616,10 @@
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
 	struct dcb_app_type *itr;
+	struct dcb_app_type event;
+
+	memcpy(&event.name, dev->name, sizeof(event.name));
+	memcpy(&event.app, new, sizeof(event.app));
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and replace */
@@ -1635,7 +1651,7 @@
 	}
 out:
 	spin_unlock(&dcb_lock);
-	call_dcbevent_notifiers(DCB_APP_EVENT, new);
+	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ad6dffd..b75968a 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -49,7 +49,9 @@
 	what was just said, you don't need it: say N.
 
 	Documentation on how to use DCCP connection probing can be found
-	at http://linux-net.osdl.org/index.php/DccpProbe
+	at:
+	
+	  http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
 
 	To compile this code as a module, choose M here: the
 	module will be called dccp_probe.
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 4508705..5fdb072 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
-	dp->dccps_gsr = seq;
+	if (after48(seq, dp->dccps_gsr))
+		dp->dccps_gsr = seq;
 	/* Sequence validity window depends on remote Sequence Window (7.5.1) */
 	dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
 	/*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247..4222e7a 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@
 		 */
 		if (time_before(now, (dp->dccps_rate_last +
 				      sysctl_dccp_sync_ratelimit)))
-			return 0;
+			return -1;
 
 		DCCP_WARN("Step 6 failed for %s packet, "
 			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
@@ -614,6 +614,9 @@
 		/* Caller (dccp_v4_do_rcv) will send Reset */
 		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
 		return 1;
+	} else if (sk->sk_state == DCCP_CLOSED) {
+		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+		return 1;
 	}
 
 	if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@
 	}
 
 	switch (sk->sk_state) {
-	case DCCP_CLOSED:
-		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-		return 1;
-
 	case DCCP_REQUESTING:
 		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
 		if (queued >= 0)
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 5639438..4234882 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
 /* Boundary values */
 static int		zero     = 0,
 			u8_max   = 0xFF;
-static unsigned long	seqw_min = 32;
+static unsigned long	seqw_min = DCCPF_SEQ_WMIN,
+			seqw_max = 0xFFFFFFFF;		/* maximum on 32 bit */
 
 static struct ctl_table dccp_default_table[] = {
 	{
@@ -31,6 +32,7 @@
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &seqw_min,		/* RFC 4340, 7.5.2 */
+		.extra2		= &seqw_max,
 	},
 	{
 		.procname	= "rx_ccid",
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ba1563..0dcaa90 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1130,7 +1130,7 @@
 /*
  * This processes a device up event. We only start up
  * the loopback device & ethernet devices with correct
- * MAC addreses automatically. Others must be started
+ * MAC addresses automatically. Others must be started
  * specifically.
  *
  * FIXME: How should we configure the loopback address ? If we could dispense
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 739435a..cfa7a5e 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -67,8 +67,9 @@
 	size_t result_len = 0;
 	const char *data = _data, *end, *opt;
 
-	kenter("%%%d,%s,'%s',%zu",
-	       key->serial, key->description, data, datalen);
+	kenter("%%%d,%s,'%*.*s',%zu",
+	       key->serial, key->description,
+	       (int)datalen, (int)datalen, data, datalen);
 
 	if (datalen <= 1 || !data || data[datalen - 1] != '\0')
 		return -EINVAL;
@@ -217,6 +218,19 @@
 		seq_printf(m, ": %u", key->datalen);
 }
 
+/*
+ * read the DNS data
+ * - the key's semaphore is read-locked
+ */
+static long dns_resolver_read(const struct key *key,
+			      char __user *buffer, size_t buflen)
+{
+	if (key->type_data.x[0])
+		return key->type_data.x[0];
+
+	return user_read(key, buffer, buflen);
+}
+
 struct key_type key_type_dns_resolver = {
 	.name		= "dns_resolver",
 	.instantiate	= dns_resolver_instantiate,
@@ -224,7 +238,7 @@
 	.revoke		= user_revoke,
 	.destroy	= user_destroy,
 	.describe	= dns_resolver_describe,
-	.read		= user_read,
+	.read		= dns_resolver_read,
 };
 
 static int __init init_dns_resolver(void)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0c877a7..3fb14b7 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -428,7 +428,7 @@
 }
 module_exit(dsa_cleanup_module);
 
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
 MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:dsa");
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 15dcc1a..0c28263 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -265,13 +265,13 @@
 static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 			  struct msghdr *msg, size_t len)
 {
-	struct sock *sk = sock->sk;
 	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
 	struct net_device *dev;
 	struct ec_addr addr;
 	int err;
 	unsigned char port, cb;
 #if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
+	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	struct ec_cb *eb;
 #endif
@@ -488,10 +488,10 @@
 
 error_free_buf:
 	vfree(userbuf);
+error:
 #else
 	err = -EPROTOTYPE;
 #endif
-	error:
 	mutex_unlock(&econet_mutex);
 
 	return err;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f..44d2b42 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *	for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@
  * this private data area.
  */
 
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+				      unsigned int rxqs)
 {
-	return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
 			       const unsigned char *addr, int len)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9e95d7f..a5a1050 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -432,7 +432,9 @@
 	---help---
 	  Support for INET (TCP, DCCP, etc) socket monitoring interface used by
 	  native Linux tools such as ss. ss is included in iproute2, currently
-	  downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
+	  downloadable at:
+	  
+	    http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
 
 	  If unsure, say Y.
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f2b6110..45b89d7 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -880,6 +880,19 @@
 }
 EXPORT_SYMBOL(inet_ioctl);
 
+#ifdef CONFIG_COMPAT
+int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	int err = -ENOIOCTLCMD;
+
+	if (sk->sk_prot->compat_ioctl)
+		err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
+
+	return err;
+}
+#endif
+
 const struct proto_ops inet_stream_ops = {
 	.family		   = PF_INET,
 	.owner		   = THIS_MODULE,
@@ -903,6 +916,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
+	.compat_ioctl	   = inet_compat_ioctl,
 #endif
 };
 EXPORT_SYMBOL(inet_stream_ops);
@@ -929,6 +943,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
+	.compat_ioctl	   = inet_compat_ioctl,
 #endif
 };
 EXPORT_SYMBOL(inet_dgram_ops);
@@ -959,6 +974,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
+	.compat_ioctl	   = inet_compat_ioctl,
 #endif
 };
 
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec..86961be 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	ah = (struct ip_auth_hdr *)skb->data;
-	iph = ip_hdr(skb);
-	ihl = ip_hdrlen(skb);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	iph = ip_hdr(skb);
+	ihl = ip_hdrlen(skb);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b9..7927589 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,13 @@
 		IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
 		return 0;
 	}
-	if (__in_dev_get_rcu(dev)) {
-		IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
+	if (__in_dev_get_rtnl(dev)) {
+		IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
 		return 0;
 	}
 	return -ENXIO;
 }
 
-/* must be called with rcu_read_lock() */
 static int arp_req_set_public(struct net *net, struct arpreq *r,
 		struct net_device *dev)
 {
@@ -1143,6 +1142,23 @@
 	return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+	struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+	int err = -ENXIO;
+
+	if (neigh) {
+		if (neigh->nud_state & ~NUD_NOARP)
+			err = neigh_update(neigh, NULL, NUD_FAILED,
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
+		neigh_release(neigh);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
 		struct net_device *dev)
 {
@@ -1163,7 +1179,6 @@
 {
 	int err;
 	__be32 ip;
-	struct neighbour *neigh;
 
 	if (r->arp_flags & ATF_PUBL)
 		return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1196,7 @@
 		if (!dev)
 			return -EINVAL;
 	}
-	err = -ENXIO;
-	neigh = neigh_lookup(&arp_tbl, &ip, dev);
-	if (neigh) {
-		if (neigh->nud_state & ~NUD_NOARP)
-			err = neigh_update(neigh, NULL, NUD_FAILED,
-					   NEIGH_UPDATE_F_OVERRIDE|
-					   NEIGH_UPDATE_F_ADMIN);
-		neigh_release(neigh);
-	}
-	return err;
+	return arp_invalidate(dev, ip);
 }
 
 /*
@@ -1226,10 +1232,10 @@
 	if (!(r.arp_flags & ATF_NETMASK))
 		((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
 							   htonl(0xFFFFFFFFUL);
-	rcu_read_lock();
+	rtnl_lock();
 	if (r.arp_dev[0]) {
 		err = -ENODEV;
-		dev = dev_get_by_name_rcu(net, r.arp_dev);
+		dev = __dev_get_by_name(net, r.arp_dev);
 		if (dev == NULL)
 			goto out;
 
@@ -1256,7 +1262,7 @@
 		break;
 	}
 out:
-	rcu_read_unlock();
+	rtnl_unlock();
 	if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
 		err = -EFAULT;
 	return err;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 748cb5b..036652c 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -670,7 +670,7 @@
 			     ifap = &ifa->ifa_next) {
 				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
 				    sin_orig.sin_addr.s_addr ==
-							ifa->ifa_address) {
+							ifa->ifa_local) {
 					break; /* found */
 				}
 			}
@@ -1030,6 +1030,21 @@
 	return mtu >= 68;
 }
 
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+					struct in_device *in_dev)
+
+{
+	struct in_ifaddr *ifa = in_dev->ifa_list;
+
+	if (!ifa)
+		return;
+
+	arp_send(ARPOP_REQUEST, ETH_P_ARP,
+		 ifa->ifa_local, dev,
+		 ifa->ifa_local, NULL,
+		 dev->dev_addr, NULL);
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1082,18 +1097,13 @@
 		}
 		ip_mc_up(in_dev);
 		/* fall through */
-	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_CHANGEADDR:
+		if (!IN_DEV_ARP_NOTIFY(in_dev))
+			break;
+		/* fall through */
+	case NETDEV_NOTIFY_PEERS:
 		/* Send gratuitous ARP to notify of link change */
-		if (IN_DEV_ARP_NOTIFY(in_dev)) {
-			struct in_ifaddr *ifa = in_dev->ifa_list;
-
-			if (ifa)
-				arp_send(ARPOP_REQUEST, ETH_P_ARP,
-					 ifa->ifa_address, dev,
-					 ifa->ifa_address, NULL,
-					 dev->dev_addr, NULL);
-		}
+		inetdev_send_gratuitous_arp(dev, in_dev);
 		break;
 	case NETDEV_DOWN:
 		ip_mc_down(in_dev);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e3181..97e5fb7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
-			    sk2->sk_state == TCP_LISTEN) {
+			    ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
 				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@
 					    (tb->num_owners < smallest_size || smallest_size == -1)) {
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
-						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
 							spin_unlock(&head->lock);
 							snum = smallest_rover;
 							goto have_snum;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c5af909..3c8dfa1 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -505,7 +505,9 @@
 			}
 
 			rcu_read_unlock();
+			local_bh_disable();
 			inet_twsk_deschedule(tw, twdr);
+			local_bh_enable();
 			inet_twsk_put(tw);
 			goto restart_rcu;
 		}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d9bc857..a96e656 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -475,7 +475,7 @@
 struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 {
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-	struct inet_peer_base *base = family_to_base(AF_INET);
+	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 
 	/* Look up for the address quickly, lockless.
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eb68a0e..d1d0e2c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -775,6 +775,7 @@
 			.fl4_dst = dst,
 			.fl4_src = tiph->saddr,
 			.fl4_tos = RT_TOS(tos),
+			.proto = IPPROTO_GRE,
 			.fl_gre_key = tunnel->parms.o_key
 		};
 		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -1764,4 +1765,4 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_RTNL_LINK("gre");
 MODULE_ALIAS_RTNL_LINK("gretap");
-MODULE_ALIAS("gre0");
+MODULE_ALIAS_NETDEV("gre0");
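
The switch from MODULE_ALIAS("gre0") to MODULE_ALIAS_NETDEV("gre0") here (and for tunl0 and ip6tnl0 in later hunks) pairs with the dev_load() change in net/core/dev.c above: a CAP_NET_ADMIN caller can now autoload the tunnel module through the namespaced "netdev-" alias, while the bare-name fallback is kept only for CAP_SYS_MODULE and logged as deprecated. A sketch of the presumed effect; the expansion is an assumption, not shown in this patch:

/* Assumption: MODULE_ALIAS_NETDEV("gre0") is expected to declare a
 * "netdev-" prefixed alias matching request_module("netdev-%s", name)
 * in dev_load(). */
MODULE_ALIAS("netdev-gre0");
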
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 988f52f..a5f58e7 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -913,4 +913,4 @@
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("tunl0");
+MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3f3a9af..8b65a12 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -60,6 +60,7 @@
 #include <linux/notifier.h>
 #include <linux/if_arp.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
 #include <net/ipip.h>
 #include <net/checksum.h>
 #include <net/netlink.h>
@@ -1434,6 +1435,81 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req {
+	struct in_addr src;
+	struct in_addr grp;
+	compat_ulong_t pktcnt;
+	compat_ulong_t bytecnt;
+	compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_vif_req {
+	vifi_t	vifi;		/* Which iface */
+	compat_ulong_t icount;
+	compat_ulong_t ocount;
+	compat_ulong_t ibytes;
+	compat_ulong_t obytes;
+};
+
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+	struct compat_sioc_sg_req sr;
+	struct compat_sioc_vif_req vr;
+	struct vif_device *vif;
+	struct mfc_cache *c;
+	struct net *net = sock_net(sk);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return -ENOENT;
+
+	switch (cmd) {
+	case SIOCGETVIFCNT:
+		if (copy_from_user(&vr, arg, sizeof(vr)))
+			return -EFAULT;
+		if (vr.vifi >= mrt->maxvif)
+			return -EINVAL;
+		read_lock(&mrt_lock);
+		vif = &mrt->vif_table[vr.vifi];
+		if (VIF_EXISTS(mrt, vr.vifi)) {
+			vr.icount = vif->pkt_in;
+			vr.ocount = vif->pkt_out;
+			vr.ibytes = vif->bytes_in;
+			vr.obytes = vif->bytes_out;
+			read_unlock(&mrt_lock);
+
+			if (copy_to_user(arg, &vr, sizeof(vr)))
+				return -EFAULT;
+			return 0;
+		}
+		read_unlock(&mrt_lock);
+		return -EADDRNOTAVAIL;
+	case SIOCGETSGCNT:
+		if (copy_from_user(&sr, arg, sizeof(sr)))
+			return -EFAULT;
+
+		rcu_read_lock();
+		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+		if (c) {
+			sr.pktcnt = c->mfc_un.res.pkt;
+			sr.bytecnt = c->mfc_un.res.bytes;
+			sr.wrong_if = c->mfc_un.res.wrong_if;
+			rcu_read_unlock();
+
+			if (copy_to_user(arg, &sr, sizeof(sr)))
+				return -EFAULT;
+			return 0;
+		}
+		rcu_read_unlock();
+		return -EADDRNOTAVAIL;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+#endif
+
 
 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
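
The new ipmr_compat_ioctl() above (and the matching ip6mr and raw-socket hooks later in this series) exists because the native SIOCGETVIFCNT/SIOCGETSGCNT structures carry unsigned long counters, so a 32-bit process on a 64-bit kernel passes a smaller, differently padded layout than the kernel's own. The compat_* mirror structs reproduce the 32-bit layout via compat_ulong_t. A self-contained sketch of the size mismatch follows; the "native" struct is a simplified stand-in for the uapi definition, not copied from it.

#include <stdint.h>
#include <stdio.h>

/* On a 64-bit build, unsigned long is 8 bytes and forces extra padding,
 * so the two layouts disagree; copy_from_user() with the native struct
 * would read past what 32-bit userspace actually supplied. */
struct native_vif_req {			/* simplified 64-bit view */
	unsigned short vifi;
	unsigned long icount, ocount, ibytes, obytes;
};

struct compat_vif_req {			/* 32-bit userspace layout */
	unsigned short vifi;
	uint32_t icount, ocount, ibytes, obytes;	/* compat_ulong_t */
};

int main(void)
{
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct native_vif_req),
	       sizeof(struct compat_vif_req));
	return 0;
}
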
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340..e855fff 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
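
The rewritten get_counters() above (repeated for ip_tables and ip6_tables later in the series) snapshots each CPU's 64-bit byte and packet counters with a per-CPU seqlock reader loop instead of taking the write lock with bottom halves disabled. A minimal, single-threaded sketch of the retry protocol follows; it only shows the loop structure, and the kernel's read_seqbegin()/read_seqretry() additionally supply the SMP memory barriers omitted here.

#include <stdint.h>
#include <stdio.h>

/* A writer bumps 'seq' to an odd value around its update; a reader
 * keeps re-copying until it sees the same even sequence before and
 * after the copy, so the 64-bit pair is read consistently. */
struct pcpu_counter {
	unsigned int seq;
	uint64_t bcnt, pcnt;
};

static void snapshot(const struct pcpu_counter *c, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {
		start = c->seq;
		*b = c->bcnt;
		*p = c->pcnt;
	} while ((start & 1) || c->seq != start);
}

int main(void)
{
	struct pcpu_counter c = { .seq = 2, .bcnt = 1500, .pcnt = 3 };
	uint64_t b, p;

	snapshot(&c, &b, &p);
	printf("bytes=%llu packets=%llu\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}
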
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index b8ddcc4..a5e52a9 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -60,12 +60,12 @@
 
 	if (mangle->flags & ~ARPT_MANGLE_MASK ||
 	    !(mangle->flags & ARPT_MANGLE_MASK))
-		return false;
+		return -EINVAL;
 
 	if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
 	   mangle->target != XT_CONTINUE)
-		return false;
-	return true;
+		return -EINVAL;
+	return 0;
 }
 
 static struct xt_target arpt_mangle_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d63..652efea 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a3d5ab7..6390ba2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -76,6 +76,7 @@
 #include <linux/seq_file.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
 
 static struct raw_hashinfo raw_v4_hashinfo = {
 	.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -838,6 +839,23 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCOUTQ:
+	case SIOCINQ:
+		return -ENOIOCTLCMD;
+	default:
+#ifdef CONFIG_IP_MROUTE
+		return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+		return -ENOIOCTLCMD;
+#endif
+	}
+}
+#endif
+
 struct proto raw_prot = {
 	.name		   = "RAW",
 	.owner		   = THIS_MODULE,
@@ -860,6 +878,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_raw_setsockopt,
 	.compat_getsockopt = compat_raw_getsockopt,
+	.compat_ioctl	   = compat_raw_ioctl,
 #endif
 };
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 351dc4e..6ed6603 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2707,6 +2707,11 @@
 	return NULL;
 }
 
+static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+{
+	return 0;
+}
+
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2716,6 +2721,8 @@
 	.protocol		=	cpu_to_be16(ETH_P_IP),
 	.destroy		=	ipv4_dst_destroy,
 	.check			=	ipv4_blackhole_dst_check,
+	.default_mtu		=	ipv4_blackhole_default_mtu,
+	.default_advmss		=	ipv4_default_advmss,
 	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
 };
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2549b29..65f6c04 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1222,7 +1222,7 @@
 	}
 
 	/* D-SACK for already forgotten data... Do dumb counting. */
-	if (dup_sack &&
+	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
 	    !after(end_seq_0, prior_snd_una) &&
 	    after(end_seq_0, tp->undo_marker))
 		tp->undo_retrans--;
@@ -1299,7 +1299,8 @@
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
-		if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+		if (tp->undo_marker && tp->undo_retrans &&
+		    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
 			state->reord = min(fack_count, state->reord);
@@ -4399,7 +4400,7 @@
 			if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
 				tp->ucopy.len -= chunk;
 				tp->copied_seq += chunk;
-				eaten = (chunk == skb->len && !th->fin);
+				eaten = (chunk == skb->len);
 				tcp_rcv_space_adjust(sk);
 			}
 			local_bh_disable();
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 856f684..02f583b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1994,7 +1994,6 @@
 				}
 				req = req->dl_next;
 			}
-			st->offset = 0;
 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
 				break;
 get_req:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc7c096..dfa5beb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1350,7 +1350,7 @@
 	return 0;
 }
 
-/* Intialize TSO state of a skb.
+/* Initialize TSO state of a skb.
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
@@ -2162,7 +2162,7 @@
 		if (!tp->retrans_stamp)
 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-		tp->undo_retrans++;
+		tp->undo_retrans += tcp_skb_pcount(skb);
 
 		/* snd_nxt is stored to detect loss of retransmitted segment,
 		 * see tcp_input.c tcp_sacktag_write_queue().
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5b189c9..fd6782e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -420,9 +420,6 @@
 	    dev->type == ARPHRD_TUNNEL6 ||
 	    dev->type == ARPHRD_SIT ||
 	    dev->type == ARPHRD_NONE) {
-		printk(KERN_INFO
-		       "%s: Disabled Privacy Extensions\n",
-		       dev->name);
 		ndev->cnf.use_tempaddr = -1;
 	} else {
 		in6_dev_hold(ndev);
@@ -2664,14 +2661,12 @@
 	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifa;
-	LIST_HEAD(keep_list);
-	int state;
+	int state, i;
 
 	ASSERT_RTNL();
 
-	/* Flush routes if device is being removed or it is not loopback */
-	if (how || !(dev->flags & IFF_LOOPBACK))
-		rt6_ifdown(net, dev);
+	rt6_ifdown(net, dev);
+	neigh_ifdown(&nd_tbl, dev);
 
 	idev = __in6_dev_get(dev);
 	if (idev == NULL)
@@ -2692,6 +2687,23 @@
 
 	}
 
+	/* Step 2: clear hash table */
+	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
+		struct hlist_head *h = &inet6_addr_lst[i];
+		struct hlist_node *n;
+
+		spin_lock_bh(&addrconf_hash_lock);
+	restart:
+		hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+			if (ifa->idev == idev) {
+				hlist_del_init_rcu(&ifa->addr_lst);
+				addrconf_del_timer(ifa);
+				goto restart;
+			}
+		}
+		spin_unlock_bh(&addrconf_hash_lock);
+	}
+
 	write_lock_bh(&idev->lock);
 
 	/* Step 2: clear flags for stateless addrconf */
@@ -2725,52 +2737,23 @@
 				       struct inet6_ifaddr, if_list);
 		addrconf_del_timer(ifa);
 
-		/* If just doing link down, and address is permanent
-		   and not link-local, then retain it. */
-		if (!how &&
-		    (ifa->flags&IFA_F_PERMANENT) &&
-		    !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
-			list_move_tail(&ifa->if_list, &keep_list);
+		list_del(&ifa->if_list);
 
-			/* If not doing DAD on this address, just keep it. */
-			if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
-			    idev->cnf.accept_dad <= 0 ||
-			    (ifa->flags & IFA_F_NODAD))
-				continue;
+		write_unlock_bh(&idev->lock);
 
-			/* If it was tentative already, no need to notify */
-			if (ifa->flags & IFA_F_TENTATIVE)
-				continue;
+		spin_lock_bh(&ifa->state_lock);
+		state = ifa->state;
+		ifa->state = INET6_IFADDR_STATE_DEAD;
+		spin_unlock_bh(&ifa->state_lock);
 
-			/* Flag it for later restoration when link comes up */
-			ifa->flags |= IFA_F_TENTATIVE;
-			ifa->state = INET6_IFADDR_STATE_DAD;
-		} else {
-			list_del(&ifa->if_list);
-
-			/* clear hash table */
-			spin_lock_bh(&addrconf_hash_lock);
-			hlist_del_init_rcu(&ifa->addr_lst);
-			spin_unlock_bh(&addrconf_hash_lock);
-
-			write_unlock_bh(&idev->lock);
-			spin_lock_bh(&ifa->state_lock);
-			state = ifa->state;
-			ifa->state = INET6_IFADDR_STATE_DEAD;
-			spin_unlock_bh(&ifa->state_lock);
-
-			if (state != INET6_IFADDR_STATE_DEAD) {
-				__ipv6_ifa_notify(RTM_DELADDR, ifa);
-				atomic_notifier_call_chain(&inet6addr_chain,
-							   NETDEV_DOWN, ifa);
-			}
-
-			in6_ifa_put(ifa);
-			write_lock_bh(&idev->lock);
+		if (state != INET6_IFADDR_STATE_DEAD) {
+			__ipv6_ifa_notify(RTM_DELADDR, ifa);
+			atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
 		}
-	}
+		in6_ifa_put(ifa);
 
-	list_splice(&keep_list, &idev->addr_list);
+		write_lock_bh(&idev->lock);
+	}
 
 	write_unlock_bh(&idev->lock);
 
@@ -4159,8 +4142,7 @@
 		addrconf_leave_solict(ifp->idev, &ifp->addr);
 		dst_hold(&ifp->rt->dst);
 
-		if (ifp->state == INET6_IFADDR_STATE_DEAD &&
-		    ip6_del_rt(ifp->rt))
+		if (ip6_del_rt(ifp->rt))
 			dst_free(&ifp->rt->dst);
 		break;
 	}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 059a3de..978e80e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -300,7 +300,7 @@
 			goto out;
 		}
 
-		/* Reproduce AF_INET checks to make the bindings consitant */
+		/* Reproduce AF_INET checks to make the bindings consistent */
 		v4addr = addr->sin6_addr.s6_addr32[3];
 		chk_addr_ret = inet_addr_type(net, v4addr);
 		if (!sysctl_ip_nonlocal_bind &&
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4e..1aba54a 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@
 	if (!pskb_may_pull(skb, ah_hlen))
 		goto out;
 
-	ip6h = ipv6_hdr(skb);
-
-	skb_push(skb, hdr_len);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	ip6h = ipv6_hdr(skb);
+
+	skb_push(skb, hdr_len);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d..d144e62 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
 		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     sk2->sk_state == TCP_LISTEN) &&
+		     ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
 		     ipv6_rcv_saddr_equal(sk, sk2))
 			break;
 	}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 94b5bf1..5f8d242 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -401,6 +401,9 @@
 		goto drop;
 	}
 
+	if (skb->pkt_type != PACKET_HOST)
+		goto drop;
+
 	skb_forward_csum(skb);
 
 	/*
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4f4483e..e528a42 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9fab274..0e1d53b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -34,6 +34,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -1804,6 +1805,80 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req6 {
+	struct sockaddr_in6 src;
+	struct sockaddr_in6 grp;
+	compat_ulong_t pktcnt;
+	compat_ulong_t bytecnt;
+	compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_mif_req6 {
+	mifi_t	mifi;
+	compat_ulong_t icount;
+	compat_ulong_t ocount;
+	compat_ulong_t ibytes;
+	compat_ulong_t obytes;
+};
+
+int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+	struct compat_sioc_sg_req6 sr;
+	struct compat_sioc_mif_req6 vr;
+	struct mif_device *vif;
+	struct mfc6_cache *c;
+	struct net *net = sock_net(sk);
+	struct mr6_table *mrt;
+
+	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+	if (mrt == NULL)
+		return -ENOENT;
+
+	switch (cmd) {
+	case SIOCGETMIFCNT_IN6:
+		if (copy_from_user(&vr, arg, sizeof(vr)))
+			return -EFAULT;
+		if (vr.mifi >= mrt->maxvif)
+			return -EINVAL;
+		read_lock(&mrt_lock);
+		vif = &mrt->vif6_table[vr.mifi];
+		if (MIF_EXISTS(mrt, vr.mifi)) {
+			vr.icount = vif->pkt_in;
+			vr.ocount = vif->pkt_out;
+			vr.ibytes = vif->bytes_in;
+			vr.obytes = vif->bytes_out;
+			read_unlock(&mrt_lock);
+
+			if (copy_to_user(arg, &vr, sizeof(vr)))
+				return -EFAULT;
+			return 0;
+		}
+		read_unlock(&mrt_lock);
+		return -EADDRNOTAVAIL;
+	case SIOCGETSGCNT_IN6:
+		if (copy_from_user(&sr, arg, sizeof(sr)))
+			return -EFAULT;
+
+		read_lock(&mrt_lock);
+		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+		if (c) {
+			sr.pktcnt = c->mfc_un.res.pkt;
+			sr.bytecnt = c->mfc_un.res.bytes;
+			sr.wrong_if = c->mfc_un.res.wrong_if;
+			read_unlock(&mrt_lock);
+
+			if (copy_to_user(arg, &sr, sizeof(sr)))
+				return -EFAULT;
+			return 0;
+		}
+		read_unlock(&mrt_lock);
+		return -EADDRNOTAVAIL;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+#endif
 
 static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 4555823..7d227c6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
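
The get_counters() rewrite above drops the per-cpu write locking and bottom-half juggling in favour of sampling each per-cpu counter pair inside a seqlock read section, retrying whenever a writer was active. A minimal, single-threaded userspace sketch of that read-retry shape (hand-rolled sequence counter; the kernel's seqlock_t additionally supplies the memory barriers a truly concurrent version needs):

/* Sketch (userspace, single-threaded) of the seqlock read-retry idiom now
 * used by get_counters(): sample the counters, then retry if the sequence
 * number shows an update was in progress or completed meanwhile. */
#include <stdint.h>
#include <stdio.h>

struct cpu_counter {
	unsigned int seq;	/* even: stable, odd: update in progress */
	uint64_t bcnt;
	uint64_t pcnt;
};

static void counter_add(struct cpu_counter *c, uint64_t bytes)
{
	c->seq++;		/* odd: readers will retry */
	c->bcnt += bytes;
	c->pcnt += 1;
	c->seq++;		/* even again: snapshot is consistent */
}

static void counter_read(const struct cpu_counter *c, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {
		start = c->seq;
		*b = c->bcnt;
		*p = c->pcnt;
	} while ((start & 1) || start != c->seq);
}

int main(void)
{
	struct cpu_counter c = { 0, 0, 0 };
	uint64_t b, p;

	counter_add(&c, 1500);
	counter_add(&c, 64);
	counter_read(&c, &b, &p);
	printf("bytes=%llu packets=%llu\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}
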
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c8889..de33803 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -410,7 +410,7 @@
 		if (p != NULL) {
 			sb_add(m, "%02x", *p++);
 			for (i = 1; i < len; i++)
-				sb_add(m, ":%02x", p[i]);
+				sb_add(m, ":%02x", *p++);
 		}
 		sb_add(m, " ");
 
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 99abfb5..97c5b21 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -19,13 +19,15 @@
 
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#endif
+#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
@@ -33,8 +35,10 @@
 {
 	u16 zone = NF_CT_DEFAULT_ZONE;
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	if (skb->nfct)
 		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+#endif
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge &&
@@ -56,9 +60,11 @@
 {
 	struct sk_buff *reasm;
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	/* Previously seen (loopback)?	*/
 	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
 		return NF_ACCEPT;
+#endif
 
 	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
 	/* queued */
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 86c3952..c5b0915 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -31,6 +31,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/skbuff.h>
+#include <linux/compat.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 
@@ -1157,6 +1158,23 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCOUTQ:
+	case SIOCINQ:
+		return -ENOIOCTLCMD;
+	default:
+#ifdef CONFIG_IPV6_MROUTE
+		return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+		return -ENOIOCTLCMD;
+#endif
+	}
+}
+#endif
+
 static void rawv6_close(struct sock *sk, long timeout)
 {
 	if (inet_sk(sk)->inet_num == IPPROTO_RAW)
@@ -1215,6 +1233,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_rawv6_setsockopt,
 	.compat_getsockopt = compat_rawv6_getsockopt,
+	.compat_ioctl	   = compat_rawv6_ioctl,
 #endif
 };
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 373bd04..e7db701 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,6 @@
 #define RT6_TRACE(x...) do { ; } while (0)
 #endif
 
-#define CLONE_OFFLINK_ROUTE 0
-
 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
@@ -115,6 +113,11 @@
 	.local_out		=	__ip6_local_out,
 };
 
+static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+{
+	return 0;
+}
+
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -124,6 +127,8 @@
 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
 	.destroy		=	ip6_dst_destroy,
 	.check			=	ip6_dst_check,
+	.default_mtu		=	ip6_blackhole_default_mtu,
+	.default_advmss		=	ip6_default_advmss,
 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
 };
 
@@ -196,7 +201,6 @@
 		in6_dev_put(idev);
 	}
 	if (peer) {
-		BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
 		rt->rt6i_peer = NULL;
 		inet_putpeer(peer);
 	}
@@ -206,9 +210,6 @@
 {
 	struct inet_peer *peer;
 
-	if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
-		return;
-
 	peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
 	if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
 		inet_putpeer(peer);
@@ -738,13 +739,10 @@
 
 	if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
 		nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
-	else {
-#if CLONE_OFFLINK_ROUTE
+	else if (!(rt->dst.flags & DST_HOST))
 		nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
-#else
+	else
 		goto out2;
-#endif
-	}
 
 	dst_release(&rt->dst);
 	rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2561,14 +2559,16 @@
 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
 			      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
-	int delay = net->ipv6.sysctl.flush_delay;
-	if (write) {
-		proc_dointvec(ctl, write, buffer, lenp, ppos);
-		fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
-		return 0;
-	} else
+	struct net *net;
+	int delay;
+	if (!write)
 		return -EINVAL;
+
+	net = (struct net *)ctl->extra1;
+	delay = net->ipv6.sysctl.flush_delay;
+	proc_dointvec(ctl, write, buffer, lenp, ppos);
+	fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+	return 0;
 }
 
 ctl_table ipv6_route_table_template[] = {
@@ -2655,6 +2655,7 @@
 
 	if (table) {
 		table[0].data = &net->ipv6.sysctl.flush_delay;
+		table[0].extra1 = net;
 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f1..d2c16e1 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1290,4 +1290,4 @@
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("sit0");
+MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index fa1d8f4..7cb65ef 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -15,6 +15,8 @@
 #include <net/addrconf.h>
 #include <net/inet_frag.h>
 
+static struct ctl_table empty[1];
+
 static ctl_table ipv6_table_template[] = {
 	{
 		.procname	= "route",
@@ -35,6 +37,12 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "neigh",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= empty,
+	},
 	{ }
 };
 
@@ -152,7 +160,6 @@
 
 int ipv6_static_sysctl_register(void)
 {
-	static struct ctl_table empty[1];
 	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
 	if (ip6_base == NULL)
 		return -ENOMEM;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7e74023..da87428 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -98,6 +98,10 @@
 	if (!xdst->u.rt6.rt6i_idev)
 		return -ENODEV;
 
+	xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+	if (rt->rt6i_peer)
+		atomic_inc(&rt->rt6i_peer->refcnt);
+
 	/* Sheit... I remember I did this right. Apparently,
 	 * it was magically lost, so this code needs audit */
 	xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
@@ -216,6 +220,8 @@
 
 	if (likely(xdst->u.rt6.rt6i_idev))
 		in6_dev_put(xdst->u.rt6.rt6i_idev);
+	if (likely(xdst->u.rt6.rt6i_peer))
+		inet_putpeer(xdst->u.rt6.rt6i_peer);
 	xfrm_dst_destroy(xdst);
 }
 
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 9109262..c766056 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -20,7 +20,7 @@
 	def_bool n
 
 config MAC80211_RC_PID
-	bool "PID controller based rate control algorithm" if EMBEDDED
+	bool "PID controller based rate control algorithm" if EXPERT
 	select MAC80211_HAS_RC
 	---help---
 	  This option enables a TX rate control algorithm for
@@ -28,14 +28,14 @@
 	  rate.
 
 config MAC80211_RC_MINSTREL
-	bool "Minstrel" if EMBEDDED
+	bool "Minstrel" if EXPERT
 	select MAC80211_HAS_RC
 	default y
 	---help---
 	  This option enables the 'minstrel' TX rate control algorithm
 
 config MAC80211_RC_MINSTREL_HT
-	bool "Minstrel 802.11n support" if EMBEDDED
+	bool "Minstrel 802.11n support" if EXPERT
 	depends on MAC80211_RC_MINSTREL
 	default y
 	---help---
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f138b19..227ca82 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -185,8 +185,6 @@
 				     struct ieee80211_mgmt *mgmt,
 				     size_t len)
 {
-	struct ieee80211_hw *hw = &local->hw;
-	struct ieee80211_conf *conf = &hw->conf;
 	struct tid_ampdu_rx *tid_agg_rx;
 	u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
 	u8 dialog_token;
@@ -231,13 +229,8 @@
 		goto end_no_lock;
 	}
 	/* determine default buffer size */
-	if (buf_size == 0) {
-		struct ieee80211_supported_band *sband;
-
-		sband = local->hw.wiphy->bands[conf->channel->band];
-		buf_size = IEEE80211_MIN_AMPDU_BUF;
-		buf_size = buf_size << sband->ht_cap.ampdu_factor;
-	}
+	if (buf_size == 0)
+		buf_size = IEEE80211_MAX_AMPDU_BUF;
 
 
 	/* examine state machine */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4bc8a92..9cd73b1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1822,6 +1822,7 @@
 		*cookie ^= 2;
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
 		local->hw_roc_skb = skb;
+		local->hw_roc_skb_for_status = skb;
 		mutex_unlock(&local->mtx);
 
 		return 0;
@@ -1875,6 +1876,7 @@
 		if (ret == 0) {
 			kfree_skb(local->hw_roc_skb);
 			local->hw_roc_skb = NULL;
+			local->hw_roc_skb_for_status = NULL;
 		}
 
 		mutex_unlock(&local->mtx);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c47d7c0..533fd32 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -953,7 +953,7 @@
 
 	struct ieee80211_channel *hw_roc_channel;
 	struct net_device *hw_roc_dev;
-	struct sk_buff *hw_roc_skb;
+	struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
 	struct work_struct hw_roc_start, hw_roc_done;
 	enum nl80211_channel_type hw_roc_channel_type;
 	unsigned int hw_roc_duration;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8acba45..7a10a8d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1229,6 +1229,7 @@
 	}
 	mutex_unlock(&local->iflist_mtx);
 	unregister_netdevice_many(&unreg_list);
+	list_del(&unreg_list);
 }
 
 static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 485d36b..a46ff06 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -39,6 +39,8 @@
 MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
 		 "Disable 40MHz support in the 2.4GHz band");
 
+static struct lock_class_key ieee80211_rx_skb_queue_class;
+
 void ieee80211_configure_filter(struct ieee80211_local *local)
 {
 	u64 mc;
@@ -569,7 +571,15 @@
 	spin_lock_init(&local->filter_lock);
 	spin_lock_init(&local->queue_stop_reason_lock);
 
-	skb_queue_head_init(&local->rx_skb_queue);
+	/*
+	 * The rx_skb_queue is only accessed from tasklets,
+	 * but other SKB queues are used from within IRQ
+	 * context. Therefore, this one needs a different
+	 * locking class so our direct, non-irq-safe use of
+	 * the queue's lock doesn't throw lockdep warnings.
+	 */
+	skb_queue_head_init_class(&local->rx_skb_queue,
+				  &ieee80211_rx_skb_queue_class);
 
 	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 45fbb9e..c9ceb4d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1033,6 +1033,12 @@
 	if (is_multicast_ether_addr(hdr->addr1))
 		return;
 
+	/*
+	 * In case we receive frames after disassociation.
+	 */
+	if (!sdata->u.mgd.associated)
+		return;
+
 	ieee80211_sta_reset_conn_monitor(sdata);
 }
 
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 38a7972..071ac95 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -323,6 +323,7 @@
 
 	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
 		struct ieee80211_work *wk;
+		u64 cookie = (unsigned long)skb;
 
 		rcu_read_lock();
 		list_for_each_entry_rcu(wk, &local->work_list, list) {
@@ -334,8 +335,12 @@
 			break;
 		}
 		rcu_read_unlock();
+		if (local->hw_roc_skb_for_status == skb) {
+			cookie = local->hw_roc_cookie ^ 2;
+			local->hw_roc_skb_for_status = NULL;
+		}
 		cfg80211_mgmt_tx_status(
-			skb->dev, (unsigned long) skb, skb->data, skb->len,
+			skb->dev, cookie, skb->data, skb->len,
 			!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
 	}
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5950e3a..b0beaa5 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1547,7 +1547,7 @@
 		skb_orphan(skb);
 	}
 
-	if (skb_header_cloned(skb))
+	if (skb_cloned(skb))
 		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
 	else if (head_need || tail_need)
 		I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -2230,6 +2230,9 @@
 
 	sdata = vif_to_sdata(vif);
 
+	if (!ieee80211_sdata_running(sdata))
+		goto out;
+
 	if (tim_offset)
 		*tim_offset = 0;
 	if (tim_length)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index cf68700..d036597 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1210,7 +1210,9 @@
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_STATION:
 			changed |= BSS_CHANGED_ASSOC;
+			mutex_lock(&sdata->u.mgd.mtx);
 			ieee80211_bss_info_change_notify(sdata, changed);
+			mutex_unlock(&sdata->u.mgd.mtx);
 			break;
 		case NL80211_IFTYPE_ADHOC:
 			changed |= BSS_CHANGED_IBSS;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 32fcbe2..4aa614b 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -133,6 +133,7 @@
 
 		/* Optimization: we don't need to hold module
 		   reference here, since function can't sleep. --RR */
+repeat:
 		verdict = elem->hook(hook, skb, indev, outdev, okfn);
 		if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
@@ -145,7 +146,7 @@
 #endif
 			if (verdict != NF_REPEAT)
 				return verdict;
-			*i = (*i)->prev;
+			goto repeat;
 		}
 	}
 	return NF_ACCEPT;
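
Instead of stepping the hook iterator backwards on NF_REPEAT, the loop above now jumps back to a repeat: label and re-invokes the same hook. A stripped-down model of that verdict loop (generic verdict values and hook signature, not the netfilter types):

/* Minimal model of the nf_iterate() change: on REPEAT, call the same hook
 * again instead of rewinding the iterator.  Names are stand-ins. */
#include <stdio.h>

enum verdict { V_ACCEPT, V_DROP, V_REPEAT };

typedef enum verdict (*hook_fn)(void *pkt);

static enum verdict run_hooks(hook_fn *hooks, int n, void *pkt)
{
	for (int i = 0; i < n; i++) {
		enum verdict v;
repeat:
		v = hooks[i](pkt);
		if (v == V_DROP)
			return V_DROP;
		if (v == V_REPEAT)
			goto repeat;	/* re-run the same hook on the same packet */
	}
	return V_ACCEPT;
}

static int tries;

static enum verdict flaky_hook(void *pkt)
{
	(void)pkt;
	return ++tries < 3 ? V_REPEAT : V_ACCEPT;	/* accepts on the third call */
}

int main(void)
{
	hook_fn hooks[] = { flaky_hook };
	enum verdict v = run_hooks(hooks, 1, NULL);

	printf("verdict=%d tries=%d\n", v, tries);	/* verdict=0 tries=3 */
	return 0;
}
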
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5..ba98e13 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -808,9 +808,9 @@
 	dest->u_threshold = udest->u_threshold;
 	dest->l_threshold = udest->l_threshold;
 
-	spin_lock(&dest->dst_lock);
+	spin_lock_bh(&dest->dst_lock);
 	ip_vs_dst_reset(dest);
-	spin_unlock(&dest->dst_lock);
+	spin_unlock_bh(&dest->dst_lock);
 
 	if (add)
 		ip_vs_new_estimator(&dest->stats);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e615119..84f4fcc 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -942,8 +942,15 @@
 	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
 		nf_conntrack_event_cache(IPCT_REPLY, ct);
 out:
-	if (tmpl)
-		nf_ct_put(tmpl);
+	if (tmpl) {
+		/* Special case: we have to repeat this hook, assign the
+		 * template again to this packet. We assume that this packet
+		 * has no conntrack assigned. This is used by nf_ct_tcp. */
+		if (ret == NF_REPEAT)
+			skb->nfct = (struct nf_conntrack *)tmpl;
+		else
+			nf_ct_put(tmpl);
+	}
 
 	return ret;
 }
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5702de3..63a1b91 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -63,6 +63,9 @@
 		 * this does not harm and it happens very rarely. */
 		unsigned long missed = e->missed;
 
+		if (!((events | missed) & e->ctmask))
+			goto out_unlock;
+
 		ret = notify->fcn(events | missed, &item);
 		if (unlikely(ret < 0 || missed)) {
 			spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0cdba50..eead9db 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -645,30 +645,29 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
 					 hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
 						cb->nlh->nlmsg_seq,
 						IPCTNL_MSG_CT_NEW, ct) < 0) {
+				nf_conntrack_get(&ct->ct_general);
 				cb->args[1] = (unsigned long)ct;
 				goto out;
 			}
@@ -681,8 +680,6 @@
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-		nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -690,7 +687,7 @@
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
@@ -976,7 +973,8 @@
 free:
 	kfree_skb(skb2);
 out:
-	return err;
+	/* this avoids a loop in nfnetlink. */
+	return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 #ifdef CONFIG_NF_NAT_NEEDED
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393e..9181699 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@
 
 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
 {
+	if (pf >= ARRAY_SIZE(nf_loggers))
+		return -EINVAL;
 	mutex_lock(&nf_log_mutex);
 	if (__find_logger(pf, logger->name) == NULL) {
 		mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@
 
 void nf_log_unbind_pf(u_int8_t pf)
 {
+	if (pf >= ARRAY_SIZE(nf_loggers))
+		return;
 	mutex_lock(&nf_log_mutex);
 	rcu_assign_pointer(nf_loggers[pf], NULL);
 	mutex_unlock(&nf_log_mutex);
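
Both nf_log_bind_pf() and nf_log_unbind_pf() now reject protocol families outside the nf_loggers[] array before indexing it. A trivial userspace sketch of that ARRAY_SIZE-style guard (table size and names invented for the demo):

/* Bounds-check an index before it is used to dereference a fixed-size table,
 * the same shape as the checks added above. */
#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *loggers[13];		/* stand-in for the per-family logger table */

static int bind_logger(unsigned int pf, const char *name)
{
	if (pf >= ARRAY_SIZE(loggers))
		return -EINVAL;		/* out-of-range family: refuse early */
	loggers[pf] = name;
	return 0;
}

int main(void)
{
	printf("bind 2   -> %d\n", bind_logger(2, "demo_LOG"));
	printf("bind 200 -> %d\n", bind_logger(200, "bogus"));
	return 0;
}
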
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 4d87bef..474d621 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -28,26 +28,23 @@
 	skb->destructor = NULL;
 
 	if (sk)
-		nf_tproxy_put_sock(sk);
+		sock_put(sk);
 }
 
 /* consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
-	bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
-				inet_twsk(sk)->tw_transparent :
-				inet_sk(sk)->transparent;
+	/* assigning tw sockets complicates things; most
+	 * skb->sk->X checks would have to test sk->sk_state first */
+	if (sk->sk_state == TCP_TIME_WAIT) {
+		inet_twsk_put(inet_twsk(sk));
+		return;
+	}
 
-	if (transparent) {
-		skb_orphan(skb);
-		skb->sk = sk;
-		skb->destructor = nf_tproxy_destructor;
-		return 1;
-	} else
-		nf_tproxy_put_sock(sk);
-
-	return 0;
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = nf_tproxy_destructor;
 }
 EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
 
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 80463507..c942376 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-		spin_lock_init(&lock->lock);
+
+		seqlock_init(&lock->lock);
 		lock->readers = 0;
 	}
 
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 640678f..dcfd57e 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -33,6 +33,20 @@
 #include <net/netfilter/nf_tproxy_core.h>
 #include <linux/netfilter/xt_TPROXY.h>
 
+static bool tproxy_sk_is_transparent(struct sock *sk)
+{
+	if (sk->sk_state != TCP_TIME_WAIT) {
+		if (inet_sk(sk)->transparent)
+			return true;
+		sock_put(sk);
+	} else {
+		if (inet_twsk(sk)->tw_transparent)
+			return true;
+		inet_twsk_put(inet_twsk(sk));
+	}
+	return false;
+}
+
 static inline __be32
 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 {
@@ -141,7 +155,7 @@
 					   skb->dev, NFT_LOOKUP_LISTENER);
 
 	/* NOTE: assign_sock consumes our sk reference */
-	if (sk && nf_tproxy_assign_sock(skb, sk)) {
+	if (sk && tproxy_sk_is_transparent(sk)) {
 		/* This should be in a separate target, but we don't do multiple
 		   targets on the same rule yet */
 		skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@
 		pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
 			 iph->protocol, &iph->daddr, ntohs(hp->dest),
 			 &laddr, ntohs(lport), skb->mark);
+
+		nf_tproxy_assign_sock(skb, sk);
 		return NF_ACCEPT;
 	}
 
@@ -306,7 +322,7 @@
 					   par->in, NFT_LOOKUP_LISTENER);
 
 	/* NOTE: assign_sock consumes our sk reference */
-	if (sk && nf_tproxy_assign_sock(skb, sk)) {
+	if (sk && tproxy_sk_is_transparent(sk)) {
 		/* This should be in a separate target, but we don't do multiple
 		   targets on the same rule yet */
 		skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@
 		pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
 			 tproto, &iph->saddr, ntohs(hp->source),
 			 laddr, ntohs(lport), skb->mark);
+
+		nf_tproxy_assign_sock(skb, sk);
 		return NF_ACCEPT;
 	}
 
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 88f7c35..73c33a4 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -53,15 +53,13 @@
 }
 
 static inline int
-iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
+iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
 {
 	unsigned int i;
-	int r;
 
 	for (i = 0; i < 4; ++i) {
-		r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]);
-		if (r != 0)
-			return r;
+		if (a->s6_addr32[i] != b->s6_addr32[i])
+			return ntohl(a->s6_addr32[i]) < ntohl(b->s6_addr32[i]);
 	}
 
 	return 0;
@@ -75,15 +73,15 @@
 	bool m;
 
 	if (info->flags & IPRANGE_SRC) {
-		m  = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
-		m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
+		m  = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
+		m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
 		m ^= !!(info->flags & IPRANGE_SRC_INV);
 		if (m)
 			return false;
 	}
 	if (info->flags & IPRANGE_DST) {
-		m  = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
-		m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
+		m  = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
+		m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
 		m ^= !!(info->flags & IPRANGE_DST_INV);
 		if (m)
 			return false;
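
The old iprange_ipv6_sub() subtracted 32-bit words, and that subtraction can wrap when the operands differ in the top bit, yielding the wrong sign; the replacement compares word by word and returns a plain less-than. A standalone userspace equivalent (byte-wise here, which gives the same network-order result as the kernel's 32-bit-word walk):

/* Overflow-free IPv6 "less than" in the spirit of the fixed iprange_ipv6_lt():
 * compare in network byte order, most significant part first. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static int ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
{
	for (int i = 0; i < 16; i++) {
		if (a->s6_addr[i] != b->s6_addr[i])
			return a->s6_addr[i] < b->s6_addr[i];
	}
	return 0;	/* equal */
}

int main(void)
{
	struct in6_addr lo, hi;

	inet_pton(AF_INET6, "2001:db8::1", &lo);
	inet_pton(AF_INET6, "ff02::1", &hi);	/* high bit set tripped the old subtraction */
	printf("lo < hi: %d, hi < lo: %d\n", ipv6_lt(&lo, &hi), ipv6_lt(&hi, &lo));
	return 0;
}
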
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 00d6ae83..9cc4635 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,6 +35,15 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
+static void
+xt_socket_put_sk(struct sock *sk)
+{
+	if (sk->sk_state == TCP_TIME_WAIT)
+		inet_twsk_put(inet_twsk(sk));
+	else
+		sock_put(sk);
+}
+
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
 		    u8 *protocol,
@@ -164,7 +173,7 @@
 				       (sk->sk_state == TCP_TIME_WAIT &&
 					inet_twsk(sk)->tw_transparent));
 
-		nf_tproxy_put_sock(sk);
+		xt_socket_put_sk(sk);
 
 		if (wildcard || !transparent)
 			sk = NULL;
@@ -298,7 +307,7 @@
 				       (sk->sk_state == TCP_TIME_WAIT &&
 					inet_twsk(sk)->tw_transparent));
 
-		nf_tproxy_put_sock(sk);
+		xt_socket_put_sk(sk);
 
 		if (wildcard || !transparent)
 			sk = NULL;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d..1f92459 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1407,7 +1407,7 @@
 	int noblock = flags&MSG_DONTWAIT;
 	size_t copied;
 	struct sk_buff *skb, *data_skb;
-	int err;
+	int err, ret;
 
 	if (flags&MSG_OOB)
 		return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@
 
 	skb_free_datagram(sk, skb);
 
-	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
-		netlink_dump(sk);
+	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+		ret = netlink_dump(sk);
+		if (ret) {
+			sk->sk_err = ret;
+			sk->sk_error_report(sk);
+		}
+	}
 
 	scm_recv(sock, msg, siocb->scm, flags);
 out:
@@ -1736,6 +1741,7 @@
 	struct netlink_callback *cb;
 	struct sock *sk;
 	struct netlink_sock *nlk;
+	int ret;
 
 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 	if (cb == NULL)
@@ -1764,9 +1770,13 @@
 	nlk->cb = cb;
 	mutex_unlock(nlk->cb_mutex);
 
-	netlink_dump(sk);
+	ret = netlink_dump(sk);
+
 	sock_put(sk);
 
+	if (ret)
+		return ret;
+
 	/* We successfully started a dump, by returning -EINTR we
 	 * signal not to send ACK even if it was requested.
 	 */
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb..1072b2c 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
 	struct phonet_protocol *pp;
 
@@ -458,7 +458,7 @@
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
 						struct phonet_protocol *pp)
 {
 	int err = 0;
@@ -481,7 +481,7 @@
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
 	mutex_lock(&proto_tab_lock);
 	BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 71f373c..c47a511 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -551,7 +551,10 @@
 	if (conn->c_loopback
 	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
 		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+		scat = &rm->data.op_sg[sg];
+		ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+		ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+		return ret;
 	}
 
 	/* FIXME we may overallocate here */
diff --git a/net/rds/loop.c b/net/rds/loop.c
index aeec1d4..bca6761 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,15 @@
 			 unsigned int hdr_off, unsigned int sg,
 			 unsigned int off)
 {
+	struct scatterlist *sgp = &rm->data.op_sg[sg];
+	int ret = sizeof(struct rds_header) +
+			be32_to_cpu(rm->m_inc.i_hdr.h_len);
+
 	/* Do not send cong updates to loopback */
 	if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
 		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+		ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
+		goto out;
 	}
 
 	BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@
 			    NULL);
 
 	rds_inc_put(&rm->m_inc);
-
-	return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
+out:
+	return ret;
 }
 
 /*
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index eaf7658..7fce6df 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@
 	default y
 
 config RFKILL_INPUT
-	bool "RF switch input support" if EMBEDDED
+	bool "RF switch input support" if EXPERT
 	depends on RFKILL
 	depends on INPUT = y || RFKILL = INPUT
-	default y if !EMBEDDED
+	default y if !EXPERT
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0b9bb20..74c064c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -808,7 +808,7 @@
 		goto error_call_jar;
 	}
 
-	rxrpc_workqueue = create_workqueue("krxrpcd");
+	rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
 	if (!rxrpc_workqueue) {
 		printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
 		goto error_work_queue;
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 8931500..1a2b0633 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -423,6 +423,7 @@
 			goto protocol_error;
 		}
 
+	case RXRPC_PACKET_TYPE_ACKALL:
 	case RXRPC_PACKET_TYPE_ACK:
 		/* ACK processing is done in process context */
 		read_lock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0..d763793 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@
 		return ret;
 
 	plen -= sizeof(*token);
-	token = kmalloc(sizeof(*token), GFP_KERNEL);
+	token = kzalloc(sizeof(*token), GFP_KERNEL);
 	if (!token)
 		return -ENOMEM;
 
-	token->kad = kmalloc(plen, GFP_KERNEL);
+	token->kad = kzalloc(plen, GFP_KERNEL);
 	if (!token->kad) {
 		kfree(token);
 		return -ENOMEM;
@@ -731,10 +731,10 @@
 		goto error;
 
 	ret = -ENOMEM;
-	token = kmalloc(sizeof(*token), GFP_KERNEL);
+	token = kzalloc(sizeof(*token), GFP_KERNEL);
 	if (!token)
 		goto error;
-	token->kad = kmalloc(plen, GFP_KERNEL);
+	token->kad = kzalloc(plen, GFP_KERNEL);
 	if (!token->kad)
 		goto error_free;
 
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a36270a..f04d4a4 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -24,7 +24,7 @@
 	  To administer these schedulers, you'll need the user-level utilities
 	  from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
 	  That package also contains some documentation; for more, check out
-	  <http://linux-net.osdl.org/index.php/Iproute2>.
+	  <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
 
 	  This Quality of Service (QoS) support will enable you to use
 	  Differentiated Services (diffserv) and Resource Reservation Protocol
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce..83ddfc0 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@
 
 	spin_lock(&p->tcf_lock);
 	p->tcf_tm.lastuse = jiffies;
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	action = p->tcf_action;
 	update_flags = p->update_flags;
 	spin_unlock(&p->tcf_lock);
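
This and the following sched/ hunks fold the open-coded byte/packet increments into bstats_update() and qdisc_bstats_update(); the htb hunk further shows the GSO segment counting being absorbed into the helper as well. A sketch of what such a helper wraps (stand-in types, GSO handling omitted):

/* A bstats_update()-style helper: one inline that bumps the byte and packet
 * counters for a packet, replacing the two open-coded lines repeated in every
 * action and qdisc.  Types and names here are stand-ins for the demo. */
#include <stdint.h>
#include <stdio.h>

struct basic_stats {
	uint64_t bytes;
	uint32_t packets;
};

struct packet {
	unsigned int len;	/* stand-in for qdisc_pkt_len(skb) */
};

static inline void stats_update(struct basic_stats *b, const struct packet *p)
{
	b->bytes += p->len;
	b->packets++;
}

int main(void)
{
	struct basic_stats st = { 0, 0 };
	struct packet pkts[] = { { 1500 }, { 64 }, { 512 } };

	for (unsigned int i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++)
		stats_update(&st, &pkts[i]);

	printf("bytes=%llu packets=%u\n",
	       (unsigned long long)st.bytes, st.packets);
	return 0;
}
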
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef96..c2a7c20 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@
 	spin_lock(&ipt->tcf_lock);
 
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	ipt->tcf_bstats.packets++;
+	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
 	 worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be..d765067 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@
 
 	spin_lock(&m->tcf_lock);
 	m->tcf_tm.lastuse = jiffies;
-	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	m->tcf_bstats.packets++;
+	bstats_update(&m->tcf_bstats, skb);
 
 	dev = m->tcfm_dev;
 	if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb83..178a4bd 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 
 	spin_unlock(&p->tcf_lock);
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9..445bef7 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf743..e2f08b1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@
 
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	police->tcf_bstats.packets++;
+	bstats_update(&police->tcf_bstats, skb);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3..7287cff 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4e..836f5fe 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2825407..943d733 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@
 		}
 		return ret;
 	}
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	flow->bstats.bytes += qdisc_pkt_len(skb);
-	flow->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
+	bstats_update(&flow->bstats, skb);
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb76315..5f63ec5 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,6 @@
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,8 +648,6 @@
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			sch->bstats.packets++;
-			sch->bstats.bytes += qdisc_pkt_len(skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
@@ -973,6 +969,7 @@
 
 		skb = cbq_dequeue_1(sch);
 		if (skb) {
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			sch->flags &= ~TCQ_F_THROTTLED;
 			return skb;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b531..6b7fe4a 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@
 		return err;
 	}
 
-	len = qdisc_pkt_len(skb);
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -377,10 +375,7 @@
 		cl->deficit = cl->quantum;
 	}
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += len;
-	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	bstats_update(&cl->bstats, skb);
 
 	sch->q.qlen++;
 	return err;
@@ -407,6 +402,7 @@
 			skb = qdisc_dequeue_peeked(cl->qdisc);
 			if (cl->qdisc->q.qlen == 0)
 				list_del(&cl->alist);
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d6..0f7bf3f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,6 @@
 		return err;
 	}
 
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -284,6 +282,7 @@
 	if (skb == NULL)
 		return NULL;
 
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 
 	index = skb->tc_index & (p->indices - 1);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index aa4d633..d468b47 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -46,17 +46,14 @@
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-	struct sk_buff *skb_head;
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (likely(skb_queue_len(&sch->q) < q->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	/* queue full, remove one skb to fulfill the limit */
-	skb_head = qdisc_dequeue_head(sch);
+	__qdisc_queue_drop_head(sch, &sch->q);
 	sch->qstats.drops++;
-	kfree_skb(skb_head);
-
 	qdisc_enqueue_tail(skb, sch);
 
 	return NET_XMIT_CN;
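
pfifo_tail_enqueue() keeps the same policy but drops the head packet in place via __qdisc_queue_drop_head() rather than dequeueing and freeing it by hand: when the queue is at its limit, the oldest packet is evicted and the new one appended. A plain ring-buffer sketch of that drop-oldest policy (invented names, not the sk_buff queue API):

/* Drop-oldest enqueue: when the queue is full, evict the head, then append. */
#include <stdio.h>

#define LIMIT 3

struct fifo {
	int buf[LIMIT];
	int head, len;
};

static void enqueue_drop_head(struct fifo *q, int v, int *drops)
{
	if (q->len == LIMIT) {			 /* queue full */
		q->head = (q->head + 1) % LIMIT; /* drop the oldest entry */
		q->len--;
		(*drops)++;
	}
	q->buf[(q->head + q->len) % LIMIT] = v;
	q->len++;
}

int main(void)
{
	struct fifo q = { { 0 }, 0, 0 };
	int drops = 0;

	for (int v = 1; v <= 5; v++)
		enqueue_drop_head(&q, v, &drops);

	printf("drops=%d contents:", drops);
	for (int i = 0; i < q.len; i++)
		printf(" %d", q.buf[(q.head + i) % LIMIT]);
	printf("\n");	/* drops=2 contents: 3 4 5 */
	return 0;
}
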
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598..1bc6980 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -839,6 +839,7 @@
 
 	list_add(&dev->unreg_list, &single);
 	dev_deactivate_many(&single);
+	list_del(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b..14a799de 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,7 @@
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1668,6 +1665,7 @@
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 
 	return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d..fc12fe6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,11 @@
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +644,10 @@
 				htb_add_to_wait_tree(q, cl, diff);
 		}
 
-		/* update byte stats except for leaves which are already updated */
-		if (cl->level) {
-			cl->bstats.bytes += bytes;
-			cl->bstats.packets += skb_is_gso(skb)?
-					skb_shinfo(skb)->gso_segs:1;
-		}
+		/* update basic stats except for leaves which are already updated */
+		if (cl->level)
+			bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
@@ -847,7 +841,7 @@
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
@@ -856,6 +850,8 @@
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
 	if (skb != NULL) {
+ok:
+		qdisc_bstats_update(sch, skb);
 		sch->flags &= ~TCQ_F_THROTTLED;
 		sch->q.qlen--;
 		return skb;
@@ -889,11 +885,8 @@
 			int prio = ffz(m);
 			m |= 1 << prio;
 			skb = htb_dequeue_tree(q, prio, level);
-			if (likely(skb != NULL)) {
-				sch->q.qlen--;
-				sch->flags &= ~TCQ_F_THROTTLED;
-				goto fin;
-			}
+			if (likely(skb != NULL))
+				goto ok;
 		}
 	}
 	sch->qstats.overlimits++;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a..bce1665 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@
 
 	result = tc_classify(skb, p->filter_list, &res);
 
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690de..436a2e7 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,6 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -113,6 +111,7 @@
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
+				qdisc_bstats_update(sch, skb);
 				sch->q.qlen--;
 				return skb;
 			}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c0..6a3006b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,6 @@
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -290,6 +288,7 @@
 				skb->tstamp.tv64 = 0;
 #endif
 			pr_debug("netem_dequeue: return skb=%p\n", skb);
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -477,8 +476,6 @@
 		__skb_queue_after(list, skb, nskb);
 
 		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		sch->bstats.bytes += qdisc_pkt_len(nskb);
-		sch->bstats.packets++;
 
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bc..fbd710d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,6 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -117,6 +115,7 @@
 		struct Qdisc *qdisc = q->queues[prio];
 		struct sk_buff *skb = qdisc->dequeue(qdisc);
 		if (skb) {
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c..9f98dbd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,6 @@
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -115,11 +113,13 @@
 	struct Qdisc *child = q->qdisc;
 
 	skb = child->dequeue(child);
-	if (skb)
+	if (skb) {
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
-	else if (!red_is_idling(&q->parms))
-		red_start_of_idle_period(&q->parms);
-
+	} else {
+		if (!red_is_idling(&q->parms))
+			red_start_of_idle_period(&q->parms);
+	}
 	return skb;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94..edea8ce 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -402,11 +402,8 @@
 		q->tail = slot;
 		slot->allot = q->scaled_quantum;
 	}
-	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+	if (++sch->q.qlen <= q->limit)
 		return NET_XMIT_SUCCESS;
-	}
 
 	sfq_drop(sch);
 	return NET_XMIT_CN;
@@ -446,6 +443,7 @@
 	}
 	skb = slot_dequeue_head(slot);
 	sfq_dec(q, a);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 	sch->qstats.backlog -= qdisc_pkt_len(skb);
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d..e931658 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,6 @@
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
 	return NET_XMIT_SUCCESS;
 }
 
@@ -188,6 +186,7 @@
 			q->ptokens = ptoks;
 			sch->q.qlen--;
 			sch->flags &= ~TCQ_F_THROTTLED;
+			qdisc_bstats_update(sch, skb);
 			return skb;
 		}
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a..d84e732 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@
 	struct net_device *dev;
 	struct Qdisc *slaves;
 	struct list_head master_list;
+	unsigned long	tx_bytes;
+	unsigned long	tx_packets;
+	unsigned long	tx_errors;
+	unsigned long	tx_dropped;
 };
 
 struct teql_sched_data
@@ -83,8 +87,6 @@
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
 		return NET_XMIT_SUCCESS;
 	}
 
@@ -108,6 +110,8 @@
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
 		}
+	} else {
+		qdisc_bstats_update(sch, skb);
 	}
 	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
 	return skb;
@@ -275,7 +279,6 @@
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
@@ -315,8 +318,8 @@
 					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
-					txq->tx_packets++;
-					txq->tx_bytes += length;
+					master->tx_packets++;
+					master->tx_bytes += length;
 					return NETDEV_TX_OK;
 				}
 				__netif_tx_unlock(slave_txq);
@@ -343,10 +346,10 @@
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
 	}
-	dev->stats.tx_errors++;
+	master->tx_errors++;
 
 drop:
-	txq->tx_dropped++;
+	master->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -399,6 +402,18 @@
 	return 0;
 }
 
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+						     struct rtnl_link_stats64 *stats)
+{
+	struct teql_master *m = netdev_priv(dev);
+
+	stats->tx_packets	= m->tx_packets;
+	stats->tx_bytes		= m->tx_bytes;
+	stats->tx_errors	= m->tx_errors;
+	stats->tx_dropped	= m->tx_dropped;
+	return stats;
+}
+
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
 {
 	struct teql_master *m = netdev_priv(dev);
@@ -423,6 +438,7 @@
 	.ndo_open	= teql_master_open,
 	.ndo_stop	= teql_master_close,
 	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats64 = teql_master_stats64,
 	.ndo_change_mtu	= teql_master_mtu,
 };
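
The teql changes move the TX counters out of the shared netdev_queue/device statistics and into the master's private data, reporting them through ndo_get_stats64. A small sketch of that keep-stats-private-and-report-via-callback shape (all names below are invented for the demo):

/* Private per-device counters, exposed through a get-stats callback instead
 * of poking shared queue/device counters directly.  Demo names only. */
#include <stdio.h>

struct link_stats {
	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct demo_dev {
	struct link_stats priv;		/* private counters, updated on xmit */
	void (*get_stats)(struct demo_dev *dev, struct link_stats *out);
};

static void demo_get_stats(struct demo_dev *dev, struct link_stats *out)
{
	*out = dev->priv;		/* snapshot the private counters */
}

static void demo_xmit(struct demo_dev *dev, unsigned int len, int ok)
{
	if (ok) {
		dev->priv.tx_packets++;
		dev->priv.tx_bytes += len;
	} else {
		dev->priv.tx_dropped++;
	}
}

int main(void)
{
	struct demo_dev dev = { { 0, 0, 0 }, demo_get_stats };
	struct link_stats st;

	demo_xmit(&dev, 1500, 1);
	demo_xmit(&dev, 60, 0);
	dev.get_stats(&dev, &st);
	printf("tx_packets=%lu tx_bytes=%lu tx_dropped=%lu\n",
	       st.tx_packets, st.tx_bytes, st.tx_dropped);
	return 0;
}
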
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2cc46f0..b23428f 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2029,11 +2029,11 @@
 			*errp = sctp_make_op_error_fixed(asoc, chunk);
 
 		if (*errp) {
-			sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					WORD_ROUND(ntohs(param.p->length)));
-			sctp_addto_chunk_fixed(*errp,
-					WORD_ROUND(ntohs(param.p->length)),
-					param.v);
+			if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+					WORD_ROUND(ntohs(param.p->length))))
+				sctp_addto_chunk_fixed(*errp,
+						WORD_ROUND(ntohs(param.p->length)),
+						param.v);
 		} else {
 			/* If there is no memory for generating the ERROR
 			 * report as specified, an ABORT will be triggered
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a09b0dd..8e02550 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3428,7 +3428,7 @@
 		retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
 		break;
 
-	case SCTP_DELAYED_ACK:
+	case SCTP_DELAYED_SACK:
 		retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
 		break;
 	case SCTP_PARTIAL_DELIVERY_POINT:
@@ -5333,7 +5333,7 @@
 		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
 							  optlen);
 		break;
-	case SCTP_DELAYED_ACK:
+	case SCTP_DELAYED_SACK:
 		retval = sctp_getsockopt_delayed_ack(sk, len, optval,
 							  optlen);
 		break;
diff --git a/net/socket.c b/net/socket.c
index ccc576a..ac2219f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -306,20 +306,6 @@
 	.statfs		= simple_statfs,
 };
 
-static struct dentry *sockfs_mount(struct file_system_type *fs_type,
-			 int flags, const char *dev_name, void *data)
-{
-	return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
-}
-
-static struct vfsmount *sock_mnt __read_mostly;
-
-static struct file_system_type sock_fs_type = {
-	.name =		"sockfs",
-	.mount =	sockfs_mount,
-	.kill_sb =	kill_anon_super,
-};
-
 /*
  * sockfs_dname() is called from d_path().
  */
@@ -333,6 +319,21 @@
 	.d_dname  = sockfs_dname,
 };
 
+static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+			 int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
+		&sockfs_dentry_operations, SOCKFS_MAGIC);
+}
+
+static struct vfsmount *sock_mnt __read_mostly;
+
+static struct file_system_type sock_fs_type = {
+	.name =		"sockfs",
+	.mount =	sockfs_mount,
+	.kill_sb =	kill_anon_super,
+};
+
 /*
  *	Obtains the first available file descriptor and sets it up for use.
  *
@@ -368,7 +369,6 @@
 	}
 	path.mnt = mntget(sock_mnt);
 
-	d_set_d_op(path.dentry, &sockfs_dentry_operations);
 	d_instantiate(path.dentry, SOCK_INODE(sock));
 	SOCK_INODE(sock)->i_fop = &socket_file_ops;
 
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe6784..67e3127 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@
 	return cred->cr_ops->crvalidate(task, p);
 }
 
+static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+				   __be32 *data, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
+	encode(rqstp, &xdr, obj);
+}
+
 int
-rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
+rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
 		__be32 *data, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@
 	if (cred->cr_ops->crwrap_req)
 		return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
 	/* By default, we encode the arguments normally. */
-	return encode(rqstp, data, obj);
+	rpcauth_wrap_req_encode(encode, rqstp, data, obj);
+	return 0;
+}
+
+static int
+rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+			  __be32 *data, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
+	return decode(rqstp, &xdr, obj);
 }
 
 int
-rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
+rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
 		__be32 *data, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@
 		return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
 						   data, obj);
 	/* By default, we decode the arguments normally. */
-	return decode(rqstp, data, obj);
+	return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
 }
 
 int
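
rpcauth_wrap_req() and rpcauth_unwrap_resp() now take the xdr_stream-based kxdreproc_t/kxdrdproc_t callbacks, with a small shim (rpcauth_wrap_req_encode() above) building the stream over the raw buffer before forwarding the call. A generic userspace sketch of that adapter shape (the stream and encoder types are stand-ins, not the sunrpc API):

/* Adapter shim: the caller still holds a raw buffer, the new-style encoder
 * wants a stream, so a tiny wrapper builds the stream and forwards the call. */
#include <stdio.h>
#include <string.h>

struct stream {			/* stand-in for a stream over a buffer */
	char *pos;
	char *end;
};

typedef void (*encode_fn)(struct stream *s, const void *obj);

static void stream_init(struct stream *s, char *buf, size_t len)
{
	s->pos = buf;
	s->end = buf + len;
}

static void wrap_encode(encode_fn encode, char *buf, size_t len, const void *obj)
{
	struct stream s;

	stream_init(&s, buf, len);	/* build the stream over the raw buffer */
	encode(&s, obj);		/* then call the stream-based encoder */
}

static void encode_greeting(struct stream *s, const void *obj)
{
	const char *name = obj;
	size_t n = strlen(name);

	if (s->pos + n < s->end) {
		memcpy(s->pos, name, n + 1);	/* copy including the terminator */
		s->pos += n + 1;
	}
}

int main(void)
{
	char buf[64];

	wrap_encode(encode_greeting, buf, sizeof(buf), "hello");
	printf("encoded: %s\n", buf);
	return 0;
}
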
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce3..45dbf15 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@
 	return NULL;
 }
 
+static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+				__be32 *p, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
+	encode(rqstp, &xdr, obj);
+}
+
 static inline int
 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+		   kxdreproc_t encode, struct rpc_rqst *rqstp,
+		   __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	struct xdr_buf	integ_buf;
@@ -1249,9 +1259,7 @@
 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	*p++ = htonl(rqstp->rq_seqno);
 
-	status = encode(rqstp, p, obj);
-	if (status)
-		return status;
+	gss_wrap_req_encode(encode, rqstp, p, obj);
 
 	if (xdr_buf_subsegment(snd_buf, &integ_buf,
 				offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@
 
 static inline int
 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+		  kxdreproc_t encode, struct rpc_rqst *rqstp,
+		  __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	u32		offset;
@@ -1342,9 +1351,7 @@
 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	*p++ = htonl(rqstp->rq_seqno);
 
-	status = encode(rqstp, p, obj);
-	if (status)
-		return status;
+	gss_wrap_req_encode(encode, rqstp, p, obj);
 
 	status = alloc_enc_pages(rqstp);
 	if (status)
@@ -1394,7 +1401,7 @@
 
 static int
 gss_wrap_req(struct rpc_task *task,
-	     kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
+	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@
 		/* The spec seems a little ambiguous here, but I think that not
 		 * wrapping context destruction requests makes the most sense.
 		 */
-		status = encode(rqstp, p, obj);
+		gss_wrap_req_encode(encode, rqstp, p, obj);
+		status = 0;
 		goto out;
 	}
 	switch (gss_cred->gc_service) {
 		case RPC_GSS_SVC_NONE:
-			status = encode(rqstp, p, obj);
+			gss_wrap_req_encode(encode, rqstp, p, obj);
+			status = 0;
 			break;
 		case RPC_GSS_SVC_INTEGRITY:
 			status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@
 	return 0;
 }
 
+static int
+gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+		      __be32 *p, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	return decode(rqstp, &xdr, obj);
+}
 
 static int
 gss_unwrap_resp(struct rpc_task *task,
-		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
+		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@
 	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
 						+ (savedlen - head->iov_len);
 out_decode:
-	status = decode(rqstp, p, obj);
+	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
 out:
 	gss_put_ctx(ctx);
 	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 75ee993..9576f35 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -137,7 +137,7 @@
 		ms_usage = 13;
 		break;
 	default:
-		return EINVAL;;
+		return -EINVAL;
 	}
 	salt[0] = (ms_usage >> 0) & 0xff;
 	salt[1] = (ms_usage >> 8) & 0xff;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dec2a6f..bcdae78 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -67,7 +67,6 @@
 
 #define	RSI_HASHBITS	6
 #define	RSI_HASHMAX	(1<<RSI_HASHBITS)
-#define	RSI_HASHMASK	(RSI_HASHMAX-1)
 
 struct rsi {
 	struct cache_head	h;
@@ -319,7 +318,6 @@
 
 #define	RSC_HASHBITS	10
 #define	RSC_HASHMAX	(1<<RSC_HASHBITS)
-#define	RSC_HASHMASK	(RSC_HASHMAX-1)
 
 #define GSS_SEQ_WIN	128
 
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0c..1dd1a68 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@
 		ret = task->tk_status;
 		rpc_put_task(task);
 	}
-	return ret;
 	dprintk("RPC:       bc_send ret= %d\n", ret);
+	return ret;
 }
 
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e433e75..72ad836 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -37,7 +37,7 @@
 
 #define	 RPCDBG_FACILITY RPCDBG_CACHE
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item);
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
 static void cache_revisit_request(struct cache_head *item);
 
 static void cache_init(struct cache_head *h)
@@ -128,6 +128,7 @@
 {
 	head->expiry_time = expiry;
 	head->last_refresh = seconds_since_boot();
+	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 	set_bit(CACHE_VALID, &head->flags);
 }
 
@@ -208,11 +209,36 @@
 		/* entry is valid */
 		if (test_bit(CACHE_NEGATIVE, &h->flags))
 			return -ENOENT;
-		else
+		else {
+			/*
+			 * In combination with write barrier in
+			 * sunrpc_cache_update, ensures that anyone
+			 * using the cache entry after this sees the
+			 * updated contents:
+			 */
+			smp_rmb();
 			return 0;
+		}
 	}
 }
 
+static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
+{
+	int rv;
+
+	write_lock(&detail->hash_lock);
+	rv = cache_is_valid(detail, h);
+	if (rv != -EAGAIN) {
+		write_unlock(&detail->hash_lock);
+		return rv;
+	}
+	set_bit(CACHE_NEGATIVE, &h->flags);
+	cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
+	write_unlock(&detail->hash_lock);
+	cache_fresh_unlocked(h, detail);
+	return -ENOENT;
+}
+
 /*
  * This is the generic cache management routine for all
  * the authentication caches.
@@ -251,14 +277,8 @@
 			case -EINVAL:
 				clear_bit(CACHE_PENDING, &h->flags);
 				cache_revisit_request(h);
-				if (rv == -EAGAIN) {
-					set_bit(CACHE_NEGATIVE, &h->flags);
-					cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
-					cache_fresh_unlocked(h, detail);
-					rv = -ENOENT;
-				}
+				rv = try_to_negate_entry(detail, h);
 				break;
-
 			case -EAGAIN:
 				clear_bit(CACHE_PENDING, &h->flags);
 				cache_revisit_request(h);
@@ -268,9 +288,11 @@
 	}
 
 	if (rv == -EAGAIN) {
-		cache_defer_req(rqstp, h);
-		if (!test_bit(CACHE_PENDING, &h->flags)) {
-			/* Request is not deferred */
+		if (!cache_defer_req(rqstp, h)) {
+			/*
+			 * Request was not deferred; handle it as best
+			 * we can ourselves:
+			 */
 			rv = cache_is_valid(detail, h);
 			if (rv == -EAGAIN)
 				rv = -ETIMEDOUT;
@@ -618,18 +640,19 @@
 		discard->revisit(discard, 1);
 }
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+/* Return true if and only if a deferred request is queued. */
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
 
 	if (req->thread_wait) {
 		cache_wait_req(req, item);
 		if (!test_bit(CACHE_PENDING, &item->flags))
-			return;
+			return false;
 	}
 	dreq = req->defer(req);
 	if (dreq == NULL)
-		return;
+		return false;
 	setup_deferral(dreq, item, 1);
 	if (!test_bit(CACHE_PENDING, &item->flags))
 		/* Bit could have been cleared before we managed to
@@ -638,6 +661,7 @@
 		cache_revisit_request(item);
 
 	cache_limit_defers();
+	return true;
 }
 
 static void cache_revisit_request(struct cache_head *item)
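
The smp_wmb()/smp_rmb() pair added above follows the usual publish/observe discipline: every store to the cache entry must be visible before CACHE_VALID is set, and a reader that observes CACHE_VALID must not see stale contents. The same pairing in isolation, with placeholder names rather than sunrpc ones:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bitops.h>

#define ITEM_VALID	0

struct item {
	unsigned long	flags;		/* bit ITEM_VALID */
	int		payload;
};

static void item_publish(struct item *it, int value)
{
	it->payload = value;
	smp_wmb();	/* payload store must be visible before the flag */
	set_bit(ITEM_VALID, &it->flags);
}

static int item_read(struct item *it, int *value)
{
	if (!test_bit(ITEM_VALID, &it->flags))
		return -EAGAIN;
	smp_rmb();	/* flag check must happen before the payload load */
	*value = it->payload;
	return 0;
}
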
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 92ce94f..57d344c 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1095,7 +1095,7 @@
 rpc_xdr_encode(struct rpc_task *task)
 {
 	struct rpc_rqst	*req = task->tk_rqstp;
-	kxdrproc_t	encode;
+	kxdreproc_t	encode;
 	__be32		*p;
 
 	dprint_status(task);
@@ -1535,7 +1535,7 @@
 {
 	struct rpc_clnt	*clnt = task->tk_client;
 	struct rpc_rqst	*req = task->tk_rqstp;
-	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
+	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;
 	__be32		*p;
 
 	dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1776,12 +1776,11 @@
 	goto out_garbage;
 }
 
-static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
+static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
 {
-	return 0;
 }
 
-static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
+static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
 {
 	return 0;
 }
@@ -1830,23 +1829,15 @@
 			  const struct rpc_task *task)
 {
 	const char *rpc_waitq = "none";
-	char *p, action[KSYM_SYMBOL_LEN];
 
 	if (RPC_IS_QUEUED(task))
 		rpc_waitq = rpc_qname(task->tk_waitqueue);
 
-	/* map tk_action pointer to a function name; then trim off
-	 * the "+0x0 [sunrpc]" */
-	sprint_symbol(action, (unsigned long)task->tk_action);
-	p = strchr(action, '+');
-	if (p)
-		*p = '\0';
-
-	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
+	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
 		task->tk_pid, task->tk_flags, task->tk_status,
 		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
 		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
-		action, rpc_waitq);
+		task->tk_action, rpc_waitq);
 }
 
 void rpc_show_tasks(void)
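
The debug-output hunk above relies on the %ps vsnprintf extension, which resolves a code pointer to its symbol name in one step and makes the sprint_symbol()/strchr() trimming unnecessary. A tiny usage sketch (the surrounding function is made up):

#include <linux/kernel.h>

/* %ps prints the bare symbol name; %pS would append the +offset/size. */
static void show_handler(void (*handler)(void *))
{
	printk(KERN_INFO "current handler: %ps\n", handler);
}
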
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 09f01f4..72bc536 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -474,7 +474,7 @@
 {
 	struct inode *inode;
 
-	BUG_ON(!d_unhashed(dentry));
+	d_drop(dentry);
 	inode = rpc_get_inode(dir->i_sb, mode);
 	if (!inode)
 		goto out_err;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca..c652e4c 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@
 	RPCBPROC_GETSTAT,
 };
 
-#define RPCB_HIGHPROC_2		RPCBPROC_CALLIT
-#define RPCB_HIGHPROC_3		RPCBPROC_TADDR2UADDR
-#define RPCB_HIGHPROC_4		RPCBPROC_GETSTAT
-
 /*
  * r_owner
  *
@@ -693,46 +689,37 @@
  * XDR functions for rpcbind
  */
 
-static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
-			    const struct rpcbind_args *rpcb)
+static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
 	dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
 			task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
-	p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
-	if (unlikely(p == NULL))
-		return -EIO;
-
-	*p++ = htonl(rpcb->r_prog);
-	*p++ = htonl(rpcb->r_vers);
-	*p++ = htonl(rpcb->r_prot);
-	*p   = htonl(rpcb->r_port);
-
-	return 0;
+	p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p++ = cpu_to_be32(rpcb->r_vers);
+	*p++ = cpu_to_be32(rpcb->r_prot);
+	*p   = cpu_to_be32(rpcb->r_port);
 }
 
-static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
 	unsigned long port;
-
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	__be32 *p;
 
 	rpcb->r_port = 0;
 
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
-	port = ntohl(*p);
+	port = be32_to_cpup(p);
 	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
 			task->tk_msg.rpc_proc->p_name, port);
 	if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@
 	return 0;
 }
 
-static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
 			unsigned int *boolp)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
 	*boolp = 0;
-	if (*p)
+	if (*p != xdr_zero)
 		*boolp = 1;
 
 	dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@
 	return 0;
 }
 
-static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
-				const u32 maxstrlen)
+static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+			       const u32 maxstrlen)
 {
-	u32 len;
 	__be32 *p;
+	u32 len;
 
-	if (unlikely(string == NULL))
-		return -EIO;
 	len = strlen(string);
-	if (unlikely(len > maxstrlen))
-		return -EIO;
-
-	p = xdr_reserve_space(xdr, sizeof(__be32) + len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(len > maxstrlen);
+	p = xdr_reserve_space(xdr, 4 + len);
 	xdr_encode_opaque(p, string, len);
-
-	return 0;
 }
 
-static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
-			    const struct rpcbind_args *rpcb)
+static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
 	dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
 			task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers,
 			rpcb->r_netid, rpcb->r_addr);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p = cpu_to_be32(rpcb->r_vers);
 
-	p = xdr_reserve_space(&xdr,
-			sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
-	if (unlikely(p == NULL))
-		return -EIO;
-	*p++ = htonl(rpcb->r_prog);
-	*p = htonl(rpcb->r_vers);
-
-	if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
-		return -EIO;
-
-	return 0;
+	encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
+	encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
+	encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
 }
 
-static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct sockaddr_storage address;
 	struct sockaddr *sap = (struct sockaddr *)&address;
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 	u32 len;
 
 	rpcb->r_port = 0;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		goto out_fail;
-	len = ntohl(*p);
+	len = be32_to_cpup(p);
 
 	/*
 	 * If the returned universal address is a null string,
@@ -845,7 +810,7 @@
 	if (unlikely(len > RPCBIND_MAXUADDRLEN))
 		goto out_fail;
 
-	p = xdr_inline_decode(&xdr, len);
+	p = xdr_inline_decode(xdr, len);
 	if (unlikely(p == NULL))
 		goto out_fail;
 	dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@
 static struct rpc_procinfo rpcb_procedures2[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -881,8 +846,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -891,8 +856,8 @@
 	},
 	[RPCBPROC_GETPORT] = {
 		.p_proc		= RPCBPROC_GETPORT,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getport,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getport,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_getportres_sz,
 		.p_statidx	= RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@
 static struct rpc_procinfo rpcb_procedures3[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -914,8 +879,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -924,8 +889,8 @@
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc		= RPCBPROC_GETADDR,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_getaddrres_sz,
 		.p_statidx	= RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@
 static struct rpc_procinfo rpcb_procedures4[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -947,8 +912,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -957,8 +922,8 @@
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc		= RPCBPROC_GETADDR,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_getaddrres_sz,
 		.p_statidx	= RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@
 
 static struct rpc_version rpcb_version2 = {
 	.number		= RPCBVERS_2,
-	.nrprocs	= RPCB_HIGHPROC_2,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures2),
 	.procs		= rpcb_procedures2
 };
 
 static struct rpc_version rpcb_version3 = {
 	.number		= RPCBVERS_3,
-	.nrprocs	= RPCB_HIGHPROC_3,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures3),
 	.procs		= rpcb_procedures3
 };
 
 static struct rpc_version rpcb_version4 = {
 	.number		= RPCBVERS_4,
-	.nrprocs	= RPCB_HIGHPROC_4,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures4),
 	.procs		= rpcb_procedures4
 };
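
The rewritten rpcbind routines show the new kxdreproc_t encoder style: the stream is already set up by the caller, space is claimed with xdr_reserve_space() (the NULL check is gone because the send buffer is pre-sized from p_arglen, so a failure here would be a programming error), and words are written with cpu_to_be32(). A hedged sketch of an encoder for a made-up procedure, using only helpers that appear in this patch:

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/string.h>

struct foo_args {
	u32		prog;
	const char	*name;		/* NUL-terminated, bounded by the caller */
};

/* Hypothetical argument encoder in the new kxdreproc_t shape. */
static void foo_xdr_enc_args(struct rpc_rqst *req, struct xdr_stream *xdr,
			     const struct foo_args *args)
{
	__be32 *p;
	u32 len = strlen(args->name);

	p = xdr_reserve_space(xdr, 4);		/* fixed-size part */
	*p = cpu_to_be32(args->prog);

	p = xdr_reserve_space(xdr, 4 + len);	/* length word + opaque bytes */
	xdr_encode_opaque(p, args->name, len);
}
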
 
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 243fc09..59e5994 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -252,23 +252,37 @@
 
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
+ * and then waking up all tasks that were sleeping.
  */
-static void rpc_mark_complete_task(struct rpc_task *task)
+static int rpc_complete_task(struct rpc_task *task)
 {
-	smp_mb__before_clear_bit();
+	void *m = &task->tk_runstate;
+	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
+	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&wq->lock, flags);
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+	ret = atomic_dec_and_test(&task->tk_count);
+	if (waitqueue_active(wq))
+		__wake_up_locked_key(wq, TASK_NORMAL, &k);
+	spin_unlock_irqrestore(&wq->lock, flags);
+	return ret;
 }
 
 /*
  * Allow callers to wait for completion of an RPC call
+ *
+ * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
+ * to enforce taking of the wq->lock and hence avoid races with
+ * rpc_complete_task().
  */
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
 	if (action == NULL)
 		action = rpc_wait_bit_killable;
-	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 			action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@
 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 }
 
-void rpc_put_task(struct rpc_task *task)
+static void rpc_release_resources_task(struct rpc_task *task)
 {
-	if (!atomic_dec_and_test(&task->tk_count))
-		return;
-	/* Release resources */
 	if (task->tk_rqstp)
 		xprt_release(task);
 	if (task->tk_msg.rpc_cred)
 		put_rpccred(task->tk_msg.rpc_cred);
 	rpc_task_release_client(task);
-	if (task->tk_workqueue != NULL) {
+}
+
+static void rpc_final_put_task(struct rpc_task *task,
+		struct workqueue_struct *q)
+{
+	if (q != NULL) {
 		INIT_WORK(&task->u.tk_work, rpc_async_release);
-		queue_work(task->tk_workqueue, &task->u.tk_work);
+		queue_work(q, &task->u.tk_work);
 	} else
 		rpc_free_task(task);
 }
+
+static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
+{
+	if (atomic_dec_and_test(&task->tk_count)) {
+		rpc_release_resources_task(task);
+		rpc_final_put_task(task, q);
+	}
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
+	rpc_do_put_task(task, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_put_task);
 
+void rpc_put_task_async(struct rpc_task *task)
+{
+	rpc_do_put_task(task, task->tk_workqueue);
+}
+EXPORT_SYMBOL_GPL(rpc_put_task_async);
+
 static void rpc_release_task(struct rpc_task *task)
 {
 	dprintk("RPC: %5u release task\n", task->tk_pid);
 
 	BUG_ON (RPC_IS_QUEUED(task));
 
-	/* Wake up anyone who is waiting for task completion */
-	rpc_mark_complete_task(task);
+	rpc_release_resources_task(task);
 
-	rpc_put_task(task);
+	/*
+	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
+	 * so it should be safe to use task->tk_count as a test for whether
+	 * or not any other processes still hold references to our rpc_task.
+	 */
+	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
+		/* Wake up anyone who may be waiting for task completion */
+		if (!rpc_complete_task(task))
+			return;
+	} else {
+		if (!atomic_dec_and_test(&task->tk_count))
+			return;
+	}
+	rpc_final_put_task(task, task->tk_workqueue);
 }
 
 int rpciod_up(void)
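
The rpc_complete_task() rework closes the race between clearing RPC_TASK_ACTIVE, dropping a task reference and a waiter re-testing the bit: both the clear-and-wake and the waiter's final test now happen under the bit-waitqueue's own lock, so a waiter can neither miss the wake-up nor touch a task that has already been freed. A condensed restatement of the locking pattern with the rpc_task reference counting stripped out; word, bit and action are placeholders:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

static void complete_flag_locked(unsigned long *word, int bit)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	unsigned long flags;

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(bit, word);			/* publish completion */
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
}

static int wait_for_flag_killable(unsigned long *word, int bit,
				  int (*action)(void *))
{
	/* The out-of-line variant re-tests the bit under wq->lock, so it
	 * cannot race with complete_flag_locked() above. */
	return out_of_line_wait_on_bit(word, bit, action, TASK_KILLABLE);
}
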
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42..08e05a8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@
 	if (svc_serv_is_pooled(serv))
 		svc_pool_map_put();
 
-#if defined(CONFIG_NFS_V4_1)
-	svc_sock_destroy(serv->bc_xprt);
-#endif /* CONFIG_NFS_V4_1 */
-
 	svc_unregister(serv);
 	kfree(serv->sv_pools);
 	kfree(serv);
@@ -1005,6 +1001,7 @@
 	rqstp->rq_splice_ok = 1;
 	/* Will be turned off only when NFSv4 Sessions are used */
 	rqstp->rq_usedeferral = 1;
+	rqstp->rq_dropme = false;
 
 	/* Setup reply header */
 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1106,7 +1103,7 @@
 		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 
 		/* Encode reply */
-		if (*statp == rpc_drop_reply) {
+		if (rqstp->rq_dropme) {
 			if (procp->pc_release)
 				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
 			goto dropit;
@@ -1147,7 +1144,6 @@
  dropit:
 	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
 	dprintk("svc: svc_process dropit\n");
-	svc_drop(rqstp);
 	return 0;
 
 err_short_len:
@@ -1218,7 +1214,6 @@
 	struct kvec		*resv = &rqstp->rq_res.head[0];
 	struct svc_serv		*serv = rqstp->rq_server;
 	u32			dir;
-	int			error;
 
 	/*
 	 * Setup response xdr_buf.
@@ -1246,11 +1241,13 @@
 		return 0;
 	}
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	return svc_send(rqstp);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv))
+		return svc_send(rqstp);
+	else {
+		svc_drop(rqstp);
+		return 0;
+	}
 }
 
 #if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1261,9 @@
 {
 	struct kvec	*argv = &rqstp->rq_arg.head[0];
 	struct kvec	*resv = &rqstp->rq_res.head[0];
-	int 		error;
 
 	/* Build the svc_rqst used by the common processing routine */
-	rqstp->rq_xprt = serv->bc_xprt;
+	rqstp->rq_xprt = serv->sv_bc_xprt;
 	rqstp->rq_xid = req->rq_xid;
 	rqstp->rq_prot = req->rq_xprt->prot;
 	rqstp->rq_server = serv;
@@ -1292,12 +1288,15 @@
 	svc_getu32(argv);	/* XID */
 	svc_getnl(argv);	/* CALLDIR */
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
-	return bc_send(req);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv)) {
+		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
+						sizeof(req->rq_snd_buf));
+		return bc_send(req);
+	} else {
+		/* Nothing to do to drop request */
+		return 0;
+	}
 }
 EXPORT_SYMBOL(bc_svc_process);
 #endif /* CONFIG_NFS_V4_1 */
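
With rq_dropme, a dropped request is signalled by state on the svc_rqst rather than by the rpc_drop_reply sentinel, and the return value of the common processing path becomes a plain send/drop decision. A hedged restatement of the caller contract used at both call sites above (nothing here is new API; svc_send() and svc_drop() are the existing helpers):

#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>

/* 'sendit' is the nonzero/zero result of the common processing step:
 * nonzero means a reply is ready in rq_res, zero means the request was
 * dropped (typically because rqstp->rq_dropme was set by svc_defer()). */
static int example_finish_request(struct svc_rqst *rqstp, int sendit)
{
	if (sendit)
		return svc_send(rqstp);
	svc_drop(rqstp);
	return 0;
}
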
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3f2c555..ab86b79 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -13,6 +13,7 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
@@ -128,6 +129,9 @@
 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
 		svcauth_unix_info_release(xprt);
 	put_net(xprt->xpt_net);
+	/* See comment on corresponding get in xs_setup_bc_tcp(): */
+	if (xprt->xpt_bc_xprt)
+		xprt_put(xprt->xpt_bc_xprt);
 	xprt->xpt_ops->xpo_free(xprt);
 	module_put(owner);
 }
@@ -303,6 +307,15 @@
 	list_del(&rqstp->rq_list);
 }
 
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+		return true;
+	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+		return xprt->xpt_ops->xpo_has_wspace(xprt);
+	return false;
+}
+
 /*
  * Queue up a transport with data pending. If there are idle nfsd
  * processes, wake 'em up.
@@ -315,8 +328,7 @@
 	struct svc_rqst	*rqstp;
 	int cpu;
 
-	if (!(xprt->xpt_flags &
-	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+	if (!svc_xprt_has_something_to_do(xprt))
 		return;
 
 	cpu = get_cpu();
@@ -343,28 +355,7 @@
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
 		goto out_unlock;
 	}
-	BUG_ON(xprt->xpt_pool != NULL);
-	xprt->xpt_pool = pool;
 
-	/* Handle pending connection */
-	if (test_bit(XPT_CONN, &xprt->xpt_flags))
-		goto process;
-
-	/* Handle close in-progress */
-	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
-		goto process;
-
-	/* Check if we have space to reply to a request */
-	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
-		/* Don't enqueue while not enough space for reply */
-		dprintk("svc: no write space, transport %p  not enqueued\n",
-			xprt);
-		xprt->xpt_pool = NULL;
-		clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		goto out_unlock;
-	}
-
- process:
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
@@ -381,13 +372,11 @@
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		pool->sp_stats.threads_woken++;
-		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 		pool->sp_stats.sockets_queued++;
-		BUG_ON(xprt->xpt_pool != pool);
 	}
 
 out_unlock:
@@ -426,7 +415,6 @@
 void svc_xprt_received(struct svc_xprt *xprt)
 {
 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
-	xprt->xpt_pool = NULL;
 	/* As soon as we clear busy, the xprt could be closed and
 	 * 'put', so we need a reference to call svc_xprt_enqueue with:
 	 */
@@ -722,7 +710,10 @@
 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_xprt(xprt);
-	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+		/* Leave XPT_BUSY set on the dead xprt: */
+		goto out;
+	}
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
@@ -747,28 +738,23 @@
 			spin_unlock_bh(&serv->sv_lock);
 			svc_xprt_received(newxpt);
 		}
-		svc_xprt_received(xprt);
-	} else {
+	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
 			rqstp, pool->sp_id, xprt,
 			atomic_read(&xprt->xpt_ref.refcount));
 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
-		if (rqstp->rq_deferred) {
-			svc_xprt_received(xprt);
+		if (rqstp->rq_deferred)
 			len = svc_deferred_recv(rqstp);
-		} else {
+		else
 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
-			svc_xprt_received(xprt);
-		}
 		dprintk("svc: got len=%d\n", len);
 	}
+	svc_xprt_received(xprt);
 
 	/* No data, incomplete (TCP) read, or accept() */
-	if (len == 0 || len == -EAGAIN) {
-		rqstp->rq_res.len = 0;
-		svc_xprt_release(rqstp);
-		return -EAGAIN;
-	}
+	if (len == 0 || len == -EAGAIN)
+		goto out;
+
 	clear_bit(XPT_OLD, &xprt->xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -777,6 +763,10 @@
 	if (serv->sv_stats)
 		serv->sv_stats->netcnt++;
 	return len;
+out:
+	rqstp->rq_res.len = 0;
+	svc_xprt_release(rqstp);
+	return -EAGAIN;
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
@@ -935,7 +925,12 @@
 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
 		/* someone else will have to effect the close */
 		return;
-
+	/*
+	 * We expect svc_close_xprt() to work even when no threads are
+	 * running (e.g., while configuring the server before starting
+	 * any threads), so if the transport isn't busy, we delete
+	 * it ourselves:
+	 */
 	svc_delete_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
@@ -945,16 +940,16 @@
 	struct svc_xprt *xprt;
 	struct svc_xprt *tmp;
 
+	/*
+	 * The server is shutting down, and no more threads are running.
+	 * svc_xprt_enqueue() might still be running, but at worst it
+	 * will re-add the xprt to sp_sockets, which will soon get
+	 * freed.  So we don't bother with any more locking, and don't
+	 * leave the close to the (nonexistent) server threads:
+	 */
 	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
-			/* Waiting to be processed, but no threads left,
-			 * So just remove it from the waiting list
-			 */
-			list_del_init(&xprt->xpt_ready);
-			clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		}
-		svc_close_xprt(xprt);
+		svc_delete_xprt(xprt);
 	}
 }
 
@@ -1028,6 +1023,7 @@
 	}
 	svc_xprt_get(rqstp->rq_xprt);
 	dr->xprt = rqstp->rq_xprt;
+	rqstp->rq_dropme = true;
 
 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -1065,14 +1061,13 @@
 	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
 		return NULL;
 	spin_lock(&xprt->xpt_lock);
-	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	if (!list_empty(&xprt->xpt_deferred)) {
 		dr = list_entry(xprt->xpt_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
-	}
+	} else
+		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	spin_unlock(&xprt->xpt_lock);
 	return dr;
 }
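
The svc_deferred_dequeue() change tightens the rule that XPT_DEFERRED mirrors whether the deferred list is non-empty: the flag is now cleared only when the list is seen empty under xpt_lock, instead of being cleared up front and conditionally re-set. The same idiom in a self-contained form, with placeholder names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#define QUEUE_PENDING	0

struct pending_queue {
	spinlock_t		lock;	/* initialized by the owner */
	struct list_head	items;	/* initialized by the owner */
	unsigned long		flags;	/* QUEUE_PENDING set while items exist */
};

static struct list_head *pending_dequeue(struct pending_queue *q)
{
	struct list_head *entry = NULL;

	if (!test_bit(QUEUE_PENDING, &q->flags))
		return NULL;			/* cheap lockless fast path */
	spin_lock(&q->lock);
	if (!list_empty(&q->items)) {
		entry = q->items.next;
		list_del_init(entry);
	} else
		clear_bit(QUEUE_PENDING, &q->flags);
	spin_unlock(&q->lock);
	return entry;
}
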
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 4e9393c..7963569 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -118,7 +118,6 @@
 
 #define	DN_HASHBITS	6
 #define	DN_HASHMAX	(1<<DN_HASHBITS)
-#define	DN_HASHMASK	(DN_HASHMAX-1)
 
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
 static spinlock_t	auth_domain_lock =
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 560677d..30916b0 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -30,7 +30,9 @@
 
 struct unix_domain {
 	struct auth_domain	h;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int	addr_changes;
+#endif /* CONFIG_NFSD_DEPRECATED */
 	/* other stuff later */
 };
 
@@ -64,7 +66,9 @@
 			return NULL;
 		}
 		new->h.flavour = &svcauth_unix;
+#ifdef CONFIG_NFSD_DEPRECATED
 		new->addr_changes = 0;
+#endif /* CONFIG_NFSD_DEPRECATED */
 		rv = auth_domain_lookup(name, &new->h);
 	}
 }
@@ -85,14 +89,15 @@
  */
 #define	IP_HASHBITS	8
 #define	IP_HASHMAX	(1<<IP_HASHBITS)
-#define	IP_HASHMASK	(IP_HASHMAX-1)
 
 struct ip_map {
 	struct cache_head	h;
 	char			m_class[8]; /* e.g. "nfsd" */
 	struct in6_addr		m_addr;
 	struct unix_domain	*m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int			m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 };
 
 static void ip_map_put(struct kref *kref)
@@ -146,7 +151,9 @@
 
 	kref_get(&item->m_client->h.ref);
 	new->m_client = item->m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	new->m_add_change = item->m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 }
 static struct cache_head *ip_map_alloc(void)
 {
@@ -331,6 +338,7 @@
 	ip.h.flags = 0;
 	if (!udom)
 		set_bit(CACHE_NEGATIVE, &ip.h.flags);
+#ifdef CONFIG_NFSD_DEPRECATED
 	else {
 		ip.m_add_change = udom->addr_changes;
 		/* if this is from the legacy set_client system call,
@@ -339,6 +347,7 @@
 		if (expiry == NEVER)
 			ip.m_add_change++;
 	}
+#endif /* CONFIG_NFSD_DEPRECATED */
 	ip.h.expiry_time = expiry;
 	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
@@ -358,6 +367,7 @@
 	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
+#ifdef CONFIG_NFSD_DEPRECATED
 int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
 {
 	struct unix_domain *udom;
@@ -402,8 +412,7 @@
 		return NULL;
 
 	if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
-		if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
-			auth_domain_put(&ipm->m_client->h);
+		sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
 		rv = NULL;
 	} else {
 		rv = &ipm->m_client->h;
@@ -413,6 +422,7 @@
 	return rv;
 }
 EXPORT_SYMBOL_GPL(auth_unix_lookup);
+#endif /* CONFIG_NFSD_DEPRECATED */
 
 void svcauth_unix_purge(void)
 {
@@ -497,7 +507,6 @@
  */
 #define	GID_HASHBITS	8
 #define	GID_HASHMAX	(1<<GID_HASHBITS)
-#define	GID_HASHMASK	(GID_HASHMAX - 1)
 
 struct unix_gid {
 	struct cache_head	h;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e1..d802e94 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
 					  struct net *, struct sockaddr *,
 					  int, int);
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key svc_key[2];
 static struct lock_class_key svc_slock_key[2];
@@ -324,19 +331,21 @@
 			len = onelen;
 			break;
 		}
-		if (toclose && strcmp(toclose, buf + len) == 0)
+		if (toclose && strcmp(toclose, buf + len) == 0) {
 			closesk = svsk;
-		else
+			svc_xprt_get(&closesk->sk_xprt);
+		} else
 			len += onelen;
 	}
 	spin_unlock_bh(&serv->sv_lock);
 
-	if (closesk)
+	if (closesk) {
 		/* Should unregister with portmap, but you cannot
 		 * unregister just one protocol...
 		 */
 		svc_close_xprt(&closesk->sk_xprt);
-	else if (toclose)
+		svc_xprt_put(&closesk->sk_xprt);
+	} else if (toclose)
 		return -ENOENT;
 	return len;
 }
@@ -985,15 +994,17 @@
 		vec[0] = rqstp->rq_arg.head[0];
 	} else {
 		/* REPLY */
-		if (svsk->sk_bc_xprt)
-			req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+		struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
+
+		if (bc_xprt)
+			req = xprt_lookup_rqst(bc_xprt, xid);
 
 		if (!req) {
 			printk(KERN_NOTICE
 				"%s: Got unrecognized reply: "
-				"calldir 0x%x sk_bc_xprt %p xid %08x\n",
+				"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
 				__func__, ntohl(calldir),
-				svsk->sk_bc_xprt, xid);
+				bc_xprt, xid);
 			vec[0] = rqstp->rq_arg.head[0];
 			goto out;
 		}
@@ -1184,6 +1195,57 @@
 	return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
 }
 
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+
+static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
+				       struct net *net,
+				       struct sockaddr *sa, int salen,
+				       int flags)
+{
+	return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
+}
+
+static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+}
+
+static struct svc_xprt_ops svc_tcp_bc_ops = {
+	.xpo_create = svc_bc_tcp_create,
+	.xpo_detach = svc_bc_tcp_sock_detach,
+	.xpo_free = svc_bc_sock_free,
+	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
+};
+
+static struct svc_xprt_class svc_tcp_bc_class = {
+	.xcl_name = "tcp-bc",
+	.xcl_owner = THIS_MODULE,
+	.xcl_ops = &svc_tcp_bc_ops,
+	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+};
+
+static void svc_init_bc_xprt_sock(void)
+{
+	svc_reg_xprt_class(&svc_tcp_bc_class);
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+	svc_unreg_xprt_class(&svc_tcp_bc_class);
+}
+#else /* CONFIG_NFS_V4_1 */
+static void svc_init_bc_xprt_sock(void)
+{
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
 static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_create = svc_tcp_create,
 	.xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1269,14 @@
 {
 	svc_reg_xprt_class(&svc_tcp_class);
 	svc_reg_xprt_class(&svc_udp_class);
+	svc_init_bc_xprt_sock();
 }
 
 void svc_cleanup_xprt_sock(void)
 {
 	svc_unreg_xprt_class(&svc_tcp_class);
 	svc_unreg_xprt_class(&svc_udp_class);
+	svc_cleanup_bc_xprt_sock();
 }
 
 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1573,43 @@
 	kfree(svsk);
 }
 
+#if defined(CONFIG_NFS_V4_1)
 /*
- * Create a svc_xprt.
- *
- * For internal use only (e.g. nfsv4.1 backchannel).
- * Callers should typically use the xpo_create() method.
+ * Create a back channel svc_xprt which shares the fore channel socket.
  */
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
+					     int protocol,
+					     struct net *net,
+					     struct sockaddr *sin, int len,
+					     int flags)
 {
 	struct svc_sock *svsk;
-	struct svc_xprt *xprt = NULL;
+	struct svc_xprt *xprt;
 
-	dprintk("svc: %s\n", __func__);
+	if (protocol != IPPROTO_TCP) {
+		printk(KERN_WARNING "svc: only TCP sockets"
+			" supported on shared back channel\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
 	if (!svsk)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	xprt = &svsk->sk_xprt;
-	if (prot == IPPROTO_TCP)
-		svc_xprt_init(&svc_tcp_class, xprt, serv);
-	else if (prot == IPPROTO_UDP)
-		svc_xprt_init(&svc_udp_class, xprt, serv);
-	else
-		BUG();
-out:
-	dprintk("svc: %s return %p\n", __func__, xprt);
+	svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+
+	serv->sv_bc_xprt = xprt;
+
 	return xprt;
 }
-EXPORT_SYMBOL_GPL(svc_sock_create);
 
 /*
- * Destroy a svc_sock.
+ * Free a back channel svc_sock.
  */
-void svc_sock_destroy(struct svc_xprt *xprt)
+static void svc_bc_sock_free(struct svc_xprt *xprt)
 {
 	if (xprt)
 		kfree(container_of(xprt, struct svc_sock, sk_xprt));
 }
-EXPORT_SYMBOL_GPL(svc_sock_destroy);
+#endif /* CONFIG_NFS_V4_1 */
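
svc_tcp_bc_class above shows how small a server transport class can be when it never receives on a socket of its own: only create, detach, free and reply-header preparation are wired up, and the class becomes visible to svc_create_xprt() by name once registered. A generic registration skeleton along the same lines; the class, its ops and their stub bodies are illustrative only, and a transport that actually receives data would also have to supply xpo_recvfrom, xpo_sendto and xpo_has_wspace:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

static struct svc_xprt *example_xpo_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	return ERR_PTR(-EINVAL);	/* stub */
}

static void example_xpo_detach(struct svc_xprt *xprt)
{
}

static void example_xpo_free(struct svc_xprt *xprt)
{
}

static struct svc_xprt_ops example_xprt_ops = {
	.xpo_create	= example_xpo_create,
	.xpo_detach	= example_xpo_detach,
	.xpo_free	= example_xpo_free,
};

static struct svc_xprt_class example_xprt_class = {
	.xcl_name	= "example",
	.xcl_owner	= THIS_MODULE,
	.xcl_ops	= &example_xprt_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

static void example_register_xprt_class(void)
{
	svc_reg_xprt_class(&example_xprt_class);
}

static void example_unregister_xprt_class(void)
{
	svc_unreg_xprt_class(&example_xprt_class);
}
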
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841..679cd67 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@
 }
 EXPORT_SYMBOL_GPL(xdr_write_pages);
 
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+		__be32 *p, unsigned int len)
+{
+	if (len > iov->iov_len)
+		len = iov->iov_len;
+	if (p == NULL)
+		p = (__be32*)iov->iov_base;
+	xdr->p = p;
+	xdr->end = (__be32*)(iov->iov_base + len);
+	xdr->iov = iov;
+	xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+		unsigned int base, unsigned int len)
+{
+	unsigned int pgnr;
+	unsigned int maxlen;
+	unsigned int pgoff;
+	unsigned int pgend;
+	void *kaddr;
+
+	maxlen = xdr->buf->page_len;
+	if (base >= maxlen)
+		return -EINVAL;
+	maxlen -= base;
+	if (len > maxlen)
+		len = maxlen;
+
+	base += xdr->buf->page_base;
+
+	pgnr = base >> PAGE_SHIFT;
+	xdr->page_ptr = &xdr->buf->pages[pgnr];
+	kaddr = page_address(*xdr->page_ptr);
+
+	pgoff = base & ~PAGE_MASK;
+	xdr->p = (__be32*)(kaddr + pgoff);
+
+	pgend = pgoff + len;
+	if (pgend > PAGE_SIZE)
+		pgend = PAGE_SIZE;
+	xdr->end = (__be32*)(kaddr + pgend);
+	xdr->iov = NULL;
+	return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+	unsigned int newbase;
+
+	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+	newbase -= xdr->buf->page_base;
+
+	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+	if (xdr->page_ptr != NULL)
+		xdr_set_next_page(xdr);
+	else if (xdr->iov == xdr->buf->head) {
+		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+	}
+	return xdr->p != xdr->end;
+}
+
 /**
  * xdr_init_decode - Initialize an xdr_stream for decoding data.
  * @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@
  */
 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
 {
-	struct kvec *iov = buf->head;
-	unsigned int len = iov->iov_len;
-
-	if (len > buf->len)
-		len = buf->len;
 	xdr->buf = buf;
-	xdr->iov = iov;
-	xdr->p = p;
-	xdr->end = (__be32 *)((char *)iov->iov_base + len);
+	xdr->scratch.iov_base = NULL;
+	xdr->scratch.iov_len = 0;
+	if (buf->head[0].iov_len != 0)
+		xdr_set_iov(xdr, buf->head, p, buf->len);
+	else if (buf->page_len != 0)
+		xdr_set_page_base(xdr, 0, buf->len);
 }
 EXPORT_SYMBOL_GPL(xdr_init_decode);
 
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
- * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
 	__be32 *p = xdr->p;
 	__be32 *q = p + XDR_QUADLEN(nbytes);
 
 	if (unlikely(q > xdr->end || q < p))
 		return NULL;
+	xdr->p = q;
 	return p;
 }
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
 
 /**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+	xdr->scratch.iov_base = buf;
+	xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+	__be32 *p;
+	void *cpdest = xdr->scratch.iov_base;
+	size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+	if (nbytes > xdr->scratch.iov_len)
+		return NULL;
+	memcpy(cpdest, xdr->p, cplen);
+	cpdest += cplen;
+	nbytes -= cplen;
+	if (!xdr_set_next_buffer(xdr))
+		return NULL;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p == NULL)
+		return NULL;
+	memcpy(cpdest, p, nbytes);
+	return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
  * @xdr: pointer to xdr_stream struct
  * @nbytes: number of bytes of data to decode
  *
@@ -605,13 +699,16 @@
  */
 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
-	__be32 *p = xdr->p;
-	__be32 *q = p + XDR_QUADLEN(nbytes);
+	__be32 *p;
 
-	if (unlikely(q > xdr->end || q < p))
+	if (nbytes == 0)
+		return xdr->p;
+	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
 		return NULL;
-	xdr->p = q;
-	return p;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p != NULL)
+		return p;
+	return xdr_copy_to_scratch(xdr, nbytes);
 }
 EXPORT_SYMBOL_GPL(xdr_inline_decode);
 
@@ -671,16 +768,12 @@
  */
 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
 {
-	char * kaddr = page_address(xdr->buf->pages[0]);
 	xdr_read_pages(xdr, len);
 	/*
 	 * Position current pointer at beginning of tail, and
 	 * set remaining message length.
 	 */
-	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
-		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
-	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
-	xdr->end = (__be32 *)((char *)xdr->p + len);
+	xdr_set_page_base(xdr, 0, len);
 }
 EXPORT_SYMBOL_GPL(xdr_enter_page);
 
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4c8f18a..856274d 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -965,6 +965,7 @@
 	xprt = kzalloc(size, GFP_KERNEL);
 	if (xprt == NULL)
 		goto out;
+	kref_init(&xprt->kref);
 
 	xprt->max_reqs = max_req;
 	xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1101,8 +1102,10 @@
 				-PTR_ERR(xprt));
 		return xprt;
 	}
+	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
+		/* ->setup returned a pre-initialized xprt: */
+		return xprt;
 
-	kref_init(&xprt->kref);
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 9df1ead..1a10dcd 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1335,6 +1335,7 @@
 					    p, 0, length, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
 		put_page(p);
+		svc_rdma_put_context(ctxt, 1);
 		return;
 	}
 	atomic_inc(&xprt->sc_dma_used);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 96549df..be96d42 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1631,7 +1631,8 @@
 	}
 	xs_reclassify_socket(family, sock);
 
-	if (xs_bind(transport, sock)) {
+	err = xs_bind(transport, sock);
+	if (err) {
 		sock_release(sock);
 		goto out;
 	}
@@ -2359,6 +2360,15 @@
 	struct svc_sock *bc_sock;
 	struct rpc_xprt *ret;
 
+	if (args->bc_xprt->xpt_bc_xprt) {
+		/*
+		 * This server connection already has a backchannel
+		 * export; we can't create a new one, as we wouldn't be
+		 * able to match replies based on xid any more.  So,
+		 * reuse the already-existing one:
+		 */
+		 return args->bc_xprt->xpt_bc_xprt;
+	}
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
 		return xprt;
@@ -2375,16 +2385,6 @@
 	xprt->reestablish_timeout = 0;
 	xprt->idle_timeout = 0;
 
-	/*
-	 * The backchannel uses the same socket connection as the
-	 * forechannel
-	 */
-	xprt->bc_xprt = args->bc_xprt;
-	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
-	bc_sock->sk_bc_xprt = xprt;
-	transport->sock = bc_sock->sk_sock;
-	transport->inet = bc_sock->sk_sk;
-
 	xprt->ops = &bc_tcp_ops;
 
 	switch (addr->sa_family) {
@@ -2407,6 +2407,20 @@
 			xprt->address_strings[RPC_DISPLAY_PROTO]);
 
 	/*
+	 * Once we've associated a backchannel xprt with a connection,
+	 * we want to keep it around as long as the connection
+	 * lasts, in case we need to start using it for a backchannel
+	 * again; this reference won't be dropped until bc_xprt is
+	 * destroyed.
+	 */
+	xprt_get(xprt);
+	args->bc_xprt->xpt_bc_xprt = xprt;
+	xprt->bc_xprt = args->bc_xprt;
+	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+	transport->sock = bc_sock->sk_sock;
+	transport->inet = bc_sock->sk_sk;
+
+	/*
 	 * Since we don't want connections for the backchannel, we set
 	 * the xprt status to connected
 	 */
@@ -2415,6 +2429,7 @@
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	xprt_put(xprt);
 	ret = ERR_PTR(-EINVAL);
 out_err:
 	xprt_free(xprt);
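
The lifetime rule introduced above (the svc_xprt pins the backchannel rpc_xprt with xprt_get() for as long as xpt_bc_xprt may be handed out again, and the reference is dropped from svc_xprt_free()) is the usual "a cache holds its own reference" discipline. A self-contained illustration of the same rule with kref, using made-up types rather than sunrpc ones:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct session {
	struct kref	ref;
};

struct conn {
	struct session	*cached;	/* owns one reference while non-NULL */
};

static void session_release(struct kref *kref)
{
	kfree(container_of(kref, struct session, ref));
}

/* Return the cached session, creating (and pinning) one on first use. */
static struct session *conn_get_session(struct conn *c)
{
	if (c->cached)
		return c->cached;	/* reuse; never create a duplicate */
	c->cached = kzalloc(sizeof(*c->cached), GFP_KERNEL);
	if (c->cached)
		kref_init(&c->cached->ref);	/* this reference belongs to 'c' */
	return c->cached;
}

/* Drop the cache's reference, e.g. when 'c' itself is torn down. */
static void conn_put_session(struct conn *c)
{
	if (c->cached) {
		kref_put(&c->cached->ref, session_release);
		c->cached = NULL;
	}
}
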
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d2..437a99e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1724,7 +1724,11 @@
 
 	msg->msg_namelen = 0;
 
-	mutex_lock(&u->readlock);
+	err = mutex_lock_interruptible(&u->readlock);
+	if (err) {
+		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+		goto out;
+	}
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb) {
@@ -1864,7 +1868,11 @@
 		memset(&tmp_scm, 0, sizeof(tmp_scm));
 	}
 
-	mutex_lock(&u->readlock);
+	err = mutex_lock_interruptible(&u->readlock);
+	if (err) {
+		err = sock_intr_errno(timeo);
+		goto out;
+	}
 
 	do {
 		int chunk;
@@ -1895,11 +1903,12 @@
 
 			timeo = unix_stream_data_wait(sk, timeo);
 
-			if (signal_pending(current)) {
+			if (signal_pending(current)
+			    ||  mutex_lock_interruptible(&u->readlock)) {
 				err = sock_intr_errno(timeo);
 				goto out;
 			}
-			mutex_lock(&u->readlock);
+
 			continue;
  unlock:
 			unix_state_unlock(sk);
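
The af_unix hunks replace unconditional mutex_lock() on u->readlock in the receive paths with mutex_lock_interruptible(), so a reader stuck behind another task holding the mutex can still be interrupted or killed; sock_intr_errno() turns the interruption into the EINTR/ERESTARTSYS value userspace expects for the socket's receive timeout. A minimal sketch of the pattern, assuming only core socket helpers:

#include <linux/mutex.h>
#include <net/sock.h>

/* Take a per-socket mutex without blocking uninterruptibly; on a signal,
 * return the errno the socket layer expects for this receive timeout. */
static int recv_lock_interruptible(struct sock *sk, struct mutex *lock,
				   int noblock)
{
	if (mutex_lock_interruptible(lock))
		return sock_intr_errno(sock_rcvtimeo(sk, noblock));
	return 0;
}
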
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index d0ee290..1f1ef70 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,7 +95,7 @@
 	  If unsure, say N.
 
 config CFG80211_INTERNAL_REGDB
-	bool "use statically compiled regulatory rules database" if EMBEDDED
+	bool "use statically compiled regulatory rules database" if EXPERT
 	default n
 	depends on CFG80211
 	---help---
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3e5dbd4..d112f03 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -802,11 +802,11 @@
 			return freq;
 		if (freq == 0)
 			return -EINVAL;
-		wdev_lock(wdev);
 		mutex_lock(&rdev->devlist_mtx);
+		wdev_lock(wdev);
 		err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
-		mutex_unlock(&rdev->devlist_mtx);
 		wdev_unlock(wdev);
+		mutex_unlock(&rdev->devlist_mtx);
 		return err;
 	default:
 		return -EOPNOTSUPP;
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 55187c8..4062075 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -27,9 +27,19 @@
 #include <net/sock.h>
 #include <net/x25.h>
 
-/*
- * Parse a set of facilities into the facilities structures. Unrecognised
- *	facilities are written to the debug log file.
+/**
+ * x25_parse_facilities - Parse facilities from skb into the facilities structs
+ *
+ * @skb: sk_buff to parse
+ * @facilities: Regular facilities, updated as facilities are found
+ * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
+ * @vc_fac_mask: mask is updated with all facilities found
+ *
+ * Return codes:
+ *  -1 - Parsing error, caller should drop call and clean up
+ *   0 - Parse OK, this skb has no facilities
+ *  >0 - Parse OK, returns the length of the facilities header
+ *
  */
 int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 		struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
@@ -62,7 +72,7 @@
 		switch (*p & X25_FAC_CLASS_MASK) {
 		case X25_FAC_CLASS_A:
 			if (len < 2)
-				return 0;
+				return -1;
 			switch (*p) {
 			case X25_FAC_REVERSE:
 				if((p[1] & 0x81) == 0x81) {
@@ -107,7 +117,7 @@
 			break;
 		case X25_FAC_CLASS_B:
 			if (len < 3)
-				return 0;
+				return -1;
 			switch (*p) {
 			case X25_FAC_PACKET_SIZE:
 				facilities->pacsize_in  = p[1];
@@ -130,7 +140,7 @@
 			break;
 		case X25_FAC_CLASS_C:
 			if (len < 4)
-				return 0;
+				return -1;
 			printk(KERN_DEBUG "X.25: unknown facility %02X, "
 			       "values %02X, %02X, %02X\n",
 			       p[0], p[1], p[2], p[3]);
@@ -139,18 +149,18 @@
 			break;
 		case X25_FAC_CLASS_D:
 			if (len < p[1] + 2)
-				return 0;
+				return -1;
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-					return 0;
+					return -1;
 				dte_facs->calling_len = p[2];
 				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLING_AE;
 				break;
 			case X25_FAC_CALLED_AE:
 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-					return 0;
+					return -1;
 				dte_facs->called_len = p[2];
 				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLED_AE;
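
With these changes a caller can distinguish a malformed facilities block (negative return) from an absent one (zero) instead of treating both as length zero; the real caller update follows in x25_in.c below. A compact restatement of the three-way contract as a hypothetical wrapper:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/x25.h>

static int example_consume_facilities(struct sk_buff *skb,
				      struct x25_facilities *fac,
				      struct x25_dte_facilities *dte,
				      unsigned long *vc_fac_mask)
{
	int len = x25_parse_facilities(skb, fac, dte, vc_fac_mask);

	if (len < 0)
		return -EPROTO;		/* malformed: caller clears the call */
	if (len > 0)
		skb_pull(skb, len);	/* consume the facilities block */
	return 0;			/* len == 0: nothing present */
}
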
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index f729f02..15de65f 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -91,10 +91,10 @@
 {
 	struct x25_address source_addr, dest_addr;
 	int len;
+	struct x25_sock *x25 = x25_sk(sk);
 
 	switch (frametype) {
 		case X25_CALL_ACCEPTED: {
-			struct x25_sock *x25 = x25_sk(sk);
 
 			x25_stop_timer(sk);
 			x25->condition = 0x00;
@@ -113,14 +113,16 @@
 						&dest_addr);
 			if (len > 0)
 				skb_pull(skb, len);
+			else if (len < 0)
+				goto out_clear;
 
 			len = x25_parse_facilities(skb, &x25->facilities,
 						&x25->dte_facilities,
 						&x25->vc_facil_mask);
 			if (len > 0)
 				skb_pull(skb, len);
-			else
-				return -1;
+			else if (len < 0)
+				goto out_clear;
 			/*
 			 *	Copy any Call User Data.
 			 */
@@ -144,6 +146,12 @@
 	}
 
 	return 0;
+
+out_clear:
+	x25_write_internal(sk, X25_CLEAR_REQUEST);
+	x25->state = X25_STATE_2;
+	x25_start_t23timer(sk);
+	return 0;
 }
 
 /*
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 4cbc942..2130692 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -396,9 +396,12 @@
 	write_lock_bh(&x25_neigh_list_lock);
 
 	list_for_each_safe(entry, tmp, &x25_neigh_list) {
+		struct net_device *dev;
+
 		nb = list_entry(entry, struct x25_neigh, node);
+		dev = nb->dev;
 		__x25_remove_neigh(nb);
-		dev_put(nb->dev);
+		dev_put(dev);
 	}
 	write_unlock_bh(&x25_neigh_list_lock);
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8b3ef40..6459588 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1340,10 +1340,13 @@
 	default:
 		BUG();
 	}
-	xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+	xdst = dst_alloc(dst_ops);
 	xfrm_policy_put_afinfo(afinfo);
 
-	xdst->flo.ops = &xfrm_bundle_fc_ops;
+	if (likely(xdst))
+		xdst->flo.ops = &xfrm_bundle_fc_ops;
+	else
+		xdst = ERR_PTR(-ENOBUFS);
 
 	return xdst;
 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb8895..6129196 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 	*props = algo->desc.sadb_alg_id;
 
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 396da16..1c702ca 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -262,6 +262,34 @@
 	lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
 	(rm -f $@ ; false)
 
+# XZ
+# ---------------------------------------------------------------------------
+# Use xzkern to compress the kernel image and xzmisc to compress other things.
+#
+# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage
+# of the kernel decompressor. A BCJ filter is used if it is available for
+# the target architecture. xzkern also appends uncompressed size of the data
+# using size_append. The .xz format has the size information available at
+# the end of the file too, but it's in a more complex format and it's good to
+# avoid changing the part of the boot code that reads the uncompressed size.
+# Note that the bytes added by size_append will make the xz tool think that
+# the file is corrupt. This is expected.
+#
+# xzmisc doesn't use size_append, so it can be used to create normal .xz
+# files. xzmisc uses a smaller LZMA2 dictionary than xzkern, because a very
+# big dictionary would increase the memory usage too much in the multi-call
+# decompression mode. A BCJ filter isn't used either.
+quiet_cmd_xzkern = XZKERN  $@
+cmd_xzkern = (cat $(filter-out FORCE,$^) | \
+	sh $(srctree)/scripts/xz_wrap.sh && \
+	$(call size_append, $(filter-out FORCE,$^))) > $@ || \
+	(rm -f $@ ; false)
+
+quiet_cmd_xzmisc = XZMISC  $@
+cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
+	xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
+	(rm -f $@ ; false)
+
 # misc stuff
 # ---------------------------------------------------------------------------
 quote:="
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index c9a16ab..291228e 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -309,12 +309,18 @@
 	close(fd);
 }
 
+/*
+ * Important: The below generated source_foo.o and deps_foo.o variable
+ * assignments are parsed not only by make, but also by the rather simple
+ * parser in scripts/mod/sumversion.c.
+ */
 static void parse_dep_file(void *map, size_t len)
 {
 	char *m = map;
 	char *end = m + len;
 	char *p;
 	char s[PATH_MAX];
+	int first;
 
 	p = strchr(m, ':');
 	if (!p) {
@@ -322,11 +328,11 @@
 		exit(1);
 	}
 	memcpy(s, m, p-m); s[p-m] = 0;
-	printf("deps_%s := \\\n", target);
 	m = p+1;
 
 	clear_config();
 
+	first = 1;
 	while (m < end) {
 		while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
 			m++;
@@ -340,9 +346,20 @@
 		if (strrcmp(s, "include/generated/autoconf.h") &&
 		    strrcmp(s, "arch/um/include/uml-config.h") &&
 		    strrcmp(s, ".ver")) {
-			printf("  %s \\\n", s);
+			/*
+			 * Do not list the source file as dependency, so that
+			 * kbuild is not confused if a .c file is rewritten
+			 * into .S or vice versa. Storing it in source_* is
+			 * needed for modpost to compute srcversions.
+			 */
+			if (first) {
+				printf("source_%s := %s\n\n", target, s);
+				printf("deps_%s := \\\n", target);
+			} else
+				printf("  %s \\\n", s);
 			do_config_file(s);
 		}
+		first = 0;
 		m = p + 1;
 	}
 	printf("\n%s: $(deps_%s)\n\n", target, target);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e3c7fc0..4c0383d 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -859,7 +859,7 @@
 				$av_preprocessor = 0;
 			}
 
-		} elsif ($cur =~ /^(\(\s*$Type\s*)\)/) {
+		} elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
 			print "CAST($1)\n" if ($dbg_values > 1);
 			push(@av_paren_type, $type);
 			$type = 'C';
@@ -2743,6 +2743,11 @@
 			WARN("plain inline is preferred over $1\n" . $herecurr);
 		}
 
+# Check for __attribute__ packed, prefer __packed
+		if ($line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
+			WARN("__packed is preferred over __attribute__((packed))\n" . $herecurr);
+		}
+
 # check for sizeof(&)
 		if ($line =~ /\bsizeof\s*\(\s*\&/) {
 			WARN("sizeof(& should be avoided\n" . $herecurr);
@@ -2785,10 +2790,15 @@
 		}
 
 # check for pointless casting of kmalloc return
-		if ($line =~ /\*\s*\)\s*k[czm]alloc\b/) {
+		if ($line =~ /\*\s*\)\s*[kv][czm]alloc(_node){0,1}\b/) {
 			WARN("unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
 		}
 
+# check for multiple semicolons
+		if ($line =~ /;\s*;\s*$/) {
+		    WARN("Statements terminations use 1 semicolon\n" . $herecurr);
+		}
+
 # check for gcc specific __FUNCTION__
 		if ($line =~ /__FUNCTION__/) {
 			WARN("__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr);
@@ -2892,6 +2902,11 @@
 				ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
 			}
 		}
+
+		if ($line =~ /debugfs_create_file.*S_IWUGO/ ||
+		    $line =~ /DEVICE_ATTR.*S_IWUGO/ ) {
+			WARN("Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+		}
 	}
 
 	# If we have no input at all, then there is nothing to report on
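
For reference, a hypothetical C fragment (not from the tree) that exercises the new checks added above: the __attribute__((packed)) spelling, a cast on a kzalloc() return, a doubled semicolon, and a world-writable DEVICE_ATTR:

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/stat.h>

struct packed_hdr {
	u8	type;
	u32	len;
} __attribute__((packed));			/* flagged: prefer __packed */

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	return count;
}
static DEVICE_ATTR(reset, S_IWUGO, NULL, reset_store);	/* flagged: world writable */

static char *make_buf(size_t len)
{
	/* flagged: pointless cast on the allocator return and a doubled ';' */
	char *p = (char *)kzalloc(len, GFP_KERNEL);;
	return p;
}
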
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 5958fff..55caecd 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -243,6 +243,8 @@
 		echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
 		echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
 		echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+		echo "$output_file" | grep -q "\.xz$" && \
+				compr="xz --check=crc32 --lzma2=dict=1MiB"
 		echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
 		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
 		shift
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index d21ec3a..139e0ff 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
 use strict;
 
 my $P = $0;
-my $V = '0.26-beta6';
+my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -40,7 +40,7 @@
 my $output_multiline = 1;
 my $output_separator = ", ";
 my $output_roles = 0;
-my $output_rolestats = 0;
+my $output_rolestats = 1;
 my $scm = 0;
 my $web = 0;
 my $subsystem = 0;
@@ -494,6 +494,40 @@
 
 exit($exit);
 
+sub range_is_maintained {
+    my ($start, $end) = @_;
+
+    for (my $i = $start; $i < $end; $i++) {
+	my $line = $typevalue[$i];
+	if ($line =~ m/^(\C):\s*(.*)/) {
+	    my $type = $1;
+	    my $value = $2;
+	    if ($type eq 'S') {
+		if ($value =~ /(maintain|support)/i) {
+		    return 1;
+		}
+	    }
+	}
+    }
+    return 0;
+}
+
+sub range_has_maintainer {
+    my ($start, $end) = @_;
+
+    for (my $i = $start; $i < $end; $i++) {
+	my $line = $typevalue[$i];
+	if ($line =~ m/^(\C):\s*(.*)/) {
+	    my $type = $1;
+	    my $value = $2;
+	    if ($type eq 'M') {
+		return 1;
+	    }
+	}
+    }
+    return 0;
+}
+
 sub get_maintainers {
     %email_hash_name = ();
     %email_hash_address = ();
@@ -556,7 +590,9 @@
 				my $file_pd = ($file  =~ tr@/@@);
 				$value_pd++ if (substr($value,-1,1) ne "/");
 				$value_pd = -1 if ($value =~ /^\.\*/);
-				if ($value_pd >= $file_pd) {
+				if ($value_pd >= $file_pd &&
+				    range_is_maintained($start, $end) &&
+				    range_has_maintainer($start, $end)) {
 				    $exact_pattern_match_hash{$file} = 1;
 				}
 				if ($pattern_depth == 0 ||
@@ -720,7 +756,8 @@
   --help => show this help information
 
 Default options:
-  [--email --git --m --n --l --multiline --pattern-depth=0 --remove-duplicates]
+  [--email --nogit --git-fallback --m --n --l --multiline --pattern-depth=0
+   --remove-duplicates --rolestats]
 
 Notes:
   Using "-f directory" may give unexpected results:
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 97d2259..e8fba95 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1615,7 +1615,7 @@
  * A module includes a number of sections that are discarded
  * either when loaded or when used as built-in.
  * For loaded modules all functions marked __init and all data
- * marked __initdata will be discarded when the module has been intialized.
+ * marked __initdata will be discarded when the module has been initialized.
  * Likewise for modules used built-in the sections marked __exit
  * are discarded because __exit marked function are supposed to be called
  * only when a module is unloaded which never happens for built-in modules.
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index ecf9c7d..9dfcd6d 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -300,8 +300,8 @@
 		return 0;
 }
 
-/* We have dir/file.o.  Open dir/.file.o.cmd, look for deps_ line to
- * figure out source file. */
+/* We have dir/file.o.  Open dir/.file.o.cmd, look for source_ and deps_ lines
+ * to figure out source files. */
 static int parse_source_files(const char *objfile, struct md4_ctx *md)
 {
 	char *cmd, *file, *line, *dir;
@@ -340,6 +340,21 @@
 	*/
 	while ((line = get_next_line(&pos, file, flen)) != NULL) {
 		char* p = line;
+
+		if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
+			p = strrchr(line, ' ');
+			if (!p) {
+				warn("malformed line: %s\n", line);
+				goto out_file;
+			}
+			p++;
+			if (!parse_file(p, md)) {
+				warn("could not open %s: %s\n",
+				     p, strerror(errno));
+				goto out_file;
+			}
+			continue;
+		}
 		if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) {
 			check_files = 1;
 			continue;
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index b0b2357..f6cbc3d 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -238,12 +238,12 @@
 fi
 
 # Build header package
-find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$
-find arch/x86/include include scripts -type f >> /tmp/files$$
+(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$)
+(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$)
 (cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
-tar -c -f - -T /tmp/files$$ | (cd $destdir; tar -xf -)
+(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -)
 (cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
 rm -f /tmp/files$$ /tmp/objfiles$$
 arch=$(dpkg --print-architecture)
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
new file mode 100644
index 0000000..17a5798
--- /dev/null
+++ b/scripts/xz_wrap.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# This is a wrapper for xz to compress the kernel image using appropriate
+# compression options depending on the architecture.
+#
+# Author: Lasse Collin <lasse.collin@tukaani.org>
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+
+BCJ=
+LZMA2OPTS=
+
+case $ARCH in
+	x86|x86_64)     BCJ=--x86 ;;
+	powerpc)        BCJ=--powerpc ;;
+	ia64)           BCJ=--ia64; LZMA2OPTS=pb=4 ;;
+	arm)            BCJ=--arm ;;
+	sparc)          BCJ=--sparc ;;
+esac
+
+exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 19ba16e..a4a8639 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -28,7 +28,7 @@
  * The format used for transition tables is based on the GNU flex table
  * file format (--tables-file option; see Table File Format in the flex
  * info pages and the flex sources for documentation). The magic number
- * used in the header is 0x1B5E783D insted of 0xF13C57B1 though, because
+ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
  * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
  * slightly differently (see the apparmor-parser package).
  */
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 6c94105..1bf090a 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -13,8 +13,8 @@
 	request_key_auth.o \
 	user_defined.o
 
-obj-$(CONFIG_TRUSTED_KEYS) += trusted_defined.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted_defined.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o
 obj-$(CONFIG_KEYS_COMPAT) += compat.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 792c0a6..07a5f35 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -1,4 +1,4 @@
-/* compat.c: 32-bit compatibility syscall for 64-bit systems
+/* 32-bit compatibility syscall for 64-bit systems
  *
  * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -14,13 +14,13 @@
 #include <linux/compat.h>
 #include "internal.h"
 
-/*****************************************************************************/
 /*
- * the key control system call, 32-bit compatibility version for 64-bit archs
- * - this should only be called if the 64-bit arch uses weird pointers in
- *   32-bit mode or doesn't guarantee that the top 32-bits of the argument
- *   registers on taking a 32-bit syscall are zero
- * - if you can, you should call sys_keyctl directly
+ * The key control system call, 32-bit compatibility version for 64-bit archs
+ *
+ * This should only be called if the 64-bit arch uses weird pointers in 32-bit
+ * mode or doesn't guarantee that the top 32-bits of the argument registers on
+ * taking a 32-bit syscall are zero.  If you can, you should call sys_keyctl()
+ * directly.
  */
 asmlinkage long compat_sys_keyctl(u32 option,
 				  u32 arg2, u32 arg3, u32 arg4, u32 arg5)
@@ -88,5 +88,4 @@
 	default:
 		return -EOPNOTSUPP;
 	}
-
-} /* end compat_sys_keyctl() */
+}
diff --git a/security/keys/encrypted_defined.c b/security/keys/encrypted.c
similarity index 99%
rename from security/keys/encrypted_defined.c
rename to security/keys/encrypted.c
index 32d27c8..9e7e4ce 100644
--- a/security/keys/encrypted_defined.c
+++ b/security/keys/encrypted.c
@@ -30,7 +30,7 @@
 #include <crypto/sha.h>
 #include <crypto/aes.h>
 
-#include "encrypted_defined.h"
+#include "encrypted.h"
 
 static const char KEY_TRUSTED_PREFIX[] = "trusted:";
 static const char KEY_USER_PREFIX[] = "user:";
diff --git a/security/keys/encrypted_defined.h b/security/keys/encrypted.h
similarity index 100%
rename from security/keys/encrypted_defined.h
rename to security/keys/encrypted.h
diff --git a/security/keys/gc.c b/security/keys/gc.c
index a46e825..89df6b5 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -32,8 +32,8 @@
 static time_t key_gc_new_timer;
 
 /*
- * Schedule a garbage collection run
- * - precision isn't particularly important
+ * Schedule a garbage collection run.
+ * - time precision isn't particularly important
  */
 void key_schedule_gc(time_t gc_at)
 {
@@ -61,8 +61,9 @@
 }
 
 /*
- * Garbage collect pointers from a keyring
- * - return true if we altered the keyring
+ * Garbage collect pointers from a keyring.
+ *
+ * Return true if we altered the keyring.
  */
 static bool key_gc_keyring(struct key *keyring, time_t limit)
 	__releases(key_serial_lock)
@@ -107,9 +108,8 @@
 }
 
 /*
- * Garbage collector for keys
- * - this involves scanning the keyrings for dead, expired and revoked keys
- *   that have overstayed their welcome
+ * Garbage collector for keys.  This involves scanning the keyrings for dead,
+ * expired and revoked keys that have overstayed their welcome
  */
 static void key_garbage_collector(struct work_struct *work)
 {
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 56a133d..a52aa7c 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -1,4 +1,4 @@
-/* internal.h: authentication token and access key management internal defs
+/* Authentication token and access key management internal defs
  *
  * Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -35,10 +35,12 @@
 
 /*****************************************************************************/
 /*
- * keep track of keys for a user
- * - this needs to be separate to user_struct to avoid a refcount-loop
- *   (user_struct pins some keyrings which pin this struct)
- * - this also keeps track of keys under request from userspace for this UID
+ * Keep track of keys for a user.
+ *
+ * This needs to be separate to user_struct to avoid a refcount-loop
+ * (user_struct pins some keyrings which pin this struct).
+ *
+ * We also keep track of keys under request from userspace for this UID here.
  */
 struct key_user {
 	struct rb_node		node;
@@ -62,7 +64,7 @@
 extern void key_user_put(struct key_user *user);
 
 /*
- * key quota limits
+ * Key quota limits.
  * - root has its own separate limits to everyone else
  */
 extern unsigned key_quota_root_maxkeys;
@@ -85,13 +87,13 @@
 extern int __key_link_begin(struct key *keyring,
 			    const struct key_type *type,
 			    const char *description,
-			    struct keyring_list **_prealloc);
+			    unsigned long *_prealloc);
 extern int __key_link_check_live_key(struct key *keyring, struct key *key);
 extern void __key_link(struct key *keyring, struct key *key,
-		       struct keyring_list **_prealloc);
+		       unsigned long *_prealloc);
 extern void __key_link_end(struct key *keyring,
 			   struct key_type *type,
-			   struct keyring_list *prealloc);
+			   unsigned long prealloc);
 
 extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
 				      const struct key_type *type,
@@ -146,13 +148,13 @@
 extern void keyring_gc(struct key *keyring, time_t limit);
 extern void key_schedule_gc(time_t expiry_at);
 
-/*
- * check to see whether permission is granted to use a key in the desired way
- */
 extern int key_task_permission(const key_ref_t key_ref,
 			       const struct cred *cred,
 			       key_perm_t perm);
 
+/*
+ * Check to see whether permission is granted to use a key in the desired way.
+ */
 static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
 {
 	return key_task_permission(key_ref, current_cred(), perm);
@@ -168,7 +170,7 @@
 #define	KEY_ALL		0x3f	/* all the above permissions */
 
 /*
- * request_key authorisation
+ * Authorisation record for request_key().
  */
 struct request_key_auth {
 	struct key		*target_key;
@@ -188,7 +190,7 @@
 extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
 
 /*
- * keyctl functions
+ * keyctl() functions
  */
 extern long keyctl_get_keyring_ID(key_serial_t, int);
 extern long keyctl_join_session_keyring(const char __user *);
@@ -214,7 +216,7 @@
 extern long keyctl_session_to_parent(void);
 
 /*
- * debugging key validation
+ * Debugging key validation
  */
 #ifdef KEY_DEBUGGING
 extern void __key_check(const struct key *);
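
With the prototype changes above, the preallocation cookie handed between __key_link_begin(), __key_link() and __key_link_end() becomes an opaque unsigned long rather than a keyring_list pointer.  As a sketch (not code from the patch; the helper name is invented and error handling is trimmed), a caller that needs to link a key atomically with a liveness check follows roughly the pattern that key_instantiate_and_link() uses in key.c below:

#include "internal.h"

/* Sketch only: link @key into @keyring, checking the key is live first. */
static int example_link_key(struct key *keyring, struct key *key)
{
	unsigned long prealloc;
	int ret;

	/* Takes the keyring semaphore and preallocates any list space needed. */
	ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
	if (ret < 0)
		return ret;

	ret = __key_link_check_live_key(keyring, key);
	if (ret == 0)
		__key_link(keyring, key, &prealloc);

	/* Drops the semaphore and frees whatever preallocation went unused. */
	__key_link_end(keyring, key->type, prealloc);
	return ret;
}
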
diff --git a/security/keys/key.c b/security/keys/key.c
index c1eac80..1c2d43d 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -39,10 +39,10 @@
 static void key_cleanup(struct work_struct *work);
 static DECLARE_WORK(key_cleanup_task, key_cleanup);
 
-/* we serialise key instantiation and link */
+/* We serialise key instantiation and link */
 DEFINE_MUTEX(key_construction_mutex);
 
-/* any key who's type gets unegistered will be re-typed to this */
+/* Any key whose type gets unregistered will be re-typed to this */
 static struct key_type key_type_dead = {
 	.name		= "dead",
 };
@@ -56,10 +56,9 @@
 }
 #endif
 
-/*****************************************************************************/
 /*
- * get the key quota record for a user, allocating a new record if one doesn't
- * already exist
+ * Get the key quota record for a user, allocating a new record if one doesn't
+ * already exist.
  */
 struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
 {
@@ -67,7 +66,7 @@
 	struct rb_node *parent = NULL;
 	struct rb_node **p;
 
- try_again:
+try_again:
 	p = &key_user_tree.rb_node;
 	spin_lock(&key_user_lock);
 
@@ -124,18 +123,16 @@
 	goto out;
 
 	/* okay - we found a user record for this UID */
- found:
+found:
 	atomic_inc(&user->usage);
 	spin_unlock(&key_user_lock);
 	kfree(candidate);
- out:
+out:
 	return user;
+}
 
-} /* end key_user_lookup() */
-
-/*****************************************************************************/
 /*
- * dispose of a user structure
+ * Dispose of a user structure
  */
 void key_user_put(struct key_user *user)
 {
@@ -146,14 +143,11 @@
 
 		kfree(user);
 	}
+}
 
-} /* end key_user_put() */
-
-/*****************************************************************************/
 /*
- * assign a key the next unique serial number
- * - these are assigned randomly to avoid security issues through covert
- *   channel problems
+ * Allocate a serial number for a key.  These are assigned randomly to avoid
+ * security issues through covert channel problems.
  */
 static inline void key_alloc_serial(struct key *key)
 {
@@ -211,18 +205,36 @@
 		if (key->serial < xkey->serial)
 			goto attempt_insertion;
 	}
+}
 
-} /* end key_alloc_serial() */
-
-/*****************************************************************************/
-/*
- * allocate a key of the specified type
- * - update the user's quota to reflect the existence of the key
- * - called from a key-type operation with key_types_sem read-locked by
- *   key_create_or_update()
- *   - this prevents unregistration of the key type
- * - upon return the key is as yet uninstantiated; the caller needs to either
- *   instantiate the key or discard it before returning
+/**
+ * key_alloc - Allocate a key of the specified type.
+ * @type: The type of key to allocate.
+ * @desc: The key description to allow the key to be searched out.
+ * @uid: The owner of the new key.
+ * @gid: The group ID for the new key's group permissions.
+ * @cred: The credentials specifying UID namespace.
+ * @perm: The permissions mask of the new key.
+ * @flags: Flags specifying quota properties.
+ *
+ * Allocate a key of the specified type with the attributes given.  The key is
+ * returned in an uninstantiated state and the caller needs to instantiate the
+ * key before returning.
+ *
+ * The user's key count quota is updated to reflect the creation of the key and
+ * the user's key data quota has the default for the key type reserved.  The
+ * instantiation function should amend this as necessary.  If insufficient
+ * quota is available, -EDQUOT will be returned.
+ *
+ * The LSM security modules can prevent a key being created, in which case
+ * -EACCES will be returned.
+ *
+ * Returns a pointer to the new key if successful and an error code otherwise.
+ *
+ * Note that the caller needs to ensure the key type isn't unregistered.
+ * Internally this can be done by locking key_types_sem.  Externally, this can
+ * be done by either never unregistering the key type, or making sure
+ * key_alloc() calls don't race with module unloading.
  */
 struct key *key_alloc(struct key_type *type, const char *desc,
 		      uid_t uid, gid_t gid, const struct cred *cred,
@@ -344,14 +356,19 @@
 	key_user_put(user);
 	key = ERR_PTR(-EDQUOT);
 	goto error;
-
-} /* end key_alloc() */
-
+}
 EXPORT_SYMBOL(key_alloc);
 
-/*****************************************************************************/
-/*
- * reserve an amount of quota for the key's payload
+/**
+ * key_payload_reserve - Adjust data quota reservation for the key's payload
+ * @key: The key to make the reservation for.
+ * @datalen: The amount of data payload the caller now wants.
+ *
+ * Adjust the amount of the owning user's key data quota that a key reserves.
+ * If the amount is increased, then -EDQUOT may be returned if there isn't
+ * enough free quota available.
+ *
+ * If successful, 0 is returned.
  */
 int key_payload_reserve(struct key *key, size_t datalen)
 {
@@ -384,22 +401,21 @@
 		key->datalen = datalen;
 
 	return ret;
-
-} /* end key_payload_reserve() */
-
+}
 EXPORT_SYMBOL(key_payload_reserve);
 
-/*****************************************************************************/
 /*
- * instantiate a key and link it into the target keyring atomically
- * - called with the target keyring's semaphore writelocked
+ * Instantiate a key and link it into the target keyring atomically.  Must be
+ * called with the target keyring's semaphore writelocked.  The target key's
+ * semaphore need not be locked as instantiation is serialised by
+ * key_construction_mutex.
  */
 static int __key_instantiate_and_link(struct key *key,
 				      const void *data,
 				      size_t datalen,
 				      struct key *keyring,
 				      struct key *authkey,
-				      struct keyring_list **_prealloc)
+				      unsigned long *_prealloc)
 {
 	int ret, awaken;
 
@@ -441,12 +457,23 @@
 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
 
 	return ret;
+}
 
-} /* end __key_instantiate_and_link() */
-
-/*****************************************************************************/
-/*
- * instantiate a key and link it into the target keyring atomically
+/**
+ * key_instantiate_and_link - Instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @data: The data to use to instantiate the key.
+ * @datalen: The length of @data.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Instantiate a key that's in the uninstantiated state using the provided data
+ * and, if successful, link it in to the destination keyring if one is
+ * supplied.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up.  If the key was already instantiated,
+ * -EBUSY will be returned.
  */
 int key_instantiate_and_link(struct key *key,
 			     const void *data,
@@ -454,7 +481,7 @@
 			     struct key *keyring,
 			     struct key *authkey)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	int ret;
 
 	if (keyring) {
@@ -471,21 +498,35 @@
 		__key_link_end(keyring, key->type, prealloc);
 
 	return ret;
-
-} /* end key_instantiate_and_link() */
+}
 
 EXPORT_SYMBOL(key_instantiate_and_link);
 
-/*****************************************************************************/
-/*
- * negatively instantiate a key and link it into the target keyring atomically
+/**
+ * key_negate_and_link - Negatively instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @timeout: The timeout on the negative key.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Negatively instantiate a key that's in the uninstantiated state and, if
+ * successful, set its timeout and link it in to the destination keyring if one
+ * is supplied.  The key and any links to the key will be automatically garbage
+ * collected after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up.  If the key was already instantiated,
+ * -EBUSY will be returned.
  */
 int key_negate_and_link(struct key *key,
 			unsigned timeout,
 			struct key *keyring,
 			struct key *authkey)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	struct timespec now;
 	int ret, awaken, link_ret = 0;
 
@@ -535,22 +576,23 @@
 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
 
 	return ret == 0 ? link_ret : ret;
-
-} /* end key_negate_and_link() */
+}
 
 EXPORT_SYMBOL(key_negate_and_link);
 
-/*****************************************************************************/
 /*
- * do cleaning up in process context so that we don't have to disable
- * interrupts all over the place
+ * Garbage collect keys in process context so that we don't have to disable
+ * interrupts all over the place.
+ *
+ * key_put() schedules this rather than trying to do the cleanup itself, which
+ * means key_put() doesn't have to sleep.
  */
 static void key_cleanup(struct work_struct *work)
 {
 	struct rb_node *_n;
 	struct key *key;
 
- go_again:
+go_again:
 	/* look for a dead key in the tree */
 	spin_lock(&key_serial_lock);
 
@@ -564,7 +606,7 @@
 	spin_unlock(&key_serial_lock);
 	return;
 
- found_dead_key:
+found_dead_key:
 	/* we found a dead key - once we've removed it from the tree, we can
 	 * drop the lock */
 	rb_erase(&key->serial_node, &key_serial_tree);
@@ -601,14 +643,15 @@
 
 	/* there may, of course, be more than one key to destroy */
 	goto go_again;
+}
 
-} /* end key_cleanup() */
-
-/*****************************************************************************/
-/*
- * dispose of a reference to a key
- * - when all the references are gone, we schedule the cleanup task to come and
- *   pull it out of the tree in definite process context
+/**
+ * key_put - Discard a reference to a key.
+ * @key: The key to discard a reference from.
+ *
+ * Discard a reference to a key, and when all the references are gone, we
+ * schedule the cleanup task to come and pull it out of the tree in process
+ * context at some later time.
  */
 void key_put(struct key *key)
 {
@@ -618,14 +661,11 @@
 		if (atomic_dec_and_test(&key->usage))
 			schedule_work(&key_cleanup_task);
 	}
-
-} /* end key_put() */
-
+}
 EXPORT_SYMBOL(key_put);
 
-/*****************************************************************************/
 /*
- * find a key by its serial number
+ * Find a key by its serial number.
  */
 struct key *key_lookup(key_serial_t id)
 {
@@ -647,11 +687,11 @@
 			goto found;
 	}
 
- not_found:
+not_found:
 	key = ERR_PTR(-ENOKEY);
 	goto error;
 
- found:
+found:
 	/* pretend it doesn't exist if it is awaiting deletion */
 	if (atomic_read(&key->usage) == 0)
 		goto not_found;
@@ -661,16 +701,16 @@
 	 */
 	atomic_inc(&key->usage);
 
- error:
+error:
 	spin_unlock(&key_serial_lock);
 	return key;
+}
 
-} /* end key_lookup() */
-
-/*****************************************************************************/
 /*
- * find and lock the specified key type against removal
- * - we return with the sem readlocked
+ * Find and lock the specified key type against removal.
+ *
+ * We return with the sem read-locked if successful.  If the type wasn't
+ * available -ENOKEY is returned instead.
  */
 struct key_type *key_type_lookup(const char *type)
 {
@@ -688,26 +728,23 @@
 	up_read(&key_types_sem);
 	ktype = ERR_PTR(-ENOKEY);
 
- found_kernel_type:
+found_kernel_type:
 	return ktype;
+}
 
-} /* end key_type_lookup() */
-
-/*****************************************************************************/
 /*
- * unlock a key type
+ * Unlock a key type locked by key_type_lookup().
  */
 void key_type_put(struct key_type *ktype)
 {
 	up_read(&key_types_sem);
+}
 
-} /* end key_type_put() */
-
-/*****************************************************************************/
 /*
- * attempt to update an existing key
- * - the key has an incremented refcount
- * - we need to put the key if we get an error
+ * Attempt to update an existing key.
+ *
+ * The key is given to us with an incremented refcount that we need to discard
+ * if we get an error.
  */
 static inline key_ref_t __key_update(key_ref_t key_ref,
 				     const void *payload, size_t plen)
@@ -742,13 +779,32 @@
 	key_put(key);
 	key_ref = ERR_PTR(ret);
 	goto out;
+}
 
-} /* end __key_update() */
-
-/*****************************************************************************/
-/*
- * search the specified keyring for a key of the same description; if one is
- * found, update it, otherwise add a new one
+/**
+ * key_create_or_update - Update or create and instantiate a key.
+ * @keyring_ref: A pointer to the destination keyring with possession flag.
+ * @type: The type of key.
+ * @description: The searchable description for the key.
+ * @payload: The data to use to instantiate or update the key.
+ * @plen: The length of @payload.
+ * @perm: The permissions mask for a new key.
+ * @flags: The quota flags for a new key.
+ *
+ * Search the destination keyring for a key of the same description and if one
+ * is found, update it, otherwise create and instantiate a new one and create a
+ * link to it from that keyring.
+ *
+ * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
+ * concocted.
+ *
+ * Returns a pointer to the new key if successful, -ENODEV if the key type
+ * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
+ * caller isn't permitted to modify the keyring or the LSM did not permit
+ * creation of the key.
+ *
+ * On success, the possession flag from the keyring ref will be tacked on to
+ * the key ref before it is returned.
  */
 key_ref_t key_create_or_update(key_ref_t keyring_ref,
 			       const char *type,
@@ -758,7 +814,7 @@
 			       key_perm_t perm,
 			       unsigned long flags)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	const struct cred *cred = current_cred();
 	struct key_type *ktype;
 	struct key *keyring, *key = NULL;
@@ -855,14 +911,21 @@
 
 	key_ref = __key_update(key_ref, payload, plen);
 	goto error;
-
-} /* end key_create_or_update() */
-
+}
 EXPORT_SYMBOL(key_create_or_update);
 
-/*****************************************************************************/
-/*
- * update a key
+/**
+ * key_update - Update a key's contents.
+ * @key_ref: The pointer (plus possession flag) to the key.
+ * @payload: The data to be used to update the key.
+ * @plen: The length of @payload.
+ *
+ * Attempt to update the contents of a key with the given payload data.  The
+ * caller must be granted Write permission on the key.  Negative keys can be
+ * instantiated by this method.
+ *
+ * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
+ * type does not support updating.  The key type may return other errors.
  */
 int key_update(key_ref_t key_ref, const void *payload, size_t plen)
 {
@@ -891,14 +954,17 @@
 
  error:
 	return ret;
-
-} /* end key_update() */
-
+}
 EXPORT_SYMBOL(key_update);
 
-/*****************************************************************************/
-/*
- * revoke a key
+/**
+ * key_revoke - Revoke a key.
+ * @key: The key to be revoked.
+ *
+ * Mark a key as being revoked and ask the type to free up its resources.  The
+ * revocation timeout is set and the key and all its links will be
+ * automatically garbage collected after key_gc_delay seconds if they
+ * are not manually dealt with first.
  */
 void key_revoke(struct key *key)
 {
@@ -926,14 +992,16 @@
 	}
 
 	up_write(&key->sem);
-
-} /* end key_revoke() */
-
+}
 EXPORT_SYMBOL(key_revoke);
 
-/*****************************************************************************/
-/*
- * register a type of key
+/**
+ * register_key_type - Register a type of key.
+ * @ktype: The new key type.
+ *
+ * Register a new key type.
+ *
+ * Returns 0 on success or -EEXIST if a type of this name already exists.
  */
 int register_key_type(struct key_type *ktype)
 {
@@ -953,17 +1021,19 @@
 	list_add(&ktype->link, &key_types_list);
 	ret = 0;
 
- out:
+out:
 	up_write(&key_types_sem);
 	return ret;
-
-} /* end register_key_type() */
-
+}
 EXPORT_SYMBOL(register_key_type);
 
-/*****************************************************************************/
-/*
- * unregister a type of key
+/**
+ * unregister_key_type - Unregister a type of key.
+ * @ktype: The key type.
+ *
+ * Unregister a key type and mark all the extant keys of this type as dead.
+ * Those keys of this type are then destroyed to get rid of their payloads and
+ * they and their links will be garbage collected as soon as possible.
  */
 void unregister_key_type(struct key_type *ktype)
 {
@@ -1010,14 +1080,11 @@
 	up_write(&key_types_sem);
 
 	key_schedule_gc(0);
-
-} /* end unregister_key_type() */
-
+}
 EXPORT_SYMBOL(unregister_key_type);
 
-/*****************************************************************************/
 /*
- * initialise the key management stuff
+ * Initialise the key management state.
  */
 void __init key_init(void)
 {
@@ -1037,5 +1104,4 @@
 
 	rb_insert_color(&root_key_user.node,
 			&key_user_tree);
-
-} /* end key_init() */
+}
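
To make the new key_alloc()/key_instantiate_and_link() kernel-doc above concrete, a key-type implementation might create and instantiate a key roughly as follows.  This is a sketch rather than code from the patch; the type, description and permission mask are invented for illustration.

#include <linux/key.h>
#include <linux/cred.h>
#include <linux/err.h>

/* Sketch: allocate an uninstantiated key of some registered type and
 * instantiate it from a payload blob, per the kernel-doc above. */
static struct key *example_make_key(struct key_type *type,
				    const void *blob, size_t bloblen)
{
	const struct cred *cred = current_cred();
	struct key *key;
	int ret;

	key = key_alloc(type, "example:demo", cred->fsuid, cred->fsgid, cred,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key))
		return key;		/* e.g. -EDQUOT or -EACCES */

	ret = key_instantiate_and_link(key, blob, bloblen, NULL, NULL);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}
	return key;
}
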
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 60924f6..31a0fd8 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1,4 +1,4 @@
-/* keyctl.c: userspace keyctl operations
+/* Userspace key control operations
  *
  * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -31,28 +31,24 @@
 	int ret;
 
 	ret = strncpy_from_user(type, _type, len);
-
 	if (ret < 0)
 		return ret;
-
 	if (ret == 0 || ret >= len)
 		return -EINVAL;
-
 	if (type[0] == '.')
 		return -EPERM;
-
 	type[len - 1] = '\0';
-
 	return 0;
 }
 
-/*****************************************************************************/
 /*
- * extract the description of a new key from userspace and either add it as a
- * new key to the specified keyring or update a matching key in that keyring
- * - the keyring must be writable
- * - returns the new key's serial number
- * - implements add_key()
+ * Extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring.
+ *
+ * The keyring must be writable so that we can attach the key to it.
+ *
+ * If successful, the new key's serial number is returned, otherwise an error
+ * code is returned.
  */
 SYSCALL_DEFINE5(add_key, const char __user *, _type,
 		const char __user *, _description,
@@ -132,19 +128,20 @@
 	kfree(description);
  error:
 	return ret;
+}
 
-} /* end sys_add_key() */
-
-/*****************************************************************************/
 /*
- * search the process keyrings for a matching key
- * - nested keyrings may also be searched if they have Search permission
- * - if a key is found, it will be attached to the destination keyring if
- *   there's one specified
- * - /sbin/request-key will be invoked if _callout_info is non-NULL
- *   - the _callout_info string will be passed to /sbin/request-key
- *   - if the _callout_info string is empty, it will be rendered as "-"
- * - implements request_key()
+ * Search the process keyrings and keyring trees linked from those for a
+ * matching key.  Keyrings must have appropriate Search permission to be
+ * searched.
+ *
+ * If a key is found, it will be attached to the destination keyring if there's
+ * one specified and the serial number of the key will be returned.
+ *
+ * If no key is found, /sbin/request-key will be invoked if _callout_info is
+ * non-NULL in an attempt to create a key.  The _callout_info string will be
+ * passed to /sbin/request-key to aid with completing the request.  If the
+ * _callout_info string is "" then it will be changed to "-".
  */
 SYSCALL_DEFINE4(request_key, const char __user *, _type,
 		const char __user *, _description,
@@ -222,14 +219,14 @@
 	kfree(description);
 error:
 	return ret;
+}
 
-} /* end sys_request_key() */
-
-/*****************************************************************************/
 /*
- * get the ID of the specified process keyring
- * - the keyring must have search permission to be found
- * - implements keyctl(KEYCTL_GET_KEYRING_ID)
+ * Get the ID of the specified process keyring.
+ *
+ * The requested keyring must have search permission to be found.
+ *
+ * If successful, the ID of the requested keyring will be returned.
  */
 long keyctl_get_keyring_ID(key_serial_t id, int create)
 {
@@ -248,13 +245,17 @@
 	key_ref_put(key_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_get_keyring_ID() */
-
-/*****************************************************************************/
 /*
- * join the session keyring
- * - implements keyctl(KEYCTL_JOIN_SESSION_KEYRING)
+ * Join a (named) session keyring.
+ *
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary.  A named session keyring must have Search
+ * permission for it to be joined.  Session keyrings without this permit will
+ * be skipped over.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
  */
 long keyctl_join_session_keyring(const char __user *_name)
 {
@@ -277,14 +278,17 @@
 
 error:
 	return ret;
+}
 
-} /* end keyctl_join_session_keyring() */
-
-/*****************************************************************************/
 /*
- * update a key's data payload
- * - the key must be writable
- * - implements keyctl(KEYCTL_UPDATE)
+ * Update a key's data payload from the given data.
+ *
+ * The key must grant the caller Write permission and the key type must support
+ * updating for this to work.  A negative key can be positively instantiated
+ * with this call.
+ *
+ * If successful, 0 will be returned.  If the key type does not support
+ * updating, then -EOPNOTSUPP will be returned.
  */
 long keyctl_update_key(key_serial_t id,
 		       const void __user *_payload,
@@ -326,14 +330,17 @@
 	kfree(payload);
 error:
 	return ret;
+}
 
-} /* end keyctl_update_key() */
-
-/*****************************************************************************/
 /*
- * revoke a key
- * - the key must be writable
- * - implements keyctl(KEYCTL_REVOKE)
+ * Revoke a key.
+ *
+ * The key must grant the caller Write or Setattr permission for this to
+ * work.  The key type should give up its quota claim when revoked.  The key
+ * and any links to the key will be automatically garbage collected after a
+ * certain amount of time (/proc/sys/kernel/keys/gc_delay).
+ *
+ * If successful, 0 is returned.
  */
 long keyctl_revoke_key(key_serial_t id)
 {
@@ -358,14 +365,14 @@
 	key_ref_put(key_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_revoke_key() */
-
-/*****************************************************************************/
 /*
- * clear the specified process keyring
- * - the keyring must be writable
- * - implements keyctl(KEYCTL_CLEAR)
+ * Clear the specified keyring, creating an empty process keyring if one of the
+ * special keyring IDs is used.
+ *
+ * The keyring must grant the caller Write permission for this to work.  If
+ * successful, 0 will be returned.
  */
 long keyctl_keyring_clear(key_serial_t ringid)
 {
@@ -383,15 +390,18 @@
 	key_ref_put(keyring_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_clear() */
-
-/*****************************************************************************/
 /*
- * link a key into a keyring
- * - the keyring must be writable
- * - the key must be linkable
- * - implements keyctl(KEYCTL_LINK)
+ * Create a link from a keyring to a key if there's no matching key in the
+ * keyring, otherwise replace the link to the matching key with a link to the
+ * new key.
+ *
+ * The key must grant the caller Link permission and the keyring must grant
+ * the caller Write permission.  Furthermore, if an additional link is created,
+ * the keyring's quota will be extended.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
 {
@@ -417,15 +427,16 @@
 	key_ref_put(keyring_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_link() */
-
-/*****************************************************************************/
 /*
- * unlink the first attachment of a key from a keyring
- * - the keyring must be writable
- * - we don't need any permissions on the key
- * - implements keyctl(KEYCTL_UNLINK)
+ * Unlink a key from a keyring.
+ *
+ * The keyring must grant the caller Write permission for this to work; the key
+ * itself need not grant the caller anything.  If the last link to a key is
+ * removed then that key will be scheduled for destruction.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
 {
@@ -451,19 +462,20 @@
 	key_ref_put(keyring_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_unlink() */
-
-/*****************************************************************************/
 /*
- * describe a user key
- * - the key must have view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of description available,
- *   irrespective of how much we may have copied
- * - the description is formatted thus:
+ * Return a description of a key to userspace.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, we place up to buflen bytes of data into it formatted
+ * in the following way:
+ *
  *	type;uid;gid;perm;description<NUL>
- * - implements keyctl(KEYCTL_DESCRIBE)
+ *
+ * If successful, we return the amount of description available, irrespective
+ * of how much we may have copied into the buffer.
  */
 long keyctl_describe_key(key_serial_t keyid,
 			 char __user *buffer,
@@ -531,18 +543,17 @@
 	key_ref_put(key_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_describe_key() */
-
-/*****************************************************************************/
 /*
- * search the specified keyring for a matching key
- * - the start keyring must be searchable
- * - nested keyrings may also be searched if they are searchable
- * - only keys with search permission may be found
- * - if a key is found, it will be attached to the destination keyring if
- *   there's one specified
- * - implements keyctl(KEYCTL_SEARCH)
+ * Search the specified keyring and any keyrings it links to for a matching
+ * key.  Only keyrings that grant the caller Search permission will be searched
+ * (this includes the starting keyring).  Only keys with Search permission can
+ * be found.
+ *
+ * If successful, the found key will be linked to the destination keyring if
+ * supplied and the key has Link permission, and the found key ID will be
+ * returned.
  */
 long keyctl_keyring_search(key_serial_t ringid,
 			   const char __user *_type,
@@ -626,18 +637,17 @@
 	kfree(description);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_search() */
-
-/*****************************************************************************/
 /*
- * read a user key's payload
- * - the keyring must be readable or the key must be searchable from the
- *   process's keyrings
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of data in the key,
- *   irrespective of how much we may have copied
- * - implements keyctl(KEYCTL_READ)
+ * Read a key's payload.
+ *
+ * The key must either grant the caller Read permission, or it must grant the
+ * caller Search permission when searched for from the process keyrings.
+ *
+ * If successful, we place up to buflen bytes of data into the buffer, if one
+ * is provided, and return the amount of data that is available in the key,
+ * irrespective of how much we copied into the buffer.
  */
 long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
 {
@@ -688,15 +698,22 @@
 	key_put(key);
 error:
 	return ret;
+}
 
-} /* end keyctl_read_key() */
-
-/*****************************************************************************/
 /*
- * change the ownership of a key
- * - the keyring owned by the changer
- * - if the uid or gid is -1, then that parameter is not changed
- * - implements keyctl(KEYCTL_CHOWN)
+ * Change the ownership of a key
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet.  For the UID to be changed, or
+ * for the GID to be changed to a group the caller is not a member of, the
+ * caller must have sysadmin capability.  If either uid or gid is -1 then that
+ * attribute is not changed.
+ *
+ * If the UID is to be changed, the new user must have sufficient quota to
+ * accept the key.  The quota deduction will be moved from the old user to
+ * the new user should the attribute be changed.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
 {
@@ -796,14 +813,14 @@
 	zapowner = newowner;
 	ret = -EDQUOT;
 	goto error_put;
+}
 
-} /* end keyctl_chown_key() */
-
-/*****************************************************************************/
 /*
- * change the permission mask on a key
- * - the keyring owned by the changer
- * - implements keyctl(KEYCTL_SETPERM)
+ * Change the permission mask on a key.
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet.  If the caller does not have
+ * sysadmin capability, it may only change the permission on keys that it owns.
  */
 long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
 {
@@ -838,11 +855,11 @@
 	key_put(key);
 error:
 	return ret;
-
-} /* end keyctl_setperm_key() */
+}
 
 /*
- * get the destination keyring for instantiation
+ * Get the destination keyring for instantiation and check that the caller has
+ * Write permission on it.
  */
 static long get_instantiation_keyring(key_serial_t ringid,
 				      struct request_key_auth *rka,
@@ -879,7 +896,7 @@
 }
 
 /*
- * change the request_key authorisation key on the current process
+ * Change the request_key authorisation key on the current process.
  */
 static int keyctl_change_reqkey_auth(struct key *key)
 {
@@ -895,10 +912,14 @@
 	return commit_creds(new);
 }
 
-/*****************************************************************************/
 /*
- * instantiate the key with the specified payload, and, if one is given, link
- * the key into the keyring
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_instantiate_key(key_serial_t id,
 			    const void __user *_payload,
@@ -973,13 +994,22 @@
 		vfree(payload);
 error:
 	return ret;
+}
 
-} /* end keyctl_instantiate_key() */
-
-/*****************************************************************************/
 /*
- * negatively instantiate the key with the given timeout (in seconds), and, if
- * one is given, link the key into the keyring
+ * Negatively instantiate the key with the given timeout (in seconds) and link
+ * the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
 {
@@ -1020,13 +1050,14 @@
 
 error:
 	return ret;
+}
 
-} /* end keyctl_negate_key() */
-
-/*****************************************************************************/
 /*
- * set the default keyring in which request_key() will cache keys
- * - return the old setting
+ * Read or set the default keyring in which request_key() will cache keys,
+ * returning the old setting.
+ *
+ * If a process keyring is specified then this will be created if it doesn't
+ * yet exist.
  */
 long keyctl_set_reqkey_keyring(int reqkey_defl)
 {
@@ -1079,12 +1110,19 @@
 error:
 	abort_creds(new);
 	return ret;
+}
 
-} /* end keyctl_set_reqkey_keyring() */
-
-/*****************************************************************************/
 /*
- * set or clear the timeout for a key
+ * Set or clear the timeout on a key.
+ *
+ * Either the key must grant the caller Setattr permission or else the caller
+ * must hold an instantiation authorisation token for the key.
+ *
+ * The timeout is either 0 to clear the timeout, or a number of seconds from
+ * the current time.  The key and any links to the key will be automatically
+ * garbage collected after the timeout expires.
+ *
+ * If successful, 0 is returned.
  */
 long keyctl_set_timeout(key_serial_t id, unsigned timeout)
 {
@@ -1136,12 +1174,24 @@
 	ret = 0;
 error:
 	return ret;
+}
 
-} /* end keyctl_set_timeout() */
-
-/*****************************************************************************/
 /*
- * assume the authority to instantiate the specified key
+ * Assume (or clear) the authority to instantiate the specified key.
+ *
+ * This sets the authoritative token currently in force for key instantiation.
+ * This must be done for a key to be instantiated.  It has the effect of making
+ * available all the keys from the caller of the request_key() that created a
+ * key to request_key() calls made by the caller of this function.
+ *
+ * The caller must have the instantiation key in their process keyrings with a
+ * Search permission grant available to the caller.
+ *
+ * If the ID given is 0, then the setting will be cleared and 0 returned.
+ *
+ * If the ID given has a matching authorisation key, then that key will be
+ * set and its ID will be returned.  The authorisation key can be read to get
+ * the callout information passed to request_key().
  */
 long keyctl_assume_authority(key_serial_t id)
 {
@@ -1178,16 +1228,17 @@
 	ret = authkey->serial;
 error:
 	return ret;
-
-} /* end keyctl_assume_authority() */
+}
 
 /*
- * get the security label of a key
- * - the key must grant us view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of information available,
- *   irrespective of how much we may have copied (including the terminal NUL)
- * - implements keyctl(KEYCTL_GET_SECURITY)
+ * Get the LSM security label of a key.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, then up to buflen bytes of data will be placed into it.
+ *
+ * If successful, the amount of information available will be returned,
+ * irrespective of how much was copied (including the terminal NUL).
  */
 long keyctl_get_security(key_serial_t keyid,
 			 char __user *buffer,
@@ -1242,10 +1293,16 @@
 }
 
 /*
- * attempt to install the calling process's session keyring on the process's
- * parent process
- * - the keyring must exist and must grant us LINK permission
- * - implements keyctl(KEYCTL_SESSION_TO_PARENT)
+ * Attempt to install the calling process's session keyring on the process's
+ * parent process.
+ *
+ * The keyring must exist and must grant the caller LINK permission, and the
+ * parent process must be single-threaded and must have the same effective
+ * ownership as this process and mustn't be SUID/SGID.
+ *
+ * The keyring will be emplaced on the parent when it next resumes userspace.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_session_to_parent(void)
 {
@@ -1348,9 +1405,8 @@
 #endif /* !TIF_NOTIFY_RESUME */
 }
 
-/*****************************************************************************/
 /*
- * the key control system call
+ * The key control system call
  */
 SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
@@ -1439,5 +1495,4 @@
 	default:
 		return -EOPNOTSUPP;
 	}
-
-} /* end sys_keyctl() */
+}
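
From userspace, the behaviour documented in the comments above is reached through the add_key(2), request_key(2) and keyctl(2) system calls.  A minimal sketch using the raw syscalls (rather than libkeyutils) might look like this; the key type, description and payload are arbitrary examples.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(void)
{
	char desc[256];
	long id, n;

	/* sys_add_key(): attach a "user" key to the session keyring. */
	id = syscall(__NR_add_key, "user", "example:demo",
		     "secret", strlen("secret"), KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}

	/* keyctl(KEYCTL_DESCRIBE) returns "type;uid;gid;perm;description". */
	n = syscall(__NR_keyctl, KEYCTL_DESCRIBE, id, desc, sizeof(desc), 0);
	if (n > 0 && n < (long)sizeof(desc))
		printf("key %ld: %s\n", id, desc);

	/* keyctl(KEYCTL_REVOKE): the key is garbage collected after gc_delay. */
	syscall(__NR_keyctl, KEYCTL_REVOKE, id, 0, 0, 0);
	return 0;
}
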
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d37f713..5620f08 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -25,14 +25,16 @@
 		(keyring)->payload.subscriptions,			\
 		rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
 
+#define KEY_LINK_FIXQUOTA 1UL
+
 /*
- * when plumbing the depths of the key tree, this sets a hard limit set on how
- * deep we're willing to go
+ * When plumbing the depths of the key tree, this sets a hard limit on how
+ * deep we're willing to go.
  */
 #define KEYRING_SEARCH_MAX_DEPTH 6
 
 /*
- * we keep all named keyrings in a hash to speed looking them up
+ * We keep all named keyrings in a hash to speed looking them up.
  */
 #define KEYRING_NAME_HASH_SIZE	(1 << 5)
 
@@ -50,7 +52,9 @@
 }
 
 /*
- * the keyring type definition
+ * The keyring key type definition.  Keyrings are simply keys of this type and
+ * can be treated as ordinary keys in addition to having their own special
+ * operations.
  */
 static int keyring_instantiate(struct key *keyring,
 			       const void *data, size_t datalen);
@@ -71,19 +75,17 @@
 	.describe	= keyring_describe,
 	.read		= keyring_read,
 };
-
 EXPORT_SYMBOL(key_type_keyring);
 
 /*
- * semaphore to serialise link/link calls to prevent two link calls in parallel
- * introducing a cycle
+ * Semaphore to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle.
  */
 static DECLARE_RWSEM(keyring_serialise_link_sem);
 
-/*****************************************************************************/
 /*
- * publish the name of a keyring so that it can be found by name (if it has
- * one)
+ * Publish the name of a keyring so that it can be found by name (if it has
+ * one).
  */
 static void keyring_publish_name(struct key *keyring)
 {
@@ -102,13 +104,12 @@
 
 		write_unlock(&keyring_name_lock);
 	}
+}
 
-} /* end keyring_publish_name() */
-
-/*****************************************************************************/
 /*
- * initialise a keyring
- * - we object if we were given any data
+ * Initialise a keyring.
+ *
+ * Returns 0 on success, -EINVAL if given any data.
  */
 static int keyring_instantiate(struct key *keyring,
 			       const void *data, size_t datalen)
@@ -123,23 +124,20 @@
 	}
 
 	return ret;
+}
 
-} /* end keyring_instantiate() */
-
-/*****************************************************************************/
 /*
- * match keyrings on their name
+ * Match keyrings on their name
  */
 static int keyring_match(const struct key *keyring, const void *description)
 {
 	return keyring->description &&
 		strcmp(keyring->description, description) == 0;
+}
 
-} /* end keyring_match() */
-
-/*****************************************************************************/
 /*
- * dispose of the data dangling from the corpse of a keyring
+ * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
+ * and dispose of its data.
  */
 static void keyring_destroy(struct key *keyring)
 {
@@ -164,12 +162,10 @@
 			key_put(klist->keys[loop]);
 		kfree(klist);
 	}
+}
 
-} /* end keyring_destroy() */
-
-/*****************************************************************************/
 /*
- * describe the keyring
+ * Describe a keyring for /proc.
  */
 static void keyring_describe(const struct key *keyring, struct seq_file *m)
 {
@@ -187,13 +183,12 @@
 	else
 		seq_puts(m, ": empty");
 	rcu_read_unlock();
+}
 
-} /* end keyring_describe() */
-
-/*****************************************************************************/
 /*
- * read a list of key IDs from the keyring's contents
- * - the keyring's semaphore is read-locked
+ * Read a list of key IDs from the keyring's contents in binary form
+ *
+ * The keyring's semaphore is read-locked by the caller.
  */
 static long keyring_read(const struct key *keyring,
 			 char __user *buffer, size_t buflen)
@@ -241,12 +236,10 @@
 
 error:
 	return ret;
+}
 
-} /* end keyring_read() */
-
-/*****************************************************************************/
 /*
- * allocate a keyring and link into the destination keyring
+ * Allocate a keyring and link into the destination keyring.
  */
 struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
 			  const struct cred *cred, unsigned long flags,
@@ -269,20 +262,42 @@
 	}
 
 	return keyring;
+}
 
-} /* end keyring_alloc() */
-
-/*****************************************************************************/
-/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we use the supplied match function to see if the description (or other
- *   feature of interest) matches
- * - we rely on RCU to prevent the keyring lists from disappearing on us
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
- * - we propagate the possession attribute from the keyring ref to the key ref
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @cred: The credentials to use for permissions checks.
+ * @type: The type of key to search for.
+ * @description: Parameter for @match.
+ * @match: Function to rule on whether or not a key is the one required.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree.  In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit.  The
+ * match function may use any attributes of a key that it wishes to
+ * determine the match.  Normally the match function from the key type would be
+ * used.
+ *
+ * RCU is used to prevent the keyring key lists from disappearing without the
+ * need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
  */
 key_ref_t keyring_search_aux(key_ref_t keyring_ref,
 			     const struct cred *cred,
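
In practice a caller inside the kernel usually reaches this through the keyring_search() wrapper documented just below.  A rough sketch of such a lookup (with an invented description, assuming the "user" key type) might be:

#include <linux/key.h>
#include <linux/err.h>
#include <keys/user-type.h>

/* Sketch: look up a user-defined key by name under @keyring.  Passing 1 as
 * the possession flag lets possessor permissions apply during the search.
 * The caller must key_put() the result, whose usage count was incremented. */
static struct key *example_find_key(struct key *keyring)
{
	key_ref_t kref;

	kref = keyring_search(make_key_ref(keyring, 1),
			      &key_type_user, "example:demo");
	if (IS_ERR(kref))
		return ERR_CAST(kref);

	/* The possession attribute was propagated from the keyring ref. */
	return key_ref_to_ptr(kref);
}
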
@@ -444,17 +459,16 @@
 	rcu_read_unlock();
 error:
 	return key_ref;
+}
 
-} /* end keyring_search_aux() */
-
-/*****************************************************************************/
-/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we readlock the keyrings as we search down the tree
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
+/**
+ * keyring_search - Search the supplied keyring tree for a matching key
+ * @keyring: The root of the keyring tree to be searched.
+ * @type: The type of key we want to find.
+ * @description: The name of the key we want to find.
+ *
+ * As keyring_search_aux() above, but using the current task's credentials and
+ * type's default matching function.
  */
 key_ref_t keyring_search(key_ref_t keyring,
 			 struct key_type *type,
@@ -465,16 +479,23 @@
 
 	return keyring_search_aux(keyring, current->cred,
 				  type, description, type->match);
-
-} /* end keyring_search() */
-
+}
 EXPORT_SYMBOL(keyring_search);
 
-/*****************************************************************************/
 /*
- * search the given keyring only (no recursion)
- * - keyring must be locked by caller
- * - caller must guarantee that the keyring is a keyring
+ * Search the given keyring only (no recursion).
+ *
+ * The caller must guarantee that the keyring is a keyring and that the
+ * permission is granted to search the keyring as no check is made here.
+ *
+ * RCU is used to make it unnecessary to lock the keyring key list here.
+ *
+ * Returns a pointer to the found key with usage count incremented if
+ * successful and returns -ENOKEY if not found.  Revoked keys and keys not
+ * providing the requested permission are skipped over.
+ *
+ * If successful, the possession indicator is propagated from the keyring ref
+ * to the returned key reference.
  */
 key_ref_t __keyring_search_one(key_ref_t keyring_ref,
 			       const struct key_type *ktype,
@@ -514,14 +535,18 @@
 	atomic_inc(&key->usage);
 	rcu_read_unlock();
 	return make_key_ref(key, possessed);
+}
 
-} /* end __keyring_search_one() */
-
-/*****************************************************************************/
 /*
- * find a keyring with the specified name
- * - all named keyrings are searched
- * - normally only finds keyrings with search permission for the current process
+ * Find a keyring with the specified name.
+ *
+ * All named keyrings in the current user namespace are searched, provided they
+ * grant Search permission directly to the caller (unless this check is
+ * skipped).  Keyrings whose usage counts have reached zero or that have been
+ * revoked are skipped.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having been
+ * incremented on success.  -ENOKEY is returned if the keyring could not be found.
  */
 struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
 {
@@ -569,15 +594,14 @@
 out:
 	read_unlock(&keyring_name_lock);
 	return keyring;
+}
 
-} /* end find_keyring_by_name() */
-
-/*****************************************************************************/
 /*
- * see if a cycle will will be created by inserting acyclic tree B in acyclic
- * tree A at the topmost level (ie: as a direct child of A)
- * - since we are adding B to A at the top level, checking for cycles should
- *   just be a matter of seeing if node A is somewhere in tree B
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A).
+ *
+ * Since we are adding B to A at the top level, checking for cycles should just
+ * be a matter of seeing if node A is somewhere in tree B.
  */
 static int keyring_detect_cycle(struct key *A, struct key *B)
 {
@@ -657,11 +681,10 @@
 cycle_detected:
 	ret = -EDEADLK;
 	goto error;
-
-} /* end keyring_detect_cycle() */
+}
 
 /*
- * dispose of a keyring list after the RCU grace period, freeing the unlinked
+ * Dispose of a keyring list after the RCU grace period, freeing the unlinked
  * key
  */
 static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
@@ -675,14 +698,14 @@
 }
 
 /*
- * preallocate memory so that a key can be linked into to a keyring
+ * Preallocate memory so that a key can be linked into a keyring.
  */
 int __key_link_begin(struct key *keyring, const struct key_type *type,
-		     const char *description,
-		     struct keyring_list **_prealloc)
+		     const char *description, unsigned long *_prealloc)
 	__acquires(&keyring->sem)
 {
 	struct keyring_list *klist, *nklist;
+	unsigned long prealloc;
 	unsigned max;
 	size_t size;
 	int loop, ret;
@@ -725,6 +748,7 @@
 
 				/* note replacement slot */
 				klist->delkey = nklist->delkey = loop;
+				prealloc = (unsigned long)nklist;
 				goto done;
 			}
 		}
@@ -739,6 +763,7 @@
 	if (klist && klist->nkeys < klist->maxkeys) {
 		/* there's sufficient slack space to append directly */
 		nklist = NULL;
+		prealloc = KEY_LINK_FIXQUOTA;
 	} else {
 		/* grow the key list */
 		max = 4;
@@ -773,8 +798,9 @@
 		nklist->keys[nklist->delkey] = NULL;
 	}
 
+	prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
 done:
-	*_prealloc = nklist;
+	*_prealloc = prealloc;
 	kleave(" = 0");
 	return 0;
 
@@ -792,10 +818,10 @@
 }
 
 /*
- * check already instantiated keys aren't going to be a problem
- * - the caller must have called __key_link_begin()
- * - don't need to call this for keys that were created since __key_link_begin()
- *   was called
+ * Check already instantiated keys aren't going to be a problem.
+ *
+ * The caller must have called __key_link_begin().  There is no need to call
+ * this for keys that were created after __key_link_begin() was called.
  */
 int __key_link_check_live_key(struct key *keyring, struct key *key)
 {
@@ -807,17 +833,20 @@
 }
 
 /*
- * link a key into to a keyring
- * - must be called with __key_link_begin() having being called
- * - discard already extant link to matching key if there is one
+ * Link a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.  Discards any
+ * already extant link to a matching key if there is one, so that each keyring
+ * holds at most one link to any given key of a particular type+description
+ * combination.
  */
 void __key_link(struct key *keyring, struct key *key,
-		struct keyring_list **_prealloc)
+		unsigned long *_prealloc)
 {
 	struct keyring_list *klist, *nklist;
 
-	nklist = *_prealloc;
-	*_prealloc = NULL;
+	nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA);
+	*_prealloc = 0;
 
 	kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
 
@@ -852,34 +881,54 @@
 }
 
 /*
- * finish linking a key into to a keyring
- * - must be called with __key_link_begin() having being called
+ * Finish linking a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.
  */
 void __key_link_end(struct key *keyring, struct key_type *type,
-		    struct keyring_list *prealloc)
+		    unsigned long prealloc)
 	__releases(&keyring->sem)
 {
 	BUG_ON(type == NULL);
 	BUG_ON(type->name == NULL);
-	kenter("%d,%s,%p", keyring->serial, type->name, prealloc);
+	kenter("%d,%s,%lx", keyring->serial, type->name, prealloc);
 
 	if (type == &key_type_keyring)
 		up_write(&keyring_serialise_link_sem);
 
 	if (prealloc) {
-		kfree(prealloc);
-		key_payload_reserve(keyring,
-				    keyring->datalen - KEYQUOTA_LINK_BYTES);
+		if (prealloc & KEY_LINK_FIXQUOTA)
+			key_payload_reserve(keyring,
+					    keyring->datalen -
+					    KEYQUOTA_LINK_BYTES);
+		kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA));
 	}
 	up_write(&keyring->sem);
 }
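
Taken together, __key_link_begin(), __key_link_check_live_key(), __key_link() and __key_link_end() form a prepare/commit protocol. The sketch below mirrors what key_link() itself does and is only illustrative; these helpers are declared in the subsystem's internal.h and the prealloc word is opaque to callers.

/* Sketch: link @key into @keyring using the low-level three-step API. */
static int example_link(struct key *keyring, struct key *key)
{
	unsigned long prealloc;
	int ret;

	ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
	if (ret < 0)
		return ret;

	ret = __key_link_check_live_key(keyring, key);
	if (ret == 0)
		__key_link(keyring, key, &prealloc);

	__key_link_end(keyring, key->type, prealloc);
	return ret;
}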
 
-/*
- * link a key to a keyring
+/**
+ * key_link - Link a key to a keyring
+ * @keyring: The keyring to make the link in.
+ * @key: The key to link to.
+ *
+ * Make a link in a keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring.
+ *
+ * This function will write-lock the keyring's semaphore and will consume some
+ * of the user's key data quota to hold the link.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
+ * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
+ * full, -EDQUOT if there is insufficient key data quota remaining to add
+ * another link or -ENOMEM if there's insufficient memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
  */
 int key_link(struct key *keyring, struct key *key)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	int ret;
 
 	key_check(keyring);
@@ -895,12 +944,24 @@
 
 	return ret;
 }
-
 EXPORT_SYMBOL(key_link);
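
As a usage sketch (the keyring and key are hypothetical, and the Write/Link permission checks are assumed to have been done by the caller as required above):

#include <linux/key.h>

/* Sketch: cache @key in @keyring, treating lack of room as non-fatal. */
static int example_cache_key(struct key *keyring, struct key *key)
{
	int ret = key_link(keyring, key);

	if (ret == -EDQUOT || ret == -ENFILE)
		ret = 0;	/* no quota or keyring full: skip caching */
	return ret;
}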
 
-/*****************************************************************************/
-/*
- * unlink the first link to a key from a keyring
+/**
+ * key_unlink - Unlink the first link to a key from a keyring.
+ * @keyring: The keyring to remove the link from.
+ * @key: The key the link is to.
+ *
+ * Remove a link from a keyring to a key.
+ *
+ * This function will write-lock the keyring's semaphore.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
+ * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
+ * memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be removed (the keyring should have Write permission; no permissions are
+ * required on the key).
  */
 int key_unlink(struct key *keyring, struct key *key)
 {
@@ -968,15 +1029,12 @@
 	ret = -ENOMEM;
 	up_write(&keyring->sem);
 	goto error;
-
-} /* end key_unlink() */
-
+}
 EXPORT_SYMBOL(key_unlink);
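
And the converse, again only a sketch, with Write permission on the keyring assumed to have been verified by the caller:

#include <linux/kernel.h>
#include <linux/key.h>

/* Sketch: drop a cached key from @keyring if it is present. */
static void example_uncache_key(struct key *keyring, struct key *key)
{
	int ret = key_unlink(keyring, key);

	if (ret && ret != -ENOENT)
		pr_debug("key_unlink failed: %d\n", ret);
}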
 
-/*****************************************************************************/
 /*
- * dispose of a keyring list after the RCU grace period, releasing the keys it
- * links to
+ * Dispose of a keyring list after the RCU grace period, releasing the keys it
+ * links to.
  */
 static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
 {
@@ -989,13 +1047,15 @@
 		key_put(klist->keys[loop]);
 
 	kfree(klist);
+}
 
-} /* end keyring_clear_rcu_disposal() */
-
-/*****************************************************************************/
-/*
- * clear the specified process keyring
- * - implements keyctl(KEYCTL_CLEAR)
+/**
+ * keyring_clear - Clear a keyring
+ * @keyring: The keyring to clear.
+ *
+ * Clear the contents of the specified keyring.
+ *
+ * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
  */
 int keyring_clear(struct key *keyring)
 {
@@ -1027,15 +1087,13 @@
 	}
 
 	return ret;
-
-} /* end keyring_clear() */
-
+}
 EXPORT_SYMBOL(keyring_clear);
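
A possible caller, sketched under the assumption that a reference to the keyring is already held:

#include <linux/kernel.h>
#include <linux/key.h>

/* Sketch: empty a cache keyring, e.g. on an invalidation event. */
static void example_flush_cache(struct key *cache_keyring)
{
	int ret = keyring_clear(cache_keyring);

	if (ret)	/* -ENOTDIR: not actually a keyring */
		pr_warn("keyring_clear failed: %d\n", ret);
}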
 
-/*****************************************************************************/
 /*
- * dispose of the links from a revoked keyring
- * - called with the key sem write-locked
+ * Dispose of the links from a revoked keyring.
+ *
+ * This is called with the key sem write-locked.
  */
 static void keyring_revoke(struct key *keyring)
 {
@@ -1050,11 +1108,10 @@
 		rcu_assign_pointer(keyring->payload.subscriptions, NULL);
 		call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
 	}
-
-} /* end keyring_revoke() */
+}
 
 /*
- * Determine whether a key is dead
+ * Determine whether a key is dead.
  */
 static bool key_is_dead(struct key *key, time_t limit)
 {
@@ -1063,7 +1120,12 @@
 }
 
 /*
- * Collect garbage from the contents of a keyring
+ * Collect garbage from the contents of a keyring, replacing the old list with
+ * a new one with the pointers all shuffled down.
+ *
+ * Dead keys are classed as ones that are flagged as being dead or are revoked,
+ * expired or negative keys that were revoked or expired before the specified
+ * limit.
  */
 void keyring_gc(struct key *keyring, time_t limit)
 {
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 2864550..c35b522 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -1,4 +1,4 @@
-/* permission.c: key permission determination
+/* Key permission checking
  *
  * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -13,18 +13,19 @@
 #include <linux/security.h>
 #include "internal.h"
 
-/*****************************************************************************/
 /**
  * key_task_permission - Check a key can be used
- * @key_ref: The key to check
- * @cred: The credentials to use
- * @perm: The permissions to check for
+ * @key_ref: The key to check.
+ * @cred: The credentials to use.
+ * @perm: The permissions to check for.
  *
  * Check to see whether permission is granted to use a key in the desired way,
  * but permit the security modules to override.
  *
- * The caller must hold either a ref on cred or must hold the RCU readlock or a
- * spinlock.
+ * The caller must hold either a ref on cred or must hold the RCU readlock.
+ *
+ * Returns 0 if successful, -EACCES if access is denied based on the
+ * permissions bits or the LSM check.
  */
 int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
 			key_perm_t perm)
@@ -79,14 +80,16 @@
 
 	/* let LSM be the final arbiter */
 	return security_key_permission(key_ref, cred, perm);
-
-} /* end key_task_permission() */
-
+}
 EXPORT_SYMBOL(key_task_permission);
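
A hedged sketch in the style of the callers inside security/keys; KEY_SEARCH here is the request-permission flag that this era of the code keeps in the subsystem's internal.h:

/* Sketch: may the given credentials search this (possibly possessed) key? */
static bool example_may_search(struct key *key, const struct cred *cred,
			       unsigned long possessed)
{
	return key_task_permission(make_key_ref(key, possessed),
				   cred, KEY_SEARCH) == 0;
}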
 
-/*****************************************************************************/
-/*
- * validate a key
+/**
+ * key_validate - Validate a key.
+ * @key: The key to be validated.
+ *
+ * Check that a key is valid, returning 0 if the key is okay, -EKEYREVOKED if
+ * the key's type has been removed or if the key has been revoked or
+ * -EKEYEXPIRED if the key has expired.
  */
 int key_validate(struct key *key)
 {
@@ -111,7 +114,5 @@
 
 error:
 	return ret;
-
-} /* end key_validate() */
-
+}
 EXPORT_SYMBOL(key_validate);
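
A typical pattern, sketched: validate before trusting the payload.

#include <linux/key.h>

/* Sketch: bail out early if @key is no longer usable. */
static int example_use_key(struct key *key)
{
	int ret = key_validate(key);

	if (ret < 0)
		return ret;	/* -EKEYREVOKED or -EKEYEXPIRED */

	/* ... safe to read key->payload under down_read(&key->sem) ... */
	return 0;
}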
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 70373966..525cf8a 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -1,4 +1,4 @@
-/* proc.c: proc files for key database enumeration
+/* procfs files for key database enumeration
  *
  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -60,9 +60,8 @@
 	.release	= seq_release,
 };
 
-/*****************************************************************************/
 /*
- * declare the /proc files
+ * Declare the /proc files.
  */
 static int __init key_proc_init(void)
 {
@@ -79,14 +78,13 @@
 		panic("Cannot create /proc/key-users\n");
 
 	return 0;
-
-} /* end key_proc_init() */
+}
 
 __initcall(key_proc_init);
 
-/*****************************************************************************/
 /*
- * implement "/proc/keys" to provides a list of the keys on the system
+ * Implement "/proc/keys" to provide a list of the keys on the system that
+ * grant View permission to the caller.
  */
 #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
 
@@ -293,9 +291,9 @@
 	return __key_user_next(n);
 }
 
-/*****************************************************************************/
 /*
- * implement "/proc/key-users" to provides a list of the key users
+ * Implement "/proc/key-users" to provides a list of the key users and their
+ * quotas.
  */
 static int proc_key_users_open(struct inode *inode, struct file *file)
 {
@@ -351,5 +349,4 @@
 		   maxbytes);
 
 	return 0;
-
 }
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 504bdd2..930634e 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -1,4 +1,4 @@
-/* Management of a process's keyrings
+/* Manage a process's keyrings
  *
  * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -21,13 +21,13 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
-/* session keyring create vs join semaphore */
+/* Session keyring create vs join semaphore */
 static DEFINE_MUTEX(key_session_mutex);
 
-/* user keyring creation semaphore */
+/* User keyring creation semaphore */
 static DEFINE_MUTEX(key_user_keyring_mutex);
 
-/* the root user's tracking struct */
+/* The root user's tracking struct */
 struct key_user root_key_user = {
 	.usage		= ATOMIC_INIT(3),
 	.cons_lock	= __MUTEX_INITIALIZER(root_key_user.cons_lock),
@@ -38,9 +38,8 @@
 	.user_ns	= &init_user_ns,
 };
 
-/*****************************************************************************/
 /*
- * install user and user session keyrings for a particular UID
+ * Install the user and user session keyrings for the current process's UID.
  */
 int install_user_keyrings(void)
 {
@@ -122,7 +121,8 @@
 }
 
 /*
- * install a fresh thread keyring directly to new credentials
+ * Install a fresh thread keyring directly to new credentials.  This keyring is
+ * allowed to overrun the quota.
  */
 int install_thread_keyring_to_cred(struct cred *new)
 {
@@ -138,7 +138,7 @@
 }
 
 /*
- * install a fresh thread keyring, discarding the old one
+ * Install a fresh thread keyring, discarding the old one.
  */
 static int install_thread_keyring(void)
 {
@@ -161,9 +161,10 @@
 }
 
 /*
- * install a process keyring directly to a credentials struct
- * - returns -EEXIST if there was already a process keyring, 0 if one installed,
- *   and other -ve on any other error
+ * Install a process keyring directly to a credentials struct.
+ *
+ * Returns -EEXIST if there was already a process keyring, 0 if one was
+ * installed, and a negative error code on any other failure.
  */
 int install_process_keyring_to_cred(struct cred *new)
 {
@@ -192,8 +193,11 @@
 }
 
 /*
- * make sure a process keyring is installed
- * - we
+ * Make sure a process keyring is installed for the current process.  The
+ * existing process keyring is not replaced.
+ *
+ * Returns 0 if there is a process keyring by the end of this function, or a
+ * negative error code otherwise.
  */
 static int install_process_keyring(void)
 {
@@ -214,7 +218,7 @@
 }
 
 /*
- * install a session keyring directly to a credentials struct
+ * Install a session keyring directly to a credentials struct.
  */
 int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
@@ -254,8 +258,8 @@
 }
 
 /*
- * install a session keyring, discarding the old one
- * - if a keyring is not supplied, an empty one is invented
+ * Install a session keyring, discarding the old one.  If a keyring is not
+ * supplied, an empty one is invented.
  */
 static int install_session_keyring(struct key *keyring)
 {
@@ -275,9 +279,8 @@
 	return commit_creds(new);
 }
 
-/*****************************************************************************/
 /*
- * the filesystem user ID changed
+ * Handle the fsuid changing.
  */
 void key_fsuid_changed(struct task_struct *tsk)
 {
@@ -288,12 +291,10 @@
 		tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
 		up_write(&tsk->cred->thread_keyring->sem);
 	}
+}
 
-} /* end key_fsuid_changed() */
-
-/*****************************************************************************/
 /*
- * the filesystem group ID changed
+ * Handle the fsgid changing.
  */
 void key_fsgid_changed(struct task_struct *tsk)
 {
@@ -304,16 +305,28 @@
 		tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
 		up_write(&tsk->cred->thread_keyring->sem);
 	}
+}
 
-} /* end key_fsgid_changed() */
-
-/*****************************************************************************/
 /*
- * search only my process keyrings for the first matching key
- * - we use the supplied match function to see if the description (or other
- *   feature of interest) matches
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we found only negative matching keys
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key.
+ *
+ * The search criteria are the type and the match function.  The description is
+ * given to the match function as a parameter, but doesn't otherwise influence
+ * the search.  Typically the match function will compare the description
+ * parameter to the key's description.
+ *
+ * This can only search keyrings that grant Search permission to the supplied
+ * credentials.  Keyrings linked to searched keyrings will also be searched if
+ * they grant Search permission too.  Keys can only be found if they grant
+ * Search permission to the credentials.
+ *
+ * Returns a pointer to the key with the key usage count incremented if
+ * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
+ * matched negative keys.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
  */
 key_ref_t search_my_process_keyrings(struct key_type *type,
 				     const void *description,
@@ -428,13 +441,13 @@
 	return key_ref;
 }
 
-/*****************************************************************************/
 /*
- * search the process keyrings for the first matching key
- * - we use the supplied match function to see if the description (or other
- *   feature of interest) matches
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we found only negative matching keys
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key in the manner of search_my_process_keyrings(), but also search
+ * the keys attached to the assumed authorisation key using its credentials if
+ * one is available.
+ *
+ * Returns the same as search_my_process_keyrings().
  */
 key_ref_t search_process_keyrings(struct key_type *type,
 				  const void *description,
@@ -489,24 +502,33 @@
 
 found:
 	return key_ref;
+}
 
-} /* end search_process_keyrings() */
-
-/*****************************************************************************/
 /*
- * see if the key we're looking at is the target key
+ * See if the key we're looking at is the target key.
  */
 int lookup_user_key_possessed(const struct key *key, const void *target)
 {
 	return key == target;
+}
 
-} /* end lookup_user_key_possessed() */
-
-/*****************************************************************************/
 /*
- * lookup a key given a key ID from userspace with a given permissions mask
- * - don't create special keyrings unless so requested
- * - partially constructed keys aren't found unless requested
+ * Look up a key ID given us by userspace with a given permissions mask to get
+ * the key it refers to.
+ *
+ * Flags can be passed to request that special keyrings be created if referred
+ * to directly, to permit partially constructed keys to be found and to skip
+ * validity and permission checks on the found key.
+ *
+ * Returns a pointer to the key with an incremented usage count if successful;
+ * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
+ * to a key or the best found key was a negative key; -EKEYREVOKED or
+ * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
+ * found key doesn't grant the requested permission or the LSM denied access
+ * to it; or -ENOMEM if a special keyring couldn't be created.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
  */
 key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
 			  key_perm_t perm)
@@ -711,15 +733,18 @@
 reget_creds:
 	put_cred(cred);
 	goto try_again;
+}
 
-} /* end lookup_user_key() */
-
-/*****************************************************************************/
 /*
- * join the named keyring as the session keyring if possible, or attempt to
- * create a new one of that name if not
- * - if the name is NULL, an empty anonymous keyring is installed instead
- * - named session keyring joining is done with a semaphore held
+ * Join the named keyring as the session keyring if possible else attempt to
+ * create a new one of that name and join that.
+ *
+ * If the name is NULL, an empty anonymous keyring will be installed as the
+ * session keyring.
+ *
+ * Named session keyrings are joined with a semaphore held to prevent the
+ * keyrings from going away whilst the attempt is made to join them and also
+ * to prevent a race in creating compatible session keyrings.
  */
 long join_session_keyring(const char *name)
 {
@@ -791,8 +816,8 @@
 }
 
 /*
- * Replace a process's session keyring when that process resumes userspace on
- * behalf of one of its children
+ * Replace a process's session keyring on behalf of one of its children when
+ * the target process is about to resume userspace execution.
  */
 void key_replace_session_keyring(void)
 {
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 0ea52d2..a3dc0d4 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -39,8 +39,14 @@
 	return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
-/*
- * call to complete the construction of a key
+/**
+ * complete_request_key - Complete the construction of a key.
+ * @cons: The key construction record.
+ * @error: The success or failure of the construction.
+ *
+ * Complete the attempt to construct a key.  The key will be negated
+ * if an error is indicated.  The authorisation key will be revoked
+ * unconditionally.
  */
 void complete_request_key(struct key_construction *cons, int error)
 {
@@ -58,23 +64,33 @@
 }
 EXPORT_SYMBOL(complete_request_key);
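
For context, a hedged sketch of a key type's ->request_key() operation that instantiates the key in kernel space rather than upcalling, then reports the outcome; the payload and names are made up, and the default implementation remains call_sbin_request_key() below.

#include <linux/key.h>
#include <linux/key-type.h>

/* Sketch: in-kernel instantiation instead of an upcall to /sbin/request-key. */
static int example_request_key(struct key_construction *cons,
			       const char *op, void *aux)
{
	static const char payload[] = "example";
	int ret;

	ret = key_instantiate_and_link(cons->key, payload, sizeof(payload) - 1,
				       NULL, cons->authkey);
	complete_request_key(cons, ret);
	return ret;
}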
 
+/*
+ * Initialise a usermode helper that is going to have a specific session
+ * keyring.
+ *
+ * This is called in the context of the freshly forked kthread before
+ * kernel_execve(), so we can simply install the desired session keyring here.
+ */
 static int umh_keys_init(struct subprocess_info *info)
 {
 	struct cred *cred = (struct cred*)current_cred();
 	struct key *keyring = info->data;
-	/*
-	 * This is called in context of freshly forked kthread before
-	 * kernel_execve(), we can just change our ->session_keyring.
-	 */
+
 	return install_session_keyring_to_cred(cred, keyring);
 }
 
+/*
+ * Clean up a usermode helper with session keyring.
+ */
 static void umh_keys_cleanup(struct subprocess_info *info)
 {
 	struct key *keyring = info->data;
 	key_put(keyring);
 }
 
+/*
+ * Call a usermode helper with a specific session keyring.
+ */
 static int call_usermodehelper_keys(char *path, char **argv, char **envp,
 			 struct key *session_keyring, enum umh_wait wait)
 {
@@ -91,7 +107,7 @@
 }
 
 /*
- * request userspace finish the construction of a key
+ * Request that userspace finish the construction of a key
  * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
  */
 static int call_sbin_request_key(struct key_construction *cons,
@@ -198,8 +214,9 @@
 }
 
 /*
- * call out to userspace for key construction
- * - we ignore program failure and go on key status instead
+ * Call out to userspace for key construction.
+ *
+ * Program failure is ignored in favour of key status.
  */
 static int construct_key(struct key *key, const void *callout_info,
 			 size_t callout_len, void *aux,
@@ -246,9 +263,10 @@
 }
 
 /*
- * get the appropriate destination keyring for the request
- * - we return whatever keyring we select with an extra reference upon it which
- *   the caller must release
+ * Get the appropriate destination keyring for the request.
+ *
+ * The keyring selected is returned with an extra reference upon it which the
+ * caller must release.
  */
 static void construct_get_dest_keyring(struct key **_dest_keyring)
 {
@@ -321,9 +339,11 @@
 }
 
 /*
- * allocate a new key in under-construction state and attempt to link it in to
- * the requested place
- * - may return a key that's already under construction instead
+ * Allocate a new key in under-construction state and attempt to link it into
+ * the requested keyring.
+ *
+ * May return a key that's already under construction instead if there was a
+ * race between two threads calling request_key().
  */
 static int construct_alloc_key(struct key_type *type,
 			       const char *description,
@@ -332,8 +352,8 @@
 			       struct key_user *user,
 			       struct key **_key)
 {
-	struct keyring_list *prealloc;
 	const struct cred *cred = current_cred();
+	unsigned long prealloc;
 	struct key *key;
 	key_ref_t key_ref;
 	int ret;
@@ -414,7 +434,7 @@
 }
 
 /*
- * commence key construction
+ * Commence key construction.
  */
 static struct key *construct_key_and_link(struct key_type *type,
 					  const char *description,
@@ -465,12 +485,32 @@
 	return ERR_PTR(ret);
 }
 
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - cache the key in an appropriate keyring
+/**
+ * request_key_and_link - Request a key and cache it in a keyring.
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ * @dest_keyring: Where to cache the key.
+ * @flags: Flags to key_alloc().
+ *
+ * A key matching the specified criteria is searched for in the process's
+ * keyrings and returned with its usage count incremented if found.  Otherwise,
+ * if callout_info is not NULL, a key will be allocated and some service
+ * (probably in userspace) will be asked to instantiate it.
+ *
+ * If successfully found or created, the key will be linked to the destination
+ * keyring if one is provided.
+ *
+ * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
+ * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
+ * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
+ * if insufficient key quota was available to create a new key; or -ENOMEM if
+ * insufficient memory was available.
+ *
+ * If the returned key was created, then it may still be under construction,
+ * and wait_for_key_construction() should be used to wait for that to complete.
  */
 struct key *request_key_and_link(struct key_type *type,
 				 const char *description,
@@ -524,8 +564,16 @@
 	return key;
 }
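
Since request_key_and_link() is internal to security/keys, the sketch below only illustrates how a keyctl-style caller drives it; the description, callout data and destination keyring are hypothetical.

/* Sketch: request a user key and cache it in @dest, as keyctl would. */
static struct key *example_request_cached(const char *desc, struct key *dest)
{
	return request_key_and_link(&key_type_user, desc,
				    "callout-data", 12, NULL,
				    dest, KEY_ALLOC_IN_QUOTA);
}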
 
-/*
- * wait for construction of a key to complete
+/**
+ * wait_for_key_construction - Wait for construction of a key to complete
+ * @key: The key being waited for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * Wait for a key to finish being constructed.
+ *
+ * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
+ * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
+ * revoked or expired.
  */
 int wait_for_key_construction(struct key *key, bool intr)
 {
@@ -542,12 +590,19 @@
 }
 EXPORT_SYMBOL(wait_for_key_construction);
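
A sketch pairing the asynchronous request below with this wait; the description and callout data are hypothetical.

#include <linux/err.h>
#include <linux/key.h>
#include <keys/user-type.h>

/* Sketch: start construction, do other work, then wait interruptibly. */
static struct key *example_async_request(void)
{
	struct key *key;
	int ret;

	key = request_key_async(&key_type_user, "example:async", "data", 4);
	if (IS_ERR(key))
		return key;

	/* ... other setup could run while userspace instantiates the key ... */

	ret = wait_for_key_construction(key, true);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}
	return key;
}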
 
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found; new keys are always allocated in the user's quota;
+ * the callout_info must be a NUL-terminated string; and no auxiliary data can
+ * be passed.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
  */
 struct key *request_key(struct key_type *type,
 			const char *description,
@@ -572,12 +627,19 @@
 }
 EXPORT_SYMBOL(request_key);
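
The synchronous form, as a minimal hedged sketch; the description is hypothetical and the payload of a user-defined key is only touched under the key semaphore.

#include <linux/err.h>
#include <linux/key.h>
#include <keys/user-type.h>

/* Sketch: fetch a token held as a user-defined key. */
static int example_get_token(void)
{
	struct key *key;

	key = request_key(&key_type_user, "example:token", NULL);
	if (IS_ERR(key))
		return PTR_ERR(key);

	down_read(&key->sem);
	/* key->payload.data points to this type's struct user_key_payload */
	up_read(&key->sem);

	key_put(key);
	return 0;
}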
 
-/*
- * request a key with auxiliary data for the upcaller
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
  */
 struct key *request_key_with_auxdata(struct key_type *type,
 				     const char *description,
@@ -602,10 +664,18 @@
 EXPORT_SYMBOL(request_key_with_auxdata);
 
 /*
- * request a key (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * request_key_async - Request a key (allow async construction)
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota and
+ * no auxiliary data can be passed.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
  */
 struct key *request_key_async(struct key_type *type,
 			      const char *description,
@@ -620,9 +690,17 @@
 
 /*
  * request a key with auxiliary data for the upcaller (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
  */
 struct key *request_key_async_with_auxdata(struct key_type *type,
 					   const char *description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 8674715..6816403 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -1,4 +1,4 @@
-/* request_key_auth.c: request key authorisation controlling key def
+/* Request key authorisation token key definition.
  *
  * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -26,7 +26,7 @@
 static long request_key_auth_read(const struct key *, char __user *, size_t);
 
 /*
- * the request-key authorisation key type definition
+ * The request-key authorisation key type definition.
  */
 struct key_type key_type_request_key_auth = {
 	.name		= ".request_key_auth",
@@ -38,9 +38,8 @@
 	.read		= request_key_auth_read,
 };
 
-/*****************************************************************************/
 /*
- * instantiate a request-key authorisation key
+ * Instantiate a request-key authorisation key.
  */
 static int request_key_auth_instantiate(struct key *key,
 					const void *data,
@@ -48,12 +47,10 @@
 {
 	key->payload.data = (struct request_key_auth *) data;
 	return 0;
+}
 
-} /* end request_key_auth_instantiate() */
-
-/*****************************************************************************/
 /*
- * reading a request-key authorisation key retrieves the callout information
+ * Describe an authorisation token.
  */
 static void request_key_auth_describe(const struct key *key,
 				      struct seq_file *m)
@@ -63,12 +60,10 @@
 	seq_puts(m, "key:");
 	seq_puts(m, key->description);
 	seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+}
 
-} /* end request_key_auth_describe() */
-
-/*****************************************************************************/
 /*
- * read the callout_info data
+ * Read the callout_info data (retrieves the callout information).
  * - the key's semaphore is read-locked
  */
 static long request_key_auth_read(const struct key *key,
@@ -91,13 +86,12 @@
 	}
 
 	return ret;
+}
 
-} /* end request_key_auth_read() */
-
-/*****************************************************************************/
 /*
- * handle revocation of an authorisation token key
- * - called with the key sem write-locked
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
  */
 static void request_key_auth_revoke(struct key *key)
 {
@@ -109,12 +103,10 @@
 		put_cred(rka->cred);
 		rka->cred = NULL;
 	}
+}
 
-} /* end request_key_auth_revoke() */
-
-/*****************************************************************************/
 /*
- * destroy an instantiation authorisation token key
+ * Destroy an instantiation authorisation token key.
  */
 static void request_key_auth_destroy(struct key *key)
 {
@@ -131,13 +123,11 @@
 	key_put(rka->dest_keyring);
 	kfree(rka->callout_info);
 	kfree(rka);
+}
 
-} /* end request_key_auth_destroy() */
-
-/*****************************************************************************/
 /*
- * create an authorisation token for /sbin/request-key or whoever to gain
- * access to the caller's security data
+ * Create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data.
  */
 struct key *request_key_auth_new(struct key *target, const void *callout_info,
 				 size_t callout_len, struct key *dest_keyring)
@@ -228,12 +218,10 @@
 	kfree(rka);
 	kleave("= %d", ret);
 	return ERR_PTR(ret);
+}
 
-} /* end request_key_auth_new() */
-
-/*****************************************************************************/
 /*
- * see if an authorisation key is associated with a particular key
+ * See if an authorisation key is associated with a particular key.
  */
 static int key_get_instantiation_authkey_match(const struct key *key,
 					       const void *_id)
@@ -242,16 +230,11 @@
 	key_serial_t id = (key_serial_t)(unsigned long) _id;
 
 	return rka->target_key->serial == id;
+}
 
-} /* end key_get_instantiation_authkey_match() */
-
-/*****************************************************************************/
 /*
- * get the authorisation key for instantiation of a specific key if attached to
- * the current process's keyrings
- * - this key is inserted into a keyring and that is set as /sbin/request-key's
- *   session keyring
- * - a target_id of zero specifies any valid token
+ * Search the current process's keyrings for the authorisation key for
+ * instantiation of a key.
  */
 struct key *key_get_instantiation_authkey(key_serial_t target_id)
 {
@@ -278,5 +261,4 @@
 
 error:
 	return authkey;
-
-} /* end key_get_instantiation_authkey() */
+}
diff --git a/security/keys/trusted_defined.c b/security/keys/trusted.c
similarity index 97%
rename from security/keys/trusted_defined.c
rename to security/keys/trusted.c
index 975e9f2..83fc92e 100644
--- a/security/keys/trusted_defined.c
+++ b/security/keys/trusted.c
@@ -29,7 +29,7 @@
 #include <linux/tpm.h>
 #include <linux/tpm_command.h>
 
-#include "trusted_defined.h"
+#include "trusted.h"
 
 static const char hmac_alg[] = "hmac(sha1)";
 static const char hash_alg[] = "sha1";
@@ -101,11 +101,13 @@
 		if (dlen == 0)
 			break;
 		data = va_arg(argp, unsigned char *);
-		if (data == NULL)
-			return -EINVAL;
+		if (data == NULL) {
+			ret = -EINVAL;
+			break;
+		}
 		ret = crypto_shash_update(&sdesc->shash, data, dlen);
 		if (ret < 0)
-			goto out;
+			break;
 	}
 	va_end(argp);
 	if (!ret)
@@ -146,14 +148,17 @@
 		if (dlen == 0)
 			break;
 		data = va_arg(argp, unsigned char *);
-		ret = crypto_shash_update(&sdesc->shash, data, dlen);
-		if (ret < 0) {
-			va_end(argp);
-			goto out;
+		if (!data) {
+			ret = -EINVAL;
+			break;
 		}
+		ret = crypto_shash_update(&sdesc->shash, data, dlen);
+		if (ret < 0)
+			break;
 	}
 	va_end(argp);
-	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
 	if (!ret)
 		ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
 				  paramdigest, TPM_NONCE_SIZE, h1,
@@ -222,13 +227,12 @@
 			break;
 		dpos = va_arg(argp, unsigned int);
 		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
-		if (ret < 0) {
-			va_end(argp);
-			goto out;
-		}
+		if (ret < 0)
+			break;
 	}
 	va_end(argp);
-	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
 	if (ret < 0)
 		goto out;
 
@@ -316,13 +320,12 @@
 			break;
 		dpos = va_arg(argp, unsigned int);
 		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
-		if (ret < 0) {
-			va_end(argp);
-			goto out;
-		}
+		if (ret < 0)
+			break;
 	}
 	va_end(argp);
-	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
 	if (ret < 0)
 		goto out;
 
@@ -511,7 +514,7 @@
 	/* get session for sealing key */
 	ret = osap(tb, &sess, keyauth, keytype, keyhandle);
 	if (ret < 0)
-		return ret;
+		goto out;
 	dump_sess(&sess);
 
 	/* calculate encrypted authorization value */
@@ -519,11 +522,11 @@
 	memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
 	ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	ret = tpm_get_random(tb, td->nonceodd, TPM_NONCE_SIZE);
 	if (ret < 0)
-		return ret;
+		goto out;
 	ordinal = htonl(TPM_ORD_SEAL);
 	datsize = htonl(datalen);
 	pcrsize = htonl(pcrinfosize);
@@ -552,7 +555,7 @@
 				   &datsize, datalen, data, 0, 0);
 	}
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	/* build and send the TPM request packet */
 	INIT_BUF(tb);
@@ -572,7 +575,7 @@
 
 	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	/* calculate the size of the returned Blob */
 	sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
@@ -591,6 +594,8 @@
 		memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
 		*bloblen = storedsize;
 	}
+out:
+	kfree(td);
 	return ret;
 }
 
@@ -1027,6 +1032,7 @@
 	ret = datablob_parse(datablob, new_p, new_o);
 	if (ret != Opt_update) {
 		ret = -EINVAL;
+		kfree(new_p);
 		goto out;
 	}
 	/* copy old key values, and reseal with new pcrs */
diff --git a/security/keys/trusted_defined.h b/security/keys/trusted.h
similarity index 100%
rename from security/keys/trusted_defined.h
rename to security/keys/trusted.h
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index e9aa079..02807fb 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -35,7 +35,6 @@
 
 EXPORT_SYMBOL_GPL(key_type_user);
 
-/*****************************************************************************/
 /*
  * instantiate a user defined key
  */
@@ -65,12 +64,10 @@
 
 error:
 	return ret;
-
-} /* end user_instantiate() */
+}
 
 EXPORT_SYMBOL_GPL(user_instantiate);
 
-/*****************************************************************************/
 /*
  * dispose of the old data from an updated user defined key
  */
@@ -81,10 +78,8 @@
 	upayload = container_of(rcu, struct user_key_payload, rcu);
 
 	kfree(upayload);
+}
 
-} /* end user_update_rcu_disposal() */
-
-/*****************************************************************************/
 /*
  * update a user defined key
  * - the key's semaphore is write-locked
@@ -123,24 +118,20 @@
 
 error:
 	return ret;
-
-} /* end user_update() */
+}
 
 EXPORT_SYMBOL_GPL(user_update);
 
-/*****************************************************************************/
 /*
  * match users on their name
  */
 int user_match(const struct key *key, const void *description)
 {
 	return strcmp(key->description, description) == 0;
-
-} /* end user_match() */
+}
 
 EXPORT_SYMBOL_GPL(user_match);
 
-/*****************************************************************************/
 /*
  * dispose of the links from a revoked keyring
  * - called with the key sem write-locked
@@ -156,12 +147,10 @@
 		rcu_assign_pointer(key->payload.data, NULL);
 		call_rcu(&upayload->rcu, user_update_rcu_disposal);
 	}
-
-} /* end user_revoke() */
+}
 
 EXPORT_SYMBOL(user_revoke);
 
-/*****************************************************************************/
 /*
  * dispose of the data dangling from the corpse of a user key
  */
@@ -170,12 +159,10 @@
 	struct user_key_payload *upayload = key->payload.data;
 
 	kfree(upayload);
-
-} /* end user_destroy() */
+}
 
 EXPORT_SYMBOL_GPL(user_destroy);
 
-/*****************************************************************************/
 /*
  * describe the user key
  */
@@ -184,12 +171,10 @@
 	seq_puts(m, key->description);
 
 	seq_printf(m, ": %u", key->datalen);
-
-} /* end user_describe() */
+}
 
 EXPORT_SYMBOL_GPL(user_describe);
 
-/*****************************************************************************/
 /*
  * read the key data
  * - the key's semaphore is read-locked
@@ -213,7 +198,6 @@
 	}
 
 	return ret;
-
-} /* end user_read() */
+}
 
 EXPORT_SYMBOL_GPL(user_read);
diff --git a/security/security.c b/security/security.c
index 739e403..7b7308a 100644
--- a/security/security.c
+++ b/security/security.c
@@ -154,10 +154,9 @@
 				    effective, inheritable, permitted);
 }
 
-int security_capable(int cap)
+int security_capable(const struct cred *cred, int cap)
 {
-	return security_ops->capable(current, current_cred(), cap,
-				     SECURITY_CAP_AUDIT);
+	return security_ops->capable(current, cred, cap, SECURITY_CAP_AUDIT);
 }
 
 int security_real_capable(struct task_struct *tsk, int cap)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e276eb4..c8d6992 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3198,7 +3198,11 @@
 {
 	struct task_security_struct *tsec = cred->security;
 
-	BUG_ON((unsigned long) cred->security < PAGE_SIZE);
+	/*
+	 * cred->security == NULL if security_cred_alloc_blank() or
+	 * security_prepare_creds() returned an error.
+	 */
+	BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
 	cred->security = (void *) 0x7UL;
 	kfree(tsec);
 }
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index c3f845c..a533732 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -178,7 +178,7 @@
 	p->bool_val_to_struct = (struct cond_bool_datum **)
 		kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
 	if (!p->bool_val_to_struct)
-		return -1;
+		return -ENOMEM;
 	return 0;
 }
 
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index be9de38..5736356 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -501,8 +501,8 @@
 	if (rc)
 		goto out;
 
-	rc = -ENOMEM;
-	if (cond_init_bool_indexes(p))
+	rc = cond_init_bool_indexes(p);
+	if (rc)
 		goto out;
 
 	for (i = 0; i < SYM_NUM; i++) {
diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c
index a351dd0..2b50cbe 100644
--- a/sound/ac97_bus.c
+++ b/sound/ac97_bus.c
@@ -19,8 +19,8 @@
 
 /*
  * Let drivers decide whether they want to support given codec from their
- * probe method.  Drivers have direct access to the struct snd_ac97 structure and may
- * decide based on the id field amongst other things.
+ * probe method. Drivers have direct access to the struct snd_ac97
+ * structure and may decide based on the id field amongst other things.
  */
 static int ac97_bus_match(struct device *dev, struct device_driver *drv)
 {
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 91852e4..3687a6c 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -1114,7 +1114,6 @@
 	of_node_put(onyx->codec.node);
 	if (onyx->codec_info)
 		kfree(onyx->codec_info);
-	i2c_set_clientdata(client, onyx);
 	kfree(onyx);
 	return 0;
 }
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index de8e03a..faa3174 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -287,10 +287,9 @@
 		free_irq(linein_detect_irq, &rt->line_in_notify);
 	if (rt->line_out_notify.gpio_private)
 		free_irq(lineout_detect_irq, &rt->line_out_notify);
-	cancel_delayed_work(&rt->headphone_notify.work);
-	cancel_delayed_work(&rt->line_in_notify.work);
-	cancel_delayed_work(&rt->line_out_notify.work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rt->headphone_notify.work);
+	cancel_delayed_work_sync(&rt->line_in_notify.work);
+	cancel_delayed_work_sync(&rt->line_out_notify.work);
 	mutex_destroy(&rt->headphone_notify.mutex);
 	mutex_destroy(&rt->line_in_notify.mutex);
 	mutex_destroy(&rt->line_out_notify.mutex);
diff --git a/sound/aoa/core/gpio-pmf.c b/sound/aoa/core/gpio-pmf.c
index 7e267c9..c8d8a1a 100644
--- a/sound/aoa/core/gpio-pmf.c
+++ b/sound/aoa/core/gpio-pmf.c
@@ -107,10 +107,9 @@
 
 	/* make sure no work is pending before freeing
 	 * all things */
-	cancel_delayed_work(&rt->headphone_notify.work);
-	cancel_delayed_work(&rt->line_in_notify.work);
-	cancel_delayed_work(&rt->line_out_notify.work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rt->headphone_notify.work);
+	cancel_delayed_work_sync(&rt->line_in_notify.work);
+	cancel_delayed_work_sync(&rt->line_out_notify.work);
 
 	mutex_destroy(&rt->headphone_notify.mutex);
 	mutex_destroy(&rt->line_in_notify.mutex);
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 91acc9a..7c1fc64 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -30,6 +30,8 @@
 
 #define DRIVER_NAME	"aaci-pl041"
 
+#define FRAME_PERIOD_US	21
+
 /*
  * PM support is not complete.  Turn it off.
  */
@@ -48,7 +50,11 @@
 	if (v & SLFR_1RXV)
 		readl(aaci->base + AACI_SL1RX);
 
-	writel(maincr, aaci->base + AACI_MAINCR);
+	if (maincr != readl(aaci->base + AACI_MAINCR)) {
+		writel(maincr, aaci->base + AACI_MAINCR);
+		readl(aaci->base + AACI_MAINCR);
+		udelay(1);
+	}
 }
 
 /*
@@ -64,8 +70,8 @@
 			    unsigned short val)
 {
 	struct aaci *aaci = ac97->private_data;
+	int timeout;
 	u32 v;
-	int timeout = 5000;
 
 	if (ac97->num >= 4)
 		return;
@@ -81,14 +87,17 @@
 	writel(val << 4, aaci->base + AACI_SL2TX);
 	writel(reg << 12, aaci->base + AACI_SL1TX);
 
-	/*
-	 * Wait for the transmission of both slots to complete.
-	 */
+	/* Initially, wait one frame period */
+	udelay(FRAME_PERIOD_US);
+
+	/* And then wait an additional eight frame periods for it to be sent */
+	timeout = FRAME_PERIOD_US * 8;
 	do {
+		udelay(1);
 		v = readl(aaci->base + AACI_SLFR);
 	} while ((v & (SLFR_1TXB|SLFR_2TXB)) && --timeout);
 
-	if (!timeout)
+	if (v & (SLFR_1TXB|SLFR_2TXB))
 		dev_err(&aaci->dev->dev,
 			"timeout waiting for write to complete\n");
 
@@ -101,9 +110,8 @@
 static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
 {
 	struct aaci *aaci = ac97->private_data;
+	int timeout, retries = 10;
 	u32 v;
-	int timeout = 5000;
-	int retries = 10;
 
 	if (ac97->num >= 4)
 		return ~0;
@@ -117,35 +125,34 @@
 	 */
 	writel((reg << 12) | (1 << 19), aaci->base + AACI_SL1TX);
 
-	/*
-	 * Wait for the transmission to complete.
-	 */
+	/* Initially, wait one frame period */
+	udelay(FRAME_PERIOD_US);
+
+	/* And then wait an additional eight frame periods for it to be sent */
+	timeout = FRAME_PERIOD_US * 8;
 	do {
+		udelay(1);
 		v = readl(aaci->base + AACI_SLFR);
 	} while ((v & SLFR_1TXB) && --timeout);
 
-	if (!timeout) {
+	if (v & SLFR_1TXB) {
 		dev_err(&aaci->dev->dev, "timeout on slot 1 TX busy\n");
 		v = ~0;
 		goto out;
 	}
 
-	/*
-	 * Give the AC'97 codec more than enough time
-	 * to respond. (42us = ~2 frames at 48kHz.)
-	 */
-	udelay(42);
+	/* Now wait for the response frame */
+	udelay(FRAME_PERIOD_US);
 
-	/*
-	 * Wait for slot 2 to indicate data.
-	 */
-	timeout = 5000;
+	/* And then wait an additional eight frame periods for data */
+	timeout = FRAME_PERIOD_US * 8;
 	do {
+		udelay(1);
 		cond_resched();
 		v = readl(aaci->base + AACI_SLFR) & (SLFR_1RXV|SLFR_2RXV);
 	} while ((v != (SLFR_1RXV|SLFR_2RXV)) && --timeout);
 
-	if (!timeout) {
+	if (v != (SLFR_1RXV|SLFR_2RXV)) {
 		dev_err(&aaci->dev->dev, "timeout on RX valid\n");
 		v = ~0;
 		goto out;
@@ -179,6 +186,7 @@
 	int timeout = 5000;
 
 	do {
+		udelay(1);
 		val = readl(aacirun->base + AACI_SR);
 	} while (val & mask && timeout--);
 }
@@ -874,7 +882,7 @@
 	 * Give the AC'97 codec more than enough time
 	 * to wake up. (42us = ~2 frames at 48kHz.)
 	 */
-	udelay(42);
+	udelay(FRAME_PERIOD_US * 2);
 
 	ret = snd_ac97_bus(aaci->card, 0, &aaci_bus_ops, aaci, &ac97_bus);
 	if (ret)
@@ -989,6 +997,8 @@
 	 * disabling the channel doesn't clear the FIFO.
 	 */
 	writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR);
+	readl(aaci->base + AACI_MAINCR);
+	udelay(1);
 	writel(aaci->maincr, aaci->base + AACI_MAINCR);
 
 	/*
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 10c3a87..b310702 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -33,9 +33,12 @@
 #include <linux/dw_dmac.h>
 
 #include <mach/cpu.h>
-#include <mach/hardware.h>
 #include <mach/gpio.h>
 
+#ifdef CONFIG_ARCH_AT91
+#include <mach/hardware.h>
+#endif
+
 #include "ac97c.h"
 
 enum {
diff --git a/sound/core/control.c b/sound/core/control.c
index 45a8180..9ce00ed 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1488,7 +1488,7 @@
 }
 
 /*
- * Frequently used control callbacks
+ * Frequently used control callbacks/helpers
  */
 int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_info *uinfo)
@@ -1513,3 +1513,29 @@
 }
 
 EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
+
+/**
+ * snd_ctl_enum_info - fills the info structure for an enumerated control
+ * @info: the structure to be filled
+ * @channels: the number of the control's channels; often one
+ * @items: the number of control values; also the size of @names
+ * @names: an array containing the names of all control values
+ *
+ * Sets all required fields in @info to their appropriate values.
+ * If the control's accessibility is not the default (readable and writable),
+ * the caller has to fill @info->access.
+ */
+int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
+		      unsigned int items, const char *const names[])
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	info->count = channels;
+	info->value.enumerated.items = items;
+	if (info->value.enumerated.item >= items)
+		info->value.enumerated.item = items - 1;
+	strlcpy(info->value.enumerated.name,
+		names[info->value.enumerated.item],
+		sizeof(info->value.enumerated.name));
+	return 0;
+}
+EXPORT_SYMBOL(snd_ctl_enum_info);
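
A driver-side sketch of the new helper in an enumerated control's .info callback; the control texts are made up.

#include <linux/kernel.h>
#include <sound/core.h>
#include <sound/control.h>

static const char *const example_route_texts[] = { "Speaker", "Headphone" };

/* Sketch: .info callback for a single-channel enumerated mixer control. */
static int example_route_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(example_route_texts),
				 example_route_texts);
}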
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 7730575..b8b31c4 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -45,12 +45,13 @@
 {
 	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
 	struct snd_timer *t = stime->timer;
+	unsigned long oruns;
 
 	if (!atomic_read(&stime->running))
 		return HRTIMER_NORESTART;
 
-	hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
-	snd_timer_interrupt(stime->timer, t->sticks);
+	oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
+	snd_timer_interrupt(stime->timer, t->sticks * oruns);
 
 	if (!atomic_read(&stime->running))
 		return HRTIMER_NORESTART;
@@ -104,7 +105,7 @@
 }
 
 static struct snd_timer_hardware hrtimer_hw = {
-	.flags =	SNDRV_TIMER_HW_AUTO,
+	.flags =	SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET,
 	.open =		snd_hrtimer_open,
 	.close =	snd_hrtimer_close,
 	.start =	snd_hrtimer_start,
diff --git a/sound/core/init.c b/sound/core/init.c
index 57b792e2..3e65da2 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -642,7 +642,7 @@
  *  external accesses.  Thus, you should call this function at the end
  *  of the initialization of the card.
  *
- *  Returns zero otherwise a negative error code if the registrain failed.
+ *  Returns zero, otherwise a negative error code if the registration failed.
  */
 int snd_card_register(struct snd_card *card)
 {
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 4902ae5..53b53e9 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -141,6 +141,7 @@
 
 fail_input:
 	input_free_device(jack->input_dev);
+	kfree(jack->id);
 	kfree(jack);
 	return err;
 }
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index b753ec6..a2e4eb3 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -453,8 +453,10 @@
 	} else {
 		*params = *save;
 		max = snd_pcm_hw_param_max(pcm, params, var, max, &maxdir);
-		if (max < 0)
+		if (max < 0) {
+			kfree(save);
 			return max;
+		}
 		last = 1;
 	}
  _end:
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 11446a1..a82e3756 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -373,6 +373,27 @@
 			   (unsigned long)new_hw_ptr,
 			   (unsigned long)runtime->hw_ptr_base);
 	}
+
+	if (runtime->no_period_wakeup) {
+		/*
+		 * Without regular period interrupts, we have to check
+		 * the elapsed time to detect xruns.
+		 */
+		jdelta = jiffies - runtime->hw_ptr_jiffies;
+		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
+			goto no_delta_check;
+		hdelta = jdelta - delta * HZ / runtime->rate;
+		while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) {
+			delta += runtime->buffer_size;
+			hw_base += runtime->buffer_size;
+			if (hw_base >= runtime->boundary)
+				hw_base = 0;
+			new_hw_ptr = hw_base + pos;
+			hdelta -= runtime->hw_ptr_buffer_jiffies;
+		}
+		goto no_delta_check;
+	}
+
 	/* something must be really wrong */
 	if (delta >= runtime->buffer_size + runtime->period_size) {
 		hw_ptr_error(substream,
@@ -442,6 +463,7 @@
 			     (long)old_hw_ptr);
 	}
 
+ no_delta_check:
 	if (runtime->status->hw_ptr == new_hw_ptr)
 		return 0;
 
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index e82c1f9..4be45e7 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -422,6 +422,9 @@
 	runtime->info = params->info;
 	runtime->rate_num = params->rate_num;
 	runtime->rate_den = params->rate_den;
+	runtime->no_period_wakeup =
+			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
+			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
 
 	bits = snd_pcm_format_physical_width(runtime->format);
 	runtime->sample_bits = bits;
@@ -984,7 +987,7 @@
 	if (push)
 		snd_pcm_update_hw_ptr(substream);
 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
-	 * a delta betwen the current jiffies, this gives a large enough
+	 * a delta between the current jiffies, this gives a large enough
 	 * delta, effectively to skip the check once.
 	 */
 	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
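
runtime->no_period_wakeup is only set when both sides agree: the driver advertises SNDRV_PCM_INFO_NO_PERIOD_WAKEUP in its capabilities and the application requested it through the hw_params flag. A trivial sketch of that capability-and-request check; the bit values here are invented for the demo and are not the real ALSA constants.

#include <stdio.h>
#include <stdbool.h>

#define CAP_NO_PERIOD_WAKEUP	(1u << 0)	/* "driver can do it" */
#define REQ_NO_PERIOD_WAKEUP	(1u << 0)	/* "application asked for it" */

static bool no_period_wakeup(unsigned int hw_info, unsigned int params_flags)
{
	return (hw_info & CAP_NO_PERIOD_WAKEUP) &&
	       (params_flags & REQ_NO_PERIOD_WAKEUP);
}

int main(void)
{
	printf("cap only: %d\n", no_period_wakeup(CAP_NO_PERIOD_WAKEUP, 0));
	printf("both set: %d\n", no_period_wakeup(CAP_NO_PERIOD_WAKEUP,
						  REQ_NO_PERIOD_WAKEUP));
	return 0;
}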
diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c
index bf09a5a..119fddb6 100644
--- a/sound/core/seq/seq.c
+++ b/sound/core/seq/seq.c
@@ -32,6 +32,7 @@
 #include "seq_timer.h"
 #include "seq_system.h"
 #include "seq_info.h"
+#include <sound/minors.h>
 #include <sound/seq_device.h>
 
 #if defined(CONFIG_SND_SEQ_DUMMY_MODULE)
@@ -73,6 +74,9 @@
 module_param(seq_default_timer_resolution, int, 0644);
 MODULE_PARM_DESC(seq_default_timer_resolution, "The default timer resolution in Hz.");
 
+MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_SEQUENCER);
+MODULE_ALIAS("devname:snd/seq");
+
 /*
  *  INIT PART
  */
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 66691fe..1c7a3ef 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -188,14 +188,22 @@
 };
 
 #ifdef CONFIG_SND_DYNAMIC_MINORS
-static int snd_find_free_minor(void)
+static int snd_find_free_minor(int type)
 {
 	int minor;
 
+	/* static minors for module auto loading */
+	if (type == SNDRV_DEVICE_TYPE_SEQUENCER)
+		return SNDRV_MINOR_SEQUENCER;
+	if (type == SNDRV_DEVICE_TYPE_TIMER)
+		return SNDRV_MINOR_TIMER;
+
 	for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor) {
-		/* skip minors still used statically for autoloading devices */
-		if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL ||
-		    minor == SNDRV_MINOR_SEQUENCER)
+		/* skip static minors still used for module auto loading */
+		if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL)
+			continue;
+		if (minor == SNDRV_MINOR_SEQUENCER ||
+		    minor == SNDRV_MINOR_TIMER)
 			continue;
 		if (!snd_minors[minor])
 			return minor;
@@ -269,7 +277,7 @@
 	preg->private_data = private_data;
 	mutex_lock(&sound_mutex);
 #ifdef CONFIG_SND_DYNAMIC_MINORS
-	minor = snd_find_free_minor();
+	minor = snd_find_free_minor(type);
 #else
 	minor = snd_kernel_minor(type, card, dev);
 	if (minor >= 0 && snd_minors[minor])
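
snd_find_free_minor() now keeps the sequencer and timer minors out of the dynamic pool, so opening /dev/snd/seq or /dev/snd/timer can still trigger module autoloading via the fixed device numbers declared by the new MODULE_ALIAS lines. A userspace sketch of a slot allocator that skips reserved indices; the table size and reserved slots are arbitrary for the demo.

#include <stdio.h>
#include <stdbool.h>

#define NR_SLOTS	16
/* pretend these two indices are reserved for autoloaded devices */
#define SLOT_SEQ	1
#define SLOT_TIMER	2

static bool used[NR_SLOTS];

static int find_free_slot(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		if (i == SLOT_SEQ || i == SLOT_TIMER)
			continue;	/* keep static slots free for autoload */
		if (!used[i])
			return i;
	}
	return -1;	/* no free slot */
}

int main(void)
{
	used[0] = true;
	printf("first dynamic slot: %d\n", find_free_slot());	/* 3, not 1 or 2 */
	return 0;
}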
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 13afb60..ed016329 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -34,8 +34,8 @@
 #include <sound/initval.h>
 #include <linux/kmod.h>
 
-#if defined(CONFIG_SND_HPET) || defined(CONFIG_SND_HPET_MODULE)
-#define DEFAULT_TIMER_LIMIT 3
+#if defined(CONFIG_SND_HRTIMER) || defined(CONFIG_SND_HRTIMER_MODULE)
+#define DEFAULT_TIMER_LIMIT 4
 #elif defined(CONFIG_SND_RTCTIMER) || defined(CONFIG_SND_RTCTIMER_MODULE)
 #define DEFAULT_TIMER_LIMIT 2
 #else
@@ -52,6 +52,9 @@
 module_param(timer_tstamp_monotonic, int, 0444);
 MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
 
+MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
+MODULE_ALIAS("devname:snd/timer");
+
 struct snd_timer_user {
 	struct snd_timer_instance *timeri;
 	int tread;		/* enhanced read with timestamps and events */
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index a1282c1..5cfcb90 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1143,8 +1143,8 @@
 					     (resource->start) + 1);
 	if (ml403_ac97cr->port == NULL) {
 		snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": "
-			   "unable to remap memory region (%x to %x)\n",
-			   resource->start, resource->end);
+			   "unable to remap memory region (%pR)\n",
+			   resource);
 		snd_ml403_ac97cr_free(ml403_ac97cr);
 		return -EBUSY;
 	}
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index da03597..5c426df 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -55,14 +55,13 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/ioport.h>
+#include <linux/io.h>
 #include <linux/moduleparam.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/rawmidi.h>
 #include <linux/delay.h>
 
-#include <asm/io.h>
-
 /*
  *      globals
  */
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 971a84a..c424d32 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -57,8 +57,7 @@
 {
 	chip->init = 1;	/* don't schedule new work */
 	mb();
-	cancel_delayed_work(&chip->work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&chip->work);
 	kfree(chip);
 }
 
@@ -141,7 +140,7 @@
 {
 	chip->init = 1;
 	mb();
-	flush_scheduled_work();
+	flush_delayed_work_sync(&chip->work);
 	ak4113_init_regs(chip);
 	/* bring up statistics / event queueing */
 	chip->init = 0;
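
This hunk (and the matching ones in ak4114.c and ac97_codec.c below) replaces the cancel_delayed_work() plus flush_scheduled_work() pair with cancel_delayed_work_sync(), which waits for a possibly running instance of that one work item instead of flushing the whole global workqueue. A minimal kernel-style sketch of the teardown pattern, assuming only the standard workqueue API; the structure and function names are invented, and this fragment compiles only inside a kernel tree.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_chip {
	struct delayed_work work;
	/* ... device state the work function touches ... */
};

static void demo_work_fn(struct work_struct *work)
{
	/* periodic polling would go here; it may re-arm itself */
}

static struct demo_chip *demo_create(void)
{
	struct demo_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);

	if (!chip)
		return NULL;
	INIT_DELAYED_WORK(&chip->work, demo_work_fn);
	schedule_delayed_work(&chip->work, HZ);
	return chip;
}

static void demo_destroy(struct demo_chip *chip)
{
	/* cancel *and* wait for this one work item before freeing its state */
	cancel_delayed_work_sync(&chip->work);
	kfree(chip);
}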
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 0341451..d9fb537 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -67,8 +67,7 @@
 {
 	chip->init = 1;	/* don't schedule new work */
 	mb();
-	cancel_delayed_work(&chip->work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&chip->work);
 	kfree(chip);
 }
 
@@ -154,7 +153,7 @@
 {
 	chip->init = 1;
 	mb();
-	flush_scheduled_work();
+	flush_delayed_work_sync(&chip->work);
 	ak4114_init_regs(chip);
 	/* bring up statistics / event queueing */
 	chip->init = 0;
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 265abcc..9b915e2 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -264,7 +264,7 @@
 		snd_printd("OPL3-SA [0x%lx] detect (1) = 0x%x (0x%x)\n", port, tmp, tmp1);
 		return -ENODEV;
 	}
-	/* try if the MIC register is accesible */
+	/* try if the MIC register is accessible */
 	tmp = snd_opl3sa2_read(chip, OPL3SA2_MIC);
 	snd_opl3sa2_write(chip, OPL3SA2_MIC, 0x8a);
 	if (((tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MIC)) & 0x9f) != 0x8a) {
diff --git a/sound/oss/Makefile b/sound/oss/Makefile
index 96f14dc..90ffb99 100644
--- a/sound/oss/Makefile
+++ b/sound/oss/Makefile
@@ -87,7 +87,7 @@
 	$(obj)/bin2hex pss_synth < $< > $@
 else
     $(obj)/pss_boot.h:
-	(							\
+	$(Q)(							\
 	    echo 'static unsigned char * pss_synth = NULL;';	\
 	    echo 'static int pss_synthLen = 0;';		\
 	) > $@
@@ -102,7 +102,7 @@
 	$(obj)/hex2hex -i trix_boot < $< > $@
 else
     $(obj)/trix_boot.h:
-	(							\
+	$(Q)(							\
 	    echo 'static unsigned char * trix_boot = NULL;';	\
 	    echo 'static int trix_boot_len = 0;';		\
 	) > $@
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 12e3465..9823d59 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -209,7 +209,7 @@
         tristate
 
 config SND_OXYGEN
-	tristate "C-Media 8788 (Oxygen)"
+	tristate "C-Media 8786, 8787, 8788 (Oxygen)"
 	select SND_OXYGEN_LIB
 	select SND_PCM
 	select SND_MPU401_UART
@@ -217,13 +217,18 @@
 	  Say Y here to include support for sound cards based on the
 	  C-Media CMI8788 (Oxygen HD Audio) chip:
 	   * Asound A-8788
+	   * Asus Xonar DG
 	   * AuzenTech X-Meridian
+	   * AuzenTech X-Meridian 2G
 	   * Bgears b-Enspirer
 	   * Club3D Theatron DTS
 	   * HT-Omega Claro (plus)
 	   * HT-Omega Claro halo (XT)
+	   * Kuroutoshikou CMI8787-HG2PCI
 	   * Razer Barracuda AC-1
 	   * Sondigo Inferno
+	   * TempoTec/MediaTek HiFier Fantasia
+	   * TempoTec/MediaTek HiFier Serenade
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-oxygen.
@@ -578,18 +583,6 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-hdspm.
 
-config SND_HIFIER
-	tristate "TempoTec HiFier Fantasia"
-	select SND_OXYGEN_LIB
-	select SND_PCM
-	select SND_MPU401_UART
-	help
-	  Say Y here to include support for the MediaTek/TempoTec HiFier
-	  Fantasia sound card.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called snd-hifier.
-
 config SND_ICE1712
 	tristate "ICEnsemble ICE1712 (Envy24)"
 	select SND_MPU401_UART
@@ -826,8 +819,8 @@
 	  Say Y here to include support for sound cards based on the
 	  Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
 	  Essence ST (Deluxe), and Essence STX.
-	  Support for the HDAV1.3 (Deluxe) is incomplete; for the
-	  HDAV1.3 Slim and Xense, missing.
+	  Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
+	  for the Xense, missing.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-virtuoso.
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index a7630e9e..cb62d17 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1014,8 +1014,7 @@
 {
 	if (ac97) {
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-		cancel_delayed_work(&ac97->power_work);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&ac97->power_work);
 #endif
 		snd_ac97_proc_done(ac97);
 		if (ac97->bus)
@@ -1962,7 +1961,7 @@
 }
 
 /* build_ops to do nothing */
-static struct snd_ac97_build_ops null_build_ops;
+static const struct snd_ac97_build_ops null_build_ops;
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
 static void do_update_power(struct work_struct *work)
@@ -2456,8 +2455,7 @@
 	if (ac97->build_ops->suspend)
 		ac97->build_ops->suspend(ac97);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-	cancel_delayed_work(&ac97->power_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ac97->power_work);
 #endif
 	snd_ac97_powerdown(ac97);
 }
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index e68c98e..bf47574 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -371,7 +371,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
+static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
 	.build_spdif	= patch_yamaha_ymf743_build_spdif,
 	.build_3d	= patch_yamaha_ymf7x3_3d,
 };
@@ -455,7 +455,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
+static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
 	.build_3d	= patch_yamaha_ymf7x3_3d,
 	.build_post_spdif = patch_yamaha_ymf753_post_spdif
 };
@@ -502,7 +502,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
+static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
 	.build_specific = patch_wolfson_wm9703_specific,
 };
 
@@ -533,7 +533,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
+static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
 	.build_specific = patch_wolfson_wm9704_specific,
 };
 
@@ -677,7 +677,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
+static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
 	.build_specific = patch_wolfson_wm9711_specific,
 };
 
@@ -871,7 +871,7 @@
 }
 #endif
 
-static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
+static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
 	.build_specific = patch_wolfson_wm9713_specific,
 	.build_3d = patch_wolfson_wm9713_3d,
 #ifdef CONFIG_PM	
@@ -976,7 +976,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
+static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
 	.build_3d	= patch_sigmatel_stac9700_3d,
 	.build_specific	= patch_sigmatel_stac97xx_specific
 };
@@ -1023,7 +1023,7 @@
 	return patch_sigmatel_stac97xx_specific(ac97);
 }
 
-static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
+static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
 	.build_3d	= patch_sigmatel_stac9708_3d,
 	.build_specific	= patch_sigmatel_stac9708_specific
 };
@@ -1252,7 +1252,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
+static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
 	.build_3d	= patch_sigmatel_stac9700_3d,
 	.build_specific	= patch_sigmatel_stac9758_specific
 };
@@ -1327,7 +1327,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_cirrus_ops = {
+static const struct snd_ac97_build_ops patch_cirrus_ops = {
 	.build_spdif = patch_cirrus_build_spdif
 };
 
@@ -1384,7 +1384,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_conexant_ops = {
+static const struct snd_ac97_build_ops patch_conexant_ops = {
 	.build_spdif = patch_conexant_build_spdif
 };
 
@@ -1560,7 +1560,7 @@
 	}
 }
 
-static struct snd_ac97_build_ops patch_ad1881_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
 #ifdef CONFIG_PM
 	.resume = ad18xx_resume
 #endif
@@ -1647,7 +1647,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_ad1885_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
 	.build_specific = &patch_ad1885_specific,
 #ifdef CONFIG_PM
 	.resume = ad18xx_resume
@@ -1674,7 +1674,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_ad1886_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
 	.build_specific = &patch_ad1886_specific,
 #ifdef CONFIG_PM
 	.resume = ad18xx_resume
@@ -1881,7 +1881,7 @@
 				    ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
 }
 
-static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
 	.build_post_spdif = patch_ad198x_post_spdif,
 	.build_specific = patch_ad1981a_specific,
 #ifdef CONFIG_PM
@@ -1936,7 +1936,7 @@
 				    ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
 }
 
-static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
 	.build_post_spdif = patch_ad198x_post_spdif,
 	.build_specific = patch_ad1981b_specific,
 #ifdef CONFIG_PM
@@ -2075,7 +2075,7 @@
 	return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
 }
 
-static struct snd_ac97_build_ops patch_ad1888_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
 	.build_post_spdif = patch_ad198x_post_spdif,
 	.build_specific = patch_ad1888_specific,
 #ifdef CONFIG_PM
@@ -2124,7 +2124,7 @@
 	return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
 }
 
-static struct snd_ac97_build_ops patch_ad1980_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
 	.build_post_spdif = patch_ad198x_post_spdif,
 	.build_specific = patch_ad1980_specific,
 #ifdef CONFIG_PM
@@ -2239,7 +2239,7 @@
 				    ARRAY_SIZE(snd_ac97_ad1985_controls));
 }
 
-static struct snd_ac97_build_ops patch_ad1985_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
 	.build_post_spdif = patch_ad198x_post_spdif,
 	.build_specific = patch_ad1985_specific,
 #ifdef CONFIG_PM
@@ -2531,7 +2531,7 @@
 				    ARRAY_SIZE(snd_ac97_ad1985_controls));
 }
 
-static struct snd_ac97_build_ops patch_ad1986_build_ops = {
+static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
 	.build_post_spdif = patch_ad198x_post_spdif,
 	.build_specific = patch_ad1986_specific,
 #ifdef CONFIG_PM
@@ -2636,7 +2636,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_alc650_ops = {
+static const struct snd_ac97_build_ops patch_alc650_ops = {
 	.build_specific	= patch_alc650_specific,
 	.update_jacks = alc650_update_jacks
 };
@@ -2788,7 +2788,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_alc655_ops = {
+static const struct snd_ac97_build_ops patch_alc655_ops = {
 	.build_specific	= patch_alc655_specific,
 	.update_jacks = alc655_update_jacks
 };
@@ -2900,7 +2900,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_alc850_ops = {
+static const struct snd_ac97_build_ops patch_alc850_ops = {
 	.build_specific	= patch_alc850_specific,
 	.update_jacks = alc850_update_jacks
 };
@@ -2962,7 +2962,7 @@
 	return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
 }
 
-static struct snd_ac97_build_ops patch_cm9738_ops = {
+static const struct snd_ac97_build_ops patch_cm9738_ops = {
 	.build_specific	= patch_cm9738_specific,
 	.update_jacks = cm9738_update_jacks
 };
@@ -3053,7 +3053,7 @@
 	return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
 }
 
-static struct snd_ac97_build_ops patch_cm9739_ops = {
+static const struct snd_ac97_build_ops patch_cm9739_ops = {
 	.build_specific	= patch_cm9739_specific,
 	.build_post_spdif = patch_cm9739_post_spdif,
 	.update_jacks = cm9739_update_jacks
@@ -3227,7 +3227,7 @@
 	return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
 }
 
-static struct snd_ac97_build_ops patch_cm9761_ops = {
+static const struct snd_ac97_build_ops patch_cm9761_ops = {
 	.build_specific	= patch_cm9761_specific,
 	.build_post_spdif = patch_cm9761_post_spdif,
 	.update_jacks = cm9761_update_jacks
@@ -3323,7 +3323,7 @@
 	return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
 }
 
-static struct snd_ac97_build_ops patch_cm9780_ops = {
+static const struct snd_ac97_build_ops patch_cm9780_ops = {
 	.build_specific	= patch_cm9780_specific,
 	.build_post_spdif = patch_cm9761_post_spdif	/* identical with CM9761 */
 };
@@ -3443,7 +3443,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_vt1616_ops = {
+static const struct snd_ac97_build_ops patch_vt1616_ops = {
 	.build_specific	= patch_vt1616_specific
 };
 
@@ -3797,7 +3797,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_it2646_ops = {
+static const struct snd_ac97_build_ops patch_it2646_ops = {
 	.build_specific	= patch_it2646_specific,
 	.update_jacks = it2646_update_jacks
 };
@@ -3831,7 +3831,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_si3036_ops = {
+static const struct snd_ac97_build_ops patch_si3036_ops = {
 	.build_specific	= patch_si3036_specific,
 };
 
@@ -3898,7 +3898,7 @@
 	return 0;
 }
 
-static struct snd_ac97_build_ops patch_ucb1400_ops = {
+static const struct snd_ac97_build_ops patch_ucb1400_ops = {
 	.build_specific	= patch_ucb1400_specific,
 };
 
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 23f49f3..16c0bdf 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1252,11 +1252,19 @@
 static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
 {
 	stream_t *dma = &vortex->dma_adb[adbdma];
-	int temp;
+	int temp, page, delta;
 
 	temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2));
-	temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1));
-	return temp;
+	page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
+	if (dma->nr_periods >= 4)
+		delta = (page - dma->period_real) & 3;
+	else {
+		delta = (page - dma->period_real);
+		if (delta < 0)
+			delta += dma->nr_periods;
+	}
+	return (dma->period_virt + delta) * dma->period_bytes
+		+ (temp & (dma->period_bytes - 1));
 }
 
 static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma)
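
vortex_adbdma_getlinearpos() now derives the linear position from the hardware's current sub-buffer (page) instead of trusting the software notion of the current period, wrapping the page delta by the number of periods. The userspace sketch below reproduces that wrap-around arithmetic with made-up values; it is an illustration, not the driver function.

#include <stdio.h>

/* linear pos = (virtual period + how far hw ran ahead) * period size + offset */
static unsigned int linearpos(int hw_page, int period_real, int period_virt,
			      int nr_periods, unsigned int period_bytes,
			      unsigned int offset)
{
	int delta;

	if (nr_periods >= 4)
		delta = (hw_page - period_real) & 3;	/* hw reports pages mod 4 */
	else {
		delta = hw_page - period_real;
		if (delta < 0)
			delta += nr_periods;
	}
	return (unsigned int)(period_virt + delta) * period_bytes + offset;
}

int main(void)
{
	/* hardware is one page ahead of what the driver last acknowledged */
	printf("%u\n", linearpos(2, 1, 5, 8, 4096, 128));	/* (5+1)*4096+128 */
	return 0;
}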
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index b9d2f20..5439d66 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -42,11 +42,7 @@
 	.rate_min = 5000,
 	.rate_max = 48000,
 	.channels_min = 1,
-#ifdef CHIP_AU8830
-	.channels_max = 4,
-#else
 	.channels_max = 2,
-#endif
 	.buffer_bytes_max = 0x10000,
 	.period_bytes_min = 0x1,
 	.period_bytes_max = 0x1000,
@@ -115,6 +111,17 @@
 	.periods_max = 64,
 };
 #endif
+#ifdef CHIP_AU8830
+static unsigned int au8830_channels[3] = {
+	1, 2, 4,
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
+	.count = ARRAY_SIZE(au8830_channels),
+	.list = au8830_channels,
+	.mask = 0,
+};
+#endif
 /* open callback */
 static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
 {
@@ -156,6 +163,15 @@
 		if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB
 		    || VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S)
 			runtime->hw = snd_vortex_playback_hw_adb;
+#ifdef CHIP_AU8830
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+			VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
+			runtime->hw.channels_max = 4;
+			snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_CHANNELS,
+				&hw_constraints_au8830_channels);
+		}
+#endif
 		substream->runtime->private_data = NULL;
 	}
 #ifndef CHIP_AU8810
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 2f3cacb..573594b 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1,6 +1,6 @@
 /*
  *  azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
- *  Copyright (C) 2002, 2005 - 2009 by Andreas Mohr <andi AT lisas.de>
+ *  Copyright (C) 2002, 2005 - 2010 by Andreas Mohr <andi AT lisas.de>
  *
  *  Framework borrowed from Bart Hartgers's als4000.c.
  *  Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801),
@@ -175,6 +175,7 @@
 
 #include <asm/io.h>
 #include <linux/init.h>
+#include <linux/bug.h> /* WARN_ONCE */
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -201,14 +202,15 @@
 
 /* === Debug settings ===
   Further diagnostic functionality than the settings below
-  does not need to be provided, since one can easily write a bash script
+  does not need to be provided, since one can easily write a POSIX shell script
   to dump the card's I/O ports (those listed in lspci -v -v):
-  function dump()
+  dump()
   {
     local descr=$1; local addr=$2; local count=$3
 
     echo "${descr}: ${count} @ ${addr}:"
-    dd if=/dev/port skip=$[${addr}] count=${count} bs=1 2>/dev/null| hexdump -C
+    dd if=/dev/port skip=`printf %d ${addr}` count=${count} bs=1 \
+      2>/dev/null| hexdump -C
   }
   and then use something like
   "dump joy200 0x200 8", "dump mpu388 0x388 4", "dump joy 0xb400 8",
@@ -216,14 +218,14 @@
   possibly within a "while true; do ... sleep 1; done" loop.
   Tweaking ports could be done using
   VALSTRING="`printf "%02x" $value`"
-  printf "\x""$VALSTRING"|dd of=/dev/port seek=$[${addr}] bs=1 2>/dev/null
+  printf "\x""$VALSTRING"|dd of=/dev/port seek=`printf %d ${addr}` bs=1 \
+    2>/dev/null
 */
 
 #define DEBUG_MISC	0
 #define DEBUG_CALLS	0
 #define DEBUG_MIXER	0
 #define DEBUG_CODEC	0
-#define DEBUG_IO	0
 #define DEBUG_TIMER	0
 #define DEBUG_GAME	0
 #define DEBUG_PM	0
@@ -291,19 +293,23 @@
 module_param(seqtimer_scaling, int, 0444);
 MODULE_PARM_DESC(seqtimer_scaling, "Set 1024000Hz sequencer timer scale factor (lockup danger!). Default 128.");
 
-struct snd_azf3328_codec_data {
-	unsigned long io_base;
-	struct snd_pcm_substream *substream;
-	bool running;
-	const char *name;
-};
-
 enum snd_azf3328_codec_type {
+  /* warning: fixed indices (also used for bitmask checks!) */
   AZF_CODEC_PLAYBACK = 0,
   AZF_CODEC_CAPTURE = 1,
   AZF_CODEC_I2S_OUT = 2,
 };
 
+struct snd_azf3328_codec_data {
+	unsigned long io_base; /* keep first! (avoid offset calc) */
+	unsigned int dma_base; /* helper to avoid an indirection in hotpath */
+	spinlock_t *lock; /* TODO: convert to our own per-codec lock member */
+	struct snd_pcm_substream *substream;
+	bool running;
+	enum snd_azf3328_codec_type type;
+	const char *name;
+};
+
 struct snd_azf3328 {
 	/* often-used fields towards beginning, then grouped */
 
@@ -362,6 +368,9 @@
 static int
 snd_azf3328_io_reg_setb(unsigned reg, u8 mask, bool do_set)
 {
+	/* Well, strictly speaking, the inb/outb sequence isn't atomic
+	   and would need locking. However we currently don't care
+	   since it potentially complicates matters. */
 	u8 prev = inb(reg), new;
 
 	new = (do_set) ? (prev|mask) : (prev & ~mask);
@@ -413,6 +422,21 @@
 	outl(value, codec->io_base + reg);
 }
 
+static inline void
+snd_azf3328_codec_outl_multi(const struct snd_azf3328_codec_data *codec,
+			     unsigned reg, const void *buffer, int count
+)
+{
+	unsigned long addr = codec->io_base + reg;
+	if (count) {
+		const u32 *buf = buffer;
+		do {
+			outl(*buf++, addr);
+			addr += 4;
+		} while (--count);
+	}
+}
+
 static inline u32
 snd_azf3328_codec_inl(const struct snd_azf3328_codec_data *codec, unsigned reg)
 {
@@ -943,38 +967,37 @@
 }
 
 static void
-snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
-			       enum snd_azf3328_codec_type codec_type,
+snd_azf3328_codec_setfmt(struct snd_azf3328_codec_data *codec,
 			       enum azf_freq_t bitrate,
 			       unsigned int format_width,
 			       unsigned int channels
 )
 {
 	unsigned long flags;
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	u16 val = 0xff00;
+	u8 freq = 0;
 
 	snd_azf3328_dbgcallenter();
 	switch (bitrate) {
-	case AZF_FREQ_4000:  val |= SOUNDFORMAT_FREQ_SUSPECTED_4000; break;
-	case AZF_FREQ_4800:  val |= SOUNDFORMAT_FREQ_SUSPECTED_4800; break;
+	case AZF_FREQ_4000:  freq = SOUNDFORMAT_FREQ_SUSPECTED_4000; break;
+	case AZF_FREQ_4800:  freq = SOUNDFORMAT_FREQ_SUSPECTED_4800; break;
 	case AZF_FREQ_5512:
 		/* the AZF3328 names it "5510" for some strange reason */
-			     val |= SOUNDFORMAT_FREQ_5510; break;
-	case AZF_FREQ_6620:  val |= SOUNDFORMAT_FREQ_6620; break;
-	case AZF_FREQ_8000:  val |= SOUNDFORMAT_FREQ_8000; break;
-	case AZF_FREQ_9600:  val |= SOUNDFORMAT_FREQ_9600; break;
-	case AZF_FREQ_11025: val |= SOUNDFORMAT_FREQ_11025; break;
-	case AZF_FREQ_13240: val |= SOUNDFORMAT_FREQ_SUSPECTED_13240; break;
-	case AZF_FREQ_16000: val |= SOUNDFORMAT_FREQ_16000; break;
-	case AZF_FREQ_22050: val |= SOUNDFORMAT_FREQ_22050; break;
-	case AZF_FREQ_32000: val |= SOUNDFORMAT_FREQ_32000; break;
+			     freq = SOUNDFORMAT_FREQ_5510; break;
+	case AZF_FREQ_6620:  freq = SOUNDFORMAT_FREQ_6620; break;
+	case AZF_FREQ_8000:  freq = SOUNDFORMAT_FREQ_8000; break;
+	case AZF_FREQ_9600:  freq = SOUNDFORMAT_FREQ_9600; break;
+	case AZF_FREQ_11025: freq = SOUNDFORMAT_FREQ_11025; break;
+	case AZF_FREQ_13240: freq = SOUNDFORMAT_FREQ_SUSPECTED_13240; break;
+	case AZF_FREQ_16000: freq = SOUNDFORMAT_FREQ_16000; break;
+	case AZF_FREQ_22050: freq = SOUNDFORMAT_FREQ_22050; break;
+	case AZF_FREQ_32000: freq = SOUNDFORMAT_FREQ_32000; break;
 	default:
 		snd_printk(KERN_WARNING "unknown bitrate %d, assuming 44.1kHz!\n", bitrate);
 		/* fall-through */
-	case AZF_FREQ_44100: val |= SOUNDFORMAT_FREQ_44100; break;
-	case AZF_FREQ_48000: val |= SOUNDFORMAT_FREQ_48000; break;
-	case AZF_FREQ_66200: val |= SOUNDFORMAT_FREQ_SUSPECTED_66200; break;
+	case AZF_FREQ_44100: freq = SOUNDFORMAT_FREQ_44100; break;
+	case AZF_FREQ_48000: freq = SOUNDFORMAT_FREQ_48000; break;
+	case AZF_FREQ_66200: freq = SOUNDFORMAT_FREQ_SUSPECTED_66200; break;
 	}
 	/* val = 0xff07; 3m27.993s (65301Hz; -> 64000Hz???) hmm, 66120, 65967, 66123 */
 	/* val = 0xff09; 17m15.098s (13123,478Hz; -> 12000Hz???) hmm, 13237.2Hz? */
@@ -986,13 +1009,15 @@
 	/* val = 0xff0d; 41m23.135s (5523,600Hz; -> 5512Hz???) */
 	/* val = 0xff0e; 28m30.777s (8017Hz; -> 8000Hz???) */
 
+	val |= freq;
+
 	if (channels == 2)
 		val |= SOUNDFORMAT_FLAG_2CHANNELS;
 
 	if (format_width == 16)
 		val |= SOUNDFORMAT_FLAG_16BIT;
 
-	spin_lock_irqsave(&chip->reg_lock, flags);
+	spin_lock_irqsave(codec->lock, flags);
 
 	/* set bitrate/format */
 	snd_azf3328_codec_outw(codec, IDX_IO_CODEC_SOUNDFORMAT, val);
@@ -1004,7 +1029,8 @@
 	 * (FIXME: yes, it works, but what exactly am I doing here?? :)
 	 * FIXME: does this have some side effects for full-duplex
 	 * or other dramatic side effects? */
-	if (codec_type == AZF_CODEC_PLAYBACK) /* only do it for playback */
+	/* do it for non-capture codecs only */
+	if (codec->type != AZF_CODEC_CAPTURE)
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
 			snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS) |
 			DMA_RUN_SOMETHING1 |
@@ -1014,20 +1040,19 @@
 			DMA_SOMETHING_ELSE
 		);
 
-	spin_unlock_irqrestore(&chip->reg_lock, flags);
+	spin_unlock_irqrestore(codec->lock, flags);
 	snd_azf3328_dbgcallleave();
 }
 
 static inline void
-snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328 *chip,
-			    enum snd_azf3328_codec_type codec_type
+snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328_codec_data *codec
 )
 {
 	/* choose lowest frequency for low power consumption.
 	 * While this will cause louder noise due to rather coarse frequency,
 	 * it should never matter since output should always
 	 * get disabled properly when idle anyway. */
-	snd_azf3328_codec_setfmt(chip, codec_type, AZF_FREQ_4000, 8, 1);
+	snd_azf3328_codec_setfmt(codec, AZF_FREQ_4000, 8, 1);
 }
 
 static void
@@ -1101,69 +1126,87 @@
 		/* ...and adjust clock, too
 		 * (reduce noise and power consumption) */
 		if (!enable)
-			snd_azf3328_codec_setfmt_lowpower(
-				chip,
-				codec_type
-			);
+			snd_azf3328_codec_setfmt_lowpower(codec);
 		codec->running = enable;
 	}
 }
 
 static void
-snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip,
-				enum snd_azf3328_codec_type codec_type,
+snd_azf3328_codec_setdmaa(struct snd_azf3328_codec_data *codec,
 				unsigned long addr,
-				unsigned int count,
-				unsigned int size
+				unsigned int period_bytes,
+				unsigned int buffer_bytes
 )
 {
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	snd_azf3328_dbgcallenter();
+	WARN_ONCE(period_bytes & 1, "odd period length!?\n");
+	WARN_ONCE(buffer_bytes != 2 * period_bytes,
+		 "missed our input expectations! %u vs. %u\n",
+		 buffer_bytes, period_bytes);
 	if (!codec->running) {
 		/* AZF3328 uses a two buffer pointer DMA transfer approach */
 
-		unsigned long flags, addr_area2;
+		unsigned long flags;
 
 		/* width 32bit (prevent overflow): */
-		u32 count_areas, lengths;
+		u32 area_length;
+		struct codec_setup_io {
+			u32 dma_start_1;
+			u32 dma_start_2;
+			u32 dma_lengths;
+		} __attribute__((packed)) setup_io;
 
-		count_areas = size/2;
-		addr_area2 = addr+count_areas;
-		snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n",
-				addr, count_areas, addr_area2, count_areas);
+		area_length = buffer_bytes/2;
 
-		count_areas--; /* max. index */
+		setup_io.dma_start_1 = addr;
+		setup_io.dma_start_2 = addr+area_length;
+
+		snd_azf3328_dbgcodec(
+			"setdma: buffers %08x[%u] / %08x[%u], %u, %u\n",
+				setup_io.dma_start_1, area_length,
+				setup_io.dma_start_2, area_length,
+				period_bytes, buffer_bytes);
+
+		/* Hmm, are we really supposed to decrement this by 1??
+		   Most definitely not: configuring the full length does
+		   work properly (i.e. likely better), and BTW we
+		   violated possibly differing frame sizes with this...
+
+		area_length--; |* max. index *|
+		*/
 
 		/* build combined I/O buffer length word */
-		lengths = (count_areas << 16) | (count_areas);
-		spin_lock_irqsave(&chip->reg_lock, flags);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_1, addr);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_2,
-								addr_area2);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_LENGTHS,
-								lengths);
-		spin_unlock_irqrestore(&chip->reg_lock, flags);
+		setup_io.dma_lengths = (area_length << 16) | (area_length);
+
+		spin_lock_irqsave(codec->lock, flags);
+		snd_azf3328_codec_outl_multi(
+			codec, IDX_IO_CODEC_DMA_START_1, &setup_io, 3
+		);
+		spin_unlock_irqrestore(codec->lock, flags);
 	}
 	snd_azf3328_dbgcallleave();
 }
 
 static int
-snd_azf3328_codec_prepare(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_prepare(struct snd_pcm_substream *substream)
 {
-#if 0
-	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = runtime->private_data;
+#if 0
         unsigned int size = snd_pcm_lib_buffer_bytes(substream);
 	unsigned int count = snd_pcm_lib_period_bytes(substream);
 #endif
 
 	snd_azf3328_dbgcallenter();
+
+	codec->dma_base = runtime->dma_addr;
+
 #if 0
-	snd_azf3328_codec_setfmt(chip, AZF_CODEC_...,
+	snd_azf3328_codec_setfmt(codec,
 		runtime->rate,
 		snd_pcm_format_width(runtime->format),
 		runtime->channels);
-	snd_azf3328_codec_setdmaa(chip, AZF_CODEC_...,
+	snd_azf3328_codec_setdmaa(codec,
 					runtime->dma_addr, count, size);
 #endif
 	snd_azf3328_dbgcallleave();
@@ -1171,24 +1214,23 @@
 }
 
 static int
-snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type,
-			struct snd_pcm_substream *substream, int cmd)
+snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = runtime->private_data;
 	int result = 0;
 	u16 flags1;
 	bool previously_muted = 0;
-	bool is_playback_codec = (AZF_CODEC_PLAYBACK == codec_type);
+	bool is_main_mixer_playback_codec = (AZF_CODEC_PLAYBACK == codec->type);
 
-	snd_azf3328_dbgcalls("snd_azf3328_codec_trigger cmd %d\n", cmd);
+	snd_azf3328_dbgcalls("snd_azf3328_pcm_trigger cmd %d\n", cmd);
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 		snd_azf3328_dbgcodec("START %s\n", codec->name);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* mute WaveOut (avoid clicking during setup) */
 			previously_muted =
 				snd_azf3328_mixer_set_mute(
@@ -1196,12 +1238,12 @@
 				);
 		}
 
-		snd_azf3328_codec_setfmt(chip, codec_type,
+		snd_azf3328_codec_setfmt(codec,
 			runtime->rate,
 			snd_pcm_format_width(runtime->format),
 			runtime->channels);
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		/* first, remember current value: */
 		flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
 
@@ -1211,14 +1253,14 @@
 
 		/* FIXME: clear interrupts or what??? */
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_IRQTYPE, 0xffff);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 
-		snd_azf3328_codec_setdmaa(chip, codec_type, runtime->dma_addr,
+		snd_azf3328_codec_setdmaa(codec, runtime->dma_addr,
 			snd_pcm_lib_period_bytes(substream),
 			snd_pcm_lib_buffer_bytes(substream)
 		);
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 #ifdef WIN9X
 		/* FIXME: enable playback/recording??? */
 		flags1 |= DMA_RUN_SOMETHING1 | DMA_RUN_SOMETHING2;
@@ -1242,10 +1284,10 @@
 			DMA_EPILOGUE_SOMETHING |
 			DMA_SOMETHING_ELSE);
 #endif
-		spin_unlock(&chip->reg_lock);
-		snd_azf3328_ctrl_codec_activity(chip, codec_type, 1);
+		spin_unlock(codec->lock);
+		snd_azf3328_ctrl_codec_activity(chip, codec->type, 1);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* now unmute WaveOut */
 			if (!previously_muted)
 				snd_azf3328_mixer_set_mute(
@@ -1258,19 +1300,19 @@
 	case SNDRV_PCM_TRIGGER_RESUME:
 		snd_azf3328_dbgcodec("RESUME %s\n", codec->name);
 		/* resume codec if we were active */
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		if (codec->running)
 			snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
 				snd_azf3328_codec_inw(
 					codec, IDX_IO_CODEC_DMA_FLAGS
 				) | DMA_RESUME
 			);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		snd_azf3328_dbgcodec("STOP %s\n", codec->name);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* mute WaveOut (avoid clicking during setup) */
 			previously_muted =
 				snd_azf3328_mixer_set_mute(
@@ -1278,7 +1320,7 @@
 				);
 		}
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		/* first, remember current value: */
 		flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
 
@@ -1293,10 +1335,10 @@
 
 		flags1 &= ~DMA_RUN_SOMETHING1;
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
-		spin_unlock(&chip->reg_lock);
-		snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
+		spin_unlock(codec->lock);
+		snd_azf3328_ctrl_codec_activity(chip, codec->type, 0);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* now unmute WaveOut */
 			if (!previously_muted)
 				snd_azf3328_mixer_set_mute(
@@ -1330,67 +1372,29 @@
 	return result;
 }
 
-static int
-snd_azf3328_codec_playback_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_PLAYBACK, substream, cmd);
-}
-
-static int
-snd_azf3328_codec_capture_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_CAPTURE, substream, cmd);
-}
-
-static int
-snd_azf3328_codec_i2s_out_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_I2S_OUT, substream, cmd);
-}
-
 static snd_pcm_uframes_t
-snd_azf3328_codec_pointer(struct snd_pcm_substream *substream,
-			  enum snd_azf3328_codec_type codec_type
+snd_azf3328_pcm_pointer(struct snd_pcm_substream *substream
 )
 {
-	const struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
-	unsigned long bufptr, result;
+	const struct snd_azf3328_codec_data *codec =
+		substream->runtime->private_data;
+	unsigned long result;
 	snd_pcm_uframes_t frmres;
 
-#ifdef QUERY_HARDWARE
-	bufptr = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
-#else
-	bufptr = substream->runtime->dma_addr;
-#endif
 	result = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_CURRPOS);
 
 	/* calculate offset */
-	result -= bufptr;
+#ifdef QUERY_HARDWARE
+	result -= snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
+#else
+	result -= codec->dma_base;
+#endif
 	frmres = bytes_to_frames( substream->runtime, result);
-	snd_azf3328_dbgcodec("%s @ 0x%8lx, frames %8ld\n",
-				codec->name, result, frmres);
+	snd_azf3328_dbgcodec("%08li %s @ 0x%8lx, frames %8ld\n",
+				jiffies, codec->name, result, frmres);
 	return frmres;
 }
 
-static snd_pcm_uframes_t
-snd_azf3328_codec_playback_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_PLAYBACK);
-}
-
-static snd_pcm_uframes_t
-snd_azf3328_codec_capture_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_CAPTURE);
-}
-
-static snd_pcm_uframes_t
-snd_azf3328_codec_i2s_out_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_I2S_OUT);
-}
-
 /******************************************************************/
 
 #ifdef SUPPORT_GAMEPORT
@@ -1532,7 +1536,7 @@
 		}
 	}
 
-	/* trigger next axes sampling, to be evaluated the next time we
+	/* trigger next sampling of axes, to be evaluated the next time we
 	 * enter this function */
 
 	/* for some very, very strange reason we cannot enable
@@ -1624,29 +1628,29 @@
 }
 
 static inline void
-snd_azf3328_codec_interrupt(struct snd_azf3328 *chip, u8 status)
+snd_azf3328_pcm_interrupt(const struct snd_azf3328_codec_data *first_codec,
+			  u8 status
+)
 {
 	u8 which;
 	enum snd_azf3328_codec_type codec_type;
-	const struct snd_azf3328_codec_data *codec;
+	const struct snd_azf3328_codec_data *codec = first_codec;
 
 	for (codec_type = AZF_CODEC_PLAYBACK;
 		 codec_type <= AZF_CODEC_I2S_OUT;
-			 ++codec_type) {
+			 ++codec_type, ++codec) {
 
 		/* skip codec if there's no interrupt for it */
 		if (!(status & (1 << codec_type)))
 			continue;
 
-		codec = &chip->codecs[codec_type];
-
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		which = snd_azf3328_codec_inb(codec, IDX_IO_CODEC_IRQTYPE);
 		/* ack all IRQ types immediately */
 		snd_azf3328_codec_outb(codec, IDX_IO_CODEC_IRQTYPE, which);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 
-		if ((chip->pcm[codec_type]) && (codec->substream)) {
+		if (codec->substream) {
 			snd_pcm_period_elapsed(codec->substream);
 			snd_azf3328_dbgcodec("%s period done (#%x), @ %x\n",
 				codec->name,
@@ -1701,7 +1705,7 @@
 	}
 
 	if (status & (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT))
-		snd_azf3328_codec_interrupt(chip, status);
+		snd_azf3328_pcm_interrupt(chip->codecs, status);
 
 	if (status & IRQ_GAMEPORT)
 		snd_azf3328_gameport_interrupt(chip);
@@ -1789,101 +1793,85 @@
 {
 	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 
 	snd_azf3328_dbgcallenter();
-	chip->codecs[codec_type].substream = substream;
+	codec->substream = substream;
 
 	/* same parameters for all our codecs - at least we think so... */
 	runtime->hw = snd_azf3328_hardware;
 
 	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 				   &snd_azf3328_hw_constraints_rates);
+	runtime->private_data = codec;
 	snd_azf3328_dbgcallleave();
 	return 0;
 }
 
 static int
-snd_azf3328_playback_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_playback_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_PLAYBACK);
 }
 
 static int
-snd_azf3328_capture_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_capture_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_CAPTURE);
 }
 
 static int
-snd_azf3328_i2s_out_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_i2s_out_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_I2S_OUT);
 }
 
 static int
-snd_azf3328_pcm_close(struct snd_pcm_substream *substream,
-		      enum snd_azf3328_codec_type codec_type
+snd_azf3328_pcm_close(struct snd_pcm_substream *substream
 )
 {
-	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
+	struct snd_azf3328_codec_data *codec =
+		substream->runtime->private_data;
 
 	snd_azf3328_dbgcallenter();
-	chip->codecs[codec_type].substream = NULL;
+	codec->substream = NULL;
 	snd_azf3328_dbgcallleave();
 	return 0;
 }
 
-static int
-snd_azf3328_playback_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_PLAYBACK);
-}
-
-static int
-snd_azf3328_capture_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_CAPTURE);
-}
-
-static int
-snd_azf3328_i2s_out_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_I2S_OUT);
-}
-
 /******************************************************************/
 
 static struct snd_pcm_ops snd_azf3328_playback_ops = {
-	.open =		snd_azf3328_playback_open,
-	.close =	snd_azf3328_playback_close,
+	.open =		snd_azf3328_pcm_playback_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_playback_trigger,
-	.pointer =	snd_azf3328_codec_playback_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static struct snd_pcm_ops snd_azf3328_capture_ops = {
-	.open =		snd_azf3328_capture_open,
-	.close =	snd_azf3328_capture_close,
+	.open =		snd_azf3328_pcm_capture_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_capture_trigger,
-	.pointer =	snd_azf3328_codec_capture_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
-	.open =		snd_azf3328_i2s_out_open,
-	.close =	snd_azf3328_i2s_out_close,
+	.open =		snd_azf3328_pcm_i2s_out_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_i2s_out_trigger,
-	.pointer =	snd_azf3328_codec_i2s_out_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static int __devinit
@@ -1966,7 +1954,7 @@
 		snd_azf3328_dbgtimer("delay was too low (%d)!\n", delay);
 		delay = 49; /* minimum time is 49 ticks */
 	}
-	snd_azf3328_dbgtimer("setting timer countdown value %d, add COUNTDOWN|IRQ\n", delay);
+	snd_azf3328_dbgtimer("setting timer countdown value %d\n", delay);
 	delay |= TIMER_COUNTDOWN_ENABLE | TIMER_IRQ_ENABLE;
 	spin_lock_irqsave(&chip->reg_lock, flags);
 	snd_azf3328_ctrl_outl(chip, IDX_IO_TIMER_VALUE, delay);
@@ -2180,6 +2168,7 @@
 	};
 	u8 dma_init;
 	enum snd_azf3328_codec_type codec_type;
+	struct snd_azf3328_codec_data *codec_setup;
 
 	*rchip = NULL;
 
@@ -2217,15 +2206,23 @@
 	chip->opl3_io  = pci_resource_start(pci, 3);
 	chip->mixer_io = pci_resource_start(pci, 4);
 
-	chip->codecs[AZF_CODEC_PLAYBACK].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
-	chip->codecs[AZF_CODEC_PLAYBACK].name = "PLAYBACK";
-	chip->codecs[AZF_CODEC_CAPTURE].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
-	chip->codecs[AZF_CODEC_CAPTURE].name = "CAPTURE";
-	chip->codecs[AZF_CODEC_I2S_OUT].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
-	chip->codecs[AZF_CODEC_I2S_OUT].name = "I2S_OUT";
+	codec_setup = &chip->codecs[AZF_CODEC_PLAYBACK];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_PLAYBACK;
+	codec_setup->name = "PLAYBACK";
+
+	codec_setup = &chip->codecs[AZF_CODEC_CAPTURE];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_CAPTURE;
+	codec_setup->name = "CAPTURE";
+
+	codec_setup = &chip->codecs[AZF_CODEC_I2S_OUT];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_I2S_OUT;
+	codec_setup->name = "I2S_OUT";
 
 	if (request_irq(pci->irq, snd_azf3328_interrupt,
 			IRQF_SHARED, card->shortname, chip)) {
@@ -2257,15 +2254,15 @@
 		struct snd_azf3328_codec_data *codec =
 			 &chip->codecs[codec_type];
 
-		/* shutdown codecs to save power */
+		/* shutdown codecs to reduce power / noise */
 			/* have ...ctrl_codec_activity() act properly */
 		codec->running = 1;
 		snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
 
-		spin_lock_irq(&chip->reg_lock);
+		spin_lock_irq(codec->lock);
 		snd_azf3328_codec_outb(codec, IDX_IO_CODEC_DMA_FLAGS,
 						 dma_init);
-		spin_unlock_irq(&chip->reg_lock);
+		spin_unlock_irq(codec->lock);
 	}
 
 	snd_card_set_dev(card, &pci->dev);
@@ -2419,6 +2416,7 @@
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
+	/* same pcm object for playback/capture */
 	snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]);
 	snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]);
 
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 37e1b5d..2958a05 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -637,15 +637,9 @@
 static int snd_bt87x_capture_source_info(struct snd_kcontrol *kcontrol,
 					 struct snd_ctl_elem_info *info)
 {
-	static char *texts[3] = {"TV Tuner", "FM", "Mic/Line"};
+	static const char *const texts[3] = {"TV Tuner", "FM", "Mic/Line"};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, texts);
 }
 
 static int snd_bt87x_capture_source_get(struct snd_kcontrol *kcontrol,
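
The bt87x and cmipci info callbacks are collapsed into calls to the snd_ctl_enum_info() helper, which fills in the element type, channel count, item count and the selected item's name in one step. A hedged kernel-style sketch of another enumerated-control info callback written the same way; the control and its texts are invented, and the fragment compiles only against the ALSA kernel headers.

#include <sound/core.h>
#include <sound/control.h>

static int demo_route_info(struct snd_kcontrol *kcontrol,
			   struct snd_ctl_elem_info *uinfo)
{
	static const char *const texts[] = { "Speaker", "Headphone", "Both" };

	/* one channel, ARRAY_SIZE(texts) items, names taken from texts[] */
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}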
diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
index f19c110..fc53b9b 100644
--- a/sound/pci/ca0106/ca0106.h
+++ b/sound/pci/ca0106/ca0106.h
@@ -188,7 +188,7 @@
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
 						/* PTR[5:0], Default: 0x0 */
 #define PLAYBACK_UNKNOWN3	0x03		/* Not used ?? */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 						/* DMA[31:0], Default: 0x0 */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size. win2000 uses 0x04000000 */
 						/* SIZE[31:16], Default: 0x0 */
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index d2d12c0..01b4938 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1082,7 +1082,7 @@
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_ca0106_pcm *epcm = runtime->private_data;
 	snd_pcm_uframes_t ptr, ptr1, ptr2 = 0;
-	int channel = channel=epcm->channel_id;
+	int channel = epcm->channel_id;
 
 	if (!epcm->running)
 		return 0;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 329968e..b5bb036 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -2507,14 +2507,12 @@
 					struct snd_ctl_elem_info *uinfo)
 {
 	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
-	static char *texts[3] = { "Line-In", "Rear Output", "Bass Output" };
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = cm->chip_version >= 39 ? 3 : 2;
-	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
-		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
-	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-	return 0;
+	static const char *const texts[3] = {
+		"Line-In", "Rear Output", "Bass Output"
+	};
+
+	return snd_ctl_enum_info(uinfo, 1,
+				 cm->chip_version >= 39 ? 3 : 2, texts);
 }
 
 static inline unsigned int get_line_in_mode(struct cmipci *cm)
@@ -2564,14 +2562,9 @@
 static int snd_cmipci_mic_in_mode_info(struct snd_kcontrol *kcontrol,
 				       struct snd_ctl_elem_info *uinfo)
 {
-	static char *texts[2] = { "Mic-In", "Center/LFE Output" };
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = 2;
-	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
-		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
-	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-	return 0;
+	static const char *const texts[2] = { "Mic-In", "Center/LFE Output" };
+
+	return snd_ctl_enum_info(uinfo, 1, 2, texts);
 }
 
 static int snd_cmipci_mic_in_mode_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/cs5535audio/cs5535audio_pm.c b/sound/pci/cs5535audio/cs5535audio_pm.c
index a3301cc..185b000 100644
--- a/sound/pci/cs5535audio/cs5535audio_pm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pm.c
@@ -90,12 +90,7 @@
 	int i;
 
 	pci_set_power_state(pci, PCI_D0);
-	if (pci_restore_state(pci) < 0) {
-		printk(KERN_ERR "cs5535audio: pci_restore_state failed, "
-		       "disabling device\n");
-		snd_card_disconnect(card);
-		return -EIO;
-	}
+	pci_restore_state(pci);
 	if (pci_enable_device(pci) < 0) {
 		printk(KERN_ERR "cs5535audio: pci_enable_device failed, "
 		       "disabling device\n");
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index df47f73..0c701e4 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -114,7 +114,7 @@
 						 */
 #define PLAYBACK_LIST_SIZE	0x01		/* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000  */
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size */
 #define PLAYBACK_POINTER	0x06		/* Playback period pointer. Sample currently in DAC */
 #define PLAYBACK_UNKNOWN1       0x07
diff --git a/sound/pci/emu10k1/p16v.h b/sound/pci/emu10k1/p16v.h
index 15321494..00f4817 100644
--- a/sound/pci/emu10k1/p16v.h
+++ b/sound/pci/emu10k1/p16v.h
@@ -96,7 +96,7 @@
 #define PLAYBACK_LIST_SIZE	0x01		/* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000  */
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
 #define PLAYBACK_UNKNOWN3	0x03		/* Not used */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size. win2000 uses 0x04000000 */
 #define PLAYBACK_POINTER	0x06		/* Playback period pointer. Used with PLAYBACK_LIST_PTR to determine buffer position currently in DAC */
 #define PLAYBACK_FIFO_END_ADDRESS	0x07		/* Playback FIFO end address */
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 23a58f0..7c17f45 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -220,7 +220,7 @@
 #define	RINGB_EN_2CODEC		0x0020
 #define RINGB_SING_BIT_DUAL	0x0040
 
-/* ****Port Adresses**** */
+/* ****Port Addresses**** */
 
 /*   Write & Read */
 #define ESM_INDEX		0x02
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 98b6d02..ae5c5d5e 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2134,10 +2134,10 @@
  * This function returns zero if successful or a negative error code.
  */
 int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
-			unsigned int *tlv, const char **slaves)
+			unsigned int *tlv, const char * const *slaves)
 {
 	struct snd_kcontrol *kctl;
-	const char **s;
+	const char * const *s;
 	int err;
 
 	for (s = slaves; *s && !snd_hda_find_mixer_ctl(codec, *s); s++)
@@ -3689,7 +3689,7 @@
  * If no entries are matching, the function returns a negative value.
  */
 int snd_hda_check_board_config(struct hda_codec *codec,
-			       int num_configs, const char **models,
+			       int num_configs, const char * const *models,
 			       const struct snd_pci_quirk *tbl)
 {
 	if (codec->modelname && models) {
@@ -3753,7 +3753,7 @@
  * If no entries are matching, the function returns a negative value.
  */
 int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
-			       int num_configs, const char **models,
+			       int num_configs, const char * const *models,
 			       const struct snd_pci_quirk *tbl)
 {
 	const struct snd_pci_quirk *q;
@@ -4571,6 +4571,9 @@
 		}
 		memset(cfg->hp_pins + cfg->hp_outs, 0,
 		       sizeof(hda_nid_t) * (AUTO_CFG_MAX_OUTS - cfg->hp_outs));
+		if (!cfg->hp_outs)
+			cfg->line_out_type = AUTO_PIN_HP_OUT;
+
 	}
 
 	/* sort by sequence */
@@ -4687,7 +4690,7 @@
 					int check_location)
 {
 	unsigned int def_conf;
-	static const char *mic_names[] = {
+	static const char * const mic_names[] = {
 		"Internal Mic", "Dock Mic", "Mic", "Front Mic", "Rear Mic",
 	};
 	int attr;
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 4a66347..74b05602 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -381,7 +381,7 @@
 	snd_print_pcm_rates(a->rates, buf, sizeof(buf));
 
 	if (a->format == AUDIO_CODING_TYPE_LPCM)
-		snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
+		snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
 	else if (a->max_bitrate)
 		snprintf(buf2, sizeof(buf2),
 				", max bitrate = %d", a->max_bitrate);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index fb0582f8..a63c54d 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -762,7 +762,8 @@
 /*
  * build output mixer controls
  */
-static int create_output_mixers(struct hda_codec *codec, const char **names)
+static int create_output_mixers(struct hda_codec *codec,
+				const char * const *names)
 {
 	struct hda_gspec *spec = codec->spec;
 	int i, err;
@@ -780,8 +781,8 @@
 static int build_output_controls(struct hda_codec *codec)
 {
 	struct hda_gspec *spec = codec->spec;
-	static const char *types_speaker[] = { "Speaker", "Headphone" };
-	static const char *types_line[] = { "Front", "Headphone" };
+	static const char * const types_speaker[] = { "Speaker", "Headphone" };
+	static const char * const types_line[] = { "Front", "Headphone" };
 
 	switch (spec->pcm_vol_nodes) {
 	case 1:
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a1c4008..fcedad9 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1235,7 +1235,8 @@
 			pos_adj = 0;
 		} else {
 			ofs = setup_bdle(substream, azx_dev,
-					 &bdl, ofs, pos_adj, 1);
+					 &bdl, ofs, pos_adj,
+					 !substream->runtime->no_period_wakeup);
 			if (ofs < 0)
 				goto error;
 		}
@@ -1247,7 +1248,8 @@
 					 period_bytes - pos_adj, 0);
 		else
 			ofs = setup_bdle(substream, azx_dev, &bdl, ofs,
-					 period_bytes, 1);
+					 period_bytes,
+					 !substream->runtime->no_period_wakeup);
 		if (ofs < 0)
 			goto error;
 	}
@@ -1515,7 +1517,8 @@
 				 /* No full-resume yet implemented */
 				 /* SNDRV_PCM_INFO_RESUME |*/
 				 SNDRV_PCM_INFO_PAUSE |
-				 SNDRV_PCM_INFO_SYNC_START),
+				 SNDRV_PCM_INFO_SYNC_START |
+				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_48000,
 	.rate_min =		48000,
@@ -2305,6 +2308,7 @@
 	SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
@@ -2700,7 +2704,7 @@
 	if (err < 0)
 		goto out_free;
 #ifdef CONFIG_SND_HDA_PATCH_LOADER
-	if (patch[dev]) {
+	if (patch[dev] && *patch[dev]) {
 		snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
 			   patch[dev]);
 		err = snd_hda_load_patch(chip->bus, patch[dev]);
@@ -2806,6 +2810,8 @@
 #endif
 	/* Vortex86MX */
 	{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
+	/* VMware HDAudio */
+	{ PCI_DEVICE(0x15ad, 0x1977), .driver_data = AZX_DRIVER_GENERIC },
 	/* AMD/ATI Generic, PCI class code and Vendor ID for HD Audio */
 	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 46bbefe..3ab5e7a 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -140,7 +140,7 @@
 struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
 					    const char *name);
 int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
-			unsigned int *tlv, const char **slaves);
+			unsigned int *tlv, const char * const *slaves);
 int snd_hda_codec_reset(struct hda_codec *codec);
 
 /* amp value bits */
@@ -341,10 +341,10 @@
  * Misc
  */
 int snd_hda_check_board_config(struct hda_codec *codec, int num_configs,
-			       const char **modelnames,
+			       const char * const *modelnames,
 			       const struct snd_pci_quirk *pci_list);
 int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
-                               int num_configs, const char **models,
+                               int num_configs, const char * const *models,
                                const struct snd_pci_quirk *tbl);
 int snd_hda_add_new_ctls(struct hda_codec *codec,
 			 struct snd_kcontrol_new *knew);
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index f025200..bfe74c2 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -418,7 +418,7 @@
 
 static const char *get_pwr_state(u32 state)
 {
-	static const char *buf[4] = {
+	static const char * const buf[4] = {
 		"D0", "D1", "D2", "D3"
 	};
 	if (state < 4)
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index f7ff3f7..8dabab7 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -46,6 +46,9 @@
 	unsigned int cur_eapd;
 	unsigned int need_dac_fix;
 
+	hda_nid_t *alt_dac_nid;
+	struct hda_pcm_stream *stream_analog_alt_playback;
+
 	/* capture */
 	unsigned int num_adc_nids;
 	hda_nid_t *adc_nids;
@@ -81,8 +84,8 @@
 #endif
 	/* for virtual master */
 	hda_nid_t vmaster_nid;
-	const char **slave_vols;
-	const char **slave_sws;
+	const char * const *slave_vols;
+	const char * const *slave_sws;
 };
 
 /*
@@ -130,7 +133,7 @@
 	return 0;
 }
 
-static const char *ad_slave_vols[] = {
+static const char * const ad_slave_vols[] = {
 	"Front Playback Volume",
 	"Surround Playback Volume",
 	"Center Playback Volume",
@@ -143,7 +146,7 @@
 	NULL
 };
 
-static const char *ad_slave_sws[] = {
+static const char * const ad_slave_sws[] = {
 	"Front Playback Switch",
 	"Surround Playback Switch",
 	"Center Playback Switch",
@@ -156,6 +159,25 @@
 	NULL
 };
 
+static const char * const ad1988_6stack_fp_slave_vols[] = {
+	"Front Playback Volume",
+	"Surround Playback Volume",
+	"Center Playback Volume",
+	"LFE Playback Volume",
+	"Side Playback Volume",
+	"IEC958 Playback Volume",
+	NULL
+};
+
+static const char * const ad1988_6stack_fp_slave_sws[] = {
+	"Front Playback Switch",
+	"Surround Playback Switch",
+	"Center Playback Switch",
+	"LFE Playback Switch",
+	"Side Playback Switch",
+	"IEC958 Playback Switch",
+	NULL
+};
 static void ad198x_free_kctls(struct hda_codec *codec);
 
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
@@ -309,6 +331,38 @@
 	return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
 }
 
+static int ad198x_alt_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+				struct hda_codec *codec,
+				unsigned int stream_tag,
+				unsigned int format,
+				struct snd_pcm_substream *substream)
+{
+	struct ad198x_spec *spec = codec->spec;
+	snd_hda_codec_setup_stream(codec, spec->alt_dac_nid[0], stream_tag,
+					0, format);
+	return 0;
+}
+
+static int ad198x_alt_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+				struct hda_codec *codec,
+				struct snd_pcm_substream *substream)
+{
+	struct ad198x_spec *spec = codec->spec;
+	snd_hda_codec_cleanup_stream(codec, spec->alt_dac_nid[0]);
+	return 0;
+}
+
+static struct hda_pcm_stream ad198x_pcm_analog_alt_playback = {
+	.substreams = 1,
+	.channels_min = 2,
+	.channels_max = 2,
+	/* NID is set in ad198x_build_pcms */
+	.ops = {
+		.prepare = ad198x_alt_playback_pcm_prepare,
+		.cleanup = ad198x_alt_playback_pcm_cleanup
+	},
+};
+
 /*
  * Digital out
  */
@@ -446,6 +500,17 @@
 		}
 	}
 
+	if (spec->alt_dac_nid && spec->stream_analog_alt_playback) {
+		codec->num_pcms++;
+		info = spec->pcm_rec + 2;
+		info->name = "AD198x Headphone";
+		info->pcm_type = HDA_PCM_TYPE_AUDIO;
+		info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
+			*spec->stream_analog_alt_playback;
+		info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
+			spec->alt_dac_nid[0];
+	}
+
 	return 0;
 }
 
@@ -666,7 +731,7 @@
 	HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
@@ -729,7 +794,7 @@
 	HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	/* 
 	   HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
 	   HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT), */
@@ -775,7 +840,7 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
 	{
@@ -1069,7 +1134,7 @@
 	AD1986A_MODELS
 };
 
-static const char *ad1986a_models[AD1986A_MODELS] = {
+static const char * const ad1986a_models[AD1986A_MODELS] = {
 	[AD1986A_6STACK]	= "6stack",
 	[AD1986A_3STACK]	= "3stack",
 	[AD1986A_LAPTOP]	= "laptop",
@@ -1358,7 +1423,7 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1515,8 +1580,8 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1726,8 +1791,8 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
 #endif
-	HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x18, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1774,7 +1839,7 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1813,7 +1878,7 @@
 	AD1981_MODELS
 };
 
-static const char *ad1981_models[AD1981_MODELS] = {
+static const char * const ad1981_models[AD1981_MODELS] = {
 	[AD1981_HP]		= "hp",
 	[AD1981_THINKPAD]	= "thinkpad",
 	[AD1981_BASIC]		= "basic",
@@ -2015,6 +2080,7 @@
 enum {
 	AD1988_6STACK,
 	AD1988_6STACK_DIG,
+	AD1988_6STACK_DIG_FP,
 	AD1988_3STACK,
 	AD1988_3STACK_DIG,
 	AD1988_LAPTOP,
@@ -2047,6 +2113,10 @@
 	0x04, 0x05, 0x0a, 0x06
 };
 
+static hda_nid_t ad1988_alt_dac_nid[1] = {
+	0x03
+};
+
 static hda_nid_t ad1988_3stack_dac_nids_rev2[3] = {
 	0x04, 0x0a, 0x06
 };
@@ -2160,8 +2230,37 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
+
+	{ } /* end */
+};
+
+static struct snd_kcontrol_new ad1988_6stack_fp_mixers[] = {
+	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
+
+	HDA_BIND_MUTE("Front Playback Switch", 0x29, 2, HDA_INPUT),
+	HDA_BIND_MUTE("Surround Playback Switch", 0x2a, 2, HDA_INPUT),
+	HDA_BIND_MUTE_MONO("Center Playback Switch", 0x27, 1, 2, HDA_INPUT),
+	HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x27, 2, 2, HDA_INPUT),
+	HDA_BIND_MUTE("Side Playback Switch", 0x28, 2, HDA_INPUT),
+	HDA_BIND_MUTE("Headphone Playback Switch", 0x22, 2, HDA_INPUT),
+	HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
+
+	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
+	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x4, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x4, HDA_INPUT),
+
+	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
+	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
+
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
 
 	{ } /* end */
 };
@@ -2203,8 +2302,8 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = "Channel Mode",
@@ -2232,7 +2331,7 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
 
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -2445,6 +2544,68 @@
 	{ }
 };
 
+static struct hda_verb ad1988_6stack_fp_init_verbs[] = {
+	/* Front, Surround, CLFE, side DAC; unmute as default */
+	{0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	/* Headphone; unmute as default */
+	{0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	/* Port-A front headphone path */
+	{0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
+	{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	{0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+	/* Port-D line-out path */
+	{0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	{0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	/* Port-F surround path */
+	{0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	{0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	/* Port-G CLFE path */
+	{0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	/* Port-H side path */
+	{0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	{0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x25, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	/* Mono out path */
+	{0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
+	{0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	{0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
+	/* Port-B front mic-in path */
+	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+	{0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+	/* Port-C line-in path */
+	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+	{0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+	{0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
+	/* Port-E mic-in path */
+	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+	{0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+	{0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+	{0x34, AC_VERB_SET_CONNECT_SEL, 0x0},
+	/* Analog CD Input */
+	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+	/* Analog Mix output amp */
+	{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
+
+	{ }
+};
+
 static struct hda_verb ad1988_capture_init_verbs[] = {
 	/* mute analog mix */
 	{0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
@@ -2792,7 +2953,9 @@
 					     const struct auto_pin_cfg *cfg)
 {
 	char name[32];
-	static const char *chname[4] = { "Front", "Surround", NULL /*CLFE*/, "Side" };
+	static const char * const chname[4] = {
+		"Front", "Surround", NULL /*CLFE*/, "Side"
+	};
 	hda_nid_t nid;
 	int i, err;
 
@@ -2902,7 +3065,7 @@
 		idx = ad1988_pin_idx(pin);
 		bnid = ad1988_boost_nids[idx];
 		if (bnid) {
-			sprintf(name, "%s Boost", ctlname);
+			sprintf(name, "%s Boost Volume", ctlname);
 			return add_control(spec, AD_CTL_WIDGET_VOL, name,
 					   HDA_COMPOSE_AMP_VAL(bnid, 3, idx, HDA_OUTPUT));
 
@@ -3074,13 +3237,13 @@
 	return 0;
 }
 
-
 /*
  */
 
-static const char *ad1988_models[AD1988_MODEL_LAST] = {
+static const char * const ad1988_models[AD1988_MODEL_LAST] = {
 	[AD1988_6STACK]		= "6stack",
 	[AD1988_6STACK_DIG]	= "6stack-dig",
+	[AD1988_6STACK_DIG_FP]	= "6stack-dig-fp",
 	[AD1988_3STACK]		= "3stack",
 	[AD1988_3STACK_DIG]	= "3stack-dig",
 	[AD1988_LAPTOP]		= "laptop",
@@ -3140,6 +3303,7 @@
 	switch (board_config) {
 	case AD1988_6STACK:
 	case AD1988_6STACK_DIG:
+	case AD1988_6STACK_DIG_FP:
 		spec->multiout.max_channels = 8;
 		spec->multiout.num_dacs = 4;
 		if (is_rev2(codec))
@@ -3152,10 +3316,22 @@
 			spec->mixers[0] = ad1988_6stack_mixers1_rev2;
 		else
 			spec->mixers[0] = ad1988_6stack_mixers1;
-		spec->mixers[1] = ad1988_6stack_mixers2;
+		if (board_config == AD1988_6STACK_DIG_FP) {
+			spec->mixers[1] = ad1988_6stack_fp_mixers;
+			spec->slave_vols = ad1988_6stack_fp_slave_vols;
+			spec->slave_sws = ad1988_6stack_fp_slave_sws;
+			spec->alt_dac_nid = ad1988_alt_dac_nid;
+			spec->stream_analog_alt_playback =
+				&ad198x_pcm_analog_alt_playback;
+		} else
+			spec->mixers[1] = ad1988_6stack_mixers2;
 		spec->num_init_verbs = 1;
-		spec->init_verbs[0] = ad1988_6stack_init_verbs;
-		if (board_config == AD1988_6STACK_DIG) {
+		if (board_config == AD1988_6STACK_DIG_FP)
+			spec->init_verbs[0] = ad1988_6stack_fp_init_verbs;
+		else
+			spec->init_verbs[0] = ad1988_6stack_init_verbs;
+		if ((board_config == AD1988_6STACK_DIG) ||
+			(board_config == AD1988_6STACK_DIG_FP)) {
 			spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
 			spec->dig_in_nid = AD1988_SPDIF_IN;
 		}
@@ -3300,8 +3476,8 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3399,7 +3575,7 @@
 };
 #endif
 
-static const char *ad1884_slave_vols[] = {
+static const char * const ad1884_slave_vols[] = {
 	"PCM Playback Volume",
 	"Mic Playback Volume",
 	"Mono Playback Volume",
@@ -3499,9 +3675,9 @@
 	HDA_CODEC_MUTE("Beep Playback Switch", 0x20, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("Docking Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("Docking Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Docking Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3560,8 +3736,8 @@
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line-In Playback Volume", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Line-In Playback Switch", 0x20, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line-In Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line-In Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3637,7 +3813,7 @@
 	AD1984_MODELS
 };
 
-static const char *ad1984_models[AD1984_MODELS] = {
+static const char * const ad1984_models[AD1984_MODELS] = {
 	[AD1984_BASIC]		= "basic",
 	[AD1984_THINKPAD]	= "thinkpad",
 	[AD1984_DELL_DESKTOP]	= "dell_desktop",
@@ -3745,9 +3921,9 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3888,9 +4064,9 @@
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Dock Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	{ } /* end */
@@ -4126,8 +4302,8 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	{
@@ -4255,8 +4431,8 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -4308,7 +4484,7 @@
 	AD1884A_MODELS
 };
 
-static const char *ad1884a_models[AD1884A_MODELS] = {
+static const char * const ad1884a_models[AD1884A_MODELS] = {
 	[AD1884A_DESKTOP]	= "desktop",
 	[AD1884A_LAPTOP]	= "laptop",
 	[AD1884A_MOBILE]	= "mobile",
@@ -4494,9 +4670,9 @@
 	HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Line-In Boost", 0x3a, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Line-In Boost Volume", 0x3a, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -4547,7 +4723,7 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT),
-	HDA_CODEC_VOLUME("Digital Mic Boost", 0x1f, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Digital Mic Boost Volume", 0x1f, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -4696,7 +4872,7 @@
 	AD1882_MODELS
 };
 
-static const char *ad1882_models[AD1986A_MODELS] = {
+static const char * const ad1882_models[AD1986A_MODELS] = {
 	[AD1882_3STACK]		= "3stack",
 	[AD1882_6STACK]		= "6stack",
 };
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 18af38e..067982f4 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -490,7 +490,7 @@
  * create mixer controls
  */
 
-static const char *dir_sfx[2] = { "Playback", "Capture" };
+static const char * const dir_sfx[2] = { "Playback", "Capture" };
 
 static int add_mute(struct hda_codec *codec, const char *name, int index,
 		    unsigned int pval, int dir, struct snd_kcontrol **kctlp)
@@ -1039,9 +1039,11 @@
 	{0x11, AC_VERB_SET_PROC_COEF, 0x0008},
 	{0x11, AC_VERB_SET_PROC_STATE, 0x00},
 
+#if 0 /* Don't set to D3 as we are in the power-up sequence */
 	{0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
 	{0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
 	/*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+#endif
 
 	{} /* terminator */
 };
@@ -1156,7 +1158,7 @@
 	return 0;
 }
 
-static const char *cs420x_models[CS420X_MODELS] = {
+static const char * const cs420x_models[CS420X_MODELS] = {
 	[CS420X_MBP53] = "mbp53",
 	[CS420X_MBP55] = "mbp55",
 	[CS420X_IMAC27] = "imac27",
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index ff60908..1f8bbcd 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -608,7 +608,7 @@
 /*
  */
 
-static const char *cmi9880_models[CMI_MODELS] = {
+static const char * const cmi9880_models[CMI_MODELS] = {
 	[CMI_MINIMAL]	= "minimal",
 	[CMI_MIN_FP]	= "min_fp",
 	[CMI_FULL]	= "full",
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 76bd58a..4d5004e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -85,6 +85,7 @@
 	unsigned int auto_mic;
 	int auto_mic_ext;		/* autocfg.inputs[] index for ext mic */
 	unsigned int need_dac_fix;
+	hda_nid_t slave_dig_outs[2];
 
 	/* capture */
 	unsigned int num_adc_nids;
@@ -127,6 +128,7 @@
 	unsigned int ideapad:1;
 	unsigned int thinkpad:1;
 	unsigned int hp_laptop:1;
+	unsigned int asus:1;
 
 	unsigned int ext_mic_present;
 	unsigned int recording;
@@ -352,6 +354,8 @@
 			info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
 				spec->dig_in_nid;
 		}
+		if (spec->slave_dig_outs[0])
+			codec->slave_dig_outs = spec->slave_dig_outs;
 	}
 
 	return 0;
@@ -403,10 +407,16 @@
 	struct conexant_spec *spec;
 	struct conexant_jack *jack;
 	const char *name;
-	int err;
+	int i, err;
 
 	spec = codec->spec;
 	snd_array_init(&spec->jacks, sizeof(*jack), 32);
+
+	jack = spec->jacks.list;
+	for (i = 0; i < spec->jacks.used; i++, jack++)
+		if (jack->nid == nid)
+			return 0; /* already present */
+
 	jack = snd_array_new(&spec->jacks);
 	name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
 
@@ -537,13 +547,13 @@
 };
 #endif
 
-static const char *slave_vols[] = {
+static const char * const slave_vols[] = {
 	"Headphone Playback Volume",
 	"Speaker Playback Volume",
 	NULL
 };
 
-static const char *slave_sws[] = {
+static const char * const slave_sws[] = {
 	"Headphone Playback Switch",
 	"Speaker Playback Switch",
 	NULL
@@ -869,16 +879,16 @@
 }
 
 static struct snd_kcontrol_new cxt5045_mixers[] = {
-	HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
 	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -910,16 +920,16 @@
 };
 
 static struct snd_kcontrol_new cxt5045_mixers_hp530[] = {
-	HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
 	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -947,7 +957,7 @@
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x1a, AC_VERB_SET_CONNECT_SEL,0x1},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
 	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
@@ -960,7 +970,7 @@
 };
 
 static struct hda_verb cxt5045_benq_init_verbs[] = {
-	/* Int Mic, Mic */
+	/* Internal Mic, Mic */
 	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
 	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
 	/* Line In,HP, Amp  */
@@ -973,7 +983,7 @@
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x1},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
 	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
@@ -1134,7 +1144,7 @@
 	CXT5045_MODELS
 };
 
-static const char *cxt5045_models[CXT5045_MODELS] = {
+static const char * const cxt5045_models[CXT5045_MODELS] = {
 	[CXT5045_LAPTOP_HPSENSE]	= "laptop-hpsense",
 	[CXT5045_LAPTOP_MICSENSE]	= "laptop-micsense",
 	[CXT5045_LAPTOP_HPMICSENSE]	= "laptop-hpmicsense",
@@ -1376,7 +1386,7 @@
 static struct snd_kcontrol_new cxt5047_base_mixers[] = {
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x19, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x19, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x1a, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1a, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x03, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x12, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Volume", 0x10, 0x00, HDA_OUTPUT),
@@ -1579,7 +1589,7 @@
 	CXT5047_MODELS
 };
 
-static const char *cxt5047_models[CXT5047_MODELS] = {
+static const char * const cxt5047_models[CXT5047_MODELS] = {
 	[CXT5047_LAPTOP]	= "laptop",
 	[CXT5047_LAPTOP_HP]	= "laptop-hp",
 	[CXT5047_LAPTOP_EAPD]	= "laptop-eapd",
@@ -1796,8 +1806,8 @@
 static struct snd_kcontrol_new cxt5051_capture_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("Docking Mic Volume", 0x15, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Docking Mic Switch", 0x15, 0x00, HDA_INPUT),
 	{}
@@ -1806,8 +1816,8 @@
 static struct snd_kcontrol_new cxt5051_hp_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x15, 0x00, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x15, 0x00, HDA_INPUT),
 	{}
 };
 
@@ -1826,8 +1836,8 @@
 static struct snd_kcontrol_new cxt5051_toshiba_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
 	{}
 };
 
@@ -1847,7 +1857,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */	
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
@@ -1874,7 +1884,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* SPDIF route: PCM */
@@ -1904,7 +1914,7 @@
 	{0x19, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
@@ -1932,7 +1942,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* SPDIF route: PCM */
@@ -1995,7 +2005,7 @@
 	CXT5051_MODELS
 };
 
-static const char *cxt5051_models[CXT5051_MODELS] = {
+static const char *const cxt5051_models[CXT5051_MODELS] = {
 	[CXT5051_LAPTOP]	= "laptop",
 	[CXT5051_HP]		= "hp",
 	[CXT5051_HP_DV6736]	= "hp-dv6736",
@@ -2100,7 +2110,7 @@
 static hda_nid_t cxt5066_dac_nids[1] = { 0x10 };
 static hda_nid_t cxt5066_adc_nids[3] = { 0x14, 0x15, 0x16 };
 static hda_nid_t cxt5066_capsrc_nids[1] = { 0x17 };
-#define CXT5066_SPDIF_OUT	0x21
+static hda_nid_t cxt5066_digout_pin_nids[2] = { 0x20, 0x22 };
 
 /* OLPC's microphone port is DC coupled for use with external sensors,
  * therefore we use a 50% mic bias in order to center the input signal with
@@ -2111,6 +2121,11 @@
 	{ 2, NULL },
 };
 
+#define HP_PRESENT_PORT_A	(1 << 0)
+#define HP_PRESENT_PORT_D	(1 << 1)
+#define hp_port_a_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_A)
+#define hp_port_d_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_D)
+
 static void cxt5066_update_speaker(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
@@ -2120,24 +2135,20 @@
 		    spec->hp_present, spec->cur_eapd);
 
 	/* Port A (HP) */
-	pinctl = ((spec->hp_present & 1) && spec->cur_eapd) ? PIN_HP : 0;
+	pinctl = (hp_port_a_present(spec) && spec->cur_eapd) ? PIN_HP : 0;
 	snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
 			pinctl);
 
 	/* Port D (HP/LO) */
-	if (spec->dell_automute) {
-		/* DELL AIO Port Rule: PortA>  PortD>  IntSpk */
-		pinctl = (!(spec->hp_present & 1) && spec->cur_eapd)
-			? PIN_OUT : 0;
-	} else if (spec->thinkpad) {
-		if (spec->cur_eapd)
-			pinctl = spec->port_d_mode;
-		/* Mute dock line-out if Port A (laptop HP) is present */
-		if (spec->hp_present&  1)
+	pinctl = spec->cur_eapd ? spec->port_d_mode : 0;
+	if (spec->dell_automute || spec->thinkpad) {
+		/* Mute if Port A is connected */
+		if (hp_port_a_present(spec))
 			pinctl = 0;
 	} else {
-		pinctl = ((spec->hp_present & 2) && spec->cur_eapd)
-			? spec->port_d_mode : 0;
+		/* Thinkpad/Dell don't give pin-D status */
+		if (!hp_port_d_present(spec))
+			pinctl = 0;
 	}
 	snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
 			pinctl);
@@ -2311,6 +2322,19 @@
 	}
 }
 
+
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_asus_automic(struct hda_codec *codec)
+{
+	unsigned int present;
+
+	present = snd_hda_jack_detect(codec, 0x1b);
+	snd_printdd("CXT5066: external microphone present=%d\n", present);
+	snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+			    present ? 1 : 0);
+}
+
+
 /* toggle input of built-in digital mic and mic jack appropriately */
 static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
 {
@@ -2379,13 +2403,30 @@
 	/* Port D */
 	portD = snd_hda_jack_detect(codec, 0x1c);
 
-	spec->hp_present = !!(portA);
-	spec->hp_present |= portD ? 2 : 0;
+	spec->hp_present = portA ? HP_PRESENT_PORT_A : 0;
+	spec->hp_present |= portD ? HP_PRESENT_PORT_D : 0;
 	snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n",
 		portA, portD, spec->hp_present);
 	cxt5066_update_speaker(codec);
 }
 
+/* Dispatch the right mic autoswitch function */
+static void cxt5066_automic(struct hda_codec *codec)
+{
+	struct conexant_spec *spec = codec->spec;
+
+	if (spec->dell_vostro)
+		cxt5066_vostro_automic(codec);
+	else if (spec->ideapad)
+		cxt5066_ideapad_automic(codec);
+	else if (spec->thinkpad)
+		cxt5066_thinkpad_automic(codec);
+	else if (spec->hp_laptop)
+		cxt5066_hp_laptop_automic(codec);
+	else if (spec->asus)
+		cxt5066_asus_automic(codec);
+}
+
 /* unsolicited event for jack sensing */
 static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
 {
@@ -2404,60 +2445,19 @@
 }
 
 /* unsolicited event for jack sensing */
-static void cxt5066_vostro_event(struct hda_codec *codec, unsigned int res)
+static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
 {
-	snd_printdd("CXT5066_vostro: unsol event %x (%x)\n", res, res >> 26);
+	snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
 	switch (res >> 26) {
 	case CONEXANT_HP_EVENT:
 		cxt5066_hp_automute(codec);
 		break;
 	case CONEXANT_MIC_EVENT:
-		cxt5066_vostro_automic(codec);
+		cxt5066_automic(codec);
 		break;
 	}
 }
 
-/* unsolicited event for jack sensing */
-static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
-{
-	snd_printdd("CXT5066_ideapad: unsol event %x (%x)\n", res, res >> 26);
-	switch (res >> 26) {
-	case CONEXANT_HP_EVENT:
-		cxt5066_hp_automute(codec);
-		break;
-	case CONEXANT_MIC_EVENT:
-		cxt5066_ideapad_automic(codec);
-		break;
-	}
-}
-
-/* unsolicited event for jack sensing */
-static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
-{
-	snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
-	switch (res >> 26) {
-	case CONEXANT_HP_EVENT:
-		cxt5066_hp_automute(codec);
-		break;
-	case CONEXANT_MIC_EVENT:
-		cxt5066_hp_laptop_automic(codec);
-		break;
-	}
-}
-
-/* unsolicited event for jack sensing */
-static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
-{
-	snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
-	switch (res >> 26) {
-	case CONEXANT_HP_EVENT:
-		cxt5066_hp_automute(codec);
-		break;
-	case CONEXANT_MIC_EVENT:
-		cxt5066_thinkpad_automic(codec);
-		break;
-	}
-}
 
 static const struct hda_input_mux cxt5066_analog_mic_boost = {
 	.num_items = 5,
@@ -2632,6 +2632,27 @@
 	spec->recording = 0;
 }
 
+static void conexant_check_dig_outs(struct hda_codec *codec,
+				    hda_nid_t *dig_pins,
+				    int num_pins)
+{
+	struct conexant_spec *spec = codec->spec;
+	hda_nid_t *nid_loc = &spec->multiout.dig_out_nid;
+	int i;
+
+	for (i = 0; i < num_pins; i++, dig_pins++) {
+		unsigned int cfg = snd_hda_codec_get_pincfg(codec, *dig_pins);
+		if (get_defcfg_connect(cfg) == AC_JACK_PORT_NONE)
+			continue;
+		if (snd_hda_get_connections(codec, *dig_pins, nid_loc, 1) != 1)
+			continue;
+		if (spec->slave_dig_outs[0])
+			nid_loc++;
+		else
+			nid_loc = spec->slave_dig_outs;
+	}
+}
+
 static struct hda_input_mux cxt5066_capture_source = {
 	.num_items = 4,
 	.items = {
@@ -2728,7 +2749,7 @@
 static struct snd_kcontrol_new cxt5066_vostro_mixers[] = {
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "Int Mic Boost Capture Enum",
+		.name = "Internal Mic Boost Capture Enum",
 		.info = cxt5066_mic_boost_mux_enum_info,
 		.get = cxt5066_mic_boost_mux_enum_get,
 		.put = cxt5066_mic_boost_mux_enum_put,
@@ -2954,7 +2975,7 @@
 	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 
 	/* internal microphone */
-	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */
+	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
 
 	/* EAPD */
 	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -3009,7 +3030,7 @@
 	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 
 	/* internal microphone */
-	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */
+	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
 
 	/* EAPD */
 	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -3038,20 +3059,11 @@
 /* initialize jack-sensing, too */
 static int cxt5066_init(struct hda_codec *codec)
 {
-	struct conexant_spec *spec = codec->spec;
-
 	snd_printdd("CXT5066: init\n");
 	conexant_init(codec);
 	if (codec->patch_ops.unsol_event) {
 		cxt5066_hp_automute(codec);
-		if (spec->dell_vostro)
-			cxt5066_vostro_automic(codec);
-		else if (spec->ideapad)
-			cxt5066_ideapad_automic(codec);
-		else if (spec->thinkpad)
-			cxt5066_thinkpad_automic(codec);
-		else if (spec->hp_laptop)
-			cxt5066_hp_laptop_automic(codec);
+		cxt5066_automic(codec);
 	}
 	cxt5066_set_mic_boost(codec);
 	return 0;
@@ -3079,17 +3091,19 @@
 	CXT5066_DELL_VOSTRO,	/* Dell Vostro 1015i */
 	CXT5066_IDEAPAD,	/* Lenovo IdeaPad U150 */
 	CXT5066_THINKPAD,	/* Lenovo ThinkPad T410s, others? */
+	CXT5066_ASUS,		/* Asus K52JU, Lenovo G560 - Int mic at 0x1a and Ext mic at 0x1b */
 	CXT5066_HP_LAPTOP,      /* HP Laptop */
 	CXT5066_MODELS
 };
 
-static const char *cxt5066_models[CXT5066_MODELS] = {
+static const char * const cxt5066_models[CXT5066_MODELS] = {
 	[CXT5066_LAPTOP]	= "laptop",
 	[CXT5066_DELL_LAPTOP]	= "dell-laptop",
 	[CXT5066_OLPC_XO_1_5]	= "olpc-xo-1_5",
 	[CXT5066_DELL_VOSTRO]	= "dell-vostro",
 	[CXT5066_IDEAPAD]	= "ideapad",
 	[CXT5066_THINKPAD]	= "thinkpad",
+	[CXT5066_ASUS]		= "asus",
 	[CXT5066_HP_LAPTOP]	= "hp-laptop",
 };
 
@@ -3097,10 +3111,15 @@
 	SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
-	SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
+	SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
+	SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
+	SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
 	SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
 	SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
@@ -3108,16 +3127,11 @@
 		      CXT5066_LAPTOP),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
  	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
- 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
+	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
 	{}
 };
 
@@ -3138,7 +3152,8 @@
 	spec->multiout.max_channels = 2;
 	spec->multiout.num_dacs = ARRAY_SIZE(cxt5066_dac_nids);
 	spec->multiout.dac_nids = cxt5066_dac_nids;
-	spec->multiout.dig_out_nid = CXT5066_SPDIF_OUT;
+	conexant_check_dig_outs(codec, cxt5066_digout_pin_nids,
+	    ARRAY_SIZE(cxt5066_digout_pin_nids));
 	spec->num_adc_nids = 1;
 	spec->adc_nids = cxt5066_adc_nids;
 	spec->capsrc_nids = cxt5066_capsrc_nids;
@@ -3172,17 +3187,20 @@
 		spec->num_init_verbs++;
 		spec->dell_automute = 1;
 		break;
+	case CXT5066_ASUS:
 	case CXT5066_HP_LAPTOP:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->init_verbs[spec->num_init_verbs] =
 			cxt5066_init_verbs_hp_laptop;
 		spec->num_init_verbs++;
-		spec->hp_laptop = 1;
+		spec->hp_laptop = board_config == CXT5066_HP_LAPTOP;
+		spec->asus = board_config == CXT5066_ASUS;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
 		/* no S/PDIF out */
-		spec->multiout.dig_out_nid = 0;
+		if (board_config == CXT5066_HP_LAPTOP)
+			spec->multiout.dig_out_nid = 0;
 		/* input source automatically selected */
 		spec->input_mux = NULL;
 		spec->port_d_mode = 0;
@@ -3212,7 +3230,7 @@
 		break;
 	case CXT5066_DELL_VOSTRO:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_vostro_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->init_verbs[0] = cxt5066_init_verbs_vostro;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
@@ -3229,7 +3247,7 @@
 		break;
 	case CXT5066_IDEAPAD:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_ideapad_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
 		spec->init_verbs[0] = cxt5066_init_verbs_ideapad;
@@ -3245,7 +3263,7 @@
 		break;
 	case CXT5066_THINKPAD:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_thinkpad_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
 		spec->init_verbs[0] = cxt5066_init_verbs_thinkpad;
@@ -3394,7 +3412,7 @@
 		}
 	}
 	spec->multiout.dac_nids = spec->private_dac_nids;
-	spec->multiout.max_channels = nums * 2;
+	spec->multiout.max_channels = spec->multiout.num_dacs * 2;
 
 	if (cfg->hp_outs > 0)
 		spec->auto_mute = 1;
@@ -3422,6 +3440,9 @@
 				    AC_VERB_SET_PIN_WIDGET_CONTROL,
 				    present ? 0 : PIN_OUT);
 	}
+	for (i = 0; !present && i < cfg->line_outs; i++)
+		if (snd_hda_jack_detect(codec, cfg->line_out_pins[i]))
+			present = 1;
 	for (i = 0; i < cfg->speaker_outs; i++) {
 		snd_hda_codec_write(codec, cfg->speaker_pins[i], 0,
 				    AC_VERB_SET_PIN_WIDGET_CONTROL,
@@ -3710,9 +3731,9 @@
 	return 0;
 }
 
-static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
+static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
 			      const char *dir, int cidx,
-			      hda_nid_t nid, int hda_dir)
+			      hda_nid_t nid, int hda_dir, int amp_idx)
 {
 	static char name[32];
 	static struct snd_kcontrol_new knew[] = {
@@ -3724,7 +3745,8 @@
 
 	for (i = 0; i < 2; i++) {
 		struct snd_kcontrol *kctl;
-		knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir);
+		knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
+							    hda_dir);
 		knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
 		knew[i].index = cidx;
 		snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]);
@@ -3740,6 +3762,9 @@
 	return 0;
 }
 
+#define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir)		\
+	cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
+
 #define cx_auto_add_pb_volume(codec, nid, str, idx)			\
 	cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
 
@@ -3748,7 +3773,7 @@
 	struct conexant_spec *spec = codec->spec;
 	int i, err;
 	int num_line = 0, num_hp = 0, num_spk = 0;
-	static const char *texts[3] = { "Front", "Surround", "CLFE" };
+	static const char * const texts[3] = { "Front", "Surround", "CLFE" };
 
 	if (spec->dac_info_filled == 1)
 		return cx_auto_add_pb_volume(codec, spec->dac_info[0].dac,
@@ -3789,29 +3814,60 @@
 	struct conexant_spec *spec = codec->spec;
 	struct auto_pin_cfg *cfg = &spec->autocfg;
 	static const char *prev_label;
-	int i, err, cidx;
+	int i, err, cidx, conn_len;
+	hda_nid_t conn[HDA_MAX_CONNECTIONS];
 
-	err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0],
-				 HDA_INPUT);
-	if (err < 0)
-		return err;
+	int multi_adc_volume = 0; /* If the ADC nid has several input volumes */
+	int adc_nid = spec->adc_nids[0];
+
+	conn_len = snd_hda_get_connections(codec, adc_nid, conn,
+					   HDA_MAX_CONNECTIONS);
+	if (conn_len < 0)
+		return conn_len;
+
+	multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1;
+	if (!multi_adc_volume) {
+		err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid,
+					 HDA_INPUT);
+		if (err < 0)
+			return err;
+	}
+
 	prev_label = NULL;
 	cidx = 0;
 	for (i = 0; i < cfg->num_inputs; i++) {
 		hda_nid_t nid = cfg->inputs[i].pin;
 		const char *label;
-		if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP))
+		int j;
+		int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP;
+		if (!pin_amp && !multi_adc_volume)
 			continue;
+
 		label = hda_get_autocfg_input_label(codec, cfg, i);
 		if (label == prev_label)
 			cidx++;
 		else
 			cidx = 0;
 		prev_label = label;
-		err = cx_auto_add_volume(codec, label, " Capture", cidx,
-					 nid, HDA_INPUT);
-		if (err < 0)
-			return err;
+
+		if (pin_amp) {
+			err = cx_auto_add_volume(codec, label, " Boost", cidx,
+						 nid, HDA_INPUT);
+			if (err < 0)
+				return err;
+		}
+
+		if (!multi_adc_volume)
+			continue;
+		for (j = 0; j < conn_len; j++) {
+			if (conn[j] == nid) {
+				err = cx_auto_add_volume_idx(codec, label,
+				    " Capture", cidx, adc_nid, HDA_INPUT, j);
+				if (err < 0)
+					return err;
+				break;
+			}
+		}
 	}
 	return 0;
 }
@@ -3883,6 +3939,8 @@
 	  .patch = patch_cxt5066 },
 	{ .id = 0x14f15069, .name = "CX20585",
 	  .patch = patch_cxt5066 },
+	{ .id = 0x14f1506e, .name = "CX20590",
+	  .patch = patch_cxt5066 },
 	{ .id = 0x14f15097, .name = "CX20631",
 	  .patch = patch_conexant_auto },
 	{ .id = 0x14f15098, .name = "CX20632",
@@ -3909,6 +3967,7 @@
 MODULE_ALIAS("snd-hda-codec-id:14f15067");
 MODULE_ALIAS("snd-hda-codec-id:14f15068");
 MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506e");
 MODULE_ALIAS("snd-hda-codec-id:14f15097");
 MODULE_ALIAS("snd-hda-codec-id:14f15098");
 MODULE_ALIAS("snd-hda-codec-id:14f150a1");
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 31df774..ec0fa2d 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -31,10 +31,15 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/moduleparam.h>
 #include <sound/core.h>
 #include "hda_codec.h"
 #include "hda_local.h"
 
+static bool static_hdmi_pcm;
+module_param(static_hdmi_pcm, bool, 0644);
+MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+
 /*
  * The HDMI/DisplayPort configuration can be highly dynamic. A graphics device
  * could support two independent pipes, each of them can be connected to one or
@@ -637,6 +642,7 @@
 			hdmi_ai->ver		= 0x01;
 			hdmi_ai->len		= 0x0a;
 			hdmi_ai->CC02_CT47	= channels - 1;
+			hdmi_ai->CA		= ca;
 			hdmi_checksum_audio_infoframe(hdmi_ai);
 		} else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */
 			struct dp_audio_infoframe *dp_ai;
@@ -646,6 +652,7 @@
 			dp_ai->len		= 0x1b;
 			dp_ai->ver		= 0x11 << 2;
 			dp_ai->CC02_CT47	= channels - 1;
+			dp_ai->CA		= ca;
 		} else {
 			snd_printd("HDMI: unknown connection type at pin %d\n",
 				   pin_nid);
@@ -812,6 +819,7 @@
 	struct hdmi_spec *spec = codec->spec;
 	struct hdmi_eld *eld;
 	struct hda_pcm_stream *codec_pars;
+	struct snd_pcm_runtime *runtime = substream->runtime;
 	unsigned int idx;
 
 	for (idx = 0; idx < spec->num_cvts; idx++)
@@ -827,7 +835,7 @@
 		*codec_pars = *hinfo;
 
 	eld = &spec->sink_eld[idx];
-	if (eld->sad_count > 0) {
+	if (!static_hdmi_pcm && eld->eld_valid && eld->sad_count > 0) {
 		hdmi_eld_update_pcm_info(eld, hinfo, codec_pars);
 		if (hinfo->channels_min > hinfo->channels_max ||
 		    !hinfo->rates || !hinfo->formats)
@@ -839,6 +847,14 @@
 		hinfo->formats = codec_pars->formats;
 		hinfo->maxbps = codec_pars->maxbps;
 	}
+	/* store the updated parameters */
+	runtime->hw.channels_min = hinfo->channels_min;
+	runtime->hw.channels_max = hinfo->channels_max;
+	runtime->hw.formats = hinfo->formats;
+	runtime->hw.rates = hinfo->rates;
+
+	snd_pcm_hw_constraint_step(substream->runtime, 0,
+				   SNDRV_PCM_HW_PARAM_CHANNELS, 2);
 	return 0;
 }
 
@@ -904,23 +920,28 @@
 	spec->pin[spec->num_pins] = pin_nid;
 	spec->num_pins++;
 
-	/*
-	 * It is assumed that converter nodes come first in the node list and
-	 * hence have been registered and usable now.
-	 */
 	return hdmi_read_pin_conn(codec, pin_nid);
 }
 
 static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t nid)
 {
+	int i, found_pin = 0;
 	struct hdmi_spec *spec = codec->spec;
 
-	if (spec->num_cvts >= MAX_HDMI_CVTS) {
-		snd_printk(KERN_WARNING
-			   "HDMI: no space for converter %d\n", nid);
-		return -E2BIG;
+	for (i = 0; i < spec->num_pins; i++)
+		if (nid == spec->pin_cvt[i]) {
+			found_pin = 1;
+			break;
+		}
+
+	if (!found_pin) {
+		snd_printdd("HDMI: Skipping node %d (no connection)\n", nid);
+		return -EINVAL;
 	}
 
+	if (snd_BUG_ON(spec->num_cvts >= MAX_HDMI_CVTS))
+		return -E2BIG;
+
 	spec->cvt[spec->num_cvts] = nid;
 	spec->num_cvts++;
 
@@ -931,6 +952,8 @@
 {
 	hda_nid_t nid;
 	int i, nodes;
+	int num_tmp_cvts = 0;
+	hda_nid_t tmp_cvt[MAX_HDMI_CVTS];
 
 	nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid);
 	if (!nid || nodes < 0) {
@@ -941,6 +964,7 @@
 	for (i = 0; i < nodes; i++, nid++) {
 		unsigned int caps;
 		unsigned int type;
+		unsigned int config;
 
 		caps = snd_hda_param_read(codec, nid, AC_PAR_AUDIO_WIDGET_CAP);
 		type = get_wcaps_type(caps);
@@ -950,17 +974,32 @@
 
 		switch (type) {
 		case AC_WID_AUD_OUT:
-			hdmi_add_cvt(codec, nid);
+			if (num_tmp_cvts >= MAX_HDMI_CVTS) {
+				snd_printk(KERN_WARNING
+					   "HDMI: no space for converter %d\n", nid);
+				continue;
+			}
+			tmp_cvt[num_tmp_cvts] = nid;
+			num_tmp_cvts++;
 			break;
 		case AC_WID_PIN:
 			caps = snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
 			if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP)))
 				continue;
+
+			config = snd_hda_codec_read(codec, nid, 0,
+					     AC_VERB_GET_CONFIG_DEFAULT, 0);
+			if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
+				continue;
+
 			hdmi_add_pin(codec, nid);
 			break;
 		}
 	}
 
+	for (i = 0; i < num_tmp_cvts; i++)
+		hdmi_add_cvt(codec, tmp_cvt[i]);
+
 	/*
 	 * G45/IbexPeak don't support EPSS: the unsolicited pin hot plug event
 	 * can be lost and presence sense verb will become inaccurate if the
@@ -1165,11 +1204,56 @@
 	return 0;
 }
 
+static unsigned int channels_2_6_8[] = {
+	2, 6, 8
+};
+
+static unsigned int channels_2_8[] = {
+	2, 8
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_2_6_8_channels = {
+	.count = ARRAY_SIZE(channels_2_6_8),
+	.list = channels_2_6_8,
+	.mask = 0,
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_2_8_channels = {
+	.count = ARRAY_SIZE(channels_2_8),
+	.list = channels_2_8,
+	.mask = 0,
+};
+
 static int simple_playback_pcm_open(struct hda_pcm_stream *hinfo,
 				    struct hda_codec *codec,
 				    struct snd_pcm_substream *substream)
 {
 	struct hdmi_spec *spec = codec->spec;
+	struct snd_pcm_hw_constraint_list *hw_constraints_channels = NULL;
+
+	switch (codec->preset->id) {
+	case 0x10de0002:
+	case 0x10de0003:
+	case 0x10de0005:
+	case 0x10de0006:
+		hw_constraints_channels = &hw_constraints_2_8_channels;
+		break;
+	case 0x10de0007:
+		hw_constraints_channels = &hw_constraints_2_6_8_channels;
+		break;
+	default:
+		break;
+	}
+
+	if (hw_constraints_channels != NULL) {
+		snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_CHANNELS,
+				hw_constraints_channels);
+	} else {
+		snd_pcm_hw_constraint_step(substream->runtime, 0,
+					   SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+	}
+
 	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
 }
 
@@ -1532,7 +1616,7 @@
 { .id = 0x1002793c, .name = "RS600 HDMI",	.patch = patch_atihdmi },
 { .id = 0x10027919, .name = "RS600 HDMI",	.patch = patch_atihdmi },
 { .id = 0x1002791a, .name = "RS690/780 HDMI",	.patch = patch_atihdmi },
-{ .id = 0x1002aa01, .name = "R6xx HDMI",	.patch = patch_atihdmi },
+{ .id = 0x1002aa01, .name = "R6xx HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x10951390, .name = "SiI1390 HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x10951392, .name = "SiI1392 HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x17e80047, .name = "Chrontel HDMI",	.patch = patch_generic_hdmi },
@@ -1550,6 +1634,9 @@
 { .id = 0x10de0012, .name = "GPU 12 HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0013, .name = "GPU 13 HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0014, .name = "GPU 14 HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
+/* 17 is known to be absent */
 { .id = 0x10de0018, .name = "GPU 18 HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0019, .name = "GPU 19 HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de001a, .name = "GPU 1a HDMI/DP",	.patch = patch_nvhdmi_8ch_89 },
@@ -1592,6 +1679,8 @@
 MODULE_ALIAS("snd-hda-codec-id:10de0012");
 MODULE_ALIAS("snd-hda-codec-id:10de0013");
 MODULE_ALIAS("snd-hda-codec-id:10de0014");
+MODULE_ALIAS("snd-hda-codec-id:10de0015");
+MODULE_ALIAS("snd-hda-codec-id:10de0016");
 MODULE_ALIAS("snd-hda-codec-id:10de0018");
 MODULE_ALIAS("snd-hda-codec-id:10de0019");
 MODULE_ALIAS("snd-hda-codec-id:10de001a");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 552a09e..4261bb8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -231,7 +231,6 @@
 	ALC888_ACER_ASPIRE_8930G,
 	ALC888_ACER_ASPIRE_7730G,
 	ALC883_MEDION,
-	ALC883_MEDION_MD2,
 	ALC883_MEDION_WIM2160,
 	ALC883_LAPTOP_EAPD,
 	ALC883_LENOVO_101E_2ch,
@@ -304,6 +303,8 @@
 	unsigned int  fixup:1; /* Means that this sku is set by driver, not read from hw */
 };
 
+struct alc_fixup;
+
 struct alc_spec {
 	/* codec parameterization */
 	struct snd_kcontrol_new *mixers[5];	/* mixer arrays */
@@ -405,6 +406,11 @@
 	/* for PLL fix */
 	hda_nid_t pll_nid;
 	unsigned int pll_coef_idx, pll_coef_bit;
+
+	/* fix-up list */
+	int fixup_id;
+	const struct alc_fixup *fixup_list;
+	const char *fixup_name;
 };
 
 /*
@@ -1127,11 +1133,8 @@
 		nid = spec->autocfg.hp_pins[i];
 		if (!nid)
 			break;
-		if (snd_hda_jack_detect(codec, nid)) {
-			spec->jack_present = 1;
-			break;
-		}
-		alc_report_jack(codec, spec->autocfg.hp_pins[i]);
+		alc_report_jack(codec, nid);
+		spec->jack_present |= snd_hda_jack_detect(codec, nid);
 	}
 
 	mute = spec->jack_present ? HDA_AMP_MUTE : 0;
@@ -1678,48 +1681,139 @@
 	u32 val;
 };
 
-struct alc_fixup {
-	unsigned int sku;
-	const struct alc_pincfg *pins;
-	const struct hda_verb *verbs;
+struct alc_model_fixup {
+	const int id;
+	const char *name;
 };
 
-static void alc_pick_fixup(struct hda_codec *codec,
-			   const struct snd_pci_quirk *quirk,
-			   const struct alc_fixup *fix,
-			   int pre_init)
-{
-	const struct alc_pincfg *cfg;
-	struct alc_spec *spec;
+struct alc_fixup {
+	int type;
+	bool chained;
+	int chain_id;
+	union {
+		unsigned int sku;
+		const struct alc_pincfg *pins;
+		const struct hda_verb *verbs;
+		void (*func)(struct hda_codec *codec,
+			     const struct alc_fixup *fix,
+			     int action);
+	} v;
+};
 
-	quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
-	if (!quirk)
+enum {
+	ALC_FIXUP_INVALID,
+	ALC_FIXUP_SKU,
+	ALC_FIXUP_PINS,
+	ALC_FIXUP_VERBS,
+	ALC_FIXUP_FUNC,
+};
+
+enum {
+	ALC_FIXUP_ACT_PRE_PROBE,
+	ALC_FIXUP_ACT_PROBE,
+	ALC_FIXUP_ACT_INIT,
+};
+
+static void alc_apply_fixup(struct hda_codec *codec, int action)
+{
+	struct alc_spec *spec = codec->spec;
+	int id = spec->fixup_id;
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+	const char *modelname = spec->fixup_name;
+#endif
+	int depth = 0;
+
+	if (!spec->fixup_list)
 		return;
-	fix += quirk->value;
-	cfg = fix->pins;
-	if (pre_init && fix->sku) {
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-		snd_printdd(KERN_INFO "hda_codec: %s: Apply sku override for %s\n",
-			    codec->chip_name, quirk->name);
-#endif
-		spec = codec->spec;
-		spec->cdefine.sku_cfg = fix->sku;
-		spec->cdefine.fixup = 1;
+
+	while (id >= 0) {
+		const struct alc_fixup *fix = spec->fixup_list + id;
+		const struct alc_pincfg *cfg;
+
+		switch (fix->type) {
+		case ALC_FIXUP_SKU:
+			if (action != ALC_FIXUP_ACT_PRE_PROBE || !fix->v.sku)
+				break;
+			snd_printdd(KERN_INFO "hda_codec: %s: "
+				    "Apply sku override for %s\n",
+				    codec->chip_name, modelname);
+			spec->cdefine.sku_cfg = fix->v.sku;
+			spec->cdefine.fixup = 1;
+			break;
+		case ALC_FIXUP_PINS:
+			cfg = fix->v.pins;
+			if (action != ALC_FIXUP_ACT_PRE_PROBE || !cfg)
+				break;
+			snd_printdd(KERN_INFO "hda_codec: %s: "
+				    "Apply pincfg for %s\n",
+				    codec->chip_name, modelname);
+			for (; cfg->nid; cfg++)
+				snd_hda_codec_set_pincfg(codec, cfg->nid,
+							 cfg->val);
+			break;
+		case ALC_FIXUP_VERBS:
+			if (action != ALC_FIXUP_ACT_PROBE || !fix->v.verbs)
+				break;
+			snd_printdd(KERN_INFO "hda_codec: %s: "
+				    "Apply fix-verbs for %s\n",
+				    codec->chip_name, modelname);
+			add_verb(codec->spec, fix->v.verbs);
+			break;
+		case ALC_FIXUP_FUNC:
+			if (!fix->v.func)
+				break;
+			snd_printdd(KERN_INFO "hda_codec: %s: "
+				    "Apply fix-func for %s\n",
+				    codec->chip_name, modelname);
+			fix->v.func(codec, fix, action);
+			break;
+		default:
+			snd_printk(KERN_ERR "hda_codec: %s: "
+				   "Invalid fixup type %d\n",
+				   codec->chip_name, fix->type);
+			break;
+		}
+		if (!fix->chained)
+			break;
+		if (++depth > 10)
+			break;
+		id = fix->chain_id;
 	}
-	if (pre_init && cfg) {
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-		snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n",
-			    codec->chip_name, quirk->name);
-#endif
-		for (; cfg->nid; cfg++)
-			snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
+}
+
+static void alc_pick_fixup(struct hda_codec *codec,
+			   const struct alc_model_fixup *models,
+			   const struct snd_pci_quirk *quirk,
+			   const struct alc_fixup *fixlist)
+{
+	struct alc_spec *spec = codec->spec;
+	int id = -1;
+	const char *name = NULL;
+
+	if (codec->modelname && models) {
+		while (models->name) {
+			if (!strcmp(codec->modelname, models->name)) {
+				id = models->id;
+				name = models->name;
+				break;
+			}
+			models++;
+		}
 	}
-	if (!pre_init && fix->verbs) {
+	if (id < 0) {
+		quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+		if (quirk) {
+			id = quirk->value;
 #ifdef CONFIG_SND_DEBUG_VERBOSE
-		snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n",
-			    codec->chip_name, quirk->name);
+			name = quirk->name;
 #endif
-		add_verb(codec->spec, fix->verbs);
+		}
+	}
+
+	spec->fixup_id = id;
+	if (id >= 0) {
+		spec->fixup_list = fixlist;
+		spec->fixup_name = name;
 	}
 }
 
@@ -1981,6 +2075,7 @@
 	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+	{0x15, AC_VERB_SET_EAPD_BTLENABLE, 2},
 	{ }
 };
 
@@ -2120,17 +2215,17 @@
 	{
 		.num_items = 5,
 		.items = {
-			{ "Ext Mic", 0x0 },
+			{ "Mic", 0x0 },
 			{ "Line In", 0x2 },
 			{ "CD", 0x4 },
 			{ "Input Mix", 0xa },
-			{ "Int Mic", 0xb },
+			{ "Internal Mic", 0xb },
 		},
 	},
 	{
 		.num_items = 4,
 		.items = {
-			{ "Ext Mic", 0x0 },
+			{ "Mic", 0x0 },
 			{ "Line In", 0x2 },
 			{ "CD", 0x4 },
 			{ "Input Mix", 0xa },
@@ -2187,11 +2282,34 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
+static struct snd_kcontrol_new alc888_acer_aspire_4930g_mixer[] = {
+	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0f, 2, 0x0,
+		HDA_OUTPUT),
+	HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0f, 2, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0f, 1, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0f, 1, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Side Playback Volume", 0x0e, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE("Side Playback Switch", 0x0e, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
+	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	{ } /* end */
+};
+
+
 static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = {
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -2205,7 +2323,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -2796,10 +2914,10 @@
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -2820,7 +2938,7 @@
 /*
  * slave controls for virtual master
  */
-static const char *alc_slave_vols[] = {
+static const char * const alc_slave_vols[] = {
 	"Front Playback Volume",
 	"Surround Playback Volume",
 	"Center Playback Volume",
@@ -2834,7 +2952,7 @@
 	NULL,
 };
 
-static const char *alc_slave_sws[] = {
+static const char * const alc_slave_sws[] = {
 	"Front Playback Switch",
 	"Surround Playback Switch",
 	"Center Playback Switch",
@@ -3307,7 +3425,7 @@
 };
 
 /* auto-toggle front mic */
-static void alc880_uniwill_mic_automute(struct hda_codec *codec)
+static void alc88x_simple_mic_automute(struct hda_codec *codec)
 {
  	unsigned int present;
 	unsigned char bits;
@@ -3329,7 +3447,7 @@
 static void alc880_uniwill_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc880_uniwill_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc880_uniwill_unsol_event(struct hda_codec *codec,
@@ -3340,7 +3458,7 @@
 	 */
 	switch (res >> 28) {
 	case ALC880_MIC_EVENT:
-		alc880_uniwill_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -3815,6 +3933,8 @@
 	if (spec->init_hook)
 		spec->init_hook(codec);
 
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_INIT);
+
 	hda_call_check_power_status(codec, 0x01);
 	return 0;
 }
@@ -4513,7 +4633,7 @@
 /*
  */
 
-static const char *alc880_models[ALC880_MODEL_LAST] = {
+static const char * const alc880_models[ALC880_MODEL_LAST] = {
 	[ALC880_3ST]		= "3stack",
 	[ALC880_TCL_S700]	= "tcl",
 	[ALC880_3ST_DIG]	= "3stack-digout",
@@ -5023,13 +5143,33 @@
 	return 0;
 }
 
+static const char *alc_get_line_out_pfx(const struct auto_pin_cfg *cfg,
+					bool can_be_master)
+{
+	if (!cfg->hp_outs && !cfg->speaker_outs && can_be_master)
+		return "Master";
+
+	switch (cfg->line_out_type) {
+	case AUTO_PIN_SPEAKER_OUT:
+		return "Speaker";
+	case AUTO_PIN_HP_OUT:
+		return "Headphone";
+	default:
+		if (cfg->line_outs == 1)
+			return "PCM";
+		break;
+	}
+	return NULL;
+}
+
 /* add playback controls from the parsed DAC table */
 static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec,
 					     const struct auto_pin_cfg *cfg)
 {
-	static const char *chname[4] = {
+	static const char * const chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, false);
 	hda_nid_t nid;
 	int i, err;
 
@@ -5037,7 +5177,7 @@
 		if (!spec->multiout.dac_nids[i])
 			continue;
 		nid = alc880_idx_to_mixer(alc880_dac_to_idx(spec->multiout.dac_nids[i]));
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
 					      "Center",
@@ -5064,18 +5204,17 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			else
-				pfx = chname[i];
-			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
+						name, i,
 					  HDA_COMPOSE_AMP_VAL(nid, 3, 0,
 							      HDA_OUTPUT));
 			if (err < 0)
 				return err;
-			err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx,
+			err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
+					       name, i,
 					  HDA_COMPOSE_AMP_VAL(nid, 3, 2,
 							      HDA_INPUT));
 			if (err < 0)
@@ -5155,7 +5294,8 @@
 {
 	struct alc_spec *spec = codec->spec;
 	struct hda_input_mux *imux = &spec->private_imux[0];
-	int i, err, idx, type, type_idx = 0;
+	int i, err, idx, type_idx = 0;
+	const char *prev_label = NULL;
 
 	for (i = 0; i < cfg->num_inputs; i++) {
 		hda_nid_t pin;
@@ -5165,12 +5305,13 @@
 		if (!alc_is_input_pin(codec, pin))
 			continue;
 
-		type = cfg->inputs[i].type;
-		if (i > 0 && type == cfg->inputs[i - 1].type)
+		label = hda_get_autocfg_input_label(codec, cfg, i);
+		if (prev_label && !strcmp(label, prev_label))
 			type_idx++;
 		else
 			type_idx = 0;
-		label = hda_get_autocfg_input_label(codec, cfg, i);
+		prev_label = label;
+
 		if (mixer) {
 			idx = get_connection_index(codec, mixer, pin);
 			if (idx >= 0) {
@@ -7023,7 +7164,8 @@
 
 static const struct alc_fixup alc260_fixups[] = {
 	[PINFIX_HP_DC5750] = {
-		.pins = (const struct alc_pincfg[]) {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
 			{ 0x11, 0x90130110 }, /* speaker */
 			{ }
 		}
@@ -7038,7 +7180,7 @@
 /*
  * ALC260 configurations
  */
-static const char *alc260_models[ALC260_MODEL_LAST] = {
+static const char * const alc260_models[ALC260_MODEL_LAST] = {
 	[ALC260_BASIC]		= "basic",
 	[ALC260_HP]		= "hp",
 	[ALC260_HP_3013]	= "hp-3013",
@@ -7234,8 +7376,10 @@
 		board_config = ALC260_AUTO;
 	}
 
-	if (board_config == ALC260_AUTO)
-		alc_pick_fixup(codec, alc260_fixup_tbl, alc260_fixups, 1);
+	if (board_config == ALC260_AUTO) {
+		alc_pick_fixup(codec, NULL, alc260_fixup_tbl, alc260_fixups);
+		alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+	}
 
 	if (board_config == ALC260_AUTO) {
 		/* automatic parse from the BIOS config */
@@ -7283,8 +7427,7 @@
 	set_capture_mixer(codec);
 	set_beep_amp(spec, 0x07, 0x05, HDA_INPUT);
 
-	if (board_config == ALC260_AUTO)
-		alc_pick_fixup(codec, alc260_fixup_tbl, alc260_fixups, 0);
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
 
 	spec->vmaster_nid = 0x08;
 
@@ -7406,7 +7549,7 @@
 	.num_items = 4,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 		{ "Line", 0x2 },
 		{ "CD", 0x4 },
 	},
@@ -7416,7 +7559,7 @@
 	.num_items = 2,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 	},
 };
 
@@ -7851,10 +7994,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -7878,8 +8021,8 @@
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7896,8 +8039,8 @@
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7912,7 +8055,7 @@
 	HDA_BIND_MUTE   ("Headphone Playback Switch", 0x0f, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x07, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7931,7 +8074,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -7946,10 +8089,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7969,7 +8112,7 @@
 	HDA_CODEC_MUTE("Mobile Line Playback Switch", 0x0b, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7982,7 +8125,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8763,10 +8906,10 @@
 	HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8777,11 +8920,11 @@
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8791,11 +8934,11 @@
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8808,10 +8951,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8831,10 +8974,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8855,10 +8998,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8879,10 +9022,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x3, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x1b, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1b, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x3, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8902,10 +9045,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8926,7 +9069,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8939,20 +9082,20 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc883_targa_8ch_mixer[] = {
 	HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8963,7 +9106,7 @@
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8976,21 +9119,8 @@
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	{ } /* end */
-};
-
-static struct snd_kcontrol_new alc883_medion_md2_mixer[] = {
-	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Front Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -9037,7 +9167,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9050,7 +9180,7 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9072,10 +9202,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -9096,8 +9226,8 @@
 	HDA_CODEC_MUTE("Enable Headphones", 0x15, 0x00, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Enable LFE", 0x16, 2, 0x00, HDA_OUTPUT),
 	/* Boost mixers */
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT),
 	/* Input mixers */
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
@@ -9111,7 +9241,7 @@
 	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -9141,7 +9271,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9182,16 +9312,6 @@
 	spec->autocfg.speaker_pins[1] = 0x17;
 }
 
-/* auto-toggle front mic */
-/*
-static void alc883_mitac_mic_automute(struct hda_codec *codec)
-{
-	unsigned char bits = snd_hda_jack_detect(codec, 0x18) ? HDA_AMP_MUTE : 0;
-
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits);
-}
-*/
-
 static struct hda_verb alc883_mitac_verbs[] = {
 	/* HP */
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -9435,18 +9555,8 @@
 		alc888_lenovo_ms7195_rca_automute(codec);
 }
 
-static struct hda_verb alc883_medion_md2_verbs[] = {
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-
-	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-
-	{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
-	{ } /* end */
-};
-
 /* toggle speaker-output according to the hp-jack state */
-static void alc883_medion_md2_setup(struct hda_codec *codec)
+static void alc883_lenovo_nb0763_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
@@ -9458,15 +9568,6 @@
 #define alc883_targa_init_hook		alc882_targa_init_hook
 #define alc883_targa_unsol_event	alc882_targa_unsol_event
 
-static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
-{
-	unsigned int present;
-
-	present = snd_hda_jack_detect(codec, 0x18);
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1,
-				 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
 static void alc883_clevo_m720_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -9478,7 +9579,7 @@
 static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc883_clevo_m720_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc883_clevo_m720_unsol_event(struct hda_codec *codec,
@@ -9486,7 +9587,7 @@
 {
 	switch (res >> 26) {
 	case ALC880_MIC_EVENT:
-		alc883_clevo_m720_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -9702,7 +9803,7 @@
 /*
  * configuration and preset
  */
-static const char *alc882_models[ALC882_MODEL_LAST] = {
+static const char * const alc882_models[ALC882_MODEL_LAST] = {
 	[ALC882_3ST_DIG]	= "3stack-dig",
 	[ALC882_6ST_DIG]	= "6stack-dig",
 	[ALC882_ARIMA]		= "arima",
@@ -9731,7 +9832,6 @@
 	[ALC888_ACER_ASPIRE_8930G]	= "acer-aspire-8930g",
 	[ALC888_ACER_ASPIRE_7730G]	= "acer-aspire-7730g",
 	[ALC883_MEDION]		= "medion",
-	[ALC883_MEDION_MD2]	= "medion-md2",
 	[ALC883_MEDION_WIM2160]	= "medion-wim2160",
 	[ALC883_LAPTOP_EAPD]	= "laptop-eapd",
 	[ALC883_LENOVO_101E_2ch] = "lenovo-101e",
@@ -10279,7 +10379,7 @@
 		.init_hook = alc_automute_amp,
 	},
 	[ALC888_ACER_ASPIRE_4930G] = {
-		.mixers = { alc888_base_mixer,
+		.mixers = { alc888_acer_aspire_4930g_mixer,
 				alc883_chmode_mixer },
 		.init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
 				alc888_acer_aspire_4930g_verbs },
@@ -10379,19 +10479,6 @@
 		.channel_mode = alc883_sixstack_modes,
 		.input_mux = &alc883_capture_source,
 	},
-	[ALC883_MEDION_MD2] = {
-		.mixers = { alc883_medion_md2_mixer},
-		.init_verbs = { alc883_init_verbs, alc883_medion_md2_verbs},
-		.num_dacs = ARRAY_SIZE(alc883_dac_nids),
-		.dac_nids = alc883_dac_nids,
-		.dig_out_nid = ALC883_DIGOUT_NID,
-		.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
-		.channel_mode = alc883_3ST_2ch_modes,
-		.input_mux = &alc883_capture_source,
-		.unsol_event = alc_automute_amp_unsol_event,
-		.setup = alc883_medion_md2_setup,
-		.init_hook = alc_automute_amp,
-	},
 	[ALC883_MEDION_WIM2160] = {
 		.mixers = { alc883_medion_wim2160_mixer },
 		.init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs },
@@ -10468,7 +10555,7 @@
 		.need_dac_fix = 1,
 		.input_mux = &alc883_lenovo_nb0763_capture_source,
 		.unsol_event = alc_automute_amp_unsol_event,
-		.setup = alc883_medion_md2_setup,
+		.setup = alc883_lenovo_nb0763_setup,
 		.init_hook = alc_automute_amp,
 	},
 	[ALC888_LENOVO_MS7195_DIG] = {
@@ -10667,7 +10754,8 @@
 
 static const struct alc_fixup alc882_fixups[] = {
 	[PINFIX_ABIT_AW9D_MAX] = {
-		.pins = (const struct alc_pincfg[]) {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
 			{ 0x15, 0x01080104 }, /* side */
 			{ 0x16, 0x01011012 }, /* rear */
 			{ 0x17, 0x01016011 }, /* clfe */
@@ -10675,13 +10763,15 @@
 		}
 	},
 	[PINFIX_PB_M5210] = {
-		.verbs = (const struct hda_verb[]) {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
 			{ 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 },
 			{}
 		}
 	},
 	[PINFIX_ACER_ASPIRE_7736] = {
-		.sku = ALC_FIXUP_SKU_IGNORE,
+		.type = ALC_FIXUP_SKU,
+		.v.sku = ALC_FIXUP_SKU_IGNORE,
 	},
 };
 
@@ -10830,25 +10920,30 @@
 {
 	struct alc_spec *spec = codec->spec;
 	struct auto_pin_cfg *cfg = &spec->autocfg;
-	int i, err, type;
+	int i, err;
 	int type_idx = 0;
 	hda_nid_t nid;
+	const char *prev_label = NULL;
 
 	for (i = 0; i < cfg->num_inputs; i++) {
 		if (cfg->inputs[i].type > AUTO_PIN_MIC)
 			break;
 		nid = cfg->inputs[i].pin;
 		if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) {
-			char label[32];
-			type = cfg->inputs[i].type;
-			if (i > 0 && type == cfg->inputs[i - 1].type)
+			const char *label;
+			char boost_label[32];
+
+			label = hda_get_autocfg_input_label(codec, cfg, i);
+			if (prev_label && !strcmp(label, prev_label))
 				type_idx++;
 			else
 				type_idx = 0;
-			snprintf(label, sizeof(label), "%s Boost",
-				 hda_get_autocfg_input_label(codec, cfg, i));
-			err = add_control(spec, ALC_CTL_WIDGET_VOL, label,
-					  type_idx,
+			prev_label = label;
+
+			snprintf(boost_label, sizeof(boost_label),
+				 "%s Boost Volume", label);
+			err = add_control(spec, ALC_CTL_WIDGET_VOL,
+					  boost_label, type_idx,
 				  HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
 			if (err < 0)
 				return err;
@@ -10962,8 +11057,10 @@
 		board_config = ALC882_AUTO;
 	}
 
-	if (board_config == ALC882_AUTO)
-		alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 1);
+	if (board_config == ALC882_AUTO) {
+		alc_pick_fixup(codec, NULL, alc882_fixup_tbl, alc882_fixups);
+		alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+	}
 
 	alc_auto_parse_customize_define(codec);
 
@@ -11039,8 +11136,7 @@
 	if (has_cdefine_beep(codec))
 		set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
 
-	if (board_config == ALC882_AUTO)
-		alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0);
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
 
 	spec->vmaster_nid = 0x0c;
 
@@ -11090,10 +11186,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
@@ -11194,10 +11290,10 @@
 			    HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -11219,7 +11315,7 @@
 			    HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x1a, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -11230,7 +11326,7 @@
 static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
 	HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Rear Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Rear Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11250,7 +11346,7 @@
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11357,10 +11453,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	{ } /* end */
 };
@@ -11374,10 +11470,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11445,10 +11541,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11632,7 +11728,7 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
@@ -11687,7 +11783,7 @@
 	.num_items = 3,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 		{ "CD", 0x4 },
 	},
 };
@@ -11839,12 +11935,12 @@
 	},
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11875,12 +11971,12 @@
 	},
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11889,10 +11985,10 @@
 	ALC262_HIPPO_MASTER_SWITCH,
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11918,8 +12014,8 @@
 	HDA_BIND_MUTE("Master Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Headphone Mic Boost", 0x15, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Headphone Mic Boost Volume", 0x15, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -12089,13 +12185,8 @@
 	spec->multiout.dac_nids = spec->private_dac_nids;
 	spec->multiout.dac_nids[0] = 2;
 
-	if (!cfg->speaker_pins[0] && !cfg->hp_pins[0])
-		pfx = "Master";
-	else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-		pfx = "Speaker";
-	else if (cfg->line_out_type == AUTO_PIN_HP_OUT)
-		pfx = "Headphone";
-	else
+	pfx = alc_get_line_out_pfx(cfg, true);
+	if (!pfx)
 		pfx = "Front";
 	for (i = 0; i < 2; i++) {
 		err = alc262_add_out_sw_ctl(spec, cfg->line_out_pins[i], pfx, i);
@@ -12435,19 +12526,14 @@
 
 static const struct alc_fixup alc262_fixups[] = {
 	[PINFIX_FSC_H270] = {
-		.pins = (const struct alc_pincfg[]) {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
 			{ 0x14, 0x99130110 }, /* speaker */
 			{ 0x15, 0x0221142f }, /* front HP */
 			{ 0x1b, 0x0121141f }, /* rear HP */
 			{ }
 		}
 	},
-	[PINFIX_PB_M5210] = {
-		.verbs = (const struct hda_verb[]) {
-			{ 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 },
-			{}
-		}
-	},
 };
 
 static struct snd_pci_quirk alc262_fixup_tbl[] = {
@@ -12537,7 +12623,7 @@
 /*
  * configuration and preset
  */
-static const char *alc262_models[ALC262_MODEL_LAST] = {
+static const char * const alc262_models[ALC262_MODEL_LAST] = {
 	[ALC262_BASIC]		= "basic",
 	[ALC262_HIPPO]		= "hippo",
 	[ALC262_HIPPO_1]	= "hippo_1",
@@ -12565,6 +12651,8 @@
 			   ALC262_HP_BPC),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1300, "HP xw series",
 			   ALC262_HP_BPC),
+	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
+			   ALC262_HP_BPC),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
 			   ALC262_HP_BPC),
 	SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -12878,8 +12966,10 @@
 		board_config = ALC262_AUTO;
 	}
 
-	if (board_config == ALC262_AUTO)
-		alc_pick_fixup(codec, alc262_fixup_tbl, alc262_fixups, 1);
+	if (board_config == ALC262_AUTO) {
+		alc_pick_fixup(codec, NULL, alc262_fixup_tbl, alc262_fixups);
+		alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+	}
 
 	if (board_config == ALC262_AUTO) {
 		/* automatic parse from the BIOS config */
@@ -12949,8 +13039,7 @@
 	if (!spec->no_analog && has_cdefine_beep(codec))
 		set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
 
-	if (board_config == ALC262_AUTO)
-		alc_pick_fixup(codec, alc262_fixup_tbl, alc262_fixups, 0);
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
 
 	spec->vmaster_nid = 0x0c;
 
@@ -12996,9 +13085,9 @@
 	HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13007,9 +13096,9 @@
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13113,9 +13202,9 @@
 		.put = alc268_acer_master_sw_put,
 		.private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
 	},
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13131,8 +13220,8 @@
 		.put = alc268_acer_master_sw_put,
 		.private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
 	},
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13224,8 +13313,8 @@
 	HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13258,8 +13347,8 @@
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Capture Volume", 0x23, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Mic Capture Switch", 0x23, 2, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13724,7 +13813,7 @@
 /*
  * configuration and preset
  */
-static const char *alc268_models[ALC268_MODEL_LAST] = {
+static const char * const alc268_models[ALC268_MODEL_LAST] = {
 	[ALC267_QUANTA_IL1]	= "quanta-il1",
 	[ALC268_3ST]		= "3stack",
 	[ALC268_TOSHIBA]	= "toshiba",
@@ -14082,10 +14171,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT),
 	{ } /* end */
@@ -14105,10 +14194,10 @@
 	},
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14126,13 +14215,13 @@
 	},
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x0b, 0x03, HDA_INPUT),
 	HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x0b, 0x03, HDA_INPUT),
-	HDA_CODEC_VOLUME("Dock Mic Boost", 0x1b, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x1b, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14162,30 +14251,30 @@
 static struct snd_kcontrol_new alc269_laptop_analog_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269_laptop_digital_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269vb_laptop_analog_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269vb_laptop_digital_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -14804,31 +14893,49 @@
 }
 #endif /* SND_HDA_NEEDS_RESUME */
 
+static void alc269_fixup_hweq(struct hda_codec *codec,
+			       const struct alc_fixup *fix, int action)
+{
+	int coef;
+
+	if (action != ALC_FIXUP_ACT_INIT)
+		return;
+	coef = alc_read_coef_idx(codec, 0x1e);
+	alc_write_coef_idx(codec, 0x1e, coef | 0x80);
+}
+
 enum {
 	ALC269_FIXUP_SONY_VAIO,
-	ALC275_FIX_SONY_VAIO_GPIO2,
+	ALC275_FIXUP_SONY_VAIO_GPIO2,
 	ALC269_FIXUP_DELL_M101Z,
 	ALC269_FIXUP_SKU_IGNORE,
 	ALC269_FIXUP_ASUS_G73JW,
+	ALC269_FIXUP_LENOVO_EAPD,
+	ALC275_FIXUP_SONY_HWEQ,
 };
 
 static const struct alc_fixup alc269_fixups[] = {
 	[ALC269_FIXUP_SONY_VAIO] = {
-		.verbs = (const struct hda_verb[]) {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
 			{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
 			{}
 		}
 	},
-	[ALC275_FIX_SONY_VAIO_GPIO2] = {
-		.verbs = (const struct hda_verb[]) {
+	[ALC275_FIXUP_SONY_VAIO_GPIO2] = {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
 			{0x01, AC_VERB_SET_GPIO_MASK, 0x04},
 			{0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
 			{0x01, AC_VERB_SET_GPIO_DATA, 0x00},
 			{ }
-		}
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_SONY_VAIO
 	},
 	[ALC269_FIXUP_DELL_M101Z] = {
-		.verbs = (const struct hda_verb[]) {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
 			/* Enables internal speaker */
 			{0x20, AC_VERB_SET_COEF_INDEX, 13},
 			{0x20, AC_VERB_SET_PROC_COEF, 0x4040},
@@ -14836,25 +14943,44 @@
 		}
 	},
 	[ALC269_FIXUP_SKU_IGNORE] = {
-		.sku = ALC_FIXUP_SKU_IGNORE,
+		.type = ALC_FIXUP_SKU,
+		.v.sku = ALC_FIXUP_SKU_IGNORE,
 	},
 	[ALC269_FIXUP_ASUS_G73JW] = {
-		.pins = (const struct alc_pincfg[]) {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
 			{ 0x17, 0x99130111 }, /* subwoofer */
 			{ }
 		}
 	},
+	[ALC269_FIXUP_LENOVO_EAPD] = {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			{0x14, AC_VERB_SET_EAPD_BTLENABLE, 0},
+			{}
+		}
+	},
+	[ALC275_FIXUP_SONY_HWEQ] = {
+		.type = ALC_FIXUP_FUNC,
+		.v.func = alc269_fixup_hweq,
+		.chained = true,
+		.chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2
+	}
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
-	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
-	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
-	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
+	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
+	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
-	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 	{}
 };
 
@@ -14862,7 +14988,7 @@
 /*
  * configuration and preset
  */
-static const char *alc269_models[ALC269_MODEL_LAST] = {
+static const char * const alc269_models[ALC269_MODEL_LAST] = {
 	[ALC269_BASIC]			= "basic",
 	[ALC269_QUANTA_FL1]		= "quanta",
 	[ALC269_AMIC]			= "laptop-amic",
@@ -14886,7 +15012,7 @@
 	SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC),
-	SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC),
+	SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
@@ -15138,8 +15264,10 @@
 		board_config = ALC269_AUTO;
 	}
 
-	if (board_config == ALC269_AUTO)
-		alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 1);
+	if (board_config == ALC269_AUTO) {
+		alc_pick_fixup(codec, NULL, alc269_fixup_tbl, alc269_fixups);
+		alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+	}
 
 	if (board_config == ALC269_AUTO) {
 		/* automatic parse from the BIOS config */
@@ -15200,8 +15328,7 @@
 	if (has_cdefine_beep(codec))
 		set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
 
-	if (board_config == ALC269_AUTO)
-		alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0);
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
 
 	spec->vmaster_nid = 0x02;
 
@@ -15889,41 +16016,33 @@
 	return 0;
 }
 
-static int alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
-				hda_nid_t nid, unsigned int chs)
+static int __alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
+				  hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, pfx,
+	return __add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
 }
 
+#define alc861_create_out_sw(codec, pfx, nid, chs) \
+	__alc861_create_out_sw(codec, pfx, nid, 0, chs)
+
 /* add playback controls from the parsed DAC table */
 static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec,
 					     const struct auto_pin_cfg *cfg)
 {
 	struct alc_spec *spec = codec->spec;
-	static const char *chname[4] = {
+	static const char * const chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid;
 	int i, err;
 
-	if (cfg->line_outs == 1) {
-		const char *pfx = NULL;
-		if (!cfg->hp_outs)
-			pfx = "Master";
-		else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-			pfx = "Speaker";
-		if (pfx) {
-			nid = spec->multiout.dac_nids[0];
-			return alc861_create_out_sw(codec, pfx, nid, 3);
-		}
-	}
-
 	for (i = 0; i < cfg->line_outs; i++) {
 		nid = spec->multiout.dac_nids[i];
 		if (!nid)
 			continue;
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = alc861_create_out_sw(codec, "Center", nid, 1);
 			if (err < 0)
@@ -15932,7 +16051,10 @@
 			if (err < 0)
 				return err;
 		} else {
-			err = alc861_create_out_sw(codec, chname[i], nid, 3);
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __alc861_create_out_sw(codec, name, nid, i, 3);
 			if (err < 0)
 				return err;
 		}
@@ -16115,7 +16237,7 @@
 /*
  * configuration and preset
  */
-static const char *alc861_models[ALC861_MODEL_LAST] = {
+static const char * const alc861_models[ALC861_MODEL_LAST] = {
 	[ALC861_3ST]		= "3stack",
 	[ALC660_3ST]		= "3stack-660",
 	[ALC861_3ST_DIG]	= "3stack-dig",
@@ -16265,7 +16387,8 @@
 
 static const struct alc_fixup alc861_fixups[] = {
 	[PINFIX_FSC_AMILO_PI1505] = {
-		.pins = (const struct alc_pincfg[]) {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
 			{ 0x0b, 0x0221101f }, /* HP */
 			{ 0x0f, 0x90170310 }, /* speaker */
 			{ }
@@ -16300,8 +16423,10 @@
 		board_config = ALC861_AUTO;
 	}
 
-	if (board_config == ALC861_AUTO)
-		alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 1);
+	if (board_config == ALC861_AUTO) {
+		alc_pick_fixup(codec, NULL, alc861_fixup_tbl, alc861_fixups);
+		alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+	}
 
 	if (board_config == ALC861_AUTO) {
 		/* automatic parse from the BIOS config */
@@ -16338,8 +16463,7 @@
 
 	spec->vmaster_nid = 0x03;
 
-	if (board_config == ALC861_AUTO)
-		alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 0);
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
 
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC861_AUTO) {
@@ -16404,8 +16528,8 @@
 static struct hda_input_mux alc861vd_dallas_capture_source = {
 	.num_items = 2,
 	.items = {
-		{ "Ext Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Mic", 0x0 },
+		{ "Internal Mic", 0x1 },
 	},
 };
 
@@ -16484,11 +16608,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16507,11 +16631,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16531,11 +16655,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16546,19 +16670,19 @@
 };
 
 /* Pin assignment: Speaker=0x14, HP = 0x15,
- *                 Ext Mic=0x18, Int Mic = 0x19, CD = 0x1c, PC Beep = 0x1d
+ *                 Mic=0x18, Internal Mic = 0x19, CD = 0x1c, PC Beep = 0x1d
  */
 static struct snd_kcontrol_new alc861vd_dallas_mixer[] = {
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Headphone Playback Switch", 0x0d, 2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -16723,18 +16847,6 @@
 	{}
 };
 
-static void alc861vd_lenovo_mic_automute(struct hda_codec *codec)
-{
-	unsigned int present;
-	unsigned char bits;
-
-	present = snd_hda_jack_detect(codec, 0x18);
-	bits = present ? HDA_AMP_MUTE : 0;
-
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1,
-				 HDA_AMP_MUTE, bits);
-}
-
 static void alc861vd_lenovo_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -16745,7 +16857,7 @@
 static void alc861vd_lenovo_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc861vd_lenovo_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc861vd_lenovo_unsol_event(struct hda_codec *codec,
@@ -16753,7 +16865,7 @@
 {
 	switch (res >> 26) {
 	case ALC880_MIC_EVENT:
-		alc861vd_lenovo_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -16828,7 +16940,7 @@
 /*
  * configuration and preset
  */
-static const char *alc861vd_models[ALC861VD_MODEL_LAST] = {
+static const char * const alc861vd_models[ALC861VD_MODEL_LAST] = {
 	[ALC660VD_3ST]		= "3stack-660",
 	[ALC660VD_3ST_DIG]	= "3stack-660-digout",
 	[ALC660VD_ASUS_V1S]	= "asus-v1s",
@@ -17048,7 +17160,10 @@
 static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
 					     const struct auto_pin_cfg *cfg)
 {
-	static const char *chname[4] = {"Front", "Surround", "CLFE", "Side"};
+	static const char * const chname[4] = {
+		"Front", "Surround", "CLFE", "Side"
+	};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid_v, nid_s;
 	int i, err;
 
@@ -17062,7 +17177,7 @@
 				alc880_dac_to_idx(
 					spec->multiout.dac_nids[i]));
 
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
 					      "Center",
@@ -17089,24 +17204,17 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
-				if (!cfg->hp_pins)
-					pfx = "Speaker";
-				else
-					pfx = "PCM";
-			} else
-				pfx = chname[i];
-			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
+						name, i,
 					  HDA_COMPOSE_AMP_VAL(nid_v, 3, 0,
 							      HDA_OUTPUT));
 			if (err < 0)
 				return err;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx,
+			err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
+					       name, i,
 					  HDA_COMPOSE_AMP_VAL(nid_s, 3, 2,
 							      HDA_INPUT));
 			if (err < 0)
@@ -17239,7 +17347,8 @@
 /* reset GPIO1 */
 static const struct alc_fixup alc861vd_fixups[] = {
 	[ALC660VD_FIX_ASUS_GPIO1] = {
-		.verbs = (const struct hda_verb[]) {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
 			{0x01, AC_VERB_SET_GPIO_MASK, 0x03},
 			{0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01},
 			{0x01, AC_VERB_SET_GPIO_DATA, 0x01},
@@ -17274,8 +17383,10 @@
 		board_config = ALC861VD_AUTO;
 	}
 
-	if (board_config == ALC861VD_AUTO)
-		alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 1);
+	if (board_config == ALC861VD_AUTO) {
+		alc_pick_fixup(codec, NULL, alc861vd_fixup_tbl, alc861vd_fixups);
+		alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+	}
 
 	if (board_config == ALC861VD_AUTO) {
 		/* automatic parse from the BIOS config */
@@ -17323,8 +17434,7 @@
 
 	spec->vmaster_nid = 0x02;
 
-	if (board_config == ALC861VD_AUTO)
-		alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 0);
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
 
 	codec->patch_ops = alc_patch_ops;
 
@@ -17570,13 +17680,13 @@
 	HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
 
-	HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("e-Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -17720,8 +17830,8 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -17732,8 +17842,8 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	{ } /* end */
@@ -18566,13 +18676,13 @@
 	HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
 
-	HDA_CODEC_VOLUME("e-Mic/LineIn Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("e-Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("e-Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic/LineIn Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -18583,13 +18693,13 @@
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -18607,7 +18717,7 @@
 /*
  * configuration and preset
  */
-static const char *alc662_models[ALC662_MODEL_LAST] = {
+static const char * const alc662_models[ALC662_MODEL_LAST] = {
 	[ALC662_3ST_2ch_DIG]	= "3stack-dig",
 	[ALC662_3ST_6ch_DIG]	= "3stack-6ch-dig",
 	[ALC662_3ST_6ch]	= "3stack-6ch",
@@ -18712,6 +18822,7 @@
 					ALC662_3ST_6ch_DIG),
 	SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x",
 			   ALC663_ASUS_H13),
+	SND_PCI_QUIRK(0x1991, 0x5628, "Ordissimo EVE", ALC662_LENOVO_101E),
 	{}
 };
 
@@ -19094,20 +19205,24 @@
 	return 0;
 }
 
-static inline int alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
-			      hda_nid_t nid, unsigned int chs)
+static inline int __alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
+				       hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+	return __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
 }
 
-static inline int alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
-			     hda_nid_t nid, unsigned int chs)
+static inline int __alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
+				      hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx,
+	return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_INPUT));
 }
 
+#define alc662_add_vol_ctl(spec, pfx, nid, chs) \
+	__alc662_add_vol_ctl(spec, pfx, nid, 0, chs)
+#define alc662_add_sw_ctl(spec, pfx, nid, chs) \
+	__alc662_add_sw_ctl(spec, pfx, nid, 0, chs)
 #define alc662_add_stereo_vol(spec, pfx, nid) \
 	alc662_add_vol_ctl(spec, pfx, nid, 3)
 #define alc662_add_stereo_sw(spec, pfx, nid) \
@@ -19118,9 +19233,10 @@
 					     const struct auto_pin_cfg *cfg)
 {
 	struct alc_spec *spec = codec->spec;
-	static const char *chname[4] = {
+	static const char * const chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid, mix;
 	int i, err;
 
@@ -19131,7 +19247,7 @@
 		mix = alc662_dac_to_mix(codec, cfg->line_out_pins[i], nid);
 		if (!mix)
 			continue;
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = alc662_add_vol_ctl(spec, "Center", nid, 1);
 			if (err < 0)
@@ -19146,22 +19262,13 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
-				if (cfg->hp_outs)
-					pfx = "Speaker";
-				else
-					pfx = "PCM";
-			} else
-				pfx = chname[i];
-			err = alc662_add_vol_ctl(spec, pfx, nid, 3);
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __alc662_add_vol_ctl(spec, name, nid, i, 3);
 			if (err < 0)
 				return err;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			err = alc662_add_sw_ctl(spec, pfx, mix, 3);
+			err = __alc662_add_sw_ctl(spec, name, mix, i, 3);
 			if (err < 0)
 				return err;
 		}
@@ -19358,34 +19465,69 @@
 		alc_inithook(codec);
 }
 
+static void alc272_fixup_mario(struct hda_codec *codec,
+			       const struct alc_fixup *fix, int action)
+{
+	if (action != ALC_FIXUP_ACT_PROBE)
+		return;
+	if (snd_hda_override_amp_caps(codec, 0x2, HDA_OUTPUT,
+				      (0x3b << AC_AMPCAP_OFFSET_SHIFT) |
+				      (0x3b << AC_AMPCAP_NUM_STEPS_SHIFT) |
+				      (0x03 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+				      (0 << AC_AMPCAP_MUTE_SHIFT)))
+		printk(KERN_WARNING
+		       "hda_codec: failed to override amp caps for NID 0x2\n");
+}
+
 enum {
 	ALC662_FIXUP_ASPIRE,
 	ALC662_FIXUP_IDEAPAD,
+	ALC272_FIXUP_MARIO,
+	ALC662_FIXUP_CZC_P10T,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
 	[ALC662_FIXUP_ASPIRE] = {
-		.pins = (const struct alc_pincfg[]) {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
 			{ 0x15, 0x99130112 }, /* subwoofer */
 			{ }
 		}
 	},
 	[ALC662_FIXUP_IDEAPAD] = {
-		.pins = (const struct alc_pincfg[]) {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
 			{ 0x17, 0x99130112 }, /* subwoofer */
 			{ }
 		}
 	},
+	[ALC272_FIXUP_MARIO] = {
+		.type = ALC_FIXUP_FUNC,
+		.v.func = alc272_fixup_mario,
+	},
+	[ALC662_FIXUP_CZC_P10T] = {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			{0x14, AC_VERB_SET_EAPD_BTLENABLE, 0},
+			{}
+		}
+	},
 };
 
 static struct snd_pci_quirk alc662_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+	SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
 	{}
 };
 
+static const struct alc_model_fixup alc662_fixup_models[] = {
+	{.id = ALC272_FIXUP_MARIO, .name = "mario"},
+	{}
+};
 
 
 static int patch_alc662(struct hda_codec *codec)
@@ -19424,7 +19566,9 @@
 	}
 
 	if (board_config == ALC662_AUTO) {
-		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 1);
+		alc_pick_fixup(codec, alc662_fixup_models,
+			       alc662_fixup_tbl, alc662_fixups);
+		alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
 		/* automatic parse from the BIOS config */
 		err = alc662_parse_auto_config(codec);
 		if (err < 0) {
@@ -19482,11 +19626,11 @@
 	}
 	spec->vmaster_nid = 0x02;
 
+	alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
+
 	codec->patch_ops = alc_patch_ops;
-	if (board_config == ALC662_AUTO) {
+	if (board_config == ALC662_AUTO)
 		spec->init_hook = alc662_auto_init;
-		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 0);
-	}
 
 	alc_init_jacks(codec);
 
@@ -19612,9 +19756,9 @@
 	HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x12, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -19874,7 +20018,7 @@
 /*
  * configuration and preset
  */
-static const char *alc680_models[ALC680_MODEL_LAST] = {
+static const char * const alc680_models[ALC680_MODEL_LAST] = {
 	[ALC680_BASE]		= "base",
 	[ALC680_AUTO]		= "auto",
 };
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f03b2ff..bd7b123 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -266,7 +266,7 @@
 	struct sigmatel_mic_route int_mic;
 	struct sigmatel_mic_route dock_mic;
 
-	const char **spdif_labels;
+	const char * const *spdif_labels;
 
 	hda_nid_t dig_in_nid;
 	hda_nid_t mono_nid;
@@ -389,6 +389,9 @@
 	0x11, 0x20, 0
 };
 
+#define STAC92HD88XXX_NUM_DMICS	STAC92HD83XXX_NUM_DMICS
+#define stac92hd88xxx_dmic_nids	stac92hd83xxx_dmic_nids
+
 #define STAC92HD87B_NUM_DMICS	 1
 static hda_nid_t stac92hd87b_dmic_nids[STAC92HD87B_NUM_DMICS + 1] = {
 	0x11, 0
@@ -521,7 +524,7 @@
 	HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT),
 };
 
-static const char *stac927x_spdif_labels[5] = {
+static const char * const stac927x_spdif_labels[5] = {
 	"Digital Playback", "ADAT", "Analog Mux 1",
 	"Analog Mux 2", "Analog Mux 3"
 };
@@ -583,7 +586,12 @@
 	0x0f, 0x10, 0x11, 0x1f, 0x20,
 };
 
-static hda_nid_t stac92hd88xxx_pin_nids[10] = {
+static hda_nid_t stac92hd87xxx_pin_nids[6] = {
+	0x0a, 0x0b, 0x0c, 0x0d,
+	0x0f, 0x11,
+};
+
+static hda_nid_t stac92hd88xxx_pin_nids[8] = {
 	0x0a, 0x0b, 0x0c, 0x0d,
 	0x0f, 0x11, 0x1f, 0x20,
 };
@@ -1059,7 +1067,7 @@
 	.put = stac92xx_smux_enum_put,
 };
 
-static const char *slave_vols[] = {
+static const char * const slave_vols[] = {
 	"Front Playback Volume",
 	"Surround Playback Volume",
 	"Center Playback Volume",
@@ -1070,7 +1078,7 @@
 	NULL
 };
 
-static const char *slave_sws[] = {
+static const char * const slave_sws[] = {
 	"Front Playback Switch",
 	"Surround Playback Switch",
 	"Center Playback Switch",
@@ -1351,7 +1359,7 @@
 	[STAC_9200_PANASONIC] = ref9200_pin_configs,
 };
 
-static const char *stac9200_models[STAC_9200_MODELS] = {
+static const char * const stac9200_models[STAC_9200_MODELS] = {
 	[STAC_AUTO] = "auto",
 	[STAC_REF] = "ref",
 	[STAC_9200_OQO] = "oqo",
@@ -1497,7 +1505,7 @@
 	[STAC_M6] = stac925xM6_pin_configs,
 };
 
-static const char *stac925x_models[STAC_925x_MODELS] = {
+static const char * const stac925x_models[STAC_925x_MODELS] = {
 	[STAC_925x_AUTO] = "auto",
 	[STAC_REF] = "ref",
 	[STAC_M1] = "m1",
@@ -1571,7 +1579,7 @@
 	[STAC_92HD73XX_INTEL]	= intel_dg45id_pin_configs,
 };
 
-static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
+static const char * const stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
 	[STAC_92HD73XX_AUTO] = "auto",
 	[STAC_92HD73XX_NO_JD] = "no-jd",
 	[STAC_92HD73XX_REF] = "ref",
@@ -1657,7 +1665,7 @@
 	[STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
 };
 
-static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
+static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
 	[STAC_92HD83XXX_AUTO] = "auto",
 	[STAC_92HD83XXX_REF] = "ref",
 	[STAC_92HD83XXX_PWR_REF] = "mic-ref",
@@ -1719,7 +1727,7 @@
 	[STAC_HP_DV4_1222NR]	= NULL,
 };
 
-static const char *stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = {
+static const char * const stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = {
 	[STAC_92HD71BXX_AUTO] = "auto",
 	[STAC_92HD71BXX_REF] = "ref",
 	[STAC_DELL_M4_1] = "dell-m4-1",
@@ -1912,7 +1920,7 @@
 	[STAC_922X_DELL_M82] = dell_922x_m82_pin_configs,	
 };
 
-static const char *stac922x_models[STAC_922X_MODELS] = {
+static const char * const stac922x_models[STAC_922X_MODELS] = {
 	[STAC_922X_AUTO] = "auto",
 	[STAC_D945_REF]	= "ref",
 	[STAC_D945GTP5]	= "5stack",
@@ -2074,7 +2082,7 @@
 	[STAC_927X_VOLKNOB] = NULL,
 };
 
-static const char *stac927x_models[STAC_927X_MODELS] = {
+static const char * const stac927x_models[STAC_927X_MODELS] = {
 	[STAC_927X_AUTO]	= "auto",
 	[STAC_D965_REF_NO_JD]	= "ref-no-jd",
 	[STAC_D965_REF]		= "ref",
@@ -2177,7 +2185,7 @@
 	[STAC_9205_EAPD] = NULL,
 };
 
-static const char *stac9205_models[STAC_9205_MODELS] = {
+static const char * const stac9205_models[STAC_9205_MODELS] = {
 	[STAC_9205_AUTO] = "auto",
 	[STAC_9205_REF] = "ref",
 	[STAC_9205_DELL_M42] = "dell-m42",
@@ -3120,7 +3128,7 @@
 				 int type)
 {
 	struct sigmatel_spec *spec = codec->spec;
-	static const char *chname[4] = {
+	static const char * const chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
 	hda_nid_t nid;
@@ -3253,7 +3261,7 @@
 }
 
 /* labels for mono mux outputs */
-static const char *stac92xx_mono_labels[4] = {
+static const char * const stac92xx_mono_labels[4] = {
 	"DAC0", "DAC1", "Mixer", "DAC2"
 };
 
@@ -3377,7 +3385,7 @@
 	return 0;
 };
 
-static const char *stac92xx_spdif_labels[3] = {
+static const char * const stac92xx_spdif_labels[3] = {
 	"Digital Playback", "Analog Mux 1", "Analog Mux 2",
 };
 
@@ -3385,7 +3393,7 @@
 {
 	struct sigmatel_spec *spec = codec->spec;
 	struct hda_input_mux *spdif_mux = &spec->private_smux;
-	const char **labels = spec->spdif_labels;
+	const char * const *labels = spec->spdif_labels;
 	int i, num_cons;
 	hda_nid_t con_lst[HDA_MAX_NUM_INPUTS];
 
@@ -3406,7 +3414,7 @@
 }
 
 /* labels for dmic mux inputs */
-static const char *stac92xx_dmic_labels[5] = {
+static const char * const stac92xx_dmic_labels[5] = {
 	"Analog Inputs", "Digital Mic 1", "Digital Mic 2",
 	"Digital Mic 3", "Digital Mic 4"
 };
@@ -3591,7 +3599,7 @@
 		if (check_mic_pin(codec, spec->dmic_nids[i],
 		    &fixed, &ext, &dock))
 			return 0;
-	if (!fixed && !ext && !dock)
+	if (!fixed || (!ext && !dock))
 		return 0; /* no input to switch */
 	if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
 		return 0; /* no unsol support */
@@ -5330,7 +5338,7 @@
 	return 0;
 }
 
-static int stac92hd83xxx_set_system_btl_amp(struct hda_codec *codec)
+static int hp_bnb2011_with_dock(struct hda_codec *codec)
 {
 	if (codec->vendor_id != 0x111d7605 &&
 	    codec->vendor_id != 0x111d76d1)
@@ -5345,10 +5353,6 @@
 	case 0x103c161d:
 	case 0x103c161e:
 	case 0x103c161f:
-	case 0x103c1620:
-	case 0x103c1621:
-	case 0x103c1622:
-	case 0x103c1623:
 
 	case 0x103c162a:
 	case 0x103c162b:
@@ -5357,41 +5361,9 @@
 	case 0x103c1631:
 
 	case 0x103c1633:
-
+	case 0x103c1634:
 	case 0x103c1635:
 
-	case 0x103c164f:
-
-	case 0x103c1676:
-	case 0x103c1677:
-	case 0x103c1678:
-	case 0x103c1679:
-	case 0x103c167a:
-	case 0x103c167b:
-	case 0x103c167c:
-	case 0x103c167d:
-	case 0x103c167e:
-	case 0x103c167f:
-	case 0x103c1680:
-	case 0x103c1681:
-	case 0x103c1682:
-	case 0x103c1683:
-	case 0x103c1684:
-	case 0x103c1685:
-	case 0x103c1686:
-	case 0x103c1687:
-	case 0x103c1688:
-	case 0x103c1689:
-	case 0x103c168a:
-	case 0x103c168b:
-	case 0x103c168c:
-	case 0x103c168d:
-	case 0x103c168e:
-	case 0x103c168f:
-	case 0x103c1690:
-	case 0x103c1691:
-	case 0x103c1692:
-
 	case 0x103c3587:
 	case 0x103c3588:
 	case 0x103c3589:
@@ -5399,9 +5371,9 @@
 
 	case 0x103c3667:
 	case 0x103c3668:
-		/* set BTL amp level to 13.43dB for louder speaker output */
-		return snd_hda_codec_write_cache(codec, codec->afg, 0,
-						 0x7F4, 0x14);
+	case 0x103c3669:
+
+		return 1;
 	}
 	return 0;
 }
@@ -5417,12 +5389,17 @@
 	if (spec == NULL)
 		return -ENOMEM;
 
+	if (hp_bnb2011_with_dock(codec)) {
+		snd_hda_codec_set_pincfg(codec, 0xa, 0x2101201f);
+		snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
+	}
+
 	/* reset pin power-down; Windows may leave these bits after reboot */
 	snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7EC, 0);
 	snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7ED, 0);
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
-	spec->linear_tone_beep = 1;
+	spec->linear_tone_beep = 0;
 	codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
 	spec->digbeep_nid = 0x21;
 	spec->dmic_nids = stac92hd83xxx_dmic_nids;
@@ -5458,19 +5435,27 @@
 	switch (codec->vendor_id) {
 	case 0x111d76d1:
 	case 0x111d76d9:
+	case 0x111d76e5:
 		spec->dmic_nids = stac92hd87b_dmic_nids;
 		spec->num_dmics = stac92xx_connected_ports(codec,
 				stac92hd87b_dmic_nids,
 				STAC92HD87B_NUM_DMICS);
-		/* Fall through */
+		spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
+		spec->pin_nids = stac92hd87xxx_pin_nids;
+		spec->mono_nid = 0;
+		spec->num_pwrs = 0;
+		break;
 	case 0x111d7666:
 	case 0x111d7667:
 	case 0x111d7668:
 	case 0x111d7669:
+	case 0x111d76e3:
+		spec->num_dmics = stac92xx_connected_ports(codec,
+				stac92hd88xxx_dmic_nids,
+				STAC92HD88XXX_NUM_DMICS);
 		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
 		spec->pin_nids = stac92hd88xxx_pin_nids;
 		spec->mono_nid = 0;
-		spec->digbeep_nid = 0;
 		spec->num_pwrs = 0;
 		break;
 	case 0x111d7604:
@@ -5537,8 +5522,6 @@
 			AC_VERB_SET_CONNECT_SEL, num_dacs);
 	}
 
-	stac92hd83xxx_set_system_btl_amp(codec);
-
 	codec->proc_widget_hook = stac92hd_proc_hook;
 
 	return 0;
@@ -6261,7 +6244,7 @@
 	0x90a7013e
 };
 
-static const char *stac9872_models[STAC_9872_MODELS] = {
+static const char * const stac9872_models[STAC_9872_MODELS] = {
 	[STAC_9872_AUTO] = "auto",
 	[STAC_9872_VAIO] = "vaio",
 };
@@ -6411,6 +6394,8 @@
 	{ .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
 	{ .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
 	{ .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
+	{ .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
+	{ .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
 	{ .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
 	{} /* terminator */
 };
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index d1c3f8d..63b0054 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -263,8 +263,7 @@
 		return;
 	snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
 			    !spec->vt1708_jack_detectect);
-	cancel_delayed_work(&spec->vt1708_hp_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&spec->vt1708_hp_work);
 }
 
 
@@ -568,7 +567,7 @@
 		hda_nid_t nid = cfg->inputs[i].pin;
 		if (spec->smart51_enabled && is_smart51_pins(spec, nid))
 			ctl = PIN_OUT;
-		else if (i == AUTO_PIN_MIC)
+		else if (cfg->inputs[i].type == AUTO_PIN_MIC)
 			ctl = PIN_VREF50;
 		else
 			ctl = PIN_IN;
@@ -2282,7 +2281,9 @@
 					     const struct auto_pin_cfg *cfg)
 {
 	char name[32];
-	static const char *chname[4] = { "Front", "Surround", "C/LFE", "Side" };
+	static const char * const chname[4] = {
+		"Front", "Surround", "C/LFE", "Side"
+	};
 	hda_nid_t nid, nid_vol, nid_vols[] = {0x17, 0x19, 0x1a, 0x1b};
 	int i, err;
 
@@ -2371,7 +2372,7 @@
 {
 	int i;
 	struct hda_input_mux *imux = &spec->private_imux[1];
-	static const char *texts[] = { "OFF", "ON", NULL};
+	static const char * const texts[] = { "OFF", "ON", NULL};
 
 	/* for hp mode select */
 	for (i = 0; texts[i]; i++)
@@ -2891,7 +2892,9 @@
 					     const struct auto_pin_cfg *cfg)
 {
 	char name[32];
-	static const char *chname[4] = { "Front", "Surround", "C/LFE", "Side" };
+	static const char * const chname[4] = {
+		"Front", "Surround", "C/LFE", "Side"
+	};
 	hda_nid_t nid, nid_vol, nid_vols[] = {0x18, 0x1a, 0x1b, 0x29};
 	int i, err;
 
@@ -3434,7 +3437,9 @@
 					     const struct auto_pin_cfg *cfg)
 {
 	char name[32];
-	static const char *chname[4] = { "Front", "Surround", "C/LFE", "Side" };
+	static const char * const chname[4] = {
+		"Front", "Surround", "C/LFE", "Side"
+	};
 	hda_nid_t nid_vols[] = {0x16, 0x18, 0x26, 0x27};
 	hda_nid_t nid, nid_vol = 0;
 	int i, err;
@@ -3862,7 +3867,9 @@
 					     const struct auto_pin_cfg *cfg)
 {
 	char name[32];
-	static const char *chname[4] = { "Front", "Surround", "C/LFE", "Side" };
+	static const char * const chname[4] = {
+		"Front", "Surround", "C/LFE", "Side"
+	};
 	hda_nid_t nid_vols[] = {0x10, 0x11, 0x24, 0x25};
 	hda_nid_t nid_mutes[] = {0x1C, 0x18, 0x26, 0x27};
 	hda_nid_t nid, nid_vol, nid_mute;
@@ -4305,7 +4312,7 @@
 {
 	int err, i;
 	struct hda_input_mux *imux;
-	static const char *texts[] = { "ON", "OFF", NULL};
+	static const char * const texts[] = { "ON", "OFF", NULL};
 	if (!pin)
 		return 0;
 	spec->multiout.hp_nid = 0x1D;
@@ -4616,7 +4623,9 @@
 					     const struct auto_pin_cfg *cfg)
 {
 	char name[32];
-	static const char *chname[4] = { "Front", "Surround", "C/LFE", "Side" };
+	static const char * const chname[4] = {
+		"Front", "Surround", "C/LFE", "Side"
+	};
 	hda_nid_t nid_vols[] = {0x8, 0x9, 0xa, 0xb};
 	hda_nid_t nid_mutes[] = {0x24, 0x25, 0x26, 0x27};
 	hda_nid_t nid, nid_vol, nid_mute = 0;
@@ -5065,7 +5074,9 @@
 					      const struct auto_pin_cfg *cfg)
 {
 	char name[32];
-	static const char *chname[3] = { "Front", "Surround", "C/LFE" };
+	static const char * const chname[3] = {
+		"Front", "Surround", "C/LFE"
+	};
 	hda_nid_t nid_vols[] = {0x10, 0x11, 0x25};
 	hda_nid_t nid_mutes[] = {0x1C, 0x18, 0x27};
 	hda_nid_t nid, nid_vol, nid_mute;
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index 712c171..20c6b07 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -96,6 +96,11 @@
 		tmp |= ICE1712_DELTA_AP_CCLK | ICE1712_DELTA_AP_CS_CODEC;
 		tmp &= ~ICE1712_DELTA_AP_CS_DIGITAL;
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		tmp |= ICE1712_DELTA_66E_CCLK | ICE1712_DELTA_66E_CS_CHIP_A |
+		       ICE1712_DELTA_66E_CS_CHIP_B;
+		tmp &= ~ICE1712_DELTA_66E_CS_CS8427;
+		break;
 	case ICE1712_SUBDEVICE_VX442:
 		tmp |= ICE1712_VX442_CCLK | ICE1712_VX442_CODEC_CHIP_A | ICE1712_VX442_CODEC_CHIP_B;
 		tmp &= ~ICE1712_VX442_CS_DIGITAL;
@@ -119,6 +124,9 @@
 	case ICE1712_SUBDEVICE_DELTA410:
 		tmp |= ICE1712_DELTA_AP_CS_DIGITAL;
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		tmp |= ICE1712_DELTA_66E_CS_CS8427;
+		break;
 	case ICE1712_SUBDEVICE_VX442:
 		tmp |= ICE1712_VX442_CS_DIGITAL;
 		break;
@@ -276,6 +284,20 @@
 }
 
 /*
+ * AK4524 on Delta66 rev E to choose the chip address
+ */
+static void delta66e_ak4524_lock(struct snd_akm4xxx *ak, int chip)
+{
+	struct snd_ak4xxx_private *priv = (void *)ak->private_value[0];
+	struct snd_ice1712 *ice = ak->private_data[0];
+
+	snd_ice1712_save_gpio_status(ice);
+	priv->cs_mask =
+	priv->cs_addr = chip == 0 ? ICE1712_DELTA_66E_CS_CHIP_A :
+				    ICE1712_DELTA_66E_CS_CHIP_B;
+}
+
+/*
  * AK4528 on VX442 to choose the chip mask
  */
 static void vx442_ak4524_lock(struct snd_akm4xxx *ak, int chip)
@@ -487,6 +509,29 @@
 	.mask_flags = 0,
 };
 
+static struct snd_akm4xxx akm_delta66e __devinitdata = {
+	.type = SND_AK4524,
+	.num_adcs = 4,
+	.num_dacs = 4,
+	.ops = {
+		.lock = delta66e_ak4524_lock,
+		.set_rate_val = delta_ak4524_set_rate_val
+	}
+};
+
+static struct snd_ak4xxx_private akm_delta66e_priv __devinitdata = {
+	.caddr = 2,
+	.cif = 0, /* the default level of the CIF pin from AK4524 */
+	.data_mask = ICE1712_DELTA_66E_DOUT,
+	.clk_mask = ICE1712_DELTA_66E_CCLK,
+	.cs_mask = 0,
+	.cs_addr = 0, /* set later */
+	.cs_none = 0,
+	.add_flags = 0,
+	.mask_flags = 0,
+};
+
+
 static struct snd_akm4xxx akm_delta44 __devinitdata = {
 	.type = SND_AK4524,
 	.num_adcs = 4,
@@ -535,6 +580,7 @@
 {
 	int err;
 	struct snd_akm4xxx *ak;
+	unsigned char tmp;
 
 	if (ice->eeprom.subvendor == ICE1712_SUBDEVICE_DELTA1010 &&
 	    ice->eeprom.gpiodir == 0x7b)
@@ -577,6 +623,12 @@
 		break;
 	}
 
+	/* initialize the SPI clock to high */
+	tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA);
+	tmp |= ICE1712_DELTA_AP_CCLK;
+	snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp);
+	udelay(5);
+
 	/* initialize spdif */
 	switch (ice->eeprom.subvendor) {
 	case ICE1712_SUBDEVICE_AUDIOPHILE:
@@ -644,9 +696,11 @@
 		err = snd_ice1712_akm4xxx_init(ak, &akm_delta44, &akm_delta44_priv, ice);
 		break;
 	case ICE1712_SUBDEVICE_VX442:
-	case ICE1712_SUBDEVICE_DELTA66E:
 		err = snd_ice1712_akm4xxx_init(ak, &akm_vx442, &akm_vx442_priv, ice);
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		err = snd_ice1712_akm4xxx_init(ak, &akm_delta66e, &akm_delta66e_priv, ice);
+		break;
 	default:
 		snd_BUG();
 		return -EINVAL;
diff --git a/sound/pci/ice1712/delta.h b/sound/pci/ice1712/delta.h
index 1a0ac6c..11a9c3a 100644
--- a/sound/pci/ice1712/delta.h
+++ b/sound/pci/ice1712/delta.h
@@ -144,6 +144,17 @@
 #define ICE1712_DELTA_1010LT_CS_NONE	0x50	/* nothing */
 #define ICE1712_DELTA_1010LT_WORDCLOCK 0x80	/* sample clock source: 0 = Word Clock Input, 1 = S/PDIF Input ??? */
 
+/* M-Audio Delta 66 rev. E definitions.
+ * Newer revisions of Delta 66 have CS8427 over SPI for
+ * S/PDIF transceiver instead of CS8404/CS8414. */
+/* 0x01 = DFS */
+#define ICE1712_DELTA_66E_CCLK		0x02	/* SPI clock */
+#define ICE1712_DELTA_66E_DIN		0x04	/* data input */
+#define ICE1712_DELTA_66E_DOUT		0x08	/* data output */
+#define ICE1712_DELTA_66E_CS_CS8427	0x10	/* chip select, low = CS8427 */
+#define ICE1712_DELTA_66E_CS_CHIP_A	0x20	/* AK4524 #0 */
+#define ICE1712_DELTA_66E_CS_CHIP_B	0x40	/* AK4524 #1 */
+
 /* Digigram VX442 definitions */
 #define ICE1712_VX442_CCLK		0x02	/* SPI clock */
 #define ICE1712_VX442_DIN		0x04	/* data input */
diff --git a/sound/pci/oxygen/Makefile b/sound/pci/oxygen/Makefile
index acd8f15..0f87265 100644
--- a/sound/pci/oxygen/Makefile
+++ b/sound/pci/oxygen/Makefile
@@ -1,10 +1,8 @@
 snd-oxygen-lib-objs := oxygen_io.o oxygen_lib.o oxygen_mixer.o oxygen_pcm.o
-snd-hifier-objs := hifier.o
-snd-oxygen-objs := oxygen.o
+snd-oxygen-objs := oxygen.o xonar_dg.o
 snd-virtuoso-objs := virtuoso.o xonar_lib.o \
 	xonar_pcm179x.o xonar_cs43xx.o xonar_wm87x6.o xonar_hdmi.o
 
 obj-$(CONFIG_SND_OXYGEN_LIB) += snd-oxygen-lib.o
-obj-$(CONFIG_SND_HIFIER) += snd-hifier.o
 obj-$(CONFIG_SND_OXYGEN) += snd-oxygen.o
 obj-$(CONFIG_SND_VIRTUOSO) += snd-virtuoso.o
diff --git a/sound/pci/oxygen/cs4245.h b/sound/pci/oxygen/cs4245.h
new file mode 100644
index 0000000..5e0197e
--- /dev/null
+++ b/sound/pci/oxygen/cs4245.h
@@ -0,0 +1,107 @@
+#define CS4245_CHIP_ID		0x01
+#define CS4245_POWER_CTRL	0x02
+#define CS4245_DAC_CTRL_1	0x03
+#define CS4245_ADC_CTRL		0x04
+#define CS4245_MCLK_FREQ	0x05
+#define CS4245_SIGNAL_SEL	0x06
+#define CS4245_PGA_B_CTRL	0x07
+#define CS4245_PGA_A_CTRL	0x08
+#define CS4245_ANALOG_IN	0x09
+#define CS4245_DAC_A_CTRL	0x0a
+#define CS4245_DAC_B_CTRL	0x0b
+#define CS4245_DAC_CTRL_2	0x0c
+#define CS4245_INT_STATUS	0x0d
+#define CS4245_INT_MASK		0x0e
+#define CS4245_INT_MODE_MSB	0x0f
+#define CS4245_INT_MODE_LSB	0x10
+
+/* Chip ID */
+#define CS4245_CHIP_PART_MASK	0xf0
+#define CS4245_CHIP_REV_MASK	0x0f
+
+/* Power Control */
+#define CS4245_FREEZE		0x80
+#define CS4245_PDN_MIC		0x08
+#define CS4245_PDN_ADC		0x04
+#define CS4245_PDN_DAC		0x02
+#define CS4245_PDN		0x01
+
+/* DAC Control */
+#define CS4245_DAC_FM_MASK	0xc0
+#define CS4245_DAC_FM_SINGLE	0x00
+#define CS4245_DAC_FM_DOUBLE	0x40
+#define CS4245_DAC_FM_QUAD	0x80
+#define CS4245_DAC_DIF_MASK	0x30
+#define CS4245_DAC_DIF_LJUST	0x00
+#define CS4245_DAC_DIF_I2S	0x10
+#define CS4245_DAC_DIF_RJUST_16	0x20
+#define CS4245_DAC_DIF_RJUST_24	0x30
+#define CS4245_RESERVED_1	0x08
+#define CS4245_MUTE_DAC		0x04
+#define CS4245_DEEMPH		0x02
+#define CS4245_DAC_MASTER	0x01
+
+/* ADC Control */
+#define CS4245_ADC_FM_MASK	0xc0
+#define CS4245_ADC_FM_SINGLE	0x00
+#define CS4245_ADC_FM_DOUBLE	0x40
+#define CS4245_ADC_FM_QUAD	0x80
+#define CS4245_ADC_DIF_MASK	0x10
+#define CS4245_ADC_DIF_LJUST	0x00
+#define CS4245_ADC_DIF_I2S	0x10
+#define CS4245_MUTE_ADC		0x04
+#define CS4245_HPF_FREEZE	0x02
+#define CS4245_ADC_MASTER	0x01
+
+/* MCLK Frequency */
+#define CS4245_MCLK1_MASK	0x70
+#define CS4245_MCLK1_SHIFT	4
+#define CS4245_MCLK2_MASK	0x07
+#define CS4245_MCLK2_SHIFT	0
+#define CS4245_MCLK_1		0
+#define CS4245_MCLK_1_5		1
+#define CS4245_MCLK_2		2
+#define CS4245_MCLK_3		3
+#define CS4245_MCLK_4		4
+
+/* Signal Selection */
+#define CS4245_A_OUT_SEL_MASK	0x60
+#define CS4245_A_OUT_SEL_HIZ	0x00
+#define CS4245_A_OUT_SEL_DAC	0x20
+#define CS4245_A_OUT_SEL_PGA	0x40
+#define CS4245_LOOP		0x02
+#define CS4245_ASYNCH		0x01
+
+/* Channel B/A PGA Control */
+#define CS4245_PGA_GAIN_MASK	0x3f
+
+/* ADC Input Control */
+#define CS4245_PGA_SOFT		0x10
+#define CS4245_PGA_ZERO		0x08
+#define CS4245_SEL_MASK		0x07
+#define CS4245_SEL_MIC		0x00
+#define CS4245_SEL_INPUT_1	0x01
+#define CS4245_SEL_INPUT_2	0x02
+#define CS4245_SEL_INPUT_3	0x03
+#define CS4245_SEL_INPUT_4	0x04
+#define CS4245_SEL_INPUT_5	0x05
+#define CS4245_SEL_INPUT_6	0x06
+
+/* DAC Channel A/B Volume Control */
+#define CS4245_VOL_MASK		0xff
+
+/* DAC Control 2 */
+#define CS4245_DAC_SOFT		0x80
+#define CS4245_DAC_ZERO		0x40
+#define CS4245_INVERT_DAC	0x20
+#define CS4245_INT_ACTIVE_HIGH	0x01
+
+/* Interrupt Status/Mask/Mode */
+#define CS4245_ADC_CLK_ERR	0x08
+#define CS4245_DAC_CLK_ERR	0x04
+#define CS4245_ADC_OVFL		0x02
+#define CS4245_ADC_UNDRFL	0x01
+
+
+#define CS4245_SPI_ADDRESS	(0x9e << 16)
+#define CS4245_SPI_WRITE	(0 << 16)
diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c
deleted file mode 100644
index 5a87d68..0000000
--- a/sound/pci/oxygen/hifier.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * C-Media CMI8788 driver for the MediaTek/TempoTec HiFier Fantasia
- *
- * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
- *
- *
- *  This driver is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License, version 2.
- *
- *  This driver is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this driver; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-/*
- * CMI8788:
- *
- * SPI 0 -> AK4396
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <sound/control.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/tlv.h>
-#include "oxygen.h"
-#include "ak4396.h"
-
-MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
-MODULE_DESCRIPTION("TempoTec HiFier driver");
-MODULE_LICENSE("GPL v2");
-
-static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
-
-module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "card index");
-module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string");
-module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "enable card");
-
-static DEFINE_PCI_DEVICE_TABLE(hifier_ids) = {
-	{ OXYGEN_PCI_SUBID(0x14c3, 0x1710) },
-	{ OXYGEN_PCI_SUBID(0x14c3, 0x1711) },
-	{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
-	{ }
-};
-MODULE_DEVICE_TABLE(pci, hifier_ids);
-
-struct hifier_data {
-	u8 ak4396_regs[5];
-};
-
-static void ak4396_write(struct oxygen *chip, u8 reg, u8 value)
-{
-	struct hifier_data *data = chip->model_data;
-
-	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER  |
-			 OXYGEN_SPI_DATA_LENGTH_2 |
-			 OXYGEN_SPI_CLOCK_160 |
-			 (0 << OXYGEN_SPI_CODEC_SHIFT) |
-			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
-			 AK4396_WRITE | (reg << 8) | value);
-	data->ak4396_regs[reg] = value;
-}
-
-static void ak4396_write_cached(struct oxygen *chip, u8 reg, u8 value)
-{
-	struct hifier_data *data = chip->model_data;
-
-	if (value != data->ak4396_regs[reg])
-		ak4396_write(chip, reg, value);
-}
-
-static void hifier_registers_init(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-
-	ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN);
-	ak4396_write(chip, AK4396_CONTROL_2,
-		     data->ak4396_regs[AK4396_CONTROL_2]);
-	ak4396_write(chip, AK4396_CONTROL_3, AK4396_PCM);
-	ak4396_write(chip, AK4396_LCH_ATT, chip->dac_volume[0]);
-	ak4396_write(chip, AK4396_RCH_ATT, chip->dac_volume[1]);
-}
-
-static void hifier_init(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-
-	data->ak4396_regs[AK4396_CONTROL_2] =
-		AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL;
-	hifier_registers_init(chip);
-
-	snd_component_add(chip->card, "AK4396");
-	snd_component_add(chip->card, "CS5340");
-}
-
-static void hifier_cleanup(struct oxygen *chip)
-{
-}
-
-static void hifier_resume(struct oxygen *chip)
-{
-	hifier_registers_init(chip);
-}
-
-static void set_ak4396_params(struct oxygen *chip,
-			       struct snd_pcm_hw_params *params)
-{
-	struct hifier_data *data = chip->model_data;
-	u8 value;
-
-	value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_DFS_MASK;
-	if (params_rate(params) <= 54000)
-		value |= AK4396_DFS_NORMAL;
-	else if (params_rate(params) <= 108000)
-		value |= AK4396_DFS_DOUBLE;
-	else
-		value |= AK4396_DFS_QUAD;
-
-	msleep(1); /* wait for the new MCLK to become stable */
-
-	if (value != data->ak4396_regs[AK4396_CONTROL_2]) {
-		ak4396_write(chip, AK4396_CONTROL_1,
-			     AK4396_DIF_24_MSB);
-		ak4396_write(chip, AK4396_CONTROL_2, value);
-		ak4396_write(chip, AK4396_CONTROL_1,
-			     AK4396_DIF_24_MSB | AK4396_RSTN);
-	}
-}
-
-static void update_ak4396_volume(struct oxygen *chip)
-{
-	ak4396_write_cached(chip, AK4396_LCH_ATT, chip->dac_volume[0]);
-	ak4396_write_cached(chip, AK4396_RCH_ATT, chip->dac_volume[1]);
-}
-
-static void update_ak4396_mute(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-	u8 value;
-
-	value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_SMUTE;
-	if (chip->dac_mute)
-		value |= AK4396_SMUTE;
-	ak4396_write_cached(chip, AK4396_CONTROL_2, value);
-}
-
-static void set_cs5340_params(struct oxygen *chip,
-			      struct snd_pcm_hw_params *params)
-{
-}
-
-static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
-
-static const struct oxygen_model model_hifier = {
-	.shortname = "C-Media CMI8787",
-	.longname = "C-Media Oxygen HD Audio",
-	.chip = "CMI8788",
-	.init = hifier_init,
-	.cleanup = hifier_cleanup,
-	.resume = hifier_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
-	.set_dac_params = set_ak4396_params,
-	.set_adc_params = set_cs5340_params,
-	.update_dac_volume = update_ak4396_volume,
-	.update_dac_mute = update_ak4396_mute,
-	.dac_tlv = ak4396_db_scale,
-	.model_data_size = sizeof(struct hifier_data),
-	.device_config = PLAYBACK_0_TO_I2S |
-			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_1,
-	.dac_channels = 2,
-	.dac_volume_min = 0,
-	.dac_volume_max = 255,
-	.function_flags = OXYGEN_FUNCTION_SPI,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-};
-
-static int __devinit get_hifier_model(struct oxygen *chip,
-				      const struct pci_device_id *id)
-{
-	chip->model = model_hifier;
-	return 0;
-}
-
-static int __devinit hifier_probe(struct pci_dev *pci,
-				  const struct pci_device_id *pci_id)
-{
-	static int dev;
-	int err;
-
-	if (dev >= SNDRV_CARDS)
-		return -ENODEV;
-	if (!enable[dev]) {
-		++dev;
-		return -ENOENT;
-	}
-	err = oxygen_pci_probe(pci, index[dev], id[dev], THIS_MODULE,
-			       hifier_ids, get_hifier_model);
-	if (err >= 0)
-		++dev;
-	return err;
-}
-
-static struct pci_driver hifier_driver = {
-	.name = "CMI8787HiFier",
-	.id_table = hifier_ids,
-	.probe = hifier_probe,
-	.remove = __devexit_p(oxygen_pci_remove),
-#ifdef CONFIG_PM
-	.suspend = oxygen_pci_suspend,
-	.resume = oxygen_pci_resume,
-#endif
-};
-
-static int __init alsa_card_hifier_init(void)
-{
-	return pci_register_driver(&hifier_driver);
-}
-
-static void __exit alsa_card_hifier_exit(void)
-{
-	pci_unregister_driver(&hifier_driver);
-}
-
-module_init(alsa_card_hifier_init)
-module_exit(alsa_card_hifier_exit)
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 98a8eb3..d7e8ddd 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -20,19 +20,32 @@
 /*
  * CMI8788:
  *
- * SPI 0 -> 1st AK4396 (front)
- * SPI 1 -> 2nd AK4396 (surround)
- * SPI 2 -> 3rd AK4396 (center/LFE)
- * SPI 3 -> WM8785
- * SPI 4 -> 4th AK4396 (back)
+ *   SPI 0 -> 1st AK4396 (front)
+ *   SPI 1 -> 2nd AK4396 (surround)
+ *   SPI 2 -> 3rd AK4396 (center/LFE)
+ *   SPI 3 -> WM8785
+ *   SPI 4 -> 4th AK4396 (back)
  *
- * GPIO 0 -> DFS0 of AK5385
- * GPIO 1 -> DFS1 of AK5385
- * GPIO 8 -> enable headphone amplifier on HT-Omega models
+ *   GPIO 0 -> DFS0 of AK5385
+ *   GPIO 1 -> DFS1 of AK5385
+ *
+ * X-Meridian models:
+ *   GPIO 4 -> enable extension S/PDIF input
+ *   GPIO 6 -> enable on-board S/PDIF input
+ *
+ * Claro models:
+ *   GPIO 6 -> S/PDIF from optical (0) or coaxial (1) input
+ *   GPIO 8 -> enable headphone amplifier
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to ADC input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   CD_IN  <- CD
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to ADC input
  */
 
 #include <linux/delay.h>
@@ -41,18 +54,22 @@
 #include <sound/ac97_codec.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include "oxygen.h"
+#include "xonar_dg.h"
 #include "ak4396.h"
 #include "wm8785.h"
 
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("C-Media CMI8788 driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8788}}");
+MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8786}"
+			",{C-Media,CMI8787}"
+			",{C-Media,CMI8788}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
@@ -66,24 +83,46 @@
 MODULE_PARM_DESC(enable, "enable card");
 
 enum {
-	MODEL_CMEDIA_REF,	/* C-Media's reference design */
-	MODEL_MERIDIAN,		/* AuzenTech X-Meridian */
-	MODEL_CLARO,		/* HT-Omega Claro */
-	MODEL_CLARO_HALO,	/* HT-Omega Claro halo */
+	MODEL_CMEDIA_REF,
+	MODEL_MERIDIAN,
+	MODEL_MERIDIAN_2G,
+	MODEL_CLARO,
+	MODEL_CLARO_HALO,
+	MODEL_FANTASIA,
+	MODEL_SERENADE,
+	MODEL_2CH_OUTPUT,
+	MODEL_HG2PCI,
+	MODEL_XONAR_DG,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(oxygen_ids) = {
+	/* C-Media's reference design */
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0216), .driver_data = MODEL_CMEDIA_REF },
+	{ OXYGEN_PCI_SUBID(0x10b0, 0x0217), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0218), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0219), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x0001), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x0010), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x8788), .driver_data = MODEL_CMEDIA_REF },
-	{ OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x147a, 0xa017), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x1a58, 0x0910), .driver_data = MODEL_CMEDIA_REF },
+	/* Asus Xonar DG */
+	{ OXYGEN_PCI_SUBID(0x1043, 0x8467), .driver_data = MODEL_XONAR_DG },
+	/* PCI 2.0 HD Audio */
+	{ OXYGEN_PCI_SUBID(0x13f6, 0x8782), .driver_data = MODEL_2CH_OUTPUT },
+	/* Kuroutoshikou CMI8787-HG2PCI */
+	{ OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_HG2PCI },
+	/* TempoTec HiFier Fantasia */
+	{ OXYGEN_PCI_SUBID(0x14c3, 0x1710), .driver_data = MODEL_FANTASIA },
+	/* TempoTec HiFier Serenade */
+	{ OXYGEN_PCI_SUBID(0x14c3, 0x1711), .driver_data = MODEL_SERENADE },
+	/* AuzenTech X-Meridian */
 	{ OXYGEN_PCI_SUBID(0x415a, 0x5431), .driver_data = MODEL_MERIDIAN },
+	/* AuzenTech X-Meridian 2G */
+	{ OXYGEN_PCI_SUBID(0x5431, 0x017a), .driver_data = MODEL_MERIDIAN_2G },
+	/* HT-Omega Claro */
 	{ OXYGEN_PCI_SUBID(0x7284, 0x9761), .driver_data = MODEL_CLARO },
+	/* HT-Omega Claro halo */
 	{ OXYGEN_PCI_SUBID(0x7284, 0x9781), .driver_data = MODEL_CLARO_HALO },
 	{ }
 };
@@ -95,9 +134,15 @@
 #define GPIO_AK5385_DFS_DOUBLE	0x0001
 #define GPIO_AK5385_DFS_QUAD	0x0002
 
+#define GPIO_MERIDIAN_DIG_MASK	0x0050
+#define GPIO_MERIDIAN_DIG_EXT	0x0010
+#define GPIO_MERIDIAN_DIG_BOARD	0x0040
+
+#define GPIO_CLARO_DIG_COAX	0x0040
 #define GPIO_CLARO_HP		0x0100
 
 struct generic_data {
+	unsigned int dacs;
 	u8 ak4396_regs[4][5];
 	u16 wm8785_regs[3];
 };
@@ -148,7 +193,7 @@
 	struct generic_data *data = chip->model_data;
 	unsigned int i;
 
-	for (i = 0; i < 4; ++i) {
+	for (i = 0; i < data->dacs; ++i) {
 		ak4396_write(chip, i, AK4396_CONTROL_1,
 			     AK4396_DIF_24_MSB | AK4396_RSTN);
 		ak4396_write(chip, i, AK4396_CONTROL_2,
@@ -166,6 +211,7 @@
 {
 	struct generic_data *data = chip->model_data;
 
+	data->dacs = chip->model.dac_channels_pcm / 2;
 	data->ak4396_regs[0][AK4396_CONTROL_2] =
 		AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL;
 	ak4396_registers_init(chip);
@@ -207,6 +253,10 @@
 
 static void meridian_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_MERIDIAN_DIG_MASK);
+	oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+			      GPIO_MERIDIAN_DIG_BOARD, GPIO_MERIDIAN_DIG_MASK);
 	ak4396_init(chip);
 	ak5385_init(chip);
 }
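
The GPIO sequences added here and in the Claro hooks below follow one pattern: a bit set in OXYGEN_GPIO_CONTROL switches that pin to output mode (the DG code later in this patch clears the control bit for its headphone-detect input, which supports that reading), and OXYGEN_GPIO_DATA then drives the level; oxygen_write16_masked() touches only the bits covered by the mask. A minimal sketch of the recurring idiom, with an illustrative helper name that is not part of the driver:

	static void gpio_select(struct oxygen *chip, u16 mask, u16 value)
	{
		/* configure the masked pins as outputs */
		oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, mask);
		/* drive the requested level, leaving other pins untouched */
		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, value, mask);
	}

meridian_init() uses exactly this to default the X-Meridian's S/PDIF capture source to the on-board input; the meridian_dig_source_put() and claro_dig_source_put() callbacks further down flip the same bits at runtime.
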
@@ -220,6 +270,8 @@
 
 static void claro_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX);
 	ak4396_init(chip);
 	wm8785_init(chip);
 	claro_enable_hp(chip);
@@ -227,11 +279,24 @@
 
 static void claro_halo_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX);
 	ak4396_init(chip);
 	ak5385_init(chip);
 	claro_enable_hp(chip);
 }
 
+static void fantasia_init(struct oxygen *chip)
+{
+	ak4396_init(chip);
+	snd_component_add(chip->card, "CS5340");
+}
+
+static void stereo_output_init(struct oxygen *chip)
+{
+	ak4396_init(chip);
+}
+
 static void generic_cleanup(struct oxygen *chip)
 {
 }
@@ -268,6 +333,11 @@
 	claro_enable_hp(chip);
 }
 
+static void stereo_resume(struct oxygen *chip)
+{
+	ak4396_registers_init(chip);
+}
+
 static void set_ak4396_params(struct oxygen *chip,
 			      struct snd_pcm_hw_params *params)
 {
@@ -286,7 +356,7 @@
 	msleep(1); /* wait for the new MCLK to become stable */
 
 	if (value != data->ak4396_regs[0][AK4396_CONTROL_2]) {
-		for (i = 0; i < 4; ++i) {
+		for (i = 0; i < data->dacs; ++i) {
 			ak4396_write(chip, i, AK4396_CONTROL_1,
 				     AK4396_DIF_24_MSB);
 			ak4396_write(chip, i, AK4396_CONTROL_2, value);
@@ -298,9 +368,10 @@
 
 static void update_ak4396_volume(struct oxygen *chip)
 {
+	struct generic_data *data = chip->model_data;
 	unsigned int i;
 
-	for (i = 0; i < 4; ++i) {
+	for (i = 0; i < data->dacs; ++i) {
 		ak4396_write_cached(chip, i, AK4396_LCH_ATT,
 				    chip->dac_volume[i * 2]);
 		ak4396_write_cached(chip, i, AK4396_RCH_ATT,
@@ -317,7 +388,7 @@
 	value = data->ak4396_regs[0][AK4396_CONTROL_2] & ~AK4396_SMUTE;
 	if (chip->dac_mute)
 		value |= AK4396_SMUTE;
-	for (i = 0; i < 4; ++i)
+	for (i = 0; i < data->dacs; ++i)
 		ak4396_write_cached(chip, i, AK4396_CONTROL_2, value);
 }
 
@@ -356,6 +427,10 @@
 			      value, GPIO_AK5385_DFS_MASK);
 }
 
+static void set_no_params(struct oxygen *chip, struct snd_pcm_hw_params *params)
+{
+}
+
 static int rolloff_info(struct snd_kcontrol *ctl,
 			struct snd_ctl_elem_info *info)
 {
@@ -363,13 +438,7 @@
 		"Sharp Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -400,7 +469,7 @@
 		reg &= ~AK4396_SLOW;
 	changed = reg != data->ak4396_regs[0][AK4396_CONTROL_2];
 	if (changed) {
-		for (i = 0; i < 4; ++i)
+		for (i = 0; i < data->dacs; ++i)
 			ak4396_write(chip, i, AK4396_CONTROL_2, reg);
 	}
 	mutex_unlock(&chip->mutex);
@@ -421,13 +490,7 @@
 		"None", "High-pass Filter"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -466,6 +529,100 @@
 	.put = hpf_put,
 };
 
+static int meridian_dig_source_info(struct snd_kcontrol *ctl,
+				    struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "On-board", "Extension" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int claro_dig_source_info(struct snd_kcontrol *ctl,
+				 struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "Optical", "Coaxial" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int meridian_dig_source_get(struct snd_kcontrol *ctl,
+				   struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	value->value.enumerated.item[0] =
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) &
+		   GPIO_MERIDIAN_DIG_EXT);
+	return 0;
+}
+
+static int claro_dig_source_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	value->value.enumerated.item[0] =
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) &
+		   GPIO_CLARO_DIG_COAX);
+	return 0;
+}
+
+static int meridian_dig_source_put(struct snd_kcontrol *ctl,
+				   struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 old_reg, new_reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+	new_reg = old_reg & ~GPIO_MERIDIAN_DIG_MASK;
+	if (value->value.enumerated.item[0] == 0)
+		new_reg |= GPIO_MERIDIAN_DIG_BOARD;
+	else
+		new_reg |= GPIO_MERIDIAN_DIG_EXT;
+	changed = new_reg != old_reg;
+	if (changed)
+		oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int claro_dig_source_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 old_reg, new_reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+	new_reg = old_reg & ~GPIO_CLARO_DIG_COAX;
+	if (value->value.enumerated.item[0])
+		new_reg |= GPIO_CLARO_DIG_COAX;
+	changed = new_reg != old_reg;
+	if (changed)
+		oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static const struct snd_kcontrol_new meridian_dig_source_control = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "IEC958 Source Capture Enum",
+	.info = meridian_dig_source_info,
+	.get = meridian_dig_source_get,
+	.put = meridian_dig_source_put,
+};
+
+static const struct snd_kcontrol_new claro_dig_source_control = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "IEC958 Source Capture Enum",
+	.info = claro_dig_source_info,
+	.get = claro_dig_source_get,
+	.put = claro_dig_source_put,
+};
+
 static int generic_mixer_init(struct oxygen *chip)
 {
 	return snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip));
@@ -484,6 +641,81 @@
 	return 0;
 }
 
+static int meridian_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&meridian_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int claro_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_wm8785_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&claro_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int claro_halo_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&claro_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static void dump_ak4396_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct generic_data *data = chip->model_data;
+	unsigned int dac, i;
+
+	for (dac = 0; dac < data->dacs; ++dac) {
+		snd_iprintf(buffer, "\nAK4396 %u:", dac + 1);
+		for (i = 0; i < 5; ++i)
+			snd_iprintf(buffer, " %02x", data->ak4396_regs[dac][i]);
+	}
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_wm8785_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct generic_data *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nWM8785:");
+	for (i = 0; i < 3; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8785_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_oxygen_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	dump_ak4396_registers(chip, buffer);
+	dump_wm8785_registers(chip, buffer);
+}
+
 static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
 
 static const struct oxygen_model model_generic = {
@@ -494,11 +726,11 @@
 	.mixer_init = generic_wm8785_mixer_init,
 	.cleanup = generic_cleanup,
 	.resume = generic_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_ak4396_params,
 	.set_adc_params = set_wm8785_params,
 	.update_dac_volume = update_ak4396_volume,
 	.update_dac_mute = update_ak4396_mute,
+	.dump_registers = dump_oxygen_registers,
 	.dac_tlv = ak4396_db_scale,
 	.model_data_size = sizeof(struct generic_data),
 	.device_config = PLAYBACK_0_TO_I2S |
@@ -508,11 +740,14 @@
 			 CAPTURE_1_FROM_SPDIF |
 			 CAPTURE_2_FROM_AC97_1 |
 			 AC97_CD_INPUT,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 0,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_SPI |
 			  OXYGEN_FUNCTION_ENABLE_SPI_4_5,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
@@ -520,42 +755,87 @@
 static int __devinit get_oxygen_model(struct oxygen *chip,
 				      const struct pci_device_id *id)
 {
+	static const char *const names[] = {
+		[MODEL_MERIDIAN]	= "AuzenTech X-Meridian",
+		[MODEL_MERIDIAN_2G]	= "AuzenTech X-Meridian 2G",
+		[MODEL_CLARO]		= "HT-Omega Claro",
+		[MODEL_CLARO_HALO]	= "HT-Omega Claro halo",
+		[MODEL_FANTASIA]	= "TempoTec HiFier Fantasia",
+		[MODEL_SERENADE]	= "TempoTec HiFier Serenade",
+		[MODEL_HG2PCI]		= "CMI8787-HG2PCI",
+	};
+
 	chip->model = model_generic;
 	switch (id->driver_data) {
 	case MODEL_MERIDIAN:
+	case MODEL_MERIDIAN_2G:
 		chip->model.init = meridian_init;
-		chip->model.mixer_init = generic_mixer_init;
+		chip->model.mixer_init = meridian_mixer_init;
 		chip->model.resume = meridian_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.dump_registers = dump_ak4396_registers;
 		chip->model.device_config = PLAYBACK_0_TO_I2S |
 					    PLAYBACK_1_TO_SPDIF |
 					    CAPTURE_0_FROM_I2S_2 |
 					    CAPTURE_1_FROM_SPDIF;
+		if (id->driver_data == MODEL_MERIDIAN)
+			chip->model.device_config |= AC97_CD_INPUT;
 		break;
 	case MODEL_CLARO:
 		chip->model.init = claro_init;
+		chip->model.mixer_init = claro_mixer_init;
 		chip->model.cleanup = claro_cleanup;
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		break;
 	case MODEL_CLARO_HALO:
 		chip->model.init = claro_halo_init;
-		chip->model.mixer_init = generic_mixer_init;
+		chip->model.mixer_init = claro_halo_mixer_init;
 		chip->model.cleanup = claro_cleanup;
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.dump_registers = dump_ak4396_registers;
 		chip->model.device_config = PLAYBACK_0_TO_I2S |
 					    PLAYBACK_1_TO_SPDIF |
 					    CAPTURE_0_FROM_I2S_2 |
 					    CAPTURE_1_FROM_SPDIF;
 		break;
+	case MODEL_FANTASIA:
+	case MODEL_SERENADE:
+	case MODEL_2CH_OUTPUT:
+	case MODEL_HG2PCI:
+		chip->model.shortname = "C-Media CMI8787";
+		chip->model.chip = "CMI8787";
+		if (id->driver_data == MODEL_FANTASIA)
+			chip->model.init = fantasia_init;
+		else
+			chip->model.init = stereo_output_init;
+		chip->model.resume = stereo_resume;
+		chip->model.mixer_init = generic_mixer_init;
+		chip->model.set_adc_params = set_no_params;
+		chip->model.dump_registers = dump_ak4396_registers;
+		chip->model.device_config = PLAYBACK_0_TO_I2S |
+					    PLAYBACK_1_TO_SPDIF;
+		if (id->driver_data == MODEL_FANTASIA) {
+			chip->model.device_config |= CAPTURE_0_FROM_I2S_1;
+			chip->model.adc_mclks = OXYGEN_MCLKS(256, 128, 128);
+		}
+		chip->model.dac_channels_pcm = 2;
+		chip->model.dac_channels_mixer = 2;
+		break;
+	case MODEL_XONAR_DG:
+		chip->model = model_xonar_dg;
+		break;
 	}
 	if (id->driver_data == MODEL_MERIDIAN ||
+	    id->driver_data == MODEL_MERIDIAN_2G ||
 	    id->driver_data == MODEL_CLARO_HALO) {
 		chip->model.misc_flags = OXYGEN_MISC_MIDI;
 		chip->model.device_config |= MIDI_OUTPUT | MIDI_INPUT;
 	}
+	if (id->driver_data < ARRAY_SIZE(names) && names[id->driver_data])
+		chip->model.shortname = names[id->driver_data];
 	return 0;
 }
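
The rolloff_info() and hpf_info() conversions above (and the matching changes in oxygen_mixer.c, xonar_cs43xx.c and the new xonar_dg.c below) replace open-coded enumerated-info boilerplate with the ALSA core helper snd_ctl_enum_info(). For reference, the helper does essentially what the removed lines did; a sketch only, the real implementation lives in ALSA core and additionally bounds the name copy:

	static int enum_info_sketch(struct snd_ctl_elem_info *info,
				    unsigned int channels, unsigned int items,
				    const char *const names[])
	{
		info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
		info->count = channels;
		info->value.enumerated.items = items;
		if (info->value.enumerated.item >= items)
			info->value.enumerated.item = items - 1;
		strcpy(info->value.enumerated.name,
		       names[info->value.enumerated.item]);
		return 0;
	}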
 
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 7d5222c..f53897a 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -16,6 +16,10 @@
 #define PCM_AC97	5
 #define PCM_COUNT	6
 
+#define OXYGEN_MCLKS(f_single, f_double, f_quad) ((MCLK_##f_single << 0) | \
+						  (MCLK_##f_double << 2) | \
+						  (MCLK_##f_quad   << 4))
+
 #define OXYGEN_IO_SIZE	0x100
 
 #define OXYGEN_EEPROM_ID	0x434d	/* "CM" */
@@ -35,6 +39,7 @@
 #define MIDI_OUTPUT		0x0800
 #define MIDI_INPUT		0x1000
 #define AC97_CD_INPUT		0x2000
+#define AC97_FMIC_SWITCH	0x4000
 
 enum {
 	CONTROL_SPDIF_PCM,
@@ -65,6 +70,7 @@
 struct snd_pcm_hw_params;
 struct snd_kcontrol_new;
 struct snd_rawmidi;
+struct snd_info_buffer;
 struct oxygen;
 
 struct oxygen_model {
@@ -79,8 +85,6 @@
 	void (*resume)(struct oxygen *chip);
 	void (*pcm_hardware_filter)(unsigned int channel,
 				    struct snd_pcm_hardware *hardware);
-	unsigned int (*get_i2s_mclk)(struct oxygen *chip, unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params);
 	void (*set_dac_params)(struct oxygen *chip,
 			       struct snd_pcm_hw_params *params);
 	void (*set_adc_params)(struct oxygen *chip,
@@ -88,19 +92,25 @@
 	void (*update_dac_volume)(struct oxygen *chip);
 	void (*update_dac_mute)(struct oxygen *chip);
 	void (*update_center_lfe_mix)(struct oxygen *chip, bool mixed);
+	unsigned int (*adjust_dac_routing)(struct oxygen *chip,
+					   unsigned int play_routing);
 	void (*gpio_changed)(struct oxygen *chip);
 	void (*uart_input)(struct oxygen *chip);
 	void (*ac97_switch)(struct oxygen *chip,
 			    unsigned int reg, unsigned int mute);
+	void (*dump_registers)(struct oxygen *chip,
+			       struct snd_info_buffer *buffer);
 	const unsigned int *dac_tlv;
-	unsigned long private_data;
 	size_t model_data_size;
 	unsigned int device_config;
-	u8 dac_channels;
+	u8 dac_channels_pcm;
+	u8 dac_channels_mixer;
 	u8 dac_volume_min;
 	u8 dac_volume_max;
 	u8 misc_flags;
 	u8 function_flags;
+	u8 dac_mclks;
+	u8 adc_mclks;
 	u16 dac_i2s_format;
 	u16 adc_i2s_format;
 };
@@ -121,7 +131,6 @@
 	u8 pcm_running;
 	u8 dac_routing;
 	u8 spdif_playback_enable;
-	u8 revision;
 	u8 has_ac97_0;
 	u8 has_ac97_1;
 	u32 spdif_bits;
@@ -167,8 +176,6 @@
 /* oxygen_pcm.c */
 
 int oxygen_pcm_init(struct oxygen *chip);
-unsigned int oxygen_default_i2s_mclk(struct oxygen *chip, unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params);
 
 /* oxygen_io.c */
 
diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
index 09b2b2a..f5164b1 100644
--- a/sound/pci/oxygen/oxygen_io.c
+++ b/sound/pci/oxygen/oxygen_io.c
@@ -197,11 +197,11 @@
 {
 	unsigned int count;
 
-	/* should not need more than 7.68 us (24 * 320 ns) */
+	/* should not need more than 30.72 us (24 * 1.28 us) */
 	count = 10;
 	while ((oxygen_read8(chip, OXYGEN_SPI_CONTROL) & OXYGEN_SPI_BUSY)
 	       && count > 0) {
-		udelay(1);
+		udelay(4);
 		--count;
 	}
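
The longer poll interval matches the slowest SPI bit clock the driver now uses; the Xonar DG code below writes its CS4245 with OXYGEN_SPI_CLOCK_1280, i.e. 1.28 us per bit:

	/* Worst-case SPI transfer vs. poll budget:
	 *   24 bits * 1.28 us = 30.72 us   (slowest clock)
	 *   10 polls * 4 us   = 40 us      available, so the wait suffices
	 * The old 10 * 1 us = 10 us only covered the 320 ns clock
	 * (24 * 0.32 us = 7.68 us).
	 */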
 
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index e5ebe56..70b7398 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -202,7 +202,13 @@
 	struct oxygen *chip = entry->private_data;
 	int i, j;
 
-	snd_iprintf(buffer, "CMI8788\n\n");
+	switch (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_PACKAGE_ID_MASK) {
+	case OXYGEN_PACKAGE_ID_8786: i = '6'; break;
+	case OXYGEN_PACKAGE_ID_8787: i = '7'; break;
+	case OXYGEN_PACKAGE_ID_8788: i = '8'; break;
+	default:                     i = '?'; break;
+	}
+	snd_iprintf(buffer, "CMI878%c:\n", i);
 	for (i = 0; i < OXYGEN_IO_SIZE; i += 0x10) {
 		snd_iprintf(buffer, "%02x:", i);
 		for (j = 0; j < 0x10; ++j)
@@ -212,7 +218,7 @@
 	if (mutex_lock_interruptible(&chip->mutex) < 0)
 		return;
 	if (chip->has_ac97_0) {
-		snd_iprintf(buffer, "\nAC97\n");
+		snd_iprintf(buffer, "\nAC97:\n");
 		for (i = 0; i < 0x80; i += 0x10) {
 			snd_iprintf(buffer, "%02x:", i);
 			for (j = 0; j < 0x10; j += 2)
@@ -222,7 +228,7 @@
 		}
 	}
 	if (chip->has_ac97_1) {
-		snd_iprintf(buffer, "\nAC97 2\n");
+		snd_iprintf(buffer, "\nAC97 2:\n");
 		for (i = 0; i < 0x80; i += 0x10) {
 			snd_iprintf(buffer, "%02x:", i);
 			for (j = 0; j < 0x10; j += 2)
@@ -232,13 +238,15 @@
 		}
 	}
 	mutex_unlock(&chip->mutex);
+	if (chip->model.dump_registers)
+		chip->model.dump_registers(chip, buffer);
 }
 
 static void oxygen_proc_init(struct oxygen *chip)
 {
 	struct snd_info_entry *entry;
 
-	if (!snd_card_proc_new(chip->card, "cmi8788", &entry))
+	if (!snd_card_proc_new(chip->card, "oxygen", &entry))
 		snd_info_set_text_ops(entry, chip, oxygen_proc_read);
 }
 #else
@@ -262,7 +270,7 @@
 	 */
 	subdevice = oxygen_read_eeprom(chip, 2);
 	/* use default ID if EEPROM is missing */
-	if (subdevice == 0xffff)
+	if (subdevice == 0xffff && oxygen_read_eeprom(chip, 1) == 0xffff)
 		subdevice = 0x8788;
 	/*
 	 * We use only the subsystem device ID for searching because it is
@@ -364,12 +372,7 @@
 		(IEC958_AES1_CON_PCM_CODER << OXYGEN_SPDIF_CATEGORY_SHIFT);
 	chip->spdif_pcm_bits = chip->spdif_bits;
 
-	if (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2)
-		chip->revision = 2;
-	else
-		chip->revision = 1;
-
-	if (chip->revision == 1)
+	if (!(oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2))
 		oxygen_set_bits8(chip, OXYGEN_MISC,
 				 OXYGEN_MISC_PCI_MEM_W_1_CLOCK);
 
@@ -406,28 +409,40 @@
 		      (OXYGEN_FORMAT_16 << OXYGEN_MULTICH_FORMAT_SHIFT));
 	oxygen_write8(chip, OXYGEN_REC_CHANNELS, OXYGEN_REC_CHANNELS_2_2_2);
 	oxygen_write16(chip, OXYGEN_I2S_MULTICH_FORMAT,
-		       OXYGEN_RATE_48000 | chip->model.dac_i2s_format |
-		       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+		       OXYGEN_RATE_48000 |
+		       chip->model.dac_i2s_format |
+		       OXYGEN_I2S_MCLK(chip->model.dac_mclks) |
+		       OXYGEN_I2S_BITS_16 |
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_BCLK_64);
 	if (chip->model.device_config & CAPTURE_0_FROM_I2S_1)
 		oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-			       OXYGEN_RATE_48000 | chip->model.adc_i2s_format |
-			       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+			       OXYGEN_RATE_48000 |
+			       chip->model.adc_i2s_format |
+			       OXYGEN_I2S_MCLK(chip->model.adc_mclks) |
+			       OXYGEN_I2S_BITS_16 |
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_BCLK_64);
 	else
 		oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_MUTE_MCLK);
 	if (chip->model.device_config & (CAPTURE_0_FROM_I2S_2 |
 					 CAPTURE_2_FROM_I2S_2))
 		oxygen_write16(chip, OXYGEN_I2S_B_FORMAT,
-			       OXYGEN_RATE_48000 | chip->model.adc_i2s_format |
-			       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+			       OXYGEN_RATE_48000 |
+			       chip->model.adc_i2s_format |
+			       OXYGEN_I2S_MCLK(chip->model.adc_mclks) |
+			       OXYGEN_I2S_BITS_16 |
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_BCLK_64);
 	else
 		oxygen_write16(chip, OXYGEN_I2S_B_FORMAT,
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_MUTE_MCLK);
 	oxygen_write16(chip, OXYGEN_I2S_C_FORMAT,
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_MUTE_MCLK);
 	oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
 			    OXYGEN_SPDIF_OUT_ENABLE |
 			    OXYGEN_SPDIF_LOOPBACK);
@@ -557,7 +572,8 @@
 	oxygen_shutdown(chip);
 	if (chip->irq >= 0)
 		free_irq(chip->irq, chip);
-	flush_scheduled_work();
+	flush_work_sync(&chip->spdif_input_bits_work);
+	flush_work_sync(&chip->gpio_work);
 	chip->model.cleanup(chip);
 	kfree(chip->model_data);
 	mutex_destroy(&chip->mutex);
@@ -648,8 +664,8 @@
 
 	strcpy(card->driver, chip->model.chip);
 	strcpy(card->shortname, chip->model.shortname);
-	sprintf(card->longname, "%s (rev %u) at %#lx, irq %i",
-		chip->model.longname, chip->revision, chip->addr, chip->irq);
+	sprintf(card->longname, "%s at %#lx, irq %i",
+		chip->model.longname, chip->addr, chip->irq);
 	strcpy(card->mixername, chip->model.chip);
 	snd_component_add(card, chip->model.chip);
 
@@ -733,7 +749,8 @@
 	spin_unlock_irq(&chip->reg_lock);
 
 	synchronize_irq(chip->irq);
-	flush_scheduled_work();
+	flush_work_sync(&chip->spdif_input_bits_work);
+	flush_work_sync(&chip->gpio_work);
 	chip->interrupt_mask = saved_interrupt_mask;
 
 	pci_disable_device(pci);
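
Two behavioural notes on the oxygen_lib.c changes: the proc file is renamed from "cmi8788" to "oxygen" and now reports the actual package (CMI8786/8787/8788) read from OXYGEN_REVISION, and the missing-EEPROM fallback is tightened so that a card whose subsystem device ID legitimately is 0xffff is not misdetected:

	/* Old: EEPROM word 2 == 0xffff    -> assume EEPROM missing,
	 *                                    substitute 0x8788
	 * New: words 1 and 2 both 0xffff  -> EEPROM missing
	 *      word 2 == 0xffff alone     -> kept as-is, so the 13f6:ffff
	 *                                    entry (CMI8787-HG2PCI) in
	 *                                    oxygen_ids[] stays usable
	 */
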
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 2849b36..26c7e8b 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -31,7 +31,7 @@
 	struct oxygen *chip = ctl->private_data;
 
 	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-	info->count = chip->model.dac_channels;
+	info->count = chip->model.dac_channels_mixer;
 	info->value.integer.min = chip->model.dac_volume_min;
 	info->value.integer.max = chip->model.dac_volume_max;
 	return 0;
@@ -44,7 +44,7 @@
 	unsigned int i;
 
 	mutex_lock(&chip->mutex);
-	for (i = 0; i < chip->model.dac_channels; ++i)
+	for (i = 0; i < chip->model.dac_channels_mixer; ++i)
 		value->value.integer.value[i] = chip->dac_volume[i];
 	mutex_unlock(&chip->mutex);
 	return 0;
@@ -59,7 +59,7 @@
 
 	changed = 0;
 	mutex_lock(&chip->mutex);
-	for (i = 0; i < chip->model.dac_channels; ++i)
+	for (i = 0; i < chip->model.dac_channels_mixer; ++i)
 		if (value->value.integer.value[i] != chip->dac_volume[i]) {
 			chip->dac_volume[i] = value->value.integer.value[i];
 			changed = 1;
@@ -97,6 +97,16 @@
 	return changed;
 }
 
+static unsigned int upmix_item_count(struct oxygen *chip)
+{
+	if (chip->model.dac_channels_pcm < 8)
+		return 2;
+	else if (chip->model.update_center_lfe_mix)
+		return 5;
+	else
+		return 3;
+}
+
 static int upmix_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
 {
 	static const char *const names[5] = {
@@ -107,15 +117,9 @@
 		"Front+Surround+Center/LFE+Back",
 	};
 	struct oxygen *chip = ctl->private_data;
-	unsigned int count = chip->model.update_center_lfe_mix ? 5 : 3;
+	unsigned int count = upmix_item_count(chip);
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = count;
-	if (info->value.enumerated.item >= count)
-		info->value.enumerated.item = count - 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, count, names);
 }
 
 static int upmix_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -176,6 +180,8 @@
 			    (1 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
 			    (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
 			    (3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
+	if (chip->model.adjust_dac_routing)
+		reg_value = chip->model.adjust_dac_routing(chip, reg_value);
 	oxygen_write16_masked(chip, OXYGEN_PLAY_ROUTING, reg_value,
 			      OXYGEN_PLAY_DAC0_SOURCE_MASK |
 			      OXYGEN_PLAY_DAC1_SOURCE_MASK |
@@ -188,7 +194,7 @@
 static int upmix_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
-	unsigned int count = chip->model.update_center_lfe_mix ? 5 : 3;
+	unsigned int count = upmix_item_count(chip);
 	int changed;
 
 	if (value->value.enumerated.item[0] >= count)
@@ -430,30 +436,31 @@
 	return 0;
 }
 
-static int spdif_loopback_get(struct snd_kcontrol *ctl,
-			      struct snd_ctl_elem_value *value)
+static int spdif_bit_switch_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
+	u32 bit = ctl->private_value;
 
 	value->value.integer.value[0] =
-		!!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL)
-		   & OXYGEN_SPDIF_LOOPBACK);
+		!!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL) & bit);
 	return 0;
 }
 
-static int spdif_loopback_put(struct snd_kcontrol *ctl,
-			      struct snd_ctl_elem_value *value)
+static int spdif_bit_switch_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
+	u32 bit = ctl->private_value;
 	u32 oldreg, newreg;
 	int changed;
 
 	spin_lock_irq(&chip->reg_lock);
 	oldreg = oxygen_read32(chip, OXYGEN_SPDIF_CONTROL);
 	if (value->value.integer.value[0])
-		newreg = oldreg | OXYGEN_SPDIF_LOOPBACK;
+		newreg = oldreg | bit;
 	else
-		newreg = oldreg & ~OXYGEN_SPDIF_LOOPBACK;
+		newreg = oldreg & ~bit;
 	changed = newreg != oldreg;
 	if (changed)
 		oxygen_write32(chip, OXYGEN_SPDIF_CONTROL, newreg);
@@ -644,6 +651,46 @@
 	return change;
 }
 
+static int mic_fmic_source_info(struct snd_kcontrol *ctl,
+			   struct snd_ctl_elem_info *info)
+{
+	static const char *const names[] = { "Mic Jack", "Front Panel" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int mic_fmic_source_get(struct snd_kcontrol *ctl,
+			       struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] =
+		!!(oxygen_read_ac97(chip, 0, CM9780_JACK) & CM9780_FMIC2MIC);
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int mic_fmic_source_put(struct snd_kcontrol *ctl,
+			       struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 oldreg, newreg;
+	int change;
+
+	mutex_lock(&chip->mutex);
+	oldreg = oxygen_read_ac97(chip, 0, CM9780_JACK);
+	if (value->value.enumerated.item[0])
+		newreg = oldreg | CM9780_FMIC2MIC;
+	else
+		newreg = oldreg & ~CM9780_FMIC2MIC;
+	change = newreg != oldreg;
+	if (change)
+		oxygen_write_ac97(chip, 0, CM9780_JACK, newreg);
+	mutex_unlock(&chip->mutex);
+	return change;
+}
+
 static int ac97_fp_rec_volume_info(struct snd_kcontrol *ctl,
 				   struct snd_ctl_elem_info *info)
 {
@@ -791,8 +838,17 @@
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = SNDRV_CTL_NAME_IEC958("Loopback ", NONE, SWITCH),
 		.info = snd_ctl_boolean_mono_info,
-		.get = spdif_loopback_get,
-		.put = spdif_loopback_put,
+		.get = spdif_bit_switch_get,
+		.put = spdif_bit_switch_put,
+		.private_value = OXYGEN_SPDIF_LOOPBACK,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = SNDRV_CTL_NAME_IEC958("Validity Check ",CAPTURE,SWITCH),
+		.info = snd_ctl_boolean_mono_info,
+		.get = spdif_bit_switch_get,
+		.put = spdif_bit_switch_put,
+		.private_value = OXYGEN_SPDIF_SPDVALID,
 	},
 };
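
spdif_loopback_get()/put() are generalized into spdif_bit_switch_get()/put(): the OXYGEN_SPDIF_CONTROL bit to toggle now comes from ctl->private_value, which snd_ctl_new1() copies from the .private_value field of the template, so a single pair of callbacks serves both the loopback switch and the new capture validity-check switch above.

	/* For the validity-check entry, the shared get callback evaluates
	 *   !!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL) & OXYGEN_SPDIF_SPDVALID)
	 * and the put callback sets or clears that same bit under reg_lock.
	 */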
 
@@ -908,6 +964,13 @@
 	AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0),
 	AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1),
 	AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Mic Source Capture Enum",
+		.info = mic_fmic_source_info,
+		.get = mic_fmic_source_get,
+		.put = mic_fmic_source_put,
+	},
 	AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1),
 	AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1),
 	AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1),
@@ -970,7 +1033,10 @@
 				continue;
 		}
 		if (!strcmp(template.name, "Stereo Upmixing") &&
-		    chip->model.dac_channels == 2)
+		    chip->model.dac_channels_pcm == 2)
+			continue;
+		if (!strcmp(template.name, "Mic Source Capture Enum") &&
+		    !(chip->model.device_config & AC97_FMIC_SWITCH))
 			continue;
 		if (!strncmp(template.name, "CD Capture ", 11) &&
 		    !(chip->model.device_config & AC97_CD_INPUT))
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index 8146674..d5533e3 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -39,7 +39,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE |
 		   SNDRV_PCM_FMTBIT_S32_LE,
 	.rates = SNDRV_PCM_RATE_32000 |
@@ -65,7 +66,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE |
 		   SNDRV_PCM_FMTBIT_S32_LE,
 	.rates = SNDRV_PCM_RATE_32000 |
@@ -91,7 +93,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	.rates = SNDRV_PCM_RATE_48000,
 	.rate_min = 48000,
@@ -140,7 +143,7 @@
 		runtime->hw.rate_min = 44100;
 		break;
 	case PCM_MULTICH:
-		runtime->hw.channels_max = chip->model.dac_channels;
+		runtime->hw.channels_max = chip->model.dac_channels_pcm;
 		break;
 	}
 	if (chip->model.pcm_hardware_filter)
@@ -271,17 +274,6 @@
 	}
 }
 
-unsigned int oxygen_default_i2s_mclk(struct oxygen *chip,
-				     unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params)
-{
-	if (params_rate(hw_params) <= 96000)
-		return OXYGEN_I2S_MCLK_256;
-	else
-		return OXYGEN_I2S_MCLK_128;
-}
-EXPORT_SYMBOL(oxygen_default_i2s_mclk);
-
 static unsigned int oxygen_i2s_bits(struct snd_pcm_hw_params *hw_params)
 {
 	if (params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE)
@@ -341,6 +333,26 @@
 	return 0;
 }
 
+static u16 get_mclk(struct oxygen *chip, unsigned int channel,
+		    struct snd_pcm_hw_params *params)
+{
+	unsigned int mclks, shift;
+
+	if (channel == PCM_MULTICH)
+		mclks = chip->model.dac_mclks;
+	else
+		mclks = chip->model.adc_mclks;
+
+	if (params_rate(params) <= 48000)
+		shift = 0;
+	else if (params_rate(params) <= 96000)
+		shift = 2;
+	else
+		shift = 4;
+
+	return OXYGEN_I2S_MCLK(mclks >> shift);
+}
+
 static int oxygen_rec_a_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
@@ -357,8 +369,8 @@
 			     OXYGEN_REC_FORMAT_A_MASK);
 	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT,
 			      oxygen_rate(hw_params) |
-			      chip->model.get_i2s_mclk(chip, PCM_A, hw_params) |
 			      chip->model.adc_i2s_format |
+			      get_mclk(chip, PCM_A, hw_params) |
 			      oxygen_i2s_bits(hw_params),
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
@@ -393,9 +405,8 @@
 	if (!is_ac97)
 		oxygen_write16_masked(chip, OXYGEN_I2S_B_FORMAT,
 				      oxygen_rate(hw_params) |
-				      chip->model.get_i2s_mclk(chip, PCM_B,
-							       hw_params) |
 				      chip->model.adc_i2s_format |
+				      get_mclk(chip, PCM_B, hw_params) |
 				      oxygen_i2s_bits(hw_params),
 				      OXYGEN_I2S_RATE_MASK |
 				      OXYGEN_I2S_FORMAT_MASK |
@@ -476,8 +487,7 @@
 	oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
 			      oxygen_rate(hw_params) |
 			      chip->model.dac_i2s_format |
-			      chip->model.get_i2s_mclk(chip, PCM_MULTICH,
-						       hw_params) |
+			      get_mclk(chip, PCM_MULTICH, hw_params) |
 			      oxygen_i2s_bits(hw_params),
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
@@ -530,7 +540,10 @@
 	oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
 	oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
 
-	chip->interrupt_mask |= channel_mask;
+	if (substream->runtime->no_period_wakeup)
+		chip->interrupt_mask &= ~channel_mask;
+	else
+		chip->interrupt_mask |= channel_mask;
 	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
 	spin_unlock_irq(&chip->reg_lock);
 	return 0;
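
get_mclk() replaces the exported oxygen_default_i2s_mclk() hook removed above: each model packs its MCLK/LRCK ratios for single-, double- and quad-speed rates into one byte with OXYGEN_MCLKS() (oxygen.h), and the PCM code decodes the 2-bit field for the current rate. A worked example with the generic model's values from oxygen.c:

	/* dac_mclks = OXYGEN_MCLKS(256, 128, 128)
	 *           = (MCLK_256 << 0) | (MCLK_128 << 2) | (MCLK_128 << 4) = 0x01
	 * adc_mclks = OXYGEN_MCLKS(256, 256, 128)                         = 0x05
	 *
	 * get_mclk(chip, PCM_MULTICH, params) with dac_mclks = 0x01:
	 *   rate <= 48000: shift 0 -> field 1 -> OXYGEN_I2S_MCLK(1) = 0x0010 (256*fs)
	 *   rate <= 96000: shift 2 -> field 0 -> OXYGEN_I2S_MCLK(0) = 0x0000 (128*fs)
	 *   rate  > 96000: shift 4 -> field 0 -> 0x0000                      (128*fs)
	 */
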
diff --git a/sound/pci/oxygen/oxygen_regs.h b/sound/pci/oxygen/oxygen_regs.h
index 4dcd41b..63dc7a0 100644
--- a/sound/pci/oxygen/oxygen_regs.h
+++ b/sound/pci/oxygen/oxygen_regs.h
@@ -139,9 +139,11 @@
 #define  OXYGEN_I2S_FORMAT_I2S		0x0000
 #define  OXYGEN_I2S_FORMAT_LJUST	0x0008
 #define  OXYGEN_I2S_MCLK_MASK		0x0030	/* MCLK/LRCK */
-#define  OXYGEN_I2S_MCLK_128		0x0000
-#define  OXYGEN_I2S_MCLK_256		0x0010
-#define  OXYGEN_I2S_MCLK_512		0x0020
+#define  OXYGEN_I2S_MCLK_SHIFT		4
+#define  MCLK_128			0
+#define  MCLK_256			1
+#define  MCLK_512			2
+#define  OXYGEN_I2S_MCLK(f)		(((f) & 3) << OXYGEN_I2S_MCLK_SHIFT)
 #define  OXYGEN_I2S_BITS_MASK		0x00c0
 #define  OXYGEN_I2S_BITS_16		0x0000
 #define  OXYGEN_I2S_BITS_20		0x0040
@@ -238,11 +240,11 @@
 #define  OXYGEN_SPI_DATA_LENGTH_MASK	0x02
 #define  OXYGEN_SPI_DATA_LENGTH_2	0x00
 #define  OXYGEN_SPI_DATA_LENGTH_3	0x02
-#define  OXYGEN_SPI_CLOCK_MASK		0xc0
+#define  OXYGEN_SPI_CLOCK_MASK		0x0c
 #define  OXYGEN_SPI_CLOCK_160		0x00	/* ns */
-#define  OXYGEN_SPI_CLOCK_320		0x40
-#define  OXYGEN_SPI_CLOCK_640		0x80
-#define  OXYGEN_SPI_CLOCK_1280		0xc0
+#define  OXYGEN_SPI_CLOCK_320		0x04
+#define  OXYGEN_SPI_CLOCK_640		0x08
+#define  OXYGEN_SPI_CLOCK_1280		0x0c
 #define  OXYGEN_SPI_CODEC_MASK		0x70	/* 0..5 */
 #define  OXYGEN_SPI_CODEC_SHIFT		4
 #define  OXYGEN_SPI_CEN_MASK		0x80
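
Two notes on these register definitions. The MCLK setting becomes a 2-bit code plus a shift so that the per-model OXYGEN_MCLKS() bytes can be decoded generically; OXYGEN_I2S_MCLK(MCLK_256) still evaluates to 0x0010, the value of the removed OXYGEN_I2S_MCLK_256, so existing register settings are unchanged. The OXYGEN_SPI_CLOCK_* values move from the high nibble to the low nibble: the old 0x40/0x80/0xc0 overlapped OXYGEN_SPI_CODEC_MASK (0x70) and OXYGEN_SPI_CEN_MASK (0x80) in the same register, while 0x04/0x08/0x0c do not.

	/* OXYGEN_SPI_CONTROL layout as defined above:
	 *   bit 1    data length (2 or 3 bytes)
	 *   bits 2-3 SPI clock (160/320/640/1280 ns per bit)
	 *   bits 4-6 codec select (0..5)
	 *   bit 7    CEN latch clock polarity
	 */
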
diff --git a/sound/pci/oxygen/xonar.h b/sound/pci/oxygen/xonar.h
index b35343b..0434c20 100644
--- a/sound/pci/oxygen/xonar.h
+++ b/sound/pci/oxygen/xonar.h
@@ -24,6 +24,8 @@
 void xonar_init_cs53x1(struct oxygen *chip);
 void xonar_set_cs53x1_params(struct oxygen *chip,
 			     struct snd_pcm_hw_params *params);
+
+#define XONAR_GPIO_BIT_INVERT	(1 << 16)
 int xonar_gpio_bit_switch_get(struct snd_kcontrol *ctl,
 			      struct snd_ctl_elem_value *value);
 int xonar_gpio_bit_switch_put(struct snd_kcontrol *ctl,
diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c
index aa27c310..2527191 100644
--- a/sound/pci/oxygen/xonar_cs43xx.c
+++ b/sound/pci/oxygen/xonar_cs43xx.c
@@ -22,29 +22,28 @@
  *
  * CMI8788:
  *
- * I²C <-> CS4398 (front)
- *     <-> CS4362A (surround, center/LFE, back)
+ *   I²C <-> CS4398 (addr 1001111) (front)
+ *       <-> CS4362A (addr 0011000) (surround, center/LFE, back)
  *
- * GPI 0 <- external power present (DX only)
+ *   GPI 0 <- external power present (DX only)
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 1 -> enable front panel I/O
- * GPIO 2 -> M0 of CS5361
- * GPIO 3 -> M1 of CS5361
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
- *
- * CS4398:
- *
- * AD0 <- 1
- * AD1 <- 1
- *
- * CS4362A:
- *
- * AD0 <- 0
+ *   GPIO 0 -> enable output to speakers
+ *   GPIO 1 -> route output to front panel
+ *   GPIO 2 -> M0 of CS5361
+ *   GPIO 3 -> M1 of CS5361
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> ?
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN  <- aux
+ *   MIC_IN  <- mic
+ *   FMIC_IN <- front mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input
  */
 
 #include <linux/pci.h>
@@ -63,6 +62,7 @@
 #define GPI_EXT_POWER		0x01
 #define GPIO_D1_OUTPUT_ENABLE	0x0001
 #define GPIO_D1_FRONT_PANEL	0x0002
+#define GPIO_D1_MAGIC		0x00c0
 #define GPIO_D1_INPUT_ROUTE	0x0100
 
 #define I2C_DEVICE_CS4398	0x9e	/* 10011, AD1=1, AD0=1, /W=0 */
@@ -169,12 +169,12 @@
 	cs43xx_registers_init(chip);
 
 	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
-			  GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
+			  GPIO_D1_FRONT_PANEL |
+			  GPIO_D1_MAGIC |
+			  GPIO_D1_INPUT_ROUTE);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
 			    GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
 
-	oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC);
-
 	xonar_init_cs53x1(chip);
 	xonar_enable_output(chip);
 
@@ -284,7 +284,7 @@
 
 static const struct snd_kcontrol_new front_panel_switch = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "Front Panel Switch",
+	.name = "Front Panel Playback Switch",
 	.info = snd_ctl_boolean_mono_info,
 	.get = xonar_gpio_bit_switch_get,
 	.put = xonar_gpio_bit_switch_put,
@@ -298,13 +298,7 @@
 		"Fast Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -380,6 +374,30 @@
 	return 0;
 }
 
+static void dump_cs4362a_registers(struct xonar_cs43xx *data,
+				   struct snd_info_buffer *buffer)
+{
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4362A:");
+	for (i = 1; i <= 14; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4362a_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_d1_registers(struct oxygen *chip,
+			      struct snd_info_buffer *buffer)
+{
+	struct xonar_cs43xx *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4398: 7?");
+	for (i = 2; i < 8; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4398_regs[i]);
+	snd_iprintf(buffer, "\n");
+	dump_cs4362a_registers(data, buffer);
+}
+
 static const struct oxygen_model model_xonar_d1 = {
 	.longname = "Asus Virtuoso 100",
 	.chip = "AV200",
@@ -388,22 +406,26 @@
 	.cleanup = xonar_d1_cleanup,
 	.suspend = xonar_d1_suspend,
 	.resume = xonar_d1_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_cs43xx_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_cs43xx_volume,
 	.update_dac_mute = update_cs43xx_mute,
 	.update_center_lfe_mix = update_cs43xx_center_lfe_mix,
 	.ac97_switch = xonar_d1_line_mic_ac97_switch,
+	.dump_registers = dump_d1_registers,
 	.dac_tlv = cs4362a_db_scale,
 	.model_data_size = sizeof(struct xonar_cs43xx),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_2,
-	.dac_channels = 8,
+			 CAPTURE_0_FROM_I2S_2 |
+			 AC97_FMIC_SWITCH,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 127 - 60,
 	.dac_volume_max = 127,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
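
On the Xonar D1/DX the front-panel microphone routing is no longer hard-wired: the unconditional oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC) is dropped from the init code above, AC97_FMIC_SWITCH is added to device_config, and the user-visible side is the "Mic Source Capture Enum" control added to oxygen_mixer.c earlier in this patch, which toggles the same CM9780_FMIC2MIC bit:

	/* "Mic Source Capture Enum" items (mic_fmic_source_info/put):
	 *   0 "Mic Jack"    -> CM9780_FMIC2MIC cleared
	 *   1 "Front Panel" -> CM9780_FMIC2MIC set
	 */
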
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
new file mode 100644
index 0000000..bc6eb58
--- /dev/null
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -0,0 +1,608 @@
+/*
+ * card driver for the Xonar DG
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ *
+ *
+ *  This driver is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2.
+ *
+ *  This driver is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this driver; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Xonar DG
+ * --------
+ *
+ * CMI8788:
+ *
+ *   SPI 0 -> CS4245
+ *
+ *   I²S 1 -> CS4245
+ *   I²S 2 -> CS4361 (center/LFE)
+ *   I²S 3 -> CS4361 (surround)
+ *   I²S 4 -> CS4361 (front)
+ *
+ *   GPIO 3 <- ?
+ *   GPIO 4 <- headphone detect
+ *   GPIO 5 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 6 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 7 -> enable rear headphone amp
+ *   GPIO 8 -> enable output to speakers
+ *
+ * CS4245:
+ *
+ *   input 1 <- aux
+ *   input 2 <- front mic
+ *   input 4 <- line/mic
+ *   DAC out -> headphones
+ *   aux out -> front panel headphones
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/info.h>
+#include <sound/pcm.h>
+#include <sound/tlv.h>
+#include "oxygen.h"
+#include "xonar_dg.h"
+#include "cs4245.h"
+
+#define GPIO_MAGIC		0x0008
+#define GPIO_HP_DETECT		0x0010
+#define GPIO_INPUT_ROUTE	0x0060
+#define GPIO_HP_REAR		0x0080
+#define GPIO_OUTPUT_ENABLE	0x0100
+
+struct dg {
+	unsigned int output_sel;
+	s8 input_vol[4][2];
+	unsigned int input_sel;
+	u8 hp_vol_att;
+	u8 cs4245_regs[0x11];
+};
+
+static void cs4245_write(struct oxygen *chip, unsigned int reg, u8 value)
+{
+	struct dg *data = chip->model_data;
+
+	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
+			 OXYGEN_SPI_DATA_LENGTH_3 |
+			 OXYGEN_SPI_CLOCK_1280 |
+			 (0 << OXYGEN_SPI_CODEC_SHIFT) |
+			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
+			 CS4245_SPI_ADDRESS |
+			 CS4245_SPI_WRITE |
+			 (reg << 8) | value);
+	data->cs4245_regs[reg] = value;
+}
+
+static void cs4245_write_cached(struct oxygen *chip, unsigned int reg, u8 value)
+{
+	struct dg *data = chip->model_data;
+
+	if (value != data->cs4245_regs[reg])
+		cs4245_write(chip, reg, value);
+}
+
+static void cs4245_registers_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	cs4245_write(chip, CS4245_POWER_CTRL, CS4245_PDN);
+	cs4245_write(chip, CS4245_DAC_CTRL_1,
+		     data->cs4245_regs[CS4245_DAC_CTRL_1]);
+	cs4245_write(chip, CS4245_ADC_CTRL,
+		     data->cs4245_regs[CS4245_ADC_CTRL]);
+	cs4245_write(chip, CS4245_SIGNAL_SEL,
+		     data->cs4245_regs[CS4245_SIGNAL_SEL]);
+	cs4245_write(chip, CS4245_PGA_B_CTRL,
+		     data->cs4245_regs[CS4245_PGA_B_CTRL]);
+	cs4245_write(chip, CS4245_PGA_A_CTRL,
+		     data->cs4245_regs[CS4245_PGA_A_CTRL]);
+	cs4245_write(chip, CS4245_ANALOG_IN,
+		     data->cs4245_regs[CS4245_ANALOG_IN]);
+	cs4245_write(chip, CS4245_DAC_A_CTRL,
+		     data->cs4245_regs[CS4245_DAC_A_CTRL]);
+	cs4245_write(chip, CS4245_DAC_B_CTRL,
+		     data->cs4245_regs[CS4245_DAC_B_CTRL]);
+	cs4245_write(chip, CS4245_DAC_CTRL_2,
+		     CS4245_DAC_SOFT | CS4245_DAC_ZERO | CS4245_INVERT_DAC);
+	cs4245_write(chip, CS4245_INT_MASK, 0);
+	cs4245_write(chip, CS4245_POWER_CTRL, 0);
+}
+
+static void cs4245_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->cs4245_regs[CS4245_DAC_CTRL_1] =
+		CS4245_DAC_FM_SINGLE | CS4245_DAC_DIF_LJUST;
+	data->cs4245_regs[CS4245_ADC_CTRL] =
+		CS4245_ADC_FM_SINGLE | CS4245_ADC_DIF_LJUST;
+	data->cs4245_regs[CS4245_SIGNAL_SEL] =
+		CS4245_A_OUT_SEL_HIZ | CS4245_ASYNCH;
+	data->cs4245_regs[CS4245_PGA_B_CTRL] = 0;
+	data->cs4245_regs[CS4245_PGA_A_CTRL] = 0;
+	data->cs4245_regs[CS4245_ANALOG_IN] =
+		CS4245_PGA_SOFT | CS4245_PGA_ZERO | CS4245_SEL_INPUT_4;
+	data->cs4245_regs[CS4245_DAC_A_CTRL] = 0;
+	data->cs4245_regs[CS4245_DAC_B_CTRL] = 0;
+	cs4245_registers_init(chip);
+	snd_component_add(chip->card, "CS4245");
+}
+
+static void dg_output_enable(struct oxygen *chip)
+{
+	msleep(2500);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
+}
+
+static void dg_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->output_sel = 0;
+	data->input_sel = 3;
+	data->hp_vol_att = 2 * 16;
+
+	cs4245_init(chip);
+
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL,
+			    GPIO_MAGIC | GPIO_HP_DETECT);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_INPUT_ROUTE | GPIO_HP_REAR | GPIO_OUTPUT_ENABLE);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
+			    GPIO_INPUT_ROUTE | GPIO_HP_REAR);
+	dg_output_enable(chip);
+}
+
+static void dg_cleanup(struct oxygen *chip)
+{
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
+}
+
+static void dg_suspend(struct oxygen *chip)
+{
+	dg_cleanup(chip);
+}
+
+static void dg_resume(struct oxygen *chip)
+{
+	cs4245_registers_init(chip);
+	dg_output_enable(chip);
+}
+
+static void set_cs4245_dac_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params)
+{
+	struct dg *data = chip->model_data;
+	u8 value;
+
+	value = data->cs4245_regs[CS4245_DAC_CTRL_1] & ~CS4245_DAC_FM_MASK;
+	if (params_rate(params) <= 50000)
+		value |= CS4245_DAC_FM_SINGLE;
+	else if (params_rate(params) <= 100000)
+		value |= CS4245_DAC_FM_DOUBLE;
+	else
+		value |= CS4245_DAC_FM_QUAD;
+	cs4245_write_cached(chip, CS4245_DAC_CTRL_1, value);
+}
+
+static void set_cs4245_adc_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params)
+{
+	struct dg *data = chip->model_data;
+	u8 value;
+
+	value = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_ADC_FM_MASK;
+	if (params_rate(params) <= 50000)
+		value |= CS4245_ADC_FM_SINGLE;
+	else if (params_rate(params) <= 100000)
+		value |= CS4245_ADC_FM_DOUBLE;
+	else
+		value |= CS4245_ADC_FM_QUAD;
+	cs4245_write_cached(chip, CS4245_ADC_CTRL, value);
+}
+
+static inline unsigned int shift_bits(unsigned int value,
+				      unsigned int shift_from,
+				      unsigned int shift_to,
+				      unsigned int mask)
+{
+	if (shift_from < shift_to)
+		return (value << (shift_to - shift_from)) & mask;
+	else
+		return (value >> (shift_from - shift_to)) & mask;
+}
+
+static unsigned int adjust_dg_dac_routing(struct oxygen *chip,
+					  unsigned int play_routing)
+{
+	return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
+	       shift_bits(play_routing,
+			  OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC1_SOURCE_MASK) |
+	       shift_bits(play_routing,
+			  OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC2_SOURCE_MASK) |
+	       shift_bits(play_routing,
+			  OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC3_SOURCE_MASK);
+}
+
+static int output_switch_info(struct snd_kcontrol *ctl,
+			      struct snd_ctl_elem_info *info)
+{
+	static const char *const names[3] = {
+		"Speakers", "Headphones", "FP Headphones"
+	};
+
+	return snd_ctl_enum_info(info, 1, 3, names);
+}
+
+static int output_switch_get(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->output_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int output_switch_put(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	u8 reg;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 2)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	changed = value->value.enumerated.item[0] != data->output_sel;
+	if (changed) {
+		data->output_sel = value->value.enumerated.item[0];
+
+		reg = data->cs4245_regs[CS4245_SIGNAL_SEL] &
+						~CS4245_A_OUT_SEL_MASK;
+		reg |= data->output_sel == 2 ?
+				CS4245_A_OUT_SEL_DAC : CS4245_A_OUT_SEL_HIZ;
+		cs4245_write_cached(chip, CS4245_SIGNAL_SEL, reg);
+
+		cs4245_write_cached(chip, CS4245_DAC_A_CTRL,
+				    data->output_sel ? data->hp_vol_att : 0);
+		cs4245_write_cached(chip, CS4245_DAC_B_CTRL,
+				    data->output_sel ? data->hp_vol_att : 0);
+
+		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+				      data->output_sel == 1 ? GPIO_HP_REAR : 0,
+				      GPIO_HP_REAR);
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int hp_volume_offset_info(struct snd_kcontrol *ctl,
+				 struct snd_ctl_elem_info *info)
+{
+	static const char *const names[3] = {
+		"< 64 ohms", "64-150 ohms", "150-300 ohms"
+	};
+
+	return snd_ctl_enum_info(info, 1, 3, names);
+}
+
+static int hp_volume_offset_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	if (data->hp_vol_att > 2 * 7)
+		value->value.enumerated.item[0] = 0;
+	else if (data->hp_vol_att > 0)
+		value->value.enumerated.item[0] = 1;
+	else
+		value->value.enumerated.item[0] = 2;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int hp_volume_offset_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	static const s8 atts[3] = { 2 * 16, 2 * 7, 0 };
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	s8 att;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 2)
+		return -EINVAL;
+	att = atts[value->value.enumerated.item[0]];
+	mutex_lock(&chip->mutex);
+	changed = att != data->hp_vol_att;
+	if (changed) {
+		data->hp_vol_att = att;
+		if (data->output_sel) {
+			cs4245_write_cached(chip, CS4245_DAC_A_CTRL, att);
+			cs4245_write_cached(chip, CS4245_DAC_B_CTRL, att);
+		}
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int input_vol_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 2;
+	info->value.integer.min = 2 * -12;
+	info->value.integer.max = 2 * 12;
+	return 0;
+}
+
+static int input_vol_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+
+	mutex_lock(&chip->mutex);
+	value->value.integer.value[0] = data->input_vol[idx][0];
+	value->value.integer.value[1] = data->input_vol[idx][1];
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int input_vol_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+	int changed = 0;
+
+	if (value->value.integer.value[0] < 2 * -12 ||
+	    value->value.integer.value[0] > 2 * 12 ||
+	    value->value.integer.value[1] < 2 * -12 ||
+	    value->value.integer.value[1] > 2 * 12)
+		return -EINVAL;
+	mutex_lock(&chip->mutex);
+	changed = data->input_vol[idx][0] != value->value.integer.value[0] ||
+		  data->input_vol[idx][1] != value->value.integer.value[1];
+	if (changed) {
+		data->input_vol[idx][0] = value->value.integer.value[0];
+		data->input_vol[idx][1] = value->value.integer.value[1];
+		if (idx == data->input_sel) {
+			cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
+					    data->input_vol[idx][0]);
+			cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
+					    data->input_vol[idx][1]);
+		}
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static DECLARE_TLV_DB_SCALE(cs4245_pga_db_scale, -1200, 50, 0);
+
+static int input_sel_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	static const char *const names[4] = {
+		"Mic", "Aux", "Front Mic", "Line"
+	};
+
+	return snd_ctl_enum_info(info, 1, 4, names);
+}
+
+static int input_sel_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->input_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int input_sel_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	static const u8 sel_values[4] = {
+		CS4245_SEL_MIC,
+		CS4245_SEL_INPUT_1,
+		CS4245_SEL_INPUT_2,
+		CS4245_SEL_INPUT_4
+	};
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 3)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	changed = value->value.enumerated.item[0] != data->input_sel;
+	if (changed) {
+		data->input_sel = value->value.enumerated.item[0];
+
+		cs4245_write(chip, CS4245_ANALOG_IN,
+			     (data->cs4245_regs[CS4245_ANALOG_IN] &
+							~CS4245_SEL_MASK) |
+			     sel_values[data->input_sel]);
+
+		cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
+				    data->input_vol[data->input_sel][0]);
+		cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
+				    data->input_vol[data->input_sel][1]);
+
+		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+				      data->input_sel ? 0 : GPIO_INPUT_ROUTE,
+				      GPIO_INPUT_ROUTE);
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "Active", "Frozen" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	value->value.enumerated.item[0] =
+		!!(data->cs4245_regs[CS4245_ADC_CTRL] & CS4245_HPF_FREEZE);
+	return 0;
+}
+
+static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	u8 reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	reg = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_HPF_FREEZE;
+	if (value->value.enumerated.item[0])
+		reg |= CS4245_HPF_FREEZE;
+	changed = reg != data->cs4245_regs[CS4245_ADC_CTRL];
+	if (changed)
+		cs4245_write(chip, CS4245_ADC_CTRL, reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+#define INPUT_VOLUME(xname, index) { \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.name = xname, \
+	.info = input_vol_info, \
+	.get = input_vol_get, \
+	.put = input_vol_put, \
+	.tlv = { .p = cs4245_pga_db_scale }, \
+	.private_value = index, \
+}
+static const struct snd_kcontrol_new dg_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Analog Output Playback Enum",
+		.info = output_switch_info,
+		.get = output_switch_get,
+		.put = output_switch_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphones Impedance Playback Enum",
+		.info = hp_volume_offset_info,
+		.get = hp_volume_offset_get,
+		.put = hp_volume_offset_put,
+	},
+	INPUT_VOLUME("Mic Capture Volume", 0),
+	INPUT_VOLUME("Aux Capture Volume", 1),
+	INPUT_VOLUME("Front Mic Capture Volume", 2),
+	INPUT_VOLUME("Line Capture Volume", 3),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Capture Source",
+		.info = input_sel_info,
+		.get = input_sel_get,
+		.put = input_sel_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "ADC High-pass Filter Capture Enum",
+		.info = hpf_info,
+		.get = hpf_get,
+		.put = hpf_put,
+	},
+};
+
+static int dg_control_filter(struct snd_kcontrol_new *template)
+{
+	if (!strncmp(template->name, "Master Playback ", 16))
+		return 1;
+	return 0;
+}
+
+static int dg_mixer_init(struct oxygen *chip)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(dg_controls); ++i) {
+		err = snd_ctl_add(chip->card,
+				  snd_ctl_new1(&dg_controls[i], chip));
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+static void dump_cs4245_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct dg *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4245:");
+	for (i = 1; i <= 0x10; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4245_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+struct oxygen_model model_xonar_dg = {
+	.shortname = "Xonar DG",
+	.longname = "C-Media Oxygen HD Audio",
+	.chip = "CMI8786",
+	.init = dg_init,
+	.control_filter = dg_control_filter,
+	.mixer_init = dg_mixer_init,
+	.cleanup = dg_cleanup,
+	.suspend = dg_suspend,
+	.resume = dg_resume,
+	.set_dac_params = set_cs4245_dac_params,
+	.set_adc_params = set_cs4245_adc_params,
+	.adjust_dac_routing = adjust_dg_dac_routing,
+	.dump_registers = dump_cs4245_registers,
+	.model_data_size = sizeof(struct dg),
+	.device_config = PLAYBACK_0_TO_I2S |
+			 PLAYBACK_1_TO_SPDIF |
+			 CAPTURE_0_FROM_I2S_2,
+	.dac_channels_pcm = 6,
+	.dac_channels_mixer = 0,
+	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+};
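
Note on the capture-source handling added above: input_sel_put() keeps a separate PGA gain pair per capture source (data->input_vol[source][channel]) and reloads it into CS4245_PGA_A/B_CTRL whenever the source changes, so switching between Mic/Aux/Front Mic/Line does not clobber per-source gains. A minimal standalone model of that caching, with made-up gain values and no hardware access (illustration only, not driver code):

	/* dg_capture_sketch.c - per-source gain caching as done by input_sel_put() above */
	#include <stdio.h>

	#define DG_SOURCES 4	/* Mic, Aux, Front Mic, Line */

	static signed char input_vol[DG_SOURCES][2];	/* cached PGA gain per source, L/R */
	static signed char pga[2];			/* stands in for CS4245_PGA_A/B_CTRL */

	static void select_source(int sel)
	{
		/* reload the gains cached for the newly selected source */
		pga[0] = input_vol[sel][0];
		pga[1] = input_vol[sel][1];
	}

	int main(void)
	{
		input_vol[0][0] = input_vol[0][1] = 12;	/* arbitrary cached Mic setting */
		input_vol[3][0] = input_vol[3][1] = 0;	/* Line left at its default */

		select_source(0);
		printf("Mic selected:  PGA %d/%d\n", pga[0], pga[1]);
		select_source(3);
		printf("Line selected: PGA %d/%d\n", pga[0], pga[1]);
		return 0;
	}
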
diff --git a/sound/pci/oxygen/xonar_dg.h b/sound/pci/oxygen/xonar_dg.h
new file mode 100644
index 0000000..5688d78
--- /dev/null
+++ b/sound/pci/oxygen/xonar_dg.h
@@ -0,0 +1,8 @@
+#ifndef XONAR_DG_H_INCLUDED
+#define XONAR_DG_H_INCLUDED
+
+#include "oxygen.h"
+
+extern struct oxygen_model model_xonar_dg;
+
+#endif
diff --git a/sound/pci/oxygen/xonar_hdmi.c b/sound/pci/oxygen/xonar_hdmi.c
index b12db1f..136dac6a 100644
--- a/sound/pci/oxygen/xonar_hdmi.c
+++ b/sound/pci/oxygen/xonar_hdmi.c
@@ -1,5 +1,5 @@
 /*
- * helper functions for HDMI models (Xonar HDAV1.3)
+ * helper functions for HDMI models (Xonar HDAV1.3/HDAV1.3 Slim)
  *
  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
  *
diff --git a/sound/pci/oxygen/xonar_lib.c b/sound/pci/oxygen/xonar_lib.c
index b3ff713..0ebe7f5 100644
--- a/sound/pci/oxygen/xonar_lib.c
+++ b/sound/pci/oxygen/xonar_lib.c
@@ -104,9 +104,10 @@
 {
 	struct oxygen *chip = ctl->private_data;
 	u16 bit = ctl->private_value;
+	bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT;
 
 	value->value.integer.value[0] =
-		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit);
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit) ^ invert;
 	return 0;
 }
 
@@ -115,12 +116,13 @@
 {
 	struct oxygen *chip = ctl->private_data;
 	u16 bit = ctl->private_value;
+	bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT;
 	u16 old_bits, new_bits;
 	int changed;
 
 	spin_lock_irq(&chip->reg_lock);
 	old_bits = oxygen_read16(chip, OXYGEN_GPIO_DATA);
-	if (value->value.integer.value[0])
+	if (!!value->value.integer.value[0] ^ invert)
 		new_bits = old_bits | bit;
 	else
 		new_bits = old_bits & ~bit;
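
The xonar_lib.c change above packs an optional invert flag into the control's private_value alongside the GPIO mask: the low 16 bits select the GPIO line, and both the get and put paths XOR with the flag so a control can report "on" for an active-low line. A standalone sketch of that packing; the real driver defines XONAR_GPIO_BIT_INVERT in xonar.h, and the flag position used here is an assumption for illustration:

	/* gpio_invert_sketch.c - illustration only, not driver code */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define GPIO_BIT_INVERT	(1u << 16)	/* assumed flag position, above the 16 GPIO bits */

	static uint16_t gpio_data;		/* stands in for the OXYGEN_GPIO_DATA register */

	/* mirrors xonar_gpio_bit_switch_get(): report the bit, flipped if inverted */
	static int gpio_switch_get(uint32_t private_value)
	{
		uint16_t bit = private_value;			/* low 16 bits = GPIO mask */
		bool invert = private_value & GPIO_BIT_INVERT;

		return !!(gpio_data & bit) ^ invert;
	}

	/* mirrors xonar_gpio_bit_switch_put(): set or clear the bit, honouring the flag */
	static void gpio_switch_put(uint32_t private_value, int on)
	{
		uint16_t bit = private_value;
		bool invert = private_value & GPIO_BIT_INVERT;

		if (!!on ^ invert)
			gpio_data |= bit;
		else
			gpio_data &= ~bit;
	}

	int main(void)
	{
		/* e.g. an active-low "enable" line on GPIO 0 */
		uint32_t pv = 0x0001 | GPIO_BIT_INVERT;

		gpio_switch_put(pv, 1);		/* switch "on" clears the bit */
		printf("gpio_data=0x%04x get=%d\n", (unsigned)gpio_data, gpio_switch_get(pv));
		gpio_switch_put(pv, 0);		/* switch "off" sets it again */
		printf("gpio_data=0x%04x get=%d\n", (unsigned)gpio_data, gpio_switch_get(pv));
		return 0;
	}
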
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
index d491fd6..54cad38 100644
--- a/sound/pci/oxygen/xonar_pcm179x.c
+++ b/sound/pci/oxygen/xonar_pcm179x.c
@@ -22,20 +22,26 @@
  *
  * CMI8788:
  *
- * SPI 0 -> 1st PCM1796 (front)
- * SPI 1 -> 2nd PCM1796 (surround)
- * SPI 2 -> 3rd PCM1796 (center/LFE)
- * SPI 4 -> 4th PCM1796 (back)
+ *   SPI 0 -> 1st PCM1796 (front)
+ *   SPI 1 -> 2nd PCM1796 (surround)
+ *   SPI 2 -> 3rd PCM1796 (center/LFE)
+ *   SPI 4 -> 4th PCM1796 (back)
  *
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 5 <- external power present (D2X only)
- * GPIO 7 -> ALT
- * GPIO 8 -> enable output to speakers
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 5 <- external power present (D2X only)
+ *   GPIO 7 -> ALT
+ *   GPIO 8 -> enable output to speakers
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN   <- aux
+ *   VIDEO_IN <- CD
+ *   FMIC_IN  <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  */
 
 /*
@@ -44,52 +50,53 @@
  *
  * CMI8788:
  *
- * I²C <-> PCM1796 (front)
+ *   I²C <-> PCM1796 (addr 1001100) (front)
  *
- * GPI 0 <- external power present
+ *   GPI 0 <- external power present
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 0 -> enable HDMI (0) or speaker (1) output
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 <- daughterboard detection
+ *   GPIO 5 <- daughterboard detection
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> ?
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
- * TXD -> HDMI controller
- * RXD <- HDMI controller
- *
- * PCM1796 front: AD1,0 <- 0,0
+ *   UART <-> HDMI controller
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   CD_IN  <- CD
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  *
  * no daughterboard
  * ----------------
  *
- * GPIO 4 <- 1
+ *   GPIO 4 <- 1
  *
  * H6 daughterboard
  * ----------------
  *
- * GPIO 4 <- 0
- * GPIO 5 <- 0
+ *   GPIO 4 <- 0
+ *   GPIO 5 <- 0
  *
- * I²C <-> PCM1796 (surround)
- *     <-> PCM1796 (center/LFE)
- *     <-> PCM1796 (back)
- *
- * PCM1796 surround:   AD1,0 <- 0,1
- * PCM1796 center/LFE: AD1,0 <- 1,0
- * PCM1796 back:       AD1,0 <- 1,1
+ *   I²C <-> PCM1796 (addr 1001101) (surround)
+ *       <-> PCM1796 (addr 1001110) (center/LFE)
+ *       <-> PCM1796 (addr 1001111) (back)
  *
  * unknown daughterboard
  * ---------------------
  *
- * GPIO 4 <- 0
- * GPIO 5 <- 1
+ *   GPIO 4 <- 0
+ *   GPIO 5 <- 1
  *
- * I²C <-> CS4362A (surround, center/LFE, back)
- *
- * CS4362A: AD0 <- 0
+ *   I²C <-> CS4362A (addr 0011000) (surround, center/LFE, back)
  */
 
 /*
@@ -98,32 +105,35 @@
  *
  * CMI8788:
  *
- * I²C <-> PCM1792A
- *     <-> CS2000 (ST only)
+ *   I²C <-> PCM1792A (addr 1001100)
+ *       <-> CS2000 (addr 1001110) (ST only)
  *
- * ADC1 MCLK -> REF_CLK of CS2000 (ST only)
+ *   ADC1 MCLK -> REF_CLK of CS2000 (ST only)
  *
- * GPI 0 <- external power present (STX only)
+ *   GPI 0 <- external power present (STX only)
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 1 -> route HP to front panel (0) or rear jack (1)
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 7 -> route output to speaker jacks (0) or HP (1)
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 0 -> enable output to speakers
+ *   GPIO 1 -> route HP to front panel (0) or rear jack (1)
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 <- daughterboard detection
+ *   GPIO 5 <- daughterboard detection
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> route output to speaker jacks (0) or HP (1)
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
  * PCM1792A:
  *
- * AD1,0 <- 0,0
- * SCK <- CLK_OUT of CS2000 (ST only)
- *
- * CS2000:
- *
- * AD0 <- 0
+ *   SCK <- CLK_OUT of CS2000 (ST only)
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  *
  * H6 daughterboard
  * ----------------
@@ -133,15 +143,39 @@
  */
 
 /*
- * Xonar HDAV1.3 Slim
- * ------------------
+ * Xonar Xense
+ * -----------
  *
  * CMI8788:
  *
- * GPIO 1 -> enable output
+ *   I²C <-> PCM1796 (addr 1001100) (front)
+ *       <-> CS4362A (addr 0011000) (surround, center/LFE, back)
+ *       <-> CS2000 (addr 1001110)
  *
- * TXD -> HDMI controller
- * RXD <- HDMI controller
+ *   ADC1 MCLK -> REF_CLK of CS2000
+ *
+ *   GPI 0 <- external power present
+ *
+ *   GPIO 0 -> enable output
+ *   GPIO 1 -> route HP to front panel (0) or rear jack (1)
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 -> enable output
+ *   GPIO 5 -> enable output
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> route output to HP (0) or speaker (1)
+ *   GPIO 8 -> route input jack to mic-in (0) or line-in (1)
+ *
+ * CM9780:
+ *
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN   <- aux
+ *   VIDEO_IN <- ?
+ *   FMIC_IN  <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   GPO 1 -> route mic-in from input jack (0) or front panel header (1)
  */
 
 #include <linux/pci.h>
@@ -150,6 +184,7 @@
 #include <sound/ac97_codec.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
@@ -167,12 +202,14 @@
 #define GPIO_INPUT_ROUTE	0x0100
 
 #define GPIO_HDAV_OUTPUT_ENABLE	0x0001
+#define GPIO_HDAV_MAGIC		0x00c0
 
 #define GPIO_DB_MASK		0x0030
 #define GPIO_DB_H6		0x0000
 
 #define GPIO_ST_OUTPUT_ENABLE	0x0001
 #define GPIO_ST_HP_REAR		0x0002
+#define GPIO_ST_MAGIC		0x0040
 #define GPIO_ST_HP		0x0080
 
 #define I2C_DEVICE_PCM1796(i)	(0x98 + ((i) << 1))	/* 10011, ii, /W=0 */
@@ -186,11 +223,12 @@
 	unsigned int dacs;
 	u8 pcm1796_regs[4][5];
 	unsigned int current_rate;
-	bool os_128;
+	bool h6;
 	bool hp_active;
 	s8 hp_gain_offset;
 	bool has_cs2000;
-	u8 cs2000_fun_cfg_1;
+	u8 cs2000_regs[0x1f];
+	bool broken_i2c;
 };
 
 struct xonar_hdav {
@@ -249,16 +287,14 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	oxygen_write_i2c(chip, I2C_DEVICE_CS2000, reg, value);
-	if (reg == CS2000_FUN_CFG_1)
-		data->cs2000_fun_cfg_1 = value;
+	data->cs2000_regs[reg] = value;
 }
 
 static void cs2000_write_cached(struct oxygen *chip, u8 reg, u8 value)
 {
 	struct xonar_pcm179x *data = chip->model_data;
 
-	if (reg != CS2000_FUN_CFG_1 ||
-	    value != data->cs2000_fun_cfg_1)
+	if (value != data->cs2000_regs[reg])
 		cs2000_write(chip, reg, value);
 }
 
@@ -268,6 +304,7 @@
 	unsigned int i;
 	s8 gain_offset;
 
+	msleep(1);
 	gain_offset = data->hp_active ? data->hp_gain_offset : 0;
 	for (i = 0; i < data->dacs; ++i) {
 		/* set ATLD before ATL/ATR */
@@ -282,6 +319,7 @@
 		pcm1796_write(chip, i, 20,
 			      data->pcm1796_regs[0][20 - PCM1796_REG_BASE]);
 		pcm1796_write(chip, i, 21, 0);
+		gain_offset = 0;
 	}
 }
 
@@ -290,10 +328,11 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->pcm1796_regs[0][18 - PCM1796_REG_BASE] = PCM1796_MUTE |
-		PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD;
+		PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
 	data->pcm1796_regs[0][19 - PCM1796_REG_BASE] =
 		PCM1796_FLT_SHARP | PCM1796_ATS_1;
-	data->pcm1796_regs[0][20 - PCM1796_REG_BASE] = PCM1796_OS_64;
+	data->pcm1796_regs[0][20 - PCM1796_REG_BASE] =
+		data->h6 ? PCM1796_OS_64 : PCM1796_OS_128;
 	pcm1796_registers_init(chip);
 	data->current_rate = 48000;
 }
@@ -339,18 +378,20 @@
 	oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS,
 		       OXYGEN_2WIRE_LENGTH_8 |
 		       OXYGEN_2WIRE_INTERRUPT_MASK |
-		       OXYGEN_2WIRE_SPEED_FAST);
+		       OXYGEN_2WIRE_SPEED_STANDARD);
 
 	data->pcm179x.generic.anti_pop_delay = 100;
 	data->pcm179x.generic.output_enable_bit = GPIO_HDAV_OUTPUT_ENABLE;
 	data->pcm179x.generic.ext_power_reg = OXYGEN_GPI_DATA;
 	data->pcm179x.generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
 	data->pcm179x.generic.ext_power_bit = GPI_EXT_POWER;
-	data->pcm179x.dacs = chip->model.private_data ? 4 : 1;
+	data->pcm179x.dacs = chip->model.dac_channels_mixer / 2;
+	data->pcm179x.h6 = chip->model.dac_channels_mixer > 2;
 
 	pcm1796_init(chip);
 
-	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_INPUT_ROUTE);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_HDAV_MAGIC | GPIO_INPUT_ROUTE);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE);
 
 	xonar_init_cs53x1(chip);
@@ -367,7 +408,7 @@
 	oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS,
 		       OXYGEN_2WIRE_LENGTH_8 |
 		       OXYGEN_2WIRE_INTERRUPT_MASK |
-		       OXYGEN_2WIRE_SPEED_FAST);
+		       OXYGEN_2WIRE_SPEED_STANDARD);
 }
 
 static void xonar_st_init_common(struct oxygen *chip)
@@ -375,13 +416,14 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->generic.output_enable_bit = GPIO_ST_OUTPUT_ENABLE;
-	data->dacs = chip->model.private_data ? 4 : 1;
+	data->dacs = chip->model.dac_channels_mixer / 2;
 	data->hp_gain_offset = 2*-18;
 
 	pcm1796_init(chip);
 
 	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
-			  GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP);
+			  GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR |
+			  GPIO_ST_MAGIC | GPIO_ST_HP);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
 			    GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP);
 
@@ -410,9 +452,11 @@
 	cs2000_write(chip, CS2000_RATIO_0 + 1, 0x10);
 	cs2000_write(chip, CS2000_RATIO_0 + 2, 0x00);
 	cs2000_write(chip, CS2000_RATIO_0 + 3, 0x00);
-	cs2000_write(chip, CS2000_FUN_CFG_1, data->cs2000_fun_cfg_1);
+	cs2000_write(chip, CS2000_FUN_CFG_1,
+		     data->cs2000_regs[CS2000_FUN_CFG_1]);
 	cs2000_write(chip, CS2000_FUN_CFG_2, 0);
 	cs2000_write(chip, CS2000_GLOBAL_CFG, CS2000_EN_DEV_CFG_2);
+	msleep(3); /* PLL lock delay */
 }
 
 static void xonar_st_init(struct oxygen *chip)
@@ -420,13 +464,18 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->generic.anti_pop_delay = 100;
+	data->h6 = chip->model.dac_channels_mixer > 2;
 	data->has_cs2000 = 1;
-	data->cs2000_fun_cfg_1 = CS2000_REF_CLK_DIV_1;
+	data->cs2000_regs[CS2000_FUN_CFG_1] = CS2000_REF_CLK_DIV_1;
+	data->broken_i2c = true;
 
 	oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-		       OXYGEN_RATE_48000 | OXYGEN_I2S_FORMAT_I2S |
-		       OXYGEN_I2S_MCLK_128 | OXYGEN_I2S_BITS_16 |
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+		       OXYGEN_RATE_48000 |
+		       OXYGEN_I2S_FORMAT_I2S |
+		       OXYGEN_I2S_MCLK(data->h6 ? MCLK_256 : MCLK_512) |
+		       OXYGEN_I2S_BITS_16 |
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_BCLK_64);
 
 	xonar_st_init_i2c(chip);
 	cs2000_registers_init(chip);
@@ -507,44 +556,16 @@
 	xonar_stx_resume(chip);
 }
 
-static unsigned int mclk_from_rate(struct oxygen *chip, unsigned int rate)
-{
-	struct xonar_pcm179x *data = chip->model_data;
-
-	if (rate <= 32000)
-		return OXYGEN_I2S_MCLK_512;
-	else if (rate <= 48000 && data->os_128)
-		return OXYGEN_I2S_MCLK_512;
-	else if (rate <= 96000)
-		return OXYGEN_I2S_MCLK_256;
-	else
-		return OXYGEN_I2S_MCLK_128;
-}
-
-static unsigned int get_pcm1796_i2s_mclk(struct oxygen *chip,
-					 unsigned int channel,
-					 struct snd_pcm_hw_params *params)
-{
-	if (channel == PCM_MULTICH)
-		return mclk_from_rate(chip, params_rate(params));
-	else
-		return oxygen_default_i2s_mclk(chip, channel, params);
-}
-
 static void update_pcm1796_oversampling(struct oxygen *chip)
 {
 	struct xonar_pcm179x *data = chip->model_data;
 	unsigned int i;
 	u8 reg;
 
-	if (data->current_rate <= 32000)
+	if (data->current_rate <= 48000 && !data->h6)
 		reg = PCM1796_OS_128;
-	else if (data->current_rate <= 48000 && data->os_128)
-		reg = PCM1796_OS_128;
-	else if (data->current_rate <= 96000 || data->os_128)
-		reg = PCM1796_OS_64;
 	else
-		reg = PCM1796_OS_32;
+		reg = PCM1796_OS_64;
 	for (i = 0; i < data->dacs; ++i)
 		pcm1796_write_cached(chip, i, 20, reg);
 }
@@ -554,6 +575,7 @@
 {
 	struct xonar_pcm179x *data = chip->model_data;
 
+	msleep(1);
 	data->current_rate = params_rate(params);
 	update_pcm1796_oversampling(chip);
 }
@@ -570,6 +592,7 @@
 				     + gain_offset);
 		pcm1796_write_cached(chip, i, 17, chip->dac_volume[i * 2 + 1]
 				     + gain_offset);
+		gain_offset = 0;
 	}
 }
 
@@ -579,7 +602,7 @@
 	unsigned int i;
 	u8 value;
 
-	value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD;
+	value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
 	if (chip->dac_mute)
 		value |= PCM1796_MUTE;
 	for (i = 0; i < data->dacs; ++i)
@@ -592,45 +615,35 @@
 	u8 rate_mclk, reg;
 
 	switch (rate) {
-		/* XXX Why is the I2S A MCLK half the actual I2S MCLK? */
 	case 32000:
-		rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256;
+	case 64000:
+		rate_mclk = OXYGEN_RATE_32000;
 		break;
 	case 44100:
-		if (data->os_128)
-			rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
-		else
-			rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_128;
-		break;
-	default: /* 48000 */
-		if (data->os_128)
-			rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
-		else
-			rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_128;
-		break;
-	case 64000:
-		rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256;
-		break;
 	case 88200:
-		rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
-		break;
-	case 96000:
-		rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
-		break;
 	case 176400:
-		rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
+		rate_mclk = OXYGEN_RATE_44100;
 		break;
+	default:
+	case 48000:
+	case 96000:
 	case 192000:
-		rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
+		rate_mclk = OXYGEN_RATE_48000;
 		break;
 	}
+
+	if (rate <= 96000 && (rate > 48000 || data->h6)) {
+		rate_mclk |= OXYGEN_I2S_MCLK(MCLK_256);
+		reg = CS2000_REF_CLK_DIV_1;
+	} else {
+		rate_mclk |= OXYGEN_I2S_MCLK(MCLK_512);
+		reg = CS2000_REF_CLK_DIV_2;
+	}
+
 	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT, rate_mclk,
 			      OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_MCLK_MASK);
-	if ((rate_mclk & OXYGEN_I2S_MCLK_MASK) <= OXYGEN_I2S_MCLK_128)
-		reg = CS2000_REF_CLK_DIV_1;
-	else
-		reg = CS2000_REF_CLK_DIV_2;
 	cs2000_write_cached(chip, CS2000_FUN_CFG_1, reg);
+	msleep(3); /* PLL lock delay */
 }
 
 static void set_st_params(struct oxygen *chip,
@@ -665,13 +678,7 @@
 		"Sharp Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
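
Several hunks in this patch replace the hand-rolled enumerated-info boilerplate (set type and count, clamp the requested item, copy its name) with a single snd_ctl_enum_info() call, as in rolloff_info() above. A minimal stand-in showing what that helper has to do, using a simplified struct in place of the real struct snd_ctl_elem_info (the field names here are illustrative, not the kernel's):

	/* enum_info_sketch.c - what the snd_ctl_enum_info() conversions factor out */
	#include <stdio.h>
	#include <string.h>

	/* simplified stand-in for the enumerated part of struct snd_ctl_elem_info */
	struct elem_info {
		unsigned int count;	/* number of values in the control */
		unsigned int items;	/* number of enum entries */
		unsigned int item;	/* entry whose name is being queried */
		char name[64];
	};

	/* mirrors the removed open-coded pattern: clamp the index, copy the name */
	static int enum_info(struct elem_info *info, unsigned int channels,
			     unsigned int items, const char *const names[])
	{
		info->count = channels;
		info->items = items;
		if (info->item >= items)
			info->item = items - 1;
		strcpy(info->name, names[info->item]);
		return 0;
	}

	int main(void)
	{
		static const char *const names[2] = { "Sharp Roll-off", "Slow Roll-off" };
		struct elem_info info = { .item = 7 };	/* out-of-range query gets clamped */

		enum_info(&info, 1, 2, names);
		printf("%u items, item %u = \"%s\"\n", info.items, info.item, info.name);
		return 0;
	}
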
@@ -719,57 +726,13 @@
 	.put = rolloff_put,
 };
 
-static int os_128_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
-{
-	static const char *const names[2] = { "64x", "128x" };
-
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
-}
-
-static int os_128_get(struct snd_kcontrol *ctl,
-		      struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct xonar_pcm179x *data = chip->model_data;
-
-	value->value.enumerated.item[0] = data->os_128;
-	return 0;
-}
-
-static int os_128_put(struct snd_kcontrol *ctl,
-		      struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct xonar_pcm179x *data = chip->model_data;
-	int changed;
-
-	mutex_lock(&chip->mutex);
-	changed = value->value.enumerated.item[0] != data->os_128;
-	if (changed) {
-		data->os_128 = value->value.enumerated.item[0];
-		if (data->has_cs2000)
-			update_cs2000_rate(chip, data->current_rate);
-		oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
-				      mclk_from_rate(chip, data->current_rate),
-				      OXYGEN_I2S_MCLK_MASK);
-		update_pcm1796_oversampling(chip);
-	}
-	mutex_unlock(&chip->mutex);
-	return changed;
-}
-
-static const struct snd_kcontrol_new os_128_control = {
+static const struct snd_kcontrol_new hdav_hdmi_control = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "DAC Oversampling Playback Enum",
-	.info = os_128_info,
-	.get = os_128_get,
-	.put = os_128_put,
+	.name = "HDMI Playback Switch",
+	.info = snd_ctl_boolean_mono_info,
+	.get = xonar_gpio_bit_switch_get,
+	.put = xonar_gpio_bit_switch_put,
+	.private_value = GPIO_HDAV_OUTPUT_ENABLE | XONAR_GPIO_BIT_INVERT,
 };
 
 static int st_output_switch_info(struct snd_kcontrol *ctl,
@@ -779,13 +742,7 @@
 		"Speakers", "Headphones", "FP Headphones"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item >= 3)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int st_output_switch_get(struct snd_kcontrol *ctl,
@@ -840,13 +797,7 @@
 		"< 64 ohms", "64-300 ohms", "300-600 ohms"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int st_hp_volume_offset_get(struct snd_kcontrol *ctl,
@@ -928,16 +879,25 @@
 	return 0;
 }
 
+static int xonar_st_h6_control_filter(struct snd_kcontrol_new *template)
+{
+	if (!strncmp(template->name, "Master Playback ", 16))
+		/* no volume/mute, as I²C to the third DAC does not work */
+		return 1;
+	return 0;
+}
+
 static int add_pcm1796_controls(struct oxygen *chip)
 {
+	struct xonar_pcm179x *data = chip->model_data;
 	int err;
 
-	err = snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip));
-	if (err < 0)
-		return err;
-	err = snd_ctl_add(chip->card, snd_ctl_new1(&os_128_control, chip));
-	if (err < 0)
-		return err;
+	if (!data->broken_i2c) {
+		err = snd_ctl_add(chip->card,
+				  snd_ctl_new1(&rolloff_control, chip));
+		if (err < 0)
+			return err;
+	}
 	return 0;
 }
 
@@ -956,7 +916,15 @@
 
 static int xonar_hdav_mixer_init(struct oxygen *chip)
 {
-	return add_pcm1796_controls(chip);
+	int err;
+
+	err = snd_ctl_add(chip->card, snd_ctl_new1(&hdav_hdmi_control, chip));
+	if (err < 0)
+		return err;
+	err = add_pcm1796_controls(chip);
+	if (err < 0)
+		return err;
+	return 0;
 }
 
 static int xonar_st_mixer_init(struct oxygen *chip)
@@ -976,6 +944,45 @@
 	return 0;
 }
 
+static void dump_pcm1796_registers(struct oxygen *chip,
+				   struct snd_info_buffer *buffer)
+{
+	struct xonar_pcm179x *data = chip->model_data;
+	unsigned int dac, i;
+
+	for (dac = 0; dac < data->dacs; ++dac) {
+		snd_iprintf(buffer, "\nPCM1796 %u:", dac + 1);
+		for (i = 0; i < 5; ++i)
+			snd_iprintf(buffer, " %02x",
+				    data->pcm1796_regs[dac][i]);
+	}
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_cs2000_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_pcm179x *data = chip->model_data;
+	unsigned int i;
+
+	if (data->has_cs2000) {
+		snd_iprintf(buffer, "\nCS2000:\n00:   ");
+		for (i = 1; i < 0x10; ++i)
+			snd_iprintf(buffer, " %02x", data->cs2000_regs[i]);
+		snd_iprintf(buffer, "\n10:");
+		for (i = 0x10; i < 0x1f; ++i)
+			snd_iprintf(buffer, " %02x", data->cs2000_regs[i]);
+		snd_iprintf(buffer, "\n");
+	}
+}
+
+static void dump_st_registers(struct oxygen *chip,
+			      struct snd_info_buffer *buffer)
+{
+	dump_pcm1796_registers(chip, buffer);
+	dump_cs2000_registers(chip, buffer);
+}
+
 static const struct oxygen_model model_xonar_d2 = {
 	.longname = "Asus Virtuoso 200",
 	.chip = "AV200",
@@ -985,11 +992,11 @@
 	.cleanup = xonar_d2_cleanup,
 	.suspend = xonar_d2_suspend,
 	.resume = xonar_d2_resume,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_pcm1796_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
+	.dump_registers = dump_pcm1796_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_pcm179x),
 	.device_config = PLAYBACK_0_TO_I2S |
@@ -999,13 +1006,16 @@
 			 MIDI_OUTPUT |
 			 MIDI_INPUT |
 			 AC97_CD_INPUT,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.misc_flags = OXYGEN_MISC_MIDI,
 	.function_flags = OXYGEN_FUNCTION_SPI |
 			  OXYGEN_FUNCTION_ENABLE_SPI_4_5,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1018,25 +1028,28 @@
 	.suspend = xonar_hdav_suspend,
 	.resume = xonar_hdav_resume,
 	.pcm_hardware_filter = xonar_hdmi_pcm_hardware_filter,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_hdav_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
 	.uart_input = xonar_hdmi_uart_input,
 	.ac97_switch = xonar_line_mic_ac97_switch,
+	.dump_registers = dump_pcm1796_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_hdav),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
 			 CAPTURE_0_FROM_I2S_2 |
 			 CAPTURE_1_FROM_SPDIF,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 2,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.misc_flags = OXYGEN_MISC_MIDI,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1048,22 +1061,26 @@
 	.cleanup = xonar_st_cleanup,
 	.suspend = xonar_st_suspend,
 	.resume = xonar_st_resume,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_st_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
 	.ac97_switch = xonar_line_mic_ac97_switch,
+	.dump_registers = dump_st_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_pcm179x),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_2,
-	.dac_channels = 2,
+			 CAPTURE_0_FROM_I2S_2 |
+			 AC97_FMIC_SWITCH,
+	.dac_channels_pcm = 2,
+	.dac_channels_mixer = 2,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1089,7 +1106,8 @@
 			break;
 		case GPIO_DB_H6:
 			chip->model.shortname = "Xonar HDAV1.3+H6";
-			chip->model.private_data = 1;
+			chip->model.dac_channels_mixer = 8;
+			chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128);
 			break;
 		}
 		break;
@@ -1102,8 +1120,10 @@
 			break;
 		case GPIO_DB_H6:
 			chip->model.shortname = "Xonar ST+H6";
-			chip->model.dac_channels = 8;
-			chip->model.private_data = 1;
+			chip->model.control_filter = xonar_st_h6_control_filter;
+			chip->model.dac_channels_pcm = 8;
+			chip->model.dac_channels_mixer = 8;
+			chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128);
 			break;
 		}
 		break;
@@ -1114,9 +1134,6 @@
 		chip->model.resume = xonar_stx_resume;
 		chip->model.set_dac_params = set_pcm1796_params;
 		break;
-	case 0x835e:
-		snd_printk(KERN_ERR "the HDAV1.3 Slim is not supported\n");
-		return -ENODEV;
 	default:
 		return -EINVAL;
 	}
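
The PCM179x clock rework above boils down to two small decisions: the DAC oversampling drops from 128x to 64x above 48 kHz or whenever an H6 daughterboard is fitted, and on the ST the CS2000 reference-clock divider tracks the MCLK selector chosen in update_cs2000_rate(). The sketch below mirrors those two branches with plain integers; how the MCLK_256/MCLK_512 selectors map to an actual ratio depends on the rate range, so the numbers are echoed as-is rather than claimed as multiples of fs:

	/* pcm179x_clock_sketch.c - the rate/H6 decision logic from the patch */
	#include <stdbool.h>
	#include <stdio.h>

	/* mirrors update_pcm1796_oversampling(): 128x up to 48 kHz unless an H6 board is fitted */
	static int pcm1796_oversampling(unsigned int rate, bool h6)
	{
		return (rate <= 48000 && !h6) ? 128 : 64;
	}

	/* mirrors update_cs2000_rate(): MCLK selector and CS2000 reference-clock divider */
	static void cs2000_clocking(unsigned int rate, bool h6, int *mclk, int *ref_div)
	{
		if (rate <= 96000 && (rate > 48000 || h6)) {
			*mclk = 256;
			*ref_div = 1;
		} else {
			*mclk = 512;
			*ref_div = 2;
		}
	}

	int main(void)
	{
		static const unsigned int rates[] = { 44100, 48000, 96000, 192000 };
		unsigned int i;
		int mclk, ref_div;

		for (i = 0; i < sizeof(rates) / sizeof(rates[0]); ++i) {
			cs2000_clocking(rates[i], false, &mclk, &ref_div);
			printf("%6u Hz: OS %3dx (stock) / %3dx (H6), MCLK_%d, REF_CLK/%d\n",
			       rates[i],
			       pcm1796_oversampling(rates[i], false),
			       pcm1796_oversampling(rates[i], true),
			       mclk, ref_div);
		}
		return 0;
	}
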
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index 200f760..42d1ab1 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -1,5 +1,5 @@
 /*
- * card driver for models with WM8776/WM8766 DACs (Xonar DS)
+ * card driver for models with WM8776/WM8766 DACs (Xonar DS/HDAV1.3 Slim)
  *
  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
  *
@@ -22,26 +22,48 @@
  *
  * CMI8788:
  *
- * SPI 0 -> WM8766 (surround, center/LFE, back)
- * SPI 1 -> WM8776 (front, input)
+ *   SPI 0 -> WM8766 (surround, center/LFE, back)
+ *   SPI 1 -> WM8776 (front, input)
  *
- * GPIO 4 <- headphone detect, 0 = plugged
- * GPIO 6 -> route input jack to mic-in (0) or line-in (1)
- * GPIO 7 -> enable output to front L/R speaker channels
- * GPIO 8 -> enable output to other speaker channels and front panel headphone
+ *   GPIO 4 <- headphone detect, 0 = plugged
+ *   GPIO 6 -> route input jack to mic-in (0) or line-in (1)
+ *   GPIO 7 -> enable output to front L/R speaker channels
+ *   GPIO 8 -> enable output to other speaker channels and front panel headphone
  *
- * WM8766:
+ * WM8776:
  *
- * input 1 <- line
- * input 2 <- mic
- * input 3 <- front mic
- * input 4 <- aux
+ *   input 1 <- line
+ *   input 2 <- mic
+ *   input 3 <- front mic
+ *   input 4 <- aux
+ */
+
+/*
+ * Xonar HDAV1.3 Slim
+ * ------------------
+ *
+ * CMI8788:
+ *
+ *   I²C <-> WM8776 (addr 0011010)
+ *
+ *   GPIO 0  -> disable HDMI output
+ *   GPIO 1  -> enable HP output
+ *   GPIO 6  -> firmware EEPROM I²C clock
+ *   GPIO 7 <-> firmware EEPROM I²C data
+ *
+ *   UART <-> HDMI controller
+ *
+ * WM8776:
+ *
+ *   input 1 <- mic
+ *   input 2 <- aux
  */
 
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -55,6 +77,13 @@
 #define GPIO_DS_OUTPUT_FRONTLR	0x0080
 #define GPIO_DS_OUTPUT_ENABLE	0x0100
 
+#define GPIO_SLIM_HDMI_DISABLE	0x0001
+#define GPIO_SLIM_OUTPUT_ENABLE	0x0002
+#define GPIO_SLIM_FIRMWARE_CLK	0x0040
+#define GPIO_SLIM_FIRMWARE_DATA	0x0080
+
+#define I2C_DEVICE_WM8776	0x34	/* 001101, 0, /W=0 */
+
 #define LC_CONTROL_LIMITER	0x40000000
 #define LC_CONTROL_ALC		0x20000000
 
@@ -66,19 +95,37 @@
 	struct snd_kcontrol *mic_adcmux_control;
 	struct snd_kcontrol *lc_controls[13];
 	struct snd_jack *hp_jack;
+	struct xonar_hdmi hdmi;
 };
 
-static void wm8776_write(struct oxygen *chip,
-			 unsigned int reg, unsigned int value)
+static void wm8776_write_spi(struct oxygen *chip,
+			     unsigned int reg, unsigned int value)
 {
-	struct xonar_wm87x6 *data = chip->model_data;
-
 	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
 			 OXYGEN_SPI_DATA_LENGTH_2 |
 			 OXYGEN_SPI_CLOCK_160 |
 			 (1 << OXYGEN_SPI_CODEC_SHIFT) |
 			 OXYGEN_SPI_CEN_LATCH_CLOCK_LO,
 			 (reg << 9) | value);
+}
+
+static void wm8776_write_i2c(struct oxygen *chip,
+			     unsigned int reg, unsigned int value)
+{
+	oxygen_write_i2c(chip, I2C_DEVICE_WM8776,
+			 (reg << 1) | (value >> 8), value);
+}
+
+static void wm8776_write(struct oxygen *chip,
+			 unsigned int reg, unsigned int value)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	if ((chip->model.function_flags & OXYGEN_FUNCTION_2WIRE_SPI_MASK) ==
+	    OXYGEN_FUNCTION_SPI)
+		wm8776_write_spi(chip, reg, value);
+	else
+		wm8776_write_i2c(chip, reg, value);
 	if (reg < ARRAY_SIZE(data->wm8776_regs)) {
 		if (reg >= WM8776_HPLVOL && reg <= WM8776_DACMASTER)
 			value &= ~WM8776_UPDATE;
@@ -245,17 +292,50 @@
 	snd_component_add(chip->card, "WM8766");
 }
 
+static void xonar_hdav_slim_init(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	data->generic.anti_pop_delay = 300;
+	data->generic.output_enable_bit = GPIO_SLIM_OUTPUT_ENABLE;
+
+	wm8776_init(chip);
+
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_SLIM_HDMI_DISABLE |
+			  GPIO_SLIM_FIRMWARE_CLK |
+			  GPIO_SLIM_FIRMWARE_DATA);
+
+	xonar_hdmi_init(chip, &data->hdmi);
+	xonar_enable_output(chip);
+
+	snd_component_add(chip->card, "WM8776");
+}
+
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
 	xonar_disable_output(chip);
 	wm8776_write(chip, WM8776_RESET, 0);
 }
 
+static void xonar_hdav_slim_cleanup(struct oxygen *chip)
+{
+	xonar_hdmi_cleanup(chip);
+	xonar_disable_output(chip);
+	wm8776_write(chip, WM8776_RESET, 0);
+	msleep(2);
+}
+
 static void xonar_ds_suspend(struct oxygen *chip)
 {
 	xonar_ds_cleanup(chip);
 }
 
+static void xonar_hdav_slim_suspend(struct oxygen *chip)
+{
+	xonar_hdav_slim_cleanup(chip);
+}
+
 static void xonar_ds_resume(struct oxygen *chip)
 {
 	wm8776_registers_init(chip);
@@ -264,6 +344,15 @@
 	xonar_ds_handle_hp_jack(chip);
 }
 
+static void xonar_hdav_slim_resume(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	wm8776_registers_init(chip);
+	xonar_hdmi_resume(chip, &data->hdmi);
+	xonar_enable_output(chip);
+}
+
 static void wm8776_adc_hardware_filter(unsigned int channel,
 				       struct snd_pcm_hardware *hardware)
 {
@@ -278,6 +367,13 @@
 	}
 }
 
+static void xonar_hdav_slim_hardware_filter(unsigned int channel,
+					    struct snd_pcm_hardware *hardware)
+{
+	wm8776_adc_hardware_filter(channel, hardware);
+	xonar_hdmi_pcm_hardware_filter(channel, hardware);
+}
+
 static void set_wm87x6_dac_params(struct oxygen *chip,
 				  struct snd_pcm_hw_params *params)
 {
@@ -294,6 +390,14 @@
 	wm8776_write_cached(chip, WM8776_MSTRCTRL, reg);
 }
 
+static void set_hdav_slim_dac_params(struct oxygen *chip,
+				     struct snd_pcm_hw_params *params)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	xonar_set_hdmi_params(chip, &data->hdmi, params);
+}
+
 static void update_wm8776_volume(struct oxygen *chip)
 {
 	struct xonar_wm87x6 *data = chip->model_data;
@@ -473,11 +577,6 @@
 	const char *const *names;
 
 	max = (ctl->private_value >> 12) & 0xf;
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = max + 1;
-	if (info->value.enumerated.item > max)
-		info->value.enumerated.item = max;
 	switch ((ctl->private_value >> 24) & 0x1f) {
 	case WM8776_ALCCTRL2:
 		names = hld;
@@ -501,8 +600,7 @@
 	default:
 		return -ENXIO;
 	}
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, max + 1, names);
 }
 
 static int wm8776_field_volume_info(struct snd_kcontrol *ctl,
@@ -759,13 +857,8 @@
 	static const char *const names[3] = {
 		"None", "Peak Limiter", "Automatic Level Control"
 	};
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item >= 3)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int wm8776_level_control_get(struct snd_kcontrol *ctl,
@@ -851,13 +944,7 @@
 		"None", "High-pass Filter"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -985,6 +1072,53 @@
 		.private_value = 0,
 	},
 };
+static const struct snd_kcontrol_new hdav_slim_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "HDMI Playback Switch",
+		.info = snd_ctl_boolean_mono_info,
+		.get = xonar_gpio_bit_switch_get,
+		.put = xonar_gpio_bit_switch_put,
+		.private_value = GPIO_SLIM_HDMI_DISABLE | XONAR_GPIO_BIT_INVERT,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphone Playback Volume",
+		.info = wm8776_hp_vol_info,
+		.get = wm8776_hp_vol_get,
+		.put = wm8776_hp_vol_put,
+		.tlv = { .p = wm8776_hp_db_scale },
+	},
+	WM8776_BIT_SWITCH("Headphone Playback Switch",
+			  WM8776_PWRDOWN, WM8776_HPPD, 1, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input Capture Volume",
+		.info = wm8776_input_vol_info,
+		.get = wm8776_input_vol_get,
+		.put = wm8776_input_vol_put,
+		.tlv = { .p = wm8776_adc_db_scale },
+	},
+	WM8776_BIT_SWITCH("Mic Capture Switch",
+			  WM8776_ADCMUX, 1 << 0, 0, 0),
+	WM8776_BIT_SWITCH("Aux Capture Switch",
+			  WM8776_ADCMUX, 1 << 1, 0, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "ADC Filter Capture Enum",
+		.info = hpf_info,
+		.get = hpf_get,
+		.put = hpf_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Level Control Capture Enum",
+		.info = wm8776_level_control_info,
+		.get = wm8776_level_control_get,
+		.put = wm8776_level_control_put,
+		.private_value = 0,
+	},
+};
 static const struct snd_kcontrol_new lc_controls[] = {
 	WM8776_FIELD_CTL_VOLUME("Limiter Threshold",
 				WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf,
@@ -1028,6 +1162,26 @@
 				LC_CONTROL_ALC, wm8776_ngth_db_scale),
 };
 
+static int add_lc_controls(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+	struct snd_kcontrol *ctl;
+	int err;
+
+	BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
+	for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
+		ctl = snd_ctl_new1(&lc_controls[i], chip);
+		if (!ctl)
+			return -ENOMEM;
+		err = snd_ctl_add(chip->card, ctl);
+		if (err < 0)
+			return err;
+		data->lc_controls[i] = ctl;
+	}
+	return 0;
+}
+
 static int xonar_ds_mixer_init(struct oxygen *chip)
 {
 	struct xonar_wm87x6 *data = chip->model_data;
@@ -1049,17 +1203,54 @@
 	}
 	if (!data->line_adcmux_control || !data->mic_adcmux_control)
 		return -ENXIO;
-	BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
-	for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
-		ctl = snd_ctl_new1(&lc_controls[i], chip);
+
+	return add_lc_controls(chip);
+}
+
+static int xonar_hdav_slim_mixer_init(struct oxygen *chip)
+{
+	unsigned int i;
+	struct snd_kcontrol *ctl;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(hdav_slim_controls); ++i) {
+		ctl = snd_ctl_new1(&hdav_slim_controls[i], chip);
 		if (!ctl)
 			return -ENOMEM;
 		err = snd_ctl_add(chip->card, ctl);
 		if (err < 0)
 			return err;
-		data->lc_controls[i] = ctl;
 	}
-	return 0;
+
+	return add_lc_controls(chip);
+}
+
+static void dump_wm8776_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nWM8776:\n00:");
+	for (i = 0; i < 0x10; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8776_regs[i]);
+	snd_iprintf(buffer, "\n10:");
+	for (i = 0x10; i < 0x17; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8776_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_wm87x6_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+
+	dump_wm8776_registers(chip, buffer);
+	snd_iprintf(buffer, "\nWM8766:\n00:");
+	for (i = 0; i < 0x10; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8766_regs[i]);
+	snd_iprintf(buffer, "\n");
 }
 
 static const struct oxygen_model model_xonar_ds = {
@@ -1072,22 +1263,57 @@
 	.suspend = xonar_ds_suspend,
 	.resume = xonar_ds_resume,
 	.pcm_hardware_filter = wm8776_adc_hardware_filter,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_wm87x6_dac_params,
 	.set_adc_params = set_wm8776_adc_params,
 	.update_dac_volume = update_wm87x6_volume,
 	.update_dac_mute = update_wm87x6_mute,
 	.update_center_lfe_mix = update_wm8766_center_lfe_mix,
 	.gpio_changed = xonar_ds_gpio_changed,
+	.dump_registers = dump_wm87x6_registers,
 	.dac_tlv = wm87x6_dac_db_scale,
 	.model_data_size = sizeof(struct xonar_wm87x6),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
 			 CAPTURE_0_FROM_I2S_1,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+};
+
+static const struct oxygen_model model_xonar_hdav_slim = {
+	.shortname = "Xonar HDAV1.3 Slim",
+	.longname = "Asus Virtuoso 200",
+	.chip = "AV200",
+	.init = xonar_hdav_slim_init,
+	.mixer_init = xonar_hdav_slim_mixer_init,
+	.cleanup = xonar_hdav_slim_cleanup,
+	.suspend = xonar_hdav_slim_suspend,
+	.resume = xonar_hdav_slim_resume,
+	.pcm_hardware_filter = xonar_hdav_slim_hardware_filter,
+	.set_dac_params = set_hdav_slim_dac_params,
+	.set_adc_params = set_wm8776_adc_params,
+	.update_dac_volume = update_wm8776_volume,
+	.update_dac_mute = update_wm8776_mute,
+	.uart_input = xonar_hdmi_uart_input,
+	.dump_registers = dump_wm8776_registers,
+	.dac_tlv = wm87x6_dac_db_scale,
+	.model_data_size = sizeof(struct xonar_wm87x6),
+	.device_config = PLAYBACK_0_TO_I2S |
+			 PLAYBACK_1_TO_SPDIF |
+			 CAPTURE_0_FROM_I2S_1,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 2,
+	.dac_volume_min = 255 - 2*60,
+	.dac_volume_max = 255,
+	.function_flags = OXYGEN_FUNCTION_2WIRE,
+	.dac_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
@@ -1099,6 +1325,9 @@
 	case 0x838e:
 		chip->model = model_xonar_ds;
 		break;
+	case 0x835e:
+		chip->model = model_xonar_hdav_slim;
+		break;
 	default:
 		return -EINVAL;
 	}
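
The new wm8776_write() dispatch above sends the same 16-bit codec word over either bus: the WM8776 takes a 7-bit register index and a 9-bit value, which the SPI path shifts into one word and the I2C path splits across two bytes ((reg << 1) | (value >> 8), then the low 8 value bits). A small sketch of that packing, pure arithmetic with no bus access:

	/* wm8776_pack_sketch.c - how a WM8776 register write is framed on SPI vs I2C */
	#include <stdint.h>
	#include <stdio.h>

	/* SPI path: one 16-bit word, register in bits 15..9, value in bits 8..0 */
	static uint16_t wm8776_spi_word(unsigned int reg, unsigned int value)
	{
		return (uint16_t)((reg << 9) | (value & 0x1ff));
	}

	/* I2C path: first byte carries reg and the value's 9th bit, second byte the rest */
	static void wm8776_i2c_bytes(unsigned int reg, unsigned int value,
				     uint8_t *byte1, uint8_t *byte2)
	{
		*byte1 = (uint8_t)((reg << 1) | ((value >> 8) & 1));
		*byte2 = (uint8_t)value;
	}

	int main(void)
	{
		unsigned int reg = 0x15, value = 0x1a5;	/* arbitrary example write */
		uint8_t b1, b2;

		wm8776_i2c_bytes(reg, value, &b1, &b2);
		/* note the two I2C bytes are exactly the SPI word split in half */
		printf("SPI word: 0x%04x, I2C bytes: 0x%02x 0x%02x\n",
		       (unsigned)wm8776_spi_word(reg, value), b1, b2);
		return 0;
	}
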
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 0b720cf..2d83324 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -60,6 +60,7 @@
 	        "{RME HDSP-9652},"
 		"{RME HDSP-9632}}");
 #ifdef HDSP_FW_LOADER
+MODULE_FIRMWARE("rpm_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware_rev11.bin");
 MODULE_FIRMWARE("digiface_firmware.bin");
@@ -81,6 +82,7 @@
 #define H9632_SS_CHANNELS	 12
 #define H9632_DS_CHANNELS	 8
 #define H9632_QS_CHANNELS	 4
+#define RPM_CHANNELS             6
 
 /* Write registers. These are defined as byte-offsets from the iobase value.
  */
@@ -191,6 +193,25 @@
 #define HDSP_PhoneGain1		  (1<<30)
 #define HDSP_QuadSpeed	  	  (1<<31)
 
+/* The RPM reuses some control register bits for its own purposes */
+#define HDSP_RPM_Inp12            0x04A00
+#define HDSP_RPM_Inp12_Phon_6dB   0x00800  /* Dolby */
+#define HDSP_RPM_Inp12_Phon_0dB   0x00000  /* .. */
+#define HDSP_RPM_Inp12_Phon_n6dB  0x04000  /* inp_0 */
+#define HDSP_RPM_Inp12_Line_0dB   0x04200  /* Dolby+PRO */
+#define HDSP_RPM_Inp12_Line_n6dB  0x00200  /* PRO */
+
+#define HDSP_RPM_Inp34            0x32000
+#define HDSP_RPM_Inp34_Phon_6dB   0x20000  /* SyncRef1 */
+#define HDSP_RPM_Inp34_Phon_0dB   0x00000  /* no bits set */
+#define HDSP_RPM_Inp34_Phon_n6dB  0x02000  /* SyncRef2 */
+#define HDSP_RPM_Inp34_Line_0dB   0x30000  /* SyncRef1+SyncRef0 */
+#define HDSP_RPM_Inp34_Line_n6dB  0x10000  /* SyncRef0 */
+
+#define HDSP_RPM_Bypass           0x01000
+
+#define HDSP_RPM_Disconnect       0x00001
+
 #define HDSP_ADGainMask       (HDSP_ADGain0|HDSP_ADGain1)
 #define HDSP_ADGainMinus10dBV  HDSP_ADGainMask
 #define HDSP_ADGainPlus4dBu   (HDSP_ADGain0)
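
The RPM masks added above overlay existing control-register bits: HDSP_RPM_Inp12 and HDSP_RPM_Inp34 are multi-bit fields whose five legal values encode phono/line selection and a gain trim, while Bypass and Disconnect are single bits. A standalone decoder over those same constants (the define values are copied from the patch; everything else is illustrative):

	/* rpm_reg_sketch.c - decoding the RPM fields of the HDSP control register */
	#include <stdint.h>
	#include <stdio.h>

	#define HDSP_RPM_Inp12            0x04A00
	#define HDSP_RPM_Inp12_Phon_6dB   0x00800
	#define HDSP_RPM_Inp12_Phon_0dB   0x00000
	#define HDSP_RPM_Inp12_Phon_n6dB  0x04000
	#define HDSP_RPM_Inp12_Line_0dB   0x04200
	#define HDSP_RPM_Inp12_Line_n6dB  0x00200
	#define HDSP_RPM_Bypass           0x01000
	#define HDSP_RPM_Disconnect       0x00001

	/* same mapping as hdsp_rpm_input12() later in the patch */
	static const char *rpm_input12_name(uint32_t control_register)
	{
		switch (control_register & HDSP_RPM_Inp12) {
		case HDSP_RPM_Inp12_Phon_6dB:	return "Phono +6dB";
		case HDSP_RPM_Inp12_Phon_n6dB:	return "Phono -6dB";
		case HDSP_RPM_Inp12_Line_0dB:	return "Line 0dB";
		case HDSP_RPM_Inp12_Line_n6dB:	return "Line -6dB";
		default:			return "Phono 0dB";
		}
	}

	int main(void)
	{
		uint32_t creg = HDSP_RPM_Inp12_Line_n6dB | HDSP_RPM_Bypass;

		printf("Input 1/2: %s, bypass bit %s, %s\n",
		       rpm_input12_name(creg),
		       (creg & HDSP_RPM_Bypass) ? "set" : "clear",
		       (creg & HDSP_RPM_Disconnect) ? "disconnected" : "connected");
		return 0;
	}
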
@@ -450,7 +471,7 @@
 	u32                   creg_spdif;
 	u32                   creg_spdif_stream;
 	int                   clock_source_locked;
-	char                 *card_name;	     /* digiface/multiface */
+	char                 *card_name;	 /* digiface/multiface/rpm */
 	enum HDSP_IO_Type     io_type;               /* ditto, but for code use */
         unsigned short        firmware_rev;
 	unsigned short	      state;		     /* stores state bits */
@@ -612,6 +633,7 @@
 	switch (hdsp->io_type) {
 	case Multiface:
 	case Digiface:
+	case RPM:
 	default:
 		if (hdsp->firmware_rev == 0xa)
 			return (64 * out) + (32 + (in));
@@ -629,6 +651,7 @@
 	switch (hdsp->io_type) {
 	case Multiface:
 	case Digiface:
+	case RPM:
 	default:
 		if (hdsp->firmware_rev == 0xa)
 			return (64 * out) + in;
@@ -655,7 +678,7 @@
 {
 	if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return 0;
 	if (hdsp_read (hdsp, HDSP_statusRegister) & HDSP_ConfigError) {
-		snd_printk ("Hammerfall-DSP: no Digiface or Multiface connected!\n");
+		snd_printk("Hammerfall-DSP: no IO box connected!\n");
 		hdsp->state &= ~HDSP_FirmwareLoaded;
 		return -EIO;
 	}
@@ -680,7 +703,7 @@
 		}
 	}
 
-	snd_printk("Hammerfall-DSP: no Digiface or Multiface connected!\n");
+	snd_printk("Hammerfall-DSP: no IO box connected!\n");
 	hdsp->state &= ~HDSP_FirmwareLoaded;
 	return -EIO;
 }
@@ -752,17 +775,21 @@
 		hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
 		hdsp_write (hdsp, HDSP_fifoData, 0);
 
-		if (hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT)) {
-			hdsp->io_type = Multiface;
-			hdsp_write (hdsp, HDSP_control2Reg, HDSP_VERSION_BIT);
-			hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
-			hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT);
+		if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT)) {
+			hdsp_write(hdsp, HDSP_control2Reg, HDSP_VERSION_BIT);
+			hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
+			if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT))
+				hdsp->io_type = RPM;
+			else
+				hdsp->io_type = Multiface;
 		} else {
 			hdsp->io_type = Digiface;
 		}
 	} else {
 		/* firmware was already loaded, get iobox type */
-		if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
+		if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
+			hdsp->io_type = RPM;
+		else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
 			hdsp->io_type = Multiface;
 		else
 			hdsp->io_type = Digiface;
@@ -1184,6 +1211,7 @@
 			hdsp->channel_map = channel_map_ds;
 	} else {
 		switch (hdsp->io_type) {
+		case RPM:
 		case Multiface:
 			hdsp->channel_map = channel_map_mf_ss;
 			break;
@@ -3231,6 +3259,318 @@
 HDSP_USE_MIDI_TASKLET("Use Midi Tasklet", 0),
 };
 
+
+static int hdsp_rpm_input12(struct hdsp *hdsp)
+{
+	switch (hdsp->control_register & HDSP_RPM_Inp12) {
+	case HDSP_RPM_Inp12_Phon_6dB:
+		return 0;
+	case HDSP_RPM_Inp12_Phon_n6dB:
+		return 2;
+	case HDSP_RPM_Inp12_Line_0dB:
+		return 3;
+	case HDSP_RPM_Inp12_Line_n6dB:
+		return 4;
+	}
+	return 1;
+}
+
+
+static int snd_hdsp_get_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.enumerated.item[0] = hdsp_rpm_input12(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_input12(struct hdsp *hdsp, int mode)
+{
+	hdsp->control_register &= ~HDSP_RPM_Inp12;
+	switch (mode) {
+	case 0:
+		hdsp->control_register |= HDSP_RPM_Inp12_Phon_6dB;
+		break;
+	case 1:
+		break;
+	case 2:
+		hdsp->control_register |= HDSP_RPM_Inp12_Phon_n6dB;
+		break;
+	case 3:
+		hdsp->control_register |= HDSP_RPM_Inp12_Line_0dB;
+		break;
+	case 4:
+		hdsp->control_register |= HDSP_RPM_Inp12_Line_n6dB;
+		break;
+	default:
+		return -1;
+	}
+
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.enumerated.item[0];
+	if (val < 0)
+		val = 0;
+	if (val > 4)
+		val = 4;
+	spin_lock_irq(&hdsp->lock);
+	if (val != hdsp_rpm_input12(hdsp))
+		change = (hdsp_set_rpm_input12(hdsp, val) == 0) ? 1 : 0;
+	else
+		change = 0;
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+static int snd_hdsp_info_rpm_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"Phono +6dB", "Phono 0dB", "Phono -6dB", "Line 0dB", "Line -6dB"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 5;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+
+static int hdsp_rpm_input34(struct hdsp *hdsp)
+{
+	switch (hdsp->control_register & HDSP_RPM_Inp34) {
+	case HDSP_RPM_Inp34_Phon_6dB:
+		return 0;
+	case HDSP_RPM_Inp34_Phon_n6dB:
+		return 2;
+	case HDSP_RPM_Inp34_Line_0dB:
+		return 3;
+	case HDSP_RPM_Inp34_Line_n6dB:
+		return 4;
+	}
+	return 1;
+}
+
+
+static int snd_hdsp_get_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.enumerated.item[0] = hdsp_rpm_input34(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_input34(struct hdsp *hdsp, int mode)
+{
+	hdsp->control_register &= ~HDSP_RPM_Inp34;
+	switch (mode) {
+	case 0:
+		hdsp->control_register |= HDSP_RPM_Inp34_Phon_6dB;
+		break;
+	case 1:
+		break;
+	case 2:
+		hdsp->control_register |= HDSP_RPM_Inp34_Phon_n6dB;
+		break;
+	case 3:
+		hdsp->control_register |= HDSP_RPM_Inp34_Line_0dB;
+		break;
+	case 4:
+		hdsp->control_register |= HDSP_RPM_Inp34_Line_n6dB;
+		break;
+	default:
+		return -1;
+	}
+
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.enumerated.item[0];
+	if (val < 0)
+		val = 0;
+	if (val > 4)
+		val = 4;
+	spin_lock_irq(&hdsp->lock);
+	if (val != hdsp_rpm_input34(hdsp))
+		change = (hdsp_set_rpm_input34(hdsp, val) == 0) ? 1 : 0;
+	else
+		change = 0;
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+/* RPM Bypass switch */
+static int hdsp_rpm_bypass(struct hdsp *hdsp)
+{
+	return (hdsp->control_register & HDSP_RPM_Bypass) ? 1 : 0;
+}
+
+
+static int snd_hdsp_get_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.integer.value[0] = hdsp_rpm_bypass(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_bypass(struct hdsp *hdsp, int on)
+{
+	if (on)
+		hdsp->control_register |= HDSP_RPM_Bypass;
+	else
+		hdsp->control_register &= ~HDSP_RPM_Bypass;
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	unsigned int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.integer.value[0] & 1;
+	spin_lock_irq(&hdsp->lock);
+	change = (int)val != hdsp_rpm_bypass(hdsp);
+	hdsp_set_rpm_bypass(hdsp, val);
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+static int snd_hdsp_info_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"On", "Off"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 2;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+
+/* RPM Disconnect switch */
+static int hdsp_rpm_disconnect(struct hdsp *hdsp)
+{
+	return (hdsp->control_register & HDSP_RPM_Disconnect) ? 1 : 0;
+}
+
+
+static int snd_hdsp_get_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.integer.value[0] = hdsp_rpm_disconnect(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_disconnect(struct hdsp *hdsp, int on)
+{
+	if (on)
+		hdsp->control_register |= HDSP_RPM_Disconnect;
+	else
+		hdsp->control_register &= ~HDSP_RPM_Disconnect;
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	unsigned int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.integer.value[0] & 1;
+	spin_lock_irq(&hdsp->lock);
+	change = (int)val != hdsp_rpm_disconnect(hdsp);
+	hdsp_set_rpm_disconnect(hdsp, val);
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+static int snd_hdsp_info_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"On", "Off"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 2;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+static struct snd_kcontrol_new snd_hdsp_rpm_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "RPM Bypass",
+		.get = snd_hdsp_get_rpm_bypass,
+		.put = snd_hdsp_put_rpm_bypass,
+		.info = snd_hdsp_info_rpm_bypass
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "RPM Disconnect",
+		.get = snd_hdsp_get_rpm_disconnect,
+		.put = snd_hdsp_put_rpm_disconnect,
+		.info = snd_hdsp_info_rpm_disconnect
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input 1/2",
+		.get = snd_hdsp_get_rpm_input12,
+		.put = snd_hdsp_put_rpm_input12,
+		.info = snd_hdsp_info_rpm_input
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input 3/4",
+		.get = snd_hdsp_get_rpm_input34,
+		.put = snd_hdsp_put_rpm_input34,
+		.info = snd_hdsp_info_rpm_input
+	},
+	HDSP_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+	HDSP_MIXER("Mixer", 0)
+};
+
 static struct snd_kcontrol_new snd_hdsp_96xx_aeb = HDSP_AEB("Analog Extension Board", 0);
 static struct snd_kcontrol_new snd_hdsp_adat_sync_check = HDSP_ADAT_SYNC_CHECK;
 
@@ -3240,6 +3580,16 @@
 	int err;
 	struct snd_kcontrol *kctl;
 
+	if (hdsp->io_type == RPM) {
+		/* RPM Bypass, Disconnect and Input switches */
+		for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_rpm_controls); idx++) {
+			err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_rpm_controls[idx], hdsp));
+			if (err < 0)
+				return err;
+		}
+		return 0;
+	}
+
 	for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_controls); idx++) {
 		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_controls[idx], hdsp))) < 0)
 			return err;
@@ -3459,48 +3809,102 @@
 
 	snd_iprintf(buffer, "\n");
 
-	switch (hdsp_spdif_in(hdsp)) {
-	case HDSP_SPDIFIN_OPTICAL:
-		snd_iprintf(buffer, "IEC958 input: Optical\n");
-		break;
-	case HDSP_SPDIFIN_COAXIAL:
-		snd_iprintf(buffer, "IEC958 input: Coaxial\n");
-		break;
-	case HDSP_SPDIFIN_INTERNAL:
-		snd_iprintf(buffer, "IEC958 input: Internal\n");
-		break;
-	case HDSP_SPDIFIN_AES:
-		snd_iprintf(buffer, "IEC958 input: AES\n");
-		break;
-	default:
-		snd_iprintf(buffer, "IEC958 input: ???\n");
-		break;
+	if (hdsp->io_type != RPM) {
+		switch (hdsp_spdif_in(hdsp)) {
+		case HDSP_SPDIFIN_OPTICAL:
+			snd_iprintf(buffer, "IEC958 input: Optical\n");
+			break;
+		case HDSP_SPDIFIN_COAXIAL:
+			snd_iprintf(buffer, "IEC958 input: Coaxial\n");
+			break;
+		case HDSP_SPDIFIN_INTERNAL:
+			snd_iprintf(buffer, "IEC958 input: Internal\n");
+			break;
+		case HDSP_SPDIFIN_AES:
+			snd_iprintf(buffer, "IEC958 input: AES\n");
+			break;
+		default:
+			snd_iprintf(buffer, "IEC958 input: ???\n");
+			break;
+		}
 	}
 
-	if (hdsp->control_register & HDSP_SPDIFOpticalOut)
-		snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n");
-	else
-		snd_iprintf(buffer, "IEC958 output: Coaxial only\n");
+	if (RPM == hdsp->io_type) {
+		if (hdsp->control_register & HDSP_RPM_Bypass)
+			snd_iprintf(buffer, "RPM Bypass: disabled\n");
+		else
+			snd_iprintf(buffer, "RPM Bypass: enabled\n");
+		if (hdsp->control_register & HDSP_RPM_Disconnect)
+			snd_iprintf(buffer, "RPM disconnected\n");
+		else
+			snd_iprintf(buffer, "RPM connected\n");
 
-	if (hdsp->control_register & HDSP_SPDIFProfessional)
-		snd_iprintf(buffer, "IEC958 quality: Professional\n");
-	else
-		snd_iprintf(buffer, "IEC958 quality: Consumer\n");
+		switch (hdsp->control_register & HDSP_RPM_Inp12) {
+		case HDSP_RPM_Inp12_Phon_6dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, 6dB\n");
+			break;
+		case HDSP_RPM_Inp12_Phon_0dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, 0dB\n");
+			break;
+		case HDSP_RPM_Inp12_Phon_n6dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, -6dB\n");
+			break;
+		case HDSP_RPM_Inp12_Line_0dB:
+			snd_iprintf(buffer, "Input 1/2: Line, 0dB\n");
+			break;
+		case HDSP_RPM_Inp12_Line_n6dB:
+			snd_iprintf(buffer, "Input 1/2: Line, -6dB\n");
+			break;
+		default:
+			snd_iprintf(buffer, "Input 1/2: ???\n");
+		}
 
-	if (hdsp->control_register & HDSP_SPDIFEmphasis)
-		snd_iprintf(buffer, "IEC958 emphasis: on\n");
-	else
-		snd_iprintf(buffer, "IEC958 emphasis: off\n");
+		switch (hdsp->control_register & HDSP_RPM_Inp34) {
+		case HDSP_RPM_Inp34_Phon_6dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, 6dB\n");
+			break;
+		case HDSP_RPM_Inp34_Phon_0dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, 0dB\n");
+			break;
+		case HDSP_RPM_Inp34_Phon_n6dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, -6dB\n");
+			break;
+		case HDSP_RPM_Inp34_Line_0dB:
+			snd_iprintf(buffer, "Input 3/4: Line, 0dB\n");
+			break;
+		case HDSP_RPM_Inp34_Line_n6dB:
+			snd_iprintf(buffer, "Input 3/4: Line, -6dB\n");
+			break;
+		default:
+			snd_iprintf(buffer, "Input 3/4: ???\n");
+		}
 
-	if (hdsp->control_register & HDSP_SPDIFNonAudio)
-		snd_iprintf(buffer, "IEC958 NonAudio: on\n");
-	else
-		snd_iprintf(buffer, "IEC958 NonAudio: off\n");
-	if ((x = hdsp_spdif_sample_rate (hdsp)) != 0)
-		snd_iprintf (buffer, "IEC958 sample rate: %d\n", x);
-	else
-		snd_iprintf (buffer, "IEC958 sample rate: Error flag set\n");
+	} else {
+		if (hdsp->control_register & HDSP_SPDIFOpticalOut)
+			snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n");
+		else
+			snd_iprintf(buffer, "IEC958 output: Coaxial only\n");
 
+		if (hdsp->control_register & HDSP_SPDIFProfessional)
+			snd_iprintf(buffer, "IEC958 quality: Professional\n");
+		else
+			snd_iprintf(buffer, "IEC958 quality: Consumer\n");
+
+		if (hdsp->control_register & HDSP_SPDIFEmphasis)
+			snd_iprintf(buffer, "IEC958 emphasis: on\n");
+		else
+			snd_iprintf(buffer, "IEC958 emphasis: off\n");
+
+		if (hdsp->control_register & HDSP_SPDIFNonAudio)
+			snd_iprintf(buffer, "IEC958 NonAudio: on\n");
+		else
+			snd_iprintf(buffer, "IEC958 NonAudio: off\n");
+		x = hdsp_spdif_sample_rate(hdsp);
+		if (x != 0)
+			snd_iprintf(buffer, "IEC958 sample rate: %d\n", x);
+		else
+			snd_iprintf(buffer, "IEC958 sample rate: Error flag set\n");
+	}
 	snd_iprintf(buffer, "\n");
 
 	/* Sync Check */
@@ -3765,7 +4169,7 @@
 			snd_hdsp_midi_input_read (&hdsp->midi[0]);
 		}
 	}
-	if (hdsp->io_type != Multiface && hdsp->io_type != H9632 && midi1 && midi1status) {
+	if (hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632 && midi1 && midi1status) {
 		if (hdsp->use_midi_tasklet) {
 			/* we disable interrupts for this input until processing is done */
 			hdsp->control_register &= ~HDSP_Midi1InterruptEnable;
@@ -4093,7 +4497,7 @@
 				 SNDRV_PCM_RATE_96000),
 	.rate_min =		32000,
 	.rate_max =		96000,
-	.channels_min =		14,
+	.channels_min =		6,
 	.channels_max =		HDSP_MAX_CHANNELS,
 	.buffer_bytes_max =	HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
 	.period_bytes_min =	(64 * 4) * 10,
@@ -4122,7 +4526,7 @@
 				 SNDRV_PCM_RATE_96000),
 	.rate_min =		32000,
 	.rate_max =		96000,
-	.channels_min =		14,
+	.channels_min =		5,
 	.channels_max =		HDSP_MAX_CHANNELS,
 	.buffer_bytes_max =	HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
 	.period_bytes_min =	(64 * 4) * 10,
@@ -4357,10 +4761,12 @@
 			     snd_hdsp_hw_rule_rate_out_channels, hdsp,
 			     SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 
-	hdsp->creg_spdif_stream = hdsp->creg_spdif;
-	hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
-	snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
-		       SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	if (RPM != hdsp->io_type) {
+		hdsp->creg_spdif_stream = hdsp->creg_spdif;
+		hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+		snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
+			SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	}
 	return 0;
 }
 
@@ -4375,9 +4781,11 @@
 
 	spin_unlock_irq(&hdsp->lock);
 
-	hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
-	snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
-		       SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	if (RPM != hdsp->io_type) {
+		hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+		snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
+			SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	}
 	return 0;
 }
 
@@ -4616,7 +5024,7 @@
 		if (hdsp->io_type != H9632)
 		    info.adatsync_sync_check = (unsigned char)hdsp_adatsync_sync_check(hdsp);
 		info.spdif_sync_check = (unsigned char)hdsp_spdif_sync_check(hdsp);
-		for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != H9632) ? 3 : 1); ++i)
+		for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632) ? 3 : 1); ++i)
 			info.adat_sync_check[i] = (unsigned char)hdsp_adat_sync_check(hdsp, i);
 		info.spdif_in = (unsigned char)hdsp_spdif_in(hdsp);
 		info.spdif_out = (unsigned char)hdsp_spdif_out(hdsp);
@@ -4636,6 +5044,9 @@
 			info.phone_gain = (unsigned char)hdsp_phone_gain(hdsp);
 			info.xlr_breakout_cable = (unsigned char)hdsp_xlr_breakout_cable(hdsp);
 
+		} else if (hdsp->io_type == RPM) {
+			info.da_gain = (unsigned char) hdsp_rpm_input12(hdsp);
+			info.ad_gain = (unsigned char) hdsp_rpm_input34(hdsp);
 		}
 		if (hdsp->io_type == H9632 || hdsp->io_type == H9652)
 			info.analog_extension_board = (unsigned char)hdsp_aeb(hdsp);
@@ -4844,6 +5255,14 @@
 		hdsp->ds_in_channels = hdsp->ds_out_channels = MULTIFACE_DS_CHANNELS;
 		break;
 
+	case RPM:
+		hdsp->card_name = "RME Hammerfall DSP + RPM";
+		hdsp->ss_in_channels = RPM_CHANNELS-1;
+		hdsp->ss_out_channels = RPM_CHANNELS;
+		hdsp->ds_in_channels = RPM_CHANNELS-1;
+		hdsp->ds_out_channels = RPM_CHANNELS;
+		break;
+
 	default:
  		/* should never get here */
 		break;
@@ -4930,6 +5349,9 @@
 
 	/* caution: max length of firmware filename is 30! */
 	switch (hdsp->io_type) {
+	case RPM:
+		fwfile = "rpm_firmware.bin";
+		break;
 	case Multiface:
 		if (hdsp->firmware_rev == 0xa)
 			fwfile = "multiface_firmware.bin";
@@ -5100,7 +5522,9 @@
 			return 0;
 		} else {
 			snd_printk(KERN_INFO "Hammerfall-DSP: Firmware already present, initializing card.\n");
-			if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
+			if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
+				hdsp->io_type = RPM;
+			else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
 				hdsp->io_type = Multiface;
 			else
 				hdsp->io_type = Digiface;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 0c98ef9..f5eadfc 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -487,7 +487,7 @@
 	struct snd_kcontrol *playback_mixer_ctls[HDSPM_MAX_CHANNELS];
 	/* but input to much, so not used */
 	struct snd_kcontrol *input_mixer_ctls[HDSPM_MAX_CHANNELS];
-	/* full mixer accessable over mixer ioctl or hwdep-device */
+	/* full mixer accessible over mixer ioctl or hwdep-device */
 	struct hdspm_mixer *mixer;
 
 };
@@ -550,7 +550,7 @@
 	return bit2freq_tab[n];
 }
 
-/* Write/read to/from HDSPM with Adresses in Bytes
+/* Write/read to/from HDSPM with Addresses in Bytes
    not words but only 32Bit writes are allowed */
 
 static inline void hdspm_write(struct hdspm * hdspm, unsigned int reg,
@@ -2908,7 +2908,7 @@
 
 	/* Channel playback mixer as default control 
 	   Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders,
-	   thats too * big for any alsamixer they are accesible via special
+	   thats too * big for any alsamixer they are accessible via special
 	   IOCTL on hwdep and the mixer 2dimensional mixer control
 	*/
 
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 5518371..c94c051 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -1389,15 +1389,9 @@
 
 static int snd_ymfpci_drec_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info)
 {
-	static char *texts[3] = {"AC'97", "IEC958", "ZV Port"};
+	static const char *const texts[3] = {"AC'97", "IEC958", "ZV Port"};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, texts);
 }
 
 static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value)
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.h b/sound/pcmcia/pdaudiocf/pdaudiocf.h
index bd26e09..6ce9ad7 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.h
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.h
@@ -22,7 +22,7 @@
 #define __PDAUDIOCF_H
 
 #include <sound/pcm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/interrupt.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ds.h>
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 989e04a..fe33e12 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -23,8 +23,8 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
+#include <linux/io.h>
 #include <sound/core.h>
-#include <asm/io.h>
 #include "vxpocket.h"
 
 
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 581a670..edce8a2 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -51,7 +51,7 @@
 static int snd_ps3_start_delay = CONFIG_SND_PS3_DEFAULT_START_DELAY;
 
 module_param_named(start_delay, snd_ps3_start_delay, uint, 0644);
-MODULE_PARM_DESC(start_delay, "time to insert silent data in milisec");
+MODULE_PARM_DESC(start_delay, "time to insert silent data in ms");
 
 static int index = SNDRV_DEFAULT_IDX1;
 static char *id = SNDRV_DEFAULT_STR1;
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 3e598e7..a3efc52 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -20,6 +20,21 @@
 
 if SND_SOC
 
+config SND_SOC_CACHE_LZO
+	bool "Support LZO compression for register caches"
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
+	---help---
+	   Select this to enable LZO compression for register caches.
+	   This will allow machine or CODEC drivers to compress register
+	   caches in memory, reducing the memory consumption at the
+	   expense of performance.  If this is not present and is used
+	   expense of performance.  If this is not enabled but compression
+	   is requested, the system will fall back to uncompressed caches.
+
+	   It is usually safe to disable this option; where cache
+	   compression is used, the rbtree option will typically perform
+	   better.
 config SND_SOC_AC97_BUS
 	bool
 
@@ -36,7 +51,7 @@
 source "sound/soc/omap/Kconfig"
 source "sound/soc/kirkwood/Kconfig"
 source "sound/soc/pxa/Kconfig"
-source "sound/soc/s3c24xx/Kconfig"
+source "sound/soc/samsung/Kconfig"
 source "sound/soc/s6000/Kconfig"
 source "sound/soc/sh/Kconfig"
 source "sound/soc/txx9/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index eb18344..ce913bf 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -14,7 +14,7 @@
 obj-$(CONFIG_SND_SOC)	+= omap/
 obj-$(CONFIG_SND_SOC)	+= kirkwood/
 obj-$(CONFIG_SND_SOC)	+= pxa/
-obj-$(CONFIG_SND_SOC)	+= s3c24xx/
+obj-$(CONFIG_SND_SOC)	+= samsung/
 obj-$(CONFIG_SND_SOC)	+= s6000/
 obj-$(CONFIG_SND_SOC)	+= sh/
 obj-$(CONFIG_SND_SOC)	+= txx9/
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
index 5f4e59f..1aac2f4 100644
--- a/sound/soc/atmel/playpaq_wm8510.c
+++ b/sound/soc/atmel/playpaq_wm8510.c
@@ -33,7 +33,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/at32ap700x.h>
 #include <mach/portmux.h>
@@ -318,27 +317,28 @@
 static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 
 	/*
 	 * Add DAPM widgets
 	 */
 	for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
-		snd_soc_dapm_new_control(codec, &playpaq_dapm_widgets[i]);
+		snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
 
 
 
 	/*
 	 * Setup audio path interconnects
 	 */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 
 
 	/* always connected pins */
-	snd_soc_dapm_enable_pin(codec, "Int Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_enable_pin(dapm, "Int Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_sync(dapm);
 
 
 
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index e521ada..af3c730 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -44,7 +44,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -140,6 +139,7 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	printk(KERN_DEBUG
@@ -154,25 +154,25 @@
 	}
 
 	/* Add specific widgets */
-	snd_soc_dapm_new_controls(codec, at91sam9g20ek_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, at91sam9g20ek_dapm_widgets,
 				  ARRAY_SIZE(at91sam9g20ek_dapm_widgets));
 	/* Set up specific audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	/* not connected */
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
 
 #ifdef ENABLE_MIC_INPUT
-	snd_soc_dapm_enable_pin(codec, "Int Mic");
+	snd_soc_dapm_enable_pin(dapm, "Int Mic");
 #else
-	snd_soc_dapm_nc_pin(codec, "Int Mic");
+	snd_soc_dapm_nc_pin(dapm, "Int Mic");
 #endif
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
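The machine-driver hunks in this patch all apply the same mechanical conversion: the DAPM calls now operate on a struct snd_soc_dapm_context taken from codec->dapm instead of on the codec itself (the codec hunks below likewise move bias_level to codec->dapm.bias_level). A minimal sketch of the converted init callback, with the widget/route tables and pin names as placeholders rather than anything taken from this patch:

#include <sound/soc.h>

static int example_board_dai_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	/* Register board-level widgets and routes against the DAPM context. */
	snd_soc_dapm_new_controls(dapm, board_dapm_widgets,
				  ARRAY_SIZE(board_dapm_widgets));
	snd_soc_dapm_add_routes(dapm, board_audio_map,
				ARRAY_SIZE(board_audio_map));

	/* Fix up pins that are hard-wired (or absent) on this board. */
	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
	snd_soc_dapm_nc_pin(dapm, "LLINEIN");

	return snd_soc_dapm_sync(dapm);
}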
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
index 86e0f85..5e4d499 100644
--- a/sound/soc/atmel/snd-soc-afeb9260.c
+++ b/sound/soc/atmel/snd-soc-afeb9260.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -105,19 +104,20 @@
 static int afeb9260_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add afeb9260 specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up afeb9260 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -129,7 +129,7 @@
 	.cpu_dai_name = "atmel-ssc-dai.0",
 	.codec_dai_name = "tlv320aic23-hifi",
 	.platform_name = "atmel_pcm-audio",
-	.codec_name = "tlv320aic23-codec.0-0x1a",
+	.codec_name = "tlv320aic23-codec.0-001a",
 	.init = afeb9260_tlv320aic23_init,
 	.ops = &afeb9260_ops,
 };
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index b62fcd3..cb99f04 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -13,7 +13,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 3abeedd..ae40359 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -1,6 +1,7 @@
 config SND_BF5XX_I2S
 	tristate "SoC I2S Audio for the ADI BF5xx chip"
 	depends on BLACKFIN
+	select SND_BF5XX_SOC_SPORT
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the Blackfin SPORT (synchronous serial ports) interface in I2S
@@ -35,6 +36,7 @@
 config SND_BF5XX_TDM
 	tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
 	depends on (BLACKFIN && SND_SOC)
+	select SND_BF5XX_SOC_SPORT
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the Blackfin SPORT (synchronous serial ports) interface in TDM
@@ -61,6 +63,10 @@
 config SND_BF5XX_AC97
 	tristate "SoC AC97 Audio for the ADI BF5xx chip"
 	depends on BLACKFIN
+	select AC97_BUS
+	select SND_SOC_AC97_BUS
+	select SND_BF5XX_SOC_SPORT
+	select SND_BF5XX_SOC_AC97
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the Blackfin SPORT (synchronous serial ports) interface in slot 16
@@ -122,17 +128,12 @@
 
 config SND_BF5XX_SOC_I2S
 	tristate
-	select SND_BF5XX_SOC_SPORT
 
 config SND_BF5XX_SOC_TDM
 	tristate
-	select SND_BF5XX_SOC_SPORT
 
 config SND_BF5XX_SOC_AC97
 	tristate
-	select AC97_BUS
-	select SND_SOC_AC97_BUS
-	select SND_BF5XX_SOC_SPORT
 
 config SND_BF5XX_SPORT_NUM
 	int "Set a SPORT for Sound chip"
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index c5f856ec..ffbac26 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -260,9 +260,9 @@
 	pr_debug("%s : sport %d\n", __func__, dai->id);
 	if (!dai->active)
 		return 0;
-	if (dai->capture.active)
+	if (dai->capture_active)
 		sport_rx_stop(sport);
-	if (dai->playback.active)
+	if (dai->playback_active)
 		sport_tx_stop(sport);
 	return 0;
 }
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index 2394bff..83012da 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -20,7 +20,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c
index e4a6253..d3ccb92 100644
--- a/sound/soc/blackfin/bf5xx-ad193x.c
+++ b/sound/soc/blackfin/bf5xx-ad193x.c
@@ -29,7 +29,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index 900ced5..732fb8b 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -35,7 +35,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index 36f2769..ad28663 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -33,7 +33,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/dma.h>
@@ -120,7 +119,7 @@
 	.cpu_dai_name = "bf5xx-i2s",
 	.codec_dai_name = "ssm2602-hifi",
 	.platform_name = "bf5xx-pcm-audio",
-	.codec_name = "ssm2602-codec.0-0x1b",
+	.codec_name = "ssm2602-codec.0-001b",
 	.ops = &bf5xx_ssm2602_ops,
 };
 
diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
index 1251239..5515ac9 100644
--- a/sound/soc/blackfin/bf5xx-tdm.c
+++ b/sound/soc/blackfin/bf5xx-tdm.c
@@ -210,7 +210,7 @@
 #ifdef CONFIG_PM
 static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
 {
-	struct sport_device *sport = dai->private_data;
+	struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
 
 	if (!dai->active)
 		return 0;
@@ -235,13 +235,13 @@
 		ret = -EBUSY;
 	}
 
-	ret = sport_config_rx(sport, IRFS, 0x1F, 0, 0);
+	ret = sport_config_rx(sport, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
 	}
 
-	ret = sport_config_tx(sport, ITFS, 0x1F, 0, 0);
+	ret = sport_config_tx(sport, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
@@ -303,14 +303,14 @@
 		goto sport_config_err;
 	}
 
-	ret = sport_config_rx(sport_handle, IRFS, 0x1F, 0, 0);
+	ret = sport_config_rx(sport_handle, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
 		goto sport_config_err;
 	}
 
-	ret = sport_config_tx(sport_handle, ITFS, 0x1F, 0, 0);
+	ret = sport_config_tx(sport_handle, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 01d19e9..06b6981 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -19,10 +19,10 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <sound/jack.h>
+#include <trace/events/asoc.h>
 
 #include "88pm860x-codec.h"
 
@@ -146,7 +146,6 @@
 
 	int			irq[4];
 	unsigned char		name[4][MAX_NAME_LEN];
-	unsigned char		reg_cache[REG_CACHE_SIZE];
 };
 
 /* -9450dB to 0dB in 150dB steps ( mute instead of -9450dB) */
@@ -1172,7 +1171,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable Audio PLL & Audio section */
 			data = AUDIO_PLL | AUDIO_SECTION_RESET
 				| AUDIO_SECTION_ON;
@@ -1185,7 +1184,7 @@
 		pm860x_set_bits(codec->control_data, REG_MISC2, data, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1263,6 +1262,12 @@
 	mask = pm860x->det.hs_shrt | pm860x->det.hook_det | pm860x->det.lo_shrt
 		| pm860x->det.hp_det;
 
+#ifndef CONFIG_SND_SOC_88PM860X_MODULE
+	if (status & (HEADSET_STATUS | MIC_STATUS | SHORT_HS1 | SHORT_HS2 |
+		      SHORT_LO1 | SHORT_LO2))
+		trace_snd_soc_jack_irq(dev_name(pm860x->codec->dev));
+#endif
+
 	if ((pm860x->det.hp_det & SND_JACK_HEADPHONE)
 		&& (status & HEADSET_STATUS))
 		report |= SND_JACK_HEADPHONE;
@@ -1346,6 +1351,7 @@
 static int pm860x_probe(struct snd_soc_codec *codec)
 {
 	struct pm860x_priv *pm860x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i, ret;
 
 	pm860x->codec = codec;
@@ -1358,7 +1364,7 @@
 					   pm860x->name[i], pm860x);
 		if (ret < 0) {
 			dev_err(codec->dev, "Failed to request IRQ!\n");
-			goto out_irq;
+			goto out;
 		}
 	}
 
@@ -1369,22 +1375,20 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "Failed to fill register cache: %d\n",
 			ret);
-		goto out_codec;
+		goto out;
 	}
 
 	snd_soc_add_controls(codec, pm860x_snd_controls,
 			     ARRAY_SIZE(pm860x_snd_controls));
-	snd_soc_dapm_new_controls(codec, pm860x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, pm860x_dapm_widgets,
 				  ARRAY_SIZE(pm860x_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	return 0;
 
-out_codec:
-	i = 3;
-out_irq:
-	for (; i >= 0; i--)
+out:
+	while (--i >= 0)
 		free_irq(pm860x->irq[i], pm860x);
-	return -EINVAL;
+	return ret;
 }
 
 static int pm860x_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 3b5690d..c48b23c 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -22,6 +22,7 @@
 	select SND_SOC_AK4535 if I2C
 	select SND_SOC_AK4642 if I2C
 	select SND_SOC_AK4671 if I2C
+	select SND_SOC_ALC5623 if I2C
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS42L51 if I2C
 	select SND_SOC_CS4270 if I2C
@@ -43,7 +44,7 @@
 	select SND_SOC_TWL6040 if TWL4030_CORE
 	select SND_SOC_UDA134X
 	select SND_SOC_UDA1380 if I2C
-	select SND_SOC_WL1273 if WL1273_CORE
+	select SND_SOC_WL1273 if RADIO_WL1273
 	select SND_SOC_WM2000 if I2C
 	select SND_SOC_WM8350 if MFD_WM8350
 	select SND_SOC_WM8400 if MFD_WM8400
@@ -54,9 +55,11 @@
 	select SND_SOC_WM8727
 	select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI
+	select SND_SOC_WM8737 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8741 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI
+	select SND_SOC_WM8770 if SPI_MASTER
 	select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8804 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8900 if I2C
@@ -75,6 +78,7 @@
 	select SND_SOC_WM8990 if I2C
 	select SND_SOC_WM8993 if I2C
 	select SND_SOC_WM8994 if MFD_WM8994
+	select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM9081 if I2C
 	select SND_SOC_WM9090 if I2C
 	select SND_SOC_WM9705 if SND_SOC_AC97_BUS
@@ -130,6 +134,9 @@
 config SND_SOC_AK4671
 	tristate
 
+config SND_SOC_ALC5623
+       tristate
+
 config SND_SOC_CQ0093VC
 	tristate
 
@@ -160,6 +167,9 @@
 config SND_SOC_DA7210
         tristate
 
+config SND_SOC_DMIC
+	tristate
+
 config SND_SOC_MAX98088
        tristate
 
@@ -231,6 +241,9 @@
 config SND_SOC_WM8731
 	tristate
 
+config SND_SOC_WM8737
+	tristate
+
 config SND_SOC_WM8741
 	tristate
 
@@ -240,6 +253,9 @@
 config SND_SOC_WM8753
 	tristate
 
+config SND_SOC_WM8770
+	tristate
+
 config SND_SOC_WM8776
 	tristate
 
@@ -294,6 +310,9 @@
 config SND_SOC_WM8994
 	tristate
 
+config SND_SOC_WM8995
+	tristate
+
 config SND_SOC_WM9081
 	tristate
 
@@ -318,3 +337,4 @@
 
 config SND_SOC_WM9090
 	tristate
+
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index f67a2d6..579af9c 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -14,9 +14,11 @@
 snd-soc-cs4270-objs := cs4270.o
 snd-soc-cx20442-objs := cx20442.o
 snd-soc-da7210-objs := da7210.o
+snd-soc-dmic-objs := dmic.o
 snd-soc-l3-objs := l3.o
 snd-soc-max98088-objs := max98088.o
 snd-soc-pcm3008-objs := pcm3008.o
+snd-soc-alc5623-objs := alc5623.o
 snd-soc-spdif-objs := spdif_transciever.o
 snd-soc-ssm2602-objs := ssm2602.o
 snd-soc-stac9766-objs := stac9766.o
@@ -38,9 +40,11 @@
 snd-soc-wm8727-objs := wm8727.o
 snd-soc-wm8728-objs := wm8728.o
 snd-soc-wm8731-objs := wm8731.o
+snd-soc-wm8737-objs := wm8737.o
 snd-soc-wm8741-objs := wm8741.o
 snd-soc-wm8750-objs := wm8750.o
 snd-soc-wm8753-objs := wm8753.o
+snd-soc-wm8770-objs := wm8770.o
 snd-soc-wm8776-objs := wm8776.o
 snd-soc-wm8804-objs := wm8804.o
 snd-soc-wm8900-objs := wm8900.o
@@ -58,7 +62,8 @@
 snd-soc-wm8988-objs := wm8988.o
 snd-soc-wm8990-objs := wm8990.o
 snd-soc-wm8993-objs := wm8993.o
-snd-soc-wm8994-objs := wm8994.o
+snd-soc-wm8994-objs := wm8994.o wm8994-tables.o
+snd-soc-wm8995-objs := wm8995.o
 snd-soc-wm9081-objs := wm9081.o
 snd-soc-wm9705-objs := wm9705.o
 snd-soc-wm9712-objs := wm9712.o
@@ -88,10 +93,12 @@
 obj-$(CONFIG_SND_SOC_CS4270)	+= snd-soc-cs4270.o
 obj-$(CONFIG_SND_SOC_CX20442)	+= snd-soc-cx20442.o
 obj-$(CONFIG_SND_SOC_DA7210)	+= snd-soc-da7210.o
+obj-$(CONFIG_SND_SOC_DMIC)	+= snd-soc-dmic.o
 obj-$(CONFIG_SND_SOC_L3)	+= snd-soc-l3.o
 obj-$(CONFIG_SND_SOC_JZ4740_CODEC)	+= snd-soc-jz4740-codec.o
 obj-$(CONFIG_SND_SOC_MAX98088)	+= snd-soc-max98088.o
 obj-$(CONFIG_SND_SOC_PCM3008)	+= snd-soc-pcm3008.o
+obj-$(CONFIG_SND_SOC_ALC5623)    += snd-soc-alc5623.o
 obj-$(CONFIG_SND_SOC_SPDIF)	+= snd-soc-spdif.o
 obj-$(CONFIG_SND_SOC_SSM2602)	+= snd-soc-ssm2602.o
 obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
@@ -113,9 +120,11 @@
 obj-$(CONFIG_SND_SOC_WM8727)	+= snd-soc-wm8727.o
 obj-$(CONFIG_SND_SOC_WM8728)	+= snd-soc-wm8728.o
 obj-$(CONFIG_SND_SOC_WM8731)	+= snd-soc-wm8731.o
+obj-$(CONFIG_SND_SOC_WM8737)	+= snd-soc-wm8737.o
 obj-$(CONFIG_SND_SOC_WM8741)	+= snd-soc-wm8741.o
 obj-$(CONFIG_SND_SOC_WM8750)	+= snd-soc-wm8750.o
 obj-$(CONFIG_SND_SOC_WM8753)	+= snd-soc-wm8753.o
+obj-$(CONFIG_SND_SOC_WM8770)	+= snd-soc-wm8770.o
 obj-$(CONFIG_SND_SOC_WM8776)	+= snd-soc-wm8776.o
 obj-$(CONFIG_SND_SOC_WM8804)	+= snd-soc-wm8804.o
 obj-$(CONFIG_SND_SOC_WM8900)	+= snd-soc-wm8900.o
@@ -134,6 +143,7 @@
 obj-$(CONFIG_SND_SOC_WM8990)	+= snd-soc-wm8990.o
 obj-$(CONFIG_SND_SOC_WM8993)	+= snd-soc-wm8993.o
 obj-$(CONFIG_SND_SOC_WM8994)	+= snd-soc-wm8994.o
+obj-$(CONFIG_SND_SOC_WM8995)	+= snd-soc-wm8995.o
 obj-$(CONFIG_SND_SOC_WM9081)	+= snd-soc-wm9081.o
 obj-$(CONFIG_SND_SOC_WM9705)	+= snd-soc-wm9705.o
 obj-$(CONFIG_SND_SOC_WM9712)	+= snd-soc-wm9712.o
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index d272534..ab63d52 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -27,7 +27,6 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
-#include <sound/soc-dapm.h>
 #include <linux/spi/spi.h>
 #include "ad1836.h"
 
@@ -220,6 +219,7 @@
 static int ad1836_probe(struct snd_soc_codec *codec)
 {
 	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 
 	codec->control_data = ad1836->control_data;
@@ -227,7 +227,6 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "failed to set cache I/O: %d\n",
 				ret);
-		kfree(ad1836);
 		return ret;
 	}
 
@@ -252,9 +251,9 @@
 
 	snd_soc_add_controls(codec, ad1836_snd_controls,
 			     ARRAY_SIZE(ad1836_snd_controls));
-	snd_soc_dapm_new_controls(codec, ad1836_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, ad1836_dapm_widgets,
 				  ARRAY_SIZE(ad1836_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index fa2834c..da46479 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -19,12 +19,10 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
-#include <sound/soc-dapm.h>
 #include "ad193x.h"
 
 /* codec private data */
 struct ad193x_priv {
-	u8 reg_cache[AD193X_NUM_REGS];
 	enum snd_soc_control_type bus_type;
 	void *control_data;
 	int sysclk;
@@ -353,6 +351,7 @@
 static int ad193x_probe(struct snd_soc_codec *codec)
 {
 	struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	codec->control_data = ad193x->control_data;
@@ -363,7 +362,6 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "failed to set cache I/O: %d\n",
 				ret);
-		kfree(ad193x);
 		return ret;
 	}
 
@@ -385,9 +383,9 @@
 
 	snd_soc_add_controls(codec, ad193x_snd_controls,
 			     ARRAY_SIZE(ad193x_snd_controls));
-	snd_soc_dapm_new_controls(codec, ad193x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, ad193x_dapm_widgets,
 				  ARRAY_SIZE(ad193x_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 410ccd5..34cb51e 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -29,7 +29,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "ad1980.h"
 
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index cd88c8f..8b38739 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "ak4535.h"
@@ -290,10 +289,11 @@
 
 static int ak4535_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ak4535_dapm_widgets,
-				  ARRAY_SIZE(ak4535_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, ak4535_dapm_widgets,
+				  ARRAY_SIZE(ak4535_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -366,9 +366,9 @@
 static int ak4535_mute(struct snd_soc_dai *dai, int mute)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	u16 mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
+	u16 mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
 	if (!mute)
-		ak4535_write(codec, AK4535_DAC, mute_reg);
+		ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20);
 	else
 		ak4535_write(codec, AK4535_DAC, mute_reg | 0x20);
 	return 0;
@@ -381,11 +381,11 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
-		ak4535_write(codec, AK4535_DAC, mute_reg);
+		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
+		ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20);
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
+		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
 		ak4535_write(codec, AK4535_DAC, mute_reg | 0x20);
 		break;
 	case SND_SOC_BIAS_STANDBY:
@@ -399,7 +399,7 @@
 		ak4535_write(codec, AK4535_PM1, i & (~0x80));
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 90c90b7..f00eba3 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -26,7 +26,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 24f5f49..2ec75ab 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -17,7 +17,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -28,7 +27,6 @@
 struct ak4671_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[AK4671_CACHEREGNUM];
 };
 
 /* ak4671 register cache & default register settings */
@@ -437,10 +435,11 @@
 
 static int ak4671_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ak4671_dapm_widgets,
-				  ARRAY_SIZE(ak4671_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, ak4671_dapm_widgets,
+				  ARRAY_SIZE(ak4671_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -602,7 +601,7 @@
 		snd_soc_write(codec, AK4671_AD_DA_POWER_MANAGEMENT, 0x00);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
new file mode 100644
index 0000000..4f377c9
--- /dev/null
+++ b/sound/soc/codecs/alc5623.c
@@ -0,0 +1,1117 @@
+/*
+ * alc5623.c  --  alc562[123] ALSA Soc Audio driver
+ *
+ * Copyright 2008 Realtek Microelectronics
+ * Author: flove <flove@realtek.com> Ethan <eku@marvell.com>
+ *
+ * Copyright 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ *
+ * Based on WM8753.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/alc5623.h>
+
+#include "alc5623.h"
+
+static int caps_charge = 2000;
+module_param(caps_charge, int, 0);
+MODULE_PARM_DESC(caps_charge, "ALC5623 cap charge time (msecs)");
+
+/* codec private data */
+struct alc5623_priv {
+	enum snd_soc_control_type control_type;
+	void *control_data;
+	struct mutex mutex;
+	u8 id;
+	unsigned int sysclk;
+	u16 reg_cache[ALC5623_VENDOR_ID2+2];
+	unsigned int add_ctrl;
+	unsigned int jack_det_ctrl;
+};
+
+static void alc5623_fill_cache(struct snd_soc_codec *codec)
+{
+	int i, step = codec->driver->reg_cache_step;
+	u16 *cache = codec->reg_cache;
+
+	/* not really efficient ... */
+	for (i = 0 ; i < codec->driver->reg_cache_size ; i += step)
+		cache[i] = codec->hw_read(codec, i);
+}
+
+static inline int alc5623_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, ALC5623_RESET, 0);
+}
+
+static int amp_mixer_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	/* To power the class-D amp generators/speaker on or off, */
+	/* the indexed register 46h must be written: write the    */
+	/* index number (here 0x46) to reg 0x6a, then 0xffff (on) */
+	/* or 0 (off) to reg 0x6c.                                 */
+	snd_soc_write(w->codec, ALC5623_HID_CTRL_INDEX, 0x46);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0xFFFF);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0);
+		break;
+	}
+
+	return 0;
+}
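/*
 * The index/data sequence above is the general access pattern for the
 * ALC5623's hidden registers and could be factored into a helper; this is
 * only a sketch and the helper is not part of the driver:
 */
static void alc5623_index_write(struct snd_soc_codec *codec,
				unsigned int index, unsigned int value)
{
	/* Select the indexed register, then write its payload. */
	snd_soc_write(codec, ALC5623_HID_CTRL_INDEX, index);
	snd_soc_write(codec, ALC5623_HID_CTRL_DATA, value);
}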
+
+/*
+ * ALC5623 Controls
+ */
+
+static const DECLARE_TLV_DB_SCALE(vol_tlv, -3450, 150, 0);
+static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
+static const unsigned int boost_tlv[] = {
+	TLV_DB_RANGE_HEAD(3),
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
+
+static const struct snd_kcontrol_new rt5621_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Speaker Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Speaker Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Headphone Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Headphone Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5622_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Speaker Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Speaker Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Line Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Line Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Line Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Line Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Headphone Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Headphone Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_snd_controls[] = {
+	SOC_DOUBLE_TLV("Auxout Playback Volume",
+			ALC5623_MONO_AUX_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Auxout Playback Switch",
+			ALC5623_MONO_AUX_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("PCM Playback Volume",
+			ALC5623_STEREO_DAC_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("AuxI Capture Volume",
+			ALC5623_AUXIN_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("LineIn Capture Volume",
+			ALC5623_LINE_IN_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_SINGLE_TLV("Mic1 Capture Volume",
+			ALC5623_MIC_VOL, 8, 31, 1, vol_tlv),
+	SOC_SINGLE_TLV("Mic2 Capture Volume",
+			ALC5623_MIC_VOL, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("Rec Capture Volume",
+			ALC5623_ADC_REC_GAIN, 7, 0, 31, 0, adc_rec_tlv),
+	SOC_SINGLE_TLV("Mic 1 Boost Volume",
+			ALC5623_MIC_CTRL, 10, 2, 0, boost_tlv),
+	SOC_SINGLE_TLV("Mic 2 Boost Volume",
+			ALC5623_MIC_CTRL, 8, 2, 0, boost_tlv),
+	SOC_SINGLE_TLV("Digital Boost Volume",
+			ALC5623_ADD_CTRL_REG, 4, 3, 0, dig_tlv),
+};
+
+/*
+ * DAPM Controls
+ */
+static const struct snd_kcontrol_new alc5623_hp_mixer_controls[] = {
+SOC_DAPM_SINGLE("LI2HP Playback Switch", ALC5623_LINE_IN_VOL, 15, 1, 1),
+SOC_DAPM_SINGLE("AUXI2HP Playback Switch", ALC5623_AUXIN_VOL, 15, 1, 1),
+SOC_DAPM_SINGLE("MIC12HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 15, 1, 1),
+SOC_DAPM_SINGLE("MIC22HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 7, 1, 1),
+SOC_DAPM_SINGLE("DAC2HP Playback Switch", ALC5623_STEREO_DAC_VOL, 15, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_hpl_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2HP_L Playback Switch", ALC5623_ADC_REC_GAIN, 15, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_hpr_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2HP_R Playback Switch", ALC5623_ADC_REC_GAIN, 14, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_mono_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2MONO_L Playback Switch", ALC5623_ADC_REC_GAIN, 13, 1, 1),
+SOC_DAPM_SINGLE("ADC2MONO_R Playback Switch", ALC5623_ADC_REC_GAIN, 12, 1, 1),
+SOC_DAPM_SINGLE("LI2MONO Playback Switch", ALC5623_LINE_IN_VOL, 13, 1, 1),
+SOC_DAPM_SINGLE("AUXI2MONO Playback Switch", ALC5623_AUXIN_VOL, 13, 1, 1),
+SOC_DAPM_SINGLE("MIC12MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 13, 1, 1),
+SOC_DAPM_SINGLE("MIC22MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 5, 1, 1),
+SOC_DAPM_SINGLE("DAC2MONO Playback Switch", ALC5623_STEREO_DAC_VOL, 13, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_speaker_mixer_controls[] = {
+SOC_DAPM_SINGLE("LI2SPK Playback Switch", ALC5623_LINE_IN_VOL, 14, 1, 1),
+SOC_DAPM_SINGLE("AUXI2SPK Playback Switch", ALC5623_AUXIN_VOL, 14, 1, 1),
+SOC_DAPM_SINGLE("MIC12SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 14, 1, 1),
+SOC_DAPM_SINGLE("MIC22SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 6, 1, 1),
+SOC_DAPM_SINGLE("DAC2SPK Playback Switch", ALC5623_STEREO_DAC_VOL, 14, 1, 1),
+};
+
+/* Left Record Mixer */
+static const struct snd_kcontrol_new alc5623_captureL_mixer_controls[] = {
+SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 14, 1, 1),
+SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 13, 1, 1),
+SOC_DAPM_SINGLE("LineInL Capture Switch", ALC5623_ADC_REC_MIXER, 12, 1, 1),
+SOC_DAPM_SINGLE("Left AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 11, 1, 1),
+SOC_DAPM_SINGLE("HPMixerL Capture Switch", ALC5623_ADC_REC_MIXER, 10, 1, 1),
+SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 9, 1, 1),
+SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 8, 1, 1),
+};
+
+/* Right Record Mixer */
+static const struct snd_kcontrol_new alc5623_captureR_mixer_controls[] = {
+SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 6, 1, 1),
+SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 5, 1, 1),
+SOC_DAPM_SINGLE("LineInR Capture Switch", ALC5623_ADC_REC_MIXER, 4, 1, 1),
+SOC_DAPM_SINGLE("Right AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 3, 1, 1),
+SOC_DAPM_SINGLE("HPMixerR Capture Switch", ALC5623_ADC_REC_MIXER, 2, 1, 1),
+SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 1, 1, 1),
+SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 0, 1, 1),
+};
+
+static const char *alc5623_spk_n_sour_sel[] = {
+		"RN/-R", "RP/+R", "LN/-R", "Vmid" };
+static const char *alc5623_hpl_out_input_sel[] = {
+		"Vmid", "HP Left Mix"};
+static const char *alc5623_hpr_out_input_sel[] = {
+		"Vmid", "HP Right Mix"};
+static const char *alc5623_spkout_input_sel[] = {
+		"Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"};
+static const char *alc5623_aux_out_input_sel[] = {
+		"Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"};
+
+/* auxout output mux */
+static const struct soc_enum alc5623_aux_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 6, 4, alc5623_aux_out_input_sel);
+static const struct snd_kcontrol_new alc5623_auxout_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_aux_out_input_enum);
+
+/* speaker output mux */
+static const struct soc_enum alc5623_spkout_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 10, 4, alc5623_spkout_input_sel);
+static const struct snd_kcontrol_new alc5623_spkout_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_spkout_input_enum);
+
+/* headphone left output mux */
+static const struct soc_enum alc5623_hpl_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 9, 2, alc5623_hpl_out_input_sel);
+static const struct snd_kcontrol_new alc5623_hpl_out_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_hpl_out_input_enum);
+
+/* headphone right output mux */
+static const struct soc_enum alc5623_hpr_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 8, 2, alc5623_hpr_out_input_sel);
+static const struct snd_kcontrol_new alc5623_hpr_out_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_hpr_out_input_enum);
+
+/* speaker output N select */
+static const struct soc_enum alc5623_spk_n_sour_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 14, 4, alc5623_spk_n_sour_sel);
+static const struct snd_kcontrol_new alc5623_spkoutn_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_spk_n_sour_enum);
+
+static const struct snd_soc_dapm_widget alc5623_dapm_widgets[] = {
+/* Muxes */
+SND_SOC_DAPM_MUX("AuxOut Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_auxout_mux_controls),
+SND_SOC_DAPM_MUX("SpeakerOut Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_spkout_mux_controls),
+SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_hpl_out_mux_controls),
+SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_hpr_out_mux_controls),
+SND_SOC_DAPM_MUX("SpeakerOut N Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_spkoutn_mux_controls),
+
+/* output mixers */
+SND_SOC_DAPM_MIXER("HP Mix", SND_SOC_NOPM, 0, 0,
+	&alc5623_hp_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hp_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPR Mix", ALC5623_PWR_MANAG_ADD2, 4, 0,
+	&alc5623_hpr_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hpr_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPL Mix", ALC5623_PWR_MANAG_ADD2, 5, 0,
+	&alc5623_hpl_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hpl_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPOut Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Mono Mix", ALC5623_PWR_MANAG_ADD2, 2, 0,
+	&alc5623_mono_mixer_controls[0],
+	ARRAY_SIZE(alc5623_mono_mixer_controls)),
+SND_SOC_DAPM_MIXER("Speaker Mix", ALC5623_PWR_MANAG_ADD2, 3, 0,
+	&alc5623_speaker_mixer_controls[0],
+	ARRAY_SIZE(alc5623_speaker_mixer_controls)),
+
+/* input mixers */
+SND_SOC_DAPM_MIXER("Left Capture Mix", ALC5623_PWR_MANAG_ADD2, 1, 0,
+	&alc5623_captureL_mixer_controls[0],
+	ARRAY_SIZE(alc5623_captureL_mixer_controls)),
+SND_SOC_DAPM_MIXER("Right Capture Mix", ALC5623_PWR_MANAG_ADD2, 0, 0,
+	&alc5623_captureR_mixer_controls[0],
+	ARRAY_SIZE(alc5623_captureR_mixer_controls)),
+
+SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback",
+	ALC5623_PWR_MANAG_ADD2, 9, 0),
+SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback",
+	ALC5623_PWR_MANAG_ADD2, 8, 0),
+SND_SOC_DAPM_MIXER("I2S Mix", ALC5623_PWR_MANAG_ADD1, 15, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("AuxI Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Line Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture",
+	ALC5623_PWR_MANAG_ADD2, 7, 0),
+SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture",
+	ALC5623_PWR_MANAG_ADD2, 6, 0),
+SND_SOC_DAPM_PGA("Left Headphone", ALC5623_PWR_MANAG_ADD3, 10, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right Headphone", ALC5623_PWR_MANAG_ADD3, 9, 0, NULL, 0),
+SND_SOC_DAPM_PGA("SpeakerOut", ALC5623_PWR_MANAG_ADD3, 12, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left AuxOut", ALC5623_PWR_MANAG_ADD3, 14, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right AuxOut", ALC5623_PWR_MANAG_ADD3, 13, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left LineIn", ALC5623_PWR_MANAG_ADD3, 7, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right LineIn", ALC5623_PWR_MANAG_ADD3, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left AuxI", ALC5623_PWR_MANAG_ADD3, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right AuxI", ALC5623_PWR_MANAG_ADD3, 4, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC1 PGA", ALC5623_PWR_MANAG_ADD3, 3, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC2 PGA", ALC5623_PWR_MANAG_ADD3, 2, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC1 Pre Amp", ALC5623_PWR_MANAG_ADD3, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC2 Pre Amp", ALC5623_PWR_MANAG_ADD3, 0, 0, NULL, 0),
+SND_SOC_DAPM_MICBIAS("Mic Bias1", ALC5623_PWR_MANAG_ADD1, 11, 0),
+
+SND_SOC_DAPM_OUTPUT("AUXOUTL"),
+SND_SOC_DAPM_OUTPUT("AUXOUTR"),
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("HPR"),
+SND_SOC_DAPM_OUTPUT("SPKOUT"),
+SND_SOC_DAPM_OUTPUT("SPKOUTN"),
+SND_SOC_DAPM_INPUT("LINEINL"),
+SND_SOC_DAPM_INPUT("LINEINR"),
+SND_SOC_DAPM_INPUT("AUXINL"),
+SND_SOC_DAPM_INPUT("AUXINR"),
+SND_SOC_DAPM_INPUT("MIC1"),
+SND_SOC_DAPM_INPUT("MIC2"),
+SND_SOC_DAPM_VMID("Vmid"),
+};
+
+static const char *alc5623_amp_names[] = {"AB Amp", "D Amp"};
+static const struct soc_enum alc5623_amp_enum =
+	SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 13, 2, alc5623_amp_names);
+static const struct snd_kcontrol_new alc5623_amp_mux_controls =
+	SOC_DAPM_ENUM("Route", alc5623_amp_enum);
+
+static const struct snd_soc_dapm_widget alc5623_dapm_amp_widgets[] = {
+SND_SOC_DAPM_PGA_E("D Amp", ALC5623_PWR_MANAG_ADD2, 14, 0, NULL, 0,
+	amp_mixer_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_PGA("AB Amp", ALC5623_PWR_MANAG_ADD2, 15, 0, NULL, 0),
+SND_SOC_DAPM_MUX("AB-D Amp Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_amp_mux_controls),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	/* virtual mixer - mixes left & right channels */
+	{"I2S Mix", NULL,				"Left DAC"},
+	{"I2S Mix", NULL,				"Right DAC"},
+	{"Line Mix", NULL,				"Right LineIn"},
+	{"Line Mix", NULL,				"Left LineIn"},
+	{"AuxI Mix", NULL,				"Left AuxI"},
+	{"AuxI Mix", NULL,				"Right AuxI"},
+	{"AUXOUTL", NULL,				"Left AuxOut"},
+	{"AUXOUTR", NULL,				"Right AuxOut"},
+
+	/* HP mixer */
+	{"HPL Mix", "ADC2HP_L Playback Switch",		"Left Capture Mix"},
+	{"HPL Mix", NULL,				"HP Mix"},
+	{"HPR Mix", "ADC2HP_R Playback Switch",		"Right Capture Mix"},
+	{"HPR Mix", NULL,				"HP Mix"},
+	{"HP Mix", "LI2HP Playback Switch",		"Line Mix"},
+	{"HP Mix", "AUXI2HP Playback Switch",		"AuxI Mix"},
+	{"HP Mix", "MIC12HP Playback Switch",		"MIC1 PGA"},
+	{"HP Mix", "MIC22HP Playback Switch",		"MIC2 PGA"},
+	{"HP Mix", "DAC2HP Playback Switch",		"I2S Mix"},
+
+	/* speaker mixer */
+	{"Speaker Mix", "LI2SPK Playback Switch",	"Line Mix"},
+	{"Speaker Mix", "AUXI2SPK Playback Switch",	"AuxI Mix"},
+	{"Speaker Mix", "MIC12SPK Playback Switch",	"MIC1 PGA"},
+	{"Speaker Mix", "MIC22SPK Playback Switch",	"MIC2 PGA"},
+	{"Speaker Mix", "DAC2SPK Playback Switch",	"I2S Mix"},
+
+	/* mono mixer */
+	{"Mono Mix", "ADC2MONO_L Playback Switch",	"Left Capture Mix"},
+	{"Mono Mix", "ADC2MONO_R Playback Switch",	"Right Capture Mix"},
+	{"Mono Mix", "LI2MONO Playback Switch",		"Line Mix"},
+	{"Mono Mix", "AUXI2MONO Playback Switch",	"AuxI Mix"},
+	{"Mono Mix", "MIC12MONO Playback Switch",	"MIC1 PGA"},
+	{"Mono Mix", "MIC22MONO Playback Switch",	"MIC2 PGA"},
+	{"Mono Mix", "DAC2MONO Playback Switch",	"I2S Mix"},
+
+	/* Left record mixer */
+	{"Left Capture Mix", "LineInL Capture Switch",	"LINEINL"},
+	{"Left Capture Mix", "Left AuxI Capture Switch", "AUXINL"},
+	{"Left Capture Mix", "Mic1 Capture Switch",	"MIC1 Pre Amp"},
+	{"Left Capture Mix", "Mic2 Capture Switch",	"MIC2 Pre Amp"},
+	{"Left Capture Mix", "HPMixerL Capture Switch", "HPL Mix"},
+	{"Left Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"},
+	{"Left Capture Mix", "MonoMixer Capture Switch", "Mono Mix"},
+
+	/* Right record mixer */
+	{"Right Capture Mix", "LineInR Capture Switch",	"LINEINR"},
+	{"Right Capture Mix", "Right AuxI Capture Switch",	"AUXINR"},
+	{"Right Capture Mix", "Mic1 Capture Switch",	"MIC1 Pre Amp"},
+	{"Right Capture Mix", "Mic2 Capture Switch",	"MIC2 Pre Amp"},
+	{"Right Capture Mix", "HPMixerR Capture Switch", "HPR Mix"},
+	{"Right Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"},
+	{"Right Capture Mix", "MonoMixer Capture Switch", "Mono Mix"},
+
+	/* headphone left mux */
+	{"Left Headphone Mux", "HP Left Mix",		"HPL Mix"},
+	{"Left Headphone Mux", "Vmid",			"Vmid"},
+
+	/* headphone right mux */
+	{"Right Headphone Mux", "HP Right Mix",		"HPR Mix"},
+	{"Right Headphone Mux", "Vmid",			"Vmid"},
+
+	/* speaker out mux */
+	{"SpeakerOut Mux", "Vmid",			"Vmid"},
+	{"SpeakerOut Mux", "HPOut Mix",			"HPOut Mix"},
+	{"SpeakerOut Mux", "Speaker Mix",		"Speaker Mix"},
+	{"SpeakerOut Mux", "Mono Mix",			"Mono Mix"},
+
+	/* Mono/Aux Out mux */
+	{"AuxOut Mux", "Vmid",				"Vmid"},
+	{"AuxOut Mux", "HPOut Mix",			"HPOut Mix"},
+	{"AuxOut Mux", "Speaker Mix",			"Speaker Mix"},
+	{"AuxOut Mux", "Mono Mix",			"Mono Mix"},
+
+	/* output pga */
+	{"HPL", NULL,					"Left Headphone"},
+	{"Left Headphone", NULL,			"Left Headphone Mux"},
+	{"HPR", NULL,					"Right Headphone"},
+	{"Right Headphone", NULL,			"Right Headphone Mux"},
+	{"Left AuxOut", NULL,				"AuxOut Mux"},
+	{"Right AuxOut", NULL,				"AuxOut Mux"},
+
+	/* input pga */
+	{"Left LineIn", NULL,				"LINEINL"},
+	{"Right LineIn", NULL,				"LINEINR"},
+	{"Left AuxI", NULL,				"AUXINL"},
+	{"Right AuxI", NULL,				"AUXINR"},
+	{"MIC1 Pre Amp", NULL,				"MIC1"},
+	{"MIC2 Pre Amp", NULL,				"MIC2"},
+	{"MIC1 PGA", NULL,				"MIC1 Pre Amp"},
+	{"MIC2 PGA", NULL,				"MIC2 Pre Amp"},
+
+	/* left ADC */
+	{"Left ADC", NULL,				"Left Capture Mix"},
+
+	/* right ADC */
+	{"Right ADC", NULL,				"Right Capture Mix"},
+
+	{"SpeakerOut N Mux", "RN/-R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "RP/+R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "LN/-R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "Vmid",			"Vmid"},
+
+	{"SPKOUT", NULL,				"SpeakerOut"},
+	{"SPKOUTN", NULL,				"SpeakerOut N Mux"},
+};
+
+static const struct snd_soc_dapm_route intercon_spk[] = {
+	{"SpeakerOut", NULL,				"SpeakerOut Mux"},
+};
+
+static const struct snd_soc_dapm_route intercon_amp_spk[] = {
+	{"AB Amp", NULL,				"SpeakerOut Mux"},
+	{"D Amp", NULL,					"SpeakerOut Mux"},
+	{"AB-D Amp Mux", "AB Amp",			"AB Amp"},
+	{"AB-D Amp Mux", "D Amp",			"D Amp"},
+	{"SpeakerOut", NULL,				"AB-D Amp Mux"},
+};
+
+/* PLL divisors */
+struct _pll_div {
+	u32 pll_in;
+	u32 pll_out;
+	u16 regvalue;
+};
+
+/* Note: PLL code from the original alc5623 driver; not sure how good it is. */
+/* Useful only for master mode. */
+static const struct _pll_div codec_master_pll_div[] = {
+
+	{  2048000,  8192000,	0x0ea0},
+	{  3686400,  8192000,	0x4e27},
+	{ 12000000,  8192000,	0x456b},
+	{ 13000000,  8192000,	0x495f},
+	{ 13100000,  8192000,	0x0320},
+	{  2048000,  11289600,	0xf637},
+	{  3686400,  11289600,	0x2f22},
+	{ 12000000,  11289600,	0x3e2f},
+	{ 13000000,  11289600,	0x4d5b},
+	{ 13100000,  11289600,	0x363b},
+	{  2048000,  16384000,	0x1ea0},
+	{  3686400,  16384000,	0x9e27},
+	{ 12000000,  16384000,	0x452b},
+	{ 13000000,  16384000,	0x542f},
+	{ 13100000,  16384000,	0x03a0},
+	{  2048000,  16934400,	0xe625},
+	{  3686400,  16934400,	0x9126},
+	{ 12000000,  16934400,	0x4d2c},
+	{ 13000000,  16934400,	0x742f},
+	{ 13100000,  16934400,	0x3c27},
+	{  2048000,  22579200,	0x2aa0},
+	{  3686400,  22579200,	0x2f20},
+	{ 12000000,  22579200,	0x7e2f},
+	{ 13000000,  22579200,	0x742f},
+	{ 13100000,  22579200,	0x3c27},
+	{  2048000,  24576000,	0x2ea0},
+	{  3686400,  24576000,	0xee27},
+	{ 12000000,  24576000,	0x2915},
+	{ 13000000,  24576000,	0x772e},
+	{ 13100000,  24576000,	0x0d20},
+};
+
+static const struct _pll_div codec_slave_pll_div[] = {
+
+	{  1024000,  16384000,  0x3ea0},
+	{  1411200,  22579200,	0x3ea0},
+	{  1536000,  24576000,	0x3ea0},
+	{  2048000,  16384000,  0x1ea0},
+	{  2822400,  22579200,	0x1ea0},
+	{  3072000,  24576000,	0x1ea0},
+
+};
+
+static int alc5623_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+		int source, unsigned int freq_in, unsigned int freq_out)
+{
+	int i;
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int gbl_clk = 0, pll_div = 0;
+	u16 reg;
+
+	if (pll_id < ALC5623_PLL_FR_MCLK || pll_id > ALC5623_PLL_FR_BCK)
+		return -ENODEV;
+
+	/* Disable PLL power */
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_PLL,
+				0);
+
+	/* pll is not used in slave mode */
+	reg = snd_soc_read(codec, ALC5623_DAI_CONTROL);
+	if (reg & ALC5623_DAI_SDP_SLAVE_MODE)
+		return 0;
+
+	if (!freq_in || !freq_out)
+		return 0;
+
+	switch (pll_id) {
+	case ALC5623_PLL_FR_MCLK:
+		for (i = 0; i < ARRAY_SIZE(codec_master_pll_div); i++) {
+			if (codec_master_pll_div[i].pll_in == freq_in
+			   && codec_master_pll_div[i].pll_out == freq_out) {
+				/* PLL source from MCLK */
+				pll_div  = codec_master_pll_div[i].regvalue;
+				break;
+			}
+		}
+		break;
+	case ALC5623_PLL_FR_BCK:
+		for (i = 0; i < ARRAY_SIZE(codec_slave_pll_div); i++) {
+			if (codec_slave_pll_div[i].pll_in == freq_in
+			   && codec_slave_pll_div[i].pll_out == freq_out) {
+				/* PLL source from Bitclk */
+				gbl_clk = ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK;
+				pll_div = codec_slave_pll_div[i].regvalue;
+				break;
+			}
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!pll_div)
+		return -EINVAL;
+
+	snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk);
+	snd_soc_write(codec, ALC5623_PLL_CTRL, pll_div);
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_PLL,
+				ALC5623_PWR_ADD2_PLL);
+	gbl_clk |= ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL;
+	snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk);
+
+	return 0;
+}
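/*
 * From a machine driver this is reached through the ASoC core wrapper; a
 * minimal sketch (the frequency pair comes from codec_master_pll_div above,
 * the surrounding hw_params context is assumed):
 */
static int example_start_pll(struct snd_soc_dai *codec_dai)
{
	/* Derive a 24.576 MHz sysclk (48 kHz family) from a 12 MHz MCLK. */
	return snd_soc_dai_set_pll(codec_dai, ALC5623_PLL_FR_MCLK, 0,
				   12000000, 24576000);
}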
+
+struct _coeff_div {
+	u16 fs;
+	u16 regvalue;
+};
+
+/* codec hifi mclk (after PLL) clock divider coefficients */
+/* values inspired by the BCLK=32Fs column of the Appendix A table */
+static const struct _coeff_div coeff_div[] = {
+	{256*8, 0x3a69},
+	{384*8, 0x3c6b},
+	{256*4, 0x2a69},
+	{384*4, 0x2c6b},
+	{256*2, 0x1a69},
+	{384*2, 0x1c6b},
+	{256*1, 0x0a69},
+	{384*1, 0x0c6b},
+};
+
+static int get_coeff(struct snd_soc_codec *codec, int rate)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (coeff_div[i].fs * rate == alc5623->sysclk)
+			return i;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Clock after PLL and dividers
+ */
+static int alc5623_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+
+	switch (freq) {
+	case  8192000:
+	case 11289600:
+	case 12288000:
+	case 16384000:
+	case 16934400:
+	case 18432000:
+	case 22579200:
+	case 24576000:
+		alc5623->sysclk = freq;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int alc5623_set_dai_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 iface = 0;
+
+	/* set master/slave audio interface */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		iface = ALC5623_DAI_SDP_MASTER_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		iface = ALC5623_DAI_SDP_SLAVE_MODE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* interface format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= ALC5623_DAI_I2S_DF_I2S;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		iface |= ALC5623_DAI_I2S_DF_RIGHT;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= ALC5623_DAI_I2S_DF_LEFT;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		iface |= ALC5623_DAI_I2S_DF_PCM;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		iface |= ALC5623_DAI_I2S_DF_PCM | ALC5623_DAI_I2S_PCM_MODE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* clock inversion */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return snd_soc_write(codec, ALC5623_DAI_CONTROL, iface);
+}
+
+static int alc5623_pcm_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	int coeff, rate;
+	u16 iface;
+
+	iface = snd_soc_read(codec, ALC5623_DAI_CONTROL);
+	iface &= ~ALC5623_DAI_I2S_DL_MASK;
+
+	/* bit size */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		iface |= ALC5623_DAI_I2S_DL_16;
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		iface |= ALC5623_DAI_I2S_DL_20;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iface |= ALC5623_DAI_I2S_DL_24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		iface |= ALC5623_DAI_I2S_DL_32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* set iface & srate */
+	snd_soc_write(codec, ALC5623_DAI_CONTROL, iface);
+	rate = params_rate(params);
+	coeff = get_coeff(codec, rate);
+	if (coeff < 0)
+		return -EINVAL;
+
+	coeff = coeff_div[coeff].regvalue;
+	dev_dbg(codec->dev, "%s: sysclk=%d,rate=%d,coeff=0x%04x\n",
+		__func__, alc5623->sysclk, rate, coeff);
+	snd_soc_write(codec, ALC5623_STEREO_AD_DA_CLK_CTRL, coeff);
+
+	return 0;
+}
+
+static int alc5623_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	u16 hp_mute = ALC5623_MISC_M_DAC_L_INPUT | ALC5623_MISC_M_DAC_R_INPUT;
+	u16 mute_reg = snd_soc_read(codec, ALC5623_MISC_CTRL) & ~hp_mute;
+
+	if (mute)
+		mute_reg |= hp_mute;
+
+	return snd_soc_write(codec, ALC5623_MISC_CTRL, mute_reg);
+}
+
+#define ALC5623_ADD2_POWER_EN (ALC5623_PWR_ADD2_VREF \
+	| ALC5623_PWR_ADD2_DAC_REF_CIR)
+
+#define ALC5623_ADD3_POWER_EN (ALC5623_PWR_ADD3_MAIN_BIAS \
+	| ALC5623_PWR_ADD3_MIC1_BOOST_AD)
+
+#define ALC5623_ADD1_POWER_EN \
+	(ALC5623_PWR_ADD1_SHORT_CURR_DET_EN | ALC5623_PWR_ADD1_SOFTGEN_EN \
+	| ALC5623_PWR_ADD1_DEPOP_BUF_HP | ALC5623_PWR_ADD1_HP_OUT_AMP \
+	| ALC5623_PWR_ADD1_HP_OUT_ENH_AMP)
+
+#define ALC5623_ADD1_POWER_EN_5622 \
+	(ALC5623_PWR_ADD1_SHORT_CURR_DET_EN \
+	| ALC5623_PWR_ADD1_HP_OUT_AMP)
+
+static void enable_power_depop(struct snd_soc_codec *codec)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD1,
+				ALC5623_PWR_ADD1_SOFTGEN_EN,
+				ALC5623_PWR_ADD1_SOFTGEN_EN);
+
+	snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, ALC5623_ADD3_POWER_EN);
+
+	snd_soc_update_bits(codec, ALC5623_MISC_CTRL,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN);
+
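+	/* wait for the depop circuit to settle before enabling the amplifiers */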
+	msleep(500);
+
+	snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, ALC5623_ADD2_POWER_EN);
+
+	/* avoid writing '1' into 5622 reserved bits */
+	if (alc5623->id == 0x22)
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1,
+			ALC5623_ADD1_POWER_EN_5622);
+	else
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1,
+			ALC5623_ADD1_POWER_EN);
+
+	/* disable HP Depop2 */
+	snd_soc_update_bits(codec, ALC5623_MISC_CTRL,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN,
+				0);
+
+}
+
+static int alc5623_set_bias_level(struct snd_soc_codec *codec,
+				      enum snd_soc_bias_level level)
+{
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		enable_power_depop(codec);
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		/* everything off except vref/vmid */
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_VREF);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3,
+				ALC5623_PWR_ADD3_MAIN_BIAS);
+		break;
+	case SND_SOC_BIAS_OFF:
+		/* everything off, dac mute, inactive */
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, 0);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, 0);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1, 0);
+		break;
+	}
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define ALC5623_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE \
+			| SNDRV_PCM_FMTBIT_S24_LE \
+			| SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops alc5623_dai_ops = {
+		.hw_params = alc5623_pcm_hw_params,
+		.digital_mute = alc5623_mute,
+		.set_fmt = alc5623_set_dai_fmt,
+		.set_sysclk = alc5623_set_dai_sysclk,
+		.set_pll = alc5623_set_dai_pll,
+};
+
+static struct snd_soc_dai_driver alc5623_dai = {
+	.name = "alc5623-hifi",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =	8000,
+		.rate_max =	48000,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = ALC5623_FORMATS,},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =	8000,
+		.rate_max =	48000,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = ALC5623_FORMATS,},
+
+	.ops = &alc5623_dai_ops,
+};
+
+static int alc5623_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
+{
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int alc5623_resume(struct snd_soc_codec *codec)
+{
+	int i, step = codec->driver->reg_cache_step;
+	u16 *cache = codec->reg_cache;
+
+	/* Sync reg_cache with the hardware */
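+	/* start at register 2 so the reset register (0x00) is left untouched */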
+	for (i = 2 ; i < codec->driver->reg_cache_size ; i += step)
+		snd_soc_write(codec, i, cache[i]);
+
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* charge alc5623 caps */
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
+		alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		alc5623_set_bias_level(codec, codec->dapm.bias_level);
+	}
+
+	return 0;
+}
+
+static int alc5623_probe(struct snd_soc_codec *codec)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int ret;
+
+	ret = snd_soc_codec_set_cache_io(codec, 8, 16, alc5623->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	alc5623_reset(codec);
+	alc5623_fill_cache(codec);
+
+	/* power on device */
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	if (alc5623->add_ctrl) {
+		snd_soc_write(codec, ALC5623_ADD_CTRL_REG,
+				alc5623->add_ctrl);
+	}
+
+	if (alc5623->jack_det_ctrl) {
+		snd_soc_write(codec, ALC5623_JACK_DET_CTRL,
+				alc5623->jack_det_ctrl);
+	}
+
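+	/* the 5621, 5622 and 5623 expose different sets of volume controls */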
+	switch (alc5623->id) {
+	case 0x21:
+		snd_soc_add_controls(codec, rt5621_vol_snd_controls,
+			ARRAY_SIZE(rt5621_vol_snd_controls));
+		break;
+	case 0x22:
+		snd_soc_add_controls(codec, rt5622_vol_snd_controls,
+			ARRAY_SIZE(rt5622_vol_snd_controls));
+		break;
+	case 0x23:
+		snd_soc_add_controls(codec, alc5623_vol_snd_controls,
+			ARRAY_SIZE(alc5623_vol_snd_controls));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_add_controls(codec, alc5623_snd_controls,
+			ARRAY_SIZE(alc5623_snd_controls));
+
+	snd_soc_dapm_new_controls(dapm, alc5623_dapm_widgets,
+					ARRAY_SIZE(alc5623_dapm_widgets));
+
+	/* set up audio path interconnects */
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
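+	/* speaker output widgets and routes differ between 5621/5622 and 5623 */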
+	switch (alc5623->id) {
+	case 0x21:
+	case 0x22:
+		snd_soc_dapm_new_controls(dapm, alc5623_dapm_amp_widgets,
+					ARRAY_SIZE(alc5623_dapm_amp_widgets));
+		snd_soc_dapm_add_routes(dapm, intercon_amp_spk,
+					ARRAY_SIZE(intercon_amp_spk));
+		break;
+	case 0x23:
+		snd_soc_dapm_add_routes(dapm, intercon_spk,
+					ARRAY_SIZE(intercon_spk));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* power down chip */
+static int alc5623_remove(struct snd_soc_codec *codec)
+{
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_device_alc5623 = {
+	.probe = alc5623_probe,
+	.remove = alc5623_remove,
+	.suspend = alc5623_suspend,
+	.resume = alc5623_resume,
+	.set_bias_level = alc5623_set_bias_level,
+	.reg_cache_size = ALC5623_VENDOR_ID2+2,
+	.reg_word_size = sizeof(u16),
+	.reg_cache_step = 2,
+};
+
+/*
+ * ALC5623 2 wire address is determined by A1 pin
+ * state during powerup.
+ *    low  = 0x1a
+ *    high = 0x1b
+ */
+static int alc5623_i2c_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct alc5623_platform_data *pdata;
+	struct alc5623_priv *alc5623;
+	int ret, vid1, vid2;
+
+	vid1 = i2c_smbus_read_word_data(client, ALC5623_VENDOR_ID1);
+	if (vid1 < 0) {
+		dev_err(&client->dev, "failed to read I2C\n");
+		return -EIO;
+	}
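+	/* SMBus returns the 16-bit vendor ID LSB first; swap before checking */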
+	vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
+
+	vid2 = i2c_smbus_read_byte_data(client, ALC5623_VENDOR_ID2);
+	if (vid2 < 0) {
+		dev_err(&client->dev, "failed to read I2C\n");
+		return -EIO;
+	}
+
+	if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
+		dev_err(&client->dev, "unknown or wrong codec\n");
+		dev_err(&client->dev, "Expected %x:%lx, got %x:%x\n",
+				0x10ec, id->driver_data,
+				vid1, vid2);
+		return -ENODEV;
+	}
+
+	dev_dbg(&client->dev, "Found codec id : alc56%02x\n", vid2);
+
+	alc5623 = kzalloc(sizeof(struct alc5623_priv), GFP_KERNEL);
+	if (alc5623 == NULL)
+		return -ENOMEM;
+
+	pdata = client->dev.platform_data;
+	if (pdata) {
+		alc5623->add_ctrl = pdata->add_ctrl;
+		alc5623->jack_det_ctrl = pdata->jack_det_ctrl;
+	}
+
+	alc5623->id = vid2;
+	switch (alc5623->id) {
+	case 0x21:
+		alc5623_dai.name = "alc5621-hifi";
+		break;
+	case 0x22:
+		alc5623_dai.name = "alc5622-hifi";
+		break;
+	case 0x23:
+		alc5623_dai.name = "alc5623-hifi";
+		break;
+	default:
+		kfree(alc5623);
+		return -EINVAL;
+	}
+
+	i2c_set_clientdata(client, alc5623);
+	alc5623->control_data = client;
+	alc5623->control_type = SND_SOC_I2C;
+	mutex_init(&alc5623->mutex);
+
+	ret =  snd_soc_register_codec(&client->dev,
+		&soc_codec_device_alc5623, &alc5623_dai, 1);
+	if (ret != 0) {
+		dev_err(&client->dev, "Failed to register codec: %d\n", ret);
+		kfree(alc5623);
+	}
+
+	return ret;
+}
+
+static int alc5623_i2c_remove(struct i2c_client *client)
+{
+	struct alc5623_priv *alc5623 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+	kfree(alc5623);
+	return 0;
+}
+
+static const struct i2c_device_id alc5623_i2c_table[] = {
+	{"alc5621", 0x21},
+	{"alc5622", 0x22},
+	{"alc5623", 0x23},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, alc5623_i2c_table);
+
+/*  i2c codec control layer */
+static struct i2c_driver alc5623_i2c_driver = {
+	.driver = {
+		.name = "alc562x-codec",
+		.owner = THIS_MODULE,
+	},
+	.probe = alc5623_i2c_probe,
+	.remove =  __devexit_p(alc5623_i2c_remove),
+	.id_table = alc5623_i2c_table,
+};
+
+static int __init alc5623_modinit(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&alc5623_i2c_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "%s: can't add i2c driver\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+module_init(alc5623_modinit);
+
+static void __exit alc5623_modexit(void)
+{
+	i2c_del_driver(&alc5623_i2c_driver);
+}
+module_exit(alc5623_modexit);
+
+MODULE_DESCRIPTION("ASoC alc5621/2/3 driver");
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/alc5623.h b/sound/soc/codecs/alc5623.h
new file mode 100644
index 0000000..f3d6826
--- /dev/null
+++ b/sound/soc/codecs/alc5623.h
@@ -0,0 +1,161 @@
+/*
+ * alc5623.h  --  alc562[123] ALSA SoC Audio driver
+ *
+ * Copyright 2008 Realtek Microelectronics
+ * Copyright 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Author: flove <flove@realtek.com>
+ * Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ALC5623_H
+#define _ALC5623_H
+
+#define ALC5623_RESET				0x00
+/*				5621 5622 5623  */
+/* speaker output vol		   2    2       */
+/* line output vol                      4    2  */
+/* HP output vol		   4    0    4  */
+#define ALC5623_SPK_OUT_VOL			0x02
+#define ALC5623_HP_OUT_VOL			0x04
+#define ALC5623_MONO_AUX_OUT_VOL		0x06
+#define ALC5623_AUXIN_VOL			0x08
+#define ALC5623_LINE_IN_VOL			0x0A
+#define ALC5623_STEREO_DAC_VOL			0x0C
+#define ALC5623_MIC_VOL				0x0E
+#define ALC5623_MIC_ROUTING_CTRL		0x10
+#define ALC5623_ADC_REC_GAIN			0x12
+#define ALC5623_ADC_REC_MIXER			0x14
+#define ALC5623_SOFT_VOL_CTRL_TIME		0x16
+/* ALC5623_OUTPUT_MIXER_CTRL :			*/
+/* same remark as for reg 2 line vs speaker	*/
+#define ALC5623_OUTPUT_MIXER_CTRL		0x1C
+#define ALC5623_MIC_CTRL			0x22
+
+#define	ALC5623_DAI_CONTROL			0x34
+#define ALC5623_DAI_SDP_MASTER_MODE		(0 << 15)
+#define ALC5623_DAI_SDP_SLAVE_MODE		(1 << 15)
+#define ALC5623_DAI_I2S_PCM_MODE		(1 << 14)
+#define ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL	(1 <<  7)
+#define ALC5623_DAI_ADC_DATA_L_R_SWAP		(1 <<  5)
+#define ALC5623_DAI_DAC_DATA_L_R_SWAP		(1 <<  4)
+#define ALC5623_DAI_I2S_DL_MASK			(3 <<  2)
+#define ALC5623_DAI_I2S_DL_32			(3 <<  2)
+#define	ALC5623_DAI_I2S_DL_24			(2 <<  2)
+#define ALC5623_DAI_I2S_DL_20			(1 <<  2)
+#define ALC5623_DAI_I2S_DL_16			(0 <<  2)
+#define ALC5623_DAI_I2S_DF_PCM			(3 <<  0)
+#define	ALC5623_DAI_I2S_DF_LEFT			(2 <<  0)
+#define ALC5623_DAI_I2S_DF_RIGHT		(1 <<  0)
+#define ALC5623_DAI_I2S_DF_I2S			(0 <<  0)
+
+#define ALC5623_STEREO_AD_DA_CLK_CTRL		0x36
+#define	ALC5623_COMPANDING_CTRL			0x38
+
+#define	ALC5623_PWR_MANAG_ADD1			0x3A
+#define ALC5623_PWR_ADD1_MAIN_I2S_EN		(1 << 15)
+#define ALC5623_PWR_ADD1_ZC_DET_PD_EN		(1 << 14)
+#define ALC5623_PWR_ADD1_MIC1_BIAS_EN		(1 << 11)
+#define ALC5623_PWR_ADD1_SHORT_CURR_DET_EN	(1 << 10)
+#define ALC5623_PWR_ADD1_SOFTGEN_EN		(1 <<  8) /* rsvd on 5622 */
+#define	ALC5623_PWR_ADD1_DEPOP_BUF_HP		(1 <<  6) /* rsvd on 5622 */
+#define	ALC5623_PWR_ADD1_HP_OUT_AMP		(1 <<  5)
+#define	ALC5623_PWR_ADD1_HP_OUT_ENH_AMP		(1 <<  4) /* rsvd on 5622 */
+#define ALC5623_PWR_ADD1_DEPOP_BUF_AUX		(1 <<  2)
+#define ALC5623_PWR_ADD1_AUX_OUT_AMP		(1 <<  1)
+#define ALC5623_PWR_ADD1_AUX_OUT_ENH_AMP	(1 <<  0) /* rsvd on 5622 */
+
+#define ALC5623_PWR_MANAG_ADD2			0x3C
+#define ALC5623_PWR_ADD2_LINEOUT		(1 << 15) /* rt5623 */
+#define ALC5623_PWR_ADD2_CLASS_AB		(1 << 15) /* rt5621 */
+#define ALC5623_PWR_ADD2_CLASS_D		(1 << 14) /* rt5621 */
+#define ALC5623_PWR_ADD2_VREF			(1 << 13)
+#define ALC5623_PWR_ADD2_PLL			(1 << 12)
+#define ALC5623_PWR_ADD2_DAC_REF_CIR		(1 << 10)
+#define ALC5623_PWR_ADD2_L_DAC_CLK		(1 <<  9)
+#define ALC5623_PWR_ADD2_R_DAC_CLK		(1 <<  8)
+#define ALC5623_PWR_ADD2_L_ADC_CLK_GAIN		(1 <<  7)
+#define ALC5623_PWR_ADD2_R_ADC_CLK_GAIN		(1 <<  6)
+#define ALC5623_PWR_ADD2_L_HP_MIXER		(1 <<  5)
+#define ALC5623_PWR_ADD2_R_HP_MIXER		(1 <<  4)
+#define ALC5623_PWR_ADD2_SPK_MIXER		(1 <<  3)
+#define ALC5623_PWR_ADD2_MONO_MIXER		(1 <<  2)
+#define ALC5623_PWR_ADD2_L_ADC_REC_MIXER	(1 <<  1)
+#define ALC5623_PWR_ADD2_R_ADC_REC_MIXER	(1 <<  0)
+
+#define ALC5623_PWR_MANAG_ADD3			0x3E
+#define ALC5623_PWR_ADD3_MAIN_BIAS		(1 << 15)
+#define ALC5623_PWR_ADD3_AUXOUT_L_VOL_AMP	(1 << 14)
+#define ALC5623_PWR_ADD3_AUXOUT_R_VOL_AMP	(1 << 13)
+#define ALC5623_PWR_ADD3_SPK_OUT		(1 << 12)
+#define ALC5623_PWR_ADD3_HP_L_OUT_VOL		(1 << 10)
+#define ALC5623_PWR_ADD3_HP_R_OUT_VOL		(1 <<  9)
+#define ALC5623_PWR_ADD3_LINEIN_L_VOL		(1 <<  7)
+#define ALC5623_PWR_ADD3_LINEIN_R_VOL		(1 <<  6)
+#define ALC5623_PWR_ADD3_AUXIN_L_VOL		(1 <<  5)
+#define ALC5623_PWR_ADD3_AUXIN_R_VOL		(1 <<  4)
+#define ALC5623_PWR_ADD3_MIC1_FUN_CTRL		(1 <<  3)
+#define ALC5623_PWR_ADD3_MIC2_FUN_CTRL		(1 <<  2)
+#define ALC5623_PWR_ADD3_MIC1_BOOST_AD		(1 <<  1)
+#define ALC5623_PWR_ADD3_MIC2_BOOST_AD		(1 <<  0)
+
+#define ALC5623_ADD_CTRL_REG			0x40
+
+#define	ALC5623_GLOBAL_CLK_CTRL_REG		0x42
+#define ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL	(1 << 15)
+#define ALC5623_GBL_CLK_SYS_SOUR_SEL_MCLK	(0 << 15)
+#define ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK	(1 << 14)
+#define ALC5623_GBL_CLK_PLL_SOUR_SEL_MCLK	(0 << 14)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV8	(3 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV4	(2 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV2	(1 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV1	(0 <<  1)
+#define ALC5623_GBL_CLK_PLL_PRE_DIV2		(1 <<  0)
+#define ALC5623_GBL_CLK_PLL_PRE_DIV1		(0 <<  0)
+
+#define ALC5623_PLL_CTRL			0x44
+#define ALC5623_PLL_CTRL_N_VAL(n)		(((n)&0xff) << 8)
+#define ALC5623_PLL_CTRL_K_VAL(k)		(((k)&0x7)  << 4)
+#define ALC5623_PLL_CTRL_M_VAL(m)		((m)&0xf)
+
+#define ALC5623_GPIO_OUTPUT_PIN_CTRL		0x4A
+#define ALC5623_GPIO_PIN_CONFIG			0x4C
+#define ALC5623_GPIO_PIN_POLARITY		0x4E
+#define ALC5623_GPIO_PIN_STICKY			0x50
+#define ALC5623_GPIO_PIN_WAKEUP			0x52
+#define ALC5623_GPIO_PIN_STATUS			0x54
+#define ALC5623_GPIO_PIN_SHARING		0x56
+#define	ALC5623_OVER_CURR_STATUS		0x58
+#define ALC5623_JACK_DET_CTRL			0x5A
+
+#define ALC5623_MISC_CTRL			0x5E
+#define ALC5623_MISC_DISABLE_FAST_VREG		(1 << 15)
+#define ALC5623_MISC_SPK_CLASS_AB_OC_PD		(1 << 13) /* 5621 */
+#define ALC5623_MISC_SPK_CLASS_AB_OC_DET	(1 << 12) /* 5621 */
+#define ALC5623_MISC_HP_DEPOP_MODE3_EN		(1 << 10)
+#define ALC5623_MISC_HP_DEPOP_MODE2_EN		(1 <<  9)
+#define ALC5623_MISC_HP_DEPOP_MODE1_EN		(1 <<  8)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE3_EN	(1 <<  6)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE2_EN	(1 <<  5)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE1_EN	(1 <<  4)
+#define ALC5623_MISC_M_DAC_L_INPUT		(1 <<  3)
+#define ALC5623_MISC_M_DAC_R_INPUT		(1 <<  2)
+#define ALC5623_MISC_IRQOUT_INV_CTRL		(1 <<  0)
+
+#define	ALC5623_PSEDUEO_SPATIAL_CTRL		0x60
+#define ALC5623_EQ_CTRL				0x62
+#define ALC5623_EQ_MODE_ENABLE			0x66
+#define ALC5623_AVC_CTRL			0x68
+#define ALC5623_HID_CTRL_INDEX			0x6A
+#define ALC5623_HID_CTRL_DATA			0x6C
+#define ALC5623_VENDOR_ID1			0x7C
+#define ALC5623_VENDOR_ID2			0x7E
+
+#define ALC5623_PLL_FR_MCLK			0
+#define ALC5623_PLL_FR_BCK			1
+#endif
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 8236439..347a567 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -36,8 +36,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dai.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include <mach/dm365.h>
@@ -116,7 +114,7 @@
 			     DAVINCI_VC_REG12_POWER_ALL_OFF);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -155,7 +153,7 @@
 
 static int cq93vc_probe(struct snd_soc_codec *codec)
 {
-	struct davinci_vc *davinci_vc = codec->dev->platform_data;
+	struct davinci_vc *davinci_vc = snd_soc_codec_get_drvdata(codec);
 
 	davinci_vc->cq93vc.codec = codec;
 	codec->control_data = davinci_vc;
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 6d4bdc6..8b51245 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -106,6 +106,21 @@
 #define CS4270_MUTE_DAC_A	0x01
 #define CS4270_MUTE_DAC_B	0x02
 
+/* Power-on default values for the registers
+ *
+ * This array contains the power-on default values of the registers, with the
+ * exception of the "CHIPID" register (01h).  The lower four bits of that
+ * register contain the hardware revision, so it is treated as volatile.
+ *
+ * Also note that on the CS4270, the first readable register is 1, but ASoC
+ * assumes the first register is 0.  Therfore, the array must have an entry for
+ * register 0, but we use cs4270_reg_is_readable() to tell ASoC that it can't
+ * be read.
+ */
+static const u8 cs4270_default_reg_cache[CS4270_LASTREG + 1] = {
+	0x00, 0x00, 0x00, 0x30, 0x00, 0x60, 0x20, 0x00, 0x00
+};
+
 static const char *supply_names[] = {
 	"va", "vd", "vlc"
 };
@@ -114,7 +129,6 @@
 struct cs4270_private {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[CS4270_NUMREGS];
 	unsigned int mclk; /* Input frequency of the MCLK pin */
 	unsigned int mode; /* The mode (I2S or left-justified) */
 	unsigned int slave_mode;
@@ -179,6 +193,20 @@
 /* The number of MCLK/LRCK ratios supported by the CS4270 */
 #define NUM_MCLK_RATIOS		ARRAY_SIZE(cs4270_mode_ratios)
 
+static int cs4270_reg_is_readable(unsigned int reg)
+{
+	return (reg >= CS4270_FIRSTREG) && (reg <= CS4270_LASTREG);
+}
+
+static int cs4270_reg_is_volatile(unsigned int reg)
+{
+	/* Unreadable registers are considered volatile */
+	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
+		return 1;
+
+	return reg == CS4270_CHIPID;
+}
+
 /**
  * cs4270_set_dai_sysclk - determine the CS4270 samples rates.
  * @codec_dai: the codec DAI
@@ -264,97 +292,6 @@
 }
 
 /**
- * cs4270_fill_cache - pre-fill the CS4270 register cache.
- * @codec: the codec for this CS4270
- *
- * This function fills in the CS4270 register cache by reading the register
- * values from the hardware.
- *
- * This CS4270 registers are cached to avoid excessive I2C I/O operations.
- * After the initial read to pre-fill the cache, the CS4270 never updates
- * the register values, so we won't have a cache coherency problem.
- *
- * We use the auto-increment feature of the CS4270 to read all registers in
- * one shot.
- */
-static int cs4270_fill_cache(struct snd_soc_codec *codec)
-{
-	u8 *cache = codec->reg_cache;
-	struct i2c_client *i2c_client = codec->control_data;
-	s32 length;
-
-	length = i2c_smbus_read_i2c_block_data(i2c_client,
-		CS4270_FIRSTREG | CS4270_I2C_INCR, CS4270_NUMREGS, cache);
-
-	if (length != CS4270_NUMREGS) {
-		dev_err(codec->dev, "i2c read failure, addr=0x%x\n",
-		       i2c_client->addr);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-/**
- * cs4270_read_reg_cache - read from the CS4270 register cache.
- * @codec: the codec for this CS4270
- * @reg: the register to read
- *
- * This function returns the value for a given register.  It reads only from
- * the register cache, not the hardware itself.
- *
- * This CS4270 registers are cached to avoid excessive I2C I/O operations.
- * After the initial read to pre-fill the cache, the CS4270 never updates
- * the register values, so we won't have a cache coherency problem.
- */
-static unsigned int cs4270_read_reg_cache(struct snd_soc_codec *codec,
-	unsigned int reg)
-{
-	u8 *cache = codec->reg_cache;
-
-	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
-		return -EIO;
-
-	return cache[reg - CS4270_FIRSTREG];
-}
-
-/**
- * cs4270_i2c_write - write to a CS4270 register via the I2C bus.
- * @codec: the codec for this CS4270
- * @reg: the register to write
- * @value: the value to write to the register
- *
- * This function writes the given value to the given CS4270 register, and
- * also updates the register cache.
- *
- * Note that we don't use the hw_write function pointer of snd_soc_codec.
- * That's because it's too clunky: the hw_write_t prototype does not match
- * i2c_smbus_write_byte_data(), and it's just another layer of overhead.
- */
-static int cs4270_i2c_write(struct snd_soc_codec *codec, unsigned int reg,
-			    unsigned int value)
-{
-	u8 *cache = codec->reg_cache;
-
-	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
-		return -EIO;
-
-	/* Only perform an I2C operation if the new value is different */
-	if (cache[reg - CS4270_FIRSTREG] != value) {
-		struct i2c_client *client = codec->control_data;
-		if (i2c_smbus_write_byte_data(client, reg, value)) {
-			dev_err(codec->dev, "i2c write failed\n");
-			return -EIO;
-		}
-
-		/* We've written to the hardware, so update the cache */
-		cache[reg - CS4270_FIRSTREG] = value;
-	}
-
-	return 0;
-}
-
-/**
  * cs4270_hw_params - program the CS4270 with the given hardware parameters.
  * @substream: the audio stream
  * @params: the hardware parameters to set
@@ -551,15 +488,16 @@
 static int cs4270_probe(struct snd_soc_codec *codec)
 {
 	struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
-	int i, ret, reg;
+	int i, ret;
 
 	codec->control_data = cs4270->control_data;
 
-	/* The I2C interface is set up, so pre-fill our register cache */
-
-	ret = cs4270_fill_cache(codec);
+	/* Tell ASoC what kind of I/O to use to read the registers.  ASoC will
+	 * then do the I2C transactions itself.
+	 */
+	ret = snd_soc_codec_set_cache_io(codec, 8, 8, cs4270->control_type);
 	if (ret < 0) {
-		dev_err(codec->dev, "failed to fill register cache\n");
+		dev_err(codec->dev, "failed to set cache I/O (ret=%i)\n", ret);
 		return ret;
 	}
 
@@ -568,10 +506,7 @@
 	 * this feature disabled by default.  An application (e.g. alsactl) can
 	 * re-enabled it by using the controls.
 	 */
-
-	reg = cs4270_read_reg_cache(codec, CS4270_MUTE);
-	reg &= ~CS4270_MUTE_AUTO;
-	ret = cs4270_i2c_write(codec, CS4270_MUTE, reg);
+	ret = snd_soc_update_bits(codec, CS4270_MUTE, CS4270_MUTE_AUTO, 0);
 	if (ret < 0) {
 		dev_err(codec->dev, "i2c write failed\n");
 		return ret;
@@ -582,10 +517,8 @@
 	 * playback has started.  An application (e.g. alsactl) can
 	 * re-enabled it by using the controls.
 	 */
-
-	reg = cs4270_read_reg_cache(codec, CS4270_TRANS);
-	reg &= ~(CS4270_TRANS_SOFT | CS4270_TRANS_ZERO);
-	ret = cs4270_i2c_write(codec, CS4270_TRANS, reg);
+	ret = snd_soc_update_bits(codec, CS4270_TRANS,
+		CS4270_TRANS_SOFT | CS4270_TRANS_ZERO, 0);
 	if (ret < 0) {
 		dev_err(codec->dev, "i2c write failed\n");
 		return ret;
@@ -708,15 +641,16 @@
  * Assign this variable to the codec_dev field of the machine driver's
  * snd_soc_device structure.
  */
-static struct snd_soc_codec_driver soc_codec_device_cs4270 = {
-	.probe =	cs4270_probe,
-	.remove =	cs4270_remove,
-	.suspend =	cs4270_soc_suspend,
-	.resume =	cs4270_soc_resume,
-	.read = cs4270_read_reg_cache,
-	.write = cs4270_i2c_write,
-	.reg_cache_size = CS4270_NUMREGS,
-	.reg_word_size = sizeof(u8),
+static const struct snd_soc_codec_driver soc_codec_device_cs4270 = {
+	.probe =		cs4270_probe,
+	.remove =		cs4270_remove,
+	.suspend =		cs4270_soc_suspend,
+	.resume =		cs4270_soc_resume,
+	.volatile_register =	cs4270_reg_is_volatile,
+	.readable_register =	cs4270_reg_is_readable,
+	.reg_cache_size =	CS4270_LASTREG + 1,
+	.reg_word_size =	sizeof(u8),
+	.reg_cache_default =	cs4270_default_reg_cache,
 };
 
 /**
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index cb086ea..8fb7070 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -26,7 +26,6 @@
 #include <linux/slab.h>
 #include <sound/core.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <sound/pcm_params.h>
@@ -47,7 +46,6 @@
 	unsigned int mclk;
 	unsigned int audio_mode;	/* The mode (I2S or left-justified) */
 	enum master_slave_mode func;
-	u8 reg_cache[CS42L51_NUMREGS];
 };
 
 #define CS42L51_FORMATS ( \
@@ -519,6 +517,7 @@
 static int cs42l51_probe(struct snd_soc_codec *codec)
 {
 	struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, reg;
 
 	codec->control_data = cs42l51->control_data;
@@ -550,9 +549,9 @@
 
 	snd_soc_add_controls(codec, cs42l51_snd_controls,
 		ARRAY_SIZE(cs42l51_snd_controls));
-	snd_soc_dapm_new_controls(codec, cs42l51_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, cs42l51_dapm_widgets,
 		ARRAY_SIZE(cs42l51_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, cs42l51_routes,
+	snd_soc_dapm_add_routes(dapm, cs42l51_routes,
 		ARRAY_SIZE(cs42l51_routes));
 
 	return 0;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index e8d27c8..0bb424a 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -18,7 +18,7 @@
 
 #include <sound/core.h>
 #include <sound/initval.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 
 #include "cx20442.h"
 
@@ -26,7 +26,6 @@
 struct cx20442_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[1];
 };
 
 #define CX20442_PM		0x0
@@ -89,10 +88,11 @@
 
 static int cx20442_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, cx20442_dapm_widgets,
-				  ARRAY_SIZE(cx20442_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, cx20442_audio_map,
+	snd_soc_dapm_new_controls(dapm, cx20442_dapm_widgets,
+				  ARRAY_SIZE(cx20442_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, cx20442_audio_map,
 				ARRAY_SIZE(cx20442_audio_map));
 
 	return 0;
@@ -263,7 +263,7 @@
 	/* Prevent the codec driver from further accessing the modem */
 	codec->hw_write = NULL;
 	cx20442->control_data = NULL;
-	codec->pop_time = 0;
+	codec->card->pop_time = 0;
 }
 
 /* Line discipline .hangup() */
@@ -291,7 +291,7 @@
 		/* Set up codec driver access to modem controls */
 		cx20442->control_data = tty;
 		codec->hw_write = (hw_write_t)tty->ops->write;
-		codec->pop_time = 1;
+		codec->card->pop_time = 1;
 	}
 }
 
@@ -348,7 +348,7 @@
 
 	cx20442->control_data = NULL;
 	codec->hw_write = NULL;
-	codec->pop_time = 0;
+	codec->card->pop_time = 0;
 
 	return 0;
 }
@@ -367,9 +367,12 @@
 	return 0;
 }
 
+static const u8 cx20442_reg;
+
 static struct snd_soc_codec_driver cx20442_codec_dev = {
 	.probe = 	cx20442_codec_probe,
 	.remove = 	cx20442_codec_remove,
+	.reg_cache_default = &cx20442_reg,
 	.reg_cache_size = 1,
 	.reg_word_size = sizeof(u8),
 	.read = cx20442_read_reg_cache,
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 58bb9b9..92fd9d7 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
new file mode 100644
index 0000000..57e9dac
--- /dev/null
+++ b/sound/soc/codecs/dmic.c
@@ -0,0 +1,81 @@
+/*
+ * dmic.c  --  SoC audio for Generic Digital MICs
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+static struct snd_soc_dai_driver dmic_dai = {
+	.name = "dmic-hifi",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.formats = SNDRV_PCM_FMTBIT_S32_LE
+			| SNDRV_PCM_FMTBIT_S24_LE
+			| SNDRV_PCM_FMTBIT_S16_LE,
+	},
+};
+
+static struct snd_soc_codec_driver soc_dmic = {};
+
+static int __devinit dmic_dev_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev,
+			&soc_dmic, &dmic_dai, 1);
+}
+
+static int __devexit dmic_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+MODULE_ALIAS("platform:dmic-codec");
+
+static struct platform_driver dmic_driver = {
+	.driver = {
+		.name = "dmic-codec",
+		.owner = THIS_MODULE,
+	},
+	.probe = dmic_dev_probe,
+	.remove = __devexit_p(dmic_dev_remove),
+};
+
+static int __init dmic_init(void)
+{
+	return platform_driver_register(&dmic_driver);
+}
+module_init(dmic_init);
+
+static void __exit dmic_exit(void)
+{
+	platform_driver_unregister(&dmic_driver);
+}
+module_exit(dmic_exit);
+
+MODULE_DESCRIPTION("Generic DMIC driver");
+MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 16253ec..f7cd346f 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -22,7 +22,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/initval.h>
-#include <sound/soc-dapm.h>
 #include <sound/soc.h>
 
 #define JZ4740_REG_CODEC_1 0x0
@@ -266,7 +265,7 @@
 		break;
 	case SND_SOC_BIAS_STANDBY:
 		/* The only way to clear the suspend flag is to reset the codec */
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			jz4740_codec_wakeup(codec);
 
 		mask = JZ4740_CODEC_1_VREF_DISABLE |
@@ -288,23 +287,25 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
 
 static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	snd_soc_update_bits(codec, JZ4740_REG_CODEC_1,
 			JZ4740_CODEC_1_SW2_ENABLE, JZ4740_CODEC_1_SW2_ENABLE);
 
 	snd_soc_add_controls(codec, jz4740_codec_controls,
 		ARRAY_SIZE(jz4740_codec_controls));
 
-	snd_soc_dapm_new_controls(codec, jz4740_codec_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, jz4740_codec_dapm_widgets,
 		ARRAY_SIZE(jz4740_codec_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, jz4740_codec_dapm_routes,
+	snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
 		ARRAY_SIZE(jz4740_codec_dapm_routes));
 
 	snd_soc_dapm_new_widgets(codec);
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 6447dbb..89498f9 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <linux/slab.h>
@@ -1229,15 +1228,17 @@
 
 static int max98088_add_widgets(struct snd_soc_codec *codec)
 {
-       snd_soc_dapm_new_controls(codec, max98088_dapm_widgets,
+       struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+       snd_soc_dapm_new_controls(dapm, max98088_dapm_widgets,
                                  ARRAY_SIZE(max98088_dapm_widgets));
 
-       snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+       snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
        snd_soc_add_controls(codec, max98088_snd_controls,
                             ARRAY_SIZE(max98088_snd_controls));
 
-       snd_soc_dapm_new_widgets(codec);
+       snd_soc_dapm_new_widgets(dapm);
        return 0;
 }
 
@@ -1622,7 +1623,7 @@
                break;
 
        case SND_SOC_BIAS_STANDBY:
-               if (codec->bias_level == SND_SOC_BIAS_OFF)
+               if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
                        max98088_sync_cache(codec);
 
                snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
@@ -1635,7 +1636,7 @@
                codec->cache_sync = 1;
                break;
        }
-       codec->bias_level = level;
+       codec->dapm.bias_level = level;
        return 0;
 }
 
@@ -1957,7 +1958,7 @@
                return ret;
        }
 
-       /* initalize private data */
+       /* initialize private data */
 
        max98088->sysclk = (unsigned)-1;
        max98088->eq_textcnt = 0;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 6f38d61..2727bef 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -38,7 +38,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "ssm2602.h"
@@ -207,10 +206,11 @@
 
 static int ssm2602_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ssm2602_dapm_widgets,
-				  ARRAY_SIZE(ssm2602_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_conn, ARRAY_SIZE(audio_conn));
+	snd_soc_dapm_new_controls(dapm, ssm2602_dapm_widgets,
+				  ARRAY_SIZE(ssm2602_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_conn, ARRAY_SIZE(audio_conn));
 
 	return 0;
 }
@@ -493,7 +493,7 @@
 		break;
 
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 061f9e5..78b2b50 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -236,7 +236,7 @@
 		stac9766_ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index e8652b1..54a30ef 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 
@@ -391,11 +390,12 @@
 
 static int tlv320aic23_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
-				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
+				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -574,7 +574,7 @@
 		tlv320aic23_write(codec, TLV320AIC23_PWR, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index 6b7d71e..e2a7608 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "tlv320aic26.h"
@@ -31,7 +30,6 @@
 struct aic26 {
 	struct spi_device *spi;
 	struct snd_soc_codec codec;
-	u16 reg_cache[AIC26_NUM_REGS];	/* shadow registers */
 	int master;
 	int datfm;
 	int mclk;
@@ -355,7 +353,6 @@
  */
 static int aic26_probe(struct snd_soc_codec *codec)
 {
-	struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
 	int ret, err, i, reg;
 
 	dev_info(codec->dev, "Probing AIC26 SoC CODEC driver\n");
@@ -373,7 +370,7 @@
 	aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
 
 	/* Fill register cache */
-	for (i = 0; i < ARRAY_SIZE(aic26->reg_cache); i++)
+	for (i = 0; i < codec->driver->reg_cache_size; i++)
 		aic26_reg_read(codec, i);
 
 	/* Register the sysfs files for debugging */
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 77b8f9a..3bedab2 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -46,7 +46,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/tlv320aic3x.h>
@@ -61,6 +60,8 @@
 	"DRVDD",	/* ADC Analog and Output Driver Voltage */
 };
 
+static LIST_HEAD(reset_list);
+
 struct aic3x_priv;
 
 struct aic3x_disable_nb {
@@ -77,6 +78,7 @@
 	struct aic3x_setup_data *setup;
 	void *control_data;
 	unsigned int sysclk;
+	struct list_head list;
 	int master;
 	int gpio_reset;
 	int power;
@@ -183,7 +185,7 @@
 
 	if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
 		/* find dapm widget path assoc with kcontrol */
-		list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+		list_for_each_entry(path, &widget->dapm->card->paths, list) {
 			if (path->kcontrol != kcontrol)
 				continue;
 
@@ -199,7 +201,7 @@
 		}
 
 		if (found)
-			snd_soc_dapm_sync(widget->codec);
+			snd_soc_dapm_sync(widget->dapm);
 	}
 
 	ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
@@ -788,17 +790,19 @@
 static int aic3x_add_widgets(struct snd_soc_codec *codec)
 {
 	struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	if (aic3x->model == AIC3X_MODEL_3007) {
-		snd_soc_dapm_new_controls(codec, aic3007_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, aic3007_dapm_widgets,
 			ARRAY_SIZE(aic3007_dapm_widgets));
-		snd_soc_dapm_add_routes(codec, intercon_3007, ARRAY_SIZE(intercon_3007));
+		snd_soc_dapm_add_routes(dapm, intercon_3007,
+					ARRAY_SIZE(intercon_3007));
 	}
 
 	return 0;
@@ -1075,7 +1079,7 @@
 		 * Put codec to reset and require cache sync as at least one
 		 * of the supplies was disabled
 		 */
-		if (aic3x->gpio_reset >= 0)
+		if (gpio_is_valid(aic3x->gpio_reset))
 			gpio_set_value(aic3x->gpio_reset, 0);
 		aic3x->codec->cache_sync = 1;
 	}
@@ -1102,7 +1106,7 @@
 		if (!codec->cache_sync)
 			goto out;
 
-		if (aic3x->gpio_reset >= 0) {
+		if (gpio_is_valid(aic3x->gpio_reset)) {
 			udelay(1);
 			gpio_set_value(aic3x->gpio_reset, 1);
 		}
@@ -1135,7 +1139,7 @@
 	case SND_SOC_BIAS_ON:
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY &&
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY &&
 		    aic3x->master) {
 			/* enable pll */
 			reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
@@ -1146,7 +1150,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		if (!aic3x->power)
 			aic3x_set_power(codec, 1);
-		if (codec->bias_level == SND_SOC_BIAS_PREPARE &&
+		if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE &&
 		    aic3x->master) {
 			/* disable pll */
 			reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
@@ -1159,7 +1163,7 @@
 			aic3x_set_power(codec, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1344,14 +1348,28 @@
 	return 0;
 }
 
+static bool aic3x_is_shared_reset(struct aic3x_priv *aic3x)
+{
+	struct aic3x_priv *a;
+
+	list_for_each_entry(a, &reset_list, list) {
+		if (gpio_is_valid(aic3x->gpio_reset) &&
+		    aic3x->gpio_reset == a->gpio_reset)
+			return true;
+	}
+
+	return false;
+}
+
 static int aic3x_probe(struct snd_soc_codec *codec)
 {
 	struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
 	int ret, i;
 
+	INIT_LIST_HEAD(&aic3x->list);
 	codec->control_data = aic3x->control_data;
 	aic3x->codec = codec;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 8, 8, aic3x->control_type);
 	if (ret != 0) {
@@ -1359,7 +1377,8 @@
 		return ret;
 	}
 
-	if (aic3x->gpio_reset >= 0) {
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x)) {
 		ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
 		if (ret != 0)
 			goto err_gpio;
@@ -1405,6 +1424,7 @@
 		snd_soc_add_controls(codec, &aic3x_classd_amp_gain_ctrl, 1);
 
 	aic3x_add_widgets(codec);
+	list_add(&aic3x->list, &reset_list);
 
 	return 0;
 
@@ -1414,10 +1434,10 @@
 					      &aic3x->disable_nb[i].nb);
 	regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
 err_get:
-	if (aic3x->gpio_reset >= 0)
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x))
 		gpio_free(aic3x->gpio_reset);
 err_gpio:
-	kfree(aic3x);
 	return ret;
 }
 
@@ -1427,7 +1447,9 @@
 	int i;
 
 	aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	if (aic3x->gpio_reset >= 0) {
+	list_del(&aic3x->list);
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x)) {
 		gpio_set_value(aic3x->gpio_reset, 0);
 		gpio_free(aic3x->gpio_reset);
 	}
@@ -1523,21 +1545,6 @@
 	.remove = aic3x_i2c_remove,
 	.id_table = aic3x_i2c_id,
 };
-
-static inline void aic3x_i2c_init(void)
-{
-	int ret;
-
-	ret = i2c_add_driver(&aic3x_i2c_driver);
-	if (ret)
-		printk(KERN_ERR "%s: error regsitering i2c driver, %d\n",
-		       __func__, ret);
-}
-
-static inline void aic3x_i2c_exit(void)
-{
-	i2c_del_driver(&aic3x_i2c_driver);
-}
 #endif
 
 static int __init aic3x_modinit(void)
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index c5ab8c8..71d7be8 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -36,21 +36,21 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
 #include <sound/tlv320dac33-plat.h>
 #include "tlv320dac33.h"
 
-#define DAC33_BUFFER_SIZE_BYTES		24576	/* bytes, 12288 16 bit words,
-						 * 6144 stereo */
-#define DAC33_BUFFER_SIZE_SAMPLES	6144
-
-#define NSAMPLE_MAX		5700
-
-#define MODE7_LTHR		10
-#define MODE7_UTHR		(DAC33_BUFFER_SIZE_SAMPLES - 10)
+/*
+ * The internal FIFO is 24576 bytes long
+ * It can be configured to hold 16bit or 24bit samples
+ * In 16bit configuration the FIFO can hold 6144 stereo samples
+ * In 24bit configuration the FIFO can hold 4096 stereo samples
+ */
+#define DAC33_FIFO_SIZE_16BIT	6144
+#define DAC33_FIFO_SIZE_24BIT	4096
+#define DAC33_MODE7_MARGIN	10	/* Safety margin for FIFO in Mode7 */
 
 #define BURST_BASEFREQ_HZ	49152000
 
@@ -100,16 +100,11 @@
 	unsigned int refclk;
 
 	unsigned int alarm_threshold;	/* set to be half of LATENCY_TIME_MS */
-	unsigned int nsample_min;	/* nsample should not be lower than
-					 * this */
-	unsigned int nsample_max;	/* nsample should not be higher than
-					 * this */
 	enum dac33_fifo_modes fifo_mode;/* FIFO mode selection */
+	unsigned int fifo_size;		/* Size of the FIFO in samples */
 	unsigned int nsample;		/* burst read amount from host */
 	int mode1_latency;		/* latency caused by the i2c writes in
 					 * us */
-	int auto_fifo_config; 		/* Configure the FIFO based on the
-					 * period size */
 	u8 burst_bclkdiv;		/* BCLK divider value in burst mode */
 	unsigned int burst_rate;	/* Interface speed in Burst modes */
 
@@ -303,7 +298,6 @@
 	if (unlikely(!dac33->chip_power))
 		return;
 
-	/* 44-46: DAC Control Registers */
 	/* A : DAC sample rate Fsref/1.5 */
 	dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(0));
 	/* B : DAC src=normal, not muted */
@@ -316,8 +310,6 @@
 	 clock source = internal osc (?) */
 	dac33_write(codec, DAC33_ANA_VOL_SOFT_STEP_CTRL, DAC33_VOLCLKEN);
 
-	dac33_write(codec, DAC33_PWR_CTRL, DAC33_PDNALLB);
-
 	/* Restore only selected registers (gains mostly) */
 	dac33_write(codec, DAC33_LDAC_DIG_VOL_CTRL,
 		    dac33_read_reg_cache(codec, DAC33_LDAC_DIG_VOL_CTRL));
@@ -328,6 +320,10 @@
 		    dac33_read_reg_cache(codec, DAC33_LINEL_TO_LLO_VOL));
 	dac33_write(codec, DAC33_LINER_TO_RLO_VOL,
 		    dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
+
+	dac33_write(codec, DAC33_OUT_AMP_CTRL,
+		    dac33_read_reg_cache(codec, DAC33_OUT_AMP_CTRL));
+
 }
 
 static inline int dac33_read_id(struct snd_soc_codec *codec)
@@ -357,6 +353,21 @@
 	dac33_write(codec, DAC33_PWR_CTRL, reg);
 }
 
+static inline void dac33_disable_digital(struct snd_soc_codec *codec)
+{
+	u8 reg;
+
+	/* Stop the DAI clock */
+	reg = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_B);
+	reg &= ~DAC33_BCLKON;
+	dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_B, reg);
+
+	/* Power down the Oscillator, and DACs */
+	reg = dac33_read_reg_cache(codec, DAC33_PWR_CTRL);
+	reg &= ~(DAC33_OSCPDNB | DAC33_DACRPDNB | DAC33_DACLPDNB);
+	dac33_write(codec, DAC33_PWR_CTRL, reg);
+}
+
 static int dac33_hard_power(struct snd_soc_codec *codec, int power)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
@@ -405,7 +416,7 @@
 	return ret;
 }
 
-static int playback_event(struct snd_soc_dapm_widget *w,
+static int dac33_playback_event(struct snd_soc_dapm_widget *w,
 		struct snd_kcontrol *kcontrol, int event)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(w->codec);
@@ -417,77 +428,13 @@
 			dac33_prepare_chip(dac33->substream);
 		}
 		break;
+	case SND_SOC_DAPM_POST_PMD:
+		dac33_disable_digital(w->codec);
+		break;
 	}
 	return 0;
 }
 
-static int dac33_get_nsample(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = dac33->nsample;
-
-	return 0;
-}
-
-static int dac33_set_nsample(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (dac33->nsample == ucontrol->value.integer.value[0])
-		return 0;
-
-	if (ucontrol->value.integer.value[0] < dac33->nsample_min ||
-	    ucontrol->value.integer.value[0] > dac33->nsample_max) {
-		ret = -EINVAL;
-	} else {
-		dac33->nsample = ucontrol->value.integer.value[0];
-		/* Re calculate the burst time */
-		dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
-						      dac33->nsample);
-	}
-
-	return ret;
-}
-
-static int dac33_get_uthr(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = dac33->uthr;
-
-	return 0;
-}
-
-static int dac33_set_uthr(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (dac33->substream)
-		return -EBUSY;
-
-	if (dac33->uthr == ucontrol->value.integer.value[0])
-		return 0;
-
-	if (ucontrol->value.integer.value[0] < (MODE7_LTHR + 10) ||
-	    ucontrol->value.integer.value[0] > MODE7_UTHR)
-		ret = -EINVAL;
-	else
-		dac33->uthr = ucontrol->value.integer.value[0];
-
-	return ret;
-}
-
 static int dac33_get_fifo_mode(struct snd_kcontrol *kcontrol,
 			 struct snd_ctl_elem_value *ucontrol)
 {
@@ -572,13 +519,6 @@
 		 dac33_get_fifo_mode, dac33_set_fifo_mode),
 };
 
-static const struct snd_kcontrol_new dac33_fifo_snd_controls[] = {
-	SOC_SINGLE_EXT("nSample", 0, 0, 5900, 0,
-		dac33_get_nsample, dac33_set_nsample),
-	SOC_SINGLE_EXT("UTHR", 0, 0, MODE7_UTHR, 0,
-		 dac33_get_uthr, dac33_set_uthr),
-};
-
 /* Analog bypass */
 static const struct snd_kcontrol_new dac33_dapm_abypassl_control =
 	SOC_DAPM_SINGLE("Switch", DAC33_LINEL_TO_LLO_VOL, 7, 1, 1);
@@ -586,6 +526,25 @@
 static const struct snd_kcontrol_new dac33_dapm_abypassr_control =
 	SOC_DAPM_SINGLE("Switch", DAC33_LINER_TO_RLO_VOL, 7, 1, 1);
 
+/* LOP L/R invert selection */
+static const char *dac33_lr_lom_texts[] = {"DAC", "LOP"};
+
+static const struct soc_enum dac33_left_lom_enum =
+	SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 3,
+			ARRAY_SIZE(dac33_lr_lom_texts),
+			dac33_lr_lom_texts);
+
+static const struct snd_kcontrol_new dac33_dapm_left_lom_control =
+SOC_DAPM_ENUM("Route", dac33_left_lom_enum);
+
+static const struct soc_enum dac33_right_lom_enum =
+	SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 2,
+			ARRAY_SIZE(dac33_lr_lom_texts),
+			dac33_lr_lom_texts);
+
+static const struct snd_kcontrol_new dac33_dapm_right_lom_control =
+SOC_DAPM_ENUM("Route", dac33_right_lom_enum);
+
 static const struct snd_soc_dapm_widget dac33_dapm_widgets[] = {
 	SND_SOC_DAPM_OUTPUT("LEFT_LO"),
 	SND_SOC_DAPM_OUTPUT("RIGHT_LO"),
@@ -593,8 +552,8 @@
 	SND_SOC_DAPM_INPUT("LINEL"),
 	SND_SOC_DAPM_INPUT("LINER"),
 
-	SND_SOC_DAPM_DAC("DACL", "Left Playback", DAC33_LDAC_PWR_CTRL, 2, 0),
-	SND_SOC_DAPM_DAC("DACR", "Right Playback", DAC33_RDAC_PWR_CTRL, 2, 0),
+	SND_SOC_DAPM_DAC("DACL", "Left Playback", SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DACR", "Right Playback", SND_SOC_NOPM, 0, 0),
 
 	/* Analog bypass */
 	SND_SOC_DAPM_SWITCH("Analog Left Bypass", SND_SOC_NOPM, 0, 0,
@@ -602,12 +561,30 @@
 	SND_SOC_DAPM_SWITCH("Analog Right Bypass", SND_SOC_NOPM, 0, 0,
 				&dac33_dapm_abypassr_control),
 
-	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amp Power",
+	SND_SOC_DAPM_MUX("Left LOM Inverted From", SND_SOC_NOPM, 0, 0,
+		&dac33_dapm_left_lom_control),
+	SND_SOC_DAPM_MUX("Right LOM Inverted From", SND_SOC_NOPM, 0, 0,
+		&dac33_dapm_right_lom_control),
+	/*
+	 * For the DAPM path when only the analog bypass path is enabled and
+	 * the LOP is inverted from the corresponding DAC side.
+	 * This is needed so we can attach the DAC power supply in this case.
+	 */
+	SND_SOC_DAPM_PGA("Left Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Right Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amplifier",
 			 DAC33_OUT_AMP_PWR_CTRL, 6, 3, 3, 0),
-	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amp Power",
+	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amplifier",
 			 DAC33_OUT_AMP_PWR_CTRL, 4, 3, 3, 0),
 
-	SND_SOC_DAPM_PRE("Prepare Playback", playback_event),
+	SND_SOC_DAPM_SUPPLY("Left DAC Power",
+			    DAC33_LDAC_PWR_CTRL, 2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("Right DAC Power",
+			    DAC33_RDAC_PWR_CTRL, 2, 0, NULL, 0),
+
+	SND_SOC_DAPM_PRE("Pre Playback", dac33_playback_event),
+	SND_SOC_DAPM_POST("Post Playback", dac33_playback_event),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -615,24 +592,39 @@
 	{"Analog Left Bypass", "Switch", "LINEL"},
 	{"Analog Right Bypass", "Switch", "LINER"},
 
-	{"Output Left Amp Power", NULL, "DACL"},
-	{"Output Right Amp Power", NULL, "DACR"},
+	{"Output Left Amplifier", NULL, "DACL"},
+	{"Output Right Amplifier", NULL, "DACR"},
 
-	{"Output Left Amp Power", NULL, "Analog Left Bypass"},
-	{"Output Right Amp Power", NULL, "Analog Right Bypass"},
+	{"Left Bypass PGA", NULL, "Analog Left Bypass"},
+	{"Right Bypass PGA", NULL, "Analog Right Bypass"},
+
+	{"Left LOM Inverted From", "DAC", "Left Bypass PGA"},
+	{"Right LOM Inverted From", "DAC", "Right Bypass PGA"},
+	{"Left LOM Inverted From", "LOP", "Analog Left Bypass"},
+	{"Right LOM Inverted From", "LOP", "Analog Right Bypass"},
+
+	{"Output Left Amplifier", NULL, "Left LOM Inverted From"},
+	{"Output Right Amplifier", NULL, "Right LOM Inverted From"},
+
+	{"DACL", NULL, "Left DAC Power"},
+	{"DACR", NULL, "Right DAC Power"},
+
+	{"Left Bypass PGA", NULL, "Left DAC Power"},
+	{"Right Bypass PGA", NULL, "Right DAC Power"},
 
 	/* output */
-	{"LEFT_LO", NULL, "Output Left Amp Power"},
-	{"RIGHT_LO", NULL, "Output Right Amp Power"},
+	{"LEFT_LO", NULL, "Output Left Amplifier"},
+	{"RIGHT_LO", NULL, "Output Right Amplifier"},
 };
 
 static int dac33_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, dac33_dapm_widgets,
-				  ARRAY_SIZE(dac33_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, dac33_dapm_widgets,
+				  ARRAY_SIZE(dac33_dapm_widgets));
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -640,16 +632,18 @@
 static int dac33_set_bias_level(struct snd_soc_codec *codec,
 				enum snd_soc_bias_level level)
 {
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		dac33_soft_power(codec, 1);
+		if (!dac33->substream)
+			dac33_soft_power(codec, 1);
 		break;
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Coming from OFF, switch on the codec */
 			ret = dac33_hard_power(codec, 1);
 			if (ret != 0)
@@ -660,14 +654,14 @@
 		break;
 	case SND_SOC_BIAS_OFF:
 		/* Do not power off, when the codec is already off */
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			return 0;
 		ret = dac33_hard_power(codec, 0);
 		if (ret != 0)
 			return ret;
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -705,7 +699,7 @@
 		spin_unlock_irq(&dac33->lock);
 
 		dac33_write16(codec, DAC33_PREFILL_MSB,
-				DAC33_THRREG(MODE7_LTHR));
+				DAC33_THRREG(DAC33_MODE7_MARGIN));
 
 		/* Enable Upper Threshold IRQ */
 		dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MUT);
@@ -815,6 +809,8 @@
 	/* Stream started, save the substream pointer */
 	dac33->substream = substream;
 
+	snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
+
 	return 0;
 }
 
@@ -826,18 +822,17 @@
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 
 	dac33->substream = NULL;
-
-	/* Reset the nSample restrictions */
-	dac33->nsample_min = 0;
-	dac33->nsample_max = NSAMPLE_MAX;
 }
 
+#define CALC_BURST_RATE(bclkdiv, bclk_per_sample) \
+	(BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample)
 static int dac33_hw_params(struct snd_pcm_substream *substream,
 			   struct snd_pcm_hw_params *params,
 			   struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 
 	/* Check parameters for validity */
 	switch (params_rate(params)) {
@@ -852,6 +847,12 @@
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
+		dac33->fifo_size = DAC33_FIFO_SIZE_16BIT;
+		dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 32);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dac33->fifo_size = DAC33_FIFO_SIZE_24BIT;
+		dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 64);
 		break;
 	default:
 		dev_err(codec->dev, "unsupported format %d\n",
@@ -906,6 +907,9 @@
 		aictrl_a |= (DAC33_NCYCL_16 | DAC33_WLEN_16);
 		fifoctrl_a |= DAC33_WIDTH;
 		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aictrl_a |= (DAC33_NCYCL_32 | DAC33_WLEN_24);
+		break;
 	default:
 		dev_err(codec->dev, "unsupported format %d\n",
 			substream->runtime->format);
@@ -1040,7 +1044,10 @@
 		dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C,
 							dac33->burst_bclkdiv);
 	else
-		dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32);
+		if (substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE)
+			dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32);
+		else
+			dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 16);
 
 	switch (dac33->fifo_mode) {
 	case DAC33_FIFO_MODE1:
@@ -1053,7 +1060,8 @@
 		 * at the bottom, and also at the top of the FIFO
 		 */
 		dac33_write16(codec, DAC33_UTHR_MSB, DAC33_THRREG(dac33->uthr));
-		dac33_write16(codec, DAC33_LTHR_MSB, DAC33_THRREG(MODE7_LTHR));
+		dac33_write16(codec, DAC33_LTHR_MSB,
+			      DAC33_THRREG(DAC33_MODE7_MARGIN));
 		break;
 	default:
 		break;
@@ -1082,42 +1090,21 @@
 		/* Number of samples under i2c latency */
 		dac33->alarm_threshold = US_TO_SAMPLES(rate,
 						dac33->mode1_latency);
-		nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
-				dac33->alarm_threshold;
+		nsample_limit = dac33->fifo_size - dac33->alarm_threshold;
 
-		if (dac33->auto_fifo_config) {
-			if (period_size <= dac33->alarm_threshold)
-				/*
-				 * Configure nSamaple to number of periods,
-				 * which covers the latency requironment.
-				 */
-				dac33->nsample = period_size *
-				       ((dac33->alarm_threshold / period_size) +
-				       (dac33->alarm_threshold % period_size ?
-				       1 : 0));
-			else if (period_size > nsample_limit)
-				dac33->nsample = nsample_limit;
-			else
-				dac33->nsample = period_size;
-		} else {
-			/* nSample time shall not be shorter than i2c latency */
-			dac33->nsample_min = dac33->alarm_threshold;
+		if (period_size <= dac33->alarm_threshold)
 			/*
-			 * nSample should not be bigger than alsa buffer minus
-			 * size of one period to avoid overruns
+			 * Configure nSample to the number of periods
+			 * which covers the latency requirement.
 			 */
-			dac33->nsample_max = substream->runtime->buffer_size -
-						period_size;
-
-			if (dac33->nsample_max > nsample_limit)
-				dac33->nsample_max = nsample_limit;
-
-			/* Correct the nSample if it is outside of the ranges */
-			if (dac33->nsample < dac33->nsample_min)
-				dac33->nsample = dac33->nsample_min;
-			if (dac33->nsample > dac33->nsample_max)
-				dac33->nsample = dac33->nsample_max;
-		}
+			dac33->nsample = period_size *
+				((dac33->alarm_threshold / period_size) +
+				(dac33->alarm_threshold % period_size ?
+				1 : 0));
+		else if (period_size > nsample_limit)
+			dac33->nsample = nsample_limit;
+		else
+			dac33->nsample = period_size;
 
 		dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
 						      dac33->nsample);
@@ -1125,19 +1112,16 @@
 		dac33->t_stamp2 = 0;
 		break;
 	case DAC33_FIFO_MODE7:
-		if (dac33->auto_fifo_config) {
-			dac33->uthr = UTHR_FROM_PERIOD_SIZE(
-					period_size,
-					rate,
-					dac33->burst_rate) + 9;
-			if (dac33->uthr > MODE7_UTHR)
-				dac33->uthr = MODE7_UTHR;
-			if (dac33->uthr < (MODE7_LTHR + 10))
-				dac33->uthr = (MODE7_LTHR + 10);
-		}
+		dac33->uthr = UTHR_FROM_PERIOD_SIZE(period_size, rate,
+						    dac33->burst_rate) + 9;
+		if (dac33->uthr > (dac33->fifo_size - DAC33_MODE7_MARGIN))
+			dac33->uthr = dac33->fifo_size - DAC33_MODE7_MARGIN;
+		if (dac33->uthr < (DAC33_MODE7_MARGIN + 10))
+			dac33->uthr = (DAC33_MODE7_MARGIN + 10);
+
 		dac33->mode7_us_to_lthr =
 				SAMPLES_TO_US(substream->runtime->rate,
-					dac33->uthr - MODE7_LTHR + 1);
+					dac33->uthr - DAC33_MODE7_MARGIN + 1);
 		dac33->t_stamp1 = 0;
 		break;
 	default:
@@ -1255,8 +1239,8 @@
 			samples += (samples_in - samples_out);
 
 			if (likely(samples > 0))
-				delay = samples > DAC33_BUFFER_SIZE_SAMPLES ?
-					DAC33_BUFFER_SIZE_SAMPLES : samples;
+				delay = samples > dac33->fifo_size ?
+					dac33->fifo_size : samples;
 			else
 				delay = 0;
 		}
@@ -1308,7 +1292,7 @@
 			samples_in = US_TO_SAMPLES(
 					dac33->burst_rate,
 					time_delta);
-			delay = MODE7_LTHR + samples_in - samples_out;
+			delay = DAC33_MODE7_MARGIN + samples_in - samples_out;
 
 			if (unlikely(delay > uthr))
 				delay = uthr;
@@ -1415,7 +1399,7 @@
 
 	codec->control_data = dac33->control_data;
 	codec->hw_write = (hw_write_t) i2c_master_send;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 	dac33->codec = codec;
 
 	/* Read the tlv320dac33 ID registers */
@@ -1459,14 +1443,10 @@
 	snd_soc_add_controls(codec, dac33_snd_controls,
 			     ARRAY_SIZE(dac33_snd_controls));
 	/* Only add the FIFO controls, if we have valid IRQ number */
-	if (dac33->irq >= 0) {
+	if (dac33->irq >= 0)
 		snd_soc_add_controls(codec, dac33_mode_snd_controls,
 				     ARRAY_SIZE(dac33_mode_snd_controls));
-		/* FIFO usage controls only, if autoio config is not selected */
-		if (!dac33->auto_fifo_config)
-			snd_soc_add_controls(codec, dac33_fifo_snd_controls,
-					ARRAY_SIZE(dac33_fifo_snd_controls));
-	}
+
 	dac33_add_widgets(codec);
 
 err_power:
@@ -1515,7 +1495,7 @@
 
 #define DAC33_RATES	(SNDRV_PCM_RATE_44100 | \
 			 SNDRV_PCM_RATE_48000)
-#define DAC33_FORMATS	SNDRV_PCM_FMTBIT_S16_LE
+#define DAC33_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct snd_soc_dai_ops dac33_dai_ops = {
 	.startup	= dac33_startup,
@@ -1563,17 +1543,11 @@
 
 	dac33->power_gpio = pdata->power_gpio;
 	dac33->burst_bclkdiv = pdata->burst_bclkdiv;
-	/* Pre calculate the burst rate */
-	dac33->burst_rate = BURST_BASEFREQ_HZ / dac33->burst_bclkdiv / 32;
 	dac33->keep_bclk = pdata->keep_bclk;
-	dac33->auto_fifo_config = pdata->auto_fifo_config;
 	dac33->mode1_latency = pdata->mode1_latency;
 	if (!dac33->mode1_latency)
 		dac33->mode1_latency = 10000; /* 10ms */
 	dac33->irq = client->irq;
-	dac33->nsample = NSAMPLE_MAX;
-	dac33->nsample_max = NSAMPLE_MAX;
-	dac33->uthr = MODE7_UTHR;
 	/* Disable FIFO use by default */
 	dac33->fifo_mode = DAC33_FIFO_BYPASS;
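
The tlv320dac33 hunks above replace the fixed /32 burst-rate precalculation
with CALC_BURST_RATE(), fed 32 BCLKs per frame for S16_LE and 64 for S32_LE,
and size the FIFO per format. A minimal stand-alone sketch of that arithmetic;
the BURST_BASEFREQ_HZ value and the bclkdiv of 8 are assumptions for
illustration, not values taken from this diff.

#include <stdio.h>

#define BURST_BASEFREQ_HZ	49152000	/* assumed burst-mode base clock */
#define CALC_BURST_RATE(bclkdiv, bclk_per_sample) \
	(BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample)

int main(void)
{
	int bclkdiv = 8;	/* hypothetical pdata->burst_bclkdiv */

	/* S16_LE frames carry 32 BCLK cycles, S32_LE frames carry 64 */
	printf("S16_LE burst rate: %d frames/s\n", CALC_BURST_RATE(bclkdiv, 32));
	printf("S32_LE burst rate: %d frames/s\n", CALC_BURST_RATE(bclkdiv, 64));
	return 0;
}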
 
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index d2c2430..1f1ac81 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <sound/tpa6130a2-plat.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 
 #include "tpa6130a2.h"
@@ -42,7 +41,7 @@
 	unsigned char regs[TPA6130A2_CACHEREGNUM];
 	struct regulator *supply;
 	int power_gpio;
-	unsigned char power_state;
+	u8 power_state:1;
 	enum tpa_model id;
 };
 
@@ -117,7 +116,7 @@
 	return ret;
 }
 
-static int tpa6130a2_power(int power)
+static int tpa6130a2_power(u8 power)
 {
 	struct	tpa6130a2_data *data;
 	u8	val;
@@ -127,17 +126,19 @@
 	data = i2c_get_clientdata(tpa6130a2_client);
 
 	mutex_lock(&data->mutex);
-	if (power && !data->power_state) {
-		/* Power on */
-		if (data->power_gpio >= 0)
-			gpio_set_value(data->power_gpio, 1);
+	if (power == data->power_state)
+		goto exit;
 
+	if (power) {
 		ret = regulator_enable(data->supply);
 		if (ret != 0) {
 			dev_err(&tpa6130a2_client->dev,
 				"Failed to enable supply: %d\n", ret);
 			goto exit;
 		}
+		/* Power on */
+		if (data->power_gpio >= 0)
+			gpio_set_value(data->power_gpio, 1);
 
 		data->power_state = 1;
 		ret = tpa6130a2_initialize();
@@ -150,12 +151,7 @@
 			data->power_state = 0;
 			goto exit;
 		}
-
-		/* Clear SWS */
-		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
-		val &= ~TPA6130A2_SWS;
-		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-	} else if (!power && data->power_state) {
+	} else {
 		/* set SWS */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= TPA6130A2_SWS;
@@ -300,6 +296,7 @@
 		/* Enable amplifier */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= channel;
+		val &= ~TPA6130A2_SWS;
 		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
 
 		/* Unmute channel */
@@ -320,72 +317,24 @@
 	}
 }
 
-static int tpa6130a2_left_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 0);
-		break;
-	}
-	return 0;
-}
-
-static int tpa6130a2_right_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 0);
-		break;
-	}
-	return 0;
-}
-
-static int tpa6130a2_supply_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
+int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable)
 {
 	int ret = 0;
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
+	if (enable) {
 		ret = tpa6130a2_power(1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
+		if (ret < 0)
+			return ret;
+		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
+					 1);
+	} else {
+		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
+					 0);
 		ret = tpa6130a2_power(0);
-		break;
 	}
+
 	return ret;
 }
-
-static const struct snd_soc_dapm_widget tpa6130a2_dapm_widgets[] = {
-	SND_SOC_DAPM_PGA_E("TPA6130A2 Left", SND_SOC_NOPM,
-			0, 0, NULL, 0, tpa6130a2_left_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("TPA6130A2 Right", SND_SOC_NOPM,
-			0, 0, NULL, 0, tpa6130a2_right_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_SUPPLY("TPA6130A2 Enable", SND_SOC_NOPM,
-			0, 0, tpa6130a2_supply_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	/* Outputs */
-	SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Left"),
-	SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Right"),
-};
-
-static const struct snd_soc_dapm_route audio_map[] = {
-	{"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Left"},
-	{"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Right"},
-
-	{"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Enable"},
-	{"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Enable"},
-};
+EXPORT_SYMBOL_GPL(tpa6130a2_stereo_enable);
 
 int tpa6130a2_add_controls(struct snd_soc_codec *codec)
 {
@@ -396,18 +345,12 @@
 
 	data = i2c_get_clientdata(tpa6130a2_client);
 
-	snd_soc_dapm_new_controls(codec, tpa6130a2_dapm_widgets,
-				ARRAY_SIZE(tpa6130a2_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-
 	if (data->id == TPA6140A2)
 		return snd_soc_add_controls(codec, tpa6140a2_controls,
 						ARRAY_SIZE(tpa6140a2_controls));
 	else
 		return snd_soc_add_controls(codec, tpa6130a2_controls,
 						ARRAY_SIZE(tpa6130a2_controls));
-
 }
 EXPORT_SYMBOL_GPL(tpa6130a2_add_controls);
 
diff --git a/sound/soc/codecs/tpa6130a2.h b/sound/soc/codecs/tpa6130a2.h
index 57e867f..5df49c8 100644
--- a/sound/soc/codecs/tpa6130a2.h
+++ b/sound/soc/codecs/tpa6130a2.h
@@ -57,5 +57,6 @@
 #define TPA6130A2_VERSION_MASK		(0x0f)
 
 extern int tpa6130a2_add_controls(struct snd_soc_codec *codec);
+extern int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable);
 
 #endif /* __TPA6130A2_H__ */
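
The tpa6130a2 hunks drop the amplifier's own DAPM widgets and instead export
tpa6130a2_stereo_enable(), leaving power sequencing to the machine driver. A
rough sketch of how a board file might call it from one of its own widget
events; apart from tpa6130a2_stereo_enable() itself, the names and the include
path are illustrative, not part of this patch.

#include <sound/soc.h>
#include "../codecs/tpa6130a2.h"	/* assumed include path */

/* hypothetical machine-driver headphone widget event */
static int example_hp_event(struct snd_soc_dapm_widget *w,
			    struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;

	if (SND_SOC_DAPM_EVENT_ON(event))
		/* power the amplifier up and enable both channels */
		return tpa6130a2_stereo_enable(codec, 1);

	/* disable both channels, then power the amplifier down */
	return tpa6130a2_stereo_enable(codec, 0);
}
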
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index cbebec6..e4d464b 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -32,7 +32,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -233,6 +232,16 @@
 	return 0;
 }
 
+static inline void twl4030_wait_ms(int time)
+{
+	if (time < 60) {
+		time *= 1000;
+		usleep_range(time, time + 500);
+	} else {
+		msleep(time);
+	}
+}
+
 static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable)
 {
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
@@ -338,10 +347,14 @@
 	twl4030_write(codec, TWL4030_REG_ANAMICL,
 		reg | TWL4030_CNCL_OFFSET_START);
 
-	/* wait for offset cancellation to complete */
+	/*
+	 * Wait for offset cancellation to complete.
+	 * Since this takes a while, do not slam the i2c.
+	 * Start polling the status after ~20ms.
+	 */
+	msleep(20);
 	do {
-		/* this takes a little while, so don't slam i2c */
-		udelay(2000);
+		usleep_range(1000, 2000);
 		twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte,
 				    TWL4030_REG_ANAMICL);
 	} while ((i++ < 100) &&
@@ -725,9 +738,12 @@
 	/* Base values for ramp delay calculation: 2^19 - 2^26 */
 	unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304,
 				    8388608, 16777216, 33554432, 67108864};
+	unsigned int delay;
 
 	hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET);
 	hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET);
+	delay = (ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
+		twl4030->sysclk) + 1;
 
 	/* Enable external mute control, this dramatically reduces
 	 * the pop-noise */
@@ -751,16 +767,14 @@
 		hs_pop |= TWL4030_RAMP_EN;
 		twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
 		/* Wait ramp delay time + 1, so the VMID can settle */
-		mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
-			twl4030->sysclk) + 1);
+		twl4030_wait_ms(delay);
 	} else {
 		/* Headset ramp-down _not_ according to
 		 * the TRM, but in a way that it is working */
 		hs_pop &= ~TWL4030_RAMP_EN;
 		twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
 		/* Wait ramp delay time + 1, so the VMID can settle */
-		mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
-			twl4030->sysclk) + 1);
+		twl4030_wait_ms(delay);
 		/* Bypass the reg_cache to mute the headset */
 		twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
 					hs_gain & (~0x0f),
@@ -835,7 +849,7 @@
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
 
 	if (twl4030->digimic_delay)
-		mdelay(twl4030->digimic_delay);
+		twl4030_wait_ms(twl4030->digimic_delay);
 	return 0;
 }
 
@@ -1621,10 +1635,11 @@
 
 static int twl4030_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, twl4030_dapm_widgets,
-				 ARRAY_SIZE(twl4030_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, twl4030_dapm_widgets,
+				 ARRAY_SIZE(twl4030_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -1638,14 +1653,14 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			twl4030_codec_enable(codec, 1);
 		break;
 	case SND_SOC_BIAS_OFF:
 		twl4030_codec_enable(codec, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1709,6 +1724,7 @@
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
 
+	snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
 	if (twl4030->master_substream) {
 		twl4030->slave_substream = substream;
 		/* The DAI has one configuration for playback and capture, so
@@ -1833,7 +1849,7 @@
 	case SNDRV_PCM_FORMAT_S16_LE:
 		format |= TWL4030_DATA_WIDTH_16S_16W;
 		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S32_LE:
 		format |= TWL4030_DATA_WIDTH_32S_24W;
 		break;
 	default:
@@ -2166,7 +2182,7 @@
 }
 
 #define TWL4030_RATES	 (SNDRV_PCM_RATE_8000_48000)
-#define TWL4030_FORMATS	 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE)
+#define TWL4030_FORMATS	 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct snd_soc_dai_ops twl4030_dai_hifi_ops = {
 	.startup	= twl4030_startup,
@@ -2245,7 +2261,7 @@
 	snd_soc_codec_set_drvdata(codec, twl4030);
 	/* Set the defaults, and power up the codec */
 	twl4030->sysclk = twl4030_codec_get_mclk() / 1000;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	twl4030_init_chip(codec);
 
@@ -2257,9 +2273,12 @@
 
 static int twl4030_soc_remove(struct snd_soc_codec *codec)
 {
+	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
+
 	/* Reset registers to their chip default before leaving */
 	twl4030_reset_registers(codec);
 	twl4030_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	kfree(twl4030);
 	return 0;
 }
 
@@ -2291,10 +2310,7 @@
 
 static int __devexit twl4030_codec_remove(struct platform_device *pdev)
 {
-	struct twl4030_priv *twl4030 = dev_get_drvdata(&pdev->dev);
-
 	snd_soc_unregister_codec(&pdev->dev);
-	kfree(twl4030);
 	return 0;
 }
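
The new twl4030_wait_ms() helper uses usleep_range() below 60 ms and msleep()
otherwise, and the headset ramp wait it now receives is precomputed as
ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] / sysclk + 1, with sysclk in kHz
(twl4030_codec_get_mclk() / 1000). A small user-space sketch of which path each
RAMP_DELAY setting ends up taking; the 26000 kHz sysclk is an assumed example
value.

#include <stdio.h>

/* base values for the ramp delay calculation, 2^19 - 2^26, as in the driver */
static const unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304,
					 8388608, 16777216, 33554432, 67108864};

int main(void)
{
	unsigned int sysclk = 26000;	/* assumed MCLK in kHz */
	unsigned int idx, delay;

	for (idx = 0; idx < 8; idx++) {
		delay = ramp_base[idx] / sysclk + 1;
		printf("RAMP_DELAY=%u: %u ms -> %s\n", idx, delay,
		       delay < 60 ? "usleep_range()" : "msleep()");
	}
	return 0;
}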
 
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 10f6e52..4bbf1b1 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -34,14 +34,46 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
 #include "twl6040.h"
 
-#define TWL6040_RATES	 (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-#define TWL6040_FORMATS	 (SNDRV_PCM_FMTBIT_S32_LE)
+#define TWL6040_RATES		SNDRV_PCM_RATE_8000_96000
+#define TWL6040_FORMATS	(SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TWL6040_OUTHS_0dB 0x00
+#define TWL6040_OUTHS_M30dB 0x0F
+#define TWL6040_OUTHF_0dB 0x03
+#define TWL6040_OUTHF_M52dB 0x1D
+
+#define TWL6040_RAMP_NONE	0
+#define TWL6040_RAMP_UP		1
+#define TWL6040_RAMP_DOWN	2
+
+#define TWL6040_HSL_VOL_MASK	0x0F
+#define TWL6040_HSL_VOL_SHIFT	0
+#define TWL6040_HSR_VOL_MASK	0xF0
+#define TWL6040_HSR_VOL_SHIFT	4
+#define TWL6040_HF_VOL_MASK	0x1F
+#define TWL6040_HF_VOL_SHIFT	0
+
+struct twl6040_output {
+	u16 active;
+	u16 left_vol;
+	u16 right_vol;
+	u16 left_step;
+	u16 right_step;
+	unsigned int step_delay;
+	u16 ramp;
+	u16 mute;
+	struct completion ramp_done;
+};
+
+struct twl6040_jack_data {
+	struct snd_soc_jack *jack;
+	int report;
+};
 
 /* codec private data */
 struct twl6040_data {
@@ -53,6 +85,17 @@
 	unsigned int sysclk;
 	struct snd_pcm_hw_constraint_list *sysclk_constraints;
 	struct completion ready;
+	struct twl6040_jack_data hs_jack;
+	struct snd_soc_codec *codec;
+	struct workqueue_struct *workqueue;
+	struct delayed_work delayed_work;
+	struct mutex mutex;
+	struct twl6040_output headset;
+	struct twl6040_output handsfree;
+	struct workqueue_struct *hf_workqueue;
+	struct workqueue_struct *hs_workqueue;
+	struct delayed_work hs_delayed_work;
+	struct delayed_work hf_delayed_work;
 };
 
 /*
@@ -201,7 +244,7 @@
 	if (reg >= TWL6040_CACHEREGNUM)
 		return -EIO;
 
-	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &value, reg);
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &value, reg);
 	twl6040_write_reg_cache(codec, reg, value);
 
 	return value;
@@ -217,7 +260,7 @@
 		return -EIO;
 
 	twl6040_write_reg_cache(codec, reg, value);
-	return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, reg);
+	return twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, value, reg);
 }
 
 static void twl6040_init_vio_regs(struct snd_soc_codec *codec)
@@ -254,6 +297,305 @@
 	}
 }
 
+/*
+ * Ramp HS PGA volume to minimise pops at stream startup and shutdown.
+ */
+static inline int twl6040_hs_ramp_step(struct snd_soc_codec *codec,
+			unsigned int left_step, unsigned int right_step)
+{
+
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *headset = &priv->headset;
+	int left_complete = 0, right_complete = 0;
+	u8 reg, val;
+
+	/* left channel */
+	left_step = (left_step > 0xF) ? 0xF : left_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN);
+	val = (~reg & TWL6040_HSL_VOL_MASK);
+
+	if (headset->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < headset->left_vol) {
+			val += left_step;
+			reg &= ~TWL6040_HSL_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+					(reg | (~val & TWL6040_HSL_VOL_MASK)));
+		} else {
+			left_complete = 1;
+		}
+	} else if (headset->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0x0) {
+			val -= left_step;
+			reg &= ~TWL6040_HSL_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN, reg |
+						(~val & TWL6040_HSL_VOL_MASK));
+		} else {
+			left_complete = 1;
+		}
+	}
+
+	/* right channel */
+	right_step = (right_step > 0xF) ? 0xF : right_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN);
+	val = (~reg & TWL6040_HSR_VOL_MASK) >> TWL6040_HSR_VOL_SHIFT;
+
+	if (headset->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < headset->right_vol) {
+			val += right_step;
+			reg &= ~TWL6040_HSR_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+				(reg | (~val << TWL6040_HSR_VOL_SHIFT)));
+		} else {
+			right_complete = 1;
+		}
+	} else if (headset->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0x0) {
+			val -= right_step;
+			reg &= ~TWL6040_HSR_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+					 reg | (~val << TWL6040_HSR_VOL_SHIFT));
+		} else {
+			right_complete = 1;
+		}
+	}
+
+	return left_complete & right_complete;
+}
+
+/*
+ * Ramp HF PGA volume to minimise pops at stream startup and shutdown.
+ */
+static inline int twl6040_hf_ramp_step(struct snd_soc_codec *codec,
+			unsigned int left_step, unsigned int right_step)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *handsfree = &priv->handsfree;
+	int left_complete = 0, right_complete = 0;
+	u16 reg, val;
+
+	/* left channel */
+	left_step = (left_step > 0x1D) ? 0x1D : left_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFLGAIN);
+	reg = 0x1D - reg;
+	val = (reg & TWL6040_HF_VOL_MASK);
+	if (handsfree->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < handsfree->left_vol) {
+			val += left_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFLGAIN,
+						reg | (0x1D - val));
+		} else {
+			left_complete = 1;
+		}
+	} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0) {
+			val -= left_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFLGAIN,
+						reg | (0x1D - val));
+		} else {
+			left_complete = 1;
+		}
+	}
+
+	/* right channel */
+	right_step = (right_step > 0x1D) ? 0x1D : right_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFRGAIN);
+	reg = 0x1D - reg;
+	val = (reg & TWL6040_HF_VOL_MASK);
+	if (handsfree->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < handsfree->right_vol) {
+			val += right_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFRGAIN,
+						reg | (0x1D - val));
+		} else {
+			right_complete = 1;
+		}
+	} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0) {
+			val -= right_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFRGAIN,
+						reg | (0x1D - val));
+		}
+	}
+
+	return left_complete & right_complete;
+}
+
+/*
+ * This work ramps both output PGAs at stream start/stop time to
+ * minimise pop associated with DAPM power switching.
+ */
+static void twl6040_pga_hs_work(struct work_struct *work)
+{
+	struct twl6040_data *priv =
+		container_of(work, struct twl6040_data, hs_delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_output *headset = &priv->headset;
+	unsigned int delay = headset->step_delay;
+	int i, headset_complete;
+
+	/* do we need to ramp at all ? */
+	if (headset->ramp == TWL6040_RAMP_NONE)
+		return;
+
+	/* HS PGA volumes have 4 bits of resolution to ramp */
+	for (i = 0; i <= 16; i++) {
+		headset_complete = 1;
+		if (headset->ramp != TWL6040_RAMP_NONE)
+			headset_complete = twl6040_hs_ramp_step(codec,
+							headset->left_step,
+							headset->right_step);
+
+		/* ramp finished ? */
+		if (headset_complete)
+			break;
+
+		/*
+		 * TODO: tune: delay is longer over 0dB
+		 * as increases are larger.
+		 */
+		if (i >= 8)
+			schedule_timeout_interruptible(msecs_to_jiffies(delay +
+							(delay >> 1)));
+		else
+			schedule_timeout_interruptible(msecs_to_jiffies(delay));
+	}
+
+	if (headset->ramp == TWL6040_RAMP_DOWN) {
+		headset->active = 0;
+		complete(&headset->ramp_done);
+	} else {
+		headset->active = 1;
+	}
+	headset->ramp = TWL6040_RAMP_NONE;
+}
+
+static void twl6040_pga_hf_work(struct work_struct *work)
+{
+	struct twl6040_data *priv =
+		container_of(work, struct twl6040_data, hf_delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_output *handsfree = &priv->handsfree;
+	unsigned int delay = handsfree->step_delay;
+	int i, handsfree_complete;
+
+	/* do we need to ramp at all ? */
+	if (handsfree->ramp == TWL6040_RAMP_NONE)
+		return;
+
+	/* HF PGA volumes have 5 bits of resolution to ramp */
+	for (i = 0; i <= 32; i++) {
+		handsfree_complete = 1;
+		if (handsfree->ramp != TWL6040_RAMP_NONE)
+			handsfree_complete = twl6040_hf_ramp_step(codec,
+							handsfree->left_step,
+							handsfree->right_step);
+
+		/* ramp finished ? */
+		if (handsfree_complete)
+			break;
+
+		/*
+		 * TODO: tune: delay is longer over 0dB
+		 * as increases are larger.
+		 */
+		if (i >= 16)
+			schedule_timeout_interruptible(msecs_to_jiffies(delay +
+						       (delay >> 1)));
+		else
+			schedule_timeout_interruptible(msecs_to_jiffies(delay));
+	}
+
+
+	if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		handsfree->active = 0;
+		complete(&handsfree->ramp_done);
+	} else
+		handsfree->active = 1;
+	handsfree->ramp = TWL6040_RAMP_NONE;
+}
+
+static int pga_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out;
+	struct delayed_work *work;
+	struct workqueue_struct *queue;
+
+	switch (w->shift) {
+	case 2:
+	case 3:
+		out = &priv->headset;
+		work = &priv->hs_delayed_work;
+		queue = priv->hs_workqueue;
+		out->step_delay = 5;	/* 5 ms between volume ramp steps */
+		break;
+	case 4:
+		out = &priv->handsfree;
+		work = &priv->hf_delayed_work;
+		queue = priv->hf_workqueue;
+		out->step_delay = 5;	/* 5 ms between volume ramp steps */
+		if (SND_SOC_DAPM_EVENT_ON(event))
+			priv->non_lp++;
+		else
+			priv->non_lp--;
+		break;
+	default:
+		return -1;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (out->active)
+			break;
+
+		/* don't use volume ramp for power-up */
+		out->left_step = out->left_vol;
+		out->right_step = out->right_vol;
+
+		if (!delayed_work_pending(work)) {
+			out->ramp = TWL6040_RAMP_UP;
+			queue_delayed_work(queue, work,
+					msecs_to_jiffies(1));
+		}
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		if (!out->active)
+			break;
+
+		if (!delayed_work_pending(work)) {
+			/* use volume ramp for power-down */
+			out->left_step = 1;
+			out->right_step = 1;
+			out->ramp = TWL6040_RAMP_DOWN;
+			INIT_COMPLETION(out->ramp_done);
+
+			queue_delayed_work(queue, work,
+					msecs_to_jiffies(1));
+
+			wait_for_completion_timeout(&out->ramp_done,
+					msecs_to_jiffies(2000));
+		}
+		break;
+	}
+
+	return 0;
+}
+
 /* twl6040 codec manual power-up sequence */
 static void twl6040_power_up(struct snd_soc_codec *codec)
 {
@@ -382,6 +724,47 @@
 	return 0;
 }
 
+void twl6040_hs_jack_report(struct snd_soc_codec *codec,
+				struct snd_soc_jack *jack, int report)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	int status;
+
+	mutex_lock(&priv->mutex);
+
+	/* Sync status */
+	status = twl6040_read_reg_volatile(codec, TWL6040_REG_STATUS);
+	if (status & TWL6040_PLUGCOMP)
+		snd_soc_jack_report(jack, report, report);
+	else
+		snd_soc_jack_report(jack, 0, report);
+
+	mutex_unlock(&priv->mutex);
+}
+
+void twl6040_hs_jack_detect(struct snd_soc_codec *codec,
+				struct snd_soc_jack *jack, int report)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_jack_data *hs_jack = &priv->hs_jack;
+
+	hs_jack->jack = jack;
+	hs_jack->report = report;
+
+	twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report);
+}
+EXPORT_SYMBOL_GPL(twl6040_hs_jack_detect);
+
+static void twl6040_accessory_work(struct work_struct *work)
+{
+	struct twl6040_data *priv = container_of(work,
+					struct twl6040_data, delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_jack_data *hs_jack = &priv->hs_jack;
+
+	twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report);
+}
+
 /* audio interrupt handler */
 static irqreturn_t twl6040_naudint_handler(int irq, void *data)
 {
@@ -389,33 +772,180 @@
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 	u8 intid;
 
-	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
 
-	switch (intid) {
-	case TWL6040_THINT:
+	if (intid & TWL6040_THINT)
 		dev_alert(codec->dev, "die temp over-limit detection\n");
-		break;
-	case TWL6040_PLUGINT:
-	case TWL6040_UNPLUGINT:
-	case TWL6040_HOOKINT:
-		break;
-	case TWL6040_HFINT:
+
+	if ((intid & TWL6040_PLUGINT) || (intid & TWL6040_UNPLUGINT))
+		queue_delayed_work(priv->workqueue, &priv->delayed_work,
+							msecs_to_jiffies(200));
+
+	if (intid & TWL6040_HOOKINT)
+		dev_info(codec->dev, "hook detection\n");
+
+	if (intid & TWL6040_HFINT)
 		dev_alert(codec->dev, "hf drivers over current detection\n");
-		break;
-	case TWL6040_VIBINT:
+
+	if (intid & TWL6040_VIBINT)
 		dev_alert(codec->dev, "vib drivers over current detection\n");
-		break;
-	case TWL6040_READYINT:
+
+	if (intid & TWL6040_READYINT)
 		complete(&priv->ready);
-		break;
-	default:
-		dev_err(codec->dev, "unknown audio interrupt %d\n", intid);
-		break;
-	}
 
 	return IRQ_HANDLED;
 }
 
+static int twl6040_put_volsw(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = NULL;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int ret;
+	unsigned int reg = mc->reg;
+
+	/* For HS and HF we shadow the values and only actually write
+	 * them out when active in order to ensure the amplifier comes on
+	 * as quietly as possible. */
+	switch (reg) {
+	case TWL6040_REG_HSGAIN:
+		out = &twl6040_priv->headset;
+		break;
+	default:
+		break;
+	}
+
+	if (out) {
+		out->left_vol = ucontrol->value.integer.value[0];
+		out->right_vol = ucontrol->value.integer.value[1];
+		if (!out->active)
+			return 1;
+	}
+
+	ret = snd_soc_put_volsw(kcontrol, ucontrol);
+	if (ret < 0)
+		return ret;
+
+	return 1;
+}
+
+static int twl6040_get_volsw(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = &twl6040_priv->headset;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int reg = mc->reg;
+
+	switch (reg) {
+	case TWL6040_REG_HSGAIN:
+		out = &twl6040_priv->headset;
+		ucontrol->value.integer.value[0] = out->left_vol;
+		ucontrol->value.integer.value[1] = out->right_vol;
+		return 0;
+
+	default:
+		break;
+	}
+
+	return snd_soc_get_volsw(kcontrol, ucontrol);
+}
+
+static int twl6040_put_volsw_2r_vu(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = NULL;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int ret;
+	unsigned int reg = mc->reg;
+
+	/* For HS and HF we shadow the values and only actually write
+	 * them out when active in order to ensure the amplifier comes on
+	 * as quietly as possible. */
+	switch (reg) {
+	case TWL6040_REG_HFLGAIN:
+	case TWL6040_REG_HFRGAIN:
+		out = &twl6040_priv->handsfree;
+		break;
+	default:
+		break;
+	}
+
+	if (out) {
+		out->left_vol = ucontrol->value.integer.value[0];
+		out->right_vol = ucontrol->value.integer.value[1];
+		if (!out->active)
+			return 1;
+	}
+
+	ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
+	if (ret < 0)
+		return ret;
+
+	return 1;
+}
+
+static int twl6040_get_volsw_2r(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = &twl6040_priv->handsfree;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int reg = mc->reg;
+
+	/* If these are cached registers use the cache */
+	switch (reg) {
+	case TWL6040_REG_HFLGAIN:
+	case TWL6040_REG_HFRGAIN:
+		out = &twl6040_priv->handsfree;
+		ucontrol->value.integer.value[0] = out->left_vol;
+		ucontrol->value.integer.value[1] = out->right_vol;
+		return 0;
+
+	default:
+		break;
+	}
+
+	return snd_soc_get_volsw_2r(kcontrol, ucontrol);
+}
+
+/* double control with volume update */
+#define SOC_TWL6040_DOUBLE_TLV(xname, xreg, shift_left, shift_right, xmax,\
+							xinvert, tlv_array)\
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
+		 SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+	.tlv.p = (tlv_array), \
+	.info = snd_soc_info_volsw, .get = twl6040_get_volsw, \
+	.put = twl6040_put_volsw, \
+	.private_value = (unsigned long)&(struct soc_mixer_control) \
+		{.reg = xreg, .shift = shift_left, .rshift = shift_right,\
+		 .max = xmax, .platform_max = xmax, .invert = xinvert} }
+
+/* double control with volume update */
+#define SOC_TWL6040_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax,\
+				xinvert, tlv_array)\
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+		SNDRV_CTL_ELEM_ACCESS_READWRITE | \
+		SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+	.tlv.p = (tlv_array), \
+	.info = snd_soc_info_volsw_2r, \
+	.get = twl6040_get_volsw_2r, .put = twl6040_put_volsw_2r_vu, \
+	.private_value = (unsigned long)&(struct soc_mixer_control) \
+		{.reg = reg_left, .rreg = reg_right, .shift = xshift, \
+		 .rshift = xshift, .max = xmax, .invert = xinvert}, }
+
 /*
  * MICATT volume control:
  * from -6 to 0 dB in 6 dB steps
@@ -424,9 +954,15 @@
 
 /*
  * MICGAIN volume control:
- * from 6 to 30 dB in 6 dB steps
+ * from -6 to 30 dB in 6 dB steps
  */
-static DECLARE_TLV_DB_SCALE(mic_amp_tlv, 600, 600, 0);
+static DECLARE_TLV_DB_SCALE(mic_amp_tlv, -600, 600, 0);
+
+/*
+ * AFMGAIN volume control:
+ * from 18 to 24 dB in 6 dB steps
+ */
+static DECLARE_TLV_DB_SCALE(afm_amp_tlv, 1800, 600, 0);
 
 /*
  * HSGAIN volume control:
@@ -455,8 +991,30 @@
 	{"Headset Mic", "Sub Mic", "Aux/FM Right", "Off"};
 
 static const struct soc_enum twl6040_enum[] = {
-	SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 3, twl6040_amicl_texts),
-	SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 3, twl6040_amicr_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 4, twl6040_amicl_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 4, twl6040_amicr_texts),
+};
+
+static const char *twl6040_hs_texts[] = {
+	"Off", "HS DAC", "Line-In amp"
+};
+
+static const struct soc_enum twl6040_hs_enum[] = {
+	SOC_ENUM_SINGLE(TWL6040_REG_HSLCTL, 5, ARRAY_SIZE(twl6040_hs_texts),
+			twl6040_hs_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_HSRCTL, 5, ARRAY_SIZE(twl6040_hs_texts),
+			twl6040_hs_texts),
+};
+
+static const char *twl6040_hf_texts[] = {
+	"Off", "HF DAC", "Line-In amp"
+};
+
+static const struct soc_enum twl6040_hf_enum[] = {
+	SOC_ENUM_SINGLE(TWL6040_REG_HFLCTL, 2, ARRAY_SIZE(twl6040_hf_texts),
+			twl6040_hf_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_HFRCTL, 2, ARRAY_SIZE(twl6040_hf_texts),
+			twl6040_hf_texts),
 };
 
 static const struct snd_kcontrol_new amicl_control =
@@ -466,18 +1024,18 @@
 	SOC_DAPM_ENUM("Route", twl6040_enum[1]);
 
 /* Headset DAC playback switches */
-static const struct snd_kcontrol_new hsdacl_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSLCTL, 5, 1, 0);
+static const struct snd_kcontrol_new hsl_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hs_enum[0]);
 
-static const struct snd_kcontrol_new hsdacr_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSRCTL, 5, 1, 0);
+static const struct snd_kcontrol_new hsr_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hs_enum[1]);
 
 /* Handsfree DAC playback switches */
-static const struct snd_kcontrol_new hfdacl_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 2, 1, 0);
+static const struct snd_kcontrol_new hfl_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hf_enum[0]);
 
-static const struct snd_kcontrol_new hfdacr_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFRCTL, 2, 1, 0);
+static const struct snd_kcontrol_new hfr_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hf_enum[1]);
 
 static const struct snd_kcontrol_new ep_driver_switch_controls =
 	SOC_DAPM_SINGLE("Switch", TWL6040_REG_EARCTL, 0, 1, 0);
@@ -489,10 +1047,14 @@
 	SOC_DOUBLE_TLV("Capture Volume",
 		TWL6040_REG_MICGAIN, 0, 3, 4, 0, mic_amp_tlv),
 
+	/* AFM gains */
+	SOC_DOUBLE_TLV("Aux FM Volume",
+		TWL6040_REG_LINEGAIN, 0, 4, 0xF, 0, afm_amp_tlv),
+
 	/* Playback gains */
-	SOC_DOUBLE_TLV("Headset Playback Volume",
+	SOC_TWL6040_DOUBLE_TLV("Headset Playback Volume",
 		TWL6040_REG_HSGAIN, 0, 4, 0xF, 1, hs_tlv),
-	SOC_DOUBLE_R_TLV("Handsfree Playback Volume",
+	SOC_TWL6040_DOUBLE_R_TLV("Handsfree Playback Volume",
 		TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1, hf_tlv),
 	SOC_SINGLE_TLV("Earphone Playback Volume",
 		TWL6040_REG_EARCTL, 1, 0xF, 1, ep_tlv),
@@ -525,6 +1087,12 @@
 	SND_SOC_DAPM_PGA("MicAmpR",
 			TWL6040_REG_MICRCTL, 0, 0, NULL, 0),
 
+	/* Auxiliary FM PGAs */
+	SND_SOC_DAPM_PGA("AFMAmpL",
+			TWL6040_REG_MICLCTL, 1, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("AFMAmpR",
+			TWL6040_REG_MICRCTL, 1, 0, NULL, 0),
+
 	/* ADCs */
 	SND_SOC_DAPM_ADC("ADC Left", "Left Front Capture",
 			TWL6040_REG_MICLCTL, 2, 0),
@@ -559,29 +1127,33 @@
 			twl6040_power_mode_event,
 			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
-	/* Analog playback switches */
-	SND_SOC_DAPM_SWITCH("HSDAC Left Playback",
-			SND_SOC_NOPM, 0, 0, &hsdacl_switch_controls),
-	SND_SOC_DAPM_SWITCH("HSDAC Right Playback",
-			SND_SOC_NOPM, 0, 0, &hsdacr_switch_controls),
-	SND_SOC_DAPM_SWITCH("HFDAC Left Playback",
-			SND_SOC_NOPM, 0, 0, &hfdacl_switch_controls),
-	SND_SOC_DAPM_SWITCH("HFDAC Right Playback",
-			SND_SOC_NOPM, 0, 0, &hfdacr_switch_controls),
+	SND_SOC_DAPM_MUX("HF Left Playback",
+			SND_SOC_NOPM, 0, 0, &hfl_mux_controls),
+	SND_SOC_DAPM_MUX("HF Right Playback",
+			SND_SOC_NOPM, 0, 0, &hfr_mux_controls),
+	/* Analog playback Muxes */
+	SND_SOC_DAPM_MUX("HS Left Playback",
+			SND_SOC_NOPM, 0, 0, &hsl_mux_controls),
+	SND_SOC_DAPM_MUX("HS Right Playback",
+			SND_SOC_NOPM, 0, 0, &hsr_mux_controls),
 
 	/* Analog playback drivers */
-	SND_SOC_DAPM_PGA_E("Handsfree Left Driver",
+	SND_SOC_DAPM_OUT_DRV_E("Handsfree Left Driver",
 			TWL6040_REG_HFLCTL, 4, 0, NULL, 0,
-			twl6040_power_mode_event,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("Handsfree Right Driver",
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Handsfree Right Driver",
 			TWL6040_REG_HFRCTL, 4, 0, NULL, 0,
-			twl6040_power_mode_event,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA("Headset Left Driver",
-			TWL6040_REG_HSLCTL, 2, 0, NULL, 0),
-	SND_SOC_DAPM_PGA("Headset Right Driver",
-			TWL6040_REG_HSRCTL, 2, 0, NULL, 0),
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Headset Left Driver",
+			TWL6040_REG_HSLCTL, 2, 0, NULL, 0,
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Headset Right Driver",
+			TWL6040_REG_HSRCTL, 2, 0, NULL, 0,
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 	SND_SOC_DAPM_SWITCH_E("Earphone Driver",
 			SND_SOC_NOPM, 0, 0, &ep_driver_switch_controls,
 			twl6040_power_mode_event,
@@ -611,12 +1183,18 @@
 	{"ADC Left", NULL, "MicAmpL"},
 	{"ADC Right", NULL, "MicAmpR"},
 
-	/* Headset playback path */
-	{"HSDAC Left Playback", "Switch", "HSDAC Left"},
-	{"HSDAC Right Playback", "Switch", "HSDAC Right"},
+	/* AFM path */
+	{"AFMAmpL", NULL, "AFML"},
+	{"AFMAmpR", NULL, "AFMR"},
 
-	{"Headset Left Driver", NULL, "HSDAC Left Playback"},
-	{"Headset Right Driver", NULL, "HSDAC Right Playback"},
+	{"HS Left Playback", "HS DAC", "HSDAC Left"},
+	{"HS Left Playback", "Line-In amp", "AFMAmpL"},
+
+	{"HS Right Playback", "HS DAC", "HSDAC Right"},
+	{"HS Right Playback", "Line-In amp", "AFMAmpR"},
+
+	{"Headset Left Driver", NULL, "HS Left Playback"},
+	{"Headset Right Driver", NULL, "HS Right Playback"},
 
 	{"HSOL", NULL, "Headset Left Driver"},
 	{"HSOR", NULL, "Headset Right Driver"},
@@ -625,12 +1203,14 @@
 	{"Earphone Driver", "Switch", "HSDAC Left"},
 	{"EP", NULL, "Earphone Driver"},
 
-	/* Handsfree playback path */
-	{"HFDAC Left Playback", "Switch", "HFDAC Left"},
-	{"HFDAC Right Playback", "Switch", "HFDAC Right"},
+	{"HF Left Playback", "HF DAC", "HFDAC Left"},
+	{"HF Left Playback", "Line-In amp", "AFMAmpL"},
 
-	{"HFDAC Left PGA", NULL, "HFDAC Left Playback"},
-	{"HFDAC Right PGA", NULL, "HFDAC Right Playback"},
+	{"HF Right Playback", "HF DAC", "HFDAC Right"},
+	{"HF Right Playback", "Line-In amp", "AFMAmpR"},
+
+	{"HFDAC Left PGA", NULL, "HF Left Playback"},
+	{"HFDAC Right PGA", NULL, "HF Right Playback"},
 
 	{"Handsfree Left Driver", "Switch", "HFDAC Left PGA"},
 	{"Handsfree Right Driver", "Switch", "HFDAC Right PGA"},
@@ -641,12 +1221,12 @@
 
 static int twl6040_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, twl6040_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, twl6040_dapm_widgets,
 				 ARRAY_SIZE(twl6040_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
-
-	snd_soc_dapm_new_widgets(codec);
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_widgets(dapm);
 
 	return 0;
 }
@@ -659,10 +1239,10 @@
 	u8 intid;
 
 	time_left = wait_for_completion_timeout(&priv->ready,
-				msecs_to_jiffies(48));
+				msecs_to_jiffies(144));
 
 	if (!time_left) {
-		twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid,
+		twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid,
 							TWL6040_REG_INTID);
 		if (!(intid & TWL6040_READYINT)) {
 			dev_err(codec->dev, "timeout waiting for READYINT\n");
@@ -713,6 +1293,15 @@
 
 		/* initialize vdd/vss registers with reg_cache */
 		twl6040_init_vdd_regs(codec);
+
+		/* Set external boost GPO */
+		twl6040_write(codec, TWL6040_REG_GPOCTL, 0x02);
+
+		/* Set initial minimal gain values */
+		twl6040_write(codec, TWL6040_REG_HSGAIN, 0xFF);
+		twl6040_write(codec, TWL6040_REG_EARCTL, 0x1E);
+		twl6040_write(codec, TWL6040_REG_HFLGAIN, 0x1D);
+		twl6040_write(codec, TWL6040_REG_HFRGAIN, 0x1D);
 		break;
 	case SND_SOC_BIAS_OFF:
 		if (!priv->codec_powered)
@@ -739,7 +1328,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -772,23 +1361,6 @@
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 
-	if (!priv->sysclk) {
-		dev_err(codec->dev,
-			"no mclk configured, call set_sysclk() on init\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * capture is not supported at 17.64 MHz,
-	 * it's reserved for headset low-power playback scenario
-	 */
-	if ((priv->sysclk == 17640000) && substream->stream) {
-		dev_err(codec->dev,
-			"capture mode is not supported at %dHz\n",
-			priv->sysclk);
-		return -EINVAL;
-	}
-
 	snd_pcm_hw_constraint_list(substream->runtime, 0,
 				SNDRV_PCM_HW_PARAM_RATE,
 				priv->sysclk_constraints);
@@ -814,10 +1386,17 @@
 
 	rate = params_rate(params);
 	switch (rate) {
+	case 11250:
+	case 22500:
+	case 44100:
 	case 88200:
 		lppllctl |= TWL6040_LPLLFIN;
 		priv->sysclk = 17640000;
 		break;
+	case 8000:
+	case 16000:
+	case 32000:
+	case 48000:
 	case 96000:
 		lppllctl &= ~TWL6040_LPLLFIN;
 		priv->sysclk = 19200000;
@@ -832,31 +1411,37 @@
 	return 0;
 }
 
-static int twl6040_trigger(struct snd_pcm_substream *substream,
-			int cmd, struct snd_soc_dai *dai)
+static int twl6040_prepare(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-		/*
-		 * low-power playback mode is restricted
-		 * for headset path only
-		 */
-		if ((priv->sysclk == 17640000) && priv->non_lp) {
+	if (!priv->sysclk) {
+		dev_err(codec->dev,
+			"no mclk configured, call set_sysclk() on init\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * capture is not supported at 17.64 MHz,
+	 * it's reserved for headset low-power playback scenario
+	 */
+	if ((priv->sysclk == 17640000) &&
+			substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		dev_err(codec->dev,
+			"capture mode is not supported at %dHz\n",
+			priv->sysclk);
+		return -EINVAL;
+	}
+
+	if ((priv->sysclk == 17640000) && priv->non_lp) {
 			dev_err(codec->dev,
 				"some enabled paths aren't supported at %dHz\n",
 				priv->sysclk);
 			return -EPERM;
-		}
-		break;
-	default:
-		break;
 	}
-
 	return 0;
 }
 
@@ -970,7 +1555,7 @@
 static struct snd_soc_dai_ops twl6040_dai_ops = {
 	.startup	= twl6040_startup,
 	.hw_params	= twl6040_hw_params,
-	.trigger	= twl6040_trigger,
+	.prepare	= twl6040_prepare,
 	.set_sysclk	= twl6040_set_dai_sysclk,
 };
 
@@ -1004,6 +1589,7 @@
 static int twl6040_resume(struct snd_soc_codec *codec)
 {
 	twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	twl6040_set_bias_level(codec, codec->dapm.suspend_bias_level);
 
 	return 0;
 }
@@ -1018,24 +1604,41 @@
 	struct twl6040_data *priv;
 	int audpwron, naudint;
 	int ret = 0;
+	u8 icrev, intmr = TWL6040_ALLINT_MSK;
 
 	priv = kzalloc(sizeof(struct twl6040_data), GFP_KERNEL);
 	if (priv == NULL)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, priv);
 
-	if (twl_codec) {
+	priv->codec = codec;
+
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &icrev, TWL6040_REG_ASICREV);
+
+	if (twl_codec && (icrev > 0))
 		audpwron = twl_codec->audpwron_gpio;
-		naudint = twl_codec->naudint_irq;
-	} else {
+	else
 		audpwron = -EINVAL;
+
+	if (twl_codec)
+		naudint = twl_codec->naudint_irq;
+	else
 		naudint = 0;
-	}
 
 	priv->audpwron = audpwron;
 	priv->naudint = naudint;
+	priv->workqueue = create_singlethread_workqueue("twl6040-codec");
+
+	if (!priv->workqueue)
+		goto work_err;
+
+	INIT_DELAYED_WORK(&priv->delayed_work, twl6040_accessory_work);
+
+	mutex_init(&priv->mutex);
 
 	init_completion(&priv->ready);
+	init_completion(&priv->headset.ramp_done);
+	init_completion(&priv->handsfree.ramp_done);
 
 	if (gpio_is_valid(audpwron)) {
 		ret = gpio_request(audpwron, "audpwron");
@@ -1047,7 +1650,14 @@
 			goto gpio2_err;
 
 		priv->codec_powered = 0;
+
+		/* enable codec ready and plug/unplug interrupts */
+		intmr &= ~(TWL6040_READYMSK | TWL6040_PLUGMSK);
+
+		/* reset interrupt status to allow correct power up sequence */
+		twl6040_read_reg_volatile(codec, TWL6040_REG_INTID);
 	}
+	twl6040_write(codec, TWL6040_REG_INTMR, intmr);
 
 	if (naudint) {
 		/* audio interrupt */
@@ -1057,25 +1667,29 @@
 				"twl6040_codec", codec);
 		if (ret)
 			goto gpio2_err;
-	} else {
-		if (gpio_is_valid(audpwron)) {
-			/* enable only codec ready interrupt */
-			twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
-					~TWL6040_READYMSK & TWL6040_ALLINT_MSK);
-		} else {
-			/* no interrupts at all */
-			twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
-						TWL6040_ALLINT_MSK);
-		}
 	}
 
 	/* init vio registers */
 	twl6040_init_vio_regs(codec);
 
+	priv->hf_workqueue = create_singlethread_workqueue("twl6040-hf");
+	if (priv->hf_workqueue == NULL) {
+		ret = -ENOMEM;
+		goto irq_err;
+	}
+	priv->hs_workqueue = create_singlethread_workqueue("twl6040-hs");
+	if (priv->hs_workqueue == NULL) {
+		ret = -ENOMEM;
+		goto wq_err;
+	}
+
+	INIT_DELAYED_WORK(&priv->hs_delayed_work, twl6040_pga_hs_work);
+	INIT_DELAYED_WORK(&priv->hf_delayed_work, twl6040_pga_hf_work);
+
 	/* power on device */
 	ret = twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 	if (ret)
-		goto irq_err;
+		goto bias_err;
 
 	snd_soc_add_controls(codec, twl6040_snd_controls,
 				ARRAY_SIZE(twl6040_snd_controls));
@@ -1083,6 +1697,10 @@
 
 	return 0;
 
+bias_err:
+	destroy_workqueue(priv->hs_workqueue);
+wq_err:
+	destroy_workqueue(priv->hf_workqueue);
 irq_err:
 	if (naudint)
 		free_irq(naudint, codec);
@@ -1090,6 +1708,8 @@
 	if (gpio_is_valid(audpwron))
 		gpio_free(audpwron);
 gpio1_err:
+	destroy_workqueue(priv->workqueue);
+work_err:
 	kfree(priv);
 	return ret;
 }
@@ -1108,6 +1728,9 @@
 	if (naudint)
 		free_irq(naudint, codec);
 
+	destroy_workqueue(priv->workqueue);
+	destroy_workqueue(priv->hf_workqueue);
+	destroy_workqueue(priv->hs_workqueue);
 	kfree(priv);
 
 	return 0;
diff --git a/sound/soc/codecs/twl6040.h b/sound/soc/codecs/twl6040.h
index f7c77fa..23aeed0 100644
--- a/sound/soc/codecs/twl6040.h
+++ b/sound/soc/codecs/twl6040.h
@@ -79,6 +79,7 @@
 
 /* INTMR (0x04) fields */
 
+#define TWL6040_PLUGMSK			0x02
 #define TWL6040_READYMSK		0x40
 #define TWL6040_ALLINT_MSK		0x7B
 
@@ -135,4 +136,11 @@
 #define TWL6040_HPPLL_ID		1
 #define TWL6040_LPPLL_ID		2
 
+/* STATUS (0x2E) fields */
+
+#define TWL6040_PLUGCOMP		0x02
+
+void twl6040_hs_jack_detect(struct snd_soc_codec *codec,
+			    struct snd_soc_jack *jack, int report);
+
 #endif /* End of __TWL6040_H__ */
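
The HSGAIN register holds each channel's 4-bit gain inverted (0x0 is 0 dB,
0xF is the -30 dB minimum), which is why twl6040_hs_ramp_step() works on
val = ~reg & TWL6040_HSL_VOL_MASK and writes back ~val. A toy model of one
left-channel ramp-up; the target volume and step size are made-up example
values, not defaults from the driver.

#include <stdio.h>

#define TWL6040_HSL_VOL_MASK	0x0F

int main(void)
{
	unsigned int reg = 0x0F;	/* power-up value: maximum attenuation */
	unsigned int target = 0x0A;	/* assumed shadowed left_vol */
	unsigned int step = 2;		/* assumed left_step */
	unsigned int val;

	for (;;) {
		val = ~reg & TWL6040_HSL_VOL_MASK;	/* current volume */
		if (val >= target)
			break;
		val += step;
		reg &= ~TWL6040_HSL_VOL_MASK;
		reg |= ~val & TWL6040_HSL_VOL_MASK;	/* store inverted */
		printf("HSGAIN[3:0]=0x%x -> volume 0x%x\n",
		       reg & TWL6040_HSL_VOL_MASK, val);
	}
	return 0;
}
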
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 464f0cf..e76847a 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -19,7 +19,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include <sound/uda134x.h>
@@ -389,7 +388,7 @@
 			pd->power(0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 0c6c725..c5ca8cf 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -27,7 +27,6 @@
 #include <sound/control.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/uda1380.h>
 
@@ -36,7 +35,6 @@
 /* codec private data */
 struct uda1380_priv {
 	struct snd_soc_codec *codec;
-	u16 reg_cache[UDA1380_CACHEREGNUM];
 	unsigned int dac_clk;
 	struct work_struct work;
 	void *control_data;
@@ -414,10 +412,11 @@
 
 static int uda1380_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
-				  ARRAY_SIZE(uda1380_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
+				  ARRAY_SIZE(uda1380_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -603,7 +602,7 @@
 	int reg;
 	struct uda1380_platform_data *pdata = codec->dev->platform_data;
 
-	if (codec->bias_level == level)
+	if (codec->dapm.bias_level == level)
 		return 0;
 
 	switch (level) {
@@ -613,7 +612,7 @@
 		uda1380_write(codec, UDA1380_PM, R02_PON_BIAS | pm);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			if (gpio_is_valid(pdata->gpio_power)) {
 				gpio_set_value(pdata->gpio_power, 1);
 				mdelay(1);
@@ -636,7 +635,7 @@
 		for (reg = UDA1380_MVOL; reg < UDA1380_CACHEREGNUM; reg++)
 			set_bit(reg - 0x10, &uda1380_cache_dirty);
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 0c47c78..861b28f 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -25,8 +25,7 @@
 #include <linux/slab.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dai.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 
 #include "wl1273.h"
@@ -43,7 +42,7 @@
 static int snd_wl1273_fm_set_i2s_mode(struct wl1273_core *core,
 				      int rate, int width)
 {
-	struct device *dev = &core->i2c_dev->dev;
+	struct device *dev = &core->client->dev;
 	int r = 0;
 	u16 mode;
 
@@ -124,13 +123,13 @@
 	dev_dbg(dev, "mode: 0x%04x\n", mode);
 
 	if (core->i2s_mode != mode) {
-		r = wl1273_fm_write_cmd(core, WL1273_I2S_MODE_CONFIG_SET, mode);
+		r = core->write(core, WL1273_I2S_MODE_CONFIG_SET, mode);
 		if (r)
 			goto out;
 
 		core->i2s_mode = mode;
-		r = wl1273_fm_write_cmd(core, WL1273_AUDIO_ENABLE,
-					WL1273_AUDIO_ENABLE_I2S);
+		r = core->write(core, WL1273_AUDIO_ENABLE,
+				WL1273_AUDIO_ENABLE_I2S);
 		if (r)
 			goto out;
 	}
@@ -143,8 +142,7 @@
 static int snd_wl1273_fm_set_channel_number(struct wl1273_core *core,
 					    int channel_number)
 {
-	struct i2c_client *client = core->i2c_dev;
-	struct device *dev = &client->dev;
+	struct device *dev = &core->client->dev;
 	int r = 0;
 
 	dev_dbg(dev, "%s\n", __func__);
@@ -155,17 +153,13 @@
 		goto out;
 
 	if (channel_number == 1 && core->mode == WL1273_MODE_RX)
-		r = wl1273_fm_write_cmd(core, WL1273_MOST_MODE_SET,
-					WL1273_RX_MONO);
+		r = core->write(core, WL1273_MOST_MODE_SET, WL1273_RX_MONO);
 	else if (channel_number == 1 && core->mode == WL1273_MODE_TX)
-		r = wl1273_fm_write_cmd(core, WL1273_MONO_SET,
-					WL1273_TX_MONO);
+		r = core->write(core, WL1273_MONO_SET, WL1273_TX_MONO);
 	else if (channel_number == 2 && core->mode == WL1273_MODE_RX)
-		r = wl1273_fm_write_cmd(core, WL1273_MOST_MODE_SET,
-					WL1273_RX_STEREO);
+		r = core->write(core, WL1273_MOST_MODE_SET, WL1273_RX_STEREO);
 	else if (channel_number == 2 && core->mode == WL1273_MODE_TX)
-		r = wl1273_fm_write_cmd(core, WL1273_MONO_SET,
-					WL1273_TX_STEREO);
+		r = core->write(core, WL1273_MONO_SET, WL1273_TX_STEREO);
 	else
 		r = -EINVAL;
 out:
@@ -238,7 +232,7 @@
 	if (wl1273->core->audio_mode == val)
 		return 0;
 
-	r = wl1273_fm_set_audio(wl1273->core, val);
+	r = wl1273->core->set_audio(wl1273->core, val);
 	if (r < 0)
 		return r;
 
@@ -273,8 +267,8 @@
 
 	dev_dbg(codec->dev, "%s: enter.\n", __func__);
 
-	r = wl1273_fm_set_volume(wl1273->core,
-				 ucontrol->value.integer.value[0]);
+	r = wl1273->core->set_volume(wl1273->core,
+				     ucontrol->value.integer.value[0]);
 	if (r)
 		return r;
 
diff --git a/sound/soc/codecs/wl1273.h b/sound/soc/codecs/wl1273.h
index 14ed027..43ec7e6 100644
--- a/sound/soc/codecs/wl1273.h
+++ b/sound/soc/codecs/wl1273.h
@@ -25,77 +25,6 @@
 #ifndef __WL1273_CODEC_H__
 #define __WL1273_CODEC_H__
 
-/* I2S protocol, left channel first, data width 16 bits */
-#define WL1273_PCM_DEF_MODE		0x00
-
-/* Rx */
-#define WL1273_AUDIO_ENABLE_I2S		(1 << 0)
-#define WL1273_AUDIO_ENABLE_ANALOG	(1 << 1)
-
-/* Tx */
-#define WL1273_AUDIO_IO_SET_ANALOG	0
-#define WL1273_AUDIO_IO_SET_I2S		1
-
-#define WL1273_POWER_SET_OFF		0
-#define WL1273_POWER_SET_FM		(1 << 0)
-#define WL1273_POWER_SET_RDS		(1 << 1)
-#define WL1273_POWER_SET_RETENTION	(1 << 4)
-
-#define WL1273_PUPD_SET_OFF		0x00
-#define WL1273_PUPD_SET_ON		0x01
-#define WL1273_PUPD_SET_RETENTION	0x10
-
-/* I2S mode */
-#define WL1273_IS2_WIDTH_32	0x0
-#define WL1273_IS2_WIDTH_40	0x1
-#define WL1273_IS2_WIDTH_22_23	0x2
-#define WL1273_IS2_WIDTH_23_22	0x3
-#define WL1273_IS2_WIDTH_48	0x4
-#define WL1273_IS2_WIDTH_50	0x5
-#define WL1273_IS2_WIDTH_60	0x6
-#define WL1273_IS2_WIDTH_64	0x7
-#define WL1273_IS2_WIDTH_80	0x8
-#define WL1273_IS2_WIDTH_96	0x9
-#define WL1273_IS2_WIDTH_128	0xa
-#define WL1273_IS2_WIDTH	0xf
-
-#define WL1273_IS2_FORMAT_STD	(0x0 << 4)
-#define WL1273_IS2_FORMAT_LEFT	(0x1 << 4)
-#define WL1273_IS2_FORMAT_RIGHT	(0x2 << 4)
-#define WL1273_IS2_FORMAT_USER	(0x3 << 4)
-
-#define WL1273_IS2_MASTER	(0x0 << 6)
-#define WL1273_IS2_SLAVEW	(0x1 << 6)
-
-#define WL1273_IS2_TRI_AFTER_SENDING	(0x0 << 7)
-#define WL1273_IS2_TRI_ALWAYS_ACTIVE	(0x1 << 7)
-
-#define WL1273_IS2_SDOWS_RR	(0x0 << 8)
-#define WL1273_IS2_SDOWS_RF	(0x1 << 8)
-#define WL1273_IS2_SDOWS_FR	(0x2 << 8)
-#define WL1273_IS2_SDOWS_FF	(0x3 << 8)
-
-#define WL1273_IS2_TRI_OPT	(0x0 << 10)
-#define WL1273_IS2_TRI_ALWAYS	(0x1 << 10)
-
-#define WL1273_IS2_RATE_48K	(0x0 << 12)
-#define WL1273_IS2_RATE_44_1K	(0x1 << 12)
-#define WL1273_IS2_RATE_32K	(0x2 << 12)
-#define WL1273_IS2_RATE_22_05K	(0x4 << 12)
-#define WL1273_IS2_RATE_16K	(0x5 << 12)
-#define WL1273_IS2_RATE_12K	(0x8 << 12)
-#define WL1273_IS2_RATE_11_025	(0x9 << 12)
-#define WL1273_IS2_RATE_8K	(0xa << 12)
-#define WL1273_IS2_RATE		(0xf << 12)
-
-#define WL1273_I2S_DEF_MODE	(WL1273_IS2_WIDTH_32 | \
-				 WL1273_IS2_FORMAT_STD | \
-				 WL1273_IS2_MASTER | \
-				 WL1273_IS2_TRI_AFTER_SENDING | \
-				 WL1273_IS2_SDOWS_RR | \
-				 WL1273_IS2_TRI_OPT | \
-				 WL1273_IS2_RATE_48K)
-
 int wl1273_get_format(struct snd_soc_codec *codec, unsigned int *fmt);
 
 #endif	/* End of __WL1273_CODEC_H__ */
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 4bcd168..80ddf4f 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -36,7 +36,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -705,6 +704,7 @@
 /* Called from the machine driver */
 int wm2000_add_controls(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	if (!wm2000_i2c) {
@@ -712,12 +712,12 @@
 		return -ENODEV;
 	}
 
-	ret = snd_soc_dapm_new_controls(codec, wm2000_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, wm2000_dapm_widgets,
 					ARRAY_SIZE(wm2000_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 7611add..6d6dc9e 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -24,9 +24,9 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
+#include <trace/events/asoc.h>
 
 #include "wm8350.h"
 
@@ -54,6 +54,7 @@
 
 struct wm8350_jack_data {
 	struct snd_soc_jack *jack;
+	struct delayed_work work;
 	int report;
 	int short_report;
 };
@@ -230,8 +231,9 @@
  */
 static void wm8350_pga_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-	    container_of(work, struct snd_soc_codec, delayed_work.work);
+	struct snd_soc_dapm_context *dapm =
+	    container_of(work, struct snd_soc_dapm_context, delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
 	struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
 	struct wm8350_output *out1 = &wm8350_data->out1,
 	    *out2 = &wm8350_data->out2;
@@ -302,8 +304,8 @@
 		out->ramp = WM8350_RAMP_UP;
 		out->active = 1;
 
-		if (!delayed_work_pending(&codec->delayed_work))
-			schedule_delayed_work(&codec->delayed_work,
+		if (!delayed_work_pending(&codec->dapm.delayed_work))
+			schedule_delayed_work(&codec->dapm.delayed_work,
 					      msecs_to_jiffies(1));
 		break;
 
@@ -311,8 +313,8 @@
 		out->ramp = WM8350_RAMP_DOWN;
 		out->active = 0;
 
-		if (!delayed_work_pending(&codec->delayed_work))
-			schedule_delayed_work(&codec->delayed_work,
+		if (!delayed_work_pending(&codec->dapm.delayed_work))
+			schedule_delayed_work(&codec->dapm.delayed_work,
 					      msecs_to_jiffies(1));
 		break;
 	}
@@ -786,9 +788,10 @@
 
 static int wm8350_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	ret = snd_soc_dapm_new_controls(codec,
+	ret = snd_soc_dapm_new_controls(dapm,
 					wm8350_dapm_widgets,
 					ARRAY_SIZE(wm8350_dapm_widgets));
 	if (ret != 0) {
@@ -797,7 +800,7 @@
 	}
 
 	/* set up audio paths */
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret != 0) {
 		dev_err(codec->dev, "DAPM route register failed\n");
 		return ret;
@@ -1184,7 +1187,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
 						    priv->supplies);
 			if (ret != 0)
@@ -1317,7 +1320,7 @@
 				       priv->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1334,37 +1337,13 @@
 	return 0;
 }
 
-static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
+static void wm8350_hp_work(struct wm8350_data *priv,
+			   struct wm8350_jack_data *jack,
+			   u16 mask)
 {
-	struct wm8350_data *priv = data;
 	struct wm8350 *wm8350 = priv->codec.control_data;
 	u16 reg;
 	int report;
-	int mask;
-	struct wm8350_jack_data *jack = NULL;
-
-	switch (irq - wm8350->irq_base) {
-	case WM8350_IRQ_CODEC_JCK_DET_L:
-		jack = &priv->hpl;
-		mask = WM8350_JACK_L_LVL;
-		break;
-
-	case WM8350_IRQ_CODEC_JCK_DET_R:
-		jack = &priv->hpr;
-		mask = WM8350_JACK_R_LVL;
-		break;
-
-	default:
-		BUG();
-	}
-
-	if (!jack->jack) {
-		dev_warn(wm8350->dev, "Jack interrupt called with no jack\n");
-		return IRQ_NONE;
-	}
-
-	/* Debounce */
-	msleep(200);
 
 	reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS);
 	if (reg & mask)
@@ -1374,6 +1353,54 @@
 
 	snd_soc_jack_report(jack->jack, report, jack->report);
 
+}
+
+static void wm8350_hpl_work(struct work_struct *work)
+{
+	struct wm8350_data *priv =
+	    container_of(work, struct wm8350_data, hpl.work.work);
+
+	wm8350_hp_work(priv, &priv->hpl, WM8350_JACK_L_LVL);
+}
+
+static void wm8350_hpr_work(struct work_struct *work)
+{
+	struct wm8350_data *priv =
+	    container_of(work, struct wm8350_data, hpr.work.work);
+
+	wm8350_hp_work(priv, &priv->hpr, WM8350_JACK_R_LVL);
+}
+
+static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
+{
+	struct wm8350_data *priv = data;
+	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350_jack_data *jack = NULL;
+
+	switch (irq - wm8350->irq_base) {
+	case WM8350_IRQ_CODEC_JCK_DET_L:
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+		trace_snd_soc_jack_irq("WM8350 HPL");
+#endif
+		jack = &priv->hpl;
+		break;
+
+	case WM8350_IRQ_CODEC_JCK_DET_R:
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+		trace_snd_soc_jack_irq("WM8350 HPR");
+#endif
+		jack = &priv->hpr;
+		break;
+
+	default:
+		BUG();
+	}
+
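+	/* Hold the system awake long enough for the debounce work to run */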
+	if (device_may_wakeup(wm8350->dev))
+		pm_wakeup_event(wm8350->dev, 250);
+
+	schedule_delayed_work(&jack->work, msecs_to_jiffies(200));
+
 	return IRQ_HANDLED;
 }
 
@@ -1436,6 +1463,10 @@
 	u16 reg;
 	int report = 0;
 
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+	trace_snd_soc_jack_irq("WM8350 mic");
+#endif
+
 	reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS);
 	if (reg & WM8350_JACK_MICSCD_LVL)
 		report |= priv->mic.short_report;
@@ -1550,7 +1581,9 @@
 	/* Put the codec into reset if it wasn't already */
 	wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8350_pga_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8350_pga_work);
+	INIT_DELAYED_WORK(&priv->hpl.work, wm8350_hpl_work);
+	INIT_DELAYED_WORK(&priv->hpr.work, wm8350_hpr_work);
 
 	/* Enable the codec */
 	wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
@@ -1626,7 +1659,6 @@
 {
 	struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
 	struct wm8350 *wm8350 = dev_get_platdata(codec->dev);
-	int ret;
 
 	wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
 			  WM8350_JDL_ENA | WM8350_JDR_ENA);
@@ -1641,15 +1673,12 @@
 	priv->hpr.jack = NULL;
 	priv->mic.jack = NULL;
 
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(&codec->delayed_work);
+	cancel_delayed_work_sync(&priv->hpl.work);
+	cancel_delayed_work_sync(&priv->hpr.work);
 
 	/* if there was any work waiting then we run it now and
 	 * wait for its completion */
-	if (ret) {
-		schedule_delayed_work(&codec->delayed_work, 0);
-		flush_scheduled_work();
-	}
+	flush_delayed_work_sync(&codec->dapm.delayed_work);
 
 	wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 8502997..3c3bc07 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -911,10 +910,11 @@
 
 static int wm8400_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8400_dapm_widgets,
-				  ARRAY_SIZE(wm8400_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8400_dapm_widgets,
+				  ARRAY_SIZE(wm8400_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1219,7 +1219,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(power),
 						    &power[0]);
 			if (ret != 0) {
@@ -1306,7 +1306,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 8f10709..db0dced 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8510.h"
@@ -216,10 +215,11 @@
 
 static int wm8510_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8510_dapm_widgets,
-				  ARRAY_SIZE(wm8510_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8510_dapm_widgets,
+				  ARRAY_SIZE(wm8510_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -478,7 +478,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		power1 |= WM8510_POWER1_BIASEN | WM8510_POWER1_BUFIOEN;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8510_POWER1, power1 | 0x3);
 			mdelay(100);
@@ -495,7 +495,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index deca79e..5eb2f50 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -109,10 +108,11 @@
 
 static int wm8523_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8523_dapm_widgets,
-				  ARRAY_SIZE(wm8523_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8523_dapm_widgets,
+				  ARRAY_SIZE(wm8523_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -327,7 +327,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
 						    wm8523->supplies);
 			if (ret != 0) {
@@ -366,7 +366,7 @@
 				       wm8523->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 8725d4e..8f6b5ee 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -31,7 +31,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <asm/div64.h>
@@ -191,7 +190,6 @@
 struct wm8580_priv {
 	enum snd_soc_control_type control_type;
 	struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
-	u16 reg_cache[WM8580_MAX_REGISTER + 1];
 	struct pll_state a;
 	struct pll_state b;
 	int sysclk[2];
@@ -302,10 +300,11 @@
 
 static int wm8580_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets,
-				  ARRAY_SIZE(wm8580_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets,
+				  ARRAY_SIZE(wm8580_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -507,13 +506,13 @@
 	}
 
 	/* Look up the SYSCLK ratio; accept only exact matches */
-	ratio = wm8580->sysclk[dai->id] / params_rate(params);
+	ratio = wm8580->sysclk[dai->driver->id] / params_rate(params);
 	for (i = 0; i < ARRAY_SIZE(wm8580_sysclk_ratios); i++)
 		if (ratio == wm8580_sysclk_ratios[i])
 			break;
 	if (i == ARRAY_SIZE(wm8580_sysclk_ratios)) {
 		dev_err(codec->dev, "Invalid clock ratio %d/%d\n",
-			wm8580->sysclk[dai->id], params_rate(params));
+			wm8580->sysclk[dai->driver->id], params_rate(params));
 		return -EINVAL;
 	}
 	paifa |= i;
@@ -716,7 +715,7 @@
 
 	switch (clk_id) {
 	case WM8580_CLKSRC_ADCMCLK:
-		if (dai->id != WM8580_DAI_PAIFTX)
+		if (dai->driver->id != WM8580_DAI_PAIFTX)
 			return -EINVAL;
 		sel = 0 << sel_shift;
 		break;
@@ -735,7 +734,7 @@
 	}
 
 	/* We really should validate PLL settings but not yet */
-	wm8580->sysclk[dai->id] = freq;
+	wm8580->sysclk[dai->driver->id] = freq;
 
 	return snd_soc_update_bits(codec, WM8580_CLKSEL, sel_mask, sel);
 }
@@ -767,7 +766,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Power up and get individual control of the DACs */
 			reg = snd_soc_read(codec, WM8580_PWRDN1);
 			reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD);
@@ -785,7 +784,7 @@
 		snd_soc_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -905,7 +904,7 @@
 	.set_bias_level = wm8580_set_bias_level,
 	.reg_cache_size = ARRAY_SIZE(wm8580_reg),
 	.reg_word_size = sizeof(u16),
-	.reg_cache_default = &wm8580_reg,
+	.reg_cache_default = wm8580_reg,
 };
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 54fbd76..97c3038 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 
@@ -34,7 +33,6 @@
 /* codec private data */
 struct wm8711_priv {
 	enum snd_soc_control_type bus_type;
-	u16 reg_cache[WM8711_CACHEREGNUM];
 	unsigned int sysclk;
 };
 
@@ -93,10 +91,11 @@
 
 static int wm8711_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8711_dapm_widgets,
-				  ARRAY_SIZE(wm8711_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8711_dapm_widgets,
+				  ARRAY_SIZE(wm8711_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -318,7 +317,7 @@
 		snd_soc_write(codec, WM8711_PWR, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 075f35e..736b035 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -73,10 +72,11 @@
 
 static int wm8728_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8728_dapm_widgets,
-				  ARRAY_SIZE(wm8728_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8728_dapm_widgets,
+				  ARRAY_SIZE(wm8728_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -180,7 +180,7 @@
 	case SND_SOC_BIAS_ON:
 	case SND_SOC_BIAS_PREPARE:
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Power everything up... */
 			reg = snd_soc_read(codec, WM8728_DACCTL);
 			snd_soc_write(codec, WM8728_DACCTL, reg & ~0x4);
@@ -197,7 +197,7 @@
 		snd_soc_write(codec, WM8728_DACCTL, reg | 0x4);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index e725c09..0a67c31 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -44,9 +43,10 @@
 struct wm8731_priv {
 	enum snd_soc_control_type control_type;
 	struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES];
-	u16 reg_cache[WM8731_CACHEREGNUM];
 	unsigned int sysclk;
 	int sysclk_type;
+	int playback_fs;
+	bool deemph;
 };
 
 
@@ -65,16 +65,79 @@
 #define wm8731_reset(c)	snd_soc_write(c, WM8731_RESET, 0)
 
 static const char *wm8731_input_select[] = {"Line In", "Mic"};
-static const char *wm8731_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
 
-static const struct soc_enum wm8731_enum[] = {
-	SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select),
-	SOC_ENUM_SINGLE(WM8731_APDIGI, 1, 4, wm8731_deemph),
-};
+static const struct soc_enum wm8731_insel_enum =
+	SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select);
+
+static const int wm8731_deemph[] = { 0, 32000, 44100, 48000 };
+
+static int wm8731_set_deemph(struct snd_soc_codec *codec)
+{
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+	int val, i, best;
+
+	/* If we're using deemphasis, select the nearest available sample
+	 * rate.
+	 */
+	if (wm8731->deemph) {
+		best = 1;
+		for (i = 2; i < ARRAY_SIZE(wm8731_deemph); i++) {
+			if (abs(wm8731_deemph[i] - wm8731->playback_fs) <
+			    abs(wm8731_deemph[best] - wm8731->playback_fs))
+				best = i;
+		}
+
+		val = best << 1;
+	} else {
+		best = 0;
+		val = 0;
+	}
+
+	dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n",
+		best, wm8731_deemph[best]);
+
+	return snd_soc_update_bits(codec, WM8731_APDIGI, 0x6, val);
+}
+
+static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8731->deemph;
+
+	return 0;
+}
+
+static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+	int deemph = ucontrol->value.enumerated.item[0];
+	int ret = 0;
+
+	if (deemph > 1)
+		return -EINVAL;
+
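+	/* Only touch the hardware if the deemphasis selection changed */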
+	mutex_lock(&codec->mutex);
+	if (wm8731->deemph != deemph) {
+		wm8731->deemph = deemph;
+
+		wm8731_set_deemph(codec);
+
+		ret = 1;
+	}
+	mutex_unlock(&codec->mutex);
+
+	return ret;
+}
 
 static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 2000, 0);
 
 static const struct snd_kcontrol_new wm8731_snd_controls[] = {
 
@@ -87,7 +150,7 @@
 		 in_tlv),
 SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1),
 
-SOC_SINGLE("Mic Boost (+20dB)", WM8731_APANA, 0, 1, 0),
+SOC_SINGLE_TLV("Mic Boost Volume", WM8731_APANA, 0, 1, 0, mic_tlv),
 SOC_SINGLE("Mic Capture Switch", WM8731_APANA, 1, 1, 1),
 
 SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1,
@@ -96,7 +159,8 @@
 SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1),
 SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0),
 
-SOC_ENUM("Playback De-emphasis", wm8731_enum[1]),
+SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0,
+		    wm8731_get_deemph, wm8731_put_deemph),
 };
 
 /* Output Mixer */
@@ -108,7 +172,7 @@
 
 /* Input mux */
 static const struct snd_kcontrol_new wm8731_input_mux_controls =
-SOC_DAPM_ENUM("Input Select", wm8731_enum[0]);
+SOC_DAPM_ENUM("Input Select", wm8731_insel_enum);
 
 static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0),
@@ -165,10 +229,11 @@
 
 static int wm8731_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
-				  ARRAY_SIZE(wm8731_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
+				  ARRAY_SIZE(wm8731_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -239,6 +304,8 @@
 	u16 srate = (coeff_div[i].sr << 2) |
 		(coeff_div[i].bosr << 1) | coeff_div[i].usb;
 
+	wm8731->playback_fs = params_rate(params);
+
 	snd_soc_write(codec, WM8731_SRATE, srate);
 
 	/* bit size */
@@ -253,6 +320,8 @@
 		break;
 	}
 
+	wm8731_set_deemph(codec);
+
 	snd_soc_write(codec, WM8731_IFACE, iface);
 	return 0;
 }
@@ -319,7 +388,7 @@
 		return -EINVAL;
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	return 0;
 }
@@ -399,7 +468,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
 						    wm8731->supplies);
 			if (ret != 0)
@@ -428,7 +497,7 @@
 				       wm8731->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -542,7 +611,6 @@
 err_regulator_get:
 	regulator_bulk_free(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
 
-	kfree(wm8731);
 	return ret;
 }
 
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
new file mode 100644
index 0000000..30c67d0
--- /dev/null
+++ b/sound/soc/codecs/wm8737.c
@@ -0,0 +1,754 @@
+/*
+ * wm8737.c  --  WM8737 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8737.h"
+
+#define WM8737_NUM_SUPPLIES 4
+static const char *wm8737_supply_names[WM8737_NUM_SUPPLIES] = {
+	"DCVDD",
+	"DBVDD",
+	"AVDD",
+	"MVDD",
+};
+
+/* codec private data */
+struct wm8737_priv {
+	enum snd_soc_control_type control_type;
+	struct regulator_bulk_data supplies[WM8737_NUM_SUPPLIES];
+	unsigned int mclk;
+};
+
+static const u16 wm8737_reg[WM8737_REGISTER_COUNT] = {
+	0x00C3,     /* R0  - Left PGA volume */
+	0x00C3,     /* R1  - Right PGA volume */
+	0x0007,     /* R2  - AUDIO path L */
+	0x0007,     /* R3  - AUDIO path R */
+	0x0000,     /* R4  - 3D Enhance */
+	0x0000,     /* R5  - ADC Control */
+	0x0000,     /* R6  - Power Management */
+	0x000A,     /* R7  - Audio Format */
+	0x0000,     /* R8  - Clocking */
+	0x000F,     /* R9  - MIC Preamp Control */
+	0x0003,     /* R10 - Misc Bias Control */
+	0x0000,     /* R11 - Noise Gate */
+	0x007C,     /* R12 - ALC1 */
+	0x0000,     /* R13 - ALC2 */
+	0x0032,     /* R14 - ALC3 */
+};
+
+static int wm8737_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, WM8737_RESET, 0);
+}
+
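+/* Microphone boost gain options: +13dB, +18dB, +28dB and +33dB */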
+static const unsigned int micboost_tlv[] = {
+	TLV_DB_RANGE_HEAD(4),
+	0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
+	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
+static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_max_tlv, -1200, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_target_tlv, -1800, 100, 0);
+
+static const char *micbias_enum_text[] = {
+	"25%",
+	"50%",
+	"75%",
+	"100%",
+};
+
+static const struct soc_enum micbias_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 0, 4, micbias_enum_text);
+
+static const char *low_cutoff_text[] = {
+	"Low", "High"
+};
+
+static const struct soc_enum low_3d =
+	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 6, 2, low_cutoff_text);
+
+static const char *high_cutoff_text[] = {
+	"High", "Low"
+};
+
+static const struct soc_enum high_3d =
+	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 5, 2, high_cutoff_text);
+
+static const char *alc_fn_text[] = {
+	"Disabled", "Right", "Left", "Stereo"
+};
+
+static const struct soc_enum alc_fn =
+	SOC_ENUM_SINGLE(WM8737_ALC1, 7, 4, alc_fn_text);
+
+static const char *alc_hold_text[] = {
+	"0", "2.67ms", "5.33ms", "10.66ms", "21.32ms", "42.64ms", "85.28ms",
+	"170.56ms", "341.12ms", "682.24ms", "1.364s", "2.728s", "5.458s",
+	"10.916s", "21.832s", "43.691s"
+};
+
+static const struct soc_enum alc_hold =
+	SOC_ENUM_SINGLE(WM8737_ALC2, 0, 16, alc_hold_text);
+
+static const char *alc_atk_text[] = {
+	"8.4ms", "16.8ms", "33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms",
+	"1.075s", "2.15s", "4.3s", "8.6s"
+};
+
+static const struct soc_enum alc_atk =
+	SOC_ENUM_SINGLE(WM8737_ALC3, 0, 11, alc_atk_text);
+
+static const char *alc_dcy_text[] = {
+	"33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms", "1.075s", "2.15s",
+	"4.3s", "8.6s", "17.2s", "34.41s"
+};
+
+static const struct soc_enum alc_dcy =
+	SOC_ENUM_SINGLE(WM8737_ALC3, 4, 11, alc_dcy_text);
+
+static const struct snd_kcontrol_new wm8737_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Mic Boost Volume", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+		 6, 3, 0, micboost_tlv),
+SOC_DOUBLE_R("Mic Boost Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	     4, 1, 0),
+SOC_DOUBLE("Mic ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	   3, 1, 0),
+
+SOC_DOUBLE_R_TLV("Capture Volume", WM8737_LEFT_PGA_VOLUME,
+		 WM8737_RIGHT_PGA_VOLUME, 0, 255, 0, pga_tlv),
+SOC_DOUBLE("Capture ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	   2, 1, 0),
+
+SOC_DOUBLE("INPUT1 DC Bias Switch", WM8737_MISC_BIAS_CONTROL, 0, 1, 1, 0),
+
+SOC_ENUM("Mic PGA Bias", micbias_enum),
+SOC_SINGLE("ADC Low Power Switch", WM8737_ADC_CONTROL, 2, 1, 0),
+SOC_SINGLE("High Pass Filter Switch", WM8737_ADC_CONTROL, 0, 1, 1),
+SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0),
+
+SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0),
+SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0),
+SOC_ENUM("3D Low Cut-off", low_3d),
+SOC_ENUM("3D High Cut-off", low_3d),
+SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv),
+
+SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0),
+SOC_SINGLE_TLV("Noise Gate Threshold Volume", WM8737_NOISE_GATE, 2, 7, 0,
+	       ng_tlv),
+
+SOC_ENUM("ALC", alc_fn),
+SOC_SINGLE_TLV("ALC Max Gain Volume", WM8737_ALC1, 4, 7, 0, alc_max_tlv),
+SOC_SINGLE_TLV("ALC Target Volume", WM8737_ALC1, 0, 15, 0, alc_target_tlv),
+SOC_ENUM("ALC Hold Time", alc_hold),
+SOC_SINGLE("ALC ZC Switch", WM8737_ALC2, 4, 1, 0),
+SOC_ENUM("ALC Attack Time", alc_atk),
+SOC_ENUM("ALC Decay Time", alc_dcy),
+};
+
+static const char *linsel_text[] = {
+	"LINPUT1", "LINPUT2", "LINPUT3", "LINPUT1 DC",
+};
+
+static const struct soc_enum linsel_enum =
+	SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_L, 7, 4, linsel_text);
+
+static const struct snd_kcontrol_new linsel_mux =
+	SOC_DAPM_ENUM("LINSEL", linsel_enum);
+
+
+static const char *rinsel_text[] = {
+	"RINPUT1", "RINPUT2", "RINPUT3", "RINPUT1 DC",
+};
+
+static const struct soc_enum rinsel_enum =
+	SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_R, 7, 4, rinsel_text);
+
+static const struct snd_kcontrol_new rinsel_mux =
+	SOC_DAPM_ENUM("RINSEL", rinsel_enum);
+
+static const char *bypass_text[] = {
+	"Direct", "Preamp"
+};
+
+static const struct soc_enum lbypass_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 2, 2, bypass_text);
+
+static const struct snd_kcontrol_new lbypass_mux =
+	SOC_DAPM_ENUM("Left Bypass", lbypass_enum);
+
+
+static const struct soc_enum rbypass_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 3, 2, bypass_text);
+
+static const struct snd_kcontrol_new rbypass_mux =
+	SOC_DAPM_ENUM("Left Bypass", rbypass_enum);
+
+static const struct snd_soc_dapm_widget wm8737_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("LINPUT1"),
+SND_SOC_DAPM_INPUT("LINPUT2"),
+SND_SOC_DAPM_INPUT("LINPUT3"),
+SND_SOC_DAPM_INPUT("RINPUT1"),
+SND_SOC_DAPM_INPUT("RINPUT2"),
+SND_SOC_DAPM_INPUT("RINPUT3"),
+SND_SOC_DAPM_INPUT("LACIN"),
+SND_SOC_DAPM_INPUT("RACIN"),
+
+SND_SOC_DAPM_MUX("LINSEL", SND_SOC_NOPM, 0, 0, &linsel_mux),
+SND_SOC_DAPM_MUX("RINSEL", SND_SOC_NOPM, 0, 0, &rinsel_mux),
+
+SND_SOC_DAPM_MUX("Left Preamp Mux", SND_SOC_NOPM, 0, 0, &lbypass_mux),
+SND_SOC_DAPM_MUX("Right Preamp Mux", SND_SOC_NOPM, 0, 0, &rbypass_mux),
+
+SND_SOC_DAPM_PGA("PGAL", WM8737_POWER_MANAGEMENT, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("PGAR", WM8737_POWER_MANAGEMENT, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_DAC("ADCL", NULL, WM8737_POWER_MANAGEMENT, 3, 0),
+SND_SOC_DAPM_DAC("ADCR", NULL, WM8737_POWER_MANAGEMENT, 2, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF", "Capture", 0, WM8737_POWER_MANAGEMENT, 6, 0),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	{ "LINSEL", "LINPUT1", "LINPUT1" },
+	{ "LINSEL", "LINPUT2", "LINPUT2" },
+	{ "LINSEL", "LINPUT3", "LINPUT3" },
+	{ "LINSEL", "LINPUT1 DC", "LINPUT1" },
+
+	{ "RINSEL", "RINPUT1", "RINPUT1" },
+	{ "RINSEL", "RINPUT2", "RINPUT2" },
+	{ "RINSEL", "RINPUT3", "RINPUT3" },
+	{ "RINSEL", "RINPUT1 DC", "RINPUT1" },
+
+	{ "Left Preamp Mux", "Preamp", "LINSEL" },
+	{ "Left Preamp Mux", "Direct", "LACIN" },
+
+	{ "Right Preamp Mux", "Preamp", "RINSEL" },
+	{ "Right Preamp Mux", "Direct", "RACIN" },
+
+	{ "PGAL", NULL, "Left Preamp Mux" },
+	{ "PGAR", NULL, "Right Preamp Mux" },
+
+	{ "ADCL", NULL, "PGAL" },
+	{ "ADCR", NULL, "PGAR" },
+
+	{ "AIF", NULL, "ADCL" },
+	{ "AIF", NULL, "ADCR" },
+};
+
+static int wm8737_add_widgets(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm8737_dapm_widgets,
+				  ARRAY_SIZE(wm8737_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	return 0;
+}
+
+/* codec mclk clock divider coefficients */
+static const struct {
+	u32 mclk;
+	u32 rate;
+	u8 usb;
+	u8 sr;
+} coeff_div[] = {
+	{ 12288000,  8000, 0,  0x4 },
+	{ 12288000, 12000, 0,  0x8 },
+	{ 12288000, 16000, 0,  0xa },
+	{ 12288000, 24000, 0, 0x1c },
+	{ 12288000, 32000, 0,  0xc },
+	{ 12288000, 48000, 0,    0 },
+	{ 12288000, 96000, 0,  0xe },
+
+	{ 11289600,  8000, 0, 0x14 },
+	{ 11289600, 11025, 0, 0x18 },
+	{ 11289600, 22050, 0, 0x1a },
+	{ 11289600, 44100, 0, 0x10 },
+	{ 11289600, 88200, 0, 0x1e },
+
+	{ 18432000,  8000, 0,  0x5 },
+	{ 18432000, 12000, 0,  0x9 },
+	{ 18432000, 16000, 0,  0xb },
+	{ 18432000, 24000, 0, 0x1b },
+	{ 18432000, 32000, 0,  0xd },
+	{ 18432000, 48000, 0,  0x1 },
+	{ 18432000, 96000, 0, 0x1f },
+
+	{ 16934400,  8000, 0, 0x15 },
+	{ 16934400, 11025, 0, 0x19 },
+	{ 16934400, 22050, 0, 0x1b },
+	{ 16934400, 44100, 0, 0x11 },
+	{ 16934400, 88200, 0, 0x1f },
+
+	{ 12000000,  8000, 1,  0x4 },
+	{ 12000000, 11025, 1, 0x19 },
+	{ 12000000, 12000, 1,  0x8 },
+	{ 12000000, 16000, 1,  0xa },
+	{ 12000000, 22050, 1, 0x1b },
+	{ 12000000, 24000, 1, 0x1c },
+	{ 12000000, 32000, 1,  0xc },
+	{ 12000000, 44100, 1, 0x11 },
+	{ 12000000, 48000, 1,  0x0 },
+	{ 12000000, 88200, 1, 0x1f },
+	{ 12000000, 96000, 1,  0xe },
+};
+
+static int wm8737_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int i;
+	u16 clocking = 0;
+	u16 af = 0;
+
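+	/* Look up the USB mode/SR coefficients for this rate; use the
+	 * CLKDIV2 option when the table entry is at twice the supplied MCLK.
+	 */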
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (coeff_div[i].rate != params_rate(params))
+			continue;
+
+		if (coeff_div[i].mclk == wm8737->mclk)
+			break;
+
+		if (coeff_div[i].mclk == wm8737->mclk * 2) {
+			clocking |= WM8737_CLKDIV2;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(coeff_div)) {
+		dev_err(codec->dev, "%dHz MCLK can't support %dHz\n",
+			wm8737->mclk, params_rate(params));
+		return -EINVAL;
+	}
+
+	clocking |= coeff_div[i].usb | (coeff_div[i].sr << WM8737_SR_SHIFT);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		af |= 0x8;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		af |= 0x10;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		af |= 0x18;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT, WM8737_WL_MASK, af);
+	snd_soc_update_bits(codec, WM8737_CLOCKING,
+			    WM8737_USB_MODE | WM8737_CLKDIV2 | WM8737_SR_MASK,
+			    clocking);
+
+	return 0;
+}
+
+static int wm8737_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+				 int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
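+	/* Accept only MCLK rates covered by the coefficient table, either
+	 * directly or at twice the table rate.
+	 */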
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (freq == coeff_div[i].mclk ||
+		    freq == coeff_div[i].mclk * 2) {
+			wm8737->mclk = freq;
+			return 0;
+		}
+	}
+
+	dev_err(codec->dev, "MCLK rate %dHz not supported\n", freq);
+
+	return -EINVAL;
+}
+
+
+static int wm8737_set_dai_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 af = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		af |= WM8737_MS;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		af |= 0x2;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		af |= 0x1;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		af |= 0x3;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
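+		/* DSP mode B: format code 0x3 with the LRP bit also set */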
+		af |= 0x13;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		af |= WM8737_LRP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT,
+			    WM8737_FORMAT_MASK | WM8737_LRP | WM8737_MS, af);
+
+	return 0;
+}
+
+static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+
+	case SND_SOC_BIAS_PREPARE:
+		/* VMID at 2*75k */
+		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+				    WM8737_VMIDSEL_MASK, 0);
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
+						    wm8737->supplies);
+			if (ret != 0) {
+				dev_err(codec->dev,
+					"Failed to enable supplies: %d\n",
+					ret);
+				return ret;
+			}
+
+			snd_soc_cache_sync(codec);
+
+			/* Fast VMID ramp at 2*2.5k */
+			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+					    WM8737_VMIDSEL_MASK, 0x4);
+
+			/* Bring VMID up */
+			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+					    WM8737_VMID_MASK |
+					    WM8737_VREF_MASK,
+					    WM8737_VMID_MASK |
+					    WM8737_VREF_MASK);
+
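+			/* Give VMID time to ramp up with the fast charge setting */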
+			msleep(500);
+		}
+
+		/* VMID at 2*300k */
+		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+				    WM8737_VMIDSEL_MASK, 2);
+
+		break;
+
+	case SND_SOC_BIAS_OFF:
+		snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+				    WM8737_VMID_MASK | WM8737_VREF_MASK, 0);
+
+		regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies),
+				       wm8737->supplies);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define WM8737_RATES SNDRV_PCM_RATE_8000_96000
+
+#define WM8737_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8737_dai_ops = {
+	.hw_params	= wm8737_hw_params,
+	.set_sysclk	= wm8737_set_dai_sysclk,
+	.set_fmt	= wm8737_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver wm8737_dai = {
+	.name = "wm8737",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,  /* Mono modes not yet supported */
+		.channels_max = 2,
+		.rates = WM8737_RATES,
+		.formats = WM8737_FORMATS,
+	},
+	.ops = &wm8737_dai_ops,
+};
+
+#ifdef CONFIG_PM
+static int wm8737_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8737_resume(struct snd_soc_codec *codec)
+{
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8737_suspend NULL
+#define wm8737_resume NULL
+#endif
+
+static int wm8737_probe(struct snd_soc_codec *codec)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int ret, i;
+
+	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8737->control_type);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(wm8737->supplies); i++)
+		wm8737->supplies[i].supply = wm8737_supply_names[i];
+
+	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8737->supplies),
+				 wm8737->supplies);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
+				    wm8737->supplies);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		goto err_get;
+	}
+
+	ret = wm8737_reset(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset\n");
+		goto err_enable;
+	}
+
+	snd_soc_update_bits(codec, WM8737_LEFT_PGA_VOLUME, WM8737_LVU,
+			    WM8737_LVU);
+	snd_soc_update_bits(codec, WM8737_RIGHT_PGA_VOLUME, WM8737_RVU,
+			    WM8737_RVU);
+
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* Bias level configuration will have done an extra enable */
+	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+
+	snd_soc_add_controls(codec, wm8737_snd_controls,
+			     ARRAY_SIZE(wm8737_snd_controls));
+	wm8737_add_widgets(codec);
+
+	return 0;
+
+err_enable:
+	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+err_get:
+	regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+
+	return ret;
+}
+
+static int wm8737_remove(struct snd_soc_codec *codec)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
+	.probe		= wm8737_probe,
+	.remove		= wm8737_remove,
+	.suspend	= wm8737_suspend,
+	.resume		= wm8737_resume,
+	.set_bias_level = wm8737_set_bias_level,
+
+	.reg_cache_size = WM8737_REGISTER_COUNT - 1, /* Skip reset */
+	.reg_word_size	= sizeof(u16),
+	.reg_cache_default = wm8737_reg,
+};
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8737_i2c_probe(struct i2c_client *i2c,
+				      const struct i2c_device_id *id)
+{
+	struct wm8737_priv *wm8737;
+	int ret;
+
+	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
+	if (wm8737 == NULL)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, wm8737);
+	wm8737->control_type = SND_SOC_I2C;
+
+	ret = snd_soc_register_codec(&i2c->dev,
+				      &soc_codec_dev_wm8737, &wm8737_dai, 1);
+	if (ret < 0)
+		kfree(wm8737);
+	return ret;
+}
+
+static __devexit int wm8737_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	kfree(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id wm8737_i2c_id[] = {
+	{ "wm8737", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, wm8737_i2c_id);
+
+static struct i2c_driver wm8737_i2c_driver = {
+	.driver = {
+		.name = "wm8737",
+		.owner = THIS_MODULE,
+	},
+	.probe =    wm8737_i2c_probe,
+	.remove =   __devexit_p(wm8737_i2c_remove),
+	.id_table = wm8737_i2c_id,
+};
+#endif
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8737_spi_probe(struct spi_device *spi)
+{
+	struct wm8737_priv *wm8737;
+	int ret;
+
+	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
+	if (wm8737 == NULL)
+		return -ENOMEM;
+
+	wm8737->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8737);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8737, &wm8737_dai, 1);
+	if (ret < 0)
+		kfree(wm8737);
+	return ret;
+}
+
+static int __devexit wm8737_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8737_spi_driver = {
+	.driver = {
+		.name	= "wm8737",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= wm8737_spi_probe,
+	.remove		= __devexit_p(wm8737_spi_remove),
+};
+#endif /* CONFIG_SPI_MASTER */
+
+static int __init wm8737_modinit(void)
+{
+	int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	ret = i2c_add_driver(&wm8737_i2c_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "Failed to register WM8737 I2C driver: %d\n",
+		       ret);
+	}
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8737_spi_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "Failed to register WM8737 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return 0;
+}
+module_init(wm8737_modinit);
+
+static void __exit wm8737_exit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8737_spi_driver);
+#endif
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_driver(&wm8737_i2c_driver);
+#endif
+}
+module_exit(wm8737_exit);
+
+MODULE_DESCRIPTION("ASoC WM8737 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8737.h b/sound/soc/codecs/wm8737.h
new file mode 100644
index 0000000..23d14c8
--- /dev/null
+++ b/sound/soc/codecs/wm8737.h
@@ -0,0 +1,322 @@
+#ifndef _WM8737_H
+#define _WM8737_H
+
+/*
+ * wm8737.h  --  WM8737 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Register values.
+ */
+#define WM8737_LEFT_PGA_VOLUME                  0x00
+#define WM8737_RIGHT_PGA_VOLUME                 0x01
+#define WM8737_AUDIO_PATH_L                     0x02
+#define WM8737_AUDIO_PATH_R                     0x03
+#define WM8737_3D_ENHANCE                       0x04
+#define WM8737_ADC_CONTROL                      0x05
+#define WM8737_POWER_MANAGEMENT                 0x06
+#define WM8737_AUDIO_FORMAT                     0x07
+#define WM8737_CLOCKING                         0x08
+#define WM8737_MIC_PREAMP_CONTROL               0x09
+#define WM8737_MISC_BIAS_CONTROL                0x0A
+#define WM8737_NOISE_GATE                       0x0B
+#define WM8737_ALC1                             0x0C
+#define WM8737_ALC2                             0x0D
+#define WM8737_ALC3                             0x0E
+#define WM8737_RESET                            0x0F
+
+#define WM8737_REGISTER_COUNT                   16
+#define WM8737_MAX_REGISTER                     0x0F
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Left PGA volume
+ */
+#define WM8737_LVU                              0x0100  /* LVU */
+#define WM8737_LVU_MASK                         0x0100  /* LVU */
+#define WM8737_LVU_SHIFT                             8  /* LVU */
+#define WM8737_LVU_WIDTH                             1  /* LVU */
+#define WM8737_LINVOL_MASK                      0x00FF  /* LINVOL - [7:0] */
+#define WM8737_LINVOL_SHIFT                          0  /* LINVOL - [7:0] */
+#define WM8737_LINVOL_WIDTH                          8  /* LINVOL - [7:0] */
+
+/*
+ * R1 (0x01) - Right PGA volume
+ */
+#define WM8737_RVU                              0x0100  /* RVU */
+#define WM8737_RVU_MASK                         0x0100  /* RVU */
+#define WM8737_RVU_SHIFT                             8  /* RVU */
+#define WM8737_RVU_WIDTH                             1  /* RVU */
+#define WM8737_RINVOL_MASK                      0x00FF  /* RINVOL - [7:0] */
+#define WM8737_RINVOL_SHIFT                          0  /* RINVOL - [7:0] */
+#define WM8737_RINVOL_WIDTH                          8  /* RINVOL - [7:0] */
+
+/*
+ * R2 (0x02) - AUDIO path L
+ */
+#define WM8737_LINSEL_MASK                      0x0180  /* LINSEL - [8:7] */
+#define WM8737_LINSEL_SHIFT                          7  /* LINSEL - [8:7] */
+#define WM8737_LINSEL_WIDTH                          2  /* LINSEL - [8:7] */
+#define WM8737_LMICBOOST_MASK                   0x0060  /* LMICBOOST - [6:5] */
+#define WM8737_LMICBOOST_SHIFT                       5  /* LMICBOOST - [6:5] */
+#define WM8737_LMICBOOST_WIDTH                       2  /* LMICBOOST - [6:5] */
+#define WM8737_LMBE                             0x0010  /* LMBE */
+#define WM8737_LMBE_MASK                        0x0010  /* LMBE */
+#define WM8737_LMBE_SHIFT                            4  /* LMBE */
+#define WM8737_LMBE_WIDTH                            1  /* LMBE */
+#define WM8737_LMZC                             0x0008  /* LMZC */
+#define WM8737_LMZC_MASK                        0x0008  /* LMZC */
+#define WM8737_LMZC_SHIFT                            3  /* LMZC */
+#define WM8737_LMZC_WIDTH                            1  /* LMZC */
+#define WM8737_LPZC                             0x0004  /* LPZC */
+#define WM8737_LPZC_MASK                        0x0004  /* LPZC */
+#define WM8737_LPZC_SHIFT                            2  /* LPZC */
+#define WM8737_LPZC_WIDTH                            1  /* LPZC */
+#define WM8737_LZCTO_MASK                       0x0003  /* LZCTO - [1:0] */
+#define WM8737_LZCTO_SHIFT                           0  /* LZCTO - [1:0] */
+#define WM8737_LZCTO_WIDTH                           2  /* LZCTO - [1:0] */
+
+/*
+ * R3 (0x03) - AUDIO path R
+ */
+#define WM8737_RINSEL_MASK                      0x0180  /* RINSEL - [8:7] */
+#define WM8737_RINSEL_SHIFT                          7  /* RINSEL - [8:7] */
+#define WM8737_RINSEL_WIDTH                          2  /* RINSEL - [8:7] */
+#define WM8737_RMICBOOST_MASK                   0x0060  /* RMICBOOST - [6:5] */
+#define WM8737_RMICBOOST_SHIFT                       5  /* RMICBOOST - [6:5] */
+#define WM8737_RMICBOOST_WIDTH                       2  /* RMICBOOST - [6:5] */
+#define WM8737_RMBE                             0x0010  /* RMBE */
+#define WM8737_RMBE_MASK                        0x0010  /* RMBE */
+#define WM8737_RMBE_SHIFT                            4  /* RMBE */
+#define WM8737_RMBE_WIDTH                            1  /* RMBE */
+#define WM8737_RMZC                             0x0008  /* RMZC */
+#define WM8737_RMZC_MASK                        0x0008  /* RMZC */
+#define WM8737_RMZC_SHIFT                            3  /* RMZC */
+#define WM8737_RMZC_WIDTH                            1  /* RMZC */
+#define WM8737_RPZC                             0x0004  /* RPZC */
+#define WM8737_RPZC_MASK                        0x0004  /* RPZC */
+#define WM8737_RPZC_SHIFT                            2  /* RPZC */
+#define WM8737_RPZC_WIDTH                            1  /* RPZC */
+#define WM8737_RZCTO_MASK                       0x0003  /* RZCTO - [1:0] */
+#define WM8737_RZCTO_SHIFT                           0  /* RZCTO - [1:0] */
+#define WM8737_RZCTO_WIDTH                           2  /* RZCTO - [1:0] */
+
+/*
+ * R4 (0x04) - 3D Enhance
+ */
+#define WM8737_DIV2                             0x0080  /* DIV2 */
+#define WM8737_DIV2_MASK                        0x0080  /* DIV2 */
+#define WM8737_DIV2_SHIFT                            7  /* DIV2 */
+#define WM8737_DIV2_WIDTH                            1  /* DIV2 */
+#define WM8737_3DLC                             0x0040  /* 3DLC */
+#define WM8737_3DLC_MASK                        0x0040  /* 3DLC */
+#define WM8737_3DLC_SHIFT                            6  /* 3DLC */
+#define WM8737_3DLC_WIDTH                            1  /* 3DLC */
+#define WM8737_3DUC                             0x0020  /* 3DUC */
+#define WM8737_3DUC_MASK                        0x0020  /* 3DUC */
+#define WM8737_3DUC_SHIFT                            5  /* 3DUC */
+#define WM8737_3DUC_WIDTH                            1  /* 3DUC */
+#define WM8737_3DDEPTH_MASK                     0x001E  /* 3DDEPTH - [4:1] */
+#define WM8737_3DDEPTH_SHIFT                         1  /* 3DDEPTH - [4:1] */
+#define WM8737_3DDEPTH_WIDTH                         4  /* 3DDEPTH - [4:1] */
+#define WM8737_3DE                              0x0001  /* 3DE */
+#define WM8737_3DE_MASK                         0x0001  /* 3DE */
+#define WM8737_3DE_SHIFT                             0  /* 3DE */
+#define WM8737_3DE_WIDTH                             1  /* 3DE */
+
+/*
+ * R5 (0x05) - ADC Control
+ */
+#define WM8737_MONOMIX_MASK                     0x0180  /* MONOMIX - [8:7] */
+#define WM8737_MONOMIX_SHIFT                         7  /* MONOMIX - [8:7] */
+#define WM8737_MONOMIX_WIDTH                         2  /* MONOMIX - [8:7] */
+#define WM8737_POLARITY_MASK                    0x0060  /* POLARITY - [6:5] */
+#define WM8737_POLARITY_SHIFT                        5  /* POLARITY - [6:5] */
+#define WM8737_POLARITY_WIDTH                        2  /* POLARITY - [6:5] */
+#define WM8737_HPOR                             0x0010  /* HPOR */
+#define WM8737_HPOR_MASK                        0x0010  /* HPOR */
+#define WM8737_HPOR_SHIFT                            4  /* HPOR */
+#define WM8737_HPOR_WIDTH                            1  /* HPOR */
+#define WM8737_LP                               0x0004  /* LP */
+#define WM8737_LP_MASK                          0x0004  /* LP */
+#define WM8737_LP_SHIFT                              2  /* LP */
+#define WM8737_LP_WIDTH                              1  /* LP */
+#define WM8737_MONOUT                           0x0002  /* MONOUT */
+#define WM8737_MONOUT_MASK                      0x0002  /* MONOUT */
+#define WM8737_MONOUT_SHIFT                          1  /* MONOUT */
+#define WM8737_MONOUT_WIDTH                          1  /* MONOUT */
+#define WM8737_ADCHPD                           0x0001  /* ADCHPD */
+#define WM8737_ADCHPD_MASK                      0x0001  /* ADCHPD */
+#define WM8737_ADCHPD_SHIFT                          0  /* ADCHPD */
+#define WM8737_ADCHPD_WIDTH                          1  /* ADCHPD */
+
+/*
+ * R6 (0x06) - Power Management
+ */
+#define WM8737_VMID                             0x0100  /* VMID */
+#define WM8737_VMID_MASK                        0x0100  /* VMID */
+#define WM8737_VMID_SHIFT                            8  /* VMID */
+#define WM8737_VMID_WIDTH                            1  /* VMID */
+#define WM8737_VREF                             0x0080  /* VREF */
+#define WM8737_VREF_MASK                        0x0080  /* VREF */
+#define WM8737_VREF_SHIFT                            7  /* VREF */
+#define WM8737_VREF_WIDTH                            1  /* VREF */
+#define WM8737_AI                               0x0040  /* AI */
+#define WM8737_AI_MASK                          0x0040  /* AI */
+#define WM8737_AI_SHIFT                              6  /* AI */
+#define WM8737_AI_WIDTH                              1  /* AI */
+#define WM8737_PGL                              0x0020  /* PGL */
+#define WM8737_PGL_MASK                         0x0020  /* PGL */
+#define WM8737_PGL_SHIFT                             5  /* PGL */
+#define WM8737_PGL_WIDTH                             1  /* PGL */
+#define WM8737_PGR                              0x0010  /* PGR */
+#define WM8737_PGR_MASK                         0x0010  /* PGR */
+#define WM8737_PGR_SHIFT                             4  /* PGR */
+#define WM8737_PGR_WIDTH                             1  /* PGR */
+#define WM8737_ADL                              0x0008  /* ADL */
+#define WM8737_ADL_MASK                         0x0008  /* ADL */
+#define WM8737_ADL_SHIFT                             3  /* ADL */
+#define WM8737_ADL_WIDTH                             1  /* ADL */
+#define WM8737_ADR                              0x0004  /* ADR */
+#define WM8737_ADR_MASK                         0x0004  /* ADR */
+#define WM8737_ADR_SHIFT                             2  /* ADR */
+#define WM8737_ADR_WIDTH                             1  /* ADR */
+#define WM8737_MICBIAS_MASK                     0x0003  /* MICBIAS - [1:0] */
+#define WM8737_MICBIAS_SHIFT                         0  /* MICBIAS - [1:0] */
+#define WM8737_MICBIAS_WIDTH                         2  /* MICBIAS - [1:0] */
+
+/*
+ * R7 (0x07) - Audio Format
+ */
+#define WM8737_SDODIS                           0x0080  /* SDODIS */
+#define WM8737_SDODIS_MASK                      0x0080  /* SDODIS */
+#define WM8737_SDODIS_SHIFT                          7  /* SDODIS */
+#define WM8737_SDODIS_WIDTH                          1  /* SDODIS */
+#define WM8737_MS                               0x0040  /* MS */
+#define WM8737_MS_MASK                          0x0040  /* MS */
+#define WM8737_MS_SHIFT                              6  /* MS */
+#define WM8737_MS_WIDTH                              1  /* MS */
+#define WM8737_LRP                              0x0010  /* LRP */
+#define WM8737_LRP_MASK                         0x0010  /* LRP */
+#define WM8737_LRP_SHIFT                             4  /* LRP */
+#define WM8737_LRP_WIDTH                             1  /* LRP */
+#define WM8737_WL_MASK                          0x000C  /* WL - [3:2] */
+#define WM8737_WL_SHIFT                              2  /* WL - [3:2] */
+#define WM8737_WL_WIDTH                              2  /* WL - [3:2] */
+#define WM8737_FORMAT_MASK                      0x0003  /* FORMAT - [1:0] */
+#define WM8737_FORMAT_SHIFT                          0  /* FORMAT - [1:0] */
+#define WM8737_FORMAT_WIDTH                          2  /* FORMAT - [1:0] */
+
+/*
+ * R8 (0x08) - Clocking
+ */
+#define WM8737_AUTODETECT                       0x0080  /* AUTODETECT */
+#define WM8737_AUTODETECT_MASK                  0x0080  /* AUTODETECT */
+#define WM8737_AUTODETECT_SHIFT                      7  /* AUTODETECT */
+#define WM8737_AUTODETECT_WIDTH                      1  /* AUTODETECT */
+#define WM8737_CLKDIV2                          0x0040  /* CLKDIV2 */
+#define WM8737_CLKDIV2_MASK                     0x0040  /* CLKDIV2 */
+#define WM8737_CLKDIV2_SHIFT                         6  /* CLKDIV2 */
+#define WM8737_CLKDIV2_WIDTH                         1  /* CLKDIV2 */
+#define WM8737_SR_MASK                          0x003E  /* SR - [5:1] */
+#define WM8737_SR_SHIFT                              1  /* SR - [5:1] */
+#define WM8737_SR_WIDTH                              5  /* SR - [5:1] */
+#define WM8737_USB_MODE                         0x0001  /* USB MODE */
+#define WM8737_USB_MODE_MASK                    0x0001  /* USB MODE */
+#define WM8737_USB_MODE_SHIFT                        0  /* USB MODE */
+#define WM8737_USB_MODE_WIDTH                        1  /* USB MODE */
+
+/*
+ * R9 (0x09) - MIC Preamp Control
+ */
+#define WM8737_RBYPEN                           0x0008  /* RBYPEN */
+#define WM8737_RBYPEN_MASK                      0x0008  /* RBYPEN */
+#define WM8737_RBYPEN_SHIFT                          3  /* RBYPEN */
+#define WM8737_RBYPEN_WIDTH                          1  /* RBYPEN */
+#define WM8737_LBYPEN                           0x0004  /* LBYPEN */
+#define WM8737_LBYPEN_MASK                      0x0004  /* LBYPEN */
+#define WM8737_LBYPEN_SHIFT                          2  /* LBYPEN */
+#define WM8737_LBYPEN_WIDTH                          1  /* LBYPEN */
+#define WM8737_MBCTRL_MASK                      0x0003  /* MBCTRL - [1:0] */
+#define WM8737_MBCTRL_SHIFT                          0  /* MBCTRL - [1:0] */
+#define WM8737_MBCTRL_WIDTH                          2  /* MBCTRL - [1:0] */
+
+/*
+ * R10 (0x0A) - Misc Bias Control
+ */
+#define WM8737_VMIDSEL_MASK                     0x000C  /* VMIDSEL - [3:2] */
+#define WM8737_VMIDSEL_SHIFT                         2  /* VMIDSEL - [3:2] */
+#define WM8737_VMIDSEL_WIDTH                         2  /* VMIDSEL - [3:2] */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE           0x0002  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_MASK      0x0002  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_SHIFT          1  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_WIDTH          1  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE           0x0001  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_MASK      0x0001  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_SHIFT          0  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_WIDTH          1  /* RINPUT1 DC BIAS ENABLE */
+
+/*
+ * R11 (0x0B) - Noise Gate
+ */
+#define WM8737_NGTH_MASK                        0x001C  /* NGTH - [4:2] */
+#define WM8737_NGTH_SHIFT                            2  /* NGTH - [4:2] */
+#define WM8737_NGTH_WIDTH                            3  /* NGTH - [4:2] */
+#define WM8737_NGAT                             0x0001  /* NGAT */
+#define WM8737_NGAT_MASK                        0x0001  /* NGAT */
+#define WM8737_NGAT_SHIFT                            0  /* NGAT */
+#define WM8737_NGAT_WIDTH                            1  /* NGAT */
+
+/*
+ * R12 (0x0C) - ALC1
+ */
+#define WM8737_ALCSEL_MASK                      0x0180  /* ALCSEL - [8:7] */
+#define WM8737_ALCSEL_SHIFT                          7  /* ALCSEL - [8:7] */
+#define WM8737_ALCSEL_WIDTH                          2  /* ALCSEL - [8:7] */
+#define WM8737_MAX_GAIN_MASK                    0x0070  /* MAX GAIN - [6:4] */
+#define WM8737_MAX_GAIN_SHIFT                        4  /* MAX GAIN - [6:4] */
+#define WM8737_MAX_GAIN_WIDTH                        3  /* MAX GAIN - [6:4] */
+#define WM8737_ALCL_MASK                        0x000F  /* ALCL - [3:0] */
+#define WM8737_ALCL_SHIFT                            0  /* ALCL - [3:0] */
+#define WM8737_ALCL_WIDTH                            4  /* ALCL - [3:0] */
+
+/*
+ * R13 (0x0D) - ALC2
+ */
+#define WM8737_ALCZCE                           0x0010  /* ALCZCE */
+#define WM8737_ALCZCE_MASK                      0x0010  /* ALCZCE */
+#define WM8737_ALCZCE_SHIFT                          4  /* ALCZCE */
+#define WM8737_ALCZCE_WIDTH                          1  /* ALCZCE */
+#define WM8737_HLD_MASK                         0x000F  /* HLD - [3:0] */
+#define WM8737_HLD_SHIFT                             0  /* HLD - [3:0] */
+#define WM8737_HLD_WIDTH                             4  /* HLD - [3:0] */
+
+/*
+ * R14 (0x0E) - ALC3
+ */
+#define WM8737_DCY_MASK                         0x00F0  /* DCY - [7:4] */
+#define WM8737_DCY_SHIFT                             4  /* DCY - [7:4] */
+#define WM8737_DCY_WIDTH                             4  /* DCY - [7:4] */
+#define WM8737_ATK_MASK                         0x000F  /* ATK - [3:0] */
+#define WM8737_ATK_SHIFT                             0  /* ATK - [3:0] */
+#define WM8737_ATK_WIDTH                             4  /* ATK - [3:0] */
+
+/*
+ * R15 (0x0F) - Reset
+ */
+#define WM8737_RESET_MASK                       0x01FF  /* RESET - [8:0] */
+#define WM8737_RESET_SHIFT                           0  /* RESET - [8:0] */
+#define WM8737_RESET_WIDTH                           9  /* RESET - [8:0] */
+
+#endif
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index aea60ef..494f2d3 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -94,10 +93,11 @@
 
 static int wm8741_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8741_dapm_widgets,
-				  ARRAY_SIZE(wm8741_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8741_dapm_widgets,
+				  ARRAY_SIZE(wm8741_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -455,7 +455,7 @@
 	.resume =	wm8741_resume,
 	.reg_cache_size = ARRAY_SIZE(wm8741_reg_defaults),
 	.reg_word_size = sizeof(u16),
-	.reg_cache_default = &wm8741_reg_defaults,
+	.reg_cache_default = wm8741_reg_defaults,
 };
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 6c924cd..38f38fd 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8750.h"
@@ -53,7 +52,6 @@
 struct wm8750_priv {
 	unsigned int sysclk;
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[ARRAY_SIZE(wm8750_reg)];
 };
 
 #define wm8750_reset(c)	snd_soc_write(c, WM8750_RESET, 0)
@@ -399,10 +397,11 @@
 
 static int wm8750_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
-				  ARRAY_SIZE(wm8750_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
+				  ARRAY_SIZE(wm8750_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -615,7 +614,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Set VMID to 5k */
 			snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
 
@@ -630,7 +629,7 @@
 		snd_soc_write(codec, WM8750_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 87caae5..79b02ae 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -45,7 +45,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -623,10 +622,11 @@
 
 static int wm8753_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
-				  ARRAY_SIZE(wm8753_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
+				  ARRAY_SIZE(wm8753_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1245,7 +1245,7 @@
 		snd_soc_write(codec, WM8753_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1435,9 +1435,11 @@
 
 static void wm8753_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-		container_of(work, struct snd_soc_codec, delayed_work.work);
-	wm8753_set_bias_level(codec, codec->bias_level);
+	struct snd_soc_dapm_context *dapm =
+		container_of(work, struct snd_soc_dapm_context,
+			     delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
+	wm8753_set_bias_level(codec, dapm->bias_level);
 }
 
 static int wm8753_suspend(struct snd_soc_codec *codec, pm_message_t state)
@@ -1466,41 +1468,22 @@
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	/* charge wm8753 caps */
-	if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
 		wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
-		codec->bias_level = SND_SOC_BIAS_ON;
-		schedule_delayed_work(&codec->delayed_work,
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		schedule_delayed_work(&codec->dapm.delayed_work,
 			msecs_to_jiffies(caps_charge));
 	}
 
 	return 0;
 }
 
-/*
- * This function forces any delayed work to be queued and run.
- */
-static int run_delayed_work(struct delayed_work *dwork)
-{
-	int ret;
-
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(dwork);
-
-	/* if there was any work waiting then we run it now and
-	 * wait for it's completion */
-	if (ret) {
-		schedule_delayed_work(dwork, 0);
-		flush_scheduled_work();
-	}
-	return ret;
-}
-
 static int wm8753_probe(struct snd_soc_codec *codec)
 {
 	struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8753_work);
 
 	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8753->control_type);
 	if (ret < 0) {
@@ -1519,7 +1502,7 @@
 
 	/* charge output caps */
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
-	schedule_delayed_work(&codec->delayed_work,
+	schedule_delayed_work(&codec->dapm.delayed_work,
 			      msecs_to_jiffies(caps_charge));
 
 	/* set the update bits */
@@ -1544,7 +1527,7 @@
 /* power down chip */
 static int wm8753_remove(struct snd_soc_codec *codec)
 {
-	run_delayed_work(&codec->delayed_work);
+	flush_delayed_work_sync(&codec->dapm.delayed_work);
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
 	return 0;
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
new file mode 100644
index 0000000..19b92ba
--- /dev/null
+++ b/sound/soc/codecs/wm8770.c
@@ -0,0 +1,749 @@
+/*
+ * wm8770.c  --  WM8770 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8770.h"
+
+#define WM8770_NUM_SUPPLIES 3
+static const char *wm8770_supply_names[WM8770_NUM_SUPPLIES] = {
+	"AVDD1",
+	"AVDD2",
+	"DVDD"
+};
+
+static const u16 wm8770_reg_defs[WM8770_CACHEREGNUM] = {
+	0x7f, 0x7f, 0x7f, 0x7f,
+	0x7f, 0x7f, 0x7f, 0x7f,
+	0x7f, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0, 0x90, 0,
+	0, 0x22, 0x22, 0x3e,
+	0xc, 0xc, 0x100, 0x189,
+	0x189, 0x8770
+};
+
+struct wm8770_priv {
+	enum snd_soc_control_type control_type;
+	struct regulator_bulk_data supplies[WM8770_NUM_SUPPLIES];
+	struct notifier_block disable_nb[WM8770_NUM_SUPPLIES];
+	struct snd_soc_codec *codec;
+	int sysclk;
+};
+
+static int vout12supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+static int vout34supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+
+/*
+ * We can't use the same notifier block for more than one supply and
+ * there's no way I can see to get from a callback to the caller
+ * except container_of().
+ */
+#define WM8770_REGULATOR_EVENT(n) \
+static int wm8770_regulator_event_##n(struct notifier_block *nb, \
+				      unsigned long event, void *data)    \
+{ \
+	struct wm8770_priv *wm8770 = container_of(nb, struct wm8770_priv, \
+				     disable_nb[n]); \
+	if (event & REGULATOR_EVENT_DISABLE) { \
+		wm8770->codec->cache_sync = 1; \
+	} \
+	return 0; \
+}
+
+WM8770_REGULATOR_EVENT(0)
+WM8770_REGULATOR_EVENT(1)
+WM8770_REGULATOR_EVENT(2)
+
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_SCALE(dac_dig_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(dac_alg_tlv, -12700, 100, 1);
+
+static const char *dac_phase_text[][2] = {
+	{ "DAC1 Normal", "DAC1 Inverted" },
+	{ "DAC2 Normal", "DAC2 Inverted" },
+	{ "DAC3 Normal", "DAC3 Inverted" },
+	{ "DAC4 Normal", "DAC4 Inverted" },
+};
+
+static const struct soc_enum dac_phase[] = {
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 0, 1, 2, dac_phase_text[0]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 2, 3, 2, dac_phase_text[1]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 4, 5, 2, dac_phase_text[2]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 6, 7, 2, dac_phase_text[3]),
+};
+
+static const struct snd_kcontrol_new wm8770_snd_controls[] = {
+	/* global DAC playback controls */
+	SOC_SINGLE_TLV("DAC Playback Volume", WM8770_MSDIGVOL, 0, 255, 0,
+		dac_dig_tlv),
+	SOC_SINGLE("DAC Playback Switch", WM8770_DACMUTE, 4, 1, 1),
+	SOC_SINGLE("DAC Playback ZC Switch", WM8770_DACCTRL1, 0, 1, 0),
+
+	/* global VOUT playback controls */
+	SOC_SINGLE_TLV("VOUT Playback Volume", WM8770_MSALGVOL, 0, 127, 0,
+		dac_alg_tlv),
+	SOC_SINGLE("VOUT Playback ZC Switch", WM8770_MSALGVOL, 7, 1, 0),
+
+	/* VOUT1/2/3/4 specific controls */
+	SOC_DOUBLE_R_TLV("VOUT1 Playback Volume", WM8770_VOUT1LVOL,
+		WM8770_VOUT1RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT1 Playback ZC Switch", WM8770_VOUT1LVOL,
+		WM8770_VOUT1RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT2 Playback Volume", WM8770_VOUT2LVOL,
+		WM8770_VOUT2RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT2 Playback ZC Switch", WM8770_VOUT2LVOL,
+		WM8770_VOUT2RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT3 Playback Volume", WM8770_VOUT3LVOL,
+		WM8770_VOUT3RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT3 Playback ZC Switch", WM8770_VOUT3LVOL,
+		WM8770_VOUT3RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT4 Playback Volume", WM8770_VOUT4LVOL,
+		WM8770_VOUT4RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT4 Playback ZC Switch", WM8770_VOUT4LVOL,
+		WM8770_VOUT4RVOL, 7, 1, 0),
+
+	/* DAC1/2/3/4 specific controls */
+	SOC_DOUBLE_R_TLV("DAC1 Playback Volume", WM8770_DAC1LVOL,
+		WM8770_DAC1RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC1 Deemphasis Switch", WM8770_DACCTRL2, 0, 1, 0),
+	SOC_ENUM("DAC1 Phase", dac_phase[0]),
+	SOC_DOUBLE_R_TLV("DAC2 Playback Volume", WM8770_DAC2LVOL,
+		WM8770_DAC2RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC2 Deemphasis Switch", WM8770_DACCTRL2, 1, 1, 0),
+	SOC_ENUM("DAC2 Phase", dac_phase[1]),
+	SOC_DOUBLE_R_TLV("DAC3 Playback Volume", WM8770_DAC3LVOL,
+		WM8770_DAC3RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC3 Deemphasis Switch", WM8770_DACCTRL2, 2, 1, 0),
+	SOC_ENUM("DAC3 Phase", dac_phase[2]),
+	SOC_DOUBLE_R_TLV("DAC4 Playback Volume", WM8770_DAC4LVOL,
+		WM8770_DAC4RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC4 Deemphasis Switch", WM8770_DACCTRL2, 3, 1, 0),
+	SOC_ENUM("DAC4 Phase", dac_phase[3]),
+
+	/* ADC specific controls */
+	SOC_DOUBLE_R_TLV("Capture Volume", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
+		0, 31, 0, adc_tlv),
+	SOC_DOUBLE_R("Capture Switch", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
+		5, 1, 1),
+
+	/* other controls */
+	SOC_SINGLE("ADC 128x Oversampling Switch", WM8770_MSTRCTRL, 3, 1, 0),
+	SOC_SINGLE("ADC Highpass Filter Switch", WM8770_IFACECTRL, 8, 1, 1)
+};
+
+static const char *ain_text[] = {
+	"AIN1", "AIN2", "AIN3", "AIN4",
+	"AIN5", "AIN6", "AIN7", "AIN8"
+};
+
+static const struct soc_enum ain_enum =
+	SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
+
+static const struct snd_kcontrol_new ain_mux =
+	SOC_DAPM_ENUM("Capture Mux", ain_enum);
+
+static const struct snd_kcontrol_new vout1_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC1 Switch", WM8770_OUTMUX1, 0, 1, 0),
+	SOC_DAPM_SINGLE("AUX1 Switch", WM8770_OUTMUX1, 1, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 2, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout2_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC2 Switch", WM8770_OUTMUX1, 3, 1, 0),
+	SOC_DAPM_SINGLE("AUX2 Switch", WM8770_OUTMUX1, 4, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 5, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout3_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC3 Switch", WM8770_OUTMUX2, 0, 1, 0),
+	SOC_DAPM_SINGLE("AUX3 Switch", WM8770_OUTMUX2, 1, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 2, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout4_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC4 Switch", WM8770_OUTMUX2, 3, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 4, 1, 0)
+};
+
+static const struct snd_soc_dapm_widget wm8770_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("AUX1"),
+	SND_SOC_DAPM_INPUT("AUX2"),
+	SND_SOC_DAPM_INPUT("AUX3"),
+
+	SND_SOC_DAPM_INPUT("AIN1"),
+	SND_SOC_DAPM_INPUT("AIN2"),
+	SND_SOC_DAPM_INPUT("AIN3"),
+	SND_SOC_DAPM_INPUT("AIN4"),
+	SND_SOC_DAPM_INPUT("AIN5"),
+	SND_SOC_DAPM_INPUT("AIN6"),
+	SND_SOC_DAPM_INPUT("AIN7"),
+	SND_SOC_DAPM_INPUT("AIN8"),
+
+	SND_SOC_DAPM_MUX("Capture Mux", WM8770_ADCMUX, 8, 1, &ain_mux),
+
+	SND_SOC_DAPM_ADC("ADC", "Capture", WM8770_PWDNCTRL, 1, 1),
+
+	SND_SOC_DAPM_DAC("DAC1", "Playback", WM8770_PWDNCTRL, 2, 1),
+	SND_SOC_DAPM_DAC("DAC2", "Playback", WM8770_PWDNCTRL, 3, 1),
+	SND_SOC_DAPM_DAC("DAC3", "Playback", WM8770_PWDNCTRL, 4, 1),
+	SND_SOC_DAPM_DAC("DAC4", "Playback", WM8770_PWDNCTRL, 5, 1),
+
+	SND_SOC_DAPM_SUPPLY("VOUT12 Supply", SND_SOC_NOPM, 0, 0,
+		vout12supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("VOUT34 Supply", SND_SOC_NOPM, 0, 0,
+		vout34supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("VOUT1 Mixer", SND_SOC_NOPM, 0, 0,
+		vout1_mix_controls, ARRAY_SIZE(vout1_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT2 Mixer", SND_SOC_NOPM, 0, 0,
+		vout2_mix_controls, ARRAY_SIZE(vout2_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT3 Mixer", SND_SOC_NOPM, 0, 0,
+		vout3_mix_controls, ARRAY_SIZE(vout3_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT4 Mixer", SND_SOC_NOPM, 0, 0,
+		vout4_mix_controls, ARRAY_SIZE(vout4_mix_controls)),
+
+	SND_SOC_DAPM_OUTPUT("VOUT1"),
+	SND_SOC_DAPM_OUTPUT("VOUT2"),
+	SND_SOC_DAPM_OUTPUT("VOUT3"),
+	SND_SOC_DAPM_OUTPUT("VOUT4")
+};
+
+static const struct snd_soc_dapm_route wm8770_intercon[] = {
+	{ "Capture Mux", "AIN1", "AIN1" },
+	{ "Capture Mux", "AIN2", "AIN2" },
+	{ "Capture Mux", "AIN3", "AIN3" },
+	{ "Capture Mux", "AIN4", "AIN4" },
+	{ "Capture Mux", "AIN5", "AIN5" },
+	{ "Capture Mux", "AIN6", "AIN6" },
+	{ "Capture Mux", "AIN7", "AIN7" },
+	{ "Capture Mux", "AIN8", "AIN8" },
+
+	{ "ADC", NULL, "Capture Mux" },
+
+	{ "VOUT1 Mixer", NULL, "VOUT12 Supply" },
+	{ "VOUT1 Mixer", "DAC1 Switch", "DAC1" },
+	{ "VOUT1 Mixer", "AUX1 Switch", "AUX1" },
+	{ "VOUT1 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT2 Mixer", NULL, "VOUT12 Supply" },
+	{ "VOUT2 Mixer", "DAC2 Switch", "DAC2" },
+	{ "VOUT2 Mixer", "AUX2 Switch", "AUX2" },
+	{ "VOUT2 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT3 Mixer", NULL, "VOUT34 Supply" },
+	{ "VOUT3 Mixer", "DAC3 Switch", "DAC3" },
+	{ "VOUT3 Mixer", "AUX3 Switch", "AUX3" },
+	{ "VOUT3 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT4 Mixer", NULL, "VOUT34 Supply" },
+	{ "VOUT4 Mixer", "DAC4 Switch", "DAC4" },
+	{ "VOUT4 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT1", NULL, "VOUT1 Mixer" },
+	{ "VOUT2", NULL, "VOUT2 Mixer" },
+	{ "VOUT3", NULL, "VOUT3 Mixer" },
+	{ "VOUT4", NULL, "VOUT4 Mixer" }
+};
+
+static int vout12supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0x180);
+		break;
+	}
+
+	return 0;
+}
+
+static int vout34supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0x180);
+		break;
+	}
+
+	return 0;
+}
+
+static int wm8770_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, WM8770_RESET, 0);
+}
+
+static int wm8770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec;
+	int iface, master;
+
+	codec = dai->codec;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		master = 0x100;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		master = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	iface = 0;
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= 0x2;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= 0x1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= 0xc;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= 0x8;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		iface |= 0x4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0xf, iface);
+	snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x100, master);
+
+	return 0;
+}
+
+static const int mclk_ratios[] = {
+	128,
+	192,
+	256,
+	384,
+	512,
+	768
+};
+
+static int wm8770_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec;
+	struct wm8770_priv *wm8770;
+	int i;
+	int iface;
+	int shift;
+	int ratio;
+
+	codec = dai->codec;
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+
+	iface = 0;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		iface |= 0x10;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iface |= 0x20;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		iface |= 0x30;
+		break;
+	}
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		i = 0;
+		shift = 4;
+		break;
+	case SNDRV_PCM_STREAM_CAPTURE:
+		i = 2;
+		shift = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Only need to set MCLK/LRCLK ratio if we're master */
+	if (snd_soc_read(codec, WM8770_MSTRCTRL) & 0x100) {
+		for (; i < ARRAY_SIZE(mclk_ratios); ++i) {
+			ratio = wm8770->sysclk / params_rate(params);
+			if (ratio == mclk_ratios[i])
+				break;
+		}
+
+		if (i == ARRAY_SIZE(mclk_ratios)) {
+			dev_err(codec->dev,
+				"Unable to configure MCLK ratio %d/%d\n",
+				wm8770->sysclk, params_rate(params));
+			return -EINVAL;
+		}
+
+		dev_dbg(codec->dev, "MCLK is %dfs\n", mclk_ratios[i]);
+
+		snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x7 << shift,
+				    i << shift);
+	}
+
+	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0x30, iface);
+
+	return 0;
+}
+
+static int wm8770_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec;
+
+	codec = dai->codec;
+	return snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10,
+				   !!mute << 4);
+}
+
+static int wm8770_set_sysclk(struct snd_soc_dai *dai,
+			     int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec;
+	struct wm8770_priv *wm8770;
+
+	codec = dai->codec;
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770->sysclk = freq;
+	return 0;
+}
+
+static void wm8770_sync_cache(struct snd_soc_codec *codec)
+{
+	int i;
+	u16 *cache;
+
+	if (!codec->cache_sync)
+		return;
+
+	codec->cache_only = 0;
+	cache = codec->reg_cache;
+	for (i = 0; i < codec->driver->reg_cache_size; i++) {
+		if (i == WM8770_RESET || cache[i] == wm8770_reg_defs[i])
+			continue;
+		snd_soc_write(codec, i, cache[i]);
+	}
+	codec->cache_sync = 0;
+}
+
+static int wm8770_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	int ret;
+	struct wm8770_priv *wm8770;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
+						    wm8770->supplies);
+			if (ret) {
+				dev_err(codec->dev,
+					"Failed to enable supplies: %d\n",
+					ret);
+				return ret;
+			}
+			wm8770_sync_cache(codec);
+			/* global powerup */
+			snd_soc_write(codec, WM8770_PWDNCTRL, 0);
+		}
+		break;
+	case SND_SOC_BIAS_OFF:
+		/* global powerdown */
+		snd_soc_write(codec, WM8770_PWDNCTRL, 1);
+		regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies),
+				       wm8770->supplies);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define WM8770_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8770_dai_ops = {
+	.digital_mute = wm8770_mute,
+	.hw_params = wm8770_hw_params,
+	.set_fmt = wm8770_set_fmt,
+	.set_sysclk = wm8770_set_sysclk,
+};
+
+static struct snd_soc_dai_driver wm8770_dai = {
+	.name = "wm8770-hifi",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = WM8770_FORMATS
+	},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_96000,
+		.formats = WM8770_FORMATS
+	},
+	.ops = &wm8770_dai_ops,
+	.symmetric_rates = 1
+};
+
+#ifdef CONFIG_PM
+static int wm8770_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8770_resume(struct snd_soc_codec *codec)
+{
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8770_suspend NULL
+#define wm8770_resume NULL
+#endif
+
+static int wm8770_probe(struct snd_soc_codec *codec)
+{
+	struct wm8770_priv *wm8770;
+	int ret;
+	int i;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770->codec = codec;
+
+	codec->dapm.idle_bias_off = 1;
+
+	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8770->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++)
+		wm8770->supplies[i].supply = wm8770_supply_names[i];
+
+	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8770->supplies),
+				 wm8770->supplies);
+	if (ret) {
+		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	wm8770->disable_nb[0].notifier_call = wm8770_regulator_event_0;
+	wm8770->disable_nb[1].notifier_call = wm8770_regulator_event_1;
+	wm8770->disable_nb[2].notifier_call = wm8770_regulator_event_2;
+
+	/* This should really be moved into the regulator core */
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) {
+		ret = regulator_register_notifier(wm8770->supplies[i].consumer,
+						  &wm8770->disable_nb[i]);
+		if (ret) {
+			dev_err(codec->dev,
+				"Failed to register regulator notifier: %d\n",
+				ret);
+		}
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
+				    wm8770->supplies);
+	if (ret) {
+		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		goto err_reg_get;
+	}
+
+	ret = wm8770_reset(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		goto err_reg_enable;
+	}
+
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* latch the volume update bits */
+	snd_soc_update_bits(codec, WM8770_MSDIGVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_MSALGVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT1RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT2RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT3RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT4RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC1RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC2RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC3RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC4RVOL, 0x100, 0x100);
+
+	/* mute all DACs */
+	snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10, 0x10);
+
+	snd_soc_add_controls(codec, wm8770_snd_controls,
+			     ARRAY_SIZE(wm8770_snd_controls));
+	snd_soc_dapm_new_controls(&codec->dapm, wm8770_dapm_widgets,
+				  ARRAY_SIZE(wm8770_dapm_widgets));
+	snd_soc_dapm_add_routes(&codec->dapm, wm8770_intercon,
+				ARRAY_SIZE(wm8770_intercon));
+	return 0;
+
+err_reg_enable:
+	regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+err_reg_get:
+	regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+	return ret;
+}
+
+static int wm8770_remove(struct snd_soc_codec *codec)
+{
+	struct wm8770_priv *wm8770;
+	int i;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
+		regulator_unregister_notifier(wm8770->supplies[i].consumer,
+					      &wm8770->disable_nb[i]);
+	regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
+	.probe = wm8770_probe,
+	.remove = wm8770_remove,
+	.suspend = wm8770_suspend,
+	.resume = wm8770_resume,
+	.set_bias_level = wm8770_set_bias_level,
+	.reg_cache_size = ARRAY_SIZE(wm8770_reg_defs),
+	.reg_word_size = sizeof(u16),
+	.reg_cache_default = wm8770_reg_defs
+};
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8770_spi_probe(struct spi_device *spi)
+{
+	struct wm8770_priv *wm8770;
+	int ret;
+
+	wm8770 = kzalloc(sizeof(struct wm8770_priv), GFP_KERNEL);
+	if (!wm8770)
+		return -ENOMEM;
+
+	wm8770->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8770);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8770, &wm8770_dai, 1);
+	if (ret < 0)
+		kfree(wm8770);
+	return ret;
+}
+
+static int __devexit wm8770_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8770_spi_driver = {
+	.driver = {
+		.name = "wm8770",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8770_spi_probe,
+	.remove = __devexit_p(wm8770_spi_remove)
+};
+#endif
+
+static int __init wm8770_modinit(void)
+{
+	int ret = 0;
+
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8770_spi_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8770 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return ret;
+}
+module_init(wm8770_modinit);
+
+static void __exit wm8770_exit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8770_spi_driver);
+#endif
+}
+module_exit(wm8770_exit);
+
+MODULE_DESCRIPTION("ASoC WM8770 driver");
+MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8770.h b/sound/soc/codecs/wm8770.h
new file mode 100644
index 0000000..5f1b3bd
--- /dev/null
+++ b/sound/soc/codecs/wm8770.h
@@ -0,0 +1,51 @@
+/*
+ * wm8770.h  --  WM8770 ASoC driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8770_H
+#define _WM8770_H
+
+/* Registers */
+#define WM8770_VOUT1LVOL                0
+#define WM8770_VOUT1RVOL                0x1
+#define WM8770_VOUT2LVOL                0x2
+#define WM8770_VOUT2RVOL                0x3
+#define WM8770_VOUT3LVOL                0x4
+#define WM8770_VOUT3RVOL                0x5
+#define WM8770_VOUT4LVOL                0x6
+#define WM8770_VOUT4RVOL                0x7
+#define WM8770_MSALGVOL                 0x8
+#define WM8770_DAC1LVOL                 0x9
+#define WM8770_DAC1RVOL                 0xa
+#define WM8770_DAC2LVOL                 0xb
+#define WM8770_DAC2RVOL                 0xc
+#define WM8770_DAC3LVOL                 0xd
+#define WM8770_DAC3RVOL                 0xe
+#define WM8770_DAC4LVOL                 0xf
+#define WM8770_DAC4RVOL                 0x10
+#define WM8770_MSDIGVOL                 0x11
+#define WM8770_DACPHASE                 0x12
+#define WM8770_DACCTRL1                 0x13
+#define WM8770_DACMUTE                  0x14
+#define WM8770_DACCTRL2                 0x15
+#define WM8770_IFACECTRL                0x16
+#define WM8770_MSTRCTRL                 0x17
+#define WM8770_PWDNCTRL                 0x18
+#define WM8770_ADCLCTRL                 0x19
+#define WM8770_ADCRCTRL                 0x1a
+#define WM8770_ADCMUX                   0x1b
+#define WM8770_OUTMUX1                  0x1c
+#define WM8770_OUTMUX2                  0x1d
+#define WM8770_RESET                    0x31
+
+#define WM8770_CACHEREGNUM 0x20
+
+#endif
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 0132a27..8e7953b 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -306,7 +305,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Disable the global powerdown; DAPM does the rest */
 			snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 0);
 		}
@@ -317,7 +316,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -404,6 +403,7 @@
 static int wm8776_probe(struct snd_soc_codec *codec)
 {
 	struct wm8776_priv *wm8776 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 
 	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8776->control_type);
@@ -427,9 +427,9 @@
 
 	snd_soc_add_controls(codec, wm8776_snd_controls,
 			     ARRAY_SIZE(wm8776_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8776_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8776_dapm_widgets,
 				  ARRAY_SIZE(wm8776_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+	snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 4599e8e..6dae1b4 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -515,7 +514,7 @@
 		snd_soc_update_bits(codec, WM8804_PWRDN, 0x9, 0);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
 						    wm8804->supplies);
 			if (ret) {
@@ -537,7 +536,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -581,7 +580,7 @@
 	wm8804 = snd_soc_codec_get_drvdata(codec);
 	wm8804->codec = codec;
 
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 8, 8, wm8804->control_type);
 	if (ret < 0) {
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index aca4b1e..cd09599 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -140,7 +139,6 @@
 
 struct wm8900_priv {
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[WM8900_MAXREG];
 
 	u32 fll_in; /* FLL input frequency */
 	u32 fll_out; /* FLL output frequency */
@@ -611,10 +609,11 @@
 
 static int wm8900_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8900_dapm_widgets,
-				  ARRAY_SIZE(wm8900_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8900_dapm_widgets,
+				  ARRAY_SIZE(wm8900_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1051,7 +1050,7 @@
 
 	case SND_SOC_BIAS_STANDBY:
 		/* Charge capacitors if initial power up */
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* STARTUP_BIAS_ENA on */
 			snd_soc_write(codec, WM8900_REG_POWER1,
 				     WM8900_REG_POWER1_STARTUP_BIAS_ENA);
@@ -1119,7 +1118,7 @@
 			     WM8900_REG_POWER2_SYSCLK_ENA);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 622b602..017d99c 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -29,9 +29,9 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/wm8903.h>
+#include <trace/events/asoc.h>
 
 #include "wm8903.h"
 
@@ -214,15 +214,14 @@
 
 struct wm8903_priv {
 
-	u16 reg_cache[ARRAY_SIZE(wm8903_reg_defaults)];
-
 	int sysclk;
 	int irq;
 
-	/* Reference counts */
+	int fs;
+	int deemph;
+
+	/* Reference count */
 	int class_w_users;
-	int playback_active;
-	int capture_active;
 
 	struct completion wseq;
 
@@ -231,9 +230,6 @@
 	int mic_short;
 	int mic_last_report;
 	int mic_delay;
-
-	struct snd_pcm_substream *master_substream;
-	struct snd_pcm_substream *slave_substream;
 };
 
 static int wm8903_volatile_register(unsigned int reg)
@@ -463,6 +459,72 @@
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
 
 
+static int wm8903_deemph[] = { 0, 32000, 44100, 48000 };
+
+static int wm8903_set_deemph(struct snd_soc_codec *codec)
+{
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+	int val, i, best;
+
+	/* If we're using deemphasis, select the nearest available sample
+	 * rate.
+	 */
+	if (wm8903->deemph) {
+		best = 1;
+		for (i = 2; i < ARRAY_SIZE(wm8903_deemph); i++) {
+			if (abs(wm8903_deemph[i] - wm8903->fs) <
+			    abs(wm8903_deemph[best] - wm8903->fs))
+				best = i;
+		}
+
+		val = best << WM8903_DEEMPH_SHIFT;
+	} else {
+		best = 0;
+		val = 0;
+	}
+
+	dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n",
+		best, wm8903_deemph[best]);
+
+	return snd_soc_update_bits(codec, WM8903_DAC_DIGITAL_1,
+				   WM8903_DEEMPH_MASK, val);
+}
+
+static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8903->deemph;
+
+	return 0;
+}
+
+static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+	int deemph = ucontrol->value.enumerated.item[0];
+	int ret = 0;
+
+	if (deemph > 1)
+		return -EINVAL;
+
+	mutex_lock(&codec->mutex);
+	if (wm8903->deemph != deemph) {
+		wm8903->deemph = deemph;
+
+		wm8903_set_deemph(codec);
+
+		ret = 1;
+	}
+	mutex_unlock(&codec->mutex);
+
+	return ret;
+}
+
 /* ALSA can only do steps of .01dB */
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 
@@ -475,6 +537,23 @@
 static const DECLARE_TLV_DB_SCALE(drc_tlv_max, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_tlv_startup, -300, 50, 0);
 
+static const char *hpf_mode_text[] = {
+	"Hi-fi", "Voice 1", "Voice 2", "Voice 3"
+};
+
+static const struct soc_enum hpf_mode =
+	SOC_ENUM_SINGLE(WM8903_ADC_DIGITAL_0, 5, 4, hpf_mode_text);
+
+static const char *osr_text[] = {
+	"Low power", "High performance"
+};
+
+static const struct soc_enum adc_osr =
+	SOC_ENUM_SINGLE(WM8903_ANALOGUE_ADC_0, 0, 2, osr_text);
+
+static const struct soc_enum dac_osr =
+	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 0, 2, osr_text);
+
 static const char *drc_slope_text[] = {
 	"1", "1/2", "1/4", "1/8", "1/16", "0"
 };
@@ -537,13 +616,6 @@
 static const struct soc_enum mute_mode =
 	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 9, 2, mute_mode_text);
 
-static const char *dac_deemphasis_text[] = {
-	"Disabled", "32kHz", "44.1kHz", "48kHz"
-};
-
-static const struct soc_enum dac_deemphasis =
-	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 1, 4, dac_deemphasis_text);
-
 static const char *companding_text[] = {
 	"ulaw", "alaw"
 };
@@ -613,6 +685,9 @@
 	   6, 1, 0),
 
 /* ADCs */
+SOC_ENUM("ADC OSR", adc_osr),
+SOC_SINGLE("HPF Switch", WM8903_ADC_DIGITAL_0, 4, 1, 0),
+SOC_ENUM("HPF Mode", hpf_mode),
 SOC_SINGLE("DRC Switch", WM8903_DRC_0, 15, 1, 0),
 SOC_ENUM("DRC Compressor Slope R0", drc_slope_r0),
 SOC_ENUM("DRC Compressor Slope R1", drc_slope_r1),
@@ -642,14 +717,16 @@
 	       12, 0, digital_sidetone_tlv),
 
 /* DAC */
+SOC_ENUM("DAC OSR", dac_osr),
 SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8903_DAC_DIGITAL_VOLUME_LEFT,
 		 WM8903_DAC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
 SOC_ENUM("DAC Soft Mute Rate", soft_mute),
 SOC_ENUM("DAC Mute Mode", mute_mode),
 SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0),
-SOC_ENUM("DAC De-emphasis", dac_deemphasis),
 SOC_ENUM("DAC Companding Mode", dac_companding),
 SOC_SINGLE("DAC Companding Switch", WM8903_AUDIO_INTERFACE_0, 1, 1, 0),
+SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0,
+		    wm8903_get_deemph, wm8903_put_deemph),
 
 /* Headphones */
 SOC_DOUBLE_R("Headphone Switch",
@@ -923,10 +1000,11 @@
 
 static int wm8903_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8903_dapm_widgets,
-				  ARRAY_SIZE(wm8903_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8903_dapm_widgets,
+				  ARRAY_SIZE(wm8903_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -934,7 +1012,7 @@
 static int wm8903_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
-	u16 reg, reg2;
+	u16 reg;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
@@ -946,7 +1024,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			snd_soc_write(codec, WM8903_CLOCK_RATES_2,
 				     WM8903_CLK_SYS_ENA);
 
@@ -958,23 +1036,15 @@
 			wm8903_run_sequence(codec, 0);
 			wm8903_sync_reg_cache(codec, codec->reg_cache);
 
-			/* Enable low impedence charge pump output */
-			reg = snd_soc_read(codec,
-					  WM8903_CONTROL_INTERFACE_TEST_1);
-			snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
-				     reg | WM8903_TEST_KEY);
-			reg2 = snd_soc_read(codec, WM8903_CHARGE_PUMP_TEST_1);
-			snd_soc_write(codec, WM8903_CHARGE_PUMP_TEST_1,
-				     reg2 | WM8903_CP_SW_KELVIN_MODE_MASK);
-			snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
-				     reg);
-
 			/* By default no bypass paths are enabled so
 			 * enable Class W support.
 			 */
 			dev_dbg(codec->dev, "Enabling Class W\n");
-			snd_soc_write(codec, WM8903_CLASS_W_0, reg |
-				     WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V);
+			snd_soc_update_bits(codec, WM8903_CLASS_W_0,
+					    WM8903_CP_DYN_FREQ |
+					    WM8903_CP_DYN_V,
+					    WM8903_CP_DYN_FREQ |
+					    WM8903_CP_DYN_V);
 		}
 
 		reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
@@ -991,7 +1061,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1222,58 +1292,6 @@
 	{ 0,      0 },
 };
 
-static int wm8903_startup(struct snd_pcm_substream *substream,
-			  struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-	struct snd_pcm_runtime *master_runtime;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		wm8903->playback_active++;
-	else
-		wm8903->capture_active++;
-
-	/* The DAI has shared clocks so if we already have a playback or
-	 * capture going then constrain this substream to match it.
-	 */
-	if (wm8903->master_substream) {
-		master_runtime = wm8903->master_substream->runtime;
-
-		dev_dbg(codec->dev, "Constraining to %d bits\n",
-			master_runtime->sample_bits);
-
-		snd_pcm_hw_constraint_minmax(substream->runtime,
-					     SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
-					     master_runtime->sample_bits,
-					     master_runtime->sample_bits);
-
-		wm8903->slave_substream = substream;
-	} else
-		wm8903->master_substream = substream;
-
-	return 0;
-}
-
-static void wm8903_shutdown(struct snd_pcm_substream *substream,
-			    struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		wm8903->playback_active--;
-	else
-		wm8903->capture_active--;
-
-	if (wm8903->master_substream == substream)
-		wm8903->master_substream = wm8903->slave_substream;
-
-	wm8903->slave_substream = NULL;
-}
-
 static int wm8903_hw_params(struct snd_pcm_substream *substream,
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
@@ -1298,11 +1316,6 @@
 	u16 clock1 = snd_soc_read(codec, WM8903_CLOCK_RATES_1);
 	u16 dac_digital1 = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
 
-	if (substream == wm8903->slave_substream) {
-		dev_dbg(codec->dev, "Ignoring hw_params for slave substream\n");
-		return 0;
-	}
-
 	/* Enable sloping stopband filter for low sample rates */
 	if (fs <= 24000)
 		dac_digital1 |= WM8903_DAC_SB_FILT;
@@ -1320,19 +1333,6 @@
 		}
 	}
 
-	/* Constraints should stop us hitting this but let's make sure */
-	if (wm8903->capture_active)
-		switch (sample_rates[dsp_config].rate) {
-		case 88200:
-		case 96000:
-			dev_err(codec->dev, "%dHz unsupported by ADC\n",
-				fs);
-			return -EINVAL;
-
-		default:
-			break;
-		}
-
 	dev_dbg(codec->dev, "DSP fs = %dHz\n", sample_rates[dsp_config].rate);
 	clock1 &= ~WM8903_SAMPLE_RATE_MASK;
 	clock1 |= sample_rates[dsp_config].value;
@@ -1428,6 +1428,9 @@
 	aif2 |= bclk_divs[bclk_div].div;
 	aif3 |= bclk / fs;
 
+	wm8903->fs = params_rate(params);
+	wm8903_set_deemph(codec);
+
 	snd_soc_write(codec, WM8903_CLOCK_RATES_0, clock0);
 	snd_soc_write(codec, WM8903_CLOCK_RATES_1, clock1);
 	snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
@@ -1479,7 +1482,7 @@
 			    WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
 			    irq_mask);
 
-	if (det && shrt) {
+	if (det || shrt) {
 		/* Enable mic detection, this may not have been set through
 		 * platform data (eg, if the defaults are OK). */
 		snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
@@ -1521,6 +1524,11 @@
 	mic_report = wm8903->mic_last_report;
 	int_pol = snd_soc_read(codec, WM8903_INTERRUPT_POLARITY_1);
 
+#ifndef CONFIG_SND_SOC_WM8903_MODULE
+	if (int_val & (WM8903_MICSHRT_EINT | WM8903_MICDET_EINT))
+		trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
 	if (int_val & WM8903_MICSHRT_EINT) {
 		dev_dbg(codec->dev, "Microphone short (pol=%x)\n", int_pol);
 
@@ -1571,8 +1579,6 @@
 			SNDRV_PCM_FMTBIT_S24_LE)
 
 static struct snd_soc_dai_ops wm8903_dai_ops = {
-	.startup	= wm8903_startup,
-	.shutdown	= wm8903_shutdown,
 	.hw_params	= wm8903_hw_params,
 	.digital_mute	= wm8903_digital_mute,
 	.set_fmt	= wm8903_set_dai_fmt,
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index 996435e..e3ec243 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -19,10 +19,6 @@
 			     struct snd_soc_jack *jack,
 			     int det, int shrt);
 
-#define WM8903_MCLK_DIV_2 1
-#define WM8903_CLK_SYS    2
-#define WM8903_BCLK       3
-#define WM8903_LRCLK      4
 
 /*
  * Register values.
@@ -98,8 +94,6 @@
 #define WM8903_INTERRUPT_STATUS_1_MASK          0x7A
 #define WM8903_INTERRUPT_POLARITY_1             0x7B
 #define WM8903_INTERRUPT_CONTROL                0x7E
-#define WM8903_CONTROL_INTERFACE_TEST_1         0x81
-#define WM8903_CHARGE_PUMP_TEST_1               0x95
 #define WM8903_CLOCK_RATE_TEST_4                0xA4
 #define WM8903_ANALOGUE_OUTPUT_BIAS_0           0xAC
 
@@ -171,7 +165,7 @@
 
 #define WM8903_VMID_RES_50K                          2
 #define WM8903_VMID_RES_250K                         3
-#define WM8903_VMID_RES_5K                           4
+#define WM8903_VMID_RES_5K                           6
 
 /*
  * R8 (0x08) - Analogue DAC 0
@@ -1206,25 +1200,6 @@
 #define WM8903_IRQ_POL_WIDTH                         1  /* IRQ_POL */
 
 /*
- * R129 (0x81) - Control Interface Test 1
- */
-#define WM8903_USER_KEY                         0x0002  /* USER_KEY */
-#define WM8903_USER_KEY_MASK                    0x0002  /* USER_KEY */
-#define WM8903_USER_KEY_SHIFT                        1  /* USER_KEY */
-#define WM8903_USER_KEY_WIDTH                        1  /* USER_KEY */
-#define WM8903_TEST_KEY                         0x0001  /* TEST_KEY */
-#define WM8903_TEST_KEY_MASK                    0x0001  /* TEST_KEY */
-#define WM8903_TEST_KEY_SHIFT                        0  /* TEST_KEY */
-#define WM8903_TEST_KEY_WIDTH                        1  /* TEST_KEY */
-
-/*
- * R149 (0x95) - Charge Pump Test 1
- */
-#define WM8903_CP_SW_KELVIN_MODE_MASK           0x0006  /* CP_SW_KELVIN_MODE - [2:1] */
-#define WM8903_CP_SW_KELVIN_MODE_SHIFT               1  /* CP_SW_KELVIN_MODE - [2:1] */
-#define WM8903_CP_SW_KELVIN_MODE_WIDTH               2  /* CP_SW_KELVIN_MODE - [2:1] */
-
-/*
  * R164 (0xA4) - Clock Rate Test 4
  */
 #define WM8903_ADC_DIG_MIC                      0x0200  /* ADC_DIG_MIC */
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 1ec12ef..9de44a4 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8904.h>
@@ -1427,10 +1426,11 @@
 static int wm8904_add_widgets(struct snd_soc_codec *codec)
 {
 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, wm8904_core_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8904_core_dapm_widgets,
 				  ARRAY_SIZE(wm8904_core_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, core_intercon,
+	snd_soc_dapm_add_routes(dapm, core_intercon,
 				ARRAY_SIZE(core_intercon));
 
 	switch (wm8904->devtype) {
@@ -1442,20 +1442,20 @@
 		snd_soc_add_controls(codec, wm8904_snd_controls,
 				     ARRAY_SIZE(wm8904_snd_controls));
 
-		snd_soc_dapm_new_controls(codec, wm8904_adc_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_adc_dapm_widgets,
 					  ARRAY_SIZE(wm8904_adc_dapm_widgets));
-		snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dac_dapm_widgets));
-		snd_soc_dapm_new_controls(codec, wm8904_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dapm_widgets));
 
-		snd_soc_dapm_add_routes(codec, core_intercon,
+		snd_soc_dapm_add_routes(dapm, core_intercon,
 					ARRAY_SIZE(core_intercon));
-		snd_soc_dapm_add_routes(codec, adc_intercon,
+		snd_soc_dapm_add_routes(dapm, adc_intercon,
 					ARRAY_SIZE(adc_intercon));
-		snd_soc_dapm_add_routes(codec, dac_intercon,
+		snd_soc_dapm_add_routes(dapm, dac_intercon,
 					ARRAY_SIZE(dac_intercon));
-		snd_soc_dapm_add_routes(codec, wm8904_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8904_intercon,
 					ARRAY_SIZE(wm8904_intercon));
 		break;
 
@@ -1463,17 +1463,17 @@
 		snd_soc_add_controls(codec, wm8904_dac_snd_controls,
 				     ARRAY_SIZE(wm8904_dac_snd_controls));
 
-		snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dac_dapm_widgets));
 
-		snd_soc_dapm_add_routes(codec, dac_intercon,
+		snd_soc_dapm_add_routes(dapm, dac_intercon,
 					ARRAY_SIZE(dac_intercon));
-		snd_soc_dapm_add_routes(codec, wm8912_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8912_intercon,
 					ARRAY_SIZE(wm8912_intercon));
 		break;
 	}
 
-	snd_soc_dapm_new_widgets(codec);
+	snd_soc_dapm_new_widgets(dapm);
 	return 0;
 }
 
@@ -1589,7 +1589,7 @@
 		       - wm8904->fs);
 	for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
 		cur_val = abs((wm8904->sysclk_rate /
-			       clk_sys_rates[i].ratio) - wm8904->fs);;
+			       clk_sys_rates[i].ratio) - wm8904->fs);
 		if (cur_val < best_val) {
 			best = i;
 			best_val = cur_val;
@@ -2138,7 +2138,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies),
 						    wm8904->supplies);
 			if (ret != 0) {
@@ -2197,7 +2197,7 @@
 				       wm8904->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -2373,7 +2373,7 @@
 	int ret, i;
 
 	codec->cache_sync = 1;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	switch (wm8904->devtype) {
 	case WM8904:
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 23086e2..25580e3 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -35,7 +35,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -43,7 +42,6 @@
 
 struct wm8940_priv {
 	unsigned int sysclk;
-	u16 reg_cache[WM8940_CACHEREGNUM];
 	enum snd_soc_control_type control_type;
 	void *control_data;
 };
@@ -291,13 +289,14 @@
 
 static int wm8940_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	ret = snd_soc_dapm_new_controls(codec, wm8940_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, wm8940_dapm_widgets,
 					ARRAY_SIZE(wm8940_dapm_widgets));
 	if (ret)
 		goto error_ret;
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret)
 		goto error_ret;
 
@@ -735,7 +734,6 @@
 		return ret;
 
 	return ret;
-;
 }
 
 static int wm8940_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 2ac35b0..7167dfc 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8955.h>
@@ -576,13 +575,14 @@
 
 static int wm8955_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	snd_soc_add_controls(codec, wm8955_snd_controls,
 			     ARRAY_SIZE(wm8955_snd_controls));
 
-	snd_soc_dapm_new_controls(codec, wm8955_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8955_dapm_widgets,
 				  ARRAY_SIZE(wm8955_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, wm8955_intercon,
+	snd_soc_dapm_add_routes(dapm, wm8955_intercon,
 				ARRAY_SIZE(wm8955_intercon));
 
 	return 0;
@@ -786,7 +786,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies),
 						    wm8955->supplies);
 			if (ret != 0) {
@@ -850,7 +850,7 @@
 				       wm8955->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index ff6ff2f..4393394 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8960.h>
@@ -72,7 +71,6 @@
 };
 
 struct wm8960_priv {
-	u16 reg_cache[WM8960_CACHEREGNUM];
 	enum snd_soc_control_type control_type;
 	void *control_data;
 	int (*set_bias_level)(struct snd_soc_codec *,
@@ -389,27 +387,28 @@
 {
 	struct wm8960_data *pdata = codec->dev->platform_data;
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct snd_soc_dapm_widget *w;
 
-	snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets,
 				  ARRAY_SIZE(wm8960_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	/* In capless mode OUT3 is used to provide VMID for the
 	 * headphone outputs, otherwise it is used as a mono mixer.
 	 */
 	if (pdata && pdata->capless) {
-		snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_capless,
+		snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_capless,
 					  ARRAY_SIZE(wm8960_dapm_widgets_capless));
 
-		snd_soc_dapm_add_routes(codec, audio_paths_capless,
+		snd_soc_dapm_add_routes(dapm, audio_paths_capless,
 					ARRAY_SIZE(audio_paths_capless));
 	} else {
-		snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_out3,
+		snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_out3,
 					  ARRAY_SIZE(wm8960_dapm_widgets_out3));
 
-		snd_soc_dapm_add_routes(codec, audio_paths_out3,
+		snd_soc_dapm_add_routes(dapm, audio_paths_out3,
 					ARRAY_SIZE(audio_paths_out3));
 	}
 
@@ -418,7 +417,9 @@
 	 * list each time to find the desired power state do so now
 	 * and save the result.
 	 */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &codec->card->widgets, list) {
+		if (w->dapm != &codec->dapm)
+			continue;
 		if (strcmp(w->name, "LOUT1 PGA") == 0)
 			wm8960->lout1 = w;
 		if (strcmp(w->name, "ROUT1 PGA") == 0)
@@ -573,7 +574,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable anti-pop features */
 			snd_soc_write(codec, WM8960_APOP1,
 				      WM8960_POBCTRL | WM8960_SOFT_ST |
@@ -611,7 +612,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -627,7 +628,7 @@
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
-		switch (codec->bias_level) {
+		switch (codec->dapm.bias_level) {
 		case SND_SOC_BIAS_STANDBY:
 			/* Enable anti pop mode */
 			snd_soc_update_bits(codec, WM8960_APOP1,
@@ -682,7 +683,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		switch (codec->bias_level) {
+		switch (codec->dapm.bias_level) {
 		case SND_SOC_BIAS_PREPARE:
 			/* Disable HP discharge */
 			snd_soc_update_bits(codec, WM8960_APOP2,
@@ -706,7 +707,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 8340485..55252e7 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -290,7 +289,6 @@
 struct wm8961_priv {
 	enum snd_soc_control_type control_type;
 	int sysclk;
-	u16 reg_cache[WM8961_MAX_REGISTER];
 };
 
 static int wm8961_volatile_register(unsigned int reg)
@@ -882,7 +880,7 @@
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
 			/* Enable bias generation */
 			reg = snd_soc_read(codec, WM8961_ANTI_POP);
 			reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN;
@@ -897,7 +895,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_PREPARE) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE) {
 			/* VREF off */
 			reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
 			reg &= ~WM8961_VREF;
@@ -919,7 +917,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -959,6 +957,7 @@
 
 static int wm8961_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 	u16 reg;
 
@@ -1024,9 +1023,9 @@
 
 	snd_soc_add_controls(codec, wm8961_snd_controls,
 				ARRAY_SIZE(wm8961_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8961_dapm_widgets,
 				  ARRAY_SIZE(wm8961_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 7c421cc..b9cb1fc 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -29,10 +29,10 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8962.h>
+#include <trace/events/asoc.h>
 
 #include "wm8962.h"
 
@@ -1956,7 +1956,7 @@
 
 static int wm8962_reset(struct snd_soc_codec *codec)
 {
-	return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0);
+	return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243);
 }
 
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
@@ -2677,6 +2677,7 @@
 static int wm8962_add_widgets(struct snd_soc_codec *codec)
 {
 	struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	snd_soc_add_controls(codec, wm8962_snd_controls,
 			     ARRAY_SIZE(wm8962_snd_controls));
@@ -2688,26 +2689,26 @@
 				     ARRAY_SIZE(wm8962_spk_stereo_controls));
 
 
-	snd_soc_dapm_new_controls(codec, wm8962_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8962_dapm_widgets,
 				  ARRAY_SIZE(wm8962_dapm_widgets));
 	if (pdata && pdata->spk_mono)
-		snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_mono_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_mono_widgets,
 					  ARRAY_SIZE(wm8962_dapm_spk_mono_widgets));
 	else
-		snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_stereo_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_stereo_widgets,
 					  ARRAY_SIZE(wm8962_dapm_spk_stereo_widgets));
 
-	snd_soc_dapm_add_routes(codec, wm8962_intercon,
+	snd_soc_dapm_add_routes(dapm, wm8962_intercon,
 				ARRAY_SIZE(wm8962_intercon));
 	if (pdata && pdata->spk_mono)
-		snd_soc_dapm_add_routes(codec, wm8962_spk_mono_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8962_spk_mono_intercon,
 					ARRAY_SIZE(wm8962_spk_mono_intercon));
 	else
-		snd_soc_dapm_add_routes(codec, wm8962_spk_stereo_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8962_spk_stereo_intercon,
 					ARRAY_SIZE(wm8962_spk_stereo_intercon));
 
 
-	snd_soc_dapm_disable_pin(codec, "Beep");
+	snd_soc_dapm_disable_pin(dapm, "Beep");
 
 	return 0;
 }
@@ -2814,7 +2815,7 @@
 	struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
-	if (level == codec->bias_level)
+	if (level == codec->dapm.bias_level)
 		return 0;
 
 	switch (level) {
@@ -2828,7 +2829,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
 						    wm8962->supplies);
 			if (ret != 0) {
@@ -2878,7 +2879,7 @@
 				       wm8962->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -3348,6 +3349,12 @@
 	if (active & (WM8962_MICSCD_EINT | WM8962_MICD_EINT)) {
 		dev_dbg(codec->dev, "Microphone event detected\n");
 
+#ifndef CONFIG_SND_SOC_WM8962_MODULE
+		trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
+		pm_wakeup_event(codec->dev, 300);
+
 		schedule_delayed_work(&wm8962->mic_work,
 				      msecs_to_jiffies(250));
 	}
@@ -3433,6 +3440,7 @@
 	struct wm8962_priv *wm8962 =
 		container_of(work, struct wm8962_priv, beep_work);
 	struct snd_soc_codec *codec = wm8962->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 	int reg = 0;
 	int best = 0;
@@ -3449,16 +3457,16 @@
 
 		reg = WM8962_BEEP_ENA | (best << WM8962_BEEP_RATE_SHIFT);
 
-		snd_soc_dapm_enable_pin(codec, "Beep");
+		snd_soc_dapm_enable_pin(dapm, "Beep");
 	} else {
 		dev_dbg(codec->dev, "Disabling beep\n");
-		snd_soc_dapm_disable_pin(codec, "Beep");
+		snd_soc_dapm_disable_pin(dapm, "Beep");
 	}
 
 	snd_soc_update_bits(codec, WM8962_BEEP_GENERATOR_1,
 			    WM8962_BEEP_ENA | WM8962_BEEP_RATE_MASK, reg);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 /* For usability define a way of injecting beep events for the device -
@@ -3706,7 +3714,7 @@
 	INIT_DELAYED_WORK(&wm8962->mic_work, wm8962_mic_work);
 
 	codec->cache_sync = 1;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C);
 	if (ret != 0) {
@@ -3865,7 +3873,6 @@
 err_get:
 	regulator_bulk_free(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
 err:
-	kfree(wm8962);
 	return ret;
 }
 
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 9f18db6..572bb80 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8971.h"
@@ -333,10 +332,11 @@
 
 static int wm8971_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8971_dapm_widgets,
-				  ARRAY_SIZE(wm8971_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8971_dapm_widgets,
+				  ARRAY_SIZE(wm8971_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -553,7 +553,7 @@
 		snd_soc_write(codec, WM8971_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -590,9 +590,11 @@
 
 static void wm8971_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-		container_of(work, struct snd_soc_codec, delayed_work.work);
-	wm8971_set_bias_level(codec, codec->bias_level);
+	struct snd_soc_dapm_context *dapm =
+		container_of(work, struct snd_soc_dapm_context,
+			     delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
+	wm8971_set_bias_level(codec, codec->dapm.bias_level);
 }
 
 static int wm8971_suspend(struct snd_soc_codec *codec, pm_message_t state)
@@ -620,11 +622,11 @@
 	wm8971_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	/* charge wm8971 caps */
-	if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
 		reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
 		snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
-		codec->bias_level = SND_SOC_BIAS_ON;
-		queue_delayed_work(wm8971_workq, &codec->delayed_work,
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
 			msecs_to_jiffies(1000));
 	}
 
@@ -643,7 +645,7 @@
 		return ret;
 	}
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8971_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8971_work);
 	wm8971_workq = create_workqueue("wm8971");
 	if (wm8971_workq == NULL)
 		return -ENOMEM;
@@ -653,8 +655,8 @@
 	/* charge output caps - set vmid to 5k for quick power up */
 	reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
 	snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
-	codec->bias_level = SND_SOC_BIAS_STANDBY;
-	queue_delayed_work(wm8971_workq, &codec->delayed_work,
+	codec->dapm.bias_level = SND_SOC_BIAS_STANDBY;
+	queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
 		msecs_to_jiffies(1000));
 
 	/* set the update bits */
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index b4363f6..ca646a8 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -52,7 +51,6 @@
 
 struct wm8974_priv {
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[WM8974_CACHEREGNUM];
 };
 
 #define wm8974_reset(c)	snd_soc_write(c, WM8974_RESET, 0)
@@ -274,10 +272,11 @@
 
 static int wm8974_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8974_dapm_widgets,
-				  ARRAY_SIZE(wm8974_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8974_dapm_widgets,
+				  ARRAY_SIZE(wm8974_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -530,7 +529,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8974_POWER1, power1 | 0x3);
 			mdelay(100);
@@ -547,7 +546,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 13b979a..8dfb0a0 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -60,7 +59,6 @@
 	unsigned int f_opclk;
 	int mclk_idx;
 	enum wm8978_sysclk_src sysclk;
-	u16 reg_cache[WM8978_CACHEREGNUM];
 };
 
 static const char *wm8978_companding[] = {"Off", "NC", "u-law", "A-law"};
@@ -147,18 +145,18 @@
 	SOC_SINGLE("DAC Playback Limiter Threshold",
 		WM8978_DAC_LIMITER_2, 4, 7, 0),
 	SOC_SINGLE("DAC Playback Limiter Boost",
-		WM8978_DAC_LIMITER_2, 0, 15, 0),
+		WM8978_DAC_LIMITER_2, 0, 12, 0),
 
 	SOC_ENUM("ALC Enable Switch", alc1),
 	SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0),
 	SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0),
 
-	SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 7, 0),
+	SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0),
 	SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0),
 
 	SOC_ENUM("ALC Capture Mode", alc3),
-	SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 15, 0),
-	SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 15, 0),
+	SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0),
+	SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0),
 
 	SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0),
 	SOC_SINGLE("ALC Capture Noise Gate Threshold",
@@ -213,8 +211,10 @@
 		WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1),
 
 	/* DAC / ADC oversampling */
-	SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, 8, 1, 0),
-	SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, 8, 1, 0),
+	SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL,
+		   5, 1, 0),
+	SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL,
+		   5, 1, 0),
 };
 
 /* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */
@@ -355,11 +355,12 @@
 
 static int wm8978_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8978_dapm_widgets,
-				  ARRAY_SIZE(wm8978_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, wm8978_dapm_widgets,
+				  ARRAY_SIZE(wm8978_dapm_widgets));
 	/* set up the WM8978 audio map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -837,7 +838,7 @@
 		/* bit 3: enable bias, bit 2: enable I/O tie off buffer */
 		power1 |= 0xc;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1,
 				      power1 | 0x3);
@@ -857,7 +858,7 @@
 
 	dev_dbg(codec->dev, "%s: %d, %x\n", __func__, level, power1);
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index fd2e7cc..bae510a 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -533,10 +532,11 @@
 
 static int wm8985_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8985_dapm_widgets,
-				  ARRAY_SIZE(wm8985_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map,
+	snd_soc_dapm_new_controls(dapm, wm8985_dapm_widgets,
+				  ARRAY_SIZE(wm8985_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map,
 				ARRAY_SIZE(audio_map));
 	return 0;
 }
@@ -879,7 +879,7 @@
 				    1 << WM8985_VMIDSEL_SHIFT);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8985->supplies),
 						    wm8985->supplies);
 			if (ret) {
@@ -939,7 +939,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index d7f2597..d7170f1 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -25,7 +25,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8988.h"
@@ -54,7 +53,6 @@
 	unsigned int sysclk;
 	enum snd_soc_control_type control_type;
 	struct snd_pcm_hw_constraint_list *sysclk_constraints;
-	u16 reg_cache[WM8988_NUM_REG];
 };
 
 
@@ -677,7 +675,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* VREF, VMID=2x5k */
 			snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
 
@@ -693,7 +691,7 @@
 		snd_soc_write(codec, WM8988_PWR1, 0x0000);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -759,6 +757,7 @@
 static int wm8988_probe(struct snd_soc_codec *codec)
 {
 	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 	u16 reg;
 
@@ -790,9 +789,9 @@
 
 	snd_soc_add_controls(codec, wm8988_snd_controls,
 				ARRAY_SIZE(wm8988_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8988_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8988_dapm_widgets,
 				  ARRAY_SIZE(wm8988_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 264828e..100aeee 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -914,11 +913,12 @@
 
 static int wm8990_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8990_dapm_widgets,
-				  ARRAY_SIZE(wm8990_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, wm8990_dapm_widgets,
+				  ARRAY_SIZE(wm8990_dapm_widgets));
 	/* set up the WM8990 audio map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1170,7 +1170,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable all output discharge bits */
 			snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
 				WM8990_DIS_RLINE | WM8990_DIS_OUT3 |
@@ -1183,7 +1183,7 @@
 				     WM8990_VMIDTOG);
 
 			/* Delay to allow output caps to discharge */
-			msleep(msecs_to_jiffies(300));
+			msleep(300);
 
 			/* Disable VMIDTOG */
 			snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
@@ -1195,17 +1195,17 @@
 			/* Enable outputs */
 			snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
 
-			msleep(msecs_to_jiffies(50));
+			msleep(50);
 
 			/* Enable VMID at 2x50k */
 			snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
 
-			msleep(msecs_to_jiffies(100));
+			msleep(100);
 
 			/* Enable VREF */
 			snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
 
-			msleep(msecs_to_jiffies(600));
+			msleep(600);
 
 			/* Enable BUFIOEN */
 			snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
@@ -1250,7 +1250,7 @@
 		/* Disable VMID */
 		snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
 
-		msleep(msecs_to_jiffies(300));
+		msleep(300);
 
 		/* Enable all output discharge bits */
 		snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
@@ -1266,7 +1266,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
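
The msleep() changes above are a unit fix rather than part of the DAPM conversion: msleep() already takes a delay in milliseconds, while msecs_to_jiffies() converts milliseconds to jiffies, so the old msleep(msecs_to_jiffies(300)) slept the wrong amount whenever HZ != 1000 (only about 30 ms at HZ=100). A small sketch of the two correct idioms, for illustration only:

	/* Millisecond sleep: pass milliseconds directly. */
	msleep(300);

	/* Jiffies-based interfaces are where msecs_to_jiffies() belongs. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(300));
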
 
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 589e3fa..18c0d9c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -24,7 +24,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/wm8993.h>
 
@@ -226,7 +225,6 @@
 
 struct wm8993_priv {
 	struct wm_hubs_data hubs_data;
-	u16 reg_cache[WM8993_REGISTER_COUNT];
 	struct regulator_bulk_data supplies[WM8993_NUM_SUPPLIES];
 	struct wm8993_platform_data pdata;
 	enum snd_soc_control_type control_type;
@@ -735,6 +733,7 @@
 					    0);
 		}
 		wm8993->class_w_users++;
+		wm8993->hubs_data.class_w = true;
 	}
 
 	/* Implement the change */
@@ -751,6 +750,7 @@
 					    WM8993_CP_DYN_V);
 		}
 		wm8993->class_w_users--;
+		wm8993->hubs_data.class_w = false;
 	}
 
 	dev_dbg(codec->dev, "Indirect DAC use count now %d\n",
@@ -968,7 +968,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8993->supplies),
 						    wm8993->supplies);
 			if (ret != 0)
@@ -1029,6 +1029,12 @@
 				    WM8993_VMID_SEL_MASK | WM8993_BIAS_ENA,
 				    0);
 
+		snd_soc_update_bits(codec, WM8993_ANTIPOP2,
+				    WM8993_STARTUP_BIAS_ENA |
+				    WM8993_VMID_BUF_ENA |
+				    WM8993_VMID_RAMP_MASK |
+				    WM8993_BIAS_SRC, 0);
+
 #ifdef CONFIG_REGULATOR
                /* Post 2.6.34 we will be able to get a callback when
                 * the regulators are disabled which we can use but
@@ -1043,7 +1049,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1225,7 +1231,7 @@
 		       - wm8993->fs);
 	for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
 		cur_val = abs((wm8993->sysclk_rate /
-			       clk_sys_rates[i].ratio) - wm8993->fs);;
+			       clk_sys_rates[i].ratio) - wm8993->fs);
 		if (cur_val < best_val) {
 			best = i;
 			best_val = cur_val;
@@ -1422,6 +1428,7 @@
 static int wm8993_probe(struct snd_soc_codec *codec)
 {
 	struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, i, val;
 
 	wm8993->hubs_data.hp_startup_mode = 1;
@@ -1503,11 +1510,11 @@
 				     ARRAY_SIZE(wm8993_eq_controls));
 	}
 
-	snd_soc_dapm_new_controls(codec, wm8993_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8993_dapm_widgets,
 				  ARRAY_SIZE(wm8993_dapm_widgets));
 	wm_hubs_add_analogue_controls(codec);
 
-	snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+	snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes));
 	wm_hubs_add_analogue_routes(codec, wm8993->pdata.lineout1_diff,
 				    wm8993->pdata.lineout2_diff);
 
diff --git a/sound/soc/codecs/wm8994-tables.c b/sound/soc/codecs/wm8994-tables.c
new file mode 100644
index 0000000..68e9b02
--- /dev/null
+++ b/sound/soc/codecs/wm8994-tables.c
@@ -0,0 +1,3147 @@
+#include "wm8994.h"
+
+const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE] = {
+	{ 0xFFFF, 0xFFFF }, /* R0     - Software Reset */
+	{ 0x3B37, 0x3B37 }, /* R1     - Power Management (1) */
+	{ 0x6BF0, 0x6BF0 }, /* R2     - Power Management (2) */
+	{ 0x3FF0, 0x3FF0 }, /* R3     - Power Management (3) */
+	{ 0x3F3F, 0x3F3F }, /* R4     - Power Management (4) */
+	{ 0x3F0F, 0x3F0F }, /* R5     - Power Management (5) */
+	{ 0x003F, 0x003F }, /* R6     - Power Management (6) */
+	{ 0x0000, 0x0000 }, /* R7 */
+	{ 0x0000, 0x0000 }, /* R8 */
+	{ 0x0000, 0x0000 }, /* R9 */
+	{ 0x0000, 0x0000 }, /* R10 */
+	{ 0x0000, 0x0000 }, /* R11 */
+	{ 0x0000, 0x0000 }, /* R12 */
+	{ 0x0000, 0x0000 }, /* R13 */
+	{ 0x0000, 0x0000 }, /* R14 */
+	{ 0x0000, 0x0000 }, /* R15 */
+	{ 0x0000, 0x0000 }, /* R16 */
+	{ 0x0000, 0x0000 }, /* R17 */
+	{ 0x0000, 0x0000 }, /* R18 */
+	{ 0x0000, 0x0000 }, /* R19 */
+	{ 0x0000, 0x0000 }, /* R20 */
+	{ 0x01C0, 0x01C0 }, /* R21    - Input Mixer (1) */
+	{ 0x0000, 0x0000 }, /* R22 */
+	{ 0x0000, 0x0000 }, /* R23 */
+	{ 0x00DF, 0x01DF }, /* R24    - Left Line Input 1&2 Volume */
+	{ 0x00DF, 0x01DF }, /* R25    - Left Line Input 3&4 Volume */
+	{ 0x00DF, 0x01DF }, /* R26    - Right Line Input 1&2 Volume */
+	{ 0x00DF, 0x01DF }, /* R27    - Right Line Input 3&4 Volume */
+	{ 0x00FF, 0x01FF }, /* R28    - Left Output Volume */
+	{ 0x00FF, 0x01FF }, /* R29    - Right Output Volume */
+	{ 0x0077, 0x0077 }, /* R30    - Line Outputs Volume */
+	{ 0x0030, 0x0030 }, /* R31    - HPOUT2 Volume */
+	{ 0x00FF, 0x01FF }, /* R32    - Left OPGA Volume */
+	{ 0x00FF, 0x01FF }, /* R33    - Right OPGA Volume */
+	{ 0x007F, 0x007F }, /* R34    - SPKMIXL Attenuation */
+	{ 0x017F, 0x017F }, /* R35    - SPKMIXR Attenuation */
+	{ 0x003F, 0x003F }, /* R36    - SPKOUT Mixers */
+	{ 0x003F, 0x003F }, /* R37    - ClassD */
+	{ 0x00FF, 0x01FF }, /* R38    - Speaker Volume Left */
+	{ 0x00FF, 0x01FF }, /* R39    - Speaker Volume Right */
+	{ 0x00FF, 0x00FF }, /* R40    - Input Mixer (2) */
+	{ 0x01B7, 0x01B7 }, /* R41    - Input Mixer (3) */
+	{ 0x01B7, 0x01B7 }, /* R42    - Input Mixer (4) */
+	{ 0x01C7, 0x01C7 }, /* R43    - Input Mixer (5) */
+	{ 0x01C7, 0x01C7 }, /* R44    - Input Mixer (6) */
+	{ 0x01FF, 0x01FF }, /* R45    - Output Mixer (1) */
+	{ 0x01FF, 0x01FF }, /* R46    - Output Mixer (2) */
+	{ 0x0FFF, 0x0FFF }, /* R47    - Output Mixer (3) */
+	{ 0x0FFF, 0x0FFF }, /* R48    - Output Mixer (4) */
+	{ 0x0FFF, 0x0FFF }, /* R49    - Output Mixer (5) */
+	{ 0x0FFF, 0x0FFF }, /* R50    - Output Mixer (6) */
+	{ 0x0038, 0x0038 }, /* R51    - HPOUT2 Mixer */
+	{ 0x0077, 0x0077 }, /* R52    - Line Mixer (1) */
+	{ 0x0077, 0x0077 }, /* R53    - Line Mixer (2) */
+	{ 0x03FF, 0x03FF }, /* R54    - Speaker Mixer */
+	{ 0x00C1, 0x00C1 }, /* R55    - Additional Control */
+	{ 0x00F0, 0x00F0 }, /* R56    - AntiPOP (1) */
+	{ 0x01EF, 0x01EF }, /* R57    - AntiPOP (2) */
+	{ 0x00FF, 0x00FF }, /* R58    - MICBIAS */
+	{ 0x000F, 0x000F }, /* R59    - LDO 1 */
+	{ 0x0007, 0x0007 }, /* R60    - LDO 2 */
+	{ 0x0000, 0x0000 }, /* R61 */
+	{ 0x0000, 0x0000 }, /* R62 */
+	{ 0x0000, 0x0000 }, /* R63 */
+	{ 0x0000, 0x0000 }, /* R64 */
+	{ 0x0000, 0x0000 }, /* R65 */
+	{ 0x0000, 0x0000 }, /* R66 */
+	{ 0x0000, 0x0000 }, /* R67 */
+	{ 0x0000, 0x0000 }, /* R68 */
+	{ 0x0000, 0x0000 }, /* R69 */
+	{ 0x0000, 0x0000 }, /* R70 */
+	{ 0x0000, 0x0000 }, /* R71 */
+	{ 0x0000, 0x0000 }, /* R72 */
+	{ 0x0000, 0x0000 }, /* R73 */
+	{ 0x0000, 0x0000 }, /* R74 */
+	{ 0x0000, 0x0000 }, /* R75 */
+	{ 0x8000, 0x8000 }, /* R76    - Charge Pump (1) */
+	{ 0x0000, 0x0000 }, /* R77 */
+	{ 0x0000, 0x0000 }, /* R78 */
+	{ 0x0000, 0x0000 }, /* R79 */
+	{ 0x0000, 0x0000 }, /* R80 */
+	{ 0x0301, 0x0301 }, /* R81    - Class W (1) */
+	{ 0x0000, 0x0000 }, /* R82 */
+	{ 0x0000, 0x0000 }, /* R83 */
+	{ 0x333F, 0x333F }, /* R84    - DC Servo (1) */
+	{ 0x0FEF, 0x0FEF }, /* R85    - DC Servo (2) */
+	{ 0x0000, 0x0000 }, /* R86 */
+	{ 0xFFFF, 0xFFFF }, /* R87    - DC Servo (4) */
+	{ 0x0333, 0x0000 }, /* R88    - DC Servo Readback */
+	{ 0x0000, 0x0000 }, /* R89 */
+	{ 0x0000, 0x0000 }, /* R90 */
+	{ 0x0000, 0x0000 }, /* R91 */
+	{ 0x0000, 0x0000 }, /* R92 */
+	{ 0x0000, 0x0000 }, /* R93 */
+	{ 0x0000, 0x0000 }, /* R94 */
+	{ 0x0000, 0x0000 }, /* R95 */
+	{ 0x00EE, 0x00EE }, /* R96    - Analogue HP (1) */
+	{ 0x0000, 0x0000 }, /* R97 */
+	{ 0x0000, 0x0000 }, /* R98 */
+	{ 0x0000, 0x0000 }, /* R99 */
+	{ 0x0000, 0x0000 }, /* R100 */
+	{ 0x0000, 0x0000 }, /* R101 */
+	{ 0x0000, 0x0000 }, /* R102 */
+	{ 0x0000, 0x0000 }, /* R103 */
+	{ 0x0000, 0x0000 }, /* R104 */
+	{ 0x0000, 0x0000 }, /* R105 */
+	{ 0x0000, 0x0000 }, /* R106 */
+	{ 0x0000, 0x0000 }, /* R107 */
+	{ 0x0000, 0x0000 }, /* R108 */
+	{ 0x0000, 0x0000 }, /* R109 */
+	{ 0x0000, 0x0000 }, /* R110 */
+	{ 0x0000, 0x0000 }, /* R111 */
+	{ 0x0000, 0x0000 }, /* R112 */
+	{ 0x0000, 0x0000 }, /* R113 */
+	{ 0x0000, 0x0000 }, /* R114 */
+	{ 0x0000, 0x0000 }, /* R115 */
+	{ 0x0000, 0x0000 }, /* R116 */
+	{ 0x0000, 0x0000 }, /* R117 */
+	{ 0x0000, 0x0000 }, /* R118 */
+	{ 0x0000, 0x0000 }, /* R119 */
+	{ 0x0000, 0x0000 }, /* R120 */
+	{ 0x0000, 0x0000 }, /* R121 */
+	{ 0x0000, 0x0000 }, /* R122 */
+	{ 0x0000, 0x0000 }, /* R123 */
+	{ 0x0000, 0x0000 }, /* R124 */
+	{ 0x0000, 0x0000 }, /* R125 */
+	{ 0x0000, 0x0000 }, /* R126 */
+	{ 0x0000, 0x0000 }, /* R127 */
+	{ 0x0000, 0x0000 }, /* R128 */
+	{ 0x0000, 0x0000 }, /* R129 */
+	{ 0x0000, 0x0000 }, /* R130 */
+	{ 0x0000, 0x0000 }, /* R131 */
+	{ 0x0000, 0x0000 }, /* R132 */
+	{ 0x0000, 0x0000 }, /* R133 */
+	{ 0x0000, 0x0000 }, /* R134 */
+	{ 0x0000, 0x0000 }, /* R135 */
+	{ 0x0000, 0x0000 }, /* R136 */
+	{ 0x0000, 0x0000 }, /* R137 */
+	{ 0x0000, 0x0000 }, /* R138 */
+	{ 0x0000, 0x0000 }, /* R139 */
+	{ 0x0000, 0x0000 }, /* R140 */
+	{ 0x0000, 0x0000 }, /* R141 */
+	{ 0x0000, 0x0000 }, /* R142 */
+	{ 0x0000, 0x0000 }, /* R143 */
+	{ 0x0000, 0x0000 }, /* R144 */
+	{ 0x0000, 0x0000 }, /* R145 */
+	{ 0x0000, 0x0000 }, /* R146 */
+	{ 0x0000, 0x0000 }, /* R147 */
+	{ 0x0000, 0x0000 }, /* R148 */
+	{ 0x0000, 0x0000 }, /* R149 */
+	{ 0x0000, 0x0000 }, /* R150 */
+	{ 0x0000, 0x0000 }, /* R151 */
+	{ 0x0000, 0x0000 }, /* R152 */
+	{ 0x0000, 0x0000 }, /* R153 */
+	{ 0x0000, 0x0000 }, /* R154 */
+	{ 0x0000, 0x0000 }, /* R155 */
+	{ 0x0000, 0x0000 }, /* R156 */
+	{ 0x0000, 0x0000 }, /* R157 */
+	{ 0x0000, 0x0000 }, /* R158 */
+	{ 0x0000, 0x0000 }, /* R159 */
+	{ 0x0000, 0x0000 }, /* R160 */
+	{ 0x0000, 0x0000 }, /* R161 */
+	{ 0x0000, 0x0000 }, /* R162 */
+	{ 0x0000, 0x0000 }, /* R163 */
+	{ 0x0000, 0x0000 }, /* R164 */
+	{ 0x0000, 0x0000 }, /* R165 */
+	{ 0x0000, 0x0000 }, /* R166 */
+	{ 0x0000, 0x0000 }, /* R167 */
+	{ 0x0000, 0x0000 }, /* R168 */
+	{ 0x0000, 0x0000 }, /* R169 */
+	{ 0x0000, 0x0000 }, /* R170 */
+	{ 0x0000, 0x0000 }, /* R171 */
+	{ 0x0000, 0x0000 }, /* R172 */
+	{ 0x0000, 0x0000 }, /* R173 */
+	{ 0x0000, 0x0000 }, /* R174 */
+	{ 0x0000, 0x0000 }, /* R175 */
+	{ 0x0000, 0x0000 }, /* R176 */
+	{ 0x0000, 0x0000 }, /* R177 */
+	{ 0x0000, 0x0000 }, /* R178 */
+	{ 0x0000, 0x0000 }, /* R179 */
+	{ 0x0000, 0x0000 }, /* R180 */
+	{ 0x0000, 0x0000 }, /* R181 */
+	{ 0x0000, 0x0000 }, /* R182 */
+	{ 0x0000, 0x0000 }, /* R183 */
+	{ 0x0000, 0x0000 }, /* R184 */
+	{ 0x0000, 0x0000 }, /* R185 */
+	{ 0x0000, 0x0000 }, /* R186 */
+	{ 0x0000, 0x0000 }, /* R187 */
+	{ 0x0000, 0x0000 }, /* R188 */
+	{ 0x0000, 0x0000 }, /* R189 */
+	{ 0x0000, 0x0000 }, /* R190 */
+	{ 0x0000, 0x0000 }, /* R191 */
+	{ 0x0000, 0x0000 }, /* R192 */
+	{ 0x0000, 0x0000 }, /* R193 */
+	{ 0x0000, 0x0000 }, /* R194 */
+	{ 0x0000, 0x0000 }, /* R195 */
+	{ 0x0000, 0x0000 }, /* R196 */
+	{ 0x0000, 0x0000 }, /* R197 */
+	{ 0x0000, 0x0000 }, /* R198 */
+	{ 0x0000, 0x0000 }, /* R199 */
+	{ 0x0000, 0x0000 }, /* R200 */
+	{ 0x0000, 0x0000 }, /* R201 */
+	{ 0x0000, 0x0000 }, /* R202 */
+	{ 0x0000, 0x0000 }, /* R203 */
+	{ 0x0000, 0x0000 }, /* R204 */
+	{ 0x0000, 0x0000 }, /* R205 */
+	{ 0x0000, 0x0000 }, /* R206 */
+	{ 0x0000, 0x0000 }, /* R207 */
+	{ 0x0000, 0x0000 }, /* R208 */
+	{ 0x0000, 0x0000 }, /* R209 */
+	{ 0x0000, 0x0000 }, /* R210 */
+	{ 0x0000, 0x0000 }, /* R211 */
+	{ 0x0000, 0x0000 }, /* R212 */
+	{ 0x0000, 0x0000 }, /* R213 */
+	{ 0x0000, 0x0000 }, /* R214 */
+	{ 0x0000, 0x0000 }, /* R215 */
+	{ 0x0000, 0x0000 }, /* R216 */
+	{ 0x0000, 0x0000 }, /* R217 */
+	{ 0x0000, 0x0000 }, /* R218 */
+	{ 0x0000, 0x0000 }, /* R219 */
+	{ 0x0000, 0x0000 }, /* R220 */
+	{ 0x0000, 0x0000 }, /* R221 */
+	{ 0x0000, 0x0000 }, /* R222 */
+	{ 0x0000, 0x0000 }, /* R223 */
+	{ 0x0000, 0x0000 }, /* R224 */
+	{ 0x0000, 0x0000 }, /* R225 */
+	{ 0x0000, 0x0000 }, /* R226 */
+	{ 0x0000, 0x0000 }, /* R227 */
+	{ 0x0000, 0x0000 }, /* R228 */
+	{ 0x0000, 0x0000 }, /* R229 */
+	{ 0x0000, 0x0000 }, /* R230 */
+	{ 0x0000, 0x0000 }, /* R231 */
+	{ 0x0000, 0x0000 }, /* R232 */
+	{ 0x0000, 0x0000 }, /* R233 */
+	{ 0x0000, 0x0000 }, /* R234 */
+	{ 0x0000, 0x0000 }, /* R235 */
+	{ 0x0000, 0x0000 }, /* R236 */
+	{ 0x0000, 0x0000 }, /* R237 */
+	{ 0x0000, 0x0000 }, /* R238 */
+	{ 0x0000, 0x0000 }, /* R239 */
+	{ 0x0000, 0x0000 }, /* R240 */
+	{ 0x0000, 0x0000 }, /* R241 */
+	{ 0x0000, 0x0000 }, /* R242 */
+	{ 0x0000, 0x0000 }, /* R243 */
+	{ 0x0000, 0x0000 }, /* R244 */
+	{ 0x0000, 0x0000 }, /* R245 */
+	{ 0x0000, 0x0000 }, /* R246 */
+	{ 0x0000, 0x0000 }, /* R247 */
+	{ 0x0000, 0x0000 }, /* R248 */
+	{ 0x0000, 0x0000 }, /* R249 */
+	{ 0x0000, 0x0000 }, /* R250 */
+	{ 0x0000, 0x0000 }, /* R251 */
+	{ 0x0000, 0x0000 }, /* R252 */
+	{ 0x0000, 0x0000 }, /* R253 */
+	{ 0x0000, 0x0000 }, /* R254 */
+	{ 0x0000, 0x0000 }, /* R255 */
+	{ 0x000F, 0x0000 }, /* R256   - Chip Revision */
+	{ 0x0074, 0x0074 }, /* R257   - Control Interface */
+	{ 0x0000, 0x0000 }, /* R258 */
+	{ 0x0000, 0x0000 }, /* R259 */
+	{ 0x0000, 0x0000 }, /* R260 */
+	{ 0x0000, 0x0000 }, /* R261 */
+	{ 0x0000, 0x0000 }, /* R262 */
+	{ 0x0000, 0x0000 }, /* R263 */
+	{ 0x0000, 0x0000 }, /* R264 */
+	{ 0x0000, 0x0000 }, /* R265 */
+	{ 0x0000, 0x0000 }, /* R266 */
+	{ 0x0000, 0x0000 }, /* R267 */
+	{ 0x0000, 0x0000 }, /* R268 */
+	{ 0x0000, 0x0000 }, /* R269 */
+	{ 0x0000, 0x0000 }, /* R270 */
+	{ 0x0000, 0x0000 }, /* R271 */
+	{ 0x807F, 0x837F }, /* R272   - Write Sequencer Ctrl (1) */
+	{ 0x017F, 0x0000 }, /* R273   - Write Sequencer Ctrl (2) */
+	{ 0x0000, 0x0000 }, /* R274 */
+	{ 0x0000, 0x0000 }, /* R275 */
+	{ 0x0000, 0x0000 }, /* R276 */
+	{ 0x0000, 0x0000 }, /* R277 */
+	{ 0x0000, 0x0000 }, /* R278 */
+	{ 0x0000, 0x0000 }, /* R279 */
+	{ 0x0000, 0x0000 }, /* R280 */
+	{ 0x0000, 0x0000 }, /* R281 */
+	{ 0x0000, 0x0000 }, /* R282 */
+	{ 0x0000, 0x0000 }, /* R283 */
+	{ 0x0000, 0x0000 }, /* R284 */
+	{ 0x0000, 0x0000 }, /* R285 */
+	{ 0x0000, 0x0000 }, /* R286 */
+	{ 0x0000, 0x0000 }, /* R287 */
+	{ 0x0000, 0x0000 }, /* R288 */
+	{ 0x0000, 0x0000 }, /* R289 */
+	{ 0x0000, 0x0000 }, /* R290 */
+	{ 0x0000, 0x0000 }, /* R291 */
+	{ 0x0000, 0x0000 }, /* R292 */
+	{ 0x0000, 0x0000 }, /* R293 */
+	{ 0x0000, 0x0000 }, /* R294 */
+	{ 0x0000, 0x0000 }, /* R295 */
+	{ 0x0000, 0x0000 }, /* R296 */
+	{ 0x0000, 0x0000 }, /* R297 */
+	{ 0x0000, 0x0000 }, /* R298 */
+	{ 0x0000, 0x0000 }, /* R299 */
+	{ 0x0000, 0x0000 }, /* R300 */
+	{ 0x0000, 0x0000 }, /* R301 */
+	{ 0x0000, 0x0000 }, /* R302 */
+	{ 0x0000, 0x0000 }, /* R303 */
+	{ 0x0000, 0x0000 }, /* R304 */
+	{ 0x0000, 0x0000 }, /* R305 */
+	{ 0x0000, 0x0000 }, /* R306 */
+	{ 0x0000, 0x0000 }, /* R307 */
+	{ 0x0000, 0x0000 }, /* R308 */
+	{ 0x0000, 0x0000 }, /* R309 */
+	{ 0x0000, 0x0000 }, /* R310 */
+	{ 0x0000, 0x0000 }, /* R311 */
+	{ 0x0000, 0x0000 }, /* R312 */
+	{ 0x0000, 0x0000 }, /* R313 */
+	{ 0x0000, 0x0000 }, /* R314 */
+	{ 0x0000, 0x0000 }, /* R315 */
+	{ 0x0000, 0x0000 }, /* R316 */
+	{ 0x0000, 0x0000 }, /* R317 */
+	{ 0x0000, 0x0000 }, /* R318 */
+	{ 0x0000, 0x0000 }, /* R319 */
+	{ 0x0000, 0x0000 }, /* R320 */
+	{ 0x0000, 0x0000 }, /* R321 */
+	{ 0x0000, 0x0000 }, /* R322 */
+	{ 0x0000, 0x0000 }, /* R323 */
+	{ 0x0000, 0x0000 }, /* R324 */
+	{ 0x0000, 0x0000 }, /* R325 */
+	{ 0x0000, 0x0000 }, /* R326 */
+	{ 0x0000, 0x0000 }, /* R327 */
+	{ 0x0000, 0x0000 }, /* R328 */
+	{ 0x0000, 0x0000 }, /* R329 */
+	{ 0x0000, 0x0000 }, /* R330 */
+	{ 0x0000, 0x0000 }, /* R331 */
+	{ 0x0000, 0x0000 }, /* R332 */
+	{ 0x0000, 0x0000 }, /* R333 */
+	{ 0x0000, 0x0000 }, /* R334 */
+	{ 0x0000, 0x0000 }, /* R335 */
+	{ 0x0000, 0x0000 }, /* R336 */
+	{ 0x0000, 0x0000 }, /* R337 */
+	{ 0x0000, 0x0000 }, /* R338 */
+	{ 0x0000, 0x0000 }, /* R339 */
+	{ 0x0000, 0x0000 }, /* R340 */
+	{ 0x0000, 0x0000 }, /* R341 */
+	{ 0x0000, 0x0000 }, /* R342 */
+	{ 0x0000, 0x0000 }, /* R343 */
+	{ 0x0000, 0x0000 }, /* R344 */
+	{ 0x0000, 0x0000 }, /* R345 */
+	{ 0x0000, 0x0000 }, /* R346 */
+	{ 0x0000, 0x0000 }, /* R347 */
+	{ 0x0000, 0x0000 }, /* R348 */
+	{ 0x0000, 0x0000 }, /* R349 */
+	{ 0x0000, 0x0000 }, /* R350 */
+	{ 0x0000, 0x0000 }, /* R351 */
+	{ 0x0000, 0x0000 }, /* R352 */
+	{ 0x0000, 0x0000 }, /* R353 */
+	{ 0x0000, 0x0000 }, /* R354 */
+	{ 0x0000, 0x0000 }, /* R355 */
+	{ 0x0000, 0x0000 }, /* R356 */
+	{ 0x0000, 0x0000 }, /* R357 */
+	{ 0x0000, 0x0000 }, /* R358 */
+	{ 0x0000, 0x0000 }, /* R359 */
+	{ 0x0000, 0x0000 }, /* R360 */
+	{ 0x0000, 0x0000 }, /* R361 */
+	{ 0x0000, 0x0000 }, /* R362 */
+	{ 0x0000, 0x0000 }, /* R363 */
+	{ 0x0000, 0x0000 }, /* R364 */
+	{ 0x0000, 0x0000 }, /* R365 */
+	{ 0x0000, 0x0000 }, /* R366 */
+	{ 0x0000, 0x0000 }, /* R367 */
+	{ 0x0000, 0x0000 }, /* R368 */
+	{ 0x0000, 0x0000 }, /* R369 */
+	{ 0x0000, 0x0000 }, /* R370 */
+	{ 0x0000, 0x0000 }, /* R371 */
+	{ 0x0000, 0x0000 }, /* R372 */
+	{ 0x0000, 0x0000 }, /* R373 */
+	{ 0x0000, 0x0000 }, /* R374 */
+	{ 0x0000, 0x0000 }, /* R375 */
+	{ 0x0000, 0x0000 }, /* R376 */
+	{ 0x0000, 0x0000 }, /* R377 */
+	{ 0x0000, 0x0000 }, /* R378 */
+	{ 0x0000, 0x0000 }, /* R379 */
+	{ 0x0000, 0x0000 }, /* R380 */
+	{ 0x0000, 0x0000 }, /* R381 */
+	{ 0x0000, 0x0000 }, /* R382 */
+	{ 0x0000, 0x0000 }, /* R383 */
+	{ 0x0000, 0x0000 }, /* R384 */
+	{ 0x0000, 0x0000 }, /* R385 */
+	{ 0x0000, 0x0000 }, /* R386 */
+	{ 0x0000, 0x0000 }, /* R387 */
+	{ 0x0000, 0x0000 }, /* R388 */
+	{ 0x0000, 0x0000 }, /* R389 */
+	{ 0x0000, 0x0000 }, /* R390 */
+	{ 0x0000, 0x0000 }, /* R391 */
+	{ 0x0000, 0x0000 }, /* R392 */
+	{ 0x0000, 0x0000 }, /* R393 */
+	{ 0x0000, 0x0000 }, /* R394 */
+	{ 0x0000, 0x0000 }, /* R395 */
+	{ 0x0000, 0x0000 }, /* R396 */
+	{ 0x0000, 0x0000 }, /* R397 */
+	{ 0x0000, 0x0000 }, /* R398 */
+	{ 0x0000, 0x0000 }, /* R399 */
+	{ 0x0000, 0x0000 }, /* R400 */
+	{ 0x0000, 0x0000 }, /* R401 */
+	{ 0x0000, 0x0000 }, /* R402 */
+	{ 0x0000, 0x0000 }, /* R403 */
+	{ 0x0000, 0x0000 }, /* R404 */
+	{ 0x0000, 0x0000 }, /* R405 */
+	{ 0x0000, 0x0000 }, /* R406 */
+	{ 0x0000, 0x0000 }, /* R407 */
+	{ 0x0000, 0x0000 }, /* R408 */
+	{ 0x0000, 0x0000 }, /* R409 */
+	{ 0x0000, 0x0000 }, /* R410 */
+	{ 0x0000, 0x0000 }, /* R411 */
+	{ 0x0000, 0x0000 }, /* R412 */
+	{ 0x0000, 0x0000 }, /* R413 */
+	{ 0x0000, 0x0000 }, /* R414 */
+	{ 0x0000, 0x0000 }, /* R415 */
+	{ 0x0000, 0x0000 }, /* R416 */
+	{ 0x0000, 0x0000 }, /* R417 */
+	{ 0x0000, 0x0000 }, /* R418 */
+	{ 0x0000, 0x0000 }, /* R419 */
+	{ 0x0000, 0x0000 }, /* R420 */
+	{ 0x0000, 0x0000 }, /* R421 */
+	{ 0x0000, 0x0000 }, /* R422 */
+	{ 0x0000, 0x0000 }, /* R423 */
+	{ 0x0000, 0x0000 }, /* R424 */
+	{ 0x0000, 0x0000 }, /* R425 */
+	{ 0x0000, 0x0000 }, /* R426 */
+	{ 0x0000, 0x0000 }, /* R427 */
+	{ 0x0000, 0x0000 }, /* R428 */
+	{ 0x0000, 0x0000 }, /* R429 */
+	{ 0x0000, 0x0000 }, /* R430 */
+	{ 0x0000, 0x0000 }, /* R431 */
+	{ 0x0000, 0x0000 }, /* R432 */
+	{ 0x0000, 0x0000 }, /* R433 */
+	{ 0x0000, 0x0000 }, /* R434 */
+	{ 0x0000, 0x0000 }, /* R435 */
+	{ 0x0000, 0x0000 }, /* R436 */
+	{ 0x0000, 0x0000 }, /* R437 */
+	{ 0x0000, 0x0000 }, /* R438 */
+	{ 0x0000, 0x0000 }, /* R439 */
+	{ 0x0000, 0x0000 }, /* R440 */
+	{ 0x0000, 0x0000 }, /* R441 */
+	{ 0x0000, 0x0000 }, /* R442 */
+	{ 0x0000, 0x0000 }, /* R443 */
+	{ 0x0000, 0x0000 }, /* R444 */
+	{ 0x0000, 0x0000 }, /* R445 */
+	{ 0x0000, 0x0000 }, /* R446 */
+	{ 0x0000, 0x0000 }, /* R447 */
+	{ 0x0000, 0x0000 }, /* R448 */
+	{ 0x0000, 0x0000 }, /* R449 */
+	{ 0x0000, 0x0000 }, /* R450 */
+	{ 0x0000, 0x0000 }, /* R451 */
+	{ 0x0000, 0x0000 }, /* R452 */
+	{ 0x0000, 0x0000 }, /* R453 */
+	{ 0x0000, 0x0000 }, /* R454 */
+	{ 0x0000, 0x0000 }, /* R455 */
+	{ 0x0000, 0x0000 }, /* R456 */
+	{ 0x0000, 0x0000 }, /* R457 */
+	{ 0x0000, 0x0000 }, /* R458 */
+	{ 0x0000, 0x0000 }, /* R459 */
+	{ 0x0000, 0x0000 }, /* R460 */
+	{ 0x0000, 0x0000 }, /* R461 */
+	{ 0x0000, 0x0000 }, /* R462 */
+	{ 0x0000, 0x0000 }, /* R463 */
+	{ 0x0000, 0x0000 }, /* R464 */
+	{ 0x0000, 0x0000 }, /* R465 */
+	{ 0x0000, 0x0000 }, /* R466 */
+	{ 0x0000, 0x0000 }, /* R467 */
+	{ 0x0000, 0x0000 }, /* R468 */
+	{ 0x0000, 0x0000 }, /* R469 */
+	{ 0x0000, 0x0000 }, /* R470 */
+	{ 0x0000, 0x0000 }, /* R471 */
+	{ 0x0000, 0x0000 }, /* R472 */
+	{ 0x0000, 0x0000 }, /* R473 */
+	{ 0x0000, 0x0000 }, /* R474 */
+	{ 0x0000, 0x0000 }, /* R475 */
+	{ 0x0000, 0x0000 }, /* R476 */
+	{ 0x0000, 0x0000 }, /* R477 */
+	{ 0x0000, 0x0000 }, /* R478 */
+	{ 0x0000, 0x0000 }, /* R479 */
+	{ 0x0000, 0x0000 }, /* R480 */
+	{ 0x0000, 0x0000 }, /* R481 */
+	{ 0x0000, 0x0000 }, /* R482 */
+	{ 0x0000, 0x0000 }, /* R483 */
+	{ 0x0000, 0x0000 }, /* R484 */
+	{ 0x0000, 0x0000 }, /* R485 */
+	{ 0x0000, 0x0000 }, /* R486 */
+	{ 0x0000, 0x0000 }, /* R487 */
+	{ 0x0000, 0x0000 }, /* R488 */
+	{ 0x0000, 0x0000 }, /* R489 */
+	{ 0x0000, 0x0000 }, /* R490 */
+	{ 0x0000, 0x0000 }, /* R491 */
+	{ 0x0000, 0x0000 }, /* R492 */
+	{ 0x0000, 0x0000 }, /* R493 */
+	{ 0x0000, 0x0000 }, /* R494 */
+	{ 0x0000, 0x0000 }, /* R495 */
+	{ 0x0000, 0x0000 }, /* R496 */
+	{ 0x0000, 0x0000 }, /* R497 */
+	{ 0x0000, 0x0000 }, /* R498 */
+	{ 0x0000, 0x0000 }, /* R499 */
+	{ 0x0000, 0x0000 }, /* R500 */
+	{ 0x0000, 0x0000 }, /* R501 */
+	{ 0x0000, 0x0000 }, /* R502 */
+	{ 0x0000, 0x0000 }, /* R503 */
+	{ 0x0000, 0x0000 }, /* R504 */
+	{ 0x0000, 0x0000 }, /* R505 */
+	{ 0x0000, 0x0000 }, /* R506 */
+	{ 0x0000, 0x0000 }, /* R507 */
+	{ 0x0000, 0x0000 }, /* R508 */
+	{ 0x0000, 0x0000 }, /* R509 */
+	{ 0x0000, 0x0000 }, /* R510 */
+	{ 0x0000, 0x0000 }, /* R511 */
+	{ 0x001F, 0x001F }, /* R512   - AIF1 Clocking (1) */
+	{ 0x003F, 0x003F }, /* R513   - AIF1 Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R514 */
+	{ 0x0000, 0x0000 }, /* R515 */
+	{ 0x001F, 0x001F }, /* R516   - AIF2 Clocking (1) */
+	{ 0x003F, 0x003F }, /* R517   - AIF2 Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R518 */
+	{ 0x0000, 0x0000 }, /* R519 */
+	{ 0x001F, 0x001F }, /* R520   - Clocking (1) */
+	{ 0x0777, 0x0777 }, /* R521   - Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R522 */
+	{ 0x0000, 0x0000 }, /* R523 */
+	{ 0x0000, 0x0000 }, /* R524 */
+	{ 0x0000, 0x0000 }, /* R525 */
+	{ 0x0000, 0x0000 }, /* R526 */
+	{ 0x0000, 0x0000 }, /* R527 */
+	{ 0x00FF, 0x00FF }, /* R528   - AIF1 Rate */
+	{ 0x00FF, 0x00FF }, /* R529   - AIF2 Rate */
+	{ 0x000F, 0x0000 }, /* R530   - Rate Status */
+	{ 0x0000, 0x0000 }, /* R531 */
+	{ 0x0000, 0x0000 }, /* R532 */
+	{ 0x0000, 0x0000 }, /* R533 */
+	{ 0x0000, 0x0000 }, /* R534 */
+	{ 0x0000, 0x0000 }, /* R535 */
+	{ 0x0000, 0x0000 }, /* R536 */
+	{ 0x0000, 0x0000 }, /* R537 */
+	{ 0x0000, 0x0000 }, /* R538 */
+	{ 0x0000, 0x0000 }, /* R539 */
+	{ 0x0000, 0x0000 }, /* R540 */
+	{ 0x0000, 0x0000 }, /* R541 */
+	{ 0x0000, 0x0000 }, /* R542 */
+	{ 0x0000, 0x0000 }, /* R543 */
+	{ 0x0007, 0x0007 }, /* R544   - FLL1 Control (1) */
+	{ 0x3F77, 0x3F77 }, /* R545   - FLL1 Control (2) */
+	{ 0xFFFF, 0xFFFF }, /* R546   - FLL1 Control (3) */
+	{ 0x7FEF, 0x7FEF }, /* R547   - FLL1 Control (4) */
+	{ 0x1FDB, 0x1FDB }, /* R548   - FLL1 Control (5) */
+	{ 0x0000, 0x0000 }, /* R549 */
+	{ 0x0000, 0x0000 }, /* R550 */
+	{ 0x0000, 0x0000 }, /* R551 */
+	{ 0x0000, 0x0000 }, /* R552 */
+	{ 0x0000, 0x0000 }, /* R553 */
+	{ 0x0000, 0x0000 }, /* R554 */
+	{ 0x0000, 0x0000 }, /* R555 */
+	{ 0x0000, 0x0000 }, /* R556 */
+	{ 0x0000, 0x0000 }, /* R557 */
+	{ 0x0000, 0x0000 }, /* R558 */
+	{ 0x0000, 0x0000 }, /* R559 */
+	{ 0x0000, 0x0000 }, /* R560 */
+	{ 0x0000, 0x0000 }, /* R561 */
+	{ 0x0000, 0x0000 }, /* R562 */
+	{ 0x0000, 0x0000 }, /* R563 */
+	{ 0x0000, 0x0000 }, /* R564 */
+	{ 0x0000, 0x0000 }, /* R565 */
+	{ 0x0000, 0x0000 }, /* R566 */
+	{ 0x0000, 0x0000 }, /* R567 */
+	{ 0x0000, 0x0000 }, /* R568 */
+	{ 0x0000, 0x0000 }, /* R569 */
+	{ 0x0000, 0x0000 }, /* R570 */
+	{ 0x0000, 0x0000 }, /* R571 */
+	{ 0x0000, 0x0000 }, /* R572 */
+	{ 0x0000, 0x0000 }, /* R573 */
+	{ 0x0000, 0x0000 }, /* R574 */
+	{ 0x0000, 0x0000 }, /* R575 */
+	{ 0x0007, 0x0007 }, /* R576   - FLL2 Control (1) */
+	{ 0x3F77, 0x3F77 }, /* R577   - FLL2 Control (2) */
+	{ 0xFFFF, 0xFFFF }, /* R578   - FLL2 Control (3) */
+	{ 0x7FEF, 0x7FEF }, /* R579   - FLL2 Control (4) */
+	{ 0x1FDB, 0x1FDB }, /* R580   - FLL2 Control (5) */
+	{ 0x0000, 0x0000 }, /* R581 */
+	{ 0x0000, 0x0000 }, /* R582 */
+	{ 0x0000, 0x0000 }, /* R583 */
+	{ 0x0000, 0x0000 }, /* R584 */
+	{ 0x0000, 0x0000 }, /* R585 */
+	{ 0x0000, 0x0000 }, /* R586 */
+	{ 0x0000, 0x0000 }, /* R587 */
+	{ 0x0000, 0x0000 }, /* R588 */
+	{ 0x0000, 0x0000 }, /* R589 */
+	{ 0x0000, 0x0000 }, /* R590 */
+	{ 0x0000, 0x0000 }, /* R591 */
+	{ 0x0000, 0x0000 }, /* R592 */
+	{ 0x0000, 0x0000 }, /* R593 */
+	{ 0x0000, 0x0000 }, /* R594 */
+	{ 0x0000, 0x0000 }, /* R595 */
+	{ 0x0000, 0x0000 }, /* R596 */
+	{ 0x0000, 0x0000 }, /* R597 */
+	{ 0x0000, 0x0000 }, /* R598 */
+	{ 0x0000, 0x0000 }, /* R599 */
+	{ 0x0000, 0x0000 }, /* R600 */
+	{ 0x0000, 0x0000 }, /* R601 */
+	{ 0x0000, 0x0000 }, /* R602 */
+	{ 0x0000, 0x0000 }, /* R603 */
+	{ 0x0000, 0x0000 }, /* R604 */
+	{ 0x0000, 0x0000 }, /* R605 */
+	{ 0x0000, 0x0000 }, /* R606 */
+	{ 0x0000, 0x0000 }, /* R607 */
+	{ 0x0000, 0x0000 }, /* R608 */
+	{ 0x0000, 0x0000 }, /* R609 */
+	{ 0x0000, 0x0000 }, /* R610 */
+	{ 0x0000, 0x0000 }, /* R611 */
+	{ 0x0000, 0x0000 }, /* R612 */
+	{ 0x0000, 0x0000 }, /* R613 */
+	{ 0x0000, 0x0000 }, /* R614 */
+	{ 0x0000, 0x0000 }, /* R615 */
+	{ 0x0000, 0x0000 }, /* R616 */
+	{ 0x0000, 0x0000 }, /* R617 */
+	{ 0x0000, 0x0000 }, /* R618 */
+	{ 0x0000, 0x0000 }, /* R619 */
+	{ 0x0000, 0x0000 }, /* R620 */
+	{ 0x0000, 0x0000 }, /* R621 */
+	{ 0x0000, 0x0000 }, /* R622 */
+	{ 0x0000, 0x0000 }, /* R623 */
+	{ 0x0000, 0x0000 }, /* R624 */
+	{ 0x0000, 0x0000 }, /* R625 */
+	{ 0x0000, 0x0000 }, /* R626 */
+	{ 0x0000, 0x0000 }, /* R627 */
+	{ 0x0000, 0x0000 }, /* R628 */
+	{ 0x0000, 0x0000 }, /* R629 */
+	{ 0x0000, 0x0000 }, /* R630 */
+	{ 0x0000, 0x0000 }, /* R631 */
+	{ 0x0000, 0x0000 }, /* R632 */
+	{ 0x0000, 0x0000 }, /* R633 */
+	{ 0x0000, 0x0000 }, /* R634 */
+	{ 0x0000, 0x0000 }, /* R635 */
+	{ 0x0000, 0x0000 }, /* R636 */
+	{ 0x0000, 0x0000 }, /* R637 */
+	{ 0x0000, 0x0000 }, /* R638 */
+	{ 0x0000, 0x0000 }, /* R639 */
+	{ 0x0000, 0x0000 }, /* R640 */
+	{ 0x0000, 0x0000 }, /* R641 */
+	{ 0x0000, 0x0000 }, /* R642 */
+	{ 0x0000, 0x0000 }, /* R643 */
+	{ 0x0000, 0x0000 }, /* R644 */
+	{ 0x0000, 0x0000 }, /* R645 */
+	{ 0x0000, 0x0000 }, /* R646 */
+	{ 0x0000, 0x0000 }, /* R647 */
+	{ 0x0000, 0x0000 }, /* R648 */
+	{ 0x0000, 0x0000 }, /* R649 */
+	{ 0x0000, 0x0000 }, /* R650 */
+	{ 0x0000, 0x0000 }, /* R651 */
+	{ 0x0000, 0x0000 }, /* R652 */
+	{ 0x0000, 0x0000 }, /* R653 */
+	{ 0x0000, 0x0000 }, /* R654 */
+	{ 0x0000, 0x0000 }, /* R655 */
+	{ 0x0000, 0x0000 }, /* R656 */
+	{ 0x0000, 0x0000 }, /* R657 */
+	{ 0x0000, 0x0000 }, /* R658 */
+	{ 0x0000, 0x0000 }, /* R659 */
+	{ 0x0000, 0x0000 }, /* R660 */
+	{ 0x0000, 0x0000 }, /* R661 */
+	{ 0x0000, 0x0000 }, /* R662 */
+	{ 0x0000, 0x0000 }, /* R663 */
+	{ 0x0000, 0x0000 }, /* R664 */
+	{ 0x0000, 0x0000 }, /* R665 */
+	{ 0x0000, 0x0000 }, /* R666 */
+	{ 0x0000, 0x0000 }, /* R667 */
+	{ 0x0000, 0x0000 }, /* R668 */
+	{ 0x0000, 0x0000 }, /* R669 */
+	{ 0x0000, 0x0000 }, /* R670 */
+	{ 0x0000, 0x0000 }, /* R671 */
+	{ 0x0000, 0x0000 }, /* R672 */
+	{ 0x0000, 0x0000 }, /* R673 */
+	{ 0x0000, 0x0000 }, /* R674 */
+	{ 0x0000, 0x0000 }, /* R675 */
+	{ 0x0000, 0x0000 }, /* R676 */
+	{ 0x0000, 0x0000 }, /* R677 */
+	{ 0x0000, 0x0000 }, /* R678 */
+	{ 0x0000, 0x0000 }, /* R679 */
+	{ 0x0000, 0x0000 }, /* R680 */
+	{ 0x0000, 0x0000 }, /* R681 */
+	{ 0x0000, 0x0000 }, /* R682 */
+	{ 0x0000, 0x0000 }, /* R683 */
+	{ 0x0000, 0x0000 }, /* R684 */
+	{ 0x0000, 0x0000 }, /* R685 */
+	{ 0x0000, 0x0000 }, /* R686 */
+	{ 0x0000, 0x0000 }, /* R687 */
+	{ 0x0000, 0x0000 }, /* R688 */
+	{ 0x0000, 0x0000 }, /* R689 */
+	{ 0x0000, 0x0000 }, /* R690 */
+	{ 0x0000, 0x0000 }, /* R691 */
+	{ 0x0000, 0x0000 }, /* R692 */
+	{ 0x0000, 0x0000 }, /* R693 */
+	{ 0x0000, 0x0000 }, /* R694 */
+	{ 0x0000, 0x0000 }, /* R695 */
+	{ 0x0000, 0x0000 }, /* R696 */
+	{ 0x0000, 0x0000 }, /* R697 */
+	{ 0x0000, 0x0000 }, /* R698 */
+	{ 0x0000, 0x0000 }, /* R699 */
+	{ 0x0000, 0x0000 }, /* R700 */
+	{ 0x0000, 0x0000 }, /* R701 */
+	{ 0x0000, 0x0000 }, /* R702 */
+	{ 0x0000, 0x0000 }, /* R703 */
+	{ 0x0000, 0x0000 }, /* R704 */
+	{ 0x0000, 0x0000 }, /* R705 */
+	{ 0x0000, 0x0000 }, /* R706 */
+	{ 0x0000, 0x0000 }, /* R707 */
+	{ 0x0000, 0x0000 }, /* R708 */
+	{ 0x0000, 0x0000 }, /* R709 */
+	{ 0x0000, 0x0000 }, /* R710 */
+	{ 0x0000, 0x0000 }, /* R711 */
+	{ 0x0000, 0x0000 }, /* R712 */
+	{ 0x0000, 0x0000 }, /* R713 */
+	{ 0x0000, 0x0000 }, /* R714 */
+	{ 0x0000, 0x0000 }, /* R715 */
+	{ 0x0000, 0x0000 }, /* R716 */
+	{ 0x0000, 0x0000 }, /* R717 */
+	{ 0x0000, 0x0000 }, /* R718 */
+	{ 0x0000, 0x0000 }, /* R719 */
+	{ 0x0000, 0x0000 }, /* R720 */
+	{ 0x0000, 0x0000 }, /* R721 */
+	{ 0x0000, 0x0000 }, /* R722 */
+	{ 0x0000, 0x0000 }, /* R723 */
+	{ 0x0000, 0x0000 }, /* R724 */
+	{ 0x0000, 0x0000 }, /* R725 */
+	{ 0x0000, 0x0000 }, /* R726 */
+	{ 0x0000, 0x0000 }, /* R727 */
+	{ 0x0000, 0x0000 }, /* R728 */
+	{ 0x0000, 0x0000 }, /* R729 */
+	{ 0x0000, 0x0000 }, /* R730 */
+	{ 0x0000, 0x0000 }, /* R731 */
+	{ 0x0000, 0x0000 }, /* R732 */
+	{ 0x0000, 0x0000 }, /* R733 */
+	{ 0x0000, 0x0000 }, /* R734 */
+	{ 0x0000, 0x0000 }, /* R735 */
+	{ 0x0000, 0x0000 }, /* R736 */
+	{ 0x0000, 0x0000 }, /* R737 */
+	{ 0x0000, 0x0000 }, /* R738 */
+	{ 0x0000, 0x0000 }, /* R739 */
+	{ 0x0000, 0x0000 }, /* R740 */
+	{ 0x0000, 0x0000 }, /* R741 */
+	{ 0x0000, 0x0000 }, /* R742 */
+	{ 0x0000, 0x0000 }, /* R743 */
+	{ 0x0000, 0x0000 }, /* R744 */
+	{ 0x0000, 0x0000 }, /* R745 */
+	{ 0x0000, 0x0000 }, /* R746 */
+	{ 0x0000, 0x0000 }, /* R747 */
+	{ 0x0000, 0x0000 }, /* R748 */
+	{ 0x0000, 0x0000 }, /* R749 */
+	{ 0x0000, 0x0000 }, /* R750 */
+	{ 0x0000, 0x0000 }, /* R751 */
+	{ 0x0000, 0x0000 }, /* R752 */
+	{ 0x0000, 0x0000 }, /* R753 */
+	{ 0x0000, 0x0000 }, /* R754 */
+	{ 0x0000, 0x0000 }, /* R755 */
+	{ 0x0000, 0x0000 }, /* R756 */
+	{ 0x0000, 0x0000 }, /* R757 */
+	{ 0x0000, 0x0000 }, /* R758 */
+	{ 0x0000, 0x0000 }, /* R759 */
+	{ 0x0000, 0x0000 }, /* R760 */
+	{ 0x0000, 0x0000 }, /* R761 */
+	{ 0x0000, 0x0000 }, /* R762 */
+	{ 0x0000, 0x0000 }, /* R763 */
+	{ 0x0000, 0x0000 }, /* R764 */
+	{ 0x0000, 0x0000 }, /* R765 */
+	{ 0x0000, 0x0000 }, /* R766 */
+	{ 0x0000, 0x0000 }, /* R767 */
+	{ 0xE1F8, 0xE1F8 }, /* R768   - AIF1 Control (1) */
+	{ 0xCD1F, 0xCD1F }, /* R769   - AIF1 Control (2) */
+	{ 0xF000, 0xF000 }, /* R770   - AIF1 Master/Slave */
+	{ 0x01F0, 0x01F0 }, /* R771   - AIF1 BCLK */
+	{ 0x0FFF, 0x0FFF }, /* R772   - AIF1ADC LRCLK */
+	{ 0x0FFF, 0x0FFF }, /* R773   - AIF1DAC LRCLK */
+	{ 0x0003, 0x0003 }, /* R774   - AIF1DAC Data */
+	{ 0x0003, 0x0003 }, /* R775   - AIF1ADC Data */
+	{ 0x0000, 0x0000 }, /* R776 */
+	{ 0x0000, 0x0000 }, /* R777 */
+	{ 0x0000, 0x0000 }, /* R778 */
+	{ 0x0000, 0x0000 }, /* R779 */
+	{ 0x0000, 0x0000 }, /* R780 */
+	{ 0x0000, 0x0000 }, /* R781 */
+	{ 0x0000, 0x0000 }, /* R782 */
+	{ 0x0000, 0x0000 }, /* R783 */
+	{ 0xF1F8, 0xF1F8 }, /* R784   - AIF2 Control (1) */
+	{ 0xFD1F, 0xFD1F }, /* R785   - AIF2 Control (2) */
+	{ 0xF000, 0xF000 }, /* R786   - AIF2 Master/Slave */
+	{ 0x01F0, 0x01F0 }, /* R787   - AIF2 BCLK */
+	{ 0x0FFF, 0x0FFF }, /* R788   - AIF2ADC LRCLK */
+	{ 0x0FFF, 0x0FFF }, /* R789   - AIF2DAC LRCLK */
+	{ 0x0003, 0x0003 }, /* R790   - AIF2DAC Data */
+	{ 0x0003, 0x0003 }, /* R791   - AIF2ADC Data */
+	{ 0x0000, 0x0000 }, /* R792 */
+	{ 0x0000, 0x0000 }, /* R793 */
+	{ 0x0000, 0x0000 }, /* R794 */
+	{ 0x0000, 0x0000 }, /* R795 */
+	{ 0x0000, 0x0000 }, /* R796 */
+	{ 0x0000, 0x0000 }, /* R797 */
+	{ 0x0000, 0x0000 }, /* R798 */
+	{ 0x0000, 0x0000 }, /* R799 */
+	{ 0x0000, 0x0000 }, /* R800 */
+	{ 0x0000, 0x0000 }, /* R801 */
+	{ 0x0000, 0x0000 }, /* R802 */
+	{ 0x0000, 0x0000 }, /* R803 */
+	{ 0x0000, 0x0000 }, /* R804 */
+	{ 0x0000, 0x0000 }, /* R805 */
+	{ 0x0000, 0x0000 }, /* R806 */
+	{ 0x0000, 0x0000 }, /* R807 */
+	{ 0x0000, 0x0000 }, /* R808 */
+	{ 0x0000, 0x0000 }, /* R809 */
+	{ 0x0000, 0x0000 }, /* R810 */
+	{ 0x0000, 0x0000 }, /* R811 */
+	{ 0x0000, 0x0000 }, /* R812 */
+	{ 0x0000, 0x0000 }, /* R813 */
+	{ 0x0000, 0x0000 }, /* R814 */
+	{ 0x0000, 0x0000 }, /* R815 */
+	{ 0x0000, 0x0000 }, /* R816 */
+	{ 0x0000, 0x0000 }, /* R817 */
+	{ 0x0000, 0x0000 }, /* R818 */
+	{ 0x0000, 0x0000 }, /* R819 */
+	{ 0x0000, 0x0000 }, /* R820 */
+	{ 0x0000, 0x0000 }, /* R821 */
+	{ 0x0000, 0x0000 }, /* R822 */
+	{ 0x0000, 0x0000 }, /* R823 */
+	{ 0x0000, 0x0000 }, /* R824 */
+	{ 0x0000, 0x0000 }, /* R825 */
+	{ 0x0000, 0x0000 }, /* R826 */
+	{ 0x0000, 0x0000 }, /* R827 */
+	{ 0x0000, 0x0000 }, /* R828 */
+	{ 0x0000, 0x0000 }, /* R829 */
+	{ 0x0000, 0x0000 }, /* R830 */
+	{ 0x0000, 0x0000 }, /* R831 */
+	{ 0x0000, 0x0000 }, /* R832 */
+	{ 0x0000, 0x0000 }, /* R833 */
+	{ 0x0000, 0x0000 }, /* R834 */
+	{ 0x0000, 0x0000 }, /* R835 */
+	{ 0x0000, 0x0000 }, /* R836 */
+	{ 0x0000, 0x0000 }, /* R837 */
+	{ 0x0000, 0x0000 }, /* R838 */
+	{ 0x0000, 0x0000 }, /* R839 */
+	{ 0x0000, 0x0000 }, /* R840 */
+	{ 0x0000, 0x0000 }, /* R841 */
+	{ 0x0000, 0x0000 }, /* R842 */
+	{ 0x0000, 0x0000 }, /* R843 */
+	{ 0x0000, 0x0000 }, /* R844 */
+	{ 0x0000, 0x0000 }, /* R845 */
+	{ 0x0000, 0x0000 }, /* R846 */
+	{ 0x0000, 0x0000 }, /* R847 */
+	{ 0x0000, 0x0000 }, /* R848 */
+	{ 0x0000, 0x0000 }, /* R849 */
+	{ 0x0000, 0x0000 }, /* R850 */
+	{ 0x0000, 0x0000 }, /* R851 */
+	{ 0x0000, 0x0000 }, /* R852 */
+	{ 0x0000, 0x0000 }, /* R853 */
+	{ 0x0000, 0x0000 }, /* R854 */
+	{ 0x0000, 0x0000 }, /* R855 */
+	{ 0x0000, 0x0000 }, /* R856 */
+	{ 0x0000, 0x0000 }, /* R857 */
+	{ 0x0000, 0x0000 }, /* R858 */
+	{ 0x0000, 0x0000 }, /* R859 */
+	{ 0x0000, 0x0000 }, /* R860 */
+	{ 0x0000, 0x0000 }, /* R861 */
+	{ 0x0000, 0x0000 }, /* R862 */
+	{ 0x0000, 0x0000 }, /* R863 */
+	{ 0x0000, 0x0000 }, /* R864 */
+	{ 0x0000, 0x0000 }, /* R865 */
+	{ 0x0000, 0x0000 }, /* R866 */
+	{ 0x0000, 0x0000 }, /* R867 */
+	{ 0x0000, 0x0000 }, /* R868 */
+	{ 0x0000, 0x0000 }, /* R869 */
+	{ 0x0000, 0x0000 }, /* R870 */
+	{ 0x0000, 0x0000 }, /* R871 */
+	{ 0x0000, 0x0000 }, /* R872 */
+	{ 0x0000, 0x0000 }, /* R873 */
+	{ 0x0000, 0x0000 }, /* R874 */
+	{ 0x0000, 0x0000 }, /* R875 */
+	{ 0x0000, 0x0000 }, /* R876 */
+	{ 0x0000, 0x0000 }, /* R877 */
+	{ 0x0000, 0x0000 }, /* R878 */
+	{ 0x0000, 0x0000 }, /* R879 */
+	{ 0x0000, 0x0000 }, /* R880 */
+	{ 0x0000, 0x0000 }, /* R881 */
+	{ 0x0000, 0x0000 }, /* R882 */
+	{ 0x0000, 0x0000 }, /* R883 */
+	{ 0x0000, 0x0000 }, /* R884 */
+	{ 0x0000, 0x0000 }, /* R885 */
+	{ 0x0000, 0x0000 }, /* R886 */
+	{ 0x0000, 0x0000 }, /* R887 */
+	{ 0x0000, 0x0000 }, /* R888 */
+	{ 0x0000, 0x0000 }, /* R889 */
+	{ 0x0000, 0x0000 }, /* R890 */
+	{ 0x0000, 0x0000 }, /* R891 */
+	{ 0x0000, 0x0000 }, /* R892 */
+	{ 0x0000, 0x0000 }, /* R893 */
+	{ 0x0000, 0x0000 }, /* R894 */
+	{ 0x0000, 0x0000 }, /* R895 */
+	{ 0x0000, 0x0000 }, /* R896 */
+	{ 0x0000, 0x0000 }, /* R897 */
+	{ 0x0000, 0x0000 }, /* R898 */
+	{ 0x0000, 0x0000 }, /* R899 */
+	{ 0x0000, 0x0000 }, /* R900 */
+	{ 0x0000, 0x0000 }, /* R901 */
+	{ 0x0000, 0x0000 }, /* R902 */
+	{ 0x0000, 0x0000 }, /* R903 */
+	{ 0x0000, 0x0000 }, /* R904 */
+	{ 0x0000, 0x0000 }, /* R905 */
+	{ 0x0000, 0x0000 }, /* R906 */
+	{ 0x0000, 0x0000 }, /* R907 */
+	{ 0x0000, 0x0000 }, /* R908 */
+	{ 0x0000, 0x0000 }, /* R909 */
+	{ 0x0000, 0x0000 }, /* R910 */
+	{ 0x0000, 0x0000 }, /* R911 */
+	{ 0x0000, 0x0000 }, /* R912 */
+	{ 0x0000, 0x0000 }, /* R913 */
+	{ 0x0000, 0x0000 }, /* R914 */
+	{ 0x0000, 0x0000 }, /* R915 */
+	{ 0x0000, 0x0000 }, /* R916 */
+	{ 0x0000, 0x0000 }, /* R917 */
+	{ 0x0000, 0x0000 }, /* R918 */
+	{ 0x0000, 0x0000 }, /* R919 */
+	{ 0x0000, 0x0000 }, /* R920 */
+	{ 0x0000, 0x0000 }, /* R921 */
+	{ 0x0000, 0x0000 }, /* R922 */
+	{ 0x0000, 0x0000 }, /* R923 */
+	{ 0x0000, 0x0000 }, /* R924 */
+	{ 0x0000, 0x0000 }, /* R925 */
+	{ 0x0000, 0x0000 }, /* R926 */
+	{ 0x0000, 0x0000 }, /* R927 */
+	{ 0x0000, 0x0000 }, /* R928 */
+	{ 0x0000, 0x0000 }, /* R929 */
+	{ 0x0000, 0x0000 }, /* R930 */
+	{ 0x0000, 0x0000 }, /* R931 */
+	{ 0x0000, 0x0000 }, /* R932 */
+	{ 0x0000, 0x0000 }, /* R933 */
+	{ 0x0000, 0x0000 }, /* R934 */
+	{ 0x0000, 0x0000 }, /* R935 */
+	{ 0x0000, 0x0000 }, /* R936 */
+	{ 0x0000, 0x0000 }, /* R937 */
+	{ 0x0000, 0x0000 }, /* R938 */
+	{ 0x0000, 0x0000 }, /* R939 */
+	{ 0x0000, 0x0000 }, /* R940 */
+	{ 0x0000, 0x0000 }, /* R941 */
+	{ 0x0000, 0x0000 }, /* R942 */
+	{ 0x0000, 0x0000 }, /* R943 */
+	{ 0x0000, 0x0000 }, /* R944 */
+	{ 0x0000, 0x0000 }, /* R945 */
+	{ 0x0000, 0x0000 }, /* R946 */
+	{ 0x0000, 0x0000 }, /* R947 */
+	{ 0x0000, 0x0000 }, /* R948 */
+	{ 0x0000, 0x0000 }, /* R949 */
+	{ 0x0000, 0x0000 }, /* R950 */
+	{ 0x0000, 0x0000 }, /* R951 */
+	{ 0x0000, 0x0000 }, /* R952 */
+	{ 0x0000, 0x0000 }, /* R953 */
+	{ 0x0000, 0x0000 }, /* R954 */
+	{ 0x0000, 0x0000 }, /* R955 */
+	{ 0x0000, 0x0000 }, /* R956 */
+	{ 0x0000, 0x0000 }, /* R957 */
+	{ 0x0000, 0x0000 }, /* R958 */
+	{ 0x0000, 0x0000 }, /* R959 */
+	{ 0x0000, 0x0000 }, /* R960 */
+	{ 0x0000, 0x0000 }, /* R961 */
+	{ 0x0000, 0x0000 }, /* R962 */
+	{ 0x0000, 0x0000 }, /* R963 */
+	{ 0x0000, 0x0000 }, /* R964 */
+	{ 0x0000, 0x0000 }, /* R965 */
+	{ 0x0000, 0x0000 }, /* R966 */
+	{ 0x0000, 0x0000 }, /* R967 */
+	{ 0x0000, 0x0000 }, /* R968 */
+	{ 0x0000, 0x0000 }, /* R969 */
+	{ 0x0000, 0x0000 }, /* R970 */
+	{ 0x0000, 0x0000 }, /* R971 */
+	{ 0x0000, 0x0000 }, /* R972 */
+	{ 0x0000, 0x0000 }, /* R973 */
+	{ 0x0000, 0x0000 }, /* R974 */
+	{ 0x0000, 0x0000 }, /* R975 */
+	{ 0x0000, 0x0000 }, /* R976 */
+	{ 0x0000, 0x0000 }, /* R977 */
+	{ 0x0000, 0x0000 }, /* R978 */
+	{ 0x0000, 0x0000 }, /* R979 */
+	{ 0x0000, 0x0000 }, /* R980 */
+	{ 0x0000, 0x0000 }, /* R981 */
+	{ 0x0000, 0x0000 }, /* R982 */
+	{ 0x0000, 0x0000 }, /* R983 */
+	{ 0x0000, 0x0000 }, /* R984 */
+	{ 0x0000, 0x0000 }, /* R985 */
+	{ 0x0000, 0x0000 }, /* R986 */
+	{ 0x0000, 0x0000 }, /* R987 */
+	{ 0x0000, 0x0000 }, /* R988 */
+	{ 0x0000, 0x0000 }, /* R989 */
+	{ 0x0000, 0x0000 }, /* R990 */
+	{ 0x0000, 0x0000 }, /* R991 */
+	{ 0x0000, 0x0000 }, /* R992 */
+	{ 0x0000, 0x0000 }, /* R993 */
+	{ 0x0000, 0x0000 }, /* R994 */
+	{ 0x0000, 0x0000 }, /* R995 */
+	{ 0x0000, 0x0000 }, /* R996 */
+	{ 0x0000, 0x0000 }, /* R997 */
+	{ 0x0000, 0x0000 }, /* R998 */
+	{ 0x0000, 0x0000 }, /* R999 */
+	{ 0x0000, 0x0000 }, /* R1000 */
+	{ 0x0000, 0x0000 }, /* R1001 */
+	{ 0x0000, 0x0000 }, /* R1002 */
+	{ 0x0000, 0x0000 }, /* R1003 */
+	{ 0x0000, 0x0000 }, /* R1004 */
+	{ 0x0000, 0x0000 }, /* R1005 */
+	{ 0x0000, 0x0000 }, /* R1006 */
+	{ 0x0000, 0x0000 }, /* R1007 */
+	{ 0x0000, 0x0000 }, /* R1008 */
+	{ 0x0000, 0x0000 }, /* R1009 */
+	{ 0x0000, 0x0000 }, /* R1010 */
+	{ 0x0000, 0x0000 }, /* R1011 */
+	{ 0x0000, 0x0000 }, /* R1012 */
+	{ 0x0000, 0x0000 }, /* R1013 */
+	{ 0x0000, 0x0000 }, /* R1014 */
+	{ 0x0000, 0x0000 }, /* R1015 */
+	{ 0x0000, 0x0000 }, /* R1016 */
+	{ 0x0000, 0x0000 }, /* R1017 */
+	{ 0x0000, 0x0000 }, /* R1018 */
+	{ 0x0000, 0x0000 }, /* R1019 */
+	{ 0x0000, 0x0000 }, /* R1020 */
+	{ 0x0000, 0x0000 }, /* R1021 */
+	{ 0x0000, 0x0000 }, /* R1022 */
+	{ 0x0000, 0x0000 }, /* R1023 */
+	{ 0x00FF, 0x01FF }, /* R1024  - AIF1 ADC1 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1025  - AIF1 ADC1 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1026  - AIF1 DAC1 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1027  - AIF1 DAC1 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1028  - AIF1 ADC2 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1029  - AIF1 ADC2 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1030  - AIF1 DAC2 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1031  - AIF1 DAC2 Right Volume */
+	{ 0x0000, 0x0000 }, /* R1032 */
+	{ 0x0000, 0x0000 }, /* R1033 */
+	{ 0x0000, 0x0000 }, /* R1034 */
+	{ 0x0000, 0x0000 }, /* R1035 */
+	{ 0x0000, 0x0000 }, /* R1036 */
+	{ 0x0000, 0x0000 }, /* R1037 */
+	{ 0x0000, 0x0000 }, /* R1038 */
+	{ 0x0000, 0x0000 }, /* R1039 */
+	{ 0xF800, 0xF800 }, /* R1040  - AIF1 ADC1 Filters */
+	{ 0x7800, 0x7800 }, /* R1041  - AIF1 ADC2 Filters */
+	{ 0x0000, 0x0000 }, /* R1042 */
+	{ 0x0000, 0x0000 }, /* R1043 */
+	{ 0x0000, 0x0000 }, /* R1044 */
+	{ 0x0000, 0x0000 }, /* R1045 */
+	{ 0x0000, 0x0000 }, /* R1046 */
+	{ 0x0000, 0x0000 }, /* R1047 */
+	{ 0x0000, 0x0000 }, /* R1048 */
+	{ 0x0000, 0x0000 }, /* R1049 */
+	{ 0x0000, 0x0000 }, /* R1050 */
+	{ 0x0000, 0x0000 }, /* R1051 */
+	{ 0x0000, 0x0000 }, /* R1052 */
+	{ 0x0000, 0x0000 }, /* R1053 */
+	{ 0x0000, 0x0000 }, /* R1054 */
+	{ 0x0000, 0x0000 }, /* R1055 */
+	{ 0x02B6, 0x02B6 }, /* R1056  - AIF1 DAC1 Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1057  - AIF1 DAC1 Filters (2) */
+	{ 0x02B6, 0x02B6 }, /* R1058  - AIF1 DAC2 Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1059  - AIF1 DAC2 Filters (2) */
+	{ 0x0000, 0x0000 }, /* R1060 */
+	{ 0x0000, 0x0000 }, /* R1061 */
+	{ 0x0000, 0x0000 }, /* R1062 */
+	{ 0x0000, 0x0000 }, /* R1063 */
+	{ 0x0000, 0x0000 }, /* R1064 */
+	{ 0x0000, 0x0000 }, /* R1065 */
+	{ 0x0000, 0x0000 }, /* R1066 */
+	{ 0x0000, 0x0000 }, /* R1067 */
+	{ 0x0000, 0x0000 }, /* R1068 */
+	{ 0x0000, 0x0000 }, /* R1069 */
+	{ 0x0000, 0x0000 }, /* R1070 */
+	{ 0x0000, 0x0000 }, /* R1071 */
+	{ 0x0000, 0x0000 }, /* R1072 */
+	{ 0x0000, 0x0000 }, /* R1073 */
+	{ 0x0000, 0x0000 }, /* R1074 */
+	{ 0x0000, 0x0000 }, /* R1075 */
+	{ 0x0000, 0x0000 }, /* R1076 */
+	{ 0x0000, 0x0000 }, /* R1077 */
+	{ 0x0000, 0x0000 }, /* R1078 */
+	{ 0x0000, 0x0000 }, /* R1079 */
+	{ 0x0000, 0x0000 }, /* R1080 */
+	{ 0x0000, 0x0000 }, /* R1081 */
+	{ 0x0000, 0x0000 }, /* R1082 */
+	{ 0x0000, 0x0000 }, /* R1083 */
+	{ 0x0000, 0x0000 }, /* R1084 */
+	{ 0x0000, 0x0000 }, /* R1085 */
+	{ 0x0000, 0x0000 }, /* R1086 */
+	{ 0x0000, 0x0000 }, /* R1087 */
+	{ 0xFFFF, 0xFFFF }, /* R1088  - AIF1 DRC1 (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1089  - AIF1 DRC1 (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1090  - AIF1 DRC1 (3) */
+	{ 0x07FF, 0x07FF }, /* R1091  - AIF1 DRC1 (4) */
+	{ 0x03FF, 0x03FF }, /* R1092  - AIF1 DRC1 (5) */
+	{ 0x0000, 0x0000 }, /* R1093 */
+	{ 0x0000, 0x0000 }, /* R1094 */
+	{ 0x0000, 0x0000 }, /* R1095 */
+	{ 0x0000, 0x0000 }, /* R1096 */
+	{ 0x0000, 0x0000 }, /* R1097 */
+	{ 0x0000, 0x0000 }, /* R1098 */
+	{ 0x0000, 0x0000 }, /* R1099 */
+	{ 0x0000, 0x0000 }, /* R1100 */
+	{ 0x0000, 0x0000 }, /* R1101 */
+	{ 0x0000, 0x0000 }, /* R1102 */
+	{ 0x0000, 0x0000 }, /* R1103 */
+	{ 0xFFFF, 0xFFFF }, /* R1104  - AIF1 DRC2 (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1105  - AIF1 DRC2 (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1106  - AIF1 DRC2 (3) */
+	{ 0x07FF, 0x07FF }, /* R1107  - AIF1 DRC2 (4) */
+	{ 0x03FF, 0x03FF }, /* R1108  - AIF1 DRC2 (5) */
+	{ 0x0000, 0x0000 }, /* R1109 */
+	{ 0x0000, 0x0000 }, /* R1110 */
+	{ 0x0000, 0x0000 }, /* R1111 */
+	{ 0x0000, 0x0000 }, /* R1112 */
+	{ 0x0000, 0x0000 }, /* R1113 */
+	{ 0x0000, 0x0000 }, /* R1114 */
+	{ 0x0000, 0x0000 }, /* R1115 */
+	{ 0x0000, 0x0000 }, /* R1116 */
+	{ 0x0000, 0x0000 }, /* R1117 */
+	{ 0x0000, 0x0000 }, /* R1118 */
+	{ 0x0000, 0x0000 }, /* R1119 */
+	{ 0x0000, 0x0000 }, /* R1120 */
+	{ 0x0000, 0x0000 }, /* R1121 */
+	{ 0x0000, 0x0000 }, /* R1122 */
+	{ 0x0000, 0x0000 }, /* R1123 */
+	{ 0x0000, 0x0000 }, /* R1124 */
+	{ 0x0000, 0x0000 }, /* R1125 */
+	{ 0x0000, 0x0000 }, /* R1126 */
+	{ 0x0000, 0x0000 }, /* R1127 */
+	{ 0x0000, 0x0000 }, /* R1128 */
+	{ 0x0000, 0x0000 }, /* R1129 */
+	{ 0x0000, 0x0000 }, /* R1130 */
+	{ 0x0000, 0x0000 }, /* R1131 */
+	{ 0x0000, 0x0000 }, /* R1132 */
+	{ 0x0000, 0x0000 }, /* R1133 */
+	{ 0x0000, 0x0000 }, /* R1134 */
+	{ 0x0000, 0x0000 }, /* R1135 */
+	{ 0x0000, 0x0000 }, /* R1136 */
+	{ 0x0000, 0x0000 }, /* R1137 */
+	{ 0x0000, 0x0000 }, /* R1138 */
+	{ 0x0000, 0x0000 }, /* R1139 */
+	{ 0x0000, 0x0000 }, /* R1140 */
+	{ 0x0000, 0x0000 }, /* R1141 */
+	{ 0x0000, 0x0000 }, /* R1142 */
+	{ 0x0000, 0x0000 }, /* R1143 */
+	{ 0x0000, 0x0000 }, /* R1144 */
+	{ 0x0000, 0x0000 }, /* R1145 */
+	{ 0x0000, 0x0000 }, /* R1146 */
+	{ 0x0000, 0x0000 }, /* R1147 */
+	{ 0x0000, 0x0000 }, /* R1148 */
+	{ 0x0000, 0x0000 }, /* R1149 */
+	{ 0x0000, 0x0000 }, /* R1150 */
+	{ 0x0000, 0x0000 }, /* R1151 */
+	{ 0xFFFF, 0xFFFF }, /* R1152  - AIF1 DAC1 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1153  - AIF1 DAC1 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1154  - AIF1 DAC1 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1155  - AIF1 DAC1 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1156  - AIF1 DAC1 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1157  - AIF1 DAC1 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1158  - AIF1 DAC1 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1159  - AIF1 DAC1 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1160  - AIF1 DAC1 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1161  - AIF1 DAC1 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1162  - AIF1 DAC1 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1163  - AIF1 DAC1 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1164  - AIF1 DAC1 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1165  - AIF1 DAC1 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1166  - AIF1 DAC1 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1167  - AIF1 DAC1 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1168  - AIF1 DAC1 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1169  - AIF1 DAC1 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1170  - AIF1 DAC1 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1171  - AIF1 DAC1 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1172 */
+	{ 0x0000, 0x0000 }, /* R1173 */
+	{ 0x0000, 0x0000 }, /* R1174 */
+	{ 0x0000, 0x0000 }, /* R1175 */
+	{ 0x0000, 0x0000 }, /* R1176 */
+	{ 0x0000, 0x0000 }, /* R1177 */
+	{ 0x0000, 0x0000 }, /* R1178 */
+	{ 0x0000, 0x0000 }, /* R1179 */
+	{ 0x0000, 0x0000 }, /* R1180 */
+	{ 0x0000, 0x0000 }, /* R1181 */
+	{ 0x0000, 0x0000 }, /* R1182 */
+	{ 0x0000, 0x0000 }, /* R1183 */
+	{ 0xFFFF, 0xFFFF }, /* R1184  - AIF1 DAC2 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1185  - AIF1 DAC2 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1186  - AIF1 DAC2 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1187  - AIF1 DAC2 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1188  - AIF1 DAC2 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1189  - AIF1 DAC2 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1190  - AIF1 DAC2 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1191  - AIF1 DAC2 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1192  - AIF1 DAC2 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1193  - AIF1 DAC2 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1194  - AIF1 DAC2 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1195  - AIF1 DAC2 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1196  - AIF1 DAC2 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1197  - AIF1 DAC2 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1198  - AIF1 DAC2 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1199  - AIF1 DAC2 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1200  - AIF1 DAC2 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1201  - AIF1 DAC2 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1202  - AIF1 DAC2 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1203  - AIF1 DAC2 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1204 */
+	{ 0x0000, 0x0000 }, /* R1205 */
+	{ 0x0000, 0x0000 }, /* R1206 */
+	{ 0x0000, 0x0000 }, /* R1207 */
+	{ 0x0000, 0x0000 }, /* R1208 */
+	{ 0x0000, 0x0000 }, /* R1209 */
+	{ 0x0000, 0x0000 }, /* R1210 */
+	{ 0x0000, 0x0000 }, /* R1211 */
+	{ 0x0000, 0x0000 }, /* R1212 */
+	{ 0x0000, 0x0000 }, /* R1213 */
+	{ 0x0000, 0x0000 }, /* R1214 */
+	{ 0x0000, 0x0000 }, /* R1215 */
+	{ 0x0000, 0x0000 }, /* R1216 */
+	{ 0x0000, 0x0000 }, /* R1217 */
+	{ 0x0000, 0x0000 }, /* R1218 */
+	{ 0x0000, 0x0000 }, /* R1219 */
+	{ 0x0000, 0x0000 }, /* R1220 */
+	{ 0x0000, 0x0000 }, /* R1221 */
+	{ 0x0000, 0x0000 }, /* R1222 */
+	{ 0x0000, 0x0000 }, /* R1223 */
+	{ 0x0000, 0x0000 }, /* R1224 */
+	{ 0x0000, 0x0000 }, /* R1225 */
+	{ 0x0000, 0x0000 }, /* R1226 */
+	{ 0x0000, 0x0000 }, /* R1227 */
+	{ 0x0000, 0x0000 }, /* R1228 */
+	{ 0x0000, 0x0000 }, /* R1229 */
+	{ 0x0000, 0x0000 }, /* R1230 */
+	{ 0x0000, 0x0000 }, /* R1231 */
+	{ 0x0000, 0x0000 }, /* R1232 */
+	{ 0x0000, 0x0000 }, /* R1233 */
+	{ 0x0000, 0x0000 }, /* R1234 */
+	{ 0x0000, 0x0000 }, /* R1235 */
+	{ 0x0000, 0x0000 }, /* R1236 */
+	{ 0x0000, 0x0000 }, /* R1237 */
+	{ 0x0000, 0x0000 }, /* R1238 */
+	{ 0x0000, 0x0000 }, /* R1239 */
+	{ 0x0000, 0x0000 }, /* R1240 */
+	{ 0x0000, 0x0000 }, /* R1241 */
+	{ 0x0000, 0x0000 }, /* R1242 */
+	{ 0x0000, 0x0000 }, /* R1243 */
+	{ 0x0000, 0x0000 }, /* R1244 */
+	{ 0x0000, 0x0000 }, /* R1245 */
+	{ 0x0000, 0x0000 }, /* R1246 */
+	{ 0x0000, 0x0000 }, /* R1247 */
+	{ 0x0000, 0x0000 }, /* R1248 */
+	{ 0x0000, 0x0000 }, /* R1249 */
+	{ 0x0000, 0x0000 }, /* R1250 */
+	{ 0x0000, 0x0000 }, /* R1251 */
+	{ 0x0000, 0x0000 }, /* R1252 */
+	{ 0x0000, 0x0000 }, /* R1253 */
+	{ 0x0000, 0x0000 }, /* R1254 */
+	{ 0x0000, 0x0000 }, /* R1255 */
+	{ 0x0000, 0x0000 }, /* R1256 */
+	{ 0x0000, 0x0000 }, /* R1257 */
+	{ 0x0000, 0x0000 }, /* R1258 */
+	{ 0x0000, 0x0000 }, /* R1259 */
+	{ 0x0000, 0x0000 }, /* R1260 */
+	{ 0x0000, 0x0000 }, /* R1261 */
+	{ 0x0000, 0x0000 }, /* R1262 */
+	{ 0x0000, 0x0000 }, /* R1263 */
+	{ 0x0000, 0x0000 }, /* R1264 */
+	{ 0x0000, 0x0000 }, /* R1265 */
+	{ 0x0000, 0x0000 }, /* R1266 */
+	{ 0x0000, 0x0000 }, /* R1267 */
+	{ 0x0000, 0x0000 }, /* R1268 */
+	{ 0x0000, 0x0000 }, /* R1269 */
+	{ 0x0000, 0x0000 }, /* R1270 */
+	{ 0x0000, 0x0000 }, /* R1271 */
+	{ 0x0000, 0x0000 }, /* R1272 */
+	{ 0x0000, 0x0000 }, /* R1273 */
+	{ 0x0000, 0x0000 }, /* R1274 */
+	{ 0x0000, 0x0000 }, /* R1275 */
+	{ 0x0000, 0x0000 }, /* R1276 */
+	{ 0x0000, 0x0000 }, /* R1277 */
+	{ 0x0000, 0x0000 }, /* R1278 */
+	{ 0x0000, 0x0000 }, /* R1279 */
+	{ 0x00FF, 0x01FF }, /* R1280  - AIF2 ADC Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1281  - AIF2 ADC Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1282  - AIF2 DAC Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1283  - AIF2 DAC Right Volume */
+	{ 0x0000, 0x0000 }, /* R1284 */
+	{ 0x0000, 0x0000 }, /* R1285 */
+	{ 0x0000, 0x0000 }, /* R1286 */
+	{ 0x0000, 0x0000 }, /* R1287 */
+	{ 0x0000, 0x0000 }, /* R1288 */
+	{ 0x0000, 0x0000 }, /* R1289 */
+	{ 0x0000, 0x0000 }, /* R1290 */
+	{ 0x0000, 0x0000 }, /* R1291 */
+	{ 0x0000, 0x0000 }, /* R1292 */
+	{ 0x0000, 0x0000 }, /* R1293 */
+	{ 0x0000, 0x0000 }, /* R1294 */
+	{ 0x0000, 0x0000 }, /* R1295 */
+	{ 0xF800, 0xF800 }, /* R1296  - AIF2 ADC Filters */
+	{ 0x0000, 0x0000 }, /* R1297 */
+	{ 0x0000, 0x0000 }, /* R1298 */
+	{ 0x0000, 0x0000 }, /* R1299 */
+	{ 0x0000, 0x0000 }, /* R1300 */
+	{ 0x0000, 0x0000 }, /* R1301 */
+	{ 0x0000, 0x0000 }, /* R1302 */
+	{ 0x0000, 0x0000 }, /* R1303 */
+	{ 0x0000, 0x0000 }, /* R1304 */
+	{ 0x0000, 0x0000 }, /* R1305 */
+	{ 0x0000, 0x0000 }, /* R1306 */
+	{ 0x0000, 0x0000 }, /* R1307 */
+	{ 0x0000, 0x0000 }, /* R1308 */
+	{ 0x0000, 0x0000 }, /* R1309 */
+	{ 0x0000, 0x0000 }, /* R1310 */
+	{ 0x0000, 0x0000 }, /* R1311 */
+	{ 0x02B6, 0x02B6 }, /* R1312  - AIF2 DAC Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1313  - AIF2 DAC Filters (2) */
+	{ 0x0000, 0x0000 }, /* R1314 */
+	{ 0x0000, 0x0000 }, /* R1315 */
+	{ 0x0000, 0x0000 }, /* R1316 */
+	{ 0x0000, 0x0000 }, /* R1317 */
+	{ 0x0000, 0x0000 }, /* R1318 */
+	{ 0x0000, 0x0000 }, /* R1319 */
+	{ 0x0000, 0x0000 }, /* R1320 */
+	{ 0x0000, 0x0000 }, /* R1321 */
+	{ 0x0000, 0x0000 }, /* R1322 */
+	{ 0x0000, 0x0000 }, /* R1323 */
+	{ 0x0000, 0x0000 }, /* R1324 */
+	{ 0x0000, 0x0000 }, /* R1325 */
+	{ 0x0000, 0x0000 }, /* R1326 */
+	{ 0x0000, 0x0000 }, /* R1327 */
+	{ 0x0000, 0x0000 }, /* R1328 */
+	{ 0x0000, 0x0000 }, /* R1329 */
+	{ 0x0000, 0x0000 }, /* R1330 */
+	{ 0x0000, 0x0000 }, /* R1331 */
+	{ 0x0000, 0x0000 }, /* R1332 */
+	{ 0x0000, 0x0000 }, /* R1333 */
+	{ 0x0000, 0x0000 }, /* R1334 */
+	{ 0x0000, 0x0000 }, /* R1335 */
+	{ 0x0000, 0x0000 }, /* R1336 */
+	{ 0x0000, 0x0000 }, /* R1337 */
+	{ 0x0000, 0x0000 }, /* R1338 */
+	{ 0x0000, 0x0000 }, /* R1339 */
+	{ 0x0000, 0x0000 }, /* R1340 */
+	{ 0x0000, 0x0000 }, /* R1341 */
+	{ 0x0000, 0x0000 }, /* R1342 */
+	{ 0x0000, 0x0000 }, /* R1343 */
+	{ 0xFFFF, 0xFFFF }, /* R1344  - AIF2 DRC (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1345  - AIF2 DRC (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1346  - AIF2 DRC (3) */
+	{ 0x07FF, 0x07FF }, /* R1347  - AIF2 DRC (4) */
+	{ 0x03FF, 0x03FF }, /* R1348  - AIF2 DRC (5) */
+	{ 0x0000, 0x0000 }, /* R1349 */
+	{ 0x0000, 0x0000 }, /* R1350 */
+	{ 0x0000, 0x0000 }, /* R1351 */
+	{ 0x0000, 0x0000 }, /* R1352 */
+	{ 0x0000, 0x0000 }, /* R1353 */
+	{ 0x0000, 0x0000 }, /* R1354 */
+	{ 0x0000, 0x0000 }, /* R1355 */
+	{ 0x0000, 0x0000 }, /* R1356 */
+	{ 0x0000, 0x0000 }, /* R1357 */
+	{ 0x0000, 0x0000 }, /* R1358 */
+	{ 0x0000, 0x0000 }, /* R1359 */
+	{ 0x0000, 0x0000 }, /* R1360 */
+	{ 0x0000, 0x0000 }, /* R1361 */
+	{ 0x0000, 0x0000 }, /* R1362 */
+	{ 0x0000, 0x0000 }, /* R1363 */
+	{ 0x0000, 0x0000 }, /* R1364 */
+	{ 0x0000, 0x0000 }, /* R1365 */
+	{ 0x0000, 0x0000 }, /* R1366 */
+	{ 0x0000, 0x0000 }, /* R1367 */
+	{ 0x0000, 0x0000 }, /* R1368 */
+	{ 0x0000, 0x0000 }, /* R1369 */
+	{ 0x0000, 0x0000 }, /* R1370 */
+	{ 0x0000, 0x0000 }, /* R1371 */
+	{ 0x0000, 0x0000 }, /* R1372 */
+	{ 0x0000, 0x0000 }, /* R1373 */
+	{ 0x0000, 0x0000 }, /* R1374 */
+	{ 0x0000, 0x0000 }, /* R1375 */
+	{ 0x0000, 0x0000 }, /* R1376 */
+	{ 0x0000, 0x0000 }, /* R1377 */
+	{ 0x0000, 0x0000 }, /* R1378 */
+	{ 0x0000, 0x0000 }, /* R1379 */
+	{ 0x0000, 0x0000 }, /* R1380 */
+	{ 0x0000, 0x0000 }, /* R1381 */
+	{ 0x0000, 0x0000 }, /* R1382 */
+	{ 0x0000, 0x0000 }, /* R1383 */
+	{ 0x0000, 0x0000 }, /* R1384 */
+	{ 0x0000, 0x0000 }, /* R1385 */
+	{ 0x0000, 0x0000 }, /* R1386 */
+	{ 0x0000, 0x0000 }, /* R1387 */
+	{ 0x0000, 0x0000 }, /* R1388 */
+	{ 0x0000, 0x0000 }, /* R1389 */
+	{ 0x0000, 0x0000 }, /* R1390 */
+	{ 0x0000, 0x0000 }, /* R1391 */
+	{ 0x0000, 0x0000 }, /* R1392 */
+	{ 0x0000, 0x0000 }, /* R1393 */
+	{ 0x0000, 0x0000 }, /* R1394 */
+	{ 0x0000, 0x0000 }, /* R1395 */
+	{ 0x0000, 0x0000 }, /* R1396 */
+	{ 0x0000, 0x0000 }, /* R1397 */
+	{ 0x0000, 0x0000 }, /* R1398 */
+	{ 0x0000, 0x0000 }, /* R1399 */
+	{ 0x0000, 0x0000 }, /* R1400 */
+	{ 0x0000, 0x0000 }, /* R1401 */
+	{ 0x0000, 0x0000 }, /* R1402 */
+	{ 0x0000, 0x0000 }, /* R1403 */
+	{ 0x0000, 0x0000 }, /* R1404 */
+	{ 0x0000, 0x0000 }, /* R1405 */
+	{ 0x0000, 0x0000 }, /* R1406 */
+	{ 0x0000, 0x0000 }, /* R1407 */
+	{ 0xFFFF, 0xFFFF }, /* R1408  - AIF2 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1409  - AIF2 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1410  - AIF2 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1411  - AIF2 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1412  - AIF2 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1413  - AIF2 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1414  - AIF2 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1415  - AIF2 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1416  - AIF2 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1417  - AIF2 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1418  - AIF2 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1419  - AIF2 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1420  - AIF2 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1421  - AIF2 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1422  - AIF2 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1423  - AIF2 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1424  - AIF2 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1425  - AIF2 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1426  - AIF2 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1427  - AIF2 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1428 */
+	{ 0x0000, 0x0000 }, /* R1429 */
+	{ 0x0000, 0x0000 }, /* R1430 */
+	{ 0x0000, 0x0000 }, /* R1431 */
+	{ 0x0000, 0x0000 }, /* R1432 */
+	{ 0x0000, 0x0000 }, /* R1433 */
+	{ 0x0000, 0x0000 }, /* R1434 */
+	{ 0x0000, 0x0000 }, /* R1435 */
+	{ 0x0000, 0x0000 }, /* R1436 */
+	{ 0x0000, 0x0000 }, /* R1437 */
+	{ 0x0000, 0x0000 }, /* R1438 */
+	{ 0x0000, 0x0000 }, /* R1439 */
+	{ 0x0000, 0x0000 }, /* R1440 */
+	{ 0x0000, 0x0000 }, /* R1441 */
+	{ 0x0000, 0x0000 }, /* R1442 */
+	{ 0x0000, 0x0000 }, /* R1443 */
+	{ 0x0000, 0x0000 }, /* R1444 */
+	{ 0x0000, 0x0000 }, /* R1445 */
+	{ 0x0000, 0x0000 }, /* R1446 */
+	{ 0x0000, 0x0000 }, /* R1447 */
+	{ 0x0000, 0x0000 }, /* R1448 */
+	{ 0x0000, 0x0000 }, /* R1449 */
+	{ 0x0000, 0x0000 }, /* R1450 */
+	{ 0x0000, 0x0000 }, /* R1451 */
+	{ 0x0000, 0x0000 }, /* R1452 */
+	{ 0x0000, 0x0000 }, /* R1453 */
+	{ 0x0000, 0x0000 }, /* R1454 */
+	{ 0x0000, 0x0000 }, /* R1455 */
+	{ 0x0000, 0x0000 }, /* R1456 */
+	{ 0x0000, 0x0000 }, /* R1457 */
+	{ 0x0000, 0x0000 }, /* R1458 */
+	{ 0x0000, 0x0000 }, /* R1459 */
+	{ 0x0000, 0x0000 }, /* R1460 */
+	{ 0x0000, 0x0000 }, /* R1461 */
+	{ 0x0000, 0x0000 }, /* R1462 */
+	{ 0x0000, 0x0000 }, /* R1463 */
+	{ 0x0000, 0x0000 }, /* R1464 */
+	{ 0x0000, 0x0000 }, /* R1465 */
+	{ 0x0000, 0x0000 }, /* R1466 */
+	{ 0x0000, 0x0000 }, /* R1467 */
+	{ 0x0000, 0x0000 }, /* R1468 */
+	{ 0x0000, 0x0000 }, /* R1469 */
+	{ 0x0000, 0x0000 }, /* R1470 */
+	{ 0x0000, 0x0000 }, /* R1471 */
+	{ 0x0000, 0x0000 }, /* R1472 */
+	{ 0x0000, 0x0000 }, /* R1473 */
+	{ 0x0000, 0x0000 }, /* R1474 */
+	{ 0x0000, 0x0000 }, /* R1475 */
+	{ 0x0000, 0x0000 }, /* R1476 */
+	{ 0x0000, 0x0000 }, /* R1477 */
+	{ 0x0000, 0x0000 }, /* R1478 */
+	{ 0x0000, 0x0000 }, /* R1479 */
+	{ 0x0000, 0x0000 }, /* R1480 */
+	{ 0x0000, 0x0000 }, /* R1481 */
+	{ 0x0000, 0x0000 }, /* R1482 */
+	{ 0x0000, 0x0000 }, /* R1483 */
+	{ 0x0000, 0x0000 }, /* R1484 */
+	{ 0x0000, 0x0000 }, /* R1485 */
+	{ 0x0000, 0x0000 }, /* R1486 */
+	{ 0x0000, 0x0000 }, /* R1487 */
+	{ 0x0000, 0x0000 }, /* R1488 */
+	{ 0x0000, 0x0000 }, /* R1489 */
+	{ 0x0000, 0x0000 }, /* R1490 */
+	{ 0x0000, 0x0000 }, /* R1491 */
+	{ 0x0000, 0x0000 }, /* R1492 */
+	{ 0x0000, 0x0000 }, /* R1493 */
+	{ 0x0000, 0x0000 }, /* R1494 */
+	{ 0x0000, 0x0000 }, /* R1495 */
+	{ 0x0000, 0x0000 }, /* R1496 */
+	{ 0x0000, 0x0000 }, /* R1497 */
+	{ 0x0000, 0x0000 }, /* R1498 */
+	{ 0x0000, 0x0000 }, /* R1499 */
+	{ 0x0000, 0x0000 }, /* R1500 */
+	{ 0x0000, 0x0000 }, /* R1501 */
+	{ 0x0000, 0x0000 }, /* R1502 */
+	{ 0x0000, 0x0000 }, /* R1503 */
+	{ 0x0000, 0x0000 }, /* R1504 */
+	{ 0x0000, 0x0000 }, /* R1505 */
+	{ 0x0000, 0x0000 }, /* R1506 */
+	{ 0x0000, 0x0000 }, /* R1507 */
+	{ 0x0000, 0x0000 }, /* R1508 */
+	{ 0x0000, 0x0000 }, /* R1509 */
+	{ 0x0000, 0x0000 }, /* R1510 */
+	{ 0x0000, 0x0000 }, /* R1511 */
+	{ 0x0000, 0x0000 }, /* R1512 */
+	{ 0x0000, 0x0000 }, /* R1513 */
+	{ 0x0000, 0x0000 }, /* R1514 */
+	{ 0x0000, 0x0000 }, /* R1515 */
+	{ 0x0000, 0x0000 }, /* R1516 */
+	{ 0x0000, 0x0000 }, /* R1517 */
+	{ 0x0000, 0x0000 }, /* R1518 */
+	{ 0x0000, 0x0000 }, /* R1519 */
+	{ 0x0000, 0x0000 }, /* R1520 */
+	{ 0x0000, 0x0000 }, /* R1521 */
+	{ 0x0000, 0x0000 }, /* R1522 */
+	{ 0x0000, 0x0000 }, /* R1523 */
+	{ 0x0000, 0x0000 }, /* R1524 */
+	{ 0x0000, 0x0000 }, /* R1525 */
+	{ 0x0000, 0x0000 }, /* R1526 */
+	{ 0x0000, 0x0000 }, /* R1527 */
+	{ 0x0000, 0x0000 }, /* R1528 */
+	{ 0x0000, 0x0000 }, /* R1529 */
+	{ 0x0000, 0x0000 }, /* R1530 */
+	{ 0x0000, 0x0000 }, /* R1531 */
+	{ 0x0000, 0x0000 }, /* R1532 */
+	{ 0x0000, 0x0000 }, /* R1533 */
+	{ 0x0000, 0x0000 }, /* R1534 */
+	{ 0x0000, 0x0000 }, /* R1535 */
+	{ 0x01EF, 0x01EF }, /* R1536  - DAC1 Mixer Volumes */
+	{ 0x0037, 0x0037 }, /* R1537  - DAC1 Left Mixer Routing */
+	{ 0x0037, 0x0037 }, /* R1538  - DAC1 Right Mixer Routing */
+	{ 0x01EF, 0x01EF }, /* R1539  - DAC2 Mixer Volumes */
+	{ 0x0037, 0x0037 }, /* R1540  - DAC2 Left Mixer Routing */
+	{ 0x0037, 0x0037 }, /* R1541  - DAC2 Right Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1542  - AIF1 ADC1 Left Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1543  - AIF1 ADC1 Right Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1544  - AIF1 ADC2 Left Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1545  - AIF1 ADC2 Right Mixer Routing */
+	{ 0x0000, 0x0000 }, /* R1546 */
+	{ 0x0000, 0x0000 }, /* R1547 */
+	{ 0x0000, 0x0000 }, /* R1548 */
+	{ 0x0000, 0x0000 }, /* R1549 */
+	{ 0x0000, 0x0000 }, /* R1550 */
+	{ 0x0000, 0x0000 }, /* R1551 */
+	{ 0x02FF, 0x03FF }, /* R1552  - DAC1 Left Volume */
+	{ 0x02FF, 0x03FF }, /* R1553  - DAC1 Right Volume */
+	{ 0x02FF, 0x03FF }, /* R1554  - DAC2 Left Volume */
+	{ 0x02FF, 0x03FF }, /* R1555  - DAC2 Right Volume */
+	{ 0x0003, 0x0003 }, /* R1556  - DAC Softmute */
+	{ 0x0000, 0x0000 }, /* R1557 */
+	{ 0x0000, 0x0000 }, /* R1558 */
+	{ 0x0000, 0x0000 }, /* R1559 */
+	{ 0x0000, 0x0000 }, /* R1560 */
+	{ 0x0000, 0x0000 }, /* R1561 */
+	{ 0x0000, 0x0000 }, /* R1562 */
+	{ 0x0000, 0x0000 }, /* R1563 */
+	{ 0x0000, 0x0000 }, /* R1564 */
+	{ 0x0000, 0x0000 }, /* R1565 */
+	{ 0x0000, 0x0000 }, /* R1566 */
+	{ 0x0000, 0x0000 }, /* R1567 */
+	{ 0x0003, 0x0003 }, /* R1568  - Oversampling */
+	{ 0x03C3, 0x03C3 }, /* R1569  - Sidetone */
+};
+
+const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE] = {
+	0x8994,     /* R0     - Software Reset */
+	0x0000,     /* R1     - Power Management (1) */
+	0x6000,     /* R2     - Power Management (2) */
+	0x0000,     /* R3     - Power Management (3) */
+	0x0000,     /* R4     - Power Management (4) */
+	0x0000,     /* R5     - Power Management (5) */
+	0x0000,     /* R6     - Power Management (6) */
+	0x0000,     /* R7 */
+	0x0000,     /* R8 */
+	0x0000,     /* R9 */
+	0x0000,     /* R10 */
+	0x0000,     /* R11 */
+	0x0000,     /* R12 */
+	0x0000,     /* R13 */
+	0x0000,     /* R14 */
+	0x0000,     /* R15 */
+	0x0000,     /* R16 */
+	0x0000,     /* R17 */
+	0x0000,     /* R18 */
+	0x0000,     /* R19 */
+	0x0000,     /* R20 */
+	0x0000,     /* R21    - Input Mixer (1) */
+	0x0000,     /* R22 */
+	0x0000,     /* R23 */
+	0x008B,     /* R24    - Left Line Input 1&2 Volume */
+	0x008B,     /* R25    - Left Line Input 3&4 Volume */
+	0x008B,     /* R26    - Right Line Input 1&2 Volume */
+	0x008B,     /* R27    - Right Line Input 3&4 Volume */
+	0x006D,     /* R28    - Left Output Volume */
+	0x006D,     /* R29    - Right Output Volume */
+	0x0066,     /* R30    - Line Outputs Volume */
+	0x0020,     /* R31    - HPOUT2 Volume */
+	0x0079,     /* R32    - Left OPGA Volume */
+	0x0079,     /* R33    - Right OPGA Volume */
+	0x0003,     /* R34    - SPKMIXL Attenuation */
+	0x0003,     /* R35    - SPKMIXR Attenuation */
+	0x0011,     /* R36    - SPKOUT Mixers */
+	0x0140,     /* R37    - ClassD */
+	0x0079,     /* R38    - Speaker Volume Left */
+	0x0079,     /* R39    - Speaker Volume Right */
+	0x0000,     /* R40    - Input Mixer (2) */
+	0x0000,     /* R41    - Input Mixer (3) */
+	0x0000,     /* R42    - Input Mixer (4) */
+	0x0000,     /* R43    - Input Mixer (5) */
+	0x0000,     /* R44    - Input Mixer (6) */
+	0x0000,     /* R45    - Output Mixer (1) */
+	0x0000,     /* R46    - Output Mixer (2) */
+	0x0000,     /* R47    - Output Mixer (3) */
+	0x0000,     /* R48    - Output Mixer (4) */
+	0x0000,     /* R49    - Output Mixer (5) */
+	0x0000,     /* R50    - Output Mixer (6) */
+	0x0000,     /* R51    - HPOUT2 Mixer */
+	0x0000,     /* R52    - Line Mixer (1) */
+	0x0000,     /* R53    - Line Mixer (2) */
+	0x0000,     /* R54    - Speaker Mixer */
+	0x0000,     /* R55    - Additional Control */
+	0x0000,     /* R56    - AntiPOP (1) */
+	0x0000,     /* R57    - AntiPOP (2) */
+	0x0000,     /* R58    - MICBIAS */
+	0x000D,     /* R59    - LDO 1 */
+	0x0003,     /* R60    - LDO 2 */
+	0x0000,     /* R61 */
+	0x0000,     /* R62 */
+	0x0000,     /* R63 */
+	0x0000,     /* R64 */
+	0x0000,     /* R65 */
+	0x0000,     /* R66 */
+	0x0000,     /* R67 */
+	0x0000,     /* R68 */
+	0x0000,     /* R69 */
+	0x0000,     /* R70 */
+	0x0000,     /* R71 */
+	0x0000,     /* R72 */
+	0x0000,     /* R73 */
+	0x0000,     /* R74 */
+	0x0000,     /* R75 */
+	0x1F25,     /* R76    - Charge Pump (1) */
+	0x0000,     /* R77 */
+	0x0000,     /* R78 */
+	0x0000,     /* R79 */
+	0x0000,     /* R80 */
+	0x0004,     /* R81    - Class W (1) */
+	0x0000,     /* R82 */
+	0x0000,     /* R83 */
+	0x0000,     /* R84    - DC Servo (1) */
+	0x054A,     /* R85    - DC Servo (2) */
+	0x0000,     /* R86 */
+	0x0000,     /* R87    - DC Servo (4) */
+	0x0000,     /* R88    - DC Servo Readback */
+	0x0000,     /* R89 */
+	0x0000,     /* R90 */
+	0x0000,     /* R91 */
+	0x0000,     /* R92 */
+	0x0000,     /* R93 */
+	0x0000,     /* R94 */
+	0x0000,     /* R95 */
+	0x0000,     /* R96    - Analogue HP (1) */
+	0x0000,     /* R97 */
+	0x0000,     /* R98 */
+	0x0000,     /* R99 */
+	0x0000,     /* R100 */
+	0x0000,     /* R101 */
+	0x0000,     /* R102 */
+	0x0000,     /* R103 */
+	0x0000,     /* R104 */
+	0x0000,     /* R105 */
+	0x0000,     /* R106 */
+	0x0000,     /* R107 */
+	0x0000,     /* R108 */
+	0x0000,     /* R109 */
+	0x0000,     /* R110 */
+	0x0000,     /* R111 */
+	0x0000,     /* R112 */
+	0x0000,     /* R113 */
+	0x0000,     /* R114 */
+	0x0000,     /* R115 */
+	0x0000,     /* R116 */
+	0x0000,     /* R117 */
+	0x0000,     /* R118 */
+	0x0000,     /* R119 */
+	0x0000,     /* R120 */
+	0x0000,     /* R121 */
+	0x0000,     /* R122 */
+	0x0000,     /* R123 */
+	0x0000,     /* R124 */
+	0x0000,     /* R125 */
+	0x0000,     /* R126 */
+	0x0000,     /* R127 */
+	0x0000,     /* R128 */
+	0x0000,     /* R129 */
+	0x0000,     /* R130 */
+	0x0000,     /* R131 */
+	0x0000,     /* R132 */
+	0x0000,     /* R133 */
+	0x0000,     /* R134 */
+	0x0000,     /* R135 */
+	0x0000,     /* R136 */
+	0x0000,     /* R137 */
+	0x0000,     /* R138 */
+	0x0000,     /* R139 */
+	0x0000,     /* R140 */
+	0x0000,     /* R141 */
+	0x0000,     /* R142 */
+	0x0000,     /* R143 */
+	0x0000,     /* R144 */
+	0x0000,     /* R145 */
+	0x0000,     /* R146 */
+	0x0000,     /* R147 */
+	0x0000,     /* R148 */
+	0x0000,     /* R149 */
+	0x0000,     /* R150 */
+	0x0000,     /* R151 */
+	0x0000,     /* R152 */
+	0x0000,     /* R153 */
+	0x0000,     /* R154 */
+	0x0000,     /* R155 */
+	0x0000,     /* R156 */
+	0x0000,     /* R157 */
+	0x0000,     /* R158 */
+	0x0000,     /* R159 */
+	0x0000,     /* R160 */
+	0x0000,     /* R161 */
+	0x0000,     /* R162 */
+	0x0000,     /* R163 */
+	0x0000,     /* R164 */
+	0x0000,     /* R165 */
+	0x0000,     /* R166 */
+	0x0000,     /* R167 */
+	0x0000,     /* R168 */
+	0x0000,     /* R169 */
+	0x0000,     /* R170 */
+	0x0000,     /* R171 */
+	0x0000,     /* R172 */
+	0x0000,     /* R173 */
+	0x0000,     /* R174 */
+	0x0000,     /* R175 */
+	0x0000,     /* R176 */
+	0x0000,     /* R177 */
+	0x0000,     /* R178 */
+	0x0000,     /* R179 */
+	0x0000,     /* R180 */
+	0x0000,     /* R181 */
+	0x0000,     /* R182 */
+	0x0000,     /* R183 */
+	0x0000,     /* R184 */
+	0x0000,     /* R185 */
+	0x0000,     /* R186 */
+	0x0000,     /* R187 */
+	0x0000,     /* R188 */
+	0x0000,     /* R189 */
+	0x0000,     /* R190 */
+	0x0000,     /* R191 */
+	0x0000,     /* R192 */
+	0x0000,     /* R193 */
+	0x0000,     /* R194 */
+	0x0000,     /* R195 */
+	0x0000,     /* R196 */
+	0x0000,     /* R197 */
+	0x0000,     /* R198 */
+	0x0000,     /* R199 */
+	0x0000,     /* R200 */
+	0x0000,     /* R201 */
+	0x0000,     /* R202 */
+	0x0000,     /* R203 */
+	0x0000,     /* R204 */
+	0x0000,     /* R205 */
+	0x0000,     /* R206 */
+	0x0000,     /* R207 */
+	0x0000,     /* R208 */
+	0x0000,     /* R209 */
+	0x0000,     /* R210 */
+	0x0000,     /* R211 */
+	0x0000,     /* R212 */
+	0x0000,     /* R213 */
+	0x0000,     /* R214 */
+	0x0000,     /* R215 */
+	0x0000,     /* R216 */
+	0x0000,     /* R217 */
+	0x0000,     /* R218 */
+	0x0000,     /* R219 */
+	0x0000,     /* R220 */
+	0x0000,     /* R221 */
+	0x0000,     /* R222 */
+	0x0000,     /* R223 */
+	0x0000,     /* R224 */
+	0x0000,     /* R225 */
+	0x0000,     /* R226 */
+	0x0000,     /* R227 */
+	0x0000,     /* R228 */
+	0x0000,     /* R229 */
+	0x0000,     /* R230 */
+	0x0000,     /* R231 */
+	0x0000,     /* R232 */
+	0x0000,     /* R233 */
+	0x0000,     /* R234 */
+	0x0000,     /* R235 */
+	0x0000,     /* R236 */
+	0x0000,     /* R237 */
+	0x0000,     /* R238 */
+	0x0000,     /* R239 */
+	0x0000,     /* R240 */
+	0x0000,     /* R241 */
+	0x0000,     /* R242 */
+	0x0000,     /* R243 */
+	0x0000,     /* R244 */
+	0x0000,     /* R245 */
+	0x0000,     /* R246 */
+	0x0000,     /* R247 */
+	0x0000,     /* R248 */
+	0x0000,     /* R249 */
+	0x0000,     /* R250 */
+	0x0000,     /* R251 */
+	0x0000,     /* R252 */
+	0x0000,     /* R253 */
+	0x0000,     /* R254 */
+	0x0000,     /* R255 */
+	0x0003,     /* R256   - Chip Revision */
+	0x8004,     /* R257   - Control Interface */
+	0x0000,     /* R258 */
+	0x0000,     /* R259 */
+	0x0000,     /* R260 */
+	0x0000,     /* R261 */
+	0x0000,     /* R262 */
+	0x0000,     /* R263 */
+	0x0000,     /* R264 */
+	0x0000,     /* R265 */
+	0x0000,     /* R266 */
+	0x0000,     /* R267 */
+	0x0000,     /* R268 */
+	0x0000,     /* R269 */
+	0x0000,     /* R270 */
+	0x0000,     /* R271 */
+	0x0000,     /* R272   - Write Sequencer Ctrl (1) */
+	0x0000,     /* R273   - Write Sequencer Ctrl (2) */
+	0x0000,     /* R274 */
+	0x0000,     /* R275 */
+	0x0000,     /* R276 */
+	0x0000,     /* R277 */
+	0x0000,     /* R278 */
+	0x0000,     /* R279 */
+	0x0000,     /* R280 */
+	0x0000,     /* R281 */
+	0x0000,     /* R282 */
+	0x0000,     /* R283 */
+	0x0000,     /* R284 */
+	0x0000,     /* R285 */
+	0x0000,     /* R286 */
+	0x0000,     /* R287 */
+	0x0000,     /* R288 */
+	0x0000,     /* R289 */
+	0x0000,     /* R290 */
+	0x0000,     /* R291 */
+	0x0000,     /* R292 */
+	0x0000,     /* R293 */
+	0x0000,     /* R294 */
+	0x0000,     /* R295 */
+	0x0000,     /* R296 */
+	0x0000,     /* R297 */
+	0x0000,     /* R298 */
+	0x0000,     /* R299 */
+	0x0000,     /* R300 */
+	0x0000,     /* R301 */
+	0x0000,     /* R302 */
+	0x0000,     /* R303 */
+	0x0000,     /* R304 */
+	0x0000,     /* R305 */
+	0x0000,     /* R306 */
+	0x0000,     /* R307 */
+	0x0000,     /* R308 */
+	0x0000,     /* R309 */
+	0x0000,     /* R310 */
+	0x0000,     /* R311 */
+	0x0000,     /* R312 */
+	0x0000,     /* R313 */
+	0x0000,     /* R314 */
+	0x0000,     /* R315 */
+	0x0000,     /* R316 */
+	0x0000,     /* R317 */
+	0x0000,     /* R318 */
+	0x0000,     /* R319 */
+	0x0000,     /* R320 */
+	0x0000,     /* R321 */
+	0x0000,     /* R322 */
+	0x0000,     /* R323 */
+	0x0000,     /* R324 */
+	0x0000,     /* R325 */
+	0x0000,     /* R326 */
+	0x0000,     /* R327 */
+	0x0000,     /* R328 */
+	0x0000,     /* R329 */
+	0x0000,     /* R330 */
+	0x0000,     /* R331 */
+	0x0000,     /* R332 */
+	0x0000,     /* R333 */
+	0x0000,     /* R334 */
+	0x0000,     /* R335 */
+	0x0000,     /* R336 */
+	0x0000,     /* R337 */
+	0x0000,     /* R338 */
+	0x0000,     /* R339 */
+	0x0000,     /* R340 */
+	0x0000,     /* R341 */
+	0x0000,     /* R342 */
+	0x0000,     /* R343 */
+	0x0000,     /* R344 */
+	0x0000,     /* R345 */
+	0x0000,     /* R346 */
+	0x0000,     /* R347 */
+	0x0000,     /* R348 */
+	0x0000,     /* R349 */
+	0x0000,     /* R350 */
+	0x0000,     /* R351 */
+	0x0000,     /* R352 */
+	0x0000,     /* R353 */
+	0x0000,     /* R354 */
+	0x0000,     /* R355 */
+	0x0000,     /* R356 */
+	0x0000,     /* R357 */
+	0x0000,     /* R358 */
+	0x0000,     /* R359 */
+	0x0000,     /* R360 */
+	0x0000,     /* R361 */
+	0x0000,     /* R362 */
+	0x0000,     /* R363 */
+	0x0000,     /* R364 */
+	0x0000,     /* R365 */
+	0x0000,     /* R366 */
+	0x0000,     /* R367 */
+	0x0000,     /* R368 */
+	0x0000,     /* R369 */
+	0x0000,     /* R370 */
+	0x0000,     /* R371 */
+	0x0000,     /* R372 */
+	0x0000,     /* R373 */
+	0x0000,     /* R374 */
+	0x0000,     /* R375 */
+	0x0000,     /* R376 */
+	0x0000,     /* R377 */
+	0x0000,     /* R378 */
+	0x0000,     /* R379 */
+	0x0000,     /* R380 */
+	0x0000,     /* R381 */
+	0x0000,     /* R382 */
+	0x0000,     /* R383 */
+	0x0000,     /* R384 */
+	0x0000,     /* R385 */
+	0x0000,     /* R386 */
+	0x0000,     /* R387 */
+	0x0000,     /* R388 */
+	0x0000,     /* R389 */
+	0x0000,     /* R390 */
+	0x0000,     /* R391 */
+	0x0000,     /* R392 */
+	0x0000,     /* R393 */
+	0x0000,     /* R394 */
+	0x0000,     /* R395 */
+	0x0000,     /* R396 */
+	0x0000,     /* R397 */
+	0x0000,     /* R398 */
+	0x0000,     /* R399 */
+	0x0000,     /* R400 */
+	0x0000,     /* R401 */
+	0x0000,     /* R402 */
+	0x0000,     /* R403 */
+	0x0000,     /* R404 */
+	0x0000,     /* R405 */
+	0x0000,     /* R406 */
+	0x0000,     /* R407 */
+	0x0000,     /* R408 */
+	0x0000,     /* R409 */
+	0x0000,     /* R410 */
+	0x0000,     /* R411 */
+	0x0000,     /* R412 */
+	0x0000,     /* R413 */
+	0x0000,     /* R414 */
+	0x0000,     /* R415 */
+	0x0000,     /* R416 */
+	0x0000,     /* R417 */
+	0x0000,     /* R418 */
+	0x0000,     /* R419 */
+	0x0000,     /* R420 */
+	0x0000,     /* R421 */
+	0x0000,     /* R422 */
+	0x0000,     /* R423 */
+	0x0000,     /* R424 */
+	0x0000,     /* R425 */
+	0x0000,     /* R426 */
+	0x0000,     /* R427 */
+	0x0000,     /* R428 */
+	0x0000,     /* R429 */
+	0x0000,     /* R430 */
+	0x0000,     /* R431 */
+	0x0000,     /* R432 */
+	0x0000,     /* R433 */
+	0x0000,     /* R434 */
+	0x0000,     /* R435 */
+	0x0000,     /* R436 */
+	0x0000,     /* R437 */
+	0x0000,     /* R438 */
+	0x0000,     /* R439 */
+	0x0000,     /* R440 */
+	0x0000,     /* R441 */
+	0x0000,     /* R442 */
+	0x0000,     /* R443 */
+	0x0000,     /* R444 */
+	0x0000,     /* R445 */
+	0x0000,     /* R446 */
+	0x0000,     /* R447 */
+	0x0000,     /* R448 */
+	0x0000,     /* R449 */
+	0x0000,     /* R450 */
+	0x0000,     /* R451 */
+	0x0000,     /* R452 */
+	0x0000,     /* R453 */
+	0x0000,     /* R454 */
+	0x0000,     /* R455 */
+	0x0000,     /* R456 */
+	0x0000,     /* R457 */
+	0x0000,     /* R458 */
+	0x0000,     /* R459 */
+	0x0000,     /* R460 */
+	0x0000,     /* R461 */
+	0x0000,     /* R462 */
+	0x0000,     /* R463 */
+	0x0000,     /* R464 */
+	0x0000,     /* R465 */
+	0x0000,     /* R466 */
+	0x0000,     /* R467 */
+	0x0000,     /* R468 */
+	0x0000,     /* R469 */
+	0x0000,     /* R470 */
+	0x0000,     /* R471 */
+	0x0000,     /* R472 */
+	0x0000,     /* R473 */
+	0x0000,     /* R474 */
+	0x0000,     /* R475 */
+	0x0000,     /* R476 */
+	0x0000,     /* R477 */
+	0x0000,     /* R478 */
+	0x0000,     /* R479 */
+	0x0000,     /* R480 */
+	0x0000,     /* R481 */
+	0x0000,     /* R482 */
+	0x0000,     /* R483 */
+	0x0000,     /* R484 */
+	0x0000,     /* R485 */
+	0x0000,     /* R486 */
+	0x0000,     /* R487 */
+	0x0000,     /* R488 */
+	0x0000,     /* R489 */
+	0x0000,     /* R490 */
+	0x0000,     /* R491 */
+	0x0000,     /* R492 */
+	0x0000,     /* R493 */
+	0x0000,     /* R494 */
+	0x0000,     /* R495 */
+	0x0000,     /* R496 */
+	0x0000,     /* R497 */
+	0x0000,     /* R498 */
+	0x0000,     /* R499 */
+	0x0000,     /* R500 */
+	0x0000,     /* R501 */
+	0x0000,     /* R502 */
+	0x0000,     /* R503 */
+	0x0000,     /* R504 */
+	0x0000,     /* R505 */
+	0x0000,     /* R506 */
+	0x0000,     /* R507 */
+	0x0000,     /* R508 */
+	0x0000,     /* R509 */
+	0x0000,     /* R510 */
+	0x0000,     /* R511 */
+	0x0000,     /* R512   - AIF1 Clocking (1) */
+	0x0000,     /* R513   - AIF1 Clocking (2) */
+	0x0000,     /* R514 */
+	0x0000,     /* R515 */
+	0x0000,     /* R516   - AIF2 Clocking (1) */
+	0x0000,     /* R517   - AIF2 Clocking (2) */
+	0x0000,     /* R518 */
+	0x0000,     /* R519 */
+	0x0000,     /* R520   - Clocking (1) */
+	0x0000,     /* R521   - Clocking (2) */
+	0x0000,     /* R522 */
+	0x0000,     /* R523 */
+	0x0000,     /* R524 */
+	0x0000,     /* R525 */
+	0x0000,     /* R526 */
+	0x0000,     /* R527 */
+	0x0083,     /* R528   - AIF1 Rate */
+	0x0083,     /* R529   - AIF2 Rate */
+	0x0000,     /* R530   - Rate Status */
+	0x0000,     /* R531 */
+	0x0000,     /* R532 */
+	0x0000,     /* R533 */
+	0x0000,     /* R534 */
+	0x0000,     /* R535 */
+	0x0000,     /* R536 */
+	0x0000,     /* R537 */
+	0x0000,     /* R538 */
+	0x0000,     /* R539 */
+	0x0000,     /* R540 */
+	0x0000,     /* R541 */
+	0x0000,     /* R542 */
+	0x0000,     /* R543 */
+	0x0000,     /* R544   - FLL1 Control (1) */
+	0x0000,     /* R545   - FLL1 Control (2) */
+	0x0000,     /* R546   - FLL1 Control (3) */
+	0x0000,     /* R547   - FLL1 Control (4) */
+	0x0C80,     /* R548   - FLL1 Control (5) */
+	0x0000,     /* R549 */
+	0x0000,     /* R550 */
+	0x0000,     /* R551 */
+	0x0000,     /* R552 */
+	0x0000,     /* R553 */
+	0x0000,     /* R554 */
+	0x0000,     /* R555 */
+	0x0000,     /* R556 */
+	0x0000,     /* R557 */
+	0x0000,     /* R558 */
+	0x0000,     /* R559 */
+	0x0000,     /* R560 */
+	0x0000,     /* R561 */
+	0x0000,     /* R562 */
+	0x0000,     /* R563 */
+	0x0000,     /* R564 */
+	0x0000,     /* R565 */
+	0x0000,     /* R566 */
+	0x0000,     /* R567 */
+	0x0000,     /* R568 */
+	0x0000,     /* R569 */
+	0x0000,     /* R570 */
+	0x0000,     /* R571 */
+	0x0000,     /* R572 */
+	0x0000,     /* R573 */
+	0x0000,     /* R574 */
+	0x0000,     /* R575 */
+	0x0000,     /* R576   - FLL2 Control (1) */
+	0x0000,     /* R577   - FLL2 Control (2) */
+	0x0000,     /* R578   - FLL2 Control (3) */
+	0x0000,     /* R579   - FLL2 Control (4) */
+	0x0C80,     /* R580   - FLL2 Control (5) */
+	0x0000,     /* R581 */
+	0x0000,     /* R582 */
+	0x0000,     /* R583 */
+	0x0000,     /* R584 */
+	0x0000,     /* R585 */
+	0x0000,     /* R586 */
+	0x0000,     /* R587 */
+	0x0000,     /* R588 */
+	0x0000,     /* R589 */
+	0x0000,     /* R590 */
+	0x0000,     /* R591 */
+	0x0000,     /* R592 */
+	0x0000,     /* R593 */
+	0x0000,     /* R594 */
+	0x0000,     /* R595 */
+	0x0000,     /* R596 */
+	0x0000,     /* R597 */
+	0x0000,     /* R598 */
+	0x0000,     /* R599 */
+	0x0000,     /* R600 */
+	0x0000,     /* R601 */
+	0x0000,     /* R602 */
+	0x0000,     /* R603 */
+	0x0000,     /* R604 */
+	0x0000,     /* R605 */
+	0x0000,     /* R606 */
+	0x0000,     /* R607 */
+	0x0000,     /* R608 */
+	0x0000,     /* R609 */
+	0x0000,     /* R610 */
+	0x0000,     /* R611 */
+	0x0000,     /* R612 */
+	0x0000,     /* R613 */
+	0x0000,     /* R614 */
+	0x0000,     /* R615 */
+	0x0000,     /* R616 */
+	0x0000,     /* R617 */
+	0x0000,     /* R618 */
+	0x0000,     /* R619 */
+	0x0000,     /* R620 */
+	0x0000,     /* R621 */
+	0x0000,     /* R622 */
+	0x0000,     /* R623 */
+	0x0000,     /* R624 */
+	0x0000,     /* R625 */
+	0x0000,     /* R626 */
+	0x0000,     /* R627 */
+	0x0000,     /* R628 */
+	0x0000,     /* R629 */
+	0x0000,     /* R630 */
+	0x0000,     /* R631 */
+	0x0000,     /* R632 */
+	0x0000,     /* R633 */
+	0x0000,     /* R634 */
+	0x0000,     /* R635 */
+	0x0000,     /* R636 */
+	0x0000,     /* R637 */
+	0x0000,     /* R638 */
+	0x0000,     /* R639 */
+	0x0000,     /* R640 */
+	0x0000,     /* R641 */
+	0x0000,     /* R642 */
+	0x0000,     /* R643 */
+	0x0000,     /* R644 */
+	0x0000,     /* R645 */
+	0x0000,     /* R646 */
+	0x0000,     /* R647 */
+	0x0000,     /* R648 */
+	0x0000,     /* R649 */
+	0x0000,     /* R650 */
+	0x0000,     /* R651 */
+	0x0000,     /* R652 */
+	0x0000,     /* R653 */
+	0x0000,     /* R654 */
+	0x0000,     /* R655 */
+	0x0000,     /* R656 */
+	0x0000,     /* R657 */
+	0x0000,     /* R658 */
+	0x0000,     /* R659 */
+	0x0000,     /* R660 */
+	0x0000,     /* R661 */
+	0x0000,     /* R662 */
+	0x0000,     /* R663 */
+	0x0000,     /* R664 */
+	0x0000,     /* R665 */
+	0x0000,     /* R666 */
+	0x0000,     /* R667 */
+	0x0000,     /* R668 */
+	0x0000,     /* R669 */
+	0x0000,     /* R670 */
+	0x0000,     /* R671 */
+	0x0000,     /* R672 */
+	0x0000,     /* R673 */
+	0x0000,     /* R674 */
+	0x0000,     /* R675 */
+	0x0000,     /* R676 */
+	0x0000,     /* R677 */
+	0x0000,     /* R678 */
+	0x0000,     /* R679 */
+	0x0000,     /* R680 */
+	0x0000,     /* R681 */
+	0x0000,     /* R682 */
+	0x0000,     /* R683 */
+	0x0000,     /* R684 */
+	0x0000,     /* R685 */
+	0x0000,     /* R686 */
+	0x0000,     /* R687 */
+	0x0000,     /* R688 */
+	0x0000,     /* R689 */
+	0x0000,     /* R690 */
+	0x0000,     /* R691 */
+	0x0000,     /* R692 */
+	0x0000,     /* R693 */
+	0x0000,     /* R694 */
+	0x0000,     /* R695 */
+	0x0000,     /* R696 */
+	0x0000,     /* R697 */
+	0x0000,     /* R698 */
+	0x0000,     /* R699 */
+	0x0000,     /* R700 */
+	0x0000,     /* R701 */
+	0x0000,     /* R702 */
+	0x0000,     /* R703 */
+	0x0000,     /* R704 */
+	0x0000,     /* R705 */
+	0x0000,     /* R706 */
+	0x0000,     /* R707 */
+	0x0000,     /* R708 */
+	0x0000,     /* R709 */
+	0x0000,     /* R710 */
+	0x0000,     /* R711 */
+	0x0000,     /* R712 */
+	0x0000,     /* R713 */
+	0x0000,     /* R714 */
+	0x0000,     /* R715 */
+	0x0000,     /* R716 */
+	0x0000,     /* R717 */
+	0x0000,     /* R718 */
+	0x0000,     /* R719 */
+	0x0000,     /* R720 */
+	0x0000,     /* R721 */
+	0x0000,     /* R722 */
+	0x0000,     /* R723 */
+	0x0000,     /* R724 */
+	0x0000,     /* R725 */
+	0x0000,     /* R726 */
+	0x0000,     /* R727 */
+	0x0000,     /* R728 */
+	0x0000,     /* R729 */
+	0x0000,     /* R730 */
+	0x0000,     /* R731 */
+	0x0000,     /* R732 */
+	0x0000,     /* R733 */
+	0x0000,     /* R734 */
+	0x0000,     /* R735 */
+	0x0000,     /* R736 */
+	0x0000,     /* R737 */
+	0x0000,     /* R738 */
+	0x0000,     /* R739 */
+	0x0000,     /* R740 */
+	0x0000,     /* R741 */
+	0x0000,     /* R742 */
+	0x0000,     /* R743 */
+	0x0000,     /* R744 */
+	0x0000,     /* R745 */
+	0x0000,     /* R746 */
+	0x0000,     /* R747 */
+	0x0000,     /* R748 */
+	0x0000,     /* R749 */
+	0x0000,     /* R750 */
+	0x0000,     /* R751 */
+	0x0000,     /* R752 */
+	0x0000,     /* R753 */
+	0x0000,     /* R754 */
+	0x0000,     /* R755 */
+	0x0000,     /* R756 */
+	0x0000,     /* R757 */
+	0x0000,     /* R758 */
+	0x0000,     /* R759 */
+	0x0000,     /* R760 */
+	0x0000,     /* R761 */
+	0x0000,     /* R762 */
+	0x0000,     /* R763 */
+	0x0000,     /* R764 */
+	0x0000,     /* R765 */
+	0x0000,     /* R766 */
+	0x0000,     /* R767 */
+	0x4050,     /* R768   - AIF1 Control (1) */
+	0x4000,     /* R769   - AIF1 Control (2) */
+	0x0000,     /* R770   - AIF1 Master/Slave */
+	0x0040,     /* R771   - AIF1 BCLK */
+	0x0040,     /* R772   - AIF1ADC LRCLK */
+	0x0040,     /* R773   - AIF1DAC LRCLK */
+	0x0004,     /* R774   - AIF1DAC Data */
+	0x0100,     /* R775   - AIF1ADC Data */
+	0x0000,     /* R776 */
+	0x0000,     /* R777 */
+	0x0000,     /* R778 */
+	0x0000,     /* R779 */
+	0x0000,     /* R780 */
+	0x0000,     /* R781 */
+	0x0000,     /* R782 */
+	0x0000,     /* R783 */
+	0x4050,     /* R784   - AIF2 Control (1) */
+	0x4000,     /* R785   - AIF2 Control (2) */
+	0x0000,     /* R786   - AIF2 Master/Slave */
+	0x0040,     /* R787   - AIF2 BCLK */
+	0x0040,     /* R788   - AIF2ADC LRCLK */
+	0x0040,     /* R789   - AIF2DAC LRCLK */
+	0x0000,     /* R790   - AIF2DAC Data */
+	0x0000,     /* R791   - AIF2ADC Data */
+	0x0000,     /* R792 */
+	0x0000,     /* R793 */
+	0x0000,     /* R794 */
+	0x0000,     /* R795 */
+	0x0000,     /* R796 */
+	0x0000,     /* R797 */
+	0x0000,     /* R798 */
+	0x0000,     /* R799 */
+	0x0000,     /* R800 */
+	0x0000,     /* R801 */
+	0x0000,     /* R802 */
+	0x0000,     /* R803 */
+	0x0000,     /* R804 */
+	0x0000,     /* R805 */
+	0x0000,     /* R806 */
+	0x0000,     /* R807 */
+	0x0000,     /* R808 */
+	0x0000,     /* R809 */
+	0x0000,     /* R810 */
+	0x0000,     /* R811 */
+	0x0000,     /* R812 */
+	0x0000,     /* R813 */
+	0x0000,     /* R814 */
+	0x0000,     /* R815 */
+	0x0000,     /* R816 */
+	0x0000,     /* R817 */
+	0x0000,     /* R818 */
+	0x0000,     /* R819 */
+	0x0000,     /* R820 */
+	0x0000,     /* R821 */
+	0x0000,     /* R822 */
+	0x0000,     /* R823 */
+	0x0000,     /* R824 */
+	0x0000,     /* R825 */
+	0x0000,     /* R826 */
+	0x0000,     /* R827 */
+	0x0000,     /* R828 */
+	0x0000,     /* R829 */
+	0x0000,     /* R830 */
+	0x0000,     /* R831 */
+	0x0000,     /* R832 */
+	0x0000,     /* R833 */
+	0x0000,     /* R834 */
+	0x0000,     /* R835 */
+	0x0000,     /* R836 */
+	0x0000,     /* R837 */
+	0x0000,     /* R838 */
+	0x0000,     /* R839 */
+	0x0000,     /* R840 */
+	0x0000,     /* R841 */
+	0x0000,     /* R842 */
+	0x0000,     /* R843 */
+	0x0000,     /* R844 */
+	0x0000,     /* R845 */
+	0x0000,     /* R846 */
+	0x0000,     /* R847 */
+	0x0000,     /* R848 */
+	0x0000,     /* R849 */
+	0x0000,     /* R850 */
+	0x0000,     /* R851 */
+	0x0000,     /* R852 */
+	0x0000,     /* R853 */
+	0x0000,     /* R854 */
+	0x0000,     /* R855 */
+	0x0000,     /* R856 */
+	0x0000,     /* R857 */
+	0x0000,     /* R858 */
+	0x0000,     /* R859 */
+	0x0000,     /* R860 */
+	0x0000,     /* R861 */
+	0x0000,     /* R862 */
+	0x0000,     /* R863 */
+	0x0000,     /* R864 */
+	0x0000,     /* R865 */
+	0x0000,     /* R866 */
+	0x0000,     /* R867 */
+	0x0000,     /* R868 */
+	0x0000,     /* R869 */
+	0x0000,     /* R870 */
+	0x0000,     /* R871 */
+	0x0000,     /* R872 */
+	0x0000,     /* R873 */
+	0x0000,     /* R874 */
+	0x0000,     /* R875 */
+	0x0000,     /* R876 */
+	0x0000,     /* R877 */
+	0x0000,     /* R878 */
+	0x0000,     /* R879 */
+	0x0000,     /* R880 */
+	0x0000,     /* R881 */
+	0x0000,     /* R882 */
+	0x0000,     /* R883 */
+	0x0000,     /* R884 */
+	0x0000,     /* R885 */
+	0x0000,     /* R886 */
+	0x0000,     /* R887 */
+	0x0000,     /* R888 */
+	0x0000,     /* R889 */
+	0x0000,     /* R890 */
+	0x0000,     /* R891 */
+	0x0000,     /* R892 */
+	0x0000,     /* R893 */
+	0x0000,     /* R894 */
+	0x0000,     /* R895 */
+	0x0000,     /* R896 */
+	0x0000,     /* R897 */
+	0x0000,     /* R898 */
+	0x0000,     /* R899 */
+	0x0000,     /* R900 */
+	0x0000,     /* R901 */
+	0x0000,     /* R902 */
+	0x0000,     /* R903 */
+	0x0000,     /* R904 */
+	0x0000,     /* R905 */
+	0x0000,     /* R906 */
+	0x0000,     /* R907 */
+	0x0000,     /* R908 */
+	0x0000,     /* R909 */
+	0x0000,     /* R910 */
+	0x0000,     /* R911 */
+	0x0000,     /* R912 */
+	0x0000,     /* R913 */
+	0x0000,     /* R914 */
+	0x0000,     /* R915 */
+	0x0000,     /* R916 */
+	0x0000,     /* R917 */
+	0x0000,     /* R918 */
+	0x0000,     /* R919 */
+	0x0000,     /* R920 */
+	0x0000,     /* R921 */
+	0x0000,     /* R922 */
+	0x0000,     /* R923 */
+	0x0000,     /* R924 */
+	0x0000,     /* R925 */
+	0x0000,     /* R926 */
+	0x0000,     /* R927 */
+	0x0000,     /* R928 */
+	0x0000,     /* R929 */
+	0x0000,     /* R930 */
+	0x0000,     /* R931 */
+	0x0000,     /* R932 */
+	0x0000,     /* R933 */
+	0x0000,     /* R934 */
+	0x0000,     /* R935 */
+	0x0000,     /* R936 */
+	0x0000,     /* R937 */
+	0x0000,     /* R938 */
+	0x0000,     /* R939 */
+	0x0000,     /* R940 */
+	0x0000,     /* R941 */
+	0x0000,     /* R942 */
+	0x0000,     /* R943 */
+	0x0000,     /* R944 */
+	0x0000,     /* R945 */
+	0x0000,     /* R946 */
+	0x0000,     /* R947 */
+	0x0000,     /* R948 */
+	0x0000,     /* R949 */
+	0x0000,     /* R950 */
+	0x0000,     /* R951 */
+	0x0000,     /* R952 */
+	0x0000,     /* R953 */
+	0x0000,     /* R954 */
+	0x0000,     /* R955 */
+	0x0000,     /* R956 */
+	0x0000,     /* R957 */
+	0x0000,     /* R958 */
+	0x0000,     /* R959 */
+	0x0000,     /* R960 */
+	0x0000,     /* R961 */
+	0x0000,     /* R962 */
+	0x0000,     /* R963 */
+	0x0000,     /* R964 */
+	0x0000,     /* R965 */
+	0x0000,     /* R966 */
+	0x0000,     /* R967 */
+	0x0000,     /* R968 */
+	0x0000,     /* R969 */
+	0x0000,     /* R970 */
+	0x0000,     /* R971 */
+	0x0000,     /* R972 */
+	0x0000,     /* R973 */
+	0x0000,     /* R974 */
+	0x0000,     /* R975 */
+	0x0000,     /* R976 */
+	0x0000,     /* R977 */
+	0x0000,     /* R978 */
+	0x0000,     /* R979 */
+	0x0000,     /* R980 */
+	0x0000,     /* R981 */
+	0x0000,     /* R982 */
+	0x0000,     /* R983 */
+	0x0000,     /* R984 */
+	0x0000,     /* R985 */
+	0x0000,     /* R986 */
+	0x0000,     /* R987 */
+	0x0000,     /* R988 */
+	0x0000,     /* R989 */
+	0x0000,     /* R990 */
+	0x0000,     /* R991 */
+	0x0000,     /* R992 */
+	0x0000,     /* R993 */
+	0x0000,     /* R994 */
+	0x0000,     /* R995 */
+	0x0000,     /* R996 */
+	0x0000,     /* R997 */
+	0x0000,     /* R998 */
+	0x0000,     /* R999 */
+	0x0000,     /* R1000 */
+	0x0000,     /* R1001 */
+	0x0000,     /* R1002 */
+	0x0000,     /* R1003 */
+	0x0000,     /* R1004 */
+	0x0000,     /* R1005 */
+	0x0000,     /* R1006 */
+	0x0000,     /* R1007 */
+	0x0000,     /* R1008 */
+	0x0000,     /* R1009 */
+	0x0000,     /* R1010 */
+	0x0000,     /* R1011 */
+	0x0000,     /* R1012 */
+	0x0000,     /* R1013 */
+	0x0000,     /* R1014 */
+	0x0000,     /* R1015 */
+	0x0000,     /* R1016 */
+	0x0000,     /* R1017 */
+	0x0000,     /* R1018 */
+	0x0000,     /* R1019 */
+	0x0000,     /* R1020 */
+	0x0000,     /* R1021 */
+	0x0000,     /* R1022 */
+	0x0000,     /* R1023 */
+	0x00C0,     /* R1024  - AIF1 ADC1 Left Volume */
+	0x00C0,     /* R1025  - AIF1 ADC1 Right Volume */
+	0x00C0,     /* R1026  - AIF1 DAC1 Left Volume */
+	0x00C0,     /* R1027  - AIF1 DAC1 Right Volume */
+	0x00C0,     /* R1028  - AIF1 ADC2 Left Volume */
+	0x00C0,     /* R1029  - AIF1 ADC2 Right Volume */
+	0x00C0,     /* R1030  - AIF1 DAC2 Left Volume */
+	0x00C0,     /* R1031  - AIF1 DAC2 Right Volume */
+	0x0000,     /* R1032 */
+	0x0000,     /* R1033 */
+	0x0000,     /* R1034 */
+	0x0000,     /* R1035 */
+	0x0000,     /* R1036 */
+	0x0000,     /* R1037 */
+	0x0000,     /* R1038 */
+	0x0000,     /* R1039 */
+	0x0000,     /* R1040  - AIF1 ADC1 Filters */
+	0x0000,     /* R1041  - AIF1 ADC2 Filters */
+	0x0000,     /* R1042 */
+	0x0000,     /* R1043 */
+	0x0000,     /* R1044 */
+	0x0000,     /* R1045 */
+	0x0000,     /* R1046 */
+	0x0000,     /* R1047 */
+	0x0000,     /* R1048 */
+	0x0000,     /* R1049 */
+	0x0000,     /* R1050 */
+	0x0000,     /* R1051 */
+	0x0000,     /* R1052 */
+	0x0000,     /* R1053 */
+	0x0000,     /* R1054 */
+	0x0000,     /* R1055 */
+	0x0200,     /* R1056  - AIF1 DAC1 Filters (1) */
+	0x0010,     /* R1057  - AIF1 DAC1 Filters (2) */
+	0x0200,     /* R1058  - AIF1 DAC2 Filters (1) */
+	0x0010,     /* R1059  - AIF1 DAC2 Filters (2) */
+	0x0000,     /* R1060 */
+	0x0000,     /* R1061 */
+	0x0000,     /* R1062 */
+	0x0000,     /* R1063 */
+	0x0000,     /* R1064 */
+	0x0000,     /* R1065 */
+	0x0000,     /* R1066 */
+	0x0000,     /* R1067 */
+	0x0000,     /* R1068 */
+	0x0000,     /* R1069 */
+	0x0000,     /* R1070 */
+	0x0000,     /* R1071 */
+	0x0000,     /* R1072 */
+	0x0000,     /* R1073 */
+	0x0000,     /* R1074 */
+	0x0000,     /* R1075 */
+	0x0000,     /* R1076 */
+	0x0000,     /* R1077 */
+	0x0000,     /* R1078 */
+	0x0000,     /* R1079 */
+	0x0000,     /* R1080 */
+	0x0000,     /* R1081 */
+	0x0000,     /* R1082 */
+	0x0000,     /* R1083 */
+	0x0000,     /* R1084 */
+	0x0000,     /* R1085 */
+	0x0000,     /* R1086 */
+	0x0000,     /* R1087 */
+	0x0098,     /* R1088  - AIF1 DRC1 (1) */
+	0x0845,     /* R1089  - AIF1 DRC1 (2) */
+	0x0000,     /* R1090  - AIF1 DRC1 (3) */
+	0x0000,     /* R1091  - AIF1 DRC1 (4) */
+	0x0000,     /* R1092  - AIF1 DRC1 (5) */
+	0x0000,     /* R1093 */
+	0x0000,     /* R1094 */
+	0x0000,     /* R1095 */
+	0x0000,     /* R1096 */
+	0x0000,     /* R1097 */
+	0x0000,     /* R1098 */
+	0x0000,     /* R1099 */
+	0x0000,     /* R1100 */
+	0x0000,     /* R1101 */
+	0x0000,     /* R1102 */
+	0x0000,     /* R1103 */
+	0x0098,     /* R1104  - AIF1 DRC2 (1) */
+	0x0845,     /* R1105  - AIF1 DRC2 (2) */
+	0x0000,     /* R1106  - AIF1 DRC2 (3) */
+	0x0000,     /* R1107  - AIF1 DRC2 (4) */
+	0x0000,     /* R1108  - AIF1 DRC2 (5) */
+	0x0000,     /* R1109 */
+	0x0000,     /* R1110 */
+	0x0000,     /* R1111 */
+	0x0000,     /* R1112 */
+	0x0000,     /* R1113 */
+	0x0000,     /* R1114 */
+	0x0000,     /* R1115 */
+	0x0000,     /* R1116 */
+	0x0000,     /* R1117 */
+	0x0000,     /* R1118 */
+	0x0000,     /* R1119 */
+	0x0000,     /* R1120 */
+	0x0000,     /* R1121 */
+	0x0000,     /* R1122 */
+	0x0000,     /* R1123 */
+	0x0000,     /* R1124 */
+	0x0000,     /* R1125 */
+	0x0000,     /* R1126 */
+	0x0000,     /* R1127 */
+	0x0000,     /* R1128 */
+	0x0000,     /* R1129 */
+	0x0000,     /* R1130 */
+	0x0000,     /* R1131 */
+	0x0000,     /* R1132 */
+	0x0000,     /* R1133 */
+	0x0000,     /* R1134 */
+	0x0000,     /* R1135 */
+	0x0000,     /* R1136 */
+	0x0000,     /* R1137 */
+	0x0000,     /* R1138 */
+	0x0000,     /* R1139 */
+	0x0000,     /* R1140 */
+	0x0000,     /* R1141 */
+	0x0000,     /* R1142 */
+	0x0000,     /* R1143 */
+	0x0000,     /* R1144 */
+	0x0000,     /* R1145 */
+	0x0000,     /* R1146 */
+	0x0000,     /* R1147 */
+	0x0000,     /* R1148 */
+	0x0000,     /* R1149 */
+	0x0000,     /* R1150 */
+	0x0000,     /* R1151 */
+	0x6318,     /* R1152  - AIF1 DAC1 EQ Gains (1) */
+	0x6300,     /* R1153  - AIF1 DAC1 EQ Gains (2) */
+	0x0FCA,     /* R1154  - AIF1 DAC1 EQ Band 1 A */
+	0x0400,     /* R1155  - AIF1 DAC1 EQ Band 1 B */
+	0x00D8,     /* R1156  - AIF1 DAC1 EQ Band 1 PG */
+	0x1EB5,     /* R1157  - AIF1 DAC1 EQ Band 2 A */
+	0xF145,     /* R1158  - AIF1 DAC1 EQ Band 2 B */
+	0x0B75,     /* R1159  - AIF1 DAC1 EQ Band 2 C */
+	0x01C5,     /* R1160  - AIF1 DAC1 EQ Band 2 PG */
+	0x1C58,     /* R1161  - AIF1 DAC1 EQ Band 3 A */
+	0xF373,     /* R1162  - AIF1 DAC1 EQ Band 3 B */
+	0x0A54,     /* R1163  - AIF1 DAC1 EQ Band 3 C */
+	0x0558,     /* R1164  - AIF1 DAC1 EQ Band 3 PG */
+	0x168E,     /* R1165  - AIF1 DAC1 EQ Band 4 A */
+	0xF829,     /* R1166  - AIF1 DAC1 EQ Band 4 B */
+	0x07AD,     /* R1167  - AIF1 DAC1 EQ Band 4 C */
+	0x1103,     /* R1168  - AIF1 DAC1 EQ Band 4 PG */
+	0x0564,     /* R1169  - AIF1 DAC1 EQ Band 5 A */
+	0x0559,     /* R1170  - AIF1 DAC1 EQ Band 5 B */
+	0x4000,     /* R1171  - AIF1 DAC1 EQ Band 5 PG */
+	0x0000,     /* R1172 */
+	0x0000,     /* R1173 */
+	0x0000,     /* R1174 */
+	0x0000,     /* R1175 */
+	0x0000,     /* R1176 */
+	0x0000,     /* R1177 */
+	0x0000,     /* R1178 */
+	0x0000,     /* R1179 */
+	0x0000,     /* R1180 */
+	0x0000,     /* R1181 */
+	0x0000,     /* R1182 */
+	0x0000,     /* R1183 */
+	0x6318,     /* R1184  - AIF1 DAC2 EQ Gains (1) */
+	0x6300,     /* R1185  - AIF1 DAC2 EQ Gains (2) */
+	0x0FCA,     /* R1186  - AIF1 DAC2 EQ Band 1 A */
+	0x0400,     /* R1187  - AIF1 DAC2 EQ Band 1 B */
+	0x00D8,     /* R1188  - AIF1 DAC2 EQ Band 1 PG */
+	0x1EB5,     /* R1189  - AIF1 DAC2 EQ Band 2 A */
+	0xF145,     /* R1190  - AIF1 DAC2 EQ Band 2 B */
+	0x0B75,     /* R1191  - AIF1 DAC2 EQ Band 2 C */
+	0x01C5,     /* R1192  - AIF1 DAC2 EQ Band 2 PG */
+	0x1C58,     /* R1193  - AIF1 DAC2 EQ Band 3 A */
+	0xF373,     /* R1194  - AIF1 DAC2 EQ Band 3 B */
+	0x0A54,     /* R1195  - AIF1 DAC2 EQ Band 3 C */
+	0x0558,     /* R1196  - AIF1 DAC2 EQ Band 3 PG */
+	0x168E,     /* R1197  - AIF1 DAC2 EQ Band 4 A */
+	0xF829,     /* R1198  - AIF1 DAC2 EQ Band 4 B */
+	0x07AD,     /* R1199  - AIF1 DAC2 EQ Band 4 C */
+	0x1103,     /* R1200  - AIF1 DAC2 EQ Band 4 PG */
+	0x0564,     /* R1201  - AIF1 DAC2 EQ Band 5 A */
+	0x0559,     /* R1202  - AIF1 DAC2 EQ Band 5 B */
+	0x4000,     /* R1203  - AIF1 DAC2 EQ Band 5 PG */
+	0x0000,     /* R1204 */
+	0x0000,     /* R1205 */
+	0x0000,     /* R1206 */
+	0x0000,     /* R1207 */
+	0x0000,     /* R1208 */
+	0x0000,     /* R1209 */
+	0x0000,     /* R1210 */
+	0x0000,     /* R1211 */
+	0x0000,     /* R1212 */
+	0x0000,     /* R1213 */
+	0x0000,     /* R1214 */
+	0x0000,     /* R1215 */
+	0x0000,     /* R1216 */
+	0x0000,     /* R1217 */
+	0x0000,     /* R1218 */
+	0x0000,     /* R1219 */
+	0x0000,     /* R1220 */
+	0x0000,     /* R1221 */
+	0x0000,     /* R1222 */
+	0x0000,     /* R1223 */
+	0x0000,     /* R1224 */
+	0x0000,     /* R1225 */
+	0x0000,     /* R1226 */
+	0x0000,     /* R1227 */
+	0x0000,     /* R1228 */
+	0x0000,     /* R1229 */
+	0x0000,     /* R1230 */
+	0x0000,     /* R1231 */
+	0x0000,     /* R1232 */
+	0x0000,     /* R1233 */
+	0x0000,     /* R1234 */
+	0x0000,     /* R1235 */
+	0x0000,     /* R1236 */
+	0x0000,     /* R1237 */
+	0x0000,     /* R1238 */
+	0x0000,     /* R1239 */
+	0x0000,     /* R1240 */
+	0x0000,     /* R1241 */
+	0x0000,     /* R1242 */
+	0x0000,     /* R1243 */
+	0x0000,     /* R1244 */
+	0x0000,     /* R1245 */
+	0x0000,     /* R1246 */
+	0x0000,     /* R1247 */
+	0x0000,     /* R1248 */
+	0x0000,     /* R1249 */
+	0x0000,     /* R1250 */
+	0x0000,     /* R1251 */
+	0x0000,     /* R1252 */
+	0x0000,     /* R1253 */
+	0x0000,     /* R1254 */
+	0x0000,     /* R1255 */
+	0x0000,     /* R1256 */
+	0x0000,     /* R1257 */
+	0x0000,     /* R1258 */
+	0x0000,     /* R1259 */
+	0x0000,     /* R1260 */
+	0x0000,     /* R1261 */
+	0x0000,     /* R1262 */
+	0x0000,     /* R1263 */
+	0x0000,     /* R1264 */
+	0x0000,     /* R1265 */
+	0x0000,     /* R1266 */
+	0x0000,     /* R1267 */
+	0x0000,     /* R1268 */
+	0x0000,     /* R1269 */
+	0x0000,     /* R1270 */
+	0x0000,     /* R1271 */
+	0x0000,     /* R1272 */
+	0x0000,     /* R1273 */
+	0x0000,     /* R1274 */
+	0x0000,     /* R1275 */
+	0x0000,     /* R1276 */
+	0x0000,     /* R1277 */
+	0x0000,     /* R1278 */
+	0x0000,     /* R1279 */
+	0x00C0,     /* R1280  - AIF2 ADC Left Volume */
+	0x00C0,     /* R1281  - AIF2 ADC Right Volume */
+	0x00C0,     /* R1282  - AIF2 DAC Left Volume */
+	0x00C0,     /* R1283  - AIF2 DAC Right Volume */
+	0x0000,     /* R1284 */
+	0x0000,     /* R1285 */
+	0x0000,     /* R1286 */
+	0x0000,     /* R1287 */
+	0x0000,     /* R1288 */
+	0x0000,     /* R1289 */
+	0x0000,     /* R1290 */
+	0x0000,     /* R1291 */
+	0x0000,     /* R1292 */
+	0x0000,     /* R1293 */
+	0x0000,     /* R1294 */
+	0x0000,     /* R1295 */
+	0x0000,     /* R1296  - AIF2 ADC Filters */
+	0x0000,     /* R1297 */
+	0x0000,     /* R1298 */
+	0x0000,     /* R1299 */
+	0x0000,     /* R1300 */
+	0x0000,     /* R1301 */
+	0x0000,     /* R1302 */
+	0x0000,     /* R1303 */
+	0x0000,     /* R1304 */
+	0x0000,     /* R1305 */
+	0x0000,     /* R1306 */
+	0x0000,     /* R1307 */
+	0x0000,     /* R1308 */
+	0x0000,     /* R1309 */
+	0x0000,     /* R1310 */
+	0x0000,     /* R1311 */
+	0x0200,     /* R1312  - AIF2 DAC Filters (1) */
+	0x0010,     /* R1313  - AIF2 DAC Filters (2) */
+	0x0000,     /* R1314 */
+	0x0000,     /* R1315 */
+	0x0000,     /* R1316 */
+	0x0000,     /* R1317 */
+	0x0000,     /* R1318 */
+	0x0000,     /* R1319 */
+	0x0000,     /* R1320 */
+	0x0000,     /* R1321 */
+	0x0000,     /* R1322 */
+	0x0000,     /* R1323 */
+	0x0000,     /* R1324 */
+	0x0000,     /* R1325 */
+	0x0000,     /* R1326 */
+	0x0000,     /* R1327 */
+	0x0000,     /* R1328 */
+	0x0000,     /* R1329 */
+	0x0000,     /* R1330 */
+	0x0000,     /* R1331 */
+	0x0000,     /* R1332 */
+	0x0000,     /* R1333 */
+	0x0000,     /* R1334 */
+	0x0000,     /* R1335 */
+	0x0000,     /* R1336 */
+	0x0000,     /* R1337 */
+	0x0000,     /* R1338 */
+	0x0000,     /* R1339 */
+	0x0000,     /* R1340 */
+	0x0000,     /* R1341 */
+	0x0000,     /* R1342 */
+	0x0000,     /* R1343 */
+	0x0098,     /* R1344  - AIF2 DRC (1) */
+	0x0845,     /* R1345  - AIF2 DRC (2) */
+	0x0000,     /* R1346  - AIF2 DRC (3) */
+	0x0000,     /* R1347  - AIF2 DRC (4) */
+	0x0000,     /* R1348  - AIF2 DRC (5) */
+	0x0000,     /* R1349 */
+	0x0000,     /* R1350 */
+	0x0000,     /* R1351 */
+	0x0000,     /* R1352 */
+	0x0000,     /* R1353 */
+	0x0000,     /* R1354 */
+	0x0000,     /* R1355 */
+	0x0000,     /* R1356 */
+	0x0000,     /* R1357 */
+	0x0000,     /* R1358 */
+	0x0000,     /* R1359 */
+	0x0000,     /* R1360 */
+	0x0000,     /* R1361 */
+	0x0000,     /* R1362 */
+	0x0000,     /* R1363 */
+	0x0000,     /* R1364 */
+	0x0000,     /* R1365 */
+	0x0000,     /* R1366 */
+	0x0000,     /* R1367 */
+	0x0000,     /* R1368 */
+	0x0000,     /* R1369 */
+	0x0000,     /* R1370 */
+	0x0000,     /* R1371 */
+	0x0000,     /* R1372 */
+	0x0000,     /* R1373 */
+	0x0000,     /* R1374 */
+	0x0000,     /* R1375 */
+	0x0000,     /* R1376 */
+	0x0000,     /* R1377 */
+	0x0000,     /* R1378 */
+	0x0000,     /* R1379 */
+	0x0000,     /* R1380 */
+	0x0000,     /* R1381 */
+	0x0000,     /* R1382 */
+	0x0000,     /* R1383 */
+	0x0000,     /* R1384 */
+	0x0000,     /* R1385 */
+	0x0000,     /* R1386 */
+	0x0000,     /* R1387 */
+	0x0000,     /* R1388 */
+	0x0000,     /* R1389 */
+	0x0000,     /* R1390 */
+	0x0000,     /* R1391 */
+	0x0000,     /* R1392 */
+	0x0000,     /* R1393 */
+	0x0000,     /* R1394 */
+	0x0000,     /* R1395 */
+	0x0000,     /* R1396 */
+	0x0000,     /* R1397 */
+	0x0000,     /* R1398 */
+	0x0000,     /* R1399 */
+	0x0000,     /* R1400 */
+	0x0000,     /* R1401 */
+	0x0000,     /* R1402 */
+	0x0000,     /* R1403 */
+	0x0000,     /* R1404 */
+	0x0000,     /* R1405 */
+	0x0000,     /* R1406 */
+	0x0000,     /* R1407 */
+	0x6318,     /* R1408  - AIF2 EQ Gains (1) */
+	0x6300,     /* R1409  - AIF2 EQ Gains (2) */
+	0x0FCA,     /* R1410  - AIF2 EQ Band 1 A */
+	0x0400,     /* R1411  - AIF2 EQ Band 1 B */
+	0x00D8,     /* R1412  - AIF2 EQ Band 1 PG */
+	0x1EB5,     /* R1413  - AIF2 EQ Band 2 A */
+	0xF145,     /* R1414  - AIF2 EQ Band 2 B */
+	0x0B75,     /* R1415  - AIF2 EQ Band 2 C */
+	0x01C5,     /* R1416  - AIF2 EQ Band 2 PG */
+	0x1C58,     /* R1417  - AIF2 EQ Band 3 A */
+	0xF373,     /* R1418  - AIF2 EQ Band 3 B */
+	0x0A54,     /* R1419  - AIF2 EQ Band 3 C */
+	0x0558,     /* R1420  - AIF2 EQ Band 3 PG */
+	0x168E,     /* R1421  - AIF2 EQ Band 4 A */
+	0xF829,     /* R1422  - AIF2 EQ Band 4 B */
+	0x07AD,     /* R1423  - AIF2 EQ Band 4 C */
+	0x1103,     /* R1424  - AIF2 EQ Band 4 PG */
+	0x0564,     /* R1425  - AIF2 EQ Band 5 A */
+	0x0559,     /* R1426  - AIF2 EQ Band 5 B */
+	0x4000,     /* R1427  - AIF2 EQ Band 5 PG */
+	0x0000,     /* R1428 */
+	0x0000,     /* R1429 */
+	0x0000,     /* R1430 */
+	0x0000,     /* R1431 */
+	0x0000,     /* R1432 */
+	0x0000,     /* R1433 */
+	0x0000,     /* R1434 */
+	0x0000,     /* R1435 */
+	0x0000,     /* R1436 */
+	0x0000,     /* R1437 */
+	0x0000,     /* R1438 */
+	0x0000,     /* R1439 */
+	0x0000,     /* R1440 */
+	0x0000,     /* R1441 */
+	0x0000,     /* R1442 */
+	0x0000,     /* R1443 */
+	0x0000,     /* R1444 */
+	0x0000,     /* R1445 */
+	0x0000,     /* R1446 */
+	0x0000,     /* R1447 */
+	0x0000,     /* R1448 */
+	0x0000,     /* R1449 */
+	0x0000,     /* R1450 */
+	0x0000,     /* R1451 */
+	0x0000,     /* R1452 */
+	0x0000,     /* R1453 */
+	0x0000,     /* R1454 */
+	0x0000,     /* R1455 */
+	0x0000,     /* R1456 */
+	0x0000,     /* R1457 */
+	0x0000,     /* R1458 */
+	0x0000,     /* R1459 */
+	0x0000,     /* R1460 */
+	0x0000,     /* R1461 */
+	0x0000,     /* R1462 */
+	0x0000,     /* R1463 */
+	0x0000,     /* R1464 */
+	0x0000,     /* R1465 */
+	0x0000,     /* R1466 */
+	0x0000,     /* R1467 */
+	0x0000,     /* R1468 */
+	0x0000,     /* R1469 */
+	0x0000,     /* R1470 */
+	0x0000,     /* R1471 */
+	0x0000,     /* R1472 */
+	0x0000,     /* R1473 */
+	0x0000,     /* R1474 */
+	0x0000,     /* R1475 */
+	0x0000,     /* R1476 */
+	0x0000,     /* R1477 */
+	0x0000,     /* R1478 */
+	0x0000,     /* R1479 */
+	0x0000,     /* R1480 */
+	0x0000,     /* R1481 */
+	0x0000,     /* R1482 */
+	0x0000,     /* R1483 */
+	0x0000,     /* R1484 */
+	0x0000,     /* R1485 */
+	0x0000,     /* R1486 */
+	0x0000,     /* R1487 */
+	0x0000,     /* R1488 */
+	0x0000,     /* R1489 */
+	0x0000,     /* R1490 */
+	0x0000,     /* R1491 */
+	0x0000,     /* R1492 */
+	0x0000,     /* R1493 */
+	0x0000,     /* R1494 */
+	0x0000,     /* R1495 */
+	0x0000,     /* R1496 */
+	0x0000,     /* R1497 */
+	0x0000,     /* R1498 */
+	0x0000,     /* R1499 */
+	0x0000,     /* R1500 */
+	0x0000,     /* R1501 */
+	0x0000,     /* R1502 */
+	0x0000,     /* R1503 */
+	0x0000,     /* R1504 */
+	0x0000,     /* R1505 */
+	0x0000,     /* R1506 */
+	0x0000,     /* R1507 */
+	0x0000,     /* R1508 */
+	0x0000,     /* R1509 */
+	0x0000,     /* R1510 */
+	0x0000,     /* R1511 */
+	0x0000,     /* R1512 */
+	0x0000,     /* R1513 */
+	0x0000,     /* R1514 */
+	0x0000,     /* R1515 */
+	0x0000,     /* R1516 */
+	0x0000,     /* R1517 */
+	0x0000,     /* R1518 */
+	0x0000,     /* R1519 */
+	0x0000,     /* R1520 */
+	0x0000,     /* R1521 */
+	0x0000,     /* R1522 */
+	0x0000,     /* R1523 */
+	0x0000,     /* R1524 */
+	0x0000,     /* R1525 */
+	0x0000,     /* R1526 */
+	0x0000,     /* R1527 */
+	0x0000,     /* R1528 */
+	0x0000,     /* R1529 */
+	0x0000,     /* R1530 */
+	0x0000,     /* R1531 */
+	0x0000,     /* R1532 */
+	0x0000,     /* R1533 */
+	0x0000,     /* R1534 */
+	0x0000,     /* R1535 */
+	0x0000,     /* R1536  - DAC1 Mixer Volumes */
+	0x0000,     /* R1537  - DAC1 Left Mixer Routing */
+	0x0000,     /* R1538  - DAC1 Right Mixer Routing */
+	0x0000,     /* R1539  - DAC2 Mixer Volumes */
+	0x0000,     /* R1540  - DAC2 Left Mixer Routing */
+	0x0000,     /* R1541  - DAC2 Right Mixer Routing */
+	0x0000,     /* R1542  - AIF1 ADC1 Left Mixer Routing */
+	0x0000,     /* R1543  - AIF1 ADC1 Right Mixer Routing */
+	0x0000,     /* R1544  - AIF1 ADC2 Left Mixer Routing */
+	0x0000,     /* R1545  - AIF1 ADC2 Right mixer Routing */
+	0x0000,     /* R1546 */
+	0x0000,     /* R1547 */
+	0x0000,     /* R1548 */
+	0x0000,     /* R1549 */
+	0x0000,     /* R1550 */
+	0x0000,     /* R1551 */
+	0x02C0,     /* R1552  - DAC1 Left Volume */
+	0x02C0,     /* R1553  - DAC1 Right Volume */
+	0x02C0,     /* R1554  - DAC2 Left Volume */
+	0x02C0,     /* R1555  - DAC2 Right Volume */
+	0x0000,     /* R1556  - DAC Softmute */
+	0x0000,     /* R1557 */
+	0x0000,     /* R1558 */
+	0x0000,     /* R1559 */
+	0x0000,     /* R1560 */
+	0x0000,     /* R1561 */
+	0x0000,     /* R1562 */
+	0x0000,     /* R1563 */
+	0x0000,     /* R1564 */
+	0x0000,     /* R1565 */
+	0x0000,     /* R1566 */
+	0x0000,     /* R1567 */
+	0x0002,     /* R1568  - Oversampling */
+	0x0000,     /* R1569  - Sidetone */
+};
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 4d3e6f1..c6c958e 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -18,15 +18,17 @@
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <sound/core.h>
+#include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
+#include <trace/events/asoc.h>
 
 #include <linux/mfd/wm8994/core.h>
 #include <linux/mfd/wm8994/registers.h>
@@ -57,8 +59,6 @@
 	WM8994_AIF2_EQ_GAINS_1,
 };
 
-#define WM8994_REG_CACHE_SIZE  0x621
-
 struct wm8994_micdet {
 	struct snd_soc_jack *jack;
 	int det;
@@ -71,7 +71,6 @@
 	enum snd_soc_control_type control_type;
 	void *control_data;
 	struct snd_soc_codec *codec;
-	u16 reg_cache[WM8994_REG_CACHE_SIZE + 1];
 	int sysclk[2];
 	int sysclk_rate[2];
 	int mclk[2];
@@ -81,6 +80,8 @@
 	int dac_rates[2];
 	int lrclk_shared[2];
 
+	int mbc_ena[3];
+
 	/* Platform dependant DRC configuration */
 	const char **drc_texts;
 	int drc_cfg[WM8994_NUM_DRC];
@@ -92,1586 +93,26 @@
 	int retune_mobile_cfg[WM8994_NUM_EQ];
 	struct soc_enum retune_mobile_enum;
 
+	/* Platform dependant MBC configuration */
+	int mbc_cfg;
+	const char **mbc_texts;
+	struct soc_enum mbc_enum;
+
 	struct wm8994_micdet micdet[2];
 
+	wm8958_micdet_cb jack_cb;
+	void *jack_cb_data;
+	bool jack_is_mic;
+	bool jack_is_video;
+
 	int revision;
 	struct wm8994_pdata *pdata;
-};
 
-static const struct {
-	unsigned short readable;   /* Mask of readable bits */
-	unsigned short writable;   /* Mask of writable bits */
-} access_masks[] = {
-	{ 0xFFFF, 0xFFFF }, /* R0     - Software Reset */
-	{ 0x3B37, 0x3B37 }, /* R1     - Power Management (1) */
-	{ 0x6BF0, 0x6BF0 }, /* R2     - Power Management (2) */
-	{ 0x3FF0, 0x3FF0 }, /* R3     - Power Management (3) */
-	{ 0x3F3F, 0x3F3F }, /* R4     - Power Management (4) */
-	{ 0x3F0F, 0x3F0F }, /* R5     - Power Management (5) */
-	{ 0x003F, 0x003F }, /* R6     - Power Management (6) */
-	{ 0x0000, 0x0000 }, /* R7 */
-	{ 0x0000, 0x0000 }, /* R8 */
-	{ 0x0000, 0x0000 }, /* R9 */
-	{ 0x0000, 0x0000 }, /* R10 */
-	{ 0x0000, 0x0000 }, /* R11 */
-	{ 0x0000, 0x0000 }, /* R12 */
-	{ 0x0000, 0x0000 }, /* R13 */
-	{ 0x0000, 0x0000 }, /* R14 */
-	{ 0x0000, 0x0000 }, /* R15 */
-	{ 0x0000, 0x0000 }, /* R16 */
-	{ 0x0000, 0x0000 }, /* R17 */
-	{ 0x0000, 0x0000 }, /* R18 */
-	{ 0x0000, 0x0000 }, /* R19 */
-	{ 0x0000, 0x0000 }, /* R20 */
-	{ 0x01C0, 0x01C0 }, /* R21    - Input Mixer (1) */
-	{ 0x0000, 0x0000 }, /* R22 */
-	{ 0x0000, 0x0000 }, /* R23 */
-	{ 0x00DF, 0x01DF }, /* R24    - Left Line Input 1&2 Volume */
-	{ 0x00DF, 0x01DF }, /* R25    - Left Line Input 3&4 Volume */
-	{ 0x00DF, 0x01DF }, /* R26    - Right Line Input 1&2 Volume */
-	{ 0x00DF, 0x01DF }, /* R27    - Right Line Input 3&4 Volume */
-	{ 0x00FF, 0x01FF }, /* R28    - Left Output Volume */
-	{ 0x00FF, 0x01FF }, /* R29    - Right Output Volume */
-	{ 0x0077, 0x0077 }, /* R30    - Line Outputs Volume */
-	{ 0x0030, 0x0030 }, /* R31    - HPOUT2 Volume */
-	{ 0x00FF, 0x01FF }, /* R32    - Left OPGA Volume */
-	{ 0x00FF, 0x01FF }, /* R33    - Right OPGA Volume */
-	{ 0x007F, 0x007F }, /* R34    - SPKMIXL Attenuation */
-	{ 0x017F, 0x017F }, /* R35    - SPKMIXR Attenuation */
-	{ 0x003F, 0x003F }, /* R36    - SPKOUT Mixers */
-	{ 0x003F, 0x003F }, /* R37    - ClassD */
-	{ 0x00FF, 0x01FF }, /* R38    - Speaker Volume Left */
-	{ 0x00FF, 0x01FF }, /* R39    - Speaker Volume Right */
-	{ 0x00FF, 0x00FF }, /* R40    - Input Mixer (2) */
-	{ 0x01B7, 0x01B7 }, /* R41    - Input Mixer (3) */
-	{ 0x01B7, 0x01B7 }, /* R42    - Input Mixer (4) */
-	{ 0x01C7, 0x01C7 }, /* R43    - Input Mixer (5) */
-	{ 0x01C7, 0x01C7 }, /* R44    - Input Mixer (6) */
-	{ 0x01FF, 0x01FF }, /* R45    - Output Mixer (1) */
-	{ 0x01FF, 0x01FF }, /* R46    - Output Mixer (2) */
-	{ 0x0FFF, 0x0FFF }, /* R47    - Output Mixer (3) */
-	{ 0x0FFF, 0x0FFF }, /* R48    - Output Mixer (4) */
-	{ 0x0FFF, 0x0FFF }, /* R49    - Output Mixer (5) */
-	{ 0x0FFF, 0x0FFF }, /* R50    - Output Mixer (6) */
-	{ 0x0038, 0x0038 }, /* R51    - HPOUT2 Mixer */
-	{ 0x0077, 0x0077 }, /* R52    - Line Mixer (1) */
-	{ 0x0077, 0x0077 }, /* R53    - Line Mixer (2) */
-	{ 0x03FF, 0x03FF }, /* R54    - Speaker Mixer */
-	{ 0x00C1, 0x00C1 }, /* R55    - Additional Control */
-	{ 0x00F0, 0x00F0 }, /* R56    - AntiPOP (1) */
-	{ 0x01EF, 0x01EF }, /* R57    - AntiPOP (2) */
-	{ 0x00FF, 0x00FF }, /* R58    - MICBIAS */
-	{ 0x000F, 0x000F }, /* R59    - LDO 1 */
-	{ 0x0007, 0x0007 }, /* R60    - LDO 2 */
-	{ 0x0000, 0x0000 }, /* R61 */
-	{ 0x0000, 0x0000 }, /* R62 */
-	{ 0x0000, 0x0000 }, /* R63 */
-	{ 0x0000, 0x0000 }, /* R64 */
-	{ 0x0000, 0x0000 }, /* R65 */
-	{ 0x0000, 0x0000 }, /* R66 */
-	{ 0x0000, 0x0000 }, /* R67 */
-	{ 0x0000, 0x0000 }, /* R68 */
-	{ 0x0000, 0x0000 }, /* R69 */
-	{ 0x0000, 0x0000 }, /* R70 */
-	{ 0x0000, 0x0000 }, /* R71 */
-	{ 0x0000, 0x0000 }, /* R72 */
-	{ 0x0000, 0x0000 }, /* R73 */
-	{ 0x0000, 0x0000 }, /* R74 */
-	{ 0x0000, 0x0000 }, /* R75 */
-	{ 0x8000, 0x8000 }, /* R76    - Charge Pump (1) */
-	{ 0x0000, 0x0000 }, /* R77 */
-	{ 0x0000, 0x0000 }, /* R78 */
-	{ 0x0000, 0x0000 }, /* R79 */
-	{ 0x0000, 0x0000 }, /* R80 */
-	{ 0x0301, 0x0301 }, /* R81    - Class W (1) */
-	{ 0x0000, 0x0000 }, /* R82 */
-	{ 0x0000, 0x0000 }, /* R83 */
-	{ 0x333F, 0x333F }, /* R84    - DC Servo (1) */
-	{ 0x0FEF, 0x0FEF }, /* R85    - DC Servo (2) */
-	{ 0x0000, 0x0000 }, /* R86 */
-	{ 0xFFFF, 0xFFFF }, /* R87    - DC Servo (4) */
-	{ 0x0333, 0x0000 }, /* R88    - DC Servo Readback */
-	{ 0x0000, 0x0000 }, /* R89 */
-	{ 0x0000, 0x0000 }, /* R90 */
-	{ 0x0000, 0x0000 }, /* R91 */
-	{ 0x0000, 0x0000 }, /* R92 */
-	{ 0x0000, 0x0000 }, /* R93 */
-	{ 0x0000, 0x0000 }, /* R94 */
-	{ 0x0000, 0x0000 }, /* R95 */
-	{ 0x00EE, 0x00EE }, /* R96    - Analogue HP (1) */
-	{ 0x0000, 0x0000 }, /* R97 */
-	{ 0x0000, 0x0000 }, /* R98 */
-	{ 0x0000, 0x0000 }, /* R99 */
-	{ 0x0000, 0x0000 }, /* R100 */
-	{ 0x0000, 0x0000 }, /* R101 */
-	{ 0x0000, 0x0000 }, /* R102 */
-	{ 0x0000, 0x0000 }, /* R103 */
-	{ 0x0000, 0x0000 }, /* R104 */
-	{ 0x0000, 0x0000 }, /* R105 */
-	{ 0x0000, 0x0000 }, /* R106 */
-	{ 0x0000, 0x0000 }, /* R107 */
-	{ 0x0000, 0x0000 }, /* R108 */
-	{ 0x0000, 0x0000 }, /* R109 */
-	{ 0x0000, 0x0000 }, /* R110 */
-	{ 0x0000, 0x0000 }, /* R111 */
-	{ 0x0000, 0x0000 }, /* R112 */
-	{ 0x0000, 0x0000 }, /* R113 */
-	{ 0x0000, 0x0000 }, /* R114 */
-	{ 0x0000, 0x0000 }, /* R115 */
-	{ 0x0000, 0x0000 }, /* R116 */
-	{ 0x0000, 0x0000 }, /* R117 */
-	{ 0x0000, 0x0000 }, /* R118 */
-	{ 0x0000, 0x0000 }, /* R119 */
-	{ 0x0000, 0x0000 }, /* R120 */
-	{ 0x0000, 0x0000 }, /* R121 */
-	{ 0x0000, 0x0000 }, /* R122 */
-	{ 0x0000, 0x0000 }, /* R123 */
-	{ 0x0000, 0x0000 }, /* R124 */
-	{ 0x0000, 0x0000 }, /* R125 */
-	{ 0x0000, 0x0000 }, /* R126 */
-	{ 0x0000, 0x0000 }, /* R127 */
-	{ 0x0000, 0x0000 }, /* R128 */
-	{ 0x0000, 0x0000 }, /* R129 */
-	{ 0x0000, 0x0000 }, /* R130 */
-	{ 0x0000, 0x0000 }, /* R131 */
-	{ 0x0000, 0x0000 }, /* R132 */
-	{ 0x0000, 0x0000 }, /* R133 */
-	{ 0x0000, 0x0000 }, /* R134 */
-	{ 0x0000, 0x0000 }, /* R135 */
-	{ 0x0000, 0x0000 }, /* R136 */
-	{ 0x0000, 0x0000 }, /* R137 */
-	{ 0x0000, 0x0000 }, /* R138 */
-	{ 0x0000, 0x0000 }, /* R139 */
-	{ 0x0000, 0x0000 }, /* R140 */
-	{ 0x0000, 0x0000 }, /* R141 */
-	{ 0x0000, 0x0000 }, /* R142 */
-	{ 0x0000, 0x0000 }, /* R143 */
-	{ 0x0000, 0x0000 }, /* R144 */
-	{ 0x0000, 0x0000 }, /* R145 */
-	{ 0x0000, 0x0000 }, /* R146 */
-	{ 0x0000, 0x0000 }, /* R147 */
-	{ 0x0000, 0x0000 }, /* R148 */
-	{ 0x0000, 0x0000 }, /* R149 */
-	{ 0x0000, 0x0000 }, /* R150 */
-	{ 0x0000, 0x0000 }, /* R151 */
-	{ 0x0000, 0x0000 }, /* R152 */
-	{ 0x0000, 0x0000 }, /* R153 */
-	{ 0x0000, 0x0000 }, /* R154 */
-	{ 0x0000, 0x0000 }, /* R155 */
-	{ 0x0000, 0x0000 }, /* R156 */
-	{ 0x0000, 0x0000 }, /* R157 */
-	{ 0x0000, 0x0000 }, /* R158 */
-	{ 0x0000, 0x0000 }, /* R159 */
-	{ 0x0000, 0x0000 }, /* R160 */
-	{ 0x0000, 0x0000 }, /* R161 */
-	{ 0x0000, 0x0000 }, /* R162 */
-	{ 0x0000, 0x0000 }, /* R163 */
-	{ 0x0000, 0x0000 }, /* R164 */
-	{ 0x0000, 0x0000 }, /* R165 */
-	{ 0x0000, 0x0000 }, /* R166 */
-	{ 0x0000, 0x0000 }, /* R167 */
-	{ 0x0000, 0x0000 }, /* R168 */
-	{ 0x0000, 0x0000 }, /* R169 */
-	{ 0x0000, 0x0000 }, /* R170 */
-	{ 0x0000, 0x0000 }, /* R171 */
-	{ 0x0000, 0x0000 }, /* R172 */
-	{ 0x0000, 0x0000 }, /* R173 */
-	{ 0x0000, 0x0000 }, /* R174 */
-	{ 0x0000, 0x0000 }, /* R175 */
-	{ 0x0000, 0x0000 }, /* R176 */
-	{ 0x0000, 0x0000 }, /* R177 */
-	{ 0x0000, 0x0000 }, /* R178 */
-	{ 0x0000, 0x0000 }, /* R179 */
-	{ 0x0000, 0x0000 }, /* R180 */
-	{ 0x0000, 0x0000 }, /* R181 */
-	{ 0x0000, 0x0000 }, /* R182 */
-	{ 0x0000, 0x0000 }, /* R183 */
-	{ 0x0000, 0x0000 }, /* R184 */
-	{ 0x0000, 0x0000 }, /* R185 */
-	{ 0x0000, 0x0000 }, /* R186 */
-	{ 0x0000, 0x0000 }, /* R187 */
-	{ 0x0000, 0x0000 }, /* R188 */
-	{ 0x0000, 0x0000 }, /* R189 */
-	{ 0x0000, 0x0000 }, /* R190 */
-	{ 0x0000, 0x0000 }, /* R191 */
-	{ 0x0000, 0x0000 }, /* R192 */
-	{ 0x0000, 0x0000 }, /* R193 */
-	{ 0x0000, 0x0000 }, /* R194 */
-	{ 0x0000, 0x0000 }, /* R195 */
-	{ 0x0000, 0x0000 }, /* R196 */
-	{ 0x0000, 0x0000 }, /* R197 */
-	{ 0x0000, 0x0000 }, /* R198 */
-	{ 0x0000, 0x0000 }, /* R199 */
-	{ 0x0000, 0x0000 }, /* R200 */
-	{ 0x0000, 0x0000 }, /* R201 */
-	{ 0x0000, 0x0000 }, /* R202 */
-	{ 0x0000, 0x0000 }, /* R203 */
-	{ 0x0000, 0x0000 }, /* R204 */
-	{ 0x0000, 0x0000 }, /* R205 */
-	{ 0x0000, 0x0000 }, /* R206 */
-	{ 0x0000, 0x0000 }, /* R207 */
-	{ 0x0000, 0x0000 }, /* R208 */
-	{ 0x0000, 0x0000 }, /* R209 */
-	{ 0x0000, 0x0000 }, /* R210 */
-	{ 0x0000, 0x0000 }, /* R211 */
-	{ 0x0000, 0x0000 }, /* R212 */
-	{ 0x0000, 0x0000 }, /* R213 */
-	{ 0x0000, 0x0000 }, /* R214 */
-	{ 0x0000, 0x0000 }, /* R215 */
-	{ 0x0000, 0x0000 }, /* R216 */
-	{ 0x0000, 0x0000 }, /* R217 */
-	{ 0x0000, 0x0000 }, /* R218 */
-	{ 0x0000, 0x0000 }, /* R219 */
-	{ 0x0000, 0x0000 }, /* R220 */
-	{ 0x0000, 0x0000 }, /* R221 */
-	{ 0x0000, 0x0000 }, /* R222 */
-	{ 0x0000, 0x0000 }, /* R223 */
-	{ 0x0000, 0x0000 }, /* R224 */
-	{ 0x0000, 0x0000 }, /* R225 */
-	{ 0x0000, 0x0000 }, /* R226 */
-	{ 0x0000, 0x0000 }, /* R227 */
-	{ 0x0000, 0x0000 }, /* R228 */
-	{ 0x0000, 0x0000 }, /* R229 */
-	{ 0x0000, 0x0000 }, /* R230 */
-	{ 0x0000, 0x0000 }, /* R231 */
-	{ 0x0000, 0x0000 }, /* R232 */
-	{ 0x0000, 0x0000 }, /* R233 */
-	{ 0x0000, 0x0000 }, /* R234 */
-	{ 0x0000, 0x0000 }, /* R235 */
-	{ 0x0000, 0x0000 }, /* R236 */
-	{ 0x0000, 0x0000 }, /* R237 */
-	{ 0x0000, 0x0000 }, /* R238 */
-	{ 0x0000, 0x0000 }, /* R239 */
-	{ 0x0000, 0x0000 }, /* R240 */
-	{ 0x0000, 0x0000 }, /* R241 */
-	{ 0x0000, 0x0000 }, /* R242 */
-	{ 0x0000, 0x0000 }, /* R243 */
-	{ 0x0000, 0x0000 }, /* R244 */
-	{ 0x0000, 0x0000 }, /* R245 */
-	{ 0x0000, 0x0000 }, /* R246 */
-	{ 0x0000, 0x0000 }, /* R247 */
-	{ 0x0000, 0x0000 }, /* R248 */
-	{ 0x0000, 0x0000 }, /* R249 */
-	{ 0x0000, 0x0000 }, /* R250 */
-	{ 0x0000, 0x0000 }, /* R251 */
-	{ 0x0000, 0x0000 }, /* R252 */
-	{ 0x0000, 0x0000 }, /* R253 */
-	{ 0x0000, 0x0000 }, /* R254 */
-	{ 0x0000, 0x0000 }, /* R255 */
-	{ 0x000F, 0x0000 }, /* R256   - Chip Revision */
-	{ 0x0074, 0x0074 }, /* R257   - Control Interface */
-	{ 0x0000, 0x0000 }, /* R258 */
-	{ 0x0000, 0x0000 }, /* R259 */
-	{ 0x0000, 0x0000 }, /* R260 */
-	{ 0x0000, 0x0000 }, /* R261 */
-	{ 0x0000, 0x0000 }, /* R262 */
-	{ 0x0000, 0x0000 }, /* R263 */
-	{ 0x0000, 0x0000 }, /* R264 */
-	{ 0x0000, 0x0000 }, /* R265 */
-	{ 0x0000, 0x0000 }, /* R266 */
-	{ 0x0000, 0x0000 }, /* R267 */
-	{ 0x0000, 0x0000 }, /* R268 */
-	{ 0x0000, 0x0000 }, /* R269 */
-	{ 0x0000, 0x0000 }, /* R270 */
-	{ 0x0000, 0x0000 }, /* R271 */
-	{ 0x807F, 0x837F }, /* R272   - Write Sequencer Ctrl (1) */
-	{ 0x017F, 0x0000 }, /* R273   - Write Sequencer Ctrl (2) */
-	{ 0x0000, 0x0000 }, /* R274 */
-	{ 0x0000, 0x0000 }, /* R275 */
-	{ 0x0000, 0x0000 }, /* R276 */
-	{ 0x0000, 0x0000 }, /* R277 */
-	{ 0x0000, 0x0000 }, /* R278 */
-	{ 0x0000, 0x0000 }, /* R279 */
-	{ 0x0000, 0x0000 }, /* R280 */
-	{ 0x0000, 0x0000 }, /* R281 */
-	{ 0x0000, 0x0000 }, /* R282 */
-	{ 0x0000, 0x0000 }, /* R283 */
-	{ 0x0000, 0x0000 }, /* R284 */
-	{ 0x0000, 0x0000 }, /* R285 */
-	{ 0x0000, 0x0000 }, /* R286 */
-	{ 0x0000, 0x0000 }, /* R287 */
-	{ 0x0000, 0x0000 }, /* R288 */
-	{ 0x0000, 0x0000 }, /* R289 */
-	{ 0x0000, 0x0000 }, /* R290 */
-	{ 0x0000, 0x0000 }, /* R291 */
-	{ 0x0000, 0x0000 }, /* R292 */
-	{ 0x0000, 0x0000 }, /* R293 */
-	{ 0x0000, 0x0000 }, /* R294 */
-	{ 0x0000, 0x0000 }, /* R295 */
-	{ 0x0000, 0x0000 }, /* R296 */
-	{ 0x0000, 0x0000 }, /* R297 */
-	{ 0x0000, 0x0000 }, /* R298 */
-	{ 0x0000, 0x0000 }, /* R299 */
-	{ 0x0000, 0x0000 }, /* R300 */
-	{ 0x0000, 0x0000 }, /* R301 */
-	{ 0x0000, 0x0000 }, /* R302 */
-	{ 0x0000, 0x0000 }, /* R303 */
-	{ 0x0000, 0x0000 }, /* R304 */
-	{ 0x0000, 0x0000 }, /* R305 */
-	{ 0x0000, 0x0000 }, /* R306 */
-	{ 0x0000, 0x0000 }, /* R307 */
-	{ 0x0000, 0x0000 }, /* R308 */
-	{ 0x0000, 0x0000 }, /* R309 */
-	{ 0x0000, 0x0000 }, /* R310 */
-	{ 0x0000, 0x0000 }, /* R311 */
-	{ 0x0000, 0x0000 }, /* R312 */
-	{ 0x0000, 0x0000 }, /* R313 */
-	{ 0x0000, 0x0000 }, /* R314 */
-	{ 0x0000, 0x0000 }, /* R315 */
-	{ 0x0000, 0x0000 }, /* R316 */
-	{ 0x0000, 0x0000 }, /* R317 */
-	{ 0x0000, 0x0000 }, /* R318 */
-	{ 0x0000, 0x0000 }, /* R319 */
-	{ 0x0000, 0x0000 }, /* R320 */
-	{ 0x0000, 0x0000 }, /* R321 */
-	{ 0x0000, 0x0000 }, /* R322 */
-	{ 0x0000, 0x0000 }, /* R323 */
-	{ 0x0000, 0x0000 }, /* R324 */
-	{ 0x0000, 0x0000 }, /* R325 */
-	{ 0x0000, 0x0000 }, /* R326 */
-	{ 0x0000, 0x0000 }, /* R327 */
-	{ 0x0000, 0x0000 }, /* R328 */
-	{ 0x0000, 0x0000 }, /* R329 */
-	{ 0x0000, 0x0000 }, /* R330 */
-	{ 0x0000, 0x0000 }, /* R331 */
-	{ 0x0000, 0x0000 }, /* R332 */
-	{ 0x0000, 0x0000 }, /* R333 */
-	{ 0x0000, 0x0000 }, /* R334 */
-	{ 0x0000, 0x0000 }, /* R335 */
-	{ 0x0000, 0x0000 }, /* R336 */
-	{ 0x0000, 0x0000 }, /* R337 */
-	{ 0x0000, 0x0000 }, /* R338 */
-	{ 0x0000, 0x0000 }, /* R339 */
-	{ 0x0000, 0x0000 }, /* R340 */
-	{ 0x0000, 0x0000 }, /* R341 */
-	{ 0x0000, 0x0000 }, /* R342 */
-	{ 0x0000, 0x0000 }, /* R343 */
-	{ 0x0000, 0x0000 }, /* R344 */
-	{ 0x0000, 0x0000 }, /* R345 */
-	{ 0x0000, 0x0000 }, /* R346 */
-	{ 0x0000, 0x0000 }, /* R347 */
-	{ 0x0000, 0x0000 }, /* R348 */
-	{ 0x0000, 0x0000 }, /* R349 */
-	{ 0x0000, 0x0000 }, /* R350 */
-	{ 0x0000, 0x0000 }, /* R351 */
-	{ 0x0000, 0x0000 }, /* R352 */
-	{ 0x0000, 0x0000 }, /* R353 */
-	{ 0x0000, 0x0000 }, /* R354 */
-	{ 0x0000, 0x0000 }, /* R355 */
-	{ 0x0000, 0x0000 }, /* R356 */
-	{ 0x0000, 0x0000 }, /* R357 */
-	{ 0x0000, 0x0000 }, /* R358 */
-	{ 0x0000, 0x0000 }, /* R359 */
-	{ 0x0000, 0x0000 }, /* R360 */
-	{ 0x0000, 0x0000 }, /* R361 */
-	{ 0x0000, 0x0000 }, /* R362 */
-	{ 0x0000, 0x0000 }, /* R363 */
-	{ 0x0000, 0x0000 }, /* R364 */
-	{ 0x0000, 0x0000 }, /* R365 */
-	{ 0x0000, 0x0000 }, /* R366 */
-	{ 0x0000, 0x0000 }, /* R367 */
-	{ 0x0000, 0x0000 }, /* R368 */
-	{ 0x0000, 0x0000 }, /* R369 */
-	{ 0x0000, 0x0000 }, /* R370 */
-	{ 0x0000, 0x0000 }, /* R371 */
-	{ 0x0000, 0x0000 }, /* R372 */
-	{ 0x0000, 0x0000 }, /* R373 */
-	{ 0x0000, 0x0000 }, /* R374 */
-	{ 0x0000, 0x0000 }, /* R375 */
-	{ 0x0000, 0x0000 }, /* R376 */
-	{ 0x0000, 0x0000 }, /* R377 */
-	{ 0x0000, 0x0000 }, /* R378 */
-	{ 0x0000, 0x0000 }, /* R379 */
-	{ 0x0000, 0x0000 }, /* R380 */
-	{ 0x0000, 0x0000 }, /* R381 */
-	{ 0x0000, 0x0000 }, /* R382 */
-	{ 0x0000, 0x0000 }, /* R383 */
-	{ 0x0000, 0x0000 }, /* R384 */
-	{ 0x0000, 0x0000 }, /* R385 */
-	{ 0x0000, 0x0000 }, /* R386 */
-	{ 0x0000, 0x0000 }, /* R387 */
-	{ 0x0000, 0x0000 }, /* R388 */
-	{ 0x0000, 0x0000 }, /* R389 */
-	{ 0x0000, 0x0000 }, /* R390 */
-	{ 0x0000, 0x0000 }, /* R391 */
-	{ 0x0000, 0x0000 }, /* R392 */
-	{ 0x0000, 0x0000 }, /* R393 */
-	{ 0x0000, 0x0000 }, /* R394 */
-	{ 0x0000, 0x0000 }, /* R395 */
-	{ 0x0000, 0x0000 }, /* R396 */
-	{ 0x0000, 0x0000 }, /* R397 */
-	{ 0x0000, 0x0000 }, /* R398 */
-	{ 0x0000, 0x0000 }, /* R399 */
-	{ 0x0000, 0x0000 }, /* R400 */
-	{ 0x0000, 0x0000 }, /* R401 */
-	{ 0x0000, 0x0000 }, /* R402 */
-	{ 0x0000, 0x0000 }, /* R403 */
-	{ 0x0000, 0x0000 }, /* R404 */
-	{ 0x0000, 0x0000 }, /* R405 */
-	{ 0x0000, 0x0000 }, /* R406 */
-	{ 0x0000, 0x0000 }, /* R407 */
-	{ 0x0000, 0x0000 }, /* R408 */
-	{ 0x0000, 0x0000 }, /* R409 */
-	{ 0x0000, 0x0000 }, /* R410 */
-	{ 0x0000, 0x0000 }, /* R411 */
-	{ 0x0000, 0x0000 }, /* R412 */
-	{ 0x0000, 0x0000 }, /* R413 */
-	{ 0x0000, 0x0000 }, /* R414 */
-	{ 0x0000, 0x0000 }, /* R415 */
-	{ 0x0000, 0x0000 }, /* R416 */
-	{ 0x0000, 0x0000 }, /* R417 */
-	{ 0x0000, 0x0000 }, /* R418 */
-	{ 0x0000, 0x0000 }, /* R419 */
-	{ 0x0000, 0x0000 }, /* R420 */
-	{ 0x0000, 0x0000 }, /* R421 */
-	{ 0x0000, 0x0000 }, /* R422 */
-	{ 0x0000, 0x0000 }, /* R423 */
-	{ 0x0000, 0x0000 }, /* R424 */
-	{ 0x0000, 0x0000 }, /* R425 */
-	{ 0x0000, 0x0000 }, /* R426 */
-	{ 0x0000, 0x0000 }, /* R427 */
-	{ 0x0000, 0x0000 }, /* R428 */
-	{ 0x0000, 0x0000 }, /* R429 */
-	{ 0x0000, 0x0000 }, /* R430 */
-	{ 0x0000, 0x0000 }, /* R431 */
-	{ 0x0000, 0x0000 }, /* R432 */
-	{ 0x0000, 0x0000 }, /* R433 */
-	{ 0x0000, 0x0000 }, /* R434 */
-	{ 0x0000, 0x0000 }, /* R435 */
-	{ 0x0000, 0x0000 }, /* R436 */
-	{ 0x0000, 0x0000 }, /* R437 */
-	{ 0x0000, 0x0000 }, /* R438 */
-	{ 0x0000, 0x0000 }, /* R439 */
-	{ 0x0000, 0x0000 }, /* R440 */
-	{ 0x0000, 0x0000 }, /* R441 */
-	{ 0x0000, 0x0000 }, /* R442 */
-	{ 0x0000, 0x0000 }, /* R443 */
-	{ 0x0000, 0x0000 }, /* R444 */
-	{ 0x0000, 0x0000 }, /* R445 */
-	{ 0x0000, 0x0000 }, /* R446 */
-	{ 0x0000, 0x0000 }, /* R447 */
-	{ 0x0000, 0x0000 }, /* R448 */
-	{ 0x0000, 0x0000 }, /* R449 */
-	{ 0x0000, 0x0000 }, /* R450 */
-	{ 0x0000, 0x0000 }, /* R451 */
-	{ 0x0000, 0x0000 }, /* R452 */
-	{ 0x0000, 0x0000 }, /* R453 */
-	{ 0x0000, 0x0000 }, /* R454 */
-	{ 0x0000, 0x0000 }, /* R455 */
-	{ 0x0000, 0x0000 }, /* R456 */
-	{ 0x0000, 0x0000 }, /* R457 */
-	{ 0x0000, 0x0000 }, /* R458 */
-	{ 0x0000, 0x0000 }, /* R459 */
-	{ 0x0000, 0x0000 }, /* R460 */
-	{ 0x0000, 0x0000 }, /* R461 */
-	{ 0x0000, 0x0000 }, /* R462 */
-	{ 0x0000, 0x0000 }, /* R463 */
-	{ 0x0000, 0x0000 }, /* R464 */
-	{ 0x0000, 0x0000 }, /* R465 */
-	{ 0x0000, 0x0000 }, /* R466 */
-	{ 0x0000, 0x0000 }, /* R467 */
-	{ 0x0000, 0x0000 }, /* R468 */
-	{ 0x0000, 0x0000 }, /* R469 */
-	{ 0x0000, 0x0000 }, /* R470 */
-	{ 0x0000, 0x0000 }, /* R471 */
-	{ 0x0000, 0x0000 }, /* R472 */
-	{ 0x0000, 0x0000 }, /* R473 */
-	{ 0x0000, 0x0000 }, /* R474 */
-	{ 0x0000, 0x0000 }, /* R475 */
-	{ 0x0000, 0x0000 }, /* R476 */
-	{ 0x0000, 0x0000 }, /* R477 */
-	{ 0x0000, 0x0000 }, /* R478 */
-	{ 0x0000, 0x0000 }, /* R479 */
-	{ 0x0000, 0x0000 }, /* R480 */
-	{ 0x0000, 0x0000 }, /* R481 */
-	{ 0x0000, 0x0000 }, /* R482 */
-	{ 0x0000, 0x0000 }, /* R483 */
-	{ 0x0000, 0x0000 }, /* R484 */
-	{ 0x0000, 0x0000 }, /* R485 */
-	{ 0x0000, 0x0000 }, /* R486 */
-	{ 0x0000, 0x0000 }, /* R487 */
-	{ 0x0000, 0x0000 }, /* R488 */
-	{ 0x0000, 0x0000 }, /* R489 */
-	{ 0x0000, 0x0000 }, /* R490 */
-	{ 0x0000, 0x0000 }, /* R491 */
-	{ 0x0000, 0x0000 }, /* R492 */
-	{ 0x0000, 0x0000 }, /* R493 */
-	{ 0x0000, 0x0000 }, /* R494 */
-	{ 0x0000, 0x0000 }, /* R495 */
-	{ 0x0000, 0x0000 }, /* R496 */
-	{ 0x0000, 0x0000 }, /* R497 */
-	{ 0x0000, 0x0000 }, /* R498 */
-	{ 0x0000, 0x0000 }, /* R499 */
-	{ 0x0000, 0x0000 }, /* R500 */
-	{ 0x0000, 0x0000 }, /* R501 */
-	{ 0x0000, 0x0000 }, /* R502 */
-	{ 0x0000, 0x0000 }, /* R503 */
-	{ 0x0000, 0x0000 }, /* R504 */
-	{ 0x0000, 0x0000 }, /* R505 */
-	{ 0x0000, 0x0000 }, /* R506 */
-	{ 0x0000, 0x0000 }, /* R507 */
-	{ 0x0000, 0x0000 }, /* R508 */
-	{ 0x0000, 0x0000 }, /* R509 */
-	{ 0x0000, 0x0000 }, /* R510 */
-	{ 0x0000, 0x0000 }, /* R511 */
-	{ 0x001F, 0x001F }, /* R512   - AIF1 Clocking (1) */
-	{ 0x003F, 0x003F }, /* R513   - AIF1 Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R514 */
-	{ 0x0000, 0x0000 }, /* R515 */
-	{ 0x001F, 0x001F }, /* R516   - AIF2 Clocking (1) */
-	{ 0x003F, 0x003F }, /* R517   - AIF2 Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R518 */
-	{ 0x0000, 0x0000 }, /* R519 */
-	{ 0x001F, 0x001F }, /* R520   - Clocking (1) */
-	{ 0x0777, 0x0777 }, /* R521   - Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R522 */
-	{ 0x0000, 0x0000 }, /* R523 */
-	{ 0x0000, 0x0000 }, /* R524 */
-	{ 0x0000, 0x0000 }, /* R525 */
-	{ 0x0000, 0x0000 }, /* R526 */
-	{ 0x0000, 0x0000 }, /* R527 */
-	{ 0x00FF, 0x00FF }, /* R528   - AIF1 Rate */
-	{ 0x00FF, 0x00FF }, /* R529   - AIF2 Rate */
-	{ 0x000F, 0x0000 }, /* R530   - Rate Status */
-	{ 0x0000, 0x0000 }, /* R531 */
-	{ 0x0000, 0x0000 }, /* R532 */
-	{ 0x0000, 0x0000 }, /* R533 */
-	{ 0x0000, 0x0000 }, /* R534 */
-	{ 0x0000, 0x0000 }, /* R535 */
-	{ 0x0000, 0x0000 }, /* R536 */
-	{ 0x0000, 0x0000 }, /* R537 */
-	{ 0x0000, 0x0000 }, /* R538 */
-	{ 0x0000, 0x0000 }, /* R539 */
-	{ 0x0000, 0x0000 }, /* R540 */
-	{ 0x0000, 0x0000 }, /* R541 */
-	{ 0x0000, 0x0000 }, /* R542 */
-	{ 0x0000, 0x0000 }, /* R543 */
-	{ 0x0007, 0x0007 }, /* R544   - FLL1 Control (1) */
-	{ 0x3F77, 0x3F77 }, /* R545   - FLL1 Control (2) */
-	{ 0xFFFF, 0xFFFF }, /* R546   - FLL1 Control (3) */
-	{ 0x7FEF, 0x7FEF }, /* R547   - FLL1 Control (4) */
-	{ 0x1FDB, 0x1FDB }, /* R548   - FLL1 Control (5) */
-	{ 0x0000, 0x0000 }, /* R549 */
-	{ 0x0000, 0x0000 }, /* R550 */
-	{ 0x0000, 0x0000 }, /* R551 */
-	{ 0x0000, 0x0000 }, /* R552 */
-	{ 0x0000, 0x0000 }, /* R553 */
-	{ 0x0000, 0x0000 }, /* R554 */
-	{ 0x0000, 0x0000 }, /* R555 */
-	{ 0x0000, 0x0000 }, /* R556 */
-	{ 0x0000, 0x0000 }, /* R557 */
-	{ 0x0000, 0x0000 }, /* R558 */
-	{ 0x0000, 0x0000 }, /* R559 */
-	{ 0x0000, 0x0000 }, /* R560 */
-	{ 0x0000, 0x0000 }, /* R561 */
-	{ 0x0000, 0x0000 }, /* R562 */
-	{ 0x0000, 0x0000 }, /* R563 */
-	{ 0x0000, 0x0000 }, /* R564 */
-	{ 0x0000, 0x0000 }, /* R565 */
-	{ 0x0000, 0x0000 }, /* R566 */
-	{ 0x0000, 0x0000 }, /* R567 */
-	{ 0x0000, 0x0000 }, /* R568 */
-	{ 0x0000, 0x0000 }, /* R569 */
-	{ 0x0000, 0x0000 }, /* R570 */
-	{ 0x0000, 0x0000 }, /* R571 */
-	{ 0x0000, 0x0000 }, /* R572 */
-	{ 0x0000, 0x0000 }, /* R573 */
-	{ 0x0000, 0x0000 }, /* R574 */
-	{ 0x0000, 0x0000 }, /* R575 */
-	{ 0x0007, 0x0007 }, /* R576   - FLL2 Control (1) */
-	{ 0x3F77, 0x3F77 }, /* R577   - FLL2 Control (2) */
-	{ 0xFFFF, 0xFFFF }, /* R578   - FLL2 Control (3) */
-	{ 0x7FEF, 0x7FEF }, /* R579   - FLL2 Control (4) */
-	{ 0x1FDB, 0x1FDB }, /* R580   - FLL2 Control (5) */
-	{ 0x0000, 0x0000 }, /* R581 */
-	{ 0x0000, 0x0000 }, /* R582 */
-	{ 0x0000, 0x0000 }, /* R583 */
-	{ 0x0000, 0x0000 }, /* R584 */
-	{ 0x0000, 0x0000 }, /* R585 */
-	{ 0x0000, 0x0000 }, /* R586 */
-	{ 0x0000, 0x0000 }, /* R587 */
-	{ 0x0000, 0x0000 }, /* R588 */
-	{ 0x0000, 0x0000 }, /* R589 */
-	{ 0x0000, 0x0000 }, /* R590 */
-	{ 0x0000, 0x0000 }, /* R591 */
-	{ 0x0000, 0x0000 }, /* R592 */
-	{ 0x0000, 0x0000 }, /* R593 */
-	{ 0x0000, 0x0000 }, /* R594 */
-	{ 0x0000, 0x0000 }, /* R595 */
-	{ 0x0000, 0x0000 }, /* R596 */
-	{ 0x0000, 0x0000 }, /* R597 */
-	{ 0x0000, 0x0000 }, /* R598 */
-	{ 0x0000, 0x0000 }, /* R599 */
-	{ 0x0000, 0x0000 }, /* R600 */
-	{ 0x0000, 0x0000 }, /* R601 */
-	{ 0x0000, 0x0000 }, /* R602 */
-	{ 0x0000, 0x0000 }, /* R603 */
-	{ 0x0000, 0x0000 }, /* R604 */
-	{ 0x0000, 0x0000 }, /* R605 */
-	{ 0x0000, 0x0000 }, /* R606 */
-	{ 0x0000, 0x0000 }, /* R607 */
-	{ 0x0000, 0x0000 }, /* R608 */
-	{ 0x0000, 0x0000 }, /* R609 */
-	{ 0x0000, 0x0000 }, /* R610 */
-	{ 0x0000, 0x0000 }, /* R611 */
-	{ 0x0000, 0x0000 }, /* R612 */
-	{ 0x0000, 0x0000 }, /* R613 */
-	{ 0x0000, 0x0000 }, /* R614 */
-	{ 0x0000, 0x0000 }, /* R615 */
-	{ 0x0000, 0x0000 }, /* R616 */
-	{ 0x0000, 0x0000 }, /* R617 */
-	{ 0x0000, 0x0000 }, /* R618 */
-	{ 0x0000, 0x0000 }, /* R619 */
-	{ 0x0000, 0x0000 }, /* R620 */
-	{ 0x0000, 0x0000 }, /* R621 */
-	{ 0x0000, 0x0000 }, /* R622 */
-	{ 0x0000, 0x0000 }, /* R623 */
-	{ 0x0000, 0x0000 }, /* R624 */
-	{ 0x0000, 0x0000 }, /* R625 */
-	{ 0x0000, 0x0000 }, /* R626 */
-	{ 0x0000, 0x0000 }, /* R627 */
-	{ 0x0000, 0x0000 }, /* R628 */
-	{ 0x0000, 0x0000 }, /* R629 */
-	{ 0x0000, 0x0000 }, /* R630 */
-	{ 0x0000, 0x0000 }, /* R631 */
-	{ 0x0000, 0x0000 }, /* R632 */
-	{ 0x0000, 0x0000 }, /* R633 */
-	{ 0x0000, 0x0000 }, /* R634 */
-	{ 0x0000, 0x0000 }, /* R635 */
-	{ 0x0000, 0x0000 }, /* R636 */
-	{ 0x0000, 0x0000 }, /* R637 */
-	{ 0x0000, 0x0000 }, /* R638 */
-	{ 0x0000, 0x0000 }, /* R639 */
-	{ 0x0000, 0x0000 }, /* R640 */
-	{ 0x0000, 0x0000 }, /* R641 */
-	{ 0x0000, 0x0000 }, /* R642 */
-	{ 0x0000, 0x0000 }, /* R643 */
-	{ 0x0000, 0x0000 }, /* R644 */
-	{ 0x0000, 0x0000 }, /* R645 */
-	{ 0x0000, 0x0000 }, /* R646 */
-	{ 0x0000, 0x0000 }, /* R647 */
-	{ 0x0000, 0x0000 }, /* R648 */
-	{ 0x0000, 0x0000 }, /* R649 */
-	{ 0x0000, 0x0000 }, /* R650 */
-	{ 0x0000, 0x0000 }, /* R651 */
-	{ 0x0000, 0x0000 }, /* R652 */
-	{ 0x0000, 0x0000 }, /* R653 */
-	{ 0x0000, 0x0000 }, /* R654 */
-	{ 0x0000, 0x0000 }, /* R655 */
-	{ 0x0000, 0x0000 }, /* R656 */
-	{ 0x0000, 0x0000 }, /* R657 */
-	{ 0x0000, 0x0000 }, /* R658 */
-	{ 0x0000, 0x0000 }, /* R659 */
-	{ 0x0000, 0x0000 }, /* R660 */
-	{ 0x0000, 0x0000 }, /* R661 */
-	{ 0x0000, 0x0000 }, /* R662 */
-	{ 0x0000, 0x0000 }, /* R663 */
-	{ 0x0000, 0x0000 }, /* R664 */
-	{ 0x0000, 0x0000 }, /* R665 */
-	{ 0x0000, 0x0000 }, /* R666 */
-	{ 0x0000, 0x0000 }, /* R667 */
-	{ 0x0000, 0x0000 }, /* R668 */
-	{ 0x0000, 0x0000 }, /* R669 */
-	{ 0x0000, 0x0000 }, /* R670 */
-	{ 0x0000, 0x0000 }, /* R671 */
-	{ 0x0000, 0x0000 }, /* R672 */
-	{ 0x0000, 0x0000 }, /* R673 */
-	{ 0x0000, 0x0000 }, /* R674 */
-	{ 0x0000, 0x0000 }, /* R675 */
-	{ 0x0000, 0x0000 }, /* R676 */
-	{ 0x0000, 0x0000 }, /* R677 */
-	{ 0x0000, 0x0000 }, /* R678 */
-	{ 0x0000, 0x0000 }, /* R679 */
-	{ 0x0000, 0x0000 }, /* R680 */
-	{ 0x0000, 0x0000 }, /* R681 */
-	{ 0x0000, 0x0000 }, /* R682 */
-	{ 0x0000, 0x0000 }, /* R683 */
-	{ 0x0000, 0x0000 }, /* R684 */
-	{ 0x0000, 0x0000 }, /* R685 */
-	{ 0x0000, 0x0000 }, /* R686 */
-	{ 0x0000, 0x0000 }, /* R687 */
-	{ 0x0000, 0x0000 }, /* R688 */
-	{ 0x0000, 0x0000 }, /* R689 */
-	{ 0x0000, 0x0000 }, /* R690 */
-	{ 0x0000, 0x0000 }, /* R691 */
-	{ 0x0000, 0x0000 }, /* R692 */
-	{ 0x0000, 0x0000 }, /* R693 */
-	{ 0x0000, 0x0000 }, /* R694 */
-	{ 0x0000, 0x0000 }, /* R695 */
-	{ 0x0000, 0x0000 }, /* R696 */
-	{ 0x0000, 0x0000 }, /* R697 */
-	{ 0x0000, 0x0000 }, /* R698 */
-	{ 0x0000, 0x0000 }, /* R699 */
-	{ 0x0000, 0x0000 }, /* R700 */
-	{ 0x0000, 0x0000 }, /* R701 */
-	{ 0x0000, 0x0000 }, /* R702 */
-	{ 0x0000, 0x0000 }, /* R703 */
-	{ 0x0000, 0x0000 }, /* R704 */
-	{ 0x0000, 0x0000 }, /* R705 */
-	{ 0x0000, 0x0000 }, /* R706 */
-	{ 0x0000, 0x0000 }, /* R707 */
-	{ 0x0000, 0x0000 }, /* R708 */
-	{ 0x0000, 0x0000 }, /* R709 */
-	{ 0x0000, 0x0000 }, /* R710 */
-	{ 0x0000, 0x0000 }, /* R711 */
-	{ 0x0000, 0x0000 }, /* R712 */
-	{ 0x0000, 0x0000 }, /* R713 */
-	{ 0x0000, 0x0000 }, /* R714 */
-	{ 0x0000, 0x0000 }, /* R715 */
-	{ 0x0000, 0x0000 }, /* R716 */
-	{ 0x0000, 0x0000 }, /* R717 */
-	{ 0x0000, 0x0000 }, /* R718 */
-	{ 0x0000, 0x0000 }, /* R719 */
-	{ 0x0000, 0x0000 }, /* R720 */
-	{ 0x0000, 0x0000 }, /* R721 */
-	{ 0x0000, 0x0000 }, /* R722 */
-	{ 0x0000, 0x0000 }, /* R723 */
-	{ 0x0000, 0x0000 }, /* R724 */
-	{ 0x0000, 0x0000 }, /* R725 */
-	{ 0x0000, 0x0000 }, /* R726 */
-	{ 0x0000, 0x0000 }, /* R727 */
-	{ 0x0000, 0x0000 }, /* R728 */
-	{ 0x0000, 0x0000 }, /* R729 */
-	{ 0x0000, 0x0000 }, /* R730 */
-	{ 0x0000, 0x0000 }, /* R731 */
-	{ 0x0000, 0x0000 }, /* R732 */
-	{ 0x0000, 0x0000 }, /* R733 */
-	{ 0x0000, 0x0000 }, /* R734 */
-	{ 0x0000, 0x0000 }, /* R735 */
-	{ 0x0000, 0x0000 }, /* R736 */
-	{ 0x0000, 0x0000 }, /* R737 */
-	{ 0x0000, 0x0000 }, /* R738 */
-	{ 0x0000, 0x0000 }, /* R739 */
-	{ 0x0000, 0x0000 }, /* R740 */
-	{ 0x0000, 0x0000 }, /* R741 */
-	{ 0x0000, 0x0000 }, /* R742 */
-	{ 0x0000, 0x0000 }, /* R743 */
-	{ 0x0000, 0x0000 }, /* R744 */
-	{ 0x0000, 0x0000 }, /* R745 */
-	{ 0x0000, 0x0000 }, /* R746 */
-	{ 0x0000, 0x0000 }, /* R747 */
-	{ 0x0000, 0x0000 }, /* R748 */
-	{ 0x0000, 0x0000 }, /* R749 */
-	{ 0x0000, 0x0000 }, /* R750 */
-	{ 0x0000, 0x0000 }, /* R751 */
-	{ 0x0000, 0x0000 }, /* R752 */
-	{ 0x0000, 0x0000 }, /* R753 */
-	{ 0x0000, 0x0000 }, /* R754 */
-	{ 0x0000, 0x0000 }, /* R755 */
-	{ 0x0000, 0x0000 }, /* R756 */
-	{ 0x0000, 0x0000 }, /* R757 */
-	{ 0x0000, 0x0000 }, /* R758 */
-	{ 0x0000, 0x0000 }, /* R759 */
-	{ 0x0000, 0x0000 }, /* R760 */
-	{ 0x0000, 0x0000 }, /* R761 */
-	{ 0x0000, 0x0000 }, /* R762 */
-	{ 0x0000, 0x0000 }, /* R763 */
-	{ 0x0000, 0x0000 }, /* R764 */
-	{ 0x0000, 0x0000 }, /* R765 */
-	{ 0x0000, 0x0000 }, /* R766 */
-	{ 0x0000, 0x0000 }, /* R767 */
-	{ 0xE1F8, 0xE1F8 }, /* R768   - AIF1 Control (1) */
-	{ 0xCD1F, 0xCD1F }, /* R769   - AIF1 Control (2) */
-	{ 0xF000, 0xF000 }, /* R770   - AIF1 Master/Slave */
-	{ 0x01F0, 0x01F0 }, /* R771   - AIF1 BCLK */
-	{ 0x0FFF, 0x0FFF }, /* R772   - AIF1ADC LRCLK */
-	{ 0x0FFF, 0x0FFF }, /* R773   - AIF1DAC LRCLK */
-	{ 0x0003, 0x0003 }, /* R774   - AIF1DAC Data */
-	{ 0x0003, 0x0003 }, /* R775   - AIF1ADC Data */
-	{ 0x0000, 0x0000 }, /* R776 */
-	{ 0x0000, 0x0000 }, /* R777 */
-	{ 0x0000, 0x0000 }, /* R778 */
-	{ 0x0000, 0x0000 }, /* R779 */
-	{ 0x0000, 0x0000 }, /* R780 */
-	{ 0x0000, 0x0000 }, /* R781 */
-	{ 0x0000, 0x0000 }, /* R782 */
-	{ 0x0000, 0x0000 }, /* R783 */
-	{ 0xF1F8, 0xF1F8 }, /* R784   - AIF2 Control (1) */
-	{ 0xFD1F, 0xFD1F }, /* R785   - AIF2 Control (2) */
-	{ 0xF000, 0xF000 }, /* R786   - AIF2 Master/Slave */
-	{ 0x01F0, 0x01F0 }, /* R787   - AIF2 BCLK */
-	{ 0x0FFF, 0x0FFF }, /* R788   - AIF2ADC LRCLK */
-	{ 0x0FFF, 0x0FFF }, /* R789   - AIF2DAC LRCLK */
-	{ 0x0003, 0x0003 }, /* R790   - AIF2DAC Data */
-	{ 0x0003, 0x0003 }, /* R791   - AIF2ADC Data */
-	{ 0x0000, 0x0000 }, /* R792 */
-	{ 0x0000, 0x0000 }, /* R793 */
-	{ 0x0000, 0x0000 }, /* R794 */
-	{ 0x0000, 0x0000 }, /* R795 */
-	{ 0x0000, 0x0000 }, /* R796 */
-	{ 0x0000, 0x0000 }, /* R797 */
-	{ 0x0000, 0x0000 }, /* R798 */
-	{ 0x0000, 0x0000 }, /* R799 */
-	{ 0x0000, 0x0000 }, /* R800 */
-	{ 0x0000, 0x0000 }, /* R801 */
-	{ 0x0000, 0x0000 }, /* R802 */
-	{ 0x0000, 0x0000 }, /* R803 */
-	{ 0x0000, 0x0000 }, /* R804 */
-	{ 0x0000, 0x0000 }, /* R805 */
-	{ 0x0000, 0x0000 }, /* R806 */
-	{ 0x0000, 0x0000 }, /* R807 */
-	{ 0x0000, 0x0000 }, /* R808 */
-	{ 0x0000, 0x0000 }, /* R809 */
-	{ 0x0000, 0x0000 }, /* R810 */
-	{ 0x0000, 0x0000 }, /* R811 */
-	{ 0x0000, 0x0000 }, /* R812 */
-	{ 0x0000, 0x0000 }, /* R813 */
-	{ 0x0000, 0x0000 }, /* R814 */
-	{ 0x0000, 0x0000 }, /* R815 */
-	{ 0x0000, 0x0000 }, /* R816 */
-	{ 0x0000, 0x0000 }, /* R817 */
-	{ 0x0000, 0x0000 }, /* R818 */
-	{ 0x0000, 0x0000 }, /* R819 */
-	{ 0x0000, 0x0000 }, /* R820 */
-	{ 0x0000, 0x0000 }, /* R821 */
-	{ 0x0000, 0x0000 }, /* R822 */
-	{ 0x0000, 0x0000 }, /* R823 */
-	{ 0x0000, 0x0000 }, /* R824 */
-	{ 0x0000, 0x0000 }, /* R825 */
-	{ 0x0000, 0x0000 }, /* R826 */
-	{ 0x0000, 0x0000 }, /* R827 */
-	{ 0x0000, 0x0000 }, /* R828 */
-	{ 0x0000, 0x0000 }, /* R829 */
-	{ 0x0000, 0x0000 }, /* R830 */
-	{ 0x0000, 0x0000 }, /* R831 */
-	{ 0x0000, 0x0000 }, /* R832 */
-	{ 0x0000, 0x0000 }, /* R833 */
-	{ 0x0000, 0x0000 }, /* R834 */
-	{ 0x0000, 0x0000 }, /* R835 */
-	{ 0x0000, 0x0000 }, /* R836 */
-	{ 0x0000, 0x0000 }, /* R837 */
-	{ 0x0000, 0x0000 }, /* R838 */
-	{ 0x0000, 0x0000 }, /* R839 */
-	{ 0x0000, 0x0000 }, /* R840 */
-	{ 0x0000, 0x0000 }, /* R841 */
-	{ 0x0000, 0x0000 }, /* R842 */
-	{ 0x0000, 0x0000 }, /* R843 */
-	{ 0x0000, 0x0000 }, /* R844 */
-	{ 0x0000, 0x0000 }, /* R845 */
-	{ 0x0000, 0x0000 }, /* R846 */
-	{ 0x0000, 0x0000 }, /* R847 */
-	{ 0x0000, 0x0000 }, /* R848 */
-	{ 0x0000, 0x0000 }, /* R849 */
-	{ 0x0000, 0x0000 }, /* R850 */
-	{ 0x0000, 0x0000 }, /* R851 */
-	{ 0x0000, 0x0000 }, /* R852 */
-	{ 0x0000, 0x0000 }, /* R853 */
-	{ 0x0000, 0x0000 }, /* R854 */
-	{ 0x0000, 0x0000 }, /* R855 */
-	{ 0x0000, 0x0000 }, /* R856 */
-	{ 0x0000, 0x0000 }, /* R857 */
-	{ 0x0000, 0x0000 }, /* R858 */
-	{ 0x0000, 0x0000 }, /* R859 */
-	{ 0x0000, 0x0000 }, /* R860 */
-	{ 0x0000, 0x0000 }, /* R861 */
-	{ 0x0000, 0x0000 }, /* R862 */
-	{ 0x0000, 0x0000 }, /* R863 */
-	{ 0x0000, 0x0000 }, /* R864 */
-	{ 0x0000, 0x0000 }, /* R865 */
-	{ 0x0000, 0x0000 }, /* R866 */
-	{ 0x0000, 0x0000 }, /* R867 */
-	{ 0x0000, 0x0000 }, /* R868 */
-	{ 0x0000, 0x0000 }, /* R869 */
-	{ 0x0000, 0x0000 }, /* R870 */
-	{ 0x0000, 0x0000 }, /* R871 */
-	{ 0x0000, 0x0000 }, /* R872 */
-	{ 0x0000, 0x0000 }, /* R873 */
-	{ 0x0000, 0x0000 }, /* R874 */
-	{ 0x0000, 0x0000 }, /* R875 */
-	{ 0x0000, 0x0000 }, /* R876 */
-	{ 0x0000, 0x0000 }, /* R877 */
-	{ 0x0000, 0x0000 }, /* R878 */
-	{ 0x0000, 0x0000 }, /* R879 */
-	{ 0x0000, 0x0000 }, /* R880 */
-	{ 0x0000, 0x0000 }, /* R881 */
-	{ 0x0000, 0x0000 }, /* R882 */
-	{ 0x0000, 0x0000 }, /* R883 */
-	{ 0x0000, 0x0000 }, /* R884 */
-	{ 0x0000, 0x0000 }, /* R885 */
-	{ 0x0000, 0x0000 }, /* R886 */
-	{ 0x0000, 0x0000 }, /* R887 */
-	{ 0x0000, 0x0000 }, /* R888 */
-	{ 0x0000, 0x0000 }, /* R889 */
-	{ 0x0000, 0x0000 }, /* R890 */
-	{ 0x0000, 0x0000 }, /* R891 */
-	{ 0x0000, 0x0000 }, /* R892 */
-	{ 0x0000, 0x0000 }, /* R893 */
-	{ 0x0000, 0x0000 }, /* R894 */
-	{ 0x0000, 0x0000 }, /* R895 */
-	{ 0x0000, 0x0000 }, /* R896 */
-	{ 0x0000, 0x0000 }, /* R897 */
-	{ 0x0000, 0x0000 }, /* R898 */
-	{ 0x0000, 0x0000 }, /* R899 */
-	{ 0x0000, 0x0000 }, /* R900 */
-	{ 0x0000, 0x0000 }, /* R901 */
-	{ 0x0000, 0x0000 }, /* R902 */
-	{ 0x0000, 0x0000 }, /* R903 */
-	{ 0x0000, 0x0000 }, /* R904 */
-	{ 0x0000, 0x0000 }, /* R905 */
-	{ 0x0000, 0x0000 }, /* R906 */
-	{ 0x0000, 0x0000 }, /* R907 */
-	{ 0x0000, 0x0000 }, /* R908 */
-	{ 0x0000, 0x0000 }, /* R909 */
-	{ 0x0000, 0x0000 }, /* R910 */
-	{ 0x0000, 0x0000 }, /* R911 */
-	{ 0x0000, 0x0000 }, /* R912 */
-	{ 0x0000, 0x0000 }, /* R913 */
-	{ 0x0000, 0x0000 }, /* R914 */
-	{ 0x0000, 0x0000 }, /* R915 */
-	{ 0x0000, 0x0000 }, /* R916 */
-	{ 0x0000, 0x0000 }, /* R917 */
-	{ 0x0000, 0x0000 }, /* R918 */
-	{ 0x0000, 0x0000 }, /* R919 */
-	{ 0x0000, 0x0000 }, /* R920 */
-	{ 0x0000, 0x0000 }, /* R921 */
-	{ 0x0000, 0x0000 }, /* R922 */
-	{ 0x0000, 0x0000 }, /* R923 */
-	{ 0x0000, 0x0000 }, /* R924 */
-	{ 0x0000, 0x0000 }, /* R925 */
-	{ 0x0000, 0x0000 }, /* R926 */
-	{ 0x0000, 0x0000 }, /* R927 */
-	{ 0x0000, 0x0000 }, /* R928 */
-	{ 0x0000, 0x0000 }, /* R929 */
-	{ 0x0000, 0x0000 }, /* R930 */
-	{ 0x0000, 0x0000 }, /* R931 */
-	{ 0x0000, 0x0000 }, /* R932 */
-	{ 0x0000, 0x0000 }, /* R933 */
-	{ 0x0000, 0x0000 }, /* R934 */
-	{ 0x0000, 0x0000 }, /* R935 */
-	{ 0x0000, 0x0000 }, /* R936 */
-	{ 0x0000, 0x0000 }, /* R937 */
-	{ 0x0000, 0x0000 }, /* R938 */
-	{ 0x0000, 0x0000 }, /* R939 */
-	{ 0x0000, 0x0000 }, /* R940 */
-	{ 0x0000, 0x0000 }, /* R941 */
-	{ 0x0000, 0x0000 }, /* R942 */
-	{ 0x0000, 0x0000 }, /* R943 */
-	{ 0x0000, 0x0000 }, /* R944 */
-	{ 0x0000, 0x0000 }, /* R945 */
-	{ 0x0000, 0x0000 }, /* R946 */
-	{ 0x0000, 0x0000 }, /* R947 */
-	{ 0x0000, 0x0000 }, /* R948 */
-	{ 0x0000, 0x0000 }, /* R949 */
-	{ 0x0000, 0x0000 }, /* R950 */
-	{ 0x0000, 0x0000 }, /* R951 */
-	{ 0x0000, 0x0000 }, /* R952 */
-	{ 0x0000, 0x0000 }, /* R953 */
-	{ 0x0000, 0x0000 }, /* R954 */
-	{ 0x0000, 0x0000 }, /* R955 */
-	{ 0x0000, 0x0000 }, /* R956 */
-	{ 0x0000, 0x0000 }, /* R957 */
-	{ 0x0000, 0x0000 }, /* R958 */
-	{ 0x0000, 0x0000 }, /* R959 */
-	{ 0x0000, 0x0000 }, /* R960 */
-	{ 0x0000, 0x0000 }, /* R961 */
-	{ 0x0000, 0x0000 }, /* R962 */
-	{ 0x0000, 0x0000 }, /* R963 */
-	{ 0x0000, 0x0000 }, /* R964 */
-	{ 0x0000, 0x0000 }, /* R965 */
-	{ 0x0000, 0x0000 }, /* R966 */
-	{ 0x0000, 0x0000 }, /* R967 */
-	{ 0x0000, 0x0000 }, /* R968 */
-	{ 0x0000, 0x0000 }, /* R969 */
-	{ 0x0000, 0x0000 }, /* R970 */
-	{ 0x0000, 0x0000 }, /* R971 */
-	{ 0x0000, 0x0000 }, /* R972 */
-	{ 0x0000, 0x0000 }, /* R973 */
-	{ 0x0000, 0x0000 }, /* R974 */
-	{ 0x0000, 0x0000 }, /* R975 */
-	{ 0x0000, 0x0000 }, /* R976 */
-	{ 0x0000, 0x0000 }, /* R977 */
-	{ 0x0000, 0x0000 }, /* R978 */
-	{ 0x0000, 0x0000 }, /* R979 */
-	{ 0x0000, 0x0000 }, /* R980 */
-	{ 0x0000, 0x0000 }, /* R981 */
-	{ 0x0000, 0x0000 }, /* R982 */
-	{ 0x0000, 0x0000 }, /* R983 */
-	{ 0x0000, 0x0000 }, /* R984 */
-	{ 0x0000, 0x0000 }, /* R985 */
-	{ 0x0000, 0x0000 }, /* R986 */
-	{ 0x0000, 0x0000 }, /* R987 */
-	{ 0x0000, 0x0000 }, /* R988 */
-	{ 0x0000, 0x0000 }, /* R989 */
-	{ 0x0000, 0x0000 }, /* R990 */
-	{ 0x0000, 0x0000 }, /* R991 */
-	{ 0x0000, 0x0000 }, /* R992 */
-	{ 0x0000, 0x0000 }, /* R993 */
-	{ 0x0000, 0x0000 }, /* R994 */
-	{ 0x0000, 0x0000 }, /* R995 */
-	{ 0x0000, 0x0000 }, /* R996 */
-	{ 0x0000, 0x0000 }, /* R997 */
-	{ 0x0000, 0x0000 }, /* R998 */
-	{ 0x0000, 0x0000 }, /* R999 */
-	{ 0x0000, 0x0000 }, /* R1000 */
-	{ 0x0000, 0x0000 }, /* R1001 */
-	{ 0x0000, 0x0000 }, /* R1002 */
-	{ 0x0000, 0x0000 }, /* R1003 */
-	{ 0x0000, 0x0000 }, /* R1004 */
-	{ 0x0000, 0x0000 }, /* R1005 */
-	{ 0x0000, 0x0000 }, /* R1006 */
-	{ 0x0000, 0x0000 }, /* R1007 */
-	{ 0x0000, 0x0000 }, /* R1008 */
-	{ 0x0000, 0x0000 }, /* R1009 */
-	{ 0x0000, 0x0000 }, /* R1010 */
-	{ 0x0000, 0x0000 }, /* R1011 */
-	{ 0x0000, 0x0000 }, /* R1012 */
-	{ 0x0000, 0x0000 }, /* R1013 */
-	{ 0x0000, 0x0000 }, /* R1014 */
-	{ 0x0000, 0x0000 }, /* R1015 */
-	{ 0x0000, 0x0000 }, /* R1016 */
-	{ 0x0000, 0x0000 }, /* R1017 */
-	{ 0x0000, 0x0000 }, /* R1018 */
-	{ 0x0000, 0x0000 }, /* R1019 */
-	{ 0x0000, 0x0000 }, /* R1020 */
-	{ 0x0000, 0x0000 }, /* R1021 */
-	{ 0x0000, 0x0000 }, /* R1022 */
-	{ 0x0000, 0x0000 }, /* R1023 */
-	{ 0x00FF, 0x01FF }, /* R1024  - AIF1 ADC1 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1025  - AIF1 ADC1 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1026  - AIF1 DAC1 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1027  - AIF1 DAC1 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1028  - AIF1 ADC2 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1029  - AIF1 ADC2 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1030  - AIF1 DAC2 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1031  - AIF1 DAC2 Right Volume */
-	{ 0x0000, 0x0000 }, /* R1032 */
-	{ 0x0000, 0x0000 }, /* R1033 */
-	{ 0x0000, 0x0000 }, /* R1034 */
-	{ 0x0000, 0x0000 }, /* R1035 */
-	{ 0x0000, 0x0000 }, /* R1036 */
-	{ 0x0000, 0x0000 }, /* R1037 */
-	{ 0x0000, 0x0000 }, /* R1038 */
-	{ 0x0000, 0x0000 }, /* R1039 */
-	{ 0xF800, 0xF800 }, /* R1040  - AIF1 ADC1 Filters */
-	{ 0x7800, 0x7800 }, /* R1041  - AIF1 ADC2 Filters */
-	{ 0x0000, 0x0000 }, /* R1042 */
-	{ 0x0000, 0x0000 }, /* R1043 */
-	{ 0x0000, 0x0000 }, /* R1044 */
-	{ 0x0000, 0x0000 }, /* R1045 */
-	{ 0x0000, 0x0000 }, /* R1046 */
-	{ 0x0000, 0x0000 }, /* R1047 */
-	{ 0x0000, 0x0000 }, /* R1048 */
-	{ 0x0000, 0x0000 }, /* R1049 */
-	{ 0x0000, 0x0000 }, /* R1050 */
-	{ 0x0000, 0x0000 }, /* R1051 */
-	{ 0x0000, 0x0000 }, /* R1052 */
-	{ 0x0000, 0x0000 }, /* R1053 */
-	{ 0x0000, 0x0000 }, /* R1054 */
-	{ 0x0000, 0x0000 }, /* R1055 */
-	{ 0x02B6, 0x02B6 }, /* R1056  - AIF1 DAC1 Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1057  - AIF1 DAC1 Filters (2) */
-	{ 0x02B6, 0x02B6 }, /* R1058  - AIF1 DAC2 Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1059  - AIF1 DAC2 Filters (2) */
-	{ 0x0000, 0x0000 }, /* R1060 */
-	{ 0x0000, 0x0000 }, /* R1061 */
-	{ 0x0000, 0x0000 }, /* R1062 */
-	{ 0x0000, 0x0000 }, /* R1063 */
-	{ 0x0000, 0x0000 }, /* R1064 */
-	{ 0x0000, 0x0000 }, /* R1065 */
-	{ 0x0000, 0x0000 }, /* R1066 */
-	{ 0x0000, 0x0000 }, /* R1067 */
-	{ 0x0000, 0x0000 }, /* R1068 */
-	{ 0x0000, 0x0000 }, /* R1069 */
-	{ 0x0000, 0x0000 }, /* R1070 */
-	{ 0x0000, 0x0000 }, /* R1071 */
-	{ 0x0000, 0x0000 }, /* R1072 */
-	{ 0x0000, 0x0000 }, /* R1073 */
-	{ 0x0000, 0x0000 }, /* R1074 */
-	{ 0x0000, 0x0000 }, /* R1075 */
-	{ 0x0000, 0x0000 }, /* R1076 */
-	{ 0x0000, 0x0000 }, /* R1077 */
-	{ 0x0000, 0x0000 }, /* R1078 */
-	{ 0x0000, 0x0000 }, /* R1079 */
-	{ 0x0000, 0x0000 }, /* R1080 */
-	{ 0x0000, 0x0000 }, /* R1081 */
-	{ 0x0000, 0x0000 }, /* R1082 */
-	{ 0x0000, 0x0000 }, /* R1083 */
-	{ 0x0000, 0x0000 }, /* R1084 */
-	{ 0x0000, 0x0000 }, /* R1085 */
-	{ 0x0000, 0x0000 }, /* R1086 */
-	{ 0x0000, 0x0000 }, /* R1087 */
-	{ 0xFFFF, 0xFFFF }, /* R1088  - AIF1 DRC1 (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1089  - AIF1 DRC1 (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1090  - AIF1 DRC1 (3) */
-	{ 0x07FF, 0x07FF }, /* R1091  - AIF1 DRC1 (4) */
-	{ 0x03FF, 0x03FF }, /* R1092  - AIF1 DRC1 (5) */
-	{ 0x0000, 0x0000 }, /* R1093 */
-	{ 0x0000, 0x0000 }, /* R1094 */
-	{ 0x0000, 0x0000 }, /* R1095 */
-	{ 0x0000, 0x0000 }, /* R1096 */
-	{ 0x0000, 0x0000 }, /* R1097 */
-	{ 0x0000, 0x0000 }, /* R1098 */
-	{ 0x0000, 0x0000 }, /* R1099 */
-	{ 0x0000, 0x0000 }, /* R1100 */
-	{ 0x0000, 0x0000 }, /* R1101 */
-	{ 0x0000, 0x0000 }, /* R1102 */
-	{ 0x0000, 0x0000 }, /* R1103 */
-	{ 0xFFFF, 0xFFFF }, /* R1104  - AIF1 DRC2 (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1105  - AIF1 DRC2 (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1106  - AIF1 DRC2 (3) */
-	{ 0x07FF, 0x07FF }, /* R1107  - AIF1 DRC2 (4) */
-	{ 0x03FF, 0x03FF }, /* R1108  - AIF1 DRC2 (5) */
-	{ 0x0000, 0x0000 }, /* R1109 */
-	{ 0x0000, 0x0000 }, /* R1110 */
-	{ 0x0000, 0x0000 }, /* R1111 */
-	{ 0x0000, 0x0000 }, /* R1112 */
-	{ 0x0000, 0x0000 }, /* R1113 */
-	{ 0x0000, 0x0000 }, /* R1114 */
-	{ 0x0000, 0x0000 }, /* R1115 */
-	{ 0x0000, 0x0000 }, /* R1116 */
-	{ 0x0000, 0x0000 }, /* R1117 */
-	{ 0x0000, 0x0000 }, /* R1118 */
-	{ 0x0000, 0x0000 }, /* R1119 */
-	{ 0x0000, 0x0000 }, /* R1120 */
-	{ 0x0000, 0x0000 }, /* R1121 */
-	{ 0x0000, 0x0000 }, /* R1122 */
-	{ 0x0000, 0x0000 }, /* R1123 */
-	{ 0x0000, 0x0000 }, /* R1124 */
-	{ 0x0000, 0x0000 }, /* R1125 */
-	{ 0x0000, 0x0000 }, /* R1126 */
-	{ 0x0000, 0x0000 }, /* R1127 */
-	{ 0x0000, 0x0000 }, /* R1128 */
-	{ 0x0000, 0x0000 }, /* R1129 */
-	{ 0x0000, 0x0000 }, /* R1130 */
-	{ 0x0000, 0x0000 }, /* R1131 */
-	{ 0x0000, 0x0000 }, /* R1132 */
-	{ 0x0000, 0x0000 }, /* R1133 */
-	{ 0x0000, 0x0000 }, /* R1134 */
-	{ 0x0000, 0x0000 }, /* R1135 */
-	{ 0x0000, 0x0000 }, /* R1136 */
-	{ 0x0000, 0x0000 }, /* R1137 */
-	{ 0x0000, 0x0000 }, /* R1138 */
-	{ 0x0000, 0x0000 }, /* R1139 */
-	{ 0x0000, 0x0000 }, /* R1140 */
-	{ 0x0000, 0x0000 }, /* R1141 */
-	{ 0x0000, 0x0000 }, /* R1142 */
-	{ 0x0000, 0x0000 }, /* R1143 */
-	{ 0x0000, 0x0000 }, /* R1144 */
-	{ 0x0000, 0x0000 }, /* R1145 */
-	{ 0x0000, 0x0000 }, /* R1146 */
-	{ 0x0000, 0x0000 }, /* R1147 */
-	{ 0x0000, 0x0000 }, /* R1148 */
-	{ 0x0000, 0x0000 }, /* R1149 */
-	{ 0x0000, 0x0000 }, /* R1150 */
-	{ 0x0000, 0x0000 }, /* R1151 */
-	{ 0xFFFF, 0xFFFF }, /* R1152  - AIF1 DAC1 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1153  - AIF1 DAC1 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1154  - AIF1 DAC1 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1155  - AIF1 DAC1 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1156  - AIF1 DAC1 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1157  - AIF1 DAC1 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1158  - AIF1 DAC1 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1159  - AIF1 DAC1 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1160  - AIF1 DAC1 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1161  - AIF1 DAC1 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1162  - AIF1 DAC1 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1163  - AIF1 DAC1 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1164  - AIF1 DAC1 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1165  - AIF1 DAC1 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1166  - AIF1 DAC1 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1167  - AIF1 DAC1 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1168  - AIF1 DAC1 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1169  - AIF1 DAC1 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1170  - AIF1 DAC1 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1171  - AIF1 DAC1 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1172 */
-	{ 0x0000, 0x0000 }, /* R1173 */
-	{ 0x0000, 0x0000 }, /* R1174 */
-	{ 0x0000, 0x0000 }, /* R1175 */
-	{ 0x0000, 0x0000 }, /* R1176 */
-	{ 0x0000, 0x0000 }, /* R1177 */
-	{ 0x0000, 0x0000 }, /* R1178 */
-	{ 0x0000, 0x0000 }, /* R1179 */
-	{ 0x0000, 0x0000 }, /* R1180 */
-	{ 0x0000, 0x0000 }, /* R1181 */
-	{ 0x0000, 0x0000 }, /* R1182 */
-	{ 0x0000, 0x0000 }, /* R1183 */
-	{ 0xFFFF, 0xFFFF }, /* R1184  - AIF1 DAC2 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1185  - AIF1 DAC2 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1186  - AIF1 DAC2 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1187  - AIF1 DAC2 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1188  - AIF1 DAC2 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1189  - AIF1 DAC2 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1190  - AIF1 DAC2 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1191  - AIF1 DAC2 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1192  - AIF1 DAC2 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1193  - AIF1 DAC2 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1194  - AIF1 DAC2 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1195  - AIF1 DAC2 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1196  - AIF1 DAC2 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1197  - AIF1 DAC2 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1198  - AIF1 DAC2 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1199  - AIF1 DAC2 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1200  - AIF1 DAC2 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1201  - AIF1 DAC2 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1202  - AIF1 DAC2 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1203  - AIF1 DAC2 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1204 */
-	{ 0x0000, 0x0000 }, /* R1205 */
-	{ 0x0000, 0x0000 }, /* R1206 */
-	{ 0x0000, 0x0000 }, /* R1207 */
-	{ 0x0000, 0x0000 }, /* R1208 */
-	{ 0x0000, 0x0000 }, /* R1209 */
-	{ 0x0000, 0x0000 }, /* R1210 */
-	{ 0x0000, 0x0000 }, /* R1211 */
-	{ 0x0000, 0x0000 }, /* R1212 */
-	{ 0x0000, 0x0000 }, /* R1213 */
-	{ 0x0000, 0x0000 }, /* R1214 */
-	{ 0x0000, 0x0000 }, /* R1215 */
-	{ 0x0000, 0x0000 }, /* R1216 */
-	{ 0x0000, 0x0000 }, /* R1217 */
-	{ 0x0000, 0x0000 }, /* R1218 */
-	{ 0x0000, 0x0000 }, /* R1219 */
-	{ 0x0000, 0x0000 }, /* R1220 */
-	{ 0x0000, 0x0000 }, /* R1221 */
-	{ 0x0000, 0x0000 }, /* R1222 */
-	{ 0x0000, 0x0000 }, /* R1223 */
-	{ 0x0000, 0x0000 }, /* R1224 */
-	{ 0x0000, 0x0000 }, /* R1225 */
-	{ 0x0000, 0x0000 }, /* R1226 */
-	{ 0x0000, 0x0000 }, /* R1227 */
-	{ 0x0000, 0x0000 }, /* R1228 */
-	{ 0x0000, 0x0000 }, /* R1229 */
-	{ 0x0000, 0x0000 }, /* R1230 */
-	{ 0x0000, 0x0000 }, /* R1231 */
-	{ 0x0000, 0x0000 }, /* R1232 */
-	{ 0x0000, 0x0000 }, /* R1233 */
-	{ 0x0000, 0x0000 }, /* R1234 */
-	{ 0x0000, 0x0000 }, /* R1235 */
-	{ 0x0000, 0x0000 }, /* R1236 */
-	{ 0x0000, 0x0000 }, /* R1237 */
-	{ 0x0000, 0x0000 }, /* R1238 */
-	{ 0x0000, 0x0000 }, /* R1239 */
-	{ 0x0000, 0x0000 }, /* R1240 */
-	{ 0x0000, 0x0000 }, /* R1241 */
-	{ 0x0000, 0x0000 }, /* R1242 */
-	{ 0x0000, 0x0000 }, /* R1243 */
-	{ 0x0000, 0x0000 }, /* R1244 */
-	{ 0x0000, 0x0000 }, /* R1245 */
-	{ 0x0000, 0x0000 }, /* R1246 */
-	{ 0x0000, 0x0000 }, /* R1247 */
-	{ 0x0000, 0x0000 }, /* R1248 */
-	{ 0x0000, 0x0000 }, /* R1249 */
-	{ 0x0000, 0x0000 }, /* R1250 */
-	{ 0x0000, 0x0000 }, /* R1251 */
-	{ 0x0000, 0x0000 }, /* R1252 */
-	{ 0x0000, 0x0000 }, /* R1253 */
-	{ 0x0000, 0x0000 }, /* R1254 */
-	{ 0x0000, 0x0000 }, /* R1255 */
-	{ 0x0000, 0x0000 }, /* R1256 */
-	{ 0x0000, 0x0000 }, /* R1257 */
-	{ 0x0000, 0x0000 }, /* R1258 */
-	{ 0x0000, 0x0000 }, /* R1259 */
-	{ 0x0000, 0x0000 }, /* R1260 */
-	{ 0x0000, 0x0000 }, /* R1261 */
-	{ 0x0000, 0x0000 }, /* R1262 */
-	{ 0x0000, 0x0000 }, /* R1263 */
-	{ 0x0000, 0x0000 }, /* R1264 */
-	{ 0x0000, 0x0000 }, /* R1265 */
-	{ 0x0000, 0x0000 }, /* R1266 */
-	{ 0x0000, 0x0000 }, /* R1267 */
-	{ 0x0000, 0x0000 }, /* R1268 */
-	{ 0x0000, 0x0000 }, /* R1269 */
-	{ 0x0000, 0x0000 }, /* R1270 */
-	{ 0x0000, 0x0000 }, /* R1271 */
-	{ 0x0000, 0x0000 }, /* R1272 */
-	{ 0x0000, 0x0000 }, /* R1273 */
-	{ 0x0000, 0x0000 }, /* R1274 */
-	{ 0x0000, 0x0000 }, /* R1275 */
-	{ 0x0000, 0x0000 }, /* R1276 */
-	{ 0x0000, 0x0000 }, /* R1277 */
-	{ 0x0000, 0x0000 }, /* R1278 */
-	{ 0x0000, 0x0000 }, /* R1279 */
-	{ 0x00FF, 0x01FF }, /* R1280  - AIF2 ADC Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1281  - AIF2 ADC Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1282  - AIF2 DAC Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1283  - AIF2 DAC Right Volume */
-	{ 0x0000, 0x0000 }, /* R1284 */
-	{ 0x0000, 0x0000 }, /* R1285 */
-	{ 0x0000, 0x0000 }, /* R1286 */
-	{ 0x0000, 0x0000 }, /* R1287 */
-	{ 0x0000, 0x0000 }, /* R1288 */
-	{ 0x0000, 0x0000 }, /* R1289 */
-	{ 0x0000, 0x0000 }, /* R1290 */
-	{ 0x0000, 0x0000 }, /* R1291 */
-	{ 0x0000, 0x0000 }, /* R1292 */
-	{ 0x0000, 0x0000 }, /* R1293 */
-	{ 0x0000, 0x0000 }, /* R1294 */
-	{ 0x0000, 0x0000 }, /* R1295 */
-	{ 0xF800, 0xF800 }, /* R1296  - AIF2 ADC Filters */
-	{ 0x0000, 0x0000 }, /* R1297 */
-	{ 0x0000, 0x0000 }, /* R1298 */
-	{ 0x0000, 0x0000 }, /* R1299 */
-	{ 0x0000, 0x0000 }, /* R1300 */
-	{ 0x0000, 0x0000 }, /* R1301 */
-	{ 0x0000, 0x0000 }, /* R1302 */
-	{ 0x0000, 0x0000 }, /* R1303 */
-	{ 0x0000, 0x0000 }, /* R1304 */
-	{ 0x0000, 0x0000 }, /* R1305 */
-	{ 0x0000, 0x0000 }, /* R1306 */
-	{ 0x0000, 0x0000 }, /* R1307 */
-	{ 0x0000, 0x0000 }, /* R1308 */
-	{ 0x0000, 0x0000 }, /* R1309 */
-	{ 0x0000, 0x0000 }, /* R1310 */
-	{ 0x0000, 0x0000 }, /* R1311 */
-	{ 0x02B6, 0x02B6 }, /* R1312  - AIF2 DAC Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1313  - AIF2 DAC Filters (2) */
-	{ 0x0000, 0x0000 }, /* R1314 */
-	{ 0x0000, 0x0000 }, /* R1315 */
-	{ 0x0000, 0x0000 }, /* R1316 */
-	{ 0x0000, 0x0000 }, /* R1317 */
-	{ 0x0000, 0x0000 }, /* R1318 */
-	{ 0x0000, 0x0000 }, /* R1319 */
-	{ 0x0000, 0x0000 }, /* R1320 */
-	{ 0x0000, 0x0000 }, /* R1321 */
-	{ 0x0000, 0x0000 }, /* R1322 */
-	{ 0x0000, 0x0000 }, /* R1323 */
-	{ 0x0000, 0x0000 }, /* R1324 */
-	{ 0x0000, 0x0000 }, /* R1325 */
-	{ 0x0000, 0x0000 }, /* R1326 */
-	{ 0x0000, 0x0000 }, /* R1327 */
-	{ 0x0000, 0x0000 }, /* R1328 */
-	{ 0x0000, 0x0000 }, /* R1329 */
-	{ 0x0000, 0x0000 }, /* R1330 */
-	{ 0x0000, 0x0000 }, /* R1331 */
-	{ 0x0000, 0x0000 }, /* R1332 */
-	{ 0x0000, 0x0000 }, /* R1333 */
-	{ 0x0000, 0x0000 }, /* R1334 */
-	{ 0x0000, 0x0000 }, /* R1335 */
-	{ 0x0000, 0x0000 }, /* R1336 */
-	{ 0x0000, 0x0000 }, /* R1337 */
-	{ 0x0000, 0x0000 }, /* R1338 */
-	{ 0x0000, 0x0000 }, /* R1339 */
-	{ 0x0000, 0x0000 }, /* R1340 */
-	{ 0x0000, 0x0000 }, /* R1341 */
-	{ 0x0000, 0x0000 }, /* R1342 */
-	{ 0x0000, 0x0000 }, /* R1343 */
-	{ 0xFFFF, 0xFFFF }, /* R1344  - AIF2 DRC (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1345  - AIF2 DRC (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1346  - AIF2 DRC (3) */
-	{ 0x07FF, 0x07FF }, /* R1347  - AIF2 DRC (4) */
-	{ 0x03FF, 0x03FF }, /* R1348  - AIF2 DRC (5) */
-	{ 0x0000, 0x0000 }, /* R1349 */
-	{ 0x0000, 0x0000 }, /* R1350 */
-	{ 0x0000, 0x0000 }, /* R1351 */
-	{ 0x0000, 0x0000 }, /* R1352 */
-	{ 0x0000, 0x0000 }, /* R1353 */
-	{ 0x0000, 0x0000 }, /* R1354 */
-	{ 0x0000, 0x0000 }, /* R1355 */
-	{ 0x0000, 0x0000 }, /* R1356 */
-	{ 0x0000, 0x0000 }, /* R1357 */
-	{ 0x0000, 0x0000 }, /* R1358 */
-	{ 0x0000, 0x0000 }, /* R1359 */
-	{ 0x0000, 0x0000 }, /* R1360 */
-	{ 0x0000, 0x0000 }, /* R1361 */
-	{ 0x0000, 0x0000 }, /* R1362 */
-	{ 0x0000, 0x0000 }, /* R1363 */
-	{ 0x0000, 0x0000 }, /* R1364 */
-	{ 0x0000, 0x0000 }, /* R1365 */
-	{ 0x0000, 0x0000 }, /* R1366 */
-	{ 0x0000, 0x0000 }, /* R1367 */
-	{ 0x0000, 0x0000 }, /* R1368 */
-	{ 0x0000, 0x0000 }, /* R1369 */
-	{ 0x0000, 0x0000 }, /* R1370 */
-	{ 0x0000, 0x0000 }, /* R1371 */
-	{ 0x0000, 0x0000 }, /* R1372 */
-	{ 0x0000, 0x0000 }, /* R1373 */
-	{ 0x0000, 0x0000 }, /* R1374 */
-	{ 0x0000, 0x0000 }, /* R1375 */
-	{ 0x0000, 0x0000 }, /* R1376 */
-	{ 0x0000, 0x0000 }, /* R1377 */
-	{ 0x0000, 0x0000 }, /* R1378 */
-	{ 0x0000, 0x0000 }, /* R1379 */
-	{ 0x0000, 0x0000 }, /* R1380 */
-	{ 0x0000, 0x0000 }, /* R1381 */
-	{ 0x0000, 0x0000 }, /* R1382 */
-	{ 0x0000, 0x0000 }, /* R1383 */
-	{ 0x0000, 0x0000 }, /* R1384 */
-	{ 0x0000, 0x0000 }, /* R1385 */
-	{ 0x0000, 0x0000 }, /* R1386 */
-	{ 0x0000, 0x0000 }, /* R1387 */
-	{ 0x0000, 0x0000 }, /* R1388 */
-	{ 0x0000, 0x0000 }, /* R1389 */
-	{ 0x0000, 0x0000 }, /* R1390 */
-	{ 0x0000, 0x0000 }, /* R1391 */
-	{ 0x0000, 0x0000 }, /* R1392 */
-	{ 0x0000, 0x0000 }, /* R1393 */
-	{ 0x0000, 0x0000 }, /* R1394 */
-	{ 0x0000, 0x0000 }, /* R1395 */
-	{ 0x0000, 0x0000 }, /* R1396 */
-	{ 0x0000, 0x0000 }, /* R1397 */
-	{ 0x0000, 0x0000 }, /* R1398 */
-	{ 0x0000, 0x0000 }, /* R1399 */
-	{ 0x0000, 0x0000 }, /* R1400 */
-	{ 0x0000, 0x0000 }, /* R1401 */
-	{ 0x0000, 0x0000 }, /* R1402 */
-	{ 0x0000, 0x0000 }, /* R1403 */
-	{ 0x0000, 0x0000 }, /* R1404 */
-	{ 0x0000, 0x0000 }, /* R1405 */
-	{ 0x0000, 0x0000 }, /* R1406 */
-	{ 0x0000, 0x0000 }, /* R1407 */
-	{ 0xFFFF, 0xFFFF }, /* R1408  - AIF2 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1409  - AIF2 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1410  - AIF2 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1411  - AIF2 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1412  - AIF2 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1413  - AIF2 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1414  - AIF2 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1415  - AIF2 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1416  - AIF2 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1417  - AIF2 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1418  - AIF2 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1419  - AIF2 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1420  - AIF2 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1421  - AIF2 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1422  - AIF2 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1423  - AIF2 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1424  - AIF2 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1425  - AIF2 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1426  - AIF2 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1427  - AIF2 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1428 */
-	{ 0x0000, 0x0000 }, /* R1429 */
-	{ 0x0000, 0x0000 }, /* R1430 */
-	{ 0x0000, 0x0000 }, /* R1431 */
-	{ 0x0000, 0x0000 }, /* R1432 */
-	{ 0x0000, 0x0000 }, /* R1433 */
-	{ 0x0000, 0x0000 }, /* R1434 */
-	{ 0x0000, 0x0000 }, /* R1435 */
-	{ 0x0000, 0x0000 }, /* R1436 */
-	{ 0x0000, 0x0000 }, /* R1437 */
-	{ 0x0000, 0x0000 }, /* R1438 */
-	{ 0x0000, 0x0000 }, /* R1439 */
-	{ 0x0000, 0x0000 }, /* R1440 */
-	{ 0x0000, 0x0000 }, /* R1441 */
-	{ 0x0000, 0x0000 }, /* R1442 */
-	{ 0x0000, 0x0000 }, /* R1443 */
-	{ 0x0000, 0x0000 }, /* R1444 */
-	{ 0x0000, 0x0000 }, /* R1445 */
-	{ 0x0000, 0x0000 }, /* R1446 */
-	{ 0x0000, 0x0000 }, /* R1447 */
-	{ 0x0000, 0x0000 }, /* R1448 */
-	{ 0x0000, 0x0000 }, /* R1449 */
-	{ 0x0000, 0x0000 }, /* R1450 */
-	{ 0x0000, 0x0000 }, /* R1451 */
-	{ 0x0000, 0x0000 }, /* R1452 */
-	{ 0x0000, 0x0000 }, /* R1453 */
-	{ 0x0000, 0x0000 }, /* R1454 */
-	{ 0x0000, 0x0000 }, /* R1455 */
-	{ 0x0000, 0x0000 }, /* R1456 */
-	{ 0x0000, 0x0000 }, /* R1457 */
-	{ 0x0000, 0x0000 }, /* R1458 */
-	{ 0x0000, 0x0000 }, /* R1459 */
-	{ 0x0000, 0x0000 }, /* R1460 */
-	{ 0x0000, 0x0000 }, /* R1461 */
-	{ 0x0000, 0x0000 }, /* R1462 */
-	{ 0x0000, 0x0000 }, /* R1463 */
-	{ 0x0000, 0x0000 }, /* R1464 */
-	{ 0x0000, 0x0000 }, /* R1465 */
-	{ 0x0000, 0x0000 }, /* R1466 */
-	{ 0x0000, 0x0000 }, /* R1467 */
-	{ 0x0000, 0x0000 }, /* R1468 */
-	{ 0x0000, 0x0000 }, /* R1469 */
-	{ 0x0000, 0x0000 }, /* R1470 */
-	{ 0x0000, 0x0000 }, /* R1471 */
-	{ 0x0000, 0x0000 }, /* R1472 */
-	{ 0x0000, 0x0000 }, /* R1473 */
-	{ 0x0000, 0x0000 }, /* R1474 */
-	{ 0x0000, 0x0000 }, /* R1475 */
-	{ 0x0000, 0x0000 }, /* R1476 */
-	{ 0x0000, 0x0000 }, /* R1477 */
-	{ 0x0000, 0x0000 }, /* R1478 */
-	{ 0x0000, 0x0000 }, /* R1479 */
-	{ 0x0000, 0x0000 }, /* R1480 */
-	{ 0x0000, 0x0000 }, /* R1481 */
-	{ 0x0000, 0x0000 }, /* R1482 */
-	{ 0x0000, 0x0000 }, /* R1483 */
-	{ 0x0000, 0x0000 }, /* R1484 */
-	{ 0x0000, 0x0000 }, /* R1485 */
-	{ 0x0000, 0x0000 }, /* R1486 */
-	{ 0x0000, 0x0000 }, /* R1487 */
-	{ 0x0000, 0x0000 }, /* R1488 */
-	{ 0x0000, 0x0000 }, /* R1489 */
-	{ 0x0000, 0x0000 }, /* R1490 */
-	{ 0x0000, 0x0000 }, /* R1491 */
-	{ 0x0000, 0x0000 }, /* R1492 */
-	{ 0x0000, 0x0000 }, /* R1493 */
-	{ 0x0000, 0x0000 }, /* R1494 */
-	{ 0x0000, 0x0000 }, /* R1495 */
-	{ 0x0000, 0x0000 }, /* R1496 */
-	{ 0x0000, 0x0000 }, /* R1497 */
-	{ 0x0000, 0x0000 }, /* R1498 */
-	{ 0x0000, 0x0000 }, /* R1499 */
-	{ 0x0000, 0x0000 }, /* R1500 */
-	{ 0x0000, 0x0000 }, /* R1501 */
-	{ 0x0000, 0x0000 }, /* R1502 */
-	{ 0x0000, 0x0000 }, /* R1503 */
-	{ 0x0000, 0x0000 }, /* R1504 */
-	{ 0x0000, 0x0000 }, /* R1505 */
-	{ 0x0000, 0x0000 }, /* R1506 */
-	{ 0x0000, 0x0000 }, /* R1507 */
-	{ 0x0000, 0x0000 }, /* R1508 */
-	{ 0x0000, 0x0000 }, /* R1509 */
-	{ 0x0000, 0x0000 }, /* R1510 */
-	{ 0x0000, 0x0000 }, /* R1511 */
-	{ 0x0000, 0x0000 }, /* R1512 */
-	{ 0x0000, 0x0000 }, /* R1513 */
-	{ 0x0000, 0x0000 }, /* R1514 */
-	{ 0x0000, 0x0000 }, /* R1515 */
-	{ 0x0000, 0x0000 }, /* R1516 */
-	{ 0x0000, 0x0000 }, /* R1517 */
-	{ 0x0000, 0x0000 }, /* R1518 */
-	{ 0x0000, 0x0000 }, /* R1519 */
-	{ 0x0000, 0x0000 }, /* R1520 */
-	{ 0x0000, 0x0000 }, /* R1521 */
-	{ 0x0000, 0x0000 }, /* R1522 */
-	{ 0x0000, 0x0000 }, /* R1523 */
-	{ 0x0000, 0x0000 }, /* R1524 */
-	{ 0x0000, 0x0000 }, /* R1525 */
-	{ 0x0000, 0x0000 }, /* R1526 */
-	{ 0x0000, 0x0000 }, /* R1527 */
-	{ 0x0000, 0x0000 }, /* R1528 */
-	{ 0x0000, 0x0000 }, /* R1529 */
-	{ 0x0000, 0x0000 }, /* R1530 */
-	{ 0x0000, 0x0000 }, /* R1531 */
-	{ 0x0000, 0x0000 }, /* R1532 */
-	{ 0x0000, 0x0000 }, /* R1533 */
-	{ 0x0000, 0x0000 }, /* R1534 */
-	{ 0x0000, 0x0000 }, /* R1535 */
-	{ 0x01EF, 0x01EF }, /* R1536  - DAC1 Mixer Volumes */
-	{ 0x0037, 0x0037 }, /* R1537  - DAC1 Left Mixer Routing */
-	{ 0x0037, 0x0037 }, /* R1538  - DAC1 Right Mixer Routing */
-	{ 0x01EF, 0x01EF }, /* R1539  - DAC2 Mixer Volumes */
-	{ 0x0037, 0x0037 }, /* R1540  - DAC2 Left Mixer Routing */
-	{ 0x0037, 0x0037 }, /* R1541  - DAC2 Right Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1542  - AIF1 ADC1 Left Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1543  - AIF1 ADC1 Right Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1544  - AIF1 ADC2 Left Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1545  - AIF1 ADC2 Right mixer Routing */
-	{ 0x0000, 0x0000 }, /* R1546 */
-	{ 0x0000, 0x0000 }, /* R1547 */
-	{ 0x0000, 0x0000 }, /* R1548 */
-	{ 0x0000, 0x0000 }, /* R1549 */
-	{ 0x0000, 0x0000 }, /* R1550 */
-	{ 0x0000, 0x0000 }, /* R1551 */
-	{ 0x02FF, 0x03FF }, /* R1552  - DAC1 Left Volume */
-	{ 0x02FF, 0x03FF }, /* R1553  - DAC1 Right Volume */
-	{ 0x02FF, 0x03FF }, /* R1554  - DAC2 Left Volume */
-	{ 0x02FF, 0x03FF }, /* R1555  - DAC2 Right Volume */
-	{ 0x0003, 0x0003 }, /* R1556  - DAC Softmute */
-	{ 0x0000, 0x0000 }, /* R1557 */
-	{ 0x0000, 0x0000 }, /* R1558 */
-	{ 0x0000, 0x0000 }, /* R1559 */
-	{ 0x0000, 0x0000 }, /* R1560 */
-	{ 0x0000, 0x0000 }, /* R1561 */
-	{ 0x0000, 0x0000 }, /* R1562 */
-	{ 0x0000, 0x0000 }, /* R1563 */
-	{ 0x0000, 0x0000 }, /* R1564 */
-	{ 0x0000, 0x0000 }, /* R1565 */
-	{ 0x0000, 0x0000 }, /* R1566 */
-	{ 0x0000, 0x0000 }, /* R1567 */
-	{ 0x0003, 0x0003 }, /* R1568  - Oversampling */
-	{ 0x03C3, 0x03C3 }, /* R1569  - Sidetone */
+	unsigned int aif1clk_enable:1;
+	unsigned int aif2clk_enable:1;
+
+	unsigned int aif1clk_disable:1;
+	unsigned int aif2clk_disable:1;
 };
 
 static int wm8994_readable(unsigned int reg)
@@ -1696,14 +137,14 @@
 		break;
 	}
 
-	if (reg >= ARRAY_SIZE(access_masks))
+	if (reg >= WM8994_CACHE_SIZE)
 		return 0;
-	return access_masks[reg].readable != 0;
+	return wm8994_access_masks[reg].readable != 0;
 }
 
 static int wm8994_volatile(unsigned int reg)
 {
-	if (reg >= WM8994_REG_CACHE_SIZE)
+	if (reg >= WM8994_CACHE_SIZE)
 		return 1;
 
 	switch (reg) {
@@ -1714,6 +155,8 @@
 	case WM8994_RATE_STATUS:
 	case WM8994_LDO_1:
 	case WM8994_LDO_2:
+	case WM8958_DSP2_EXECCONTROL:
+	case WM8958_MIC_DETECT_3:
 		return 1;
 	default:
 		return 0;
@@ -1723,14 +166,16 @@
 static int wm8994_write(struct snd_soc_codec *codec, unsigned int reg,
 	unsigned int value)
 {
-	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	int ret;
 
 	BUG_ON(reg > WM8994_MAX_REGISTER);
 
-	if (!wm8994_volatile(reg))
-		wm8994->reg_cache[reg] = value;
-
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+	if (!wm8994_volatile(reg)) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret != 0)
+			dev_err(codec->dev, "Cache write to %x failed: %d\n",
+				reg, ret);
+	}
 
 	return wm8994_reg_write(codec->control_data, reg, value);
 }
@@ -1738,14 +183,22 @@
 static unsigned int wm8994_read(struct snd_soc_codec *codec,
 				unsigned int reg)
 {
-	u16 *reg_cache = codec->reg_cache;
+	unsigned int val;
+	int ret;
 
 	BUG_ON(reg > WM8994_MAX_REGISTER);
 
-	if (wm8994_volatile(reg))
-		return wm8994_reg_read(codec->control_data, reg);
-	else
-		return reg_cache[reg];
+	if (!wm8994_volatile(reg) && wm8994_readable(reg) &&
+	    reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_read(codec, reg, &val);
+		if (ret >= 0)
+			return val;
+		else
+			dev_err(codec->dev, "Cache read from %x failed: %d\n",
+				reg, ret);
+	}
+
+	return wm8994_reg_read(codec->control_data, reg);
 }
 
 static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
@@ -1837,7 +290,7 @@
 
 	snd_soc_update_bits(codec, WM8994_CLOCKING_1, WM8994_SYSCLK_SRC, new);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	return 0;
 }
@@ -1864,6 +317,19 @@
 static const struct soc_enum sidetone_hpf =
 	SOC_ENUM_SINGLE(WM8994_SIDETONE, 7, 7, sidetone_hpf_text);
 
+static const char *adc_hpf_text[] = {
+	"HiFi", "Voice 1", "Voice 2", "Voice 3"
+};
+
+static const struct soc_enum aif1adc1_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF1_ADC1_FILTERS, 13, 4, adc_hpf_text);
+
+static const struct soc_enum aif1adc2_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF1_ADC2_FILTERS, 13, 4, adc_hpf_text);
+
+static const struct soc_enum aif2adc_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF2_ADC_FILTERS, 13, 4, adc_hpf_text);
+
 static const DECLARE_TLV_DB_SCALE(aif_tlv, 0, 600, 0);
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0);
@@ -2071,21 +537,252 @@
 	return 0;
 }
 
-static const char *aifdac_src_text[] = {
+static const char *aif_chan_src_text[] = {
 	"Left", "Right"
 };
 
+static const struct soc_enum aif1adcl_src =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 15, 2, aif_chan_src_text);
+
+static const struct soc_enum aif1adcr_src =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 14, 2, aif_chan_src_text);
+
+static const struct soc_enum aif2adcl_src =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 15, 2, aif_chan_src_text);
+
+static const struct soc_enum aif2adcr_src =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 14, 2, aif_chan_src_text);
+
 static const struct soc_enum aif1dacl_src =
-	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aif_chan_src_text);
 
 static const struct soc_enum aif1dacr_src =
-	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aif_chan_src_text);
 
 static const struct soc_enum aif2dacl_src =
-	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aif_chan_src_text);
 
 static const struct soc_enum aif2dacr_src =
-	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aif_chan_src_text);
+
+static const char *osr_text[] = {
+	"Low Power", "High Performance",
+};
+
+static const struct soc_enum dac_osr =
+	SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 0, 2, osr_text);
+
+static const struct soc_enum adc_osr =
+	SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 1, 2, osr_text);
+
+static void wm8958_mbc_apply(struct snd_soc_codec *codec, int mbc, int start)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994_pdata *pdata = wm8994->pdata;
+	int pwr_reg = snd_soc_read(codec, WM8994_POWER_MANAGEMENT_5);
+	int ena, reg, aif, i;
+
+	switch (mbc) {
+	case 0:
+		pwr_reg &= (WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA);
+		aif = 0;
+		break;
+	case 1:
+		pwr_reg &= (WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
+		aif = 0;
+		break;
+	case 2:
+		pwr_reg &= (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA);
+		aif = 1;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	/* We can only enable the MBC if the AIF is enabled and we
+	 * want it to be enabled. */
+	ena = pwr_reg && wm8994->mbc_ena[mbc];
+
+	reg = snd_soc_read(codec, WM8958_DSP2_PROGRAM);
+
+	dev_dbg(codec->dev, "MBC %d startup: %d, power: %x, DSP: %x\n",
+		mbc, start, pwr_reg, reg);
+
+	if (start && ena) {
+		/* If the DSP is already running then noop */
+		if (reg & WM8958_DSP2_ENA)
+			return;
+
+		/* Switch the clock over to the appropriate AIF */
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8958_DSP2CLK_SRC | WM8958_DSP2CLK_ENA,
+				    aif << WM8958_DSP2CLK_SRC_SHIFT |
+				    WM8958_DSP2CLK_ENA);
+
+		snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
+				    WM8958_DSP2_ENA, WM8958_DSP2_ENA);
+
+		/* If we've got user supplied MBC settings use them */
+		if (pdata && pdata->num_mbc_cfgs) {
+			struct wm8958_mbc_cfg *cfg
+				= &pdata->mbc_cfgs[wm8994->mbc_cfg];
+
+			for (i = 0; i < ARRAY_SIZE(cfg->coeff_regs); i++)
+				snd_soc_write(codec, i + WM8958_MBC_BAND_1_K_1,
+					      cfg->coeff_regs[i]);
+
+			for (i = 0; i < ARRAY_SIZE(cfg->cutoff_regs); i++)
+				snd_soc_write(codec,
+					      i + WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1,
+					      cfg->cutoff_regs[i]);
+		}
+
+		/* Run the DSP */
+		snd_soc_write(codec, WM8958_DSP2_EXECCONTROL,
+			      WM8958_DSP2_RUNR);
+
+		/* And we're off! */
+		snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
+				    WM8958_MBC_ENA | WM8958_MBC_SEL_MASK,
+				    mbc << WM8958_MBC_SEL_SHIFT |
+				    WM8958_MBC_ENA);
+	} else {
+		/* If the DSP is already stopped then noop */
+		if (!(reg & WM8958_DSP2_ENA))
+			return;
+
+		snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
+				    WM8958_MBC_ENA, 0);
+		snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
+				    WM8958_DSP2_ENA, 0);
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8958_DSP2CLK_ENA, 0);
+	}
+}
+
+static int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
+		    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	int mbc;
+
+	switch (w->shift) {
+	case 13:
+	case 12:
+		mbc = 2;
+		break;
+	case 11:
+	case 10:
+		mbc = 1;
+		break;
+	case 9:
+	case 8:
+		mbc = 0;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		wm8958_mbc_apply(codec, mbc, 1);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8958_mbc_apply(codec, mbc, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994_pdata *pdata = wm8994->pdata;
+	int value = ucontrol->value.integer.value[0];
+	int reg;
+
+	/* Don't allow on the fly reconfiguration */
+	reg = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
+		return -EBUSY;
+
+	if (value >= pdata->num_mbc_cfgs)
+		return -EINVAL;
+
+	wm8994->mbc_cfg = value;
+
+	return 0;
+}
+
+static int wm8958_get_mbc_enum(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8994->mbc_cfg;
+
+	return 0;
+}
+
+static int wm8958_mbc_info(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 1;
+	return 0;
+}
+
+static int wm8958_mbc_get(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	int mbc = kcontrol->private_value;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wm8994->mbc_ena[mbc];
+
+	return 0;
+}
+
+static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	int mbc = kcontrol->private_value;
+	int i;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	if (ucontrol->value.integer.value[0] > 1)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(wm8994->mbc_ena); i++) {
+		if (mbc != i && wm8994->mbc_ena[i]) {
+			dev_dbg(codec->dev, "MBC %d active already\n", mbc);
+			return -EBUSY;
+		}
+	}
+
+	wm8994->mbc_ena[mbc] = ucontrol->value.integer.value[0];
+
+	wm8958_mbc_apply(codec, mbc, wm8994->mbc_ena[mbc]);
+
+	return 0;
+}
+
+#define WM8958_MBC_SWITCH(xname, xval) {\
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+	.info = wm8958_mbc_info, \
+	.get = wm8958_mbc_get, .put = wm8958_mbc_put, \
+	.private_value = xval }
 
 static const struct snd_kcontrol_new wm8994_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
@@ -2098,10 +795,15 @@
 		 WM8994_AIF2_ADC_RIGHT_VOLUME,
 		 1, 119, 0, digital_tlv),
 
+SOC_ENUM("AIF1ADCL Source", aif1adcl_src),
+SOC_ENUM("AIF1ADCR Source", aif1adcr_src),
+SOC_ENUM("AIF2ADCL Source", aif2adcl_src),
+SOC_ENUM("AIF2ADCR Source", aif2adcr_src),
+
 SOC_ENUM("AIF1DACL Source", aif1dacl_src),
 SOC_ENUM("AIF1DACR Source", aif1dacr_src),
-SOC_ENUM("AIF2DACL Source", aif1dacl_src),
-SOC_ENUM("AIF2DACR Source", aif1dacr_src),
+SOC_ENUM("AIF2DACL Source", aif2dacl_src),
+SOC_ENUM("AIF2DACR Source", aif2dacr_src),
 
 SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
 		 WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
@@ -2140,6 +842,18 @@
 SOC_ENUM("Sidetone HPF Mux", sidetone_hpf),
 SOC_SINGLE("Sidetone HPF Switch", WM8994_SIDETONE, 6, 1, 0),
 
+SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
+SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
+SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("ADC OSR", adc_osr),
+SOC_ENUM("DAC OSR", dac_osr),
+
 SOC_DOUBLE_R_TLV("DAC1 Volume", WM8994_DAC1_LEFT_VOLUME,
 		 WM8994_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 SOC_DOUBLE_R("DAC1 Switch", WM8994_DAC1_LEFT_VOLUME,
@@ -2162,15 +876,15 @@
 
 SOC_SINGLE_TLV("AIF1DAC1 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
-SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
+SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC1_FILTERS_2,
 	   8, 1, 0),
 SOC_SINGLE_TLV("AIF1DAC2 3D Stereo Volume", WM8994_AIF1_DAC2_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
 SOC_SINGLE("AIF1DAC2 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
 	   8, 1, 0),
-SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2,
+SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF2_DAC_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
-SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
+SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
 	   8, 1, 0),
 };
 
@@ -2209,6 +923,13 @@
 	       eq_tlv),
 };
 
+static const struct snd_kcontrol_new wm8958_snd_controls[] = {
+SOC_SINGLE_TLV("AIF3 Boost Volume", WM8958_AIF3_CONTROL_2, 10, 3, 0, aif_tlv),
+WM8958_MBC_SWITCH("AIF1DAC1 MBC Switch", 0),
+WM8958_MBC_SWITCH("AIF1DAC2 MBC Switch", 1),
+WM8958_MBC_SWITCH("AIF2DAC MBC Switch", 2),
+};
+
 static int clk_sys_event(struct snd_soc_dapm_widget *w,
 			 struct snd_kcontrol *kcontrol, int event)
 {
@@ -2228,6 +949,7 @@
 
 static void wm8994_update_class_w(struct snd_soc_codec *codec)
 {
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	int enable = 1;
 	int source = 0;  /* GCC flow analysis can't track enable */
 	int reg, reg_r;
@@ -2278,14 +1000,120 @@
 				    WM8994_CP_DYN_PWR |
 				    WM8994_CP_DYN_SRC_SEL_MASK,
 				    source | WM8994_CP_DYN_PWR);
+		wm8994->hubs.class_w = true;
 		
 	} else {
 		dev_dbg(codec->dev, "Class W disabled\n");
 		snd_soc_update_bits(codec, WM8994_CLASS_W_1,
 				    WM8994_CP_DYN_PWR, 0);
+		wm8994->hubs.class_w = false;
 	}
 }
 
+static int late_enable_ev(struct snd_soc_dapm_widget *w,
+			  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (wm8994->aif1clk_enable) {
+			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+					    WM8994_AIF1CLK_ENA_MASK,
+					    WM8994_AIF1CLK_ENA);
+			wm8994->aif1clk_enable = 0;
+		}
+		if (wm8994->aif2clk_enable) {
+			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+					    WM8994_AIF2CLK_ENA_MASK,
+					    WM8994_AIF2CLK_ENA);
+			wm8994->aif2clk_enable = 0;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int late_disable_ev(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMD:
+		if (wm8994->aif1clk_disable) {
+			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+					    WM8994_AIF1CLK_ENA_MASK, 0);
+			wm8994->aif1clk_disable = 0;
+		}
+		if (wm8994->aif2clk_disable) {
+			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+					    WM8994_AIF2CLK_ENA_MASK, 0);
+			wm8994->aif2clk_disable = 0;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wm8994->aif1clk_enable = 1;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8994->aif1clk_disable = 1;
+		break;
+	}
+
+	return 0;
+}
+
+static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wm8994->aif2clk_enable = 1;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8994->aif2clk_disable = 1;
+		break;
+	}
+
+	return 0;
+}
+
+static int adc_mux_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
+{
+	late_enable_ev(w, kcontrol, event);
+	return 0;
+}
+
+static int dac_ev(struct snd_soc_dapm_widget *w,
+		  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	unsigned int mask = 1 << w->shift;
+
+	snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+			    mask, mask);
+	return 0;
+}
+
 static const char *hp_mux_text[] = {
 	"Mixer",
 	"DAC",
@@ -2512,14 +1340,100 @@
 	SOC_DAPM_ENUM("AIF2ADC Mux", aif2adc_enum);
 
 static const char *aif3adc_text[] = {
-	"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT",
+	"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", "Mono PCM",
 };
 
-static const struct soc_enum aif3adc_enum =
+static const struct soc_enum wm8994_aif3adc_enum =
 	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 3, aif3adc_text);
 
-static const struct snd_kcontrol_new aif3adc_mux =
-	SOC_DAPM_ENUM("AIF3ADC Mux", aif3adc_enum);
+static const struct snd_kcontrol_new wm8994_aif3adc_mux =
+	SOC_DAPM_ENUM("AIF3ADC Mux", wm8994_aif3adc_enum);
+
+static const struct soc_enum wm8958_aif3adc_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 4, aif3adc_text);
+
+static const struct snd_kcontrol_new wm8958_aif3adc_mux =
+	SOC_DAPM_ENUM("AIF3ADC Mux", wm8958_aif3adc_enum);
+
+static const char *mono_pcm_out_text[] = {
+	"None", "AIF2ADCL", "AIF2ADCR",
+};
+
+static const struct soc_enum mono_pcm_out_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 9, 3, mono_pcm_out_text);
+
+static const struct snd_kcontrol_new mono_pcm_out_mux =
+	SOC_DAPM_ENUM("Mono PCM Out Mux", mono_pcm_out_enum);
+
+static const char *aif2dac_src_text[] = {
+	"AIF2", "AIF3",
+};
+
+/* Note that these two controls shouldn't be switched to AIF3 simultaneously */
+static const struct soc_enum aif2dacl_src_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 7, 2, aif2dac_src_text);
+
+static const struct snd_kcontrol_new aif2dacl_src_mux =
+	SOC_DAPM_ENUM("AIF2DACL Mux", aif2dacl_src_enum);
+
+static const struct soc_enum aif2dacr_src_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 8, 2, aif2dac_src_text);
+
+static const struct snd_kcontrol_new aif2dacr_src_mux =
+	SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
+
+static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
+	SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
+	SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+	late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+	late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+	late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+	late_enable_ev, SND_SOC_DAPM_PRE_PMU),
+
+SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
+};
+
+static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
+SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0)
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
+SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
+	dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
+	dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
+	dac_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
+	dac_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
+SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
+SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
+SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
+SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
+SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+		   adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+		   adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
+SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+};
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("DMIC1DAT"),
@@ -2533,26 +1447,27 @@
 SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
 
-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
-
-SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 9, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 8, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
-SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 11, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 10, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
 		   aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
@@ -2581,19 +1496,21 @@
 		     WM8994_POWER_MANAGEMENT_4, 13, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
 		     WM8994_POWER_MANAGEMENT_4, 12, 0),
-SND_SOC_DAPM_AIF_IN("AIF2DACL", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 13, 0),
-SND_SOC_DAPM_AIF_IN("AIF2DACR", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 12, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
 SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
 
 SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux),
 SND_SOC_DAPM_MUX("AIF2DAC Mux", SND_SOC_NOPM, 0, 0, &aif2dac_mux),
 SND_SOC_DAPM_MUX("AIF2ADC Mux", SND_SOC_NOPM, 0, 0, &aif2adc_mux),
-SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &aif3adc_mux),
 
 SND_SOC_DAPM_AIF_IN("AIF3DACDAT", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
@@ -2612,14 +1529,6 @@
 SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
 SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
 
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
-
-SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
-SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
-SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
-SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
-
 SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
 SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
 
@@ -2631,8 +1540,18 @@
 SND_SOC_DAPM_POST("Debug log", post_ev),
 };
 
-static const struct snd_soc_dapm_route intercon[] = {
+static const struct snd_soc_dapm_widget wm8994_specific_dapm_widgets[] = {
+SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8994_aif3adc_mux),
+};
 
+static const struct snd_soc_dapm_widget wm8958_dapm_widgets[] = {
+SND_SOC_DAPM_MUX("Mono PCM Out Mux", SND_SOC_NOPM, 0, 0, &mono_pcm_out_mux),
+SND_SOC_DAPM_MUX("AIF2DACL Mux", SND_SOC_NOPM, 0, 0, &aif2dacl_src_mux),
+SND_SOC_DAPM_MUX("AIF2DACR Mux", SND_SOC_NOPM, 0, 0, &aif2dacr_src_mux),
+SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8958_aif3adc_mux),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
 	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
 	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },
 
@@ -2740,9 +1659,6 @@
 	{ "AIF1DAC2L", NULL, "AIF1DAC Mux" },
 	{ "AIF1DAC2R", NULL, "AIF1DAC Mux" },
 
-	{ "AIF2DACL", NULL, "AIF2DAC Mux" },
-	{ "AIF2DACR", NULL, "AIF2DAC Mux" },
-
 	{ "AIF1DAC Mux", "AIF1DACDAT", "AIF1DACDAT" },
 	{ "AIF1DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
 	{ "AIF2DAC Mux", "AIF2DACDAT", "AIF2DACDAT" },
@@ -2752,14 +1668,12 @@
 	{ "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" },
 
 	/* DAC1 inputs */
-	{ "DAC1L", NULL, "DAC1L Mixer" },
 	{ "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" },
 	{ "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
 	{ "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
 	{ "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
 	{ "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
-	{ "DAC1R", NULL, "DAC1R Mixer" },
 	{ "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" },
 	{ "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
 	{ "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -2768,7 +1682,6 @@
 
 	/* DAC2/AIF2 outputs  */
 	{ "AIF2ADCL", NULL, "AIF2DAC2L Mixer" },
-	{ "DAC2L", NULL, "AIF2DAC2L Mixer" },
 	{ "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" },
 	{ "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
 	{ "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
@@ -2776,13 +1689,17 @@
 	{ "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
 	{ "AIF2ADCR", NULL, "AIF2DAC2R Mixer" },
-	{ "DAC2R", NULL, "AIF2DAC2R Mixer" },
 	{ "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" },
 	{ "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
 	{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
 	{ "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" },
 	{ "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
+	{ "AIF1ADCDAT", NULL, "AIF1ADC1L" },
+	{ "AIF1ADCDAT", NULL, "AIF1ADC1R" },
+	{ "AIF1ADCDAT", NULL, "AIF1ADC2L" },
+	{ "AIF1ADCDAT", NULL, "AIF1ADC2R" },
+
 	{ "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
 
 	/* AIF3 output */
@@ -2815,6 +1732,51 @@
 	{ "Right Headphone Mux", "DAC", "DAC1R" },
 };
 
+static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = {
+	{ "DAC1L", NULL, "Late DAC1L Enable PGA" },
+	{ "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" },
+	{ "DAC1R", NULL, "Late DAC1R Enable PGA" },
+	{ "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" },
+	{ "DAC2L", NULL, "Late DAC2L Enable PGA" },
+	{ "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" },
+	{ "DAC2R", NULL, "Late DAC2R Enable PGA" },
+	{ "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" }
+};
+
+static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = {
+	{ "DAC1L", NULL, "DAC1L Mixer" },
+	{ "DAC1R", NULL, "DAC1R Mixer" },
+	{ "DAC2L", NULL, "AIF2DAC2L Mixer" },
+	{ "DAC2R", NULL, "AIF2DAC2R Mixer" },
+};
+
+static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
+	{ "AIF1DACDAT", NULL, "AIF2DACDAT" },
+	{ "AIF2DACDAT", NULL, "AIF1DACDAT" },
+	{ "AIF1ADCDAT", NULL, "AIF2ADCDAT" },
+	{ "AIF2ADCDAT", NULL, "AIF1ADCDAT" },
+};
+
+static const struct snd_soc_dapm_route wm8994_intercon[] = {
+	{ "AIF2DACL", NULL, "AIF2DAC Mux" },
+	{ "AIF2DACR", NULL, "AIF2DAC Mux" },
+};
+
+static const struct snd_soc_dapm_route wm8958_intercon[] = {
+	{ "AIF2DACL", NULL, "AIF2DACL Mux" },
+	{ "AIF2DACR", NULL, "AIF2DACR Mux" },
+
+	{ "AIF2DACL Mux", "AIF2", "AIF2DAC Mux" },
+	{ "AIF2DACL Mux", "AIF3", "AIF3DACDAT" },
+	{ "AIF2DACR Mux", "AIF2", "AIF2DAC Mux" },
+	{ "AIF2DACR Mux", "AIF3", "AIF3DACDAT" },
+
+	{ "Mono PCM Out Mux", "AIF2ADCL", "AIF2ADCL" },
+	{ "Mono PCM Out Mux", "AIF2ADCR", "AIF2ADCR" },
+
+	{ "AIF3ADC Mux", "Mono PCM", "Mono PCM Out Mux" },
+};
+
 /* The size in bits of the FLL divide multiplied by 10
  * to allow rounding later */
 #define FIXED_FLL_SIZE ((1 << 16) * 10)
@@ -2930,6 +1892,7 @@
 		/* Allow no source specification when stopping */
 		if (freq_out)
 			return -EINVAL;
+		src = wm8994->fll[id].src;
 		break;
 	case WM8994_FLL_SRC_MCLK1:
 	case WM8994_FLL_SRC_MCLK2:
@@ -3094,6 +2057,7 @@
 static int wm8994_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
+	struct wm8994 *control = codec->control_data;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 
 	switch (level) {
@@ -3107,16 +2071,36 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
-			/* Tweak DC servo and DSP configuration for
-			 * improved performance. */
-			if (wm8994->revision < 4) {
-				/* Tweak DC servo and DSP configuration for
-				 * improved performance. */
-				snd_soc_write(codec, 0x102, 0x3);
-				snd_soc_write(codec, 0x56, 0x3);
-				snd_soc_write(codec, 0x817, 0);
-				snd_soc_write(codec, 0x102, 0);
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			pm_runtime_get_sync(codec->dev);
+
+			switch (control->type) {
+			case WM8994:
+				if (wm8994->revision < 4) {
+					/* Tweak DC servo and DSP
+					 * configuration for improved
+					 * performance. */
+					snd_soc_write(codec, 0x102, 0x3);
+					snd_soc_write(codec, 0x56, 0x3);
+					snd_soc_write(codec, 0x817, 0);
+					snd_soc_write(codec, 0x102, 0);
+				}
+				break;
+
+			case WM8958:
+				if (wm8994->revision == 0) {
+					/* Optimise performance for rev A */
+					snd_soc_write(codec, 0x102, 0x3);
+					snd_soc_write(codec, 0xcb, 0x81);
+					snd_soc_write(codec, 0x817, 0);
+					snd_soc_write(codec, 0x102, 0);
+
+					snd_soc_update_bits(codec,
+							    WM8958_CHARGE_PUMP_2,
+							    WM8958_CP_DISCH,
+							    WM8958_CP_DISCH);
+				}
+				break;
 			}
 
 			/* Discharge LINEOUT1 & 2 */
@@ -3151,7 +2135,7 @@
 		break;
 
 	case SND_SOC_BIAS_OFF:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
 			/* Switch over to startup biases */
 			snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
 					    WM8994_BIAS_SRC |
@@ -3183,16 +2167,19 @@
 					    WM8994_STARTUP_BIAS_ENA |
 					    WM8994_VMID_BUF_ENA |
 					    WM8994_VMID_RAMP_MASK, 0);
+
+			pm_runtime_put(codec->dev);
 		}
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
 static int wm8994_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
 	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
 	int ms_reg;
 	int aif1_reg;
 	int ms = 0;
@@ -3277,6 +2264,13 @@
 		return -EINVAL;
 	}
 
+	/* The AIF2 format configuration needs to be mirrored to AIF3
+	 * on WM8958 if AIF3 is in use, so just do it all the time. */
+	if (control->type == WM8958 && dai->id == 2)
+		snd_soc_update_bits(codec, WM8958_AIF3_CONTROL_1,
+				    WM8994_AIF1_LRCLK_INV |
+				    WM8958_AIF3_FMT_MASK, aif1);
+
 	snd_soc_update_bits(codec, aif1_reg,
 			    WM8994_AIF1_BCLK_INV | WM8994_AIF1_LRCLK_INV |
 			    WM8994_AIF1_FMT_MASK,
@@ -3317,12 +2311,15 @@
 			    struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	int aif1_reg;
+	int aif2_reg;
 	int bclk_reg;
 	int lrclk_reg;
 	int rate_reg;
 	int aif1 = 0;
+	int aif2 = 0;
 	int bclk = 0;
 	int lrclk = 0;
 	int rate_val = 0;
@@ -3333,6 +2330,7 @@
 	switch (dai->id) {
 	case 1:
 		aif1_reg = WM8994_AIF1_CONTROL_1;
+		aif2_reg = WM8994_AIF1_CONTROL_2;
 		bclk_reg = WM8994_AIF1_BCLK;
 		rate_reg = WM8994_AIF1_RATE;
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
@@ -3345,6 +2343,7 @@
 		break;
 	case 2:
 		aif1_reg = WM8994_AIF2_CONTROL_1;
+		aif2_reg = WM8994_AIF2_CONTROL_2;
 		bclk_reg = WM8994_AIF2_BCLK;
 		rate_reg = WM8994_AIF2_RATE;
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
@@ -3355,6 +2354,14 @@
 			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
 		}
 		break;
+	case 3:
+		switch (control->type) {
+		case WM8958:
+			aif1_reg = WM8958_AIF3_CONTROL_1;
+			break;
+		default:
+			return 0;
+		}
 	default:
 		return -EINVAL;
 	}
@@ -3392,6 +2399,10 @@
 	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
 		dai->id, wm8994->aifclk[id], bclk_rate);
 
+	if (params_channels(params) == 1 &&
+	    (snd_soc_read(codec, aif1_reg) & 0x18) == 0x18)
+		aif2 |= WM8994_AIF1_MONO;
+
 	if (wm8994->aifclk[id] == 0) {
 		dev_err(dai->dev, "AIF%dCLK not configured\n", dai->id);
 		return -EINVAL;
@@ -3435,6 +2446,7 @@
 		lrclk, bclk_rate / lrclk);
 
 	snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
+	snd_soc_update_bits(codec, aif2_reg, WM8994_AIF1_MONO, aif2);
 	snd_soc_update_bits(codec, bclk_reg, WM8994_AIF1_BCLK_DIV_MASK, bclk);
 	snd_soc_update_bits(codec, lrclk_reg, WM8994_AIF1DAC_RATE_MASK,
 			    lrclk);
@@ -3458,6 +2470,47 @@
 	return 0;
 }
 
+static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
+	int aif1_reg;
+	int aif1 = 0;
+
+	switch (dai->id) {
+	case 3:
+		switch (control->type) {
+		case WM8958:
+			aif1_reg = WM8958_AIF3_CONTROL_1;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		aif1 |= 0x20;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		aif1 |= 0x40;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aif1 |= 0x60;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
+}
+
 static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
 {
 	struct snd_soc_codec *codec = codec_dai->codec;
@@ -3512,7 +2565,7 @@
 	else
 		val = 0;
 
-	return snd_soc_update_bits(codec, reg, mask, reg);
+	return snd_soc_update_bits(codec, reg, mask, val);
 }
 
 #define WM8994_RATES SNDRV_PCM_RATE_8000_96000
@@ -3539,6 +2592,7 @@
 };
 
 static struct snd_soc_dai_ops wm8994_aif3_dai_ops = {
+	.hw_params	= wm8994_aif3_hw_params,
 	.set_tristate	= wm8994_set_tristate,
 };
 
@@ -3548,14 +2602,14 @@
 		.id = 1,
 		.playback = {
 			.stream_name = "AIF1 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF1 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3567,14 +2621,14 @@
 		.id = 2,
 		.playback = {
 			.stream_name = "AIF2 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF2 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3586,14 +2640,14 @@
 		.id = 3,
 		.playback = {
 			.stream_name = "AIF3 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF3 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3625,26 +2679,28 @@
 static int wm8994_resume(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-	u16 *reg_cache = codec->reg_cache;
 	int i, ret;
+	unsigned int val, mask;
+
+	if (wm8994->revision < 4) {
+		/* force a HW read */
+		val = wm8994_reg_read(codec->control_data,
+				      WM8994_POWER_MANAGEMENT_5);
+
+		/* modify the cache only */
+		codec->cache_only = 1;
+		mask =  WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
+			WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
+		val &= mask;
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    mask, val);
+		codec->cache_only = 0;
+	}
 
 	/* Restore the registers */
-	for (i = 1; i < ARRAY_SIZE(wm8994->reg_cache); i++) {
-		switch (i) {
-		case WM8994_LDO_1:
-		case WM8994_LDO_2:
-		case WM8994_SOFTWARE_RESET:
-			/* Handled by other MFD drivers */
-			continue;
-		default:
-			break;
-		}
-
-		if (!access_masks[i].writable)
-			continue;
-
-		wm8994_reg_write(codec->control_data, i, reg_cache[i]);
-	}
+	ret = snd_soc_cache_sync(codec);
+	if (ret != 0)
+		dev_err(codec->dev, "Failed to sync cache: %d\n", ret);
 
 	wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
@@ -3794,6 +2850,34 @@
 	dev_dbg(codec->dev, "%d ReTune Mobile configurations\n",
 		pdata->num_retune_mobile_cfgs);
 
+	if (pdata->num_mbc_cfgs) {
+		struct snd_kcontrol_new control[] = {
+			SOC_ENUM_EXT("MBC Mode", wm8994->mbc_enum,
+				     wm8958_get_mbc_enum, wm8958_put_mbc_enum),
+		};
+
+		/* We need an array of texts for the enum API */
+		wm8994->mbc_texts = kmalloc(sizeof(char *)
+					    * pdata->num_mbc_cfgs, GFP_KERNEL);
+		if (!wm8994->mbc_texts) {
+			dev_err(wm8994->codec->dev,
+				"Failed to allocate %d MBC config texts\n",
+				pdata->num_mbc_cfgs);
+			return;
+		}
+
+		for (i = 0; i < pdata->num_mbc_cfgs; i++)
+			wm8994->mbc_texts[i] = pdata->mbc_cfgs[i].name;
+
+		wm8994->mbc_enum.max = pdata->num_mbc_cfgs;
+		wm8994->mbc_enum.texts = wm8994->mbc_texts;
+
+		ret = snd_soc_add_controls(wm8994->codec, control, 1);
+		if (ret != 0)
+			dev_err(wm8994->codec->dev,
+				"Failed to add MBC mode controls: %d\n", ret);
+	}
+
 	if (pdata->num_retune_mobile_cfgs)
 		wm8994_handle_retune_mobile_pdata(wm8994);
 	else
@@ -3823,8 +2907,12 @@
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994_micdet *micdet;
+	struct wm8994 *control = codec->control_data;
 	int reg;
 
+	if (control->type != WM8994)
+		return -EINVAL;
+
 	switch (micbias) {
 	case 1:
 		micdet = &wm8994->micdet[0];
@@ -3863,6 +2951,10 @@
 	int reg;
 	int report;
 
+#ifndef CONFIG_SND_SOC_WM8994_MODULE
+	trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
 	reg = snd_soc_read(codec, WM8994_INTERRUPT_RAW_STATUS_2);
 	if (reg < 0) {
 		dev_err(codec->dev, "Failed to read microphone status: %d\n",
@@ -3891,77 +2983,250 @@
 	return IRQ_HANDLED;
 }
 
+/* Default microphone detection handler for WM8958 - the user can
+ * override this if they wish.
+ */
+static void wm8958_default_micdet(u16 status, void *data)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	int report = 0;
+
+	/* If nothing present then clear our statuses */
+	if (!(status & WM8958_MICD_STS)) {
+		wm8994->jack_is_video = false;
+		wm8994->jack_is_mic = false;
+		goto done;
+	}
+
+	/* Assume anything over 475 ohms is a microphone and remember
+	 * that we've seen one (since buttons override it) */
+	if (status & 0x600)
+		wm8994->jack_is_mic = true;
+	if (wm8994->jack_is_mic)
+		report |= SND_JACK_MICROPHONE;
+
+	/* Video has an impedance of approximately 75 ohms; assume
+	 * this isn't used as a button and remember it since buttons
+	 * override it. */
+	if (status & 0x40)
+		wm8994->jack_is_video = true;
+	if (wm8994->jack_is_video)
+		report |= SND_JACK_VIDEOOUT;
+
+	/* Everything else is buttons; just assign slots */
+	if (status & 0x4)
+		report |= SND_JACK_BTN_0;
+	if (status & 0x8)
+		report |= SND_JACK_BTN_1;
+	if (status & 0x10)
+		report |= SND_JACK_BTN_2;
+	if (status & 0x20)
+		report |= SND_JACK_BTN_3;
+	if (status & 0x80)
+		report |= SND_JACK_BTN_4;
+	if (status & 0x100)
+		report |= SND_JACK_BTN_5;
+
+done:
+	snd_soc_jack_report(wm8994->micdet[0].jack, report,
+			    SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+			    SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
+			    SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT);
+}
+
+/**
+ * wm8958_mic_detect - Enable microphone detection via the WM8958 IRQ
+ *
+ * @codec:   WM8958 codec
+ * @jack:    jack to report detection events on
+ * @cb:      detection callback, or NULL to use the default handler
+ * @cb_data: user data to pass to the detection callback
+ *
+ * Enable microphone detection functionality for the WM8958.  The
+ * default detection scheme supports up to 6 buttons plus video and
+ * microphone detection.
+ *
+ * The WM8958 has an advanced jack detection facility which is able to
+ * support complex accessory detection, especially when used in
+ * conjunction with external circuitry.  In order to provide maximum
+ * flexibility a callback is provided which allows a completely custom
+ * detection algorithm.
+ */
+int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+		      wm8958_micdet_cb cb, void *cb_data)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = codec->control_data;
+
+	if (control->type != WM8958)
+		return -EINVAL;
+
+	if (jack) {
+		if (!cb) {
+			dev_dbg(codec->dev, "Using default micdet callback\n");
+			cb = wm8958_default_micdet;
+			cb_data = codec;
+		}
+
+		wm8994->micdet[0].jack = jack;
+		wm8994->jack_cb = cb;
+		wm8994->jack_cb_data = cb_data;
+
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_ENA, WM8958_MICD_ENA);
+	} else {
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_ENA, 0);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8958_mic_detect);
+
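For reference, a machine driver would normally register a jack and hand it to wm8958_mic_detect() from its codec init callback. The sketch below is illustrative only and is not part of this patch: the jack name, the init hook, the reported jack types and the header path are assumptions; passing a NULL callback picks up wm8958_default_micdet() as shown above.

#include <sound/jack.h>
#include <sound/soc.h>

#include "../codecs/wm8994.h"

static struct snd_soc_jack headset_jack;

static int example_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	int ret;

	/* 2.6.38-era API: the jack is registered against the codec */
	ret = snd_soc_jack_new(codec, "Headset",
			       SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT |
			       SND_JACK_BTN_0 | SND_JACK_BTN_1,
			       &headset_jack);
	if (ret)
		return ret;

	/* NULL callback and data select the default WM8958 handler */
	return wm8958_mic_detect(codec, &headset_jack, NULL, NULL);
}
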
+static irqreturn_t wm8958_mic_irq(int irq, void *data)
+{
+	struct wm8994_priv *wm8994 = data;
+	struct snd_soc_codec *codec = wm8994->codec;
+	int reg;
+
+	reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
+	if (reg < 0) {
+		dev_err(codec->dev, "Failed to read mic detect status: %d\n",
+			reg);
+		return IRQ_NONE;
+	}
+
+	if (!(reg & WM8958_MICD_VALID)) {
+		dev_dbg(codec->dev, "Mic detect data not valid\n");
+		goto out;
+	}
+
+#ifndef CONFIG_SND_SOC_WM8994_MODULE
+	trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
+	if (wm8994->jack_cb)
+		wm8994->jack_cb(reg, wm8994->jack_cb_data);
+	else
+		dev_warn(codec->dev, "Accessory detection with no callback\n");
+
+out:
+	return IRQ_HANDLED;
+}
+
 static int wm8994_codec_probe(struct snd_soc_codec *codec)
 {
+	struct wm8994 *control;
 	struct wm8994_priv *wm8994;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, i;
 
 	codec->control_data = dev_get_drvdata(codec->dev->parent);
+	control = codec->control_data;
 
 	wm8994 = kzalloc(sizeof(struct wm8994_priv), GFP_KERNEL);
 	if (wm8994 == NULL)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, wm8994);
 
-	codec->reg_cache = &wm8994->reg_cache;
-
 	wm8994->pdata = dev_get_platdata(codec->dev->parent);
 	wm8994->codec = codec;
 
-	/* Fill the cache with physical values we inherited; don't reset */
-	ret = wm8994_bulk_read(codec->control_data, 0,
-			       ARRAY_SIZE(wm8994->reg_cache) - 1,
-			       codec->reg_cache);
-	if (ret < 0) {
-		dev_err(codec->dev, "Failed to fill register cache: %d\n",
-			ret);
-		goto err;
-	}
+	pm_runtime_enable(codec->dev);
+	pm_runtime_resume(codec->dev);
 
-	/* Clear the cached values for unreadable/volatile registers to
-	 * avoid potential confusion.
-	 */
-	for (i = 0; i < ARRAY_SIZE(wm8994->reg_cache); i++)
-		if (wm8994_volatile(i) || !wm8994_readable(i))
-			wm8994->reg_cache[i] = 0;
+	/* Read our current status back from the chip - we don't want to
+	 * reset as this may interfere with the GPIO or LDO operation. */
+	for (i = 0; i < WM8994_CACHE_SIZE; i++) {
+		if (!wm8994_readable(i) || wm8994_volatile(i))
+			continue;
+
+		ret = wm8994_reg_read(codec->control_data, i);
+		if (ret <= 0)
+			continue;
+
+		ret = snd_soc_cache_write(codec, i, ret);
+		if (ret != 0) {
+			dev_err(codec->dev,
+				"Failed to initialise cache for 0x%x: %d\n",
+				i, ret);
+			goto err;
+		}
+	}
 
 	/* Set revision-specific configuration */
 	wm8994->revision = snd_soc_read(codec, WM8994_CHIP_REVISION);
-	switch (wm8994->revision) {
-	case 2:
-	case 3:
-		wm8994->hubs.dcs_codes = -5;
-		wm8994->hubs.hp_startup_mode = 1;
+	switch (control->type) {
+	case WM8994:
+		switch (wm8994->revision) {
+		case 2:
+		case 3:
+			wm8994->hubs.dcs_codes = -5;
+			wm8994->hubs.hp_startup_mode = 1;
+			wm8994->hubs.dcs_readback_mode = 1;
+			break;
+		default:
+			wm8994->hubs.dcs_readback_mode = 1;
+			break;
+		}
+
+	case WM8958:
 		wm8994->hubs.dcs_readback_mode = 1;
 		break;
+
 	default:
-		wm8994->hubs.dcs_readback_mode = 1;
 		break;
 	}
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
-				 wm8994_mic_irq, "Mic 1 detect", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic1 detect IRQ: %d\n", ret);
+	switch (control->type) {
+	case WM8994:
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_DET,
+					 wm8994_mic_irq, "Mic 1 detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic1 detect IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
-				 wm8994_mic_irq, "Mic 1 short", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic1 short IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_SHRT,
+					 wm8994_mic_irq, "Mic 1 short",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic1 short IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
-				 wm8994_mic_irq, "Mic 2 detect", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic2 detect IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC2_DET,
+					 wm8994_mic_irq, "Mic 2 detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic2 detect IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
-				 wm8994_mic_irq, "Mic 2 short", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic2 short IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC2_SHRT,
+					 wm8994_mic_irq, "Mic 2 short",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic2 short IRQ: %d\n",
+				 ret);
+		break;
+
+	case WM8958:
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_DET,
+					 wm8958_mic_irq, "Mic detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic detect IRQ: %d\n",
+				 ret);
+		break;
+	}
 
 	/* Remember if AIFnLRCLK is configured as a GPIO.  This should be
 	 * configured on init - if a system wants to do this dynamically
@@ -4034,10 +3299,69 @@
 	wm_hubs_add_analogue_controls(codec);
 	snd_soc_add_controls(codec, wm8994_snd_controls,
 			     ARRAY_SIZE(wm8994_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8994_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
 				  ARRAY_SIZE(wm8994_dapm_widgets));
+
+	switch (control->type) {
+	case WM8994:
+		snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
+					  ARRAY_SIZE(wm8994_specific_dapm_widgets));
+		if (wm8994->revision < 4) {
+			snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
+						  ARRAY_SIZE(wm8994_lateclk_revd_widgets));
+			snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets,
+						  ARRAY_SIZE(wm8994_adc_revd_widgets));
+			snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
+						  ARRAY_SIZE(wm8994_dac_revd_widgets));
+		} else {
+			snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
+						  ARRAY_SIZE(wm8994_lateclk_widgets));
+			snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+						  ARRAY_SIZE(wm8994_adc_widgets));
+			snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
+						  ARRAY_SIZE(wm8994_dac_widgets));
+		}
+		break;
+	case WM8958:
+		snd_soc_add_controls(codec, wm8958_snd_controls,
+				     ARRAY_SIZE(wm8958_snd_controls));
+		snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
+					  ARRAY_SIZE(wm8994_lateclk_widgets));
+		snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+					  ARRAY_SIZE(wm8994_adc_widgets));
+		snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
+					  ARRAY_SIZE(wm8994_dac_widgets));
+		snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
+					  ARRAY_SIZE(wm8958_dapm_widgets));
+		break;
+	}
+
 	wm_hubs_add_analogue_routes(codec, 0, 0);
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	switch (control->type) {
+	case WM8994:
+		snd_soc_dapm_add_routes(dapm, wm8994_intercon,
+					ARRAY_SIZE(wm8994_intercon));
+
+		if (wm8994->revision < 4) {
+			snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
+						ARRAY_SIZE(wm8994_revd_intercon));
+			snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
+						ARRAY_SIZE(wm8994_lateclk_revd_intercon));
+		} else {
+			snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
+						ARRAY_SIZE(wm8994_lateclk_intercon));
+		}
+		break;
+	case WM8958:
+		snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
+					ARRAY_SIZE(wm8994_lateclk_intercon));
+		snd_soc_dapm_add_routes(dapm, wm8958_intercon,
+					ARRAY_SIZE(wm8958_intercon));
+		break;
+	}
 
 	return 0;
 
@@ -4054,13 +3378,29 @@
 static int  wm8994_codec_remove(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = codec->control_data;
 
 	wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994);
+	pm_runtime_disable(codec->dev);
+
+	switch (control->type) {
+	case WM8994:
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
+				wm8994);
+		break;
+
+	case WM8958:
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
+				wm8994);
+		break;
+	}
 	kfree(wm8994->retune_mobile_texts);
 	kfree(wm8994->drc_texts);
+	kfree(wm8994->mbc_texts);
 	kfree(wm8994);
@@ -4073,11 +3413,16 @@
 	.remove =	wm8994_codec_remove,
 	.suspend =	wm8994_suspend,
 	.resume =	wm8994_resume,
-	.read = wm8994_read,
-	.write = wm8994_write,
+	.read =		wm8994_read,
+	.write =	wm8994_write,
 	.readable_register = wm8994_readable,
 	.volatile_register = wm8994_volatile,
 	.set_bias_level = wm8994_set_bias_level,
+
+	.reg_cache_size = WM8994_CACHE_SIZE,
+	.reg_cache_default = wm8994_reg_defaults,
+	.reg_word_size = 2,
+	.compress_type = SND_SOC_RBTREE_COMPRESSION,
 };
 
 static int __devinit wm8994_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index d8dce26..0c355bf 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -28,7 +28,21 @@
 #define WM8994_FLL_SRC_LRCLK  3
 #define WM8994_FLL_SRC_BCLK   4
 
+typedef void (*wm8958_micdet_cb)(u16 status, void *data);
+
 int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
 		      int micbias, int det, int shrt);
+int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+		      wm8958_micdet_cb cb, void *cb_data);
+
+#define WM8994_CACHE_SIZE 1570
+
+struct wm8994_access_mask {
+	unsigned short readable;   /* Mask of readable bits */
+	unsigned short writable;   /* Mask of writable bits */
+};
+
+extern const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE];
+extern const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE];
 
 #endif
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
new file mode 100644
index 0000000..608c84c
--- /dev/null
+++ b/sound/soc/codecs/wm8995.c
@@ -0,0 +1,1818 @@
+/*
+ * wm8995.c  --  WM8995 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * Based on wm8994.c and wm_hubs.c by Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8995.h"
+
+static const u16 wm8995_reg_defs[WM8995_MAX_REGISTER + 1] = {
+	[0]     = 0x8995, [5]     = 0x0100, [16]    = 0x000b, [17]    = 0x000b,
+	[24]    = 0x02c0, [25]    = 0x02c0, [26]    = 0x02c0, [27]    = 0x02c0,
+	[28]    = 0x000f, [32]    = 0x0005, [33]    = 0x0005, [40]    = 0x0003,
+	[41]    = 0x0013, [48]    = 0x0004, [56]    = 0x09f8, [64]    = 0x1f25,
+	[69]    = 0x0004, [82]    = 0xaaaa, [84]    = 0x2a2a, [146]   = 0x0060,
+	[256]   = 0x0002, [257]   = 0x8004, [520]   = 0x0010, [528]   = 0x0083,
+	[529]   = 0x0083, [548]   = 0x0c80, [580]   = 0x0c80, [768]   = 0x4050,
+	[769]   = 0x4000, [771]   = 0x0040, [772]   = 0x0040, [773]   = 0x0040,
+	[774]   = 0x0004, [775]   = 0x0100, [784]   = 0x4050, [785]   = 0x4000,
+	[787]   = 0x0040, [788]   = 0x0040, [789]   = 0x0040, [1024]  = 0x00c0,
+	[1025]  = 0x00c0, [1026]  = 0x00c0, [1027]  = 0x00c0, [1028]  = 0x00c0,
+	[1029]  = 0x00c0, [1030]  = 0x00c0, [1031]  = 0x00c0, [1056]  = 0x0200,
+	[1057]  = 0x0010, [1058]  = 0x0200, [1059]  = 0x0010, [1088]  = 0x0098,
+	[1089]  = 0x0845, [1104]  = 0x0098, [1105]  = 0x0845, [1152]  = 0x6318,
+	[1153]  = 0x6300, [1154]  = 0x0fca, [1155]  = 0x0400, [1156]  = 0x00d8,
+	[1157]  = 0x1eb5, [1158]  = 0xf145, [1159]  = 0x0b75, [1160]  = 0x01c5,
+	[1161]  = 0x1c58, [1162]  = 0xf373, [1163]  = 0x0a54, [1164]  = 0x0558,
+	[1165]  = 0x168e, [1166]  = 0xf829, [1167]  = 0x07ad, [1168]  = 0x1103,
+	[1169]  = 0x0564, [1170]  = 0x0559, [1171]  = 0x4000, [1184]  = 0x6318,
+	[1185]  = 0x6300, [1186]  = 0x0fca, [1187]  = 0x0400, [1188]  = 0x00d8,
+	[1189]  = 0x1eb5, [1190]  = 0xf145, [1191]  = 0x0b75, [1192]  = 0x01c5,
+	[1193]  = 0x1c58, [1194]  = 0xf373, [1195]  = 0x0a54, [1196]  = 0x0558,
+	[1197]  = 0x168e, [1198]  = 0xf829, [1199]  = 0x07ad, [1200]  = 0x1103,
+	[1201]  = 0x0564, [1202]  = 0x0559, [1203]  = 0x4000, [1280]  = 0x00c0,
+	[1281]  = 0x00c0, [1282]  = 0x00c0, [1283]  = 0x00c0, [1312]  = 0x0200,
+	[1313]  = 0x0010, [1344]  = 0x0098, [1345]  = 0x0845, [1408]  = 0x6318,
+	[1409]  = 0x6300, [1410]  = 0x0fca, [1411]  = 0x0400, [1412]  = 0x00d8,
+	[1413]  = 0x1eb5, [1414]  = 0xf145, [1415]  = 0x0b75, [1416]  = 0x01c5,
+	[1417]  = 0x1c58, [1418]  = 0xf373, [1419]  = 0x0a54, [1420]  = 0x0558,
+	[1421]  = 0x168e, [1422]  = 0xf829, [1423]  = 0x07ad, [1424]  = 0x1103,
+	[1425]  = 0x0564, [1426]  = 0x0559, [1427]  = 0x4000, [1568]  = 0x0002,
+	[1792]  = 0xa100, [1793]  = 0xa101, [1794]  = 0xa101, [1795]  = 0xa101,
+	[1796]  = 0xa101, [1797]  = 0xa101, [1798]  = 0xa101, [1799]  = 0xa101,
+	[1800]  = 0xa101, [1801]  = 0xa101, [1802]  = 0xa101, [1803]  = 0xa101,
+	[1804]  = 0xa101, [1805]  = 0xa101, [1825]  = 0x0055, [1848]  = 0x3fff,
+	[1849]  = 0x1fff, [2049]  = 0x0001, [2050]  = 0x0069, [2056]  = 0x0002,
+	[2057]  = 0x0003, [2058]  = 0x0069, [12288] = 0x0001, [12289] = 0x0001,
+	[12291] = 0x0006, [12292] = 0x0040, [12293] = 0x0001, [12294] = 0x000f,
+	[12295] = 0x0006, [12296] = 0x0001, [12297] = 0x0003, [12298] = 0x0104,
+	[12300] = 0x0060, [12301] = 0x0011, [12302] = 0x0401, [12304] = 0x0050,
+	[12305] = 0x0003, [12306] = 0x0100, [12308] = 0x0051, [12309] = 0x0003,
+	[12310] = 0x0104, [12311] = 0x000a, [12312] = 0x0060, [12313] = 0x003b,
+	[12314] = 0x0502, [12315] = 0x0100, [12316] = 0x2fff, [12320] = 0x2fff,
+	[12324] = 0x2fff, [12328] = 0x2fff, [12332] = 0x2fff, [12336] = 0x2fff,
+	[12340] = 0x2fff, [12344] = 0x2fff, [12348] = 0x2fff, [12352] = 0x0001,
+	[12353] = 0x0001, [12355] = 0x0006, [12356] = 0x0040, [12357] = 0x0001,
+	[12358] = 0x000f, [12359] = 0x0006, [12360] = 0x0001, [12361] = 0x0003,
+	[12362] = 0x0104, [12364] = 0x0060, [12365] = 0x0011, [12366] = 0x0401,
+	[12368] = 0x0050, [12369] = 0x0003, [12370] = 0x0100, [12372] = 0x0060,
+	[12373] = 0x003b, [12374] = 0x0502, [12375] = 0x0100, [12376] = 0x2fff,
+	[12380] = 0x2fff, [12384] = 0x2fff, [12388] = 0x2fff, [12392] = 0x2fff,
+	[12396] = 0x2fff, [12400] = 0x2fff, [12404] = 0x2fff, [12408] = 0x2fff,
+	[12412] = 0x2fff, [12416] = 0x0001, [12417] = 0x0001, [12419] = 0x0006,
+	[12420] = 0x0040, [12421] = 0x0001, [12422] = 0x000f, [12423] = 0x0006,
+	[12424] = 0x0001, [12425] = 0x0003, [12426] = 0x0106, [12428] = 0x0061,
+	[12429] = 0x0011, [12430] = 0x0401, [12432] = 0x0050, [12433] = 0x0003,
+	[12434] = 0x0102, [12436] = 0x0051, [12437] = 0x0003, [12438] = 0x0106,
+	[12439] = 0x000a, [12440] = 0x0061, [12441] = 0x003b, [12442] = 0x0502,
+	[12443] = 0x0100, [12444] = 0x2fff, [12448] = 0x2fff, [12452] = 0x2fff,
+	[12456] = 0x2fff, [12460] = 0x2fff, [12464] = 0x2fff, [12468] = 0x2fff,
+	[12472] = 0x2fff, [12476] = 0x2fff, [12480] = 0x0001, [12481] = 0x0001,
+	[12483] = 0x0006, [12484] = 0x0040, [12485] = 0x0001, [12486] = 0x000f,
+	[12487] = 0x0006, [12488] = 0x0001, [12489] = 0x0003, [12490] = 0x0106,
+	[12492] = 0x0061, [12493] = 0x0011, [12494] = 0x0401, [12496] = 0x0050,
+	[12497] = 0x0003, [12498] = 0x0102, [12500] = 0x0061, [12501] = 0x003b,
+	[12502] = 0x0502, [12503] = 0x0100, [12504] = 0x2fff, [12508] = 0x2fff,
+	[12512] = 0x2fff, [12516] = 0x2fff, [12520] = 0x2fff, [12524] = 0x2fff,
+	[12528] = 0x2fff, [12532] = 0x2fff, [12536] = 0x2fff, [12540] = 0x2fff,
+	[12544] = 0x0060, [12546] = 0x0601, [12548] = 0x0050, [12550] = 0x0100,
+	[12552] = 0x0001, [12554] = 0x0104, [12555] = 0x0100, [12556] = 0x2fff,
+	[12560] = 0x2fff, [12564] = 0x2fff, [12568] = 0x2fff, [12572] = 0x2fff,
+	[12576] = 0x2fff, [12580] = 0x2fff, [12584] = 0x2fff, [12588] = 0x2fff,
+	[12592] = 0x2fff, [12596] = 0x2fff, [12600] = 0x2fff, [12604] = 0x2fff,
+	[12608] = 0x0061, [12610] = 0x0601, [12612] = 0x0050, [12614] = 0x0102,
+	[12616] = 0x0001, [12618] = 0x0106, [12619] = 0x0100, [12620] = 0x2fff,
+	[12624] = 0x2fff, [12628] = 0x2fff, [12632] = 0x2fff, [12636] = 0x2fff,
+	[12640] = 0x2fff, [12644] = 0x2fff, [12648] = 0x2fff, [12652] = 0x2fff,
+	[12656] = 0x2fff, [12660] = 0x2fff, [12664] = 0x2fff, [12668] = 0x2fff,
+	[12672] = 0x0060, [12674] = 0x0601, [12676] = 0x0061, [12678] = 0x0601,
+	[12680] = 0x0050, [12682] = 0x0300, [12684] = 0x0001, [12686] = 0x0304,
+	[12688] = 0x0040, [12690] = 0x000f, [12692] = 0x0001, [12695] = 0x0100
+};
+
+struct fll_config {
+	int src;
+	int in;
+	int out;
+};
+
+struct wm8995_priv {
+	enum snd_soc_control_type control_type;
+	int sysclk[2];
+	int mclk[2];
+	int aifclk[2];
+	struct fll_config fll[2], fll_suspend[2];
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(in1lr_pga_tlv, -1650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(in1l_boost_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0);
+
+static const char *in1l_text[] = {
+	"Differential", "Single-ended IN1LN", "Single-ended IN1LP"
+};
+
+static const SOC_ENUM_SINGLE_DECL(in1l_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
+				  2, in1l_text);
+
+static const char *in1r_text[] = {
+	"Differential", "Single-ended IN1RN", "Single-ended IN1RP"
+};
+
+static const SOC_ENUM_SINGLE_DECL(in1r_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
+				  0, in1r_text);
+
+static const char *dmic_src_text[] = {
+	"DMICDAT1", "DMICDAT2", "DMICDAT3"
+};
+
+static const SOC_ENUM_SINGLE_DECL(dmic_src1_enum, WM8995_POWER_MANAGEMENT_5,
+				  8, dmic_src_text);
+static const SOC_ENUM_SINGLE_DECL(dmic_src2_enum, WM8995_POWER_MANAGEMENT_5,
+				  6, dmic_src_text);
+
+static const struct snd_kcontrol_new wm8995_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("DAC1 Volume", WM8995_DAC1_LEFT_VOLUME,
+		WM8995_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R("DAC1 Switch", WM8995_DAC1_LEFT_VOLUME,
+		WM8995_DAC1_RIGHT_VOLUME, 9, 1, 1),
+
+	SOC_DOUBLE_R_TLV("DAC2 Volume", WM8995_DAC2_LEFT_VOLUME,
+		WM8995_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R("DAC2 Switch", WM8995_DAC2_LEFT_VOLUME,
+		WM8995_DAC2_RIGHT_VOLUME, 9, 1, 1),
+
+	SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8995_AIF1_DAC1_LEFT_VOLUME,
+		WM8995_AIF1_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8995_AIF1_DAC2_LEFT_VOLUME,
+		WM8995_AIF1_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8995_AIF2_DAC_LEFT_VOLUME,
+		WM8995_AIF2_DAC_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+
+	SOC_DOUBLE_R_TLV("IN1LR Volume", WM8995_LEFT_LINE_INPUT_1_VOLUME,
+		WM8995_RIGHT_LINE_INPUT_1_VOLUME, 0, 31, 0, in1lr_pga_tlv),
+
+	SOC_SINGLE_TLV("IN1L Boost", WM8995_LEFT_LINE_INPUT_CONTROL,
+		4, 3, 0, in1l_boost_tlv),
+
+	SOC_ENUM("IN1L Mode", in1l_enum),
+	SOC_ENUM("IN1R Mode", in1r_enum),
+
+	SOC_ENUM("DMIC1 SRC", dmic_src1_enum),
+	SOC_ENUM("DMIC2 SRC", dmic_src2_enum),
+
+	SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8995_DAC1_MIXER_VOLUMES, 0, 5,
+		24, 0, sidetone_tlv),
+	SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8995_DAC2_MIXER_VOLUMES, 0, 5,
+		24, 0, sidetone_tlv),
+
+	SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8995_AIF1_ADC1_LEFT_VOLUME,
+		WM8995_AIF1_ADC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8995_AIF1_ADC2_LEFT_VOLUME,
+		WM8995_AIF1_ADC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8995_AIF2_ADC_LEFT_VOLUME,
+		WM8995_AIF2_ADC_RIGHT_VOLUME, 0, 96, 0, digital_tlv)
+};
+
+static void wm8995_update_class_w(struct snd_soc_codec *codec)
+{
+	int enable = 1;
+	int source = 0;  /* GCC flow analysis can't track enable */
+	int reg, reg_r;
+
+	/* We also need the same setting for L/R and only one path */
+	reg = snd_soc_read(codec, WM8995_DAC1_LEFT_MIXER_ROUTING);
+	switch (reg) {
+	case WM8995_AIF2DACL_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF2DAC\n");
+		source = 2 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	case WM8995_AIF1DAC2L_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF1DAC2\n");
+		source = 1 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	case WM8995_AIF1DAC1L_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF1DAC1\n");
+		source = 0 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	default:
+		dev_dbg(codec->dev, "DAC mixer setting: %x\n", reg);
+		enable = 0;
+		break;
+	}
+
+	reg_r = snd_soc_read(codec, WM8995_DAC1_RIGHT_MIXER_ROUTING);
+	if (reg_r != reg) {
+		dev_dbg(codec->dev, "Left and right DAC mixers different\n");
+		enable = 0;
+	}
+
+	if (enable) {
+		dev_dbg(codec->dev, "Class W enabled\n");
+		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
+				    WM8995_CP_DYN_PWR_MASK |
+				    WM8995_CP_DYN_SRC_SEL_MASK,
+				    source | WM8995_CP_DYN_PWR);
+	} else {
+		dev_dbg(codec->dev, "Class W disabled\n");
+		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
+				    WM8995_CP_DYN_PWR_MASK, 0);
+	}
+}
+
+static int check_clk_sys(struct snd_soc_dapm_widget *source,
+			 struct snd_soc_dapm_widget *sink)
+{
+	unsigned int reg;
+	const char *clk;
+
+	reg = snd_soc_read(source->codec, WM8995_CLOCKING_1);
+	/* Check what we're currently using for CLK_SYS */
+	if (reg & WM8995_SYSCLK_SRC)
+		clk = "AIF2CLK";
+	else
+		clk = "AIF1CLK";
+	return !strcmp(source->name, clk);
+}
+
+static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_codec *codec;
+	int ret;
+
+	w = snd_kcontrol_chip(kcontrol);
+	codec = w->codec;
+	ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
+	wm8995_update_class_w(codec);
+	return ret;
+}
+
+static int hp_supply_event(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+
+	codec = w->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable the headphone amp */
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    WM8995_HPOUT1L_ENA |
+				    WM8995_HPOUT1R_ENA);
+
+		/* Enable the second stage */
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_DLY_MASK |
+				    WM8995_HPOUT1R_DLY_MASK,
+				    WM8995_HPOUT1L_DLY |
+				    WM8995_HPOUT1R_DLY);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
+				    WM8995_CP_ENA_MASK, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static void dc_servo_cmd(struct snd_soc_codec *codec,
+			 unsigned int reg, unsigned int val, unsigned int mask)
+{
+	int timeout = 10;
+
+	dev_dbg(codec->dev, "%s: reg = %#x, val = %#x, mask = %#x\n",
+		__func__, reg, val, mask);
+
+	snd_soc_write(codec, reg, val);
+	while (timeout--) {
+		msleep(10);
+		val = snd_soc_read(codec, WM8995_DC_SERVO_READBACK_0);
+		if ((val & mask) == mask)
+			return;
+	}
+
+	dev_err(codec->dev, "Timed out waiting for DC Servo\n");
+}
+
+static int hp_event(struct snd_soc_dapm_widget *w,
+		    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+	unsigned int reg;
+
+	codec = w->codec;
+	reg = snd_soc_read(codec, WM8995_ANALOGUE_HP_1);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
+				    WM8995_CP_ENA_MASK, WM8995_CP_ENA);
+
+		msleep(5);
+
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    WM8995_HPOUT1L_ENA | WM8995_HPOUT1R_ENA);
+
+		udelay(20);
+
+		reg |= WM8995_HPOUT1L_DLY | WM8995_HPOUT1R_DLY;
+		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
+
+		snd_soc_write(codec, WM8995_DC_SERVO_1, WM8995_DCS_ENA_CHAN_0 |
+			      WM8995_DCS_ENA_CHAN_1);
+
+		dc_servo_cmd(codec, WM8995_DC_SERVO_2,
+			     WM8995_DCS_TRIG_STARTUP_0 |
+			     WM8995_DCS_TRIG_STARTUP_1,
+			     WM8995_DCS_TRIG_DAC_WR_0 |
+			     WM8995_DCS_TRIG_DAC_WR_1);
+
+		reg |= WM8995_HPOUT1R_OUTP | WM8995_HPOUT1R_RMV_SHORT |
+		       WM8995_HPOUT1L_OUTP | WM8995_HPOUT1L_RMV_SHORT;
+		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
+
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_OUTP_MASK |
+				    WM8995_HPOUT1R_OUTP_MASK |
+				    WM8995_HPOUT1L_RMV_SHORT_MASK |
+				    WM8995_HPOUT1R_RMV_SHORT_MASK, 0);
+
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_DLY_MASK |
+				    WM8995_HPOUT1R_DLY_MASK, 0);
+
+		snd_soc_write(codec, WM8995_DC_SERVO_1, 0);
+
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    0);
+		break;
+	}
+
+	return 0;
+}
+
+static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
+{
+	struct wm8995_priv *wm8995;
+	int rate;
+	int reg1 = 0;
+	int offset;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	if (aif)
+		offset = 4;
+	else
+		offset = 0;
+
+	switch (wm8995->sysclk[aif]) {
+	case WM8995_SYSCLK_MCLK1:
+		rate = wm8995->mclk[0];
+		break;
+	case WM8995_SYSCLK_MCLK2:
+		reg1 |= 0x8;
+		rate = wm8995->mclk[1];
+		break;
+	case WM8995_SYSCLK_FLL1:
+		reg1 |= 0x10;
+		rate = wm8995->fll[0].out;
+		break;
+	case WM8995_SYSCLK_FLL2:
+		reg1 |= 0x18;
+		rate = wm8995->fll[1].out;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate >= 13500000) {
+		rate /= 2;
+		reg1 |= WM8995_AIF1CLK_DIV;
+
+		dev_dbg(codec->dev, "Dividing AIF%d clock to %dHz\n",
+			aif + 1, rate);
+	}
+
+	wm8995->aifclk[aif] = rate;
+
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1 + offset,
+			    WM8995_AIF1CLK_SRC_MASK | WM8995_AIF1CLK_DIV_MASK,
+			    reg1);
+	return 0;
+}
+
+static int configure_clock(struct snd_soc_codec *codec)
+{
+	struct wm8995_priv *wm8995;
+	int old, new;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	/* Bring up the AIF clocks first */
+	configure_aif_clock(codec, 0);
+	configure_aif_clock(codec, 1);
+
+	/*
+	 * Then switch CLK_SYS over to the higher of them; a change
+	 * can only happen as a result of a clocking change which can
+	 * only be made outside of DAPM so we can safely redo the
+	 * clocking.
+	 */
+
+	/* If they're equal it doesn't matter which is used */
+	if (wm8995->aifclk[0] == wm8995->aifclk[1])
+		return 0;
+
+	if (wm8995->aifclk[0] < wm8995->aifclk[1])
+		new = WM8995_SYSCLK_SRC;
+	else
+		new = 0;
+
+	old = snd_soc_read(codec, WM8995_CLOCKING_1) & WM8995_SYSCLK_SRC;
+
+	/* If there's no change then we're done. */
+	if (old == new)
+		return 0;
+
+	snd_soc_update_bits(codec, WM8995_CLOCKING_1,
+			    WM8995_SYSCLK_SRC_MASK, new);
+
+	snd_soc_dapm_sync(&codec->dapm);
+
+	return 0;
+}
+
+static int clk_sys_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return configure_clock(codec);
+
+	case SND_SOC_DAPM_POST_PMD:
+		configure_clock(codec);
+		break;
+	}
+
+	return 0;
+}
+
+static const char *sidetone_text[] = {
+	"ADC/DMIC1", "DMIC2",
+};
+
+static const struct soc_enum sidetone1_enum =
+	SOC_ENUM_SINGLE(WM8995_SIDETONE, 0, 2, sidetone_text);
+
+static const struct snd_kcontrol_new sidetone1_mux =
+	SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum);
+
+static const struct soc_enum sidetone2_enum =
+	SOC_ENUM_SINGLE(WM8995_SIDETONE, 1, 2, sidetone_text);
+
+static const struct snd_kcontrol_new sidetone2_mux =
+	SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum);
+
+static const struct snd_kcontrol_new aif1adc1l_mix[] = {
+	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc1r_mix[] = {
+	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc2l_mix[] = {
+	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc2r_mix[] = {
+	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new dac1l_mix[] = {
+	WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		5, 1, 0),
+	WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		4, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		2, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new dac1r_mix[] = {
+	WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		5, 1, 0),
+	WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		4, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		2, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif2dac2l_mix[] = {
+	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		5, 1, 0),
+	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		4, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		2, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif2dac2r_mix[] = {
+	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		5, 1, 0),
+	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		4, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		2, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new in1l_pga =
+	SOC_DAPM_SINGLE("IN1L Switch", WM8995_POWER_MANAGEMENT_2, 5, 1, 0);
+
+static const struct snd_kcontrol_new in1r_pga =
+	SOC_DAPM_SINGLE("IN1R Switch", WM8995_POWER_MANAGEMENT_2, 4, 1, 0);
+
+static const char *adc_mux_text[] = {
+	"ADC",
+	"DMIC",
+};
+
+static const struct soc_enum adc_enum =
+	SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text);
+
+static const struct snd_kcontrol_new adcl_mux =
+	SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum);
+
+static const struct snd_kcontrol_new adcr_mux =
+	SOC_DAPM_ENUM_VIRT("ADCR Mux", adc_enum);
+
+static const char *spk_src_text[] = {
+	"DAC1L", "DAC1R", "DAC2L", "DAC2R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(spk1l_src_enum, WM8995_LEFT_PDM_SPEAKER_1,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk1r_src_enum, WM8995_RIGHT_PDM_SPEAKER_1,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk2l_src_enum, WM8995_LEFT_PDM_SPEAKER_2,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk2r_src_enum, WM8995_RIGHT_PDM_SPEAKER_2,
+				  0, spk_src_text);
+
+static const struct snd_kcontrol_new spk1l_mux =
+	SOC_DAPM_ENUM("SPK1L SRC", spk1l_src_enum);
+static const struct snd_kcontrol_new spk1r_mux =
+	SOC_DAPM_ENUM("SPK1R SRC", spk1r_src_enum);
+static const struct snd_kcontrol_new spk2l_mux =
+	SOC_DAPM_ENUM("SPK2L SRC", spk2l_src_enum);
+static const struct snd_kcontrol_new spk2r_mux =
+	SOC_DAPM_ENUM("SPK2R SRC", spk2r_src_enum);
+
+static const struct snd_soc_dapm_widget wm8995_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("DMIC1DAT"),
+	SND_SOC_DAPM_INPUT("DMIC2DAT"),
+
+	SND_SOC_DAPM_INPUT("IN1L"),
+	SND_SOC_DAPM_INPUT("IN1R"),
+
+	SND_SOC_DAPM_MIXER("IN1L PGA", SND_SOC_NOPM, 0, 0,
+		&in1l_pga, 1),
+	SND_SOC_DAPM_MIXER("IN1R PGA", SND_SOC_NOPM, 0, 0,
+		&in1r_pga, 1),
+
+	SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8995_POWER_MANAGEMENT_1, 8, 0),
+	SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8995_POWER_MANAGEMENT_1, 9, 0),
+
+	SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8995_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8995_AIF2_CLOCKING_1, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8995_CLOCKING_1, 3, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8995_CLOCKING_1, 2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("SYSDSPCLK", WM8995_CLOCKING_1, 1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", 0,
+		WM8995_POWER_MANAGEMENT_3, 9, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", 0,
+		WM8995_POWER_MANAGEMENT_3, 8, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0,
+		SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
+		0, WM8995_POWER_MANAGEMENT_3, 11, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
+		0, WM8995_POWER_MANAGEMENT_3, 10, 0),
+
+	SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0,
+		&adcl_mux),
+	SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0,
+		&adcr_mux),
+
+	SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8995_POWER_MANAGEMENT_3, 5, 0),
+	SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8995_POWER_MANAGEMENT_3, 4, 0),
+	SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8995_POWER_MANAGEMENT_3, 3, 0),
+	SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8995_POWER_MANAGEMENT_3, 2, 0),
+
+	SND_SOC_DAPM_ADC("ADCL", NULL, WM8995_POWER_MANAGEMENT_3, 1, 0),
+	SND_SOC_DAPM_ADC("ADCR", NULL, WM8995_POWER_MANAGEMENT_3, 0, 0),
+
+	SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC1R Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc1r_mix, ARRAY_SIZE(aif1adc1r_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC2L Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc2l_mix, ARRAY_SIZE(aif1adc2l_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC2R Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc2r_mix, ARRAY_SIZE(aif1adc2r_mix)),
+
+	SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		9, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		8, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM,
+		0, 0),
+
+	SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		11, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		10, 0),
+
+	SND_SOC_DAPM_MIXER("AIF2DAC2L Mixer", SND_SOC_NOPM, 0, 0,
+		aif2dac2l_mix, ARRAY_SIZE(aif2dac2l_mix)),
+	SND_SOC_DAPM_MIXER("AIF2DAC2R Mixer", SND_SOC_NOPM, 0, 0,
+		aif2dac2r_mix, ARRAY_SIZE(aif2dac2r_mix)),
+
+	SND_SOC_DAPM_DAC("DAC2L", NULL, WM8995_POWER_MANAGEMENT_4, 3, 0),
+	SND_SOC_DAPM_DAC("DAC2R", NULL, WM8995_POWER_MANAGEMENT_4, 2, 0),
+	SND_SOC_DAPM_DAC("DAC1L", NULL, WM8995_POWER_MANAGEMENT_4, 1, 0),
+	SND_SOC_DAPM_DAC("DAC1R", NULL, WM8995_POWER_MANAGEMENT_4, 0, 0),
+
+	SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0, dac1l_mix,
+		ARRAY_SIZE(dac1l_mix)),
+	SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, dac1r_mix,
+		ARRAY_SIZE(dac1r_mix)),
+
+	SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &sidetone1_mux),
+	SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &sidetone2_mux),
+
+	SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SUPPLY("Headphone Supply", SND_SOC_NOPM, 0, 0,
+		hp_supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_MUX("SPK1L Driver", WM8995_LEFT_PDM_SPEAKER_1,
+		4, 0, &spk1l_mux),
+	SND_SOC_DAPM_MUX("SPK1R Driver", WM8995_RIGHT_PDM_SPEAKER_1,
+		4, 0, &spk1r_mux),
+	SND_SOC_DAPM_MUX("SPK2L Driver", WM8995_LEFT_PDM_SPEAKER_2,
+		4, 0, &spk2l_mux),
+	SND_SOC_DAPM_MUX("SPK2R Driver", WM8995_RIGHT_PDM_SPEAKER_2,
+		4, 0, &spk2r_mux),
+
+	SND_SOC_DAPM_SUPPLY("LDO2", WM8995_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
+
+	SND_SOC_DAPM_OUTPUT("HP1L"),
+	SND_SOC_DAPM_OUTPUT("HP1R"),
+	SND_SOC_DAPM_OUTPUT("SPK1L"),
+	SND_SOC_DAPM_OUTPUT("SPK1R"),
+	SND_SOC_DAPM_OUTPUT("SPK2L"),
+	SND_SOC_DAPM_OUTPUT("SPK2R")
+};
+
+static const struct snd_soc_dapm_route wm8995_intercon[] = {
+	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
+	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },
+
+	{ "DSP1CLK", NULL, "CLK_SYS" },
+	{ "DSP2CLK", NULL, "CLK_SYS" },
+	{ "SYSDSPCLK", NULL, "CLK_SYS" },
+
+	{ "AIF1ADC1L", NULL, "AIF1CLK" },
+	{ "AIF1ADC1L", NULL, "DSP1CLK" },
+	{ "AIF1ADC1R", NULL, "AIF1CLK" },
+	{ "AIF1ADC1R", NULL, "DSP1CLK" },
+	{ "AIF1ADC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1ADC2L", NULL, "AIF1CLK" },
+	{ "AIF1ADC2L", NULL, "DSP1CLK" },
+	{ "AIF1ADC2R", NULL, "AIF1CLK" },
+	{ "AIF1ADC2R", NULL, "DSP1CLK" },
+	{ "AIF1ADC2R", NULL, "SYSDSPCLK" },
+
+	{ "DMIC1L", NULL, "DMIC1DAT" },
+	{ "DMIC1L", NULL, "CLK_SYS" },
+	{ "DMIC1R", NULL, "DMIC1DAT" },
+	{ "DMIC1R", NULL, "CLK_SYS" },
+	{ "DMIC2L", NULL, "DMIC2DAT" },
+	{ "DMIC2L", NULL, "CLK_SYS" },
+	{ "DMIC2R", NULL, "DMIC2DAT" },
+	{ "DMIC2R", NULL, "CLK_SYS" },
+
+	{ "ADCL", NULL, "AIF1CLK" },
+	{ "ADCL", NULL, "DSP1CLK" },
+	{ "ADCL", NULL, "SYSDSPCLK" },
+
+	{ "ADCR", NULL, "AIF1CLK" },
+	{ "ADCR", NULL, "DSP1CLK" },
+	{ "ADCR", NULL, "SYSDSPCLK" },
+
+	{ "IN1L PGA", "IN1L Switch", "IN1L" },
+	{ "IN1R PGA", "IN1R Switch", "IN1R" },
+	{ "IN1L PGA", NULL, "LDO2" },
+	{ "IN1R PGA", NULL, "LDO2" },
+
+	{ "ADCL", NULL, "IN1L PGA" },
+	{ "ADCR", NULL, "IN1R PGA" },
+
+	{ "ADCL Mux", "ADC", "ADCL" },
+	{ "ADCL Mux", "DMIC", "DMIC1L" },
+	{ "ADCR Mux", "ADC", "ADCR" },
+	{ "ADCR Mux", "DMIC", "DMIC1R" },
+
+	/* AIF1 outputs */
+	{ "AIF1ADC1L", NULL, "AIF1ADC1L Mixer" },
+	{ "AIF1ADC1L Mixer", "ADC/DMIC Switch", "ADCL Mux" },
+
+	{ "AIF1ADC1R", NULL, "AIF1ADC1R Mixer" },
+	{ "AIF1ADC1R Mixer", "ADC/DMIC Switch", "ADCR Mux" },
+
+	{ "AIF1ADC2L", NULL, "AIF1ADC2L Mixer" },
+	{ "AIF1ADC2L Mixer", "DMIC Switch", "DMIC2L" },
+
+	{ "AIF1ADC2R", NULL, "AIF1ADC2R Mixer" },
+	{ "AIF1ADC2R Mixer", "DMIC Switch", "DMIC2R" },
+
+	/* Sidetone */
+	{ "Left Sidetone", "ADC/DMIC1", "AIF1ADC1L" },
+	{ "Left Sidetone", "DMIC2", "AIF1ADC2L" },
+	{ "Right Sidetone", "ADC/DMIC1", "AIF1ADC1R" },
+	{ "Right Sidetone", "DMIC2", "AIF1ADC2R" },
+
+	{ "AIF1DAC1L", NULL, "AIF1CLK" },
+	{ "AIF1DAC1L", NULL, "DSP1CLK" },
+	{ "AIF1DAC1R", NULL, "AIF1CLK" },
+	{ "AIF1DAC1R", NULL, "DSP1CLK" },
+	{ "AIF1DAC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1DAC2L", NULL, "AIF1CLK" },
+	{ "AIF1DAC2L", NULL, "DSP1CLK" },
+	{ "AIF1DAC2R", NULL, "AIF1CLK" },
+	{ "AIF1DAC2R", NULL, "DSP1CLK" },
+	{ "AIF1DAC2R", NULL, "SYSDSPCLK" },
+
+	{ "DAC1L", NULL, "AIF1CLK" },
+	{ "DAC1L", NULL, "DSP1CLK" },
+	{ "DAC1L", NULL, "SYSDSPCLK" },
+
+	{ "DAC1R", NULL, "AIF1CLK" },
+	{ "DAC1R", NULL, "DSP1CLK" },
+	{ "DAC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1DAC1L", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC1R", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC2L", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC2R", NULL, "AIF1DACDAT" },
+
+	/* DAC1 inputs */
+	{ "DAC1L", NULL, "DAC1L Mixer" },
+	{ "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
+	{ "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
+	{ "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
+	{ "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
+
+	{ "DAC1R", NULL, "DAC1R Mixer" },
+	{ "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
+	{ "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
+	{ "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" },
+	{ "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" },
+
+	/* DAC2/AIF2 outputs */
+	{ "DAC2L", NULL, "AIF2DAC2L Mixer" },
+	{ "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
+	{ "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
+
+	{ "DAC2R", NULL, "AIF2DAC2R Mixer" },
+	{ "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
+	{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
+
+	/* Output stages */
+	{ "Headphone PGA", NULL, "DAC1L" },
+	{ "Headphone PGA", NULL, "DAC1R" },
+
+	{ "Headphone PGA", NULL, "DAC2L" },
+	{ "Headphone PGA", NULL, "DAC2R" },
+
+	{ "Headphone PGA", NULL, "Headphone Supply" },
+	{ "Headphone PGA", NULL, "CLK_SYS" },
+	{ "Headphone PGA", NULL, "LDO2" },
+
+	{ "HP1L", NULL, "Headphone PGA" },
+	{ "HP1R", NULL, "Headphone PGA" },
+
+	{ "SPK1L Driver", "DAC1L", "DAC1L" },
+	{ "SPK1L Driver", "DAC1R", "DAC1R" },
+	{ "SPK1L Driver", "DAC2L", "DAC2L" },
+	{ "SPK1L Driver", "DAC2R", "DAC2R" },
+	{ "SPK1L Driver", NULL, "CLK_SYS" },
+
+	{ "SPK1R Driver", "DAC1L", "DAC1L" },
+	{ "SPK1R Driver", "DAC1R", "DAC1R" },
+	{ "SPK1R Driver", "DAC2L", "DAC2L" },
+	{ "SPK1R Driver", "DAC2R", "DAC2R" },
+	{ "SPK1R Driver", NULL, "CLK_SYS" },
+
+	{ "SPK2L Driver", "DAC1L", "DAC1L" },
+	{ "SPK2L Driver", "DAC1R", "DAC1R" },
+	{ "SPK2L Driver", "DAC2L", "DAC2L" },
+	{ "SPK2L Driver", "DAC2R", "DAC2R" },
+	{ "SPK2L Driver", NULL, "CLK_SYS" },
+
+	{ "SPK2R Driver", "DAC1L", "DAC1L" },
+	{ "SPK2R Driver", "DAC1R", "DAC1R" },
+	{ "SPK2R Driver", "DAC2L", "DAC2L" },
+	{ "SPK2R Driver", "DAC2R", "DAC2R" },
+	{ "SPK2R Driver", NULL, "CLK_SYS" },
+
+	{ "SPK1L", NULL, "SPK1L Driver" },
+	{ "SPK1R", NULL, "SPK1R Driver" },
+	{ "SPK2L", NULL, "SPK2L Driver" },
+	{ "SPK2R", NULL, "SPK2R Driver" }
+};
+
+static int wm8995_volatile(unsigned int reg)
+{
+	/* Out of bounds registers are generally considered volatile to
+	 * support register banks that are partially owned by something
+	 * else, e.g. a DSP.
+	 */
+	if (reg > WM8995_MAX_CACHED_REGISTER)
+		return 1;
+
+	switch (reg) {
+	case WM8995_SOFTWARE_RESET:
+	case WM8995_DC_SERVO_READBACK_0:
+	case WM8995_INTERRUPT_STATUS_1:
+	case WM8995_INTERRUPT_STATUS_2:
+	case WM8995_INTERRUPT_STATUS_1_MASK:
+	case WM8995_INTERRUPT_STATUS_2_MASK:
+	case WM8995_INTERRUPT_CONTROL:
+	case WM8995_ACCESSORY_DETECT_MODE1:
+	case WM8995_ACCESSORY_DETECT_MODE2:
+	case WM8995_HEADPHONE_DETECT1:
+	case WM8995_HEADPHONE_DETECT2:
+		return 1;
+	}
+
+	return 0;
+}
+
+static int wm8995_aif_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int mute_reg;
+
+	switch (dai->id) {
+	case 0:
+		mute_reg = WM8995_AIF1_DAC1_FILTERS_1;
+		break;
+	case 1:
+		mute_reg = WM8995_AIF2_DAC_FILTERS_1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, mute_reg, WM8995_AIF1DAC1_MUTE_MASK,
+			    !!mute << WM8995_AIF1DAC1_MUTE_SHIFT);
+	return 0;
+}
+
+static int wm8995_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec;
+	int master;
+	int aif;
+
+	codec = dai->codec;
+
+	master = 0;
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		master = WM8995_AIF1_MSTR;
+		break;
+	default:
+		dev_err(dai->dev, "Unknown master/slave configuration\n");
+		return -EINVAL;
+	}
+
+	aif = 0;
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_B:
+		aif |= WM8995_AIF1_LRCLK_INV;
+	case SND_SOC_DAIFMT_DSP_A:
+		aif |= (0x3 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		aif |= (0x2 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		aif |= (0x1 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	default:
+		dev_err(dai->dev, "Unknown dai format\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_A:
+	case SND_SOC_DAIFMT_DSP_B:
+		/* frame inversion not valid for DSP modes */
+		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+		case SND_SOC_DAIFMT_NB_NF:
+			break;
+		case SND_SOC_DAIFMT_IB_NF:
+			aif |= WM8995_AIF1_BCLK_INV;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_RIGHT_J:
+	case SND_SOC_DAIFMT_LEFT_J:
+		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+		case SND_SOC_DAIFMT_NB_NF:
+			break;
+		case SND_SOC_DAIFMT_IB_IF:
+			aif |= WM8995_AIF1_BCLK_INV | WM8995_AIF1_LRCLK_INV;
+			break;
+		case SND_SOC_DAIFMT_IB_NF:
+			aif |= WM8995_AIF1_BCLK_INV;
+			break;
+		case SND_SOC_DAIFMT_NB_IF:
+			aif |= WM8995_AIF1_LRCLK_INV;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8995_AIF1_CONTROL_1,
+			    WM8995_AIF1_BCLK_INV_MASK |
+			    WM8995_AIF1_LRCLK_INV_MASK |
+			    WM8995_AIF1_FMT_MASK, aif);
+	snd_soc_update_bits(codec, WM8995_AIF1_MASTER_SLAVE,
+			    WM8995_AIF1_MSTR_MASK, master);
+	return 0;
+}
+
+static const int srs[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100,
+	48000, 88200, 96000
+};
+
+static const int fs_ratios[] = {
+	-1 /* reserved */,
+	128, 192, 256, 384, 512, 768, 1024, 1408, 1536
+};
+
+static const int bclk_divs[] = {
+	10, 15, 20, 30, 40, 55, 60, 80, 110, 120, 160, 220, 240, 320, 440, 480
+};
+
+static int wm8995_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+	int aif1_reg;
+	int bclk_reg;
+	int lrclk_reg;
+	int rate_reg;
+	int bclk_rate;
+	int aif1;
+	int lrclk, bclk;
+	int i, rate_val, best, best_val, cur_val;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (dai->id) {
+	case 0:
+		aif1_reg = WM8995_AIF1_CONTROL_1;
+		bclk_reg = WM8995_AIF1_BCLK;
+		rate_reg = WM8995_AIF1_RATE;
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
+			wm8995->lrclk_shared[0] */) {
+			lrclk_reg = WM8995_AIF1DAC_LRCLK;
+		} else {
+			lrclk_reg = WM8995_AIF1ADC_LRCLK;
+			dev_dbg(codec->dev, "AIF1 using split LRCLK\n");
+		}
+		break;
+	case 1:
+		aif1_reg = WM8995_AIF2_CONTROL_1;
+		bclk_reg = WM8995_AIF2_BCLK;
+		rate_reg = WM8995_AIF2_RATE;
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
+		    wm8995->lrclk_shared[1] */) {
+			lrclk_reg = WM8995_AIF2DAC_LRCLK;
+		} else {
+			lrclk_reg = WM8995_AIF2ADC_LRCLK;
+			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	bclk_rate = snd_soc_params_to_bclk(params);
+	if (bclk_rate < 0)
+		return bclk_rate;
+
+	aif1 = 0;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		aif1 |= (0x1 << WM8995_AIF1_WL_SHIFT);
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		aif1 |= (0x2 << WM8995_AIF1_WL_SHIFT);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aif1 |= (0x3 << WM8995_AIF1_WL_SHIFT);
+		break;
+	default:
+		dev_err(dai->dev, "Unsupported word length %u\n",
+			params_format(params));
+		return -EINVAL;
+	}
+
+	/* try to find a suitable sample rate */
+	for (i = 0; i < ARRAY_SIZE(srs); ++i)
+		if (srs[i] == params_rate(params))
+			break;
+	if (i == ARRAY_SIZE(srs)) {
+		dev_err(dai->dev, "Sample rate %d is not supported\n",
+			params_rate(params));
+		return -EINVAL;
+	}
+	rate_val = i << WM8995_AIF1_SR_SHIFT;
+
+	dev_dbg(dai->dev, "Sample rate is %dHz\n", srs[i]);
+	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
+		dai->id + 1, wm8995->aifclk[dai->id], bclk_rate);
+
+	/* AIFCLK/fs ratio; look for a close match in either direction */
+	best = 1;
+	best_val = abs((fs_ratios[1] * params_rate(params))
+		       - wm8995->aifclk[dai->id]);
+	for (i = 2; i < ARRAY_SIZE(fs_ratios); i++) {
+		cur_val = abs((fs_ratios[i] * params_rate(params))
+			      - wm8995->aifclk[dai->id]);
+		if (cur_val >= best_val)
+			continue;
+		best = i;
+		best_val = cur_val;
+	}
+	rate_val |= best;
+
+	dev_dbg(dai->dev, "Selected AIF%dCLK/fs = %d\n",
+		dai->id + 1, fs_ratios[best]);
+
+	/*
+	 * We may not get quite the right frequency if using
+	 * approximate clocks so look for the closest match that is
+	 * higher than the target (we need to ensure that there are enough
+	 * BCLKs to clock out the samples).
+	 */
+	best = 0;
+	bclk = 0;
+	for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
+		cur_val = (wm8995->aifclk[dai->id] * 10 / bclk_divs[i]) - bclk_rate;
+		if (cur_val < 0) /* BCLK table is sorted */
+			break;
+		best = i;
+	}
+	bclk |= best << WM8995_AIF1_BCLK_DIV_SHIFT;
+
+	bclk_rate = wm8995->aifclk[dai->id] * 10 / bclk_divs[best];
+	dev_dbg(dai->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n",
+		bclk_divs[best], bclk_rate);
+
+	lrclk = bclk_rate / params_rate(params);
+	dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
+		lrclk, bclk_rate / lrclk);
+
+	snd_soc_update_bits(codec, aif1_reg,
+			    WM8995_AIF1_WL_MASK, aif1);
+	snd_soc_update_bits(codec, bclk_reg,
+			    WM8995_AIF1_BCLK_DIV_MASK, bclk);
+	snd_soc_update_bits(codec, lrclk_reg,
+			    WM8995_AIF1DAC_RATE_MASK, lrclk);
+	snd_soc_update_bits(codec, rate_reg,
+			    WM8995_AIF1_SR_MASK |
+			    WM8995_AIF1CLK_RATE_MASK, rate_val);
+	return 0;
+}
+
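To make the divider search above concrete (numbers chosen purely for illustration, not taken from this patch): with AIF1CLK at 12.288 MHz and 48 kHz, 16-bit stereo audio, snd_soc_params_to_bclk() asks for a 1.536 MHz BCLK. The fs-ratio search settles on 256 (48 kHz x 256 = 12.288 MHz exactly), the BCLK table entry 80 - a divide-by-8, since the table stores divider x 10 - yields 12.288 MHz / 8 = 1.536 MHz with zero error, and the LRCLK divider then comes out as 1536000 / 48000 = 32.
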
+static int wm8995_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int reg, val, mask;
+
+	switch (codec_dai->id) {
+	case 0:
+		reg = WM8995_AIF1_MASTER_SLAVE;
+		mask = WM8995_AIF1_TRI;
+		break;
+	case 1:
+		reg = WM8995_AIF2_MASTER_SLAVE;
+		mask = WM8995_AIF2_TRI;
+		break;
+	case 2:
+		reg = WM8995_POWER_MANAGEMENT_5;
+		mask = WM8995_AIF3_TRI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (tristate)
+		val = mask;
+	else
+		val = 0;
+
+	return snd_soc_update_bits(codec, reg, mask, val);
+}
+
+/* The size in bits of the FLL divide multiplied by 10
+ * to allow rounding later */
+#define FIXED_FLL_SIZE ((1 << 16) * 10)
+
+struct fll_div {
+	u16 outdiv;
+	u16 n;
+	u16 k;
+	u16 clk_ref_div;
+	u16 fll_fratio;
+};
+
+static int wm8995_get_fll_config(struct fll_div *fll,
+				 int freq_in, int freq_out)
+{
+	u64 Kpart;
+	unsigned int K, Ndiv, Nmod;
+
+	pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out);
+
+	/* Scale the input frequency down to <= 13.5MHz */
+	fll->clk_ref_div = 0;
+	while (freq_in > 13500000) {
+		fll->clk_ref_div++;
+		freq_in /= 2;
+
+		if (fll->clk_ref_div > 3)
+			return -EINVAL;
+	}
+	pr_debug("CLK_REF_DIV=%d, Fref=%dHz\n", fll->clk_ref_div, freq_in);
+
+	/* Scale the output to give 90MHz<=Fvco<=100MHz */
+	fll->outdiv = 3;
+	while (freq_out * (fll->outdiv + 1) < 90000000) {
+		fll->outdiv++;
+		if (fll->outdiv > 63)
+			return -EINVAL;
+	}
+	freq_out *= fll->outdiv + 1;
+	pr_debug("OUTDIV=%d, Fvco=%dHz\n", fll->outdiv, freq_out);
+
+	if (freq_in > 1000000) {
+		fll->fll_fratio = 0;
+	} else if (freq_in > 256000) {
+		fll->fll_fratio = 1;
+		freq_in *= 2;
+	} else if (freq_in > 128000) {
+		fll->fll_fratio = 2;
+		freq_in *= 4;
+	} else if (freq_in > 64000) {
+		fll->fll_fratio = 3;
+		freq_in *= 8;
+	} else {
+		fll->fll_fratio = 4;
+		freq_in *= 16;
+	}
+	pr_debug("FLL_FRATIO=%d, Fref=%dHz\n", fll->fll_fratio, freq_in);
+
+	/* Now, calculate N.K */
+	Ndiv = freq_out / freq_in;
+
+	fll->n = Ndiv;
+	Nmod = freq_out % freq_in;
+	pr_debug("Nmod=%d\n", Nmod);
+
+	/* Calculate fractional part - scale up so we can round. */
+	Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+
+	do_div(Kpart, freq_in);
+
+	K = Kpart & 0xFFFFFFFF;
+
+	if ((K % 10) >= 5)
+		K += 5;
+
+	/* Move down to proper range now rounding is done */
+	fll->k = K / 10;
+
+	pr_debug("N=%x K=%x\n", fll->n, fll->k);
+
+	return 0;
+}
+
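As a worked example of the divider maths above (the input and output rates are illustrative, not taken from this patch): locking the FLL to a 32.768 kHz reference with a 12.288 MHz target gives CLK_REF_DIV = 0 (already below 13.5 MHz); OUTDIV = 7, so Fvco = 12.288 MHz x 8 = 98.304 MHz; FLL_FRATIO = 4 because Fref <= 64 kHz, scaling the effective reference to 32768 x 16 = 524288 Hz; N = 98304000 / 524288 = 187 with a remainder of 262144; and K = (65536 x 262144) / 524288 = 32768 = 0x8000, i.e. an N.K ratio of 187.5, which multiplies back out to exactly 98.304 MHz.
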
+static int wm8995_set_fll(struct snd_soc_dai *dai, int id,
+			  int src, unsigned int freq_in,
+			  unsigned int freq_out)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+	int reg_offset, ret;
+	struct fll_div fll;
+	u16 reg, aif1, aif2;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	aif1 = snd_soc_read(codec, WM8995_AIF1_CLOCKING_1)
+	       & WM8995_AIF1CLK_ENA;
+
+	aif2 = snd_soc_read(codec, WM8995_AIF2_CLOCKING_1)
+	       & WM8995_AIF2CLK_ENA;
+
+	switch (id) {
+	case WM8995_FLL1:
+		reg_offset = 0;
+		id = 0;
+		break;
+	case WM8995_FLL2:
+		reg_offset = 0x20;
+		id = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (src) {
+	case 0:
+		/* Allow no source specification when stopping */
+		if (freq_out)
+			return -EINVAL;
+		break;
+	case WM8995_FLL_SRC_MCLK1:
+	case WM8995_FLL_SRC_MCLK2:
+	case WM8995_FLL_SRC_LRCLK:
+	case WM8995_FLL_SRC_BCLK:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Are we changing anything? */
+	if (wm8995->fll[id].src == src &&
+	    wm8995->fll[id].in == freq_in && wm8995->fll[id].out == freq_out)
+		return 0;
+
+	/* If we're stopping the FLL, redo the old config - no registers
+	 * will actually be written but this avoids spurious warnings
+	 * from GCC's flow analysis.
+	 */
+	if (freq_out)
+		ret = wm8995_get_fll_config(&fll, freq_in, freq_out);
+	else
+		ret = wm8995_get_fll_config(&fll, wm8995->fll[id].in,
+					    wm8995->fll[id].out);
+	if (ret < 0)
+		return ret;
+
+	/* Gate the AIF clocks while we reclock */
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
+			    WM8995_AIF1CLK_ENA_MASK, 0);
+	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
+			    WM8995_AIF2CLK_ENA_MASK, 0);
+
+	/* We always need to disable the FLL while reconfiguring */
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
+			    WM8995_FLL1_ENA_MASK, 0);
+
+	reg = (fll.outdiv << WM8995_FLL1_OUTDIV_SHIFT) |
+	      (fll.fll_fratio << WM8995_FLL1_FRATIO_SHIFT);
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_2 + reg_offset,
+			    WM8995_FLL1_OUTDIV_MASK |
+			    WM8995_FLL1_FRATIO_MASK, reg);
+
+	snd_soc_write(codec, WM8995_FLL1_CONTROL_3 + reg_offset, fll.k);
+
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_4 + reg_offset,
+			    WM8995_FLL1_N_MASK,
+			    fll.n << WM8995_FLL1_N_SHIFT);
+
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_5 + reg_offset,
+			    WM8995_FLL1_REFCLK_DIV_MASK |
+			    WM8995_FLL1_REFCLK_SRC_MASK,
+			    (fll.clk_ref_div << WM8995_FLL1_REFCLK_DIV_SHIFT) |
+			    (src - 1));
+
+	if (freq_out)
+		snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
+				    WM8995_FLL1_ENA_MASK, WM8995_FLL1_ENA);
+
+	wm8995->fll[id].in = freq_in;
+	wm8995->fll[id].out = freq_out;
+	wm8995->fll[id].src = src;
+
+	/* Enable any gated AIF clocks */
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
+			    WM8995_AIF1CLK_ENA_MASK, aif1);
+	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
+			    WM8995_AIF2CLK_ENA_MASK, aif2);
+
+	configure_clock(codec);
+
+	return 0;
+}
+
+static int wm8995_set_dai_sysclk(struct snd_soc_dai *dai,
+				 int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (dai->id) {
+	case 0:
+	case 1:
+		break;
+	default:
+		/* AIF3 shares clocking with AIF1/2 */
+		return -EINVAL;
+	}
+
+	switch (clk_id) {
+	case WM8995_SYSCLK_MCLK1:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK1;
+		wm8995->mclk[0] = freq;
+		dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n",
+			dai->id + 1, freq);
+		break;
+	case WM8995_SYSCLK_MCLK2:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK2;
+		wm8995->mclk[1] = freq;
+		dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n",
+			dai->id + 1, freq);
+		break;
+	case WM8995_SYSCLK_FLL1:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL1;
+		dev_dbg(dai->dev, "AIF%d using FLL1\n", dai->id + 1);
+		break;
+	case WM8995_SYSCLK_FLL2:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL2;
+		dev_dbg(dai->dev, "AIF%d using FLL2\n", dai->id + 1);
+		break;
+	case WM8995_SYSCLK_OPCLK:
+	default:
+		dev_err(dai->dev, "Unknown clock source %d\n", clk_id);
+		return -EINVAL;
+	}
+
+	configure_clock(codec);
+
+	return 0;
+}
+
+static int wm8995_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
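+		/* When coming up from OFF, re-sync the register cache to
+		 * the hardware and re-enable the BG_ENA (bandgap) bit.
+		 */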
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = snd_soc_cache_sync(codec);
+			if (ret) {
+				dev_err(codec->dev,
+					"Failed to sync cache: %d\n", ret);
+				return ret;
+			}
+
+			snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+					    WM8995_BG_ENA_MASK, WM8995_BG_ENA);
+
+		}
+		break;
+	case SND_SOC_BIAS_OFF:
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_BG_ENA_MASK, 0);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8995_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8995_resume(struct snd_soc_codec *codec)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8995_suspend NULL
+#define wm8995_resume NULL
+#endif
+
+static int wm8995_remove(struct snd_soc_codec *codec)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8995_probe(struct snd_soc_codec *codec)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	codec->dapm.idle_bias_off = 1;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	ret = snd_soc_codec_set_cache_io(codec, 16, 16, wm8995->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache i/o: %d\n", ret);
+		return ret;
+	}
+
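+	/* The software reset register reads back the device ID; verify
+	 * that it reports 0x8995 before going any further.
+	 */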
+	ret = snd_soc_read(codec, WM8995_SOFTWARE_RESET);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to read device ID: %d\n", ret);
+		return ret;
+	}
+
+	if (ret != 0x8995) {
+		dev_err(codec->dev, "Invalid device ID: %#x\n", ret);
+		return -EINVAL;
+	}
+
+	ret = snd_soc_write(codec, WM8995_SOFTWARE_RESET, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		return ret;
+	}
+
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* Latch volume updates (right only; we always do left then right). */
+	snd_soc_update_bits(codec, WM8995_AIF1_DAC1_RIGHT_VOLUME,
+			    WM8995_AIF1DAC1_VU_MASK, WM8995_AIF1DAC1_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_DAC2_RIGHT_VOLUME,
+			    WM8995_AIF1DAC2_VU_MASK, WM8995_AIF1DAC2_VU);
+	snd_soc_update_bits(codec, WM8995_AIF2_DAC_RIGHT_VOLUME,
+			    WM8995_AIF2DAC_VU_MASK, WM8995_AIF2DAC_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_ADC1_RIGHT_VOLUME,
+			    WM8995_AIF1ADC1_VU_MASK, WM8995_AIF1ADC1_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_ADC2_RIGHT_VOLUME,
+			    WM8995_AIF1ADC2_VU_MASK, WM8995_AIF1ADC2_VU);
+	snd_soc_update_bits(codec, WM8995_AIF2_ADC_RIGHT_VOLUME,
+			    WM8995_AIF2ADC_VU_MASK, WM8995_AIF2ADC_VU);
+	snd_soc_update_bits(codec, WM8995_DAC1_RIGHT_VOLUME,
+			    WM8995_DAC1_VU_MASK, WM8995_DAC1_VU);
+	snd_soc_update_bits(codec, WM8995_DAC2_RIGHT_VOLUME,
+			    WM8995_DAC2_VU_MASK, WM8995_DAC2_VU);
+	snd_soc_update_bits(codec, WM8995_RIGHT_LINE_INPUT_1_VOLUME,
+			    WM8995_IN1_VU_MASK, WM8995_IN1_VU);
+
+	wm8995_update_class_w(codec);
+
+	snd_soc_add_controls(codec, wm8995_snd_controls,
+			     ARRAY_SIZE(wm8995_snd_controls));
+	snd_soc_dapm_new_controls(&codec->dapm, wm8995_dapm_widgets,
+				  ARRAY_SIZE(wm8995_dapm_widgets));
+	snd_soc_dapm_add_routes(&codec->dapm, wm8995_intercon,
+				ARRAY_SIZE(wm8995_intercon));
+
+	return 0;
+}
+
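+/* Sample formats advertised on all three audio interfaces */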
+#define WM8995_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8995_aif1_dai_ops = {
+	.set_sysclk = wm8995_set_dai_sysclk,
+	.set_fmt = wm8995_set_dai_fmt,
+	.hw_params = wm8995_hw_params,
+	.digital_mute = wm8995_aif_mute,
+	.set_pll = wm8995_set_fll,
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_ops wm8995_aif2_dai_ops = {
+	.set_sysclk = wm8995_set_dai_sysclk,
+	.set_fmt = wm8995_set_dai_fmt,
+	.hw_params = wm8995_hw_params,
+	.digital_mute = wm8995_aif_mute,
+	.set_pll = wm8995_set_fll,
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_ops wm8995_aif3_dai_ops = {
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_driver wm8995_dai[] = {
+	{
+		.name = "wm8995-aif1",
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif1_dai_ops
+	},
+	{
+		.name = "wm8995-aif2",
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif2_dai_ops
+	},
+	{
+		.name = "wm8995-aif3",
+		.playback = {
+			.stream_name = "AIF3 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF3 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif3_dai_ops
+	}
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
+	.probe = wm8995_probe,
+	.remove = wm8995_remove,
+	.suspend = wm8995_suspend,
+	.resume = wm8995_resume,
+	.set_bias_level = wm8995_set_bias_level,
+	.reg_cache_size = ARRAY_SIZE(wm8995_reg_defs),
+	.reg_word_size = sizeof(u16),
+	.reg_cache_default = wm8995_reg_defs,
+	.volatile_register = wm8995_volatile,
+	.compress_type = SND_SOC_RBTREE_COMPRESSION
+};
+
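+/*
+ * Bus glue: the control interface can be either SPI or I2C, selected
+ * by whichever bus the device is probed on.
+ */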
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8995_spi_probe(struct spi_device *spi)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+	if (!wm8995)
+		return -ENOMEM;
+
+	wm8995->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8995);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8995, wm8995_dai,
+				     ARRAY_SIZE(wm8995_dai));
+	if (ret < 0)
+		kfree(wm8995);
+	return ret;
+}
+
+static int __devexit wm8995_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8995_spi_driver = {
+	.driver = {
+		.name = "wm8995",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8995_spi_probe,
+	.remove = __devexit_p(wm8995_spi_remove)
+};
+#endif
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8995_i2c_probe(struct i2c_client *i2c,
+				      const struct i2c_device_id *id)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+	if (!wm8995)
+		return -ENOMEM;
+
+	wm8995->control_type = SND_SOC_I2C;
+	i2c_set_clientdata(i2c, wm8995);
+
+	ret = snd_soc_register_codec(&i2c->dev,
+				     &soc_codec_dev_wm8995, wm8995_dai,
+				     ARRAY_SIZE(wm8995_dai));
+	if (ret < 0)
+		kfree(wm8995);
+	return ret;
+}
+
+static __devexit int wm8995_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	kfree(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id wm8995_i2c_id[] = {
+	{"wm8995", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, wm8995_i2c_id);
+
+static struct i2c_driver wm8995_i2c_driver = {
+	.driver = {
+		.name = "wm8995",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8995_i2c_probe,
+	.remove = __devexit_p(wm8995_i2c_remove),
+	.id_table = wm8995_i2c_id
+};
+#endif
+
+static int __init wm8995_modinit(void)
+{
+	int ret = 0;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	ret = i2c_add_driver(&wm8995_i2c_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8995 I2C driver: %d\n",
+		       ret);
+	}
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8995_spi_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8995 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return ret;
+}
+
+module_init(wm8995_modinit);
+
+static void __exit wm8995_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_driver(&wm8995_i2c_driver);
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8995_spi_driver);
+#endif
+}
+
+module_exit(wm8995_exit);
+
+MODULE_DESCRIPTION("ASoC WM8995 driver");
+MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8995.h b/sound/soc/codecs/wm8995.h
new file mode 100644
index 0000000..5642121
--- /dev/null
+++ b/sound/soc/codecs/wm8995.h
@@ -0,0 +1,4269 @@
+/*
+ * wm8995.h  --  WM8995 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8995_H
+#define _WM8995_H
+
+#include <asm/types.h>
+
+/*
+ * Register values.
+ */
+#define WM8995_SOFTWARE_RESET                   0x00
+#define WM8995_POWER_MANAGEMENT_1               0x01
+#define WM8995_POWER_MANAGEMENT_2               0x02
+#define WM8995_POWER_MANAGEMENT_3               0x03
+#define WM8995_POWER_MANAGEMENT_4               0x04
+#define WM8995_POWER_MANAGEMENT_5               0x05
+#define WM8995_LEFT_LINE_INPUT_1_VOLUME         0x10
+#define WM8995_RIGHT_LINE_INPUT_1_VOLUME        0x11
+#define WM8995_LEFT_LINE_INPUT_CONTROL          0x12
+#define WM8995_DAC1_LEFT_VOLUME                 0x18
+#define WM8995_DAC1_RIGHT_VOLUME                0x19
+#define WM8995_DAC2_LEFT_VOLUME                 0x1A
+#define WM8995_DAC2_RIGHT_VOLUME                0x1B
+#define WM8995_OUTPUT_VOLUME_ZC_1               0x1C
+#define WM8995_MICBIAS_1                        0x20
+#define WM8995_MICBIAS_2                        0x21
+#define WM8995_LDO_1                            0x28
+#define WM8995_LDO_2                            0x29
+#define WM8995_ACCESSORY_DETECT_MODE1           0x30
+#define WM8995_ACCESSORY_DETECT_MODE2           0x31
+#define WM8995_HEADPHONE_DETECT1                0x34
+#define WM8995_HEADPHONE_DETECT2                0x35
+#define WM8995_MIC_DETECT_1                     0x38
+#define WM8995_MIC_DETECT_2                     0x39
+#define WM8995_CHARGE_PUMP_1                    0x40
+#define WM8995_CLASS_W_1                        0x45
+#define WM8995_DC_SERVO_1                       0x50
+#define WM8995_DC_SERVO_2                       0x51
+#define WM8995_DC_SERVO_3                       0x52
+#define WM8995_DC_SERVO_5                       0x54
+#define WM8995_DC_SERVO_6                       0x55
+#define WM8995_DC_SERVO_7                       0x56
+#define WM8995_DC_SERVO_READBACK_0              0x57
+#define WM8995_ANALOGUE_HP_1                    0x60
+#define WM8995_ANALOGUE_HP_2                    0x61
+#define WM8995_CHIP_REVISION                    0x100
+#define WM8995_CONTROL_INTERFACE_1              0x101
+#define WM8995_CONTROL_INTERFACE_2              0x102
+#define WM8995_WRITE_SEQUENCER_CTRL_1           0x110
+#define WM8995_WRITE_SEQUENCER_CTRL_2           0x111
+#define WM8995_AIF1_CLOCKING_1                  0x200
+#define WM8995_AIF1_CLOCKING_2                  0x201
+#define WM8995_AIF2_CLOCKING_1                  0x204
+#define WM8995_AIF2_CLOCKING_2                  0x205
+#define WM8995_CLOCKING_1                       0x208
+#define WM8995_CLOCKING_2                       0x209
+#define WM8995_AIF1_RATE                        0x210
+#define WM8995_AIF2_RATE                        0x211
+#define WM8995_RATE_STATUS                      0x212
+#define WM8995_FLL1_CONTROL_1                   0x220
+#define WM8995_FLL1_CONTROL_2                   0x221
+#define WM8995_FLL1_CONTROL_3                   0x222
+#define WM8995_FLL1_CONTROL_4                   0x223
+#define WM8995_FLL1_CONTROL_5                   0x224
+#define WM8995_FLL2_CONTROL_1                   0x240
+#define WM8995_FLL2_CONTROL_2                   0x241
+#define WM8995_FLL2_CONTROL_3                   0x242
+#define WM8995_FLL2_CONTROL_4                   0x243
+#define WM8995_FLL2_CONTROL_5                   0x244
+#define WM8995_AIF1_CONTROL_1                   0x300
+#define WM8995_AIF1_CONTROL_2                   0x301
+#define WM8995_AIF1_MASTER_SLAVE                0x302
+#define WM8995_AIF1_BCLK                        0x303
+#define WM8995_AIF1ADC_LRCLK                    0x304
+#define WM8995_AIF1DAC_LRCLK                    0x305
+#define WM8995_AIF1DAC_DATA                     0x306
+#define WM8995_AIF1ADC_DATA                     0x307
+#define WM8995_AIF2_CONTROL_1                   0x310
+#define WM8995_AIF2_CONTROL_2                   0x311
+#define WM8995_AIF2_MASTER_SLAVE                0x312
+#define WM8995_AIF2_BCLK                        0x313
+#define WM8995_AIF2ADC_LRCLK                    0x314
+#define WM8995_AIF2DAC_LRCLK                    0x315
+#define WM8995_AIF2DAC_DATA                     0x316
+#define WM8995_AIF2ADC_DATA                     0x317
+#define WM8995_AIF1_ADC1_LEFT_VOLUME            0x400
+#define WM8995_AIF1_ADC1_RIGHT_VOLUME           0x401
+#define WM8995_AIF1_DAC1_LEFT_VOLUME            0x402
+#define WM8995_AIF1_DAC1_RIGHT_VOLUME           0x403
+#define WM8995_AIF1_ADC2_LEFT_VOLUME            0x404
+#define WM8995_AIF1_ADC2_RIGHT_VOLUME           0x405
+#define WM8995_AIF1_DAC2_LEFT_VOLUME            0x406
+#define WM8995_AIF1_DAC2_RIGHT_VOLUME           0x407
+#define WM8995_AIF1_ADC1_FILTERS                0x410
+#define WM8995_AIF1_ADC2_FILTERS                0x411
+#define WM8995_AIF1_DAC1_FILTERS_1              0x420
+#define WM8995_AIF1_DAC1_FILTERS_2              0x421
+#define WM8995_AIF1_DAC2_FILTERS_1              0x422
+#define WM8995_AIF1_DAC2_FILTERS_2              0x423
+#define WM8995_AIF1_DRC1_1                      0x440
+#define WM8995_AIF1_DRC1_2                      0x441
+#define WM8995_AIF1_DRC1_3                      0x442
+#define WM8995_AIF1_DRC1_4                      0x443
+#define WM8995_AIF1_DRC1_5                      0x444
+#define WM8995_AIF1_DRC2_1                      0x450
+#define WM8995_AIF1_DRC2_2                      0x451
+#define WM8995_AIF1_DRC2_3                      0x452
+#define WM8995_AIF1_DRC2_4                      0x453
+#define WM8995_AIF1_DRC2_5                      0x454
+#define WM8995_AIF1_DAC1_EQ_GAINS_1             0x480
+#define WM8995_AIF1_DAC1_EQ_GAINS_2             0x481
+#define WM8995_AIF1_DAC1_EQ_BAND_1_A            0x482
+#define WM8995_AIF1_DAC1_EQ_BAND_1_B            0x483
+#define WM8995_AIF1_DAC1_EQ_BAND_1_PG           0x484
+#define WM8995_AIF1_DAC1_EQ_BAND_2_A            0x485
+#define WM8995_AIF1_DAC1_EQ_BAND_2_B            0x486
+#define WM8995_AIF1_DAC1_EQ_BAND_2_C            0x487
+#define WM8995_AIF1_DAC1_EQ_BAND_2_PG           0x488
+#define WM8995_AIF1_DAC1_EQ_BAND_3_A            0x489
+#define WM8995_AIF1_DAC1_EQ_BAND_3_B            0x48A
+#define WM8995_AIF1_DAC1_EQ_BAND_3_C            0x48B
+#define WM8995_AIF1_DAC1_EQ_BAND_3_PG           0x48C
+#define WM8995_AIF1_DAC1_EQ_BAND_4_A            0x48D
+#define WM8995_AIF1_DAC1_EQ_BAND_4_B            0x48E
+#define WM8995_AIF1_DAC1_EQ_BAND_4_C            0x48F
+#define WM8995_AIF1_DAC1_EQ_BAND_4_PG           0x490
+#define WM8995_AIF1_DAC1_EQ_BAND_5_A            0x491
+#define WM8995_AIF1_DAC1_EQ_BAND_5_B            0x492
+#define WM8995_AIF1_DAC1_EQ_BAND_5_PG           0x493
+#define WM8995_AIF1_DAC2_EQ_GAINS_1             0x4A0
+#define WM8995_AIF1_DAC2_EQ_GAINS_2             0x4A1
+#define WM8995_AIF1_DAC2_EQ_BAND_1_A            0x4A2
+#define WM8995_AIF1_DAC2_EQ_BAND_1_B            0x4A3
+#define WM8995_AIF1_DAC2_EQ_BAND_1_PG           0x4A4
+#define WM8995_AIF1_DAC2_EQ_BAND_2_A            0x4A5
+#define WM8995_AIF1_DAC2_EQ_BAND_2_B            0x4A6
+#define WM8995_AIF1_DAC2_EQ_BAND_2_C            0x4A7
+#define WM8995_AIF1_DAC2_EQ_BAND_2_PG           0x4A8
+#define WM8995_AIF1_DAC2_EQ_BAND_3_A            0x4A9
+#define WM8995_AIF1_DAC2_EQ_BAND_3_B            0x4AA
+#define WM8995_AIF1_DAC2_EQ_BAND_3_C            0x4AB
+#define WM8995_AIF1_DAC2_EQ_BAND_3_PG           0x4AC
+#define WM8995_AIF1_DAC2_EQ_BAND_4_A            0x4AD
+#define WM8995_AIF1_DAC2_EQ_BAND_4_B            0x4AE
+#define WM8995_AIF1_DAC2_EQ_BAND_4_C            0x4AF
+#define WM8995_AIF1_DAC2_EQ_BAND_4_PG           0x4B0
+#define WM8995_AIF1_DAC2_EQ_BAND_5_A            0x4B1
+#define WM8995_AIF1_DAC2_EQ_BAND_5_B            0x4B2
+#define WM8995_AIF1_DAC2_EQ_BAND_5_PG           0x4B3
+#define WM8995_AIF2_ADC_LEFT_VOLUME             0x500
+#define WM8995_AIF2_ADC_RIGHT_VOLUME            0x501
+#define WM8995_AIF2_DAC_LEFT_VOLUME             0x502
+#define WM8995_AIF2_DAC_RIGHT_VOLUME            0x503
+#define WM8995_AIF2_ADC_FILTERS                 0x510
+#define WM8995_AIF2_DAC_FILTERS_1               0x520
+#define WM8995_AIF2_DAC_FILTERS_2               0x521
+#define WM8995_AIF2_DRC_1                       0x540
+#define WM8995_AIF2_DRC_2                       0x541
+#define WM8995_AIF2_DRC_3                       0x542
+#define WM8995_AIF2_DRC_4                       0x543
+#define WM8995_AIF2_DRC_5                       0x544
+#define WM8995_AIF2_EQ_GAINS_1                  0x580
+#define WM8995_AIF2_EQ_GAINS_2                  0x581
+#define WM8995_AIF2_EQ_BAND_1_A                 0x582
+#define WM8995_AIF2_EQ_BAND_1_B                 0x583
+#define WM8995_AIF2_EQ_BAND_1_PG                0x584
+#define WM8995_AIF2_EQ_BAND_2_A                 0x585
+#define WM8995_AIF2_EQ_BAND_2_B                 0x586
+#define WM8995_AIF2_EQ_BAND_2_C                 0x587
+#define WM8995_AIF2_EQ_BAND_2_PG                0x588
+#define WM8995_AIF2_EQ_BAND_3_A                 0x589
+#define WM8995_AIF2_EQ_BAND_3_B                 0x58A
+#define WM8995_AIF2_EQ_BAND_3_C                 0x58B
+#define WM8995_AIF2_EQ_BAND_3_PG                0x58C
+#define WM8995_AIF2_EQ_BAND_4_A                 0x58D
+#define WM8995_AIF2_EQ_BAND_4_B                 0x58E
+#define WM8995_AIF2_EQ_BAND_4_C                 0x58F
+#define WM8995_AIF2_EQ_BAND_4_PG                0x590
+#define WM8995_AIF2_EQ_BAND_5_A                 0x591
+#define WM8995_AIF2_EQ_BAND_5_B                 0x592
+#define WM8995_AIF2_EQ_BAND_5_PG                0x593
+#define WM8995_DAC1_MIXER_VOLUMES               0x600
+#define WM8995_DAC1_LEFT_MIXER_ROUTING          0x601
+#define WM8995_DAC1_RIGHT_MIXER_ROUTING         0x602
+#define WM8995_DAC2_MIXER_VOLUMES               0x603
+#define WM8995_DAC2_LEFT_MIXER_ROUTING          0x604
+#define WM8995_DAC2_RIGHT_MIXER_ROUTING         0x605
+#define WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING     0x606
+#define WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING    0x607
+#define WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING     0x608
+#define WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING    0x609
+#define WM8995_DAC_SOFTMUTE                     0x610
+#define WM8995_OVERSAMPLING                     0x620
+#define WM8995_SIDETONE                         0x621
+#define WM8995_GPIO_1                           0x700
+#define WM8995_GPIO_2                           0x701
+#define WM8995_GPIO_3                           0x702
+#define WM8995_GPIO_4                           0x703
+#define WM8995_GPIO_5                           0x704
+#define WM8995_GPIO_6                           0x705
+#define WM8995_GPIO_7                           0x706
+#define WM8995_GPIO_8                           0x707
+#define WM8995_GPIO_9                           0x708
+#define WM8995_GPIO_10                          0x709
+#define WM8995_GPIO_11                          0x70A
+#define WM8995_GPIO_12                          0x70B
+#define WM8995_GPIO_13                          0x70C
+#define WM8995_GPIO_14                          0x70D
+#define WM8995_PULL_CONTROL_1                   0x720
+#define WM8995_PULL_CONTROL_2                   0x721
+#define WM8995_INTERRUPT_STATUS_1               0x730
+#define WM8995_INTERRUPT_STATUS_2               0x731
+#define WM8995_INTERRUPT_RAW_STATUS_2           0x732
+#define WM8995_INTERRUPT_STATUS_1_MASK          0x738
+#define WM8995_INTERRUPT_STATUS_2_MASK          0x739
+#define WM8995_INTERRUPT_CONTROL                0x740
+#define WM8995_LEFT_PDM_SPEAKER_1               0x800
+#define WM8995_RIGHT_PDM_SPEAKER_1              0x801
+#define WM8995_PDM_SPEAKER_1_MUTE_SEQUENCE      0x802
+#define WM8995_LEFT_PDM_SPEAKER_2               0x808
+#define WM8995_RIGHT_PDM_SPEAKER_2              0x809
+#define WM8995_PDM_SPEAKER_2_MUTE_SEQUENCE      0x80A
+#define WM8995_WRITE_SEQUENCER_0                0x3000
+#define WM8995_WRITE_SEQUENCER_1                0x3001
+#define WM8995_WRITE_SEQUENCER_2                0x3002
+#define WM8995_WRITE_SEQUENCER_3                0x3003
+#define WM8995_WRITE_SEQUENCER_4                0x3004
+#define WM8995_WRITE_SEQUENCER_5                0x3005
+#define WM8995_WRITE_SEQUENCER_6                0x3006
+#define WM8995_WRITE_SEQUENCER_7                0x3007
+#define WM8995_WRITE_SEQUENCER_8                0x3008
+#define WM8995_WRITE_SEQUENCER_9                0x3009
+#define WM8995_WRITE_SEQUENCER_10               0x300A
+#define WM8995_WRITE_SEQUENCER_11               0x300B
+#define WM8995_WRITE_SEQUENCER_12               0x300C
+#define WM8995_WRITE_SEQUENCER_13               0x300D
+#define WM8995_WRITE_SEQUENCER_14               0x300E
+#define WM8995_WRITE_SEQUENCER_15               0x300F
+#define WM8995_WRITE_SEQUENCER_16               0x3010
+#define WM8995_WRITE_SEQUENCER_17               0x3011
+#define WM8995_WRITE_SEQUENCER_18               0x3012
+#define WM8995_WRITE_SEQUENCER_19               0x3013
+#define WM8995_WRITE_SEQUENCER_20               0x3014
+#define WM8995_WRITE_SEQUENCER_21               0x3015
+#define WM8995_WRITE_SEQUENCER_22               0x3016
+#define WM8995_WRITE_SEQUENCER_23               0x3017
+#define WM8995_WRITE_SEQUENCER_24               0x3018
+#define WM8995_WRITE_SEQUENCER_25               0x3019
+#define WM8995_WRITE_SEQUENCER_26               0x301A
+#define WM8995_WRITE_SEQUENCER_27               0x301B
+#define WM8995_WRITE_SEQUENCER_28               0x301C
+#define WM8995_WRITE_SEQUENCER_29               0x301D
+#define WM8995_WRITE_SEQUENCER_30               0x301E
+#define WM8995_WRITE_SEQUENCER_31               0x301F
+#define WM8995_WRITE_SEQUENCER_32               0x3020
+#define WM8995_WRITE_SEQUENCER_33               0x3021
+#define WM8995_WRITE_SEQUENCER_34               0x3022
+#define WM8995_WRITE_SEQUENCER_35               0x3023
+#define WM8995_WRITE_SEQUENCER_36               0x3024
+#define WM8995_WRITE_SEQUENCER_37               0x3025
+#define WM8995_WRITE_SEQUENCER_38               0x3026
+#define WM8995_WRITE_SEQUENCER_39               0x3027
+#define WM8995_WRITE_SEQUENCER_40               0x3028
+#define WM8995_WRITE_SEQUENCER_41               0x3029
+#define WM8995_WRITE_SEQUENCER_42               0x302A
+#define WM8995_WRITE_SEQUENCER_43               0x302B
+#define WM8995_WRITE_SEQUENCER_44               0x302C
+#define WM8995_WRITE_SEQUENCER_45               0x302D
+#define WM8995_WRITE_SEQUENCER_46               0x302E
+#define WM8995_WRITE_SEQUENCER_47               0x302F
+#define WM8995_WRITE_SEQUENCER_48               0x3030
+#define WM8995_WRITE_SEQUENCER_49               0x3031
+#define WM8995_WRITE_SEQUENCER_50               0x3032
+#define WM8995_WRITE_SEQUENCER_51               0x3033
+#define WM8995_WRITE_SEQUENCER_52               0x3034
+#define WM8995_WRITE_SEQUENCER_53               0x3035
+#define WM8995_WRITE_SEQUENCER_54               0x3036
+#define WM8995_WRITE_SEQUENCER_55               0x3037
+#define WM8995_WRITE_SEQUENCER_56               0x3038
+#define WM8995_WRITE_SEQUENCER_57               0x3039
+#define WM8995_WRITE_SEQUENCER_58               0x303A
+#define WM8995_WRITE_SEQUENCER_59               0x303B
+#define WM8995_WRITE_SEQUENCER_60               0x303C
+#define WM8995_WRITE_SEQUENCER_61               0x303D
+#define WM8995_WRITE_SEQUENCER_62               0x303E
+#define WM8995_WRITE_SEQUENCER_63               0x303F
+#define WM8995_WRITE_SEQUENCER_64               0x3040
+#define WM8995_WRITE_SEQUENCER_65               0x3041
+#define WM8995_WRITE_SEQUENCER_66               0x3042
+#define WM8995_WRITE_SEQUENCER_67               0x3043
+#define WM8995_WRITE_SEQUENCER_68               0x3044
+#define WM8995_WRITE_SEQUENCER_69               0x3045
+#define WM8995_WRITE_SEQUENCER_70               0x3046
+#define WM8995_WRITE_SEQUENCER_71               0x3047
+#define WM8995_WRITE_SEQUENCER_72               0x3048
+#define WM8995_WRITE_SEQUENCER_73               0x3049
+#define WM8995_WRITE_SEQUENCER_74               0x304A
+#define WM8995_WRITE_SEQUENCER_75               0x304B
+#define WM8995_WRITE_SEQUENCER_76               0x304C
+#define WM8995_WRITE_SEQUENCER_77               0x304D
+#define WM8995_WRITE_SEQUENCER_78               0x304E
+#define WM8995_WRITE_SEQUENCER_79               0x304F
+#define WM8995_WRITE_SEQUENCER_80               0x3050
+#define WM8995_WRITE_SEQUENCER_81               0x3051
+#define WM8995_WRITE_SEQUENCER_82               0x3052
+#define WM8995_WRITE_SEQUENCER_83               0x3053
+#define WM8995_WRITE_SEQUENCER_84               0x3054
+#define WM8995_WRITE_SEQUENCER_85               0x3055
+#define WM8995_WRITE_SEQUENCER_86               0x3056
+#define WM8995_WRITE_SEQUENCER_87               0x3057
+#define WM8995_WRITE_SEQUENCER_88               0x3058
+#define WM8995_WRITE_SEQUENCER_89               0x3059
+#define WM8995_WRITE_SEQUENCER_90               0x305A
+#define WM8995_WRITE_SEQUENCER_91               0x305B
+#define WM8995_WRITE_SEQUENCER_92               0x305C
+#define WM8995_WRITE_SEQUENCER_93               0x305D
+#define WM8995_WRITE_SEQUENCER_94               0x305E
+#define WM8995_WRITE_SEQUENCER_95               0x305F
+#define WM8995_WRITE_SEQUENCER_96               0x3060
+#define WM8995_WRITE_SEQUENCER_97               0x3061
+#define WM8995_WRITE_SEQUENCER_98               0x3062
+#define WM8995_WRITE_SEQUENCER_99               0x3063
+#define WM8995_WRITE_SEQUENCER_100              0x3064
+#define WM8995_WRITE_SEQUENCER_101              0x3065
+#define WM8995_WRITE_SEQUENCER_102              0x3066
+#define WM8995_WRITE_SEQUENCER_103              0x3067
+#define WM8995_WRITE_SEQUENCER_104              0x3068
+#define WM8995_WRITE_SEQUENCER_105              0x3069
+#define WM8995_WRITE_SEQUENCER_106              0x306A
+#define WM8995_WRITE_SEQUENCER_107              0x306B
+#define WM8995_WRITE_SEQUENCER_108              0x306C
+#define WM8995_WRITE_SEQUENCER_109              0x306D
+#define WM8995_WRITE_SEQUENCER_110              0x306E
+#define WM8995_WRITE_SEQUENCER_111              0x306F
+#define WM8995_WRITE_SEQUENCER_112              0x3070
+#define WM8995_WRITE_SEQUENCER_113              0x3071
+#define WM8995_WRITE_SEQUENCER_114              0x3072
+#define WM8995_WRITE_SEQUENCER_115              0x3073
+#define WM8995_WRITE_SEQUENCER_116              0x3074
+#define WM8995_WRITE_SEQUENCER_117              0x3075
+#define WM8995_WRITE_SEQUENCER_118              0x3076
+#define WM8995_WRITE_SEQUENCER_119              0x3077
+#define WM8995_WRITE_SEQUENCER_120              0x3078
+#define WM8995_WRITE_SEQUENCER_121              0x3079
+#define WM8995_WRITE_SEQUENCER_122              0x307A
+#define WM8995_WRITE_SEQUENCER_123              0x307B
+#define WM8995_WRITE_SEQUENCER_124              0x307C
+#define WM8995_WRITE_SEQUENCER_125              0x307D
+#define WM8995_WRITE_SEQUENCER_126              0x307E
+#define WM8995_WRITE_SEQUENCER_127              0x307F
+#define WM8995_WRITE_SEQUENCER_128              0x3080
+#define WM8995_WRITE_SEQUENCER_129              0x3081
+#define WM8995_WRITE_SEQUENCER_130              0x3082
+#define WM8995_WRITE_SEQUENCER_131              0x3083
+#define WM8995_WRITE_SEQUENCER_132              0x3084
+#define WM8995_WRITE_SEQUENCER_133              0x3085
+#define WM8995_WRITE_SEQUENCER_134              0x3086
+#define WM8995_WRITE_SEQUENCER_135              0x3087
+#define WM8995_WRITE_SEQUENCER_136              0x3088
+#define WM8995_WRITE_SEQUENCER_137              0x3089
+#define WM8995_WRITE_SEQUENCER_138              0x308A
+#define WM8995_WRITE_SEQUENCER_139              0x308B
+#define WM8995_WRITE_SEQUENCER_140              0x308C
+#define WM8995_WRITE_SEQUENCER_141              0x308D
+#define WM8995_WRITE_SEQUENCER_142              0x308E
+#define WM8995_WRITE_SEQUENCER_143              0x308F
+#define WM8995_WRITE_SEQUENCER_144              0x3090
+#define WM8995_WRITE_SEQUENCER_145              0x3091
+#define WM8995_WRITE_SEQUENCER_146              0x3092
+#define WM8995_WRITE_SEQUENCER_147              0x3093
+#define WM8995_WRITE_SEQUENCER_148              0x3094
+#define WM8995_WRITE_SEQUENCER_149              0x3095
+#define WM8995_WRITE_SEQUENCER_150              0x3096
+#define WM8995_WRITE_SEQUENCER_151              0x3097
+#define WM8995_WRITE_SEQUENCER_152              0x3098
+#define WM8995_WRITE_SEQUENCER_153              0x3099
+#define WM8995_WRITE_SEQUENCER_154              0x309A
+#define WM8995_WRITE_SEQUENCER_155              0x309B
+#define WM8995_WRITE_SEQUENCER_156              0x309C
+#define WM8995_WRITE_SEQUENCER_157              0x309D
+#define WM8995_WRITE_SEQUENCER_158              0x309E
+#define WM8995_WRITE_SEQUENCER_159              0x309F
+#define WM8995_WRITE_SEQUENCER_160              0x30A0
+#define WM8995_WRITE_SEQUENCER_161              0x30A1
+#define WM8995_WRITE_SEQUENCER_162              0x30A2
+#define WM8995_WRITE_SEQUENCER_163              0x30A3
+#define WM8995_WRITE_SEQUENCER_164              0x30A4
+#define WM8995_WRITE_SEQUENCER_165              0x30A5
+#define WM8995_WRITE_SEQUENCER_166              0x30A6
+#define WM8995_WRITE_SEQUENCER_167              0x30A7
+#define WM8995_WRITE_SEQUENCER_168              0x30A8
+#define WM8995_WRITE_SEQUENCER_169              0x30A9
+#define WM8995_WRITE_SEQUENCER_170              0x30AA
+#define WM8995_WRITE_SEQUENCER_171              0x30AB
+#define WM8995_WRITE_SEQUENCER_172              0x30AC
+#define WM8995_WRITE_SEQUENCER_173              0x30AD
+#define WM8995_WRITE_SEQUENCER_174              0x30AE
+#define WM8995_WRITE_SEQUENCER_175              0x30AF
+#define WM8995_WRITE_SEQUENCER_176              0x30B0
+#define WM8995_WRITE_SEQUENCER_177              0x30B1
+#define WM8995_WRITE_SEQUENCER_178              0x30B2
+#define WM8995_WRITE_SEQUENCER_179              0x30B3
+#define WM8995_WRITE_SEQUENCER_180              0x30B4
+#define WM8995_WRITE_SEQUENCER_181              0x30B5
+#define WM8995_WRITE_SEQUENCER_182              0x30B6
+#define WM8995_WRITE_SEQUENCER_183              0x30B7
+#define WM8995_WRITE_SEQUENCER_184              0x30B8
+#define WM8995_WRITE_SEQUENCER_185              0x30B9
+#define WM8995_WRITE_SEQUENCER_186              0x30BA
+#define WM8995_WRITE_SEQUENCER_187              0x30BB
+#define WM8995_WRITE_SEQUENCER_188              0x30BC
+#define WM8995_WRITE_SEQUENCER_189              0x30BD
+#define WM8995_WRITE_SEQUENCER_190              0x30BE
+#define WM8995_WRITE_SEQUENCER_191              0x30BF
+#define WM8995_WRITE_SEQUENCER_192              0x30C0
+#define WM8995_WRITE_SEQUENCER_193              0x30C1
+#define WM8995_WRITE_SEQUENCER_194              0x30C2
+#define WM8995_WRITE_SEQUENCER_195              0x30C3
+#define WM8995_WRITE_SEQUENCER_196              0x30C4
+#define WM8995_WRITE_SEQUENCER_197              0x30C5
+#define WM8995_WRITE_SEQUENCER_198              0x30C6
+#define WM8995_WRITE_SEQUENCER_199              0x30C7
+#define WM8995_WRITE_SEQUENCER_200              0x30C8
+#define WM8995_WRITE_SEQUENCER_201              0x30C9
+#define WM8995_WRITE_SEQUENCER_202              0x30CA
+#define WM8995_WRITE_SEQUENCER_203              0x30CB
+#define WM8995_WRITE_SEQUENCER_204              0x30CC
+#define WM8995_WRITE_SEQUENCER_205              0x30CD
+#define WM8995_WRITE_SEQUENCER_206              0x30CE
+#define WM8995_WRITE_SEQUENCER_207              0x30CF
+#define WM8995_WRITE_SEQUENCER_208              0x30D0
+#define WM8995_WRITE_SEQUENCER_209              0x30D1
+#define WM8995_WRITE_SEQUENCER_210              0x30D2
+#define WM8995_WRITE_SEQUENCER_211              0x30D3
+#define WM8995_WRITE_SEQUENCER_212              0x30D4
+#define WM8995_WRITE_SEQUENCER_213              0x30D5
+#define WM8995_WRITE_SEQUENCER_214              0x30D6
+#define WM8995_WRITE_SEQUENCER_215              0x30D7
+#define WM8995_WRITE_SEQUENCER_216              0x30D8
+#define WM8995_WRITE_SEQUENCER_217              0x30D9
+#define WM8995_WRITE_SEQUENCER_218              0x30DA
+#define WM8995_WRITE_SEQUENCER_219              0x30DB
+#define WM8995_WRITE_SEQUENCER_220              0x30DC
+#define WM8995_WRITE_SEQUENCER_221              0x30DD
+#define WM8995_WRITE_SEQUENCER_222              0x30DE
+#define WM8995_WRITE_SEQUENCER_223              0x30DF
+#define WM8995_WRITE_SEQUENCER_224              0x30E0
+#define WM8995_WRITE_SEQUENCER_225              0x30E1
+#define WM8995_WRITE_SEQUENCER_226              0x30E2
+#define WM8995_WRITE_SEQUENCER_227              0x30E3
+#define WM8995_WRITE_SEQUENCER_228              0x30E4
+#define WM8995_WRITE_SEQUENCER_229              0x30E5
+#define WM8995_WRITE_SEQUENCER_230              0x30E6
+#define WM8995_WRITE_SEQUENCER_231              0x30E7
+#define WM8995_WRITE_SEQUENCER_232              0x30E8
+#define WM8995_WRITE_SEQUENCER_233              0x30E9
+#define WM8995_WRITE_SEQUENCER_234              0x30EA
+#define WM8995_WRITE_SEQUENCER_235              0x30EB
+#define WM8995_WRITE_SEQUENCER_236              0x30EC
+#define WM8995_WRITE_SEQUENCER_237              0x30ED
+#define WM8995_WRITE_SEQUENCER_238              0x30EE
+#define WM8995_WRITE_SEQUENCER_239              0x30EF
+#define WM8995_WRITE_SEQUENCER_240              0x30F0
+#define WM8995_WRITE_SEQUENCER_241              0x30F1
+#define WM8995_WRITE_SEQUENCER_242              0x30F2
+#define WM8995_WRITE_SEQUENCER_243              0x30F3
+#define WM8995_WRITE_SEQUENCER_244              0x30F4
+#define WM8995_WRITE_SEQUENCER_245              0x30F5
+#define WM8995_WRITE_SEQUENCER_246              0x30F6
+#define WM8995_WRITE_SEQUENCER_247              0x30F7
+#define WM8995_WRITE_SEQUENCER_248              0x30F8
+#define WM8995_WRITE_SEQUENCER_249              0x30F9
+#define WM8995_WRITE_SEQUENCER_250              0x30FA
+#define WM8995_WRITE_SEQUENCER_251              0x30FB
+#define WM8995_WRITE_SEQUENCER_252              0x30FC
+#define WM8995_WRITE_SEQUENCER_253              0x30FD
+#define WM8995_WRITE_SEQUENCER_254              0x30FE
+#define WM8995_WRITE_SEQUENCER_255              0x30FF
+#define WM8995_WRITE_SEQUENCER_256              0x3100
+#define WM8995_WRITE_SEQUENCER_257              0x3101
+#define WM8995_WRITE_SEQUENCER_258              0x3102
+#define WM8995_WRITE_SEQUENCER_259              0x3103
+#define WM8995_WRITE_SEQUENCER_260              0x3104
+#define WM8995_WRITE_SEQUENCER_261              0x3105
+#define WM8995_WRITE_SEQUENCER_262              0x3106
+#define WM8995_WRITE_SEQUENCER_263              0x3107
+#define WM8995_WRITE_SEQUENCER_264              0x3108
+#define WM8995_WRITE_SEQUENCER_265              0x3109
+#define WM8995_WRITE_SEQUENCER_266              0x310A
+#define WM8995_WRITE_SEQUENCER_267              0x310B
+#define WM8995_WRITE_SEQUENCER_268              0x310C
+#define WM8995_WRITE_SEQUENCER_269              0x310D
+#define WM8995_WRITE_SEQUENCER_270              0x310E
+#define WM8995_WRITE_SEQUENCER_271              0x310F
+#define WM8995_WRITE_SEQUENCER_272              0x3110
+#define WM8995_WRITE_SEQUENCER_273              0x3111
+#define WM8995_WRITE_SEQUENCER_274              0x3112
+#define WM8995_WRITE_SEQUENCER_275              0x3113
+#define WM8995_WRITE_SEQUENCER_276              0x3114
+#define WM8995_WRITE_SEQUENCER_277              0x3115
+#define WM8995_WRITE_SEQUENCER_278              0x3116
+#define WM8995_WRITE_SEQUENCER_279              0x3117
+#define WM8995_WRITE_SEQUENCER_280              0x3118
+#define WM8995_WRITE_SEQUENCER_281              0x3119
+#define WM8995_WRITE_SEQUENCER_282              0x311A
+#define WM8995_WRITE_SEQUENCER_283              0x311B
+#define WM8995_WRITE_SEQUENCER_284              0x311C
+#define WM8995_WRITE_SEQUENCER_285              0x311D
+#define WM8995_WRITE_SEQUENCER_286              0x311E
+#define WM8995_WRITE_SEQUENCER_287              0x311F
+#define WM8995_WRITE_SEQUENCER_288              0x3120
+#define WM8995_WRITE_SEQUENCER_289              0x3121
+#define WM8995_WRITE_SEQUENCER_290              0x3122
+#define WM8995_WRITE_SEQUENCER_291              0x3123
+#define WM8995_WRITE_SEQUENCER_292              0x3124
+#define WM8995_WRITE_SEQUENCER_293              0x3125
+#define WM8995_WRITE_SEQUENCER_294              0x3126
+#define WM8995_WRITE_SEQUENCER_295              0x3127
+#define WM8995_WRITE_SEQUENCER_296              0x3128
+#define WM8995_WRITE_SEQUENCER_297              0x3129
+#define WM8995_WRITE_SEQUENCER_298              0x312A
+#define WM8995_WRITE_SEQUENCER_299              0x312B
+#define WM8995_WRITE_SEQUENCER_300              0x312C
+#define WM8995_WRITE_SEQUENCER_301              0x312D
+#define WM8995_WRITE_SEQUENCER_302              0x312E
+#define WM8995_WRITE_SEQUENCER_303              0x312F
+#define WM8995_WRITE_SEQUENCER_304              0x3130
+#define WM8995_WRITE_SEQUENCER_305              0x3131
+#define WM8995_WRITE_SEQUENCER_306              0x3132
+#define WM8995_WRITE_SEQUENCER_307              0x3133
+#define WM8995_WRITE_SEQUENCER_308              0x3134
+#define WM8995_WRITE_SEQUENCER_309              0x3135
+#define WM8995_WRITE_SEQUENCER_310              0x3136
+#define WM8995_WRITE_SEQUENCER_311              0x3137
+#define WM8995_WRITE_SEQUENCER_312              0x3138
+#define WM8995_WRITE_SEQUENCER_313              0x3139
+#define WM8995_WRITE_SEQUENCER_314              0x313A
+#define WM8995_WRITE_SEQUENCER_315              0x313B
+#define WM8995_WRITE_SEQUENCER_316              0x313C
+#define WM8995_WRITE_SEQUENCER_317              0x313D
+#define WM8995_WRITE_SEQUENCER_318              0x313E
+#define WM8995_WRITE_SEQUENCER_319              0x313F
+#define WM8995_WRITE_SEQUENCER_320              0x3140
+#define WM8995_WRITE_SEQUENCER_321              0x3141
+#define WM8995_WRITE_SEQUENCER_322              0x3142
+#define WM8995_WRITE_SEQUENCER_323              0x3143
+#define WM8995_WRITE_SEQUENCER_324              0x3144
+#define WM8995_WRITE_SEQUENCER_325              0x3145
+#define WM8995_WRITE_SEQUENCER_326              0x3146
+#define WM8995_WRITE_SEQUENCER_327              0x3147
+#define WM8995_WRITE_SEQUENCER_328              0x3148
+#define WM8995_WRITE_SEQUENCER_329              0x3149
+#define WM8995_WRITE_SEQUENCER_330              0x314A
+#define WM8995_WRITE_SEQUENCER_331              0x314B
+#define WM8995_WRITE_SEQUENCER_332              0x314C
+#define WM8995_WRITE_SEQUENCER_333              0x314D
+#define WM8995_WRITE_SEQUENCER_334              0x314E
+#define WM8995_WRITE_SEQUENCER_335              0x314F
+#define WM8995_WRITE_SEQUENCER_336              0x3150
+#define WM8995_WRITE_SEQUENCER_337              0x3151
+#define WM8995_WRITE_SEQUENCER_338              0x3152
+#define WM8995_WRITE_SEQUENCER_339              0x3153
+#define WM8995_WRITE_SEQUENCER_340              0x3154
+#define WM8995_WRITE_SEQUENCER_341              0x3155
+#define WM8995_WRITE_SEQUENCER_342              0x3156
+#define WM8995_WRITE_SEQUENCER_343              0x3157
+#define WM8995_WRITE_SEQUENCER_344              0x3158
+#define WM8995_WRITE_SEQUENCER_345              0x3159
+#define WM8995_WRITE_SEQUENCER_346              0x315A
+#define WM8995_WRITE_SEQUENCER_347              0x315B
+#define WM8995_WRITE_SEQUENCER_348              0x315C
+#define WM8995_WRITE_SEQUENCER_349              0x315D
+#define WM8995_WRITE_SEQUENCER_350              0x315E
+#define WM8995_WRITE_SEQUENCER_351              0x315F
+#define WM8995_WRITE_SEQUENCER_352              0x3160
+#define WM8995_WRITE_SEQUENCER_353              0x3161
+#define WM8995_WRITE_SEQUENCER_354              0x3162
+#define WM8995_WRITE_SEQUENCER_355              0x3163
+#define WM8995_WRITE_SEQUENCER_356              0x3164
+#define WM8995_WRITE_SEQUENCER_357              0x3165
+#define WM8995_WRITE_SEQUENCER_358              0x3166
+#define WM8995_WRITE_SEQUENCER_359              0x3167
+#define WM8995_WRITE_SEQUENCER_360              0x3168
+#define WM8995_WRITE_SEQUENCER_361              0x3169
+#define WM8995_WRITE_SEQUENCER_362              0x316A
+#define WM8995_WRITE_SEQUENCER_363              0x316B
+#define WM8995_WRITE_SEQUENCER_364              0x316C
+#define WM8995_WRITE_SEQUENCER_365              0x316D
+#define WM8995_WRITE_SEQUENCER_366              0x316E
+#define WM8995_WRITE_SEQUENCER_367              0x316F
+#define WM8995_WRITE_SEQUENCER_368              0x3170
+#define WM8995_WRITE_SEQUENCER_369              0x3171
+#define WM8995_WRITE_SEQUENCER_370              0x3172
+#define WM8995_WRITE_SEQUENCER_371              0x3173
+#define WM8995_WRITE_SEQUENCER_372              0x3174
+#define WM8995_WRITE_SEQUENCER_373              0x3175
+#define WM8995_WRITE_SEQUENCER_374              0x3176
+#define WM8995_WRITE_SEQUENCER_375              0x3177
+#define WM8995_WRITE_SEQUENCER_376              0x3178
+#define WM8995_WRITE_SEQUENCER_377              0x3179
+#define WM8995_WRITE_SEQUENCER_378              0x317A
+#define WM8995_WRITE_SEQUENCER_379              0x317B
+#define WM8995_WRITE_SEQUENCER_380              0x317C
+#define WM8995_WRITE_SEQUENCER_381              0x317D
+#define WM8995_WRITE_SEQUENCER_382              0x317E
+#define WM8995_WRITE_SEQUENCER_383              0x317F
+#define WM8995_WRITE_SEQUENCER_384              0x3180
+#define WM8995_WRITE_SEQUENCER_385              0x3181
+#define WM8995_WRITE_SEQUENCER_386              0x3182
+#define WM8995_WRITE_SEQUENCER_387              0x3183
+#define WM8995_WRITE_SEQUENCER_388              0x3184
+#define WM8995_WRITE_SEQUENCER_389              0x3185
+#define WM8995_WRITE_SEQUENCER_390              0x3186
+#define WM8995_WRITE_SEQUENCER_391              0x3187
+#define WM8995_WRITE_SEQUENCER_392              0x3188
+#define WM8995_WRITE_SEQUENCER_393              0x3189
+#define WM8995_WRITE_SEQUENCER_394              0x318A
+#define WM8995_WRITE_SEQUENCER_395              0x318B
+#define WM8995_WRITE_SEQUENCER_396              0x318C
+#define WM8995_WRITE_SEQUENCER_397              0x318D
+#define WM8995_WRITE_SEQUENCER_398              0x318E
+#define WM8995_WRITE_SEQUENCER_399              0x318F
+#define WM8995_WRITE_SEQUENCER_400              0x3190
+#define WM8995_WRITE_SEQUENCER_401              0x3191
+#define WM8995_WRITE_SEQUENCER_402              0x3192
+#define WM8995_WRITE_SEQUENCER_403              0x3193
+#define WM8995_WRITE_SEQUENCER_404              0x3194
+#define WM8995_WRITE_SEQUENCER_405              0x3195
+#define WM8995_WRITE_SEQUENCER_406              0x3196
+#define WM8995_WRITE_SEQUENCER_407              0x3197
+#define WM8995_WRITE_SEQUENCER_408              0x3198
+#define WM8995_WRITE_SEQUENCER_409              0x3199
+#define WM8995_WRITE_SEQUENCER_410              0x319A
+#define WM8995_WRITE_SEQUENCER_411              0x319B
+#define WM8995_WRITE_SEQUENCER_412              0x319C
+#define WM8995_WRITE_SEQUENCER_413              0x319D
+#define WM8995_WRITE_SEQUENCER_414              0x319E
+#define WM8995_WRITE_SEQUENCER_415              0x319F
+#define WM8995_WRITE_SEQUENCER_416              0x31A0
+#define WM8995_WRITE_SEQUENCER_417              0x31A1
+#define WM8995_WRITE_SEQUENCER_418              0x31A2
+#define WM8995_WRITE_SEQUENCER_419              0x31A3
+#define WM8995_WRITE_SEQUENCER_420              0x31A4
+#define WM8995_WRITE_SEQUENCER_421              0x31A5
+#define WM8995_WRITE_SEQUENCER_422              0x31A6
+#define WM8995_WRITE_SEQUENCER_423              0x31A7
+#define WM8995_WRITE_SEQUENCER_424              0x31A8
+#define WM8995_WRITE_SEQUENCER_425              0x31A9
+#define WM8995_WRITE_SEQUENCER_426              0x31AA
+#define WM8995_WRITE_SEQUENCER_427              0x31AB
+#define WM8995_WRITE_SEQUENCER_428              0x31AC
+#define WM8995_WRITE_SEQUENCER_429              0x31AD
+#define WM8995_WRITE_SEQUENCER_430              0x31AE
+#define WM8995_WRITE_SEQUENCER_431              0x31AF
+#define WM8995_WRITE_SEQUENCER_432              0x31B0
+#define WM8995_WRITE_SEQUENCER_433              0x31B1
+#define WM8995_WRITE_SEQUENCER_434              0x31B2
+#define WM8995_WRITE_SEQUENCER_435              0x31B3
+#define WM8995_WRITE_SEQUENCER_436              0x31B4
+#define WM8995_WRITE_SEQUENCER_437              0x31B5
+#define WM8995_WRITE_SEQUENCER_438              0x31B6
+#define WM8995_WRITE_SEQUENCER_439              0x31B7
+#define WM8995_WRITE_SEQUENCER_440              0x31B8
+#define WM8995_WRITE_SEQUENCER_441              0x31B9
+#define WM8995_WRITE_SEQUENCER_442              0x31BA
+#define WM8995_WRITE_SEQUENCER_443              0x31BB
+#define WM8995_WRITE_SEQUENCER_444              0x31BC
+#define WM8995_WRITE_SEQUENCER_445              0x31BD
+#define WM8995_WRITE_SEQUENCER_446              0x31BE
+#define WM8995_WRITE_SEQUENCER_447              0x31BF
+#define WM8995_WRITE_SEQUENCER_448              0x31C0
+#define WM8995_WRITE_SEQUENCER_449              0x31C1
+#define WM8995_WRITE_SEQUENCER_450              0x31C2
+#define WM8995_WRITE_SEQUENCER_451              0x31C3
+#define WM8995_WRITE_SEQUENCER_452              0x31C4
+#define WM8995_WRITE_SEQUENCER_453              0x31C5
+#define WM8995_WRITE_SEQUENCER_454              0x31C6
+#define WM8995_WRITE_SEQUENCER_455              0x31C7
+#define WM8995_WRITE_SEQUENCER_456              0x31C8
+#define WM8995_WRITE_SEQUENCER_457              0x31C9
+#define WM8995_WRITE_SEQUENCER_458              0x31CA
+#define WM8995_WRITE_SEQUENCER_459              0x31CB
+#define WM8995_WRITE_SEQUENCER_460              0x31CC
+#define WM8995_WRITE_SEQUENCER_461              0x31CD
+#define WM8995_WRITE_SEQUENCER_462              0x31CE
+#define WM8995_WRITE_SEQUENCER_463              0x31CF
+#define WM8995_WRITE_SEQUENCER_464              0x31D0
+#define WM8995_WRITE_SEQUENCER_465              0x31D1
+#define WM8995_WRITE_SEQUENCER_466              0x31D2
+#define WM8995_WRITE_SEQUENCER_467              0x31D3
+#define WM8995_WRITE_SEQUENCER_468              0x31D4
+#define WM8995_WRITE_SEQUENCER_469              0x31D5
+#define WM8995_WRITE_SEQUENCER_470              0x31D6
+#define WM8995_WRITE_SEQUENCER_471              0x31D7
+#define WM8995_WRITE_SEQUENCER_472              0x31D8
+#define WM8995_WRITE_SEQUENCER_473              0x31D9
+#define WM8995_WRITE_SEQUENCER_474              0x31DA
+#define WM8995_WRITE_SEQUENCER_475              0x31DB
+#define WM8995_WRITE_SEQUENCER_476              0x31DC
+#define WM8995_WRITE_SEQUENCER_477              0x31DD
+#define WM8995_WRITE_SEQUENCER_478              0x31DE
+#define WM8995_WRITE_SEQUENCER_479              0x31DF
+#define WM8995_WRITE_SEQUENCER_480              0x31E0
+#define WM8995_WRITE_SEQUENCER_481              0x31E1
+#define WM8995_WRITE_SEQUENCER_482              0x31E2
+#define WM8995_WRITE_SEQUENCER_483              0x31E3
+#define WM8995_WRITE_SEQUENCER_484              0x31E4
+#define WM8995_WRITE_SEQUENCER_485              0x31E5
+#define WM8995_WRITE_SEQUENCER_486              0x31E6
+#define WM8995_WRITE_SEQUENCER_487              0x31E7
+#define WM8995_WRITE_SEQUENCER_488              0x31E8
+#define WM8995_WRITE_SEQUENCER_489              0x31E9
+#define WM8995_WRITE_SEQUENCER_490              0x31EA
+#define WM8995_WRITE_SEQUENCER_491              0x31EB
+#define WM8995_WRITE_SEQUENCER_492              0x31EC
+#define WM8995_WRITE_SEQUENCER_493              0x31ED
+#define WM8995_WRITE_SEQUENCER_494              0x31EE
+#define WM8995_WRITE_SEQUENCER_495              0x31EF
+#define WM8995_WRITE_SEQUENCER_496              0x31F0
+#define WM8995_WRITE_SEQUENCER_497              0x31F1
+#define WM8995_WRITE_SEQUENCER_498              0x31F2
+#define WM8995_WRITE_SEQUENCER_499              0x31F3
+#define WM8995_WRITE_SEQUENCER_500              0x31F4
+#define WM8995_WRITE_SEQUENCER_501              0x31F5
+#define WM8995_WRITE_SEQUENCER_502              0x31F6
+#define WM8995_WRITE_SEQUENCER_503              0x31F7
+#define WM8995_WRITE_SEQUENCER_504              0x31F8
+#define WM8995_WRITE_SEQUENCER_505              0x31F9
+#define WM8995_WRITE_SEQUENCER_506              0x31FA
+#define WM8995_WRITE_SEQUENCER_507              0x31FB
+#define WM8995_WRITE_SEQUENCER_508              0x31FC
+#define WM8995_WRITE_SEQUENCER_509              0x31FD
+#define WM8995_WRITE_SEQUENCER_510              0x31FE
+#define WM8995_WRITE_SEQUENCER_511              0x31FF
+
+#define WM8995_REGISTER_COUNT                   725
+#define WM8995_MAX_REGISTER                     0x31FF
+
+#define WM8995_MAX_CACHED_REGISTER		WM8995_MAX_REGISTER
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Software Reset
+ */
+#define WM8995_SW_RESET_MASK                    0xFFFF	/* SW_RESET - [15:0] */
+#define WM8995_SW_RESET_SHIFT                        0	/* SW_RESET - [15:0] */
+#define WM8995_SW_RESET_WIDTH                       16	/* SW_RESET - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM8995_MICB2_ENA                        0x0200	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_MASK                   0x0200	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_SHIFT                       9	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_WIDTH                       1	/* MICB2_ENA */
+#define WM8995_MICB1_ENA                        0x0100	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_MASK                   0x0100	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_SHIFT                       8	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_WIDTH                       1	/* MICB1_ENA */
+#define WM8995_HPOUT2L_ENA                      0x0080	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_MASK                 0x0080	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_SHIFT                     7	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_WIDTH                     1	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2R_ENA                      0x0040	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_MASK                 0x0040	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_SHIFT                     6	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_WIDTH                     1	/* HPOUT2R_ENA */
+#define WM8995_HPOUT1L_ENA                      0x0020	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_MASK                 0x0020	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_SHIFT                     5	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_WIDTH                     1	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1R_ENA                      0x0010	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_MASK                 0x0010	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_SHIFT                     4	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_WIDTH                     1	/* HPOUT1R_ENA */
+#define WM8995_BG_ENA                           0x0001	/* BG_ENA */
+#define WM8995_BG_ENA_MASK                      0x0001	/* BG_ENA */
+#define WM8995_BG_ENA_SHIFT                          0	/* BG_ENA */
+#define WM8995_BG_ENA_WIDTH                          1	/* BG_ENA */
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM8995_OPCLK_ENA                        0x0800	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_MASK                   0x0800	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_SHIFT                      11	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_WIDTH                       1	/* OPCLK_ENA */
+#define WM8995_IN1L_ENA                         0x0020	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_MASK                    0x0020	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_SHIFT                        5	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_WIDTH                        1	/* IN1L_ENA */
+#define WM8995_IN1R_ENA                         0x0010	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_MASK                    0x0010	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_SHIFT                        4	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_WIDTH                        1	/* IN1R_ENA */
+#define WM8995_LDO2_ENA                         0x0002	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_MASK                    0x0002	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_SHIFT                        1	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_WIDTH                        1	/* LDO2_ENA */
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM8995_AIF2ADCL_ENA                     0x2000	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_MASK                0x2000	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_SHIFT                   13	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_WIDTH                    1	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCR_ENA                     0x1000	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_MASK                0x1000	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_SHIFT                   12	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_WIDTH                    1	/* AIF2ADCR_ENA */
+#define WM8995_AIF1ADC2L_ENA                    0x0800	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_MASK               0x0800	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_SHIFT                  11	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_WIDTH                   1	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2R_ENA                    0x0400	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_MASK               0x0400	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_SHIFT                  10	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_WIDTH                   1	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC1L_ENA                    0x0200	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_MASK               0x0200	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_SHIFT                   9	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_WIDTH                   1	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1R_ENA                    0x0100	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_MASK               0x0100	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_SHIFT                   8	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_WIDTH                   1	/* AIF1ADC1R_ENA */
+#define WM8995_DMIC3L_ENA                       0x0080	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_MASK                  0x0080	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_SHIFT                      7	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_WIDTH                      1	/* DMIC3L_ENA */
+#define WM8995_DMIC3R_ENA                       0x0040	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_MASK                  0x0040	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_SHIFT                      6	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_WIDTH                      1	/* DMIC3R_ENA */
+#define WM8995_DMIC2L_ENA                       0x0020	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_MASK                  0x0020	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_SHIFT                      5	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_WIDTH                      1	/* DMIC2L_ENA */
+#define WM8995_DMIC2R_ENA                       0x0010	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_MASK                  0x0010	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_SHIFT                      4	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_WIDTH                      1	/* DMIC2R_ENA */
+#define WM8995_DMIC1L_ENA                       0x0008	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_MASK                  0x0008	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_SHIFT                      3	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_WIDTH                      1	/* DMIC1L_ENA */
+#define WM8995_DMIC1R_ENA                       0x0004	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_MASK                  0x0004	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_SHIFT                      2	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_WIDTH                      1	/* DMIC1R_ENA */
+#define WM8995_ADCL_ENA                         0x0002	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_MASK                    0x0002	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_SHIFT                        1	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_WIDTH                        1	/* ADCL_ENA */
+#define WM8995_ADCR_ENA                         0x0001	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_MASK                    0x0001	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_SHIFT                        0	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_WIDTH                        1	/* ADCR_ENA */
+
+/*
+ * R4 (0x04) - Power Management (4)
+ */
+#define WM8995_AIF2DACL_ENA                     0x2000	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_MASK                0x2000	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_SHIFT                   13	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_WIDTH                    1	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACR_ENA                     0x1000	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_MASK                0x1000	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_SHIFT                   12	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_WIDTH                    1	/* AIF2DACR_ENA */
+#define WM8995_AIF1DAC2L_ENA                    0x0800	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_MASK               0x0800	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_SHIFT                  11	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_WIDTH                   1	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2R_ENA                    0x0400	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_MASK               0x0400	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_SHIFT                  10	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_WIDTH                   1	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC1L_ENA                    0x0200	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_MASK               0x0200	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_SHIFT                   9	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_WIDTH                   1	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1R_ENA                    0x0100	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_MASK               0x0100	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_SHIFT                   8	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_WIDTH                   1	/* AIF1DAC1R_ENA */
+#define WM8995_DAC2L_ENA                        0x0008	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_MASK                   0x0008	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_SHIFT                       3	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_WIDTH                       1	/* DAC2L_ENA */
+#define WM8995_DAC2R_ENA                        0x0004	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_MASK                   0x0004	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_SHIFT                       2	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_WIDTH                       1	/* DAC2R_ENA */
+#define WM8995_DAC1L_ENA                        0x0002	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_MASK                   0x0002	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_SHIFT                       1	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_WIDTH                       1	/* DAC1L_ENA */
+#define WM8995_DAC1R_ENA                        0x0001	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_MASK                   0x0001	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_SHIFT                       0	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_WIDTH                       1	/* DAC1R_ENA */
+
+/*
+ * R5 (0x05) - Power Management (5)
+ */
+#define WM8995_DMIC_SRC2_MASK                   0x0300	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC2_SHIFT                       8	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC2_WIDTH                       2	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC1_MASK                   0x00C0	/* DMIC_SRC1 - [7:6] */
+#define WM8995_DMIC_SRC1_SHIFT                       6	/* DMIC_SRC1 - [7:6] */
+#define WM8995_DMIC_SRC1_WIDTH                       2	/* DMIC_SRC1 - [7:6] */
+#define WM8995_AIF3_TRI                         0x0020	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_MASK                    0x0020	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_SHIFT                        5	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_WIDTH                        1	/* AIF3_TRI */
+#define WM8995_AIF3_ADCDAT_SRC_MASK             0x0018	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF3_ADCDAT_SRC_SHIFT                 3	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF3_ADCDAT_SRC_WIDTH                 2	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF2_ADCDAT_SRC                  0x0004	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_MASK             0x0004	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_SHIFT                 2	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_WIDTH                 1	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC                  0x0002	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_MASK             0x0002	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_SHIFT                 1	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_WIDTH                 1	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC                  0x0001	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_MASK             0x0001	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_SHIFT                 0	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_WIDTH                 1	/* AIF1_DACDAT_SRC */
+
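[Editor's note, not part of the patch: every bit-field above is described by a value/_MASK/_SHIFT/_WIDTH quartet (single-bit fields also get a bare flag define equal to the mask). A minimal, self-contained sketch of the read-modify-write pattern these pairs are meant for is shown below; the set_field() helper and the register value are illustrative assumptions only. In the driver proper this is normally done through the ASoC register update helpers rather than open-coded.]

/* Illustrative only -- not part of the patch. Generic read-modify-write
 * of one field of a 16-bit register image using its *_MASK / *_SHIFT pair. */
#include <stdint.h>
#include <stdio.h>

#define WM8995_DMIC_SRC1_MASK   0x00C0  /* DMIC_SRC1 - [7:6], as defined above */
#define WM8995_DMIC_SRC1_SHIFT       6

/* Replace the bits selected by 'mask' with 'val' shifted into position. */
static uint16_t set_field(uint16_t reg, uint16_t mask, unsigned int shift,
			  uint16_t val)
{
	return (uint16_t)((reg & ~mask) | ((uint16_t)(val << shift) & mask));
}

int main(void)
{
	uint16_t r5 = 0x0000;	/* hypothetical cached value of R5 (0x05) */

	r5 = set_field(r5, WM8995_DMIC_SRC1_MASK, WM8995_DMIC_SRC1_SHIFT, 0x2);
	printf("R5 = 0x%04x\n", r5);	/* prints: R5 = 0x0080 */
	return 0;
}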
+/*
+ * R16 (0x10) - Left Line Input 1 Volume
+ */
+#define WM8995_IN1_VU                           0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_MASK                      0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_SHIFT                          7	/* IN1_VU */
+#define WM8995_IN1_VU_WIDTH                          1	/* IN1_VU */
+#define WM8995_IN1L_ZC                          0x0020	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_MASK                     0x0020	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_SHIFT                         5	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_WIDTH                         1	/* IN1L_ZC */
+#define WM8995_IN1L_VOL_MASK                    0x001F	/* IN1L_VOL - [4:0] */
+#define WM8995_IN1L_VOL_SHIFT                        0	/* IN1L_VOL - [4:0] */
+#define WM8995_IN1L_VOL_WIDTH                        5	/* IN1L_VOL - [4:0] */
+
+/*
+ * R17 (0x11) - Right Line Input 1 Volume
+ */
+#define WM8995_IN1_VU                           0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_MASK                      0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_SHIFT                          7	/* IN1_VU */
+#define WM8995_IN1_VU_WIDTH                          1	/* IN1_VU */
+#define WM8995_IN1R_ZC                          0x0020	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_MASK                     0x0020	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_SHIFT                         5	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_WIDTH                         1	/* IN1R_ZC */
+#define WM8995_IN1R_VOL_MASK                    0x001F	/* IN1R_VOL - [4:0] */
+#define WM8995_IN1R_VOL_SHIFT                        0	/* IN1R_VOL - [4:0] */
+#define WM8995_IN1R_VOL_WIDTH                        5	/* IN1R_VOL - [4:0] */
+
+/*
+ * R18 (0x12) - Left Line Input Control
+ */
+#define WM8995_IN1L_BOOST_MASK                  0x0030	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_BOOST_SHIFT                      4	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_BOOST_WIDTH                      2	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_MODE_MASK                   0x000C	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1L_MODE_SHIFT                       2	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1L_MODE_WIDTH                       2	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1R_MODE_MASK                   0x0003	/* IN1R_MODE - [1:0] */
+#define WM8995_IN1R_MODE_SHIFT                       0	/* IN1R_MODE - [1:0] */
+#define WM8995_IN1R_MODE_WIDTH                       2	/* IN1R_MODE - [1:0] */
+
+/*
+ * R24 (0x18) - DAC1 Left Volume
+ */
+#define WM8995_DAC1L_MUTE                       0x0200	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_MASK                  0x0200	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_SHIFT                      9	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_WIDTH                      1	/* DAC1L_MUTE */
+#define WM8995_DAC1_VU                          0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_MASK                     0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_SHIFT                         8	/* DAC1_VU */
+#define WM8995_DAC1_VU_WIDTH                         1	/* DAC1_VU */
+#define WM8995_DAC1L_VOL_MASK                   0x00FF	/* DAC1L_VOL - [7:0] */
+#define WM8995_DAC1L_VOL_SHIFT                       0	/* DAC1L_VOL - [7:0] */
+#define WM8995_DAC1L_VOL_WIDTH                       8	/* DAC1L_VOL - [7:0] */
+
+/*
+ * R25 (0x19) - DAC1 Right Volume
+ */
+#define WM8995_DAC1R_MUTE                       0x0200	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_MASK                  0x0200	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_SHIFT                      9	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_WIDTH                      1	/* DAC1R_MUTE */
+#define WM8995_DAC1_VU                          0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_MASK                     0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_SHIFT                         8	/* DAC1_VU */
+#define WM8995_DAC1_VU_WIDTH                         1	/* DAC1_VU */
+#define WM8995_DAC1R_VOL_MASK                   0x00FF	/* DAC1R_VOL - [7:0] */
+#define WM8995_DAC1R_VOL_SHIFT                       0	/* DAC1R_VOL - [7:0] */
+#define WM8995_DAC1R_VOL_WIDTH                       8	/* DAC1R_VOL - [7:0] */
+
+/*
+ * R26 (0x1A) - DAC2 Left Volume
+ */
+#define WM8995_DAC2L_MUTE                       0x0200	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_MASK                  0x0200	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_SHIFT                      9	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_WIDTH                      1	/* DAC2L_MUTE */
+#define WM8995_DAC2_VU                          0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_MASK                     0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_SHIFT                         8	/* DAC2_VU */
+#define WM8995_DAC2_VU_WIDTH                         1	/* DAC2_VU */
+#define WM8995_DAC2L_VOL_MASK                   0x00FF	/* DAC2L_VOL - [7:0] */
+#define WM8995_DAC2L_VOL_SHIFT                       0	/* DAC2L_VOL - [7:0] */
+#define WM8995_DAC2L_VOL_WIDTH                       8	/* DAC2L_VOL - [7:0] */
+
+/*
+ * R27 (0x1B) - DAC2 Right Volume
+ */
+#define WM8995_DAC2R_MUTE                       0x0200	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_MASK                  0x0200	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_SHIFT                      9	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_WIDTH                      1	/* DAC2R_MUTE */
+#define WM8995_DAC2_VU                          0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_MASK                     0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_SHIFT                         8	/* DAC2_VU */
+#define WM8995_DAC2_VU_WIDTH                         1	/* DAC2_VU */
+#define WM8995_DAC2R_VOL_MASK                   0x00FF	/* DAC2R_VOL - [7:0] */
+#define WM8995_DAC2R_VOL_SHIFT                       0	/* DAC2R_VOL - [7:0] */
+#define WM8995_DAC2R_VOL_WIDTH                       8	/* DAC2R_VOL - [7:0] */
+
+/*
+ * R28 (0x1C) - Output Volume ZC (1)
+ */
+#define WM8995_HPOUT2L_ZC                       0x0008	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_MASK                  0x0008	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_SHIFT                      3	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_WIDTH                      1	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2R_ZC                       0x0004	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_MASK                  0x0004	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_SHIFT                      2	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_WIDTH                      1	/* HPOUT2R_ZC */
+#define WM8995_HPOUT1L_ZC                       0x0002	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_MASK                  0x0002	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_SHIFT                      1	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_WIDTH                      1	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1R_ZC                       0x0001	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_MASK                  0x0001	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_SHIFT                      0	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_WIDTH                      1	/* HPOUT1R_ZC */
+
+/*
+ * R32 (0x20) - MICBIAS (1)
+ */
+#define WM8995_MICB1_MODE                       0x0008	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_MASK                  0x0008	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_SHIFT                      3	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_WIDTH                      1	/* MICB1_MODE */
+#define WM8995_MICB1_LVL_MASK                   0x0006	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_LVL_SHIFT                       1	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_LVL_WIDTH                       2	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_DISCH                      0x0001	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_MASK                 0x0001	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_SHIFT                     0	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_WIDTH                     1	/* MICB1_DISCH */
+
+/*
+ * R33 (0x21) - MICBIAS (2)
+ */
+#define WM8995_MICB2_MODE                       0x0008	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_MASK                  0x0008	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_SHIFT                      3	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_WIDTH                      1	/* MICB2_MODE */
+#define WM8995_MICB2_LVL_MASK                   0x0006	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_LVL_SHIFT                       1	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_LVL_WIDTH                       2	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_DISCH                      0x0001	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_MASK                 0x0001	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_SHIFT                     0	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_WIDTH                     1	/* MICB2_DISCH */
+
+/*
+ * R40 (0x28) - LDO 1
+ */
+#define WM8995_LDO1_MODE                        0x0020	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_MASK                   0x0020	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_SHIFT                       5	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_WIDTH                       1	/* LDO1_MODE */
+#define WM8995_LDO1_VSEL_MASK                   0x0006	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_VSEL_SHIFT                       1	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_VSEL_WIDTH                       2	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_DISCH                       0x0001	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_MASK                  0x0001	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_SHIFT                      0	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_WIDTH                      1	/* LDO1_DISCH */
+
+/*
+ * R41 (0x29) - LDO 2
+ */
+#define WM8995_LDO2_MODE                        0x0020	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_MASK                   0x0020	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_SHIFT                       5	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_WIDTH                       1	/* LDO2_MODE */
+#define WM8995_LDO2_VSEL_MASK                   0x001E	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_VSEL_SHIFT                       1	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_VSEL_WIDTH                       4	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_DISCH                       0x0001	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_MASK                  0x0001	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_SHIFT                      0	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_WIDTH                      1	/* LDO2_DISCH */
+
+/*
+ * R48 (0x30) - Accessory Detect Mode1
+ */
+#define WM8995_JD_MODE_MASK                     0x0003	/* JD_MODE - [1:0] */
+#define WM8995_JD_MODE_SHIFT                         0	/* JD_MODE - [1:0] */
+#define WM8995_JD_MODE_WIDTH                         2	/* JD_MODE - [1:0] */
+
+/*
+ * R49 (0x31) - Accessory Detect Mode2
+ */
+#define WM8995_VID_ENA                          0x0001	/* VID_ENA */
+#define WM8995_VID_ENA_MASK                     0x0001	/* VID_ENA */
+#define WM8995_VID_ENA_SHIFT                         0	/* VID_ENA */
+#define WM8995_VID_ENA_WIDTH                         1	/* VID_ENA */
+
+/*
+ * R52 (0x34) - Headphone Detect1
+ */
+#define WM8995_HP_RAMPRATE                      0x0002	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_MASK                 0x0002	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_SHIFT                     1	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_WIDTH                     1	/* HP_RAMPRATE */
+#define WM8995_HP_POLL                          0x0001	/* HP_POLL */
+#define WM8995_HP_POLL_MASK                     0x0001	/* HP_POLL */
+#define WM8995_HP_POLL_SHIFT                         0	/* HP_POLL */
+#define WM8995_HP_POLL_WIDTH                         1	/* HP_POLL */
+
+/*
+ * R53 (0x35) - Headphone Detect2
+ */
+#define WM8995_HP_DONE                          0x0080	/* HP_DONE */
+#define WM8995_HP_DONE_MASK                     0x0080	/* HP_DONE */
+#define WM8995_HP_DONE_SHIFT                         7	/* HP_DONE */
+#define WM8995_HP_DONE_WIDTH                         1	/* HP_DONE */
+#define WM8995_HP_LVL_MASK                      0x007F	/* HP_LVL - [6:0] */
+#define WM8995_HP_LVL_SHIFT                          0	/* HP_LVL - [6:0] */
+#define WM8995_HP_LVL_WIDTH                          7	/* HP_LVL - [6:0] */
+
+/*
+ * R56 (0x38) - Mic Detect (1)
+ */
+#define WM8995_MICD_RATE_MASK                   0x7800	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_RATE_SHIFT                      11	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_RATE_WIDTH                       4	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_LVL_SEL_MASK                0x01F8	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_LVL_SEL_SHIFT                    3	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_LVL_SEL_WIDTH                    6	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_DBTIME                      0x0002	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_MASK                 0x0002	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_SHIFT                     1	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_WIDTH                     1	/* MICD_DBTIME */
+#define WM8995_MICD_ENA                         0x0001	/* MICD_ENA */
+#define WM8995_MICD_ENA_MASK                    0x0001	/* MICD_ENA */
+#define WM8995_MICD_ENA_SHIFT                        0	/* MICD_ENA */
+#define WM8995_MICD_ENA_WIDTH                        1	/* MICD_ENA */
+
+/*
+ * R57 (0x39) - Mic Detect (2)
+ */
+#define WM8995_MICD_LVL_MASK                    0x01FC	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_LVL_SHIFT                        2	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_LVL_WIDTH                        7	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_VALID                       0x0002	/* MICD_VALID */
+#define WM8995_MICD_VALID_MASK                  0x0002	/* MICD_VALID */
+#define WM8995_MICD_VALID_SHIFT                      1	/* MICD_VALID */
+#define WM8995_MICD_VALID_WIDTH                      1	/* MICD_VALID */
+#define WM8995_MICD_STS                         0x0001	/* MICD_STS */
+#define WM8995_MICD_STS_MASK                    0x0001	/* MICD_STS */
+#define WM8995_MICD_STS_SHIFT                        0	/* MICD_STS */
+#define WM8995_MICD_STS_WIDTH                        1	/* MICD_STS */
+
+/*
+ * R64 (0x40) - Charge Pump (1)
+ */
+#define WM8995_CP_ENA                           0x8000	/* CP_ENA */
+#define WM8995_CP_ENA_MASK                      0x8000	/* CP_ENA */
+#define WM8995_CP_ENA_SHIFT                         15	/* CP_ENA */
+#define WM8995_CP_ENA_WIDTH                          1	/* CP_ENA */
+
+/*
+ * R69 (0x45) - Class W (1)
+ */
+#define WM8995_CP_DYN_SRC_SEL_MASK              0x0300	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_SRC_SEL_SHIFT                  8	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_SRC_SEL_WIDTH                  2	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_PWR                       0x0001	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_MASK                  0x0001	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_SHIFT                      0	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_WIDTH                      1	/* CP_DYN_PWR */
+
+/*
+ * R80 (0x50) - DC Servo (1)
+ */
+#define WM8995_DCS_ENA_CHAN_3                   0x0008	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_MASK              0x0008	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_SHIFT                  3	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_WIDTH                  1	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_2                   0x0004	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_MASK              0x0004	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_SHIFT                  2	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_WIDTH                  1	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_1                   0x0002	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_MASK              0x0002	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_SHIFT                  1	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_WIDTH                  1	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_0                   0x0001	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_MASK              0x0001	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_SHIFT                  0	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_WIDTH                  1	/* DCS_ENA_CHAN_0 */
+
+/*
+ * R81 (0x51) - DC Servo (2)
+ */
+#define WM8995_DCS_TRIG_SINGLE_3                0x8000	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_MASK           0x8000	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_SHIFT              15	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_WIDTH               1	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_2                0x4000	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_MASK           0x4000	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_SHIFT              14	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_WIDTH               1	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_1                0x2000	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_MASK           0x2000	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_SHIFT              13	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_WIDTH               1	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_0                0x1000	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_MASK           0x1000	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_SHIFT              12	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_WIDTH               1	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SERIES_3                0x0800	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_MASK           0x0800	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_SHIFT              11	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_WIDTH               1	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_2                0x0400	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_MASK           0x0400	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_SHIFT              10	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_WIDTH               1	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_1                0x0200	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_MASK           0x0200	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_SHIFT               9	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_WIDTH               1	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_0                0x0100	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_MASK           0x0100	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_SHIFT               8	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_WIDTH               1	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_STARTUP_3               0x0080	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_MASK          0x0080	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_SHIFT              7	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_WIDTH              1	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_2               0x0040	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_MASK          0x0040	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_SHIFT              6	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_WIDTH              1	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_1               0x0020	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_MASK          0x0020	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_SHIFT              5	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_WIDTH              1	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_0               0x0010	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_MASK          0x0010	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_SHIFT              4	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_WIDTH              1	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_DAC_WR_3                0x0008	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_MASK           0x0008	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_SHIFT               3	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_WIDTH               1	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_2                0x0004	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_MASK           0x0004	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_SHIFT               2	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_WIDTH               1	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_1                0x0002	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_MASK           0x0002	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_SHIFT               1	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_WIDTH               1	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_0                0x0001	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_MASK           0x0001	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_SHIFT               0	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_WIDTH               1	/* DCS_TRIG_DAC_WR_0 */
+
+/*
+ * R82 (0x52) - DC Servo (3)
+ */
+#define WM8995_DCS_TIMER_PERIOD_23_MASK         0x0F00	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_23_SHIFT             8	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_23_WIDTH             4	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_01_MASK         0x000F	/* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8995_DCS_TIMER_PERIOD_01_SHIFT             0	/* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8995_DCS_TIMER_PERIOD_01_WIDTH             4	/* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R84 (0x54) - DC Servo (5)
+ */
+#define WM8995_DCS_SERIES_NO_23_MASK            0x7F00	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_23_SHIFT                8	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_23_WIDTH                7	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_01_MASK            0x007F	/* DCS_SERIES_NO_01 - [6:0] */
+#define WM8995_DCS_SERIES_NO_01_SHIFT                0	/* DCS_SERIES_NO_01 - [6:0] */
+#define WM8995_DCS_SERIES_NO_01_WIDTH                7	/* DCS_SERIES_NO_01 - [6:0] */
+
+/*
+ * R85 (0x55) - DC Servo (6)
+ */
+#define WM8995_DCS_DAC_WR_VAL_3_MASK            0xFF00	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_3_SHIFT                8	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_3_WIDTH                8	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_2_MASK            0x00FF	/* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_2_SHIFT                0	/* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_2_WIDTH                8	/* DCS_DAC_WR_VAL_2 - [7:0] */
+
+/*
+ * R86 (0x56) - DC Servo (7)
+ */
+#define WM8995_DCS_DAC_WR_VAL_1_MASK            0xFF00	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_1_SHIFT                8	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_1_WIDTH                8	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_0_MASK            0x00FF	/* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_0_SHIFT                0	/* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_0_WIDTH                8	/* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R87 (0x57) - DC Servo Readback 0
+ */
+#define WM8995_DCS_CAL_COMPLETE_MASK            0x0F00	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_CAL_COMPLETE_SHIFT                8	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_CAL_COMPLETE_WIDTH                4	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_DAC_WR_COMPLETE_MASK         0x00F0	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_DAC_WR_COMPLETE_SHIFT             4	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_DAC_WR_COMPLETE_WIDTH             4	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_STARTUP_COMPLETE_MASK        0x000F	/* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8995_DCS_STARTUP_COMPLETE_SHIFT            0	/* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8995_DCS_STARTUP_COMPLETE_WIDTH            4	/* DCS_STARTUP_COMPLETE - [3:0] */
+
+/*
+ * R96 (0x60) - Analogue HP (1)
+ */
+#define WM8995_HPOUT1L_RMV_SHORT                0x0080	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_MASK           0x0080	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_SHIFT               7	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_WIDTH               1	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_OUTP                     0x0040	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_MASK                0x0040	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_SHIFT                    6	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_WIDTH                    1	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_DLY                      0x0020	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_MASK                 0x0020	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_SHIFT                     5	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_WIDTH                     1	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1R_RMV_SHORT                0x0008	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_MASK           0x0008	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_SHIFT               3	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_WIDTH               1	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_OUTP                     0x0004	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_MASK                0x0004	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_SHIFT                    2	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_WIDTH                    1	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_DLY                      0x0002	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_MASK                 0x0002	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_SHIFT                     1	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_WIDTH                     1	/* HPOUT1R_DLY */
+
+/*
+ * R97 (0x61) - Analogue HP (2)
+ */
+#define WM8995_HPOUT2L_RMV_SHORT                0x0080	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_MASK           0x0080	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_SHIFT               7	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_WIDTH               1	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_OUTP                     0x0040	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_MASK                0x0040	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_SHIFT                    6	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_WIDTH                    1	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_DLY                      0x0020	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_MASK                 0x0020	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_SHIFT                     5	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_WIDTH                     1	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2R_RMV_SHORT                0x0008	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_MASK           0x0008	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_SHIFT               3	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_WIDTH               1	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_OUTP                     0x0004	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_MASK                0x0004	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_SHIFT                    2	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_WIDTH                    1	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_DLY                      0x0002	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_MASK                 0x0002	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_SHIFT                     1	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_WIDTH                     1	/* HPOUT2R_DLY */
+
+/*
+ * R256 (0x100) - Chip Revision
+ */
+#define WM8995_CHIP_REV_MASK                    0x000F	/* CHIP_REV - [3:0] */
+#define WM8995_CHIP_REV_SHIFT                        0	/* CHIP_REV - [3:0] */
+#define WM8995_CHIP_REV_WIDTH                        4	/* CHIP_REV - [3:0] */
+
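[Editor's note, not part of the patch: read-only status fields such as CHIP_REV follow the same convention; a hypothetical sketch of decoding one from a raw register read (the raw value below is made up) is:]

/* Illustrative only -- not part of the patch. Extracting a read-only
 * field with its *_MASK / *_SHIFT pair. */
#include <stdint.h>
#include <stdio.h>

#define WM8995_CHIP_REV_MASK    0x000F  /* CHIP_REV - [3:0], as defined above */
#define WM8995_CHIP_REV_SHIFT        0

int main(void)
{
	uint16_t r256 = 0x0001;	/* made-up raw read of R256 (0x100) */
	unsigned int rev = (r256 & WM8995_CHIP_REV_MASK) >> WM8995_CHIP_REV_SHIFT;

	printf("chip revision %u\n", rev);	/* prints: chip revision 1 */
	return 0;
}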
+/*
+ * R257 (0x101) - Control Interface (1)
+ */
+#define WM8995_REG_SYNC                         0x8000	/* REG_SYNC */
+#define WM8995_REG_SYNC_MASK                    0x8000	/* REG_SYNC */
+#define WM8995_REG_SYNC_SHIFT                       15	/* REG_SYNC */
+#define WM8995_REG_SYNC_WIDTH                        1	/* REG_SYNC */
+#define WM8995_SPI_CONTRD                       0x0040	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_MASK                  0x0040	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_SHIFT                      6	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_WIDTH                      1	/* SPI_CONTRD */
+#define WM8995_SPI_4WIRE                        0x0020	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_MASK                   0x0020	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_SHIFT                       5	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_WIDTH                       1	/* SPI_4WIRE */
+#define WM8995_SPI_CFG                          0x0010	/* SPI_CFG */
+#define WM8995_SPI_CFG_MASK                     0x0010	/* SPI_CFG */
+#define WM8995_SPI_CFG_SHIFT                         4	/* SPI_CFG */
+#define WM8995_SPI_CFG_WIDTH                         1	/* SPI_CFG */
+#define WM8995_AUTO_INC                         0x0004	/* AUTO_INC */
+#define WM8995_AUTO_INC_MASK                    0x0004	/* AUTO_INC */
+#define WM8995_AUTO_INC_SHIFT                        2	/* AUTO_INC */
+#define WM8995_AUTO_INC_WIDTH                        1	/* AUTO_INC */
+
+/*
+ * R258 (0x102) - Control Interface (2)
+ */
+#define WM8995_CTRL_IF_SRC                      0x0001	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_MASK                 0x0001	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_SHIFT                     0	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_WIDTH                     1	/* CTRL_IF_SRC */
+
+/*
+ * R272 (0x110) - Write Sequencer Ctrl (1)
+ */
+#define WM8995_WSEQ_ENA                         0x8000	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_MASK                    0x8000	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_SHIFT                       15	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_WIDTH                        1	/* WSEQ_ENA */
+#define WM8995_WSEQ_ABORT                       0x0200	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_MASK                  0x0200	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_SHIFT                      9	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_WIDTH                      1	/* WSEQ_ABORT */
+#define WM8995_WSEQ_START                       0x0100	/* WSEQ_START */
+#define WM8995_WSEQ_START_MASK                  0x0100	/* WSEQ_START */
+#define WM8995_WSEQ_START_SHIFT                      8	/* WSEQ_START */
+#define WM8995_WSEQ_START_WIDTH                      1	/* WSEQ_START */
+#define WM8995_WSEQ_START_INDEX_MASK            0x007F	/* WSEQ_START_INDEX - [6:0] */
+#define WM8995_WSEQ_START_INDEX_SHIFT                0	/* WSEQ_START_INDEX - [6:0] */
+#define WM8995_WSEQ_START_INDEX_WIDTH                7	/* WSEQ_START_INDEX - [6:0] */
+
+/*
+ * R273 (0x111) - Write Sequencer Ctrl (2)
+ */
+#define WM8995_WSEQ_BUSY                        0x0100	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_MASK                   0x0100	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_SHIFT                       8	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_WIDTH                       1	/* WSEQ_BUSY */
+#define WM8995_WSEQ_CURRENT_INDEX_MASK          0x007F	/* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8995_WSEQ_CURRENT_INDEX_SHIFT              0	/* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8995_WSEQ_CURRENT_INDEX_WIDTH              7	/* WSEQ_CURRENT_INDEX - [6:0] */
+
+/*
+ * R512 (0x200) - AIF1 Clocking (1)
+ */
+#define WM8995_AIF1CLK_SRC_MASK                 0x0018	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_SRC_SHIFT                     3	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_SRC_WIDTH                     2	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_INV                      0x0004	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_MASK                 0x0004	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_SHIFT                     2	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_WIDTH                     1	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_DIV                      0x0002	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_MASK                 0x0002	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_SHIFT                     1	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_WIDTH                     1	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_ENA                      0x0001	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_MASK                 0x0001	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_SHIFT                     0	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_WIDTH                     1	/* AIF1CLK_ENA */
+
+/*
+ * R513 (0x201) - AIF1 Clocking (2)
+ */
+#define WM8995_AIF1DAC_DIV_MASK                 0x0038	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1DAC_DIV_SHIFT                     3	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1DAC_DIV_WIDTH                     3	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1ADC_DIV_MASK                 0x0007	/* AIF1ADC_DIV - [2:0] */
+#define WM8995_AIF1ADC_DIV_SHIFT                     0	/* AIF1ADC_DIV - [2:0] */
+#define WM8995_AIF1ADC_DIV_WIDTH                     3	/* AIF1ADC_DIV - [2:0] */
+
+/*
+ * R516 (0x204) - AIF2 Clocking (1)
+ */
+#define WM8995_AIF2CLK_SRC_MASK                 0x0018	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_SRC_SHIFT                     3	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_SRC_WIDTH                     2	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_INV                      0x0004	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_MASK                 0x0004	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_SHIFT                     2	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_WIDTH                     1	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_DIV                      0x0002	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_MASK                 0x0002	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_SHIFT                     1	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_WIDTH                     1	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_ENA                      0x0001	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_MASK                 0x0001	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_SHIFT                     0	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_WIDTH                     1	/* AIF2CLK_ENA */
+
+/*
+ * R517 (0x205) - AIF2 Clocking (2)
+ */
+#define WM8995_AIF2DAC_DIV_MASK                 0x0038	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2DAC_DIV_SHIFT                     3	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2DAC_DIV_WIDTH                     3	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2ADC_DIV_MASK                 0x0007	/* AIF2ADC_DIV - [2:0] */
+#define WM8995_AIF2ADC_DIV_SHIFT                     0	/* AIF2ADC_DIV - [2:0] */
+#define WM8995_AIF2ADC_DIV_WIDTH                     3	/* AIF2ADC_DIV - [2:0] */
+
+/*
+ * R520 (0x208) - Clocking (1)
+ */
+#define WM8995_LFCLK_ENA                        0x0020	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_MASK                   0x0020	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_SHIFT                       5	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_WIDTH                       1	/* LFCLK_ENA */
+#define WM8995_TOCLK_ENA                        0x0010	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_MASK                   0x0010	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_SHIFT                       4	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_WIDTH                       1	/* TOCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA                   0x0008	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_MASK              0x0008	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_SHIFT                  3	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_WIDTH                  1	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA                   0x0004	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_MASK              0x0004	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_SHIFT                  2	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_WIDTH                  1	/* AIF2DSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA                    0x0002	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_MASK               0x0002	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_SHIFT                   1	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_WIDTH                   1	/* SYSDSPCLK_ENA */
+#define WM8995_SYSCLK_SRC                       0x0001	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_MASK                  0x0001	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_SHIFT                      0	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_WIDTH                      1	/* SYSCLK_SRC */
+
+/*
+ * R521 (0x209) - Clocking (2)
+ */
+#define WM8995_TOCLK_DIV_MASK                   0x0700	/* TOCLK_DIV - [10:8] */
+#define WM8995_TOCLK_DIV_SHIFT                       8	/* TOCLK_DIV - [10:8] */
+#define WM8995_TOCLK_DIV_WIDTH                       3	/* TOCLK_DIV - [10:8] */
+#define WM8995_DBCLK_DIV_MASK                   0x00F0	/* DBCLK_DIV - [7:4] */
+#define WM8995_DBCLK_DIV_SHIFT                       4	/* DBCLK_DIV - [7:4] */
+#define WM8995_DBCLK_DIV_WIDTH                       4	/* DBCLK_DIV - [7:4] */
+#define WM8995_OPCLK_DIV_MASK                   0x0007	/* OPCLK_DIV - [2:0] */
+#define WM8995_OPCLK_DIV_SHIFT                       0	/* OPCLK_DIV - [2:0] */
+#define WM8995_OPCLK_DIV_WIDTH                       3	/* OPCLK_DIV - [2:0] */
+
+/*
+ * R528 (0x210) - AIF1 Rate
+ */
+#define WM8995_AIF1_SR_MASK                     0x00F0	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1_SR_SHIFT                         4	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1_SR_WIDTH                         4	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1CLK_RATE_MASK                0x000F	/* AIF1CLK_RATE - [3:0] */
+#define WM8995_AIF1CLK_RATE_SHIFT                    0	/* AIF1CLK_RATE - [3:0] */
+#define WM8995_AIF1CLK_RATE_WIDTH                    4	/* AIF1CLK_RATE - [3:0] */
+
+/*
+ * R529 (0x211) - AIF2 Rate
+ */
+#define WM8995_AIF2_SR_MASK                     0x00F0	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2_SR_SHIFT                         4	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2_SR_WIDTH                         4	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2CLK_RATE_MASK                0x000F	/* AIF2CLK_RATE - [3:0] */
+#define WM8995_AIF2CLK_RATE_SHIFT                    0	/* AIF2CLK_RATE - [3:0] */
+#define WM8995_AIF2CLK_RATE_WIDTH                    4	/* AIF2CLK_RATE - [3:0] */
+
+/*
+ * R530 (0x212) - Rate Status
+ */
+#define WM8995_SR_ERROR_MASK                    0x000F	/* SR_ERROR - [3:0] */
+#define WM8995_SR_ERROR_SHIFT                        0	/* SR_ERROR - [3:0] */
+#define WM8995_SR_ERROR_WIDTH                        4	/* SR_ERROR - [3:0] */
+
+/*
+ * R544 (0x220) - FLL1 Control (1)
+ */
+#define WM8995_FLL1_OSC_ENA                     0x0002	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_MASK                0x0002	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_SHIFT                    1	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_WIDTH                    1	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_ENA                         0x0001	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_MASK                    0x0001	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_SHIFT                        0	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_WIDTH                        1	/* FLL1_ENA */
+
+/*
+ * R545 (0x221) - FLL1 Control (2)
+ */
+#define WM8995_FLL1_OUTDIV_MASK                 0x3F00	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_OUTDIV_SHIFT                     8	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_OUTDIV_WIDTH                     6	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_CTRL_RATE_MASK              0x0070	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_CTRL_RATE_SHIFT                  4	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_CTRL_RATE_WIDTH                  3	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_FRATIO_MASK                 0x0007	/* FLL1_FRATIO - [2:0] */
+#define WM8995_FLL1_FRATIO_SHIFT                     0	/* FLL1_FRATIO - [2:0] */
+#define WM8995_FLL1_FRATIO_WIDTH                     3	/* FLL1_FRATIO - [2:0] */
+
+/*
+ * R546 (0x222) - FLL1 Control (3)
+ */
+#define WM8995_FLL1_K_MASK                      0xFFFF	/* FLL1_K - [15:0] */
+#define WM8995_FLL1_K_SHIFT                          0	/* FLL1_K - [15:0] */
+#define WM8995_FLL1_K_WIDTH                         16	/* FLL1_K - [15:0] */
+
+/*
+ * R547 (0x223) - FLL1 Control (4)
+ */
+#define WM8995_FLL1_N_MASK                      0x7FE0	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_N_SHIFT                          5	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_N_WIDTH                         10	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_LOOP_GAIN_MASK              0x000F	/* FLL1_LOOP_GAIN - [3:0] */
+#define WM8995_FLL1_LOOP_GAIN_SHIFT                  0	/* FLL1_LOOP_GAIN - [3:0] */
+#define WM8995_FLL1_LOOP_GAIN_WIDTH                  4	/* FLL1_LOOP_GAIN - [3:0] */
+
+/*
+ * R548 (0x224) - FLL1 Control (5)
+ */
+#define WM8995_FLL1_FRC_NCO_VAL_MASK            0x1F80	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO_VAL_SHIFT                7	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO_VAL_WIDTH                6	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO                     0x0040	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_MASK                0x0040	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_SHIFT                    6	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_WIDTH                    1	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_REFCLK_DIV_MASK             0x0018	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_DIV_SHIFT                 3	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_DIV_WIDTH                 2	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_SRC_MASK             0x0003	/* FLL1_REFCLK_SRC - [1:0] */
+#define WM8995_FLL1_REFCLK_SRC_SHIFT                 0	/* FLL1_REFCLK_SRC - [1:0] */
+#define WM8995_FLL1_REFCLK_SRC_WIDTH                 2	/* FLL1_REFCLK_SRC - [1:0] */
+
+/*
+ * R576 (0x240) - FLL2 Control (1)
+ */
+#define WM8995_FLL2_OSC_ENA                     0x0002	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_MASK                0x0002	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_SHIFT                    1	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_WIDTH                    1	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_ENA                         0x0001	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_MASK                    0x0001	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_SHIFT                        0	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_WIDTH                        1	/* FLL2_ENA */
+
+/*
+ * R577 (0x241) - FLL2 Control (2)
+ */
+#define WM8995_FLL2_OUTDIV_MASK                 0x3F00	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_OUTDIV_SHIFT                     8	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_OUTDIV_WIDTH                     6	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_CTRL_RATE_MASK              0x0070	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_CTRL_RATE_SHIFT                  4	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_CTRL_RATE_WIDTH                  3	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_FRATIO_MASK                 0x0007	/* FLL2_FRATIO - [2:0] */
+#define WM8995_FLL2_FRATIO_SHIFT                     0	/* FLL2_FRATIO - [2:0] */
+#define WM8995_FLL2_FRATIO_WIDTH                     3	/* FLL2_FRATIO - [2:0] */
+
+/*
+ * R578 (0x242) - FLL2 Control (3)
+ */
+#define WM8995_FLL2_K_MASK                      0xFFFF	/* FLL2_K - [15:0] */
+#define WM8995_FLL2_K_SHIFT                          0	/* FLL2_K - [15:0] */
+#define WM8995_FLL2_K_WIDTH                         16	/* FLL2_K - [15:0] */
+
+/*
+ * R579 (0x243) - FLL2 Control (4)
+ */
+#define WM8995_FLL2_N_MASK                      0x7FE0	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_N_SHIFT                          5	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_N_WIDTH                         10	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_LOOP_GAIN_MASK              0x000F	/* FLL2_LOOP_GAIN - [3:0] */
+#define WM8995_FLL2_LOOP_GAIN_SHIFT                  0	/* FLL2_LOOP_GAIN - [3:0] */
+#define WM8995_FLL2_LOOP_GAIN_WIDTH                  4	/* FLL2_LOOP_GAIN - [3:0] */
+
+/*
+ * R580 (0x244) - FLL2 Control (5)
+ */
+#define WM8995_FLL2_FRC_NCO_VAL_MASK            0x1F80	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO_VAL_SHIFT                7	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO_VAL_WIDTH                6	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO                     0x0040	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_MASK                0x0040	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_SHIFT                    6	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_WIDTH                    1	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_REFCLK_DIV_MASK             0x0018	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_DIV_SHIFT                 3	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_DIV_WIDTH                 2	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_SRC_MASK             0x0003	/* FLL2_REFCLK_SRC - [1:0] */
+#define WM8995_FLL2_REFCLK_SRC_SHIFT                 0	/* FLL2_REFCLK_SRC - [1:0] */
+#define WM8995_FLL2_REFCLK_SRC_WIDTH                 2	/* FLL2_REFCLK_SRC - [1:0] */
+
+/*
+ * R768 (0x300) - AIF1 Control (1)
+ */
+#define WM8995_AIF1ADCL_SRC                     0x8000	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_MASK                0x8000	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_SHIFT                   15	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_WIDTH                    1	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCR_SRC                     0x4000	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_MASK                0x4000	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_SHIFT                   14	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_WIDTH                    1	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADC_TDM                      0x2000	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_MASK                 0x2000	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_SHIFT                    13	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_WIDTH                     1	/* AIF1ADC_TDM */
+#define WM8995_AIF1_BCLK_INV                    0x0100	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_MASK               0x0100	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_SHIFT                   8	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_WIDTH                   1	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_LRCLK_INV                   0x0080	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_MASK              0x0080	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_SHIFT                  7	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_WIDTH                  1	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_WL_MASK                     0x0060	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_WL_SHIFT                         5	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_WL_WIDTH                         2	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_FMT_MASK                    0x0018	/* AIF1_FMT - [4:3] */
+#define WM8995_AIF1_FMT_SHIFT                        3	/* AIF1_FMT - [4:3] */
+#define WM8995_AIF1_FMT_WIDTH                        2	/* AIF1_FMT - [4:3] */
+
+/*
+ * R769 (0x301) - AIF1 Control (2)
+ */
+#define WM8995_AIF1DACL_SRC                     0x8000	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_MASK                0x8000	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_SHIFT                   15	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_WIDTH                    1	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACR_SRC                     0x4000	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_MASK                0x4000	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_SHIFT                   14	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_WIDTH                    1	/* AIF1DACR_SRC */
+#define WM8995_AIF1DAC_BOOST_MASK               0x0C00	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_BOOST_SHIFT                  10	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_BOOST_WIDTH                   2	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_COMP                     0x0010	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_MASK                0x0010	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_SHIFT                    4	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_WIDTH                    1	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMPMODE                 0x0008	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_MASK            0x0008	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_SHIFT                3	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_WIDTH                1	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1ADC_COMP                     0x0004	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_MASK                0x0004	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_SHIFT                    2	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_WIDTH                    1	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMPMODE                 0x0002	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_MASK            0x0002	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_SHIFT                1	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_WIDTH                1	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1_LOOPBACK                    0x0001	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_MASK               0x0001	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_SHIFT                   0	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_WIDTH                   1	/* AIF1_LOOPBACK */
+
+/*
+ * R770 (0x302) - AIF1 Master/Slave
+ */
+#define WM8995_AIF1_TRI                         0x8000	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_MASK                    0x8000	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_SHIFT                       15	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_WIDTH                        1	/* AIF1_TRI */
+#define WM8995_AIF1_MSTR                        0x4000	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_MASK                   0x4000	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_SHIFT                      14	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_WIDTH                       1	/* AIF1_MSTR */
+#define WM8995_AIF1_CLK_FRC                     0x2000	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_MASK                0x2000	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_SHIFT                   13	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_WIDTH                    1	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC                   0x1000	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_MASK              0x1000	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_SHIFT                 12	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_WIDTH                  1	/* AIF1_LRCLK_FRC */
+
+/*
+ * R771 (0x303) - AIF1 BCLK
+ */
+#define WM8995_AIF1_BCLK_DIV_MASK               0x00F0	/* AIF1_BCLK_DIV - [7:4] */
+#define WM8995_AIF1_BCLK_DIV_SHIFT                   4	/* AIF1_BCLK_DIV - [7:4] */
+#define WM8995_AIF1_BCLK_DIV_WIDTH                   4	/* AIF1_BCLK_DIV - [7:4] */
+
+/*
+ * R772 (0x304) - AIF1ADC LRCLK
+ */
+#define WM8995_AIF1ADC_LRCLK_DIR                0x0800	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_MASK           0x0800	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_SHIFT              11	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_WIDTH               1	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_RATE_MASK                0x07FF	/* AIF1ADC_RATE - [10:0] */
+#define WM8995_AIF1ADC_RATE_SHIFT                    0	/* AIF1ADC_RATE - [10:0] */
+#define WM8995_AIF1ADC_RATE_WIDTH                   11	/* AIF1ADC_RATE - [10:0] */
+
+/*
+ * R773 (0x305) - AIF1DAC LRCLK
+ */
+#define WM8995_AIF1DAC_LRCLK_DIR                0x0800	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_MASK           0x0800	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_SHIFT              11	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_WIDTH               1	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_RATE_MASK                0x07FF	/* AIF1DAC_RATE - [10:0] */
+#define WM8995_AIF1DAC_RATE_SHIFT                    0	/* AIF1DAC_RATE - [10:0] */
+#define WM8995_AIF1DAC_RATE_WIDTH                   11	/* AIF1DAC_RATE - [10:0] */
+
+/*
+ * R774 (0x306) - AIF1DAC Data
+ */
+#define WM8995_AIF1DACL_DAT_INV                 0x0002	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_MASK            0x0002	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_SHIFT                1	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_WIDTH                1	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV                 0x0001	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_MASK            0x0001	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_SHIFT                0	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_WIDTH                1	/* AIF1DACR_DAT_INV */
+
+/*
+ * R775 (0x307) - AIF1ADC Data
+ */
+#define WM8995_AIF1ADCL_DAT_INV                 0x0002	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_MASK            0x0002	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_SHIFT                1	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_WIDTH                1	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV                 0x0001	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_MASK            0x0001	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_SHIFT                0	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_WIDTH                1	/* AIF1ADCR_DAT_INV */
+
+/*
+ * R784 (0x310) - AIF2 Control (1)
+ */
+#define WM8995_AIF2ADCL_SRC                     0x8000	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_MASK                0x8000	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_SHIFT                   15	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_WIDTH                    1	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCR_SRC                     0x4000	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_MASK                0x4000	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_SHIFT                   14	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_WIDTH                    1	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADC_TDM                      0x2000	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_MASK                 0x2000	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_SHIFT                    13	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_WIDTH                     1	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_CHAN                 0x1000	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_MASK            0x1000	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_SHIFT               12	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_WIDTH                1	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2_BCLK_INV                    0x0100	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_MASK               0x0100	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_SHIFT                   8	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_WIDTH                   1	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_LRCLK_INV                   0x0080	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_MASK              0x0080	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_SHIFT                  7	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_WIDTH                  1	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_WL_MASK                     0x0060	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_WL_SHIFT                         5	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_WL_WIDTH                         2	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_FMT_MASK                    0x0018	/* AIF2_FMT - [4:3] */
+#define WM8995_AIF2_FMT_SHIFT                        3	/* AIF2_FMT - [4:3] */
+#define WM8995_AIF2_FMT_WIDTH                        2	/* AIF2_FMT - [4:3] */
+
+/*
+ * R785 (0x311) - AIF2 Control (2)
+ */
+#define WM8995_AIF2DACL_SRC                     0x8000	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_MASK                0x8000	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_SHIFT                   15	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_WIDTH                    1	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACR_SRC                     0x4000	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_MASK                0x4000	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_SHIFT                   14	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_WIDTH                    1	/* AIF2DACR_SRC */
+#define WM8995_AIF2DAC_TDM                      0x2000	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_MASK                 0x2000	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_SHIFT                    13	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_WIDTH                     1	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_CHAN                 0x1000	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_MASK            0x1000	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_SHIFT               12	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_WIDTH                1	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_BOOST_MASK               0x0C00	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_BOOST_SHIFT                  10	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_BOOST_WIDTH                   2	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_COMP                     0x0010	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_MASK                0x0010	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_SHIFT                    4	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_WIDTH                    1	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMPMODE                 0x0008	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_MASK            0x0008	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_SHIFT                3	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_WIDTH                1	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2ADC_COMP                     0x0004	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_MASK                0x0004	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_SHIFT                    2	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_WIDTH                    1	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMPMODE                 0x0002	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_MASK            0x0002	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_SHIFT                1	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_WIDTH                1	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2_LOOPBACK                    0x0001	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_MASK               0x0001	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_SHIFT                   0	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_WIDTH                   1	/* AIF2_LOOPBACK */
+
+/*
+ * R786 (0x312) - AIF2 Master/Slave
+ */
+#define WM8995_AIF2_TRI                         0x8000	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_MASK                    0x8000	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_SHIFT                       15	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_WIDTH                        1	/* AIF2_TRI */
+#define WM8995_AIF2_MSTR                        0x4000	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_MASK                   0x4000	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_SHIFT                      14	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_WIDTH                       1	/* AIF2_MSTR */
+#define WM8995_AIF2_CLK_FRC                     0x2000	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_MASK                0x2000	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_SHIFT                   13	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_WIDTH                    1	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC                   0x1000	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_MASK              0x1000	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_SHIFT                 12	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_WIDTH                  1	/* AIF2_LRCLK_FRC */
+
+/*
+ * R787 (0x313) - AIF2 BCLK
+ */
+#define WM8995_AIF2_BCLK_DIV_MASK               0x00F0	/* AIF2_BCLK_DIV - [7:4] */
+#define WM8995_AIF2_BCLK_DIV_SHIFT                   4	/* AIF2_BCLK_DIV - [7:4] */
+#define WM8995_AIF2_BCLK_DIV_WIDTH                   4	/* AIF2_BCLK_DIV - [7:4] */
+
+/*
+ * R788 (0x314) - AIF2ADC LRCLK
+ */
+#define WM8995_AIF2ADC_LRCLK_DIR                0x0800	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_MASK           0x0800	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_SHIFT              11	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_WIDTH               1	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_RATE_MASK                0x07FF	/* AIF2ADC_RATE - [10:0] */
+#define WM8995_AIF2ADC_RATE_SHIFT                    0	/* AIF2ADC_RATE - [10:0] */
+#define WM8995_AIF2ADC_RATE_WIDTH                   11	/* AIF2ADC_RATE - [10:0] */
+
+/*
+ * R789 (0x315) - AIF2DAC LRCLK
+ */
+#define WM8995_AIF2DAC_LRCLK_DIR                0x0800	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_MASK           0x0800	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_SHIFT              11	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_WIDTH               1	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_RATE_MASK                0x07FF	/* AIF2DAC_RATE - [10:0] */
+#define WM8995_AIF2DAC_RATE_SHIFT                    0	/* AIF2DAC_RATE - [10:0] */
+#define WM8995_AIF2DAC_RATE_WIDTH                   11	/* AIF2DAC_RATE - [10:0] */
+
+/*
+ * R790 (0x316) - AIF2DAC Data
+ */
+#define WM8995_AIF2DACL_DAT_INV                 0x0002	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_MASK            0x0002	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_SHIFT                1	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_WIDTH                1	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV                 0x0001	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_MASK            0x0001	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_SHIFT                0	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_WIDTH                1	/* AIF2DACR_DAT_INV */
+
+/*
+ * R791 (0x317) - AIF2ADC Data
+ */
+#define WM8995_AIF2ADCL_DAT_INV                 0x0002	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_MASK            0x0002	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_SHIFT                1	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_WIDTH                1	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV                 0x0001	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_MASK            0x0001	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_SHIFT                0	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_WIDTH                1	/* AIF2ADCR_DAT_INV */
+
+/*
+ * R1024 (0x400) - AIF1 ADC1 Left Volume
+ */
+#define WM8995_AIF1ADC1_VU                      0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_MASK                 0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_SHIFT                     8	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_WIDTH                     1	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1L_VOL_MASK               0x00FF	/* AIF1ADC1L_VOL - [7:0] */
+#define WM8995_AIF1ADC1L_VOL_SHIFT                   0	/* AIF1ADC1L_VOL - [7:0] */
+#define WM8995_AIF1ADC1L_VOL_WIDTH                   8	/* AIF1ADC1L_VOL - [7:0] */
+
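Editor's note, not part of the patch: on this codec family the _VU bit is conventionally written together with a gain field so the new volume is latched in one update. A minimal sketch under that assumption, taking the register-address macro WM8995_AIF1_ADC1_LEFT_VOLUME as defined earlier in this header; the helper name is hypothetical:

/* Illustrative sketch only: write an AIF1 ADC1 left gain code and set the
 * volume-update (VU) bit in the same register write. */
static int wm8995_example_set_adc1l_vol(struct snd_soc_codec *codec,
					unsigned int vol_code)
{
	return snd_soc_update_bits(codec, WM8995_AIF1_ADC1_LEFT_VOLUME,
				   WM8995_AIF1ADC1_VU_MASK |
				   WM8995_AIF1ADC1L_VOL_MASK,
				   WM8995_AIF1ADC1_VU |
				   (vol_code << WM8995_AIF1ADC1L_VOL_SHIFT));
}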
+/*
+ * R1025 (0x401) - AIF1 ADC1 Right Volume
+ */
+#define WM8995_AIF1ADC1_VU                      0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_MASK                 0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_SHIFT                     8	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_WIDTH                     1	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1R_VOL_MASK               0x00FF	/* AIF1ADC1R_VOL - [7:0] */
+#define WM8995_AIF1ADC1R_VOL_SHIFT                   0	/* AIF1ADC1R_VOL - [7:0] */
+#define WM8995_AIF1ADC1R_VOL_WIDTH                   8	/* AIF1ADC1R_VOL - [7:0] */
+
+/*
+ * R1026 (0x402) - AIF1 DAC1 Left Volume
+ */
+#define WM8995_AIF1DAC1_VU                      0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_MASK                 0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_SHIFT                     8	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_WIDTH                     1	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1L_VOL_MASK               0x00FF	/* AIF1DAC1L_VOL - [7:0] */
+#define WM8995_AIF1DAC1L_VOL_SHIFT                   0	/* AIF1DAC1L_VOL - [7:0] */
+#define WM8995_AIF1DAC1L_VOL_WIDTH                   8	/* AIF1DAC1L_VOL - [7:0] */
+
+/*
+ * R1027 (0x403) - AIF1 DAC1 Right Volume
+ */
+#define WM8995_AIF1DAC1_VU                      0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_MASK                 0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_SHIFT                     8	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_WIDTH                     1	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1R_VOL_MASK               0x00FF	/* AIF1DAC1R_VOL - [7:0] */
+#define WM8995_AIF1DAC1R_VOL_SHIFT                   0	/* AIF1DAC1R_VOL - [7:0] */
+#define WM8995_AIF1DAC1R_VOL_WIDTH                   8	/* AIF1DAC1R_VOL - [7:0] */
+
+/*
+ * R1028 (0x404) - AIF1 ADC2 Left Volume
+ */
+#define WM8995_AIF1ADC2_VU                      0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_MASK                 0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_SHIFT                     8	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_WIDTH                     1	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2L_VOL_MASK               0x00FF	/* AIF1ADC2L_VOL - [7:0] */
+#define WM8995_AIF1ADC2L_VOL_SHIFT                   0	/* AIF1ADC2L_VOL - [7:0] */
+#define WM8995_AIF1ADC2L_VOL_WIDTH                   8	/* AIF1ADC2L_VOL - [7:0] */
+
+/*
+ * R1029 (0x405) - AIF1 ADC2 Right Volume
+ */
+#define WM8995_AIF1ADC2_VU                      0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_MASK                 0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_SHIFT                     8	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_WIDTH                     1	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2R_VOL_MASK               0x00FF	/* AIF1ADC2R_VOL - [7:0] */
+#define WM8995_AIF1ADC2R_VOL_SHIFT                   0	/* AIF1ADC2R_VOL - [7:0] */
+#define WM8995_AIF1ADC2R_VOL_WIDTH                   8	/* AIF1ADC2R_VOL - [7:0] */
+
+/*
+ * R1030 (0x406) - AIF1 DAC2 Left Volume
+ */
+#define WM8995_AIF1DAC2_VU                      0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_MASK                 0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_SHIFT                     8	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_WIDTH                     1	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2L_VOL_MASK               0x00FF	/* AIF1DAC2L_VOL - [7:0] */
+#define WM8995_AIF1DAC2L_VOL_SHIFT                   0	/* AIF1DAC2L_VOL - [7:0] */
+#define WM8995_AIF1DAC2L_VOL_WIDTH                   8	/* AIF1DAC2L_VOL - [7:0] */
+
+/*
+ * R1031 (0x407) - AIF1 DAC2 Right Volume
+ */
+#define WM8995_AIF1DAC2_VU                      0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_MASK                 0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_SHIFT                     8	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_WIDTH                     1	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2R_VOL_MASK               0x00FF	/* AIF1DAC2R_VOL - [7:0] */
+#define WM8995_AIF1DAC2R_VOL_SHIFT                   0	/* AIF1DAC2R_VOL - [7:0] */
+#define WM8995_AIF1DAC2R_VOL_WIDTH                   8	/* AIF1DAC2R_VOL - [7:0] */
+
+/*
+ * R1040 (0x410) - AIF1 ADC1 Filters
+ */
+#define WM8995_AIF1ADC_4FS                      0x8000	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_MASK                 0x8000	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_SHIFT                    15	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_WIDTH                     1	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC1L_HPF                    0x1000	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_MASK               0x1000	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_SHIFT                  12	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_WIDTH                   1	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1R_HPF                    0x0800	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_MASK               0x0800	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_SHIFT                  11	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_WIDTH                   1	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1_HPF_MODE                0x0008	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_MASK           0x0008	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_SHIFT               3	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_WIDTH               1	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_CUT_MASK            0x0007	/* AIF1ADC1_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC1_HPF_CUT_SHIFT                0	/* AIF1ADC1_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC1_HPF_CUT_WIDTH                3	/* AIF1ADC1_HPF_CUT - [2:0] */
+
+/*
+ * R1041 (0x411) - AIF1 ADC2 Filters
+ */
+#define WM8995_AIF1ADC2L_HPF                    0x1000	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_MASK               0x1000	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_SHIFT                  12	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_WIDTH                   1	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2R_HPF                    0x0800	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_MASK               0x0800	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_SHIFT                  11	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_WIDTH                   1	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2_HPF_MODE                0x0008	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_MASK           0x0008	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_SHIFT               3	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_WIDTH               1	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_CUT_MASK            0x0007	/* AIF1ADC2_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC2_HPF_CUT_SHIFT                0	/* AIF1ADC2_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC2_HPF_CUT_WIDTH                3	/* AIF1ADC2_HPF_CUT - [2:0] */
+
+/*
+ * R1056 (0x420) - AIF1 DAC1 Filters (1)
+ */
+#define WM8995_AIF1DAC1_MUTE                    0x0200	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_MASK               0x0200	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_SHIFT                   9	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_WIDTH                   1	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MONO                    0x0080	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_MASK               0x0080	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_SHIFT                   7	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_WIDTH                   1	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MUTERATE                0x0020	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_MASK           0x0020	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_SHIFT               5	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_WIDTH               1	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP             0x0010	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_MASK        0x0010	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_SHIFT            4	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_WIDTH            1	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_DEEMP_MASK              0x0006	/* AIF1DAC1_DEEMP - [2:1] */
+#define WM8995_AIF1DAC1_DEEMP_SHIFT                  1	/* AIF1DAC1_DEEMP - [2:1] */
+#define WM8995_AIF1DAC1_DEEMP_WIDTH                  2	/* AIF1DAC1_DEEMP - [2:1] */
+
+/*
+ * R1057 (0x421) - AIF1 DAC1 Filters (2)
+ */
+#define WM8995_AIF1DAC1_3D_GAIN_MASK            0x3E00	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_GAIN_SHIFT                9	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_GAIN_WIDTH                5	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_ENA                  0x0100	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_MASK             0x0100	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_SHIFT                 8	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_WIDTH                 1	/* AIF1DAC1_3D_ENA */
+
+/*
+ * R1058 (0x422) - AIF1 DAC2 Filters (1)
+ */
+#define WM8995_AIF1DAC2_MUTE                    0x0200	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_MASK               0x0200	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_SHIFT                   9	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_WIDTH                   1	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MONO                    0x0080	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_MASK               0x0080	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_SHIFT                   7	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_WIDTH                   1	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MUTERATE                0x0020	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_MASK           0x0020	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_SHIFT               5	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_WIDTH               1	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP             0x0010	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_MASK        0x0010	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_SHIFT            4	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_WIDTH            1	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_DEEMP_MASK              0x0006	/* AIF1DAC2_DEEMP - [2:1] */
+#define WM8995_AIF1DAC2_DEEMP_SHIFT                  1	/* AIF1DAC2_DEEMP - [2:1] */
+#define WM8995_AIF1DAC2_DEEMP_WIDTH                  2	/* AIF1DAC2_DEEMP - [2:1] */
+
+/*
+ * R1059 (0x423) - AIF1 DAC2 Filters (2)
+ */
+#define WM8995_AIF1DAC2_3D_GAIN_MASK            0x3E00	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_GAIN_SHIFT                9	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_GAIN_WIDTH                5	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_ENA                  0x0100	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_MASK             0x0100	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_SHIFT                 8	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_WIDTH                 1	/* AIF1DAC2_3D_ENA */
+
+/*
+ * R1088 (0x440) - AIF1 DRC1 (1)
+ */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_MASK        0xF800	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_SHIFT           11	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_WIDTH            5	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_MASK         0x0600	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_SHIFT             9	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_WIDTH             2	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_NG_ENA                  0x0100	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_MASK             0x0100	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_SHIFT                 8	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_WIDTH                 1	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_SIG_DET_MODE            0x0080	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_MASK       0x0080	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_SHIFT           7	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_WIDTH           1	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET                 0x0040	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_MASK            0x0040	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_SHIFT                6	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_WIDTH                1	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA            0x0020	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_MASK       0x0020	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_SHIFT           5	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_WIDTH           1	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_QR                      0x0010	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_MASK                 0x0010	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_SHIFT                     4	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_WIDTH                     1	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_ANTICLIP                0x0008	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_MASK           0x0008	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_SHIFT               3	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_WIDTH               1	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DAC1_DRC_ENA                 0x0004	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_MASK            0x0004	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_SHIFT                2	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_WIDTH                1	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA                0x0002	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_MASK           0x0002	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_SHIFT               1	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_WIDTH               1	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA                0x0001	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_MASK           0x0001	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_SHIFT               0	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_WIDTH               1	/* AIF1ADC1R_DRC_ENA */
+
+/*
+ * R1089 (0x441) - AIF1 DRC1 (2)
+ */
+#define WM8995_AIF1DRC1_ATK_MASK                0x1E00	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_ATK_SHIFT                    9	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_ATK_WIDTH                    4	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_DCY_MASK                0x01E0	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_DCY_SHIFT                    5	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_DCY_WIDTH                    4	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_MINGAIN_MASK            0x001C	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MINGAIN_SHIFT                2	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MINGAIN_WIDTH                3	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MAXGAIN_MASK            0x0003	/* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC1_MAXGAIN_SHIFT                0	/* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC1_MAXGAIN_WIDTH                2	/* AIF1DRC1_MAXGAIN - [1:0] */
+
+/*
+ * R1090 (0x442) - AIF1 DRC1 (3)
+ */
+#define WM8995_AIF1DRC1_NG_MINGAIN_MASK         0xF000	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_MINGAIN_SHIFT            12	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_MINGAIN_WIDTH             4	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_EXP_MASK             0x0C00	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_NG_EXP_SHIFT                10	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_NG_EXP_WIDTH                 2	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_QR_THR_MASK             0x0300	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_THR_SHIFT                 8	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_THR_WIDTH                 2	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_DCY_MASK             0x00C0	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_QR_DCY_SHIFT                 6	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_QR_DCY_WIDTH                 2	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_HI_COMP_MASK            0x0038	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_HI_COMP_SHIFT                3	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_HI_COMP_WIDTH                3	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_LO_COMP_MASK            0x0007	/* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC1_LO_COMP_SHIFT                0	/* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC1_LO_COMP_WIDTH                3	/* AIF1DRC1_LO_COMP - [2:0] */
+
+/*
+ * R1091 (0x443) - AIF1 DRC1 (4)
+ */
+#define WM8995_AIF1DRC1_KNEE_IP_MASK            0x07E0	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_IP_SHIFT                5	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_IP_WIDTH                6	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_OP_MASK            0x001F	/* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE_OP_SHIFT                0	/* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE_OP_WIDTH                5	/* AIF1DRC1_KNEE_OP - [4:0] */
+
+/*
+ * R1092 (0x444) - AIF1 DRC1 (5)
+ */
+#define WM8995_AIF1DRC1_KNEE2_IP_MASK           0x03E0	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_IP_SHIFT               5	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_IP_WIDTH               5	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_OP_MASK           0x001F	/* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE2_OP_SHIFT               0	/* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE2_OP_WIDTH               5	/* AIF1DRC1_KNEE2_OP - [4:0] */
+
+/*
+ * R1104 (0x450) - AIF1 DRC2 (1)
+ */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_MASK        0xF800	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_SHIFT           11	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_WIDTH            5	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_MASK         0x0600	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_SHIFT             9	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_WIDTH             2	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_NG_ENA                  0x0100	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_MASK             0x0100	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_SHIFT                 8	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_WIDTH                 1	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_SIG_DET_MODE            0x0080	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_MASK       0x0080	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_SHIFT           7	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_WIDTH           1	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET                 0x0040	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_MASK            0x0040	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_SHIFT                6	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_WIDTH                1	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA            0x0020	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_MASK       0x0020	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_SHIFT           5	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_WIDTH           1	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_QR                      0x0010	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_MASK                 0x0010	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_SHIFT                     4	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_WIDTH                     1	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_ANTICLIP                0x0008	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_MASK           0x0008	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_SHIFT               3	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_WIDTH               1	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DAC2_DRC_ENA                 0x0004	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_MASK            0x0004	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_SHIFT                2	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_WIDTH                1	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA                0x0002	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_MASK           0x0002	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_SHIFT               1	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_WIDTH               1	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA                0x0001	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_MASK           0x0001	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_SHIFT               0	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_WIDTH               1	/* AIF1ADC2R_DRC_ENA */
+
+/*
+ * R1105 (0x451) - AIF1 DRC2 (2)
+ */
+#define WM8995_AIF1DRC2_ATK_MASK                0x1E00	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_ATK_SHIFT                    9	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_ATK_WIDTH                    4	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_DCY_MASK                0x01E0	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_DCY_SHIFT                    5	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_DCY_WIDTH                    4	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_MINGAIN_MASK            0x001C	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MINGAIN_SHIFT                2	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MINGAIN_WIDTH                3	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MAXGAIN_MASK            0x0003	/* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC2_MAXGAIN_SHIFT                0	/* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC2_MAXGAIN_WIDTH                2	/* AIF1DRC2_MAXGAIN - [1:0] */
+
+/*
+ * R1106 (0x452) - AIF1 DRC2 (3)
+ */
+#define WM8995_AIF1DRC2_NG_MINGAIN_MASK         0xF000	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_MINGAIN_SHIFT            12	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_MINGAIN_WIDTH             4	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_EXP_MASK             0x0C00	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_NG_EXP_SHIFT                10	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_NG_EXP_WIDTH                 2	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_QR_THR_MASK             0x0300	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_THR_SHIFT                 8	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_THR_WIDTH                 2	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_DCY_MASK             0x00C0	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_QR_DCY_SHIFT                 6	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_QR_DCY_WIDTH                 2	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_HI_COMP_MASK            0x0038	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_HI_COMP_SHIFT                3	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_HI_COMP_WIDTH                3	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_LO_COMP_MASK            0x0007	/* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC2_LO_COMP_SHIFT                0	/* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC2_LO_COMP_WIDTH                3	/* AIF1DRC2_LO_COMP - [2:0] */
+
+/*
+ * R1107 (0x453) - AIF1 DRC2 (4)
+ */
+#define WM8995_AIF1DRC2_KNEE_IP_MASK            0x07E0	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_IP_SHIFT                5	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_IP_WIDTH                6	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_OP_MASK            0x001F	/* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE_OP_SHIFT                0	/* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE_OP_WIDTH                5	/* AIF1DRC2_KNEE_OP - [4:0] */
+
+/*
+ * R1108 (0x454) - AIF1 DRC2 (5)
+ */
+#define WM8995_AIF1DRC2_KNEE2_IP_MASK           0x03E0	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_IP_SHIFT               5	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_IP_WIDTH               5	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_OP_MASK           0x001F	/* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE2_OP_SHIFT               0	/* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE2_OP_WIDTH               5	/* AIF1DRC2_KNEE2_OP - [4:0] */
+
+/*
+ * R1152 (0x480) - AIF1 DAC1 EQ Gains (1)
+ */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_MASK         0xF800	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_SHIFT            11	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_MASK         0x07C0	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_SHIFT             6	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_MASK         0x003E	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_SHIFT             1	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_ENA                  0x0001	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_MASK             0x0001	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_SHIFT                 0	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_WIDTH                 1	/* AIF1DAC1_EQ_ENA */
+
+/*
+ * R1153 (0x481) - AIF1 DAC1 EQ Gains (2)
+ */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_MASK         0xF800	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_SHIFT            11	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_MASK         0x07C0	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_SHIFT             6	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1154 (0x482) - AIF1 DAC1 EQ Band 1 A
+ */
+#define WM8995_AIF1DAC1_EQ_B1_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_A_SHIFT                0	/* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_A_WIDTH               16	/* AIF1DAC1_EQ_B1_A - [15:0] */
+
+/*
+ * R1155 (0x483) - AIF1 DAC1 EQ Band 1 B
+ */
+#define WM8995_AIF1DAC1_EQ_B1_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_B_SHIFT                0	/* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_B_WIDTH               16	/* AIF1DAC1_EQ_B1_B - [15:0] */
+
+/*
+ * R1156 (0x484) - AIF1 DAC1 EQ Band 1 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B1_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_PG_SHIFT               0	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_PG_WIDTH              16	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+
+/*
+ * R1157 (0x485) - AIF1 DAC1 EQ Band 2 A
+ */
+#define WM8995_AIF1DAC1_EQ_B2_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_A_SHIFT                0	/* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_A_WIDTH               16	/* AIF1DAC1_EQ_B2_A - [15:0] */
+
+/*
+ * R1158 (0x486) - AIF1 DAC1 EQ Band 2 B
+ */
+#define WM8995_AIF1DAC1_EQ_B2_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_B_SHIFT                0	/* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_B_WIDTH               16	/* AIF1DAC1_EQ_B2_B - [15:0] */
+
+/*
+ * R1159 (0x487) - AIF1 DAC1 EQ Band 2 C
+ */
+#define WM8995_AIF1DAC1_EQ_B2_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_C_SHIFT                0	/* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_C_WIDTH               16	/* AIF1DAC1_EQ_B2_C - [15:0] */
+
+/*
+ * R1160 (0x488) - AIF1 DAC1 EQ Band 2 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B2_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_PG_SHIFT               0	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_PG_WIDTH              16	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+
+/*
+ * R1161 (0x489) - AIF1 DAC1 EQ Band 3 A
+ */
+#define WM8995_AIF1DAC1_EQ_B3_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_A_SHIFT                0	/* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_A_WIDTH               16	/* AIF1DAC1_EQ_B3_A - [15:0] */
+
+/*
+ * R1162 (0x48A) - AIF1 DAC1 EQ Band 3 B
+ */
+#define WM8995_AIF1DAC1_EQ_B3_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_B_SHIFT                0	/* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_B_WIDTH               16	/* AIF1DAC1_EQ_B3_B - [15:0] */
+
+/*
+ * R1163 (0x48B) - AIF1 DAC1 EQ Band 3 C
+ */
+#define WM8995_AIF1DAC1_EQ_B3_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_C_SHIFT                0	/* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_C_WIDTH               16	/* AIF1DAC1_EQ_B3_C - [15:0] */
+
+/*
+ * R1164 (0x48C) - AIF1 DAC1 EQ Band 3 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B3_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_PG_SHIFT               0	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_PG_WIDTH              16	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+
+/*
+ * R1165 (0x48D) - AIF1 DAC1 EQ Band 4 A
+ */
+#define WM8995_AIF1DAC1_EQ_B4_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_A_SHIFT                0	/* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_A_WIDTH               16	/* AIF1DAC1_EQ_B4_A - [15:0] */
+
+/*
+ * R1166 (0x48E) - AIF1 DAC1 EQ Band 4 B
+ */
+#define WM8995_AIF1DAC1_EQ_B4_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_B_SHIFT                0	/* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_B_WIDTH               16	/* AIF1DAC1_EQ_B4_B - [15:0] */
+
+/*
+ * R1167 (0x48F) - AIF1 DAC1 EQ Band 4 C
+ */
+#define WM8995_AIF1DAC1_EQ_B4_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_C_SHIFT                0	/* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_C_WIDTH               16	/* AIF1DAC1_EQ_B4_C - [15:0] */
+
+/*
+ * R1168 (0x490) - AIF1 DAC1 EQ Band 4 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B4_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_PG_SHIFT               0	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_PG_WIDTH              16	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+
+/*
+ * R1169 (0x491) - AIF1 DAC1 EQ Band 5 A
+ */
+#define WM8995_AIF1DAC1_EQ_B5_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_A_SHIFT                0	/* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_A_WIDTH               16	/* AIF1DAC1_EQ_B5_A - [15:0] */
+
+/*
+ * R1170 (0x492) - AIF1 DAC1 EQ Band 5 B
+ */
+#define WM8995_AIF1DAC1_EQ_B5_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_B_SHIFT                0	/* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_B_WIDTH               16	/* AIF1DAC1_EQ_B5_B - [15:0] */
+
+/*
+ * R1171 (0x493) - AIF1 DAC1 EQ Band 5 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B5_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_PG_SHIFT               0	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_PG_WIDTH              16	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+
+/*
+ * R1184 (0x4A0) - AIF1 DAC2 EQ Gains (1)
+ */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_MASK         0xF800	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_SHIFT            11	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_MASK         0x07C0	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_SHIFT             6	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_MASK         0x003E	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_SHIFT             1	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_ENA                  0x0001	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_MASK             0x0001	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_SHIFT                 0	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_WIDTH                 1	/* AIF1DAC2_EQ_ENA */
+
+/*
+ * R1185 (0x4A1) - AIF1 DAC2 EQ Gains (2)
+ */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_MASK         0xF800	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_SHIFT            11	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_MASK         0x07C0	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_SHIFT             6	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1186 (0x4A2) - AIF1 DAC2 EQ Band 1 A
+ */
+#define WM8995_AIF1DAC2_EQ_B1_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_A_SHIFT                0	/* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_A_WIDTH               16	/* AIF1DAC2_EQ_B1_A - [15:0] */
+
+/*
+ * R1187 (0x4A3) - AIF1 DAC2 EQ Band 1 B
+ */
+#define WM8995_AIF1DAC2_EQ_B1_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_B_SHIFT                0	/* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_B_WIDTH               16	/* AIF1DAC2_EQ_B1_B - [15:0] */
+
+/*
+ * R1188 (0x4A4) - AIF1 DAC2 EQ Band 1 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B1_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_PG_SHIFT               0	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_PG_WIDTH              16	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+
+/*
+ * R1189 (0x4A5) - AIF1 DAC2 EQ Band 2 A
+ */
+#define WM8995_AIF1DAC2_EQ_B2_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_A_SHIFT                0	/* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_A_WIDTH               16	/* AIF1DAC2_EQ_B2_A - [15:0] */
+
+/*
+ * R1190 (0x4A6) - AIF1 DAC2 EQ Band 2 B
+ */
+#define WM8995_AIF1DAC2_EQ_B2_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_B_SHIFT                0	/* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_B_WIDTH               16	/* AIF1DAC2_EQ_B2_B - [15:0] */
+
+/*
+ * R1191 (0x4A7) - AIF1 DAC2 EQ Band 2 C
+ */
+#define WM8995_AIF1DAC2_EQ_B2_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_C_SHIFT                0	/* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_C_WIDTH               16	/* AIF1DAC2_EQ_B2_C - [15:0] */
+
+/*
+ * R1192 (0x4A8) - AIF1 DAC2 EQ Band 2 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B2_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_PG_SHIFT               0	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_PG_WIDTH              16	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+
+/*
+ * R1193 (0x4A9) - AIF1 DAC2 EQ Band 3 A
+ */
+#define WM8995_AIF1DAC2_EQ_B3_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_A_SHIFT                0	/* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_A_WIDTH               16	/* AIF1DAC2_EQ_B3_A - [15:0] */
+
+/*
+ * R1194 (0x4AA) - AIF1 DAC2 EQ Band 3 B
+ */
+#define WM8995_AIF1DAC2_EQ_B3_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_B_SHIFT                0	/* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_B_WIDTH               16	/* AIF1DAC2_EQ_B3_B - [15:0] */
+
+/*
+ * R1195 (0x4AB) - AIF1 DAC2 EQ Band 3 C
+ */
+#define WM8995_AIF1DAC2_EQ_B3_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_C_SHIFT                0	/* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_C_WIDTH               16	/* AIF1DAC2_EQ_B3_C - [15:0] */
+
+/*
+ * R1196 (0x4AC) - AIF1 DAC2 EQ Band 3 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B3_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_PG_SHIFT               0	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_PG_WIDTH              16	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+
+/*
+ * R1197 (0x4AD) - AIF1 DAC2 EQ Band 4 A
+ */
+#define WM8995_AIF1DAC2_EQ_B4_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_A_SHIFT                0	/* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_A_WIDTH               16	/* AIF1DAC2_EQ_B4_A - [15:0] */
+
+/*
+ * R1198 (0x4AE) - AIF1 DAC2 EQ Band 4 B
+ */
+#define WM8995_AIF1DAC2_EQ_B4_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_B_SHIFT                0	/* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_B_WIDTH               16	/* AIF1DAC2_EQ_B4_B - [15:0] */
+
+/*
+ * R1199 (0x4AF) - AIF1 DAC2 EQ Band 4 C
+ */
+#define WM8995_AIF1DAC2_EQ_B4_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_C_SHIFT                0	/* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_C_WIDTH               16	/* AIF1DAC2_EQ_B4_C - [15:0] */
+
+/*
+ * R1200 (0x4B0) - AIF1 DAC2 EQ Band 4 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B4_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_PG_SHIFT               0	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_PG_WIDTH              16	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+
+/*
+ * R1201 (0x4B1) - AIF1 DAC2 EQ Band 5 A
+ */
+#define WM8995_AIF1DAC2_EQ_B5_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_A_SHIFT                0	/* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_A_WIDTH               16	/* AIF1DAC2_EQ_B5_A - [15:0] */
+
+/*
+ * R1202 (0x4B2) - AIF1 DAC2 EQ Band 5 B
+ */
+#define WM8995_AIF1DAC2_EQ_B5_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_B_SHIFT                0	/* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_B_WIDTH               16	/* AIF1DAC2_EQ_B5_B - [15:0] */
+
+/*
+ * R1203 (0x4B3) - AIF1 DAC2 EQ Band 5 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B5_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_PG_SHIFT               0	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_PG_WIDTH              16	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+
+/*
+ * R1280 (0x500) - AIF2 ADC Left Volume
+ */
+#define WM8995_AIF2ADC_VU                       0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_MASK                  0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_SHIFT                      8	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_WIDTH                      1	/* AIF2ADC_VU */
+#define WM8995_AIF2ADCL_VOL_MASK                0x00FF	/* AIF2ADCL_VOL - [7:0] */
+#define WM8995_AIF2ADCL_VOL_SHIFT                    0	/* AIF2ADCL_VOL - [7:0] */
+#define WM8995_AIF2ADCL_VOL_WIDTH                    8	/* AIF2ADCL_VOL - [7:0] */
+
+/*
+ * R1281 (0x501) - AIF2 ADC Right Volume
+ */
+#define WM8995_AIF2ADC_VU                       0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_MASK                  0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_SHIFT                      8	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_WIDTH                      1	/* AIF2ADC_VU */
+#define WM8995_AIF2ADCR_VOL_MASK                0x00FF	/* AIF2ADCR_VOL - [7:0] */
+#define WM8995_AIF2ADCR_VOL_SHIFT                    0	/* AIF2ADCR_VOL - [7:0] */
+#define WM8995_AIF2ADCR_VOL_WIDTH                    8	/* AIF2ADCR_VOL - [7:0] */
+
+/*
+ * R1282 (0x502) - AIF2 DAC Left Volume
+ */
+#define WM8995_AIF2DAC_VU                       0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_MASK                  0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_SHIFT                      8	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_WIDTH                      1	/* AIF2DAC_VU */
+#define WM8995_AIF2DACL_VOL_MASK                0x00FF	/* AIF2DACL_VOL - [7:0] */
+#define WM8995_AIF2DACL_VOL_SHIFT                    0	/* AIF2DACL_VOL - [7:0] */
+#define WM8995_AIF2DACL_VOL_WIDTH                    8	/* AIF2DACL_VOL - [7:0] */
+
+/*
+ * R1283 (0x503) - AIF2 DAC Right Volume
+ */
+#define WM8995_AIF2DAC_VU                       0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_MASK                  0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_SHIFT                      8	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_WIDTH                      1	/* AIF2DAC_VU */
+#define WM8995_AIF2DACR_VOL_MASK                0x00FF	/* AIF2DACR_VOL - [7:0] */
+#define WM8995_AIF2DACR_VOL_SHIFT                    0	/* AIF2DACR_VOL - [7:0] */
+#define WM8995_AIF2DACR_VOL_WIDTH                    8	/* AIF2DACR_VOL - [7:0] */
+
+/*
+ * R1296 (0x510) - AIF2 ADC Filters
+ */
+#define WM8995_AIF2ADC_4FS                      0x8000	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_MASK                 0x8000	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_SHIFT                    15	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_WIDTH                     1	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADCL_HPF                     0x1000	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_MASK                0x1000	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_SHIFT                   12	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_WIDTH                    1	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCR_HPF                     0x0800	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_MASK                0x0800	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_SHIFT                   11	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_WIDTH                    1	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADC_HPF_MODE                 0x0008	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_MASK            0x0008	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_SHIFT                3	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_WIDTH                1	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_CUT_MASK             0x0007	/* AIF2ADC_HPF_CUT - [2:0] */
+#define WM8995_AIF2ADC_HPF_CUT_SHIFT                 0	/* AIF2ADC_HPF_CUT - [2:0] */
+#define WM8995_AIF2ADC_HPF_CUT_WIDTH                 3	/* AIF2ADC_HPF_CUT - [2:0] */
+
+/*
+ * R1312 (0x520) - AIF2 DAC Filters (1)
+ */
+#define WM8995_AIF2DAC_MUTE                     0x0200	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_MASK                0x0200	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_SHIFT                    9	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_WIDTH                    1	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MONO                     0x0080	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_MASK                0x0080	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_SHIFT                    7	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_WIDTH                    1	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MUTERATE                 0x0020	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_MASK            0x0020	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_SHIFT                5	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_WIDTH                1	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_UNMUTE_RAMP              0x0010	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_MASK         0x0010	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_SHIFT             4	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_WIDTH             1	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_DEEMP_MASK               0x0006	/* AIF2DAC_DEEMP - [2:1] */
+#define WM8995_AIF2DAC_DEEMP_SHIFT                   1	/* AIF2DAC_DEEMP - [2:1] */
+#define WM8995_AIF2DAC_DEEMP_WIDTH                   2	/* AIF2DAC_DEEMP - [2:1] */
+
+/*
+ * R1313 (0x521) - AIF2 DAC Filters (2)
+ */
+#define WM8995_AIF2DAC_3D_GAIN_MASK             0x3E00	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_GAIN_SHIFT                 9	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_GAIN_WIDTH                 5	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_ENA                   0x0100	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_MASK              0x0100	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_SHIFT                  8	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_WIDTH                  1	/* AIF2DAC_3D_ENA */
+
+/*
+ * R1344 (0x540) - AIF2 DRC (1)
+ */
+#define WM8995_AIF2DRC_SIG_DET_RMS_MASK         0xF800	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_RMS_SHIFT            11	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_RMS_WIDTH             5	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_PK_MASK          0x0600	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_SIG_DET_PK_SHIFT              9	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_SIG_DET_PK_WIDTH              2	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_NG_ENA                   0x0100	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_MASK              0x0100	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_SHIFT                  8	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_WIDTH                  1	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_SIG_DET_MODE             0x0080	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_MASK        0x0080	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_SHIFT            7	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_WIDTH            1	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET                  0x0040	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_MASK             0x0040	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_SHIFT                 6	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_WIDTH                 1	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA             0x0020	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_MASK        0x0020	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_SHIFT            5	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_WIDTH            1	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_QR                       0x0010	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_MASK                  0x0010	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_SHIFT                      4	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_WIDTH                      1	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_ANTICLIP                 0x0008	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_MASK            0x0008	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_SHIFT                3	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_WIDTH                1	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DAC_DRC_ENA                  0x0004	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_MASK             0x0004	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_SHIFT                 2	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_WIDTH                 1	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA                 0x0002	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_MASK            0x0002	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_SHIFT                1	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_WIDTH                1	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA                 0x0001	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_MASK            0x0001	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_SHIFT                0	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_WIDTH                1	/* AIF2ADCR_DRC_ENA */
+
+/*
+ * R1345 (0x541) - AIF2 DRC (2)
+ */
+#define WM8995_AIF2DRC_ATK_MASK                 0x1E00	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_ATK_SHIFT                     9	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_ATK_WIDTH                     4	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_DCY_MASK                 0x01E0	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_DCY_SHIFT                     5	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_DCY_WIDTH                     4	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_MINGAIN_MASK             0x001C	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MINGAIN_SHIFT                 2	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MINGAIN_WIDTH                 3	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MAXGAIN_MASK             0x0003	/* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8995_AIF2DRC_MAXGAIN_SHIFT                 0	/* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8995_AIF2DRC_MAXGAIN_WIDTH                 2	/* AIF2DRC_MAXGAIN - [1:0] */
+
+/*
+ * R1346 (0x542) - AIF2 DRC (3)
+ */
+#define WM8995_AIF2DRC_NG_MINGAIN_MASK          0xF000	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_MINGAIN_SHIFT             12	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_MINGAIN_WIDTH              4	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_EXP_MASK              0x0C00	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_NG_EXP_SHIFT                 10	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_NG_EXP_WIDTH                  2	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_QR_THR_MASK              0x0300	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_THR_SHIFT                  8	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_THR_WIDTH                  2	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_DCY_MASK              0x00C0	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_QR_DCY_SHIFT                  6	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_QR_DCY_WIDTH                  2	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_HI_COMP_MASK             0x0038	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_HI_COMP_SHIFT                 3	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_HI_COMP_WIDTH                 3	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_LO_COMP_MASK             0x0007	/* AIF2DRC_LO_COMP - [2:0] */
+#define WM8995_AIF2DRC_LO_COMP_SHIFT                 0	/* AIF2DRC_LO_COMP - [2:0] */
+#define WM8995_AIF2DRC_LO_COMP_WIDTH                 3	/* AIF2DRC_LO_COMP - [2:0] */
+
+/*
+ * R1347 (0x543) - AIF2 DRC (4)
+ */
+#define WM8995_AIF2DRC_KNEE_IP_MASK             0x07E0	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_IP_SHIFT                 5	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_IP_WIDTH                 6	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_OP_MASK             0x001F	/* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE_OP_SHIFT                 0	/* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE_OP_WIDTH                 5	/* AIF2DRC_KNEE_OP - [4:0] */
+
+/*
+ * R1348 (0x544) - AIF2 DRC (5)
+ */
+#define WM8995_AIF2DRC_KNEE2_IP_MASK            0x03E0	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_IP_SHIFT                5	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_IP_WIDTH                5	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_OP_MASK            0x001F	/* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE2_OP_SHIFT                0	/* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE2_OP_WIDTH                5	/* AIF2DRC_KNEE2_OP - [4:0] */
+
+/*
+ * R1408 (0x580) - AIF2 EQ Gains (1)
+ */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_MASK          0xF800	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_SHIFT             11	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_WIDTH              5	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_MASK          0x07C0	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_SHIFT              6	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_WIDTH              5	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_MASK          0x003E	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_SHIFT              1	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_WIDTH              5	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_ENA                   0x0001	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_MASK              0x0001	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_SHIFT                  0	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_WIDTH                  1	/* AIF2DAC_EQ_ENA */
+
+/*
+ * R1409 (0x581) - AIF2 EQ Gains (2)
+ */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_MASK          0xF800	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_SHIFT             11	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_WIDTH              5	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_MASK          0x07C0	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_SHIFT              6	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_WIDTH              5	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1410 (0x582) - AIF2 EQ Band 1 A
+ */
+#define WM8995_AIF2DAC_EQ_B1_A_MASK             0xFFFF	/* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_A_SHIFT                 0	/* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_A_WIDTH                16	/* AIF2DAC_EQ_B1_A - [15:0] */
+
+/*
+ * R1411 (0x583) - AIF2 EQ Band 1 B
+ */
+#define WM8995_AIF2DAC_EQ_B1_B_MASK             0xFFFF	/* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_B_SHIFT                 0	/* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_B_WIDTH                16	/* AIF2DAC_EQ_B1_B - [15:0] */
+
+/*
+ * R1412 (0x584) - AIF2 EQ Band 1 PG
+ */
+#define WM8995_AIF2DAC_EQ_B1_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_PG_SHIFT                0	/* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_PG_WIDTH               16	/* AIF2DAC_EQ_B1_PG - [15:0] */
+
+/*
+ * R1413 (0x585) - AIF2 EQ Band 2 A
+ */
+#define WM8995_AIF2DAC_EQ_B2_A_MASK             0xFFFF	/* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_A_SHIFT                 0	/* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_A_WIDTH                16	/* AIF2DAC_EQ_B2_A - [15:0] */
+
+/*
+ * R1414 (0x586) - AIF2 EQ Band 2 B
+ */
+#define WM8995_AIF2DAC_EQ_B2_B_MASK             0xFFFF	/* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_B_SHIFT                 0	/* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_B_WIDTH                16	/* AIF2DAC_EQ_B2_B - [15:0] */
+
+/*
+ * R1415 (0x587) - AIF2 EQ Band 2 C
+ */
+#define WM8995_AIF2DAC_EQ_B2_C_MASK             0xFFFF	/* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_C_SHIFT                 0	/* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_C_WIDTH                16	/* AIF2DAC_EQ_B2_C - [15:0] */
+
+/*
+ * R1416 (0x588) - AIF2 EQ Band 2 PG
+ */
+#define WM8995_AIF2DAC_EQ_B2_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_PG_SHIFT                0	/* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_PG_WIDTH               16	/* AIF2DAC_EQ_B2_PG - [15:0] */
+
+/*
+ * R1417 (0x589) - AIF2 EQ Band 3 A
+ */
+#define WM8995_AIF2DAC_EQ_B3_A_MASK             0xFFFF	/* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_A_SHIFT                 0	/* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_A_WIDTH                16	/* AIF2DAC_EQ_B3_A - [15:0] */
+
+/*
+ * R1418 (0x58A) - AIF2 EQ Band 3 B
+ */
+#define WM8995_AIF2DAC_EQ_B3_B_MASK             0xFFFF	/* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_B_SHIFT                 0	/* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_B_WIDTH                16	/* AIF2DAC_EQ_B3_B - [15:0] */
+
+/*
+ * R1419 (0x58B) - AIF2 EQ Band 3 C
+ */
+#define WM8995_AIF2DAC_EQ_B3_C_MASK             0xFFFF	/* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_C_SHIFT                 0	/* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_C_WIDTH                16	/* AIF2DAC_EQ_B3_C - [15:0] */
+
+/*
+ * R1420 (0x58C) - AIF2 EQ Band 3 PG
+ */
+#define WM8995_AIF2DAC_EQ_B3_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_PG_SHIFT                0	/* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_PG_WIDTH               16	/* AIF2DAC_EQ_B3_PG - [15:0] */
+
+/*
+ * R1421 (0x58D) - AIF2 EQ Band 4 A
+ */
+#define WM8995_AIF2DAC_EQ_B4_A_MASK             0xFFFF	/* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_A_SHIFT                 0	/* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_A_WIDTH                16	/* AIF2DAC_EQ_B4_A - [15:0] */
+
+/*
+ * R1422 (0x58E) - AIF2 EQ Band 4 B
+ */
+#define WM8995_AIF2DAC_EQ_B4_B_MASK             0xFFFF	/* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_B_SHIFT                 0	/* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_B_WIDTH                16	/* AIF2DAC_EQ_B4_B - [15:0] */
+
+/*
+ * R1423 (0x58F) - AIF2 EQ Band 4 C
+ */
+#define WM8995_AIF2DAC_EQ_B4_C_MASK             0xFFFF	/* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_C_SHIFT                 0	/* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_C_WIDTH                16	/* AIF2DAC_EQ_B4_C - [15:0] */
+
+/*
+ * R1424 (0x590) - AIF2 EQ Band 4 PG
+ */
+#define WM8995_AIF2DAC_EQ_B4_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_PG_SHIFT                0	/* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_PG_WIDTH               16	/* AIF2DAC_EQ_B4_PG - [15:0] */
+
+/*
+ * R1425 (0x591) - AIF2 EQ Band 5 A
+ */
+#define WM8995_AIF2DAC_EQ_B5_A_MASK             0xFFFF	/* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_A_SHIFT                 0	/* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_A_WIDTH                16	/* AIF2DAC_EQ_B5_A - [15:0] */
+
+/*
+ * R1426 (0x592) - AIF2 EQ Band 5 B
+ */
+#define WM8995_AIF2DAC_EQ_B5_B_MASK             0xFFFF	/* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_B_SHIFT                 0	/* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_B_WIDTH                16	/* AIF2DAC_EQ_B5_B - [15:0] */
+
+/*
+ * R1427 (0x593) - AIF2 EQ Band 5 PG
+ */
+#define WM8995_AIF2DAC_EQ_B5_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_PG_SHIFT                0	/* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_PG_WIDTH               16	/* AIF2DAC_EQ_B5_PG - [15:0] */
+
+/*
+ * R1536 (0x600) - DAC1 Mixer Volumes
+ */
+#define WM8995_ADCR_DAC1_VOL_MASK               0x03E0	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCR_DAC1_VOL_SHIFT                   5	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCR_DAC1_VOL_WIDTH                   5	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCL_DAC1_VOL_MASK               0x001F	/* ADCL_DAC1_VOL - [4:0] */
+#define WM8995_ADCL_DAC1_VOL_SHIFT                   0	/* ADCL_DAC1_VOL - [4:0] */
+#define WM8995_ADCL_DAC1_VOL_WIDTH                   5	/* ADCL_DAC1_VOL - [4:0] */
+
+/*
+ * R1537 (0x601) - DAC1 Left Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC1L                    0x0020	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_MASK               0x0020	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_SHIFT                   5	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_WIDTH                   1	/* ADCR_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L                    0x0010	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_MASK               0x0010	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_SHIFT                   4	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_WIDTH                   1	/* ADCL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L                0x0004	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_MASK           0x0004	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_SHIFT               2	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_WIDTH               1	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L               0x0002	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_MASK          0x0002	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_SHIFT              1	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_WIDTH              1	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L               0x0001	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_MASK          0x0001	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_SHIFT              0	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_WIDTH              1	/* AIF1DAC1L_TO_DAC1L */
+
+/*
+ * R1538 (0x602) - DAC1 Right Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC1R                    0x0020	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_MASK               0x0020	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_SHIFT                   5	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_WIDTH                   1	/* ADCR_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R                    0x0010	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_MASK               0x0010	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_SHIFT                   4	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_WIDTH                   1	/* ADCL_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R                0x0004	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_MASK           0x0004	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_SHIFT               2	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_WIDTH               1	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R               0x0002	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_MASK          0x0002	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_SHIFT              1	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_WIDTH              1	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R               0x0001	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_MASK          0x0001	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_SHIFT              0	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_WIDTH              1	/* AIF1DAC1R_TO_DAC1R */
+
+/*
+ * R1539 (0x603) - DAC2 Mixer Volumes
+ */
+#define WM8995_ADCR_DAC2_VOL_MASK               0x03E0	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCR_DAC2_VOL_SHIFT                   5	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCR_DAC2_VOL_WIDTH                   5	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCL_DAC2_VOL_MASK               0x001F	/* ADCL_DAC2_VOL - [4:0] */
+#define WM8995_ADCL_DAC2_VOL_SHIFT                   0	/* ADCL_DAC2_VOL - [4:0] */
+#define WM8995_ADCL_DAC2_VOL_WIDTH                   5	/* ADCL_DAC2_VOL - [4:0] */
+
+/*
+ * R1540 (0x604) - DAC2 Left Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC2L                    0x0020	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_MASK               0x0020	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_SHIFT                   5	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_WIDTH                   1	/* ADCR_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L                    0x0010	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_MASK               0x0010	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_SHIFT                   4	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_WIDTH                   1	/* ADCL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L                0x0004	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_MASK           0x0004	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_SHIFT               2	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_WIDTH               1	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L               0x0002	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_MASK          0x0002	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_SHIFT              1	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_WIDTH              1	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L               0x0001	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_MASK          0x0001	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_SHIFT              0	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_WIDTH              1	/* AIF1DAC1L_TO_DAC2L */
+
+/*
+ * R1541 (0x605) - DAC2 Right Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC2R                    0x0020	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_MASK               0x0020	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_SHIFT                   5	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_WIDTH                   1	/* ADCR_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R                    0x0010	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_MASK               0x0010	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_SHIFT                   4	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_WIDTH                   1	/* ADCL_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R                0x0004	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_MASK           0x0004	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_SHIFT               2	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_WIDTH               1	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R               0x0002	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_MASK          0x0002	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_SHIFT              1	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_WIDTH              1	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R               0x0001	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_MASK          0x0001	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_SHIFT              0	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_WIDTH              1	/* AIF1DAC1R_TO_DAC2R */
+
+/*
+ * R1542 (0x606) - AIF1 ADC1 Left Mixer Routing
+ */
+#define WM8995_ADC1L_TO_AIF1ADC1L               0x0002	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_MASK          0x0002	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_SHIFT              1	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_WIDTH              1	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L            0x0001	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_MASK       0x0001	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_SHIFT           0	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_WIDTH           1	/* AIF2DACL_TO_AIF1ADC1L */
+
+/*
+ * R1543 (0x607) - AIF1 ADC1 Right Mixer Routing
+ */
+#define WM8995_ADC1R_TO_AIF1ADC1R               0x0002	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_MASK          0x0002	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_SHIFT              1	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_WIDTH              1	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R            0x0001	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_MASK       0x0001	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_SHIFT           0	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_WIDTH           1	/* AIF2DACR_TO_AIF1ADC1R */
+
+/*
+ * R1544 (0x608) - AIF1 ADC2 Left Mixer Routing
+ */
+#define WM8995_ADC2L_TO_AIF1ADC2L               0x0002	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_MASK          0x0002	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_SHIFT              1	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_WIDTH              1	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L            0x0001	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_MASK       0x0001	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_SHIFT           0	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_WIDTH           1	/* AIF2DACL_TO_AIF1ADC2L */
+
+/*
+ * R1545 (0x609) - AIF1 ADC2 Right Mixer Routing
+ */
+#define WM8995_ADC2R_TO_AIF1ADC2R               0x0002	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_MASK          0x0002	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_SHIFT              1	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_WIDTH              1	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R            0x0001	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_MASK       0x0001	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_SHIFT           0	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_WIDTH           1	/* AIF2DACR_TO_AIF1ADC2R */
+
+/*
+ * R1552 (0x610) - DAC Softmute
+ */
+#define WM8995_DAC_SOFTMUTEMODE                 0x0002	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_MASK            0x0002	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_SHIFT                1	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_WIDTH                1	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_MUTERATE                     0x0001	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_MASK                0x0001	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_SHIFT                    0	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_WIDTH                    1	/* DAC_MUTERATE */
+
+/*
+ * R1568 (0x620) - Oversampling
+ */
+#define WM8995_ADC_OSR128                       0x0002	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_MASK                  0x0002	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_SHIFT                      1	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_WIDTH                      1	/* ADC_OSR128 */
+#define WM8995_DAC_OSR128                       0x0001	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_MASK                  0x0001	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_SHIFT                      0	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_WIDTH                      1	/* DAC_OSR128 */
+
+/*
+ * R1569 (0x621) - Sidetone
+ */
+#define WM8995_ST_LPF                           0x1000	/* ST_LPF */
+#define WM8995_ST_LPF_MASK                      0x1000	/* ST_LPF */
+#define WM8995_ST_LPF_SHIFT                         12	/* ST_LPF */
+#define WM8995_ST_LPF_WIDTH                          1	/* ST_LPF */
+#define WM8995_ST_HPF_CUT_MASK                  0x0380	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF_CUT_SHIFT                      7	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF_CUT_WIDTH                      3	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF                           0x0040	/* ST_HPF */
+#define WM8995_ST_HPF_MASK                      0x0040	/* ST_HPF */
+#define WM8995_ST_HPF_SHIFT                          6	/* ST_HPF */
+#define WM8995_ST_HPF_WIDTH                          1	/* ST_HPF */
+#define WM8995_STR_SEL                          0x0002	/* STR_SEL */
+#define WM8995_STR_SEL_MASK                     0x0002	/* STR_SEL */
+#define WM8995_STR_SEL_SHIFT                         1	/* STR_SEL */
+#define WM8995_STR_SEL_WIDTH                         1	/* STR_SEL */
+#define WM8995_STL_SEL                          0x0001	/* STL_SEL */
+#define WM8995_STL_SEL_MASK                     0x0001	/* STL_SEL */
+#define WM8995_STL_SEL_SHIFT                         0	/* STL_SEL */
+#define WM8995_STL_SEL_WIDTH                         1	/* STL_SEL */
+
+/*
+ * R1792 (0x700) - GPIO 1
+ */
+#define WM8995_GP1_DIR                          0x8000	/* GP1_DIR */
+#define WM8995_GP1_DIR_MASK                     0x8000	/* GP1_DIR */
+#define WM8995_GP1_DIR_SHIFT                        15	/* GP1_DIR */
+#define WM8995_GP1_DIR_WIDTH                         1	/* GP1_DIR */
+#define WM8995_GP1_PU                           0x4000	/* GP1_PU */
+#define WM8995_GP1_PU_MASK                      0x4000	/* GP1_PU */
+#define WM8995_GP1_PU_SHIFT                         14	/* GP1_PU */
+#define WM8995_GP1_PU_WIDTH                          1	/* GP1_PU */
+#define WM8995_GP1_PD                           0x2000	/* GP1_PD */
+#define WM8995_GP1_PD_MASK                      0x2000	/* GP1_PD */
+#define WM8995_GP1_PD_SHIFT                         13	/* GP1_PD */
+#define WM8995_GP1_PD_WIDTH                          1	/* GP1_PD */
+#define WM8995_GP1_POL                          0x0400	/* GP1_POL */
+#define WM8995_GP1_POL_MASK                     0x0400	/* GP1_POL */
+#define WM8995_GP1_POL_SHIFT                        10	/* GP1_POL */
+#define WM8995_GP1_POL_WIDTH                         1	/* GP1_POL */
+#define WM8995_GP1_OP_CFG                       0x0200	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_MASK                  0x0200	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_SHIFT                      9	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_WIDTH                      1	/* GP1_OP_CFG */
+#define WM8995_GP1_DB                           0x0100	/* GP1_DB */
+#define WM8995_GP1_DB_MASK                      0x0100	/* GP1_DB */
+#define WM8995_GP1_DB_SHIFT                          8	/* GP1_DB */
+#define WM8995_GP1_DB_WIDTH                          1	/* GP1_DB */
+#define WM8995_GP1_LVL                          0x0040	/* GP1_LVL */
+#define WM8995_GP1_LVL_MASK                     0x0040	/* GP1_LVL */
+#define WM8995_GP1_LVL_SHIFT                         6	/* GP1_LVL */
+#define WM8995_GP1_LVL_WIDTH                         1	/* GP1_LVL */
+#define WM8995_GP1_FN_MASK                      0x001F	/* GP1_FN - [4:0] */
+#define WM8995_GP1_FN_SHIFT                          0	/* GP1_FN - [4:0] */
+#define WM8995_GP1_FN_WIDTH                          5	/* GP1_FN - [4:0] */
+
+/*
+ * R1793 (0x701) - GPIO 2
+ */
+#define WM8995_GP2_DIR                          0x8000	/* GP2_DIR */
+#define WM8995_GP2_DIR_MASK                     0x8000	/* GP2_DIR */
+#define WM8995_GP2_DIR_SHIFT                        15	/* GP2_DIR */
+#define WM8995_GP2_DIR_WIDTH                         1	/* GP2_DIR */
+#define WM8995_GP2_PU                           0x4000	/* GP2_PU */
+#define WM8995_GP2_PU_MASK                      0x4000	/* GP2_PU */
+#define WM8995_GP2_PU_SHIFT                         14	/* GP2_PU */
+#define WM8995_GP2_PU_WIDTH                          1	/* GP2_PU */
+#define WM8995_GP2_PD                           0x2000	/* GP2_PD */
+#define WM8995_GP2_PD_MASK                      0x2000	/* GP2_PD */
+#define WM8995_GP2_PD_SHIFT                         13	/* GP2_PD */
+#define WM8995_GP2_PD_WIDTH                          1	/* GP2_PD */
+#define WM8995_GP2_POL                          0x0400	/* GP2_POL */
+#define WM8995_GP2_POL_MASK                     0x0400	/* GP2_POL */
+#define WM8995_GP2_POL_SHIFT                        10	/* GP2_POL */
+#define WM8995_GP2_POL_WIDTH                         1	/* GP2_POL */
+#define WM8995_GP2_OP_CFG                       0x0200	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_MASK                  0x0200	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_SHIFT                      9	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_WIDTH                      1	/* GP2_OP_CFG */
+#define WM8995_GP2_DB                           0x0100	/* GP2_DB */
+#define WM8995_GP2_DB_MASK                      0x0100	/* GP2_DB */
+#define WM8995_GP2_DB_SHIFT                          8	/* GP2_DB */
+#define WM8995_GP2_DB_WIDTH                          1	/* GP2_DB */
+#define WM8995_GP2_LVL                          0x0040	/* GP2_LVL */
+#define WM8995_GP2_LVL_MASK                     0x0040	/* GP2_LVL */
+#define WM8995_GP2_LVL_SHIFT                         6	/* GP2_LVL */
+#define WM8995_GP2_LVL_WIDTH                         1	/* GP2_LVL */
+#define WM8995_GP2_FN_MASK                      0x001F	/* GP2_FN - [4:0] */
+#define WM8995_GP2_FN_SHIFT                          0	/* GP2_FN - [4:0] */
+#define WM8995_GP2_FN_WIDTH                          5	/* GP2_FN - [4:0] */
+
+/*
+ * R1794 (0x702) - GPIO 3
+ */
+#define WM8995_GP3_DIR                          0x8000	/* GP3_DIR */
+#define WM8995_GP3_DIR_MASK                     0x8000	/* GP3_DIR */
+#define WM8995_GP3_DIR_SHIFT                        15	/* GP3_DIR */
+#define WM8995_GP3_DIR_WIDTH                         1	/* GP3_DIR */
+#define WM8995_GP3_PU                           0x4000	/* GP3_PU */
+#define WM8995_GP3_PU_MASK                      0x4000	/* GP3_PU */
+#define WM8995_GP3_PU_SHIFT                         14	/* GP3_PU */
+#define WM8995_GP3_PU_WIDTH                          1	/* GP3_PU */
+#define WM8995_GP3_PD                           0x2000	/* GP3_PD */
+#define WM8995_GP3_PD_MASK                      0x2000	/* GP3_PD */
+#define WM8995_GP3_PD_SHIFT                         13	/* GP3_PD */
+#define WM8995_GP3_PD_WIDTH                          1	/* GP3_PD */
+#define WM8995_GP3_POL                          0x0400	/* GP3_POL */
+#define WM8995_GP3_POL_MASK                     0x0400	/* GP3_POL */
+#define WM8995_GP3_POL_SHIFT                        10	/* GP3_POL */
+#define WM8995_GP3_POL_WIDTH                         1	/* GP3_POL */
+#define WM8995_GP3_OP_CFG                       0x0200	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_MASK                  0x0200	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_SHIFT                      9	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_WIDTH                      1	/* GP3_OP_CFG */
+#define WM8995_GP3_DB                           0x0100	/* GP3_DB */
+#define WM8995_GP3_DB_MASK                      0x0100	/* GP3_DB */
+#define WM8995_GP3_DB_SHIFT                          8	/* GP3_DB */
+#define WM8995_GP3_DB_WIDTH                          1	/* GP3_DB */
+#define WM8995_GP3_LVL                          0x0040	/* GP3_LVL */
+#define WM8995_GP3_LVL_MASK                     0x0040	/* GP3_LVL */
+#define WM8995_GP3_LVL_SHIFT                         6	/* GP3_LVL */
+#define WM8995_GP3_LVL_WIDTH                         1	/* GP3_LVL */
+#define WM8995_GP3_FN_MASK                      0x001F	/* GP3_FN - [4:0] */
+#define WM8995_GP3_FN_SHIFT                          0	/* GP3_FN - [4:0] */
+#define WM8995_GP3_FN_WIDTH                          5	/* GP3_FN - [4:0] */
+
+/*
+ * R1795 (0x703) - GPIO 4
+ */
+#define WM8995_GP4_DIR                          0x8000	/* GP4_DIR */
+#define WM8995_GP4_DIR_MASK                     0x8000	/* GP4_DIR */
+#define WM8995_GP4_DIR_SHIFT                        15	/* GP4_DIR */
+#define WM8995_GP4_DIR_WIDTH                         1	/* GP4_DIR */
+#define WM8995_GP4_PU                           0x4000	/* GP4_PU */
+#define WM8995_GP4_PU_MASK                      0x4000	/* GP4_PU */
+#define WM8995_GP4_PU_SHIFT                         14	/* GP4_PU */
+#define WM8995_GP4_PU_WIDTH                          1	/* GP4_PU */
+#define WM8995_GP4_PD                           0x2000	/* GP4_PD */
+#define WM8995_GP4_PD_MASK                      0x2000	/* GP4_PD */
+#define WM8995_GP4_PD_SHIFT                         13	/* GP4_PD */
+#define WM8995_GP4_PD_WIDTH                          1	/* GP4_PD */
+#define WM8995_GP4_POL                          0x0400	/* GP4_POL */
+#define WM8995_GP4_POL_MASK                     0x0400	/* GP4_POL */
+#define WM8995_GP4_POL_SHIFT                        10	/* GP4_POL */
+#define WM8995_GP4_POL_WIDTH                         1	/* GP4_POL */
+#define WM8995_GP4_OP_CFG                       0x0200	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_MASK                  0x0200	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_SHIFT                      9	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_WIDTH                      1	/* GP4_OP_CFG */
+#define WM8995_GP4_DB                           0x0100	/* GP4_DB */
+#define WM8995_GP4_DB_MASK                      0x0100	/* GP4_DB */
+#define WM8995_GP4_DB_SHIFT                          8	/* GP4_DB */
+#define WM8995_GP4_DB_WIDTH                          1	/* GP4_DB */
+#define WM8995_GP4_LVL                          0x0040	/* GP4_LVL */
+#define WM8995_GP4_LVL_MASK                     0x0040	/* GP4_LVL */
+#define WM8995_GP4_LVL_SHIFT                         6	/* GP4_LVL */
+#define WM8995_GP4_LVL_WIDTH                         1	/* GP4_LVL */
+#define WM8995_GP4_FN_MASK                      0x001F	/* GP4_FN - [4:0] */
+#define WM8995_GP4_FN_SHIFT                          0	/* GP4_FN - [4:0] */
+#define WM8995_GP4_FN_WIDTH                          5	/* GP4_FN - [4:0] */
+
+/*
+ * R1796 (0x704) - GPIO 5
+ */
+#define WM8995_GP5_DIR                          0x8000	/* GP5_DIR */
+#define WM8995_GP5_DIR_MASK                     0x8000	/* GP5_DIR */
+#define WM8995_GP5_DIR_SHIFT                        15	/* GP5_DIR */
+#define WM8995_GP5_DIR_WIDTH                         1	/* GP5_DIR */
+#define WM8995_GP5_PU                           0x4000	/* GP5_PU */
+#define WM8995_GP5_PU_MASK                      0x4000	/* GP5_PU */
+#define WM8995_GP5_PU_SHIFT                         14	/* GP5_PU */
+#define WM8995_GP5_PU_WIDTH                          1	/* GP5_PU */
+#define WM8995_GP5_PD                           0x2000	/* GP5_PD */
+#define WM8995_GP5_PD_MASK                      0x2000	/* GP5_PD */
+#define WM8995_GP5_PD_SHIFT                         13	/* GP5_PD */
+#define WM8995_GP5_PD_WIDTH                          1	/* GP5_PD */
+#define WM8995_GP5_POL                          0x0400	/* GP5_POL */
+#define WM8995_GP5_POL_MASK                     0x0400	/* GP5_POL */
+#define WM8995_GP5_POL_SHIFT                        10	/* GP5_POL */
+#define WM8995_GP5_POL_WIDTH                         1	/* GP5_POL */
+#define WM8995_GP5_OP_CFG                       0x0200	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_MASK                  0x0200	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_SHIFT                      9	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_WIDTH                      1	/* GP5_OP_CFG */
+#define WM8995_GP5_DB                           0x0100	/* GP5_DB */
+#define WM8995_GP5_DB_MASK                      0x0100	/* GP5_DB */
+#define WM8995_GP5_DB_SHIFT                          8	/* GP5_DB */
+#define WM8995_GP5_DB_WIDTH                          1	/* GP5_DB */
+#define WM8995_GP5_LVL                          0x0040	/* GP5_LVL */
+#define WM8995_GP5_LVL_MASK                     0x0040	/* GP5_LVL */
+#define WM8995_GP5_LVL_SHIFT                         6	/* GP5_LVL */
+#define WM8995_GP5_LVL_WIDTH                         1	/* GP5_LVL */
+#define WM8995_GP5_FN_MASK                      0x001F	/* GP5_FN - [4:0] */
+#define WM8995_GP5_FN_SHIFT                          0	/* GP5_FN - [4:0] */
+#define WM8995_GP5_FN_WIDTH                          5	/* GP5_FN - [4:0] */
+
+/*
+ * R1797 (0x705) - GPIO 6
+ */
+#define WM8995_GP6_DIR                          0x8000	/* GP6_DIR */
+#define WM8995_GP6_DIR_MASK                     0x8000	/* GP6_DIR */
+#define WM8995_GP6_DIR_SHIFT                        15	/* GP6_DIR */
+#define WM8995_GP6_DIR_WIDTH                         1	/* GP6_DIR */
+#define WM8995_GP6_PU                           0x4000	/* GP6_PU */
+#define WM8995_GP6_PU_MASK                      0x4000	/* GP6_PU */
+#define WM8995_GP6_PU_SHIFT                         14	/* GP6_PU */
+#define WM8995_GP6_PU_WIDTH                          1	/* GP6_PU */
+#define WM8995_GP6_PD                           0x2000	/* GP6_PD */
+#define WM8995_GP6_PD_MASK                      0x2000	/* GP6_PD */
+#define WM8995_GP6_PD_SHIFT                         13	/* GP6_PD */
+#define WM8995_GP6_PD_WIDTH                          1	/* GP6_PD */
+#define WM8995_GP6_POL                          0x0400	/* GP6_POL */
+#define WM8995_GP6_POL_MASK                     0x0400	/* GP6_POL */
+#define WM8995_GP6_POL_SHIFT                        10	/* GP6_POL */
+#define WM8995_GP6_POL_WIDTH                         1	/* GP6_POL */
+#define WM8995_GP6_OP_CFG                       0x0200	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_MASK                  0x0200	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_SHIFT                      9	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_WIDTH                      1	/* GP6_OP_CFG */
+#define WM8995_GP6_DB                           0x0100	/* GP6_DB */
+#define WM8995_GP6_DB_MASK                      0x0100	/* GP6_DB */
+#define WM8995_GP6_DB_SHIFT                          8	/* GP6_DB */
+#define WM8995_GP6_DB_WIDTH                          1	/* GP6_DB */
+#define WM8995_GP6_LVL                          0x0040	/* GP6_LVL */
+#define WM8995_GP6_LVL_MASK                     0x0040	/* GP6_LVL */
+#define WM8995_GP6_LVL_SHIFT                         6	/* GP6_LVL */
+#define WM8995_GP6_LVL_WIDTH                         1	/* GP6_LVL */
+#define WM8995_GP6_FN_MASK                      0x001F	/* GP6_FN - [4:0] */
+#define WM8995_GP6_FN_SHIFT                          0	/* GP6_FN - [4:0] */
+#define WM8995_GP6_FN_WIDTH                          5	/* GP6_FN - [4:0] */
+
+/*
+ * R1798 (0x706) - GPIO 7
+ */
+#define WM8995_GP7_DIR                          0x8000	/* GP7_DIR */
+#define WM8995_GP7_DIR_MASK                     0x8000	/* GP7_DIR */
+#define WM8995_GP7_DIR_SHIFT                        15	/* GP7_DIR */
+#define WM8995_GP7_DIR_WIDTH                         1	/* GP7_DIR */
+#define WM8995_GP7_PU                           0x4000	/* GP7_PU */
+#define WM8995_GP7_PU_MASK                      0x4000	/* GP7_PU */
+#define WM8995_GP7_PU_SHIFT                         14	/* GP7_PU */
+#define WM8995_GP7_PU_WIDTH                          1	/* GP7_PU */
+#define WM8995_GP7_PD                           0x2000	/* GP7_PD */
+#define WM8995_GP7_PD_MASK                      0x2000	/* GP7_PD */
+#define WM8995_GP7_PD_SHIFT                         13	/* GP7_PD */
+#define WM8995_GP7_PD_WIDTH                          1	/* GP7_PD */
+#define WM8995_GP7_POL                          0x0400	/* GP7_POL */
+#define WM8995_GP7_POL_MASK                     0x0400	/* GP7_POL */
+#define WM8995_GP7_POL_SHIFT                        10	/* GP7_POL */
+#define WM8995_GP7_POL_WIDTH                         1	/* GP7_POL */
+#define WM8995_GP7_OP_CFG                       0x0200	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_MASK                  0x0200	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_SHIFT                      9	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_WIDTH                      1	/* GP7_OP_CFG */
+#define WM8995_GP7_DB                           0x0100	/* GP7_DB */
+#define WM8995_GP7_DB_MASK                      0x0100	/* GP7_DB */
+#define WM8995_GP7_DB_SHIFT                          8	/* GP7_DB */
+#define WM8995_GP7_DB_WIDTH                          1	/* GP7_DB */
+#define WM8995_GP7_LVL                          0x0040	/* GP7_LVL */
+#define WM8995_GP7_LVL_MASK                     0x0040	/* GP7_LVL */
+#define WM8995_GP7_LVL_SHIFT                         6	/* GP7_LVL */
+#define WM8995_GP7_LVL_WIDTH                         1	/* GP7_LVL */
+#define WM8995_GP7_FN_MASK                      0x001F	/* GP7_FN - [4:0] */
+#define WM8995_GP7_FN_SHIFT                          0	/* GP7_FN - [4:0] */
+#define WM8995_GP7_FN_WIDTH                          5	/* GP7_FN - [4:0] */
+
+/*
+ * R1799 (0x707) - GPIO 8
+ */
+#define WM8995_GP8_DIR                          0x8000	/* GP8_DIR */
+#define WM8995_GP8_DIR_MASK                     0x8000	/* GP8_DIR */
+#define WM8995_GP8_DIR_SHIFT                        15	/* GP8_DIR */
+#define WM8995_GP8_DIR_WIDTH                         1	/* GP8_DIR */
+#define WM8995_GP8_PU                           0x4000	/* GP8_PU */
+#define WM8995_GP8_PU_MASK                      0x4000	/* GP8_PU */
+#define WM8995_GP8_PU_SHIFT                         14	/* GP8_PU */
+#define WM8995_GP8_PU_WIDTH                          1	/* GP8_PU */
+#define WM8995_GP8_PD                           0x2000	/* GP8_PD */
+#define WM8995_GP8_PD_MASK                      0x2000	/* GP8_PD */
+#define WM8995_GP8_PD_SHIFT                         13	/* GP8_PD */
+#define WM8995_GP8_PD_WIDTH                          1	/* GP8_PD */
+#define WM8995_GP8_POL                          0x0400	/* GP8_POL */
+#define WM8995_GP8_POL_MASK                     0x0400	/* GP8_POL */
+#define WM8995_GP8_POL_SHIFT                        10	/* GP8_POL */
+#define WM8995_GP8_POL_WIDTH                         1	/* GP8_POL */
+#define WM8995_GP8_OP_CFG                       0x0200	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_MASK                  0x0200	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_SHIFT                      9	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_WIDTH                      1	/* GP8_OP_CFG */
+#define WM8995_GP8_DB                           0x0100	/* GP8_DB */
+#define WM8995_GP8_DB_MASK                      0x0100	/* GP8_DB */
+#define WM8995_GP8_DB_SHIFT                          8	/* GP8_DB */
+#define WM8995_GP8_DB_WIDTH                          1	/* GP8_DB */
+#define WM8995_GP8_LVL                          0x0040	/* GP8_LVL */
+#define WM8995_GP8_LVL_MASK                     0x0040	/* GP8_LVL */
+#define WM8995_GP8_LVL_SHIFT                         6	/* GP8_LVL */
+#define WM8995_GP8_LVL_WIDTH                         1	/* GP8_LVL */
+#define WM8995_GP8_FN_MASK                      0x001F	/* GP8_FN - [4:0] */
+#define WM8995_GP8_FN_SHIFT                          0	/* GP8_FN - [4:0] */
+#define WM8995_GP8_FN_WIDTH                          5	/* GP8_FN - [4:0] */
+
+/*
+ * R1800 (0x708) - GPIO 9
+ */
+#define WM8995_GP9_DIR                          0x8000	/* GP9_DIR */
+#define WM8995_GP9_DIR_MASK                     0x8000	/* GP9_DIR */
+#define WM8995_GP9_DIR_SHIFT                        15	/* GP9_DIR */
+#define WM8995_GP9_DIR_WIDTH                         1	/* GP9_DIR */
+#define WM8995_GP9_PU                           0x4000	/* GP9_PU */
+#define WM8995_GP9_PU_MASK                      0x4000	/* GP9_PU */
+#define WM8995_GP9_PU_SHIFT                         14	/* GP9_PU */
+#define WM8995_GP9_PU_WIDTH                          1	/* GP9_PU */
+#define WM8995_GP9_PD                           0x2000	/* GP9_PD */
+#define WM8995_GP9_PD_MASK                      0x2000	/* GP9_PD */
+#define WM8995_GP9_PD_SHIFT                         13	/* GP9_PD */
+#define WM8995_GP9_PD_WIDTH                          1	/* GP9_PD */
+#define WM8995_GP9_POL                          0x0400	/* GP9_POL */
+#define WM8995_GP9_POL_MASK                     0x0400	/* GP9_POL */
+#define WM8995_GP9_POL_SHIFT                        10	/* GP9_POL */
+#define WM8995_GP9_POL_WIDTH                         1	/* GP9_POL */
+#define WM8995_GP9_OP_CFG                       0x0200	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_MASK                  0x0200	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_SHIFT                      9	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_WIDTH                      1	/* GP9_OP_CFG */
+#define WM8995_GP9_DB                           0x0100	/* GP9_DB */
+#define WM8995_GP9_DB_MASK                      0x0100	/* GP9_DB */
+#define WM8995_GP9_DB_SHIFT                          8	/* GP9_DB */
+#define WM8995_GP9_DB_WIDTH                          1	/* GP9_DB */
+#define WM8995_GP9_LVL                          0x0040	/* GP9_LVL */
+#define WM8995_GP9_LVL_MASK                     0x0040	/* GP9_LVL */
+#define WM8995_GP9_LVL_SHIFT                         6	/* GP9_LVL */
+#define WM8995_GP9_LVL_WIDTH                         1	/* GP9_LVL */
+#define WM8995_GP9_FN_MASK                      0x001F	/* GP9_FN - [4:0] */
+#define WM8995_GP9_FN_SHIFT                          0	/* GP9_FN - [4:0] */
+#define WM8995_GP9_FN_WIDTH                          5	/* GP9_FN - [4:0] */
+
+/*
+ * R1801 (0x709) - GPIO 10
+ */
+#define WM8995_GP10_DIR                         0x8000	/* GP10_DIR */
+#define WM8995_GP10_DIR_MASK                    0x8000	/* GP10_DIR */
+#define WM8995_GP10_DIR_SHIFT                       15	/* GP10_DIR */
+#define WM8995_GP10_DIR_WIDTH                        1	/* GP10_DIR */
+#define WM8995_GP10_PU                          0x4000	/* GP10_PU */
+#define WM8995_GP10_PU_MASK                     0x4000	/* GP10_PU */
+#define WM8995_GP10_PU_SHIFT                        14	/* GP10_PU */
+#define WM8995_GP10_PU_WIDTH                         1	/* GP10_PU */
+#define WM8995_GP10_PD                          0x2000	/* GP10_PD */
+#define WM8995_GP10_PD_MASK                     0x2000	/* GP10_PD */
+#define WM8995_GP10_PD_SHIFT                        13	/* GP10_PD */
+#define WM8995_GP10_PD_WIDTH                         1	/* GP10_PD */
+#define WM8995_GP10_POL                         0x0400	/* GP10_POL */
+#define WM8995_GP10_POL_MASK                    0x0400	/* GP10_POL */
+#define WM8995_GP10_POL_SHIFT                       10	/* GP10_POL */
+#define WM8995_GP10_POL_WIDTH                        1	/* GP10_POL */
+#define WM8995_GP10_OP_CFG                      0x0200	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_MASK                 0x0200	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_SHIFT                     9	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_WIDTH                     1	/* GP10_OP_CFG */
+#define WM8995_GP10_DB                          0x0100	/* GP10_DB */
+#define WM8995_GP10_DB_MASK                     0x0100	/* GP10_DB */
+#define WM8995_GP10_DB_SHIFT                         8	/* GP10_DB */
+#define WM8995_GP10_DB_WIDTH                         1	/* GP10_DB */
+#define WM8995_GP10_LVL                         0x0040	/* GP10_LVL */
+#define WM8995_GP10_LVL_MASK                    0x0040	/* GP10_LVL */
+#define WM8995_GP10_LVL_SHIFT                        6	/* GP10_LVL */
+#define WM8995_GP10_LVL_WIDTH                        1	/* GP10_LVL */
+#define WM8995_GP10_FN_MASK                     0x001F	/* GP10_FN - [4:0] */
+#define WM8995_GP10_FN_SHIFT                         0	/* GP10_FN - [4:0] */
+#define WM8995_GP10_FN_WIDTH                         5	/* GP10_FN - [4:0] */
+
+/*
+ * R1802 (0x70A) - GPIO 11
+ */
+#define WM8995_GP11_DIR                         0x8000	/* GP11_DIR */
+#define WM8995_GP11_DIR_MASK                    0x8000	/* GP11_DIR */
+#define WM8995_GP11_DIR_SHIFT                       15	/* GP11_DIR */
+#define WM8995_GP11_DIR_WIDTH                        1	/* GP11_DIR */
+#define WM8995_GP11_PU                          0x4000	/* GP11_PU */
+#define WM8995_GP11_PU_MASK                     0x4000	/* GP11_PU */
+#define WM8995_GP11_PU_SHIFT                        14	/* GP11_PU */
+#define WM8995_GP11_PU_WIDTH                         1	/* GP11_PU */
+#define WM8995_GP11_PD                          0x2000	/* GP11_PD */
+#define WM8995_GP11_PD_MASK                     0x2000	/* GP11_PD */
+#define WM8995_GP11_PD_SHIFT                        13	/* GP11_PD */
+#define WM8995_GP11_PD_WIDTH                         1	/* GP11_PD */
+#define WM8995_GP11_POL                         0x0400	/* GP11_POL */
+#define WM8995_GP11_POL_MASK                    0x0400	/* GP11_POL */
+#define WM8995_GP11_POL_SHIFT                       10	/* GP11_POL */
+#define WM8995_GP11_POL_WIDTH                        1	/* GP11_POL */
+#define WM8995_GP11_OP_CFG                      0x0200	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_MASK                 0x0200	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_SHIFT                     9	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_WIDTH                     1	/* GP11_OP_CFG */
+#define WM8995_GP11_DB                          0x0100	/* GP11_DB */
+#define WM8995_GP11_DB_MASK                     0x0100	/* GP11_DB */
+#define WM8995_GP11_DB_SHIFT                         8	/* GP11_DB */
+#define WM8995_GP11_DB_WIDTH                         1	/* GP11_DB */
+#define WM8995_GP11_LVL                         0x0040	/* GP11_LVL */
+#define WM8995_GP11_LVL_MASK                    0x0040	/* GP11_LVL */
+#define WM8995_GP11_LVL_SHIFT                        6	/* GP11_LVL */
+#define WM8995_GP11_LVL_WIDTH                        1	/* GP11_LVL */
+#define WM8995_GP11_FN_MASK                     0x001F	/* GP11_FN - [4:0] */
+#define WM8995_GP11_FN_SHIFT                         0	/* GP11_FN - [4:0] */
+#define WM8995_GP11_FN_WIDTH                         5	/* GP11_FN - [4:0] */
+
+/*
+ * R1803 (0x70B) - GPIO 12
+ */
+#define WM8995_GP12_DIR                         0x8000	/* GP12_DIR */
+#define WM8995_GP12_DIR_MASK                    0x8000	/* GP12_DIR */
+#define WM8995_GP12_DIR_SHIFT                       15	/* GP12_DIR */
+#define WM8995_GP12_DIR_WIDTH                        1	/* GP12_DIR */
+#define WM8995_GP12_PU                          0x4000	/* GP12_PU */
+#define WM8995_GP12_PU_MASK                     0x4000	/* GP12_PU */
+#define WM8995_GP12_PU_SHIFT                        14	/* GP12_PU */
+#define WM8995_GP12_PU_WIDTH                         1	/* GP12_PU */
+#define WM8995_GP12_PD                          0x2000	/* GP12_PD */
+#define WM8995_GP12_PD_MASK                     0x2000	/* GP12_PD */
+#define WM8995_GP12_PD_SHIFT                        13	/* GP12_PD */
+#define WM8995_GP12_PD_WIDTH                         1	/* GP12_PD */
+#define WM8995_GP12_POL                         0x0400	/* GP12_POL */
+#define WM8995_GP12_POL_MASK                    0x0400	/* GP12_POL */
+#define WM8995_GP12_POL_SHIFT                       10	/* GP12_POL */
+#define WM8995_GP12_POL_WIDTH                        1	/* GP12_POL */
+#define WM8995_GP12_OP_CFG                      0x0200	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_MASK                 0x0200	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_SHIFT                     9	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_WIDTH                     1	/* GP12_OP_CFG */
+#define WM8995_GP12_DB                          0x0100	/* GP12_DB */
+#define WM8995_GP12_DB_MASK                     0x0100	/* GP12_DB */
+#define WM8995_GP12_DB_SHIFT                         8	/* GP12_DB */
+#define WM8995_GP12_DB_WIDTH                         1	/* GP12_DB */
+#define WM8995_GP12_LVL                         0x0040	/* GP12_LVL */
+#define WM8995_GP12_LVL_MASK                    0x0040	/* GP12_LVL */
+#define WM8995_GP12_LVL_SHIFT                        6	/* GP12_LVL */
+#define WM8995_GP12_LVL_WIDTH                        1	/* GP12_LVL */
+#define WM8995_GP12_FN_MASK                     0x001F	/* GP12_FN - [4:0] */
+#define WM8995_GP12_FN_SHIFT                         0	/* GP12_FN - [4:0] */
+#define WM8995_GP12_FN_WIDTH                         5	/* GP12_FN - [4:0] */
+
+/*
+ * R1804 (0x70C) - GPIO 13
+ */
+#define WM8995_GP13_DIR                         0x8000	/* GP13_DIR */
+#define WM8995_GP13_DIR_MASK                    0x8000	/* GP13_DIR */
+#define WM8995_GP13_DIR_SHIFT                       15	/* GP13_DIR */
+#define WM8995_GP13_DIR_WIDTH                        1	/* GP13_DIR */
+#define WM8995_GP13_PU                          0x4000	/* GP13_PU */
+#define WM8995_GP13_PU_MASK                     0x4000	/* GP13_PU */
+#define WM8995_GP13_PU_SHIFT                        14	/* GP13_PU */
+#define WM8995_GP13_PU_WIDTH                         1	/* GP13_PU */
+#define WM8995_GP13_PD                          0x2000	/* GP13_PD */
+#define WM8995_GP13_PD_MASK                     0x2000	/* GP13_PD */
+#define WM8995_GP13_PD_SHIFT                        13	/* GP13_PD */
+#define WM8995_GP13_PD_WIDTH                         1	/* GP13_PD */
+#define WM8995_GP13_POL                         0x0400	/* GP13_POL */
+#define WM8995_GP13_POL_MASK                    0x0400	/* GP13_POL */
+#define WM8995_GP13_POL_SHIFT                       10	/* GP13_POL */
+#define WM8995_GP13_POL_WIDTH                        1	/* GP13_POL */
+#define WM8995_GP13_OP_CFG                      0x0200	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_MASK                 0x0200	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_SHIFT                     9	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_WIDTH                     1	/* GP13_OP_CFG */
+#define WM8995_GP13_DB                          0x0100	/* GP13_DB */
+#define WM8995_GP13_DB_MASK                     0x0100	/* GP13_DB */
+#define WM8995_GP13_DB_SHIFT                         8	/* GP13_DB */
+#define WM8995_GP13_DB_WIDTH                         1	/* GP13_DB */
+#define WM8995_GP13_LVL                         0x0040	/* GP13_LVL */
+#define WM8995_GP13_LVL_MASK                    0x0040	/* GP13_LVL */
+#define WM8995_GP13_LVL_SHIFT                        6	/* GP13_LVL */
+#define WM8995_GP13_LVL_WIDTH                        1	/* GP13_LVL */
+#define WM8995_GP13_FN_MASK                     0x001F	/* GP13_FN - [4:0] */
+#define WM8995_GP13_FN_SHIFT                         0	/* GP13_FN - [4:0] */
+#define WM8995_GP13_FN_WIDTH                         5	/* GP13_FN - [4:0] */
+
+/*
+ * R1805 (0x70D) - GPIO 14
+ */
+#define WM8995_GP14_DIR                         0x8000	/* GP14_DIR */
+#define WM8995_GP14_DIR_MASK                    0x8000	/* GP14_DIR */
+#define WM8995_GP14_DIR_SHIFT                       15	/* GP14_DIR */
+#define WM8995_GP14_DIR_WIDTH                        1	/* GP14_DIR */
+#define WM8995_GP14_PU                          0x4000	/* GP14_PU */
+#define WM8995_GP14_PU_MASK                     0x4000	/* GP14_PU */
+#define WM8995_GP14_PU_SHIFT                        14	/* GP14_PU */
+#define WM8995_GP14_PU_WIDTH                         1	/* GP14_PU */
+#define WM8995_GP14_PD                          0x2000	/* GP14_PD */
+#define WM8995_GP14_PD_MASK                     0x2000	/* GP14_PD */
+#define WM8995_GP14_PD_SHIFT                        13	/* GP14_PD */
+#define WM8995_GP14_PD_WIDTH                         1	/* GP14_PD */
+#define WM8995_GP14_POL                         0x0400	/* GP14_POL */
+#define WM8995_GP14_POL_MASK                    0x0400	/* GP14_POL */
+#define WM8995_GP14_POL_SHIFT                       10	/* GP14_POL */
+#define WM8995_GP14_POL_WIDTH                        1	/* GP14_POL */
+#define WM8995_GP14_OP_CFG                      0x0200	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_MASK                 0x0200	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_SHIFT                     9	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_WIDTH                     1	/* GP14_OP_CFG */
+#define WM8995_GP14_DB                          0x0100	/* GP14_DB */
+#define WM8995_GP14_DB_MASK                     0x0100	/* GP14_DB */
+#define WM8995_GP14_DB_SHIFT                         8	/* GP14_DB */
+#define WM8995_GP14_DB_WIDTH                         1	/* GP14_DB */
+#define WM8995_GP14_LVL                         0x0040	/* GP14_LVL */
+#define WM8995_GP14_LVL_MASK                    0x0040	/* GP14_LVL */
+#define WM8995_GP14_LVL_SHIFT                        6	/* GP14_LVL */
+#define WM8995_GP14_LVL_WIDTH                        1	/* GP14_LVL */
+#define WM8995_GP14_FN_MASK                     0x001F	/* GP14_FN - [4:0] */
+#define WM8995_GP14_FN_SHIFT                         0	/* GP14_FN - [4:0] */
+#define WM8995_GP14_FN_WIDTH                         5	/* GP14_FN - [4:0] */
+
+/*
+ * R1824 (0x720) - Pull Control (1)
+ */
+#define WM8995_DMICDAT3_PD                      0x4000	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_MASK                 0x4000	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_SHIFT                    14	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_WIDTH                     1	/* DMICDAT3_PD */
+#define WM8995_DMICDAT2_PD                      0x1000	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_MASK                 0x1000	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_SHIFT                    12	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_WIDTH                     1	/* DMICDAT2_PD */
+#define WM8995_DMICDAT1_PD                      0x0400	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_MASK                 0x0400	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_SHIFT                    10	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_WIDTH                     1	/* DMICDAT1_PD */
+#define WM8995_MCLK2_PU                         0x0200	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_MASK                    0x0200	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_SHIFT                        9	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_WIDTH                        1	/* MCLK2_PU */
+#define WM8995_MCLK2_PD                         0x0100	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_MASK                    0x0100	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_SHIFT                        8	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_WIDTH                        1	/* MCLK2_PD */
+#define WM8995_MCLK1_PU                         0x0080	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_MASK                    0x0080	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_SHIFT                        7	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_WIDTH                        1	/* MCLK1_PU */
+#define WM8995_MCLK1_PD                         0x0040	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_MASK                    0x0040	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_SHIFT                        6	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_WIDTH                        1	/* MCLK1_PD */
+#define WM8995_DACDAT1_PU                       0x0020	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_MASK                  0x0020	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_SHIFT                      5	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_WIDTH                      1	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PD                       0x0010	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_MASK                  0x0010	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_SHIFT                      4	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_WIDTH                      1	/* DACDAT1_PD */
+#define WM8995_DACLRCLK1_PU                     0x0008	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_MASK                0x0008	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_SHIFT                    3	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_WIDTH                    1	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PD                     0x0004	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_MASK                0x0004	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_SHIFT                    2	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_WIDTH                    1	/* DACLRCLK1_PD */
+#define WM8995_BCLK1_PU                         0x0002	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_MASK                    0x0002	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_SHIFT                        1	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_WIDTH                        1	/* BCLK1_PU */
+#define WM8995_BCLK1_PD                         0x0001	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_MASK                    0x0001	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_SHIFT                        0	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_WIDTH                        1	/* BCLK1_PD */
+
+/*
+ * R1825 (0x721) - Pull Control (2)
+ */
+#define WM8995_LDO1ENA_PD                       0x0010	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_MASK                  0x0010	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_SHIFT                      4	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_WIDTH                      1	/* LDO1ENA_PD */
+#define WM8995_MODE_PD                          0x0004	/* MODE_PD */
+#define WM8995_MODE_PD_MASK                     0x0004	/* MODE_PD */
+#define WM8995_MODE_PD_SHIFT                         2	/* MODE_PD */
+#define WM8995_MODE_PD_WIDTH                         1	/* MODE_PD */
+#define WM8995_CSNADDR_PD                       0x0001	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_MASK                  0x0001	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_SHIFT                      0	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_WIDTH                      1	/* CSNADDR_PD */
+
+/*
+ * R1840 (0x730) - Interrupt Status 1
+ */
+#define WM8995_GP14_EINT                        0x2000	/* GP14_EINT */
+#define WM8995_GP14_EINT_MASK                   0x2000	/* GP14_EINT */
+#define WM8995_GP14_EINT_SHIFT                      13	/* GP14_EINT */
+#define WM8995_GP14_EINT_WIDTH                       1	/* GP14_EINT */
+#define WM8995_GP13_EINT                        0x1000	/* GP13_EINT */
+#define WM8995_GP13_EINT_MASK                   0x1000	/* GP13_EINT */
+#define WM8995_GP13_EINT_SHIFT                      12	/* GP13_EINT */
+#define WM8995_GP13_EINT_WIDTH                       1	/* GP13_EINT */
+#define WM8995_GP12_EINT                        0x0800	/* GP12_EINT */
+#define WM8995_GP12_EINT_MASK                   0x0800	/* GP12_EINT */
+#define WM8995_GP12_EINT_SHIFT                      11	/* GP12_EINT */
+#define WM8995_GP12_EINT_WIDTH                       1	/* GP12_EINT */
+#define WM8995_GP11_EINT                        0x0400	/* GP11_EINT */
+#define WM8995_GP11_EINT_MASK                   0x0400	/* GP11_EINT */
+#define WM8995_GP11_EINT_SHIFT                      10	/* GP11_EINT */
+#define WM8995_GP11_EINT_WIDTH                       1	/* GP11_EINT */
+#define WM8995_GP10_EINT                        0x0200	/* GP10_EINT */
+#define WM8995_GP10_EINT_MASK                   0x0200	/* GP10_EINT */
+#define WM8995_GP10_EINT_SHIFT                       9	/* GP10_EINT */
+#define WM8995_GP10_EINT_WIDTH                       1	/* GP10_EINT */
+#define WM8995_GP9_EINT                         0x0100	/* GP9_EINT */
+#define WM8995_GP9_EINT_MASK                    0x0100	/* GP9_EINT */
+#define WM8995_GP9_EINT_SHIFT                        8	/* GP9_EINT */
+#define WM8995_GP9_EINT_WIDTH                        1	/* GP9_EINT */
+#define WM8995_GP8_EINT                         0x0080	/* GP8_EINT */
+#define WM8995_GP8_EINT_MASK                    0x0080	/* GP8_EINT */
+#define WM8995_GP8_EINT_SHIFT                        7	/* GP8_EINT */
+#define WM8995_GP8_EINT_WIDTH                        1	/* GP8_EINT */
+#define WM8995_GP7_EINT                         0x0040	/* GP7_EINT */
+#define WM8995_GP7_EINT_MASK                    0x0040	/* GP7_EINT */
+#define WM8995_GP7_EINT_SHIFT                        6	/* GP7_EINT */
+#define WM8995_GP7_EINT_WIDTH                        1	/* GP7_EINT */
+#define WM8995_GP6_EINT                         0x0020	/* GP6_EINT */
+#define WM8995_GP6_EINT_MASK                    0x0020	/* GP6_EINT */
+#define WM8995_GP6_EINT_SHIFT                        5	/* GP6_EINT */
+#define WM8995_GP6_EINT_WIDTH                        1	/* GP6_EINT */
+#define WM8995_GP5_EINT                         0x0010	/* GP5_EINT */
+#define WM8995_GP5_EINT_MASK                    0x0010	/* GP5_EINT */
+#define WM8995_GP5_EINT_SHIFT                        4	/* GP5_EINT */
+#define WM8995_GP5_EINT_WIDTH                        1	/* GP5_EINT */
+#define WM8995_GP4_EINT                         0x0008	/* GP4_EINT */
+#define WM8995_GP4_EINT_MASK                    0x0008	/* GP4_EINT */
+#define WM8995_GP4_EINT_SHIFT                        3	/* GP4_EINT */
+#define WM8995_GP4_EINT_WIDTH                        1	/* GP4_EINT */
+#define WM8995_GP3_EINT                         0x0004	/* GP3_EINT */
+#define WM8995_GP3_EINT_MASK                    0x0004	/* GP3_EINT */
+#define WM8995_GP3_EINT_SHIFT                        2	/* GP3_EINT */
+#define WM8995_GP3_EINT_WIDTH                        1	/* GP3_EINT */
+#define WM8995_GP2_EINT                         0x0002	/* GP2_EINT */
+#define WM8995_GP2_EINT_MASK                    0x0002	/* GP2_EINT */
+#define WM8995_GP2_EINT_SHIFT                        1	/* GP2_EINT */
+#define WM8995_GP2_EINT_WIDTH                        1	/* GP2_EINT */
+#define WM8995_GP1_EINT                         0x0001	/* GP1_EINT */
+#define WM8995_GP1_EINT_MASK                    0x0001	/* GP1_EINT */
+#define WM8995_GP1_EINT_SHIFT                        0	/* GP1_EINT */
+#define WM8995_GP1_EINT_WIDTH                        1	/* GP1_EINT */
+
+/*
+ * R1841 (0x731) - Interrupt Status 2
+ */
+#define WM8995_DCS_DONE_23_EINT                 0x1000	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_MASK            0x1000	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_SHIFT               12	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_WIDTH                1	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_01_EINT                 0x0800	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_MASK            0x0800	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_SHIFT               11	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_WIDTH                1	/* DCS_DONE_01_EINT */
+#define WM8995_WSEQ_DONE_EINT                   0x0400	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_MASK              0x0400	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_SHIFT                 10	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_WIDTH                  1	/* WSEQ_DONE_EINT */
+#define WM8995_FIFOS_ERR_EINT                   0x0200	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_MASK              0x0200	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_SHIFT                  9	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_WIDTH                  1	/* FIFOS_ERR_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT             0x0100	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_MASK        0x0100	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_SHIFT            8	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_WIDTH            1	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT            0x0080	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_MASK       0x0080	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_SHIFT           7	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_WIDTH           1	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT            0x0040	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_MASK       0x0040	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_SHIFT           6	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_WIDTH           1	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_SRC2_LOCK_EINT                   0x0020	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_MASK              0x0020	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_SHIFT                  5	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_WIDTH                  1	/* SRC2_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT                   0x0010	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_MASK              0x0010	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_SHIFT                  4	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_WIDTH                  1	/* SRC1_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT                   0x0008	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_MASK              0x0008	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_SHIFT                  3	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_WIDTH                  1	/* FLL2_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT                   0x0004	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_MASK              0x0004	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_SHIFT                  2	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_WIDTH                  1	/* FLL1_LOCK_EINT */
+#define WM8995_HP_DONE_EINT                     0x0002	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_MASK                0x0002	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_SHIFT                    1	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_WIDTH                    1	/* HP_DONE_EINT */
+#define WM8995_MICD_EINT                        0x0001	/* MICD_EINT */
+#define WM8995_MICD_EINT_MASK                   0x0001	/* MICD_EINT */
+#define WM8995_MICD_EINT_SHIFT                       0	/* MICD_EINT */
+#define WM8995_MICD_EINT_WIDTH                       1	/* MICD_EINT */
+
+/*
+ * R1842 (0x732) - Interrupt Raw Status 2
+ */
+#define WM8995_DCS_DONE_23_STS                  0x1000	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_MASK             0x1000	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_SHIFT                12	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_WIDTH                 1	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_01_STS                  0x0800	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_MASK             0x0800	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_SHIFT                11	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_WIDTH                 1	/* DCS_DONE_01_STS */
+#define WM8995_WSEQ_DONE_STS                    0x0400	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_MASK               0x0400	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_SHIFT                  10	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_WIDTH                   1	/* WSEQ_DONE_STS */
+#define WM8995_FIFOS_ERR_STS                    0x0200	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_MASK               0x0200	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_SHIFT                   9	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_WIDTH                   1	/* FIFOS_ERR_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS              0x0100	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_MASK         0x0100	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_SHIFT             8	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_WIDTH             1	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS             0x0080	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_MASK        0x0080	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_SHIFT            7	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_WIDTH            1	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS             0x0040	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_MASK        0x0040	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_SHIFT            6	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_WIDTH            1	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_SRC2_LOCK_STS                    0x0020	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_MASK               0x0020	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_SHIFT                   5	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_WIDTH                   1	/* SRC2_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS                    0x0010	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_MASK               0x0010	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_SHIFT                   4	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_WIDTH                   1	/* SRC1_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS                    0x0008	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_MASK               0x0008	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_SHIFT                   3	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_WIDTH                   1	/* FLL2_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS                    0x0004	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_MASK               0x0004	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_SHIFT                   2	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_WIDTH                   1	/* FLL1_LOCK_STS */
+
+/*
+ * R1848 (0x738) - Interrupt Status 1 Mask
+ */
+#define WM8995_IM_GP14_EINT                     0x2000	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_MASK                0x2000	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_SHIFT                   13	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_WIDTH                    1	/* IM_GP14_EINT */
+#define WM8995_IM_GP13_EINT                     0x1000	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_MASK                0x1000	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_SHIFT                   12	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_WIDTH                    1	/* IM_GP13_EINT */
+#define WM8995_IM_GP12_EINT                     0x0800	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_MASK                0x0800	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_SHIFT                   11	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_WIDTH                    1	/* IM_GP12_EINT */
+#define WM8995_IM_GP11_EINT                     0x0400	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_MASK                0x0400	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_SHIFT                   10	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_WIDTH                    1	/* IM_GP11_EINT */
+#define WM8995_IM_GP10_EINT                     0x0200	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_MASK                0x0200	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_SHIFT                    9	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_WIDTH                    1	/* IM_GP10_EINT */
+#define WM8995_IM_GP9_EINT                      0x0100	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_MASK                 0x0100	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_SHIFT                     8	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_WIDTH                     1	/* IM_GP9_EINT */
+#define WM8995_IM_GP8_EINT                      0x0080	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_MASK                 0x0080	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_SHIFT                     7	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_WIDTH                     1	/* IM_GP8_EINT */
+#define WM8995_IM_GP7_EINT                      0x0040	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_MASK                 0x0040	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_SHIFT                     6	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_WIDTH                     1	/* IM_GP7_EINT */
+#define WM8995_IM_GP6_EINT                      0x0020	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_MASK                 0x0020	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_SHIFT                     5	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_WIDTH                     1	/* IM_GP6_EINT */
+#define WM8995_IM_GP5_EINT                      0x0010	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_MASK                 0x0010	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_SHIFT                     4	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_WIDTH                     1	/* IM_GP5_EINT */
+#define WM8995_IM_GP4_EINT                      0x0008	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_MASK                 0x0008	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_SHIFT                     3	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_WIDTH                     1	/* IM_GP4_EINT */
+#define WM8995_IM_GP3_EINT                      0x0004	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_MASK                 0x0004	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_SHIFT                     2	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_WIDTH                     1	/* IM_GP3_EINT */
+#define WM8995_IM_GP2_EINT                      0x0002	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_MASK                 0x0002	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_SHIFT                     1	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_WIDTH                     1	/* IM_GP2_EINT */
+#define WM8995_IM_GP1_EINT                      0x0001	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_MASK                 0x0001	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_SHIFT                     0	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_WIDTH                     1	/* IM_GP1_EINT */
+
+/*
+ * R1849 (0x739) - Interrupt Status 2 Mask
+ */
+#define WM8995_IM_DCS_DONE_23_EINT              0x1000	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_MASK         0x1000	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_SHIFT            12	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_WIDTH             1	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT              0x0800	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_MASK         0x0800	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_SHIFT            11	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_WIDTH             1	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT                0x0400	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_MASK           0x0400	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_SHIFT              10	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_WIDTH               1	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT                0x0200	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_MASK           0x0200	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_SHIFT               9	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_WIDTH               1	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT          0x0100	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_MASK     0x0100	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_SHIFT         8	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_WIDTH         1	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT         0x0080	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_MASK    0x0080	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_SHIFT        7	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_WIDTH        1	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT         0x0040	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_MASK    0x0040	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_SHIFT        6	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_WIDTH        1	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT                0x0020	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_MASK           0x0020	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_SHIFT               5	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_WIDTH               1	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT                0x0010	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_MASK           0x0010	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_SHIFT               4	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_WIDTH               1	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT                0x0008	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_MASK           0x0008	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_SHIFT               3	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_WIDTH               1	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT                0x0004	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_MASK           0x0004	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_SHIFT               2	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_WIDTH               1	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_HP_DONE_EINT                  0x0002	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_MASK             0x0002	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_SHIFT                 1	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_WIDTH                 1	/* IM_HP_DONE_EINT */
+#define WM8995_IM_MICD_EINT                     0x0001	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_MASK                0x0001	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_SHIFT                    0	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_WIDTH                    1	/* IM_MICD_EINT */
+
+/*
+ * R1856 (0x740) - Interrupt Control
+ */
+#define WM8995_IM_IRQ                           0x0001	/* IM_IRQ */
+#define WM8995_IM_IRQ_MASK                      0x0001	/* IM_IRQ */
+#define WM8995_IM_IRQ_SHIFT                          0	/* IM_IRQ */
+#define WM8995_IM_IRQ_WIDTH                          1	/* IM_IRQ */
+
+/*
+ * R2048 (0x800) - Left PDM Speaker 1
+ */
+#define WM8995_SPK1L_ENA                        0x0010	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_MASK                   0x0010	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_SHIFT                       4	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_WIDTH                       1	/* SPK1L_ENA */
+#define WM8995_SPK1L_MUTE                       0x0008	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_MASK                  0x0008	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_SHIFT                      3	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_WIDTH                      1	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_ZC                    0x0004	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_MASK               0x0004	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_SHIFT                   2	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_WIDTH                   1	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_SRC_MASK                   0x0003	/* SPK1L_SRC - [1:0] */
+#define WM8995_SPK1L_SRC_SHIFT                       0	/* SPK1L_SRC - [1:0] */
+#define WM8995_SPK1L_SRC_WIDTH                       2	/* SPK1L_SRC - [1:0] */
+
+/*
+ * R2049 (0x801) - Right PDM Speaker 1
+ */
+#define WM8995_SPK1R_ENA                        0x0010	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_MASK                   0x0010	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_SHIFT                       4	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_WIDTH                       1	/* SPK1R_ENA */
+#define WM8995_SPK1R_MUTE                       0x0008	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_MASK                  0x0008	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_SHIFT                      3	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_WIDTH                      1	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_ZC                    0x0004	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_MASK               0x0004	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_SHIFT                   2	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_WIDTH                   1	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_SRC_MASK                   0x0003	/* SPK1R_SRC - [1:0] */
+#define WM8995_SPK1R_SRC_SHIFT                       0	/* SPK1R_SRC - [1:0] */
+#define WM8995_SPK1R_SRC_WIDTH                       2	/* SPK1R_SRC - [1:0] */
+
+/*
+ * R2050 (0x802) - PDM Speaker 1 Mute Sequence
+ */
+#define WM8995_SPK1_MUTE_SEQ1_MASK              0x00FF	/* SPK1_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK1_MUTE_SEQ1_SHIFT                  0	/* SPK1_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK1_MUTE_SEQ1_WIDTH                  8	/* SPK1_MUTE_SEQ1 - [7:0] */
+
+/*
+ * R2056 (0x808) - Left PDM Speaker 2
+ */
+#define WM8995_SPK2L_ENA                        0x0010	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_MASK                   0x0010	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_SHIFT                       4	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_WIDTH                       1	/* SPK2L_ENA */
+#define WM8995_SPK2L_MUTE                       0x0008	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_MASK                  0x0008	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_SHIFT                      3	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_WIDTH                      1	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_ZC                    0x0004	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_MASK               0x0004	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_SHIFT                   2	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_WIDTH                   1	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_SRC_MASK                   0x0003	/* SPK2L_SRC - [1:0] */
+#define WM8995_SPK2L_SRC_SHIFT                       0	/* SPK2L_SRC - [1:0] */
+#define WM8995_SPK2L_SRC_WIDTH                       2	/* SPK2L_SRC - [1:0] */
+
+/*
+ * R2057 (0x809) - Right PDM Speaker 2
+ */
+#define WM8995_SPK2R_ENA                        0x0010	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_MASK                   0x0010	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_SHIFT                       4	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_WIDTH                       1	/* SPK2R_ENA */
+#define WM8995_SPK2R_MUTE                       0x0008	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_MASK                  0x0008	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_SHIFT                      3	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_WIDTH                      1	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_ZC                    0x0004	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_MASK               0x0004	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_SHIFT                   2	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_WIDTH                   1	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_SRC_MASK                   0x0003	/* SPK2R_SRC - [1:0] */
+#define WM8995_SPK2R_SRC_SHIFT                       0	/* SPK2R_SRC - [1:0] */
+#define WM8995_SPK2R_SRC_WIDTH                       2	/* SPK2R_SRC - [1:0] */
+
+/*
+ * R2058 (0x80A) - PDM Speaker 2 Mute Sequence
+ */
+#define WM8995_SPK2_MUTE_SEQ1_MASK              0x00FF	/* SPK2_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK2_MUTE_SEQ1_SHIFT                  0	/* SPK2_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK2_MUTE_SEQ1_WIDTH                  8	/* SPK2_MUTE_SEQ1 - [7:0] */
+
+#define WM8995_CLASS_W_SWITCH(xname, reg, shift, max, invert) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_volsw, \
+	.get = snd_soc_dapm_get_volsw, .put = wm8995_put_class_w, \
+	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) \
+}
+
+struct wm8995_reg_access {
+	u16 read;
+	u16 write;
+	u16 vol;
+};
+
+/* Sources for AIF1/2 SYSCLK - use with set_dai_sysclk() */
+enum clk_src {
+	WM8995_SYSCLK_MCLK1 = 1,
+	WM8995_SYSCLK_MCLK2,
+	WM8995_SYSCLK_FLL1,
+	WM8995_SYSCLK_FLL2,
+	WM8995_SYSCLK_OPCLK
+};
+
+#define WM8995_FLL1 1
+#define WM8995_FLL2 2
+
+#define WM8995_FLL_SRC_MCLK1  1
+#define WM8995_FLL_SRC_MCLK2  2
+#define WM8995_FLL_SRC_LRCLK  3
+#define WM8995_FLL_SRC_BCLK   4
+
+#endif /* _WM8995_H */
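The _MASK/_SHIFT/_WIDTH triplets above follow the usual Wolfson convention: isolate a field with the mask, shift it down, and write it back read-modify-write. The SYSCLK sources in enum clk_src are meant to be passed as the clk_id argument of snd_soc_dai_set_sysclk(). A minimal sketch of both, assuming a hypothetical machine driver and an illustrative 12.288 MHz MCLK1 rate (neither the function names nor the rate come from this patch):

	#include <sound/soc.h>
	#include "wm8995.h"

	/* Clock AIF1 directly from MCLK1; WM8995_SYSCLK_* values are the clk_id. */
	static int board_wm8995_clock_init(struct snd_soc_dai *codec_dai)
	{
		return snd_soc_dai_set_sysclk(codec_dai, WM8995_SYSCLK_MCLK1,
					      12288000, SND_SOC_CLOCK_IN);
	}

	/* Extract the 5-bit GPIO 10 pin function field from a register value. */
	static unsigned int wm8995_gp10_fn(unsigned int reg_val)
	{
		return (reg_val & WM8995_GP10_FN_MASK) >> WM8995_GP10_FN_SHIFT;
	}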
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index a486670..cce704c 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -15,6 +15,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
@@ -23,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -158,7 +158,6 @@
 struct wm9081_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u16 reg_cache[WM9081_MAX_REGISTER + 1];
 	int sysclk_source;
 	int mclk_rate;
 	int sysclk_rate;
@@ -591,6 +590,10 @@
 	reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT;
 	snd_soc_write(codec, WM9081_FLL_CONTROL_5, reg5);
 
+	/* Set gain to the recommended value */
+	snd_soc_update_bits(codec, WM9081_FLL_CONTROL_4,
+			    WM9081_FLL_GAIN_MASK, 0);
+
 	/* Enable the FLL */
 	snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);
 
@@ -805,7 +808,7 @@
 
 	case SND_SOC_BIAS_STANDBY:
 		/* Initial cold start */
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Disable LINEOUT discharge */
 			reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL);
 			reg &= ~WM9081_LINEOUT_DISCH;
@@ -865,7 +868,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1228,6 +1231,7 @@
 static int wm9081_probe(struct snd_soc_codec *codec)
 {
 	struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 	u16 reg;
 
@@ -1269,9 +1273,9 @@
 				     ARRAY_SIZE(wm9081_eq_controls));
 	}
 
-	snd_soc_dapm_new_controls(codec, wm9081_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm9081_dapm_widgets,
 				  ARRAY_SIZE(wm9081_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
@@ -1338,6 +1342,10 @@
 	wm9081->control_type = SND_SOC_I2C;
 	wm9081->control_data = i2c;
 
+	if (dev_get_platdata(&i2c->dev))
+		memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev),
+		       sizeof(wm9081->retune));
+
 	ret = snd_soc_register_codec(&i2c->dev,
 			&soc_codec_dev_wm9081, &wm9081_dai, 1);
 	if (ret < 0)
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 6e5f64f..a788c42 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -28,7 +28,6 @@
 #include <linux/slab.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/wm9090.h>
 
@@ -442,31 +441,32 @@
 static int wm9090_add_controls(struct snd_soc_codec *codec)
 {
 	struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 
-	snd_soc_dapm_new_controls(codec, wm9090_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm9090_dapm_widgets,
 				  ARRAY_SIZE(wm9090_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	snd_soc_add_controls(codec, wm9090_controls,
 			     ARRAY_SIZE(wm9090_controls));
 
 	if (wm9090->pdata.lin1_diff) {
-		snd_soc_dapm_add_routes(codec, audio_map_in1_diff,
+		snd_soc_dapm_add_routes(dapm, audio_map_in1_diff,
 					ARRAY_SIZE(audio_map_in1_diff));
 	} else {
-		snd_soc_dapm_add_routes(codec, audio_map_in1_se,
+		snd_soc_dapm_add_routes(dapm, audio_map_in1_se,
 					ARRAY_SIZE(audio_map_in1_se));
 		snd_soc_add_controls(codec, wm9090_in1_se_controls,
 				     ARRAY_SIZE(wm9090_in1_se_controls));
 	}
 
 	if (wm9090->pdata.lin2_diff) {
-		snd_soc_dapm_add_routes(codec, audio_map_in2_diff,
+		snd_soc_dapm_add_routes(dapm, audio_map_in2_diff,
 					ARRAY_SIZE(audio_map_in2_diff));
 	} else {
-		snd_soc_dapm_add_routes(codec, audio_map_in2_se,
+		snd_soc_dapm_add_routes(dapm, audio_map_in2_se,
 					ARRAY_SIZE(audio_map_in2_se));
 		snd_soc_add_controls(codec, wm9090_in2_se_controls,
 				     ARRAY_SIZE(wm9090_in2_se_controls));
@@ -513,7 +513,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Restore the register cache */
 			for (i = 1; i < codec->driver->reg_cache_size; i++) {
 				if (reg_cache[i] == wm9090_reg_defaults[i])
@@ -543,7 +543,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index a144acd..47b357a 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -19,7 +19,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "wm9705.h"
 
@@ -203,9 +202,11 @@
 
 static int wm9705_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9705_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm9705_dapm_widgets,
 					ARRAY_SIZE(wm9705_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index d2f224d..bf5d4ef 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -20,7 +20,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include "wm9712.h"
 
 #define WM9712_VERSION "0.4"
@@ -432,10 +431,11 @@
 
 static int wm9712_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9712_dapm_widgets,
-				  ARRAY_SIZE(wm9712_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm9712_dapm_widgets,
+				  ARRAY_SIZE(wm9712_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -570,7 +570,7 @@
 		ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 7da13b0..38ed985 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -26,7 +26,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "wm9713.h"
 
@@ -647,10 +646,12 @@
 
 static int wm9713_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9713_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm9713_dapm_widgets,
 				  ARRAY_SIZE(wm9713_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1147,7 +1148,7 @@
 		ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 0e24092..5168927 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -22,7 +22,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -92,54 +91,73 @@
 static void calibrate_dc_servo(struct snd_soc_codec *codec)
 {
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
+	s8 offset;
 	u16 reg, reg_l, reg_r, dcs_cfg;
 
-	/* Set for 32 series updates */
-	snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
-			    WM8993_DCS_SERIES_NO_01_MASK,
-			    32 << WM8993_DCS_SERIES_NO_01_SHIFT);
-	wait_for_dc_servo(codec,
-			  WM8993_DCS_TRIG_SERIES_0 | WM8993_DCS_TRIG_SERIES_1);
+	/* If we're using a digital-only path and have a previously
+	 * calibrated DC servo offset stored, then use that. */
+	if (hubs->class_w && hubs->class_w_dcs) {
+		dev_dbg(codec->dev, "Using cached DC servo offset %x\n",
+			hubs->class_w_dcs);
+		snd_soc_write(codec, WM8993_DC_SERVO_3, hubs->class_w_dcs);
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_DAC_WR_0 |
+				  WM8993_DCS_TRIG_DAC_WR_1);
+		return;
+	}
+
+	/* Devices that don't need a DCS code correction use the startup mode */
+	if (hubs->dcs_codes) {
+		/* Set for 32 series updates */
+		snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
+				    WM8993_DCS_SERIES_NO_01_MASK,
+				    32 << WM8993_DCS_SERIES_NO_01_SHIFT);
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_SERIES_0 |
+				  WM8993_DCS_TRIG_SERIES_1);
+	} else {
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_STARTUP_0 |
+				  WM8993_DCS_TRIG_STARTUP_1);
+	}
+
+	/* Different chips in the family support different readback
+	 * methods.
+	 */
+	switch (hubs->dcs_readback_mode) {
+	case 0:
+		reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1)
+			& WM8993_DCS_INTEG_CHAN_0_MASK;
+		reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2)
+			& WM8993_DCS_INTEG_CHAN_1_MASK;
+		break;
+	case 1:
+		reg = snd_soc_read(codec, WM8993_DC_SERVO_3);
+		reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK)
+			>> WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
+		break;
+	default:
+		WARN(1, "Unknown DCS readback method\n");
+		break;
+	}
+
+	dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
 
 	/* Apply correction to DC servo result */
 	if (hubs->dcs_codes) {
 		dev_dbg(codec->dev, "Applying %d code DC servo correction\n",
 			hubs->dcs_codes);
 
-		/* Different chips in the family support different
-		 * readback methods.
-		 */
-		switch (hubs->dcs_readback_mode) {
-		case 0:
-			reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1)
-				& WM8993_DCS_INTEG_CHAN_0_MASK;;
-			reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2)
-				& WM8993_DCS_INTEG_CHAN_1_MASK;
-			break;
-		case 1:
-			reg = snd_soc_read(codec, WM8993_DC_SERVO_3);
-			reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK)
-				>> WM8993_DCS_DAC_WR_VAL_1_SHIFT;
-			reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
-			break;
-		default:
-			WARN(1, "Unknown DCS readback method\n");
-			break;
-		}
-
-		dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
-
 		/* HPOUT1L */
-		if (reg_l + hubs->dcs_codes > 0 &&
-		    reg_l + hubs->dcs_codes < 0xff)
-			reg_l += hubs->dcs_codes;
-		dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		offset = reg_l;
+		offset += hubs->dcs_codes;
+		dcs_cfg = (u8)offset << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
 
 		/* HPOUT1R */
-		if (reg_r + hubs->dcs_codes > 0 &&
-		    reg_r + hubs->dcs_codes < 0xff)
-			reg_r += hubs->dcs_codes;
-		dcs_cfg |= reg_r;
+		offset = reg_r;
+		offset += hubs->dcs_codes;
+		dcs_cfg |= (u8)offset;
 
 		dev_dbg(codec->dev, "DCS result: %x\n", dcs_cfg);
 
@@ -148,7 +166,15 @@
 		wait_for_dc_servo(codec,
 				  WM8993_DCS_TRIG_DAC_WR_0 |
 				  WM8993_DCS_TRIG_DAC_WR_1);
+	} else {
+		dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		dcs_cfg |= reg_r;
 	}
+
+	/* Save the calibrated offset if we're in class W mode and
+	 * therefore don't have any analogue signal mixed in. */
+	if (hubs->class_w)
+		hubs->class_w_dcs = dcs_cfg;
 }
 
 /*
@@ -163,6 +189,9 @@
 
 	ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
 
+	/* Updating the analogue gains invalidates the DC servo cache */
+	hubs->class_w_dcs = 0;
+
 	/* If we're applying an offset correction then updating the
 	 * callibration would be likely to introduce further offsets. */
 	if (hubs->dcs_codes)
@@ -645,6 +674,9 @@
 };
 
 static const struct snd_soc_dapm_route analogue_routes[] = {
+	{ "MICBIAS1", NULL, "CLK_SYS" },
+	{ "MICBIAS2", NULL, "CLK_SYS" },
+
 	{ "IN1L PGA", "IN1LP Switch", "IN1LP" },
 	{ "IN1L PGA", "IN1LN Switch", "IN1LN" },
 
@@ -791,6 +823,8 @@
 
 int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* Latch volume update bits & default ZC on */
 	snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_1_2_VOLUME,
 			    WM8993_IN1_VU, WM8993_IN1_VU);
@@ -819,7 +853,7 @@
 	snd_soc_add_controls(codec, analogue_snd_controls,
 			     ARRAY_SIZE(analogue_snd_controls));
 
-	snd_soc_dapm_new_controls(codec, analogue_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, analogue_dapm_widgets,
 				  ARRAY_SIZE(analogue_dapm_widgets));
 	return 0;
 }
@@ -828,24 +862,26 @@
 int wm_hubs_add_analogue_routes(struct snd_soc_codec *codec,
 				int lineout1_diff, int lineout2_diff)
 {
-	snd_soc_dapm_add_routes(codec, analogue_routes,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_add_routes(dapm, analogue_routes,
 				ARRAY_SIZE(analogue_routes));
 
 	if (lineout1_diff)
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout1_diff_routes,
 					ARRAY_SIZE(lineout1_diff_routes));
 	else
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout1_se_routes,
 					ARRAY_SIZE(lineout1_se_routes));
 
 	if (lineout2_diff)
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout2_diff_routes,
 					ARRAY_SIZE(lineout2_diff_routes));
 	else
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout2_se_routes,
 					ARRAY_SIZE(lineout2_se_routes));
 
@@ -872,7 +908,7 @@
 	 * VMID as an output and can disable it.
 	 */
 	if (lineout1_diff && lineout2_diff)
-		codec->idle_bias_off = 1;
+		codec->dapm.idle_bias_off = 1;
 
 	if (lineout1fb)
 		snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
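The reworked correction in calibrate_dc_servo() deliberately lets the addition wrap through a signed 8-bit value instead of clamping as the old code did, and the packed result is cached in class_w_dcs whenever the class W path is active (any analogue gain change clears the cache). A worked example with illustrative numbers, assuming the wm8993.h register header that wm_hubs.c already uses:

	#include <linux/types.h>
	#include "wm8993.h"	/* WM8993_DCS_DAC_WR_VAL_1_SHIFT */

	static u16 dcs_correction_example(void)
	{
		s8 offset = 0x03;	/* integrator result read back for HPOUT1L */
		offset += -5;		/* hubs->dcs_codes correction */
		/* offset is now -2; (u8)offset is 0xFE, which is packed
		 * unclamped into the DCS_DAC_WR_VAL_1 field. */
		return (u8)offset << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
	}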
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
index e51c166..f8a5e97 100644
--- a/sound/soc/codecs/wm_hubs.h
+++ b/sound/soc/codecs/wm_hubs.h
@@ -23,6 +23,9 @@
 	int dcs_codes;
 	int dcs_readback_mode;
 	int hp_startup_mode;
+
+	bool class_w;
+	u16 class_w_dcs;
 };
 
 extern int wm_hubs_add_analogue_controls(struct snd_soc_codec *);
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index bc9e6b0b..fe79842 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -18,7 +18,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/dma.h>
 #include <asm/mach-types.h>
@@ -27,7 +26,6 @@
 #include <mach/edma.h>
 #include <mach/mux.h>
 
-#include "../codecs/tlv320aic3x.h"
 #include "davinci-pcm.h"
 #include "davinci-i2s.h"
 #include "davinci-mcasp.h"
@@ -132,26 +130,27 @@
 static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add davinci-evm specific widgets */
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* Set up davinci-evm specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* not connected */
-	snd_soc_dapm_disable_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_disable_pin(codec, "HPLCOM");
-	snd_soc_dapm_disable_pin(codec, "HPRCOM");
+	snd_soc_dapm_disable_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_disable_pin(dapm, "HPLCOM");
+	snd_soc_dapm_disable_pin(dapm, "HPRCOM");
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -219,12 +218,24 @@
 		.ops = &evm_spdif_ops,
 	},
 };
-static struct snd_soc_dai_link da8xx_evm_dai = {
+
+static struct snd_soc_dai_link da830_evm_dai = {
+	.name = "TLV320AIC3X",
+	.stream_name = "AIC3X",
+	.cpu_dai_name = "davinci-mcasp.1",
+	.codec_dai_name = "tlv320aic3x-hifi",
+	.codec_name = "tlv320aic3x-codec.1-0018",
+	.platform_name = "davinci-pcm-audio",
+	.init = evm_aic3x_init,
+	.ops = &evm_ops,
+};
+
+static struct snd_soc_dai_link da850_evm_dai = {
 	.name = "TLV320AIC3X",
 	.stream_name = "AIC3X",
 	.cpu_dai_name= "davinci-mcasp.0",
 	.codec_dai_name = "tlv320aic3x-hifi",
-	.codec_name = "tlv320aic3x-codec.0-001a",
+	.codec_name = "tlv320aic3x-codec.1-0018",
 	.platform_name = "davinci-pcm-audio",
 	.init = evm_aic3x_init,
 	.ops = &evm_ops,
@@ -260,13 +271,13 @@
 
 static struct snd_soc_card da830_snd_soc_card = {
 	.name = "DA830/OMAP-L137 EVM",
-	.dai_link = &da8xx_evm_dai,
+	.dai_link = &da830_evm_dai,
 	.num_links = 1,
 };
 
 static struct snd_soc_card da850_snd_soc_card = {
 	.name = "DA850/OMAP-L138 EVM",
-	.dai_link = &da8xx_evm_dai,
+	.dai_link = &da850_evm_dai,
 	.num_links = 1,
 };
 
diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c
index 6c6666a..0fe558c 100644
--- a/sound/soc/davinci/davinci-sffsdr.c
+++ b/sound/soc/davinci/davinci-sffsdr.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/dma.h>
 #include <asm/mach-types.h>
diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c
index 4f48733..fff579a 100644
--- a/sound/soc/ep93xx/ep93xx-i2s.c
+++ b/sound/soc/ep93xx/ep93xx-i2s.c
@@ -267,14 +267,16 @@
 		ep93xx_i2s_write_reg(info, EP93XX_I2S_RXWRDLEN, word_len);
 
 	/*
-	 * Calculate the sdiv (bit clock) and lrdiv (left/right clock) values.
-	 * If the lrclk is pulse length is larger than the word size, then the
-	 * bit clock will be gated for the unused bits.
+	 * The EP93xx I2S module can be set up so that the SCLK / LRCLK
+	 * ratio is 32, 64 or 128 and the MCLK / SCLK ratio is 2 or 4.
+	 * We set LRCLK equal to `rate', and the minimum SCLK / LRCLK
+	 * ratio is 64 because our sample size is 32 bits * 2 channels.
+	 * The I2S standard permits us to transmit more bits than
+	 * the codec uses.
 	 */
-	div = (clk_get_rate(info->mclk) / params_rate(params)) *
-		params_channels(params);
+	div = clk_get_rate(info->mclk) / params_rate(params);
 	for (sdiv = 2; sdiv <= 4; sdiv += 2)
-		for (lrdiv = 32; lrdiv <= 128; lrdiv <<= 1)
+		for (lrdiv = 64; lrdiv <= 128; lrdiv <<= 1)
 			if (sdiv * lrdiv == div) {
 				found = 1;
 				goto out;
@@ -341,9 +343,7 @@
 	.set_fmt	= ep93xx_i2s_set_dai_fmt,
 };
 
-#define EP93XX_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
-			    SNDRV_PCM_FMTBIT_S24_LE | \
-			    SNDRV_PCM_FMTBIT_S32_LE)
+#define EP93XX_I2S_FORMATS (SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct snd_soc_dai_driver ep93xx_i2s_dai = {
 	.symmetric_rates= 1,
@@ -352,13 +352,13 @@
 	.playback	= {
 		.channels_min	= 2,
 		.channels_max	= 2,
-		.rates		= SNDRV_PCM_RATE_8000_48000,
+		.rates		= SNDRV_PCM_RATE_8000_96000,
 		.formats	= EP93XX_I2S_FORMATS,
 	},
 	.capture	= {
 		 .channels_min	= 2,
 		 .channels_max	= 2,
-		 .rates		= SNDRV_PCM_RATE_8000_48000,
+		 .rates		= SNDRV_PCM_RATE_8000_96000,
 		 .formats	= EP93XX_I2S_FORMATS,
 	},
 	.ops		= &ep93xx_i2s_dai_ops,
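The clock comment above describes a small search: with LRCLK fixed to the sample rate, find an MCLK/SCLK divider of 2 or 4 and an SCLK/LRCLK divider of 64 or 128 whose product equals MCLK/rate. A standalone userspace sketch of that arithmetic follows; the mclk and rate values are arbitrary example inputs, not values taken from the driver, and the goto mirrors the structure of ep93xx_i2s_hw_params().

#include <stdio.h>

int main(void)
{
	unsigned long mclk = 12288000;	/* assumed master clock, Hz */
	unsigned int rate = 48000;	/* LRCLK == sample rate, Hz */
	unsigned int div = mclk / rate;	/* overall MCLK / LRCLK ratio */
	unsigned int sdiv, lrdiv, found = 0;

	/* MCLK/SCLK may be 2 or 4; SCLK/LRCLK starts at 64 because 32 is
	 * too small for 32-bit samples on two channels. */
	for (sdiv = 2; sdiv <= 4; sdiv += 2)
		for (lrdiv = 64; lrdiv <= 128; lrdiv <<= 1)
			if (sdiv * lrdiv == div) {
				found = 1;
				goto out;
			}
out:
	if (found)
		printf("MCLK/SCLK = %u, SCLK/LRCLK = %u\n", sdiv, lrdiv);
	else
		printf("no divider pair for mclk=%lu, rate=%u\n", mclk, rate);
	return 0;
}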
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c
index 2f121dd..0667077 100644
--- a/sound/soc/ep93xx/ep93xx-pcm.c
+++ b/sound/soc/ep93xx/ep93xx-pcm.c
@@ -35,9 +35,9 @@
 				   SNDRV_PCM_INFO_INTERLEAVED	|
 				   SNDRV_PCM_INFO_BLOCK_TRANSFER),
 				   
-	.rates			= SNDRV_PCM_RATE_8000_48000,
+	.rates			= SNDRV_PCM_RATE_8000_96000,
 	.rate_min		= SNDRV_PCM_RATE_8000,
-	.rate_max		= SNDRV_PCM_RATE_48000,
+	.rate_max		= SNDRV_PCM_RATE_96000,
 	
 	.formats		= (SNDRV_PCM_FMTBIT_S16_LE |
 				   SNDRV_PCM_FMTBIT_S24_LE |
diff --git a/sound/soc/ep93xx/snappercl15.c b/sound/soc/ep93xx/snappercl15.c
index 28ab5ff..dfe1d7f 100644
--- a/sound/soc/ep93xx/snappercl15.c
+++ b/sound/soc/ep93xx/snappercl15.c
@@ -15,7 +15,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -79,11 +78,12 @@
 static int snappercl15_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	return 0;
 }
 
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index dd4fffd..1e9bcca 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-types.h>
 
 #include "../codecs/tlv320aic23.h"
@@ -80,7 +79,7 @@
 	.name		= "tlv320aic23",
 	.stream_name	= "TLV320AIC23",
 	.codec_dai_name	= "tlv320aic23-hifi",
-	.platform_name	= "imx-pcm-audio.0",
+	.platform_name	= "imx-fiq-pcm-audio.0",
 	.codec_name	= "tlv320aic23-codec.0-001a",
 	.cpu_dai_name	= "imx-ssi.0",
 	.ops		= &eukrea_tlv320_snd_ops,
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 390b6ff..30894ea 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -456,13 +456,13 @@
 static struct snd_soc_dai_driver imx_ssi_dai = {
 	.probe = imx_ssi_dai_probe,
 	.playback = {
-		.channels_min = 2,
+		.channels_min = 1,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	},
 	.capture = {
-		.channels_min = 2,
+		.channels_min = 1,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/imx/phycore-ac97.c b/sound/soc/imx/phycore-ac97.c
index 9eabc28..a7deb5c 100644
--- a/sound/soc/imx/phycore-ac97.c
+++ b/sound/soc/imx/phycore-ac97.c
@@ -17,7 +17,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-types.h>
 
 static struct snd_soc_card imx_phycore;
diff --git a/sound/soc/imx/wm1133-ev1.c b/sound/soc/imx/wm1133-ev1.c
index 30fdb15..75b4c72 100644
--- a/sound/soc/imx/wm1133-ev1.c
+++ b/sound/soc/imx/wm1133-ev1.c
@@ -19,7 +19,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audmux.h>
 
@@ -213,11 +212,12 @@
 static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, wm1133_ev1_widgets,
+	snd_soc_dapm_new_controls(dapm, wm1133_ev1_widgets,
 				  ARRAY_SIZE(wm1133_ev1_widgets));
 
-	snd_soc_dapm_add_routes(codec, wm1133_ev1_map,
+	snd_soc_dapm_add_routes(dapm, wm1133_ev1_map,
 				ARRAY_SIZE(wm1133_ev1_map));
 
 	/* Headphone jack detection */
@@ -234,7 +234,7 @@
 	wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE,
 			       SND_JACK_BTN_0);
 
-	snd_soc_dapm_force_enable_pin(codec, "Mic Bias");
+	snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
 
 	return 0;
 }
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index f3cffd1..419bf4f 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -28,7 +28,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "jz4740-i2s.h"
diff --git a/sound/soc/jz4740/qi_lb60.c b/sound/soc/jz4740/qi_lb60.c
index ef1a99e..49723e3 100644
--- a/sound/soc/jz4740/qi_lb60.c
+++ b/sound/soc/jz4740/qi_lb60.c
@@ -19,7 +19,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <linux/gpio.h>
 
 #define QI_LB60_SND_GPIO JZ_GPIO_PORTB(29)
@@ -59,10 +58,11 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_nc_pin(codec, "LIN");
-	snd_soc_dapm_nc_pin(codec, "RIN");
+	snd_soc_dapm_nc_pin(dapm, "LIN");
+	snd_soc_dapm_nc_pin(dapm, "RIN");
 
 	ret = snd_soc_dai_set_fmt(cpu_dai, QI_LB60_DAIFMT);
 	if (ret < 0) {
@@ -70,9 +70,11 @@
 		return ret;
 	}
 
-	snd_soc_dapm_new_controls(codec, qi_lb60_widgets, ARRAY_SIZE(qi_lb60_widgets));
-	snd_soc_dapm_add_routes(codec, qi_lb60_routes, ARRAY_SIZE(qi_lb60_routes));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_new_controls(dapm, qi_lb60_widgets,
+				  ARRAY_SIZE(qi_lb60_widgets));
+	snd_soc_dapm_add_routes(dapm, qi_lb60_routes,
+				ARRAY_SIZE(qi_lb60_routes));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 16ec2a2..8f49e16 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -11,10 +11,19 @@
 
 config SND_KIRKWOOD_SOC_OPENRD
 	tristate "SoC Audio support for Kirkwood Openrd Client"
-	depends on SND_KIRKWOOD_SOC && MACH_OPENRD_CLIENT
+	depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
 	select SND_KIRKWOOD_SOC_I2S
 	select SND_SOC_CS42L51
 	help
 	  Say Y if you want to add support for SoC audio on
 	  Openrd Client.
 
+config SND_KIRKWOOD_SOC_T5325
+	tristate "SoC Audio support for HP t5325"
+	depends on SND_KIRKWOOD_SOC && MACH_T5325
+	select SND_KIRKWOOD_SOC_I2S
+	select SND_SOC_ALC5623
+	help
+	  Say Y if you want to add support for SoC audio on
+	  the HP t5325 thin client.
+
diff --git a/sound/soc/kirkwood/Makefile b/sound/soc/kirkwood/Makefile
index 33a16dc..3e62ae9 100644
--- a/sound/soc/kirkwood/Makefile
+++ b/sound/soc/kirkwood/Makefile
@@ -5,5 +5,7 @@
 obj-$(CONFIG_SND_KIRKWOOD_SOC_I2S) += snd-soc-kirkwood-i2s.o
 
 snd-soc-openrd-objs := kirkwood-openrd.o
+snd-soc-t5325-objs := kirkwood-t5325.o
 
 obj-$(CONFIG_SND_KIRKWOOD_SOC_OPENRD) += snd-soc-openrd.o
+obj-$(CONFIG_SND_KIRKWOOD_SOC_T5325) += snd-soc-t5325.o
diff --git a/sound/soc/kirkwood/kirkwood-openrd.c b/sound/soc/kirkwood/kirkwood-openrd.c
index 9d7c81e..d863afb 100644
--- a/sound/soc/kirkwood/kirkwood-openrd.c
+++ b/sound/soc/kirkwood/kirkwood-openrd.c
@@ -86,7 +86,7 @@
 {
 	int ret;
 
-	if (!machine_is_openrd_client())
+	if (!machine_is_openrd_client() && !machine_is_openrd_ultimate())
 		return 0;
 
 	openrd_client_snd_device = platform_device_alloc("soc-audio", -1);
diff --git a/sound/soc/kirkwood/kirkwood-t5325.c b/sound/soc/kirkwood/kirkwood-t5325.c
new file mode 100644
index 0000000..c8d2195
--- /dev/null
+++ b/sound/soc/kirkwood/kirkwood-t5325.c
@@ -0,0 +1,141 @@
+/*
+ * kirkwood-t5325.c
+ *
+ * (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <mach/kirkwood.h>
+#include <plat/audio.h>
+#include <asm/mach-types.h>
+#include "../codecs/alc5623.h"
+
+static int t5325_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret;
+	unsigned int freq, fmt;
+
+	fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+	if (ret < 0)
+		return ret;
+
+	freq = params_rate(params) * 256;
+
+	return snd_soc_dai_set_sysclk(codec_dai, 0, freq, SND_SOC_CLOCK_IN);
+
+}
+
+static struct snd_soc_ops t5325_ops = {
+	.hw_params = t5325_hw_params,
+};
+
+static const struct snd_soc_dapm_widget t5325_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route t5325_route[] = {
+	{ "Headphone Jack",	NULL,	"HPL" },
+	{ "Headphone Jack",	NULL,	"HPR" },
+
+	{"Speaker",		NULL,	"SPKOUT"},
+	{"Speaker",		NULL,	"SPKOUTN"},
+
+	{ "MIC1",		NULL,	"Mic Jack" },
+	{ "MIC2",		NULL,	"Mic Jack" },
+};
+
+static int t5325_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, t5325_dapm_widgets,
+				ARRAY_SIZE(t5325_dapm_widgets));
+
+	snd_soc_dapm_add_routes(dapm, t5325_route, ARRAY_SIZE(t5325_route));
+
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link t5325_dai[] = {
+{
+	.name = "ALC5621",
+	.stream_name = "ALC5621 HiFi",
+	.cpu_dai_name = "kirkwood-i2s",
+	.platform_name = "kirkwood-pcm-audio",
+	.codec_dai_name = "alc5621-hifi",
+	.codec_name = "alc562x-codec.0-001a",
+	.ops = &t5325_ops,
+	.init = t5325_dai_init,
+},
+};
+
+
+static struct snd_soc_card t5325 = {
+	.name = "t5325",
+	.dai_link = t5325_dai,
+	.num_links = ARRAY_SIZE(t5325_dai),
+};
+
+static struct platform_device *t5325_snd_device;
+
+static int __init t5325_init(void)
+{
+	int ret;
+
+	if (!machine_is_t5325())
+		return 0;
+
+	t5325_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!t5325_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(t5325_snd_device,
+			&t5325);
+
+	ret = platform_device_add(t5325_snd_device);
+	if (ret) {
+		printk(KERN_ERR "%s: platform_device_add failed\n", __func__);
+		platform_device_put(t5325_snd_device);
+	}
+
+	return ret;
+}
+module_init(t5325_init);
+
+static void __exit t5325_exit(void)
+{
+	platform_device_unregister(t5325_snd_device);
+}
+module_exit(t5325_exit);
+
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_DESCRIPTION("ALSA SoC t5325 audio client");
+MODULE_LICENSE("GPL");
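t5325_hw_params() above derives the codec system clock as 256 * fs and hands it to the codec with SND_SOC_CLOCK_IN. A trivial standalone sketch of the resulting clock values, with an arbitrary list of rates chosen only for illustration:

#include <stdio.h>

int main(void)
{
	/* example rates only; any rate the DAI advertises works the same way */
	const unsigned int rates[] = { 44100, 48000, 96000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("rate %5u Hz -> sysclk %8u Hz (256 * fs)\n",
		       rates[i], rates[i] * 256);
	return 0;
}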
diff --git a/sound/soc/nuc900/nuc900-audio.c b/sound/soc/nuc900/nuc900-audio.c
index 161f5b6..38a2d0d8 100644
--- a/sound/soc/nuc900/nuc900-audio.c
+++ b/sound/soc/nuc900/nuc900-audio.c
@@ -18,7 +18,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "nuc900-audio.h"
 
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c
index 979dd50..73dde4a 100644
--- a/sound/soc/omap/am3517evm.c
+++ b/sound/soc/omap/am3517evm.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -114,20 +113,21 @@
 static int am3517evm_aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add am3517-evm specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up davinci-evm specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic In");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -139,7 +139,7 @@
 	.cpu_dai_name ="omap-mcbsp-dai.0",
 	.codec_dai_name = "tlv320aic23-hifi",
 	.platform_name = "omap-pcm-audio",
-	.codec_name = "tlv320aic23-codec",
+	.codec_name = "tlv320aic23-codec.2-001a",
 	.init = am3517evm_aic23_init,
 	.ops = &am3517evm_ops,
 };
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 438146a..3167be6 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -26,7 +26,7 @@
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -94,6 +94,7 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct soc_enum *control = (struct soc_enum *)kcontrol->private_value;
 	unsigned short pins;
 	int pin, changed = 0;
@@ -112,48 +113,48 @@
 
 	/* Setup pins after corresponding bits if changed */
 	pin = !!(pins & (1 << AMS_DELTA_MOUTHPIECE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Mouthpiece")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Mouthpiece")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Mouthpiece");
+			snd_soc_dapm_enable_pin(dapm, "Mouthpiece");
 		else
-			snd_soc_dapm_disable_pin(codec, "Mouthpiece");
+			snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_EARPIECE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Earpiece")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Earpiece")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Earpiece");
+			snd_soc_dapm_enable_pin(dapm, "Earpiece");
 		else
-			snd_soc_dapm_disable_pin(codec, "Earpiece");
+			snd_soc_dapm_disable_pin(dapm, "Earpiece");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_MICROPHONE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Microphone")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Microphone")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Microphone");
+			snd_soc_dapm_enable_pin(dapm, "Microphone");
 		else
-			snd_soc_dapm_disable_pin(codec, "Microphone");
+			snd_soc_dapm_disable_pin(dapm, "Microphone");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_SPEAKER));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Speaker")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Speaker")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Speaker");
+			snd_soc_dapm_enable_pin(dapm, "Speaker");
 		else
-			snd_soc_dapm_disable_pin(codec, "Speaker");
+			snd_soc_dapm_disable_pin(dapm, "Speaker");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_AGC));
 	if (pin != ams_delta_audio_agc) {
 		ams_delta_audio_agc = pin;
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "AGCIN");
+			snd_soc_dapm_enable_pin(dapm, "AGCIN");
 		else
-			snd_soc_dapm_disable_pin(codec, "AGCIN");
+			snd_soc_dapm_disable_pin(dapm, "AGCIN");
 	}
 	if (changed)
-		snd_soc_dapm_sync(codec);
+		snd_soc_dapm_sync(dapm);
 
 	mutex_unlock(&codec->mutex);
 
@@ -164,19 +165,20 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned short pins, mode;
 
-	pins = ((snd_soc_dapm_get_pin_status(codec, "Mouthpiece") <<
+	pins = ((snd_soc_dapm_get_pin_status(dapm, "Mouthpiece") <<
 							AMS_DELTA_MOUTHPIECE) |
-			(snd_soc_dapm_get_pin_status(codec, "Earpiece") <<
+			(snd_soc_dapm_get_pin_status(dapm, "Earpiece") <<
 							AMS_DELTA_EARPIECE));
 	if (pins)
-		pins |= (snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+		pins |= (snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
 							AMS_DELTA_MICROPHONE);
 	else
-		pins = ((snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+		pins = ((snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
 							AMS_DELTA_MICROPHONE) |
-			(snd_soc_dapm_get_pin_status(codec, "Speaker") <<
+			(snd_soc_dapm_get_pin_status(dapm, "Speaker") <<
 							AMS_DELTA_SPEAKER) |
 			(ams_delta_audio_agc << AMS_DELTA_AGC));
 
@@ -300,6 +302,7 @@
 static void cx81801_close(struct tty_struct *tty)
 {
 	struct snd_soc_codec *codec = tty->disc_data;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	del_timer_sync(&cx81801_timer);
 
@@ -312,12 +315,12 @@
 	v253_ops.close(tty);
 
 	/* Revert back to default audio input/output constellation */
-	snd_soc_dapm_disable_pin(codec, "Mouthpiece");
-	snd_soc_dapm_enable_pin(codec, "Earpiece");
-	snd_soc_dapm_enable_pin(codec, "Microphone");
-	snd_soc_dapm_disable_pin(codec, "Speaker");
-	snd_soc_dapm_disable_pin(codec, "AGCIN");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
+	snd_soc_dapm_enable_pin(dapm, "Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Microphone");
+	snd_soc_dapm_disable_pin(dapm, "Speaker");
+	snd_soc_dapm_disable_pin(dapm, "AGCIN");
+	snd_soc_dapm_sync(dapm);
 }
 
 /* Line discipline .hangup() */
@@ -432,16 +435,16 @@
 	case SND_SOC_BIAS_ON:
 	case SND_SOC_BIAS_PREPARE:
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
 						AMS_DELTA_LATCH2_MODEM_NRESET);
 		break;
 	case SND_SOC_BIAS_OFF:
-		if (codec->bias_level != SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level != SND_SOC_BIAS_OFF)
 			ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
 						0);
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -492,6 +495,7 @@
 static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	struct snd_soc_card *card = rtd->card;
 	int ret;
@@ -503,8 +507,6 @@
 	/* Set up digital mute if not provided by the codec */
 	if (!codec_dai->driver->ops) {
 		codec_dai->driver->ops = &ams_delta_dai_ops;
-	} else if (!codec_dai->driver->ops->digital_mute) {
-		codec_dai->driver->ops->digital_mute = ams_delta_digital_mute;
 	} else {
 		ams_delta_ops.startup = ams_delta_startup;
 		ams_delta_ops.shutdown = ams_delta_shutdown;
@@ -541,7 +543,7 @@
 	}
 
 	/* Add board specific DAPM widgets and routes */
-	ret = snd_soc_dapm_new_controls(codec, ams_delta_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, ams_delta_dapm_widgets,
 					ARRAY_SIZE(ams_delta_dapm_widgets));
 	if (ret) {
 		dev_warn(card->dev,
@@ -550,7 +552,7 @@
 		return 0;
 	}
 
-	ret = snd_soc_dapm_add_routes(codec, ams_delta_audio_map,
+	ret = snd_soc_dapm_add_routes(dapm, ams_delta_audio_map,
 					ARRAY_SIZE(ams_delta_audio_map));
 	if (ret) {
 		dev_warn(card->dev,
@@ -560,13 +562,13 @@
 	}
 
 	/* Set up initial pin constellation */
-	snd_soc_dapm_disable_pin(codec, "Mouthpiece");
-	snd_soc_dapm_enable_pin(codec, "Earpiece");
-	snd_soc_dapm_enable_pin(codec, "Microphone");
-	snd_soc_dapm_disable_pin(codec, "Speaker");
-	snd_soc_dapm_disable_pin(codec, "AGCIN");
-	snd_soc_dapm_disable_pin(codec, "AGCOUT");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
+	snd_soc_dapm_enable_pin(dapm, "Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Microphone");
+	snd_soc_dapm_disable_pin(dapm, "Speaker");
+	snd_soc_dapm_disable_pin(dapm, "AGCIN");
+	snd_soc_dapm_disable_pin(dapm, "AGCOUT");
+	snd_soc_dapm_sync(dapm);
 
 	/* Add virtual switch */
 	ret = snd_soc_add_controls(codec, ams_delta_audio_controls,
diff --git a/sound/soc/omap/igep0020.c b/sound/soc/omap/igep0020.c
index fd3a40f..0ae3470 100644
--- a/sound/soc/omap/igep0020.c
+++ b/sound/soc/omap/igep0020.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index a3b6d89..83d213b 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -27,7 +27,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -36,7 +35,6 @@
 
 #include "omap-mcbsp.h"
 #include "omap-pcm.h"
-#include "../codecs/tlv320aic3x.h"
 
 #define N810_HEADSET_AMP_GPIO	10
 #define N810_SPEAKER_AMP_GPIO	101
@@ -58,6 +56,7 @@
 
 static void n810_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int hp = 0, line1l = 0;
 
 	switch (n810_jack_func) {
@@ -72,25 +71,25 @@
 	}
 
 	if (n810_spk_func)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	if (hp)
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	else
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 	if (line1l)
-		snd_soc_dapm_enable_pin(codec, "LINE1L");
+		snd_soc_dapm_enable_pin(dapm, "LINE1L");
 	else
-		snd_soc_dapm_disable_pin(codec, "LINE1L");
+		snd_soc_dapm_disable_pin(dapm, "LINE1L");
 
 	if (n810_dmic_func)
-		snd_soc_dapm_enable_pin(codec, "DMic");
+		snd_soc_dapm_enable_pin(dapm, "DMic");
 	else
-		snd_soc_dapm_disable_pin(codec, "DMic");
+		snd_soc_dapm_disable_pin(dapm, "DMic");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int n810_startup(struct snd_pcm_substream *substream)
@@ -274,17 +273,18 @@
 static int n810_aic33_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Not connected */
-	snd_soc_dapm_nc_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_nc_pin(codec, "HPLCOM");
-	snd_soc_dapm_nc_pin(codec, "HPRCOM");
-	snd_soc_dapm_nc_pin(codec, "MIC3L");
-	snd_soc_dapm_nc_pin(codec, "MIC3R");
-	snd_soc_dapm_nc_pin(codec, "LINE1R");
-	snd_soc_dapm_nc_pin(codec, "LINE2L");
-	snd_soc_dapm_nc_pin(codec, "LINE2R");
+	snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_nc_pin(dapm, "HPLCOM");
+	snd_soc_dapm_nc_pin(dapm, "HPRCOM");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L");
+	snd_soc_dapm_nc_pin(dapm, "MIC3R");
+	snd_soc_dapm_nc_pin(dapm, "LINE1R");
+	snd_soc_dapm_nc_pin(dapm, "LINE2L");
+	snd_soc_dapm_nc_pin(dapm, "LINE2R");
 
 	/* Add N810 specific controls */
 	err = snd_soc_add_controls(codec, aic33_n810_controls,
@@ -293,13 +293,13 @@
 		return err;
 
 	/* Add N810 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic33_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic33_dapm_widgets,
 				  ARRAY_SIZE(aic33_dapm_widgets));
 
 	/* Set up N810 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 7e84f24..d203f4d 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -102,6 +102,17 @@
 static const int omap24xx_dma_reqs[][2] = {};
 #endif
 
+#if defined(CONFIG_ARCH_OMAP4)
+static const int omap44xx_dma_reqs[][2] = {
+	{ OMAP44XX_DMA_MCBSP1_TX, OMAP44XX_DMA_MCBSP1_RX },
+	{ OMAP44XX_DMA_MCBSP2_TX, OMAP44XX_DMA_MCBSP2_RX },
+	{ OMAP44XX_DMA_MCBSP3_TX, OMAP44XX_DMA_MCBSP3_RX },
+	{ OMAP44XX_DMA_MCBSP4_TX, OMAP44XX_DMA_MCBSP4_RX },
+};
+#else
+static const int omap44xx_dma_reqs[][2] = {};
+#endif
+
 #if defined(CONFIG_ARCH_OMAP2420)
 static const unsigned long omap2420_mcbsp_port[][2] = {
 	{ OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1,
@@ -147,6 +158,21 @@
 static const unsigned long omap34xx_mcbsp_port[][2] = {};
 #endif
 
+#if defined(CONFIG_ARCH_OMAP4)
+static const unsigned long omap44xx_mcbsp_port[][2] = {
+	{ OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DRR },
+};
+#else
+static const unsigned long omap44xx_mcbsp_port[][2] = {};
+#endif
+
 static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -224,7 +250,7 @@
 	 * 2 channels (stereo): size is 128 / 2 = 64 frames (2 * 64 words)
 	 * 4 channels: size is 128 / 4 = 32 frames (4 * 32 words)
 	 */
-	if (cpu_is_omap343x()) {
+	if (cpu_is_omap343x() || cpu_is_omap44xx()) {
 		/*
 		* Rule for the buffer size. We should not allow
 		* smaller buffer than the FIFO size to avoid underruns
@@ -332,6 +358,9 @@
 	} else if (cpu_is_omap343x()) {
 		dma = omap24xx_dma_reqs[bus_id][substream->stream];
 		port = omap34xx_mcbsp_port[bus_id][substream->stream];
+	} else if (cpu_is_omap44xx()) {
+		dma = omap44xx_dma_reqs[bus_id][substream->stream];
+		port = omap44xx_mcbsp_port[bus_id][substream->stream];
 	} else {
 		return -ENODEV;
 	}
@@ -498,11 +527,11 @@
 	regs->spcr2	|= XINTM(3) | FREE;
 	regs->spcr1	|= RINTM(3);
 	/* RFIG and XFIG are not defined in 34xx */
-	if (!cpu_is_omap34xx()) {
+	if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) {
 		regs->rcr2	|= RFIG;
 		regs->xcr2	|= XFIG;
 	}
-	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
 		regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
 		regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
 	}
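The buffer-size comment above (whose rule now also covers OMAP4) pins the smallest allowed buffer to the McBSP FIFO size: 128 / channels frames for the 128-word FIFO it quotes. A standalone sketch of that rule, for illustration only:

#include <stdio.h>

int main(void)
{
	/* 128 is the FIFO size, in words, quoted in the driver comment */
	const unsigned int fifo_words = 128;
	unsigned int channels;

	for (channels = 1; channels <= 4; channels <<= 1)
		printf("%u channel(s): minimum buffer %u frames (%u words)\n",
		       channels, fifo_words / channels, fifo_words);
	return 0;
}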
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
index ffdcc5a..110c106 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/omap/omap-mcbsp.h
@@ -50,6 +50,10 @@
 #undef  NUM_LINKS
 #define NUM_LINKS	3
 #endif
+#if defined(CONFIG_ARCH_OMAP4)
+#undef  NUM_LINKS
+#define NUM_LINKS	4
+#endif
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
 #undef  NUM_LINKS
 #define NUM_LINKS	5
diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c
index cf3fc8a..29b60d6 100644
--- a/sound/soc/omap/omap2evm.c
+++ b/sound/soc/omap/omap2evm.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3beagle.c b/sound/soc/omap/omap3beagle.c
index e56832b..40db813 100644
--- a/sound/soc/omap/omap3beagle.c
+++ b/sound/soc/omap/omap3beagle.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3evm.c b/sound/soc/omap/omap3evm.c
index 810f1e36..0daa044 100644
--- a/sound/soc/omap/omap3evm.c
+++ b/sound/soc/omap/omap3evm.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 4ee33ce..8047c521e 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -28,7 +28,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <plat/mcbsp.h>
@@ -170,51 +169,53 @@
 static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* All TWL4030 output pins are floating */
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "HSOL");
-	snd_soc_dapm_nc_pin(codec, "HSOR");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
-	snd_soc_dapm_nc_pin(codec, "HFL");
-	snd_soc_dapm_nc_pin(codec, "HFR");
-	snd_soc_dapm_nc_pin(codec, "VIBRA");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "HSOL");
+	snd_soc_dapm_nc_pin(dapm, "HSOR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "HFL");
+	snd_soc_dapm_nc_pin(dapm, "HFR");
+	snd_soc_dapm_nc_pin(dapm, "VIBRA");
 
-	ret = snd_soc_dapm_new_controls(codec, omap3pandora_out_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, omap3pandora_out_dapm_widgets,
 				ARRAY_SIZE(omap3pandora_out_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	snd_soc_dapm_add_routes(codec, omap3pandora_out_map,
+	snd_soc_dapm_add_routes(dapm, omap3pandora_out_map,
 		ARRAY_SIZE(omap3pandora_out_map));
 
-	return snd_soc_dapm_sync(codec);
+	return snd_soc_dapm_sync(dapm);
 }
 
 static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Not connected */
-	snd_soc_dapm_nc_pin(codec, "HSMIC");
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "HSMIC");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
 
-	ret = snd_soc_dapm_new_controls(codec, omap3pandora_in_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, omap3pandora_in_dapm_widgets,
 				ARRAY_SIZE(omap3pandora_in_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	snd_soc_dapm_add_routes(codec, omap3pandora_in_map,
+	snd_soc_dapm_add_routes(dapm, omap3pandora_in_map,
 		ARRAY_SIZE(omap3pandora_in_map));
 
-	return snd_soc_dapm_sync(codec);
+	return snd_soc_dapm_sync(dapm);
 }
 
 static struct snd_soc_ops omap3pandora_ops = {
diff --git a/sound/soc/omap/osk5912.c b/sound/soc/omap/osk5912.c
index 65ae00e..7e75e77 100644
--- a/sound/soc/omap/osk5912.c
+++ b/sound/soc/omap/osk5912.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -116,19 +115,20 @@
 static int osk_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add osk5912 specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up osk5912 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/overo.c b/sound/soc/omap/overo.c
index e95a607..bbcf380 100644
--- a/sound/soc/omap/overo.c
+++ b/sound/soc/omap/overo.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 04b5723..09fb0df 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -30,14 +30,12 @@
 #include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <plat/mcbsp.h>
 
 #include <asm/mach-types.h>
 
 #include "omap-mcbsp.h"
 #include "omap-pcm.h"
-#include "../codecs/tlv320aic3x.h"
 
 #define RX51_TVOUT_SEL_GPIO		40
 #define RX51_JACK_DETECT_GPIO		177
@@ -58,19 +56,21 @@
 
 static void rx51_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (rx51_spk_func)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 	if (rx51_dmic_func)
-		snd_soc_dapm_enable_pin(codec, "DMic");
+		snd_soc_dapm_enable_pin(dapm, "DMic");
 	else
-		snd_soc_dapm_disable_pin(codec, "DMic");
+		snd_soc_dapm_disable_pin(dapm, "DMic");
 
 	gpio_set_value(RX51_TVOUT_SEL_GPIO,
 		       rx51_jack_func == RX51_JACK_TVOUT);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int rx51_startup(struct snd_pcm_substream *substream)
@@ -244,12 +244,13 @@
 static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "MIC3L");
-	snd_soc_dapm_nc_pin(codec, "MIC3R");
-	snd_soc_dapm_nc_pin(codec, "LINE1R");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L");
+	snd_soc_dapm_nc_pin(dapm, "MIC3R");
+	snd_soc_dapm_nc_pin(dapm, "LINE1R");
 
 	/* Add RX-51 specific controls */
 	err = snd_soc_add_controls(codec, aic34_rx51_controls,
@@ -258,13 +259,13 @@
 		return err;
 
 	/* Add RX-51 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic34_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic34_dapm_widgets,
 				  ARRAY_SIZE(aic34_dapm_widgets));
 
 	/* Set up RX-51 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	/* AV jack detection */
 	err = snd_soc_jack_new(codec, "AV Jack",
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index 07fbcf7..3f72d17 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -28,7 +28,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -191,39 +190,40 @@
 static int sdp3430_twl4030_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add SDP3430 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, sdp3430_twl4030_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, sdp3430_twl4030_dapm_widgets,
 				ARRAY_SIZE(sdp3430_twl4030_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up SDP3430 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* SDP3430 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
 	/* TWL4030 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "AUXL");
-	snd_soc_dapm_nc_pin(codec, "AUXR");
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "AUXL");
+	snd_soc_dapm_nc_pin(dapm, "AUXR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
 
-	snd_soc_dapm_nc_pin(codec, "OUTL");
-	snd_soc_dapm_nc_pin(codec, "OUTR");
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "OUTL");
+	snd_soc_dapm_nc_pin(dapm, "OUTR");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/omap/sdp4430.c b/sound/soc/omap/sdp4430.c
index 4b4463d..189e039 100644
--- a/sound/soc/omap/sdp4430.c
+++ b/sound/soc/omap/sdp4430.c
@@ -24,7 +24,7 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
+#include <sound/jack.h>
 
 #include <asm/mach-types.h>
 #include <plat/hardware.h>
@@ -66,6 +66,21 @@
 	.hw_params = sdp4430_hw_params,
 };
 
+/* Headset jack */
+static struct snd_soc_jack hs_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+	{
+		.pin = "Headset Mic",
+		.mask = SND_JACK_MICROPHONE,
+	},
+	{
+		.pin = "Headset Stereophone",
+		.mask = SND_JACK_HEADPHONE,
+	},
+};
+
 static int sdp4430_get_power_mode(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
@@ -102,6 +117,7 @@
 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
 	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
 	SND_SOC_DAPM_SPK("Earphone Spk", NULL),
+	SND_SOC_DAPM_INPUT("Aux/FM Stereo In"),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -124,11 +140,16 @@
 
 	/* Earphone speaker */
 	{"Earphone Spk", NULL, "EP"},
+
+	/* Aux/FM Stereo In: AFML, AFMR */
+	{"AFML", NULL, "Aux/FM Stereo In"},
+	{"AFMR", NULL, "Aux/FM Stereo In"},
 };
 
 static int sdp4430_twl6040_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add SDP4430 specific controls */
@@ -138,25 +159,39 @@
 		return ret;
 
 	/* Add SDP4430 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, sdp4430_twl6040_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, sdp4430_twl6040_dapm_widgets,
 				ARRAY_SIZE(sdp4430_twl6040_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up SDP4430 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* SDP4430 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_enable_pin(codec, "Headset Mic");
-	snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "AFML");
+	snd_soc_dapm_enable_pin(dapm, "AFMR");
+	snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
 
-	/* TWL6040 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "AFML");
-	snd_soc_dapm_nc_pin(codec, "AFMR");
+	ret = snd_soc_dapm_sync(dapm);
+	if (ret)
+		return ret;
 
-	ret = snd_soc_dapm_sync(codec);
+	/* Headset jack detection */
+	ret = snd_soc_jack_new(codec, "Headset Jack",
+				SND_JACK_HEADSET, &hs_jack);
+	if (ret)
+		return ret;
+
+	ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
+				hs_jack_pins);
+
+	if (machine_is_omap_4430sdp())
+		twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET);
+	else
+		snd_soc_jack_report(&hs_jack, SND_JACK_HEADSET, SND_JACK_HEADSET);
 
 	return ret;
 }
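The sdp4430 hunk above also adds headset jack support through the generic ASoC jack layer. As a hedged, self-contained reminder of how that layer is used, the sketch below creates a jack against the codec, attaches DAPM pins with per-pin masks, and reports a status; each report enables the attached pins whose mask bits are set and disables the others. The exmpl_* identifiers are placeholders, not names from the patch.

#include <sound/soc.h>
#include <sound/jack.h>

/* The pin names must match DAPM pins registered by the machine driver. */
static struct snd_soc_jack exmpl_jack;

static struct snd_soc_jack_pin exmpl_jack_pins[] = {
	{ .pin = "Headset Mic",		.mask = SND_JACK_MICROPHONE },
	{ .pin = "Headset Stereophone",	.mask = SND_JACK_HEADPHONE },
};

static int exmpl_jack_setup(struct snd_soc_codec *codec)
{
	int ret;

	ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
			       &exmpl_jack);
	if (ret)
		return ret;

	ret = snd_soc_jack_add_pins(&exmpl_jack, ARRAY_SIZE(exmpl_jack_pins),
				    exmpl_jack_pins);
	if (ret)
		return ret;

	/* Without hardware detection, reporting the full headset state
	 * simply forces both attached pins on. */
	snd_soc_jack_report(&exmpl_jack, SND_JACK_HEADSET, SND_JACK_HEADSET);
	return 0;
}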
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
index 718031e..0170994 100644
--- a/sound/soc/omap/zoom2.c
+++ b/sound/soc/omap/zoom2.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -162,35 +161,36 @@
 static int zoom2_twl4030_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add Zoom2 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, zoom2_twl4030_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, zoom2_twl4030_dapm_widgets,
 				ARRAY_SIZE(zoom2_twl4030_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up Zoom2 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* Zoom2 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_enable_pin(codec, "Headset Mic");
-	snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
-	snd_soc_dapm_enable_pin(codec, "Aux In");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Aux In");
 
 	/* TWL4030 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 
 	return ret;
 }
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index f451acd..784cff5 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/corgi.h>
@@ -48,51 +47,53 @@
 
 static void corgi_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	switch (corgi_jack_func) {
 	case CORGI_HP:
 		/* set = unmute headphone */
 		gpio_set_value(CORGI_GPIO_MUTE_L, 1);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 1);
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_MIC:
 		/* reset = mute headphone */
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 0);
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_LINE:
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 0);
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_enable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_enable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_HEADSET:
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 1);
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		break;
 	}
 
 	if (corgi_spk_func == CORGI_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int corgi_startup(struct snd_pcm_substream *substream)
@@ -279,10 +280,11 @@
 static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
 
 	/* Add corgi specific controls */
 	err = snd_soc_add_controls(codec, wm8731_corgi_controls,
@@ -291,13 +293,13 @@
 		return err;
 
 	/* Add corgi specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
 				  ARRAY_SIZE(wm8731_dapm_widgets));
 
 	/* Set up corgi specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
@@ -305,10 +307,10 @@
 static struct snd_soc_dai_link corgi_dai = {
 	.name = "WM8731",
 	.stream_name = "WM8731",
-	.cpu_dai_name = "pxa-is2-dai",
+	.cpu_dai_name = "pxa2xx-i2s",
 	.codec_dai_name = "wm8731-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name = "wm8731-codec-0.001a",
+	.codec_name = "wm8731-codec-0.001b",
 	.init = corgi_wm8731_init,
 	.ops = &corgi_ops,
 };
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index c82cedb..dc65650 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audio.h>
 #include <mach/eseries-gpio.h>
@@ -92,23 +91,24 @@
 static int e740_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_nc_pin(codec, "HPOUTL");
-	snd_soc_dapm_nc_pin(codec, "HPOUTR");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "CDINL");
-	snd_soc_dapm_nc_pin(codec, "CDINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "HPOUTL");
+	snd_soc_dapm_nc_pin(dapm, "HPOUTR");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "CDINL");
+	snd_soc_dapm_nc_pin(dapm, "CDINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	snd_soc_dapm_new_controls(codec, e740_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e740_dapm_widgets,
 					ARRAY_SIZE(e740_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -117,7 +117,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9705-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name = "wm9705-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 4c14380..51897fc 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audio.h>
 #include <mach/eseries-gpio.h>
@@ -74,23 +73,24 @@
 static int e750_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_nc_pin(codec, "LOUT");
-	snd_soc_dapm_nc_pin(codec, "ROUT");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "CDINL");
-	snd_soc_dapm_nc_pin(codec, "CDINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "LOUT");
+	snd_soc_dapm_nc_pin(dapm, "ROUT");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "CDINL");
+	snd_soc_dapm_nc_pin(dapm, "CDINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	snd_soc_dapm_new_controls(codec, e750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e750_dapm_widgets,
 					ARRAY_SIZE(e750_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -99,7 +99,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9705-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9705-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index d42e5fe..053ed20 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/audio.h>
@@ -75,12 +74,13 @@
 static int e800_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, e800_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e800_dapm_widgets,
 					ARRAY_SIZE(e800_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -89,7 +89,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9712-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9712-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index eadf9d3..b13a425 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/audio.h>
@@ -38,7 +37,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9712-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
@@ -46,7 +45,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9712-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 5ef0526..67dcc36 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/uda1380.h>
 
 #include <mach/magician.h>
@@ -44,27 +43,29 @@
 
 static void magician_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (magician_spk_switch)
-		snd_soc_dapm_enable_pin(codec, "Speaker");
+		snd_soc_dapm_enable_pin(dapm, "Speaker");
 	else
-		snd_soc_dapm_disable_pin(codec, "Speaker");
+		snd_soc_dapm_disable_pin(dapm, "Speaker");
 	if (magician_hp_switch)
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	else
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 
 	switch (magician_in_sel) {
 	case MAGICIAN_MIC:
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
 		break;
 	case MAGICIAN_MIC_EXT:
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
 		break;
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int magician_startup(struct snd_pcm_substream *substream)
@@ -399,15 +400,16 @@
 static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "VOUTLHP");
-	snd_soc_dapm_nc_pin(codec, "VOUTRHP");
+	snd_soc_dapm_nc_pin(dapm, "VOUTLHP");
+	snd_soc_dapm_nc_pin(dapm, "VOUTRHP");
 
 	/* FIXME: is anything connected here? */
-	snd_soc_dapm_nc_pin(codec, "VINL");
-	snd_soc_dapm_nc_pin(codec, "VINR");
+	snd_soc_dapm_nc_pin(dapm, "VINL");
+	snd_soc_dapm_nc_pin(dapm, "VINR");
 
 	/* Add magician specific controls */
 	err = snd_soc_add_controls(codec, uda1380_magician_controls,
@@ -416,13 +418,13 @@
 		return err;
 
 	/* Add magician specific widgets */
-	snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
 				  ARRAY_SIZE(uda1380_dapm_widgets));
 
 	/* Set up magician specific audio path interconnects */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index f284cc5..38ca675 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -50,7 +50,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/ac97_codec.h>
 
@@ -130,13 +129,14 @@
 static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned short reg;
 
 	/* Add mioa701 specific widgets */
-	snd_soc_dapm_new_controls(codec, ARRAY_AND_SIZE(mioa701_dapm_widgets));
+	snd_soc_dapm_new_controls(dapm, ARRAY_AND_SIZE(mioa701_dapm_widgets));
 
 	/* Set up mioa701 specific audio path interconnects (audio_map) */
-	snd_soc_dapm_add_routes(codec, ARRAY_AND_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, ARRAY_AND_SIZE(audio_map));
 
 	/* Prepare GPIO8 for rear speaker amplifier */
 	reg = codec->driver->read(codec, AC97_GPIO_CFG);
@@ -146,12 +146,12 @@
 	reg = codec->driver->read(codec, AC97_3D_CONTROL);
 	codec->driver->write(codec, AC97_3D_CONTROL, reg | 0xc000);
 
-	snd_soc_dapm_enable_pin(codec, "Front Speaker");
-	snd_soc_dapm_enable_pin(codec, "Rear Speaker");
-	snd_soc_dapm_enable_pin(codec, "Front Mic");
-	snd_soc_dapm_enable_pin(codec, "GSM Line In");
-	snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_enable_pin(dapm, "Front Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Rear Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Front Mic");
+	snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+	snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -162,7 +162,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9713-hifi",
 		.codec_name = "wm9713-codec",
 		.init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9713-aux",
 		.codec_name = "wm9713-codec",
 		.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 13f6d48..504e400 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -77,37 +76,38 @@
 static int palm27x_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* add palm27x specific widgets */
-	err = snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets,
+	err = snd_soc_dapm_new_controls(dapm, palm27x_dapm_widgets,
 				ARRAY_SIZE(palm27x_dapm_widgets));
 	if (err)
 		return err;
 
 	/* set up palm27x specific audio path audio_map */
-	err = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	err = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (err)
 		return err;
 
 	/* connected pins */
 	if (machine_is_palmld())
-		snd_soc_dapm_enable_pin(codec, "MIC1");
-	snd_soc_dapm_enable_pin(codec, "HPOUTL");
-	snd_soc_dapm_enable_pin(codec, "HPOUTR");
-	snd_soc_dapm_enable_pin(codec, "LOUT2");
-	snd_soc_dapm_enable_pin(codec, "ROUT2");
+		snd_soc_dapm_enable_pin(dapm, "MIC1");
+	snd_soc_dapm_enable_pin(dapm, "HPOUTL");
+	snd_soc_dapm_enable_pin(dapm, "HPOUTR");
+	snd_soc_dapm_enable_pin(dapm, "LOUT2");
+	snd_soc_dapm_enable_pin(dapm, "ROUT2");
 
 	/* not connected pins */
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONOOUT");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	err = snd_soc_dapm_sync(codec);
+	err = snd_soc_dapm_sync(dapm);
 	if (err)
 		return err;
 
@@ -132,7 +132,7 @@
 {
 	.name = "AC97 HiFi",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "pxa-ac97.0",
+	.cpu_dai_name = "pxa2xx-ac97",
 	.codec_dai_name =  "wm9712-hifi",
 	.codec_name = "wm9712-codec",
 	.platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@
 {
 	.name = "AC97 Aux",
 	.stream_name = "AC97 Aux",
-	.cpu_dai_name = "pxa-ac97.1",
+	.cpu_dai_name = "pxa2xx-ac97-aux",
 	.codec_dai_name = "wm9712-aux",
 	.codec_name = "wm9712-codec",
 	.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 84edd03..a7d4999 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <asm/hardware/locomo.h>
@@ -46,6 +45,8 @@
 
 static void poodle_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	if (poodle_jack_func == POODLE_HP) {
 		/* set = unmute headphone */
@@ -53,23 +54,23 @@
 			POODLE_LOCOMO_GPIO_MUTE_L, 1);
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_R, 1);
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	} else {
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_L, 0);
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_R, 0);
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 	}
 
 	/* set the endpoints to their new connection states */
 	if (poodle_spk_func == POODLE_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int poodle_startup(struct snd_pcm_substream *substream)
@@ -244,11 +245,12 @@
 static int poodle_wm8731_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
-	snd_soc_dapm_enable_pin(codec, "MICIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
+	snd_soc_dapm_enable_pin(dapm, "MICIN");
 
 	/* Add poodle specific controls */
 	err = snd_soc_add_controls(codec, wm8731_poodle_controls,
@@ -257,13 +259,13 @@
 		return err;
 
 	/* Add poodle specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
 				  ARRAY_SIZE(wm8731_dapm_widgets));
 
 	/* Set up poodle specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
@@ -274,7 +276,7 @@
 	.cpu_dai_name = "pxa2xx-i2s",
 	.codec_dai_name = "wm8731-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name = "wm8731-codec.0-001a",
+	.codec_name = "wm8731-codec.0-001b",
 	.init = poodle_wm8731_init,
 	.ops = &poodle_ops,
 };
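
The machine-driver hunks above all make the same mechanical conversion: the DAPM widget, route and pin helpers now take the struct snd_soc_dapm_context embedded in the codec (&codec->dapm) rather than the codec itself, and the explicit #include <sound/soc-dapm.h> can be dropped because the DAPM declarations now come in through <sound/soc.h> (the drivers keep using the DAPM calls after the include is removed). A minimal sketch of a converted init callback, using only the calls that appear in these hunks; the widget and route tables are hypothetical placeholders for a board's own definitions:

#include <linux/kernel.h>
#include <sound/soc.h>

/* Hypothetical board endpoints; real drivers list the codec pins they wire up. */
static const struct snd_soc_dapm_widget board_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
	SND_SOC_DAPM_SPK("Ext Spk", NULL),
};

static const struct snd_soc_dapm_route board_audio_map[] = {
	/* "HPOUTL"/"HPOUTR" stand in for whatever pins the codec driver exports. */
	{"Headphone Jack", NULL, "HPOUTL"},
	{"Headphone Jack", NULL, "HPOUTR"},
};

static int board_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;	/* was: the codec itself */
	int err;

	err = snd_soc_dapm_new_controls(dapm, board_dapm_widgets,
					ARRAY_SIZE(board_dapm_widgets));
	if (err)
		return err;

	err = snd_soc_dapm_add_routes(dapm, board_audio_map,
				      ARRAY_SIZE(board_audio_map));
	if (err)
		return err;

	/* The pin-state helpers take the dapm context as well. */
	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
	snd_soc_dapm_disable_pin(dapm, "Ext Spk");

	return snd_soc_dapm_sync(dapm);
}

The spitz, tosa, z2 and zylonite hunks below repeat the same conversion.
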
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 2cda82bc..0fd60f4 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -22,7 +22,6 @@
 #include <linux/gpio.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 
diff --git a/sound/soc/pxa/saarb.c b/sound/soc/pxa/saarb.c
index d63cb47..9595189 100644
--- a/sound/soc/pxa/saarb.c
+++ b/sound/soc/pxa/saarb.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -133,20 +132,21 @@
 static int saarb_pm860x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_new_controls(codec, saarb_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, saarb_dapm_widgets,
 				  ARRAY_SIZE(saarb_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Speaker");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 1");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 3");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic 2");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 0b30d7d..8e15713 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/spitz.h>
@@ -46,61 +45,63 @@
 
 static void spitz_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (spitz_spk_func == SPITZ_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* set up jack connection */
 	switch (spitz_jack_func) {
 	case SPITZ_HP:
 		/* enable and unmute hp jack, disable mic bias */
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 1);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
 		break;
 	case SPITZ_MIC:
 		/* enable mic jack and bias, mute hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	case SPITZ_LINE:
 		/* enable line jack, disable mic bias and mute hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_enable_pin(codec, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_enable_pin(dapm, "Line Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	case SPITZ_HEADSET:
 		/* enable and unmute headset jack enable mic bias, mute L hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
 		break;
 	case SPITZ_HP_OFF:
 
 		/* jack removed, everything off */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	}
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int spitz_startup(struct snd_pcm_substream *substream)
@@ -281,16 +282,17 @@
 static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "RINPUT1");
-	snd_soc_dapm_nc_pin(codec, "LINPUT2");
-	snd_soc_dapm_nc_pin(codec, "RINPUT2");
-	snd_soc_dapm_nc_pin(codec, "LINPUT3");
-	snd_soc_dapm_nc_pin(codec, "RINPUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONO1");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONO1");
 
 	/* Add spitz specific controls */
 	err = snd_soc_add_controls(codec, wm8750_spitz_controls,
@@ -299,13 +301,13 @@
 		return err;
 
 	/* Add spitz specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 				  ARRAY_SIZE(wm8750_dapm_widgets));
 
 	/* Set up spitz specific audio paths */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
@@ -313,10 +315,10 @@
 static struct snd_soc_dai_link spitz_dai = {
 	.name = "wm8750",
 	.stream_name = "WM8750",
-	.cpu_dai_name = "pxa-is2",
+	.cpu_dai_name = "pxa2xx-i2s",
 	.codec_dai_name = "wm8750-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name = "wm8750-codec.0-001a",
+	.codec_name = "wm8750-codec.0-001b",
 	.init = spitz_wm8750_init,
 	.ops = &spitz_ops,
 };
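
Besides the DAPM conversion, the spitz hunks fix two lookup strings in the dai_link: the cpu_dai_name typo ("pxa-is2" becomes "pxa2xx-i2s") and the codec_name suffix ("0-001a" becomes "0-001b"). Assuming the usual I2C device naming, that suffix is the dev_name() of the codec's I2C client, "<bus>-<hex address>", which ASoC appends to the codec driver name, so "wm8750-codec.0-001b" binds to a WM8750 at address 0x1b on bus 0. A hypothetical board-side registration that produces a matching name (the "wm8750" id string and bus number are illustrative):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>

/*
 * An I2C client registered on bus 0 at address 0x1b gets dev_name() "0-001b",
 * which yields the component name "wm8750-codec.0-001b" the dai_link asks for.
 */
static struct i2c_board_info board_audio_i2c[] __initdata = {
	{
		I2C_BOARD_INFO("wm8750", 0x1b),
	},
};

static void __init board_register_audio_i2c(void)
{
	i2c_register_board_info(0, board_audio_i2c, ARRAY_SIZE(board_audio_i2c));
}
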
diff --git a/sound/soc/pxa/tavorevb3.c b/sound/soc/pxa/tavorevb3.c
index 248c283..f881f65 100644
--- a/sound/soc/pxa/tavorevb3.c
+++ b/sound/soc/pxa/tavorevb3.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -133,20 +132,21 @@
 static int evb3_pm860x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_new_controls(codec, evb3_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, evb3_dapm_widgets,
 				  ARRAY_SIZE(evb3_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Speaker");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 1");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 3");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic 2");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index 7b983f9..4b6e5d6 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/tosa.h>
@@ -49,31 +48,33 @@
 
 static void tosa_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	switch (tosa_jack_func) {
 	case TOSA_HP:
-		snd_soc_dapm_disable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case TOSA_MIC_INT:
-		snd_soc_dapm_enable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case TOSA_HEADSET:
-		snd_soc_dapm_disable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		break;
 	}
 
 	if (tosa_spk_func == TOSA_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Speaker");
+		snd_soc_dapm_enable_pin(dapm, "Speaker");
 	else
-		snd_soc_dapm_disable_pin(codec, "Speaker");
+		snd_soc_dapm_disable_pin(dapm, "Speaker");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int tosa_startup(struct snd_pcm_substream *substream)
@@ -191,10 +192,11 @@
 static int tosa_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONOOUT");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
 
 	/* add tosa specific controls */
 	err = snd_soc_add_controls(codec, tosa_controls,
@@ -203,13 +205,13 @@
 		return err;
 
 	/* add tosa specific widgets */
-	snd_soc_dapm_new_controls(codec, tosa_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tosa_dapm_widgets,
 				  ARRAY_SIZE(tosa_dapm_widgets));
 
 	/* set up tosa specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
@@ -217,7 +219,7 @@
 {
 	.name = "AC97",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "pxa-ac97.0",
+	.cpu_dai_name = "pxa2xx-ac97",
 	.codec_dai_name = "wm9712-hifi",
 	.platform_name = "pxa-pcm-audio",
 	.codec_name = "wm9712-codec",
@@ -227,7 +229,7 @@
 {
 	.name = "AC97 Aux",
 	.stream_name = "AC97 Aux",
-	.cpu_dai_name = "pxa-ac97.1",
+	.cpu_dai_name = "pxa2xx-ac97-aux",
 	.codec_dai_name = "wm9712-aux",
 	.platform_name = "pxa-pcm-audio",
 	.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index 4cc841b..3ceaef6 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -105,6 +104,7 @@
 		.name		= "hsdet-gpio",
 		.report		= SND_JACK_HEADSET,
 		.debounce_time	= 200,
+		.invert		= 1,
 	},
 };
 
@@ -140,22 +140,23 @@
 static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* NC codec pins */
-	snd_soc_dapm_disable_pin(codec, "LINPUT3");
-	snd_soc_dapm_disable_pin(codec, "RINPUT3");
-	snd_soc_dapm_disable_pin(codec, "OUT3");
-	snd_soc_dapm_disable_pin(codec, "MONO");
+	snd_soc_dapm_disable_pin(dapm, "LINPUT3");
+	snd_soc_dapm_disable_pin(dapm, "RINPUT3");
+	snd_soc_dapm_disable_pin(dapm, "OUT3");
+	snd_soc_dapm_disable_pin(dapm, "MONO");
 
 	/* Add z2 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 				 ARRAY_SIZE(wm8750_dapm_widgets));
 
 	/* Set up z2 specific audio paths */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		goto err;
 
@@ -192,7 +193,7 @@
 	.cpu_dai_name	= "pxa2xx-i2s",
 	.codec_dai_name	= "wm8750-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name	= "wm8750-codec.0-001a",
+	.codec_name	= "wm8750-codec.0-001b",
 	.init		= z2_wm8750_init,
 	.ops		= &z2_ops,
 };
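
The z2 hunks, in addition to the same DAPM and codec_name updates, set .invert = 1 on the headset-detect GPIO, telling the jack code that the detect line is active-low. A minimal sketch of the gpio-jack wiring these fields configure, assuming a jack object already created elsewhere; the GPIO number and function names are placeholders:

#include <linux/kernel.h>
#include <sound/jack.h>
#include <sound/soc.h>

static struct snd_soc_jack hs_jack;	/* set up elsewhere via snd_soc_jack_new() */

static struct snd_soc_jack_gpio hs_jack_gpios[] = {
	{
		.gpio		= 37,		/* placeholder GPIO number */
		.name		= "hsdet-gpio",
		.report		= SND_JACK_HEADSET,
		.debounce_time	= 200,		/* milliseconds */
		.invert		= 1,		/* detect line is active-low */
	},
};

static int board_add_jack_gpios(void)
{
	/* Requests the GPIO and reports SND_JACK_HEADSET, honouring .invert. */
	return snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios),
				      hs_jack_gpios);
}
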
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index d27e05a..25bba10 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "../codecs/wm9713.h"
 #include "pxa2xx-ac97.h"
@@ -73,21 +72,22 @@
 static int zylonite_wm9713_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	if (clk_pout)
 		snd_soc_dai_set_pll(rtd->codec_dai, 0, 0,
 				    clk_get_rate(pout), 0);
 
-	snd_soc_dapm_new_controls(codec, zylonite_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, zylonite_dapm_widgets,
 				  ARRAY_SIZE(zylonite_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* Static setup for now */
-	snd_soc_dapm_enable_pin(codec, "Headphone");
-	snd_soc_dapm_enable_pin(codec, "Headset Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Headphone");
+	snd_soc_dapm_enable_pin(dapm, "Headset Earpiece");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
@@ -166,7 +166,7 @@
 	.stream_name = "AC97 HiFi",
 	.codec_name = "wm9713-codec",
 	.platform_name = "pxa-pcm-audio",
-	.cpu_dai_name = "pxa-ac97.0",
+	.cpu_dai_name = "pxa2xx-ac97",
 	.codec_name = "wm9713-hifi",
 	.init = zylonite_wm9713_init,
 },
@@ -175,7 +175,7 @@
 	.stream_name = "AC97 Aux",
 	.codec_name = "wm9713-codec",
 	.platform_name = "pxa-pcm-audio",
-	.cpu_dai_name = "pxa-ac97.1",
+	.cpu_dai_name = "pxa2xx-ac97-aux",
 	.codec_name = "wm9713-aux",
 },
 {
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
deleted file mode 100644
index d85bf8a..0000000
--- a/sound/soc/s3c24xx/Kconfig
+++ /dev/null
@@ -1,171 +0,0 @@
-config SND_S3C24XX_SOC
-	tristate "SoC Audio for the Samsung S3CXXXX chips"
-	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210
-	select S3C64XX_DMA if ARCH_S3C64XX
-	select S3C2410_DMA if ARCH_S3C2410
-	help
-	  Say Y or M if you want to add support for codecs attached to
-	  the S3C24XX AC97 or I2S interfaces. You will also need to
-	  select the audio interfaces to support below.
-
-config SND_S3C24XX_SOC_I2S
-	tristate
-	select S3C2410_DMA
-
-config SND_S3C_I2SV2_SOC
-	tristate
-
-config SND_S3C2412_SOC_I2S
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C2410_DMA
-
-config SND_S3C64XX_SOC_I2S
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C64XX_DMA
-
-config SND_S3C64XX_SOC_I2S_V4
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C64XX_DMA
-
-config SND_S3C_SOC_PCM
-	tristate
-
-config SND_S3C_SOC_AC97
-	tristate
-	select SND_SOC_AC97_BUS
-
-config SND_S5P_SOC_SPDIF
-	tristate
-	select SND_SOC_SPDIF
-
-config SND_S3C24XX_SOC_NEO1973_WM8753
-	tristate "SoC I2S Audio support for NEO1973 - WM8753"
-	depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA01
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_WM8753
-	help
-	  Say Y if you want to add support for SoC audio on smdk2440
-	  with the WM8753.
-
-config SND_S3C24XX_SOC_NEO1973_GTA02_WM8753
-	tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
-	depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA02
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_WM8753
-	help
-	  This driver provides audio support for the Openmoko Neo FreeRunner
-	  smartphone.
-	  
-config SND_S3C24XX_SOC_JIVE_WM8750
-	tristate "SoC I2S Audio support for Jive"
-	depends on SND_S3C24XX_SOC && MACH_JIVE
-	select SND_SOC_WM8750
-	select SND_S3C2412_SOC_I2S
-	help
-	  Sat Y if you want to add support for SoC audio on the Jive.
-
-config SND_S3C64XX_SOC_WM8580
-	tristate "SoC I2S Audio support for WM8580 on SMDK64XX"
-	depends on SND_S3C24XX_SOC && MACH_SMDK6410
-	select SND_SOC_WM8580
-	select SND_S3C64XX_SOC_I2S_V4
-	help
-	  Say Y if you want to add support for SoC audio on the SMDK6410.
-
-config SND_S3C24XX_SOC_SMDK2443_WM9710
-	tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
-	depends on SND_S3C24XX_SOC && MACH_SMDK2443
-	select S3C2410_DMA
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_S3C_SOC_AC97
-	help
-	  Say Y if you want to add support for SoC audio on smdk2443
-	  with the WM9710.
-
-config SND_S3C24XX_SOC_LN2440SBC_ALC650
-	tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select S3C2410_DMA
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_S3C_SOC_AC97
-	help
-	  Say Y if you want to add support for SoC audio on ln2440sbc
-	  with the ALC650.
-
-config SND_S3C24XX_SOC_S3C24XX_UDA134X
-	tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
-       	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-       	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_L3
-       	select SND_SOC_UDA134X
-
-config SND_S3C24XX_SOC_SIMTEC
-	tristate
-	help
-	  Internal node for common S3C24XX/Simtec suppor
-
-config SND_S3C24XX_SOC_SIMTEC_TLV320AIC23
-	tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_TLV320AIC23
-	select SND_S3C24XX_SOC_SIMTEC
-
-config SND_S3C24XX_SOC_SIMTEC_HERMES
-	tristate "SoC I2S Audio support for Simtec Hermes board"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_TLV320AIC3X
-	select SND_S3C24XX_SOC_SIMTEC
-
-config SND_S3C24XX_SOC_RX1950_UDA1380
-	tristate "Audio support for the HP iPAQ RX1950"
-	depends on SND_S3C24XX_SOC && MACH_RX1950
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_UDA1380
-	help
-	  This driver provides audio support for HP iPAQ RX1950 PDA.
-
-config SND_SOC_SMDK_WM9713
-	tristate "SoC AC97 Audio support for SMDK with WM9713"
-	depends on SND_S3C24XX_SOC && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
-	select SND_SOC_WM9713
-	select SND_S3C_SOC_AC97
-	help
-	  Sat Y if you want to add support for SoC audio on the SMDK.
-
-config SND_S3C64XX_SOC_SMARTQ
-	tristate "SoC I2S Audio support for SmartQ board"
-	depends on SND_S3C24XX_SOC && MACH_SMARTQ
-	select SND_S3C64XX_SOC_I2S
-	select SND_SOC_WM8750
-
-config SND_S5PC110_SOC_AQUILA_WM8994
-	tristate "SoC I2S Audio support for AQUILA - WM8994"
-	depends on SND_S3C24XX_SOC && MACH_AQUILA
-	select SND_S3C64XX_SOC_I2S_V4
-	select SND_SOC_WM8994
-	help
-	  Say Y if you want to add support for SoC audio on aquila
-	  with the WM8994.
-
-config SND_S5PV210_SOC_GONI_WM8994
-	tristate "SoC I2S Audio support for GONI - WM8994"
-	depends on SND_S3C24XX_SOC && MACH_GONI
-	select SND_S3C64XX_SOC_I2S_V4
-	select SND_SOC_WM8994
-	help
-	  Say Y if you want to add support for SoC audio on goni
-	  with the WM8994.
-
-config SND_SOC_SMDK_SPDIF
-	tristate "SoC S/PDIF Audio support for SMDK"
-	depends on SND_S3C24XX_SOC && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210)
-	select SND_S5P_SOC_SPDIF
-	help
-	  Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
diff --git a/sound/soc/s3c24xx/Makefile b/sound/soc/s3c24xx/Makefile
deleted file mode 100644
index ee8f41d..0000000
--- a/sound/soc/s3c24xx/Makefile
+++ /dev/null
@@ -1,55 +0,0 @@
-# S3c24XX Platform Support
-snd-soc-s3c24xx-objs := s3c-dma.o
-snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
-snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
-snd-soc-s3c64xx-i2s-objs := s3c64xx-i2s.o
-snd-soc-s3c-ac97-objs := s3c-ac97.o
-snd-soc-s3c64xx-i2s-v4-objs := s3c64xx-i2s-v4.o
-snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
-snd-soc-s3c-pcm-objs := s3c-pcm.o
-snd-soc-samsung-spdif-objs := spdif.o
-
-obj-$(CONFIG_SND_S3C24XX_SOC) += snd-soc-s3c24xx.o
-obj-$(CONFIG_SND_S3C24XX_SOC_I2S) += snd-soc-s3c24xx-i2s.o
-obj-$(CONFIG_SND_S3C_SOC_AC97) += snd-soc-s3c-ac97.o
-obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
-obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += snd-soc-s3c64xx-i2s.o
-obj-$(CONFIG_SND_S3C64XX_SOC_I2S_V4) += snd-soc-s3c64xx-i2s-v4.o
-obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
-obj-$(CONFIG_SND_S3C_SOC_PCM) += snd-soc-s3c-pcm.o
-obj-$(CONFIG_SND_S5P_SOC_SPDIF) += snd-soc-samsung-spdif.o
-
-# S3C24XX Machine Support
-snd-soc-jive-wm8750-objs := jive_wm8750.o
-snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
-snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
-snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
-snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
-snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
-snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
-snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
-snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
-snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o
-snd-soc-smdk64xx-wm8580-objs := smdk64xx_wm8580.o
-snd-soc-smdk-wm9713-objs := smdk_wm9713.o
-snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
-snd-soc-aquila-wm8994-objs := aquila_wm8994.o
-snd-soc-goni-wm8994-objs := goni_wm8994.o
-snd-soc-smdk-spdif-objs := smdk_spdif.o
-
-obj-$(CONFIG_SND_S3C24XX_SOC_JIVE_WM8750) += snd-soc-jive-wm8750.o
-obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
-obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
-obj-$(CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
-obj-$(CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC) += snd-soc-s3c24xx-simtec.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
-obj-$(CONFIG_SND_S3C24XX_SOC_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o
-obj-$(CONFIG_SND_S3C64XX_SOC_WM8580) += snd-soc-smdk64xx-wm8580.o
-obj-$(CONFIG_SND_SOC_SMDK_WM9713) += snd-soc-smdk-wm9713.o
-obj-$(CONFIG_SND_S3C64XX_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
-obj-$(CONFIG_SND_S5PC110_SOC_AQUILA_WM8994) += snd-soc-aquila-wm8994.o
-obj-$(CONFIG_SND_S5PV210_SOC_GONI_WM8994) += snd-soc-goni-wm8994.o
-obj-$(CONFIG_SND_SOC_SMDK_SPDIF) += snd-soc-smdk-spdif.o
diff --git a/sound/soc/s3c24xx/aquila_wm8994.c b/sound/soc/s3c24xx/aquila_wm8994.c
deleted file mode 100644
index 235d197..0000000
--- a/sound/soc/s3c24xx/aquila_wm8994.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * aquila_wm8994.c
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/jack.h>
-#include <asm/mach-types.h>
-#include <mach/gpio.h>
-#include <mach/regs-clock.h>
-
-#include <linux/mfd/wm8994/core.h>
-#include <linux/mfd/wm8994/registers.h>
-#include "../codecs/wm8994.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-static struct snd_soc_card aquila;
-static struct platform_device *aquila_snd_device;
-
-/* 3.5 pie jack */
-static struct snd_soc_jack jack;
-
-/* 3.5 pie jack detection DAPM pins */
-static struct snd_soc_jack_pin jack_pins[] = {
-	{
-		.pin = "Headset Mic",
-		.mask = SND_JACK_MICROPHONE,
-	}, {
-		.pin = "Headset Stereophone",
-		.mask = SND_JACK_HEADPHONE | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-	},
-};
-
-/* 3.5 pie jack detection gpios */
-static struct snd_soc_jack_gpio jack_gpios[] = {
-	{
-		.gpio = S5PV210_GPH0(6),
-		.name = "DET_3.5",
-		.report = SND_JACK_HEADSET | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-		.debounce_time = 200,
-	},
-};
-
-static const struct snd_soc_dapm_widget aquila_dapm_widgets[] = {
-	SND_SOC_DAPM_SPK("Ext Spk", NULL),
-	SND_SOC_DAPM_SPK("Ext Rcv", NULL),
-	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
-	SND_SOC_DAPM_MIC("Headset Mic", NULL),
-	SND_SOC_DAPM_MIC("Main Mic", NULL),
-	SND_SOC_DAPM_MIC("2nd Mic", NULL),
-	SND_SOC_DAPM_LINE("Radio In", NULL),
-};
-
-static const struct snd_soc_dapm_route aquila_dapm_routes[] = {
-	{"Ext Spk", NULL, "SPKOUTLP"},
-	{"Ext Spk", NULL, "SPKOUTLN"},
-
-	{"Ext Rcv", NULL, "HPOUT2N"},
-	{"Ext Rcv", NULL, "HPOUT2P"},
-
-	{"Headset Stereophone", NULL, "HPOUT1L"},
-	{"Headset Stereophone", NULL, "HPOUT1R"},
-
-	{"IN1RN", NULL, "Headset Mic"},
-	{"IN1RP", NULL, "Headset Mic"},
-
-	{"IN1RN", NULL, "2nd Mic"},
-	{"IN1RP", NULL, "2nd Mic"},
-
-	{"IN1LN", NULL, "Main Mic"},
-	{"IN1LP", NULL, "Main Mic"},
-
-	{"IN2LN", NULL, "Radio In"},
-	{"IN2RN", NULL, "Radio In"},
-};
-
-static int aquila_wm8994_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int ret;
-
-	/* add aquila specific widgets */
-	snd_soc_dapm_new_controls(codec, aquila_dapm_widgets,
-			ARRAY_SIZE(aquila_dapm_widgets));
-
-	/* set up aquila specific audio routes */
-	snd_soc_dapm_add_routes(codec, aquila_dapm_routes,
-			ARRAY_SIZE(aquila_dapm_routes));
-
-	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN");
-	snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1P");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2P");
-	snd_soc_dapm_nc_pin(codec, "SPKOUTRN");
-	snd_soc_dapm_nc_pin(codec, "SPKOUTRP");
-
-	snd_soc_dapm_sync(codec);
-
-	/* Headset jack detection */
-	ret = snd_soc_jack_new(&aquila, "Headset Jack",
-			SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
-			&jack);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_pins(&jack, ARRAY_SIZE(jack_pins), jack_pins);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_gpios(&jack, ARRAY_SIZE(jack_gpios), jack_gpios);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int aquila_hifi_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	/* set the cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the cpu system clock */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-			0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_ops aquila_hifi_ops = {
-	.hw_params = aquila_hifi_hw_params,
-};
-
-static int aquila_voice_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	if (params_rate(params) != 8000)
-		return -EINVAL;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
-			SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_dai_driver voice_dai = {
-	.name = "aquila-voice-dai",
-	.playback = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-	.capture = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-};
-
-static struct snd_soc_ops aquila_voice_ops = {
-	.hw_params = aquila_voice_hw_params,
-};
-
-static struct snd_soc_dai_link aquila_dai[] = {
-{
-	.name = "WM8994",
-	.stream_name = "WM8994 HiFi",
-	.cpu_dai_name = "s3c64xx-i2s-v4",
-	.codec_dai_name = "wm8994-hifi",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.init = aquila_wm8994_init,
-	.ops = &aquila_hifi_ops,
-}, {
-	.name = "WM8994 Voice",
-	.stream_name = "Voice",
-	.cpu_dai_name = "aquila-voice-dai",
-	.codec_dai_name = "wm8994-voice",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.ops = &aquila_voice_ops,
-},
-};
-
-static struct snd_soc_card aquila = {
-	.name = "aquila",
-	.dai_link = aquila_dai,
-	.num_links = ARRAY_SIZE(aquila_dai),
-};
-
-static int __init aquila_init(void)
-{
-	int ret;
-
-	if (!machine_is_aquila())
-		return -ENODEV;
-
-	aquila_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!aquila_snd_device)
-		return -ENOMEM;
-
-	/* register voice DAI here */
-	ret = snd_soc_register_dai(&aquila_snd_device->dev, &voice_dai);
-	if (ret)
-		return ret;
-
-	platform_set_drvdata(aquila_snd_device, &aquila);
-	ret = platform_device_add(aquila_snd_device);
-
-	if (ret)
-		platform_device_put(aquila_snd_device);
-
-	return ret;
-}
-
-static void __exit aquila_exit(void)
-{
-	platform_device_unregister(aquila_snd_device);
-}
-
-module_init(aquila_init);
-module_exit(aquila_exit);
-
-/* Module information */
-MODULE_DESCRIPTION("ALSA SoC WM8994 Aquila(S5PC110)");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c b/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
deleted file mode 100644
index a962847..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s-v4.c
- *
- * ALSA SoC Audio Layer - S3C64XX I2Sv4 driver
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "regs-i2s-v2.h"
-#include "s3c64xx-i2s.h"
-
-static struct s3c2410_dma_client s3c64xx_dma_client_out = {
-	.name		= "I2Sv4 PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c64xx_dma_client_in = {
-	.name		= "I2Sv4 PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_out;
-static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_in;
-static struct s3c_i2sv2_info s3c64xx_i2sv4;
-
-static int s3c64xx_i2sv4_probe(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4;
-	int ret = 0;
-
-	snd_soc_dai_set_drvdata(dai, i2s);
-
-	ret = s3c_i2sv2_probe(dai, i2s, i2s->base);
-
-	return ret;
-}
-
-static int s3c_i2sv4_hw_params(struct snd_pcm_substream *substream,
-				 struct snd_pcm_hw_params *params,
-				 struct snd_soc_dai *cpu_dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(cpu_dai);
-	struct s3c_dma_params *dma_data;
-	u32 iismod;
-
-	dev_dbg(cpu_dai->dev, "Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = i2s->dma_playback;
-	else
-		dma_data = i2s->dma_capture;
-
-	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
-
-	iismod = readl(i2s->regs + S3C2412_IISMOD);
-	dev_dbg(cpu_dai->dev, "%s: r: IISMOD: %x\n", __func__, iismod);
-
-	iismod &= ~S3C64XX_IISMOD_BLC_MASK;
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S8:
-		iismod |= S3C64XX_IISMOD_BLC_8BIT;
-		break;
-	case SNDRV_PCM_FORMAT_S16_LE:
-		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
-		iismod |= S3C64XX_IISMOD_BLC_24BIT;
-		break;
-	}
-
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	dev_dbg(cpu_dai->dev, "%s: w: IISMOD: %x\n", __func__, iismod);
-
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c64xx_i2sv4_dai_ops = {
-	.hw_params	= s3c_i2sv4_hw_params,
-};
-
-static struct snd_soc_dai_driver s3c64xx_i2s_v4_dai = {
-	.symmetric_rates = 1,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,
-	},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,
-	},
-	.probe = s3c64xx_i2sv4_probe,
-	.ops = &s3c64xx_i2sv4_dai_ops,
-};
-
-static __devinit int s3c64xx_i2sv4_dev_probe(struct platform_device *pdev)
-{
-	struct s3c_audio_pdata *i2s_pdata;
-	struct s3c_i2sv2_info *i2s;
-	struct resource *res;
-	int ret;
-
-	i2s = &s3c64xx_i2sv4;
-
-	i2s->feature |= S3C_FEATURE_CDCLKCON;
-
-	i2s->dma_capture = &s3c64xx_i2sv4_pcm_stereo_in;
-	i2s->dma_playback = &s3c64xx_i2sv4_pcm_stereo_out;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_playback->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_capture->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
-		return -ENXIO;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res),
-				"s3c64xx-i2s-v4")) {
-		dev_err(&pdev->dev, "Unable to request SFR region\n");
-		return -EBUSY;
-	}
-	i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD;
-	i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD;
-
-	i2s->dma_capture->client = &s3c64xx_dma_client_in;
-	i2s->dma_capture->dma_size = 4;
-	i2s->dma_playback->client = &s3c64xx_dma_client_out;
-	i2s->dma_playback->dma_size = 4;
-
-	i2s->base = res->start;
-
-	i2s_pdata = pdev->dev.platform_data;
-	if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		return -EINVAL;
-	}
-
-	i2s->iis_cclk = clk_get(&pdev->dev, "audio-bus");
-	if (IS_ERR(i2s->iis_cclk)) {
-		dev_err(&pdev->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(i2s->iis_cclk);
-		goto err;
-	}
-
-	clk_enable(i2s->iis_cclk);
-
-	ret = s3c_i2sv2_register_dai(&pdev->dev, pdev->id, &s3c64xx_i2s_v4_dai);
-	if (ret != 0)
-		goto err_i2sv2;
-
-	return 0;
-
-err_i2sv2:
-	clk_put(i2s->iis_cclk);
-err:
-	return ret;
-}
-
-static __devexit int s3c64xx_i2sv4_dev_remove(struct platform_device *pdev)
-{
-	struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4;
-	struct resource *res;
-
-	snd_soc_unregister_dai(&pdev->dev);
-	clk_put(i2s->iis_cclk);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
-		release_mem_region(res->start, resource_size(res));
-	else
-		dev_warn(&pdev->dev, "Unable to get I2S SFR address\n");
-		
-	return 0;
-}
-
-static struct platform_driver s3c64xx_i2sv4_driver = {
-	.probe  = s3c64xx_i2sv4_dev_probe,
-	.remove = s3c64xx_i2sv4_dev_remove,
-	.driver = {
-		.name = "s3c64xx-iis-v4",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c64xx_i2sv4_init(void)
-{
-	return platform_driver_register(&s3c64xx_i2sv4_driver);
-}
-module_init(s3c64xx_i2sv4_init);
-
-static void __exit s3c64xx_i2sv4_exit(void)
-{
-	platform_driver_unregister(&s3c64xx_i2sv4_driver);
-}
-module_exit(s3c64xx_i2sv4_exit);
-
-/* Module information */
-MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
-MODULE_DESCRIPTION("S3C64XX I2Sv4 SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c64xx-iis-v4");
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.c b/sound/soc/s3c24xx/s3c64xx-i2s.c
deleted file mode 100644
index ae7acb6..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s.c
- *
- * ALSA SoC Audio Layer - S3C64XX I2S driver
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <sound/soc.h>
-
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "regs-i2s-v2.h"
-#include "s3c64xx-i2s.h"
-
-/* The value should be set to maximum of the total number
- * of I2Sv3 controllers that any supported SoC has.
- */
-#define MAX_I2SV3	2
-
-static struct s3c2410_dma_client s3c64xx_dma_client_out = {
-	.name		= "I2S PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c64xx_dma_client_in = {
-	.name		= "I2S PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_out[MAX_I2SV3];
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_in[MAX_I2SV3];
-static struct s3c_i2sv2_info s3c64xx_i2s[MAX_I2SV3];
-
-struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai);
-	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
-
-	if (iismod & S3C2412_IISMOD_IMS_SYSMUX)
-		return i2s->iis_cclk;
-	else
-		return i2s->iis_pclk;
-}
-EXPORT_SYMBOL_GPL(s3c64xx_i2s_get_clock);
-
-static int s3c64xx_i2s_probe(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s;
-	int ret;
-
-	if (dai->id >= MAX_I2SV3) {
-		dev_err(dai->dev, "id %d out of range\n", dai->id);
-		return -EINVAL;
-	}
-
-	i2s = &s3c64xx_i2s[dai->id];
-	snd_soc_dai_set_drvdata(dai, i2s);
-
-	i2s->iis_cclk = clk_get(dai->dev, "audio-bus");
-	if (IS_ERR(i2s->iis_cclk)) {
-		dev_err(dai->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(i2s->iis_cclk);
-		goto err;
-	}
-
-	clk_enable(i2s->iis_cclk);
-
-	ret = s3c_i2sv2_probe(dai, i2s, i2s->base);
-	if (ret)
-		goto err_clk;
-
-	return 0;
-
-err_clk:
-	clk_disable(i2s->iis_cclk);
-	clk_put(i2s->iis_cclk);
-err:
-	kfree(i2s);
-	return ret;
-}
-
-static int s3c64xx_i2s_remove(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai);
-
-	clk_disable(i2s->iis_cclk);
-	clk_put(i2s->iis_cclk);
-	kfree(i2s);
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops;
-
-static struct snd_soc_dai_driver s3c64xx_i2s_dai[MAX_I2SV3] = {
-{
-	.name = "s3c64xx-i2s-0",
-	.probe = s3c64xx_i2s_probe,
-	.remove = s3c64xx_i2s_remove,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.ops = &s3c64xx_i2s_dai_ops,
-	.symmetric_rates = 1,
-}, {
-	.name = "s3c64xx-i2s-1",
-	.probe = s3c64xx_i2s_probe,
-	.remove = s3c64xx_i2s_remove,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.ops = &s3c64xx_i2s_dai_ops,
-	.symmetric_rates = 1,
-},};
-
-static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
-{
-	struct s3c_audio_pdata *i2s_pdata;
-	struct s3c_i2sv2_info *i2s;
-	struct resource *res;
-	int i, ret;
-
-	if (pdev->id >= MAX_I2SV3) {
-		dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
-		return -EINVAL;
-	}
-
-	i2s = &s3c64xx_i2s[pdev->id];
-
-	i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id];
-	i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id];
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_playback->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_capture->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
-		return -ENXIO;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res),
-				"s3c64xx-i2s")) {
-		dev_err(&pdev->dev, "Unable to request SFR region\n");
-		return -EBUSY;
-	}
-	i2s->base = res->start;
-
-	i2s_pdata = pdev->dev.platform_data;
-	if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		return -EINVAL;
-	}
-	i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD;
-	i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD;
-
-	i2s->dma_capture->client = &s3c64xx_dma_client_in;
-	i2s->dma_capture->dma_size = 4;
-	i2s->dma_playback->client = &s3c64xx_dma_client_out;
-	i2s->dma_playback->dma_size = 4;
-
-	for (i = 0; i < ARRAY_SIZE(s3c64xx_i2s_dai); i++) {
-		ret = s3c_i2sv2_register_dai(&pdev->dev, i,
-						&s3c64xx_i2s_dai[i]);
-		if (ret != 0)
-			return ret;
-	}
-
-	return 0;
-}
-
-static __devexit int s3c64xx_iis_dev_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c64xx_i2s_dai));
-	return 0;
-}
-
-static struct platform_driver s3c64xx_iis_driver = {
-	.probe  = s3c64xx_iis_dev_probe,
-	.remove = s3c64xx_iis_dev_remove,
-	.driver = {
-		.name = "s3c64xx-iis",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c64xx_i2s_init(void)
-{
-	return platform_driver_register(&s3c64xx_iis_driver);
-}
-module_init(s3c64xx_i2s_init);
-
-static void __exit s3c64xx_i2s_exit(void)
-{
-	platform_driver_unregister(&s3c64xx_iis_driver);
-}
-module_exit(s3c64xx_i2s_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C64XX I2S SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c64xx-iis");
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.h b/sound/soc/s3c24xx/s3c64xx-i2s.h
deleted file mode 100644
index de4075d..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s.h
- *
- * ALSA SoC Audio Layer - S3C64XX I2S driver
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __SND_SOC_S3C24XX_S3C64XX_I2S_H
-#define __SND_SOC_S3C24XX_S3C64XX_I2S_H __FILE__
-
-struct clk;
-
-#include "s3c-i2s-v2.h"
-
-#define S3C64XX_DIV_BCLK	S3C_I2SV2_DIV_BCLK
-#define S3C64XX_DIV_RCLK	S3C_I2SV2_DIV_RCLK
-#define S3C64XX_DIV_PRESCALER	S3C_I2SV2_DIV_PRESCALER
-
-#define S3C64XX_CLKSRC_PCLK	S3C_I2SV2_CLKSRC_PCLK
-#define S3C64XX_CLKSRC_MUX	S3C_I2SV2_CLKSRC_AUDIOBUS
-#define S3C64XX_CLKSRC_CDCLK    S3C_I2SV2_CLKSRC_CDCLK
-
-#define S3C64XX_I2S_RATES \
-	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
-	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
-	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-
-#define S3C64XX_I2S_FMTS \
-	(SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
-	 SNDRV_PCM_FMTBIT_S24_LE)
-
-struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai);
-
-#endif /* __SND_SOC_S3C24XX_S3C64XX_I2S_H */
diff --git a/sound/soc/s3c24xx/smdk64xx_wm8580.c b/sound/soc/s3c24xx/smdk64xx_wm8580.c
deleted file mode 100644
index 052e499..0000000
--- a/sound/soc/s3c24xx/smdk64xx_wm8580.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- *  smdk64xx_wm8580.c
- *
- *  Copyright (c) 2009 Samsung Electronics Co. Ltd
- *  Author: Jaswinder Singh <jassi.brar@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include "../codecs/wm8580.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-/*
- * Default CFG switch settings to use this driver:
- *
- *   SMDK6410: Set CFG1 1-3 Off, CFG2 1-4 On
- */
-
-/* SMDK64XX has a 12MHZ crystal attached to WM8580 */
-#define SMDK64XX_WM8580_FREQ 12000000
-
-static int smdk64xx_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pll_out;
-	int bfs, rfs, ret;
-
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_U8:
-	case SNDRV_PCM_FORMAT_S8:
-		bfs = 16;
-		break;
-	case SNDRV_PCM_FORMAT_U16_LE:
-	case SNDRV_PCM_FORMAT_S16_LE:
-		bfs = 32;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* The Fvco for WM8580 PLLs must fall within [90,100]MHz.
-	 * This criterion can't be met if we request PLL output
-	 * as {8000x256, 64000x256, 11025x256}Hz.
-	 * As a wayout, we rather change rfs to a minimum value that
-	 * results in (params_rate(params) * rfs), and itself, acceptable
-	 * to both - the CODEC and the CPU.
-	 */
-	switch (params_rate(params)) {
-	case 16000:
-	case 22050:
-	case 32000:
-	case 44100:
-	case 48000:
-	case 88200:
-	case 96000:
-		rfs = 256;
-		break;
-	case 64000:
-		rfs = 384;
-		break;
-	case 8000:
-	case 11025:
-		rfs = 512;
-		break;
-	default:
-		return -EINVAL;
-	}
-	pll_out = params_rate(params) * rfs;
-
-	/* Set the Codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
-					 | SND_SOC_DAIFMT_NB_NF
-					 | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* Set the AP DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
-					 | SND_SOC_DAIFMT_NB_NF
-					 | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_CDCLK,
-					0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* We use PCLK for basic ops in SoC-Slave mode */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-					0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* Set WM8580 to drive MCLK from its PLLA */
-	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
-					WM8580_CLKSRC_PLLA);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
-					SMDK64XX_WM8580_FREQ, pll_out);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
-				     pll_out, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_BCLK, bfs);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, rfs);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-/*
- * SMDK64XX WM8580 DAI operations.
- */
-static struct snd_soc_ops smdk64xx_ops = {
-	.hw_params = smdk64xx_hw_params,
-};
-
-/* SMDK64xx Playback widgets */
-static const struct snd_soc_dapm_widget wm8580_dapm_widgets_pbk[] = {
-	SND_SOC_DAPM_HP("Front", NULL),
-	SND_SOC_DAPM_HP("Center+Sub", NULL),
-	SND_SOC_DAPM_HP("Rear", NULL),
-};
-
-/* SMDK64xx Capture widgets */
-static const struct snd_soc_dapm_widget wm8580_dapm_widgets_cpt[] = {
-	SND_SOC_DAPM_MIC("MicIn", NULL),
-	SND_SOC_DAPM_LINE("LineIn", NULL),
-};
-
-/* SMDK-PAIFTX connections */
-static const struct snd_soc_dapm_route audio_map_tx[] = {
-	/* MicIn feeds AINL */
-	{"AINL", NULL, "MicIn"},
-
-	/* LineIn feeds AINL/R */
-	{"AINL", NULL, "LineIn"},
-	{"AINR", NULL, "LineIn"},
-};
-
-/* SMDK-PAIFRX connections */
-static const struct snd_soc_dapm_route audio_map_rx[] = {
-	/* Front Left/Right are fed VOUT1L/R */
-	{"Front", NULL, "VOUT1L"},
-	{"Front", NULL, "VOUT1R"},
-
-	/* Center/Sub are fed VOUT2L/R */
-	{"Center+Sub", NULL, "VOUT2L"},
-	{"Center+Sub", NULL, "VOUT2R"},
-
-	/* Rear Left/Right are fed VOUT3L/R */
-	{"Rear", NULL, "VOUT3L"},
-	{"Rear", NULL, "VOUT3R"},
-};
-
-static int smdk64xx_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	/* Add smdk64xx specific Capture widgets */
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_cpt,
-				  ARRAY_SIZE(wm8580_dapm_widgets_cpt));
-
-	/* Set up PAIFTX audio path */
-	snd_soc_dapm_add_routes(codec, audio_map_tx, ARRAY_SIZE(audio_map_tx));
-
-	/* Enabling the microphone requires the fitting of a 0R
-	 * resistor to connect the line from the microphone jack.
-	 */
-	snd_soc_dapm_disable_pin(codec, "MicIn");
-
-	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static int smdk64xx_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	/* Add smdk64xx specific Playback widgets */
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_pbk,
-				  ARRAY_SIZE(wm8580_dapm_widgets_pbk));
-
-	/* Set up PAIFRX audio path */
-	snd_soc_dapm_add_routes(codec, audio_map_rx, ARRAY_SIZE(audio_map_rx));
-
-	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static struct snd_soc_dai_link smdk64xx_dai[] = {
-{ /* Primary Playback i/f */
-	.name = "WM8580 PAIF RX",
-	.stream_name = "Playback",
-	.cpu_dai_name = "s3c64xx-iis-v4",
-	.codec_dai_name = "wm8580-hifi-playback",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8580-codec.0-001b",
-	.init = smdk64xx_wm8580_init_paifrx,
-	.ops = &smdk64xx_ops,
-},
-{ /* Primary Capture i/f */
-	.name = "WM8580 PAIF TX",
-	.stream_name = "Capture",
-	.cpu_dai_name = "s3c64xx-iis-v4",
-	.codec_dai_name = "wm8580-hifi-capture",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8580-codec.0-001b",
-	.init = smdk64xx_wm8580_init_paiftx,
-	.ops = &smdk64xx_ops,
-},
-};
-
-static struct snd_soc_card smdk64xx = {
-	.name = "SMDK64xx 5.1",
-	.dai_link = smdk64xx_dai,
-	.num_links = ARRAY_SIZE(smdk64xx_dai),
-};
-
-static struct platform_device *smdk64xx_snd_device;
-
-static int __init smdk64xx_audio_init(void)
-{
-	int ret;
-
-	smdk64xx_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk64xx_snd_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(smdk64xx_snd_device, &smdk64xx);
-	ret = platform_device_add(smdk64xx_snd_device);
-
-	if (ret)
-		platform_device_put(smdk64xx_snd_device);
-
-	return ret;
-}
-module_init(smdk64xx_audio_init);
-
-MODULE_AUTHOR("Jaswinder Singh, jassi.brar@samsung.com");
-MODULE_DESCRIPTION("ALSA SoC SMDK64XX WM8580");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
index c1244c5..5890e43 100644
--- a/sound/soc/s6000/s6105-ipcam.c
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -18,11 +18,9 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <variant/dmac.h>
 
-#include "../codecs/tlv320aic3x.h"
 #include "s6000-pcm.h"
 #include "s6000-i2s.h"
 
@@ -107,6 +105,7 @@
 			   struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = kcontrol->private_data;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
 	char *differential = "Audio Out Differential";
 	char *stereo = "Audio Out Stereo";
@@ -114,10 +113,10 @@
 	if (kcontrol->private_value == val)
 		return 0;
 	kcontrol->private_value = val;
-	snd_soc_dapm_disable_pin(codec, val ? differential : stereo);
-	snd_soc_dapm_sync(codec);
-	snd_soc_dapm_enable_pin(codec, val ? stereo : differential);
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
+	snd_soc_dapm_sync(dapm);
+	snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
+	snd_soc_dapm_sync(dapm);
 
 	return 1;
 }
@@ -137,35 +136,36 @@
 static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add s6105 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* Set up s6105 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* not present */
-	snd_soc_dapm_nc_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_nc_pin(codec, "LINE2L");
-	snd_soc_dapm_nc_pin(codec, "LINE2R");
+	snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_nc_pin(dapm, "LINE2L");
+	snd_soc_dapm_nc_pin(dapm, "LINE2R");
 
 	/* not connected */
-	snd_soc_dapm_nc_pin(codec, "MIC3L"); /* LINE2L on this chip */
-	snd_soc_dapm_nc_pin(codec, "MIC3R"); /* LINE2R on this chip */
-	snd_soc_dapm_nc_pin(codec, "LLOUT");
-	snd_soc_dapm_nc_pin(codec, "RLOUT");
-	snd_soc_dapm_nc_pin(codec, "HPRCOM");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L"); /* LINE2L on this chip */
+	snd_soc_dapm_nc_pin(dapm, "MIC3R"); /* LINE2R on this chip */
+	snd_soc_dapm_nc_pin(dapm, "LLOUT");
+	snd_soc_dapm_nc_pin(dapm, "RLOUT");
+	snd_soc_dapm_nc_pin(dapm, "HPRCOM");
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Audio In");
+	snd_soc_dapm_enable_pin(dapm, "Audio In");
 
 	/* must correspond to audio_out_mux.private_value initializer */
-	snd_soc_dapm_disable_pin(codec, "Audio Out Differential");
-	snd_soc_dapm_sync(codec);
-	snd_soc_dapm_enable_pin(codec, "Audio Out Stereo");
+	snd_soc_dapm_disable_pin(dapm, "Audio Out Differential");
+	snd_soc_dapm_sync(dapm);
+	snd_soc_dapm_enable_pin(dapm, "Audio Out Stereo");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec));
 
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
new file mode 100644
index 0000000..a6a6b5f
--- /dev/null
+++ b/sound/soc/samsung/Kconfig
@@ -0,0 +1,171 @@
+config SND_SOC_SAMSUNG
+	tristate "ASoC support for Samsung"
+	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PV310
+	select S3C64XX_DMA if ARCH_S3C64XX
+	select S3C2410_DMA if ARCH_S3C2410
+	help
+	  Say Y or M if you want to add support for codecs attached to
+	  the Samsung SoCs' Audio interfaces. You will also need to
+	  select the audio interfaces to support below.
+
+config SND_S3C24XX_I2S
+	tristate
+	select S3C2410_DMA
+
+config SND_S3C_I2SV2_SOC
+	tristate
+
+config SND_S3C2412_SOC_I2S
+	tristate
+	select SND_S3C_I2SV2_SOC
+	select S3C2410_DMA
+
+config SND_SAMSUNG_PCM
+	tristate
+
+config SND_SAMSUNG_AC97
+	tristate
+	select SND_SOC_AC97_BUS
+
+config SND_SAMSUNG_SPDIF
+	tristate
+	select SND_SOC_SPDIF
+
+config SND_SAMSUNG_I2S
+	tristate
+
+config SND_SOC_SAMSUNG_NEO1973_WM8753
+	tristate "SoC I2S Audio support for NEO1973 - WM8753"
+	depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA01
+	select SND_S3C24XX_I2S
+	select SND_SOC_WM8753
+	help
+	  Say Y if you want to add support for SoC audio on the Neo1973
+	  (GTA01) with the WM8753.
+
+config SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753
+	tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
+	depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA02
+	select SND_S3C24XX_I2S
+	select SND_SOC_WM8753
+	help
+	  This driver provides audio support for the Openmoko Neo FreeRunner
+	  smartphone.
+
+config SND_SOC_SAMSUNG_JIVE_WM8750
+	tristate "SoC I2S Audio support for Jive"
+	depends on SND_SOC_SAMSUNG && MACH_JIVE
+	select SND_SOC_WM8750
+	select SND_S3C2412_SOC_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the Jive.
+
+config SND_SOC_SAMSUNG_SMDK_WM8580
+	tristate "SoC I2S Audio support for WM8580 on SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDK6442 || MACH_SMDKV210 || MACH_SMDKC110)
+	select SND_SOC_WM8580
+	select SND_SAMSUNG_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the SMDKs.
+
+config SND_SOC_SAMSUNG_SMDK_WM8994
+	tristate "SoC I2S Audio support for WM8994 on SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDKV310 || MACH_SMDKC210)
+	select SND_SOC_WM8994
+	select SND_SAMSUNG_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the SMDKs.
+
+config SND_SOC_SAMSUNG_SMDK2443_WM9710
+	tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
+	depends on SND_SOC_SAMSUNG && MACH_SMDK2443
+	select S3C2410_DMA
+	select AC97_BUS
+	select SND_SOC_AC97_CODEC
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on smdk2443
+	  with the WM9710.
+
+config SND_SOC_SAMSUNG_LN2440SBC_ALC650
+	tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select S3C2410_DMA
+	select AC97_BUS
+	select SND_SOC_AC97_CODEC
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on ln2440sbc
+	  with the ALC650.
+
+config SND_SOC_SAMSUNG_S3C24XX_UDA134X
+	tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_L3
+	select SND_SOC_UDA134X
+
+config SND_SOC_SAMSUNG_SIMTEC
+	tristate
+	help
+	  Internal node for common S3C24XX/Simtec support.
+
+config SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23
+	tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_TLV320AIC23
+	select SND_SOC_SAMSUNG_SIMTEC
+
+config SND_SOC_SAMSUNG_SIMTEC_HERMES
+	tristate "SoC I2S Audio support for Simtec Hermes board"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_TLV320AIC3X
+	select SND_SOC_SAMSUNG_SIMTEC
+
+config SND_SOC_SAMSUNG_H1940_UDA1380
+	tristate "Audio support for the HP iPAQ H1940"
+	depends on SND_SOC_SAMSUNG && ARCH_H1940
+	select SND_S3C24XX_I2S
+	select SND_SOC_UDA1380
+	help
+	  This driver provides audio support for HP iPAQ h1940 PDA.
+
+config SND_SOC_SAMSUNG_RX1950_UDA1380
+	tristate "Audio support for the HP iPAQ RX1950"
+	depends on SND_SOC_SAMSUNG && MACH_RX1950
+	select SND_S3C24XX_I2S
+	select SND_SOC_UDA1380
+	help
+	  This driver provides audio support for HP iPAQ RX1950 PDA.
+
+config SND_SOC_SAMSUNG_SMDK_WM9713
+	tristate "SoC AC97 Audio support for SMDK with WM9713"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210)
+	select SND_SOC_WM9713
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on the SMDK.
+
+config SND_SOC_SMARTQ
+	tristate "SoC I2S Audio support for SmartQ board"
+	depends on SND_SOC_SAMSUNG && MACH_SMARTQ
+	select SND_SAMSUNG_I2S
+	select SND_SOC_WM8750
+
+config SND_SOC_GONI_AQUILA_WM8994
+	tristate "SoC I2S Audio support for AQUILA/GONI - WM8994"
+	depends on SND_SOC_SAMSUNG && (MACH_GONI || MACH_AQUILA)
+	select SND_SAMSUNG_I2S
+	select SND_SOC_WM8994
+	help
+	  Say Y if you want to add support for SoC audio on goni or aquila
+	  with the WM8994.
+
+config SND_SOC_SAMSUNG_SMDK_SPDIF
+	tristate "SoC S/PDIF Audio support for SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210)
+	select SND_SAMSUNG_SPDIF
+	help
+	  Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
new file mode 100644
index 0000000..705d4e8
--- /dev/null
+++ b/sound/soc/samsung/Makefile
@@ -0,0 +1,55 @@
+# S3C24XX Platform Support
+snd-soc-s3c24xx-objs := dma.o
+snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
+snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
+snd-soc-ac97-objs := ac97.o
+snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
+snd-soc-samsung-spdif-objs := spdif.o
+snd-soc-pcm-objs := pcm.o
+snd-soc-i2s-objs := i2s.o
+
+obj-$(CONFIG_SND_SOC_SAMSUNG) += snd-soc-s3c24xx.o
+obj-$(CONFIG_SND_S3C24XX_I2S) += snd-soc-s3c24xx-i2s.o
+obj-$(CONFIG_SND_SAMSUNG_AC97) += snd-soc-ac97.o
+obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
+obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
+obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o
+obj-$(CONFIG_SND_SAMSUNG_PCM) += snd-soc-pcm.o
+obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-i2s.o
+
+# S3C24XX Machine Support
+snd-soc-jive-wm8750-objs := jive_wm8750.o
+snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
+snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
+snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
+snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
+snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
+snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
+snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
+snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
+snd-soc-h1940-uda1380-objs := h1940_uda1380.o
+snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o
+snd-soc-smdk-wm8580-objs := smdk_wm8580.o
+snd-soc-smdk-wm8994-objs := smdk_wm8994.o
+snd-soc-smdk-wm9713-objs := smdk_wm9713.o
+snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
+snd-soc-goni-wm8994-objs := goni_wm8994.o
+snd-soc-smdk-spdif-objs := smdk_spdif.o
+
+obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC) += snd-soc-s3c24xx-simtec.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_H1940_UDA1380) += snd-soc-h1940-uda1380.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8580) += snd-soc-smdk-wm8580.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994) += snd-soc-smdk-wm8994.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM9713) += snd-soc-smdk-wm9713.o
+obj-$(CONFIG_SND_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF) += snd-soc-smdk-spdif.o
+obj-$(CONFIG_SND_SOC_GONI_AQUILA_WM8994) += snd-soc-goni-wm8994.o
diff --git a/sound/soc/s3c24xx/s3c-ac97.c b/sound/soc/samsung/ac97.c
similarity index 96%
rename from sound/soc/s3c24xx/s3c-ac97.c
rename to sound/soc/samsung/ac97.c
index f891eb7..4770a95 100644
--- a/sound/soc/s3c24xx/s3c-ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-ac97.c
+/* sound/soc/samsung/ac97.c
  *
  * ALSA SoC Audio Layer - S3C AC97 Controller driver
  * 	Evolved from s3c2443-ac97.c
@@ -24,8 +24,8 @@
 #include <mach/dma.h>
 #include <plat/audio.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 #define AC_CMD_ADDR(x) (x << 16)
 #define AC_CMD_DATA(x) (x & 0xffff)
@@ -122,7 +122,7 @@
 	data = (stat & 0xffff);
 
 	if (addr != reg)
-		pr_err("s3c-ac97: req addr = %02x, rep addr = %02x\n",
+		pr_err("ac97: req addr = %02x, rep addr = %02x\n",
 			reg, addr);
 
 	mutex_unlock(&s3c_ac97.lock);
@@ -334,7 +334,7 @@
 
 static struct snd_soc_dai_driver s3c_ac97_dai[] = {
 	[S3C_AC97_DAI_PCM] = {
-		.name =	"s3c-ac97",
+		.name =	"samsung-ac97",
 		.ac97_control = 1,
 		.playback = {
 			.stream_name = "AC97 Playback",
@@ -351,7 +351,7 @@
 		.ops = &s3c_ac97_dai_ops,
 	},
 	[S3C_AC97_DAI_MIC] = {
-		.name = "s3c-ac97-mic",
+		.name = "samsung-ac97-mic",
 		.ac97_control = 1,
 		.capture = {
 			.stream_name = "AC97 Mic Capture",
@@ -407,7 +407,7 @@
 	}
 
 	if (!request_mem_region(mem_res->start,
-				resource_size(mem_res), "s3c-ac97")) {
+				resource_size(mem_res), "ac97")) {
 		dev_err(&pdev->dev, "Unable to request register region\n");
 		return -EBUSY;
 	}
@@ -431,7 +431,7 @@
 
 	s3c_ac97.ac97_clk = clk_get(&pdev->dev, "ac97");
 	if (IS_ERR(s3c_ac97.ac97_clk)) {
-		dev_err(&pdev->dev, "s3c-ac97 failed to get ac97_clock\n");
+		dev_err(&pdev->dev, "ac97 failed to get ac97_clock\n");
 		ret = -ENODEV;
 		goto err2;
 	}
@@ -446,7 +446,7 @@
 	ret = request_irq(irq_res->start, s3c_ac97_irq,
 					IRQF_DISABLED, "AC97", NULL);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "s3c-ac97: interrupt request failed.\n");
+		dev_err(&pdev->dev, "ac97: interrupt request failed.\n");
 		goto err4;
 	}
 
@@ -497,7 +497,7 @@
 	.probe  = s3c_ac97_probe,
 	.remove = s3c_ac97_remove,
 	.driver = {
-		.name = "s3c-ac97",
+		.name = "samsung-ac97",
 		.owner = THIS_MODULE,
 	},
 };
@@ -517,4 +517,4 @@
 MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
 MODULE_DESCRIPTION("AC97 driver for the Samsung SoC");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-ac97");
+MODULE_ALIAS("platform:samsung-ac97");
diff --git a/sound/soc/s3c24xx/s3c-ac97.h b/sound/soc/samsung/ac97.h
similarity index 73%
rename from sound/soc/s3c24xx/s3c-ac97.h
rename to sound/soc/samsung/ac97.h
index 5dcedd0..0d0e1b5 100644
--- a/sound/soc/s3c24xx/s3c-ac97.h
+++ b/sound/soc/samsung/ac97.h
@@ -1,11 +1,11 @@
-/* sound/soc/s3c24xx/s3c-ac97.h
+/* sound/soc/samsung/ac97.h
  *
  * ALSA SoC Audio Layer - S3C AC97 Controller driver
- * 	Evolved from s3c2443-ac97.h
+ *	Evolved from s3c2443-ac97.h
  *
  * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
- * 	Credits: Graeme Gregory, Sean Choi
+ *	Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *	Credits: Graeme Gregory, Sean Choi
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/samsung/dma.c
similarity index 74%
rename from sound/soc/s3c24xx/s3c-dma.c
rename to sound/soc/samsung/dma.c
index 243f79b..2124019 100644
--- a/sound/soc/s3c24xx/s3c-dma.c
+++ b/sound/soc/samsung/dma.c
@@ -1,5 +1,5 @@
 /*
- * s3c-dma.c  --  ALSA Soc Audio Layer
+ * dma.c  --  ALSA SoC Audio Layer
  *
  * (c) 2006 Wolfson Microelectronics PLC.
  * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
@@ -30,9 +30,9 @@
 #include <mach/hardware.h>
 #include <mach/dma.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 
-static const struct snd_pcm_hardware s3c_dma_hardware = {
+static const struct snd_pcm_hardware dma_hardware = {
 	.info			= SNDRV_PCM_INFO_INTERLEAVED |
 				    SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				    SNDRV_PCM_INFO_MMAP |
@@ -53,7 +53,7 @@
 	.fifo_size		= 32,
 };
 
-struct s3c24xx_runtime_data {
+struct runtime_data {
 	spinlock_t lock;
 	int state;
 	unsigned int dma_loaded;
@@ -65,14 +65,14 @@
 	struct s3c_dma_params *params;
 };
 
-/* s3c_dma_enqueue
+/* dma_enqueue
  *
  * place a dma buffer onto the queue for the dma system
  * to handle.
 */
-static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
+static void dma_enqueue(struct snd_pcm_substream *substream)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 	dma_addr_t pos = prtd->dma_pos;
 	unsigned int limit;
 	int ret;
@@ -112,12 +112,12 @@
 	prtd->dma_pos = pos;
 }
 
-static void s3c24xx_audio_buffdone(struct s3c2410_dma_chan *channel,
+static void audio_buffdone(struct s3c2410_dma_chan *channel,
 				void *dev_id, int size,
 				enum s3c2410_dma_buffresult result)
 {
 	struct snd_pcm_substream *substream = dev_id;
-	struct s3c24xx_runtime_data *prtd;
+	struct runtime_data *prtd;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -132,17 +132,17 @@
 	spin_lock(&prtd->lock);
 	if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
 		prtd->dma_loaded--;
-		s3c_dma_enqueue(substream);
+		dma_enqueue(substream);
 	}
 
 	spin_unlock(&prtd->lock);
 }
 
-static int s3c_dma_hw_params(struct snd_pcm_substream *substream,
+static int dma_hw_params(struct snd_pcm_substream *substream,
 	struct snd_pcm_hw_params *params)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
+	struct runtime_data *prtd = runtime->private_data;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	unsigned long totbytes = params_buffer_bytes(params);
 	struct s3c_dma_params *dma =
@@ -181,7 +181,7 @@
 	}
 
 	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
-				    s3c24xx_audio_buffdone);
+				    audio_buffdone);
 
 	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
 
@@ -199,9 +199,9 @@
 	return 0;
 }
 
-static int s3c_dma_hw_free(struct snd_pcm_substream *substream)
+static int dma_hw_free(struct snd_pcm_substream *substream)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -216,9 +216,9 @@
 	return 0;
 }
 
-static int s3c_dma_prepare(struct snd_pcm_substream *substream)
+static int dma_prepare(struct snd_pcm_substream *substream)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 	int ret = 0;
 
 	pr_debug("Entered %s\n", __func__);
@@ -249,14 +249,14 @@
 	prtd->dma_pos = prtd->dma_start;
 
 	/* enqueue dma buffers */
-	s3c_dma_enqueue(substream);
+	dma_enqueue(substream);
 
 	return ret;
 }
 
-static int s3c_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 	int ret = 0;
 
 	pr_debug("Entered %s\n", __func__);
@@ -289,10 +289,10 @@
 }
 
 static snd_pcm_uframes_t
-s3c_dma_pointer(struct snd_pcm_substream *substream)
+dma_pointer(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
+	struct runtime_data *prtd = runtime->private_data;
 	unsigned long res;
 	dma_addr_t src, dst;
 
@@ -324,17 +324,17 @@
 	return bytes_to_frames(substream->runtime, res);
 }
 
-static int s3c_dma_open(struct snd_pcm_substream *substream)
+static int dma_open(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd;
+	struct runtime_data *prtd;
 
 	pr_debug("Entered %s\n", __func__);
 
 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-	snd_soc_set_runtime_hwparams(substream, &s3c_dma_hardware);
+	snd_soc_set_runtime_hwparams(substream, &dma_hardware);
 
-	prtd = kzalloc(sizeof(struct s3c24xx_runtime_data), GFP_KERNEL);
+	prtd = kzalloc(sizeof(struct runtime_data), GFP_KERNEL);
 	if (prtd == NULL)
 		return -ENOMEM;
 
@@ -344,22 +344,22 @@
 	return 0;
 }
 
-static int s3c_dma_close(struct snd_pcm_substream *substream)
+static int dma_close(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
+	struct runtime_data *prtd = runtime->private_data;
 
 	pr_debug("Entered %s\n", __func__);
 
 	if (!prtd)
-		pr_debug("s3c_dma_close called with prtd == NULL\n");
+		pr_debug("dma_close called with prtd == NULL\n");
 
 	kfree(prtd);
 
 	return 0;
 }
 
-static int s3c_dma_mmap(struct snd_pcm_substream *substream,
+static int dma_mmap(struct snd_pcm_substream *substream,
 	struct vm_area_struct *vma)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
@@ -372,23 +372,23 @@
 				     runtime->dma_bytes);
 }
 
-static struct snd_pcm_ops s3c_dma_ops = {
-	.open		= s3c_dma_open,
-	.close		= s3c_dma_close,
+static struct snd_pcm_ops dma_ops = {
+	.open		= dma_open,
+	.close		= dma_close,
 	.ioctl		= snd_pcm_lib_ioctl,
-	.hw_params	= s3c_dma_hw_params,
-	.hw_free	= s3c_dma_hw_free,
-	.prepare	= s3c_dma_prepare,
-	.trigger	= s3c_dma_trigger,
-	.pointer	= s3c_dma_pointer,
-	.mmap		= s3c_dma_mmap,
+	.hw_params	= dma_hw_params,
+	.hw_free	= dma_hw_free,
+	.prepare	= dma_prepare,
+	.trigger	= dma_trigger,
+	.pointer	= dma_pointer,
+	.mmap		= dma_mmap,
 };
 
-static int s3c_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+static int preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
 {
 	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
 	struct snd_dma_buffer *buf = &substream->dma_buffer;
-	size_t size = s3c_dma_hardware.buffer_bytes_max;
+	size_t size = dma_hardware.buffer_bytes_max;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -403,7 +403,7 @@
 	return 0;
 }
 
-static void s3c_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void dma_free_dma_buffers(struct snd_pcm *pcm)
 {
 	struct snd_pcm_substream *substream;
 	struct snd_dma_buffer *buf;
@@ -426,9 +426,9 @@
 	}
 }
 
-static u64 s3c_dma_mask = DMA_BIT_MASK(32);
+static u64 dma_mask = DMA_BIT_MASK(32);
 
-static int s3c_dma_new(struct snd_card *card,
+static int dma_new(struct snd_card *card,
 	struct snd_soc_dai *dai, struct snd_pcm *pcm)
 {
 	int ret = 0;
@@ -436,67 +436,67 @@
 	pr_debug("Entered %s\n", __func__);
 
 	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &s3c_dma_mask;
+		card->dev->dma_mask = &dma_mask;
 	if (!card->dev->coherent_dma_mask)
 		card->dev->coherent_dma_mask = 0xffffffff;
 
 	if (dai->driver->playback.channels_min) {
-		ret = s3c_preallocate_dma_buffer(pcm,
+		ret = preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_PLAYBACK);
 		if (ret)
 			goto out;
 	}
 
 	if (dai->driver->capture.channels_min) {
-		ret = s3c_preallocate_dma_buffer(pcm,
+		ret = preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_CAPTURE);
 		if (ret)
 			goto out;
 	}
- out:
+out:
 	return ret;
 }
 
-static struct snd_soc_platform_driver s3c24xx_soc_platform = {
-	.ops		= &s3c_dma_ops,
-	.pcm_new	= s3c_dma_new,
-	.pcm_free	= s3c_dma_free_dma_buffers,
+static struct snd_soc_platform_driver samsung_asoc_platform = {
+	.ops		= &dma_ops,
+	.pcm_new	= dma_new,
+	.pcm_free	= dma_free_dma_buffers,
 };
 
-static int __devinit s3c24xx_soc_platform_probe(struct platform_device *pdev)
+static int __devinit samsung_asoc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &s3c24xx_soc_platform);
+	return snd_soc_register_platform(&pdev->dev, &samsung_asoc_platform);
 }
 
-static int __devexit s3c24xx_soc_platform_remove(struct platform_device *pdev)
+static int __devexit samsung_asoc_platform_remove(struct platform_device *pdev)
 {
 	snd_soc_unregister_platform(&pdev->dev);
 	return 0;
 }
 
-static struct platform_driver s3c24xx_pcm_driver = {
+static struct platform_driver asoc_dma_driver = {
 	.driver = {
-		.name = "s3c24xx-pcm-audio",
+		.name = "samsung-audio",
 		.owner = THIS_MODULE,
 	},
 
-	.probe = s3c24xx_soc_platform_probe,
-	.remove = __devexit_p(s3c24xx_soc_platform_remove),
+	.probe = samsung_asoc_platform_probe,
+	.remove = __devexit_p(samsung_asoc_platform_remove),
 };
 
-static int __init snd_s3c24xx_pcm_init(void)
+static int __init samsung_asoc_init(void)
 {
-	return platform_driver_register(&s3c24xx_pcm_driver);
+	return platform_driver_register(&asoc_dma_driver);
 }
-module_init(snd_s3c24xx_pcm_init);
+module_init(samsung_asoc_init);
 
-static void __exit snd_s3c24xx_pcm_exit(void)
+static void __exit samsung_asoc_exit(void)
 {
-	platform_driver_unregister(&s3c24xx_pcm_driver);
+	platform_driver_unregister(&asoc_dma_driver);
 }
-module_exit(snd_s3c24xx_pcm_exit);
+module_exit(samsung_asoc_exit);
 
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("Samsung S3C Audio DMA module");
+MODULE_DESCRIPTION("Samsung ASoC DMA Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c24xx-pcm-audio");
+MODULE_ALIAS("platform:samsung-audio");
diff --git a/sound/soc/s3c24xx/s3c-dma.h b/sound/soc/samsung/dma.h
similarity index 97%
rename from sound/soc/s3c24xx/s3c-dma.h
rename to sound/soc/samsung/dma.h
index 748c07d..f8cd2b4 100644
--- a/sound/soc/s3c24xx/s3c-dma.h
+++ b/sound/soc/samsung/dma.h
@@ -1,5 +1,5 @@
 /*
- *  s3c-dma.h --
+ *  dma.h --
  *
  *  This program is free software; you can redistribute  it and/or modify it
  *  under  the terms of  the GNU General  Public License as published by the
diff --git a/sound/soc/s3c24xx/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c
similarity index 83%
rename from sound/soc/s3c24xx/goni_wm8994.c
rename to sound/soc/samsung/goni_wm8994.c
index 694f702..34dd9ef 100644
--- a/sound/soc/s3c24xx/goni_wm8994.c
+++ b/sound/soc/samsung/goni_wm8994.c
@@ -16,7 +16,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 #include <asm/mach-types.h>
 #include <mach/gpio.h>
@@ -25,8 +24,16 @@
 #include <linux/mfd/wm8994/core.h>
 #include <linux/mfd/wm8994/registers.h>
 #include "../codecs/wm8994.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
+#include "dma.h"
+#include "i2s.h"
+
+#define MACHINE_NAME	0
+#define CPU_VOICE_DAI	1
+
+static const char *aquila_str[] = {
+	[MACHINE_NAME] = "aquila",
+	[CPU_VOICE_DAI] = "aquila-voice-dai",
+};
 
 static struct snd_soc_card goni;
 static struct platform_device *goni_snd_device;
@@ -97,28 +104,34 @@
 static int goni_wm8994_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* add goni specific widgets */
-	snd_soc_dapm_new_controls(codec, goni_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, goni_dapm_widgets,
 			ARRAY_SIZE(goni_dapm_widgets));
 
 	/* set up goni specific audio routes */
-	snd_soc_dapm_add_routes(codec, goni_dapm_routes,
+	snd_soc_dapm_add_routes(dapm, goni_dapm_routes,
 			ARRAY_SIZE(goni_dapm_routes));
 
 	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN");
-	snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1P");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
+	snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
 
-	snd_soc_dapm_sync(codec);
+	if (machine_is_aquila()) {
+		snd_soc_dapm_nc_pin(dapm, "SPKOUTRN");
+		snd_soc_dapm_nc_pin(dapm, "SPKOUTRP");
+	}
+
+	snd_soc_dapm_sync(dapm);
 
 	/* Headset jack detection */
-	ret = snd_soc_jack_new(&goni, "Headset Jack",
+	ret = snd_soc_jack_new(codec, "Headset Jack",
 			SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
 			&jack);
 	if (ret)
@@ -150,12 +163,6 @@
 	if (ret < 0)
 		return ret;
 
-	/* set the cpu system clock */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-			0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
 	/* set codec DAI configuration */
 	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
 			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
@@ -236,9 +243,9 @@
 {
 	.name = "WM8994",
 	.stream_name = "WM8994 HiFi",
-	.cpu_dai_name = "s3c64xx-i2s-v4",
+	.cpu_dai_name = "samsung-i2s.0",
 	.codec_dai_name = "wm8994-hifi",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.codec_name = "wm8994-codec.0-0x1a",
 	.init = goni_wm8994_init,
 	.ops = &goni_hifi_ops,
@@ -247,7 +254,7 @@
 	.stream_name = "Voice",
 	.cpu_dai_name = "goni-voice-dai",
 	.codec_dai_name = "wm8994-voice",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.codec_name = "wm8994-codec.0-0x1a",
 	.ops = &goni_voice_ops,
 },
@@ -263,7 +270,11 @@
 {
 	int ret;
 
-	if (!machine_is_goni())
+	if (machine_is_aquila()) {
+		voice_dai.name = aquila_str[CPU_VOICE_DAI];
+		goni_dai[1].cpu_dai_name = aquila_str[CPU_VOICE_DAI];
+		goni.name = aquila_str[MACHINE_NAME];
+	} else if (!machine_is_goni())
 		return -ENODEV;
 
 	goni_snd_device = platform_device_alloc("soc-audio", -1);
@@ -272,20 +283,25 @@
 
 	/* register voice DAI here */
 	ret = snd_soc_register_dai(&goni_snd_device->dev, &voice_dai);
-	if (ret)
+	if (ret) {
+		platform_device_put(goni_snd_device);
 		return ret;
+	}
 
 	platform_set_drvdata(goni_snd_device, &goni);
 	ret = platform_device_add(goni_snd_device);
 
-	if (ret)
+	if (ret) {
+		snd_soc_unregister_dai(&goni_snd_device->dev);
 		platform_device_put(goni_snd_device);
+	}
 
 	return ret;
 }
 
 static void __exit goni_exit(void)
 {
+	snd_soc_unregister_dai(&goni_snd_device->dev);
 	platform_device_unregister(goni_snd_device);
 }
 
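
Besides the aquila runtime renaming, the goni_wm8994.c hunks tighten the module init error path: a failed snd_soc_register_dai() or platform_device_add() no longer leaks the allocated platform device or leaves the voice DAI registered. A condensed sketch of that ordering, assuming the same static device, card, and voice DAI objects as the driver (names shortened for illustration):

static int __init board_audio_init(void)
{
	int ret;

	snd_dev = platform_device_alloc("soc-audio", -1);
	if (!snd_dev)
		return -ENOMEM;

	ret = snd_soc_register_dai(&snd_dev->dev, &voice_dai);
	if (ret)
		goto err_put;		/* nothing registered yet, just drop the reference */

	platform_set_drvdata(snd_dev, &board_card);

	ret = platform_device_add(snd_dev);
	if (ret)
		goto err_unregister;	/* undo the DAI registration before dropping the device */

	return 0;

err_unregister:
	snd_soc_unregister_dai(&snd_dev->dev);
err_put:
	platform_device_put(snd_dev);
	return ret;
}
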
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
new file mode 100644
index 0000000..c45f7ce
--- /dev/null
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -0,0 +1,296 @@
+/*
+ * h1940_uda1380.c  --  ALSA SoC Audio Layer
+ *
+ * Copyright (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Copyright (c) 2010 Vasily Khoruzhick <anarsoul@gmail.com>
+ *
+ * Based on version from Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+
+#include <sound/soc.h>
+#include <sound/uda1380.h>
+#include <sound/jack.h>
+
+#include <plat/regs-iis.h>
+
+#include <mach/h1940-latch.h>
+
+#include <asm/mach-types.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "../codecs/uda1380.h"
+
+static unsigned int rates[] = {
+	11025,
+	22050,
+	44100,
+};
+
+static struct snd_pcm_hw_constraint_list hw_rates = {
+	.count = ARRAY_SIZE(rates),
+	.list = rates,
+	.mask = 0,
+};
+
+static struct snd_soc_jack hp_jack;
+
+static struct snd_soc_jack_pin hp_jack_pins[] = {
+	{
+		.pin	= "Headphone Jack",
+		.mask	= SND_JACK_HEADPHONE,
+	},
+	{
+		.pin	= "Speaker",
+		.mask	= SND_JACK_HEADPHONE,
+		.invert	= 1,
+	},
+};
+
+static struct snd_soc_jack_gpio hp_jack_gpios[] = {
+	{
+		.gpio			= S3C2410_GPG(4),
+		.name			= "hp-gpio",
+		.report			= SND_JACK_HEADPHONE,
+		.invert			= 1,
+		.debounce_time		= 200,
+	},
+};
+
+static int h1940_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	runtime->hw.rate_min = hw_rates.list[0];
+	runtime->hw.rate_max = hw_rates.list[hw_rates.count - 1];
+	runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
+
+	return snd_pcm_hw_constraint_list(runtime, 0,
+					SNDRV_PCM_HW_PARAM_RATE,
+					&hw_rates);
+}
+
+static int h1940_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int div;
+	int ret;
+	unsigned int rate = params_rate(params);
+
+	switch (rate) {
+	case 11025:
+	case 22050:
+	case 44100:
+		div = s3c24xx_i2s_get_clockrate() / (384 * rate);
+		if (s3c24xx_i2s_get_clockrate() % (384 * rate) > (192 * rate))
+			div++;
+		break;
+	default:
+		dev_err(&rtd->dev, "%s: rate %d is not supported\n",
+			__func__, rate);
+		return -EINVAL;
+	}
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* select clock source */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_PCLK, rate,
+			SND_SOC_CLOCK_OUT);
+	if (ret < 0)
+		return ret;
+
+	/* set MCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
+		S3C2410_IISMOD_384FS);
+	if (ret < 0)
+		return ret;
+
+	/* set BCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
+		S3C2410_IISMOD_32FS);
+	if (ret < 0)
+		return ret;
+
+	/* set prescaler division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+		S3C24XX_PRESCALE(div, div));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_ops h1940_ops = {
+	.startup	= h1940_startup,
+	.hw_params	= h1940_hw_params,
+};
+
+static int h1940_spk_power(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol, int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		gpio_set_value(H1940_LATCH_AUDIO_POWER, 1);
+	else
+		gpio_set_value(H1940_LATCH_AUDIO_POWER, 0);
+
+	return 0;
+}
+
+/* h1940 machine dapm widgets */
+static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", h1940_spk_power),
+};
+
+/* h1940 machine audio_map */
+static const struct snd_soc_dapm_route audio_map[] = {
+	/* headphone connected to VOUTLHP, VOUTRHP */
+	{"Headphone Jack", NULL, "VOUTLHP"},
+	{"Headphone Jack", NULL, "VOUTRHP"},
+
+	/* ext speaker connected to VOUTL, VOUTR  */
+	{"Speaker", NULL, "VOUTL"},
+	{"Speaker", NULL, "VOUTR"},
+
+	/* mic is connected to VINM */
+	{"VINM", NULL, "Mic Jack"},
+};
+
+static struct platform_device *s3c24xx_snd_device;
+
+static int h1940_uda1380_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err;
+
+	/* Add h1940 specific widgets */
+	err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
+				  ARRAY_SIZE(uda1380_dapm_widgets));
+	if (err)
+		return err;
+
+	/* Set up h1940 specific audio path interconnects (audio_map) */
+	err = snd_soc_dapm_add_routes(dapm, audio_map,
+				      ARRAY_SIZE(audio_map));
+	if (err)
+		return err;
+
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+		&hp_jack);
+
+	snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
+		hp_jack_pins);
+
+	snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+
+	return 0;
+}
+
+/* s3c24xx digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link h1940_uda1380_dai[] = {
+	{
+		.name		= "uda1380",
+		.stream_name	= "UDA1380 Duplex",
+		.cpu_dai_name	= "s3c24xx-iis",
+		.codec_dai_name	= "uda1380-hifi",
+		.init		= h1940_uda1380_init,
+		.platform_name	= "samsung-audio",
+		.codec_name	= "uda1380-codec.0-001a",
+		.ops		= &h1940_ops,
+	},
+};
+
+static struct snd_soc_card h1940_asoc = {
+	.name = "h1940",
+	.dai_link = h1940_uda1380_dai,
+	.num_links = ARRAY_SIZE(h1940_uda1380_dai),
+};
+
+static int __init h1940_init(void)
+{
+	int ret;
+
+	if (!machine_is_h1940())
+		return -ENODEV;
+
+	/* configure some gpios */
+	ret = gpio_request(H1940_LATCH_AUDIO_POWER, "speaker-power");
+	if (ret)
+		goto err_out;
+
+	ret = gpio_direction_output(H1940_LATCH_AUDIO_POWER, 0);
+	if (ret)
+		goto err_gpio;
+
+	s3c24xx_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!s3c24xx_snd_device) {
+		ret = -ENOMEM;
+		goto err_gpio;
+	}
+
+	platform_set_drvdata(s3c24xx_snd_device, &h1940_asoc);
+	ret = platform_device_add(s3c24xx_snd_device);
+
+	if (ret)
+		goto err_plat;
+
+	return 0;
+
+err_plat:
+	platform_device_put(s3c24xx_snd_device);
+err_gpio:
+	gpio_free(H1940_LATCH_AUDIO_POWER);
+
+err_out:
+	return ret;
+}
+
+static void __exit h1940_exit(void)
+{
+	platform_device_unregister(s3c24xx_snd_device);
+	snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+	gpio_free(H1940_LATCH_AUDIO_POWER);
+}
+
+module_init(h1940_init);
+module_exit(h1940_exit);
+
+/* Module information */
+MODULE_AUTHOR("Arnaud Patard, Vasily Khoruzhick");
+MODULE_DESCRIPTION("ALSA SoC H1940");
+MODULE_LICENSE("GPL");
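
h1940_hw_params() above derives the IIS prescaler by dividing the IIS source clock by 384*fs and rounding to the nearest integer. A standalone check of that arithmetic, assuming a 66 MHz source clock purely for illustration (the driver reads the real rate from s3c24xx_i2s_get_clockrate()):

#include <stdio.h>

/* Same rounding as the divider computation in h1940_hw_params() */
static unsigned int h1940_div(unsigned long clkrate, unsigned int rate)
{
	unsigned int div = clkrate / (384 * rate);

	if (clkrate % (384 * rate) > (192 * rate))
		div++;	/* remainder past the midpoint: round up */

	return div;
}

int main(void)
{
	/* 66000000 / (384 * 44100) = 3, remainder 15196800 > 192 * 44100, so div = 4 */
	printf("div = %u\n", h1940_div(66000000UL, 44100));
	return 0;
}
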
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
new file mode 100644
index 0000000..d00ac3a
--- /dev/null
+++ b/sound/soc/samsung/i2s.c
@@ -0,0 +1,1258 @@
+/* sound/soc/samsung/i2s.c
+ *
+ * ALSA SoC Audio Layer - Samsung I2S Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <plat/audio.h>
+
+#include "dma.h"
+#include "i2s.h"
+
+#define I2SCON		0x0
+#define I2SMOD		0x4
+#define I2SFIC		0x8
+#define I2SPSR		0xc
+#define I2STXD		0x10
+#define I2SRXD		0x14
+#define I2SFICS		0x18
+#define I2STXDS		0x1c
+
+#define CON_RSTCLR		(1 << 31)
+#define CON_FRXOFSTATUS		(1 << 26)
+#define CON_FRXORINTEN		(1 << 25)
+#define CON_FTXSURSTAT		(1 << 24)
+#define CON_FTXSURINTEN		(1 << 23)
+#define CON_TXSDMA_PAUSE	(1 << 20)
+#define CON_TXSDMA_ACTIVE	(1 << 18)
+
+#define CON_FTXURSTATUS		(1 << 17)
+#define CON_FTXURINTEN		(1 << 16)
+#define CON_TXFIFO2_EMPTY	(1 << 15)
+#define CON_TXFIFO1_EMPTY	(1 << 14)
+#define CON_TXFIFO2_FULL	(1 << 13)
+#define CON_TXFIFO1_FULL	(1 << 12)
+
+#define CON_LRINDEX		(1 << 11)
+#define CON_TXFIFO_EMPTY	(1 << 10)
+#define CON_RXFIFO_EMPTY	(1 << 9)
+#define CON_TXFIFO_FULL		(1 << 8)
+#define CON_RXFIFO_FULL		(1 << 7)
+#define CON_TXDMA_PAUSE		(1 << 6)
+#define CON_RXDMA_PAUSE		(1 << 5)
+#define CON_TXCH_PAUSE		(1 << 4)
+#define CON_RXCH_PAUSE		(1 << 3)
+#define CON_TXDMA_ACTIVE	(1 << 2)
+#define CON_RXDMA_ACTIVE	(1 << 1)
+#define CON_ACTIVE		(1 << 0)
+
+#define MOD_OPCLK_CDCLK_OUT	(0 << 30)
+#define MOD_OPCLK_CDCLK_IN	(1 << 30)
+#define MOD_OPCLK_BCLK_OUT	(2 << 30)
+#define MOD_OPCLK_PCLK		(3 << 30)
+#define MOD_OPCLK_MASK		(3 << 30)
+#define MOD_TXS_IDMA		(1 << 28) /* Sec_TXFIFO uses I-DMA */
+
+#define MOD_BLCS_SHIFT	26
+#define MOD_BLCS_16BIT	(0 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_8BIT	(1 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_24BIT	(2 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_MASK	(3 << MOD_BLCS_SHIFT)
+#define MOD_BLCP_SHIFT	24
+#define MOD_BLCP_16BIT	(0 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_8BIT	(1 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_24BIT	(2 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_MASK	(3 << MOD_BLCP_SHIFT)
+
+#define MOD_C2DD_HHALF		(1 << 21) /* Discard Higher-half */
+#define MOD_C2DD_LHALF		(1 << 20) /* Discard Lower-half */
+#define MOD_C1DD_HHALF		(1 << 19)
+#define MOD_C1DD_LHALF		(1 << 18)
+#define MOD_DC2_EN		(1 << 17)
+#define MOD_DC1_EN		(1 << 16)
+#define MOD_BLC_16BIT		(0 << 13)
+#define MOD_BLC_8BIT		(1 << 13)
+#define MOD_BLC_24BIT		(2 << 13)
+#define MOD_BLC_MASK		(3 << 13)
+
+#define MOD_IMS_SYSMUX		(1 << 10)
+#define MOD_SLAVE		(1 << 11)
+#define MOD_TXONLY		(0 << 8)
+#define MOD_RXONLY		(1 << 8)
+#define MOD_TXRX		(2 << 8)
+#define MOD_MASK		(3 << 8)
+#define MOD_LR_LLOW		(0 << 7)
+#define MOD_LR_RLOW		(1 << 7)
+#define MOD_SDF_IIS		(0 << 5)
+#define MOD_SDF_MSB		(1 << 5)
+#define MOD_SDF_LSB		(2 << 5)
+#define MOD_SDF_MASK		(3 << 5)
+#define MOD_RCLK_256FS		(0 << 3)
+#define MOD_RCLK_512FS		(1 << 3)
+#define MOD_RCLK_384FS		(2 << 3)
+#define MOD_RCLK_768FS		(3 << 3)
+#define MOD_RCLK_MASK		(3 << 3)
+#define MOD_BCLK_32FS		(0 << 1)
+#define MOD_BCLK_48FS		(1 << 1)
+#define MOD_BCLK_16FS		(2 << 1)
+#define MOD_BCLK_24FS		(3 << 1)
+#define MOD_BCLK_MASK		(3 << 1)
+#define MOD_8BIT		(1 << 0)
+
+#define MOD_CDCLKCON		(1 << 12)
+
+#define PSR_PSREN		(1 << 15)
+
+#define FIC_TX2COUNT(x)		(((x) >>  24) & 0xf)
+#define FIC_TX1COUNT(x)		(((x) >>  16) & 0xf)
+
+#define FIC_TXFLUSH		(1 << 15)
+#define FIC_RXFLUSH		(1 << 7)
+#define FIC_TXCOUNT(x)		(((x) >>  8) & 0xf)
+#define FIC_RXCOUNT(x)		(((x) >>  0) & 0xf)
+#define FICS_TXCOUNT(x)		(((x) >>  8) & 0x7f)
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+struct i2s_dai {
+	/* Platform device for this DAI */
+	struct platform_device *pdev;
+	/* IOREMAP'd SFRs */
+	void __iomem	*addr;
+	/* Physical base address of SFRs */
+	u32	base;
+	/* Rate of RCLK source clock */
+	unsigned long rclk_srcrate;
+	/* Frame Clock */
+	unsigned frmclk;
+	/*
+	 * RCLK and BCLK rates specifically requested by the machine driver;
+	 * 0 means the CPU driver is free to choose any value.
+	 */
+	unsigned rfs, bfs;
+	/* I2S Controller's core clock */
+	struct clk *clk;
+	/* Clock for generating I2S signals */
+	struct clk *op_clk;
+	/* Array of clock names for op_clk */
+	const char **src_clk;
+	/* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */
+	struct i2s_dai *pri_dai;
+	/* Pointer to the Secondary_Fifo if it has one, NULL otherwise */
+	struct i2s_dai *sec_dai;
+#define DAI_OPENED	(1 << 0) /* Dai is opened */
+#define DAI_MANAGER	(1 << 1) /* Dai is the manager */
+	unsigned mode;
+	/* Driver for this DAI */
+	struct snd_soc_dai_driver i2s_dai_drv;
+	/* DMA parameters */
+	struct s3c_dma_params dma_playback;
+	struct s3c_dma_params dma_capture;
+	u32	quirks;
+	u32	suspend_i2smod;
+	u32	suspend_i2scon;
+	u32	suspend_i2spsr;
+};
+
+/* Lock for cross i/f checks */
+static DEFINE_SPINLOCK(lock);
+
+/* If this is the 'overlay' stereo DAI */
+static inline bool is_secondary(struct i2s_dai *i2s)
+{
+	return i2s->pri_dai ? true : false;
+}
+
+/* If operating in SoC-Slave mode */
+static inline bool is_slave(struct i2s_dai *i2s)
+{
+	return (readl(i2s->addr + I2SMOD) & MOD_SLAVE) ? true : false;
+}
+
+/* If this interface of the controller is transmitting data */
+static inline bool tx_active(struct i2s_dai *i2s)
+{
+	u32 active;
+
+	if (!i2s)
+		return false;
+
+	active = readl(i2s->addr + I2SMOD);
+
+	if (is_secondary(i2s))
+		active &= CON_TXSDMA_ACTIVE;
+	else
+		active &= CON_TXDMA_ACTIVE;
+
+	return active ? true : false;
+}
+
+/* If the other interface of the controller is transmitting data */
+static inline bool other_tx_active(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	return tx_active(other);
+}
+
+/* If any interface of the controller is transmitting data */
+static inline bool any_tx_active(struct i2s_dai *i2s)
+{
+	return tx_active(i2s) || other_tx_active(i2s);
+}
+
+/* If this interface of the controller is receiving data */
+static inline bool rx_active(struct i2s_dai *i2s)
+{
+	u32 active;
+
+	if (!i2s)
+		return false;
+
+	active = readl(i2s->addr + I2SMOD) & CON_RXDMA_ACTIVE;
+
+	return active ? true : false;
+}
+
+/* If the other interface of the controller is receiving data */
+static inline bool other_rx_active(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	return rx_active(other);
+}
+
+/* If any interface of the controller is receiving data */
+static inline bool any_rx_active(struct i2s_dai *i2s)
+{
+	return rx_active(i2s) || other_rx_active(i2s);
+}
+
+/* If the other DAI is transmitting or receiving data */
+static inline bool other_active(struct i2s_dai *i2s)
+{
+	return other_rx_active(i2s) || other_tx_active(i2s);
+}
+
+/* If this DAI is transmitting or receiving data */
+static inline bool this_active(struct i2s_dai *i2s)
+{
+	return tx_active(i2s) || rx_active(i2s);
+}
+
+/* If the controller is active in any way */
+static inline bool any_active(struct i2s_dai *i2s)
+{
+	return this_active(i2s) || other_active(i2s);
+}
+
+static inline struct i2s_dai *to_info(struct snd_soc_dai *dai)
+{
+	return snd_soc_dai_get_drvdata(dai);
+}
+
+static inline bool is_opened(struct i2s_dai *i2s)
+{
+	if (i2s && (i2s->mode & DAI_OPENED))
+		return true;
+	else
+		return false;
+}
+
+static inline bool is_manager(struct i2s_dai *i2s)
+{
+	if (is_opened(i2s) && (i2s->mode & DAI_MANAGER))
+		return true;
+	else
+		return false;
+}
+
+/* Read RCLK of I2S (in multiples of LRCLK) */
+static inline unsigned get_rfs(struct i2s_dai *i2s)
+{
+	u32 rfs = (readl(i2s->addr + I2SMOD) >> 3) & 0x3;
+
+	switch (rfs) {
+	case 3:	return 768;
+	case 2: return 384;
+	case 1:	return 512;
+	default: return 256;
+	}
+}
+
+/* Write RCLK of I2S (in multiples of LRCLK) */
+static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
+{
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	mod &= ~MOD_RCLK_MASK;
+
+	switch (rfs) {
+	case 768:
+		mod |= MOD_RCLK_768FS;
+		break;
+	case 512:
+		mod |= MOD_RCLK_512FS;
+		break;
+	case 384:
+		mod |= MOD_RCLK_384FS;
+		break;
+	default:
+		mod |= MOD_RCLK_256FS;
+		break;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+}
+
+/* Read Bit-Clock of I2S (in multiples of LRCLK) */
+static inline unsigned get_bfs(struct i2s_dai *i2s)
+{
+	u32 bfs = (readl(i2s->addr + I2SMOD) >> 1) & 0x3;
+
+	switch (bfs) {
+	case 3: return 24;
+	case 2: return 16;
+	case 1:	return 48;
+	default: return 32;
+	}
+}
+
+/* Write Bit-Clock of I2S (in multiples of LRCLK) */
+static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
+{
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	mod &= ~MOD_BCLK_MASK;
+
+	switch (bfs) {
+	case 48:
+		mod |= MOD_BCLK_48FS;
+		break;
+	case 32:
+		mod |= MOD_BCLK_32FS;
+		break;
+	case 24:
+		mod |= MOD_BCLK_24FS;
+		break;
+	case 16:
+		mod |= MOD_BCLK_16FS;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Wrong BCLK Divider!\n");
+		return;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+}
+
+/* Sample-Size */
+static inline int get_blc(struct i2s_dai *i2s)
+{
+	int blc = readl(i2s->addr + I2SMOD);
+
+	blc = (blc >> 13) & 0x3;
+
+	switch (blc) {
+	case 2: return 24;
+	case 1:	return 8;
+	default: return 16;
+	}
+}
+
+/* TX Channel Control */
+static void i2s_txctrl(struct i2s_dai *i2s, int on)
+{
+	void __iomem *addr = i2s->addr;
+	u32 con = readl(addr + I2SCON);
+	u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+
+	if (on) {
+		con |= CON_ACTIVE;
+		con &= ~CON_TXCH_PAUSE;
+
+		if (is_secondary(i2s)) {
+			con |= CON_TXSDMA_ACTIVE;
+			con &= ~CON_TXSDMA_PAUSE;
+		} else {
+			con |= CON_TXDMA_ACTIVE;
+			con &= ~CON_TXDMA_PAUSE;
+		}
+
+		if (any_rx_active(i2s))
+			mod |= MOD_TXRX;
+		else
+			mod |= MOD_TXONLY;
+	} else {
+		if (is_secondary(i2s)) {
+			con |=  CON_TXSDMA_PAUSE;
+			con &= ~CON_TXSDMA_ACTIVE;
+		} else {
+			con |=  CON_TXDMA_PAUSE;
+			con &= ~CON_TXDMA_ACTIVE;
+		}
+
+		if (other_tx_active(i2s)) {
+			writel(con, addr + I2SCON);
+			return;
+		}
+
+		con |=  CON_TXCH_PAUSE;
+
+		if (any_rx_active(i2s))
+			mod |= MOD_RXONLY;
+		else
+			con &= ~CON_ACTIVE;
+	}
+
+	writel(mod, addr + I2SMOD);
+	writel(con, addr + I2SCON);
+}
+
+/* RX Channel Control */
+static void i2s_rxctrl(struct i2s_dai *i2s, int on)
+{
+	void __iomem *addr = i2s->addr;
+	u32 con = readl(addr + I2SCON);
+	u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+
+	if (on) {
+		con |= CON_RXDMA_ACTIVE | CON_ACTIVE;
+		con &= ~(CON_RXDMA_PAUSE | CON_RXCH_PAUSE);
+
+		if (any_tx_active(i2s))
+			mod |= MOD_TXRX;
+		else
+			mod |= MOD_RXONLY;
+	} else {
+		con |=  CON_RXDMA_PAUSE | CON_RXCH_PAUSE;
+		con &= ~CON_RXDMA_ACTIVE;
+
+		if (any_tx_active(i2s))
+			mod |= MOD_TXONLY;
+		else
+			con &= ~CON_ACTIVE;
+	}
+
+	writel(mod, addr + I2SMOD);
+	writel(con, addr + I2SCON);
+}
+
+/* Flush FIFO of an interface */
+static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush)
+{
+	void __iomem *fic;
+	u32 val;
+
+	if (!i2s)
+		return;
+
+	if (is_secondary(i2s))
+		fic = i2s->addr + I2SFICS;
+	else
+		fic = i2s->addr + I2SFIC;
+
+	/* Flush the FIFO */
+	writel(readl(fic) | flush, fic);
+
+	/* Be patient */
+	val = msecs_to_loops(1) / 1000; /* 1 usec */
+	while (--val)
+		cpu_relax();
+
+	writel(readl(fic) & ~flush, fic);
+}
+
+static int i2s_set_sysclk(struct snd_soc_dai *dai,
+	  int clk_id, unsigned int rfs, int dir)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	switch (clk_id) {
+	case SAMSUNG_I2S_CDCLK:
+		/* Shouldn't matter in GATING(CLOCK_IN) mode */
+		if (dir == SND_SOC_CLOCK_IN)
+			rfs = 0;
+
+		if ((rfs && other->rfs && (other->rfs != rfs)) ||
+				(any_active(i2s) &&
+				(((dir == SND_SOC_CLOCK_IN)
+					&& !(mod & MOD_CDCLKCON)) ||
+				((dir == SND_SOC_CLOCK_OUT)
+					&& (mod & MOD_CDCLKCON))))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		}
+
+		if (dir == SND_SOC_CLOCK_IN)
+			mod |= MOD_CDCLKCON;
+		else
+			mod &= ~MOD_CDCLKCON;
+
+		i2s->rfs = rfs;
+		break;
+
+	case SAMSUNG_I2S_RCLKSRC_0: /* clock corresponding to IISMOD[10] := 0 */
+	case SAMSUNG_I2S_RCLKSRC_1: /* clock corresponding to IISMOD[10] := 1 */
+		if ((i2s->quirks & QUIRK_NO_MUXPSR)
+				|| (clk_id == SAMSUNG_I2S_RCLKSRC_0))
+			clk_id = 0;
+		else
+			clk_id = 1;
+
+		if (!any_active(i2s)) {
+			if (i2s->op_clk) {
+				if ((clk_id && !(mod & MOD_IMS_SYSMUX)) ||
+					(!clk_id && (mod & MOD_IMS_SYSMUX))) {
+					clk_disable(i2s->op_clk);
+					clk_put(i2s->op_clk);
+				} else {
+					i2s->rclk_srcrate =
+						clk_get_rate(i2s->op_clk);
+					return 0;
+				}
+			}
+
+			i2s->op_clk = clk_get(&i2s->pdev->dev,
+						i2s->src_clk[clk_id]);
+			clk_enable(i2s->op_clk);
+			i2s->rclk_srcrate = clk_get_rate(i2s->op_clk);
+
+			/* Over-ride the other's */
+			if (other) {
+				other->op_clk = i2s->op_clk;
+				other->rclk_srcrate = i2s->rclk_srcrate;
+			}
+		} else if ((!clk_id && (mod & MOD_IMS_SYSMUX))
+				|| (clk_id && !(mod & MOD_IMS_SYSMUX))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		} else {
+			/* Call can't be on the active DAI */
+			i2s->op_clk = other->op_clk;
+			i2s->rclk_srcrate = other->rclk_srcrate;
+			return 0;
+		}
+
+		if (clk_id == 0)
+			mod &= ~MOD_IMS_SYSMUX;
+		else
+			mod |= MOD_IMS_SYSMUX;
+		break;
+
+	default:
+		dev_err(&i2s->pdev->dev, "We don't serve that!\n");
+		return -EINVAL;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+
+	return 0;
+}
+
+static int i2s_set_fmt(struct snd_soc_dai *dai,
+	unsigned int fmt)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 mod = readl(i2s->addr + I2SMOD);
+	u32 tmp = 0;
+
+	/* Format is priority */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_RIGHT_J:
+		tmp |= MOD_LR_RLOW;
+		tmp |= MOD_SDF_MSB;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		tmp |= MOD_LR_RLOW;
+		tmp |= MOD_SDF_LSB;
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		tmp |= MOD_SDF_IIS;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Format not supported\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * INV flag is relative to the FORMAT flag - if set it simply
+	 * flips the polarity specified by the Standard
+	 */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		if (tmp & MOD_LR_RLOW)
+			tmp &= ~MOD_LR_RLOW;
+		else
+			tmp |= MOD_LR_RLOW;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Polarity not supported\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		tmp |= MOD_SLAVE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		/* Set default source clock in Master mode */
+		if (i2s->rclk_srcrate == 0)
+			i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
+							0, SND_SOC_CLOCK_IN);
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "master/slave format not supported\n");
+		return -EINVAL;
+	}
+
+	if (any_active(i2s) &&
+			((mod & (MOD_SDF_MASK | MOD_LR_RLOW
+				| MOD_SLAVE)) != tmp)) {
+		dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	mod &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE);
+	mod |= tmp;
+	writel(mod, i2s->addr + I2SMOD);
+
+	return 0;
+}
+
+static int i2s_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	if (!is_secondary(i2s))
+		mod &= ~(MOD_DC2_EN | MOD_DC1_EN);
+
+	switch (params_channels(params)) {
+	case 6:
+		mod |= MOD_DC2_EN;
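+		/* Fall through: 6 channels also need the DC1 (4-channel) path enabled */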
+	case 4:
+		mod |= MOD_DC1_EN;
+		break;
+	case 2:
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "%d channels not supported\n",
+				params_channels(params));
+		return -EINVAL;
+	}
+
+	if (is_secondary(i2s))
+		mod &= ~MOD_BLCS_MASK;
+	else
+		mod &= ~MOD_BLCP_MASK;
+
+	if (is_manager(i2s))
+		mod &= ~MOD_BLC_MASK;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S8:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_8BIT;
+		else
+			mod |= MOD_BLCP_8BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_8BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_16BIT;
+		else
+			mod |= MOD_BLCP_16BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_16BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_24BIT;
+		else
+			mod |= MOD_BLCP_24BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_24BIT;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Format(%d) not supported\n",
+				params_format(params));
+		return -EINVAL;
+	}
+	writel(mod, i2s->addr + I2SMOD);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snd_soc_dai_set_dma_data(dai, substream,
+			(void *)&i2s->dma_playback);
+	else
+		snd_soc_dai_set_dma_data(dai, substream,
+			(void *)&i2s->dma_capture);
+
+	i2s->frmclk = params_rate(params);
+
+	return 0;
+}
+
+/* We set constraints on the substream according to the version of I2S */
+static int i2s_startup(struct snd_pcm_substream *substream,
+	  struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+
+	i2s->mode |= DAI_OPENED;
+
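+	/* Only one of the pri/sec DAIs manages the shared controller at a time */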
+	if (is_manager(other))
+		i2s->mode &= ~DAI_MANAGER;
+	else
+		i2s->mode |= DAI_MANAGER;
+
+	/* Enforce set_sysclk in Master mode */
+	i2s->rclk_srcrate = 0;
+
+	spin_unlock_irqrestore(&lock, flags);
+
+	return 0;
+}
+
+static void i2s_shutdown(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+
+	i2s->mode &= ~DAI_OPENED;
+	i2s->mode &= ~DAI_MANAGER;
+
+	if (is_opened(other))
+		other->mode |= DAI_MANAGER;
+
+	/* Reset any constraint on RFS and BFS */
+	i2s->rfs = 0;
+	i2s->bfs = 0;
+
+	spin_unlock_irqrestore(&lock, flags);
+
+	/* Gate CDCLK by default */
+	if (!is_opened(other))
+		i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
+				0, SND_SOC_CLOCK_IN);
+}
+
+static int config_setup(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned rfs, bfs, blc;
+	u32 psr;
+
+	blc = get_blc(i2s);
+
+	bfs = i2s->bfs;
+
+	if (!bfs && other)
+		bfs = other->bfs;
+
+	/* Select the least possible multiple (2) if no constraint is set */
+	if (!bfs)
+		bfs = blc * 2;
+
+	rfs = i2s->rfs;
+
+	if (!rfs && other)
+		rfs = other->rfs;
+
+	if ((rfs == 256 || rfs == 512) && (blc == 24)) {
+		dev_err(&i2s->pdev->dev,
+			"%d-RFS not supported for 24-blc\n", rfs);
+		return -EINVAL;
+	}
+
+	if (!rfs) {
+		if (bfs == 16 || bfs == 32)
+			rfs = 256;
+		else
+			rfs = 384;
+	}
+
+	/* If already set up and running */
+	if (any_active(i2s) && (get_rfs(i2s) != rfs || get_bfs(i2s) != bfs)) {
+		dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	/* Don't bother with RFS, BFS & PSR in Slave mode */
+	if (is_slave(i2s))
+		return 0;
+
+	set_bfs(i2s, bfs);
+	set_rfs(i2s, rfs);
+
+	if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
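+		/* Prescaler: divide the RCLK source down to frmclk * rfs */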
+		psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
+		writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
+		dev_dbg(&i2s->pdev->dev,
+			"RCLK_SRC=%luHz PSR=%u, RCLK=%dfs, BCLK=%dfs\n",
+				i2s->rclk_srcrate, psr, rfs, bfs);
+	}
+
+	return 0;
+}
+
+static int i2s_trigger(struct snd_pcm_substream *substream,
+	int cmd, struct snd_soc_dai *dai)
+{
+	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct i2s_dai *i2s = to_info(rtd->cpu_dai);
+	unsigned long flags;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		local_irq_save(flags);
+
+		if (config_setup(i2s)) {
+			local_irq_restore(flags);
+			return -EINVAL;
+		}
+
+		if (capture)
+			i2s_rxctrl(i2s, 1);
+		else
+			i2s_txctrl(i2s, 1);
+
+		local_irq_restore(flags);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		local_irq_save(flags);
+
+		if (capture)
+			i2s_rxctrl(i2s, 0);
+		else
+			i2s_txctrl(i2s, 0);
+
+		if (capture)
+			i2s_fifo(i2s, FIC_RXFLUSH);
+		else
+			i2s_fifo(i2s, FIC_TXFLUSH);
+
+		local_irq_restore(flags);
+		break;
+	}
+
+	return 0;
+}
+
+static int i2s_set_clkdiv(struct snd_soc_dai *dai,
+	int div_id, int div)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	switch (div_id) {
+	case SAMSUNG_I2S_DIV_BCLK:
+		if ((any_active(i2s) && div && (get_bfs(i2s) != div))
+			|| (other && other->bfs && (other->bfs != div))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		}
+		i2s->bfs = div;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev,
+			"Invalid clock divider(%d)\n", div_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static snd_pcm_sframes_t
+i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 reg = readl(i2s->addr + I2SFIC);
+	snd_pcm_sframes_t delay;
+
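+	/* Residual samples left in the FIFO are reported as the DAI delay */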
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		delay = FIC_RXCOUNT(reg);
+	else if (is_secondary(i2s))
+		delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS));
+	else
+		delay = FIC_TXCOUNT(reg);
+
+	return delay;
+}
+
+#ifdef CONFIG_PM
+static int i2s_suspend(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+
+	if (dai->active) {
+		i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
+		i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
+		i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+	}
+
+	return 0;
+}
+
+static int i2s_resume(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+
+	if (dai->active) {
+		writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
+		writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
+		writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+	}
+
+	return 0;
+}
+#else
+#define i2s_suspend NULL
+#define i2s_resume  NULL
+#endif
+
+static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (other && other->clk) /* If this is probe on secondary */
+		goto probe_exit;
+
+	i2s->addr = ioremap(i2s->base, 0x100);
+	if (i2s->addr == NULL) {
+		dev_err(&i2s->pdev->dev, "cannot ioremap registers\n");
+		return -ENXIO;
+	}
+
+	i2s->clk = clk_get(&i2s->pdev->dev, "iis");
+	if (IS_ERR(i2s->clk)) {
+		dev_err(&i2s->pdev->dev, "failed to get i2s_clock\n");
+		iounmap(i2s->addr);
+		return -ENOENT;
+	}
+	clk_enable(i2s->clk);
+
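+	/* The secondary DAI shares the register block and gate clock */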
+	if (other) {
+		other->addr = i2s->addr;
+		other->clk = i2s->clk;
+	}
+
+	if (i2s->quirks & QUIRK_NEED_RSTCLR)
+		writel(CON_RSTCLR, i2s->addr + I2SCON);
+
+probe_exit:
+	/* Reset any constraint on RFS and BFS */
+	i2s->rfs = 0;
+	i2s->bfs = 0;
+	i2s_txctrl(i2s, 0);
+	i2s_rxctrl(i2s, 0);
+	i2s_fifo(i2s, FIC_TXFLUSH);
+	i2s_fifo(other, FIC_TXFLUSH);
+	i2s_fifo(i2s, FIC_RXFLUSH);
+
+	/* Gate CDCLK by default */
+	if (!is_opened(other))
+		i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
+				0, SND_SOC_CLOCK_IN);
+
+	return 0;
+}
+
+static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (!other || !other->clk) {
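+		/* Last DAI holding the shared resources: release them */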
+
+		if (i2s->quirks & QUIRK_NEED_RSTCLR)
+			writel(0, i2s->addr + I2SCON);
+
+		clk_disable(i2s->clk);
+		clk_put(i2s->clk);
+
+		iounmap(i2s->addr);
+	}
+
+	i2s->clk = NULL;
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops samsung_i2s_dai_ops = {
+	.trigger = i2s_trigger,
+	.hw_params = i2s_hw_params,
+	.set_fmt = i2s_set_fmt,
+	.set_clkdiv = i2s_set_clkdiv,
+	.set_sysclk = i2s_set_sysclk,
+	.startup = i2s_startup,
+	.shutdown = i2s_shutdown,
+	.delay = i2s_delay,
+};
+
+#define SAMSUNG_I2S_RATES	SNDRV_PCM_RATE_8000_96000
+
+#define SAMSUNG_I2S_FMTS	(SNDRV_PCM_FMTBIT_S8 | \
+					SNDRV_PCM_FMTBIT_S16_LE | \
+					SNDRV_PCM_FMTBIT_S24_LE)
+
+static __devinit
+struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
+{
+	struct i2s_dai *i2s;
+
+	i2s = kzalloc(sizeof(struct i2s_dai), GFP_KERNEL);
+	if (i2s == NULL)
+		return NULL;
+
+	i2s->pdev = pdev;
+	i2s->pri_dai = NULL;
+	i2s->sec_dai = NULL;
+	i2s->i2s_dai_drv.symmetric_rates = 1;
+	i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe;
+	i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove;
+	i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops;
+	i2s->i2s_dai_drv.suspend = i2s_suspend;
+	i2s->i2s_dai_drv.resume = i2s_resume;
+	i2s->i2s_dai_drv.playback.channels_min = 2;
+	i2s->i2s_dai_drv.playback.channels_max = 2;
+	i2s->i2s_dai_drv.playback.rates = SAMSUNG_I2S_RATES;
+	i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS;
+
+	if (!sec) {
+		i2s->i2s_dai_drv.capture.channels_min = 2;
+		i2s->i2s_dai_drv.capture.channels_max = 2;
+		i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
+		i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
+	} else {	/* Create a new platform_device for Secondary */
+		i2s->pdev = platform_device_register_resndata(NULL,
+				pdev->name, pdev->id + SAMSUNG_I2S_SECOFF,
+				NULL, 0, NULL, 0);
+		if (IS_ERR(i2s->pdev)) {
+			kfree(i2s);
+			return NULL;
+		}
+	}
+
+	/* Pre-assign snd_soc_dai_set_drvdata */
+	dev_set_drvdata(&i2s->pdev->dev, i2s);
+
+	return i2s;
+}
+
+static __devinit int samsung_i2s_probe(struct platform_device *pdev)
+{
+	u32 dma_pl_chan, dma_cp_chan, dma_pl_sec_chan;
+	struct i2s_dai *pri_dai, *sec_dai = NULL;
+	struct s3c_audio_pdata *i2s_pdata;
+	struct samsung_i2s *i2s_cfg;
+	struct resource *res;
+	u32 regs_base, quirks;
+	int ret = 0;
+
+	/* Called during Secondary interface registration */
+	if (pdev->id >= SAMSUNG_I2S_SECOFF) {
+		sec_dai = dev_get_drvdata(&pdev->dev);
+		snd_soc_register_dai(&sec_dai->pdev->dev,
+			&sec_dai->i2s_dai_drv);
+		return 0;
+	}
+
+	i2s_pdata = pdev->dev.platform_data;
+	if (i2s_pdata == NULL) {
+		dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
+		return -ENXIO;
+	}
+	dma_pl_chan = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
+		return -ENXIO;
+	}
+	dma_cp_chan = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+	if (res)
+		dma_pl_sec_chan = res->start;
+	else
+		dma_pl_sec_chan = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
+		return -ENXIO;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res),
+							"samsung-i2s")) {
+		dev_err(&pdev->dev, "Unable to request SFR region\n");
+		return -EBUSY;
+	}
+	regs_base = res->start;
+
+	i2s_cfg = &i2s_pdata->type.i2s;
+	quirks = i2s_cfg->quirks;
+
+	pri_dai = i2s_alloc_dai(pdev, false);
+	if (!pri_dai) {
+		dev_err(&pdev->dev, "Unable to alloc I2S_pri\n");
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
+	pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
+	pri_dai->dma_playback.client =
+		(struct s3c2410_dma_client *)&pri_dai->dma_playback;
+	pri_dai->dma_capture.client =
+		(struct s3c2410_dma_client *)&pri_dai->dma_capture;
+	pri_dai->dma_playback.channel = dma_pl_chan;
+	pri_dai->dma_capture.channel = dma_cp_chan;
+	pri_dai->src_clk = i2s_cfg->src_clk;
+	pri_dai->dma_playback.dma_size = 4;
+	pri_dai->dma_capture.dma_size = 4;
+	pri_dai->base = regs_base;
+	pri_dai->quirks = quirks;
+
+	if (quirks & QUIRK_PRI_6CHAN)
+		pri_dai->i2s_dai_drv.playback.channels_max = 6;
+
+	if (quirks & QUIRK_SEC_DAI) {
+		sec_dai = i2s_alloc_dai(pdev, true);
+		if (!sec_dai) {
+			dev_err(&pdev->dev, "Unable to alloc I2S_sec\n");
+			ret = -ENOMEM;
+			goto err2;
+		}
+		sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
+		sec_dai->dma_playback.client =
+			(struct s3c2410_dma_client *)&sec_dai->dma_playback;
+		/* Use iDMA always if SysDMA not provided */
+		sec_dai->dma_playback.channel = dma_pl_sec_chan ? : -1;
+		sec_dai->src_clk = i2s_cfg->src_clk;
+		sec_dai->dma_playback.dma_size = 4;
+		sec_dai->base = regs_base;
+		sec_dai->quirks = quirks;
+		sec_dai->pri_dai = pri_dai;
+		pri_dai->sec_dai = sec_dai;
+	}
+
+	if (i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
+		dev_err(&pdev->dev, "Unable to configure gpio\n");
+		ret = -EINVAL;
+		goto err3;
+	}
+
+	snd_soc_register_dai(&pri_dai->pdev->dev, &pri_dai->i2s_dai_drv);
+
+	return 0;
+err3:
+	kfree(sec_dai);
+err2:
+	kfree(pri_dai);
+err1:
+	release_mem_region(regs_base, resource_size(res));
+
+	return ret;
+}
+
+static __devexit int samsung_i2s_remove(struct platform_device *pdev)
+{
+	struct i2s_dai *i2s, *other;
+
+	i2s = dev_get_drvdata(&pdev->dev);
+	other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (other) {
+		other->pri_dai = NULL;
+		other->sec_dai = NULL;
+	} else {
+		struct resource *res;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (res)
+			release_mem_region(res->start, resource_size(res));
+	}
+
+	i2s->pri_dai = NULL;
+	i2s->sec_dai = NULL;
+
+	kfree(i2s);
+
+	snd_soc_unregister_dai(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver samsung_i2s_driver = {
+	.probe  = samsung_i2s_probe,
+	.remove = samsung_i2s_remove,
+	.driver = {
+		.name = "samsung-i2s",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init samsung_i2s_init(void)
+{
+	return platform_driver_register(&samsung_i2s_driver);
+}
+module_init(samsung_i2s_init);
+
+static void __exit samsung_i2s_exit(void)
+{
+	platform_driver_unregister(&samsung_i2s_driver);
+}
+module_exit(samsung_i2s_exit);
+
+/* Module information */
+MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("Samsung I2S Interface");
+MODULE_ALIAS("platform:samsung-i2s");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/i2s.h b/sound/soc/samsung/i2s.h
new file mode 100644
index 0000000..8e15f6a
--- /dev/null
+++ b/sound/soc/samsung/i2s.h
@@ -0,0 +1,29 @@
+/* sound/soc/samsung/i2s.h
+ *
+ * ALSA SoC Audio Layer - Samsung I2S Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SND_SOC_SAMSUNG_I2S_H
+#define __SND_SOC_SAMSUNG_I2S_H
+
+/*
+ * Maximum number of I2S blocks that any SoC can have.
+ * The secondary interface of a CPU DAI (if one exists)
+ * is indexed at [cpu-dai's ID + SAMSUNG_I2S_SECOFF].
+ */
+#define SAMSUNG_I2S_SECOFF	4
+
+#define SAMSUNG_I2S_DIV_BCLK	1
+
+#define SAMSUNG_I2S_RCLKSRC_0	0
+#define SAMSUNG_I2S_RCLKSRC_1	1
+#define SAMSUNG_I2S_CDCLK		2
+
+#endif /* __SND_SOC_SAMSUNG_I2S_H */
diff --git a/sound/soc/s3c24xx/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
similarity index 88%
rename from sound/soc/s3c24xx/jive_wm8750.c
rename to sound/soc/samsung/jive_wm8750.c
index 49605cd..0880252 100644
--- a/sound/soc/s3c24xx/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/jive_wm8750.c
+/* sound/soc/samsung/jive_wm8750.c
  *
  * Copyright 2007,2008 Simtec Electronics
  *
@@ -21,11 +21,10 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c2412-i2s.h"
 
 #include "../codecs/wm8750.h"
@@ -111,18 +110,19 @@
 static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* These endpoints are not being used. */
-	snd_soc_dapm_nc_pin(codec, "LINPUT2");
-	snd_soc_dapm_nc_pin(codec, "RINPUT2");
-	snd_soc_dapm_nc_pin(codec, "LINPUT3");
-	snd_soc_dapm_nc_pin(codec, "RINPUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONO");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONO");
 
 	/* Add jive specific widgets */
-	err = snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	err = snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 					ARRAY_SIZE(wm8750_dapm_widgets));
 	if (err) {
 		printk(KERN_ERR "%s: failed to add widgets (%d)\n",
@@ -130,8 +130,8 @@
 		return err;
 	}
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -141,7 +141,7 @@
 	.stream_name	= "WM8750",
 	.cpu_dai_name	= "s3c2412-i2s",
 	.codec_dai_name = "wm8750-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 	.codec_name	= "wm8750-codec.0-0x1a",
 	.init		= jive_wm8750_init,
 	.ops		= &jive_ops,
diff --git a/sound/soc/s3c24xx/lm4857.h b/sound/soc/samsung/lm4857.h
similarity index 100%
rename from sound/soc/s3c24xx/lm4857.h
rename to sound/soc/samsung/lm4857.h
diff --git a/sound/soc/s3c24xx/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c
similarity index 92%
rename from sound/soc/s3c24xx/ln2440sbc_alc650.c
rename to sound/soc/samsung/ln2440sbc_alc650.c
index abe64ab..a2bb34d 100644
--- a/sound/soc/s3c24xx/ln2440sbc_alc650.c
+++ b/sound/soc/samsung/ln2440sbc_alc650.c
@@ -21,10 +21,9 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 static struct snd_soc_card ln2440sbc;
 
@@ -32,10 +31,10 @@
 {
 	.name = "AC97",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "s3c-ac97",
+	.cpu_dai_name = "samsung-ac97",
 	.codec_dai_name = "ac97-hifi",
 	.codec_name = "ac97-codec",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 },
 };
 
diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/samsung/neo1973_gta02_wm8753.c
similarity index 87%
rename from sound/soc/s3c24xx/neo1973_gta02_wm8753.c
rename to sound/soc/samsung/neo1973_gta02_wm8753.c
index e97bdf1..0d0ae2b 100644
--- a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
+++ b/sound/soc/samsung/neo1973_gta02_wm8753.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 
@@ -32,7 +31,7 @@
 #include <asm/io.h>
 #include <mach/gta02.h>
 #include "../codecs/wm8753.h"
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 
 static struct snd_soc_card neo1973_gta02;
@@ -333,16 +332,17 @@
 static int neo1973_gta02_wm8753_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT4");
-	snd_soc_dapm_nc_pin(codec, "LINE1");
-	snd_soc_dapm_nc_pin(codec, "LINE2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT4");
+	snd_soc_dapm_nc_pin(dapm, "LINE1");
+	snd_soc_dapm_nc_pin(dapm, "LINE2");
 
 	/* Add neo1973 gta02 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
 				  ARRAY_SIZE(wm8753_dapm_widgets));
 
 	/* add neo1973 gta02 specific controls */
@@ -353,25 +353,25 @@
 		return err;
 
 	/* set up neo1973 gta02 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* set endpoints to default off mode */
-	snd_soc_dapm_disable_pin(codec, "Stereo Out");
-	snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-	snd_soc_dapm_disable_pin(codec, "GSM Line In");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic");
-	snd_soc_dapm_disable_pin(codec, "Handset Mic");
-	snd_soc_dapm_disable_pin(codec, "Handset Spk");
+	snd_soc_dapm_disable_pin(dapm, "Stereo Out");
+	snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+	snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Handset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Handset Spk");
 
 	/* allow audio paths from the GSM modem to run during suspend */
-	snd_soc_dapm_ignore_suspend(codec, "Stereo Out");
-	snd_soc_dapm_ignore_suspend(codec, "GSM Line Out");
-	snd_soc_dapm_ignore_suspend(codec, "GSM Line In");
-	snd_soc_dapm_ignore_suspend(codec, "Headset Mic");
-	snd_soc_dapm_ignore_suspend(codec, "Handset Mic");
-	snd_soc_dapm_ignore_suspend(codec, "Handset Spk");
+	snd_soc_dapm_ignore_suspend(dapm, "Stereo Out");
+	snd_soc_dapm_ignore_suspend(dapm, "GSM Line Out");
+	snd_soc_dapm_ignore_suspend(dapm, "GSM Line In");
+	snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Handset Spk");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -397,11 +397,11 @@
 { /* Hifi Playback - for similatious use with voice below */
 	.name = "WM8753",
 	.stream_name = "WM8753 HiFi",
-	.cpu_dai_name = "s3c24xx-i2s",
+	.cpu_dai_name = "s3c24xx-iis",
 	.codec_dai_name = "wm8753-hifi",
 	.init = neo1973_gta02_wm8753_init,
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8753-codec.0-0x1a",
+	.platform_name = "samsung-audio",
+	.codec_name = "wm8753-codec.0-001a",
 	.ops = &neo1973_gta02_hifi_ops,
 },
 { /* Voice via BT */
@@ -410,8 +410,8 @@
 	.cpu_dai_name = "bluetooth-dai",
 	.codec_dai_name = "wm8753-voice",
 	.ops = &neo1973_gta02_voice_ops,
-	.codec_name = "wm8753-codec.0-0x1a",
-	.platform_name = "s3c24xx-pcm-audio",
+	.codec_name = "wm8753-codec.0-001a",
+	.platform_name = "samsung-audio",
 },
 };
 
@@ -438,25 +438,21 @@
 		return -ENOMEM;
 
 	/* register bluetooth DAI here */
-	ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, -1, &bt_dai);
-	if (ret) {
-		platform_device_put(neo1973_gta02_snd_device);
-		return ret;
-	}
+	ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, &bt_dai);
+	if (ret)
+		goto err_put_device;
 
 	platform_set_drvdata(neo1973_gta02_snd_device, &neo1973_gta02);
 	ret = platform_device_add(neo1973_gta02_snd_device);
 
-	if (ret) {
-		platform_device_put(neo1973_gta02_snd_device);
-		return ret;
-	}
+	if (ret)
+		goto err_unregister_dai;
 
 	/* Initialise GPIOs used by amp */
 	ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN");
 	if (ret) {
 		pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN);
-		goto err_unregister_device;
+		goto err_del_device;
 	}
 
 	ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1);
@@ -483,15 +479,19 @@
 	gpio_free(GTA02_GPIO_AMP_SHUT);
 err_free_gpio_hp_in:
 	gpio_free(GTA02_GPIO_HP_IN);
-err_unregister_device:
-	platform_device_unregister(neo1973_gta02_snd_device);
+err_del_device:
+	platform_device_del(neo1973_gta02_snd_device);
+err_unregister_dai:
+	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev);
+err_put_device:
+	platform_device_put(neo1973_gta02_snd_device);
 	return ret;
 }
 module_init(neo1973_gta02_init);
 
 static void __exit neo1973_gta02_exit(void)
 {
-	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev, -1);
+	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev);
 	platform_device_unregister(neo1973_gta02_snd_device);
 	gpio_free(GTA02_GPIO_HP_IN);
 	gpio_free(GTA02_GPIO_AMP_SHUT);
diff --git a/sound/soc/s3c24xx/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
similarity index 83%
rename from sound/soc/s3c24xx/neo1973_wm8753.c
rename to sound/soc/samsung/neo1973_wm8753.c
index f4f2ee7..d20815d 100644
--- a/sound/soc/s3c24xx/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 
 #include <asm/mach-types.h>
@@ -36,7 +35,7 @@
 
 #include "../codecs/wm8753.h"
 #include "lm4857.h"
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 
 /* define the scenarios */
@@ -237,81 +236,83 @@
 
 static int set_scenario_endpoints(struct snd_soc_codec *codec, int scenario)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	pr_debug("Entered %s\n", __func__);
 
 	switch (neo1973_scenario) {
 	case NEO_AUDIO_OFF:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_GSM_CALL_AUDIO_HANDSET:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
 		break;
 	case NEO_GSM_CALL_AUDIO_HEADSET:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_GSM_CALL_AUDIO_BLUETOOTH:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_STEREO_TO_SPEAKERS:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_STEREO_TO_HEADPHONES:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_CAPTURE_HANDSET:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
 		break;
 	case NEO_CAPTURE_HEADSET:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_CAPTURE_BLUETOOTH:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	default:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -502,20 +503,21 @@
 static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	pr_debug("Entered %s\n", __func__);
 
 	/* set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "LOUT2");
-	snd_soc_dapm_nc_pin(codec, "ROUT2");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT4");
-	snd_soc_dapm_nc_pin(codec, "LINE1");
-	snd_soc_dapm_nc_pin(codec, "LINE2");
+	snd_soc_dapm_nc_pin(dapm, "LOUT2");
+	snd_soc_dapm_nc_pin(dapm, "ROUT2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT4");
+	snd_soc_dapm_nc_pin(dapm, "LINE1");
+	snd_soc_dapm_nc_pin(dapm, "LINE2");
 
 	/* Add neo1973 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
 				  ARRAY_SIZE(wm8753_dapm_widgets));
 
 	/* set endpoints to default mode */
@@ -528,10 +530,10 @@
 		return err;
 
 	/* set up neo1973 specific audio routes */
-	err = snd_soc_dapm_add_routes(codec, dapm_routes,
+	err = snd_soc_dapm_add_routes(dapm, dapm_routes,
 				      ARRAY_SIZE(dapm_routes));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
@@ -556,20 +558,20 @@
 { /* Hifi Playback - for similatious use with voice below */
 	.name = "WM8753",
 	.stream_name = "WM8753 HiFi",
-	.platform_name = "s3c24xx-pcm-audio",
-	.cpu_dai_name = "s3c24xx-i2s",
+	.platform_name = "samsung-audio",
+	.cpu_dai_name = "s3c24xx-iis",
 	.codec_dai_name = "wm8753-hifi",
-	.codec_name = "wm8753-codec.0-0x1a",
+	.codec_name = "wm8753-codec.0-001a",
 	.init = neo1973_wm8753_init,
 	.ops = &neo1973_hifi_ops,
 },
 { /* Voice via BT */
 	.name = "Bluetooth",
 	.stream_name = "Voice",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.cpu_dai_name = "bluetooth-dai",
 	.codec_dai_name = "wm8753-voice",
-	.codec_name = "wm8753-codec.0-0x1a",
+	.codec_name = "wm8753-codec.0-001a",
 	.ops = &neo1973_voice_ops,
 },
 };
diff --git a/sound/soc/s3c24xx/s3c-pcm.c b/sound/soc/samsung/pcm.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c-pcm.c
rename to sound/soc/samsung/pcm.c
index 2e020e1..48d0b75 100644
--- a/sound/soc/s3c24xx/s3c-pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-pcm.c
+/* sound/soc/samsung/pcm.c
  *
  * ALSA SoC Audio Layer - S3C PCM-Controller driver
  *
@@ -29,8 +29,8 @@
 #include <plat/audio.h>
 #include <plat/dma.h>
 
-#include "s3c-dma.h"
-#include "s3c-pcm.h"
+#include "dma.h"
+#include "pcm.h"
 
 static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
 	.name		= "PCM Stereo out"
diff --git a/sound/soc/s3c24xx/s3c-pcm.h b/sound/soc/samsung/pcm.h
similarity index 98%
rename from sound/soc/s3c24xx/s3c-pcm.h
rename to sound/soc/samsung/pcm.h
index f60baa1..03393dc 100644
--- a/sound/soc/s3c24xx/s3c-pcm.h
+++ b/sound/soc/samsung/pcm.h
@@ -1,4 +1,4 @@
-/*  sound/soc/s3c24xx/s3c-pcm.h
+/*  sound/soc/samsung/pcm.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/s3c24xx/regs-i2s-v2.h b/sound/soc/samsung/regs-i2s-v2.h
similarity index 100%
rename from sound/soc/s3c24xx/regs-i2s-v2.h
rename to sound/soc/samsung/regs-i2s-v2.h
diff --git a/sound/soc/s3c24xx/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
similarity index 94%
rename from sound/soc/s3c24xx/rx1950_uda1380.c
rename to sound/soc/samsung/rx1950_uda1380.c
index 468cc11..f400274 100644
--- a/sound/soc/s3c24xx/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -25,7 +25,6 @@
 #include <linux/clk.h>
 
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/uda1380.h>
 #include <sound/jack.h>
 
@@ -35,7 +34,7 @@
 
 #include <asm/mach-types.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "../codecs/uda1380.h"
 
@@ -95,7 +94,7 @@
 		.cpu_dai_name	= "s3c24xx-iis",
 		.codec_dai_name	= "uda1380-hifi",
 		.init		= rx1950_uda1380_init,
-		.platform_name	= "s3c24xx-pcm-audio",
+		.platform_name	= "samsung-audio",
 		.codec_name	= "uda1380-codec.0-001a",
 		.ops		= &rx1950_ops,
 	},
@@ -228,26 +227,28 @@
 static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Add rx1950 specific widgets */
-	err = snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
+	err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
 				  ARRAY_SIZE(uda1380_dapm_widgets));
 
 	if (err)
 		return err;
 
 	/* Set up rx1950 specific audio path audio_mapnects */
-	err = snd_soc_dapm_add_routes(codec, audio_map,
+	err = snd_soc_dapm_add_routes(dapm, audio_map,
 				      ARRAY_SIZE(audio_map));
 
 	if (err)
 		return err;
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
 		&hp_jack);
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
similarity index 99%
rename from sound/soc/s3c24xx/s3c-i2s-v2.c
rename to sound/soc/samsung/s3c-i2s-v2.c
index b3866d5..094f36e 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-i2c-v2.c
+/* sound/soc/samsung/s3c-i2c-v2.c
  *
  * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs.
  *
@@ -28,7 +28,7 @@
 
 #include "regs-i2s-v2.h"
 #include "s3c-i2s-v2.h"
-#include "s3c-dma.h"
+#include "dma.h"
 
 #undef S3C_IIS_V2_SUPPORTED
 
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
similarity index 98%
rename from sound/soc/s3c24xx/s3c-i2s-v2.h
rename to sound/soc/samsung/s3c-i2s-v2.h
index d458301..f8297d9 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.h
+++ b/sound/soc/samsung/s3c-i2s-v2.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-i2s-v2.h
+/* sound/soc/samsung/s3c-i2s-v2.h
  *
  * ALSA Soc Audio Layer - S3C_I2SV2 I2S driver
  *
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c2412-i2s.c
rename to sound/soc/samsung/s3c2412-i2s.c
index 4a861cf..7ea8378 100644
--- a/sound/soc/s3c24xx/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c2412-i2s.c
+/* sound/soc/samsung/s3c2412-i2s.c
  *
  * ALSA Soc Audio Layer - S3C2412 I2S driver
  *
@@ -35,7 +35,7 @@
 #include <mach/regs-gpio.h>
 #include <mach/dma.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "regs-i2s-v2.h"
 #include "s3c2412-i2s.h"
 
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.h b/sound/soc/samsung/s3c2412-i2s.h
similarity index 95%
rename from sound/soc/s3c24xx/s3c2412-i2s.h
rename to sound/soc/samsung/s3c2412-i2s.h
index 01a0471..02ad579 100644
--- a/sound/soc/s3c24xx/s3c2412-i2s.h
+++ b/sound/soc/samsung/s3c2412-i2s.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c2412-i2s.c
+/* sound/soc/samsung/s3c2412-i2s.c
  *
  * ALSA Soc Audio Layer - S3C2412 I2S driver
  *
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
similarity index 99%
rename from sound/soc/s3c24xx/s3c24xx-i2s.c
rename to sound/soc/samsung/s3c24xx-i2s.c
index e060daa..13e41ed 100644
--- a/sound/soc/s3c24xx/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -38,7 +38,7 @@
 
 #include <plat/regs-iis.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 
 static struct s3c2410_dma_client s3c24xx_dma_client_out = {
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.h b/sound/soc/samsung/s3c24xx-i2s.h
similarity index 100%
rename from sound/soc/s3c24xx/s3c24xx-i2s.h
rename to sound/soc/samsung/s3c24xx-i2s.h
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c24xx_simtec.c
rename to sound/soc/samsung/s3c24xx_simtec.c
index c4c1114..a434032d 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.c
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec.c
+/* sound/soc/samsung/s3c24xx_simtec.c
  *
  * Copyright 2009 Simtec Electronics
  *
@@ -17,11 +17,10 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <plat/audio-simtec.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "s3c24xx_simtec.h"
 
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/samsung/s3c24xx_simtec.h
similarity index 93%
rename from sound/soc/s3c24xx/s3c24xx_simtec.h
rename to sound/soc/samsung/s3c24xx_simtec.h
index e63d5ff..8270748 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.h
+++ b/sound/soc/samsung/s3c24xx_simtec.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec.h
+/* sound/soc/samsung/s3c24xx_simtec.h
  *
  * Copyright 2009 Simtec Electronics
  *
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c
similarity index 85%
rename from sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
rename to sound/soc/samsung/s3c24xx_simtec_hermes.c
index f884537..08fcaaa 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
+++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
+/* sound/soc/samsung/s3c24xx_simtec_hermes.c
  *
  * Copyright 2009 Simtec Electronics
  *
@@ -14,16 +14,13 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <plat/audio-simtec.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "s3c24xx_simtec.h"
 
-#include "../codecs/tlv320aic3x.h"
-
 static const struct snd_soc_dapm_widget dapm_widgets[] = {
 	SND_SOC_DAPM_LINE("GSM Out", NULL),
 	SND_SOC_DAPM_LINE("GSM In", NULL),
@@ -76,19 +73,20 @@
 static int simtec_hermes_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
 				  ARRAY_SIZE(dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
+	snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
 	simtec_audio_init(rtd);
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -96,10 +94,10 @@
 static struct snd_soc_dai_link simtec_dai_aic33 = {
 	.name		= "tlv320aic33",
 	.stream_name	= "TLV320AIC33",
-	.codec_name	= "tlv320aic3x-codec.0-0x1a",
-	.cpu_dai_name	= "s3c24xx-i2s",
+	.codec_name	= "tlv320aic3x-codec.0-001a",
+	.cpu_dai_name	= "s3c24xx-iis",
 	.codec_dai_name = "tlv320aic3x-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 	.init		= simtec_hermes_init,
 };
 
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
similarity index 84%
rename from sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
rename to sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
index c096759..116e3e6 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
+++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
+/* sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
  *
  * Copyright 2009 Simtec Electronics
  *
@@ -14,11 +14,10 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <plat/audio-simtec.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "s3c24xx_simtec.h"
 
@@ -65,19 +64,20 @@
 static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
 				  ARRAY_SIZE(dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
+	snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
 	simtec_audio_init(rtd);
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -85,10 +85,10 @@
 static struct snd_soc_dai_link simtec_dai_aic23 = {
 	.name		= "tlv320aic23",
 	.stream_name	= "TLV320AIC23",
-	.codec_name	= "tlv320aic3x-codec.0-0x1a",
-	.cpu_dai_name	= "s3c24xx-i2s",
+	.codec_name	= "tlv320aic3x-codec.0-001a",
+	.cpu_dai_name	= "s3c24xx-iis",
 	.codec_dai_name = "tlv320aic3x-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 	.init		= simtec_tlv320aic23_init,
 };
 
diff --git a/sound/soc/s3c24xx/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c24xx_uda134x.c
rename to sound/soc/samsung/s3c24xx_uda134x.c
index bd48ffb..2c09e93 100644
--- a/sound/soc/s3c24xx/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -18,13 +18,12 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/s3c24xx_uda134x.h>
 #include <sound/uda134x.h>
 
 #include <plat/regs-iis.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "../codecs/uda134x.h"
 
@@ -229,9 +228,9 @@
 	.stream_name = "UDA134X",
 	.codec_name = "uda134x-hifi",
 	.codec_dai_name = "uda134x-hifi",
-	.cpu_dai_name = "s3c24xx-i2s",
+	.cpu_dai_name = "s3c24xx-iis",
 	.ops = &s3c24xx_uda134x_ops,
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 };
 
 static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
diff --git a/sound/soc/s3c24xx/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
similarity index 82%
rename from sound/soc/s3c24xx/smartq_wm8987.c
rename to sound/soc/samsung/smartq_wm8987.c
index dd20ca7..61e2b52 100644
--- a/sound/soc/s3c24xx/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/smartq_wm8987.c
+/* sound/soc/samsung/smartq_wm8987.c
  *
  * Copyright 2010 Maurus Cuelenaere <mcuelenaere@gmail.com>
  *
@@ -19,13 +19,13 @@
 
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
 
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
+#include "dma.h"
+#include "i2s.h"
 
 #include "../codecs/wm8750.h"
 
@@ -39,15 +39,11 @@
 	struct snd_pcm_hw_params *params)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
-	struct s3c_i2sv2_rate_calc div;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	unsigned int clk = 0;
 	int ret;
 
-	s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
-				s3c_i2sv2_get_clock(cpu_dai));
-
 	switch (params_rate(params)) {
 	case 8000:
 	case 16000:
@@ -78,23 +74,24 @@
 	if (ret < 0)
 		return ret;
 
+	/* Use PCLK for I2S signal generation */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
+					0, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* Gate the RCLK output on PAD */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
+					0, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
 	/* set the codec system clock for DAC and ADC */
 	ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
 				     SND_SOC_CLOCK_IN);
 	if (ret < 0)
 		return ret;
 
-	/* set MCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, div.fs_div);
-	if (ret < 0)
-		return ret;
-
-	/* set prescaler division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_PRESCALER,
-				     div.clk_div - 1);
-	if (ret < 0)
-		return ret;
-
 	return 0;
 }
 
@@ -156,12 +153,14 @@
 	{"LINPUT2", NULL, "Mic Bias"},
 };
 
-static int smartq_wm8987_init(struct snd_soc_codec *codec)
+static int smartq_wm8987_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err = 0;
 
 	/* Add SmartQ specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8987_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8987_dapm_widgets,
 				  ARRAY_SIZE(wm8987_dapm_widgets));
 
 	/* add SmartQ specific controls */
@@ -172,25 +171,25 @@
 		return err;
 
 	/* setup SmartQ specific audio path */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "LINPUT1");
-	snd_soc_dapm_nc_pin(codec, "RINPUT1");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "ROUT1");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "ROUT1");
 
 	/* set endpoints to default off mode */
-	snd_soc_dapm_enable_pin(codec, "Internal Speaker");
-	snd_soc_dapm_enable_pin(codec, "Internal Mic");
-	snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Internal Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Internal Mic");
+	snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 
-	err = snd_soc_dapm_sync(codec);
+	err = snd_soc_dapm_sync(dapm);
 	if (err)
 		return err;
 
 	/* Headphone jack detection */
-	err = snd_soc_jack_new(&snd_soc_smartq, "Headphone Jack",
+	err = snd_soc_jack_new(codec, "Headphone Jack",
 			       SND_JACK_HEADPHONE, &smartq_jack);
 	if (err)
 		return err;
@@ -211,9 +210,9 @@
 	{
 		.name		= "wm8987",
 		.stream_name	= "SmartQ Hi-Fi",
-		.cpu_dai_name	= "s3c64xx-i2s.0",
+		.cpu_dai_name	= "samsung-i2s.0",
 		.codec_dai_name	= "wm8750-hifi",
-		.platform_name	= "s3c24xx-pcm-audio",
+		.platform_name	= "samsung-audio",
 		.codec_name	= "wm8750-codec.0-0x1a",
 		.init		= smartq_wm8987_init,
 		.ops		= &smartq_hifi_ops,
@@ -275,6 +274,7 @@
 
 static void __exit smartq_exit(void)
 {
+	gpio_free(S3C64XX_GPK(12));
 	snd_soc_jack_free_gpios(&smartq_jack, ARRAY_SIZE(smartq_jack_gpios),
 				smartq_jack_gpios);
 
diff --git a/sound/soc/s3c24xx/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
similarity index 92%
rename from sound/soc/s3c24xx/smdk2443_wm9710.c
rename to sound/soc/samsung/smdk2443_wm9710.c
index 4613288..3be7e7e 100644
--- a/sound/soc/s3c24xx/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -17,10 +17,9 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 static struct snd_soc_card smdk2443;
 
@@ -28,10 +27,10 @@
 {
 	.name = "AC97",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "s3c-ac97",
+	.cpu_dai_name = "samsung-ac97",
 	.codec_dai_name = "ac97-hifi",
 	.codec_name = "ac97-codec",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 },
 };
 
diff --git a/sound/soc/s3c24xx/smdk_spdif.c b/sound/soc/samsung/smdk_spdif.c
similarity index 92%
rename from sound/soc/s3c24xx/smdk_spdif.c
rename to sound/soc/samsung/smdk_spdif.c
index c8bd904..b5c3fad 100644
--- a/sound/soc/s3c24xx/smdk_spdif.c
+++ b/sound/soc/samsung/smdk_spdif.c
@@ -18,7 +18,7 @@
 
 #include <sound/soc.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "spdif.h"
 
 /* Audio clock settings are belonged to board specific part. Every
@@ -28,7 +28,7 @@
 static int set_audio_clock_heirachy(struct platform_device *pdev)
 {
 	struct clk *fout_epll, *mout_epll, *sclk_audio0, *sclk_spdif;
-	int ret;
+	int ret = 0;
 
 	fout_epll = clk_get(NULL, "fout_epll");
 	if (IS_ERR(fout_epll)) {
@@ -61,7 +61,7 @@
 		goto out3;
 	}
 
-	/* Set audio clock heirachy for S/PDIF */
+	/* Set audio clock hierarchy for S/PDIF */
 	clk_set_parent(mout_epll, fout_epll);
 	clk_set_parent(sclk_audio0, mout_epll);
 	clk_set_parent(sclk_spdif, sclk_audio0);
@@ -79,7 +79,7 @@
 
 /* We should haved to set clock directly on this part because of clock
  * scheme of Samsudng SoCs did not support to set rates from abstrct
- * clock of it's heirachy.
+ * clock of it's hierarchy.
  */
 static int set_audio_clock_rate(unsigned long epll_rate,
 				unsigned long audio_rate)
@@ -152,12 +152,10 @@
 	.hw_params = smdk_hw_params,
 };
 
-static struct snd_soc_card smdk;
-
 static struct snd_soc_dai_link smdk_dai = {
 	.name = "S/PDIF",
 	.stream_name = "S/PDIF PCM Playback",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.cpu_dai_name = "samsung-spdif",
 	.codec_dai_name = "dit-hifi",
 	.codec_name = "spdif-dit",
@@ -183,7 +181,7 @@
 
 	ret = platform_device_add(smdk_snd_spdif_dit_device);
 	if (ret)
-		goto err2;
+		goto err1;
 
 	smdk_snd_spdif_device = platform_device_alloc("soc-audio", -1);
 	if (!smdk_snd_spdif_device) {
@@ -195,17 +193,21 @@
 
 	ret = platform_device_add(smdk_snd_spdif_device);
 	if (ret)
-		goto err1;
+		goto err3;
 
-	/* Set audio clock heirachy manually */
+	/* Set audio clock hierarchy manually */
 	ret = set_audio_clock_heirachy(smdk_snd_spdif_device);
 	if (ret)
-		goto err1;
+		goto err4;
 
 	return 0;
-err1:
+err4:
+	platform_device_del(smdk_snd_spdif_device);
+err3:
 	platform_device_put(smdk_snd_spdif_device);
 err2:
+	platform_device_del(smdk_snd_spdif_dit_device);
+err1:
 	platform_device_put(smdk_snd_spdif_dit_device);
 	return ret;
 }
@@ -213,6 +215,7 @@
 static void __exit smdk_exit(void)
 {
 	platform_device_unregister(smdk_snd_spdif_device);
+	platform_device_unregister(smdk_snd_spdif_dit_device);
 }
 
 module_init(smdk_init);
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
new file mode 100644
index 0000000..b2cff1a
--- /dev/null
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -0,0 +1,292 @@
+/*
+ *  smdk_wm8580.c
+ *
+ *  Copyright (c) 2009 Samsung Electronics Co. Ltd
+ *  Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <asm/mach-types.h>
+
+#include "../codecs/wm8580.h"
+#include "dma.h"
+#include "i2s.h"
+
+/*
+ * Default CFG switch settings to use this driver:
+ *
+ *   SMDK6410: Set CFG1 1-3 Off, CFG2 1-4 On
+ */
+
+/* SMDK has a 12MHz crystal attached to WM8580 */
+#define SMDK_WM8580_FREQ 12000000
+
+static int smdk_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pll_out;
+	int bfs, rfs, ret;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_U8:
+	case SNDRV_PCM_FORMAT_S8:
+		bfs = 16;
+		break;
+	case SNDRV_PCM_FORMAT_U16_LE:
+	case SNDRV_PCM_FORMAT_S16_LE:
+		bfs = 32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* The Fvco for WM8580 PLLs must fall within [90,100]MHz.
+	 * This criterion can't be met if we request PLL output
+	 * as {8000x256, 64000x256, 11025x256}Hz.
+	 * As a workaround, we instead pick the minimum rfs that makes
+	 * both rfs itself and (params_rate(params) * rfs) acceptable
+	 * to both the CODEC and the CPU.
+	 */
+	switch (params_rate(params)) {
+	case 16000:
+	case 22050:
+	case 32000:
+	case 44100:
+	case 48000:
+	case 88200:
+	case 96000:
+		rfs = 256;
+		break;
+	case 64000:
+		rfs = 384;
+		break;
+	case 8000:
+	case 11025:
+		rfs = 512;
+		break;
+	default:
+		return -EINVAL;
+	}
+	pll_out = params_rate(params) * rfs;
+
+	/* Set the Codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* Set the AP DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* Set WM8580 to drive MCLK from its PLLA */
+	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
+					WM8580_CLKSRC_PLLA);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
+					SMDK_WM8580_FREQ, pll_out);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
+				     pll_out, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * SMDK WM8580 DAI operations.
+ */
+static struct snd_soc_ops smdk_ops = {
+	.hw_params = smdk_hw_params,
+};
+
+/* SMDK Playback widgets */
+static const struct snd_soc_dapm_widget wm8580_dapm_widgets_pbk[] = {
+	SND_SOC_DAPM_HP("Front", NULL),
+	SND_SOC_DAPM_HP("Center+Sub", NULL),
+	SND_SOC_DAPM_HP("Rear", NULL),
+};
+
+/* SMDK Capture widgets */
+static const struct snd_soc_dapm_widget wm8580_dapm_widgets_cpt[] = {
+	SND_SOC_DAPM_MIC("MicIn", NULL),
+	SND_SOC_DAPM_LINE("LineIn", NULL),
+};
+
+/* SMDK-PAIFTX connections */
+static const struct snd_soc_dapm_route audio_map_tx[] = {
+	/* MicIn feeds AINL */
+	{"AINL", NULL, "MicIn"},
+
+	/* LineIn feeds AINL/R */
+	{"AINL", NULL, "LineIn"},
+	{"AINR", NULL, "LineIn"},
+};
+
+/* SMDK-PAIFRX connections */
+static const struct snd_soc_dapm_route audio_map_rx[] = {
+	/* Front Left/Right are fed VOUT1L/R */
+	{"Front", NULL, "VOUT1L"},
+	{"Front", NULL, "VOUT1R"},
+
+	/* Center/Sub are fed VOUT2L/R */
+	{"Center+Sub", NULL, "VOUT2L"},
+	{"Center+Sub", NULL, "VOUT2R"},
+
+	/* Rear Left/Right are fed VOUT3L/R */
+	{"Rear", NULL, "VOUT3L"},
+	{"Rear", NULL, "VOUT3R"},
+};
+
+static int smdk_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* Add smdk specific Capture widgets */
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_cpt,
+				  ARRAY_SIZE(wm8580_dapm_widgets_cpt));
+
+	/* Set up PAIFTX audio path */
+	snd_soc_dapm_add_routes(dapm, audio_map_tx, ARRAY_SIZE(audio_map_tx));
+
+	/* Enabling the microphone requires the fitting of a 0R
+	 * resistor to connect the line from the microphone jack.
+	 */
+	snd_soc_dapm_disable_pin(dapm, "MicIn");
+
+	/* signal a DAPM event */
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static int smdk_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* Add smdk specific Playback widgets */
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_pbk,
+				  ARRAY_SIZE(wm8580_dapm_widgets_pbk));
+
+	/* Set up PAIFRX audio path */
+	snd_soc_dapm_add_routes(dapm, audio_map_rx, ARRAY_SIZE(audio_map_rx));
+
+	/* signal a DAPM event */
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+enum {
+	PRI_PLAYBACK = 0,
+	PRI_CAPTURE,
+	SEC_PLAYBACK,
+};
+
+static struct snd_soc_dai_link smdk_dai[] = {
+	[PRI_PLAYBACK] = { /* Primary Playback i/f */
+		.name = "WM8580 PAIF RX",
+		.stream_name = "Playback",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8580-hifi-playback",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paifrx,
+		.ops = &smdk_ops,
+	},
+	[PRI_CAPTURE] = { /* Primary Capture i/f */
+		.name = "WM8580 PAIF TX",
+		.stream_name = "Capture",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8580-hifi-capture",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paiftx,
+		.ops = &smdk_ops,
+	},
+	[SEC_PLAYBACK] = { /* Sec_Fifo Playback i/f */
+		.name = "Sec_FIFO TX",
+		.stream_name = "Playback",
+		.cpu_dai_name = "samsung-i2s.x",
+		.codec_dai_name = "wm8580-hifi-playback",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paifrx,
+		.ops = &smdk_ops,
+	},
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK-I2S",
+	.dai_link = smdk_dai,
+	.num_links = 2,
+};
+
+static struct platform_device *smdk_snd_device;
+
+static int __init smdk_audio_init(void)
+{
+	int ret;
+	char *str;
+
+	if (machine_is_smdkc100() || machine_is_smdk6442()
+			|| machine_is_smdkv210() || machine_is_smdkc110()) {
+		smdk.num_links = 3;
+		/* Secondary is at offset SAMSUNG_I2S_SECOFF from Primary */
+		str = (char *)smdk_dai[SEC_PLAYBACK].cpu_dai_name;
+		str[strlen(str) - 1] = '0' + SAMSUNG_I2S_SECOFF;
+	} else if (machine_is_smdk6410()) {
+		str = (char *)smdk_dai[PRI_PLAYBACK].cpu_dai_name;
+		str[strlen(str) - 1] = '2';
+		str = (char *)smdk_dai[PRI_CAPTURE].cpu_dai_name;
+		str[strlen(str) - 1] = '2';
+	}
+
+	smdk_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smdk_snd_device, &smdk);
+	ret = platform_device_add(smdk_snd_device);
+
+	if (ret)
+		platform_device_put(smdk_snd_device);
+
+	return ret;
+}
+module_init(smdk_audio_init);
+
+static void __exit smdk_audio_exit(void)
+{
+	platform_device_unregister(smdk_snd_device);
+}
+module_exit(smdk_audio_exit);
+
+MODULE_AUTHOR("Jaswinder Singh, jassi.brar@samsung.com");
+MODULE_DESCRIPTION("ALSA SoC SMDK WM8580");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
new file mode 100644
index 0000000..e7c1009
--- /dev/null
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -0,0 +1,176 @@
+/*
+ *  smdk_wm8994.c
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include "../codecs/wm8994.h"
+
+ /*
+  * Default CFG switch settings to use this driver:
+  *	SMDKV310: CFG5-1000, CFG7-111111
+  */
+
+ /*
+  * Configure the audio route as follows:
+  * $ amixer sset 'DAC1' on,on
+  * $ amixer sset 'Right Headphone Mux' 'DAC'
+  * $ amixer sset 'Left Headphone Mux' 'DAC'
+  * $ amixer sset 'DAC1R Mixer AIF1.1' on
+  * $ amixer sset 'DAC1L Mixer AIF1.1' on
+  * $ amixer sset 'IN2L' on
+  * $ amixer sset 'IN2L PGA IN2LN' on
+  * $ amixer sset 'MIXINL IN2L' on
+  * $ amixer sset 'AIF1ADC1L Mixer ADC/DMIC' on
+  * $ amixer sset 'IN2R' on
+  * $ amixer sset 'IN2R PGA IN2RN' on
+  * $ amixer sset 'MIXINR IN2R' on
+  * $ amixer sset 'AIF1ADC1R Mixer ADC/DMIC' on
+  */
+
+/* SMDK has a 16.934MHz crystal attached to WM8994 */
+#define SMDK_WM8994_FREQ 16934000
+
+static int smdk_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pll_out;
+	int ret;
+
+	/* AIF1CLK should be >=3MHz for optimal performance */
+	if (params_rate(params) == 8000 || params_rate(params) == 11025)
+		pll_out = params_rate(params) * 512;
+	else
+		pll_out = params_rate(params) * 256;
+
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+					SMDK_WM8994_FREQ, pll_out);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
+					pll_out, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * SMDK WM8994 DAI operations.
+ */
+static struct snd_soc_ops smdk_ops = {
+	.hw_params = smdk_hw_params,
+};
+
+static int smdk_wm8994_init_paiftx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* HeadPhone */
+	snd_soc_dapm_enable_pin(dapm, "HPOUT1R");
+	snd_soc_dapm_enable_pin(dapm, "HPOUT1L");
+
+	/* MicIn */
+	snd_soc_dapm_enable_pin(dapm, "IN1LN");
+	snd_soc_dapm_enable_pin(dapm, "IN1RN");
+
+	/* LineIn */
+	snd_soc_dapm_enable_pin(dapm, "IN2LN");
+	snd_soc_dapm_enable_pin(dapm, "IN2RN");
+
+	/* Other pins NC */
+	snd_soc_dapm_nc_pin(dapm, "HPOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "HPOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTLN");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTLP");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTRP");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTRN");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "IN1LP");
+	snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
+	snd_soc_dapm_nc_pin(dapm, "IN1RP");
+	snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link smdk_dai[] = {
+	{ /* Primary DAI i/f */
+		.name = "WM8994 AIF1",
+		.stream_name = "Pri_Dai",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8994-aif1",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8994-codec",
+		.init = smdk_wm8994_init_paiftx,
+		.ops = &smdk_ops,
+	}, { /* Sec_Fifo Playback i/f */
+		.name = "Sec_FIFO TX",
+		.stream_name = "Sec_Dai",
+		.cpu_dai_name = "samsung-i2s.4",
+		.codec_dai_name = "wm8994-aif1",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8994-codec",
+		.ops = &smdk_ops,
+	},
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK-I2S",
+	.dai_link = smdk_dai,
+	.num_links = ARRAY_SIZE(smdk_dai),
+};
+
+static struct platform_device *smdk_snd_device;
+
+static int __init smdk_audio_init(void)
+{
+	int ret;
+
+	smdk_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smdk_snd_device, &smdk);
+
+	ret = platform_device_add(smdk_snd_device);
+	if (ret)
+		platform_device_put(smdk_snd_device);
+
+	return ret;
+}
+module_init(smdk_audio_init);
+
+static void __exit smdk_audio_exit(void)
+{
+	platform_device_unregister(smdk_snd_device);
+}
+module_exit(smdk_audio_exit);
+
+MODULE_DESCRIPTION("ALSA SoC SMDK WM8994");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c
similarity index 87%
rename from sound/soc/s3c24xx/smdk_wm9713.c
rename to sound/soc/samsung/smdk_wm9713.c
index 33ba8fd..ae5fed6 100644
--- a/sound/soc/s3c24xx/smdk_wm9713.c
+++ b/sound/soc/samsung/smdk_wm9713.c
@@ -15,8 +15,8 @@
 #include <linux/device.h>
 #include <sound/soc.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 static struct snd_soc_card smdk;
 
@@ -27,6 +27,7 @@
  *   SMDKC100: Set CFG6 1-3 On, CFG7 1   On
  *   SMDKC110: Set CFGB10 1-2 Off, CFGB12 1-3 On
  *   SMDKV210: Set CFGB10 1-2 Off, CFGB12 1-3 On
+ *   SMDKV310: Set CFG2 1-2 Off, CFG4 All On, CFG7 All Off, CFG8 1-On
  */
 
 /*
@@ -45,8 +46,8 @@
 static struct snd_soc_dai_link smdk_dai = {
 	.name = "AC97",
 	.stream_name = "AC97 PCM",
-	.platform_name = "s3c24xx-pcm-audio",
-	.cpu_dai_name = "s3c-ac97",
+	.platform_name = "samsung-audio",
+	.cpu_dai_name = "samsung-ac97",
 	.codec_dai_name = "wm9713-hifi",
 	.codec_name = "wm9713-codec",
 };
@@ -70,24 +71,27 @@
 
 	ret = platform_device_add(smdk_snd_wm9713_device);
 	if (ret)
-		goto err;
+		goto err1;
 
 	smdk_snd_ac97_device = platform_device_alloc("soc-audio", -1);
 	if (!smdk_snd_ac97_device) {
 		ret = -ENOMEM;
-		goto err;
+		goto err2;
 	}
 
 	platform_set_drvdata(smdk_snd_ac97_device, &smdk);
 
 	ret = platform_device_add(smdk_snd_ac97_device);
-	if (ret) {
-		platform_device_put(smdk_snd_ac97_device);
-		goto err;
-	}
+	if (ret)
+		goto err3;
 
 	return 0;
-err:
+
+err3:
+	platform_device_put(smdk_snd_ac97_device);
+err2:
+	platform_device_del(smdk_snd_wm9713_device);
+err1:
 	platform_device_put(smdk_snd_wm9713_device);
 	return ret;
 }
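
The reworked error path above follows the usual "unwind in reverse order of acquisition" goto idiom: a failed platform_device_add() only needs a put, while an already-added device needs del followed by put. A compilable, generic sketch of that ordering (the resources and failure points are simulated; only the label ordering is the point):

/* Generic sketch of the err1/err2/err3 unwind ordering used above. */
#include <stdio.h>

static int fail_step;	/* which step fails: 1, 2, 3, or 0 for none */

static int do_init(void)
{
	int ret = -1;

	if (fail_step == 1)		/* like platform_device_add(dev1) */
		goto err1;
	if (fail_step == 2)		/* like platform_device_alloc(dev2) */
		goto err2;
	if (fail_step == 3)		/* like platform_device_add(dev2) */
		goto err3;
	return 0;

err3:
	printf("  put(dev2)\n");
err2:
	printf("  del(dev1)\n");
err1:
	printf("  put(dev1)\n");
	return ret;
}

int main(void)
{
	for (fail_step = 1; fail_step <= 3; fail_step++) {
		printf("failure at step %d unwinds:\n", fail_step);
		do_init();
	}
	return 0;
}
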
diff --git a/sound/soc/s3c24xx/spdif.c b/sound/soc/samsung/spdif.c
similarity index 99%
rename from sound/soc/s3c24xx/spdif.c
rename to sound/soc/samsung/spdif.c
index ce554e9..f0816404 100644
--- a/sound/soc/s3c24xx/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/spdif.c
+/* sound/soc/samsung/spdif.c
  *
  * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
  *
@@ -20,7 +20,7 @@
 #include <plat/audio.h>
 #include <mach/dma.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "spdif.h"
 
 /* Registers */
diff --git a/sound/soc/s3c24xx/spdif.h b/sound/soc/samsung/spdif.h
similarity index 94%
rename from sound/soc/s3c24xx/spdif.h
rename to sound/soc/samsung/spdif.h
index 3ed5559..4f72cb4 100644
--- a/sound/soc/s3c24xx/spdif.h
+++ b/sound/soc/samsung/spdif.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/spdif.h
+/* sound/soc/samsung/spdif.h
  *
  * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
  *
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 7f0a496..d8e06a6 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -48,7 +48,7 @@
 
 config SND_FSI_AK4642
 	tristate "FSI-AK4642 sound support"
-	depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE
+	depends on SND_SOC_SH4_FSI && I2C
 	select SND_SOC_AK4642
 	help
 	  This option enables generic sound support for the
@@ -56,7 +56,7 @@
 
 config SND_FSI_DA7210
 	tristate "FSI-DA7210 sound support"
-	depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE
+	depends on SND_SOC_SH4_FSI && I2C
 	select SND_SOC_DA7210
 	help
 	  This option enables generic sound support for the
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
index d96602d..a14820a 100644
--- a/sound/soc/sh/fsi-ak4642.c
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -12,6 +12,14 @@
 #include <linux/platform_device.h>
 #include <sound/sh_fsi.h>
 
+struct fsi_ak4642_data {
+	const char *name;
+	const char *card;
+	const char *cpu_dai;
+	const char *codec;
+	const char *platform;
+};
+
 static int fsi_ak4642_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_dai *dai = rtd->codec_dai;
@@ -27,37 +35,42 @@
 }
 
 static struct snd_soc_dai_link fsi_dai_link = {
-	.name		= "AK4642",
-	.stream_name	= "AK4642",
-	.cpu_dai_name	= "fsia-dai", /* fsi A */
 	.codec_dai_name	= "ak4642-hifi",
-#ifdef CONFIG_MACH_AP4EVB
-	.platform_name	= "sh_fsi2",
-	.codec_name	= "ak4642-codec.0-0013",
-#else
-	.platform_name	= "sh_fsi.0",
-	.codec_name	= "ak4642-codec.0-0012",
-#endif
 	.init		= fsi_ak4642_dai_init,
-	.ops		= NULL,
 };
 
 static struct snd_soc_card fsi_soc_card  = {
-	.name		= "FSI (AK4642)",
 	.dai_link	= &fsi_dai_link,
 	.num_links	= 1,
 };
 
 static struct platform_device *fsi_snd_device;
 
-static int __init fsi_ak4642_init(void)
+static int fsi_ak4642_probe(struct platform_device *pdev)
 {
 	int ret = -ENOMEM;
+	const struct platform_device_id	*id_entry;
+	struct fsi_ak4642_data *pdata;
+
+	id_entry = pdev->id_entry;
+	if (!id_entry) {
+		dev_err(&pdev->dev, "unknown fsi ak4642\n");
+		return -ENODEV;
+	}
+
+	pdata = (struct fsi_ak4642_data *)id_entry->driver_data;
 
 	fsi_snd_device = platform_device_alloc("soc-audio", FSI_PORT_A);
 	if (!fsi_snd_device)
 		goto out;
 
+	fsi_dai_link.name		= pdata->name;
+	fsi_dai_link.stream_name	= pdata->name;
+	fsi_dai_link.cpu_dai_name	= pdata->cpu_dai;
+	fsi_dai_link.platform_name	= pdata->platform;
+	fsi_dai_link.codec_name		= pdata->codec;
+	fsi_soc_card.name		= pdata->card;
+
 	platform_set_drvdata(fsi_snd_device, &fsi_soc_card);
 	ret = platform_device_add(fsi_snd_device);
 
@@ -68,9 +81,108 @@
 	return ret;
 }
 
-static void __exit fsi_ak4642_exit(void)
+static int fsi_ak4642_remove(struct platform_device *pdev)
 {
 	platform_device_unregister(fsi_snd_device);
+	return 0;
+}
+
+static struct fsi_ak4642_data fsi_a_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSIA (AK4642)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_b_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSIB (AK4642)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_a_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSIA (AK4643)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_b_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSIB (AK4643)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi2_a_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSI2A (AK4642)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_b_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSI2B (AK4642)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_a_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSI2A (AK4643)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_b_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSI2B (AK4643)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi2",
+};
+
+static struct platform_device_id fsi_id_table[] = {
+	/* FSI */
+	{ "sh_fsi_a_ak4642",	(kernel_ulong_t)&fsi_a_ak4642 },
+	{ "sh_fsi_b_ak4642",	(kernel_ulong_t)&fsi_b_ak4642 },
+	{ "sh_fsi_a_ak4643",	(kernel_ulong_t)&fsi_a_ak4643 },
+	{ "sh_fsi_b_ak4643",	(kernel_ulong_t)&fsi_b_ak4643 },
+
+	/* FSI 2 */
+	{ "sh_fsi2_a_ak4642",	(kernel_ulong_t)&fsi2_a_ak4642 },
+	{ "sh_fsi2_b_ak4642",	(kernel_ulong_t)&fsi2_b_ak4642 },
+	{ "sh_fsi2_a_ak4643",	(kernel_ulong_t)&fsi2_a_ak4643 },
+	{ "sh_fsi2_b_ak4643",	(kernel_ulong_t)&fsi2_b_ak4643 },
+	{},
+};
+
+static struct platform_driver fsi_ak4642 = {
+	.driver = {
+		.name	= "fsi-ak4642-audio",
+	},
+	.probe		= fsi_ak4642_probe,
+	.remove		= fsi_ak4642_remove,
+	.id_table	= fsi_id_table,
+};
+
+static int __init fsi_ak4642_init(void)
+{
+	return platform_driver_register(&fsi_ak4642);
+}
+
+static void __exit fsi_ak4642_exit(void)
+{
+	platform_driver_unregister(&fsi_ak4642);
 }
 
 module_init(fsi_ak4642_init);
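
With probe() keyed off pdev->id_entry, a board now instantiates one of the fsi_id_table names rather than the driver name directly, and the platform bus matches it against the id table. A hedged board-side sketch (the device name is taken from the table above; everything else is illustrative, not part of this patch):

/* Board-side sketch: register a device whose name matches an
 * fsi_id_table entry, e.g. FSI2 port A wired to an AK4642. */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device *fsi_ak4642_dev;

static int __init board_sound_init(void)
{
	/* no resources are needed; the id string selects the config */
	fsi_ak4642_dev = platform_device_register_simple("sh_fsi2_a_ak4642",
							 -1, NULL, 0);
	if (IS_ERR(fsi_ak4642_dev))
		return PTR_ERR(fsi_ak4642_dev);
	return 0;
}
device_initcall(board_sound_init);
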
diff --git a/sound/soc/sh/fsi-da7210.c b/sound/soc/sh/fsi-da7210.c
index a6adb6e..e8df9da 100644
--- a/sound/soc/sh/fsi-da7210.c
+++ b/sound/soc/sh/fsi-da7210.c
@@ -18,7 +18,7 @@
 	struct snd_soc_dai *dai = rtd->codec_dai;
 
 	return snd_soc_dai_set_fmt(dai,
-				   SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				   SND_SOC_DAIFMT_I2S |
 				   SND_SOC_DAIFMT_CBM_CFM);
 }
 
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 4c2404b..2b06402 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -19,20 +19,26 @@
 #include <sound/soc.h>
 #include <sound/sh_fsi.h>
 
-#define DO_FMT		0x0000
-#define DOFF_CTL	0x0004
-#define DOFF_ST		0x0008
-#define DI_FMT		0x000C
-#define DIFF_CTL	0x0010
-#define DIFF_ST		0x0014
-#define CKG1		0x0018
-#define CKG2		0x001C
-#define DIDT		0x0020
-#define DODT		0x0024
-#define MUTE_ST		0x0028
-#define OUT_SEL		0x0030
-#define REG_END		OUT_SEL
+/* PortA/PortB register */
+#define REG_DO_FMT	0x0000
+#define REG_DOFF_CTL	0x0004
+#define REG_DOFF_ST	0x0008
+#define REG_DI_FMT	0x000C
+#define REG_DIFF_CTL	0x0010
+#define REG_DIFF_ST	0x0014
+#define REG_CKG1	0x0018
+#define REG_CKG2	0x001C
+#define REG_DIDT	0x0020
+#define REG_DODT	0x0024
+#define REG_MUTE_ST	0x0028
+#define REG_OUT_SEL	0x0030
 
+/* master register */
+#define MST_CLK_RST	0x0210
+#define MST_SOFT_RST	0x0214
+#define MST_FIFO_SZ	0x0218
+
+/* core register (depends on FSI version) */
 #define A_MST_CTLR	0x0180
 #define B_MST_CTLR	0x01A0
 #define CPU_INT_ST	0x01F4
@@ -41,22 +47,23 @@
 #define INT_ST		0x0200
 #define IEMSK		0x0204
 #define IMSK		0x0208
-#define MUTE		0x020C
-#define CLK_RST		0x0210
-#define SOFT_RST	0x0214
-#define FIFO_SZ		0x0218
-#define MREG_START	A_MST_CTLR
-#define MREG_END	FIFO_SZ
 
 /* DO_FMT */
 /* DI_FMT */
+#define CR_BWS_24	(0x0 << 20) /* FSI2 */
+#define CR_BWS_16	(0x1 << 20) /* FSI2 */
+#define CR_BWS_20	(0x2 << 20) /* FSI2 */
+
+#define CR_DTMD_PCM		(0x0 << 8) /* FSI2 */
+#define CR_DTMD_SPDIF_PCM	(0x1 << 8) /* FSI2 */
+#define CR_DTMD_SPDIF_STREAM	(0x2 << 8) /* FSI2 */
+
 #define CR_MONO		(0x0 << 4)
 #define CR_MONO_D	(0x1 << 4)
 #define CR_PCM		(0x2 << 4)
 #define CR_I2S		(0x3 << 4)
 #define CR_TDM		(0x4 << 4)
 #define CR_TDM_D	(0x5 << 4)
-#define CR_SPDIF	0x00100120
 
 /* DOFF_CTL */
 /* DIFF_CTL */
@@ -93,6 +100,10 @@
 #define IR		(1 <<  4) /* Interrupt Reset */
 #define FSISR		(1 <<  0) /* Software Reset */
 
+/* OUT_SEL (FSI2) */
+#define DMMD		(1 << 4) /* SPDIF output timing 0: Biphase only */
+				 /*			1: Biphase and serial */
+
 /* FIFO_SZ */
 #define FIFO_SZ_MASK	0x7
 
@@ -123,6 +134,9 @@
 	int buff_len;
 	int period_len;
 	int period_num;
+
+	int uerr_num;
+	int oerr_num;
 };
 
 struct fsi_priv {
@@ -133,8 +147,6 @@
 	struct fsi_stream capture;
 
 	long rate;
-
-	u32 mst_ctrl;
 };
 
 struct fsi_core {
@@ -143,6 +155,8 @@
 	u32 int_st;
 	u32 iemsk;
 	u32 imsk;
+	u32 a_mclk;
+	u32 b_mclk;
 };
 
 struct fsi_master {
@@ -182,62 +196,22 @@
 	__fsi_reg_write(reg, val);
 }
 
-static void fsi_reg_write(struct fsi_priv *fsi, u32 reg, u32 data)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
+#define fsi_reg_write(p, r, d)\
+	__fsi_reg_write((u32)(p->base + REG_##r), d)
 
-	__fsi_reg_write((u32)(fsi->base + reg), data);
-}
+#define fsi_reg_read(p, r)\
+	__fsi_reg_read((u32)(p->base + REG_##r))
 
-static u32 fsi_reg_read(struct fsi_priv *fsi, u32 reg)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return 0;
-	}
+#define fsi_reg_mask_set(p, r, m, d)\
+	__fsi_reg_mask_set((u32)(p->base + REG_##r), m, d)
 
-	return __fsi_reg_read((u32)(fsi->base + reg));
-}
-
-static void fsi_reg_mask_set(struct fsi_priv *fsi, u32 reg, u32 mask, u32 data)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
-	__fsi_reg_mask_set((u32)(fsi->base + reg), mask, data);
-}
-
-static void fsi_master_write(struct fsi_master *master, u32 reg, u32 data)
-{
-	unsigned long flags;
-
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
-	spin_lock_irqsave(&master->lock, flags);
-	__fsi_reg_write((u32)(master->base + reg), data);
-	spin_unlock_irqrestore(&master->lock, flags);
-}
-
-static u32 fsi_master_read(struct fsi_master *master, u32 reg)
+#define fsi_master_read(p, r) _fsi_master_read(p, MST_##r)
+#define fsi_core_read(p, r)   _fsi_master_read(p, p->core->r)
+static u32 _fsi_master_read(struct fsi_master *master, u32 reg)
 {
 	u32 ret;
 	unsigned long flags;
 
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return 0;
-	}
-
 	spin_lock_irqsave(&master->lock, flags);
 	ret = __fsi_reg_read((u32)(master->base + reg));
 	spin_unlock_irqrestore(&master->lock, flags);
@@ -245,17 +219,13 @@
 	return ret;
 }
 
-static void fsi_master_mask_set(struct fsi_master *master,
+#define fsi_master_mask_set(p, r, m, d) _fsi_master_mask_set(p, MST_##r, m, d)
+#define fsi_core_mask_set(p, r, m, d)  _fsi_master_mask_set(p, p->core->r, m, d)
+static void _fsi_master_mask_set(struct fsi_master *master,
 			       u32 reg, u32 mask, u32 data)
 {
 	unsigned long flags;
 
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
 	spin_lock_irqsave(&master->lock, flags);
 	__fsi_reg_mask_set((u32)(master->base + reg), mask, data);
 	spin_unlock_irqrestore(&master->lock, flags);
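
The range-checked accessor functions are replaced above by token-pasting macros, so fsi_reg_write(fsi, DO_FMT, data) expands to __fsi_reg_write((u32)(fsi->base + REG_DO_FMT), data), and fsi_master_read(master, FIFO_SZ) resolves MST_FIFO_SZ at compile time. A standalone illustration of the ## expansion, with offsets copied from the new defines in this file's diff:

/* Stand-alone illustration of the REG_##r / MST_##r token pasting
 * used above (not kernel code; offsets copied from the new macros). */
#include <stdio.h>

#define REG_DO_FMT	0x0000
#define REG_CKG1	0x0018
#define MST_FIFO_SZ	0x0218

#define reg_offset(r)	(REG_##r)	/* reg_offset(CKG1) -> REG_CKG1 */
#define mst_offset(r)	(MST_##r)	/* mst_offset(FIFO_SZ) -> MST_FIFO_SZ */

int main(void)
{
	printf("DO_FMT=%#x CKG1=%#x FIFO_SZ=%#x\n",
	       reg_offset(DO_FMT), reg_offset(CKG1), mst_offset(FIFO_SZ));
	return 0;
}
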
@@ -359,27 +329,41 @@
 	io->buff_offset	= 0;
 	io->period_len	= period_len;
 	io->period_num	= 0;
+	io->oerr_num	= -1; /* ignore 1st err */
+	io->uerr_num	= -1; /* ignore 1st err */
 }
 
 static void fsi_stream_pop(struct fsi_priv *fsi, int is_play)
 {
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
+	struct snd_soc_dai *dai = fsi_get_dai(io->substream);
+
+	if (io->oerr_num > 0)
+		dev_err(dai->dev, "over_run = %d\n", io->oerr_num);
+
+	if (io->uerr_num > 0)
+		dev_err(dai->dev, "under_run = %d\n", io->uerr_num);
 
 	io->substream	= NULL;
 	io->buff_len	= 0;
 	io->buff_offset	= 0;
 	io->period_len	= 0;
 	io->period_num	= 0;
+	io->oerr_num	= 0;
+	io->uerr_num	= 0;
 }
 
 static int fsi_get_fifo_data_num(struct fsi_priv *fsi, int is_play)
 {
 	u32 status;
-	u32 reg = is_play ? DOFF_ST : DIFF_ST;
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
 	int data_num;
 
-	status = fsi_reg_read(fsi, reg);
+	status = is_play ?
+		fsi_reg_read(fsi, DOFF_ST) :
+		fsi_reg_read(fsi, DIFF_ST);
+
 	data_num = 0x1ff & (status >> 8);
 	data_num *= io->chan_num;
 
@@ -406,6 +390,27 @@
 	return frames_to_bytes(runtime, 1) / io->chan_num;
 }
 
+static void fsi_count_fifo_err(struct fsi_priv *fsi)
+{
+	u32 ostatus = fsi_reg_read(fsi, DOFF_ST);
+	u32 istatus = fsi_reg_read(fsi, DIFF_ST);
+
+	if (ostatus & ERR_OVER)
+		fsi->playback.oerr_num++;
+
+	if (ostatus & ERR_UNDER)
+		fsi->playback.uerr_num++;
+
+	if (istatus & ERR_OVER)
+		fsi->capture.oerr_num++;
+
+	if (istatus & ERR_UNDER)
+		fsi->capture.uerr_num++;
+
+	fsi_reg_write(fsi, DOFF_ST, 0);
+	fsi_reg_write(fsi, DIFF_ST, 0);
+}
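
The counters incremented here start at -1 in fsi_stream_push() so the first, often spurious, FIFO error at stream start is swallowed; fsi_stream_pop() then reports only a positive total. A compact user-space sketch of that convention (values are made up):

/* Sketch of the oerr_num/uerr_num convention: init to -1, count,
 * report only if the total went positive. */
#include <stdio.h>

int main(void)
{
	int uerr_num = -1;		/* ignore 1st err */
	int observed_underruns = 1;	/* e.g. only the startup glitch */

	for (int i = 0; i < observed_underruns; i++)
		uerr_num++;

	if (uerr_num > 0)
		printf("under_run = %d\n", uerr_num);
	else
		printf("nothing reported (only the tolerated first error)\n");
	return 0;
}
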
+
 /*
  *		dma function
  */
@@ -473,8 +478,8 @@
 	u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play));
 	struct fsi_master *master = fsi_get_master(fsi);
 
-	fsi_master_mask_set(master, master->core->imsk,  data, data);
-	fsi_master_mask_set(master, master->core->iemsk, data, data);
+	fsi_core_mask_set(master, imsk,  data, data);
+	fsi_core_mask_set(master, iemsk, data, data);
 }
 
 static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
@@ -482,18 +487,13 @@
 	u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play));
 	struct fsi_master *master = fsi_get_master(fsi);
 
-	fsi_master_mask_set(master, master->core->imsk,  data, 0);
-	fsi_master_mask_set(master, master->core->iemsk, data, 0);
+	fsi_core_mask_set(master, imsk,  data, 0);
+	fsi_core_mask_set(master, iemsk, data, 0);
 }
 
 static u32 fsi_irq_get_status(struct fsi_master *master)
 {
-	return fsi_master_read(master, master->core->int_st);
-}
-
-static void fsi_irq_clear_all_status(struct fsi_master *master)
-{
-	fsi_master_write(master, master->core->int_st, 0);
+	return fsi_core_read(master, int_st);
 }
 
 static void fsi_irq_clear_status(struct fsi_priv *fsi)
@@ -505,7 +505,7 @@
 	data |= AB_IO(1, fsi_get_port_shift(fsi, 1));
 
 	/* clear interrupt factor */
-	fsi_master_mask_set(master, master->core->int_st, data, 0);
+	fsi_core_mask_set(master, int_st, data, 0);
 }
 
 /*
@@ -516,17 +516,19 @@
 static void fsi_spdif_clk_ctrl(struct fsi_priv *fsi, int enable)
 {
 	struct fsi_master *master = fsi_get_master(fsi);
-	u32 val = BP | SE;
+	u32 mask, val;
 
 	if (master->core->ver < 2) {
 		pr_err("fsi: register access err (%s)\n", __func__);
 		return;
 	}
 
-	if (enable)
-		fsi_master_mask_set(master, fsi->mst_ctrl, val, val);
-	else
-		fsi_master_mask_set(master, fsi->mst_ctrl, val, 0);
+	mask = BP | SE;
+	val = enable ? mask : 0;
+
+	fsi_is_port_a(fsi) ?
+		fsi_core_mask_set(master, a_mclk, mask, val) :
+		fsi_core_mask_set(master, b_mclk, mask, val);
 }
 
 /*
@@ -550,7 +552,7 @@
 {
 	struct fsi_master *master = fsi_get_master(fsi);
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
-	u32 ctrl, shift, i;
+	u32 shift, i;
 
 	/* get on-chip RAM capacity */
 	shift = fsi_master_read(master, FIFO_SZ);
@@ -583,13 +585,17 @@
 	dev_dbg(dai->dev, "%d channel %d store\n",
 		io->chan_num, io->fifo_max_num);
 
-	ctrl = is_play ? DOFF_CTL : DIFF_CTL;
-
-	/* set interrupt generation factor */
-	fsi_reg_write(fsi, ctrl, IRQ_HALF);
-
-	/* clear FIFO */
-	fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR);
+	/*
+	 * set interrupt generation factor
+	 * clear FIFO
+	 */
+	if (is_play) {
+		fsi_reg_write(fsi,	DOFF_CTL, IRQ_HALF);
+		fsi_reg_mask_set(fsi,	DOFF_CTL, FIFO_CLR, FIFO_CLR);
+	} else {
+		fsi_reg_write(fsi,	DIFF_CTL, IRQ_HALF);
+		fsi_reg_mask_set(fsi,	DIFF_CTL, FIFO_CLR, FIFO_CLR);
+	}
 }
 
 static void fsi_soft_all_reset(struct fsi_master *master)
@@ -604,13 +610,12 @@
 	mdelay(10);
 }
 
-static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int startup, int stream)
+static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int stream)
 {
 	struct snd_pcm_runtime *runtime;
 	struct snd_pcm_substream *substream = NULL;
 	int is_play = fsi_stream_is_play(stream);
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
-	u32 status_reg = is_play ? DOFF_ST : DIFF_ST;
 	int data_residue_num;
 	int data_num;
 	int data_num_max;
@@ -698,35 +703,20 @@
 	/* update buff_offset */
 	io->buff_offset += fsi_num2offset(data_num, ch_width);
 
-	/* check fifo status */
-	if (!startup) {
-		struct snd_soc_dai *dai = fsi_get_dai(substream);
-		u32 status = fsi_reg_read(fsi, status_reg);
-
-		if (status & ERR_OVER)
-			dev_err(dai->dev, "over run\n");
-		if (status & ERR_UNDER)
-			dev_err(dai->dev, "under run\n");
-	}
-	fsi_reg_write(fsi, status_reg, 0);
-
-	/* re-enable irq */
-	fsi_irq_enable(fsi, is_play);
-
 	if (over_period)
 		snd_pcm_period_elapsed(substream);
 
 	return 0;
 }
 
-static int fsi_data_pop(struct fsi_priv *fsi, int startup)
+static int fsi_data_pop(struct fsi_priv *fsi)
 {
-	return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_CAPTURE);
+	return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_CAPTURE);
 }
 
-static int fsi_data_push(struct fsi_priv *fsi, int startup)
+static int fsi_data_push(struct fsi_priv *fsi)
 {
-	return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_PLAYBACK);
+	return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_PLAYBACK);
 }
 
 static irqreturn_t fsi_interrupt(int irq, void *data)
@@ -739,15 +729,19 @@
 	fsi_master_mask_set(master, SOFT_RST, IR, IR);
 
 	if (int_st & AB_IO(1, AO_SHIFT))
-		fsi_data_push(&master->fsia, 0);
+		fsi_data_push(&master->fsia);
 	if (int_st & AB_IO(1, BO_SHIFT))
-		fsi_data_push(&master->fsib, 0);
+		fsi_data_push(&master->fsib);
 	if (int_st & AB_IO(1, AI_SHIFT))
-		fsi_data_pop(&master->fsia, 0);
+		fsi_data_pop(&master->fsia);
 	if (int_st & AB_IO(1, BI_SHIFT))
-		fsi_data_pop(&master->fsib, 0);
+		fsi_data_pop(&master->fsib);
 
-	fsi_irq_clear_all_status(master);
+	fsi_count_fifo_err(&master->fsia);
+	fsi_count_fifo_err(&master->fsib);
+
+	fsi_irq_clear_status(&master->fsia);
+	fsi_irq_clear_status(&master->fsib);
 
 	return IRQ_HANDLED;
 }
@@ -764,7 +758,6 @@
 	struct fsi_stream *io;
 	u32 flags = fsi_get_info_flags(fsi);
 	u32 fmt;
-	u32 reg;
 	u32 data;
 	int is_play = fsi_is_play(substream);
 	int is_master;
@@ -796,7 +789,6 @@
 
 	/* do fmt, di fmt */
 	data = 0;
-	reg = is_play ? DO_FMT : DI_FMT;
 	fmt = is_play ? SH_FSI_GET_OFMT(flags) : SH_FSI_GET_IFMT(flags);
 	switch (fmt) {
 	case SH_FSI_FMT_MONO:
@@ -830,16 +822,18 @@
 			dev_err(dai->dev, "This FSI can not use SPDIF\n");
 			return -EINVAL;
 		}
-		data = CR_SPDIF;
+		data = CR_BWS_16 | CR_DTMD_SPDIF_PCM | CR_PCM;
 		io->chan_num = 2;
 		fsi_spdif_clk_ctrl(fsi, 1);
-		fsi_reg_mask_set(fsi, OUT_SEL, 0x0010, 0x0010);
+		fsi_reg_mask_set(fsi, OUT_SEL, DMMD, DMMD);
 		break;
 	default:
 		dev_err(dai->dev, "unknown format.\n");
 		return -EINVAL;
 	}
-	fsi_reg_write(fsi, reg, data);
+	is_play ?
+		fsi_reg_write(fsi, DO_FMT, data) :
+		fsi_reg_write(fsi, DI_FMT, data);
 
 	/* irq clear */
 	fsi_irq_disable(fsi, is_play);
@@ -883,7 +877,8 @@
 		fsi_stream_push(fsi, is_play, substream,
 				frames_to_bytes(runtime, runtime->buffer_size),
 				frames_to_bytes(runtime, runtime->period_size));
-		ret = is_play ? fsi_data_push(fsi, 1) : fsi_data_pop(fsi, 1);
+		ret = is_play ? fsi_data_push(fsi) : fsi_data_pop(fsi);
+		fsi_irq_enable(fsi, is_play);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		fsi_irq_disable(fsi, is_play);
@@ -1174,12 +1169,10 @@
 	/* FSI A setting */
 	master->fsia.base	= master->base;
 	master->fsia.master	= master;
-	master->fsia.mst_ctrl	= A_MST_CTLR;
 
 	/* FSI B setting */
 	master->fsib.base	= master->base + 0x40;
 	master->fsib.master	= master;
-	master->fsib.mst_ctrl	= B_MST_CTLR;
 
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_resume(&pdev->dev);
@@ -1266,6 +1259,8 @@
 	.int_st	= CPU_INT_ST,
 	.iemsk	= CPU_IEMSK,
 	.imsk	= CPU_IMSK,
+	.a_mclk	= A_MST_CTLR,
+	.b_mclk	= B_MST_CTLR,
 };
 
 static struct platform_device_id fsi_id_table[] = {
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index ac6c49c..6088a6a 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -8,11 +8,11 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/clkdev.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
 
-#include <asm/clkdev.h>
 #include <asm/clock.h>
 
 #include <cpu/sh7722.h>
@@ -20,7 +20,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "../codecs/wm8978.h"
 #include "siu.h"
@@ -140,11 +139,12 @@
 static int migor_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, migor_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, migor_dapm_widgets,
 				  ARRAY_SIZE(migor_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/sh/sh7760-ac97.c b/sound/soc/sh/sh7760-ac97.c
index f8e0ab8..917d3ce 100644
--- a/sound/soc/sh/sh7760-ac97.c
+++ b/sound/soc/sh/sh7760-ac97.c
@@ -12,7 +12,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/io.h>
 
 #define IPSEL 0xFE400034
@@ -23,7 +22,7 @@
 
 static int machine_init(struct snd_soc_pcm_runtime *rtd)
 {
-	snd_soc_dapm_sync(rtd->codec);
+	snd_soc_dapm_sync(&rtd->codec->dapm);
 	return 0;
 }
 
diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h
index 9f4dcb9..83c3430 100644
--- a/sound/soc/sh/siu.h
+++ b/sound/soc/sh/siu.h
@@ -75,7 +75,7 @@
 
 #include <sound/core.h>
 #include <sound/pcm.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #define SIU_PERIOD_BYTES_MAX	8192		/* DMA transfer/period size */
 #define SIU_PERIOD_BYTES_MIN	256		/* DMA transfer/period size */
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index af53b64..4973c29 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -28,7 +28,7 @@
 #include <asm/siu.h>
 
 #include <sound/control.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #include "siu.h"
 
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index ed29c9e..a423bab 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -29,7 +29,7 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #include <asm/siu.h>
 
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index d214f02..8c2a21a 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -14,27 +14,34 @@
 #include <linux/i2c.h>
 #include <linux/spi/spi.h>
 #include <sound/soc.h>
+#include <linux/lzo.h>
+#include <linux/bitmap.h>
+#include <linux/rbtree.h>
 
 static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 		snd_soc_codec_volatile_register(codec, reg)) {
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[2];
 	int ret;
 
@@ -42,16 +49,17 @@
 	data[1] = value & 0x00ff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 2);
 	if (ret == 2)
 		return 0;
@@ -77,7 +85,7 @@
 	msg[1] = data[0];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -94,23 +102,27 @@
 static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 		snd_soc_codec_volatile_register(codec, reg)) {
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[2];
 	int ret;
 
@@ -118,16 +130,17 @@
 	data[1] = value & 0x00ff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 2);
 	if (ret == 2)
 		return 0;
@@ -153,7 +166,7 @@
 	msg[1] = data[1];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -170,24 +183,25 @@
 static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u8 *cache = codec->reg_cache;
 	u8 data[2];
+	int ret;
 
 	reg &= 0xff;
 	data[0] = reg;
 	data[1] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	if (codec->hw_write(codec->control_data, data, 2) == 2)
 		return 0;
 	else
@@ -197,7 +211,8 @@
 static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u8 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	reg &= 0xff;
 	if (reg >= codec->driver->reg_cache_size ||
@@ -205,10 +220,14 @@
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 #if defined(CONFIG_SPI_MASTER)
@@ -227,7 +246,7 @@
 	msg[1] = data[1];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -244,24 +263,25 @@
 static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
 			      unsigned int value)
 {
-	u16 *reg_cache = codec->reg_cache;
 	u8 data[3];
+	int ret;
 
 	data[0] = reg;
 	data[1] = (value >> 8) & 0xff;
 	data[2] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-	    reg < codec->driver->reg_cache_size)
-		reg_cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	if (codec->hw_write(codec->control_data, data, 3) == 3)
 		return 0;
 	else
@@ -271,17 +291,22 @@
 static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
 				      unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 	    snd_soc_codec_volatile_register(codec, reg)) {
 		if (codec->cache_only)
 			return -1;
 
+		BUG_ON(!codec->hw_read);
 		return codec->hw_read(codec, reg);
-	} else {
-		return cache[reg];
 	}
+
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 #if defined(CONFIG_SPI_MASTER)
@@ -301,7 +326,7 @@
 	msg[2] = data[2];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -420,7 +445,8 @@
 static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u8 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	reg &= 0xff;
 	if (reg >= codec->driver->reg_cache_size ||
@@ -428,16 +454,19 @@
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u8 *cache = codec->reg_cache;
 	u8 data[3];
 	int ret;
 
@@ -447,16 +476,17 @@
 
 	reg &= 0xff;
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 3);
 	if (ret == 3)
 		return 0;
@@ -483,7 +513,7 @@
 	msg[2] = data[2];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -534,23 +564,28 @@
 static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
 				       unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 	    snd_soc_codec_volatile_register(codec, reg)) {
 		if (codec->cache_only)
 			return -1;
 
+		BUG_ON(!codec->hw_read);
 		return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+
+	return val;
 }
 
 static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
 			       unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[4];
 	int ret;
 
@@ -560,16 +595,17 @@
 	data[3] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 4);
 	if (ret == 4)
 		return 0;
@@ -597,7 +633,7 @@
 	msg[3] = data[3];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -692,8 +728,8 @@
 		return -EINVAL;
 	}
 
-	codec->driver->write = io_types[i].write;
-	codec->driver->read = io_types[i].read;
+	codec->write = io_types[i].write;
+	codec->read = io_types[i].read;
 
 	switch (control) {
 	case SND_SOC_CUSTOM:
@@ -724,3 +760,930 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
+
+struct snd_soc_rbtree_node {
+	struct rb_node node;
+	unsigned int reg;
+	unsigned int value;
+	unsigned int defval;
+} __attribute__ ((packed));
+
+struct snd_soc_rbtree_ctx {
+	struct rb_root root;
+};
+
+static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
+	struct rb_root *root, unsigned int reg)
+{
+	struct rb_node *node;
+	struct snd_soc_rbtree_node *rbnode;
+
+	node = root->rb_node;
+	while (node) {
+		rbnode = container_of(node, struct snd_soc_rbtree_node, node);
+		if (rbnode->reg < reg)
+			node = node->rb_left;
+		else if (rbnode->reg > reg)
+			node = node->rb_right;
+		else
+			return rbnode;
+	}
+
+	return NULL;
+}
+
+static int snd_soc_rbtree_insert(struct rb_root *root,
+				 struct snd_soc_rbtree_node *rbnode)
+{
+	struct rb_node **new, *parent;
+	struct snd_soc_rbtree_node *rbnode_tmp;
+
+	parent = NULL;
+	new = &root->rb_node;
+	while (*new) {
+		rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
+					  node);
+		parent = *new;
+		if (rbnode_tmp->reg < rbnode->reg)
+			new = &((*new)->rb_left);
+		else if (rbnode_tmp->reg > rbnode->reg)
+			new = &((*new)->rb_right);
+		else
+			return 0;
+	}
+
+	/* insert the node into the rbtree */
+	rb_link_node(&rbnode->node, parent, new);
+	rb_insert_color(&rbnode->node, root);
+
+	return 1;
+}
+
+static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct rb_node *node;
+	struct snd_soc_rbtree_node *rbnode;
+	unsigned int val;
+	int ret;
+
+	rbtree_ctx = codec->reg_cache;
+	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
+		if (rbnode->value == rbnode->defval)
+			continue;
+		ret = snd_soc_cache_read(codec, rbnode->reg, &val);
+		if (ret)
+			return ret;
+		ret = snd_soc_write(codec, rbnode->reg, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			rbnode->reg, val);
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
+				      unsigned int reg, unsigned int value)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbnode;
+
+	rbtree_ctx = codec->reg_cache;
+	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
+	if (rbnode) {
+		if (rbnode->value == value)
+			return 0;
+		rbnode->value = value;
+	} else {
+		/* bail out early, no need to create the rbnode yet */
+		if (!value)
+			return 0;
+		/*
+		 * for uninitialized registers whose value is changed
+		 * from the default zero, create an rbnode and insert
+		 * it into the tree.
+		 */
+		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+		if (!rbnode)
+			return -ENOMEM;
+		rbnode->reg = reg;
+		rbnode->value = value;
+		snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
+				     unsigned int reg, unsigned int *value)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbnode;
+
+	rbtree_ctx = codec->reg_cache;
+	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
+	if (rbnode) {
+		*value = rbnode->value;
+	} else {
+		/* uninitialized registers default to 0 */
+		*value = 0;
+	}
+
+	return 0;
+}
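
One detail worth noting about the two helpers further up: the lookup and insert comparisons are mirrored relative to the usual smaller-keys-to-the-left convention, but they are mirrored consistently, so the tree behaves correctly; the visible effect is simply that the sync walk in snd_soc_rbtree_cache_sync() visits registers from highest to lowest. The cache semantics implemented by the write/read pair above are otherwise straightforward: a register with no node reads back as 0, and writing 0 to an uncached register allocates nothing. A user-space analogue, with a plain linked list standing in for the rbtree purely for illustration:

/* User-space analogue (not the kernel API): lazy, default-zero register map. */
#include <stdio.h>
#include <stdlib.h>

struct node { unsigned reg, val; struct node *next; };
static struct node *head;

static struct node *lookup(unsigned reg)
{
	for (struct node *n = head; n; n = n->next)
		if (n->reg == reg)
			return n;
	return NULL;
}

static int cache_write(unsigned reg, unsigned val)
{
	struct node *n = lookup(reg);

	if (n) {
		n->val = val;
		return 0;
	}
	if (!val)			/* default-zero registers are never stored */
		return 0;
	n = calloc(1, sizeof(*n));
	if (!n)
		return -1;
	n->reg = reg;
	n->val = val;
	n->next = head;
	head = n;
	return 0;
}

static unsigned cache_read(unsigned reg)
{
	struct node *n = lookup(reg);

	return n ? n->val : 0;		/* absent register reads back as 0 */
}

int main(void)
{
	cache_write(0x10, 0);		/* no node allocated */
	cache_write(0x11, 0xabcd);	/* first nonzero write allocates one */
	printf("0x10=%#x 0x11=%#x\n", cache_read(0x10), cache_read(0x11));
	return 0;
}
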
+
+static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
+{
+	struct rb_node *next;
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbtree_node;
+
+	/* if we've already been called then just return */
+	rbtree_ctx = codec->reg_cache;
+	if (!rbtree_ctx)
+		return 0;
+
+	/* free up the rbtree */
+	next = rb_first(&rbtree_ctx->root);
+	while (next) {
+		rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
+		next = rb_next(&rbtree_node->node);
+		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+		kfree(rbtree_node);
+	}
+
+	/* release the resources */
+	kfree(codec->reg_cache);
+	codec->reg_cache = NULL;
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+
+	codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+	if (!codec->reg_cache)
+		return -ENOMEM;
+
+	rbtree_ctx = codec->reg_cache;
+	rbtree_ctx->root = RB_ROOT;
+
+	if (!codec->reg_def_copy)
+		return 0;
+
+/*
+ * populate the rbtree with the initialized registers.  All other
+ * registers will be inserted into the tree when they are first written.
+ *
+ * The reasoning behind this is that we need to step through and
+ * dereference the cache in u8/u16 increments without sacrificing
+ * portability.  This could also be done using memcpy() but that would
+ * be slightly more cryptic.
+ */
+#define snd_soc_rbtree_populate(cache)					\
+({									\
+	int ret, i;							\
+	struct snd_soc_rbtree_node *rbtree_node;			\
+									\
+	ret = 0;							\
+	cache = codec->reg_def_copy;					\
+	for (i = 0; i < codec->driver->reg_cache_size; ++i) {		\
+		if (!cache[i])						\
+			continue;					\
+		rbtree_node = kzalloc(sizeof *rbtree_node, GFP_KERNEL);	\
+		if (!rbtree_node) {					\
+			ret = -ENOMEM;					\
+			snd_soc_cache_exit(codec);			\
+			break;						\
+		}							\
+		rbtree_node->reg = i;					\
+		rbtree_node->value = cache[i];				\
+		rbtree_node->defval = cache[i];				\
+		snd_soc_rbtree_insert(&rbtree_ctx->root,		\
+				      rbtree_node);			\
+	}								\
+	ret;								\
+})
+
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		const u8 *cache;
+
+		return snd_soc_rbtree_populate(cache);
+	}
+	case 2: {
+		const u16 *cache;
+
+		return snd_soc_rbtree_populate(cache);
+	}
+	default:
+		BUG();
+	}
+
+	return 0;
+}
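
snd_soc_rbtree_populate() above is a GNU C statement expression, which lets the same default-table walk be instantiated for both u8 and u16 caches without duplicating the loop. A tiny standalone example of that construct (GNU C, as used throughout the kernel):

/* Standalone example of the statement-expression technique used by
 * snd_soc_rbtree_populate(): one loop body, two pointer types. */
#include <stdio.h>

#define count_nonzero(cache, table, n)		\
({						\
	int __cnt = 0, __i;			\
	cache = (table);			\
	for (__i = 0; __i < (n); __i++)		\
		if (cache[__i])			\
			__cnt++;		\
	__cnt;					\
})

int main(void)
{
	static const unsigned char defaults8[] = { 0x00, 0x1a, 0x00, 0x03 };
	static const unsigned short defaults16[] = { 0x0000, 0x8000, 0x00ff };
	const unsigned char *c8;
	const unsigned short *c16;

	printf("u8 defaults:  %d nonzero\n", count_nonzero(c8, defaults8, 4));
	printf("u16 defaults: %d nonzero\n", count_nonzero(c16, defaults16, 3));
	return 0;
}
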
+
+#ifdef CONFIG_SND_SOC_CACHE_LZO
+struct snd_soc_lzo_ctx {
+	void *wmem;
+	void *dst;
+	const void *src;
+	size_t src_len;
+	size_t dst_len;
+	size_t decompressed_size;
+	unsigned long *sync_bmp;
+	int sync_bmp_nbits;
+};
+
+#define LZO_BLOCK_NUM 8
+static int snd_soc_lzo_block_count(void)
+{
+	return LZO_BLOCK_NUM;
+}
+
+static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	if (!lzo_ctx->wmem)
+		return -ENOMEM;
+	return 0;
+}
+
+static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	size_t compress_size;
+	int ret;
+
+	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
+			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
+	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
+		return -EINVAL;
+	lzo_ctx->dst_len = compress_size;
+	return 0;
+}
+
+static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	size_t dst_len;
+	int ret;
+
+	dst_len = lzo_ctx->dst_len;
+	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
+				    lzo_ctx->dst, &dst_len);
+	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
+		return -EINVAL;
+	return 0;
+}
+
+static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
+		struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = snd_soc_lzo_compress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
+		struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = snd_soc_lzo_decompress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
+		unsigned int reg)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return (reg * codec_drv->reg_word_size) /
+	       DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+}
+
+static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
+		unsigned int reg)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return reg % (DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()) /
+		      codec_drv->reg_word_size);
+}
+
+static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+}
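
For concreteness, here is the block math above with assumed numbers plugged in; reg_cache_size and reg_word_size are made up for the example, while LZO_BLOCK_NUM is 8 as defined earlier.

/* Worked example of the blkindex/blkpos/blksize helpers, assuming a
 * 256-register, 16-bit cache (values chosen purely for illustration). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define LZO_BLOCK_NUM		8

int main(void)
{
	unsigned int reg_cache_size = 256;	/* assumed */
	unsigned int reg_word_size = 2;		/* assumed: u16 registers */
	unsigned int reg = 100;			/* example register */

	unsigned int reg_size = reg_cache_size * reg_word_size;	/* 512 B */
	unsigned int blksize  = DIV_ROUND_UP(reg_size, LZO_BLOCK_NUM);	/* 64 B */
	unsigned int blkindex = (reg * reg_word_size) / blksize;	/* 3 */
	unsigned int blkpos   = reg % (blksize / reg_word_size);	/* 4 */

	printf("reg %u -> block %u, offset %u (block size %u bytes)\n",
	       reg, blkindex, blkpos, blksize);
	return 0;
}
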
+
+static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	unsigned int val;
+	int i;
+	int ret;
+
+	lzo_blocks = codec->reg_cache;
+	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
+		ret = snd_soc_cache_read(codec, i, &val);
+		if (ret)
+			return ret;
+		ret = snd_soc_write(codec, i, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			i, val);
+	}
+
+	return 0;
+}
+
+static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
+				   unsigned int reg, unsigned int value)
+{
+	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	/* index of the compressed lzo block */
+	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
+	/* register index within the decompressed block */
+	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
+	/* size of the compressed block */
+	blksize = snd_soc_lzo_get_blksize(codec);
+	lzo_blocks = codec->reg_cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		goto out;
+	}
+
+	/* write the new value to the cache */
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+		cache = lzo_block->dst;
+		if (cache[blkpos] == value) {
+			kfree(lzo_block->dst);
+			goto out;
+		}
+		cache[blkpos] = value;
+	}
+	break;
+	case 2: {
+		u16 *cache;
+		cache = lzo_block->dst;
+		if (cache[blkpos] == value) {
+			kfree(lzo_block->dst);
+			goto out;
+		}
+		cache[blkpos] = value;
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	/* prepare the source to be the decompressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* compress the block */
+	ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		kfree(lzo_block->src);
+		goto out;
+	}
+
+	/* set the bit so we know we have to sync this register */
+	set_bit(reg, lzo_block->sync_bmp);
+	kfree(tmp_dst);
+	kfree(lzo_block->src);
+	return 0;
+out:
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return ret;
+}
+
+static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
+				  unsigned int reg, unsigned int *value)
+{
+	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	*value = 0;
+	/* index of the compressed lzo block */
+	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
+	/* register index within the decompressed block */
+	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
+	/* size of the compressed block */
+	blksize = snd_soc_lzo_get_blksize(codec);
+	lzo_blocks = codec->reg_cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
+	if (ret >= 0) {
+		/* fetch the value from the cache */
+		switch (codec->driver->reg_word_size) {
+		case 1: {
+			u8 *cache;
+			cache = lzo_block->dst;
+			*value = cache[blkpos];
+		}
+		break;
+		case 2: {
+			u16 *cache;
+			cache = lzo_block->dst;
+			*value = cache[blkpos];
+		}
+		break;
+		default:
+			BUG();
+		}
+	}
+
+	kfree(lzo_block->dst);
+	/* restore the pointer and length of the compressed block */
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return 0;
+}
+
+static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	int i, blkcount;
+
+	lzo_blocks = codec->reg_cache;
+	if (!lzo_blocks)
+		return 0;
+
+	blkcount = snd_soc_lzo_block_count();
+	/*
+	 * the pointer to the bitmap used for syncing the cache
+	 * is shared amongst all lzo_blocks.  Ensure it is freed
+	 * only once.
+	 */
+	if (lzo_blocks[0])
+		kfree(lzo_blocks[0]->sync_bmp);
+	for (i = 0; i < blkcount; ++i) {
+		if (lzo_blocks[i]) {
+			kfree(lzo_blocks[i]->wmem);
+			kfree(lzo_blocks[i]->dst);
+		}
+		/* each lzo_block is a pointer returned by kmalloc or NULL */
+		kfree(lzo_blocks[i]);
+	}
+	kfree(lzo_blocks);
+	codec->reg_cache = NULL;
+	return 0;
+}
+
+static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	size_t reg_size, bmp_size;
+	const struct snd_soc_codec_driver *codec_drv;
+	int ret, tofree, i, blksize, blkcount;
+	const char *p, *end;
+	unsigned long *sync_bmp;
+
+	ret = 0;
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+
+	/*
+	 * If we have not been given a default register cache,
+	 * allocate a dummy zeroed-out region, compress it and
+	 * remember to free it afterwards.
+	 */
+	tofree = 0;
+	if (!codec->reg_def_copy)
+		tofree = 1;
+
+	if (!codec->reg_def_copy) {
+		codec->reg_def_copy = kzalloc(reg_size, GFP_KERNEL);
+		if (!codec->reg_def_copy)
+			return -ENOMEM;
+	}
+
+	blkcount = snd_soc_lzo_block_count();
+	codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
+				   GFP_KERNEL);
+	if (!codec->reg_cache) {
+		ret = -ENOMEM;
+		goto err_tofree;
+	}
+	lzo_blocks = codec->reg_cache;
+
+	/*
+	 * allocate a bitmap to be used when syncing the cache with
+	 * the hardware.  Each time a register is modified, the corresponding
+	 * bit is set in the bitmap, so we know that we have to sync
+	 * that register.
+	 */
+	bmp_size = codec_drv->reg_cache_size;
+	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
+			   GFP_KERNEL);
+	if (!sync_bmp) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	bitmap_zero(sync_bmp, bmp_size);
+
+	/* allocate the lzo blocks and initialize them */
+	for (i = 0; i < blkcount; ++i) {
+		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
+					GFP_KERNEL);
+		if (!lzo_blocks[i]) {
+			kfree(sync_bmp);
+			ret = -ENOMEM;
+			goto err;
+		}
+		lzo_blocks[i]->sync_bmp = sync_bmp;
+		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
+		/* alloc the working space for the compressed block */
+		ret = snd_soc_lzo_prepare(lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	blksize = snd_soc_lzo_get_blksize(codec);
+	p = codec->reg_def_copy;
+	end = codec->reg_def_copy + reg_size;
+	/* compress the register map and fill the lzo blocks */
+	for (i = 0; i < blkcount; ++i, p += blksize) {
+		lzo_blocks[i]->src = p;
+		if (p + blksize > end)
+			lzo_blocks[i]->src_len = end - p;
+		else
+			lzo_blocks[i]->src_len = blksize;
+		ret = snd_soc_lzo_compress_cache_block(codec,
+						       lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+		lzo_blocks[i]->decompressed_size =
+			lzo_blocks[i]->src_len;
+	}
+
+	if (tofree) {
+		kfree(codec->reg_def_copy);
+		codec->reg_def_copy = NULL;
+	}
+	return 0;
+err:
+	snd_soc_cache_exit(codec);
+err_tofree:
+	if (tofree) {
+		kfree(codec->reg_def_copy);
+		codec->reg_def_copy = NULL;
+	}
+	return ret;
+}
+#endif
+
+static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
+{
+	int i;
+	int ret;
+	const struct snd_soc_codec_driver *codec_drv;
+	unsigned int val;
+
+	codec_drv = codec->driver;
+	for (i = 0; i < codec_drv->reg_cache_size; ++i) {
+		ret = snd_soc_cache_read(codec, i, &val);
+		if (ret)
+			return ret;
+		if (codec_drv->reg_cache_default) {
+			switch (codec_drv->reg_word_size) {
+			case 1: {
+				const u8 *cache;
+
+				cache = codec_drv->reg_cache_default;
+				if (cache[i] == val)
+					continue;
+			}
+			break;
+			case 2: {
+				const u16 *cache;
+
+				cache = codec_drv->reg_cache_default;
+				if (cache[i] == val)
+					continue;
+			}
+			break;
+			default:
+				BUG();
+			}
+		}
+		ret = snd_soc_write(codec, i, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			i, val);
+	}
+	return 0;
+}
+
+static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
+				    unsigned int reg, unsigned int value)
+{
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+
+		cache = codec->reg_cache;
+		cache[reg] = value;
+	}
+	break;
+	case 2: {
+		u16 *cache;
+
+		cache = codec->reg_cache;
+		cache[reg] = value;
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
+				   unsigned int reg, unsigned int *value)
+{
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+
+		cache = codec->reg_cache;
+		*value = cache[reg];
+	}
+	break;
+	case 2: {
+		u16 *cache;
+
+		cache = codec->reg_cache;
+		*value = cache[reg];
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
+{
+	if (!codec->reg_cache)
+		return 0;
+	kfree(codec->reg_cache);
+	codec->reg_cache = NULL;
+	return 0;
+}
+
+static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+
+	/*
+	 * for flat compression, we don't need to keep a copy of the
+	 * original defaults register cache as it will definitely not
+	 * be marked as __devinitconst
+	 */
+	kfree(codec->reg_def_copy);
+	codec->reg_def_copy = NULL;
+
+	if (codec_drv->reg_cache_default)
+		codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
+					   reg_size, GFP_KERNEL);
+	else
+		codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
+	if (!codec->reg_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* an array of all supported compression types */
+static const struct snd_soc_cache_ops cache_types[] = {
+	/* Flat *must* be the first entry for fallback */
+	{
+		.id = SND_SOC_FLAT_COMPRESSION,
+		.name = "flat",
+		.init = snd_soc_flat_cache_init,
+		.exit = snd_soc_flat_cache_exit,
+		.read = snd_soc_flat_cache_read,
+		.write = snd_soc_flat_cache_write,
+		.sync = snd_soc_flat_cache_sync
+	},
+#ifdef CONFIG_SND_SOC_CACHE_LZO
+	{
+		.id = SND_SOC_LZO_COMPRESSION,
+		.name = "LZO",
+		.init = snd_soc_lzo_cache_init,
+		.exit = snd_soc_lzo_cache_exit,
+		.read = snd_soc_lzo_cache_read,
+		.write = snd_soc_lzo_cache_write,
+		.sync = snd_soc_lzo_cache_sync
+	},
+#endif
+	{
+		.id = SND_SOC_RBTREE_COMPRESSION,
+		.name = "rbtree",
+		.init = snd_soc_rbtree_cache_init,
+		.exit = snd_soc_rbtree_cache_exit,
+		.read = snd_soc_rbtree_cache_read,
+		.write = snd_soc_rbtree_cache_write,
+		.sync = snd_soc_rbtree_cache_sync
+	}
+};
+
+int snd_soc_cache_init(struct snd_soc_codec *codec)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
+		if (cache_types[i].id == codec->compress_type)
+			break;
+
+	/* Fall back to flat compression */
+	if (i == ARRAY_SIZE(cache_types)) {
+		dev_warn(codec->dev, "Could not match compress type: %d\n",
+			 codec->compress_type);
+		i = 0;
+	}
+
+	mutex_init(&codec->cache_rw_mutex);
+	codec->cache_ops = &cache_types[i];
+
+	if (codec->cache_ops->init) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		return codec->cache_ops->init(codec);
+	}
+	return -EINVAL;
+}
+
+/*
+ * NOTE: keep in mind that this function might be called
+ * multiple times.
+ */
+int snd_soc_cache_exit(struct snd_soc_codec *codec)
+{
+	if (codec->cache_ops && codec->cache_ops->exit) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		return codec->cache_ops->exit(codec);
+	}
+	return -EINVAL;
+}
+
+/**
+ * snd_soc_cache_read - Fetch the value of a given register from the cache.
+ *
+ * @codec: CODEC to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ */
+int snd_soc_cache_read(struct snd_soc_codec *codec,
+		       unsigned int reg, unsigned int *value)
+{
+	int ret;
+
+	mutex_lock(&codec->cache_rw_mutex);
+
+	if (value && codec->cache_ops && codec->cache_ops->read) {
+		ret = codec->cache_ops->read(codec, reg, value);
+		mutex_unlock(&codec->cache_rw_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&codec->cache_rw_mutex);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_read);
+
+/**
+ * snd_soc_cache_write - Set the value of a given register in the cache.
+ *
+ * @codec: CODEC to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ */
+int snd_soc_cache_write(struct snd_soc_codec *codec,
+			unsigned int reg, unsigned int value)
+{
+	int ret;
+
+	mutex_lock(&codec->cache_rw_mutex);
+
+	if (codec->cache_ops && codec->cache_ops->write) {
+		ret = codec->cache_ops->write(codec, reg, value);
+		mutex_unlock(&codec->cache_rw_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&codec->cache_rw_mutex);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_write);
+
+/**
+ * snd_soc_cache_sync - Sync the register cache with the hardware.
+ *
+ * @codec: CODEC to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile.  In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ */
+int snd_soc_cache_sync(struct snd_soc_codec *codec)
+{
+	int ret;
+
+	if (!codec->cache_sync)
+		return 0;
+
+	if (codec->cache_ops && codec->cache_ops->sync) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		ret = codec->cache_ops->sync(codec);
+		if (!ret)
+			codec->cache_sync = 0;
+		return ret;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
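
For reference, a minimal sketch of how a codec driver would use the cache layer above: it selects a compression type, supplies a default register map, and writes the cache back to the hardware on resume via snd_soc_cache_sync(). The driver name and register values are hypothetical and not part of this patch.

/* minimal sketch, assuming a hypothetical "example" codec driver */
#include <sound/soc.h>

static const u16 example_reg_defaults[] = {
	0x0097, 0x0097, 0x0079, 0x0079,	/* hypothetical reset values */
};

static int example_codec_resume(struct snd_soc_codec *codec)
{
	/* mark the cache dirty, then write back every cached register
	 * that differs from its default value */
	codec->cache_sync = 1;
	return snd_soc_cache_sync(codec);
}

static struct snd_soc_codec_driver example_codec_driver = {
	.resume			= example_codec_resume,
	.reg_cache_size		= ARRAY_SIZE(example_reg_defaults),
	.reg_word_size		= sizeof(u16),
	.reg_cache_default	= example_reg_defaults,
	.compress_type		= SND_SOC_RBTREE_COMPRESSION,
};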
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 85b7d54..c3f6f1e7 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -33,12 +33,15 @@
 #include <linux/slab.h>
 #include <sound/ac97_codec.h>
 #include <sound/core.h>
+#include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/asoc.h>
+
 #define NAME_SIZE	32
 
 static DEFINE_MUTEX(pcm_mutex);
@@ -67,25 +70,6 @@
 module_param(pmdown_time, int, 0);
 MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
 
-/*
- * This function forces any delayed work to be queued and run.
- */
-static int run_delayed_work(struct delayed_work *dwork)
-{
-	int ret;
-
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(dwork);
-
-	/* if there was any work waiting then we run it now and
-	 * wait for it's completion */
-	if (ret) {
-		schedule_delayed_work(dwork, 0);
-		flush_scheduled_work();
-	}
-	return ret;
-}
-
 /* codec register dump */
 static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
 {
@@ -114,7 +98,7 @@
 			 * the register being volatile and the device being
 			 * powered off.
 			 */
-			ret = codec->driver->read(codec, i);
+			ret = snd_soc_read(codec, i);
 			if (ret >= 0)
 				count += snprintf(buf + count,
 						  PAGE_SIZE - count,
@@ -225,7 +209,7 @@
 		start++;
 	if (strict_strtoul(start, 16, &value))
 		return -EINVAL;
-	codec->driver->write(codec, reg, value);
+	snd_soc_write(codec, reg, value);
 	return buf_size;
 }
 
@@ -238,8 +222,10 @@
 
 static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
 {
-	codec->debugfs_codec_root = debugfs_create_dir(codec->name ,
-						       debugfs_root);
+	struct dentry *debugfs_card_root = codec->card->debugfs_card_root;
+
+	codec->debugfs_codec_root = debugfs_create_dir(codec->name,
+						       debugfs_card_root);
 	if (!codec->debugfs_codec_root) {
 		printk(KERN_WARNING
 		       "ASoC: Failed to create codec debugfs directory\n");
@@ -253,20 +239,13 @@
 		printk(KERN_WARNING
 		       "ASoC: Failed to create codec register debugfs file\n");
 
-	codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
-						     codec->debugfs_codec_root,
-						     &codec->pop_time);
-	if (!codec->debugfs_pop_time)
-		printk(KERN_WARNING
-		       "Failed to create pop time debugfs file\n");
-
-	codec->debugfs_dapm = debugfs_create_dir("dapm",
+	codec->dapm.debugfs_dapm = debugfs_create_dir("dapm",
 						 codec->debugfs_codec_root);
-	if (!codec->debugfs_dapm)
+	if (!codec->dapm.debugfs_dapm)
 		printk(KERN_WARNING
 		       "Failed to create DAPM debugfs directory\n");
 
-	snd_soc_dapm_debugfs_init(codec);
+	snd_soc_dapm_debugfs_init(&codec->dapm);
 }
 
 static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
@@ -374,6 +353,29 @@
 	.llseek = default_llseek,/* read accesses f_pos */
 };
 
+static void soc_init_card_debugfs(struct snd_soc_card *card)
+{
+	card->debugfs_card_root = debugfs_create_dir(card->name,
+						     debugfs_root);
+	if (!card->debugfs_card_root) {
+		dev_warn(card->dev,
+			 "ASoC: Failed to create codec debugfs directory\n");
+		return;
+	}
+
+	card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
+						    card->debugfs_card_root,
+						    &card->pop_time);
+	if (!card->debugfs_pop_time)
+		dev_warn(card->dev,
+		       "Failed to create pop time debugfs file\n");
+}
+
+static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
+{
+	debugfs_remove_recursive(card->debugfs_card_root);
+}
+
 #else
 
 static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec)
@@ -383,6 +385,14 @@
 static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
 {
 }
+
+static inline void soc_init_card_debugfs(struct snd_soc_card *card)
+{
+}
+
+static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
+{
+}
 #endif
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
@@ -497,7 +507,7 @@
 		}
 	}
 
-	/* Check that the codec and cpu DAI's are compatible */
+	/* Check that the codec and cpu DAIs are compatible */
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		runtime->hw.rate_min =
 			max(codec_dai_drv->playback.rate_min,
@@ -846,7 +856,7 @@
 }
 
 /*
- * Free's resources allocated by hw_params, can be called multiple times
+ * Frees resources allocated by hw_params, can be called multiple times
  */
 static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
 {
@@ -870,7 +880,7 @@
 	if (platform->driver->ops->hw_free)
 		platform->driver->ops->hw_free(substream);
 
-	/* now free hw params for the DAI's  */
+	/* now free hw params for the DAIs  */
 	if (codec_dai->driver->ops->hw_free)
 		codec_dai->driver->ops->hw_free(substream, codec_dai);
 
@@ -958,6 +968,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct snd_soc_codec *codec;
 	int i;
 
 	/* If the initialization of this soc device failed, there is no codec
@@ -976,7 +987,7 @@
 	/* we're going to block userspace touching us until resume completes */
 	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);
 
-	/* mute any active DAC's */
+	/* mute any active DACs */
 	for (i = 0; i < card->num_rtd; i++) {
 		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
 		struct snd_soc_dai_driver *drv = dai->driver;
@@ -1016,8 +1027,8 @@
 
 	/* close any waiting streams and save state */
 	for (i = 0; i < card->num_rtd; i++) {
-		run_delayed_work(&card->rtd[i].delayed_work);
-		card->rtd[i].codec->suspend_bias_level = card->rtd[i].codec->bias_level;
+		flush_delayed_work_sync(&card->rtd[i].delayed_work);
+		card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
 	}
 
 	for (i = 0; i < card->num_rtd; i++) {
@@ -1036,12 +1047,11 @@
 	}
 
 	/* suspend all CODECs */
-	for (i = 0; i < card->num_rtd; i++) {
-		struct snd_soc_codec *codec = card->rtd[i].codec;
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
 		/* If there are paths active then the CODEC will be held with
 		 * bias _ON and should not be suspended. */
 		if (!codec->suspended && codec->driver->suspend) {
-			switch (codec->bias_level) {
+			switch (codec->dapm.bias_level) {
 			case SND_SOC_BIAS_STANDBY:
 			case SND_SOC_BIAS_OFF:
 				codec->driver->suspend(codec, PMSG_SUSPEND);
@@ -1078,6 +1088,7 @@
 	struct snd_soc_card *card =
 			container_of(work, struct snd_soc_card, deferred_resume_work);
 	struct platform_device *pdev = to_platform_device(card->dev);
+	struct snd_soc_codec *codec;
 	int i;
 
 	/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
@@ -1103,14 +1114,13 @@
 			cpu_dai->driver->resume(cpu_dai);
 	}
 
-	for (i = 0; i < card->num_rtd; i++) {
-		struct snd_soc_codec *codec = card->rtd[i].codec;
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
 		/* If the CODEC was idle over suspend then it will have been
 		 * left with bias OFF or STANDBY and suspended so we must now
 		 * resume.  Otherwise the suspend was suppressed.
 		 */
 		if (codec->driver->resume && codec->suspended) {
-			switch (codec->bias_level) {
+			switch (codec->dapm.bias_level) {
 			case SND_SOC_BIAS_STANDBY:
 			case SND_SOC_BIAS_OFF:
 				codec->driver->resume(codec);
@@ -1249,9 +1259,6 @@
 		if (!strcmp(codec->name, dai_link->codec_name)) {
 			rtd->codec = codec;
 
-			if (!try_module_get(codec->dev->driver->owner))
-				return -ENODEV;
-
 			/* CODEC found, so find CODEC DAI from registered DAIs from this CODEC*/
 			list_for_each_entry(codec_dai, &dai_list, list) {
 				if (codec->dev == codec_dai->dev &&
@@ -1277,10 +1284,6 @@
 	/* no, then find CPU DAI from registered DAIs*/
 	list_for_each_entry(platform, &platform_list, list) {
 		if (!strcmp(platform->name, dai_link->platform_name)) {
-
-			if (!try_module_get(platform->dev->driver->owner))
-				return -ENODEV;
-
 			rtd->platform = platform;
 			goto out;
 		}
@@ -1299,6 +1302,27 @@
 	return 1;
 }
 
+static void soc_remove_codec(struct snd_soc_codec *codec)
+{
+	int err;
+
+	if (codec->driver->remove) {
+		err = codec->driver->remove(codec);
+		if (err < 0)
+			dev_err(codec->dev,
+				"asoc: failed to remove %s: %d\n",
+				codec->name, err);
+	}
+
+	/* Make sure all DAPM widgets are freed */
+	snd_soc_dapm_free(&codec->dapm);
+
+	soc_cleanup_codec_debugfs(codec);
+	codec->probed = 0;
+	list_del(&codec->card_list);
+	module_put(codec->dev->driver->owner);
+}
+
 static void soc_remove_dai_link(struct snd_soc_card *card, int num)
 {
 	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
@@ -1310,6 +1334,7 @@
 	/* unregister the rtd device */
 	if (rtd->dev_registered) {
 		device_remove_file(&rtd->dev, &dev_attr_pmdown_time);
+		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
 		device_unregister(&rtd->dev);
 		rtd->dev_registered = 0;
 	}
@@ -1338,22 +1363,8 @@
 	}
 
 	/* remove the CODEC */
-	if (codec && codec->probed) {
-		if (codec->driver->remove) {
-			err = codec->driver->remove(codec);
-			if (err < 0)
-				printk(KERN_ERR "asoc: failed to remove %s\n", codec->name);
-		}
-
-		/* Make sure all DAPM widgets are freed */
-		snd_soc_dapm_free(codec);
-
-		soc_cleanup_codec_debugfs(codec);
-		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
-		codec->probed = 0;
-		list_del(&codec->card_list);
-		module_put(codec->dev->driver->owner);
-	}
+	if (codec && codec->probed)
+		soc_remove_codec(codec);
 
 	/* remove the cpu_dai */
 	if (cpu_dai && cpu_dai->probed) {
@@ -1368,8 +1379,126 @@
 	}
 }
 
+static void soc_set_name_prefix(struct snd_soc_card *card,
+				struct snd_soc_codec *codec)
+{
+	int i;
+
+	if (card->codec_conf == NULL)
+		return;
+
+	for (i = 0; i < card->num_configs; i++) {
+		struct snd_soc_codec_conf *map = &card->codec_conf[i];
+		if (map->dev_name && !strcmp(codec->name, map->dev_name)) {
+			codec->name_prefix = map->name_prefix;
+			break;
+		}
+	}
+}
+
+static int soc_probe_codec(struct snd_soc_card *card,
+			   struct snd_soc_codec *codec)
+{
+	int ret = 0;
+
+	codec->card = card;
+	codec->dapm.card = card;
+	soc_set_name_prefix(card, codec);
+
+	if (codec->driver->probe) {
+		ret = codec->driver->probe(codec);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"asoc: failed to probe CODEC %s: %d\n",
+				codec->name, ret);
+			return ret;
+		}
+	}
+
+	soc_init_codec_debugfs(codec);
+
+	/* mark codec as probed and add to card codec list */
+	if (!try_module_get(codec->dev->driver->owner))
+		return -ENODEV;
+
+	codec->probed = 1;
+	list_add(&codec->card_list, &card->codec_dev_list);
+	list_add(&codec->dapm.list, &card->dapm_list);
+
+	return ret;
+}
+
 static void rtd_release(struct device *dev) {}
 
+static int soc_post_component_init(struct snd_soc_card *card,
+				   struct snd_soc_codec *codec,
+				   int num, int dailess)
+{
+	struct snd_soc_dai_link *dai_link = NULL;
+	struct snd_soc_aux_dev *aux_dev = NULL;
+	struct snd_soc_pcm_runtime *rtd;
+	const char *temp, *name;
+	int ret = 0;
+
+	if (!dailess) {
+		dai_link = &card->dai_link[num];
+		rtd = &card->rtd[num];
+		name = dai_link->name;
+	} else {
+		aux_dev = &card->aux_dev[num];
+		rtd = &card->rtd_aux[num];
+		name = aux_dev->name;
+	}
+	rtd->card = card;
+
+	/* machine controls, routes and widgets are not prefixed */
+	temp = codec->name_prefix;
+	codec->name_prefix = NULL;
+
+	/* do machine specific initialization */
+	if (!dailess && dai_link->init)
+		ret = dai_link->init(rtd);
+	else if (dailess && aux_dev->init)
+		ret = aux_dev->init(&codec->dapm);
+	if (ret < 0) {
+		dev_err(card->dev, "asoc: failed to init %s: %d\n", name, ret);
+		return ret;
+	}
+	codec->name_prefix = temp;
+
+	/* Make sure all DAPM widgets are instantiated */
+	snd_soc_dapm_new_widgets(&codec->dapm);
+	snd_soc_dapm_sync(&codec->dapm);
+
+	/* register the rtd device */
+	rtd->codec = codec;
+	rtd->dev.parent = card->dev;
+	rtd->dev.release = rtd_release;
+	rtd->dev.init_name = name;
+	ret = device_register(&rtd->dev);
+	if (ret < 0) {
+		dev_err(card->dev,
+			"asoc: failed to register runtime device: %d\n", ret);
+		return ret;
+	}
+	rtd->dev_registered = 1;
+
+	/* add DAPM sysfs entries for this codec */
+	ret = snd_soc_dapm_sys_add(&rtd->dev);
+	if (ret < 0)
+		dev_err(codec->dev,
+			"asoc: failed to add codec dapm sysfs entries: %d\n",
+			ret);
+
+	/* add codec sysfs entries */
+	ret = device_create_file(&rtd->dev, &dev_attr_codec_reg);
+	if (ret < 0)
+		dev_err(codec->dev,
+			"asoc: failed to add codec sysfs files: %d\n", ret);
+
+	return 0;
+}
+
 static int soc_probe_dai_link(struct snd_soc_card *card, int num)
 {
 	struct snd_soc_dai_link *dai_link = &card->dai_link[num];
@@ -1383,10 +1512,7 @@
 
 	/* config components */
 	codec_dai->codec = codec;
-	codec->card = card;
 	cpu_dai->platform = platform;
-	rtd->card = card;
-	rtd->dev.parent = card->dev;
 	codec_dai->card = card;
 	cpu_dai->card = card;
 
@@ -1410,20 +1536,9 @@
 
 	/* probe the CODEC */
 	if (!codec->probed) {
-		if (codec->driver->probe) {
-			ret = codec->driver->probe(codec);
-			if (ret < 0) {
-				printk(KERN_ERR "asoc: failed to probe CODEC %s\n",
-						codec->name);
-				return ret;
-			}
-		}
-
-		soc_init_codec_debugfs(codec);
-
-		/* mark codec as probed and add to card codec list */
-		codec->probed = 1;
-		list_add(&codec->card_list, &card->codec_dev_list);
+		ret = soc_probe_codec(card, codec);
+		if (ret < 0)
+			return ret;
 	}
 
 	/* probe the platform */
@@ -1437,6 +1552,10 @@
 			}
 		}
 		/* mark platform as probed and add to card platform list */
+
+		if (!try_module_get(platform->dev->driver->owner))
+			return -ENODEV;
+
 		platform->probed = 1;
 		list_add(&platform->card_list, &card->platform_dev_list);
 	}
@@ -1460,43 +1579,14 @@
 	/* DAPM dai link stream work */
 	INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
 
-	/* now that all clients have probed, initialise the DAI link */
-	if (dai_link->init) {
-		ret = dai_link->init(rtd);
-		if (ret < 0) {
-			printk(KERN_ERR "asoc: failed to init %s\n", dai_link->stream_name);
-			return ret;
-		}
-	}
-
-	/* Make sure all DAPM widgets are instantiated */
-	snd_soc_dapm_new_widgets(codec);
-	snd_soc_dapm_sync(codec);
-
-	/* register the rtd device */
-	rtd->dev.release = rtd_release;
-	rtd->dev.init_name = dai_link->name;
-	ret = device_register(&rtd->dev);
-	if (ret < 0) {
-		printk(KERN_ERR "asoc: failed to register DAI runtime device %d\n", ret);
+	ret = soc_post_component_init(card, codec, num, 0);
+	if (ret)
 		return ret;
-	}
 
-	rtd->dev_registered = 1;
 	ret = device_create_file(&rtd->dev, &dev_attr_pmdown_time);
 	if (ret < 0)
 		printk(KERN_WARNING "asoc: failed to add pmdown_time sysfs\n");
 
-	/* add DAPM sysfs entries for this codec */
-	ret = snd_soc_dapm_sys_add(&rtd->dev);
-	if (ret < 0)
-		printk(KERN_WARNING "asoc: failed to add codec dapm sysfs entries\n");
-
-	/* add codec sysfs entries */
-	ret = device_create_file(&rtd->dev, &dev_attr_codec_reg);
-	if (ret < 0)
-		printk(KERN_WARNING "asoc: failed to add codec sysfs files\n");
-
 	/* create the pcm */
 	ret = soc_new_pcm(rtd, num);
 	if (ret < 0) {
@@ -1551,9 +1641,82 @@
 }
 #endif
 
+static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
+{
+	struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
+	struct snd_soc_codec *codec;
+	int ret = -ENODEV;
+
+	/* find CODEC from registered CODECs*/
+	list_for_each_entry(codec, &codec_list, list) {
+		if (!strcmp(codec->name, aux_dev->codec_name)) {
+			if (codec->probed) {
+				dev_err(codec->dev,
+					"asoc: codec already probed");
+				ret = -EBUSY;
+				goto out;
+			}
+			goto found;
+		}
+	}
+	/* codec not found */
+	dev_err(card->dev, "asoc: codec %s not found", aux_dev->codec_name);
+	goto out;
+
+found:
+	ret = soc_probe_codec(card, codec);
+	if (ret < 0)
+		return ret;
+
+	ret = soc_post_component_init(card, codec, num, 1);
+
+out:
+	return ret;
+}
+
+static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
+{
+	struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
+	struct snd_soc_codec *codec = rtd->codec;
+
+	/* unregister the rtd device */
+	if (rtd->dev_registered) {
+		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
+		device_unregister(&rtd->dev);
+		rtd->dev_registered = 0;
+	}
+
+	if (codec && codec->probed)
+		soc_remove_codec(codec);
+}
+
+static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
+				    enum snd_soc_compress_type compress_type)
+{
+	int ret;
+
+	if (codec->cache_init)
+		return 0;
+
+	/* override the compress_type if necessary */
+	if (compress_type && codec->compress_type != compress_type)
+		codec->compress_type = compress_type;
+	ret = snd_soc_cache_init(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache compression type: %d\n",
+			ret);
+		return ret;
+	}
+	codec->cache_init = 1;
+	return 0;
+}
+
 static void snd_soc_instantiate_card(struct snd_soc_card *card)
 {
 	struct platform_device *pdev = to_platform_device(card->dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_codec_conf *codec_conf;
+	enum snd_soc_compress_type compress_type;
 	int ret, i;
 
 	mutex_lock(&card->mutex);
@@ -1573,6 +1736,39 @@
 		return;
 	}
 
+	/* initialize the register cache for each available codec */
+	list_for_each_entry(codec, &codec_list, list) {
+		if (codec->cache_init)
+			continue;
+		/* check to see if we need to override the compress_type */
+		for (i = 0; i < card->num_configs; ++i) {
+			codec_conf = &card->codec_conf[i];
+			if (!strcmp(codec->name, codec_conf->dev_name)) {
+				compress_type = codec_conf->compress_type;
+				if (compress_type &&
+				    compress_type != codec->compress_type)
+					break;
+			}
+		}
+		if (i == card->num_configs) {
+			/* no need to override the compress_type so
+			 * go ahead and do the standard thing */
+			ret = snd_soc_init_codec_cache(codec, 0);
+			if (ret < 0) {
+				mutex_unlock(&card->mutex);
+				return;
+			}
+			continue;
+		}
+		/* override the compress_type with the one supplied in
+		 * the machine driver */
+		ret = snd_soc_init_codec_cache(codec, compress_type);
+		if (ret < 0) {
+			mutex_unlock(&card->mutex);
+			return;
+		}
+	}
+
 	/* card bind complete so register a sound card */
 	ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
 			card->owner, 0, &card->snd_card);
@@ -1605,6 +1801,15 @@
 		}
 	}
 
+	for (i = 0; i < card->num_aux_devs; i++) {
+		ret = soc_probe_aux_dev(card, i);
+		if (ret < 0) {
+			pr_err("asoc: failed to add auxiliary devices %s: %d\n",
+			       card->name, ret);
+			goto probe_aux_dev_err;
+		}
+	}
+
 	snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
 		 "%s",  card->name);
 	snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
@@ -1613,7 +1818,7 @@
 	ret = snd_card_register(card->snd_card);
 	if (ret < 0) {
 		printk(KERN_ERR "asoc: failed to register soundcard for %s\n", card->name);
-		goto probe_dai_err;
+		goto probe_aux_dev_err;
 	}
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
@@ -1623,8 +1828,8 @@
 		if (ret < 0) {
 			printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name);
 			while (--i >= 0)
-				soc_unregister_ac97_dai_link(&card->rtd[i]);
-			goto probe_dai_err;
+				soc_unregister_ac97_dai_link(card->rtd[i].codec);
+			goto probe_aux_dev_err;
 		}
 	}
 #endif
@@ -1633,6 +1838,10 @@
 	mutex_unlock(&card->mutex);
 	return;
 
+probe_aux_dev_err:
+	for (i = 0; i < card->num_aux_devs; i++)
+		soc_remove_aux_dev(card, i);
+
 probe_dai_err:
 	for (i = 0; i < card->num_links; i++)
 		soc_remove_dai_link(card, i);
@@ -1668,6 +1877,11 @@
 	INIT_LIST_HEAD(&card->dai_dev_list);
 	INIT_LIST_HEAD(&card->codec_dev_list);
 	INIT_LIST_HEAD(&card->platform_dev_list);
+	INIT_LIST_HEAD(&card->widgets);
+	INIT_LIST_HEAD(&card->paths);
+	INIT_LIST_HEAD(&card->dapm_list);
+
+	soc_init_card_debugfs(card);
 
 	ret = snd_soc_register_card(card);
 	if (ret != 0) {
@@ -1689,13 +1903,19 @@
 		/* make sure any delayed work runs */
 		for (i = 0; i < card->num_rtd; i++) {
 			struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-			run_delayed_work(&rtd->delayed_work);
+			flush_delayed_work_sync(&rtd->delayed_work);
 		}
 
+		/* remove auxiliary devices */
+		for (i = 0; i < card->num_aux_devs; i++)
+			soc_remove_aux_dev(card, i);
+
 		/* remove and free each DAI */
 		for (i = 0; i < card->num_rtd; i++)
 			soc_remove_dai_link(card, i);
 
+		soc_cleanup_card_debugfs(card);
+
 		/* remove the card */
 		if (card->remove)
 			card->remove(pdev);
@@ -1720,7 +1940,7 @@
 	 * now, we're shutting down so no imminent restart. */
 	for (i = 0; i < card->num_rtd; i++) {
 		struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-		run_delayed_work(&rtd->delayed_work);
+		flush_delayed_work_sync(&rtd->delayed_work);
 	}
 
 	snd_soc_dapm_shutdown(card);
@@ -1879,6 +2099,27 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
 
+unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg)
+{
+	unsigned int ret;
+
+	ret = codec->read(codec, reg);
+	dev_dbg(codec->dev, "read %x => %x\n", reg, ret);
+	trace_snd_soc_reg_read(codec, reg, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_read);
+
+unsigned int snd_soc_write(struct snd_soc_codec *codec,
+			   unsigned int reg, unsigned int val)
+{
+	dev_dbg(codec->dev, "write %x = %x\n", reg, val);
+	trace_snd_soc_reg_write(codec, reg, val);
+	return codec->write(codec, reg, val);
+}
+EXPORT_SYMBOL_GPL(snd_soc_write);
+
 /**
  * snd_soc_update_bits - update codec register bits
  * @codec: audio codec
@@ -2019,14 +2260,22 @@
 	const struct snd_kcontrol_new *controls, int num_controls)
 {
 	struct snd_card *card = codec->card->snd_card;
+	char prefixed_name[44], *name;
 	int err, i;
 
 	for (i = 0; i < num_controls; i++) {
 		const struct snd_kcontrol_new *control = &controls[i];
-		err = snd_ctl_add(card, snd_soc_cnew(control, codec, NULL));
+		if (codec->name_prefix) {
+			snprintf(prefixed_name, sizeof(prefixed_name), "%s %s",
+				 codec->name_prefix, control->name);
+			name = prefixed_name;
+		} else {
+			name = control->name;
+		}
+		err = snd_ctl_add(card, snd_soc_cnew(control, codec, name));
 		if (err < 0) {
 			dev_err(codec->dev, "%s: Failed to add %s: %d\n",
-				codec->name, control->name, err);
+				codec->name, name, err);
 			return err;
 		}
 	}
@@ -2863,10 +3112,12 @@
 	if (!card->name || !card->dev)
 		return -EINVAL;
 
-	card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) * card->num_links,
-			GFP_KERNEL);
+	card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) *
+			    (card->num_links + card->num_aux_devs),
+			    GFP_KERNEL);
 	if (card->rtd == NULL)
 		return -ENOMEM;
+	card->rtd_aux = &card->rtd[card->num_links];
 
 	for (i = 0; i < card->num_links; i++)
 		card->rtd[i].dai_link = &card->dai_link[i];
@@ -2908,7 +3159,7 @@
  * Simplify DAI link configuration by removing ".-1" from device names
  * and sanitizing names.
  */
-static inline char *fmt_single_name(struct device *dev, int *id)
+static char *fmt_single_name(struct device *dev, int *id)
 {
 	char *found, name[NAME_SIZE];
 	int id1, id2;
@@ -2916,7 +3167,7 @@
 	if (dev_name(dev) == NULL)
 		return NULL;
 
-	strncpy(name, dev_name(dev), NAME_SIZE);
+	strlcpy(name, dev_name(dev), NAME_SIZE);
 
 	/* are we a "%s.%d" name (platform and SPI components) */
 	found = strstr(name, dev->driver->name);
@@ -2939,7 +3190,7 @@
 
 			/* sanitize component name for DAI link creation */
 			snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name, name);
-			strncpy(name, tmp, NAME_SIZE);
+			strlcpy(name, tmp, NAME_SIZE);
 		} else
 			*id = 0;
 	}
@@ -3204,9 +3455,11 @@
  * @codec: codec to register
  */
 int snd_soc_register_codec(struct device *dev,
-		struct snd_soc_codec_driver *codec_drv,
-		struct snd_soc_dai_driver *dai_drv, int num_dai)
+			   const struct snd_soc_codec_driver *codec_drv,
+			   struct snd_soc_dai_driver *dai_drv,
+			   int num_dai)
 {
+	size_t reg_size;
 	struct snd_soc_codec *codec;
 	int ret, i;
 
@@ -3223,30 +3476,37 @@
 		return -ENOMEM;
 	}
 
-	/* allocate CODEC register cache */
-	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
+	if (codec_drv->compress_type)
+		codec->compress_type = codec_drv->compress_type;
+	else
+		codec->compress_type = SND_SOC_FLAT_COMPRESSION;
 
-		if (codec_drv->reg_cache_default)
-			codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
-				codec_drv->reg_cache_size * codec_drv->reg_word_size, GFP_KERNEL);
-		else
-			codec->reg_cache = kzalloc(codec_drv->reg_cache_size *
-				codec_drv->reg_word_size, GFP_KERNEL);
-
-		if (codec->reg_cache == NULL) {
-			kfree(codec->name);
-			kfree(codec);
-			return -ENOMEM;
-		}
-	}
-
+	codec->write = codec_drv->write;
+	codec->read = codec_drv->read;
+	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
+	codec->dapm.dev = dev;
+	codec->dapm.codec = codec;
 	codec->dev = dev;
 	codec->driver = codec_drv;
-	codec->bias_level = SND_SOC_BIAS_OFF;
 	codec->num_dai = num_dai;
 	mutex_init(&codec->mutex);
-	INIT_LIST_HEAD(&codec->dapm_widgets);
-	INIT_LIST_HEAD(&codec->dapm_paths);
+
+	/* allocate CODEC register cache */
+	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
+		reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+		/* it is necessary to make a copy of the default register
+		 * cache because, in the case of a compression type that
+		 * requires the default register cache to be marked as
+		 * __devinitconst, the kernel might have freed the array by
+		 * the time we initialize the cache.
+		 */
+		codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
+					      reg_size, GFP_KERNEL);
+		if (!codec->reg_def_copy) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
 
 	for (i = 0; i < num_dai; i++) {
 		fixup_codec_formats(&dai_drv[i].playback);
@@ -3257,7 +3517,7 @@
 	if (num_dai) {
 		ret = snd_soc_register_dais(dev, dai_drv, num_dai);
 		if (ret < 0)
-			goto error;
+			goto fail;
 	}
 
 	mutex_lock(&client_mutex);
@@ -3268,9 +3528,9 @@
 	pr_debug("Registered codec '%s'\n", codec->name);
 	return 0;
 
-error:
-	if (codec->reg_cache)
-		kfree(codec->reg_cache);
+fail:
+	kfree(codec->reg_def_copy);
+	codec->reg_def_copy = NULL;
 	kfree(codec->name);
 	kfree(codec);
 	return ret;
@@ -3304,8 +3564,8 @@
 
 	pr_debug("Unregistered codec '%s'\n", codec->name);
 
-	if (codec->reg_cache)
-		kfree(codec->reg_cache);
+	snd_soc_cache_exit(codec);
+	kfree(codec->reg_def_copy);
 	kfree(codec->name);
 	kfree(codec);
 }
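
For reference, a minimal sketch of a machine driver using the codec_conf and aux_dev card fields handled above: one entry prefixes a codec's control names, another overrides its cache compression type, and an auxiliary (DAI-less) codec is probed through aux_dev. All device and codec names are hypothetical and not part of this patch.

/* minimal sketch, assuming hypothetical "example" devices */
#include <sound/soc.h>

static int example_amp_init(struct snd_soc_dapm_context *dapm)
{
	/* machine-specific widget/route setup for the auxiliary codec */
	return 0;
}

static struct snd_soc_codec_conf example_codec_conf[] = {
	{
		.dev_name	= "example-codec.0",
		.name_prefix	= "Left",	/* controls become "Left ..." */
	},
	{
		.dev_name	= "example-codec.1",
		.name_prefix	= "Right",
		/* per-board cache override; falls back to flat if LZO
		 * support is not built in */
		.compress_type	= SND_SOC_LZO_COMPRESSION,
	},
};

static struct snd_soc_aux_dev example_aux_devs[] = {
	{
		.name		= "example-amp",
		.codec_name	= "example-amp-codec.0",
		.init		= example_amp_init,
	},
};

static struct snd_soc_card example_card = {
	.name		= "example-card",
	/* .dai_link / .num_links etc. omitted for brevity */
	.codec_conf	= example_codec_conf,
	.num_configs	= ARRAY_SIZE(example_codec_conf),
	.aux_dev	= example_aux_devs,
	.num_aux_devs	= ARRAY_SIZE(example_aux_devs),
};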
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c721502..1790f83 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -42,9 +42,11 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 
+#include <trace/events/asoc.h>
+
 /* dapm power sequences - make this per codec in the future */
 static int dapm_up_seq[] = {
 	[snd_soc_dapm_pre] = 0,
@@ -54,12 +56,14 @@
 	[snd_soc_dapm_aif_out] = 3,
 	[snd_soc_dapm_mic] = 4,
 	[snd_soc_dapm_mux] = 5,
+	[snd_soc_dapm_virt_mux] = 5,
 	[snd_soc_dapm_value_mux] = 5,
 	[snd_soc_dapm_dac] = 6,
 	[snd_soc_dapm_mixer] = 7,
 	[snd_soc_dapm_mixer_named_ctl] = 7,
 	[snd_soc_dapm_pga] = 8,
 	[snd_soc_dapm_adc] = 9,
+	[snd_soc_dapm_out_drv] = 10,
 	[snd_soc_dapm_hp] = 10,
 	[snd_soc_dapm_spk] = 10,
 	[snd_soc_dapm_post] = 11,
@@ -70,6 +74,7 @@
 	[snd_soc_dapm_adc] = 1,
 	[snd_soc_dapm_hp] = 2,
 	[snd_soc_dapm_spk] = 2,
+	[snd_soc_dapm_out_drv] = 2,
 	[snd_soc_dapm_pga] = 4,
 	[snd_soc_dapm_mixer_named_ctl] = 5,
 	[snd_soc_dapm_mixer] = 5,
@@ -77,6 +82,7 @@
 	[snd_soc_dapm_mic] = 7,
 	[snd_soc_dapm_micbias] = 8,
 	[snd_soc_dapm_mux] = 9,
+	[snd_soc_dapm_virt_mux] = 9,
 	[snd_soc_dapm_value_mux] = 9,
 	[snd_soc_dapm_aif_in] = 10,
 	[snd_soc_dapm_aif_out] = 10,
@@ -90,17 +96,24 @@
 		schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
 }
 
-static void pop_dbg(u32 pop_time, const char *fmt, ...)
+static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
 {
 	va_list args;
+	char *buf;
+
+	if (!pop_time)
+		return;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return;
 
 	va_start(args, fmt);
-
-	if (pop_time) {
-		vprintk(fmt, args);
-	}
-
+	vsnprintf(buf, PAGE_SIZE, fmt, args);
+	dev_info(dev, "%s", buf);
 	va_end(args);
+
+	kfree(buf);
 }
 
 /* create a new dapm widget */
@@ -120,36 +133,45 @@
  * Returns 0 for success else error.
  */
 static int snd_soc_dapm_set_bias_level(struct snd_soc_card *card,
-		struct snd_soc_codec *codec, enum snd_soc_bias_level level)
+				       struct snd_soc_dapm_context *dapm,
+				       enum snd_soc_bias_level level)
 {
 	int ret = 0;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		dev_dbg(codec->dev, "Setting full bias\n");
+		dev_dbg(dapm->dev, "Setting full bias\n");
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		dev_dbg(codec->dev, "Setting bias prepare\n");
+		dev_dbg(dapm->dev, "Setting bias prepare\n");
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		dev_dbg(codec->dev, "Setting standby bias\n");
+		dev_dbg(dapm->dev, "Setting standby bias\n");
 		break;
 	case SND_SOC_BIAS_OFF:
-		dev_dbg(codec->dev, "Setting bias off\n");
+		dev_dbg(dapm->dev, "Setting bias off\n");
 		break;
 	default:
-		dev_err(codec->dev, "Setting invalid bias %d\n", level);
+		dev_err(dapm->dev, "Setting invalid bias %d\n", level);
 		return -EINVAL;
 	}
 
+	trace_snd_soc_bias_level_start(card, level);
+
 	if (card && card->set_bias_level)
 		ret = card->set_bias_level(card, level);
 	if (ret == 0) {
-		if (codec->driver->set_bias_level)
-			ret = codec->driver->set_bias_level(codec, level);
+		if (dapm->codec && dapm->codec->driver->set_bias_level)
+			ret = dapm->codec->driver->set_bias_level(dapm->codec, level);
 		else
-			codec->bias_level = level;
+			dapm->bias_level = level;
 	}
+	if (ret == 0) {
+		if (card && card->set_bias_level_post)
+			ret = card->set_bias_level_post(card, level);
+	}
+
+	trace_snd_soc_bias_level_done(card, level);
 
 	return ret;
 }
@@ -196,6 +218,20 @@
 		}
 	}
 	break;
+	case snd_soc_dapm_virt_mux: {
+		struct soc_enum *e = (struct soc_enum *)w->kcontrols[i].private_value;
+
+		p->connect = 0;
+		/* since a virtual mux has no backing registers to
+		 * decide which path to connect, it will try to match
+		 * with the first enumeration.  This is to ensure
+		 * that the default mux choice (the first) will be
+		 * correctly powered up during initialization.
+		 */
+		if (!strcmp(p->name, e->texts[0]))
+			p->connect = 1;
+	}
+	break;
 	case snd_soc_dapm_value_mux: {
 		struct soc_enum *e = (struct soc_enum *)
 			w->kcontrols[i].private_value;
@@ -217,6 +253,7 @@
 	break;
 	/* does not effect routing - always connected */
 	case snd_soc_dapm_pga:
+	case snd_soc_dapm_out_drv:
 	case snd_soc_dapm_output:
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_input:
@@ -241,7 +278,7 @@
 }
 
 /* connect mux widget to its interconnecting audio paths */
-static int dapm_connect_mux(struct snd_soc_codec *codec,
+static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
 	struct snd_soc_dapm_path *path, const char *control_name,
 	const struct snd_kcontrol_new *kcontrol)
@@ -251,7 +288,7 @@
 
 	for (i = 0; i < e->max; i++) {
 		if (!(strcmp(control_name, e->texts[i]))) {
-			list_add(&path->list, &codec->dapm_paths);
+			list_add(&path->list, &dapm->card->paths);
 			list_add(&path->list_sink, &dest->sources);
 			list_add(&path->list_source, &src->sinks);
 			path->name = (char*)e->texts[i];
@@ -264,7 +301,7 @@
 }
 
 /* connect mixer widget to its interconnecting audio paths */
-static int dapm_connect_mixer(struct snd_soc_codec *codec,
+static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
 	struct snd_soc_dapm_path *path, const char *control_name)
 {
@@ -273,7 +310,7 @@
 	/* search for mixer kcontrol */
 	for (i = 0; i < dest->num_kcontrols; i++) {
 		if (!strcmp(control_name, dest->kcontrols[i].name)) {
-			list_add(&path->list, &codec->dapm_paths);
+			list_add(&path->list, &dapm->card->paths);
 			list_add(&path->list_sink, &dest->sources);
 			list_add(&path->list_source, &src->sinks);
 			path->name = dest->kcontrols[i].name;
@@ -290,6 +327,8 @@
 	int change, power;
 	unsigned int old, new;
 	struct snd_soc_codec *codec = widget->codec;
+	struct snd_soc_dapm_context *dapm = widget->dapm;
+	struct snd_soc_card *card = dapm->card;
 
 	/* check for valid widgets */
 	if (widget->reg < 0 || widget->id == snd_soc_dapm_input ||
@@ -309,24 +348,26 @@
 
 	change = old != new;
 	if (change) {
-		pop_dbg(codec->pop_time, "pop test %s : %s in %d ms\n",
+		pop_dbg(dapm->dev, card->pop_time,
+			"pop test %s : %s in %d ms\n",
 			widget->name, widget->power ? "on" : "off",
-			codec->pop_time);
-		pop_wait(codec->pop_time);
+			card->pop_time);
+		pop_wait(card->pop_time);
 		snd_soc_write(codec, widget->reg, new);
 	}
-	pr_debug("reg %x old %x new %x change %d\n", widget->reg,
-		 old, new, change);
+	dev_dbg(dapm->dev, "reg %x old %x new %x change %d\n", widget->reg,
+		old, new, change);
 	return change;
 }
 
 /* create new dapm mixer control */
-static int dapm_new_mixer(struct snd_soc_codec *codec,
+static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	int i, ret = 0;
 	size_t name_len;
 	struct snd_soc_dapm_path *path;
+	struct snd_card *card = dapm->codec->card->snd_card;
 
 	/* add kcontrol */
 	for (i = 0; i < w->num_kcontrols; i++) {
@@ -368,11 +409,11 @@
 
 			path->kcontrol = snd_soc_cnew(&w->kcontrols[i], w,
 				path->long_name);
-			ret = snd_ctl_add(codec->card->snd_card, path->kcontrol);
+			ret = snd_ctl_add(card, path->kcontrol);
 			if (ret < 0) {
-				printk(KERN_ERR "asoc: failed to add dapm kcontrol %s: %d\n",
-				       path->long_name,
-				       ret);
+				dev_err(dapm->dev,
+					"asoc: failed to add dapm kcontrol %s: %d\n",
+					path->long_name, ret);
 				kfree(path->long_name);
 				path->long_name = NULL;
 				return ret;
@@ -383,20 +424,22 @@
 }
 
 /* create new dapm mux control */
-static int dapm_new_mux(struct snd_soc_codec *codec,
+static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	struct snd_soc_dapm_path *path = NULL;
 	struct snd_kcontrol *kcontrol;
+	struct snd_card *card = dapm->codec->card->snd_card;
 	int ret = 0;
 
 	if (!w->num_kcontrols) {
-		printk(KERN_ERR "asoc: mux %s has no controls\n", w->name);
+		dev_err(dapm->dev, "asoc: mux %s has no controls\n", w->name);
 		return -EINVAL;
 	}
 
 	kcontrol = snd_soc_cnew(&w->kcontrols[0], w, w->name);
-	ret = snd_ctl_add(codec->card->snd_card, kcontrol);
+	ret = snd_ctl_add(card, kcontrol);
+
 	if (ret < 0)
 		goto err;
 
@@ -406,26 +449,27 @@
 	return ret;
 
 err:
-	printk(KERN_ERR "asoc: failed to add kcontrol %s\n", w->name);
+	dev_err(dapm->dev, "asoc: failed to add kcontrol %s\n", w->name);
 	return ret;
 }
 
 /* create new dapm volume control */
-static int dapm_new_pga(struct snd_soc_codec *codec,
+static int dapm_new_pga(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	if (w->num_kcontrols)
-		pr_err("asoc: PGA controls not supported: '%s'\n", w->name);
+		dev_err(w->dapm->dev,
+			"asoc: PGA controls not supported: '%s'\n", w->name);
 
 	return 0;
 }
 
 /* reset 'walked' bit for each dapm path */
-static inline void dapm_clear_walk(struct snd_soc_codec *codec)
+static inline void dapm_clear_walk(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_path *p;
 
-	list_for_each_entry(p, &codec->dapm_paths, list)
+	list_for_each_entry(p, &dapm->card->paths, list)
 		p->walked = 0;
 }
 
@@ -435,13 +479,14 @@
  */
 static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget)
 {
-	int level = snd_power_get_state(widget->codec->card->snd_card);
+	int level = snd_power_get_state(widget->dapm->codec->card->snd_card);
 
 	switch (level) {
 	case SNDRV_CTL_POWER_D3hot:
 	case SNDRV_CTL_POWER_D3cold:
 		if (widget->ignore_suspend)
-			pr_debug("%s ignoring suspend\n", widget->name);
+			dev_dbg(widget->dapm->dev, "%s ignoring suspend\n",
+				widget->name);
 		return widget->ignore_suspend;
 	default:
 		return 1;
@@ -572,7 +617,7 @@
 
 	/* call any power change event handlers */
 	if (w->event)
-		pr_debug("power %s event for %s flags %x\n",
+		dev_dbg(w->dapm->dev, "power %s event for %s flags %x\n",
 			 w->power ? "on" : "off",
 			 w->name, w->event_flags);
 
@@ -621,9 +666,9 @@
 	int in, out;
 
 	in = is_connected_input_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	out = is_connected_output_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	return out != 0 && in != 0;
 }
 
@@ -634,7 +679,7 @@
 
 	if (w->active) {
 		in = is_connected_input_ep(w);
-		dapm_clear_walk(w->codec);
+		dapm_clear_walk(w->dapm);
 		return in != 0;
 	} else {
 		return dapm_generic_check_power(w);
@@ -648,7 +693,7 @@
 
 	if (w->active) {
 		out = is_connected_output_ep(w);
-		dapm_clear_walk(w->codec);
+		dapm_clear_walk(w->dapm);
 		return out != 0;
 	} else {
 		return dapm_generic_check_power(w);
@@ -667,14 +712,22 @@
 		    !path->connected(path->source, path->sink))
 			continue;
 
-		if (path->sink && path->sink->power_check &&
+		if (!path->sink)
+			continue;
+
+		if (path->sink->force) {
+			power = 1;
+			break;
+		}
+
+		if (path->sink->power_check &&
 		    path->sink->power_check(path->sink)) {
 			power = 1;
 			break;
 		}
 	}
 
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 
 	return power;
 }
@@ -687,8 +740,8 @@
 		return sort[a->id] - sort[b->id];
 	if (a->reg != b->reg)
 		return a->reg - b->reg;
-	if (a->codec != b->codec)
-		return (unsigned long)a->codec - (unsigned long)b->codec;
+	if (a->dapm != b->dapm)
+		return (unsigned long)a->dapm - (unsigned long)b->dapm;
 
 	return 0;
 }
@@ -709,12 +762,57 @@
 	list_add_tail(&new_widget->power_list, list);
 }
 
+static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
+				 struct snd_soc_dapm_widget *w, int event)
+{
+	struct snd_soc_card *card = dapm->card;
+	const char *ev_name;
+	int power, ret;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		ev_name = "PRE_PMU";
+		power = 1;
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		ev_name = "POST_PMU";
+		power = 1;
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		ev_name = "PRE_PMD";
+		power = 0;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ev_name = "POST_PMD";
+		power = 0;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	if (w->power != power)
+		return;
+
+	if (w->event && (w->event_flags & event)) {
+		pop_dbg(dapm->dev, card->pop_time, "pop test : %s %s\n",
+			w->name, ev_name);
+		trace_snd_soc_dapm_widget_event_start(w, event);
+		ret = w->event(w, NULL, event);
+		trace_snd_soc_dapm_widget_event_done(w, event);
+		if (ret < 0)
+			pr_err("%s: %s event failed: %d\n",
+			       ev_name, w->name, ret);
+	}
+}
+
 /* Apply the coalesced changes from a DAPM sequence */
-static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
+static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
 				   struct list_head *pending)
 {
+	struct snd_soc_card *card = dapm->card;
 	struct snd_soc_dapm_widget *w;
-	int reg, power, ret;
+	int reg, power;
 	unsigned int value = 0;
 	unsigned int mask = 0;
 	unsigned int cur_mask;
@@ -735,64 +833,26 @@
 		if (power)
 			value |= cur_mask;
 
-		pop_dbg(codec->pop_time,
+		pop_dbg(dapm->dev, card->pop_time,
 			"pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
 			w->name, reg, value, mask);
 
-		/* power up pre event */
-		if (w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_PRE_PMU)) {
-			pop_dbg(codec->pop_time, "pop test : %s PRE_PMU\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU);
-			if (ret < 0)
-				pr_err("%s: pre event failed: %d\n",
-				       w->name, ret);
-		}
-
-		/* power down pre event */
-		if (!w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_PRE_PMD)) {
-			pop_dbg(codec->pop_time, "pop test : %s PRE_PMD\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD);
-			if (ret < 0)
-				pr_err("%s: pre event failed: %d\n",
-				       w->name, ret);
-		}
+		/* Check for events */
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMU);
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMD);
 	}
 
 	if (reg >= 0) {
-		pop_dbg(codec->pop_time,
+		pop_dbg(dapm->dev, card->pop_time,
 			"pop test : Applying 0x%x/0x%x to %x in %dms\n",
-			value, mask, reg, codec->pop_time);
-		pop_wait(codec->pop_time);
-		snd_soc_update_bits(codec, reg, mask, value);
+			value, mask, reg, card->pop_time);
+		pop_wait(card->pop_time);
+		snd_soc_update_bits(dapm->codec, reg, mask, value);
 	}
 
 	list_for_each_entry(w, pending, power_list) {
-		/* power up post event */
-		if (w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_POST_PMU)) {
-			pop_dbg(codec->pop_time, "pop test : %s POST_PMU\n",
-				w->name);
-			ret = w->event(w,
-				       NULL, SND_SOC_DAPM_POST_PMU);
-			if (ret < 0)
-				pr_err("%s: post event failed: %d\n",
-				       w->name, ret);
-		}
-
-		/* power down post event */
-		if (!w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_POST_PMD)) {
-			pop_dbg(codec->pop_time, "pop test : %s POST_PMD\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD);
-			if (ret < 0)
-				pr_err("%s: post event failed: %d\n",
-				       w->name, ret);
-		}
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMU);
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMD);
 	}
 }
 
@@ -804,26 +864,29 @@
  * Currently anything that requires more than a single write is not
  * handled.
  */
-static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list,
-			 int event, int sort[])
+static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
+			 struct list_head *list, int event, int sort[])
 {
 	struct snd_soc_dapm_widget *w, *n;
 	LIST_HEAD(pending);
 	int cur_sort = -1;
 	int cur_reg = SND_SOC_NOPM;
+	struct snd_soc_dapm_context *cur_dapm = NULL;
 	int ret;
 
 	list_for_each_entry_safe(w, n, list, power_list) {
 		ret = 0;
 
 		/* Do we need to apply any queued changes? */
-		if (sort[w->id] != cur_sort || w->reg != cur_reg) {
+		if (sort[w->id] != cur_sort || w->reg != cur_reg ||
+		    w->dapm != cur_dapm) {
 			if (!list_empty(&pending))
-				dapm_seq_run_coalesced(codec, &pending);
+				dapm_seq_run_coalesced(cur_dapm, &pending);
 
 			INIT_LIST_HEAD(&pending);
 			cur_sort = -1;
 			cur_reg = SND_SOC_NOPM;
+			cur_dapm = NULL;
 		}
 
 		switch (w->id) {
@@ -867,19 +930,55 @@
 			/* Queue it up for application */
 			cur_sort = sort[w->id];
 			cur_reg = w->reg;
+			cur_dapm = w->dapm;
 			list_move(&w->power_list, &pending);
 			break;
 		}
 
 		if (ret < 0)
-			pr_err("Failed to apply widget power: %d\n",
-			       ret);
+			dev_err(w->dapm->dev,
+				"Failed to apply widget power: %d\n", ret);
 	}
 
 	if (!list_empty(&pending))
-		dapm_seq_run_coalesced(codec, &pending);
+		dapm_seq_run_coalesced(cur_dapm, &pending);
 }
 
+static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
+{
+	struct snd_soc_dapm_update *update = dapm->update;
+	struct snd_soc_dapm_widget *w;
+	int ret;
+
+	if (!update)
+		return;
+
+	w = update->widget;
+
+	if (w->event &&
+	    (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
+		ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
+		if (ret != 0)
+			pr_err("%s DAPM pre-event failed: %d\n",
+			       w->name, ret);
+	}
+
+	ret = snd_soc_update_bits(w->codec, update->reg, update->mask,
+				  update->val);
+	if (ret < 0)
+		pr_err("%s DAPM update failed: %d\n", w->name, ret);
+
+	if (w->event &&
+	    (w->event_flags & SND_SOC_DAPM_POST_REG)) {
+		ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
+		if (ret != 0)
+			pr_err("%s DAPM post-event failed: %d\n",
+			       w->name, ret);
+	}
+}
+
 /*
  * Scan each dapm widget for complete audio path.
  * A complete path is a route that has valid endpoints i.e.:-
@@ -889,20 +988,26 @@
  *  o Input pin to Output pin (bypass, sidetone)
  *  o DAC to ADC (loopback).
  */
-static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
+static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
 {
-	struct snd_soc_card *card = codec->card;
+	struct snd_soc_card *card = dapm->codec->card;
 	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_context *d;
 	LIST_HEAD(up_list);
 	LIST_HEAD(down_list);
 	int ret = 0;
 	int power;
-	int sys_power = 0;
+
+	trace_snd_soc_dapm_start(card);
+
+	list_for_each_entry(d, &card->dapm_list, list)
+		if (d->n_widgets)
+			d->dev_power = 0;
 
 	/* Check which widgets we need to power and store them in
 	 * lists indicating if they should be powered up or down.
 	 */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &card->widgets, list) {
 		switch (w->id) {
 		case snd_soc_dapm_pre:
 			dapm_seq_insert(w, &down_list, dapm_down_seq);
@@ -920,11 +1025,13 @@
 			else
 				power = 1;
 			if (power)
-				sys_power = 1;
+				w->dapm->dev_power = 1;
 
 			if (w->power == power)
 				continue;
 
+			trace_snd_soc_dapm_widget_power(w, power);
+
 			if (power)
 				dapm_seq_insert(w, &up_list, dapm_up_seq);
 			else
@@ -938,26 +1045,26 @@
 	/* If there are no DAPM widgets then try to figure out power from the
 	 * event type.
 	 */
-	if (list_empty(&codec->dapm_widgets)) {
+	if (!dapm->n_widgets) {
 		switch (event) {
 		case SND_SOC_DAPM_STREAM_START:
 		case SND_SOC_DAPM_STREAM_RESUME:
-			sys_power = 1;
+			dapm->dev_power = 1;
 			break;
 		case SND_SOC_DAPM_STREAM_STOP:
-			sys_power = !!codec->active;
+			dapm->dev_power = !!dapm->codec->active;
 			break;
 		case SND_SOC_DAPM_STREAM_SUSPEND:
-			sys_power = 0;
+			dapm->dev_power = 0;
 			break;
 		case SND_SOC_DAPM_STREAM_NOP:
-			switch (codec->bias_level) {
+			switch (dapm->bias_level) {
 				case SND_SOC_BIAS_STANDBY:
 				case SND_SOC_BIAS_OFF:
-					sys_power = 0;
+					dapm->dev_power = 0;
 					break;
 				default:
-					sys_power = 1;
+					dapm->dev_power = 1;
 					break;
 			}
 			break;
@@ -966,52 +1073,71 @@
 		}
 	}
 
-	if (sys_power && codec->bias_level == SND_SOC_BIAS_OFF) {
-		ret = snd_soc_dapm_set_bias_level(card, codec,
-						  SND_SOC_BIAS_STANDBY);
-		if (ret != 0)
-			pr_err("Failed to turn on bias: %d\n", ret);
-	}
+	list_for_each_entry(d, &dapm->card->dapm_list, list) {
+		if (d->dev_power && d->bias_level == SND_SOC_BIAS_OFF) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_STANDBY);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to turn on bias: %d\n", ret);
+		}
 
-	/* If we're changing to all on or all off then prepare */
-	if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) ||
-	    (!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_PREPARE);
-		if (ret != 0)
-			pr_err("Failed to prepare bias: %d\n", ret);
+		/* If we're changing to all on or all off then prepare */
+		if ((d->dev_power && d->bias_level == SND_SOC_BIAS_STANDBY) ||
+		    (!d->dev_power && d->bias_level == SND_SOC_BIAS_ON)) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_PREPARE);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to prepare bias: %d\n", ret);
+		}
 	}
 
 	/* Power down widgets first; try to avoid amplifying pops. */
-	dapm_seq_run(codec, &down_list, event, dapm_down_seq);
+	dapm_seq_run(dapm, &down_list, event, dapm_down_seq);
+
+	dapm_widget_update(dapm);
 
 	/* Now power up. */
-	dapm_seq_run(codec, &up_list, event, dapm_up_seq);
+	dapm_seq_run(dapm, &up_list, event, dapm_up_seq);
 
-	/* If we just powered the last thing off drop to standby bias */
-	if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_STANDBY);
-		if (ret != 0)
-			pr_err("Failed to apply standby bias: %d\n", ret);
+	list_for_each_entry(d, &dapm->card->dapm_list, list) {
+		/* If we just powered the last thing off drop to standby bias */
+		if (d->bias_level == SND_SOC_BIAS_PREPARE && !d->dev_power) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_STANDBY);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to apply standby bias: %d\n",
+					ret);
+		}
+
+		/* If we're in standby and can support bias off then do that */
+		if (d->bias_level == SND_SOC_BIAS_STANDBY &&
+		    d->idle_bias_off) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_OFF);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to turn off bias: %d\n", ret);
+		}
+
+		/* If we just powered up then move to active bias */
+		if (d->bias_level == SND_SOC_BIAS_PREPARE && d->dev_power) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_ON);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to apply active bias: %d\n",
+					ret);
+		}
 	}
 
-	/* If we're in standby and can support bias off then do that */
-	if (codec->bias_level == SND_SOC_BIAS_STANDBY &&
-	    codec->idle_bias_off) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF);
-		if (ret != 0)
-			pr_err("Failed to turn off bias: %d\n", ret);
-	}
+	pop_dbg(dapm->dev, card->pop_time,
+		"DAPM sequencing finished, waiting %dms\n", card->pop_time);
+	pop_wait(card->pop_time);
 
-	/* If we just powered up then move to active bias */
-	if (codec->bias_level == SND_SOC_BIAS_PREPARE && sys_power) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_ON);
-		if (ret != 0)
-			pr_err("Failed to apply active bias: %d\n", ret);
-	}
-
-	pop_dbg(codec->pop_time, "DAPM sequencing finished, waiting %dms\n",
-		codec->pop_time);
-	pop_wait(codec->pop_time);
+	trace_snd_soc_dapm_done(card);
 
 	return 0;
 }
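Reviewer note: the rework above walks every DAPM context on the card through the same bias ladder (OFF -> STANDBY -> PREPARE -> ON and back), one step per pass, rather than driving a single codec. A minimal userspace sketch of that ordering, assuming made-up names (bias_step, dev_power) and ignoring the optional idle_bias_off drop from STANDBY to OFF:

    #include <stdio.h>

    /* Mirrors the SND_SOC_BIAS_* ordering: OFF < STANDBY < PREPARE < ON. */
    enum bias { BIAS_OFF, BIAS_STANDBY, BIAS_PREPARE, BIAS_ON };

    /* One step of the ladder for a single context: never jump levels. */
    static enum bias bias_step(enum bias cur, int dev_power)
    {
    	if (dev_power) {
    		if (cur == BIAS_OFF)
    			return BIAS_STANDBY;	/* turn on bias first */
    		if (cur == BIAS_STANDBY)
    			return BIAS_PREPARE;	/* prepare before going active */
    		return BIAS_ON;			/* widgets powered, go active */
    	}
    	if (cur == BIAS_ON)
    		return BIAS_PREPARE;		/* prepare the power-down */
    	if (cur == BIAS_PREPARE)
    		return BIAS_STANDBY;		/* everything off, drop to standby */
    	return cur;				/* STANDBY/OFF stay put in this sketch */
    }

    int main(void)
    {
    	static const char *name[] = { "OFF", "STANDBY", "PREPARE", "ON" };
    	enum bias b = BIAS_OFF;

    	/* Power-up walk: OFF -> STANDBY -> PREPARE -> ON */
    	for (int i = 0; i < 3; i++) {
    		b = bias_step(b, 1);
    		printf("up:   %s\n", name[b]);
    	}
    	/* Power-down walk: ON -> PREPARE -> STANDBY */
    	for (int i = 0; i < 2; i++) {
    		b = bias_step(b, 0);
    		printf("down: %s\n", name[b]);
    	}
    	return 0;
    }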
@@ -1038,9 +1164,9 @@
 		return -ENOMEM;
 
 	in = is_connected_input_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	out = is_connected_output_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 
 	ret = snprintf(buf, PAGE_SIZE, "%s: %s  in %d out %d",
 		       w->name, w->power ? "On" : "Off", in, out);
@@ -1090,29 +1216,29 @@
 	.llseek = default_llseek,
 };
 
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 	struct dentry *d;
 
-	if (!codec->debugfs_dapm)
+	if (!dapm->debugfs_dapm)
 		return;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
-		if (!w->name)
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (!w->name || w->dapm != dapm)
 			continue;
 
 		d = debugfs_create_file(w->name, 0444,
-					codec->debugfs_dapm, w,
+					dapm->debugfs_dapm, w,
 					&dapm_widget_power_fops);
 		if (!d)
-			printk(KERN_WARNING
-			       "ASoC: Failed to create %s debugfs file\n",
-			       w->name);
+			dev_warn(w->dapm->dev,
+				"ASoC: Failed to create %s debugfs file\n",
+				w->name);
 	}
 }
 #else
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm)
 {
 }
 #endif
@@ -1126,6 +1252,7 @@
 	int found = 0;
 
 	if (widget->id != snd_soc_dapm_mux &&
+	    widget->id != snd_soc_dapm_virt_mux &&
 	    widget->id != snd_soc_dapm_value_mux)
 		return -ENODEV;
 
@@ -1133,7 +1260,7 @@
 		return 0;
 
 	/* find dapm widget path assoc with kcontrol */
-	list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+	list_for_each_entry(path, &widget->dapm->card->paths, list) {
 		if (path->kcontrol != kcontrol)
 			continue;
 
@@ -1149,7 +1276,7 @@
 	}
 
 	if (found)
-		dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP);
+		dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
 
 	return 0;
 }
@@ -1167,7 +1294,7 @@
 		return -ENODEV;
 
 	/* find dapm widget path assoc with kcontrol */
-	list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+	list_for_each_entry(path, &widget->dapm->card->paths, list) {
 		if (path->kcontrol != kcontrol)
 			continue;
 
@@ -1178,7 +1305,7 @@
 	}
 
 	if (found)
-		dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP);
+		dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
 
 	return 0;
 }
@@ -1194,7 +1321,9 @@
 	int count = 0;
 	char *state = "not set";
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &codec->card->widgets, list) {
+		if (w->dapm != &codec->dapm)
+			continue;
 
 		/* only display widgets that burn power */
 		switch (w->id) {
@@ -1206,6 +1335,7 @@
 		case snd_soc_dapm_dac:
 		case snd_soc_dapm_adc:
 		case snd_soc_dapm_pga:
+		case snd_soc_dapm_out_drv:
 		case snd_soc_dapm_mixer:
 		case snd_soc_dapm_mixer_named_ctl:
 		case snd_soc_dapm_supply:
@@ -1218,7 +1348,7 @@
 		}
 	}
 
-	switch (codec->bias_level) {
+	switch (codec->dapm.bias_level) {
 	case SND_SOC_BIAS_ON:
 		state = "On";
 		break;
@@ -1250,31 +1380,50 @@
 }
 
 /* free all dapm widgets and resources */
-static void dapm_free_widgets(struct snd_soc_codec *codec)
+static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
 	struct snd_soc_dapm_path *p, *next_p;
 
-	list_for_each_entry_safe(w, next_w, &codec->dapm_widgets, list) {
+	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		list_del(&w->list);
+		/*
+		 * Remove the source and sink paths associated with this widget.
+		 * While removing a path, drop the reference to it from both the
+		 * source and the sink widget so that each path is removed only once.
+		 */
+		list_for_each_entry_safe(p, next_p, &w->sources, list_sink) {
+			list_del(&p->list_sink);
+			list_del(&p->list_source);
+			list_del(&p->list);
+			kfree(p->long_name);
+			kfree(p);
+		}
+		list_for_each_entry_safe(p, next_p, &w->sinks, list_source) {
+			list_del(&p->list_sink);
+			list_del(&p->list_source);
+			list_del(&p->list);
+			kfree(p->long_name);
+			kfree(p);
+		}
+		kfree(w->name);
 		kfree(w);
 	}
-
-	list_for_each_entry_safe(p, next_p, &codec->dapm_paths, list) {
-		list_del(&p->list);
-		kfree(p->long_name);
-		kfree(p);
-	}
 }
 
-static int snd_soc_dapm_set_pin(struct snd_soc_codec *codec,
+static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
 				const char *pin, int status)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
-			pr_debug("dapm: %s: pin %s\n", codec->name, pin);
+			dev_dbg(w->dapm->dev, "dapm: pin %s = %d\n",
+				pin, status);
 			w->connected = status;
 			/* Allow disabling of forced pins */
 			if (status == 0)
@@ -1283,46 +1432,72 @@
 		}
 	}
 
-	pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 
 /**
  * snd_soc_dapm_sync - scan and power dapm paths
- * @codec: audio codec
+ * @dapm: DAPM context
  *
  * Walks all dapm audio paths and powers widgets according to their
  * stream or path usage.
  *
  * Returns 0 for success.
  */
-int snd_soc_dapm_sync(struct snd_soc_codec *codec)
+int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
 {
-	return dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP);
+	return dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
 
-static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
+static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
 				  const struct snd_soc_dapm_route *route)
 {
 	struct snd_soc_dapm_path *path;
 	struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
-	const char *sink = route->sink;
+	struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
+	const char *sink;
 	const char *control = route->control;
-	const char *source = route->source;
+	const char *source;
+	char prefixed_sink[80];
+	char prefixed_source[80];
 	int ret = 0;
 
-	/* find src and dest widgets */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	if (dapm->codec->name_prefix) {
+		snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
+			 dapm->codec->name_prefix, route->sink);
+		sink = prefixed_sink;
+		snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
+			 dapm->codec->name_prefix, route->source);
+		source = prefixed_source;
+	} else {
+		sink = route->sink;
+		source = route->source;
+	}
 
+	/*
+	 * find src and dest widgets over all widgets but favor a widget from
+	 * current DAPM context
+	 */
+	list_for_each_entry(w, &dapm->card->widgets, list) {
 		if (!wsink && !(strcmp(w->name, sink))) {
-			wsink = w;
+			wtsink = w;
+			if (w->dapm == dapm)
+				wsink = w;
 			continue;
 		}
 		if (!wsource && !(strcmp(w->name, source))) {
-			wsource = w;
+			wtsource = w;
+			if (w->dapm == dapm)
+				wsource = w;
 		}
 	}
+	/* use widget from another DAPM context if not found from this */
+	if (!wsink)
+		wsink = wtsink;
+	if (!wsource)
+		wsource = wtsource;
 
 	if (wsource == NULL || wsink == NULL)
 		return -ENODEV;
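The name_prefix handling above is just an snprintf into fixed 80-byte scratch buffers before the widget lookup. A standalone sketch of the same name construction; route_name and the example prefix/widget strings are invented for illustration:

    #include <stdio.h>

    /* Build the lookup name for a routed widget: "<prefix> <name>" or just "<name>". */
    static void route_name(char *buf, size_t len, const char *prefix, const char *name)
    {
    	if (prefix)
    		snprintf(buf, len, "%s %s", prefix, name);
    	else
    		snprintf(buf, len, "%s", name);
    }

    int main(void)
    {
    	char sink[80], source[80];

    	route_name(sink, sizeof(sink), "Left", "Headphone Jack");
    	route_name(source, sizeof(source), "Left", "HPOUT");
    	printf("route: %s <- %s\n", sink, source);

    	route_name(sink, sizeof(sink), NULL, "Speaker");
    	printf("unprefixed: %s\n", sink);
    	return 0;
    }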
@@ -1356,7 +1531,7 @@
 
 	/* connect static paths */
 	if (control == NULL) {
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 1;
@@ -1368,6 +1543,7 @@
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_dac:
 	case snd_soc_dapm_pga:
+	case snd_soc_dapm_out_drv:
 	case snd_soc_dapm_input:
 	case snd_soc_dapm_output:
 	case snd_soc_dapm_micbias:
@@ -1377,14 +1553,15 @@
 	case snd_soc_dapm_supply:
 	case snd_soc_dapm_aif_in:
 	case snd_soc_dapm_aif_out:
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 1;
 		return 0;
 	case snd_soc_dapm_mux:
+	case snd_soc_dapm_virt_mux:
 	case snd_soc_dapm_value_mux:
-		ret = dapm_connect_mux(codec, wsource, wsink, path, control,
+		ret = dapm_connect_mux(dapm, wsource, wsink, path, control,
 			&wsink->kcontrols[0]);
 		if (ret != 0)
 			goto err;
@@ -1392,7 +1569,7 @@
 	case snd_soc_dapm_switch:
 	case snd_soc_dapm_mixer:
 	case snd_soc_dapm_mixer_named_ctl:
-		ret = dapm_connect_mixer(codec, wsource, wsink, path, control);
+		ret = dapm_connect_mixer(dapm, wsource, wsink, path, control);
 		if (ret != 0)
 			goto err;
 		break;
@@ -1400,7 +1577,7 @@
 	case snd_soc_dapm_mic:
 	case snd_soc_dapm_line:
 	case snd_soc_dapm_spk:
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 0;
@@ -1409,15 +1586,15 @@
 	return 0;
 
 err:
-	printk(KERN_WARNING "asoc: no dapm match for %s --> %s --> %s\n", source,
-		control, sink);
+	dev_warn(dapm->dev, "asoc: no dapm match for %s --> %s --> %s\n",
+		 source, control, sink);
 	kfree(path);
 	return ret;
 }
 
 /**
  * snd_soc_dapm_add_routes - Add routes between DAPM widgets
- * @codec: codec
+ * @dapm: DAPM context
  * @route: audio routes
  * @num: number of routes
  *
@@ -1428,17 +1605,16 @@
  * Returns 0 for success else error. On error all resources can be freed
  * with a call to snd_soc_card_free().
  */
-int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
+int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
 			    const struct snd_soc_dapm_route *route, int num)
 {
 	int i, ret;
 
 	for (i = 0; i < num; i++) {
-		ret = snd_soc_dapm_add_route(codec, route);
+		ret = snd_soc_dapm_add_route(dapm, route);
 		if (ret < 0) {
-			printk(KERN_ERR "Failed to add route %s->%s\n",
-			       route->source,
-			       route->sink);
+			dev_err(dapm->dev, "Failed to add route %s->%s\n",
+				route->source, route->sink);
 			return ret;
 		}
 		route++;
@@ -1450,17 +1626,18 @@
 
 /**
  * snd_soc_dapm_new_widgets - add new dapm widgets
- * @codec: audio codec
+ * @dapm: DAPM context
  *
  * Checks the codec for any new dapm widgets and creates them if found.
  *
  * Returns 0 for success.
  */
-int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec)
+int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
+	unsigned int val;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list)
+	list_for_each_entry(w, &dapm->card->widgets, list)
 	{
 		if (w->new)
 			continue;
@@ -1470,12 +1647,13 @@
 		case snd_soc_dapm_mixer:
 		case snd_soc_dapm_mixer_named_ctl:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mixer(codec, w);
+			dapm_new_mixer(dapm, w);
 			break;
 		case snd_soc_dapm_mux:
+		case snd_soc_dapm_virt_mux:
 		case snd_soc_dapm_value_mux:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mux(codec, w);
+			dapm_new_mux(dapm, w);
 			break;
 		case snd_soc_dapm_adc:
 		case snd_soc_dapm_aif_out:
@@ -1486,8 +1664,9 @@
 			w->power_check = dapm_dac_check_power;
 			break;
 		case snd_soc_dapm_pga:
+		case snd_soc_dapm_out_drv:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_pga(codec, w);
+			dapm_new_pga(dapm, w);
 			break;
 		case snd_soc_dapm_input:
 		case snd_soc_dapm_output:
@@ -1505,10 +1684,22 @@
 		case snd_soc_dapm_post:
 			break;
 		}
+
+		/* Read the initial power state from the device */
+		if (w->reg >= 0) {
+			val = snd_soc_read(w->codec, w->reg);
+			val &= 1 << w->shift;
+			if (w->invert)
+				val = !val;
+
+			if (val)
+				w->power = 1;
+		}
+
 		w->new = 1;
 	}
 
-	dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP);
+	dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
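The new readback derives a widget's initial power state from a single register bit: mask out the bit at w->shift and flip it for inverted controls. A small sketch, with the register value and field layout invented:

    #include <stdio.h>

    /* Initial power = selected bit of the register, optionally inverted. */
    static int initial_power(unsigned int reg_val, unsigned int shift, int invert)
    {
    	unsigned int val = reg_val & (1u << shift);

    	if (invert)
    		val = !val;
    	return val ? 1 : 0;
    }

    int main(void)
    {
    	/* bit 3 set, active-high control -> powered */
    	printf("%d\n", initial_power(0x08, 3, 0));
    	/* bit 3 set, but the control is inverted (1 = disabled) -> off */
    	printf("%d\n", initial_power(0x08, 3, 1));
    	return 0;
    }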
@@ -1569,32 +1760,25 @@
 		(struct soc_mixer_control *)kcontrol->private_value;
 	unsigned int reg = mc->reg;
 	unsigned int shift = mc->shift;
-	unsigned int rshift = mc->rshift;
 	int max = mc->max;
 	unsigned int mask = (1 << fls(max)) - 1;
 	unsigned int invert = mc->invert;
-	unsigned int val, val2, val_mask;
-	int connect;
-	int ret;
+	unsigned int val;
+	int connect, change;
+	struct snd_soc_dapm_update update;
 
 	val = (ucontrol->value.integer.value[0] & mask);
 
 	if (invert)
 		val = max - val;
-	val_mask = mask << shift;
+	mask = mask << shift;
 	val = val << shift;
-	if (shift != rshift) {
-		val2 = (ucontrol->value.integer.value[1] & mask);
-		if (invert)
-			val2 = max - val2;
-		val_mask |= mask << rshift;
-		val |= val2 << rshift;
-	}
 
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 
-	if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
+	change = snd_soc_test_bits(widget->codec, reg, mask, val);
+	if (change) {
 		if (val)
 			/* new connection */
 			connect = invert ? 0:1;
@@ -1602,28 +1786,20 @@
 			/* old connection must be powered down */
 			connect = invert ? 1:0;
 
+		update.kcontrol = kcontrol;
+		update.widget = widget;
+		update.reg = reg;
+		update.mask = mask;
+		update.val = val;
+		widget->dapm->update = &update;
+
 		dapm_mixer_update_power(widget, kcontrol, connect);
+
+		widget->dapm->update = NULL;
 	}
 
-	if (widget->event) {
-		if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-			ret = widget->event(widget, kcontrol,
-						SND_SOC_DAPM_PRE_REG);
-			if (ret < 0) {
-				ret = 1;
-				goto out;
-			}
-		}
-		ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-		if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-			ret = widget->event(widget, kcontrol,
-						SND_SOC_DAPM_POST_REG);
-	} else
-		ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
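For reference, the value/mask pair handed to snd_soc_test_bits() above is built by clamping the user value to the control width, inverting against max where requested, and shifting both into place. A sketch under those assumptions; fls_() is a userspace stand-in for the kernel's fls(), and the field parameters are invented:

    #include <stdio.h>

    /* fls(): 1-based index of the most significant set bit, 0 for 0. */
    static int fls_(unsigned int x)
    {
    	return x ? 32 - __builtin_clz(x) : 0;
    }

    struct field { unsigned int shift; int max; int invert; };

    /* Build the value/mask pair a register update would use. */
    static void volsw_bits(const struct field *f, long user_val,
    		       unsigned int *val, unsigned int *mask)
    {
    	unsigned int m = (1u << fls_(f->max)) - 1;	/* control-width mask */
    	unsigned int v = (unsigned int)user_val & m;

    	if (f->invert)
    		v = f->max - v;				/* inverted controls count down */
    	*mask = m << f->shift;
    	*val = v << f->shift;
    }

    int main(void)
    {
    	struct field f = { .shift = 4, .max = 1, .invert = 0 };
    	unsigned int val, mask;

    	volsw_bits(&f, 1, &val, &mask);
    	printf("val=0x%x mask=0x%x\n", val, mask);	/* val=0x10 mask=0x10 */
    	return 0;
    }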
 
@@ -1671,7 +1847,7 @@
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned int val, mux, change;
 	unsigned int mask, bitmask;
-	int ret = 0;
+	struct snd_soc_dapm_update update;
 
 	for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
 		;
@@ -1690,24 +1866,20 @@
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 	change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+	widget->dapm->update = &update;
+
 	dapm_mux_update_power(widget, kcontrol, change, mux, e);
 
-	if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_PRE_REG);
-		if (ret < 0)
-			goto out;
-	}
+	widget->dapm->update = NULL;
 
-	ret = snd_soc_update_bits(widget->codec, e->reg, mask, val);
-
-	if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_POST_REG);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double);
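The `for (bitmask = 1; bitmask < e->max; bitmask <<= 1);` idiom above simply rounds the number of enum items up to the next power of two; the register field mask is then bitmask - 1, shifted into place. A quick standalone check of that rounding:

    #include <stdio.h>

    /* Smallest power of two >= nitems (the field mask is then pow2 - 1). */
    static unsigned int enum_bitmask(unsigned int nitems)
    {
    	unsigned int bitmask;

    	for (bitmask = 1; bitmask < nitems; bitmask <<= 1)
    		;
    	return bitmask;
    }

    int main(void)
    {
    	for (unsigned int n = 1; n <= 9; n++)
    		printf("%u items -> mask 0x%x\n", n, enum_bitmask(n) - 1);
    	return 0;
    }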
 
@@ -1819,7 +1991,7 @@
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned int val, mux, change;
 	unsigned int mask;
-	int ret = 0;
+	struct snd_soc_dapm_update update;
 
 	if (ucontrol->value.enumerated.item[0] > e->max - 1)
 		return -EINVAL;
@@ -1836,24 +2008,20 @@
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 	change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+	widget->dapm->update = &update;
+
 	dapm_mux_update_power(widget, kcontrol, change, mux, e);
 
-	if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_PRE_REG);
-		if (ret < 0)
-			goto out;
-	}
+	widget->dapm->update = NULL;
 
-	ret = snd_soc_update_bits(widget->codec, e->reg, mask, val);
-
-	if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_POST_REG);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_value_enum_double);
 
@@ -1892,7 +2060,7 @@
 	mutex_lock(&codec->mutex);
 
 	ucontrol->value.integer.value[0] =
-		snd_soc_dapm_get_pin_status(codec, pin);
+		snd_soc_dapm_get_pin_status(&codec->dapm, pin);
 
 	mutex_unlock(&codec->mutex);
 
@@ -1915,11 +2083,11 @@
 	mutex_lock(&codec->mutex);
 
 	if (ucontrol->value.integer.value[0])
-		snd_soc_dapm_enable_pin(codec, pin);
+		snd_soc_dapm_enable_pin(&codec->dapm, pin);
 	else
-		snd_soc_dapm_disable_pin(codec, pin);
+		snd_soc_dapm_disable_pin(&codec->dapm, pin);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	mutex_unlock(&codec->mutex);
 
@@ -1929,26 +2097,43 @@
 
 /**
  * snd_soc_dapm_new_control - create new dapm control
- * @codec: audio codec
+ * @dapm: DAPM context
  * @widget: widget template
  *
  * Creates a new dapm control based upon the template.
  *
  * Returns 0 for success else error.
  */
-int snd_soc_dapm_new_control(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget)
 {
 	struct snd_soc_dapm_widget *w;
+	size_t name_len;
 
 	if ((w = dapm_cnew_widget(widget)) == NULL)
 		return -ENOMEM;
 
-	w->codec = codec;
+	name_len = strlen(widget->name) + 1;
+	if (dapm->codec->name_prefix)
+		name_len += 1 + strlen(dapm->codec->name_prefix);
+	w->name = kmalloc(name_len, GFP_KERNEL);
+	if (w->name == NULL) {
+		kfree(w);
+		return -ENOMEM;
+	}
+	if (dapm->codec->name_prefix)
+		snprintf(w->name, name_len, "%s %s",
+			dapm->codec->name_prefix, widget->name);
+	else
+		snprintf(w->name, name_len, "%s", widget->name);
+
+	dapm->n_widgets++;
+	w->dapm = dapm;
+	w->codec = dapm->codec;
 	INIT_LIST_HEAD(&w->sources);
 	INIT_LIST_HEAD(&w->sinks);
 	INIT_LIST_HEAD(&w->list);
-	list_add(&w->list, &codec->dapm_widgets);
+	list_add(&w->list, &dapm->card->widgets);
 
 	/* machine layer sets up unconnected pins and insertions */
 	w->connected = 1;
@@ -1958,7 +2143,7 @@
 
 /**
  * snd_soc_dapm_new_controls - create new dapm controls
- * @codec: audio codec
+ * @dapm: DAPM context
  * @widget: widget array
  * @num: number of widgets
  *
@@ -1966,18 +2151,18 @@
  *
  * Returns 0 for success else error.
  */
-int snd_soc_dapm_new_controls(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget,
 	int num)
 {
 	int i, ret;
 
 	for (i = 0; i < num; i++) {
-		ret = snd_soc_dapm_new_control(codec, widget);
+		ret = snd_soc_dapm_new_control(dapm, widget);
 		if (ret < 0) {
-			printk(KERN_ERR
-			       "ASoC: Failed to create DAPM control %s: %d\n",
-			       widget->name, ret);
+			dev_err(dapm->dev,
+				"ASoC: Failed to create DAPM control %s: %d\n",
+				widget->name, ret);
 			return ret;
 		}
 		widget++;
@@ -1986,34 +2171,17 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls);
 
-
-/**
- * snd_soc_dapm_stream_event - send a stream event to the dapm core
- * @codec: audio codec
- * @stream: stream name
- * @event: stream event
- *
- * Sends a stream event to the dapm core. The core then makes any
- * necessary widget power changes.
- *
- * Returns 0 for success else error.
- */
-int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
+static void soc_dapm_stream_event(struct snd_soc_dapm_context *dapm,
 	const char *stream, int event)
 {
-	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dapm_widget *w;
 
-	if (stream == NULL)
-		return 0;
-
-	mutex_lock(&codec->mutex);
-	list_for_each_entry(w, &codec->dapm_widgets, list)
+	list_for_each_entry(w, &dapm->card->widgets, list)
 	{
-		if (!w->sname)
+		if (!w->sname || w->dapm != dapm)
 			continue;
-		pr_debug("widget %s\n %s stream %s event %d\n",
-			 w->name, w->sname, stream, event);
+		dev_dbg(w->dapm->dev, "widget %s\n %s stream %s event %d\n",
+			w->name, w->sname, stream, event);
 		if (strstr(w->sname, stream)) {
 			switch(event) {
 			case SND_SOC_DAPM_STREAM_START:
@@ -2031,7 +2199,30 @@
 		}
 	}
 
-	dapm_power_widgets(codec, event);
+	dapm_power_widgets(dapm, event);
+}
+
+/**
+ * snd_soc_dapm_stream_event - send a stream event to the dapm core
+ * @rtd: PCM runtime data
+ * @stream: stream name
+ * @event: stream event
+ *
+ * Sends a stream event to the dapm core. The core then makes any
+ * necessary widget power changes.
+ *
+ * Returns 0 for success else error.
+ */
+int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
+	const char *stream, int event)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+
+	if (stream == NULL)
+		return 0;
+
+	mutex_lock(&codec->mutex);
+	soc_dapm_stream_event(&codec->dapm, stream, event);
 	mutex_unlock(&codec->mutex);
 	return 0;
 }
@@ -2039,7 +2230,7 @@
 
 /**
  * snd_soc_dapm_enable_pin - enable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Enables input/output pin and its parents or children widgets iff there is
@@ -2047,15 +2238,15 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 1);
+	return snd_soc_dapm_set_pin(dapm, pin, 1);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
 
 /**
  * snd_soc_dapm_force_enable_pin - force a pin to be enabled
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Enables input/output pin regardless of any other state.  This is
@@ -2065,42 +2256,47 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
+				  const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
-			pr_debug("dapm: %s: pin %s\n", codec->name, pin);
+			dev_dbg(w->dapm->dev,
+				"dapm: force enable pin %s\n", pin);
 			w->connected = 1;
 			w->force = 1;
 			return 0;
 		}
 	}
 
-	pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin);
 
 /**
  * snd_soc_dapm_disable_pin - disable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Disables input/output pin and its parents or children widgets.
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
+			     const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 0);
+	return snd_soc_dapm_set_pin(dapm, pin, 0);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin);
 
 /**
  * snd_soc_dapm_nc_pin - permanently disable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Marks the specified pin as being not connected, disabling it along
@@ -2112,26 +2308,29 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 0);
+	return snd_soc_dapm_set_pin(dapm, pin, 0);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin);
 
 /**
  * snd_soc_dapm_get_pin_status - get audio pin status
- * @codec: audio codec
+ * @dapm: DAPM context
  * @pin: audio signal pin endpoint (or start point)
  *
  * Get audio pin status - connected or disconnected.
  *
  * Returns 1 for connected otherwise 0.
  */
-int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
+				const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin))
 			return w->connected;
 	}
@@ -2142,7 +2341,7 @@
 
 /**
  * snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint
- * @codec: audio codec
+ * @dapm: DAPM context
  * @pin: audio signal pin endpoint (or start point)
  *
  * Mark the given endpoint or pin as ignoring suspend.  When the
@@ -2151,18 +2350,21 @@
  * normal means at suspend time, it will not be turned on if it was not
  * already enabled.
  */
-int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
+				const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
 			w->ignore_suspend = 1;
 			return 0;
 		}
 	}
 
-	pr_err("Unknown DAPM pin: %s\n", pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
@@ -2173,20 +2375,23 @@
  *
  * Free all dapm widgets and resources.
  */
-void snd_soc_dapm_free(struct snd_soc_codec *codec)
+void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm)
 {
-	snd_soc_dapm_sys_remove(codec->dev);
-	dapm_free_widgets(codec);
+	snd_soc_dapm_sys_remove(dapm->dev);
+	dapm_free_widgets(dapm);
+	list_del(&dapm->list);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
 
-static void soc_dapm_shutdown_codec(struct snd_soc_codec *codec)
+static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 	LIST_HEAD(down_list);
 	int powerdown = 0;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (w->power) {
 			dapm_seq_insert(w, &down_list, dapm_down_seq);
 			w->power = 0;
@@ -2198,9 +2403,9 @@
 	 * standby.
 	 */
 	if (powerdown) {
-		snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_PREPARE);
-		dapm_seq_run(codec, &down_list, 0, dapm_down_seq);
-		snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_STANDBY);
+		snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_PREPARE);
+		dapm_seq_run(dapm, &down_list, 0, dapm_down_seq);
+		snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_STANDBY);
 	}
 }
 
@@ -2211,10 +2416,10 @@
 {
 	struct snd_soc_codec *codec;
 
-	list_for_each_entry(codec, &card->codec_dev_list, list)
-		soc_dapm_shutdown_codec(codec);
-
-	snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF);
+	list_for_each_entry(codec, &card->codec_dev_list, list) {
+		soc_dapm_shutdown_codec(&codec->dapm);
+		snd_soc_dapm_set_bias_level(card, &codec->dapm, SND_SOC_BIAS_OFF);
+	}
 }
 
 /* Module information */
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 8a0a920..ac5a5bc 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -13,11 +13,11 @@
 
 #include <sound/jack.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <trace/events/asoc.h>
 
 /**
  * snd_soc_jack_new - Create a new jack
@@ -60,14 +60,18 @@
 void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
 {
 	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_jack_pin *pin;
 	int enable;
 	int oldstatus;
 
+	trace_snd_soc_jack_report(jack, mask, status);
+
 	if (!jack)
 		return;
 
 	codec = jack->codec;
+	dapm =  &codec->dapm;
 
 	mutex_lock(&codec->mutex);
 
@@ -81,6 +85,8 @@
 	if (mask && (jack->status == oldstatus))
 		goto out;
 
+	trace_snd_soc_jack_notify(jack, status);
+
 	list_for_each_entry(pin, &jack->pins, list) {
 		enable = pin->mask & jack->status;
 
@@ -88,15 +94,15 @@
 			enable = !enable;
 
 		if (enable)
-			snd_soc_dapm_enable_pin(codec, pin->pin);
+			snd_soc_dapm_enable_pin(dapm, pin->pin);
 		else
-			snd_soc_dapm_disable_pin(codec, pin->pin);
+			snd_soc_dapm_disable_pin(dapm, pin->pin);
 	}
 
 	/* Report before the DAPM sync to help users updating micbias status */
 	blocking_notifier_call_chain(&jack->notifier, status, NULL);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_jack_report(jack->jack, status);
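The pin handling in snd_soc_jack_report() reduces to one mask test per pin plus an optional inversion before the enable/disable call. A sketch with an invented pin table (headphone insertion muting an inverted speaker pin):

    #include <stdio.h>

    struct jack_pin { const char *name; int mask; int invert; };

    /* Decide whether a pin should be enabled for the reported jack status. */
    static int pin_enabled(const struct jack_pin *pin, int status)
    {
    	int enable = pin->mask & status;

    	if (pin->invert)
    		enable = !enable;
    	return !!enable;
    }

    int main(void)
    {
    	struct jack_pin pins[] = {
    		{ "Headphone", 0x1, 0 },
    		{ "Speaker",   0x1, 1 },	/* inverted: on only when no headphone */
    	};
    	int status = 0x1;	/* headphone present */

    	for (unsigned int i = 0; i < sizeof(pins) / sizeof(pins[0]); i++)
    		printf("%-9s -> %s\n", pins[i].name,
    		       pin_enabled(&pins[i], status) ? "enable" : "disable");
    	return 0;
    }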
 
@@ -207,6 +213,12 @@
 static irqreturn_t gpio_handler(int irq, void *data)
 {
 	struct snd_soc_jack_gpio *gpio = data;
+	struct device *dev = gpio->jack->codec->card->dev;
+
+	trace_snd_soc_jack_irq(gpio->name);
+
+	if (device_may_wakeup(dev))
+		pm_wakeup_event(dev, gpio->debounce_time + 50);
 
 	schedule_delayed_work(&gpio->work,
 			      msecs_to_jiffies(gpio->debounce_time));
@@ -263,11 +275,12 @@
 		INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
 		gpios[i].jack = jack;
 
-		ret = request_irq(gpio_to_irq(gpios[i].gpio),
-				gpio_handler,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				jack->codec->dev->driver->name,
-				&gpios[i]);
+		ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio),
+					      gpio_handler,
+					      IRQF_TRIGGER_RISING |
+					      IRQF_TRIGGER_FALLING,
+					      jack->codec->dev->driver->name,
+					      &gpios[i]);
 		if (ret)
 			goto err;
 
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 68b9747..66eabaf 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -785,7 +785,7 @@
 	}
 
 	dev->pcm->private_data = dev;
-	strcpy(dev->pcm->name, dev->product_name);
+	strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
 
 	memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
 	memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c
index 2f218c7..a1a47088 100644
--- a/sound/usb/caiaq/midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -136,7 +136,7 @@
 	if (ret < 0)
 		return ret;
 
-	strcpy(rmidi->name, device->product_name);
+	strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
 
 	rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
 	rmidi->private_data = device;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 800f7cb..c0f8270b 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -323,6 +323,7 @@
 		return -ENOMEM;
 	}
 
+	mutex_init(&chip->shutdown_mutex);
 	chip->index = idx;
 	chip->dev = dev;
 	chip->card = card;
@@ -531,6 +532,7 @@
 	chip = ptr;
 	card = chip->card;
 	mutex_lock(&register_mutex);
+	mutex_lock(&chip->shutdown_mutex);
 	chip->shutdown = 1;
 	chip->num_interfaces--;
 	if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@
 			snd_usb_mixer_disconnect(p);
 		}
 		usb_chip[chip->index] = NULL;
+		mutex_unlock(&chip->shutdown_mutex);
 		mutex_unlock(&register_mutex);
 		snd_card_free_when_closed(card);
 	} else {
+		mutex_unlock(&chip->shutdown_mutex);
 		mutex_unlock(&register_mutex);
 	}
 }
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 6914821..5b792d2 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -76,7 +76,10 @@
 		format = 1 << UAC_FORMAT_TYPE_I_PCM;
 	}
 	if (format & (1 << UAC_FORMAT_TYPE_I_PCM)) {
-		if (sample_width > sample_bytes * 8) {
+		if (chip->usb_id == USB_ID(0x0582, 0x0016) /* Edirol SD-90 */ &&
+		    sample_width == 24 && sample_bytes == 2)
+			sample_bytes = 3;
+		else if (sample_width > sample_bytes * 8) {
 			snd_printk(KERN_INFO "%d:%u:%d : sample bitwidth %d in over sample bytes %d\n",
 				   chip->dev->devnum, fp->iface, fp->altsetting,
 				   sample_width, sample_bytes);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 25bce7e..db2dc5f 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -850,8 +850,8 @@
 		return;
 	}
 
-	memset(urb->transfer_buffer + count, 0xFD, 9 - count);
-	urb->transfer_buffer_length = count;
+	memset(urb->transfer_buffer + count, 0xFD, ep->max_transfer - count);
+	urb->transfer_buffer_length = ep->max_transfer;
 }
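Rather than shrinking the URB, the change pads the unused tail of a fixed-size packet with 0xFD filler bytes (as the code already did) and always submits ep->max_transfer bytes. The padding step in isolation, with an assumed 9-byte packet:

    #include <stdio.h>
    #include <string.h>

    #define MAX_TRANSFER 9

    /* Fill the unused tail of a fixed-size packet with 0xFD filler bytes. */
    static void pad_packet(unsigned char *buf, size_t count)
    {
    	memset(buf + count, 0xFD, MAX_TRANSFER - count);
    }

    int main(void)
    {
    	unsigned char packet[MAX_TRANSFER] = { 0x90, 0x3c, 0x7f };	/* note on */
    	size_t used = 3;

    	pad_packet(packet, used);
    	for (size_t i = 0; i < MAX_TRANSFER; i++)
    		printf("%02x ", packet[i]);
    	printf("\n");
    	return 0;
    }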
 
 static struct usb_protocol_ops snd_usbmidi_122l_ops = {
@@ -1295,6 +1295,13 @@
 	case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */
 		ep->max_transfer = 4;
 		break;
+		/*
+		 * Some devices only work with a 9-byte packet size:
+		 */
+	case USB_ID(0x0644, 0x800E): /* Tascam US-122L */
+	case USB_ID(0x0644, 0x800F): /* Tascam US-144 */
+		ep->max_transfer = 9;
+		break;
 	}
 	for (i = 0; i < OUTPUT_URBS; ++i) {
 		buffer = usb_alloc_coherent(umidi->dev,
@@ -1729,13 +1736,7 @@
 {
 	static const char *const names[] = { "High Load", "Light Load" };
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item > 1)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
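The open-coded enumerated-info fill collapses into snd_ctl_enum_info(). Roughly, the helper records the channel count and item count, clamps the requested item, and copies its name; a userspace approximation under that assumption (struct enum_info and fill_enum_info are illustrative, not the ALSA kernel types):

    #include <stdio.h>
    #include <string.h>

    struct enum_info {
    	unsigned int count;	/* channels */
    	unsigned int items;	/* number of enum entries */
    	unsigned int item;	/* requested entry (in), clamped (out) */
    	char name[64];		/* name of the requested entry */
    };

    /* Approximation of what an enumerated-control info callback fills in. */
    static int fill_enum_info(struct enum_info *info, unsigned int channels,
    			  unsigned int items, const char *const names[])
    {
    	info->count = channels;
    	info->items = items;
    	if (info->item >= items)
    		info->item = items - 1;
    	strncpy(info->name, names[info->item], sizeof(info->name) - 1);
    	info->name[sizeof(info->name) - 1] = '\0';
    	return 0;
    }

    int main(void)
    {
    	static const char *const names[] = { "High Load", "Light Load" };
    	struct enum_info info = { .item = 5 };

    	fill_enum_info(&info, 1, 2, names);
    	printf("item %u: %s\n", info.item, info.name);	/* clamped to 1 */
    	return 0;
    }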
 
 static int roland_load_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index f2d74d6..85af605 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -95,7 +95,7 @@
 };
 
 
-/*E-mu 0202(0404) eXtension Unit(XU) control*/
+/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
 enum {
 	USB_XU_CLOCK_RATE 		= 0xe301,
 	USB_XU_CLOCK_SOURCE		= 0xe302,
@@ -1566,7 +1566,7 @@
 			cval->initialized = 1;
 		} else {
 			if (type == USB_XU_CLOCK_RATE) {
-				/* E-Mu USB 0404/0202/TrackerPre
+				/* E-Mu USB 0404/0202/TrackerPre/0204
 				 * samplerate control quirk
 				 */
 				cval->min = 0;
@@ -1633,18 +1633,11 @@
 static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
 {
 	struct usb_mixer_elem_info *cval = kcontrol->private_data;
-	char **itemlist = (char **)kcontrol->private_value;
+	const char **itemlist = (const char **)kcontrol->private_value;
 
 	if (snd_BUG_ON(!itemlist))
 		return -EINVAL;
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = cval->max;
-	if (uinfo->value.enumerated.item >= cval->max)
-		uinfo->value.enumerated.item = cval->max - 1;
-	strlcpy(uinfo->value.enumerated.name, itemlist[uinfo->value.enumerated.item],
-		sizeof(uinfo->value.enumerated.name));
-	return 0;
+	return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist);
 }
 
 /* get callback for selector unit */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4132522..e3f6805 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -361,6 +361,7 @@
 	}
 
 	if (changed) {
+		mutex_lock(&subs->stream->chip->shutdown_mutex);
 		/* format changed */
 		snd_usb_release_substream_urbs(subs, 0);
 		/* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@
 						  params_rate(hw_params),
 						  snd_pcm_format_physical_width(params_format(hw_params)) *
 							params_channels(hw_params));
+		mutex_unlock(&subs->stream->chip->shutdown_mutex);
 	}
 
 	return ret;
@@ -385,8 +387,9 @@
 	subs->cur_audiofmt = NULL;
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
-	if (!subs->stream->chip->shutdown)
-		snd_usb_release_substream_urbs(subs, 0);
+	mutex_lock(&subs->stream->chip->shutdown_mutex);
+	snd_usb_release_substream_urbs(subs, 0);
+	mutex_unlock(&subs->stream->chip->shutdown_mutex);
 	return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
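The new shutdown_mutex serialises URB release in hw_params/hw_free against the USB disconnect path, which is why the old `if (!subs->stream->chip->shutdown)` guard can go away. The locking pattern, sketched with pthreads; the function names and flags here are invented and are not the ALSA API:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t shutdown_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int shutting_down;
    static int urbs_allocated = 1;

    /* hw_free-like path: release the URBs under the lock. */
    static void release_urbs(void)
    {
    	pthread_mutex_lock(&shutdown_mutex);
    	if (urbs_allocated) {
    		urbs_allocated = 0;
    		printf("urbs released (shutting_down=%d)\n", shutting_down);
    	}
    	pthread_mutex_unlock(&shutdown_mutex);
    }

    /* disconnect-like path: flag shutdown while holding the same lock,
     * so it can never interleave with a release in progress. */
    static void disconnect(void)
    {
    	pthread_mutex_lock(&shutdown_mutex);
    	shutting_down = 1;
    	pthread_mutex_unlock(&shutdown_mutex);
    }

    int main(void)
    {
    	disconnect();
    	release_urbs();		/* safe in either order */
    	return 0;
    }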
 
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index ad7079d..921a86f 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -79,6 +79,13 @@
 	.idProduct = 0x3f0a,
 	.bInterfaceClass = USB_CLASS_AUDIO,
 },
+{
+	/* E-Mu 0204 USB */
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor = 0x041e,
+	.idProduct = 0x3f19,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+},
 
 /*
  * Logitech QuickCam: bDeviceClass is vendor-specific, so generic interface
@@ -705,11 +712,11 @@
 		.data = (const struct snd_usb_audio_quirk[]) {
 			{
 				.ifnum = 0,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
 			},
 			{
 				.ifnum = 1,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
 			},
 			{
 				.ifnum = 2,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index cf8bf08..e314cdb 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -532,7 +532,7 @@
 }
 
 /*
- * For E-Mu 0404USB/0202USB/TrackerPre sample rate should be set for device,
+ * For E-Mu 0404USB/0202USB/TrackerPre/0204 sample rate should be set for device,
  * not for interface.
  */
 
@@ -589,6 +589,7 @@
 	case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
 	case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
 	case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
+	case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
 		set_format_emu_quirk(subs, fmt);
 		break;
 	}
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index db3eb21..6e66fff 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -36,6 +36,7 @@
 	struct snd_card *card;
 	u32 usb_id;
 	int shutdown;
+	struct mutex shutdown_mutex;
 	unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
 	int num_interfaces;
 	int num_suspended_intf;
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 6ef68e4..084e6fc 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -273,29 +273,26 @@
 					  struct file *file, poll_table *wait)
 {
 	struct us122l	*us122l = hw->private_data;
-	struct usb_stream *s = us122l->sk.s;
 	unsigned	*polled;
 	unsigned int	mask;
 
 	poll_wait(file, &us122l->sk.sleep, wait);
 
-	switch (s->state) {
-	case usb_stream_ready:
-		if (us122l->first == file)
-			polled = &s->periods_polled;
-		else
-			polled = &us122l->second_periods_polled;
-		if (*polled != s->periods_done) {
-			*polled = s->periods_done;
-			mask = POLLIN | POLLOUT | POLLWRNORM;
-			break;
+	mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
+	if (mutex_trylock(&us122l->mutex)) {
+		struct usb_stream *s = us122l->sk.s;
+		if (s && s->state == usb_stream_ready) {
+			if (us122l->first == file)
+				polled = &s->periods_polled;
+			else
+				polled = &us122l->second_periods_polled;
+			if (*polled != s->periods_done) {
+				*polled = s->periods_done;
+				mask = POLLIN | POLLOUT | POLLWRNORM;
+			} else
+				mask = 0;
 		}
-		/* Fall through */
-		mask = 0;
-		break;
-	default:
-		mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
-		break;
+		mutex_unlock(&us122l->mutex);
 	}
 	return mask;
 }
@@ -381,6 +378,7 @@
 {
 	struct usb_stream_config *cfg;
 	struct us122l *us122l = hw->private_data;
+	struct usb_stream *s;
 	unsigned min_period_frames;
 	int err = 0;
 	bool high_speed;
@@ -426,18 +424,18 @@
 	snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
 
 	mutex_lock(&us122l->mutex);
+	s = us122l->sk.s;
 	if (!us122l->master)
 		us122l->master = file;
 	else if (us122l->master != file) {
-		if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
+		if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
 			err = -EIO;
 			goto unlock;
 		}
 		us122l->slave = file;
 	}
-	if (!us122l->sk.s ||
-	    memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
-	    us122l->sk.s->state == usb_stream_xrun) {
+	if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+	    s->state == usb_stream_xrun) {
 		us122l_stop(us122l);
 		if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
 			err = -EIO;
@@ -448,6 +446,7 @@
 	mutex_unlock(&us122l->mutex);
 free:
 	kfree(cfg);
+	wake_up_all(&us122l->sk.sleep);
 	return err;
 }
 
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 52462ae..e032716 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -61,6 +61,9 @@
 -r::
 --realtime=::
 	Collect data with this RT SCHED_FIFO priority.
+-D::
+--no-delay::
+	Collect data without buffering.
 -A::
 --append::
 	Append to the output file to do incremental profiling.
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1b9b13e..7141c42 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -204,13 +204,11 @@
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
@@ -227,7 +225,7 @@
   CFLAGS_OPTIMIZE = -O6
 endif
 
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
@@ -294,6 +292,13 @@
 	CFLAGS := $(CFLAGS) -fstack-protector-all
 endif
 
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y)
+       CFLAGS := $(CFLAGS) -Wstack-protector
+endif
+
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y)
+       CFLAGS := $(CFLAGS) -Wvolatile-register-var
+endif
 
 ### --- END CONFIGURATION SECTION ---
 
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index c056cdc..8879463 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -212,7 +212,7 @@
 			continue;
 
 		offset = start + i;
-		sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
+		sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
 		fp = popen(cmd, "r");
 		if (!fp)
 			continue;
@@ -270,9 +270,9 @@
 
 	for (offset = 0; offset < len; ++offset)
 		if (h->ip[offset] != 0)
-			printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
+			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
 			       sym->start + offset, h->ip[offset]);
-	printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
 }
 
 static int hist_entry__tty_annotate(struct hist_entry *he)
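The %Lu/%Lx conversions throughout these tools are replaced with the <inttypes.h> macros so that u64 values print identically on 32-bit and 64-bit builds. A minimal example:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t ip = 0xffffffff81000000ULL;
    	uint64_t samples = 12345;

    	/* Portable 64-bit formatting, whether uint64_t is long or long long. */
    	printf("%#" PRIx64 ": %" PRIu64 " samples\n", ip, samples);
    	return 0;
    }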
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index def7ddc..d97256d 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -371,10 +371,10 @@
 			addr = data->ptr;
 
 		if (sym != NULL)
-			snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
+			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
 				 addr - map->unmap_ip(map, sym->start));
 		else
-			snprintf(buf, sizeof(buf), "%#Lx", addr);
+			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
 		printf(" %-34s |", buf);
 
 		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b9c6e54..2b36def 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -782,9 +782,9 @@
 		pr_info("%10u ", st->nr_acquired);
 		pr_info("%10u ", st->nr_contended);
 
-		pr_info("%15llu ", st->wait_time_total);
-		pr_info("%15llu ", st->wait_time_max);
-		pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
+		pr_info("%15" PRIu64 " ", st->wait_time_total);
+		pr_info("%15" PRIu64 " ", st->wait_time_max);
+		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
 		       0 : st->wait_time_min);
 		pr_info("\n");
 	}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7bc0490..60cac6f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -49,6 +49,7 @@
 static const char		*output_name			= "perf.data";
 static int			group				=      0;
 static int			realtime_prio			=      0;
+static bool			nodelay				=  false;
 static bool			raw_samples			=  false;
 static bool			sample_id_all_avail		=   true;
 static bool			system_wide			=  false;
@@ -307,6 +308,11 @@
 		attr->sample_type	|= PERF_SAMPLE_CPU;
 	}
 
+	if (nodelay) {
+		attr->watermark = 0;
+		attr->wakeup_events = 1;
+	}
+
 	attr->mmap		= track;
 	attr->comm		= track;
 	attr->inherit		= !no_inherit;
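With --no-delay the event is configured to wake the reader after every single event instead of waiting for the mmap watermark to fill. Just that attribute setup, sketched against the perf_event UAPI header (the rest of the perf_event_attr initialisation is omitted):

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	struct perf_event_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.size = sizeof(attr);

    	/* --no-delay: no buffering, wake userspace up after every event */
    	attr.watermark = 0;
    	attr.wakeup_events = 1;

    	printf("wakeup_events=%u watermark=%u\n",
    	       (unsigned int)attr.wakeup_events, (unsigned int)attr.watermark);
    	return 0;
    }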
@@ -477,6 +483,7 @@
 			process_buildids();
 		perf_header__write(&session->header, output, true);
 		perf_session__delete(session);
+		perf_evsel_list__delete();
 		symbol__exit();
 	}
 }
@@ -752,8 +759,8 @@
 		perf_session__process_machines(session, event__synthesize_guest_os);
 
 	if (!system_wide)
-		event__synthesize_thread(target_tid, process_synthesized_event,
-					 session);
+		event__synthesize_thread_map(threads, process_synthesized_event,
+					     session);
 	else
 		event__synthesize_threads(process_synthesized_event, session);
 
@@ -810,7 +817,7 @@
 	 * Approximate RIP event size: 24 bytes.
 	 */
 	fprintf(stderr,
-		"[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
+		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
 		(double)bytes_written / 1024.0 / 1024.0,
 		output_name,
 		bytes_written / 24);
@@ -842,6 +849,8 @@
 		    "record events on existing thread id"),
 	OPT_INTEGER('r', "realtime", &realtime_prio,
 		    "collect data with this RT SCHED_FIFO priority"),
+	OPT_BOOLEAN('D', "no-delay", &nodelay,
+		    "collect data without buffering"),
 	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
 		    "collect raw sample records from all opened counters"),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
@@ -927,6 +936,8 @@
 	list_for_each_entry(pos, &evsel_list, node) {
 		if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
 			goto out_free_fd;
+		if (perf_header__push_event(pos->attr.config, event_name(pos)))
+			goto out_free_fd;
 	}
 	event_array = malloc((sizeof(struct pollfd) * MAX_NR_CPUS *
 			      MAX_COUNTERS * threads->nr));
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 75183a4..c27e31f 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -197,7 +197,7 @@
 					   event->read.value);
 	}
 
-	dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid,
+	dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
 		    attr ? __event_name(attr->type, attr->config) : "FAIL",
 		    event->read.value);
 
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7a4ebeb..29acb894 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -193,7 +193,7 @@
 	}
 	run_measurement_overhead = min_delta;
 
-	printf("run measurement overhead: %Ld nsecs\n", min_delta);
+	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
 
 static void calibrate_sleep_measurement_overhead(void)
@@ -211,7 +211,7 @@
 	min_delta -= 10000;
 	sleep_measurement_overhead = min_delta;
 
-	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
+	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
 
 static struct sched_atom *
@@ -489,7 +489,8 @@
 
 	err = pthread_attr_init(&attr);
 	BUG_ON(err);
-	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
+	err = pthread_attr_setstacksize(&attr,
+			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
 	BUG_ON(err);
 	err = pthread_mutex_lock(&start_work_mutex);
 	BUG_ON(err);
@@ -616,13 +617,13 @@
 	burn_nsecs(1e6);
 	T1 = get_nsecs();
 
-	printf("the run test took %Ld nsecs\n", T1-T0);
+	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
 
 	T0 = get_nsecs();
 	sleep_nsecs(1e6);
 	T1 = get_nsecs();
 
-	printf("the sleep test took %Ld nsecs\n", T1-T0);
+	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
 }
 
 #define FILL_FIELD(ptr, field, event, data)	\
@@ -815,10 +816,10 @@
 		delta = 0;
 
 	if (delta < 0)
-		die("hm, delta: %Ld < 0 ?\n", delta);
+		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 	if (verbose) {
-		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
+		printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
 			switch_event->prev_comm, switch_event->prev_pid,
 			switch_event->next_comm, switch_event->next_pid,
 			delta);
@@ -1047,7 +1048,7 @@
 		delta = 0;
 
 	if (delta < 0)
-		die("hm, delta: %Ld < 0 ?\n", delta);
+		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
 	sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1220,7 +1221,7 @@
 
 	avg = work_list->total_lat / work_list->nb_atoms;
 
-	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
+	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
 	      (double)work_list->total_runtime / 1e6,
 		 work_list->nb_atoms, (double)avg / 1e6,
 		 (double)work_list->max_lat / 1e6,
@@ -1422,7 +1423,7 @@
 		delta = 0;
 
 	if (delta < 0)
-		die("hm, delta: %Ld < 0 ?\n", delta);
+		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
 	sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1712,7 +1713,7 @@
 	}
 
 	printf(" -----------------------------------------------------------------------------------------\n");
-	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
+	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
 		(double)all_runtime/1e6, all_count);
 
 	printf(" ---------------------------------------------------\n");
@@ -1842,15 +1843,15 @@
 	"-f",
 	"-m", "1024",
 	"-c", "1",
-	"-e", "sched:sched_switch:r",
-	"-e", "sched:sched_stat_wait:r",
-	"-e", "sched:sched_stat_sleep:r",
-	"-e", "sched:sched_stat_iowait:r",
-	"-e", "sched:sched_stat_runtime:r",
-	"-e", "sched:sched_process_exit:r",
-	"-e", "sched:sched_process_fork:r",
-	"-e", "sched:sched_wakeup:r",
-	"-e", "sched:sched_migrate_task:r",
+	"-e", "sched:sched_switch",
+	"-e", "sched:sched_stat_wait",
+	"-e", "sched:sched_stat_sleep",
+	"-e", "sched:sched_stat_iowait",
+	"-e", "sched:sched_stat_runtime",
+	"-e", "sched:sched_process_exit",
+	"-e", "sched:sched_process_fork",
+	"-e", "sched:sched_wakeup",
+	"-e", "sched:sched_migrate_task",
 };
 
 static int __cmd_record(int argc, const char **argv)
@@ -1861,7 +1862,7 @@
 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
-	if (rec_argv)
+	if (rec_argv == NULL)
 		return -ENOMEM;
 
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 150a606..b766c2a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -77,8 +77,8 @@
 	if (session->sample_type & PERF_SAMPLE_RAW) {
 		if (debug_mode) {
 			if (sample->time < last_timestamp) {
-				pr_err("Samples misordered, previous: %llu "
-					"this: %llu\n", last_timestamp,
+				pr_err("Samples misordered, previous: %" PRIu64
+					" this: %" PRIu64 "\n", last_timestamp,
 					sample->time);
 				nr_unordered++;
 			}
@@ -126,7 +126,7 @@
 	ret = perf_session__process_events(session, &event_ops);
 
 	if (debug_mode)
-		pr_err("Misordered timestamps: %llu\n", nr_unordered);
+		pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
 
 	return ret;
 }
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 02b2d80..a482a19 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -206,8 +206,8 @@
 		update_stats(&ps->res_stats[i], count[i]);
 
 	if (verbose) {
-		fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
-				count[0], count[1], count[2]);
+		fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+			event_name(counter), count[0], count[1], count[2]);
 	}
 
 	/*
@@ -316,6 +316,8 @@
 				      "\t Consider tweaking"
 				      " /proc/sys/kernel/perf_event_paranoid or running as root.",
 				      system_wide ? "system-wide " : "");
+			} else if (errno == ENOENT) {
+				error("%s event is not supported. ", event_name(counter));
 			} else {
 				error("open_counter returned with %d (%s). "
 				      "/bin/dmesg may provide additional information.\n",
@@ -683,8 +685,7 @@
 		nr_counters = ARRAY_SIZE(default_attrs);
 
 		for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
-			pos = perf_evsel__new(default_attrs[c].type,
-					      default_attrs[c].config,
+			pos = perf_evsel__new(&default_attrs[c],
 					      nr_counters);
 			if (pos == NULL)
 				goto out;
@@ -742,6 +743,7 @@
 out_free_fd:
 	list_for_each_entry(pos, &evsel_list, node)
 		perf_evsel__free_stat_priv(pos);
+	perf_evsel_list__delete();
 out:
 	thread_map__delete(threads);
 	threads = NULL;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c98434..5dcdba6 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -146,7 +146,7 @@
 				if (llabs(skew) < page_size)
 					continue;
 
-				pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
 					 sym->start, sym->name, sym->end, pair->end);
 			} else {
 				struct rb_node *nnd;
@@ -168,11 +168,11 @@
 					goto detour;
 				}
 
-				pr_debug("%#Lx: diff name v: %s k: %s\n",
+				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
 					 sym->start, sym->name, pair->name);
 			}
 		} else
-			pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
 
 		err = -1;
 	}
@@ -211,10 +211,10 @@
 
 		if (pair->start == pos->start) {
 			pair->priv = 1;
-			pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
 				pos->start, pos->end, pos->pgoff, pos->dso->name);
 			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
-				pr_info(": \n*%Lx-%Lx %Lx",
+				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
 					pair->start, pair->end, pair->pgoff);
 			pr_info(" %s\n", pair->dso->name);
 			pair->priv = 1;
@@ -234,6 +234,7 @@
 	return err;
 }
 
+#include "util/cpumap.h"
 #include "util/evsel.h"
 #include <sys/types.h>
 
@@ -264,6 +265,7 @@
 	int err = -1, fd;
 	struct thread_map *threads;
 	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
 	unsigned int nr_open_calls = 111, i;
 	int id = trace_event__id("sys_enter_open");
 
@@ -278,7 +280,10 @@
 		return -1;
 	}
 
-	evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, id, 0);
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
 	if (evsel == NULL) {
 		pr_debug("perf_evsel__new\n");
 		goto out_thread_map_delete;
@@ -302,7 +307,7 @@
 	}
 
 	if (evsel->counts->cpu[0].val != nr_open_calls) {
-		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n",
+		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
 			 nr_open_calls, evsel->counts->cpu[0].val);
 		goto out_close_fd;
 	}
@@ -317,6 +322,121 @@
 	return err;
 }
 
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+	int err = -1, fd, cpu;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	cpu_set_t cpu_set;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		return -1;
+	}
+
+
+	CPU_ZERO(&cpu_set);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_thread_map_delete;
+	}
+
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int ncalls = nr_open_calls + cpu;
+		/*
+		 * XXX eventually lift this restriction in a way that
+		 * keeps perf building on older glibc installations
+		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
+		 * a reasonable upper limit tho :-)
+		 */
+		if (cpus->map[cpu] >= CPU_SETSIZE) {
+			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+			continue;
+		}
+
+		CPU_SET(cpus->map[cpu], &cpu_set);
+		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
+				 cpus->map[cpu],
+				 strerror(errno));
+			goto out_close_fd;
+		}
+		for (i = 0; i < ncalls; ++i) {
+			fd = open("/etc/passwd", O_RDONLY);
+			close(fd);
+		}
+		CPU_CLR(cpus->map[cpu], &cpu_set);
+	}
+
+	/*
+	 * Here we need to explicitly preallocate the counts, as if we
+	 * used the auto allocation it would allocate just for 1 cpu,
+	 * since we start at cpu 0.
+	 */
+	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+		goto out_close_fd;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int expected;
+
+		if (cpus->map[cpu] >= CPU_SETSIZE)
+			continue;
+
+		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("perf_evsel__read_on_cpu\n");
+			goto out_close_fd;
+		}
+
+		expected = nr_open_calls + cpu;
+		if (evsel->counts->cpu[cpu].val != expected) {
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
+			goto out_close_fd;
+		}
+	}
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -330,6 +450,10 @@
 		.func = test__open_syscall_event,
 	},
 	{
+		.desc = "detect open syscall event on all cpus",
+		.func = test__open_syscall_event_on_all_cpus,
+	},
+	{
 		.func = NULL,
 	},
 };
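The new all-cpus test above relies on a simple pattern: pin the task to one CPU at a time with sched_setaffinity(), issue a per-CPU distinct number of open(2) calls there, and later read the tracepoint count back for each CPU. A minimal standalone sketch of just that pinning loop, using plain glibc and no perf internals (the file opened and the call counts are arbitrary illustration values):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	cpu_set_t set;
	int cpu, i;

	for (cpu = 0; cpu < ncpus && cpu < CPU_SETSIZE; cpu++) {
		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		if (sched_setaffinity(0, sizeof(set), &set) < 0) {
			perror("sched_setaffinity");
			continue;
		}
		/* issue a per-cpu distinct number of open() calls */
		for (i = 0; i < 111 + cpu; i++) {
			int fd = open("/etc/passwd", O_RDONLY);
			if (fd >= 0)
				close(fd);
		}
		printf("cpu%d: issued %d open() calls\n", cpu, 111 + cpu);
	}
	return 0;
}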
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 746cf03..0ace786 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -264,9 +264,6 @@
 		c->start_time = start;
 	if (p->start_time == 0 || p->start_time > start)
 		p->start_time = start;
-
-	if (cpu > numcpus)
-		numcpus = cpu;
 }
 
 #define MAX_CPUS 4096
@@ -511,6 +508,9 @@
 		if (!event_str)
 			return 0;
 
+		if (sample->cpu > numcpus)
+			numcpus = sample->cpu;
+
 		if (strcmp(event_str, "power:cpu_idle") == 0) {
 			struct power_processor_entry *ppe = (void *)te;
 			if (ppe->state == (u32)PWR_EVENT_EXIT)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1e67ab9..5a29d9c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -40,6 +40,7 @@
 #include <stdio.h>
 #include <termios.h>
 #include <unistd.h>
+#include <inttypes.h>
 
 #include <errno.h>
 #include <time.h>
@@ -214,7 +215,7 @@
 	len = sym->end - sym->start;
 
 	sprintf(command,
-		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+		"objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
 		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
 		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
 
@@ -308,7 +309,7 @@
 	struct source_line *line;
 	char pattern[PATTERN_LEN + 1];
 
-	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+	sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
 		map__rip_2objdump(syme->map, symbol->start));
 
 	pthread_mutex_lock(&syme->src->lock);
@@ -537,7 +538,7 @@
 	if (nr_counters == 1 || !display_weighted) {
 		struct perf_evsel *first;
 		first = list_entry(evsel_list.next, struct perf_evsel, node);
-		printf("%Ld", first->attr.sample_period);
+		printf("%" PRIu64, (uint64_t)first->attr.sample_period);
 		if (freq)
 			printf("Hz ");
 		else
@@ -640,7 +641,7 @@
 
 		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
 		if (verbose)
-			printf(" %016llx", sym->start);
+			printf(" %016" PRIx64, sym->start);
 		printf(" %-*.*s", sym_width, sym_width, sym->name);
 		printf(" %-*.*s\n", dso_width, dso_width,
 		       dso_width >= syme->map->dso->long_name_len ?
@@ -1305,7 +1306,7 @@
 		return -ENOMEM;
 
 	if (target_tid != -1)
-		event__synthesize_thread(target_tid, event__process, session);
+		event__synthesize_thread_map(threads, event__process, session);
 	else
 		event__synthesize_threads(event__process, session);
 
@@ -1471,6 +1472,8 @@
 		pos->attr.sample_period = default_interval;
 	}
 
+	sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
+
 	symbol_conf.priv_size = (sizeof(struct sym_entry) +
 				 (nr_counters + 1) * sizeof(unsigned long));
 
@@ -1488,6 +1491,7 @@
 out_free_fd:
 	list_for_each_entry(pos, &evsel_list, node)
 		perf_evsel__free_mmap(pos);
+	perf_evsel_list__delete();
 
 	return status;
 }
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 5b1ecd6..595d0f4 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -286,8 +286,6 @@
 	status = p->fn(argc, argv, prefix);
 	exit_browser(status);
 
-	perf_evsel_list__delete();
-
 	if (status)
 		return status & 0xff;
 
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 2302ec0..50d0a93 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -263,11 +263,12 @@
 					     process, session);
 }
 
-int event__synthesize_thread(pid_t pid, event__handler_t process,
-			     struct perf_session *session)
+int event__synthesize_thread_map(struct thread_map *threads,
+				 event__handler_t process,
+				 struct perf_session *session)
 {
 	event_t *comm_event, *mmap_event;
-	int err = -1;
+	int err = -1, thread;
 
 	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
 	if (comm_event == NULL)
@@ -277,8 +278,15 @@
 	if (mmap_event == NULL)
 		goto out_free_comm;
 
-	err = __event__synthesize_thread(comm_event, mmap_event, pid,
-					 process, session);
+	err = 0;
+	for (thread = 0; thread < threads->nr; ++thread) {
+		if (__event__synthesize_thread(comm_event, mmap_event,
+					       threads->map[thread],
+					       process, session)) {
+			err = -1;
+			break;
+		}
+	}
 	free(mmap_event);
 out_free_comm:
 	free(comm_event);
@@ -459,7 +467,8 @@
 int event__process_lost(event_t *self, struct sample_data *sample __used,
 			struct perf_session *session)
 {
-	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
+	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
+		    self->lost.id, self->lost.lost);
 	session->hists.stats.total_lost += self->lost.lost;
 	return 0;
 }
@@ -575,7 +584,7 @@
 	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 	int ret = 0;
 
-	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+	dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
 			self->mmap.pid, self->mmap.tid, self->mmap.start,
 			self->mmap.len, self->mmap.pgoff, self->mmap.filename);
 
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 2b7e919..cc7b52f 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -135,14 +135,16 @@
 void event__print_totals(void);
 
 struct perf_session;
+struct thread_map;
 
 typedef int (*event__handler_synth_t)(event_t *event, 
 				      struct perf_session *session);
 typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
 				struct perf_session *session);
 
-int event__synthesize_thread(pid_t pid, event__handler_t process,
-			     struct perf_session *session);
+int event__synthesize_thread_map(struct thread_map *threads,
+				 event__handler_t process,
+				 struct perf_session *session);
 int event__synthesize_threads(event__handler_t process,
 			      struct perf_session *session);
 int event__synthesize_kernel_mmap(event__handler_t process,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c95267e..d8575d3 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -6,14 +6,13 @@
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
 	if (evsel != NULL) {
 		evsel->idx	   = idx;
-		evsel->attr.type   = type;
-		evsel->attr.config = config;
+		evsel->attr	   = *attr;
 		INIT_LIST_HEAD(&evsel->node);
 	}
 
@@ -91,7 +90,7 @@
 	int cpu, thread;
 	struct perf_counts_values *aggr = &evsel->counts->aggr, count;
 
-	aggr->val = 0;
+	aggr->val = aggr->ena = aggr->run = 0;
 
 	for (cpu = 0; cpu < ncpus; cpu++) {
 		for (thread = 0; thread < nthreads; thread++) {
@@ -128,59 +127,75 @@
 	return 0;
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+			      struct thread_map *threads)
 {
-	int cpu;
+	int cpu, thread;
 
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+	if (evsel->fd == NULL &&
+	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
 		return -1;
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
-							cpus->map[cpu], -1, 0);
-		if (FD(evsel, cpu, 0) < 0)
-			goto out_close;
+		for (thread = 0; thread < threads->nr; thread++) {
+			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+								     threads->map[thread],
+								     cpus->map[cpu], -1, 0);
+			if (FD(evsel, cpu, thread) < 0)
+				goto out_close;
+		}
 	}
 
 	return 0;
 
 out_close:
-	while (--cpu >= 0) {
-		close(FD(evsel, cpu, 0));
-		FD(evsel, cpu, 0) = -1;
-	}
+	do {
+		while (--thread >= 0) {
+			close(FD(evsel, cpu, thread));
+			FD(evsel, cpu, thread) = -1;
+		}
+		thread = threads->nr;
+	} while (--cpu >= 0);
 	return -1;
 }
 
+static struct {
+	struct cpu_map map;
+	int cpus[1];
+} empty_cpu_map = {
+	.map.nr	= 1,
+	.cpus	= { -1, },
+};
+
+static struct {
+	struct thread_map map;
+	int threads[1];
+} empty_thread_map = {
+	.map.nr	 = 1,
+	.threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads)
+{
+	if (cpus == NULL) {
+		/* Work around old compiler warnings about strict aliasing */
+		cpus = &empty_cpu_map.map;
+	}
+
+	if (threads == NULL)
+		threads = &empty_thread_map.map;
+
+	return __perf_evsel__open(evsel, cpus, threads);
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+{
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
+
 int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
 {
-	int thread;
-
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
-		return -1;
-
-	for (thread = 0; thread < threads->nr; thread++) {
-		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
-							   threads->map[thread], -1, -1, 0);
-		if (FD(evsel, 0, thread) < 0)
-			goto out_close;
-	}
-
-	return 0;
-
-out_close:
-	while (--thread >= 0) {
-		close(FD(evsel, 0, thread));
-		FD(evsel, 0, thread) = -1;
-	}
-	return -1;
-}
-
-int perf_evsel__open(struct perf_evsel *evsel, 
-		     struct cpu_map *cpus, struct thread_map *threads)
-{
-	if (threads == NULL)
-		return perf_evsel__open_per_cpu(evsel, cpus);
-
-	return perf_evsel__open_per_thread(evsel, threads);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
 }
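For context on the __perf_evsel__open() rework above: a one-entry map holding -1 lets the same cpu x thread loop cover the per-cpu, per-thread and combined cases, because perf_event_open() takes pid == -1 to mean every task on the given cpu and cpu == -1 to mean the given task on any cpu. A simplified standalone sketch of that degenerate-loop idea (struct int_map is a stand-in for cpu_map/thread_map, and the pid is made up):

#include <stdio.h>

struct int_map { int nr; int map[4]; };	/* stand-in for cpu_map/thread_map */

static void open_matrix(const struct int_map *cpus, const struct int_map *threads)
{
	int c, t;

	for (c = 0; c < cpus->nr; c++)
		for (t = 0; t < threads->nr; t++)
			printf("would perf_event_open(pid=%d, cpu=%d)\n",
			       threads->map[t], cpus->map[c]);
}

int main(void)
{
	struct int_map cpus    = { .nr = 2, .map = { 0, 1 } };
	struct int_map threads = { .nr = 1, .map = { 1234 } };	/* made-up pid */
	struct int_map dummy   = { .nr = 1, .map = { -1 } };

	open_matrix(&cpus, &dummy);	/* old per-cpu behaviour */
	open_matrix(&dummy, &threads);	/* old per-thread behaviour */
	open_matrix(&cpus, &threads);	/* new combined behaviour */
	return 0;
}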
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a0ccd69..b2d755f 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -37,7 +37,7 @@
 struct cpu_map;
 struct thread_map;
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 989fa2d..0866bcd 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -270,11 +270,15 @@
 			  const char *name, bool is_kallsyms)
 {
 	const size_t size = PATH_MAX;
-	char *realname = realpath(name, NULL),
-	     *filename = malloc(size),
+	char *realname, *filename = malloc(size),
 	     *linkname = malloc(size), *targetname;
 	int len, err = -1;
 
+	if (is_kallsyms)
+		realname = (char *)name;
+	else
+		realname = realpath(name, NULL);
+
 	if (realname == NULL || filename == NULL || linkname == NULL)
 		goto out_free;
 
@@ -306,7 +310,8 @@
 	if (symlink(targetname, linkname) == 0)
 		err = 0;
 out_free:
-	free(realname);
+	if (!is_kallsyms)
+		free(realname);
 	free(filename);
 	free(linkname);
 	return err;
@@ -798,8 +803,8 @@
 				      int feat, int fd)
 {
 	if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
-		pr_debug("Failed to lseek to %Ld offset for feature %d, "
-			 "continuing...\n", self->offset, feat);
+		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+			  "%d, continuing...\n", self->offset, feat);
 		return 0;
 	}
 
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index c749ba6..df51560 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -585,6 +585,7 @@
 {
 	struct sort_entry *se;
 	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+	u64 nr_events;
 	const char *sep = symbol_conf.field_sep;
 	int ret;
 
@@ -593,6 +594,7 @@
 
 	if (pair_hists) {
 		period = self->pair ? self->pair->period : 0;
+		nr_events = self->pair ? self->pair->nr_events : 0;
 		total = pair_hists->stats.total_period;
 		period_sys = self->pair ? self->pair->period_sys : 0;
 		period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +602,7 @@
 		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
 	} else {
 		period = self->period;
+		nr_events = self->nr_events;
 		total = session_total;
 		period_sys = self->period_sys;
 		period_us = self->period_us;
@@ -636,13 +639,13 @@
 			}
 		}
 	} else
-		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
+		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
 
 	if (symbol_conf.show_nr_samples) {
 		if (sep)
-			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
+			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
 		else
-			ret += snprintf(s + ret, size - ret, "%11lld", period);
+			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
 	}
 
 	if (pair_hists) {
@@ -971,7 +974,7 @@
 	sym_size = sym->end - sym->start;
 	offset = ip - sym->start;
 
-	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
+	pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
 
 	if (offset >= sym_size)
 		return 0;
@@ -980,8 +983,9 @@
 	h->sum++;
 	h->ip[offset]++;
 
-	pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
-		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
+	pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64
+		  "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name,
+		  ip, ip - self->ms.sym->start, h->ip[offset]);
 	return 0;
 }
 
@@ -1132,7 +1136,7 @@
 		goto out_free_filename;
 	}
 
-	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
+	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
 		 filename, sym->name, map->unmap_ip(map, sym->start),
 		 map->unmap_ip(map, sym->end));
 
@@ -1142,7 +1146,7 @@
 		 dso, dso->long_name, sym, sym->name);
 
 	snprintf(command, sizeof(command),
-		 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
+		 "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
 		 map__rip_2objdump(map, sym->start),
 		 map__rip_2objdump(map, sym->end),
 		 symfs_filename, filename);
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 8be0b96..305c848 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -2,6 +2,7 @@
 #define _PERF_LINUX_BITOPS_H_
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <asm/hweight.h>
 
 #define BITS_PER_LONG __WORDSIZE
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3a7eb6e..a16ecab 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,5 +1,6 @@
 #include "symbol.h"
 #include <errno.h>
+#include <inttypes.h>
 #include <limits.h>
 #include <stdlib.h>
 #include <string.h>
@@ -195,7 +196,7 @@
 
 size_t map__fprintf(struct map *self, FILE *fp)
 {
-	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
+	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
 		       self->start, self->end, self->pgoff, self->dso->name);
 }
 
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 649083f..135f69b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -279,7 +279,7 @@
 	static char buf[32];
 
 	if (type == PERF_TYPE_RAW) {
-		sprintf(buf, "raw 0x%llx", config);
+		sprintf(buf, "raw 0x%" PRIx64, config);
 		return buf;
 	}
 
@@ -490,7 +490,6 @@
 	return EVT_HANDLED_ALL;
 }
 
-
 static enum event_result parse_tracepoint_event(const char **strp,
 				    struct perf_event_attr *attr)
 {
@@ -530,12 +529,13 @@
 	if (evt_length >= MAX_EVENT_LENGTH)
 		return EVT_FAILED;
 	if (strpbrk(evt_name, "*?")) {
-		*strp += strlen(sys_name) + evt_length;
+		*strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
 		return parse_multiple_tracepoint_event(sys_name, evt_name,
 						       flags);
-	} else
+	} else {
 		return parse_single_tracepoint_event(sys_name, evt_name,
 						     evt_length, attr, strp);
+	}
 }
 
 static enum event_result
@@ -778,41 +778,11 @@
 	return ret;
 }
 
-static int store_event_type(const char *orgname)
-{
-	char filename[PATH_MAX], *c;
-	FILE *file;
-	int id, n;
-
-	sprintf(filename, "%s/", debugfs_path);
-	strncat(filename, orgname, strlen(orgname));
-	strcat(filename, "/id");
-
-	c = strchr(filename, ':');
-	if (c)
-		*c = '/';
-
-	file = fopen(filename, "r");
-	if (!file)
-		return 0;
-	n = fscanf(file, "%i", &id);
-	fclose(file);
-	if (n < 1) {
-		pr_err("cannot store event ID\n");
-		return -EINVAL;
-	}
-	return perf_header__push_event(id, orgname);
-}
-
 int parse_events(const struct option *opt __used, const char *str, int unset __used)
 {
 	struct perf_event_attr attr;
 	enum event_result ret;
 
-	if (strchr(str, ':'))
-		if (store_event_type(str) < 0)
-			return -1;
-
 	for (;;) {
 		memset(&attr, 0, sizeof(attr));
 		ret = parse_event_symbols(&str, &attr);
@@ -824,7 +794,7 @@
 
 		if (ret != EVT_HANDLED_ALL) {
 			struct perf_evsel *evsel;
-			evsel = perf_evsel__new(attr.type, attr.config,
+			evsel = perf_evsel__new(&attr,
 						nr_counters);
 			if (evsel == NULL)
 				return -1;
@@ -1014,8 +984,15 @@
 
 int perf_evsel_list__create_default(void)
 {
-	struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
-						   PERF_COUNT_HW_CPU_CYCLES, 0);
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.config = PERF_COUNT_HW_CPU_CYCLES;
+
+	evsel = perf_evsel__new(&attr, 0);
+
 	if (evsel == NULL)
 		return -ENOMEM;
 
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b82cafb..458e3ec 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,7 +23,7 @@
 };
 
 extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
-extern bool have_tracepoints(struct list_head *evsel_list);
+extern bool have_tracepoints(struct list_head *evlist);
 
 extern int			nr_counters;
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 128aaab..6e29d9c 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -172,7 +172,7 @@
 	sym = __find_kernel_function_by_name(tp->symbol, &map);
 	if (sym) {
 		addr = map->unmap_ip(map, sym->start + tp->offset);
-		pr_debug("try to find %s+%ld@%llx\n", tp->symbol,
+		pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol,
 			 tp->offset, addr);
 		ret = find_perf_probe_point((unsigned long)addr, pp);
 	}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6fb4694..105f00b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -652,10 +652,11 @@
 {
 	unsigned int i;
 
-	printf("... chain: nr:%Lu\n", sample->callchain->nr);
+	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
 
 	for (i = 0; i < sample->callchain->nr; i++)
-		printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
+		printf("..... %2d: %016" PRIx64 "\n",
+		       i, sample->callchain->ips[i]);
 }
 
 static void perf_session__print_tstamp(struct perf_session *session,
@@ -672,7 +673,7 @@
 		printf("%u ", sample->cpu);
 
 	if (session->sample_type & PERF_SAMPLE_TIME)
-		printf("%Lu ", sample->time);
+		printf("%" PRIu64 " ", sample->time);
 }
 
 static void dump_event(struct perf_session *session, event_t *event,
@@ -681,16 +682,16 @@
 	if (!dump_trace)
 		return;
 
-	printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size,
-	       event->header.type);
+	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
+	       file_offset, event->header.size, event->header.type);
 
 	trace_event(event);
 
 	if (sample)
 		perf_session__print_tstamp(session, event, sample);
 
-	printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size,
-	       event__get_event_name(event->header.type));
+	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
+	       event->header.size, event__get_event_name(event->header.type));
 }
 
 static void dump_sample(struct perf_session *session, event_t *event,
@@ -699,8 +700,9 @@
 	if (!dump_trace)
 		return;
 
-	printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
-	       sample->pid, sample->tid, sample->ip, sample->period);
+	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
+	       event->header.misc, sample->pid, sample->tid, sample->ip,
+	       sample->period);
 
 	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
 		callchain__printf(sample);
@@ -843,8 +845,8 @@
 {
 	if (ops->lost == event__process_lost &&
 	    session->hists.stats.total_lost != 0) {
-		ui__warning("Processed %Lu events and LOST %Lu!\n\n"
-			    "Check IO/CPU overload!\n\n",
+		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
+			    "!\n\nCheck IO/CPU overload!\n\n",
 			    session->hists.stats.total_period,
 			    session->hists.stats.total_lost);
 	}
@@ -918,7 +920,7 @@
 
 	if (size == 0 ||
 	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
-		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
 			    head, event.header.size, event.header.type);
 		/*
 		 * assume we lost track of the stream, check alignment, and
@@ -1007,7 +1009,7 @@
 	if (size == 0)
 		size = 8;
 
-	if (head + event->header.size >= mmap_size) {
+	if (head + event->header.size > mmap_size) {
 		if (mmaps[map_idx]) {
 			munmap(mmaps[map_idx], mmap_size);
 			mmaps[map_idx] = NULL;
@@ -1023,7 +1025,7 @@
 
 	if (size == 0 ||
 	    perf_session__process_event(session, event, ops, file_pos) < 0) {
-		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
 			    file_offset + head, event->header.size,
 			    event->header.type);
 		/*
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index b3637db..96c8660 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -12,6 +12,7 @@
  * of the License.
  */
 
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -43,11 +44,11 @@
 	return cpu2slot(cpu) * SLOT_MULT;
 }
 
-static double time2pixels(u64 time)
+static double time2pixels(u64 __time)
 {
 	double X;
 
-	X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
+	X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
 	return X;
 }
 
@@ -94,7 +95,7 @@
 
 	total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
 	fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
-	fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+	fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
 
 	fprintf(svgfile, "<defs>\n  <style type=\"text/css\">\n    <![CDATA[\n");
 
@@ -455,9 +456,9 @@
 		return;
 
 	svg_legenda_box(0,	"Running", "sample");
-	svg_legenda_box(100,	"Idle","rect.c1");
-	svg_legenda_box(200,	"Deeper Idle", "rect.c3");
-	svg_legenda_box(350,	"Deepest Idle", "rect.c6");
+	svg_legenda_box(100,	"Idle","c1");
+	svg_legenda_box(200,	"Deeper Idle", "c3");
+	svg_legenda_box(350,	"Deepest Idle", "c6");
 	svg_legenda_box(550,	"Sleeping", "process2");
 	svg_legenda_box(650,	"Waiting for cpu", "waiting");
 	svg_legenda_box(800,	"Blocked on IO", "blocked");
@@ -483,7 +484,7 @@
 			color = 128;
 		}
 
-		fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
+		fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
 			time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
 
 		i += 10000000;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 15ccfba..b1bf490 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -11,6 +11,7 @@
 #include <sys/param.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <inttypes.h>
 #include "build-id.h"
 #include "debug.h"
 #include "symbol.h"
@@ -153,7 +154,7 @@
 	self->binding = binding;
 	self->namelen = namelen - 1;
 
-	pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end);
 
 	memcpy(self->name, name, namelen);
 
@@ -167,7 +168,7 @@
 
 static size_t symbol__fprintf(struct symbol *self, FILE *fp)
 {
-	return fprintf(fp, " %llx-%llx %c %s\n",
+	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
 		       self->start, self->end,
 		       self->binding == STB_GLOBAL ? 'g' :
 		       self->binding == STB_LOCAL  ? 'l' : 'w',
@@ -1161,6 +1162,13 @@
 
 		section_name = elf_sec__name(&shdr, secstrs);
 
+		/*
+		 * On ARM, symbols for Thumb functions have 1 added to
+		 * the symbol address as a flag - remove it.
+		 */
+		if ((ehdr.e_machine == EM_ARM) &&
+		    (map->type == MAP__FUNCTION) &&
+		    (sym.st_value & 1))
+			--sym.st_value;
+
 		if (self->kernel != DSO_TYPE_USER || kmodule) {
 			char dso_name[PATH_MAX];
 
@@ -1208,8 +1216,8 @@
 		}
 
 		if (curr_dso->adjust_symbols) {
-			pr_debug4("%s: adjusting symbol: st_value: %#Lx "
-				  "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
+			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
 				  (u64)sym.st_value, (u64)shdr.sh_addr,
 				  (u64)shdr.sh_offset);
 			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
@@ -1828,7 +1836,7 @@
 	int err = -1, fd;
 	char symfs_vmlinux[PATH_MAX];
 
-	snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s",
+	snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
 		 symbol_conf.symfs, vmlinux);
 	fd = open(symfs_vmlinux, O_RDONLY);
 	if (fd < 0)
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h
index 7d6b833..5f3689a 100644
--- a/tools/perf/util/types.h
+++ b/tools/perf/util/types.h
@@ -1,12 +1,14 @@
 #ifndef __PERF_TYPES_H
 #define __PERF_TYPES_H
 
+#include <stdint.h>
+
 /*
- * We define u64 as unsigned long long for every architecture
- * so that we can print it with %Lx without getting warnings.
+ * We define u64 as uint64_t for every architecture
+ * so that we can print it with "%"PRIx64 without getting warnings.
  */
-typedef unsigned long long u64;
-typedef signed long long   s64;
+typedef uint64_t	   u64;
+typedef int64_t		   s64;
 typedef unsigned int	   u32;
 typedef signed int	   s32;
 typedef unsigned short	   u16;
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index ebda8c3..60c463c 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -350,7 +350,7 @@
 	if (self->ms.sym)
 		return self->ms.sym->name;
 
-	snprintf(bf, bfsize, "%#Lx", self->ip);
+	snprintf(bf, bfsize, "%#" PRIx64, self->ip);
 	return bf;
 }
 
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
index e35437d..e515836 100644
--- a/tools/perf/util/ui/browsers/map.c
+++ b/tools/perf/util/ui/browsers/map.c
@@ -1,5 +1,6 @@
 #include "../libslang.h"
 #include <elf.h>
+#include <inttypes.h>
 #include <sys/ttydefaults.h>
 #include <ctype.h>
 #include <string.h>
@@ -57,7 +58,7 @@
 	int width;
 
 	ui_browser__set_percent_color(self, 0, current_entry);
-	slsmg_printf("%*llx %*llx %c ",
+	slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ",
 		     mb->addrlen, sym->start, mb->addrlen, sym->end,
 		     sym->binding == STB_GLOBAL ? 'g' :
 		     sym->binding == STB_LOCAL  ? 'l' : 'w');
@@ -150,6 +151,6 @@
 		++mb.b.nr_entries;
 	}
 
-	mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
+	mb.addrlen = snprintf(tmp, sizeof(tmp), "%" PRIx64, maxaddr);
 	return map_browser__run(&mb);
 }
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index cfa55d6..bdd3347 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -150,7 +150,7 @@
 		if (width > tidwidth)
 			tidwidth = width;
 		for (j = 0; j < values->counters; j++) {
-			width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+			width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
 			if (width > counterwidth[j])
 				counterwidth[j] = width;
 		}
@@ -165,7 +165,7 @@
 		fprintf(fp, "  %*d  %*d", pidwidth, values->pid[i],
 			tidwidth, values->tid[i]);
 		for (j = 0; j < values->counters; j++)
-			fprintf(fp, "  %*Lu",
+			fprintf(fp, "  %*" PRIu64,
 				counterwidth[j], values->value[i][j]);
 		fprintf(fp, "\n");
 	}
@@ -196,13 +196,13 @@
 		width = strlen(values->countername[j]);
 		if (width > namewidth)
 			namewidth = width;
-		width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
+		width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
 		if (width > rawwidth)
 			rawwidth = width;
 	}
 	for (i = 0; i < values->threads; i++) {
 		for (j = 0; j < values->counters; j++) {
-			width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+			width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
 			if (width > countwidth)
 				countwidth = width;
 		}
@@ -214,7 +214,7 @@
 		countwidth, "Count");
 	for (i = 0; i < values->threads; i++)
 		for (j = 0; j < values->counters; j++)
-			fprintf(fp, "  %*d  %*d  %*s  %*llx  %*Lu\n",
+			fprintf(fp, "  %*d  %*d  %*s  %*" PRIx64 "  %*" PRIu64 "\n",
 				pidwidth, values->pid[i],
 				tidwidth, values->tid[i],
 				namewidth, values->countername[j],
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
new file mode 100644
index 0000000..fd8e1f1
--- /dev/null
+++ b/tools/power/x86/turbostat/Makefile
@@ -0,0 +1,8 @@
+turbostat : turbostat.c
+
+clean :
+	rm -f turbostat
+
+install :
+	install turbostat /usr/bin/turbostat
+	install turbostat.8 /usr/share/man/man8
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
new file mode 100644
index 0000000..ff75125
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -0,0 +1,172 @@
+.TH TURBOSTAT 8
+.SH NAME
+turbostat \- Report processor frequency and idle statistics
+.SH SYNOPSIS
+.ft B
+.B turbostat
+.RB [ "\-v" ]
+.RB [ "\-M MSR#" ]
+.RB command
+.br
+.B turbostat
+.RB [ "\-v" ]
+.RB [ "\-M MSR#" ]
+.RB [ "\-i interval_sec" ]
+.SH DESCRIPTION
+\fBturbostat \fP reports processor topology, frequency
+and idle power state statistics on modern X86 processors.
+Either \fBcommand\fP is forked and statistics are printed
+upon its completion, or statistics are printed periodically.
+
+\fBturbostat \fP
+requires that the processor
+supports an "invariant" TSC, plus the APERF and MPERF MSRs.
+\fBturbostat \fP will report idle cpu power state residency
+on processors that additionally support C-state residency counters.
+
+.SS Options
+The \fB-v\fP option increases verbosity.
+.PP
+The \fB-M MSR#\fP option dumps the specified MSR,
+in addition to the usual frequency and idle statistics.
+.PP
+The \fB-i interval_sec\fP option prints statistics every \fIinterval_sec\fP seconds.
+The default is 5 seconds.
+.PP
+The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
+displays the statistics gathered since it was forked.
+.PP
+.SH FIELD DESCRIPTIONS
+.nf
+\fBpkg\fP processor package number.
+\fBcore\fP processor core number.
+\fBCPU\fP Linux CPU (logical processor) number.
+\fB%c0\fP percent of the interval that the CPU retired instructions.
+\fBGHz\fP average clock rate while the CPU was in c0 state.
+\fBTSC\fP average GHz that the TSC ran during the entire interval.
+\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
+\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
+.fi
+.PP
+.SH EXAMPLE
+Without any parameters, turbostat prints out counters every 5 seconds.
+(Override the interval with the "-i sec" option, or specify a command
+for turbostat to fork.)
+
+The first row of statistics reflects the average for the entire system.
+Subsequent rows show per-CPU statistics.
+
+.nf
+[root@x980]# ./turbostat
+core CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+          0.04 1.62 3.38   0.11   0.00  99.85   0.00  95.07
+  0   0   0.04 1.62 3.38   0.06   0.00  99.90   0.00  95.07
+  0   6   0.02 1.62 3.38   0.08   0.00  99.90   0.00  95.07
+  1   2   0.10 1.62 3.38   0.29   0.00  99.61   0.00  95.07
+  1   8   0.11 1.62 3.38   0.28   0.00  99.61   0.00  95.07
+  2   4   0.01 1.62 3.38   0.01   0.00  99.98   0.00  95.07
+  2  10   0.01 1.61 3.38   0.02   0.00  99.98   0.00  95.07
+  8   1   0.07 1.62 3.38   0.15   0.00  99.78   0.00  95.07
+  8   7   0.03 1.62 3.38   0.19   0.00  99.78   0.00  95.07
+  9   3   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
+  9   9   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
+ 10   5   0.01 1.62 3.38   0.13   0.00  99.86   0.00  95.07
+ 10  11   0.08 1.62 3.38   0.05   0.00  99.86   0.00  95.07
+.fi
+.SH VERBOSE EXAMPLE
+The "-v" option adds verbosity to the output:
+
+.nf
+GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2)
+12 * 133 = 1600 MHz max efficiency
+25 * 133 = 3333 MHz TSC frequency
+26 * 133 = 3467 MHz max turbo 4 active cores
+26 * 133 = 3467 MHz max turbo 3 active cores
+27 * 133 = 3600 MHz max turbo 2 active cores
+27 * 133 = 3600 MHz max turbo 1 active cores
+
+.fi
+The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
+available at the minimum package voltage.  The \fBTSC frequency\fP is the nominal
+maximum frequency of the processor if turbo-mode were not available.  This frequency
+should be sustainable on all CPUs indefinitely, given nominal power and cooling.
+The remaining rows show what maximum turbo frequency is possible
+depending on the number of idle cores.  Note that this information is
+not available on all processors.
+.SH FORK EXAMPLE
+If turbostat is invoked with a command, it will fork that command
+and output the statistics gathered when the command exits.
+For example, here a cycle soaker is run on 1 CPU (see %c0) for a few
+seconds, until ^C, while the other CPUs are mostly idle:
+
+.nf
+[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
+
+^Ccore CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+           8.49 3.63 3.38  16.23   0.66  74.63   0.00   0.00
+   0   0   1.22 3.62 3.38  32.18   0.00  66.60   0.00   0.00
+   0   6   0.40 3.61 3.38  33.00   0.00  66.60   0.00   0.00
+   1   2   0.11 3.14 3.38   0.19   3.95  95.75   0.00   0.00
+   1   8   0.05 2.88 3.38   0.25   3.95  95.75   0.00   0.00
+   2   4   0.00 3.13 3.38   0.02   0.00  99.98   0.00   0.00
+   2  10   0.00 3.09 3.38   0.02   0.00  99.98   0.00   0.00
+   8   1   0.04 3.50 3.38  14.43   0.00  85.54   0.00   0.00
+   8   7   0.03 2.98 3.38  14.43   0.00  85.54   0.00   0.00
+   9   3   0.00 3.16 3.38 100.00   0.00   0.00   0.00   0.00
+   9   9  99.93 3.63 3.38   0.06   0.00   0.00   0.00   0.00
+  10   5   0.01 2.82 3.38   0.08   0.00  99.91   0.00   0.00
+  10  11   0.02 3.36 3.38   0.06   0.00  99.91   0.00   0.00
+6.950866 sec
+
+.fi
+Above, the cycle soaker drives cpu9 up to its 3.6 GHz turbo limit,
+while the other processors are generally in various states of idle.
+
+Note that cpu3 is an HT sibling sharing core9
+with cpu9, and thus it is unable to get to an idle state
+deeper than c1 while cpu9 is busy.
+
+Note that turbostat reports average GHz of 3.61, while
+the arithmetic average of the GHz column above is 3.24.
+This is a weighted average, where the weight is %c0, i.e. it is the total
+number of un-halted cycles elapsed per time divided by the number of CPUs.
+.SH NOTES
+
+.B "turbostat "
+must be run as root.
+
+.B "turbostat "
+reads hardware counters, but doesn't write them.
+So it will not interfere with the OS or other programs, including
+multiple invocations of itself.
+
+\fBturbostat \fP
+may work poorly on Linux-2.6.20 through 2.6.29,
+as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF MSRs
+in those kernels.
+
+The APERF, MPERF MSRs are defined to count non-halted cycles.
+Although it is not guaranteed by the architecture, turbostat assumes
+that they count at TSC rate, which is true on all processors tested to date.
+
+.SH REFERENCES
+"Intel® Turbo Boost Technology
+in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
+http://download.intel.com/design/processor/applnots/320354.pdf
+
+"Intel® 64 and IA-32 Architectures Software Developer's Manual
+Volume 3B: System Programming Guide"
+http://www.intel.com/products/processor/manuals/
+
+.SH FILES
+.ta
+.nf
+/dev/cpu/*/msr
+.fi
+
+.SH "SEE ALSO"
+msr(4), vmstat(8)
+.PP
+.SH AUTHORS
+.nf
+Written by Len Brown <len.brown@intel.com>
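The GHz and weighted-average behaviour described in the man page comes down to one ratio, mirrored by print_cnt() in turbostat.c below: busy-clock frequency is the TSC rate scaled by APERF/MPERF over the measurement interval, and averaging the raw counter deltas before dividing is what makes the system row a %c0-weighted average rather than an arithmetic mean of the GHz column. A standalone sketch of that arithmetic (the counter deltas are made-up sample values, not measurements):

#include <stdio.h>

/* busy-clock GHz over one interval: TSC rate scaled by APERF/MPERF */
static double busy_ghz(double tsc, double aperf, double mperf, double secs)
{
	return tsc / 1e9 * aperf / mperf / secs;
}

int main(void)
{
	/* made-up deltas for a 5 second interval on a 3.38 GHz TSC */
	double tsc[2]   = { 16.90e9, 16.90e9 };
	double mperf[2] = { 16.90e9, 16.90e6 };	/* cpu0 busy, cpu1 ~idle */
	double aperf[2] = { 18.15e9,  8.00e6 };
	double sum_t = 0, sum_a = 0, sum_m = 0;
	int i;

	for (i = 0; i < 2; i++) {
		printf("cpu%d: %%c0 %6.2f  GHz %.2f\n", i,
		       100.0 * mperf[i] / tsc[i],
		       busy_ghz(tsc[i], aperf[i], mperf[i], 5.0));
		sum_t += tsc[i];
		sum_a += aperf[i];
		sum_m += mperf[i];
	}
	/* averaging the deltas first yields a %c0-weighted average GHz */
	printf("all:  GHz %.2f\n",
	       busy_ghz(sum_t / 2, sum_a / 2, sum_m / 2, 5.0));
	return 0;
}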
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
new file mode 100644
index 0000000..362a0cb
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -0,0 +1,1044 @@
+/*
+ * turbostat -- show CPU frequency and C-state residency
+ * on modern Intel turbo-capable processors.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <string.h>
+#include <ctype.h>
+
+#define MSR_TSC	0x10
+#define MSR_NEHALEM_PLATFORM_INFO	0xCE
+#define MSR_NEHALEM_TURBO_RATIO_LIMIT	0x1AD
+#define MSR_APERF	0xE8
+#define MSR_MPERF	0xE7
+#define MSR_PKG_C2_RESIDENCY	0x60D	/* SNB only */
+#define MSR_PKG_C3_RESIDENCY	0x3F8
+#define MSR_PKG_C6_RESIDENCY	0x3F9
+#define MSR_PKG_C7_RESIDENCY	0x3FA	/* SNB only */
+#define MSR_CORE_C3_RESIDENCY	0x3FC
+#define MSR_CORE_C6_RESIDENCY	0x3FD
+#define MSR_CORE_C7_RESIDENCY	0x3FE	/* SNB only */
+
+char *proc_stat = "/proc/stat";
+unsigned int interval_sec = 5;	/* set with -i interval_sec */
+unsigned int verbose;		/* set with -v */
+unsigned int skip_c0;
+unsigned int skip_c1;
+unsigned int do_nhm_cstates;
+unsigned int do_snb_cstates;
+unsigned int has_aperf;
+unsigned int units = 1000000000;	/* GHz etc */
+unsigned int genuine_intel;
+unsigned int has_invariant_tsc;
+unsigned int do_nehalem_platform_info;
+unsigned int do_nehalem_turbo_ratio_limit;
+unsigned int extra_msr_offset;
+double bclk;
+unsigned int show_pkg;
+unsigned int show_core;
+unsigned int show_cpu;
+
+int aperf_mperf_unstable;
+int backwards_count;
+char *progname;
+int need_reinitialize;
+
+int num_cpus;
+
+struct counters {
+	unsigned long long tsc;		/* per thread */
+	unsigned long long aperf;	/* per thread */
+	unsigned long long mperf;	/* per thread */
+	unsigned long long c1;	/* per thread (calculated) */
+	unsigned long long c3;	/* per core */
+	unsigned long long c6;	/* per core */
+	unsigned long long c7;	/* per core */
+	unsigned long long pc2;	/* per package */
+	unsigned long long pc3;	/* per package */
+	unsigned long long pc6;	/* per package */
+	unsigned long long pc7;	/* per package */
+	unsigned long long extra_msr;	/* per thread */
+	int pkg;
+	int core;
+	int cpu;
+	struct counters *next;
+};
+
+struct counters *cnt_even;
+struct counters *cnt_odd;
+struct counters *cnt_delta;
+struct counters *cnt_average;
+struct timeval tv_even;
+struct timeval tv_odd;
+struct timeval tv_delta;
+
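+/*
+ * Read one 64-bit MSR from the given cpu via the msr driver: the
+ * register number is passed as the file offset into /dev/cpu/<cpu>/msr
+ * (see msr(4)).
+ */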
+unsigned long long get_msr(int cpu, off_t offset)
+{
+	ssize_t retval;
+	unsigned long long msr;
+	char pathname[32];
+	int fd;
+
+	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+	fd = open(pathname, O_RDONLY);
+	if (fd < 0) {
+		perror(pathname);
+		need_reinitialize = 1;
+		return 0;
+	}
+
+	retval = pread(fd, &msr, sizeof msr, offset);
+	if (retval != sizeof msr) {
+		fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
+			cpu, offset, retval);
+		exit(-2);
+	}
+
+	close(fd);
+	return msr;
+}
+
+void print_header(void)
+{
+	if (show_pkg)
+		fprintf(stderr, "pkg ");
+	if (show_core)
+		fprintf(stderr, "core");
+	if (show_cpu)
+		fprintf(stderr, " CPU");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c0 ");
+	if (has_aperf)
+		fprintf(stderr, "  GHz");
+	fprintf(stderr, "  TSC");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c1 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c3 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c6 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "   %%c7 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "  %%pc2 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "  %%pc3 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "  %%pc6 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "  %%pc7 ");
+	if (extra_msr_offset)
+		fprintf(stderr, "       MSR 0x%x ", extra_msr_offset);
+
+	putc('\n', stderr);
+}
+
+void dump_cnt(struct counters *cnt)
+{
+	fprintf(stderr, "package: %d ", cnt->pkg);
+	fprintf(stderr, "core: %d ", cnt->core);
+	fprintf(stderr, "CPU: %d ", cnt->cpu);
+	fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
+	fprintf(stderr, "c3: %016llX\n", cnt->c3);
+	fprintf(stderr, "c6: %016llX\n", cnt->c6);
+	fprintf(stderr, "c7: %016llX\n", cnt->c7);
+	fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
+	fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
+	fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
+	fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
+	fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
+	fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
+}
+
+void dump_list(struct counters *cnt)
+{
+	printf("dump_list %p\n", cnt);
+
+	for (; cnt; cnt = cnt->next)
+		dump_cnt(cnt);
+}
+
+void print_cnt(struct counters *p)
+{
+	double interval_float;
+
+	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
+
+	/* topology columns, print blanks on 1st (average) line */
+	if (p == cnt_average) {
+		if (show_pkg)
+			fprintf(stderr, "    ");
+		if (show_core)
+			fprintf(stderr, "    ");
+		if (show_cpu)
+			fprintf(stderr, "    ");
+	} else {
+		if (show_pkg)
+			fprintf(stderr, "%4d", p->pkg);
+		if (show_core)
+			fprintf(stderr, "%4d", p->core);
+		if (show_cpu)
+			fprintf(stderr, "%4d", p->cpu);
+	}
+
+	/* %c0 */
+	if (do_nhm_cstates) {
+		if (!skip_c0)
+			fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc);
+		else
+			fprintf(stderr, "   ****");
+	}
+
+	/* GHz */
+	if (has_aperf) {
+		if (!aperf_mperf_unstable) {
+			fprintf(stderr, "%5.2f",
+				1.0 * p->tsc / units * p->aperf /
+				p->mperf / interval_float);
+		} else {
+			if (p->aperf > p->tsc || p->mperf > p->tsc) {
+				fprintf(stderr, " ****");
+			} else {
+				fprintf(stderr, "%4.1f*",
+					1.0 * p->tsc /
+					units * p->aperf /
+					p->mperf / interval_float);
+			}
+		}
+	}
+
+	/* TSC */
+	fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);
+
+	if (do_nhm_cstates) {
+		if (!skip_c1)
+			fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
+		else
+			fprintf(stderr, "   ****");
+	}
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c3/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c6/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c7/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc2/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc3/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc6/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc7/p->tsc);
+	if (extra_msr_offset)
+		fprintf(stderr, "  0x%016llx", p->extra_msr);
+	putc('\n', stderr);
+}
+
+void print_counters(struct counters *counters)
+{
+	struct counters *cnt;
+
+	print_header();
+
+	if (num_cpus > 1)
+		print_cnt(cnt_average);
+
+	for (cnt = counters; cnt != NULL; cnt = cnt->next)
+		print_cnt(cnt);
+
+}
+
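+/*
+ * Compute delta = after - before, and evaluate to non-zero when the
+ * counter appears to have gone backwards over the interval.
+ */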
+#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
+
+int compute_delta(struct counters *after,
+	struct counters *before, struct counters *delta)
+{
+	int errors = 0;
+	int perf_err = 0;
+
+	skip_c0 = skip_c1 = 0;
+
+	for ( ; after && before && delta;
+		after = after->next, before = before->next, delta = delta->next) {
+		if (before->cpu != after->cpu) {
+			printf("cpu configuration changed: %d != %d\n",
+				before->cpu, after->cpu);
+			return -1;
+		}
+
+		if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
+			fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
+				before->cpu, before->tsc, after->tsc);
+			errors++;
+		}
+		/* check for TSC < 1 Mcycles over interval */
+		if (delta->tsc < (1000 * 1000)) {
+			fprintf(stderr, "Insanely slow TSC rate,"
+				" TSC stops in idle?\n");
+			fprintf(stderr, "You can disable all c-states"
+				" by booting with \"idle=poll\"\n");
+			fprintf(stderr, "or just the deep ones with"
+				" \"processor.max_cstate=1\"\n");
+			exit(-3);
+		}
+		if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
+			fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
+				before->cpu, before->c3, after->c3);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
+			fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
+				before->cpu, before->c6, after->c6);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
+			fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
+				before->cpu, before->c7, after->c7);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
+			fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc2, after->pc2);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
+			fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc3, after->pc3);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
+			fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc6, after->pc6);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
+			fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc7, after->pc7);
+			errors++;
+		}
+
+		perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
+		if (perf_err) {
+			fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
+				before->cpu, before->aperf, after->aperf);
+		}
+		perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
+		if (perf_err) {
+			fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
+				before->cpu, before->mperf, after->mperf);
+		}
+		if (perf_err) {
+			if (!aperf_mperf_unstable) {
+				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
+				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
+				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
+
+				aperf_mperf_unstable = 1;
+			}
+			/*
+			 * mperf delta is likely a huge "positive" number
+			 * can not use it for calculating c0 time
+			 */
+			skip_c0 = 1;
+			skip_c1 = 1;
+		}
+
+		/*
+		 * As mperf and tsc collection are not atomic,
+		 * it is possible for mperf's non-halted cycles
+		 * to exceed TSC's all cycles: show c1 = 0% in that case.
+		 */
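+		/*
+		 * There is no hardware residency counter for C1, so c1 is
+		 * derived as whatever part of the TSC interval remains
+		 * after subtracting non-halted cycles (mperf) and the
+		 * deeper core C-state residencies.
+		 */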
+		if (delta->mperf > delta->tsc)
+			delta->c1 = 0;
+		else /* normal case, derive c1 */
+			delta->c1 = delta->tsc - delta->mperf
+				- delta->c3 - delta->c6 - delta->c7;
+
+		if (delta->mperf == 0)
+			delta->mperf = 1;	/* divide by 0 protection */
+
+		/*
+		 * for "extra msr", just copy the latest w/o subtracting
+		 */
+		delta->extra_msr = after->extra_msr;
+		if (errors) {
+			fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
+			dump_cnt(before);
+			fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
+			dump_cnt(after);
+			errors = 0;
+		}
+	}
+	return 0;
+}
+
+void compute_average(struct counters *delta, struct counters *avg)
+{
+	struct counters *sum;
+
+	sum = calloc(1, sizeof(struct counters));
+	if (sum == NULL) {
+		perror("calloc sum");
+		exit(1);
+	}
+
+	for (; delta; delta = delta->next) {
+		sum->tsc += delta->tsc;
+		sum->c1 += delta->c1;
+		sum->c3 += delta->c3;
+		sum->c6 += delta->c6;
+		sum->c7 += delta->c7;
+		sum->aperf += delta->aperf;
+		sum->mperf += delta->mperf;
+		sum->pc2 += delta->pc2;
+		sum->pc3 += delta->pc3;
+		sum->pc6 += delta->pc6;
+		sum->pc7 += delta->pc7;
+	}
+	avg->tsc = sum->tsc/num_cpus;
+	avg->c1 = sum->c1/num_cpus;
+	avg->c3 = sum->c3/num_cpus;
+	avg->c6 = sum->c6/num_cpus;
+	avg->c7 = sum->c7/num_cpus;
+	avg->aperf = sum->aperf/num_cpus;
+	avg->mperf = sum->mperf/num_cpus;
+	avg->pc2 = sum->pc2/num_cpus;
+	avg->pc3 = sum->pc3/num_cpus;
+	avg->pc6 = sum->pc6/num_cpus;
+	avg->pc7 = sum->pc7/num_cpus;
+
+	free(sum);
+}
+
+void get_counters(struct counters *cnt)
+{
+	for ( ; cnt; cnt = cnt->next) {
+		cnt->tsc = get_msr(cnt->cpu, MSR_TSC);
+		if (do_nhm_cstates)
+			cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY);
+		if (do_nhm_cstates)
+			cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY);
+		if (do_snb_cstates)
+			cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY);
+		if (has_aperf)
+			cnt->aperf = get_msr(cnt->cpu, MSR_APERF);
+		if (has_aperf)
+			cnt->mperf = get_msr(cnt->cpu, MSR_MPERF);
+		if (do_snb_cstates)
+			cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY);
+		if (do_nhm_cstates)
+			cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY);
+		if (do_nhm_cstates)
+			cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY);
+		if (do_snb_cstates)
+			cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY);
+		if (extra_msr_offset)
+			cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset);
+	}
+}
+
+void print_nehalem_info(void)
+{
+	unsigned long long msr;
+	unsigned int ratio;
+
+	if (!do_nehalem_platform_info)
+		return;
+
+	msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO);
+
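+	/*
+	 * MSR_NEHALEM_PLATFORM_INFO: bits 15:8 hold the base (non-turbo)
+	 * ratio and bits 47:40 the maximum efficiency ratio; each ratio
+	 * times the bus clock gives the corresponding frequency in MHz.
+	 */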
+	ratio = (msr >> 40) & 0xFF;
+	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
+		ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 8) & 0xFF;
+	fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
+		ratio, bclk, ratio * bclk);
+
+	if (verbose > 1)
+		fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);
+
+	if (!do_nehalem_turbo_ratio_limit)
+		return;
+
+	msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT);
+
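+	/*
+	 * Each byte of MSR_NEHALEM_TURBO_RATIO_LIMIT holds the maximum
+	 * turbo ratio for a given number of active cores: bits 7:0 for
+	 * one active core, 15:8 for two, 23:16 for three, 31:24 for four.
+	 */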
+	ratio = (msr >> 24) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 16) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 8) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 0) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+}
+
+void free_counter_list(struct counters *list)
+{
+	struct counters *p;
+
+	for (p = list; p; ) {
+		struct counters *free_me;
+
+		free_me = p;
+		p = p->next;
+		free(free_me);
+	}
+}
+
+void free_all_counters(void)
+{
+	free_counter_list(cnt_even);
+	cnt_even = NULL;
+
+	free_counter_list(cnt_odd);
+	cnt_odd = NULL;
+
+	free_counter_list(cnt_delta);
+	cnt_delta = NULL;
+
+	free_counter_list(cnt_average);
+	cnt_average = NULL;
+}
+
+void insert_counters(struct counters **list,
+	struct counters *new)
+{
+	struct counters *prev;
+
+	/*
+	 * list was empty
+	 */
+	if (*list == NULL) {
+		new->next = *list;
+		*list = new;
+		return;
+	}
+
+	show_cpu = 1;	/* there is more than one CPU */
+
+	/*
+	 * insert on front of list.
+	 * It is sorted by ascending package#, core#, cpu#
+	 */
+	if (((*list)->pkg > new->pkg) ||
+	    (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
+	    (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
+		new->next = *list;
+		*list = new;
+		return;
+	}
+
+	prev = *list;
+
+	while (prev->next && (prev->next->pkg < new->pkg)) {
+		prev = prev->next;
+		show_pkg = 1;	/* there is more than 1 package */
+	}
+
+	while (prev->next && (prev->next->pkg == new->pkg)
+		&& (prev->next->core < new->core)) {
+		prev = prev->next;
+		show_core = 1;	/* there is more than 1 core */
+	}
+
+	while (prev->next && (prev->next->pkg == new->pkg)
+		&& (prev->next->core == new->core)
+		&& (prev->next->cpu < new->cpu)) {
+		prev = prev->next;
+	}
+
+	/*
+	 * insert after "prev"
+	 */
+	new->next = prev->next;
+	prev->next = new;
+}
+
+void alloc_new_counters(int pkg, int core, int cpu)
+{
+	struct counters *new;
+
+	if (verbose > 1)
+		printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
+
+	new = (struct counters *)calloc(1, sizeof(struct counters));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_counters(&cnt_odd, new);
+
+	new = (struct counters *)calloc(1,
+		sizeof(struct counters));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_counters(&cnt_even, new);
+
+	new = (struct counters *)calloc(1, sizeof(struct counters));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_counters(&cnt_delta, new);
+
+	new = (struct counters *)calloc(1, sizeof(struct counters));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	cnt_average = new;
+}
+
+int get_physical_package_id(int cpu)
+{
+	char path[64];
+	FILE *filep;
+	int pkg;
+
+	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
+	filep = fopen(path, "r");
+	if (filep == NULL) {
+		perror(path);
+		exit(1);
+	}
+	fscanf(filep, "%d", &pkg);
+	fclose(filep);
+	return pkg;
+}
+
+int get_core_id(int cpu)
+{
+	char path[64];
+	FILE *filep;
+	int core;
+
+	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
+	filep = fopen(path, "r");
+	if (filep == NULL) {
+		perror(path);
+		exit(1);
+	}
+	fscanf(filep, "%d", &core);
+	fclose(filep);
+	return core;
+}
+
+/*
+ * run func(pkg, core, cpu) on every cpu in /proc/stat
+ */
+
+int for_all_cpus(void (func)(int, int, int))
+{
+	FILE *fp;
+	int cpu_count;
+	int retval;
+
+	fp = fopen(proc_stat, "r");
+	if (fp == NULL) {
+		perror(proc_stat);
+		exit(1);
+	}
+
+	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
+	if (retval != 0) {
+		perror("/proc/stat format");
+		exit(1);
+	}
+
+	for (cpu_count = 0; ; cpu_count++) {
+		int cpu;
+
+		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
+		if (retval != 1)
+			break;
+
+		func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
+	}
+	fclose(fp);
+	return cpu_count;
+}
+
+void re_initialize(void)
+{
+	printf("turbostat: topology changed, re-initializing.\n");
+	free_all_counters();
+	num_cpus = for_all_cpus(alloc_new_counters);
+	need_reinitialize = 0;
+	printf("num_cpus is now %d\n", num_cpus);
+}
+
+void dummy(int pkg, int core, int cpu) { return; }
+/*
+ * check to see if a cpu came on-line
+ */
+void verify_num_cpus(void)
+{
+	int new_num_cpus;
+
+	new_num_cpus = for_all_cpus(dummy);
+
+	if (new_num_cpus != num_cpus) {
+		if (verbose)
+			printf("num_cpus was %d, is now  %d\n",
+				num_cpus, new_num_cpus);
+		need_reinitialize = 1;
+	}
+}
+
+void turbostat_loop()
+{
+restart:
+	get_counters(cnt_even);
+	gettimeofday(&tv_even, (struct timezone *)NULL);
+
+	while (1) {
+		verify_num_cpus();
+		if (need_reinitialize) {
+			re_initialize();
+			goto restart;
+		}
+		sleep(interval_sec);
+		get_counters(cnt_odd);
+		gettimeofday(&tv_odd, (struct timezone *)NULL);
+
+		compute_delta(cnt_odd, cnt_even, cnt_delta);
+		timersub(&tv_odd, &tv_even, &tv_delta);
+		compute_average(cnt_delta, cnt_average);
+		print_counters(cnt_delta);
+		if (need_reinitialize) {
+			re_initialize();
+			goto restart;
+		}
+		sleep(interval_sec);
+		get_counters(cnt_even);
+		gettimeofday(&tv_even, (struct timezone *)NULL);
+		compute_delta(cnt_even, cnt_odd, cnt_delta);
+		timersub(&tv_even, &tv_odd, &tv_delta);
+		compute_average(cnt_delta, cnt_average);
+		print_counters(cnt_delta);
+	}
+}
+
+void check_dev_msr()
+{
+	struct stat sb;
+
+	if (stat("/dev/cpu/0/msr", &sb)) {
+		fprintf(stderr, "no /dev/cpu/0/msr\n");
+		fprintf(stderr, "Try \"# modprobe msr\"\n");
+		exit(-5);
+	}
+}
+
+void check_super_user()
+{
+	if (getuid() != 0) {
+		fprintf(stderr, "must be root\n");
+		exit(-6);
+	}
+}
+
+int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	if (family != 6)
+		return 0;
+
+	switch (model) {
+	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
+	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
+	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
+	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
+	case 0x2C:	/* Westmere EP - Gulftown */
+	case 0x2A:	/* SNB */
+	case 0x2D:	/* SNB Xeon */
+		return 1;
+	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
+	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
+	default:
+		return 0;
+	}
+}
+
+int is_snb(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	switch (model) {
+	case 0x2A:
+	case 0x2D:
+		return 1;
+	}
+	return 0;
+}
+
+double discover_bclk(unsigned int family, unsigned int model)
+{
+	if (is_snb(family, model))
+		return 100.00;
+	else
+		return 133.33;
+}
+
+void check_cpuid()
+{
+	unsigned int eax, ebx, ecx, edx, max_level;
+	unsigned int fms, family, model, stepping;
+
+	eax = ebx = ecx = edx = 0;
+
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
+
+	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+		genuine_intel = 1;
+
+	if (verbose)
+		fprintf(stderr, "%.4s%.4s%.4s ",
+			(char *)&ebx, (char *)&edx, (char *)&ecx);
+
+	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	family = (fms >> 8) & 0xf;
+	model = (fms >> 4) & 0xf;
+	stepping = fms & 0xf;
+	if (family == 6 || family == 0xf)
+		model += ((fms >> 16) & 0xf) << 4;
+
+	if (verbose)
+		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
+			max_level, family, model, stepping, family, model, stepping);
+
+	if (!(edx & (1 << 5))) {
+		fprintf(stderr, "CPUID: no MSR\n");
+		exit(1);
+	}
+
+	/*
+	 * check max extended function levels of CPUID.
+	 * This is needed to check for invariant TSC.
+	 * This check is valid for both Intel and AMD.
+	 */
+	ebx = ecx = edx = 0;
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
+
+	if (max_level < 0x80000007) {
+		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
+		exit(1);
+	}
+
+	/*
+	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
+	 * this check is valid for both Intel and AMD
+	 */
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
+	has_invariant_tsc = edx & (1 << 8);
+
+	if (!has_invariant_tsc) {
+		fprintf(stderr, "No invariant TSC\n");
+		exit(1);
+	}
+
+	/*
+	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
+	 * this check is valid for both Intel and AMD
+	 */
+
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
+	has_aperf = ecx & (1 << 0);
+	if (!has_aperf) {
+		fprintf(stderr, "No APERF MSR\n");
+		exit(1);
+	}
+
+	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
+	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
+	do_snb_cstates = is_snb(family, model);
+	bclk = discover_bclk(family, model);
+
+	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
+}
+
+
+void usage()
+{
+	fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n",
+		progname);
+	exit(1);
+}
+
+
+/*
+ * in /dev/cpu/ return success for names that are numbers
+ * i.e. filter out ".", "..", "microcode".
+ */
+int dir_filter(const struct dirent *dirp)
+{
+	if (isdigit(dirp->d_name[0]))
+		return 1;
+	else
+		return 0;
+}
+
+int open_dev_cpu_msr(int dummy1)
+{
+	return 0;
+}
+
+void turbostat_init()
+{
+	check_cpuid();
+
+	check_dev_msr();
+	check_super_user();
+
+	num_cpus = for_all_cpus(alloc_new_counters);
+
+	if (verbose)
+		print_nehalem_info();
+}
+
+int fork_it(char **argv)
+{
+	int retval;
+	pid_t child_pid;
+	get_counters(cnt_even);
+	gettimeofday(&tv_even, (struct timezone *)NULL);
+
+	child_pid = fork();
+	if (!child_pid) {
+		/* child */
+		execvp(argv[0], argv);
+		/* execvp() returns only if it failed to run the command */
+		perror(argv[0]);
+		exit(1);
+	} else {
+		int status;
+
+		/* parent */
+		if (child_pid == -1) {
+			perror("fork");
+			exit(1);
+		}
+
+		signal(SIGINT, SIG_IGN);
+		signal(SIGQUIT, SIG_IGN);
+		if (waitpid(child_pid, &status, 0) == -1) {
+			perror("wait");
+			exit(1);
+		}
+	}
+	get_counters(cnt_odd);
+	gettimeofday(&tv_odd, (struct timezone *)NULL);
+	retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
+
+	timersub(&tv_odd, &tv_even, &tv_delta);
+	compute_average(cnt_delta, cnt_average);
+	if (!retval)
+		print_counters(cnt_delta);
+
+	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
+
+	return 0;
+}
+
+void cmdline(int argc, char **argv)
+{
+	int opt;
+
+	progname = argv[0];
+
+	while ((opt = getopt(argc, argv, "+vi:M:")) != -1) {
+		switch (opt) {
+		case 'v':
+			verbose++;
+			break;
+		case 'i':
+			interval_sec = atoi(optarg);
+			break;
+		case 'M':
+			sscanf(optarg, "%x", &extra_msr_offset);
+			if (verbose > 1)
+				fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
+			break;
+		default:
+			usage();
+		}
+	}
+}
+
+int main(int argc, char **argv)
+{
+	cmdline(argc, argv);
+
+	if (verbose > 1)
+		fprintf(stderr, "turbostat Dec 6, 2010"
+			" - Len Brown <lenb@kernel.org>\n");
+	if (verbose > 1)
+		fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");
+
+	turbostat_init();
+
+	/*
+	 * if any params left, it must be a command to fork
+	 */
+	if (argc - optind)
+		return fork_it(argv + optind);
+	else
+		turbostat_loop();
+
+	return 0;
+}
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
new file mode 100644
index 0000000..f458237
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -0,0 +1,8 @@
+x86_energy_perf_policy : x86_energy_perf_policy.c
+
+clean :
+	rm -f x86_energy_perf_policy
+
+install :
+	install x86_energy_perf_policy /usr/bin/
+	install x86_energy_perf_policy.8 /usr/share/man/man8/
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
new file mode 100644
index 0000000..8eaaad6
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -0,0 +1,104 @@
+.\"  This page Copyright (C) 2010 Len Brown <len.brown@intel.com>
+.\"  Distributed under the GPL, Copyleft 1994.
+.TH X86_ENERGY_PERF_POLICY 8
+.SH NAME
+x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS
+.SH SYNOPSIS
+.ft B
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB "\-r"
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'performance'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'normal'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'powersave'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB n
+.br
+.SH DESCRIPTION
+\fBx86_energy_perf_policy\fP
+allows software to convey
+its policy for the relative importance of performance
+versus energy savings to the processor.
+
+The processor uses this information in model-specific ways
+when it must select trade-offs between performance and
+energy efficiency.
+
+This policy hint does not supersede Processor Performance states
+(P-states) or CPU Idle power states (C-states), but allows
+software to have influence where it would otherwise be unable
+to express a preference.
+
+For example, this setting may tell the hardware how
+aggressively or conservatively to control frequency
+in the "turbo range" above the explicitly OS-controlled
+P-state frequency range.  It may also tell the hardware
+how aggressively it should enter the OS-requested C-states.
+
+Support for this feature is indicated by CPUID.06H.ECX.bit3
+per the Intel Architectures Software Developer's Manual.
+
+.SS Options
+\fB-c\fP limits operation to a single CPU.
+The default is to operate on all CPUs.
+Note that MSR_IA32_ENERGY_PERF_BIAS is defined per
+logical processor, but that the initial implementations
+of the MSR were shared among all processors in each package.
+.PP
+\fB-v\fP increases verbosity.  By default
+x86_energy_perf_policy is silent.
+.PP
+\fB-r\fP is for "read-only" mode - the unchanged state
+is read and displayed.
+.PP
+.I performance
+Set a policy where performance is paramount.
+The processor will be unwilling to sacrifice any performance
+for the sake of energy saving. This is the hardware default.
+.PP
+.I normal
+Set a policy with a normal balance between performance and energy efficiency.
+The processor will tolerate minor performance compromise
+for potentially significant energy savings.
+This is a reasonable default for most desktops and servers.
+.PP
+.I powersave
+Set a policy where the processor can accept
+a measurable performance hit to maximize energy efficiency.
+.PP
+.I n
+Set MSR_IA32_ENERGY_PERF_BIAS to the specified number.
+The range of valid numbers is 0-15, where 0 is maximum
+performance and 15 is maximum energy efficiency.
+
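+.SH EXAMPLE
+The invocations below are illustrative only and follow the synopsis
+above; the CPU number is an arbitrary choice.  The first reads the
+current setting of every CPU, the second biases CPU 2 toward energy
+saving:
+.nf
+# x86_energy_perf_policy -r
+# x86_energy_perf_policy -c 2 powersave
+.fi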
+.SH NOTES
+.B "x86_energy_perf_policy "
+runs only as root.
+.SH FILES
+.ta
+.nf
+/dev/cpu/*/msr
+.fi
+
+.SH "SEE ALSO"
+msr(4)
+.PP
+.SH AUTHORS
+.nf
+Written by Len Brown <len.brown@intel.com>
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
new file mode 100644
index 0000000..d9678a3
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -0,0 +1,325 @@
+/*
+ * x86_energy_perf_policy -- set the energy versus performance
+ * policy preference bias on recent X86 processors.
+ */
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+
+unsigned int verbose;		/* set with -v */
+unsigned int read_only;		/* set with -r */
+char *progname;
+unsigned long long new_bias;
+int cpu = -1;
+
+/*
+ * Usage:
+ *
+ * -c cpu: limit action to a single CPU (default is all CPUs)
+ * -v: verbose output (can invoke more than once)
+ * -r: read-only, don't change any settings
+ *
+ *  performance
+ *	Performance is paramount.
+ *	Unwilling to sacrifice any performance
+ *	for the sake of energy saving. (hardware default)
+ *
+ *  normal
+ *	Can tolerate minor performance compromise
+ *	for potentially significant energy savings.
+ *	(reasonable default for most desktops and servers)
+ *
+ *  powersave
+ *	Can tolerate significant performance hit
+ *	to maximize energy savings.
+ *
+ * n
+ *	a numerical value to write to the underlying MSR.
+ */
+void usage(void)
+{
+	printf("%s: [-c cpu] [-v] "
+		"(-r | 'performance' | 'normal' | 'powersave' | n)\n",
+		progname);
+	exit(1);
+}
+
+#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
+
+#define	BIAS_PERFORMANCE		0
+#define BIAS_BALANCE			6
+#define	BIAS_POWERSAVE			15
+
+void cmdline(int argc, char **argv)
+{
+	int opt;
+
+	progname = argv[0];
+
+	while ((opt = getopt(argc, argv, "+rvc:")) != -1) {
+		switch (opt) {
+		case 'c':
+			cpu = atoi(optarg);
+			break;
+		case 'r':
+			read_only = 1;
+			break;
+		case 'v':
+			verbose++;
+			break;
+		default:
+			usage();
+		}
+	}
+	/* if -r, then there should be no additional arguments */
+	if (read_only && (argc > optind))
+		usage();
+
+	/*
+	 * if no -r, then there must be exactly one additional argument
+	 */
+	if (!read_only) {
+
+		if (argc != optind + 1) {
+			printf("must supply -r or policy param\n");
+			usage();
+		}
+
+		if (!strcmp("performance", argv[optind])) {
+			new_bias = BIAS_PERFORMANCE;
+		} else if (!strcmp("normal", argv[optind])) {
+			new_bias = BIAS_BALANCE;
+		} else if (!strcmp("powersave", argv[optind])) {
+			new_bias = BIAS_POWERSAVE;
+		} else {
+			char *endptr;
+
+			new_bias = strtoull(argv[optind], &endptr, 0);
+			if (endptr == argv[optind] ||
+			    new_bias > BIAS_POWERSAVE) {
+				fprintf(stderr, "invalid value: %s\n",
+					argv[optind]);
+				usage();
+			}
+		}
+	}
+}
+
+/*
+ * validate_cpuid()
+ * returns on success, quietly exits on failure (make verbose with -v)
+ */
+void validate_cpuid(void)
+{
+	unsigned int eax, ebx, ecx, edx, max_level;
+	char brand[16];
+	unsigned int fms, family, model, stepping;
+
+	eax = ebx = ecx = edx = 0;
+
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx),
+		"=d" (edx) : "a" (0));
+
+	if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
+		if (verbose)
+			fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel",
+				(char *)&ebx, (char *)&edx, (char *)&ecx);
+		exit(1);
+	}
+
+	/* assemble the 12-byte vendor string returned by CPUID leaf 0 */
+	memcpy(&brand[0], &ebx, 4);
+	memcpy(&brand[4], &edx, 4);
+	memcpy(&brand[8], &ecx, 4);
+	brand[12] = '\0';
+
+	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	family = (fms >> 8) & 0xf;
+	model = (fms >> 4) & 0xf;
+	stepping = fms & 0xf;
+	if (family == 6 || family == 0xf)
+		model += ((fms >> 16) & 0xf) << 4;
+
+	if (verbose > 1)
+		printf("CPUID %s %d levels family:model:stepping "
+			"0x%x:%x:%x (%d:%d:%d)\n", brand, max_level,
+			family, model, stepping, family, model, stepping);
+
+	if (!(edx & (1 << 5))) {
+		if (verbose)
+			printf("CPUID: no MSR\n");
+		exit(1);
+	}
+
+	/*
+	 * Support for MSR_IA32_ENERGY_PERF_BIAS
+	 * is indicated by CPUID.06H.ECX.bit3
+	 */
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6));
+	if (verbose)
+		printf("CPUID.06H.ECX: 0x%x\n", ecx);
+	if (!(ecx & (1 << 3))) {
+		if (verbose)
+			printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n");
+		exit(1);
+	}
+	return;	/* success */
+}
+
+unsigned long long get_msr(int cpu, int offset)
+{
+	unsigned long long msr;
+	char msr_path[32];
+	int retval;
+	int fd;
+
+	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
+	fd = open(msr_path, O_RDONLY);
+	if (fd < 0) {
+		printf("Try \"# modprobe msr\"\n");
+		perror(msr_path);
+		exit(1);
+	}
+
+	retval = pread(fd, &msr, sizeof msr, offset);
+
+	if (retval != sizeof msr) {
+		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+	close(fd);
+	return msr;
+}
+
+unsigned long long  put_msr(int cpu, unsigned long long new_msr, int offset)
+{
+	unsigned long long old_msr;
+	char msr_path[32];
+	int retval;
+	int fd;
+
+	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
+	fd = open(msr_path, O_RDWR);
+	if (fd < 0) {
+		perror(msr_path);
+		exit(1);
+	}
+
+	retval = pread(fd, &old_msr, sizeof old_msr, offset);
+	if (retval != sizeof old_msr) {
+		perror("pread");
+		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+
+	retval = pwrite(fd, &new_msr, sizeof new_msr, offset);
+	if (retval != sizeof new_msr) {
+		perror("pwrite");
+		printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+
+	close(fd);
+
+	return old_msr;
+}
+
+void print_msr(int cpu)
+{
+	printf("cpu%d: 0x%016llx\n",
+		cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS));
+}
+
+void update_msr(int cpu)
+{
+	unsigned long long previous_msr;
+
+	previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS);
+
+	if (verbose)
+		printf("cpu%d  msr0x%x 0x%016llx -> 0x%016llx\n",
+			cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias);
+
+	return;
+}
+
+char *proc_stat = "/proc/stat";
+/*
+ * run func(cpu) on every cpu listed in /proc/stat
+ */
+void for_every_cpu(void (func)(int))
+{
+	FILE *fp;
+	int retval;
+
+	fp = fopen(proc_stat, "r");
+	if (fp == NULL) {
+		perror(proc_stat);
+		exit(1);
+	}
+
+	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
+	if (retval != 0) {
+		perror("/proc/stat format");
+		exit(1);
+	}
+
+	while (1) {
+		int cpu;
+
+		retval = fscanf(fp,
+			"cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
+			&cpu);
+		if (retval != 1)
+			break;
+
+		func(cpu);
+	}
+	fclose(fp);
+}
+
+int main(int argc, char **argv)
+{
+	cmdline(argc, argv);
+
+	if (verbose > 1)
+		printf("x86_energy_perf_policy Nov 24, 2010"
+				" - Len Brown <lenb@kernel.org>\n");
+	if (verbose > 1 && !read_only)
+		printf("new_bias %lld\n", new_bias);
+
+	validate_cpuid();
+
+	if (cpu != -1) {
+		if (read_only)
+			print_msr(cpu);
+		else
+			update_msr(cpu);
+	} else {
+		if (read_only)
+			for_every_cpu(print_msr);
+		else
+			for_every_cpu(update_msr);
+	}
+
+	return 0;
+}
diff --git a/tools/testing/ktest/compare-ktest-sample.pl b/tools/testing/ktest/compare-ktest-sample.pl
new file mode 100755
index 0000000..9a571e7
--- /dev/null
+++ b/tools/testing/ktest/compare-ktest-sample.pl
@@ -0,0 +1,30 @@
+#!/usr/bin/perl
+
+open (IN,"ktest.pl");
+while (<IN>) {
+    if (/\$opt\{"?([A-Z].*?)(\[.*\])?"?\}/ ||
+	/set_test_option\("(.*?)"/) {
+	$opt{$1} = 1;
+    }
+}
+close IN;
+
+open (IN, "sample.conf");
+while (<IN>) {
+    if (/^\s*#?\s*(\S+)\s*=/) {
+	$samp{$1} = 1;
+    }
+}
+close IN;
+
+foreach $opt (keys %opt) {
+    if (!defined($samp{$opt})) {
+	print "opt = $opt\n";
+    }
+}
+
+foreach $samp (keys %samp) {
+    if (!defined($opt{$samp})) {
+	print "samp = $samp\n";
+    }
+}
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
new file mode 100755
index 0000000..e1c62ee
--- /dev/null
+++ b/tools/testing/ktest/ktest.pl
@@ -0,0 +1,2023 @@
+#!/usr/bin/perl -w
+#
+# Copyright 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+# Licensed under the terms of the GNU GPL License version 2
+#
+
+use strict;
+use IPC::Open2;
+use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
+use File::Path qw(mkpath);
+use File::Copy qw(cp);
+use FileHandle;
+
+my $VERSION = "0.2";
+
+$| = 1;
+
+my %opt;
+my %repeat_tests;
+my %repeats;
+my %default;
+
+#default opts
+$default{"NUM_TESTS"}		= 1;
+$default{"REBOOT_TYPE"}		= "grub";
+$default{"TEST_TYPE"}		= "test";
+$default{"BUILD_TYPE"}		= "randconfig";
+$default{"MAKE_CMD"}		= "make";
+$default{"TIMEOUT"}		= 120;
+$default{"TMP_DIR"}		= "/tmp/ktest";
+$default{"SLEEP_TIME"}		= 60;	# sleep time between tests
+$default{"BUILD_NOCLEAN"}	= 0;
+$default{"REBOOT_ON_ERROR"}	= 0;
+$default{"POWEROFF_ON_ERROR"}	= 0;
+$default{"REBOOT_ON_SUCCESS"}	= 1;
+$default{"POWEROFF_ON_SUCCESS"}	= 0;
+$default{"BUILD_OPTIONS"}	= "";
+$default{"BISECT_SLEEP_TIME"}	= 60;   # sleep time between bisects
+$default{"CLEAR_LOG"}		= 0;
+$default{"SUCCESS_LINE"}	= "login:";
+$default{"BOOTED_TIMEOUT"}	= 1;
+$default{"DIE_ON_FAILURE"}	= 1;
+$default{"SSH_EXEC"}		= "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
+$default{"SCP_TO_TARGET"}	= "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE";
+$default{"REBOOT"}		= "ssh \$SSH_USER\@\$MACHINE reboot";
+$default{"STOP_AFTER_SUCCESS"}	= 10;
+$default{"STOP_AFTER_FAILURE"}	= 60;
+$default{"LOCALVERSION"}	= "-test";
+
+my $ktest_config;
+my $version;
+my $machine;
+my $ssh_user;
+my $tmpdir;
+my $builddir;
+my $outputdir;
+my $output_config;
+my $test_type;
+my $build_type;
+my $build_options;
+my $reboot_type;
+my $reboot_script;
+my $power_cycle;
+my $reboot;
+my $reboot_on_error;
+my $poweroff_on_error;
+my $die_on_failure;
+my $powercycle_after_reboot;
+my $poweroff_after_halt;
+my $ssh_exec;
+my $scp_to_target;
+my $power_off;
+my $grub_menu;
+my $grub_number;
+my $target;
+my $make;
+my $post_install;
+my $noclean;
+my $minconfig;
+my $addconfig;
+my $in_bisect = 0;
+my $bisect_bad = "";
+my $reverse_bisect;
+my $in_patchcheck = 0;
+my $run_test;
+my $redirect;
+my $buildlog;
+my $dmesg;
+my $monitor_fp;
+my $monitor_pid;
+my $monitor_cnt = 0;
+my $sleep_time;
+my $bisect_sleep_time;
+my $store_failures;
+my $timeout;
+my $booted_timeout;
+my $console;
+my $success_line;
+my $stop_after_success;
+my $stop_after_failure;
+my $build_target;
+my $target_image;
+my $localversion;
+my $iteration = 0;
+my $successes = 0;
+
+my %entered_configs;
+my %config_help;
+
+$config_help{"MACHINE"} = << "EOF"
+ The machine hostname that you will test.
+EOF
+    ;
+$config_help{"SSH_USER"} = << "EOF"
+ The user to ssh into the box as (most likely root, since you need
+ privileged operations).  The box is expected to have ssh available
+ on normal bootup.
+EOF
+    ;
+$config_help{"BUILD_DIR"} = << "EOF"
+ The directory that contains the Linux source code (full path).
+EOF
+    ;
+$config_help{"OUTPUT_DIR"} = << "EOF"
+ The directory where the objects will be built (full path).
+ (can not be the same as BUILD_DIR)
+EOF
+    ;
+$config_help{"BUILD_TARGET"} = << "EOF"
+ The location of the compiled file to copy to the target.
+ (relative to OUTPUT_DIR)
+EOF
+    ;
+$config_help{"TARGET_IMAGE"} = << "EOF"
+ The place to put your image on the test machine.
+EOF
+    ;
+$config_help{"POWER_CYCLE"} = << "EOF"
+ A script or command to reboot the box.
+
+ Here is a digital loggers power switch example
+ POWER_CYCLE = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin\@power/outlet?5=CCL'
+
+ Here is an example to reboot a virtual box on the current host
+ with the name "Guest".
+ POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
+EOF
+    ;
+$config_help{"CONSOLE"} = << "EOF"
+ The script or command that reads the console
+
+  If you use ttywatch server, something like the following would work.
+CONSOLE = nc -d localhost 3001
+
+ For a virtual machine with guest name "Guest".
+CONSOLE =  virsh console Guest
+EOF
+    ;
+$config_help{"LOCALVERSION"} = << "EOF"
+ Required version ending to differentiate the test
+ from other linux builds on the system.
+EOF
+    ;
+$config_help{"REBOOT_TYPE"} = << "EOF"
+ Way to reboot the box to the test kernel.
+ Only valid options so far are "grub" and "script".
+
+ If you specify grub, it will assume grub version 1
+ and will search in /boot/grub/menu.lst for the title \$GRUB_MENU
+ and select that target to reboot to the kernel. If this is not
+ your setup, then specify "script" and have a command or script
+ specified in REBOOT_SCRIPT to boot to the target.
+
+ The entry in /boot/grub/menu.lst must be entered manually.
+ The test will not modify that file.
+EOF
+    ;
+$config_help{"GRUB_MENU"} = << "EOF"
+ The grub title name for the test kernel to boot
+ (Only mandatory if REBOOT_TYPE = grub)
+
+ Note, ktest.pl will not update the grub menu.lst, you need to
+ manually add an option for the test. ktest.pl will search
+ the grub menu.lst for this option to find what kernel to
+ reboot into.
+
+ For example, if in the /boot/grub/menu.lst the test kernel title has:
+ title Test Kernel
+ kernel vmlinuz-test
+ GRUB_MENU = Test Kernel
+EOF
+    ;
+$config_help{"REBOOT_SCRIPT"} = << "EOF"
+ A script to reboot the target into the test kernel
+ (Only mandatory if REBOOT_TYPE = script)
+EOF
+    ;
+
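+# For illustration only: a minimal ktest.conf covering the options that
+# get_ktest_configs() below prompts for.  The hostname and paths are
+# placeholders; the POWER_CYCLE, CONSOLE and GRUB_MENU values mirror the
+# examples given in the help text above.
+#
+#   MACHINE = testbox
+#   SSH_USER = root
+#   BUILD_DIR = /home/test/linux.git
+#   OUTPUT_DIR = /home/test/linux-build
+#   BUILD_TARGET = arch/x86/boot/bzImage
+#   TARGET_IMAGE = /boot/vmlinuz-test
+#   POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
+#   CONSOLE = virsh console Guest
+#   LOCALVERSION = -test
+#   GRUB_MENU = Test Kernel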
+
+sub get_ktest_config {
+    my ($config) = @_;
+
+    return if (defined($opt{$config}));
+
+    if (defined($config_help{$config})) {
+	print "\n";
+	print $config_help{$config};
+    }
+
+    for (;;) {
+	print "$config = ";
+	if (defined($default{$config})) {
+	    print "\[$default{$config}\] ";
+	}
+	$entered_configs{$config} = <STDIN>;
+	$entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/;
+	if ($entered_configs{$config} =~ /^\s*$/) {
+	    if ($default{$config}) {
+		$entered_configs{$config} = $default{$config};
+	    } else {
+		print "Your answer can not be blank\n";
+		next;
+	    }
+	}
+	last;
+    }
+}
+
+sub get_ktest_configs {
+    get_ktest_config("MACHINE");
+    get_ktest_config("SSH_USER");
+    get_ktest_config("BUILD_DIR");
+    get_ktest_config("OUTPUT_DIR");
+    get_ktest_config("BUILD_TARGET");
+    get_ktest_config("TARGET_IMAGE");
+    get_ktest_config("POWER_CYCLE");
+    get_ktest_config("CONSOLE");
+    get_ktest_config("LOCALVERSION");
+
+    my $rtype = $opt{"REBOOT_TYPE"};
+
+    if (!defined($rtype)) {
+	if (!defined($opt{"GRUB_MENU"})) {
+	    get_ktest_config("REBOOT_TYPE");
+	    $rtype = $entered_configs{"REBOOT_TYPE"};
+	} else {
+	    $rtype = "grub";
+	}
+    }
+
+    if ($rtype eq "grub") {
+	get_ktest_config("GRUB_MENU");
+    } else {
+	get_ktest_config("REBOOT_SCRIPT");
+    }
+}
+
+sub set_value {
+    my ($lvalue, $rvalue) = @_;
+
+    if (defined($opt{$lvalue})) {
+	die "Error: Option $lvalue defined more than once!\n";
+    }
+    if ($rvalue =~ /^\s*$/) {
+	delete $opt{$lvalue};
+    } else {
+	$opt{$lvalue} = $rvalue;
+    }
+}
+
+sub read_config {
+    my ($config) = @_;
+
+    open(IN, $config) || die "can't read file $config";
+
+    my $name = $config;
+    $name =~ s,.*/(.*),$1,;
+
+    my $test_num = 0;
+    my $default = 1;
+    my $repeat = 1;
+    my $num_tests_set = 0;
+    my $skip = 0;
+    my $rest;
+
+    while (<IN>) {
+
+	# ignore blank lines and comments
+	next if (/^\s*$/ || /\s*\#/);
+
+	if (/^\s*TEST_START(.*)/) {
+
+	    $rest = $1;
+
+	    if ($num_tests_set) {
+		die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+	    }
+
+	    my $old_test_num = $test_num;
+	    my $old_repeat = $repeat;
+
+	    $test_num += $repeat;
+	    $default = 0;
+	    $repeat = 1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) {
+		$repeat = $1;
+		$rest = $2;
+		$repeat_tests{"$test_num"} = $repeat;
+	    }
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after TEST_START\n$_";
+	    }
+
+	    if ($skip) {
+		$test_num = $old_test_num;
+		$repeat = $old_repeat;
+	    }
+
+	} elsif (/^\s*DEFAULTS(.*)$/) {
+	    $default = 1;
+
+	    $rest = $1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after DEFAULTS\n$_";
+	    }
+
+	} elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
+
+	    next if ($skip);
+
+	    my $lvalue = $1;
+	    my $rvalue = $2;
+
+	    if (!$default &&
+		($lvalue eq "NUM_TESTS" ||
+		 $lvalue eq "LOG_FILE" ||
+		 $lvalue eq "CLEAR_LOG")) {
+		die "$name: $.: $lvalue must be set in DEFAULTS section\n";
+	    }
+
+	    if ($lvalue eq "NUM_TESTS") {
+		if ($test_num) {
+		    die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+		}
+		if (!$default) {
+		    die "$name: $.: NUM_TESTS must be set in default section\n";
+		}
+		$num_tests_set = 1;
+	    }
+
+	    if ($default || $lvalue =~ /\[\d+\]$/) {
+		set_value($lvalue, $rvalue);
+	    } else {
+		my $val = "$lvalue\[$test_num\]";
+		set_value($val, $rvalue);
+
+		if ($repeat > 1) {
+		    $repeats{$val} = $repeat;
+		}
+	    }
+	} else {
+	    die "$name: $.: Garbage found in config\n$_";
+	}
+    }
+
+    close(IN);
+
+    if ($test_num) {
+	$test_num += $repeat - 1;
+	$opt{"NUM_TESTS"} = $test_num;
+    }
+
+    # make sure we have all mandatory configs
+    get_ktest_configs;
+
+    # set any defaults
+
+    foreach my $default (keys %default) {
+	if (!defined($opt{$default})) {
+	    $opt{$default} = $default{$default};
+	}
+    }
+}
+
+sub _logit {
+    if (defined($opt{"LOG_FILE"})) {
+	open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+	print OUT @_;
+	close(OUT);
+    }
+}
+
+sub logit {
+    if (defined($opt{"LOG_FILE"})) {
+	_logit @_;
+    } else {
+	print @_;
+    }
+}
+
+sub doprint {
+    print @_;
+    _logit @_;
+}
+
+sub run_command;
+
+sub reboot {
+    # try to reboot normally
+    if (run_command $reboot) {
+	if (defined($powercycle_after_reboot)) {
+	    sleep $powercycle_after_reboot;
+	    run_command "$power_cycle";
+	}
+    } else {
+	# nope? power cycle it.
+	run_command "$power_cycle";
+    }
+}
+
+sub do_not_reboot {
+    my $i = $iteration;
+
+    return $test_type eq "build" ||
+	($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
+	($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
+}
+
+sub dodie {
+    doprint "CRITICAL FAILURE... ", @_, "\n";
+
+    my $i = $iteration;
+
+    if ($reboot_on_error && !do_not_reboot) {
+
+	doprint "REBOOTING\n";
+	reboot;
+
+    } elsif ($poweroff_on_error && defined($power_off)) {
+	doprint "POWERING OFF\n";
+	`$power_off`;
+    }
+
+    die @_, "\n";
+}
+
+sub open_console {
+    my ($fp) = @_;
+
+    my $flags;
+
+    my $pid = open($fp, "$console|") or
+	dodie "Can't open console $console";
+
+    $flags = fcntl($fp, F_GETFL, 0) or
+	dodie "Can't get flags for the socket: $!";
+    $flags = fcntl($fp, F_SETFL, $flags | O_NONBLOCK) or
+	dodie "Can't set flags for the socket: $!";
+
+    return $pid;
+}
+
+sub close_console {
+    my ($fp, $pid) = @_;
+
+    doprint "kill child process $pid\n";
+    kill 2, $pid;
+
+    print "closing!\n";
+    close($fp);
+}
+
+sub start_monitor {
+    if ($monitor_cnt++) {
+	return;
+    }
+    $monitor_fp = \*MONFD;
+    $monitor_pid = open_console $monitor_fp;
+
+    return;
+
+    open(MONFD, "Stop perl from warning about single use of MONFD");
+}
+
+sub end_monitor {
+    if (--$monitor_cnt) {
+	return;
+    }
+    close_console($monitor_fp, $monitor_pid);
+}
+
+sub wait_for_monitor {
+    my ($time) = @_;
+    my $line;
+
+    doprint "** Wait for monitor to settle down **\n";
+
+    # read the monitor and wait for the system to calm down
+    do {
+	$line = wait_for_input($monitor_fp, $time);
+	print "$line" if (defined($line));
+    } while (defined($line));
+    print "** Monitor flushed **\n";
+}
+
+sub fail {
+
+	if ($die_on_failure) {
+		dodie @_;
+	}
+
+	doprint "FAILED\n";
+
+	my $i = $iteration;
+
+	# no need to reboot for just building.
+	if (!do_not_reboot) {
+	    doprint "REBOOTING\n";
+	    reboot;
+	    start_monitor;
+	    wait_for_monitor $sleep_time;
+	    end_monitor;
+	}
+
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "KTEST RESULT: TEST $i Failed: ", @_, "\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+
+	return 1 if (!defined($store_failures));
+
+	my @t = localtime;
+	my $date = sprintf "%04d%02d%02d%02d%02d%02d",
+		1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+
+	my $type = $build_type;
+	if ($type =~ /useconfig/) {
+	    $type = "useconfig";
+	}
+
+	my $dir = "$machine-$test_type-$type-fail-$date";
+	my $faildir = "$store_failures/$dir";
+
+	if (!-d $faildir) {
+	    mkpath($faildir) or
+		die "can't create $faildir";
+	}
+	if (-f "$output_config") {
+	    cp "$output_config", "$faildir/config" or
+		die "failed to copy .config";
+	}
+	if (-f $buildlog) {
+	    cp $buildlog, "$faildir/buildlog" or
+		die "failed to move $buildlog";
+	}
+	if (-f $dmesg) {
+	    cp $dmesg, "$faildir/dmesg" or
+		die "failed to move $dmesg";
+	}
+
+	doprint "*** Saved info to $faildir ***\n";
+
+	return 1;
+}
+
+sub run_command {
+    my ($command) = @_;
+    my $dolog = 0;
+    my $dord = 0;
+    my $pid;
+
+    $command =~ s/\$SSH_USER/$ssh_user/g;
+    $command =~ s/\$MACHINE/$machine/g;
+
+    doprint("$command ... ");
+
+    $pid = open(CMD, "$command 2>&1 |") or
+	(fail "unable to exec $command" and return 0);
+
+    if (defined($opt{"LOG_FILE"})) {
+	open(LOG, ">>$opt{LOG_FILE}") or
+	    dodie "failed to write to log";
+	$dolog = 1;
+    }
+
+    if (defined($redirect)) {
+	open (RD, ">$redirect") or
+	    dodie "failed to write to redirect $redirect";
+	$dord = 1;
+    }
+
+    while (<CMD>) {
+	print LOG if ($dolog);
+	print RD  if ($dord);
+    }
+
+    waitpid($pid, 0);
+    my $failed = $?;
+
+    close(CMD);
+    close(LOG) if ($dolog);
+    close(RD)  if ($dord);
+
+    if ($failed) {
+	doprint "FAILED!\n";
+    } else {
+	doprint "SUCCESS\n";
+    }
+
+    return !$failed;
+}
+
+sub run_ssh {
+    my ($cmd) = @_;
+    my $cp_exec = $ssh_exec;
+
+    $cp_exec =~ s/\$SSH_COMMAND/$cmd/g;
+    return run_command "$cp_exec";
+}
+
+sub run_scp {
+    my ($src, $dst) = @_;
+    my $cp_scp = $scp_to_target;
+
+    $cp_scp =~ s/\$SRC_FILE/$src/g;
+    $cp_scp =~ s/\$DST_FILE/$dst/g;
+
+    return run_command "$cp_scp";
+}
+
+sub get_grub_index {
+
+    if ($reboot_type ne "grub") {
+	return;
+    }
+    return if (defined($grub_number));
+
+    doprint "Find grub menu ... ";
+    $grub_number = -1;
+
+    my $ssh_grub = $ssh_exec;
+    $ssh_grub =~ s,\$SSH_COMMAND,cat /boot/grub/menu.lst,g;
+
+    open(IN, "$ssh_grub |")
+	or die "unable to get menu.lst";
+
+    while (<IN>) {
+	if (/^\s*title\s+$grub_menu\s*$/) {
+	    $grub_number++;
+	    last;
+	} elsif (/^\s*title\s/) {
+	    $grub_number++;
+	}
+    }
+    close(IN);
+
+    die "Could not find '$grub_menu' in /boot/grub/menu.lst on $machine"
+	if ($grub_number < 0);
+    doprint "$grub_number\n";
+}
+
+sub wait_for_input
+{
+    my ($fp, $time) = @_;
+    my $rin;
+    my $ready;
+    my $line;
+    my $ch;
+
+    if (!defined($time)) {
+	$time = $timeout;
+    }
+
+    $rin = '';
+    vec($rin, fileno($fp), 1) = 1;
+    $ready = select($rin, undef, undef, $time);
+
+    $line = "";
+
+    # try to read one char at a time
+    while (sysread $fp, $ch, 1) {
+	$line .= $ch;
+	last if ($ch eq "\n");
+    }
+
+    if (!length($line)) {
+	return undef;
+    }
+
+    return $line;
+}
+
+sub reboot_to {
+    if ($reboot_type eq "grub") {
+	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'";
+	return;
+    }
+
+    run_command "$reboot_script";
+}
+
+sub get_sha1 {
+    my ($commit) = @_;
+
+    doprint "git rev-list --max-count=1 $commit ... ";
+    my $sha1 = `git rev-list --max-count=1 $commit`;
+    my $ret = $?;
+
+    logit $sha1;
+
+    if ($ret) {
+	doprint "FAILED\n";
+	dodie "Failed to get git $commit";
+    }
+
+    print "SUCCESS\n";
+
+    chomp $sha1;
+
+    return $sha1;
+}
+
+sub monitor {
+    my $booted = 0;
+    my $bug = 0;
+    my $skip_call_trace = 0;
+    my $loops;
+
+    wait_for_monitor 5;
+
+    my $line;
+    my $full_line = "";
+
+    open(DMESG, "> $dmesg") or
+	die "unable to write to $dmesg";
+
+    reboot_to;
+
+    my $success_start;
+    my $failure_start;
+
+    for (;;) {
+
+	if ($booted) {
+	    $line = wait_for_input($monitor_fp, $booted_timeout);
+	} else {
+	    $line = wait_for_input($monitor_fp);
+	}
+
+	last if (!defined($line));
+
+	doprint $line;
+	print DMESG $line;
+
+	# we are not guaranteed to get a full line
+	$full_line .= $line;
+
+	if ($full_line =~ /$success_line/) {
+	    $booted = 1;
+	    $success_start = time;
+	}
+
+	if ($booted && defined($stop_after_success) &&
+	    $stop_after_success >= 0) {
+	    my $now = time;
+	    if ($now - $success_start >= $stop_after_success) {
+		doprint "Test forced to stop after $stop_after_success seconds after success\n";
+		last;
+	    }
+	}
+
+	if ($full_line =~ /\[ backtrace testing \]/) {
+	    $skip_call_trace = 1;
+	}
+
+	if ($full_line =~ /call trace:/i) {
+	    if (!$skip_call_trace) {
+		$bug = 1;
+		$failure_start = time;
+	    }
+	}
+
+	if ($bug && defined($stop_after_failure) &&
+	    $stop_after_failure >= 0) {
+	    my $now = time;
+	    if ($now - $failure_start >= $stop_after_failure) {
+		doprint "Test forced to stop after $stop_after_failure seconds after failure\n";
+		last;
+	    }
+	}
+
+	if ($full_line =~ /\[ end of backtrace testing \]/) {
+	    $skip_call_trace = 0;
+	}
+
+	if ($full_line =~ /Kernel panic -/) {
+	    $bug = 1;
+	}
+
+	if ($line =~ /\n/) {
+	    $full_line = "";
+	}
+    }
+
+    close(DMESG);
+
+    if ($bug) {
+	return 0 if ($in_bisect);
+	fail "failed - got a bug report" and return 0;
+    }
+
+    if (!$booted) {
+	return 0 if ($in_bisect);
+	fail "failed - never got a boot prompt." and return 0;
+    }
+
+    return 1;
+}
+
+sub install {
+
+    run_scp "$outputdir/$build_target", "$target_image" or
+	dodie "failed to copy image";
+
+    my $install_mods = 0;
+
+    # should we process modules?
+    $install_mods = 0;
+    open(IN, "$output_config") or dodie("Can't read config file");
+    while (<IN>) {
+	if (/CONFIG_MODULES(=y)?/) {
+	    $install_mods = 1 if (defined($1));
+	    last;
+	}
+    }
+    close(IN);
+
+    if (!$install_mods) {
+	doprint "No modules needed\n";
+	return;
+    }
+
+    run_command "$make INSTALL_MOD_PATH=$tmpdir modules_install" or
+	dodie "Failed to install modules";
+
+    my $modlib = "/lib/modules/$version";
+    my $modtar = "ktest-mods.tar.bz2";
+
+    run_ssh "rm -rf $modlib" or
+	dodie "failed to remove old mods: $modlib";
+
+    # would be nice if scp -r did not follow symbolic links
+    run_command "cd $tmpdir && tar -cjf $modtar lib/modules/$version" or
+	dodie "making tarball";
+
+    run_scp "$tmpdir/$modtar", "/tmp" or
+	dodie "failed to copy modules";
+
+    unlink "$tmpdir/$modtar";
+
+    run_ssh "'(cd / && tar xf /tmp/$modtar)'" or
+	dodie "failed to tar modules";
+
+    run_ssh "rm -f /tmp/$modtar";
+
+    return if (!defined($post_install));
+
+    my $cp_post_install = $post_install;
+    $cp_post_install =~ s/\$KERNEL_VERSION/$version/g;
+    run_command "$cp_post_install" or
+	dodie "Failed to run post install";
+}
+
+sub check_buildlog {
+    my ($patch) = @_;
+
+    my @files = `git show $patch | diffstat -l`;
+
+    open(IN, "git show $patch |") or
+	dodie "failed to show $patch";
+    while (<IN>) {
+	if (m,^--- a/(.*),) {
+	    chomp $1;
+	    $files[$#files] = $1;
+	}
+    }
+    close(IN);
+
+    open(IN, $buildlog) or dodie "Can't open $buildlog";
+    while (<IN>) {
+	if (/^\s*(.*?):.*(warning|error)/) {
+	    my $err = $1;
+	    foreach my $file (@files) {
+		my $fullpath = "$builddir/$file";
+		if ($file eq $err || $fullpath eq $err) {
+		    fail "$file built with warnings" and return 0;
+		}
+	    }
+	}
+    }
+    close(IN);
+
+    return 1;
+}
+
+sub build {
+    my ($type) = @_;
+    my $defconfig = "";
+
+    unlink $buildlog;
+
+    if ($type =~ /^useconfig:(.*)/) {
+	run_command "cp $1 $output_config" or
+	    dodie "could not copy $1 to .config";
+
+	$type = "oldconfig";
+    }
+
+    # old config can ask questions
+    if ($type eq "oldconfig") {
+	$type = "oldnoconfig";
+
+	# allow for empty configs
+	run_command "touch $output_config";
+
+	run_command "mv $output_config $outputdir/config_temp" or
+	    dodie "moving .config";
+
+	if (!$noclean && !run_command "$make mrproper") {
+	    dodie "make mrproper";
+	}
+
+	run_command "mv $outputdir/config_temp $output_config" or
+	    dodie "moving config_temp";
+
+    } elsif (!$noclean) {
+	unlink "$output_config";
+	run_command "$make mrproper" or
+	    dodie "make mrproper";
+    }
+
+    # add something to distinguish this build
+    open(OUT, "> $outputdir/localversion") or dodie("Can't make localversion file");
+    print OUT "$localversion\n";
+    close(OUT);
+
+    if (defined($minconfig)) {
+	$defconfig = "KCONFIG_ALLCONFIG=$minconfig";
+    }
+
+    run_command "$defconfig $make $type" or
+	dodie "failed make config";
+
+    $redirect = "$buildlog";
+    if (!run_command "$make $build_options") {
+	undef $redirect;
+	# bisect may need this to pass
+	return 0 if ($in_bisect);
+	fail "failed build" and return 0;
+    }
+    undef $redirect;
+
+    return 1;
+}
+
+sub halt {
+    if (!run_ssh "halt" or defined($power_off)) {
+	if (defined($poweroff_after_halt)) {
+	    sleep $poweroff_after_halt;
+	    run_command "$power_off";
+	}
+    } else {
+	# nope? then zap it!
+	run_command "$power_off";
+    }
+}
+
+sub success {
+    my ($i) = @_;
+
+    $successes++;
+
+    doprint "\n\n*******************************************\n";
+    doprint     "*******************************************\n";
+    doprint     "KTEST RESULT: TEST $i SUCCESS!!!!         **\n";
+    doprint     "*******************************************\n";
+    doprint     "*******************************************\n";
+
+    if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
+	doprint "Reboot and wait $sleep_time seconds\n";
+	reboot;
+	start_monitor;
+	wait_for_monitor $sleep_time;
+	end_monitor;
+    }
+}
+
+sub get_version {
+    # get the release name
+    doprint "$make kernelrelease ... ";
+    $version = `$make kernelrelease | tail -1`;
+    chomp($version);
+    doprint "$version\n";
+}
+
+sub child_run_test {
+    my $failed = 0;
+
+    # child should have no power
+    $reboot_on_error = 0;
+    $poweroff_on_error = 0;
+    $die_on_failure = 1;
+
+    run_command $run_test or $failed = 1;
+    exit $failed;
+}
+
+my $child_done;
+
+sub child_finished {
+    $child_done = 1;
+}
+
+sub do_run_test {
+    my $child_pid;
+    my $child_exit;
+    my $line;
+    my $full_line;
+    my $bug = 0;
+
+    wait_for_monitor 1;
+
+    doprint "run test $run_test\n";
+
+    $child_done = 0;
+
+    $SIG{CHLD} = qw(child_finished);
+
+    $child_pid = fork;
+
+    child_run_test if (!$child_pid);
+
+    $full_line = "";
+
+    do {
+	$line = wait_for_input($monitor_fp, 1);
+	if (defined($line)) {
+
+	    # we are not guaranteed to get a full line
+	    $full_line .= $line;
+
+	    if ($full_line =~ /call trace:/i) {
+		$bug = 1;
+	    }
+
+	    if ($full_line =~ /Kernel panic -/) {
+		$bug = 1;
+	    }
+
+	    if ($line =~ /\n/) {
+		$full_line = "";
+	    }
+	}
+    } while (!$child_done && !$bug);
+
+    if ($bug) {
+	doprint "Detected kernel crash!\n";
+	# kill the child with extreme prejudice
+	kill 9, $child_pid;
+    }
+
+    waitpid $child_pid, 0;
+    $child_exit = $?;
+
+    if ($bug || $child_exit) {
+	return 0 if $in_bisect;
+	fail "test failed" and return 0;
+    }
+    return 1;
+}
+
+sub run_git_bisect {
+    my ($command) = @_;
+
+    doprint "$command ... ";
+
+    my $output = `$command 2>&1`;
+    my $ret = $?;
+
+    logit $output;
+
+    if ($ret) {
+	doprint "FAILED\n";
+	dodie "Failed to git bisect";
+    }
+
+    doprint "SUCCESS\n";
+    if ($output =~ m/^(Bisecting: .*\(roughly \d+ steps?\))\s+\[([[:xdigit:]]+)\]/) {
+	doprint "$1 [$2]\n";
+    } elsif ($output =~ m/^([[:xdigit:]]+) is the first bad commit/) {
+	$bisect_bad = $1;
+	doprint "Found bad commit... $1\n";
+	return 0;
+    } else {
+	# we already logged it, just print it now.
+	print $output;
+    }
+
+    return 1;
+}
+
+# returns 1 on success, 0 on failure
+sub run_bisect_test {
+    my ($type, $buildtype) = @_;
+
+    my $failed = 0;
+    my $result;
+    my $output;
+    my $ret;
+
+    $in_bisect = 1;
+
+    build $buildtype or $failed = 1;
+
+    if ($type ne "build") {
+	dodie "Failed on build" if $failed;
+
+	# Now boot the box
+	get_grub_index;
+	get_version;
+	install;
+
+	start_monitor;
+	monitor or $failed = 1;
+
+	if ($type ne "boot") {
+	    dodie "Failed on boot" if $failed;
+
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+    }
+
+    if ($failed) {
+	$result = 0;
+
+	# reboot the box to a good kernel
+	if ($type ne "build") {
+	    doprint "Reboot and sleep $bisect_sleep_time seconds\n";
+	    reboot;
+	    start_monitor;
+	    wait_for_monitor $bisect_sleep_time;
+	    end_monitor;
+	}
+    } else {
+	$result = 1;
+    }
+    $in_bisect = 0;
+
+    return $result;
+}
+
+sub run_bisect {
+    my ($type) = @_;
+    my $buildtype = "oldconfig";
+
+    # We should have a minconfig to use?
+    if (defined($minconfig)) {
+	$buildtype = "useconfig:$minconfig";
+    }
+
+    my $ret = run_bisect_test $type, $buildtype;
+
+
+    # Are we looking for where it worked, not failed?
+    if ($reverse_bisect) {
+	$ret = !$ret;
+    }
+
+    if ($ret) {
+	return "good";
+    } else {
+	return  "bad";
+    }
+}
+
+sub bisect {
+    my ($i) = @_;
+
+    my $result;
+
+    die "BISECT_GOOD[$i] not defined\n"	if (!defined($opt{"BISECT_GOOD[$i]"}));
+    die "BISECT_BAD[$i] not defined\n"	if (!defined($opt{"BISECT_BAD[$i]"}));
+    die "BISECT_TYPE[$i] not defined\n"	if (!defined($opt{"BISECT_TYPE[$i]"}));
+
+    my $good = $opt{"BISECT_GOOD[$i]"};
+    my $bad = $opt{"BISECT_BAD[$i]"};
+    my $type = $opt{"BISECT_TYPE[$i]"};
+    my $start = $opt{"BISECT_START[$i]"};
+    my $replay = $opt{"BISECT_REPLAY[$i]"};
+
+    # convert to true sha1's
+    $good = get_sha1($good);
+    $bad = get_sha1($bad);
+
+    if (defined($opt{"BISECT_REVERSE[$i]"}) &&
+	$opt{"BISECT_REVERSE[$i]"} == 1) {
+	doprint "Performing a reverse bisect (bad is good, good is bad!)\n";
+	$reverse_bisect = 1;
+    } else {
+	$reverse_bisect = 0;
+    }
+
+    # Can't have a test without having a test to run
+    if ($type eq "test" && !defined($run_test)) {
+	$type = "boot";
+    }
+
+    my $check = $opt{"BISECT_CHECK[$i]"};
+    if (defined($check) && $check ne "0") {
+
+	# get current HEAD
+	my $head = get_sha1("HEAD");
+
+	if ($check ne "good") {
+	    doprint "TESTING BISECT BAD [$bad]\n";
+	    run_command "git checkout $bad" or
+		die "Failed to checkout $bad";
+
+	    $result = run_bisect $type;
+
+	    if ($result ne "bad") {
+		fail "Tested BISECT_BAD [$bad] and it succeeded" and return 0;
+	    }
+	}
+
+	if ($check ne "bad") {
+	    doprint "TESTING BISECT GOOD [$good]\n";
+	    run_command "git checkout $good" or
+		die "Failed to checkout $good";
+
+	    $result = run_bisect $type;
+
+	    if ($result ne "good") {
+		fail "Tested BISECT_GOOD [$good] and it failed" and return 0;
+	    }
+	}
+
+	# checkout where we started
+	run_command "git checkout $head" or
+	    die "Failed to checkout $head";
+    }
+
+    run_command "git bisect start" or
+	dodie "could not start bisect";
+
+    run_command "git bisect good $good" or
+	dodie "could not set bisect good to $good";
+
+    run_git_bisect "git bisect bad $bad" or
+	dodie "could not set bisect bad to $bad";
+
+    if (defined($replay)) {
+	run_command "git bisect replay $replay" or
+	    dodie "failed to run replay";
+    }
+
+    if (defined($start)) {
+	run_command "git checkout $start" or
+	    dodie "failed to checkout $start";
+    }
+
+    my $test;
+    do {
+	$result = run_bisect $type;
+	$test = run_git_bisect "git bisect $result";
+    } while ($test);
+
+    run_command "git bisect log" or
+	dodie "could not capture git bisect log";
+
+    run_command "git bisect reset" or
+	dodie "could not reset git bisect";
+
+    doprint "Bad commit was [$bisect_bad]\n";
+
+    success $i;
+}
+
+my %config_ignore;
+my %config_set;
+
+my %config_list;
+my %null_config;
+
+my %dependency;
+
+sub process_config_ignore {
+    my ($config) = @_;
+
+    open (IN, $config)
+	or dodie "Failed to read $config";
+
+    while (<IN>) {
+	if (/^(.*?(CONFIG\S*)(=.*| is not set))/) {
+	    $config_ignore{$2} = $1;
+	}
+    }
+
+    close(IN);
+}
+
+sub read_current_config {
+    my ($config_ref) = @_;
+
+    %{$config_ref} = ();
+    undef %{$config_ref};
+
+    my @key = keys %{$config_ref};
+    if ($#key >= 0) {
+	print "did not delete!\n";
+	exit;
+    }
+    open (IN, "$output_config");
+
+    while (<IN>) {
+	if (/^(CONFIG\S+)=(.*)/) {
+	    ${$config_ref}{$1} = $2;
+	}
+    }
+    close(IN);
+}
+
+sub get_dependencies {
+    my ($config) = @_;
+
+    my $arr = $dependency{$config};
+    if (!defined($arr)) {
+	return ();
+    }
+
+    my @deps = @{$arr};
+
+    foreach my $dep (@{$arr}) {
+	print "ADD DEP $dep\n";
+	@deps = (@deps, get_dependencies $dep);
+    }
+
+    return @deps;
+}
+
+sub create_config {
+    my @configs = @_;
+
+    open(OUT, ">$output_config") or dodie "Can not write to $output_config";
+
+    foreach my $config (@configs) {
+	print OUT "$config_set{$config}\n";
+	my @deps = get_dependencies $config;
+	foreach my $dep (@deps) {
+	    print OUT "$config_set{$dep}\n";
+	}
+    }
+
+    foreach my $config (keys %config_ignore) {
+	print OUT "$config_ignore{$config}\n";
+    }
+    close(OUT);
+
+#    exit;
+    run_command "$make oldnoconfig" or
+	dodie "failed make oldnoconfig";
+
+}
+
+sub compare_configs {
+    my (%a, %b) = @_;
+
+    foreach my $item (keys %a) {
+	if (!defined($b{$item})) {
+	    print "diff $item\n";
+	    return 1;
+	}
+	delete $b{$item};
+    }
+
+    my @keys = keys %b;
+    if ($#keys) {
+	print "diff2 $keys[0]\n";
+    }
+    return -1 if ($#keys >= 0);
+
+    return 0;
+}
+
+sub run_config_bisect_test {
+    my ($type) = @_;
+
+    return run_bisect_test $type, "oldconfig";
+}
+
+sub process_passed {
+    my (%configs) = @_;
+
+    doprint "These configs had no failure: (Enabling them for further compiles)\n";
+    # Passed! All these configs are part of a good compile.
+    # Add them to the min options.
+    foreach my $config (keys %configs) {
+	if (defined($config_list{$config})) {
+	    doprint " removing $config\n";
+	    $config_ignore{$config} = $config_list{$config};
+	    delete $config_list{$config};
+	}
+    }
+    doprint "config copied to $outputdir/config_good\n";
+    run_command "cp -f $output_config $outputdir/config_good";
+}
+
+sub process_failed {
+    my ($config) = @_;
+
+    doprint "\n\n***************************************\n";
+    doprint "Found bad config: $config\n";
+    doprint "***************************************\n\n";
+}
+
+sub run_config_bisect {
+
+    my @start_list = keys %config_list;
+
+    if ($#start_list < 0) {
+	doprint "No more configs to test!!!\n";
+	return -1;
+    }
+
+    doprint "***** RUN TEST ***\n";
+    my $type = $opt{"CONFIG_BISECT_TYPE[$iteration]"};
+    my $ret;
+    my %current_config;
+
+    my $count = $#start_list + 1;
+    doprint "  $count configs to test\n";
+
+    my $half = int($#start_list / 2);
+
+    do {
+	my @tophalf = @start_list[0 .. $half];
+
+	create_config @tophalf;
+	read_current_config \%current_config;
+
+	$count = $#tophalf + 1;
+	doprint "Testing $count configs\n";
+	my $found = 0;
+	# make sure we test something
+	foreach my $config (@tophalf) {
+	    if (defined($current_config{$config})) {
+		logit " $config\n";
+		$found = 1;
+	    }
+	}
+	if (!$found) {
+	    # try the other half
+	    doprint "Top half produced no set configs, trying bottom half\n";
+	    @tophalf = @start_list[$half .. $#start_list];
+	    create_config @tophalf;
+	    read_current_config \%current_config;
+	    foreach my $config (@tophalf) {
+		if (defined($current_config{$config})) {
+		    logit " $config\n";
+		    $found = 1;
+		}
+	    }
+	    if (!$found) {
+		doprint "Failed: Can't make new config with current configs\n";
+		foreach my $config (@start_list) {
+		    doprint "  CONFIG: $config\n";
+		}
+		return -1;
+	    }
+	    $count = $#tophalf + 1;
+	    doprint "Testing $count configs\n";
+	}
+
+	$ret = run_config_bisect_test $type;
+
+	if ($ret) {
+	    process_passed %current_config;
+	    return 0;
+	}
+
+	doprint "This config had a failure.\n";
+	doprint "Removing these configs that were not set in this config:\n";
+	doprint "config copied to $outputdir/config_bad\n";
+	run_command "cp -f $output_config $outputdir/config_bad";
+
+	# A config exists in this group that was bad.
+	foreach my $config (keys %config_list) {
+	    if (!defined($current_config{$config})) {
+		doprint " removing $config\n";
+		delete $config_list{$config};
+	    }
+	}
+
+	@start_list = @tophalf;
+
+	if ($#start_list == 0) {
+	    process_failed $start_list[0];
+	    return 1;
+	}
+
+	# remove half the configs we are looking at and see if
+	# they are good.
+	$half = int($#start_list / 2);
+    } while ($half > 0);
+
+    # we found a single config, try it again
+    my @tophalf = @start_list[0 .. 0];
+
+    $ret = run_config_bisect_test $type;
+    if ($ret) {
+	process_passed %current_config;
+	return 0;
+    }
+
+    process_failed $start_list[0];
+    return 1;
+}
+
+sub config_bisect {
+    my ($i) = @_;
+
+    my $start_config = $opt{"CONFIG_BISECT[$i]"};
+
+    my $tmpconfig = "$tmpdir/use_config";
+
+    # Make the file with the bad config and the min config
+    if (defined($minconfig)) {
+	# read the min config for things to ignore
+	run_command "cp $minconfig $tmpconfig" or
+	    dodie "failed to copy $minconfig to $tmpconfig";
+    } else {
+	unlink $tmpconfig;
+    }
+
+    # Add other configs
+    if (defined($addconfig)) {
+	run_command "cat $addconfig >> $tmpconfig" or
+	    dodie "failed to append $addconfig";
+    }
+
+    my $defconfig = "";
+    if (-f $tmpconfig) {
+	$defconfig = "KCONFIG_ALLCONFIG=$tmpconfig";
+	process_config_ignore $tmpconfig;
+    }
+
+    # now process the start config
+    run_command "cp $start_config $output_config" or
+	dodie "failed to copy $start_config to $output_config";
+
+    # read directly what we want to check
+    my %config_check;
+    open (IN, $output_config)
+	or dodie "failed to open $output_config";
+
+    while (<IN>) {
+	if (/^((CONFIG\S*)=.*)/) {
+	    $config_check{$2} = $1;
+	}
+    }
+    close(IN);
+
+    # Now run oldconfig with the minconfig (and addconfigs)
+    run_command "$defconfig $make oldnoconfig" or
+	dodie "failed make oldnoconfig";
+
+    # check to see what we lost (or gained)
+    open (IN, $output_config)
+	or dodie "Failed to read $start_config";
+
+    my %removed_configs;
+    my %added_configs;
+
+    while (<IN>) {
+	if (/^((CONFIG\S*)=.*)/) {
+	    # save off all options
+	    $config_set{$2} = $1;
+	    if (defined($config_check{$2})) {
+		if (defined($config_ignore{$2})) {
+		    $removed_configs{$2} = $1;
+		} else {
+		    $config_list{$2} = $1;
+		}
+	    } elsif (!defined($config_ignore{$2})) {
+		$added_configs{$2} = $1;
+		$config_list{$2} = $1;
+	    }
+	}
+    }
+    close(IN);
+
+    my @confs = keys %removed_configs;
+    if ($#confs >= 0) {
+	doprint "Configs overridden by default configs and removed from check:\n";
+	foreach my $config (@confs) {
+	    doprint " $config\n";
+	}
+    }
+    @confs = keys %added_configs;
+    if ($#confs >= 0) {
+	doprint "Configs appearing in make oldconfig and added:\n";
+	foreach my $config (@confs) {
+	    doprint " $config\n";
+	}
+    }
+
+    my %config_test;
+    my $once = 0;
+
+    # Sometimes kconfig does weird things. We must make sure
+    # that the config we autocreate has everything we need
+    # to test, otherwise we may miss testing configs, or
+    # may not be able to create a new config.
+    # Here we create a config with everything set.
+    create_config (keys %config_list);
+    read_current_config \%config_test;
+    foreach my $config (keys %config_list) {
+	if (!defined($config_test{$config})) {
+	    if (!$once) {
+		$once = 1;
+		doprint "Configs not produced by kconfig (will not be checked):\n";
+	    }
+	    doprint "  $config\n";
+	    delete $config_list{$config};
+	}
+    }
+    my $ret;
+    do {
+	$ret = run_config_bisect;
+    } while (!$ret);
+
+    return $ret if ($ret < 0);
+
+    success $i;
+}
+
+sub patchcheck {
+    my ($i) = @_;
+
+    die "PATCHCHECK_START[$i] not defined\n"
+	if (!defined($opt{"PATCHCHECK_START[$i]"}));
+    die "PATCHCHECK_TYPE[$i] not defined\n"
+	if (!defined($opt{"PATCHCHECK_TYPE[$i]"}));
+
+    my $start = $opt{"PATCHCHECK_START[$i]"};
+
+    my $end = "HEAD";
+    if (defined($opt{"PATCHCHECK_END[$i]"})) {
+	$end = $opt{"PATCHCHECK_END[$i]"};
+    }
+
+    # Get the true sha1's since we can use things like HEAD~3
+    $start = get_sha1($start);
+    $end = get_sha1($end);
+
+    my $type = $opt{"PATCHCHECK_TYPE[$i]"};
+
+    # Can't have a test without having a test to run
+    if ($type eq "test" && !defined($run_test)) {
+	$type = "boot";
+    }
+
+    open (IN, "git log --pretty=oneline $end|") or
+	dodie "could not get git list";
+
+    my @list;
+
+    while (<IN>) {
+	chomp;
+	$list[$#list+1] = $_;
+	last if (/^$start/);
+    }
+    close(IN);
+
+    if ($list[$#list] !~ /^$start/) {
+	fail "SHA1 $start not found";
+    }
+
+    # go backwards in the list
+    @list = reverse @list;
+
+    my $save_clean = $noclean;
+
+    $in_patchcheck = 1;
+    foreach my $item (@list) {
+	my $sha1 = $item;
+	$sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+
+	doprint "\nProcessing commit $item\n\n";
+
+	run_command "git checkout $sha1" or
+	    die "Failed to checkout $sha1";
+
+	# only clean on the first and last patch
+	if ($item eq $list[0] ||
+	    $item eq $list[$#list]) {
+	    $noclean = $save_clean;
+	} else {
+	    $noclean = 1;
+	}
+
+	if (defined($minconfig)) {
+	    build "useconfig:$minconfig" or return 0;
+	} else {
+	    # ?? no config to use?
+	    build "oldconfig" or return 0;
+	}
+
+	check_buildlog $sha1 or return 0;
+
+	next if ($type eq "build");
+
+	get_grub_index;
+	get_version;
+	install;
+
+	my $failed = 0;
+
+	start_monitor;
+	monitor or $failed = 1;
+
+	if (!$failed && $type ne "boot"){
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+	return 0 if ($failed);
+
+    }
+    $in_patchcheck = 0;
+    success $i;
+
+    return 1;
+}
+
+$#ARGV < 1 or die "ktest.pl version: $VERSION\n   usage: ktest.pl config-file\n";
+
+if ($#ARGV == 0) {
+    $ktest_config = $ARGV[0];
+    if (! -f $ktest_config) {
+	print "$ktest_config does not exist.\n";
+	my $ans;
+        for (;;) {
+	    print "Create it? [Y/n] ";
+	    $ans = <STDIN>;
+	    chomp $ans;
+	    if ($ans =~ /^\s*$/) {
+		$ans = "y";
+	    }
+	    last if ($ans =~ /^y$/i || $ans =~ /^n$/i);
+	    print "Please answer either 'y' or 'n'.\n";
+	}
+	if ($ans !~ /^y$/i) {
+	    exit 0;
+	}
+    }
+} else {
+    $ktest_config = "ktest.conf";
+}
+
+if (! -f $ktest_config) {
+    open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
+    print OUT << "EOF"
+# Generated by ktest.pl
+#
+# Define each test with TEST_START
+# The config options below it will override the defaults
+TEST_START
+
+DEFAULTS
+EOF
+;
+    close(OUT);
+}
+read_config $ktest_config;
+
+# Append any configs entered manually to the config file.
+my @new_configs = keys %entered_configs;
+if ($#new_configs >= 0) {
+    print "\nAppending entered in configs to $ktest_config\n";
+    open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
+    foreach my $config (@new_configs) {
+	print OUT "$config = $entered_configs{$config}\n";
+	$opt{$config} = $entered_configs{$config};
+    }
+}
+
+if ($opt{"CLEAR_LOG"} && defined($opt{"LOG_FILE"})) {
+    unlink $opt{"LOG_FILE"};
+}
+
+doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
+
+for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
+
+    if (!$i) {
+	doprint "DEFAULT OPTIONS:\n";
+    } else {
+	doprint "\nTEST $i OPTIONS";
+	if (defined($repeat_tests{$i})) {
+	    $repeat = $repeat_tests{$i};
+	    doprint " ITERATE $repeat";
+	}
+	doprint "\n";
+    }
+
+    foreach my $option (sort keys %opt) {
+
+	if ($option =~ /\[(\d+)\]$/) {
+	    next if ($i != $1);
+	} else {
+	    next if ($i);
+	}
+
+	doprint "$option = $opt{$option}\n";
+    }
+}
+
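+# set_test_option looks up an option for test $i: first "$name[$i]",
+# then, if test $i is part of an iterated test, "$name[$test]" of the
+# owning TEST_START section, and finally the plain "$name" default.
+# Illustrative example (names and values are hypothetical): with
+# "TIMEOUT[2] = 30" and a default "TIMEOUT = 120" in the config file,
+# requesting TIMEOUT for test 2 returns 30, while any other test
+# number falls back to 120.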
+sub set_test_option {
+    my ($name, $i) = @_;
+
+    my $option = "$name\[$i\]";
+
+    if (defined($opt{$option})) {
+	return $opt{$option};
+    }
+
+    foreach my $test (keys %repeat_tests) {
+	if ($i >= $test &&
+	    $i < $test + $repeat_tests{$test}) {
+	    $option = "$name\[$test\]";
+	    if (defined($opt{$option})) {
+		return $opt{$option};
+	    }
+	}
+    }
+
+    if (defined($opt{$name})) {
+	return $opt{$name};
+    }
+
+    return undef;
+}
+
+# The first thing we need to do is the builds
+for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+
+    $iteration = $i;
+
+    my $makecmd = set_test_option("MAKE_CMD", $i);
+
+    $machine = set_test_option("MACHINE", $i);
+    $ssh_user = set_test_option("SSH_USER", $i);
+    $tmpdir = set_test_option("TMP_DIR", $i);
+    $outputdir = set_test_option("OUTPUT_DIR", $i);
+    $builddir = set_test_option("BUILD_DIR", $i);
+    $test_type = set_test_option("TEST_TYPE", $i);
+    $build_type = set_test_option("BUILD_TYPE", $i);
+    $build_options = set_test_option("BUILD_OPTIONS", $i);
+    $power_cycle = set_test_option("POWER_CYCLE", $i);
+    $reboot = set_test_option("REBOOT", $i);
+    $noclean = set_test_option("BUILD_NOCLEAN", $i);
+    $minconfig = set_test_option("MIN_CONFIG", $i);
+    $run_test = set_test_option("TEST", $i);
+    $addconfig = set_test_option("ADD_CONFIG", $i);
+    $reboot_type = set_test_option("REBOOT_TYPE", $i);
+    $grub_menu = set_test_option("GRUB_MENU", $i);
+    $post_install = set_test_option("POST_INSTALL", $i);
+    $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
+    $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
+    $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
+    $die_on_failure = set_test_option("DIE_ON_FAILURE", $i);
+    $power_off = set_test_option("POWER_OFF", $i);
+    $powercycle_after_reboot = set_test_option("POWERCYCLE_AFTER_REBOOT", $i);
+    $poweroff_after_halt = set_test_option("POWEROFF_AFTER_HALT", $i);
+    $sleep_time = set_test_option("SLEEP_TIME", $i);
+    $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i);
+    $store_failures = set_test_option("STORE_FAILURES", $i);
+    $timeout = set_test_option("TIMEOUT", $i);
+    $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i);
+    $console = set_test_option("CONSOLE", $i);
+    $success_line = set_test_option("SUCCESS_LINE", $i);
+    $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
+    $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
+    $build_target = set_test_option("BUILD_TARGET", $i);
+    $ssh_exec = set_test_option("SSH_EXEC", $i);
+    $scp_to_target = set_test_option("SCP_TO_TARGET", $i);
+    $target_image = set_test_option("TARGET_IMAGE", $i);
+    $localversion = set_test_option("LOCALVERSION", $i);
+
+    chdir $builddir || die "can't change directory to $builddir";
+
+    if (!-d $tmpdir) {
+	mkpath($tmpdir) or
+	    die "can't create $tmpdir";
+    }
+
+    $ENV{"SSH_USER"} = $ssh_user;
+    $ENV{"MACHINE"} = $machine;
+
+    $target = "$ssh_user\@$machine";
+
+    $buildlog = "$tmpdir/buildlog-$machine";
+    $dmesg = "$tmpdir/dmesg-$machine";
+    $make = "$makecmd O=$outputdir";
+    $output_config = "$outputdir/.config";
+
+    if ($reboot_type eq "grub") {
+	dodie "GRUB_MENU not defined" if (!defined($grub_menu));
+    } elsif (!defined($reboot_script)) {
+	dodie "REBOOT_SCRIPT not defined"
+    }
+
+    my $run_type = $build_type;
+    if ($test_type eq "patchcheck") {
+	$run_type = $opt{"PATCHCHECK_TYPE[$i]"};
+    } elsif ($test_type eq "bisect") {
+	$run_type = $opt{"BISECT_TYPE[$i]"};
+    } elsif ($test_type eq "config_bisect") {
+	$run_type = $opt{"CONFIG_BISECT_TYPE[$i]"};
+    }
+
+    # mistake in config file?
+    if (!defined($run_type)) {
+	$run_type = "ERROR";
+    }
+
+    doprint "\n\n";
+    doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n";
+
+    unlink $dmesg;
+    unlink $buildlog;
+
+    if (!defined($minconfig)) {
+	$minconfig = $addconfig;
+
+    } elsif (defined($addconfig)) {
+	run_command "cat $addconfig $minconfig > $tmpdir/add_config" or
+	    dodie "Failed to create temp config";
+	$minconfig = "$tmpdir/add_config";
+    }
+
+    my $checkout = $opt{"CHECKOUT[$i]"};
+    if (defined($checkout)) {
+	run_command "git checkout $checkout" or
+	    die "failed to checkout $checkout";
+    }
+
+    if ($test_type eq "bisect") {
+	bisect $i;
+	next;
+    } elsif ($test_type eq "config_bisect") {
+	config_bisect $i;
+	next;
+    } elsif ($test_type eq "patchcheck") {
+	patchcheck $i;
+	next;
+    }
+
+    if ($build_type ne "nobuild") {
+	build $build_type or next;
+    }
+
+    if ($test_type ne "build") {
+	get_grub_index;
+	get_version;
+	install;
+
+	my $failed = 0;
+	start_monitor;
+	monitor or $failed = 1;
+
+	if (!$failed && $test_type ne "boot" && defined($run_test)) {
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+	next if ($failed);
+    }
+
+    success $i;
+}
+
+if ($opt{"POWEROFF_ON_SUCCESS"}) {
+    halt;
+} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) {
+    reboot;
+}
+
+doprint "\n    $successes of $opt{NUM_TESTS} tests were successful\n\n";
+
+exit 0;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
new file mode 100644
index 0000000..3408c59
--- /dev/null
+++ b/tools/testing/ktest/sample.conf
@@ -0,0 +1,622 @@
+#
+# Config file for ktest.pl
+#
+# Note, all paths must be absolute
+#
+
+# Options set at the beginning of the file are considered to be
+# default options. These options can be overridden by test specific
+# options, with the following exceptions:
+#
+#  LOG_FILE
+#  CLEAR_LOG
+#  POWEROFF_ON_SUCCESS
+#  REBOOT_ON_SUCCESS
+#
+# Test specific options are set after the label:
+#
+# TEST_START
+#
+# The options after a TEST_START label are specific to that test.
+# Each TEST_START label will set up a new test. If you want to
+# perform a test more than once, you can add the ITERATE label
+# to it followed by the number of times you want that test
+# to iterate. If the ITERATE is left off, the test will only
+# be performed once.
+#
+# TEST_START ITERATE 10
+#
+# You can skip a test by adding SKIP (before or after the ITERATE
+# and number)
+#
+# TEST_START SKIP
+#
+# TEST_START SKIP ITERATE 10
+#
+# TEST_START ITERATE 10 SKIP
+#
+# The SKIP label causes the options and the test itself to be ignored.
+# This is useful for setting up several different tests in one config file,
+# and only enabling the ones you want to use for a given test run.
+#
+# You can add default options anywhere in the file as well
+# with the DEFAULTS tag. This allows you to have default options
+# after the test options to keep the test options at the top
+# of the file. You can even place the DEFAULTS tag between
+# test cases (but not in the middle of a single test case)
+#
+# TEST_START
+# MIN_CONFIG = /home/test/config-test1
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-default
+#
+# TEST_START ITERATE 10
+#
+# The above will run the first test with MIN_CONFIG set to
+# /home/test/config-test1. Then 10 tests will be executed
+# with MIN_CONFIG set to /home/test/config-default.
+#
+# You can also disable defaults with the SKIP option
+#
+# DEFAULTS SKIP
+# MIN_CONFIG = /home/test/config-use-sometimes
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-most-times
+#
+# The above will ignore the first MIN_CONFIG. If you want to
+# use the first MIN_CONFIG, remove the SKIP from the first
+# DEFAULTS tag and add it to the second. Be careful, options
+# may only be declared once per test or default section. If you have
+# the same option name twice under the same test or default section,
+# ktest will fail to execute, and no tests will run.
+#
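+# For illustration only (all paths and values below are hypothetical),
+# a small config combining these labels could look like:
+#
+# DEFAULTS
+# MACHINE = testbox
+# SSH_USER = root
+# BUILD_DIR = /home/test/linux.git
+# OUTPUT_DIR = /home/test/build/testbox
+#
+# TEST_START ITERATE 3
+# TEST_TYPE = boot
+#
+# TEST_START SKIP
+# TEST_TYPE = test
+# TEST = ssh root@testbox /root/run_test
+#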
+
+
+#### Mandatory Default Options ####
+
+# These options must be in the default section, although most
+# may be overridden by test options.
+
+# The machine hostname that you will test
+#MACHINE = target
+
+# The box is expected to have ssh on normal bootup; provide the user
+#  to log in as (most likely root, since you need privileged operations)
+#SSH_USER = root
+
+# The directory that contains the Linux source code
+#BUILD_DIR = /home/test/linux.git
+
+# The directory where the objects will be built
+# (cannot be the same as BUILD_DIR)
+#OUTPUT_DIR = /home/test/build/target
+
+# The location of the compiled file to copy to the target
+# (relative to OUTPUT_DIR)
+#BUILD_TARGET = arch/x86/boot/bzImage
+
+# The place to put your image on the test machine
+#TARGET_IMAGE = /boot/vmlinuz-test
+
+# A script or command to reboot the box
+#
+# Here is a digital loggers power switch example
+#POWER_CYCLE = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
+#
+# Here is an example to reboot a virtual box on the current host
+# with the name "Guest".
+#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
+
+# The script or command that reads the console
+#
+#  If you use ttywatch server, something like the following would work.
+#CONSOLE = nc -d localhost 3001
+#
+# For a virtual machine with guest name "Guest".
+#CONSOLE =  virsh console Guest
+
+# Required version ending to differentiate the test
+# from other linux builds on the system.
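+#
+# For example (version string hypothetical), with LOCALVERSION = -test a
+# kernel that would normally report its version as 2.6.38-rc1 reports
+# 2.6.38-rc1-test, so the test kernel cannot be confused with the box's
+# regular kernels.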
+#LOCALVERSION = -test
+
+# The grub title name for the test kernel to boot
+# (Only mandatory if REBOOT_TYPE = grub)
+#
+# Note, ktest.pl will not update the grub menu.lst; you need to
+# manually add an option for the test. ktest.pl will search
+# the grub menu.lst for this option to find what kernel to
+# reboot into.
+#
+# For example, if in the /boot/grub/menu.lst the test kernel title has:
+# title Test Kernel
+# kernel vmlinuz-test
+#GRUB_MENU = Test Kernel
+
+# A script to reboot the target into the test kernel
+# (Only mandatory if REBOOT_TYPE = script)
+#REBOOT_SCRIPT =
+
+#### Optional Config Options (all have defaults) ####
+
+# Start a test setup. If you leave this off, all options
+# will be default and the test will run once.
+# This is a label and not really an option (it takes no value).
+# You can append ITERATE and a number after it to iterate the
+# test a number of times, or SKIP to ignore this test.
+#
+#TEST_START
+#TEST_START ITERATE 5
+#TEST_START SKIP
+
+# Have the following options as default again. Used after tests
+# have already been defined by TEST_START. Optionally, you can
+# just define all default options before the first TEST_START
+# and you do not need this option.
+#
+# This is a label and not really an option (it takes no value).
+# You can append SKIP to this label and the options within this
+# section will be ignored.
+#
+# DEFAULTS
+# DEFAULTS SKIP
+
+# The default test type (default test)
+# The test types may be:
+#   build - only build the kernel, do nothing else
+#   boot - build and boot the kernel
+#   test - build, boot and if TEST is set, run the test script
+#          (If TEST is not set, it defaults back to boot)
+#   bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
+#   patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
+#TEST_TYPE = test
+
+# Test to run if there is a successful boot and TEST_TYPE is test.
+# Must exit with 0 on success and non-zero on error
+# (default undefined)
+#TEST = ssh user@machine /root/run_test
+
+# The build type is any make config type or special command
+#  (default randconfig)
+#   nobuild - skip the clean and build step
+#   useconfig:/path/to/config - use the given config and run
+#              oldconfig on it.
+# This option is ignored if TEST_TYPE is patchcheck or bisect
+#BUILD_TYPE = randconfig
+
+# The make command (default make)
+# If you are building a 32bit x86 on a 64 bit host
+#MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386
+
+# Any build options for the make of the kernel (not for other makes, like configs)
+# (default "")
+#BUILD_OPTIONS = -j20
+
+# If you need an initrd, you can add a script or code here to install
+# it. The environment variable KERNEL_VERSION will be set to the
+# kernel version that is used. Remember to add the initrd line
+# to your grub menu.lst file.
+#
+# Here's a couple of examples to use:
+#POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
+#
+# or on some systems:
+#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+
+# Way to reboot the box to the test kernel.
+# Only valid options so far are "grub" and "script"
+# (default grub)
+# If you specify grub, it will assume grub version 1
+# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
+# and select that target to reboot to the kernel. If this is not
+# your setup, then specify "script" and have a command or script
+# specified in REBOOT_SCRIPT to boot to the target.
+#
+# The entry in /boot/grub/menu.lst must be entered manually.
+# The test will not modify that file.
+#REBOOT_TYPE = grub
+
+# The min config that is needed to build for the machine
+# A nice way to create this is with the following:
+#
+#   $ ssh target
+#   $ lsmod > mymods
+#   $ scp mymods host:/tmp
+#   $ exit
+#   $ cd linux.git
+#   $ rm .config
+#   $ make LSMOD=mymods localyesconfig
+#   $ grep '^CONFIG' .config > /home/test/config-min
+#
+# If you want even less configs:
+#
+#   log in directly to target (do not ssh)
+#
+#   $ su
+#   # lsmod | cut -d' ' -f1 | xargs rmmod
+#
+#   repeat the above several times
+#
+#   # lsmod > mymods
+#   # reboot
+#
+# You may need to reboot to get your network back in order to copy the
+# mymods file to the host, then remove the previous .config and run
+# localyesconfig again. The MIN_CONFIG generated like this will
+# not guarantee network activity to the box, so the TEST_TYPE of
+# test may fail.
+#
+# You might also want to set:
+#   CONFIG_CMDLINE="<your options here>"
+#  randconfig may set the above and override your real command
+#  line options.
+# (default undefined)
+#MIN_CONFIG = /home/test/config-min
+
+# Sometimes there are options that just break the boot and
+# that you do not care about. Here are a few:
+#   # CONFIG_STAGING is not set
+#  Staging drivers are horrible, and can break the build.
+#   # CONFIG_SCSI_DEBUG is not set
+#  SCSI_DEBUG may change your root partition
+#   # CONFIG_KGDB_SERIAL_CONSOLE is not set
+#  KGDB may cause oops waiting for a connection that's not there.
+# This option points to the file containing config options that will be prepended
+# to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
+#
+# Note, config options in MIN_CONFIG will override these options.
+#
+# (default undefined)
+#ADD_CONFIG = /home/test/config-broken
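+#
+# As a sketch, /home/test/config-broken could then simply contain the
+# three lines quoted above:
+#
+#   # CONFIG_STAGING is not set
+#   # CONFIG_SCSI_DEBUG is not set
+#   # CONFIG_KGDB_SERIAL_CONSOLE is not set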
+
+# The location on the host where to write temp files
+# (default /tmp/ktest)
+#TMP_DIR = /tmp/ktest
+
+# Optional log file to write the status (recommended)
+#  Note, this is a DEFAULT section only option.
+# (default undefined)
+#LOG_FILE = /home/test/logfiles/target.log
+
+# Remove old logfile if it exists before starting all tests.
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#CLEAR_LOG = 0
+
+# Line to define a successful boot up in console output.
+# This is what the line contains, not the entire line. If you need
+# the entire line to match, then use regular expression syntax like:
+#  (do not add any quotes around it)
+#
+#  SUCCESS_LINE = ^MyBox Login:$
+#
+# (default "login:")
+#SUCCESS_LINE = login:
+
+# In case the console constantly fills the screen, having
+# a specified time to stop the test after success is recommended.
+# (in seconds)
+# (default 10)
+#STOP_AFTER_SUCCESS = 10
+
+# In case the console constantly fills the screen, having
+# a specified time to stop the test after failure is recommended.
+# (in seconds)
+# (default 60)
+#STOP_AFTER_FAILURE = 60
+
+# Stop testing if a build fails. If set, the script will end if
+# a failure is detected, otherwise it will save off the .config,
+# dmesg and bootlog in a directory called
+# MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss
+# if the STORE_FAILURES directory is set.
+# (default 1)
+# Note, even if this is set to zero, there are some errors that still
+# stop the tests.
+#DIE_ON_FAILURE = 1
+
+# Directory to store failure directories on failure. If this is not
+# set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
+# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
+# (default undefined)
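+#
+# As an illustration (names hypothetical), a boot failure on MACHINE
+# "testbox" with BUILD_TYPE randconfig would be saved in a directory
+# like /home/test/failures/testbox-boot_randconfig-fail-20110115103000,
+# following the naming scheme described under DIE_ON_FAILURE above.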
+#STORE_FAILURES = /home/test/failures
+
+# Build without doing a make mrproper, or removing .config
+# (default 0)
+#BUILD_NOCLEAN = 0
+
+# As the test reads the console, once it hits the SUCCESS_LINE the
+# time it waits for the monitor to settle down between reads
+# can usually be lowered.
+# (in seconds) (default 1)
+#BOOTED_TIMEOUT = 1
+
+# The timeout in seconds when we consider the box hung after
+# the console stops producing output. Be sure to leave enough
+# time here to get past a reboot. Some machines may not produce
+# any console output for a long time during a reboot. You do
+# not want the test to fail just because the system was in
+# the process of rebooting to the test kernel.
+# (default 120)
+#TIMEOUT = 120
+
+# In between tests, a reboot of the box may occur, and this
+# is the time to wait for the console after it stops producing
+# output. Some machines may produce a large lag on reboot,
+# so this should accommodate it.
+# The difference between this and TIMEOUT, is that TIMEOUT happens
+# when rebooting to the test kernel. This sleep time happens
+# after a test has completed and we are about to start running
+# another test. If a reboot to the reliable kernel happens,
+# we wait SLEEP_TIME for the console to stop producing output
+# before starting the next test.
+# (default 60)
+#SLEEP_TIME = 60
+
+# The time in between bisects to sleep (in seconds)
+# (default 60)
+#BISECT_SLEEP_TIME = 60
+
+# Reboot the target box on error (default 0)
+#REBOOT_ON_ERROR = 0
+
+# Power off the target on error (ignored if REBOOT_ON_ERROR is set)
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#POWEROFF_ON_ERROR = 0
+
+# Power off the target after all tests have completed successfully
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#POWEROFF_ON_SUCCESS = 0
+
+# Reboot the target after all test completed successfully (default 1)
+# (ignored if POWEROFF_ON_SUCCESS is set)
+#REBOOT_ON_SUCCESS = 1
+
+# In case there are issues with rebooting, you can specify this
+# to always powercycle after this amount of time after calling
+# reboot.
+# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
+# makes it powercycle immediately after rebooting. Do not define
+# it if you do not want it.
+# (default undefined)
+#POWERCYCLE_AFTER_REBOOT = 5
+
+# In case there are issues with halting, you can specify this
+# to always poweroff after this amount of time after calling
+# halt.
+# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
+# makes it poweroff immediately after halting. Do not define
+# it if you do not want it.
+# (default undefined)
+#POWEROFF_AFTER_HALT = 20
+
+# A script or command to power off the box (default undefined)
+# Needed for POWEROFF_ON_ERROR and SUCCESS
+#
+# Example for digital loggers power switch:
+#POWER_OFF = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
+#
+# Example for a virtual guest called "Guest".
+#POWER_OFF = virsh destroy Guest
+
+# The way to execute a command on the target
+# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND";)
+# The variables SSH_USER, MACHINE and SSH_COMMAND are defined
+#SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND
+
+# The way to copy a file to the target
+# (default scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE)
+# The variables SSH_USER, MACHINE, SRC_FILE and DST_FILE are defined.
+#SCP_TO_TARGET = scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE
+
+# The nice way to reboot the target
+# (default ssh $SSH_USER@$MACHINE reboot)
+# The variables SSH_USER and MACHINE are defined.
+#REBOOT = ssh $SSH_USER@$MACHINE reboot
+
+#### Per test run options ####
+# The following options are only allowed in TEST_START sections.
+# They are ignored in the DEFAULTS sections.
+#
+# All of these are optional and undefined by default, although
+#  some of these options are required for TEST_TYPE of patchcheck
+#  and bisect.
+#
+#
+# CHECKOUT = branch
+#
+#  If the BUILD_DIR is a git repository, then you can set this option
+#  to checkout the given branch before running the TEST. If you
+#  specify this for the first run, that branch will be used for
+#  all subsequent tests until a new CHECKOUT is set.
+#
+#
+#
+# For TEST_TYPE = patchcheck
+#
+#  This expects the BUILD_DIR to be a git repository, and
+#  will checkout the PATCHCHECK_START commit.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  The MIN_CONFIG will be used for all builds of the patchcheck. The build type
+#  used for patchcheck is oldconfig.
+#
+#  PATCHCHECK_START is required and is the first patch to
+#   test (the SHA1 of the commit). You may also specify anything
+#   that git checkout allows (branch name, tag, HEAD~3).
+#
+#  PATCHCHECK_END is the last patch to check (default HEAD)
+#
+#  PATCHCHECK_TYPE is required and is the type of test to run:
+#      build, boot, test.
+#
+#   Note, the build test will look for warnings; if a warning occurs
+#     in a file that a commit touches, the build will fail.
+#
+#   If BUILD_NOCLEAN is set, then make mrproper will not be run on
+#   any of the builds, just like all other TEST_TYPE tests. But
+#   what makes patchcheck different from the other tests is that if
+#   BUILD_NOCLEAN is not set, only the first and last patches run
+#   make mrproper. This helps speed up the test.
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = patchcheck
+#   CHECKOUT = mybranch
+#   PATCHCHECK_TYPE = boot
+#   PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
+#   PATCHCHECK_END = HEAD~2
+#
+#
+#
+# For TEST_TYPE = bisect
+#
+#  You can specify a git bisect if the BUILD_DIR is a git repository.
+#  The MIN_CONFIG will be used for all builds of the bisect. The build type
+#  used for bisecting is oldconfig.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  BISECT_TYPE is the type of test to perform:
+#	build	- bad fails to build
+#	boot	- bad builds but fails to boot
+#	test	- bad boots but fails a test
+#
+# BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types)
+# BISECT_BAD is the commit to label as bad (accepts all git bad commit types)
+#
+# The above three options are required for a bisect operation.
+#
+# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
+#
+#   If an operation fails in the bisect that was not expected to
+#   fail, then the test ends. The state of the BUILD_DIR will be
+#   left at where the failure occurred. You can examine the
+#   reason for the failure, and perhaps even find a git commit
+#   that would work to continue with. You can run:
+#
+#   git bisect log > /path/to/replay/file
+#
+#   Then add:
+#
+#    BISECT_REPLAY= /path/to/replay/file
+#
+#   and run the test again. The test will perform the initial
+#    git bisect start, git bisect good, and git bisect bad, and
+#    then it will run git bisect replay on this file, before
+#    continuing with the bisect.
+#
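+#   An illustrative replay section (paths and commits are hypothetical,
+#   the commit IDs are just reused from the bisect example below):
+#
+#   TEST_START
+#   TEST_TYPE = bisect
+#   BISECT_TYPE = boot
+#   BISECT_GOOD = v2.6.36
+#   BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
+#   BISECT_REPLAY = /home/test/bisect-replay
+#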
+# BISECT_START = commit (optional, default undefined)
+#
+#   As with BISECT_REPLAY, use this if the bisect failed on a commit
+#   that just happened to be a bad commit in the middle of the bisect,
+#   and you need to skip it. If BISECT_START is defined, the test
+#   will check out that commit after doing the initial git bisect start,
+#   git bisect good, and git bisect bad, and after running the git bisect
+#   replay if BISECT_REPLAY is set.
+#
+# BISECT_REVERSE = 1 (optional, default 0)
+#
+#   In those strange instances where it was broken forever
+#   and you are trying to find where it started to work!
+#   Set BISECT_GOOD to the commit that was last known to fail.
+#   Set BISECT_BAD to the commit that is known to start working.
+#   With BISECT_REVERSE = 1, the test will consider failures as
+#   good, and successes as bad.
+#
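+#   An illustrative reverse bisect (commits are hypothetical):
+#
+#   TEST_START
+#   TEST_TYPE = bisect
+#   BISECT_TYPE = boot
+#   BISECT_REVERSE = 1
+#   BISECT_GOOD = <commit that is still failing>
+#   BISECT_BAD = <commit that started working>
+#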
+# BISECT_CHECK = 1 (optional, default 0)
+#
+#   Just to be sure the good is good and the bad is bad, setting
+#   BISECT_CHECK to 1 will start the bisect by first checking
+#   out BISECT_BAD and making sure it fails, then checking
+#   out BISECT_GOOD and making sure it succeeds, before starting
+#   the bisect (it works for BISECT_REVERSE too).
+#
+#   You can limit the test to just check BISECT_GOOD or
+#   BISECT_BAD with BISECT_CHECK = good or
+#   BISECT_CHECK = bad, respectively.
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = bisect
+#   BISECT_GOOD = v2.6.36
+#   BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
+#   BISECT_TYPE = build
+#   MIN_CONFIG = /home/test/config-bisect
+#
+#
+#
+# For TEST_TYPE = config_bisect
+#
+#  Use this in those cases where you have two different configs: one of
+#  them works, the other does not, and you do not know which config
+#  option causes the problem.
+#  The TEST_TYPE config_bisect will bisect the bad config looking for
+#  the config option that causes the failure.
+#
+#  The way it works is this:
+#
+#   First it finds a config to work with. Since a different version or
+#   MIN_CONFIG may cause different dependencies, it must run through this
+#   preparation.
+#
+#   It overwrites any config set in the bad config with a config set in
+#   either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
+#   are minimal and do not disable configs you want to test
+#   (i.e.  # CONFIG_FOO is not set).
+#
+#   An oldconfig is run on the bad config and any new config that
+#   appears will be added to the configs to test.
+#
+#   Finally, it generates a config with the above result and runs it
+#   again through make oldconfig to produce a config that should be
+#   satisfied by kconfig.
+#
+#   Then it starts the bisect.
+#
+#   The configs to test are cut in half. If all the configs in this
+#   half depend on a config in the other half, then the other half
+#   is tested instead. If no configs are enabled by either half, then
+#   this means a circular dependency exists and the test fails.
+#
+#   A config is created with the test half, and the bisect test is run.
+#
+#   If the bisect succeeds, then all configs in the generated config
+#   are removed from the configs to test and added to the configs that
+#   will be enabled for all builds (they will be enabled, but not be part
+#   of the configs to examine).
+#
+#   If the bisect fails, then all test configs that were not enabled by
+#   the config file are removed from the test. These configs will not
+#   be enabled in future tests. Since the current config failed, we consider
+#   this to be a subset of the config that we started with.
+#
+#   When we are down to one config, it is considered the bad config.
+#
+#   Note, the config chosen may not be the true bad config. Due to
+#   dependencies and selections of the kbuild system, multiple
+#   configs may be needed to cause a failure. If you disable the
+#   config that was found, restart the test, and the test fails
+#   again, it is recommended to rerun the config_bisect with a new
+#   bad config that does not have the found config enabled.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  CONFIG_BISECT_TYPE is the type of test to perform:
+#	build	- bad fails to build
+#	boot	- bad builds but fails to boot
+#	test	- bad boots but fails a test
+#
+#   CONFIG_BISECT is the config that failed to boot
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = config_bisect
+#   CONFIG_BISECT_TYPE = build
+#   CONFIG_BISECT = /home/test/config-bad
+#   MIN_CONFIG = /home/test/config-min
+#
diff --git a/usr/Kconfig b/usr/Kconfig
index c2c7fe2..65b845bd 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -46,7 +46,7 @@
 	  If you are not sure, leave it set to "0".
 
 config RD_GZIP
-	bool "Support initial ramdisks compressed using gzip" if EMBEDDED
+	bool "Support initial ramdisks compressed using gzip" if EXPERT
 	default y
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_GZIP
@@ -55,8 +55,8 @@
 	  If unsure, say Y.
 
 config RD_BZIP2
-	bool "Support initial ramdisks compressed using bzip2" if EMBEDDED
-	default !EMBEDDED
+	bool "Support initial ramdisks compressed using bzip2" if EXPERT
+	default !EXPERT
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_BZIP2
 	help
@@ -64,17 +64,26 @@
 	  If unsure, say N.
 
 config RD_LZMA
-	bool "Support initial ramdisks compressed using LZMA" if EMBEDDED
-	default !EMBEDDED
+	bool "Support initial ramdisks compressed using LZMA" if EXPERT
+	default !EXPERT
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_LZMA
 	help
 	  Support loading of a LZMA encoded initial ramdisk or cpio buffer
 	  If unsure, say N.
 
+config RD_XZ
+	bool "Support initial ramdisks compressed using XZ" if EXPERT
+	default !EXPERT
+	depends on BLK_DEV_INITRD
+	select DECOMPRESS_XZ
+	help
+	  Support loading of an XZ encoded initial ramdisk or cpio buffer.
+	  If unsure, say N.
+
 config RD_LZO
-	bool "Support initial ramdisks compressed using LZO" if EMBEDDED
-	default !EMBEDDED
+	bool "Support initial ramdisks compressed using LZO" if EXPERT
+	default !EXPERT
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_LZO
 	help
@@ -139,6 +148,15 @@
 	  three. Compression is slowest. The initramfs size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
+config INITRAMFS_COMPRESSION_XZ
+	bool "XZ"
+	depends on RD_XZ
+	help
+	  XZ uses the LZMA2 algorithm. The initramfs size is about 30%
+	  smaller with XZ in comparison to gzip. Decompression speed
+	  is better than that of bzip2 but worse than gzip and LZO.
+	  Compression is slow.
+
 config INITRAMFS_COMPRESSION_LZO
 	bool "LZO"
 	depends on RD_LZO
diff --git a/usr/Makefile b/usr/Makefile
index 6faa444..029ffe6 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -15,6 +15,9 @@
 # Lzma
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA)   = .lzma
 
+# XZ
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_XZ)     = .xz
+
 # Lzo
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO)   = .lzo
 
@@ -50,7 +53,7 @@
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.xz initramfs_data.cpio.lzo initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 7f1178f..f63ccb0 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -15,3 +15,6 @@
 
 config KVM_MMIO
        bool
+
+config KVM_ASYNC_PF
+       bool
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 7c98928..ae72ae60 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -55,58 +55,31 @@
 	return index;
 }
 
-static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 {
-	struct kvm_assigned_dev_kernel *assigned_dev;
-	int i;
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+	u32 vector;
+	int index;
 
-	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
-				    interrupt_work);
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
+		spin_lock(&assigned_dev->intx_lock);
+		disable_irq_nosync(irq);
+		assigned_dev->host_irq_disabled = true;
+		spin_unlock(&assigned_dev->intx_lock);
+	}
 
-	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		struct kvm_guest_msix_entry *guest_entries =
-			assigned_dev->guest_msix_entries;
-		for (i = 0; i < assigned_dev->entries_nr; i++) {
-			if (!(guest_entries[i].flags &
-					KVM_ASSIGNED_MSIX_PENDING))
-				continue;
-			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
+		index = find_index_from_host_irq(assigned_dev, irq);
+		if (index >= 0) {
+			vector = assigned_dev->
+					guest_msix_entries[index].vector;
 			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id,
-				    guest_entries[i].vector, 1);
+				    assigned_dev->irq_source_id, vector, 1);
 		}
 	} else
 		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 			    assigned_dev->guest_irq, 1);
 
-	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-}
-
-static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
-{
-	unsigned long flags;
-	struct kvm_assigned_dev_kernel *assigned_dev =
-		(struct kvm_assigned_dev_kernel *) dev_id;
-
-	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int index = find_index_from_host_irq(assigned_dev, irq);
-		if (index < 0)
-			goto out;
-		assigned_dev->guest_msix_entries[index].flags |=
-			KVM_ASSIGNED_MSIX_PENDING;
-	}
-
-	schedule_work(&assigned_dev->interrupt_work);
-
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
-		disable_irq_nosync(irq);
-		assigned_dev->host_irq_disabled = true;
-	}
-
-out:
-	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -114,7 +87,6 @@
 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_assigned_dev_kernel *dev;
-	unsigned long flags;
 
 	if (kian->gsi == -1)
 		return;
@@ -127,12 +99,12 @@
 	/* The guest irq may be shared so this ack may be
 	 * from another device.
 	 */
-	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
+	spin_lock(&dev->intx_lock);
 	if (dev->host_irq_disabled) {
 		enable_irq(dev->host_irq);
 		dev->host_irq_disabled = false;
 	}
-	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
+	spin_unlock(&dev->intx_lock);
 }
 
 static void deassign_guest_irq(struct kvm *kvm,
@@ -141,6 +113,9 @@
 	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
 	assigned_dev->ack_notifier.gsi = -1;
 
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+		    assigned_dev->guest_irq, 0);
+
 	if (assigned_dev->irq_source_id != -1)
 		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
 	assigned_dev->irq_source_id = -1;
@@ -152,28 +127,19 @@
 			      struct kvm_assigned_dev_kernel *assigned_dev)
 {
 	/*
-	 * In kvm_free_device_irq, cancel_work_sync return true if:
-	 * 1. work is scheduled, and then cancelled.
-	 * 2. work callback is executed.
-	 *
-	 * The first one ensured that the irq is disabled and no more events
-	 * would happen. But for the second one, the irq may be enabled (e.g.
-	 * for MSI). So we disable irq here to prevent further events.
+	 * We disable irq here to prevent further events.
 	 *
 	 * Notice this maybe result in nested disable if the interrupt type is
 	 * INTx, but it's OK for we are going to free it.
 	 *
 	 * If this function is a part of VM destroy, please ensure that till
 	 * now, the kvm state is still legal for probably we also have to wait
-	 * interrupt_work done.
+	 * on a currently running IRQ handler.
 	 */
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 		int i;
 		for (i = 0; i < assigned_dev->entries_nr; i++)
-			disable_irq_nosync(assigned_dev->
-					   host_msix_entries[i].vector);
-
-		cancel_work_sync(&assigned_dev->interrupt_work);
+			disable_irq(assigned_dev->host_msix_entries[i].vector);
 
 		for (i = 0; i < assigned_dev->entries_nr; i++)
 			free_irq(assigned_dev->host_msix_entries[i].vector,
@@ -185,8 +151,7 @@
 		pci_disable_msix(assigned_dev->dev);
 	} else {
 		/* Deal with MSI and INTx */
-		disable_irq_nosync(assigned_dev->host_irq);
-		cancel_work_sync(&assigned_dev->interrupt_work);
+		disable_irq(assigned_dev->host_irq);
 
 		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 
@@ -232,7 +197,8 @@
 {
 	kvm_free_assigned_irq(kvm, assigned_dev);
 
-	pci_reset_function(assigned_dev->dev);
+	__pci_reset_function(assigned_dev->dev);
+	pci_restore_state(assigned_dev->dev);
 
 	pci_release_regions(assigned_dev->dev);
 	pci_disable_device(assigned_dev->dev);
@@ -265,8 +231,8 @@
 	 * on the same interrupt line is not a happy situation: there
 	 * are going to be long delays in accepting, acking, etc.
 	 */
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
-			0, "kvm_assigned_intx_device", (void *)dev))
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 IRQF_ONESHOT, dev->irq_name, (void *)dev))
 		return -EIO;
 	return 0;
 }
@@ -284,8 +250,8 @@
 	}
 
 	dev->host_irq = dev->dev->irq;
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
-			"kvm_assigned_msi_device", (void *)dev)) {
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 0, dev->irq_name, (void *)dev)) {
 		pci_disable_msi(dev->dev);
 		return -EIO;
 	}
@@ -310,10 +276,9 @@
 		return r;
 
 	for (i = 0; i < dev->entries_nr; i++) {
-		r = request_irq(dev->host_msix_entries[i].vector,
-				kvm_assigned_dev_intr, 0,
-				"kvm_assigned_msix_device",
-				(void *)dev);
+		r = request_threaded_irq(dev->host_msix_entries[i].vector,
+					 NULL, kvm_assigned_dev_thread,
+					 0, dev->irq_name, (void *)dev);
 		if (r)
 			goto err;
 	}
@@ -370,6 +335,9 @@
 	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
 		return r;
 
+	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
+		 pci_name(dev->dev));
+
 	switch (host_irq_type) {
 	case KVM_DEV_IRQ_HOST_INTX:
 		r = assigned_device_enable_host_intx(kvm, dev);
@@ -547,6 +515,7 @@
 	}
 
 	pci_reset_function(dev);
+	pci_save_state(dev);
 
 	match->assigned_dev_id = assigned_dev->assigned_dev_id;
 	match->host_segnr = assigned_dev->segnr;
@@ -554,12 +523,10 @@
 	match->host_devfn = assigned_dev->devfn;
 	match->flags = assigned_dev->flags;
 	match->dev = dev;
-	spin_lock_init(&match->assigned_dev_lock);
+	spin_lock_init(&match->intx_lock);
 	match->irq_source_id = -1;
 	match->kvm = kvm;
 	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-	INIT_WORK(&match->interrupt_work,
-		  kvm_assigned_dev_interrupt_work_handler);
 
 	list_add(&match->list, &kvm->arch.assigned_dev_head);
 
@@ -579,6 +546,7 @@
 	mutex_unlock(&kvm->lock);
 	return r;
 out_list_del:
+	pci_restore_state(dev);
 	list_del(&match->list);
 	pci_release_regions(dev);
 out_disable:
@@ -651,9 +619,9 @@
 			r = -ENOMEM;
 			goto msix_nr_out;
 		}
-		adev->guest_msix_entries = kzalloc(
-				sizeof(struct kvm_guest_msix_entry) *
-				entry_nr->entry_nr, GFP_KERNEL);
+		adev->guest_msix_entries =
+			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
+				GFP_KERNEL);
 		if (!adev->guest_msix_entries) {
 			kfree(adev->host_msix_entries);
 			r = -ENOMEM;
@@ -706,7 +674,7 @@
 				  unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
-	int r = -ENOTTY;
+	int r;
 
 	switch (ioctl) {
 	case KVM_ASSIGN_PCI_DEVICE: {
@@ -724,7 +692,6 @@
 		r = -EOPNOTSUPP;
 		break;
 	}
-#ifdef KVM_CAP_ASSIGN_DEV_IRQ
 	case KVM_ASSIGN_DEV_IRQ: {
 		struct kvm_assigned_irq assigned_irq;
 
@@ -747,8 +714,6 @@
 			goto out;
 		break;
 	}
-#endif
-#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
 	case KVM_DEASSIGN_PCI_DEVICE: {
 		struct kvm_assigned_pci_dev assigned_dev;
 
@@ -760,7 +725,6 @@
 			goto out;
 		break;
 	}
-#endif
 #ifdef KVM_CAP_IRQ_ROUTING
 	case KVM_SET_GSI_ROUTING: {
 		struct kvm_irq_routing routing;
@@ -813,6 +777,9 @@
 		break;
 	}
 #endif
+	default:
+		r = -ENOTTY;
+		break;
 	}
 out:
 	return r;
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
new file mode 100644
index 0000000..74268b4
--- /dev/null
+++ b/virt/kvm/async_pf.c
@@ -0,0 +1,216 @@
+/*
+ * kvm asynchronous fault support
+ *
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Author:
+ *      Gleb Natapov <gleb@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mmu_context.h>
+
+#include "async_pf.h"
+#include <trace/events/kvm.h>
+
+static struct kmem_cache *async_pf_cache;
+
+int kvm_async_pf_init(void)
+{
+	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
+
+	if (!async_pf_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void kvm_async_pf_deinit(void)
+{
+	if (async_pf_cache)
+		kmem_cache_destroy(async_pf_cache);
+	async_pf_cache = NULL;
+}
+
+void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	INIT_LIST_HEAD(&vcpu->async_pf.done);
+	INIT_LIST_HEAD(&vcpu->async_pf.queue);
+	spin_lock_init(&vcpu->async_pf.lock);
+}
+
+static void async_pf_execute(struct work_struct *work)
+{
+	struct page *page = NULL;
+	struct kvm_async_pf *apf =
+		container_of(work, struct kvm_async_pf, work);
+	struct mm_struct *mm = apf->mm;
+	struct kvm_vcpu *vcpu = apf->vcpu;
+	unsigned long addr = apf->addr;
+	gva_t gva = apf->gva;
+
+	might_sleep();
+
+	use_mm(mm);
+	down_read(&mm->mmap_sem);
+	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	up_read(&mm->mmap_sem);
+	unuse_mm(mm);
+
+	spin_lock(&vcpu->async_pf.lock);
+	list_add_tail(&apf->link, &vcpu->async_pf.done);
+	apf->page = page;
+	apf->done = true;
+	spin_unlock(&vcpu->async_pf.lock);
+
+	/*
+	 * apf may be freed by kvm_check_async_pf_completion() after
+	 * this point
+	 */
+
+	trace_kvm_async_pf_completed(addr, page, gva);
+
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
+
+	mmdrop(mm);
+	kvm_put_kvm(vcpu->kvm);
+}
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+{
+	/* cancel outstanding work queue item */
+	while (!list_empty(&vcpu->async_pf.queue)) {
+		struct kvm_async_pf *work =
+			list_entry(vcpu->async_pf.queue.next,
+				   typeof(*work), queue);
+		cancel_work_sync(&work->work);
+		list_del(&work->queue);
+		if (!work->done) /* work was canceled */
+			kmem_cache_free(async_pf_cache, work);
+	}
+
+	spin_lock(&vcpu->async_pf.lock);
+	while (!list_empty(&vcpu->async_pf.done)) {
+		struct kvm_async_pf *work =
+			list_entry(vcpu->async_pf.done.next,
+				   typeof(*work), link);
+		list_del(&work->link);
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
+	spin_unlock(&vcpu->async_pf.lock);
+
+	vcpu->async_pf.queued = 0;
+}
+
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+{
+	struct kvm_async_pf *work;
+
+	while (!list_empty_careful(&vcpu->async_pf.done) &&
+	      kvm_arch_can_inject_async_page_present(vcpu)) {
+		spin_lock(&vcpu->async_pf.lock);
+		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+					      link);
+		list_del(&work->link);
+		spin_unlock(&vcpu->async_pf.lock);
+
+		if (work->page)
+			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_present(vcpu, work);
+
+		list_del(&work->queue);
+		vcpu->async_pf.queued--;
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
+}
+
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch)
+{
+	struct kvm_async_pf *work;
+
+	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
+		return 0;
+
+	/* setup delayed work */
+
+	/*
+	 * do the allocation with GFP_NOWAIT: if we would have to sleep
+	 * anyway, we may as well sleep faulting the page in synchronously
+	 */
+	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+	if (!work)
+		return 0;
+
+	work->page = NULL;
+	work->done = false;
+	work->vcpu = vcpu;
+	work->gva = gva;
+	work->addr = gfn_to_hva(vcpu->kvm, gfn);
+	work->arch = *arch;
+	work->mm = current->mm;
+	atomic_inc(&work->mm->mm_count);
+	kvm_get_kvm(work->vcpu->kvm);
+
+	/*
+	 * this can't really happen, otherwise gfn_to_pfn_async
+	 * would succeed
+	 */
+	if (unlikely(kvm_is_error_hva(work->addr)))
+		goto retry_sync;
+
+	INIT_WORK(&work->work, async_pf_execute);
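+	/*
+	 * If the work cannot be queued, bail out via retry_sync:
+	 * returning 0 lets the caller fall back to a synchronous fault.
+	 */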
+	if (!schedule_work(&work->work))
+		goto retry_sync;
+
+	list_add_tail(&work->queue, &vcpu->async_pf.queue);
+	vcpu->async_pf.queued++;
+	kvm_arch_async_page_not_present(vcpu, work);
+	return 1;
+retry_sync:
+	kvm_put_kvm(work->vcpu->kvm);
+	mmdrop(work->mm);
+	kmem_cache_free(async_pf_cache, work);
+	return 0;
+}
+
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
+{
+	struct kvm_async_pf *work;
+
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return 0;
+
+	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+
+	work->page = bad_page;
+	get_page(bad_page);
+	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
+
+	spin_lock(&vcpu->async_pf.lock);
+	list_add_tail(&work->link, &vcpu->async_pf.done);
+	spin_unlock(&vcpu->async_pf.lock);
+
+	vcpu->async_pf.queued++;
+	return 0;
+}
diff --git a/virt/kvm/async_pf.h b/virt/kvm/async_pf.h
new file mode 100644
index 0000000..e7ef6447
--- /dev/null
+++ b/virt/kvm/async_pf.h
@@ -0,0 +1,36 @@
+/*
+ * kvm asynchronous fault support
+ *
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Author:
+ *      Gleb Natapov <gleb@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __KVM_ASYNC_PF_H__
+#define __KVM_ASYNC_PF_H__
+
+#ifdef CONFIG_KVM_ASYNC_PF
+int kvm_async_pf_init(void);
+void kvm_async_pf_deinit(void);
+void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
+#else
+#define kvm_async_pf_init() (0)
+#define kvm_async_pf_deinit() do{}while(0)
+#define kvm_async_pf_vcpu_init(C) do{}while(0)
+#endif
+
+#endif
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index c1f1e3c..2ca4535 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -44,14 +44,19 @@
  */
 
 struct _irqfd {
-	struct kvm               *kvm;
-	struct eventfd_ctx       *eventfd;
-	int                       gsi;
-	struct list_head          list;
-	poll_table                pt;
-	wait_queue_t              wait;
-	struct work_struct        inject;
-	struct work_struct        shutdown;
+	/* Used for MSI fast-path */
+	struct kvm *kvm;
+	wait_queue_t wait;
+	/* Update side is protected by irqfds.lock */
+	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+	/* Used for level IRQ fast-path */
+	int gsi;
+	struct work_struct inject;
+	/* Used for setup/shutdown */
+	struct eventfd_ctx *eventfd;
+	struct list_head list;
+	poll_table pt;
+	struct work_struct shutdown;
 };
 
 static struct workqueue_struct *irqfd_cleanup_wq;
@@ -125,14 +130,22 @@
 {
 	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
 	unsigned long flags = (unsigned long)key;
+	struct kvm_kernel_irq_routing_entry *irq;
+	struct kvm *kvm = irqfd->kvm;
 
-	if (flags & POLLIN)
+	if (flags & POLLIN) {
+		rcu_read_lock();
+		irq = rcu_dereference(irqfd->irq_entry);
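+		/*
+		 * irq_entry is only set for MSI routes (see irqfd_update),
+		 * so anything else takes the schedule_work() slow path.
+		 */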
 		/* An event has been signaled, inject an interrupt */
-		schedule_work(&irqfd->inject);
+		if (irq)
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+		else
+			schedule_work(&irqfd->inject);
+		rcu_read_unlock();
+	}
 
 	if (flags & POLLHUP) {
 		/* The eventfd is closing, detach from KVM */
-		struct kvm *kvm = irqfd->kvm;
 		unsigned long flags;
 
 		spin_lock_irqsave(&kvm->irqfds.lock, flags);
@@ -163,9 +176,31 @@
 	add_wait_queue(wqh, &irqfd->wait);
 }
 
+/* Must be called under irqfds.lock */
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+			 struct kvm_irq_routing_table *irq_rt)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+	struct hlist_node *n;
+
+	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+		rcu_assign_pointer(irqfd->irq_entry, NULL);
+		return;
+	}
+
+	hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+		/* Only fast-path MSI. */
+		if (e->type == KVM_IRQ_ROUTING_MSI)
+			rcu_assign_pointer(irqfd->irq_entry, e);
+		else
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
+	}
+}
+
 static int
 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 {
+	struct kvm_irq_routing_table *irq_rt;
 	struct _irqfd *irqfd, *tmp;
 	struct file *file = NULL;
 	struct eventfd_ctx *eventfd = NULL;
@@ -215,6 +250,10 @@
 		goto fail;
 	}
 
+	irq_rt = rcu_dereference_protected(kvm->irq_routing,
+					   lockdep_is_held(&kvm->irqfds.lock));
+	irqfd_update(kvm, irqfd, irq_rt);
+
 	events = file->f_op->poll(file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -271,8 +310,17 @@
 	spin_lock_irq(&kvm->irqfds.lock);
 
 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+			/*
+			 * This rcu_assign_pointer is needed for when
+			 * another thread calls kvm_irqfd_update before
+			 * we flush workqueue below.
+			 * It is paired with synchronize_rcu done by caller
+			 * of that function.
+			 */
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
 			irqfd_deactivate(irqfd);
+		}
 	}
 
 	spin_unlock_irq(&kvm->irqfds.lock);
@@ -322,6 +370,25 @@
 }
 
 /*
+ * Change irq_routing and irqfd.
+ * Caller must invoke synchronize_rcu afterwards.
+ */
+void kvm_irq_routing_update(struct kvm *kvm,
+			    struct kvm_irq_routing_table *irq_rt)
+{
+	struct _irqfd *irqfd;
+
+	spin_lock_irq(&kvm->irqfds.lock);
+
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+
+	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+		irqfd_update(kvm, irqfd, irq_rt);
+
+	spin_unlock_irq(&kvm->irqfds.lock);
+}
+
+/*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated single-thread
  * queue to prevent deadlock against flushing the normal work-queue.
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 8edca91..9f614b4 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -114,8 +114,8 @@
 	return r;
 }
 
-static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-		       struct kvm *kvm, int irq_source_id, int level)
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+		struct kvm *kvm, int irq_source_id, int level)
 {
 	struct kvm_lapic_irq irq;
 
@@ -409,8 +409,9 @@
 
 	mutex_lock(&kvm->irq_lock);
 	old = kvm->irq_routing;
-	rcu_assign_pointer(kvm->irq_routing, new);
+	kvm_irq_routing_update(kvm, new);
 	mutex_unlock(&kvm->irq_lock);
+
 	synchronize_rcu();
 
 	new = old;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5225052..f29abeb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -55,6 +55,7 @@
 #include <asm-generic/bitops/le.h>
 
 #include "coalesced_mmio.h"
+#include "async_pf.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
@@ -89,7 +90,8 @@
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
-static bool kvm_rebooting;
+bool kvm_rebooting;
+EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
@@ -102,8 +104,26 @@
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
-		struct page *page = compound_head(pfn_to_page(pfn));
-		return PageReserved(page);
+		int reserved;
+		struct page *tail = pfn_to_page(pfn);
+		struct page *head = compound_trans_head(tail);
+		reserved = PageReserved(head);
+		if (head != tail) {
+			/*
+			 * "head" is not a dangling pointer
+			 * (compound_trans_head takes care of that)
+			 * but the hugepage may have been split
+			 * from under us (and we may not hold a
+			 * reference count on the head page so it can
+			 * be reused before we run PageReferenced), so
+			 * we've to check PageTail before returning
+			 * what we just read.
+			 */
+			smp_rmb();
+			if (PageTail(tail))
+				return reserved;
+		}
+		return PageReserved(tail);
 	}
 
 	return true;
@@ -167,8 +187,12 @@
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
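+	/*
+	 * Snapshot tlbs_dirty before requesting the flush; the cmpxchg
+	 * below only clears it if nothing was dirtied in the meantime,
+	 * so a late update is not lost.
+	 */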
+	int dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
@@ -186,6 +210,7 @@
 	vcpu->kvm = kvm;
 	vcpu->vcpu_id = id;
 	init_waitqueue_head(&vcpu->wq);
+	kvm_async_pf_vcpu_init(vcpu);
 
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page) {
@@ -247,7 +272,7 @@
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
 	kvm->mmu_notifier_seq++;
-	need_tlb_flush = kvm_unmap_hva(kvm, address);
+	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -291,6 +316,7 @@
 	kvm->mmu_notifier_count++;
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
+	need_tlb_flush |= kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -344,6 +370,22 @@
 	return young;
 }
 
+static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
+				       struct mm_struct *mm,
+				       unsigned long address)
+{
+	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	int young, idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+	young = kvm_test_age_hva(kvm, address);
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	return young;
+}
+
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 				     struct mm_struct *mm)
 {
@@ -360,6 +402,7 @@
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
+	.test_young		= kvm_mmu_notifier_test_young,
 	.change_pte		= kvm_mmu_notifier_change_pte,
 	.release		= kvm_mmu_notifier_release,
 };
@@ -381,11 +424,15 @@
 
 static struct kvm *kvm_create_vm(void)
 {
-	int r = 0, i;
-	struct kvm *kvm = kvm_arch_create_vm();
+	int r, i;
+	struct kvm *kvm = kvm_arch_alloc_vm();
 
-	if (IS_ERR(kvm))
-		goto out;
+	if (!kvm)
+		return ERR_PTR(-ENOMEM);
+
+	r = kvm_arch_init_vm(kvm);
+	if (r)
+		goto out_err_nodisable;
 
 	r = hardware_enable_all();
 	if (r)
@@ -399,23 +446,19 @@
 	r = -ENOMEM;
 	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 	if (!kvm->memslots)
-		goto out_err;
+		goto out_err_nosrcu;
 	if (init_srcu_struct(&kvm->srcu))
-		goto out_err;
+		goto out_err_nosrcu;
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
 					GFP_KERNEL);
-		if (!kvm->buses[i]) {
-			cleanup_srcu_struct(&kvm->srcu);
+		if (!kvm->buses[i])
 			goto out_err;
-		}
 	}
 
 	r = kvm_init_mmu_notifier(kvm);
-	if (r) {
-		cleanup_srcu_struct(&kvm->srcu);
+	if (r)
 		goto out_err;
-	}
 
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
@@ -429,19 +472,35 @@
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
-out:
+
 	return kvm;
 
 out_err:
+	cleanup_srcu_struct(&kvm->srcu);
+out_err_nosrcu:
 	hardware_disable_all();
 out_err_nodisable:
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kfree(kvm->buses[i]);
 	kfree(kvm->memslots);
-	kfree(kvm);
+	kvm_arch_free_vm(kvm);
 	return ERR_PTR(r);
 }
 
+static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	if (!memslot->dirty_bitmap)
+		return;
+
+	if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
+		vfree(memslot->dirty_bitmap_head);
+	else
+		kfree(memslot->dirty_bitmap_head);
+
+	memslot->dirty_bitmap = NULL;
+	memslot->dirty_bitmap_head = NULL;
+}
+
 /*
  * Free any memory in @free but not in @dont.
  */
@@ -454,7 +513,7 @@
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
-		vfree(free->dirty_bitmap);
+		kvm_destroy_dirty_bitmap(free);
 
 
 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
@@ -465,7 +524,6 @@
 	}
 
 	free->npages = 0;
-	free->dirty_bitmap = NULL;
 	free->rmap = NULL;
 }
 
@@ -499,6 +557,9 @@
 	kvm_arch_flush_shadow(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
+	kvm_free_physmem(kvm);
+	cleanup_srcu_struct(&kvm->srcu);
+	kvm_arch_free_vm(kvm);
 	hardware_disable_all();
 	mmdrop(mm);
 }
@@ -528,6 +589,27 @@
 }
 
 /*
+ * Allocation size is twice as large as the actual dirty bitmap size.
+ * This makes it possible to do double buffering: see x86's
+ * kvm_vm_ioctl_get_dirty_log().
+ */
+static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
+
+	if (dirty_bytes > PAGE_SIZE)
+		memslot->dirty_bitmap = vzalloc(dirty_bytes);
+	else
+		memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
+
+	if (!memslot->dirty_bitmap)
+		return -ENOMEM;
+
+	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
+	return 0;
+}
+
+/*
  * Allocate some memory and give it an address in the guest physical address
  * space.
  *
@@ -604,13 +686,11 @@
 	/* Allocate if a slot is being created */
 #ifndef CONFIG_S390
 	if (npages && !new.rmap) {
-		new.rmap = vmalloc(npages * sizeof(*new.rmap));
+		new.rmap = vzalloc(npages * sizeof(*new.rmap));
 
 		if (!new.rmap)
 			goto out_free;
 
-		memset(new.rmap, 0, npages * sizeof(*new.rmap));
-
 		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 	}
@@ -633,14 +713,11 @@
 			     >> KVM_HPAGE_GFN_SHIFT(level));
 		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
 
-		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
+		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
 
 		if (!new.lpage_info[i])
 			goto out_free;
 
-		memset(new.lpage_info[i], 0,
-		       lpages * sizeof(*new.lpage_info[i]));
-
 		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
 			new.lpage_info[i][0].write_count = 1;
 		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
@@ -661,12 +738,8 @@
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
-
-		new.dirty_bitmap = vmalloc(dirty_bytes);
-		if (!new.dirty_bitmap)
+		if (kvm_create_dirty_bitmap(&new) < 0)
 			goto out_free;
-		memset(new.dirty_bitmap, 0, dirty_bytes);
 		/* destroy any largepage mappings for dirty tracking */
 		if (old.npages)
 			flush_shadow = 1;
@@ -685,6 +758,7 @@
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		if (mem->slot >= slots->nmemslots)
 			slots->nmemslots = mem->slot + 1;
+		slots->generation++;
 		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
 
 		old_memslots = kvm->memslots;
@@ -719,6 +793,7 @@
 	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 	if (mem->slot >= slots->nmemslots)
 		slots->nmemslots = mem->slot + 1;
+	slots->generation++;
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (!npages) {
@@ -849,10 +924,10 @@
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
+						gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -863,6 +938,11 @@
 	}
 	return NULL;
 }
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
+}
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -925,12 +1005,9 @@
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 				     gfn_t *nr_pages)
 {
-	struct kvm_memory_slot *slot;
-
-	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 
@@ -942,28 +1019,61 @@
 
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
-	return gfn_to_hva_many(kvm, gfn, NULL);
+	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
+static pfn_t get_fault_pfn(void)
+{
+	get_page(fault_page);
+	return fault_pfn;
+}
+
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
+			bool *async, bool write_fault, bool *writable)
 {
 	struct page *page[1];
-	int npages;
+	int npages = 0;
 	pfn_t pfn;
 
-	if (atomic)
+	/* we can do it either atomically or asynchronously, not both */
+	BUG_ON(atomic && async);
+
+	BUG_ON(!write_fault && !writable);
+
+	if (writable)
+		*writable = true;
+
+	if (atomic || async)
 		npages = __get_user_pages_fast(addr, 1, 1, page);
-	else {
+
+	if (unlikely(npages != 1) && !atomic) {
 		might_sleep();
-		npages = get_user_pages_fast(addr, 1, 1, page);
+
+		if (writable)
+			*writable = write_fault;
+
+		npages = get_user_pages_fast(addr, 1, write_fault, page);
+
+		/* map read fault as writable if possible */
+		if (unlikely(!write_fault) && npages == 1) {
+			struct page *wpage[1];
+
+			npages = __get_user_pages_fast(addr, 1, 1, wpage);
+			if (npages == 1) {
+				*writable = true;
+				put_page(page[0]);
+				page[0] = wpage[0];
+			}
+			npages = 1;
+		}
 	}
 
 	if (unlikely(npages != 1)) {
 		struct vm_area_struct *vma;
 
 		if (atomic)
-			goto return_fault_page;
+			return get_fault_pfn();
 
 		down_read(&current->mm->mmap_sem);
 		if (is_hwpoison_address(addr)) {
@@ -972,19 +1082,20 @@
 			return page_to_pfn(hwpoison_page);
 		}
 
-		vma = find_vma(current->mm, addr);
+		vma = find_vma_intersection(current->mm, addr, addr+1);
 
-		if (vma == NULL || addr < vma->vm_start ||
-		    !(vma->vm_flags & VM_PFNMAP)) {
-			up_read(&current->mm->mmap_sem);
-return_fault_page:
-			get_page(fault_page);
-			return page_to_pfn(fault_page);
+		if (vma == NULL)
+			pfn = get_fault_pfn();
+		else if ((vma->vm_flags & VM_PFNMAP)) {
+			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+				vma->vm_pgoff;
+			BUG_ON(!kvm_is_mmio_pfn(pfn));
+		} else {
+			if (async && (vma->vm_flags & VM_WRITE))
+				*async = true;
+			pfn = get_fault_pfn();
 		}
-
-		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 		up_read(&current->mm->mmap_sem);
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
 	} else
 		pfn = page_to_pfn(page[0]);
 
@@ -993,40 +1104,58 @@
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
 {
-	return hva_to_pfn(kvm, addr, true);
+	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
 
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
+			  bool write_fault, bool *writable)
 {
 	unsigned long addr;
 
+	if (async)
+		*async = false;
+
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr)) {
 		get_page(bad_page);
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr, atomic);
+	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
 }
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, true);
+	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable)
+{
+	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
+
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false);
+	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable)
+{
+	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
+
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr, false);
+	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
 }
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
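A brief usage sketch for the new writable out-parameter (illustrative, not code from this series; kvm and gfn are assumed locals): a caller resolving a read fault can still learn whether the host mapping turned out to be writable and use that as a hint.

	bool writable;
	pfn_t pfn;

	/* Read fault: write access is not required ... */
	pfn = gfn_to_pfn_prot(kvm, gfn, false, &writable);
	if (!is_error_pfn(pfn) && writable) {
		/*
		 * ... but the hva was mappable read-write, so the shadow
		 * entry may be created writable up front, avoiding a
		 * second fault on the first guest write.
		 */
	}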
@@ -1035,7 +1164,7 @@
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
 
@@ -1219,9 +1348,51 @@
 	return 0;
 }
 
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int offset = offset_in_page(gpa);
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+
+	ghc->gpa = gpa;
+	ghc->generation = slots->generation;
+	ghc->memslot = __gfn_to_memslot(slots, gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+	if (!kvm_is_error_hva(ghc->hva))
+		ghc->hva += offset;
+	else
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int r;
+
+	if (slots->generation != ghc->generation)
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+	if (kvm_is_error_hva(ghc->hva))
+		return -EFAULT;
+
+	r = copy_to_user((void __user *)ghc->hva, data, len);
+	if (r)
+		return -EFAULT;
+	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
-	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
+	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
+				    offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
 
@@ -1244,11 +1415,9 @@
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
-
-	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
@@ -1256,6 +1425,14 @@
 	}
 }
 
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	mark_page_dirty_in_slot(kvm, memslot, gfn);
+}
+
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */
@@ -1457,6 +1634,7 @@
 		if (arg)
 			goto out;
 		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
+		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
 		break;
 	case KVM_GET_REGS: {
 		struct kvm_regs *kvm_regs;
@@ -1824,7 +2002,7 @@
 
 static int kvm_dev_ioctl_create_vm(void)
 {
-	int fd, r;
+	int r;
 	struct kvm *kvm;
 
 	kvm = kvm_create_vm();
@@ -1837,11 +2015,11 @@
 		return r;
 	}
 #endif
-	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
-	if (fd < 0)
+	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+	if (r < 0)
 		kvm_put_kvm(kvm);
 
-	return fd;
+	return r;
 }
 
 static long kvm_dev_ioctl_check_extension_generic(long arg)
@@ -1922,7 +2100,7 @@
 	&kvm_chardev_ops,
 };
 
-static void hardware_enable(void *junk)
+static void hardware_enable_nolock(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 	int r;
@@ -1942,7 +2120,14 @@
 	}
 }
 
-static void hardware_disable(void *junk)
+static void hardware_enable(void *junk)
+{
+	spin_lock(&kvm_lock);
+	hardware_enable_nolock(junk);
+	spin_unlock(&kvm_lock);
+}
+
+static void hardware_disable_nolock(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 
@@ -1952,13 +2137,20 @@
 	kvm_arch_hardware_disable(NULL);
 }
 
+static void hardware_disable(void *junk)
+{
+	spin_lock(&kvm_lock);
+	hardware_disable_nolock(junk);
+	spin_unlock(&kvm_lock);
+}
+
 static void hardware_disable_all_nolock(void)
 {
 	BUG_ON(!kvm_usage_count);
 
 	kvm_usage_count--;
 	if (!kvm_usage_count)
-		on_each_cpu(hardware_disable, NULL, 1);
+		on_each_cpu(hardware_disable_nolock, NULL, 1);
 }
 
 static void hardware_disable_all(void)
@@ -1977,7 +2169,7 @@
 	kvm_usage_count++;
 	if (kvm_usage_count == 1) {
 		atomic_set(&hardware_enable_failed, 0);
-		on_each_cpu(hardware_enable, NULL, 1);
+		on_each_cpu(hardware_enable_nolock, NULL, 1);
 
 		if (atomic_read(&hardware_enable_failed)) {
 			hardware_disable_all_nolock();
@@ -2008,27 +2200,19 @@
 	case CPU_STARTING:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
-		spin_lock(&kvm_lock);
 		hardware_enable(NULL);
-		spin_unlock(&kvm_lock);
 		break;
 	}
 	return NOTIFY_OK;
 }
 
 
-asmlinkage void kvm_handle_fault_on_reboot(void)
+asmlinkage void kvm_spurious_fault(void)
 {
-	if (kvm_rebooting) {
-		/* spin while reset goes on */
-		local_irq_enable();
-		while (true)
-			cpu_relax();
-	}
 	/* Fault while not rebooting.  We want the trace. */
 	BUG();
 }
-EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
+EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
@@ -2041,7 +2225,7 @@
 	 */
 	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
 	kvm_rebooting = true;
-	on_each_cpu(hardware_disable, NULL, 1);
+	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	return NOTIFY_OK;
 }
 
@@ -2211,7 +2395,7 @@
 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 {
 	if (kvm_usage_count)
-		hardware_disable(NULL);
+		hardware_disable_nolock(NULL);
 	return 0;
 }
 
@@ -2219,7 +2403,7 @@
 {
 	if (kvm_usage_count) {
 		WARN_ON(spin_is_locked(&kvm_lock));
-		hardware_enable(NULL);
+		hardware_enable_nolock(NULL);
 	}
 	return 0;
 }
@@ -2336,6 +2520,10 @@
 		goto out_free_5;
 	}
 
+	r = kvm_async_pf_init();
+	if (r)
+		goto out_free;
+
 	kvm_chardev_ops.owner = module;
 	kvm_vm_fops.owner = module;
 	kvm_vcpu_fops.owner = module;
@@ -2343,7 +2531,7 @@
 	r = misc_register(&kvm_dev);
 	if (r) {
 		printk(KERN_ERR "kvm: misc device register failed\n");
-		goto out_free;
+		goto out_unreg;
 	}
 
 	kvm_preempt_ops.sched_in = kvm_sched_in;
@@ -2353,6 +2541,8 @@
 
 	return 0;
 
+out_unreg:
+	kvm_async_pf_deinit();
 out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
 out_free_5:
@@ -2385,11 +2575,12 @@
 	kvm_exit_debug();
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
+	kvm_async_pf_deinit();
 	sysdev_unregister(&kvm_sysdev);
 	sysdev_class_unregister(&kvm_sysdev_class);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
-	on_each_cpu(hardware_disable, NULL, 1);
+	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
 	free_cpumask_var(cpus_hardware_enabled);